| repo_id | size | file_path | content |
|---|---|---|---|
| marvin-hansen/iggy-streaming-system | 42,272 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/p384_montjmixadd_alt.S |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point mixed addition on NIST curve P-384 in Montgomery-Jacobian coordinates
//
// extern void p384_montjmixadd_alt
// (uint64_t p3[static 18],uint64_t p1[static 18],uint64_t p2[static 12]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
// The "mixed" part means that p2 only has x and y coordinates, with the
// implicit z coordinate assumed to be the identity.
//
// Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjmixadd_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjmixadd_alt)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 48
// Stable homes for input arguments during main code sequence
#define input_z x24
#define input_x x25
#define input_y x26
// Pointer-offset pairs for inputs and outputs
#define x_1 input_x, #0
#define y_1 input_x, #NUMSIZE
#define z_1 input_x, #(2*NUMSIZE)
#define x_2 input_y, #0
#define y_2 input_y, #NUMSIZE
#define x_3 input_z, #0
#define y_3 input_z, #NUMSIZE
#define z_3 input_z, #(2*NUMSIZE)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define zp2 sp, #(NUMSIZE*0)
#define ww sp, #(NUMSIZE*0)
#define resx sp, #(NUMSIZE*0)
#define yd sp, #(NUMSIZE*1)
#define y2a sp, #(NUMSIZE*1)
#define x2a sp, #(NUMSIZE*2)
#define zzx2 sp, #(NUMSIZE*2)
#define zz sp, #(NUMSIZE*3)
#define t1 sp, #(NUMSIZE*3)
#define t2 sp, #(NUMSIZE*4)
#define zzx1 sp, #(NUMSIZE*4)
#define resy sp, #(NUMSIZE*4)
#define xd sp, #(NUMSIZE*5)
#define resz sp, #(NUMSIZE*5)
#define NSPACE (NUMSIZE*6)
// Corresponds exactly to bignum_montmul_p384_alt
#define montmul_p384(P0,P1,P2) \
ldp x3, x4, [P1]; \
ldp x5, x6, [P2]; \
mul x12, x3, x5; \
umulh x13, x3, x5; \
mul x11, x3, x6; \
umulh x14, x3, x6; \
adds x13, x13, x11; \
ldp x7, x8, [P2+16]; \
mul x11, x3, x7; \
umulh x15, x3, x7; \
adcs x14, x14, x11; \
mul x11, x3, x8; \
umulh x16, x3, x8; \
adcs x15, x15, x11; \
ldp x9, x10, [P2+32]; \
mul x11, x3, x9; \
umulh x17, x3, x9; \
adcs x16, x16, x11; \
mul x11, x3, x10; \
umulh x19, x3, x10; \
adcs x17, x17, x11; \
adc x19, x19, xzr; \
mul x11, x4, x5; \
adds x13, x13, x11; \
mul x11, x4, x6; \
adcs x14, x14, x11; \
mul x11, x4, x7; \
adcs x15, x15, x11; \
mul x11, x4, x8; \
adcs x16, x16, x11; \
mul x11, x4, x9; \
adcs x17, x17, x11; \
mul x11, x4, x10; \
adcs x19, x19, x11; \
cset x20, cs; \
umulh x11, x4, x5; \
adds x14, x14, x11; \
umulh x11, x4, x6; \
adcs x15, x15, x11; \
umulh x11, x4, x7; \
adcs x16, x16, x11; \
umulh x11, x4, x8; \
adcs x17, x17, x11; \
umulh x11, x4, x9; \
adcs x19, x19, x11; \
umulh x11, x4, x10; \
adc x20, x20, x11; \
ldp x3, x4, [P1+16]; \
mul x11, x3, x5; \
adds x14, x14, x11; \
mul x11, x3, x6; \
adcs x15, x15, x11; \
mul x11, x3, x7; \
adcs x16, x16, x11; \
mul x11, x3, x8; \
adcs x17, x17, x11; \
mul x11, x3, x9; \
adcs x19, x19, x11; \
mul x11, x3, x10; \
adcs x20, x20, x11; \
cset x21, cs; \
umulh x11, x3, x5; \
adds x15, x15, x11; \
umulh x11, x3, x6; \
adcs x16, x16, x11; \
umulh x11, x3, x7; \
adcs x17, x17, x11; \
umulh x11, x3, x8; \
adcs x19, x19, x11; \
umulh x11, x3, x9; \
adcs x20, x20, x11; \
umulh x11, x3, x10; \
adc x21, x21, x11; \
mul x11, x4, x5; \
adds x15, x15, x11; \
mul x11, x4, x6; \
adcs x16, x16, x11; \
mul x11, x4, x7; \
adcs x17, x17, x11; \
mul x11, x4, x8; \
adcs x19, x19, x11; \
mul x11, x4, x9; \
adcs x20, x20, x11; \
mul x11, x4, x10; \
adcs x21, x21, x11; \
cset x22, cs; \
umulh x11, x4, x5; \
adds x16, x16, x11; \
umulh x11, x4, x6; \
adcs x17, x17, x11; \
umulh x11, x4, x7; \
adcs x19, x19, x11; \
umulh x11, x4, x8; \
adcs x20, x20, x11; \
umulh x11, x4, x9; \
adcs x21, x21, x11; \
umulh x11, x4, x10; \
adc x22, x22, x11; \
ldp x3, x4, [P1+32]; \
mul x11, x3, x5; \
adds x16, x16, x11; \
mul x11, x3, x6; \
adcs x17, x17, x11; \
mul x11, x3, x7; \
adcs x19, x19, x11; \
mul x11, x3, x8; \
adcs x20, x20, x11; \
mul x11, x3, x9; \
adcs x21, x21, x11; \
mul x11, x3, x10; \
adcs x22, x22, x11; \
cset x2, cs; \
umulh x11, x3, x5; \
adds x17, x17, x11; \
umulh x11, x3, x6; \
adcs x19, x19, x11; \
umulh x11, x3, x7; \
adcs x20, x20, x11; \
umulh x11, x3, x8; \
adcs x21, x21, x11; \
umulh x11, x3, x9; \
adcs x22, x22, x11; \
umulh x11, x3, x10; \
adc x2, x2, x11; \
mul x11, x4, x5; \
adds x17, x17, x11; \
mul x11, x4, x6; \
adcs x19, x19, x11; \
mul x11, x4, x7; \
adcs x20, x20, x11; \
mul x11, x4, x8; \
adcs x21, x21, x11; \
mul x11, x4, x9; \
adcs x22, x22, x11; \
mul x11, x4, x10; \
adcs x2, x2, x11; \
cset x1, cs; \
umulh x11, x4, x5; \
adds x19, x19, x11; \
umulh x11, x4, x6; \
adcs x20, x20, x11; \
umulh x11, x4, x7; \
adcs x21, x21, x11; \
umulh x11, x4, x8; \
adcs x22, x22, x11; \
umulh x11, x4, x9; \
adcs x2, x2, x11; \
umulh x11, x4, x10; \
adc x1, x1, x11; \
lsl x7, x12, #32; \
add x12, x7, x12; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x12; \
mov x6, #0xffffffff; \
mul x5, x6, x12; \
umulh x6, x6, x12; \
adds x7, x7, x5; \
adcs x6, x6, x12; \
adc x5, xzr, xzr; \
subs x13, x13, x7; \
sbcs x14, x14, x6; \
sbcs x15, x15, x5; \
sbcs x16, x16, xzr; \
sbcs x17, x17, xzr; \
sbc x12, x12, xzr; \
lsl x7, x13, #32; \
add x13, x7, x13; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x13; \
mov x6, #0xffffffff; \
mul x5, x6, x13; \
umulh x6, x6, x13; \
adds x7, x7, x5; \
adcs x6, x6, x13; \
adc x5, xzr, xzr; \
subs x14, x14, x7; \
sbcs x15, x15, x6; \
sbcs x16, x16, x5; \
sbcs x17, x17, xzr; \
sbcs x12, x12, xzr; \
sbc x13, x13, xzr; \
lsl x7, x14, #32; \
add x14, x7, x14; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x14; \
mov x6, #0xffffffff; \
mul x5, x6, x14; \
umulh x6, x6, x14; \
adds x7, x7, x5; \
adcs x6, x6, x14; \
adc x5, xzr, xzr; \
subs x15, x15, x7; \
sbcs x16, x16, x6; \
sbcs x17, x17, x5; \
sbcs x12, x12, xzr; \
sbcs x13, x13, xzr; \
sbc x14, x14, xzr; \
lsl x7, x15, #32; \
add x15, x7, x15; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x15; \
mov x6, #0xffffffff; \
mul x5, x6, x15; \
umulh x6, x6, x15; \
adds x7, x7, x5; \
adcs x6, x6, x15; \
adc x5, xzr, xzr; \
subs x16, x16, x7; \
sbcs x17, x17, x6; \
sbcs x12, x12, x5; \
sbcs x13, x13, xzr; \
sbcs x14, x14, xzr; \
sbc x15, x15, xzr; \
lsl x7, x16, #32; \
add x16, x7, x16; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x16; \
mov x6, #0xffffffff; \
mul x5, x6, x16; \
umulh x6, x6, x16; \
adds x7, x7, x5; \
adcs x6, x6, x16; \
adc x5, xzr, xzr; \
subs x17, x17, x7; \
sbcs x12, x12, x6; \
sbcs x13, x13, x5; \
sbcs x14, x14, xzr; \
sbcs x15, x15, xzr; \
sbc x16, x16, xzr; \
lsl x7, x17, #32; \
add x17, x7, x17; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x17; \
mov x6, #0xffffffff; \
mul x5, x6, x17; \
umulh x6, x6, x17; \
adds x7, x7, x5; \
adcs x6, x6, x17; \
adc x5, xzr, xzr; \
subs x12, x12, x7; \
sbcs x13, x13, x6; \
sbcs x14, x14, x5; \
sbcs x15, x15, xzr; \
sbcs x16, x16, xzr; \
sbc x17, x17, xzr; \
adds x12, x12, x19; \
adcs x13, x13, x20; \
adcs x14, x14, x21; \
adcs x15, x15, x22; \
adcs x16, x16, x2; \
adcs x17, x17, x1; \
adc x10, xzr, xzr; \
mov x11, #0xffffffff00000001; \
adds x19, x12, x11; \
mov x11, #0xffffffff; \
adcs x20, x13, x11; \
mov x11, #0x1; \
adcs x21, x14, x11; \
adcs x22, x15, xzr; \
adcs x2, x16, xzr; \
adcs x1, x17, xzr; \
adcs x10, x10, xzr; \
csel x12, x12, x19, eq; \
csel x13, x13, x20, eq; \
csel x14, x14, x21, eq; \
csel x15, x15, x22, eq; \
csel x16, x16, x2, eq; \
csel x17, x17, x1, eq; \
stp x12, x13, [P0]; \
stp x14, x15, [P0+16]; \
stp x16, x17, [P0+32]
// Corresponds exactly to bignum_montsqr_p384_alt
#define montsqr_p384(P0,P1) \
ldp x2, x3, [P1]; \
mul x9, x2, x3; \
umulh x10, x2, x3; \
ldp x4, x5, [P1+16]; \
mul x8, x2, x4; \
adds x10, x10, x8; \
mul x11, x2, x5; \
mul x8, x3, x4; \
adcs x11, x11, x8; \
umulh x12, x2, x5; \
mul x8, x3, x5; \
adcs x12, x12, x8; \
ldp x6, x7, [P1+32]; \
mul x13, x2, x7; \
mul x8, x3, x6; \
adcs x13, x13, x8; \
umulh x14, x2, x7; \
mul x8, x3, x7; \
adcs x14, x14, x8; \
mul x15, x5, x6; \
adcs x15, x15, xzr; \
umulh x16, x5, x6; \
adc x16, x16, xzr; \
umulh x8, x2, x4; \
adds x11, x11, x8; \
umulh x8, x3, x4; \
adcs x12, x12, x8; \
umulh x8, x3, x5; \
adcs x13, x13, x8; \
umulh x8, x3, x6; \
adcs x14, x14, x8; \
umulh x8, x3, x7; \
adcs x15, x15, x8; \
adc x16, x16, xzr; \
mul x8, x2, x6; \
adds x12, x12, x8; \
mul x8, x4, x5; \
adcs x13, x13, x8; \
mul x8, x4, x6; \
adcs x14, x14, x8; \
mul x8, x4, x7; \
adcs x15, x15, x8; \
mul x8, x5, x7; \
adcs x16, x16, x8; \
mul x17, x6, x7; \
adcs x17, x17, xzr; \
umulh x19, x6, x7; \
adc x19, x19, xzr; \
umulh x8, x2, x6; \
adds x13, x13, x8; \
umulh x8, x4, x5; \
adcs x14, x14, x8; \
umulh x8, x4, x6; \
adcs x15, x15, x8; \
umulh x8, x4, x7; \
adcs x16, x16, x8; \
umulh x8, x5, x7; \
adcs x17, x17, x8; \
adc x19, x19, xzr; \
adds x9, x9, x9; \
adcs x10, x10, x10; \
adcs x11, x11, x11; \
adcs x12, x12, x12; \
adcs x13, x13, x13; \
adcs x14, x14, x14; \
adcs x15, x15, x15; \
adcs x16, x16, x16; \
adcs x17, x17, x17; \
adcs x19, x19, x19; \
cset x20, hs; \
umulh x8, x2, x2; \
mul x2, x2, x2; \
adds x9, x9, x8; \
mul x8, x3, x3; \
adcs x10, x10, x8; \
umulh x8, x3, x3; \
adcs x11, x11, x8; \
mul x8, x4, x4; \
adcs x12, x12, x8; \
umulh x8, x4, x4; \
adcs x13, x13, x8; \
mul x8, x5, x5; \
adcs x14, x14, x8; \
umulh x8, x5, x5; \
adcs x15, x15, x8; \
mul x8, x6, x6; \
adcs x16, x16, x8; \
umulh x8, x6, x6; \
adcs x17, x17, x8; \
mul x8, x7, x7; \
adcs x19, x19, x8; \
umulh x8, x7, x7; \
adc x20, x20, x8; \
lsl x5, x2, #32; \
add x2, x5, x2; \
mov x5, #-4294967295; \
umulh x5, x5, x2; \
mov x4, #4294967295; \
mul x3, x4, x2; \
umulh x4, x4, x2; \
adds x5, x5, x3; \
adcs x4, x4, x2; \
adc x3, xzr, xzr; \
subs x9, x9, x5; \
sbcs x10, x10, x4; \
sbcs x11, x11, x3; \
sbcs x12, x12, xzr; \
sbcs x13, x13, xzr; \
sbc x2, x2, xzr; \
lsl x5, x9, #32; \
add x9, x5, x9; \
mov x5, #-4294967295; \
umulh x5, x5, x9; \
mov x4, #4294967295; \
mul x3, x4, x9; \
umulh x4, x4, x9; \
adds x5, x5, x3; \
adcs x4, x4, x9; \
adc x3, xzr, xzr; \
subs x10, x10, x5; \
sbcs x11, x11, x4; \
sbcs x12, x12, x3; \
sbcs x13, x13, xzr; \
sbcs x2, x2, xzr; \
sbc x9, x9, xzr; \
lsl x5, x10, #32; \
add x10, x5, x10; \
mov x5, #-4294967295; \
umulh x5, x5, x10; \
mov x4, #4294967295; \
mul x3, x4, x10; \
umulh x4, x4, x10; \
adds x5, x5, x3; \
adcs x4, x4, x10; \
adc x3, xzr, xzr; \
subs x11, x11, x5; \
sbcs x12, x12, x4; \
sbcs x13, x13, x3; \
sbcs x2, x2, xzr; \
sbcs x9, x9, xzr; \
sbc x10, x10, xzr; \
lsl x5, x11, #32; \
add x11, x5, x11; \
mov x5, #-4294967295; \
umulh x5, x5, x11; \
mov x4, #4294967295; \
mul x3, x4, x11; \
umulh x4, x4, x11; \
adds x5, x5, x3; \
adcs x4, x4, x11; \
adc x3, xzr, xzr; \
subs x12, x12, x5; \
sbcs x13, x13, x4; \
sbcs x2, x2, x3; \
sbcs x9, x9, xzr; \
sbcs x10, x10, xzr; \
sbc x11, x11, xzr; \
lsl x5, x12, #32; \
add x12, x5, x12; \
mov x5, #-4294967295; \
umulh x5, x5, x12; \
mov x4, #4294967295; \
mul x3, x4, x12; \
umulh x4, x4, x12; \
adds x5, x5, x3; \
adcs x4, x4, x12; \
adc x3, xzr, xzr; \
subs x13, x13, x5; \
sbcs x2, x2, x4; \
sbcs x9, x9, x3; \
sbcs x10, x10, xzr; \
sbcs x11, x11, xzr; \
sbc x12, x12, xzr; \
lsl x5, x13, #32; \
add x13, x5, x13; \
mov x5, #-4294967295; \
umulh x5, x5, x13; \
mov x4, #4294967295; \
mul x3, x4, x13; \
umulh x4, x4, x13; \
adds x5, x5, x3; \
adcs x4, x4, x13; \
adc x3, xzr, xzr; \
subs x2, x2, x5; \
sbcs x9, x9, x4; \
sbcs x10, x10, x3; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbc x13, x13, xzr; \
adds x2, x2, x14; \
adcs x9, x9, x15; \
adcs x10, x10, x16; \
adcs x11, x11, x17; \
adcs x12, x12, x19; \
adcs x13, x13, x20; \
adc x6, xzr, xzr; \
mov x8, #-4294967295; \
adds x14, x2, x8; \
mov x8, #4294967295; \
adcs x15, x9, x8; \
mov x8, #1; \
adcs x16, x10, x8; \
adcs x17, x11, xzr; \
adcs x19, x12, xzr; \
adcs x20, x13, xzr; \
adcs x6, x6, xzr; \
csel x2, x2, x14, eq; \
csel x9, x9, x15, eq; \
csel x10, x10, x16, eq; \
csel x11, x11, x17, eq; \
csel x12, x12, x19, eq; \
csel x13, x13, x20, eq; \
stp x2, x9, [P0]; \
stp x10, x11, [P0+16]; \
stp x12, x13, [P0+32]
// Almost-Montgomery variant, which we use when the result is only an input to
// other multiplications whose other argument is fully reduced (which is always
// safe). In fact, with the Karatsuba-based Montgomery mul here, we don't even
// *need* the restriction that the other argument is reduced.
#define amontsqr_p384(P0,P1) \
ldp x2, x3, [P1]; \
mul x9, x2, x3; \
umulh x10, x2, x3; \
ldp x4, x5, [P1+16]; \
mul x8, x2, x4; \
adds x10, x10, x8; \
mul x11, x2, x5; \
mul x8, x3, x4; \
adcs x11, x11, x8; \
umulh x12, x2, x5; \
mul x8, x3, x5; \
adcs x12, x12, x8; \
ldp x6, x7, [P1+32]; \
mul x13, x2, x7; \
mul x8, x3, x6; \
adcs x13, x13, x8; \
umulh x14, x2, x7; \
mul x8, x3, x7; \
adcs x14, x14, x8; \
mul x15, x5, x6; \
adcs x15, x15, xzr; \
umulh x16, x5, x6; \
adc x16, x16, xzr; \
umulh x8, x2, x4; \
adds x11, x11, x8; \
umulh x8, x3, x4; \
adcs x12, x12, x8; \
umulh x8, x3, x5; \
adcs x13, x13, x8; \
umulh x8, x3, x6; \
adcs x14, x14, x8; \
umulh x8, x3, x7; \
adcs x15, x15, x8; \
adc x16, x16, xzr; \
mul x8, x2, x6; \
adds x12, x12, x8; \
mul x8, x4, x5; \
adcs x13, x13, x8; \
mul x8, x4, x6; \
adcs x14, x14, x8; \
mul x8, x4, x7; \
adcs x15, x15, x8; \
mul x8, x5, x7; \
adcs x16, x16, x8; \
mul x17, x6, x7; \
adcs x17, x17, xzr; \
umulh x19, x6, x7; \
adc x19, x19, xzr; \
umulh x8, x2, x6; \
adds x13, x13, x8; \
umulh x8, x4, x5; \
adcs x14, x14, x8; \
umulh x8, x4, x6; \
adcs x15, x15, x8; \
umulh x8, x4, x7; \
adcs x16, x16, x8; \
umulh x8, x5, x7; \
adcs x17, x17, x8; \
adc x19, x19, xzr; \
adds x9, x9, x9; \
adcs x10, x10, x10; \
adcs x11, x11, x11; \
adcs x12, x12, x12; \
adcs x13, x13, x13; \
adcs x14, x14, x14; \
adcs x15, x15, x15; \
adcs x16, x16, x16; \
adcs x17, x17, x17; \
adcs x19, x19, x19; \
cset x20, hs; \
umulh x8, x2, x2; \
mul x2, x2, x2; \
adds x9, x9, x8; \
mul x8, x3, x3; \
adcs x10, x10, x8; \
umulh x8, x3, x3; \
adcs x11, x11, x8; \
mul x8, x4, x4; \
adcs x12, x12, x8; \
umulh x8, x4, x4; \
adcs x13, x13, x8; \
mul x8, x5, x5; \
adcs x14, x14, x8; \
umulh x8, x5, x5; \
adcs x15, x15, x8; \
mul x8, x6, x6; \
adcs x16, x16, x8; \
umulh x8, x6, x6; \
adcs x17, x17, x8; \
mul x8, x7, x7; \
adcs x19, x19, x8; \
umulh x8, x7, x7; \
adc x20, x20, x8; \
lsl x5, x2, #32; \
add x2, x5, x2; \
mov x5, #-4294967295; \
umulh x5, x5, x2; \
mov x4, #4294967295; \
mul x3, x4, x2; \
umulh x4, x4, x2; \
adds x5, x5, x3; \
adcs x4, x4, x2; \
adc x3, xzr, xzr; \
subs x9, x9, x5; \
sbcs x10, x10, x4; \
sbcs x11, x11, x3; \
sbcs x12, x12, xzr; \
sbcs x13, x13, xzr; \
sbc x2, x2, xzr; \
lsl x5, x9, #32; \
add x9, x5, x9; \
mov x5, #-4294967295; \
umulh x5, x5, x9; \
mov x4, #4294967295; \
mul x3, x4, x9; \
umulh x4, x4, x9; \
adds x5, x5, x3; \
adcs x4, x4, x9; \
adc x3, xzr, xzr; \
subs x10, x10, x5; \
sbcs x11, x11, x4; \
sbcs x12, x12, x3; \
sbcs x13, x13, xzr; \
sbcs x2, x2, xzr; \
sbc x9, x9, xzr; \
lsl x5, x10, #32; \
add x10, x5, x10; \
mov x5, #-4294967295; \
umulh x5, x5, x10; \
mov x4, #4294967295; \
mul x3, x4, x10; \
umulh x4, x4, x10; \
adds x5, x5, x3; \
adcs x4, x4, x10; \
adc x3, xzr, xzr; \
subs x11, x11, x5; \
sbcs x12, x12, x4; \
sbcs x13, x13, x3; \
sbcs x2, x2, xzr; \
sbcs x9, x9, xzr; \
sbc x10, x10, xzr; \
lsl x5, x11, #32; \
add x11, x5, x11; \
mov x5, #-4294967295; \
umulh x5, x5, x11; \
mov x4, #4294967295; \
mul x3, x4, x11; \
umulh x4, x4, x11; \
adds x5, x5, x3; \
adcs x4, x4, x11; \
adc x3, xzr, xzr; \
subs x12, x12, x5; \
sbcs x13, x13, x4; \
sbcs x2, x2, x3; \
sbcs x9, x9, xzr; \
sbcs x10, x10, xzr; \
sbc x11, x11, xzr; \
lsl x5, x12, #32; \
add x12, x5, x12; \
mov x5, #-4294967295; \
umulh x5, x5, x12; \
mov x4, #4294967295; \
mul x3, x4, x12; \
umulh x4, x4, x12; \
adds x5, x5, x3; \
adcs x4, x4, x12; \
adc x3, xzr, xzr; \
subs x13, x13, x5; \
sbcs x2, x2, x4; \
sbcs x9, x9, x3; \
sbcs x10, x10, xzr; \
sbcs x11, x11, xzr; \
sbc x12, x12, xzr; \
lsl x5, x13, #32; \
add x13, x5, x13; \
mov x5, #-4294967295; \
umulh x5, x5, x13; \
mov x4, #4294967295; \
mul x3, x4, x13; \
umulh x4, x4, x13; \
adds x5, x5, x3; \
adcs x4, x4, x13; \
adc x3, xzr, xzr; \
subs x2, x2, x5; \
sbcs x9, x9, x4; \
sbcs x10, x10, x3; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbc x13, x13, xzr; \
adds x2, x2, x14; \
adcs x9, x9, x15; \
adcs x10, x10, x16; \
adcs x11, x11, x17; \
adcs x12, x12, x19; \
adcs x13, x13, x20; \
mov x14, #-4294967295; \
mov x15, #4294967295; \
csel x14, x14, xzr, cs; \
csel x15, x15, xzr, cs; \
cset x16, cs; \
adds x2, x2, x14; \
adcs x9, x9, x15; \
adcs x10, x10, x16; \
adcs x11, x11, xzr; \
adcs x12, x12, xzr; \
adc x13, x13, xzr; \
stp x2, x9, [P0]; \
stp x10, x11, [P0+16]; \
stp x12, x13, [P0+32]
// Corresponds exactly to bignum_sub_p384
#define sub_p384(P0,P1,P2) \
ldp x5, x6, [P1]; \
ldp x4, x3, [P2]; \
subs x5, x5, x4; \
sbcs x6, x6, x3; \
ldp x7, x8, [P1+16]; \
ldp x4, x3, [P2+16]; \
sbcs x7, x7, x4; \
sbcs x8, x8, x3; \
ldp x9, x10, [P1+32]; \
ldp x4, x3, [P2+32]; \
sbcs x9, x9, x4; \
sbcs x10, x10, x3; \
csetm x3, lo; \
mov x4, #4294967295; \
and x4, x4, x3; \
adds x5, x5, x4; \
eor x4, x4, x3; \
adcs x6, x6, x4; \
mov x4, #-2; \
and x4, x4, x3; \
adcs x7, x7, x4; \
adcs x8, x8, x3; \
adcs x9, x9, x3; \
adc x10, x10, x3; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]
S2N_BN_SYMBOL(p384_montjmixadd_alt):
// Save regs and make room on stack for temporary variables
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
sub sp, sp, NSPACE
// Move the input arguments to stable places
mov input_z, x0
mov input_x, x1
mov input_y, x2
// Main code, just a sequence of basic field operations
// 8 * multiply + 3 * square + 7 * subtract
amontsqr_p384(zp2,z_1)
montmul_p384(y2a,z_1,y_2)
montmul_p384(x2a,zp2,x_2)
montmul_p384(y2a,zp2,y2a)
sub_p384(xd,x2a,x_1)
sub_p384(yd,y2a,y_1)
amontsqr_p384(zz,xd)
montsqr_p384(ww,yd)
montmul_p384(zzx1,zz,x_1)
montmul_p384(zzx2,zz,x2a)
sub_p384(resx,ww,zzx1)
sub_p384(t1,zzx2,zzx1)
montmul_p384(resz,xd,z_1)
sub_p384(resx,resx,zzx2)
sub_p384(t2,zzx1,resx)
montmul_p384(t1,t1,y_1)
montmul_p384(t2,yd,t2)
sub_p384(resy,t2,t1)
// Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence)
ldp x0, x1, [z_1]
ldp x2, x3, [z_1+16]
ldp x4, x5, [z_1+32]
orr x6, x0, x1
orr x7, x2, x3
orr x8, x4, x5
orr x6, x6, x7
orr x6, x6, x8
cmp x6, xzr
// Multiplex: if p1 <> 0 just copy the computed result from the staging area.
// If p1 = 0 then return the point p2 augmented with a z = 1 coordinate (in
// Montgomery form so not the simple constant 1 but rather 2^384 - p_384),
// hence giving 0 + p2 = p2 for the final result.
ldp x0, x1, [resx]
ldp x19, x20, [x_2]
csel x0, x0, x19, ne
csel x1, x1, x20, ne
ldp x2, x3, [resx+16]
ldp x19, x20, [x_2+16]
csel x2, x2, x19, ne
csel x3, x3, x20, ne
ldp x4, x5, [resx+32]
ldp x19, x20, [x_2+32]
csel x4, x4, x19, ne
csel x5, x5, x20, ne
ldp x6, x7, [resy]
ldp x19, x20, [y_2]
csel x6, x6, x19, ne
csel x7, x7, x20, ne
ldp x8, x9, [resy+16]
ldp x19, x20, [y_2+16]
csel x8, x8, x19, ne
csel x9, x9, x20, ne
ldp x10, x11, [resy+32]
ldp x19, x20, [y_2+32]
csel x10, x10, x19, ne
csel x11, x11, x20, ne
ldp x12, x13, [resz]
mov x19, #0xffffffff00000001
mov x20, #0x00000000ffffffff
csel x12, x12, x19, ne
csel x13, x13, x20, ne
ldp x14, x15, [resz+16]
mov x19, #1
csel x14, x14, x19, ne
csel x15, x15, xzr, ne
ldp x16, x17, [resz+32]
csel x16, x16, xzr, ne
csel x17, x17, xzr, ne
stp x0, x1, [x_3]
stp x2, x3, [x_3+16]
stp x4, x5, [x_3+32]
stp x6, x7, [y_3]
stp x8, x9, [y_3+16]
stp x10, x11, [y_3+32]
stp x12, x13, [z_3]
stp x14, x15, [z_3+16]
stp x16, x17, [z_3+32]
// Restore stack and registers
add sp, sp, NSPACE
ldp x25, x26, [sp], 16
ldp x23, x24, [sp], 16
ldp x21, x22, [sp], 16
ldp x19, x20, [sp], 16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
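
The main code sequence above is easier to follow as a list of field operations. The sketch below restates that schedule in C; the fe type and the fe_mul/fe_sqr/fe_sub helpers are hypothetical stand-ins for the montmul_p384, montsqr_p384/amontsqr_p384 and sub_p384 macros (they are only declared here, not implemented), so this is a reading aid under those assumptions, not the library's API.

```c
#include <stdint.h>

typedef uint64_t fe[6];   /* hypothetical 6-word field element, Montgomery form */

/* Hypothetical helpers standing in for the assembly macros above. */
void fe_mul(fe r, const fe a, const fe b);   /* Montgomery multiply mod p_384 */
void fe_sqr(fe r, const fe a);               /* Montgomery square mod p_384   */
void fe_sub(fe r, const fe a, const fe b);   /* subtraction mod p_384         */

/* Mixed Jacobian addition schedule mirrored from the main code sequence:
   p3 = (x3,y3,z3) := p1 + p2 with p1 = (x1,y1,z1) and p2 = (x2,y2,z2 = 1). */
void p384_mixadd_sketch(fe x3, fe y3, fe z3,
                        const fe x1, const fe y1, const fe z1,
                        const fe x2, const fe y2)
{
    fe zp2, y2a, x2a, xd, yd, zz, ww, zzx1, zzx2, t1, t2;
    fe_sqr(zp2, z1);          /* amontsqr_p384(zp2, z_1)      */
    fe_mul(y2a, z1, y2);      /* montmul_p384(y2a, z_1, y_2)  */
    fe_mul(x2a, zp2, x2);     /* montmul_p384(x2a, zp2, x_2)  */
    fe_mul(y2a, zp2, y2a);    /* montmul_p384(y2a, zp2, y2a)  */
    fe_sub(xd, x2a, x1);      /* sub_p384(xd, x2a, x_1)       */
    fe_sub(yd, y2a, y1);      /* sub_p384(yd, y2a, y_1)       */
    fe_sqr(zz, xd);           /* amontsqr_p384(zz, xd)        */
    fe_sqr(ww, yd);           /* montsqr_p384(ww, yd)         */
    fe_mul(zzx1, zz, x1);     /* montmul_p384(zzx1, zz, x_1)  */
    fe_mul(zzx2, zz, x2a);    /* montmul_p384(zzx2, zz, x2a)  */
    fe_sub(x3, ww, zzx1);     /* sub_p384(resx, ww, zzx1)     */
    fe_sub(t1, zzx2, zzx1);   /* sub_p384(t1, zzx2, zzx1)     */
    fe_mul(z3, xd, z1);       /* montmul_p384(resz, xd, z_1)  */
    fe_sub(x3, x3, zzx2);     /* sub_p384(resx, resx, zzx2)   */
    fe_sub(t2, zzx1, x3);     /* sub_p384(t2, zzx1, resx)     */
    fe_mul(t1, t1, y1);       /* montmul_p384(t1, t1, y_1)    */
    fe_mul(t2, yd, t2);       /* montmul_p384(t2, yd, t2)     */
    fe_sub(y3, t2, t1);       /* sub_p384(resy, t2, t1)       */
    /* The real routine additionally checks z_1 = 0 and, if so, returns p2
       with z = 1 (in Montgomery form) instead of the values computed here. */
}
```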
|
| marvin-hansen/iggy-streaming-system | 5,313 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_tomont_p384.S |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert to Montgomery form z := (2^384 * x) mod p_384
// Input x[6]; output z[6]
//
// extern void bignum_tomont_p384
// (uint64_t z[static 6], uint64_t x[static 6]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_p384)
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_p384_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_p384_alt)
.text
.balign 4
// ----------------------------------------------------------------------------
// Core "x |-> (2^64 * x) mod p_384" macro, with x assumed to be < p_384.
// Input is in [d6;d5;d4;d3;d2;d1] and output in [d5;d4;d3;d2;d1;d0]
// using d6 as well as t1, t2, t3 as temporaries.
// ----------------------------------------------------------------------------
#define modstep_p384(d6,d5,d4,d3,d2,d1,d0, t1,t2,t3) \
/* Initial quotient approximation q = min (h + 1) (2^64 - 1) */ \
adds d6, d6, #1; \
csetm t3, cs; \
add d6, d6, t3; \
orn t3, xzr, t3; \
sub t2, d6, #1; \
sub t1, xzr, d6; \
/* Correction term [d6;t2;t1;d0] = q * (2^384 - p_384) */ \
lsl d0, t1, #32; \
extr t1, t2, t1, #32; \
lsr t2, t2, #32; \
adds d0, d0, d6; \
adcs t1, t1, xzr; \
adcs t2, t2, d6; \
adc d6, xzr, xzr; \
/* Addition to the initial value */ \
adds d1, d1, t1; \
adcs d2, d2, t2; \
adcs d3, d3, d6; \
adcs d4, d4, xzr; \
adcs d5, d5, xzr; \
adc t3, t3, xzr; \
/* Use net top of the 7-word answer in t3 for masked correction */ \
mov t1, #0x00000000ffffffff; \
and t1, t1, t3; \
adds d0, d0, t1; \
eor t1, t1, t3; \
adcs d1, d1, t1; \
mov t1, #0xfffffffffffffffe; \
and t1, t1, t3; \
adcs d2, d2, t1; \
adcs d3, d3, t3; \
adcs d4, d4, t3; \
adc d5, d5, t3
S2N_BN_SYMBOL(bignum_tomont_p384):
S2N_BN_SYMBOL(bignum_tomont_p384_alt):
#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x6
#define d5 x7
#define d6 x8
#define t1 x9
#define t2 x10
#define t3 x11
#define n0 x8
#define n1 x9
#define n2 x10
#define n3 x11
#define n4 x12
#define n5 x1
// Load the inputs
ldp d0, d1, [x1]
ldp d2, d3, [x1, #16]
ldp d4, d5, [x1, #32]
// Do an initial reduction to make sure this is < p_384, using just
// a copy of the bignum_mod_p384_6 code. This is needed to set up the
// invariant "input < p_384" for the main modular reduction steps.
mov n0, #0x00000000ffffffff
mov n1, #0xffffffff00000000
mov n2, #0xfffffffffffffffe
subs n0, d0, n0
sbcs n1, d1, n1
sbcs n2, d2, n2
adcs n3, d3, xzr
adcs n4, d4, xzr
adcs n5, d5, xzr
csel d0, d0, n0, cc
csel d1, d1, n1, cc
csel d2, d2, n2, cc
csel d3, d3, n3, cc
csel d4, d4, n4, cc
csel d5, d5, n5, cc
// Successively multiply by 2^64 and reduce
modstep_p384(d5,d4,d3,d2,d1,d0,d6, t1,t2,t3)
modstep_p384(d4,d3,d2,d1,d0,d6,d5, t1,t2,t3)
modstep_p384(d3,d2,d1,d0,d6,d5,d4, t1,t2,t3)
modstep_p384(d2,d1,d0,d6,d5,d4,d3, t1,t2,t3)
modstep_p384(d1,d0,d6,d5,d4,d3,d2, t1,t2,t3)
modstep_p384(d0,d6,d5,d4,d3,d2,d1, t1,t2,t3)
// Store the result and return
stp d1, d2, [x0]
stp d3, d4, [x0, #16]
stp d5, d6, [x0, #32]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
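
The routine above first reduces x below p_384 (the inlined bignum_mod_p384_6 copy) and then applies the word-shift step x |-> (2^64 * x) mod p_384 six times, since 2^384 = (2^64)^6. A minimal C sketch of that outer structure; modstep_p384_ref is a hypothetical helper standing in for one application of the modstep_p384 macro.

```c
#include <stdint.h>

/* Hypothetical helper: one step t := (2^64 * t) mod p_384 on a 6-word value,
   assuming t < p_384 on entry (mirrors the modstep_p384 macro above). */
void modstep_p384_ref(uint64_t t[6]);

/* z := (2^384 * x) mod p_384, assuming x has already been reduced below p_384. */
void tomont_p384_sketch(uint64_t z[6], const uint64_t x[6])
{
    for (int i = 0; i < 6; i++) z[i] = x[i];
    for (int step = 0; step < 6; step++)    /* 2^384 = (2^64)^6 */
        modstep_p384_ref(z);
}
```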
|
| marvin-hansen/iggy-streaming-system | 2,094 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_mod_p384_6.S |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Reduce modulo field characteristic, z := x mod p_384
// Input x[6]; output z[6]
//
// extern void bignum_mod_p384_6
// (uint64_t z[static 6], uint64_t x[static 6]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_p384_6)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_p384_6)
.text
.balign 4
#define z x0
#define x x1
#define n0 x2
#define n1 x3
#define n2 x4
#define n3 x5
#define n4 x6
#define n5 x7
#define d0 x8
#define d1 x9
#define d2 x10
#define d3 x11
#define d4 x12
#define d5 x13
S2N_BN_SYMBOL(bignum_mod_p384_6):
// Load the complicated lower three words of p_384 = [-1;-1;-1;n2;n1;n0]
mov n0, #0x00000000ffffffff
mov n1, #0xffffffff00000000
mov n2, #0xfffffffffffffffe
// Load the input number
ldp d0, d1, [x]
ldp d2, d3, [x, #16]
ldp d4, d5, [x, #32]
// Do the subtraction. Since the top three words of p_384 are all 1s,
// subtracting them with borrow is equivalent to adding zero with the
// inverted carry, so the top three steps devolve to adcs with xzr.
subs n0, d0, n0
sbcs n1, d1, n1
sbcs n2, d2, n2
adcs n3, d3, xzr
adcs n4, d4, xzr
adcs n5, d5, xzr
// Now if the carry is *clear* (inversion at work) the subtraction borrowed,
// i.e. x < p_384, so we should have done nothing and we reset each n_i = d_i
csel n0, d0, n0, cc
csel n1, d1, n1, cc
csel n2, d2, n2, cc
csel n3, d3, n3, cc
csel n4, d4, n4, cc
csel n5, d5, n5, cc
// Store the end result
stp n0, n1, [z]
stp n2, n3, [z, #16]
stp n4, n5, [z, #32]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
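
Since any 6-word input satisfies x < 2^384 < 2 * p_384, a single conditional subtraction of p_384 suffices, which is all the routine above does. A portable C restatement (a sketch only, using the compiler extension __int128 for the borrow chain), with p_384 = 2^384 - 2^128 - 2^96 + 2^32 - 1 spelled out as little-endian 64-bit words:

```c
#include <stdint.h>

/* p_384 = 2^384 - 2^128 - 2^96 + 2^32 - 1 as little-endian 64-bit words. */
static const uint64_t P384[6] = {
    0x00000000ffffffffULL, 0xffffffff00000000ULL, 0xfffffffffffffffeULL,
    0xffffffffffffffffULL, 0xffffffffffffffffULL, 0xffffffffffffffffULL
};

/* z := x mod p_384 by one conditional subtraction, as in the assembly above. */
void mod_p384_6_sketch(uint64_t z[6], const uint64_t x[6])
{
    uint64_t d[6];
    __int128 t = 0;                       /* signed accumulator carrying the borrow */
    for (int i = 0; i < 6; i++) {
        t += (__int128)x[i] - P384[i];
        d[i] = (uint64_t)t;
        t >>= 64;                         /* now 0 (no borrow) or -1 (borrow) */
    }
    /* A final borrow means x < p_384, so keep x; otherwise take x - p_384.
       (The assembly makes this selection branch-free with csel.) */
    for (int i = 0; i < 6; i++) z[i] = (t != 0) ? x[i] : d[i];
}
```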
|
| marvin-hansen/iggy-streaming-system | 4,918 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_littleendian_6.S |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert 6-digit (384-bit) bignum to/from little-endian form
// Input x[6]; output z[6]
//
// extern void bignum_littleendian_6
// (uint64_t z[static 6], uint64_t x[static 6]);
//
// The same function is given two other prototypes whose names reflect the
// treatment of one or other argument as a byte array rather than word array:
//
// extern void bignum_fromlebytes_6
// (uint64_t z[static 6], uint8_t x[static 48]);
//
// extern void bignum_tolebytes_6
// (uint8_t z[static 48], uint64_t x[static 6]);
//
// The implementation works by loading in bytes and storing in words (i.e.
// stylistically it is "fromlebytes"); in the more common little-endian
// usage of ARM, this is just copying.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_littleendian_6)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_littleendian_6)
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_fromlebytes_6)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_fromlebytes_6)
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tolebytes_6)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tolebytes_6)
.text
.balign 4
#define z x0
#define x x1
#define d x2
#define dshort w2
#define a x3
S2N_BN_SYMBOL(bignum_littleendian_6):
S2N_BN_SYMBOL(bignum_fromlebytes_6):
S2N_BN_SYMBOL(bignum_tolebytes_6):
// word 0
ldrb dshort, [x]
extr a, d, xzr, #8
ldrb dshort, [x, #1]
extr a, d, a, #8
ldrb dshort, [x, #2]
extr a, d, a, #8
ldrb dshort, [x, #3]
extr a, d, a, #8
ldrb dshort, [x, #4]
extr a, d, a, #8
ldrb dshort, [x, #5]
extr a, d, a, #8
ldrb dshort, [x, #6]
extr a, d, a, #8
ldrb dshort, [x, #7]
extr a, d, a, #8
str a, [z]
// word 1
ldrb dshort, [x, #8]
extr a, d, xzr, #8
ldrb dshort, [x, #9]
extr a, d, a, #8
ldrb dshort, [x, #10]
extr a, d, a, #8
ldrb dshort, [x, #11]
extr a, d, a, #8
ldrb dshort, [x, #12]
extr a, d, a, #8
ldrb dshort, [x, #13]
extr a, d, a, #8
ldrb dshort, [x, #14]
extr a, d, a, #8
ldrb dshort, [x, #15]
extr a, d, a, #8
str a, [z, #8]
// word 2
ldrb dshort, [x, #16]
extr a, d, xzr, #8
ldrb dshort, [x, #17]
extr a, d, a, #8
ldrb dshort, [x, #18]
extr a, d, a, #8
ldrb dshort, [x, #19]
extr a, d, a, #8
ldrb dshort, [x, #20]
extr a, d, a, #8
ldrb dshort, [x, #21]
extr a, d, a, #8
ldrb dshort, [x, #22]
extr a, d, a, #8
ldrb dshort, [x, #23]
extr a, d, a, #8
str a, [z, #16]
// word 3
ldrb dshort, [x, #24]
extr a, d, xzr, #8
ldrb dshort, [x, #25]
extr a, d, a, #8
ldrb dshort, [x, #26]
extr a, d, a, #8
ldrb dshort, [x, #27]
extr a, d, a, #8
ldrb dshort, [x, #28]
extr a, d, a, #8
ldrb dshort, [x, #29]
extr a, d, a, #8
ldrb dshort, [x, #30]
extr a, d, a, #8
ldrb dshort, [x, #31]
extr a, d, a, #8
str a, [z, #24]
// word 4
ldrb dshort, [x, #32]
extr a, d, xzr, #8
ldrb dshort, [x, #33]
extr a, d, a, #8
ldrb dshort, [x, #34]
extr a, d, a, #8
ldrb dshort, [x, #35]
extr a, d, a, #8
ldrb dshort, [x, #36]
extr a, d, a, #8
ldrb dshort, [x, #37]
extr a, d, a, #8
ldrb dshort, [x, #38]
extr a, d, a, #8
ldrb dshort, [x, #39]
extr a, d, a, #8
str a, [z, #32]
// word 5
ldrb dshort, [x, #40]
extr a, d, xzr, #8
ldrb dshort, [x, #41]
extr a, d, a, #8
ldrb dshort, [x, #42]
extr a, d, a, #8
ldrb dshort, [x, #43]
extr a, d, a, #8
ldrb dshort, [x, #44]
extr a, d, a, #8
ldrb dshort, [x, #45]
extr a, d, a, #8
ldrb dshort, [x, #46]
extr a, d, a, #8
ldrb dshort, [x, #47]
extr a, d, a, #8
str a, [z, #40]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
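
Each output word above is assembled from eight consecutive input bytes, least significant byte first, via the ldrb/extr chain. A short C restatement of that per-word behaviour (the function name is illustrative only):

```c
#include <stdint.h>

/* z[i] is the little-endian 64-bit value of bytes x[8*i] .. x[8*i+7]. */
void fromlebytes_6_sketch(uint64_t z[6], const uint8_t x[48])
{
    for (int i = 0; i < 6; i++) {
        uint64_t w = 0;
        for (int j = 7; j >= 0; j--)
            w = (w << 8) | x[8 * i + j];   /* same order as the ldrb/extr chain */
        z[i] = w;
    }
}
```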
|
| marvin-hansen/iggy-streaming-system | 5,265 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_deamont_p384.S |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert from almost-Montgomery form, z := (x / 2^384) mod p_384
// Input x[6]; output z[6]
//
// extern void bignum_deamont_p384
// (uint64_t z[static 6], uint64_t x[static 6]);
//
// Convert a 6-digit bignum x out of its (optionally almost) Montgomery form,
// "almost" meaning any 6-digit input will work, with no range restriction.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_deamont_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_deamont_p384)
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_deamont_p384_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_deamont_p384_alt)
.text
.balign 4
// ---------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1],
// adding to the existing contents of [d5;d4;d3;d2;d1]. It is fine
// for d6 to be the same register as d0.
//
// We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w
// where w = [d0 + (d0<<32)] mod 2^64
// ---------------------------------------------------------------------------
#define montreds(d6,d5,d4,d3,d2,d1,d0, t3,t2,t1) \
/* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \
/* Recycle d0 (which we know gets implicitly cancelled) to store it */ \
lsl t1, d0, #32; \
add d0, t1, d0; \
/* Now let [t2;t1] = 2^64 * w - w + w_hi where w_hi = floor(w/2^32) */ \
/* We need to subtract 2^32 * this, and we can ignore its lower 32 */ \
/* bits since by design it will cancel anyway; we only need the w_hi */ \
/* part to get the carry propagation going. */ \
lsr t1, d0, #32; \
subs t1, t1, d0; \
sbc t2, d0, xzr; \
/* Now select in t1 the field to subtract from d1 */ \
extr t1, t2, t1, #32; \
/* And now get the terms to subtract from d2 and d3 */ \
lsr t2, t2, #32; \
adds t2, t2, d0; \
adc t3, xzr, xzr; \
/* Do the subtraction of that portion */ \
subs d1, d1, t1; \
sbcs d2, d2, t2; \
sbcs d3, d3, t3; \
sbcs d4, d4, xzr; \
sbcs d5, d5, xzr; \
/* Now effectively add 2^384 * w by taking d0 as the input for last sbc */ \
sbc d6, d0, xzr
// Input parameters
#define z x0
#define x x1
// Rotating registers for the intermediate windows
#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x6
#define d5 x7
// Other temporaries
#define u x8
#define v x9
#define w x10
S2N_BN_SYMBOL(bignum_deamont_p384):
S2N_BN_SYMBOL(bignum_deamont_p384_alt):
// Set up an initial window with the input x and an extra leading zero
ldp d0, d1, [x]
ldp d2, d3, [x, #16]
ldp d4, d5, [x, #32]
// Systematically scroll left doing 1-step reductions
montreds(d0,d5,d4,d3,d2,d1,d0, u,v,w)
montreds(d1,d0,d5,d4,d3,d2,d1, u,v,w)
montreds(d2,d1,d0,d5,d4,d3,d2, u,v,w)
montreds(d3,d2,d1,d0,d5,d4,d3, u,v,w)
montreds(d4,d3,d2,d1,d0,d5,d4, u,v,w)
montreds(d5,d4,d3,d2,d1,d0,d5, u,v,w)
// Now compare end result in [d5;d4;d3;d2;d1;d0] = dd with p_384 by *adding*
// 2^384 - p_384 = [0;0;0;w;v;u]. This will set CF if
// dd + (2^384 - p_384) >= 2^384, hence iff dd >= p_384
mov u, #0xffffffff00000001
mov v, #0x00000000ffffffff
mov w, #0x0000000000000001
adds xzr, d0, u
adcs xzr, d1, v
adcs xzr, d2, w
adcs xzr, d3, xzr
adcs xzr, d4, xzr
adcs xzr, d5, xzr
// Convert the condition dd >= p_384 into a bitmask in w and do a masked
// subtraction of p_384, via a masked addition of 2^384 - p_384:
csetm w, cs
and u, u, w
adds d0, d0, u
and v, v, w
adcs d1, d1, v
and w, w, #1
adcs d2, d2, w
adcs d3, d3, xzr
adcs d4, d4, xzr
adc d5, d5, xzr
// Store it back
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
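
The final comparison above exploits 2^384 - p_384 = 2^128 + 2^96 - 2^32 + 1, whose nonzero words are exactly the constants loaded into u, v and w. A minimal C sketch (illustrative names, __int128 compiler extension assumed) of that comparison-and-correction step on a 6-word value dd:

```c
#include <stdint.h>

/* 2^384 - p_384 = 2^128 + 2^96 - 2^32 + 1 as little-endian 64-bit words. */
static const uint64_t R384[6] = {
    0xffffffff00000001ULL, 0x00000000ffffffffULL, 0x0000000000000001ULL, 0, 0, 0
};

/* If dd >= p_384, replace dd by dd - p_384 (computed as dd + R384 mod 2^384). */
void final_correct_p384_sketch(uint64_t dd[6])
{
    /* Adding R384 carries out of the top word iff dd + (2^384 - p_384) >= 2^384,
       i.e. iff dd >= p_384; this is the comparison done with adds/adcs above. */
    unsigned __int128 c = 0;
    for (int i = 0; i < 6; i++)
        c = (c >> 64) + dd[i] + R384[i];
    if ((c >> 64) != 0) {                 /* dd >= p_384: do the addition for real */
        unsigned __int128 s = 0;
        for (int i = 0; i < 6; i++) {
            s = (s >> 64) + dd[i] + R384[i];
            dd[i] = (uint64_t)s;
        }
    }
    /* The real routine avoids the branch: it turns the carry into a bitmask
       with csetm and does a masked addition of these words instead. */
}
```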
|
| marvin-hansen/iggy-streaming-system | 7,814 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_montsqr_p384_alt.S |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery square, z := (x^2 / 2^384) mod p_384
// Input x[6]; output z[6]
//
// extern void bignum_montsqr_p384_alt
// (uint64_t z[static 6], uint64_t x[static 6]);
//
// Does z := (x^2 / 2^384) mod p_384, assuming x^2 <= 2^384 * p_384, which is
// guaranteed in particular if x < p_384 initially (the "intended" case).
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p384_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p384_alt)
.text
.balign 4
// ---------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1],
// adding to the existing contents of [d5;d4;d3;d2;d1]. It is fine
// for d6 to be the same register as d0.
//
// We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w
// where w = [d0 + (d0<<32)] mod 2^64
// ---------------------------------------------------------------------------
#define montreds(d6,d5,d4,d3,d2,d1,d0, t3,t2,t1) \
/* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \
/* Store it in d6 to make the 2^384 * w contribution already */ \
lsl t1, d0, #32; \
add d6, t1, d0; \
/* Now let [t3;t2;t1;-] = (2^384 - p_384) * w */ \
/* We know the lowest word will cancel d0 so we don't need it */ \
mov t1, #0xffffffff00000001; \
umulh t1, t1, d6; \
mov t2, #0x00000000ffffffff; \
mul t3, t2, d6; \
umulh t2, t2, d6; \
adds t1, t1, t3; \
adcs t2, t2, d6; \
adc t3, xzr, xzr; \
/* Now add it, by subtracting from 2^384 * w + x */ \
subs d1, d1, t1; \
sbcs d2, d2, t2; \
sbcs d3, d3, t3; \
sbcs d4, d4, xzr; \
sbcs d5, d5, xzr; \
sbc d6, d6, xzr
#define z x0
#define x x1
#define a0 x2
#define a1 x3
#define a2 x4
#define a3 x5
#define a4 x6
#define a5 x7
#define l x8
#define u0 x2 // The same as a0, which is safe
#define u1 x9
#define u2 x10
#define u3 x11
#define u4 x12
#define u5 x13
#define u6 x14
#define u7 x15
#define u8 x16
#define u9 x17
#define u10 x19
#define u11 x20
#define h x6 // same as a4
S2N_BN_SYMBOL(bignum_montsqr_p384_alt):
// It's convenient to have two more registers to play with
stp x19, x20, [sp, #-16]!
// Load all the elements as [a5;a4;a3;a2;a1;a0], set up an initial
// window [u8;u7; u6;u5; u4;u3; u2;u1] = [34;05;03;01], and then
// chain in the addition of 02 + 12 + 13 + 14 + 15 to that window
// (no carry-out possible since we add it to the top of a product).
ldp a0, a1, [x]
mul u1, a0, a1
umulh u2, a0, a1
ldp a2, a3, [x, #16]
mul l, a0, a2
adds u2, u2, l
mul u3, a0, a3
mul l, a1, a2
adcs u3, u3, l
umulh u4, a0, a3
mul l, a1, a3
adcs u4, u4, l
ldp a4, a5, [x, #32]
mul u5, a0, a5
mul l, a1, a4
adcs u5, u5, l
umulh u6, a0, a5
mul l, a1, a5
adcs u6, u6, l
mul u7, a3, a4
adcs u7, u7, xzr
umulh u8, a3, a4
adc u8, u8, xzr
umulh l, a0, a2
adds u3, u3, l
umulh l, a1, a2
adcs u4, u4, l
umulh l, a1, a3
adcs u5, u5, l
umulh l, a1, a4
adcs u6, u6, l
umulh l, a1, a5
adcs u7, u7, l
adc u8, u8, xzr
// Now chain in the 04 + 23 + 24 + 25 + 35 + 45 terms
mul l, a0, a4
adds u4, u4, l
mul l, a2, a3
adcs u5, u5, l
mul l, a2, a4
adcs u6, u6, l
mul l, a2, a5
adcs u7, u7, l
mul l, a3, a5
adcs u8, u8, l
mul u9, a4, a5
adcs u9, u9, xzr
umulh u10, a4, a5
adc u10, u10, xzr
umulh l, a0, a4
adds u5, u5, l
umulh l, a2, a3
adcs u6, u6, l
umulh l, a2, a4
adcs u7, u7, l
umulh l, a2, a5
adcs u8, u8, l
umulh l, a3, a5
adcs u9, u9, l
adc u10, u10, xzr
// Double that, with u11 holding the top carry
adds u1, u1, u1
adcs u2, u2, u2
adcs u3, u3, u3
adcs u4, u4, u4
adcs u5, u5, u5
adcs u6, u6, u6
adcs u7, u7, u7
adcs u8, u8, u8
adcs u9, u9, u9
adcs u10, u10, u10
cset u11, cs
// Add the homogeneous terms 00 + 11 + 22 + 33 + 44 + 55
umulh l, a0, a0
mul u0, a0, a0
adds u1, u1, l
mul l, a1, a1
adcs u2, u2, l
umulh l, a1, a1
adcs u3, u3, l
mul l, a2, a2
adcs u4, u4, l
umulh l, a2, a2
adcs u5, u5, l
mul l, a3, a3
adcs u6, u6, l
umulh l, a3, a3
adcs u7, u7, l
mul l, a4, a4
adcs u8, u8, l
umulh l, a4, a4
adcs u9, u9, l
mul l, a5, a5
adcs u10, u10, l
umulh l, a5, a5
adc u11, u11, l
// Montgomery rotate the low half
montreds(u0,u5,u4,u3,u2,u1,u0, a1,a2,a3)
montreds(u1,u0,u5,u4,u3,u2,u1, a1,a2,a3)
montreds(u2,u1,u0,u5,u4,u3,u2, a1,a2,a3)
montreds(u3,u2,u1,u0,u5,u4,u3, a1,a2,a3)
montreds(u4,u3,u2,u1,u0,u5,u4, a1,a2,a3)
montreds(u5,u4,u3,u2,u1,u0,u5, a1,a2,a3)
// Add up the high and low parts as [h; u5;u4;u3;u2;u1;u0] = z
adds u0, u0, u6
adcs u1, u1, u7
adcs u2, u2, u8
adcs u3, u3, u9
adcs u4, u4, u10
adcs u5, u5, u11
adc h, xzr, xzr
// Now add [h; u11;u10;u9;u8;u7;u6] = z + (2^384 - p_384)
mov l, #0xffffffff00000001
adds u6, u0, l
mov l, #0x00000000ffffffff
adcs u7, u1, l
mov l, #0x0000000000000001
adcs u8, u2, l
adcs u9, u3, xzr
adcs u10, u4, xzr
adcs u11, u5, xzr
adcs h, h, xzr
// Now z >= p_384 iff h is nonzero, so select accordingly
csel u0, u0, u6, eq
csel u1, u1, u7, eq
csel u2, u2, u8, eq
csel u3, u3, u9, eq
csel u4, u4, u10, eq
csel u5, u5, u11, eq
// Store back final result
stp u0, u1, [z]
stp u2, u3, [z, #16]
stp u4, u5, [z, #32]
// Restore registers
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
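
The correction multiplier w = d0 + (d0 << 32) used by montreds is the standard Montgomery word quotient: p_384 ≡ 2^32 - 1 (mod 2^64), and (2^32 - 1)(2^32 + 1) = 2^64 - 1 ≡ -1 (mod 2^64), so -p_384^{-1} ≡ 2^32 + 1 (mod 2^64). With this w, d0 + w * p_384 ≡ 0 (mod 2^64), which is why the low word cancels and the macro can recycle d0. A one-line C restatement of the quotient:

```c
#include <stdint.h>

/* Montgomery word quotient for modulus p_384: w = d0 * (-p_384^{-1}) mod 2^64.
   Since -p_384^{-1} = 2^32 + 1 (mod 2^64), this is just d0 + (d0 << 32). */
static inline uint64_t mont_word_quotient_p384(uint64_t d0)
{
    return d0 + (d0 << 32);
}
```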
|
| marvin-hansen/iggy-streaming-system | 5,084 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_bigendian_6.S |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert 6-digit (384-bit) bignum to/from big-endian form
// Input x[6]; output z[6]
//
// extern void bignum_bigendian_6
// (uint64_t z[static 6], uint64_t x[static 6]);
//
// The same function is given two other prototypes whose names reflect the
// treatment of one or other argument as a byte array rather than word array:
//
// extern void bignum_frombebytes_6
// (uint64_t z[static 6], uint8_t x[static 48]);
//
// extern void bignum_tobebytes_6
// (uint8_t z[static 48], uint64_t x[static 6]);
//
// The implementation works by loading in bytes and storing in words (i.e.
// stylistically it is "frombebytes"); in the more common little-endian
// usage of ARM, this is just byte reversal.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_bigendian_6)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_bigendian_6)
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_frombebytes_6)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_frombebytes_6)
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tobebytes_6)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tobebytes_6)
.text
.balign 4
#define z x0
#define x x1
#define d x2
#define dshort w2
#define a x3
#define c x4
// The reads and writes are organized in mirror-image pairs (0-5, 1-4, 2-3)
// to allow x and z to point to the same buffer without using more
// intermediate registers.
S2N_BN_SYMBOL(bignum_bigendian_6):
S2N_BN_SYMBOL(bignum_frombebytes_6):
S2N_BN_SYMBOL(bignum_tobebytes_6):
// 0 and 5 words
ldrb dshort, [x, #7]
extr a, d, xzr, #8
ldrb dshort, [x, #6]
extr a, d, a, #8
ldrb dshort, [x, #5]
extr a, d, a, #8
ldrb dshort, [x, #4]
extr a, d, a, #8
ldrb dshort, [x, #3]
extr a, d, a, #8
ldrb dshort, [x, #2]
extr a, d, a, #8
ldrb dshort, [x, #1]
extr a, d, a, #8
ldrb dshort, [x]
extr a, d, a, #8
ldrb dshort, [x, #47]
extr c, d, xzr, #8
ldrb dshort, [x, #46]
extr c, d, c, #8
ldrb dshort, [x, #45]
extr c, d, c, #8
ldrb dshort, [x, #44]
extr c, d, c, #8
ldrb dshort, [x, #43]
extr c, d, c, #8
ldrb dshort, [x, #42]
extr c, d, c, #8
ldrb dshort, [x, #41]
extr c, d, c, #8
ldrb dshort, [x, #40]
extr c, d, c, #8
str a, [z, #40]
str c, [z]
// 1 and 4 words
ldrb dshort, [x, #15]
extr a, d, xzr, #8
ldrb dshort, [x, #14]
extr a, d, a, #8
ldrb dshort, [x, #13]
extr a, d, a, #8
ldrb dshort, [x, #12]
extr a, d, a, #8
ldrb dshort, [x, #11]
extr a, d, a, #8
ldrb dshort, [x, #10]
extr a, d, a, #8
ldrb dshort, [x, #9]
extr a, d, a, #8
ldrb dshort, [x, #8]
extr a, d, a, #8
ldrb dshort, [x, #39]
extr c, d, xzr, #8
ldrb dshort, [x, #38]
extr c, d, c, #8
ldrb dshort, [x, #37]
extr c, d, c, #8
ldrb dshort, [x, #36]
extr c, d, c, #8
ldrb dshort, [x, #35]
extr c, d, c, #8
ldrb dshort, [x, #34]
extr c, d, c, #8
ldrb dshort, [x, #33]
extr c, d, c, #8
ldrb dshort, [x, #32]
extr c, d, c, #8
str a, [z, #32]
str c, [z, #8]
// 2 and 3 words
ldrb dshort, [x, #23]
extr a, d, xzr, #8
ldrb dshort, [x, #22]
extr a, d, a, #8
ldrb dshort, [x, #21]
extr a, d, a, #8
ldrb dshort, [x, #20]
extr a, d, a, #8
ldrb dshort, [x, #19]
extr a, d, a, #8
ldrb dshort, [x, #18]
extr a, d, a, #8
ldrb dshort, [x, #17]
extr a, d, a, #8
ldrb dshort, [x, #16]
extr a, d, a, #8
ldrb dshort, [x, #31]
extr c, d, xzr, #8
ldrb dshort, [x, #30]
extr c, d, c, #8
ldrb dshort, [x, #29]
extr c, d, c, #8
ldrb dshort, [x, #28]
extr c, d, c, #8
ldrb dshort, [x, #27]
extr c, d, c, #8
ldrb dshort, [x, #26]
extr c, d, c, #8
ldrb dshort, [x, #25]
extr c, d, c, #8
ldrb dshort, [x, #24]
extr c, d, c, #8
str a, [z, #24]
str c, [z, #16]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
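
At the byte level the routine above is a full 48-byte reversal (z[k] = x[47-k]); handling the mirror-image word pairs (0,5), (1,4), (2,3) together is what makes it safe when z and x alias. A compact C restatement of the same in-place-safe pattern (names are illustrative only):

```c
#include <stdint.h>
#include <string.h>

/* 48-byte reversal, safe when z and x point to the same buffer, by reading
   each mirror-image word pair completely before writing it back. */
void bigendian_6_sketch(uint8_t z[48], const uint8_t x[48])
{
    for (int i = 0; i < 3; i++) {                /* pairs (0,5), (1,4), (2,3) */
        uint8_t lo[8], hi[8];
        for (int j = 0; j < 8; j++) {
            lo[j] = x[8 * i + (7 - j)];          /* word i, bytes reversed     */
            hi[j] = x[8 * (5 - i) + (7 - j)];    /* word 5 - i, bytes reversed */
        }
        memcpy(z + 8 * (5 - i), lo, 8);          /* reversed word i lands at 5-i */
        memcpy(z + 8 * i, hi, 8);                /* and vice versa               */
    }
}
```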
|
| marvin-hansen/iggy-streaming-system | 4,412 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_demont_p384.S |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert from Montgomery form z := (x / 2^384) mod p_384, assuming x reduced
// Input x[6]; output z[6]
//
// extern void bignum_demont_p384
// (uint64_t z[static 6], uint64_t x[static 6]);
//
// This assumes the input is < p_384 for correctness. If this is not the case,
// use the variant "bignum_deamont_p384" instead.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_demont_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_demont_p384)
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_demont_p384_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_demont_p384_alt)
.text
.balign 4
// ---------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1],
// adding to the existing contents of [d5;d4;d3;d2;d1]. It is fine
// for d6 to be the same register as d0.
//
// We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w
// where w = [d0 + (d0<<32)] mod 2^64
// ---------------------------------------------------------------------------
#define montreds(d6,d5,d4,d3,d2,d1,d0, t3,t2,t1) \
/* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \
/* Recycle d0 (which we know gets implicitly cancelled) to store it */ \
lsl t1, d0, #32; \
add d0, t1, d0; \
/* Now let [t2;t1] = 2^64 * w - w + w_hi where w_hi = floor(w/2^32) */ \
/* We need to subtract 2^32 * this, and we can ignore its lower 32 */ \
/* bits since by design it will cancel anyway; we only need the w_hi */ \
/* part to get the carry propagation going. */ \
lsr t1, d0, #32; \
subs t1, t1, d0; \
sbc t2, d0, xzr; \
/* Now select in t1 the field to subtract from d1 */ \
extr t1, t2, t1, #32; \
/* And now get the terms to subtract from d2 and d3 */ \
lsr t2, t2, #32; \
adds t2, t2, d0; \
adc t3, xzr, xzr; \
/* Do the subtraction of that portion */ \
subs d1, d1, t1; \
sbcs d2, d2, t2; \
sbcs d3, d3, t3; \
sbcs d4, d4, xzr; \
sbcs d5, d5, xzr; \
/* Now effectively add 2^384 * w by taking d0 as the input for last sbc */ \
sbc d6, d0, xzr
// Input parameters
#define z x0
#define x x1
// Rotating registers for the intermediate windows
#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x6
#define d5 x7
// Other temporaries
#define u x8
#define v x9
#define w x10
S2N_BN_SYMBOL(bignum_demont_p384):
S2N_BN_SYMBOL(bignum_demont_p384_alt):
// Set up an initial window with the input x and an extra leading zero
ldp d0, d1, [x]
ldp d2, d3, [x, #16]
ldp d4, d5, [x, #32]
// Systematically scroll left doing 1-step reductions
montreds(d0,d5,d4,d3,d2,d1,d0, u,v,w)
montreds(d1,d0,d5,d4,d3,d2,d1, u,v,w)
montreds(d2,d1,d0,d5,d4,d3,d2, u,v,w)
montreds(d3,d2,d1,d0,d5,d4,d3, u,v,w)
montreds(d4,d3,d2,d1,d0,d5,d4, u,v,w)
montreds(d5,d4,d3,d2,d1,d0,d5, u,v,w)
// This is already our answer with no correction needed
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
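
A minimal usage sketch showing the intended pairing with bignum_tomont_p384: converting into and back out of Montgomery form returns the original value for any x < p_384 (the routine above requires the reduced input; the sample value below is an assumption for illustration).

```c
#include <stdint.h>
#include <assert.h>

/* Prototypes as given in the respective header comments in this dump. */
extern void bignum_tomont_p384(uint64_t z[static 6], uint64_t x[static 6]);
extern void bignum_demont_p384(uint64_t z[static 6], uint64_t x[static 6]);

int main(void)
{
    uint64_t x[6] = {0x1234567890abcdefULL, 7, 0, 0, 0, 1};  /* some x < p_384 */
    uint64_t m[6], y[6];
    bignum_tomont_p384(m, x);    /* m = (2^384 * x) mod p_384     */
    bignum_demont_p384(y, m);    /* y = (m / 2^384) mod p_384 = x */
    for (int i = 0; i < 6; i++) assert(y[i] == x[i]);
    return 0;
}
```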
|
| marvin-hansen/iggy-streaming-system | 1,915 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_neg_p384.S |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Negate modulo p_384, z := (-x) mod p_384, assuming x reduced
// Input x[6]; output z[6]
//
// extern void bignum_neg_p384 (uint64_t z[static 6], uint64_t x[static 6]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_neg_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_neg_p384)
.text
.balign 4
#define z x0
#define x x1
#define p x2
#define t x3
#define d0 x4
#define d1 x5
#define d2 x6
#define d3 x7
#define d4 x8
#define d5 x9
S2N_BN_SYMBOL(bignum_neg_p384):
// Load the 6 digits of x
ldp d0, d1, [x]
ldp d2, d3, [x, #16]
ldp d4, d5, [x, #32]
// Set a bitmask p for the input being nonzero, so that we avoid doing
// -0 = p_384 and hence maintain strict modular reduction
orr p, d0, d1
orr t, d2, d3
orr p, p, t
orr t, d4, d5
orr p, p, t
cmp p, #0
csetm p, ne
// Mask the complicated lower three words of p_384 = [-1;-1;-1;n2;n1;n0]
// and subtract, using mask itself for upper digits
and t, p, #0x00000000ffffffff
subs d0, t, d0
and t, p, #0xffffffff00000000
sbcs d1, t, d1
and t, p, #0xfffffffffffffffe
sbcs d2, t, d2
sbcs d3, p, d3
sbcs d4, p, d4
sbc d5, p, d5
// Write back the result
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
// Return
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
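
The nonzero-input mask above exists so that negating zero gives zero rather than p_384, keeping the result strictly reduced. A portable C restatement of the same masked subtraction (a sketch; the __int128 borrow chain and the branch on nonzero stand in for the branch-free csetm/sbcs sequence):

```c
#include <stdint.h>

/* p_384 as little-endian 64-bit words. */
static const uint64_t P384[6] = {
    0x00000000ffffffffULL, 0xffffffff00000000ULL, 0xfffffffffffffffeULL,
    0xffffffffffffffffULL, 0xffffffffffffffffULL, 0xffffffffffffffffULL
};

/* z := (-x) mod p_384 for x < p_384, with -0 giving 0, not p_384. */
void neg_p384_sketch(uint64_t z[6], const uint64_t x[6])
{
    uint64_t nonzero = 0;
    for (int i = 0; i < 6; i++) nonzero |= x[i];
    uint64_t mask = nonzero ? ~0ULL : 0;          /* csetm p, ne (branch-free there) */
    __int128 b = 0;                               /* signed accumulator for borrows  */
    for (int i = 0; i < 6; i++) {
        b += (__int128)(P384[i] & mask) - x[i];   /* (mask & p_384) - x              */
        z[i] = (uint64_t)b;
        b >>= 64;
    }
}
```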
|
| marvin-hansen/iggy-streaming-system | 3,805 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_cmul_p384.S |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply by a single word modulo p_384, z := (c * x) mod p_384, assuming
// x reduced
// Inputs c, x[6]; output z[6]
//
// extern void bignum_cmul_p384
// (uint64_t z[static 6], uint64_t c, uint64_t x[static 6]);
//
// Standard ARM ABI: X0 = z, X1 = c, X2 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p384)
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p384_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p384_alt)
.text
.balign 4
#define z x0
#define c x1
#define x x2
#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x6
#define d5 x7
#define a0 x8
#define a1 x9
#define a2 x10
#define a3 x11
#define a4 x12
#define a5 x13
// Some shared here
#define h x1
#define h1 x12
#define hn x13
#define m x8
#define l x9
S2N_BN_SYMBOL(bignum_cmul_p384):
S2N_BN_SYMBOL(bignum_cmul_p384_alt):
// First do the multiply, straightforwardly, getting [h; d5; ...; d0]
ldp a0, a1, [x]
ldp a2, a3, [x, #16]
ldp a4, a5, [x, #32]
mul d0, c, a0
mul d1, c, a1
mul d2, c, a2
mul d3, c, a3
mul d4, c, a4
mul d5, c, a5
umulh a0, c, a0
umulh a1, c, a1
umulh a2, c, a2
umulh a3, c, a3
umulh a4, c, a4
umulh h, c, a5
adds d1, d1, a0
adcs d2, d2, a1
adcs d3, d3, a2
adcs d4, d4, a3
adcs d5, d5, a4
adc h, h, xzr
// Let h be the top word of this intermediate product and l the low 6 words.
// By the range hypothesis on the input, we know h1 = h + 1 does not wrap
// And then -p_384 <= z - h1 * p_384 < p_384, so we just need to subtract
// h1 * p_384 and then correct if that is negative by adding p_384.
//
// Write p_384 = 2^384 - r where r = 2^128 + 2^96 - 2^32 + 1
//
// We want z - (h + 1) * (2^384 - r)
// = (2^384 * h + l) - (h + 1) * (2^384 - r)
// = (l + (h + 1) * r) - 2^384.
//
// Thus we can do the computation in 6 words of l + (h + 1) * r, and if it
// does *not* carry we need to add p_384. We can rewrite this as the following,
// using ~h = 2^64 - (h + 1) and absorbing the 2^64 in the higher term
// using h instead of h + 1.
//
// l + (h + 1) * r
// = l + 2^128 * (h + 1) + 2^96 * (h + 1) - 2^32 * (h + 1) + (h + 1)
// = l + 2^128 * (h + 1) + 2^96 * h + 2^32 * ~h + (h + 1)
add h1, h, #1
orn hn, xzr, h
lsl a0, hn, #32
extr a1, h, hn, #32
lsr a2, h, #32
adds a0, a0, h1
adcs a1, a1, xzr
adcs a2, a2, h1
adc a3, xzr, xzr
adds d0, d0, a0
adcs d1, d1, a1
adcs d2, d2, a2
adcs d3, d3, a3
adcs d4, d4, xzr
adcs d5, d5, xzr
// Catch the carry and do a masked addition of p_384
csetm m, cc
mov l, #0x00000000ffffffff
and l, l, m
adds d0, d0, l
eor l, l, m
adcs d1, d1, l
mov l, #0xfffffffffffffffe
and l, l, m
adcs d2, d2, l
adcs d3, d3, m
adcs d4, d4, m
adc d5, d5, m
// Store the result
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
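
The comment block above rewrites l + (h+1)*r as l + 2^128*(h+1) + 2^96*h + 2^32*~h + (h+1), with r = 2^384 - p_384. A portable C restatement of how the lsl/extr/lsr and carry chain spread (h+1)*r over the four words a3..a0 (a sketch only; the function name is hypothetical and h stands for the top word of the raw product):

```c
#include <stdint.h>

/* Spread (h + 1) * r over four 64-bit words, where r = 2^384 - p_384
   = 2^128 + 2^96 - 2^32 + 1 and h + 1 is assumed not to wrap
   (the range hypothesis stated in the comment above). */
void cmul_correction_words_sketch(uint64_t h, uint64_t a[4])
{
    uint64_t h1 = h + 1;
    uint64_t hn = ~h;                               /* orn hn, xzr, h      */
    a[0] = hn << 32;                                /* lsl a0, hn, #32     */
    a[1] = (h << 32) | (hn >> 32);                  /* extr a1, h, hn, #32 */
    a[2] = h >> 32;                                 /* lsr a2, h, #32      */
    unsigned __int128 s = (unsigned __int128)a[0] + h1;        /* adds a0, a0, h1  */
    a[0] = (uint64_t)s;
    s = (unsigned __int128)a[1] + (uint64_t)(s >> 64);         /* adcs a1, a1, xzr */
    a[1] = (uint64_t)s;
    s = (unsigned __int128)a[2] + h1 + (uint64_t)(s >> 64);    /* adcs a2, a2, h1  */
    a[2] = (uint64_t)s;
    a[3] = (uint64_t)(s >> 64);                                 /* adc a3, xzr, xzr */
    /* Now [a3;a2;a1;a0] == (h + 1) * (2^128 + 2^96 - 2^32 + 1). */
}
```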
|
| marvin-hansen/iggy-streaming-system | 12,551 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_montmul_p384.S |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery multiply, z := (x * y / 2^384) mod p_384
// Inputs x[6], y[6]; output z[6]
//
// extern void bignum_montmul_p384
// (uint64_t z[static 6], uint64_t x[static 6], uint64_t y[static 6]);
//
// Does z := (2^{-384} * x * y) mod p_384, assuming that the inputs x and y
// satisfy x * y <= 2^384 * p_384 (in particular this is true if we are in
// the "usual" case x < p_384 and y < p_384).
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p384)
.text
.balign 4
// ---------------------------------------------------------------------------
// Macro returning (c,h,l) = 3-word 1s complement (x - y) * (w - z)
// c,h,l,t should all be different
// t,h should not overlap w,z
// ---------------------------------------------------------------------------
#define muldiffn(c,h,l, t, x,y, w,z) \
subs t, x, y; \
cneg t, t, cc; \
csetm c, cc; \
subs h, w, z; \
cneg h, h, cc; \
mul l, t, h; \
umulh h, t, h; \
cinv c, c, cc; \
eor l, l, c; \
eor h, h, c
// ---------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1],
// adding to the existing contents of [d5;d4;d3;d2;d1]. It is fine
// for d6 to be the same register as d0.
//
// We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w
// where w = [d0 + (d0<<32)] mod 2^64
// ---------------------------------------------------------------------------
#define montreds(d6,d5,d4,d3,d2,d1,d0, t3,t2,t1) \
/* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \
/* Recycle d0 (which we know gets implicitly cancelled) to store it */ \
lsl t1, d0, #32; \
add d0, t1, d0; \
/* Now let [t2;t1] = 2^64 * w - w + w_hi where w_hi = floor(w/2^32) */ \
/* We need to subtract 2^32 * this, and we can ignore its lower 32 */ \
/* bits since by design it will cancel anyway; we only need the w_hi */ \
/* part to get the carry propagation going. */ \
lsr t1, d0, #32; \
subs t1, t1, d0; \
sbc t2, d0, xzr; \
/* Now select in t1 the field to subtract from d1 */ \
extr t1, t2, t1, #32; \
/* And now get the terms to subtract from d2 and d3 */ \
lsr t2, t2, #32; \
adds t2, t2, d0; \
adc t3, xzr, xzr; \
/* Do the subtraction of that portion */ \
subs d1, d1, t1; \
sbcs d2, d2, t2; \
sbcs d3, d3, t3; \
sbcs d4, d4, xzr; \
sbcs d5, d5, xzr; \
/* Now effectively add 2^384 * w by taking d0 as the input for last sbc */ \
sbc d6, d0, xzr
#define a0 x3
#define a1 x4
#define a2 x5
#define a3 x6
#define a4 x7
#define a5 x8
#define b0 x9
#define b1 x10
#define b2 x11
#define b3 x12
#define b4 x13
#define b5 x14
#define s0 x15
#define s1 x16
#define s2 x17
#define s3 x19
#define s4 x20
#define s5 x1
#define s6 x2
#define t1 x21
#define t2 x22
#define t3 x23
#define t4 x24
S2N_BN_SYMBOL(bignum_montmul_p384):
// Save some registers
stp x19, x20, [sp, -16]!
stp x21, x22, [sp, -16]!
stp x23, x24, [sp, -16]!
// Load in all words of both inputs
ldp a0, a1, [x1]
ldp a2, a3, [x1, #16]
ldp a4, a5, [x1, #32]
ldp b0, b1, [x2]
ldp b2, b3, [x2, #16]
ldp b4, b5, [x2, #32]
// Multiply low halves with a 3x3->6 ADK multiplier as [s5;s4;s3;s2;s1;s0]
mul s0, a0, b0
mul t1, a1, b1
mul t2, a2, b2
umulh t3, a0, b0
umulh t4, a1, b1
umulh s5, a2, b2
adds t3, t3, t1
adcs t4, t4, t2
adc s5, s5, xzr
adds s1, t3, s0
adcs s2, t4, t3
adcs s3, s5, t4
adc s4, s5, xzr
adds s2, s2, s0
adcs s3, s3, t3
adcs s4, s4, t4
adc s5, s5, xzr
muldiffn(t3,t2,t1, t4, a0,a1, b1,b0)
adds xzr, t3, #1
adcs s1, s1, t1
adcs s2, s2, t2
adcs s3, s3, t3
adcs s4, s4, t3
adc s5, s5, t3
muldiffn(t3,t2,t1, t4, a0,a2, b2,b0)
adds xzr, t3, #1
adcs s2, s2, t1
adcs s3, s3, t2
adcs s4, s4, t3
adc s5, s5, t3
muldiffn(t3,t2,t1, t4, a1,a2, b2,b1)
adds xzr, t3, #1
adcs s3, s3, t1
adcs s4, s4, t2
adc s5, s5, t3
// Perform three "short" Montgomery steps on the low product
// This shifts it to an offset compatible with middle terms
// Stash the result temporarily in the output buffer
// We could keep this in registers by directly adding to it in the next
// ADK block, but if anything that seems to be slightly slower
montreds(s0,s5,s4,s3,s2,s1,s0, t1,t2,t3)
montreds(s1,s0,s5,s4,s3,s2,s1, t1,t2,t3)
montreds(s2,s1,s0,s5,s4,s3,s2, t1,t2,t3)
stp s3, s4, [x0]
stp s5, s0, [x0, #16]
stp s1, s2, [x0, #32]
// Multiply high halves with a 3x3->6 ADK multiplier as [s5;s4;s3;s2;s1;s0]
mul s0, a3, b3
mul t1, a4, b4
mul t2, a5, b5
umulh t3, a3, b3
umulh t4, a4, b4
umulh s5, a5, b5
adds t3, t3, t1
adcs t4, t4, t2
adc s5, s5, xzr
adds s1, t3, s0
adcs s2, t4, t3
adcs s3, s5, t4
adc s4, s5, xzr
adds s2, s2, s0
adcs s3, s3, t3
adcs s4, s4, t4
adc s5, s5, xzr
muldiffn(t3,t2,t1, t4, a3,a4, b4,b3)
adds xzr, t3, #1
adcs s1, s1, t1
adcs s2, s2, t2
adcs s3, s3, t3
adcs s4, s4, t3
adc s5, s5, t3
muldiffn(t3,t2,t1, t4, a3,a5, b5,b3)
adds xzr, t3, #1
adcs s2, s2, t1
adcs s3, s3, t2
adcs s4, s4, t3
adc s5, s5, t3
muldiffn(t3,t2,t1, t4, a4,a5, b5,b4)
adds xzr, t3, #1
adcs s3, s3, t1
adcs s4, s4, t2
adc s5, s5, t3
// Compute sign-magnitude a0,[a5,a4,a3] = x_hi - x_lo
subs a3, a3, a0
sbcs a4, a4, a1
sbcs a5, a5, a2
sbc a0, xzr, xzr
adds xzr, a0, #1
eor a3, a3, a0
adcs a3, a3, xzr
eor a4, a4, a0
adcs a4, a4, xzr
eor a5, a5, a0
adc a5, a5, xzr
// Compute sign-magnitude b5,[b2,b1,b0] = y_lo - y_hi
subs b0, b0, b3
sbcs b1, b1, b4
sbcs b2, b2, b5
sbc b5, xzr, xzr
adds xzr, b5, #1
eor b0, b0, b5
adcs b0, b0, xzr
eor b1, b1, b5
adcs b1, b1, xzr
eor b2, b2, b5
adc b2, b2, xzr
// Save the correct sign for the sub-product in b5
eor b5, a0, b5
// Add the high H to the modified low term L' and re-stash 6 words,
// keeping top word in s6
ldp t1, t2, [x0]
adds s0, s0, t1
adcs s1, s1, t2
ldp t1, t2, [x0, #16]
adcs s2, s2, t1
adcs s3, s3, t2
ldp t1, t2, [x0, #32]
adcs s4, s4, t1
adcs s5, s5, t2
adc s6, xzr, xzr
stp s0, s1, [x0]
stp s2, s3, [x0, #16]
stp s4, s5, [x0, #32]
// Multiply with yet a third 3x3 ADK for the complex mid-term
mul s0, a3, b0
mul t1, a4, b1
mul t2, a5, b2
umulh t3, a3, b0
umulh t4, a4, b1
umulh s5, a5, b2
adds t3, t3, t1
adcs t4, t4, t2
adc s5, s5, xzr
adds s1, t3, s0
adcs s2, t4, t3
adcs s3, s5, t4
adc s4, s5, xzr
adds s2, s2, s0
adcs s3, s3, t3
adcs s4, s4, t4
adc s5, s5, xzr
muldiffn(t3,t2,t1, t4, a3,a4, b1,b0)
adds xzr, t3, #1
adcs s1, s1, t1
adcs s2, s2, t2
adcs s3, s3, t3
adcs s4, s4, t3
adc s5, s5, t3
muldiffn(t3,t2,t1, t4, a3,a5, b2,b0)
adds xzr, t3, #1
adcs s2, s2, t1
adcs s3, s3, t2
adcs s4, s4, t3
adc s5, s5, t3
muldiffn(t3,t2,t1, t4, a4,a5, b2,b1)
adds xzr, t3, #1
adcs s3, s3, t1
adcs s4, s4, t2
adc s5, s5, t3
// Unstash the H + L' sum to add in twice
ldp a0, a1, [x0]
ldp a2, a3, [x0, #16]
ldp a4, a5, [x0, #32]
// Set up a sign-modified version of the mid-product in a long accumulator
// as [b3;b2;b1;b0;s5;s4;s3;s2;s1;s0], adding in the H + L' term once with
// zero offset as this signed value is created
adds xzr, b5, #1
eor s0, s0, b5
adcs s0, s0, a0
eor s1, s1, b5
adcs s1, s1, a1
eor s2, s2, b5
adcs s2, s2, a2
eor s3, s3, b5
adcs s3, s3, a3
eor s4, s4, b5
adcs s4, s4, a4
eor s5, s5, b5
adcs s5, s5, a5
adcs b0, b5, s6
adcs b1, b5, xzr
adcs b2, b5, xzr
adc b3, b5, xzr
// Add in the stashed H + L' term an offset of 3 words as well
adds s3, s3, a0
adcs s4, s4, a1
adcs s5, s5, a2
adcs b0, b0, a3
adcs b1, b1, a4
adcs b2, b2, a5
adc b3, b3, s6
// Do three more Montgomery steps on the composed term
montreds(s0,s5,s4,s3,s2,s1,s0, t1,t2,t3)
montreds(s1,s0,s5,s4,s3,s2,s1, t1,t2,t3)
montreds(s2,s1,s0,s5,s4,s3,s2, t1,t2,t3)
adds b0, b0, s0
adcs b1, b1, s1
adcs b2, b2, s2
adc b3, b3, xzr
// Because of the way we added L' in two places, we can overspill by
// more than usual in Montgomery, with the result being only known to
// be < 3 * p_384, not the usual < 2 * p_384. So now we do a more
// elaborate final correction in the style of bignum_cmul_p384, just
// a little bit simpler because we know q is small.
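// (Sketch of the correction: with q = b3 + 1 as a tentative quotient we
// add q * (2^384 - p_384) = q * (2^128 + 2^96 - 2^32 + 1) and drop the
// top word; if that turns out to be an over-correction, the csetm mask
// below adds a single p_384 back in.)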
add t2, b3, #1
lsl t1, t2, #32
subs t4, t2, t1
sbc t1, t1, xzr
adds s3, s3, t4
adcs s4, s4, t1
adcs s5, s5, t2
adcs b0, b0, xzr
adcs b1, b1, xzr
adcs b2, b2, xzr
csetm t2, cc
mov t3, #0x00000000ffffffff
and t3, t3, t2
adds s3, s3, t3
eor t3, t3, t2
adcs s4, s4, t3
mov t3, #0xfffffffffffffffe
and t3, t3, t2
adcs s5, s5, t3
adcs b0, b0, t2
adcs b1, b1, t2
adc b2, b2, t2
// Write back the result
stp s3, s4, [x0]
stp s5, b0, [x0, #16]
stp b1, b2, [x0, #32]
// Restore registers and return
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_inv_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Modular inverse modulo p_384 = 2^384 - 2^128 - 2^96 + 2^32 - 1
// Input x[6]; output z[6]
//
// extern void bignum_inv_p384(uint64_t z[static 6],uint64_t x[static 6]);
//
// If the 6-digit input x is coprime to p_384, i.e. is not divisible
// by it, returns z < p_384 such that x * z == 1 (mod p_384). Note that
// x does not need to be reduced modulo p_384, but the output always is.
// If the input is divisible (i.e. is 0 or p_384), then there can be no
// modular inverse and z = 0 is returned.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_inv_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_inv_p384)
.text
.balign 4
// Size in bytes of a 64-bit word
#define N 8
// Used for the return pointer
#define res x20
// Loop counter and d = 2 * delta value for divstep
#define i x21
#define d x22
// Registers used for matrix element magnitudes and signs
#define m00 x10
#define m01 x11
#define m10 x12
#define m11 x13
#define s00 x14
#define s01 x15
#define s10 x16
#define s11 x17
// Initial carries for combinations
#define car0 x9
#define car1 x19
// Input and output, plain registers treated according to pattern
#define reg0 x0, #0
#define reg1 x1, #0
#define reg2 x2, #0
#define reg3 x3, #0
#define reg4 x4, #0
#define x x1, #0
#define z x0, #0
// Pointer-offset pairs for temporaries on stack
// The u and v variables are 6 words each as expected, but the f and g
// variables are 8 words each -- they need to have at least one extra
// word for a sign word, and to preserve alignment we "round up" to 8.
// In fact, we currently keep an extra word in u and v as well.
#define f sp, #0
#define g sp, #(8*N)
#define u sp, #(16*N)
#define v sp, #(24*N)
// Total size to reserve on the stack
#define NSPACE #(32*N)
// ---------------------------------------------------------------------------
// Core signed almost-Montgomery reduction macro. Takes input in
// [d6;d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1], adding
// to the existing [d6;d5;d4;d3;d2;d1], and re-using d0 as a temporary
// internally as well as t0, t1, t2. This is almost-Montgomery, i.e. the
// result fits in 6 digits but is not necessarily strictly reduced mod p_384.
// ---------------------------------------------------------------------------
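// An illustrative model of the step (not assembled), using the input
// bound stated below:
//
//   x = x + (p_384 << 61);     /* now x >= 0 since 2^61 * p_384 > 2^444 */
//   w = d0 * (1 + (1 << 32));  /* mod 2^64, same multiplier as montreds */
//   x = x + w * p_384;         /* bottom word becomes zero */
//   x = x >> 64;
//   if (top_carry) x = x - p_384;   /* masked, keeps x in 6 digits */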
#define amontred(d6,d5,d4,d3,d2,d1,d0, t3,t2,t1) \
/* We only know the input is -2^444 < x < 2^444. To do traditional */ \
/* unsigned Montgomery reduction, start by adding 2^61 * p_384. */ \
mov t1, #0xe000000000000000; \
adds d0, d0, t1; \
mov t2, #0x000000001fffffff; \
adcs d1, d1, t2; \
mov t3, #0xffffffffe0000000; \
bic t3, t3, #0x2000000000000000; \
adcs d2, d2, t3; \
sbcs d3, d3, xzr; \
sbcs d4, d4, xzr; \
sbcs d5, d5, xzr; \
mov t1, #0x1fffffffffffffff; \
adc d6, d6, t1; \
/* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \
/* Store it back into d0 since we no longer need that digit. */ \
add d0, d0, d0, lsl #32; \
/* Now let [t3;t2;t1;-] = (2^384 - p_384) * w */ \
/* We know the lowest word will cancel d0 so we don't need it */ \
mov t1, #0xffffffff00000001; \
umulh t1, t1, d0; \
mov t2, #0x00000000ffffffff; \
mul t3, t2, d0; \
umulh t2, t2, d0; \
adds t1, t1, t3; \
adcs t2, t2, d0; \
cset t3, cs; \
/* Now x + p_384 * w = (x + 2^384 * w) - (2^384 - p_384) * w */ \
/* We catch the net top carry from add-subtract in the digit d0 */ \
adds d6, d6, d0; \
cset d0, cs; \
subs d1, d1, t1; \
sbcs d2, d2, t2; \
sbcs d3, d3, t3; \
sbcs d4, d4, xzr; \
sbcs d5, d5, xzr; \
sbcs d6, d6, xzr; \
sbcs d0, d0, xzr; \
/* Now if d0 is nonzero we subtract p_384 (almost-Montgomery) */ \
neg d0, d0; \
and t1, d0, #0x00000000ffffffff; \
and t2, d0, #0xffffffff00000000; \
and t3, d0, #0xfffffffffffffffe; \
subs d1, d1, t1; \
sbcs d2, d2, t2; \
sbcs d3, d3, t3; \
sbcs d4, d4, d0; \
sbcs d5, d5, d0; \
sbc d6, d6, d0
// Very similar to a subroutine call to the s2n-bignum word_divstep59.
// But different in register usage and returning the final matrix in
// registers as follows
//
// [ m00 m01]
// [ m10 m11]
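// (Each repeated block below performs, roughly, one bit of the divstep
// iteration with d = 2 * delta kept in x1:
//
//   if delta > 0 && g odd: (delta, f, g) := (1 - delta, g, (g - f) / 2)
//   elif g odd:            (delta, f, g) := (1 + delta, f, (g + f) / 2)
//   else:                  (delta, f, g) := (1 + delta, f, g / 2)
//
// operating on packed 20-bit low windows of f and g; the sbfx/asr
// sections round the accumulated coefficients and compose them into the
// returned matrix.)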
#define divstep59() \
and x4, x2, #0xfffff; \
orr x4, x4, #0xfffffe0000000000; \
and x5, x3, #0xfffff; \
orr x5, x5, #0xc000000000000000; \
tst x5, #0x1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
asr x5, x5, #1; \
add x8, x4, #0x100, lsl #12; \
sbfx x8, x8, #21, #21; \
mov x11, #0x100000; \
add x11, x11, x11, lsl #21; \
add x9, x4, x11; \
asr x9, x9, #42; \
add x10, x5, #0x100, lsl #12; \
sbfx x10, x10, #21, #21; \
add x11, x5, x11; \
asr x11, x11, #42; \
mul x6, x8, x2; \
mul x7, x9, x3; \
mul x2, x10, x2; \
mul x3, x11, x3; \
add x4, x6, x7; \
add x5, x2, x3; \
asr x2, x4, #20; \
asr x3, x5, #20; \
and x4, x2, #0xfffff; \
orr x4, x4, #0xfffffe0000000000; \
and x5, x3, #0xfffff; \
orr x5, x5, #0xc000000000000000; \
tst x5, #0x1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
asr x5, x5, #1; \
add x12, x4, #0x100, lsl #12; \
sbfx x12, x12, #21, #21; \
mov x15, #0x100000; \
add x15, x15, x15, lsl #21; \
add x13, x4, x15; \
asr x13, x13, #42; \
add x14, x5, #0x100, lsl #12; \
sbfx x14, x14, #21, #21; \
add x15, x5, x15; \
asr x15, x15, #42; \
mul x6, x12, x2; \
mul x7, x13, x3; \
mul x2, x14, x2; \
mul x3, x15, x3; \
add x4, x6, x7; \
add x5, x2, x3; \
asr x2, x4, #20; \
asr x3, x5, #20; \
and x4, x2, #0xfffff; \
orr x4, x4, #0xfffffe0000000000; \
and x5, x3, #0xfffff; \
orr x5, x5, #0xc000000000000000; \
tst x5, #0x1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
mul x2, x12, x8; \
mul x3, x12, x9; \
mul x6, x14, x8; \
mul x7, x14, x9; \
madd x8, x13, x10, x2; \
madd x9, x13, x11, x3; \
madd x16, x15, x10, x6; \
madd x17, x15, x11, x7; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
asr x5, x5, #1; \
add x12, x4, #0x100, lsl #12; \
sbfx x12, x12, #22, #21; \
mov x15, #0x100000; \
add x15, x15, x15, lsl #21; \
add x13, x4, x15; \
asr x13, x13, #43; \
add x14, x5, #0x100, lsl #12; \
sbfx x14, x14, #22, #21; \
add x15, x5, x15; \
asr x15, x15, #43; \
mneg x2, x12, x8; \
mneg x3, x12, x9; \
mneg x4, x14, x8; \
mneg x5, x14, x9; \
msub m00, x13, x16, x2; \
msub m01, x13, x17, x3; \
msub m10, x15, x16, x4; \
msub m11, x15, x17, x5
S2N_BN_SYMBOL(bignum_inv_p384):
// Save registers and make room for temporaries
stp x19, x20, [sp, -16]!
stp x21, x22, [sp, -16]!
stp x23, x24, [sp, -16]!
sub sp, sp, NSPACE
// Save the return pointer for the end so we can overwrite x0 later
mov res, x0
// Copy the prime and input into the main f and g variables respectively.
// Make sure x is reduced so that g <= f as assumed in the bound proof.
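// The constants below are the little-endian words of
// p_384 = 2^384 - 2^128 - 2^96 + 2^32 - 1, i.e.
// [0x00000000ffffffff; 0xffffffff00000000; 0xfffffffffffffffe; then -1 x 3].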
mov x10, #0x00000000ffffffff
mov x11, #0xffffffff00000000
mov x12, #0xfffffffffffffffe
mov x15, #0xffffffffffffffff
stp x10, x11, [f]
stp x12, x15, [f+2*N]
stp x15, x15, [f+4*N]
str xzr, [f+6*N]
ldp x2, x3, [x1]
subs x10, x2, x10
sbcs x11, x3, x11
ldp x4, x5, [x1, #(2*N)]
sbcs x12, x4, x12
sbcs x13, x5, x15
ldp x6, x7, [x1, #(4*N)]
sbcs x14, x6, x15
sbcs x15, x7, x15
csel x2, x2, x10, cc
csel x3, x3, x11, cc
csel x4, x4, x12, cc
csel x5, x5, x13, cc
csel x6, x6, x14, cc
csel x7, x7, x15, cc
stp x2, x3, [g]
stp x4, x5, [g+2*N]
stp x6, x7, [g+4*N]
str xzr, [g+6*N]
// Also maintain reduced < 2^384 vector [u,v] such that
// [f,g] == x * 2^{5*i-75} * [u,v] (mod p_384)
// starting with [p_384,x] == x * 2^{5*0-75} * [0,2^75] (mod p_384)
// The weird-looking 5*i modifications come in because we are doing
// 64-bit word-sized Montgomery reductions at each stage, which is
// 5 bits more than the 59-bit requirement to keep things stable.
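// Concretely, the initial v = 2^75 is stored below as digit 1 = 2048,
// since 2^75 = 2^11 * 2^64 and the digits are little-endian 64-bit words.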
stp xzr, xzr, [u]
stp xzr, xzr, [u+2*N]
stp xzr, xzr, [u+4*N]
mov x10, #2048
stp xzr, x10, [v]
stp xzr, xzr, [v+2*N]
stp xzr, xzr, [v+4*N]
// Start of main loop. We jump into the middle so that the divstep
// portion is common to the special fifteenth iteration after a uniform
// first 14.
mov i, #15
mov d, #1
b midloop
loop:
// Separate the matrix elements into sign-magnitude pairs
cmp m00, xzr
csetm s00, mi
cneg m00, m00, mi
cmp m01, xzr
csetm s01, mi
cneg m01, m01, mi
cmp m10, xzr
csetm s10, mi
cneg m10, m10, mi
cmp m11, xzr
csetm s11, mi
cneg m11, m11, mi
// Adjust the initial values to allow for complement instead of negation
// This initial offset is the same for [f,g] and [u,v] compositions.
// Save it in stable registers for the [u,v] part and do [f,g] first.
and x0, m00, s00
and x1, m01, s01
add car0, x0, x1
and x0, m10, s10
and x1, m11, s11
add car1, x0, x1
// Now the computation of the updated f and g values. This maintains a
// 2-word carry between stages so we can conveniently insert the shift
// right by 59 before storing back, and not overwrite digits we need
// again of the old f and g values.
//
// Digit 0 of [f,g]
ldr x7, [f]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x4, car0, x0
adc x2, xzr, x1
ldr x8, [g]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x4, x4, x0
adc x2, x2, x1
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x5, car1, x0
adc x3, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x5, x5, x0
adc x3, x3, x1
// Digit 1 of [f,g]
ldr x7, [f+N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x2, x2, x0
adc x6, xzr, x1
ldr x8, [g+N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x2, x2, x0
adc x6, x6, x1
extr x4, x2, x4, #59
str x4, [f]
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x3, x3, x0
adc x4, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x3, x3, x0
adc x4, x4, x1
extr x5, x3, x5, #59
str x5, [g]
// Digit 2 of [f,g]
ldr x7, [f+2*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x6, x6, x0
adc x5, xzr, x1
ldr x8, [g+2*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x6, x6, x0
adc x5, x5, x1
extr x2, x6, x2, #59
str x2, [f+N]
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x4, x4, x0
adc x2, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x4, x4, x0
adc x2, x2, x1
extr x3, x4, x3, #59
str x3, [g+N]
// Digit 3 of [f,g]
ldr x7, [f+3*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x5, x5, x0
adc x3, xzr, x1
ldr x8, [g+3*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x5, x5, x0
adc x3, x3, x1
extr x6, x5, x6, #59
str x6, [f+2*N]
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x2, x2, x0
adc x6, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x2, x2, x0
adc x6, x6, x1
extr x4, x2, x4, #59
str x4, [g+2*N]
// Digit 4 of [f,g]
ldr x7, [f+4*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x3, x3, x0
adc x4, xzr, x1
ldr x8, [g+4*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x3, x3, x0
adc x4, x4, x1
extr x5, x3, x5, #59
str x5, [f+3*N]
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x6, x6, x0
adc x5, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x6, x6, x0
adc x5, x5, x1
extr x2, x6, x2, #59
str x2, [g+3*N]
// Digits 5 and 6 of [f,g]
ldr x7, [f+5*N]
eor x1, x7, s00
ldr x23, [f+6*N]
eor x2, x23, s00
and x2, x2, m00
neg x2, x2
mul x0, x1, m00
umulh x1, x1, m00
adds x4, x4, x0
adc x2, x2, x1
ldr x8, [g+5*N]
eor x1, x8, s01
ldr x24, [g+6*N]
eor x0, x24, s01
and x0, x0, m01
sub x2, x2, x0
mul x0, x1, m01
umulh x1, x1, m01
adds x4, x4, x0
adc x2, x2, x1
extr x3, x4, x3, #59
str x3, [f+4*N]
extr x4, x2, x4, #59
str x4, [f+5*N]
asr x2, x2, #59
str x2, [f+6*N]
eor x1, x7, s10
eor x4, x23, s10
and x4, x4, m10
neg x4, x4
mul x0, x1, m10
umulh x1, x1, m10
adds x5, x5, x0
adc x4, x4, x1
eor x1, x8, s11
eor x0, x24, s11
and x0, x0, m11
sub x4, x4, x0
mul x0, x1, m11
umulh x1, x1, m11
adds x5, x5, x0
adc x4, x4, x1
extr x6, x5, x6, #59
str x6, [g+4*N]
extr x5, x4, x5, #59
str x5, [g+5*N]
asr x4, x4, #59
str x4, [g+6*N]
// Now the computation of the updated u and v values and their
// Montgomery reductions. A very similar accumulation except that
// the top words of u and v are unsigned and we don't shift.
//
// Digit 0 of [u,v]
ldr x7, [u]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x4, car0, x0
adc x2, xzr, x1
ldr x8, [v]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x4, x4, x0
str x4, [u]
adc x2, x2, x1
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x5, car1, x0
adc x3, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x5, x5, x0
str x5, [v]
adc x3, x3, x1
// Digit 1 of [u,v]
ldr x7, [u+N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x2, x2, x0
adc x6, xzr, x1
ldr x8, [v+N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x2, x2, x0
str x2, [u+N]
adc x6, x6, x1
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x3, x3, x0
adc x4, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x3, x3, x0
str x3, [v+N]
adc x4, x4, x1
// Digit 2 of [u,v]
ldr x7, [u+2*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x6, x6, x0
adc x5, xzr, x1
ldr x8, [v+2*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x6, x6, x0
str x6, [u+2*N]
adc x5, x5, x1
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x4, x4, x0
adc x2, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x4, x4, x0
str x4, [v+2*N]
adc x2, x2, x1
// Digit 3 of [u,v]
ldr x7, [u+3*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x5, x5, x0
adc x3, xzr, x1
ldr x8, [v+3*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x5, x5, x0
str x5, [u+3*N]
adc x3, x3, x1
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x2, x2, x0
adc x6, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x2, x2, x0
str x2, [v+3*N]
adc x6, x6, x1
// Digit 4 of [u,v]
ldr x7, [u+4*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x3, x3, x0
adc x4, xzr, x1
ldr x8, [v+4*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x3, x3, x0
str x3, [u+4*N]
adc x4, x4, x1
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x6, x6, x0
adc x5, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x6, x6, x0
str x6, [v+4*N]
adc x5, x5, x1
// Digits 5 and 6 of [u,v] (top is unsigned)
ldr x7, [u+5*N]
eor x1, x7, s00
and x2, s00, m00
neg x2, x2
mul x0, x1, m00
umulh x1, x1, m00
adds x4, x4, x0
adc x2, x2, x1
ldr x8, [v+5*N]
eor x1, x8, s01
and x0, s01, m01
sub x2, x2, x0
mul x0, x1, m01
umulh x1, x1, m01
adds x4, x4, x0
str x4, [u+5*N]
adc x2, x2, x1
str x2, [u+6*N]
eor x1, x7, s10
and x4, s10, m10
neg x4, x4
mul x0, x1, m10
umulh x1, x1, m10
adds x5, x5, x0
adc x4, x4, x1
eor x1, x8, s11
and x0, s11, m11
sub x4, x4, x0
mul x0, x1, m11
umulh x1, x1, m11
adds x5, x5, x0
str x5, [v+5*N]
adc x4, x4, x1
str x4, [v+6*N]
// Montgomery reduction of u
ldp x0, x1, [u]
ldp x2, x3, [u+16]
ldp x4, x5, [u+32]
ldr x6, [u+48]
amontred(x6,x5,x4,x3,x2,x1,x0, x9,x8,x7)
stp x1, x2, [u]
stp x3, x4, [u+16]
stp x5, x6, [u+32]
// Montgomery reduction of v
ldp x0, x1, [v]
ldp x2, x3, [v+16]
ldp x4, x5, [v+32]
ldr x6, [v+48]
amontred(x6,x5,x4,x3,x2,x1,x0, x9,x8,x7)
stp x1, x2, [v]
stp x3, x4, [v+16]
stp x5, x6, [v+32]
midloop:
mov x1, d
ldr x2, [f]
ldr x3, [g]
divstep59()
mov d, x1
// Next iteration
subs i, i, #1
bne loop
// The 15th and last iteration does not need anything except the
// u value and the sign of f; the latter can be obtained from the
// lowest word of f. So it's done differently from the main loop.
// Find the sign of the new f. For this we just need one digit
// since we know (for in-scope cases) that f is either +1 or -1.
// We don't explicitly shift right by 59 either, but looking at
// bit 63 (or any bit >= 60) of the unshifted result is enough
// to distinguish -1 from +1; this is then made into a mask.
ldr x0, [f]
ldr x1, [g]
mul x0, x0, m00
madd x1, x1, m01, x0
asr x0, x1, #63
// Now separate out the matrix into sign-magnitude pairs
// and adjust each one based on the sign of f.
//
// Note that at this point we expect |f|=1 and we got its
// sign above, so then since [f,0] == x * [u,v] (mod p_384)
// we want to flip the sign of u according to that of f.
cmp m00, xzr
csetm s00, mi
cneg m00, m00, mi
eor s00, s00, x0
cmp m01, xzr
csetm s01, mi
cneg m01, m01, mi
eor s01, s01, x0
cmp m10, xzr
csetm s10, mi
cneg m10, m10, mi
eor s10, s10, x0
cmp m11, xzr
csetm s11, mi
cneg m11, m11, mi
eor s11, s11, x0
// Adjust the initial value to allow for complement instead of negation
and x0, m00, s00
and x1, m01, s01
add car0, x0, x1
// Digit 0 of [u]
ldr x7, [u]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x4, car0, x0
adc x2, xzr, x1
ldr x8, [v]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x4, x4, x0
str x4, [u]
adc x2, x2, x1
// Digit 1 of [u]
ldr x7, [u+N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x2, x2, x0
adc x6, xzr, x1
ldr x8, [v+N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x2, x2, x0
str x2, [u+N]
adc x6, x6, x1
// Digit 2 of [u]
ldr x7, [u+2*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x6, x6, x0
adc x5, xzr, x1
ldr x8, [v+2*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x6, x6, x0
str x6, [u+2*N]
adc x5, x5, x1
// Digit 3 of [u]
ldr x7, [u+3*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x5, x5, x0
adc x3, xzr, x1
ldr x8, [v+3*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x5, x5, x0
str x5, [u+3*N]
adc x3, x3, x1
// Digit 4 of [u]
ldr x7, [u+4*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x3, x3, x0
adc x4, xzr, x1
ldr x8, [v+4*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x3, x3, x0
str x3, [u+4*N]
adc x4, x4, x1
// Digits 5 and 6 of [u] (top is unsigned)
ldr x7, [u+5*N]
eor x1, x7, s00
and x2, s00, m00
neg x2, x2
mul x0, x1, m00
umulh x1, x1, m00
adds x4, x4, x0
adc x2, x2, x1
ldr x8, [v+5*N]
eor x1, x8, s01
and x0, s01, m01
sub x2, x2, x0
mul x0, x1, m01
umulh x1, x1, m01
adds x4, x4, x0
str x4, [u+5*N]
adc x2, x2, x1
str x2, [u+6*N]
// Montgomery reduction of u. This needs to be strict not "almost"
// so it is followed by an optional subtraction of p_384
ldp x10, x0, [u]
ldp x1, x2, [u+16]
ldp x3, x4, [u+32]
ldr x5, [u+48]
amontred(x5,x4,x3,x2,x1,x0,x10, x9,x8,x7)
mov x10, #0x00000000ffffffff
subs x10, x0, x10
mov x11, #0xffffffff00000000
sbcs x11, x1, x11
mov x12, #0xfffffffffffffffe
sbcs x12, x2, x12
mov x15, #0xffffffffffffffff
sbcs x13, x3, x15
sbcs x14, x4, x15
sbcs x15, x5, x15
csel x0, x0, x10, cc
csel x1, x1, x11, cc
csel x2, x2, x12, cc
csel x3, x3, x13, cc
csel x4, x4, x14, cc
csel x5, x5, x15, cc
// Store it back to the final output
stp x0, x1, [res]
stp x2, x3, [res, #16]
stp x4, x5, [res, #32]
// Restore stack and registers
add sp, sp, NSPACE
ldp x23, x24, [sp], 16
ldp x21, x22, [sp], 16
ldp x19, x20, [sp], 16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/p384_montjscalarmul_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery-Jacobian form scalar multiplication for P-384
// Input scalar[6], point[18]; output res[18]
//
// extern void p384_montjscalarmul_alt
// (uint64_t res[static 18],
// uint64_t scalar[static 6],
// uint64_t point[static 18]);
//
// This function is a variant of its affine point version p384_scalarmul_alt.
// Here, input and output points are assumed to be in Jacobian form with
// their coordinates in the Montgomery domain. Thus, if priming indicates
// Montgomery form, x' = (2^384 * x) mod p_384 etc., each point argument
// is a triple (x',y',z') representing the affine point (x/z^2,y/z^3) when
// z' is nonzero or the point at infinity (group identity) if z' = 0.
//
// Given scalar = n and point = P, assumed to be on the NIST elliptic
// curve P-384, returns a representation of n * P. If the result is the
// point at infinity (either because the input point was or because the
// scalar was a multiple of p_384) then the output is guaranteed to
// represent the point at infinity, i.e. to have its z coordinate zero.
//
// Standard ARM ABI: X0 = res, X1 = scalar, X2 = point
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjscalarmul_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjscalarmul_alt)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 48
#define JACSIZE (3*NUMSIZE)
// Safe copies of input res and additional values in variables.
#define bf x22
#define sgn x23
#define j x24
#define res x25
// Intermediate variables on the stack.
// The table is 16 entries, each of size JACSIZE = 3 * NUMSIZE
#define scalarb sp, #(0*NUMSIZE)
#define acc sp, #(1*NUMSIZE)
#define tabent sp, #(4*NUMSIZE)
#define tab sp, #(7*NUMSIZE)
#define NSPACE #(55*NUMSIZE)
// Avoid using .rep for the sake of the BoringSSL/AWS-LC delocator,
// which doesn't accept repetitions, assembler macros etc.
#define selectblock(I) \
cmp bf, #(1*I); \
ldp x20, x21, [x19]; \
csel x0, x20, x0, eq; \
csel x1, x21, x1, eq; \
ldp x20, x21, [x19, #16]; \
csel x2, x20, x2, eq; \
csel x3, x21, x3, eq; \
ldp x20, x21, [x19, #32]; \
csel x4, x20, x4, eq; \
csel x5, x21, x5, eq; \
ldp x20, x21, [x19, #48]; \
csel x6, x20, x6, eq; \
csel x7, x21, x7, eq; \
ldp x20, x21, [x19, #64]; \
csel x8, x20, x8, eq; \
csel x9, x21, x9, eq; \
ldp x20, x21, [x19, #80]; \
csel x10, x20, x10, eq; \
csel x11, x21, x11, eq; \
ldp x20, x21, [x19, #96]; \
csel x12, x20, x12, eq; \
csel x13, x21, x13, eq; \
ldp x20, x21, [x19, #112]; \
csel x14, x20, x14, eq; \
csel x15, x21, x15, eq; \
ldp x20, x21, [x19, #128]; \
csel x16, x20, x16, eq; \
csel x17, x21, x17, eq; \
add x19, x19, #JACSIZE
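// Intended use of selectblock: zero x0..x17, point x19 at the table base,
// then invoke selectblock(1)..selectblock(16); only the entry whose index
// equals bf is csel-ed into x0..x17, so the memory access pattern is
// independent of the secret index.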
// Loading large constants
#define movbig(nn,n3,n2,n1,n0) \
movz nn, n0; \
movk nn, n1, lsl #16; \
movk nn, n2, lsl #32; \
movk nn, n3, lsl #48
S2N_BN_SYMBOL(p384_montjscalarmul_alt):
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x30, [sp, #-16]!
sub sp, sp, NSPACE
// Preserve the "res" input argument; others get processed early.
mov res, x0
// Reduce the input scalar mod n_384, i.e. conditionally subtract n_384.
// Store it to "scalarb".
ldp x3, x4, [x1]
movbig(x15, #0xecec, #0x196a, #0xccc5, #0x2973)
ldp x5, x6, [x1, #16]
movbig(x16, #0x581a, #0x0db2, #0x48b0, #0xa77a)
ldp x7, x8, [x1, #32]
movbig(x17, #0xc763, #0x4d81, #0xf437, #0x2ddf)
subs x9, x3, x15
sbcs x10, x4, x16
sbcs x11, x5, x17
adcs x12, x6, xzr
adcs x13, x7, xzr
adcs x14, x8, xzr
csel x3, x3, x9, cc
csel x4, x4, x10, cc
csel x5, x5, x11, cc
csel x6, x6, x12, cc
csel x7, x7, x13, cc
csel x8, x8, x14, cc
stp x3, x4, [scalarb]
stp x5, x6, [scalarb+16]
stp x7, x8, [scalarb+32]
// Set the tab[0] table entry to the input point = 1 * P
ldp x10, x11, [x2]
stp x10, x11, [tab]
ldp x12, x13, [x2, #16]
stp x12, x13, [tab+16]
ldp x14, x15, [x2, #32]
stp x14, x15, [tab+32]
ldp x10, x11, [x2, #48]
stp x10, x11, [tab+48]
ldp x12, x13, [x2, #64]
stp x12, x13, [tab+64]
ldp x14, x15, [x2, #80]
stp x14, x15, [tab+80]
ldp x10, x11, [x2, #96]
stp x10, x11, [tab+96]
ldp x12, x13, [x2, #112]
stp x12, x13, [tab+112]
ldp x14, x15, [x2, #128]
stp x14, x15, [tab+128]
// Compute and record tab[1] = 2 * P, ..., tab[15] = 16 * P
add x0, tab+JACSIZE*1
add x1, tab
bl p384_montjscalarmul_alt_p384_montjdouble
add x0, tab+JACSIZE*2
add x1, tab+JACSIZE*1
add x2, tab
bl p384_montjscalarmul_alt_p384_montjadd
add x0, tab+JACSIZE*3
add x1, tab+JACSIZE*1
bl p384_montjscalarmul_alt_p384_montjdouble
add x0, tab+JACSIZE*4
add x1, tab+JACSIZE*3
add x2, tab
bl p384_montjscalarmul_alt_p384_montjadd
add x0, tab+JACSIZE*5
add x1, tab+JACSIZE*2
bl p384_montjscalarmul_alt_p384_montjdouble
add x0, tab+JACSIZE*6
add x1, tab+JACSIZE*5
add x2, tab
bl p384_montjscalarmul_alt_p384_montjadd
add x0, tab+JACSIZE*7
add x1, tab+JACSIZE*3
bl p384_montjscalarmul_alt_p384_montjdouble
add x0, tab+JACSIZE*8
add x1, tab+JACSIZE*7
add x2, tab
bl p384_montjscalarmul_alt_p384_montjadd
add x0, tab+JACSIZE*9
add x1, tab+JACSIZE*4
bl p384_montjscalarmul_alt_p384_montjdouble
add x0, tab+JACSIZE*10
add x1, tab+JACSIZE*9
add x2, tab
bl p384_montjscalarmul_alt_p384_montjadd
add x0, tab+JACSIZE*11
add x1, tab+JACSIZE*5
bl p384_montjscalarmul_alt_p384_montjdouble
add x0, tab+JACSIZE*12
add x1, tab+JACSIZE*11
add x2, tab
bl p384_montjscalarmul_alt_p384_montjadd
add x0, tab+JACSIZE*13
add x1, tab+JACSIZE*6
bl p384_montjscalarmul_alt_p384_montjdouble
add x0, tab+JACSIZE*14
add x1, tab+JACSIZE*13
add x2, tab
bl p384_montjscalarmul_alt_p384_montjadd
add x0, tab+JACSIZE*15
add x1, tab+JACSIZE*7
bl p384_montjscalarmul_alt_p384_montjdouble
// Add the recoding constant sum_i(16 * 32^i) to the scalar to allow signed
// digits. The digits of the constant, in lowest-to-highest order, are as
// follows; they are generated dynamically since none is a simple ARM load.
//
// 0x0842108421084210
// 0x1084210842108421
// 0x2108421084210842
// 0x4210842108421084
// 0x8421084210842108
// 0x0842108421084210
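//
// Each digit is the previous one rotated up by one bit, so all six are
// derived below from the single movbig constant c = 0x1084210842108421
// by shifts: (c >> 1, c, c << 1, c << 2, c << 3, (c << 3) >> 4).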
ldp x0, x1, [scalarb]
ldp x2, x3, [scalarb+16]
ldp x4, x5, [scalarb+32]
movbig(x8, #0x1084, #0x2108, #0x4210, #0x8421)
adds x0, x0, x8, lsr #1
adcs x1, x1, x8
lsl x8, x8, #1
adcs x2, x2, x8
lsl x8, x8, #1
adcs x3, x3, x8
lsl x8, x8, #1
adcs x4, x4, x8
lsr x8, x8, #4
adcs x5, x5, x8
cset x6, cs
// Record the top bitfield then shift the whole scalar left 4 bits
// to align the top of the next bitfield with the MSB (bits 379..383).
extr bf, x6, x5, #60
extr x5, x5, x4, #60
extr x4, x4, x3, #60
extr x3, x3, x2, #60
extr x2, x2, x1, #60
extr x1, x1, x0, #60
lsl x0, x0, #4
stp x0, x1, [scalarb]
stp x2, x3, [scalarb+16]
stp x4, x5, [scalarb+32]
// Initialize the accumulator to the corresponding entry using constant-time
// lookup in the table. This top digit, uniquely, is not recoded so there is
// no sign adjustment to make.
mov x0, xzr
mov x1, xzr
mov x2, xzr
mov x3, xzr
mov x4, xzr
mov x5, xzr
mov x6, xzr
mov x7, xzr
mov x8, xzr
mov x9, xzr
mov x10, xzr
mov x11, xzr
mov x12, xzr
mov x13, xzr
mov x14, xzr
mov x15, xzr
mov x16, xzr
mov x17, xzr
add x19, tab
selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)
selectblock(9)
selectblock(10)
selectblock(11)
selectblock(12)
selectblock(13)
selectblock(14)
selectblock(15)
selectblock(16)
stp x0, x1, [acc]
stp x2, x3, [acc+16]
stp x4, x5, [acc+32]
stp x6, x7, [acc+48]
stp x8, x9, [acc+64]
stp x10, x11, [acc+80]
stp x12, x13, [acc+96]
stp x14, x15, [acc+112]
stp x16, x17, [acc+128]
mov j, #380
// Main loop over size-5 bitfields: double 5 times then add signed digit
// At each stage we shift the scalar left by 5 bits so we can simply pick
// the top 5 bits as the bitfield, saving some fiddle over indexing.
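// (j counts the bits still to process: starting at 380 and consuming 5
// per round gives 76 double-5-times-and-add rounds after the initial
// top-window load, covering all 385 bits of the recoded scalar.)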
p384_montjscalarmul_alt_mainloop:
sub j, j, #5
add x0, acc
add x1, acc
bl p384_montjscalarmul_alt_p384_montjdouble
add x0, acc
add x1, acc
bl p384_montjscalarmul_alt_p384_montjdouble
add x0, acc
add x1, acc
bl p384_montjscalarmul_alt_p384_montjdouble
add x0, acc
add x1, acc
bl p384_montjscalarmul_alt_p384_montjdouble
add x0, acc
add x1, acc
bl p384_montjscalarmul_alt_p384_montjdouble
// Choose the bitfield and adjust it to sign and magnitude
ldp x0, x1, [scalarb]
ldp x2, x3, [scalarb+16]
ldp x4, x5, [scalarb+32]
lsr bf, x5, #59
extr x5, x5, x4, #59
extr x4, x4, x3, #59
extr x3, x3, x2, #59
extr x2, x2, x1, #59
extr x1, x1, x0, #59
lsl x0, x0, #5
stp x0, x1, [scalarb]
stp x2, x3, [scalarb+16]
stp x4, x5, [scalarb+32]
subs bf, bf, #16
cset sgn, lo // sgn = sign of digit (1 = negative)
cneg bf, bf, lo // bf = absolute value of digit
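// (After the subtraction the signed digit lies in -16..+15; negative
// digits use the negated point below, which is why the table needs only
// the sixteen multiples 1 * P .. 16 * P.)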
// Conditionally select the table entry tab[i-1] = i * P in constant time
mov x0, xzr
mov x1, xzr
mov x2, xzr
mov x3, xzr
mov x4, xzr
mov x5, xzr
mov x6, xzr
mov x7, xzr
mov x8, xzr
mov x9, xzr
mov x10, xzr
mov x11, xzr
mov x12, xzr
mov x13, xzr
mov x14, xzr
mov x15, xzr
mov x16, xzr
mov x17, xzr
add x19, tab
selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)
selectblock(9)
selectblock(10)
selectblock(11)
selectblock(12)
selectblock(13)
selectblock(14)
selectblock(15)
selectblock(16)
// Store it to "tabent" with the y coordinate optionally negated.
// This is done carefully to give coordinates < p_384 even in
// the degenerate case y = 0 (when z = 0 for points on the curve).
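// The negation is computed as p_384 - y, and the ccmp below also insists
// that y (orr-accumulated into x12) is nonzero, so a zero y is left as
// zero rather than becoming p_384 itself.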
stp x0, x1, [tabent]
stp x2, x3, [tabent+16]
stp x4, x5, [tabent+32]
stp x12, x13, [tabent+96]
stp x14, x15, [tabent+112]
stp x16, x17, [tabent+128]
mov x0, #0x00000000ffffffff
subs x0, x0, x6
orr x12, x6, x7
mov x1, #0xffffffff00000000
sbcs x1, x1, x7
orr x13, x8, x9
mov x2, #0xfffffffffffffffe
sbcs x2, x2, x8
orr x14, x10, x11
mov x5, #0xffffffffffffffff
sbcs x3, x5, x9
orr x12, x12, x13
sbcs x4, x5, x10
orr x12, x12, x14
sbcs x5, x5, x11
cmp sgn, xzr
ccmp x12, xzr, #4, ne
csel x6, x0, x6, ne
csel x7, x1, x7, ne
csel x8, x2, x8, ne
csel x9, x3, x9, ne
csel x10, x4, x10, ne
csel x11, x5, x11, ne
stp x6, x7, [tabent+48]
stp x8, x9, [tabent+64]
stp x10, x11, [tabent+80]
// Add to the accumulator
add x0, acc
add x1, acc
add x2, tabent
bl p384_montjscalarmul_alt_p384_montjadd
cbnz j, p384_montjscalarmul_alt_mainloop
// That's the end of the main loop, and we just need to copy the
// result in "acc" to the output.
ldp x0, x1, [acc]
stp x0, x1, [res]
ldp x0, x1, [acc+16]
stp x0, x1, [res, #16]
ldp x0, x1, [acc+32]
stp x0, x1, [res, #32]
ldp x0, x1, [acc+48]
stp x0, x1, [res, #48]
ldp x0, x1, [acc+64]
stp x0, x1, [res, #64]
ldp x0, x1, [acc+80]
stp x0, x1, [res, #80]
ldp x0, x1, [acc+96]
stp x0, x1, [res, #96]
ldp x0, x1, [acc+112]
stp x0, x1, [res, #112]
ldp x0, x1, [acc+128]
stp x0, x1, [res, #128]
// Restore stack and registers and return
add sp, sp, NSPACE
ldp x25, x30, [sp], 16
ldp x23, x24, [sp], 16
ldp x21, x22, [sp], 16
ldp x19, x20, [sp], 16
ret
// Local copies of subroutines, complete clones at the moment
p384_montjscalarmul_alt_p384_montjadd:
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
sub sp, sp, #336
mov x24, x0
mov x25, x1
mov x26, x2
ldp x2, x3, [x25, #96]
mul x9, x2, x3
umulh x10, x2, x3
ldp x4, x5, [x25, #112]
mul x8, x2, x4
adds x10, x10, x8
mul x11, x2, x5
mul x8, x3, x4
adcs x11, x11, x8
umulh x12, x2, x5
mul x8, x3, x5
adcs x12, x12, x8
ldp x6, x7, [x25, #128]
mul x13, x2, x7
mul x8, x3, x6
adcs x13, x13, x8
umulh x14, x2, x7
mul x8, x3, x7
adcs x14, x14, x8
mul x15, x5, x6
adcs x15, x15, xzr
umulh x16, x5, x6
adc x16, x16, xzr
umulh x8, x2, x4
adds x11, x11, x8
umulh x8, x3, x4
adcs x12, x12, x8
umulh x8, x3, x5
adcs x13, x13, x8
umulh x8, x3, x6
adcs x14, x14, x8
umulh x8, x3, x7
adcs x15, x15, x8
adc x16, x16, xzr
mul x8, x2, x6
adds x12, x12, x8
mul x8, x4, x5
adcs x13, x13, x8
mul x8, x4, x6
adcs x14, x14, x8
mul x8, x4, x7
adcs x15, x15, x8
mul x8, x5, x7
adcs x16, x16, x8
mul x17, x6, x7
adcs x17, x17, xzr
umulh x19, x6, x7
adc x19, x19, xzr
umulh x8, x2, x6
adds x13, x13, x8
umulh x8, x4, x5
adcs x14, x14, x8
umulh x8, x4, x6
adcs x15, x15, x8
umulh x8, x4, x7
adcs x16, x16, x8
umulh x8, x5, x7
adcs x17, x17, x8
adc x19, x19, xzr
adds x9, x9, x9
adcs x10, x10, x10
adcs x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adcs x17, x17, x17
adcs x19, x19, x19
cset x20, hs
umulh x8, x2, x2
mul x2, x2, x2
adds x9, x9, x8
mul x8, x3, x3
adcs x10, x10, x8
umulh x8, x3, x3
adcs x11, x11, x8
mul x8, x4, x4
adcs x12, x12, x8
umulh x8, x4, x4
adcs x13, x13, x8
mul x8, x5, x5
adcs x14, x14, x8
umulh x8, x5, x5
adcs x15, x15, x8
mul x8, x6, x6
adcs x16, x16, x8
umulh x8, x6, x6
adcs x17, x17, x8
mul x8, x7, x7
adcs x19, x19, x8
umulh x8, x7, x7
adc x20, x20, x8
lsl x5, x2, #32
add x2, x5, x2
mov x5, #-4294967295
umulh x5, x5, x2
mov x4, #4294967295
mul x3, x4, x2
umulh x4, x4, x2
adds x5, x5, x3
adcs x4, x4, x2
adc x3, xzr, xzr
subs x9, x9, x5
sbcs x10, x10, x4
sbcs x11, x11, x3
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x2, x2, xzr
lsl x5, x9, #32
add x9, x5, x9
mov x5, #-4294967295
umulh x5, x5, x9
mov x4, #4294967295
mul x3, x4, x9
umulh x4, x4, x9
adds x5, x5, x3
adcs x4, x4, x9
adc x3, xzr, xzr
subs x10, x10, x5
sbcs x11, x11, x4
sbcs x12, x12, x3
sbcs x13, x13, xzr
sbcs x2, x2, xzr
sbc x9, x9, xzr
lsl x5, x10, #32
add x10, x5, x10
mov x5, #-4294967295
umulh x5, x5, x10
mov x4, #4294967295
mul x3, x4, x10
umulh x4, x4, x10
adds x5, x5, x3
adcs x4, x4, x10
adc x3, xzr, xzr
subs x11, x11, x5
sbcs x12, x12, x4
sbcs x13, x13, x3
sbcs x2, x2, xzr
sbcs x9, x9, xzr
sbc x10, x10, xzr
lsl x5, x11, #32
add x11, x5, x11
mov x5, #-4294967295
umulh x5, x5, x11
mov x4, #4294967295
mul x3, x4, x11
umulh x4, x4, x11
adds x5, x5, x3
adcs x4, x4, x11
adc x3, xzr, xzr
subs x12, x12, x5
sbcs x13, x13, x4
sbcs x2, x2, x3
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbc x11, x11, xzr
lsl x5, x12, #32
add x12, x5, x12
mov x5, #-4294967295
umulh x5, x5, x12
mov x4, #4294967295
mul x3, x4, x12
umulh x4, x4, x12
adds x5, x5, x3
adcs x4, x4, x12
adc x3, xzr, xzr
subs x13, x13, x5
sbcs x2, x2, x4
sbcs x9, x9, x3
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbc x12, x12, xzr
lsl x5, x13, #32
add x13, x5, x13
mov x5, #-4294967295
umulh x5, x5, x13
mov x4, #4294967295
mul x3, x4, x13
umulh x4, x4, x13
adds x5, x5, x3
adcs x4, x4, x13
adc x3, xzr, xzr
subs x2, x2, x5
sbcs x9, x9, x4
sbcs x10, x10, x3
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
adds x2, x2, x14
adcs x9, x9, x15
adcs x10, x10, x16
adcs x11, x11, x17
adcs x12, x12, x19
adcs x13, x13, x20
mov x14, #-4294967295
mov x15, #4294967295
csel x14, x14, xzr, hs
csel x15, x15, xzr, hs
cset x16, hs
adds x2, x2, x14
adcs x9, x9, x15
adcs x10, x10, x16
adcs x11, x11, xzr
adcs x12, x12, xzr
adc x13, x13, xzr
stp x2, x9, [sp]
stp x10, x11, [sp, #16]
stp x12, x13, [sp, #32]
ldp x2, x3, [x26, #96]
mul x9, x2, x3
umulh x10, x2, x3
ldp x4, x5, [x26, #112]
mul x8, x2, x4
adds x10, x10, x8
mul x11, x2, x5
mul x8, x3, x4
adcs x11, x11, x8
umulh x12, x2, x5
mul x8, x3, x5
adcs x12, x12, x8
ldp x6, x7, [x26, #128]
mul x13, x2, x7
mul x8, x3, x6
adcs x13, x13, x8
umulh x14, x2, x7
mul x8, x3, x7
adcs x14, x14, x8
mul x15, x5, x6
adcs x15, x15, xzr
umulh x16, x5, x6
adc x16, x16, xzr
umulh x8, x2, x4
adds x11, x11, x8
umulh x8, x3, x4
adcs x12, x12, x8
umulh x8, x3, x5
adcs x13, x13, x8
umulh x8, x3, x6
adcs x14, x14, x8
umulh x8, x3, x7
adcs x15, x15, x8
adc x16, x16, xzr
mul x8, x2, x6
adds x12, x12, x8
mul x8, x4, x5
adcs x13, x13, x8
mul x8, x4, x6
adcs x14, x14, x8
mul x8, x4, x7
adcs x15, x15, x8
mul x8, x5, x7
adcs x16, x16, x8
mul x17, x6, x7
adcs x17, x17, xzr
umulh x19, x6, x7
adc x19, x19, xzr
umulh x8, x2, x6
adds x13, x13, x8
umulh x8, x4, x5
adcs x14, x14, x8
umulh x8, x4, x6
adcs x15, x15, x8
umulh x8, x4, x7
adcs x16, x16, x8
umulh x8, x5, x7
adcs x17, x17, x8
adc x19, x19, xzr
adds x9, x9, x9
adcs x10, x10, x10
adcs x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adcs x17, x17, x17
adcs x19, x19, x19
cset x20, hs
umulh x8, x2, x2
mul x2, x2, x2
adds x9, x9, x8
mul x8, x3, x3
adcs x10, x10, x8
umulh x8, x3, x3
adcs x11, x11, x8
mul x8, x4, x4
adcs x12, x12, x8
umulh x8, x4, x4
adcs x13, x13, x8
mul x8, x5, x5
adcs x14, x14, x8
umulh x8, x5, x5
adcs x15, x15, x8
mul x8, x6, x6
adcs x16, x16, x8
umulh x8, x6, x6
adcs x17, x17, x8
mul x8, x7, x7
adcs x19, x19, x8
umulh x8, x7, x7
adc x20, x20, x8
lsl x5, x2, #32
add x2, x5, x2
mov x5, #-4294967295
umulh x5, x5, x2
mov x4, #4294967295
mul x3, x4, x2
umulh x4, x4, x2
adds x5, x5, x3
adcs x4, x4, x2
adc x3, xzr, xzr
subs x9, x9, x5
sbcs x10, x10, x4
sbcs x11, x11, x3
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x2, x2, xzr
lsl x5, x9, #32
add x9, x5, x9
mov x5, #-4294967295
umulh x5, x5, x9
mov x4, #4294967295
mul x3, x4, x9
umulh x4, x4, x9
adds x5, x5, x3
adcs x4, x4, x9
adc x3, xzr, xzr
subs x10, x10, x5
sbcs x11, x11, x4
sbcs x12, x12, x3
sbcs x13, x13, xzr
sbcs x2, x2, xzr
sbc x9, x9, xzr
lsl x5, x10, #32
add x10, x5, x10
mov x5, #-4294967295
umulh x5, x5, x10
mov x4, #4294967295
mul x3, x4, x10
umulh x4, x4, x10
adds x5, x5, x3
adcs x4, x4, x10
adc x3, xzr, xzr
subs x11, x11, x5
sbcs x12, x12, x4
sbcs x13, x13, x3
sbcs x2, x2, xzr
sbcs x9, x9, xzr
sbc x10, x10, xzr
lsl x5, x11, #32
add x11, x5, x11
mov x5, #-4294967295
umulh x5, x5, x11
mov x4, #4294967295
mul x3, x4, x11
umulh x4, x4, x11
adds x5, x5, x3
adcs x4, x4, x11
adc x3, xzr, xzr
subs x12, x12, x5
sbcs x13, x13, x4
sbcs x2, x2, x3
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbc x11, x11, xzr
lsl x5, x12, #32
add x12, x5, x12
mov x5, #-4294967295
umulh x5, x5, x12
mov x4, #4294967295
mul x3, x4, x12
umulh x4, x4, x12
adds x5, x5, x3
adcs x4, x4, x12
adc x3, xzr, xzr
subs x13, x13, x5
sbcs x2, x2, x4
sbcs x9, x9, x3
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbc x12, x12, xzr
lsl x5, x13, #32
add x13, x5, x13
mov x5, #-4294967295
umulh x5, x5, x13
mov x4, #4294967295
mul x3, x4, x13
umulh x4, x4, x13
adds x5, x5, x3
adcs x4, x4, x13
adc x3, xzr, xzr
subs x2, x2, x5
sbcs x9, x9, x4
sbcs x10, x10, x3
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
adds x2, x2, x14
adcs x9, x9, x15
adcs x10, x10, x16
adcs x11, x11, x17
adcs x12, x12, x19
adcs x13, x13, x20
mov x14, #-4294967295
mov x15, #4294967295
csel x14, x14, xzr, hs
csel x15, x15, xzr, hs
cset x16, hs
adds x2, x2, x14
adcs x9, x9, x15
adcs x10, x10, x16
adcs x11, x11, xzr
adcs x12, x12, xzr
adc x13, x13, xzr
stp x2, x9, [sp, #240]
stp x10, x11, [sp, #256]
stp x12, x13, [sp, #272]
ldp x3, x4, [x26, #96]
ldp x5, x6, [x25, #48]
mul x12, x3, x5
umulh x13, x3, x5
mul x11, x3, x6
umulh x14, x3, x6
adds x13, x13, x11
ldp x7, x8, [x25, #64]
mul x11, x3, x7
umulh x15, x3, x7
adcs x14, x14, x11
mul x11, x3, x8
umulh x16, x3, x8
adcs x15, x15, x11
ldp x9, x10, [x25, #80]
mul x11, x3, x9
umulh x17, x3, x9
adcs x16, x16, x11
mul x11, x3, x10
umulh x19, x3, x10
adcs x17, x17, x11
adc x19, x19, xzr
mul x11, x4, x5
adds x13, x13, x11
mul x11, x4, x6
adcs x14, x14, x11
mul x11, x4, x7
adcs x15, x15, x11
mul x11, x4, x8
adcs x16, x16, x11
mul x11, x4, x9
adcs x17, x17, x11
mul x11, x4, x10
adcs x19, x19, x11
cset x20, hs
umulh x11, x4, x5
adds x14, x14, x11
umulh x11, x4, x6
adcs x15, x15, x11
umulh x11, x4, x7
adcs x16, x16, x11
umulh x11, x4, x8
adcs x17, x17, x11
umulh x11, x4, x9
adcs x19, x19, x11
umulh x11, x4, x10
adc x20, x20, x11
ldp x3, x4, [x26, #112]
mul x11, x3, x5
adds x14, x14, x11
mul x11, x3, x6
adcs x15, x15, x11
mul x11, x3, x7
adcs x16, x16, x11
mul x11, x3, x8
adcs x17, x17, x11
mul x11, x3, x9
adcs x19, x19, x11
mul x11, x3, x10
adcs x20, x20, x11
cset x21, hs
umulh x11, x3, x5
adds x15, x15, x11
umulh x11, x3, x6
adcs x16, x16, x11
umulh x11, x3, x7
adcs x17, x17, x11
umulh x11, x3, x8
adcs x19, x19, x11
umulh x11, x3, x9
adcs x20, x20, x11
umulh x11, x3, x10
adc x21, x21, x11
mul x11, x4, x5
adds x15, x15, x11
mul x11, x4, x6
adcs x16, x16, x11
mul x11, x4, x7
adcs x17, x17, x11
mul x11, x4, x8
adcs x19, x19, x11
mul x11, x4, x9
adcs x20, x20, x11
mul x11, x4, x10
adcs x21, x21, x11
cset x22, hs
umulh x11, x4, x5
adds x16, x16, x11
umulh x11, x4, x6
adcs x17, x17, x11
umulh x11, x4, x7
adcs x19, x19, x11
umulh x11, x4, x8
adcs x20, x20, x11
umulh x11, x4, x9
adcs x21, x21, x11
umulh x11, x4, x10
adc x22, x22, x11
ldp x3, x4, [x26, #128]
mul x11, x3, x5
adds x16, x16, x11
mul x11, x3, x6
adcs x17, x17, x11
mul x11, x3, x7
adcs x19, x19, x11
mul x11, x3, x8
adcs x20, x20, x11
mul x11, x3, x9
adcs x21, x21, x11
mul x11, x3, x10
adcs x22, x22, x11
cset x2, hs
umulh x11, x3, x5
adds x17, x17, x11
umulh x11, x3, x6
adcs x19, x19, x11
umulh x11, x3, x7
adcs x20, x20, x11
umulh x11, x3, x8
adcs x21, x21, x11
umulh x11, x3, x9
adcs x22, x22, x11
umulh x11, x3, x10
adc x2, x2, x11
mul x11, x4, x5
adds x17, x17, x11
mul x11, x4, x6
adcs x19, x19, x11
mul x11, x4, x7
adcs x20, x20, x11
mul x11, x4, x8
adcs x21, x21, x11
mul x11, x4, x9
adcs x22, x22, x11
mul x11, x4, x10
adcs x2, x2, x11
cset x1, hs
umulh x11, x4, x5
adds x19, x19, x11
umulh x11, x4, x6
adcs x20, x20, x11
umulh x11, x4, x7
adcs x21, x21, x11
umulh x11, x4, x8
adcs x22, x22, x11
umulh x11, x4, x9
adcs x2, x2, x11
umulh x11, x4, x10
adc x1, x1, x11
lsl x7, x12, #32
add x12, x7, x12
mov x7, #-4294967295
umulh x7, x7, x12
mov x6, #4294967295
mul x5, x6, x12
umulh x6, x6, x12
adds x7, x7, x5
adcs x6, x6, x12
adc x5, xzr, xzr
subs x13, x13, x7
sbcs x14, x14, x6
sbcs x15, x15, x5
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x12, x12, xzr
lsl x7, x13, #32
add x13, x7, x13
mov x7, #-4294967295
umulh x7, x7, x13
mov x6, #4294967295
mul x5, x6, x13
umulh x6, x6, x13
adds x7, x7, x5
adcs x6, x6, x13
adc x5, xzr, xzr
subs x14, x14, x7
sbcs x15, x15, x6
sbcs x16, x16, x5
sbcs x17, x17, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
lsl x7, x14, #32
add x14, x7, x14
mov x7, #-4294967295
umulh x7, x7, x14
mov x6, #4294967295
mul x5, x6, x14
umulh x6, x6, x14
adds x7, x7, x5
adcs x6, x6, x14
adc x5, xzr, xzr
subs x15, x15, x7
sbcs x16, x16, x6
sbcs x17, x17, x5
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
lsl x7, x15, #32
add x15, x7, x15
mov x7, #-4294967295
umulh x7, x7, x15
mov x6, #4294967295
mul x5, x6, x15
umulh x6, x6, x15
adds x7, x7, x5
adcs x6, x6, x15
adc x5, xzr, xzr
subs x16, x16, x7
sbcs x17, x17, x6
sbcs x12, x12, x5
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbc x15, x15, xzr
lsl x7, x16, #32
add x16, x7, x16
mov x7, #-4294967295
umulh x7, x7, x16
mov x6, #4294967295
mul x5, x6, x16
umulh x6, x6, x16
adds x7, x7, x5
adcs x6, x6, x16
adc x5, xzr, xzr
subs x17, x17, x7
sbcs x12, x12, x6
sbcs x13, x13, x5
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbc x16, x16, xzr
lsl x7, x17, #32
add x17, x7, x17
mov x7, #-4294967295
umulh x7, x7, x17
mov x6, #4294967295
mul x5, x6, x17
umulh x6, x6, x17
adds x7, x7, x5
adcs x6, x6, x17
adc x5, xzr, xzr
subs x12, x12, x7
sbcs x13, x13, x6
sbcs x14, x14, x5
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbc x17, x17, xzr
adds x12, x12, x19
adcs x13, x13, x20
adcs x14, x14, x21
adcs x15, x15, x22
adcs x16, x16, x2
adcs x17, x17, x1
adc x10, xzr, xzr
mov x11, #-4294967295
adds x19, x12, x11
mov x11, #4294967295
adcs x20, x13, x11
mov x11, #1
adcs x21, x14, x11
adcs x22, x15, xzr
adcs x2, x16, xzr
adcs x1, x17, xzr
adcs x10, x10, xzr
csel x12, x12, x19, eq
csel x13, x13, x20, eq
csel x14, x14, x21, eq
csel x15, x15, x22, eq
csel x16, x16, x2, eq
csel x17, x17, x1, eq
stp x12, x13, [sp, #288]
stp x14, x15, [sp, #304]
stp x16, x17, [sp, #320]
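// Montgomery multiplication modulo p_384 (each operand is 6 64-bit words):
// [sp, #48..#95] := ([x25, #96..#143] * [x26, #48..#95] / 2^384) mod p_384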
ldp x3, x4, [x25, #96]
ldp x5, x6, [x26, #48]
mul x12, x3, x5
umulh x13, x3, x5
mul x11, x3, x6
umulh x14, x3, x6
adds x13, x13, x11
ldp x7, x8, [x26, #64]
mul x11, x3, x7
umulh x15, x3, x7
adcs x14, x14, x11
mul x11, x3, x8
umulh x16, x3, x8
adcs x15, x15, x11
ldp x9, x10, [x26, #80]
mul x11, x3, x9
umulh x17, x3, x9
adcs x16, x16, x11
mul x11, x3, x10
umulh x19, x3, x10
adcs x17, x17, x11
adc x19, x19, xzr
mul x11, x4, x5
adds x13, x13, x11
mul x11, x4, x6
adcs x14, x14, x11
mul x11, x4, x7
adcs x15, x15, x11
mul x11, x4, x8
adcs x16, x16, x11
mul x11, x4, x9
adcs x17, x17, x11
mul x11, x4, x10
adcs x19, x19, x11
cset x20, hs
umulh x11, x4, x5
adds x14, x14, x11
umulh x11, x4, x6
adcs x15, x15, x11
umulh x11, x4, x7
adcs x16, x16, x11
umulh x11, x4, x8
adcs x17, x17, x11
umulh x11, x4, x9
adcs x19, x19, x11
umulh x11, x4, x10
adc x20, x20, x11
ldp x3, x4, [x25, #112]
mul x11, x3, x5
adds x14, x14, x11
mul x11, x3, x6
adcs x15, x15, x11
mul x11, x3, x7
adcs x16, x16, x11
mul x11, x3, x8
adcs x17, x17, x11
mul x11, x3, x9
adcs x19, x19, x11
mul x11, x3, x10
adcs x20, x20, x11
cset x21, hs
umulh x11, x3, x5
adds x15, x15, x11
umulh x11, x3, x6
adcs x16, x16, x11
umulh x11, x3, x7
adcs x17, x17, x11
umulh x11, x3, x8
adcs x19, x19, x11
umulh x11, x3, x9
adcs x20, x20, x11
umulh x11, x3, x10
adc x21, x21, x11
mul x11, x4, x5
adds x15, x15, x11
mul x11, x4, x6
adcs x16, x16, x11
mul x11, x4, x7
adcs x17, x17, x11
mul x11, x4, x8
adcs x19, x19, x11
mul x11, x4, x9
adcs x20, x20, x11
mul x11, x4, x10
adcs x21, x21, x11
cset x22, hs
umulh x11, x4, x5
adds x16, x16, x11
umulh x11, x4, x6
adcs x17, x17, x11
umulh x11, x4, x7
adcs x19, x19, x11
umulh x11, x4, x8
adcs x20, x20, x11
umulh x11, x4, x9
adcs x21, x21, x11
umulh x11, x4, x10
adc x22, x22, x11
ldp x3, x4, [x25, #128]
mul x11, x3, x5
adds x16, x16, x11
mul x11, x3, x6
adcs x17, x17, x11
mul x11, x3, x7
adcs x19, x19, x11
mul x11, x3, x8
adcs x20, x20, x11
mul x11, x3, x9
adcs x21, x21, x11
mul x11, x3, x10
adcs x22, x22, x11
cset x2, hs
umulh x11, x3, x5
adds x17, x17, x11
umulh x11, x3, x6
adcs x19, x19, x11
umulh x11, x3, x7
adcs x20, x20, x11
umulh x11, x3, x8
adcs x21, x21, x11
umulh x11, x3, x9
adcs x22, x22, x11
umulh x11, x3, x10
adc x2, x2, x11
mul x11, x4, x5
adds x17, x17, x11
mul x11, x4, x6
adcs x19, x19, x11
mul x11, x4, x7
adcs x20, x20, x11
mul x11, x4, x8
adcs x21, x21, x11
mul x11, x4, x9
adcs x22, x22, x11
mul x11, x4, x10
adcs x2, x2, x11
cset x1, hs
umulh x11, x4, x5
adds x19, x19, x11
umulh x11, x4, x6
adcs x20, x20, x11
umulh x11, x4, x7
adcs x21, x21, x11
umulh x11, x4, x8
adcs x22, x22, x11
umulh x11, x4, x9
adcs x2, x2, x11
umulh x11, x4, x10
adc x1, x1, x11
lsl x7, x12, #32
add x12, x7, x12
mov x7, #-4294967295
umulh x7, x7, x12
mov x6, #4294967295
mul x5, x6, x12
umulh x6, x6, x12
adds x7, x7, x5
adcs x6, x6, x12
adc x5, xzr, xzr
subs x13, x13, x7
sbcs x14, x14, x6
sbcs x15, x15, x5
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x12, x12, xzr
lsl x7, x13, #32
add x13, x7, x13
mov x7, #-4294967295
umulh x7, x7, x13
mov x6, #4294967295
mul x5, x6, x13
umulh x6, x6, x13
adds x7, x7, x5
adcs x6, x6, x13
adc x5, xzr, xzr
subs x14, x14, x7
sbcs x15, x15, x6
sbcs x16, x16, x5
sbcs x17, x17, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
lsl x7, x14, #32
add x14, x7, x14
mov x7, #-4294967295
umulh x7, x7, x14
mov x6, #4294967295
mul x5, x6, x14
umulh x6, x6, x14
adds x7, x7, x5
adcs x6, x6, x14
adc x5, xzr, xzr
subs x15, x15, x7
sbcs x16, x16, x6
sbcs x17, x17, x5
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
lsl x7, x15, #32
add x15, x7, x15
mov x7, #-4294967295
umulh x7, x7, x15
mov x6, #4294967295
mul x5, x6, x15
umulh x6, x6, x15
adds x7, x7, x5
adcs x6, x6, x15
adc x5, xzr, xzr
subs x16, x16, x7
sbcs x17, x17, x6
sbcs x12, x12, x5
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbc x15, x15, xzr
lsl x7, x16, #32
add x16, x7, x16
mov x7, #-4294967295
umulh x7, x7, x16
mov x6, #4294967295
mul x5, x6, x16
umulh x6, x6, x16
adds x7, x7, x5
adcs x6, x6, x16
adc x5, xzr, xzr
subs x17, x17, x7
sbcs x12, x12, x6
sbcs x13, x13, x5
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbc x16, x16, xzr
lsl x7, x17, #32
add x17, x7, x17
mov x7, #-4294967295
umulh x7, x7, x17
mov x6, #4294967295
mul x5, x6, x17
umulh x6, x6, x17
adds x7, x7, x5
adcs x6, x6, x17
adc x5, xzr, xzr
subs x12, x12, x7
sbcs x13, x13, x6
sbcs x14, x14, x5
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbc x17, x17, xzr
adds x12, x12, x19
adcs x13, x13, x20
adcs x14, x14, x21
adcs x15, x15, x22
adcs x16, x16, x2
adcs x17, x17, x1
adc x10, xzr, xzr
mov x11, #-4294967295
adds x19, x12, x11
mov x11, #4294967295
adcs x20, x13, x11
mov x11, #1
adcs x21, x14, x11
adcs x22, x15, xzr
adcs x2, x16, xzr
adcs x1, x17, xzr
adcs x10, x10, xzr
csel x12, x12, x19, eq
csel x13, x13, x20, eq
csel x14, x14, x21, eq
csel x15, x15, x22, eq
csel x16, x16, x2, eq
csel x17, x17, x1, eq
stp x12, x13, [sp, #48]
stp x14, x15, [sp, #64]
stp x16, x17, [sp, #80]
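// Montgomery multiplication modulo p_384:
// [sp, #96..#143] := ([sp, #0..#47] * [x26, #0..#47] / 2^384) mod p_384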
ldp x3, x4, [sp]
ldp x5, x6, [x26]
mul x12, x3, x5
umulh x13, x3, x5
mul x11, x3, x6
umulh x14, x3, x6
adds x13, x13, x11
ldp x7, x8, [x26, #16]
mul x11, x3, x7
umulh x15, x3, x7
adcs x14, x14, x11
mul x11, x3, x8
umulh x16, x3, x8
adcs x15, x15, x11
ldp x9, x10, [x26, #32]
mul x11, x3, x9
umulh x17, x3, x9
adcs x16, x16, x11
mul x11, x3, x10
umulh x19, x3, x10
adcs x17, x17, x11
adc x19, x19, xzr
mul x11, x4, x5
adds x13, x13, x11
mul x11, x4, x6
adcs x14, x14, x11
mul x11, x4, x7
adcs x15, x15, x11
mul x11, x4, x8
adcs x16, x16, x11
mul x11, x4, x9
adcs x17, x17, x11
mul x11, x4, x10
adcs x19, x19, x11
cset x20, hs
umulh x11, x4, x5
adds x14, x14, x11
umulh x11, x4, x6
adcs x15, x15, x11
umulh x11, x4, x7
adcs x16, x16, x11
umulh x11, x4, x8
adcs x17, x17, x11
umulh x11, x4, x9
adcs x19, x19, x11
umulh x11, x4, x10
adc x20, x20, x11
ldp x3, x4, [sp, #16]
mul x11, x3, x5
adds x14, x14, x11
mul x11, x3, x6
adcs x15, x15, x11
mul x11, x3, x7
adcs x16, x16, x11
mul x11, x3, x8
adcs x17, x17, x11
mul x11, x3, x9
adcs x19, x19, x11
mul x11, x3, x10
adcs x20, x20, x11
cset x21, hs
umulh x11, x3, x5
adds x15, x15, x11
umulh x11, x3, x6
adcs x16, x16, x11
umulh x11, x3, x7
adcs x17, x17, x11
umulh x11, x3, x8
adcs x19, x19, x11
umulh x11, x3, x9
adcs x20, x20, x11
umulh x11, x3, x10
adc x21, x21, x11
mul x11, x4, x5
adds x15, x15, x11
mul x11, x4, x6
adcs x16, x16, x11
mul x11, x4, x7
adcs x17, x17, x11
mul x11, x4, x8
adcs x19, x19, x11
mul x11, x4, x9
adcs x20, x20, x11
mul x11, x4, x10
adcs x21, x21, x11
cset x22, hs
umulh x11, x4, x5
adds x16, x16, x11
umulh x11, x4, x6
adcs x17, x17, x11
umulh x11, x4, x7
adcs x19, x19, x11
umulh x11, x4, x8
adcs x20, x20, x11
umulh x11, x4, x9
adcs x21, x21, x11
umulh x11, x4, x10
adc x22, x22, x11
ldp x3, x4, [sp, #32]
mul x11, x3, x5
adds x16, x16, x11
mul x11, x3, x6
adcs x17, x17, x11
mul x11, x3, x7
adcs x19, x19, x11
mul x11, x3, x8
adcs x20, x20, x11
mul x11, x3, x9
adcs x21, x21, x11
mul x11, x3, x10
adcs x22, x22, x11
cset x2, hs
umulh x11, x3, x5
adds x17, x17, x11
umulh x11, x3, x6
adcs x19, x19, x11
umulh x11, x3, x7
adcs x20, x20, x11
umulh x11, x3, x8
adcs x21, x21, x11
umulh x11, x3, x9
adcs x22, x22, x11
umulh x11, x3, x10
adc x2, x2, x11
mul x11, x4, x5
adds x17, x17, x11
mul x11, x4, x6
adcs x19, x19, x11
mul x11, x4, x7
adcs x20, x20, x11
mul x11, x4, x8
adcs x21, x21, x11
mul x11, x4, x9
adcs x22, x22, x11
mul x11, x4, x10
adcs x2, x2, x11
cset x1, hs
umulh x11, x4, x5
adds x19, x19, x11
umulh x11, x4, x6
adcs x20, x20, x11
umulh x11, x4, x7
adcs x21, x21, x11
umulh x11, x4, x8
adcs x22, x22, x11
umulh x11, x4, x9
adcs x2, x2, x11
umulh x11, x4, x10
adc x1, x1, x11
lsl x7, x12, #32
add x12, x7, x12
mov x7, #-4294967295
umulh x7, x7, x12
mov x6, #4294967295
mul x5, x6, x12
umulh x6, x6, x12
adds x7, x7, x5
adcs x6, x6, x12
adc x5, xzr, xzr
subs x13, x13, x7
sbcs x14, x14, x6
sbcs x15, x15, x5
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x12, x12, xzr
lsl x7, x13, #32
add x13, x7, x13
mov x7, #-4294967295
umulh x7, x7, x13
mov x6, #4294967295
mul x5, x6, x13
umulh x6, x6, x13
adds x7, x7, x5
adcs x6, x6, x13
adc x5, xzr, xzr
subs x14, x14, x7
sbcs x15, x15, x6
sbcs x16, x16, x5
sbcs x17, x17, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
lsl x7, x14, #32
add x14, x7, x14
mov x7, #-4294967295
umulh x7, x7, x14
mov x6, #4294967295
mul x5, x6, x14
umulh x6, x6, x14
adds x7, x7, x5
adcs x6, x6, x14
adc x5, xzr, xzr
subs x15, x15, x7
sbcs x16, x16, x6
sbcs x17, x17, x5
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
lsl x7, x15, #32
add x15, x7, x15
mov x7, #-4294967295
umulh x7, x7, x15
mov x6, #4294967295
mul x5, x6, x15
umulh x6, x6, x15
adds x7, x7, x5
adcs x6, x6, x15
adc x5, xzr, xzr
subs x16, x16, x7
sbcs x17, x17, x6
sbcs x12, x12, x5
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbc x15, x15, xzr
lsl x7, x16, #32
add x16, x7, x16
mov x7, #-4294967295
umulh x7, x7, x16
mov x6, #4294967295
mul x5, x6, x16
umulh x6, x6, x16
adds x7, x7, x5
adcs x6, x6, x16
adc x5, xzr, xzr
subs x17, x17, x7
sbcs x12, x12, x6
sbcs x13, x13, x5
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbc x16, x16, xzr
lsl x7, x17, #32
add x17, x7, x17
mov x7, #-4294967295
umulh x7, x7, x17
mov x6, #4294967295
mul x5, x6, x17
umulh x6, x6, x17
adds x7, x7, x5
adcs x6, x6, x17
adc x5, xzr, xzr
subs x12, x12, x7
sbcs x13, x13, x6
sbcs x14, x14, x5
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbc x17, x17, xzr
adds x12, x12, x19
adcs x13, x13, x20
adcs x14, x14, x21
adcs x15, x15, x22
adcs x16, x16, x2
adcs x17, x17, x1
adc x10, xzr, xzr
mov x11, #-4294967295
adds x19, x12, x11
mov x11, #4294967295
adcs x20, x13, x11
mov x11, #1
adcs x21, x14, x11
adcs x22, x15, xzr
adcs x2, x16, xzr
adcs x1, x17, xzr
adcs x10, x10, xzr
csel x12, x12, x19, eq
csel x13, x13, x20, eq
csel x14, x14, x21, eq
csel x15, x15, x22, eq
csel x16, x16, x2, eq
csel x17, x17, x1, eq
stp x12, x13, [sp, #96]
stp x14, x15, [sp, #112]
stp x16, x17, [sp, #128]
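// Montgomery multiplication modulo p_384:
// [sp, #192..#239] := ([sp, #240..#287] * [x25, #0..#47] / 2^384) mod p_384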
ldp x3, x4, [sp, #240]
ldp x5, x6, [x25]
mul x12, x3, x5
umulh x13, x3, x5
mul x11, x3, x6
umulh x14, x3, x6
adds x13, x13, x11
ldp x7, x8, [x25, #16]
mul x11, x3, x7
umulh x15, x3, x7
adcs x14, x14, x11
mul x11, x3, x8
umulh x16, x3, x8
adcs x15, x15, x11
ldp x9, x10, [x25, #32]
mul x11, x3, x9
umulh x17, x3, x9
adcs x16, x16, x11
mul x11, x3, x10
umulh x19, x3, x10
adcs x17, x17, x11
adc x19, x19, xzr
mul x11, x4, x5
adds x13, x13, x11
mul x11, x4, x6
adcs x14, x14, x11
mul x11, x4, x7
adcs x15, x15, x11
mul x11, x4, x8
adcs x16, x16, x11
mul x11, x4, x9
adcs x17, x17, x11
mul x11, x4, x10
adcs x19, x19, x11
cset x20, hs
umulh x11, x4, x5
adds x14, x14, x11
umulh x11, x4, x6
adcs x15, x15, x11
umulh x11, x4, x7
adcs x16, x16, x11
umulh x11, x4, x8
adcs x17, x17, x11
umulh x11, x4, x9
adcs x19, x19, x11
umulh x11, x4, x10
adc x20, x20, x11
ldp x3, x4, [sp, #256]
mul x11, x3, x5
adds x14, x14, x11
mul x11, x3, x6
adcs x15, x15, x11
mul x11, x3, x7
adcs x16, x16, x11
mul x11, x3, x8
adcs x17, x17, x11
mul x11, x3, x9
adcs x19, x19, x11
mul x11, x3, x10
adcs x20, x20, x11
cset x21, hs
umulh x11, x3, x5
adds x15, x15, x11
umulh x11, x3, x6
adcs x16, x16, x11
umulh x11, x3, x7
adcs x17, x17, x11
umulh x11, x3, x8
adcs x19, x19, x11
umulh x11, x3, x9
adcs x20, x20, x11
umulh x11, x3, x10
adc x21, x21, x11
mul x11, x4, x5
adds x15, x15, x11
mul x11, x4, x6
adcs x16, x16, x11
mul x11, x4, x7
adcs x17, x17, x11
mul x11, x4, x8
adcs x19, x19, x11
mul x11, x4, x9
adcs x20, x20, x11
mul x11, x4, x10
adcs x21, x21, x11
cset x22, hs
umulh x11, x4, x5
adds x16, x16, x11
umulh x11, x4, x6
adcs x17, x17, x11
umulh x11, x4, x7
adcs x19, x19, x11
umulh x11, x4, x8
adcs x20, x20, x11
umulh x11, x4, x9
adcs x21, x21, x11
umulh x11, x4, x10
adc x22, x22, x11
ldp x3, x4, [sp, #272]
mul x11, x3, x5
adds x16, x16, x11
mul x11, x3, x6
adcs x17, x17, x11
mul x11, x3, x7
adcs x19, x19, x11
mul x11, x3, x8
adcs x20, x20, x11
mul x11, x3, x9
adcs x21, x21, x11
mul x11, x3, x10
adcs x22, x22, x11
cset x2, hs
umulh x11, x3, x5
adds x17, x17, x11
umulh x11, x3, x6
adcs x19, x19, x11
umulh x11, x3, x7
adcs x20, x20, x11
umulh x11, x3, x8
adcs x21, x21, x11
umulh x11, x3, x9
adcs x22, x22, x11
umulh x11, x3, x10
adc x2, x2, x11
mul x11, x4, x5
adds x17, x17, x11
mul x11, x4, x6
adcs x19, x19, x11
mul x11, x4, x7
adcs x20, x20, x11
mul x11, x4, x8
adcs x21, x21, x11
mul x11, x4, x9
adcs x22, x22, x11
mul x11, x4, x10
adcs x2, x2, x11
cset x1, hs
umulh x11, x4, x5
adds x19, x19, x11
umulh x11, x4, x6
adcs x20, x20, x11
umulh x11, x4, x7
adcs x21, x21, x11
umulh x11, x4, x8
adcs x22, x22, x11
umulh x11, x4, x9
adcs x2, x2, x11
umulh x11, x4, x10
adc x1, x1, x11
lsl x7, x12, #32
add x12, x7, x12
mov x7, #-4294967295
umulh x7, x7, x12
mov x6, #4294967295
mul x5, x6, x12
umulh x6, x6, x12
adds x7, x7, x5
adcs x6, x6, x12
adc x5, xzr, xzr
subs x13, x13, x7
sbcs x14, x14, x6
sbcs x15, x15, x5
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x12, x12, xzr
lsl x7, x13, #32
add x13, x7, x13
mov x7, #-4294967295
umulh x7, x7, x13
mov x6, #4294967295
mul x5, x6, x13
umulh x6, x6, x13
adds x7, x7, x5
adcs x6, x6, x13
adc x5, xzr, xzr
subs x14, x14, x7
sbcs x15, x15, x6
sbcs x16, x16, x5
sbcs x17, x17, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
lsl x7, x14, #32
add x14, x7, x14
mov x7, #-4294967295
umulh x7, x7, x14
mov x6, #4294967295
mul x5, x6, x14
umulh x6, x6, x14
adds x7, x7, x5
adcs x6, x6, x14
adc x5, xzr, xzr
subs x15, x15, x7
sbcs x16, x16, x6
sbcs x17, x17, x5
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
lsl x7, x15, #32
add x15, x7, x15
mov x7, #-4294967295
umulh x7, x7, x15
mov x6, #4294967295
mul x5, x6, x15
umulh x6, x6, x15
adds x7, x7, x5
adcs x6, x6, x15
adc x5, xzr, xzr
subs x16, x16, x7
sbcs x17, x17, x6
sbcs x12, x12, x5
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbc x15, x15, xzr
lsl x7, x16, #32
add x16, x7, x16
mov x7, #-4294967295
umulh x7, x7, x16
mov x6, #4294967295
mul x5, x6, x16
umulh x6, x6, x16
adds x7, x7, x5
adcs x6, x6, x16
adc x5, xzr, xzr
subs x17, x17, x7
sbcs x12, x12, x6
sbcs x13, x13, x5
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbc x16, x16, xzr
lsl x7, x17, #32
add x17, x7, x17
mov x7, #-4294967295
umulh x7, x7, x17
mov x6, #4294967295
mul x5, x6, x17
umulh x6, x6, x17
adds x7, x7, x5
adcs x6, x6, x17
adc x5, xzr, xzr
subs x12, x12, x7
sbcs x13, x13, x6
sbcs x14, x14, x5
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbc x17, x17, xzr
adds x12, x12, x19
adcs x13, x13, x20
adcs x14, x14, x21
adcs x15, x15, x22
adcs x16, x16, x2
adcs x17, x17, x1
adc x10, xzr, xzr
mov x11, #-4294967295
adds x19, x12, x11
mov x11, #4294967295
adcs x20, x13, x11
mov x11, #1
adcs x21, x14, x11
adcs x22, x15, xzr
adcs x2, x16, xzr
adcs x1, x17, xzr
adcs x10, x10, xzr
csel x12, x12, x19, eq
csel x13, x13, x20, eq
csel x14, x14, x21, eq
csel x15, x15, x22, eq
csel x16, x16, x2, eq
csel x17, x17, x1, eq
stp x12, x13, [sp, #192]
stp x14, x15, [sp, #208]
stp x16, x17, [sp, #224]
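// Montgomery multiplication modulo p_384:
// [sp, #48..#95] := ([sp, #0..#47] * [sp, #48..#95] / 2^384) mod p_384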
ldp x3, x4, [sp]
ldp x5, x6, [sp, #48]
mul x12, x3, x5
umulh x13, x3, x5
mul x11, x3, x6
umulh x14, x3, x6
adds x13, x13, x11
ldp x7, x8, [sp, #64]
mul x11, x3, x7
umulh x15, x3, x7
adcs x14, x14, x11
mul x11, x3, x8
umulh x16, x3, x8
adcs x15, x15, x11
ldp x9, x10, [sp, #80]
mul x11, x3, x9
umulh x17, x3, x9
adcs x16, x16, x11
mul x11, x3, x10
umulh x19, x3, x10
adcs x17, x17, x11
adc x19, x19, xzr
mul x11, x4, x5
adds x13, x13, x11
mul x11, x4, x6
adcs x14, x14, x11
mul x11, x4, x7
adcs x15, x15, x11
mul x11, x4, x8
adcs x16, x16, x11
mul x11, x4, x9
adcs x17, x17, x11
mul x11, x4, x10
adcs x19, x19, x11
cset x20, hs
umulh x11, x4, x5
adds x14, x14, x11
umulh x11, x4, x6
adcs x15, x15, x11
umulh x11, x4, x7
adcs x16, x16, x11
umulh x11, x4, x8
adcs x17, x17, x11
umulh x11, x4, x9
adcs x19, x19, x11
umulh x11, x4, x10
adc x20, x20, x11
ldp x3, x4, [sp, #16]
mul x11, x3, x5
adds x14, x14, x11
mul x11, x3, x6
adcs x15, x15, x11
mul x11, x3, x7
adcs x16, x16, x11
mul x11, x3, x8
adcs x17, x17, x11
mul x11, x3, x9
adcs x19, x19, x11
mul x11, x3, x10
adcs x20, x20, x11
cset x21, hs
umulh x11, x3, x5
adds x15, x15, x11
umulh x11, x3, x6
adcs x16, x16, x11
umulh x11, x3, x7
adcs x17, x17, x11
umulh x11, x3, x8
adcs x19, x19, x11
umulh x11, x3, x9
adcs x20, x20, x11
umulh x11, x3, x10
adc x21, x21, x11
mul x11, x4, x5
adds x15, x15, x11
mul x11, x4, x6
adcs x16, x16, x11
mul x11, x4, x7
adcs x17, x17, x11
mul x11, x4, x8
adcs x19, x19, x11
mul x11, x4, x9
adcs x20, x20, x11
mul x11, x4, x10
adcs x21, x21, x11
cset x22, hs
umulh x11, x4, x5
adds x16, x16, x11
umulh x11, x4, x6
adcs x17, x17, x11
umulh x11, x4, x7
adcs x19, x19, x11
umulh x11, x4, x8
adcs x20, x20, x11
umulh x11, x4, x9
adcs x21, x21, x11
umulh x11, x4, x10
adc x22, x22, x11
ldp x3, x4, [sp, #32]
mul x11, x3, x5
adds x16, x16, x11
mul x11, x3, x6
adcs x17, x17, x11
mul x11, x3, x7
adcs x19, x19, x11
mul x11, x3, x8
adcs x20, x20, x11
mul x11, x3, x9
adcs x21, x21, x11
mul x11, x3, x10
adcs x22, x22, x11
cset x2, hs
umulh x11, x3, x5
adds x17, x17, x11
umulh x11, x3, x6
adcs x19, x19, x11
umulh x11, x3, x7
adcs x20, x20, x11
umulh x11, x3, x8
adcs x21, x21, x11
umulh x11, x3, x9
adcs x22, x22, x11
umulh x11, x3, x10
adc x2, x2, x11
mul x11, x4, x5
adds x17, x17, x11
mul x11, x4, x6
adcs x19, x19, x11
mul x11, x4, x7
adcs x20, x20, x11
mul x11, x4, x8
adcs x21, x21, x11
mul x11, x4, x9
adcs x22, x22, x11
mul x11, x4, x10
adcs x2, x2, x11
cset x1, hs
umulh x11, x4, x5
adds x19, x19, x11
umulh x11, x4, x6
adcs x20, x20, x11
umulh x11, x4, x7
adcs x21, x21, x11
umulh x11, x4, x8
adcs x22, x22, x11
umulh x11, x4, x9
adcs x2, x2, x11
umulh x11, x4, x10
adc x1, x1, x11
lsl x7, x12, #32
add x12, x7, x12
mov x7, #-4294967295
umulh x7, x7, x12
mov x6, #4294967295
mul x5, x6, x12
umulh x6, x6, x12
adds x7, x7, x5
adcs x6, x6, x12
adc x5, xzr, xzr
subs x13, x13, x7
sbcs x14, x14, x6
sbcs x15, x15, x5
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x12, x12, xzr
lsl x7, x13, #32
add x13, x7, x13
mov x7, #-4294967295
umulh x7, x7, x13
mov x6, #4294967295
mul x5, x6, x13
umulh x6, x6, x13
adds x7, x7, x5
adcs x6, x6, x13
adc x5, xzr, xzr
subs x14, x14, x7
sbcs x15, x15, x6
sbcs x16, x16, x5
sbcs x17, x17, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
lsl x7, x14, #32
add x14, x7, x14
mov x7, #-4294967295
umulh x7, x7, x14
mov x6, #4294967295
mul x5, x6, x14
umulh x6, x6, x14
adds x7, x7, x5
adcs x6, x6, x14
adc x5, xzr, xzr
subs x15, x15, x7
sbcs x16, x16, x6
sbcs x17, x17, x5
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
lsl x7, x15, #32
add x15, x7, x15
mov x7, #-4294967295
umulh x7, x7, x15
mov x6, #4294967295
mul x5, x6, x15
umulh x6, x6, x15
adds x7, x7, x5
adcs x6, x6, x15
adc x5, xzr, xzr
subs x16, x16, x7
sbcs x17, x17, x6
sbcs x12, x12, x5
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbc x15, x15, xzr
lsl x7, x16, #32
add x16, x7, x16
mov x7, #-4294967295
umulh x7, x7, x16
mov x6, #4294967295
mul x5, x6, x16
umulh x6, x6, x16
adds x7, x7, x5
adcs x6, x6, x16
adc x5, xzr, xzr
subs x17, x17, x7
sbcs x12, x12, x6
sbcs x13, x13, x5
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbc x16, x16, xzr
lsl x7, x17, #32
add x17, x7, x17
mov x7, #-4294967295
umulh x7, x7, x17
mov x6, #4294967295
mul x5, x6, x17
umulh x6, x6, x17
adds x7, x7, x5
adcs x6, x6, x17
adc x5, xzr, xzr
subs x12, x12, x7
sbcs x13, x13, x6
sbcs x14, x14, x5
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbc x17, x17, xzr
adds x12, x12, x19
adcs x13, x13, x20
adcs x14, x14, x21
adcs x15, x15, x22
adcs x16, x16, x2
adcs x17, x17, x1
adc x10, xzr, xzr
mov x11, #-4294967295
adds x19, x12, x11
mov x11, #4294967295
adcs x20, x13, x11
mov x11, #1
adcs x21, x14, x11
adcs x22, x15, xzr
adcs x2, x16, xzr
adcs x1, x17, xzr
adcs x10, x10, xzr
csel x12, x12, x19, eq
csel x13, x13, x20, eq
csel x14, x14, x21, eq
csel x15, x15, x22, eq
csel x16, x16, x2, eq
csel x17, x17, x1, eq
stp x12, x13, [sp, #48]
stp x14, x15, [sp, #64]
stp x16, x17, [sp, #80]
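// Montgomery multiplication modulo p_384:
// [sp, #288..#335] := ([sp, #240..#287] * [sp, #288..#335] / 2^384) mod p_384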
ldp x3, x4, [sp, #240]
ldp x5, x6, [sp, #288]
mul x12, x3, x5
umulh x13, x3, x5
mul x11, x3, x6
umulh x14, x3, x6
adds x13, x13, x11
ldp x7, x8, [sp, #304]
mul x11, x3, x7
umulh x15, x3, x7
adcs x14, x14, x11
mul x11, x3, x8
umulh x16, x3, x8
adcs x15, x15, x11
ldp x9, x10, [sp, #320]
mul x11, x3, x9
umulh x17, x3, x9
adcs x16, x16, x11
mul x11, x3, x10
umulh x19, x3, x10
adcs x17, x17, x11
adc x19, x19, xzr
mul x11, x4, x5
adds x13, x13, x11
mul x11, x4, x6
adcs x14, x14, x11
mul x11, x4, x7
adcs x15, x15, x11
mul x11, x4, x8
adcs x16, x16, x11
mul x11, x4, x9
adcs x17, x17, x11
mul x11, x4, x10
adcs x19, x19, x11
cset x20, hs
umulh x11, x4, x5
adds x14, x14, x11
umulh x11, x4, x6
adcs x15, x15, x11
umulh x11, x4, x7
adcs x16, x16, x11
umulh x11, x4, x8
adcs x17, x17, x11
umulh x11, x4, x9
adcs x19, x19, x11
umulh x11, x4, x10
adc x20, x20, x11
ldp x3, x4, [sp, #256]
mul x11, x3, x5
adds x14, x14, x11
mul x11, x3, x6
adcs x15, x15, x11
mul x11, x3, x7
adcs x16, x16, x11
mul x11, x3, x8
adcs x17, x17, x11
mul x11, x3, x9
adcs x19, x19, x11
mul x11, x3, x10
adcs x20, x20, x11
cset x21, hs
umulh x11, x3, x5
adds x15, x15, x11
umulh x11, x3, x6
adcs x16, x16, x11
umulh x11, x3, x7
adcs x17, x17, x11
umulh x11, x3, x8
adcs x19, x19, x11
umulh x11, x3, x9
adcs x20, x20, x11
umulh x11, x3, x10
adc x21, x21, x11
mul x11, x4, x5
adds x15, x15, x11
mul x11, x4, x6
adcs x16, x16, x11
mul x11, x4, x7
adcs x17, x17, x11
mul x11, x4, x8
adcs x19, x19, x11
mul x11, x4, x9
adcs x20, x20, x11
mul x11, x4, x10
adcs x21, x21, x11
cset x22, hs
umulh x11, x4, x5
adds x16, x16, x11
umulh x11, x4, x6
adcs x17, x17, x11
umulh x11, x4, x7
adcs x19, x19, x11
umulh x11, x4, x8
adcs x20, x20, x11
umulh x11, x4, x9
adcs x21, x21, x11
umulh x11, x4, x10
adc x22, x22, x11
ldp x3, x4, [sp, #272]
mul x11, x3, x5
adds x16, x16, x11
mul x11, x3, x6
adcs x17, x17, x11
mul x11, x3, x7
adcs x19, x19, x11
mul x11, x3, x8
adcs x20, x20, x11
mul x11, x3, x9
adcs x21, x21, x11
mul x11, x3, x10
adcs x22, x22, x11
cset x2, hs
umulh x11, x3, x5
adds x17, x17, x11
umulh x11, x3, x6
adcs x19, x19, x11
umulh x11, x3, x7
adcs x20, x20, x11
umulh x11, x3, x8
adcs x21, x21, x11
umulh x11, x3, x9
adcs x22, x22, x11
umulh x11, x3, x10
adc x2, x2, x11
mul x11, x4, x5
adds x17, x17, x11
mul x11, x4, x6
adcs x19, x19, x11
mul x11, x4, x7
adcs x20, x20, x11
mul x11, x4, x8
adcs x21, x21, x11
mul x11, x4, x9
adcs x22, x22, x11
mul x11, x4, x10
adcs x2, x2, x11
cset x1, hs
umulh x11, x4, x5
adds x19, x19, x11
umulh x11, x4, x6
adcs x20, x20, x11
umulh x11, x4, x7
adcs x21, x21, x11
umulh x11, x4, x8
adcs x22, x22, x11
umulh x11, x4, x9
adcs x2, x2, x11
umulh x11, x4, x10
adc x1, x1, x11
lsl x7, x12, #32
add x12, x7, x12
mov x7, #-4294967295
umulh x7, x7, x12
mov x6, #4294967295
mul x5, x6, x12
umulh x6, x6, x12
adds x7, x7, x5
adcs x6, x6, x12
adc x5, xzr, xzr
subs x13, x13, x7
sbcs x14, x14, x6
sbcs x15, x15, x5
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x12, x12, xzr
lsl x7, x13, #32
add x13, x7, x13
mov x7, #-4294967295
umulh x7, x7, x13
mov x6, #4294967295
mul x5, x6, x13
umulh x6, x6, x13
adds x7, x7, x5
adcs x6, x6, x13
adc x5, xzr, xzr
subs x14, x14, x7
sbcs x15, x15, x6
sbcs x16, x16, x5
sbcs x17, x17, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
lsl x7, x14, #32
add x14, x7, x14
mov x7, #-4294967295
umulh x7, x7, x14
mov x6, #4294967295
mul x5, x6, x14
umulh x6, x6, x14
adds x7, x7, x5
adcs x6, x6, x14
adc x5, xzr, xzr
subs x15, x15, x7
sbcs x16, x16, x6
sbcs x17, x17, x5
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
lsl x7, x15, #32
add x15, x7, x15
mov x7, #-4294967295
umulh x7, x7, x15
mov x6, #4294967295
mul x5, x6, x15
umulh x6, x6, x15
adds x7, x7, x5
adcs x6, x6, x15
adc x5, xzr, xzr
subs x16, x16, x7
sbcs x17, x17, x6
sbcs x12, x12, x5
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbc x15, x15, xzr
lsl x7, x16, #32
add x16, x7, x16
mov x7, #-4294967295
umulh x7, x7, x16
mov x6, #4294967295
mul x5, x6, x16
umulh x6, x6, x16
adds x7, x7, x5
adcs x6, x6, x16
adc x5, xzr, xzr
subs x17, x17, x7
sbcs x12, x12, x6
sbcs x13, x13, x5
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbc x16, x16, xzr
lsl x7, x17, #32
add x17, x7, x17
mov x7, #-4294967295
umulh x7, x7, x17
mov x6, #4294967295
mul x5, x6, x17
umulh x6, x6, x17
adds x7, x7, x5
adcs x6, x6, x17
adc x5, xzr, xzr
subs x12, x12, x7
sbcs x13, x13, x6
sbcs x14, x14, x5
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbc x17, x17, xzr
adds x12, x12, x19
adcs x13, x13, x20
adcs x14, x14, x21
adcs x15, x15, x22
adcs x16, x16, x2
adcs x17, x17, x1
adc x10, xzr, xzr
mov x11, #-4294967295
adds x19, x12, x11
mov x11, #4294967295
adcs x20, x13, x11
mov x11, #1
adcs x21, x14, x11
adcs x22, x15, xzr
adcs x2, x16, xzr
adcs x1, x17, xzr
adcs x10, x10, xzr
csel x12, x12, x19, eq
csel x13, x13, x20, eq
csel x14, x14, x21, eq
csel x15, x15, x22, eq
csel x16, x16, x2, eq
csel x17, x17, x1, eq
stp x12, x13, [sp, #288]
stp x14, x15, [sp, #304]
stp x16, x17, [sp, #320]
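// Modular subtraction (p_384 added back under the borrow mask):
// [sp, #240..#287] := ([sp, #96..#143] - [sp, #192..#239]) mod p_384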
ldp x5, x6, [sp, #96]
ldp x4, x3, [sp, #192]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #112]
ldp x4, x3, [sp, #208]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #128]
ldp x4, x3, [sp, #224]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, lo
mov x4, #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #240]
stp x7, x8, [sp, #256]
stp x9, x10, [sp, #272]
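// Modular subtraction:
// [sp, #48..#95] := ([sp, #48..#95] - [sp, #288..#335]) mod p_384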
ldp x5, x6, [sp, #48]
ldp x4, x3, [sp, #288]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #64]
ldp x4, x3, [sp, #304]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #80]
ldp x4, x3, [sp, #320]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, lo
mov x4, #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #48]
stp x7, x8, [sp, #64]
stp x9, x10, [sp, #80]
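// Montgomery squaring: [sp, #144..#191] is set to a value congruent to
// [sp, #240..#287]^2 / 2^384 modulo p_384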
ldp x2, x3, [sp, #240]
mul x9, x2, x3
umulh x10, x2, x3
ldp x4, x5, [sp, #256]
mul x8, x2, x4
adds x10, x10, x8
mul x11, x2, x5
mul x8, x3, x4
adcs x11, x11, x8
umulh x12, x2, x5
mul x8, x3, x5
adcs x12, x12, x8
ldp x6, x7, [sp, #272]
mul x13, x2, x7
mul x8, x3, x6
adcs x13, x13, x8
umulh x14, x2, x7
mul x8, x3, x7
adcs x14, x14, x8
mul x15, x5, x6
adcs x15, x15, xzr
umulh x16, x5, x6
adc x16, x16, xzr
umulh x8, x2, x4
adds x11, x11, x8
umulh x8, x3, x4
adcs x12, x12, x8
umulh x8, x3, x5
adcs x13, x13, x8
umulh x8, x3, x6
adcs x14, x14, x8
umulh x8, x3, x7
adcs x15, x15, x8
adc x16, x16, xzr
mul x8, x2, x6
adds x12, x12, x8
mul x8, x4, x5
adcs x13, x13, x8
mul x8, x4, x6
adcs x14, x14, x8
mul x8, x4, x7
adcs x15, x15, x8
mul x8, x5, x7
adcs x16, x16, x8
mul x17, x6, x7
adcs x17, x17, xzr
umulh x19, x6, x7
adc x19, x19, xzr
umulh x8, x2, x6
adds x13, x13, x8
umulh x8, x4, x5
adcs x14, x14, x8
umulh x8, x4, x6
adcs x15, x15, x8
umulh x8, x4, x7
adcs x16, x16, x8
umulh x8, x5, x7
adcs x17, x17, x8
adc x19, x19, xzr
adds x9, x9, x9
adcs x10, x10, x10
adcs x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adcs x17, x17, x17
adcs x19, x19, x19
cset x20, hs
umulh x8, x2, x2
mul x2, x2, x2
adds x9, x9, x8
mul x8, x3, x3
adcs x10, x10, x8
umulh x8, x3, x3
adcs x11, x11, x8
mul x8, x4, x4
adcs x12, x12, x8
umulh x8, x4, x4
adcs x13, x13, x8
mul x8, x5, x5
adcs x14, x14, x8
umulh x8, x5, x5
adcs x15, x15, x8
mul x8, x6, x6
adcs x16, x16, x8
umulh x8, x6, x6
adcs x17, x17, x8
mul x8, x7, x7
adcs x19, x19, x8
umulh x8, x7, x7
adc x20, x20, x8
lsl x5, x2, #32
add x2, x5, x2
mov x5, #-4294967295
umulh x5, x5, x2
mov x4, #4294967295
mul x3, x4, x2
umulh x4, x4, x2
adds x5, x5, x3
adcs x4, x4, x2
adc x3, xzr, xzr
subs x9, x9, x5
sbcs x10, x10, x4
sbcs x11, x11, x3
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x2, x2, xzr
lsl x5, x9, #32
add x9, x5, x9
mov x5, #-4294967295
umulh x5, x5, x9
mov x4, #4294967295
mul x3, x4, x9
umulh x4, x4, x9
adds x5, x5, x3
adcs x4, x4, x9
adc x3, xzr, xzr
subs x10, x10, x5
sbcs x11, x11, x4
sbcs x12, x12, x3
sbcs x13, x13, xzr
sbcs x2, x2, xzr
sbc x9, x9, xzr
lsl x5, x10, #32
add x10, x5, x10
mov x5, #-4294967295
umulh x5, x5, x10
mov x4, #4294967295
mul x3, x4, x10
umulh x4, x4, x10
adds x5, x5, x3
adcs x4, x4, x10
adc x3, xzr, xzr
subs x11, x11, x5
sbcs x12, x12, x4
sbcs x13, x13, x3
sbcs x2, x2, xzr
sbcs x9, x9, xzr
sbc x10, x10, xzr
lsl x5, x11, #32
add x11, x5, x11
mov x5, #-4294967295
umulh x5, x5, x11
mov x4, #4294967295
mul x3, x4, x11
umulh x4, x4, x11
adds x5, x5, x3
adcs x4, x4, x11
adc x3, xzr, xzr
subs x12, x12, x5
sbcs x13, x13, x4
sbcs x2, x2, x3
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbc x11, x11, xzr
lsl x5, x12, #32
add x12, x5, x12
mov x5, #-4294967295
umulh x5, x5, x12
mov x4, #4294967295
mul x3, x4, x12
umulh x4, x4, x12
adds x5, x5, x3
adcs x4, x4, x12
adc x3, xzr, xzr
subs x13, x13, x5
sbcs x2, x2, x4
sbcs x9, x9, x3
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbc x12, x12, xzr
lsl x5, x13, #32
add x13, x5, x13
mov x5, #-4294967295
umulh x5, x5, x13
mov x4, #4294967295
mul x3, x4, x13
umulh x4, x4, x13
adds x5, x5, x3
adcs x4, x4, x13
adc x3, xzr, xzr
subs x2, x2, x5
sbcs x9, x9, x4
sbcs x10, x10, x3
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
adds x2, x2, x14
adcs x9, x9, x15
adcs x10, x10, x16
adcs x11, x11, x17
adcs x12, x12, x19
adcs x13, x13, x20
mov x14, #-4294967295
mov x15, #4294967295
csel x14, x14, xzr, hs
csel x15, x15, xzr, hs
cset x16, hs
adds x2, x2, x14
adcs x9, x9, x15
adcs x10, x10, x16
adcs x11, x11, xzr
adcs x12, x12, xzr
adc x13, x13, xzr
stp x2, x9, [sp, #144]
stp x10, x11, [sp, #160]
stp x12, x13, [sp, #176]
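// Montgomery squaring modulo p_384:
// [sp, #0..#47] := ([sp, #48..#95]^2 / 2^384) mod p_384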
ldp x2, x3, [sp, #48]
mul x9, x2, x3
umulh x10, x2, x3
ldp x4, x5, [sp, #64]
mul x8, x2, x4
adds x10, x10, x8
mul x11, x2, x5
mul x8, x3, x4
adcs x11, x11, x8
umulh x12, x2, x5
mul x8, x3, x5
adcs x12, x12, x8
ldp x6, x7, [sp, #80]
mul x13, x2, x7
mul x8, x3, x6
adcs x13, x13, x8
umulh x14, x2, x7
mul x8, x3, x7
adcs x14, x14, x8
mul x15, x5, x6
adcs x15, x15, xzr
umulh x16, x5, x6
adc x16, x16, xzr
umulh x8, x2, x4
adds x11, x11, x8
umulh x8, x3, x4
adcs x12, x12, x8
umulh x8, x3, x5
adcs x13, x13, x8
umulh x8, x3, x6
adcs x14, x14, x8
umulh x8, x3, x7
adcs x15, x15, x8
adc x16, x16, xzr
mul x8, x2, x6
adds x12, x12, x8
mul x8, x4, x5
adcs x13, x13, x8
mul x8, x4, x6
adcs x14, x14, x8
mul x8, x4, x7
adcs x15, x15, x8
mul x8, x5, x7
adcs x16, x16, x8
mul x17, x6, x7
adcs x17, x17, xzr
umulh x19, x6, x7
adc x19, x19, xzr
umulh x8, x2, x6
adds x13, x13, x8
umulh x8, x4, x5
adcs x14, x14, x8
umulh x8, x4, x6
adcs x15, x15, x8
umulh x8, x4, x7
adcs x16, x16, x8
umulh x8, x5, x7
adcs x17, x17, x8
adc x19, x19, xzr
adds x9, x9, x9
adcs x10, x10, x10
adcs x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adcs x17, x17, x17
adcs x19, x19, x19
cset x20, hs
umulh x8, x2, x2
mul x2, x2, x2
adds x9, x9, x8
mul x8, x3, x3
adcs x10, x10, x8
umulh x8, x3, x3
adcs x11, x11, x8
mul x8, x4, x4
adcs x12, x12, x8
umulh x8, x4, x4
adcs x13, x13, x8
mul x8, x5, x5
adcs x14, x14, x8
umulh x8, x5, x5
adcs x15, x15, x8
mul x8, x6, x6
adcs x16, x16, x8
umulh x8, x6, x6
adcs x17, x17, x8
mul x8, x7, x7
adcs x19, x19, x8
umulh x8, x7, x7
adc x20, x20, x8
lsl x5, x2, #32
add x2, x5, x2
mov x5, #-4294967295
umulh x5, x5, x2
mov x4, #4294967295
mul x3, x4, x2
umulh x4, x4, x2
adds x5, x5, x3
adcs x4, x4, x2
adc x3, xzr, xzr
subs x9, x9, x5
sbcs x10, x10, x4
sbcs x11, x11, x3
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x2, x2, xzr
lsl x5, x9, #32
add x9, x5, x9
mov x5, #-4294967295
umulh x5, x5, x9
mov x4, #4294967295
mul x3, x4, x9
umulh x4, x4, x9
adds x5, x5, x3
adcs x4, x4, x9
adc x3, xzr, xzr
subs x10, x10, x5
sbcs x11, x11, x4
sbcs x12, x12, x3
sbcs x13, x13, xzr
sbcs x2, x2, xzr
sbc x9, x9, xzr
lsl x5, x10, #32
add x10, x5, x10
mov x5, #-4294967295
umulh x5, x5, x10
mov x4, #4294967295
mul x3, x4, x10
umulh x4, x4, x10
adds x5, x5, x3
adcs x4, x4, x10
adc x3, xzr, xzr
subs x11, x11, x5
sbcs x12, x12, x4
sbcs x13, x13, x3
sbcs x2, x2, xzr
sbcs x9, x9, xzr
sbc x10, x10, xzr
lsl x5, x11, #32
add x11, x5, x11
mov x5, #-4294967295
umulh x5, x5, x11
mov x4, #4294967295
mul x3, x4, x11
umulh x4, x4, x11
adds x5, x5, x3
adcs x4, x4, x11
adc x3, xzr, xzr
subs x12, x12, x5
sbcs x13, x13, x4
sbcs x2, x2, x3
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbc x11, x11, xzr
lsl x5, x12, #32
add x12, x5, x12
mov x5, #-4294967295
umulh x5, x5, x12
mov x4, #4294967295
mul x3, x4, x12
umulh x4, x4, x12
adds x5, x5, x3
adcs x4, x4, x12
adc x3, xzr, xzr
subs x13, x13, x5
sbcs x2, x2, x4
sbcs x9, x9, x3
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbc x12, x12, xzr
lsl x5, x13, #32
add x13, x5, x13
mov x5, #-4294967295
umulh x5, x5, x13
mov x4, #4294967295
mul x3, x4, x13
umulh x4, x4, x13
adds x5, x5, x3
adcs x4, x4, x13
adc x3, xzr, xzr
subs x2, x2, x5
sbcs x9, x9, x4
sbcs x10, x10, x3
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
adds x2, x2, x14
adcs x9, x9, x15
adcs x10, x10, x16
adcs x11, x11, x17
adcs x12, x12, x19
adcs x13, x13, x20
adc x6, xzr, xzr
mov x8, #-4294967295
adds x14, x2, x8
mov x8, #4294967295
adcs x15, x9, x8
mov x8, #1
adcs x16, x10, x8
adcs x17, x11, xzr
adcs x19, x12, xzr
adcs x20, x13, xzr
adcs x6, x6, xzr
csel x2, x2, x14, eq
csel x9, x9, x15, eq
csel x10, x10, x16, eq
csel x11, x11, x17, eq
csel x12, x12, x19, eq
csel x13, x13, x20, eq
stp x2, x9, [sp]
stp x10, x11, [sp, #16]
stp x12, x13, [sp, #32]
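// Montgomery multiplication modulo p_384:
// [sp, #192..#239] := ([sp, #144..#191] * [sp, #192..#239] / 2^384) mod p_384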
ldp x3, x4, [sp, #144]
ldp x5, x6, [sp, #192]
mul x12, x3, x5
umulh x13, x3, x5
mul x11, x3, x6
umulh x14, x3, x6
adds x13, x13, x11
ldp x7, x8, [sp, #208]
mul x11, x3, x7
umulh x15, x3, x7
adcs x14, x14, x11
mul x11, x3, x8
umulh x16, x3, x8
adcs x15, x15, x11
ldp x9, x10, [sp, #224]
mul x11, x3, x9
umulh x17, x3, x9
adcs x16, x16, x11
mul x11, x3, x10
umulh x19, x3, x10
adcs x17, x17, x11
adc x19, x19, xzr
mul x11, x4, x5
adds x13, x13, x11
mul x11, x4, x6
adcs x14, x14, x11
mul x11, x4, x7
adcs x15, x15, x11
mul x11, x4, x8
adcs x16, x16, x11
mul x11, x4, x9
adcs x17, x17, x11
mul x11, x4, x10
adcs x19, x19, x11
cset x20, hs
umulh x11, x4, x5
adds x14, x14, x11
umulh x11, x4, x6
adcs x15, x15, x11
umulh x11, x4, x7
adcs x16, x16, x11
umulh x11, x4, x8
adcs x17, x17, x11
umulh x11, x4, x9
adcs x19, x19, x11
umulh x11, x4, x10
adc x20, x20, x11
ldp x3, x4, [sp, #160]
mul x11, x3, x5
adds x14, x14, x11
mul x11, x3, x6
adcs x15, x15, x11
mul x11, x3, x7
adcs x16, x16, x11
mul x11, x3, x8
adcs x17, x17, x11
mul x11, x3, x9
adcs x19, x19, x11
mul x11, x3, x10
adcs x20, x20, x11
cset x21, hs
umulh x11, x3, x5
adds x15, x15, x11
umulh x11, x3, x6
adcs x16, x16, x11
umulh x11, x3, x7
adcs x17, x17, x11
umulh x11, x3, x8
adcs x19, x19, x11
umulh x11, x3, x9
adcs x20, x20, x11
umulh x11, x3, x10
adc x21, x21, x11
mul x11, x4, x5
adds x15, x15, x11
mul x11, x4, x6
adcs x16, x16, x11
mul x11, x4, x7
adcs x17, x17, x11
mul x11, x4, x8
adcs x19, x19, x11
mul x11, x4, x9
adcs x20, x20, x11
mul x11, x4, x10
adcs x21, x21, x11
cset x22, hs
umulh x11, x4, x5
adds x16, x16, x11
umulh x11, x4, x6
adcs x17, x17, x11
umulh x11, x4, x7
adcs x19, x19, x11
umulh x11, x4, x8
adcs x20, x20, x11
umulh x11, x4, x9
adcs x21, x21, x11
umulh x11, x4, x10
adc x22, x22, x11
ldp x3, x4, [sp, #176]
mul x11, x3, x5
adds x16, x16, x11
mul x11, x3, x6
adcs x17, x17, x11
mul x11, x3, x7
adcs x19, x19, x11
mul x11, x3, x8
adcs x20, x20, x11
mul x11, x3, x9
adcs x21, x21, x11
mul x11, x3, x10
adcs x22, x22, x11
cset x2, hs
umulh x11, x3, x5
adds x17, x17, x11
umulh x11, x3, x6
adcs x19, x19, x11
umulh x11, x3, x7
adcs x20, x20, x11
umulh x11, x3, x8
adcs x21, x21, x11
umulh x11, x3, x9
adcs x22, x22, x11
umulh x11, x3, x10
adc x2, x2, x11
mul x11, x4, x5
adds x17, x17, x11
mul x11, x4, x6
adcs x19, x19, x11
mul x11, x4, x7
adcs x20, x20, x11
mul x11, x4, x8
adcs x21, x21, x11
mul x11, x4, x9
adcs x22, x22, x11
mul x11, x4, x10
adcs x2, x2, x11
cset x1, hs
umulh x11, x4, x5
adds x19, x19, x11
umulh x11, x4, x6
adcs x20, x20, x11
umulh x11, x4, x7
adcs x21, x21, x11
umulh x11, x4, x8
adcs x22, x22, x11
umulh x11, x4, x9
adcs x2, x2, x11
umulh x11, x4, x10
adc x1, x1, x11
lsl x7, x12, #32
add x12, x7, x12
mov x7, #-4294967295
umulh x7, x7, x12
mov x6, #4294967295
mul x5, x6, x12
umulh x6, x6, x12
adds x7, x7, x5
adcs x6, x6, x12
adc x5, xzr, xzr
subs x13, x13, x7
sbcs x14, x14, x6
sbcs x15, x15, x5
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x12, x12, xzr
lsl x7, x13, #32
add x13, x7, x13
mov x7, #-4294967295
umulh x7, x7, x13
mov x6, #4294967295
mul x5, x6, x13
umulh x6, x6, x13
adds x7, x7, x5
adcs x6, x6, x13
adc x5, xzr, xzr
subs x14, x14, x7
sbcs x15, x15, x6
sbcs x16, x16, x5
sbcs x17, x17, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
lsl x7, x14, #32
add x14, x7, x14
mov x7, #-4294967295
umulh x7, x7, x14
mov x6, #4294967295
mul x5, x6, x14
umulh x6, x6, x14
adds x7, x7, x5
adcs x6, x6, x14
adc x5, xzr, xzr
subs x15, x15, x7
sbcs x16, x16, x6
sbcs x17, x17, x5
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
lsl x7, x15, #32
add x15, x7, x15
mov x7, #-4294967295
umulh x7, x7, x15
mov x6, #4294967295
mul x5, x6, x15
umulh x6, x6, x15
adds x7, x7, x5
adcs x6, x6, x15
adc x5, xzr, xzr
subs x16, x16, x7
sbcs x17, x17, x6
sbcs x12, x12, x5
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbc x15, x15, xzr
lsl x7, x16, #32
add x16, x7, x16
mov x7, #-4294967295
umulh x7, x7, x16
mov x6, #4294967295
mul x5, x6, x16
umulh x6, x6, x16
adds x7, x7, x5
adcs x6, x6, x16
adc x5, xzr, xzr
subs x17, x17, x7
sbcs x12, x12, x6
sbcs x13, x13, x5
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbc x16, x16, xzr
lsl x7, x17, #32
add x17, x7, x17
mov x7, #-4294967295
umulh x7, x7, x17
mov x6, #4294967295
mul x5, x6, x17
umulh x6, x6, x17
adds x7, x7, x5
adcs x6, x6, x17
adc x5, xzr, xzr
subs x12, x12, x7
sbcs x13, x13, x6
sbcs x14, x14, x5
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbc x17, x17, xzr
adds x12, x12, x19
adcs x13, x13, x20
adcs x14, x14, x21
adcs x15, x15, x22
adcs x16, x16, x2
adcs x17, x17, x1
adc x10, xzr, xzr
mov x11, #-4294967295
adds x19, x12, x11
mov x11, #4294967295
adcs x20, x13, x11
mov x11, #1
adcs x21, x14, x11
adcs x22, x15, xzr
adcs x2, x16, xzr
adcs x1, x17, xzr
adcs x10, x10, xzr
csel x12, x12, x19, eq
csel x13, x13, x20, eq
csel x14, x14, x21, eq
csel x15, x15, x22, eq
csel x16, x16, x2, eq
csel x17, x17, x1, eq
stp x12, x13, [sp, #192]
stp x14, x15, [sp, #208]
stp x16, x17, [sp, #224]
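// Montgomery multiplication modulo p_384:
// [sp, #96..#143] := ([sp, #144..#191] * [sp, #96..#143] / 2^384) mod p_384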
ldp x3, x4, [sp, #144]
ldp x5, x6, [sp, #96]
mul x12, x3, x5
umulh x13, x3, x5
mul x11, x3, x6
umulh x14, x3, x6
adds x13, x13, x11
ldp x7, x8, [sp, #112]
mul x11, x3, x7
umulh x15, x3, x7
adcs x14, x14, x11
mul x11, x3, x8
umulh x16, x3, x8
adcs x15, x15, x11
ldp x9, x10, [sp, #128]
mul x11, x3, x9
umulh x17, x3, x9
adcs x16, x16, x11
mul x11, x3, x10
umulh x19, x3, x10
adcs x17, x17, x11
adc x19, x19, xzr
mul x11, x4, x5
adds x13, x13, x11
mul x11, x4, x6
adcs x14, x14, x11
mul x11, x4, x7
adcs x15, x15, x11
mul x11, x4, x8
adcs x16, x16, x11
mul x11, x4, x9
adcs x17, x17, x11
mul x11, x4, x10
adcs x19, x19, x11
cset x20, hs
umulh x11, x4, x5
adds x14, x14, x11
umulh x11, x4, x6
adcs x15, x15, x11
umulh x11, x4, x7
adcs x16, x16, x11
umulh x11, x4, x8
adcs x17, x17, x11
umulh x11, x4, x9
adcs x19, x19, x11
umulh x11, x4, x10
adc x20, x20, x11
ldp x3, x4, [sp, #160]
mul x11, x3, x5
adds x14, x14, x11
mul x11, x3, x6
adcs x15, x15, x11
mul x11, x3, x7
adcs x16, x16, x11
mul x11, x3, x8
adcs x17, x17, x11
mul x11, x3, x9
adcs x19, x19, x11
mul x11, x3, x10
adcs x20, x20, x11
cset x21, hs
umulh x11, x3, x5
adds x15, x15, x11
umulh x11, x3, x6
adcs x16, x16, x11
umulh x11, x3, x7
adcs x17, x17, x11
umulh x11, x3, x8
adcs x19, x19, x11
umulh x11, x3, x9
adcs x20, x20, x11
umulh x11, x3, x10
adc x21, x21, x11
mul x11, x4, x5
adds x15, x15, x11
mul x11, x4, x6
adcs x16, x16, x11
mul x11, x4, x7
adcs x17, x17, x11
mul x11, x4, x8
adcs x19, x19, x11
mul x11, x4, x9
adcs x20, x20, x11
mul x11, x4, x10
adcs x21, x21, x11
cset x22, hs
umulh x11, x4, x5
adds x16, x16, x11
umulh x11, x4, x6
adcs x17, x17, x11
umulh x11, x4, x7
adcs x19, x19, x11
umulh x11, x4, x8
adcs x20, x20, x11
umulh x11, x4, x9
adcs x21, x21, x11
umulh x11, x4, x10
adc x22, x22, x11
ldp x3, x4, [sp, #176]
mul x11, x3, x5
adds x16, x16, x11
mul x11, x3, x6
adcs x17, x17, x11
mul x11, x3, x7
adcs x19, x19, x11
mul x11, x3, x8
adcs x20, x20, x11
mul x11, x3, x9
adcs x21, x21, x11
mul x11, x3, x10
adcs x22, x22, x11
cset x2, hs
umulh x11, x3, x5
adds x17, x17, x11
umulh x11, x3, x6
adcs x19, x19, x11
umulh x11, x3, x7
adcs x20, x20, x11
umulh x11, x3, x8
adcs x21, x21, x11
umulh x11, x3, x9
adcs x22, x22, x11
umulh x11, x3, x10
adc x2, x2, x11
mul x11, x4, x5
adds x17, x17, x11
mul x11, x4, x6
adcs x19, x19, x11
mul x11, x4, x7
adcs x20, x20, x11
mul x11, x4, x8
adcs x21, x21, x11
mul x11, x4, x9
adcs x22, x22, x11
mul x11, x4, x10
adcs x2, x2, x11
cset x1, hs
umulh x11, x4, x5
adds x19, x19, x11
umulh x11, x4, x6
adcs x20, x20, x11
umulh x11, x4, x7
adcs x21, x21, x11
umulh x11, x4, x8
adcs x22, x22, x11
umulh x11, x4, x9
adcs x2, x2, x11
umulh x11, x4, x10
adc x1, x1, x11
lsl x7, x12, #32
add x12, x7, x12
mov x7, #-4294967295
umulh x7, x7, x12
mov x6, #4294967295
mul x5, x6, x12
umulh x6, x6, x12
adds x7, x7, x5
adcs x6, x6, x12
adc x5, xzr, xzr
subs x13, x13, x7
sbcs x14, x14, x6
sbcs x15, x15, x5
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x12, x12, xzr
lsl x7, x13, #32
add x13, x7, x13
mov x7, #-4294967295
umulh x7, x7, x13
mov x6, #4294967295
mul x5, x6, x13
umulh x6, x6, x13
adds x7, x7, x5
adcs x6, x6, x13
adc x5, xzr, xzr
subs x14, x14, x7
sbcs x15, x15, x6
sbcs x16, x16, x5
sbcs x17, x17, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
lsl x7, x14, #32
add x14, x7, x14
mov x7, #-4294967295
umulh x7, x7, x14
mov x6, #4294967295
mul x5, x6, x14
umulh x6, x6, x14
adds x7, x7, x5
adcs x6, x6, x14
adc x5, xzr, xzr
subs x15, x15, x7
sbcs x16, x16, x6
sbcs x17, x17, x5
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
lsl x7, x15, #32
add x15, x7, x15
mov x7, #-4294967295
umulh x7, x7, x15
mov x6, #4294967295
mul x5, x6, x15
umulh x6, x6, x15
adds x7, x7, x5
adcs x6, x6, x15
adc x5, xzr, xzr
subs x16, x16, x7
sbcs x17, x17, x6
sbcs x12, x12, x5
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbc x15, x15, xzr
lsl x7, x16, #32
add x16, x7, x16
mov x7, #-4294967295
umulh x7, x7, x16
mov x6, #4294967295
mul x5, x6, x16
umulh x6, x6, x16
adds x7, x7, x5
adcs x6, x6, x16
adc x5, xzr, xzr
subs x17, x17, x7
sbcs x12, x12, x6
sbcs x13, x13, x5
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbc x16, x16, xzr
lsl x7, x17, #32
add x17, x7, x17
mov x7, #-4294967295
umulh x7, x7, x17
mov x6, #4294967295
mul x5, x6, x17
umulh x6, x6, x17
adds x7, x7, x5
adcs x6, x6, x17
adc x5, xzr, xzr
subs x12, x12, x7
sbcs x13, x13, x6
sbcs x14, x14, x5
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbc x17, x17, xzr
adds x12, x12, x19
adcs x13, x13, x20
adcs x14, x14, x21
adcs x15, x15, x22
adcs x16, x16, x2
adcs x17, x17, x1
adc x10, xzr, xzr
mov x11, #-4294967295
adds x19, x12, x11
mov x11, #4294967295
adcs x20, x13, x11
mov x11, #1
adcs x21, x14, x11
adcs x22, x15, xzr
adcs x2, x16, xzr
adcs x1, x17, xzr
adcs x10, x10, xzr
csel x12, x12, x19, eq
csel x13, x13, x20, eq
csel x14, x14, x21, eq
csel x15, x15, x22, eq
csel x16, x16, x2, eq
csel x17, x17, x1, eq
stp x12, x13, [sp, #96]
stp x14, x15, [sp, #112]
stp x16, x17, [sp, #128]
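// Modular subtraction:
// [sp, #0..#47] := ([sp, #0..#47] - [sp, #192..#239]) mod p_384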
ldp x5, x6, [sp]
ldp x4, x3, [sp, #192]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #16]
ldp x4, x3, [sp, #208]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #32]
ldp x4, x3, [sp, #224]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, lo
mov x4, #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp]
stp x7, x8, [sp, #16]
stp x9, x10, [sp, #32]
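// Modular subtraction:
// [sp, #144..#191] := ([sp, #96..#143] - [sp, #192..#239]) mod p_384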
ldp x5, x6, [sp, #96]
ldp x4, x3, [sp, #192]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #112]
ldp x4, x3, [sp, #208]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #128]
ldp x4, x3, [sp, #224]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, lo
mov x4, #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #144]
stp x7, x8, [sp, #160]
stp x9, x10, [sp, #176]
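// Montgomery multiplication modulo p_384:
// [sp, #240..#287] := ([sp, #240..#287] * [x25, #96..#143] / 2^384) mod p_384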
ldp x3, x4, [sp, #240]
ldp x5, x6, [x25, #96]
mul x12, x3, x5
umulh x13, x3, x5
mul x11, x3, x6
umulh x14, x3, x6
adds x13, x13, x11
ldp x7, x8, [x25, #112]
mul x11, x3, x7
umulh x15, x3, x7
adcs x14, x14, x11
mul x11, x3, x8
umulh x16, x3, x8
adcs x15, x15, x11
ldp x9, x10, [x25, #128]
mul x11, x3, x9
umulh x17, x3, x9
adcs x16, x16, x11
mul x11, x3, x10
umulh x19, x3, x10
adcs x17, x17, x11
adc x19, x19, xzr
mul x11, x4, x5
adds x13, x13, x11
mul x11, x4, x6
adcs x14, x14, x11
mul x11, x4, x7
adcs x15, x15, x11
mul x11, x4, x8
adcs x16, x16, x11
mul x11, x4, x9
adcs x17, x17, x11
mul x11, x4, x10
adcs x19, x19, x11
cset x20, hs
umulh x11, x4, x5
adds x14, x14, x11
umulh x11, x4, x6
adcs x15, x15, x11
umulh x11, x4, x7
adcs x16, x16, x11
umulh x11, x4, x8
adcs x17, x17, x11
umulh x11, x4, x9
adcs x19, x19, x11
umulh x11, x4, x10
adc x20, x20, x11
ldp x3, x4, [sp, #256]
mul x11, x3, x5
adds x14, x14, x11
mul x11, x3, x6
adcs x15, x15, x11
mul x11, x3, x7
adcs x16, x16, x11
mul x11, x3, x8
adcs x17, x17, x11
mul x11, x3, x9
adcs x19, x19, x11
mul x11, x3, x10
adcs x20, x20, x11
cset x21, hs
umulh x11, x3, x5
adds x15, x15, x11
umulh x11, x3, x6
adcs x16, x16, x11
umulh x11, x3, x7
adcs x17, x17, x11
umulh x11, x3, x8
adcs x19, x19, x11
umulh x11, x3, x9
adcs x20, x20, x11
umulh x11, x3, x10
adc x21, x21, x11
mul x11, x4, x5
adds x15, x15, x11
mul x11, x4, x6
adcs x16, x16, x11
mul x11, x4, x7
adcs x17, x17, x11
mul x11, x4, x8
adcs x19, x19, x11
mul x11, x4, x9
adcs x20, x20, x11
mul x11, x4, x10
adcs x21, x21, x11
cset x22, hs
umulh x11, x4, x5
adds x16, x16, x11
umulh x11, x4, x6
adcs x17, x17, x11
umulh x11, x4, x7
adcs x19, x19, x11
umulh x11, x4, x8
adcs x20, x20, x11
umulh x11, x4, x9
adcs x21, x21, x11
umulh x11, x4, x10
adc x22, x22, x11
ldp x3, x4, [sp, #272]
mul x11, x3, x5
adds x16, x16, x11
mul x11, x3, x6
adcs x17, x17, x11
mul x11, x3, x7
adcs x19, x19, x11
mul x11, x3, x8
adcs x20, x20, x11
mul x11, x3, x9
adcs x21, x21, x11
mul x11, x3, x10
adcs x22, x22, x11
cset x2, hs
umulh x11, x3, x5
adds x17, x17, x11
umulh x11, x3, x6
adcs x19, x19, x11
umulh x11, x3, x7
adcs x20, x20, x11
umulh x11, x3, x8
adcs x21, x21, x11
umulh x11, x3, x9
adcs x22, x22, x11
umulh x11, x3, x10
adc x2, x2, x11
mul x11, x4, x5
adds x17, x17, x11
mul x11, x4, x6
adcs x19, x19, x11
mul x11, x4, x7
adcs x20, x20, x11
mul x11, x4, x8
adcs x21, x21, x11
mul x11, x4, x9
adcs x22, x22, x11
mul x11, x4, x10
adcs x2, x2, x11
cset x1, hs
umulh x11, x4, x5
adds x19, x19, x11
umulh x11, x4, x6
adcs x20, x20, x11
umulh x11, x4, x7
adcs x21, x21, x11
umulh x11, x4, x8
adcs x22, x22, x11
umulh x11, x4, x9
adcs x2, x2, x11
umulh x11, x4, x10
adc x1, x1, x11
lsl x7, x12, #32
add x12, x7, x12
mov x7, #-4294967295
umulh x7, x7, x12
mov x6, #4294967295
mul x5, x6, x12
umulh x6, x6, x12
adds x7, x7, x5
adcs x6, x6, x12
adc x5, xzr, xzr
subs x13, x13, x7
sbcs x14, x14, x6
sbcs x15, x15, x5
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x12, x12, xzr
lsl x7, x13, #32
add x13, x7, x13
mov x7, #-4294967295
umulh x7, x7, x13
mov x6, #4294967295
mul x5, x6, x13
umulh x6, x6, x13
adds x7, x7, x5
adcs x6, x6, x13
adc x5, xzr, xzr
subs x14, x14, x7
sbcs x15, x15, x6
sbcs x16, x16, x5
sbcs x17, x17, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
lsl x7, x14, #32
add x14, x7, x14
mov x7, #-4294967295
umulh x7, x7, x14
mov x6, #4294967295
mul x5, x6, x14
umulh x6, x6, x14
adds x7, x7, x5
adcs x6, x6, x14
adc x5, xzr, xzr
subs x15, x15, x7
sbcs x16, x16, x6
sbcs x17, x17, x5
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
lsl x7, x15, #32
add x15, x7, x15
mov x7, #-4294967295
umulh x7, x7, x15
mov x6, #4294967295
mul x5, x6, x15
umulh x6, x6, x15
adds x7, x7, x5
adcs x6, x6, x15
adc x5, xzr, xzr
subs x16, x16, x7
sbcs x17, x17, x6
sbcs x12, x12, x5
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbc x15, x15, xzr
lsl x7, x16, #32
add x16, x7, x16
mov x7, #-4294967295
umulh x7, x7, x16
mov x6, #4294967295
mul x5, x6, x16
umulh x6, x6, x16
adds x7, x7, x5
adcs x6, x6, x16
adc x5, xzr, xzr
subs x17, x17, x7
sbcs x12, x12, x6
sbcs x13, x13, x5
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbc x16, x16, xzr
lsl x7, x17, #32
add x17, x7, x17
mov x7, #-4294967295
umulh x7, x7, x17
mov x6, #4294967295
mul x5, x6, x17
umulh x6, x6, x17
adds x7, x7, x5
adcs x6, x6, x17
adc x5, xzr, xzr
subs x12, x12, x7
sbcs x13, x13, x6
sbcs x14, x14, x5
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbc x17, x17, xzr
adds x12, x12, x19
adcs x13, x13, x20
adcs x14, x14, x21
adcs x15, x15, x22
adcs x16, x16, x2
adcs x17, x17, x1
adc x10, xzr, xzr
mov x11, #-4294967295
adds x19, x12, x11
mov x11, #4294967295
adcs x20, x13, x11
mov x11, #1
adcs x21, x14, x11
adcs x22, x15, xzr
adcs x2, x16, xzr
adcs x1, x17, xzr
adcs x10, x10, xzr
csel x12, x12, x19, eq
csel x13, x13, x20, eq
csel x14, x14, x21, eq
csel x15, x15, x22, eq
csel x16, x16, x2, eq
csel x17, x17, x1, eq
stp x12, x13, [sp, #240]
stp x14, x15, [sp, #256]
stp x16, x17, [sp, #272]
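// Modular subtraction:
// [sp, #0..#47] := ([sp, #0..#47] - [sp, #96..#143]) mod p_384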
ldp x5, x6, [sp]
ldp x4, x3, [sp, #96]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #16]
ldp x4, x3, [sp, #112]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #32]
ldp x4, x3, [sp, #128]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, lo
mov x4, #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp]
stp x7, x8, [sp, #16]
stp x9, x10, [sp, #32]
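// Modular subtraction:
// [sp, #192..#239] := ([sp, #192..#239] - [sp, #0..#47]) mod p_384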
ldp x5, x6, [sp, #192]
ldp x4, x3, [sp]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #208]
ldp x4, x3, [sp, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #224]
ldp x4, x3, [sp, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, lo
mov x4, #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #192]
stp x7, x8, [sp, #208]
stp x9, x10, [sp, #224]
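// Montgomery multiplication modulo p_384:
// [sp, #144..#191] := ([sp, #144..#191] * [sp, #288..#335] / 2^384) mod p_384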
ldp x3, x4, [sp, #144]
ldp x5, x6, [sp, #288]
mul x12, x3, x5
umulh x13, x3, x5
mul x11, x3, x6
umulh x14, x3, x6
adds x13, x13, x11
ldp x7, x8, [sp, #304]
mul x11, x3, x7
umulh x15, x3, x7
adcs x14, x14, x11
mul x11, x3, x8
umulh x16, x3, x8
adcs x15, x15, x11
ldp x9, x10, [sp, #320]
mul x11, x3, x9
umulh x17, x3, x9
adcs x16, x16, x11
mul x11, x3, x10
umulh x19, x3, x10
adcs x17, x17, x11
adc x19, x19, xzr
mul x11, x4, x5
adds x13, x13, x11
mul x11, x4, x6
adcs x14, x14, x11
mul x11, x4, x7
adcs x15, x15, x11
mul x11, x4, x8
adcs x16, x16, x11
mul x11, x4, x9
adcs x17, x17, x11
mul x11, x4, x10
adcs x19, x19, x11
cset x20, hs
umulh x11, x4, x5
adds x14, x14, x11
umulh x11, x4, x6
adcs x15, x15, x11
umulh x11, x4, x7
adcs x16, x16, x11
umulh x11, x4, x8
adcs x17, x17, x11
umulh x11, x4, x9
adcs x19, x19, x11
umulh x11, x4, x10
adc x20, x20, x11
ldp x3, x4, [sp, #160]
mul x11, x3, x5
adds x14, x14, x11
mul x11, x3, x6
adcs x15, x15, x11
mul x11, x3, x7
adcs x16, x16, x11
mul x11, x3, x8
adcs x17, x17, x11
mul x11, x3, x9
adcs x19, x19, x11
mul x11, x3, x10
adcs x20, x20, x11
cset x21, hs
umulh x11, x3, x5
adds x15, x15, x11
umulh x11, x3, x6
adcs x16, x16, x11
umulh x11, x3, x7
adcs x17, x17, x11
umulh x11, x3, x8
adcs x19, x19, x11
umulh x11, x3, x9
adcs x20, x20, x11
umulh x11, x3, x10
adc x21, x21, x11
mul x11, x4, x5
adds x15, x15, x11
mul x11, x4, x6
adcs x16, x16, x11
mul x11, x4, x7
adcs x17, x17, x11
mul x11, x4, x8
adcs x19, x19, x11
mul x11, x4, x9
adcs x20, x20, x11
mul x11, x4, x10
adcs x21, x21, x11
cset x22, hs
umulh x11, x4, x5
adds x16, x16, x11
umulh x11, x4, x6
adcs x17, x17, x11
umulh x11, x4, x7
adcs x19, x19, x11
umulh x11, x4, x8
adcs x20, x20, x11
umulh x11, x4, x9
adcs x21, x21, x11
umulh x11, x4, x10
adc x22, x22, x11
ldp x3, x4, [sp, #176]
mul x11, x3, x5
adds x16, x16, x11
mul x11, x3, x6
adcs x17, x17, x11
mul x11, x3, x7
adcs x19, x19, x11
mul x11, x3, x8
adcs x20, x20, x11
mul x11, x3, x9
adcs x21, x21, x11
mul x11, x3, x10
adcs x22, x22, x11
cset x2, hs
umulh x11, x3, x5
adds x17, x17, x11
umulh x11, x3, x6
adcs x19, x19, x11
umulh x11, x3, x7
adcs x20, x20, x11
umulh x11, x3, x8
adcs x21, x21, x11
umulh x11, x3, x9
adcs x22, x22, x11
umulh x11, x3, x10
adc x2, x2, x11
mul x11, x4, x5
adds x17, x17, x11
mul x11, x4, x6
adcs x19, x19, x11
mul x11, x4, x7
adcs x20, x20, x11
mul x11, x4, x8
adcs x21, x21, x11
mul x11, x4, x9
adcs x22, x22, x11
mul x11, x4, x10
adcs x2, x2, x11
cset x1, hs
umulh x11, x4, x5
adds x19, x19, x11
umulh x11, x4, x6
adcs x20, x20, x11
umulh x11, x4, x7
adcs x21, x21, x11
umulh x11, x4, x8
adcs x22, x22, x11
umulh x11, x4, x9
adcs x2, x2, x11
umulh x11, x4, x10
adc x1, x1, x11
lsl x7, x12, #32
add x12, x7, x12
mov x7, #-4294967295
umulh x7, x7, x12
mov x6, #4294967295
mul x5, x6, x12
umulh x6, x6, x12
adds x7, x7, x5
adcs x6, x6, x12
adc x5, xzr, xzr
subs x13, x13, x7
sbcs x14, x14, x6
sbcs x15, x15, x5
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x12, x12, xzr
lsl x7, x13, #32
add x13, x7, x13
mov x7, #-4294967295
umulh x7, x7, x13
mov x6, #4294967295
mul x5, x6, x13
umulh x6, x6, x13
adds x7, x7, x5
adcs x6, x6, x13
adc x5, xzr, xzr
subs x14, x14, x7
sbcs x15, x15, x6
sbcs x16, x16, x5
sbcs x17, x17, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
lsl x7, x14, #32
add x14, x7, x14
mov x7, #-4294967295
umulh x7, x7, x14
mov x6, #4294967295
mul x5, x6, x14
umulh x6, x6, x14
adds x7, x7, x5
adcs x6, x6, x14
adc x5, xzr, xzr
subs x15, x15, x7
sbcs x16, x16, x6
sbcs x17, x17, x5
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
lsl x7, x15, #32
add x15, x7, x15
mov x7, #-4294967295
umulh x7, x7, x15
mov x6, #4294967295
mul x5, x6, x15
umulh x6, x6, x15
adds x7, x7, x5
adcs x6, x6, x15
adc x5, xzr, xzr
subs x16, x16, x7
sbcs x17, x17, x6
sbcs x12, x12, x5
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbc x15, x15, xzr
lsl x7, x16, #32
add x16, x7, x16
mov x7, #-4294967295
umulh x7, x7, x16
mov x6, #4294967295
mul x5, x6, x16
umulh x6, x6, x16
adds x7, x7, x5
adcs x6, x6, x16
adc x5, xzr, xzr
subs x17, x17, x7
sbcs x12, x12, x6
sbcs x13, x13, x5
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbc x16, x16, xzr
lsl x7, x17, #32
add x17, x7, x17
mov x7, #-4294967295
umulh x7, x7, x17
mov x6, #4294967295
mul x5, x6, x17
umulh x6, x6, x17
adds x7, x7, x5
adcs x6, x6, x17
adc x5, xzr, xzr
subs x12, x12, x7
sbcs x13, x13, x6
sbcs x14, x14, x5
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbc x17, x17, xzr
adds x12, x12, x19
adcs x13, x13, x20
adcs x14, x14, x21
adcs x15, x15, x22
adcs x16, x16, x2
adcs x17, x17, x1
adc x10, xzr, xzr
mov x11, #-4294967295
adds x19, x12, x11
mov x11, #4294967295
adcs x20, x13, x11
mov x11, #1
adcs x21, x14, x11
adcs x22, x15, xzr
adcs x2, x16, xzr
adcs x1, x17, xzr
adcs x10, x10, xzr
csel x12, x12, x19, eq
csel x13, x13, x20, eq
csel x14, x14, x21, eq
csel x15, x15, x22, eq
csel x16, x16, x2, eq
csel x17, x17, x1, eq
stp x12, x13, [sp, #144]
stp x14, x15, [sp, #160]
stp x16, x17, [sp, #176]
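// Montgomery multiplication modulo p_384:
// [sp, #240..#287] := ([sp, #240..#287] * [x26, #96..#143] / 2^384) mod p_384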
ldp x3, x4, [sp, #240]
ldp x5, x6, [x26, #96]
mul x12, x3, x5
umulh x13, x3, x5
mul x11, x3, x6
umulh x14, x3, x6
adds x13, x13, x11
ldp x7, x8, [x26, #112]
mul x11, x3, x7
umulh x15, x3, x7
adcs x14, x14, x11
mul x11, x3, x8
umulh x16, x3, x8
adcs x15, x15, x11
ldp x9, x10, [x26, #128]
mul x11, x3, x9
umulh x17, x3, x9
adcs x16, x16, x11
mul x11, x3, x10
umulh x19, x3, x10
adcs x17, x17, x11
adc x19, x19, xzr
mul x11, x4, x5
adds x13, x13, x11
mul x11, x4, x6
adcs x14, x14, x11
mul x11, x4, x7
adcs x15, x15, x11
mul x11, x4, x8
adcs x16, x16, x11
mul x11, x4, x9
adcs x17, x17, x11
mul x11, x4, x10
adcs x19, x19, x11
cset x20, hs
umulh x11, x4, x5
adds x14, x14, x11
umulh x11, x4, x6
adcs x15, x15, x11
umulh x11, x4, x7
adcs x16, x16, x11
umulh x11, x4, x8
adcs x17, x17, x11
umulh x11, x4, x9
adcs x19, x19, x11
umulh x11, x4, x10
adc x20, x20, x11
ldp x3, x4, [sp, #256]
mul x11, x3, x5
adds x14, x14, x11
mul x11, x3, x6
adcs x15, x15, x11
mul x11, x3, x7
adcs x16, x16, x11
mul x11, x3, x8
adcs x17, x17, x11
mul x11, x3, x9
adcs x19, x19, x11
mul x11, x3, x10
adcs x20, x20, x11
cset x21, hs
umulh x11, x3, x5
adds x15, x15, x11
umulh x11, x3, x6
adcs x16, x16, x11
umulh x11, x3, x7
adcs x17, x17, x11
umulh x11, x3, x8
adcs x19, x19, x11
umulh x11, x3, x9
adcs x20, x20, x11
umulh x11, x3, x10
adc x21, x21, x11
mul x11, x4, x5
adds x15, x15, x11
mul x11, x4, x6
adcs x16, x16, x11
mul x11, x4, x7
adcs x17, x17, x11
mul x11, x4, x8
adcs x19, x19, x11
mul x11, x4, x9
adcs x20, x20, x11
mul x11, x4, x10
adcs x21, x21, x11
cset x22, hs
umulh x11, x4, x5
adds x16, x16, x11
umulh x11, x4, x6
adcs x17, x17, x11
umulh x11, x4, x7
adcs x19, x19, x11
umulh x11, x4, x8
adcs x20, x20, x11
umulh x11, x4, x9
adcs x21, x21, x11
umulh x11, x4, x10
adc x22, x22, x11
ldp x3, x4, [sp, #272]
mul x11, x3, x5
adds x16, x16, x11
mul x11, x3, x6
adcs x17, x17, x11
mul x11, x3, x7
adcs x19, x19, x11
mul x11, x3, x8
adcs x20, x20, x11
mul x11, x3, x9
adcs x21, x21, x11
mul x11, x3, x10
adcs x22, x22, x11
cset x2, hs
umulh x11, x3, x5
adds x17, x17, x11
umulh x11, x3, x6
adcs x19, x19, x11
umulh x11, x3, x7
adcs x20, x20, x11
umulh x11, x3, x8
adcs x21, x21, x11
umulh x11, x3, x9
adcs x22, x22, x11
umulh x11, x3, x10
adc x2, x2, x11
mul x11, x4, x5
adds x17, x17, x11
mul x11, x4, x6
adcs x19, x19, x11
mul x11, x4, x7
adcs x20, x20, x11
mul x11, x4, x8
adcs x21, x21, x11
mul x11, x4, x9
adcs x22, x22, x11
mul x11, x4, x10
adcs x2, x2, x11
cset x1, hs
umulh x11, x4, x5
adds x19, x19, x11
umulh x11, x4, x6
adcs x20, x20, x11
umulh x11, x4, x7
adcs x21, x21, x11
umulh x11, x4, x8
adcs x22, x22, x11
umulh x11, x4, x9
adcs x2, x2, x11
umulh x11, x4, x10
adc x1, x1, x11
lsl x7, x12, #32
add x12, x7, x12
mov x7, #-4294967295
umulh x7, x7, x12
mov x6, #4294967295
mul x5, x6, x12
umulh x6, x6, x12
adds x7, x7, x5
adcs x6, x6, x12
adc x5, xzr, xzr
subs x13, x13, x7
sbcs x14, x14, x6
sbcs x15, x15, x5
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x12, x12, xzr
lsl x7, x13, #32
add x13, x7, x13
mov x7, #-4294967295
umulh x7, x7, x13
mov x6, #4294967295
mul x5, x6, x13
umulh x6, x6, x13
adds x7, x7, x5
adcs x6, x6, x13
adc x5, xzr, xzr
subs x14, x14, x7
sbcs x15, x15, x6
sbcs x16, x16, x5
sbcs x17, x17, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
lsl x7, x14, #32
add x14, x7, x14
mov x7, #-4294967295
umulh x7, x7, x14
mov x6, #4294967295
mul x5, x6, x14
umulh x6, x6, x14
adds x7, x7, x5
adcs x6, x6, x14
adc x5, xzr, xzr
subs x15, x15, x7
sbcs x16, x16, x6
sbcs x17, x17, x5
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
lsl x7, x15, #32
add x15, x7, x15
mov x7, #-4294967295
umulh x7, x7, x15
mov x6, #4294967295
mul x5, x6, x15
umulh x6, x6, x15
adds x7, x7, x5
adcs x6, x6, x15
adc x5, xzr, xzr
subs x16, x16, x7
sbcs x17, x17, x6
sbcs x12, x12, x5
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbc x15, x15, xzr
lsl x7, x16, #32
add x16, x7, x16
mov x7, #-4294967295
umulh x7, x7, x16
mov x6, #4294967295
mul x5, x6, x16
umulh x6, x6, x16
adds x7, x7, x5
adcs x6, x6, x16
adc x5, xzr, xzr
subs x17, x17, x7
sbcs x12, x12, x6
sbcs x13, x13, x5
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbc x16, x16, xzr
lsl x7, x17, #32
add x17, x7, x17
mov x7, #-4294967295
umulh x7, x7, x17
mov x6, #4294967295
mul x5, x6, x17
umulh x6, x6, x17
adds x7, x7, x5
adcs x6, x6, x17
adc x5, xzr, xzr
subs x12, x12, x7
sbcs x13, x13, x6
sbcs x14, x14, x5
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbc x17, x17, xzr
adds x12, x12, x19
adcs x13, x13, x20
adcs x14, x14, x21
adcs x15, x15, x22
adcs x16, x16, x2
adcs x17, x17, x1
adc x10, xzr, xzr
mov x11, #-4294967295
adds x19, x12, x11
mov x11, #4294967295
adcs x20, x13, x11
mov x11, #1
adcs x21, x14, x11
adcs x22, x15, xzr
adcs x2, x16, xzr
adcs x1, x17, xzr
adcs x10, x10, xzr
csel x12, x12, x19, eq
csel x13, x13, x20, eq
csel x14, x14, x21, eq
csel x15, x15, x22, eq
csel x16, x16, x2, eq
csel x17, x17, x1, eq
stp x12, x13, [sp, #240]
stp x14, x15, [sp, #256]
stp x16, x17, [sp, #272]
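// Montgomery multiplication: [sp, #48..95] * [sp, #192..239] -> [sp, #192..239]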
ldp x3, x4, [sp, #48]
ldp x5, x6, [sp, #192]
mul x12, x3, x5
umulh x13, x3, x5
mul x11, x3, x6
umulh x14, x3, x6
adds x13, x13, x11
ldp x7, x8, [sp, #208]
mul x11, x3, x7
umulh x15, x3, x7
adcs x14, x14, x11
mul x11, x3, x8
umulh x16, x3, x8
adcs x15, x15, x11
ldp x9, x10, [sp, #224]
mul x11, x3, x9
umulh x17, x3, x9
adcs x16, x16, x11
mul x11, x3, x10
umulh x19, x3, x10
adcs x17, x17, x11
adc x19, x19, xzr
mul x11, x4, x5
adds x13, x13, x11
mul x11, x4, x6
adcs x14, x14, x11
mul x11, x4, x7
adcs x15, x15, x11
mul x11, x4, x8
adcs x16, x16, x11
mul x11, x4, x9
adcs x17, x17, x11
mul x11, x4, x10
adcs x19, x19, x11
cset x20, hs
umulh x11, x4, x5
adds x14, x14, x11
umulh x11, x4, x6
adcs x15, x15, x11
umulh x11, x4, x7
adcs x16, x16, x11
umulh x11, x4, x8
adcs x17, x17, x11
umulh x11, x4, x9
adcs x19, x19, x11
umulh x11, x4, x10
adc x20, x20, x11
ldp x3, x4, [sp, #64]
mul x11, x3, x5
adds x14, x14, x11
mul x11, x3, x6
adcs x15, x15, x11
mul x11, x3, x7
adcs x16, x16, x11
mul x11, x3, x8
adcs x17, x17, x11
mul x11, x3, x9
adcs x19, x19, x11
mul x11, x3, x10
adcs x20, x20, x11
cset x21, hs
umulh x11, x3, x5
adds x15, x15, x11
umulh x11, x3, x6
adcs x16, x16, x11
umulh x11, x3, x7
adcs x17, x17, x11
umulh x11, x3, x8
adcs x19, x19, x11
umulh x11, x3, x9
adcs x20, x20, x11
umulh x11, x3, x10
adc x21, x21, x11
mul x11, x4, x5
adds x15, x15, x11
mul x11, x4, x6
adcs x16, x16, x11
mul x11, x4, x7
adcs x17, x17, x11
mul x11, x4, x8
adcs x19, x19, x11
mul x11, x4, x9
adcs x20, x20, x11
mul x11, x4, x10
adcs x21, x21, x11
cset x22, hs
umulh x11, x4, x5
adds x16, x16, x11
umulh x11, x4, x6
adcs x17, x17, x11
umulh x11, x4, x7
adcs x19, x19, x11
umulh x11, x4, x8
adcs x20, x20, x11
umulh x11, x4, x9
adcs x21, x21, x11
umulh x11, x4, x10
adc x22, x22, x11
ldp x3, x4, [sp, #80]
mul x11, x3, x5
adds x16, x16, x11
mul x11, x3, x6
adcs x17, x17, x11
mul x11, x3, x7
adcs x19, x19, x11
mul x11, x3, x8
adcs x20, x20, x11
mul x11, x3, x9
adcs x21, x21, x11
mul x11, x3, x10
adcs x22, x22, x11
cset x2, hs
umulh x11, x3, x5
adds x17, x17, x11
umulh x11, x3, x6
adcs x19, x19, x11
umulh x11, x3, x7
adcs x20, x20, x11
umulh x11, x3, x8
adcs x21, x21, x11
umulh x11, x3, x9
adcs x22, x22, x11
umulh x11, x3, x10
adc x2, x2, x11
mul x11, x4, x5
adds x17, x17, x11
mul x11, x4, x6
adcs x19, x19, x11
mul x11, x4, x7
adcs x20, x20, x11
mul x11, x4, x8
adcs x21, x21, x11
mul x11, x4, x9
adcs x22, x22, x11
mul x11, x4, x10
adcs x2, x2, x11
cset x1, hs
umulh x11, x4, x5
adds x19, x19, x11
umulh x11, x4, x6
adcs x20, x20, x11
umulh x11, x4, x7
adcs x21, x21, x11
umulh x11, x4, x8
adcs x22, x22, x11
umulh x11, x4, x9
adcs x2, x2, x11
umulh x11, x4, x10
adc x1, x1, x11
lsl x7, x12, #32
add x12, x7, x12
mov x7, #-4294967295
umulh x7, x7, x12
mov x6, #4294967295
mul x5, x6, x12
umulh x6, x6, x12
adds x7, x7, x5
adcs x6, x6, x12
adc x5, xzr, xzr
subs x13, x13, x7
sbcs x14, x14, x6
sbcs x15, x15, x5
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x12, x12, xzr
lsl x7, x13, #32
add x13, x7, x13
mov x7, #-4294967295
umulh x7, x7, x13
mov x6, #4294967295
mul x5, x6, x13
umulh x6, x6, x13
adds x7, x7, x5
adcs x6, x6, x13
adc x5, xzr, xzr
subs x14, x14, x7
sbcs x15, x15, x6
sbcs x16, x16, x5
sbcs x17, x17, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
lsl x7, x14, #32
add x14, x7, x14
mov x7, #-4294967295
umulh x7, x7, x14
mov x6, #4294967295
mul x5, x6, x14
umulh x6, x6, x14
adds x7, x7, x5
adcs x6, x6, x14
adc x5, xzr, xzr
subs x15, x15, x7
sbcs x16, x16, x6
sbcs x17, x17, x5
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
lsl x7, x15, #32
add x15, x7, x15
mov x7, #-4294967295
umulh x7, x7, x15
mov x6, #4294967295
mul x5, x6, x15
umulh x6, x6, x15
adds x7, x7, x5
adcs x6, x6, x15
adc x5, xzr, xzr
subs x16, x16, x7
sbcs x17, x17, x6
sbcs x12, x12, x5
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbc x15, x15, xzr
lsl x7, x16, #32
add x16, x7, x16
mov x7, #-4294967295
umulh x7, x7, x16
mov x6, #4294967295
mul x5, x6, x16
umulh x6, x6, x16
adds x7, x7, x5
adcs x6, x6, x16
adc x5, xzr, xzr
subs x17, x17, x7
sbcs x12, x12, x6
sbcs x13, x13, x5
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbc x16, x16, xzr
lsl x7, x17, #32
add x17, x7, x17
mov x7, #-4294967295
umulh x7, x7, x17
mov x6, #4294967295
mul x5, x6, x17
umulh x6, x6, x17
adds x7, x7, x5
adcs x6, x6, x17
adc x5, xzr, xzr
subs x12, x12, x7
sbcs x13, x13, x6
sbcs x14, x14, x5
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbc x17, x17, xzr
adds x12, x12, x19
adcs x13, x13, x20
adcs x14, x14, x21
adcs x15, x15, x22
adcs x16, x16, x2
adcs x17, x17, x1
adc x10, xzr, xzr
mov x11, #-4294967295
adds x19, x12, x11
mov x11, #4294967295
adcs x20, x13, x11
mov x11, #1
adcs x21, x14, x11
adcs x22, x15, xzr
adcs x2, x16, xzr
adcs x1, x17, xzr
adcs x10, x10, xzr
csel x12, x12, x19, eq
csel x13, x13, x20, eq
csel x14, x14, x21, eq
csel x15, x15, x22, eq
csel x16, x16, x2, eq
csel x17, x17, x1, eq
stp x12, x13, [sp, #192]
stp x14, x15, [sp, #208]
stp x16, x17, [sp, #224]
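// Modular subtraction: [sp, #192..239] - [sp, #144..191] mod p_384 -> [sp, #192..239]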
ldp x5, x6, [sp, #192]
ldp x4, x3, [sp, #144]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #208]
ldp x4, x3, [sp, #160]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #224]
ldp x4, x3, [sp, #176]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, lo
mov x4, #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #192]
stp x7, x8, [sp, #208]
stp x9, x10, [sp, #224]
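// Degenerate-case handling: test whether the 6-word values at [x25, #96..143]
// and [x26, #96..143] (the z coordinates of the two inputs) are zero and, if
// exactly one is, select that input's untouched coordinates in place of the
// computed sum, then store the resulting Jacobian triple to the output at x24.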
ldp x0, x1, [x25, #96]
ldp x2, x3, [x25, #112]
ldp x4, x5, [x25, #128]
orr x20, x0, x1
orr x21, x2, x3
orr x22, x4, x5
orr x20, x20, x21
orr x20, x20, x22
cmp x20, xzr
cset x20, ne
ldp x6, x7, [x26, #96]
ldp x8, x9, [x26, #112]
ldp x10, x11, [x26, #128]
orr x21, x6, x7
orr x22, x8, x9
orr x23, x10, x11
orr x21, x21, x22
orr x21, x21, x23
cmp x21, xzr
cset x21, ne
cmp x21, x20
ldp x12, x13, [sp, #240]
csel x12, x0, x12, lo
csel x13, x1, x13, lo
csel x12, x6, x12, hi
csel x13, x7, x13, hi
ldp x14, x15, [sp, #256]
csel x14, x2, x14, lo
csel x15, x3, x15, lo
csel x14, x8, x14, hi
csel x15, x9, x15, hi
ldp x16, x17, [sp, #272]
csel x16, x4, x16, lo
csel x17, x5, x17, lo
csel x16, x10, x16, hi
csel x17, x11, x17, hi
ldp x20, x21, [x25]
ldp x0, x1, [sp]
csel x0, x20, x0, lo
csel x1, x21, x1, lo
ldp x20, x21, [x26]
csel x0, x20, x0, hi
csel x1, x21, x1, hi
ldp x20, x21, [x25, #16]
ldp x2, x3, [sp, #16]
csel x2, x20, x2, lo
csel x3, x21, x3, lo
ldp x20, x21, [x26, #16]
csel x2, x20, x2, hi
csel x3, x21, x3, hi
ldp x20, x21, [x25, #32]
ldp x4, x5, [sp, #32]
csel x4, x20, x4, lo
csel x5, x21, x5, lo
ldp x20, x21, [x26, #32]
csel x4, x20, x4, hi
csel x5, x21, x5, hi
ldp x20, x21, [x25, #48]
ldp x6, x7, [sp, #192]
csel x6, x20, x6, lo
csel x7, x21, x7, lo
ldp x20, x21, [x26, #48]
csel x6, x20, x6, hi
csel x7, x21, x7, hi
ldp x20, x21, [x25, #64]
ldp x8, x9, [sp, #208]
csel x8, x20, x8, lo
csel x9, x21, x9, lo
ldp x20, x21, [x26, #64]
csel x8, x20, x8, hi
csel x9, x21, x9, hi
ldp x20, x21, [x25, #80]
ldp x10, x11, [sp, #224]
csel x10, x20, x10, lo
csel x11, x21, x11, lo
ldp x20, x21, [x26, #80]
csel x10, x20, x10, hi
csel x11, x21, x11, hi
stp x0, x1, [x24]
stp x2, x3, [x24, #16]
stp x4, x5, [x24, #32]
stp x6, x7, [x24, #48]
stp x8, x9, [x24, #64]
stp x10, x11, [x24, #80]
stp x12, x13, [x24, #96]
stp x14, x15, [x24, #112]
stp x16, x17, [x24, #128]
add sp, sp, #336
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
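// Local point doubling subroutine used by the scalar multiplication: given a
// Montgomery-Jacobian triple at the address in x1, write the doubled point to
// the address in x0 (x23 and x24 hold these pointers through the routine).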
p384_montjscalarmul_alt_p384_montjdouble:
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
sub sp, sp, #336
mov x23, x0
mov x24, x1
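// Montgomery squaring of the input z coordinate ([x24, #96..143]) -> [sp, #0..47]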
ldp x2, x3, [x24, #96]
mul x9, x2, x3
umulh x10, x2, x3
ldp x4, x5, [x24, #112]
mul x8, x2, x4
adds x10, x10, x8
mul x11, x2, x5
mul x8, x3, x4
adcs x11, x11, x8
umulh x12, x2, x5
mul x8, x3, x5
adcs x12, x12, x8
ldp x6, x7, [x24, #128]
mul x13, x2, x7
mul x8, x3, x6
adcs x13, x13, x8
umulh x14, x2, x7
mul x8, x3, x7
adcs x14, x14, x8
mul x15, x5, x6
adcs x15, x15, xzr
umulh x16, x5, x6
adc x16, x16, xzr
umulh x8, x2, x4
adds x11, x11, x8
umulh x8, x3, x4
adcs x12, x12, x8
umulh x8, x3, x5
adcs x13, x13, x8
umulh x8, x3, x6
adcs x14, x14, x8
umulh x8, x3, x7
adcs x15, x15, x8
adc x16, x16, xzr
mul x8, x2, x6
adds x12, x12, x8
mul x8, x4, x5
adcs x13, x13, x8
mul x8, x4, x6
adcs x14, x14, x8
mul x8, x4, x7
adcs x15, x15, x8
mul x8, x5, x7
adcs x16, x16, x8
mul x17, x6, x7
adcs x17, x17, xzr
umulh x19, x6, x7
adc x19, x19, xzr
umulh x8, x2, x6
adds x13, x13, x8
umulh x8, x4, x5
adcs x14, x14, x8
umulh x8, x4, x6
adcs x15, x15, x8
umulh x8, x4, x7
adcs x16, x16, x8
umulh x8, x5, x7
adcs x17, x17, x8
adc x19, x19, xzr
adds x9, x9, x9
adcs x10, x10, x10
adcs x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adcs x17, x17, x17
adcs x19, x19, x19
cset x20, hs
umulh x8, x2, x2
mul x2, x2, x2
adds x9, x9, x8
mul x8, x3, x3
adcs x10, x10, x8
umulh x8, x3, x3
adcs x11, x11, x8
mul x8, x4, x4
adcs x12, x12, x8
umulh x8, x4, x4
adcs x13, x13, x8
mul x8, x5, x5
adcs x14, x14, x8
umulh x8, x5, x5
adcs x15, x15, x8
mul x8, x6, x6
adcs x16, x16, x8
umulh x8, x6, x6
adcs x17, x17, x8
mul x8, x7, x7
adcs x19, x19, x8
umulh x8, x7, x7
adc x20, x20, x8
lsl x5, x2, #32
add x2, x5, x2
mov x5, #-4294967295
umulh x5, x5, x2
mov x4, #4294967295
mul x3, x4, x2
umulh x4, x4, x2
adds x5, x5, x3
adcs x4, x4, x2
adc x3, xzr, xzr
subs x9, x9, x5
sbcs x10, x10, x4
sbcs x11, x11, x3
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x2, x2, xzr
lsl x5, x9, #32
add x9, x5, x9
mov x5, #-4294967295
umulh x5, x5, x9
mov x4, #4294967295
mul x3, x4, x9
umulh x4, x4, x9
adds x5, x5, x3
adcs x4, x4, x9
adc x3, xzr, xzr
subs x10, x10, x5
sbcs x11, x11, x4
sbcs x12, x12, x3
sbcs x13, x13, xzr
sbcs x2, x2, xzr
sbc x9, x9, xzr
lsl x5, x10, #32
add x10, x5, x10
mov x5, #-4294967295
umulh x5, x5, x10
mov x4, #4294967295
mul x3, x4, x10
umulh x4, x4, x10
adds x5, x5, x3
adcs x4, x4, x10
adc x3, xzr, xzr
subs x11, x11, x5
sbcs x12, x12, x4
sbcs x13, x13, x3
sbcs x2, x2, xzr
sbcs x9, x9, xzr
sbc x10, x10, xzr
lsl x5, x11, #32
add x11, x5, x11
mov x5, #-4294967295
umulh x5, x5, x11
mov x4, #4294967295
mul x3, x4, x11
umulh x4, x4, x11
adds x5, x5, x3
adcs x4, x4, x11
adc x3, xzr, xzr
subs x12, x12, x5
sbcs x13, x13, x4
sbcs x2, x2, x3
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbc x11, x11, xzr
lsl x5, x12, #32
add x12, x5, x12
mov x5, #-4294967295
umulh x5, x5, x12
mov x4, #4294967295
mul x3, x4, x12
umulh x4, x4, x12
adds x5, x5, x3
adcs x4, x4, x12
adc x3, xzr, xzr
subs x13, x13, x5
sbcs x2, x2, x4
sbcs x9, x9, x3
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbc x12, x12, xzr
lsl x5, x13, #32
add x13, x5, x13
mov x5, #-4294967295
umulh x5, x5, x13
mov x4, #4294967295
mul x3, x4, x13
umulh x4, x4, x13
adds x5, x5, x3
adcs x4, x4, x13
adc x3, xzr, xzr
subs x2, x2, x5
sbcs x9, x9, x4
sbcs x10, x10, x3
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
adds x2, x2, x14
adcs x9, x9, x15
adcs x10, x10, x16
adcs x11, x11, x17
adcs x12, x12, x19
adcs x13, x13, x20
adc x6, xzr, xzr
mov x8, #-4294967295
adds x14, x2, x8
mov x8, #4294967295
adcs x15, x9, x8
mov x8, #1
adcs x16, x10, x8
adcs x17, x11, xzr
adcs x19, x12, xzr
adcs x20, x13, xzr
adcs x6, x6, xzr
csel x2, x2, x14, eq
csel x9, x9, x15, eq
csel x10, x10, x16, eq
csel x11, x11, x17, eq
csel x12, x12, x19, eq
csel x13, x13, x20, eq
stp x2, x9, [sp]
stp x10, x11, [sp, #16]
stp x12, x13, [sp, #32]
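// Montgomery squaring of the input y coordinate ([x24, #48..95]) -> [sp, #48..95]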
ldp x2, x3, [x24, #48]
mul x9, x2, x3
umulh x10, x2, x3
ldp x4, x5, [x24, #64]
mul x8, x2, x4
adds x10, x10, x8
mul x11, x2, x5
mul x8, x3, x4
adcs x11, x11, x8
umulh x12, x2, x5
mul x8, x3, x5
adcs x12, x12, x8
ldp x6, x7, [x24, #80]
mul x13, x2, x7
mul x8, x3, x6
adcs x13, x13, x8
umulh x14, x2, x7
mul x8, x3, x7
adcs x14, x14, x8
mul x15, x5, x6
adcs x15, x15, xzr
umulh x16, x5, x6
adc x16, x16, xzr
umulh x8, x2, x4
adds x11, x11, x8
umulh x8, x3, x4
adcs x12, x12, x8
umulh x8, x3, x5
adcs x13, x13, x8
umulh x8, x3, x6
adcs x14, x14, x8
umulh x8, x3, x7
adcs x15, x15, x8
adc x16, x16, xzr
mul x8, x2, x6
adds x12, x12, x8
mul x8, x4, x5
adcs x13, x13, x8
mul x8, x4, x6
adcs x14, x14, x8
mul x8, x4, x7
adcs x15, x15, x8
mul x8, x5, x7
adcs x16, x16, x8
mul x17, x6, x7
adcs x17, x17, xzr
umulh x19, x6, x7
adc x19, x19, xzr
umulh x8, x2, x6
adds x13, x13, x8
umulh x8, x4, x5
adcs x14, x14, x8
umulh x8, x4, x6
adcs x15, x15, x8
umulh x8, x4, x7
adcs x16, x16, x8
umulh x8, x5, x7
adcs x17, x17, x8
adc x19, x19, xzr
adds x9, x9, x9
adcs x10, x10, x10
adcs x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adcs x17, x17, x17
adcs x19, x19, x19
cset x20, hs
umulh x8, x2, x2
mul x2, x2, x2
adds x9, x9, x8
mul x8, x3, x3
adcs x10, x10, x8
umulh x8, x3, x3
adcs x11, x11, x8
mul x8, x4, x4
adcs x12, x12, x8
umulh x8, x4, x4
adcs x13, x13, x8
mul x8, x5, x5
adcs x14, x14, x8
umulh x8, x5, x5
adcs x15, x15, x8
mul x8, x6, x6
adcs x16, x16, x8
umulh x8, x6, x6
adcs x17, x17, x8
mul x8, x7, x7
adcs x19, x19, x8
umulh x8, x7, x7
adc x20, x20, x8
lsl x5, x2, #32
add x2, x5, x2
mov x5, #-4294967295
umulh x5, x5, x2
mov x4, #4294967295
mul x3, x4, x2
umulh x4, x4, x2
adds x5, x5, x3
adcs x4, x4, x2
adc x3, xzr, xzr
subs x9, x9, x5
sbcs x10, x10, x4
sbcs x11, x11, x3
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x2, x2, xzr
lsl x5, x9, #32
add x9, x5, x9
mov x5, #-4294967295
umulh x5, x5, x9
mov x4, #4294967295
mul x3, x4, x9
umulh x4, x4, x9
adds x5, x5, x3
adcs x4, x4, x9
adc x3, xzr, xzr
subs x10, x10, x5
sbcs x11, x11, x4
sbcs x12, x12, x3
sbcs x13, x13, xzr
sbcs x2, x2, xzr
sbc x9, x9, xzr
lsl x5, x10, #32
add x10, x5, x10
mov x5, #-4294967295
umulh x5, x5, x10
mov x4, #4294967295
mul x3, x4, x10
umulh x4, x4, x10
adds x5, x5, x3
adcs x4, x4, x10
adc x3, xzr, xzr
subs x11, x11, x5
sbcs x12, x12, x4
sbcs x13, x13, x3
sbcs x2, x2, xzr
sbcs x9, x9, xzr
sbc x10, x10, xzr
lsl x5, x11, #32
add x11, x5, x11
mov x5, #-4294967295
umulh x5, x5, x11
mov x4, #4294967295
mul x3, x4, x11
umulh x4, x4, x11
adds x5, x5, x3
adcs x4, x4, x11
adc x3, xzr, xzr
subs x12, x12, x5
sbcs x13, x13, x4
sbcs x2, x2, x3
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbc x11, x11, xzr
lsl x5, x12, #32
add x12, x5, x12
mov x5, #-4294967295
umulh x5, x5, x12
mov x4, #4294967295
mul x3, x4, x12
umulh x4, x4, x12
adds x5, x5, x3
adcs x4, x4, x12
adc x3, xzr, xzr
subs x13, x13, x5
sbcs x2, x2, x4
sbcs x9, x9, x3
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbc x12, x12, xzr
lsl x5, x13, #32
add x13, x5, x13
mov x5, #-4294967295
umulh x5, x5, x13
mov x4, #4294967295
mul x3, x4, x13
umulh x4, x4, x13
adds x5, x5, x3
adcs x4, x4, x13
adc x3, xzr, xzr
subs x2, x2, x5
sbcs x9, x9, x4
sbcs x10, x10, x3
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
adds x2, x2, x14
adcs x9, x9, x15
adcs x10, x10, x16
adcs x11, x11, x17
adcs x12, x12, x19
adcs x13, x13, x20
adc x6, xzr, xzr
mov x8, #-4294967295
adds x14, x2, x8
mov x8, #4294967295
adcs x15, x9, x8
mov x8, #1
adcs x16, x10, x8
adcs x17, x11, xzr
adcs x19, x12, xzr
adcs x20, x13, xzr
adcs x6, x6, xzr
csel x2, x2, x14, eq
csel x9, x9, x15, eq
csel x10, x10, x16, eq
csel x11, x11, x17, eq
csel x12, x12, x19, eq
csel x13, x13, x20, eq
stp x2, x9, [sp, #48]
stp x10, x11, [sp, #64]
stp x12, x13, [sp, #80]
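// Modular addition: input x coordinate ([x24, #0..47]) + [sp, #0..47] mod p_384 -> [sp, #240..287]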
ldp x5, x6, [x24]
ldp x4, x3, [sp]
adds x5, x5, x4
adcs x6, x6, x3
ldp x7, x8, [x24, #16]
ldp x4, x3, [sp, #16]
adcs x7, x7, x4
adcs x8, x8, x3
ldp x9, x10, [x24, #32]
ldp x4, x3, [sp, #32]
adcs x9, x9, x4
adcs x10, x10, x3
csetm x3, hs
mov x4, #4294967295
and x4, x4, x3
subs x5, x5, x4
eor x4, x4, x3
sbcs x6, x6, x4
mov x4, #-2
and x4, x4, x3
sbcs x7, x7, x4
sbcs x8, x8, x3
sbcs x9, x9, x3
sbc x10, x10, x3
stp x5, x6, [sp, #240]
stp x7, x8, [sp, #256]
stp x9, x10, [sp, #272]
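// Modular subtraction: input x coordinate ([x24, #0..47]) - [sp, #0..47] mod p_384 -> [sp, #192..239]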
ldp x5, x6, [x24]
ldp x4, x3, [sp]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [x24, #16]
ldp x4, x3, [sp, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [x24, #32]
ldp x4, x3, [sp, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, lo
mov x4, #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #192]
stp x7, x8, [sp, #208]
stp x9, x10, [sp, #224]
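// Montgomery multiplication: [sp, #240..287] * [sp, #192..239] -> [sp, #96..143]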
ldp x3, x4, [sp, #240]
ldp x5, x6, [sp, #192]
mul x12, x3, x5
umulh x13, x3, x5
mul x11, x3, x6
umulh x14, x3, x6
adds x13, x13, x11
ldp x7, x8, [sp, #208]
mul x11, x3, x7
umulh x15, x3, x7
adcs x14, x14, x11
mul x11, x3, x8
umulh x16, x3, x8
adcs x15, x15, x11
ldp x9, x10, [sp, #224]
mul x11, x3, x9
umulh x17, x3, x9
adcs x16, x16, x11
mul x11, x3, x10
umulh x19, x3, x10
adcs x17, x17, x11
adc x19, x19, xzr
mul x11, x4, x5
adds x13, x13, x11
mul x11, x4, x6
adcs x14, x14, x11
mul x11, x4, x7
adcs x15, x15, x11
mul x11, x4, x8
adcs x16, x16, x11
mul x11, x4, x9
adcs x17, x17, x11
mul x11, x4, x10
adcs x19, x19, x11
cset x20, hs
umulh x11, x4, x5
adds x14, x14, x11
umulh x11, x4, x6
adcs x15, x15, x11
umulh x11, x4, x7
adcs x16, x16, x11
umulh x11, x4, x8
adcs x17, x17, x11
umulh x11, x4, x9
adcs x19, x19, x11
umulh x11, x4, x10
adc x20, x20, x11
ldp x3, x4, [sp, #256]
mul x11, x3, x5
adds x14, x14, x11
mul x11, x3, x6
adcs x15, x15, x11
mul x11, x3, x7
adcs x16, x16, x11
mul x11, x3, x8
adcs x17, x17, x11
mul x11, x3, x9
adcs x19, x19, x11
mul x11, x3, x10
adcs x20, x20, x11
cset x21, hs
umulh x11, x3, x5
adds x15, x15, x11
umulh x11, x3, x6
adcs x16, x16, x11
umulh x11, x3, x7
adcs x17, x17, x11
umulh x11, x3, x8
adcs x19, x19, x11
umulh x11, x3, x9
adcs x20, x20, x11
umulh x11, x3, x10
adc x21, x21, x11
mul x11, x4, x5
adds x15, x15, x11
mul x11, x4, x6
adcs x16, x16, x11
mul x11, x4, x7
adcs x17, x17, x11
mul x11, x4, x8
adcs x19, x19, x11
mul x11, x4, x9
adcs x20, x20, x11
mul x11, x4, x10
adcs x21, x21, x11
cset x22, hs
umulh x11, x4, x5
adds x16, x16, x11
umulh x11, x4, x6
adcs x17, x17, x11
umulh x11, x4, x7
adcs x19, x19, x11
umulh x11, x4, x8
adcs x20, x20, x11
umulh x11, x4, x9
adcs x21, x21, x11
umulh x11, x4, x10
adc x22, x22, x11
ldp x3, x4, [sp, #272]
mul x11, x3, x5
adds x16, x16, x11
mul x11, x3, x6
adcs x17, x17, x11
mul x11, x3, x7
adcs x19, x19, x11
mul x11, x3, x8
adcs x20, x20, x11
mul x11, x3, x9
adcs x21, x21, x11
mul x11, x3, x10
adcs x22, x22, x11
cset x2, hs
umulh x11, x3, x5
adds x17, x17, x11
umulh x11, x3, x6
adcs x19, x19, x11
umulh x11, x3, x7
adcs x20, x20, x11
umulh x11, x3, x8
adcs x21, x21, x11
umulh x11, x3, x9
adcs x22, x22, x11
umulh x11, x3, x10
adc x2, x2, x11
mul x11, x4, x5
adds x17, x17, x11
mul x11, x4, x6
adcs x19, x19, x11
mul x11, x4, x7
adcs x20, x20, x11
mul x11, x4, x8
adcs x21, x21, x11
mul x11, x4, x9
adcs x22, x22, x11
mul x11, x4, x10
adcs x2, x2, x11
cset x1, hs
umulh x11, x4, x5
adds x19, x19, x11
umulh x11, x4, x6
adcs x20, x20, x11
umulh x11, x4, x7
adcs x21, x21, x11
umulh x11, x4, x8
adcs x22, x22, x11
umulh x11, x4, x9
adcs x2, x2, x11
umulh x11, x4, x10
adc x1, x1, x11
lsl x7, x12, #32
add x12, x7, x12
mov x7, #-4294967295
umulh x7, x7, x12
mov x6, #4294967295
mul x5, x6, x12
umulh x6, x6, x12
adds x7, x7, x5
adcs x6, x6, x12
adc x5, xzr, xzr
subs x13, x13, x7
sbcs x14, x14, x6
sbcs x15, x15, x5
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x12, x12, xzr
lsl x7, x13, #32
add x13, x7, x13
mov x7, #-4294967295
umulh x7, x7, x13
mov x6, #4294967295
mul x5, x6, x13
umulh x6, x6, x13
adds x7, x7, x5
adcs x6, x6, x13
adc x5, xzr, xzr
subs x14, x14, x7
sbcs x15, x15, x6
sbcs x16, x16, x5
sbcs x17, x17, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
lsl x7, x14, #32
add x14, x7, x14
mov x7, #-4294967295
umulh x7, x7, x14
mov x6, #4294967295
mul x5, x6, x14
umulh x6, x6, x14
adds x7, x7, x5
adcs x6, x6, x14
adc x5, xzr, xzr
subs x15, x15, x7
sbcs x16, x16, x6
sbcs x17, x17, x5
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
lsl x7, x15, #32
add x15, x7, x15
mov x7, #-4294967295
umulh x7, x7, x15
mov x6, #4294967295
mul x5, x6, x15
umulh x6, x6, x15
adds x7, x7, x5
adcs x6, x6, x15
adc x5, xzr, xzr
subs x16, x16, x7
sbcs x17, x17, x6
sbcs x12, x12, x5
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbc x15, x15, xzr
lsl x7, x16, #32
add x16, x7, x16
mov x7, #-4294967295
umulh x7, x7, x16
mov x6, #4294967295
mul x5, x6, x16
umulh x6, x6, x16
adds x7, x7, x5
adcs x6, x6, x16
adc x5, xzr, xzr
subs x17, x17, x7
sbcs x12, x12, x6
sbcs x13, x13, x5
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbc x16, x16, xzr
lsl x7, x17, #32
add x17, x7, x17
mov x7, #-4294967295
umulh x7, x7, x17
mov x6, #4294967295
mul x5, x6, x17
umulh x6, x6, x17
adds x7, x7, x5
adcs x6, x6, x17
adc x5, xzr, xzr
subs x12, x12, x7
sbcs x13, x13, x6
sbcs x14, x14, x5
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbc x17, x17, xzr
adds x12, x12, x19
adcs x13, x13, x20
adcs x14, x14, x21
adcs x15, x15, x22
adcs x16, x16, x2
adcs x17, x17, x1
adc x10, xzr, xzr
mov x11, #-4294967295
adds x19, x12, x11
mov x11, #4294967295
adcs x20, x13, x11
mov x11, #1
adcs x21, x14, x11
adcs x22, x15, xzr
adcs x2, x16, xzr
adcs x1, x17, xzr
adcs x10, x10, xzr
csel x12, x12, x19, eq
csel x13, x13, x20, eq
csel x14, x14, x21, eq
csel x15, x15, x22, eq
csel x16, x16, x2, eq
csel x17, x17, x1, eq
stp x12, x13, [sp, #96]
stp x14, x15, [sp, #112]
stp x16, x17, [sp, #128]
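// Modular addition with full reduction: input y coordinate ([x24, #48..95]) +
// input z coordinate ([x24, #96..143]) mod p_384 -> [sp, #240..287]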
ldp x5, x6, [x24, #48]
ldp x4, x3, [x24, #96]
adds x5, x5, x4
adcs x6, x6, x3
ldp x7, x8, [x24, #64]
ldp x4, x3, [x24, #112]
adcs x7, x7, x4
adcs x8, x8, x3
ldp x9, x10, [x24, #80]
ldp x4, x3, [x24, #128]
adcs x9, x9, x4
adcs x10, x10, x3
adc x3, xzr, xzr
mov x4, #4294967295
cmp x5, x4
mov x4, #-4294967296
sbcs xzr, x6, x4
mov x4, #-2
sbcs xzr, x7, x4
adcs xzr, x8, xzr
adcs xzr, x9, xzr
adcs xzr, x10, xzr
adcs x3, x3, xzr
csetm x3, ne
mov x4, #4294967295
and x4, x4, x3
subs x5, x5, x4
eor x4, x4, x3
sbcs x6, x6, x4
mov x4, #-2
and x4, x4, x3
sbcs x7, x7, x4
sbcs x8, x8, x3
sbcs x9, x9, x3
sbc x10, x10, x3
stp x5, x6, [sp, #240]
stp x7, x8, [sp, #256]
stp x9, x10, [sp, #272]
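// Montgomery squaring: [sp, #96..143]^2 -> [sp, #288..335]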
ldp x2, x3, [sp, #96]
mul x9, x2, x3
umulh x10, x2, x3
ldp x4, x5, [sp, #112]
mul x8, x2, x4
adds x10, x10, x8
mul x11, x2, x5
mul x8, x3, x4
adcs x11, x11, x8
umulh x12, x2, x5
mul x8, x3, x5
adcs x12, x12, x8
ldp x6, x7, [sp, #128]
mul x13, x2, x7
mul x8, x3, x6
adcs x13, x13, x8
umulh x14, x2, x7
mul x8, x3, x7
adcs x14, x14, x8
mul x15, x5, x6
adcs x15, x15, xzr
umulh x16, x5, x6
adc x16, x16, xzr
umulh x8, x2, x4
adds x11, x11, x8
umulh x8, x3, x4
adcs x12, x12, x8
umulh x8, x3, x5
adcs x13, x13, x8
umulh x8, x3, x6
adcs x14, x14, x8
umulh x8, x3, x7
adcs x15, x15, x8
adc x16, x16, xzr
mul x8, x2, x6
adds x12, x12, x8
mul x8, x4, x5
adcs x13, x13, x8
mul x8, x4, x6
adcs x14, x14, x8
mul x8, x4, x7
adcs x15, x15, x8
mul x8, x5, x7
adcs x16, x16, x8
mul x17, x6, x7
adcs x17, x17, xzr
umulh x19, x6, x7
adc x19, x19, xzr
umulh x8, x2, x6
adds x13, x13, x8
umulh x8, x4, x5
adcs x14, x14, x8
umulh x8, x4, x6
adcs x15, x15, x8
umulh x8, x4, x7
adcs x16, x16, x8
umulh x8, x5, x7
adcs x17, x17, x8
adc x19, x19, xzr
adds x9, x9, x9
adcs x10, x10, x10
adcs x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adcs x17, x17, x17
adcs x19, x19, x19
cset x20, hs
umulh x8, x2, x2
mul x2, x2, x2
adds x9, x9, x8
mul x8, x3, x3
adcs x10, x10, x8
umulh x8, x3, x3
adcs x11, x11, x8
mul x8, x4, x4
adcs x12, x12, x8
umulh x8, x4, x4
adcs x13, x13, x8
mul x8, x5, x5
adcs x14, x14, x8
umulh x8, x5, x5
adcs x15, x15, x8
mul x8, x6, x6
adcs x16, x16, x8
umulh x8, x6, x6
adcs x17, x17, x8
mul x8, x7, x7
adcs x19, x19, x8
umulh x8, x7, x7
adc x20, x20, x8
lsl x5, x2, #32
add x2, x5, x2
mov x5, #-4294967295
umulh x5, x5, x2
mov x4, #4294967295
mul x3, x4, x2
umulh x4, x4, x2
adds x5, x5, x3
adcs x4, x4, x2
adc x3, xzr, xzr
subs x9, x9, x5
sbcs x10, x10, x4
sbcs x11, x11, x3
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x2, x2, xzr
lsl x5, x9, #32
add x9, x5, x9
mov x5, #-4294967295
umulh x5, x5, x9
mov x4, #4294967295
mul x3, x4, x9
umulh x4, x4, x9
adds x5, x5, x3
adcs x4, x4, x9
adc x3, xzr, xzr
subs x10, x10, x5
sbcs x11, x11, x4
sbcs x12, x12, x3
sbcs x13, x13, xzr
sbcs x2, x2, xzr
sbc x9, x9, xzr
lsl x5, x10, #32
add x10, x5, x10
mov x5, #-4294967295
umulh x5, x5, x10
mov x4, #4294967295
mul x3, x4, x10
umulh x4, x4, x10
adds x5, x5, x3
adcs x4, x4, x10
adc x3, xzr, xzr
subs x11, x11, x5
sbcs x12, x12, x4
sbcs x13, x13, x3
sbcs x2, x2, xzr
sbcs x9, x9, xzr
sbc x10, x10, xzr
lsl x5, x11, #32
add x11, x5, x11
mov x5, #-4294967295
umulh x5, x5, x11
mov x4, #4294967295
mul x3, x4, x11
umulh x4, x4, x11
adds x5, x5, x3
adcs x4, x4, x11
adc x3, xzr, xzr
subs x12, x12, x5
sbcs x13, x13, x4
sbcs x2, x2, x3
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbc x11, x11, xzr
lsl x5, x12, #32
add x12, x5, x12
mov x5, #-4294967295
umulh x5, x5, x12
mov x4, #4294967295
mul x3, x4, x12
umulh x4, x4, x12
adds x5, x5, x3
adcs x4, x4, x12
adc x3, xzr, xzr
subs x13, x13, x5
sbcs x2, x2, x4
sbcs x9, x9, x3
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbc x12, x12, xzr
lsl x5, x13, #32
add x13, x5, x13
mov x5, #-4294967295
umulh x5, x5, x13
mov x4, #4294967295
mul x3, x4, x13
umulh x4, x4, x13
adds x5, x5, x3
adcs x4, x4, x13
adc x3, xzr, xzr
subs x2, x2, x5
sbcs x9, x9, x4
sbcs x10, x10, x3
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
adds x2, x2, x14
adcs x9, x9, x15
adcs x10, x10, x16
adcs x11, x11, x17
adcs x12, x12, x19
adcs x13, x13, x20
adc x6, xzr, xzr
mov x8, #-4294967295
adds x14, x2, x8
mov x8, #4294967295
adcs x15, x9, x8
mov x8, #1
adcs x16, x10, x8
adcs x17, x11, xzr
adcs x19, x12, xzr
adcs x20, x13, xzr
adcs x6, x6, xzr
csel x2, x2, x14, eq
csel x9, x9, x15, eq
csel x10, x10, x16, eq
csel x11, x11, x17, eq
csel x12, x12, x19, eq
csel x13, x13, x20, eq
stp x2, x9, [sp, #288]
stp x10, x11, [sp, #304]
stp x12, x13, [sp, #320]
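// Montgomery multiplication: input x coordinate ([x24, #0..47]) * [sp, #48..95] -> [sp, #144..191]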
ldp x3, x4, [x24]
ldp x5, x6, [sp, #48]
mul x12, x3, x5
umulh x13, x3, x5
mul x11, x3, x6
umulh x14, x3, x6
adds x13, x13, x11
ldp x7, x8, [sp, #64]
mul x11, x3, x7
umulh x15, x3, x7
adcs x14, x14, x11
mul x11, x3, x8
umulh x16, x3, x8
adcs x15, x15, x11
ldp x9, x10, [sp, #80]
mul x11, x3, x9
umulh x17, x3, x9
adcs x16, x16, x11
mul x11, x3, x10
umulh x19, x3, x10
adcs x17, x17, x11
adc x19, x19, xzr
mul x11, x4, x5
adds x13, x13, x11
mul x11, x4, x6
adcs x14, x14, x11
mul x11, x4, x7
adcs x15, x15, x11
mul x11, x4, x8
adcs x16, x16, x11
mul x11, x4, x9
adcs x17, x17, x11
mul x11, x4, x10
adcs x19, x19, x11
cset x20, hs
umulh x11, x4, x5
adds x14, x14, x11
umulh x11, x4, x6
adcs x15, x15, x11
umulh x11, x4, x7
adcs x16, x16, x11
umulh x11, x4, x8
adcs x17, x17, x11
umulh x11, x4, x9
adcs x19, x19, x11
umulh x11, x4, x10
adc x20, x20, x11
ldp x3, x4, [x24, #16]
mul x11, x3, x5
adds x14, x14, x11
mul x11, x3, x6
adcs x15, x15, x11
mul x11, x3, x7
adcs x16, x16, x11
mul x11, x3, x8
adcs x17, x17, x11
mul x11, x3, x9
adcs x19, x19, x11
mul x11, x3, x10
adcs x20, x20, x11
cset x21, hs
umulh x11, x3, x5
adds x15, x15, x11
umulh x11, x3, x6
adcs x16, x16, x11
umulh x11, x3, x7
adcs x17, x17, x11
umulh x11, x3, x8
adcs x19, x19, x11
umulh x11, x3, x9
adcs x20, x20, x11
umulh x11, x3, x10
adc x21, x21, x11
mul x11, x4, x5
adds x15, x15, x11
mul x11, x4, x6
adcs x16, x16, x11
mul x11, x4, x7
adcs x17, x17, x11
mul x11, x4, x8
adcs x19, x19, x11
mul x11, x4, x9
adcs x20, x20, x11
mul x11, x4, x10
adcs x21, x21, x11
cset x22, hs
umulh x11, x4, x5
adds x16, x16, x11
umulh x11, x4, x6
adcs x17, x17, x11
umulh x11, x4, x7
adcs x19, x19, x11
umulh x11, x4, x8
adcs x20, x20, x11
umulh x11, x4, x9
adcs x21, x21, x11
umulh x11, x4, x10
adc x22, x22, x11
ldp x3, x4, [x24, #32]
mul x11, x3, x5
adds x16, x16, x11
mul x11, x3, x6
adcs x17, x17, x11
mul x11, x3, x7
adcs x19, x19, x11
mul x11, x3, x8
adcs x20, x20, x11
mul x11, x3, x9
adcs x21, x21, x11
mul x11, x3, x10
adcs x22, x22, x11
cset x2, hs
umulh x11, x3, x5
adds x17, x17, x11
umulh x11, x3, x6
adcs x19, x19, x11
umulh x11, x3, x7
adcs x20, x20, x11
umulh x11, x3, x8
adcs x21, x21, x11
umulh x11, x3, x9
adcs x22, x22, x11
umulh x11, x3, x10
adc x2, x2, x11
mul x11, x4, x5
adds x17, x17, x11
mul x11, x4, x6
adcs x19, x19, x11
mul x11, x4, x7
adcs x20, x20, x11
mul x11, x4, x8
adcs x21, x21, x11
mul x11, x4, x9
adcs x22, x22, x11
mul x11, x4, x10
adcs x2, x2, x11
cset x1, hs
umulh x11, x4, x5
adds x19, x19, x11
umulh x11, x4, x6
adcs x20, x20, x11
umulh x11, x4, x7
adcs x21, x21, x11
umulh x11, x4, x8
adcs x22, x22, x11
umulh x11, x4, x9
adcs x2, x2, x11
umulh x11, x4, x10
adc x1, x1, x11
lsl x7, x12, #32
add x12, x7, x12
mov x7, #-4294967295
umulh x7, x7, x12
mov x6, #4294967295
mul x5, x6, x12
umulh x6, x6, x12
adds x7, x7, x5
adcs x6, x6, x12
adc x5, xzr, xzr
subs x13, x13, x7
sbcs x14, x14, x6
sbcs x15, x15, x5
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x12, x12, xzr
lsl x7, x13, #32
add x13, x7, x13
mov x7, #-4294967295
umulh x7, x7, x13
mov x6, #4294967295
mul x5, x6, x13
umulh x6, x6, x13
adds x7, x7, x5
adcs x6, x6, x13
adc x5, xzr, xzr
subs x14, x14, x7
sbcs x15, x15, x6
sbcs x16, x16, x5
sbcs x17, x17, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
lsl x7, x14, #32
add x14, x7, x14
mov x7, #-4294967295
umulh x7, x7, x14
mov x6, #4294967295
mul x5, x6, x14
umulh x6, x6, x14
adds x7, x7, x5
adcs x6, x6, x14
adc x5, xzr, xzr
subs x15, x15, x7
sbcs x16, x16, x6
sbcs x17, x17, x5
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
lsl x7, x15, #32
add x15, x7, x15
mov x7, #-4294967295
umulh x7, x7, x15
mov x6, #4294967295
mul x5, x6, x15
umulh x6, x6, x15
adds x7, x7, x5
adcs x6, x6, x15
adc x5, xzr, xzr
subs x16, x16, x7
sbcs x17, x17, x6
sbcs x12, x12, x5
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbc x15, x15, xzr
lsl x7, x16, #32
add x16, x7, x16
mov x7, #-4294967295
umulh x7, x7, x16
mov x6, #4294967295
mul x5, x6, x16
umulh x6, x6, x16
adds x7, x7, x5
adcs x6, x6, x16
adc x5, xzr, xzr
subs x17, x17, x7
sbcs x12, x12, x6
sbcs x13, x13, x5
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbc x16, x16, xzr
lsl x7, x17, #32
add x17, x7, x17
mov x7, #-4294967295
umulh x7, x7, x17
mov x6, #4294967295
mul x5, x6, x17
umulh x6, x6, x17
adds x7, x7, x5
adcs x6, x6, x17
adc x5, xzr, xzr
subs x12, x12, x7
sbcs x13, x13, x6
sbcs x14, x14, x5
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbc x17, x17, xzr
adds x12, x12, x19
adcs x13, x13, x20
adcs x14, x14, x21
adcs x15, x15, x22
adcs x16, x16, x2
adcs x17, x17, x1
adc x10, xzr, xzr
mov x11, #-4294967295
adds x19, x12, x11
mov x11, #4294967295
adcs x20, x13, x11
mov x11, #1
adcs x21, x14, x11
adcs x22, x15, xzr
adcs x2, x16, xzr
adcs x1, x17, xzr
adcs x10, x10, xzr
csel x12, x12, x19, eq
csel x13, x13, x20, eq
csel x14, x14, x21, eq
csel x15, x15, x22, eq
csel x16, x16, x2, eq
csel x17, x17, x1, eq
stp x12, x13, [sp, #144]
stp x14, x15, [sp, #160]
stp x16, x17, [sp, #176]
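// Montgomery squaring: [sp, #240..287]^2 -> [sp, #192..239]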
ldp x2, x3, [sp, #240]
mul x9, x2, x3
umulh x10, x2, x3
ldp x4, x5, [sp, #256]
mul x8, x2, x4
adds x10, x10, x8
mul x11, x2, x5
mul x8, x3, x4
adcs x11, x11, x8
umulh x12, x2, x5
mul x8, x3, x5
adcs x12, x12, x8
ldp x6, x7, [sp, #272]
mul x13, x2, x7
mul x8, x3, x6
adcs x13, x13, x8
umulh x14, x2, x7
mul x8, x3, x7
adcs x14, x14, x8
mul x15, x5, x6
adcs x15, x15, xzr
umulh x16, x5, x6
adc x16, x16, xzr
umulh x8, x2, x4
adds x11, x11, x8
umulh x8, x3, x4
adcs x12, x12, x8
umulh x8, x3, x5
adcs x13, x13, x8
umulh x8, x3, x6
adcs x14, x14, x8
umulh x8, x3, x7
adcs x15, x15, x8
adc x16, x16, xzr
mul x8, x2, x6
adds x12, x12, x8
mul x8, x4, x5
adcs x13, x13, x8
mul x8, x4, x6
adcs x14, x14, x8
mul x8, x4, x7
adcs x15, x15, x8
mul x8, x5, x7
adcs x16, x16, x8
mul x17, x6, x7
adcs x17, x17, xzr
umulh x19, x6, x7
adc x19, x19, xzr
umulh x8, x2, x6
adds x13, x13, x8
umulh x8, x4, x5
adcs x14, x14, x8
umulh x8, x4, x6
adcs x15, x15, x8
umulh x8, x4, x7
adcs x16, x16, x8
umulh x8, x5, x7
adcs x17, x17, x8
adc x19, x19, xzr
adds x9, x9, x9
adcs x10, x10, x10
adcs x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adcs x17, x17, x17
adcs x19, x19, x19
cset x20, hs
umulh x8, x2, x2
mul x2, x2, x2
adds x9, x9, x8
mul x8, x3, x3
adcs x10, x10, x8
umulh x8, x3, x3
adcs x11, x11, x8
mul x8, x4, x4
adcs x12, x12, x8
umulh x8, x4, x4
adcs x13, x13, x8
mul x8, x5, x5
adcs x14, x14, x8
umulh x8, x5, x5
adcs x15, x15, x8
mul x8, x6, x6
adcs x16, x16, x8
umulh x8, x6, x6
adcs x17, x17, x8
mul x8, x7, x7
adcs x19, x19, x8
umulh x8, x7, x7
adc x20, x20, x8
lsl x5, x2, #32
add x2, x5, x2
mov x5, #-4294967295
umulh x5, x5, x2
mov x4, #4294967295
mul x3, x4, x2
umulh x4, x4, x2
adds x5, x5, x3
adcs x4, x4, x2
adc x3, xzr, xzr
subs x9, x9, x5
sbcs x10, x10, x4
sbcs x11, x11, x3
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x2, x2, xzr
lsl x5, x9, #32
add x9, x5, x9
mov x5, #-4294967295
umulh x5, x5, x9
mov x4, #4294967295
mul x3, x4, x9
umulh x4, x4, x9
adds x5, x5, x3
adcs x4, x4, x9
adc x3, xzr, xzr
subs x10, x10, x5
sbcs x11, x11, x4
sbcs x12, x12, x3
sbcs x13, x13, xzr
sbcs x2, x2, xzr
sbc x9, x9, xzr
lsl x5, x10, #32
add x10, x5, x10
mov x5, #-4294967295
umulh x5, x5, x10
mov x4, #4294967295
mul x3, x4, x10
umulh x4, x4, x10
adds x5, x5, x3
adcs x4, x4, x10
adc x3, xzr, xzr
subs x11, x11, x5
sbcs x12, x12, x4
sbcs x13, x13, x3
sbcs x2, x2, xzr
sbcs x9, x9, xzr
sbc x10, x10, xzr
lsl x5, x11, #32
add x11, x5, x11
mov x5, #-4294967295
umulh x5, x5, x11
mov x4, #4294967295
mul x3, x4, x11
umulh x4, x4, x11
adds x5, x5, x3
adcs x4, x4, x11
adc x3, xzr, xzr
subs x12, x12, x5
sbcs x13, x13, x4
sbcs x2, x2, x3
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbc x11, x11, xzr
lsl x5, x12, #32
add x12, x5, x12
mov x5, #-4294967295
umulh x5, x5, x12
mov x4, #4294967295
mul x3, x4, x12
umulh x4, x4, x12
adds x5, x5, x3
adcs x4, x4, x12
adc x3, xzr, xzr
subs x13, x13, x5
sbcs x2, x2, x4
sbcs x9, x9, x3
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbc x12, x12, xzr
lsl x5, x13, #32
add x13, x5, x13
mov x5, #-4294967295
umulh x5, x5, x13
mov x4, #4294967295
mul x3, x4, x13
umulh x4, x4, x13
adds x5, x5, x3
adcs x4, x4, x13
adc x3, xzr, xzr
subs x2, x2, x5
sbcs x9, x9, x4
sbcs x10, x10, x3
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
adds x2, x2, x14
adcs x9, x9, x15
adcs x10, x10, x16
adcs x11, x11, x17
adcs x12, x12, x19
adcs x13, x13, x20
adc x6, xzr, xzr
mov x8, #-4294967295
adds x14, x2, x8
mov x8, #4294967295
adcs x15, x9, x8
mov x8, #1
adcs x16, x10, x8
adcs x17, x11, xzr
adcs x19, x12, xzr
adcs x20, x13, xzr
adcs x6, x6, xzr
csel x2, x2, x14, eq
csel x9, x9, x15, eq
csel x10, x10, x16, eq
csel x11, x11, x17, eq
csel x12, x12, x19, eq
csel x13, x13, x20, eq
stp x2, x9, [sp, #192]
stp x10, x11, [sp, #208]
stp x12, x13, [sp, #224]
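// Combined scalar operation: 12 * [sp, #144..191] - 9 * [sp, #288..335]
// mod p_384 (formed via the modular negation of [sp, #288..335]), with the
// result written back to [sp, #288..335]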
ldp x0, x1, [sp, #288]
mov x6, #4294967295
subs x6, x6, x0
mov x7, #-4294967296
sbcs x7, x7, x1
ldp x0, x1, [sp, #304]
mov x8, #-2
sbcs x8, x8, x0
mov x13, #-1
sbcs x9, x13, x1
ldp x0, x1, [sp, #320]
sbcs x10, x13, x0
sbc x11, x13, x1
mov x12, #9
mul x0, x12, x6
mul x1, x12, x7
mul x2, x12, x8
mul x3, x12, x9
mul x4, x12, x10
mul x5, x12, x11
umulh x6, x12, x6
umulh x7, x12, x7
umulh x8, x12, x8
umulh x9, x12, x9
umulh x10, x12, x10
umulh x12, x12, x11
adds x1, x1, x6
adcs x2, x2, x7
adcs x3, x3, x8
adcs x4, x4, x9
adcs x5, x5, x10
mov x6, #1
adc x6, x12, x6
ldp x8, x9, [sp, #144]
ldp x10, x11, [sp, #160]
ldp x12, x13, [sp, #176]
mov x14, #12
mul x15, x14, x8
umulh x8, x14, x8
adds x0, x0, x15
mul x15, x14, x9
umulh x9, x14, x9
adcs x1, x1, x15
mul x15, x14, x10
umulh x10, x14, x10
adcs x2, x2, x15
mul x15, x14, x11
umulh x11, x14, x11
adcs x3, x3, x15
mul x15, x14, x12
umulh x12, x14, x12
adcs x4, x4, x15
mul x15, x14, x13
umulh x13, x14, x13
adcs x5, x5, x15
adc x6, x6, xzr
adds x1, x1, x8
adcs x2, x2, x9
adcs x3, x3, x10
adcs x4, x4, x11
adcs x5, x5, x12
adcs x6, x6, x13
lsl x7, x6, #32
subs x8, x6, x7
sbc x7, x7, xzr
adds x0, x0, x8
adcs x1, x1, x7
adcs x2, x2, x6
adcs x3, x3, xzr
adcs x4, x4, xzr
adcs x5, x5, xzr
csetm x6, lo
mov x7, #4294967295
and x7, x7, x6
adds x0, x0, x7
eor x7, x7, x6
adcs x1, x1, x7
mov x7, #-2
and x7, x7, x6
adcs x2, x2, x7
adcs x3, x3, x6
adcs x4, x4, x6
adc x5, x5, x6
stp x0, x1, [sp, #288]
stp x2, x3, [sp, #304]
stp x4, x5, [sp, #320]
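// Modular subtraction: [sp, #192..239] - [sp, #0..47] mod p_384 -> [sp, #240..287]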
ldp x5, x6, [sp, #192]
ldp x4, x3, [sp]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #208]
ldp x4, x3, [sp, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #224]
ldp x4, x3, [sp, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, lo
mov x4, #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #240]
stp x7, x8, [sp, #256]
stp x9, x10, [sp, #272]
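// Montgomery squaring: [sp, #48..95]^2 -> [sp, #192..239]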
ldp x2, x3, [sp, #48]
mul x9, x2, x3
umulh x10, x2, x3
ldp x4, x5, [sp, #64]
mul x8, x2, x4
adds x10, x10, x8
mul x11, x2, x5
mul x8, x3, x4
adcs x11, x11, x8
umulh x12, x2, x5
mul x8, x3, x5
adcs x12, x12, x8
ldp x6, x7, [sp, #80]
mul x13, x2, x7
mul x8, x3, x6
adcs x13, x13, x8
umulh x14, x2, x7
mul x8, x3, x7
adcs x14, x14, x8
mul x15, x5, x6
adcs x15, x15, xzr
umulh x16, x5, x6
adc x16, x16, xzr
umulh x8, x2, x4
adds x11, x11, x8
umulh x8, x3, x4
adcs x12, x12, x8
umulh x8, x3, x5
adcs x13, x13, x8
umulh x8, x3, x6
adcs x14, x14, x8
umulh x8, x3, x7
adcs x15, x15, x8
adc x16, x16, xzr
mul x8, x2, x6
adds x12, x12, x8
mul x8, x4, x5
adcs x13, x13, x8
mul x8, x4, x6
adcs x14, x14, x8
mul x8, x4, x7
adcs x15, x15, x8
mul x8, x5, x7
adcs x16, x16, x8
mul x17, x6, x7
adcs x17, x17, xzr
umulh x19, x6, x7
adc x19, x19, xzr
umulh x8, x2, x6
adds x13, x13, x8
umulh x8, x4, x5
adcs x14, x14, x8
umulh x8, x4, x6
adcs x15, x15, x8
umulh x8, x4, x7
adcs x16, x16, x8
umulh x8, x5, x7
adcs x17, x17, x8
adc x19, x19, xzr
adds x9, x9, x9
adcs x10, x10, x10
adcs x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adcs x17, x17, x17
adcs x19, x19, x19
cset x20, hs
umulh x8, x2, x2
mul x2, x2, x2
adds x9, x9, x8
mul x8, x3, x3
adcs x10, x10, x8
umulh x8, x3, x3
adcs x11, x11, x8
mul x8, x4, x4
adcs x12, x12, x8
umulh x8, x4, x4
adcs x13, x13, x8
mul x8, x5, x5
adcs x14, x14, x8
umulh x8, x5, x5
adcs x15, x15, x8
mul x8, x6, x6
adcs x16, x16, x8
umulh x8, x6, x6
adcs x17, x17, x8
mul x8, x7, x7
adcs x19, x19, x8
umulh x8, x7, x7
adc x20, x20, x8
lsl x5, x2, #32
add x2, x5, x2
mov x5, #-4294967295
umulh x5, x5, x2
mov x4, #4294967295
mul x3, x4, x2
umulh x4, x4, x2
adds x5, x5, x3
adcs x4, x4, x2
adc x3, xzr, xzr
subs x9, x9, x5
sbcs x10, x10, x4
sbcs x11, x11, x3
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x2, x2, xzr
lsl x5, x9, #32
add x9, x5, x9
mov x5, #-4294967295
umulh x5, x5, x9
mov x4, #4294967295
mul x3, x4, x9
umulh x4, x4, x9
adds x5, x5, x3
adcs x4, x4, x9
adc x3, xzr, xzr
subs x10, x10, x5
sbcs x11, x11, x4
sbcs x12, x12, x3
sbcs x13, x13, xzr
sbcs x2, x2, xzr
sbc x9, x9, xzr
lsl x5, x10, #32
add x10, x5, x10
mov x5, #-4294967295
umulh x5, x5, x10
mov x4, #4294967295
mul x3, x4, x10
umulh x4, x4, x10
adds x5, x5, x3
adcs x4, x4, x10
adc x3, xzr, xzr
subs x11, x11, x5
sbcs x12, x12, x4
sbcs x13, x13, x3
sbcs x2, x2, xzr
sbcs x9, x9, xzr
sbc x10, x10, xzr
lsl x5, x11, #32
add x11, x5, x11
mov x5, #-4294967295
umulh x5, x5, x11
mov x4, #4294967295
mul x3, x4, x11
umulh x4, x4, x11
adds x5, x5, x3
adcs x4, x4, x11
adc x3, xzr, xzr
subs x12, x12, x5
sbcs x13, x13, x4
sbcs x2, x2, x3
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbc x11, x11, xzr
lsl x5, x12, #32
add x12, x5, x12
mov x5, #-4294967295
umulh x5, x5, x12
mov x4, #4294967295
mul x3, x4, x12
umulh x4, x4, x12
adds x5, x5, x3
adcs x4, x4, x12
adc x3, xzr, xzr
subs x13, x13, x5
sbcs x2, x2, x4
sbcs x9, x9, x3
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbc x12, x12, xzr
lsl x5, x13, #32
add x13, x5, x13
mov x5, #-4294967295
umulh x5, x5, x13
mov x4, #4294967295
mul x3, x4, x13
umulh x4, x4, x13
adds x5, x5, x3
adcs x4, x4, x13
adc x3, xzr, xzr
subs x2, x2, x5
sbcs x9, x9, x4
sbcs x10, x10, x3
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
adds x2, x2, x14
adcs x9, x9, x15
adcs x10, x10, x16
adcs x11, x11, x17
adcs x12, x12, x19
adcs x13, x13, x20
adc x6, xzr, xzr
mov x8, #-4294967295
adds x14, x2, x8
mov x8, #4294967295
adcs x15, x9, x8
mov x8, #1
adcs x16, x10, x8
adcs x17, x11, xzr
adcs x19, x12, xzr
adcs x20, x13, xzr
adcs x6, x6, xzr
csel x2, x2, x14, eq
csel x9, x9, x15, eq
csel x10, x10, x16, eq
csel x11, x11, x17, eq
csel x12, x12, x19, eq
csel x13, x13, x20, eq
stp x2, x9, [sp, #192]
stp x10, x11, [sp, #208]
stp x12, x13, [sp, #224]
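// Modular subtraction: [sp, #240..287] - [sp, #48..95] mod p_384, stored as
// the z coordinate of the output point ([x23, #96..143])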
ldp x5, x6, [sp, #240]
ldp x4, x3, [sp, #48]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #256]
ldp x4, x3, [sp, #64]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #272]
ldp x4, x3, [sp, #80]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, lo
mov x4, #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [x23, #96]
stp x7, x8, [x23, #112]
stp x9, x10, [x23, #128]
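// Montgomery multiplication: [sp, #288..335] * [sp, #96..143] -> [sp, #240..287]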
ldp x3, x4, [sp, #288]
ldp x5, x6, [sp, #96]
mul x12, x3, x5
umulh x13, x3, x5
mul x11, x3, x6
umulh x14, x3, x6
adds x13, x13, x11
ldp x7, x8, [sp, #112]
mul x11, x3, x7
umulh x15, x3, x7
adcs x14, x14, x11
mul x11, x3, x8
umulh x16, x3, x8
adcs x15, x15, x11
ldp x9, x10, [sp, #128]
mul x11, x3, x9
umulh x17, x3, x9
adcs x16, x16, x11
mul x11, x3, x10
umulh x19, x3, x10
adcs x17, x17, x11
adc x19, x19, xzr
mul x11, x4, x5
adds x13, x13, x11
mul x11, x4, x6
adcs x14, x14, x11
mul x11, x4, x7
adcs x15, x15, x11
mul x11, x4, x8
adcs x16, x16, x11
mul x11, x4, x9
adcs x17, x17, x11
mul x11, x4, x10
adcs x19, x19, x11
cset x20, hs
umulh x11, x4, x5
adds x14, x14, x11
umulh x11, x4, x6
adcs x15, x15, x11
umulh x11, x4, x7
adcs x16, x16, x11
umulh x11, x4, x8
adcs x17, x17, x11
umulh x11, x4, x9
adcs x19, x19, x11
umulh x11, x4, x10
adc x20, x20, x11
ldp x3, x4, [sp, #304]
mul x11, x3, x5
adds x14, x14, x11
mul x11, x3, x6
adcs x15, x15, x11
mul x11, x3, x7
adcs x16, x16, x11
mul x11, x3, x8
adcs x17, x17, x11
mul x11, x3, x9
adcs x19, x19, x11
mul x11, x3, x10
adcs x20, x20, x11
cset x21, hs
umulh x11, x3, x5
adds x15, x15, x11
umulh x11, x3, x6
adcs x16, x16, x11
umulh x11, x3, x7
adcs x17, x17, x11
umulh x11, x3, x8
adcs x19, x19, x11
umulh x11, x3, x9
adcs x20, x20, x11
umulh x11, x3, x10
adc x21, x21, x11
mul x11, x4, x5
adds x15, x15, x11
mul x11, x4, x6
adcs x16, x16, x11
mul x11, x4, x7
adcs x17, x17, x11
mul x11, x4, x8
adcs x19, x19, x11
mul x11, x4, x9
adcs x20, x20, x11
mul x11, x4, x10
adcs x21, x21, x11
cset x22, hs
umulh x11, x4, x5
adds x16, x16, x11
umulh x11, x4, x6
adcs x17, x17, x11
umulh x11, x4, x7
adcs x19, x19, x11
umulh x11, x4, x8
adcs x20, x20, x11
umulh x11, x4, x9
adcs x21, x21, x11
umulh x11, x4, x10
adc x22, x22, x11
ldp x3, x4, [sp, #320]
mul x11, x3, x5
adds x16, x16, x11
mul x11, x3, x6
adcs x17, x17, x11
mul x11, x3, x7
adcs x19, x19, x11
mul x11, x3, x8
adcs x20, x20, x11
mul x11, x3, x9
adcs x21, x21, x11
mul x11, x3, x10
adcs x22, x22, x11
cset x2, hs
umulh x11, x3, x5
adds x17, x17, x11
umulh x11, x3, x6
adcs x19, x19, x11
umulh x11, x3, x7
adcs x20, x20, x11
umulh x11, x3, x8
adcs x21, x21, x11
umulh x11, x3, x9
adcs x22, x22, x11
umulh x11, x3, x10
adc x2, x2, x11
mul x11, x4, x5
adds x17, x17, x11
mul x11, x4, x6
adcs x19, x19, x11
mul x11, x4, x7
adcs x20, x20, x11
mul x11, x4, x8
adcs x21, x21, x11
mul x11, x4, x9
adcs x22, x22, x11
mul x11, x4, x10
adcs x2, x2, x11
cset x1, hs
umulh x11, x4, x5
adds x19, x19, x11
umulh x11, x4, x6
adcs x20, x20, x11
umulh x11, x4, x7
adcs x21, x21, x11
umulh x11, x4, x8
adcs x22, x22, x11
umulh x11, x4, x9
adcs x2, x2, x11
umulh x11, x4, x10
adc x1, x1, x11
lsl x7, x12, #32
add x12, x7, x12
mov x7, #-4294967295
umulh x7, x7, x12
mov x6, #4294967295
mul x5, x6, x12
umulh x6, x6, x12
adds x7, x7, x5
adcs x6, x6, x12
adc x5, xzr, xzr
subs x13, x13, x7
sbcs x14, x14, x6
sbcs x15, x15, x5
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x12, x12, xzr
lsl x7, x13, #32
add x13, x7, x13
mov x7, #-4294967295
umulh x7, x7, x13
mov x6, #4294967295
mul x5, x6, x13
umulh x6, x6, x13
adds x7, x7, x5
adcs x6, x6, x13
adc x5, xzr, xzr
subs x14, x14, x7
sbcs x15, x15, x6
sbcs x16, x16, x5
sbcs x17, x17, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
lsl x7, x14, #32
add x14, x7, x14
mov x7, #-4294967295
umulh x7, x7, x14
mov x6, #4294967295
mul x5, x6, x14
umulh x6, x6, x14
adds x7, x7, x5
adcs x6, x6, x14
adc x5, xzr, xzr
subs x15, x15, x7
sbcs x16, x16, x6
sbcs x17, x17, x5
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
lsl x7, x15, #32
add x15, x7, x15
mov x7, #-4294967295
umulh x7, x7, x15
mov x6, #4294967295
mul x5, x6, x15
umulh x6, x6, x15
adds x7, x7, x5
adcs x6, x6, x15
adc x5, xzr, xzr
subs x16, x16, x7
sbcs x17, x17, x6
sbcs x12, x12, x5
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbc x15, x15, xzr
lsl x7, x16, #32
add x16, x7, x16
mov x7, #-4294967295
umulh x7, x7, x16
mov x6, #4294967295
mul x5, x6, x16
umulh x6, x6, x16
adds x7, x7, x5
adcs x6, x6, x16
adc x5, xzr, xzr
subs x17, x17, x7
sbcs x12, x12, x6
sbcs x13, x13, x5
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbc x16, x16, xzr
lsl x7, x17, #32
add x17, x7, x17
mov x7, #-4294967295
umulh x7, x7, x17
mov x6, #4294967295
mul x5, x6, x17
umulh x6, x6, x17
adds x7, x7, x5
adcs x6, x6, x17
adc x5, xzr, xzr
subs x12, x12, x7
sbcs x13, x13, x6
sbcs x14, x14, x5
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbc x17, x17, xzr
adds x12, x12, x19
adcs x13, x13, x20
adcs x14, x14, x21
adcs x15, x15, x22
adcs x16, x16, x2
adcs x17, x17, x1
adc x10, xzr, xzr
mov x11, #-4294967295
adds x19, x12, x11
mov x11, #4294967295
adcs x20, x13, x11
mov x11, #1
adcs x21, x14, x11
adcs x22, x15, xzr
adcs x2, x16, xzr
adcs x1, x17, xzr
adcs x10, x10, xzr
csel x12, x12, x19, eq
csel x13, x13, x20, eq
csel x14, x14, x21, eq
csel x15, x15, x22, eq
csel x16, x16, x2, eq
csel x17, x17, x1, eq
stp x12, x13, [sp, #240]
stp x14, x15, [sp, #256]
stp x16, x17, [sp, #272]
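// Compute 4 * [sp, #144..191] - [sp, #288..335] mod p_384 and store it as the
// x coordinate of the output point ([x23, #0..47])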
ldp x1, x2, [sp, #144]
ldp x3, x4, [sp, #160]
ldp x5, x6, [sp, #176]
lsl x0, x1, #2
ldp x7, x8, [sp, #288]
subs x0, x0, x7
extr x1, x2, x1, #62
sbcs x1, x1, x8
ldp x7, x8, [sp, #304]
extr x2, x3, x2, #62
sbcs x2, x2, x7
extr x3, x4, x3, #62
sbcs x3, x3, x8
extr x4, x5, x4, #62
ldp x7, x8, [sp, #320]
sbcs x4, x4, x7
extr x5, x6, x5, #62
sbcs x5, x5, x8
lsr x6, x6, #62
adc x6, x6, xzr
lsl x7, x6, #32
subs x8, x6, x7
sbc x7, x7, xzr
adds x0, x0, x8
adcs x1, x1, x7
adcs x2, x2, x6
adcs x3, x3, xzr
adcs x4, x4, xzr
adcs x5, x5, xzr
csetm x8, lo
mov x9, #4294967295
and x9, x9, x8
adds x0, x0, x9
eor x9, x9, x8
adcs x1, x1, x9
mov x9, #-2
and x9, x9, x8
adcs x2, x2, x9
adcs x3, x3, x8
adcs x4, x4, x8
adc x5, x5, x8
stp x0, x1, [x23]
stp x2, x3, [x23, #16]
stp x4, x5, [x23, #32]
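// Compute 3 * [sp, #240..287] - 8 * [sp, #192..239] mod p_384 (via the modular
// negation of [sp, #192..239]) and store it as the y coordinate of the output
// point ([x23, #48..95])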
ldp x0, x1, [sp, #192]
mov x6, #4294967295
subs x6, x6, x0
mov x7, #-4294967296
sbcs x7, x7, x1
ldp x0, x1, [sp, #208]
mov x8, #-2
sbcs x8, x8, x0
mov x13, #-1
sbcs x9, x13, x1
ldp x0, x1, [sp, #224]
sbcs x10, x13, x0
sbc x11, x13, x1
lsl x0, x6, #3
extr x1, x7, x6, #61
extr x2, x8, x7, #61
extr x3, x9, x8, #61
extr x4, x10, x9, #61
extr x5, x11, x10, #61
lsr x6, x11, #61
add x6, x6, #1
ldp x8, x9, [sp, #240]
ldp x10, x11, [sp, #256]
ldp x12, x13, [sp, #272]
mov x14, #3
mul x15, x14, x8
umulh x8, x14, x8
adds x0, x0, x15
mul x15, x14, x9
umulh x9, x14, x9
adcs x1, x1, x15
mul x15, x14, x10
umulh x10, x14, x10
adcs x2, x2, x15
mul x15, x14, x11
umulh x11, x14, x11
adcs x3, x3, x15
mul x15, x14, x12
umulh x12, x14, x12
adcs x4, x4, x15
mul x15, x14, x13
umulh x13, x14, x13
adcs x5, x5, x15
adc x6, x6, xzr
adds x1, x1, x8
adcs x2, x2, x9
adcs x3, x3, x10
adcs x4, x4, x11
adcs x5, x5, x12
adcs x6, x6, x13
lsl x7, x6, #32
subs x8, x6, x7
sbc x7, x7, xzr
adds x0, x0, x8
adcs x1, x1, x7
adcs x2, x2, x6
adcs x3, x3, xzr
adcs x4, x4, xzr
adcs x5, x5, xzr
csetm x6, lo
mov x7, #4294967295
and x7, x7, x6
adds x0, x0, x7
eor x7, x7, x6
adcs x1, x1, x7
mov x7, #-2
and x7, x7, x6
adcs x2, x2, x7
adcs x3, x3, x6
adcs x4, x4, x6
adc x5, x5, x6
stp x0, x1, [x23, #48]
stp x2, x3, [x23, #64]
stp x4, x5, [x23, #80]
add sp, sp, #336
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point addition on NIST curve P-384 in Montgomery-Jacobian coordinates
//
// extern void p384_montjadd
// (uint64_t p3[static 18],uint64_t p1[static 18],uint64_t p2[static 18]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2
// ----------------------------------------------------------------------------
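// As a rough usage sketch (illustrative only, not part of this file): the
// caller supplies three 18-word buffers, with both inputs already holding
// Montgomery-domain Jacobian coordinates, for example:
//
//   uint64_t p1[18], p2[18], p3[18];   // (x', y', z') triples, 6 words each
//   /* ... fill p1 and p2 with Montgomery-form coordinates ... */
//   p384_montjadd(p3, p1, p2);         // p3 := p1 + p2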
#include "_internal_s2n_bignum.h"
// This is functionally equivalent to p384_montjadd in unopt/p384_montjadd.S.
// This is the result of doing the following sequence of optimizations:
// 1. Function inlining
// 2. Eliminating redundant load/store instructions
// 3. Folding (add addr, const) + load/store
// Function inlining is done manually. The second and third optimizations are
// done by a script.
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjadd)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjadd)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 48
// 7 NUMSIZEs for the point operation, one extra NUMSIZE for field operations
#define NSPACE (NUMSIZE*8)
S2N_BN_SYMBOL(p384_montjadd):
// Save regs and make room on stack for temporary variables
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, xzr, [sp, #-16]!
sub sp, sp, NSPACE
mov x24, x0
mov x25, x1
mov x26, x2
mov x0, sp
ldr q1, [x25, #96]
ldp x9, x2, [x25, #96]
ldr q0, [x25, #96]
ldp x4, x6, [x25, #112]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [x25, #128]
xtn v30.2s, v0.2d
ldr q1, [x25, #128]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [x25, #128]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [x0]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [x0, #16]
csetm x15, cc
cneg x1, x1, cc
stp x11, x14, [x0, #32]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc
cinv x16, x15, cc
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc
cneg x9, x9, cc
subs x4, x2, x4
cneg x4, x4, cc
csetm x7, cc
subs x2, x10, x6
cinv x8, x8, cc
cneg x2, x2, cc
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc
cneg x1, x1, cc
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [x0, #16]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [x0]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [x0, #32]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001
adcs x14, x14, x2
mov x2, #0x1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [x0]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [x0, #16]
adc x17, x14, xzr
stp x2, x17, [x0, #32]
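// The next block appears to be an inlined Montgomery squaring mod p_384
// (operand at [x26, #96], result written to [sp, #240]).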
ldr q1, [x26, #96]
ldp x9, x2, [x26, #96]
ldr q0, [x26, #96]
ldp x4, x6, [x26, #112]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [x26, #128]
xtn v30.2s, v0.2d
ldr q1, [x26, #128]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [x26, #128]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [sp, #240]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [sp, #256]
csetm x15, cc
cneg x1, x1, cc
stp x11, x14, [sp, #272]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc
cinv x16, x15, cc
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc
cneg x9, x9, cc
subs x4, x2, x4
cneg x4, x4, cc
csetm x7, cc
subs x2, x10, x6
cinv x8, x8, cc
cneg x2, x2, cc
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc
cneg x1, x1, cc
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [sp, #256]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [sp, #240]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [sp, #272]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001
adcs x14, x14, x2
mov x2, #0x1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [sp, #240]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [sp, #256]
adc x17, x14, xzr
stp x2, x17, [sp, #272]
stp x23, x24, [sp, #0x150] // This would be #-48 after inlining, but accessing sp at a negative offset in the middle of the function is bad
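// The next block appears to be an inlined Montgomery multiplication mod p_384
// (operands at [x26, #96] and [x25, #48], result written to [sp, #288]).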
ldr q3, [x26, #96]
ldr q25, [x25, #48]
ldp x13, x23, [x25, #48]
ldp x3, x21, [x26, #96]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [x25, #80]
ldp x8, x24, [x26, #112]
subs x6, x3, x21
ldr q0, [x26, #128]
movi v23.2d, #0xffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc
ldp x6, x14, [x25, #64]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [x25, #80]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [x26, #128]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #288]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #304]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [sp, #320]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #288]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #304]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #320]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #288]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #304]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #320]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #288]
ldp x21, x12, [sp, #304]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #320]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #288]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #304]
adc x12, x15, x23
stp x21, x12, [sp, #320]
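// Apparently another inlined Montgomery multiplication mod p_384
// (operands at [x25, #96] and [x26, #48], result written to [sp, #48]).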
ldr q3, [x25, #96]
ldr q25, [x26, #48]
ldp x13, x23, [x26, #48]
ldp x3, x21, [x25, #96]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [x26, #80]
ldp x8, x24, [x25, #112]
subs x6, x3, x21
ldr q0, [x25, #128]
movi v23.2d, #0xffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc
ldp x6, x14, [x26, #64]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [x26, #80]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [x25, #128]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #48]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #64]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [sp, #80]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #48]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #64]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #80]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #48]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #64]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #80]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #48]
ldp x21, x12, [sp, #64]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #80]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #48]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #64]
adc x12, x15, x23
stp x21, x12, [sp, #80]
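// Apparently an inlined Montgomery multiplication mod p_384
// (operands at [sp, #0] and [x26, #0], result written to [sp, #96]).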
mov x1, sp
ldr q3, [x1]
ldr q25, [x26, #0]
ldp x13, x23, [x26, #0]
ldp x3, x21, [x1]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [x26, #32]
ldp x8, x24, [x1, #16]
subs x6, x3, x21
ldr q0, [x1, #32]
movi v23.2d, #0xffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc
ldp x6, x14, [x26, #16]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [x26, #32]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [x1, #32]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #96]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #112]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [sp, #128]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #96]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #112]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #128]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #96]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #112]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #128]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #96]
ldp x21, x12, [sp, #112]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #128]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #96]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #112]
adc x12, x15, x23
stp x21, x12, [sp, #128]
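// Apparently an inlined Montgomery multiplication mod p_384
// (operands at [sp, #240] and [x25, #0], result written to [sp, #192]).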
ldr q3, [sp, #240]
ldr q25, [x25, #0]
ldp x13, x23, [x25, #0]
ldp x3, x21, [sp, #240]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [x25, #32]
ldp x8, x24, [sp, #256]
subs x6, x3, x21
ldr q0, [sp, #272]
movi v23.2d, #0xffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc
ldp x6, x14, [x25, #16]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [x25, #32]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #272]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #192]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #208]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [sp, #224]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #192]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #208]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #224]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #192]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #208]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #224]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #192]
ldp x21, x12, [sp, #208]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #224]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #192]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #208]
adc x12, x15, x23
stp x21, x12, [sp, #224]
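// Apparently an inlined Montgomery multiplication mod p_384
// (operands at [sp, #0] and [sp, #48], result written to [sp, #48]).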
mov x1, sp
ldr q3, [x1]
ldr q25, [sp, #48]
ldp x13, x23, [sp, #48]
ldp x3, x21, [x1]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #80]
ldp x8, x24, [x1, #16]
subs x6, x3, x21
ldr q0, [x1, #32]
movi v23.2d, #0xffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc
ldp x6, x14, [sp, #64]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #80]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [x1, #32]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #48]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #64]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [sp, #80]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #48]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #64]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #80]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #48]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #64]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #80]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #48]
ldp x21, x12, [sp, #64]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #80]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #48]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #64]
adc x12, x15, x23
stp x21, x12, [sp, #80]
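// Apparently an inlined Montgomery multiplication mod p_384
// (operands at [sp, #240] and [sp, #288], result written to [sp, #288]).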
ldr q3, [sp, #240]
ldr q25, [sp, #288]
ldp x13, x23, [sp, #288]
ldp x3, x21, [sp, #240]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #320]
ldp x8, x24, [sp, #256]
subs x6, x3, x21
ldr q0, [sp, #272]
movi v23.2d, #0xffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc
ldp x6, x14, [sp, #304]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #320]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #272]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #288]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #304]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [sp, #320]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #288]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #304]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #320]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #288]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #304]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #320]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #288]
ldp x21, x12, [sp, #304]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #320]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x2, x24, x11
stp x22, x5, [sp, #288]
adcs x11, x13, x23
adcs x12, x8, x23
stp x2, x11, [sp, #304]
adc x13, x15, x23
stp x12, x13, [sp, #320]
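// Apparently a subtraction mod p_384: [sp, #96] - [sp, #192] -> [sp, #240].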
ldp x5, x6, [sp, #96]
ldp x4, x3, [sp, #192]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #112]
ldp x4, x3, [sp, #208]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #128]
ldp x4, x3, [sp, #224]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, cc
mov x4, #0xffffffff
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #240]
stp x7, x8, [sp, #256]
stp x9, x10, [sp, #272]
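// Apparently a subtraction mod p_384: [sp, #48] - [sp, #288] -> [sp, #48]
// (the upper words of the subtrahend are still live in x2, x11, x12, x13).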
ldp x5, x6, [sp, #48]
ldp x4, x3, [sp, #288]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #64]
sbcs x7, x7, x2
sbcs x8, x8, x11
ldp x9, x10, [sp, #80]
sbcs x9, x9, x12
sbcs x10, x10, x13
csetm x3, cc
mov x4, #0xffffffff
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #48]
stp x7, x8, [sp, #64]
stp x9, x10, [sp, #80]
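// Apparently an inlined Montgomery squaring mod p_384
// (operand at [sp, #240], result written to [sp, #144]).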
ldr q1, [sp, #240]
ldp x9, x2, [sp, #240]
ldr q0, [sp, #240]
ldp x4, x6, [sp, #256]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [sp, #272]
xtn v30.2s, v0.2d
ldr q1, [sp, #272]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [sp, #272]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [sp, #144]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [sp, #160]
csetm x15, cc
cneg x1, x1, cc
stp x11, x14, [sp, #176]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc
cinv x16, x15, cc
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc
cneg x9, x9, cc
subs x4, x2, x4
cneg x4, x4, cc
csetm x7, cc
subs x2, x10, x6
cinv x8, x8, cc
cneg x2, x2, cc
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc
cneg x1, x1, cc
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [sp, #160]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [sp, #144]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [sp, #176]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001
adcs x14, x14, x2
mov x2, #0x1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [sp, #144]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [sp, #160]
adc x17, x14, xzr
stp x2, x17, [sp, #176]
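// Montgomery squaring modulo p_384 (inlined): square the field element at
// [sp, #48] and write the reduced result to [sp] (x0 = sp).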
mov x0, sp
ldr q1, [sp, #48]
ldp x9, x2, [sp, #48]
ldr q0, [sp, #48]
ldp x4, x6, [sp, #64]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [sp, #80]
xtn v30.2s, v0.2d
ldr q1, [sp, #80]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [sp, #80]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [x0]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [x0, #16]
csetm x15, cc
cneg x1, x1, cc
stp x11, x14, [x0, #32]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc
cinv x16, x15, cc
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc
cneg x9, x9, cc
subs x4, x2, x4
cneg x4, x4, cc
csetm x7, cc
subs x2, x10, x6
cinv x8, x8, cc
cneg x2, x2, cc
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc
cneg x1, x1, cc
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [x0, #16]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [x0]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [x0, #32]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001
adcs x14, x14, x2
mov x2, #0x1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [x0]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [x0, #16]
adc x17, x14, xzr
stp x2, x17, [x0, #32]
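// Montgomery multiplication modulo p_384 (inlined): multiply the field
// elements at [sp, #144] and [sp, #192], writing the result to [sp, #192].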
ldr q3, [sp, #144]
ldr q25, [sp, #192]
ldp x13, x23, [sp, #192]
ldp x3, x21, [sp, #144]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #224]
ldp x8, x24, [sp, #160]
subs x6, x3, x21
ldr q0, [sp, #176]
movi v23.2d, #0xffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc
ldp x6, x14, [sp, #208]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #224]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #176]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #192]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #208]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [sp, #224]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #192]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #208]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #224]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #192]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #208]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #224]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #192]
ldp x21, x12, [sp, #208]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #224]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #192]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #208]
adc x12, x15, x23
stp x21, x12, [sp, #224]
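// Montgomery multiplication modulo p_384 (inlined): multiply the field
// elements at [sp, #144] and [sp, #96], writing the result to [sp, #96].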
ldr q3, [sp, #144]
ldr q25, [sp, #96]
ldp x13, x23, [sp, #96]
ldp x3, x21, [sp, #144]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #128]
ldp x8, x24, [sp, #160]
subs x6, x3, x21
ldr q0, [sp, #176]
movi v23.2d, #0xffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc
ldp x6, x14, [sp, #112]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #128]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #176]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #96]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #112]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [sp, #128]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #96]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #112]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #128]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #96]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #112]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #128]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #96]
ldp x21, x12, [sp, #112]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #128]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x2, x24, x11
stp x22, x5, [sp, #96]
adcs x11, x13, x23
adcs x12, x8, x23
stp x2, x11, [sp, #112]
adc x13, x15, x23
stp x12, x13, [sp, #128]
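// Modular subtraction: [sp] := [sp] - [sp, #192] mod p_384, adding back
// p_384 when the raw subtraction borrows.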
mov x0, sp
mov x1, sp
ldp x5, x6, [x1]
ldp x4, x3, [sp, #192]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [x1, #16]
ldp x4, x3, [sp, #208]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [x1, #32]
ldp x4, x3, [sp, #224]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, cc
mov x4, #0xffffffff
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [x0]
stp x7, x8, [x0, #16]
stp x9, x10, [x0, #32]
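// Modular subtraction: [sp, #144] := [sp, #96] - [sp, #192] mod p_384.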
ldp x5, x6, [sp, #96]
ldp x4, x3, [sp, #192]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x4, x3, [sp, #208]
sbcs x7, x2, x4
sbcs x8, x11, x3
ldp x4, x3, [sp, #224]
sbcs x9, x12, x4
sbcs x10, x13, x3
csetm x3, cc
mov x4, #0xffffffff
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #144]
stp x7, x8, [sp, #160]
stp x9, x10, [sp, #176]
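// Montgomery multiplication modulo p_384 (inlined): multiply the field
// element at [sp, #240] by the one at [x25, #96] (the z coordinate of the
// first input point), writing the result to [sp, #240].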
ldr q3, [sp, #240]
ldr q25, [x25, #96]
ldp x13, x23, [x25, #96]
ldp x3, x21, [sp, #240]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [x25, #128]
ldp x8, x24, [sp, #256]
subs x6, x3, x21
ldr q0, [sp, #272]
movi v23.2d, #0xffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc
ldp x6, x14, [x25, #112]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [x25, #128]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #272]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #240]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #256]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [sp, #272]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #240]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #256]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #272]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #240]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #256]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #272]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #240]
ldp x21, x12, [sp, #256]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #272]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #240]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #256]
adc x12, x15, x23
stp x21, x12, [sp, #272]
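// Modular subtraction: [sp] := [sp] - [sp, #96] mod p_384; the difference
// also stays in registers for the next subtraction.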
mov x0, sp
mov x1, sp
ldp x5, x6, [x1]
ldp x4, x3, [sp, #96]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [x1, #16]
ldp x4, x3, [sp, #112]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [x1, #32]
ldp x4, x3, [sp, #128]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, cc
mov x4, #0xffffffff
and x4, x4, x3
adds x2, x5, x4
eor x4, x4, x3
adcs x11, x6, x4
mov x4, #0xfffffffffffffffe
and x4, x4, x3
adcs x4, x7, x4
adcs x12, x8, x3
adcs x13, x9, x3
adc x3, x10, x3
stp x2, x11, [x0]
stp x4, x12, [x0, #16]
stp x13, x3, [x0, #32]
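// Modular subtraction: [sp, #192] := [sp, #192] - [sp] mod p_384, using the
// difference still held in registers.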
ldp x5, x6, [sp, #192]
subs x5, x5, x2
sbcs x6, x6, x11
ldp x7, x8, [sp, #208]
sbcs x7, x7, x4
sbcs x8, x8, x12
ldp x9, x10, [sp, #224]
sbcs x9, x9, x13
sbcs x10, x10, x3
csetm x3, cc
mov x4, #0xffffffff
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #192]
stp x7, x8, [sp, #208]
stp x9, x10, [sp, #224]
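// Montgomery multiplication modulo p_384 (inlined): multiply the field
// elements at [sp, #144] and [sp, #288], writing the result to [sp, #144].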
ldr q3, [sp, #144]
ldr q25, [sp, #288]
ldp x13, x23, [sp, #288]
ldp x3, x21, [sp, #144]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #320]
ldp x8, x24, [sp, #160]
subs x6, x3, x21
ldr q0, [sp, #176]
movi v23.2d, #0xffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc
ldp x6, x14, [sp, #304]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #320]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #176]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #144]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #160]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [sp, #176]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #144]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #160]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #176]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #144]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #160]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #176]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #144]
ldp x21, x12, [sp, #160]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #176]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #144]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #160]
adc x12, x15, x23
stp x21, x12, [sp, #176]
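// Montgomery multiplication modulo p_384 (inlined): multiply the field
// element at [sp, #240] by the one at [x26, #96], writing the result to
// [sp, #240].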
ldr q3, [sp, #240]
ldr q25, [x26, #96]
ldp x13, x23, [x26, #96]
ldp x3, x21, [sp, #240]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [x26, #128]
ldp x8, x24, [sp, #256]
subs x6, x3, x21
ldr q0, [sp, #272]
movi v23.2d, #0xffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc
ldp x6, x14, [x26, #112]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [x26, #128]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #272]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #240]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #256]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [sp, #272]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #240]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #256]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #272]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #240]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #256]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #272]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #240]
ldp x21, x12, [sp, #256]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #272]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #240]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #256]
adc x12, x15, x23
stp x21, x12, [sp, #272]
ldp x2, x27, [sp, #0x150] // This is offset #-48 after inlining, but accessing sp with a negative offset in the middle of the function is bad
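// Montgomery multiplication modulo p_384 (inlined): multiply the field
// elements at [sp, #48] and [sp, #192]; the reduced product is kept in
// registers and consumed by the subtraction that follows.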
ldr q3, [sp, #48]
ldr q25, [sp, #192]
ldp x13, x23, [sp, #192]
ldp x3, x21, [sp, #48]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #224]
ldp x8, x24, [sp, #64]
subs x6, x3, x21
ldr q0, [sp, #80]
movi v23.2d, #0xffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc
ldp x6, x14, [sp, #208]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #224]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #80]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #192]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #208]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [sp, #224]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #192]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #208]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #224]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #192]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #208]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #224]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #192]
ldp x21, x12, [sp, #208]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #224]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x2, x6, x20
eor x3, x20, x23
adcs x6, x7, x3
adcs x7, x24, x11
adcs x9, x13, x23
adcs x10, x8, x23
adc x11, x15, x23
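// Modular subtraction over p_384: subtract the field element at [sp, #144]
// from the product just computed; the low two words stay in x19 and x24 and
// the remaining words are stored at [sp, #208] and [sp, #224].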
ldp x4, x3, [sp, #144]
subs x5, x2, x4
sbcs x6, x6, x3
ldp x4, x3, [sp, #160]
sbcs x7, x7, x4
sbcs x8, x9, x3
ldp x4, x3, [sp, #176]
sbcs x9, x10, x4
sbcs x10, x11, x3
csetm x3, cc
mov x4, #0xffffffff
and x4, x4, x3
adds x19, x5, x4
eor x4, x4, x3
adcs x24, x6, x4
mov x4, #0xfffffffffffffffe
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x7, x8, [sp, #208]
stp x9, x10, [sp, #224]
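// Check whether the six words at offset 96 of each input point (the z
// coordinate slot) are all zero, i.e. whether the point is at infinity,
// recording a nonzero flag for each in x20 and x21.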
ldp x0, x1, [x25, #96]
ldp x2, x3, [x25, #112]
ldp x4, x5, [x25, #128]
orr x20, x0, x1
orr x21, x2, x3
orr x22, x4, x5
orr x20, x20, x21
orr x20, x20, x22
cmp x20, xzr
cset x20, ne
ldp x6, x7, [x26, #96]
ldp x8, x9, [x26, #112]
ldp x10, x11, [x26, #128]
orr x21, x6, x7
orr x22, x8, x9
orr x23, x10, x11
orr x21, x21, x22
orr x21, x21, x23
cmp x21, xzr
cset x21, ne
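// Multiplex the final result: if exactly one input had a zero z coordinate,
// copy the other input through unchanged; otherwise keep the computed sum.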
cmp x21, x20
ldp x12, x13, [sp, #240]
csel x12, x0, x12, cc
csel x13, x1, x13, cc
csel x12, x6, x12, hi
csel x13, x7, x13, hi
ldp x14, x15, [sp, #256]
csel x14, x2, x14, cc
csel x15, x3, x15, cc
csel x14, x8, x14, hi
csel x15, x9, x15, hi
ldp x16, x17, [sp, #272]
csel x16, x4, x16, cc
csel x17, x5, x17, cc
csel x16, x10, x16, hi
csel x17, x11, x17, hi
ldp x20, x21, [x25]
ldp x0, x1, [sp, #0]
csel x0, x20, x0, cc
csel x1, x21, x1, cc
ldp x20, x21, [x26]
csel x0, x20, x0, hi
csel x1, x21, x1, hi
ldp x20, x21, [x25, #16]
ldp x2, x3, [sp, #16]
csel x2, x20, x2, cc
csel x3, x21, x3, cc
ldp x20, x21, [x26, #16]
csel x2, x20, x2, hi
csel x3, x21, x3, hi
ldp x20, x21, [x25, #32]
ldp x4, x5, [sp, #32]
csel x4, x20, x4, cc
csel x5, x21, x5, cc
ldp x20, x21, [x26, #32]
csel x4, x20, x4, hi
csel x5, x21, x5, hi
ldp x20, x21, [x25, #48]
csel x6, x20, x19, cc
csel x7, x21, x24, cc
ldp x20, x21, [x26, #48]
csel x6, x20, x6, hi
csel x7, x21, x7, hi
ldp x20, x21, [x25, #64]
ldp x8, x9, [sp, #208]
csel x8, x20, x8, cc
csel x9, x21, x9, cc
ldp x20, x21, [x26, #64]
csel x8, x20, x8, hi
csel x9, x21, x9, hi
ldp x20, x21, [x25, #80]
ldp x10, x11, [sp, #224]
csel x10, x20, x10, cc
csel x11, x21, x11, cc
ldp x20, x21, [x26, #80]
csel x10, x20, x10, hi
csel x11, x21, x11, hi
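// Store the selected x, y and z coordinates of the result to the output
// point addressed by x27.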
stp x0, x1, [x27]
stp x2, x3, [x27, #16]
stp x4, x5, [x27, #32]
stp x6, x7, [x27, #48]
stp x8, x9, [x27, #64]
stp x10, x11, [x27, #80]
stp x12, x13, [x27, #96]
stp x14, x15, [x27, #112]
stp x16, x17, [x27, #128]
// Restore stack and registers
add sp, sp, NSPACE
ldp x27, xzr, [sp], 16
ldp x25, x26, [sp], 16
ldp x23, x24, [sp], 16
ldp x21, x22, [sp], 16
ldp x19, x20, [sp], 16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
|
marvin-hansen/iggy-streaming-system
| 43,472
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/p384_montjadd_alt.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point addition on NIST curve P-384 in Montgomery-Jacobian coordinates
//
// extern void p384_montjadd_alt
// (uint64_t p3[static 18],uint64_t p1[static 18],uint64_t p2[static 18]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2
// ----------------------------------------------------------------------------
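//
// A minimal usage sketch from C (illustrative only, assuming the caller has
// already converted the coordinates to the Montgomery domain): each
// coordinate occupies NUMSIZE = 48 bytes, i.e. six 64-bit words, so a
// Jacobian triple is 18 words laid out as x', y', z'.
//
//   uint64_t p1[18], p2[18], p3[18];   /* (x', y', z') for each point */
//   /* ... fill p1 and p2 with Montgomery-domain coordinates ... */
//   p384_montjadd_alt(p3, p1, p2);     /* p3 := p1 + p2 */
//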
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjadd_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjadd_alt)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 48
// Stable homes for input arguments during main code sequence
#define input_z x24
#define input_x x25
#define input_y x26
// Pointer-offset pairs for inputs and outputs
#define x_1 input_x, #0
#define y_1 input_x, #NUMSIZE
#define z_1 input_x, #(2*NUMSIZE)
#define x_2 input_y, #0
#define y_2 input_y, #NUMSIZE
#define z_2 input_y, #(2*NUMSIZE)
#define x_3 input_z, #0
#define y_3 input_z, #NUMSIZE
#define z_3 input_z, #(2*NUMSIZE)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z1sq sp, #(NUMSIZE*0)
#define ww sp, #(NUMSIZE*0)
#define resx sp, #(NUMSIZE*0)
#define yd sp, #(NUMSIZE*1)
#define y2a sp, #(NUMSIZE*1)
#define x2a sp, #(NUMSIZE*2)
#define zzx2 sp, #(NUMSIZE*2)
#define zz sp, #(NUMSIZE*3)
#define t1 sp, #(NUMSIZE*3)
#define t2 sp, #(NUMSIZE*4)
#define x1a sp, #(NUMSIZE*4)
#define zzx1 sp, #(NUMSIZE*4)
#define resy sp, #(NUMSIZE*4)
#define xd sp, #(NUMSIZE*5)
#define z2sq sp, #(NUMSIZE*5)
#define resz sp, #(NUMSIZE*5)
#define y1a sp, #(NUMSIZE*6)
#define NSPACE (NUMSIZE*7)
// Corresponds exactly to bignum_montmul_p384_alt
#define montmul_p384(P0,P1,P2) \
ldp x3, x4, [P1]; \
ldp x5, x6, [P2]; \
mul x12, x3, x5; \
umulh x13, x3, x5; \
mul x11, x3, x6; \
umulh x14, x3, x6; \
adds x13, x13, x11; \
ldp x7, x8, [P2+16]; \
mul x11, x3, x7; \
umulh x15, x3, x7; \
adcs x14, x14, x11; \
mul x11, x3, x8; \
umulh x16, x3, x8; \
adcs x15, x15, x11; \
ldp x9, x10, [P2+32]; \
mul x11, x3, x9; \
umulh x17, x3, x9; \
adcs x16, x16, x11; \
mul x11, x3, x10; \
umulh x19, x3, x10; \
adcs x17, x17, x11; \
adc x19, x19, xzr; \
mul x11, x4, x5; \
adds x13, x13, x11; \
mul x11, x4, x6; \
adcs x14, x14, x11; \
mul x11, x4, x7; \
adcs x15, x15, x11; \
mul x11, x4, x8; \
adcs x16, x16, x11; \
mul x11, x4, x9; \
adcs x17, x17, x11; \
mul x11, x4, x10; \
adcs x19, x19, x11; \
cset x20, cs; \
umulh x11, x4, x5; \
adds x14, x14, x11; \
umulh x11, x4, x6; \
adcs x15, x15, x11; \
umulh x11, x4, x7; \
adcs x16, x16, x11; \
umulh x11, x4, x8; \
adcs x17, x17, x11; \
umulh x11, x4, x9; \
adcs x19, x19, x11; \
umulh x11, x4, x10; \
adc x20, x20, x11; \
ldp x3, x4, [P1+16]; \
mul x11, x3, x5; \
adds x14, x14, x11; \
mul x11, x3, x6; \
adcs x15, x15, x11; \
mul x11, x3, x7; \
adcs x16, x16, x11; \
mul x11, x3, x8; \
adcs x17, x17, x11; \
mul x11, x3, x9; \
adcs x19, x19, x11; \
mul x11, x3, x10; \
adcs x20, x20, x11; \
cset x21, cs; \
umulh x11, x3, x5; \
adds x15, x15, x11; \
umulh x11, x3, x6; \
adcs x16, x16, x11; \
umulh x11, x3, x7; \
adcs x17, x17, x11; \
umulh x11, x3, x8; \
adcs x19, x19, x11; \
umulh x11, x3, x9; \
adcs x20, x20, x11; \
umulh x11, x3, x10; \
adc x21, x21, x11; \
mul x11, x4, x5; \
adds x15, x15, x11; \
mul x11, x4, x6; \
adcs x16, x16, x11; \
mul x11, x4, x7; \
adcs x17, x17, x11; \
mul x11, x4, x8; \
adcs x19, x19, x11; \
mul x11, x4, x9; \
adcs x20, x20, x11; \
mul x11, x4, x10; \
adcs x21, x21, x11; \
cset x22, cs; \
umulh x11, x4, x5; \
adds x16, x16, x11; \
umulh x11, x4, x6; \
adcs x17, x17, x11; \
umulh x11, x4, x7; \
adcs x19, x19, x11; \
umulh x11, x4, x8; \
adcs x20, x20, x11; \
umulh x11, x4, x9; \
adcs x21, x21, x11; \
umulh x11, x4, x10; \
adc x22, x22, x11; \
ldp x3, x4, [P1+32]; \
mul x11, x3, x5; \
adds x16, x16, x11; \
mul x11, x3, x6; \
adcs x17, x17, x11; \
mul x11, x3, x7; \
adcs x19, x19, x11; \
mul x11, x3, x8; \
adcs x20, x20, x11; \
mul x11, x3, x9; \
adcs x21, x21, x11; \
mul x11, x3, x10; \
adcs x22, x22, x11; \
cset x2, cs; \
umulh x11, x3, x5; \
adds x17, x17, x11; \
umulh x11, x3, x6; \
adcs x19, x19, x11; \
umulh x11, x3, x7; \
adcs x20, x20, x11; \
umulh x11, x3, x8; \
adcs x21, x21, x11; \
umulh x11, x3, x9; \
adcs x22, x22, x11; \
umulh x11, x3, x10; \
adc x2, x2, x11; \
mul x11, x4, x5; \
adds x17, x17, x11; \
mul x11, x4, x6; \
adcs x19, x19, x11; \
mul x11, x4, x7; \
adcs x20, x20, x11; \
mul x11, x4, x8; \
adcs x21, x21, x11; \
mul x11, x4, x9; \
adcs x22, x22, x11; \
mul x11, x4, x10; \
adcs x2, x2, x11; \
cset x1, cs; \
umulh x11, x4, x5; \
adds x19, x19, x11; \
umulh x11, x4, x6; \
adcs x20, x20, x11; \
umulh x11, x4, x7; \
adcs x21, x21, x11; \
umulh x11, x4, x8; \
adcs x22, x22, x11; \
umulh x11, x4, x9; \
adcs x2, x2, x11; \
umulh x11, x4, x10; \
adc x1, x1, x11; \
lsl x7, x12, #32; \
add x12, x7, x12; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x12; \
mov x6, #0xffffffff; \
mul x5, x6, x12; \
umulh x6, x6, x12; \
adds x7, x7, x5; \
adcs x6, x6, x12; \
adc x5, xzr, xzr; \
subs x13, x13, x7; \
sbcs x14, x14, x6; \
sbcs x15, x15, x5; \
sbcs x16, x16, xzr; \
sbcs x17, x17, xzr; \
sbc x12, x12, xzr; \
lsl x7, x13, #32; \
add x13, x7, x13; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x13; \
mov x6, #0xffffffff; \
mul x5, x6, x13; \
umulh x6, x6, x13; \
adds x7, x7, x5; \
adcs x6, x6, x13; \
adc x5, xzr, xzr; \
subs x14, x14, x7; \
sbcs x15, x15, x6; \
sbcs x16, x16, x5; \
sbcs x17, x17, xzr; \
sbcs x12, x12, xzr; \
sbc x13, x13, xzr; \
lsl x7, x14, #32; \
add x14, x7, x14; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x14; \
mov x6, #0xffffffff; \
mul x5, x6, x14; \
umulh x6, x6, x14; \
adds x7, x7, x5; \
adcs x6, x6, x14; \
adc x5, xzr, xzr; \
subs x15, x15, x7; \
sbcs x16, x16, x6; \
sbcs x17, x17, x5; \
sbcs x12, x12, xzr; \
sbcs x13, x13, xzr; \
sbc x14, x14, xzr; \
lsl x7, x15, #32; \
add x15, x7, x15; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x15; \
mov x6, #0xffffffff; \
mul x5, x6, x15; \
umulh x6, x6, x15; \
adds x7, x7, x5; \
adcs x6, x6, x15; \
adc x5, xzr, xzr; \
subs x16, x16, x7; \
sbcs x17, x17, x6; \
sbcs x12, x12, x5; \
sbcs x13, x13, xzr; \
sbcs x14, x14, xzr; \
sbc x15, x15, xzr; \
lsl x7, x16, #32; \
add x16, x7, x16; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x16; \
mov x6, #0xffffffff; \
mul x5, x6, x16; \
umulh x6, x6, x16; \
adds x7, x7, x5; \
adcs x6, x6, x16; \
adc x5, xzr, xzr; \
subs x17, x17, x7; \
sbcs x12, x12, x6; \
sbcs x13, x13, x5; \
sbcs x14, x14, xzr; \
sbcs x15, x15, xzr; \
sbc x16, x16, xzr; \
lsl x7, x17, #32; \
add x17, x7, x17; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x17; \
mov x6, #0xffffffff; \
mul x5, x6, x17; \
umulh x6, x6, x17; \
adds x7, x7, x5; \
adcs x6, x6, x17; \
adc x5, xzr, xzr; \
subs x12, x12, x7; \
sbcs x13, x13, x6; \
sbcs x14, x14, x5; \
sbcs x15, x15, xzr; \
sbcs x16, x16, xzr; \
sbc x17, x17, xzr; \
adds x12, x12, x19; \
adcs x13, x13, x20; \
adcs x14, x14, x21; \
adcs x15, x15, x22; \
adcs x16, x16, x2; \
adcs x17, x17, x1; \
adc x10, xzr, xzr; \
mov x11, #0xffffffff00000001; \
adds x19, x12, x11; \
mov x11, #0xffffffff; \
adcs x20, x13, x11; \
mov x11, #0x1; \
adcs x21, x14, x11; \
adcs x22, x15, xzr; \
adcs x2, x16, xzr; \
adcs x1, x17, xzr; \
adcs x10, x10, xzr; \
csel x12, x12, x19, eq; \
csel x13, x13, x20, eq; \
csel x14, x14, x21, eq; \
csel x15, x15, x22, eq; \
csel x16, x16, x2, eq; \
csel x17, x17, x1, eq; \
stp x12, x13, [P0]; \
stp x14, x15, [P0+16]; \
stp x16, x17, [P0+32]
// Corresponds exactly to bignum_montsqr_p384_alt
#define montsqr_p384(P0,P1) \
ldp x2, x3, [P1]; \
mul x9, x2, x3; \
umulh x10, x2, x3; \
ldp x4, x5, [P1+16]; \
mul x8, x2, x4; \
adds x10, x10, x8; \
mul x11, x2, x5; \
mul x8, x3, x4; \
adcs x11, x11, x8; \
umulh x12, x2, x5; \
mul x8, x3, x5; \
adcs x12, x12, x8; \
ldp x6, x7, [P1+32]; \
mul x13, x2, x7; \
mul x8, x3, x6; \
adcs x13, x13, x8; \
umulh x14, x2, x7; \
mul x8, x3, x7; \
adcs x14, x14, x8; \
mul x15, x5, x6; \
adcs x15, x15, xzr; \
umulh x16, x5, x6; \
adc x16, x16, xzr; \
umulh x8, x2, x4; \
adds x11, x11, x8; \
umulh x8, x3, x4; \
adcs x12, x12, x8; \
umulh x8, x3, x5; \
adcs x13, x13, x8; \
umulh x8, x3, x6; \
adcs x14, x14, x8; \
umulh x8, x3, x7; \
adcs x15, x15, x8; \
adc x16, x16, xzr; \
mul x8, x2, x6; \
adds x12, x12, x8; \
mul x8, x4, x5; \
adcs x13, x13, x8; \
mul x8, x4, x6; \
adcs x14, x14, x8; \
mul x8, x4, x7; \
adcs x15, x15, x8; \
mul x8, x5, x7; \
adcs x16, x16, x8; \
mul x17, x6, x7; \
adcs x17, x17, xzr; \
umulh x19, x6, x7; \
adc x19, x19, xzr; \
umulh x8, x2, x6; \
adds x13, x13, x8; \
umulh x8, x4, x5; \
adcs x14, x14, x8; \
umulh x8, x4, x6; \
adcs x15, x15, x8; \
umulh x8, x4, x7; \
adcs x16, x16, x8; \
umulh x8, x5, x7; \
adcs x17, x17, x8; \
adc x19, x19, xzr; \
adds x9, x9, x9; \
adcs x10, x10, x10; \
adcs x11, x11, x11; \
adcs x12, x12, x12; \
adcs x13, x13, x13; \
adcs x14, x14, x14; \
adcs x15, x15, x15; \
adcs x16, x16, x16; \
adcs x17, x17, x17; \
adcs x19, x19, x19; \
cset x20, hs; \
umulh x8, x2, x2; \
mul x2, x2, x2; \
adds x9, x9, x8; \
mul x8, x3, x3; \
adcs x10, x10, x8; \
umulh x8, x3, x3; \
adcs x11, x11, x8; \
mul x8, x4, x4; \
adcs x12, x12, x8; \
umulh x8, x4, x4; \
adcs x13, x13, x8; \
mul x8, x5, x5; \
adcs x14, x14, x8; \
umulh x8, x5, x5; \
adcs x15, x15, x8; \
mul x8, x6, x6; \
adcs x16, x16, x8; \
umulh x8, x6, x6; \
adcs x17, x17, x8; \
mul x8, x7, x7; \
adcs x19, x19, x8; \
umulh x8, x7, x7; \
adc x20, x20, x8; \
lsl x5, x2, #32; \
add x2, x5, x2; \
mov x5, #-4294967295; \
umulh x5, x5, x2; \
mov x4, #4294967295; \
mul x3, x4, x2; \
umulh x4, x4, x2; \
adds x5, x5, x3; \
adcs x4, x4, x2; \
adc x3, xzr, xzr; \
subs x9, x9, x5; \
sbcs x10, x10, x4; \
sbcs x11, x11, x3; \
sbcs x12, x12, xzr; \
sbcs x13, x13, xzr; \
sbc x2, x2, xzr; \
lsl x5, x9, #32; \
add x9, x5, x9; \
mov x5, #-4294967295; \
umulh x5, x5, x9; \
mov x4, #4294967295; \
mul x3, x4, x9; \
umulh x4, x4, x9; \
adds x5, x5, x3; \
adcs x4, x4, x9; \
adc x3, xzr, xzr; \
subs x10, x10, x5; \
sbcs x11, x11, x4; \
sbcs x12, x12, x3; \
sbcs x13, x13, xzr; \
sbcs x2, x2, xzr; \
sbc x9, x9, xzr; \
lsl x5, x10, #32; \
add x10, x5, x10; \
mov x5, #-4294967295; \
umulh x5, x5, x10; \
mov x4, #4294967295; \
mul x3, x4, x10; \
umulh x4, x4, x10; \
adds x5, x5, x3; \
adcs x4, x4, x10; \
adc x3, xzr, xzr; \
subs x11, x11, x5; \
sbcs x12, x12, x4; \
sbcs x13, x13, x3; \
sbcs x2, x2, xzr; \
sbcs x9, x9, xzr; \
sbc x10, x10, xzr; \
lsl x5, x11, #32; \
add x11, x5, x11; \
mov x5, #-4294967295; \
umulh x5, x5, x11; \
mov x4, #4294967295; \
mul x3, x4, x11; \
umulh x4, x4, x11; \
adds x5, x5, x3; \
adcs x4, x4, x11; \
adc x3, xzr, xzr; \
subs x12, x12, x5; \
sbcs x13, x13, x4; \
sbcs x2, x2, x3; \
sbcs x9, x9, xzr; \
sbcs x10, x10, xzr; \
sbc x11, x11, xzr; \
lsl x5, x12, #32; \
add x12, x5, x12; \
mov x5, #-4294967295; \
umulh x5, x5, x12; \
mov x4, #4294967295; \
mul x3, x4, x12; \
umulh x4, x4, x12; \
adds x5, x5, x3; \
adcs x4, x4, x12; \
adc x3, xzr, xzr; \
subs x13, x13, x5; \
sbcs x2, x2, x4; \
sbcs x9, x9, x3; \
sbcs x10, x10, xzr; \
sbcs x11, x11, xzr; \
sbc x12, x12, xzr; \
lsl x5, x13, #32; \
add x13, x5, x13; \
mov x5, #-4294967295; \
umulh x5, x5, x13; \
mov x4, #4294967295; \
mul x3, x4, x13; \
umulh x4, x4, x13; \
adds x5, x5, x3; \
adcs x4, x4, x13; \
adc x3, xzr, xzr; \
subs x2, x2, x5; \
sbcs x9, x9, x4; \
sbcs x10, x10, x3; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbc x13, x13, xzr; \
adds x2, x2, x14; \
adcs x9, x9, x15; \
adcs x10, x10, x16; \
adcs x11, x11, x17; \
adcs x12, x12, x19; \
adcs x13, x13, x20; \
adc x6, xzr, xzr; \
mov x8, #-4294967295; \
adds x14, x2, x8; \
mov x8, #4294967295; \
adcs x15, x9, x8; \
mov x8, #1; \
adcs x16, x10, x8; \
adcs x17, x11, xzr; \
adcs x19, x12, xzr; \
adcs x20, x13, xzr; \
adcs x6, x6, xzr; \
csel x2, x2, x14, eq; \
csel x9, x9, x15, eq; \
csel x10, x10, x16, eq; \
csel x11, x11, x17, eq; \
csel x12, x12, x19, eq; \
csel x13, x13, x20, eq; \
stp x2, x9, [P0]; \
stp x10, x11, [P0+16]; \
stp x12, x13, [P0+32]
// Almost-Montgomery variant, used when the result only feeds further
// multiplications whose other argument is fully reduced (which is always
// safe). In fact, with the Montgomery multiplication used here, we don't
// even *need* the restriction that the other argument is reduced.
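// (Concretely, as can be read off the final correction step of this macro,
// the output is only guaranteed to be < 2^384 and congruent to the true
// value mod p_384, not fully reduced below p_384; that property suffices
// for the places it is used below.)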
#define amontsqr_p384(P0,P1) \
ldp x2, x3, [P1]; \
mul x9, x2, x3; \
umulh x10, x2, x3; \
ldp x4, x5, [P1+16]; \
mul x8, x2, x4; \
adds x10, x10, x8; \
mul x11, x2, x5; \
mul x8, x3, x4; \
adcs x11, x11, x8; \
umulh x12, x2, x5; \
mul x8, x3, x5; \
adcs x12, x12, x8; \
ldp x6, x7, [P1+32]; \
mul x13, x2, x7; \
mul x8, x3, x6; \
adcs x13, x13, x8; \
umulh x14, x2, x7; \
mul x8, x3, x7; \
adcs x14, x14, x8; \
mul x15, x5, x6; \
adcs x15, x15, xzr; \
umulh x16, x5, x6; \
adc x16, x16, xzr; \
umulh x8, x2, x4; \
adds x11, x11, x8; \
umulh x8, x3, x4; \
adcs x12, x12, x8; \
umulh x8, x3, x5; \
adcs x13, x13, x8; \
umulh x8, x3, x6; \
adcs x14, x14, x8; \
umulh x8, x3, x7; \
adcs x15, x15, x8; \
adc x16, x16, xzr; \
mul x8, x2, x6; \
adds x12, x12, x8; \
mul x8, x4, x5; \
adcs x13, x13, x8; \
mul x8, x4, x6; \
adcs x14, x14, x8; \
mul x8, x4, x7; \
adcs x15, x15, x8; \
mul x8, x5, x7; \
adcs x16, x16, x8; \
mul x17, x6, x7; \
adcs x17, x17, xzr; \
umulh x19, x6, x7; \
adc x19, x19, xzr; \
umulh x8, x2, x6; \
adds x13, x13, x8; \
umulh x8, x4, x5; \
adcs x14, x14, x8; \
umulh x8, x4, x6; \
adcs x15, x15, x8; \
umulh x8, x4, x7; \
adcs x16, x16, x8; \
umulh x8, x5, x7; \
adcs x17, x17, x8; \
adc x19, x19, xzr; \
adds x9, x9, x9; \
adcs x10, x10, x10; \
adcs x11, x11, x11; \
adcs x12, x12, x12; \
adcs x13, x13, x13; \
adcs x14, x14, x14; \
adcs x15, x15, x15; \
adcs x16, x16, x16; \
adcs x17, x17, x17; \
adcs x19, x19, x19; \
cset x20, hs; \
umulh x8, x2, x2; \
mul x2, x2, x2; \
adds x9, x9, x8; \
mul x8, x3, x3; \
adcs x10, x10, x8; \
umulh x8, x3, x3; \
adcs x11, x11, x8; \
mul x8, x4, x4; \
adcs x12, x12, x8; \
umulh x8, x4, x4; \
adcs x13, x13, x8; \
mul x8, x5, x5; \
adcs x14, x14, x8; \
umulh x8, x5, x5; \
adcs x15, x15, x8; \
mul x8, x6, x6; \
adcs x16, x16, x8; \
umulh x8, x6, x6; \
adcs x17, x17, x8; \
mul x8, x7, x7; \
adcs x19, x19, x8; \
umulh x8, x7, x7; \
adc x20, x20, x8; \
lsl x5, x2, #32; \
add x2, x5, x2; \
mov x5, #-4294967295; \
umulh x5, x5, x2; \
mov x4, #4294967295; \
mul x3, x4, x2; \
umulh x4, x4, x2; \
adds x5, x5, x3; \
adcs x4, x4, x2; \
adc x3, xzr, xzr; \
subs x9, x9, x5; \
sbcs x10, x10, x4; \
sbcs x11, x11, x3; \
sbcs x12, x12, xzr; \
sbcs x13, x13, xzr; \
sbc x2, x2, xzr; \
lsl x5, x9, #32; \
add x9, x5, x9; \
mov x5, #-4294967295; \
umulh x5, x5, x9; \
mov x4, #4294967295; \
mul x3, x4, x9; \
umulh x4, x4, x9; \
adds x5, x5, x3; \
adcs x4, x4, x9; \
adc x3, xzr, xzr; \
subs x10, x10, x5; \
sbcs x11, x11, x4; \
sbcs x12, x12, x3; \
sbcs x13, x13, xzr; \
sbcs x2, x2, xzr; \
sbc x9, x9, xzr; \
lsl x5, x10, #32; \
add x10, x5, x10; \
mov x5, #-4294967295; \
umulh x5, x5, x10; \
mov x4, #4294967295; \
mul x3, x4, x10; \
umulh x4, x4, x10; \
adds x5, x5, x3; \
adcs x4, x4, x10; \
adc x3, xzr, xzr; \
subs x11, x11, x5; \
sbcs x12, x12, x4; \
sbcs x13, x13, x3; \
sbcs x2, x2, xzr; \
sbcs x9, x9, xzr; \
sbc x10, x10, xzr; \
lsl x5, x11, #32; \
add x11, x5, x11; \
mov x5, #-4294967295; \
umulh x5, x5, x11; \
mov x4, #4294967295; \
mul x3, x4, x11; \
umulh x4, x4, x11; \
adds x5, x5, x3; \
adcs x4, x4, x11; \
adc x3, xzr, xzr; \
subs x12, x12, x5; \
sbcs x13, x13, x4; \
sbcs x2, x2, x3; \
sbcs x9, x9, xzr; \
sbcs x10, x10, xzr; \
sbc x11, x11, xzr; \
lsl x5, x12, #32; \
add x12, x5, x12; \
mov x5, #-4294967295; \
umulh x5, x5, x12; \
mov x4, #4294967295; \
mul x3, x4, x12; \
umulh x4, x4, x12; \
adds x5, x5, x3; \
adcs x4, x4, x12; \
adc x3, xzr, xzr; \
subs x13, x13, x5; \
sbcs x2, x2, x4; \
sbcs x9, x9, x3; \
sbcs x10, x10, xzr; \
sbcs x11, x11, xzr; \
sbc x12, x12, xzr; \
lsl x5, x13, #32; \
add x13, x5, x13; \
mov x5, #-4294967295; \
umulh x5, x5, x13; \
mov x4, #4294967295; \
mul x3, x4, x13; \
umulh x4, x4, x13; \
adds x5, x5, x3; \
adcs x4, x4, x13; \
adc x3, xzr, xzr; \
subs x2, x2, x5; \
sbcs x9, x9, x4; \
sbcs x10, x10, x3; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbc x13, x13, xzr; \
adds x2, x2, x14; \
adcs x9, x9, x15; \
adcs x10, x10, x16; \
adcs x11, x11, x17; \
adcs x12, x12, x19; \
adcs x13, x13, x20; \
mov x14, #-4294967295; \
mov x15, #4294967295; \
csel x14, x14, xzr, cs; \
csel x15, x15, xzr, cs; \
cset x16, cs; \
adds x2, x2, x14; \
adcs x9, x9, x15; \
adcs x10, x10, x16; \
adcs x11, x11, xzr; \
adcs x12, x12, xzr; \
adc x13, x13, xzr; \
stp x2, x9, [P0]; \
stp x10, x11, [P0+16]; \
stp x12, x13, [P0+32]
// Corresponds exactly to bignum_sub_p384
#define sub_p384(P0,P1,P2) \
ldp x5, x6, [P1]; \
ldp x4, x3, [P2]; \
subs x5, x5, x4; \
sbcs x6, x6, x3; \
ldp x7, x8, [P1+16]; \
ldp x4, x3, [P2+16]; \
sbcs x7, x7, x4; \
sbcs x8, x8, x3; \
ldp x9, x10, [P1+32]; \
ldp x4, x3, [P2+32]; \
sbcs x9, x9, x4; \
sbcs x10, x10, x3; \
csetm x3, lo; \
mov x4, #4294967295; \
and x4, x4, x3; \
adds x5, x5, x4; \
eor x4, x4, x3; \
adcs x6, x6, x4; \
mov x4, #-2; \
and x4, x4, x3; \
adcs x7, x7, x4; \
adcs x8, x8, x3; \
adcs x9, x9, x3; \
adc x10, x10, x3; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]
S2N_BN_SYMBOL(p384_montjadd_alt):
// Save regs and make room on stack for temporary variables
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
sub sp, sp, NSPACE
// Move the input arguments to stable places
mov input_z, x0
mov input_x, x1
mov input_y, x2
// Main code, just a sequence of basic field operations
// 12 * multiply + 4 * square + 7 * subtract
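// In outline (everything mod p_384, operands in Montgomery form), with
//   zz1 = z_1^2, zz2 = z_2^2, x1a = x_1 * zz2, x2a = x_2 * zz1,
//   y1a = y_1 * z_2 * zz2, y2a = y_2 * z_1 * zz1,
//   xd = x2a - x1a, yd = y2a - y1a
// the output is the usual Jacobian sum
//   x_3 = yd^2 - xd^2 * (x1a + x2a)
//   y_3 = yd * (x1a * xd^2 - x_3) - y1a * xd^3
//   z_3 = xd * z_1 * z_2
// (a sketch of what the temporaries below compute; the degenerate cases
// P1 = 0 or P2 = 0 are handled by the multiplexing at the end).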
amontsqr_p384(z1sq,z_1)
amontsqr_p384(z2sq,z_2)
montmul_p384(y1a,z_2,y_1)
montmul_p384(y2a,z_1,y_2)
montmul_p384(x2a,z1sq,x_2)
montmul_p384(x1a,z2sq,x_1)
montmul_p384(y2a,z1sq,y2a)
montmul_p384(y1a,z2sq,y1a)
sub_p384(xd,x2a,x1a)
sub_p384(yd,y2a,y1a)
amontsqr_p384(zz,xd)
montsqr_p384(ww,yd)
montmul_p384(zzx1,zz,x1a)
montmul_p384(zzx2,zz,x2a)
sub_p384(resx,ww,zzx1)
sub_p384(t1,zzx2,zzx1)
montmul_p384(xd,xd,z_1)
sub_p384(resx,resx,zzx2)
sub_p384(t2,zzx1,resx)
montmul_p384(t1,t1,y1a)
montmul_p384(resz,xd,z_2)
montmul_p384(t2,yd,t2)
sub_p384(resy,t2,t1)
// Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0
// The condition codes get set by a comparison (P2 != 0) - (P1 != 0)
// So "HI" <=> CF /\ ~ZF <=> P1 = 0 /\ ~(P2 = 0)
// and "LO" <=> ~CF <=> ~(P1 = 0) /\ P2 = 0
ldp x0, x1, [z_1]
ldp x2, x3, [z_1+16]
ldp x4, x5, [z_1+32]
orr x20, x0, x1
orr x21, x2, x3
orr x22, x4, x5
orr x20, x20, x21
orr x20, x20, x22
cmp x20, xzr
cset x20, ne
ldp x6, x7, [z_2]
ldp x8, x9, [z_2+16]
ldp x10, x11, [z_2+32]
orr x21, x6, x7
orr x22, x8, x9
orr x23, x10, x11
orr x21, x21, x22
orr x21, x21, x23
cmp x21, xzr
cset x21, ne
cmp x21, x20
// Multiplex the outputs accordingly, re-using the z's in registers
ldp x12, x13, [resz]
csel x12, x0, x12, lo
csel x13, x1, x13, lo
csel x12, x6, x12, hi
csel x13, x7, x13, hi
ldp x14, x15, [resz+16]
csel x14, x2, x14, lo
csel x15, x3, x15, lo
csel x14, x8, x14, hi
csel x15, x9, x15, hi
ldp x16, x17, [resz+32]
csel x16, x4, x16, lo
csel x17, x5, x17, lo
csel x16, x10, x16, hi
csel x17, x11, x17, hi
ldp x20, x21, [x_1]
ldp x0, x1, [resx]
csel x0, x20, x0, lo
csel x1, x21, x1, lo
ldp x20, x21, [x_2]
csel x0, x20, x0, hi
csel x1, x21, x1, hi
ldp x20, x21, [x_1+16]
ldp x2, x3, [resx+16]
csel x2, x20, x2, lo
csel x3, x21, x3, lo
ldp x20, x21, [x_2+16]
csel x2, x20, x2, hi
csel x3, x21, x3, hi
ldp x20, x21, [x_1+32]
ldp x4, x5, [resx+32]
csel x4, x20, x4, lo
csel x5, x21, x5, lo
ldp x20, x21, [x_2+32]
csel x4, x20, x4, hi
csel x5, x21, x5, hi
ldp x20, x21, [y_1]
ldp x6, x7, [resy]
csel x6, x20, x6, lo
csel x7, x21, x7, lo
ldp x20, x21, [y_2]
csel x6, x20, x6, hi
csel x7, x21, x7, hi
ldp x20, x21, [y_1+16]
ldp x8, x9, [resy+16]
csel x8, x20, x8, lo
csel x9, x21, x9, lo
ldp x20, x21, [y_2+16]
csel x8, x20, x8, hi
csel x9, x21, x9, hi
ldp x20, x21, [y_1+32]
ldp x10, x11, [resy+32]
csel x10, x20, x10, lo
csel x11, x21, x11, lo
ldp x20, x21, [y_2+32]
csel x10, x20, x10, hi
csel x11, x21, x11, hi
// Finally store back the multiplexed values
stp x0, x1, [x_3]
stp x2, x3, [x_3+16]
stp x4, x5, [x_3+32]
stp x6, x7, [y_3]
stp x8, x9, [y_3+16]
stp x10, x11, [y_3+32]
stp x12, x13, [z_3]
stp x14, x15, [z_3+16]
stp x16, x17, [z_3+32]
// Restore stack and registers
add sp, sp, NSPACE
ldp x25, x26, [sp], 16
ldp x23, x24, [sp], 16
ldp x21, x22, [sp], 16
ldp x19, x20, [sp], 16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
marvin-hansen/iggy-streaming-system | 2,444 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_add_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Add modulo p_384, z := (x + y) mod p_384, assuming x and y reduced
// Inputs x[6], y[6]; output z[6]
//
// extern void bignum_add_p384
// (uint64_t z[static 6], uint64_t x[static 6], uint64_t y[static 6]);
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_add_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_add_p384)
.text
.balign 4
#define z x0
#define x x1
#define y x2
#define c x3
#define l x4
#define d0 x5
#define d1 x6
#define d2 x7
#define d3 x8
#define d4 x9
#define d5 x10
S2N_BN_SYMBOL(bignum_add_p384):
// First just add the numbers as c + [d5; d4; d3; d2; d1; d0]
ldp d0, d1, [x]
ldp l, c, [y]
adds d0, d0, l
adcs d1, d1, c
ldp d2, d3, [x, #16]
ldp l, c, [y, #16]
adcs d2, d2, l
adcs d3, d3, c
ldp d4, d5, [x, #32]
ldp l, c, [y, #32]
adcs d4, d4, l
adcs d5, d5, c
adc c, xzr, xzr
// Now compare [d5; d4; d3; d2; d1; d0] with p_384
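// (The top three words of p_384 are all ones; subtracting an all-ones word
// with borrow gives d - (2^64 - 1) - (1 - C) = d + C - 2^64, whose carry-out
// is the same as that of d + C, which is why the comparison switches from
// sbcs to adcs against xzr for the top three words.)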
mov l, #0x00000000ffffffff
subs xzr, d0, l
mov l, #0xffffffff00000000
sbcs xzr, d1, l
mov l, #0xfffffffffffffffe
sbcs xzr, d2, l
adcs xzr, d3, xzr
adcs xzr, d4, xzr
adcs xzr, d5, xzr
// Now CF is set (because of inversion) if (x + y) % 2^384 >= p_384
// Thus we want to correct if either this is set or the original carry c was
adcs c, c, xzr
csetm c, ne
// Now correct by subtracting masked p_384
mov l, #0x00000000ffffffff
and l, l, c
subs d0, d0, l
eor l, l, c
sbcs d1, d1, l
mov l, #0xfffffffffffffffe
and l, l, c
sbcs d2, d2, l
sbcs d3, d3, c
sbcs d4, d4, c
sbc d5, d5, c
// Store the result
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
marvin-hansen/iggy-streaming-system | 1,246 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_nonzero_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// 384-bit nonzeroness test, returning 1 if x is nonzero, 0 if x is zero
// Input x[6]; output function return
//
// extern uint64_t bignum_nonzero_6(uint64_t x[static 6]);
//
// Standard ARM ABI: X0 = x, returns X0
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_nonzero_6)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_nonzero_6)
.text
.balign 4
#define x x0
#define a x1
#define d x2
#define c x3
S2N_BN_SYMBOL(bignum_nonzero_6):
// Generate a = an OR of all the words in the bignum
ldp a, d, [x]
orr a, a, d
ldp c, d, [x, #16]
orr c, c, d
orr a, a, c
ldp c, d, [x, #32]
orr c, c, d
orr a, a, c
// Set a standard C condition based on whether a is nonzero
cmp a, xzr
cset x0, ne
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
marvin-hansen/iggy-streaming-system | 42,887 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/p384_montjdouble_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point doubling on NIST curve P-384 in Montgomery-Jacobian coordinates
//
// extern void p384_montjdouble_alt
// (uint64_t p3[static 18],uint64_t p1[static 18]);
//
// Does p3 := 2 * p1 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard ARM ABI: X0 = p3, X1 = p1
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjdouble_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjdouble_alt)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 48
// Stable homes for input arguments during main code sequence
#define input_z x23
#define input_x x24
// Pointer-offset pairs for inputs and outputs
#define x_1 input_x, #0
#define y_1 input_x, #NUMSIZE
#define z_1 input_x, #(2*NUMSIZE)
#define x_3 input_z, #0
#define y_3 input_z, #NUMSIZE
#define z_3 input_z, #(2*NUMSIZE)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z2 sp, #(NUMSIZE*0)
#define y2 sp, #(NUMSIZE*1)
#define x2p sp, #(NUMSIZE*2)
#define xy2 sp, #(NUMSIZE*3)
#define y4 sp, #(NUMSIZE*4)
#define t2 sp, #(NUMSIZE*4)
#define dx2 sp, #(NUMSIZE*5)
#define t1 sp, #(NUMSIZE*5)
#define d sp, #(NUMSIZE*6)
#define x4p sp, #(NUMSIZE*6)
#define NSPACE (NUMSIZE*7)
// Corresponds exactly to bignum_montmul_p384_alt
#define montmul_p384(P0,P1,P2) \
ldp x3, x4, [P1]; \
ldp x5, x6, [P2]; \
mul x12, x3, x5; \
umulh x13, x3, x5; \
mul x11, x3, x6; \
umulh x14, x3, x6; \
adds x13, x13, x11; \
ldp x7, x8, [P2+16]; \
mul x11, x3, x7; \
umulh x15, x3, x7; \
adcs x14, x14, x11; \
mul x11, x3, x8; \
umulh x16, x3, x8; \
adcs x15, x15, x11; \
ldp x9, x10, [P2+32]; \
mul x11, x3, x9; \
umulh x17, x3, x9; \
adcs x16, x16, x11; \
mul x11, x3, x10; \
umulh x19, x3, x10; \
adcs x17, x17, x11; \
adc x19, x19, xzr; \
mul x11, x4, x5; \
adds x13, x13, x11; \
mul x11, x4, x6; \
adcs x14, x14, x11; \
mul x11, x4, x7; \
adcs x15, x15, x11; \
mul x11, x4, x8; \
adcs x16, x16, x11; \
mul x11, x4, x9; \
adcs x17, x17, x11; \
mul x11, x4, x10; \
adcs x19, x19, x11; \
cset x20, cs; \
umulh x11, x4, x5; \
adds x14, x14, x11; \
umulh x11, x4, x6; \
adcs x15, x15, x11; \
umulh x11, x4, x7; \
adcs x16, x16, x11; \
umulh x11, x4, x8; \
adcs x17, x17, x11; \
umulh x11, x4, x9; \
adcs x19, x19, x11; \
umulh x11, x4, x10; \
adc x20, x20, x11; \
ldp x3, x4, [P1+16]; \
mul x11, x3, x5; \
adds x14, x14, x11; \
mul x11, x3, x6; \
adcs x15, x15, x11; \
mul x11, x3, x7; \
adcs x16, x16, x11; \
mul x11, x3, x8; \
adcs x17, x17, x11; \
mul x11, x3, x9; \
adcs x19, x19, x11; \
mul x11, x3, x10; \
adcs x20, x20, x11; \
cset x21, cs; \
umulh x11, x3, x5; \
adds x15, x15, x11; \
umulh x11, x3, x6; \
adcs x16, x16, x11; \
umulh x11, x3, x7; \
adcs x17, x17, x11; \
umulh x11, x3, x8; \
adcs x19, x19, x11; \
umulh x11, x3, x9; \
adcs x20, x20, x11; \
umulh x11, x3, x10; \
adc x21, x21, x11; \
mul x11, x4, x5; \
adds x15, x15, x11; \
mul x11, x4, x6; \
adcs x16, x16, x11; \
mul x11, x4, x7; \
adcs x17, x17, x11; \
mul x11, x4, x8; \
adcs x19, x19, x11; \
mul x11, x4, x9; \
adcs x20, x20, x11; \
mul x11, x4, x10; \
adcs x21, x21, x11; \
cset x22, cs; \
umulh x11, x4, x5; \
adds x16, x16, x11; \
umulh x11, x4, x6; \
adcs x17, x17, x11; \
umulh x11, x4, x7; \
adcs x19, x19, x11; \
umulh x11, x4, x8; \
adcs x20, x20, x11; \
umulh x11, x4, x9; \
adcs x21, x21, x11; \
umulh x11, x4, x10; \
adc x22, x22, x11; \
ldp x3, x4, [P1+32]; \
mul x11, x3, x5; \
adds x16, x16, x11; \
mul x11, x3, x6; \
adcs x17, x17, x11; \
mul x11, x3, x7; \
adcs x19, x19, x11; \
mul x11, x3, x8; \
adcs x20, x20, x11; \
mul x11, x3, x9; \
adcs x21, x21, x11; \
mul x11, x3, x10; \
adcs x22, x22, x11; \
cset x2, cs; \
umulh x11, x3, x5; \
adds x17, x17, x11; \
umulh x11, x3, x6; \
adcs x19, x19, x11; \
umulh x11, x3, x7; \
adcs x20, x20, x11; \
umulh x11, x3, x8; \
adcs x21, x21, x11; \
umulh x11, x3, x9; \
adcs x22, x22, x11; \
umulh x11, x3, x10; \
adc x2, x2, x11; \
mul x11, x4, x5; \
adds x17, x17, x11; \
mul x11, x4, x6; \
adcs x19, x19, x11; \
mul x11, x4, x7; \
adcs x20, x20, x11; \
mul x11, x4, x8; \
adcs x21, x21, x11; \
mul x11, x4, x9; \
adcs x22, x22, x11; \
mul x11, x4, x10; \
adcs x2, x2, x11; \
cset x1, cs; \
umulh x11, x4, x5; \
adds x19, x19, x11; \
umulh x11, x4, x6; \
adcs x20, x20, x11; \
umulh x11, x4, x7; \
adcs x21, x21, x11; \
umulh x11, x4, x8; \
adcs x22, x22, x11; \
umulh x11, x4, x9; \
adcs x2, x2, x11; \
umulh x11, x4, x10; \
adc x1, x1, x11; \
lsl x7, x12, #32; \
add x12, x7, x12; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x12; \
mov x6, #0xffffffff; \
mul x5, x6, x12; \
umulh x6, x6, x12; \
adds x7, x7, x5; \
adcs x6, x6, x12; \
adc x5, xzr, xzr; \
subs x13, x13, x7; \
sbcs x14, x14, x6; \
sbcs x15, x15, x5; \
sbcs x16, x16, xzr; \
sbcs x17, x17, xzr; \
sbc x12, x12, xzr; \
lsl x7, x13, #32; \
add x13, x7, x13; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x13; \
mov x6, #0xffffffff; \
mul x5, x6, x13; \
umulh x6, x6, x13; \
adds x7, x7, x5; \
adcs x6, x6, x13; \
adc x5, xzr, xzr; \
subs x14, x14, x7; \
sbcs x15, x15, x6; \
sbcs x16, x16, x5; \
sbcs x17, x17, xzr; \
sbcs x12, x12, xzr; \
sbc x13, x13, xzr; \
lsl x7, x14, #32; \
add x14, x7, x14; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x14; \
mov x6, #0xffffffff; \
mul x5, x6, x14; \
umulh x6, x6, x14; \
adds x7, x7, x5; \
adcs x6, x6, x14; \
adc x5, xzr, xzr; \
subs x15, x15, x7; \
sbcs x16, x16, x6; \
sbcs x17, x17, x5; \
sbcs x12, x12, xzr; \
sbcs x13, x13, xzr; \
sbc x14, x14, xzr; \
lsl x7, x15, #32; \
add x15, x7, x15; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x15; \
mov x6, #0xffffffff; \
mul x5, x6, x15; \
umulh x6, x6, x15; \
adds x7, x7, x5; \
adcs x6, x6, x15; \
adc x5, xzr, xzr; \
subs x16, x16, x7; \
sbcs x17, x17, x6; \
sbcs x12, x12, x5; \
sbcs x13, x13, xzr; \
sbcs x14, x14, xzr; \
sbc x15, x15, xzr; \
lsl x7, x16, #32; \
add x16, x7, x16; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x16; \
mov x6, #0xffffffff; \
mul x5, x6, x16; \
umulh x6, x6, x16; \
adds x7, x7, x5; \
adcs x6, x6, x16; \
adc x5, xzr, xzr; \
subs x17, x17, x7; \
sbcs x12, x12, x6; \
sbcs x13, x13, x5; \
sbcs x14, x14, xzr; \
sbcs x15, x15, xzr; \
sbc x16, x16, xzr; \
lsl x7, x17, #32; \
add x17, x7, x17; \
mov x7, #0xffffffff00000001; \
umulh x7, x7, x17; \
mov x6, #0xffffffff; \
mul x5, x6, x17; \
umulh x6, x6, x17; \
adds x7, x7, x5; \
adcs x6, x6, x17; \
adc x5, xzr, xzr; \
subs x12, x12, x7; \
sbcs x13, x13, x6; \
sbcs x14, x14, x5; \
sbcs x15, x15, xzr; \
sbcs x16, x16, xzr; \
sbc x17, x17, xzr; \
adds x12, x12, x19; \
adcs x13, x13, x20; \
adcs x14, x14, x21; \
adcs x15, x15, x22; \
adcs x16, x16, x2; \
adcs x17, x17, x1; \
adc x10, xzr, xzr; \
mov x11, #0xffffffff00000001; \
adds x19, x12, x11; \
mov x11, #0xffffffff; \
adcs x20, x13, x11; \
mov x11, #0x1; \
adcs x21, x14, x11; \
adcs x22, x15, xzr; \
adcs x2, x16, xzr; \
adcs x1, x17, xzr; \
adcs x10, x10, xzr; \
csel x12, x12, x19, eq; \
csel x13, x13, x20, eq; \
csel x14, x14, x21, eq; \
csel x15, x15, x22, eq; \
csel x16, x16, x2, eq; \
csel x17, x17, x1, eq; \
stp x12, x13, [P0]; \
stp x14, x15, [P0+16]; \
stp x16, x17, [P0+32]
// Corresponds exactly to bignum_montsqr_p384_alt
#define montsqr_p384(P0,P1) \
ldp x2, x3, [P1]; \
mul x9, x2, x3; \
umulh x10, x2, x3; \
ldp x4, x5, [P1+16]; \
mul x8, x2, x4; \
adds x10, x10, x8; \
mul x11, x2, x5; \
mul x8, x3, x4; \
adcs x11, x11, x8; \
umulh x12, x2, x5; \
mul x8, x3, x5; \
adcs x12, x12, x8; \
ldp x6, x7, [P1+32]; \
mul x13, x2, x7; \
mul x8, x3, x6; \
adcs x13, x13, x8; \
umulh x14, x2, x7; \
mul x8, x3, x7; \
adcs x14, x14, x8; \
mul x15, x5, x6; \
adcs x15, x15, xzr; \
umulh x16, x5, x6; \
adc x16, x16, xzr; \
umulh x8, x2, x4; \
adds x11, x11, x8; \
umulh x8, x3, x4; \
adcs x12, x12, x8; \
umulh x8, x3, x5; \
adcs x13, x13, x8; \
umulh x8, x3, x6; \
adcs x14, x14, x8; \
umulh x8, x3, x7; \
adcs x15, x15, x8; \
adc x16, x16, xzr; \
mul x8, x2, x6; \
adds x12, x12, x8; \
mul x8, x4, x5; \
adcs x13, x13, x8; \
mul x8, x4, x6; \
adcs x14, x14, x8; \
mul x8, x4, x7; \
adcs x15, x15, x8; \
mul x8, x5, x7; \
adcs x16, x16, x8; \
mul x17, x6, x7; \
adcs x17, x17, xzr; \
umulh x19, x6, x7; \
adc x19, x19, xzr; \
umulh x8, x2, x6; \
adds x13, x13, x8; \
umulh x8, x4, x5; \
adcs x14, x14, x8; \
umulh x8, x4, x6; \
adcs x15, x15, x8; \
umulh x8, x4, x7; \
adcs x16, x16, x8; \
umulh x8, x5, x7; \
adcs x17, x17, x8; \
adc x19, x19, xzr; \
adds x9, x9, x9; \
adcs x10, x10, x10; \
adcs x11, x11, x11; \
adcs x12, x12, x12; \
adcs x13, x13, x13; \
adcs x14, x14, x14; \
adcs x15, x15, x15; \
adcs x16, x16, x16; \
adcs x17, x17, x17; \
adcs x19, x19, x19; \
cset x20, hs; \
umulh x8, x2, x2; \
mul x2, x2, x2; \
adds x9, x9, x8; \
mul x8, x3, x3; \
adcs x10, x10, x8; \
umulh x8, x3, x3; \
adcs x11, x11, x8; \
mul x8, x4, x4; \
adcs x12, x12, x8; \
umulh x8, x4, x4; \
adcs x13, x13, x8; \
mul x8, x5, x5; \
adcs x14, x14, x8; \
umulh x8, x5, x5; \
adcs x15, x15, x8; \
mul x8, x6, x6; \
adcs x16, x16, x8; \
umulh x8, x6, x6; \
adcs x17, x17, x8; \
mul x8, x7, x7; \
adcs x19, x19, x8; \
umulh x8, x7, x7; \
adc x20, x20, x8; \
lsl x5, x2, #32; \
add x2, x5, x2; \
mov x5, #-4294967295; \
umulh x5, x5, x2; \
mov x4, #4294967295; \
mul x3, x4, x2; \
umulh x4, x4, x2; \
adds x5, x5, x3; \
adcs x4, x4, x2; \
adc x3, xzr, xzr; \
subs x9, x9, x5; \
sbcs x10, x10, x4; \
sbcs x11, x11, x3; \
sbcs x12, x12, xzr; \
sbcs x13, x13, xzr; \
sbc x2, x2, xzr; \
lsl x5, x9, #32; \
add x9, x5, x9; \
mov x5, #-4294967295; \
umulh x5, x5, x9; \
mov x4, #4294967295; \
mul x3, x4, x9; \
umulh x4, x4, x9; \
adds x5, x5, x3; \
adcs x4, x4, x9; \
adc x3, xzr, xzr; \
subs x10, x10, x5; \
sbcs x11, x11, x4; \
sbcs x12, x12, x3; \
sbcs x13, x13, xzr; \
sbcs x2, x2, xzr; \
sbc x9, x9, xzr; \
lsl x5, x10, #32; \
add x10, x5, x10; \
mov x5, #-4294967295; \
umulh x5, x5, x10; \
mov x4, #4294967295; \
mul x3, x4, x10; \
umulh x4, x4, x10; \
adds x5, x5, x3; \
adcs x4, x4, x10; \
adc x3, xzr, xzr; \
subs x11, x11, x5; \
sbcs x12, x12, x4; \
sbcs x13, x13, x3; \
sbcs x2, x2, xzr; \
sbcs x9, x9, xzr; \
sbc x10, x10, xzr; \
lsl x5, x11, #32; \
add x11, x5, x11; \
mov x5, #-4294967295; \
umulh x5, x5, x11; \
mov x4, #4294967295; \
mul x3, x4, x11; \
umulh x4, x4, x11; \
adds x5, x5, x3; \
adcs x4, x4, x11; \
adc x3, xzr, xzr; \
subs x12, x12, x5; \
sbcs x13, x13, x4; \
sbcs x2, x2, x3; \
sbcs x9, x9, xzr; \
sbcs x10, x10, xzr; \
sbc x11, x11, xzr; \
lsl x5, x12, #32; \
add x12, x5, x12; \
mov x5, #-4294967295; \
umulh x5, x5, x12; \
mov x4, #4294967295; \
mul x3, x4, x12; \
umulh x4, x4, x12; \
adds x5, x5, x3; \
adcs x4, x4, x12; \
adc x3, xzr, xzr; \
subs x13, x13, x5; \
sbcs x2, x2, x4; \
sbcs x9, x9, x3; \
sbcs x10, x10, xzr; \
sbcs x11, x11, xzr; \
sbc x12, x12, xzr; \
lsl x5, x13, #32; \
add x13, x5, x13; \
mov x5, #-4294967295; \
umulh x5, x5, x13; \
mov x4, #4294967295; \
mul x3, x4, x13; \
umulh x4, x4, x13; \
adds x5, x5, x3; \
adcs x4, x4, x13; \
adc x3, xzr, xzr; \
subs x2, x2, x5; \
sbcs x9, x9, x4; \
sbcs x10, x10, x3; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbc x13, x13, xzr; \
adds x2, x2, x14; \
adcs x9, x9, x15; \
adcs x10, x10, x16; \
adcs x11, x11, x17; \
adcs x12, x12, x19; \
adcs x13, x13, x20; \
adc x6, xzr, xzr; \
mov x8, #-4294967295; \
adds x14, x2, x8; \
mov x8, #4294967295; \
adcs x15, x9, x8; \
mov x8, #1; \
adcs x16, x10, x8; \
adcs x17, x11, xzr; \
adcs x19, x12, xzr; \
adcs x20, x13, xzr; \
adcs x6, x6, xzr; \
csel x2, x2, x14, eq; \
csel x9, x9, x15, eq; \
csel x10, x10, x16, eq; \
csel x11, x11, x17, eq; \
csel x12, x12, x19, eq; \
csel x13, x13, x20, eq; \
stp x2, x9, [P0]; \
stp x10, x11, [P0+16]; \
stp x12, x13, [P0+32]
// Corresponds exactly to bignum_sub_p384
#define sub_p384(P0,P1,P2) \
ldp x5, x6, [P1]; \
ldp x4, x3, [P2]; \
subs x5, x5, x4; \
sbcs x6, x6, x3; \
ldp x7, x8, [P1+16]; \
ldp x4, x3, [P2+16]; \
sbcs x7, x7, x4; \
sbcs x8, x8, x3; \
ldp x9, x10, [P1+32]; \
ldp x4, x3, [P2+32]; \
sbcs x9, x9, x4; \
sbcs x10, x10, x3; \
csetm x3, lo; \
mov x4, #4294967295; \
and x4, x4, x3; \
adds x5, x5, x4; \
eor x4, x4, x3; \
adcs x6, x6, x4; \
mov x4, #-2; \
and x4, x4, x3; \
adcs x7, x7, x4; \
adcs x8, x8, x3; \
adcs x9, x9, x3; \
adc x10, x10, x3; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]
// Corresponds exactly to bignum_add_p384
#define add_p384(P0,P1,P2) \
ldp x5, x6, [P1]; \
ldp x4, x3, [P2]; \
adds x5, x5, x4; \
adcs x6, x6, x3; \
ldp x7, x8, [P1+16]; \
ldp x4, x3, [P2+16]; \
adcs x7, x7, x4; \
adcs x8, x8, x3; \
ldp x9, x10, [P1+32]; \
ldp x4, x3, [P2+32]; \
adcs x9, x9, x4; \
adcs x10, x10, x3; \
adc x3, xzr, xzr; \
mov x4, #0xffffffff; \
cmp x5, x4; \
mov x4, #0xffffffff00000000; \
sbcs xzr, x6, x4; \
mov x4, #0xfffffffffffffffe; \
sbcs xzr, x7, x4; \
adcs xzr, x8, xzr; \
adcs xzr, x9, xzr; \
adcs xzr, x10, xzr; \
adcs x3, x3, xzr; \
csetm x3, ne; \
mov x4, #0xffffffff; \
and x4, x4, x3; \
subs x5, x5, x4; \
eor x4, x4, x3; \
sbcs x6, x6, x4; \
mov x4, #0xfffffffffffffffe; \
and x4, x4, x3; \
sbcs x7, x7, x4; \
sbcs x8, x8, x3; \
sbcs x9, x9, x3; \
sbc x10, x10, x3; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]
// P0 = 4 * P1 - P2
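// The carry folding in this and the following macros rests on the fact that
// p_384 = 2^384 - 2^128 - 2^96 + 2^32 - 1, hence
// 2^384 == 2^128 + 2^96 - 2^32 + 1 (mod p_384): a small word h overflowing
// bit 384 can be reabsorbed by adding h * (2^128 + 2^96 - 2^32 + 1) across
// the six words, which is exactly what the lsl/subs/sbc sequence plus the
// extra addition of h construct before the final masked correction.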
#define cmsub41_p384(P0,P1,P2) \
ldp x1, x2, [P1]; \
ldp x3, x4, [P1+16]; \
ldp x5, x6, [P1+32]; \
lsl x0, x1, #2; \
ldp x7, x8, [P2]; \
subs x0, x0, x7; \
extr x1, x2, x1, #62; \
sbcs x1, x1, x8; \
ldp x7, x8, [P2+16]; \
extr x2, x3, x2, #62; \
sbcs x2, x2, x7; \
extr x3, x4, x3, #62; \
sbcs x3, x3, x8; \
extr x4, x5, x4, #62; \
ldp x7, x8, [P2+32]; \
sbcs x4, x4, x7; \
extr x5, x6, x5, #62; \
sbcs x5, x5, x8; \
lsr x6, x6, #62; \
adc x6, x6, xzr; \
lsl x7, x6, #32; \
subs x8, x6, x7; \
sbc x7, x7, xzr; \
adds x0, x0, x8; \
adcs x1, x1, x7; \
adcs x2, x2, x6; \
adcs x3, x3, xzr; \
adcs x4, x4, xzr; \
adcs x5, x5, xzr; \
csetm x8, cc; \
mov x9, #0xffffffff; \
and x9, x9, x8; \
adds x0, x0, x9; \
eor x9, x9, x8; \
adcs x1, x1, x9; \
mov x9, #0xfffffffffffffffe; \
and x9, x9, x8; \
adcs x2, x2, x9; \
adcs x3, x3, x8; \
adcs x4, x4, x8; \
adc x5, x5, x8; \
stp x0, x1, [P0]; \
stp x2, x3, [P0+16]; \
stp x4, x5, [P0+32]
// P0 = C * P1 - D * P2
#define cmsub_p384(P0,C,P1,D,P2) \
ldp x0, x1, [P2]; \
mov x6, #0x00000000ffffffff; \
subs x6, x6, x0; \
mov x7, #0xffffffff00000000; \
sbcs x7, x7, x1; \
ldp x0, x1, [P2+16]; \
mov x8, #0xfffffffffffffffe; \
sbcs x8, x8, x0; \
mov x13, #0xffffffffffffffff; \
sbcs x9, x13, x1; \
ldp x0, x1, [P2+32]; \
sbcs x10, x13, x0; \
sbc x11, x13, x1; \
mov x12, D; \
mul x0, x12, x6; \
mul x1, x12, x7; \
mul x2, x12, x8; \
mul x3, x12, x9; \
mul x4, x12, x10; \
mul x5, x12, x11; \
umulh x6, x12, x6; \
umulh x7, x12, x7; \
umulh x8, x12, x8; \
umulh x9, x12, x9; \
umulh x10, x12, x10; \
umulh x12, x12, x11; \
adds x1, x1, x6; \
adcs x2, x2, x7; \
adcs x3, x3, x8; \
adcs x4, x4, x9; \
adcs x5, x5, x10; \
mov x6, #1; \
adc x6, x12, x6; \
ldp x8, x9, [P1]; \
ldp x10, x11, [P1+16]; \
ldp x12, x13, [P1+32]; \
mov x14, C; \
mul x15, x14, x8; \
umulh x8, x14, x8; \
adds x0, x0, x15; \
mul x15, x14, x9; \
umulh x9, x14, x9; \
adcs x1, x1, x15; \
mul x15, x14, x10; \
umulh x10, x14, x10; \
adcs x2, x2, x15; \
mul x15, x14, x11; \
umulh x11, x14, x11; \
adcs x3, x3, x15; \
mul x15, x14, x12; \
umulh x12, x14, x12; \
adcs x4, x4, x15; \
mul x15, x14, x13; \
umulh x13, x14, x13; \
adcs x5, x5, x15; \
adc x6, x6, xzr; \
adds x1, x1, x8; \
adcs x2, x2, x9; \
adcs x3, x3, x10; \
adcs x4, x4, x11; \
adcs x5, x5, x12; \
adcs x6, x6, x13; \
lsl x7, x6, #32; \
subs x8, x6, x7; \
sbc x7, x7, xzr; \
adds x0, x0, x8; \
adcs x1, x1, x7; \
adcs x2, x2, x6; \
adcs x3, x3, xzr; \
adcs x4, x4, xzr; \
adcs x5, x5, xzr; \
csetm x6, cc; \
mov x7, #0xffffffff; \
and x7, x7, x6; \
adds x0, x0, x7; \
eor x7, x7, x6; \
adcs x1, x1, x7; \
mov x7, #0xfffffffffffffffe; \
and x7, x7, x6; \
adcs x2, x2, x7; \
adcs x3, x3, x6; \
adcs x4, x4, x6; \
adc x5, x5, x6; \
stp x0, x1, [P0]; \
stp x2, x3, [P0+16]; \
stp x4, x5, [P0+32]
// A weak version of add that only guarantees sum in 6 digits
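// (It subtracts p_384 only when the raw 6-word sum overflows 2^384, so the
// result is < 2^384 and congruent to the sum mod p_384, though possibly not
// fully reduced; that is enough for the Montgomery multiplication consuming it.)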
#define weakadd_p384(P0,P1,P2) \
ldp x5, x6, [P1]; \
ldp x4, x3, [P2]; \
adds x5, x5, x4; \
adcs x6, x6, x3; \
ldp x7, x8, [P1+16]; \
ldp x4, x3, [P2+16]; \
adcs x7, x7, x4; \
adcs x8, x8, x3; \
ldp x9, x10, [P1+32]; \
ldp x4, x3, [P2+32]; \
adcs x9, x9, x4; \
adcs x10, x10, x3; \
csetm x3, cs; \
mov x4, #0xffffffff; \
and x4, x4, x3; \
subs x5, x5, x4; \
eor x4, x4, x3; \
sbcs x6, x6, x4; \
mov x4, #0xfffffffffffffffe; \
and x4, x4, x3; \
sbcs x7, x7, x4; \
sbcs x8, x8, x3; \
sbcs x9, x9, x3; \
sbc x10, x10, x3; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]
// P0 = 3 * P1 - 8 * P2
#define cmsub38_p384(P0,P1,P2) \
ldp x0, x1, [P2]; \
mov x6, #0x00000000ffffffff; \
subs x6, x6, x0; \
mov x7, #0xffffffff00000000; \
sbcs x7, x7, x1; \
ldp x0, x1, [P2+16]; \
mov x8, #0xfffffffffffffffe; \
sbcs x8, x8, x0; \
mov x13, #0xffffffffffffffff; \
sbcs x9, x13, x1; \
ldp x0, x1, [P2+32]; \
sbcs x10, x13, x0; \
sbc x11, x13, x1; \
lsl x0, x6, #3; \
extr x1, x7, x6, #61; \
extr x2, x8, x7, #61; \
extr x3, x9, x8, #61; \
extr x4, x10, x9, #61; \
extr x5, x11, x10, #61; \
lsr x6, x11, #61; \
add x6, x6, #1; \
ldp x8, x9, [P1]; \
ldp x10, x11, [P1+16]; \
ldp x12, x13, [P1+32]; \
mov x14, 3; \
mul x15, x14, x8; \
umulh x8, x14, x8; \
adds x0, x0, x15; \
mul x15, x14, x9; \
umulh x9, x14, x9; \
adcs x1, x1, x15; \
mul x15, x14, x10; \
umulh x10, x14, x10; \
adcs x2, x2, x15; \
mul x15, x14, x11; \
umulh x11, x14, x11; \
adcs x3, x3, x15; \
mul x15, x14, x12; \
umulh x12, x14, x12; \
adcs x4, x4, x15; \
mul x15, x14, x13; \
umulh x13, x14, x13; \
adcs x5, x5, x15; \
adc x6, x6, xzr; \
adds x1, x1, x8; \
adcs x2, x2, x9; \
adcs x3, x3, x10; \
adcs x4, x4, x11; \
adcs x5, x5, x12; \
adcs x6, x6, x13; \
lsl x7, x6, #32; \
subs x8, x6, x7; \
sbc x7, x7, xzr; \
adds x0, x0, x8; \
adcs x1, x1, x7; \
adcs x2, x2, x6; \
adcs x3, x3, xzr; \
adcs x4, x4, xzr; \
adcs x5, x5, xzr; \
csetm x6, cc; \
mov x7, #0xffffffff; \
and x7, x7, x6; \
adds x0, x0, x7; \
eor x7, x7, x6; \
adcs x1, x1, x7; \
mov x7, #0xfffffffffffffffe; \
and x7, x7, x6; \
adcs x2, x2, x7; \
adcs x3, x3, x6; \
adcs x4, x4, x6; \
adc x5, x5, x6; \
stp x0, x1, [P0]; \
stp x2, x3, [P0+16]; \
stp x4, x5, [P0+32]
S2N_BN_SYMBOL(p384_montjdouble_alt):
// Save regs and make room on stack for temporary variables
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
sub sp, sp, NSPACE
// Move the input arguments to stable places
mov input_z, x0
mov input_x, x1
// Main code, just a sequence of basic field operations
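// In outline (mod p_384, curve coefficient a = -3): with
//   m = 3 * (x + z^2) * (x - z^2),
//   x' = m^2 - 8 * x * y^2,  y' = m * (4 * x * y^2 - x') - 8 * y^4,  z' = 2 * y * z,
// realized below via x2p = (x + z^2)(x - z^2), d = 12 * x * y^2 - 9 * x2p^2,
// x' = 4 * x * y^2 - d and y' = 3 * d * x2p - 8 * y^4 (a sketch of the
// standard Jacobian doubling; the temporaries match the comments that follow).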
// z2 = z^2
// y2 = y^2
montsqr_p384(z2,z_1)
montsqr_p384(y2,y_1)
// x2p = x^2 - z^4 = (x + z^2) * (x - z^2)
weakadd_p384(t1,x_1,z2)
sub_p384(t2,x_1,z2)
montmul_p384(x2p,t1,t2)
// t1 = y + z
// x4p = x2p^2
// xy2 = x * y^2
add_p384(t1,y_1,z_1)
montsqr_p384(x4p,x2p)
montmul_p384(xy2,x_1,y2)
// t2 = (y + z)^2
montsqr_p384(t2,t1)
// d = 12 * xy2 - 9 * x4p
// t1 = y^2 + 2 * y * z
cmsub_p384(d,12,xy2,9,x4p)
sub_p384(t1,t2,z2)
// y4 = y^4
montsqr_p384(y4,y2)
// z_3' = 2 * y * z
// dx2 = d * x2p
sub_p384(z_3,t1,y2)
montmul_p384(dx2,d,x2p)
// x' = 4 * xy2 - d
cmsub41_p384(x_3,xy2,d)
// y' = 3 * dx2 - 8 * y4
cmsub38_p384(y_3,dx2,y4)
// Restore stack and registers
add sp, sp, NSPACE
ldp x23, x24, [sp], 16
ldp x21, x22, [sp], 16
ldp x19, x20, [sp], 16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
marvin-hansen/iggy-streaming-system | 4,398 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_mod_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Reduce modulo field characteristic, z := x mod p_384
// Input x[k]; output z[6]
//
// extern void bignum_mod_p384
// (uint64_t z[static 6], uint64_t k, uint64_t *x);
//
// Standard ARM ABI: X0 = z, X1 = k, X2 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_p384)
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_p384_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_p384_alt)
.text
.balign 4
#define z x0
#define k x1
#define x x2
#define m0 x3
#define m1 x4
#define m2 x5
#define m3 x6
#define m4 x7
#define m5 x8
#define t0 x9
#define t1 x10
#define t2 x11
#define t3 x12
#define t4 x13
#define t5 x14
#define n0 x15
#define n1 x16
#define n2 x17
S2N_BN_SYMBOL(bignum_mod_p384):
S2N_BN_SYMBOL(bignum_mod_p384_alt):
// If the input is already <= 5 words long, go to a trivial "copy" path
cmp k, #6
bcc short
// Otherwise load the top 6 digits (top-down) and reduce k by 6
sub k, k, #6
lsl t0, k, #3
add t0, t0, x
ldp m4, m5, [t0, #32]
ldp m2, m3, [t0, #16]
ldp m0, m1, [t0]
// Load the complicated lower three words of p_384 = [-1;-1;-1;n2;n1;n0]
mov n0, #0x00000000ffffffff
mov n1, #0xffffffff00000000
mov n2, #0xfffffffffffffffe
// Reduce the top 6 digits mod p_384 (a conditional subtraction of p_384)
subs t0, m0, n0
sbcs t1, m1, n1
sbcs t2, m2, n2
adcs t3, m3, xzr
adcs t4, m4, xzr
adcs t5, m5, xzr
csel m0, m0, t0, cc
csel m1, m1, t1, cc
csel m2, m2, t2, cc
csel m3, m3, t3, cc
csel m4, m4, t4, cc
csel m5, m5, t5, cc
// Now do (k-6) iterations of 7->6 word modular reduction
cbz k, writeback
loop:
// Decrement k and load the next digit as t5. We now want to reduce
// [m5;m4;m3;m2;m1;m0;t5] |-> [m5;m4;m3;m2;m1;m0]; the shuffling downwards is
// absorbed into the various ALU operations
sub k, k, #1
ldr t5, [x, k, lsl #3]
// Initial quotient approximation q = min (h + 1) (2^64 - 1)
adds m5, m5, #1
csetm t3, cs
add m5, m5, t3
orn n1, xzr, t3
sub t2, m5, #1
sub t1, xzr, m5
// Correction term [m5;t2;t1;t0] = q * (2^384 - p_384), using m5 as a temp
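// (Here 2^384 - p_384 = 2^128 + 2^96 - 2^32 + 1; the lsl/extr/lsr sequence
// below builds q * (2^96 - 2^32) in [t2;t1;t0], and the two extra additions
// of q (= m5) supply the remaining q and q * 2^128 terms.)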
lsl t0, t1, #32
extr t1, t2, t1, #32
lsr t2, t2, #32
adds t0, t0, m5
adcs t1, t1, xzr
adcs t2, t2, m5
adc m5, xzr, xzr
// Addition to the initial value
adds t0, t5, t0
adcs t1, m0, t1
adcs t2, m1, t2
adcs t3, m2, m5
adcs t4, m3, xzr
adcs t5, m4, xzr
adc n1, n1, xzr
// Use the net top word of the 7-word answer (now in n1) for the masked correction
and m5, n0, n1
adds m0, t0, m5
eor m5, m5, n1
adcs m1, t1, m5
and m5, n2, n1
adcs m2, t2, m5
adcs m3, t3, n1
adcs m4, t4, n1
adc m5, t5, n1
cbnz k, loop
// Finally write back [m5;m4;m3;m2;m1;m0] and return
writeback:
stp m0, m1, [z]
stp m2, m3, [z, #16]
stp m4, m5, [z, #32]
ret
// Short case: just copy the input with zero-padding
short:
mov m0, xzr
mov m1, xzr
mov m2, xzr
mov m3, xzr
mov m4, xzr
mov m5, xzr
cbz k, writeback
ldr m0, [x]
subs k, k, #1
beq writeback
ldr m1, [x, #8]
subs k, k, #1
beq writeback
ldr m2, [x, #16]
subs k, k, #1
beq writeback
ldr m3, [x, #24]
subs k, k, #1
beq writeback
ldr m4, [x, #32]
b writeback
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
marvin-hansen/iggy-streaming-system | 2,459 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_optneg_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Optionally negate modulo p_384, z := (-x) mod p_384 (if p nonzero) or
// z := x (if p zero), assuming x reduced
// Inputs p, x[6]; output z[6]
//
// extern void bignum_optneg_p384
// (uint64_t z[static 6], uint64_t p, uint64_t x[static 6]);
//
// Standard ARM ABI: X0 = z, X1 = p, X2 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_optneg_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_optneg_p384)
.text
.balign 4
#define z x0
#define p x1
#define x x2
#define d0 x3
#define d1 x4
#define d2 x5
#define d3 x6
#define d4 x7
#define d5 x8
#define n0 x9
#define n1 x10
#define n2 x11
#define n3 x12
#define n4 x13
#define n5 x14
S2N_BN_SYMBOL(bignum_optneg_p384):
// Load the 6 digits of x
ldp d0, d1, [x]
ldp d2, d3, [x, #16]
ldp d4, d5, [x, #32]
// Adjust p by zeroing it if the input is zero (to avoid giving -0 = p, which
// is not strictly reduced even though it's correct modulo p)
orr n0, d0, d1
orr n1, d2, d3
orr n2, d4, d5
orr n3, n0, n1
orr n4, n2, n3
cmp n4, #0
csel p, xzr, p, eq
// Load the complicated lower three words of p_384 = [-1;-1;-1;n2;n1;n0] and -1
mov n0, #0x00000000ffffffff
mov n1, #0xffffffff00000000
mov n2, #0xfffffffffffffffe
mov n5, #0xffffffffffffffff
// Do the subtraction, which by hypothesis does not underflow
subs n0, n0, d0
sbcs n1, n1, d1
sbcs n2, n2, d2
sbcs n3, n5, d3
sbcs n4, n5, d4
sbcs n5, n5, d5
// Set condition code if original x is nonzero and p was nonzero
cmp p, #0
// Hence multiplex and write back
csel n0, n0, d0, ne
csel n1, n1, d1, ne
csel n2, n2, d2, ne
csel n3, n3, d3, ne
csel n4, n4, d4, ne
csel n5, n5, d5, ne
stp n0, n1, [z]
stp n2, n3, [z, #16]
stp n4, n5, [z, #32]
// Return
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
marvin-hansen/iggy-streaming-system | 1,950 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_half_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Halve modulo p_384, z := (x / 2) mod p_384, assuming x reduced
// Input x[6]; output z[6]
//
// extern void bignum_half_p384
// (uint64_t z[static 6], uint64_t x[static 6]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_half_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_half_p384)
.text
.balign 4
#define z x0
#define x x1
#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x6
#define d5 x7
#define d6 x8
#define d7 x9
#define m x10
#define n x11
S2N_BN_SYMBOL(bignum_half_p384):
// Load the 6 digits of x
ldp d0, d1, [x]
ldp d2, d3, [x, #16]
ldp d4, d5, [x, #32]
// Get a bitmask corresponding to the lowest bit of the input
and m, d0, #1
neg m, m
// Do a masked addition of p_384, catching carry in a 7th word
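// (Since p_384 is odd, x + p_384 is even exactly when x is odd, so the
// one-bit right shift of the 7-word sum below yields (x * 2^-1) mod p_384,
// and the result stays below p_384, whether or not the addition happened.)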
and n, m, #0x00000000ffffffff
adds d0, d0, n
and n, m, #0xffffffff00000000
adcs d1, d1, n
and n, m, #0xfffffffffffffffe
adcs d2, d2, n
adcs d3, d3, m
adcs d4, d4, m
adcs d5, d5, m
adc d6, xzr, xzr
// Now shift that sum right one place
extr d0, d1, d0, #1
extr d1, d2, d1, #1
extr d2, d3, d2, #1
extr d3, d4, d3, #1
extr d4, d5, d4, #1
extr d5, d6, d5, #1
// Store back
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
// Return
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
marvin-hansen/iggy-streaming-system | 282,429 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/p384_montjscalarmul.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery-Jacobian form scalar multiplication for P-384
// Input scalar[6], point[18]; output res[18]
//
// extern void p384_montjscalarmul
// (uint64_t res[static 18],
// uint64_t scalar[static 6],
// uint64_t point[static 18]);
//
// This function is a variant of its affine point version p384_scalarmul.
// Here, input and output points are assumed to be in Jacobian form with
// their coordinates in the Montgomery domain. Thus, if priming indicates
// Montgomery form, x' = (2^384 * x) mod p_384 etc., each point argument
// is a triple (x',y',z') representing the affine point (x/z^2,y/z^3) when
// z' is nonzero or the point at infinity (group identity) if z' = 0.
//
// Given scalar = n and point = P, assumed to be on the NIST elliptic
// curve P-384, returns a representation of n * P. If the result is the
// point at infinity (either because the input point was or because the
// scalar was a multiple of p_384) then the output is guaranteed to
// represent the point at infinity, i.e. to have its z coordinate zero.
//
// Standard ARM ABI: X0 = res, X1 = scalar, X2 = point
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjscalarmul)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjscalarmul)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 48
#define JACSIZE (3*NUMSIZE)
// Safe copies of input res and additional values in variables.
#define bf x22
#define sgn x23
#define j x24
#define res x25
// Intermediate variables on the stack.
// The table is 16 entries, each of size JACSIZE = 3 * NUMSIZE
#define scalarb sp, #(0*NUMSIZE)
#define acc sp, #(1*NUMSIZE)
#define tabent sp, #(4*NUMSIZE)
#define tab sp, #(7*NUMSIZE)
#define NSPACE #(55*NUMSIZE)
// Avoid using .rep for the sake of the BoringSSL/AWS-LC delocator,
// which doesn't accept repetitions, assembler macros etc.
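// Each selectblock(I) compares the digit in bf against I and conditionally
// copies the I-th table entry into x0..x17, so every entry is read whatever
// the digit value, giving a constant-time table lookup.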
#define selectblock(I) \
cmp bf, #(1*I); \
ldp x20, x21, [x19]; \
csel x0, x20, x0, eq; \
csel x1, x21, x1, eq; \
ldp x20, x21, [x19, #16]; \
csel x2, x20, x2, eq; \
csel x3, x21, x3, eq; \
ldp x20, x21, [x19, #32]; \
csel x4, x20, x4, eq; \
csel x5, x21, x5, eq; \
ldp x20, x21, [x19, #48]; \
csel x6, x20, x6, eq; \
csel x7, x21, x7, eq; \
ldp x20, x21, [x19, #64]; \
csel x8, x20, x8, eq; \
csel x9, x21, x9, eq; \
ldp x20, x21, [x19, #80]; \
csel x10, x20, x10, eq; \
csel x11, x21, x11, eq; \
ldp x20, x21, [x19, #96]; \
csel x12, x20, x12, eq; \
csel x13, x21, x13, eq; \
ldp x20, x21, [x19, #112]; \
csel x14, x20, x14, eq; \
csel x15, x21, x15, eq; \
ldp x20, x21, [x19, #128]; \
csel x16, x20, x16, eq; \
csel x17, x21, x17, eq; \
add x19, x19, #JACSIZE
// Loading large constants
#define movbig(nn,n3,n2,n1,n0) \
movz nn, n0; \
movk nn, n1, lsl #16; \
movk nn, n2, lsl #32; \
movk nn, n3, lsl #48
S2N_BN_SYMBOL(p384_montjscalarmul):
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x30, [sp, #-16]!
sub sp, sp, NSPACE
// Preserve the "res" input argument; others get processed early.
mov res, x0
// Reduce the input scalar mod n_384, i.e. conditionally subtract n_384.
// Store it to "scalarb".
ldp x3, x4, [x1]
movbig(x15, #0xecec, #0x196a, #0xccc5, #0x2973)
ldp x5, x6, [x1, #16]
movbig(x16, #0x581a, #0x0db2, #0x48b0, #0xa77a)
ldp x7, x8, [x1, #32]
movbig(x17, #0xc763, #0x4d81, #0xf437, #0x2ddf)
subs x9, x3, x15
sbcs x10, x4, x16
sbcs x11, x5, x17
adcs x12, x6, xzr
adcs x13, x7, xzr
adcs x14, x8, xzr
csel x3, x3, x9, cc
csel x4, x4, x10, cc
csel x5, x5, x11, cc
csel x6, x6, x12, cc
csel x7, x7, x13, cc
csel x8, x8, x14, cc
stp x3, x4, [scalarb]
stp x5, x6, [scalarb+16]
stp x7, x8, [scalarb+32]
// Set the tab[0] table entry to the input point = 1 * P
ldp x10, x11, [x2]
stp x10, x11, [tab]
ldp x12, x13, [x2, #16]
stp x12, x13, [tab+16]
ldp x14, x15, [x2, #32]
stp x14, x15, [tab+32]
ldp x10, x11, [x2, #48]
stp x10, x11, [tab+48]
ldp x12, x13, [x2, #64]
stp x12, x13, [tab+64]
ldp x14, x15, [x2, #80]
stp x14, x15, [tab+80]
ldp x10, x11, [x2, #96]
stp x10, x11, [tab+96]
ldp x12, x13, [x2, #112]
stp x12, x13, [tab+112]
ldp x14, x15, [x2, #128]
stp x14, x15, [tab+128]
// Compute and record tab[1] = 2 * P, ..., tab[15] = 16 * P
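// (Entry i of the table, counting from 1, ends up holding i * P: even
// multiples come from doubling the entry holding half the multiple, odd
// multiples from adding P to the preceding even entry, per the calls below.)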
add x0, tab+JACSIZE*1
add x1, tab
bl p384_montjscalarmul_p384_montjdouble
add x0, tab+JACSIZE*2
add x1, tab+JACSIZE*1
add x2, tab
bl p384_montjscalarmul_p384_montjadd
add x0, tab+JACSIZE*3
add x1, tab+JACSIZE*1
bl p384_montjscalarmul_p384_montjdouble
add x0, tab+JACSIZE*4
add x1, tab+JACSIZE*3
add x2, tab
bl p384_montjscalarmul_p384_montjadd
add x0, tab+JACSIZE*5
add x1, tab+JACSIZE*2
bl p384_montjscalarmul_p384_montjdouble
add x0, tab+JACSIZE*6
add x1, tab+JACSIZE*5
add x2, tab
bl p384_montjscalarmul_p384_montjadd
add x0, tab+JACSIZE*7
add x1, tab+JACSIZE*3
bl p384_montjscalarmul_p384_montjdouble
add x0, tab+JACSIZE*8
add x1, tab+JACSIZE*7
add x2, tab
bl p384_montjscalarmul_p384_montjadd
add x0, tab+JACSIZE*9
add x1, tab+JACSIZE*4
bl p384_montjscalarmul_p384_montjdouble
add x0, tab+JACSIZE*10
add x1, tab+JACSIZE*9
add x2, tab
bl p384_montjscalarmul_p384_montjadd
add x0, tab+JACSIZE*11
add x1, tab+JACSIZE*5
bl p384_montjscalarmul_p384_montjdouble
add x0, tab+JACSIZE*12
add x1, tab+JACSIZE*11
add x2, tab
bl p384_montjscalarmul_p384_montjadd
add x0, tab+JACSIZE*13
add x1, tab+JACSIZE*6
bl p384_montjscalarmul_p384_montjdouble
add x0, tab+JACSIZE*14
add x1, tab+JACSIZE*13
add x2, tab
bl p384_montjscalarmul_p384_montjadd
add x0, tab+JACSIZE*15
add x1, tab+JACSIZE*7
bl p384_montjscalarmul_p384_montjdouble
// Add the recoding constant sum_i(16 * 32^i) to the scalar to allow signed
// digits. The digits of the constant, in lowest-to-highest order, are as
// follows; they are generated dynamically since none is a simple ARM load.
//
// 0x0842108421084210
// 0x1084210842108421
// 0x2108421084210842
// 0x4210842108421084
// 0x8421084210842108
// 0x0842108421084210
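// (Rationale: after this addition every 5-bit window w of the scalar is later
// mapped to the signed digit w - 16 in the range -16..15, so the table only
// needs 1 * P .. 16 * P and negative digits are handled by negating the y
// coordinate of the selected entry; a standard signed-window recoding.)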
ldp x0, x1, [scalarb]
ldp x2, x3, [scalarb+16]
ldp x4, x5, [scalarb+32]
movbig(x8, #0x1084, #0x2108, #0x4210, #0x8421)
adds x0, x0, x8, lsr #1
adcs x1, x1, x8
lsl x8, x8, #1
adcs x2, x2, x8
lsl x8, x8, #1
adcs x3, x3, x8
lsl x8, x8, #1
adcs x4, x4, x8
lsr x8, x8, #4
adcs x5, x5, x8
cset x6, cs
// Record the top bitfield then shift the whole scalar left 4 bits
// to align the top of the next bitfield with the MSB (bits 379..383).
extr bf, x6, x5, #60
extr x5, x5, x4, #60
extr x4, x4, x3, #60
extr x3, x3, x2, #60
extr x2, x2, x1, #60
extr x1, x1, x0, #60
lsl x0, x0, #4
stp x0, x1, [scalarb]
stp x2, x3, [scalarb+16]
stp x4, x5, [scalarb+32]
// Initialize the accumulator to the corresponding entry using constant-time
// lookup in the table. This top digit, uniquely, is not recoded, so there is
// no sign adjustment to make.
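// Each selectblock(i) below conditionally copies table entry i (i.e.
// i * P) into registers x0..x17 when bf = i, so the memory access
// pattern does not depend on the secret digit. A rough C equivalent
// (illustrative only, not part of the build) is:
//
//     for (int i = 1; i <= 16; i++) {
//         uint64_t mask = (uint64_t)0 - (uint64_t)(bf == (uint64_t)i);
//         for (int j = 0; j < 18; j++) acc[j] |= mask & tab[i - 1][j];
//     }
//
// except that the assembly uses flag-setting compares and csel rather
// than relying on a C comparison being constant-time.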
mov x0, xzr
mov x1, xzr
mov x2, xzr
mov x3, xzr
mov x4, xzr
mov x5, xzr
mov x6, xzr
mov x7, xzr
mov x8, xzr
mov x9, xzr
mov x10, xzr
mov x11, xzr
mov x12, xzr
mov x13, xzr
mov x14, xzr
mov x15, xzr
mov x16, xzr
mov x17, xzr
add x19, tab
selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)
selectblock(9)
selectblock(10)
selectblock(11)
selectblock(12)
selectblock(13)
selectblock(14)
selectblock(15)
selectblock(16)
stp x0, x1, [acc]
stp x2, x3, [acc+16]
stp x4, x5, [acc+32]
stp x6, x7, [acc+48]
stp x8, x9, [acc+64]
stp x10, x11, [acc+80]
stp x12, x13, [acc+96]
stp x14, x15, [acc+112]
stp x16, x17, [acc+128]
mov j, #380
// Main loop over size-5 bitfields: double 5 times then add the signed digit.
// At each stage we shift the scalar left by 5 bits so we can simply pick
// the top 5 bits as the bitfield, saving some fiddle over indexing.
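// Schematically, with the scalar kept left-aligned in scalarb:
//
//     for (j = 380; j != 0; j -= 5) {
//         acc = 32 * acc;                  // 5 Jacobian doublings
//         d = top 5 bits of scalar; scalar <<= 5;
//         e = d - 16;                      // signed digit in [-16, 15]
//         acc = acc + e * P;               // table lookup plus optional
//     }                                    // negation of y below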
p384_montjscalarmul_mainloop:
sub j, j, #5
add x0, acc
add x1, acc
bl p384_montjscalarmul_p384_montjdouble
add x0, acc
add x1, acc
bl p384_montjscalarmul_p384_montjdouble
add x0, acc
add x1, acc
bl p384_montjscalarmul_p384_montjdouble
add x0, acc
add x1, acc
bl p384_montjscalarmul_p384_montjdouble
add x0, acc
add x1, acc
bl p384_montjscalarmul_p384_montjdouble
// Choose the bitfield and adjust it to sign and magnitude
ldp x0, x1, [scalarb]
ldp x2, x3, [scalarb+16]
ldp x4, x5, [scalarb+32]
lsr bf, x5, #59
extr x5, x5, x4, #59
extr x4, x4, x3, #59
extr x3, x3, x2, #59
extr x2, x2, x1, #59
extr x1, x1, x0, #59
lsl x0, x0, #5
stp x0, x1, [scalarb]
stp x2, x3, [scalarb+16]
stp x4, x5, [scalarb+32]
subs bf, bf, #16
cset sgn, lo // sgn = sign of digit (1 = negative)
cneg bf, bf, lo // bf = absolute value of digit
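// The window d has now been recoded to the signed digit d - 16 in
// [-16, 15]: sgn is 1 exactly when the digit is negative, and bf holds
// its magnitude. A magnitude of 0 matches none of the selectblock cases
// and so leaves the all-zero triple, which the point addition treats as
// the identity.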
// Conditionally select the table entry tab[i-1] = i * P in constant time
mov x0, xzr
mov x1, xzr
mov x2, xzr
mov x3, xzr
mov x4, xzr
mov x5, xzr
mov x6, xzr
mov x7, xzr
mov x8, xzr
mov x9, xzr
mov x10, xzr
mov x11, xzr
mov x12, xzr
mov x13, xzr
mov x14, xzr
mov x15, xzr
mov x16, xzr
mov x17, xzr
add x19, tab
selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)
selectblock(9)
selectblock(10)
selectblock(11)
selectblock(12)
selectblock(13)
selectblock(14)
selectblock(15)
selectblock(16)
// Store it to "tabent" with the y coordinate optionally negated.
// This is done carefully to give coordinates < p_384 even in
// the degenerate case y = 0 (when z = 0 for points on the curve).
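// Concretely, x0..x5 below are set to p_384 - y and the csel block keeps
//
//     y := (sgn != 0 && y != 0) ? p_384 - y : y
//
// so a zero y is left as zero rather than being replaced by p_384.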
stp x0, x1, [tabent]
stp x2, x3, [tabent+16]
stp x4, x5, [tabent+32]
stp x12, x13, [tabent+96]
stp x14, x15, [tabent+112]
stp x16, x17, [tabent+128]
mov x0, #0x00000000ffffffff
subs x0, x0, x6
orr x12, x6, x7
mov x1, #0xffffffff00000000
sbcs x1, x1, x7
orr x13, x8, x9
mov x2, #0xfffffffffffffffe
sbcs x2, x2, x8
orr x14, x10, x11
mov x5, #0xffffffffffffffff
sbcs x3, x5, x9
orr x12, x12, x13
sbcs x4, x5, x10
orr x12, x12, x14
sbcs x5, x5, x11
cmp sgn, xzr
ccmp x12, xzr, #4, ne
csel x6, x0, x6, ne
csel x7, x1, x7, ne
csel x8, x2, x8, ne
csel x9, x3, x9, ne
csel x10, x4, x10, ne
csel x11, x5, x11, ne
stp x6, x7, [tabent+48]
stp x8, x9, [tabent+64]
stp x10, x11, [tabent+80]
// Add to the accumulator
add x0, acc
add x1, acc
add x2, tabent
bl p384_montjscalarmul_p384_montjadd
cbnz j, p384_montjscalarmul_mainloop
// That's the end of the main loop, and we just need to copy the
// result in "acc" to the output.
ldp x0, x1, [acc]
stp x0, x1, [res]
ldp x0, x1, [acc+16]
stp x0, x1, [res, #16]
ldp x0, x1, [acc+32]
stp x0, x1, [res, #32]
ldp x0, x1, [acc+48]
stp x0, x1, [res, #48]
ldp x0, x1, [acc+64]
stp x0, x1, [res, #64]
ldp x0, x1, [acc+80]
stp x0, x1, [res, #80]
ldp x0, x1, [acc+96]
stp x0, x1, [res, #96]
ldp x0, x1, [acc+112]
stp x0, x1, [res, #112]
ldp x0, x1, [acc+128]
stp x0, x1, [res, #128]
// Restore stack and registers and return
add sp, sp, NSPACE
ldp x25, x30, [sp], 16
ldp x23, x24, [sp], 16
ldp x21, x22, [sp], 16
ldp x19, x20, [sp], 16
ret
// Local copies of subroutines, complete clones at the moment
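// Both clones use the same interface as the code above: x0 points at the
// 18-word destination triple, x1 (and x2 for the addition) at the 18-word
// source triples, with all coordinates in the Montgomery domain.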
p384_montjscalarmul_p384_montjadd:
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, xzr, [sp, #-16]!
sub sp, sp, #0x180
mov x24, x0
mov x25, x1
mov x26, x2
mov x0, sp
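// The block below is a Montgomery squaring mod p_384 (cf.
// bignum_montsqr_p384) of the z coordinate of the first input point,
// read from [x25, #96..#143] and written to the scratch slot at
// [sp, #0..#47].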
ldr q1, [x25, #96]
ldp x9, x2, [x25, #96]
ldr q0, [x25, #96]
ldp x4, x6, [x25, #112]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [x25, #128]
xtn v30.2s, v0.2d
ldr q1, [x25, #128]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [x25, #128]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [x0]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [x0, #16]
csetm x15, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
stp x11, x14, [x0, #32]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc // cc = lo, ul, last
cinv x16, x15, cc // cc = lo, ul, last
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x4, x2, x4
cneg x4, x4, cc // cc = lo, ul, last
csetm x7, cc // cc = lo, ul, last
subs x2, x10, x6
cinv x8, x8, cc // cc = lo, ul, last
cneg x2, x2, cc // cc = lo, ul, last
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [x0, #16]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [x0]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [x0, #32]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff // #4294967295
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001 // #-4294967295
adcs x14, x14, x2
mov x2, #0x1 // #1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [x0]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [x0, #16]
adc x17, x14, xzr
stp x2, x17, [x0, #32]
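// The same Montgomery squaring is repeated for the z coordinate of the
// second input point, read from [x26, #96..#143] and written to
// [sp, #240..#287].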
ldr q1, [x26, #96]
ldp x9, x2, [x26, #96]
ldr q0, [x26, #96]
ldp x4, x6, [x26, #112]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [x26, #128]
xtn v30.2s, v0.2d
ldr q1, [x26, #128]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [x26, #128]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [sp, #240]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [sp, #256]
csetm x15, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
stp x11, x14, [sp, #272]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc // cc = lo, ul, last
cinv x16, x15, cc // cc = lo, ul, last
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x4, x2, x4
cneg x4, x4, cc // cc = lo, ul, last
csetm x7, cc // cc = lo, ul, last
subs x2, x10, x6
cinv x8, x8, cc // cc = lo, ul, last
cneg x2, x2, cc // cc = lo, ul, last
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [sp, #256]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [sp, #240]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [sp, #272]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff // #4294967295
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001 // #-4294967295
adcs x14, x14, x2
mov x2, #0x1 // #1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [sp, #240]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [sp, #256]
adc x17, x14, xzr
stp x2, x17, [sp, #272]
stp x23, x24, [sp, #0x150]
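// Montgomery multiplication mod p_384 (cf. bignum_montmul_p384) of z_2
// ([x26, #96]) by y_1 ([x25, #48]), with the result written to
// [sp, #288..#335].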
ldr q3, [x26, #96]
ldr q25, [x25, #48]
ldp x13, x23, [x25, #48]
ldp x3, x21, [x26, #96]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [x25, #80]
ldp x8, x24, [x26, #112]
subs x6, x3, x21
ldr q0, [x26, #128]
movi v23.2d, #0xffffffff
csetm x10, cc // cc = lo, ul, last
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc // cc = lo, ul, last
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc // cc = lo, ul, last
ldp x6, x14, [x25, #64]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc // cc = lo, ul, last
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc // cc = lo, ul, last
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc // cc = lo, ul, last
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc // cc = lo, ul, last
cinv x10, x10, cc // cc = lo, ul, last
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc // cc = lo, ul, last
eor x19, x19, x10
csetm x4, cc // cc = lo, ul, last
subs x16, x6, x23
cneg x16, x16, cc // cc = lo, ul, last
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc // cc = lo, ul, last
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [x25, #80]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [x26, #128]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #288]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #304]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc // cc = lo, ul, last
csetm x2, cc // cc = lo, ul, last
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc // cc = lo, ul, last
cneg x19, x19, cc // cc = lo, ul, last
stp x9, x20, [sp, #320]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc // cc = lo, ul, last
csetm x12, cc // cc = lo, ul, last
subs x9, x17, x14
cinv x12, x12, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc // cc = lo, ul, last
cneg x24, x10, cc // cc = lo, ul, last
subs x10, x17, x15
cinv x7, x7, cc // cc = lo, ul, last
cneg x10, x10, cc // cc = lo, ul, last
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #288]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #304]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #320]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #288]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #304]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #320]
cneg x3, x21, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc // cc = lo, ul, last
csetm x16, cc // cc = lo, ul, last
subs x21, x6, x15
cneg x22, x21, cc // cc = lo, ul, last
cinv x21, x24, cc // cc = lo, ul, last
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
subs x20, x14, x15
cinv x24, x24, cc // cc = lo, ul, last
mul x22, x3, x22
cneg x3, x20, cc // cc = lo, ul, last
subs x13, x6, x14
cneg x20, x13, cc // cc = lo, ul, last
cinv x15, x16, cc // cc = lo, ul, last
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe // #-2
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #288]
ldp x21, x12, [sp, #304]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #320]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff // #4294967295
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc // cc = lo, ul, last
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #288]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #304]
adc x12, x15, x23
stp x21, x12, [sp, #320]
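// The analogous Montgomery multiplication of z_1 ([x25, #96]) by y_2
// ([x26, #48]), with the result written to [sp, #48..#95].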
ldr q3, [x25, #96]
ldr q25, [x26, #48]
ldp x13, x23, [x26, #48]
ldp x3, x21, [x25, #96]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [x26, #80]
ldp x8, x24, [x25, #112]
subs x6, x3, x21
ldr q0, [x25, #128]
movi v23.2d, #0xffffffff
csetm x10, cc // cc = lo, ul, last
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc // cc = lo, ul, last
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc // cc = lo, ul, last
ldp x6, x14, [x26, #64]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc // cc = lo, ul, last
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc // cc = lo, ul, last
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc // cc = lo, ul, last
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc // cc = lo, ul, last
cinv x10, x10, cc // cc = lo, ul, last
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc // cc = lo, ul, last
eor x19, x19, x10
csetm x4, cc // cc = lo, ul, last
subs x16, x6, x23
cneg x16, x16, cc // cc = lo, ul, last
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc // cc = lo, ul, last
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [x26, #80]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [x25, #128]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #48]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #64]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc // cc = lo, ul, last
csetm x2, cc // cc = lo, ul, last
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc // cc = lo, ul, last
cneg x19, x19, cc // cc = lo, ul, last
stp x9, x20, [sp, #80]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc // cc = lo, ul, last
csetm x12, cc // cc = lo, ul, last
subs x9, x17, x14
cinv x12, x12, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc // cc = lo, ul, last
cneg x24, x10, cc // cc = lo, ul, last
subs x10, x17, x15
cinv x7, x7, cc // cc = lo, ul, last
cneg x10, x10, cc // cc = lo, ul, last
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #48]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #64]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #80]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #48]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #64]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #80]
cneg x3, x21, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc // cc = lo, ul, last
csetm x16, cc // cc = lo, ul, last
subs x21, x6, x15
cneg x22, x21, cc // cc = lo, ul, last
cinv x21, x24, cc // cc = lo, ul, last
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
subs x20, x14, x15
cinv x24, x24, cc // cc = lo, ul, last
mul x22, x3, x22
cneg x3, x20, cc // cc = lo, ul, last
subs x13, x6, x14
cneg x20, x13, cc // cc = lo, ul, last
cinv x15, x16, cc // cc = lo, ul, last
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe // #-2
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #48]
ldp x21, x12, [sp, #64]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #80]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff // #4294967295
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc // cc = lo, ul, last
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #48]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #64]
adc x12, x15, x23
stp x21, x12, [sp, #80]
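// Montgomery multiplication of the z_1 square at [sp, #0] by x_2
// ([x26, #0]), with the result written to [sp, #96..#143].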
mov x1, sp
ldr q3, [x1]
ldr q25, [x26]
ldp x13, x23, [x26]
ldp x3, x21, [x1]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [x26, #32]
ldp x8, x24, [x1, #16]
subs x6, x3, x21
ldr q0, [x1, #32]
movi v23.2d, #0xffffffff
csetm x10, cc // cc = lo, ul, last
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc // cc = lo, ul, last
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc // cc = lo, ul, last
ldp x6, x14, [x26, #16]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc // cc = lo, ul, last
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc // cc = lo, ul, last
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc // cc = lo, ul, last
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc // cc = lo, ul, last
cinv x10, x10, cc // cc = lo, ul, last
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc // cc = lo, ul, last
eor x19, x19, x10
csetm x4, cc // cc = lo, ul, last
subs x16, x6, x23
cneg x16, x16, cc // cc = lo, ul, last
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc // cc = lo, ul, last
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [x26, #32]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [x1, #32]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #96]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #112]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc // cc = lo, ul, last
csetm x2, cc // cc = lo, ul, last
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc // cc = lo, ul, last
cneg x19, x19, cc // cc = lo, ul, last
stp x9, x20, [sp, #128]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc // cc = lo, ul, last
csetm x12, cc // cc = lo, ul, last
subs x9, x17, x14
cinv x12, x12, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc // cc = lo, ul, last
cneg x24, x10, cc // cc = lo, ul, last
subs x10, x17, x15
cinv x7, x7, cc // cc = lo, ul, last
cneg x10, x10, cc // cc = lo, ul, last
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #96]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #112]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #128]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #96]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #112]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #128]
cneg x3, x21, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc // cc = lo, ul, last
csetm x16, cc // cc = lo, ul, last
subs x21, x6, x15
cneg x22, x21, cc // cc = lo, ul, last
cinv x21, x24, cc // cc = lo, ul, last
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
subs x20, x14, x15
cinv x24, x24, cc // cc = lo, ul, last
mul x22, x3, x22
cneg x3, x20, cc // cc = lo, ul, last
subs x13, x6, x14
cneg x20, x13, cc // cc = lo, ul, last
cinv x15, x16, cc // cc = lo, ul, last
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe // #-2
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #96]
ldp x21, x12, [sp, #112]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #128]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff // #4294967295
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc // cc = lo, ul, last
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #96]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #112]
adc x12, x15, x23
stp x21, x12, [sp, #128]
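// Montgomery multiplication of the z_2 square at [sp, #240] by x_1
// ([x25, #0]), with the result written to [sp, #192..#239].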
ldr q3, [sp, #240]
ldr q25, [x25]
ldp x13, x23, [x25]
ldp x3, x21, [sp, #240]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [x25, #32]
ldp x8, x24, [sp, #256]
subs x6, x3, x21
ldr q0, [sp, #272]
movi v23.2d, #0xffffffff
csetm x10, cc // cc = lo, ul, last
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc // cc = lo, ul, last
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc // cc = lo, ul, last
ldp x6, x14, [x25, #16]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc // cc = lo, ul, last
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc // cc = lo, ul, last
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc // cc = lo, ul, last
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc // cc = lo, ul, last
cinv x10, x10, cc // cc = lo, ul, last
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc // cc = lo, ul, last
eor x19, x19, x10
csetm x4, cc // cc = lo, ul, last
subs x16, x6, x23
cneg x16, x16, cc // cc = lo, ul, last
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc // cc = lo, ul, last
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [x25, #32]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #272]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #192]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #208]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc // cc = lo, ul, last
csetm x2, cc // cc = lo, ul, last
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc // cc = lo, ul, last
cneg x19, x19, cc // cc = lo, ul, last
stp x9, x20, [sp, #224]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc // cc = lo, ul, last
csetm x12, cc // cc = lo, ul, last
subs x9, x17, x14
cinv x12, x12, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc // cc = lo, ul, last
cneg x24, x10, cc // cc = lo, ul, last
subs x10, x17, x15
cinv x7, x7, cc // cc = lo, ul, last
cneg x10, x10, cc // cc = lo, ul, last
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #192]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #208]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #224]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #192]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #208]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #224]
cneg x3, x21, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc // cc = lo, ul, last
csetm x16, cc // cc = lo, ul, last
subs x21, x6, x15
cneg x22, x21, cc // cc = lo, ul, last
cinv x21, x24, cc // cc = lo, ul, last
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
subs x20, x14, x15
cinv x24, x24, cc // cc = lo, ul, last
mul x22, x3, x22
cneg x3, x20, cc // cc = lo, ul, last
subs x13, x6, x14
cneg x20, x13, cc // cc = lo, ul, last
cinv x15, x16, cc // cc = lo, ul, last
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe // #-2
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #192]
ldp x21, x12, [sp, #208]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #224]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff // #4294967295
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc // cc = lo, ul, last
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #192]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #208]
adc x12, x15, x23
stp x21, x12, [sp, #224]
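// Montgomery multiplication of the z_1 square at [sp, #0] by the
// z_1 * y_2 product at [sp, #48], overwriting [sp, #48..#95].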
mov x1, sp
ldr q3, [x1]
ldr q25, [sp, #48]
ldp x13, x23, [sp, #48]
ldp x3, x21, [x1]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #80]
ldp x8, x24, [x1, #16]
subs x6, x3, x21
ldr q0, [x1, #32]
movi v23.2d, #0xffffffff
csetm x10, cc // cc = lo, ul, last
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc // cc = lo, ul, last
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc // cc = lo, ul, last
ldp x6, x14, [sp, #64]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc // cc = lo, ul, last
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc // cc = lo, ul, last
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc // cc = lo, ul, last
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc // cc = lo, ul, last
cinv x10, x10, cc // cc = lo, ul, last
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc // cc = lo, ul, last
eor x19, x19, x10
csetm x4, cc // cc = lo, ul, last
subs x16, x6, x23
cneg x16, x16, cc // cc = lo, ul, last
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc // cc = lo, ul, last
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #80]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [x1, #32]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #48]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #64]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc // cc = lo, ul, last
csetm x2, cc // cc = lo, ul, last
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc // cc = lo, ul, last
cneg x19, x19, cc // cc = lo, ul, last
stp x9, x20, [sp, #80]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc // cc = lo, ul, last
csetm x12, cc // cc = lo, ul, last
subs x9, x17, x14
cinv x12, x12, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc // cc = lo, ul, last
cneg x24, x10, cc // cc = lo, ul, last
subs x10, x17, x15
cinv x7, x7, cc // cc = lo, ul, last
cneg x10, x10, cc // cc = lo, ul, last
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #48]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #64]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #80]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #48]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #64]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #80]
cneg x3, x21, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc // cc = lo, ul, last
csetm x16, cc // cc = lo, ul, last
subs x21, x6, x15
cneg x22, x21, cc // cc = lo, ul, last
cinv x21, x24, cc // cc = lo, ul, last
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
subs x20, x14, x15
cinv x24, x24, cc // cc = lo, ul, last
mul x22, x3, x22
cneg x3, x20, cc // cc = lo, ul, last
subs x13, x6, x14
cneg x20, x13, cc // cc = lo, ul, last
cinv x15, x16, cc // cc = lo, ul, last
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe // #-2
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #48]
ldp x21, x12, [sp, #64]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #80]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff // #4294967295
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc // cc = lo, ul, last
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #48]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #64]
adc x12, x15, x23
stp x21, x12, [sp, #80]
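// Montgomery product modulo p_384 of the 6-word values at [sp, #240] and
// [sp, #288] (interleaved NEON/scalar schedule); the reduced result
// overwrites [sp, #288].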
ldr q3, [sp, #240]
ldr q25, [sp, #288]
ldp x13, x23, [sp, #288]
ldp x3, x21, [sp, #240]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #320]
ldp x8, x24, [sp, #256]
subs x6, x3, x21
ldr q0, [sp, #272]
movi v23.2d, #0xffffffff
csetm x10, cc // cc = lo, ul, last
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc // cc = lo, ul, last
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc // cc = lo, ul, last
ldp x6, x14, [sp, #304]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc // cc = lo, ul, last
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc // cc = lo, ul, last
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc // cc = lo, ul, last
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc // cc = lo, ul, last
cinv x10, x10, cc // cc = lo, ul, last
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc // cc = lo, ul, last
eor x19, x19, x10
csetm x4, cc // cc = lo, ul, last
subs x16, x6, x23
cneg x16, x16, cc // cc = lo, ul, last
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc // cc = lo, ul, last
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #320]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #272]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #288]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #304]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc // cc = lo, ul, last
csetm x2, cc // cc = lo, ul, last
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc // cc = lo, ul, last
cneg x19, x19, cc // cc = lo, ul, last
stp x9, x20, [sp, #320]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc // cc = lo, ul, last
csetm x12, cc // cc = lo, ul, last
subs x9, x17, x14
cinv x12, x12, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc // cc = lo, ul, last
cneg x24, x10, cc // cc = lo, ul, last
subs x10, x17, x15
cinv x7, x7, cc // cc = lo, ul, last
cneg x10, x10, cc // cc = lo, ul, last
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #288]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #304]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #320]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #288]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #304]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #320]
cneg x3, x21, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc // cc = lo, ul, last
csetm x16, cc // cc = lo, ul, last
subs x21, x6, x15
cneg x22, x21, cc // cc = lo, ul, last
cinv x21, x24, cc // cc = lo, ul, last
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
subs x20, x14, x15
cinv x24, x24, cc // cc = lo, ul, last
mul x22, x3, x22
cneg x3, x20, cc // cc = lo, ul, last
subs x13, x6, x14
cneg x20, x13, cc // cc = lo, ul, last
cinv x15, x16, cc // cc = lo, ul, last
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe // #-2
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #288]
ldp x21, x12, [sp, #304]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #320]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff // #4294967295
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc // cc = lo, ul, last
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x2, x24, x11
stp x22, x5, [sp, #288]
adcs x11, x13, x23
adcs x12, x8, x23
stp x2, x11, [sp, #304]
adc x13, x15, x23
stp x12, x13, [sp, #320]
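// Subtraction modulo p_384: ([sp, #96] - [sp, #192]) mod p_384, written to
// [sp, #240]; the borrow-masked addition of p_384 performs the modular
// correction.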
ldp x5, x6, [sp, #96]
ldp x4, x3, [sp, #192]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #112]
ldp x4, x3, [sp, #208]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #128]
ldp x4, x3, [sp, #224]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, cc // cc = lo, ul, last
mov x4, #0xffffffff // #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe // #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #240]
stp x7, x8, [sp, #256]
stp x9, x10, [sp, #272]
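// Subtraction modulo p_384: [sp, #48] minus the value just stored at
// [sp, #288] (whose upper words are reused directly from x2, x11, x12, x13),
// written back to [sp, #48].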
ldp x5, x6, [sp, #48]
ldp x4, x3, [sp, #288]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #64]
sbcs x7, x7, x2
sbcs x8, x8, x11
ldp x9, x10, [sp, #80]
sbcs x9, x9, x12
sbcs x10, x10, x13
csetm x3, cc // cc = lo, ul, last
mov x4, #0xffffffff // #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe // #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #48]
stp x7, x8, [sp, #64]
stp x9, x10, [sp, #80]
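// Montgomery squaring modulo p_384 of the 6-word value at [sp, #240],
// result written to [sp, #144].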
ldr q1, [sp, #240]
ldp x9, x2, [sp, #240]
ldr q0, [sp, #240]
ldp x4, x6, [sp, #256]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [sp, #272]
xtn v30.2s, v0.2d
ldr q1, [sp, #272]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [sp, #272]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [sp, #144]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [sp, #160]
csetm x15, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
stp x11, x14, [sp, #176]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc // cc = lo, ul, last
cinv x16, x15, cc // cc = lo, ul, last
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x4, x2, x4
cneg x4, x4, cc // cc = lo, ul, last
csetm x7, cc // cc = lo, ul, last
subs x2, x10, x6
cinv x8, x8, cc // cc = lo, ul, last
cneg x2, x2, cc // cc = lo, ul, last
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [sp, #160]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [sp, #144]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [sp, #176]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff // #4294967295
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001 // #-4294967295
adcs x14, x14, x2
mov x2, #0x1 // #1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [sp, #144]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [sp, #160]
adc x17, x14, xzr
stp x2, x17, [sp, #176]
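// Montgomery squaring modulo p_384 of the value at [sp, #48]; x0 is set to
// sp so the result lands at the bottom of the stack frame.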
mov x0, sp
ldr q1, [sp, #48]
ldp x9, x2, [sp, #48]
ldr q0, [sp, #48]
ldp x4, x6, [sp, #64]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [sp, #80]
xtn v30.2s, v0.2d
ldr q1, [sp, #80]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [sp, #80]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [x0]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [x0, #16]
csetm x15, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
stp x11, x14, [x0, #32]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc // cc = lo, ul, last
cinv x16, x15, cc // cc = lo, ul, last
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x4, x2, x4
cneg x4, x4, cc // cc = lo, ul, last
csetm x7, cc // cc = lo, ul, last
subs x2, x10, x6
cinv x8, x8, cc // cc = lo, ul, last
cneg x2, x2, cc // cc = lo, ul, last
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [x0, #16]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [x0]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [x0, #32]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff // #4294967295
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001 // #-4294967295
adcs x14, x14, x2
mov x2, #0x1 // #1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [x0]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [x0, #16]
adc x17, x14, xzr
stp x2, x17, [x0, #32]
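// Montgomery product modulo p_384 of [sp, #144] and [sp, #192], with the
// reduced result overwriting [sp, #192].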
ldr q3, [sp, #144]
ldr q25, [sp, #192]
ldp x13, x23, [sp, #192]
ldp x3, x21, [sp, #144]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #224]
ldp x8, x24, [sp, #160]
subs x6, x3, x21
ldr q0, [sp, #176]
movi v23.2d, #0xffffffff
csetm x10, cc // cc = lo, ul, last
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc // cc = lo, ul, last
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc // cc = lo, ul, last
ldp x6, x14, [sp, #208]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc // cc = lo, ul, last
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc // cc = lo, ul, last
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc // cc = lo, ul, last
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc // cc = lo, ul, last
cinv x10, x10, cc // cc = lo, ul, last
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc // cc = lo, ul, last
eor x19, x19, x10
csetm x4, cc // cc = lo, ul, last
subs x16, x6, x23
cneg x16, x16, cc // cc = lo, ul, last
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc // cc = lo, ul, last
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #224]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #176]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #192]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #208]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc // cc = lo, ul, last
csetm x2, cc // cc = lo, ul, last
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc // cc = lo, ul, last
cneg x19, x19, cc // cc = lo, ul, last
stp x9, x20, [sp, #224]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc // cc = lo, ul, last
csetm x12, cc // cc = lo, ul, last
subs x9, x17, x14
cinv x12, x12, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc // cc = lo, ul, last
cneg x24, x10, cc // cc = lo, ul, last
subs x10, x17, x15
cinv x7, x7, cc // cc = lo, ul, last
cneg x10, x10, cc // cc = lo, ul, last
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #192]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #208]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #224]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #192]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #208]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #224]
cneg x3, x21, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc // cc = lo, ul, last
csetm x16, cc // cc = lo, ul, last
subs x21, x6, x15
cneg x22, x21, cc // cc = lo, ul, last
cinv x21, x24, cc // cc = lo, ul, last
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
subs x20, x14, x15
cinv x24, x24, cc // cc = lo, ul, last
mul x22, x3, x22
cneg x3, x20, cc // cc = lo, ul, last
subs x13, x6, x14
cneg x20, x13, cc // cc = lo, ul, last
cinv x15, x16, cc // cc = lo, ul, last
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe // #-2
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #192]
ldp x21, x12, [sp, #208]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #224]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff // #4294967295
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc // cc = lo, ul, last
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #192]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #208]
adc x12, x15, x23
stp x21, x12, [sp, #224]
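// Montgomery product modulo p_384 of [sp, #144] and [sp, #96], with the
// reduced result overwriting [sp, #96].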
ldr q3, [sp, #144]
ldr q25, [sp, #96]
ldp x13, x23, [sp, #96]
ldp x3, x21, [sp, #144]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #128]
ldp x8, x24, [sp, #160]
subs x6, x3, x21
ldr q0, [sp, #176]
movi v23.2d, #0xffffffff
csetm x10, cc // cc = lo, ul, last
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc // cc = lo, ul, last
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc // cc = lo, ul, last
ldp x6, x14, [sp, #112]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc // cc = lo, ul, last
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc // cc = lo, ul, last
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc // cc = lo, ul, last
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc // cc = lo, ul, last
cinv x10, x10, cc // cc = lo, ul, last
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc // cc = lo, ul, last
eor x19, x19, x10
csetm x4, cc // cc = lo, ul, last
subs x16, x6, x23
cneg x16, x16, cc // cc = lo, ul, last
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc // cc = lo, ul, last
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #128]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #176]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #96]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #112]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc // cc = lo, ul, last
csetm x2, cc // cc = lo, ul, last
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc // cc = lo, ul, last
cneg x19, x19, cc // cc = lo, ul, last
stp x9, x20, [sp, #128]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc // cc = lo, ul, last
csetm x12, cc // cc = lo, ul, last
subs x9, x17, x14
cinv x12, x12, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc // cc = lo, ul, last
cneg x24, x10, cc // cc = lo, ul, last
subs x10, x17, x15
cinv x7, x7, cc // cc = lo, ul, last
cneg x10, x10, cc // cc = lo, ul, last
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #96]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #112]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #128]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #96]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #112]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #128]
cneg x3, x21, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc // cc = lo, ul, last
csetm x16, cc // cc = lo, ul, last
subs x21, x6, x15
cneg x22, x21, cc // cc = lo, ul, last
cinv x21, x24, cc // cc = lo, ul, last
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
subs x20, x14, x15
cinv x24, x24, cc // cc = lo, ul, last
mul x22, x3, x22
cneg x3, x20, cc // cc = lo, ul, last
subs x13, x6, x14
cneg x20, x13, cc // cc = lo, ul, last
cinv x15, x16, cc // cc = lo, ul, last
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe // #-2
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #96]
ldp x21, x12, [sp, #112]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #128]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff // #4294967295
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc // cc = lo, ul, last
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x2, x24, x11
stp x22, x5, [sp, #96]
adcs x11, x13, x23
adcs x12, x8, x23
stp x2, x11, [sp, #112]
adc x13, x15, x23
stp x12, x13, [sp, #128]
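// Subtraction modulo p_384: ([sp] - [sp, #192]) mod p_384; with x0 = x1 = sp
// the result is written back to the bottom of the stack frame.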
mov x0, sp
mov x1, sp
ldp x5, x6, [x1]
ldp x4, x3, [sp, #192]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [x1, #16]
ldp x4, x3, [sp, #208]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [x1, #32]
ldp x4, x3, [sp, #224]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, cc // cc = lo, ul, last
mov x4, #0xffffffff // #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe // #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [x0]
stp x7, x8, [x0, #16]
stp x9, x10, [x0, #32]
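// Subtraction modulo p_384: [sp, #96] (upper words reused from x2, x11, x12,
// x13) minus [sp, #192], written to [sp, #144].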
ldp x5, x6, [sp, #96]
ldp x4, x3, [sp, #192]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x4, x3, [sp, #208]
sbcs x7, x2, x4
sbcs x8, x11, x3
ldp x4, x3, [sp, #224]
sbcs x9, x12, x4
sbcs x10, x13, x3
csetm x3, cc // cc = lo, ul, last
mov x4, #0xffffffff // #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe // #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #144]
stp x7, x8, [sp, #160]
stp x9, x10, [sp, #176]
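// Montgomery product modulo p_384 of [sp, #240] and the 6-word value at
// [x25, #96], with the reduced result overwriting [sp, #240].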
ldr q3, [sp, #240]
ldr q25, [x25, #96]
ldp x13, x23, [x25, #96]
ldp x3, x21, [sp, #240]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [x25, #128]
ldp x8, x24, [sp, #256]
subs x6, x3, x21
ldr q0, [sp, #272]
movi v23.2d, #0xffffffff
csetm x10, cc // cc = lo, ul, last
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc // cc = lo, ul, last
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc // cc = lo, ul, last
ldp x6, x14, [x25, #112]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc // cc = lo, ul, last
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc // cc = lo, ul, last
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc // cc = lo, ul, last
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc // cc = lo, ul, last
cinv x10, x10, cc // cc = lo, ul, last
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc // cc = lo, ul, last
eor x19, x19, x10
csetm x4, cc // cc = lo, ul, last
subs x16, x6, x23
cneg x16, x16, cc // cc = lo, ul, last
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc // cc = lo, ul, last
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [x25, #128]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #272]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #240]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #256]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc // cc = lo, ul, last
csetm x2, cc // cc = lo, ul, last
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc // cc = lo, ul, last
cneg x19, x19, cc // cc = lo, ul, last
stp x9, x20, [sp, #272]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc // cc = lo, ul, last
csetm x12, cc // cc = lo, ul, last
subs x9, x17, x14
cinv x12, x12, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc // cc = lo, ul, last
cneg x24, x10, cc // cc = lo, ul, last
subs x10, x17, x15
cinv x7, x7, cc // cc = lo, ul, last
cneg x10, x10, cc // cc = lo, ul, last
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #240]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #256]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #272]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #240]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #256]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #272]
cneg x3, x21, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc // cc = lo, ul, last
csetm x16, cc // cc = lo, ul, last
subs x21, x6, x15
cneg x22, x21, cc // cc = lo, ul, last
cinv x21, x24, cc // cc = lo, ul, last
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
subs x20, x14, x15
cinv x24, x24, cc // cc = lo, ul, last
mul x22, x3, x22
cneg x3, x20, cc // cc = lo, ul, last
subs x13, x6, x14
cneg x20, x13, cc // cc = lo, ul, last
cinv x15, x16, cc // cc = lo, ul, last
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe // #-2
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #240]
ldp x21, x12, [sp, #256]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #272]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff // #4294967295
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc // cc = lo, ul, last
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #240]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #256]
adc x12, x15, x23
stp x21, x12, [sp, #272]
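// Two subtractions modulo p_384: first [sp] - [sp, #96], written back to [sp]
// and kept live in registers, then [sp, #192] minus that result, written back
// to [sp, #192].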
mov x0, sp
mov x1, sp
ldp x5, x6, [x1]
ldp x4, x3, [sp, #96]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [x1, #16]
ldp x4, x3, [sp, #112]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [x1, #32]
ldp x4, x3, [sp, #128]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, cc // cc = lo, ul, last
mov x4, #0xffffffff // #4294967295
and x4, x4, x3
adds x2, x5, x4
eor x4, x4, x3
adcs x11, x6, x4
mov x4, #0xfffffffffffffffe // #-2
and x4, x4, x3
adcs x4, x7, x4
adcs x12, x8, x3
adcs x13, x9, x3
adc x3, x10, x3
stp x2, x11, [x0]
stp x4, x12, [x0, #16]
stp x13, x3, [x0, #32]
ldp x5, x6, [sp, #192]
subs x5, x5, x2
sbcs x6, x6, x11
ldp x7, x8, [sp, #208]
sbcs x7, x7, x4
sbcs x8, x8, x12
ldp x9, x10, [sp, #224]
sbcs x9, x9, x13
sbcs x10, x10, x3
csetm x3, cc // cc = lo, ul, last
mov x4, #0xffffffff // #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe // #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #192]
stp x7, x8, [sp, #208]
stp x9, x10, [sp, #224]
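// Montgomery product modulo p_384 of [sp, #144] and [sp, #288], with the
// reduced result overwriting [sp, #144].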
ldr q3, [sp, #144]
ldr q25, [sp, #288]
ldp x13, x23, [sp, #288]
ldp x3, x21, [sp, #144]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #320]
ldp x8, x24, [sp, #160]
subs x6, x3, x21
ldr q0, [sp, #176]
movi v23.2d, #0xffffffff
csetm x10, cc // cc = lo, ul, last
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc // cc = lo, ul, last
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc // cc = lo, ul, last
ldp x6, x14, [sp, #304]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc // cc = lo, ul, last
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc // cc = lo, ul, last
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc // cc = lo, ul, last
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc // cc = lo, ul, last
cinv x10, x10, cc // cc = lo, ul, last
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc // cc = lo, ul, last
eor x19, x19, x10
csetm x4, cc // cc = lo, ul, last
subs x16, x6, x23
cneg x16, x16, cc // cc = lo, ul, last
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc // cc = lo, ul, last
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #320]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #176]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #144]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #160]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc // cc = lo, ul, last
csetm x2, cc // cc = lo, ul, last
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc // cc = lo, ul, last
cneg x19, x19, cc // cc = lo, ul, last
stp x9, x20, [sp, #176]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc // cc = lo, ul, last
csetm x12, cc // cc = lo, ul, last
subs x9, x17, x14
cinv x12, x12, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc // cc = lo, ul, last
cneg x24, x10, cc // cc = lo, ul, last
subs x10, x17, x15
cinv x7, x7, cc // cc = lo, ul, last
cneg x10, x10, cc // cc = lo, ul, last
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #144]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #160]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #176]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #144]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #160]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #176]
cneg x3, x21, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc // cc = lo, ul, last
csetm x16, cc // cc = lo, ul, last
subs x21, x6, x15
cneg x22, x21, cc // cc = lo, ul, last
cinv x21, x24, cc // cc = lo, ul, last
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
subs x20, x14, x15
cinv x24, x24, cc // cc = lo, ul, last
mul x22, x3, x22
cneg x3, x20, cc // cc = lo, ul, last
subs x13, x6, x14
cneg x20, x13, cc // cc = lo, ul, last
cinv x15, x16, cc // cc = lo, ul, last
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe // #-2
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #144]
ldp x21, x12, [sp, #160]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #176]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff // #4294967295
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc // cc = lo, ul, last
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #144]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #160]
adc x12, x15, x23
stp x21, x12, [sp, #176]
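// Montgomery product modulo p_384 of [sp, #240] and the 6-word value at
// [x26, #96].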
ldr q3, [sp, #240]
ldr q25, [x26, #96]
ldp x13, x23, [x26, #96]
ldp x3, x21, [sp, #240]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [x26, #128]
ldp x8, x24, [sp, #256]
subs x6, x3, x21
ldr q0, [sp, #272]
movi v23.2d, #0xffffffff
csetm x10, cc // cc = lo, ul, last
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc // cc = lo, ul, last
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc // cc = lo, ul, last
ldp x6, x14, [x26, #112]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc // cc = lo, ul, last
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc // cc = lo, ul, last
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc // cc = lo, ul, last
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc // cc = lo, ul, last
cinv x10, x10, cc // cc = lo, ul, last
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc // cc = lo, ul, last
eor x19, x19, x10
csetm x4, cc // cc = lo, ul, last
subs x16, x6, x23
cneg x16, x16, cc // cc = lo, ul, last
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc // cc = lo, ul, last
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [x26, #128]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #272]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #240]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #256]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc // cc = lo, ul, last
csetm x2, cc // cc = lo, ul, last
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc // cc = lo, ul, last
cneg x19, x19, cc // cc = lo, ul, last
stp x9, x20, [sp, #272]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc // cc = lo, ul, last
csetm x12, cc // cc = lo, ul, last
subs x9, x17, x14
cinv x12, x12, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc // cc = lo, ul, last
cneg x24, x10, cc // cc = lo, ul, last
subs x10, x17, x15
cinv x7, x7, cc // cc = lo, ul, last
cneg x10, x10, cc // cc = lo, ul, last
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #240]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #256]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #272]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #240]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #256]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #272]
cneg x3, x21, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc // cc = lo, ul, last
csetm x16, cc // cc = lo, ul, last
subs x21, x6, x15
cneg x22, x21, cc // cc = lo, ul, last
cinv x21, x24, cc // cc = lo, ul, last
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
subs x20, x14, x15
cinv x24, x24, cc // cc = lo, ul, last
mul x22, x3, x22
cneg x3, x20, cc // cc = lo, ul, last
subs x13, x6, x14
cneg x20, x13, cc // cc = lo, ul, last
cinv x15, x16, cc // cc = lo, ul, last
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe // #-2
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #240]
ldp x21, x12, [sp, #256]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #272]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff // #4294967295
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc // cc = lo, ul, last
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #240]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #256]
adc x12, x15, x23
stp x21, x12, [sp, #272]
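// Montgomery multiplication of the temporaries at [sp, #48] and [sp, #192],
// followed by a modular subtraction of the buffer at [sp, #144..#191]; the
// two low result words are kept in x19/x24 and the rest stored at
// [sp, #208..#239]. x27, used below as the base register for the final
// stores, is reloaded from its stack slot here.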
ldp x2, x27, [sp, #0x150]
ldr q3, [sp, #48]
ldr q25, [sp, #192]
ldp x13, x23, [sp, #192]
ldp x3, x21, [sp, #48]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #224]
ldp x8, x24, [sp, #64]
subs x6, x3, x21
ldr q0, [sp, #80]
movi v23.2d, #0xffffffff
csetm x10, cc // cc = lo, ul, last
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc // cc = lo, ul, last
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc // cc = lo, ul, last
ldp x6, x14, [sp, #208]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc // cc = lo, ul, last
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc // cc = lo, ul, last
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc // cc = lo, ul, last
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc // cc = lo, ul, last
cinv x10, x10, cc // cc = lo, ul, last
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc // cc = lo, ul, last
eor x19, x19, x10
csetm x4, cc // cc = lo, ul, last
subs x16, x6, x23
cneg x16, x16, cc // cc = lo, ul, last
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc // cc = lo, ul, last
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #224]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #80]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #192]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #208]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc // cc = lo, ul, last
csetm x2, cc // cc = lo, ul, last
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc // cc = lo, ul, last
cneg x19, x19, cc // cc = lo, ul, last
stp x9, x20, [sp, #224]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc // cc = lo, ul, last
csetm x12, cc // cc = lo, ul, last
subs x9, x17, x14
cinv x12, x12, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc // cc = lo, ul, last
cneg x24, x10, cc // cc = lo, ul, last
subs x10, x17, x15
cinv x7, x7, cc // cc = lo, ul, last
cneg x10, x10, cc // cc = lo, ul, last
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #192]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #208]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #224]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #192]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #208]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #224]
cneg x3, x21, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc // cc = lo, ul, last
csetm x16, cc // cc = lo, ul, last
subs x21, x6, x15
cneg x22, x21, cc // cc = lo, ul, last
cinv x21, x24, cc // cc = lo, ul, last
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
subs x20, x14, x15
cinv x24, x24, cc // cc = lo, ul, last
mul x22, x3, x22
cneg x3, x20, cc // cc = lo, ul, last
subs x13, x6, x14
cneg x20, x13, cc // cc = lo, ul, last
cinv x15, x16, cc // cc = lo, ul, last
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe // #-2
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #192]
ldp x21, x12, [sp, #208]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #224]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff // #4294967295
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc // cc = lo, ul, last
and x11, x16, x23
and x20, x14, x23
adds x2, x6, x20
eor x3, x20, x23
adcs x6, x7, x3
adcs x7, x24, x11
adcs x9, x13, x23
adcs x10, x8, x23
adc x11, x15, x23
ldp x4, x3, [sp, #144]
subs x5, x2, x4
sbcs x6, x6, x3
ldp x4, x3, [sp, #160]
sbcs x7, x7, x4
sbcs x8, x9, x3
ldp x4, x3, [sp, #176]
sbcs x9, x10, x4
sbcs x10, x11, x3
csetm x3, cc // cc = lo, ul, last
mov x4, #0xffffffff // #4294967295
and x4, x4, x3
adds x19, x5, x4
eor x4, x4, x3
adcs x24, x6, x4
mov x4, #0xfffffffffffffffe // #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x7, x8, [sp, #208]
stp x9, x10, [sp, #224]
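// Load the z coordinates of both input points and test them for zero: if
// exactly one input is the point at infinity, the other input is returned
// unchanged; otherwise the sum computed above is kept. The choice is made
// branch-free with CSEL and the result is written out through x27.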
ldp x0, x1, [x25, #96]
ldp x2, x3, [x25, #112]
ldp x4, x5, [x25, #128]
orr x20, x0, x1
orr x21, x2, x3
orr x22, x4, x5
orr x20, x20, x21
orr x20, x20, x22
cmp x20, xzr
cset x20, ne // ne = any
ldp x6, x7, [x26, #96]
ldp x8, x9, [x26, #112]
ldp x10, x11, [x26, #128]
orr x21, x6, x7
orr x22, x8, x9
orr x23, x10, x11
orr x21, x21, x22
orr x21, x21, x23
cmp x21, xzr
cset x21, ne // ne = any
cmp x21, x20
ldp x12, x13, [sp, #240]
csel x12, x0, x12, cc // cc = lo, ul, last
csel x13, x1, x13, cc // cc = lo, ul, last
csel x12, x6, x12, hi // hi = pmore
csel x13, x7, x13, hi // hi = pmore
ldp x14, x15, [sp, #256]
csel x14, x2, x14, cc // cc = lo, ul, last
csel x15, x3, x15, cc // cc = lo, ul, last
csel x14, x8, x14, hi // hi = pmore
csel x15, x9, x15, hi // hi = pmore
ldp x16, x17, [sp, #272]
csel x16, x4, x16, cc // cc = lo, ul, last
csel x17, x5, x17, cc // cc = lo, ul, last
csel x16, x10, x16, hi // hi = pmore
csel x17, x11, x17, hi // hi = pmore
ldp x20, x21, [x25]
ldp x0, x1, [sp]
csel x0, x20, x0, cc // cc = lo, ul, last
csel x1, x21, x1, cc // cc = lo, ul, last
ldp x20, x21, [x26]
csel x0, x20, x0, hi // hi = pmore
csel x1, x21, x1, hi // hi = pmore
ldp x20, x21, [x25, #16]
ldp x2, x3, [sp, #16]
csel x2, x20, x2, cc // cc = lo, ul, last
csel x3, x21, x3, cc // cc = lo, ul, last
ldp x20, x21, [x26, #16]
csel x2, x20, x2, hi // hi = pmore
csel x3, x21, x3, hi // hi = pmore
ldp x20, x21, [x25, #32]
ldp x4, x5, [sp, #32]
csel x4, x20, x4, cc // cc = lo, ul, last
csel x5, x21, x5, cc // cc = lo, ul, last
ldp x20, x21, [x26, #32]
csel x4, x20, x4, hi // hi = pmore
csel x5, x21, x5, hi // hi = pmore
ldp x20, x21, [x25, #48]
csel x6, x20, x19, cc // cc = lo, ul, last
csel x7, x21, x24, cc // cc = lo, ul, last
ldp x20, x21, [x26, #48]
csel x6, x20, x6, hi // hi = pmore
csel x7, x21, x7, hi // hi = pmore
ldp x20, x21, [x25, #64]
ldp x8, x9, [sp, #208]
csel x8, x20, x8, cc // cc = lo, ul, last
csel x9, x21, x9, cc // cc = lo, ul, last
ldp x20, x21, [x26, #64]
csel x8, x20, x8, hi // hi = pmore
csel x9, x21, x9, hi // hi = pmore
ldp x20, x21, [x25, #80]
ldp x10, x11, [sp, #224]
csel x10, x20, x10, cc // cc = lo, ul, last
csel x11, x21, x11, cc // cc = lo, ul, last
ldp x20, x21, [x26, #80]
csel x10, x20, x10, hi // hi = pmore
csel x11, x21, x11, hi // hi = pmore
stp x0, x1, [x27]
stp x2, x3, [x27, #16]
stp x4, x5, [x27, #32]
stp x6, x7, [x27, #48]
stp x8, x9, [x27, #64]
stp x10, x11, [x27, #80]
stp x12, x13, [x27, #96]
stp x14, x15, [x27, #112]
stp x16, x17, [x27, #128]
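// Deallocate the stack frame, restore the callee-saved registers and return.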
add sp, sp, #0x180
ldp x27, xzr, [sp], #16
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
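
// ----------------------------------------------------------------------------
// Local subroutine: as the label indicates, point doubling on P-384 in
// Montgomery-Jacobian coordinates, used by the scalar multiplication code.
// The pointer arguments are stashed in x25 (presumably the destination
// triple) and x26 (the point being doubled) before the field arithmetic.
// ----------------------------------------------------------------------------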
p384_montjscalarmul_p384_montjdouble:
sub sp, sp, #0x1a0
stp x19, x20, [sp, #336]
stp x21, x22, [sp, #352]
stp x23, x24, [sp, #368]
stp x25, x26, [sp, #384]
stp x27, xzr, [sp, #400]
mov x25, x0
mov x26, x1
mov x0, sp
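// Montgomery squaring of the 6-word coordinate at [x26, #96] (the input's
// z in the usual Jacobian layout); the result goes to [x0] = [sp, #0..#47].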
ldr q1, [x26, #96]
ldp x9, x2, [x26, #96]
ldr q0, [x26, #96]
ldp x4, x6, [x26, #112]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [x26, #128]
xtn v30.2s, v0.2d
ldr q1, [x26, #128]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [x26, #128]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [x0]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [x0, #16]
csetm x15, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
stp x11, x14, [x0, #32]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc // cc = lo, ul, last
cinv x16, x15, cc // cc = lo, ul, last
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x4, x2, x4
cneg x4, x4, cc // cc = lo, ul, last
csetm x7, cc // cc = lo, ul, last
subs x2, x10, x6
cinv x8, x8, cc // cc = lo, ul, last
cneg x2, x2, cc // cc = lo, ul, last
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [x0, #16]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [x0]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [x0, #32]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff // #4294967295
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001 // #-4294967295
adcs x14, x14, x2
mov x2, #0x1 // #1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [x0]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [x0, #16]
adc x17, x14, xzr
stp x2, x17, [x0, #32]
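// Montgomery squaring of the coordinate at [x26, #48] (the input's y);
// result to [sp, #48..#95].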
ldr q1, [x26, #48]
ldp x9, x2, [x26, #48]
ldr q0, [x26, #48]
ldp x4, x6, [x26, #64]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [x26, #80]
xtn v30.2s, v0.2d
ldr q1, [x26, #80]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [x26, #80]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [sp, #48]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [sp, #64]
csetm x15, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
stp x11, x14, [sp, #80]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc // cc = lo, ul, last
cinv x16, x15, cc // cc = lo, ul, last
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x4, x2, x4
cneg x4, x4, cc // cc = lo, ul, last
csetm x7, cc // cc = lo, ul, last
subs x2, x10, x6
cinv x8, x8, cc // cc = lo, ul, last
cneg x2, x2, cc // cc = lo, ul, last
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [sp, #64]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [sp, #48]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [sp, #80]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff // #4294967295
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001 // #-4294967295
adcs x14, x14, x2
mov x2, #0x1 // #1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [sp, #48]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [sp, #64]
adc x17, x14, xzr
stp x2, x17, [sp, #80]
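// Modular addition over p_384: the input's x coordinate plus the z^2 value
// at [sp, #0]; reduced result to [sp, #240..#287].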
ldp x5, x6, [x26]
ldp x4, x3, [sp]
adds x5, x5, x4
adcs x6, x6, x3
ldp x7, x8, [x26, #16]
ldp x4, x3, [sp, #16]
adcs x7, x7, x4
adcs x8, x8, x3
ldp x9, x10, [x26, #32]
ldp x4, x3, [sp, #32]
adcs x9, x9, x4
adcs x10, x10, x3
csetm x3, cs // cs = hs, nlast
mov x4, #0xffffffff // #4294967295
and x4, x4, x3
subs x5, x5, x4
eor x4, x4, x3
sbcs x6, x6, x4
mov x4, #0xfffffffffffffffe // #-2
and x4, x4, x3
sbcs x7, x7, x4
sbcs x8, x8, x3
sbcs x9, x9, x3
sbc x10, x10, x3
stp x5, x6, [sp, #240]
stp x7, x8, [sp, #256]
stp x9, x10, [sp, #272]
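// Modular subtraction over p_384: the input's x coordinate minus the z^2
// value at [sp, #0]; reduced result to [sp, #192..#239].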
mov x2, sp
ldp x5, x6, [x26]
ldp x4, x3, [x2]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [x26, #16]
ldp x4, x3, [x2, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [x26, #32]
ldp x4, x3, [x2, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, cc // cc = lo, ul, last
mov x4, #0xffffffff // #4294967295
and x4, x4, x3
adds x13, x5, x4
eor x4, x4, x3
adcs x23, x6, x4
mov x4, #0xfffffffffffffffe // #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x13, x23, [sp, #192]
stp x7, x8, [sp, #208]
stp x9, x10, [sp, #224]
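// Montgomery multiplication of (x + z^2) at [sp, #240] by (x - z^2) at
// [sp, #192]; result to [sp, #96..#143].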
ldr q3, [sp, #240]
ldr q25, [sp, #192]
ldp x3, x21, [sp, #240]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #224]
ldp x8, x24, [sp, #256]
subs x6, x3, x21
ldr q0, [sp, #272]
movi v23.2d, #0xffffffff
csetm x10, cc // cc = lo, ul, last
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc // cc = lo, ul, last
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc // cc = lo, ul, last
ldp x6, x14, [sp, #208]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc // cc = lo, ul, last
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc // cc = lo, ul, last
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc // cc = lo, ul, last
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc // cc = lo, ul, last
cinv x10, x10, cc // cc = lo, ul, last
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc // cc = lo, ul, last
eor x19, x19, x10
csetm x4, cc // cc = lo, ul, last
subs x16, x6, x23
cneg x16, x16, cc // cc = lo, ul, last
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc // cc = lo, ul, last
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #224]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #272]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #96]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #112]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc // cc = lo, ul, last
csetm x2, cc // cc = lo, ul, last
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc // cc = lo, ul, last
cneg x19, x19, cc // cc = lo, ul, last
stp x9, x20, [sp, #128]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc // cc = lo, ul, last
csetm x12, cc // cc = lo, ul, last
subs x9, x17, x14
cinv x12, x12, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc // cc = lo, ul, last
cneg x24, x10, cc // cc = lo, ul, last
subs x10, x17, x15
cinv x7, x7, cc // cc = lo, ul, last
cneg x10, x10, cc // cc = lo, ul, last
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #96]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #112]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #128]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #96]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #112]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #128]
cneg x3, x21, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc // cc = lo, ul, last
csetm x16, cc // cc = lo, ul, last
subs x21, x6, x15
cneg x22, x21, cc // cc = lo, ul, last
cinv x21, x24, cc // cc = lo, ul, last
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
subs x20, x14, x15
cinv x24, x24, cc // cc = lo, ul, last
mul x22, x3, x22
cneg x3, x20, cc // cc = lo, ul, last
subs x13, x6, x14
cneg x20, x13, cc // cc = lo, ul, last
cinv x15, x16, cc // cc = lo, ul, last
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe // #-2
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #96]
ldp x21, x12, [sp, #112]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #128]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff // #4294967295
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc // cc = lo, ul, last
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #96]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #112]
adc x12, x15, x23
stp x21, x12, [sp, #128]
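// Modular addition over p_384: the input's y plus its z coordinate;
// reduced result to [sp, #240..#287], overwriting the earlier temporary.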
ldp x5, x6, [x26, #48]
ldp x4, x3, [x26, #96]
adds x5, x5, x4
adcs x6, x6, x3
ldp x7, x8, [x26, #64]
ldp x4, x3, [x26, #112]
adcs x7, x7, x4
adcs x8, x8, x3
ldp x9, x10, [x26, #80]
ldp x4, x3, [x26, #128]
adcs x9, x9, x4
adcs x10, x10, x3
adc x3, xzr, xzr
mov x4, #0xffffffff // #4294967295
cmp x5, x4
mov x4, #0xffffffff00000000 // #-4294967296
sbcs xzr, x6, x4
mov x4, #0xfffffffffffffffe // #-2
sbcs xzr, x7, x4
adcs xzr, x8, xzr
adcs xzr, x9, xzr
adcs xzr, x10, xzr
adcs x3, x3, xzr
csetm x3, ne // ne = any
mov x4, #0xffffffff // #4294967295
and x4, x4, x3
subs x5, x5, x4
eor x4, x4, x3
sbcs x6, x6, x4
mov x4, #0xfffffffffffffffe // #-2
and x4, x4, x3
sbcs x7, x7, x4
sbcs x8, x8, x3
sbcs x9, x9, x3
sbc x10, x10, x3
stp x5, x6, [sp, #240]
stp x7, x8, [sp, #256]
stp x9, x10, [sp, #272]
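// Montgomery squaring of the product at [sp, #96]; result to
// [sp, #288..#335].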
ldr q1, [sp, #96]
ldp x9, x2, [sp, #96]
ldr q0, [sp, #96]
ldp x4, x6, [sp, #112]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [sp, #128]
xtn v30.2s, v0.2d
ldr q1, [sp, #128]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [sp, #128]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [sp, #288]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [sp, #304]
csetm x15, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
stp x11, x14, [sp, #320]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc // cc = lo, ul, last
cinv x16, x15, cc // cc = lo, ul, last
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x4, x2, x4
cneg x4, x4, cc // cc = lo, ul, last
csetm x7, cc // cc = lo, ul, last
subs x2, x10, x6
cinv x8, x8, cc // cc = lo, ul, last
cneg x2, x2, cc // cc = lo, ul, last
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [sp, #304]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [sp, #288]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [sp, #320]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff // #4294967295
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001 // #-4294967295
adcs x14, x14, x2
mov x2, #0x1 // #1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [sp, #288]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [sp, #304]
adc x17, x14, xzr
stp x2, x17, [sp, #320]
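// Montgomery multiplication of the input's x coordinate by the squared y
// at [sp, #48]; result to [sp, #144..#191].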
ldr q3, [x26]
ldr q25, [sp, #48]
ldp x13, x23, [sp, #48]
ldp x3, x21, [x26]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #80]
ldp x8, x24, [x26, #16]
subs x6, x3, x21
ldr q0, [x26, #32]
movi v23.2d, #0xffffffff
csetm x10, cc // cc = lo, ul, last
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc // cc = lo, ul, last
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc // cc = lo, ul, last
ldp x6, x14, [sp, #64]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc // cc = lo, ul, last
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc // cc = lo, ul, last
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc // cc = lo, ul, last
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc // cc = lo, ul, last
cinv x10, x10, cc // cc = lo, ul, last
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc // cc = lo, ul, last
eor x19, x19, x10
csetm x4, cc // cc = lo, ul, last
subs x16, x6, x23
cneg x16, x16, cc // cc = lo, ul, last
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc // cc = lo, ul, last
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #80]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [x26, #32]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x26, x4, x16
mov x4, v27.d[0]
sbcs x27, x20, x11
sbcs x20, x9, x12
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #160]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc // cc = lo, ul, last
csetm x2, cc // cc = lo, ul, last
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc // cc = lo, ul, last
cneg x19, x19, cc // cc = lo, ul, last
stp x9, x20, [sp, #176]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc // cc = lo, ul, last
csetm x12, cc // cc = lo, ul, last
subs x9, x17, x14
cinv x12, x12, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc // cc = lo, ul, last
cneg x24, x10, cc // cc = lo, ul, last
subs x10, x17, x15
cinv x7, x7, cc // cc = lo, ul, last
cneg x10, x10, cc // cc = lo, ul, last
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #160]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #176]
adds x20, x22, x26
mul x10, x13, x14
adcs x11, x11, x27
eor x9, x8, x21
adcs x26, x19, x17
stp x20, x11, [sp, #144]
adcs x27, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #176]
cneg x3, x21, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc // cc = lo, ul, last
csetm x16, cc // cc = lo, ul, last
subs x21, x6, x15
cneg x22, x21, cc // cc = lo, ul, last
cinv x21, x24, cc // cc = lo, ul, last
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
subs x20, x14, x15
cinv x24, x24, cc // cc = lo, ul, last
mul x22, x3, x22
cneg x3, x20, cc // cc = lo, ul, last
subs x13, x6, x14
cneg x20, x13, cc // cc = lo, ul, last
cinv x15, x16, cc // cc = lo, ul, last
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe // #-2
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #144]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #176]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x26
eor x1, x22, x9
adcs x24, x23, x27
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x26
adcs x15, x17, x27
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff // #4294967295
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc // cc = lo, ul, last
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #144]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #160]
adc x12, x15, x23
stp x21, x12, [sp, #176]
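// Montgomery squaring of the (y + z) sum held at [sp, #240].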
ldr q1, [sp, #240]
ldp x9, x2, [sp, #240]
ldr q0, [sp, #240]
ldp x4, x6, [sp, #256]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [sp, #272]
xtn v30.2s, v0.2d
ldr q1, [sp, #272]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [sp, #272]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x19, x3, x17
sbcs x20, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [sp, #192]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
csetm x15, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
stp x11, x14, [sp, #224]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc // cc = lo, ul, last
cinv x16, x15, cc // cc = lo, ul, last
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x4, x2, x4
cneg x4, x4, cc // cc = lo, ul, last
csetm x7, cc // cc = lo, ul, last
subs x2, x10, x6
cinv x8, x8, cc // cc = lo, ul, last
cneg x2, x2, cc // cc = lo, ul, last
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [sp, #192]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [sp, #224]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x19
adcs x1, x1, x20
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff // #4294967295
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001 // #-4294967295
adcs x14, x14, x2
mov x2, #0x1 // #1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x19, x13, x1
and x13, x4, x9
adcs x20, x6, x13
and x1, x2, x9
adcs x7, x8, x1
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [sp, #208]
adc x17, x14, xzr
stp x2, x17, [sp, #224]
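// Compute 12 * [sp, #144..191] - 9 * [sp, #288..335] (mod p_384), formed as
// 9 * (p_384 - [sp, #288..335]) + 12 * [sp, #144..191] followed by a
// reduction, writing the six-word result back to [sp, #288..335].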
ldp x0, x1, [sp, #288]
mov x6, #0xffffffff // #4294967295
subs x6, x6, x0
mov x7, #0xffffffff00000000 // #-4294967296
sbcs x7, x7, x1
ldp x0, x1, [sp, #304]
mov x8, #0xfffffffffffffffe // #-2
sbcs x8, x8, x0
mov x13, #0xffffffffffffffff // #-1
sbcs x9, x13, x1
ldp x0, x1, [sp, #320]
sbcs x10, x13, x0
sbc x11, x13, x1
mov x12, #0x9 // #9
mul x0, x12, x6
mul x1, x12, x7
mul x2, x12, x8
mul x3, x12, x9
mul x4, x12, x10
mul x5, x12, x11
umulh x6, x12, x6
umulh x7, x12, x7
umulh x8, x12, x8
umulh x9, x12, x9
umulh x10, x12, x10
umulh x12, x12, x11
adds x1, x1, x6
adcs x2, x2, x7
adcs x3, x3, x8
adcs x4, x4, x9
adcs x5, x5, x10
mov x6, #0x1 // #1
adc x6, x12, x6
ldp x8, x9, [sp, #144]
ldp x10, x11, [sp, #160]
ldp x12, x13, [sp, #176]
mov x14, #0xc // #12
mul x15, x14, x8
umulh x8, x14, x8
adds x0, x0, x15
mul x15, x14, x9
umulh x9, x14, x9
adcs x1, x1, x15
mul x15, x14, x10
umulh x10, x14, x10
adcs x2, x2, x15
mul x15, x14, x11
umulh x11, x14, x11
adcs x3, x3, x15
mul x15, x14, x12
umulh x12, x14, x12
adcs x4, x4, x15
mul x15, x14, x13
umulh x13, x14, x13
adcs x5, x5, x15
adc x6, x6, xzr
adds x1, x1, x8
adcs x2, x2, x9
adcs x3, x3, x10
adcs x4, x4, x11
adcs x5, x5, x12
adcs x6, x6, x13
lsl x7, x6, #32
subs x8, x6, x7
sbc x7, x7, xzr
adds x0, x0, x8
adcs x1, x1, x7
adcs x2, x2, x6
adcs x3, x3, xzr
adcs x4, x4, xzr
adcs x5, x5, xzr
csetm x6, cc // cc = lo, ul, last
mov x7, #0xffffffff // #4294967295
and x7, x7, x6
adds x0, x0, x7
eor x7, x7, x6
adcs x1, x1, x7
mov x7, #0xfffffffffffffffe // #-2
and x7, x7, x6
adcs x2, x2, x7
adcs x3, x3, x6
adcs x4, x4, x6
adc x5, x5, x6
stp x0, x1, [sp, #288]
stp x2, x3, [sp, #304]
stp x4, x5, [sp, #320]
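// Modular subtraction: [sp, #240..287] := (x19:x20 and [sp, #208..239])
// minus [sp, #0..47], reduced mod p_384.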
mov x2, sp
ldp x4, x3, [x2]
subs x5, x19, x4
sbcs x6, x20, x3
ldp x7, x8, [sp, #208]
ldp x4, x3, [x2, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #224]
ldp x4, x3, [x2, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, cc // cc = lo, ul, last
mov x4, #0xffffffff // #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe // #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #240]
stp x7, x8, [sp, #256]
stp x9, x10, [sp, #272]
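// Montgomery squaring mod p_384 of the six words at [sp, #48];
// the result is written to [sp, #192..239].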
ldr q1, [sp, #48]
ldp x9, x2, [sp, #48]
ldr q0, [sp, #48]
ldp x4, x6, [sp, #64]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [sp, #80]
xtn v30.2s, v0.2d
ldr q1, [sp, #80]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [sp, #80]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x19, x3, x17
sbcs x20, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [sp, #192]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
csetm x15, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
stp x11, x14, [sp, #224]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc // cc = lo, ul, last
cinv x16, x15, cc // cc = lo, ul, last
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x4, x2, x4
cneg x4, x4, cc // cc = lo, ul, last
csetm x7, cc // cc = lo, ul, last
subs x2, x10, x6
cinv x8, x8, cc // cc = lo, ul, last
cneg x2, x2, cc // cc = lo, ul, last
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc // cc = lo, ul, last
cneg x1, x1, cc // cc = lo, ul, last
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [sp, #192]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [sp, #224]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x19
adcs x1, x1, x20
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff // #4294967295
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001 // #-4294967295
adcs x14, x14, x2
mov x2, #0x1 // #1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [sp, #192]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [sp, #208]
adc x17, x14, xzr
stp x2, x17, [sp, #224]
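// Modular subtraction: [x25, #96..143] := [sp, #240..287] - [sp, #48..95] (mod p_384).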
ldp x5, x6, [sp, #240]
ldp x4, x3, [sp, #48]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #256]
ldp x4, x3, [sp, #64]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #272]
ldp x4, x3, [sp, #80]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, cc // cc = lo, ul, last
mov x4, #0xffffffff // #4294967295
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe // #-2
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [x25, #96]
stp x7, x8, [x25, #112]
stp x9, x10, [x25, #128]
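// Montgomery multiplication mod p_384 of [sp, #288..335] by [sp, #96..143];
// result words 0-3 go to [sp, #240..271], words 4-5 stay in x12 and x19.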
ldr q3, [sp, #288]
ldr q25, [sp, #96]
ldp x13, x23, [sp, #96]
ldp x3, x21, [sp, #288]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #128]
ldp x8, x24, [sp, #304]
subs x6, x3, x21
ldr q0, [sp, #320]
movi v23.2d, #0xffffffff
csetm x10, cc // cc = lo, ul, last
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc // cc = lo, ul, last
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc // cc = lo, ul, last
ldp x6, x14, [sp, #112]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc // cc = lo, ul, last
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc // cc = lo, ul, last
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc // cc = lo, ul, last
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc // cc = lo, ul, last
cinv x10, x10, cc // cc = lo, ul, last
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc // cc = lo, ul, last
eor x19, x19, x10
csetm x4, cc // cc = lo, ul, last
subs x16, x6, x23
cneg x16, x16, cc // cc = lo, ul, last
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc // cc = lo, ul, last
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #128]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #320]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x26, x4, x16
mov x4, v27.d[0]
sbcs x27, x20, x11
sbcs x20, x9, x12
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #256]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc // cc = lo, ul, last
csetm x2, cc // cc = lo, ul, last
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc // cc = lo, ul, last
cneg x19, x19, cc // cc = lo, ul, last
stp x9, x20, [sp, #272]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc // cc = lo, ul, last
csetm x12, cc // cc = lo, ul, last
subs x9, x17, x14
cinv x12, x12, cc // cc = lo, ul, last
cneg x9, x9, cc // cc = lo, ul, last
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc // cc = lo, ul, last
cneg x24, x10, cc // cc = lo, ul, last
subs x10, x17, x15
cinv x7, x7, cc // cc = lo, ul, last
cneg x10, x10, cc // cc = lo, ul, last
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #256]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #272]
adds x20, x22, x26
mul x10, x13, x14
adcs x11, x11, x27
eor x9, x8, x21
adcs x26, x19, x17
stp x20, x11, [sp, #240]
adcs x27, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #272]
cneg x3, x21, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc // cc = lo, ul, last
csetm x16, cc // cc = lo, ul, last
subs x21, x6, x15
cneg x22, x21, cc // cc = lo, ul, last
cinv x21, x24, cc // cc = lo, ul, last
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc // cc = lo, ul, last
csetm x24, cc // cc = lo, ul, last
subs x20, x14, x15
cinv x24, x24, cc // cc = lo, ul, last
mul x22, x3, x22
cneg x3, x20, cc // cc = lo, ul, last
subs x13, x6, x14
cneg x20, x13, cc // cc = lo, ul, last
cinv x15, x16, cc // cc = lo, ul, last
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe // #-2
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #240]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #272]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x26
eor x1, x22, x9
adcs x24, x23, x27
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x26
adcs x15, x17, x27
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff // #4294967295
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc // cc = lo, ul, last
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #240]
adcs x5, x13, x23
adcs x12, x8, x23
stp x14, x5, [sp, #256]
adc x19, x15, x23
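// Compute 4 * [sp, #144..191] - [sp, #288..335] (mod p_384) and store the
// result at [x25, #0..47].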
ldp x1, x2, [sp, #144]
ldp x3, x4, [sp, #160]
ldp x5, x6, [sp, #176]
lsl x0, x1, #2
ldp x7, x8, [sp, #288]
subs x0, x0, x7
extr x1, x2, x1, #62
sbcs x1, x1, x8
ldp x7, x8, [sp, #304]
extr x2, x3, x2, #62
sbcs x2, x2, x7
extr x3, x4, x3, #62
sbcs x3, x3, x8
extr x4, x5, x4, #62
ldp x7, x8, [sp, #320]
sbcs x4, x4, x7
extr x5, x6, x5, #62
sbcs x5, x5, x8
lsr x6, x6, #62
adc x6, x6, xzr
lsl x7, x6, #32
subs x8, x6, x7
sbc x7, x7, xzr
adds x0, x0, x8
adcs x1, x1, x7
adcs x2, x2, x6
adcs x3, x3, xzr
adcs x4, x4, xzr
adcs x5, x5, xzr
csetm x8, cc // cc = lo, ul, last
mov x9, #0xffffffff // #4294967295
and x9, x9, x8
adds x0, x0, x9
eor x9, x9, x8
adcs x1, x1, x9
mov x9, #0xfffffffffffffffe // #-2
and x9, x9, x8
adcs x2, x2, x9
adcs x3, x3, x8
adcs x4, x4, x8
adc x5, x5, x8
stp x0, x1, [x25]
stp x2, x3, [x25, #16]
stp x4, x5, [x25, #32]
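// Compute 3 * ([sp, #240..271] together with x12 and x19) - 8 * [sp, #192..239]
// (mod p_384) and store the result at [x25, #48..95].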
ldp x0, x1, [sp, #192]
mov x6, #0xffffffff // #4294967295
subs x6, x6, x0
mov x7, #0xffffffff00000000 // #-4294967296
sbcs x7, x7, x1
ldp x0, x1, [sp, #208]
mov x8, #0xfffffffffffffffe // #-2
sbcs x8, x8, x0
mov x13, #0xffffffffffffffff // #-1
sbcs x9, x13, x1
ldp x0, x1, [sp, #224]
sbcs x10, x13, x0
sbc x11, x13, x1
lsl x0, x6, #3
extr x1, x7, x6, #61
extr x2, x8, x7, #61
extr x3, x9, x8, #61
extr x4, x10, x9, #61
extr x5, x11, x10, #61
lsr x6, x11, #61
add x6, x6, #0x1
ldp x8, x9, [sp, #240]
ldp x10, x11, [sp, #256]
mov x14, #0x3 // #3
mul x15, x14, x8
umulh x8, x14, x8
adds x0, x0, x15
mul x15, x14, x9
umulh x9, x14, x9
adcs x1, x1, x15
mul x15, x14, x10
umulh x10, x14, x10
adcs x2, x2, x15
mul x15, x14, x11
umulh x11, x14, x11
adcs x3, x3, x15
mul x15, x14, x12
umulh x12, x14, x12
adcs x4, x4, x15
mul x15, x14, x19
umulh x13, x14, x19
adcs x5, x5, x15
adc x6, x6, xzr
adds x1, x1, x8
adcs x2, x2, x9
adcs x3, x3, x10
adcs x4, x4, x11
adcs x5, x5, x12
adcs x6, x6, x13
lsl x7, x6, #32
subs x8, x6, x7
sbc x7, x7, xzr
adds x0, x0, x8
adcs x1, x1, x7
adcs x2, x2, x6
adcs x3, x3, xzr
adcs x4, x4, xzr
adcs x5, x5, xzr
csetm x6, cc // cc = lo, ul, last
mov x7, #0xffffffff // #4294967295
and x7, x7, x6
adds x0, x0, x7
eor x7, x7, x6
adcs x1, x1, x7
mov x7, #0xfffffffffffffffe // #-2
and x7, x7, x6
adcs x2, x2, x7
adcs x3, x3, x6
adcs x4, x4, x6
adc x5, x5, x6
stp x0, x1, [x25, #48]
stp x2, x3, [x25, #64]
stp x4, x5, [x25, #80]
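// Restore the callee-saved registers, pop the stack frame and return.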
ldp x19, x20, [sp, #336]
ldp x21, x22, [sp, #352]
ldp x23, x24, [sp, #368]
ldp x25, x26, [sp, #384]
ldp x27, xzr, [sp, #400]
add sp, sp, #0x1a0
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/bignum_mux_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// 384-bit multiplex/select z := x (if p nonzero) or z := y (if p zero)
// Inputs p, x[6], y[6]; output z[6]
//
// extern void bignum_mux_6
// (uint64_t p, uint64_t z[static 6],
// uint64_t x[static 6], uint64_t y[static 6]);
//
// It is assumed that the numbers x, y and z all have the same size, 6 digits.
//
// Standard ARM ABI: X0 = p, X1 = z, X2 = x, X3 = y
// ----------------------------------------------------------------------------
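// As a point of reference, the digit-wise selection implemented below matches
// the following C sketch (illustrative only, not part of this file; the name
// bignum_mux_6_ref is made up here, while the parameter types mirror the
// prototype above):
//
//     #include <stdint.h>
//
//     static void bignum_mux_6_ref(uint64_t p, uint64_t z[static 6],
//                                  uint64_t x[static 6],
//                                  uint64_t y[static 6])
//     {
//       for (int i = 0; i < 6; i++)
//         z[i] = (p != 0) ? x[i] : y[i];
//     }
//
// The assembly below fixes the choice once via the condition codes and then
// uses csel per digit, so both x and y are always read in full.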
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mux_6)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mux_6)
.text
.balign 4
#define p x0
#define z x1
#define x x2
#define y x3
#define a x4
S2N_BN_SYMBOL(bignum_mux_6):
cmp p, #0 // Set condition codes p = 0
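// For each of the six digits: keep the digit loaded from x if p was nonzero
// (condition ne), otherwise take the digit loaded from y, and store it to z.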
ldr a, [x]
ldr p, [y]
csel a, a, p, ne
str a, [z]
ldr a, [x, #8]
ldr p, [y, #8]
csel a, a, p, ne
str a, [z, #8]
ldr a, [x, #16]
ldr p, [y, #16]
csel a, a, p, ne
str a, [z, #16]
ldr a, [x, #24]
ldr p, [y, #24]
csel a, a, p, ne
str a, [z, #24]
ldr a, [x, #32]
ldr p, [y, #32]
csel a, a, p, ne
str a, [z, #32]
ldr a, [x, #40]
ldr p, [y, #40]
csel a, a, p, ne
str a, [z, #40]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/p384_montjdouble.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point doubling on NIST curve P-384 in Montgomery-Jacobian coordinates
//
// extern void p384_montjdouble
// (uint64_t p3[static 18],uint64_t p1[static 18]);
//
// Does p3 := 2 * p1 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard ARM ABI: X0 = p3, X1 = p1
// ----------------------------------------------------------------------------
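// A minimal call sketch (illustrative only; the wrapper name double_point is
// made up here, and producing the Montgomery-domain words is outside the
// scope of this file):
//
//     #include <stdint.h>
//
//     extern void p384_montjdouble(uint64_t p3[static 18],
//                                  uint64_t p1[static 18]);
//
//     void double_point(uint64_t out[static 18], uint64_t in[static 18])
//     {
//       // Each coordinate is six little-endian 64-bit words:
//       // words 0..5 hold x', 6..11 hold y', 12..17 hold z'.
//       p384_montjdouble(out, in);   // out := 2 * in
//     }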
#include "_internal_s2n_bignum.h"
// This is functionally equivalent to p384_montjdouble in unopt/p384_montjdouble.S.
// This is the result of doing the following sequence of optimizations:
// 1. Function inlining
// 2. Eliminating redundant load/store instructions
// 3. Folding (add addr, const) + load/store
// Function inlining is done manually. The second and third optimizations are
// done by a script.
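// As an illustration of the third step, a pair of instructions such as
//
//     add     x0, sp, #0x60
//     ldp     x1, x2, [x0]
//
// is folded (when the materialized address has no other use) into
//
//     ldp     x1, x2, [sp, #0x60]
//
// which is why the code below addresses its temporaries directly as
// [sp, #imm] and [x26, #imm].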
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjdouble)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjdouble)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 48
#define NSPACE #(NUMSIZE*7)
S2N_BN_SYMBOL(p384_montjdouble):
// Save regs and make room on stack for temporary variables
sub sp, sp, NSPACE+80
stp x19, x20, [sp, NSPACE]
stp x21, x22, [sp, NSPACE+16]
stp x23, x24, [sp, NSPACE+32]
stp x25, x26, [sp, NSPACE+48]
stp x27, xzr, [sp, NSPACE+64]
mov x25, x0
mov x26, x1
mov x0, sp
ldr q1, [x26, #96]
ldp x9, x2, [x26, #96]
ldr q0, [x26, #96]
ldp x4, x6, [x26, #112]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [x26, #128]
xtn v30.2s, v0.2d
ldr q1, [x26, #128]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [x26, #128]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [x0]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [x0, #16]
csetm x15, cc
cneg x1, x1, cc
stp x11, x14, [x0, #32]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc
cinv x16, x15, cc
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc
cneg x9, x9, cc
subs x4, x2, x4
cneg x4, x4, cc
csetm x7, cc
subs x2, x10, x6
cinv x8, x8, cc
cneg x2, x2, cc
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc
cneg x1, x1, cc
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [x0, #16]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [x0]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [x0, #32]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001
adcs x14, x14, x2
mov x2, #0x1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [x0]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [x0, #16]
adc x17, x14, xzr
stp x2, x17, [x0, #32]
ldr q1, [x26, #48]
ldp x9, x2, [x26, #48]
ldr q0, [x26, #48]
ldp x4, x6, [x26, #64]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [x26, #80]
xtn v30.2s, v0.2d
ldr q1, [x26, #80]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [x26, #80]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [sp, #48]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [sp, #64]
csetm x15, cc
cneg x1, x1, cc
stp x11, x14, [sp, #80]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc
cinv x16, x15, cc
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc
cneg x9, x9, cc
subs x4, x2, x4
cneg x4, x4, cc
csetm x7, cc
subs x2, x10, x6
cinv x8, x8, cc
cneg x2, x2, cc
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc
cneg x1, x1, cc
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [sp, #64]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [sp, #48]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [sp, #80]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001
adcs x14, x14, x2
mov x2, #0x1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [sp, #48]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [sp, #64]
adc x17, x14, xzr
stp x2, x17, [sp, #80]
ldp x5, x6, [x26]
ldp x4, x3, [sp]
adds x5, x5, x4
adcs x6, x6, x3
ldp x7, x8, [x26, #16]
ldp x4, x3, [sp, #16]
adcs x7, x7, x4
adcs x8, x8, x3
ldp x9, x10, [x26, #32]
ldp x4, x3, [sp, #32]
adcs x9, x9, x4
adcs x10, x10, x3
csetm x3, cs
mov x4, #0xffffffff
and x4, x4, x3
subs x5, x5, x4
eor x4, x4, x3
sbcs x6, x6, x4
mov x4, #0xfffffffffffffffe
and x4, x4, x3
sbcs x7, x7, x4
sbcs x8, x8, x3
sbcs x9, x9, x3
sbc x10, x10, x3
stp x5, x6, [sp, #240]
stp x7, x8, [sp, #256]
stp x9, x10, [sp, #272]
mov x2, sp
ldp x5, x6, [x26, #0]
ldp x4, x3, [x2]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [x26, #16]
ldp x4, x3, [x2, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [x26, #32]
ldp x4, x3, [x2, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, cc
mov x4, #0xffffffff
and x4, x4, x3
adds x13, x5, x4
eor x4, x4, x3
adcs x23, x6, x4
mov x4, #0xfffffffffffffffe
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x13, x23, [sp, #192]
stp x7, x8, [sp, #208]
stp x9, x10, [sp, #224]
ldr q3, [sp, #240]
ldr q25, [sp, #192]
ldp x3, x21, [sp, #240]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #224]
ldp x8, x24, [sp, #256]
subs x6, x3, x21
ldr q0, [sp, #272]
movi v23.2d, #0xffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc
ldp x6, x14, [sp, #208]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #224]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #272]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [sp, #96]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #112]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [sp, #128]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [sp, #96]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #112]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #128]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [sp, #96]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [sp, #112]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #128]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #96]
ldp x21, x12, [sp, #112]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #128]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #96]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #112]
adc x12, x15, x23
stp x21, x12, [sp, #128]
ldp x5, x6, [x26, #48]
ldp x4, x3, [x26, #96]
adds x5, x5, x4
adcs x6, x6, x3
ldp x7, x8, [x26, #64]
ldp x4, x3, [x26, #112]
adcs x7, x7, x4
adcs x8, x8, x3
ldp x9, x10, [x26, #80]
ldp x4, x3, [x26, #128]
adcs x9, x9, x4
adcs x10, x10, x3
adc x3, xzr, xzr
mov x4, #0xffffffff
cmp x5, x4
mov x4, #0xffffffff00000000
sbcs xzr, x6, x4
mov x4, #0xfffffffffffffffe
sbcs xzr, x7, x4
adcs xzr, x8, xzr
adcs xzr, x9, xzr
adcs xzr, x10, xzr
adcs x3, x3, xzr
csetm x3, ne
mov x4, #0xffffffff
and x4, x4, x3
subs x5, x5, x4
eor x4, x4, x3
sbcs x6, x6, x4
mov x4, #0xfffffffffffffffe
and x4, x4, x3
sbcs x7, x7, x4
sbcs x8, x8, x3
sbcs x9, x9, x3
sbc x10, x10, x3
stp x5, x6, [sp, #240]
stp x7, x8, [sp, #256]
stp x9, x10, [sp, #272]
ldr q1, [sp, #96]
ldp x9, x2, [sp, #96]
ldr q0, [sp, #96]
ldp x4, x6, [sp, #112]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [sp, #128]
xtn v30.2s, v0.2d
ldr q1, [sp, #128]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [sp, #128]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [sp, #288]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [sp, #304]
csetm x15, cc
cneg x1, x1, cc
stp x11, x14, [sp, #320]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc
cinv x16, x15, cc
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc
cneg x9, x9, cc
subs x4, x2, x4
cneg x4, x4, cc
csetm x7, cc
subs x2, x10, x6
cinv x8, x8, cc
cneg x2, x2, cc
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc
cneg x1, x1, cc
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [sp, #304]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [sp, #288]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [sp, #320]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001
adcs x14, x14, x2
mov x2, #0x1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [sp, #288]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [sp, #304]
adc x17, x14, xzr
stp x2, x17, [sp, #320]
ldr q3, [x26, #0]
ldr q25, [sp, #48]
ldp x13, x23, [sp, #48]
ldp x3, x21, [x26, #0]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #80]
ldp x8, x24, [x26, #16]
subs x6, x3, x21
ldr q0, [x26, #32]
movi v23.2d, #0xffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc
ldp x6, x14, [sp, #64]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #80]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [x26, #32]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x26, x4, x16
mov x4, v27.d[0]
sbcs x27, x20, x11
sbcs x20, x9, x12
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #160]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [sp, #176]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #160]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #176]
adds x20, x22, x26
mul x10, x13, x14
adcs x11, x11, x27
eor x9, x8, x21
adcs x26, x19, x17
stp x20, x11, [sp, #144]
adcs x27, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #176]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #144]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #176]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x26
eor x1, x22, x9
adcs x24, x23, x27
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x26
adcs x15, x17, x27
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #144]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [sp, #160]
adc x12, x15, x23
stp x21, x12, [sp, #176]
ldr q1, [sp, #240]
ldp x9, x2, [sp, #240]
ldr q0, [sp, #240]
ldp x4, x6, [sp, #256]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [sp, #272]
xtn v30.2s, v0.2d
ldr q1, [sp, #272]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [sp, #272]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x19, x3, x17
sbcs x20, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [sp, #192]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
csetm x15, cc
cneg x1, x1, cc
stp x11, x14, [sp, #224]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc
cinv x16, x15, cc
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc
cneg x9, x9, cc
subs x4, x2, x4
cneg x4, x4, cc
csetm x7, cc
subs x2, x10, x6
cinv x8, x8, cc
cneg x2, x2, cc
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc
cneg x1, x1, cc
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [sp, #192]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [sp, #224]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x19
adcs x1, x1, x20
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001
adcs x14, x14, x2
mov x2, #0x1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x19, x13, x1
and x13, x4, x9
adcs x20, x6, x13
and x1, x2, x9
adcs x7, x8, x1
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [sp, #208]
adc x17, x14, xzr
stp x2, x17, [sp, #224]
ldp x0, x1, [sp, #288]
mov x6, #0xffffffff
subs x6, x6, x0
mov x7, #0xffffffff00000000
sbcs x7, x7, x1
ldp x0, x1, [sp, #304]
mov x8, #0xfffffffffffffffe
sbcs x8, x8, x0
mov x13, #0xffffffffffffffff
sbcs x9, x13, x1
ldp x0, x1, [sp, #320]
sbcs x10, x13, x0
sbc x11, x13, x1
mov x12, #0x9
mul x0, x12, x6
mul x1, x12, x7
mul x2, x12, x8
mul x3, x12, x9
mul x4, x12, x10
mul x5, x12, x11
umulh x6, x12, x6
umulh x7, x12, x7
umulh x8, x12, x8
umulh x9, x12, x9
umulh x10, x12, x10
umulh x12, x12, x11
adds x1, x1, x6
adcs x2, x2, x7
adcs x3, x3, x8
adcs x4, x4, x9
adcs x5, x5, x10
mov x6, #0x1
adc x6, x12, x6
ldp x8, x9, [sp, #144]
ldp x10, x11, [sp, #160]
ldp x12, x13, [sp, #176]
mov x14, #0xc
mul x15, x14, x8
umulh x8, x14, x8
adds x0, x0, x15
mul x15, x14, x9
umulh x9, x14, x9
adcs x1, x1, x15
mul x15, x14, x10
umulh x10, x14, x10
adcs x2, x2, x15
mul x15, x14, x11
umulh x11, x14, x11
adcs x3, x3, x15
mul x15, x14, x12
umulh x12, x14, x12
adcs x4, x4, x15
mul x15, x14, x13
umulh x13, x14, x13
adcs x5, x5, x15
adc x6, x6, xzr
adds x1, x1, x8
adcs x2, x2, x9
adcs x3, x3, x10
adcs x4, x4, x11
adcs x5, x5, x12
adcs x6, x6, x13
lsl x7, x6, #32
subs x8, x6, x7
sbc x7, x7, xzr
adds x0, x0, x8
adcs x1, x1, x7
adcs x2, x2, x6
adcs x3, x3, xzr
adcs x4, x4, xzr
adcs x5, x5, xzr
csetm x6, cc
mov x7, #0xffffffff
and x7, x7, x6
adds x0, x0, x7
eor x7, x7, x6
adcs x1, x1, x7
mov x7, #0xfffffffffffffffe
and x7, x7, x6
adcs x2, x2, x7
adcs x3, x3, x6
adcs x4, x4, x6
adc x5, x5, x6
stp x0, x1, [sp, #288]
stp x2, x3, [sp, #304]
stp x4, x5, [sp, #320]
mov x2, sp
ldp x4, x3, [x2]
subs x5, x19, x4
sbcs x6, x20, x3
ldp x7, x8, [sp, #208]
ldp x4, x3, [x2, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #224]
ldp x4, x3, [x2, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, cc
mov x4, #0xffffffff
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [sp, #240]
stp x7, x8, [sp, #256]
stp x9, x10, [sp, #272]
ldr q1, [sp, #48]
ldp x9, x2, [sp, #48]
ldr q0, [sp, #48]
ldp x4, x6, [sp, #64]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [sp, #80]
xtn v30.2s, v0.2d
ldr q1, [sp, #80]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [sp, #80]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x19, x3, x17
sbcs x20, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [sp, #192]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
csetm x15, cc
cneg x1, x1, cc
stp x11, x14, [sp, #224]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc
cinv x16, x15, cc
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc
cneg x9, x9, cc
subs x4, x2, x4
cneg x4, x4, cc
csetm x7, cc
subs x2, x10, x6
cinv x8, x8, cc
cneg x2, x2, cc
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc
cneg x1, x1, cc
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [sp, #192]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [sp, #224]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x19
adcs x1, x1, x20
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001
adcs x14, x14, x2
mov x2, #0x1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [sp, #192]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [sp, #208]
adc x17, x14, xzr
stp x2, x17, [sp, #224]
ldp x5, x6, [sp, #240]
ldp x4, x3, [sp, #48]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #256]
ldp x4, x3, [sp, #64]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #272]
ldp x4, x3, [sp, #80]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, cc
mov x4, #0xffffffff
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [x25, #96]
stp x7, x8, [x25, #112]
stp x9, x10, [x25, #128]
ldr q3, [sp, #288]
ldr q25, [sp, #96]
ldp x13, x23, [sp, #96]
ldp x3, x21, [sp, #288]
rev64 v23.4s, v25.4s
uzp1 v17.4s, v25.4s, v3.4s
umulh x15, x3, x13
mul v6.4s, v23.4s, v3.4s
uzp1 v3.4s, v3.4s, v3.4s
ldr q27, [sp, #128]
ldp x8, x24, [sp, #304]
subs x6, x3, x21
ldr q0, [sp, #320]
movi v23.2d, #0xffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4s, v27.4s
uzp2 v25.4s, v27.4s, v27.4s
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2s, v0.2d
xtn v24.2s, v27.2d
cneg x20, x7, cc
ldp x6, x14, [sp, #112]
mul v27.4s, v4.4s, v0.4s
uaddlp v20.2d, v6.4s
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4s, v0.4s, v0.4s
umull v21.2d, v22.2s, v25.2s
shl v0.2d, v20.2d, #32
umlal v0.2d, v3.2s, v17.2s
mul x22, x8, x6
umull v1.2d, v6.2s, v25.2s
subs x12, x3, x8
umull v20.2d, v22.2s, v24.2s
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2d, v20.2d, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2d, v21.2d, #32
adds x22, x15, x7
and v26.16b, v21.16b, v23.16b
adcs x16, x12, x15
uaddlp v25.2d, v27.4s
adcs x9, x19, x12
umlal v26.2d, v6.2s, v24.2s
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2d, v25.2d, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2d, v22.2s, v24.2s
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2d, v26.2d, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [sp, #128]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [sp, #320]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x26, x4, x16
mov x4, v27.d[0]
sbcs x27, x20, x11
sbcs x20, x9, x12
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [sp, #256]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [sp, #272]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [sp, #256]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [sp, #272]
adds x20, x22, x26
mul x10, x13, x14
adcs x11, x11, x27
eor x9, x8, x21
adcs x26, x19, x17
stp x20, x11, [sp, #240]
adcs x27, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [sp, #272]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [sp, #240]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [sp, #272]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x26
eor x1, x22, x9
adcs x24, x23, x27
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x26
adcs x15, x17, x27
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [sp, #240]
adcs x5, x13, x23
adcs x12, x8, x23
stp x14, x5, [sp, #256]
adc x19, x15, x23
ldp x1, x2, [sp, #144]
ldp x3, x4, [sp, #160]
ldp x5, x6, [sp, #176]
lsl x0, x1, #2
ldp x7, x8, [sp, #288]
subs x0, x0, x7
extr x1, x2, x1, #62
sbcs x1, x1, x8
ldp x7, x8, [sp, #304]
extr x2, x3, x2, #62
sbcs x2, x2, x7
extr x3, x4, x3, #62
sbcs x3, x3, x8
extr x4, x5, x4, #62
ldp x7, x8, [sp, #320]
sbcs x4, x4, x7
extr x5, x6, x5, #62
sbcs x5, x5, x8
lsr x6, x6, #62
adc x6, x6, xzr
lsl x7, x6, #32
subs x8, x6, x7
sbc x7, x7, xzr
adds x0, x0, x8
adcs x1, x1, x7
adcs x2, x2, x6
adcs x3, x3, xzr
adcs x4, x4, xzr
adcs x5, x5, xzr
csetm x8, cc
mov x9, #0xffffffff
and x9, x9, x8
adds x0, x0, x9
eor x9, x9, x8
adcs x1, x1, x9
mov x9, #0xfffffffffffffffe
and x9, x9, x8
adcs x2, x2, x9
adcs x3, x3, x8
adcs x4, x4, x8
adc x5, x5, x8
stp x0, x1, [x25]
stp x2, x3, [x25, #16]
stp x4, x5, [x25, #32]
ldp x0, x1, [sp, #192]
mov x6, #0xffffffff
subs x6, x6, x0
mov x7, #0xffffffff00000000
sbcs x7, x7, x1
ldp x0, x1, [sp, #208]
mov x8, #0xfffffffffffffffe
sbcs x8, x8, x0
mov x13, #0xffffffffffffffff
sbcs x9, x13, x1
ldp x0, x1, [sp, #224]
sbcs x10, x13, x0
sbc x11, x13, x1
lsl x0, x6, #3
extr x1, x7, x6, #61
extr x2, x8, x7, #61
extr x3, x9, x8, #61
extr x4, x10, x9, #61
extr x5, x11, x10, #61
lsr x6, x11, #61
add x6, x6, #0x1
ldp x8, x9, [sp, #240]
ldp x10, x11, [sp, #256]
mov x14, #0x3
mul x15, x14, x8
umulh x8, x14, x8
adds x0, x0, x15
mul x15, x14, x9
umulh x9, x14, x9
adcs x1, x1, x15
mul x15, x14, x10
umulh x10, x14, x10
adcs x2, x2, x15
mul x15, x14, x11
umulh x11, x14, x11
adcs x3, x3, x15
mul x15, x14, x12
umulh x12, x14, x12
adcs x4, x4, x15
mul x15, x14, x19
umulh x13, x14, x19
adcs x5, x5, x15
adc x6, x6, xzr
adds x1, x1, x8
adcs x2, x2, x9
adcs x3, x3, x10
adcs x4, x4, x11
adcs x5, x5, x12
adcs x6, x6, x13
lsl x7, x6, #32
subs x8, x6, x7
sbc x7, x7, xzr
adds x0, x0, x8
adcs x1, x1, x7
adcs x2, x2, x6
adcs x3, x3, xzr
adcs x4, x4, xzr
adcs x5, x5, xzr
csetm x6, cc
mov x7, #0xffffffff
and x7, x7, x6
adds x0, x0, x7
eor x7, x7, x6
adcs x1, x1, x7
mov x7, #0xfffffffffffffffe
and x7, x7, x6
adcs x2, x2, x7
adcs x3, x3, x6
adcs x4, x4, x6
adc x5, x5, x6
stp x0, x1, [x25, #48]
stp x2, x3, [x25, #64]
stp x4, x5, [x25, #80]
// Restore stack and registers
ldp x19, x20, [sp, NSPACE]
ldp x21, x22, [sp, NSPACE+16]
ldp x23, x24, [sp, NSPACE+32]
ldp x25, x26, [sp, NSPACE+48]
ldp x27, xzr, [sp, NSPACE+64]
add sp, sp, NSPACE+80
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
| marvin-hansen/iggy-streaming-system | 3,859 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_cmul_p521.S |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply by a single word modulo p_521, z := (c * x) mod p_521, assuming
// x reduced
// Inputs c, x[9]; output z[9]
//
// extern void bignum_cmul_p521
// (uint64_t z[static 9], uint64_t c, uint64_t x[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = c, X2 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p521)
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p521_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p521_alt)
.text
.balign 4
#define z x0
#define c x1
#define x x2
#define d0 x3
#define d1 x4
#define d2 x5
#define d3 x6
#define d4 x7
#define d5 x8
#define d6 x9
#define d7 x10
#define d8 x11
#define d9 x12
// Heavily aliased subject to ordering
#define a0 d3
#define a1 d4
#define a2 d5
#define a3 d6
#define a4 d7
#define a5 d8
#define a6 d9
#define h d9
// Other variables
#define a7 x13
#define a8 x14
#define dd x15
S2N_BN_SYMBOL(bignum_cmul_p521):
S2N_BN_SYMBOL(bignum_cmul_p521_alt):
// First do the multiply, getting [d9; ...; d0], and as this is done
// accumulate an AND "dd" of digits d7,...,d1 for later use
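// For reference, the computation can be sketched informally as follows,
// with p_521 = 2^521 - 1 and t the 10-digit product [d9; ...; d0]:
//
//   t = c * x                  (at most 585 bits, since x < 2^521 and c < 2^64)
//   h = t >> 521               (the high part extracted below)
//   l = t mod 2^521            (the low part [d8 & 0x1FF; d7; ...; d0])
//   t mod p_521 = (h + l) mod p_521,   because 2^521 == 1 (mod p_521)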
ldp a0, a1, [x]
mul d0, c, a0
mul d1, c, a1
umulh a0, c, a0
adds d1, d1, a0
umulh a1, c, a1
ldp a2, a3, [x, #16]
mul d2, c, a2
mul d3, c, a3
umulh a2, c, a2
adcs d2, d2, a1
and dd, d1, d2
umulh a3, c, a3
adcs d3, d3, a2
and dd, dd, d3
ldp a4, a5, [x, #32]
mul d4, c, a4
mul d5, c, a5
umulh a4, c, a4
adcs d4, d4, a3
and dd, dd, d4
umulh a5, c, a5
adcs d5, d5, a4
and dd, dd, d5
ldp a6, a7, [x, #48]
mul d6, c, a6
mul d7, c, a7
umulh a6, c, a6
adcs d6, d6, a5
and dd, dd, d6
umulh a7, c, a7
adcs d7, d7, a6
and dd, dd, d7
ldr a8, [x, #64]
mul d8, c, a8
adcs d8, d8, a7
umulh a8, c, a8
adc d9, xzr, a8
// Extract the high part h and mask off the low part l = [d8;d7;...;d0]
// but stuff d8 with 1 bits at the left to ease a comparison below
extr h, d9, d8, #9
orr d8, d8, #~0x1FF
// Decide whether h + l >= p_521 <=> h + l + 1 >= 2^521. Since this can only
// happen if digits d7,...d1 are all 1s, we use the AND of them "dd" to
// condense the carry chain, and since we stuffed 1 bits into d8 we get
// the result in CF without an additional comparison.
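// A rough C restatement of this test (illustrative only, not part of the
// build; a single incoming carry ripples through a 64-bit digit exactly when
// that digit is all 1s, which is why the AND "dd" of d7,...,d1 loses nothing):
//
//   int cf = ((unsigned __int128)d0 + h + 1) >> 64;   // carry out of d0 + h + 1
//   cf = cf && (dd == 0xFFFFFFFFFFFFFFFFull);         // ripples through d7,...,d1
//   cf = cf && ((d8 & 0x1FF) == 0x1FF);               // ripples out of the top 9 bits
//   // cf == 1  exactly when  h + l >= p_521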
subs xzr, xzr, xzr
adcs xzr, d0, h
adcs xzr, dd, xzr
adcs xzr, d8, xzr
// Now if CF is set we want (h + l) - p_521 = (h + l + 1) - 2^521
// while otherwise we want just h + l. So mask h + l + CF to 521 bits.
// This masking also gets rid of the stuffing with 1s we did above.
adcs d0, d0, h
adcs d1, d1, xzr
adcs d2, d2, xzr
adcs d3, d3, xzr
adcs d4, d4, xzr
adcs d5, d5, xzr
adcs d6, d6, xzr
adcs d7, d7, xzr
adc d8, d8, xzr
and d8, d8, #0x1FF
// Store the result
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
stp d6, d7, [z, #48]
str d8, [z, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
| marvin-hansen/iggy-streaming-system | 38,606 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/p521_jmixadd_alt.S |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point mixed addition on NIST curve P-521 in Jacobian coordinates
//
// extern void p521_jmixadd_alt
// (uint64_t p3[static 27],uint64_t p1[static 27],uint64_t p2[static 18]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// The "mixed" part means that p2 only has x and y coordinates, with the
// implicit z coordinate assumed to be the identity. It is assumed that
// all the coordinates of the input points p1 and p2 are fully reduced
// mod p_521, that the z coordinate of p1 is nonzero and that neither
// p1 =~= p2 nor p1 =~= -p2, where "=~=" means "represents the same affine
// point as".
//
// Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jmixadd_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jmixadd_alt)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 72
// Stable homes for input arguments during main code sequence
#define input_z x26
#define input_x x27
#define input_y x28
// Pointer-offset pairs for inputs and outputs
#define x_1 input_x, #0
#define y_1 input_x, #NUMSIZE
#define z_1 input_x, #(2*NUMSIZE)
#define x_2 input_y, #0
#define y_2 input_y, #NUMSIZE
#define x_3 input_z, #0
#define y_3 input_z, #NUMSIZE
#define z_3 input_z, #(2*NUMSIZE)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define zp2 sp, #(NUMSIZE*0)
#define ww sp, #(NUMSIZE*0)
#define resx sp, #(NUMSIZE*0)
#define yd sp, #(NUMSIZE*1)
#define y2a sp, #(NUMSIZE*1)
#define x2a sp, #(NUMSIZE*2)
#define zzx2 sp, #(NUMSIZE*2)
#define zz sp, #(NUMSIZE*3)
#define t1 sp, #(NUMSIZE*3)
#define t2 sp, #(NUMSIZE*4)
#define zzx1 sp, #(NUMSIZE*4)
#define resy sp, #(NUMSIZE*4)
#define xd sp, #(NUMSIZE*5)
#define resz sp, #(NUMSIZE*5)
#define NSPACE (NUMSIZE*6)
// Corresponds exactly to bignum_mul_p521_alt
#define mul_p521(P0,P1,P2) \
ldp x3, x4, [P1]; \
ldp x5, x6, [P2]; \
mul x15, x3, x5; \
umulh x16, x3, x5; \
mul x14, x3, x6; \
umulh x17, x3, x6; \
adds x16, x16, x14; \
ldp x7, x8, [P2+16]; \
mul x14, x3, x7; \
umulh x19, x3, x7; \
adcs x17, x17, x14; \
mul x14, x3, x8; \
umulh x20, x3, x8; \
adcs x19, x19, x14; \
ldp x9, x10, [P2+32]; \
mul x14, x3, x9; \
umulh x21, x3, x9; \
adcs x20, x20, x14; \
mul x14, x3, x10; \
umulh x22, x3, x10; \
adcs x21, x21, x14; \
ldp x11, x12, [P2+48]; \
mul x14, x3, x11; \
umulh x23, x3, x11; \
adcs x22, x22, x14; \
ldr x13, [P2+64]; \
mul x14, x3, x12; \
umulh x24, x3, x12; \
adcs x23, x23, x14; \
mul x14, x3, x13; \
umulh x1, x3, x13; \
adcs x24, x24, x14; \
adc x1, x1, xzr; \
mul x14, x4, x5; \
adds x16, x16, x14; \
mul x14, x4, x6; \
adcs x17, x17, x14; \
mul x14, x4, x7; \
adcs x19, x19, x14; \
mul x14, x4, x8; \
adcs x20, x20, x14; \
mul x14, x4, x9; \
adcs x21, x21, x14; \
mul x14, x4, x10; \
adcs x22, x22, x14; \
mul x14, x4, x11; \
adcs x23, x23, x14; \
mul x14, x4, x12; \
adcs x24, x24, x14; \
mul x14, x4, x13; \
adcs x1, x1, x14; \
cset x0, hs; \
umulh x14, x4, x5; \
adds x17, x17, x14; \
umulh x14, x4, x6; \
adcs x19, x19, x14; \
umulh x14, x4, x7; \
adcs x20, x20, x14; \
umulh x14, x4, x8; \
adcs x21, x21, x14; \
umulh x14, x4, x9; \
adcs x22, x22, x14; \
umulh x14, x4, x10; \
adcs x23, x23, x14; \
umulh x14, x4, x11; \
adcs x24, x24, x14; \
umulh x14, x4, x12; \
adcs x1, x1, x14; \
umulh x14, x4, x13; \
adc x0, x0, x14; \
stp x15, x16, [P0]; \
ldp x3, x4, [P1+16]; \
mul x14, x3, x5; \
adds x17, x17, x14; \
mul x14, x3, x6; \
adcs x19, x19, x14; \
mul x14, x3, x7; \
adcs x20, x20, x14; \
mul x14, x3, x8; \
adcs x21, x21, x14; \
mul x14, x3, x9; \
adcs x22, x22, x14; \
mul x14, x3, x10; \
adcs x23, x23, x14; \
mul x14, x3, x11; \
adcs x24, x24, x14; \
mul x14, x3, x12; \
adcs x1, x1, x14; \
mul x14, x3, x13; \
adcs x0, x0, x14; \
cset x15, hs; \
umulh x14, x3, x5; \
adds x19, x19, x14; \
umulh x14, x3, x6; \
adcs x20, x20, x14; \
umulh x14, x3, x7; \
adcs x21, x21, x14; \
umulh x14, x3, x8; \
adcs x22, x22, x14; \
umulh x14, x3, x9; \
adcs x23, x23, x14; \
umulh x14, x3, x10; \
adcs x24, x24, x14; \
umulh x14, x3, x11; \
adcs x1, x1, x14; \
umulh x14, x3, x12; \
adcs x0, x0, x14; \
umulh x14, x3, x13; \
adc x15, x15, x14; \
mul x14, x4, x5; \
adds x19, x19, x14; \
mul x14, x4, x6; \
adcs x20, x20, x14; \
mul x14, x4, x7; \
adcs x21, x21, x14; \
mul x14, x4, x8; \
adcs x22, x22, x14; \
mul x14, x4, x9; \
adcs x23, x23, x14; \
mul x14, x4, x10; \
adcs x24, x24, x14; \
mul x14, x4, x11; \
adcs x1, x1, x14; \
mul x14, x4, x12; \
adcs x0, x0, x14; \
mul x14, x4, x13; \
adcs x15, x15, x14; \
cset x16, hs; \
umulh x14, x4, x5; \
adds x20, x20, x14; \
umulh x14, x4, x6; \
adcs x21, x21, x14; \
umulh x14, x4, x7; \
adcs x22, x22, x14; \
umulh x14, x4, x8; \
adcs x23, x23, x14; \
umulh x14, x4, x9; \
adcs x24, x24, x14; \
umulh x14, x4, x10; \
adcs x1, x1, x14; \
umulh x14, x4, x11; \
adcs x0, x0, x14; \
umulh x14, x4, x12; \
adcs x15, x15, x14; \
umulh x14, x4, x13; \
adc x16, x16, x14; \
stp x17, x19, [P0+16]; \
ldp x3, x4, [P1+32]; \
mul x14, x3, x5; \
adds x20, x20, x14; \
mul x14, x3, x6; \
adcs x21, x21, x14; \
mul x14, x3, x7; \
adcs x22, x22, x14; \
mul x14, x3, x8; \
adcs x23, x23, x14; \
mul x14, x3, x9; \
adcs x24, x24, x14; \
mul x14, x3, x10; \
adcs x1, x1, x14; \
mul x14, x3, x11; \
adcs x0, x0, x14; \
mul x14, x3, x12; \
adcs x15, x15, x14; \
mul x14, x3, x13; \
adcs x16, x16, x14; \
cset x17, hs; \
umulh x14, x3, x5; \
adds x21, x21, x14; \
umulh x14, x3, x6; \
adcs x22, x22, x14; \
umulh x14, x3, x7; \
adcs x23, x23, x14; \
umulh x14, x3, x8; \
adcs x24, x24, x14; \
umulh x14, x3, x9; \
adcs x1, x1, x14; \
umulh x14, x3, x10; \
adcs x0, x0, x14; \
umulh x14, x3, x11; \
adcs x15, x15, x14; \
umulh x14, x3, x12; \
adcs x16, x16, x14; \
umulh x14, x3, x13; \
adc x17, x17, x14; \
mul x14, x4, x5; \
adds x21, x21, x14; \
mul x14, x4, x6; \
adcs x22, x22, x14; \
mul x14, x4, x7; \
adcs x23, x23, x14; \
mul x14, x4, x8; \
adcs x24, x24, x14; \
mul x14, x4, x9; \
adcs x1, x1, x14; \
mul x14, x4, x10; \
adcs x0, x0, x14; \
mul x14, x4, x11; \
adcs x15, x15, x14; \
mul x14, x4, x12; \
adcs x16, x16, x14; \
mul x14, x4, x13; \
adcs x17, x17, x14; \
cset x19, hs; \
umulh x14, x4, x5; \
adds x22, x22, x14; \
umulh x14, x4, x6; \
adcs x23, x23, x14; \
umulh x14, x4, x7; \
adcs x24, x24, x14; \
umulh x14, x4, x8; \
adcs x1, x1, x14; \
umulh x14, x4, x9; \
adcs x0, x0, x14; \
umulh x14, x4, x10; \
adcs x15, x15, x14; \
umulh x14, x4, x11; \
adcs x16, x16, x14; \
umulh x14, x4, x12; \
adcs x17, x17, x14; \
umulh x14, x4, x13; \
adc x19, x19, x14; \
stp x20, x21, [P0+32]; \
ldp x3, x4, [P1+48]; \
mul x14, x3, x5; \
adds x22, x22, x14; \
mul x14, x3, x6; \
adcs x23, x23, x14; \
mul x14, x3, x7; \
adcs x24, x24, x14; \
mul x14, x3, x8; \
adcs x1, x1, x14; \
mul x14, x3, x9; \
adcs x0, x0, x14; \
mul x14, x3, x10; \
adcs x15, x15, x14; \
mul x14, x3, x11; \
adcs x16, x16, x14; \
mul x14, x3, x12; \
adcs x17, x17, x14; \
mul x14, x3, x13; \
adcs x19, x19, x14; \
cset x20, hs; \
umulh x14, x3, x5; \
adds x23, x23, x14; \
umulh x14, x3, x6; \
adcs x24, x24, x14; \
umulh x14, x3, x7; \
adcs x1, x1, x14; \
umulh x14, x3, x8; \
adcs x0, x0, x14; \
umulh x14, x3, x9; \
adcs x15, x15, x14; \
umulh x14, x3, x10; \
adcs x16, x16, x14; \
umulh x14, x3, x11; \
adcs x17, x17, x14; \
umulh x14, x3, x12; \
adcs x19, x19, x14; \
umulh x14, x3, x13; \
adc x20, x20, x14; \
mul x14, x4, x5; \
adds x23, x23, x14; \
mul x14, x4, x6; \
adcs x24, x24, x14; \
mul x14, x4, x7; \
adcs x1, x1, x14; \
mul x14, x4, x8; \
adcs x0, x0, x14; \
mul x14, x4, x9; \
adcs x15, x15, x14; \
mul x14, x4, x10; \
adcs x16, x16, x14; \
mul x14, x4, x11; \
adcs x17, x17, x14; \
mul x14, x4, x12; \
adcs x19, x19, x14; \
mul x14, x4, x13; \
adcs x20, x20, x14; \
cset x21, hs; \
umulh x14, x4, x5; \
adds x24, x24, x14; \
umulh x14, x4, x6; \
adcs x1, x1, x14; \
umulh x14, x4, x7; \
adcs x0, x0, x14; \
umulh x14, x4, x8; \
adcs x15, x15, x14; \
umulh x14, x4, x9; \
adcs x16, x16, x14; \
umulh x14, x4, x10; \
adcs x17, x17, x14; \
umulh x14, x4, x11; \
adcs x19, x19, x14; \
umulh x14, x4, x12; \
adcs x20, x20, x14; \
umulh x14, x4, x13; \
adc x21, x21, x14; \
stp x22, x23, [P0+48]; \
ldr x3, [P1+64]; \
mul x14, x3, x5; \
adds x24, x24, x14; \
mul x14, x3, x6; \
adcs x1, x1, x14; \
mul x14, x3, x7; \
adcs x0, x0, x14; \
mul x14, x3, x8; \
adcs x15, x15, x14; \
mul x14, x3, x9; \
adcs x16, x16, x14; \
mul x14, x3, x10; \
adcs x17, x17, x14; \
mul x14, x3, x11; \
adcs x19, x19, x14; \
mul x14, x3, x12; \
adcs x20, x20, x14; \
mul x14, x3, x13; \
adc x21, x21, x14; \
umulh x14, x3, x5; \
adds x1, x1, x14; \
umulh x14, x3, x6; \
adcs x0, x0, x14; \
umulh x14, x3, x7; \
adcs x15, x15, x14; \
umulh x14, x3, x8; \
adcs x16, x16, x14; \
umulh x14, x3, x9; \
adcs x17, x17, x14; \
umulh x14, x3, x10; \
adcs x19, x19, x14; \
umulh x14, x3, x11; \
adcs x20, x20, x14; \
umulh x14, x3, x12; \
adc x21, x21, x14; \
cmp xzr, xzr; \
ldp x5, x6, [P0]; \
extr x14, x1, x24, #9; \
adcs x5, x5, x14; \
extr x14, x0, x1, #9; \
adcs x6, x6, x14; \
ldp x7, x8, [P0+16]; \
extr x14, x15, x0, #9; \
adcs x7, x7, x14; \
extr x14, x16, x15, #9; \
adcs x8, x8, x14; \
ldp x9, x10, [P0+32]; \
extr x14, x17, x16, #9; \
adcs x9, x9, x14; \
extr x14, x19, x17, #9; \
adcs x10, x10, x14; \
ldp x11, x12, [P0+48]; \
extr x14, x20, x19, #9; \
adcs x11, x11, x14; \
extr x14, x21, x20, #9; \
adcs x12, x12, x14; \
orr x13, x24, #0xfffffffffffffe00; \
lsr x14, x21, #9; \
adcs x13, x13, x14; \
sbcs x5, x5, xzr; \
sbcs x6, x6, xzr; \
sbcs x7, x7, xzr; \
sbcs x8, x8, xzr; \
sbcs x9, x9, xzr; \
sbcs x10, x10, xzr; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbc x13, x13, xzr; \
and x13, x13, #0x1ff; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]; \
stp x11, x12, [P0+48]; \
str x13, [P0+64]
// Corresponds exactly to bignum_sqr_p521_alt
#define sqr_p521(P0,P1) \
ldp x2, x3, [P1]; \
mul x11, x2, x3; \
umulh x12, x2, x3; \
ldp x4, x5, [P1+16]; \
mul x10, x2, x4; \
umulh x13, x2, x4; \
adds x12, x12, x10; \
ldp x6, x7, [P1+32]; \
mul x10, x2, x5; \
umulh x14, x2, x5; \
adcs x13, x13, x10; \
ldp x8, x9, [P1+48]; \
mul x10, x2, x6; \
umulh x15, x2, x6; \
adcs x14, x14, x10; \
mul x10, x2, x7; \
umulh x16, x2, x7; \
adcs x15, x15, x10; \
mul x10, x2, x8; \
umulh x17, x2, x8; \
adcs x16, x16, x10; \
mul x10, x2, x9; \
umulh x19, x2, x9; \
adcs x17, x17, x10; \
adc x19, x19, xzr; \
mul x10, x3, x4; \
adds x13, x13, x10; \
mul x10, x3, x5; \
adcs x14, x14, x10; \
mul x10, x3, x6; \
adcs x15, x15, x10; \
mul x10, x3, x7; \
adcs x16, x16, x10; \
mul x10, x3, x8; \
adcs x17, x17, x10; \
mul x10, x3, x9; \
adcs x19, x19, x10; \
cset x20, hs; \
umulh x10, x3, x4; \
adds x14, x14, x10; \
umulh x10, x3, x5; \
adcs x15, x15, x10; \
umulh x10, x3, x6; \
adcs x16, x16, x10; \
umulh x10, x3, x7; \
adcs x17, x17, x10; \
umulh x10, x3, x8; \
adcs x19, x19, x10; \
umulh x10, x3, x9; \
adc x20, x20, x10; \
mul x10, x6, x7; \
umulh x21, x6, x7; \
adds x20, x20, x10; \
adc x21, x21, xzr; \
mul x10, x4, x5; \
adds x15, x15, x10; \
mul x10, x4, x6; \
adcs x16, x16, x10; \
mul x10, x4, x7; \
adcs x17, x17, x10; \
mul x10, x4, x8; \
adcs x19, x19, x10; \
mul x10, x4, x9; \
adcs x20, x20, x10; \
mul x10, x6, x8; \
adcs x21, x21, x10; \
cset x22, hs; \
umulh x10, x4, x5; \
adds x16, x16, x10; \
umulh x10, x4, x6; \
adcs x17, x17, x10; \
umulh x10, x4, x7; \
adcs x19, x19, x10; \
umulh x10, x4, x8; \
adcs x20, x20, x10; \
umulh x10, x4, x9; \
adcs x21, x21, x10; \
umulh x10, x6, x8; \
adc x22, x22, x10; \
mul x10, x7, x8; \
umulh x23, x7, x8; \
adds x22, x22, x10; \
adc x23, x23, xzr; \
mul x10, x5, x6; \
adds x17, x17, x10; \
mul x10, x5, x7; \
adcs x19, x19, x10; \
mul x10, x5, x8; \
adcs x20, x20, x10; \
mul x10, x5, x9; \
adcs x21, x21, x10; \
mul x10, x6, x9; \
adcs x22, x22, x10; \
mul x10, x7, x9; \
adcs x23, x23, x10; \
cset x24, hs; \
umulh x10, x5, x6; \
adds x19, x19, x10; \
umulh x10, x5, x7; \
adcs x20, x20, x10; \
umulh x10, x5, x8; \
adcs x21, x21, x10; \
umulh x10, x5, x9; \
adcs x22, x22, x10; \
umulh x10, x6, x9; \
adcs x23, x23, x10; \
umulh x10, x7, x9; \
adc x24, x24, x10; \
mul x10, x8, x9; \
umulh x25, x8, x9; \
adds x24, x24, x10; \
adc x25, x25, xzr; \
adds x11, x11, x11; \
adcs x12, x12, x12; \
adcs x13, x13, x13; \
adcs x14, x14, x14; \
adcs x15, x15, x15; \
adcs x16, x16, x16; \
adcs x17, x17, x17; \
adcs x19, x19, x19; \
adcs x20, x20, x20; \
adcs x21, x21, x21; \
adcs x22, x22, x22; \
adcs x23, x23, x23; \
adcs x24, x24, x24; \
adcs x25, x25, x25; \
cset x0, hs; \
umulh x10, x2, x2; \
adds x11, x11, x10; \
mul x10, x3, x3; \
adcs x12, x12, x10; \
umulh x10, x3, x3; \
adcs x13, x13, x10; \
mul x10, x4, x4; \
adcs x14, x14, x10; \
umulh x10, x4, x4; \
adcs x15, x15, x10; \
mul x10, x5, x5; \
adcs x16, x16, x10; \
umulh x10, x5, x5; \
adcs x17, x17, x10; \
mul x10, x6, x6; \
adcs x19, x19, x10; \
umulh x10, x6, x6; \
adcs x20, x20, x10; \
mul x10, x7, x7; \
adcs x21, x21, x10; \
umulh x10, x7, x7; \
adcs x22, x22, x10; \
mul x10, x8, x8; \
adcs x23, x23, x10; \
umulh x10, x8, x8; \
adcs x24, x24, x10; \
mul x10, x9, x9; \
adcs x25, x25, x10; \
umulh x10, x9, x9; \
adc x0, x0, x10; \
ldr x1, [P1+64]; \
add x1, x1, x1; \
mul x10, x1, x2; \
adds x19, x19, x10; \
umulh x10, x1, x2; \
adcs x20, x20, x10; \
mul x10, x1, x4; \
adcs x21, x21, x10; \
umulh x10, x1, x4; \
adcs x22, x22, x10; \
mul x10, x1, x6; \
adcs x23, x23, x10; \
umulh x10, x1, x6; \
adcs x24, x24, x10; \
mul x10, x1, x8; \
adcs x25, x25, x10; \
umulh x10, x1, x8; \
adcs x0, x0, x10; \
lsr x4, x1, #1; \
mul x4, x4, x4; \
adc x4, x4, xzr; \
mul x10, x1, x3; \
adds x20, x20, x10; \
umulh x10, x1, x3; \
adcs x21, x21, x10; \
mul x10, x1, x5; \
adcs x22, x22, x10; \
umulh x10, x1, x5; \
adcs x23, x23, x10; \
mul x10, x1, x7; \
adcs x24, x24, x10; \
umulh x10, x1, x7; \
adcs x25, x25, x10; \
mul x10, x1, x9; \
adcs x0, x0, x10; \
umulh x10, x1, x9; \
adc x4, x4, x10; \
mul x2, x2, x2; \
cmp xzr, xzr; \
extr x10, x20, x19, #9; \
adcs x2, x2, x10; \
extr x10, x21, x20, #9; \
adcs x11, x11, x10; \
extr x10, x22, x21, #9; \
adcs x12, x12, x10; \
extr x10, x23, x22, #9; \
adcs x13, x13, x10; \
extr x10, x24, x23, #9; \
adcs x14, x14, x10; \
extr x10, x25, x24, #9; \
adcs x15, x15, x10; \
extr x10, x0, x25, #9; \
adcs x16, x16, x10; \
extr x10, x4, x0, #9; \
adcs x17, x17, x10; \
orr x19, x19, #0xfffffffffffffe00; \
lsr x10, x4, #9; \
adcs x19, x19, x10; \
sbcs x2, x2, xzr; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbcs x13, x13, xzr; \
sbcs x14, x14, xzr; \
sbcs x15, x15, xzr; \
sbcs x16, x16, xzr; \
sbcs x17, x17, xzr; \
sbc x19, x19, xzr; \
and x19, x19, #0x1ff; \
stp x2, x11, [P0]; \
stp x12, x13, [P0+16]; \
stp x14, x15, [P0+32]; \
stp x16, x17, [P0+48]; \
str x19, [P0+64]
// Corresponds exactly to bignum_sub_p521
#define sub_p521(P0,P1,P2) \
ldp x5, x6, [P1]; \
ldp x4, x3, [P2]; \
subs x5, x5, x4; \
sbcs x6, x6, x3; \
ldp x7, x8, [P1+16]; \
ldp x4, x3, [P2+16]; \
sbcs x7, x7, x4; \
sbcs x8, x8, x3; \
ldp x9, x10, [P1+32]; \
ldp x4, x3, [P2+32]; \
sbcs x9, x9, x4; \
sbcs x10, x10, x3; \
ldp x11, x12, [P1+48]; \
ldp x4, x3, [P2+48]; \
sbcs x11, x11, x4; \
sbcs x12, x12, x3; \
ldr x13, [P1+64]; \
ldr x4, [P2+64]; \
sbcs x13, x13, x4; \
sbcs x5, x5, xzr; \
sbcs x6, x6, xzr; \
sbcs x7, x7, xzr; \
sbcs x8, x8, xzr; \
sbcs x9, x9, xzr; \
sbcs x10, x10, xzr; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbcs x13, x13, xzr; \
and x13, x13, #0x1ff; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]; \
stp x11, x12, [P0+48]; \
str x13, [P0+64]
S2N_BN_SYMBOL(p521_jmixadd_alt):
// Save regs and make room on stack for temporary variables
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, x28, [sp, #-16]!
sub sp, sp, NSPACE
// Move the input arguments to stable places
mov input_z, x0
mov input_x, x1
mov input_y, x2
// Main code, just a sequence of basic field operations
sqr_p521(zp2,z_1)
mul_p521(y2a,z_1,y_2)
mul_p521(x2a,zp2,x_2)
mul_p521(y2a,zp2,y2a)
sub_p521(xd,x2a,x_1)
sub_p521(yd,y2a,y_1)
sqr_p521(zz,xd)
sqr_p521(ww,yd)
mul_p521(zzx1,zz,x_1)
mul_p521(zzx2,zz,x2a)
sub_p521(resx,ww,zzx1)
sub_p521(t1,zzx2,zzx1)
mul_p521(resz,xd,z_1)
sub_p521(resx,resx,zzx2)
sub_p521(t2,zzx1,resx)
mul_p521(t1,t1,y_1)
mul_p521(t2,yd,t2)
sub_p521(resy,t2,t1)
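// For orientation, the sequence above written as operations in the field
// GF(p_521) (an informal sketch; names match the pointer-offset temporaries):
//
//   zp2  = z_1^2              x2a  = zp2 * x_2          y2a = zp2 * z_1 * y_2
//   xd   = x2a - x_1          yd   = y2a - y_1
//   zz   = xd^2               ww   = yd^2
//   zzx1 = zz * x_1           zzx2 = zz * x2a
//   resx = ww - zzx1 - zzx2   resz = xd * z_1
//   resy = yd * (zzx1 - resx) - (zzx2 - zzx1) * y_1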
// Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence)
ldp x0, x1, [z_1]
orr x0, x0, x1
ldp x2, x3, [z_1+16]
orr x2, x2, x3
ldp x4, x5, [z_1+32]
orr x4, x4, x5
ldp x6, x7, [z_1+48]
orr x6, x6, x7
ldr x8, [z_1+64]
orr x0, x0, x2
orr x4, x4, x6
orr x0, x0, x4
orr x0, x0, x8
cmp x0, xzr
// Multiplex: if p1 <> 0 just copy the computed result from the staging area.
// If p1 = 0 then return the point p2 augmented with an extra z = 1
// coordinate, hence giving 0 + p2 = p2 for the final result.
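// Illustrative C-style restatement of the selection below (not part of the
// build):
//
//   if (z_1 == 0) { x_3 = x_2; y_3 = y_2; z_3 = 1; }        // 0 + p2 = p2
//   else          { x_3 = resx; y_3 = resy; z_3 = resz; }
//
// The csel instructions implement this branch-free, keyed off the "ne"
// condition established by the cmp above.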
ldp x0, x1, [resx]
ldp x20, x21, [x_2]
csel x0, x0, x20, ne
csel x1, x1, x21, ne
ldp x2, x3, [resx+16]
ldp x20, x21, [x_2+16]
csel x2, x2, x20, ne
csel x3, x3, x21, ne
ldp x4, x5, [resx+32]
ldp x20, x21, [x_2+32]
csel x4, x4, x20, ne
csel x5, x5, x21, ne
ldp x6, x7, [resx+48]
ldp x20, x21, [x_2+48]
csel x6, x6, x20, ne
csel x7, x7, x21, ne
ldr x8, [resx+64]
ldr x20, [x_2+64]
csel x8, x8, x20, ne
ldp x10, x11, [resy]
ldp x20, x21, [y_2]
csel x10, x10, x20, ne
csel x11, x11, x21, ne
ldp x12, x13, [resy+16]
ldp x20, x21, [y_2+16]
csel x12, x12, x20, ne
csel x13, x13, x21, ne
ldp x14, x15, [resy+32]
ldp x20, x21, [y_2+32]
csel x14, x14, x20, ne
csel x15, x15, x21, ne
ldp x16, x17, [resy+48]
ldp x20, x21, [y_2+48]
csel x16, x16, x20, ne
csel x17, x17, x21, ne
ldr x19, [resy+64]
ldr x20, [y_2+64]
csel x19, x19, x20, ne
stp x0, x1, [x_3]
stp x2, x3, [x_3+16]
stp x4, x5, [x_3+32]
stp x6, x7, [x_3+48]
str x8, [x_3+64]
stp x10, x11, [y_3]
stp x12, x13, [y_3+16]
stp x14, x15, [y_3+32]
stp x16, x17, [y_3+48]
str x19, [y_3+64]
ldp x0, x1, [resz]
mov x20, #1
csel x0, x0, x20, ne
csel x1, x1, xzr, ne
ldp x2, x3, [resz+16]
csel x2, x2, xzr, ne
csel x3, x3, xzr, ne
ldp x4, x5, [resz+32]
csel x4, x4, xzr, ne
csel x5, x5, xzr, ne
ldp x6, x7, [resz+48]
csel x6, x6, xzr, ne
csel x7, x7, xzr, ne
ldr x8, [resz+64]
csel x8, x8, xzr, ne
stp x0, x1, [z_3]
stp x2, x3, [z_3+16]
stp x4, x5, [z_3+32]
stp x6, x7, [z_3+48]
str x8, [z_3+64]
// Restore stack and registers
add sp, sp, NSPACE
ldp x27, x28, [sp], 16
ldp x25, x26, [sp], 16
ldp x23, x24, [sp], 16
ldp x21, x22, [sp], 16
ldp x19, x20, [sp], 16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
| marvin-hansen/iggy-streaming-system | 53,301 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/p521_jdouble.S |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point doubling on NIST curve P-521 in Jacobian coordinates
//
// extern void p521_jdouble
// (uint64_t p3[static 27],uint64_t p1[static 27]);
//
// Does p3 := 2 * p1 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// It is assumed that all coordinates of the input point are fully
// reduced mod p_521 and that the z coordinate is not zero.
//
// Standard ARM ABI: X0 = p3, X1 = p1
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jdouble)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jdouble)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 72
// Stable homes for input arguments during main code sequence
#define input_z x27
#define input_x x28
// Pointer-offset pairs for inputs and outputs
#define x_1 input_x, #0
#define y_1 input_x, #NUMSIZE
#define z_1 input_x, #(2*NUMSIZE)
#define x_3 input_z, #0
#define y_3 input_z, #NUMSIZE
#define z_3 input_z, #(2*NUMSIZE)
// Pointer-offset pairs for temporaries
#define z2 sp, #(NUMSIZE*0)
#define y2 sp, #(NUMSIZE*1)
#define x2p sp, #(NUMSIZE*2)
#define xy2 sp, #(NUMSIZE*3)
#define y4 sp, #(NUMSIZE*4)
#define t2 sp, #(NUMSIZE*4)
#define dx2 sp, #(NUMSIZE*5)
#define t1 sp, #(NUMSIZE*5)
#define d_ sp, #(NUMSIZE*6)
#define x4p sp, #(NUMSIZE*6)
// NUMSIZE*7 is not 16-aligned so we round it up
#define NSPACE (NUMSIZE*7+8)
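// (Concretely: 72*7 = 504, which is not a multiple of 16; adding 8 gives 512,
// keeping the stack pointer 16-byte aligned.)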
// For the two "big" field operations, we use subroutines rather than inlining.
// Call local code very close to bignum_mul_p521 and bignum_sqr_p521.
#define mul_p521(P0,P1,P2) \
add x0, P0; \
add x1, P1; \
add x2, P2; \
bl local_mul_p521
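// As a concrete illustration, the call mul_p521(x2p,t1,t2) used below
// expands to
//
//     add x0, sp, #(NUMSIZE*2)     // x0 = address of x2p
//     add x1, sp, #(NUMSIZE*5)     // x1 = address of t1
//     add x2, sp, #(NUMSIZE*4)     // x2 = address of t2
//     bl  local_mul_p521
//
// so each pointer-offset pair turns into an address computation into x0-x2.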
// Call local code equivalent to bignum_sqr_p521
#define sqr_p521(P0,P1) \
add x0, P0; \
add x1, P1; \
bl local_sqr_p521
// Corresponds exactly to bignum_add_p521
#define add_p521(P0,P1,P2) \
cmp xzr, xzr; \
ldp x5, x6, [P1]; \
ldp x4, x3, [P2]; \
adcs x5, x5, x4; \
adcs x6, x6, x3; \
ldp x7, x8, [P1+16]; \
ldp x4, x3, [P2+16]; \
adcs x7, x7, x4; \
adcs x8, x8, x3; \
ldp x9, x10, [P1+32]; \
ldp x4, x3, [P2+32]; \
adcs x9, x9, x4; \
adcs x10, x10, x3; \
ldp x11, x12, [P1+48]; \
ldp x4, x3, [P2+48]; \
adcs x11, x11, x4; \
adcs x12, x12, x3; \
ldr x13, [P1+64]; \
ldr x4, [P2+64]; \
adc x13, x13, x4; \
subs x4, x13, #512; \
csetm x4, hs; \
sbcs x5, x5, xzr; \
and x4, x4, #0x200; \
sbcs x6, x6, xzr; \
sbcs x7, x7, xzr; \
sbcs x8, x8, xzr; \
sbcs x9, x9, xzr; \
sbcs x10, x10, xzr; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbc x13, x13, x4; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]; \
stp x11, x12, [P0+48]; \
str x13, [P0+64]
// Corresponds exactly to bignum_sub_p521
#define sub_p521(P0,P1,P2) \
ldp x5, x6, [P1]; \
ldp x4, x3, [P2]; \
subs x5, x5, x4; \
sbcs x6, x6, x3; \
ldp x7, x8, [P1+16]; \
ldp x4, x3, [P2+16]; \
sbcs x7, x7, x4; \
sbcs x8, x8, x3; \
ldp x9, x10, [P1+32]; \
ldp x4, x3, [P2+32]; \
sbcs x9, x9, x4; \
sbcs x10, x10, x3; \
ldp x11, x12, [P1+48]; \
ldp x4, x3, [P2+48]; \
sbcs x11, x11, x4; \
sbcs x12, x12, x3; \
ldr x13, [P1+64]; \
ldr x4, [P2+64]; \
sbcs x13, x13, x4; \
sbcs x5, x5, xzr; \
sbcs x6, x6, xzr; \
sbcs x7, x7, xzr; \
sbcs x8, x8, xzr; \
sbcs x9, x9, xzr; \
sbcs x10, x10, xzr; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbcs x13, x13, xzr; \
and x13, x13, #0x1ff; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]; \
stp x11, x12, [P0+48]; \
str x13, [P0+64]
// P0 = C * P1 - D * P2 == C * P1 + D * (p_521 - P2)
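// Note (informal): because p_521 = 2^521 - 1, the 521-bit bitwise complement
// of P2 (mvn on the lower words, eor with 0x1ff on the 9-bit top word) is
// exactly p_521 - P2, which is nonnegative since P2 < p_521. Adding
// D * (p_521 - P2) instead of subtracting D * P2 changes the result only by
// a multiple of p_521, which the final reduction below removes.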
#define cmsub_p521(P0,C,P1,D,P2) \
ldp x6, x7, [P1]; \
mov x1, #(C); \
mul x3, x1, x6; \
mul x4, x1, x7; \
umulh x6, x1, x6; \
adds x4, x4, x6; \
umulh x7, x1, x7; \
ldp x8, x9, [P1+16]; \
mul x5, x1, x8; \
mul x6, x1, x9; \
umulh x8, x1, x8; \
adcs x5, x5, x7; \
umulh x9, x1, x9; \
adcs x6, x6, x8; \
ldp x10, x11, [P1+32]; \
mul x7, x1, x10; \
mul x8, x1, x11; \
umulh x10, x1, x10; \
adcs x7, x7, x9; \
umulh x11, x1, x11; \
adcs x8, x8, x10; \
ldp x12, x13, [P1+48]; \
mul x9, x1, x12; \
mul x10, x1, x13; \
umulh x12, x1, x12; \
adcs x9, x9, x11; \
umulh x13, x1, x13; \
adcs x10, x10, x12; \
ldr x14, [P1+64]; \
mul x11, x1, x14; \
adc x11, x11, x13; \
mov x1, #(D); \
ldp x20, x21, [P2]; \
mvn x20, x20; \
mul x0, x1, x20; \
umulh x20, x1, x20; \
adds x3, x3, x0; \
mvn x21, x21; \
mul x0, x1, x21; \
umulh x21, x1, x21; \
adcs x4, x4, x0; \
ldp x22, x23, [P2+16]; \
mvn x22, x22; \
mul x0, x1, x22; \
umulh x22, x1, x22; \
adcs x5, x5, x0; \
mvn x23, x23; \
mul x0, x1, x23; \
umulh x23, x1, x23; \
adcs x6, x6, x0; \
ldp x17, x19, [P2+32]; \
mvn x17, x17; \
mul x0, x1, x17; \
umulh x17, x1, x17; \
adcs x7, x7, x0; \
mvn x19, x19; \
mul x0, x1, x19; \
umulh x19, x1, x19; \
adcs x8, x8, x0; \
ldp x2, x16, [P2+48]; \
mvn x2, x2; \
mul x0, x1, x2; \
umulh x2, x1, x2; \
adcs x9, x9, x0; \
mvn x16, x16; \
mul x0, x1, x16; \
umulh x16, x1, x16; \
adcs x10, x10, x0; \
ldr x0, [P2+64]; \
eor x0, x0, #0x1ff; \
mul x0, x1, x0; \
adc x11, x11, x0; \
adds x4, x4, x20; \
adcs x5, x5, x21; \
and x15, x4, x5; \
adcs x6, x6, x22; \
and x15, x15, x6; \
adcs x7, x7, x23; \
and x15, x15, x7; \
adcs x8, x8, x17; \
and x15, x15, x8; \
adcs x9, x9, x19; \
and x15, x15, x9; \
adcs x10, x10, x2; \
and x15, x15, x10; \
adc x11, x11, x16; \
lsr x12, x11, #9; \
orr x11, x11, #0xfffffffffffffe00; \
cmp xzr, xzr; \
adcs xzr, x3, x12; \
adcs xzr, x15, xzr; \
adcs xzr, x11, xzr; \
adcs x3, x3, x12; \
adcs x4, x4, xzr; \
adcs x5, x5, xzr; \
adcs x6, x6, xzr; \
adcs x7, x7, xzr; \
adcs x8, x8, xzr; \
adcs x9, x9, xzr; \
adcs x10, x10, xzr; \
adc x11, x11, xzr; \
and x11, x11, #0x1ff; \
stp x3, x4, [P0]; \
stp x5, x6, [P0+16]; \
stp x7, x8, [P0+32]; \
stp x9, x10, [P0+48]; \
str x11, [P0+64]
// P0 = 3 * P1 - 8 * P2 == 3 * P1 + 8 * (p_521 - P2)
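// Implementation note (informal): the small constants are handled with
// shifts rather than multiplies; 3 * P1 is built as (P1 << 1) + P1, using
// extr to shift across word boundaries, and 8 * (p_521 - P2) is the bitwise
// complement of P2 shifted left by 3 (the extr ..., #61 steps below).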
#define cmsub38_p521(P0,P1,P2) \
ldp x6, x7, [P1]; \
lsl x3, x6, #1; \
adds x3, x3, x6; \
extr x4, x7, x6, #63; \
adcs x4, x4, x7; \
ldp x8, x9, [P1+16]; \
extr x5, x8, x7, #63; \
adcs x5, x5, x8; \
extr x6, x9, x8, #63; \
adcs x6, x6, x9; \
ldp x10, x11, [P1+32]; \
extr x7, x10, x9, #63; \
adcs x7, x7, x10; \
extr x8, x11, x10, #63; \
adcs x8, x8, x11; \
ldp x12, x13, [P1+48]; \
extr x9, x12, x11, #63; \
adcs x9, x9, x12; \
extr x10, x13, x12, #63; \
adcs x10, x10, x13; \
ldr x14, [P1+64]; \
extr x11, x14, x13, #63; \
adc x11, x11, x14; \
ldp x20, x21, [P2]; \
mvn x20, x20; \
lsl x0, x20, #3; \
adds x3, x3, x0; \
mvn x21, x21; \
extr x0, x21, x20, #61; \
adcs x4, x4, x0; \
ldp x22, x23, [P2+16]; \
mvn x22, x22; \
extr x0, x22, x21, #61; \
adcs x5, x5, x0; \
and x15, x4, x5; \
mvn x23, x23; \
extr x0, x23, x22, #61; \
adcs x6, x6, x0; \
and x15, x15, x6; \
ldp x20, x21, [P2+32]; \
mvn x20, x20; \
extr x0, x20, x23, #61; \
adcs x7, x7, x0; \
and x15, x15, x7; \
mvn x21, x21; \
extr x0, x21, x20, #61; \
adcs x8, x8, x0; \
and x15, x15, x8; \
ldp x22, x23, [P2+48]; \
mvn x22, x22; \
extr x0, x22, x21, #61; \
adcs x9, x9, x0; \
and x15, x15, x9; \
mvn x23, x23; \
extr x0, x23, x22, #61; \
adcs x10, x10, x0; \
and x15, x15, x10; \
ldr x0, [P2+64]; \
eor x0, x0, #0x1ff; \
extr x0, x0, x23, #61; \
adc x11, x11, x0; \
lsr x12, x11, #9; \
orr x11, x11, #0xfffffffffffffe00; \
cmp xzr, xzr; \
adcs xzr, x3, x12; \
adcs xzr, x15, xzr; \
adcs xzr, x11, xzr; \
adcs x3, x3, x12; \
adcs x4, x4, xzr; \
adcs x5, x5, xzr; \
adcs x6, x6, xzr; \
adcs x7, x7, xzr; \
adcs x8, x8, xzr; \
adcs x9, x9, xzr; \
adcs x10, x10, xzr; \
adc x11, x11, xzr; \
and x11, x11, #0x1ff; \
stp x3, x4, [P0]; \
stp x5, x6, [P0+16]; \
stp x7, x8, [P0+32]; \
stp x9, x10, [P0+48]; \
str x11, [P0+64]
// P0 = 4 * P1 - P2 = 4 * P1 + (p_521 - P2)
#define cmsub41_p521(P0,P1,P2) \
ldp x6, x7, [P1]; \
lsl x3, x6, #2; \
extr x4, x7, x6, #62; \
ldp x8, x9, [P1+16]; \
extr x5, x8, x7, #62; \
extr x6, x9, x8, #62; \
ldp x10, x11, [P1+32]; \
extr x7, x10, x9, #62; \
extr x8, x11, x10, #62; \
ldp x12, x13, [P1+48]; \
extr x9, x12, x11, #62; \
extr x10, x13, x12, #62; \
ldr x14, [P1+64]; \
extr x11, x14, x13, #62; \
ldp x0, x1, [P2]; \
mvn x0, x0; \
adds x3, x3, x0; \
sbcs x4, x4, x1; \
ldp x0, x1, [P2+16]; \
sbcs x5, x5, x0; \
and x15, x4, x5; \
sbcs x6, x6, x1; \
and x15, x15, x6; \
ldp x0, x1, [P2+32]; \
sbcs x7, x7, x0; \
and x15, x15, x7; \
sbcs x8, x8, x1; \
and x15, x15, x8; \
ldp x0, x1, [P2+48]; \
sbcs x9, x9, x0; \
and x15, x15, x9; \
sbcs x10, x10, x1; \
and x15, x15, x10; \
ldr x0, [P2+64]; \
eor x0, x0, #0x1ff; \
adc x11, x11, x0; \
lsr x12, x11, #9; \
orr x11, x11, #0xfffffffffffffe00; \
cmp xzr, xzr; \
adcs xzr, x3, x12; \
adcs xzr, x15, xzr; \
adcs xzr, x11, xzr; \
adcs x3, x3, x12; \
adcs x4, x4, xzr; \
adcs x5, x5, xzr; \
adcs x6, x6, xzr; \
adcs x7, x7, xzr; \
adcs x8, x8, xzr; \
adcs x9, x9, xzr; \
adcs x10, x10, xzr; \
adc x11, x11, xzr; \
and x11, x11, #0x1ff; \
stp x3, x4, [P0]; \
stp x5, x6, [P0+16]; \
stp x7, x8, [P0+32]; \
stp x9, x10, [P0+48]; \
str x11, [P0+64]
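// (A rough sketch of the final reduction tail shared by the cmsub macros
// above: write the unreduced 9-word total [x11;x10;...;x3], whose top word
// x11 may exceed 9 bits, as 2^521 * h + l with h = x11 >> 9. Since
// 2^521 == 1 (mod p_521), the total is congruent to h + l. The
// "cmp xzr, xzr" / "adcs xzr, ..." probe computes the carry of h + l + 1,
// i.e. tests h + l >= p_521 = 2^521 - 1, with x15 (the AND of the middle
// digits) condensing the carry chain and the orr-stuffed 1 bits standing in
// for the top digit. The second adc chain then adds h plus that carry and
// the closing "and #0x1ff" masks back to 521 bits, giving a reduced result.)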
S2N_BN_SYMBOL(p521_jdouble):
// Save regs and make room on stack for temporary variables
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x29, x30, [sp, #-16]!
sub sp, sp, NSPACE
// Move the input arguments to stable places
mov input_z, x0
mov input_x, x1
// Main code, just a sequence of basic field operations
// z2 = z^2
// y2 = y^2
sqr_p521(z2,z_1)
sqr_p521(y2,y_1)
// x2p = x^2 - z^4 = (x + z^2) * (x - z^2)
add_p521(t1,x_1,z2)
sub_p521(t2,x_1,z2)
mul_p521(x2p,t1,t2)
// t1 = y + z
// x4p = x2p^2
// xy2 = x * y^2
add_p521(t1,y_1,z_1)
sqr_p521(x4p,x2p)
mul_p521(xy2,x_1,y2)
// t2 = (y + z)^2
sqr_p521(t2,t1)
// d = 12 * xy2 - 9 * x4p
// t1 = y^2 + 2 * y * z
cmsub_p521(d_,12,xy2,9,x4p)
sub_p521(t1,t2,z2)
// y4 = y^4
sqr_p521(y4,y2)
// z_3' = 2 * y * z
// dx2 = d * x2p
sub_p521(z_3,t1,y2)
mul_p521(dx2,d_,x2p)
// x' = 4 * xy2 - d
cmsub41_p521(x_3,xy2,d_)
// y' = 3 * dx2 - 8 * y4
cmsub38_p521(y_3,dx2,y4)
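// (Taken together, the sequence above appears to instantiate the standard
// Jacobian doubling formulas for a short Weierstrass curve with a = -3,
// which P-521 is; a sketch of the correspondence, with alpha = 3 * x2p:
//
//      x' = alpha^2 - 8 * x * y^2 = 9 * x4p - 8 * xy2 = 4 * xy2 - d
//      z' = 2 * y * z = (y + z)^2 - y^2 - z^2
//      y' = alpha * (4 * x * y^2 - x') - 8 * y^4 = 3 * dx2 - 8 * y4
//
// using d = 12 * xy2 - 9 * x4p as computed above.)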
// Restore stack and registers
add sp, sp, NSPACE
ldp x29, x30, [sp], 16
ldp x27, x28, [sp], 16
ldp x25, x26, [sp], 16
ldp x23, x24, [sp], 16
ldp x21, x22, [sp], 16
ldp x19, x20, [sp], 16
ret
// Local versions of the two "big" field operations, identical to
// bignum_mul_p521_neon and bignum_sqr_p521_neon.
local_mul_p521:
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
sub sp, sp, #80
ldr q6, [x2]
ldp x10, x17, [x1, #16]
ldr q4, [x1]
ldr q16, [x2, #32]
ldp x5, x20, [x2, #16]
ldr q2, [x1, #32]
movi v31.2D, #0x00000000ffffffff
uzp2 v17.4S, v6.4S, v6.4S
rev64 v7.4S, v6.4S
ldp x15, x21, [x1]
xtn v25.2S, v6.2D
xtn v22.2S, v4.2D
subs x14, x10, x17
mul v7.4S, v7.4S, v4.4S
csetm x8, cc
rev64 v3.4S, v16.4S
xtn v1.2S, v16.2D
ldp x13, x16, [x2]
mul x26, x10, x5
uzp2 v16.4S, v16.4S, v16.4S
uaddlp v26.2D, v7.4S
cneg x4, x14, cc
subs x24, x15, x21
xtn v5.2S, v2.2D
mul v28.4S, v3.4S, v2.4S
shl v26.2D, v26.2D, #32
mul x22, x17, x20
umull v20.2D, v22.2S, v25.2S
uzp2 v6.4S, v4.4S, v4.4S
umull v18.2D, v22.2S, v17.2S
uzp2 v4.4S, v2.4S, v2.4S
cneg x14, x24, cc
csetm x7, cc
umulh x11, x17, x20
usra v18.2D, v20.2D, #32
uaddlp v7.2D, v28.4S
subs x19, x16, x13
umlal v26.2D, v22.2S, v25.2S
cneg x19, x19, cc
shl v28.2D, v7.2D, #32
umull v7.2D, v5.2S, v1.2S
umull v30.2D, v5.2S, v16.2S
cinv x6, x7, cc
mul x25, x14, x19
umlal v28.2D, v5.2S, v1.2S
umull v21.2D, v6.2S, v17.2S
umulh x14, x14, x19
usra v30.2D, v7.2D, #32
subs x9, x20, x5
and v29.16B, v18.16B, v31.16B
cinv x23, x8, cc
mov x8, v26.d[1]
cneg x12, x9, cc
usra v21.2D, v18.2D, #32
umlal v29.2D, v6.2S, v25.2S
mul x24, x4, x12
umull v18.2D, v4.2S, v16.2S
movi v25.2D, #0x00000000ffffffff
eor x9, x14, x6
and v7.16B, v30.16B, v25.16B
usra v21.2D, v29.2D, #32
umulh x7, x10, x5
usra v18.2D, v30.2D, #32
umlal v7.2D, v4.2S, v1.2S
mov x19, v21.d[0]
umulh x3, x4, x12
mov x14, v21.d[1]
usra v18.2D, v7.2D, #32
adds x4, x8, x19
mov x8, v26.d[0]
adcs x19, x26, x14
adcs x14, x22, x7
adc x12, x11, xzr
adds x11, x4, x8
adcs x26, x19, x4
adcs x22, x14, x19
eor x4, x24, x23
adcs x14, x12, x14
eor x7, x25, x6
adc x25, xzr, x12
eor x19, x3, x23
adds x3, x26, x8
adcs x24, x22, x11
adcs x12, x14, x26
adcs x22, x25, x22
adcs x26, xzr, x14
adc x14, xzr, x25
cmn x23, #0x1
adcs x22, x22, x4
adcs x19, x26, x19
adc x25, x14, x23
subs x14, x21, x17
cneg x23, x14, cc
csetm x26, cc
subs x4, x20, x16
cneg x14, x4, cc
cinv x4, x26, cc
cmn x6, #0x1
adcs x11, x11, x7
mul x7, x23, x14
adcs x9, x3, x9
adcs x26, x24, x6
umulh x3, x23, x14
adcs x14, x12, x6
adcs x22, x22, x6
adcs x12, x19, x6
extr x24, x11, x8, #55
adc x6, x25, x6
subs x19, x15, x17
csetm x17, cc
cneg x23, x19, cc
subs x19, x20, x13
lsl x25, x8, #9
eor x8, x7, x4
cneg x20, x19, cc
umulh x7, x23, x20
cinv x19, x17, cc
subs x17, x15, x10
csetm x15, cc
stp x25, x24, [sp, #32]
cneg x24, x17, cc
mul x20, x23, x20
subs x25, x5, x13
cneg x13, x25, cc
cinv x15, x15, cc
mul x25, x24, x13
subs x21, x21, x10
csetm x23, cc
cneg x17, x21, cc
subs x21, x5, x16
umulh x13, x24, x13
cinv x10, x23, cc
cneg x23, x21, cc
cmn x4, #0x1
adcs x14, x14, x8
eor x21, x3, x4
adcs x21, x22, x21
eor x5, x20, x19
adcs x24, x12, x4
mul x12, x17, x23
eor x8, x25, x15
adc x25, x6, x4
cmn x15, #0x1
adcs x6, x9, x8
ldp x20, x8, [x2, #48]
eor x9, x13, x15
adcs x4, x26, x9
umulh x26, x17, x23
ldp x17, x13, [x1, #48]
adcs x9, x14, x15
adcs x16, x21, x15
adcs x14, x24, x15
eor x21, x7, x19
mul x23, x17, x20
adc x24, x25, x15
cmn x19, #0x1
adcs x7, x4, x5
adcs x9, x9, x21
umulh x3, x13, x8
adcs x16, x16, x19
adcs x22, x14, x19
eor x5, x12, x10
adc x12, x24, x19
cmn x10, #0x1
adcs x19, x7, x5
eor x14, x26, x10
mov x7, v28.d[1]
adcs x24, x9, x14
extr x4, x19, x6, #55
umulh x15, x17, x20
mov x14, v18.d[1]
lsr x9, x19, #55
adcs x5, x16, x10
mov x16, v18.d[0]
adcs x19, x22, x10
str x9, [sp, #64]
extr x25, x6, x11, #55
adc x21, x12, x10
subs x26, x17, x13
stp x25, x4, [sp, #48]
stp x19, x21, [sp, #16]
csetm x6, cc
cneg x4, x26, cc
mul x19, x13, x8
subs x11, x8, x20
stp x24, x5, [sp]
ldp x21, x10, [x1, #32]
cinv x12, x6, cc
cneg x6, x11, cc
mov x9, v28.d[0]
umulh x25, x4, x6
adds x22, x7, x16
ldp x16, x5, [x2, #32]
adcs x14, x23, x14
adcs x11, x19, x15
adc x24, x3, xzr
adds x3, x22, x9
adcs x15, x14, x22
mul x22, x4, x6
adcs x6, x11, x14
adcs x4, x24, x11
eor x14, x25, x12
adc x26, xzr, x24
subs x7, x21, x10
csetm x23, cc
cneg x19, x7, cc
subs x24, x5, x16
cneg x11, x24, cc
cinv x7, x23, cc
adds x25, x15, x9
eor x23, x22, x12
adcs x22, x6, x3
mul x24, x19, x11
adcs x15, x4, x15
adcs x6, x26, x6
umulh x19, x19, x11
adcs x11, xzr, x4
adc x26, xzr, x26
cmn x12, #0x1
adcs x4, x6, x23
eor x6, x24, x7
adcs x14, x11, x14
adc x26, x26, x12
subs x11, x10, x13
cneg x12, x11, cc
csetm x11, cc
eor x19, x19, x7
subs x24, x8, x5
cinv x11, x11, cc
cneg x24, x24, cc
cmn x7, #0x1
adcs x3, x3, x6
mul x23, x12, x24
adcs x25, x25, x19
adcs x6, x22, x7
umulh x19, x12, x24
adcs x22, x15, x7
adcs x12, x4, x7
eor x24, x23, x11
adcs x4, x14, x7
adc x26, x26, x7
eor x19, x19, x11
subs x14, x21, x17
cneg x7, x14, cc
csetm x14, cc
subs x23, x20, x16
cinv x14, x14, cc
cneg x23, x23, cc
cmn x11, #0x1
adcs x22, x22, x24
mul x24, x7, x23
adcs x15, x12, x19
adcs x4, x4, x11
adc x19, x26, x11
umulh x26, x7, x23
subs x7, x21, x13
eor x11, x24, x14
cneg x23, x7, cc
csetm x12, cc
subs x7, x8, x16
cneg x7, x7, cc
cinv x12, x12, cc
cmn x14, #0x1
eor x26, x26, x14
adcs x11, x25, x11
mul x25, x23, x7
adcs x26, x6, x26
adcs x6, x22, x14
adcs x24, x15, x14
umulh x23, x23, x7
adcs x4, x4, x14
adc x22, x19, x14
eor x14, x25, x12
eor x7, x23, x12
cmn x12, #0x1
adcs x14, x26, x14
ldp x19, x25, [x2]
ldp x15, x23, [x2, #16]
adcs x26, x6, x7
adcs x24, x24, x12
adcs x7, x4, x12
adc x4, x22, x12
subs x19, x19, x16
ldp x16, x22, [x1]
sbcs x6, x25, x5
ldp x12, x25, [x1, #16]
sbcs x15, x15, x20
sbcs x8, x23, x8
csetm x23, cc
subs x21, x21, x16
eor x16, x19, x23
sbcs x19, x10, x22
eor x22, x6, x23
eor x8, x8, x23
sbcs x6, x17, x12
sbcs x13, x13, x25
csetm x12, cc
subs x10, x10, x17
cneg x17, x10, cc
csetm x25, cc
subs x5, x20, x5
eor x10, x19, x12
cneg x19, x5, cc
eor x20, x15, x23
eor x21, x21, x12
cinv x15, x25, cc
mul x25, x17, x19
subs x16, x16, x23
sbcs x5, x22, x23
eor x6, x6, x12
sbcs x20, x20, x23
eor x22, x13, x12
sbc x8, x8, x23
subs x21, x21, x12
umulh x19, x17, x19
sbcs x10, x10, x12
sbcs x17, x6, x12
eor x6, x19, x15
eor x19, x25, x15
umulh x25, x17, x20
sbc x13, x22, x12
cmn x15, #0x1
adcs x22, x14, x19
adcs x19, x26, x6
ldp x6, x26, [sp]
adcs x14, x24, x15
umulh x24, x21, x16
adcs x7, x7, x15
adc x15, x4, x15
adds x4, x9, x6
eor x9, x23, x12
adcs x12, x3, x26
stp x4, x12, [sp]
ldp x4, x26, [sp, #16]
umulh x12, x10, x5
ldp x6, x23, [sp, #32]
adcs x3, x11, x4
mul x4, x13, x8
adcs x26, x22, x26
ldp x22, x11, [sp, #48]
adcs x6, x19, x6
stp x3, x26, [sp, #16]
mul x26, x10, x5
adcs x14, x14, x23
stp x6, x14, [sp, #32]
ldr x6, [sp, #64]
adcs x22, x7, x22
adcs x14, x15, x11
mul x11, x17, x20
adc x19, x6, xzr
stp x22, x14, [sp, #48]
adds x14, x26, x24
str x19, [sp, #64]
umulh x19, x13, x8
adcs x7, x11, x12
adcs x22, x4, x25
mul x6, x21, x16
adc x19, x19, xzr
subs x11, x17, x13
cneg x12, x11, cc
csetm x11, cc
subs x24, x8, x20
cinv x11, x11, cc
cneg x24, x24, cc
adds x4, x14, x6
adcs x14, x7, x14
mul x3, x12, x24
adcs x7, x22, x7
adcs x22, x19, x22
umulh x12, x12, x24
adc x24, xzr, x19
adds x19, x14, x6
eor x3, x3, x11
adcs x26, x7, x4
adcs x14, x22, x14
adcs x25, x24, x7
adcs x23, xzr, x22
eor x7, x12, x11
adc x12, xzr, x24
subs x22, x21, x10
cneg x24, x22, cc
csetm x22, cc
subs x15, x5, x16
cinv x22, x22, cc
cneg x15, x15, cc
cmn x11, #0x1
adcs x3, x25, x3
mul x25, x24, x15
adcs x23, x23, x7
adc x11, x12, x11
subs x7, x10, x13
umulh x15, x24, x15
cneg x12, x7, cc
csetm x7, cc
eor x24, x25, x22
eor x25, x15, x22
cmn x22, #0x1
adcs x24, x4, x24
adcs x19, x19, x25
adcs x15, x26, x22
adcs x4, x14, x22
adcs x26, x3, x22
adcs x25, x23, x22
adc x23, x11, x22
subs x14, x21, x17
cneg x3, x14, cc
csetm x11, cc
subs x14, x8, x5
cneg x14, x14, cc
cinv x7, x7, cc
subs x13, x21, x13
cneg x21, x13, cc
csetm x13, cc
mul x22, x12, x14
subs x8, x8, x16
cinv x13, x13, cc
umulh x14, x12, x14
cneg x12, x8, cc
subs x8, x20, x16
cneg x8, x8, cc
cinv x16, x11, cc
eor x22, x22, x7
cmn x7, #0x1
eor x14, x14, x7
adcs x4, x4, x22
mul x11, x3, x8
adcs x22, x26, x14
adcs x14, x25, x7
eor x25, x24, x9
adc x26, x23, x7
umulh x7, x3, x8
subs x17, x10, x17
cneg x24, x17, cc
eor x3, x11, x16
csetm x11, cc
subs x20, x20, x5
cneg x5, x20, cc
cinv x11, x11, cc
cmn x16, #0x1
mul x17, x21, x12
eor x8, x7, x16
adcs x10, x19, x3
and x19, x9, #0x1ff
adcs x20, x15, x8
umulh x15, x21, x12
eor x12, x10, x9
eor x8, x6, x9
adcs x6, x4, x16
adcs x4, x22, x16
adcs x21, x14, x16
adc x7, x26, x16
mul x10, x24, x5
cmn x13, #0x1
ldp x3, x14, [x1]
eor x17, x17, x13
umulh x5, x24, x5
adcs x20, x20, x17
eor x17, x15, x13
adcs x16, x6, x17
eor x22, x10, x11
adcs x23, x4, x13
extr x10, x14, x3, #52
and x26, x3, #0xfffffffffffff
adcs x24, x21, x13
and x15, x10, #0xfffffffffffff
adc x6, x7, x13
cmn x11, #0x1
adcs x17, x20, x22
eor x4, x5, x11
ldp x21, x10, [sp]
adcs x7, x16, x4
eor x16, x17, x9
eor x13, x7, x9
ldp x3, x17, [sp, #16]
adcs x7, x23, x11
eor x23, x7, x9
ldp x5, x22, [sp, #32]
adcs x7, x24, x11
adc x24, x6, x11
ldr x6, [x2, #64]
adds x20, x8, x21
lsl x11, x20, #9
eor x4, x7, x9
orr x7, x11, x19
eor x8, x24, x9
adcs x11, x25, x10
mul x26, x6, x26
ldp x19, x24, [sp, #48]
adcs x12, x12, x3
adcs x16, x16, x17
adcs x9, x13, x5
ldr x25, [sp, #64]
extr x20, x11, x20, #55
adcs x13, x23, x22
adcs x4, x4, x19
extr x23, x12, x11, #55
adcs x8, x8, x24
adc x11, x25, xzr
adds x21, x9, x21
extr x9, x16, x12, #55
lsr x12, x16, #55
adcs x10, x13, x10
mul x15, x6, x15
adcs x13, x4, x3
ldp x16, x4, [x2]
ldr x3, [x1, #64]
adcs x17, x8, x17
adcs x5, x5, x7
adcs x20, x22, x20
adcs x8, x19, x23
and x22, x16, #0xfffffffffffff
ldp x19, x7, [x1, #16]
adcs x9, x24, x9
extr x24, x4, x16, #52
adc x16, x12, x25
mul x22, x3, x22
and x25, x24, #0xfffffffffffff
extr x14, x19, x14, #40
and x12, x14, #0xfffffffffffff
extr x23, x7, x19, #28
ldp x19, x24, [x2, #16]
mul x14, x3, x25
and x23, x23, #0xfffffffffffff
add x22, x26, x22
lsl x11, x11, #48
lsr x26, x22, #52
lsl x25, x22, #12
mul x22, x6, x12
extr x12, x19, x4, #40
add x4, x15, x14
mul x15, x6, x23
add x4, x4, x26
extr x23, x24, x19, #28
ldp x14, x19, [x1, #32]
and x26, x12, #0xfffffffffffff
extr x12, x4, x25, #12
and x25, x23, #0xfffffffffffff
adds x21, x21, x12
mul x12, x3, x26
extr x23, x14, x7, #16
and x23, x23, #0xfffffffffffff
mul x7, x3, x25
ldp x25, x26, [x2, #32]
add x12, x22, x12
extr x22, x19, x14, #56
mul x23, x6, x23
lsr x14, x14, #4
extr x24, x25, x24, #16
add x7, x15, x7
and x15, x24, #0xfffffffffffff
and x22, x22, #0xfffffffffffff
lsr x24, x4, #52
mul x15, x3, x15
and x14, x14, #0xfffffffffffff
add x12, x12, x24
lsl x24, x4, #12
lsr x4, x12, #52
extr x24, x12, x24, #24
adcs x10, x10, x24
lsl x24, x12, #12
add x12, x7, x4
mul x22, x6, x22
add x4, x23, x15
extr x7, x12, x24, #36
adcs x13, x13, x7
lsl x15, x12, #12
add x7, x4, x11
lsr x24, x12, #52
ldp x23, x11, [x2, #48]
add x4, x7, x24
mul x12, x6, x14
extr x7, x26, x25, #56
extr x14, x4, x15, #48
and x2, x7, #0xfffffffffffff
extr x24, x11, x23, #32
ldp x15, x7, [x1, #48]
and x1, x24, #0xfffffffffffff
lsr x24, x4, #52
mul x2, x3, x2
extr x26, x23, x26, #44
lsr x23, x25, #4
and x23, x23, #0xfffffffffffff
and x25, x26, #0xfffffffffffff
extr x26, x7, x15, #32
extr x19, x15, x19, #44
mul x23, x3, x23
and x15, x26, #0xfffffffffffff
lsl x26, x4, #12
and x4, x19, #0xfffffffffffff
lsr x11, x11, #20
mul x19, x6, x4
adcs x17, x17, x14
add x14, x22, x2
add x22, x12, x23
lsr x7, x7, #20
add x22, x22, x24
extr x2, x22, x26, #60
mul x24, x3, x25
lsr x22, x22, #52
add x14, x14, x22
lsl x22, x2, #8
extr x22, x14, x22, #8
lsl x2, x14, #12
mul x1, x3, x1
adcs x12, x5, x22
mul x5, x6, x15
and x26, x10, x13
and x4, x26, x17
add x23, x19, x24
lsr x14, x14, #52
mul x22, x3, x11
add x11, x23, x14
extr x25, x11, x2, #20
lsl x19, x11, #12
adcs x25, x20, x25
and x14, x4, x12
add x1, x5, x1
and x14, x14, x25
mul x15, x6, x7
add x26, x15, x22
mul x6, x6, x3
lsr x22, x11, #52
add x4, x1, x22
lsr x1, x4, #52
extr x3, x4, x19, #32
lsl x15, x4, #12
add x7, x26, x1
adcs x23, x8, x3
extr x20, x7, x15, #44
and x3, x14, x23
lsr x19, x7, #44
adcs x7, x9, x20
add x11, x6, x19
adc x4, x16, x11
lsr x14, x4, #9
cmp xzr, xzr
and x15, x3, x7
orr x3, x4, #0xfffffffffffffe00
adcs xzr, x21, x14
adcs xzr, x15, xzr
adcs xzr, x3, xzr
adcs x11, x21, x14
and x14, x11, #0x1ff
adcs x1, x10, xzr
extr x10, x1, x11, #9
str x14, [x0, #64]
adcs x14, x13, xzr
extr x11, x14, x1, #9
adcs x1, x17, xzr
extr x4, x1, x14, #9
stp x10, x11, [x0]
adcs x11, x12, xzr
extr x14, x11, x1, #9
adcs x10, x25, xzr
extr x11, x10, x11, #9
stp x4, x14, [x0, #16]
adcs x14, x23, xzr
extr x10, x14, x10, #9
adcs x1, x7, xzr
stp x11, x10, [x0, #32]
extr x14, x1, x14, #9
adc x10, x3, xzr
extr x26, x10, x1, #9
stp x14, x26, [x0, #48]
add sp, sp, #80
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
local_sqr_p521:
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
ldr q23, [x1, #32]
ldp x9, x2, [x1, #32]
ldr q16, [x1, #32]
ldr q20, [x1, #48]
ldp x6, x13, [x1, #48]
rev64 v2.4S, v23.4S
mul x14, x9, x2
ldr q31, [x1, #48]
subs x22, x9, x2
uzp2 v26.4S, v23.4S, v23.4S
mul v30.4S, v2.4S, v16.4S
xtn v0.2S, v20.2D
csetm x12, cc
xtn v21.2S, v16.2D
xtn v23.2S, v23.2D
umulh x10, x9, x6
rev64 v27.4S, v31.4S
umull v2.2D, v21.2S, v26.2S
cneg x23, x22, cc
uaddlp v25.2D, v30.4S
umull v18.2D, v21.2S, v23.2S
mul x22, x9, x6
mul v6.4S, v27.4S, v20.4S
uzp2 v17.4S, v20.4S, v20.4S
shl v20.2D, v25.2D, #32
uzp2 v27.4S, v31.4S, v31.4S
mul x16, x2, x13
umlal v20.2D, v21.2S, v23.2S
usra v2.2D, v18.2D, #32
adds x8, x22, x10
umull v25.2D, v17.2S, v27.2S
xtn v31.2S, v31.2D
movi v1.2D, #0xffffffff
adc x3, x10, xzr
umulh x21, x2, x13
uzp2 v21.4S, v16.4S, v16.4S
umull v18.2D, v0.2S, v27.2S
subs x19, x13, x6
and v7.16B, v2.16B, v1.16B
umull v27.2D, v0.2S, v31.2S
cneg x20, x19, cc
movi v30.2D, #0xffffffff
umull v16.2D, v21.2S, v26.2S
umlal v7.2D, v21.2S, v23.2S
mul x19, x23, x20
cinv x7, x12, cc
uaddlp v6.2D, v6.4S
eor x12, x19, x7
adds x11, x8, x16
umulh x10, x23, x20
ldr q1, [x1]
usra v16.2D, v2.2D, #32
adcs x19, x3, x21
shl v2.2D, v6.2D, #32
adc x20, x21, xzr
adds x17, x19, x16
usra v18.2D, v27.2D, #32
adc x19, x20, xzr
cmn x7, #0x1
umlal v2.2D, v0.2S, v31.2S
umulh x16, x9, x2
adcs x8, x11, x12
usra v16.2D, v7.2D, #32
ldr x12, [x1, #64]
eor x20, x10, x7
umulh x10, x6, x13
mov x23, v2.d[0]
mov x3, v2.d[1]
adcs x21, x17, x20
usra v25.2D, v18.2D, #32
and v23.16B, v18.16B, v30.16B
adc x7, x19, x7
adds x22, x22, x22
ldr q7, [x1, #16]
adcs x17, x8, x8
umlal v23.2D, v17.2S, v31.2S
mov x19, v16.d[0]
mul x11, x12, x12
ldr q4, [x1]
usra v25.2D, v23.2D, #32
add x5, x12, x12
adcs x15, x21, x21
ldr q28, [x1]
mov x12, v20.d[1]
adcs x24, x7, x7
mov x21, v16.d[1]
adc x4, xzr, xzr
adds x19, x19, x14
ldr q18, [x1, #16]
xtn v26.2S, v1.2D
adcs x8, x12, x16
adc x21, x21, xzr
adds x7, x19, x14
xtn v23.2S, v7.2D
rev64 v21.4S, v28.4S
adcs x12, x8, x16
ldp x20, x19, [x1]
mov x16, v25.d[1]
xtn v22.2S, v28.2D
adc x14, x21, xzr
adds x8, x22, x12
uzp2 v24.4S, v28.4S, v28.4S
rev64 v28.4S, v18.4S
mul x12, x6, x13
mul v16.4S, v21.4S, v1.4S
shrn v31.2S, v7.2D, #32
adcs x22, x17, x14
mov x14, v25.d[0]
and x21, x20, #0xfffffffffffff
umull v17.2D, v26.2S, v24.2S
ldr q2, [x1, #32]
adcs x17, x15, xzr
ldr q30, [x1, #48]
umull v7.2D, v26.2S, v22.2S
adcs x15, x24, xzr
ldr q0, [x1, #16]
movi v6.2D, #0xffffffff
adc x4, x4, xzr
adds x14, x14, x12
uzp1 v27.4S, v18.4S, v4.4S
uzp2 v19.4S, v1.4S, v1.4S
adcs x24, x3, x10
mul x3, x5, x21
umull v29.2D, v23.2S, v31.2S
ldr q5, [x1]
adc x21, x16, xzr
adds x16, x14, x12
extr x12, x19, x20, #52
umull v18.2D, v19.2S, v24.2S
adcs x24, x24, x10
and x10, x12, #0xfffffffffffff
ldp x14, x12, [x1, #16]
usra v17.2D, v7.2D, #32
adc x21, x21, xzr
adds x23, x23, x17
mul x17, x5, x10
shl v21.2D, v29.2D, #33
lsl x10, x3, #12
lsr x1, x3, #52
rev64 v29.4S, v2.4S
uaddlp v25.2D, v16.4S
add x17, x17, x1
adcs x16, x16, x15
extr x3, x14, x19, #40
mov x15, v20.d[0]
extr x10, x17, x10, #12
and x3, x3, #0xfffffffffffff
shl v3.2D, v25.2D, #32
and v6.16B, v17.16B, v6.16B
mul x1, x5, x3
usra v18.2D, v17.2D, #32
adcs x3, x24, x4
extr x4, x12, x14, #28
umlal v6.2D, v19.2S, v22.2S
xtn v20.2S, v2.2D
umlal v3.2D, v26.2S, v22.2S
movi v26.2D, #0xffffffff
lsr x24, x17, #52
and x4, x4, #0xfffffffffffff
uzp2 v19.4S, v2.4S, v2.4S
add x1, x1, x24
mul x24, x5, x4
lsl x4, x17, #12
xtn v24.2S, v5.2D
extr x17, x1, x4, #24
adc x21, x21, xzr
umlal v21.2D, v23.2S, v23.2S
adds x4, x15, x10
lsl x10, x1, #12
adcs x15, x7, x17
mul v23.4S, v28.4S, v4.4S
and x7, x4, #0x1ff
lsr x17, x1, #52
umulh x1, x19, x12
uzp2 v17.4S, v5.4S, v5.4S
extr x4, x15, x4, #9
add x24, x24, x17
mul v29.4S, v29.4S, v5.4S
extr x17, x24, x10, #36
extr x10, x9, x12, #16
uzp1 v28.4S, v4.4S, v4.4S
adcs x17, x8, x17
and x8, x10, #0xfffffffffffff
umull v16.2D, v24.2S, v20.2S
extr x10, x17, x15, #9
mul x15, x5, x8
stp x4, x10, [x0]
lsl x4, x24, #12
lsr x8, x9, #4
uaddlp v4.2D, v23.4S
and x8, x8, #0xfffffffffffff
umull v23.2D, v24.2S, v19.2S
mul x8, x5, x8
extr x10, x2, x9, #56
lsr x24, x24, #52
and x10, x10, #0xfffffffffffff
add x15, x15, x24
extr x4, x15, x4, #48
mul x24, x5, x10
lsr x10, x15, #52
usra v23.2D, v16.2D, #32
add x10, x8, x10
shl v4.2D, v4.2D, #32
adcs x22, x22, x4
extr x4, x6, x2, #44
lsl x15, x15, #12
lsr x8, x10, #52
extr x15, x10, x15, #60
and x10, x4, #0xfffffffffffff
umlal v4.2D, v28.2S, v27.2S
add x8, x24, x8
extr x4, x13, x6, #32
mul x24, x5, x10
uzp2 v16.4S, v30.4S, v30.4S
lsl x10, x15, #8
rev64 v28.4S, v30.4S
and x15, x4, #0xfffffffffffff
extr x4, x8, x10, #8
mul x10, x5, x15
lsl x15, x8, #12
adcs x23, x23, x4
lsr x4, x8, #52
lsr x8, x13, #20
add x4, x24, x4
mul x8, x5, x8
lsr x24, x4, #52
extr x15, x4, x15, #20
lsl x4, x4, #12
add x10, x10, x24
adcs x15, x16, x15
extr x4, x10, x4, #32
umulh x5, x20, x14
adcs x3, x3, x4
usra v18.2D, v6.2D, #32
lsl x16, x10, #12
extr x24, x15, x23, #9
lsr x10, x10, #52
uzp2 v27.4S, v0.4S, v0.4S
add x8, x8, x10
extr x10, x3, x15, #9
extr x4, x22, x17, #9
and v25.16B, v23.16B, v26.16B
lsr x17, x8, #44
extr x15, x8, x16, #44
extr x16, x23, x22, #9
xtn v7.2S, v30.2D
mov x8, v4.d[0]
stp x24, x10, [x0, #32]
uaddlp v30.2D, v29.4S
stp x4, x16, [x0, #16]
umulh x24, x20, x19
adcs x15, x21, x15
adc x16, x11, x17
subs x11, x20, x19
xtn v5.2S, v0.2D
csetm x17, cc
extr x3, x15, x3, #9
mov x22, v4.d[1]
cneg x21, x11, cc
subs x10, x12, x14
mul v31.4S, v28.4S, v0.4S
cneg x10, x10, cc
cinv x11, x17, cc
shl v4.2D, v30.2D, #32
umull v28.2D, v5.2S, v16.2S
extr x23, x16, x15, #9
adds x4, x8, x5
mul x17, x21, x10
umull v22.2D, v5.2S, v7.2S
adc x15, x5, xzr
adds x4, x4, x22
uaddlp v2.2D, v31.4S
lsr x5, x16, #9
adcs x16, x15, x1
mov x15, v18.d[0]
adc x1, x1, xzr
umulh x10, x21, x10
adds x22, x16, x22
umlal v4.2D, v24.2S, v20.2S
umull v30.2D, v27.2S, v16.2S
stp x3, x23, [x0, #48]
add x3, x7, x5
adc x16, x1, xzr
usra v28.2D, v22.2D, #32
mul x23, x20, x19
eor x1, x17, x11
cmn x11, #0x1
mov x17, v18.d[1]
umull v18.2D, v17.2S, v19.2S
adcs x7, x4, x1
eor x1, x10, x11
umlal v25.2D, v17.2S, v20.2S
movi v16.2D, #0xffffffff
adcs x22, x22, x1
usra v18.2D, v23.2D, #32
umulh x4, x14, x14
adc x1, x16, x11
adds x10, x8, x8
shl v23.2D, v2.2D, #32
str x3, [x0, #64]
adcs x5, x7, x7
and v16.16B, v28.16B, v16.16B
usra v30.2D, v28.2D, #32
adcs x7, x22, x22
mov x21, v3.d[1]
adcs x11, x1, x1
umlal v16.2D, v27.2S, v7.2S
adc x22, xzr, xzr
adds x16, x15, x23
mul x8, x14, x12
umlal v23.2D, v5.2S, v7.2S
usra v18.2D, v25.2D, #32
umulh x15, x14, x12
adcs x21, x21, x24
usra v30.2D, v16.2D, #32
adc x1, x17, xzr
adds x3, x16, x23
adcs x21, x21, x24
adc x1, x1, xzr
adds x24, x10, x21
umulh x21, x12, x12
adcs x16, x5, x1
adcs x10, x7, xzr
mov x17, v21.d[1]
adcs x23, x11, xzr
adc x5, x22, xzr
adds x1, x4, x8
adcs x22, x17, x15
ldp x17, x4, [x0]
mov x11, v21.d[0]
adc x21, x21, xzr
adds x1, x1, x8
adcs x15, x22, x15
adc x8, x21, xzr
adds x22, x11, x10
mov x21, v3.d[0]
adcs x11, x1, x23
ldp x1, x10, [x0, #16]
adcs x15, x15, x5
adc x7, x8, xzr
adds x8, x17, x21
mov x23, v4.d[1]
ldp x5, x21, [x0, #32]
adcs x17, x4, x3
ldr x4, [x0, #64]
mov x3, v18.d[0]
adcs x24, x1, x24
stp x8, x17, [x0]
adcs x17, x10, x16
ldp x1, x16, [x0, #48]
adcs x5, x5, x22
adcs x8, x21, x11
stp x5, x8, [x0, #32]
adcs x1, x1, x15
mov x15, v23.d[1]
adcs x21, x16, x7
stp x1, x21, [x0, #48]
adc x10, x4, xzr
subs x7, x14, x12
mov x16, v18.d[1]
cneg x5, x7, cc
csetm x4, cc
subs x11, x13, x6
mov x8, v23.d[0]
cneg x7, x11, cc
cinv x21, x4, cc
mov x11, v30.d[0]
adds x4, x23, x3
mul x22, x5, x7
mov x23, v30.d[1]
adcs x8, x8, x16
adcs x16, x15, x11
adc x11, x23, xzr
umulh x3, x5, x7
stp x24, x17, [x0, #16]
mov x5, v4.d[0]
subs x15, x20, x19
cneg x7, x15, cc
str x10, [x0, #64]
csetm x1, cc
subs x24, x2, x9
cneg x17, x24, cc
cinv x15, x1, cc
adds x23, x4, x5
umulh x1, x7, x17
adcs x24, x8, x4
adcs x10, x16, x8
eor x8, x22, x21
adcs x16, x11, x16
mul x22, x7, x17
eor x17, x1, x15
adc x1, xzr, x11
adds x11, x24, x5
eor x7, x3, x21
adcs x3, x10, x23
adcs x24, x16, x24
adcs x4, x1, x10
eor x10, x22, x15
adcs x16, xzr, x16
adc x1, xzr, x1
cmn x21, #0x1
adcs x8, x4, x8
adcs x22, x16, x7
adc x7, x1, x21
subs x21, x19, x12
csetm x4, cc
cneg x1, x21, cc
subs x21, x13, x2
cinv x16, x4, cc
cneg x4, x21, cc
cmn x15, #0x1
adcs x21, x23, x10
mul x23, x1, x4
adcs x11, x11, x17
adcs x3, x3, x15
umulh x1, x1, x4
adcs x24, x24, x15
adcs x8, x8, x15
adcs x22, x22, x15
eor x17, x23, x16
adc x15, x7, x15
subs x7, x20, x14
cneg x7, x7, cc
csetm x4, cc
subs x10, x20, x12
cneg x23, x10, cc
csetm x10, cc
subs x12, x6, x9
cinv x20, x4, cc
cneg x12, x12, cc
cmn x16, #0x1
eor x1, x1, x16
adcs x17, x24, x17
mul x4, x7, x12
adcs x8, x8, x1
umulh x1, x7, x12
adcs x24, x22, x16
adc x7, x15, x16
subs x12, x13, x9
cneg x12, x12, cc
cinv x13, x10, cc
subs x19, x19, x14
mul x9, x23, x12
cneg x19, x19, cc
csetm x10, cc
eor x16, x1, x20
subs x22, x6, x2
umulh x12, x23, x12
eor x1, x4, x20
cinv x4, x10, cc
cneg x22, x22, cc
cmn x20, #0x1
adcs x15, x11, x1
eor x6, x12, x13
adcs x10, x3, x16
adcs x17, x17, x20
eor x23, x9, x13
adcs x2, x8, x20
mul x11, x19, x22
adcs x24, x24, x20
adc x7, x7, x20
cmn x13, #0x1
adcs x3, x10, x23
umulh x22, x19, x22
adcs x17, x17, x6
eor x12, x22, x4
extr x22, x15, x21, #63
adcs x8, x2, x13
extr x21, x21, x5, #63
ldp x16, x23, [x0]
adcs x20, x24, x13
eor x1, x11, x4
adc x6, x7, x13
cmn x4, #0x1
ldp x2, x7, [x0, #16]
adcs x1, x3, x1
extr x19, x1, x15, #63
adcs x14, x17, x12
extr x1, x14, x1, #63
lsl x17, x5, #1
adcs x8, x8, x4
extr x12, x8, x14, #8
ldp x15, x11, [x0, #32]
adcs x9, x20, x4
adc x3, x6, x4
adds x16, x12, x16
extr x6, x9, x8, #8
ldp x14, x12, [x0, #48]
extr x8, x3, x9, #8
adcs x20, x6, x23
ldr x24, [x0, #64]
lsr x6, x3, #8
adcs x8, x8, x2
and x2, x1, #0x1ff
and x1, x20, x8
adcs x4, x6, x7
adcs x3, x17, x15
and x1, x1, x4
adcs x9, x21, x11
and x1, x1, x3
adcs x6, x22, x14
and x1, x1, x9
and x21, x1, x6
adcs x14, x19, x12
adc x1, x24, x2
cmp xzr, xzr
orr x12, x1, #0xfffffffffffffe00
lsr x1, x1, #9
adcs xzr, x16, x1
and x21, x21, x14
adcs xzr, x21, xzr
adcs xzr, x12, xzr
adcs x21, x16, x1
adcs x1, x20, xzr
adcs x19, x8, xzr
stp x21, x1, [x0]
adcs x1, x4, xzr
adcs x21, x3, xzr
stp x19, x1, [x0, #16]
adcs x1, x9, xzr
stp x21, x1, [x0, #32]
adcs x21, x6, xzr
adcs x1, x14, xzr
stp x21, x1, [x0, #48]
adc x1, x12, xzr
and x1, x1, #0x1ff
str x1, [x0, #64]
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_mul_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply modulo p_521, z := (x * y) mod p_521, assuming x and y reduced
// Inputs x[9], y[9]; output z[9]
//
// extern void bignum_mul_p521
// (uint64_t z[static 9], uint64_t x[static 9], uint64_t y[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_p521)
.text
.balign 4
// ---------------------------------------------------------------------------
// Macro computing [c,b,a] := [b,a] + (x - y) * (w - z), adding with carry
// to the [b,a] components but leaving CF aligned with the c term, which is
// a sign bitmask for (x - y) * (w - z). Continued add-with-carry operations
// with [c,...,c] will continue the carry chain correctly starting from
// the c position if desired to add to a longer term of the form [...,b,a].
//
// c,h,l,t should all be different and t,h should not overlap w,z.
// ---------------------------------------------------------------------------
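// A sketch of the sign handling: the macro forms the magnitude product
// [h;l] = |x - y| * |w - z| together with the mask c, which is all 1s
// exactly when (x - y) * (w - z) is negative (one, but not both, of the
// subtractions borrowed). "adds xzr, c, #1" then sets CF precisely in that
// negative case, so the eor-with-c plus adcs steps add either [h;l] or its
// two's complement, leaving CF positioned so that further adcs with c
// sign-extend the correction into higher words, as described above.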
#define muldiffnadd(b,a,x,y,w,z) \
subs t, x, y; \
cneg t, t, cc; \
csetm c, cc; \
subs h, w, z; \
cneg h, h, cc; \
mul l, t, h; \
umulh h, t, h; \
cinv c, c, cc; \
adds xzr, c, #1; \
eor l, l, c; \
adcs a, a, l; \
eor h, h, c; \
adcs b, b, h
#define z x0
#define x x1
#define y x2
#define a0 x3
#define a1 x4
#define a2 x5
#define a3 x6
#define b0 x7
#define b1 x8
#define b2 x9
#define b3 x10
#define s0 x11
#define s1 x12
#define s2 x13
#define s3 x14
#define s4 x15
#define s5 x16
#define s6 x17
#define s7 x19
#define s8 x20
#define c x21
#define h x22
#define l x23
#define t x24
#define s x25
#define u x26
// ---------------------------------------------------------------------------
// Core 4x4->8 ADK multiplication macro
// Does [s7,s6,s5,s4,s3,s2,s1,s0] = [a3,a2,a1,a0] * [b3,b2,b1,b0]
// ---------------------------------------------------------------------------
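// Informally, with B = 2^64, a = [a3;a2;a1;a0] and b = [b3;b2;b1;b0], the
// macro relies on the rearrangement (a sketch, writing D for the "diagonal"
// value sum_i a_i * b_i * B^i):
//
//      a * b = (1 + B + B^2 + B^3) * D
//              + sum_{i < j} (a_i - a_j) * (b_j - b_i) * B^(i+j)
//
// which follows from a_i * b_j + a_j * b_i
//                      = a_i * b_i + a_j * b_j - (a_i - a_j) * (b_i - b_j).
// The "multiply by B + 1" and "B^2 + 1" passes build the first term, and the
// six muldiffnadd invocations fold in the signed correction products.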
#define mul4 \
/* First accumulate all the "simple" products as [s7,s6,s5,s4,s0] */ \
\
mul s0, a0, b0; \
mul s4, a1, b1; \
mul s5, a2, b2; \
mul s6, a3, b3; \
\
umulh s7, a0, b0; \
adds s4, s4, s7; \
umulh s7, a1, b1; \
adcs s5, s5, s7; \
umulh s7, a2, b2; \
adcs s6, s6, s7; \
umulh s7, a3, b3; \
adc s7, s7, xzr; \
\
/* Multiply by B + 1 to get [s7;s6;s5;s4;s1;s0] */ \
\
adds s1, s4, s0; \
adcs s4, s5, s4; \
adcs s5, s6, s5; \
adcs s6, s7, s6; \
adc s7, xzr, s7; \
\
/* Multiply by B^2 + 1 to get [s7;s6;s5;s4;s3;s2;s1;s0] */ \
\
adds s2, s4, s0; \
adcs s3, s5, s1; \
adcs s4, s6, s4; \
adcs s5, s7, s5; \
adcs s6, xzr, s6; \
adc s7, xzr, s7; \
\
/* Now add in all the "complicated" terms. */ \
\
muldiffnadd(s6,s5, a2,a3, b3,b2); \
adc s7, s7, c; \
\
muldiffnadd(s2,s1, a0,a1, b1,b0); \
adcs s3, s3, c; \
adcs s4, s4, c; \
adcs s5, s5, c; \
adcs s6, s6, c; \
adc s7, s7, c; \
\
muldiffnadd(s5,s4, a1,a3, b3,b1); \
adcs s6, s6, c; \
adc s7, s7, c; \
\
muldiffnadd(s3,s2, a0,a2, b2,b0); \
adcs s4, s4, c; \
adcs s5, s5, c; \
adcs s6, s6, c; \
adc s7, s7, c; \
\
muldiffnadd(s4,s3, a0,a3, b3,b0); \
adcs s5, s5, c; \
adcs s6, s6, c; \
adc s7, s7, c; \
muldiffnadd(s4,s3, a1,a2, b2,b1); \
adcs s5, s5, c; \
adcs s6, s6, c; \
adc s7, s7, c
S2N_BN_SYMBOL(bignum_mul_p521):
// Save registers and make space for the temporary buffer
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
sub sp, sp, #80
// Load 4-digit low parts and multiply them to get L
ldp a0, a1, [x]
ldp a2, a3, [x, #16]
ldp b0, b1, [y]
ldp b2, b3, [y, #16]
mul4
// Shift right 256 bits modulo p_521 and stash in temp buffer
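// (Why this is just a rotation: 2^521 == 1 (mod p_521), so dividing by
// 2^256 is the same as multiplying by 2^265 = 2^(4*64 + 9). Hence the high
// four words of the product drop to words 0..3 of the stash, while the low
// four words reappear, shifted left by 9 bits, at words 4..8, which is what
// the lsl #9 / extr #55 sequence below constructs.)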
lsl c, s0, #9
extr s0, s1, s0, #55
extr s1, s2, s1, #55
extr s2, s3, s2, #55
lsr s3, s3, #55
stp s4, s5, [sp]
stp s6, s7, [sp, #16]
stp c, s0, [sp, #32]
stp s1, s2, [sp, #48]
str s3, [sp, #64]
// Load 4-digit high parts and multiply them to get H
ldp a0, a1, [x, #32]
ldp a2, a3, [x, #48]
ldp b0, b1, [y, #32]
ldp b2, b3, [y, #48]
mul4
// Add to the existing temporary buffer and re-stash.
// This gives a result HL congruent to (2^256 * H + L) / 2^256 modulo p_521
ldp l, h, [sp]
adds s0, s0, l
adcs s1, s1, h
stp s0, s1, [sp]
ldp l, h, [sp, #16]
adcs s2, s2, l
adcs s3, s3, h
stp s2, s3, [sp, #16]
ldp l, h, [sp, #32]
adcs s4, s4, l
adcs s5, s5, h
stp s4, s5, [sp, #32]
ldp l, h, [sp, #48]
adcs s6, s6, l
adcs s7, s7, h
stp s6, s7, [sp, #48]
ldr c, [sp, #64]
adc c, c, xzr
str c, [sp, #64]
// Compute t,[a3,a2,a1,a0] = x_hi - x_lo
// and s,[b3,b2,b1,b0] = y_lo - y_hi
// sign-magnitude differences, then XOR overall sign bitmask into s
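// (Note on the final eor: t ends up all 1s when x_hi < x_lo and s all 1s
// when y_lo < y_hi, so s := s XOR t is all 1s exactly when the mid-term
// (x_hi - x_lo) * (y_lo - y_hi) is negative; this is the "(-1)^s" sign
// bitmask referred to below.)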
ldp l, h, [x]
subs a0, a0, l
sbcs a1, a1, h
ldp l, h, [x, #16]
sbcs a2, a2, l
sbcs a3, a3, h
csetm t, cc
ldp l, h, [y]
subs b0, l, b0
sbcs b1, h, b1
ldp l, h, [y, #16]
sbcs b2, l, b2
sbcs b3, h, b3
csetm s, cc
eor a0, a0, t
subs a0, a0, t
eor a1, a1, t
sbcs a1, a1, t
eor a2, a2, t
sbcs a2, a2, t
eor a3, a3, t
sbc a3, a3, t
eor b0, b0, s
subs b0, b0, s
eor b1, b1, s
sbcs b1, b1, s
eor b2, b2, s
sbcs b2, b2, s
eor b3, b3, s
sbc b3, b3, s
eor s, s, t
// Now do yet a third 4x4 multiply to get mid-term product M
mul4
// We now want, at the 256 position, 2^256 * HL + HL + (-1)^s * M
// To keep things positive we use M' = p_521 - M in place of -M,
// and this notion of negation just amounts to complementation in 521 bits.
// Fold in the re-addition of the appropriately scaled lowest 4 words
// The initial result is [s8; b3;b2;b1;b0; s7;s6;s5;s4;s3;s2;s1;s0]
// Rebase it as a 9-word value at the 512 bit position using
// [s8; b3;b2;b1;b0; s7;s6;s5;s4;s3;s2;s1;s0] ==
// [s8; b3;b2;b1;b0; s7;s6;s5;s4] + 2^265 * [s3;s2;s1;s0] ==
// [([s8; b3;b2;b1;b0] + 2^9 * [s3;s2;s1;s0]); s7;s6;s5;s4]
//
// Accumulate as [s8; b3;b2;b1;b0; s7;s6;s5;s4] but keep aside the additional
// small word c (s8 + suspended carry), which belongs 256 bits up from the
// base of this 512-positioned value, i.e. at b0 = sum4; it is folded in
// during the next block.
ldp a0, a1, [sp]
ldp a2, a3, [sp, #16]
eor s0, s0, s
adds s0, s0, a0
eor s1, s1, s
adcs s1, s1, a1
eor s2, s2, s
adcs s2, s2, a2
eor s3, s3, s
adcs s3, s3, a3
eor s4, s4, s
ldp b0, b1, [sp, #32]
ldp b2, b3, [sp, #48]
ldr s8, [sp, #64]
adcs s4, s4, b0
eor s5, s5, s
adcs s5, s5, b1
eor s6, s6, s
adcs s6, s6, b2
eor s7, s7, s
adcs s7, s7, b3
adc c, s8, xzr
adds s4, s4, a0
adcs s5, s5, a1
adcs s6, s6, a2
adcs s7, s7, a3
and s, s, #0x1FF
lsl t, s0, #9
orr t, t, s
adcs b0, b0, t
extr t, s1, s0, #55
adcs b1, b1, t
extr t, s2, s1, #55
adcs b2, b2, t
extr t, s3, s2, #55
adcs b3, b3, t
lsr t, s3, #55
adc s8, t, s8
// Augment the total with the contribution from the top little words
// w and v. If we write the inputs as 2^512 * w + x and 2^512 * v + y
// then we are otherwise just doing x * y so we actually need to add
// 2^512 * (2^512 * w * v + w * y + v * x). We do this in an involved
// way, chopping x and y into 52-bit chunks so we can do most of the core
// arithmetic using only basic muls, no umulh (since w, v are only 9 bits).
// This does however involve some intricate bit-splicing plus arithmetic.
// To make things marginally less confusing we introduce some new names
// at the human level: x = [c7;...;c0] and y = [d7;...d0], which are
// not all distinct, and [sum8;sum7;...;sum0] for the running sum.
// Also accumulate u = sum1 AND ... AND sum7 for the later comparison
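// (Rough bound behind the mul-only strategy: w and v fit in 9 bits since
// the inputs are reduced, so each 52-bit chunk times w or v is below 2^61;
// the sum of two such products plus the small carried-in terms stays
// comfortably below 2^64, so no umulh is needed in the chunked
// accumulation below.)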
#define sum0 s4
#define sum1 s5
#define sum2 s6
#define sum3 s7
#define sum4 b0
#define sum5 b1
#define sum6 b2
#define sum7 b3
#define sum8 s8
#define c0 a0
#define c1 a1
#define c2 a2
#define c3 a0
#define c4 a1
#define c5 a2
#define c6 a0
#define c7 a1
#define d0 s0
#define d1 s1
#define d2 s2
#define d3 s0
#define d4 s1
#define d5 s2
#define d6 s0
#define d7 s1
#define v a3
#define w s3
// 0 * 52 = 64 * 0 + 0
ldr v, [y, #64]
ldp c0, c1, [x]
and l, c0, #0x000fffffffffffff
mul l, v, l
ldr w, [x, #64]
ldp d0, d1, [y]
and t, d0, #0x000fffffffffffff
mul t, w, t
add l, l, t
// 1 * 52 = 64 * 0 + 52
extr t, c1, c0, #52
and t, t, #0x000fffffffffffff
mul h, v, t
extr t, d1, d0, #52
and t, t, #0x000fffffffffffff
mul t, w, t
add h, h, t
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #12
adds sum0, sum0, t
// 2 * 52 = 64 * 1 + 40
ldp c2, c3, [x, #16]
ldp d2, d3, [y, #16]
extr t, c2, c1, #40
and t, t, #0x000fffffffffffff
mul l, v, t
extr t, d2, d1, #40
and t, t, #0x000fffffffffffff
mul t, w, t
add l, l, t
lsr t, h, #52
add l, l, t
lsl h, h, #12
extr t, l, h, #24
adcs sum1, sum1, t
// 3 * 52 = 64 * 2 + 28
extr t, c3, c2, #28
and t, t, #0x000fffffffffffff
mul h, v, t
extr t, d3, d2, #28
and t, t, #0x000fffffffffffff
mul t, w, t
add h, h, t
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #36
adcs sum2, sum2, t
and u, sum1, sum2
// 4 * 52 = 64 * 3 + 16
// At this point we also fold in the addition of c at the right place.
// Note that 4 * 64 = 4 * 52 + 48 so we shift c left 48 places to align.
ldp c4, c5, [x, #32]
ldp d4, d5, [y, #32]
extr t, c4, c3, #16
and t, t, #0x000fffffffffffff
mul l, v, t
extr t, d4, d3, #16
and t, t, #0x000fffffffffffff
mul t, w, t
add l, l, t
lsl c, c, #48
add l, l, c
lsr t, h, #52
add l, l, t
lsl h, h, #12
extr t, l, h, #48
adcs sum3, sum3, t
and u, u, sum3
// 5 * 52 = 64 * 4 + 4
lsr t, c4, #4
and t, t, #0x000fffffffffffff
mul h, v, t
lsr t, d4, #4
and t, t, #0x000fffffffffffff
mul t, w, t
add h, h, t
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr s, h, l, #60
// 6 * 52 = 64 * 4 + 56
extr t, c5, c4, #56
and t, t, #0x000fffffffffffff
mul l, v, t
extr t, d5, d4, #56
and t, t, #0x000fffffffffffff
mul t, w, t
add l, l, t
lsr t, h, #52
add l, l, t
lsl s, s, #8
extr t, l, s, #8
adcs sum4, sum4, t
and u, u, sum4
// 7 * 52 = 64 * 5 + 44
ldp c6, c7, [x, #48]
ldp d6, d7, [y, #48]
extr t, c6, c5, #44
and t, t, #0x000fffffffffffff
mul h, v, t
extr t, d6, d5, #44
and t, t, #0x000fffffffffffff
mul t, w, t
add h, h, t
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #20
adcs sum5, sum5, t
and u, u, sum5
// 8 * 52 = 64 * 6 + 32
extr t, c7, c6, #32
and t, t, #0x000fffffffffffff
mul l, v, t
extr t, d7, d6, #32
and t, t, #0x000fffffffffffff
mul t, w, t
add l, l, t
lsr t, h, #52
add l, l, t
lsl h, h, #12
extr t, l, h, #32
adcs sum6, sum6, t
and u, u, sum6
// 9 * 52 = 64 * 7 + 20
lsr t, c7, #20
mul h, v, t
lsr t, d7, #20
mul t, w, t
add h, h, t
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #44
adcs sum7, sum7, t
and u, u, sum7
// Top word
mul t, v, w
lsr h, h, #44
add t, t, h
adc sum8, sum8, t
// Extract the high part h and mask off the low part l = [sum8;sum7;...;sum0]
// but stuff sum8 with 1 bits at the left to ease a comparison below
lsr h, sum8, #9
orr sum8, sum8, #~0x1FF
// Decide whether h + l >= p_521 <=> h + l + 1 >= 2^521. Since this can only
// happen if digits sum7,...sum1 are all 1s, we use the AND of them "u" to
// condense the carry chain, and since we stuffed 1 bits into sum8 we get
// the result in CF without an additional comparison.
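// (The "+ 1" comes from the initial "subs xzr, xzr, xzr", which leaves
// CF = 1, so the first adcs effectively computes sum0 + h + 1; the final
// CF is then set exactly when h + l + 1 >= 2^521, i.e. h + l >= p_521.)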
subs xzr, xzr, xzr
adcs xzr, sum0, h
adcs xzr, u, xzr
adcs xzr, sum8, xzr
// Now if CF is set we want (h + l) - p_521 = (h + l + 1) - 2^521
// while otherwise we want just h + l. So mask h + l + CF to 521 bits.
// We don't need to mask away bits above 521 since they disappear below.
adcs sum0, sum0, h
adcs sum1, sum1, xzr
adcs sum2, sum2, xzr
adcs sum3, sum3, xzr
adcs sum4, sum4, xzr
adcs sum5, sum5, xzr
adcs sum6, sum6, xzr
adcs sum7, sum7, xzr
adc sum8, sum8, xzr
// The result is actually 2^512 * [sum8;...;sum0] == 2^-9 * [sum8;...;sum0]
// so we rotate right by 9 bits
and h, sum0, #0x1FF
extr sum0, sum1, sum0, #9
extr sum1, sum2, sum1, #9
stp sum0, sum1, [z]
extr sum2, sum3, sum2, #9
extr sum3, sum4, sum3, #9
stp sum2, sum3, [z, #16]
extr sum4, sum5, sum4, #9
extr sum5, sum6, sum5, #9
stp sum4, sum5, [z, #32]
extr sum6, sum7, sum6, #9
extr sum7, sum8, sum7, #9
stp sum6, sum7, [z, #48]
str h, [z, #64]
// Restore regs and return
add sp, sp, #80
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_mul_p521_neon.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply modulo p_521, z := (x * y) mod p_521, assuming x and y reduced
// Inputs x[9], y[9]; output z[9]
//
// extern void bignum_mul_p521_neon
// (uint64_t z[static 9], uint64_t x[static 9], uint64_t y[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
// bignum_mul_p521_neon is functionally equivalent to bignum_mul_p521.
// It is written in a way that
// 1. A subset of scalar multiplications in bignum_mul_p521 is carefully
// chosen and vectorized
// 2. The vectorized assembly is rescheduled using the SLOTHY superoptimizer.
// https://github.com/slothy-optimizer/slothy
//
// The output program of step 1. is as follows:
//
// stp x19, x20, [sp, #-16]!
// stp x21, x22, [sp, #-16]!
// stp x23, x24, [sp, #-16]!
// stp x25, x26, [sp, #-16]!
// sub sp, sp, #80
// ldp x15, x21, [x1]
// ldp x10, x17, [x1, #16]
// ldp x13, x16, [x2]
// ldr q18, [x1]
// ldr q28, [x2]
// ldp x5, x20, [x2, #16]
// movi v16.2D, #0x00000000ffffffff
// uzp2 v7.4S, v28.4S, v28.4S
// xtn v4.2S, v18.2D
// xtn v1.2S, v28.2D
// rev64 v27.4S, v28.4S
// umull v21.2D, v4.2S, v1.2S
// umull v28.2D, v4.2S, v7.2S
// uzp2 v5.4S, v18.4S, v18.4S
// mul v18.4S, v27.4S, v18.4S
// usra v28.2D, v21.2D, #32
// umull v29.2D, v5.2S, v7.2S
// uaddlp v18.2D, v18.4S
// and v16.16B, v28.16B, v16.16B
// umlal v16.2D, v5.2S, v1.2S
// shl v18.2D, v18.2D, #32
// usra v29.2D, v28.2D, #32
// umlal v18.2D, v4.2S, v1.2S
// usra v29.2D, v16.2D, #32
// mov x8, v18.d[0]
// mov x9, v18.d[1]
// mul x6, x10, x5
// mul x19, x17, x20
// mov x14, v29.d[0]
// adds x9, x9, x14
// mov x14, v29.d[1]
// adcs x6, x6, x14
// umulh x14, x10, x5
// adcs x19, x19, x14
// umulh x14, x17, x20
// adc x14, x14, xzr
// adds x11, x9, x8
// adcs x9, x6, x9
// adcs x6, x19, x6
// adcs x19, x14, x19
// adc x14, xzr, x14
// adds x3, x9, x8
// adcs x24, x6, x11
// adcs x9, x19, x9
// adcs x6, x14, x6
// adcs x19, xzr, x19
// adc x14, xzr, x14
// subs x4, x10, x17
// cneg x4, x4, cc
// csetm x7, cc
// subs x23, x20, x5
// cneg x23, x23, cc
// mul x22, x4, x23
// umulh x4, x4, x23
// cinv x7, x7, cc
// cmn x7, #0x1
// eor x23, x22, x7
// adcs x6, x6, x23
// eor x4, x4, x7
// adcs x19, x19, x4
// adc x14, x14, x7
// subs x4, x15, x21
// cneg x4, x4, cc
// csetm x7, cc
// subs x23, x16, x13
// cneg x23, x23, cc
// mul x22, x4, x23
// umulh x4, x4, x23
// cinv x7, x7, cc
// cmn x7, #0x1
// eor x23, x22, x7
// adcs x11, x11, x23
// eor x4, x4, x7
// adcs x3, x3, x4
// adcs x24, x24, x7
// adcs x9, x9, x7
// adcs x6, x6, x7
// adcs x19, x19, x7
// adc x14, x14, x7
// subs x4, x21, x17
// cneg x4, x4, cc
// csetm x7, cc
// subs x23, x20, x16
// cneg x23, x23, cc
// mul x22, x4, x23
// umulh x4, x4, x23
// cinv x7, x7, cc
// cmn x7, #0x1
// eor x23, x22, x7
// adcs x9, x9, x23
// eor x4, x4, x7
// adcs x6, x6, x4
// adcs x19, x19, x7
// adc x14, x14, x7
// subs x4, x15, x10
// cneg x4, x4, cc
// csetm x7, cc
// subs x23, x5, x13
// cneg x23, x23, cc
// mul x22, x4, x23
// umulh x4, x4, x23
// cinv x7, x7, cc
// cmn x7, #0x1
// eor x23, x22, x7
// adcs x3, x3, x23
// eor x4, x4, x7
// adcs x24, x24, x4
// adcs x9, x9, x7
// adcs x6, x6, x7
// adcs x19, x19, x7
// adc x14, x14, x7
// subs x17, x15, x17
// cneg x17, x17, cc
// csetm x4, cc
// subs x13, x20, x13
// cneg x13, x13, cc
// mul x20, x17, x13
// umulh x17, x17, x13
// cinv x13, x4, cc
// cmn x13, #0x1
// eor x20, x20, x13
// adcs x20, x24, x20
// eor x17, x17, x13
// adcs x17, x9, x17
// adcs x9, x6, x13
// adcs x6, x19, x13
// adc x13, x14, x13
// subs x21, x21, x10
// cneg x21, x21, cc
// csetm x10, cc
// subs x16, x5, x16
// cneg x16, x16, cc
// mul x5, x21, x16
// umulh x21, x21, x16
// cinv x10, x10, cc
// cmn x10, #0x1
// eor x16, x5, x10
// adcs x16, x20, x16
// eor x21, x21, x10
// adcs x21, x17, x21
// adcs x17, x9, x10
// adcs x5, x6, x10
// adc x10, x13, x10
// lsl x13, x8, #9
// extr x20, x11, x8, #55
// extr x8, x3, x11, #55
// extr x9, x16, x3, #55
// lsr x16, x16, #55
// stp x21, x17, [sp] // @slothy:writes=stack0
// stp x5, x10, [sp, #16] // @slothy:writes=stack16
// stp x13, x20, [sp, #32] // @slothy:writes=stack32
// stp x8, x9, [sp, #48] // @slothy:writes=stack48
// str x16, [sp, #64] // @slothy:writes=stack64
// ldp x21, x10, [x1, #32]
// ldp x17, x13, [x1, #48]
// ldp x16, x5, [x2, #32]
// ldr q18, [x1, #32]
// ldr q28, [x2, #32]
// ldp x20, x8, [x2, #48]
// movi v16.2D, #0x00000000ffffffff
// uzp2 v7.4S, v28.4S, v28.4S
// xtn v4.2S, v18.2D
// xtn v1.2S, v28.2D
// rev64 v28.4S, v28.4S
// umull v27.2D, v4.2S, v1.2S
// umull v29.2D, v4.2S, v7.2S
// uzp2 v21.4S, v18.4S, v18.4S
// mul v28.4S, v28.4S, v18.4S
// usra v29.2D, v27.2D, #32
// umull v18.2D, v21.2S, v7.2S
// uaddlp v28.2D, v28.4S
// and v16.16B, v29.16B, v16.16B
// umlal v16.2D, v21.2S, v1.2S
// shl v28.2D, v28.2D, #32
// usra v18.2D, v29.2D, #32
// umlal v28.2D, v4.2S, v1.2S
// usra v18.2D, v16.2D, #32
// mov x9, v28.d[0]
// mov x6, v28.d[1]
// mul x19, x17, x20
// mul x14, x13, x8
// mov x11, v18.d[0]
// adds x6, x6, x11
// mov x11, v18.d[1]
// adcs x19, x19, x11
// umulh x11, x17, x20
// adcs x14, x14, x11
// umulh x11, x13, x8
// adc x11, x11, xzr
// adds x3, x6, x9
// adcs x6, x19, x6
// adcs x19, x14, x19
// adcs x14, x11, x14
// adc x11, xzr, x11
// adds x24, x6, x9
// adcs x4, x19, x3
// adcs x6, x14, x6
// adcs x19, x11, x19
// adcs x14, xzr, x14
// adc x11, xzr, x11
// subs x7, x17, x13
// cneg x7, x7, cc
// csetm x23, cc
// subs x22, x8, x20
// cneg x22, x22, cc
// mul x12, x7, x22
// umulh x7, x7, x22
// cinv x23, x23, cc
// cmn x23, #0x1
// eor x22, x12, x23
// adcs x19, x19, x22
// eor x7, x7, x23
// adcs x14, x14, x7
// adc x11, x11, x23
// subs x7, x21, x10
// cneg x7, x7, cc
// csetm x23, cc
// subs x22, x5, x16
// cneg x22, x22, cc
// mul x12, x7, x22
// umulh x7, x7, x22
// cinv x23, x23, cc
// cmn x23, #0x1
// eor x22, x12, x23
// adcs x3, x3, x22
// eor x7, x7, x23
// adcs x24, x24, x7
// adcs x4, x4, x23
// adcs x6, x6, x23
// adcs x19, x19, x23
// adcs x14, x14, x23
// adc x11, x11, x23
// subs x7, x10, x13
// cneg x7, x7, cc
// csetm x23, cc
// subs x22, x8, x5
// cneg x22, x22, cc
// mul x12, x7, x22
// umulh x7, x7, x22
// cinv x23, x23, cc
// cmn x23, #0x1
// eor x22, x12, x23
// adcs x6, x6, x22
// eor x7, x7, x23
// adcs x19, x19, x7
// adcs x14, x14, x23
// adc x11, x11, x23
// subs x7, x21, x17
// cneg x7, x7, cc
// csetm x23, cc
// subs x22, x20, x16
// cneg x22, x22, cc
// mul x12, x7, x22
// umulh x7, x7, x22
// cinv x23, x23, cc
// cmn x23, #0x1
// eor x22, x12, x23
// adcs x24, x24, x22
// eor x7, x7, x23
// adcs x4, x4, x7
// adcs x6, x6, x23
// adcs x19, x19, x23
// adcs x14, x14, x23
// adc x11, x11, x23
// subs x7, x21, x13
// cneg x7, x7, cc
// csetm x23, cc
// subs x22, x8, x16
// cneg x22, x22, cc
// mul x12, x7, x22
// umulh x7, x7, x22
// cinv x23, x23, cc
// cmn x23, #0x1
// eor x22, x12, x23
// adcs x4, x4, x22
// eor x7, x7, x23
// adcs x6, x6, x7
// adcs x19, x19, x23
// adcs x14, x14, x23
// adc x11, x11, x23
// subs x7, x10, x17
// cneg x7, x7, cc
// csetm x23, cc
// subs x22, x20, x5
// cneg x22, x22, cc
// mul x12, x7, x22
// umulh x7, x7, x22
// cinv x23, x23, cc
// cmn x23, #0x1
// eor x22, x12, x23
// adcs x4, x4, x22
// eor x7, x7, x23
// adcs x6, x6, x7
// adcs x19, x19, x23
// adcs x14, x14, x23
// adc x11, x11, x23
// ldp x7, x23, [sp] // @slothy:reads=stack0
// adds x9, x9, x7
// adcs x3, x3, x23
// stp x9, x3, [sp] // @slothy:writes=stack0
// ldp x9, x3, [sp, #16] // @slothy:reads=stack16
// adcs x9, x24, x9
// adcs x3, x4, x3
// stp x9, x3, [sp, #16] // @slothy:writes=stack16
// ldp x9, x3, [sp, #32] // @slothy:reads=stack32
// adcs x9, x6, x9
// adcs x6, x19, x3
// stp x9, x6, [sp, #32] // @slothy:writes=stack32
// ldp x9, x6, [sp, #48] // @slothy:reads=stack48
// adcs x9, x14, x9
// adcs x6, x11, x6
// stp x9, x6, [sp, #48] // @slothy:writes=stack48
// ldr x9, [sp, #64] // @slothy:reads=stack64
// adc x9, x9, xzr
// str x9, [sp, #64] // @slothy:writes=stack64
// ldp x9, x6, [x1]
// subs x21, x21, x9
// sbcs x10, x10, x6
// ldp x9, x6, [x1, #16]
// sbcs x17, x17, x9
// sbcs x13, x13, x6
// csetm x9, cc
// ldp x6, x19, [x2]
// subs x16, x6, x16
// sbcs x5, x19, x5
// ldp x6, x19, [x2, #16]
// sbcs x20, x6, x20
// sbcs x8, x19, x8
// csetm x6, cc
// eor x21, x21, x9
// subs x21, x21, x9
// eor x10, x10, x9
// sbcs x10, x10, x9
// eor x17, x17, x9
// sbcs x17, x17, x9
// eor x13, x13, x9
// sbc x13, x13, x9
// eor x16, x16, x6
// subs x16, x16, x6
// eor x5, x5, x6
// sbcs x5, x5, x6
// eor x20, x20, x6
// sbcs x20, x20, x6
// eor x8, x8, x6
// sbc x8, x8, x6
// eor x9, x6, x9
// mul x6, x21, x16
// mul x19, x10, x5
// mul x14, x17, x20
// mul x11, x13, x8
// umulh x3, x21, x16
// adds x19, x19, x3
// umulh x3, x10, x5
// adcs x14, x14, x3
// umulh x3, x17, x20
// adcs x11, x11, x3
// umulh x3, x13, x8
// adc x3, x3, xzr
// adds x24, x19, x6
// adcs x19, x14, x19
// adcs x14, x11, x14
// adcs x11, x3, x11
// adc x3, xzr, x3
// adds x4, x19, x6
// adcs x7, x14, x24
// adcs x19, x11, x19
// adcs x14, x3, x14
// adcs x11, xzr, x11
// adc x3, xzr, x3
// subs x23, x17, x13
// cneg x23, x23, cc
// csetm x22, cc
// subs x12, x8, x20
// cneg x12, x12, cc
// mul x15, x23, x12
// umulh x23, x23, x12
// cinv x22, x22, cc
// cmn x22, #0x1
// eor x12, x15, x22
// adcs x14, x14, x12
// eor x23, x23, x22
// adcs x11, x11, x23
// adc x3, x3, x22
// subs x23, x21, x10
// cneg x23, x23, cc
// csetm x22, cc
// subs x12, x5, x16
// cneg x12, x12, cc
// mul x15, x23, x12
// umulh x23, x23, x12
// cinv x22, x22, cc
// cmn x22, #0x1
// eor x12, x15, x22
// adcs x24, x24, x12
// eor x23, x23, x22
// adcs x4, x4, x23
// adcs x7, x7, x22
// adcs x19, x19, x22
// adcs x14, x14, x22
// adcs x11, x11, x22
// adc x3, x3, x22
// subs x23, x10, x13
// cneg x23, x23, cc
// csetm x22, cc
// subs x12, x8, x5
// cneg x12, x12, cc
// mul x15, x23, x12
// umulh x23, x23, x12
// cinv x22, x22, cc
// cmn x22, #0x1
// eor x12, x15, x22
// adcs x19, x19, x12
// eor x23, x23, x22
// adcs x14, x14, x23
// adcs x11, x11, x22
// adc x3, x3, x22
// subs x23, x21, x17
// cneg x23, x23, cc
// csetm x22, cc
// subs x12, x20, x16
// cneg x12, x12, cc
// mul x15, x23, x12
// umulh x23, x23, x12
// cinv x22, x22, cc
// cmn x22, #0x1
// eor x12, x15, x22
// adcs x4, x4, x12
// eor x23, x23, x22
// adcs x7, x7, x23
// adcs x19, x19, x22
// adcs x14, x14, x22
// adcs x11, x11, x22
// adc x3, x3, x22
// subs x21, x21, x13
// cneg x21, x21, cc
// csetm x13, cc
// subs x16, x8, x16
// cneg x16, x16, cc
// mul x8, x21, x16
// umulh x21, x21, x16
// cinv x13, x13, cc
// cmn x13, #0x1
// eor x16, x8, x13
// adcs x16, x7, x16
// eor x21, x21, x13
// adcs x21, x19, x21
// adcs x8, x14, x13
// adcs x19, x11, x13
// adc x13, x3, x13
// subs x10, x10, x17
// cneg x10, x10, cc
// csetm x17, cc
// subs x5, x20, x5
// cneg x5, x5, cc
// mul x20, x10, x5
// umulh x10, x10, x5
// cinv x17, x17, cc
// cmn x17, #0x1
// eor x5, x20, x17
// adcs x16, x16, x5
// eor x10, x10, x17
// adcs x21, x21, x10
// adcs x10, x8, x17
// adcs x5, x19, x17
// adc x17, x13, x17
// ldp x13, x20, [sp] // @slothy:reads=stack0
// ldp x8, x19, [sp, #16] // @slothy:reads=stack16
// eor x6, x6, x9
// adds x6, x6, x13
// eor x14, x24, x9
// adcs x14, x14, x20
// eor x11, x4, x9
// adcs x11, x11, x8
// eor x16, x16, x9
// adcs x16, x16, x19
// eor x21, x21, x9
// ldp x3, x24, [sp, #32] // @slothy:reads=stack32
// ldp x4, x7, [sp, #48] // @slothy:reads=stack48
// ldr x23, [sp, #64] // @slothy:reads=stack64
// adcs x21, x21, x3
// eor x10, x10, x9
// adcs x10, x10, x24
// eor x5, x5, x9
// adcs x5, x5, x4
// eor x17, x17, x9
// adcs x17, x17, x7
// adc x22, x23, xzr
// adds x21, x21, x13
// adcs x10, x10, x20
// adcs x13, x5, x8
// adcs x17, x17, x19
// and x5, x9, #0x1ff
// lsl x20, x6, #9
// orr x5, x20, x5
// adcs x5, x3, x5
// extr x20, x14, x6, #55
// adcs x20, x24, x20
// extr x8, x11, x14, #55
// adcs x8, x4, x8
// extr x9, x16, x11, #55
// adcs x9, x7, x9
// lsr x16, x16, #55
// adc x16, x16, x23
// ldr x6, [x2, #64]
// ldp x19, x14, [x1]
// and x11, x19, #0xfffffffffffff
// mul x11, x6, x11
// ldr x3, [x1, #64]
// ldp x24, x4, [x2]
// and x7, x24, #0xfffffffffffff
// mul x7, x3, x7
// add x11, x11, x7
// extr x19, x14, x19, #52
// and x19, x19, #0xfffffffffffff
// mul x19, x6, x19
// extr x24, x4, x24, #52
// and x24, x24, #0xfffffffffffff
// mul x24, x3, x24
// add x19, x19, x24
// lsr x24, x11, #52
// add x19, x19, x24
// lsl x11, x11, #12
// extr x11, x19, x11, #12
// adds x21, x21, x11
// ldp x11, x24, [x1, #16]
// ldp x7, x23, [x2, #16]
// extr x14, x11, x14, #40
// and x14, x14, #0xfffffffffffff
// mul x14, x6, x14
// extr x4, x7, x4, #40
// and x4, x4, #0xfffffffffffff
// mul x4, x3, x4
// add x14, x14, x4
// lsr x4, x19, #52
// add x14, x14, x4
// lsl x19, x19, #12
// extr x19, x14, x19, #24
// adcs x10, x10, x19
// extr x19, x24, x11, #28
// and x19, x19, #0xfffffffffffff
// mul x19, x6, x19
// extr x11, x23, x7, #28
// and x11, x11, #0xfffffffffffff
// mul x11, x3, x11
// add x19, x19, x11
// lsr x11, x14, #52
// add x19, x19, x11
// lsl x14, x14, #12
// extr x14, x19, x14, #36
// adcs x13, x13, x14
// and x14, x10, x13
// ldp x11, x4, [x1, #32]
// ldp x7, x12, [x2, #32]
// extr x24, x11, x24, #16
// and x24, x24, #0xfffffffffffff
// mul x24, x6, x24
// extr x23, x7, x23, #16
// and x23, x23, #0xfffffffffffff
// mul x23, x3, x23
// add x24, x24, x23
// lsl x23, x22, #48
// add x24, x24, x23
// lsr x23, x19, #52
// add x24, x24, x23
// lsl x19, x19, #12
// extr x19, x24, x19, #48
// adcs x17, x17, x19
// and x19, x14, x17
// lsr x14, x11, #4
// and x14, x14, #0xfffffffffffff
// mul x14, x6, x14
// lsr x23, x7, #4
// and x23, x23, #0xfffffffffffff
// mul x23, x3, x23
// add x14, x14, x23
// lsr x23, x24, #52
// add x14, x14, x23
// lsl x24, x24, #12
// extr x24, x14, x24, #60
// extr x11, x4, x11, #56
// and x11, x11, #0xfffffffffffff
// mul x11, x6, x11
// extr x7, x12, x7, #56
// and x7, x7, #0xfffffffffffff
// mul x7, x3, x7
// add x11, x11, x7
// lsr x14, x14, #52
// add x14, x11, x14
// lsl x11, x24, #8
// extr x11, x14, x11, #8
// adcs x5, x5, x11
// and x19, x19, x5
// ldp x11, x24, [x1, #48]
// ldp x2, x7, [x2, #48]
// extr x4, x11, x4, #44
// and x4, x4, #0xfffffffffffff
// mul x4, x6, x4
// extr x23, x2, x12, #44
// and x23, x23, #0xfffffffffffff
// mul x23, x3, x23
// add x4, x4, x23
// lsr x23, x14, #52
// add x4, x4, x23
// lsl x14, x14, #12
// extr x14, x4, x14, #20
// adcs x20, x20, x14
// and x19, x19, x20
// extr x14, x24, x11, #32
// and x14, x14, #0xfffffffffffff
// mul x14, x6, x14
// extr x2, x7, x2, #32
// and x2, x2, #0xfffffffffffff
// mul x2, x3, x2
// add x2, x14, x2
// lsr x14, x4, #52
// add x2, x2, x14
// lsl x14, x4, #12
// extr x14, x2, x14, #32
// adcs x8, x8, x14
// and x19, x19, x8
// lsr x14, x24, #20
// mul x14, x6, x14
// lsr x11, x7, #20
// mul x11, x3, x11
// add x14, x14, x11
// lsr x11, x2, #52
// add x14, x14, x11
// lsl x2, x2, #12
// extr x2, x14, x2, #44
// adcs x9, x9, x2
// and x2, x19, x9
// mul x6, x6, x3
// lsr x19, x14, #44
// add x6, x6, x19
// adc x16, x16, x6
// lsr x6, x16, #9
// orr x16, x16, #0xfffffffffffffe00
// cmp xzr, xzr
// adcs xzr, x21, x6
// adcs xzr, x2, xzr
// adcs xzr, x16, xzr
// adcs x21, x21, x6
// adcs x10, x10, xzr
// adcs x13, x13, xzr
// adcs x17, x17, xzr
// adcs x5, x5, xzr
// adcs x20, x20, xzr
// adcs x8, x8, xzr
// adcs x9, x9, xzr
// adc x16, x16, xzr
// and x2, x21, #0x1ff
// extr x21, x10, x21, #9
// extr x10, x13, x10, #9
// stp x21, x10, [x0] // @slothy:writes=buffer0
// extr x21, x17, x13, #9
// extr x10, x5, x17, #9
// stp x21, x10, [x0, #16] // @slothy:writes=buffer16
// extr x21, x20, x5, #9
// extr x10, x8, x20, #9
// stp x21, x10, [x0, #32] // @slothy:writes=buffer32
// extr x21, x9, x8, #9
// extr x10, x16, x9, #9
// stp x21, x10, [x0, #48] // @slothy:writes=buffer48
// str x2, [x0, #64] // @slothy:writes=buffer64
// add sp, sp, #80
// ldp x25, x26, [sp], #16
// ldp x23, x24, [sp], #16
// ldp x21, x22, [sp], #16
// ldp x19, x20, [sp], #16
// ret
//
// The bash script used for step 2 is as follows:
//
// # Store the assembly instructions except the last 'ret',
// # callee-register store/loads and add/sub sp #80 as, say, 'input.S'.
// export OUTPUTS="[hint_buffer0,hint_buffer16,hint_buffer32,hint_buffer48,hint_buffer64]"
// export RESERVED_REGS="[x18,x27,x28,x29,x30,sp,q8,q9,q10,q11,q12,q13,q14,q15,v8,v9,v10,v11,v12,v13,v14,v15]"
// <s2n-bignum>/tools/external/slothy.sh input.S my_out_dir
// # my_out_dir/3.opt.s is the optimized assembly. Its output may differ
// # from this file since the sequence is non-deterministically chosen.
// # Please add 'ret' at the end of the output assembly.
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_p521_neon)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_p521_neon)
.text
.balign 4
S2N_BN_SYMBOL(bignum_mul_p521_neon):
// Save registers and make space for the temporary buffer
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
sub sp, sp, #80
ldr q6, [x2]
ldp x10, x17, [x1, #16]
ldr q4, [x1]
ldr q16, [x2, #32]
ldp x5, x20, [x2, #16]
ldr q2, [x1, #32]
movi v31.2D, #0x00000000ffffffff
uzp2 v17.4S, v6.4S, v6.4S
rev64 v7.4S, v6.4S
ldp x15, x21, [x1]
xtn v25.2S, v6.2D
xtn v22.2S, v4.2D
subs x14, x10, x17
mul v7.4S, v7.4S, v4.4S
csetm x8, cc
rev64 v3.4S, v16.4S
xtn v1.2S, v16.2D
ldp x13, x16, [x2]
mul x26, x10, x5
uzp2 v16.4S, v16.4S, v16.4S
uaddlp v26.2D, v7.4S
cneg x4, x14, cc
subs x24, x15, x21
xtn v5.2S, v2.2D
mul v28.4S, v3.4S, v2.4S
shl v26.2D, v26.2D, #32
mul x22, x17, x20
umull v20.2D, v22.2S, v25.2S
uzp2 v6.4S, v4.4S, v4.4S
umull v18.2D, v22.2S, v17.2S
uzp2 v4.4S, v2.4S, v2.4S
cneg x14, x24, cc
csetm x7, cc
umulh x11, x17, x20
usra v18.2D, v20.2D, #32
uaddlp v7.2D, v28.4S
subs x19, x16, x13
umlal v26.2D, v22.2S, v25.2S
cneg x19, x19, cc
shl v28.2D, v7.2D, #32
umull v7.2D, v5.2S, v1.2S
umull v30.2D, v5.2S, v16.2S
cinv x6, x7, cc
mul x25, x14, x19
umlal v28.2D, v5.2S, v1.2S
umull v21.2D, v6.2S, v17.2S
umulh x14, x14, x19
usra v30.2D, v7.2D, #32
subs x9, x20, x5
and v29.16B, v18.16B, v31.16B
cinv x23, x8, cc
mov x8, v26.d[1]
cneg x12, x9, cc
usra v21.2D, v18.2D, #32
umlal v29.2D, v6.2S, v25.2S
mul x24, x4, x12
umull v18.2D, v4.2S, v16.2S
movi v25.2D, #0x00000000ffffffff
eor x9, x14, x6
and v7.16B, v30.16B, v25.16B
usra v21.2D, v29.2D, #32
umulh x7, x10, x5
usra v18.2D, v30.2D, #32
umlal v7.2D, v4.2S, v1.2S
mov x19, v21.d[0]
umulh x3, x4, x12
mov x14, v21.d[1]
usra v18.2D, v7.2D, #32
adds x4, x8, x19
mov x8, v26.d[0]
adcs x19, x26, x14
adcs x14, x22, x7
adc x12, x11, xzr
adds x11, x4, x8
adcs x26, x19, x4
adcs x22, x14, x19
eor x4, x24, x23
adcs x14, x12, x14
eor x7, x25, x6
adc x25, xzr, x12
eor x19, x3, x23
adds x3, x26, x8
adcs x24, x22, x11
adcs x12, x14, x26
adcs x22, x25, x22
adcs x26, xzr, x14
adc x14, xzr, x25
cmn x23, #0x1
adcs x22, x22, x4
adcs x19, x26, x19
adc x25, x14, x23
subs x14, x21, x17
cneg x23, x14, cc
csetm x26, cc
subs x4, x20, x16
cneg x14, x4, cc
cinv x4, x26, cc
cmn x6, #0x1
adcs x11, x11, x7
mul x7, x23, x14
adcs x9, x3, x9
adcs x26, x24, x6
umulh x3, x23, x14
adcs x14, x12, x6
adcs x22, x22, x6
adcs x12, x19, x6
extr x24, x11, x8, #55
adc x6, x25, x6
subs x19, x15, x17
csetm x17, cc
cneg x23, x19, cc
subs x19, x20, x13
lsl x25, x8, #9
eor x8, x7, x4
cneg x20, x19, cc
umulh x7, x23, x20
cinv x19, x17, cc
subs x17, x15, x10
csetm x15, cc
stp x25, x24, [sp, #32]
cneg x24, x17, cc
mul x20, x23, x20
subs x25, x5, x13
cneg x13, x25, cc
cinv x15, x15, cc
mul x25, x24, x13
subs x21, x21, x10
csetm x23, cc
cneg x17, x21, cc
subs x21, x5, x16
umulh x13, x24, x13
cinv x10, x23, cc
cneg x23, x21, cc
cmn x4, #0x1
adcs x14, x14, x8
eor x21, x3, x4
adcs x21, x22, x21
eor x5, x20, x19
adcs x24, x12, x4
mul x12, x17, x23
eor x8, x25, x15
adc x25, x6, x4
cmn x15, #0x1
adcs x6, x9, x8
ldp x20, x8, [x2, #48]
eor x9, x13, x15
adcs x4, x26, x9
umulh x26, x17, x23
ldp x17, x13, [x1, #48]
adcs x9, x14, x15
adcs x16, x21, x15
adcs x14, x24, x15
eor x21, x7, x19
mul x23, x17, x20
adc x24, x25, x15
cmn x19, #0x1
adcs x7, x4, x5
adcs x9, x9, x21
umulh x3, x13, x8
adcs x16, x16, x19
adcs x22, x14, x19
eor x5, x12, x10
adc x12, x24, x19
cmn x10, #0x1
adcs x19, x7, x5
eor x14, x26, x10
mov x7, v28.d[1]
adcs x24, x9, x14
extr x4, x19, x6, #55
umulh x15, x17, x20
mov x14, v18.d[1]
lsr x9, x19, #55
adcs x5, x16, x10
mov x16, v18.d[0]
adcs x19, x22, x10
str x9, [sp, #64]
extr x25, x6, x11, #55
adc x21, x12, x10
subs x26, x17, x13
stp x25, x4, [sp, #48]
stp x19, x21, [sp, #16]
csetm x6, cc
cneg x4, x26, cc
mul x19, x13, x8
subs x11, x8, x20
stp x24, x5, [sp]
ldp x21, x10, [x1, #32]
cinv x12, x6, cc
cneg x6, x11, cc
mov x9, v28.d[0]
umulh x25, x4, x6
adds x22, x7, x16
ldp x16, x5, [x2, #32]
adcs x14, x23, x14
adcs x11, x19, x15
adc x24, x3, xzr
adds x3, x22, x9
adcs x15, x14, x22
mul x22, x4, x6
adcs x6, x11, x14
adcs x4, x24, x11
eor x14, x25, x12
adc x26, xzr, x24
subs x7, x21, x10
csetm x23, cc
cneg x19, x7, cc
subs x24, x5, x16
cneg x11, x24, cc
cinv x7, x23, cc
adds x25, x15, x9
eor x23, x22, x12
adcs x22, x6, x3
mul x24, x19, x11
adcs x15, x4, x15
adcs x6, x26, x6
umulh x19, x19, x11
adcs x11, xzr, x4
adc x26, xzr, x26
cmn x12, #0x1
adcs x4, x6, x23
eor x6, x24, x7
adcs x14, x11, x14
adc x26, x26, x12
subs x11, x10, x13
cneg x12, x11, cc
csetm x11, cc
eor x19, x19, x7
subs x24, x8, x5
cinv x11, x11, cc
cneg x24, x24, cc
cmn x7, #0x1
adcs x3, x3, x6
mul x23, x12, x24
adcs x25, x25, x19
adcs x6, x22, x7
umulh x19, x12, x24
adcs x22, x15, x7
adcs x12, x4, x7
eor x24, x23, x11
adcs x4, x14, x7
adc x26, x26, x7
eor x19, x19, x11
subs x14, x21, x17
cneg x7, x14, cc
csetm x14, cc
subs x23, x20, x16
cinv x14, x14, cc
cneg x23, x23, cc
cmn x11, #0x1
adcs x22, x22, x24
mul x24, x7, x23
adcs x15, x12, x19
adcs x4, x4, x11
adc x19, x26, x11
umulh x26, x7, x23
subs x7, x21, x13
eor x11, x24, x14
cneg x23, x7, cc
csetm x12, cc
subs x7, x8, x16
cneg x7, x7, cc
cinv x12, x12, cc
cmn x14, #0x1
eor x26, x26, x14
adcs x11, x25, x11
mul x25, x23, x7
adcs x26, x6, x26
adcs x6, x22, x14
adcs x24, x15, x14
umulh x23, x23, x7
adcs x4, x4, x14
adc x22, x19, x14
eor x14, x25, x12
eor x7, x23, x12
cmn x12, #0x1
adcs x14, x26, x14
ldp x19, x25, [x2]
ldp x15, x23, [x2, #16]
adcs x26, x6, x7
adcs x24, x24, x12
adcs x7, x4, x12
adc x4, x22, x12
subs x19, x19, x16
ldp x16, x22, [x1]
sbcs x6, x25, x5
ldp x12, x25, [x1, #16]
sbcs x15, x15, x20
sbcs x8, x23, x8
csetm x23, cc
subs x21, x21, x16
eor x16, x19, x23
sbcs x19, x10, x22
eor x22, x6, x23
eor x8, x8, x23
sbcs x6, x17, x12
sbcs x13, x13, x25
csetm x12, cc
subs x10, x10, x17
cneg x17, x10, cc
csetm x25, cc
subs x5, x20, x5
eor x10, x19, x12
cneg x19, x5, cc
eor x20, x15, x23
eor x21, x21, x12
cinv x15, x25, cc
mul x25, x17, x19
subs x16, x16, x23
sbcs x5, x22, x23
eor x6, x6, x12
sbcs x20, x20, x23
eor x22, x13, x12
sbc x8, x8, x23
subs x21, x21, x12
umulh x19, x17, x19
sbcs x10, x10, x12
sbcs x17, x6, x12
eor x6, x19, x15
eor x19, x25, x15
umulh x25, x17, x20
sbc x13, x22, x12
cmn x15, #0x1
adcs x22, x14, x19
adcs x19, x26, x6
ldp x6, x26, [sp]
adcs x14, x24, x15
umulh x24, x21, x16
adcs x7, x7, x15
adc x15, x4, x15
adds x4, x9, x6
eor x9, x23, x12
adcs x12, x3, x26
stp x4, x12, [sp]
ldp x4, x26, [sp, #16]
umulh x12, x10, x5
ldp x6, x23, [sp, #32]
adcs x3, x11, x4
mul x4, x13, x8
adcs x26, x22, x26
ldp x22, x11, [sp, #48]
adcs x6, x19, x6
stp x3, x26, [sp, #16]
mul x26, x10, x5
adcs x14, x14, x23
stp x6, x14, [sp, #32]
ldr x6, [sp, #64]
adcs x22, x7, x22
adcs x14, x15, x11
mul x11, x17, x20
adc x19, x6, xzr
stp x22, x14, [sp, #48]
adds x14, x26, x24
str x19, [sp, #64]
umulh x19, x13, x8
adcs x7, x11, x12
adcs x22, x4, x25
mul x6, x21, x16
adc x19, x19, xzr
subs x11, x17, x13
cneg x12, x11, cc
csetm x11, cc
subs x24, x8, x20
cinv x11, x11, cc
cneg x24, x24, cc
adds x4, x14, x6
adcs x14, x7, x14
mul x3, x12, x24
adcs x7, x22, x7
adcs x22, x19, x22
umulh x12, x12, x24
adc x24, xzr, x19
adds x19, x14, x6
eor x3, x3, x11
adcs x26, x7, x4
adcs x14, x22, x14
adcs x25, x24, x7
adcs x23, xzr, x22
eor x7, x12, x11
adc x12, xzr, x24
subs x22, x21, x10
cneg x24, x22, cc
csetm x22, cc
subs x15, x5, x16
cinv x22, x22, cc
cneg x15, x15, cc
cmn x11, #0x1
adcs x3, x25, x3
mul x25, x24, x15
adcs x23, x23, x7
adc x11, x12, x11
subs x7, x10, x13
umulh x15, x24, x15
cneg x12, x7, cc
csetm x7, cc
eor x24, x25, x22
eor x25, x15, x22
cmn x22, #0x1
adcs x24, x4, x24
adcs x19, x19, x25
adcs x15, x26, x22
adcs x4, x14, x22
adcs x26, x3, x22
adcs x25, x23, x22
adc x23, x11, x22
subs x14, x21, x17
cneg x3, x14, cc
csetm x11, cc
subs x14, x8, x5
cneg x14, x14, cc
cinv x7, x7, cc
subs x13, x21, x13
cneg x21, x13, cc
csetm x13, cc
mul x22, x12, x14
subs x8, x8, x16
cinv x13, x13, cc
umulh x14, x12, x14
cneg x12, x8, cc
subs x8, x20, x16
cneg x8, x8, cc
cinv x16, x11, cc
eor x22, x22, x7
cmn x7, #0x1
eor x14, x14, x7
adcs x4, x4, x22
mul x11, x3, x8
adcs x22, x26, x14
adcs x14, x25, x7
eor x25, x24, x9
adc x26, x23, x7
umulh x7, x3, x8
subs x17, x10, x17
cneg x24, x17, cc
eor x3, x11, x16
csetm x11, cc
subs x20, x20, x5
cneg x5, x20, cc
cinv x11, x11, cc
cmn x16, #0x1
mul x17, x21, x12
eor x8, x7, x16
adcs x10, x19, x3
and x19, x9, #0x1ff
adcs x20, x15, x8
umulh x15, x21, x12
eor x12, x10, x9
eor x8, x6, x9
adcs x6, x4, x16
adcs x4, x22, x16
adcs x21, x14, x16
adc x7, x26, x16
mul x10, x24, x5
cmn x13, #0x1
ldp x3, x14, [x1]
eor x17, x17, x13
umulh x5, x24, x5
adcs x20, x20, x17
eor x17, x15, x13
adcs x16, x6, x17
eor x22, x10, x11
adcs x23, x4, x13
extr x10, x14, x3, #52
and x26, x3, #0xfffffffffffff
adcs x24, x21, x13
and x15, x10, #0xfffffffffffff
adc x6, x7, x13
cmn x11, #0x1
adcs x17, x20, x22
eor x4, x5, x11
ldp x21, x10, [sp]
adcs x7, x16, x4
eor x16, x17, x9
eor x13, x7, x9
ldp x3, x17, [sp, #16]
adcs x7, x23, x11
eor x23, x7, x9
ldp x5, x22, [sp, #32]
adcs x7, x24, x11
adc x24, x6, x11
ldr x6, [x2, #64]
adds x20, x8, x21
lsl x11, x20, #9
eor x4, x7, x9
orr x7, x11, x19
eor x8, x24, x9
adcs x11, x25, x10
mul x26, x6, x26
ldp x19, x24, [sp, #48]
adcs x12, x12, x3
adcs x16, x16, x17
adcs x9, x13, x5
ldr x25, [sp, #64]
extr x20, x11, x20, #55
adcs x13, x23, x22
adcs x4, x4, x19
extr x23, x12, x11, #55
adcs x8, x8, x24
adc x11, x25, xzr
adds x21, x9, x21
extr x9, x16, x12, #55
lsr x12, x16, #55
adcs x10, x13, x10
mul x15, x6, x15
adcs x13, x4, x3
ldp x16, x4, [x2]
ldr x3, [x1, #64]
adcs x17, x8, x17
adcs x5, x5, x7
adcs x20, x22, x20
adcs x8, x19, x23
and x22, x16, #0xfffffffffffff
ldp x19, x7, [x1, #16]
adcs x9, x24, x9
extr x24, x4, x16, #52
adc x16, x12, x25
mul x22, x3, x22
and x25, x24, #0xfffffffffffff
extr x14, x19, x14, #40
and x12, x14, #0xfffffffffffff
extr x23, x7, x19, #28
ldp x19, x24, [x2, #16]
mul x14, x3, x25
and x23, x23, #0xfffffffffffff
add x22, x26, x22
lsl x11, x11, #48
lsr x26, x22, #52
lsl x25, x22, #12
mul x22, x6, x12
extr x12, x19, x4, #40
add x4, x15, x14
mul x15, x6, x23
add x4, x4, x26
extr x23, x24, x19, #28
ldp x14, x19, [x1, #32]
and x26, x12, #0xfffffffffffff
extr x12, x4, x25, #12
and x25, x23, #0xfffffffffffff
adds x21, x21, x12
mul x12, x3, x26
extr x23, x14, x7, #16
and x23, x23, #0xfffffffffffff
mul x7, x3, x25
ldp x25, x26, [x2, #32]
add x12, x22, x12
extr x22, x19, x14, #56
mul x23, x6, x23
lsr x14, x14, #4
extr x24, x25, x24, #16
add x7, x15, x7
and x15, x24, #0xfffffffffffff
and x22, x22, #0xfffffffffffff
lsr x24, x4, #52
mul x15, x3, x15
and x14, x14, #0xfffffffffffff
add x12, x12, x24
lsl x24, x4, #12
lsr x4, x12, #52
extr x24, x12, x24, #24
adcs x10, x10, x24
lsl x24, x12, #12
add x12, x7, x4
mul x22, x6, x22
add x4, x23, x15
extr x7, x12, x24, #36
adcs x13, x13, x7
lsl x15, x12, #12
add x7, x4, x11
lsr x24, x12, #52
ldp x23, x11, [x2, #48]
add x4, x7, x24
mul x12, x6, x14
extr x7, x26, x25, #56
extr x14, x4, x15, #48
and x2, x7, #0xfffffffffffff
extr x24, x11, x23, #32
ldp x15, x7, [x1, #48]
and x1, x24, #0xfffffffffffff
lsr x24, x4, #52
mul x2, x3, x2
extr x26, x23, x26, #44
lsr x23, x25, #4
and x23, x23, #0xfffffffffffff
and x25, x26, #0xfffffffffffff
extr x26, x7, x15, #32
extr x19, x15, x19, #44
mul x23, x3, x23
and x15, x26, #0xfffffffffffff
lsl x26, x4, #12
and x4, x19, #0xfffffffffffff
lsr x11, x11, #20
mul x19, x6, x4
adcs x17, x17, x14
add x14, x22, x2
add x22, x12, x23
lsr x7, x7, #20
add x22, x22, x24
extr x2, x22, x26, #60
mul x24, x3, x25
lsr x22, x22, #52
add x14, x14, x22
lsl x22, x2, #8
extr x22, x14, x22, #8
lsl x2, x14, #12
mul x1, x3, x1
adcs x12, x5, x22
mul x5, x6, x15
and x26, x10, x13
and x4, x26, x17
add x23, x19, x24
lsr x14, x14, #52
mul x22, x3, x11
add x11, x23, x14
extr x25, x11, x2, #20
lsl x19, x11, #12
adcs x25, x20, x25
and x14, x4, x12
add x1, x5, x1
and x14, x14, x25
mul x15, x6, x7
add x26, x15, x22
mul x6, x6, x3
lsr x22, x11, #52
add x4, x1, x22
lsr x1, x4, #52
extr x3, x4, x19, #32
lsl x15, x4, #12
add x7, x26, x1
adcs x23, x8, x3
extr x20, x7, x15, #44
and x3, x14, x23
lsr x19, x7, #44
adcs x7, x9, x20
add x11, x6, x19
adc x4, x16, x11
lsr x14, x4, #9
cmp xzr, xzr
and x15, x3, x7
orr x3, x4, #0xfffffffffffffe00
adcs xzr, x21, x14
adcs xzr, x15, xzr
adcs xzr, x3, xzr
adcs x11, x21, x14
and x14, x11, #0x1ff
adcs x1, x10, xzr
extr x10, x1, x11, #9
str x14, [x0, #64]
adcs x14, x13, xzr
extr x11, x14, x1, #9
adcs x1, x17, xzr
extr x4, x1, x14, #9
stp x10, x11, [x0]
adcs x11, x12, xzr
extr x14, x11, x1, #9
adcs x10, x25, xzr
extr x11, x10, x11, #9
stp x4, x14, [x0, #16]
adcs x14, x23, xzr
extr x10, x14, x10, #9
adcs x1, x7, xzr
stp x11, x10, [x0, #32]
extr x14, x1, x14, #9
adc x10, x3, xzr
extr x26, x10, x1, #9
stp x14, x26, [x0, #48]
// Restore regs and return
add sp, sp, #80
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_montmul_p521_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery multiply, z := (x * y / 2^576) mod p_521
// Inputs x[9], y[9]; output z[9]
//
// extern void bignum_montmul_p521_alt
// (uint64_t z[static 9], uint64_t x[static 9], uint64_t y[static 9]);
//
// Does z := (x * y / 2^576) mod p_521, assuming x < p_521, y < p_521. This
// means the Montgomery base is the "native size" 2^{9*64} = 2^576; since
// p_521 is a Mersenne prime the basic modular multiplication bignum_mul_p521
// can be considered a Montgomery operation to base 2^521.
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------
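// Since p_521 = 2^521 - 1 is a Mersenne prime, 2^576 = 2^55 * 2^521 == 2^55
// (mod p_521), so dividing by the Montgomery base 2^576 is the same as a
// 55-bit right rotation of the reduced product within 521 bits (this is the
// "Montgomery ingredient" applied near the end of the code below). A minimal,
// hypothetical C analogue of this identity, using the small Mersenne prime
// 2^13 - 1 in place of p_521 and 2^16 in place of 2^576 (so the rotation is
// by 16 - 13 = 3 bits rather than 55):
//
// #include <assert.h>
// #include <stdint.h>
//
// #define P ((1u << 13) - 1)   /* 8191, a Mersenne prime standing in for p_521 */
//
// /* Rotate a residue in [0, P) right by n bits within 13 bits. */
// static uint32_t rotr13(uint32_t x, unsigned n) {
//     return ((x >> n) | (x << (13 - n))) & P;
// }
//
// int main(void) {
//     for (uint32_t x = 0; x < P; x += 97) {
//         for (uint32_t y = 0; y < P; y += 101) {
//             uint32_t prod = (uint32_t)((uint64_t)x * y % P);
//             /* x * y / 2^16 mod P: 2^-16 == 2^-3 == 2^10 == 1024 (mod P) */
//             uint32_t mont = (uint32_t)((uint64_t)prod * 1024 % P);
//             assert(mont == rotr13(prod, 3));
//         }
//     }
//     return 0;
// }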
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p521_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p521_alt)
.text
.balign 4
#define z x0
#define x x1
#define y x2
// These are repeated mod 2 as we load pairs of inputs
#define a0 x3
#define a1 x4
#define a2 x3
#define a3 x4
#define a4 x3
#define a5 x4
#define a6 x3
#define a7 x4
#define a8 x3
#define b0 x5
#define b1 x6
#define b2 x7
#define b3 x8
#define b4 x9
#define b5 x10
#define b6 x11
#define b7 x12
#define b8 x13
#define t x14
// These repeat mod 11 as we stash some intermediate results in the
// temporary stack buffer.
#define u0 x15
#define u1 x16
#define u2 x17
#define u3 x19
#define u4 x20
#define u5 x21
#define u6 x22
#define u7 x23
#define u8 x24
#define u9 x25
#define u10 x26
#define u11 x15
#define u12 x16
#define u13 x17
#define u14 x19
#define u15 x20
#define u16 x21
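// Rows 0 through 8 below accumulate the full 18-limb schoolbook product of
// x and y before any reduction. A rough, hypothetical C sketch of the same
// product (using unsigned __int128 for the 64x64->128 partial products where
// the assembly uses separate mul/umulh passes):
//
// static void schoolbook_9x9(uint64_t u[18], const uint64_t a[9], const uint64_t b[9]) {
//     for (int i = 0; i < 18; i++) u[i] = 0;
//     for (int i = 0; i < 9; i++) {                   /* one "row" per limb of a */
//         uint64_t carry = 0;
//         for (int j = 0; j < 9; j++) {
//             unsigned __int128 t =
//                 (unsigned __int128)a[i] * b[j] + u[i + j] + carry;
//             u[i + j] = (uint64_t)t;
//             carry = (uint64_t)(t >> 64);
//         }
//         u[i + 9] = carry;
//     }
// }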
S2N_BN_SYMBOL(bignum_montmul_p521_alt):
// Save more registers and make space for the temporary buffer
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
sub sp, sp, #64
// Load operands and set up row 0 = [u9;...;u0] = a0 * [b8;...;b0]
ldp a0, a1, [x]
ldp b0, b1, [y]
mul u0, a0, b0
umulh u1, a0, b0
mul t, a0, b1
umulh u2, a0, b1
adds u1, u1, t
ldp b2, b3, [y, #16]
mul t, a0, b2
umulh u3, a0, b2
adcs u2, u2, t
mul t, a0, b3
umulh u4, a0, b3
adcs u3, u3, t
ldp b4, b5, [y, #32]
mul t, a0, b4
umulh u5, a0, b4
adcs u4, u4, t
mul t, a0, b5
umulh u6, a0, b5
adcs u5, u5, t
ldp b6, b7, [y, #48]
mul t, a0, b6
umulh u7, a0, b6
adcs u6, u6, t
ldr b8, [y, #64]
mul t, a0, b7
umulh u8, a0, b7
adcs u7, u7, t
mul t, a0, b8
umulh u9, a0, b8
adcs u8, u8, t
adc u9, u9, xzr
// Row 1 = [u10;...;u0] = [a1;a0] * [b8;...;b0]
mul t, a1, b0
adds u1, u1, t
mul t, a1, b1
adcs u2, u2, t
mul t, a1, b2
adcs u3, u3, t
mul t, a1, b3
adcs u4, u4, t
mul t, a1, b4
adcs u5, u5, t
mul t, a1, b5
adcs u6, u6, t
mul t, a1, b6
adcs u7, u7, t
mul t, a1, b7
adcs u8, u8, t
mul t, a1, b8
adcs u9, u9, t
cset u10, cs
umulh t, a1, b0
adds u2, u2, t
umulh t, a1, b1
adcs u3, u3, t
umulh t, a1, b2
adcs u4, u4, t
umulh t, a1, b3
adcs u5, u5, t
umulh t, a1, b4
adcs u6, u6, t
umulh t, a1, b5
adcs u7, u7, t
umulh t, a1, b6
adcs u8, u8, t
umulh t, a1, b7
adcs u9, u9, t
umulh t, a1, b8
adc u10, u10, t
stp u0, u1, [sp]
// Row 2 = [u11;...;u0] = [a2;a1;a0] * [b8;...;b0]
ldp a2, a3, [x, #16]
mul t, a2, b0
adds u2, u2, t
mul t, a2, b1
adcs u3, u3, t
mul t, a2, b2
adcs u4, u4, t
mul t, a2, b3
adcs u5, u5, t
mul t, a2, b4
adcs u6, u6, t
mul t, a2, b5
adcs u7, u7, t
mul t, a2, b6
adcs u8, u8, t
mul t, a2, b7
adcs u9, u9, t
mul t, a2, b8
adcs u10, u10, t
cset u11, cs
umulh t, a2, b0
adds u3, u3, t
umulh t, a2, b1
adcs u4, u4, t
umulh t, a2, b2
adcs u5, u5, t
umulh t, a2, b3
adcs u6, u6, t
umulh t, a2, b4
adcs u7, u7, t
umulh t, a2, b5
adcs u8, u8, t
umulh t, a2, b6
adcs u9, u9, t
umulh t, a2, b7
adcs u10, u10, t
umulh t, a2, b8
adc u11, u11, t
// Row 3 = [u12;...;u0] = [a3;a2;a1;a0] * [b8;...;b0]
mul t, a3, b0
adds u3, u3, t
mul t, a3, b1
adcs u4, u4, t
mul t, a3, b2
adcs u5, u5, t
mul t, a3, b3
adcs u6, u6, t
mul t, a3, b4
adcs u7, u7, t
mul t, a3, b5
adcs u8, u8, t
mul t, a3, b6
adcs u9, u9, t
mul t, a3, b7
adcs u10, u10, t
mul t, a3, b8
adcs u11, u11, t
cset u12, cs
umulh t, a3, b0
adds u4, u4, t
umulh t, a3, b1
adcs u5, u5, t
umulh t, a3, b2
adcs u6, u6, t
umulh t, a3, b3
adcs u7, u7, t
umulh t, a3, b4
adcs u8, u8, t
umulh t, a3, b5
adcs u9, u9, t
umulh t, a3, b6
adcs u10, u10, t
umulh t, a3, b7
adcs u11, u11, t
umulh t, a3, b8
adc u12, u12, t
stp u2, u3, [sp, #16]
// Row 4 = [u13;...;u0] = [a4;a3;a2;a1;a0] * [b8;...;b0]
ldp a4, a5, [x, #32]
mul t, a4, b0
adds u4, u4, t
mul t, a4, b1
adcs u5, u5, t
mul t, a4, b2
adcs u6, u6, t
mul t, a4, b3
adcs u7, u7, t
mul t, a4, b4
adcs u8, u8, t
mul t, a4, b5
adcs u9, u9, t
mul t, a4, b6
adcs u10, u10, t
mul t, a4, b7
adcs u11, u11, t
mul t, a4, b8
adcs u12, u12, t
cset u13, cs
umulh t, a4, b0
adds u5, u5, t
umulh t, a4, b1
adcs u6, u6, t
umulh t, a4, b2
adcs u7, u7, t
umulh t, a4, b3
adcs u8, u8, t
umulh t, a4, b4
adcs u9, u9, t
umulh t, a4, b5
adcs u10, u10, t
umulh t, a4, b6
adcs u11, u11, t
umulh t, a4, b7
adcs u12, u12, t
umulh t, a4, b8
adc u13, u13, t
// Row 5 = [u14;...;u0] = [a5;a4;a3;a2;a1;a0] * [b8;...;b0]
mul t, a5, b0
adds u5, u5, t
mul t, a5, b1
adcs u6, u6, t
mul t, a5, b2
adcs u7, u7, t
mul t, a5, b3
adcs u8, u8, t
mul t, a5, b4
adcs u9, u9, t
mul t, a5, b5
adcs u10, u10, t
mul t, a5, b6
adcs u11, u11, t
mul t, a5, b7
adcs u12, u12, t
mul t, a5, b8
adcs u13, u13, t
cset u14, cs
umulh t, a5, b0
adds u6, u6, t
umulh t, a5, b1
adcs u7, u7, t
umulh t, a5, b2
adcs u8, u8, t
umulh t, a5, b3
adcs u9, u9, t
umulh t, a5, b4
adcs u10, u10, t
umulh t, a5, b5
adcs u11, u11, t
umulh t, a5, b6
adcs u12, u12, t
umulh t, a5, b7
adcs u13, u13, t
umulh t, a5, b8
adc u14, u14, t
stp u4, u5, [sp, #32]
// Row 6 = [u15;...;u0] = [a6;a5;a4;a3;a2;a1;a0] * [b8;...;b0]
ldp a6, a7, [x, #48]
mul t, a6, b0
adds u6, u6, t
mul t, a6, b1
adcs u7, u7, t
mul t, a6, b2
adcs u8, u8, t
mul t, a6, b3
adcs u9, u9, t
mul t, a6, b4
adcs u10, u10, t
mul t, a6, b5
adcs u11, u11, t
mul t, a6, b6
adcs u12, u12, t
mul t, a6, b7
adcs u13, u13, t
mul t, a6, b8
adcs u14, u14, t
cset u15, cs
umulh t, a6, b0
adds u7, u7, t
umulh t, a6, b1
adcs u8, u8, t
umulh t, a6, b2
adcs u9, u9, t
umulh t, a6, b3
adcs u10, u10, t
umulh t, a6, b4
adcs u11, u11, t
umulh t, a6, b5
adcs u12, u12, t
umulh t, a6, b6
adcs u13, u13, t
umulh t, a6, b7
adcs u14, u14, t
umulh t, a6, b8
adc u15, u15, t
// Row 7 = [u16;...;u0] = [a7;a6;a5;a4;a3;a2;a1;a0] * [b8;...;b0]
mul t, a7, b0
adds u7, u7, t
mul t, a7, b1
adcs u8, u8, t
mul t, a7, b2
adcs u9, u9, t
mul t, a7, b3
adcs u10, u10, t
mul t, a7, b4
adcs u11, u11, t
mul t, a7, b5
adcs u12, u12, t
mul t, a7, b6
adcs u13, u13, t
mul t, a7, b7
adcs u14, u14, t
mul t, a7, b8
adcs u15, u15, t
cset u16, cs
umulh t, a7, b0
adds u8, u8, t
umulh t, a7, b1
adcs u9, u9, t
umulh t, a7, b2
adcs u10, u10, t
umulh t, a7, b3
adcs u11, u11, t
umulh t, a7, b4
adcs u12, u12, t
umulh t, a7, b5
adcs u13, u13, t
umulh t, a7, b6
adcs u14, u14, t
umulh t, a7, b7
adcs u15, u15, t
umulh t, a7, b8
adc u16, u16, t
stp u6, u7, [sp, #48]
// Row 8 = [u16;...;u0] = [a8;a7;a6;a5;a4;a3;a2;a1;a0] * [b8;...;b0]
ldr a8, [x, #64]
mul t, a8, b0
adds u8, u8, t
mul t, a8, b1
adcs u9, u9, t
mul t, a8, b2
adcs u10, u10, t
mul t, a8, b3
adcs u11, u11, t
mul t, a8, b4
adcs u12, u12, t
mul t, a8, b5
adcs u13, u13, t
mul t, a8, b6
adcs u14, u14, t
mul t, a8, b7
adcs u15, u15, t
mul t, a8, b8
adc u16, u16, t
umulh t, a8, b0
adds u9, u9, t
umulh t, a8, b1
adcs u10, u10, t
umulh t, a8, b2
adcs u11, u11, t
umulh t, a8, b3
adcs u12, u12, t
umulh t, a8, b4
adcs u13, u13, t
umulh t, a8, b5
adcs u14, u14, t
umulh t, a8, b6
adcs u15, u15, t
umulh t, a8, b7
adc u16, u16, t
// Now we have the full product, which we consider as
// 2^521 * h + l. Form h + l + 1
subs xzr, xzr, xzr
ldp b0, b1, [sp]
extr t, u9, u8, #9
adcs b0, b0, t
extr t, u10, u9, #9
adcs b1, b1, t
ldp b2, b3, [sp, #16]
extr t, u11, u10, #9
adcs b2, b2, t
extr t, u12, u11, #9
adcs b3, b3, t
ldp b4, b5, [sp, #32]
extr t, u13, u12, #9
adcs b4, b4, t
extr t, u14, u13, #9
adcs b5, b5, t
ldp b6, b7, [sp, #48]
extr t, u15, u14, #9
adcs b6, b6, t
extr t, u16, u15, #9
adcs b7, b7, t
orr b8, u8, #~0x1FF
lsr t, u16, #9
adcs b8, b8, t
// Now CF is set if h + l + 1 >= 2^521, which means it's already
// the answer, while if ~CF the answer is h + l so we should subtract
// 1 (all considered in 521 bits). Hence subtract ~CF and mask.
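// The same trick in miniature, as a hypothetical C sketch with p = 2^13 - 1
// standing in for p_521 (h and l play the roles of the high and low 521-bit
// halves of the product; both are assumed to be < p here for simplicity):
//
// #include <assert.h>
// #include <stdint.h>
//
// #define P ((1u << 13) - 1)
//
// static uint32_t reduce_hl(uint32_t h, uint32_t l) {
//     uint32_t s = h + l + 1;     /* form h + l + 1 */
//     uint32_t cf = s >> 13;      /* "CF": 1 iff h + l + 1 >= 2^13 */
//     s -= 1 - cf;                /* subtract ~CF: undo the +1 unless we wrapped */
//     return s & P;               /* mask away the 2^13 == 1 (mod P) bit */
// }
//
// int main(void) {
//     for (uint32_t h = 0; h < P; h += 23)
//         for (uint32_t l = 0; l < P; l += 29)
//             assert(reduce_hl(h, l) == (h + l) % P);
//     return 0;
// }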
sbcs b0, b0, xzr
sbcs b1, b1, xzr
sbcs b2, b2, xzr
sbcs b3, b3, xzr
sbcs b4, b4, xzr
sbcs b5, b5, xzr
sbcs b6, b6, xzr
sbcs b7, b7, xzr
sbc b8, b8, xzr
and b8, b8, #0x1FF
// So far, this has been the same as a pure modular multiplication.
// Now finally the Montgomery ingredient, which is just a 521-bit
// rotation by 9*64 - 521 = 55 bits right.
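// A hypothetical C rendering of the limb-level rotation performed by the
// lsl/extr/lsr sequence below (little-endian 64-bit limbs d[0..8], with
// d[8] < 2^9 holding the top 9 bits):
//
// #include <stdint.h>
//
// static void rotr521_55(uint64_t d[9]) {
//     uint64_t hi = (d[0] << 9) | d[8]; /* top 9 bits, wrapped-around low 55 bits above them */
//     for (int i = 0; i < 7; i++)
//         d[i] = (d[i] >> 55) | (d[i + 1] << 9);
//     d[7] = (d[7] >> 55) | (hi << 9);
//     d[8] = hi >> 55;
// }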
lsl t, b0, #9
extr b0, b1, b0, #55
extr b1, b2, b1, #55
extr b2, b3, b2, #55
extr b3, b4, b3, #55
orr b8, b8, t
extr b4, b5, b4, #55
extr b5, b6, b5, #55
extr b6, b7, b6, #55
extr b7, b8, b7, #55
lsr b8, b8, #55
// Store back digits of final result
stp b0, b1, [z]
stp b2, b3, [z, #16]
stp b4, b5, [z, #32]
stp b6, b7, [z, #48]
str b8, [z, #64]
// Restore registers
add sp, sp, #64
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/p521_jmixadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point mixed addition on NIST curve P-521 in Jacobian coordinates
//
// extern void p521_jmixadd
// (uint64_t p3[static 27],uint64_t p1[static 27],uint64_t p2[static 18]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// The "mixed" part means that p2 only has x and y coordinates, with the
// implicit z coordinate assumed to be the identity. It is assumed that
// all the coordinates of the input points p1 and p2 are fully reduced
// mod p_521, that the z coordinate of p1 is nonzero and that neither
// p1 =~= p2 nor p1 =~= -p2, where "=~=" means "represents the same affine
// point as".
//
// Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2
// ----------------------------------------------------------------------------
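// Informally, the main sequence of field operations below amounts to the
// following (all arithmetic mod p_521, writing (x1,y1,z1) for p1 and (x2,y2)
// for p2; the degenerate case z1 = 0 is handled by the multiplexing at the end):
//
//     zp2  = z1^2          x2a  = x2 * zp2       y2a = y2 * z1 * zp2
//     xd   = x2a - x1      yd   = y2a - y1
//     zz   = xd^2          ww   = yd^2
//     zzx1 = zz * x1       zzx2 = zz * x2a
//     x3   = ww - zzx1 - zzx2
//     y3   = yd * (zzx1 - x3) - y1 * (zzx2 - zzx1)
//     z3   = xd * z1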
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jmixadd)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jmixadd)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 72
// Stable homes for input arguments during main code sequence
#define input_z x26
#define input_x x27
#define input_y x28
// Pointer-offset pairs for inputs and outputs
#define x_1 input_x, #0
#define y_1 input_x, #NUMSIZE
#define z_1 input_x, #(2*NUMSIZE)
#define x_2 input_y, #0
#define y_2 input_y, #NUMSIZE
#define x_3 input_z, #0
#define y_3 input_z, #NUMSIZE
#define z_3 input_z, #(2*NUMSIZE)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define zp2 sp, #(NUMSIZE*0)
#define ww sp, #(NUMSIZE*0)
#define resx sp, #(NUMSIZE*0)
#define yd sp, #(NUMSIZE*1)
#define y2a sp, #(NUMSIZE*1)
#define x2a sp, #(NUMSIZE*2)
#define zzx2 sp, #(NUMSIZE*2)
#define zz sp, #(NUMSIZE*3)
#define t1 sp, #(NUMSIZE*3)
#define t2 sp, #(NUMSIZE*4)
#define zzx1 sp, #(NUMSIZE*4)
#define resy sp, #(NUMSIZE*4)
#define xd sp, #(NUMSIZE*5)
#define resz sp, #(NUMSIZE*5)
#define tmp sp, #(NUMSIZE*6)
#define NSPACE (NUMSIZE*7+8)
// For the three field operations, we use subroutines rather than inlining,
// calling local code very close to bignum_mul_p521, bignum_sqr_p521
// and bignum_sub_p521.
#define mul_p521(P0,P1,P2) \
add x0, P0; \
add x1, P1; \
add x2, P2; \
bl local_mul_p521
#define sqr_p521(P0,P1) \
add x0, P0; \
add x1, P1; \
bl local_sqr_p521
#define sub_p521(P0,P1,P2) \
add x0, P0; \
add x1, P1; \
add x2, P2; \
bl local_sub_p521
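// Each macro just materializes the operand's pointer-offset pair into x0/x1/x2
// and branches to the shared local subroutine; for instance, sqr_p521(zp2,z_1)
// loads x0 with sp + NUMSIZE*0 and x1 with input_x + 2*NUMSIZE before doing
// bl local_sqr_p521.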
S2N_BN_SYMBOL(p521_jmixadd):
// Save regs and make room on stack for temporary variables
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x29, x30, [sp, #-16]!
sub sp, sp, NSPACE
// Move the input arguments to stable places
mov input_z, x0
mov input_x, x1
mov input_y, x2
// Main code, just a sequence of basic field operations
sqr_p521(zp2,z_1)
mul_p521(y2a,z_1,y_2)
mul_p521(x2a,zp2,x_2)
mul_p521(y2a,zp2,y2a)
sub_p521(xd,x2a,x_1)
sub_p521(yd,y2a,y_1)
sqr_p521(zz,xd)
sqr_p521(ww,yd)
mul_p521(zzx1,zz,x_1)
mul_p521(zzx2,zz,x2a)
sub_p521(resx,ww,zzx1)
sub_p521(t1,zzx2,zzx1)
mul_p521(resz,xd,z_1)
sub_p521(resx,resx,zzx2)
sub_p521(t2,zzx1,resx)
mul_p521(t1,t1,y_1)
mul_p521(t2,yd,t2)
sub_p521(resy,t2,t1)
// Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence)
ldp x0, x1, [z_1]
orr x0, x0, x1
ldp x2, x3, [z_1+16]
orr x2, x2, x3
ldp x4, x5, [z_1+32]
orr x4, x4, x5
ldp x6, x7, [z_1+48]
orr x6, x6, x7
ldr x8, [z_1+64]
orr x0, x0, x2
orr x4, x4, x6
orr x0, x0, x4
orr x0, x0, x8
cmp x0, xzr
// Multiplex: if p1 <> 0 just copy the computed result from the staging area.
// If p1 = 0 then return the point p2 augmented with an extra z = 1
// coordinate, hence giving 0 + p2 = p2 for the final result.
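// The csel chain below is a constant-time multiplex; limb by limb it behaves
// like this hypothetical C helper (sel = the freshly computed coordinate,
// alt = the fallback taken from p2, or the constant 1 for the z coordinate):
//
// #include <stdint.h>
//
// static uint64_t mux(uint64_t p1_nonzero, uint64_t sel, uint64_t alt) {
//     uint64_t mask = (uint64_t)0 - (p1_nonzero != 0);  /* all-ones iff p1 != 0 */
//     return (sel & mask) | (alt & ~mask);
// }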
ldp x0, x1, [resx]
ldp x20, x21, [x_2]
csel x0, x0, x20, ne
csel x1, x1, x21, ne
ldp x2, x3, [resx+16]
ldp x20, x21, [x_2+16]
csel x2, x2, x20, ne
csel x3, x3, x21, ne
ldp x4, x5, [resx+32]
ldp x20, x21, [x_2+32]
csel x4, x4, x20, ne
csel x5, x5, x21, ne
ldp x6, x7, [resx+48]
ldp x20, x21, [x_2+48]
csel x6, x6, x20, ne
csel x7, x7, x21, ne
ldr x8, [resx+64]
ldr x20, [x_2+64]
csel x8, x8, x20, ne
ldp x10, x11, [resy]
ldp x20, x21, [y_2]
csel x10, x10, x20, ne
csel x11, x11, x21, ne
ldp x12, x13, [resy+16]
ldp x20, x21, [y_2+16]
csel x12, x12, x20, ne
csel x13, x13, x21, ne
ldp x14, x15, [resy+32]
ldp x20, x21, [y_2+32]
csel x14, x14, x20, ne
csel x15, x15, x21, ne
ldp x16, x17, [resy+48]
ldp x20, x21, [y_2+48]
csel x16, x16, x20, ne
csel x17, x17, x21, ne
ldr x19, [resy+64]
ldr x20, [y_2+64]
csel x19, x19, x20, ne
stp x0, x1, [x_3]
stp x2, x3, [x_3+16]
stp x4, x5, [x_3+32]
stp x6, x7, [x_3+48]
str x8, [x_3+64]
stp x10, x11, [y_3]
stp x12, x13, [y_3+16]
stp x14, x15, [y_3+32]
stp x16, x17, [y_3+48]
str x19, [y_3+64]
ldp x0, x1, [resz]
mov x20, #1
csel x0, x0, x20, ne
csel x1, x1, xzr, ne
ldp x2, x3, [resz+16]
csel x2, x2, xzr, ne
csel x3, x3, xzr, ne
ldp x4, x5, [resz+32]
csel x4, x4, xzr, ne
csel x5, x5, xzr, ne
ldp x6, x7, [resz+48]
csel x6, x6, xzr, ne
csel x7, x7, xzr, ne
ldr x8, [resz+64]
csel x8, x8, xzr, ne
stp x0, x1, [z_3]
stp x2, x3, [z_3+16]
stp x4, x5, [z_3+32]
stp x6, x7, [z_3+48]
str x8, [z_3+64]
// Restore stack and registers
add sp, sp, NSPACE
ldp x29, x30, [sp], 16
ldp x27, x28, [sp], 16
ldp x25, x26, [sp], 16
ldp x23, x24, [sp], 16
ldp x21, x22, [sp], 16
ldp x19, x20, [sp], 16
ret
// Local versions of the three field operations, almost identical to
// bignum_mul_p521, bignum_sqr_p521 and bignum_sub_p521 except for
// avoiding all initial register save-restore, and in the case of
// local_mul_p521, using the tmp buffer as temporary storage and
// avoiding x26.
local_mul_p521:
ldp x3, x4, [x1]
ldp x5, x6, [x1, #16]
ldp x7, x8, [x2]
ldp x9, x10, [x2, #16]
mul x11, x3, x7
mul x15, x4, x8
mul x16, x5, x9
mul x17, x6, x10
umulh x19, x3, x7
adds x15, x15, x19
umulh x19, x4, x8
adcs x16, x16, x19
umulh x19, x5, x9
adcs x17, x17, x19
umulh x19, x6, x10
adc x19, x19, xzr
adds x12, x15, x11
adcs x15, x16, x15
adcs x16, x17, x16
adcs x17, x19, x17
adc x19, xzr, x19
adds x13, x15, x11
adcs x14, x16, x12
adcs x15, x17, x15
adcs x16, x19, x16
adcs x17, xzr, x17
adc x19, xzr, x19
subs x24, x5, x6
cneg x24, x24, lo
csetm x21, lo
subs x22, x10, x9
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x16, x16, x23
eor x22, x22, x21
adcs x17, x17, x22
adc x19, x19, x21
subs x24, x3, x4
cneg x24, x24, lo
csetm x21, lo
subs x22, x8, x7
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x12, x12, x23
eor x22, x22, x21
adcs x13, x13, x22
adcs x14, x14, x21
adcs x15, x15, x21
adcs x16, x16, x21
adcs x17, x17, x21
adc x19, x19, x21
subs x24, x4, x6
cneg x24, x24, lo
csetm x21, lo
subs x22, x10, x8
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x15, x15, x23
eor x22, x22, x21
adcs x16, x16, x22
adcs x17, x17, x21
adc x19, x19, x21
subs x24, x3, x5
cneg x24, x24, lo
csetm x21, lo
subs x22, x9, x7
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x13, x13, x23
eor x22, x22, x21
adcs x14, x14, x22
adcs x15, x15, x21
adcs x16, x16, x21
adcs x17, x17, x21
adc x19, x19, x21
subs x24, x3, x6
cneg x24, x24, lo
csetm x21, lo
subs x22, x10, x7
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x14, x14, x23
eor x22, x22, x21
adcs x15, x15, x22
adcs x16, x16, x21
adcs x17, x17, x21
adc x19, x19, x21
subs x24, x4, x5
cneg x24, x24, lo
csetm x21, lo
subs x22, x9, x8
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x14, x14, x23
eor x22, x22, x21
adcs x15, x15, x22
adcs x16, x16, x21
adcs x17, x17, x21
adc x19, x19, x21
lsl x21, x11, #9
extr x11, x12, x11, #55
extr x12, x13, x12, #55
extr x13, x14, x13, #55
lsr x14, x14, #55
ldp x3, x4, [x1, #32]
ldp x5, x6, [x1, #48]
ldp x7, x8, [x2, #32]
ldp x9, x10, [x2, #48]
stp x15, x16, [tmp]
stp x17, x19, [tmp+16]
stp x21, x11, [tmp+32]
stp x12, x13, [tmp+48]
str x14, [tmp+64]
mul x11, x3, x7
mul x15, x4, x8
mul x16, x5, x9
mul x17, x6, x10
umulh x19, x3, x7
adds x15, x15, x19
umulh x19, x4, x8
adcs x16, x16, x19
umulh x19, x5, x9
adcs x17, x17, x19
umulh x19, x6, x10
adc x19, x19, xzr
adds x12, x15, x11
adcs x15, x16, x15
adcs x16, x17, x16
adcs x17, x19, x17
adc x19, xzr, x19
adds x13, x15, x11
adcs x14, x16, x12
adcs x15, x17, x15
adcs x16, x19, x16
adcs x17, xzr, x17
adc x19, xzr, x19
subs x24, x5, x6
cneg x24, x24, lo
csetm x21, lo
subs x22, x10, x9
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x16, x16, x23
eor x22, x22, x21
adcs x17, x17, x22
adc x19, x19, x21
subs x24, x3, x4
cneg x24, x24, lo
csetm x21, lo
subs x22, x8, x7
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x12, x12, x23
eor x22, x22, x21
adcs x13, x13, x22
adcs x14, x14, x21
adcs x15, x15, x21
adcs x16, x16, x21
adcs x17, x17, x21
adc x19, x19, x21
subs x24, x4, x6
cneg x24, x24, lo
csetm x21, lo
subs x22, x10, x8
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x15, x15, x23
eor x22, x22, x21
adcs x16, x16, x22
adcs x17, x17, x21
adc x19, x19, x21
subs x24, x3, x5
cneg x24, x24, lo
csetm x21, lo
subs x22, x9, x7
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x13, x13, x23
eor x22, x22, x21
adcs x14, x14, x22
adcs x15, x15, x21
adcs x16, x16, x21
adcs x17, x17, x21
adc x19, x19, x21
subs x24, x3, x6
cneg x24, x24, lo
csetm x21, lo
subs x22, x10, x7
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x14, x14, x23
eor x22, x22, x21
adcs x15, x15, x22
adcs x16, x16, x21
adcs x17, x17, x21
adc x19, x19, x21
subs x24, x4, x5
cneg x24, x24, lo
csetm x21, lo
subs x22, x9, x8
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x14, x14, x23
eor x22, x22, x21
adcs x15, x15, x22
adcs x16, x16, x21
adcs x17, x17, x21
adc x19, x19, x21
ldp x23, x22, [tmp]
adds x11, x11, x23
adcs x12, x12, x22
stp x11, x12, [tmp]
ldp x23, x22, [tmp+16]
adcs x13, x13, x23
adcs x14, x14, x22
stp x13, x14, [tmp+16]
ldp x23, x22, [tmp+32]
adcs x15, x15, x23
adcs x16, x16, x22
stp x15, x16, [tmp+32]
ldp x23, x22, [tmp+48]
adcs x17, x17, x23
adcs x19, x19, x22
stp x17, x19, [tmp+48]
ldr x21, [tmp+64]
adc x21, x21, xzr
str x21, [tmp+64]
ldp x23, x22, [x1]
subs x3, x3, x23
sbcs x4, x4, x22
ldp x23, x22, [x1, #16]
sbcs x5, x5, x23
sbcs x6, x6, x22
csetm x24, lo
ldp x23, x22, [x2]
subs x7, x23, x7
sbcs x8, x22, x8
ldp x23, x22, [x2, #16]
sbcs x9, x23, x9
sbcs x10, x22, x10
csetm x25, lo
eor x3, x3, x24
subs x3, x3, x24
eor x4, x4, x24
sbcs x4, x4, x24
eor x5, x5, x24
sbcs x5, x5, x24
eor x6, x6, x24
sbc x6, x6, x24
eor x7, x7, x25
subs x7, x7, x25
eor x8, x8, x25
sbcs x8, x8, x25
eor x9, x9, x25
sbcs x9, x9, x25
eor x10, x10, x25
sbc x10, x10, x25
eor x25, x25, x24
mul x11, x3, x7
mul x15, x4, x8
mul x16, x5, x9
mul x17, x6, x10
umulh x19, x3, x7
adds x15, x15, x19
umulh x19, x4, x8
adcs x16, x16, x19
umulh x19, x5, x9
adcs x17, x17, x19
umulh x19, x6, x10
adc x19, x19, xzr
adds x12, x15, x11
adcs x15, x16, x15
adcs x16, x17, x16
adcs x17, x19, x17
adc x19, xzr, x19
adds x13, x15, x11
adcs x14, x16, x12
adcs x15, x17, x15
adcs x16, x19, x16
adcs x17, xzr, x17
adc x19, xzr, x19
subs x24, x5, x6
cneg x24, x24, lo
csetm x21, lo
subs x22, x10, x9
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x16, x16, x23
eor x22, x22, x21
adcs x17, x17, x22
adc x19, x19, x21
subs x24, x3, x4
cneg x24, x24, lo
csetm x21, lo
subs x22, x8, x7
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x12, x12, x23
eor x22, x22, x21
adcs x13, x13, x22
adcs x14, x14, x21
adcs x15, x15, x21
adcs x16, x16, x21
adcs x17, x17, x21
adc x19, x19, x21
subs x24, x4, x6
cneg x24, x24, lo
csetm x21, lo
subs x22, x10, x8
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x15, x15, x23
eor x22, x22, x21
adcs x16, x16, x22
adcs x17, x17, x21
adc x19, x19, x21
subs x24, x3, x5
cneg x24, x24, lo
csetm x21, lo
subs x22, x9, x7
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x13, x13, x23
eor x22, x22, x21
adcs x14, x14, x22
adcs x15, x15, x21
adcs x16, x16, x21
adcs x17, x17, x21
adc x19, x19, x21
subs x24, x3, x6
cneg x24, x24, lo
csetm x21, lo
subs x22, x10, x7
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x14, x14, x23
eor x22, x22, x21
adcs x15, x15, x22
adcs x16, x16, x21
adcs x17, x17, x21
adc x19, x19, x21
subs x24, x4, x5
cneg x24, x24, lo
csetm x21, lo
subs x22, x9, x8
cneg x22, x22, lo
mul x23, x24, x22
umulh x22, x24, x22
cinv x21, x21, lo
cmn x21, #1
eor x23, x23, x21
adcs x14, x14, x23
eor x22, x22, x21
adcs x15, x15, x22
adcs x16, x16, x21
adcs x17, x17, x21
adc x19, x19, x21
ldp x3, x4, [tmp]
ldp x5, x6, [tmp+16]
eor x11, x11, x25
adds x11, x11, x3
eor x12, x12, x25
adcs x12, x12, x4
eor x13, x13, x25
adcs x13, x13, x5
eor x14, x14, x25
adcs x14, x14, x6
eor x15, x15, x25
ldp x7, x8, [tmp+32]
ldp x9, x10, [tmp+48]
ldr x20, [tmp+64]
adcs x15, x15, x7
eor x16, x16, x25
adcs x16, x16, x8
eor x17, x17, x25
adcs x17, x17, x9
eor x19, x19, x25
adcs x19, x19, x10
adc x21, x20, xzr
adds x15, x15, x3
adcs x16, x16, x4
adcs x17, x17, x5
adcs x19, x19, x6
and x25, x25, #0x1ff
lsl x24, x11, #9
orr x24, x24, x25
adcs x7, x7, x24
extr x24, x12, x11, #55
adcs x8, x8, x24
extr x24, x13, x12, #55
adcs x9, x9, x24
extr x24, x14, x13, #55
adcs x10, x10, x24
lsr x24, x14, #55
adc x20, x24, x20
ldr x6, [x2, #64]
ldp x3, x4, [x1]
and x23, x3, #0xfffffffffffff
mul x23, x6, x23
ldr x14, [x1, #64]
ldp x11, x12, [x2]
and x24, x11, #0xfffffffffffff
mul x24, x14, x24
add x23, x23, x24
extr x24, x4, x3, #52
and x24, x24, #0xfffffffffffff
mul x22, x6, x24
extr x24, x12, x11, #52
and x24, x24, #0xfffffffffffff
mul x24, x14, x24
add x22, x22, x24
lsr x24, x23, #52
add x22, x22, x24
lsl x23, x23, #12
extr x24, x22, x23, #12
adds x15, x15, x24
ldp x5, x3, [x1, #16]
ldp x13, x11, [x2, #16]
extr x24, x5, x4, #40
and x24, x24, #0xfffffffffffff
mul x23, x6, x24
extr x24, x13, x12, #40
and x24, x24, #0xfffffffffffff
mul x24, x14, x24
add x23, x23, x24
lsr x24, x22, #52
add x23, x23, x24
lsl x22, x22, #12
extr x24, x23, x22, #24
adcs x16, x16, x24
extr x24, x3, x5, #28
and x24, x24, #0xfffffffffffff
mul x22, x6, x24
extr x24, x11, x13, #28
and x24, x24, #0xfffffffffffff
mul x24, x14, x24
add x22, x22, x24
lsr x24, x23, #52
add x22, x22, x24
lsl x23, x23, #12
extr x24, x22, x23, #36
adcs x17, x17, x24
and x25, x16, x17
ldp x4, x5, [x1, #32]
ldp x12, x13, [x2, #32]
extr x24, x4, x3, #16
and x24, x24, #0xfffffffffffff
mul x23, x6, x24
extr x24, x12, x11, #16
and x24, x24, #0xfffffffffffff
mul x24, x14, x24
add x23, x23, x24
lsl x21, x21, #48
add x23, x23, x21
lsr x24, x22, #52
add x23, x23, x24
lsl x22, x22, #12
extr x24, x23, x22, #48
adcs x19, x19, x24
and x25, x25, x19
lsr x24, x4, #4
and x24, x24, #0xfffffffffffff
mul x22, x6, x24
lsr x24, x12, #4
and x24, x24, #0xfffffffffffff
mul x24, x14, x24
add x22, x22, x24
lsr x24, x23, #52
add x22, x22, x24
lsl x23, x23, #12
extr x21, x22, x23, #60
extr x24, x5, x4, #56
and x24, x24, #0xfffffffffffff
mul x23, x6, x24
extr x24, x13, x12, #56
and x24, x24, #0xfffffffffffff
mul x24, x14, x24
add x23, x23, x24
lsr x24, x22, #52
add x23, x23, x24
lsl x21, x21, #8
extr x24, x23, x21, #8
adcs x7, x7, x24
and x25, x25, x7
ldp x3, x4, [x1, #48]
ldp x11, x12, [x2, #48]
extr x24, x3, x5, #44
and x24, x24, #0xfffffffffffff
mul x22, x6, x24
extr x24, x11, x13, #44
and x24, x24, #0xfffffffffffff
mul x24, x14, x24
add x22, x22, x24
lsr x24, x23, #52
add x22, x22, x24
lsl x23, x23, #12
extr x24, x22, x23, #20
adcs x8, x8, x24
and x25, x25, x8
extr x24, x4, x3, #32
and x24, x24, #0xfffffffffffff
mul x23, x6, x24
extr x24, x12, x11, #32
and x24, x24, #0xfffffffffffff
mul x24, x14, x24
add x23, x23, x24
lsr x24, x22, #52
add x23, x23, x24
lsl x22, x22, #12
extr x24, x23, x22, #32
adcs x9, x9, x24
and x25, x25, x9
lsr x24, x4, #20
mul x22, x6, x24
lsr x24, x12, #20
mul x24, x14, x24
add x22, x22, x24
lsr x24, x23, #52
add x22, x22, x24
lsl x23, x23, #12
extr x24, x22, x23, #44
adcs x10, x10, x24
and x25, x25, x10
mul x24, x6, x14
lsr x22, x22, #44
add x24, x24, x22
adc x20, x20, x24
lsr x22, x20, #9
orr x20, x20, #0xfffffffffffffe00
cmp xzr, xzr
adcs xzr, x15, x22
adcs xzr, x25, xzr
adcs xzr, x20, xzr
adcs x15, x15, x22
adcs x16, x16, xzr
adcs x17, x17, xzr
adcs x19, x19, xzr
adcs x7, x7, xzr
adcs x8, x8, xzr
adcs x9, x9, xzr
adcs x10, x10, xzr
adc x20, x20, xzr
and x22, x15, #0x1ff
extr x15, x16, x15, #9
extr x16, x17, x16, #9
stp x15, x16, [x0]
extr x17, x19, x17, #9
extr x19, x7, x19, #9
stp x17, x19, [x0, #16]
extr x7, x8, x7, #9
extr x8, x9, x8, #9
stp x7, x8, [x0, #32]
extr x9, x10, x9, #9
extr x10, x20, x10, #9
stp x9, x10, [x0, #48]
str x22, [x0, #64]
ret
local_sqr_p521:
ldp x2, x3, [x1]
ldp x4, x5, [x1, #16]
ldp x6, x7, [x1, #32]
ldp x8, x9, [x1, #48]
mul x12, x6, x8
mul x17, x7, x9
umulh x22, x6, x8
subs x23, x6, x7
cneg x23, x23, cc
csetm x11, cc
subs x10, x9, x8
cneg x10, x10, cc
mul x16, x23, x10
umulh x10, x23, x10
cinv x11, x11, cc
eor x16, x16, x11
eor x10, x10, x11
adds x13, x12, x22
adc x22, x22, xzr
umulh x23, x7, x9
adds x13, x13, x17
adcs x22, x22, x23
adc x23, x23, xzr
adds x22, x22, x17
adc x23, x23, xzr
cmn x11, #0x1
adcs x13, x13, x16
adcs x22, x22, x10
adc x23, x23, x11
adds x12, x12, x12
adcs x13, x13, x13
adcs x22, x22, x22
adcs x23, x23, x23
adc x19, xzr, xzr
mul x10, x6, x6
mul x16, x7, x7
mul x21, x6, x7
umulh x11, x6, x6
umulh x17, x7, x7
umulh x20, x6, x7
adds x11, x11, x21
adcs x16, x16, x20
adc x17, x17, xzr
adds x11, x11, x21
adcs x16, x16, x20
adc x17, x17, xzr
adds x12, x12, x16
adcs x13, x13, x17
adcs x22, x22, xzr
adcs x23, x23, xzr
adc x19, x19, xzr
mul x14, x8, x8
mul x16, x9, x9
mul x21, x8, x9
umulh x15, x8, x8
umulh x17, x9, x9
umulh x20, x8, x9
adds x15, x15, x21
adcs x16, x16, x20
adc x17, x17, xzr
adds x15, x15, x21
adcs x16, x16, x20
adc x17, x17, xzr
adds x14, x14, x22
adcs x15, x15, x23
adcs x16, x16, x19
adc x17, x17, xzr
ldr x19, [x1, #64]
add x23, x19, x19
mul x19, x19, x19
and x21, x2, #0xfffffffffffff
mul x21, x23, x21
extr x20, x3, x2, #52
and x20, x20, #0xfffffffffffff
mul x20, x23, x20
lsr x22, x21, #52
add x20, x20, x22
lsl x21, x21, #12
extr x22, x20, x21, #12
adds x10, x10, x22
extr x21, x4, x3, #40
and x21, x21, #0xfffffffffffff
mul x21, x23, x21
lsr x22, x20, #52
add x21, x21, x22
lsl x20, x20, #12
extr x22, x21, x20, #24
adcs x11, x11, x22
extr x20, x5, x4, #28
and x20, x20, #0xfffffffffffff
mul x20, x23, x20
lsr x22, x21, #52
add x20, x20, x22
lsl x21, x21, #12
extr x22, x20, x21, #36
adcs x12, x12, x22
extr x21, x6, x5, #16
and x21, x21, #0xfffffffffffff
mul x21, x23, x21
lsr x22, x20, #52
add x21, x21, x22
lsl x20, x20, #12
extr x22, x21, x20, #48
adcs x13, x13, x22
lsr x20, x6, #4
and x20, x20, #0xfffffffffffff
mul x20, x23, x20
lsr x22, x21, #52
add x20, x20, x22
lsl x21, x21, #12
extr x24, x20, x21, #60
extr x21, x7, x6, #56
and x21, x21, #0xfffffffffffff
mul x21, x23, x21
lsr x22, x20, #52
add x21, x21, x22
lsl x24, x24, #8
extr x22, x21, x24, #8
adcs x14, x14, x22
extr x20, x8, x7, #44
and x20, x20, #0xfffffffffffff
mul x20, x23, x20
lsr x22, x21, #52
add x20, x20, x22
lsl x21, x21, #12
extr x22, x20, x21, #20
adcs x15, x15, x22
extr x21, x9, x8, #32
and x21, x21, #0xfffffffffffff
mul x21, x23, x21
lsr x22, x20, #52
add x21, x21, x22
lsl x20, x20, #12
extr x22, x21, x20, #32
adcs x16, x16, x22
lsr x20, x9, #20
mul x20, x23, x20
lsr x22, x21, #52
add x20, x20, x22
lsl x21, x21, #12
extr x22, x20, x21, #44
adcs x17, x17, x22
lsr x20, x20, #44
adc x19, x19, x20
extr x21, x11, x10, #9
extr x20, x12, x11, #9
stp x21, x20, [x0]
extr x21, x13, x12, #9
extr x20, x14, x13, #9
stp x21, x20, [x0, #16]
extr x21, x15, x14, #9
extr x20, x16, x15, #9
stp x21, x20, [x0, #32]
extr x21, x17, x16, #9
extr x20, x19, x17, #9
stp x21, x20, [x0, #48]
and x22, x10, #0x1ff
lsr x19, x19, #9
add x22, x22, x19
str x22, [x0, #64]
mul x12, x2, x4
mul x17, x3, x5
umulh x22, x2, x4
subs x23, x2, x3
cneg x23, x23, cc
csetm x11, cc
subs x10, x5, x4
cneg x10, x10, cc
mul x16, x23, x10
umulh x10, x23, x10
cinv x11, x11, cc
eor x16, x16, x11
eor x10, x10, x11
adds x13, x12, x22
adc x22, x22, xzr
umulh x23, x3, x5
adds x13, x13, x17
adcs x22, x22, x23
adc x23, x23, xzr
adds x22, x22, x17
adc x23, x23, xzr
cmn x11, #0x1
adcs x13, x13, x16
adcs x22, x22, x10
adc x23, x23, x11
adds x12, x12, x12
adcs x13, x13, x13
adcs x22, x22, x22
adcs x23, x23, x23
adc x19, xzr, xzr
mul x10, x2, x2
mul x16, x3, x3
mul x21, x2, x3
umulh x11, x2, x2
umulh x17, x3, x3
umulh x20, x2, x3
adds x11, x11, x21
adcs x16, x16, x20
adc x17, x17, xzr
adds x11, x11, x21
adcs x16, x16, x20
adc x17, x17, xzr
adds x12, x12, x16
adcs x13, x13, x17
adcs x22, x22, xzr
adcs x23, x23, xzr
adc x19, x19, xzr
mul x14, x4, x4
mul x16, x5, x5
mul x21, x4, x5
umulh x15, x4, x4
umulh x17, x5, x5
umulh x20, x4, x5
adds x15, x15, x21
adcs x16, x16, x20
adc x17, x17, xzr
adds x15, x15, x21
adcs x16, x16, x20
adc x17, x17, xzr
adds x14, x14, x22
adcs x15, x15, x23
adcs x16, x16, x19
adc x17, x17, xzr
ldp x21, x20, [x0]
adds x21, x21, x10
adcs x20, x20, x11
stp x21, x20, [x0]
ldp x21, x20, [x0, #16]
adcs x21, x21, x12
adcs x20, x20, x13
stp x21, x20, [x0, #16]
ldp x21, x20, [x0, #32]
adcs x21, x21, x14
adcs x20, x20, x15
stp x21, x20, [x0, #32]
ldp x21, x20, [x0, #48]
adcs x21, x21, x16
adcs x20, x20, x17
stp x21, x20, [x0, #48]
ldr x22, [x0, #64]
adc x22, x22, xzr
str x22, [x0, #64]
mul x10, x2, x6
mul x14, x3, x7
mul x15, x4, x8
mul x16, x5, x9
umulh x17, x2, x6
adds x14, x14, x17
umulh x17, x3, x7
adcs x15, x15, x17
umulh x17, x4, x8
adcs x16, x16, x17
umulh x17, x5, x9
adc x17, x17, xzr
adds x11, x14, x10
adcs x14, x15, x14
adcs x15, x16, x15
adcs x16, x17, x16
adc x17, xzr, x17
adds x12, x14, x10
adcs x13, x15, x11
adcs x14, x16, x14
adcs x15, x17, x15
adcs x16, xzr, x16
adc x17, xzr, x17
subs x22, x4, x5
cneg x22, x22, cc
csetm x19, cc
subs x20, x9, x8
cneg x20, x20, cc
mul x21, x22, x20
umulh x20, x22, x20
cinv x19, x19, cc
cmn x19, #0x1
eor x21, x21, x19
adcs x15, x15, x21
eor x20, x20, x19
adcs x16, x16, x20
adc x17, x17, x19
subs x22, x2, x3
cneg x22, x22, cc
csetm x19, cc
subs x20, x7, x6
cneg x20, x20, cc
mul x21, x22, x20
umulh x20, x22, x20
cinv x19, x19, cc
cmn x19, #0x1
eor x21, x21, x19
adcs x11, x11, x21
eor x20, x20, x19
adcs x12, x12, x20
adcs x13, x13, x19
adcs x14, x14, x19
adcs x15, x15, x19
adcs x16, x16, x19
adc x17, x17, x19
subs x22, x3, x5
cneg x22, x22, cc
csetm x19, cc
subs x20, x9, x7
cneg x20, x20, cc
mul x21, x22, x20
umulh x20, x22, x20
cinv x19, x19, cc
cmn x19, #0x1
eor x21, x21, x19
adcs x14, x14, x21
eor x20, x20, x19
adcs x15, x15, x20
adcs x16, x16, x19
adc x17, x17, x19
subs x22, x2, x4
cneg x22, x22, cc
csetm x19, cc
subs x20, x8, x6
cneg x20, x20, cc
mul x21, x22, x20
umulh x20, x22, x20
cinv x19, x19, cc
cmn x19, #0x1
eor x21, x21, x19
adcs x12, x12, x21
eor x20, x20, x19
adcs x13, x13, x20
adcs x14, x14, x19
adcs x15, x15, x19
adcs x16, x16, x19
adc x17, x17, x19
subs x22, x2, x5
cneg x22, x22, cc
csetm x19, cc
subs x20, x9, x6
cneg x20, x20, cc
mul x21, x22, x20
umulh x20, x22, x20
cinv x19, x19, cc
cmn x19, #0x1
eor x21, x21, x19
adcs x13, x13, x21
eor x20, x20, x19
adcs x14, x14, x20
adcs x15, x15, x19
adcs x16, x16, x19
adc x17, x17, x19
subs x22, x3, x4
cneg x22, x22, cc
csetm x19, cc
subs x20, x8, x7
cneg x20, x20, cc
mul x21, x22, x20
umulh x20, x22, x20
cinv x19, x19, cc
cmn x19, #0x1
eor x21, x21, x19
adcs x13, x13, x21
eor x20, x20, x19
adcs x14, x14, x20
adcs x15, x15, x19
adcs x16, x16, x19
adc x17, x17, x19
ldp x21, x20, [x0]
extr x2, x15, x14, #8
adds x2, x2, x21
extr x3, x16, x15, #8
adcs x3, x3, x20
ldp x21, x20, [x0, #16]
extr x4, x17, x16, #8
adcs x4, x4, x21
and x22, x3, x4
lsr x5, x17, #8
adcs x5, x5, x20
and x22, x22, x5
ldp x21, x20, [x0, #32]
lsl x6, x10, #1
adcs x6, x6, x21
and x22, x22, x6
extr x7, x11, x10, #63
adcs x7, x7, x20
and x22, x22, x7
ldp x21, x20, [x0, #48]
extr x8, x12, x11, #63
adcs x8, x8, x21
and x22, x22, x8
extr x9, x13, x12, #63
adcs x9, x9, x20
and x22, x22, x9
ldr x21, [x0, #64]
extr x10, x14, x13, #63
and x10, x10, #0x1ff
adc x10, x21, x10
lsr x20, x10, #9
orr x10, x10, #0xfffffffffffffe00
cmp xzr, xzr
adcs xzr, x2, x20
adcs xzr, x22, xzr
adcs xzr, x10, xzr
adcs x2, x2, x20
adcs x3, x3, xzr
adcs x4, x4, xzr
adcs x5, x5, xzr
adcs x6, x6, xzr
adcs x7, x7, xzr
adcs x8, x8, xzr
adcs x9, x9, xzr
adc x10, x10, xzr
and x10, x10, #0x1ff
stp x2, x3, [x0]
stp x4, x5, [x0, #16]
stp x6, x7, [x0, #32]
stp x8, x9, [x0, #48]
str x10, [x0, #64]
ret
local_sub_p521:
ldp x5, x6, [x1]
ldp x4, x3, [x2]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [x1, #16]
ldp x4, x3, [x2, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [x1, #32]
ldp x4, x3, [x2, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
ldp x11, x12, [x1, #48]
ldp x4, x3, [x2, #48]
sbcs x11, x11, x4
sbcs x12, x12, x3
ldr x13, [x1, #64]
ldr x4, [x2, #64]
sbcs x13, x13, x4
sbcs x5, x5, xzr
sbcs x6, x6, xzr
sbcs x7, x7, xzr
sbcs x8, x8, xzr
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbcs x13, x13, xzr
and x13, x13, #0x1ff
stp x5, x6, [x0]
stp x7, x8, [x0, #16]
stp x9, x10, [x0, #32]
stp x11, x12, [x0, #48]
str x13, [x0, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_sqr_p521_neon.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Square modulo p_521, z := (x^2) mod p_521, assuming x reduced
// Input x[9]; output z[9]
//
// extern void bignum_sqr_p521_neon (uint64_t z[static 9],
// uint64_t x[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
// bignum_sqr_p521_neon is functionally equivalent to bignum_sqr_p521.
// It is written in a way that
// 1. A subset of scalar multiplications in bignum_sqr_p521 is carefully
//    chosen and vectorized
// 2. The vectorized assembly is rescheduled using the SLOTHY superoptimizer.
// https://github.com/slothy-optimizer/slothy
//
// The output program of step 1. is as follows:
//
// stp x19, x20, [sp, #-16]!
// stp x21, x22, [sp, #-16]!
// stp x23, x24, [sp, #-16]!
// ldp x20, x19, [x1]
// ldr q23, [x1]
// ldr q1, [x1]
// ldr q16, [x1]
// ldp x14, x12, [x1, #16]
// ldr q28, [x1, #16]
// ldr q31, [x1, #16]
// ldp x9, x2, [x1, #32]
// ldr q29, [x1, #32]
// ldr q4, [x1, #32]
// ldr q5, [x1]
// ldr q2, [x1, #32]
// ldp x6, x13, [x1, #48]
// ldr q24, [x1, #48]
// ldr q27, [x1, #48]
// ldr q0, [x1, #16]
// ldr q30, [x1, #48]
// mul x17, x9, x6
// mul x10, x2, x13
// umulh x24, x9, x6
// subs x4, x9, x2
// cneg x4, x4, cc
// csetm x16, cc
// subs x3, x13, x6
// cneg x23, x3, cc
// mul x3, x4, x23
// umulh x4, x4, x23
// cinv x22, x16, cc
// eor x23, x3, x22
// eor x16, x4, x22
// adds x3, x17, x24
// adc x24, x24, xzr
// umulh x4, x2, x13
// adds x3, x3, x10
// adcs x24, x24, x4
// adc x4, x4, xzr
// adds x24, x24, x10
// adc x10, x4, xzr
// cmn x22, #0x1
// adcs x4, x3, x23
// adcs x24, x24, x16
// adc x10, x10, x22
// adds x8, x17, x17
// adcs x22, x4, x4
// adcs x5, x24, x24
// adcs x11, x10, x10
// adc x23, xzr, xzr
// movi v25.2D, #0xffffffff
// uzp2 v19.4S, v4.4S, v4.4S
// xtn v26.2S, v29.2D
// xtn v22.2S, v4.2D
// rev64 v4.4S, v4.4S
// umull v7.2D, v26.2S, v22.2S
// umull v21.2D, v26.2S, v19.2S
// uzp2 v17.4S, v29.4S, v29.4S
// mul v4.4S, v4.4S, v29.4S
// usra v21.2D, v7.2D, #32
// umull v18.2D, v17.2S, v19.2S
// uaddlp v4.2D, v4.4S
// and v7.16B, v21.16B, v25.16B
// umlal v7.2D, v17.2S, v22.2S
// shl v4.2D, v4.2D, #32
// usra v18.2D, v21.2D, #32
// umlal v4.2D, v26.2S, v22.2S
// usra v18.2D, v7.2D, #32
// mov x15, v4.d[0]
// mov x16, v4.d[1]
// mul x3, x9, x2
// mov x10, v18.d[0]
// mov x17, v18.d[1]
// umulh x4, x9, x2
// adds x24, x10, x3
// adcs x10, x16, x4
// adc x17, x17, xzr
// adds x7, x24, x3
// adcs x10, x10, x4
// adc x17, x17, xzr
// adds x8, x8, x10
// adcs x22, x22, x17
// adcs x21, x5, xzr
// adcs x5, x11, xzr
// adc x11, x23, xzr
// movi v25.2D, #0xffffffff
// uzp2 v19.4S, v27.4S, v27.4S
// xtn v26.2S, v24.2D
// xtn v22.2S, v27.2D
// rev64 v4.4S, v27.4S
// umull v7.2D, v26.2S, v22.2S
// umull v21.2D, v26.2S, v19.2S
// uzp2 v17.4S, v24.4S, v24.4S
// mul v4.4S, v4.4S, v24.4S
// usra v21.2D, v7.2D, #32
// umull v18.2D, v17.2S, v19.2S
// uaddlp v4.2D, v4.4S
// and v7.16B, v21.16B, v25.16B
// umlal v7.2D, v17.2S, v22.2S
// shl v4.2D, v4.2D, #32
// usra v18.2D, v21.2D, #32
// umlal v4.2D, v26.2S, v22.2S
// usra v18.2D, v7.2D, #32
// mov x23, v4.d[0]
// mov x16, v4.d[1]
// mul x3, x6, x13
// mov x10, v18.d[0]
// mov x17, v18.d[1]
// umulh x4, x6, x13
// adds x24, x10, x3
// adcs x10, x16, x4
// adc x17, x17, xzr
// adds x24, x24, x3
// adcs x10, x10, x4
// adc x17, x17, xzr
// adds x23, x23, x21
// adcs x16, x24, x5
// adcs x3, x10, x11
// adc x21, x17, xzr
// ldr x17, [x1, #64]
// add x5, x17, x17
// mul x11, x17, x17
// and x17, x20, #0xfffffffffffff
// mul x4, x5, x17
// extr x17, x19, x20, #52
// and x17, x17, #0xfffffffffffff
// mul x10, x5, x17
// lsr x17, x4, #52
// add x24, x10, x17
// lsl x17, x4, #12
// extr x17, x24, x17, #12
// adds x15, x15, x17
// extr x17, x14, x19, #40
// and x17, x17, #0xfffffffffffff
// mul x10, x5, x17
// lsr x17, x24, #52
// add x4, x10, x17
// lsl x17, x24, #12
// extr x17, x4, x17, #24
// adcs x7, x7, x17
// extr x17, x12, x14, #28
// and x17, x17, #0xfffffffffffff
// mul x10, x5, x17
// lsr x17, x4, #52
// add x24, x10, x17
// lsl x17, x4, #12
// extr x17, x24, x17, #36
// adcs x8, x8, x17
// extr x17, x9, x12, #16
// and x17, x17, #0xfffffffffffff
// mul x10, x5, x17
// lsr x17, x24, #52
// add x4, x10, x17
// lsl x17, x24, #12
// extr x17, x4, x17, #48
// adcs x22, x22, x17
// lsr x17, x9, #4
// and x17, x17, #0xfffffffffffff
// mul x10, x5, x17
// lsr x17, x4, #52
// add x24, x10, x17
// lsl x17, x4, #12
// extr x4, x24, x17, #60
// extr x17, x2, x9, #56
// and x17, x17, #0xfffffffffffff
// mul x10, x5, x17
// lsr x17, x24, #52
// add x24, x10, x17
// lsl x17, x4, #8
// extr x17, x24, x17, #8
// adcs x23, x23, x17
// extr x17, x6, x2, #44
// and x17, x17, #0xfffffffffffff
// mul x10, x5, x17
// lsr x17, x24, #52
// add x4, x10, x17
// lsl x17, x24, #12
// extr x17, x4, x17, #20
// adcs x16, x16, x17
// extr x17, x13, x6, #32
// and x17, x17, #0xfffffffffffff
// mul x10, x5, x17
// lsr x17, x4, #52
// add x24, x10, x17
// lsl x17, x4, #12
// extr x17, x24, x17, #32
// adcs x3, x3, x17
// lsr x17, x13, #20
// mul x10, x5, x17
// lsr x17, x24, #52
// add x10, x10, x17
// lsl x17, x24, #12
// extr x17, x10, x17, #44
// adcs x4, x21, x17
// lsr x17, x10, #44
// adc x24, x11, x17
// extr x10, x7, x15, #9
// extr x17, x8, x7, #9
// stp x10, x17, [x0] // @slothy:writes=buffer0
// extr x10, x22, x8, #9
// extr x17, x23, x22, #9
// stp x10, x17, [x0, #16] // @slothy:writes=buffer16
// extr x10, x16, x23, #9
// extr x17, x3, x16, #9
// stp x10, x17, [x0, #32] // @slothy:writes=buffer32
// extr x10, x4, x3, #9
// extr x17, x24, x4, #9
// stp x10, x17, [x0, #48] // @slothy:writes=buffer48
// and x10, x15, #0x1ff
// lsr x17, x24, #9
// add x17, x10, x17
// str x17, [x0, #64] // @slothy:writes=buffer64
// uzp1 v17.4S, v28.4S, v23.4S
// rev64 v4.4S, v28.4S
// uzp1 v7.4S, v23.4S, v23.4S
// mul v4.4S, v4.4S, v23.4S
// uaddlp v4.2D, v4.4S
// shl v4.2D, v4.2D, #32
// umlal v4.2D, v7.2S, v17.2S
// mov x8, v4.d[0]
// mov x22, v4.d[1]
// umulh x23, x20, x14
// subs x17, x20, x19
// cneg x4, x17, cc
// csetm x24, cc
// subs x17, x12, x14
// cneg x17, x17, cc
// mul x10, x4, x17
// umulh x17, x4, x17
// cinv x16, x24, cc
// eor x3, x10, x16
// eor x4, x17, x16
// adds x24, x8, x23
// adc x10, x23, xzr
// umulh x17, x19, x12
// adds x24, x24, x22
// adcs x10, x10, x17
// adc x17, x17, xzr
// adds x10, x10, x22
// adc x17, x17, xzr
// cmn x16, #0x1
// adcs x24, x24, x3
// adcs x10, x10, x4
// adc x17, x17, x16
// adds x15, x8, x8
// adcs x7, x24, x24
// adcs x8, x10, x10
// adcs x22, x17, x17
// adc x23, xzr, xzr
// movi v25.2D, #0xffffffff
// uzp2 v19.4S, v16.4S, v16.4S
// xtn v26.2S, v1.2D
// xtn v22.2S, v16.2D
// rev64 v4.4S, v16.4S
// umull v7.2D, v26.2S, v22.2S
// umull v21.2D, v26.2S, v19.2S
// uzp2 v17.4S, v1.4S, v1.4S
// mul v4.4S, v4.4S, v1.4S
// usra v21.2D, v7.2D, #32
// umull v18.2D, v17.2S, v19.2S
// uaddlp v4.2D, v4.4S
// and v7.16B, v21.16B, v25.16B
// umlal v7.2D, v17.2S, v22.2S
// shl v4.2D, v4.2D, #32
// usra v18.2D, v21.2D, #32
// umlal v4.2D, v26.2S, v22.2S
// usra v18.2D, v7.2D, #32
// mov x21, v4.d[0]
// mov x16, v4.d[1]
// mul x3, x20, x19
// mov x10, v18.d[0]
// mov x17, v18.d[1]
// umulh x4, x20, x19
// adds x24, x10, x3
// adcs x10, x16, x4
// adc x17, x17, xzr
// adds x5, x24, x3
// adcs x10, x10, x4
// adc x17, x17, xzr
// adds x11, x15, x10
// adcs x15, x7, x17
// adcs x7, x8, xzr
// adcs x8, x22, xzr
// adc x22, x23, xzr
// xtn v7.2S, v31.2D
// shrn v4.2S, v31.2D, #32
// umull v4.2D, v7.2S, v4.2S
// shl v4.2D, v4.2D, #33
// umlal v4.2D, v7.2S, v7.2S
// mov x23, v4.d[0]
// mov x16, v4.d[1]
// mul x3, x14, x12
// umulh x10, x14, x14
// umulh x17, x12, x12
// umulh x4, x14, x12
// adds x24, x10, x3
// adcs x10, x16, x4
// adc x17, x17, xzr
// adds x24, x24, x3
// adcs x10, x10, x4
// adc x17, x17, xzr
// adds x16, x23, x7
// adcs x3, x24, x8
// adcs x4, x10, x22
// adc x24, x17, xzr
// ldp x10, x17, [x0] // @slothy:reads=buffer0
// adds x10, x10, x21
// adcs x17, x17, x5
// stp x10, x17, [x0] // @slothy:writes=buffer0
// ldp x10, x17, [x0, #16] // @slothy:reads=buffer16
// adcs x10, x10, x11
// adcs x17, x17, x15
// stp x10, x17, [x0, #16] // @slothy:writes=buffer16
// ldp x10, x17, [x0, #32] // @slothy:reads=buffer32
// adcs x10, x10, x16
// adcs x17, x17, x3
// stp x10, x17, [x0, #32] // @slothy:writes=buffer32
// ldp x10, x17, [x0, #48] // @slothy:reads=buffer48
// adcs x10, x10, x4
// adcs x17, x17, x24
// stp x10, x17, [x0, #48] // @slothy:writes=buffer48
// ldr x17, [x0, #64] // @slothy:reads=buffer64
// adc x17, x17, xzr
// str x17, [x0, #64] // @slothy:writes=buffer64
// movi v25.2D, #0xffffffff
// uzp2 v19.4S, v2.4S, v2.4S
// xtn v26.2S, v5.2D
// xtn v22.2S, v2.2D
// rev64 v4.4S, v2.4S
// umull v7.2D, v26.2S, v22.2S
// umull v21.2D, v26.2S, v19.2S
// uzp2 v17.4S, v5.4S, v5.4S
// mul v4.4S, v4.4S, v5.4S
// usra v21.2D, v7.2D, #32
// umull v18.2D, v17.2S, v19.2S
// uaddlp v4.2D, v4.4S
// and v7.16B, v21.16B, v25.16B
// umlal v7.2D, v17.2S, v22.2S
// shl v4.2D, v4.2D, #32
// usra v18.2D, v21.2D, #32
// umlal v4.2D, v26.2S, v22.2S
// usra v18.2D, v7.2D, #32
// mov x5, v4.d[0]
// mov x4, v4.d[1]
// movi v25.2D, #0xffffffff
// uzp2 v17.4S, v30.4S, v30.4S
// xtn v19.2S, v0.2D
// xtn v26.2S, v30.2D
// rev64 v4.4S, v30.4S
// umull v7.2D, v19.2S, v26.2S
// umull v22.2D, v19.2S, v17.2S
// uzp2 v21.4S, v0.4S, v0.4S
// mul v4.4S, v4.4S, v0.4S
// usra v22.2D, v7.2D, #32
// umull v17.2D, v21.2S, v17.2S
// uaddlp v4.2D, v4.4S
// and v7.16B, v22.16B, v25.16B
// umlal v7.2D, v21.2S, v26.2S
// shl v4.2D, v4.2D, #32
// usra v17.2D, v22.2D, #32
// umlal v4.2D, v19.2S, v26.2S
// usra v17.2D, v7.2D, #32
// mov x24, v4.d[0]
// mov x10, v4.d[1]
// mov x17, v18.d[0]
// adds x4, x4, x17
// mov x17, v18.d[1]
// adcs x24, x24, x17
// mov x17, v17.d[0]
// adcs x10, x10, x17
// mov x17, v17.d[1]
// adc x17, x17, xzr
// adds x15, x4, x5
// adcs x4, x24, x4
// adcs x24, x10, x24
// adcs x10, x17, x10
// adc x17, xzr, x17
// adds x7, x4, x5
// adcs x8, x24, x15
// adcs x22, x10, x4
// adcs x23, x17, x24
// adcs x16, xzr, x10
// adc x3, xzr, x17
// subs x17, x14, x12
// cneg x24, x17, cc
// csetm x4, cc
// subs x17, x13, x6
// cneg x10, x17, cc
// mul x17, x24, x10
// umulh x24, x24, x10
// cinv x10, x4, cc
// cmn x10, #0x1
// eor x17, x17, x10
// adcs x23, x23, x17
// eor x17, x24, x10
// adcs x16, x16, x17
// adc x3, x3, x10
// subs x17, x20, x19
// cneg x24, x17, cc
// csetm x4, cc
// subs x17, x2, x9
// cneg x10, x17, cc
// mul x17, x24, x10
// umulh x24, x24, x10
// cinv x10, x4, cc
// cmn x10, #0x1
// eor x17, x17, x10
// adcs x11, x15, x17
// eor x17, x24, x10
// adcs x15, x7, x17
// adcs x7, x8, x10
// adcs x22, x22, x10
// adcs x23, x23, x10
// adcs x16, x16, x10
// adc x3, x3, x10
// subs x17, x19, x12
// cneg x24, x17, cc
// csetm x4, cc
// subs x17, x13, x2
// cneg x10, x17, cc
// mul x17, x24, x10
// umulh x24, x24, x10
// cinv x10, x4, cc
// cmn x10, #0x1
// eor x17, x17, x10
// adcs x8, x22, x17
// eor x17, x24, x10
// adcs x23, x23, x17
// adcs x16, x16, x10
// adc x3, x3, x10
// subs x17, x20, x14
// cneg x24, x17, cc
// csetm x4, cc
// subs x17, x6, x9
// cneg x10, x17, cc
// mul x17, x24, x10
// umulh x24, x24, x10
// cinv x10, x4, cc
// cmn x10, #0x1
// eor x17, x17, x10
// adcs x22, x15, x17
// eor x17, x24, x10
// adcs x4, x7, x17
// adcs x24, x8, x10
// adcs x23, x23, x10
// adcs x16, x16, x10
// adc x3, x3, x10
// subs x12, x20, x12
// cneg x10, x12, cc
// csetm x17, cc
// subs x12, x13, x9
// cneg x9, x12, cc
// mul x12, x10, x9
// umulh x13, x10, x9
// cinv x9, x17, cc
// cmn x9, #0x1
// eor x12, x12, x9
// adcs x4, x4, x12
// eor x12, x13, x9
// adcs x24, x24, x12
// adcs x10, x23, x9
// adcs x17, x16, x9
// adc x13, x3, x9
// subs x19, x19, x14
// cneg x12, x19, cc
// csetm x9, cc
// subs x6, x6, x2
// cneg x14, x6, cc
// mul x19, x12, x14
// umulh x12, x12, x14
// cinv x14, x9, cc
// cmn x14, #0x1
// eor x19, x19, x14
// adcs x23, x4, x19
// eor x19, x12, x14
// adcs x16, x24, x19
// adcs x6, x10, x14
// adcs x2, x17, x14
// adc x9, x13, x14
// ldp x12, x14, [x0] // @slothy:reads=buffer0
// extr x19, x6, x16, #8
// adds x10, x19, x12
// extr x19, x2, x6, #8
// adcs x17, x19, x14
// ldp x14, x12, [x0, #16] // @slothy:reads=buffer16
// extr x19, x9, x2, #8
// adcs x13, x19, x14
// and x14, x17, x13
// lsr x19, x9, #8
// adcs x6, x19, x12
// and x9, x14, x6
// ldp x14, x12, [x0, #32] // @slothy:reads=buffer32
// lsl x19, x5, #1
// adcs x2, x19, x14
// and x14, x9, x2
// extr x19, x11, x5, #63
// adcs x3, x19, x12
// and x9, x14, x3
// ldp x14, x12, [x0, #48] // @slothy:reads=buffer48
// extr x19, x22, x11, #63
// adcs x4, x19, x14
// and x14, x9, x4
// extr x19, x23, x22, #63
// adcs x24, x19, x12
// and x12, x14, x24
// ldr x14, [x0, #64] // @slothy:reads=buffer64
// extr x19, x16, x23, #63
// and x19, x19, #0x1ff
// adc x19, x14, x19
// lsr x14, x19, #9
// orr x19, x19, #0xfffffffffffffe00
// cmp xzr, xzr
// adcs xzr, x10, x14
// adcs xzr, x12, xzr
// adcs xzr, x19, xzr
// adcs x10, x10, x14
// adcs x17, x17, xzr
// adcs x13, x13, xzr
// adcs x6, x6, xzr
// adcs x2, x2, xzr
// adcs x9, x3, xzr
// adcs x12, x4, xzr
// adcs x14, x24, xzr
// adc x19, x19, xzr
// and x19, x19, #0x1ff
// stp x10, x17, [x0] // @slothy:writes=buffer0
// stp x13, x6, [x0, #16] // @slothy:writes=buffer16
// stp x2, x9, [x0, #32] // @slothy:writes=buffer32
// stp x12, x14, [x0, #48] // @slothy:writes=buffer48
// str x19, [x0, #64] // @slothy:writes=buffer64
// ldp x23, x24, [sp], #16
// ldp x21, x22, [sp], #16
// ldp x19, x20, [sp], #16
// ret
//
// The bash script used for step 2 is as follows:
//
// # Store the assembly instructions, excluding the final 'ret' and the
// # callee-saved register stores/loads, in a file, say 'input.S'.
// export OUTPUTS="[hint_buffer0,hint_buffer16,hint_buffer32,hint_buffer48,hint_buffer64]"
// export RESERVED_REGS="[x18,x25,x26,x27,x28,x29,x30,sp,q8,q9,q10,q11,q12,q13,q14,q15,v8,v9,v10,v11,v12,v13,v14,v15]"
// <s2n-bignum>/tools/external/slothy.sh input.S my_out_dir
// # my_out_dir/3.opt.s is the optimized assembly. Its contents may differ
// # from this file since the instruction sequence is chosen non-deterministically.
// # Please add 'ret' at the end of the output assembly.
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_p521_neon)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_p521_neon)
.text
.balign 4
S2N_BN_SYMBOL(bignum_sqr_p521_neon):
// Save registers
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
ldr q23, [x1, #32]
ldp x9, x2, [x1, #32]
ldr q16, [x1, #32]
ldr q20, [x1, #48]
ldp x6, x13, [x1, #48]
rev64 v2.4S, v23.4S
mul x14, x9, x2
ldr q31, [x1, #48]
subs x22, x9, x2
uzp2 v26.4S, v23.4S, v23.4S
mul v30.4S, v2.4S, v16.4S
xtn v0.2S, v20.2D
csetm x12, cc
xtn v21.2S, v16.2D
xtn v23.2S, v23.2D
umulh x10, x9, x6
rev64 v27.4S, v31.4S
umull v2.2D, v21.2S, v26.2S
cneg x23, x22, cc
uaddlp v25.2D, v30.4S
umull v18.2D, v21.2S, v23.2S
mul x22, x9, x6
mul v6.4S, v27.4S, v20.4S
uzp2 v17.4S, v20.4S, v20.4S
shl v20.2D, v25.2D, #32
uzp2 v27.4S, v31.4S, v31.4S
mul x16, x2, x13
umlal v20.2D, v21.2S, v23.2S
usra v2.2D, v18.2D, #32
adds x8, x22, x10
umull v25.2D, v17.2S, v27.2S
xtn v31.2S, v31.2D
movi v1.2D, #0xffffffff
adc x3, x10, xzr
umulh x21, x2, x13
uzp2 v21.4S, v16.4S, v16.4S
umull v18.2D, v0.2S, v27.2S
subs x19, x13, x6
and v7.16B, v2.16B, v1.16B
umull v27.2D, v0.2S, v31.2S
cneg x20, x19, cc
movi v30.2D, #0xffffffff
umull v16.2D, v21.2S, v26.2S
umlal v7.2D, v21.2S, v23.2S
mul x19, x23, x20
cinv x7, x12, cc
uaddlp v6.2D, v6.4S
eor x12, x19, x7
adds x11, x8, x16
umulh x10, x23, x20
ldr q1, [x1]
usra v16.2D, v2.2D, #32
adcs x19, x3, x21
shl v2.2D, v6.2D, #32
adc x20, x21, xzr
adds x17, x19, x16
usra v18.2D, v27.2D, #32
adc x19, x20, xzr
cmn x7, #0x1
umlal v2.2D, v0.2S, v31.2S
umulh x16, x9, x2
adcs x8, x11, x12
usra v16.2D, v7.2D, #32
ldr x12, [x1, #64]
eor x20, x10, x7
umulh x10, x6, x13
mov x23, v2.d[0]
mov x3, v2.d[1]
adcs x21, x17, x20
usra v25.2D, v18.2D, #32
and v23.16B, v18.16B, v30.16B
adc x7, x19, x7
adds x22, x22, x22
ldr q7, [x1, #16]
adcs x17, x8, x8
umlal v23.2D, v17.2S, v31.2S
mov x19, v16.d[0]
mul x11, x12, x12
ldr q4, [x1]
usra v25.2D, v23.2D, #32
add x5, x12, x12
adcs x15, x21, x21
ldr q28, [x1]
mov x12, v20.d[1]
adcs x24, x7, x7
mov x21, v16.d[1]
adc x4, xzr, xzr
adds x19, x19, x14
ldr q18, [x1, #16]
xtn v26.2S, v1.2D
adcs x8, x12, x16
adc x21, x21, xzr
adds x7, x19, x14
xtn v23.2S, v7.2D
rev64 v21.4S, v28.4S
adcs x12, x8, x16
ldp x20, x19, [x1]
mov x16, v25.d[1]
xtn v22.2S, v28.2D
adc x14, x21, xzr
adds x8, x22, x12
uzp2 v24.4S, v28.4S, v28.4S
rev64 v28.4S, v18.4S
mul x12, x6, x13
mul v16.4S, v21.4S, v1.4S
shrn v31.2S, v7.2D, #32
adcs x22, x17, x14
mov x14, v25.d[0]
and x21, x20, #0xfffffffffffff
umull v17.2D, v26.2S, v24.2S
ldr q2, [x1, #32]
adcs x17, x15, xzr
ldr q30, [x1, #48]
umull v7.2D, v26.2S, v22.2S
adcs x15, x24, xzr
ldr q0, [x1, #16]
movi v6.2D, #0xffffffff
adc x4, x4, xzr
adds x14, x14, x12
uzp1 v27.4S, v18.4S, v4.4S
uzp2 v19.4S, v1.4S, v1.4S
adcs x24, x3, x10
mul x3, x5, x21
umull v29.2D, v23.2S, v31.2S
ldr q5, [x1]
adc x21, x16, xzr
adds x16, x14, x12
extr x12, x19, x20, #52
umull v18.2D, v19.2S, v24.2S
adcs x24, x24, x10
and x10, x12, #0xfffffffffffff
ldp x14, x12, [x1, #16]
usra v17.2D, v7.2D, #32
adc x21, x21, xzr
adds x23, x23, x17
mul x17, x5, x10
shl v21.2D, v29.2D, #33
lsl x10, x3, #12
lsr x1, x3, #52
rev64 v29.4S, v2.4S
uaddlp v25.2D, v16.4S
add x17, x17, x1
adcs x16, x16, x15
extr x3, x14, x19, #40
mov x15, v20.d[0]
extr x10, x17, x10, #12
and x3, x3, #0xfffffffffffff
shl v3.2D, v25.2D, #32
and v6.16B, v17.16B, v6.16B
mul x1, x5, x3
usra v18.2D, v17.2D, #32
adcs x3, x24, x4
extr x4, x12, x14, #28
umlal v6.2D, v19.2S, v22.2S
xtn v20.2S, v2.2D
umlal v3.2D, v26.2S, v22.2S
movi v26.2D, #0xffffffff
lsr x24, x17, #52
and x4, x4, #0xfffffffffffff
uzp2 v19.4S, v2.4S, v2.4S
add x1, x1, x24
mul x24, x5, x4
lsl x4, x17, #12
xtn v24.2S, v5.2D
extr x17, x1, x4, #24
adc x21, x21, xzr
umlal v21.2D, v23.2S, v23.2S
adds x4, x15, x10
lsl x10, x1, #12
adcs x15, x7, x17
mul v23.4S, v28.4S, v4.4S
and x7, x4, #0x1ff
lsr x17, x1, #52
umulh x1, x19, x12
uzp2 v17.4S, v5.4S, v5.4S
extr x4, x15, x4, #9
add x24, x24, x17
mul v29.4S, v29.4S, v5.4S
extr x17, x24, x10, #36
extr x10, x9, x12, #16
uzp1 v28.4S, v4.4S, v4.4S
adcs x17, x8, x17
and x8, x10, #0xfffffffffffff
umull v16.2D, v24.2S, v20.2S
extr x10, x17, x15, #9
mul x15, x5, x8
stp x4, x10, [x0]
lsl x4, x24, #12
lsr x8, x9, #4
uaddlp v4.2D, v23.4S
and x8, x8, #0xfffffffffffff
umull v23.2D, v24.2S, v19.2S
mul x8, x5, x8
extr x10, x2, x9, #56
lsr x24, x24, #52
and x10, x10, #0xfffffffffffff
add x15, x15, x24
extr x4, x15, x4, #48
mul x24, x5, x10
lsr x10, x15, #52
usra v23.2D, v16.2D, #32
add x10, x8, x10
shl v4.2D, v4.2D, #32
adcs x22, x22, x4
extr x4, x6, x2, #44
lsl x15, x15, #12
lsr x8, x10, #52
extr x15, x10, x15, #60
and x10, x4, #0xfffffffffffff
umlal v4.2D, v28.2S, v27.2S
add x8, x24, x8
extr x4, x13, x6, #32
mul x24, x5, x10
uzp2 v16.4S, v30.4S, v30.4S
lsl x10, x15, #8
rev64 v28.4S, v30.4S
and x15, x4, #0xfffffffffffff
extr x4, x8, x10, #8
mul x10, x5, x15
lsl x15, x8, #12
adcs x23, x23, x4
lsr x4, x8, #52
lsr x8, x13, #20
add x4, x24, x4
mul x8, x5, x8
lsr x24, x4, #52
extr x15, x4, x15, #20
lsl x4, x4, #12
add x10, x10, x24
adcs x15, x16, x15
extr x4, x10, x4, #32
umulh x5, x20, x14
adcs x3, x3, x4
usra v18.2D, v6.2D, #32
lsl x16, x10, #12
extr x24, x15, x23, #9
lsr x10, x10, #52
uzp2 v27.4S, v0.4S, v0.4S
add x8, x8, x10
extr x10, x3, x15, #9
extr x4, x22, x17, #9
and v25.16B, v23.16B, v26.16B
lsr x17, x8, #44
extr x15, x8, x16, #44
extr x16, x23, x22, #9
xtn v7.2S, v30.2D
mov x8, v4.d[0]
stp x24, x10, [x0, #32]
uaddlp v30.2D, v29.4S
stp x4, x16, [x0, #16]
umulh x24, x20, x19
adcs x15, x21, x15
adc x16, x11, x17
subs x11, x20, x19
xtn v5.2S, v0.2D
csetm x17, cc
extr x3, x15, x3, #9
mov x22, v4.d[1]
cneg x21, x11, cc
subs x10, x12, x14
mul v31.4S, v28.4S, v0.4S
cneg x10, x10, cc
cinv x11, x17, cc
shl v4.2D, v30.2D, #32
umull v28.2D, v5.2S, v16.2S
extr x23, x16, x15, #9
adds x4, x8, x5
mul x17, x21, x10
umull v22.2D, v5.2S, v7.2S
adc x15, x5, xzr
adds x4, x4, x22
uaddlp v2.2D, v31.4S
lsr x5, x16, #9
adcs x16, x15, x1
mov x15, v18.d[0]
adc x1, x1, xzr
umulh x10, x21, x10
adds x22, x16, x22
umlal v4.2D, v24.2S, v20.2S
umull v30.2D, v27.2S, v16.2S
stp x3, x23, [x0, #48]
add x3, x7, x5
adc x16, x1, xzr
usra v28.2D, v22.2D, #32
mul x23, x20, x19
eor x1, x17, x11
cmn x11, #0x1
mov x17, v18.d[1]
umull v18.2D, v17.2S, v19.2S
adcs x7, x4, x1
eor x1, x10, x11
umlal v25.2D, v17.2S, v20.2S
movi v16.2D, #0xffffffff
adcs x22, x22, x1
usra v18.2D, v23.2D, #32
umulh x4, x14, x14
adc x1, x16, x11
adds x10, x8, x8
shl v23.2D, v2.2D, #32
str x3, [x0, #64]
adcs x5, x7, x7
and v16.16B, v28.16B, v16.16B
usra v30.2D, v28.2D, #32
adcs x7, x22, x22
mov x21, v3.d[1]
adcs x11, x1, x1
umlal v16.2D, v27.2S, v7.2S
adc x22, xzr, xzr
adds x16, x15, x23
mul x8, x14, x12
umlal v23.2D, v5.2S, v7.2S
usra v18.2D, v25.2D, #32
umulh x15, x14, x12
adcs x21, x21, x24
usra v30.2D, v16.2D, #32
adc x1, x17, xzr
adds x3, x16, x23
adcs x21, x21, x24
adc x1, x1, xzr
adds x24, x10, x21
umulh x21, x12, x12
adcs x16, x5, x1
adcs x10, x7, xzr
mov x17, v21.d[1]
adcs x23, x11, xzr
adc x5, x22, xzr
adds x1, x4, x8
adcs x22, x17, x15
ldp x17, x4, [x0]
mov x11, v21.d[0]
adc x21, x21, xzr
adds x1, x1, x8
adcs x15, x22, x15
adc x8, x21, xzr
adds x22, x11, x10
mov x21, v3.d[0]
adcs x11, x1, x23
ldp x1, x10, [x0, #16]
adcs x15, x15, x5
adc x7, x8, xzr
adds x8, x17, x21
mov x23, v4.d[1]
ldp x5, x21, [x0, #32]
adcs x17, x4, x3
ldr x4, [x0, #64]
mov x3, v18.d[0]
adcs x24, x1, x24
stp x8, x17, [x0]
adcs x17, x10, x16
ldp x1, x16, [x0, #48]
adcs x5, x5, x22
adcs x8, x21, x11
stp x5, x8, [x0, #32]
adcs x1, x1, x15
mov x15, v23.d[1]
adcs x21, x16, x7
stp x1, x21, [x0, #48]
adc x10, x4, xzr
subs x7, x14, x12
mov x16, v18.d[1]
cneg x5, x7, cc
csetm x4, cc
subs x11, x13, x6
mov x8, v23.d[0]
cneg x7, x11, cc
cinv x21, x4, cc
mov x11, v30.d[0]
adds x4, x23, x3
mul x22, x5, x7
mov x23, v30.d[1]
adcs x8, x8, x16
adcs x16, x15, x11
adc x11, x23, xzr
umulh x3, x5, x7
stp x24, x17, [x0, #16]
mov x5, v4.d[0]
subs x15, x20, x19
cneg x7, x15, cc
str x10, [x0, #64]
csetm x1, cc
subs x24, x2, x9
cneg x17, x24, cc
cinv x15, x1, cc
adds x23, x4, x5
umulh x1, x7, x17
adcs x24, x8, x4
adcs x10, x16, x8
eor x8, x22, x21
adcs x16, x11, x16
mul x22, x7, x17
eor x17, x1, x15
adc x1, xzr, x11
adds x11, x24, x5
eor x7, x3, x21
adcs x3, x10, x23
adcs x24, x16, x24
adcs x4, x1, x10
eor x10, x22, x15
adcs x16, xzr, x16
adc x1, xzr, x1
cmn x21, #0x1
adcs x8, x4, x8
adcs x22, x16, x7
adc x7, x1, x21
subs x21, x19, x12
csetm x4, cc
cneg x1, x21, cc
subs x21, x13, x2
cinv x16, x4, cc
cneg x4, x21, cc
cmn x15, #0x1
adcs x21, x23, x10
mul x23, x1, x4
adcs x11, x11, x17
adcs x3, x3, x15
umulh x1, x1, x4
adcs x24, x24, x15
adcs x8, x8, x15
adcs x22, x22, x15
eor x17, x23, x16
adc x15, x7, x15
subs x7, x20, x14
cneg x7, x7, cc
csetm x4, cc
subs x10, x20, x12
cneg x23, x10, cc
csetm x10, cc
subs x12, x6, x9
cinv x20, x4, cc
cneg x12, x12, cc
cmn x16, #0x1
eor x1, x1, x16
adcs x17, x24, x17
mul x4, x7, x12
adcs x8, x8, x1
umulh x1, x7, x12
adcs x24, x22, x16
adc x7, x15, x16
subs x12, x13, x9
cneg x12, x12, cc
cinv x13, x10, cc
subs x19, x19, x14
mul x9, x23, x12
cneg x19, x19, cc
csetm x10, cc
eor x16, x1, x20
subs x22, x6, x2
umulh x12, x23, x12
eor x1, x4, x20
cinv x4, x10, cc
cneg x22, x22, cc
cmn x20, #0x1
adcs x15, x11, x1
eor x6, x12, x13
adcs x10, x3, x16
adcs x17, x17, x20
eor x23, x9, x13
adcs x2, x8, x20
mul x11, x19, x22
adcs x24, x24, x20
adc x7, x7, x20
cmn x13, #0x1
adcs x3, x10, x23
umulh x22, x19, x22
adcs x17, x17, x6
eor x12, x22, x4
extr x22, x15, x21, #63
adcs x8, x2, x13
extr x21, x21, x5, #63
ldp x16, x23, [x0]
adcs x20, x24, x13
eor x1, x11, x4
adc x6, x7, x13
cmn x4, #0x1
ldp x2, x7, [x0, #16]
adcs x1, x3, x1
extr x19, x1, x15, #63
adcs x14, x17, x12
extr x1, x14, x1, #63
lsl x17, x5, #1
adcs x8, x8, x4
extr x12, x8, x14, #8
ldp x15, x11, [x0, #32]
adcs x9, x20, x4
adc x3, x6, x4
adds x16, x12, x16
extr x6, x9, x8, #8
ldp x14, x12, [x0, #48]
extr x8, x3, x9, #8
adcs x20, x6, x23
ldr x24, [x0, #64]
lsr x6, x3, #8
adcs x8, x8, x2
and x2, x1, #0x1ff
and x1, x20, x8
adcs x4, x6, x7
adcs x3, x17, x15
and x1, x1, x4
adcs x9, x21, x11
and x1, x1, x3
adcs x6, x22, x14
and x1, x1, x9
and x21, x1, x6
adcs x14, x19, x12
adc x1, x24, x2
cmp xzr, xzr
orr x12, x1, #0xfffffffffffffe00
lsr x1, x1, #9
adcs xzr, x16, x1
and x21, x21, x14
adcs xzr, x21, xzr
adcs xzr, x12, xzr
adcs x21, x16, x1
adcs x1, x20, xzr
adcs x19, x8, xzr
stp x21, x1, [x0]
adcs x1, x4, xzr
adcs x21, x3, xzr
stp x19, x1, [x0, #16]
adcs x1, x9, xzr
stp x21, x1, [x0, #32]
adcs x21, x6, xzr
adcs x1, x14, xzr
stp x21, x1, [x0, #48]
adc x1, x12, xzr
and x1, x1, #0x1ff
str x1, [x0, #64]
// Restore regs and return
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
marvin-hansen/iggy-streaming-system
| 3,087
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_triple_p521.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Triple modulo p_521, z := (3 * x) mod p_521, assuming x reduced
// Input x[9]; output z[9]
//
// extern void bignum_triple_p521
// (uint64_t z[static 9], uint64_t x[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
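//
// For reference, a hedged C sketch of the same computation (illustrative only,
// not part of this file or of the s2n-bignum API; the name ref_triple_p521 and
// the use of unsigned __int128 are assumptions made for the sketch):
//
//     #include <stdint.h>
//
//     // p_521 = 2^521 - 1; x is 9 little-endian 64-bit words with x < p_521
//     static void ref_triple_p521(uint64_t z[9], const uint64_t x[9]) {
//         uint64_t t[9];
//         unsigned __int128 acc = 0;
//         for (int i = 0; i < 9; i++) {            // t = 3 * x (at most 523 bits)
//             acc += (unsigned __int128)x[i] * 3;
//             t[i] = (uint64_t)acc;
//             acc >>= 64;
//         }
//         acc = t[8] >> 9;                         // bits >= 521; 2^521 == 1 (mod p_521)
//         t[8] &= 0x1FF;
//         for (int i = 0; i < 9; i++) {            // fold the high bits back in
//             acc += t[i];
//             t[i] = (uint64_t)acc;
//             acc >>= 64;
//         }
//         // now t < p_521 + 4, so one conditional subtraction of p_521 suffices
//         uint64_t b[9];
//         unsigned __int128 d, borrow = 0;
//         for (int i = 0; i < 9; i++) {
//             d = (unsigned __int128)t[i] - (i < 8 ? UINT64_MAX : 0x1FF) - borrow;
//             b[i] = (uint64_t)d;
//             borrow = (d >> 64) & 1;
//         }
//         for (int i = 0; i < 9; i++) z[i] = borrow ? t[i] : b[i];
//     }
//
// The code below reaches the same result more cheaply: 2 * x (mod p_521) is a
// one-bit rotation of x within 521 bits, so 3 * x needs only a single addition
// pass followed by one conditional subtraction.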
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_p521)
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_p521_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_p521_alt)
.text
.balign 4
#define z x0
#define x x1
#define h x2
#define l x3
#define d0 x4
#define d1 x5
#define d2 x6
#define d3 x7
#define d4 x8
#define d5 x9
#define d6 x10
#define d7 x11
#define d8 x12
S2N_BN_SYMBOL(bignum_triple_p521):
S2N_BN_SYMBOL(bignum_triple_p521_alt):
// Pick out top bit to wrap to the zero position in the doubling step
ldr d8, [x, #64]
lsl l, d8, #55
// Rotate left to get x' == 2 * x (mod p_521), then add it to x with a carry-in of 1 to get
// s = [d8;d7;d6;d5;d4;d3;d2;d1;d0] = x + x' + 1 == 3 * x + 1 (mod p_521)
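// (Since 2^521 == 1 (mod p_521), x' = 2 * x (mod p_521) is just x rotated left
// by one bit within 521 bits: the lsl #55 above moves the top bit of x, bit 520,
// into bit 63 of l, and the extr #63 chain below then feeds it back in at bit 0.)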
subs xzr, xzr, xzr
ldp d0, d1, [x]
extr l, d0, l, #63
extr h, d1, d0, #63
adcs d0, d0, l
ldp d2, d3, [x, #16]
extr l, d2, d1, #63
adcs d1, d1, h
extr h, d3, d2, #63
adcs d2, d2, l
ldp d4, d5, [x, #32]
extr l, d4, d3, #63
adcs d3, d3, h
extr h, d5, d4, #63
adcs d4, d4, l
ldp d6, d7, [x, #48]
extr l, d6, d5, #63
adcs d5, d5, h
extr h, d7, d6, #63
adcs d6, d6, l
extr l, d8, d7, #63
adcs d7, d7, h
and l, l, #0x1FF
adcs d8, d8, l
// We know x, x' < p_521 (they consist of the same bits, just in rotated positions)
// so x + x' + 1 <= 2 * (p_521 - 1) + 1 < 2 * p_521.
// Note that x + x' >= p_521 <=> s = x + x' + 1 >= 2^521
// Set CF <=> s = x + x' + 1 >= 2^521 and make it a mask in l as well
subs l, d8, #512
csetm l, cs
// Now if CF is set (and l is all 1s), we want (x + x') - p_521 = s - 2^521
// while otherwise we want x + x' = s - 1 (from existing CF, which is nice)
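// In pseudocode (illustrative only), with s = [d8;...;d0] and d8 holding the
// bits of s from position 512 upwards:
//
//     if (d8 >= 512)  result = s - 2^521;      // = (x + x') - p_521
//     else            result = s - 1;          // =  x + x'
//
// The sbcs chain below does exactly this: a clear CF supplies the borrow of 1,
// while a set CF leaves the low words untouched and the masked l = 512 removes
// 2^521 from the top word.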
sbcs d0, d0, xzr
and l, l, #512
sbcs d1, d1, xzr
sbcs d2, d2, xzr
sbcs d3, d3, xzr
sbcs d4, d4, xzr
sbcs d5, d5, xzr
sbcs d6, d6, xzr
sbcs d7, d7, xzr
sbc d8, d8, l
// Store the result
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
stp d6, d7, [z, #48]
str d8, [z, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
marvin-hansen/iggy-streaming-system
| 16,163
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_montsqr_p521.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery square, z := (x^2 / 2^576) mod p_521
// Input x[9]; output z[9]
//
// extern void bignum_montsqr_p521
// (uint64_t z[static 9], uint64_t x[static 9]);
//
// Does z := (x^2 / 2^576) mod p_521, assuming x < p_521. This means the
// Montgomery base is the "native size" 2^{9*64} = 2^576; since p_521 is
// a Mersenne prime the basic modular squaring bignum_sqr_p521 can be
// considered a Montgomery operation to base 2^521.
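// Indeed 2^521 == 1 (mod p_521), so dividing by 2^521 is a no-op modulo p_521
// and z := (x^2 / 2^521) mod p_521 coincides with plain modular squaring.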
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p521)
.text
.balign 4
#define z x0
#define x x1
#define a0 x2
#define a1 x3
#define a2 x4
#define a3 x5
#define b0 x6
#define b1 x7
#define b2 x8
#define b3 x9
#define s0 x10
#define s1 x11
#define s2 x12
#define s3 x13
#define s4 x14
#define s5 x15
#define s6 x16
#define s7 x17
#define c x19
#define h x20
#define l x21
#define t x22
#define u x23
#define v x24
// Aliased to earlier ones we no longer need
#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x6
#define d5 x7
#define d6 x8
#define d7 x9
#define d8 x10
S2N_BN_SYMBOL(bignum_montsqr_p521):
// Save registers
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
// Load all the inputs first
ldp a0, a1, [x]
ldp a2, a3, [x, #16]
ldp b0, b1, [x, #32]
ldp b2, b3, [x, #48]
// Square the upper half with a register-renamed variant of bignum_sqr_4_8
mul s2, b0, b2
mul s7, b1, b3
umulh t, b0, b2
subs u, b0, b1
cneg u, u, cc
csetm s1, cc
subs s0, b3, b2
cneg s0, s0, cc
mul s6, u, s0
umulh s0, u, s0
cinv s1, s1, cc
eor s6, s6, s1
eor s0, s0, s1
adds s3, s2, t
adc t, t, xzr
umulh u, b1, b3
adds s3, s3, s7
adcs t, t, u
adc u, u, xzr
adds t, t, s7
adc u, u, xzr
cmn s1, #0x1
adcs s3, s3, s6
adcs t, t, s0
adc u, u, s1
adds s2, s2, s2
adcs s3, s3, s3
adcs t, t, t
adcs u, u, u
adc c, xzr, xzr
mul s0, b0, b0
mul s6, b1, b1
mul l, b0, b1
umulh s1, b0, b0
umulh s7, b1, b1
umulh h, b0, b1
adds s1, s1, l
adcs s6, s6, h
adc s7, s7, xzr
adds s1, s1, l
adcs s6, s6, h
adc s7, s7, xzr
adds s2, s2, s6
adcs s3, s3, s7
adcs t, t, xzr
adcs u, u, xzr
adc c, c, xzr
mul s4, b2, b2
mul s6, b3, b3
mul l, b2, b3
umulh s5, b2, b2
umulh s7, b3, b3
umulh h, b2, b3
adds s5, s5, l
adcs s6, s6, h
adc s7, s7, xzr
adds s5, s5, l
adcs s6, s6, h
adc s7, s7, xzr
adds s4, s4, t
adcs s5, s5, u
adcs s6, s6, c
adc s7, s7, xzr
// Augment the high part with the contribution from the top little word C.
// If we write the input as 2^512 * C + x then we are otherwise just doing
// x^2, so we need to add to the high part 2^512 * C^2 + (2 * C) * x.
// Accumulate it as [c;s7;...;s0] = H'. Since 2 * C is only 10 bits long
// we multiply 52-bit chunks of the x digits by 2 * C and solve the overlap
// with non-overflowing addition to get 52-bit chunks of the result with
// similar alignment. Then we stitch these back together and add them into
// the running total. This is quite a bit of palaver, but it avoids using
// the standard 2-part multiplications involving umulh, and on target
// microarchitectures seems to improve performance by about 5%. We could
// equally well use 53 or 54 since they are still <= 64 - 10, but below
// 52 we would end up using more multiplications.
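// One chunk step, in C-like pseudocode (names here are illustrative only):
//
//     chunk = 52-bit slice of the input digits;    // "and ... #0x000fffffffffffff"
//     prod  = (2 * C) * chunk;                     // at most 62 bits, so no overflow
//     prod += prev >> 52;                          // small spill from the previous chunk
//     prev  = prod;
//
// Consecutive 52-bit chunks of 2 * C * x are thus produced with plain 64-bit
// mul/add, and the lsl/extr pairs repack them into ordinary 64-bit words.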
ldr c, [x, #64]
add u, c, c
mul c, c, c
// 0 * 52 = 64 * 0 + 0
and l, a0, #0x000fffffffffffff
mul l, u, l
// 1 * 52 = 64 * 0 + 52
extr h, a1, a0, #52
and h, h, #0x000fffffffffffff
mul h, u, h
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #12
adds s0, s0, t
// 2 * 52 = 64 * 1 + 40
extr l, a2, a1, #40
and l, l, #0x000fffffffffffff
mul l, u, l
lsr t, h, #52
add l, l, t
lsl h, h, #12
extr t, l, h, #24
adcs s1, s1, t
// 3 * 52 = 64 * 2 + 28
extr h, a3, a2, #28
and h, h, #0x000fffffffffffff
mul h, u, h
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #36
adcs s2, s2, t
// 4 * 52 = 64 * 3 + 16
extr l, b0, a3, #16
and l, l, #0x000fffffffffffff
mul l, u, l
lsr t, h, #52
add l, l, t
lsl h, h, #12
extr t, l, h, #48
adcs s3, s3, t
// 5 * 52 = 64 * 4 + 4
lsr h, b0, #4
and h, h, #0x000fffffffffffff
mul h, u, h
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr v, h, l, #60
// 6 * 52 = 64 * 4 + 56
extr l, b1, b0, #56
and l, l, #0x000fffffffffffff
mul l, u, l
lsr t, h, #52
add l, l, t
lsl v, v, #8
extr t, l, v, #8
adcs s4, s4, t
// 7 * 52 = 64 * 5 + 44
extr h, b2, b1, #44
and h, h, #0x000fffffffffffff
mul h, u, h
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #20
adcs s5, s5, t
// 8 * 52 = 64 * 6 + 32
extr l, b3, b2, #32
and l, l, #0x000fffffffffffff
mul l, u, l
lsr t, h, #52
add l, l, t
lsl h, h, #12
extr t, l, h, #32
adcs s6, s6, t
// 9 * 52 = 64 * 7 + 20
lsr h, b3, #20
mul h, u, h
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #44
adcs s7, s7, t
// Top word
lsr h, h, #44
adc c, c, h
// Rotate [c;s7;...;s0] before storing in the buffer.
// We want to add 2^512 * H', which splitting H' at bit 9 is
// 2^521 * H_top + 2^512 * H_bot == 2^512 * H_bot + H_top (mod p_521)
extr l, s1, s0, #9
extr h, s2, s1, #9
stp l, h, [z]
extr l, s3, s2, #9
extr h, s4, s3, #9
stp l, h, [z, #16]
extr l, s5, s4, #9
extr h, s6, s5, #9
stp l, h, [z, #32]
extr l, s7, s6, #9
extr h, c, s7, #9
stp l, h, [z, #48]
and t, s0, #0x1FF
lsr c, c, #9
add t, t, c
str t, [z, #64]
// Square the lower half with an analogous variant of bignum_sqr_4_8
mul s2, a0, a2
mul s7, a1, a3
umulh t, a0, a2
subs u, a0, a1
cneg u, u, cc
csetm s1, cc
subs s0, a3, a2
cneg s0, s0, cc
mul s6, u, s0
umulh s0, u, s0
cinv s1, s1, cc
eor s6, s6, s1
eor s0, s0, s1
adds s3, s2, t
adc t, t, xzr
umulh u, a1, a3
adds s3, s3, s7
adcs t, t, u
adc u, u, xzr
adds t, t, s7
adc u, u, xzr
cmn s1, #0x1
adcs s3, s3, s6
adcs t, t, s0
adc u, u, s1
adds s2, s2, s2
adcs s3, s3, s3
adcs t, t, t
adcs u, u, u
adc c, xzr, xzr
mul s0, a0, a0
mul s6, a1, a1
mul l, a0, a1
umulh s1, a0, a0
umulh s7, a1, a1
umulh h, a0, a1
adds s1, s1, l
adcs s6, s6, h
adc s7, s7, xzr
adds s1, s1, l
adcs s6, s6, h
adc s7, s7, xzr
adds s2, s2, s6
adcs s3, s3, s7
adcs t, t, xzr
adcs u, u, xzr
adc c, c, xzr
mul s4, a2, a2
mul s6, a3, a3
mul l, a2, a3
umulh s5, a2, a2
umulh s7, a3, a3
umulh h, a2, a3
adds s5, s5, l
adcs s6, s6, h
adc s7, s7, xzr
adds s5, s5, l
adcs s6, s6, h
adc s7, s7, xzr
adds s4, s4, t
adcs s5, s5, u
adcs s6, s6, c
adc s7, s7, xzr
// Add it directly to the existing buffer
ldp l, h, [z]
adds l, l, s0
adcs h, h, s1
stp l, h, [z]
ldp l, h, [z, #16]
adcs l, l, s2
adcs h, h, s3
stp l, h, [z, #16]
ldp l, h, [z, #32]
adcs l, l, s4
adcs h, h, s5
stp l, h, [z, #32]
ldp l, h, [z, #48]
adcs l, l, s6
adcs h, h, s7
stp l, h, [z, #48]
ldr t, [z, #64]
adc t, t, xzr
str t, [z, #64]
// Now get the cross-product in [s7,...,s0] with variant of bignum_mul_4_8
mul s0, a0, b0
mul s4, a1, b1
mul s5, a2, b2
mul s6, a3, b3
umulh s7, a0, b0
adds s4, s4, s7
umulh s7, a1, b1
adcs s5, s5, s7
umulh s7, a2, b2
adcs s6, s6, s7
umulh s7, a3, b3
adc s7, s7, xzr
adds s1, s4, s0
adcs s4, s5, s4
adcs s5, s6, s5
adcs s6, s7, s6
adc s7, xzr, s7
adds s2, s4, s0
adcs s3, s5, s1
adcs s4, s6, s4
adcs s5, s7, s5
adcs s6, xzr, s6
adc s7, xzr, s7
subs t, a2, a3
cneg t, t, cc
csetm c, cc
subs h, b3, b2
cneg h, h, cc
mul l, t, h
umulh h, t, h
cinv c, c, cc
cmn c, #0x1
eor l, l, c
adcs s5, s5, l
eor h, h, c
adcs s6, s6, h
adc s7, s7, c
subs t, a0, a1
cneg t, t, cc
csetm c, cc
subs h, b1, b0
cneg h, h, cc
mul l, t, h
umulh h, t, h
cinv c, c, cc
cmn c, #0x1
eor l, l, c
adcs s1, s1, l
eor h, h, c
adcs s2, s2, h
adcs s3, s3, c
adcs s4, s4, c
adcs s5, s5, c
adcs s6, s6, c
adc s7, s7, c
subs t, a1, a3
cneg t, t, cc
csetm c, cc
subs h, b3, b1
cneg h, h, cc
mul l, t, h
umulh h, t, h
cinv c, c, cc
cmn c, #0x1
eor l, l, c
adcs s4, s4, l
eor h, h, c
adcs s5, s5, h
adcs s6, s6, c
adc s7, s7, c
subs t, a0, a2
cneg t, t, cc
csetm c, cc
subs h, b2, b0
cneg h, h, cc
mul l, t, h
umulh h, t, h
cinv c, c, cc
cmn c, #0x1
eor l, l, c
adcs s2, s2, l
eor h, h, c
adcs s3, s3, h
adcs s4, s4, c
adcs s5, s5, c
adcs s6, s6, c
adc s7, s7, c
subs t, a0, a3
cneg t, t, cc
csetm c, cc
subs h, b3, b0
cneg h, h, cc
mul l, t, h
umulh h, t, h
cinv c, c, cc
cmn c, #0x1
eor l, l, c
adcs s3, s3, l
eor h, h, c
adcs s4, s4, h
adcs s5, s5, c
adcs s6, s6, c
adc s7, s7, c
subs t, a1, a2
cneg t, t, cc
csetm c, cc
subs h, b2, b1
cneg h, h, cc
mul l, t, h
umulh h, t, h
cinv c, c, cc
cmn c, #0x1
eor l, l, c
adcs s3, s3, l
eor h, h, c
adcs s4, s4, h
adcs s5, s5, c
adcs s6, s6, c
adc s7, s7, c
// Let the cross product be M. We want to add 2^256 * 2 * M to the buffer
// Split M into M_top (248 bits) and M_bot (264 bits), so we add
// 2^521 * M_top + 2^257 * M_bot == 2^257 * M_bot + M_top (mod p_521)
// Accumulate the (non-reduced in general) 9-word answer [d8;...;d0]
// As this sum is built, accumulate t = AND of words d7...d1 to help
// in condensing the carry chain in the comparison that comes next
ldp l, h, [z]
extr d0, s5, s4, #8
adds d0, d0, l
extr d1, s6, s5, #8
adcs d1, d1, h
ldp l, h, [z, #16]
extr d2, s7, s6, #8
adcs d2, d2, l
and t, d1, d2
lsr d3, s7, #8
adcs d3, d3, h
and t, t, d3
ldp l, h, [z, #32]
lsl d4, s0, #1
adcs d4, d4, l
and t, t, d4
extr d5, s1, s0, #63
adcs d5, d5, h
and t, t, d5
ldp l, h, [z, #48]
extr d6, s2, s1, #63
adcs d6, d6, l
and t, t, d6
extr d7, s3, s2, #63
adcs d7, d7, h
and t, t, d7
ldr l, [z, #64]
extr d8, s4, s3, #63
and d8, d8, #0x1FF
adc d8, l, d8
// Extract the high part h and mask off the low part l = [d8;d7;...;d0]
// but stuff d8 with 1 bits at the left to ease a comparison below
lsr h, d8, #9
orr d8, d8, #~0x1FF
// Decide whether h + l >= p_521 <=> h + l + 1 >= 2^521. Since this can only
// happen if digits d7,...d1 are all 1s, we use the AND of them "t" to
// condense the carry chain, and since we stuffed 1 bits into d8 we get
// the result in CF without an additional comparison.
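// In C terms (illustrative only) the subs/adcs probe below computes
//
//     c = 1;                                                 // subs xzr, xzr, xzr
//     c = ((unsigned __int128)d0 + h + c) >> 64;             // carry out of word 0
//     c = ((unsigned __int128)t + c) >> 64;                  // propagates iff t is all 1s
//     c = ((unsigned __int128)(d8 | ~0x1FFULL) + c) >> 64;   // top word stuffed with 1s
//
// leaving CF = c = 1 exactly when h + l + 1 >= 2^521, i.e. h + l >= p_521.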
subs xzr, xzr, xzr
adcs xzr, d0, h
adcs xzr, t, xzr
adcs xzr, d8, xzr
// Now if CF is set we want (h + l) - p_521 = (h + l + 1) - 2^521
// while otherwise we want just h + l. So mask h + l + CF to 521 bits.
// This masking also gets rid of the stuffing with 1s we did above.
adcs d0, d0, h
adcs d1, d1, xzr
adcs d2, d2, xzr
adcs d3, d3, xzr
adcs d4, d4, xzr
adcs d5, d5, xzr
adcs d6, d6, xzr
adcs d7, d7, xzr
adc d8, d8, xzr
and d8, d8, #0x1FF
// So far, this has been the same as a pure modular squaring.
// Now finally the Montgomery ingredient, which is just a 521-bit
// rotation by 9*64 - 521 = 55 bits right.
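// Concretely, 2^576 = 2^55 * 2^521 == 2^55 (mod p_521), so dividing by 2^576 is
// the same as multiplying by 2^(521-55) = 2^466, i.e. rotating the 521-bit value
// right by 55 bit positions.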
lsl c, d0, #9
extr d0, d1, d0, #55
extr d1, d2, d1, #55
extr d2, d3, d2, #55
extr d3, d4, d3, #55
orr d8, d8, c
extr d4, d5, d4, #55
extr d5, d6, d5, #55
extr d6, d7, d6, #55
extr d7, d8, d7, #55
lsr d8, d8, #55
// Store the final result
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
stp d6, d7, [z, #48]
str d8, [z, #64]
// Restore regs and return
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
marvin-hansen/iggy-streaming-system
| 68,018
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/p521_jdouble_alt.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point doubling on NIST curve P-521 in Jacobian coordinates
//
// extern void p521_jdouble_alt
// (uint64_t p3[static 27],uint64_t p1[static 27]);
//
// Does p3 := 2 * p1 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// It is assumed that all coordinates of the input point are fully
// reduced mod p_521 and that the z coordinate is not zero.
//
// Standard ARM ABI: X0 = p3, X1 = p1
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jdouble_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jdouble_alt)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 72
// Stable homes for input arguments during main code sequence
#define input_z x26
#define input_x x27
// Pointer-offset pairs for inputs and outputs
#define x_1 input_x, #0
#define y_1 input_x, #NUMSIZE
#define z_1 input_x, #(2*NUMSIZE)
#define x_3 input_z, #0
#define y_3 input_z, #NUMSIZE
#define z_3 input_z, #(2*NUMSIZE)
// Pointer-offset pairs for temporaries
#define z2 sp, #(NUMSIZE*0)
#define y2 sp, #(NUMSIZE*1)
#define x2p sp, #(NUMSIZE*2)
#define xy2 sp, #(NUMSIZE*3)
#define y4 sp, #(NUMSIZE*4)
#define t2 sp, #(NUMSIZE*4)
#define dx2 sp, #(NUMSIZE*5)
#define t1 sp, #(NUMSIZE*5)
#define d sp, #(NUMSIZE*6)
#define x4p sp, #(NUMSIZE*6)
// NUMSIZE*7 is not 16-aligned so we round it up
#define NSPACE (NUMSIZE*7+8)
// Corresponds exactly to bignum_mul_p521_alt
#define mul_p521(P0,P1,P2) \
ldp x3, x4, [P1]; \
ldp x5, x6, [P2]; \
mul x15, x3, x5; \
umulh x16, x3, x5; \
mul x14, x3, x6; \
umulh x17, x3, x6; \
adds x16, x16, x14; \
ldp x7, x8, [P2+16]; \
mul x14, x3, x7; \
umulh x19, x3, x7; \
adcs x17, x17, x14; \
mul x14, x3, x8; \
umulh x20, x3, x8; \
adcs x19, x19, x14; \
ldp x9, x10, [P2+32]; \
mul x14, x3, x9; \
umulh x21, x3, x9; \
adcs x20, x20, x14; \
mul x14, x3, x10; \
umulh x22, x3, x10; \
adcs x21, x21, x14; \
ldp x11, x12, [P2+48]; \
mul x14, x3, x11; \
umulh x23, x3, x11; \
adcs x22, x22, x14; \
ldr x13, [P2+64]; \
mul x14, x3, x12; \
umulh x24, x3, x12; \
adcs x23, x23, x14; \
mul x14, x3, x13; \
umulh x1, x3, x13; \
adcs x24, x24, x14; \
adc x1, x1, xzr; \
mul x14, x4, x5; \
adds x16, x16, x14; \
mul x14, x4, x6; \
adcs x17, x17, x14; \
mul x14, x4, x7; \
adcs x19, x19, x14; \
mul x14, x4, x8; \
adcs x20, x20, x14; \
mul x14, x4, x9; \
adcs x21, x21, x14; \
mul x14, x4, x10; \
adcs x22, x22, x14; \
mul x14, x4, x11; \
adcs x23, x23, x14; \
mul x14, x4, x12; \
adcs x24, x24, x14; \
mul x14, x4, x13; \
adcs x1, x1, x14; \
cset x0, hs; \
umulh x14, x4, x5; \
adds x17, x17, x14; \
umulh x14, x4, x6; \
adcs x19, x19, x14; \
umulh x14, x4, x7; \
adcs x20, x20, x14; \
umulh x14, x4, x8; \
adcs x21, x21, x14; \
umulh x14, x4, x9; \
adcs x22, x22, x14; \
umulh x14, x4, x10; \
adcs x23, x23, x14; \
umulh x14, x4, x11; \
adcs x24, x24, x14; \
umulh x14, x4, x12; \
adcs x1, x1, x14; \
umulh x14, x4, x13; \
adc x0, x0, x14; \
stp x15, x16, [P0]; \
ldp x3, x4, [P1+16]; \
mul x14, x3, x5; \
adds x17, x17, x14; \
mul x14, x3, x6; \
adcs x19, x19, x14; \
mul x14, x3, x7; \
adcs x20, x20, x14; \
mul x14, x3, x8; \
adcs x21, x21, x14; \
mul x14, x3, x9; \
adcs x22, x22, x14; \
mul x14, x3, x10; \
adcs x23, x23, x14; \
mul x14, x3, x11; \
adcs x24, x24, x14; \
mul x14, x3, x12; \
adcs x1, x1, x14; \
mul x14, x3, x13; \
adcs x0, x0, x14; \
cset x15, hs; \
umulh x14, x3, x5; \
adds x19, x19, x14; \
umulh x14, x3, x6; \
adcs x20, x20, x14; \
umulh x14, x3, x7; \
adcs x21, x21, x14; \
umulh x14, x3, x8; \
adcs x22, x22, x14; \
umulh x14, x3, x9; \
adcs x23, x23, x14; \
umulh x14, x3, x10; \
adcs x24, x24, x14; \
umulh x14, x3, x11; \
adcs x1, x1, x14; \
umulh x14, x3, x12; \
adcs x0, x0, x14; \
umulh x14, x3, x13; \
adc x15, x15, x14; \
mul x14, x4, x5; \
adds x19, x19, x14; \
mul x14, x4, x6; \
adcs x20, x20, x14; \
mul x14, x4, x7; \
adcs x21, x21, x14; \
mul x14, x4, x8; \
adcs x22, x22, x14; \
mul x14, x4, x9; \
adcs x23, x23, x14; \
mul x14, x4, x10; \
adcs x24, x24, x14; \
mul x14, x4, x11; \
adcs x1, x1, x14; \
mul x14, x4, x12; \
adcs x0, x0, x14; \
mul x14, x4, x13; \
adcs x15, x15, x14; \
cset x16, hs; \
umulh x14, x4, x5; \
adds x20, x20, x14; \
umulh x14, x4, x6; \
adcs x21, x21, x14; \
umulh x14, x4, x7; \
adcs x22, x22, x14; \
umulh x14, x4, x8; \
adcs x23, x23, x14; \
umulh x14, x4, x9; \
adcs x24, x24, x14; \
umulh x14, x4, x10; \
adcs x1, x1, x14; \
umulh x14, x4, x11; \
adcs x0, x0, x14; \
umulh x14, x4, x12; \
adcs x15, x15, x14; \
umulh x14, x4, x13; \
adc x16, x16, x14; \
stp x17, x19, [P0+16]; \
ldp x3, x4, [P1+32]; \
mul x14, x3, x5; \
adds x20, x20, x14; \
mul x14, x3, x6; \
adcs x21, x21, x14; \
mul x14, x3, x7; \
adcs x22, x22, x14; \
mul x14, x3, x8; \
adcs x23, x23, x14; \
mul x14, x3, x9; \
adcs x24, x24, x14; \
mul x14, x3, x10; \
adcs x1, x1, x14; \
mul x14, x3, x11; \
adcs x0, x0, x14; \
mul x14, x3, x12; \
adcs x15, x15, x14; \
mul x14, x3, x13; \
adcs x16, x16, x14; \
cset x17, hs; \
umulh x14, x3, x5; \
adds x21, x21, x14; \
umulh x14, x3, x6; \
adcs x22, x22, x14; \
umulh x14, x3, x7; \
adcs x23, x23, x14; \
umulh x14, x3, x8; \
adcs x24, x24, x14; \
umulh x14, x3, x9; \
adcs x1, x1, x14; \
umulh x14, x3, x10; \
adcs x0, x0, x14; \
umulh x14, x3, x11; \
adcs x15, x15, x14; \
umulh x14, x3, x12; \
adcs x16, x16, x14; \
umulh x14, x3, x13; \
adc x17, x17, x14; \
mul x14, x4, x5; \
adds x21, x21, x14; \
mul x14, x4, x6; \
adcs x22, x22, x14; \
mul x14, x4, x7; \
adcs x23, x23, x14; \
mul x14, x4, x8; \
adcs x24, x24, x14; \
mul x14, x4, x9; \
adcs x1, x1, x14; \
mul x14, x4, x10; \
adcs x0, x0, x14; \
mul x14, x4, x11; \
adcs x15, x15, x14; \
mul x14, x4, x12; \
adcs x16, x16, x14; \
mul x14, x4, x13; \
adcs x17, x17, x14; \
cset x19, hs; \
umulh x14, x4, x5; \
adds x22, x22, x14; \
umulh x14, x4, x6; \
adcs x23, x23, x14; \
umulh x14, x4, x7; \
adcs x24, x24, x14; \
umulh x14, x4, x8; \
adcs x1, x1, x14; \
umulh x14, x4, x9; \
adcs x0, x0, x14; \
umulh x14, x4, x10; \
adcs x15, x15, x14; \
umulh x14, x4, x11; \
adcs x16, x16, x14; \
umulh x14, x4, x12; \
adcs x17, x17, x14; \
umulh x14, x4, x13; \
adc x19, x19, x14; \
stp x20, x21, [P0+32]; \
ldp x3, x4, [P1+48]; \
mul x14, x3, x5; \
adds x22, x22, x14; \
mul x14, x3, x6; \
adcs x23, x23, x14; \
mul x14, x3, x7; \
adcs x24, x24, x14; \
mul x14, x3, x8; \
adcs x1, x1, x14; \
mul x14, x3, x9; \
adcs x0, x0, x14; \
mul x14, x3, x10; \
adcs x15, x15, x14; \
mul x14, x3, x11; \
adcs x16, x16, x14; \
mul x14, x3, x12; \
adcs x17, x17, x14; \
mul x14, x3, x13; \
adcs x19, x19, x14; \
cset x20, hs; \
umulh x14, x3, x5; \
adds x23, x23, x14; \
umulh x14, x3, x6; \
adcs x24, x24, x14; \
umulh x14, x3, x7; \
adcs x1, x1, x14; \
umulh x14, x3, x8; \
adcs x0, x0, x14; \
umulh x14, x3, x9; \
adcs x15, x15, x14; \
umulh x14, x3, x10; \
adcs x16, x16, x14; \
umulh x14, x3, x11; \
adcs x17, x17, x14; \
umulh x14, x3, x12; \
adcs x19, x19, x14; \
umulh x14, x3, x13; \
adc x20, x20, x14; \
mul x14, x4, x5; \
adds x23, x23, x14; \
mul x14, x4, x6; \
adcs x24, x24, x14; \
mul x14, x4, x7; \
adcs x1, x1, x14; \
mul x14, x4, x8; \
adcs x0, x0, x14; \
mul x14, x4, x9; \
adcs x15, x15, x14; \
mul x14, x4, x10; \
adcs x16, x16, x14; \
mul x14, x4, x11; \
adcs x17, x17, x14; \
mul x14, x4, x12; \
adcs x19, x19, x14; \
mul x14, x4, x13; \
adcs x20, x20, x14; \
cset x21, hs; \
umulh x14, x4, x5; \
adds x24, x24, x14; \
umulh x14, x4, x6; \
adcs x1, x1, x14; \
umulh x14, x4, x7; \
adcs x0, x0, x14; \
umulh x14, x4, x8; \
adcs x15, x15, x14; \
umulh x14, x4, x9; \
adcs x16, x16, x14; \
umulh x14, x4, x10; \
adcs x17, x17, x14; \
umulh x14, x4, x11; \
adcs x19, x19, x14; \
umulh x14, x4, x12; \
adcs x20, x20, x14; \
umulh x14, x4, x13; \
adc x21, x21, x14; \
stp x22, x23, [P0+48]; \
ldr x3, [P1+64]; \
mul x14, x3, x5; \
adds x24, x24, x14; \
mul x14, x3, x6; \
adcs x1, x1, x14; \
mul x14, x3, x7; \
adcs x0, x0, x14; \
mul x14, x3, x8; \
adcs x15, x15, x14; \
mul x14, x3, x9; \
adcs x16, x16, x14; \
mul x14, x3, x10; \
adcs x17, x17, x14; \
mul x14, x3, x11; \
adcs x19, x19, x14; \
mul x14, x3, x12; \
adcs x20, x20, x14; \
mul x14, x3, x13; \
adc x21, x21, x14; \
umulh x14, x3, x5; \
adds x1, x1, x14; \
umulh x14, x3, x6; \
adcs x0, x0, x14; \
umulh x14, x3, x7; \
adcs x15, x15, x14; \
umulh x14, x3, x8; \
adcs x16, x16, x14; \
umulh x14, x3, x9; \
adcs x17, x17, x14; \
umulh x14, x3, x10; \
adcs x19, x19, x14; \
umulh x14, x3, x11; \
adcs x20, x20, x14; \
umulh x14, x3, x12; \
adc x21, x21, x14; \
cmp xzr, xzr; \
ldp x5, x6, [P0]; \
extr x14, x1, x24, #9; \
adcs x5, x5, x14; \
extr x14, x0, x1, #9; \
adcs x6, x6, x14; \
ldp x7, x8, [P0+16]; \
extr x14, x15, x0, #9; \
adcs x7, x7, x14; \
extr x14, x16, x15, #9; \
adcs x8, x8, x14; \
ldp x9, x10, [P0+32]; \
extr x14, x17, x16, #9; \
adcs x9, x9, x14; \
extr x14, x19, x17, #9; \
adcs x10, x10, x14; \
ldp x11, x12, [P0+48]; \
extr x14, x20, x19, #9; \
adcs x11, x11, x14; \
extr x14, x21, x20, #9; \
adcs x12, x12, x14; \
orr x13, x24, #0xfffffffffffffe00; \
lsr x14, x21, #9; \
adcs x13, x13, x14; \
sbcs x5, x5, xzr; \
sbcs x6, x6, xzr; \
sbcs x7, x7, xzr; \
sbcs x8, x8, xzr; \
sbcs x9, x9, xzr; \
sbcs x10, x10, xzr; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbc x13, x13, xzr; \
and x13, x13, #0x1ff; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]; \
stp x11, x12, [P0+48]; \
str x13, [P0+64]
// Corresponds exactly to bignum_sqr_p521_alt
#define sqr_p521(P0,P1) \
ldp x2, x3, [P1]; \
mul x11, x2, x3; \
umulh x12, x2, x3; \
ldp x4, x5, [P1+16]; \
mul x10, x2, x4; \
umulh x13, x2, x4; \
adds x12, x12, x10; \
ldp x6, x7, [P1+32]; \
mul x10, x2, x5; \
umulh x14, x2, x5; \
adcs x13, x13, x10; \
ldp x8, x9, [P1+48]; \
mul x10, x2, x6; \
umulh x15, x2, x6; \
adcs x14, x14, x10; \
mul x10, x2, x7; \
umulh x16, x2, x7; \
adcs x15, x15, x10; \
mul x10, x2, x8; \
umulh x17, x2, x8; \
adcs x16, x16, x10; \
mul x10, x2, x9; \
umulh x19, x2, x9; \
adcs x17, x17, x10; \
adc x19, x19, xzr; \
mul x10, x3, x4; \
adds x13, x13, x10; \
mul x10, x3, x5; \
adcs x14, x14, x10; \
mul x10, x3, x6; \
adcs x15, x15, x10; \
mul x10, x3, x7; \
adcs x16, x16, x10; \
mul x10, x3, x8; \
adcs x17, x17, x10; \
mul x10, x3, x9; \
adcs x19, x19, x10; \
cset x20, hs; \
umulh x10, x3, x4; \
adds x14, x14, x10; \
umulh x10, x3, x5; \
adcs x15, x15, x10; \
umulh x10, x3, x6; \
adcs x16, x16, x10; \
umulh x10, x3, x7; \
adcs x17, x17, x10; \
umulh x10, x3, x8; \
adcs x19, x19, x10; \
umulh x10, x3, x9; \
adc x20, x20, x10; \
mul x10, x6, x7; \
umulh x21, x6, x7; \
adds x20, x20, x10; \
adc x21, x21, xzr; \
mul x10, x4, x5; \
adds x15, x15, x10; \
mul x10, x4, x6; \
adcs x16, x16, x10; \
mul x10, x4, x7; \
adcs x17, x17, x10; \
mul x10, x4, x8; \
adcs x19, x19, x10; \
mul x10, x4, x9; \
adcs x20, x20, x10; \
mul x10, x6, x8; \
adcs x21, x21, x10; \
cset x22, hs; \
umulh x10, x4, x5; \
adds x16, x16, x10; \
umulh x10, x4, x6; \
adcs x17, x17, x10; \
umulh x10, x4, x7; \
adcs x19, x19, x10; \
umulh x10, x4, x8; \
adcs x20, x20, x10; \
umulh x10, x4, x9; \
adcs x21, x21, x10; \
umulh x10, x6, x8; \
adc x22, x22, x10; \
mul x10, x7, x8; \
umulh x23, x7, x8; \
adds x22, x22, x10; \
adc x23, x23, xzr; \
mul x10, x5, x6; \
adds x17, x17, x10; \
mul x10, x5, x7; \
adcs x19, x19, x10; \
mul x10, x5, x8; \
adcs x20, x20, x10; \
mul x10, x5, x9; \
adcs x21, x21, x10; \
mul x10, x6, x9; \
adcs x22, x22, x10; \
mul x10, x7, x9; \
adcs x23, x23, x10; \
cset x24, hs; \
umulh x10, x5, x6; \
adds x19, x19, x10; \
umulh x10, x5, x7; \
adcs x20, x20, x10; \
umulh x10, x5, x8; \
adcs x21, x21, x10; \
umulh x10, x5, x9; \
adcs x22, x22, x10; \
umulh x10, x6, x9; \
adcs x23, x23, x10; \
umulh x10, x7, x9; \
adc x24, x24, x10; \
mul x10, x8, x9; \
umulh x25, x8, x9; \
adds x24, x24, x10; \
adc x25, x25, xzr; \
adds x11, x11, x11; \
adcs x12, x12, x12; \
adcs x13, x13, x13; \
adcs x14, x14, x14; \
adcs x15, x15, x15; \
adcs x16, x16, x16; \
adcs x17, x17, x17; \
adcs x19, x19, x19; \
adcs x20, x20, x20; \
adcs x21, x21, x21; \
adcs x22, x22, x22; \
adcs x23, x23, x23; \
adcs x24, x24, x24; \
adcs x25, x25, x25; \
cset x0, hs; \
umulh x10, x2, x2; \
adds x11, x11, x10; \
mul x10, x3, x3; \
adcs x12, x12, x10; \
umulh x10, x3, x3; \
adcs x13, x13, x10; \
mul x10, x4, x4; \
adcs x14, x14, x10; \
umulh x10, x4, x4; \
adcs x15, x15, x10; \
mul x10, x5, x5; \
adcs x16, x16, x10; \
umulh x10, x5, x5; \
adcs x17, x17, x10; \
mul x10, x6, x6; \
adcs x19, x19, x10; \
umulh x10, x6, x6; \
adcs x20, x20, x10; \
mul x10, x7, x7; \
adcs x21, x21, x10; \
umulh x10, x7, x7; \
adcs x22, x22, x10; \
mul x10, x8, x8; \
adcs x23, x23, x10; \
umulh x10, x8, x8; \
adcs x24, x24, x10; \
mul x10, x9, x9; \
adcs x25, x25, x10; \
umulh x10, x9, x9; \
adc x0, x0, x10; \
ldr x1, [P1+64]; \
add x1, x1, x1; \
mul x10, x1, x2; \
adds x19, x19, x10; \
umulh x10, x1, x2; \
adcs x20, x20, x10; \
mul x10, x1, x4; \
adcs x21, x21, x10; \
umulh x10, x1, x4; \
adcs x22, x22, x10; \
mul x10, x1, x6; \
adcs x23, x23, x10; \
umulh x10, x1, x6; \
adcs x24, x24, x10; \
mul x10, x1, x8; \
adcs x25, x25, x10; \
umulh x10, x1, x8; \
adcs x0, x0, x10; \
lsr x4, x1, #1; \
mul x4, x4, x4; \
adc x4, x4, xzr; \
mul x10, x1, x3; \
adds x20, x20, x10; \
umulh x10, x1, x3; \
adcs x21, x21, x10; \
mul x10, x1, x5; \
adcs x22, x22, x10; \
umulh x10, x1, x5; \
adcs x23, x23, x10; \
mul x10, x1, x7; \
adcs x24, x24, x10; \
umulh x10, x1, x7; \
adcs x25, x25, x10; \
mul x10, x1, x9; \
adcs x0, x0, x10; \
umulh x10, x1, x9; \
adc x4, x4, x10; \
mul x2, x2, x2; \
cmp xzr, xzr; \
extr x10, x20, x19, #9; \
adcs x2, x2, x10; \
extr x10, x21, x20, #9; \
adcs x11, x11, x10; \
extr x10, x22, x21, #9; \
adcs x12, x12, x10; \
extr x10, x23, x22, #9; \
adcs x13, x13, x10; \
extr x10, x24, x23, #9; \
adcs x14, x14, x10; \
extr x10, x25, x24, #9; \
adcs x15, x15, x10; \
extr x10, x0, x25, #9; \
adcs x16, x16, x10; \
extr x10, x4, x0, #9; \
adcs x17, x17, x10; \
orr x19, x19, #0xfffffffffffffe00; \
lsr x10, x4, #9; \
adcs x19, x19, x10; \
sbcs x2, x2, xzr; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbcs x13, x13, xzr; \
sbcs x14, x14, xzr; \
sbcs x15, x15, xzr; \
sbcs x16, x16, xzr; \
sbcs x17, x17, xzr; \
sbc x19, x19, xzr; \
and x19, x19, #0x1ff; \
stp x2, x11, [P0]; \
stp x12, x13, [P0+16]; \
stp x14, x15, [P0+32]; \
stp x16, x17, [P0+48]; \
str x19, [P0+64]
// Corresponds exactly to bignum_add_p521
#define add_p521(P0,P1,P2) \
cmp xzr, xzr; \
ldp x5, x6, [P1]; \
ldp x4, x3, [P2]; \
adcs x5, x5, x4; \
adcs x6, x6, x3; \
ldp x7, x8, [P1+16]; \
ldp x4, x3, [P2+16]; \
adcs x7, x7, x4; \
adcs x8, x8, x3; \
ldp x9, x10, [P1+32]; \
ldp x4, x3, [P2+32]; \
adcs x9, x9, x4; \
adcs x10, x10, x3; \
ldp x11, x12, [P1+48]; \
ldp x4, x3, [P2+48]; \
adcs x11, x11, x4; \
adcs x12, x12, x3; \
ldr x13, [P1+64]; \
ldr x4, [P2+64]; \
adc x13, x13, x4; \
subs x4, x13, #512; \
csetm x4, hs; \
sbcs x5, x5, xzr; \
and x4, x4, #0x200; \
sbcs x6, x6, xzr; \
sbcs x7, x7, xzr; \
sbcs x8, x8, xzr; \
sbcs x9, x9, xzr; \
sbcs x10, x10, xzr; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbc x13, x13, x4; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]; \
stp x11, x12, [P0+48]; \
str x13, [P0+64]
// Corresponds exactly to bignum_sub_p521
#define sub_p521(P0,P1,P2) \
ldp x5, x6, [P1]; \
ldp x4, x3, [P2]; \
subs x5, x5, x4; \
sbcs x6, x6, x3; \
ldp x7, x8, [P1+16]; \
ldp x4, x3, [P2+16]; \
sbcs x7, x7, x4; \
sbcs x8, x8, x3; \
ldp x9, x10, [P1+32]; \
ldp x4, x3, [P2+32]; \
sbcs x9, x9, x4; \
sbcs x10, x10, x3; \
ldp x11, x12, [P1+48]; \
ldp x4, x3, [P2+48]; \
sbcs x11, x11, x4; \
sbcs x12, x12, x3; \
ldr x13, [P1+64]; \
ldr x4, [P2+64]; \
sbcs x13, x13, x4; \
sbcs x5, x5, xzr; \
sbcs x6, x6, xzr; \
sbcs x7, x7, xzr; \
sbcs x8, x8, xzr; \
sbcs x9, x9, xzr; \
sbcs x10, x10, xzr; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbcs x13, x13, xzr; \
and x13, x13, #0x1ff; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]; \
stp x11, x12, [P0+48]; \
str x13, [P0+64]
// Weak multiplication: the result is congruent to the product mod p_521 but is
// not guaranteed to be fully reduced below p_521 (no final conditional subtraction)
#define weakmul_p521(P0,P1,P2) \
ldp x3, x4, [P1]; \
ldp x5, x6, [P2]; \
mul x15, x3, x5; \
umulh x16, x3, x5; \
mul x14, x3, x6; \
umulh x17, x3, x6; \
adds x16, x16, x14; \
ldp x7, x8, [P2+16]; \
mul x14, x3, x7; \
umulh x19, x3, x7; \
adcs x17, x17, x14; \
mul x14, x3, x8; \
umulh x20, x3, x8; \
adcs x19, x19, x14; \
ldp x9, x10, [P2+32]; \
mul x14, x3, x9; \
umulh x21, x3, x9; \
adcs x20, x20, x14; \
mul x14, x3, x10; \
umulh x22, x3, x10; \
adcs x21, x21, x14; \
ldp x11, x12, [P2+48]; \
mul x14, x3, x11; \
umulh x23, x3, x11; \
adcs x22, x22, x14; \
ldr x13, [P2+64]; \
mul x14, x3, x12; \
umulh x24, x3, x12; \
adcs x23, x23, x14; \
mul x14, x3, x13; \
umulh x1, x3, x13; \
adcs x24, x24, x14; \
adc x1, x1, xzr; \
mul x14, x4, x5; \
adds x16, x16, x14; \
mul x14, x4, x6; \
adcs x17, x17, x14; \
mul x14, x4, x7; \
adcs x19, x19, x14; \
mul x14, x4, x8; \
adcs x20, x20, x14; \
mul x14, x4, x9; \
adcs x21, x21, x14; \
mul x14, x4, x10; \
adcs x22, x22, x14; \
mul x14, x4, x11; \
adcs x23, x23, x14; \
mul x14, x4, x12; \
adcs x24, x24, x14; \
mul x14, x4, x13; \
adcs x1, x1, x14; \
cset x0, hs; \
umulh x14, x4, x5; \
adds x17, x17, x14; \
umulh x14, x4, x6; \
adcs x19, x19, x14; \
umulh x14, x4, x7; \
adcs x20, x20, x14; \
umulh x14, x4, x8; \
adcs x21, x21, x14; \
umulh x14, x4, x9; \
adcs x22, x22, x14; \
umulh x14, x4, x10; \
adcs x23, x23, x14; \
umulh x14, x4, x11; \
adcs x24, x24, x14; \
umulh x14, x4, x12; \
adcs x1, x1, x14; \
umulh x14, x4, x13; \
adc x0, x0, x14; \
stp x15, x16, [P0]; \
ldp x3, x4, [P1+16]; \
mul x14, x3, x5; \
adds x17, x17, x14; \
mul x14, x3, x6; \
adcs x19, x19, x14; \
mul x14, x3, x7; \
adcs x20, x20, x14; \
mul x14, x3, x8; \
adcs x21, x21, x14; \
mul x14, x3, x9; \
adcs x22, x22, x14; \
mul x14, x3, x10; \
adcs x23, x23, x14; \
mul x14, x3, x11; \
adcs x24, x24, x14; \
mul x14, x3, x12; \
adcs x1, x1, x14; \
mul x14, x3, x13; \
adcs x0, x0, x14; \
cset x15, hs; \
umulh x14, x3, x5; \
adds x19, x19, x14; \
umulh x14, x3, x6; \
adcs x20, x20, x14; \
umulh x14, x3, x7; \
adcs x21, x21, x14; \
umulh x14, x3, x8; \
adcs x22, x22, x14; \
umulh x14, x3, x9; \
adcs x23, x23, x14; \
umulh x14, x3, x10; \
adcs x24, x24, x14; \
umulh x14, x3, x11; \
adcs x1, x1, x14; \
umulh x14, x3, x12; \
adcs x0, x0, x14; \
umulh x14, x3, x13; \
adc x15, x15, x14; \
mul x14, x4, x5; \
adds x19, x19, x14; \
mul x14, x4, x6; \
adcs x20, x20, x14; \
mul x14, x4, x7; \
adcs x21, x21, x14; \
mul x14, x4, x8; \
adcs x22, x22, x14; \
mul x14, x4, x9; \
adcs x23, x23, x14; \
mul x14, x4, x10; \
adcs x24, x24, x14; \
mul x14, x4, x11; \
adcs x1, x1, x14; \
mul x14, x4, x12; \
adcs x0, x0, x14; \
mul x14, x4, x13; \
adcs x15, x15, x14; \
cset x16, hs; \
umulh x14, x4, x5; \
adds x20, x20, x14; \
umulh x14, x4, x6; \
adcs x21, x21, x14; \
umulh x14, x4, x7; \
adcs x22, x22, x14; \
umulh x14, x4, x8; \
adcs x23, x23, x14; \
umulh x14, x4, x9; \
adcs x24, x24, x14; \
umulh x14, x4, x10; \
adcs x1, x1, x14; \
umulh x14, x4, x11; \
adcs x0, x0, x14; \
umulh x14, x4, x12; \
adcs x15, x15, x14; \
umulh x14, x4, x13; \
adc x16, x16, x14; \
stp x17, x19, [P0+16]; \
ldp x3, x4, [P1+32]; \
mul x14, x3, x5; \
adds x20, x20, x14; \
mul x14, x3, x6; \
adcs x21, x21, x14; \
mul x14, x3, x7; \
adcs x22, x22, x14; \
mul x14, x3, x8; \
adcs x23, x23, x14; \
mul x14, x3, x9; \
adcs x24, x24, x14; \
mul x14, x3, x10; \
adcs x1, x1, x14; \
mul x14, x3, x11; \
adcs x0, x0, x14; \
mul x14, x3, x12; \
adcs x15, x15, x14; \
mul x14, x3, x13; \
adcs x16, x16, x14; \
cset x17, hs; \
umulh x14, x3, x5; \
adds x21, x21, x14; \
umulh x14, x3, x6; \
adcs x22, x22, x14; \
umulh x14, x3, x7; \
adcs x23, x23, x14; \
umulh x14, x3, x8; \
adcs x24, x24, x14; \
umulh x14, x3, x9; \
adcs x1, x1, x14; \
umulh x14, x3, x10; \
adcs x0, x0, x14; \
umulh x14, x3, x11; \
adcs x15, x15, x14; \
umulh x14, x3, x12; \
adcs x16, x16, x14; \
umulh x14, x3, x13; \
adc x17, x17, x14; \
mul x14, x4, x5; \
adds x21, x21, x14; \
mul x14, x4, x6; \
adcs x22, x22, x14; \
mul x14, x4, x7; \
adcs x23, x23, x14; \
mul x14, x4, x8; \
adcs x24, x24, x14; \
mul x14, x4, x9; \
adcs x1, x1, x14; \
mul x14, x4, x10; \
adcs x0, x0, x14; \
mul x14, x4, x11; \
adcs x15, x15, x14; \
mul x14, x4, x12; \
adcs x16, x16, x14; \
mul x14, x4, x13; \
adcs x17, x17, x14; \
cset x19, hs; \
umulh x14, x4, x5; \
adds x22, x22, x14; \
umulh x14, x4, x6; \
adcs x23, x23, x14; \
umulh x14, x4, x7; \
adcs x24, x24, x14; \
umulh x14, x4, x8; \
adcs x1, x1, x14; \
umulh x14, x4, x9; \
adcs x0, x0, x14; \
umulh x14, x4, x10; \
adcs x15, x15, x14; \
umulh x14, x4, x11; \
adcs x16, x16, x14; \
umulh x14, x4, x12; \
adcs x17, x17, x14; \
umulh x14, x4, x13; \
adc x19, x19, x14; \
stp x20, x21, [P0+32]; \
ldp x3, x4, [P1+48]; \
mul x14, x3, x5; \
adds x22, x22, x14; \
mul x14, x3, x6; \
adcs x23, x23, x14; \
mul x14, x3, x7; \
adcs x24, x24, x14; \
mul x14, x3, x8; \
adcs x1, x1, x14; \
mul x14, x3, x9; \
adcs x0, x0, x14; \
mul x14, x3, x10; \
adcs x15, x15, x14; \
mul x14, x3, x11; \
adcs x16, x16, x14; \
mul x14, x3, x12; \
adcs x17, x17, x14; \
mul x14, x3, x13; \
adcs x19, x19, x14; \
cset x20, hs; \
umulh x14, x3, x5; \
adds x23, x23, x14; \
umulh x14, x3, x6; \
adcs x24, x24, x14; \
umulh x14, x3, x7; \
adcs x1, x1, x14; \
umulh x14, x3, x8; \
adcs x0, x0, x14; \
umulh x14, x3, x9; \
adcs x15, x15, x14; \
umulh x14, x3, x10; \
adcs x16, x16, x14; \
umulh x14, x3, x11; \
adcs x17, x17, x14; \
umulh x14, x3, x12; \
adcs x19, x19, x14; \
umulh x14, x3, x13; \
adc x20, x20, x14; \
mul x14, x4, x5; \
adds x23, x23, x14; \
mul x14, x4, x6; \
adcs x24, x24, x14; \
mul x14, x4, x7; \
adcs x1, x1, x14; \
mul x14, x4, x8; \
adcs x0, x0, x14; \
mul x14, x4, x9; \
adcs x15, x15, x14; \
mul x14, x4, x10; \
adcs x16, x16, x14; \
mul x14, x4, x11; \
adcs x17, x17, x14; \
mul x14, x4, x12; \
adcs x19, x19, x14; \
mul x14, x4, x13; \
adcs x20, x20, x14; \
cset x21, hs; \
umulh x14, x4, x5; \
adds x24, x24, x14; \
umulh x14, x4, x6; \
adcs x1, x1, x14; \
umulh x14, x4, x7; \
adcs x0, x0, x14; \
umulh x14, x4, x8; \
adcs x15, x15, x14; \
umulh x14, x4, x9; \
adcs x16, x16, x14; \
umulh x14, x4, x10; \
adcs x17, x17, x14; \
umulh x14, x4, x11; \
adcs x19, x19, x14; \
umulh x14, x4, x12; \
adcs x20, x20, x14; \
umulh x14, x4, x13; \
adc x21, x21, x14; \
stp x22, x23, [P0+48]; \
ldr x3, [P1+64]; \
mul x14, x3, x5; \
adds x24, x24, x14; \
mul x14, x3, x6; \
adcs x1, x1, x14; \
mul x14, x3, x7; \
adcs x0, x0, x14; \
mul x14, x3, x8; \
adcs x15, x15, x14; \
mul x14, x3, x9; \
adcs x16, x16, x14; \
mul x14, x3, x10; \
adcs x17, x17, x14; \
mul x14, x3, x11; \
adcs x19, x19, x14; \
mul x14, x3, x12; \
adcs x20, x20, x14; \
mul x14, x3, x13; \
adc x21, x21, x14; \
umulh x14, x3, x5; \
adds x1, x1, x14; \
umulh x14, x3, x6; \
adcs x0, x0, x14; \
umulh x14, x3, x7; \
adcs x15, x15, x14; \
umulh x14, x3, x8; \
adcs x16, x16, x14; \
umulh x14, x3, x9; \
adcs x17, x17, x14; \
umulh x14, x3, x10; \
adcs x19, x19, x14; \
umulh x14, x3, x11; \
adcs x20, x20, x14; \
umulh x14, x3, x12; \
adc x21, x21, x14; \
ldp x5, x6, [P0]; \
extr x14, x1, x24, #9; \
adds x5, x5, x14; \
extr x14, x0, x1, #9; \
adcs x6, x6, x14; \
ldp x7, x8, [P0+16]; \
extr x14, x15, x0, #9; \
adcs x7, x7, x14; \
extr x14, x16, x15, #9; \
adcs x8, x8, x14; \
ldp x9, x10, [P0+32]; \
extr x14, x17, x16, #9; \
adcs x9, x9, x14; \
extr x14, x19, x17, #9; \
adcs x10, x10, x14; \
ldp x11, x12, [P0+48]; \
extr x14, x20, x19, #9; \
adcs x11, x11, x14; \
extr x14, x21, x20, #9; \
adcs x12, x12, x14; \
and x13, x24, #0x1ff; \
lsr x14, x21, #9; \
adc x13, x13, x14; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]; \
stp x11, x12, [P0+48]; \
str x13, [P0+64]
// P0 = C * P1 - D * P2 == C * P1 + D * (p_521 - P2)
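// (When P2 < p_521, as in its uses below, p_521 - P2 is simply the bitwise
// complement of P2 within 521 bits, which is why the negation is done with mvn
// on the low words and eor #0x1ff on the 9-bit top word.)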
#define cmsub_p521(P0,C,P1,D,P2) \
ldp x6, x7, [P1]; \
mov x1, #(C); \
mul x3, x1, x6; \
mul x4, x1, x7; \
umulh x6, x1, x6; \
adds x4, x4, x6; \
umulh x7, x1, x7; \
ldp x8, x9, [P1+16]; \
mul x5, x1, x8; \
mul x6, x1, x9; \
umulh x8, x1, x8; \
adcs x5, x5, x7; \
umulh x9, x1, x9; \
adcs x6, x6, x8; \
ldp x10, x11, [P1+32]; \
mul x7, x1, x10; \
mul x8, x1, x11; \
umulh x10, x1, x10; \
adcs x7, x7, x9; \
umulh x11, x1, x11; \
adcs x8, x8, x10; \
ldp x12, x13, [P1+48]; \
mul x9, x1, x12; \
mul x10, x1, x13; \
umulh x12, x1, x12; \
adcs x9, x9, x11; \
umulh x13, x1, x13; \
adcs x10, x10, x12; \
ldr x14, [P1+64]; \
mul x11, x1, x14; \
adc x11, x11, x13; \
mov x1, #(D); \
ldp x20, x21, [P2]; \
mvn x20, x20; \
mul x0, x1, x20; \
umulh x20, x1, x20; \
adds x3, x3, x0; \
mvn x21, x21; \
mul x0, x1, x21; \
umulh x21, x1, x21; \
adcs x4, x4, x0; \
ldp x22, x23, [P2+16]; \
mvn x22, x22; \
mul x0, x1, x22; \
umulh x22, x1, x22; \
adcs x5, x5, x0; \
mvn x23, x23; \
mul x0, x1, x23; \
umulh x23, x1, x23; \
adcs x6, x6, x0; \
ldp x17, x19, [P2+32]; \
mvn x17, x17; \
mul x0, x1, x17; \
umulh x17, x1, x17; \
adcs x7, x7, x0; \
mvn x19, x19; \
mul x0, x1, x19; \
umulh x19, x1, x19; \
adcs x8, x8, x0; \
ldp x2, x16, [P2+48]; \
mvn x2, x2; \
mul x0, x1, x2; \
umulh x2, x1, x2; \
adcs x9, x9, x0; \
mvn x16, x16; \
mul x0, x1, x16; \
umulh x16, x1, x16; \
adcs x10, x10, x0; \
ldr x0, [P2+64]; \
eor x0, x0, #0x1ff; \
mul x0, x1, x0; \
adc x11, x11, x0; \
adds x4, x4, x20; \
adcs x5, x5, x21; \
and x15, x4, x5; \
adcs x6, x6, x22; \
and x15, x15, x6; \
adcs x7, x7, x23; \
and x15, x15, x7; \
adcs x8, x8, x17; \
and x15, x15, x8; \
adcs x9, x9, x19; \
and x15, x15, x9; \
adcs x10, x10, x2; \
and x15, x15, x10; \
adc x11, x11, x16; \
lsr x12, x11, #9; \
orr x11, x11, #0xfffffffffffffe00; \
cmp xzr, xzr; \
adcs xzr, x3, x12; \
adcs xzr, x15, xzr; \
adcs xzr, x11, xzr; \
adcs x3, x3, x12; \
adcs x4, x4, xzr; \
adcs x5, x5, xzr; \
adcs x6, x6, xzr; \
adcs x7, x7, xzr; \
adcs x8, x8, xzr; \
adcs x9, x9, xzr; \
adcs x10, x10, xzr; \
adc x11, x11, xzr; \
and x11, x11, #0x1ff; \
stp x3, x4, [P0]; \
stp x5, x6, [P0+16]; \
stp x7, x8, [P0+32]; \
stp x9, x10, [P0+48]; \
str x11, [P0+64]
// P0 = 3 * P1 - 8 * P2 == 3 * P1 + 8 * (p_521 - P2)
#define cmsub38_p521(P0,P1,P2) \
ldp x6, x7, [P1]; \
lsl x3, x6, #1; \
adds x3, x3, x6; \
extr x4, x7, x6, #63; \
adcs x4, x4, x7; \
ldp x8, x9, [P1+16]; \
extr x5, x8, x7, #63; \
adcs x5, x5, x8; \
extr x6, x9, x8, #63; \
adcs x6, x6, x9; \
ldp x10, x11, [P1+32]; \
extr x7, x10, x9, #63; \
adcs x7, x7, x10; \
extr x8, x11, x10, #63; \
adcs x8, x8, x11; \
ldp x12, x13, [P1+48]; \
extr x9, x12, x11, #63; \
adcs x9, x9, x12; \
extr x10, x13, x12, #63; \
adcs x10, x10, x13; \
ldr x14, [P1+64]; \
extr x11, x14, x13, #63; \
adc x11, x11, x14; \
ldp x20, x21, [P2]; \
mvn x20, x20; \
lsl x0, x20, #3; \
adds x3, x3, x0; \
mvn x21, x21; \
extr x0, x21, x20, #61; \
adcs x4, x4, x0; \
ldp x22, x23, [P2+16]; \
mvn x22, x22; \
extr x0, x22, x21, #61; \
adcs x5, x5, x0; \
and x15, x4, x5; \
mvn x23, x23; \
extr x0, x23, x22, #61; \
adcs x6, x6, x0; \
and x15, x15, x6; \
ldp x20, x21, [P2+32]; \
mvn x20, x20; \
extr x0, x20, x23, #61; \
adcs x7, x7, x0; \
and x15, x15, x7; \
mvn x21, x21; \
extr x0, x21, x20, #61; \
adcs x8, x8, x0; \
and x15, x15, x8; \
ldp x22, x23, [P2+48]; \
mvn x22, x22; \
extr x0, x22, x21, #61; \
adcs x9, x9, x0; \
and x15, x15, x9; \
mvn x23, x23; \
extr x0, x23, x22, #61; \
adcs x10, x10, x0; \
and x15, x15, x10; \
ldr x0, [P2+64]; \
eor x0, x0, #0x1ff; \
extr x0, x0, x23, #61; \
adc x11, x11, x0; \
lsr x12, x11, #9; \
orr x11, x11, #0xfffffffffffffe00; \
cmp xzr, xzr; \
adcs xzr, x3, x12; \
adcs xzr, x15, xzr; \
adcs xzr, x11, xzr; \
adcs x3, x3, x12; \
adcs x4, x4, xzr; \
adcs x5, x5, xzr; \
adcs x6, x6, xzr; \
adcs x7, x7, xzr; \
adcs x8, x8, xzr; \
adcs x9, x9, xzr; \
adcs x10, x10, xzr; \
adc x11, x11, xzr; \
and x11, x11, #0x1ff; \
stp x3, x4, [P0]; \
stp x5, x6, [P0+16]; \
stp x7, x8, [P0+32]; \
stp x9, x10, [P0+48]; \
str x11, [P0+64]
// P0 = 4 * P1 - P2 = 4 * P1 + (p_521 - P2)
#define cmsub41_p521(P0,P1,P2) \
ldp x6, x7, [P1]; \
lsl x3, x6, #2; \
extr x4, x7, x6, #62; \
ldp x8, x9, [P1+16]; \
extr x5, x8, x7, #62; \
extr x6, x9, x8, #62; \
ldp x10, x11, [P1+32]; \
extr x7, x10, x9, #62; \
extr x8, x11, x10, #62; \
ldp x12, x13, [P1+48]; \
extr x9, x12, x11, #62; \
extr x10, x13, x12, #62; \
ldr x14, [P1+64]; \
extr x11, x14, x13, #62; \
ldp x0, x1, [P2]; \
mvn x0, x0; \
adds x3, x3, x0; \
sbcs x4, x4, x1; \
ldp x0, x1, [P2+16]; \
sbcs x5, x5, x0; \
and x15, x4, x5; \
sbcs x6, x6, x1; \
and x15, x15, x6; \
ldp x0, x1, [P2+32]; \
sbcs x7, x7, x0; \
and x15, x15, x7; \
sbcs x8, x8, x1; \
and x15, x15, x8; \
ldp x0, x1, [P2+48]; \
sbcs x9, x9, x0; \
and x15, x15, x9; \
sbcs x10, x10, x1; \
and x15, x15, x10; \
ldr x0, [P2+64]; \
eor x0, x0, #0x1ff; \
adc x11, x11, x0; \
lsr x12, x11, #9; \
orr x11, x11, #0xfffffffffffffe00; \
cmp xzr, xzr; \
adcs xzr, x3, x12; \
adcs xzr, x15, xzr; \
adcs xzr, x11, xzr; \
adcs x3, x3, x12; \
adcs x4, x4, xzr; \
adcs x5, x5, xzr; \
adcs x6, x6, xzr; \
adcs x7, x7, xzr; \
adcs x8, x8, xzr; \
adcs x9, x9, xzr; \
adcs x10, x10, xzr; \
adc x11, x11, xzr; \
and x11, x11, #0x1ff; \
stp x3, x4, [P0]; \
stp x5, x6, [P0+16]; \
stp x7, x8, [P0+32]; \
stp x9, x10, [P0+48]; \
str x11, [P0+64]
S2N_BN_SYMBOL(p521_jdouble_alt):
// Save regs and make room on stack for temporary variables
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, x28, [sp, #-16]!
sub sp, sp, NSPACE
// Move the input arguments to stable places
mov input_z, x0
mov input_x, x1
// Main code, just a sequence of basic field operations
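// In terms of the Jacobian coordinates (x,y,z), the sequence below in effect
// computes the usual a = -3 doubling formulas: with m = 3 * (x^2 - z^4) = 3 * x2p,
//   x' = m^2 - 8 * x * y^2                   ( = 4 * xy2 - d )
//   y' = m * (4 * x * y^2 - x') - 8 * y^4    ( = 3 * dx2 - 8 * y4 )
//   z' = 2 * y * z                           ( = (y + z)^2 - y^2 - z^2 )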
// z2 = z^2
// y2 = y^2
sqr_p521(z2,z_1)
sqr_p521(y2,y_1)
// x2p = x^2 - z^4 = (x + z^2) * (x - z^2)
add_p521(t1,x_1,z2)
sub_p521(t2,x_1,z2)
mul_p521(x2p,t1,t2)
// t1 = y + z
// x4p = x2p^2
// xy2 = x * y^2
add_p521(t1,y_1,z_1)
sqr_p521(x4p,x2p)
weakmul_p521(xy2,x_1,y2)
// t2 = (y + z)^2
sqr_p521(t2,t1)
// d = 12 * xy2 - 9 * x4p
// t1 = y^2 + 2 * y * z
cmsub_p521(d,12,xy2,9,x4p)
sub_p521(t1,t2,z2)
// y4 = y^4
sqr_p521(y4,y2)
// z_3' = 2 * y * z
// dx2 = d * x2p
sub_p521(z_3,t1,y2)
weakmul_p521(dx2,d,x2p)
// x' = 4 * xy2 - d
cmsub41_p521(x_3,xy2,d)
// y' = 3 * dx2 - 8 * y4
cmsub38_p521(y_3,dx2,y4)
// Restore stack and registers
add sp, sp, NSPACE
ldp x27, x28, [sp], 16
ldp x25, x26, [sp], 16
ldp x23, x24, [sp], 16
ldp x21, x22, [sp], 16
ldp x19, x20, [sp], 16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_mul_p521_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Multiply modulo p_521, z := (x * y) mod p_521, assuming x and y reduced
// Inputs x[9], y[9]; output z[9]
//
// extern void bignum_mul_p521_alt
// (uint64_t z[static 9], uint64_t x[static 9], uint64_t y[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_p521_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_p521_alt)
.text
.balign 4
#define z x0
#define x x1
#define y x2
// These are repeated mod 2 as we load pairs of inputs
#define a0 x3
#define a1 x4
#define a2 x3
#define a3 x4
#define a4 x3
#define a5 x4
#define a6 x3
#define a7 x4
#define a8 x3
#define b0 x5
#define b1 x6
#define b2 x7
#define b3 x8
#define b4 x9
#define b5 x10
#define b6 x11
#define b7 x12
#define b8 x13
#define t x14
// These repeat mod 11 as we stash some intermediate results in the
// temporary buffer on the stack.
#define u0 x15
#define u1 x16
#define u2 x17
#define u3 x19
#define u4 x20
#define u5 x21
#define u6 x22
#define u7 x23
#define u8 x24
#define u9 x25
#define u10 x26
#define u11 x15
#define u12 x16
#define u13 x17
#define u14 x19
#define u15 x20
#define u16 x21
S2N_BN_SYMBOL(bignum_mul_p521_alt):
// Save more registers and make temporary space on stack
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
sub sp, sp, #64
// Load operands and set up row 0 = [u9;...;u0] = a0 * [b8;...;b0]
ldp a0, a1, [x]
ldp b0, b1, [y]
mul u0, a0, b0
umulh u1, a0, b0
mul t, a0, b1
umulh u2, a0, b1
adds u1, u1, t
ldp b2, b3, [y, #16]
mul t, a0, b2
umulh u3, a0, b2
adcs u2, u2, t
mul t, a0, b3
umulh u4, a0, b3
adcs u3, u3, t
ldp b4, b5, [y, #32]
mul t, a0, b4
umulh u5, a0, b4
adcs u4, u4, t
mul t, a0, b5
umulh u6, a0, b5
adcs u5, u5, t
ldp b6, b7, [y, #48]
mul t, a0, b6
umulh u7, a0, b6
adcs u6, u6, t
ldr b8, [y, #64]
mul t, a0, b7
umulh u8, a0, b7
adcs u7, u7, t
mul t, a0, b8
umulh u9, a0, b8
adcs u8, u8, t
adc u9, u9, xzr
// Row 1 = [u10;...;u0] = [a1;a0] * [b8;...;b0]
mul t, a1, b0
adds u1, u1, t
mul t, a1, b1
adcs u2, u2, t
mul t, a1, b2
adcs u3, u3, t
mul t, a1, b3
adcs u4, u4, t
mul t, a1, b4
adcs u5, u5, t
mul t, a1, b5
adcs u6, u6, t
mul t, a1, b6
adcs u7, u7, t
mul t, a1, b7
adcs u8, u8, t
mul t, a1, b8
adcs u9, u9, t
cset u10, cs
umulh t, a1, b0
adds u2, u2, t
umulh t, a1, b1
adcs u3, u3, t
umulh t, a1, b2
adcs u4, u4, t
umulh t, a1, b3
adcs u5, u5, t
umulh t, a1, b4
adcs u6, u6, t
umulh t, a1, b5
adcs u7, u7, t
umulh t, a1, b6
adcs u8, u8, t
umulh t, a1, b7
adcs u9, u9, t
umulh t, a1, b8
adc u10, u10, t
stp u0, u1, [sp]
// Row 2 = [u11;...;u0] = [a2;a1;a0] * [b8;...;b0]
ldp a2, a3, [x, #16]
mul t, a2, b0
adds u2, u2, t
mul t, a2, b1
adcs u3, u3, t
mul t, a2, b2
adcs u4, u4, t
mul t, a2, b3
adcs u5, u5, t
mul t, a2, b4
adcs u6, u6, t
mul t, a2, b5
adcs u7, u7, t
mul t, a2, b6
adcs u8, u8, t
mul t, a2, b7
adcs u9, u9, t
mul t, a2, b8
adcs u10, u10, t
cset u11, cs
umulh t, a2, b0
adds u3, u3, t
umulh t, a2, b1
adcs u4, u4, t
umulh t, a2, b2
adcs u5, u5, t
umulh t, a2, b3
adcs u6, u6, t
umulh t, a2, b4
adcs u7, u7, t
umulh t, a2, b5
adcs u8, u8, t
umulh t, a2, b6
adcs u9, u9, t
umulh t, a2, b7
adcs u10, u10, t
umulh t, a2, b8
adc u11, u11, t
// Row 3 = [u12;...;u0] = [a3;a2;a1;a0] * [b8;...;b0]
mul t, a3, b0
adds u3, u3, t
mul t, a3, b1
adcs u4, u4, t
mul t, a3, b2
adcs u5, u5, t
mul t, a3, b3
adcs u6, u6, t
mul t, a3, b4
adcs u7, u7, t
mul t, a3, b5
adcs u8, u8, t
mul t, a3, b6
adcs u9, u9, t
mul t, a3, b7
adcs u10, u10, t
mul t, a3, b8
adcs u11, u11, t
cset u12, cs
umulh t, a3, b0
adds u4, u4, t
umulh t, a3, b1
adcs u5, u5, t
umulh t, a3, b2
adcs u6, u6, t
umulh t, a3, b3
adcs u7, u7, t
umulh t, a3, b4
adcs u8, u8, t
umulh t, a3, b5
adcs u9, u9, t
umulh t, a3, b6
adcs u10, u10, t
umulh t, a3, b7
adcs u11, u11, t
umulh t, a3, b8
adc u12, u12, t
stp u2, u3, [sp, #16]
// Row 4 = [u13;...;u0] = [a4;a3;a2;a1;a0] * [b8;...;b0]
ldp a4, a5, [x, #32]
mul t, a4, b0
adds u4, u4, t
mul t, a4, b1
adcs u5, u5, t
mul t, a4, b2
adcs u6, u6, t
mul t, a4, b3
adcs u7, u7, t
mul t, a4, b4
adcs u8, u8, t
mul t, a4, b5
adcs u9, u9, t
mul t, a4, b6
adcs u10, u10, t
mul t, a4, b7
adcs u11, u11, t
mul t, a4, b8
adcs u12, u12, t
cset u13, cs
umulh t, a4, b0
adds u5, u5, t
umulh t, a4, b1
adcs u6, u6, t
umulh t, a4, b2
adcs u7, u7, t
umulh t, a4, b3
adcs u8, u8, t
umulh t, a4, b4
adcs u9, u9, t
umulh t, a4, b5
adcs u10, u10, t
umulh t, a4, b6
adcs u11, u11, t
umulh t, a4, b7
adcs u12, u12, t
umulh t, a4, b8
adc u13, u13, t
// Row 5 = [u14;...;u0] = [a5;a4;a3;a2;a1;a0] * [b8;...;b0]
mul t, a5, b0
adds u5, u5, t
mul t, a5, b1
adcs u6, u6, t
mul t, a5, b2
adcs u7, u7, t
mul t, a5, b3
adcs u8, u8, t
mul t, a5, b4
adcs u9, u9, t
mul t, a5, b5
adcs u10, u10, t
mul t, a5, b6
adcs u11, u11, t
mul t, a5, b7
adcs u12, u12, t
mul t, a5, b8
adcs u13, u13, t
cset u14, cs
umulh t, a5, b0
adds u6, u6, t
umulh t, a5, b1
adcs u7, u7, t
umulh t, a5, b2
adcs u8, u8, t
umulh t, a5, b3
adcs u9, u9, t
umulh t, a5, b4
adcs u10, u10, t
umulh t, a5, b5
adcs u11, u11, t
umulh t, a5, b6
adcs u12, u12, t
umulh t, a5, b7
adcs u13, u13, t
umulh t, a5, b8
adc u14, u14, t
stp u4, u5, [sp, #32]
// Row 6 = [u15;...;u0] = [a6;a5;a4;a3;a2;a1;a0] * [b8;...;b0]
ldp a6, a7, [x, #48]
mul t, a6, b0
adds u6, u6, t
mul t, a6, b1
adcs u7, u7, t
mul t, a6, b2
adcs u8, u8, t
mul t, a6, b3
adcs u9, u9, t
mul t, a6, b4
adcs u10, u10, t
mul t, a6, b5
adcs u11, u11, t
mul t, a6, b6
adcs u12, u12, t
mul t, a6, b7
adcs u13, u13, t
mul t, a6, b8
adcs u14, u14, t
cset u15, cs
umulh t, a6, b0
adds u7, u7, t
umulh t, a6, b1
adcs u8, u8, t
umulh t, a6, b2
adcs u9, u9, t
umulh t, a6, b3
adcs u10, u10, t
umulh t, a6, b4
adcs u11, u11, t
umulh t, a6, b5
adcs u12, u12, t
umulh t, a6, b6
adcs u13, u13, t
umulh t, a6, b7
adcs u14, u14, t
umulh t, a6, b8
adc u15, u15, t
// Row 7 = [u16;...;u0] = [a7;a6;a5;a4;a3;a2;a1;a0] * [b8;...;b0]
mul t, a7, b0
adds u7, u7, t
mul t, a7, b1
adcs u8, u8, t
mul t, a7, b2
adcs u9, u9, t
mul t, a7, b3
adcs u10, u10, t
mul t, a7, b4
adcs u11, u11, t
mul t, a7, b5
adcs u12, u12, t
mul t, a7, b6
adcs u13, u13, t
mul t, a7, b7
adcs u14, u14, t
mul t, a7, b8
adcs u15, u15, t
cset u16, cs
umulh t, a7, b0
adds u8, u8, t
umulh t, a7, b1
adcs u9, u9, t
umulh t, a7, b2
adcs u10, u10, t
umulh t, a7, b3
adcs u11, u11, t
umulh t, a7, b4
adcs u12, u12, t
umulh t, a7, b5
adcs u13, u13, t
umulh t, a7, b6
adcs u14, u14, t
umulh t, a7, b7
adcs u15, u15, t
umulh t, a7, b8
adc u16, u16, t
stp u6, u7, [sp, #48]
// Row 8 = [u16;...;u0] = [a8;a7;a6;a5;a4;a3;a2;a1;a0] * [b8;...;b0]
ldr a8, [x, #64]
mul t, a8, b0
adds u8, u8, t
mul t, a8, b1
adcs u9, u9, t
mul t, a8, b2
adcs u10, u10, t
mul t, a8, b3
adcs u11, u11, t
mul t, a8, b4
adcs u12, u12, t
mul t, a8, b5
adcs u13, u13, t
mul t, a8, b6
adcs u14, u14, t
mul t, a8, b7
adcs u15, u15, t
mul t, a8, b8
adc u16, u16, t
umulh t, a8, b0
adds u9, u9, t
umulh t, a8, b1
adcs u10, u10, t
umulh t, a8, b2
adcs u11, u11, t
umulh t, a8, b3
adcs u12, u12, t
umulh t, a8, b4
adcs u13, u13, t
umulh t, a8, b5
adcs u14, u14, t
umulh t, a8, b6
adcs u15, u15, t
umulh t, a8, b7
adc u16, u16, t
// Now we have the full product, which we consider as
// 2^521 * h + l. Form h + l + 1
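// Here l is the low 521 bits, i.e. [u8 mod 2^9; u7; ...; u0] with u0..u7
// stashed on the stack above, and h = [u16; ...; u8] >> 9. Since
// 2^521 == 1 (mod p_521), the full product is congruent to h + l.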
subs xzr, xzr, xzr
ldp b0, b1, [sp]
extr t, u9, u8, #9
adcs b0, b0, t
extr t, u10, u9, #9
adcs b1, b1, t
ldp b2, b3, [sp, #16]
extr t, u11, u10, #9
adcs b2, b2, t
extr t, u12, u11, #9
adcs b3, b3, t
ldp b4, b5, [sp, #32]
extr t, u13, u12, #9
adcs b4, b4, t
extr t, u14, u13, #9
adcs b5, b5, t
ldp b6, b7, [sp, #48]
extr t, u15, u14, #9
adcs b6, b6, t
extr t, u16, u15, #9
adcs b7, b7, t
orr b8, u8, #~0x1FF
lsr t, u16, #9
adcs b8, b8, t
// Now CF is set if h + l + 1 >= 2^521, which means it's already
// the answer, while if ~CF the answer is h + l so we should subtract
// 1 (all considered in 521 bits). Hence subtract ~CF and mask.
sbcs b0, b0, xzr
sbcs b1, b1, xzr
sbcs b2, b2, xzr
sbcs b3, b3, xzr
sbcs b4, b4, xzr
sbcs b5, b5, xzr
sbcs b6, b6, xzr
sbcs b7, b7, xzr
sbc b8, b8, xzr
and b8, b8, #0x1FF
// Store back digits of final result
stp b0, b1, [z]
stp b2, b3, [z, #16]
stp b4, b5, [z, #32]
stp b6, b7, [z, #48]
str b8, [z, #64]
// Restore registers
add sp, sp, #64
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_tolebytes_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert 9-digit 528-bit bignum to little-endian bytes
//
// extern void bignum_tolebytes_p521
// (uint8_t z[static 66], uint64_t x[static 9]);
//
// This is assuming the input x is < 2^528 so that it fits in 66 bytes.
// In particular this holds if x < p_521 < 2^521 < 2^528.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tolebytes_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tolebytes_p521)
.text
.balign 4
#define z x0
#define x x1
#define d x2
#define dshort w2
S2N_BN_SYMBOL(bignum_tolebytes_p521):
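// Each 64-bit word is emitted as 8 bytes, least significant first, by
// storing the low byte and then shifting the word right by 8 bits in turn.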
// word 0
ldr d, [x]
strb dshort, [z]
lsr d, d, #8
strb dshort, [z, #1]
lsr d, d, #8
strb dshort, [z, #2]
lsr d, d, #8
strb dshort, [z, #3]
lsr d, d, #8
strb dshort, [z, #4]
lsr d, d, #8
strb dshort, [z, #5]
lsr d, d, #8
strb dshort, [z, #6]
lsr d, d, #8
strb dshort, [z, #7]
// word 1
ldr d, [x, #8]
strb dshort, [z, #8]
lsr d, d, #8
strb dshort, [z, #9]
lsr d, d, #8
strb dshort, [z, #10]
lsr d, d, #8
strb dshort, [z, #11]
lsr d, d, #8
strb dshort, [z, #12]
lsr d, d, #8
strb dshort, [z, #13]
lsr d, d, #8
strb dshort, [z, #14]
lsr d, d, #8
strb dshort, [z, #15]
// word 2
ldr d, [x, #16]
strb dshort, [z, #16]
lsr d, d, #8
strb dshort, [z, #17]
lsr d, d, #8
strb dshort, [z, #18]
lsr d, d, #8
strb dshort, [z, #19]
lsr d, d, #8
strb dshort, [z, #20]
lsr d, d, #8
strb dshort, [z, #21]
lsr d, d, #8
strb dshort, [z, #22]
lsr d, d, #8
strb dshort, [z, #23]
// word 3
ldr d, [x, #24]
strb dshort, [z, #24]
lsr d, d, #8
strb dshort, [z, #25]
lsr d, d, #8
strb dshort, [z, #26]
lsr d, d, #8
strb dshort, [z, #27]
lsr d, d, #8
strb dshort, [z, #28]
lsr d, d, #8
strb dshort, [z, #29]
lsr d, d, #8
strb dshort, [z, #30]
lsr d, d, #8
strb dshort, [z, #31]
// word 4
ldr d, [x, #32]
strb dshort, [z, #32]
lsr d, d, #8
strb dshort, [z, #33]
lsr d, d, #8
strb dshort, [z, #34]
lsr d, d, #8
strb dshort, [z, #35]
lsr d, d, #8
strb dshort, [z, #36]
lsr d, d, #8
strb dshort, [z, #37]
lsr d, d, #8
strb dshort, [z, #38]
lsr d, d, #8
strb dshort, [z, #39]
// word 5
ldr d, [x, #40]
strb dshort, [z, #40]
lsr d, d, #8
strb dshort, [z, #41]
lsr d, d, #8
strb dshort, [z, #42]
lsr d, d, #8
strb dshort, [z, #43]
lsr d, d, #8
strb dshort, [z, #44]
lsr d, d, #8
strb dshort, [z, #45]
lsr d, d, #8
strb dshort, [z, #46]
lsr d, d, #8
strb dshort, [z, #47]
// word 6
ldr d, [x, #48]
strb dshort, [z, #48]
lsr d, d, #8
strb dshort, [z, #49]
lsr d, d, #8
strb dshort, [z, #50]
lsr d, d, #8
strb dshort, [z, #51]
lsr d, d, #8
strb dshort, [z, #52]
lsr d, d, #8
strb dshort, [z, #53]
lsr d, d, #8
strb dshort, [z, #54]
lsr d, d, #8
strb dshort, [z, #55]
// word 7
ldr d, [x, #56]
strb dshort, [z, #56]
lsr d, d, #8
strb dshort, [z, #57]
lsr d, d, #8
strb dshort, [z, #58]
lsr d, d, #8
strb dshort, [z, #59]
lsr d, d, #8
strb dshort, [z, #60]
lsr d, d, #8
strb dshort, [z, #61]
lsr d, d, #8
strb dshort, [z, #62]
lsr d, d, #8
strb dshort, [z, #63]
// word 8
ldr d, [x, #64]
strb dshort, [z, #64]
lsr d, d, #8
strb dshort, [z, #65]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_tomont_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert to Montgomery form z := (2^576 * x) mod p_521
// Input x[9]; output z[9]
//
// extern void bignum_tomont_p521
// (uint64_t z[static 9], uint64_t x[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_p521)
.text
.balign 4
#define z x0
#define x x1
#define h x2
#define t x3
#define d0 x4
#define d1 x5
#define d2 x6
#define d3 x7
#define d4 x8
#define d5 x9
#define d6 x10
#define d7 x11
#define d8 x12
S2N_BN_SYMBOL(bignum_tomont_p521):
// Load top digit first and get its upper bits in h so that we
// separate out x = 2^521 * H + L with h = H. Now x mod p_521 =
// (H + L) mod p_521 = if H + L >= p_521 then H + L - p_521 else H + L.
ldr d8, [x, #64]
lsr h, d8, #9
// Load in the other digits and decide whether H + L >= p_521. This is
// equivalent to H + L + 1 >= 2^521, and since this can only happen if
// digits d7,...,d1 consist entirely of 1 bits, we can condense the
// carry chain by ANDing digits together, perhaps reducing its latency.
// This condenses only three pairs; the payoff beyond that seems limited.
// By stuffing in 1 bits from bit position 521 upwards, we get CF directly
subs xzr, xzr, xzr
ldp d0, d1, [x]
adcs xzr, d0, h
adcs xzr, d1, xzr
ldp d2, d3, [x, #16]
and t, d2, d3
adcs xzr, t, xzr
ldp d4, d5, [x, #32]
and t, d4, d5
adcs xzr, t, xzr
ldp d6, d7, [x, #48]
and t, d6, d7
adcs xzr, t, xzr
orr t, d8, #~0x1FF
adcs t, t, xzr
// Now H + L >= p_521 <=> H + L + 1 >= 2^521 <=> CF from this comparison.
// So if CF is set we want (H + L) - p_521 = (H + L + 1) - 2^521
// while otherwise we want just H + L. So mask H + L + CF to 521 bits.
adcs d0, d0, h
adcs d1, d1, xzr
adcs d2, d2, xzr
adcs d3, d3, xzr
adcs d4, d4, xzr
adcs d5, d5, xzr
adcs d6, d6, xzr
adcs d7, d7, xzr
adc d8, d8, xzr
// So far, this is just a modular reduction as in bignum_mod_p521_9,
// except that the final masking of d8 is skipped since that comes out
// in the wash anyway from the next block, which is the Montgomery map,
// multiplying by 2^576 modulo p_521. Because 2^521 == 1 (mod p_521)
// this is just rotation left by 576 - 521 = 55 bits. To rotate in a
// right-to-left fashion, which might blend better with the carry
// chain above, the digit register indices themselves get shuffled up.
lsl t, d0, #55
extr d0, d1, d0, #9
extr d1, d2, d1, #9
extr d2, d3, d2, #9
extr d3, d4, d3, #9
extr d4, d5, d4, #9
extr d5, d6, d5, #9
extr d6, d7, d6, #9
extr d7, d8, d7, #9
lsr d8, d7, #9
orr t, t, d8
and d7, d7, #0x1FF
// Store the result from the shuffled registers [d7;d6;...;d1;d0;t]
stp t, d0, [z]
stp d1, d2, [z, #16]
stp d3, d4, [z, #32]
stp d5, d6, [z, #48]
str d7, [z, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_neg_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Negate modulo p_521, z := (-x) mod p_521, assuming x reduced
// Input x[9]; output z[9]
//
// extern void bignum_neg_p521 (uint64_t z[static 9], uint64_t x[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_neg_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_neg_p521)
.text
.balign 4
#define z x0
#define x x1
#define p x2
#define d0 x3
#define d1 x4
#define d2 x5
#define d3 x6
#define d4 x7
#define d5 x8
#define d6 x9
#define d7 x10
#define d8 x11
S2N_BN_SYMBOL(bignum_neg_p521):
// Load the 9 digits of x and generate p = the OR of them all
ldp d0, d1, [x]
orr d6, d0, d1
ldp d2, d3, [x, #16]
orr d7, d2, d3
orr p, d6, d7
ldp d4, d5, [x, #32]
orr d8, d4, d5
orr p, p, d8
ldp d6, d7, [x, #48]
orr d8, d6, d7
orr p, p, d8
ldr d8, [x, #64]
orr p, p, d8
// Turn p into a bitmask for "input is nonzero", so that we avoid doing
// -0 = p_521 and hence maintain strict modular reduction
cmp p, #0
csetm p, ne
// Since p_521 is all 1s, the subtraction is just an exclusive-or with p
// to give an optional inversion, with a slight fiddle for the top digit.
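// Concretely, p_521 - x has digits ~x[0],...,~x[7] with top digit
// ~x[8] & 0x1FF, so XORing with the mask (all 1s for nonzero x, 0 for x = 0)
// produces the negation while keeping -0 = 0.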
eor d0, d0, p
eor d1, d1, p
eor d2, d2, p
eor d3, d3, p
eor d4, d4, p
eor d5, d5, p
eor d6, d6, p
eor d7, d7, p
and p, p, #0x1FF
eor d8, d8, p
// Write back the result and return
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
stp d6, d7, [z, #48]
str d8, [z, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_inv_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Modular inverse modulo p_521 = 2^521 - 1
// Input x[9]; output z[9]
//
// extern void bignum_inv_p521(uint64_t z[static 9],uint64_t x[static 9]);
//
// Assuming the 9-digit input x is coprime to p_521, i.e. is not divisible
// by it, returns z < p_521 such that x * z == 1 (mod p_521). Note that
// x does not need to be reduced modulo p_521, but the output always is.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_inv_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_inv_p521)
.text
.balign 4
// Size in bytes of a 64-bit word
#define N 8
// Used for the return pointer
#define res x20
// Loop counter and d = 2 * delta value for divstep
#define i x21
#define d x22
// Registers used for matrix element magnitudes and signs
#define m00 x10
#define m01 x11
#define m10 x12
#define m11 x13
#define s00 x14
#define s01 x15
#define s10 x16
#define s11 x17
// Initial carries for combinations
#define car0 x9
#define car1 x19
// Input and output, plain registers treated according to pattern
#define reg0 x0, #0
#define reg1 x1, #0
#define reg2 x2, #0
#define reg3 x3, #0
#define reg4 x4, #0
#define x x1, #0
#define z x0, #0
// Pointer-offset pairs for temporaries on stack
#define f sp, #0
#define g sp, #(9*N)
#define u sp, #(18*N)
#define v sp, #(27*N)
// Total size to reserve on the stack
#define NSPACE #(36*N)
// Very similar to a subroutine call to the s2n-bignum word_divstep59.
// But different in register usage and returning the final matrix in
// registers as follows
//
// [ m00 m01]
// [ m10 m11]
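// Each use of this macro performs 59 divstep iterations driven by the
// low-order bits of f and g (20 bits at a time), leaving the combined signed
// transition matrix in the registers above; the caller then applies that
// matrix to the full-length f, g (and u, v) and shifts right by 59, as in
// the main loop below.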
#define divstep59() \
and x4, x2, #0xfffff; \
orr x4, x4, #0xfffffe0000000000; \
and x5, x3, #0xfffff; \
orr x5, x5, #0xc000000000000000; \
tst x5, #0x1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
asr x5, x5, #1; \
add x8, x4, #0x100, lsl #12; \
sbfx x8, x8, #21, #21; \
mov x11, #0x100000; \
add x11, x11, x11, lsl #21; \
add x9, x4, x11; \
asr x9, x9, #42; \
add x10, x5, #0x100, lsl #12; \
sbfx x10, x10, #21, #21; \
add x11, x5, x11; \
asr x11, x11, #42; \
mul x6, x8, x2; \
mul x7, x9, x3; \
mul x2, x10, x2; \
mul x3, x11, x3; \
add x4, x6, x7; \
add x5, x2, x3; \
asr x2, x4, #20; \
asr x3, x5, #20; \
and x4, x2, #0xfffff; \
orr x4, x4, #0xfffffe0000000000; \
and x5, x3, #0xfffff; \
orr x5, x5, #0xc000000000000000; \
tst x5, #0x1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
asr x5, x5, #1; \
add x12, x4, #0x100, lsl #12; \
sbfx x12, x12, #21, #21; \
mov x15, #0x100000; \
add x15, x15, x15, lsl #21; \
add x13, x4, x15; \
asr x13, x13, #42; \
add x14, x5, #0x100, lsl #12; \
sbfx x14, x14, #21, #21; \
add x15, x5, x15; \
asr x15, x15, #42; \
mul x6, x12, x2; \
mul x7, x13, x3; \
mul x2, x14, x2; \
mul x3, x15, x3; \
add x4, x6, x7; \
add x5, x2, x3; \
asr x2, x4, #20; \
asr x3, x5, #20; \
and x4, x2, #0xfffff; \
orr x4, x4, #0xfffffe0000000000; \
and x5, x3, #0xfffff; \
orr x5, x5, #0xc000000000000000; \
tst x5, #0x1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
mul x2, x12, x8; \
mul x3, x12, x9; \
mul x6, x14, x8; \
mul x7, x14, x9; \
madd x8, x13, x10, x2; \
madd x9, x13, x11, x3; \
madd x16, x15, x10, x6; \
madd x17, x15, x11, x7; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
tst x5, #0x2; \
asr x5, x5, #1; \
csel x6, x4, xzr, ne; \
ccmp x1, xzr, #0x8, ne; \
cneg x1, x1, ge; \
cneg x6, x6, ge; \
csel x4, x5, x4, ge; \
add x5, x5, x6; \
add x1, x1, #0x2; \
asr x5, x5, #1; \
add x12, x4, #0x100, lsl #12; \
sbfx x12, x12, #22, #21; \
mov x15, #0x100000; \
add x15, x15, x15, lsl #21; \
add x13, x4, x15; \
asr x13, x13, #43; \
add x14, x5, #0x100, lsl #12; \
sbfx x14, x14, #22, #21; \
add x15, x5, x15; \
asr x15, x15, #43; \
mneg x2, x12, x8; \
mneg x3, x12, x9; \
mneg x4, x14, x8; \
mneg x5, x14, x9; \
msub m00, x13, x16, x2; \
msub m01, x13, x17, x3; \
msub m10, x15, x16, x4; \
msub m11, x15, x17, x5
// Loading large constants
#define movbig(nn,n3,n2,n1,n0) \
movz nn, n0; \
movk nn, n1, lsl #16; \
movk nn, n2, lsl #32; \
movk nn, n3, lsl #48
S2N_BN_SYMBOL(bignum_inv_p521):
// Save registers and make room for temporaries
stp x19, x20, [sp, -16]!
stp x21, x22, [sp, -16]!
sub sp, sp, NSPACE
// Save the return pointer for the end so we can overwrite x0 later
mov res, x0
// Copy the prime p_521 = 2^521 - 1 into the f variable
mov x10, #0xFFFFFFFFFFFFFFFF
stp x10, x10, [f]
stp x10, x10, [f+16]
stp x10, x10, [f+32]
stp x10, x10, [f+48]
mov x11, #0x1FF
str x11, [f+64]
// Copy the input into the g variable, but reduce it strictly mod p_521
// so that g <= f as assumed in the bound proof. This code fragment is
// very similar to bignum_mod_p521_9 complete with carry condensation.
ldr x8, [x1, #64]
lsr x9, x8, #9
subs xzr, xzr, xzr
ldp x10, x11, [x1]
adcs xzr, x10, x9
adcs xzr, x11, xzr
ldp x12, x13, [x1, #16]
and x7, x12, x13
adcs xzr, x7, xzr
ldp x14, x15, [x1, #32]
and x7, x14, x15
adcs xzr, x7, xzr
ldp x16, x17, [x1, #48]
and x7, x16, x17
adcs xzr, x7, xzr
orr x7, x8, #~0x1FF
adcs x7, x7, xzr
adcs x10, x10, x9
adcs x11, x11, xzr
adcs x12, x12, xzr
adcs x13, x13, xzr
adcs x14, x14, xzr
adcs x15, x15, xzr
adcs x16, x16, xzr
adcs x17, x17, xzr
adc x8, x8, xzr
and x8, x8, #0x1FF
stp x10, x11, [g]
stp x12, x13, [g+16]
stp x14, x15, [g+32]
stp x16, x17, [g+48]
str x8, [g+64]
// Also maintain weakly reduced < 2*p_521 vector [u,v] such that
// [f,g] == x * 2^{1239-59*i} * [u,v] (mod p_521)
// starting with [p_521,x] == x * 2^{1239-59*0} * [0,2^-1239] (mod p_521)
// Note that because (2^{a+521} == 2^a) (mod p_521) we simply have
// (2^-1239 == 2^324) (mod p_521) so the constant initializer is simple.
//
// Based on the standard divstep bound, for inputs <= 2^b we need at least
// n >= (9437 * b + 1) / 4096. Since b is 521, that means 1201 iterations.
// Since we package divstep in multiples of 59 bits, we do 21 blocks of 59
// making 1239 total. (With a bit more effort we could avoid the full 59
// divsteps and use a shorter tail computation, but we keep it simple.)
// Hence, after the 21st iteration we have [f,g] == x * [u,v] and since
// |f| = 1 we get the modular inverse from u by flipping its sign with f.
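// Accordingly u is initialized to 0 and v to 2^324 = 16 * 2^320, i.e. all
// words zero except word 5 (the 2^320 digit), which gets the value 16.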
stp xzr, xzr, [u]
stp xzr, xzr, [u+16]
stp xzr, xzr, [u+32]
stp xzr, xzr, [u+48]
str xzr, [u+64]
mov x10, #16
stp xzr, xzr, [v]
stp xzr, xzr, [v+16]
stp xzr, x10, [v+32]
stp xzr, xzr, [v+48]
str xzr, [v+64]
// Start of main loop. We jump into the middle so that the divstep
// portion is common to the special 21st iteration after a uniform
// first 20.
mov i, #21
mov d, #1
b bignum_inv_p521_midloop
bignum_inv_p521_loop:
// Separate the matrix elements into sign-magnitude pairs
cmp m00, xzr
csetm s00, mi
cneg m00, m00, mi
cmp m01, xzr
csetm s01, mi
cneg m01, m01, mi
cmp m10, xzr
csetm s10, mi
cneg m10, m10, mi
cmp m11, xzr
csetm s11, mi
cneg m11, m11, mi
// Adjust the initial values to allow for complement instead of negation
// This initial offset is the same for [f,g] and [u,v] compositions.
// Save it in stable registers for the [u,v] part and do [f,g] first.
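// (Since -a = ~a + 1, a product by a negated matrix entry of magnitude m is
// computed as m * (a XOR s) plus a correction of m, where s is the all-ones
// sign mask; car0 and car1 below gather those corrections, (m AND s) per
// entry, for the f-row and g-row respectively.)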
and x0, m00, s00
and x1, m01, s01
add car0, x0, x1
and x0, m10, s10
and x1, m11, s11
add car1, x0, x1
// Now the computation of the updated f and g values. This maintains a
// 2-word carry between stages so we can conveniently insert the shift
// right by 59 before storing back, and not overwrite digits we need
// again of the old f and g values.
//
// Digit 0 of [f,g]
ldr x7, [f]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x4, car0, x0
adc x2, xzr, x1
ldr x8, [g]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x4, x4, x0
adc x2, x2, x1
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x5, car1, x0
adc x3, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x5, x5, x0
adc x3, x3, x1
// Digit 1 of [f,g]
ldr x7, [f+N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x2, x2, x0
adc x6, xzr, x1
ldr x8, [g+N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x2, x2, x0
adc x6, x6, x1
extr x4, x2, x4, #59
str x4, [f]
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x3, x3, x0
adc x4, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x3, x3, x0
adc x4, x4, x1
extr x5, x3, x5, #59
str x5, [g]
// Digit 2 of [f,g]
ldr x7, [f+2*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x6, x6, x0
adc x5, xzr, x1
ldr x8, [g+2*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x6, x6, x0
adc x5, x5, x1
extr x2, x6, x2, #59
str x2, [f+N]
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x4, x4, x0
adc x2, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x4, x4, x0
adc x2, x2, x1
extr x3, x4, x3, #59
str x3, [g+N]
// Digit 3 of [f,g]
ldr x7, [f+3*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x5, x5, x0
adc x3, xzr, x1
ldr x8, [g+3*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x5, x5, x0
adc x3, x3, x1
extr x6, x5, x6, #59
str x6, [f+2*N]
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x2, x2, x0
adc x6, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x2, x2, x0
adc x6, x6, x1
extr x4, x2, x4, #59
str x4, [g+2*N]
// Digit 4 of [f,g]
ldr x7, [f+4*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x3, x3, x0
adc x4, xzr, x1
ldr x8, [g+4*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x3, x3, x0
adc x4, x4, x1
extr x5, x3, x5, #59
str x5, [f+3*N]
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x6, x6, x0
adc x5, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x6, x6, x0
adc x5, x5, x1
extr x2, x6, x2, #59
str x2, [g+3*N]
// Digit 5 of [f,g]
ldr x7, [f+5*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x4, x4, x0
adc x2, xzr, x1
ldr x8, [g+5*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x4, x4, x0
adc x2, x2, x1
extr x3, x4, x3, #59
str x3, [f+4*N]
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x5, x5, x0
adc x3, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x5, x5, x0
adc x3, x3, x1
extr x6, x5, x6, #59
str x6, [g+4*N]
// Digit 6 of [f,g]
ldr x7, [f+6*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x2, x2, x0
adc x6, xzr, x1
ldr x8, [g+6*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x2, x2, x0
adc x6, x6, x1
extr x4, x2, x4, #59
str x4, [f+5*N]
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x3, x3, x0
adc x4, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x3, x3, x0
adc x4, x4, x1
extr x5, x3, x5, #59
str x5, [g+5*N]
// Digit 7 of [f,g]
ldr x7, [f+7*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x6, x6, x0
adc x5, xzr, x1
ldr x8, [g+7*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x6, x6, x0
adc x5, x5, x1
extr x2, x6, x2, #59
str x2, [f+6*N]
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x4, x4, x0
adc x2, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x4, x4, x0
adc x2, x2, x1
extr x3, x4, x3, #59
str x3, [g+6*N]
// Digits 8 and 9 of [f,g]
ldr x7, [f+8*N]
eor x1, x7, s00
asr x3, x1, #63
and x3, x3, m00
neg x3, x3
mul x0, x1, m00
umulh x1, x1, m00
adds x5, x5, x0
adc x3, x3, x1
ldr x8, [g+8*N]
eor x1, x8, s01
asr x0, x1, #63
and x0, x0, m01
sub x3, x3, x0
mul x0, x1, m01
umulh x1, x1, m01
adds x5, x5, x0
adc x3, x3, x1
extr x6, x5, x6, #59
str x6, [f+7*N]
extr x5, x3, x5, #59
str x5, [f+8*N]
eor x1, x7, s10
asr x5, x1, #63
and x5, x5, m10
neg x5, x5
mul x0, x1, m10
umulh x1, x1, m10
adds x2, x2, x0
adc x5, x5, x1
eor x1, x8, s11
asr x0, x1, #63
and x0, x0, m11
sub x5, x5, x0
mul x0, x1, m11
umulh x1, x1, m11
adds x2, x2, x0
adc x5, x5, x1
extr x4, x2, x4, #59
str x4, [g+7*N]
extr x2, x5, x2, #59
str x2, [g+8*N]
// Now the computation of the updated u and v values and their
// modular reductions. A very similar accumulation except that
// the top words of u and v are unsigned and we don't shift.
//
// Digit 0 of [u,v]
ldr x7, [u]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x4, car0, x0
adc x2, xzr, x1
ldr x8, [v]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x4, x4, x0
str x4, [u]
adc x2, x2, x1
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x5, car1, x0
adc x3, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x5, x5, x0
str x5, [v]
adc x3, x3, x1
// Digit 1 of [u,v]
ldr x7, [u+N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x2, x2, x0
adc x6, xzr, x1
ldr x8, [v+N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x2, x2, x0
str x2, [u+N]
adc x6, x6, x1
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x3, x3, x0
adc x4, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x3, x3, x0
str x3, [v+N]
adc x4, x4, x1
// Digit 2 of [u,v]
ldr x7, [u+2*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x6, x6, x0
adc x5, xzr, x1
ldr x8, [v+2*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x6, x6, x0
str x6, [u+2*N]
adc x5, x5, x1
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x4, x4, x0
adc x2, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x4, x4, x0
str x4, [v+2*N]
adc x2, x2, x1
// Digit 3 of [u,v]
ldr x7, [u+3*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x5, x5, x0
adc x3, xzr, x1
ldr x8, [v+3*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x5, x5, x0
str x5, [u+3*N]
adc x3, x3, x1
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x2, x2, x0
adc x6, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x2, x2, x0
str x2, [v+3*N]
adc x6, x6, x1
// Digit 4 of [u,v]
ldr x7, [u+4*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x3, x3, x0
adc x4, xzr, x1
ldr x8, [v+4*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x3, x3, x0
str x3, [u+4*N]
adc x4, x4, x1
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x6, x6, x0
adc x5, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x6, x6, x0
str x6, [v+4*N]
adc x5, x5, x1
// Digit 5 of [u,v]
ldr x7, [u+5*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x4, x4, x0
adc x2, xzr, x1
ldr x8, [v+5*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x4, x4, x0
str x4, [u+5*N]
adc x2, x2, x1
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x5, x5, x0
adc x3, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x5, x5, x0
str x5, [v+5*N]
adc x3, x3, x1
// Digit 6 of [u,v]
ldr x7, [u+6*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x2, x2, x0
adc x6, xzr, x1
ldr x8, [v+6*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x2, x2, x0
str x2, [u+6*N]
adc x6, x6, x1
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x3, x3, x0
adc x4, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x3, x3, x0
str x3, [v+6*N]
adc x4, x4, x1
// Digit 7 of [u,v]
ldr x7, [u+7*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x6, x6, x0
adc x5, xzr, x1
ldr x8, [v+7*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x6, x6, x0
str x6, [u+7*N]
adc x5, x5, x1
eor x1, x7, s10
mul x0, x1, m10
umulh x1, x1, m10
adds x4, x4, x0
adc x2, xzr, x1
eor x1, x8, s11
mul x0, x1, m11
umulh x1, x1, m11
adds x4, x4, x0
str x4, [v+7*N]
adc x2, x2, x1
// Digits 8 and 9 of u (top is unsigned)
ldr x7, [u+8*N]
eor x1, x7, s00
and x3, s00, m00
neg x3, x3
mul x0, x1, m00
umulh x1, x1, m00
adds x5, x5, x0
adc x3, x3, x1
ldr x8, [v+8*N]
eor x1, x8, s01
and x0, s01, m01
sub x3, x3, x0
mul x0, x1, m01
umulh x1, x1, m01
adds x5, x5, x0
adc x3, x3, x1
// Modular reduction of u, reloading as needed from u[0],...,u[7],x5,x3
extr x6, x3, x5, #9
ldp x0, x1, [u]
add x6, x6, x3, asr #63
sub x5, x5, x6, lsl #9
adds x0, x0, x6
asr x6, x6, #63
adcs x1, x1, x6
stp x0, x1, [u]
ldp x0, x1, [u+16]
adcs x0, x0, x6
adcs x1, x1, x6
stp x0, x1, [u+16]
ldp x0, x1, [u+32]
adcs x0, x0, x6
adcs x1, x1, x6
stp x0, x1, [u+32]
ldp x0, x1, [u+48]
adcs x0, x0, x6
adcs x1, x1, x6
stp x0, x1, [u+48]
adc x5, x5, x6
str x5, [u+64]
// Digits 8 and 9 of v (top is unsigned)
eor x1, x7, s10
and x5, s10, m10
neg x5, x5
mul x0, x1, m10
umulh x1, x1, m10
adds x2, x2, x0
adc x5, x5, x1
eor x1, x8, s11
and x0, s11, m11
sub x5, x5, x0
mul x0, x1, m11
umulh x1, x1, m11
adds x2, x2, x0
adc x5, x5, x1
// Modular reduction of v, reloading as needed from v[0],...,v[7],x2,x5
extr x6, x5, x2, #9
ldp x0, x1, [v]
add x6, x6, x5, asr #63
sub x2, x2, x6, lsl #9
adds x0, x0, x6
asr x6, x6, #63
adcs x1, x1, x6
stp x0, x1, [v]
ldp x0, x1, [v+16]
adcs x0, x0, x6
adcs x1, x1, x6
stp x0, x1, [v+16]
ldp x0, x1, [v+32]
adcs x0, x0, x6
adcs x1, x1, x6
stp x0, x1, [v+32]
ldp x0, x1, [v+48]
adcs x0, x0, x6
adcs x1, x1, x6
stp x0, x1, [v+48]
adc x2, x2, x6
str x2, [v+64]
bignum_inv_p521_midloop:
mov x1, d
ldr x2, [f]
ldr x3, [g]
divstep59()
mov d, x1
// Next iteration
subs i, i, #1
bne bignum_inv_p521_loop
// The 21st and last iteration does not need anything except the
// u value and the sign of f; the latter can be obtained from the
// lowest word of f. So it's done differently from the main loop.
// Find the sign of the new f. For this we just need one digit
// since we know (for in-scope cases) that f is either +1 or -1.
// We don't explicitly shift right by 59 either, but looking at
// bit 63 (or any bit >= 60) of the unshifted result is enough
// to distinguish -1 from +1; this is then made into a mask.
ldr x0, [f]
ldr x1, [g]
mul x0, x0, m00
madd x1, x1, m01, x0
asr x0, x1, #63
// Now separate out the matrix into sign-magnitude pairs
// and adjust each one based on the sign of f.
//
// Note that at this point we expect |f|=1 and we got its
// sign above, so then since [f,0] == x * [u,v] (mod p_521)
// we want to flip the sign of u according to that of f.
cmp m00, xzr
csetm s00, mi
cneg m00, m00, mi
eor s00, s00, x0
cmp m01, xzr
csetm s01, mi
cneg m01, m01, mi
eor s01, s01, x0
cmp m10, xzr
csetm s10, mi
cneg m10, m10, mi
eor s10, s10, x0
cmp m11, xzr
csetm s11, mi
cneg m11, m11, mi
eor s11, s11, x0
// Adjust the initial value to allow for complement instead of negation
and x0, m00, s00
and x1, m01, s01
add car0, x0, x1
// Digit 0 of [u]
ldr x7, [u]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x4, car0, x0
adc x2, xzr, x1
ldr x8, [v]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x4, x4, x0
str x4, [u]
adc x2, x2, x1
// Digit 1 of [u]
ldr x7, [u+N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x2, x2, x0
adc x6, xzr, x1
ldr x8, [v+N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x2, x2, x0
str x2, [u+N]
adc x6, x6, x1
// Digit 2 of [u]
ldr x7, [u+2*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x6, x6, x0
adc x5, xzr, x1
ldr x8, [v+2*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x6, x6, x0
str x6, [u+2*N]
adc x5, x5, x1
// Digit 3 of [u]
ldr x7, [u+3*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x5, x5, x0
adc x3, xzr, x1
ldr x8, [v+3*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x5, x5, x0
str x5, [u+3*N]
adc x3, x3, x1
// Digit 4 of [u]
ldr x7, [u+4*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x3, x3, x0
adc x4, xzr, x1
ldr x8, [v+4*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x3, x3, x0
str x3, [u+4*N]
adc x4, x4, x1
// Digit 5 of [u]
ldr x7, [u+5*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x4, x4, x0
adc x2, xzr, x1
ldr x8, [v+5*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x4, x4, x0
str x4, [u+5*N]
adc x2, x2, x1
// Digit 6 of [u]
ldr x7, [u+6*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x2, x2, x0
adc x6, xzr, x1
ldr x8, [v+6*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x2, x2, x0
str x2, [u+6*N]
adc x6, x6, x1
// Digit 7 of [u]
ldr x7, [u+7*N]
eor x1, x7, s00
mul x0, x1, m00
umulh x1, x1, m00
adds x6, x6, x0
adc x5, xzr, x1
ldr x8, [v+7*N]
eor x1, x8, s01
mul x0, x1, m01
umulh x1, x1, m01
adds x6, x6, x0
str x6, [u+7*N]
adc x5, x5, x1
// Digits 8 and 9 of u (top is unsigned)
ldr x7, [u+8*N]
eor x1, x7, s00
and x3, s00, m00
neg x3, x3
mul x0, x1, m00
umulh x1, x1, m00
adds x5, x5, x0
adc x3, x3, x1
ldr x8, [v+8*N]
eor x1, x8, s01
and x0, s01, m01
sub x3, x3, x0
mul x0, x1, m01
umulh x1, x1, m01
adds x5, x5, x0
adc x3, x3, x1
// Modular reduction of u, reloading as needed from u[0],...,u[7],x5,x3
extr x6, x3, x5, #9
ldp x10, x11, [u]
add x6, x6, x3, asr #63
sub x5, x5, x6, lsl #9
adds x10, x10, x6
asr x6, x6, #63
adcs x11, x11, x6
ldp x12, x13, [u+16]
adcs x12, x12, x6
adcs x13, x13, x6
ldp x14, x15, [u+32]
adcs x14, x14, x6
adcs x15, x15, x6
ldp x16, x17, [u+48]
adcs x16, x16, x6
adcs x17, x17, x6
adc x19, x5, x6
// Further strict reduction ready for the output, which just means
// a conditional subtraction of p_521
subs x0, x10, #-1
adcs x1, x11, xzr
adcs x2, x12, xzr
adcs x3, x13, xzr
adcs x4, x14, xzr
adcs x5, x15, xzr
adcs x6, x16, xzr
adcs x7, x17, xzr
mov x8, #0x1FF
sbcs x8, x19, x8
csel x0, x0, x10, cs
csel x1, x1, x11, cs
csel x2, x2, x12, cs
csel x3, x3, x13, cs
csel x4, x4, x14, cs
csel x5, x5, x15, cs
csel x6, x6, x16, cs
csel x7, x7, x17, cs
csel x8, x8, x19, cs
// Store it back to the final output
stp x0, x1, [res]
stp x2, x3, [res, #16]
stp x4, x5, [res, #32]
stp x6, x7, [res, #48]
str x8, [res, #64]
// Restore stack and registers
add sp, sp, NSPACE
ldp x21, x22, [sp], 16
ldp x19, x20, [sp], 16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_fromlebytes_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert little-endian bytes to 9-digit 528-bit bignum
//
// extern void bignum_fromlebytes_p521
// (uint64_t z[static 9],uint8_t x[static 66])
//
// The result will be < 2^528 since it is translated from 66 bytes.
// It is mainly intended for inputs x < p_521 < 2^521 < 2^528.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_fromlebytes_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_fromlebytes_p521)
.text
.balign 4
#define z x0
#define x x1
#define d x2
#define dshort w2
#define a x3
S2N_BN_SYMBOL(bignum_fromlebytes_p521):
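// Each 64-bit word is assembled from 8 bytes using
//   extr a, d, a, #8        (a := (d << 56) | (a >> 8))
// which shifts the bytes gathered so far down by 8 and inserts the newly
// loaded byte d at the top; after 8 bytes, a holds the word in little-endian
// byte order. The last word has only 2 bytes, hence the final extr with #56.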
// word 0
ldrb dshort, [x]
extr a, d, xzr, #8
ldrb dshort, [x, #1]
extr a, d, a, #8
ldrb dshort, [x, #2]
extr a, d, a, #8
ldrb dshort, [x, #3]
extr a, d, a, #8
ldrb dshort, [x, #4]
extr a, d, a, #8
ldrb dshort, [x, #5]
extr a, d, a, #8
ldrb dshort, [x, #6]
extr a, d, a, #8
ldrb dshort, [x, #7]
extr a, d, a, #8
str a, [z]
// word 1
ldrb dshort, [x, #8]
extr a, d, xzr, #8
ldrb dshort, [x, #9]
extr a, d, a, #8
ldrb dshort, [x, #10]
extr a, d, a, #8
ldrb dshort, [x, #11]
extr a, d, a, #8
ldrb dshort, [x, #12]
extr a, d, a, #8
ldrb dshort, [x, #13]
extr a, d, a, #8
ldrb dshort, [x, #14]
extr a, d, a, #8
ldrb dshort, [x, #15]
extr a, d, a, #8
str a, [z, #8]
// word 2
ldrb dshort, [x, #16]
extr a, d, xzr, #8
ldrb dshort, [x, #17]
extr a, d, a, #8
ldrb dshort, [x, #18]
extr a, d, a, #8
ldrb dshort, [x, #19]
extr a, d, a, #8
ldrb dshort, [x, #20]
extr a, d, a, #8
ldrb dshort, [x, #21]
extr a, d, a, #8
ldrb dshort, [x, #22]
extr a, d, a, #8
ldrb dshort, [x, #23]
extr a, d, a, #8
str a, [z, #16]
// word 3
ldrb dshort, [x, #24]
extr a, d, xzr, #8
ldrb dshort, [x, #25]
extr a, d, a, #8
ldrb dshort, [x, #26]
extr a, d, a, #8
ldrb dshort, [x, #27]
extr a, d, a, #8
ldrb dshort, [x, #28]
extr a, d, a, #8
ldrb dshort, [x, #29]
extr a, d, a, #8
ldrb dshort, [x, #30]
extr a, d, a, #8
ldrb dshort, [x, #31]
extr a, d, a, #8
str a, [z, #24]
// word 4
ldrb dshort, [x, #32]
extr a, d, xzr, #8
ldrb dshort, [x, #33]
extr a, d, a, #8
ldrb dshort, [x, #34]
extr a, d, a, #8
ldrb dshort, [x, #35]
extr a, d, a, #8
ldrb dshort, [x, #36]
extr a, d, a, #8
ldrb dshort, [x, #37]
extr a, d, a, #8
ldrb dshort, [x, #38]
extr a, d, a, #8
ldrb dshort, [x, #39]
extr a, d, a, #8
str a, [z, #32]
// word 5
ldrb dshort, [x, #40]
extr a, d, xzr, #8
ldrb dshort, [x, #41]
extr a, d, a, #8
ldrb dshort, [x, #42]
extr a, d, a, #8
ldrb dshort, [x, #43]
extr a, d, a, #8
ldrb dshort, [x, #44]
extr a, d, a, #8
ldrb dshort, [x, #45]
extr a, d, a, #8
ldrb dshort, [x, #46]
extr a, d, a, #8
ldrb dshort, [x, #47]
extr a, d, a, #8
str a, [z, #40]
// word 6
ldrb dshort, [x, #48]
extr a, d, xzr, #8
ldrb dshort, [x, #49]
extr a, d, a, #8
ldrb dshort, [x, #50]
extr a, d, a, #8
ldrb dshort, [x, #51]
extr a, d, a, #8
ldrb dshort, [x, #52]
extr a, d, a, #8
ldrb dshort, [x, #53]
extr a, d, a, #8
ldrb dshort, [x, #54]
extr a, d, a, #8
ldrb dshort, [x, #55]
extr a, d, a, #8
str a, [z, #48]
// word 7
ldrb dshort, [x, #56]
extr a, d, xzr, #8
ldrb dshort, [x, #57]
extr a, d, a, #8
ldrb dshort, [x, #58]
extr a, d, a, #8
ldrb dshort, [x, #59]
extr a, d, a, #8
ldrb dshort, [x, #60]
extr a, d, a, #8
ldrb dshort, [x, #61]
extr a, d, a, #8
ldrb dshort, [x, #62]
extr a, d, a, #8
ldrb dshort, [x, #63]
extr a, d, a, #8
str a, [z, #56]
// word 8
ldrb dshort, [x, #64]
extr a, d, xzr, #8
ldrb dshort, [x, #65]
extr a, d, a, #56
str a, [z, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_sqr_p521_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Square modulo p_521, z := (x^2) mod p_521, assuming x reduced
// Input x[9]; output z[9]
//
// extern void bignum_sqr_p521_alt (uint64_t z[static 9], uint64_t x[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_p521_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_p521_alt)
.text
.balign 4
#define z x0
#define x x1
#define a0 x2
#define a1 x3
#define a2 x4
#define a3 x5
#define a4 x6
#define a5 x7
#define a6 x8
#define a7 x9
#define a8 x1 // Overwrites input argument at last load
#define l x10
#define u0 x2 // The same as a0
#define u1 x11
#define u2 x12
#define u3 x13
#define u4 x14
#define u5 x15
#define u6 x16
#define u7 x17
#define u8 x19
#define u9 x20
#define u10 x21
#define u11 x22
#define u12 x23
#define u13 x24
#define u14 x25
#define u15 x26
#define u16 x4 // The same as a2
S2N_BN_SYMBOL(bignum_sqr_p521_alt):
// It's convenient to have more registers to play with
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
// Load low 8 elements as [a7;a6;a5;a4;a3;a2;a1;a0], set up an initial
// window [u8;u7;u6;u5;u4;u3;u2;u1] = 10 + 20 + 30 + 40 + 50 + 60 + 70
ldp a0, a1, [x]
mul u1, a0, a1
umulh u2, a0, a1
ldp a2, a3, [x, #16]
mul l, a0, a2
umulh u3, a0, a2
adds u2, u2, l
ldp a4, a5, [x, #32]
mul l, a0, a3
umulh u4, a0, a3
adcs u3, u3, l
ldp a6, a7, [x, #48]
mul l, a0, a4
umulh u5, a0, a4
adcs u4, u4, l
mul l, a0, a5
umulh u6, a0, a5
adcs u5, u5, l
mul l, a0, a6
umulh u7, a0, a6
adcs u6, u6, l
mul l, a0, a7
umulh u8, a0, a7
adcs u7, u7, l
adc u8, u8, xzr
// Add in the next diagonal = 21 + 31 + 41 + 51 + 61 + 71 + 54
mul l, a1, a2
adds u3, u3, l
mul l, a1, a3
adcs u4, u4, l
mul l, a1, a4
adcs u5, u5, l
mul l, a1, a5
adcs u6, u6, l
mul l, a1, a6
adcs u7, u7, l
mul l, a1, a7
adcs u8, u8, l
cset u9, cs
umulh l, a1, a2
adds u4, u4, l
umulh l, a1, a3
adcs u5, u5, l
umulh l, a1, a4
adcs u6, u6, l
umulh l, a1, a5
adcs u7, u7, l
umulh l, a1, a6
adcs u8, u8, l
umulh l, a1, a7
adc u9, u9, l
mul l, a4, a5
umulh u10, a4, a5
adds u9, u9, l
adc u10, u10, xzr
// And the next one = 32 + 42 + 52 + 62 + 72 + 64 + 65
mul l, a2, a3
adds u5, u5, l
mul l, a2, a4
adcs u6, u6, l
mul l, a2, a5
adcs u7, u7, l
mul l, a2, a6
adcs u8, u8, l
mul l, a2, a7
adcs u9, u9, l
mul l, a4, a6
adcs u10, u10, l
cset u11, cs
umulh l, a2, a3
adds u6, u6, l
umulh l, a2, a4
adcs u7, u7, l
umulh l, a2, a5
adcs u8, u8, l
umulh l, a2, a6
adcs u9, u9, l
umulh l, a2, a7
adcs u10, u10, l
umulh l, a4, a6
adc u11, u11, l
mul l, a5, a6
umulh u12, a5, a6
adds u11, u11, l
adc u12, u12, xzr
// And the final one = 43 + 53 + 63 + 73 + 74 + 75 + 76
mul l, a3, a4
adds u7, u7, l
mul l, a3, a5
adcs u8, u8, l
mul l, a3, a6
adcs u9, u9, l
mul l, a3, a7
adcs u10, u10, l
mul l, a4, a7
adcs u11, u11, l
mul l, a5, a7
adcs u12, u12, l
cset u13, cs
umulh l, a3, a4
adds u8, u8, l
umulh l, a3, a5
adcs u9, u9, l
umulh l, a3, a6
adcs u10, u10, l
umulh l, a3, a7
adcs u11, u11, l
umulh l, a4, a7
adcs u12, u12, l
umulh l, a5, a7
adc u13, u13, l
mul l, a6, a7
umulh u14, a6, a7
adds u13, u13, l
adc u14, u14, xzr
// Double that, with u15 holding the top carry
adds u1, u1, u1
adcs u2, u2, u2
adcs u3, u3, u3
adcs u4, u4, u4
adcs u5, u5, u5
adcs u6, u6, u6
adcs u7, u7, u7
adcs u8, u8, u8
adcs u9, u9, u9
adcs u10, u10, u10
adcs u11, u11, u11
adcs u12, u12, u12
adcs u13, u13, u13
adcs u14, u14, u14
cset u15, cs
// Add the homogeneous terms 00 + 11 + 22 + 33 + 44 + 55 + 66 + 77
umulh l, a0, a0
adds u1, u1, l
mul l, a1, a1
adcs u2, u2, l
umulh l, a1, a1
adcs u3, u3, l
mul l, a2, a2
adcs u4, u4, l
umulh l, a2, a2
adcs u5, u5, l
mul l, a3, a3
adcs u6, u6, l
umulh l, a3, a3
adcs u7, u7, l
mul l, a4, a4
adcs u8, u8, l
umulh l, a4, a4
adcs u9, u9, l
mul l, a5, a5
adcs u10, u10, l
umulh l, a5, a5
adcs u11, u11, l
mul l, a6, a6
adcs u12, u12, l
umulh l, a6, a6
adcs u13, u13, l
mul l, a7, a7
adcs u14, u14, l
umulh l, a7, a7
adc u15, u15, l
// Now load in the top digit a8, and immediately double the register
ldr a8, [x, #64]
add a8, a8, a8
// Add (2 * a8) * [a7;...;a0] into the top of the buffer
// At the end of the first chain we form u16 = a8 ^ 2.
// This needs us to shift right the modified a8 again but it saves a
// register, and the overall performance impact seems slightly positive.
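// A hedged C-style fragment of the trick just described; the variable
// names are hypothetical. With b holding the doubled top digit, the cross
// terms 2*x8*xi are simply b*xi, and because the reduced input has
// x8 < 2^9, the square x8^2 = (b >> 1) * (b >> 1) still fits one 64-bit
// multiply:
//
//   uint64_t b = 2 * x8;                    // the doubled a8 above
//   /* ... accumulate b * x[i] into the upper columns ... */
//   uint64_t top = (b >> 1) * (b >> 1);     // recovers x8^2 for u16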
mul l, a8, a0
adds u8, u8, l
umulh l, a8, a0
adcs u9, u9, l
mul l, a8, a2
adcs u10, u10, l
umulh l, a8, a2
adcs u11, u11, l
mul l, a8, a4
adcs u12, u12, l
umulh l, a8, a4
adcs u13, u13, l
mul l, a8, a6
adcs u14, u14, l
umulh l, a8, a6
adcs u15, u15, l
lsr u16, a8, #1
mul u16, u16, u16
adc u16, u16, xzr
mul l, a8, a1
adds u9, u9, l
umulh l, a8, a1
adcs u10, u10, l
mul l, a8, a3
adcs u11, u11, l
umulh l, a8, a3
adcs u12, u12, l
mul l, a8, a5
adcs u13, u13, l
umulh l, a8, a5
adcs u14, u14, l
mul l, a8, a7
adcs u15, u15, l
umulh l, a8, a7
adc u16, u16, l
// Finally squeeze in the lowest mul. This didn't need to be involved
// in the addition chains and moreover lets us re-use u0 == a0
mul u0, a0, a0
// Now we have the full product, which we consider as
// 2^521 * h + l. Form h + l + 1
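// A hedged, scaled-down C analogue of this reduction (hypothetical helper;
// it uses the Mersenne prime 2^61 - 1 so everything fits in machine words
// and assumes a compiler with unsigned __int128). p_521 = 2^521 - 1 behaves
// the same way, just spread over nine words; the assembly below folds the
// final comparison into a carry test on h + l + 1 instead of an explicit
// compare.
//
//   #include <stdint.h>
//
//   static uint64_t mulmod_mersenne61(uint64_t a, uint64_t b)   // a, b < p
//   {
//     const uint64_t p = ((uint64_t)1 << 61) - 1;
//     unsigned __int128 P = (unsigned __int128)a * b;  // full double-width product
//     uint64_t l = (uint64_t)P & p;                    // low 61 bits
//     uint64_t h = (uint64_t)(P >> 61);                // high part, h < p
//     uint64_t s = h + l;                              // P == h + l (mod p), s < 2*p
//     return (s >= p) ? s - p : s;                     // one conditional subtraction
//   }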
subs xzr, xzr, xzr
extr l, u9, u8, #9
adcs u0, u0, l
extr l, u10, u9, #9
adcs u1, u1, l
extr l, u11, u10, #9
adcs u2, u2, l
extr l, u12, u11, #9
adcs u3, u3, l
extr l, u13, u12, #9
adcs u4, u4, l
extr l, u14, u13, #9
adcs u5, u5, l
extr l, u15, u14, #9
adcs u6, u6, l
extr l, u16, u15, #9
adcs u7, u7, l
orr u8, u8, #~0x1FF
lsr l, u16, #9
adcs u8, u8, l
// Now CF is set if h + l + 1 >= 2^521, which means it's already
// the answer, while if ~CF the answer is h + l so we should subtract
// 1 (all considered in 521 bits). Hence subtract ~CF and mask.
sbcs u0, u0, xzr
sbcs u1, u1, xzr
sbcs u2, u2, xzr
sbcs u3, u3, xzr
sbcs u4, u4, xzr
sbcs u5, u5, xzr
sbcs u6, u6, xzr
sbcs u7, u7, xzr
sbc u8, u8, xzr
and u8, u8, #0x1FF
// Store back digits of final result
stp u0, u1, [z]
stp u2, u3, [z, #16]
stp u4, u5, [z, #32]
stp u6, u7, [z, #48]
str u8, [z, #64]
// Restore registers and return
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_deamont_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert from Montgomery form z := (x / 2^576) mod p_521
// Input x[9]; output z[9]
//
// extern void bignum_deamont_p521
// (uint64_t z[static 9], uint64_t x[static 9]);
//
// Convert a 9-digit bignum x out of its (optionally almost) Montgomery form,
// "almost" meaning any 9-digit input will work, with no range restriction.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_deamont_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_deamont_p521)
.text
.balign 4
// Input parameters
#define z x0
#define x x1
// Rotating registers for the intermediate windows
#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x6
#define d5 x7
#define d6 x8
#define d7 x9
#define d8 x10
// Some other variables, not all distinct
#define c x11
#define h x11
#define l x12
#define u x12
S2N_BN_SYMBOL(bignum_deamont_p521):
// Load all the inputs
ldp d0, d1, [x]
ldp d2, d3, [x, #16]
ldp d4, d5, [x, #32]
ldp d6, d7, [x, #48]
ldr d8, [x, #64]
// Stash the lowest 55 bits at the top of c, then shift the whole 576-bit
// input right by 9*64 - 521 = 576 - 521 = 55 bits. As this is done,
// accumulate an AND of words d0..d6.
lsl c, d0, #9
extr d0, d1, d0, #55
extr d1, d2, d1, #55
and u, d0, d1
extr d2, d3, d2, #55
and u, u, d2
extr d3, d4, d3, #55
and u, u, d3
extr d4, d5, d4, #55
and u, u, d4
extr d5, d6, d5, #55
and u, u, d5
extr d6, d7, d6, #55
and u, u, d6
extr d7, d8, d7, #55
lsr d8, d8, #55
// Now writing x = 2^55 * h + l (so here [d8;..d0] = h and c = 2^9 * l)
// we want (h + 2^{521-55} * l) mod p_521 = s mod p_521. Since s < 2 * p_521
// this is just "if s >= p_521 then s - p_521 else s". First get
// CF <=> s >= p_521, creating the digits [h,l] to add for the l part.
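// (Restating the identity in play: since 2^521 == 1 (mod p_521), dividing
// by 2^576 is the same as dividing by 2^(576-521) = 2^55, and with
// x = 2^55 * h + l that quotient is h + 2^(521-55) * l = h + 2^466 * l
// (mod p_521), i.e. the shifted window above plus the stashed low bits
// added back near the top.)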
adds xzr, u, #1
lsl l, c, #9
adcs xzr, d7, l
orr d8, d8, #~0x1FF
lsr h, c, #55
adcs xzr, d8, h
// Now the result = s mod p_521 = (if s >= p_521 then s - p_521 else s) =
// (s + CF) mod 2^521. So do the addition inheriting the carry-in.
adcs d0, d0, xzr
adcs d1, d1, xzr
adcs d2, d2, xzr
adcs d3, d3, xzr
adcs d4, d4, xzr
adcs d5, d5, xzr
adcs d6, d6, xzr
adcs d7, d7, l
adc d8, d8, h
and d8, d8, #0x1FF
// Store back the result
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
stp d6, d7, [z, #48]
str d8, [z, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_montmul_p521_neon.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery multiply, z := (x * y / 2^576) mod p_521
// Inputs x[9], y[9]; output z[9]
//
// extern void bignum_montmul_p521_neon
// (uint64_t z[static 9], uint64_t x[static 9], uint64_t y[static 9]);
//
// Does z := (x * y / 2^576) mod p_521, assuming x < p_521, y < p_521. This
// means the Montgomery base is the "native size" 2^{9*64} = 2^576; since
// p_521 is a Mersenne prime the basic modular multiplication bignum_mul_p521
// can be considered a Montgomery operation to base 2^521.
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------
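// (Illustrative restatement, not taken from this file: since
// 2^521 == 1 (mod p_521), the Montgomery factor 2^-576 is congruent to
// 2^(2*521 - 576) = 2^466, so a Montgomery multiply to base 2^576 amounts
// to an ordinary modular multiply whose 521-bit result is rotated right by
// 576 - 521 = 55 bits.)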
#include "_internal_s2n_bignum.h"
// bignum_montmul_p521_neon is functionally equivalent to bignum_montmul_p521.
// It is written in a way that
// 1. A subset of scalar multiplications in bignum_montmul_p521 are carefully
// chosen and vectorized
// 2. The vectorized assembly is rescheduled using the SLOTHY superoptimizer.
// https://github.com/slothy-optimizer/slothy
//
// The output program of step 1. is as follows:
//
// stp x19, x20, [sp, #-16]!
// stp x21, x22, [sp, #-16]!
// stp x23, x24, [sp, #-16]!
// stp x25, x26, [sp, #-16]!
// sub sp, sp, #80
// ldp x14, x7, [x1]
// ldp x3, x25, [x1, #16]
// ldp x10, x24, [x2]
// ldr q0, [x1]
// ldr q25, [x2]
// ldp x12, x6, [x2, #16]
// movi v18.2D, #0x00000000ffffffff
// uzp2 v3.4S, v25.4S, v25.4S
// xtn v26.2S, v0.2D
// xtn v22.2S, v25.2D
// rev64 v24.4S, v25.4S
// umull v19.2D, v26.2S, v22.2S
// umull v25.2D, v26.2S, v3.2S
// uzp2 v20.4S, v0.4S, v0.4S
// mul v0.4S, v24.4S, v0.4S
// usra v25.2D, v19.2D, #32
// umull v6.2D, v20.2S, v3.2S
// uaddlp v0.2D, v0.4S
// and v18.16B, v25.16B, v18.16B
// umlal v18.2D, v20.2S, v22.2S
// shl v0.2D, v0.2D, #32
// usra v6.2D, v25.2D, #32
// umlal v0.2D, v26.2S, v22.2S
// usra v6.2D, v18.2D, #32
// mov x23, v0.d[0]
// mov x16, v0.d[1]
// mul x5, x3, x12
// mul x21, x25, x6
// mov x19, v6.d[0]
// adds x16, x16, x19
// mov x19, v6.d[1]
// adcs x5, x5, x19
// umulh x19, x3, x12
// adcs x21, x21, x19
// umulh x19, x25, x6
// adc x19, x19, xzr
// adds x8, x16, x23
// adcs x16, x5, x16
// adcs x5, x21, x5
// adcs x21, x19, x21
// adc x19, xzr, x19
// adds x11, x16, x23
// adcs x15, x5, x8
// adcs x16, x21, x16
// adcs x5, x19, x5
// adcs x21, xzr, x21
// adc x19, xzr, x19
// subs x20, x3, x25
// cneg x20, x20, cc
// csetm x9, cc
// subs x13, x6, x12
// cneg x13, x13, cc
// mul x26, x20, x13
// umulh x20, x20, x13
// cinv x9, x9, cc
// cmn x9, #0x1
// eor x13, x26, x9
// adcs x5, x5, x13
// eor x20, x20, x9
// adcs x21, x21, x20
// adc x19, x19, x9
// subs x20, x14, x7
// cneg x20, x20, cc
// csetm x9, cc
// subs x13, x24, x10
// cneg x13, x13, cc
// mul x26, x20, x13
// umulh x20, x20, x13
// cinv x9, x9, cc
// cmn x9, #0x1
// eor x13, x26, x9
// adcs x8, x8, x13
// eor x20, x20, x9
// adcs x11, x11, x20
// adcs x15, x15, x9
// adcs x16, x16, x9
// adcs x5, x5, x9
// adcs x21, x21, x9
// adc x19, x19, x9
// subs x20, x7, x25
// cneg x20, x20, cc
// csetm x9, cc
// subs x13, x6, x24
// cneg x13, x13, cc
// mul x26, x20, x13
// umulh x20, x20, x13
// cinv x9, x9, cc
// cmn x9, #0x1
// eor x13, x26, x9
// adcs x16, x16, x13
// eor x20, x20, x9
// adcs x5, x5, x20
// adcs x21, x21, x9
// adc x19, x19, x9
// subs x20, x14, x3
// cneg x20, x20, cc
// csetm x9, cc
// subs x13, x12, x10
// cneg x13, x13, cc
// mul x26, x20, x13
// umulh x20, x20, x13
// cinv x9, x9, cc
// cmn x9, #0x1
// eor x13, x26, x9
// adcs x11, x11, x13
// eor x20, x20, x9
// adcs x15, x15, x20
// adcs x16, x16, x9
// adcs x5, x5, x9
// adcs x21, x21, x9
// adc x19, x19, x9
// subs x25, x14, x25
// cneg x25, x25, cc
// csetm x20, cc
// subs x10, x6, x10
// cneg x10, x10, cc
// mul x6, x25, x10
// umulh x25, x25, x10
// cinv x10, x20, cc
// cmn x10, #0x1
// eor x6, x6, x10
// adcs x6, x15, x6
// eor x25, x25, x10
// adcs x25, x16, x25
// adcs x16, x5, x10
// adcs x5, x21, x10
// adc x10, x19, x10
// subs x7, x7, x3
// cneg x7, x7, cc
// csetm x3, cc
// subs x24, x12, x24
// cneg x24, x24, cc
// mul x12, x7, x24
// umulh x7, x7, x24
// cinv x3, x3, cc
// cmn x3, #0x1
// eor x24, x12, x3
// adcs x24, x6, x24
// eor x7, x7, x3
// adcs x7, x25, x7
// adcs x25, x16, x3
// adcs x12, x5, x3
// adc x3, x10, x3
// lsl x10, x23, #9
// extr x6, x8, x23, #55
// extr x23, x11, x8, #55
// extr x16, x24, x11, #55
// lsr x24, x24, #55
// stp x7, x25, [sp] // @slothy:writes=stack0
// stp x12, x3, [sp, #16] // @slothy:writes=stack16
// stp x10, x6, [sp, #32] // @slothy:writes=stack32
// stp x23, x16, [sp, #48] // @slothy:writes=stack48
// str x24, [sp, #64] // @slothy:writes=stack64
// ldp x7, x3, [x1, #32]
// ldr q0, [x1, #32]
// ldp x25, x10, [x1, #48]
// ldp x24, x12, [x2, #32]
// ldr q25, [x2, #32]
// ldp x6, x23, [x2, #48]
// ldr q18, [x1, #48]
// ldr q3, [x2, #48]
// uzp1 v26.4S, v25.4S, v0.4S
// rev64 v25.4S, v25.4S
// uzp1 v22.4S, v0.4S, v0.4S
// mul v0.4S, v25.4S, v0.4S
// uaddlp v0.2D, v0.4S
// shl v0.2D, v0.2D, #32
// umlal v0.2D, v22.2S, v26.2S
// mov x16, v0.d[0]
// mov x5, v0.d[1]
// movi v0.2D, #0x00000000ffffffff
// uzp2 v25.4S, v3.4S, v3.4S
// xtn v26.2S, v18.2D
// xtn v22.2S, v3.2D
// rev64 v24.4S, v3.4S
// umull v19.2D, v26.2S, v22.2S
// umull v3.2D, v26.2S, v25.2S
// uzp2 v20.4S, v18.4S, v18.4S
// mul v18.4S, v24.4S, v18.4S
// usra v3.2D, v19.2D, #32
// umull v6.2D, v20.2S, v25.2S
// uaddlp v25.2D, v18.4S
// and v0.16B, v3.16B, v0.16B
// umlal v0.2D, v20.2S, v22.2S
// shl v25.2D, v25.2D, #32
// usra v6.2D, v3.2D, #32
// umlal v25.2D, v26.2S, v22.2S
// usra v6.2D, v0.2D, #32
// mov x21, v25.d[0]
// mov x19, v25.d[1]
// umulh x8, x7, x24
// adds x5, x5, x8
// umulh x8, x3, x12
// adcs x21, x21, x8
// mov x8, v6.d[0]
// adcs x19, x19, x8
// mov x8, v6.d[1]
// adc x8, x8, xzr
// adds x11, x5, x16
// adcs x5, x21, x5
// adcs x21, x19, x21
// adcs x19, x8, x19
// adc x8, xzr, x8
// adds x15, x5, x16
// adcs x20, x21, x11
// adcs x5, x19, x5
// adcs x21, x8, x21
// adcs x19, xzr, x19
// adc x8, xzr, x8
// subs x9, x25, x10
// cneg x9, x9, cc
// csetm x13, cc
// subs x26, x23, x6
// cneg x26, x26, cc
// mul x22, x9, x26
// umulh x9, x9, x26
// cinv x13, x13, cc
// cmn x13, #0x1
// eor x26, x22, x13
// adcs x21, x21, x26
// eor x9, x9, x13
// adcs x19, x19, x9
// adc x8, x8, x13
// subs x9, x7, x3
// cneg x9, x9, cc
// csetm x13, cc
// subs x26, x12, x24
// cneg x26, x26, cc
// mul x22, x9, x26
// umulh x9, x9, x26
// cinv x13, x13, cc
// cmn x13, #0x1
// eor x26, x22, x13
// adcs x11, x11, x26
// eor x9, x9, x13
// adcs x15, x15, x9
// adcs x20, x20, x13
// adcs x5, x5, x13
// adcs x21, x21, x13
// adcs x19, x19, x13
// adc x8, x8, x13
// subs x9, x3, x10
// cneg x9, x9, cc
// csetm x13, cc
// subs x26, x23, x12
// cneg x26, x26, cc
// mul x22, x9, x26
// umulh x9, x9, x26
// cinv x13, x13, cc
// cmn x13, #0x1
// eor x26, x22, x13
// adcs x5, x5, x26
// eor x9, x9, x13
// adcs x14, x21, x9
// adcs x21, x19, x13
// adc x19, x8, x13
// subs x9, x7, x25
// cneg x8, x9, cc
// csetm x9, cc
// subs x13, x6, x24
// cneg x13, x13, cc
// mul x26, x8, x13
// umulh x8, x8, x13
// cinv x9, x9, cc
// cmn x9, #0x1
// eor x13, x26, x9
// adcs x15, x15, x13
// eor x8, x8, x9
// adcs x8, x20, x8
// adcs x5, x5, x9
// adcs x20, x14, x9
// adcs x21, x21, x9
// adc x19, x19, x9
// subs x9, x7, x10
// cneg x9, x9, cc
// csetm x13, cc
// subs x26, x23, x24
// cneg x26, x26, cc
// mul x22, x9, x26
// umulh x9, x9, x26
// cinv x13, x13, cc
// cmn x13, #0x1
// eor x26, x22, x13
// adcs x8, x8, x26
// eor x9, x9, x13
// adcs x5, x5, x9
// adcs x20, x20, x13
// adcs x21, x21, x13
// adc x19, x19, x13
// subs x9, x3, x25
// cneg x9, x9, cc
// csetm x13, cc
// subs x26, x6, x12
// cneg x26, x26, cc
// mul x22, x9, x26
// umulh x9, x9, x26
// cinv x13, x13, cc
// cmn x13, #0x1
// eor x26, x22, x13
// adcs x8, x8, x26
// eor x9, x9, x13
// adcs x5, x5, x9
// adcs x20, x20, x13
// adcs x21, x21, x13
// adc x19, x19, x13
// ldp x9, x13, [sp] // @slothy:reads=stack0
// adds x16, x16, x9
// adcs x11, x11, x13
// stp x16, x11, [sp] // @slothy:writes=stack0
// ldp x16, x11, [sp, #16] // @slothy:reads=stack16
// adcs x16, x15, x16
// adcs x8, x8, x11
// stp x16, x8, [sp, #16] // @slothy:writes=stack16
// ldp x16, x8, [sp, #32] // @slothy:reads=stack32
// adcs x16, x5, x16
// adcs x5, x20, x8
// stp x16, x5, [sp, #32] // @slothy:writes=stack32
// ldp x16, x5, [sp, #48] // @slothy:reads=stack48
// adcs x16, x21, x16
// adcs x5, x19, x5
// stp x16, x5, [sp, #48] // @slothy:writes=stack48
// ldr x16, [sp, #64] // @slothy:reads=stack64
// adc x16, x16, xzr
// str x16, [sp, #64] // @slothy:writes=stack64
// ldp x16, x5, [x1]
// subs x7, x7, x16
// sbcs x3, x3, x5
// ldp x16, x5, [x1, #16]
// sbcs x25, x25, x16
// sbcs x10, x10, x5
// csetm x16, cc
// ldp x5, x21, [x2]
// subs x24, x5, x24
// sbcs x12, x21, x12
// ldp x5, x19, [x2, #16]
// sbcs x6, x5, x6
// sbcs x23, x19, x23
// csetm x5, cc
// eor x7, x7, x16
// subs x7, x7, x16
// eor x3, x3, x16
// sbcs x3, x3, x16
// eor x25, x25, x16
// sbcs x25, x25, x16
// eor x10, x10, x16
// sbc x10, x10, x16
// eor x24, x24, x5
// subs x24, x24, x5
// eor x12, x12, x5
// sbcs x12, x12, x5
// eor x6, x6, x5
// sbcs x6, x6, x5
// eor x23, x23, x5
// sbc x23, x23, x5
// eor x16, x5, x16
// mul x21, x7, x24
// mul x5, x3, x12
// mul x19, x25, x6
// mul x8, x10, x23
// umulh x11, x7, x24
// adds x5, x5, x11
// umulh x11, x3, x12
// adcs x19, x19, x11
// umulh x11, x25, x6
// adcs x8, x8, x11
// umulh x11, x10, x23
// adc x11, x11, xzr
// adds x15, x5, x21
// adcs x5, x19, x5
// adcs x19, x8, x19
// adcs x8, x11, x8
// adc x11, xzr, x11
// adds x20, x5, x21
// adcs x9, x19, x15
// adcs x5, x8, x5
// adcs x19, x11, x19
// adcs x8, xzr, x8
// adc x11, xzr, x11
// subs x13, x25, x10
// cneg x13, x13, cc
// csetm x26, cc
// subs x22, x23, x6
// cneg x22, x22, cc
// mul x4, x13, x22
// umulh x13, x13, x22
// cinv x26, x26, cc
// cmn x26, #0x1
// eor x22, x4, x26
// adcs x19, x19, x22
// eor x13, x13, x26
// adcs x8, x8, x13
// adc x11, x11, x26
// subs x13, x7, x3
// cneg x13, x13, cc
// csetm x26, cc
// subs x22, x12, x24
// cneg x22, x22, cc
// mul x4, x13, x22
// umulh x13, x13, x22
// cinv x26, x26, cc
// cmn x26, #0x1
// eor x22, x4, x26
// adcs x15, x15, x22
// eor x13, x13, x26
// adcs x20, x20, x13
// adcs x9, x9, x26
// adcs x5, x5, x26
// adcs x19, x19, x26
// adcs x8, x8, x26
// adc x11, x11, x26
// subs x13, x3, x10
// cneg x13, x13, cc
// csetm x26, cc
// subs x22, x23, x12
// cneg x22, x22, cc
// mul x4, x13, x22
// umulh x13, x13, x22
// cinv x26, x26, cc
// cmn x26, #0x1
// eor x22, x4, x26
// adcs x5, x5, x22
// eor x13, x13, x26
// adcs x19, x19, x13
// adcs x8, x8, x26
// adc x11, x11, x26
// subs x13, x7, x25
// cneg x13, x13, cc
// csetm x26, cc
// subs x22, x6, x24
// cneg x22, x22, cc
// mul x4, x13, x22
// umulh x13, x13, x22
// cinv x26, x26, cc
// cmn x26, #0x1
// eor x22, x4, x26
// adcs x20, x20, x22
// eor x13, x13, x26
// adcs x9, x9, x13
// adcs x5, x5, x26
// adcs x19, x19, x26
// adcs x8, x8, x26
// adc x11, x11, x26
// subs x7, x7, x10
// cneg x7, x7, cc
// csetm x10, cc
// subs x24, x23, x24
// cneg x24, x24, cc
// mul x23, x7, x24
// umulh x7, x7, x24
// cinv x10, x10, cc
// cmn x10, #0x1
// eor x24, x23, x10
// adcs x24, x9, x24
// eor x7, x7, x10
// adcs x7, x5, x7
// adcs x23, x19, x10
// adcs x5, x8, x10
// adc x10, x11, x10
// subs x3, x3, x25
// cneg x3, x3, cc
// csetm x25, cc
// subs x12, x6, x12
// cneg x12, x12, cc
// mul x6, x3, x12
// umulh x3, x3, x12
// cinv x25, x25, cc
// cmn x25, #0x1
// eor x12, x6, x25
// adcs x24, x24, x12
// eor x3, x3, x25
// adcs x7, x7, x3
// adcs x3, x23, x25
// adcs x12, x5, x25
// adc x25, x10, x25
// ldp x10, x6, [sp] // @slothy:reads=stack0
// ldp x23, x5, [sp, #16] // @slothy:reads=stack16
// eor x21, x21, x16
// adds x21, x21, x10
// eor x19, x15, x16
// adcs x19, x19, x6
// eor x8, x20, x16
// adcs x8, x8, x23
// eor x24, x24, x16
// adcs x24, x24, x5
// eor x7, x7, x16
// ldp x11, x15, [sp, #32] // @slothy:reads=stack32
// ldp x20, x9, [sp, #48] // @slothy:reads=stack48
// ldr x13, [sp, #64] // @slothy:reads=stack64
// adcs x7, x7, x11
// eor x3, x3, x16
// adcs x3, x3, x15
// eor x12, x12, x16
// adcs x12, x12, x20
// eor x25, x25, x16
// adcs x25, x25, x9
// adc x26, x13, xzr
// adds x7, x7, x10
// adcs x3, x3, x6
// adcs x10, x12, x23
// adcs x25, x25, x5
// and x12, x16, #0x1ff
// lsl x6, x21, #9
// orr x12, x6, x12
// adcs x12, x11, x12
// extr x6, x19, x21, #55
// adcs x6, x15, x6
// extr x23, x8, x19, #55
// adcs x23, x20, x23
// extr x16, x24, x8, #55
// adcs x16, x9, x16
// lsr x24, x24, #55
// adc x24, x24, x13
// ldr x5, [x2, #64]
// ldp x21, x19, [x1]
// and x8, x21, #0xfffffffffffff
// mul x8, x5, x8
// ldr x11, [x1, #64]
// ldp x15, x20, [x2]
// and x9, x15, #0xfffffffffffff
// mul x9, x11, x9
// add x8, x8, x9
// extr x21, x19, x21, #52
// and x21, x21, #0xfffffffffffff
// mul x21, x5, x21
// extr x15, x20, x15, #52
// and x15, x15, #0xfffffffffffff
// mul x15, x11, x15
// add x21, x21, x15
// lsr x15, x8, #52
// add x21, x21, x15
// lsl x8, x8, #12
// extr x8, x21, x8, #12
// adds x7, x7, x8
// ldp x8, x15, [x1, #16]
// ldp x9, x13, [x2, #16]
// extr x19, x8, x19, #40
// and x19, x19, #0xfffffffffffff
// mul x19, x5, x19
// extr x20, x9, x20, #40
// and x20, x20, #0xfffffffffffff
// mul x20, x11, x20
// add x19, x19, x20
// lsr x20, x21, #52
// add x19, x19, x20
// lsl x21, x21, #12
// extr x21, x19, x21, #24
// adcs x3, x3, x21
// extr x21, x15, x8, #28
// and x21, x21, #0xfffffffffffff
// mul x21, x5, x21
// extr x8, x13, x9, #28
// and x8, x8, #0xfffffffffffff
// mul x8, x11, x8
// add x21, x21, x8
// lsr x8, x19, #52
// add x21, x21, x8
// lsl x19, x19, #12
// extr x19, x21, x19, #36
// adcs x10, x10, x19
// and x19, x3, x10
// ldp x8, x20, [x1, #32]
// ldp x9, x22, [x2, #32]
// extr x15, x8, x15, #16
// and x15, x15, #0xfffffffffffff
// mul x4, x5, x15
// extr x15, x9, x13, #16
// and x15, x15, #0xfffffffffffff
// mul x15, x11, x15
// add x15, x4, x15
// lsl x13, x26, #48
// add x15, x15, x13
// lsr x13, x21, #52
// add x15, x15, x13
// lsl x21, x21, #12
// extr x21, x15, x21, #48
// adcs x25, x25, x21
// and x21, x19, x25
// lsr x19, x8, #4
// and x19, x19, #0xfffffffffffff
// mul x19, x5, x19
// lsr x26, x9, #4
// and x13, x26, #0xfffffffffffff
// mul x26, x11, x13
// add x19, x19, x26
// lsr x13, x15, #52
// add x19, x19, x13
// lsl x15, x15, #12
// extr x15, x19, x15, #60
// extr x8, x20, x8, #56
// and x8, x8, #0xfffffffffffff
// mul x8, x5, x8
// extr x9, x22, x9, #56
// and x9, x9, #0xfffffffffffff
// mul x9, x11, x9
// add x8, x8, x9
// lsr x19, x19, #52
// add x19, x8, x19
// lsl x8, x15, #8
// extr x8, x19, x8, #8
// adcs x12, x12, x8
// and x21, x21, x12
// ldp x1, x8, [x1, #48]
// ldp x2, x15, [x2, #48]
// extr x20, x1, x20, #44
// and x20, x20, #0xfffffffffffff
// mul x20, x5, x20
// extr x9, x2, x22, #44
// and x9, x9, #0xfffffffffffff
// mul x9, x11, x9
// add x20, x20, x9
// lsr x9, x19, #52
// add x22, x20, x9
// lsl x19, x19, #12
// extr x19, x22, x19, #20
// adcs x6, x6, x19
// and x21, x21, x6
// extr x1, x8, x1, #32
// and x1, x1, #0xfffffffffffff
// mul x1, x5, x1
// extr x2, x15, x2, #32
// and x2, x2, #0xfffffffffffff
// mul x2, x11, x2
// add x2, x1, x2
// lsr x1, x22, #52
// add x2, x2, x1
// lsl x1, x22, #12
// extr x1, x2, x1, #32
// adcs x23, x23, x1
// and x21, x21, x23
// lsr x1, x8, #20
// mul x1, x5, x1
// lsr x19, x15, #20
// mul x19, x11, x19
// add x1, x1, x19
// lsr x19, x2, #52
// add x19, x1, x19
// lsl x2, x2, #12
// extr x2, x19, x2, #44
// adcs x16, x16, x2
// and x2, x21, x16
// mul x5, x5, x11
// lsr x1, x19, #44
// add x5, x5, x1
// adc x24, x24, x5
// lsr x5, x24, #9
// orr x24, x24, #0xfffffffffffffe00
// cmp xzr, xzr
// adcs xzr, x7, x5
// adcs xzr, x2, xzr
// adcs xzr, x24, xzr
// adcs x7, x7, x5
// adcs x2, x3, xzr
// adcs x10, x10, xzr
// adcs x25, x25, xzr
// adcs x12, x12, xzr
// adcs x6, x6, xzr
// adcs x23, x23, xzr
// adcs x16, x16, xzr
// adc x3, x24, xzr
// stp x2, x10, [x0] // @slothy:writes=buffer0
// stp x25, x12, [x0, #16] // @slothy:writes=buffer16
// stp x6, x23, [x0, #32] // @slothy:writes=buffer32
// lsl x25, x7, #9
// and x3, x3, #0x1ff
// orr x3, x3, x25
// stp x16, x3, [x0, #48] // @slothy:writes=buffer48
// lsr x14, x7, #55
// str x14, [x0, #64] // @slothy:writes=buffer64
// add sp, sp, #80
// ldp x25, x26, [sp], #16
// ldp x23, x24, [sp], #16
// ldp x21, x22, [sp], #16
// ldp x19, x20, [sp], #16
// ret
//
// The bash script used for step 2 is as follows:
//
// # Store the assembly instructions except the last 'ret',
// # callee-register store/loads and add/sub sp #80 as, say, 'input.S'.
// export OUTPUTS="[hint_buffer0,hint_buffer16,hint_buffer32,hint_buffer48,hint_buffer64]"
// export RESERVED_REGS="[x18,x27,x28,x29,x30,sp,q8,q9,q10,q11,q12,q13,q14,q15,v8,v9,v10,v11,v12,v13,v14,v15]"
// <s2n-bignum>/tools/external/slothy.sh input.S my_out_dir
// # my_out_dir/3.opt.s is the optimized assembly. Its output may differ
// # from this file since the sequence is non-deterministically chosen.
// # Please add 'ret' at the end of the output assembly.
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p521_neon)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p521_neon)
.text
.balign 4
S2N_BN_SYMBOL(bignum_montmul_p521_neon):
// Save registers and make space for the temporary buffer
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
sub sp, sp, #80
ldr q24, [x2]
ldr q21, [x1]
ldr q1, [x2, #48]
ldp x23, x20, [x1, #16]
movi v18.2D, #0x00000000ffffffff
ldp x19, x17, [x2, #16]
uzp2 v3.4S, v24.4S, v24.4S
xtn v6.2S, v21.2D
ldp x11, x22, [x1]
rev64 v5.4S, v24.4S
xtn v24.2S, v24.2D
subs x16, x23, x20
umull v29.2D, v6.2S, v3.2S
rev64 v31.4S, v1.4S
cneg x26, x16, cc
umull v27.2D, v6.2S, v24.2S
ldr q19, [x1, #48]
csetm x12, cc
mul x15, x20, x17
mul v26.4S, v5.4S, v21.4S
uzp2 v28.4S, v21.4S, v21.4S
subs x6, x17, x19
xtn v7.2S, v1.2D
cinv x10, x12, cc
cneg x3, x6, cc
uzp2 v21.4S, v1.4S, v1.4S
umull v1.2D, v28.2S, v3.2S
mul x12, x26, x3
usra v29.2D, v27.2D, #32
mul v25.4S, v31.4S, v19.4S
usra v1.2D, v29.2D, #32
uaddlp v31.2D, v26.4S
umulh x14, x26, x3
eor x12, x12, x10
and v26.16B, v29.16B, v18.16B
uaddlp v2.2D, v25.4S
subs x16, x11, x22
shl v0.2D, v31.2D, #32
xtn v31.2S, v19.2D
cneg x6, x16, cc
shl v16.2D, v2.2D, #32
umlal v26.2D, v28.2S, v24.2S
umlal v0.2D, v6.2S, v24.2S
uzp2 v30.4S, v19.4S, v19.4S
umulh x26, x20, x17
umull v22.2D, v31.2S, v21.2S
umull v29.2D, v30.2S, v21.2S
usra v1.2D, v26.2D, #32
mul x13, x23, x19
eor x9, x14, x10
ldr q5, [x2, #32]
umull v26.2D, v31.2S, v7.2S
ldp x21, x4, [x2]
csetm x8, cc
mov x16, v0.d[1]
ldr q6, [x1, #32]
umlal v16.2D, v31.2S, v7.2S
mov x3, v0.d[0]
umulh x14, x23, x19
mov x25, v1.d[1]
mov x5, v1.d[0]
usra v22.2D, v26.2D, #32
rev64 v3.4S, v5.4S
adds x16, x16, x5
uzp1 v24.4S, v5.4S, v6.4S
movi v26.2D, #0x00000000ffffffff
adcs x7, x13, x25
uzp1 v0.4S, v6.4S, v6.4S
mul v5.4S, v3.4S, v6.4S
adcs x25, x15, x14
adc x13, x26, xzr
adds x26, x16, x3
and v6.16B, v22.16B, v26.16B
usra v29.2D, v22.2D, #32
adcs x16, x7, x16
adcs x14, x25, x7
umlal v6.2D, v30.2S, v7.2S
adcs x7, x13, x25
uaddlp v7.2D, v5.4S
adc x13, xzr, x13
adds x25, x16, x3
adcs x24, x14, x26
shl v1.2D, v7.2D, #32
adcs x5, x7, x16
usra v29.2D, v6.2D, #32
adcs x16, x13, x14
umlal v1.2D, v0.2S, v24.2S
adcs x14, xzr, x7
adc x13, xzr, x13
subs x7, x4, x21
cneg x7, x7, cc
mul x15, x6, x7
umulh x7, x6, x7
cinv x6, x8, cc
cmn x10, #0x1
adcs x16, x16, x12
eor x8, x15, x6
adcs x14, x14, x9
adc x9, x13, x10
subs x13, x22, x20
cneg x13, x13, cc
csetm x10, cc
subs x12, x17, x4
cinv x15, x10, cc
cneg x10, x12, cc
cmn x6, #0x1
umulh x12, x13, x10
eor x7, x7, x6
adcs x26, x26, x8
adcs x7, x25, x7
adcs x8, x24, x6
adcs x24, x5, x6
adcs x25, x16, x6
mul x5, x13, x10
adcs x13, x14, x6
adc x14, x9, x6
subs x10, x11, x23
csetm x16, cc
cneg x9, x10, cc
subs x6, x19, x21
cinv x10, x16, cc
cneg x16, x6, cc
eor x5, x5, x15
subs x20, x11, x20
mul x6, x9, x16
csetm x11, cc
cneg x20, x20, cc
subs x17, x17, x21
cneg x17, x17, cc
cinv x11, x11, cc
umulh x9, x9, x16
eor x16, x12, x15
subs x21, x22, x23
cneg x22, x21, cc
eor x12, x6, x10
csetm x6, cc
cmn x15, #0x1
eor x9, x9, x10
adcs x5, x24, x5
umulh x23, x20, x17
lsl x24, x3, #9
adcs x25, x25, x16
adcs x21, x13, x15
adc x16, x14, x15
subs x13, x19, x4
cneg x14, x13, cc
cinv x15, x6, cc
cmn x10, #0x1
mul x13, x20, x17
extr x17, x26, x3, #55
adcs x12, x7, x12
adcs x8, x8, x9
eor x19, x23, x11
adcs x6, x5, x10
eor x13, x13, x11
mov x5, v29.d[0]
adcs x25, x25, x10
extr x26, x12, x26, #55
mul x4, x22, x14
adcs x7, x21, x10
stp x24, x17, [sp, #32]
ldp x20, x21, [x1, #48]
adc x24, x16, x10
cmn x11, #0x1
mov x16, v16.d[0]
umulh x17, x22, x14
adcs x13, x8, x13
eor x9, x4, x15
adcs x10, x6, x19
ldp x22, x23, [x1, #32]
adcs x3, x25, x11
ldp x4, x19, [x2, #32]
eor x17, x17, x15
adcs x7, x7, x11
adc x14, x24, x11
subs x6, x20, x21
csetm x11, cc
cneg x8, x6, cc
cmn x15, #0x1
umulh x25, x22, x4
adcs x24, x13, x9
adcs x10, x10, x17
extr x13, x24, x12, #55
adcs x9, x3, x15
ldp x17, x3, [x2, #48]
umulh x6, x23, x19
adcs x7, x7, x15
adc x14, x14, x15
subs x12, x22, x23
stp x10, x9, [sp]
mov x9, v1.d[1]
csetm x10, cc
stp x7, x14, [sp, #16]
cneg x12, x12, cc
subs x14, x3, x17
mov x7, v16.d[1]
cneg x15, x14, cc
mov x14, v29.d[1]
cinv x11, x11, cc
adds x9, x9, x25
mul x25, x8, x15
stp x26, x13, [sp, #48]
lsr x24, x24, #55
adcs x26, x16, x6
mov x13, v1.d[0]
str x24, [sp, #64]
adcs x7, x7, x5
adc x5, x14, xzr
umulh x6, x8, x15
eor x15, x25, x11
subs x25, x19, x4
cinv x16, x10, cc
cneg x10, x25, cc
eor x6, x6, x11
adds x8, x9, x13
adcs x14, x26, x9
mul x9, x12, x10
adcs x24, x7, x26
adcs x7, x5, x7
umulh x25, x12, x10
adc x12, xzr, x5
adds x26, x14, x13
eor x10, x9, x16
adcs x9, x24, x8
adcs x5, x7, x14
adcs x14, x12, x24
adcs x7, xzr, x7
adc x12, xzr, x12
eor x24, x25, x16
cmn x11, #0x1
adcs x25, x14, x15
adcs x14, x7, x6
adc x11, x12, x11
subs x12, x23, x21
csetm x15, cc
cneg x7, x12, cc
subs x12, x3, x19
cneg x12, x12, cc
cinv x15, x15, cc
cmn x16, #0x1
adcs x6, x8, x10
mul x10, x7, x12
adcs x26, x26, x24
adcs x9, x9, x16
umulh x24, x7, x12
eor x8, x10, x15
adcs x5, x5, x16
adcs x25, x25, x16
adcs x7, x14, x16
adc x16, x11, x16
subs x11, x22, x20
cneg x11, x11, cc
csetm x14, cc
subs x10, x17, x4
cinv x14, x14, cc
cneg x10, x10, cc
cmn x15, #0x1
eor x12, x24, x15
adcs x5, x5, x8
mul x24, x11, x10
adcs x8, x25, x12
adcs x25, x7, x15
adc x16, x16, x15
subs x12, x22, x21
umulh x10, x11, x10
cneg x15, x12, cc
csetm x11, cc
subs x12, x3, x4
cneg x12, x12, cc
cinv x7, x11, cc
mul x11, x15, x12
eor x24, x24, x14
cmn x14, #0x1
eor x10, x10, x14
adcs x24, x26, x24
eor x26, x11, x7
adcs x10, x9, x10
ldp x11, x9, [x1, #16]
umulh x15, x15, x12
adcs x5, x5, x14
adcs x8, x8, x14
adcs x25, x25, x14
adc x12, x16, x14
cmn x7, #0x1
adcs x16, x10, x26
eor x14, x15, x7
adcs x26, x5, x14
ldp x5, x10, [x1]
adcs x14, x8, x7
adcs x15, x25, x7
adc x7, x12, x7
subs x25, x23, x20
cneg x25, x25, cc
csetm x8, cc
subs x22, x22, x5
sbcs x10, x23, x10
ldp x23, x12, [x2]
sbcs x20, x20, x11
sbcs x21, x21, x9
csetm x9, cc
subs x11, x17, x19
cneg x5, x11, cc
cinv x11, x8, cc
subs x23, x23, x4
sbcs x19, x12, x19
eor x20, x20, x9
ldp x12, x4, [x2, #16]
eor x21, x21, x9
umulh x8, x25, x5
eor x22, x22, x9
eor x10, x10, x9
sbcs x17, x12, x17
sbcs x3, x4, x3
mul x25, x25, x5
csetm x12, cc
subs x22, x22, x9
eor x4, x23, x12
sbcs x23, x10, x9
eor x10, x3, x12
sbcs x20, x20, x9
eor x5, x8, x11
eor x3, x19, x12
sbc x21, x21, x9
subs x4, x4, x12
eor x25, x25, x11
sbcs x19, x3, x12
eor x3, x17, x12
sbcs x17, x3, x12
umulh x8, x23, x19
sbc x3, x10, x12
cmn x11, #0x1
adcs x25, x16, x25
adcs x26, x26, x5
ldp x10, x5, [sp]
adcs x16, x14, x11
mul x14, x22, x4
adcs x15, x15, x11
adc x7, x7, x11
adds x11, x13, x10
umulh x10, x21, x3
adcs x13, x6, x5
ldp x6, x5, [sp, #16]
stp x11, x13, [sp]
eor x13, x12, x9
mul x9, x23, x19
adcs x6, x24, x6
ldp x11, x24, [sp, #32]
mul x12, x20, x17
adcs x25, x25, x5
stp x6, x25, [sp, #16]
ldp x6, x25, [sp, #48]
umulh x5, x20, x17
adcs x11, x26, x11
ldr x26, [sp, #64]
adcs x16, x16, x24
stp x11, x16, [sp, #32]
adcs x11, x15, x6
umulh x24, x22, x4
adcs x25, x7, x25
adc x7, x26, xzr
stp x11, x25, [sp, #48]
subs x26, x20, x21
csetm x15, cc
cneg x25, x26, cc
str x7, [sp, #64]
mul x11, x21, x3
subs x6, x22, x23
cneg x6, x6, cc
csetm x16, cc
subs x26, x3, x17
cneg x26, x26, cc
cinv x7, x15, cc
adds x24, x9, x24
adcs x8, x12, x8
umulh x12, x25, x26
adcs x5, x11, x5
adc x11, x10, xzr
subs x15, x19, x4
cinv x9, x16, cc
mul x26, x25, x26
eor x25, x12, x7
cneg x12, x15, cc
adds x16, x24, x14
eor x15, x26, x7
umulh x26, x6, x12
adcs x10, x8, x24
adcs x8, x5, x8
adcs x24, x11, x5
adc x5, xzr, x11
adds x11, x10, x14
mul x12, x6, x12
adcs x6, x8, x16
eor x14, x14, x13
adcs x10, x24, x10
adcs x8, x5, x8
adcs x24, xzr, x24
adc x5, xzr, x5
cmn x7, #0x1
adcs x15, x8, x15
adcs x24, x24, x25
eor x25, x26, x9
adc x8, x5, x7
eor x5, x12, x9
subs x26, x23, x21
cneg x12, x26, cc
csetm x26, cc
subs x7, x3, x19
cneg x7, x7, cc
cinv x26, x26, cc
cmn x9, #0x1
adcs x5, x16, x5
mul x16, x12, x7
adcs x25, x11, x25
umulh x7, x12, x7
adcs x12, x6, x9
eor x11, x16, x26
adcs x6, x10, x9
adcs x10, x15, x9
adcs x24, x24, x9
adc x8, x8, x9
subs x15, x22, x20
cneg x15, x15, cc
csetm x9, cc
subs x16, x17, x4
cneg x16, x16, cc
cinv x9, x9, cc
subs x21, x22, x21
mul x22, x15, x16
eor x7, x7, x26
cneg x21, x21, cc
umulh x16, x15, x16
csetm x15, cc
subs x4, x3, x4
cneg x3, x4, cc
eor x4, x22, x9
cinv x15, x15, cc
cmn x26, #0x1
eor x22, x5, x13
adcs x5, x6, x11
adcs x6, x10, x7
adcs x10, x24, x26
eor x11, x16, x9
adc x8, x8, x26
subs x16, x23, x20
cneg x7, x16, cc
csetm x23, cc
cmn x9, #0x1
adcs x16, x25, x4
mul x4, x21, x3
adcs x24, x12, x11
eor x11, x16, x13
adcs x26, x5, x9
adcs x16, x6, x9
umulh x20, x21, x3
adcs x6, x10, x9
ldp x3, x10, [x1]
adc x12, x8, x9
subs x21, x17, x19
cneg x8, x21, cc
eor x25, x20, x15
eor x20, x4, x15
mul x19, x7, x8
cinv x17, x23, cc
cmn x15, #0x1
adcs x4, x24, x20
extr x21, x10, x3, #52
umulh x9, x7, x8
and x24, x21, #0xfffffffffffff
adcs x26, x26, x25
eor x7, x19, x17
adcs x5, x16, x15
and x23, x3, #0xfffffffffffff
eor x9, x9, x17
adcs x21, x6, x15
adc x6, x12, x15
cmn x17, #0x1
adcs x25, x4, x7
and x4, x13, #0x1ff
ldp x16, x8, [sp]
adcs x20, x26, x9
adcs x12, x5, x17
ldp x3, x5, [sp, #16]
eor x15, x12, x13
adcs x12, x21, x17
adc x9, x6, x17
adds x21, x14, x16
lsl x7, x21, #9
eor x26, x12, x13
ldp x19, x17, [sp, #32]
orr x4, x7, x4
eor x14, x25, x13
adcs x7, x22, x8
adcs x12, x11, x3
eor x11, x20, x13
ldp x6, x25, [sp, #48]
eor x20, x9, x13
adcs x22, x14, x5
ldr x14, [x2, #64]
adcs x9, x11, x19
ldr x11, [sp, #64]
adcs x13, x15, x17
adcs x26, x26, x6
adcs x20, x20, x25
adc x15, x11, xzr
adds x16, x9, x16
mul x9, x14, x23
adcs x23, x13, x8
extr x13, x7, x21, #55
adcs x21, x26, x3
ldp x3, x26, [x1, #16]
extr x8, x22, x12, #55
adcs x20, x20, x5
adcs x19, x19, x4
mul x4, x14, x24
ldp x5, x24, [x2]
adcs x17, x17, x13
extr x13, x26, x3, #28
extr x10, x3, x10, #40
extr x7, x12, x7, #55
and x12, x13, #0xfffffffffffff
adcs x3, x6, x7
ldr x6, [x1, #64]
extr x7, x24, x5, #52
and x5, x5, #0xfffffffffffff
mul x12, x14, x12
adcs x13, x25, x8
and x7, x7, #0xfffffffffffff
ldp x8, x25, [x2, #16]
mul x5, x6, x5
extr x24, x8, x24, #40
and x24, x24, #0xfffffffffffff
add x9, x9, x5
lsr x5, x22, #55
mul x7, x6, x7
extr x22, x25, x8, #28
and x10, x10, #0xfffffffffffff
mul x10, x14, x10
lsr x8, x9, #52
lsl x9, x9, #12
add x7, x4, x7
adc x4, x5, x11
ldp x11, x5, [x2, #32]
add x8, x7, x8
and x7, x22, #0xfffffffffffff
extr x22, x8, x9, #12
lsl x9, x15, #48
mul x15, x6, x24
add x10, x10, x15
lsr x15, x8, #52
extr x25, x11, x25, #16
and x25, x25, #0xfffffffffffff
mul x24, x6, x7
add x7, x10, x15
lsr x10, x7, #52
lsl x8, x8, #12
extr x8, x7, x8, #24
adds x22, x16, x22
ldp x16, x15, [x1, #32]
adcs x23, x23, x8
extr x8, x5, x11, #56
mul x25, x6, x25
add x24, x12, x24
add x12, x24, x10
lsr x10, x16, #4
lsl x7, x7, #12
extr x24, x12, x7, #36
and x10, x10, #0xfffffffffffff
extr x26, x16, x26, #16
mul x10, x14, x10
and x8, x8, #0xfffffffffffff
adcs x21, x21, x24
and x7, x26, #0xfffffffffffff
mul x7, x14, x7
lsr x24, x11, #4
and x24, x24, #0xfffffffffffff
extr x11, x15, x16, #56
lsl x26, x12, #12
and x16, x11, #0xfffffffffffff
mul x11, x6, x24
lsr x12, x12, #52
ldp x2, x24, [x2, #48]
add x25, x7, x25
add x25, x25, x9
and x9, x23, x21
mul x8, x6, x8
add x12, x25, x12
add x25, x10, x11
extr x11, x12, x26, #48
ldp x7, x26, [x1, #48]
extr x5, x2, x5, #44
lsr x1, x12, #52
mul x10, x14, x16
lsr x16, x24, #20
add x10, x10, x8
extr x8, x26, x7, #32
and x8, x8, #0xfffffffffffff
extr x24, x24, x2, #32
mul x2, x6, x16
add x1, x25, x1
lsr x25, x26, #20
and x26, x24, #0xfffffffffffff
and x24, x5, #0xfffffffffffff
extr x16, x7, x15, #44
mul x7, x6, x24
adcs x11, x20, x11
and x20, x16, #0xfffffffffffff
lsl x5, x12, #12
and x15, x9, x11
mul x24, x14, x20
lsr x9, x1, #52
add x20, x10, x9
extr x12, x1, x5, #60
lsl x9, x20, #12
lsl x5, x12, #8
mul x10, x14, x8
extr x12, x20, x5, #8
lsr x1, x20, #52
add x7, x24, x7
adcs x8, x19, x12
and x5, x15, x8
add x7, x7, x1
mul x20, x6, x26
extr x24, x7, x9, #20
lsr x19, x7, #52
mul x25, x14, x25
lsl x16, x7, #12
add x20, x10, x20
adcs x12, x17, x24
add x19, x20, x19
lsr x26, x19, #52
mul x24, x14, x6
and x5, x5, x12
add x6, x25, x2
lsl x17, x19, #12
add x14, x6, x26
extr x16, x19, x16, #32
lsr x6, x14, #44
extr x19, x14, x17, #44
add x9, x24, x6
adcs x17, x3, x16
adcs x2, x13, x19
and x7, x5, x17
adc x15, x4, x9
cmp xzr, xzr
orr x1, x15, #0xfffffffffffffe00
lsr x3, x15, #9
adcs xzr, x22, x3
and x15, x7, x2
adcs xzr, x15, xzr
adcs xzr, x1, xzr
adcs x7, x22, x3
lsl x3, x7, #9
lsr x15, x7, #55
str x15, [x0, #64]
adcs x13, x23, xzr
adcs x16, x21, xzr
stp x13, x16, [x0]
adcs x13, x11, xzr
adcs x16, x8, xzr
stp x13, x16, [x0, #16]
adcs x19, x12, xzr
adcs x16, x17, xzr
adcs x13, x2, xzr
stp x19, x16, [x0, #32]
adc x16, x1, xzr
and x16, x16, #0x1ff
orr x16, x16, x3
stp x13, x16, [x0, #48]
// Restore regs and return
add sp, sp, #80
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/p521_jadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point addition on NIST curve P-521 in Jacobian coordinates
//
// extern void p521_jadd_alt
// (uint64_t p3[static 27],uint64_t p1[static 27],uint64_t p2[static 27]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// It is assumed that all coordinates of the input points p1 and p2 are
// fully reduced mod p_521, that both z coordinates are nonzero and
// that neither p1 =~= p2 nor p1 =~= -p2, where "=~=" means "represents
// the same affine point as".
//
// Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jadd_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jadd_alt)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 72
// Stable homes for input arguments during main code sequence
#define input_z x26
#define input_x x27
#define input_y x28
// Pointer-offset pairs for inputs and outputs
#define x_1 input_x, #0
#define y_1 input_x, #NUMSIZE
#define z_1 input_x, #(2*NUMSIZE)
#define x_2 input_y, #0
#define y_2 input_y, #NUMSIZE
#define z_2 input_y, #(2*NUMSIZE)
#define x_3 input_z, #0
#define y_3 input_z, #NUMSIZE
#define z_3 input_z, #(2*NUMSIZE)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z1sq sp, #(NUMSIZE*0)
#define ww sp, #(NUMSIZE*0)
#define resx sp, #(NUMSIZE*0)
#define yd sp, #(NUMSIZE*1)
#define y2a sp, #(NUMSIZE*1)
#define x2a sp, #(NUMSIZE*2)
#define zzx2 sp, #(NUMSIZE*2)
#define zz sp, #(NUMSIZE*3)
#define t1 sp, #(NUMSIZE*3)
#define t2 sp, #(NUMSIZE*4)
#define x1a sp, #(NUMSIZE*4)
#define zzx1 sp, #(NUMSIZE*4)
#define resy sp, #(NUMSIZE*4)
#define xd sp, #(NUMSIZE*5)
#define z2sq sp, #(NUMSIZE*5)
#define resz sp, #(NUMSIZE*5)
#define y1a sp, #(NUMSIZE*6)
// NUMSIZE*7 is not 16-aligned so we round it up
#define NSPACE (NUMSIZE*7+8)
// Corresponds exactly to bignum_mul_p521_alt
#define mul_p521(P0,P1,P2) \
ldp x3, x4, [P1]; \
ldp x5, x6, [P2]; \
mul x15, x3, x5; \
umulh x16, x3, x5; \
mul x14, x3, x6; \
umulh x17, x3, x6; \
adds x16, x16, x14; \
ldp x7, x8, [P2+16]; \
mul x14, x3, x7; \
umulh x19, x3, x7; \
adcs x17, x17, x14; \
mul x14, x3, x8; \
umulh x20, x3, x8; \
adcs x19, x19, x14; \
ldp x9, x10, [P2+32]; \
mul x14, x3, x9; \
umulh x21, x3, x9; \
adcs x20, x20, x14; \
mul x14, x3, x10; \
umulh x22, x3, x10; \
adcs x21, x21, x14; \
ldp x11, x12, [P2+48]; \
mul x14, x3, x11; \
umulh x23, x3, x11; \
adcs x22, x22, x14; \
ldr x13, [P2+64]; \
mul x14, x3, x12; \
umulh x24, x3, x12; \
adcs x23, x23, x14; \
mul x14, x3, x13; \
umulh x1, x3, x13; \
adcs x24, x24, x14; \
adc x1, x1, xzr; \
mul x14, x4, x5; \
adds x16, x16, x14; \
mul x14, x4, x6; \
adcs x17, x17, x14; \
mul x14, x4, x7; \
adcs x19, x19, x14; \
mul x14, x4, x8; \
adcs x20, x20, x14; \
mul x14, x4, x9; \
adcs x21, x21, x14; \
mul x14, x4, x10; \
adcs x22, x22, x14; \
mul x14, x4, x11; \
adcs x23, x23, x14; \
mul x14, x4, x12; \
adcs x24, x24, x14; \
mul x14, x4, x13; \
adcs x1, x1, x14; \
cset x0, hs; \
umulh x14, x4, x5; \
adds x17, x17, x14; \
umulh x14, x4, x6; \
adcs x19, x19, x14; \
umulh x14, x4, x7; \
adcs x20, x20, x14; \
umulh x14, x4, x8; \
adcs x21, x21, x14; \
umulh x14, x4, x9; \
adcs x22, x22, x14; \
umulh x14, x4, x10; \
adcs x23, x23, x14; \
umulh x14, x4, x11; \
adcs x24, x24, x14; \
umulh x14, x4, x12; \
adcs x1, x1, x14; \
umulh x14, x4, x13; \
adc x0, x0, x14; \
stp x15, x16, [P0]; \
ldp x3, x4, [P1+16]; \
mul x14, x3, x5; \
adds x17, x17, x14; \
mul x14, x3, x6; \
adcs x19, x19, x14; \
mul x14, x3, x7; \
adcs x20, x20, x14; \
mul x14, x3, x8; \
adcs x21, x21, x14; \
mul x14, x3, x9; \
adcs x22, x22, x14; \
mul x14, x3, x10; \
adcs x23, x23, x14; \
mul x14, x3, x11; \
adcs x24, x24, x14; \
mul x14, x3, x12; \
adcs x1, x1, x14; \
mul x14, x3, x13; \
adcs x0, x0, x14; \
cset x15, hs; \
umulh x14, x3, x5; \
adds x19, x19, x14; \
umulh x14, x3, x6; \
adcs x20, x20, x14; \
umulh x14, x3, x7; \
adcs x21, x21, x14; \
umulh x14, x3, x8; \
adcs x22, x22, x14; \
umulh x14, x3, x9; \
adcs x23, x23, x14; \
umulh x14, x3, x10; \
adcs x24, x24, x14; \
umulh x14, x3, x11; \
adcs x1, x1, x14; \
umulh x14, x3, x12; \
adcs x0, x0, x14; \
umulh x14, x3, x13; \
adc x15, x15, x14; \
mul x14, x4, x5; \
adds x19, x19, x14; \
mul x14, x4, x6; \
adcs x20, x20, x14; \
mul x14, x4, x7; \
adcs x21, x21, x14; \
mul x14, x4, x8; \
adcs x22, x22, x14; \
mul x14, x4, x9; \
adcs x23, x23, x14; \
mul x14, x4, x10; \
adcs x24, x24, x14; \
mul x14, x4, x11; \
adcs x1, x1, x14; \
mul x14, x4, x12; \
adcs x0, x0, x14; \
mul x14, x4, x13; \
adcs x15, x15, x14; \
cset x16, hs; \
umulh x14, x4, x5; \
adds x20, x20, x14; \
umulh x14, x4, x6; \
adcs x21, x21, x14; \
umulh x14, x4, x7; \
adcs x22, x22, x14; \
umulh x14, x4, x8; \
adcs x23, x23, x14; \
umulh x14, x4, x9; \
adcs x24, x24, x14; \
umulh x14, x4, x10; \
adcs x1, x1, x14; \
umulh x14, x4, x11; \
adcs x0, x0, x14; \
umulh x14, x4, x12; \
adcs x15, x15, x14; \
umulh x14, x4, x13; \
adc x16, x16, x14; \
stp x17, x19, [P0+16]; \
ldp x3, x4, [P1+32]; \
mul x14, x3, x5; \
adds x20, x20, x14; \
mul x14, x3, x6; \
adcs x21, x21, x14; \
mul x14, x3, x7; \
adcs x22, x22, x14; \
mul x14, x3, x8; \
adcs x23, x23, x14; \
mul x14, x3, x9; \
adcs x24, x24, x14; \
mul x14, x3, x10; \
adcs x1, x1, x14; \
mul x14, x3, x11; \
adcs x0, x0, x14; \
mul x14, x3, x12; \
adcs x15, x15, x14; \
mul x14, x3, x13; \
adcs x16, x16, x14; \
cset x17, hs; \
umulh x14, x3, x5; \
adds x21, x21, x14; \
umulh x14, x3, x6; \
adcs x22, x22, x14; \
umulh x14, x3, x7; \
adcs x23, x23, x14; \
umulh x14, x3, x8; \
adcs x24, x24, x14; \
umulh x14, x3, x9; \
adcs x1, x1, x14; \
umulh x14, x3, x10; \
adcs x0, x0, x14; \
umulh x14, x3, x11; \
adcs x15, x15, x14; \
umulh x14, x3, x12; \
adcs x16, x16, x14; \
umulh x14, x3, x13; \
adc x17, x17, x14; \
mul x14, x4, x5; \
adds x21, x21, x14; \
mul x14, x4, x6; \
adcs x22, x22, x14; \
mul x14, x4, x7; \
adcs x23, x23, x14; \
mul x14, x4, x8; \
adcs x24, x24, x14; \
mul x14, x4, x9; \
adcs x1, x1, x14; \
mul x14, x4, x10; \
adcs x0, x0, x14; \
mul x14, x4, x11; \
adcs x15, x15, x14; \
mul x14, x4, x12; \
adcs x16, x16, x14; \
mul x14, x4, x13; \
adcs x17, x17, x14; \
cset x19, hs; \
umulh x14, x4, x5; \
adds x22, x22, x14; \
umulh x14, x4, x6; \
adcs x23, x23, x14; \
umulh x14, x4, x7; \
adcs x24, x24, x14; \
umulh x14, x4, x8; \
adcs x1, x1, x14; \
umulh x14, x4, x9; \
adcs x0, x0, x14; \
umulh x14, x4, x10; \
adcs x15, x15, x14; \
umulh x14, x4, x11; \
adcs x16, x16, x14; \
umulh x14, x4, x12; \
adcs x17, x17, x14; \
umulh x14, x4, x13; \
adc x19, x19, x14; \
stp x20, x21, [P0+32]; \
ldp x3, x4, [P1+48]; \
mul x14, x3, x5; \
adds x22, x22, x14; \
mul x14, x3, x6; \
adcs x23, x23, x14; \
mul x14, x3, x7; \
adcs x24, x24, x14; \
mul x14, x3, x8; \
adcs x1, x1, x14; \
mul x14, x3, x9; \
adcs x0, x0, x14; \
mul x14, x3, x10; \
adcs x15, x15, x14; \
mul x14, x3, x11; \
adcs x16, x16, x14; \
mul x14, x3, x12; \
adcs x17, x17, x14; \
mul x14, x3, x13; \
adcs x19, x19, x14; \
cset x20, hs; \
umulh x14, x3, x5; \
adds x23, x23, x14; \
umulh x14, x3, x6; \
adcs x24, x24, x14; \
umulh x14, x3, x7; \
adcs x1, x1, x14; \
umulh x14, x3, x8; \
adcs x0, x0, x14; \
umulh x14, x3, x9; \
adcs x15, x15, x14; \
umulh x14, x3, x10; \
adcs x16, x16, x14; \
umulh x14, x3, x11; \
adcs x17, x17, x14; \
umulh x14, x3, x12; \
adcs x19, x19, x14; \
umulh x14, x3, x13; \
adc x20, x20, x14; \
mul x14, x4, x5; \
adds x23, x23, x14; \
mul x14, x4, x6; \
adcs x24, x24, x14; \
mul x14, x4, x7; \
adcs x1, x1, x14; \
mul x14, x4, x8; \
adcs x0, x0, x14; \
mul x14, x4, x9; \
adcs x15, x15, x14; \
mul x14, x4, x10; \
adcs x16, x16, x14; \
mul x14, x4, x11; \
adcs x17, x17, x14; \
mul x14, x4, x12; \
adcs x19, x19, x14; \
mul x14, x4, x13; \
adcs x20, x20, x14; \
cset x21, hs; \
umulh x14, x4, x5; \
adds x24, x24, x14; \
umulh x14, x4, x6; \
adcs x1, x1, x14; \
umulh x14, x4, x7; \
adcs x0, x0, x14; \
umulh x14, x4, x8; \
adcs x15, x15, x14; \
umulh x14, x4, x9; \
adcs x16, x16, x14; \
umulh x14, x4, x10; \
adcs x17, x17, x14; \
umulh x14, x4, x11; \
adcs x19, x19, x14; \
umulh x14, x4, x12; \
adcs x20, x20, x14; \
umulh x14, x4, x13; \
adc x21, x21, x14; \
stp x22, x23, [P0+48]; \
ldr x3, [P1+64]; \
mul x14, x3, x5; \
adds x24, x24, x14; \
mul x14, x3, x6; \
adcs x1, x1, x14; \
mul x14, x3, x7; \
adcs x0, x0, x14; \
mul x14, x3, x8; \
adcs x15, x15, x14; \
mul x14, x3, x9; \
adcs x16, x16, x14; \
mul x14, x3, x10; \
adcs x17, x17, x14; \
mul x14, x3, x11; \
adcs x19, x19, x14; \
mul x14, x3, x12; \
adcs x20, x20, x14; \
mul x14, x3, x13; \
adc x21, x21, x14; \
umulh x14, x3, x5; \
adds x1, x1, x14; \
umulh x14, x3, x6; \
adcs x0, x0, x14; \
umulh x14, x3, x7; \
adcs x15, x15, x14; \
umulh x14, x3, x8; \
adcs x16, x16, x14; \
umulh x14, x3, x9; \
adcs x17, x17, x14; \
umulh x14, x3, x10; \
adcs x19, x19, x14; \
umulh x14, x3, x11; \
adcs x20, x20, x14; \
umulh x14, x3, x12; \
adc x21, x21, x14; \
cmp xzr, xzr; \
ldp x5, x6, [P0]; \
extr x14, x1, x24, #9; \
adcs x5, x5, x14; \
extr x14, x0, x1, #9; \
adcs x6, x6, x14; \
ldp x7, x8, [P0+16]; \
extr x14, x15, x0, #9; \
adcs x7, x7, x14; \
extr x14, x16, x15, #9; \
adcs x8, x8, x14; \
ldp x9, x10, [P0+32]; \
extr x14, x17, x16, #9; \
adcs x9, x9, x14; \
extr x14, x19, x17, #9; \
adcs x10, x10, x14; \
ldp x11, x12, [P0+48]; \
extr x14, x20, x19, #9; \
adcs x11, x11, x14; \
extr x14, x21, x20, #9; \
adcs x12, x12, x14; \
orr x13, x24, #0xfffffffffffffe00; \
lsr x14, x21, #9; \
adcs x13, x13, x14; \
sbcs x5, x5, xzr; \
sbcs x6, x6, xzr; \
sbcs x7, x7, xzr; \
sbcs x8, x8, xzr; \
sbcs x9, x9, xzr; \
sbcs x10, x10, xzr; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbc x13, x13, xzr; \
and x13, x13, #0x1ff; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]; \
stp x11, x12, [P0+48]; \
str x13, [P0+64]
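// (Note on the tail of the macro above: the extr ..., #9 chain splits the
// full product at bit 521, and the cmp xzr, xzr / adcs / sbcs sequence is
// the same "form h + l + 1, then subtract the complemented carry" folding
// used for reduction mod p_521 = 2^521 - 1 in bignum_sqr_p521_alt earlier
// in this collection.)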
// Corresponds exactly to bignum_sqr_p521_alt
#define sqr_p521(P0,P1) \
ldp x2, x3, [P1]; \
mul x11, x2, x3; \
umulh x12, x2, x3; \
ldp x4, x5, [P1+16]; \
mul x10, x2, x4; \
umulh x13, x2, x4; \
adds x12, x12, x10; \
ldp x6, x7, [P1+32]; \
mul x10, x2, x5; \
umulh x14, x2, x5; \
adcs x13, x13, x10; \
ldp x8, x9, [P1+48]; \
mul x10, x2, x6; \
umulh x15, x2, x6; \
adcs x14, x14, x10; \
mul x10, x2, x7; \
umulh x16, x2, x7; \
adcs x15, x15, x10; \
mul x10, x2, x8; \
umulh x17, x2, x8; \
adcs x16, x16, x10; \
mul x10, x2, x9; \
umulh x19, x2, x9; \
adcs x17, x17, x10; \
adc x19, x19, xzr; \
mul x10, x3, x4; \
adds x13, x13, x10; \
mul x10, x3, x5; \
adcs x14, x14, x10; \
mul x10, x3, x6; \
adcs x15, x15, x10; \
mul x10, x3, x7; \
adcs x16, x16, x10; \
mul x10, x3, x8; \
adcs x17, x17, x10; \
mul x10, x3, x9; \
adcs x19, x19, x10; \
cset x20, hs; \
umulh x10, x3, x4; \
adds x14, x14, x10; \
umulh x10, x3, x5; \
adcs x15, x15, x10; \
umulh x10, x3, x6; \
adcs x16, x16, x10; \
umulh x10, x3, x7; \
adcs x17, x17, x10; \
umulh x10, x3, x8; \
adcs x19, x19, x10; \
umulh x10, x3, x9; \
adc x20, x20, x10; \
mul x10, x6, x7; \
umulh x21, x6, x7; \
adds x20, x20, x10; \
adc x21, x21, xzr; \
mul x10, x4, x5; \
adds x15, x15, x10; \
mul x10, x4, x6; \
adcs x16, x16, x10; \
mul x10, x4, x7; \
adcs x17, x17, x10; \
mul x10, x4, x8; \
adcs x19, x19, x10; \
mul x10, x4, x9; \
adcs x20, x20, x10; \
mul x10, x6, x8; \
adcs x21, x21, x10; \
cset x22, hs; \
umulh x10, x4, x5; \
adds x16, x16, x10; \
umulh x10, x4, x6; \
adcs x17, x17, x10; \
umulh x10, x4, x7; \
adcs x19, x19, x10; \
umulh x10, x4, x8; \
adcs x20, x20, x10; \
umulh x10, x4, x9; \
adcs x21, x21, x10; \
umulh x10, x6, x8; \
adc x22, x22, x10; \
mul x10, x7, x8; \
umulh x23, x7, x8; \
adds x22, x22, x10; \
adc x23, x23, xzr; \
mul x10, x5, x6; \
adds x17, x17, x10; \
mul x10, x5, x7; \
adcs x19, x19, x10; \
mul x10, x5, x8; \
adcs x20, x20, x10; \
mul x10, x5, x9; \
adcs x21, x21, x10; \
mul x10, x6, x9; \
adcs x22, x22, x10; \
mul x10, x7, x9; \
adcs x23, x23, x10; \
cset x24, hs; \
umulh x10, x5, x6; \
adds x19, x19, x10; \
umulh x10, x5, x7; \
adcs x20, x20, x10; \
umulh x10, x5, x8; \
adcs x21, x21, x10; \
umulh x10, x5, x9; \
adcs x22, x22, x10; \
umulh x10, x6, x9; \
adcs x23, x23, x10; \
umulh x10, x7, x9; \
adc x24, x24, x10; \
mul x10, x8, x9; \
umulh x25, x8, x9; \
adds x24, x24, x10; \
adc x25, x25, xzr; \
adds x11, x11, x11; \
adcs x12, x12, x12; \
adcs x13, x13, x13; \
adcs x14, x14, x14; \
adcs x15, x15, x15; \
adcs x16, x16, x16; \
adcs x17, x17, x17; \
adcs x19, x19, x19; \
adcs x20, x20, x20; \
adcs x21, x21, x21; \
adcs x22, x22, x22; \
adcs x23, x23, x23; \
adcs x24, x24, x24; \
adcs x25, x25, x25; \
cset x0, hs; \
umulh x10, x2, x2; \
adds x11, x11, x10; \
mul x10, x3, x3; \
adcs x12, x12, x10; \
umulh x10, x3, x3; \
adcs x13, x13, x10; \
mul x10, x4, x4; \
adcs x14, x14, x10; \
umulh x10, x4, x4; \
adcs x15, x15, x10; \
mul x10, x5, x5; \
adcs x16, x16, x10; \
umulh x10, x5, x5; \
adcs x17, x17, x10; \
mul x10, x6, x6; \
adcs x19, x19, x10; \
umulh x10, x6, x6; \
adcs x20, x20, x10; \
mul x10, x7, x7; \
adcs x21, x21, x10; \
umulh x10, x7, x7; \
adcs x22, x22, x10; \
mul x10, x8, x8; \
adcs x23, x23, x10; \
umulh x10, x8, x8; \
adcs x24, x24, x10; \
mul x10, x9, x9; \
adcs x25, x25, x10; \
umulh x10, x9, x9; \
adc x0, x0, x10; \
ldr x1, [P1+64]; \
add x1, x1, x1; \
mul x10, x1, x2; \
adds x19, x19, x10; \
umulh x10, x1, x2; \
adcs x20, x20, x10; \
mul x10, x1, x4; \
adcs x21, x21, x10; \
umulh x10, x1, x4; \
adcs x22, x22, x10; \
mul x10, x1, x6; \
adcs x23, x23, x10; \
umulh x10, x1, x6; \
adcs x24, x24, x10; \
mul x10, x1, x8; \
adcs x25, x25, x10; \
umulh x10, x1, x8; \
adcs x0, x0, x10; \
lsr x4, x1, #1; \
mul x4, x4, x4; \
adc x4, x4, xzr; \
mul x10, x1, x3; \
adds x20, x20, x10; \
umulh x10, x1, x3; \
adcs x21, x21, x10; \
mul x10, x1, x5; \
adcs x22, x22, x10; \
umulh x10, x1, x5; \
adcs x23, x23, x10; \
mul x10, x1, x7; \
adcs x24, x24, x10; \
umulh x10, x1, x7; \
adcs x25, x25, x10; \
mul x10, x1, x9; \
adcs x0, x0, x10; \
umulh x10, x1, x9; \
adc x4, x4, x10; \
mul x2, x2, x2; \
cmp xzr, xzr; \
extr x10, x20, x19, #9; \
adcs x2, x2, x10; \
extr x10, x21, x20, #9; \
adcs x11, x11, x10; \
extr x10, x22, x21, #9; \
adcs x12, x12, x10; \
extr x10, x23, x22, #9; \
adcs x13, x13, x10; \
extr x10, x24, x23, #9; \
adcs x14, x14, x10; \
extr x10, x25, x24, #9; \
adcs x15, x15, x10; \
extr x10, x0, x25, #9; \
adcs x16, x16, x10; \
extr x10, x4, x0, #9; \
adcs x17, x17, x10; \
orr x19, x19, #0xfffffffffffffe00; \
lsr x10, x4, #9; \
adcs x19, x19, x10; \
sbcs x2, x2, xzr; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbcs x13, x13, xzr; \
sbcs x14, x14, xzr; \
sbcs x15, x15, xzr; \
sbcs x16, x16, xzr; \
sbcs x17, x17, xzr; \
sbc x19, x19, xzr; \
and x19, x19, #0x1ff; \
stp x2, x11, [P0]; \
stp x12, x13, [P0+16]; \
stp x14, x15, [P0+32]; \
stp x16, x17, [P0+48]; \
str x19, [P0+64]
// Corresponds exactly to bignum_sub_p521
#define sub_p521(P0,P1,P2) \
ldp x5, x6, [P1]; \
ldp x4, x3, [P2]; \
subs x5, x5, x4; \
sbcs x6, x6, x3; \
ldp x7, x8, [P1+16]; \
ldp x4, x3, [P2+16]; \
sbcs x7, x7, x4; \
sbcs x8, x8, x3; \
ldp x9, x10, [P1+32]; \
ldp x4, x3, [P2+32]; \
sbcs x9, x9, x4; \
sbcs x10, x10, x3; \
ldp x11, x12, [P1+48]; \
ldp x4, x3, [P2+48]; \
sbcs x11, x11, x4; \
sbcs x12, x12, x3; \
ldr x13, [P1+64]; \
ldr x4, [P2+64]; \
sbcs x13, x13, x4; \
sbcs x5, x5, xzr; \
sbcs x6, x6, xzr; \
sbcs x7, x7, xzr; \
sbcs x8, x8, xzr; \
sbcs x9, x9, xzr; \
sbcs x10, x10, xzr; \
sbcs x11, x11, xzr; \
sbcs x12, x12, xzr; \
sbcs x13, x13, xzr; \
and x13, x13, #0x1ff; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]; \
stp x11, x12, [P0+48]; \
str x13, [P0+64]
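// A hedged, scaled-down C analogue of the sub_p521 pattern above
// (hypothetical helper; the Mersenne prime 2^61 - 1 is used so the value
// fits in one word, but the borrow-then-mask correction is the same one
// the nine-word macro performs with the trailing sbcs chain and the final
// and #0x1ff):
//
//   #include <stdint.h>
//
//   static uint64_t sub_mersenne61(uint64_t a, uint64_t b)   // a, b < p
//   {
//     const uint64_t p = ((uint64_t)1 << 61) - 1;
//     uint64_t d = a - b;             // wraps modulo 2^64 if a < b
//     uint64_t borrow = (a < b);      // 1 exactly when the subtraction borrowed
//     return (d - borrow) & p;        // subtracting the borrow and masking to
//   }                                 // 61 bits adds p back whenever a < b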
S2N_BN_SYMBOL(p521_jadd_alt):
// Save regs and make room on stack for temporary variables
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, x28, [sp, #-16]!
sub sp, sp, NSPACE
// Move the input arguments to stable places
mov input_z, x0
mov input_x, x1
mov input_y, x2
// Main code, just a sequence of basic field operations
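// In field notation (everything mod p_521), the macro calls below compute:
//
//   z1sq = z1^2, z2sq = z2^2
//   x1a = x1 * z2^2, x2a = x2 * z1^2
//   y1a = y1 * z2^3, y2a = y2 * z1^3
//   xd = x2a - x1a, yd = y2a - y1a
//   resx = yd^2 - xd^2 * (x1a + x2a)
//   resy = yd * (xd^2 * x1a - resx) - xd^3 * y1a
//   resz = xd * z1 * z2
//
// which are the usual Jacobian-coordinate addition formulas.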
sqr_p521(z1sq,z_1)
sqr_p521(z2sq,z_2)
mul_p521(y1a,z_2,y_1)
mul_p521(y2a,z_1,y_2)
mul_p521(x2a,z1sq,x_2)
mul_p521(x1a,z2sq,x_1)
mul_p521(y2a,z1sq,y2a)
mul_p521(y1a,z2sq,y1a)
sub_p521(xd,x2a,x1a)
sub_p521(yd,y2a,y1a)
sqr_p521(zz,xd)
sqr_p521(ww,yd)
mul_p521(zzx1,zz,x1a)
mul_p521(zzx2,zz,x2a)
sub_p521(resx,ww,zzx1)
sub_p521(t1,zzx2,zzx1)
mul_p521(xd,xd,z_1)
sub_p521(resx,resx,zzx2)
sub_p521(t2,zzx1,resx)
mul_p521(t1,t1,y1a)
mul_p521(resz,xd,z_2)
mul_p521(t2,yd,t2)
sub_p521(resy,t2,t1)
// Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0
// The condition codes get set by a comparison (P2 != 0) - (P1 != 0)
// So "HI" <=> CF /\ ~ZF <=> P1 = 0 /\ ~(P2 = 0)
// and "LO" <=> ~CF <=> ~(P1 = 0) /\ P2 = 0
// Multiplex the z outputs accordingly and re-store in resz
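// A hedged C sketch (illustrative only; helper and array names are
// hypothetical) of the selection implemented by the csel chains below:
//
//   #include <stdint.h>
//   #include <string.h>
//
//   static int z_is_zero(const uint64_t p[27])     // z occupies words 18..26
//   {
//     for (int i = 18; i < 27; i++)
//       if (p[i]) return 0;
//     return 1;
//   }
//
//   static void select_output(uint64_t p3[27], const uint64_t p1[27],
//                             const uint64_t p2[27], const uint64_t sum[27])
//   {
//     size_t n = 27 * sizeof(uint64_t);
//     if (z_is_zero(p1) && !z_is_zero(p2))      memcpy(p3, p2, n);   // "HI" case
//     else if (!z_is_zero(p1) && z_is_zero(p2)) memcpy(p3, p1, n);   // "LO" case
//     else                                      memcpy(p3, sum, n);  // generic sum
//   }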
ldp x0, x1, [z_1]
ldp x2, x3, [z_1+16]
ldp x4, x5, [z_1+32]
ldp x6, x7, [z_1+48]
ldr x8, [z_1+64]
orr x20, x0, x1
orr x21, x2, x3
orr x22, x4, x5
orr x23, x6, x7
orr x20, x20, x21
orr x22, x22, x23
orr x20, x20, x8
orr x20, x20, x22
cmp x20, xzr
cset x20, ne
ldp x10, x11, [z_2]
ldp x12, x13, [z_2+16]
ldp x14, x15, [z_2+32]
ldp x16, x17, [z_2+48]
ldr x19, [z_2+64]
orr x21, x10, x11
orr x22, x12, x13
orr x23, x14, x15
orr x24, x16, x17
orr x21, x21, x22
orr x23, x23, x24
orr x21, x21, x19
orr x21, x21, x23
csel x0, x0, x10, ne
csel x1, x1, x11, ne
csel x2, x2, x12, ne
csel x3, x3, x13, ne
csel x4, x4, x14, ne
csel x5, x5, x15, ne
csel x6, x6, x16, ne
csel x7, x7, x17, ne
csel x8, x8, x19, ne
cmp x21, xzr
cset x21, ne
cmp x21, x20
ldp x10, x11, [resz]
ldp x12, x13, [resz+16]
ldp x14, x15, [resz+32]
ldp x16, x17, [resz+48]
ldr x19, [resz+64]
csel x0, x0, x10, ne
csel x1, x1, x11, ne
csel x2, x2, x12, ne
csel x3, x3, x13, ne
csel x4, x4, x14, ne
csel x5, x5, x15, ne
csel x6, x6, x16, ne
csel x7, x7, x17, ne
csel x8, x8, x19, ne
stp x0, x1, [resz]
stp x2, x3, [resz+16]
stp x4, x5, [resz+32]
stp x6, x7, [resz+48]
str x8, [resz+64]
// Multiplex the x and y outputs too, keeping the results in registers
ldp x20, x21, [x_1]
ldp x0, x1, [resx]
csel x0, x20, x0, lo
csel x1, x21, x1, lo
ldp x20, x21, [x_2]
csel x0, x20, x0, hi
csel x1, x21, x1, hi
ldp x20, x21, [x_1+16]
ldp x2, x3, [resx+16]
csel x2, x20, x2, lo
csel x3, x21, x3, lo
ldp x20, x21, [x_2+16]
csel x2, x20, x2, hi
csel x3, x21, x3, hi
ldp x20, x21, [x_1+32]
ldp x4, x5, [resx+32]
csel x4, x20, x4, lo
csel x5, x21, x5, lo
ldp x20, x21, [x_2+32]
csel x4, x20, x4, hi
csel x5, x21, x5, hi
ldp x20, x21, [x_1+48]
ldp x6, x7, [resx+48]
csel x6, x20, x6, lo
csel x7, x21, x7, lo
ldp x20, x21, [x_2+48]
csel x6, x20, x6, hi
csel x7, x21, x7, hi
ldr x20, [x_1+64]
ldr x8, [resx+64]
csel x8, x20, x8, lo
ldr x21, [x_2+64]
csel x8, x21, x8, hi
ldp x20, x21, [y_1]
ldp x10, x11, [resy]
csel x10, x20, x10, lo
csel x11, x21, x11, lo
ldp x20, x21, [y_2]
csel x10, x20, x10, hi
csel x11, x21, x11, hi
ldp x20, x21, [y_1+16]
ldp x12, x13, [resy+16]
csel x12, x20, x12, lo
csel x13, x21, x13, lo
ldp x20, x21, [y_2+16]
csel x12, x20, x12, hi
csel x13, x21, x13, hi
ldp x20, x21, [y_1+32]
ldp x14, x15, [resy+32]
csel x14, x20, x14, lo
csel x15, x21, x15, lo
ldp x20, x21, [y_2+32]
csel x14, x20, x14, hi
csel x15, x21, x15, hi
ldp x20, x21, [y_1+48]
ldp x16, x17, [resy+48]
csel x16, x20, x16, lo
csel x17, x21, x17, lo
ldp x20, x21, [y_2+48]
csel x16, x20, x16, hi
csel x17, x21, x17, hi
ldr x20, [y_1+64]
ldr x19, [resy+64]
csel x19, x20, x19, lo
ldr x21, [y_2+64]
csel x19, x21, x19, hi
// Finally store back the multiplexed values
stp x0, x1, [x_3]
stp x2, x3, [x_3+16]
stp x4, x5, [x_3+32]
stp x6, x7, [x_3+48]
str x8, [x_3+64]
ldp x0, x1, [resz]
ldp x2, x3, [resz+16]
ldp x4, x5, [resz+32]
ldp x6, x7, [resz+48]
ldr x8, [resz+64]
stp x10, x11, [y_3]
stp x12, x13, [y_3+16]
stp x14, x15, [y_3+32]
stp x16, x17, [y_3+48]
str x19, [y_3+64]
stp x0, x1, [z_3]
stp x2, x3, [z_3+16]
stp x4, x5, [z_3+32]
stp x6, x7, [z_3+48]
str x8, [z_3+64]
// Restore stack and registers
add sp, sp, NSPACE
ldp x27, x28, [sp], 16
ldp x25, x26, [sp], 16
ldp x23, x24, [sp], 16
ldp x21, x22, [sp], 16
ldp x19, x20, [sp], 16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/p521_jadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point addition on NIST curve P-521 in Jacobian coordinates
//
// extern void p521_jadd
// (uint64_t p3[static 27],uint64_t p1[static 27],uint64_t p2[static 27]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// It is assumed that all coordinates of the input points p1 and p2 are
// fully reduced mod p_521, that both z coordinates are nonzero and
// that neither p1 =~= p2 nor p1 =~= -p2, where "=~=" means "represents
// the same affine point as".
//
// Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jadd)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jadd)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 72
// Stable homes for input arguments during main code sequence
#define input_z x26
#define input_x x27
#define input_y x28
// Pointer-offset pairs for inputs and outputs
#define x_1 input_x, #0
#define y_1 input_x, #NUMSIZE
#define z_1 input_x, #(2*NUMSIZE)
#define x_2 input_y, #0
#define y_2 input_y, #NUMSIZE
#define z_2 input_y, #(2*NUMSIZE)
#define x_3 input_z, #0
#define y_3 input_z, #NUMSIZE
#define z_3 input_z, #(2*NUMSIZE)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z1sq sp, #(NUMSIZE*0)
#define ww sp, #(NUMSIZE*0)
#define resx sp, #(NUMSIZE*0)
#define yd sp, #(NUMSIZE*1)
#define y2a sp, #(NUMSIZE*1)
#define x2a sp, #(NUMSIZE*2)
#define zzx2 sp, #(NUMSIZE*2)
#define zz sp, #(NUMSIZE*3)
#define t1 sp, #(NUMSIZE*3)
#define t2 sp, #(NUMSIZE*4)
#define x1a sp, #(NUMSIZE*4)
#define zzx1 sp, #(NUMSIZE*4)
#define resy sp, #(NUMSIZE*4)
#define xd sp, #(NUMSIZE*5)
#define z2sq sp, #(NUMSIZE*5)
#define resz sp, #(NUMSIZE*5)
#define tmp sp, #(NUMSIZE*6)
#define y1a sp, #(NUMSIZE*7)
#define NSPACE (NUMSIZE*8)
// For the three field operations we use subroutines rather than inlining.
// Call local code very close to bignum_mul_p521 and bignum_sqr_p521
// and bignum_sub_p521
#define mul_p521(P0,P1,P2) \
add x0, P0; \
add x1, P1; \
add x2, P2; \
bl local_mul_p521
#define sqr_p521(P0,P1) \
add x0, P0; \
add x1, P1; \
bl local_sqr_p521
#define sub_p521(P0,P1,P2) \
add x0, P0; \
add x1, P1; \
add x2, P2; \
bl local_sub_p521
S2N_BN_SYMBOL(p521_jadd):
// Save regs and make room on stack for temporary variables
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x29, x30, [sp, #-16]!
sub sp, sp, NSPACE
// Move the input arguments to stable places
mov input_z, x0
mov input_x, x1
mov input_y, x2
// Main code, just a sequence of basic field operations
sqr_p521(z1sq,z_1)
sqr_p521(z2sq,z_2)
mul_p521(y1a,z_2,y_1)
mul_p521(y2a,z_1,y_2)
mul_p521(x2a,z1sq,x_2)
mul_p521(x1a,z2sq,x_1)
mul_p521(y2a,z1sq,y2a)
mul_p521(y1a,z2sq,y1a)
sub_p521(xd,x2a,x1a)
sub_p521(yd,y2a,y1a)
sqr_p521(zz,xd)
sqr_p521(ww,yd)
mul_p521(zzx1,zz,x1a)
mul_p521(zzx2,zz,x2a)
sub_p521(resx,ww,zzx1)
sub_p521(t1,zzx2,zzx1)
mul_p521(xd,xd,z_1)
sub_p521(resx,resx,zzx2)
sub_p521(t2,zzx1,resx)
mul_p521(t1,t1,y1a)
mul_p521(resz,xd,z_2)
mul_p521(t2,yd,t2)
sub_p521(resy,t2,t1)
// Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0
// The condition codes get set by a comparison (P2 != 0) - (P1 != 0)
// So "HI" <=> CF /\ ~ZF <=> P1 = 0 /\ ~(P2 = 0)
// and "LO" <=> ~CF <=> ~(P1 = 0) /\ P2 = 0
// Multiplex the z outputs accordingly and re-store in resz
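// In integer terms the multiplexing below (and for x and y further down)
// makes the following choice; a Python sketch of the intended behaviour,
// with illustrative names only:
def mux_output(p1_is_zero, p2_is_zero, p1, p2, computed):
    if p1_is_zero and not p2_is_zero:    # "HI": P1 = 0 and P2 != 0
        return p2
    if p2_is_zero and not p1_is_zero:    # "LO": P1 != 0 and P2 = 0
        return p1
    return computed                      # generic case (including P1 = P2 = 0)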
ldp x0, x1, [z_1]
ldp x2, x3, [z_1+16]
ldp x4, x5, [z_1+32]
ldp x6, x7, [z_1+48]
ldr x8, [z_1+64]
orr x20, x0, x1
orr x21, x2, x3
orr x22, x4, x5
orr x23, x6, x7
orr x20, x20, x21
orr x22, x22, x23
orr x20, x20, x8
orr x20, x20, x22
cmp x20, xzr
cset x20, ne
ldp x10, x11, [z_2]
ldp x12, x13, [z_2+16]
ldp x14, x15, [z_2+32]
ldp x16, x17, [z_2+48]
ldr x19, [z_2+64]
orr x21, x10, x11
orr x22, x12, x13
orr x23, x14, x15
orr x24, x16, x17
orr x21, x21, x22
orr x23, x23, x24
orr x21, x21, x19
orr x21, x21, x23
csel x0, x0, x10, ne
csel x1, x1, x11, ne
csel x2, x2, x12, ne
csel x3, x3, x13, ne
csel x4, x4, x14, ne
csel x5, x5, x15, ne
csel x6, x6, x16, ne
csel x7, x7, x17, ne
csel x8, x8, x19, ne
cmp x21, xzr
cset x21, ne
cmp x21, x20
ldp x10, x11, [resz]
ldp x12, x13, [resz+16]
ldp x14, x15, [resz+32]
ldp x16, x17, [resz+48]
ldr x19, [resz+64]
csel x0, x0, x10, ne
csel x1, x1, x11, ne
csel x2, x2, x12, ne
csel x3, x3, x13, ne
csel x4, x4, x14, ne
csel x5, x5, x15, ne
csel x6, x6, x16, ne
csel x7, x7, x17, ne
csel x8, x8, x19, ne
stp x0, x1, [resz]
stp x2, x3, [resz+16]
stp x4, x5, [resz+32]
stp x6, x7, [resz+48]
str x8, [resz+64]
// Multiplex the x and y outputs too, keeping the results in registers
ldp x20, x21, [x_1]
ldp x0, x1, [resx]
csel x0, x20, x0, lo
csel x1, x21, x1, lo
ldp x20, x21, [x_2]
csel x0, x20, x0, hi
csel x1, x21, x1, hi
ldp x20, x21, [x_1+16]
ldp x2, x3, [resx+16]
csel x2, x20, x2, lo
csel x3, x21, x3, lo
ldp x20, x21, [x_2+16]
csel x2, x20, x2, hi
csel x3, x21, x3, hi
ldp x20, x21, [x_1+32]
ldp x4, x5, [resx+32]
csel x4, x20, x4, lo
csel x5, x21, x5, lo
ldp x20, x21, [x_2+32]
csel x4, x20, x4, hi
csel x5, x21, x5, hi
ldp x20, x21, [x_1+48]
ldp x6, x7, [resx+48]
csel x6, x20, x6, lo
csel x7, x21, x7, lo
ldp x20, x21, [x_2+48]
csel x6, x20, x6, hi
csel x7, x21, x7, hi
ldr x20, [x_1+64]
ldr x8, [resx+64]
csel x8, x20, x8, lo
ldr x21, [x_2+64]
csel x8, x21, x8, hi
ldp x20, x21, [y_1]
ldp x10, x11, [resy]
csel x10, x20, x10, lo
csel x11, x21, x11, lo
ldp x20, x21, [y_2]
csel x10, x20, x10, hi
csel x11, x21, x11, hi
ldp x20, x21, [y_1+16]
ldp x12, x13, [resy+16]
csel x12, x20, x12, lo
csel x13, x21, x13, lo
ldp x20, x21, [y_2+16]
csel x12, x20, x12, hi
csel x13, x21, x13, hi
ldp x20, x21, [y_1+32]
ldp x14, x15, [resy+32]
csel x14, x20, x14, lo
csel x15, x21, x15, lo
ldp x20, x21, [y_2+32]
csel x14, x20, x14, hi
csel x15, x21, x15, hi
ldp x20, x21, [y_1+48]
ldp x16, x17, [resy+48]
csel x16, x20, x16, lo
csel x17, x21, x17, lo
ldp x20, x21, [y_2+48]
csel x16, x20, x16, hi
csel x17, x21, x17, hi
ldr x20, [y_1+64]
ldr x19, [resy+64]
csel x19, x20, x19, lo
ldr x21, [y_2+64]
csel x19, x21, x19, hi
// Finally store back the multiplexed values
stp x0, x1, [x_3]
stp x2, x3, [x_3+16]
stp x4, x5, [x_3+32]
stp x6, x7, [x_3+48]
str x8, [x_3+64]
ldp x0, x1, [resz]
ldp x2, x3, [resz+16]
ldp x4, x5, [resz+32]
ldp x6, x7, [resz+48]
ldr x8, [resz+64]
stp x10, x11, [y_3]
stp x12, x13, [y_3+16]
stp x14, x15, [y_3+32]
stp x16, x17, [y_3+48]
str x19, [y_3+64]
stp x0, x1, [z_3]
stp x2, x3, [z_3+16]
stp x4, x5, [z_3+32]
stp x6, x7, [z_3+48]
str x8, [z_3+64]
// Restore stack and registers
add sp, sp, NSPACE
ldp x29, x30, [sp], 16
ldp x27, x28, [sp], 16
ldp x25, x26, [sp], 16
ldp x23, x24, [sp], 16
ldp x21, x22, [sp], 16
ldp x19, x20, [sp], 16
ret
// Local versions of the three field operations, identical to
// bignum_mul_p521_neon, bignum_sqr_p521_neon and bignum_sub_p521.
local_mul_p521:
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
sub sp, sp, #80
ldr q6, [x2]
ldp x10, x17, [x1, #16]
ldr q4, [x1]
ldr q16, [x2, #32]
ldp x5, x20, [x2, #16]
ldr q2, [x1, #32]
movi v31.2D, #0x00000000ffffffff
uzp2 v17.4S, v6.4S, v6.4S
rev64 v7.4S, v6.4S
ldp x15, x21, [x1]
xtn v25.2S, v6.2D
xtn v22.2S, v4.2D
subs x14, x10, x17
mul v7.4S, v7.4S, v4.4S
csetm x8, cc
rev64 v3.4S, v16.4S
xtn v1.2S, v16.2D
ldp x13, x16, [x2]
mul x26, x10, x5
uzp2 v16.4S, v16.4S, v16.4S
uaddlp v26.2D, v7.4S
cneg x4, x14, cc
subs x24, x15, x21
xtn v5.2S, v2.2D
mul v28.4S, v3.4S, v2.4S
shl v26.2D, v26.2D, #32
mul x22, x17, x20
umull v20.2D, v22.2S, v25.2S
uzp2 v6.4S, v4.4S, v4.4S
umull v18.2D, v22.2S, v17.2S
uzp2 v4.4S, v2.4S, v2.4S
cneg x14, x24, cc
csetm x7, cc
umulh x11, x17, x20
usra v18.2D, v20.2D, #32
uaddlp v7.2D, v28.4S
subs x19, x16, x13
umlal v26.2D, v22.2S, v25.2S
cneg x19, x19, cc
shl v28.2D, v7.2D, #32
umull v7.2D, v5.2S, v1.2S
umull v30.2D, v5.2S, v16.2S
cinv x6, x7, cc
mul x25, x14, x19
umlal v28.2D, v5.2S, v1.2S
umull v21.2D, v6.2S, v17.2S
umulh x14, x14, x19
usra v30.2D, v7.2D, #32
subs x9, x20, x5
and v29.16B, v18.16B, v31.16B
cinv x23, x8, cc
mov x8, v26.d[1]
cneg x12, x9, cc
usra v21.2D, v18.2D, #32
umlal v29.2D, v6.2S, v25.2S
mul x24, x4, x12
umull v18.2D, v4.2S, v16.2S
movi v25.2D, #0x00000000ffffffff
eor x9, x14, x6
and v7.16B, v30.16B, v25.16B
usra v21.2D, v29.2D, #32
umulh x7, x10, x5
usra v18.2D, v30.2D, #32
umlal v7.2D, v4.2S, v1.2S
mov x19, v21.d[0]
umulh x3, x4, x12
mov x14, v21.d[1]
usra v18.2D, v7.2D, #32
adds x4, x8, x19
mov x8, v26.d[0]
adcs x19, x26, x14
adcs x14, x22, x7
adc x12, x11, xzr
adds x11, x4, x8
adcs x26, x19, x4
adcs x22, x14, x19
eor x4, x24, x23
adcs x14, x12, x14
eor x7, x25, x6
adc x25, xzr, x12
eor x19, x3, x23
adds x3, x26, x8
adcs x24, x22, x11
adcs x12, x14, x26
adcs x22, x25, x22
adcs x26, xzr, x14
adc x14, xzr, x25
cmn x23, #0x1
adcs x22, x22, x4
adcs x19, x26, x19
adc x25, x14, x23
subs x14, x21, x17
cneg x23, x14, cc
csetm x26, cc
subs x4, x20, x16
cneg x14, x4, cc
cinv x4, x26, cc
cmn x6, #0x1
adcs x11, x11, x7
mul x7, x23, x14
adcs x9, x3, x9
adcs x26, x24, x6
umulh x3, x23, x14
adcs x14, x12, x6
adcs x22, x22, x6
adcs x12, x19, x6
extr x24, x11, x8, #55
adc x6, x25, x6
subs x19, x15, x17
csetm x17, cc
cneg x23, x19, cc
subs x19, x20, x13
lsl x25, x8, #9
eor x8, x7, x4
cneg x20, x19, cc
umulh x7, x23, x20
cinv x19, x17, cc
subs x17, x15, x10
csetm x15, cc
stp x25, x24, [sp, #32]
cneg x24, x17, cc
mul x20, x23, x20
subs x25, x5, x13
cneg x13, x25, cc
cinv x15, x15, cc
mul x25, x24, x13
subs x21, x21, x10
csetm x23, cc
cneg x17, x21, cc
subs x21, x5, x16
umulh x13, x24, x13
cinv x10, x23, cc
cneg x23, x21, cc
cmn x4, #0x1
adcs x14, x14, x8
eor x21, x3, x4
adcs x21, x22, x21
eor x5, x20, x19
adcs x24, x12, x4
mul x12, x17, x23
eor x8, x25, x15
adc x25, x6, x4
cmn x15, #0x1
adcs x6, x9, x8
ldp x20, x8, [x2, #48]
eor x9, x13, x15
adcs x4, x26, x9
umulh x26, x17, x23
ldp x17, x13, [x1, #48]
adcs x9, x14, x15
adcs x16, x21, x15
adcs x14, x24, x15
eor x21, x7, x19
mul x23, x17, x20
adc x24, x25, x15
cmn x19, #0x1
adcs x7, x4, x5
adcs x9, x9, x21
umulh x3, x13, x8
adcs x16, x16, x19
adcs x22, x14, x19
eor x5, x12, x10
adc x12, x24, x19
cmn x10, #0x1
adcs x19, x7, x5
eor x14, x26, x10
mov x7, v28.d[1]
adcs x24, x9, x14
extr x4, x19, x6, #55
umulh x15, x17, x20
mov x14, v18.d[1]
lsr x9, x19, #55
adcs x5, x16, x10
mov x16, v18.d[0]
adcs x19, x22, x10
str x9, [sp, #64]
extr x25, x6, x11, #55
adc x21, x12, x10
subs x26, x17, x13
stp x25, x4, [sp, #48]
stp x19, x21, [sp, #16]
csetm x6, cc
cneg x4, x26, cc
mul x19, x13, x8
subs x11, x8, x20
stp x24, x5, [sp]
ldp x21, x10, [x1, #32]
cinv x12, x6, cc
cneg x6, x11, cc
mov x9, v28.d[0]
umulh x25, x4, x6
adds x22, x7, x16
ldp x16, x5, [x2, #32]
adcs x14, x23, x14
adcs x11, x19, x15
adc x24, x3, xzr
adds x3, x22, x9
adcs x15, x14, x22
mul x22, x4, x6
adcs x6, x11, x14
adcs x4, x24, x11
eor x14, x25, x12
adc x26, xzr, x24
subs x7, x21, x10
csetm x23, cc
cneg x19, x7, cc
subs x24, x5, x16
cneg x11, x24, cc
cinv x7, x23, cc
adds x25, x15, x9
eor x23, x22, x12
adcs x22, x6, x3
mul x24, x19, x11
adcs x15, x4, x15
adcs x6, x26, x6
umulh x19, x19, x11
adcs x11, xzr, x4
adc x26, xzr, x26
cmn x12, #0x1
adcs x4, x6, x23
eor x6, x24, x7
adcs x14, x11, x14
adc x26, x26, x12
subs x11, x10, x13
cneg x12, x11, cc
csetm x11, cc
eor x19, x19, x7
subs x24, x8, x5
cinv x11, x11, cc
cneg x24, x24, cc
cmn x7, #0x1
adcs x3, x3, x6
mul x23, x12, x24
adcs x25, x25, x19
adcs x6, x22, x7
umulh x19, x12, x24
adcs x22, x15, x7
adcs x12, x4, x7
eor x24, x23, x11
adcs x4, x14, x7
adc x26, x26, x7
eor x19, x19, x11
subs x14, x21, x17
cneg x7, x14, cc
csetm x14, cc
subs x23, x20, x16
cinv x14, x14, cc
cneg x23, x23, cc
cmn x11, #0x1
adcs x22, x22, x24
mul x24, x7, x23
adcs x15, x12, x19
adcs x4, x4, x11
adc x19, x26, x11
umulh x26, x7, x23
subs x7, x21, x13
eor x11, x24, x14
cneg x23, x7, cc
csetm x12, cc
subs x7, x8, x16
cneg x7, x7, cc
cinv x12, x12, cc
cmn x14, #0x1
eor x26, x26, x14
adcs x11, x25, x11
mul x25, x23, x7
adcs x26, x6, x26
adcs x6, x22, x14
adcs x24, x15, x14
umulh x23, x23, x7
adcs x4, x4, x14
adc x22, x19, x14
eor x14, x25, x12
eor x7, x23, x12
cmn x12, #0x1
adcs x14, x26, x14
ldp x19, x25, [x2]
ldp x15, x23, [x2, #16]
adcs x26, x6, x7
adcs x24, x24, x12
adcs x7, x4, x12
adc x4, x22, x12
subs x19, x19, x16
ldp x16, x22, [x1]
sbcs x6, x25, x5
ldp x12, x25, [x1, #16]
sbcs x15, x15, x20
sbcs x8, x23, x8
csetm x23, cc
subs x21, x21, x16
eor x16, x19, x23
sbcs x19, x10, x22
eor x22, x6, x23
eor x8, x8, x23
sbcs x6, x17, x12
sbcs x13, x13, x25
csetm x12, cc
subs x10, x10, x17
cneg x17, x10, cc
csetm x25, cc
subs x5, x20, x5
eor x10, x19, x12
cneg x19, x5, cc
eor x20, x15, x23
eor x21, x21, x12
cinv x15, x25, cc
mul x25, x17, x19
subs x16, x16, x23
sbcs x5, x22, x23
eor x6, x6, x12
sbcs x20, x20, x23
eor x22, x13, x12
sbc x8, x8, x23
subs x21, x21, x12
umulh x19, x17, x19
sbcs x10, x10, x12
sbcs x17, x6, x12
eor x6, x19, x15
eor x19, x25, x15
umulh x25, x17, x20
sbc x13, x22, x12
cmn x15, #0x1
adcs x22, x14, x19
adcs x19, x26, x6
ldp x6, x26, [sp]
adcs x14, x24, x15
umulh x24, x21, x16
adcs x7, x7, x15
adc x15, x4, x15
adds x4, x9, x6
eor x9, x23, x12
adcs x12, x3, x26
stp x4, x12, [sp]
ldp x4, x26, [sp, #16]
umulh x12, x10, x5
ldp x6, x23, [sp, #32]
adcs x3, x11, x4
mul x4, x13, x8
adcs x26, x22, x26
ldp x22, x11, [sp, #48]
adcs x6, x19, x6
stp x3, x26, [sp, #16]
mul x26, x10, x5
adcs x14, x14, x23
stp x6, x14, [sp, #32]
ldr x6, [sp, #64]
adcs x22, x7, x22
adcs x14, x15, x11
mul x11, x17, x20
adc x19, x6, xzr
stp x22, x14, [sp, #48]
adds x14, x26, x24
str x19, [sp, #64]
umulh x19, x13, x8
adcs x7, x11, x12
adcs x22, x4, x25
mul x6, x21, x16
adc x19, x19, xzr
subs x11, x17, x13
cneg x12, x11, cc
csetm x11, cc
subs x24, x8, x20
cinv x11, x11, cc
cneg x24, x24, cc
adds x4, x14, x6
adcs x14, x7, x14
mul x3, x12, x24
adcs x7, x22, x7
adcs x22, x19, x22
umulh x12, x12, x24
adc x24, xzr, x19
adds x19, x14, x6
eor x3, x3, x11
adcs x26, x7, x4
adcs x14, x22, x14
adcs x25, x24, x7
adcs x23, xzr, x22
eor x7, x12, x11
adc x12, xzr, x24
subs x22, x21, x10
cneg x24, x22, cc
csetm x22, cc
subs x15, x5, x16
cinv x22, x22, cc
cneg x15, x15, cc
cmn x11, #0x1
adcs x3, x25, x3
mul x25, x24, x15
adcs x23, x23, x7
adc x11, x12, x11
subs x7, x10, x13
umulh x15, x24, x15
cneg x12, x7, cc
csetm x7, cc
eor x24, x25, x22
eor x25, x15, x22
cmn x22, #0x1
adcs x24, x4, x24
adcs x19, x19, x25
adcs x15, x26, x22
adcs x4, x14, x22
adcs x26, x3, x22
adcs x25, x23, x22
adc x23, x11, x22
subs x14, x21, x17
cneg x3, x14, cc
csetm x11, cc
subs x14, x8, x5
cneg x14, x14, cc
cinv x7, x7, cc
subs x13, x21, x13
cneg x21, x13, cc
csetm x13, cc
mul x22, x12, x14
subs x8, x8, x16
cinv x13, x13, cc
umulh x14, x12, x14
cneg x12, x8, cc
subs x8, x20, x16
cneg x8, x8, cc
cinv x16, x11, cc
eor x22, x22, x7
cmn x7, #0x1
eor x14, x14, x7
adcs x4, x4, x22
mul x11, x3, x8
adcs x22, x26, x14
adcs x14, x25, x7
eor x25, x24, x9
adc x26, x23, x7
umulh x7, x3, x8
subs x17, x10, x17
cneg x24, x17, cc
eor x3, x11, x16
csetm x11, cc
subs x20, x20, x5
cneg x5, x20, cc
cinv x11, x11, cc
cmn x16, #0x1
mul x17, x21, x12
eor x8, x7, x16
adcs x10, x19, x3
and x19, x9, #0x1ff
adcs x20, x15, x8
umulh x15, x21, x12
eor x12, x10, x9
eor x8, x6, x9
adcs x6, x4, x16
adcs x4, x22, x16
adcs x21, x14, x16
adc x7, x26, x16
mul x10, x24, x5
cmn x13, #0x1
ldp x3, x14, [x1]
eor x17, x17, x13
umulh x5, x24, x5
adcs x20, x20, x17
eor x17, x15, x13
adcs x16, x6, x17
eor x22, x10, x11
adcs x23, x4, x13
extr x10, x14, x3, #52
and x26, x3, #0xfffffffffffff
adcs x24, x21, x13
and x15, x10, #0xfffffffffffff
adc x6, x7, x13
cmn x11, #0x1
adcs x17, x20, x22
eor x4, x5, x11
ldp x21, x10, [sp]
adcs x7, x16, x4
eor x16, x17, x9
eor x13, x7, x9
ldp x3, x17, [sp, #16]
adcs x7, x23, x11
eor x23, x7, x9
ldp x5, x22, [sp, #32]
adcs x7, x24, x11
adc x24, x6, x11
ldr x6, [x2, #64]
adds x20, x8, x21
lsl x11, x20, #9
eor x4, x7, x9
orr x7, x11, x19
eor x8, x24, x9
adcs x11, x25, x10
mul x26, x6, x26
ldp x19, x24, [sp, #48]
adcs x12, x12, x3
adcs x16, x16, x17
adcs x9, x13, x5
ldr x25, [sp, #64]
extr x20, x11, x20, #55
adcs x13, x23, x22
adcs x4, x4, x19
extr x23, x12, x11, #55
adcs x8, x8, x24
adc x11, x25, xzr
adds x21, x9, x21
extr x9, x16, x12, #55
lsr x12, x16, #55
adcs x10, x13, x10
mul x15, x6, x15
adcs x13, x4, x3
ldp x16, x4, [x2]
ldr x3, [x1, #64]
adcs x17, x8, x17
adcs x5, x5, x7
adcs x20, x22, x20
adcs x8, x19, x23
and x22, x16, #0xfffffffffffff
ldp x19, x7, [x1, #16]
adcs x9, x24, x9
extr x24, x4, x16, #52
adc x16, x12, x25
mul x22, x3, x22
and x25, x24, #0xfffffffffffff
extr x14, x19, x14, #40
and x12, x14, #0xfffffffffffff
extr x23, x7, x19, #28
ldp x19, x24, [x2, #16]
mul x14, x3, x25
and x23, x23, #0xfffffffffffff
add x22, x26, x22
lsl x11, x11, #48
lsr x26, x22, #52
lsl x25, x22, #12
mul x22, x6, x12
extr x12, x19, x4, #40
add x4, x15, x14
mul x15, x6, x23
add x4, x4, x26
extr x23, x24, x19, #28
ldp x14, x19, [x1, #32]
and x26, x12, #0xfffffffffffff
extr x12, x4, x25, #12
and x25, x23, #0xfffffffffffff
adds x21, x21, x12
mul x12, x3, x26
extr x23, x14, x7, #16
and x23, x23, #0xfffffffffffff
mul x7, x3, x25
ldp x25, x26, [x2, #32]
add x12, x22, x12
extr x22, x19, x14, #56
mul x23, x6, x23
lsr x14, x14, #4
extr x24, x25, x24, #16
add x7, x15, x7
and x15, x24, #0xfffffffffffff
and x22, x22, #0xfffffffffffff
lsr x24, x4, #52
mul x15, x3, x15
and x14, x14, #0xfffffffffffff
add x12, x12, x24
lsl x24, x4, #12
lsr x4, x12, #52
extr x24, x12, x24, #24
adcs x10, x10, x24
lsl x24, x12, #12
add x12, x7, x4
mul x22, x6, x22
add x4, x23, x15
extr x7, x12, x24, #36
adcs x13, x13, x7
lsl x15, x12, #12
add x7, x4, x11
lsr x24, x12, #52
ldp x23, x11, [x2, #48]
add x4, x7, x24
mul x12, x6, x14
extr x7, x26, x25, #56
extr x14, x4, x15, #48
and x2, x7, #0xfffffffffffff
extr x24, x11, x23, #32
ldp x15, x7, [x1, #48]
and x1, x24, #0xfffffffffffff
lsr x24, x4, #52
mul x2, x3, x2
extr x26, x23, x26, #44
lsr x23, x25, #4
and x23, x23, #0xfffffffffffff
and x25, x26, #0xfffffffffffff
extr x26, x7, x15, #32
extr x19, x15, x19, #44
mul x23, x3, x23
and x15, x26, #0xfffffffffffff
lsl x26, x4, #12
and x4, x19, #0xfffffffffffff
lsr x11, x11, #20
mul x19, x6, x4
adcs x17, x17, x14
add x14, x22, x2
add x22, x12, x23
lsr x7, x7, #20
add x22, x22, x24
extr x2, x22, x26, #60
mul x24, x3, x25
lsr x22, x22, #52
add x14, x14, x22
lsl x22, x2, #8
extr x22, x14, x22, #8
lsl x2, x14, #12
mul x1, x3, x1
adcs x12, x5, x22
mul x5, x6, x15
and x26, x10, x13
and x4, x26, x17
add x23, x19, x24
lsr x14, x14, #52
mul x22, x3, x11
add x11, x23, x14
extr x25, x11, x2, #20
lsl x19, x11, #12
adcs x25, x20, x25
and x14, x4, x12
add x1, x5, x1
and x14, x14, x25
mul x15, x6, x7
add x26, x15, x22
mul x6, x6, x3
lsr x22, x11, #52
add x4, x1, x22
lsr x1, x4, #52
extr x3, x4, x19, #32
lsl x15, x4, #12
add x7, x26, x1
adcs x23, x8, x3
extr x20, x7, x15, #44
and x3, x14, x23
lsr x19, x7, #44
adcs x7, x9, x20
add x11, x6, x19
adc x4, x16, x11
lsr x14, x4, #9
cmp xzr, xzr
and x15, x3, x7
orr x3, x4, #0xfffffffffffffe00
adcs xzr, x21, x14
adcs xzr, x15, xzr
adcs xzr, x3, xzr
adcs x11, x21, x14
and x14, x11, #0x1ff
adcs x1, x10, xzr
extr x10, x1, x11, #9
str x14, [x0, #64]
adcs x14, x13, xzr
extr x11, x14, x1, #9
adcs x1, x17, xzr
extr x4, x1, x14, #9
stp x10, x11, [x0]
adcs x11, x12, xzr
extr x14, x11, x1, #9
adcs x10, x25, xzr
extr x11, x10, x11, #9
stp x4, x14, [x0, #16]
adcs x14, x23, xzr
extr x10, x14, x10, #9
adcs x1, x7, xzr
stp x11, x10, [x0, #32]
extr x14, x1, x14, #9
adc x10, x3, xzr
extr x26, x10, x1, #9
stp x14, x26, [x0, #48]
add sp, sp, #80
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
local_sqr_p521:
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
ldr q23, [x1, #32]
ldp x9, x2, [x1, #32]
ldr q16, [x1, #32]
ldr q20, [x1, #48]
ldp x6, x13, [x1, #48]
rev64 v2.4S, v23.4S
mul x14, x9, x2
ldr q31, [x1, #48]
subs x22, x9, x2
uzp2 v26.4S, v23.4S, v23.4S
mul v30.4S, v2.4S, v16.4S
xtn v0.2S, v20.2D
csetm x12, cc
xtn v21.2S, v16.2D
xtn v23.2S, v23.2D
umulh x10, x9, x6
rev64 v27.4S, v31.4S
umull v2.2D, v21.2S, v26.2S
cneg x23, x22, cc
uaddlp v25.2D, v30.4S
umull v18.2D, v21.2S, v23.2S
mul x22, x9, x6
mul v6.4S, v27.4S, v20.4S
uzp2 v17.4S, v20.4S, v20.4S
shl v20.2D, v25.2D, #32
uzp2 v27.4S, v31.4S, v31.4S
mul x16, x2, x13
umlal v20.2D, v21.2S, v23.2S
usra v2.2D, v18.2D, #32
adds x8, x22, x10
umull v25.2D, v17.2S, v27.2S
xtn v31.2S, v31.2D
movi v1.2D, #0xffffffff
adc x3, x10, xzr
umulh x21, x2, x13
uzp2 v21.4S, v16.4S, v16.4S
umull v18.2D, v0.2S, v27.2S
subs x19, x13, x6
and v7.16B, v2.16B, v1.16B
umull v27.2D, v0.2S, v31.2S
cneg x20, x19, cc
movi v30.2D, #0xffffffff
umull v16.2D, v21.2S, v26.2S
umlal v7.2D, v21.2S, v23.2S
mul x19, x23, x20
cinv x7, x12, cc
uaddlp v6.2D, v6.4S
eor x12, x19, x7
adds x11, x8, x16
umulh x10, x23, x20
ldr q1, [x1]
usra v16.2D, v2.2D, #32
adcs x19, x3, x21
shl v2.2D, v6.2D, #32
adc x20, x21, xzr
adds x17, x19, x16
usra v18.2D, v27.2D, #32
adc x19, x20, xzr
cmn x7, #0x1
umlal v2.2D, v0.2S, v31.2S
umulh x16, x9, x2
adcs x8, x11, x12
usra v16.2D, v7.2D, #32
ldr x12, [x1, #64]
eor x20, x10, x7
umulh x10, x6, x13
mov x23, v2.d[0]
mov x3, v2.d[1]
adcs x21, x17, x20
usra v25.2D, v18.2D, #32
and v23.16B, v18.16B, v30.16B
adc x7, x19, x7
adds x22, x22, x22
ldr q7, [x1, #16]
adcs x17, x8, x8
umlal v23.2D, v17.2S, v31.2S
mov x19, v16.d[0]
mul x11, x12, x12
ldr q4, [x1]
usra v25.2D, v23.2D, #32
add x5, x12, x12
adcs x15, x21, x21
ldr q28, [x1]
mov x12, v20.d[1]
adcs x24, x7, x7
mov x21, v16.d[1]
adc x4, xzr, xzr
adds x19, x19, x14
ldr q18, [x1, #16]
xtn v26.2S, v1.2D
adcs x8, x12, x16
adc x21, x21, xzr
adds x7, x19, x14
xtn v23.2S, v7.2D
rev64 v21.4S, v28.4S
adcs x12, x8, x16
ldp x20, x19, [x1]
mov x16, v25.d[1]
xtn v22.2S, v28.2D
adc x14, x21, xzr
adds x8, x22, x12
uzp2 v24.4S, v28.4S, v28.4S
rev64 v28.4S, v18.4S
mul x12, x6, x13
mul v16.4S, v21.4S, v1.4S
shrn v31.2S, v7.2D, #32
adcs x22, x17, x14
mov x14, v25.d[0]
and x21, x20, #0xfffffffffffff
umull v17.2D, v26.2S, v24.2S
ldr q2, [x1, #32]
adcs x17, x15, xzr
ldr q30, [x1, #48]
umull v7.2D, v26.2S, v22.2S
adcs x15, x24, xzr
ldr q0, [x1, #16]
movi v6.2D, #0xffffffff
adc x4, x4, xzr
adds x14, x14, x12
uzp1 v27.4S, v18.4S, v4.4S
uzp2 v19.4S, v1.4S, v1.4S
adcs x24, x3, x10
mul x3, x5, x21
umull v29.2D, v23.2S, v31.2S
ldr q5, [x1]
adc x21, x16, xzr
adds x16, x14, x12
extr x12, x19, x20, #52
umull v18.2D, v19.2S, v24.2S
adcs x24, x24, x10
and x10, x12, #0xfffffffffffff
ldp x14, x12, [x1, #16]
usra v17.2D, v7.2D, #32
adc x21, x21, xzr
adds x23, x23, x17
mul x17, x5, x10
shl v21.2D, v29.2D, #33
lsl x10, x3, #12
lsr x1, x3, #52
rev64 v29.4S, v2.4S
uaddlp v25.2D, v16.4S
add x17, x17, x1
adcs x16, x16, x15
extr x3, x14, x19, #40
mov x15, v20.d[0]
extr x10, x17, x10, #12
and x3, x3, #0xfffffffffffff
shl v3.2D, v25.2D, #32
and v6.16B, v17.16B, v6.16B
mul x1, x5, x3
usra v18.2D, v17.2D, #32
adcs x3, x24, x4
extr x4, x12, x14, #28
umlal v6.2D, v19.2S, v22.2S
xtn v20.2S, v2.2D
umlal v3.2D, v26.2S, v22.2S
movi v26.2D, #0xffffffff
lsr x24, x17, #52
and x4, x4, #0xfffffffffffff
uzp2 v19.4S, v2.4S, v2.4S
add x1, x1, x24
mul x24, x5, x4
lsl x4, x17, #12
xtn v24.2S, v5.2D
extr x17, x1, x4, #24
adc x21, x21, xzr
umlal v21.2D, v23.2S, v23.2S
adds x4, x15, x10
lsl x10, x1, #12
adcs x15, x7, x17
mul v23.4S, v28.4S, v4.4S
and x7, x4, #0x1ff
lsr x17, x1, #52
umulh x1, x19, x12
uzp2 v17.4S, v5.4S, v5.4S
extr x4, x15, x4, #9
add x24, x24, x17
mul v29.4S, v29.4S, v5.4S
extr x17, x24, x10, #36
extr x10, x9, x12, #16
uzp1 v28.4S, v4.4S, v4.4S
adcs x17, x8, x17
and x8, x10, #0xfffffffffffff
umull v16.2D, v24.2S, v20.2S
extr x10, x17, x15, #9
mul x15, x5, x8
stp x4, x10, [x0]
lsl x4, x24, #12
lsr x8, x9, #4
uaddlp v4.2D, v23.4S
and x8, x8, #0xfffffffffffff
umull v23.2D, v24.2S, v19.2S
mul x8, x5, x8
extr x10, x2, x9, #56
lsr x24, x24, #52
and x10, x10, #0xfffffffffffff
add x15, x15, x24
extr x4, x15, x4, #48
mul x24, x5, x10
lsr x10, x15, #52
usra v23.2D, v16.2D, #32
add x10, x8, x10
shl v4.2D, v4.2D, #32
adcs x22, x22, x4
extr x4, x6, x2, #44
lsl x15, x15, #12
lsr x8, x10, #52
extr x15, x10, x15, #60
and x10, x4, #0xfffffffffffff
umlal v4.2D, v28.2S, v27.2S
add x8, x24, x8
extr x4, x13, x6, #32
mul x24, x5, x10
uzp2 v16.4S, v30.4S, v30.4S
lsl x10, x15, #8
rev64 v28.4S, v30.4S
and x15, x4, #0xfffffffffffff
extr x4, x8, x10, #8
mul x10, x5, x15
lsl x15, x8, #12
adcs x23, x23, x4
lsr x4, x8, #52
lsr x8, x13, #20
add x4, x24, x4
mul x8, x5, x8
lsr x24, x4, #52
extr x15, x4, x15, #20
lsl x4, x4, #12
add x10, x10, x24
adcs x15, x16, x15
extr x4, x10, x4, #32
umulh x5, x20, x14
adcs x3, x3, x4
usra v18.2D, v6.2D, #32
lsl x16, x10, #12
extr x24, x15, x23, #9
lsr x10, x10, #52
uzp2 v27.4S, v0.4S, v0.4S
add x8, x8, x10
extr x10, x3, x15, #9
extr x4, x22, x17, #9
and v25.16B, v23.16B, v26.16B
lsr x17, x8, #44
extr x15, x8, x16, #44
extr x16, x23, x22, #9
xtn v7.2S, v30.2D
mov x8, v4.d[0]
stp x24, x10, [x0, #32]
uaddlp v30.2D, v29.4S
stp x4, x16, [x0, #16]
umulh x24, x20, x19
adcs x15, x21, x15
adc x16, x11, x17
subs x11, x20, x19
xtn v5.2S, v0.2D
csetm x17, cc
extr x3, x15, x3, #9
mov x22, v4.d[1]
cneg x21, x11, cc
subs x10, x12, x14
mul v31.4S, v28.4S, v0.4S
cneg x10, x10, cc
cinv x11, x17, cc
shl v4.2D, v30.2D, #32
umull v28.2D, v5.2S, v16.2S
extr x23, x16, x15, #9
adds x4, x8, x5
mul x17, x21, x10
umull v22.2D, v5.2S, v7.2S
adc x15, x5, xzr
adds x4, x4, x22
uaddlp v2.2D, v31.4S
lsr x5, x16, #9
adcs x16, x15, x1
mov x15, v18.d[0]
adc x1, x1, xzr
umulh x10, x21, x10
adds x22, x16, x22
umlal v4.2D, v24.2S, v20.2S
umull v30.2D, v27.2S, v16.2S
stp x3, x23, [x0, #48]
add x3, x7, x5
adc x16, x1, xzr
usra v28.2D, v22.2D, #32
mul x23, x20, x19
eor x1, x17, x11
cmn x11, #0x1
mov x17, v18.d[1]
umull v18.2D, v17.2S, v19.2S
adcs x7, x4, x1
eor x1, x10, x11
umlal v25.2D, v17.2S, v20.2S
movi v16.2D, #0xffffffff
adcs x22, x22, x1
usra v18.2D, v23.2D, #32
umulh x4, x14, x14
adc x1, x16, x11
adds x10, x8, x8
shl v23.2D, v2.2D, #32
str x3, [x0, #64]
adcs x5, x7, x7
and v16.16B, v28.16B, v16.16B
usra v30.2D, v28.2D, #32
adcs x7, x22, x22
mov x21, v3.d[1]
adcs x11, x1, x1
umlal v16.2D, v27.2S, v7.2S
adc x22, xzr, xzr
adds x16, x15, x23
mul x8, x14, x12
umlal v23.2D, v5.2S, v7.2S
usra v18.2D, v25.2D, #32
umulh x15, x14, x12
adcs x21, x21, x24
usra v30.2D, v16.2D, #32
adc x1, x17, xzr
adds x3, x16, x23
adcs x21, x21, x24
adc x1, x1, xzr
adds x24, x10, x21
umulh x21, x12, x12
adcs x16, x5, x1
adcs x10, x7, xzr
mov x17, v21.d[1]
adcs x23, x11, xzr
adc x5, x22, xzr
adds x1, x4, x8
adcs x22, x17, x15
ldp x17, x4, [x0]
mov x11, v21.d[0]
adc x21, x21, xzr
adds x1, x1, x8
adcs x15, x22, x15
adc x8, x21, xzr
adds x22, x11, x10
mov x21, v3.d[0]
adcs x11, x1, x23
ldp x1, x10, [x0, #16]
adcs x15, x15, x5
adc x7, x8, xzr
adds x8, x17, x21
mov x23, v4.d[1]
ldp x5, x21, [x0, #32]
adcs x17, x4, x3
ldr x4, [x0, #64]
mov x3, v18.d[0]
adcs x24, x1, x24
stp x8, x17, [x0]
adcs x17, x10, x16
ldp x1, x16, [x0, #48]
adcs x5, x5, x22
adcs x8, x21, x11
stp x5, x8, [x0, #32]
adcs x1, x1, x15
mov x15, v23.d[1]
adcs x21, x16, x7
stp x1, x21, [x0, #48]
adc x10, x4, xzr
subs x7, x14, x12
mov x16, v18.d[1]
cneg x5, x7, cc
csetm x4, cc
subs x11, x13, x6
mov x8, v23.d[0]
cneg x7, x11, cc
cinv x21, x4, cc
mov x11, v30.d[0]
adds x4, x23, x3
mul x22, x5, x7
mov x23, v30.d[1]
adcs x8, x8, x16
adcs x16, x15, x11
adc x11, x23, xzr
umulh x3, x5, x7
stp x24, x17, [x0, #16]
mov x5, v4.d[0]
subs x15, x20, x19
cneg x7, x15, cc
str x10, [x0, #64]
csetm x1, cc
subs x24, x2, x9
cneg x17, x24, cc
cinv x15, x1, cc
adds x23, x4, x5
umulh x1, x7, x17
adcs x24, x8, x4
adcs x10, x16, x8
eor x8, x22, x21
adcs x16, x11, x16
mul x22, x7, x17
eor x17, x1, x15
adc x1, xzr, x11
adds x11, x24, x5
eor x7, x3, x21
adcs x3, x10, x23
adcs x24, x16, x24
adcs x4, x1, x10
eor x10, x22, x15
adcs x16, xzr, x16
adc x1, xzr, x1
cmn x21, #0x1
adcs x8, x4, x8
adcs x22, x16, x7
adc x7, x1, x21
subs x21, x19, x12
csetm x4, cc
cneg x1, x21, cc
subs x21, x13, x2
cinv x16, x4, cc
cneg x4, x21, cc
cmn x15, #0x1
adcs x21, x23, x10
mul x23, x1, x4
adcs x11, x11, x17
adcs x3, x3, x15
umulh x1, x1, x4
adcs x24, x24, x15
adcs x8, x8, x15
adcs x22, x22, x15
eor x17, x23, x16
adc x15, x7, x15
subs x7, x20, x14
cneg x7, x7, cc
csetm x4, cc
subs x10, x20, x12
cneg x23, x10, cc
csetm x10, cc
subs x12, x6, x9
cinv x20, x4, cc
cneg x12, x12, cc
cmn x16, #0x1
eor x1, x1, x16
adcs x17, x24, x17
mul x4, x7, x12
adcs x8, x8, x1
umulh x1, x7, x12
adcs x24, x22, x16
adc x7, x15, x16
subs x12, x13, x9
cneg x12, x12, cc
cinv x13, x10, cc
subs x19, x19, x14
mul x9, x23, x12
cneg x19, x19, cc
csetm x10, cc
eor x16, x1, x20
subs x22, x6, x2
umulh x12, x23, x12
eor x1, x4, x20
cinv x4, x10, cc
cneg x22, x22, cc
cmn x20, #0x1
adcs x15, x11, x1
eor x6, x12, x13
adcs x10, x3, x16
adcs x17, x17, x20
eor x23, x9, x13
adcs x2, x8, x20
mul x11, x19, x22
adcs x24, x24, x20
adc x7, x7, x20
cmn x13, #0x1
adcs x3, x10, x23
umulh x22, x19, x22
adcs x17, x17, x6
eor x12, x22, x4
extr x22, x15, x21, #63
adcs x8, x2, x13
extr x21, x21, x5, #63
ldp x16, x23, [x0]
adcs x20, x24, x13
eor x1, x11, x4
adc x6, x7, x13
cmn x4, #0x1
ldp x2, x7, [x0, #16]
adcs x1, x3, x1
extr x19, x1, x15, #63
adcs x14, x17, x12
extr x1, x14, x1, #63
lsl x17, x5, #1
adcs x8, x8, x4
extr x12, x8, x14, #8
ldp x15, x11, [x0, #32]
adcs x9, x20, x4
adc x3, x6, x4
adds x16, x12, x16
extr x6, x9, x8, #8
ldp x14, x12, [x0, #48]
extr x8, x3, x9, #8
adcs x20, x6, x23
ldr x24, [x0, #64]
lsr x6, x3, #8
adcs x8, x8, x2
and x2, x1, #0x1ff
and x1, x20, x8
adcs x4, x6, x7
adcs x3, x17, x15
and x1, x1, x4
adcs x9, x21, x11
and x1, x1, x3
adcs x6, x22, x14
and x1, x1, x9
and x21, x1, x6
adcs x14, x19, x12
adc x1, x24, x2
cmp xzr, xzr
orr x12, x1, #0xfffffffffffffe00
lsr x1, x1, #9
adcs xzr, x16, x1
and x21, x21, x14
adcs xzr, x21, xzr
adcs xzr, x12, xzr
adcs x21, x16, x1
adcs x1, x20, xzr
adcs x19, x8, xzr
stp x21, x1, [x0]
adcs x1, x4, xzr
adcs x21, x3, xzr
stp x19, x1, [x0, #16]
adcs x1, x9, xzr
stp x21, x1, [x0, #32]
adcs x21, x6, xzr
adcs x1, x14, xzr
stp x21, x1, [x0, #48]
adc x1, x12, xzr
and x1, x1, #0x1ff
str x1, [x0, #64]
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
local_sub_p521:
ldp x5, x6, [x1]
ldp x4, x3, [x2]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [x1, #16]
ldp x4, x3, [x2, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [x1, #32]
ldp x4, x3, [x2, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
ldp x11, x12, [x1, #48]
ldp x4, x3, [x2, #48]
sbcs x11, x11, x4
sbcs x12, x12, x3
ldr x13, [x1, #64]
ldr x4, [x2, #64]
sbcs x13, x13, x4
sbcs x5, x5, xzr
sbcs x6, x6, xzr
sbcs x7, x7, xzr
sbcs x8, x8, xzr
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbcs x13, x13, xzr
and x13, x13, #0x1ff
stp x5, x6, [x0]
stp x7, x8, [x0, #16]
stp x9, x10, [x0, #32]
stp x11, x12, [x0, #48]
str x13, [x0, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_mod_p521_9.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Reduce modulo field characteristic, z := x mod p_521
// Input x[9]; output z[9]
//
// extern void bignum_mod_p521_9
// (uint64_t z[static 9], uint64_t x[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_p521_9)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_p521_9)
.text
.balign 4
#define z x0
#define x x1
#define h x2
#define t x3
#define d0 x4
#define d1 x5
#define d2 x6
#define d3 x7
#define d4 x8
#define d5 x9
#define d6 x10
#define d7 x11
#define d8 x12
S2N_BN_SYMBOL(bignum_mod_p521_9):
// Load top digit first and get its upper bits in h so that we
// separate out x = 2^521 * H + L with h = H. Now x mod p_521 =
// (H + L) mod p_521 = if H + L >= p_521 then H + L - p_521 else H + L.
ldr d8, [x, #64]
lsr h, d8, #9
// Load in the other digits and decide whether H + L >= p_521. This is
// equivalent to H + L + 1 >= 2^521, and since this can only happen if
// digits d7,...,d1 consist entirely of 1 bits, we can condense the
// carry chain by ANDing digits together, perhaps reducing its latency.
// This condenses only three pairs; the payoff beyond that seems limited.
// By stuffing in 1 bits from bit position 521 upwards, we get CF directly
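// A plain-integer reference for the reduction just described (a Python
// sketch for cross-checking, not the limb-level code below):
P521 = 2**521 - 1
def mod_p521_9_ref(x):                   # any x < 2**576
    H, L = x >> 521, x & (2**521 - 1)    # x = 2^521 * H + L
    s = H + L                            # valid since 2^521 == 1 (mod p_521)
    return s - P521 if s >= P521 else s  # s < 2*p_521, so one subtraction suffices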
subs xzr, xzr, xzr
ldp d0, d1, [x]
adcs xzr, d0, h
adcs xzr, d1, xzr
ldp d2, d3, [x, #16]
and t, d2, d3
adcs xzr, t, xzr
ldp d4, d5, [x, #32]
and t, d4, d5
adcs xzr, t, xzr
ldp d6, d7, [x, #48]
and t, d6, d7
adcs xzr, t, xzr
orr t, d8, #~0x1FF
adcs t, t, xzr
// Now H + L >= p_521 <=> H + L + 1 >= 2^521 <=> CF from this comparison.
// So if CF is set we want (H + L) - p_521 = (H + L + 1) - 2^521
// while otherwise we want just H + L. So mask H + L + CF to 521 bits.
adcs d0, d0, h
adcs d1, d1, xzr
adcs d2, d2, xzr
adcs d3, d3, xzr
adcs d4, d4, xzr
adcs d5, d5, xzr
adcs d6, d6, xzr
adcs d7, d7, xzr
adc d8, d8, xzr
and d8, d8, #0x1FF
// Store the result
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
stp d6, d7, [z, #48]
str d8, [z, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_double_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Double modulo p_521, z := (2 * x) mod p_521, assuming x reduced
// Input x[9]; output z[9]
//
// extern void bignum_double_p521
// (uint64_t z[static 9], uint64_t x[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_double_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_double_p521)
.text
.balign 4
#define z x0
#define x x1
#define c x2
#define h x3
#define l x4
S2N_BN_SYMBOL(bignum_double_p521):
// We can decide whether 2 * x >= p_521 just by 2 * x >= 2^521, which
// amounts to whether the top word is >= 256
ldr c, [x, #64]
subs xzr, c, #256
// Now if 2 * x >= p_521 we want 2 * x - p_521 = (2 * x + 1) - 2^521
// and otherwise just 2 * x. Feed in the condition as the carry bit
// to get 2 * x + [2 * x >= p_521] then just mask it off to 521 bits.
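// The same decision on plain integers (a Python sketch for illustration):
P521 = 2**521 - 1
def double_p521_ref(x):                    # x assumed reduced: 0 <= x < p_521
    d = 2 * x
    return d - P521 if d >= 2**521 else d  # d >= 2^521 <=> d >= p_521 since d is even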
ldp l, h, [x]
adcs l, l, l
adcs h, h, h
stp l, h, [z]
ldp l, h, [x, #16]
adcs l, l, l
adcs h, h, h
stp l, h, [z, #16]
ldp l, h, [x, #32]
adcs l, l, l
adcs h, h, h
stp l, h, [z, #32]
ldp l, h, [x, #48]
adcs l, l, l
adcs h, h, h
stp l, h, [z, #48]
adc c, c, c
and c, c, #0x1FF
str c, [z, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_optneg_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Optionally negate modulo p_521, z := (-x) mod p_521 (if p nonzero) or
// z := x (if p zero), assuming x reduced
// Inputs p, x[9]; output z[9]
//
// extern void bignum_optneg_p521
// (uint64_t z[static 9], uint64_t p, uint64_t x[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = p, X2 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_optneg_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_optneg_p521)
.text
.balign 4
#define z x0
#define p x1
#define x x2
#define q x3
#define d0 x4
#define d1 x5
#define d2 x6
#define d3 x7
#define d4 x8
#define d5 x9
#define d6 x10
#define d7 x11
#define d8 x12
S2N_BN_SYMBOL(bignum_optneg_p521):
// Load the 9 digits of x and generate q = the OR of them all
ldp d0, d1, [x]
orr d6, d0, d1
ldp d2, d3, [x, #16]
orr d7, d2, d3
orr q, d6, d7
ldp d4, d5, [x, #32]
orr d8, d4, d5
orr q, q, d8
ldp d6, d7, [x, #48]
orr d8, d6, d7
orr q, q, d8
ldr d8, [x, #64]
orr q, q, d8
// Turn q into a bitmask for "input is nonzero and p is nonzero", so that
// we avoid doing -0 = p_521 and hence maintain strict modular reduction
cmp q, #0
csetm q, ne
cmp p, #0
csel q, xzr, q, eq
// Since p_521 is all 1s, the subtraction is just an exclusive-or with q
// to give an optional inversion, with a slight fiddle for the top digit.
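// A Python sketch of the overall semantics, relying on the all-1s
// observation above (names are illustrative only):
P521 = 2**521 - 1
def optneg_p521_ref(p, x):               # x assumed reduced
    if p == 0 or x == 0:                 # keep x; also avoids -0 -> p_521
        return x
    return P521 ^ x                      # == P521 - x, since no borrows can occur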
eor d0, d0, q
eor d1, d1, q
eor d2, d2, q
eor d3, d3, q
eor d4, d4, q
eor d5, d5, q
eor d6, d6, q
eor d7, d7, q
and q, q, #0x1FF
eor d8, d8, q
// Write back the result and return
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
stp d6, d7, [z, #48]
str d8, [z, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_sub_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Subtract modulo p_521, z := (x - y) mod p_521
// Inputs x[9], y[9]; output z[9]
//
// extern void bignum_sub_p521
// (uint64_t z[static 9], uint64_t x[static 9], uint64_t y[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sub_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sub_p521)
.text
.balign 4
#define z x0
#define x x1
#define y x2
#define h x3
#define l x4
#define d0 x5
#define d1 x6
#define d2 x7
#define d3 x8
#define d4 x9
#define d5 x10
#define d6 x11
#define d7 x12
#define d8 x13
S2N_BN_SYMBOL(bignum_sub_p521):
// First just subtract the numbers as [d8;d7;d6;d5;d4;d3;d2;d1;d0] = x - y
ldp d0, d1, [x]
ldp l, h, [y]
subs d0, d0, l
sbcs d1, d1, h
ldp d2, d3, [x, #16]
ldp l, h, [y, #16]
sbcs d2, d2, l
sbcs d3, d3, h
ldp d4, d5, [x, #32]
ldp l, h, [y, #32]
sbcs d4, d4, l
sbcs d5, d5, h
ldp d6, d7, [x, #48]
ldp l, h, [y, #48]
sbcs d6, d6, l
sbcs d7, d7, h
ldr d8, [x, #64]
ldr l, [y, #64]
sbcs d8, d8, l
// Now if x < y we want (x - y) + p_521 == (x - y) - 1 (mod 2^521)
// Otherwise we just want the existing x - y result. So subtract
// 1 iff the initial subtraction carried, then mask to 521 bits.
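// On plain integers the correction amounts to the following (a Python sketch):
P521 = 2**521 - 1
def sub_p521_ref(x, y):                  # x, y assumed reduced
    d = x - y
    return d + P521 if d < 0 else d      # adding p_521 == subtracting 1 mod 2^521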
sbcs d0, d0, xzr
sbcs d1, d1, xzr
sbcs d2, d2, xzr
sbcs d3, d3, xzr
sbcs d4, d4, xzr
sbcs d5, d5, xzr
sbcs d6, d6, xzr
sbcs d7, d7, xzr
sbcs d8, d8, xzr
and d8, d8, #0x1FF
// Store the result
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
stp d6, d7, [z, #48]
str d8, [z, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_sqr_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Square modulo p_521, z := (x^2) mod p_521, assuming x reduced
// Input x[9]; output z[9]
//
// extern void bignum_sqr_p521 (uint64_t z[static 9], uint64_t x[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_p521)
.text
.balign 4
#define z x0
#define x x1
#define a0 x2
#define a1 x3
#define a2 x4
#define a3 x5
#define b0 x6
#define b1 x7
#define b2 x8
#define b3 x9
#define s0 x10
#define s1 x11
#define s2 x12
#define s3 x13
#define s4 x14
#define s5 x15
#define s6 x16
#define s7 x17
#define c x19
#define h x20
#define l x21
#define t x22
#define u x23
#define v x24
// Aliased to earlier ones we no longer need
#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x6
#define d5 x7
#define d6 x8
#define d7 x9
#define d8 x10
S2N_BN_SYMBOL(bignum_sqr_p521):
// Save registers
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
// Load all the inputs first
ldp a0, a1, [x]
ldp a2, a3, [x, #16]
ldp b0, b1, [x, #32]
ldp b2, b3, [x, #48]
// Square the upper half with a register-renamed variant of bignum_sqr_4_8
mul s2, b0, b2
mul s7, b1, b3
umulh t, b0, b2
subs u, b0, b1
cneg u, u, cc
csetm s1, cc
subs s0, b3, b2
cneg s0, s0, cc
mul s6, u, s0
umulh s0, u, s0
cinv s1, s1, cc
eor s6, s6, s1
eor s0, s0, s1
adds s3, s2, t
adc t, t, xzr
umulh u, b1, b3
adds s3, s3, s7
adcs t, t, u
adc u, u, xzr
adds t, t, s7
adc u, u, xzr
cmn s1, #0x1
adcs s3, s3, s6
adcs t, t, s0
adc u, u, s1
adds s2, s2, s2
adcs s3, s3, s3
adcs t, t, t
adcs u, u, u
adc c, xzr, xzr
mul s0, b0, b0
mul s6, b1, b1
mul l, b0, b1
umulh s1, b0, b0
umulh s7, b1, b1
umulh h, b0, b1
adds s1, s1, l
adcs s6, s6, h
adc s7, s7, xzr
adds s1, s1, l
adcs s6, s6, h
adc s7, s7, xzr
adds s2, s2, s6
adcs s3, s3, s7
adcs t, t, xzr
adcs u, u, xzr
adc c, c, xzr
mul s4, b2, b2
mul s6, b3, b3
mul l, b2, b3
umulh s5, b2, b2
umulh s7, b3, b3
umulh h, b2, b3
adds s5, s5, l
adcs s6, s6, h
adc s7, s7, xzr
adds s5, s5, l
adcs s6, s6, h
adc s7, s7, xzr
adds s4, s4, t
adcs s5, s5, u
adcs s6, s6, c
adc s7, s7, xzr
// Augment the high part with the contribution from the top little word C.
// If we write the input as 2^512 * C + x then we are otherwise just doing
// x^2, so we need to add to the high part 2^512 * C^2 + (2 * C) * x.
// Accumulate it as [c;s7;...;s0] = H'. Since 2 * C is only 10 bits long
// we multiply 52-bit chunks of the x digits by 2 * C and solve the overlap
// with non-overflowing addition to get 52-bit chunks of the result with
// similar alignment. Then we stitch these back together and add them into
// the running total. This is quite a bit of palaver, but it avoids using
// the standard 2-part multiplications involving umulh, and on target
// microarchitectures seems to improve performance by about 5%. We could
// equally well use 53 or 54 since they are still <= 64 - 10, but below
// 52 we would end up using more multiplications.
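// A quick Python model of the 52-bit chunking trick (illustration only, not
// the exact register schedule used below):
import random
x = random.getrandbits(512)              # stands in for the low eight digits
u = random.getrandbits(10)               # stands in for 2 * C (at most 10 bits)
acc, prod = 0, 0
for i in range(0, 520, 52):              # ten 52-bit chunks cover 512 bits
    acc += u * ((x >> i) & (2**52 - 1))  # single-word product, no umulh needed
    prod |= (acc & (2**52 - 1)) << i
    acc >>= 52
prod |= acc << 520
assert prod == u * x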
ldr c, [x, #64]
add u, c, c
mul c, c, c
// 0 * 52 = 64 * 0 + 0
and l, a0, #0x000fffffffffffff
mul l, u, l
// 1 * 52 = 64 * 0 + 52
extr h, a1, a0, #52
and h, h, #0x000fffffffffffff
mul h, u, h
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #12
adds s0, s0, t
// 2 * 52 = 64 * 1 + 40
extr l, a2, a1, #40
and l, l, #0x000fffffffffffff
mul l, u, l
lsr t, h, #52
add l, l, t
lsl h, h, #12
extr t, l, h, #24
adcs s1, s1, t
// 3 * 52 = 64 * 2 + 28
extr h, a3, a2, #28
and h, h, #0x000fffffffffffff
mul h, u, h
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #36
adcs s2, s2, t
// 4 * 52 = 64 * 3 + 16
extr l, b0, a3, #16
and l, l, #0x000fffffffffffff
mul l, u, l
lsr t, h, #52
add l, l, t
lsl h, h, #12
extr t, l, h, #48
adcs s3, s3, t
// 5 * 52 = 64 * 4 + 4
lsr h, b0, #4
and h, h, #0x000fffffffffffff
mul h, u, h
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr v, h, l, #60
// 6 * 52 = 64 * 4 + 56
extr l, b1, b0, #56
and l, l, #0x000fffffffffffff
mul l, u, l
lsr t, h, #52
add l, l, t
lsl v, v, #8
extr t, l, v, #8
adcs s4, s4, t
// 7 * 52 = 64 * 5 + 44
extr h, b2, b1, #44
and h, h, #0x000fffffffffffff
mul h, u, h
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #20
adcs s5, s5, t
// 8 * 52 = 64 * 6 + 32
extr l, b3, b2, #32
and l, l, #0x000fffffffffffff
mul l, u, l
lsr t, h, #52
add l, l, t
lsl h, h, #12
extr t, l, h, #32
adcs s6, s6, t
// 9 * 52 = 64 * 7 + 20
lsr h, b3, #20
mul h, u, h
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #44
adcs s7, s7, t
// Top word
lsr h, h, #44
adc c, c, h
// Rotate [c;s7;...;s0] before storing in the buffer.
// We want to add 2^512 * H', which splitting H' at bit 9 is
// 2^521 * H_top + 2^512 * H_bot == 2^512 * H_bot + H_top (mod p_521)
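// That congruence is easy to sanity-check with plain integers (a Python
// sketch, not part of the assembly):
P521 = 2**521 - 1
Hp = (123456789 << 300) | 0x1AB          # any value standing in for H'
H_top, H_bot = Hp >> 9, Hp & 0x1FF       # split H' at bit 9
assert (2**512 * Hp) % P521 == (2**512 * H_bot + H_top) % P521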
extr l, s1, s0, #9
extr h, s2, s1, #9
stp l, h, [z]
extr l, s3, s2, #9
extr h, s4, s3, #9
stp l, h, [z, #16]
extr l, s5, s4, #9
extr h, s6, s5, #9
stp l, h, [z, #32]
extr l, s7, s6, #9
extr h, c, s7, #9
stp l, h, [z, #48]
and t, s0, #0x1FF
lsr c, c, #9
add t, t, c
str t, [z, #64]
// Square the lower half with an analogous variant of bignum_sqr_4_8
mul s2, a0, a2
mul s7, a1, a3
umulh t, a0, a2
subs u, a0, a1
cneg u, u, cc
csetm s1, cc
subs s0, a3, a2
cneg s0, s0, cc
mul s6, u, s0
umulh s0, u, s0
cinv s1, s1, cc
eor s6, s6, s1
eor s0, s0, s1
adds s3, s2, t
adc t, t, xzr
umulh u, a1, a3
adds s3, s3, s7
adcs t, t, u
adc u, u, xzr
adds t, t, s7
adc u, u, xzr
cmn s1, #0x1
adcs s3, s3, s6
adcs t, t, s0
adc u, u, s1
adds s2, s2, s2
adcs s3, s3, s3
adcs t, t, t
adcs u, u, u
adc c, xzr, xzr
mul s0, a0, a0
mul s6, a1, a1
mul l, a0, a1
umulh s1, a0, a0
umulh s7, a1, a1
umulh h, a0, a1
adds s1, s1, l
adcs s6, s6, h
adc s7, s7, xzr
adds s1, s1, l
adcs s6, s6, h
adc s7, s7, xzr
adds s2, s2, s6
adcs s3, s3, s7
adcs t, t, xzr
adcs u, u, xzr
adc c, c, xzr
mul s4, a2, a2
mul s6, a3, a3
mul l, a2, a3
umulh s5, a2, a2
umulh s7, a3, a3
umulh h, a2, a3
adds s5, s5, l
adcs s6, s6, h
adc s7, s7, xzr
adds s5, s5, l
adcs s6, s6, h
adc s7, s7, xzr
adds s4, s4, t
adcs s5, s5, u
adcs s6, s6, c
adc s7, s7, xzr
// Add it directly to the existing buffer
ldp l, h, [z]
adds l, l, s0
adcs h, h, s1
stp l, h, [z]
ldp l, h, [z, #16]
adcs l, l, s2
adcs h, h, s3
stp l, h, [z, #16]
ldp l, h, [z, #32]
adcs l, l, s4
adcs h, h, s5
stp l, h, [z, #32]
ldp l, h, [z, #48]
adcs l, l, s6
adcs h, h, s7
stp l, h, [z, #48]
ldr t, [z, #64]
adc t, t, xzr
str t, [z, #64]
// Now get the cross-product in [s7,...,s0] with variant of bignum_mul_4_8
mul s0, a0, b0
mul s4, a1, b1
mul s5, a2, b2
mul s6, a3, b3
umulh s7, a0, b0
adds s4, s4, s7
umulh s7, a1, b1
adcs s5, s5, s7
umulh s7, a2, b2
adcs s6, s6, s7
umulh s7, a3, b3
adc s7, s7, xzr
adds s1, s4, s0
adcs s4, s5, s4
adcs s5, s6, s5
adcs s6, s7, s6
adc s7, xzr, s7
adds s2, s4, s0
adcs s3, s5, s1
adcs s4, s6, s4
adcs s5, s7, s5
adcs s6, xzr, s6
adc s7, xzr, s7
subs t, a2, a3
cneg t, t, cc
csetm c, cc
subs h, b3, b2
cneg h, h, cc
mul l, t, h
umulh h, t, h
cinv c, c, cc
cmn c, #0x1
eor l, l, c
adcs s5, s5, l
eor h, h, c
adcs s6, s6, h
adc s7, s7, c
subs t, a0, a1
cneg t, t, cc
csetm c, cc
subs h, b1, b0
cneg h, h, cc
mul l, t, h
umulh h, t, h
cinv c, c, cc
cmn c, #0x1
eor l, l, c
adcs s1, s1, l
eor h, h, c
adcs s2, s2, h
adcs s3, s3, c
adcs s4, s4, c
adcs s5, s5, c
adcs s6, s6, c
adc s7, s7, c
subs t, a1, a3
cneg t, t, cc
csetm c, cc
subs h, b3, b1
cneg h, h, cc
mul l, t, h
umulh h, t, h
cinv c, c, cc
cmn c, #0x1
eor l, l, c
adcs s4, s4, l
eor h, h, c
adcs s5, s5, h
adcs s6, s6, c
adc s7, s7, c
subs t, a0, a2
cneg t, t, cc
csetm c, cc
subs h, b2, b0
cneg h, h, cc
mul l, t, h
umulh h, t, h
cinv c, c, cc
cmn c, #0x1
eor l, l, c
adcs s2, s2, l
eor h, h, c
adcs s3, s3, h
adcs s4, s4, c
adcs s5, s5, c
adcs s6, s6, c
adc s7, s7, c
subs t, a0, a3
cneg t, t, cc
csetm c, cc
subs h, b3, b0
cneg h, h, cc
mul l, t, h
umulh h, t, h
cinv c, c, cc
cmn c, #0x1
eor l, l, c
adcs s3, s3, l
eor h, h, c
adcs s4, s4, h
adcs s5, s5, c
adcs s6, s6, c
adc s7, s7, c
subs t, a1, a2
cneg t, t, cc
csetm c, cc
subs h, b2, b1
cneg h, h, cc
mul l, t, h
umulh h, t, h
cinv c, c, cc
cmn c, #0x1
eor l, l, c
adcs s3, s3, l
eor h, h, c
adcs s4, s4, h
adcs s5, s5, c
adcs s6, s6, c
adc s7, s7, c
// Let the cross product be M. We want to add 2^256 * 2 * M to the buffer
// Split M into M_top (248 bits) and M_bot (264 bits), so we add
// 2^521 * M_top + 2^257 * M_bot == 2^257 * M_bot + M_top (mod p_521)
// Accumulate the (non-reduced in general) 9-word answer [d8;...;d0]
// As this sum is built, accumulate t = AND of words d7...d1 to help
// in condensing the carry chain in the comparison that comes next
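// The congruence above can be sanity-checked the same way (a Python sketch):
P521 = 2**521 - 1
M = (987654321 << 400) | 0xBEEF          # any cross product value M < 2^512
M_top, M_bot = M >> 264, M & (2**264 - 1)
assert (2**257 * M) % P521 == (2**257 * M_bot + M_top) % P521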
ldp l, h, [z]
extr d0, s5, s4, #8
adds d0, d0, l
extr d1, s6, s5, #8
adcs d1, d1, h
ldp l, h, [z, #16]
extr d2, s7, s6, #8
adcs d2, d2, l
and t, d1, d2
lsr d3, s7, #8
adcs d3, d3, h
and t, t, d3
ldp l, h, [z, #32]
lsl d4, s0, #1
adcs d4, d4, l
and t, t, d4
extr d5, s1, s0, #63
adcs d5, d5, h
and t, t, d5
ldp l, h, [z, #48]
extr d6, s2, s1, #63
adcs d6, d6, l
and t, t, d6
extr d7, s3, s2, #63
adcs d7, d7, h
and t, t, d7
ldr l, [z, #64]
extr d8, s4, s3, #63
and d8, d8, #0x1FF
adc d8, l, d8
// Extract the high part h and mask off the low part l = [d8;d7;...;d0]
// but stuff d8 with 1 bits at the left to ease a comparison below
lsr h, d8, #9
orr d8, d8, #~0x1FF
// Decide whether h + l >= p_521 <=> h + l + 1 >= 2^521. Since this can only
// happen if digits d7,...d1 are all 1s, we use the AND of them "t" to
// condense the carry chain, and since we stuffed 1 bits into d8 we get
// the result in CF without an additional comparison.
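// A word-level Python model of this condensed comparison (a sketch using the
// same ANDing trick; "d" holds the nine words d0..d8 with d8 already < 2^9):
M64 = 2**64 - 1
def h_plus_l_ge_p521(d, h):
    t = M64
    for w in d[1:8]:
        t &= w                           # all 1s exactly when d1..d7 all are
    c = int(d[0] + h + 1 > M64)          # carry out of the bottom word (+1 forced)
    c = int(t + c > M64)                 # propagates only through all-1s words
    return (d[8] | (M64 ^ 0x1FF)) + c > M64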
subs xzr, xzr, xzr
adcs xzr, d0, h
adcs xzr, t, xzr
adcs xzr, d8, xzr
// Now if CF is set we want (h + l) - p_521 = (h + l + 1) - 2^521
// while otherwise we want just h + l. So mask h + l + CF to 521 bits.
// This masking also gets rid of the stuffing with 1s we did above.
adcs d0, d0, h
adcs d1, d1, xzr
adcs d2, d2, xzr
adcs d3, d3, xzr
adcs d4, d4, xzr
adcs d5, d5, xzr
adcs d6, d6, xzr
adcs d7, d7, xzr
adc d8, d8, xzr
and d8, d8, #0x1FF
// Store the final result
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
stp d6, d7, [z, #48]
str d8, [z, #64]
// Restore regs and return
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_add_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Add modulo p_521, z := (x + y) mod p_521, assuming x and y reduced
// Inputs x[9], y[9]; output z[9]
//
// extern void bignum_add_p521
// (uint64_t z[static 9], uint64_t x[static 9], uint64_t y[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_add_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_add_p521)
.text
.balign 4
#define z x0
#define x x1
#define y x2
#define h x3
#define l x4
#define d0 x5
#define d1 x6
#define d2 x7
#define d3 x8
#define d4 x9
#define d5 x10
#define d6 x11
#define d7 x12
#define d8 x13
S2N_BN_SYMBOL(bignum_add_p521):
// Force carry-in to get s = [d8;d7;d6;d5;d4;d3;d2;d1;d0] = x + y + 1.
// We ignore the carry-out, assuming inputs are reduced so there is none.
subs xzr, xzr, xzr
ldp d0, d1, [x]
ldp l, h, [y]
adcs d0, d0, l
adcs d1, d1, h
ldp d2, d3, [x, #16]
ldp l, h, [y, #16]
adcs d2, d2, l
adcs d3, d3, h
ldp d4, d5, [x, #32]
ldp l, h, [y, #32]
adcs d4, d4, l
adcs d5, d5, h
ldp d6, d7, [x, #48]
ldp l, h, [y, #48]
adcs d6, d6, l
adcs d7, d7, h
ldr d8, [x, #64]
ldr l, [y, #64]
adc d8, d8, l
// Now x + y >= p_521 <=> s = x + y + 1 >= 2^521
// Set CF <=> s = x + y + 1 >= 2^521 and make it a mask in l as well
subs l, d8, #512
csetm l, cs
// Now if CF is set (and l is all 1s), we want (x + y) - p_521 = s - 2^521
// while otherwise we want x + y = s - 1 (from existing CF, which is nice)
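// The same computation on plain integers (a Python sketch of the trick):
P521 = 2**521 - 1
def add_p521_ref(x, y):                  # x, y assumed already reduced
    s = x + y + 1                        # forced carry-in
    if s >= 2**521:                      # <=> x + y >= p_521
        return s - 2**521                # == (x + y) - p_521
    return s - 1                         # undo the extra 1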
sbcs d0, d0, xzr
and l, l, #512
sbcs d1, d1, xzr
sbcs d2, d2, xzr
sbcs d3, d3, xzr
sbcs d4, d4, xzr
sbcs d5, d5, xzr
sbcs d6, d6, xzr
sbcs d7, d7, xzr
sbc d8, d8, l
// Store the result
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
stp d6, d7, [z, #48]
str d8, [z, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_montsqr_p521_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery square, z := (x^2 / 2^576) mod p_521
// Input x[9]; output z[9]
//
// extern void bignum_montsqr_p521_alt
// (uint64_t z[static 9], uint64_t x[static 9]);
//
// Does z := (x^2 / 2^576) mod p_521, assuming x < p_521. This means the
// Montgomery base is the "native size" 2^{9*64} = 2^576; since p_521 is
// a Mersenne prime the basic modular squaring bignum_sqr_p521 can be
// considered a Montgomery operation to base 2^521.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
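// A reference for the intended result (a Python sketch of the semantics only,
// with an illustrative name; pow(a, -1, m) needs Python 3.8+):
P521 = 2**521 - 1
def montsqr_p521_ref(x):                 # x assumed < p_521
    return (x * x * pow(2**576, -1, P521)) % P521
# Since 2^521 == 1 (mod p_521), dividing by 2^576 equals dividing by 2^55,
# which is why plain modular squaring is itself a Montgomery operation to
# base 2^521, as the header comment notes.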
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p521_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p521_alt)
.text
.balign 4
#define z x0
#define x x1
#define a0 x2
#define a1 x3
#define a2 x4
#define a3 x5
#define a4 x6
#define a5 x7
#define a6 x8
#define a7 x9
#define a8 x1 // Overwrites input argument at last load
#define l x10
#define u0 x11
#define u1 x12
#define u2 x13
#define u3 x14
#define u4 x15
#define u5 x16
#define u6 x17
#define u7 x19
#define u8 x20
#define u9 x21
#define u10 x22
#define u11 x23
#define u12 x24
#define u13 x25
#define u14 x26
#define u15 x27
#define u16 x29
S2N_BN_SYMBOL(bignum_montsqr_p521_alt):
// It's convenient to have more registers to play with
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, x29, [sp, #-16]!
// Load low 8 elements as [a7;a6;a5;a4;a3;a2;a1;a0], set up an initial
// window [u8;u7;u6;u5;u4;u3;u2;u1] = 10 + 20 + 30 + 40 + 50 + 60 + 70
// (where the shorthand "ij" denotes the digit product a_i * a_j)
ldp a0, a1, [x]
mul u1, a0, a1
umulh u2, a0, a1
ldp a2, a3, [x, #16]
mul l, a0, a2
umulh u3, a0, a2
adds u2, u2, l
ldp a4, a5, [x, #32]
mul l, a0, a3
umulh u4, a0, a3
adcs u3, u3, l
ldp a6, a7, [x, #48]
mul l, a0, a4
umulh u5, a0, a4
adcs u4, u4, l
mul l, a0, a5
umulh u6, a0, a5
adcs u5, u5, l
mul l, a0, a6
umulh u7, a0, a6
adcs u6, u6, l
mul l, a0, a7
umulh u8, a0, a7
adcs u7, u7, l
adc u8, u8, xzr
// Add in the next diagonal = 21 + 31 + 41 + 51 + 61 + 71 + 54
mul l, a1, a2
adds u3, u3, l
mul l, a1, a3
adcs u4, u4, l
mul l, a1, a4
adcs u5, u5, l
mul l, a1, a5
adcs u6, u6, l
mul l, a1, a6
adcs u7, u7, l
mul l, a1, a7
adcs u8, u8, l
cset u9, cs
umulh l, a1, a2
adds u4, u4, l
umulh l, a1, a3
adcs u5, u5, l
umulh l, a1, a4
adcs u6, u6, l
umulh l, a1, a5
adcs u7, u7, l
umulh l, a1, a6
adcs u8, u8, l
umulh l, a1, a7
adc u9, u9, l
mul l, a4, a5
umulh u10, a4, a5
adds u9, u9, l
adc u10, u10, xzr
// And the next one = 32 + 42 + 52 + 62 + 72 + 64 + 65
mul l, a2, a3
adds u5, u5, l
mul l, a2, a4
adcs u6, u6, l
mul l, a2, a5
adcs u7, u7, l
mul l, a2, a6
adcs u8, u8, l
mul l, a2, a7
adcs u9, u9, l
mul l, a4, a6
adcs u10, u10, l
cset u11, cs
umulh l, a2, a3
adds u6, u6, l
umulh l, a2, a4
adcs u7, u7, l
umulh l, a2, a5
adcs u8, u8, l
umulh l, a2, a6
adcs u9, u9, l
umulh l, a2, a7
adcs u10, u10, l
umulh l, a4, a6
adc u11, u11, l
mul l, a5, a6
umulh u12, a5, a6
adds u11, u11, l
adc u12, u12, xzr
// And the final one = 43 + 53 + 63 + 73 + 74 + 75 + 76
mul l, a3, a4
adds u7, u7, l
mul l, a3, a5
adcs u8, u8, l
mul l, a3, a6
adcs u9, u9, l
mul l, a3, a7
adcs u10, u10, l
mul l, a4, a7
adcs u11, u11, l
mul l, a5, a7
adcs u12, u12, l
cset u13, cs
umulh l, a3, a4
adds u8, u8, l
umulh l, a3, a5
adcs u9, u9, l
umulh l, a3, a6
adcs u10, u10, l
umulh l, a3, a7
adcs u11, u11, l
umulh l, a4, a7
adcs u12, u12, l
umulh l, a5, a7
adc u13, u13, l
mul l, a6, a7
umulh u14, a6, a7
adds u13, u13, l
adc u14, u14, xzr
// Double that, with u15 holding the top carry
adds u1, u1, u1
adcs u2, u2, u2
adcs u3, u3, u3
adcs u4, u4, u4
adcs u5, u5, u5
adcs u6, u6, u6
adcs u7, u7, u7
adcs u8, u8, u8
adcs u9, u9, u9
adcs u10, u10, u10
adcs u11, u11, u11
adcs u12, u12, u12
adcs u13, u13, u13
adcs u14, u14, u14
cset u15, cs
// Add the homogeneous terms 00 + 11 + 22 + 33 + 44 + 55 + 66 + 77
umulh l, a0, a0
mul u0, a0, a0
adds u1, u1, l
mul l, a1, a1
adcs u2, u2, l
umulh l, a1, a1
adcs u3, u3, l
mul l, a2, a2
adcs u4, u4, l
umulh l, a2, a2
adcs u5, u5, l
mul l, a3, a3
adcs u6, u6, l
umulh l, a3, a3
adcs u7, u7, l
mul l, a4, a4
adcs u8, u8, l
umulh l, a4, a4
adcs u9, u9, l
mul l, a5, a5
adcs u10, u10, l
umulh l, a5, a5
adcs u11, u11, l
mul l, a6, a6
adcs u12, u12, l
umulh l, a6, a6
adcs u13, u13, l
mul l, a7, a7
adcs u14, u14, l
umulh l, a7, a7
adc u15, u15, l
// Now load in the top digit a8, and also set up its double and square
ldr a8, [x, #64]
mul u16, a8, a8
add a8, a8, a8
// Add a8 * [a7;...;a0] into the top of the buffer
mul l, a8, a0
adds u8, u8, l
mul l, a8, a1
adcs u9, u9, l
mul l, a8, a2
adcs u10, u10, l
mul l, a8, a3
adcs u11, u11, l
mul l, a8, a4
adcs u12, u12, l
mul l, a8, a5
adcs u13, u13, l
mul l, a8, a6
adcs u14, u14, l
mul l, a8, a7
adcs u15, u15, l
adc u16, u16, xzr
umulh l, a8, a0
adds u9, u9, l
umulh l, a8, a1
adcs u10, u10, l
umulh l, a8, a2
adcs u11, u11, l
umulh l, a8, a3
adcs u12, u12, l
umulh l, a8, a4
adcs u13, u13, l
umulh l, a8, a5
adcs u14, u14, l
umulh l, a8, a6
adcs u15, u15, l
umulh l, a8, a7
adc u16, u16, l
// Now we have the full product, which we consider as
// 2^521 * h + l. Form h + l + 1
subs xzr, xzr, xzr
extr l, u9, u8, #9
adcs u0, u0, l
extr l, u10, u9, #9
adcs u1, u1, l
extr l, u11, u10, #9
adcs u2, u2, l
extr l, u12, u11, #9
adcs u3, u3, l
extr l, u13, u12, #9
adcs u4, u4, l
extr l, u14, u13, #9
adcs u5, u5, l
extr l, u15, u14, #9
adcs u6, u6, l
extr l, u16, u15, #9
adcs u7, u7, l
orr u8, u8, #~0x1FF
lsr l, u16, #9
adcs u8, u8, l
// Now CF is set if h + l + 1 >= 2^521, which means it's already
// the answer, while if ~CF the answer is h + l so we should subtract
// 1 (all considered in 521 bits). Hence subtract ~CF and mask.
sbcs u0, u0, xzr
sbcs u1, u1, xzr
sbcs u2, u2, xzr
sbcs u3, u3, xzr
sbcs u4, u4, xzr
sbcs u5, u5, xzr
sbcs u6, u6, xzr
sbcs u7, u7, xzr
sbc u8, u8, xzr
and u8, u8, #0x1FF
// So far, this has been the same as a pure modular squaring
// Now finally the Montgomery ingredient, which is just a 521-bit
// rotation by 9*64 - 521 = 55 bits right.
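// A small Python model of the whole function, assuming x < p_521 (illustrative
// only; since 2^521 == 1 mod p_521, dividing by 2^576 is the same as dividing
// by 2^55, i.e. a right rotation by 55 bits within 521 bits):
//
//     def montsqr_p521(x):
//         p = (1 << 521) - 1
//         s = (x * x) % p                      # plain modular squaring
//         return (s >> 55) | ((s & ((1 << 55) - 1)) << 466)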
lsl l, u0, #9
extr u0, u1, u0, #55
extr u1, u2, u1, #55
extr u2, u3, u2, #55
extr u3, u4, u3, #55
orr u8, u8, l
extr u4, u5, u4, #55
extr u5, u6, u5, #55
extr u6, u7, u6, #55
extr u7, u8, u7, #55
lsr u8, u8, #55
// Store back digits of final result
stp u0, u1, [z]
stp u2, u3, [z, #16]
stp u4, u5, [z, #32]
stp u6, u7, [z, #48]
str u8, [z, #64]
// Restore registers and return
ldp x27, x29, [sp], #16
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
==== marvin-hansen/iggy-streaming-system | 64,566 bytes | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/p521_jscalarmul_alt.S ====
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Jacobian form scalar multiplication for P-521
// Input scalar[9], point[27]; output res[27]
//
// extern void p521_jscalarmul_alt
// (uint64_t res[static 27],
// uint64_t scalar[static 9],
// uint64_t point[static 27]);
//
// This function is a variant of its affine point version p521_scalarmul.
// Here, input and output points are assumed to be in Jacobian form with
// a triple (x,y,z) representing the affine point (x/z^2,y/z^3) when
// z is nonzero or the point at infinity (group identity) if z = 0.
//
// Given scalar = n and point = P, assumed to be on the NIST elliptic
// curve P-521, returns a representation of n * P. If the result is the
// point at infinity (either because the input point was or because the
// scalar was a multiple of the group order n_521) then the output is guaranteed to
// represent the point at infinity, i.e. to have its z coordinate zero.
//
// Standard ARM ABI: X0 = res, X1 = scalar, X2 = point
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jscalarmul_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jscalarmul_alt)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 72
#define JACSIZE (3*NUMSIZE)
// Safe copies of input res and additional values in variables.
#define tabup x15
#define bf x16
#define sgn x17
#define j x19
#define res x20
// Intermediate variables on the stack.
// The table is 16 entries, each of size JACSIZE = 3 * NUMSIZE
#define scalarb sp, #(0*NUMSIZE)
#define acc sp, #(1*NUMSIZE)
#define tabent sp, #(4*NUMSIZE)
#define tab sp, #(7*NUMSIZE)
// Round up to maintain stack alignment
#define NSPACE #(55*NUMSIZE+8)
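// Constant-time table lookup step: if bf == I, copy the 9-word field element
// at tabup into x0..x8 via csel, then step tabup forward by one whole table
// point (JACSIZE) ready for the next candidate index.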
#define selectblock(I) \
cmp bf, #(1*I); \
ldp x10, x11, [tabup]; \
csel x0, x10, x0, eq; \
csel x1, x11, x1, eq; \
ldp x10, x11, [tabup, #16]; \
csel x2, x10, x2, eq; \
csel x3, x11, x3, eq; \
ldp x10, x11, [tabup, #32]; \
csel x4, x10, x4, eq; \
csel x5, x11, x5, eq; \
ldp x10, x11, [tabup, #48]; \
csel x6, x10, x6, eq; \
csel x7, x11, x7, eq; \
ldr x10, [tabup, #64]; \
csel x8, x10, x8, eq; \
add tabup, tabup, #JACSIZE
// Loading large constants
#define movbig(nn,n3,n2,n1,n0) \
movz nn, n0; \
movk nn, n1, lsl #16; \
movk nn, n2, lsl #32; \
movk nn, n3, lsl #48
S2N_BN_SYMBOL(p521_jscalarmul_alt):
stp x19, x20, [sp, #-16]!
stp x21, x30, [sp, #-16]!
sub sp, sp, NSPACE
// Preserve the "res" input argument; others get processed early.
mov res, x0
// Reduce the input scalar mod n_521 and store it to "scalarb".
mov x19, x2
add x0, scalarb
bl p521_jscalarmul_alt_bignum_mod_n521_9
mov x2, x19
// Set the tab[0] table entry to the input point = 1 * P, but also
// reduce all coordinates modulo p. In principle we assume reduction
// as a precondition, but this reduces the scope for surprise, e.g.
// making sure that any input with z = 0 is treated as zero, even
// if the other coordinates are not in fact reduced.
add x0, tab
mov x1, x19
bl p521_jscalarmul_alt_bignum_mod_p521_9
add x0, tab+NUMSIZE
add x1, x19, #NUMSIZE
bl p521_jscalarmul_alt_bignum_mod_p521_9
add x0, tab+2*NUMSIZE
add x1, x19, #(2*NUMSIZE)
bl p521_jscalarmul_alt_bignum_mod_p521_9
// If bit 520 of the scalar is set, then negate the scalar mod n_521,
// i.e. do scalar |-> n_521 - scalar, and also the point to compensate
// by negating its y coordinate. This further step is not needed by
// the indexing scheme (the top window is only a couple of bits either
// way), but is convenient to exclude a problem with the specific value
// scalar = n_521 - 18, where the last Jacobian addition is of the form
// (n_521 - 9) * P + -(9 * P) and hence is a degenerate doubling case.
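// In other words n * P = (n_521 - n) * (-P), since n_521 * P is the identity,
// and negating a Jacobian point just negates its y coordinate mod p_521.
// A tiny Python sketch of that y adjustment (illustrative only):
//
//     def negate_y(y, p=(1 << 521) - 1):
//         return (p - y) % p                   # maps y = 0 (identity encoding) to 0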
ldp x0, x1, [scalarb]
movbig(x10, #0xbb6f, #0xb71e, #0x9138, #0x6409)
subs x10, x10, x0
movbig(x11, #0x3bb5, #0xc9b8, #0x899c, #0x47ae)
sbcs x11, x11, x1
ldp x2, x3, [scalarb+16]
movbig(x12, #0x7fcc, #0x0148, #0xf709, #0xa5d0)
sbcs x12, x12, x2
movbig(x13, #0x5186, #0x8783, #0xbf2f, #0x966b)
sbcs x13, x13, x3
ldp x4, x5, [scalarb+32]
mov x14, 0xfffffffffffffffa
sbcs x14, x14, x4
mov x15, 0xffffffffffffffff
sbcs x15, x15, x5
ldp x6, x7, [scalarb+48]
mov x16, 0xffffffffffffffff
sbcs x16, x16, x6
mov x17, 0xffffffffffffffff
sbcs x17, x17, x7
ldr x8, [scalarb+64]
mov x19, 0x00000000000001ff
sbc x19, x19, x8
tst x8, 0x100
csetm x9, ne
csel x0, x10, x0, ne
csel x1, x11, x1, ne
csel x2, x12, x2, ne
csel x3, x13, x3, ne
csel x4, x14, x4, ne
csel x5, x15, x5, ne
csel x6, x16, x6, ne
csel x7, x17, x7, ne
csel x8, x19, x8, ne
stp x0, x1, [scalarb]
stp x2, x3, [scalarb+16]
stp x4, x5, [scalarb+32]
stp x6, x7, [scalarb+48]
str x8, [scalarb+64]
add tabup, tab
ldp x0, x1, [tabup, #NUMSIZE]
ldp x2, x3, [tabup, #NUMSIZE+16]
ldp x4, x5, [tabup, #NUMSIZE+32]
ldp x6, x7, [tabup, #NUMSIZE+48]
ldr x8, [tabup, #NUMSIZE+64]
orr x10, x0, x1
orr x11, x2, x3
orr x12, x4, x5
orr x13, x6, x7
orr x10, x10, x11
orr x12, x12, x13
orr x12, x12, x8
orr x10, x10, x12
cmp x10, xzr
csel x9, x9, xzr, ne
eor x0, x0, x9
eor x1, x1, x9
eor x2, x2, x9
eor x3, x3, x9
eor x4, x4, x9
eor x5, x5, x9
eor x6, x6, x9
eor x7, x7, x9
and x9, x9, #0x1FF
eor x8, x8, x9
stp x0, x1, [tabup, #NUMSIZE]
stp x2, x3, [tabup, #NUMSIZE+16]
stp x4, x5, [tabup, #NUMSIZE+32]
stp x6, x7, [tabup, #NUMSIZE+48]
str x8, [tabup, #NUMSIZE+64]
// Compute and record tab[1] = 2 * P, ..., tab[15] = 16 * P
add x0, tab+JACSIZE*1
add x1, tab
bl p521_jscalarmul_alt_jdouble
add x0, tab+JACSIZE*2
add x1, tab+JACSIZE*1
add x2, tab
bl p521_jscalarmul_alt_jadd
add x0, tab+JACSIZE*3
add x1, tab+JACSIZE*1
bl p521_jscalarmul_alt_jdouble
add x0, tab+JACSIZE*4
add x1, tab+JACSIZE*3
add x2, tab
bl p521_jscalarmul_alt_jadd
add x0, tab+JACSIZE*5
add x1, tab+JACSIZE*2
bl p521_jscalarmul_alt_jdouble
add x0, tab+JACSIZE*6
add x1, tab+JACSIZE*5
add x2, tab
bl p521_jscalarmul_alt_jadd
add x0, tab+JACSIZE*7
add x1, tab+JACSIZE*3
bl p521_jscalarmul_alt_jdouble
add x0, tab+JACSIZE*8
add x1, tab+JACSIZE*7
add x2, tab
bl p521_jscalarmul_alt_jadd
add x0, tab+JACSIZE*9
add x1, tab+JACSIZE*4
bl p521_jscalarmul_alt_jdouble
add x0, tab+JACSIZE*10
add x1, tab+JACSIZE*9
add x2, tab
bl p521_jscalarmul_alt_jadd
add x0, tab+JACSIZE*11
add x1, tab+JACSIZE*5
bl p521_jscalarmul_alt_jdouble
add x0, tab+JACSIZE*12
add x1, tab+JACSIZE*11
add x2, tab
bl p521_jscalarmul_alt_jadd
add x0, tab+JACSIZE*13
add x1, tab+JACSIZE*6
bl p521_jscalarmul_alt_jdouble
add x0, tab+JACSIZE*14
add x1, tab+JACSIZE*13
add x2, tab
bl p521_jscalarmul_alt_jadd
add x0, tab+JACSIZE*15
add x1, tab+JACSIZE*7
bl p521_jscalarmul_alt_jdouble
// Add the recoding constant sum_i(16 * 32^i) to the scalar to allow signed
// digits. The digits of the constant, in lowest-to-highest order, are as
// follows; they are generated dynamically since none is a simple ARM load.
//
// 0x0842108421084210
// 0x1084210842108421
// 0x2108421084210842
// 0x4210842108421084
// 0x8421084210842108
// 0x0842108421084210
// 0x1084210842108421
// 0x2108421084210842
// 0x0000000000000084
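// A quick Python check of the lowest and highest digits of that constant
// (illustrative only):
//
//     C = sum(16 * 32**i for i in range(104))  # 104 five-bit windows below bit 520
//     assert hex(C & (2**64 - 1)) == '0x842108421084210'
//     assert hex(C >> 512) == '0x84'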
ldp x0, x1, [scalarb]
ldp x2, x3, [scalarb+16]
ldp x4, x5, [scalarb+32]
ldp x6, x7, [scalarb+48]
ldr x8, [scalarb+64]
movbig(x10, #0x1084, #0x2108, #0x4210, #0x8421)
adds x0, x0, x10, lsr #1
adcs x1, x1, x10
lsl x10, x10, #1
adcs x2, x2, x10
lsl x10, x10, #1
adcs x3, x3, x10
lsl x10, x10, #1
adcs x4, x4, x10
lsr x11, x10, #4
adcs x5, x5, x11
lsr x10, x10, #3
adcs x6, x6, x10
lsl x10, x10, #1
adcs x7, x7, x10
lsl x10, x10, #1
and x10, x10, #0xFF
adc x8, x8, x10
// Because of the initial reduction the top bitfield (>= bits 520) is <= 1,
// i.e. just a single bit. Record that in "bf", then shift the whole
// scalar left 56 bits to align the top of the next bitfield with the MSB
// (bits 571..575).
lsr bf, x8, #8
extr x8, x8, x7, #8
extr x7, x7, x6, #8
extr x6, x6, x5, #8
extr x5, x5, x4, #8
extr x4, x4, x3, #8
extr x3, x3, x2, #8
extr x2, x2, x1, #8
extr x1, x1, x0, #8
lsl x0, x0, #56
stp x0, x1, [scalarb]
stp x2, x3, [scalarb+16]
stp x4, x5, [scalarb+32]
stp x6, x7, [scalarb+48]
str x8, [scalarb+64]
// According to the top bit, initialize the accumulator to P or 0. This top
// digit, uniquely, is not recoded so there is no sign adjustment to make.
// We only really need to adjust the z coordinate to zero, but do all three.
add tabup, tab
cmp bf, xzr
ldp x0, x1, [tabup]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc]
ldp x0, x1, [tabup, #16]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+16]
ldp x0, x1, [tabup, #32]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+32]
ldp x0, x1, [tabup, #48]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+48]
ldp x0, x1, [tabup, #64]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+64]
ldp x0, x1, [tabup, #80]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+80]
ldp x0, x1, [tabup, #96]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+96]
ldp x0, x1, [tabup, #112]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+112]
ldp x0, x1, [tabup, #128]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+128]
ldp x0, x1, [tabup, #144]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+144]
ldp x0, x1, [tabup, #160]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+160]
ldp x0, x1, [tabup, #176]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+176]
ldp x0, x1, [tabup, #192]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+192]
ldr x0, [tabup, #208]
csel x0, x0, xzr, ne
str x0, [acc+208]
// Main loop over size-5 bitfields: double 5 times then add signed digit
// At each stage we shift the scalar left by 5 bits so we can simply pick
// the top 5 bits as the bitfield, saving some fiddle over indexing.
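// An integer-only Python model of the window arithmetic (illustrative; an
// integer k stands in for the point k * P, so "doubling" is *2 and the signed
// digit itself stands in for the table lookup):
//
//     def scalarmul_model(n):                  # n = reduced scalar, before adding C
//         C = sum(16 * 32**i for i in range(104))
//         m = n + C
//         acc = m >> 520                       # the single top bit
//         for i in reversed(range(104)):
//             acc = 32 * acc + (((m >> (5 * i)) & 31) - 16)
//         return acc                           # always equals n
//
//     assert all(scalarmul_model(n) == n for n in range(1000))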
mov j, #520
p521_jscalarmul_alt_mainloop:
sub j, j, #5
add x0, acc
add x1, acc
bl p521_jscalarmul_alt_jdouble
add x0, acc
add x1, acc
bl p521_jscalarmul_alt_jdouble
add x0, acc
add x1, acc
bl p521_jscalarmul_alt_jdouble
add x0, acc
add x1, acc
bl p521_jscalarmul_alt_jdouble
add x0, acc
add x1, acc
bl p521_jscalarmul_alt_jdouble
// Choose the bitfield and adjust it to sign and magnitude
ldp x0, x1, [scalarb]
ldp x2, x3, [scalarb+16]
ldp x4, x5, [scalarb+32]
ldp x6, x7, [scalarb+48]
ldr x8, [scalarb+64]
lsr bf, x8, #59
extr x8, x8, x7, #59
extr x7, x7, x6, #59
extr x6, x6, x5, #59
extr x5, x5, x4, #59
extr x4, x4, x3, #59
extr x3, x3, x2, #59
extr x2, x2, x1, #59
extr x1, x1, x0, #59
lsl x0, x0, #5
stp x0, x1, [scalarb]
stp x2, x3, [scalarb+16]
stp x4, x5, [scalarb+32]
stp x6, x7, [scalarb+48]
str x8, [scalarb+64]
subs bf, bf, #16
csetm sgn, lo // sgn = sign of digit (1 = negative)
cneg bf, bf, lo // bf = absolute value of digit
// Conditionally select the table entry tab[i-1] = i * P in constant time
mov x0, xzr
mov x1, xzr
mov x2, xzr
mov x3, xzr
mov x4, xzr
mov x5, xzr
mov x6, xzr
mov x7, xzr
mov x8, xzr
add tabup, tab
selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)
selectblock(9)
selectblock(10)
selectblock(11)
selectblock(12)
selectblock(13)
selectblock(14)
selectblock(15)
selectblock(16)
stp x0, x1, [tabent]
stp x2, x3, [tabent+16]
stp x4, x5, [tabent+32]
stp x6, x7, [tabent+48]
str x8, [tabent+64]
mov x0, xzr
mov x1, xzr
mov x2, xzr
mov x3, xzr
mov x4, xzr
mov x5, xzr
mov x6, xzr
mov x7, xzr
mov x8, xzr
add tabup, tab+2*NUMSIZE
selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)
selectblock(9)
selectblock(10)
selectblock(11)
selectblock(12)
selectblock(13)
selectblock(14)
selectblock(15)
selectblock(16)
stp x0, x1, [tabent+2*NUMSIZE]
stp x2, x3, [tabent+2*NUMSIZE+16]
stp x4, x5, [tabent+2*NUMSIZE+32]
stp x6, x7, [tabent+2*NUMSIZE+48]
str x8, [tabent+2*NUMSIZE+64]
mov x0, xzr
mov x1, xzr
mov x2, xzr
mov x3, xzr
mov x4, xzr
mov x5, xzr
mov x6, xzr
mov x7, xzr
mov x8, xzr
add tabup, tab+NUMSIZE
selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)
selectblock(9)
selectblock(10)
selectblock(11)
selectblock(12)
selectblock(13)
selectblock(14)
selectblock(15)
selectblock(16)
// Store it to "tabent" with the y coordinate optionally negated.
// This is done carefully to give coordinates < p_521 even in
// the degenerate case y = 0 (when z = 0 for points on the curve).
orr x10, x0, x1
orr x11, x2, x3
orr x12, x4, x5
orr x13, x6, x7
orr x10, x10, x11
orr x12, x12, x13
orr x12, x12, x8
orr x10, x10, x12
cmp x10, xzr
csel sgn, sgn, xzr, ne
eor x0, x0, sgn
eor x1, x1, sgn
eor x2, x2, sgn
eor x3, x3, sgn
eor x4, x4, sgn
eor x5, x5, sgn
eor x6, x6, sgn
eor x7, x7, sgn
and sgn, sgn, #0x1FF
eor x8, x8, sgn
stp x0, x1, [tabent+NUMSIZE]
stp x2, x3, [tabent+NUMSIZE+16]
stp x4, x5, [tabent+NUMSIZE+32]
stp x6, x7, [tabent+NUMSIZE+48]
str x8, [tabent+NUMSIZE+64]
// Add to the accumulator
add x0, acc
add x1, acc
add x2, tabent
bl p521_jscalarmul_alt_jadd
cbnz j, p521_jscalarmul_alt_mainloop
// That's the end of the main loop, and we just need to copy the
// result in "acc" to the output.
ldp x0, x1, [acc]
stp x0, x1, [res]
ldp x0, x1, [acc+16]
stp x0, x1, [res, #16]
ldp x0, x1, [acc+32]
stp x0, x1, [res, #32]
ldp x0, x1, [acc+48]
stp x0, x1, [res, #48]
ldp x0, x1, [acc+64]
stp x0, x1, [res, #64]
ldp x0, x1, [acc+80]
stp x0, x1, [res, #80]
ldp x0, x1, [acc+96]
stp x0, x1, [res, #96]
ldp x0, x1, [acc+112]
stp x0, x1, [res, #112]
ldp x0, x1, [acc+128]
stp x0, x1, [res, #128]
ldp x0, x1, [acc+144]
stp x0, x1, [res, #144]
ldp x0, x1, [acc+160]
stp x0, x1, [res, #160]
ldp x0, x1, [acc+176]
stp x0, x1, [res, #176]
ldp x0, x1, [acc+192]
stp x0, x1, [res, #192]
ldr x0, [acc+208]
str x0, [res, #208]
// Restore stack and registers and return
add sp, sp, NSPACE
ldp x21, x30, [sp], 16
ldp x19, x20, [sp], 16
ret
// Local copies of subroutines, complete clones at the moment except
// that we share multiplication and squaring between the point operations.
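// Reduce the 9-word input at x1 modulo p_521, writing the 9-word result to x0.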
p521_jscalarmul_alt_bignum_mod_p521_9:
ldr x12, [x1, #64]
lsr x2, x12, #9
cmp xzr, xzr
ldp x4, x5, [x1]
adcs xzr, x4, x2
adcs xzr, x5, xzr
ldp x6, x7, [x1, #16]
and x3, x6, x7
adcs xzr, x3, xzr
ldp x8, x9, [x1, #32]
and x3, x8, x9
adcs xzr, x3, xzr
ldp x10, x11, [x1, #48]
and x3, x10, x11
adcs xzr, x3, xzr
orr x3, x12, #0xfffffffffffffe00
adcs x3, x3, xzr
adcs x4, x4, x2
adcs x5, x5, xzr
adcs x6, x6, xzr
adcs x7, x7, xzr
adcs x8, x8, xzr
adcs x9, x9, xzr
adcs x10, x10, xzr
adcs x11, x11, xzr
adc x12, x12, xzr
and x12, x12, #0x1ff
stp x4, x5, [x0]
stp x6, x7, [x0, #16]
stp x8, x9, [x0, #32]
stp x10, x11, [x0, #48]
str x12, [x0, #64]
ret
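// Reduce the 9-word input at x1 modulo the group order n_521, result to x0.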
p521_jscalarmul_alt_bignum_mod_n521_9:
ldr x14, [x1, #64]
lsr x15, x14, #9
add x15, x15, #1
mov x2, #39927
movk x2, #28359, lsl #16
movk x2, #18657, lsl #32
movk x2, #17552, lsl #48
mul x6, x2, x15
mov x3, #47185
movk x3, #30307, lsl #16
movk x3, #13895, lsl #32
movk x3, #50250, lsl #48
mul x7, x3, x15
mov x4, #23087
movk x4, #2294, lsl #16
movk x4, #65207, lsl #32
movk x4, #32819, lsl #48
mul x8, x4, x15
mov x5, #27028
movk x5, #16592, lsl #16
movk x5, #30844, lsl #32
movk x5, #44665, lsl #48
mul x9, x5, x15
lsl x10, x15, #2
add x10, x10, x15
umulh x13, x2, x15
adds x7, x7, x13
umulh x13, x3, x15
adcs x8, x8, x13
umulh x13, x4, x15
adcs x9, x9, x13
umulh x13, x5, x15
adc x10, x10, x13
ldp x12, x13, [x1]
adds x6, x6, x12
adcs x7, x7, x13
ldp x12, x13, [x1, #16]
adcs x8, x8, x12
adcs x9, x9, x13
ldp x13, x11, [x1, #32]
adcs x10, x10, x13
adcs x11, x11, xzr
ldp x12, x13, [x1, #48]
adcs x12, x12, xzr
adcs x13, x13, xzr
orr x14, x14, #0xfffffffffffffe00
adcs x14, x14, xzr
csetm x15, lo
and x2, x2, x15
subs x6, x6, x2
and x3, x3, x15
sbcs x7, x7, x3
and x4, x4, x15
sbcs x8, x8, x4
and x5, x5, x15
sbcs x9, x9, x5
mov x2, #5
and x2, x2, x15
sbcs x10, x10, x2
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
and x14, x14, #0x1ff
stp x6, x7, [x0]
stp x8, x9, [x0, #16]
stp x10, x11, [x0, #32]
stp x12, x13, [x0, #48]
str x14, [x0, #64]
ret
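// Jacobian point addition (clone of p521_jadd): point at x0 := point at x1 +
// point at x2, with the final selection logic handling inputs whose z = 0.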
p521_jscalarmul_alt_jadd:
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x29, x30, [sp, #-16]!
sub sp, sp, #0x240
mov x27, x0
mov x28, x1
mov x29, x2
mov x0, sp
add x1, x28, #0x90
bl p521_jscalarmul_alt_sqr_p521
add x0, sp, #0x168
add x1, x29, #0x90
bl p521_jscalarmul_alt_sqr_p521
add x0, sp, #0x1f8
add x1, x29, #0x90
add x2, x28, #0x48
bl p521_jscalarmul_alt_mul_p521
add x0, sp, #0x48
add x1, x28, #0x90
add x2, x29, #0x48
bl p521_jscalarmul_alt_mul_p521
add x0, sp, #0x90
mov x1, sp
add x2, x29, #0x0
bl p521_jscalarmul_alt_mul_p521
add x0, sp, #0x120
add x1, sp, #0x168
add x2, x28, #0x0
bl p521_jscalarmul_alt_mul_p521
add x0, sp, #0x48
mov x1, sp
add x2, sp, #0x48
bl p521_jscalarmul_alt_mul_p521
add x0, sp, #0x1f8
add x1, sp, #0x168
add x2, sp, #0x1f8
bl p521_jscalarmul_alt_mul_p521
add x0, sp, #0x168
add x1, sp, #0x90
add x2, sp, #0x120
bl p521_jscalarmul_alt_sub_p521
add x0, sp, #0x48
add x1, sp, #0x48
add x2, sp, #0x1f8
bl p521_jscalarmul_alt_sub_p521
add x0, sp, #0xd8
add x1, sp, #0x168
bl p521_jscalarmul_alt_sqr_p521
mov x0, sp
add x1, sp, #0x48
bl p521_jscalarmul_alt_sqr_p521
add x0, sp, #0x120
add x1, sp, #0xd8
add x2, sp, #0x120
bl p521_jscalarmul_alt_mul_p521
add x0, sp, #0x90
add x1, sp, #0xd8
add x2, sp, #0x90
bl p521_jscalarmul_alt_mul_p521
mov x0, sp
mov x1, sp
add x2, sp, #0x120
bl p521_jscalarmul_alt_sub_p521
add x0, sp, #0xd8
add x1, sp, #0x90
add x2, sp, #0x120
bl p521_jscalarmul_alt_sub_p521
add x0, sp, #0x168
add x1, sp, #0x168
add x2, x28, #0x90
bl p521_jscalarmul_alt_mul_p521
mov x0, sp
mov x1, sp
add x2, sp, #0x90
bl p521_jscalarmul_alt_sub_p521
add x0, sp, #0x120
add x1, sp, #0x120
mov x2, sp
bl p521_jscalarmul_alt_sub_p521
add x0, sp, #0xd8
add x1, sp, #0xd8
add x2, sp, #0x1f8
bl p521_jscalarmul_alt_mul_p521
add x0, sp, #0x168
add x1, sp, #0x168
add x2, x29, #0x90
bl p521_jscalarmul_alt_mul_p521
add x0, sp, #0x120
add x1, sp, #0x48
add x2, sp, #0x120
bl p521_jscalarmul_alt_mul_p521
add x0, sp, #0x120
add x1, sp, #0x120
add x2, sp, #0xd8
bl p521_jscalarmul_alt_sub_p521
ldp x0, x1, [x28, #144]
ldp x2, x3, [x28, #160]
ldp x4, x5, [x28, #176]
ldp x6, x7, [x28, #192]
ldr x8, [x28, #208]
orr x20, x0, x1
orr x21, x2, x3
orr x22, x4, x5
orr x23, x6, x7
orr x20, x20, x21
orr x22, x22, x23
orr x20, x20, x8
orr x20, x20, x22
cmp x20, xzr
cset x20, ne
ldp x10, x11, [x29, #144]
ldp x12, x13, [x29, #160]
ldp x14, x15, [x29, #176]
ldp x16, x17, [x29, #192]
ldr x19, [x29, #208]
orr x21, x10, x11
orr x22, x12, x13
orr x23, x14, x15
orr x24, x16, x17
orr x21, x21, x22
orr x23, x23, x24
orr x21, x21, x19
orr x21, x21, x23
csel x0, x0, x10, ne
csel x1, x1, x11, ne
csel x2, x2, x12, ne
csel x3, x3, x13, ne
csel x4, x4, x14, ne
csel x5, x5, x15, ne
csel x6, x6, x16, ne
csel x7, x7, x17, ne
csel x8, x8, x19, ne
cmp x21, xzr
cset x21, ne
cmp x21, x20
ldp x10, x11, [sp, #360]
ldp x12, x13, [sp, #376]
ldp x14, x15, [sp, #392]
ldp x16, x17, [sp, #408]
ldr x19, [sp, #424]
csel x0, x0, x10, ne
csel x1, x1, x11, ne
csel x2, x2, x12, ne
csel x3, x3, x13, ne
csel x4, x4, x14, ne
csel x5, x5, x15, ne
csel x6, x6, x16, ne
csel x7, x7, x17, ne
csel x8, x8, x19, ne
stp x0, x1, [sp, #360]
stp x2, x3, [sp, #376]
stp x4, x5, [sp, #392]
stp x6, x7, [sp, #408]
str x8, [sp, #424]
ldp x20, x21, [x28]
ldp x0, x1, [sp]
csel x0, x20, x0, cc
csel x1, x21, x1, cc
ldp x20, x21, [x29]
csel x0, x20, x0, hi
csel x1, x21, x1, hi
ldp x20, x21, [x28, #16]
ldp x2, x3, [sp, #16]
csel x2, x20, x2, cc
csel x3, x21, x3, cc
ldp x20, x21, [x29, #16]
csel x2, x20, x2, hi
csel x3, x21, x3, hi
ldp x20, x21, [x28, #32]
ldp x4, x5, [sp, #32]
csel x4, x20, x4, cc
csel x5, x21, x5, cc
ldp x20, x21, [x29, #32]
csel x4, x20, x4, hi
csel x5, x21, x5, hi
ldp x20, x21, [x28, #48]
ldp x6, x7, [sp, #48]
csel x6, x20, x6, cc
csel x7, x21, x7, cc
ldp x20, x21, [x29, #48]
csel x6, x20, x6, hi
csel x7, x21, x7, hi
ldr x20, [x28, #64]
ldr x8, [sp, #64]
csel x8, x20, x8, cc
ldr x21, [x29, #64]
csel x8, x21, x8, hi
ldp x20, x21, [x28, #72]
ldp x10, x11, [sp, #288]
csel x10, x20, x10, cc
csel x11, x21, x11, cc
ldp x20, x21, [x29, #72]
csel x10, x20, x10, hi
csel x11, x21, x11, hi
ldp x20, x21, [x28, #88]
ldp x12, x13, [sp, #304]
csel x12, x20, x12, cc
csel x13, x21, x13, cc
ldp x20, x21, [x29, #88]
csel x12, x20, x12, hi
csel x13, x21, x13, hi
ldp x20, x21, [x28, #104]
ldp x14, x15, [sp, #320]
csel x14, x20, x14, cc
csel x15, x21, x15, cc
ldp x20, x21, [x29, #104]
csel x14, x20, x14, hi
csel x15, x21, x15, hi
ldp x20, x21, [x28, #120]
ldp x16, x17, [sp, #336]
csel x16, x20, x16, cc
csel x17, x21, x17, cc
ldp x20, x21, [x29, #120]
csel x16, x20, x16, hi
csel x17, x21, x17, hi
ldr x20, [x28, #136]
ldr x19, [sp, #352]
csel x19, x20, x19, cc
ldr x21, [x29, #136]
csel x19, x21, x19, hi
stp x0, x1, [x27]
stp x2, x3, [x27, #16]
stp x4, x5, [x27, #32]
stp x6, x7, [x27, #48]
str x8, [x27, #64]
ldp x0, x1, [sp, #360]
ldp x2, x3, [sp, #376]
ldp x4, x5, [sp, #392]
ldp x6, x7, [sp, #408]
ldr x8, [sp, #424]
stp x10, x11, [x27, #72]
stp x12, x13, [x27, #88]
stp x14, x15, [x27, #104]
stp x16, x17, [x27, #120]
str x19, [x27, #136]
stp x0, x1, [x27, #144]
stp x2, x3, [x27, #160]
stp x4, x5, [x27, #176]
stp x6, x7, [x27, #192]
str x8, [x27, #208]
add sp, sp, #0x240
ldp x29, x30, [sp], #16
ldp x27, x28, [sp], #16
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
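// Jacobian point doubling (clone of p521_jdouble): point at x0 := 2 * point at x1.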
p521_jscalarmul_alt_jdouble:
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x29, x30, [sp, #-16]!
sub sp, sp, #0x200
mov x27, x0
mov x28, x1
mov x0, sp
add x1, x28, #0x90
bl p521_jscalarmul_alt_sqr_p521
add x0, sp, #0x48
add x1, x28, #0x48
bl p521_jscalarmul_alt_sqr_p521
ldp x5, x6, [x28]
ldp x4, x3, [sp]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [x28, #16]
ldp x4, x3, [sp, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [x28, #32]
ldp x4, x3, [sp, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
ldp x11, x12, [x28, #48]
ldp x4, x3, [sp, #48]
sbcs x11, x11, x4
sbcs x12, x12, x3
ldr x13, [x28, #64]
ldr x4, [sp, #64]
sbcs x13, x13, x4
sbcs x5, x5, xzr
sbcs x6, x6, xzr
sbcs x7, x7, xzr
sbcs x8, x8, xzr
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbcs x13, x13, xzr
and x13, x13, #0x1ff
stp x5, x6, [sp, #216]
stp x7, x8, [sp, #232]
stp x9, x10, [sp, #248]
stp x11, x12, [sp, #264]
str x13, [sp, #280]
cmp xzr, xzr
ldp x5, x6, [x28]
ldp x4, x3, [sp]
adcs x5, x5, x4
adcs x6, x6, x3
ldp x7, x8, [x28, #16]
ldp x4, x3, [sp, #16]
adcs x7, x7, x4
adcs x8, x8, x3
ldp x9, x10, [x28, #32]
ldp x4, x3, [sp, #32]
adcs x9, x9, x4
adcs x10, x10, x3
ldp x11, x12, [x28, #48]
ldp x4, x3, [sp, #48]
adcs x11, x11, x4
adcs x12, x12, x3
ldr x13, [x28, #64]
ldr x4, [sp, #64]
adc x13, x13, x4
subs x4, x13, #0x200
csetm x4, cs
sbcs x5, x5, xzr
and x4, x4, #0x200
sbcs x6, x6, xzr
sbcs x7, x7, xzr
sbcs x8, x8, xzr
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbc x13, x13, x4
stp x5, x6, [sp, #144]
stp x7, x8, [sp, #160]
stp x9, x10, [sp, #176]
stp x11, x12, [sp, #192]
str x13, [sp, #208]
add x0, sp, #0xd8
add x1, sp, #0x90
add x2, sp, #0xd8
bl p521_jscalarmul_alt_mul_p521
cmp xzr, xzr
ldp x5, x6, [x28, #72]
ldp x4, x3, [x28, #144]
adcs x5, x5, x4
adcs x6, x6, x3
ldp x7, x8, [x28, #88]
ldp x4, x3, [x28, #160]
adcs x7, x7, x4
adcs x8, x8, x3
ldp x9, x10, [x28, #104]
ldp x4, x3, [x28, #176]
adcs x9, x9, x4
adcs x10, x10, x3
ldp x11, x12, [x28, #120]
ldp x4, x3, [x28, #192]
adcs x11, x11, x4
adcs x12, x12, x3
ldr x13, [x28, #136]
ldr x4, [x28, #208]
adc x13, x13, x4
subs x4, x13, #0x200
csetm x4, cs
sbcs x5, x5, xzr
and x4, x4, #0x200
sbcs x6, x6, xzr
sbcs x7, x7, xzr
sbcs x8, x8, xzr
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbc x13, x13, x4
stp x5, x6, [sp, #144]
stp x7, x8, [sp, #160]
stp x9, x10, [sp, #176]
stp x11, x12, [sp, #192]
str x13, [sp, #208]
add x0, sp, #0x120
add x1, x28, #0x0
add x2, sp, #0x48
bl p521_jscalarmul_alt_mul_p521
add x0, sp, #0x168
add x1, sp, #0xd8
bl p521_jscalarmul_alt_sqr_p521
add x0, sp, #0x90
add x1, sp, #0x90
bl p521_jscalarmul_alt_sqr_p521
ldp x6, x7, [sp, #288]
mov x1, #0xc
mul x3, x1, x6
mul x4, x1, x7
umulh x6, x1, x6
adds x4, x4, x6
umulh x7, x1, x7
ldp x8, x9, [sp, #304]
mul x5, x1, x8
mul x6, x1, x9
umulh x8, x1, x8
adcs x5, x5, x7
umulh x9, x1, x9
adcs x6, x6, x8
ldp x10, x11, [sp, #320]
mul x7, x1, x10
mul x8, x1, x11
umulh x10, x1, x10
adcs x7, x7, x9
umulh x11, x1, x11
adcs x8, x8, x10
ldp x12, x13, [sp, #336]
mul x9, x1, x12
mul x10, x1, x13
umulh x12, x1, x12
adcs x9, x9, x11
umulh x13, x1, x13
adcs x10, x10, x12
ldr x14, [sp, #352]
mul x11, x1, x14
adc x11, x11, x13
mov x1, #0x9
ldp x20, x21, [sp, #360]
mvn x20, x20
mul x0, x1, x20
umulh x20, x1, x20
adds x3, x3, x0
mvn x21, x21
mul x0, x1, x21
umulh x21, x1, x21
adcs x4, x4, x0
ldp x22, x23, [sp, #376]
mvn x22, x22
mul x0, x1, x22
umulh x22, x1, x22
adcs x5, x5, x0
mvn x23, x23
mul x0, x1, x23
umulh x23, x1, x23
adcs x6, x6, x0
ldp x17, x19, [sp, #392]
mvn x17, x17
mul x0, x1, x17
umulh x17, x1, x17
adcs x7, x7, x0
mvn x19, x19
mul x0, x1, x19
umulh x19, x1, x19
adcs x8, x8, x0
ldp x2, x16, [sp, #408]
mvn x2, x2
mul x0, x1, x2
umulh x2, x1, x2
adcs x9, x9, x0
mvn x16, x16
mul x0, x1, x16
umulh x16, x1, x16
adcs x10, x10, x0
ldr x0, [sp, #424]
eor x0, x0, #0x1ff
mul x0, x1, x0
adc x11, x11, x0
adds x4, x4, x20
adcs x5, x5, x21
and x15, x4, x5
adcs x6, x6, x22
and x15, x15, x6
adcs x7, x7, x23
and x15, x15, x7
adcs x8, x8, x17
and x15, x15, x8
adcs x9, x9, x19
and x15, x15, x9
adcs x10, x10, x2
and x15, x15, x10
adc x11, x11, x16
lsr x12, x11, #9
orr x11, x11, #0xfffffffffffffe00
cmp xzr, xzr
adcs xzr, x3, x12
adcs xzr, x15, xzr
adcs xzr, x11, xzr
adcs x3, x3, x12
adcs x4, x4, xzr
adcs x5, x5, xzr
adcs x6, x6, xzr
adcs x7, x7, xzr
adcs x8, x8, xzr
adcs x9, x9, xzr
adcs x10, x10, xzr
adc x11, x11, xzr
and x11, x11, #0x1ff
stp x3, x4, [sp, #360]
stp x5, x6, [sp, #376]
stp x7, x8, [sp, #392]
stp x9, x10, [sp, #408]
str x11, [sp, #424]
ldp x5, x6, [sp, #144]
ldp x4, x3, [sp]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #160]
ldp x4, x3, [sp, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #176]
ldp x4, x3, [sp, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
ldp x11, x12, [sp, #192]
ldp x4, x3, [sp, #48]
sbcs x11, x11, x4
sbcs x12, x12, x3
ldr x13, [sp, #208]
ldr x4, [sp, #64]
sbcs x13, x13, x4
sbcs x5, x5, xzr
sbcs x6, x6, xzr
sbcs x7, x7, xzr
sbcs x8, x8, xzr
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbcs x13, x13, xzr
and x13, x13, #0x1ff
stp x5, x6, [sp, #144]
stp x7, x8, [sp, #160]
stp x9, x10, [sp, #176]
stp x11, x12, [sp, #192]
str x13, [sp, #208]
mov x0, sp
add x1, sp, #0x48
bl p521_jscalarmul_alt_sqr_p521
add x0, sp, #0xd8
add x1, sp, #0x168
add x2, sp, #0xd8
bl p521_jscalarmul_alt_mul_p521
ldp x5, x6, [sp, #144]
ldp x4, x3, [sp, #72]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #160]
ldp x4, x3, [sp, #88]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #176]
ldp x4, x3, [sp, #104]
sbcs x9, x9, x4
sbcs x10, x10, x3
ldp x11, x12, [sp, #192]
ldp x4, x3, [sp, #120]
sbcs x11, x11, x4
sbcs x12, x12, x3
ldr x13, [sp, #208]
ldr x4, [sp, #136]
sbcs x13, x13, x4
sbcs x5, x5, xzr
sbcs x6, x6, xzr
sbcs x7, x7, xzr
sbcs x8, x8, xzr
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbcs x13, x13, xzr
and x13, x13, #0x1ff
stp x5, x6, [x27, #144]
stp x7, x8, [x27, #160]
stp x9, x10, [x27, #176]
stp x11, x12, [x27, #192]
str x13, [x27, #208]
ldp x6, x7, [sp, #288]
lsl x3, x6, #2
extr x4, x7, x6, #62
ldp x8, x9, [sp, #304]
extr x5, x8, x7, #62
extr x6, x9, x8, #62
ldp x10, x11, [sp, #320]
extr x7, x10, x9, #62
extr x8, x11, x10, #62
ldp x12, x13, [sp, #336]
extr x9, x12, x11, #62
extr x10, x13, x12, #62
ldr x14, [sp, #352]
extr x11, x14, x13, #62
ldp x0, x1, [sp, #360]
mvn x0, x0
adds x3, x3, x0
sbcs x4, x4, x1
ldp x0, x1, [sp, #376]
sbcs x5, x5, x0
and x15, x4, x5
sbcs x6, x6, x1
and x15, x15, x6
ldp x0, x1, [sp, #392]
sbcs x7, x7, x0
and x15, x15, x7
sbcs x8, x8, x1
and x15, x15, x8
ldp x0, x1, [sp, #408]
sbcs x9, x9, x0
and x15, x15, x9
sbcs x10, x10, x1
and x15, x15, x10
ldr x0, [sp, #424]
eor x0, x0, #0x1ff
adc x11, x11, x0
lsr x12, x11, #9
orr x11, x11, #0xfffffffffffffe00
cmp xzr, xzr
adcs xzr, x3, x12
adcs xzr, x15, xzr
adcs xzr, x11, xzr
adcs x3, x3, x12
adcs x4, x4, xzr
adcs x5, x5, xzr
adcs x6, x6, xzr
adcs x7, x7, xzr
adcs x8, x8, xzr
adcs x9, x9, xzr
adcs x10, x10, xzr
adc x11, x11, xzr
and x11, x11, #0x1ff
stp x3, x4, [x27]
stp x5, x6, [x27, #16]
stp x7, x8, [x27, #32]
stp x9, x10, [x27, #48]
str x11, [x27, #64]
ldp x6, x7, [sp, #216]
lsl x3, x6, #1
adds x3, x3, x6
extr x4, x7, x6, #63
adcs x4, x4, x7
ldp x8, x9, [sp, #232]
extr x5, x8, x7, #63
adcs x5, x5, x8
extr x6, x9, x8, #63
adcs x6, x6, x9
ldp x10, x11, [sp, #248]
extr x7, x10, x9, #63
adcs x7, x7, x10
extr x8, x11, x10, #63
adcs x8, x8, x11
ldp x12, x13, [sp, #264]
extr x9, x12, x11, #63
adcs x9, x9, x12
extr x10, x13, x12, #63
adcs x10, x10, x13
ldr x14, [sp, #280]
extr x11, x14, x13, #63
adc x11, x11, x14
ldp x20, x21, [sp]
mvn x20, x20
lsl x0, x20, #3
adds x3, x3, x0
mvn x21, x21
extr x0, x21, x20, #61
adcs x4, x4, x0
ldp x22, x23, [sp, #16]
mvn x22, x22
extr x0, x22, x21, #61
adcs x5, x5, x0
and x15, x4, x5
mvn x23, x23
extr x0, x23, x22, #61
adcs x6, x6, x0
and x15, x15, x6
ldp x20, x21, [sp, #32]
mvn x20, x20
extr x0, x20, x23, #61
adcs x7, x7, x0
and x15, x15, x7
mvn x21, x21
extr x0, x21, x20, #61
adcs x8, x8, x0
and x15, x15, x8
ldp x22, x23, [sp, #48]
mvn x22, x22
extr x0, x22, x21, #61
adcs x9, x9, x0
and x15, x15, x9
mvn x23, x23
extr x0, x23, x22, #61
adcs x10, x10, x0
and x15, x15, x10
ldr x0, [sp, #64]
eor x0, x0, #0x1ff
extr x0, x0, x23, #61
adc x11, x11, x0
lsr x12, x11, #9
orr x11, x11, #0xfffffffffffffe00
cmp xzr, xzr
adcs xzr, x3, x12
adcs xzr, x15, xzr
adcs xzr, x11, xzr
adcs x3, x3, x12
adcs x4, x4, xzr
adcs x5, x5, xzr
adcs x6, x6, xzr
adcs x7, x7, xzr
adcs x8, x8, xzr
adcs x9, x9, xzr
adcs x10, x10, xzr
adc x11, x11, xzr
and x11, x11, #0x1ff
stp x3, x4, [x27, #72]
stp x5, x6, [x27, #88]
stp x7, x8, [x27, #104]
stp x9, x10, [x27, #120]
str x11, [x27, #136]
add sp, sp, #0x200
ldp x29, x30, [sp], #16
ldp x27, x28, [sp], #16
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
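// Shared modular multiplication: 9-word field element at x0 := (x1) * (x2)
// mod p_521, using scratch space inside the caller's stack frame.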
p521_jscalarmul_alt_mul_p521:
ldp x3, x4, [x1]
ldp x5, x6, [x2]
mul x15, x3, x5
umulh x16, x3, x5
mul x14, x3, x6
umulh x17, x3, x6
adds x16, x16, x14
ldp x7, x8, [x2, #16]
mul x14, x3, x7
umulh x19, x3, x7
adcs x17, x17, x14
mul x14, x3, x8
umulh x20, x3, x8
adcs x19, x19, x14
ldp x9, x10, [x2, #32]
mul x14, x3, x9
umulh x21, x3, x9
adcs x20, x20, x14
mul x14, x3, x10
umulh x22, x3, x10
adcs x21, x21, x14
ldp x11, x12, [x2, #48]
mul x14, x3, x11
umulh x23, x3, x11
adcs x22, x22, x14
ldr x13, [x2, #64]
mul x14, x3, x12
umulh x24, x3, x12
adcs x23, x23, x14
mul x14, x3, x13
umulh x25, x3, x13
adcs x24, x24, x14
adc x25, x25, xzr
mul x14, x4, x5
adds x16, x16, x14
mul x14, x4, x6
adcs x17, x17, x14
mul x14, x4, x7
adcs x19, x19, x14
mul x14, x4, x8
adcs x20, x20, x14
mul x14, x4, x9
adcs x21, x21, x14
mul x14, x4, x10
adcs x22, x22, x14
mul x14, x4, x11
adcs x23, x23, x14
mul x14, x4, x12
adcs x24, x24, x14
mul x14, x4, x13
adcs x25, x25, x14
cset x26, cs
umulh x14, x4, x5
adds x17, x17, x14
umulh x14, x4, x6
adcs x19, x19, x14
umulh x14, x4, x7
adcs x20, x20, x14
umulh x14, x4, x8
adcs x21, x21, x14
umulh x14, x4, x9
adcs x22, x22, x14
umulh x14, x4, x10
adcs x23, x23, x14
umulh x14, x4, x11
adcs x24, x24, x14
umulh x14, x4, x12
adcs x25, x25, x14
umulh x14, x4, x13
adc x26, x26, x14
stp x15, x16, [sp, #432]
ldp x3, x4, [x1, #16]
mul x14, x3, x5
adds x17, x17, x14
mul x14, x3, x6
adcs x19, x19, x14
mul x14, x3, x7
adcs x20, x20, x14
mul x14, x3, x8
adcs x21, x21, x14
mul x14, x3, x9
adcs x22, x22, x14
mul x14, x3, x10
adcs x23, x23, x14
mul x14, x3, x11
adcs x24, x24, x14
mul x14, x3, x12
adcs x25, x25, x14
mul x14, x3, x13
adcs x26, x26, x14
cset x15, cs
umulh x14, x3, x5
adds x19, x19, x14
umulh x14, x3, x6
adcs x20, x20, x14
umulh x14, x3, x7
adcs x21, x21, x14
umulh x14, x3, x8
adcs x22, x22, x14
umulh x14, x3, x9
adcs x23, x23, x14
umulh x14, x3, x10
adcs x24, x24, x14
umulh x14, x3, x11
adcs x25, x25, x14
umulh x14, x3, x12
adcs x26, x26, x14
umulh x14, x3, x13
adc x15, x15, x14
mul x14, x4, x5
adds x19, x19, x14
mul x14, x4, x6
adcs x20, x20, x14
mul x14, x4, x7
adcs x21, x21, x14
mul x14, x4, x8
adcs x22, x22, x14
mul x14, x4, x9
adcs x23, x23, x14
mul x14, x4, x10
adcs x24, x24, x14
mul x14, x4, x11
adcs x25, x25, x14
mul x14, x4, x12
adcs x26, x26, x14
mul x14, x4, x13
adcs x15, x15, x14
cset x16, cs
umulh x14, x4, x5
adds x20, x20, x14
umulh x14, x4, x6
adcs x21, x21, x14
umulh x14, x4, x7
adcs x22, x22, x14
umulh x14, x4, x8
adcs x23, x23, x14
umulh x14, x4, x9
adcs x24, x24, x14
umulh x14, x4, x10
adcs x25, x25, x14
umulh x14, x4, x11
adcs x26, x26, x14
umulh x14, x4, x12
adcs x15, x15, x14
umulh x14, x4, x13
adc x16, x16, x14
stp x17, x19, [sp, #448]
ldp x3, x4, [x1, #32]
mul x14, x3, x5
adds x20, x20, x14
mul x14, x3, x6
adcs x21, x21, x14
mul x14, x3, x7
adcs x22, x22, x14
mul x14, x3, x8
adcs x23, x23, x14
mul x14, x3, x9
adcs x24, x24, x14
mul x14, x3, x10
adcs x25, x25, x14
mul x14, x3, x11
adcs x26, x26, x14
mul x14, x3, x12
adcs x15, x15, x14
mul x14, x3, x13
adcs x16, x16, x14
cset x17, cs
umulh x14, x3, x5
adds x21, x21, x14
umulh x14, x3, x6
adcs x22, x22, x14
umulh x14, x3, x7
adcs x23, x23, x14
umulh x14, x3, x8
adcs x24, x24, x14
umulh x14, x3, x9
adcs x25, x25, x14
umulh x14, x3, x10
adcs x26, x26, x14
umulh x14, x3, x11
adcs x15, x15, x14
umulh x14, x3, x12
adcs x16, x16, x14
umulh x14, x3, x13
adc x17, x17, x14
mul x14, x4, x5
adds x21, x21, x14
mul x14, x4, x6
adcs x22, x22, x14
mul x14, x4, x7
adcs x23, x23, x14
mul x14, x4, x8
adcs x24, x24, x14
mul x14, x4, x9
adcs x25, x25, x14
mul x14, x4, x10
adcs x26, x26, x14
mul x14, x4, x11
adcs x15, x15, x14
mul x14, x4, x12
adcs x16, x16, x14
mul x14, x4, x13
adcs x17, x17, x14
cset x19, cs
umulh x14, x4, x5
adds x22, x22, x14
umulh x14, x4, x6
adcs x23, x23, x14
umulh x14, x4, x7
adcs x24, x24, x14
umulh x14, x4, x8
adcs x25, x25, x14
umulh x14, x4, x9
adcs x26, x26, x14
umulh x14, x4, x10
adcs x15, x15, x14
umulh x14, x4, x11
adcs x16, x16, x14
umulh x14, x4, x12
adcs x17, x17, x14
umulh x14, x4, x13
adc x19, x19, x14
stp x20, x21, [sp, #464]
ldp x3, x4, [x1, #48]
mul x14, x3, x5
adds x22, x22, x14
mul x14, x3, x6
adcs x23, x23, x14
mul x14, x3, x7
adcs x24, x24, x14
mul x14, x3, x8
adcs x25, x25, x14
mul x14, x3, x9
adcs x26, x26, x14
mul x14, x3, x10
adcs x15, x15, x14
mul x14, x3, x11
adcs x16, x16, x14
mul x14, x3, x12
adcs x17, x17, x14
mul x14, x3, x13
adcs x19, x19, x14
cset x20, cs
umulh x14, x3, x5
adds x23, x23, x14
umulh x14, x3, x6
adcs x24, x24, x14
umulh x14, x3, x7
adcs x25, x25, x14
umulh x14, x3, x8
adcs x26, x26, x14
umulh x14, x3, x9
adcs x15, x15, x14
umulh x14, x3, x10
adcs x16, x16, x14
umulh x14, x3, x11
adcs x17, x17, x14
umulh x14, x3, x12
adcs x19, x19, x14
umulh x14, x3, x13
adc x20, x20, x14
mul x14, x4, x5
adds x23, x23, x14
mul x14, x4, x6
adcs x24, x24, x14
mul x14, x4, x7
adcs x25, x25, x14
mul x14, x4, x8
adcs x26, x26, x14
mul x14, x4, x9
adcs x15, x15, x14
mul x14, x4, x10
adcs x16, x16, x14
mul x14, x4, x11
adcs x17, x17, x14
mul x14, x4, x12
adcs x19, x19, x14
mul x14, x4, x13
adcs x20, x20, x14
cset x21, cs
umulh x14, x4, x5
adds x24, x24, x14
umulh x14, x4, x6
adcs x25, x25, x14
umulh x14, x4, x7
adcs x26, x26, x14
umulh x14, x4, x8
adcs x15, x15, x14
umulh x14, x4, x9
adcs x16, x16, x14
umulh x14, x4, x10
adcs x17, x17, x14
umulh x14, x4, x11
adcs x19, x19, x14
umulh x14, x4, x12
adcs x20, x20, x14
umulh x14, x4, x13
adc x21, x21, x14
stp x22, x23, [sp, #480]
ldr x3, [x1, #64]
mul x14, x3, x5
adds x24, x24, x14
mul x14, x3, x6
adcs x25, x25, x14
mul x14, x3, x7
adcs x26, x26, x14
mul x14, x3, x8
adcs x15, x15, x14
mul x14, x3, x9
adcs x16, x16, x14
mul x14, x3, x10
adcs x17, x17, x14
mul x14, x3, x11
adcs x19, x19, x14
mul x14, x3, x12
adcs x20, x20, x14
mul x14, x3, x13
adc x21, x21, x14
umulh x14, x3, x5
adds x25, x25, x14
umulh x14, x3, x6
adcs x26, x26, x14
umulh x14, x3, x7
adcs x15, x15, x14
umulh x14, x3, x8
adcs x16, x16, x14
umulh x14, x3, x9
adcs x17, x17, x14
umulh x14, x3, x10
adcs x19, x19, x14
umulh x14, x3, x11
adcs x20, x20, x14
umulh x14, x3, x12
adc x21, x21, x14
cmp xzr, xzr
ldp x5, x6, [sp, #432]
extr x14, x25, x24, #9
adcs x5, x5, x14
extr x14, x26, x25, #9
adcs x6, x6, x14
ldp x7, x8, [sp, #448]
extr x14, x15, x26, #9
adcs x7, x7, x14
extr x14, x16, x15, #9
adcs x8, x8, x14
ldp x9, x10, [sp, #464]
extr x14, x17, x16, #9
adcs x9, x9, x14
extr x14, x19, x17, #9
adcs x10, x10, x14
ldp x11, x12, [sp, #480]
extr x14, x20, x19, #9
adcs x11, x11, x14
extr x14, x21, x20, #9
adcs x12, x12, x14
orr x13, x24, #0xfffffffffffffe00
lsr x14, x21, #9
adcs x13, x13, x14
sbcs x5, x5, xzr
sbcs x6, x6, xzr
sbcs x7, x7, xzr
sbcs x8, x8, xzr
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbc x13, x13, xzr
and x13, x13, #0x1ff
stp x5, x6, [x0]
stp x7, x8, [x0, #16]
stp x9, x10, [x0, #32]
stp x11, x12, [x0, #48]
str x13, [x0, #64]
ret
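// Shared modular squaring: 9-word field element at x0 := (x1)^2 mod p_521.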
p521_jscalarmul_alt_sqr_p521:
ldp x2, x3, [x1]
mul x11, x2, x3
umulh x12, x2, x3
ldp x4, x5, [x1, #16]
mul x10, x2, x4
umulh x13, x2, x4
adds x12, x12, x10
ldp x6, x7, [x1, #32]
mul x10, x2, x5
umulh x14, x2, x5
adcs x13, x13, x10
ldp x8, x9, [x1, #48]
mul x10, x2, x6
umulh x15, x2, x6
adcs x14, x14, x10
mul x10, x2, x7
umulh x16, x2, x7
adcs x15, x15, x10
mul x10, x2, x8
umulh x17, x2, x8
adcs x16, x16, x10
mul x10, x2, x9
umulh x19, x2, x9
adcs x17, x17, x10
adc x19, x19, xzr
mul x10, x3, x4
adds x13, x13, x10
mul x10, x3, x5
adcs x14, x14, x10
mul x10, x3, x6
adcs x15, x15, x10
mul x10, x3, x7
adcs x16, x16, x10
mul x10, x3, x8
adcs x17, x17, x10
mul x10, x3, x9
adcs x19, x19, x10
cset x20, cs
umulh x10, x3, x4
adds x14, x14, x10
umulh x10, x3, x5
adcs x15, x15, x10
umulh x10, x3, x6
adcs x16, x16, x10
umulh x10, x3, x7
adcs x17, x17, x10
umulh x10, x3, x8
adcs x19, x19, x10
umulh x10, x3, x9
adc x20, x20, x10
mul x10, x6, x7
umulh x21, x6, x7
adds x20, x20, x10
adc x21, x21, xzr
mul x10, x4, x5
adds x15, x15, x10
mul x10, x4, x6
adcs x16, x16, x10
mul x10, x4, x7
adcs x17, x17, x10
mul x10, x4, x8
adcs x19, x19, x10
mul x10, x4, x9
adcs x20, x20, x10
mul x10, x6, x8
adcs x21, x21, x10
cset x22, cs
umulh x10, x4, x5
adds x16, x16, x10
umulh x10, x4, x6
adcs x17, x17, x10
umulh x10, x4, x7
adcs x19, x19, x10
umulh x10, x4, x8
adcs x20, x20, x10
umulh x10, x4, x9
adcs x21, x21, x10
umulh x10, x6, x8
adc x22, x22, x10
mul x10, x7, x8
umulh x23, x7, x8
adds x22, x22, x10
adc x23, x23, xzr
mul x10, x5, x6
adds x17, x17, x10
mul x10, x5, x7
adcs x19, x19, x10
mul x10, x5, x8
adcs x20, x20, x10
mul x10, x5, x9
adcs x21, x21, x10
mul x10, x6, x9
adcs x22, x22, x10
mul x10, x7, x9
adcs x23, x23, x10
cset x24, cs
umulh x10, x5, x6
adds x19, x19, x10
umulh x10, x5, x7
adcs x20, x20, x10
umulh x10, x5, x8
adcs x21, x21, x10
umulh x10, x5, x9
adcs x22, x22, x10
umulh x10, x6, x9
adcs x23, x23, x10
umulh x10, x7, x9
adc x24, x24, x10
mul x10, x8, x9
umulh x25, x8, x9
adds x24, x24, x10
adc x25, x25, xzr
adds x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
adcs x15, x15, x15
adcs x16, x16, x16
adcs x17, x17, x17
adcs x19, x19, x19
adcs x20, x20, x20
adcs x21, x21, x21
adcs x22, x22, x22
adcs x23, x23, x23
adcs x24, x24, x24
adcs x25, x25, x25
cset x26, cs
umulh x10, x2, x2
adds x11, x11, x10
mul x10, x3, x3
adcs x12, x12, x10
umulh x10, x3, x3
adcs x13, x13, x10
mul x10, x4, x4
adcs x14, x14, x10
umulh x10, x4, x4
adcs x15, x15, x10
mul x10, x5, x5
adcs x16, x16, x10
umulh x10, x5, x5
adcs x17, x17, x10
mul x10, x6, x6
adcs x19, x19, x10
umulh x10, x6, x6
adcs x20, x20, x10
mul x10, x7, x7
adcs x21, x21, x10
umulh x10, x7, x7
adcs x22, x22, x10
mul x10, x8, x8
adcs x23, x23, x10
umulh x10, x8, x8
adcs x24, x24, x10
mul x10, x9, x9
adcs x25, x25, x10
umulh x10, x9, x9
adc x26, x26, x10
ldr x1, [x1, #64]
add x1, x1, x1
mul x10, x1, x2
adds x19, x19, x10
umulh x10, x1, x2
adcs x20, x20, x10
mul x10, x1, x4
adcs x21, x21, x10
umulh x10, x1, x4
adcs x22, x22, x10
mul x10, x1, x6
adcs x23, x23, x10
umulh x10, x1, x6
adcs x24, x24, x10
mul x10, x1, x8
adcs x25, x25, x10
umulh x10, x1, x8
adcs x26, x26, x10
lsr x4, x1, #1
mul x4, x4, x4
adc x4, x4, xzr
mul x10, x1, x3
adds x20, x20, x10
umulh x10, x1, x3
adcs x21, x21, x10
mul x10, x1, x5
adcs x22, x22, x10
umulh x10, x1, x5
adcs x23, x23, x10
mul x10, x1, x7
adcs x24, x24, x10
umulh x10, x1, x7
adcs x25, x25, x10
mul x10, x1, x9
adcs x26, x26, x10
umulh x10, x1, x9
adc x4, x4, x10
mul x2, x2, x2
cmp xzr, xzr
extr x10, x20, x19, #9
adcs x2, x2, x10
extr x10, x21, x20, #9
adcs x11, x11, x10
extr x10, x22, x21, #9
adcs x12, x12, x10
extr x10, x23, x22, #9
adcs x13, x13, x10
extr x10, x24, x23, #9
adcs x14, x14, x10
extr x10, x25, x24, #9
adcs x15, x15, x10
extr x10, x26, x25, #9
adcs x16, x16, x10
extr x10, x4, x26, #9
adcs x17, x17, x10
orr x19, x19, #0xfffffffffffffe00
lsr x10, x4, #9
adcs x19, x19, x10
sbcs x2, x2, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbcs x14, x14, xzr
sbcs x15, x15, xzr
sbcs x16, x16, xzr
sbcs x17, x17, xzr
sbc x19, x19, xzr
and x19, x19, #0x1ff
stp x2, x11, [x0]
stp x12, x13, [x0, #16]
stp x14, x15, [x0, #32]
stp x16, x17, [x0, #48]
str x19, [x0, #64]
ret
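// Modular subtraction: 9-word field element at x0 := (x1) - (x2) mod p_521.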
p521_jscalarmul_alt_sub_p521:
ldp x5, x6, [x1]
ldp x4, x3, [x2]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [x1, #16]
ldp x4, x3, [x2, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [x1, #32]
ldp x4, x3, [x2, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
ldp x11, x12, [x1, #48]
ldp x4, x3, [x2, #48]
sbcs x11, x11, x4
sbcs x12, x12, x3
ldr x13, [x1, #64]
ldr x4, [x2, #64]
sbcs x13, x13, x4
sbcs x5, x5, xzr
sbcs x6, x6, xzr
sbcs x7, x7, xzr
sbcs x8, x8, xzr
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbcs x13, x13, xzr
and x13, x13, #0x1ff
stp x5, x6, [x0]
stp x7, x8, [x0, #16]
stp x9, x10, [x0, #32]
stp x11, x12, [x0, #48]
str x13, [x0, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
==== marvin-hansen/iggy-streaming-system | 18,396 bytes | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_montmul_p521.S ====
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery multiply, z := (x * y / 2^576) mod p_521
// Inputs x[9], y[9]; output z[9]
//
// extern void bignum_montmul_p521
// (uint64_t z[static 9], uint64_t x[static 9], uint64_t y[static 9]);
//
// Does z := (x * y / 2^576) mod p_521, assuming x < p_521, y < p_521. This
// means the Montgomery base is the "native size" 2^{9*64} = 2^576; since
// p_521 is a Mersenne prime the basic modular multiplication bignum_mul_p521
// can be considered a Montgomery operation to base 2^521.
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p521)
.text
.balign 4
// ---------------------------------------------------------------------------
// Macro computing [c,b,a] := [b,a] + (x - y) * (w - z), adding with carry
// to the [b,a] components but leaving CF aligned with the c term, which is
// a sign bitmask for (x - y) * (w - z). Continued add-with-carry operations
// with [c,...,c] will continue the carry chain correctly starting from
// the c position if desired to add to a longer term of the form [...,b,a].
//
// c,h,l,t should all be different and t,h should not overlap w,z.
// ---------------------------------------------------------------------------
#define muldiffnadd(b,a,x,y,w,z) \
subs t, x, y; \
cneg t, t, cc; \
csetm c, cc; \
subs h, w, z; \
cneg h, h, cc; \
mul l, t, h; \
umulh h, t, h; \
cinv c, c, cc; \
adds xzr, c, #1; \
eor l, l, c; \
adcs a, a, l; \
eor h, h, c; \
adcs b, b, h
#define z x0
#define x x1
#define y x2
#define a0 x3
#define a1 x4
#define a2 x5
#define a3 x6
#define b0 x7
#define b1 x8
#define b2 x9
#define b3 x10
#define s0 x11
#define s1 x12
#define s2 x13
#define s3 x14
#define s4 x15
#define s5 x16
#define s6 x17
#define s7 x19
#define s8 x20
#define c x21
#define h x22
#define l x23
#define t x24
#define s x25
#define u x26
// ---------------------------------------------------------------------------
// Core 4x4->8 ADK multiplication macro
// Does [s7,s6,s5,s4,s3,s2,s1,s0] = [a3,a2,a1,a0] * [b3,b2,b1,b0]
// ---------------------------------------------------------------------------
#define mul4 \
/* First accumulate all the "simple" products as [s7,s6,s5,s4,s0] */ \
\
mul s0, a0, b0; \
mul s4, a1, b1; \
mul s5, a2, b2; \
mul s6, a3, b3; \
\
umulh s7, a0, b0; \
adds s4, s4, s7; \
umulh s7, a1, b1; \
adcs s5, s5, s7; \
umulh s7, a2, b2; \
adcs s6, s6, s7; \
umulh s7, a3, b3; \
adc s7, s7, xzr; \
\
/* Multiply by B + 1 to get [s7;s6;s5;s4;s1;s0] */ \
\
adds s1, s4, s0; \
adcs s4, s5, s4; \
adcs s5, s6, s5; \
adcs s6, s7, s6; \
adc s7, xzr, s7; \
\
/* Multiply by B^2 + 1 to get [s7;s6;s5;s4;s3;s2;s1;s0] */ \
\
adds s2, s4, s0; \
adcs s3, s5, s1; \
adcs s4, s6, s4; \
adcs s5, s7, s5; \
adcs s6, xzr, s6; \
adc s7, xzr, s7; \
\
/* Now add in all the "complicated" terms. */ \
\
muldiffnadd(s6,s5, a2,a3, b3,b2); \
adc s7, s7, c; \
\
muldiffnadd(s2,s1, a0,a1, b1,b0); \
adcs s3, s3, c; \
adcs s4, s4, c; \
adcs s5, s5, c; \
adcs s6, s6, c; \
adc s7, s7, c; \
\
muldiffnadd(s5,s4, a1,a3, b3,b1); \
adcs s6, s6, c; \
adc s7, s7, c; \
\
muldiffnadd(s3,s2, a0,a2, b2,b0); \
adcs s4, s4, c; \
adcs s5, s5, c; \
adcs s6, s6, c; \
adc s7, s7, c; \
\
muldiffnadd(s4,s3, a0,a3, b3,b0); \
adcs s5, s5, c; \
adcs s6, s6, c; \
adc s7, s7, c; \
muldiffnadd(s4,s3, a1,a2, b2,b1); \
adcs s5, s5, c; \
adcs s6, s6, c; \
adc s7, s7, c \
S2N_BN_SYMBOL(bignum_montmul_p521):
// Save registers and make space for the temporary buffer
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
sub sp, sp, #80
// Load 4-digit low parts and multiply them to get L
ldp a0, a1, [x]
ldp a2, a3, [x, #16]
ldp b0, b1, [y]
ldp b2, b3, [y, #16]
mul4
// Shift right 256 bits modulo p_521 and stash in temp buffer
lsl c, s0, #9
extr s0, s1, s0, #55
extr s1, s2, s1, #55
extr s2, s3, s2, #55
lsr s3, s3, #55
stp s4, s5, [sp]
stp s6, s7, [sp, #16]
stp c, s0, [sp, #32]
stp s1, s2, [sp, #48]
str s3, [sp, #64]
// Load 4-digit high parts and multiply them to get H
ldp a0, a1, [x, #32]
ldp a2, a3, [x, #48]
ldp b0, b1, [y, #32]
ldp b2, b3, [y, #48]
mul4
// Add to the existing temporary buffer and re-stash.
// This gives a result HL congruent to (2^256 * H + L) / 2^256 modulo p_521
ldp l, h, [sp]
adds s0, s0, l
adcs s1, s1, h
stp s0, s1, [sp]
ldp l, h, [sp, #16]
adcs s2, s2, l
adcs s3, s3, h
stp s2, s3, [sp, #16]
ldp l, h, [sp, #32]
adcs s4, s4, l
adcs s5, s5, h
stp s4, s5, [sp, #32]
ldp l, h, [sp, #48]
adcs s6, s6, l
adcs s7, s7, h
stp s6, s7, [sp, #48]
ldr c, [sp, #64]
adc c, c, xzr
str c, [sp, #64]
// Compute t,[a3,a2,a1,a0] = x_hi - x_lo
// and s,[b3,b2,b1,b0] = y_lo - y_hi
// sign-magnitude differences, then XOR overall sign bitmask into s
ldp l, h, [x]
subs a0, a0, l
sbcs a1, a1, h
ldp l, h, [x, #16]
sbcs a2, a2, l
sbcs a3, a3, h
csetm t, cc
ldp l, h, [y]
subs b0, l, b0
sbcs b1, h, b1
ldp l, h, [y, #16]
sbcs b2, l, b2
sbcs b3, h, b3
csetm s, cc
eor a0, a0, t
subs a0, a0, t
eor a1, a1, t
sbcs a1, a1, t
eor a2, a2, t
sbcs a2, a2, t
eor a3, a3, t
sbc a3, a3, t
eor b0, b0, s
subs b0, b0, s
eor b1, b1, s
sbcs b1, b1, s
eor b2, b2, s
sbcs b2, b2, s
eor b3, b3, s
sbc b3, b3, s
eor s, s, t
// Now do yet a third 4x4 multiply to get mid-term product M
mul4
// We now want, at the 256 position, 2^256 * HL + HL + (-1)^s * M
// To keep things positive we use M' = p_521 - M in place of -M,
// and this notion of negation just amounts to complementation in 521 bits.
// Fold in the re-addition of the appropriately scaled lowest 4 words
// The initial result is [s8; b3;b2;b1;b0; s7;s6;s5;s4;s3;s2;s1;s0]
// Rebase it as a 9-word value at the 512 bit position using
// [s8; b3;b2;b1;b0; s7;s6;s5;s4;s3;s2;s1;s0] ==
// [s8; b3;b2;b1;b0; s7;s6;s5;s4] + 2^265 * [s3;s2;s1;s0] =
// [([s8; b3;b2;b1;b0] + 2^9 * [s3;s2;s1;s0]); s7;s6;s5;s4]
//
// Accumulate as [s8; b3;b2;b1;b0; s7;s6;s5;s4] but leave out an additional
// small c (s8 + suspended carry) to add at the 256 position here (512
// overall). This can be added in the next block (to b0 = sum4).
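// A quick Python check of that rebasing identity (illustrative only; X stands
// for the 9-word high part, Y for the 4-word low part):
//
//     p = (1 << 521) - 1
//     X, Y = 123456789 << 300, 987654321       # arbitrary sample values
//     assert ((X * 2**256 + Y) * 2**256) % p == ((X + Y * 2**265) * 2**512) % p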
ldp a0, a1, [sp]
ldp a2, a3, [sp, #16]
eor s0, s0, s
adds s0, s0, a0
eor s1, s1, s
adcs s1, s1, a1
eor s2, s2, s
adcs s2, s2, a2
eor s3, s3, s
adcs s3, s3, a3
eor s4, s4, s
ldp b0, b1, [sp, #32]
ldp b2, b3, [sp, #48]
ldr s8, [sp, #64]
adcs s4, s4, b0
eor s5, s5, s
adcs s5, s5, b1
eor s6, s6, s
adcs s6, s6, b2
eor s7, s7, s
adcs s7, s7, b3
adc c, s8, xzr
adds s4, s4, a0
adcs s5, s5, a1
adcs s6, s6, a2
adcs s7, s7, a3
and s, s, #0x1FF
lsl t, s0, #9
orr t, t, s
adcs b0, b0, t
extr t, s1, s0, #55
adcs b1, b1, t
extr t, s2, s1, #55
adcs b2, b2, t
extr t, s3, s2, #55
adcs b3, b3, t
lsr t, s3, #55
adc s8, t, s8
// Augment the total with the contribution from the top little words
// w and v. If we write the inputs as 2^512 * w + x and 2^512 * v + y
// then we are otherwise just doing x * y so we actually need to add
// 2^512 * (2^512 * w * v + w * y + v * x). We do this in an involved
// way, chopping x and y into 52-bit chunks so we can do most of the core
// arithmetic using only basic muls, no umulh (since w, v are only 9 bits).
// This does however involve some intricate bit-splicing plus arithmetic.
// To make things marginally less confusing we introduce some new names
// at the human level: x = [c7;...;c0] and y = [d7;...d0], which are
// not all distinct, and [sum8;sum7;...;sum0] for the running sum.
// Also accumulate u = sum1 AND ... AND sum7 for the later comparison
#define sum0 s4
#define sum1 s5
#define sum2 s6
#define sum3 s7
#define sum4 b0
#define sum5 b1
#define sum6 b2
#define sum7 b3
#define sum8 s8
#define c0 a0
#define c1 a1
#define c2 a2
#define c3 a0
#define c4 a1
#define c5 a2
#define c6 a0
#define c7 a1
#define d0 s0
#define d1 s1
#define d2 s2
#define d3 s0
#define d4 s1
#define d5 s2
#define d6 s0
#define d7 s1
#define v a3
#define w s3
// 0 * 52 = 64 * 0 + 0
ldr v, [y, #64]
ldp c0, c1, [x]
and l, c0, #0x000fffffffffffff
mul l, v, l
ldr w, [x, #64]
ldp d0, d1, [y]
and t, d0, #0x000fffffffffffff
mul t, w, t
add l, l, t
// 1 * 52 = 64 * 0 + 52
extr t, c1, c0, #52
and t, t, #0x000fffffffffffff
mul h, v, t
extr t, d1, d0, #52
and t, t, #0x000fffffffffffff
mul t, w, t
add h, h, t
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #12
adds sum0, sum0, t
// 2 * 52 = 64 * 1 + 40
ldp c2, c3, [x, #16]
ldp d2, d3, [y, #16]
extr t, c2, c1, #40
and t, t, #0x000fffffffffffff
mul l, v, t
extr t, d2, d1, #40
and t, t, #0x000fffffffffffff
mul t, w, t
add l, l, t
lsr t, h, #52
add l, l, t
lsl h, h, #12
extr t, l, h, #24
adcs sum1, sum1, t
// 3 * 52 = 64 * 2 + 28
extr t, c3, c2, #28
and t, t, #0x000fffffffffffff
mul h, v, t
extr t, d3, d2, #28
and t, t, #0x000fffffffffffff
mul t, w, t
add h, h, t
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #36
adcs sum2, sum2, t
and u, sum1, sum2
// 4 * 52 = 64 * 3 + 16
// At this point we also fold in the addition of c at the right place.
// Note that 4 * 64 = 4 * 52 + 48 so we shift c left 48 places to align.
ldp c4, c5, [x, #32]
ldp d4, d5, [y, #32]
extr t, c4, c3, #16
and t, t, #0x000fffffffffffff
mul l, v, t
extr t, d4, d3, #16
and t, t, #0x000fffffffffffff
mul t, w, t
add l, l, t
lsl c, c, #48
add l, l, c
lsr t, h, #52
add l, l, t
lsl h, h, #12
extr t, l, h, #48
adcs sum3, sum3, t
and u, u, sum3
// 5 * 52 = 64 * 4 + 4
lsr t, c4, #4
and t, t, #0x000fffffffffffff
mul h, v, t
lsr t, d4, #4
and t, t, #0x000fffffffffffff
mul t, w, t
add h, h, t
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr s, h, l, #60
// 6 * 52 = 64 * 4 + 56
extr t, c5, c4, #56
and t, t, #0x000fffffffffffff
mul l, v, t
extr t, d5, d4, #56
and t, t, #0x000fffffffffffff
mul t, w, t
add l, l, t
lsr t, h, #52
add l, l, t
lsl s, s, #8
extr t, l, s, #8
adcs sum4, sum4, t
and u, u, sum4
// 7 * 52 = 64 * 5 + 44
ldp c6, c7, [x, #48]
ldp d6, d7, [y, #48]
extr t, c6, c5, #44
and t, t, #0x000fffffffffffff
mul h, v, t
extr t, d6, d5, #44
and t, t, #0x000fffffffffffff
mul t, w, t
add h, h, t
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #20
adcs sum5, sum5, t
and u, u, sum5
// 8 * 52 = 64 * 6 + 32
extr t, c7, c6, #32
and t, t, #0x000fffffffffffff
mul l, v, t
extr t, d7, d6, #32
and t, t, #0x000fffffffffffff
mul t, w, t
add l, l, t
lsr t, h, #52
add l, l, t
lsl h, h, #12
extr t, l, h, #32
adcs sum6, sum6, t
and u, u, sum6
// 9 * 52 = 64 * 7 + 20
lsr t, c7, #20
mul h, v, t
lsr t, d7, #20
mul t, w, t
add h, h, t
lsr t, l, #52
add h, h, t
lsl l, l, #12
extr t, h, l, #44
adcs sum7, sum7, t
and u, u, sum7
// Top word
mul t, v, w
lsr h, h, #44
add t, t, h
adc sum8, sum8, t
// Extract the high part h and mask off the low part l = [sum8;sum7;...;sum0]
// but stuff sum8 with 1 bits at the left to ease a comparison below
lsr h, sum8, #9
orr sum8, sum8, #~0x1FF
// Decide whether h + l >= p_521 <=> h + l + 1 >= 2^521. Since this can only
// happen if digits sum7,...sum1 are all 1s, we use the AND of them "u" to
// condense the carry chain, and since we stuffed 1 bits into sum8 we get
// the result in CF without an additional comparison.
subs xzr, xzr, xzr
adcs xzr, sum0, h
adcs xzr, u, xzr
adcs xzr, sum8, xzr
// Now if CF is set we want (h + l) - p_521 = (h + l + 1) - 2^521
// while otherwise we want just h + l. So mask h + l + CF to 521 bits.
// The masking is combined with the writeback in the next block.
adcs sum0, sum0, h
adcs sum1, sum1, xzr
adcs sum2, sum2, xzr
adcs sum3, sum3, xzr
adcs sum4, sum4, xzr
adcs sum5, sum5, xzr
adcs sum6, sum6, xzr
adcs sum7, sum7, xzr
adc sum8, sum8, xzr
// The result is actually [sum8;...;sum0] == product / 2^512, since we are
// in the 512 position. For Montgomery we want product / 2^576, so write
// back [sum8;...;sum0] rotated right by 64 bits, as a 521-bit quantity.
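// (Rotating right by 64 bits within 521 bits is exact division by 2^64
// modulo p_521, since 2^521 == 1 mod p_521.)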
stp sum1, sum2, [z]
stp sum3, sum4, [z, #16]
stp sum5, sum6, [z, #32]
lsl h, sum0, #9
and sum8, sum8, #0x1FF
orr sum8, sum8, h
stp sum7, sum8, [z, #48]
lsr sum0, sum0, #55
str sum0, [z, #64]
// Restore regs and return
add sp, sp, #80
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
marvin-hansen/iggy-streaming-system
| 79,446
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/p521_jscalarmul.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Jacobian form scalar multiplication for P-521
// Input scalar[9], point[27]; output res[27]
//
// extern void p521_jscalarmul
// (uint64_t res[static 27],
// uint64_t scalar[static 9],
// uint64_t point[static 27]);
//
// This function is a variant of its affine point version p521_scalarmul.
// Here, input and output points are assumed to be in Jacobian form with
// a triple (x,y,z) representing the affine point (x/z^2,y/z^3) when
// z is nonzero or the point at infinity (group identity) if z = 0.
//
// Given scalar = n and point = P, assumed to be on the NIST elliptic
// curve P-521, returns a representation of n * P. If the result is the
// point at infinity (either because the input point was or because the
// scalar was a multiple of the group order n_521) then the output is guaranteed to
// represent the point at infinity, i.e. to have its z coordinate zero.
//
// Standard ARM ABI: X0 = res, X1 = scalar, X2 = point
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jscalarmul)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jscalarmul)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 72
#define JACSIZE (3*NUMSIZE)
// Safe copy of the input pointer res, plus additional working variables.
#define tabup x15
#define bf x16
#define sgn x17
#define j x19
#define res x20
// Intermediate variables on the stack.
// The table is 16 entries, each of size JACSIZE = 3 * NUMSIZE
#define scalarb sp, #(0*NUMSIZE)
#define acc sp, #(1*NUMSIZE)
#define tabent sp, #(4*NUMSIZE)
#define tab sp, #(7*NUMSIZE)
// Round up to maintain stack alignment
#define NSPACE #(55*NUMSIZE+8)
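// The table tab occupies 16 * JACSIZE = 48 * NUMSIZE bytes starting at offset
// 7 * NUMSIZE, for 55 * NUMSIZE in all; the extra 8 bytes keep sp 16-aligned.
// The selectblock macro below compares the index bf with I, conditionally
// copying one 9-word coordinate from tabup into x0-x8 when they match, and
// always advances tabup by JACSIZE to the same coordinate of the next entry.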
#define selectblock(I) \
cmp bf, #(1*I); \
ldp x10, x11, [tabup]; \
csel x0, x10, x0, eq; \
csel x1, x11, x1, eq; \
ldp x10, x11, [tabup, #16]; \
csel x2, x10, x2, eq; \
csel x3, x11, x3, eq; \
ldp x10, x11, [tabup, #32]; \
csel x4, x10, x4, eq; \
csel x5, x11, x5, eq; \
ldp x10, x11, [tabup, #48]; \
csel x6, x10, x6, eq; \
csel x7, x11, x7, eq; \
ldr x10, [tabup, #64]; \
csel x8, x10, x8, eq; \
add tabup, tabup, #JACSIZE
// Loading large constants
#define movbig(nn,n3,n2,n1,n0) \
movz nn, n0; \
movk nn, n1, lsl #16; \
movk nn, n2, lsl #32; \
movk nn, n3, lsl #48
S2N_BN_SYMBOL(p521_jscalarmul):
stp x19, x20, [sp, #-16]!
stp x21, x30, [sp, #-16]!
sub sp, sp, NSPACE
// Preserve the "res" input argument; others get processed early.
mov res, x0
// Reduce the input scalar mod n_521 and store it to "scalarb".
mov x19, x2
add x0, scalarb
bl p521_jscalarmul_bignum_mod_n521_9
mov x2, x19
// Set the tab[0] table entry to the input point = 1 * P, but also
// reduce all coordinates modulo p_521. In principle we assume reduction
// as a precondition, but this reduces the scope for surprise, e.g.
// making sure that any input with z = 0 is treated as zero, even
// if the other coordinates are not in fact reduced.
add x0, tab
mov x1, x19
bl p521_jscalarmul_bignum_mod_p521_9
add x0, tab+NUMSIZE
add x1, x19, #NUMSIZE
bl p521_jscalarmul_bignum_mod_p521_9
add x0, tab+2*NUMSIZE
add x1, x19, #(2*NUMSIZE)
bl p521_jscalarmul_bignum_mod_p521_9
// If bit 520 of the scalar is set, then negate the scalar mod n_521,
// i.e. do scalar |-> n_521 - scalar, and also the point to compensate
// by negating its y coordinate. This further step is not needed by
// the indexing scheme (the top window is only a couple of bits either
// way), but is convenient to exclude a problem with the specific value
// scalar = n_521 - 18, where the last Jacobian addition is of the form
// (n_521 - 9) * P + -(9 * P) and hence is a degenerate doubling case.
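// The constants materialized below are the nine 64-bit words of n_521.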
ldp x0, x1, [scalarb]
movbig(x10, #0xbb6f, #0xb71e, #0x9138, #0x6409)
subs x10, x10, x0
movbig(x11, #0x3bb5, #0xc9b8, #0x899c, #0x47ae)
sbcs x11, x11, x1
ldp x2, x3, [scalarb+16]
movbig(x12, #0x7fcc, #0x0148, #0xf709, #0xa5d0)
sbcs x12, x12, x2
movbig(x13, #0x5186, #0x8783, #0xbf2f, #0x966b)
sbcs x13, x13, x3
ldp x4, x5, [scalarb+32]
mov x14, 0xfffffffffffffffa
sbcs x14, x14, x4
mov x15, 0xffffffffffffffff
sbcs x15, x15, x5
ldp x6, x7, [scalarb+48]
mov x16, 0xffffffffffffffff
sbcs x16, x16, x6
mov x17, 0xffffffffffffffff
sbcs x17, x17, x7
ldr x8, [scalarb+64]
mov x19, 0x00000000000001ff
sbc x19, x19, x8
tst x8, 0x100
csetm x9, ne
csel x0, x10, x0, ne
csel x1, x11, x1, ne
csel x2, x12, x2, ne
csel x3, x13, x3, ne
csel x4, x14, x4, ne
csel x5, x15, x5, ne
csel x6, x16, x6, ne
csel x7, x17, x7, ne
csel x8, x19, x8, ne
stp x0, x1, [scalarb]
stp x2, x3, [scalarb+16]
stp x4, x5, [scalarb+32]
stp x6, x7, [scalarb+48]
str x8, [scalarb+64]
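// If the scalar was negated above, also negate the y coordinate of tab[0].
// Since p_521 is all 1s across its 521 bits, p_521 - y is just the 521-bit
// complement of y; the case y = 0 is left untouched so the stored value
// stays reduced.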
add tabup, tab
ldp x0, x1, [tabup, #NUMSIZE]
ldp x2, x3, [tabup, #NUMSIZE+16]
ldp x4, x5, [tabup, #NUMSIZE+32]
ldp x6, x7, [tabup, #NUMSIZE+48]
ldr x8, [tabup, #NUMSIZE+64]
orr x10, x0, x1
orr x11, x2, x3
orr x12, x4, x5
orr x13, x6, x7
orr x10, x10, x11
orr x12, x12, x13
orr x12, x12, x8
orr x10, x10, x12
cmp x10, xzr
csel x9, x9, xzr, ne
eor x0, x0, x9
eor x1, x1, x9
eor x2, x2, x9
eor x3, x3, x9
eor x4, x4, x9
eor x5, x5, x9
eor x6, x6, x9
eor x7, x7, x9
and x9, x9, #0x1FF
eor x8, x8, x9
stp x0, x1, [tabup, #NUMSIZE]
stp x2, x3, [tabup, #NUMSIZE+16]
stp x4, x5, [tabup, #NUMSIZE+32]
stp x6, x7, [tabup, #NUMSIZE+48]
str x8, [tabup, #NUMSIZE+64]
// Compute and record tab[1] = 2 * P, ..., tab[15] = 16 * P
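// Even multiples come from doublings ((2k) * P = 2 * (k * P)) and odd ones
// from adding P to the preceding even multiple.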
add x0, tab+JACSIZE*1
add x1, tab
bl p521_jscalarmul_jdouble
add x0, tab+JACSIZE*2
add x1, tab+JACSIZE*1
add x2, tab
bl p521_jscalarmul_jadd
add x0, tab+JACSIZE*3
add x1, tab+JACSIZE*1
bl p521_jscalarmul_jdouble
add x0, tab+JACSIZE*4
add x1, tab+JACSIZE*3
add x2, tab
bl p521_jscalarmul_jadd
add x0, tab+JACSIZE*5
add x1, tab+JACSIZE*2
bl p521_jscalarmul_jdouble
add x0, tab+JACSIZE*6
add x1, tab+JACSIZE*5
add x2, tab
bl p521_jscalarmul_jadd
add x0, tab+JACSIZE*7
add x1, tab+JACSIZE*3
bl p521_jscalarmul_jdouble
add x0, tab+JACSIZE*8
add x1, tab+JACSIZE*7
add x2, tab
bl p521_jscalarmul_jadd
add x0, tab+JACSIZE*9
add x1, tab+JACSIZE*4
bl p521_jscalarmul_jdouble
add x0, tab+JACSIZE*10
add x1, tab+JACSIZE*9
add x2, tab
bl p521_jscalarmul_jadd
add x0, tab+JACSIZE*11
add x1, tab+JACSIZE*5
bl p521_jscalarmul_jdouble
add x0, tab+JACSIZE*12
add x1, tab+JACSIZE*11
add x2, tab
bl p521_jscalarmul_jadd
add x0, tab+JACSIZE*13
add x1, tab+JACSIZE*6
bl p521_jscalarmul_jdouble
add x0, tab+JACSIZE*14
add x1, tab+JACSIZE*13
add x2, tab
bl p521_jscalarmul_jadd
add x0, tab+JACSIZE*15
add x1, tab+JACSIZE*7
bl p521_jscalarmul_jdouble
// Add the recoding constant sum_i(16 * 32^i) to the scalar to allow signed
// digits. The digits of the constant, in lowest-to-highest order, are as
// follows; they are generated dynamically since none is a simple ARM load.
//
// 0x0842108421084210
// 0x1084210842108421
// 0x2108421084210842
// 0x4210842108421084
// 0x8421084210842108
// 0x0842108421084210
// 0x1084210842108421
// 0x2108421084210842
// 0x0000000000000084
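// The effect is that every 5-bit window except the topmost is later decoded
// as the signed digit bf - 16 in the range -16..15, so only the multiples
// 1 * P .. 16 * P need to be tabulated; negative digits reuse the same
// entries with the y coordinate negated.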
ldp x0, x1, [scalarb]
ldp x2, x3, [scalarb+16]
ldp x4, x5, [scalarb+32]
ldp x6, x7, [scalarb+48]
ldr x8, [scalarb+64]
movbig(x10, #0x1084, #0x2108, #0x4210, #0x8421)
adds x0, x0, x10, lsr #1
adcs x1, x1, x10
lsl x10, x10, #1
adcs x2, x2, x10
lsl x10, x10, #1
adcs x3, x3, x10
lsl x10, x10, #1
adcs x4, x4, x10
lsr x11, x10, #4
adcs x5, x5, x11
lsr x10, x10, #3
adcs x6, x6, x10
lsl x10, x10, #1
adcs x7, x7, x10
lsl x10, x10, #1
and x10, x10, #0xFF
adc x8, x8, x10
// Because of the initial reduction the top bitfield (>= bits 520) is <= 1,
// i.e. just a single bit. Record that in "bf", then shift the whole
// scalar left 56 bits to align the top of the next bitfield with the MSB
// (bits 571..575).
lsr bf, x8, #8
extr x8, x8, x7, #8
extr x7, x7, x6, #8
extr x6, x6, x5, #8
extr x5, x5, x4, #8
extr x4, x4, x3, #8
extr x3, x3, x2, #8
extr x2, x2, x1, #8
extr x1, x1, x0, #8
lsl x0, x0, #56
stp x0, x1, [scalarb]
stp x2, x3, [scalarb+16]
stp x4, x5, [scalarb+32]
stp x6, x7, [scalarb+48]
str x8, [scalarb+64]
// According to the top bit, initialize the accumulator to P or 0. This top
// digit, uniquely, is not recoded so there is no sign adjustment to make.
// We only really need to adjust the z coordinate to zero, but do all three.
add tabup, tab
cmp bf, xzr
ldp x0, x1, [tabup]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc]
ldp x0, x1, [tabup, #16]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+16]
ldp x0, x1, [tabup, #32]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+32]
ldp x0, x1, [tabup, #48]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+48]
ldp x0, x1, [tabup, #64]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+64]
ldp x0, x1, [tabup, #80]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+80]
ldp x0, x1, [tabup, #96]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+96]
ldp x0, x1, [tabup, #112]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+112]
ldp x0, x1, [tabup, #128]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+128]
ldp x0, x1, [tabup, #144]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+144]
ldp x0, x1, [tabup, #160]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+160]
ldp x0, x1, [tabup, #176]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+176]
ldp x0, x1, [tabup, #192]
csel x0, x0, xzr, ne
csel x1, x1, xzr, ne
stp x0, x1, [acc+192]
ldr x0, [tabup, #208]
csel x0, x0, xzr, ne
str x0, [acc+208]
// Main loop over size-5 bitfields: double 5 times then add signed digit
// At each stage we shift the scalar left by 5 bits so we can simply pick
// the top 5 bits as the bitfield, saving some fiddle over indexing.
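// Each iteration consumes one 5-bit signed digit, so the loop runs
// 520 / 5 = 104 times.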
mov j, #520
p521_jscalarmul_mainloop:
sub j, j, #5
add x0, acc
add x1, acc
bl p521_jscalarmul_jdouble
add x0, acc
add x1, acc
bl p521_jscalarmul_jdouble
add x0, acc
add x1, acc
bl p521_jscalarmul_jdouble
add x0, acc
add x1, acc
bl p521_jscalarmul_jdouble
add x0, acc
add x1, acc
bl p521_jscalarmul_jdouble
// Choose the bitfield and adjust it to sign and magnitude
ldp x0, x1, [scalarb]
ldp x2, x3, [scalarb+16]
ldp x4, x5, [scalarb+32]
ldp x6, x7, [scalarb+48]
ldr x8, [scalarb+64]
lsr bf, x8, #59
extr x8, x8, x7, #59
extr x7, x7, x6, #59
extr x6, x6, x5, #59
extr x5, x5, x4, #59
extr x4, x4, x3, #59
extr x3, x3, x2, #59
extr x2, x2, x1, #59
extr x1, x1, x0, #59
lsl x0, x0, #5
stp x0, x1, [scalarb]
stp x2, x3, [scalarb+16]
stp x4, x5, [scalarb+32]
stp x6, x7, [scalarb+48]
str x8, [scalarb+64]
subs bf, bf, #16
csetm sgn, lo // sgn = sign mask of digit (all 1s if negative)
cneg bf, bf, lo // bf = absolute value of digit
// Conditionally select the table entry tab[i-1] = i * P in constant time
mov x0, xzr
mov x1, xzr
mov x2, xzr
mov x3, xzr
mov x4, xzr
mov x5, xzr
mov x6, xzr
mov x7, xzr
mov x8, xzr
add tabup, tab
selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)
selectblock(9)
selectblock(10)
selectblock(11)
selectblock(12)
selectblock(13)
selectblock(14)
selectblock(15)
selectblock(16)
stp x0, x1, [tabent]
stp x2, x3, [tabent+16]
stp x4, x5, [tabent+32]
stp x6, x7, [tabent+48]
str x8, [tabent+64]
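// Likewise select the z coordinate of the chosen entry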
mov x0, xzr
mov x1, xzr
mov x2, xzr
mov x3, xzr
mov x4, xzr
mov x5, xzr
mov x6, xzr
mov x7, xzr
mov x8, xzr
add tabup, tab+2*NUMSIZE
selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)
selectblock(9)
selectblock(10)
selectblock(11)
selectblock(12)
selectblock(13)
selectblock(14)
selectblock(15)
selectblock(16)
stp x0, x1, [tabent+2*NUMSIZE]
stp x2, x3, [tabent+2*NUMSIZE+16]
stp x4, x5, [tabent+2*NUMSIZE+32]
stp x6, x7, [tabent+2*NUMSIZE+48]
str x8, [tabent+2*NUMSIZE+64]
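// Finally select the y coordinate, which below also gets a conditional
// negation according to the sign of the digit.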
mov x0, xzr
mov x1, xzr
mov x2, xzr
mov x3, xzr
mov x4, xzr
mov x5, xzr
mov x6, xzr
mov x7, xzr
mov x8, xzr
add tabup, tab+NUMSIZE
selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)
selectblock(9)
selectblock(10)
selectblock(11)
selectblock(12)
selectblock(13)
selectblock(14)
selectblock(15)
selectblock(16)
// Store it to "tabent" with the y coordinate optionally negated.
// This is done carefully to give coordinates < p_521 even in
// the degenerate case y = 0 (when z = 0 for points on the curve).
orr x10, x0, x1
orr x11, x2, x3
orr x12, x4, x5
orr x13, x6, x7
orr x10, x10, x11
orr x12, x12, x13
orr x12, x12, x8
orr x10, x10, x12
cmp x10, xzr
csel sgn, sgn, xzr, ne
eor x0, x0, sgn
eor x1, x1, sgn
eor x2, x2, sgn
eor x3, x3, sgn
eor x4, x4, sgn
eor x5, x5, sgn
eor x6, x6, sgn
eor x7, x7, sgn
and sgn, sgn, #0x1FF
eor x8, x8, sgn
stp x0, x1, [tabent+NUMSIZE]
stp x2, x3, [tabent+NUMSIZE+16]
stp x4, x5, [tabent+NUMSIZE+32]
stp x6, x7, [tabent+NUMSIZE+48]
str x8, [tabent+NUMSIZE+64]
// Add to the accumulator
add x0, acc
add x1, acc
add x2, tabent
bl p521_jscalarmul_jadd
cbnz j, p521_jscalarmul_mainloop
// That's the end of the main loop, and we just need to copy the
// result in "acc" to the output.
ldp x0, x1, [acc]
stp x0, x1, [res]
ldp x0, x1, [acc+16]
stp x0, x1, [res, #16]
ldp x0, x1, [acc+32]
stp x0, x1, [res, #32]
ldp x0, x1, [acc+48]
stp x0, x1, [res, #48]
ldp x0, x1, [acc+64]
stp x0, x1, [res, #64]
ldp x0, x1, [acc+80]
stp x0, x1, [res, #80]
ldp x0, x1, [acc+96]
stp x0, x1, [res, #96]
ldp x0, x1, [acc+112]
stp x0, x1, [res, #112]
ldp x0, x1, [acc+128]
stp x0, x1, [res, #128]
ldp x0, x1, [acc+144]
stp x0, x1, [res, #144]
ldp x0, x1, [acc+160]
stp x0, x1, [res, #160]
ldp x0, x1, [acc+176]
stp x0, x1, [res, #176]
ldp x0, x1, [acc+192]
stp x0, x1, [res, #192]
ldr x0, [acc+208]
str x0, [res, #208]
// Restore stack and registers and return
add sp, sp, NSPACE
ldp x21, x30, [sp], 16
ldp x19, x20, [sp], 16
ret
// Local copies of subroutines, complete clones at the moment except
// that we share multiplication and squaring between the point operations.
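// Reduce a 9-word (576-bit) input modulo p_521 = 2^521 - 1
// (mirrors the standalone bignum_mod_p521_9).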
p521_jscalarmul_bignum_mod_p521_9:
ldr x12, [x1, #64]
lsr x2, x12, #9
cmp xzr, xzr
ldp x4, x5, [x1]
adcs xzr, x4, x2
adcs xzr, x5, xzr
ldp x6, x7, [x1, #16]
and x3, x6, x7
adcs xzr, x3, xzr
ldp x8, x9, [x1, #32]
and x3, x8, x9
adcs xzr, x3, xzr
ldp x10, x11, [x1, #48]
and x3, x10, x11
adcs xzr, x3, xzr
orr x3, x12, #0xfffffffffffffe00
adcs x3, x3, xzr
adcs x4, x4, x2
adcs x5, x5, xzr
adcs x6, x6, xzr
adcs x7, x7, xzr
adcs x8, x8, xzr
adcs x9, x9, xzr
adcs x10, x10, xzr
adcs x11, x11, xzr
adc x12, x12, xzr
and x12, x12, #0x1ff
stp x4, x5, [x0]
stp x6, x7, [x0, #16]
stp x8, x9, [x0, #32]
stp x10, x11, [x0, #48]
str x12, [x0, #64]
ret
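// Reduce a 9-word input modulo the group order n_521
// (mirrors the standalone bignum_mod_n521_9).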
p521_jscalarmul_bignum_mod_n521_9:
ldr x14, [x1, #64]
lsr x15, x14, #9
add x15, x15, #1
mov x2, #39927
movk x2, #28359, lsl #16
movk x2, #18657, lsl #32
movk x2, #17552, lsl #48
mul x6, x2, x15
mov x3, #47185
movk x3, #30307, lsl #16
movk x3, #13895, lsl #32
movk x3, #50250, lsl #48
mul x7, x3, x15
mov x4, #23087
movk x4, #2294, lsl #16
movk x4, #65207, lsl #32
movk x4, #32819, lsl #48
mul x8, x4, x15
mov x5, #27028
movk x5, #16592, lsl #16
movk x5, #30844, lsl #32
movk x5, #44665, lsl #48
mul x9, x5, x15
lsl x10, x15, #2
add x10, x10, x15
umulh x13, x2, x15
adds x7, x7, x13
umulh x13, x3, x15
adcs x8, x8, x13
umulh x13, x4, x15
adcs x9, x9, x13
umulh x13, x5, x15
adc x10, x10, x13
ldp x12, x13, [x1]
adds x6, x6, x12
adcs x7, x7, x13
ldp x12, x13, [x1, #16]
adcs x8, x8, x12
adcs x9, x9, x13
ldp x13, x11, [x1, #32]
adcs x10, x10, x13
adcs x11, x11, xzr
ldp x12, x13, [x1, #48]
adcs x12, x12, xzr
adcs x13, x13, xzr
orr x14, x14, #0xfffffffffffffe00
adcs x14, x14, xzr
csetm x15, lo
and x2, x2, x15
subs x6, x6, x2
and x3, x3, x15
sbcs x7, x7, x3
and x4, x4, x15
sbcs x8, x8, x4
and x5, x5, x15
sbcs x9, x9, x5
mov x2, #5
and x2, x2, x15
sbcs x10, x10, x2
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbcs x13, x13, xzr
sbc x14, x14, xzr
and x14, x14, #0x1ff
stp x6, x7, [x0]
stp x8, x9, [x0, #16]
stp x10, x11, [x0, #32]
stp x12, x13, [x0, #48]
str x14, [x0, #64]
ret
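// Jacobian point addition (mirrors the standalone p521_jadd).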
p521_jscalarmul_jadd:
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x29, x30, [sp, #-16]!
sub sp, sp, #0x240
mov x26, x0
mov x27, x1
mov x28, x2
mov x0, sp
add x1, x27, #0x90
bl p521_jscalarmul_sqr_p521
add x0, sp, #0x168
add x1, x28, #0x90
bl p521_jscalarmul_sqr_p521
add x0, sp, #0x1f8
add x1, x28, #0x90
add x2, x27, #0x48
bl p521_jscalarmul_mul_p521
add x0, sp, #0x48
add x1, x27, #0x90
add x2, x28, #0x48
bl p521_jscalarmul_mul_p521
add x0, sp, #0x90
mov x1, sp
add x2, x28, #0x0
bl p521_jscalarmul_mul_p521
add x0, sp, #0x120
add x1, sp, #0x168
add x2, x27, #0x0
bl p521_jscalarmul_mul_p521
add x0, sp, #0x48
mov x1, sp
add x2, sp, #0x48
bl p521_jscalarmul_mul_p521
add x0, sp, #0x1f8
add x1, sp, #0x168
add x2, sp, #0x1f8
bl p521_jscalarmul_mul_p521
add x0, sp, #0x168
add x1, sp, #0x90
add x2, sp, #0x120
bl p521_jscalarmul_sub_p521
add x0, sp, #0x48
add x1, sp, #0x48
add x2, sp, #0x1f8
bl p521_jscalarmul_sub_p521
add x0, sp, #0xd8
add x1, sp, #0x168
bl p521_jscalarmul_sqr_p521
mov x0, sp
add x1, sp, #0x48
bl p521_jscalarmul_sqr_p521
add x0, sp, #0x120
add x1, sp, #0xd8
add x2, sp, #0x120
bl p521_jscalarmul_mul_p521
add x0, sp, #0x90
add x1, sp, #0xd8
add x2, sp, #0x90
bl p521_jscalarmul_mul_p521
mov x0, sp
mov x1, sp
add x2, sp, #0x120
bl p521_jscalarmul_sub_p521
add x0, sp, #0xd8
add x1, sp, #0x90
add x2, sp, #0x120
bl p521_jscalarmul_sub_p521
add x0, sp, #0x168
add x1, sp, #0x168
add x2, x27, #0x90
bl p521_jscalarmul_mul_p521
mov x0, sp
mov x1, sp
add x2, sp, #0x90
bl p521_jscalarmul_sub_p521
add x0, sp, #0x120
add x1, sp, #0x120
mov x2, sp
bl p521_jscalarmul_sub_p521
add x0, sp, #0xd8
add x1, sp, #0xd8
add x2, sp, #0x1f8
bl p521_jscalarmul_mul_p521
add x0, sp, #0x168
add x1, sp, #0x168
add x2, x28, #0x90
bl p521_jscalarmul_mul_p521
add x0, sp, #0x120
add x1, sp, #0x48
add x2, sp, #0x120
bl p521_jscalarmul_mul_p521
add x0, sp, #0x120
add x1, sp, #0x120
add x2, sp, #0xd8
bl p521_jscalarmul_sub_p521
ldp x0, x1, [x27, #144]
ldp x2, x3, [x27, #160]
ldp x4, x5, [x27, #176]
ldp x6, x7, [x27, #192]
ldr x8, [x27, #208]
orr x20, x0, x1
orr x21, x2, x3
orr x22, x4, x5
orr x23, x6, x7
orr x20, x20, x21
orr x22, x22, x23
orr x20, x20, x8
orr x20, x20, x22
cmp x20, xzr
cset x20, ne
ldp x10, x11, [x28, #144]
ldp x12, x13, [x28, #160]
ldp x14, x15, [x28, #176]
ldp x16, x17, [x28, #192]
ldr x19, [x28, #208]
orr x21, x10, x11
orr x22, x12, x13
orr x23, x14, x15
orr x24, x16, x17
orr x21, x21, x22
orr x23, x23, x24
orr x21, x21, x19
orr x21, x21, x23
csel x0, x0, x10, ne
csel x1, x1, x11, ne
csel x2, x2, x12, ne
csel x3, x3, x13, ne
csel x4, x4, x14, ne
csel x5, x5, x15, ne
csel x6, x6, x16, ne
csel x7, x7, x17, ne
csel x8, x8, x19, ne
cmp x21, xzr
cset x21, ne
cmp x21, x20
ldp x10, x11, [sp, #360]
ldp x12, x13, [sp, #376]
ldp x14, x15, [sp, #392]
ldp x16, x17, [sp, #408]
ldr x19, [sp, #424]
csel x0, x0, x10, ne
csel x1, x1, x11, ne
csel x2, x2, x12, ne
csel x3, x3, x13, ne
csel x4, x4, x14, ne
csel x5, x5, x15, ne
csel x6, x6, x16, ne
csel x7, x7, x17, ne
csel x8, x8, x19, ne
stp x0, x1, [sp, #360]
stp x2, x3, [sp, #376]
stp x4, x5, [sp, #392]
stp x6, x7, [sp, #408]
str x8, [sp, #424]
ldp x20, x21, [x27]
ldp x0, x1, [sp]
csel x0, x20, x0, cc
csel x1, x21, x1, cc
ldp x20, x21, [x28]
csel x0, x20, x0, hi
csel x1, x21, x1, hi
ldp x20, x21, [x27, #16]
ldp x2, x3, [sp, #16]
csel x2, x20, x2, cc
csel x3, x21, x3, cc
ldp x20, x21, [x28, #16]
csel x2, x20, x2, hi
csel x3, x21, x3, hi
ldp x20, x21, [x27, #32]
ldp x4, x5, [sp, #32]
csel x4, x20, x4, cc
csel x5, x21, x5, cc
ldp x20, x21, [x28, #32]
csel x4, x20, x4, hi
csel x5, x21, x5, hi
ldp x20, x21, [x27, #48]
ldp x6, x7, [sp, #48]
csel x6, x20, x6, cc
csel x7, x21, x7, cc
ldp x20, x21, [x28, #48]
csel x6, x20, x6, hi
csel x7, x21, x7, hi
ldr x20, [x27, #64]
ldr x8, [sp, #64]
csel x8, x20, x8, cc
ldr x21, [x28, #64]
csel x8, x21, x8, hi
ldp x20, x21, [x27, #72]
ldp x10, x11, [sp, #288]
csel x10, x20, x10, cc
csel x11, x21, x11, cc
ldp x20, x21, [x28, #72]
csel x10, x20, x10, hi
csel x11, x21, x11, hi
ldp x20, x21, [x27, #88]
ldp x12, x13, [sp, #304]
csel x12, x20, x12, cc
csel x13, x21, x13, cc
ldp x20, x21, [x28, #88]
csel x12, x20, x12, hi
csel x13, x21, x13, hi
ldp x20, x21, [x27, #104]
ldp x14, x15, [sp, #320]
csel x14, x20, x14, cc
csel x15, x21, x15, cc
ldp x20, x21, [x28, #104]
csel x14, x20, x14, hi
csel x15, x21, x15, hi
ldp x20, x21, [x27, #120]
ldp x16, x17, [sp, #336]
csel x16, x20, x16, cc
csel x17, x21, x17, cc
ldp x20, x21, [x28, #120]
csel x16, x20, x16, hi
csel x17, x21, x17, hi
ldr x20, [x27, #136]
ldr x19, [sp, #352]
csel x19, x20, x19, cc
ldr x21, [x28, #136]
csel x19, x21, x19, hi
stp x0, x1, [x26]
stp x2, x3, [x26, #16]
stp x4, x5, [x26, #32]
stp x6, x7, [x26, #48]
str x8, [x26, #64]
ldp x0, x1, [sp, #360]
ldp x2, x3, [sp, #376]
ldp x4, x5, [sp, #392]
ldp x6, x7, [sp, #408]
ldr x8, [sp, #424]
stp x10, x11, [x26, #72]
stp x12, x13, [x26, #88]
stp x14, x15, [x26, #104]
stp x16, x17, [x26, #120]
str x19, [x26, #136]
stp x0, x1, [x26, #144]
stp x2, x3, [x26, #160]
stp x4, x5, [x26, #176]
stp x6, x7, [x26, #192]
str x8, [x26, #208]
add sp, sp, #0x240
ldp x29, x30, [sp], #16
ldp x27, x28, [sp], #16
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
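// Jacobian point doubling (mirrors the standalone p521_jdouble).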
p521_jscalarmul_jdouble:
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x29, x30, [sp, #-16]!
sub sp, sp, #0x200
mov x26, x0
mov x27, x1
mov x0, sp
add x1, x27, #0x90
bl p521_jscalarmul_sqr_p521
add x0, sp, #0x48
add x1, x27, #0x48
bl p521_jscalarmul_sqr_p521
ldp x5, x6, [x27]
ldp x4, x3, [sp]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [x27, #16]
ldp x4, x3, [sp, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [x27, #32]
ldp x4, x3, [sp, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
ldp x11, x12, [x27, #48]
ldp x4, x3, [sp, #48]
sbcs x11, x11, x4
sbcs x12, x12, x3
ldr x13, [x27, #64]
ldr x4, [sp, #64]
sbcs x13, x13, x4
sbcs x5, x5, xzr
sbcs x6, x6, xzr
sbcs x7, x7, xzr
sbcs x8, x8, xzr
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbcs x13, x13, xzr
and x13, x13, #0x1ff
stp x5, x6, [sp, #216]
stp x7, x8, [sp, #232]
stp x9, x10, [sp, #248]
stp x11, x12, [sp, #264]
str x13, [sp, #280]
cmp xzr, xzr
ldp x5, x6, [x27]
ldp x4, x3, [sp]
adcs x5, x5, x4
adcs x6, x6, x3
ldp x7, x8, [x27, #16]
ldp x4, x3, [sp, #16]
adcs x7, x7, x4
adcs x8, x8, x3
ldp x9, x10, [x27, #32]
ldp x4, x3, [sp, #32]
adcs x9, x9, x4
adcs x10, x10, x3
ldp x11, x12, [x27, #48]
ldp x4, x3, [sp, #48]
adcs x11, x11, x4
adcs x12, x12, x3
ldr x13, [x27, #64]
ldr x4, [sp, #64]
adc x13, x13, x4
subs x4, x13, #0x200
csetm x4, cs
sbcs x5, x5, xzr
and x4, x4, #0x200
sbcs x6, x6, xzr
sbcs x7, x7, xzr
sbcs x8, x8, xzr
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbc x13, x13, x4
stp x5, x6, [sp, #144]
stp x7, x8, [sp, #160]
stp x9, x10, [sp, #176]
stp x11, x12, [sp, #192]
str x13, [sp, #208]
add x0, sp, #0xd8
add x1, sp, #0x90
add x2, sp, #0xd8
bl p521_jscalarmul_mul_p521
cmp xzr, xzr
ldp x5, x6, [x27, #72]
ldp x4, x3, [x27, #144]
adcs x5, x5, x4
adcs x6, x6, x3
ldp x7, x8, [x27, #88]
ldp x4, x3, [x27, #160]
adcs x7, x7, x4
adcs x8, x8, x3
ldp x9, x10, [x27, #104]
ldp x4, x3, [x27, #176]
adcs x9, x9, x4
adcs x10, x10, x3
ldp x11, x12, [x27, #120]
ldp x4, x3, [x27, #192]
adcs x11, x11, x4
adcs x12, x12, x3
ldr x13, [x27, #136]
ldr x4, [x27, #208]
adc x13, x13, x4
subs x4, x13, #0x200
csetm x4, cs
sbcs x5, x5, xzr
and x4, x4, #0x200
sbcs x6, x6, xzr
sbcs x7, x7, xzr
sbcs x8, x8, xzr
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbc x13, x13, x4
stp x5, x6, [sp, #144]
stp x7, x8, [sp, #160]
stp x9, x10, [sp, #176]
stp x11, x12, [sp, #192]
str x13, [sp, #208]
add x0, sp, #0x120
add x1, x27, #0x0
add x2, sp, #0x48
bl p521_jscalarmul_mul_p521
add x0, sp, #0x168
add x1, sp, #0xd8
bl p521_jscalarmul_sqr_p521
add x0, sp, #0x90
add x1, sp, #0x90
bl p521_jscalarmul_sqr_p521
ldp x6, x7, [sp, #288]
mov x1, #0xc
mul x3, x1, x6
mul x4, x1, x7
umulh x6, x1, x6
adds x4, x4, x6
umulh x7, x1, x7
ldp x8, x9, [sp, #304]
mul x5, x1, x8
mul x6, x1, x9
umulh x8, x1, x8
adcs x5, x5, x7
umulh x9, x1, x9
adcs x6, x6, x8
ldp x10, x11, [sp, #320]
mul x7, x1, x10
mul x8, x1, x11
umulh x10, x1, x10
adcs x7, x7, x9
umulh x11, x1, x11
adcs x8, x8, x10
ldp x12, x13, [sp, #336]
mul x9, x1, x12
mul x10, x1, x13
umulh x12, x1, x12
adcs x9, x9, x11
umulh x13, x1, x13
adcs x10, x10, x12
ldr x14, [sp, #352]
mul x11, x1, x14
adc x11, x11, x13
mov x1, #0x9
ldp x20, x21, [sp, #360]
mvn x20, x20
mul x0, x1, x20
umulh x20, x1, x20
adds x3, x3, x0
mvn x21, x21
mul x0, x1, x21
umulh x21, x1, x21
adcs x4, x4, x0
ldp x22, x23, [sp, #376]
mvn x22, x22
mul x0, x1, x22
umulh x22, x1, x22
adcs x5, x5, x0
mvn x23, x23
mul x0, x1, x23
umulh x23, x1, x23
adcs x6, x6, x0
ldp x17, x19, [sp, #392]
mvn x17, x17
mul x0, x1, x17
umulh x17, x1, x17
adcs x7, x7, x0
mvn x19, x19
mul x0, x1, x19
umulh x19, x1, x19
adcs x8, x8, x0
ldp x2, x16, [sp, #408]
mvn x2, x2
mul x0, x1, x2
umulh x2, x1, x2
adcs x9, x9, x0
mvn x16, x16
mul x0, x1, x16
umulh x16, x1, x16
adcs x10, x10, x0
ldr x0, [sp, #424]
eor x0, x0, #0x1ff
mul x0, x1, x0
adc x11, x11, x0
adds x4, x4, x20
adcs x5, x5, x21
and x15, x4, x5
adcs x6, x6, x22
and x15, x15, x6
adcs x7, x7, x23
and x15, x15, x7
adcs x8, x8, x17
and x15, x15, x8
adcs x9, x9, x19
and x15, x15, x9
adcs x10, x10, x2
and x15, x15, x10
adc x11, x11, x16
lsr x12, x11, #9
orr x11, x11, #0xfffffffffffffe00
cmp xzr, xzr
adcs xzr, x3, x12
adcs xzr, x15, xzr
adcs xzr, x11, xzr
adcs x3, x3, x12
adcs x4, x4, xzr
adcs x5, x5, xzr
adcs x6, x6, xzr
adcs x7, x7, xzr
adcs x8, x8, xzr
adcs x9, x9, xzr
adcs x10, x10, xzr
adc x11, x11, xzr
and x11, x11, #0x1ff
stp x3, x4, [sp, #360]
stp x5, x6, [sp, #376]
stp x7, x8, [sp, #392]
stp x9, x10, [sp, #408]
str x11, [sp, #424]
ldp x5, x6, [sp, #144]
ldp x4, x3, [sp]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #160]
ldp x4, x3, [sp, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #176]
ldp x4, x3, [sp, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
ldp x11, x12, [sp, #192]
ldp x4, x3, [sp, #48]
sbcs x11, x11, x4
sbcs x12, x12, x3
ldr x13, [sp, #208]
ldr x4, [sp, #64]
sbcs x13, x13, x4
sbcs x5, x5, xzr
sbcs x6, x6, xzr
sbcs x7, x7, xzr
sbcs x8, x8, xzr
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbcs x13, x13, xzr
and x13, x13, #0x1ff
stp x5, x6, [sp, #144]
stp x7, x8, [sp, #160]
stp x9, x10, [sp, #176]
stp x11, x12, [sp, #192]
str x13, [sp, #208]
mov x0, sp
add x1, sp, #0x48
bl p521_jscalarmul_sqr_p521
add x0, sp, #0xd8
add x1, sp, #0x168
add x2, sp, #0xd8
bl p521_jscalarmul_mul_p521
ldp x5, x6, [sp, #144]
ldp x4, x3, [sp, #72]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [sp, #160]
ldp x4, x3, [sp, #88]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [sp, #176]
ldp x4, x3, [sp, #104]
sbcs x9, x9, x4
sbcs x10, x10, x3
ldp x11, x12, [sp, #192]
ldp x4, x3, [sp, #120]
sbcs x11, x11, x4
sbcs x12, x12, x3
ldr x13, [sp, #208]
ldr x4, [sp, #136]
sbcs x13, x13, x4
sbcs x5, x5, xzr
sbcs x6, x6, xzr
sbcs x7, x7, xzr
sbcs x8, x8, xzr
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbcs x13, x13, xzr
and x13, x13, #0x1ff
stp x5, x6, [x26, #144]
stp x7, x8, [x26, #160]
stp x9, x10, [x26, #176]
stp x11, x12, [x26, #192]
str x13, [x26, #208]
ldp x6, x7, [sp, #288]
lsl x3, x6, #2
extr x4, x7, x6, #62
ldp x8, x9, [sp, #304]
extr x5, x8, x7, #62
extr x6, x9, x8, #62
ldp x10, x11, [sp, #320]
extr x7, x10, x9, #62
extr x8, x11, x10, #62
ldp x12, x13, [sp, #336]
extr x9, x12, x11, #62
extr x10, x13, x12, #62
ldr x14, [sp, #352]
extr x11, x14, x13, #62
ldp x0, x1, [sp, #360]
mvn x0, x0
adds x3, x3, x0
sbcs x4, x4, x1
ldp x0, x1, [sp, #376]
sbcs x5, x5, x0
and x15, x4, x5
sbcs x6, x6, x1
and x15, x15, x6
ldp x0, x1, [sp, #392]
sbcs x7, x7, x0
and x15, x15, x7
sbcs x8, x8, x1
and x15, x15, x8
ldp x0, x1, [sp, #408]
sbcs x9, x9, x0
and x15, x15, x9
sbcs x10, x10, x1
and x15, x15, x10
ldr x0, [sp, #424]
eor x0, x0, #0x1ff
adc x11, x11, x0
lsr x12, x11, #9
orr x11, x11, #0xfffffffffffffe00
cmp xzr, xzr
adcs xzr, x3, x12
adcs xzr, x15, xzr
adcs xzr, x11, xzr
adcs x3, x3, x12
adcs x4, x4, xzr
adcs x5, x5, xzr
adcs x6, x6, xzr
adcs x7, x7, xzr
adcs x8, x8, xzr
adcs x9, x9, xzr
adcs x10, x10, xzr
adc x11, x11, xzr
and x11, x11, #0x1ff
stp x3, x4, [x26]
stp x5, x6, [x26, #16]
stp x7, x8, [x26, #32]
stp x9, x10, [x26, #48]
str x11, [x26, #64]
ldp x6, x7, [sp, #216]
lsl x3, x6, #1
adds x3, x3, x6
extr x4, x7, x6, #63
adcs x4, x4, x7
ldp x8, x9, [sp, #232]
extr x5, x8, x7, #63
adcs x5, x5, x8
extr x6, x9, x8, #63
adcs x6, x6, x9
ldp x10, x11, [sp, #248]
extr x7, x10, x9, #63
adcs x7, x7, x10
extr x8, x11, x10, #63
adcs x8, x8, x11
ldp x12, x13, [sp, #264]
extr x9, x12, x11, #63
adcs x9, x9, x12
extr x10, x13, x12, #63
adcs x10, x10, x13
ldr x14, [sp, #280]
extr x11, x14, x13, #63
adc x11, x11, x14
ldp x20, x21, [sp]
mvn x20, x20
lsl x0, x20, #3
adds x3, x3, x0
mvn x21, x21
extr x0, x21, x20, #61
adcs x4, x4, x0
ldp x22, x23, [sp, #16]
mvn x22, x22
extr x0, x22, x21, #61
adcs x5, x5, x0
and x15, x4, x5
mvn x23, x23
extr x0, x23, x22, #61
adcs x6, x6, x0
and x15, x15, x6
ldp x20, x21, [sp, #32]
mvn x20, x20
extr x0, x20, x23, #61
adcs x7, x7, x0
and x15, x15, x7
mvn x21, x21
extr x0, x21, x20, #61
adcs x8, x8, x0
and x15, x15, x8
ldp x22, x23, [sp, #48]
mvn x22, x22
extr x0, x22, x21, #61
adcs x9, x9, x0
and x15, x15, x9
mvn x23, x23
extr x0, x23, x22, #61
adcs x10, x10, x0
and x15, x15, x10
ldr x0, [sp, #64]
eor x0, x0, #0x1ff
extr x0, x0, x23, #61
adc x11, x11, x0
lsr x12, x11, #9
orr x11, x11, #0xfffffffffffffe00
cmp xzr, xzr
adcs xzr, x3, x12
adcs xzr, x15, xzr
adcs xzr, x11, xzr
adcs x3, x3, x12
adcs x4, x4, xzr
adcs x5, x5, xzr
adcs x6, x6, xzr
adcs x7, x7, xzr
adcs x8, x8, xzr
adcs x9, x9, xzr
adcs x10, x10, xzr
adc x11, x11, xzr
and x11, x11, #0x1ff
stp x3, x4, [x26, #72]
stp x5, x6, [x26, #88]
stp x7, x8, [x26, #104]
stp x9, x10, [x26, #120]
str x11, [x26, #136]
add sp, sp, #0x200
ldp x29, x30, [sp], #16
ldp x27, x28, [sp], #16
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
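// Multiplication modulo p_521, used as the shared field multiplication
// by the point operations above.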
p521_jscalarmul_mul_p521:
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
sub sp, sp, #80
ldr q6, [x2]
ldp x10, x17, [x1, #16]
ldr q4, [x1]
ldr q16, [x2, #32]
ldp x5, x20, [x2, #16]
ldr q2, [x1, #32]
movi v31.2D, #0x00000000ffffffff
uzp2 v17.4S, v6.4S, v6.4S
rev64 v7.4S, v6.4S
ldp x15, x21, [x1]
xtn v25.2S, v6.2D
xtn v22.2S, v4.2D
subs x14, x10, x17
mul v7.4S, v7.4S, v4.4S
csetm x8, cc
rev64 v3.4S, v16.4S
xtn v1.2S, v16.2D
ldp x13, x16, [x2]
mul x26, x10, x5
uzp2 v16.4S, v16.4S, v16.4S
uaddlp v26.2D, v7.4S
cneg x4, x14, cc
subs x24, x15, x21
xtn v5.2S, v2.2D
mul v28.4S, v3.4S, v2.4S
shl v26.2D, v26.2D, #32
mul x22, x17, x20
umull v20.2D, v22.2S, v25.2S
uzp2 v6.4S, v4.4S, v4.4S
umull v18.2D, v22.2S, v17.2S
uzp2 v4.4S, v2.4S, v2.4S
cneg x14, x24, cc
csetm x7, cc
umulh x11, x17, x20
usra v18.2D, v20.2D, #32
uaddlp v7.2D, v28.4S
subs x19, x16, x13
umlal v26.2D, v22.2S, v25.2S
cneg x19, x19, cc
shl v28.2D, v7.2D, #32
umull v7.2D, v5.2S, v1.2S
umull v30.2D, v5.2S, v16.2S
cinv x6, x7, cc
mul x25, x14, x19
umlal v28.2D, v5.2S, v1.2S
umull v21.2D, v6.2S, v17.2S
umulh x14, x14, x19
usra v30.2D, v7.2D, #32
subs x9, x20, x5
and v29.16B, v18.16B, v31.16B
cinv x23, x8, cc
mov x8, v26.d[1]
cneg x12, x9, cc
usra v21.2D, v18.2D, #32
umlal v29.2D, v6.2S, v25.2S
mul x24, x4, x12
umull v18.2D, v4.2S, v16.2S
movi v25.2D, #0x00000000ffffffff
eor x9, x14, x6
and v7.16B, v30.16B, v25.16B
usra v21.2D, v29.2D, #32
umulh x7, x10, x5
usra v18.2D, v30.2D, #32
umlal v7.2D, v4.2S, v1.2S
mov x19, v21.d[0]
umulh x3, x4, x12
mov x14, v21.d[1]
usra v18.2D, v7.2D, #32
adds x4, x8, x19
mov x8, v26.d[0]
adcs x19, x26, x14
adcs x14, x22, x7
adc x12, x11, xzr
adds x11, x4, x8
adcs x26, x19, x4
adcs x22, x14, x19
eor x4, x24, x23
adcs x14, x12, x14
eor x7, x25, x6
adc x25, xzr, x12
eor x19, x3, x23
adds x3, x26, x8
adcs x24, x22, x11
adcs x12, x14, x26
adcs x22, x25, x22
adcs x26, xzr, x14
adc x14, xzr, x25
cmn x23, #0x1
adcs x22, x22, x4
adcs x19, x26, x19
adc x25, x14, x23
subs x14, x21, x17
cneg x23, x14, cc
csetm x26, cc
subs x4, x20, x16
cneg x14, x4, cc
cinv x4, x26, cc
cmn x6, #0x1
adcs x11, x11, x7
mul x7, x23, x14
adcs x9, x3, x9
adcs x26, x24, x6
umulh x3, x23, x14
adcs x14, x12, x6
adcs x22, x22, x6
adcs x12, x19, x6
extr x24, x11, x8, #55
adc x6, x25, x6
subs x19, x15, x17
csetm x17, cc
cneg x23, x19, cc
subs x19, x20, x13
lsl x25, x8, #9
eor x8, x7, x4
cneg x20, x19, cc
umulh x7, x23, x20
cinv x19, x17, cc
subs x17, x15, x10
csetm x15, cc
stp x25, x24, [sp, #32]
cneg x24, x17, cc
mul x20, x23, x20
subs x25, x5, x13
cneg x13, x25, cc
cinv x15, x15, cc
mul x25, x24, x13
subs x21, x21, x10
csetm x23, cc
cneg x17, x21, cc
subs x21, x5, x16
umulh x13, x24, x13
cinv x10, x23, cc
cneg x23, x21, cc
cmn x4, #0x1
adcs x14, x14, x8
eor x21, x3, x4
adcs x21, x22, x21
eor x5, x20, x19
adcs x24, x12, x4
mul x12, x17, x23
eor x8, x25, x15
adc x25, x6, x4
cmn x15, #0x1
adcs x6, x9, x8
ldp x20, x8, [x2, #48]
eor x9, x13, x15
adcs x4, x26, x9
umulh x26, x17, x23
ldp x17, x13, [x1, #48]
adcs x9, x14, x15
adcs x16, x21, x15
adcs x14, x24, x15
eor x21, x7, x19
mul x23, x17, x20
adc x24, x25, x15
cmn x19, #0x1
adcs x7, x4, x5
adcs x9, x9, x21
umulh x3, x13, x8
adcs x16, x16, x19
adcs x22, x14, x19
eor x5, x12, x10
adc x12, x24, x19
cmn x10, #0x1
adcs x19, x7, x5
eor x14, x26, x10
mov x7, v28.d[1]
adcs x24, x9, x14
extr x4, x19, x6, #55
umulh x15, x17, x20
mov x14, v18.d[1]
lsr x9, x19, #55
adcs x5, x16, x10
mov x16, v18.d[0]
adcs x19, x22, x10
str x9, [sp, #64]
extr x25, x6, x11, #55
adc x21, x12, x10
subs x26, x17, x13
stp x25, x4, [sp, #48]
stp x19, x21, [sp, #16]
csetm x6, cc
cneg x4, x26, cc
mul x19, x13, x8
subs x11, x8, x20
stp x24, x5, [sp]
ldp x21, x10, [x1, #32]
cinv x12, x6, cc
cneg x6, x11, cc
mov x9, v28.d[0]
umulh x25, x4, x6
adds x22, x7, x16
ldp x16, x5, [x2, #32]
adcs x14, x23, x14
adcs x11, x19, x15
adc x24, x3, xzr
adds x3, x22, x9
adcs x15, x14, x22
mul x22, x4, x6
adcs x6, x11, x14
adcs x4, x24, x11
eor x14, x25, x12
adc x26, xzr, x24
subs x7, x21, x10
csetm x23, cc
cneg x19, x7, cc
subs x24, x5, x16
cneg x11, x24, cc
cinv x7, x23, cc
adds x25, x15, x9
eor x23, x22, x12
adcs x22, x6, x3
mul x24, x19, x11
adcs x15, x4, x15
adcs x6, x26, x6
umulh x19, x19, x11
adcs x11, xzr, x4
adc x26, xzr, x26
cmn x12, #0x1
adcs x4, x6, x23
eor x6, x24, x7
adcs x14, x11, x14
adc x26, x26, x12
subs x11, x10, x13
cneg x12, x11, cc
csetm x11, cc
eor x19, x19, x7
subs x24, x8, x5
cinv x11, x11, cc
cneg x24, x24, cc
cmn x7, #0x1
adcs x3, x3, x6
mul x23, x12, x24
adcs x25, x25, x19
adcs x6, x22, x7
umulh x19, x12, x24
adcs x22, x15, x7
adcs x12, x4, x7
eor x24, x23, x11
adcs x4, x14, x7
adc x26, x26, x7
eor x19, x19, x11
subs x14, x21, x17
cneg x7, x14, cc
csetm x14, cc
subs x23, x20, x16
cinv x14, x14, cc
cneg x23, x23, cc
cmn x11, #0x1
adcs x22, x22, x24
mul x24, x7, x23
adcs x15, x12, x19
adcs x4, x4, x11
adc x19, x26, x11
umulh x26, x7, x23
subs x7, x21, x13
eor x11, x24, x14
cneg x23, x7, cc
csetm x12, cc
subs x7, x8, x16
cneg x7, x7, cc
cinv x12, x12, cc
cmn x14, #0x1
eor x26, x26, x14
adcs x11, x25, x11
mul x25, x23, x7
adcs x26, x6, x26
adcs x6, x22, x14
adcs x24, x15, x14
umulh x23, x23, x7
adcs x4, x4, x14
adc x22, x19, x14
eor x14, x25, x12
eor x7, x23, x12
cmn x12, #0x1
adcs x14, x26, x14
ldp x19, x25, [x2]
ldp x15, x23, [x2, #16]
adcs x26, x6, x7
adcs x24, x24, x12
adcs x7, x4, x12
adc x4, x22, x12
subs x19, x19, x16
ldp x16, x22, [x1]
sbcs x6, x25, x5
ldp x12, x25, [x1, #16]
sbcs x15, x15, x20
sbcs x8, x23, x8
csetm x23, cc
subs x21, x21, x16
eor x16, x19, x23
sbcs x19, x10, x22
eor x22, x6, x23
eor x8, x8, x23
sbcs x6, x17, x12
sbcs x13, x13, x25
csetm x12, cc
subs x10, x10, x17
cneg x17, x10, cc
csetm x25, cc
subs x5, x20, x5
eor x10, x19, x12
cneg x19, x5, cc
eor x20, x15, x23
eor x21, x21, x12
cinv x15, x25, cc
mul x25, x17, x19
subs x16, x16, x23
sbcs x5, x22, x23
eor x6, x6, x12
sbcs x20, x20, x23
eor x22, x13, x12
sbc x8, x8, x23
subs x21, x21, x12
umulh x19, x17, x19
sbcs x10, x10, x12
sbcs x17, x6, x12
eor x6, x19, x15
eor x19, x25, x15
umulh x25, x17, x20
sbc x13, x22, x12
cmn x15, #0x1
adcs x22, x14, x19
adcs x19, x26, x6
ldp x6, x26, [sp]
adcs x14, x24, x15
umulh x24, x21, x16
adcs x7, x7, x15
adc x15, x4, x15
adds x4, x9, x6
eor x9, x23, x12
adcs x12, x3, x26
stp x4, x12, [sp]
ldp x4, x26, [sp, #16]
umulh x12, x10, x5
ldp x6, x23, [sp, #32]
adcs x3, x11, x4
mul x4, x13, x8
adcs x26, x22, x26
ldp x22, x11, [sp, #48]
adcs x6, x19, x6
stp x3, x26, [sp, #16]
mul x26, x10, x5
adcs x14, x14, x23
stp x6, x14, [sp, #32]
ldr x6, [sp, #64]
adcs x22, x7, x22
adcs x14, x15, x11
mul x11, x17, x20
adc x19, x6, xzr
stp x22, x14, [sp, #48]
adds x14, x26, x24
str x19, [sp, #64]
umulh x19, x13, x8
adcs x7, x11, x12
adcs x22, x4, x25
mul x6, x21, x16
adc x19, x19, xzr
subs x11, x17, x13
cneg x12, x11, cc
csetm x11, cc
subs x24, x8, x20
cinv x11, x11, cc
cneg x24, x24, cc
adds x4, x14, x6
adcs x14, x7, x14
mul x3, x12, x24
adcs x7, x22, x7
adcs x22, x19, x22
umulh x12, x12, x24
adc x24, xzr, x19
adds x19, x14, x6
eor x3, x3, x11
adcs x26, x7, x4
adcs x14, x22, x14
adcs x25, x24, x7
adcs x23, xzr, x22
eor x7, x12, x11
adc x12, xzr, x24
subs x22, x21, x10
cneg x24, x22, cc
csetm x22, cc
subs x15, x5, x16
cinv x22, x22, cc
cneg x15, x15, cc
cmn x11, #0x1
adcs x3, x25, x3
mul x25, x24, x15
adcs x23, x23, x7
adc x11, x12, x11
subs x7, x10, x13
umulh x15, x24, x15
cneg x12, x7, cc
csetm x7, cc
eor x24, x25, x22
eor x25, x15, x22
cmn x22, #0x1
adcs x24, x4, x24
adcs x19, x19, x25
adcs x15, x26, x22
adcs x4, x14, x22
adcs x26, x3, x22
adcs x25, x23, x22
adc x23, x11, x22
subs x14, x21, x17
cneg x3, x14, cc
csetm x11, cc
subs x14, x8, x5
cneg x14, x14, cc
cinv x7, x7, cc
subs x13, x21, x13
cneg x21, x13, cc
csetm x13, cc
mul x22, x12, x14
subs x8, x8, x16
cinv x13, x13, cc
umulh x14, x12, x14
cneg x12, x8, cc
subs x8, x20, x16
cneg x8, x8, cc
cinv x16, x11, cc
eor x22, x22, x7
cmn x7, #0x1
eor x14, x14, x7
adcs x4, x4, x22
mul x11, x3, x8
adcs x22, x26, x14
adcs x14, x25, x7
eor x25, x24, x9
adc x26, x23, x7
umulh x7, x3, x8
subs x17, x10, x17
cneg x24, x17, cc
eor x3, x11, x16
csetm x11, cc
subs x20, x20, x5
cneg x5, x20, cc
cinv x11, x11, cc
cmn x16, #0x1
mul x17, x21, x12
eor x8, x7, x16
adcs x10, x19, x3
and x19, x9, #0x1ff
adcs x20, x15, x8
umulh x15, x21, x12
eor x12, x10, x9
eor x8, x6, x9
adcs x6, x4, x16
adcs x4, x22, x16
adcs x21, x14, x16
adc x7, x26, x16
mul x10, x24, x5
cmn x13, #0x1
ldp x3, x14, [x1]
eor x17, x17, x13
umulh x5, x24, x5
adcs x20, x20, x17
eor x17, x15, x13
adcs x16, x6, x17
eor x22, x10, x11
adcs x23, x4, x13
extr x10, x14, x3, #52
and x26, x3, #0xfffffffffffff
adcs x24, x21, x13
and x15, x10, #0xfffffffffffff
adc x6, x7, x13
cmn x11, #0x1
adcs x17, x20, x22
eor x4, x5, x11
ldp x21, x10, [sp]
adcs x7, x16, x4
eor x16, x17, x9
eor x13, x7, x9
ldp x3, x17, [sp, #16]
adcs x7, x23, x11
eor x23, x7, x9
ldp x5, x22, [sp, #32]
adcs x7, x24, x11
adc x24, x6, x11
ldr x6, [x2, #64]
adds x20, x8, x21
lsl x11, x20, #9
eor x4, x7, x9
orr x7, x11, x19
eor x8, x24, x9
adcs x11, x25, x10
mul x26, x6, x26
ldp x19, x24, [sp, #48]
adcs x12, x12, x3
adcs x16, x16, x17
adcs x9, x13, x5
ldr x25, [sp, #64]
extr x20, x11, x20, #55
adcs x13, x23, x22
adcs x4, x4, x19
extr x23, x12, x11, #55
adcs x8, x8, x24
adc x11, x25, xzr
adds x21, x9, x21
extr x9, x16, x12, #55
lsr x12, x16, #55
adcs x10, x13, x10
mul x15, x6, x15
adcs x13, x4, x3
ldp x16, x4, [x2]
ldr x3, [x1, #64]
adcs x17, x8, x17
adcs x5, x5, x7
adcs x20, x22, x20
adcs x8, x19, x23
and x22, x16, #0xfffffffffffff
ldp x19, x7, [x1, #16]
adcs x9, x24, x9
extr x24, x4, x16, #52
adc x16, x12, x25
mul x22, x3, x22
and x25, x24, #0xfffffffffffff
extr x14, x19, x14, #40
and x12, x14, #0xfffffffffffff
extr x23, x7, x19, #28
ldp x19, x24, [x2, #16]
mul x14, x3, x25
and x23, x23, #0xfffffffffffff
add x22, x26, x22
lsl x11, x11, #48
lsr x26, x22, #52
lsl x25, x22, #12
mul x22, x6, x12
extr x12, x19, x4, #40
add x4, x15, x14
mul x15, x6, x23
add x4, x4, x26
extr x23, x24, x19, #28
ldp x14, x19, [x1, #32]
and x26, x12, #0xfffffffffffff
extr x12, x4, x25, #12
and x25, x23, #0xfffffffffffff
adds x21, x21, x12
mul x12, x3, x26
extr x23, x14, x7, #16
and x23, x23, #0xfffffffffffff
mul x7, x3, x25
ldp x25, x26, [x2, #32]
add x12, x22, x12
extr x22, x19, x14, #56
mul x23, x6, x23
lsr x14, x14, #4
extr x24, x25, x24, #16
add x7, x15, x7
and x15, x24, #0xfffffffffffff
and x22, x22, #0xfffffffffffff
lsr x24, x4, #52
mul x15, x3, x15
and x14, x14, #0xfffffffffffff
add x12, x12, x24
lsl x24, x4, #12
lsr x4, x12, #52
extr x24, x12, x24, #24
adcs x10, x10, x24
lsl x24, x12, #12
add x12, x7, x4
mul x22, x6, x22
add x4, x23, x15
extr x7, x12, x24, #36
adcs x13, x13, x7
lsl x15, x12, #12
add x7, x4, x11
lsr x24, x12, #52
ldp x23, x11, [x2, #48]
add x4, x7, x24
mul x12, x6, x14
extr x7, x26, x25, #56
extr x14, x4, x15, #48
and x2, x7, #0xfffffffffffff
extr x24, x11, x23, #32
ldp x15, x7, [x1, #48]
and x1, x24, #0xfffffffffffff
lsr x24, x4, #52
mul x2, x3, x2
extr x26, x23, x26, #44
lsr x23, x25, #4
and x23, x23, #0xfffffffffffff
and x25, x26, #0xfffffffffffff
extr x26, x7, x15, #32
extr x19, x15, x19, #44
mul x23, x3, x23
and x15, x26, #0xfffffffffffff
lsl x26, x4, #12
and x4, x19, #0xfffffffffffff
lsr x11, x11, #20
mul x19, x6, x4
adcs x17, x17, x14
add x14, x22, x2
add x22, x12, x23
lsr x7, x7, #20
add x22, x22, x24
extr x2, x22, x26, #60
mul x24, x3, x25
lsr x22, x22, #52
add x14, x14, x22
lsl x22, x2, #8
extr x22, x14, x22, #8
lsl x2, x14, #12
mul x1, x3, x1
adcs x12, x5, x22
mul x5, x6, x15
and x26, x10, x13
and x4, x26, x17
add x23, x19, x24
lsr x14, x14, #52
mul x22, x3, x11
add x11, x23, x14
extr x25, x11, x2, #20
lsl x19, x11, #12
adcs x25, x20, x25
and x14, x4, x12
add x1, x5, x1
and x14, x14, x25
mul x15, x6, x7
add x26, x15, x22
mul x6, x6, x3
lsr x22, x11, #52
add x4, x1, x22
lsr x1, x4, #52
extr x3, x4, x19, #32
lsl x15, x4, #12
add x7, x26, x1
adcs x23, x8, x3
extr x20, x7, x15, #44
and x3, x14, x23
lsr x19, x7, #44
adcs x7, x9, x20
add x11, x6, x19
adc x4, x16, x11
lsr x14, x4, #9
cmp xzr, xzr
and x15, x3, x7
orr x3, x4, #0xfffffffffffffe00
adcs xzr, x21, x14
adcs xzr, x15, xzr
adcs xzr, x3, xzr
adcs x11, x21, x14
and x14, x11, #0x1ff
adcs x1, x10, xzr
extr x10, x1, x11, #9
str x14, [x0, #64]
adcs x14, x13, xzr
extr x11, x14, x1, #9
adcs x1, x17, xzr
extr x4, x1, x14, #9
stp x10, x11, [x0]
adcs x11, x12, xzr
extr x14, x11, x1, #9
adcs x10, x25, xzr
extr x11, x10, x11, #9
stp x4, x14, [x0, #16]
adcs x14, x23, xzr
extr x10, x14, x10, #9
adcs x1, x7, xzr
stp x11, x10, [x0, #32]
extr x14, x1, x14, #9
adc x10, x3, xzr
extr x26, x10, x1, #9
stp x14, x26, [x0, #48]
add sp, sp, #80
ldp x25, x26, [sp], #16
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
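// Squaring modulo p_521, used as the shared field squaring by the point
// operations above.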
p521_jscalarmul_sqr_p521:
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
ldr q23, [x1, #32]
ldp x9, x2, [x1, #32]
ldr q16, [x1, #32]
ldr q20, [x1, #48]
ldp x6, x13, [x1, #48]
rev64 v2.4S, v23.4S
mul x14, x9, x2
ldr q31, [x1, #48]
subs x22, x9, x2
uzp2 v26.4S, v23.4S, v23.4S
mul v30.4S, v2.4S, v16.4S
xtn v0.2S, v20.2D
csetm x12, cc
xtn v21.2S, v16.2D
xtn v23.2S, v23.2D
umulh x10, x9, x6
rev64 v27.4S, v31.4S
umull v2.2D, v21.2S, v26.2S
cneg x23, x22, cc
uaddlp v25.2D, v30.4S
umull v18.2D, v21.2S, v23.2S
mul x22, x9, x6
mul v6.4S, v27.4S, v20.4S
uzp2 v17.4S, v20.4S, v20.4S
shl v20.2D, v25.2D, #32
uzp2 v27.4S, v31.4S, v31.4S
mul x16, x2, x13
umlal v20.2D, v21.2S, v23.2S
usra v2.2D, v18.2D, #32
adds x8, x22, x10
umull v25.2D, v17.2S, v27.2S
xtn v31.2S, v31.2D
movi v1.2D, #0xffffffff
adc x3, x10, xzr
umulh x21, x2, x13
uzp2 v21.4S, v16.4S, v16.4S
umull v18.2D, v0.2S, v27.2S
subs x19, x13, x6
and v7.16B, v2.16B, v1.16B
umull v27.2D, v0.2S, v31.2S
cneg x20, x19, cc
movi v30.2D, #0xffffffff
umull v16.2D, v21.2S, v26.2S
umlal v7.2D, v21.2S, v23.2S
mul x19, x23, x20
cinv x7, x12, cc
uaddlp v6.2D, v6.4S
eor x12, x19, x7
adds x11, x8, x16
umulh x10, x23, x20
ldr q1, [x1]
usra v16.2D, v2.2D, #32
adcs x19, x3, x21
shl v2.2D, v6.2D, #32
adc x20, x21, xzr
adds x17, x19, x16
usra v18.2D, v27.2D, #32
adc x19, x20, xzr
cmn x7, #0x1
umlal v2.2D, v0.2S, v31.2S
umulh x16, x9, x2
adcs x8, x11, x12
usra v16.2D, v7.2D, #32
ldr x12, [x1, #64]
eor x20, x10, x7
umulh x10, x6, x13
mov x23, v2.d[0]
mov x3, v2.d[1]
adcs x21, x17, x20
usra v25.2D, v18.2D, #32
and v23.16B, v18.16B, v30.16B
adc x7, x19, x7
adds x22, x22, x22
ldr q7, [x1, #16]
adcs x17, x8, x8
umlal v23.2D, v17.2S, v31.2S
mov x19, v16.d[0]
mul x11, x12, x12
ldr q4, [x1]
usra v25.2D, v23.2D, #32
add x5, x12, x12
adcs x15, x21, x21
ldr q28, [x1]
mov x12, v20.d[1]
adcs x24, x7, x7
mov x21, v16.d[1]
adc x4, xzr, xzr
adds x19, x19, x14
ldr q18, [x1, #16]
xtn v26.2S, v1.2D
adcs x8, x12, x16
adc x21, x21, xzr
adds x7, x19, x14
xtn v23.2S, v7.2D
rev64 v21.4S, v28.4S
adcs x12, x8, x16
ldp x20, x19, [x1]
mov x16, v25.d[1]
xtn v22.2S, v28.2D
adc x14, x21, xzr
adds x8, x22, x12
uzp2 v24.4S, v28.4S, v28.4S
rev64 v28.4S, v18.4S
mul x12, x6, x13
mul v16.4S, v21.4S, v1.4S
shrn v31.2S, v7.2D, #32
adcs x22, x17, x14
mov x14, v25.d[0]
and x21, x20, #0xfffffffffffff
umull v17.2D, v26.2S, v24.2S
ldr q2, [x1, #32]
adcs x17, x15, xzr
ldr q30, [x1, #48]
umull v7.2D, v26.2S, v22.2S
adcs x15, x24, xzr
ldr q0, [x1, #16]
movi v6.2D, #0xffffffff
adc x4, x4, xzr
adds x14, x14, x12
uzp1 v27.4S, v18.4S, v4.4S
uzp2 v19.4S, v1.4S, v1.4S
adcs x24, x3, x10
mul x3, x5, x21
umull v29.2D, v23.2S, v31.2S
ldr q5, [x1]
adc x21, x16, xzr
adds x16, x14, x12
extr x12, x19, x20, #52
umull v18.2D, v19.2S, v24.2S
adcs x24, x24, x10
and x10, x12, #0xfffffffffffff
ldp x14, x12, [x1, #16]
usra v17.2D, v7.2D, #32
adc x21, x21, xzr
adds x23, x23, x17
mul x17, x5, x10
shl v21.2D, v29.2D, #33
lsl x10, x3, #12
lsr x1, x3, #52
rev64 v29.4S, v2.4S
uaddlp v25.2D, v16.4S
add x17, x17, x1
adcs x16, x16, x15
extr x3, x14, x19, #40
mov x15, v20.d[0]
extr x10, x17, x10, #12
and x3, x3, #0xfffffffffffff
shl v3.2D, v25.2D, #32
and v6.16B, v17.16B, v6.16B
mul x1, x5, x3
usra v18.2D, v17.2D, #32
adcs x3, x24, x4
extr x4, x12, x14, #28
umlal v6.2D, v19.2S, v22.2S
xtn v20.2S, v2.2D
umlal v3.2D, v26.2S, v22.2S
movi v26.2D, #0xffffffff
lsr x24, x17, #52
and x4, x4, #0xfffffffffffff
uzp2 v19.4S, v2.4S, v2.4S
add x1, x1, x24
mul x24, x5, x4
lsl x4, x17, #12
xtn v24.2S, v5.2D
extr x17, x1, x4, #24
adc x21, x21, xzr
umlal v21.2D, v23.2S, v23.2S
adds x4, x15, x10
lsl x10, x1, #12
adcs x15, x7, x17
mul v23.4S, v28.4S, v4.4S
and x7, x4, #0x1ff
lsr x17, x1, #52
umulh x1, x19, x12
uzp2 v17.4S, v5.4S, v5.4S
extr x4, x15, x4, #9
add x24, x24, x17
mul v29.4S, v29.4S, v5.4S
extr x17, x24, x10, #36
extr x10, x9, x12, #16
uzp1 v28.4S, v4.4S, v4.4S
adcs x17, x8, x17
and x8, x10, #0xfffffffffffff
umull v16.2D, v24.2S, v20.2S
extr x10, x17, x15, #9
mul x15, x5, x8
stp x4, x10, [x0]
lsl x4, x24, #12
lsr x8, x9, #4
uaddlp v4.2D, v23.4S
and x8, x8, #0xfffffffffffff
umull v23.2D, v24.2S, v19.2S
mul x8, x5, x8
extr x10, x2, x9, #56
lsr x24, x24, #52
and x10, x10, #0xfffffffffffff
add x15, x15, x24
extr x4, x15, x4, #48
mul x24, x5, x10
lsr x10, x15, #52
usra v23.2D, v16.2D, #32
add x10, x8, x10
shl v4.2D, v4.2D, #32
adcs x22, x22, x4
extr x4, x6, x2, #44
lsl x15, x15, #12
lsr x8, x10, #52
extr x15, x10, x15, #60
and x10, x4, #0xfffffffffffff
umlal v4.2D, v28.2S, v27.2S
add x8, x24, x8
extr x4, x13, x6, #32
mul x24, x5, x10
uzp2 v16.4S, v30.4S, v30.4S
lsl x10, x15, #8
rev64 v28.4S, v30.4S
and x15, x4, #0xfffffffffffff
extr x4, x8, x10, #8
mul x10, x5, x15
lsl x15, x8, #12
adcs x23, x23, x4
lsr x4, x8, #52
lsr x8, x13, #20
add x4, x24, x4
mul x8, x5, x8
lsr x24, x4, #52
extr x15, x4, x15, #20
lsl x4, x4, #12
add x10, x10, x24
adcs x15, x16, x15
extr x4, x10, x4, #32
umulh x5, x20, x14
adcs x3, x3, x4
usra v18.2D, v6.2D, #32
lsl x16, x10, #12
extr x24, x15, x23, #9
lsr x10, x10, #52
uzp2 v27.4S, v0.4S, v0.4S
add x8, x8, x10
extr x10, x3, x15, #9
extr x4, x22, x17, #9
and v25.16B, v23.16B, v26.16B
lsr x17, x8, #44
extr x15, x8, x16, #44
extr x16, x23, x22, #9
xtn v7.2S, v30.2D
mov x8, v4.d[0]
stp x24, x10, [x0, #32]
uaddlp v30.2D, v29.4S
stp x4, x16, [x0, #16]
umulh x24, x20, x19
adcs x15, x21, x15
adc x16, x11, x17
subs x11, x20, x19
xtn v5.2S, v0.2D
csetm x17, cc
extr x3, x15, x3, #9
mov x22, v4.d[1]
cneg x21, x11, cc
subs x10, x12, x14
mul v31.4S, v28.4S, v0.4S
cneg x10, x10, cc
cinv x11, x17, cc
shl v4.2D, v30.2D, #32
umull v28.2D, v5.2S, v16.2S
extr x23, x16, x15, #9
adds x4, x8, x5
mul x17, x21, x10
umull v22.2D, v5.2S, v7.2S
adc x15, x5, xzr
adds x4, x4, x22
uaddlp v2.2D, v31.4S
lsr x5, x16, #9
adcs x16, x15, x1
mov x15, v18.d[0]
adc x1, x1, xzr
umulh x10, x21, x10
adds x22, x16, x22
umlal v4.2D, v24.2S, v20.2S
umull v30.2D, v27.2S, v16.2S
stp x3, x23, [x0, #48]
add x3, x7, x5
adc x16, x1, xzr
usra v28.2D, v22.2D, #32
mul x23, x20, x19
eor x1, x17, x11
cmn x11, #0x1
mov x17, v18.d[1]
umull v18.2D, v17.2S, v19.2S
adcs x7, x4, x1
eor x1, x10, x11
umlal v25.2D, v17.2S, v20.2S
movi v16.2D, #0xffffffff
adcs x22, x22, x1
usra v18.2D, v23.2D, #32
umulh x4, x14, x14
adc x1, x16, x11
adds x10, x8, x8
shl v23.2D, v2.2D, #32
str x3, [x0, #64]
adcs x5, x7, x7
and v16.16B, v28.16B, v16.16B
usra v30.2D, v28.2D, #32
adcs x7, x22, x22
mov x21, v3.d[1]
adcs x11, x1, x1
umlal v16.2D, v27.2S, v7.2S
adc x22, xzr, xzr
adds x16, x15, x23
mul x8, x14, x12
umlal v23.2D, v5.2S, v7.2S
usra v18.2D, v25.2D, #32
umulh x15, x14, x12
adcs x21, x21, x24
usra v30.2D, v16.2D, #32
adc x1, x17, xzr
adds x3, x16, x23
adcs x21, x21, x24
adc x1, x1, xzr
adds x24, x10, x21
umulh x21, x12, x12
adcs x16, x5, x1
adcs x10, x7, xzr
mov x17, v21.d[1]
adcs x23, x11, xzr
adc x5, x22, xzr
adds x1, x4, x8
adcs x22, x17, x15
ldp x17, x4, [x0]
mov x11, v21.d[0]
adc x21, x21, xzr
adds x1, x1, x8
adcs x15, x22, x15
adc x8, x21, xzr
adds x22, x11, x10
mov x21, v3.d[0]
adcs x11, x1, x23
ldp x1, x10, [x0, #16]
adcs x15, x15, x5
adc x7, x8, xzr
adds x8, x17, x21
mov x23, v4.d[1]
ldp x5, x21, [x0, #32]
adcs x17, x4, x3
ldr x4, [x0, #64]
mov x3, v18.d[0]
adcs x24, x1, x24
stp x8, x17, [x0]
adcs x17, x10, x16
ldp x1, x16, [x0, #48]
adcs x5, x5, x22
adcs x8, x21, x11
stp x5, x8, [x0, #32]
adcs x1, x1, x15
mov x15, v23.d[1]
adcs x21, x16, x7
stp x1, x21, [x0, #48]
adc x10, x4, xzr
subs x7, x14, x12
mov x16, v18.d[1]
cneg x5, x7, cc
csetm x4, cc
subs x11, x13, x6
mov x8, v23.d[0]
cneg x7, x11, cc
cinv x21, x4, cc
mov x11, v30.d[0]
adds x4, x23, x3
mul x22, x5, x7
mov x23, v30.d[1]
adcs x8, x8, x16
adcs x16, x15, x11
adc x11, x23, xzr
umulh x3, x5, x7
stp x24, x17, [x0, #16]
mov x5, v4.d[0]
subs x15, x20, x19
cneg x7, x15, cc
str x10, [x0, #64]
csetm x1, cc
subs x24, x2, x9
cneg x17, x24, cc
cinv x15, x1, cc
adds x23, x4, x5
umulh x1, x7, x17
adcs x24, x8, x4
adcs x10, x16, x8
eor x8, x22, x21
adcs x16, x11, x16
mul x22, x7, x17
eor x17, x1, x15
adc x1, xzr, x11
adds x11, x24, x5
eor x7, x3, x21
adcs x3, x10, x23
adcs x24, x16, x24
adcs x4, x1, x10
eor x10, x22, x15
adcs x16, xzr, x16
adc x1, xzr, x1
cmn x21, #0x1
adcs x8, x4, x8
adcs x22, x16, x7
adc x7, x1, x21
subs x21, x19, x12
csetm x4, cc
cneg x1, x21, cc
subs x21, x13, x2
cinv x16, x4, cc
cneg x4, x21, cc
cmn x15, #0x1
adcs x21, x23, x10
mul x23, x1, x4
adcs x11, x11, x17
adcs x3, x3, x15
umulh x1, x1, x4
adcs x24, x24, x15
adcs x8, x8, x15
adcs x22, x22, x15
eor x17, x23, x16
adc x15, x7, x15
subs x7, x20, x14
cneg x7, x7, cc
csetm x4, cc
subs x10, x20, x12
cneg x23, x10, cc
csetm x10, cc
subs x12, x6, x9
cinv x20, x4, cc
cneg x12, x12, cc
cmn x16, #0x1
eor x1, x1, x16
adcs x17, x24, x17
mul x4, x7, x12
adcs x8, x8, x1
umulh x1, x7, x12
adcs x24, x22, x16
adc x7, x15, x16
subs x12, x13, x9
cneg x12, x12, cc
cinv x13, x10, cc
subs x19, x19, x14
mul x9, x23, x12
cneg x19, x19, cc
csetm x10, cc
eor x16, x1, x20
subs x22, x6, x2
umulh x12, x23, x12
eor x1, x4, x20
cinv x4, x10, cc
cneg x22, x22, cc
cmn x20, #0x1
adcs x15, x11, x1
eor x6, x12, x13
adcs x10, x3, x16
adcs x17, x17, x20
eor x23, x9, x13
adcs x2, x8, x20
mul x11, x19, x22
adcs x24, x24, x20
adc x7, x7, x20
cmn x13, #0x1
adcs x3, x10, x23
umulh x22, x19, x22
adcs x17, x17, x6
eor x12, x22, x4
extr x22, x15, x21, #63
adcs x8, x2, x13
extr x21, x21, x5, #63
ldp x16, x23, [x0]
adcs x20, x24, x13
eor x1, x11, x4
adc x6, x7, x13
cmn x4, #0x1
ldp x2, x7, [x0, #16]
adcs x1, x3, x1
extr x19, x1, x15, #63
adcs x14, x17, x12
extr x1, x14, x1, #63
lsl x17, x5, #1
adcs x8, x8, x4
extr x12, x8, x14, #8
ldp x15, x11, [x0, #32]
adcs x9, x20, x4
adc x3, x6, x4
adds x16, x12, x16
extr x6, x9, x8, #8
ldp x14, x12, [x0, #48]
extr x8, x3, x9, #8
adcs x20, x6, x23
ldr x24, [x0, #64]
lsr x6, x3, #8
adcs x8, x8, x2
and x2, x1, #0x1ff
and x1, x20, x8
adcs x4, x6, x7
adcs x3, x17, x15
and x1, x1, x4
adcs x9, x21, x11
and x1, x1, x3
adcs x6, x22, x14
and x1, x1, x9
and x21, x1, x6
adcs x14, x19, x12
adc x1, x24, x2
cmp xzr, xzr
orr x12, x1, #0xfffffffffffffe00
lsr x1, x1, #9
adcs xzr, x16, x1
and x21, x21, x14
adcs xzr, x21, xzr
adcs xzr, x12, xzr
adcs x21, x16, x1
adcs x1, x20, xzr
adcs x19, x8, xzr
stp x21, x1, [x0]
adcs x1, x4, xzr
adcs x21, x3, xzr
stp x19, x1, [x0, #16]
adcs x1, x9, xzr
stp x21, x1, [x0, #32]
adcs x21, x6, xzr
adcs x1, x14, xzr
stp x21, x1, [x0, #48]
adc x1, x12, xzr
and x1, x1, #0x1ff
str x1, [x0, #64]
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
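// Subtraction modulo p_521.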
p521_jscalarmul_sub_p521:
ldp x5, x6, [x1]
ldp x4, x3, [x2]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [x1, #16]
ldp x4, x3, [x2, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [x1, #32]
ldp x4, x3, [x2, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
ldp x11, x12, [x1, #48]
ldp x4, x3, [x2, #48]
sbcs x11, x11, x4
sbcs x12, x12, x3
ldr x13, [x1, #64]
ldr x4, [x2, #64]
sbcs x13, x13, x4
sbcs x5, x5, xzr
sbcs x6, x6, xzr
sbcs x7, x7, xzr
sbcs x8, x8, xzr
sbcs x9, x9, xzr
sbcs x10, x10, xzr
sbcs x11, x11, xzr
sbcs x12, x12, xzr
sbcs x13, x13, xzr
and x13, x13, #0x1ff
stp x5, x6, [x0]
stp x7, x8, [x0, #16]
stp x9, x10, [x0, #32]
stp x11, x12, [x0, #48]
str x13, [x0, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
|
marvin-hansen/iggy-streaming-system
| 1,932
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_demont_p521.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Convert from Montgomery form z := (x / 2^576) mod p_521, assuming x reduced
// Input x[9]; output z[9]
//
// extern void bignum_demont_p521
// (uint64_t z[static 9], uint64_t x[static 9]);
//
// This assumes the input is < p_521 for correctness. If this is not the case,
// use the variant "bignum_deamont_p521" instead.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
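// Editorial illustration (not part of the original source): the rotation
// below is justified by 2^521 == 1 (mod p_521), hence 2^576 == 2^55 and
// dividing a reduced x by 2^576 is just a 55-bit right rotation of the
// 521-bit value. A rough C sketch of the same computation, using a
// hypothetical helper name and assuming <stdint.h> and x < p_521:
//
//      void demont_p521_ref(uint64_t z[9], const uint64_t x[9])
//      {
//        // Top 9 bits of x in bits 0..8, low 55 bits of x in bits 9..63
//        uint64_t top = x[8] | (x[0] << 9);
//        for (int i = 0; i < 7; i++)
//          z[i] = (x[i] >> 55) | (x[i + 1] << 9);
//        z[7] = (x[7] >> 55) | (top << 9);
//        z[8] = top >> 55;               // final 9-bit top word
//      }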
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_demont_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_demont_p521)
.text
.balign 4
// Input parameters
#define z x0
#define x x1
// Rotating registers for the intermediate windows
#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x2
#define d5 x3
#define d6 x4
#define d7 x5
#define d8 x2
#define c x6
S2N_BN_SYMBOL(bignum_demont_p521):
// Rotate, as a 521-bit quantity, by 9*64 - 521 = 55 bits right.
ldp d0, d1, [x]
lsl c, d0, #9
extr d0, d1, d0, #55
ldp d2, d3, [x, #16]
extr d1, d2, d1, #55
stp d0, d1, [z]
extr d2, d3, d2, #55
ldp d4, d5, [x, #32]
extr d3, d4, d3, #55
stp d2, d3, [z, #16]
extr d4, d5, d4, #55
ldp d6, d7, [x, #48]
extr d5, d6, d5, #55
stp d4, d5, [z, #32]
extr d6, d7, d6, #55
ldr d8, [x, #64]
orr d8, d8, c
extr d7, d8, d7, #55
stp d6, d7, [z, #48]
lsr d8, d8, #55
str d8, [z, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
marvin-hansen/iggy-streaming-system
| 33,514
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_montsqr_p521_neon.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Montgomery square, z := (x^2 / 2^576) mod p_521
// Input x[9]; output z[9]
//
// extern void bignum_montsqr_p521_neon
// (uint64_t z[static 9], uint64_t x[static 9]);
//
// Does z := (x^2 / 2^576) mod p_521, assuming x < p_521. This means the
// Montgomery base is the "native size" 2^{9*64} = 2^576; since p_521 is
// a Mersenne prime the basic modular squaring bignum_sqr_p521 can be
// considered a Montgomery operation to base 2^521.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
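// Editorial note: since 2^521 == 1 (mod p_521), the only part of the
// Montgomery factor 2^576 that does not cancel is 2^(576-521) = 2^55, so
// the routine amounts to an ordinary modular squaring followed by division
// by 2^55, visible below as the final lsl #9 / extr #55 / lsr #55 rotation
// of the 521-bit result.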
#include "_internal_s2n_bignum.h"
// bignum_montsqr_p521_neon is functionally equivalent to bignum_montsqr_p521.
// It is written in a way that
// 1. A subset of scalar multiplications in bignum_montsqr_p521 is carefully
// chosen and vectorized
// 2. The vectorized assembly is rescheduled using the SLOTHY superoptimizer.
// https://github.com/slothy-optimizer/slothy
//
// The output program of step 1. is as follows:
//
// stp x19, x20, [sp, #-16]!
// stp x21, x22, [sp, #-16]!
// stp x23, x24, [sp, #-16]!
// ldp x16, x8, [x1]
// ldr q18, [x1]
// ldr q5, [x1]
// ldr q20, [x1]
// ldp x17, x13, [x1, #16]
// ldr q17, [x1, #16]
// ldr q1, [x1, #16]
// ldr q28, [x1, #16]
// ldp x9, x15, [x1, #32]
// ldr q27, [x1]
// ldr q29, [x1, #32]
// ldp x23, x2, [x1, #48]
// ldr q6, [x1, #48]
// ldr q4, [x1, #48]
// mul x24, x9, x23
// mul x11, x15, x2
// umulh x20, x9, x23
// subs x4, x9, x15
// cneg x22, x4, cc
// csetm x12, cc
// subs x4, x2, x23
// cneg x4, x4, cc
// mul x19, x22, x4
// umulh x4, x22, x4
// cinv x7, x12, cc
// eor x14, x19, x7
// eor x22, x4, x7
// adds x12, x24, x20
// adc x19, x20, xzr
// umulh x4, x15, x2
// adds x12, x12, x11
// adcs x19, x19, x4
// adc x4, x4, xzr
// adds x19, x19, x11
// adc x4, x4, xzr
// cmn x7, #0x1
// adcs x12, x12, x14
// adcs x19, x19, x22
// adc x4, x4, x7
// adds x11, x24, x24
// adcs x20, x12, x12
// adcs x10, x19, x19
// adcs x3, x4, x4
// adc x5, xzr, xzr
// ldr q30, [x1, #32]
// umull v0.2D, v30.2S, v30.2S
// umull2 v2.2D, v30.4S, v30.4S
// xtn v24.2S, v30.2D
// uzp2 v30.4S, v30.4S, v30.4S
// umull v30.2D, v30.2S, v24.2S
// mov x7, v0.d[0]
// mov x14, v0.d[1]
// mov x19, v2.d[0]
// mov x22, v2.d[1]
// mov x4, v30.d[0]
// mov x12, v30.d[1]
// adds x21, x7, x4, lsl #33
// lsr x4, x4, #31
// adc x14, x14, x4
// adds x19, x19, x12, lsl #33
// lsr x4, x12, #31
// adc x22, x22, x4
// mul x4, x9, x15
// umulh x12, x9, x15
// adds x24, x14, x4, lsl #1
// extr x4, x12, x4, #63
// adcs x19, x19, x4
// lsr x4, x12, #63
// adc x4, x22, x4
// adds x11, x11, x19
// adcs x20, x20, x4
// adcs x10, x10, xzr
// adcs x3, x3, xzr
// adc x6, x5, xzr
// movi v3.2D, #0x00000000ffffffff
// uzp2 v16.4S, v4.4S, v4.4S
// xtn v25.2S, v6.2D
// xtn v23.2S, v4.2D
// rev64 v30.4S, v4.4S
// umull v24.2D, v25.2S, v23.2S
// umull v0.2D, v25.2S, v16.2S
// uzp2 v2.4S, v6.4S, v6.4S
// mul v30.4S, v30.4S, v6.4S
// usra v0.2D, v24.2D, #32
// umull v19.2D, v2.2S, v16.2S
// uaddlp v30.2D, v30.4S
// and v24.16B, v0.16B, v3.16B
// umlal v24.2D, v2.2S, v23.2S
// shl v30.2D, v30.2D, #32
// usra v19.2D, v0.2D, #32
// umlal v30.2D, v25.2S, v23.2S
// usra v19.2D, v24.2D, #32
// mov x5, v30.d[0]
// mov x7, v30.d[1]
// mul x14, x23, x2
// mov x19, v19.d[0]
// mov x4, v19.d[1]
// umulh x22, x23, x2
// adds x12, x19, x14
// adcs x19, x7, x22
// adc x4, x4, xzr
// adds x12, x12, x14
// adcs x19, x19, x22
// adc x4, x4, xzr
// adds x7, x5, x10
// adcs x3, x12, x3
// adcs x14, x19, x6
// adc x10, x4, xzr
// ldr x4, [x1, #64]
// add x6, x4, x4
// mul x5, x4, x4
// and x4, x16, #0xfffffffffffff
// mul x22, x6, x4
// extr x4, x8, x16, #52
// and x4, x4, #0xfffffffffffff
// mul x19, x6, x4
// lsr x4, x22, #52
// add x12, x19, x4
// lsl x4, x22, #12
// extr x4, x12, x4, #12
// adds x21, x21, x4
// extr x4, x17, x8, #40
// and x4, x4, #0xfffffffffffff
// mul x19, x6, x4
// lsr x4, x12, #52
// add x22, x19, x4
// lsl x4, x12, #12
// extr x4, x22, x4, #24
// adcs x24, x24, x4
// extr x4, x13, x17, #28
// and x4, x4, #0xfffffffffffff
// mul x19, x6, x4
// lsr x4, x22, #52
// add x12, x19, x4
// lsl x4, x22, #12
// extr x4, x12, x4, #36
// adcs x11, x11, x4
// extr x4, x9, x13, #16
// and x4, x4, #0xfffffffffffff
// mul x19, x6, x4
// lsr x4, x12, #52
// add x22, x19, x4
// lsl x4, x12, #12
// extr x4, x22, x4, #48
// adcs x20, x20, x4
// lsr x4, x9, #4
// and x4, x4, #0xfffffffffffff
// mul x19, x6, x4
// lsr x4, x22, #52
// add x12, x19, x4
// lsl x4, x22, #12
// extr x22, x12, x4, #60
// extr x4, x15, x9, #56
// and x4, x4, #0xfffffffffffff
// mul x19, x6, x4
// lsr x4, x12, #52
// add x12, x19, x4
// lsl x4, x22, #8
// extr x4, x12, x4, #8
// adcs x7, x7, x4
// extr x4, x23, x15, #44
// and x4, x4, #0xfffffffffffff
// mul x19, x6, x4
// lsr x4, x12, #52
// add x22, x19, x4
// lsl x4, x12, #12
// extr x4, x22, x4, #20
// adcs x1, x3, x4
// extr x4, x2, x23, #32
// and x4, x4, #0xfffffffffffff
// mul x19, x6, x4
// lsr x4, x22, #52
// add x12, x19, x4
// lsl x4, x22, #12
// extr x4, x12, x4, #32
// adcs x14, x14, x4
// lsr x4, x2, #20
// mul x19, x6, x4
// lsr x4, x12, #52
// add x19, x19, x4
// lsl x4, x12, #12
// extr x4, x19, x4, #44
// adcs x22, x10, x4
// lsr x4, x19, #44
// adc x12, x5, x4
// extr x19, x24, x21, #9
// extr x4, x11, x24, #9
// stp x19, x4, [x0] // @slothy:writes=buffer0
// extr x19, x20, x11, #9
// extr x4, x7, x20, #9
// stp x19, x4, [x0, #16] // @slothy:writes=buffer16
// extr x19, x1, x7, #9
// extr x4, x14, x1, #9
// stp x19, x4, [x0, #32] // @slothy:writes=buffer32
// extr x19, x22, x14, #9
// extr x4, x12, x22, #9
// stp x19, x4, [x0, #48] // @slothy:writes=buffer48
// and x19, x21, #0x1ff
// lsr x4, x12, #9
// add x4, x19, x4
// str x4, [x0, #64]
// uzp1 v2.4S, v28.4S, v18.4S
// rev64 v30.4S, v28.4S
// uzp1 v24.4S, v18.4S, v18.4S
// mul v30.4S, v30.4S, v18.4S
// uaddlp v30.2D, v30.4S
// shl v30.2D, v30.2D, #32
// umlal v30.2D, v24.2S, v2.2S
// mov x11, v30.d[0]
// mov x20, v30.d[1]
// umulh x7, x16, x17
// subs x4, x16, x8
// cneg x22, x4, cc
// csetm x12, cc
// subs x4, x13, x17
// cneg x4, x4, cc
// mul x19, x22, x4
// umulh x4, x22, x4
// cinv x1, x12, cc
// eor x14, x19, x1
// eor x22, x4, x1
// adds x12, x11, x7
// adc x19, x7, xzr
// umulh x4, x8, x13
// adds x12, x12, x20
// adcs x19, x19, x4
// adc x4, x4, xzr
// adds x19, x19, x20
// adc x4, x4, xzr
// cmn x1, #0x1
// adcs x12, x12, x14
// adcs x19, x19, x22
// adc x4, x4, x1
// adds x21, x11, x11
// adcs x24, x12, x12
// adcs x11, x19, x19
// adcs x20, x4, x4
// adc x7, xzr, xzr
// movi v3.2D, #0x00000000ffffffff
// uzp2 v16.4S, v20.4S, v20.4S
// xtn v25.2S, v5.2D
// xtn v23.2S, v20.2D
// rev64 v30.4S, v20.4S
// umull v24.2D, v25.2S, v23.2S
// umull v0.2D, v25.2S, v16.2S
// uzp2 v2.4S, v5.4S, v5.4S
// mul v30.4S, v30.4S, v5.4S
// usra v0.2D, v24.2D, #32
// umull v19.2D, v2.2S, v16.2S
// uaddlp v30.2D, v30.4S
// and v24.16B, v0.16B, v3.16B
// umlal v24.2D, v2.2S, v23.2S
// shl v30.2D, v30.2D, #32
// usra v19.2D, v0.2D, #32
// umlal v30.2D, v25.2S, v23.2S
// usra v19.2D, v24.2D, #32
// mov x10, v30.d[0]
// mov x1, v30.d[1]
// mul x14, x16, x8
// mov x19, v19.d[0]
// mov x4, v19.d[1]
// umulh x22, x16, x8
// adds x12, x19, x14
// adcs x19, x1, x22
// adc x4, x4, xzr
// adds x3, x12, x14
// adcs x19, x19, x22
// adc x4, x4, xzr
// adds x5, x21, x19
// adcs x21, x24, x4
// adcs x24, x11, xzr
// adcs x11, x20, xzr
// adc x20, x7, xzr
// movi v3.2D, #0x00000000ffffffff
// uzp2 v16.4S, v1.4S, v1.4S
// xtn v25.2S, v17.2D
// xtn v23.2S, v1.2D
// rev64 v30.4S, v1.4S
// umull v24.2D, v25.2S, v23.2S
// umull v0.2D, v25.2S, v16.2S
// uzp2 v2.4S, v17.4S, v17.4S
// mul v30.4S, v30.4S, v17.4S
// usra v0.2D, v24.2D, #32
// umull v19.2D, v2.2S, v16.2S
// uaddlp v30.2D, v30.4S
// and v24.16B, v0.16B, v3.16B
// umlal v24.2D, v2.2S, v23.2S
// shl v30.2D, v30.2D, #32
// usra v19.2D, v0.2D, #32
// umlal v30.2D, v25.2S, v23.2S
// usra v19.2D, v24.2D, #32
// mov x7, v30.d[0]
// mov x1, v30.d[1]
// mul x14, x17, x13
// mov x19, v19.d[0]
// mov x4, v19.d[1]
// umulh x22, x17, x13
// adds x12, x19, x14
// adcs x19, x1, x22
// adc x4, x4, xzr
// adds x12, x12, x14
// adcs x19, x19, x22
// adc x4, x4, xzr
// adds x1, x7, x24
// adcs x14, x12, x11
// adcs x22, x19, x20
// adc x12, x4, xzr
// ldp x19, x4, [x0] // @slothy:reads=buffer0
// adds x19, x19, x10
// adcs x4, x4, x3
// stp x19, x4, [x0] // @slothy:writes=buffer0
// ldp x19, x4, [x0, #16] // @slothy:reads=buffer16
// adcs x19, x19, x5
// adcs x4, x4, x21
// stp x19, x4, [x0, #16] // @slothy:writes=buffer16
// ldp x19, x4, [x0, #32] // @slothy:reads=buffer32
// adcs x19, x19, x1
// adcs x4, x4, x14
// stp x19, x4, [x0, #32] // @slothy:writes=buffer32
// ldp x19, x4, [x0, #48] // @slothy:reads=buffer48
// adcs x19, x19, x22
// adcs x4, x4, x12
// stp x19, x4, [x0, #48] // @slothy:writes=buffer48
// ldr x4, [x0, #64]
// adc x4, x4, xzr
// str x4, [x0, #64]
// movi v3.2D, #0x00000000ffffffff
// uzp2 v2.4S, v29.4S, v29.4S
// xtn v16.2S, v27.2D
// xtn v25.2S, v29.2D
// rev64 v30.4S, v29.4S
// umull v24.2D, v16.2S, v25.2S
// umull v23.2D, v16.2S, v2.2S
// uzp2 v0.4S, v27.4S, v27.4S
// mul v30.4S, v30.4S, v27.4S
// usra v23.2D, v24.2D, #32
// umull v2.2D, v0.2S, v2.2S
// uaddlp v30.2D, v30.4S
// and v24.16B, v23.16B, v3.16B
// umlal v24.2D, v0.2S, v25.2S
// shl v30.2D, v30.2D, #32
// usra v2.2D, v23.2D, #32
// umlal v30.2D, v16.2S, v25.2S
// usra v2.2D, v24.2D, #32
// mov x6, v30.d[0]
// mov x22, v30.d[1]
// mul x12, x17, x23
// mul x19, x13, x2
// mov x4, v2.d[0]
// adds x22, x22, x4
// mov x4, v2.d[1]
// adcs x12, x12, x4
// umulh x4, x17, x23
// adcs x19, x19, x4
// umulh x4, x13, x2
// adc x4, x4, xzr
// adds x21, x22, x6
// adcs x22, x12, x22
// adcs x12, x19, x12
// adcs x19, x4, x19
// adc x4, xzr, x4
// adds x24, x22, x6
// adcs x11, x12, x21
// adcs x20, x19, x22
// adcs x1, x4, x12
// adcs x14, xzr, x19
// adc x7, xzr, x4
// subs x4, x17, x13
// cneg x12, x4, cc
// csetm x22, cc
// subs x4, x2, x23
// cneg x19, x4, cc
// mul x4, x12, x19
// umulh x12, x12, x19
// cinv x19, x22, cc
// cmn x19, #0x1
// eor x4, x4, x19
// adcs x1, x1, x4
// eor x4, x12, x19
// adcs x14, x14, x4
// adc x7, x7, x19
// subs x4, x16, x8
// cneg x12, x4, cc
// csetm x22, cc
// subs x4, x15, x9
// cneg x19, x4, cc
// mul x4, x12, x19
// umulh x12, x12, x19
// cinv x19, x22, cc
// cmn x19, #0x1
// eor x4, x4, x19
// adcs x10, x21, x4
// eor x4, x12, x19
// adcs x24, x24, x4
// adcs x11, x11, x19
// adcs x20, x20, x19
// adcs x1, x1, x19
// adcs x14, x14, x19
// adc x7, x7, x19
// subs x4, x8, x13
// cneg x12, x4, cc
// csetm x22, cc
// subs x4, x2, x15
// cneg x19, x4, cc
// mul x4, x12, x19
// umulh x12, x12, x19
// cinv x19, x22, cc
// cmn x19, #0x1
// eor x4, x4, x19
// adcs x20, x20, x4
// eor x4, x12, x19
// adcs x1, x1, x4
// adcs x14, x14, x19
// adc x7, x7, x19
// subs x4, x16, x17
// cneg x12, x4, cc
// csetm x22, cc
// subs x4, x23, x9
// cneg x19, x4, cc
// mul x4, x12, x19
// umulh x12, x12, x19
// cinv x19, x22, cc
// cmn x19, #0x1
// eor x4, x4, x19
// adcs x24, x24, x4
// eor x4, x12, x19
// adcs x11, x11, x4
// adcs x20, x20, x19
// adcs x1, x1, x19
// adcs x14, x14, x19
// adc x7, x7, x19
// subs x4, x16, x13
// cneg x12, x4, cc
// csetm x22, cc
// subs x4, x2, x9
// cneg x19, x4, cc
// mul x4, x12, x19
// umulh x12, x12, x19
// cinv x19, x22, cc
// cmn x19, #0x1
// eor x4, x4, x19
// adcs x11, x11, x4
// eor x4, x12, x19
// adcs x20, x20, x4
// adcs x1, x1, x19
// adcs x14, x14, x19
// adc x7, x7, x19
// subs x4, x8, x17
// cneg x12, x4, cc
// csetm x22, cc
// subs x4, x23, x15
// cneg x19, x4, cc
// mul x4, x12, x19
// umulh x12, x12, x19
// cinv x19, x22, cc
// cmn x19, #0x1
// eor x4, x4, x19
// adcs x3, x11, x4
// eor x4, x12, x19
// adcs x5, x20, x4
// adcs x1, x1, x19
// adcs x14, x14, x19
// adc x22, x7, x19
// ldp x12, x19, [x0] // @slothy:reads=buffer0
// extr x4, x1, x5, #8
// adds x11, x4, x12
// extr x4, x14, x1, #8
// adcs x20, x4, x19
// ldp x19, x12, [x0, #16] // @slothy:reads=buffer16
// extr x4, x22, x14, #8
// adcs x7, x4, x19
// and x19, x20, x7
// lsr x4, x22, #8
// adcs x1, x4, x12
// and x22, x19, x1
// ldp x19, x12, [x0, #32] // @slothy:reads=buffer32
// lsl x4, x6, #1
// adcs x14, x4, x19
// and x19, x22, x14
// extr x4, x10, x6, #63
// adcs x21, x4, x12
// and x22, x19, x21
// ldp x19, x12, [x0, #48] // @slothy:reads=buffer48
// extr x4, x24, x10, #63
// adcs x2, x4, x19
// and x19, x22, x2
// extr x4, x3, x24, #63
// adcs x24, x4, x12
// and x12, x19, x24
// ldr x19, [x0, #64]
// extr x4, x5, x3, #63
// and x4, x4, #0x1ff
// adc x4, x19, x4
// lsr x19, x4, #9
// orr x4, x4, #0xfffffffffffffe00
// cmp xzr, xzr
// adcs xzr, x11, x19
// adcs xzr, x12, xzr
// adcs xzr, x4, xzr
// adcs x11, x11, x19
// adcs x20, x20, xzr
// adcs x7, x7, xzr
// adcs x1, x1, xzr
// adcs x14, x14, xzr
// adcs x22, x21, xzr
// adcs x12, x2, xzr
// adcs x24, x24, xzr
// adc x4, x4, xzr
// and x19, x4, #0x1ff
// lsl x4, x11, #9
// extr x11, x20, x11, #55
// extr x20, x7, x20, #55
// extr x7, x1, x7, #55
// extr x1, x14, x1, #55
// orr x4, x19, x4
// extr x14, x22, x14, #55
// extr x22, x12, x22, #55
// extr x12, x24, x12, #55
// extr x19, x4, x24, #55
// lsr x4, x4, #55
// stp x11, x20, [x0] // @slothy:writes=buffer0
// stp x7, x1, [x0, #16] // @slothy:writes=buffer16
// stp x14, x22, [x0, #32] // @slothy:writes=buffer32
// stp x12, x19, [x0, #48] // @slothy:writes=buffer48
// str x4, [x0, #64]
// ldp x23, x24, [sp], #16
// ldp x21, x22, [sp], #16
// ldp x19, x20, [sp], #16
// ret
//
// The bash script used for step 2 is as follows:
//
// # Store the assembly instructions except the last 'ret',
// # callee-register store/loads as, say, 'input.S'.
// export OUTPUTS="[hint_buffer0,hint_buffer16,hint_buffer32,hint_buffer48,hint_buffer64]"
// export RESERVED_REGS="[x18,x25,x26,x27,x28,x29,x30,sp,q8,q9,q10,q11,q12,q13,q14,q15,v8,v9,v10,v11,v12,v13,v14,v15]"
// <s2n-bignum>/tools/external/slothy.sh input.S my_out_dir
// # my_out_dir/3.opt.s is the optimized assembly. Its output may differ
// # from this file since the sequence is non-deterministically chosen.
// # Please add 'ret' at the end of the output assembly.
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p521_neon)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p521_neon)
.text
.balign 4
S2N_BN_SYMBOL(bignum_montsqr_p521_neon):
// Save registers
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
// The optimized body
ldr q31, [x1, #48]
ldp x9, x15, [x1, #32]
ldp x23, x2, [x1, #48]
ldr q0, [x1, #48]
ldr q29, [x1, #32]
rev64 v21.4S, v31.4S
umulh x13, x9, x23
mul v23.4S, v21.4S, v0.4S
xtn v21.2S, v0.2D
uzp2 v19.4S, v31.4S, v31.4S
xtn v2.2S, v29.2D
xtn v30.2S, v31.2D
uzp2 v3.4S, v29.4S, v29.4S
umull v6.2D, v21.2S, v19.2S
mul x10, x9, x23
uaddlp v23.2D, v23.4S
umull v22.2D, v21.2S, v30.2S
adds x22, x10, x13
mul x17, x9, x15
movi v25.2D, #0x00000000ffffffff
uzp2 v1.4S, v0.4S, v0.4S
adc x8, x13, xzr
subs x19, x9, x15
umull v28.2D, v3.2S, v2.2S
shl v31.2D, v23.2D, #32
csetm x5, cc
cneg x3, x19, cc
umull v19.2D, v1.2S, v19.2S
ldr q4, [x1, #16]
subs x24, x2, x23
mul x6, x15, x2
usra v6.2D, v22.2D, #32
ldr q23, [x1]
cneg x13, x24, cc
umulh x24, x15, x2
umull v5.2D, v29.2S, v29.2S
rev64 v3.4S, v4.4S
cinv x19, x5, cc
adds x16, x22, x6
mov x14, v28.d[1]
umlal v31.2D, v21.2S, v30.2S
umull2 v17.2D, v29.4S, v29.4S
mov x20, v28.d[0]
mul v29.4S, v3.4S, v23.4S
and v22.16B, v6.16B, v25.16B
mul x5, x3, x13
mov x4, v5.d[1]
mov x7, v5.d[0]
adcs x11, x8, x24
ldr q5, [x1]
ldr q0, [x1]
adc x22, x24, xzr
adds x8, x11, x6
usra v19.2D, v6.2D, #32
umlal v22.2D, v1.2S, v30.2S
adc x11, x22, xzr
adds x21, x7, x20, lsl #33
mov x24, v17.d[1]
mov x22, v17.d[0]
lsr x12, x20, #31
uzp1 v2.4S, v4.4S, v23.4S
uzp1 v20.4S, v23.4S, v23.4S
usra v19.2D, v22.2D, #32
adc x4, x4, x12
lsr x6, x14, #31
adds x20, x22, x14, lsl #33
ldr q17, [x1, #16]
uzp2 v22.4S, v0.4S, v0.4S
eor x12, x5, x19
umulh x7, x3, x13
xtn v23.2S, v0.2D
adc x5, x24, x6
cmn x19, #0x1
xtn v25.2S, v5.2D
ldr q27, [x1]
adcs x16, x16, x12
uaddlp v1.2D, v29.4S
umulh x3, x9, x15
eor x13, x7, x19
adcs x24, x8, x13
adc x11, x11, x19
adds x12, x10, x10
adcs x13, x16, x16
mul x19, x23, x2
umull v21.2D, v25.2S, v23.2S
adcs x7, x24, x24
ldp x16, x8, [x1]
umull v3.2D, v25.2S, v22.2S
uzp2 v6.4S, v5.4S, v5.4S
adcs x10, x11, x11
ldr q29, [x1, #32]
adc x14, xzr, xzr
adds x24, x4, x17, lsl #1
mov x4, v31.d[1]
shl v30.2D, v1.2D, #32
lsr x6, x3, #63
extr x11, x3, x17, #63
ldr q1, [x1, #16]
mov x22, v19.d[1]
adcs x20, x20, x11
umulh x3, x23, x2
movi v4.2D, #0x00000000ffffffff
usra v3.2D, v21.2D, #32
adc x5, x5, x6
adds x11, x12, x20
mov x6, v19.d[0]
umull v19.2D, v6.2S, v22.2S
adcs x20, x13, x5
rev64 v22.4S, v0.4S
ldr x5, [x1, #64]
ldp x17, x13, [x1, #16]
adcs x7, x7, xzr
umlal v30.2D, v20.2S, v2.2S
adcs x12, x10, xzr
and x1, x16, #0xfffffffffffff
mul v22.4S, v22.4S, v5.4S
adc x14, x14, xzr
adds x6, x6, x19
xtn v5.2S, v1.2D
adcs x10, x4, x3
mov x4, v31.d[0]
adc x22, x22, xzr
adds x19, x6, x19
add x6, x5, x5
and v21.16B, v3.16B, v4.16B
adcs x10, x10, x3
extr x3, x8, x16, #52
mul x1, x6, x1
usra v19.2D, v3.2D, #32
adc x22, x22, xzr
adds x7, x4, x7
umlal v21.2D, v6.2S, v23.2S
and x4, x3, #0xfffffffffffff
adcs x3, x19, x12
uzp2 v28.4S, v1.4S, v1.4S
extr x19, x17, x8, #40
mul x12, x6, x4
adcs x14, x10, x14
rev64 v4.4S, v1.4S
mul x5, x5, x5
lsr x4, x9, #4
adc x10, x22, xzr
lsl x22, x1, #12
lsr x1, x1, #52
add x12, x12, x1
and x1, x19, #0xfffffffffffff
extr x19, x12, x22, #12
mul x1, x6, x1
extr x22, x13, x17, #28
adds x21, x21, x19
mul v31.4S, v4.4S, v17.4S
and x19, x22, #0xfffffffffffff
lsr x22, x12, #52
lsl x12, x12, #12
mul x19, x6, x19
add x22, x1, x22
extr x1, x22, x12, #24
and x4, x4, #0xfffffffffffff
adcs x12, x24, x1
extr x1, x9, x13, #16
mul x24, x6, x4
and x1, x1, #0xfffffffffffff
lsr x4, x22, #52
add x4, x19, x4
lsl x22, x22, #12
mul x1, x6, x1
extr x22, x4, x22, #36
adcs x11, x11, x22
extr x22, x11, x12, #9
extr x19, x12, x21, #9
uaddlp v3.2D, v22.4S
lsl x12, x4, #12
stp x19, x22, [x0]
umulh x19, x16, x17
uaddlp v31.2D, v31.4S
lsr x22, x4, #52
extr x4, x15, x9, #56
usra v19.2D, v21.2D, #32
add x22, x1, x22
extr x1, x23, x15, #44
shl v4.2D, v31.2D, #32
extr x12, x22, x12, #48
and x4, x4, #0xfffffffffffff
uzp2 v7.4S, v17.4S, v17.4S
adcs x20, x20, x12
xtn v17.2S, v17.2D
lsl x12, x22, #12
lsr x22, x22, #52
mul x4, x6, x4
add x22, x24, x22
and x24, x1, #0xfffffffffffff
extr x1, x2, x23, #32
extr x12, x22, x12, #60
lsl x12, x12, #8
lsr x22, x22, #52
mul x24, x6, x24
add x4, x4, x22
and x22, x1, #0xfffffffffffff
extr x12, x4, x12, #8
lsl x1, x4, #12
lsr x4, x4, #52
adcs x7, x7, x12
mul x12, x6, x22
add x24, x24, x4
extr x1, x24, x1, #20
extr x22, x20, x11, #9
extr x20, x7, x20, #9
lsr x11, x2, #20
mul x6, x6, x11
lsr x4, x24, #52
add x4, x12, x4
lsl x12, x24, #12
adcs x3, x3, x1
extr x24, x4, x12, #32
lsr x11, x4, #52
adcs x12, x14, x24
umull v31.2D, v17.2S, v28.2S
add x24, x6, x11
lsl x1, x4, #12
extr x7, x3, x7, #9
rev64 v6.4S, v29.4S
umull v22.2D, v17.2S, v5.2S
extr x11, x12, x3, #9
extr x14, x24, x1, #44
umlal v4.2D, v17.2S, v5.2S
adcs x3, x10, x14
umulh x10, x8, x13
lsr x14, x24, #44
adc x24, x5, x14
subs x5, x16, x8
stp x22, x20, [x0, #16]
csetm x1, cc
shl v21.2D, v3.2D, #32
movi v17.2D, #0x00000000ffffffff
cneg x20, x5, cc
subs x5, x13, x17
usra v31.2D, v22.2D, #32
cneg x14, x5, cc
lsr x6, x24, #9
and x22, x21, #0x1ff
mov x4, v30.d[0]
add x6, x22, x6
stp x7, x11, [x0, #32]
umulh x22, x20, x14
mov x5, v30.d[1]
str x6, [x0, #64]
extr x12, x3, x12, #9
umull v28.2D, v7.2S, v28.2S
mul x11, x20, x14
mul v6.4S, v6.4S, v27.4S
and v1.16B, v31.16B, v17.16B
cinv x21, x1, cc
adds x6, x4, x19
uzp2 v22.4S, v27.4S, v27.4S
adc x20, x19, xzr
adds x6, x6, x5
umlal v1.2D, v7.2S, v5.2S
xtn v20.2S, v29.2D
eor x22, x22, x21
adcs x7, x20, x10
usra v28.2D, v31.2D, #32
eor x20, x11, x21
usra v28.2D, v1.2D, #32
xtn v0.2S, v27.2D
adc x10, x10, xzr
adds x1, x7, x5
umlal v21.2D, v25.2S, v23.2S
uzp2 v29.4S, v29.4S, v29.4S
adc x19, x10, xzr
cmn x21, #0x1
umull v3.2D, v0.2S, v20.2S
adcs x5, x6, x20
extr x10, x24, x3, #9
umull v31.2D, v0.2S, v29.2S
adcs x1, x1, x22
stp x12, x10, [x0, #48]
mul x24, x16, x8
mov x3, v28.d[1]
usra v31.2D, v3.2D, #32
adc x10, x19, x21
adds x7, x4, x4
umulh x14, x16, x8
uaddlp v3.2D, v6.4S
mov x4, v28.d[0]
adcs x12, x5, x5
mov x5, v19.d[0]
movi v23.2D, #0x00000000ffffffff
adcs x20, x1, x1
mov x19, v21.d[1]
mov x1, v19.d[1]
adcs x22, x10, x10
and v17.16B, v31.16B, v23.16B
adc x6, xzr, xzr
umlal v17.2D, v22.2S, v20.2S
adds x10, x5, x24
mul x11, x17, x13
mov x5, v21.d[0]
umull v28.2D, v22.2S, v29.2S
adcs x19, x19, x14
shl v5.2D, v3.2D, #32
adc x21, x1, xzr
adds x10, x10, x24
adcs x1, x19, x14
umulh x14, x17, x13
adc x19, x21, xzr
adds x7, x7, x1
adcs x1, x12, x19
adcs x24, x20, xzr
mov x20, v4.d[1]
usra v28.2D, v31.2D, #32
mov x21, v4.d[0]
adcs x19, x22, xzr
adc x6, x6, xzr
adds x4, x4, x11
adcs x20, x20, x14
adc x22, x3, xzr
adds x12, x4, x11
umulh x11, x13, x2
adcs x3, x20, x14
adc x20, x22, xzr
adds x21, x21, x24
ldp x22, x24, [x0]
adcs x4, x12, x19
ldp x19, x14, [x0, #16]
usra v28.2D, v17.2D, #32
adcs x3, x3, x6
umlal v5.2D, v0.2S, v20.2S
adc x6, x20, xzr
umulh x20, x17, x23
adds x12, x22, x5
ldp x22, x5, [x0, #32]
adcs x10, x24, x10
adcs x19, x19, x7
stp x12, x10, [x0]
ldp x12, x7, [x0, #48]
adcs x10, x14, x1
mul x14, x13, x2
ldr x24, [x0, #64]
adcs x22, x22, x21
adcs x5, x5, x4
mov x21, v28.d[1]
stp x22, x5, [x0, #32]
mul x1, x17, x23
adcs x3, x12, x3
mov x4, v28.d[0]
mov x12, v5.d[1]
stp x19, x10, [x0, #16]
adcs x19, x7, x6
mov x6, v5.d[0]
adc x10, x24, xzr
subs x7, x16, x8
cneg x5, x7, cc
csetm x24, cc
subs x7, x15, x9
cneg x22, x7, cc
cinv x7, x24, cc
adds x12, x12, x4
umulh x4, x5, x22
adcs x1, x1, x21
stp x3, x19, [x0, #48]
str x10, [x0, #64]
adcs x20, x14, x20
adc x21, x11, xzr
subs x14, x17, x13
cneg x10, x14, cc
csetm x3, cc
subs x19, x2, x23
cneg x19, x19, cc
cinv x11, x3, cc
adds x14, x12, x6
mul x24, x5, x22
adcs x22, x1, x12
eor x3, x4, x7
mul x4, x10, x19
adcs x1, x20, x1
adcs x12, x21, x20
adc x5, xzr, x21
umulh x19, x10, x19
adds x20, x22, x6
eor x10, x24, x7
adcs x21, x1, x14
eor x24, x4, x11
adcs x4, x12, x22
adcs x1, x5, x1
adcs x12, xzr, x12
adc x22, xzr, x5
eor x5, x19, x11
cmn x11, #0x1
adcs x19, x1, x24
adcs x5, x12, x5
adc x24, x22, x11
subs x1, x8, x13
cneg x22, x1, cc
csetm x1, cc
subs x11, x2, x15
cinv x1, x1, cc
cneg x12, x11, cc
cmn x7, #0x1
adcs x10, x14, x10
mul x14, x22, x12
adcs x20, x20, x3
eor x11, x14, x1
adcs x3, x21, x7
umulh x21, x22, x12
adcs x22, x4, x7
adcs x4, x19, x7
adcs x12, x5, x7
adc x7, x24, x7
subs x14, x16, x17
csetm x5, cc
cneg x19, x14, cc
subs x24, x23, x9
cneg x14, x24, cc
cinv x5, x5, cc
cmn x1, #0x1
mul x24, x19, x14
adcs x22, x22, x11
eor x11, x21, x1
eor x24, x24, x5
umulh x19, x19, x14
adcs x4, x4, x11
adcs x14, x12, x1
adc x1, x7, x1
subs x17, x8, x17
cneg x12, x17, cc
csetm x17, cc
subs x16, x16, x13
cneg x11, x16, cc
csetm x16, cc
subs x23, x23, x15
cinv x7, x17, cc
cneg x13, x23, cc
mul x15, x12, x13
subs x23, x2, x9
cinv x8, x16, cc
cneg x17, x23, cc
eor x16, x19, x5
mul x23, x11, x17
cmn x5, #0x1
adcs x20, x20, x24
eor x15, x15, x7
adcs x3, x3, x16
adcs x2, x22, x5
umulh x16, x11, x17
adcs x19, x4, x5
ldp x4, x22, [x0, #48]
extr x21, x10, x6, #63
adcs x24, x14, x5
eor x23, x23, x8
adc x1, x1, x5
cmn x8, #0x1
umulh x9, x12, x13
eor x14, x16, x8
adcs x3, x3, x23
ldp x11, x5, [x0, #16]
ldp x13, x16, [x0]
adcs x23, x2, x14
adcs x14, x19, x8
extr x19, x20, x10, #63
lsl x12, x6, #1
adcs x17, x24, x8
adc x1, x1, x8
cmn x7, #0x1
adcs x24, x3, x15
eor x9, x9, x7
ldp x15, x3, [x0, #32]
adcs x9, x23, x9
ldr x8, [x0, #64]
extr x20, x24, x20, #63
adcs x23, x14, x7
extr x2, x9, x24, #63
adcs x14, x17, x7
and x24, x2, #0x1ff
extr x9, x23, x9, #8
extr x6, x14, x23, #8
adc x23, x1, x7
adds x10, x9, x13
adcs x13, x6, x16
extr x1, x23, x14, #8
lsr x23, x23, #8
adcs x7, x1, x11
adcs x2, x23, x5
and x23, x13, x7
adcs x16, x12, x15
and x23, x23, x2
adcs x14, x21, x3
and x23, x23, x16
adcs x5, x19, x4
and x23, x23, x14
adcs x22, x20, x22
and x23, x23, x5
and x1, x23, x22
adc x9, x8, x24
lsr x23, x9, #9
cmp xzr, xzr
orr x17, x9, #0xfffffffffffffe00
adcs xzr, x10, x23
adcs xzr, x1, xzr
adcs xzr, x17, xzr
adcs x23, x10, x23
adcs x9, x13, xzr
lsl x4, x23, #9
adcs x1, x7, xzr
extr x23, x9, x23, #55
extr x9, x1, x9, #55
adcs x10, x2, xzr
extr x1, x10, x1, #55
stp x23, x9, [x0]
adcs x19, x16, xzr
adcs x9, x14, xzr
extr x23, x19, x10, #55
adcs x10, x5, xzr
stp x1, x23, [x0, #16]
extr x5, x9, x19, #55
adcs x1, x22, xzr
extr x23, x10, x9, #55
adc x9, x17, xzr
stp x5, x23, [x0, #32]
extr x10, x1, x10, #55
and x23, x9, #0x1ff
orr x23, x23, x4
extr x9, x23, x1, #55
lsr x23, x23, #55
stp x10, x9, [x0, #48]
str x23, [x0, #64]
// Restore regs and return
ldp x23, x24, [sp], #16
ldp x21, x22, [sp], #16
ldp x19, x20, [sp], #16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
marvin-hansen/iggy-streaming-system
| 4,133
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_mod_n521_9.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Reduce modulo group order, z := x mod n_521
// Input x[9]; output z[9]
//
// extern void bignum_mod_n521_9
// (uint64_t z[static 9], uint64_t x[static 9]);
//
// Reduction is modulo the group order of the NIST curve P-521.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_n521_9)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_n521_9)
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_n521_9_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_n521_9_alt)
.text
.balign 4
#define z x0
#define x x1
#define n0 x2
#define n1 x3
#define n2 x4
#define n3 x5
#define d0 x6
#define d1 x7
#define d2 x8
#define d3 x9
#define d4 x10
#define d5 x11
#define d6 x12
#define d7 x13
#define d8 x14
#define q x15
// Re-use d6 and d7 as temporaries before they are needed
#define s d6
#define t d7
#define movbig(nn,n3,n2,n1,n0) \
movz nn, n0; \
movk nn, n1, lsl #16; \
movk nn, n2, lsl #32; \
movk nn, n3, lsl #48
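// For illustration (editorial note): the first use below,
//     movbig( n0, #0x4490, #0x48e1, #0x6ec7, #0x9bf7)
// expands to
//     movz n0, #0x9bf7
//     movk n0, #0x6ec7, lsl #16
//     movk n0, #0x48e1, lsl #32
//     movk n0, #0x4490, lsl #48
// leaving n0 = 0x449048e16ec79bf7, the lowest 64-bit digit of r_521.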
S2N_BN_SYMBOL(bignum_mod_n521_9):
S2N_BN_SYMBOL(bignum_mod_n521_9_alt):
// Load the top digit first into d8.
// The initial quotient estimate is q = h + 1 where x = 2^521 * h + t
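// (Editorial note, not in the original source: since 0 <= x < 2^576 we have
// h < 2^55, and r_521 = 2^521 - n_521 fits in 259 bits, so the remainder
// r = x - q * n_521 = (t + q * r_521) - 2^521 satisfies -n_521 <= r < n_521
// and at most one corrective addition of n_521 is needed at the end.)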
ldr d8, [x, #64]
lsr q, d8, #9
add q, q, #1
// Let [5; n3; n2; n1; n0] = r_521 = 2^521 - n_521
// and form [d4;d3;d2;d1;d0] = q * r_521
movbig( n0, #0x4490, #0x48e1, #0x6ec7, #0x9bf7)
mul d0, n0, q
movbig( n1, #0xc44a, #0x3647, #0x7663, #0xb851)
mul d1, n1, q
movbig( n2, #0x8033, #0xfeb7, #0x08f6, #0x5a2f)
mul d2, n2, q
movbig( n3, #0xae79, #0x787c, #0x40d0, #0x6994)
mul d3, n3, q
lsl d4, q, #2
add d4, d4, q
umulh t, n0, q
adds d1, d1, t
umulh t, n1, q
adcs d2, d2, t
umulh t, n2, q
adcs d3, d3, t
umulh t, n3, q
adc d4, d4, t
// Now load other digits and form r = x - q * n_521 = (q * r_521 + t) - 2^521.
// But the computed result stuffs in 1s from bit 521 onwards and actually
// gives r' = (q * r_521 + t) + (2^576 - 2^521) = r + 2^576, including the
// top carry. Hence CF <=> r >= 0, while r' == r (mod 2^521).
ldp s, t, [x]
adds d0, d0, s
adcs d1, d1, t
ldp s, t, [x, #16]
adcs d2, d2, s
adcs d3, d3, t
ldp t, d5, [x, #32]
adcs d4, d4, t
adcs d5, d5, xzr
ldp d6, d7, [x, #48]
adcs d6, d6, xzr
adcs d7, d7, xzr
orr d8, d8, #~0x1FF
adcs d8, d8, xzr
// We already know r < n_521, but if it actually went negative then
// we need to add back n_521 again. Recycle q as a bitmask for r < 0,
// and just subtract r_521 and mask rather than literally adding 2^521.
// This also gets rid of the bit-stuffing above.
csetm q, cc
and n0, n0, q
subs d0, d0, n0
and n1, n1, q
sbcs d1, d1, n1
and n2, n2, q
sbcs d2, d2, n2
and n3, n3, q
sbcs d3, d3, n3
mov n0, #5
and n0, n0, q
sbcs d4, d4, n0
sbcs d5, d5, xzr
sbcs d6, d6, xzr
sbcs d7, d7, xzr
sbc d8, d8, xzr
and d8, d8, #0x1FF
// Store the end result
stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]
stp d6, d7, [z, #48]
str d8, [z, #64]
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
marvin-hansen/iggy-streaming-system
| 1,800
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p521/bignum_half_p521.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Halve modulo p_521, z := (x / 2) mod p_521, assuming x reduced
// Input x[9]; output z[9]
//
// extern void bignum_half_p521
// (uint64_t z[static 9], uint64_t x[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_half_p521)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_half_p521)
.text
.balign 4
#define z x0
#define x x1
// We use distinct variables for clarity, but these are heavily aliased
#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x2
#define d5 x3
#define d6 x4
#define d7 x5
#define d8 x2
#define a x6
S2N_BN_SYMBOL(bignum_half_p521):
// We do a 521-bit rotation one bit right, since 2^521 == 1 (mod p_521)
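// Editorial illustration (not part of the original source): the inverse of 2
// modulo p_521 is 2^520, so x / 2 == (x >> 1) + (x & 1) * 2^520, which is
// exactly a one-bit right rotation of the 521-bit value. A rough C sketch
// using a hypothetical helper name, assuming <stdint.h> and x < p_521:
//
//      void half_p521_ref(uint64_t z[9], const uint64_t x[9])
//      {
//        uint64_t lowbit = x[0] & 1;          // bit that wraps to position 520
//        for (int i = 0; i < 8; i++)
//          z[i] = (x[i] >> 1) | (x[i + 1] << 63);
//        z[8] = (x[8] >> 1) | (lowbit << 8);  // bit 520 is bit 8 of the top limb
//      }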
ldp d0, d1, [x]
and a, d0, #1
extr d0, d1, d0, #1
ldp d2, d3, [x, #16]
extr d1, d2, d1, #1
stp d0, d1, [z]
extr d2, d3, d2, #1
ldp d4, d5, [x, #32]
extr d3, d4, d3, #1
stp d2, d3, [z, #16]
extr d4, d5, d4, #1
ldp d6, d7, [x, #48]
extr d5, d6, d5, #1
stp d4, d5, [z, #32]
extr d6, d7, d6, #1
ldr d8, [x, #64]
extr d7, d8, d7, #1
stp d6, d7, [z, #48]
lsl d8, d8, #55
extr d8, a, d8, #56
str d8, [z, #64]
// Return
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
|
marvin-hansen/iggy-streaming-system
| 27,078
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/unopt/p384_montjadd.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point addition on NIST curve P-384 in Montgomery-Jacobian coordinates
//
// extern void p384_montjadd
// (uint64_t p3[static 18],uint64_t p1[static 18],uint64_t p2[static 18]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjadd)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjadd)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 48
// Stable homes for input arguments during main code sequence
#define input_z x24
#define input_x x25
#define input_y x26
// Pointer-offset pairs for inputs and outputs
#define x_1 input_x, #0
#define y_1 input_x, #NUMSIZE
#define z_1 input_x, #(2*NUMSIZE)
#define x_2 input_y, #0
#define y_2 input_y, #NUMSIZE
#define z_2 input_y, #(2*NUMSIZE)
#define x_3 input_z, #0
#define y_3 input_z, #NUMSIZE
#define z_3 input_z, #(2*NUMSIZE)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z1sq sp, #(NUMSIZE*0)
#define ww sp, #(NUMSIZE*0)
#define resx sp, #(NUMSIZE*0)
#define yd sp, #(NUMSIZE*1)
#define y2a sp, #(NUMSIZE*1)
#define x2a sp, #(NUMSIZE*2)
#define zzx2 sp, #(NUMSIZE*2)
#define zz sp, #(NUMSIZE*3)
#define t1 sp, #(NUMSIZE*3)
#define t2 sp, #(NUMSIZE*4)
#define x1a sp, #(NUMSIZE*4)
#define zzx1 sp, #(NUMSIZE*4)
#define resy sp, #(NUMSIZE*4)
#define xd sp, #(NUMSIZE*5)
#define z2sq sp, #(NUMSIZE*5)
#define resz sp, #(NUMSIZE*5)
#define y1a sp, #(NUMSIZE*6)
#define NSPACE (NUMSIZE*7)
// Corresponds to bignum_montmul_p384_neon, with callee-save register spills
// rewritten to update sp in advance
.montmul_p384:
sub sp, sp, 48
stp x19, x20, [sp, 32]
stp x21, x22, [sp, 16]
stp x23, x24, [sp]
ldr q3, [x1]
ldr q25, [x2]
ldp x13, x23, [x2]
ldp x3, x21, [x1]
rev64 v23.4S, v25.4S
uzp1 v17.4S, v25.4S, v3.4S
umulh x15, x3, x13
mul v6.4S, v23.4S, v3.4S
uzp1 v3.4S, v3.4S, v3.4S
ldr q27, [x2, #32]
ldp x8, x24, [x1, #16]
subs x6, x3, x21
ldr q0, [x1, #32]
movi v23.2D, #0x00000000ffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4S, v27.4S
uzp2 v25.4S, v27.4S, v27.4S
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2S, v0.2D
xtn v24.2S, v27.2D
cneg x20, x7, cc
ldp x6, x14, [x2, #16]
mul v27.4S, v4.4S, v0.4S
uaddlp v20.2D, v6.4S
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4S, v0.4S, v0.4S
umull v21.2D, v22.2S, v25.2S
shl v0.2D, v20.2D, #32
umlal v0.2D, v3.2S, v17.2S
mul x22, x8, x6
umull v1.2D, v6.2S, v25.2S
subs x12, x3, x8
umull v20.2D, v22.2S, v24.2S
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2D, v20.2D, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2D, v21.2D, #32
adds x22, x15, x7
and v26.16B, v21.16B, v23.16B
adcs x16, x12, x15
uaddlp v25.2D, v27.4S
adcs x9, x19, x12
umlal v26.2D, v6.2S, v24.2S
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2D, v25.2D, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2D, v22.2S, v24.2S
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2D, v26.2D, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [x2, #32]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [x1, #32]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [x0]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [x0, #16]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [x0, #32]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [x0]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [x0, #16]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [x0, #32]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [x0]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [x0, #16]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [x0, #32]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [x0]
ldp x21, x12, [x0, #16]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [x0, #32]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [x0]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [x0, #16]
adc x12, x15, x23
stp x21, x12, [x0, #32]
ldp x23, x24, [sp]
ldp x21, x22, [sp, 16]
ldp x19, x20, [sp, 32]
add sp, sp, 48
ret
// Corresponds exactly to bignum_montsqr_p384
.montsqr_p384:
ldr q1, [x1]
ldp x9, x2, [x1]
ldr q0, [x1]
ldp x4, x6, [x1, #16]
rev64 v21.4S, v1.4S
uzp2 v28.4S, v1.4S, v1.4S
umulh x7, x9, x2
xtn v17.2S, v1.2D
mul v27.4S, v21.4S, v0.4S
ldr q20, [x1, #32]
xtn v30.2S, v0.2D
ldr q1, [x1, #32]
uzp2 v31.4S, v0.4S, v0.4S
ldp x5, x10, [x1, #32]
umulh x8, x9, x4
uaddlp v3.2D, v27.4S
umull v16.2D, v30.2S, v17.2S
mul x16, x9, x4
umull v27.2D, v30.2S, v28.2S
shrn v0.2S, v20.2D, #32
xtn v7.2S, v20.2D
shl v20.2D, v3.2D, #32
umull v3.2D, v31.2S, v28.2S
mul x3, x2, x4
umlal v20.2D, v30.2S, v17.2S
umull v22.2D, v7.2S, v0.2S
usra v27.2D, v16.2D, #32
umulh x11, x2, x4
movi v21.2D, #0x00000000ffffffff
uzp2 v28.4S, v1.4S, v1.4S
adds x15, x16, x7
and v5.16B, v27.16B, v21.16B
adcs x3, x3, x8
usra v3.2D, v27.2D, #32
dup v29.2D, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2D, v31.2S, v17.2S
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2D, v22.2D, #33
xtn v25.2S, v29.2D
rev64 v31.4S, v1.4S
lsl x13, x14, #32
uzp2 v6.4S, v29.4S, v29.4S
umlal v19.2D, v7.2S, v7.2S
usra v3.2D, v5.2D, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4S, v31.4S, v29.4S
xtn v4.2S, v1.2D
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2D, v25.2S, v28.2S
adcs x11, x16, x16
umull v21.2D, v25.2S, v4.2S
mov x17, v3.d[0]
umull v18.2D, v6.2S, v28.2S
adc x16, x8, xzr
uaddlp v16.2D, v17.4S
movi v1.2D, #0x00000000ffffffff
subs x13, x13, x12
usra v31.2D, v21.2D, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2D, v16.2D, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16B, v31.16B, v1.16B
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2D, v6.2S, v4.2S
usra v18.2D, v31.2D, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2D, v25.2S, v4.2S
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2D, v3.2D, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [x0]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [x0, #16]
csetm x15, cc
cneg x1, x1, cc
stp x11, x14, [x0, #32]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc
cinv x16, x15, cc
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc
cneg x9, x9, cc
subs x4, x2, x4
cneg x4, x4, cc
csetm x7, cc
subs x2, x10, x6
cinv x8, x8, cc
cneg x2, x2, cc
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc
cneg x1, x1, cc
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [x0, #16]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [x0]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [x0, #32]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001
adcs x14, x14, x2
mov x2, #0x1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [x0]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [x0, #16]
adc x17, x14, xzr
stp x2, x17, [x0, #32]
ret
// Corresponds exactly to bignum_sub_p384
.sub_p384:
ldp x5, x6, [x1]
ldp x4, x3, [x2]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [x1, #16]
ldp x4, x3, [x2, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [x1, #32]
ldp x4, x3, [x2, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, cc
mov x4, #0xffffffff
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [x0]
stp x7, x8, [x0, #16]
stp x9, x10, [x0, #32]
ret
#define montmul_p384(P0,P1,P2) \
add x0, P0;\
add x1, P1;\
add x2, P2;\
bl .montmul_p384
#define montsqr_p384(P0,P1) \
add x0, P0;\
add x1, P1;\
bl .montsqr_p384
#define sub_p384(P0,P1,P2) \
add x0, P0;\
add x1, P1;\
add x2, P2;\
bl .sub_p384
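// For illustration (editorial note): with the pointer-offset pairs defined
// above, a call such as
//     montsqr_p384(z1sq,z_1)
// expands to
//     add x0, sp, #(NUMSIZE*0)
//     add x1, input_x, #(2*NUMSIZE)
//     bl .montsqr_p384
// i.e. each P argument supplies a "base, #offset" pair to its add.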
S2N_BN_SYMBOL(p384_montjadd):
// Save regs and make room on stack for temporary variables
stp x19, x20, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x30, xzr, [sp, #-16]!
sub sp, sp, NSPACE
// Move the input arguments to stable places
mov input_z, x0
mov input_x, x1
mov input_y, x2
// Main code, just a sequence of basic field operations
// 12 * multiply + 4 * square + 7 * subtract
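// (Editorial summary, not from the original source, of the temporaries in
// terms of the usual Jacobian addition formulas:
//     x1a = X1*Z2^2 (= U1),  x2a = X2*Z1^2 (= U2)
//     y1a = Y1*Z2^3 (= S1),  y2a = Y2*Z1^3 (= S2)
//     xd = U2 - U1 (= H),    yd = S2 - S1 (= R)
//     resx = R^2 - H^2*(U1 + U2), which equals the textbook
//            R^2 - H^3 - 2*U1*H^2 since H = U2 - U1
//     resy = R*(U1*H^2 - resx) - S1*H^3
//     resz = H*Z1*Z2)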
montsqr_p384(z1sq,z_1)
montsqr_p384(z2sq,z_2)
montmul_p384(y1a,z_2,y_1)
montmul_p384(y2a,z_1,y_2)
montmul_p384(x2a,z1sq,x_2)
montmul_p384(x1a,z2sq,x_1)
montmul_p384(y2a,z1sq,y2a)
montmul_p384(y1a,z2sq,y1a)
sub_p384(xd,x2a,x1a)
sub_p384(yd,y2a,y1a)
montsqr_p384(zz,xd)
montsqr_p384(ww,yd)
montmul_p384(zzx1,zz,x1a)
montmul_p384(zzx2,zz,x2a)
sub_p384(resx,ww,zzx1)
sub_p384(t1,zzx2,zzx1)
montmul_p384(xd,xd,z_1)
sub_p384(resx,resx,zzx2)
sub_p384(t2,zzx1,resx)
montmul_p384(t1,t1,y1a)
montmul_p384(resz,xd,z_2)
montmul_p384(t2,yd,t2)
sub_p384(resy,t2,t1)
// Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0
// The condition codes get set by a comparison (P2 != 0) - (P1 != 0)
// So "HI" <=> CF /\ ~ZF <=> P1 = 0 /\ ~(P2 = 0)
// and "LO" <=> ~CF <=> ~(P1 = 0) /\ P2 = 0
ldp x0, x1, [z_1]
ldp x2, x3, [z_1+16]
ldp x4, x5, [z_1+32]
orr x20, x0, x1
orr x21, x2, x3
orr x22, x4, x5
orr x20, x20, x21
orr x20, x20, x22
cmp x20, xzr
cset x20, ne
ldp x6, x7, [z_2]
ldp x8, x9, [z_2+16]
ldp x10, x11, [z_2+32]
orr x21, x6, x7
orr x22, x8, x9
orr x23, x10, x11
orr x21, x21, x22
orr x21, x21, x23
cmp x21, xzr
cset x21, ne
cmp x21, x20
// Multiplex the outputs accordingly, re-using the z's in registers
ldp x12, x13, [resz]
csel x12, x0, x12, lo
csel x13, x1, x13, lo
csel x12, x6, x12, hi
csel x13, x7, x13, hi
ldp x14, x15, [resz+16]
csel x14, x2, x14, lo
csel x15, x3, x15, lo
csel x14, x8, x14, hi
csel x15, x9, x15, hi
ldp x16, x17, [resz+32]
csel x16, x4, x16, lo
csel x17, x5, x17, lo
csel x16, x10, x16, hi
csel x17, x11, x17, hi
ldp x20, x21, [x_1]
ldp x0, x1, [resx]
csel x0, x20, x0, lo
csel x1, x21, x1, lo
ldp x20, x21, [x_2]
csel x0, x20, x0, hi
csel x1, x21, x1, hi
ldp x20, x21, [x_1+16]
ldp x2, x3, [resx+16]
csel x2, x20, x2, lo
csel x3, x21, x3, lo
ldp x20, x21, [x_2+16]
csel x2, x20, x2, hi
csel x3, x21, x3, hi
ldp x20, x21, [x_1+32]
ldp x4, x5, [resx+32]
csel x4, x20, x4, lo
csel x5, x21, x5, lo
ldp x20, x21, [x_2+32]
csel x4, x20, x4, hi
csel x5, x21, x5, hi
ldp x20, x21, [y_1]
ldp x6, x7, [resy]
csel x6, x20, x6, lo
csel x7, x21, x7, lo
ldp x20, x21, [y_2]
csel x6, x20, x6, hi
csel x7, x21, x7, hi
ldp x20, x21, [y_1+16]
ldp x8, x9, [resy+16]
csel x8, x20, x8, lo
csel x9, x21, x9, lo
ldp x20, x21, [y_2+16]
csel x8, x20, x8, hi
csel x9, x21, x9, hi
ldp x20, x21, [y_1+32]
ldp x10, x11, [resy+32]
csel x10, x20, x10, lo
csel x11, x21, x11, lo
ldp x20, x21, [y_2+32]
csel x10, x20, x10, hi
csel x11, x21, x11, hi
// Finally store back the multiplexed values
stp x0, x1, [x_3]
stp x2, x3, [x_3+16]
stp x4, x5, [x_3+32]
stp x6, x7, [y_3]
stp x8, x9, [y_3+16]
stp x10, x11, [y_3+32]
stp x12, x13, [z_3]
stp x14, x15, [z_3+16]
stp x16, x17, [z_3+32]
// Restore stack and registers
add sp, sp, NSPACE
ldp x30, xzr, [sp], 16
ldp x25, x26, [sp], 16
ldp x23, x24, [sp], 16
ldp x21, x22, [sp], 16
ldp x19, x20, [sp], 16
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
|
marvin-hansen/iggy-streaming-system
| 36,523
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/arm/p384/unopt/p384_montjdouble.S
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0
// ----------------------------------------------------------------------------
// Point doubling on NIST curve P-384 in Montgomery-Jacobian coordinates
//
// extern void p384_montjdouble
// (uint64_t p3[static 18],uint64_t p1[static 18]);
//
// Does p3 := 2 * p1 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard ARM ABI: X0 = p3, X1 = p1
// ----------------------------------------------------------------------------
#include "_internal_s2n_bignum.h"
S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjdouble)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjdouble)
.text
.balign 4
// Size of individual field elements
#define NUMSIZE 48
// Stable homes for input arguments during main code sequence
#define input_z x25
#define input_x x26
// Pointer-offset pairs for inputs and outputs
#define x_1 input_x, #0
#define y_1 input_x, #NUMSIZE
#define z_1 input_x, #(2*NUMSIZE)
#define x_3 input_z, #0
#define y_3 input_z, #NUMSIZE
#define z_3 input_z, #(2*NUMSIZE)
// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries
#define z2 sp, #(NUMSIZE*0)
#define y2 sp, #(NUMSIZE*1)
#define x2p sp, #(NUMSIZE*2)
#define xy2 sp, #(NUMSIZE*3)
#define y4 sp, #(NUMSIZE*4)
#define t2 sp, #(NUMSIZE*4)
#define dx2 sp, #(NUMSIZE*5)
#define t1 sp, #(NUMSIZE*5)
#define d_ sp, #(NUMSIZE*6)
#define x4p sp, #(NUMSIZE*6)
#define NSPACE #(NUMSIZE*7)
// Corresponds exactly to bignum_montmul_p384_neon
.montmul_p384:
sub sp, sp, 48
stp x19, x20, [sp, 32]
stp x21, x22, [sp, 16]
stp x23, x24, [sp]
ldr q3, [x1]
ldr q25, [x2]
ldp x13, x23, [x2]
ldp x3, x21, [x1]
rev64 v23.4S, v25.4S
uzp1 v17.4S, v25.4S, v3.4S
umulh x15, x3, x13
mul v6.4S, v23.4S, v3.4S
uzp1 v3.4S, v3.4S, v3.4S
ldr q27, [x2, #32]
ldp x8, x24, [x1, #16]
subs x6, x3, x21
ldr q0, [x1, #32]
movi v23.2D, #0x00000000ffffffff
csetm x10, cc
umulh x19, x21, x23
rev64 v4.4S, v27.4S
uzp2 v25.4S, v27.4S, v27.4S
cneg x4, x6, cc
subs x7, x23, x13
xtn v22.2S, v0.2D
xtn v24.2S, v27.2D
cneg x20, x7, cc
ldp x6, x14, [x2, #16]
mul v27.4S, v4.4S, v0.4S
uaddlp v20.2D, v6.4S
cinv x5, x10, cc
mul x16, x4, x20
uzp2 v6.4S, v0.4S, v0.4S
umull v21.2D, v22.2S, v25.2S
shl v0.2D, v20.2D, #32
umlal v0.2D, v3.2S, v17.2S
mul x22, x8, x6
umull v1.2D, v6.2S, v25.2S
subs x12, x3, x8
umull v20.2D, v22.2S, v24.2S
cneg x17, x12, cc
umulh x9, x8, x6
mov x12, v0.d[1]
eor x11, x16, x5
mov x7, v0.d[0]
csetm x10, cc
usra v21.2D, v20.2D, #32
adds x15, x15, x12
adcs x12, x19, x22
umulh x20, x4, x20
adc x19, x9, xzr
usra v1.2D, v21.2D, #32
adds x22, x15, x7
and v26.16B, v21.16B, v23.16B
adcs x16, x12, x15
uaddlp v25.2D, v27.4S
adcs x9, x19, x12
umlal v26.2D, v6.2S, v24.2S
adc x4, x19, xzr
adds x16, x16, x7
shl v27.2D, v25.2D, #32
adcs x9, x9, x15
adcs x4, x4, x12
eor x12, x20, x5
adc x15, x19, xzr
subs x20, x6, x13
cneg x20, x20, cc
cinv x10, x10, cc
cmn x5, #0x1
mul x19, x17, x20
adcs x11, x22, x11
adcs x12, x16, x12
adcs x9, x9, x5
umulh x17, x17, x20
adcs x22, x4, x5
adc x5, x15, x5
subs x16, x21, x8
cneg x20, x16, cc
eor x19, x19, x10
csetm x4, cc
subs x16, x6, x23
cneg x16, x16, cc
umlal v27.2D, v22.2S, v24.2S
mul x15, x20, x16
cinv x4, x4, cc
cmn x10, #0x1
usra v1.2D, v26.2D, #32
adcs x19, x12, x19
eor x17, x17, x10
adcs x9, x9, x17
adcs x22, x22, x10
lsl x12, x7, #32
umulh x20, x20, x16
eor x16, x15, x4
ldp x15, x17, [x2, #32]
add x2, x12, x7
adc x7, x5, x10
ldp x5, x10, [x1, #32]
lsr x1, x2, #32
eor x12, x20, x4
subs x1, x1, x2
sbc x20, x2, xzr
cmn x4, #0x1
adcs x9, x9, x16
extr x1, x20, x1, #32
lsr x20, x20, #32
adcs x22, x22, x12
adc x16, x7, x4
adds x12, x20, x2
umulh x7, x24, x14
adc x4, xzr, xzr
subs x1, x11, x1
sbcs x20, x19, x12
sbcs x12, x9, x4
lsl x9, x1, #32
add x1, x9, x1
sbcs x9, x22, xzr
mul x22, x24, x14
sbcs x16, x16, xzr
lsr x4, x1, #32
sbc x19, x2, xzr
subs x4, x4, x1
sbc x11, x1, xzr
extr x2, x11, x4, #32
lsr x4, x11, #32
adds x4, x4, x1
adc x11, xzr, xzr
subs x2, x20, x2
sbcs x4, x12, x4
sbcs x20, x9, x11
lsl x12, x2, #32
add x2, x12, x2
sbcs x9, x16, xzr
lsr x11, x2, #32
sbcs x19, x19, xzr
sbc x1, x1, xzr
subs x16, x11, x2
sbc x12, x2, xzr
extr x16, x12, x16, #32
lsr x12, x12, #32
adds x11, x12, x2
adc x12, xzr, xzr
subs x16, x4, x16
mov x4, v27.d[0]
sbcs x11, x20, x11
sbcs x20, x9, x12
stp x16, x11, [x0]
sbcs x11, x19, xzr
sbcs x9, x1, xzr
stp x20, x11, [x0, #16]
mov x1, v1.d[0]
sbc x20, x2, xzr
subs x12, x24, x5
mov x11, v27.d[1]
cneg x16, x12, cc
csetm x2, cc
subs x19, x15, x14
mov x12, v1.d[1]
cinv x2, x2, cc
cneg x19, x19, cc
stp x9, x20, [x0, #32]
mul x9, x16, x19
adds x4, x7, x4
adcs x11, x1, x11
adc x1, x12, xzr
adds x20, x4, x22
umulh x19, x16, x19
adcs x7, x11, x4
eor x16, x9, x2
adcs x9, x1, x11
adc x12, x1, xzr
adds x7, x7, x22
adcs x4, x9, x4
adcs x9, x12, x11
adc x12, x1, xzr
cmn x2, #0x1
eor x1, x19, x2
adcs x11, x20, x16
adcs x19, x7, x1
adcs x1, x4, x2
adcs x20, x9, x2
adc x2, x12, x2
subs x12, x24, x10
cneg x16, x12, cc
csetm x12, cc
subs x9, x17, x14
cinv x12, x12, cc
cneg x9, x9, cc
subs x3, x24, x3
sbcs x21, x5, x21
mul x24, x16, x9
sbcs x4, x10, x8
ngc x8, xzr
subs x10, x5, x10
eor x5, x24, x12
csetm x7, cc
cneg x24, x10, cc
subs x10, x17, x15
cinv x7, x7, cc
cneg x10, x10, cc
subs x14, x13, x14
sbcs x15, x23, x15
eor x13, x21, x8
mul x23, x24, x10
sbcs x17, x6, x17
eor x6, x3, x8
ngc x21, xzr
umulh x9, x16, x9
cmn x8, #0x1
eor x3, x23, x7
adcs x23, x6, xzr
adcs x13, x13, xzr
eor x16, x4, x8
adc x16, x16, xzr
eor x4, x17, x21
umulh x17, x24, x10
cmn x21, #0x1
eor x24, x14, x21
eor x6, x15, x21
adcs x15, x24, xzr
adcs x14, x6, xzr
adc x6, x4, xzr
cmn x12, #0x1
eor x4, x9, x12
adcs x19, x19, x5
umulh x5, x23, x15
adcs x1, x1, x4
adcs x10, x20, x12
eor x4, x17, x7
ldp x20, x9, [x0]
adc x2, x2, x12
cmn x7, #0x1
adcs x12, x1, x3
ldp x17, x24, [x0, #16]
mul x1, x16, x6
adcs x3, x10, x4
adc x2, x2, x7
ldp x7, x4, [x0, #32]
adds x20, x22, x20
mul x10, x13, x14
adcs x11, x11, x9
eor x9, x8, x21
adcs x21, x19, x17
stp x20, x11, [x0]
adcs x12, x12, x24
mul x8, x23, x15
adcs x3, x3, x7
stp x21, x12, [x0, #16]
adcs x12, x2, x4
adc x19, xzr, xzr
subs x21, x23, x16
umulh x2, x16, x6
stp x3, x12, [x0, #32]
cneg x3, x21, cc
csetm x24, cc
umulh x11, x13, x14
subs x21, x13, x16
eor x7, x8, x9
cneg x17, x21, cc
csetm x16, cc
subs x21, x6, x15
cneg x22, x21, cc
cinv x21, x24, cc
subs x20, x23, x13
umulh x12, x3, x22
cneg x23, x20, cc
csetm x24, cc
subs x20, x14, x15
cinv x24, x24, cc
mul x22, x3, x22
cneg x3, x20, cc
subs x13, x6, x14
cneg x20, x13, cc
cinv x15, x16, cc
adds x13, x5, x10
mul x4, x23, x3
adcs x11, x11, x1
adc x14, x2, xzr
adds x5, x13, x8
adcs x16, x11, x13
umulh x23, x23, x3
adcs x3, x14, x11
adc x1, x14, xzr
adds x10, x16, x8
adcs x6, x3, x13
adcs x8, x1, x11
umulh x13, x17, x20
eor x1, x4, x24
adc x4, x14, xzr
cmn x24, #0x1
adcs x1, x5, x1
eor x16, x23, x24
eor x11, x1, x9
adcs x23, x10, x16
eor x2, x22, x21
adcs x3, x6, x24
mul x14, x17, x20
eor x17, x13, x15
adcs x13, x8, x24
adc x8, x4, x24
cmn x21, #0x1
adcs x6, x23, x2
mov x16, #0xfffffffffffffffe
eor x20, x12, x21
adcs x20, x3, x20
eor x23, x14, x15
adcs x2, x13, x21
adc x8, x8, x21
cmn x15, #0x1
ldp x5, x4, [x0]
ldp x21, x12, [x0, #16]
adcs x22, x20, x23
eor x23, x22, x9
adcs x17, x2, x17
adc x22, x8, x15
cmn x9, #0x1
adcs x15, x7, x5
ldp x10, x14, [x0, #32]
eor x1, x6, x9
lsl x2, x15, #32
adcs x8, x11, x4
adcs x13, x1, x21
eor x1, x22, x9
adcs x24, x23, x12
eor x11, x17, x9
adcs x23, x11, x10
adcs x7, x1, x14
adcs x17, x9, x19
adcs x20, x9, xzr
add x1, x2, x15
lsr x3, x1, #32
adcs x11, x9, xzr
adc x9, x9, xzr
subs x3, x3, x1
sbc x6, x1, xzr
adds x24, x24, x5
adcs x4, x23, x4
extr x3, x6, x3, #32
lsr x6, x6, #32
adcs x21, x7, x21
adcs x15, x17, x12
adcs x7, x20, x10
adcs x20, x11, x14
mov x14, #0xffffffff
adc x22, x9, x19
adds x12, x6, x1
adc x10, xzr, xzr
subs x3, x8, x3
sbcs x12, x13, x12
lsl x9, x3, #32
add x3, x9, x3
sbcs x10, x24, x10
sbcs x24, x4, xzr
lsr x9, x3, #32
sbcs x21, x21, xzr
sbc x1, x1, xzr
subs x9, x9, x3
sbc x13, x3, xzr
extr x9, x13, x9, #32
lsr x13, x13, #32
adds x13, x13, x3
adc x6, xzr, xzr
subs x12, x12, x9
sbcs x17, x10, x13
lsl x2, x12, #32
sbcs x10, x24, x6
add x9, x2, x12
sbcs x6, x21, xzr
lsr x5, x9, #32
sbcs x21, x1, xzr
sbc x13, x3, xzr
subs x8, x5, x9
sbc x19, x9, xzr
lsr x12, x19, #32
extr x3, x19, x8, #32
adds x8, x12, x9
adc x1, xzr, xzr
subs x2, x17, x3
sbcs x12, x10, x8
sbcs x5, x6, x1
sbcs x3, x21, xzr
sbcs x19, x13, xzr
sbc x24, x9, xzr
adds x23, x15, x3
adcs x8, x7, x19
adcs x11, x20, x24
adc x9, x22, xzr
add x24, x9, #0x1
lsl x7, x24, #32
subs x21, x24, x7
sbc x10, x7, xzr
adds x6, x2, x21
adcs x7, x12, x10
adcs x24, x5, x24
adcs x13, x23, xzr
adcs x8, x8, xzr
adcs x15, x11, xzr
csetm x23, cc
and x11, x16, x23
and x20, x14, x23
adds x22, x6, x20
eor x3, x20, x23
adcs x5, x7, x3
adcs x14, x24, x11
stp x22, x5, [x0]
adcs x5, x13, x23
adcs x21, x8, x23
stp x14, x5, [x0, #16]
adc x12, x15, x23
stp x21, x12, [x0, #32]
ldp x23, x24, [sp]
ldp x21, x22, [sp, 16]
ldp x19, x20, [sp, 32]
add sp, sp, 48
ret
// Corresponds exactly to bignum_montsqr_p384
.montsqr_p384:
ldr q1, [x1]
ldp x9, x2, [x1]
ldr q0, [x1]
ldp x4, x6, [x1, #16]
rev64 v21.4S, v1.4S
uzp2 v28.4S, v1.4S, v1.4S
umulh x7, x9, x2
xtn v17.2S, v1.2D
mul v27.4S, v21.4S, v0.4S
ldr q20, [x1, #32]
xtn v30.2S, v0.2D
ldr q1, [x1, #32]
uzp2 v31.4S, v0.4S, v0.4S
ldp x5, x10, [x1, #32]
umulh x8, x9, x4
uaddlp v3.2D, v27.4S
umull v16.2D, v30.2S, v17.2S
mul x16, x9, x4
umull v27.2D, v30.2S, v28.2S
shrn v0.2S, v20.2D, #32
xtn v7.2S, v20.2D
shl v20.2D, v3.2D, #32
umull v3.2D, v31.2S, v28.2S
mul x3, x2, x4
umlal v20.2D, v30.2S, v17.2S
umull v22.2D, v7.2S, v0.2S
usra v27.2D, v16.2D, #32
umulh x11, x2, x4
movi v21.2D, #0x00000000ffffffff
uzp2 v28.4S, v1.4S, v1.4S
adds x15, x16, x7
and v5.16B, v27.16B, v21.16B
adcs x3, x3, x8
usra v3.2D, v27.2D, #32
dup v29.2D, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2D, v31.2S, v17.2S
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2D, v22.2D, #33
xtn v25.2S, v29.2D
rev64 v31.4S, v1.4S
lsl x13, x14, #32
uzp2 v6.4S, v29.4S, v29.4S
umlal v19.2D, v7.2S, v7.2S
usra v3.2D, v5.2D, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4S, v31.4S, v29.4S
xtn v4.2S, v1.2D
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2D, v25.2S, v28.2S
adcs x11, x16, x16
umull v21.2D, v25.2S, v4.2S
mov x17, v3.d[0]
umull v18.2D, v6.2S, v28.2S
adc x16, x8, xzr
uaddlp v16.2D, v17.4S
movi v1.2D, #0x00000000ffffffff
subs x13, x13, x12
usra v31.2D, v21.2D, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2D, v16.2D, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16B, v31.16B, v1.16B
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2D, v6.2S, v4.2S
usra v18.2D, v31.2D, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2D, v25.2S, v4.2S
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2D, v3.2D, #32
sbcs x14, x11, xzr
lsr x1, x8, #32
sbcs x17, x3, xzr
sbc x11, x12, xzr
subs x13, x1, x8
umulh x12, x4, x10
sbc x1, x8, xzr
extr x13, x1, x13, #32
lsr x1, x1, #32
adds x15, x1, x8
adc x1, xzr, xzr
subs x7, x7, x13
sbcs x13, x16, x15
lsl x3, x7, #32
umulh x16, x2, x5
sbcs x15, x14, x1
add x7, x3, x7
sbcs x3, x17, xzr
lsr x1, x7, #32
sbcs x14, x11, xzr
sbc x11, x8, xzr
subs x8, x1, x7
sbc x1, x7, xzr
extr x8, x1, x8, #32
lsr x1, x1, #32
adds x1, x1, x7
adc x17, xzr, xzr
subs x13, x13, x8
umulh x8, x9, x6
sbcs x1, x15, x1
sbcs x15, x3, x17
sbcs x3, x14, xzr
mul x17, x2, x5
sbcs x11, x11, xzr
stp x13, x1, [x0]
sbc x14, x7, xzr
mul x7, x4, x10
subs x1, x9, x2
stp x15, x3, [x0, #16]
csetm x15, cc
cneg x1, x1, cc
stp x11, x14, [x0, #32]
mul x14, x9, x6
adds x17, x8, x17
adcs x7, x16, x7
adc x13, x12, xzr
subs x12, x5, x6
cneg x3, x12, cc
cinv x16, x15, cc
mul x8, x1, x3
umulh x1, x1, x3
eor x12, x8, x16
adds x11, x17, x14
adcs x3, x7, x17
adcs x15, x13, x7
adc x8, x13, xzr
adds x3, x3, x14
adcs x15, x15, x17
adcs x17, x8, x7
eor x1, x1, x16
adc x13, x13, xzr
subs x9, x9, x4
csetm x8, cc
cneg x9, x9, cc
subs x4, x2, x4
cneg x4, x4, cc
csetm x7, cc
subs x2, x10, x6
cinv x8, x8, cc
cneg x2, x2, cc
cmn x16, #0x1
adcs x11, x11, x12
mul x12, x9, x2
adcs x3, x3, x1
adcs x15, x15, x16
umulh x9, x9, x2
adcs x17, x17, x16
adc x13, x13, x16
subs x1, x10, x5
cinv x2, x7, cc
cneg x1, x1, cc
eor x9, x9, x8
cmn x8, #0x1
eor x7, x12, x8
mul x12, x4, x1
adcs x3, x3, x7
adcs x7, x15, x9
adcs x15, x17, x8
ldp x9, x17, [x0, #16]
umulh x4, x4, x1
adc x8, x13, x8
cmn x2, #0x1
eor x1, x12, x2
adcs x1, x7, x1
ldp x7, x16, [x0]
eor x12, x4, x2
adcs x4, x15, x12
ldp x15, x12, [x0, #32]
adc x8, x8, x2
adds x13, x14, x14
umulh x14, x5, x10
adcs x2, x11, x11
adcs x3, x3, x3
adcs x1, x1, x1
adcs x4, x4, x4
adcs x11, x8, x8
adc x8, xzr, xzr
adds x13, x13, x7
adcs x2, x2, x16
mul x16, x5, x10
adcs x3, x3, x9
adcs x1, x1, x17
umulh x5, x5, x5
lsl x9, x13, #32
add x9, x9, x13
adcs x4, x4, x15
mov x13, v28.d[1]
adcs x15, x11, x12
lsr x7, x9, #32
adc x11, x8, xzr
subs x7, x7, x9
umulh x10, x10, x10
sbc x17, x9, xzr
extr x7, x17, x7, #32
lsr x17, x17, #32
adds x17, x17, x9
adc x12, xzr, xzr
subs x8, x2, x7
sbcs x17, x3, x17
lsl x7, x8, #32
sbcs x2, x1, x12
add x3, x7, x8
sbcs x12, x4, xzr
lsr x1, x3, #32
sbcs x7, x15, xzr
sbc x15, x9, xzr
subs x1, x1, x3
sbc x4, x3, xzr
lsr x9, x4, #32
extr x8, x4, x1, #32
adds x9, x9, x3
adc x4, xzr, xzr
subs x1, x17, x8
lsl x17, x1, #32
sbcs x8, x2, x9
sbcs x9, x12, x4
add x17, x17, x1
mov x1, v18.d[1]
lsr x2, x17, #32
sbcs x7, x7, xzr
mov x12, v18.d[0]
sbcs x15, x15, xzr
sbc x3, x3, xzr
subs x4, x2, x17
sbc x2, x17, xzr
adds x12, x13, x12
adcs x16, x16, x1
lsr x13, x2, #32
extr x1, x2, x4, #32
adc x2, x14, xzr
adds x4, x13, x17
mul x13, x6, x6
adc x14, xzr, xzr
subs x1, x8, x1
sbcs x4, x9, x4
mov x9, v28.d[0]
sbcs x7, x7, x14
sbcs x8, x15, xzr
sbcs x3, x3, xzr
sbc x14, x17, xzr
adds x17, x9, x9
adcs x12, x12, x12
mov x15, v19.d[0]
adcs x9, x16, x16
umulh x6, x6, x6
adcs x16, x2, x2
adc x2, xzr, xzr
adds x11, x11, x8
adcs x3, x3, xzr
adcs x14, x14, xzr
adcs x8, xzr, xzr
adds x13, x1, x13
mov x1, v19.d[1]
adcs x6, x4, x6
mov x4, #0xffffffff
adcs x15, x7, x15
adcs x7, x11, x5
adcs x1, x3, x1
adcs x14, x14, x10
adc x11, x8, xzr
adds x6, x6, x17
adcs x8, x15, x12
adcs x3, x7, x9
adcs x15, x1, x16
mov x16, #0xffffffff00000001
adcs x14, x14, x2
mov x2, #0x1
adc x17, x11, xzr
cmn x13, x16
adcs xzr, x6, x4
adcs xzr, x8, x2
adcs xzr, x3, xzr
adcs xzr, x15, xzr
adcs xzr, x14, xzr
adc x1, x17, xzr
neg x9, x1
and x1, x16, x9
adds x11, x13, x1
and x13, x4, x9
adcs x5, x6, x13
and x1, x2, x9
adcs x7, x8, x1
stp x11, x5, [x0]
adcs x11, x3, xzr
adcs x2, x15, xzr
stp x7, x11, [x0, #16]
adc x17, x14, xzr
stp x2, x17, [x0, #32]
ret
// Corresponds exactly to bignum_sub_p384
.sub_p384:
ldp x5, x6, [x1]
ldp x4, x3, [x2]
subs x5, x5, x4
sbcs x6, x6, x3
ldp x7, x8, [x1, #16]
ldp x4, x3, [x2, #16]
sbcs x7, x7, x4
sbcs x8, x8, x3
ldp x9, x10, [x1, #32]
ldp x4, x3, [x2, #32]
sbcs x9, x9, x4
sbcs x10, x10, x3
csetm x3, cc
mov x4, #0xffffffff
and x4, x4, x3
adds x5, x5, x4
eor x4, x4, x3
adcs x6, x6, x4
mov x4, #0xfffffffffffffffe
and x4, x4, x3
adcs x7, x7, x4
adcs x8, x8, x3
adcs x9, x9, x3
adc x10, x10, x3
stp x5, x6, [x0]
stp x7, x8, [x0, #16]
stp x9, x10, [x0, #32]
ret
// Corresponds exactly to bignum_add_p384
.add_p384:
ldp x5, x6, [x1]
ldp x4, x3, [x2]
adds x5, x5, x4
adcs x6, x6, x3
ldp x7, x8, [x1, #16]
ldp x4, x3, [x2, #16]
adcs x7, x7, x4
adcs x8, x8, x3
ldp x9, x10, [x1, #32]
ldp x4, x3, [x2, #32]
adcs x9, x9, x4
adcs x10, x10, x3
adc x3, xzr, xzr
mov x4, #0xffffffff
cmp x5, x4
mov x4, #0xffffffff00000000
sbcs xzr, x6, x4
mov x4, #0xfffffffffffffffe
sbcs xzr, x7, x4
adcs xzr, x8, xzr
adcs xzr, x9, xzr
adcs xzr, x10, xzr
adcs x3, x3, xzr
csetm x3, ne
mov x4, #0xffffffff
and x4, x4, x3
subs x5, x5, x4
eor x4, x4, x3
sbcs x6, x6, x4
mov x4, #0xfffffffffffffffe
and x4, x4, x3
sbcs x7, x7, x4
sbcs x8, x8, x3
sbcs x9, x9, x3
sbc x10, x10, x3
stp x5, x6, [x0]
stp x7, x8, [x0, #16]
stp x9, x10, [x0, #32]
ret
#define montmul_p384(P0,P1,P2) \
add x0, P0;\
add x1, P1;\
add x2, P2;\
bl .montmul_p384
#define montsqr_p384(P0,P1) \
add x0, P0;\
add x1, P1;\
bl .montsqr_p384
#define sub_p384(P0,P1,P2) \
add x0, P0;\
add x1, P1;\
add x2, P2;\
bl .sub_p384
#define add_p384(P0,P1,P2) \
add x0, P0;\
add x1, P1;\
add x2, P2;\
bl .add_p384
// P0 = 4 * P1 - P2
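// Sketch: 4 * P1 is built with 2-bit shifts across the limbs (lsl/extr by 62),
// P2 is subtracted with borrow, the resulting top word is folded back in via
// 2^384 == 2^128 + 2^96 - 2^32 + 1 (mod p_384), and a final addition of p_384,
// masked on the carry flag, keeps the result within 6 limbs.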
#define cmsub41_p384(P0,P1,P2) \
ldp x1, x2, [P1]; \
ldp x3, x4, [P1+16]; \
ldp x5, x6, [P1+32]; \
lsl x0, x1, #2; \
ldp x7, x8, [P2]; \
subs x0, x0, x7; \
extr x1, x2, x1, #62; \
sbcs x1, x1, x8; \
ldp x7, x8, [P2+16]; \
extr x2, x3, x2, #62; \
sbcs x2, x2, x7; \
extr x3, x4, x3, #62; \
sbcs x3, x3, x8; \
extr x4, x5, x4, #62; \
ldp x7, x8, [P2+32]; \
sbcs x4, x4, x7; \
extr x5, x6, x5, #62; \
sbcs x5, x5, x8; \
lsr x6, x6, #62; \
adc x6, x6, xzr; \
lsl x7, x6, #32; \
subs x8, x6, x7; \
sbc x7, x7, xzr; \
adds x0, x0, x8; \
adcs x1, x1, x7; \
adcs x2, x2, x6; \
adcs x3, x3, xzr; \
adcs x4, x4, xzr; \
adcs x5, x5, xzr; \
csetm x8, cc; \
mov x9, #0xffffffff; \
and x9, x9, x8; \
adds x0, x0, x9; \
eor x9, x9, x8; \
adcs x1, x1, x9; \
mov x9, #0xfffffffffffffffe; \
and x9, x9, x8; \
adcs x2, x2, x9; \
adcs x3, x3, x8; \
adcs x4, x4, x8; \
adc x5, x5, x8; \
stp x0, x1, [P0]; \
stp x2, x3, [P0+16]; \
stp x4, x5, [P0+32]
// P0 = C * P1 - D * P2
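// Sketch: P2 is first subtracted limb-wise from p_384 (the constants below are
// exactly the limbs of p_384), the difference is scaled by D with mul/umulh,
// C * P1 is accumulated on top, and the top word is then folded back via
// 2^384 == 2^128 + 2^96 - 2^32 + 1 (mod p_384) with the same masked final
// correction as in cmsub41_p384.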
#define cmsub_p384(P0,C,P1,D,P2) \
ldp x0, x1, [P2]; \
mov x6, #0x00000000ffffffff; \
subs x6, x6, x0; \
mov x7, #0xffffffff00000000; \
sbcs x7, x7, x1; \
ldp x0, x1, [P2+16]; \
mov x8, #0xfffffffffffffffe; \
sbcs x8, x8, x0; \
mov x13, #0xffffffffffffffff; \
sbcs x9, x13, x1; \
ldp x0, x1, [P2+32]; \
sbcs x10, x13, x0; \
sbc x11, x13, x1; \
mov x12, D; \
mul x0, x12, x6; \
mul x1, x12, x7; \
mul x2, x12, x8; \
mul x3, x12, x9; \
mul x4, x12, x10; \
mul x5, x12, x11; \
umulh x6, x12, x6; \
umulh x7, x12, x7; \
umulh x8, x12, x8; \
umulh x9, x12, x9; \
umulh x10, x12, x10; \
umulh x12, x12, x11; \
adds x1, x1, x6; \
adcs x2, x2, x7; \
adcs x3, x3, x8; \
adcs x4, x4, x9; \
adcs x5, x5, x10; \
mov x6, #1; \
adc x6, x12, x6; \
ldp x8, x9, [P1]; \
ldp x10, x11, [P1+16]; \
ldp x12, x13, [P1+32]; \
mov x14, C; \
mul x15, x14, x8; \
umulh x8, x14, x8; \
adds x0, x0, x15; \
mul x15, x14, x9; \
umulh x9, x14, x9; \
adcs x1, x1, x15; \
mul x15, x14, x10; \
umulh x10, x14, x10; \
adcs x2, x2, x15; \
mul x15, x14, x11; \
umulh x11, x14, x11; \
adcs x3, x3, x15; \
mul x15, x14, x12; \
umulh x12, x14, x12; \
adcs x4, x4, x15; \
mul x15, x14, x13; \
umulh x13, x14, x13; \
adcs x5, x5, x15; \
adc x6, x6, xzr; \
adds x1, x1, x8; \
adcs x2, x2, x9; \
adcs x3, x3, x10; \
adcs x4, x4, x11; \
adcs x5, x5, x12; \
adcs x6, x6, x13; \
lsl x7, x6, #32; \
subs x8, x6, x7; \
sbc x7, x7, xzr; \
adds x0, x0, x8; \
adcs x1, x1, x7; \
adcs x2, x2, x6; \
adcs x3, x3, xzr; \
adcs x4, x4, xzr; \
adcs x5, x5, xzr; \
csetm x6, cc; \
mov x7, #0xffffffff; \
and x7, x7, x6; \
adds x0, x0, x7; \
eor x7, x7, x6; \
adcs x1, x1, x7; \
mov x7, #0xfffffffffffffffe; \
and x7, x7, x6; \
adcs x2, x2, x7; \
adcs x3, x3, x6; \
adcs x4, x4, x6; \
adc x5, x5, x6; \
stp x0, x1, [P0]; \
stp x2, x3, [P0+16]; \
stp x4, x5, [P0+32]
// A weak version of add that only guarantees sum in 6 digits
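// Sketch: a plain 384-bit add; if it carries out of 6 limbs (cs), p_384 is
// subtracted once using its limbs as masked constants, so the sum fits back
// into 6 limbs but is not necessarily fully reduced.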
#define weakadd_p384(P0,P1,P2) \
ldp x5, x6, [P1]; \
ldp x4, x3, [P2]; \
adds x5, x5, x4; \
adcs x6, x6, x3; \
ldp x7, x8, [P1+16]; \
ldp x4, x3, [P2+16]; \
adcs x7, x7, x4; \
adcs x8, x8, x3; \
ldp x9, x10, [P1+32]; \
ldp x4, x3, [P2+32]; \
adcs x9, x9, x4; \
adcs x10, x10, x3; \
csetm x3, cs; \
mov x4, #0xffffffff; \
and x4, x4, x3; \
subs x5, x5, x4; \
eor x4, x4, x3; \
sbcs x6, x6, x4; \
mov x4, #0xfffffffffffffffe; \
and x4, x4, x3; \
sbcs x7, x7, x4; \
sbcs x8, x8, x3; \
sbcs x9, x9, x3; \
sbc x10, x10, x3; \
stp x5, x6, [P0]; \
stp x7, x8, [P0+16]; \
stp x9, x10, [P0+32]
// P0 = 3 * P1 - 8 * P2
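// Sketch: as in cmsub_p384, P2 is negated limb-wise against p_384, but the
// factor 8 comes from 3-bit shifts (lsl/extr by 61) rather than a multiply;
// 3 * P1 is accumulated with mul/umulh, the top word is folded back via
// 2^384 == 2^128 + 2^96 - 2^32 + 1, and the masked final correction follows.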
#define cmsub38_p384(P0,P1,P2) \
ldp x0, x1, [P2]; \
mov x6, #0x00000000ffffffff; \
subs x6, x6, x0; \
mov x7, #0xffffffff00000000; \
sbcs x7, x7, x1; \
ldp x0, x1, [P2+16]; \
mov x8, #0xfffffffffffffffe; \
sbcs x8, x8, x0; \
mov x13, #0xffffffffffffffff; \
sbcs x9, x13, x1; \
ldp x0, x1, [P2+32]; \
sbcs x10, x13, x0; \
sbc x11, x13, x1; \
lsl x0, x6, #3; \
extr x1, x7, x6, #61; \
extr x2, x8, x7, #61; \
extr x3, x9, x8, #61; \
extr x4, x10, x9, #61; \
extr x5, x11, x10, #61; \
lsr x6, x11, #61; \
add x6, x6, #1; \
ldp x8, x9, [P1]; \
ldp x10, x11, [P1+16]; \
ldp x12, x13, [P1+32]; \
mov x14, 3; \
mul x15, x14, x8; \
umulh x8, x14, x8; \
adds x0, x0, x15; \
mul x15, x14, x9; \
umulh x9, x14, x9; \
adcs x1, x1, x15; \
mul x15, x14, x10; \
umulh x10, x14, x10; \
adcs x2, x2, x15; \
mul x15, x14, x11; \
umulh x11, x14, x11; \
adcs x3, x3, x15; \
mul x15, x14, x12; \
umulh x12, x14, x12; \
adcs x4, x4, x15; \
mul x15, x14, x13; \
umulh x13, x14, x13; \
adcs x5, x5, x15; \
adc x6, x6, xzr; \
adds x1, x1, x8; \
adcs x2, x2, x9; \
adcs x3, x3, x10; \
adcs x4, x4, x11; \
adcs x5, x5, x12; \
adcs x6, x6, x13; \
lsl x7, x6, #32; \
subs x8, x6, x7; \
sbc x7, x7, xzr; \
adds x0, x0, x8; \
adcs x1, x1, x7; \
adcs x2, x2, x6; \
adcs x3, x3, xzr; \
adcs x4, x4, xzr; \
adcs x5, x5, xzr; \
csetm x6, cc; \
mov x7, #0xffffffff; \
and x7, x7, x6; \
adds x0, x0, x7; \
eor x7, x7, x6; \
adcs x1, x1, x7; \
mov x7, #0xfffffffffffffffe; \
and x7, x7, x6; \
adcs x2, x2, x7; \
adcs x3, x3, x6; \
adcs x4, x4, x6; \
adc x5, x5, x6; \
stp x0, x1, [P0]; \
stp x2, x3, [P0+16]; \
stp x4, x5, [P0+32]
S2N_BN_SYMBOL(p384_montjdouble):
// Save regs and make room on stack for temporary variables
sub sp, sp, NSPACE+80
stp x19, x20, [sp, NSPACE]
stp x21, x22, [sp, NSPACE+16]
stp x23, x24, [sp, NSPACE+32]
stp x25, x26, [sp, NSPACE+48]
stp x30, xzr, [sp, NSPACE+64]
// Move the input arguments to stable places
mov input_z, x0
mov input_x, x1
// Main code, just a sequence of basic field operations
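// This is the standard a = -3 Jacobian doubling: with m = 3 * (x^2 - z^4)
// = 3 * x2p and s = 4 * x * y^2 = 4 * xy2,
//     x' = m^2 - 2 * s = 4 * xy2 - d
//     y' = m * (s - x') - 8 * y^4 = 3 * dx2 - 8 * y4
//     z' = 2 * y * z
// where d = 12 * xy2 - 9 * x4p, so that s - x' = d and m * d = 3 * dx2.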
// z2 = z^2
// y2 = y^2
montsqr_p384(z2,z_1)
montsqr_p384(y2,y_1)
// x2p = x^2 - z^4 = (x + z^2) * (x - z^2)
weakadd_p384(t1,x_1,z2)
sub_p384(t2,x_1,z2)
montmul_p384(x2p,t1,t2)
// t1 = y + z
// x4p = x2p^2
// xy2 = x * y^2
add_p384(t1,y_1,z_1)
montsqr_p384(x4p,x2p)
montmul_p384(xy2,x_1,y2)
// t2 = (y + z)^2
montsqr_p384(t2,t1)
// d = 12 * xy2 - 9 * x4p
// t1 = y^2 + 2 * y * z
cmsub_p384(d_,12,xy2,9,x4p)
sub_p384(t1,t2,z2)
// y4 = y^4
montsqr_p384(y4,y2)
// z_3' = 2 * y * z
// dx2 = d * x2p
sub_p384(z_3,t1,y2)
montmul_p384(dx2,d_,x2p)
// x' = 4 * xy2 - d
cmsub41_p384(x_3,xy2,d_)
// y' = 3 * dx2 - 8 * y4
cmsub38_p384(y_3,dx2,y4)
// Restore stack and registers
ldp x19, x20, [sp, NSPACE]
ldp x21, x22, [sp, NSPACE+16]
ldp x23, x24, [sp, NSPACE+32]
ldp x25, x26, [sp, NSPACE+48]
ldp x30, xzr, [sp, NSPACE+64]
add sp, sp, NSPACE+80
ret
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
marvin-hansen/iggy-streaming-system | 62,471 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/crypto/poly1305/poly1305_arm_asm.S |
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
# This implementation was taken from the public domain, neon2 version in
# SUPERCOP by D. J. Bernstein and Peter Schwabe.
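# Note: the accumulator and the precomputed key material (y*, z*; which power
# of r each set holds is not spelled out here) are kept in radix 2^26, five
# 26-bit limbs per value.  The 5y*/5z* values are simply 5 times the
# corresponding limbs, precomputed because 2^130 == 5 (mod 2^130 - 5), so any
# limb product that wraps past 2^130 gets multiplied by 5.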
# qhasm: int32 input_0
# qhasm: int32 input_1
# qhasm: int32 input_2
# qhasm: int32 input_3
# qhasm: stack32 input_4
# qhasm: stack32 input_5
# qhasm: stack32 input_6
# qhasm: stack32 input_7
# qhasm: int32 caller_r4
# qhasm: int32 caller_r5
# qhasm: int32 caller_r6
# qhasm: int32 caller_r7
# qhasm: int32 caller_r8
# qhasm: int32 caller_r9
# qhasm: int32 caller_r10
# qhasm: int32 caller_r11
# qhasm: int32 caller_r12
# qhasm: int32 caller_r14
# qhasm: reg128 caller_q4
# qhasm: reg128 caller_q5
# qhasm: reg128 caller_q6
# qhasm: reg128 caller_q7
# qhasm: startcode
.fpu neon
.text
# qhasm: reg128 r0
# qhasm: reg128 r1
# qhasm: reg128 r2
# qhasm: reg128 r3
# qhasm: reg128 r4
# qhasm: reg128 x01
# qhasm: reg128 x23
# qhasm: reg128 x4
# qhasm: reg128 y0
# qhasm: reg128 y12
# qhasm: reg128 y34
# qhasm: reg128 5y12
# qhasm: reg128 5y34
# qhasm: stack128 y0_stack
# qhasm: stack128 y12_stack
# qhasm: stack128 y34_stack
# qhasm: stack128 5y12_stack
# qhasm: stack128 5y34_stack
# qhasm: reg128 z0
# qhasm: reg128 z12
# qhasm: reg128 z34
# qhasm: reg128 5z12
# qhasm: reg128 5z34
# qhasm: stack128 z0_stack
# qhasm: stack128 z12_stack
# qhasm: stack128 z34_stack
# qhasm: stack128 5z12_stack
# qhasm: stack128 5z34_stack
# qhasm: stack128 two24
# qhasm: int32 ptr
# qhasm: reg128 c01
# qhasm: reg128 c23
# qhasm: reg128 d01
# qhasm: reg128 d23
# qhasm: reg128 t0
# qhasm: reg128 t1
# qhasm: reg128 t2
# qhasm: reg128 t3
# qhasm: reg128 t4
# qhasm: reg128 mask
# qhasm: reg128 u0
# qhasm: reg128 u1
# qhasm: reg128 u2
# qhasm: reg128 u3
# qhasm: reg128 u4
# qhasm: reg128 v01
# qhasm: reg128 mid
# qhasm: reg128 v23
# qhasm: reg128 v4
# qhasm: int32 len
# qhasm: qpushenter crypto_onetimeauth_poly1305_neon2_blocks
.align 4
.global openssl_poly1305_neon2_blocks
.hidden openssl_poly1305_neon2_blocks
.type openssl_poly1305_neon2_blocks STT_FUNC
openssl_poly1305_neon2_blocks:
vpush {q4,q5,q6,q7}
mov r12,sp
sub sp,sp,#192
bic sp,sp,#31
# qhasm: len = input_3
# asm 1: mov >len=int32#4,<input_3=int32#4
# asm 2: mov >len=r3,<input_3=r3
mov r3,r3
# qhasm: new y0
# qhasm: y0 = mem64[input_1]y0[1]; input_1 += 8
# asm 1: vld1.8 {<y0=reg128#1%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<y0=d0},[<input_1=r1]!
vld1.8 {d0},[r1]!
# qhasm: y12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y12=d2->y12=d3},[<input_1=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: y34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y34=d4->y34=d5},[<input_1=r1]!
vld1.8 {d4-d5},[r1]!
# qhasm: input_1 += 8
# asm 1: add >input_1=int32#2,<input_1=int32#2,#8
# asm 2: add >input_1=r1,<input_1=r1,#8
add r1,r1,#8
# qhasm: new z0
# qhasm: z0 = mem64[input_1]z0[1]; input_1 += 8
# asm 1: vld1.8 {<z0=reg128#4%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<z0=d6},[<input_1=r1]!
vld1.8 {d6},[r1]!
# qhasm: z12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z12=reg128#5%bot->z12=reg128#5%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z12=d8->z12=d9},[<input_1=r1]!
vld1.8 {d8-d9},[r1]!
# qhasm: z34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z34=d10->z34=d11},[<input_1=r1]!
vld1.8 {d10-d11},[r1]!
# qhasm: 2x mask = 0xffffffff
# asm 1: vmov.i64 >mask=reg128#7,#0xffffffff
# asm 2: vmov.i64 >mask=q6,#0xffffffff
vmov.i64 q6,#0xffffffff
# qhasm: 2x u4 = 0xff
# asm 1: vmov.i64 >u4=reg128#8,#0xff
# asm 2: vmov.i64 >u4=q7,#0xff
vmov.i64 q7,#0xff
# qhasm: x01 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x01=reg128#9%bot->x01=reg128#9%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x01=d16->x01=d17},[<input_0=r0,: 128]!
vld1.8 {d16-d17},[r0,: 128]!
# qhasm: x23 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x23=reg128#10%bot->x23=reg128#10%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x23=d18->x23=d19},[<input_0=r0,: 128]!
vld1.8 {d18-d19},[r0,: 128]!
# qhasm: x4 aligned= mem64[input_0]x4[1]
# asm 1: vld1.8 {<x4=reg128#11%bot},[<input_0=int32#1,: 64]
# asm 2: vld1.8 {<x4=d20},[<input_0=r0,: 64]
vld1.8 {d20},[r0,: 64]
# qhasm: input_0 -= 32
# asm 1: sub >input_0=int32#1,<input_0=int32#1,#32
# asm 2: sub >input_0=r0,<input_0=r0,#32
sub r0,r0,#32
# qhasm: 2x mask unsigned>>=6
# asm 1: vshr.u64 >mask=reg128#7,<mask=reg128#7,#6
# asm 2: vshr.u64 >mask=q6,<mask=q6,#6
vshr.u64 q6,q6,#6
# qhasm: 2x u4 unsigned>>= 7
# asm 1: vshr.u64 >u4=reg128#8,<u4=reg128#8,#7
# asm 2: vshr.u64 >u4=q7,<u4=q7,#7
vshr.u64 q7,q7,#7
# qhasm: 4x 5y12 = y12 << 2
# asm 1: vshl.i32 >5y12=reg128#12,<y12=reg128#2,#2
# asm 2: vshl.i32 >5y12=q11,<y12=q1,#2
vshl.i32 q11,q1,#2
# qhasm: 4x 5y34 = y34 << 2
# asm 1: vshl.i32 >5y34=reg128#13,<y34=reg128#3,#2
# asm 2: vshl.i32 >5y34=q12,<y34=q2,#2
vshl.i32 q12,q2,#2
# qhasm: 4x 5y12 += y12
# asm 1: vadd.i32 >5y12=reg128#12,<5y12=reg128#12,<y12=reg128#2
# asm 2: vadd.i32 >5y12=q11,<5y12=q11,<y12=q1
vadd.i32 q11,q11,q1
# qhasm: 4x 5y34 += y34
# asm 1: vadd.i32 >5y34=reg128#13,<5y34=reg128#13,<y34=reg128#3
# asm 2: vadd.i32 >5y34=q12,<5y34=q12,<y34=q2
vadd.i32 q12,q12,q2
# qhasm: 2x u4 <<= 24
# asm 1: vshl.i64 >u4=reg128#8,<u4=reg128#8,#24
# asm 2: vshl.i64 >u4=q7,<u4=q7,#24
vshl.i64 q7,q7,#24
# qhasm: 4x 5z12 = z12 << 2
# asm 1: vshl.i32 >5z12=reg128#14,<z12=reg128#5,#2
# asm 2: vshl.i32 >5z12=q13,<z12=q4,#2
vshl.i32 q13,q4,#2
# qhasm: 4x 5z34 = z34 << 2
# asm 1: vshl.i32 >5z34=reg128#15,<z34=reg128#6,#2
# asm 2: vshl.i32 >5z34=q14,<z34=q5,#2
vshl.i32 q14,q5,#2
# qhasm: 4x 5z12 += z12
# asm 1: vadd.i32 >5z12=reg128#14,<5z12=reg128#14,<z12=reg128#5
# asm 2: vadd.i32 >5z12=q13,<5z12=q13,<z12=q4
vadd.i32 q13,q13,q4
# qhasm: 4x 5z34 += z34
# asm 1: vadd.i32 >5z34=reg128#15,<5z34=reg128#15,<z34=reg128#6
# asm 2: vadd.i32 >5z34=q14,<5z34=q14,<z34=q5
vadd.i32 q14,q14,q5
# qhasm: new two24
# qhasm: new y0_stack
# qhasm: new y12_stack
# qhasm: new y34_stack
# qhasm: new 5y12_stack
# qhasm: new 5y34_stack
# qhasm: new z0_stack
# qhasm: new z12_stack
# qhasm: new z34_stack
# qhasm: new 5z12_stack
# qhasm: new 5z34_stack
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#2,<two24=stack128#1
# asm 2: lea >ptr=r1,<two24=[sp,#0]
add r1,sp,#0
# qhasm: mem128[ptr] aligned= u4
# asm 1: vst1.8 {<u4=reg128#8%bot-<u4=reg128#8%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<u4=d14-<u4=d15},[<ptr=r1,: 128]
vst1.8 {d14-d15},[r1,: 128]
# qhasm: r4 = u4
# asm 1: vmov >r4=reg128#16,<u4=reg128#8
# asm 2: vmov >r4=q15,<u4=q7
vmov q15,q7
# qhasm: r0 = u4
# asm 1: vmov >r0=reg128#8,<u4=reg128#8
# asm 2: vmov >r0=q7,<u4=q7
vmov q7,q7
# qhasm: ptr = &y0_stack
# asm 1: lea >ptr=int32#2,<y0_stack=stack128#2
# asm 2: lea >ptr=r1,<y0_stack=[sp,#16]
add r1,sp,#16
# qhasm: mem128[ptr] aligned= y0
# asm 1: vst1.8 {<y0=reg128#1%bot-<y0=reg128#1%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y0=d0-<y0=d1},[<ptr=r1,: 128]
vst1.8 {d0-d1},[r1,: 128]
# qhasm: ptr = &y12_stack
# asm 1: lea >ptr=int32#2,<y12_stack=stack128#3
# asm 2: lea >ptr=r1,<y12_stack=[sp,#32]
add r1,sp,#32
# qhasm: mem128[ptr] aligned= y12
# asm 1: vst1.8 {<y12=reg128#2%bot-<y12=reg128#2%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y12=d2-<y12=d3},[<ptr=r1,: 128]
vst1.8 {d2-d3},[r1,: 128]
# qhasm: ptr = &y34_stack
# asm 1: lea >ptr=int32#2,<y34_stack=stack128#4
# asm 2: lea >ptr=r1,<y34_stack=[sp,#48]
add r1,sp,#48
# qhasm: mem128[ptr] aligned= y34
# asm 1: vst1.8 {<y34=reg128#3%bot-<y34=reg128#3%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y34=d4-<y34=d5},[<ptr=r1,: 128]
vst1.8 {d4-d5},[r1,: 128]
# qhasm: ptr = &z0_stack
# asm 1: lea >ptr=int32#2,<z0_stack=stack128#7
# asm 2: lea >ptr=r1,<z0_stack=[sp,#96]
add r1,sp,#96
# qhasm: mem128[ptr] aligned= z0
# asm 1: vst1.8 {<z0=reg128#4%bot-<z0=reg128#4%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z0=d6-<z0=d7},[<ptr=r1,: 128]
vst1.8 {d6-d7},[r1,: 128]
# qhasm: ptr = &z12_stack
# asm 1: lea >ptr=int32#2,<z12_stack=stack128#8
# asm 2: lea >ptr=r1,<z12_stack=[sp,#112]
add r1,sp,#112
# qhasm: mem128[ptr] aligned= z12
# asm 1: vst1.8 {<z12=reg128#5%bot-<z12=reg128#5%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z12=d8-<z12=d9},[<ptr=r1,: 128]
vst1.8 {d8-d9},[r1,: 128]
# qhasm: ptr = &z34_stack
# asm 1: lea >ptr=int32#2,<z34_stack=stack128#9
# asm 2: lea >ptr=r1,<z34_stack=[sp,#128]
add r1,sp,#128
# qhasm: mem128[ptr] aligned= z34
# asm 1: vst1.8 {<z34=reg128#6%bot-<z34=reg128#6%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z34=d10-<z34=d11},[<ptr=r1,: 128]
vst1.8 {d10-d11},[r1,: 128]
# qhasm: ptr = &5y12_stack
# asm 1: lea >ptr=int32#2,<5y12_stack=stack128#5
# asm 2: lea >ptr=r1,<5y12_stack=[sp,#64]
add r1,sp,#64
# qhasm: mem128[ptr] aligned= 5y12
# asm 1: vst1.8 {<5y12=reg128#12%bot-<5y12=reg128#12%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5y12=d22-<5y12=d23},[<ptr=r1,: 128]
vst1.8 {d22-d23},[r1,: 128]
# qhasm: ptr = &5y34_stack
# asm 1: lea >ptr=int32#2,<5y34_stack=stack128#6
# asm 2: lea >ptr=r1,<5y34_stack=[sp,#80]
add r1,sp,#80
# qhasm: mem128[ptr] aligned= 5y34
# asm 1: vst1.8 {<5y34=reg128#13%bot-<5y34=reg128#13%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5y34=d24-<5y34=d25},[<ptr=r1,: 128]
vst1.8 {d24-d25},[r1,: 128]
# qhasm: ptr = &5z12_stack
# asm 1: lea >ptr=int32#2,<5z12_stack=stack128#10
# asm 2: lea >ptr=r1,<5z12_stack=[sp,#144]
add r1,sp,#144
# qhasm: mem128[ptr] aligned= 5z12
# asm 1: vst1.8 {<5z12=reg128#14%bot-<5z12=reg128#14%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5z12=d26-<5z12=d27},[<ptr=r1,: 128]
vst1.8 {d26-d27},[r1,: 128]
# qhasm: ptr = &5z34_stack
# asm 1: lea >ptr=int32#2,<5z34_stack=stack128#11
# asm 2: lea >ptr=r1,<5z34_stack=[sp,#160]
add r1,sp,#160
# qhasm: mem128[ptr] aligned= 5z34
# asm 1: vst1.8 {<5z34=reg128#15%bot-<5z34=reg128#15%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5z34=d28-<5z34=d29},[<ptr=r1,: 128]
vst1.8 {d28-d29},[r1,: 128]
# qhasm: unsigned>? len - 64
# asm 1: cmp <len=int32#4,#64
# asm 2: cmp <len=r3,#64
cmp r3,#64
# qhasm: goto below64bytes if !unsigned>
bls ._below64bytes
# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#3,#32
# asm 2: add >input_2=r1,<input_2=r2,#32
add r1,r2,#32
# qhasm: mainloop2:
._mainloop2:
# qhasm: c01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c01=reg128#1%bot->c01=reg128#1%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>c01=d0->c01=d1},[<input_2=r1]!
vld1.8 {d0-d1},[r1]!
# qhasm: c23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c23=reg128#2%bot->c23=reg128#2%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>c23=d2->c23=d3},[<input_2=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: r4[0,1] += x01[0] unsigned* z34[2]; r4[2,3] += x01[1] unsigned* z34[3]
# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%bot,<z34=reg128#6%top
# asm 2: vmlal.u32 <r4=q15,<x01=d16,<z34=d11
vmlal.u32 q15,d16,d11
# qhasm: ptr = &z12_stack
# asm 1: lea >ptr=int32#3,<z12_stack=stack128#8
# asm 2: lea >ptr=r2,<z12_stack=[sp,#112]
add r2,sp,#112
# qhasm: z12 aligned= mem128[ptr]
# asm 1: vld1.8 {>z12=reg128#3%bot->z12=reg128#3%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z12=d4->z12=d5},[<ptr=r2,: 128]
vld1.8 {d4-d5},[r2,: 128]
# qhasm: r4[0,1] += x01[2] unsigned* z34[0]; r4[2,3] += x01[3] unsigned* z34[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%top,<z34=reg128#6%bot
# asm 2: vmlal.u32 <r4=q15,<x01=d17,<z34=d10
vmlal.u32 q15,d17,d10
# qhasm: ptr = &z0_stack
# asm 1: lea >ptr=int32#3,<z0_stack=stack128#7
# asm 2: lea >ptr=r2,<z0_stack=[sp,#96]
add r2,sp,#96
# qhasm: z0 aligned= mem128[ptr]
# asm 1: vld1.8 {>z0=reg128#4%bot->z0=reg128#4%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z0=d6->z0=d7},[<ptr=r2,: 128]
vld1.8 {d6-d7},[r2,: 128]
# qhasm: r4[0,1] += x23[0] unsigned* z12[2]; r4[2,3] += x23[1] unsigned* z12[3]
# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%bot,<z12=reg128#3%top
# asm 2: vmlal.u32 <r4=q15,<x23=d18,<z12=d5
vmlal.u32 q15,d18,d5
# qhasm: c01 c23 = c01[0]c01[1]c01[2]c23[2]c23[0]c23[1]c01[3]c23[3]
# asm 1: vtrn.32 <c01=reg128#1%top,<c23=reg128#2%top
# asm 2: vtrn.32 <c01=d1,<c23=d3
vtrn.32 d1,d3
# qhasm: r4[0,1] += x23[2] unsigned* z12[0]; r4[2,3] += x23[3] unsigned* z12[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%top,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r4=q15,<x23=d19,<z12=d4
vmlal.u32 q15,d19,d4
# qhasm: r4[0,1] += x4[0] unsigned* z0[0]; r4[2,3] += x4[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x4=reg128#11%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r4=q15,<x4=d20,<z0=d6
vmlal.u32 q15,d20,d6
# qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
# asm 1: vshll.u32 >r3=reg128#5,<c23=reg128#2%top,#18
# asm 2: vshll.u32 >r3=q4,<c23=d3,#18
vshll.u32 q4,d3,#18
# qhasm: c01 c23 = c01[0]c23[0]c01[2]c01[3]c01[1]c23[1]c23[2]c23[3]
# asm 1: vtrn.32 <c01=reg128#1%bot,<c23=reg128#2%bot
# asm 2: vtrn.32 <c01=d0,<c23=d2
vtrn.32 d0,d2
# qhasm: r3[0,1] += x01[0] unsigned* z34[0]; r3[2,3] += x01[1] unsigned* z34[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%bot,<z34=reg128#6%bot
# asm 2: vmlal.u32 <r3=q4,<x01=d16,<z34=d10
vmlal.u32 q4,d16,d10
# qhasm: r3[0,1] += x01[2] unsigned* z12[2]; r3[2,3] += x01[3] unsigned* z12[3]
# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%top,<z12=reg128#3%top
# asm 2: vmlal.u32 <r3=q4,<x01=d17,<z12=d5
vmlal.u32 q4,d17,d5
# qhasm: r0 = r0[1]c01[0]r0[2,3]
# asm 1: vext.32 <r0=reg128#8%bot,<r0=reg128#8%bot,<c01=reg128#1%bot,#1
# asm 2: vext.32 <r0=d14,<r0=d14,<c01=d0,#1
vext.32 d14,d14,d0,#1
# qhasm: r3[0,1] += x23[0] unsigned* z12[0]; r3[2,3] += x23[1] unsigned* z12[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%bot,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r3=q4,<x23=d18,<z12=d4
vmlal.u32 q4,d18,d4
# qhasm: input_2 -= 64
# asm 1: sub >input_2=int32#2,<input_2=int32#2,#64
# asm 2: sub >input_2=r1,<input_2=r1,#64
sub r1,r1,#64
# qhasm: r3[0,1] += x23[2] unsigned* z0[0]; r3[2,3] += x23[3] unsigned* z0[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%top,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r3=q4,<x23=d19,<z0=d6
vmlal.u32 q4,d19,d6
# qhasm: ptr = &5z34_stack
# asm 1: lea >ptr=int32#3,<5z34_stack=stack128#11
# asm 2: lea >ptr=r2,<5z34_stack=[sp,#160]
add r2,sp,#160
# qhasm: 5z34 aligned= mem128[ptr]
# asm 1: vld1.8 {>5z34=reg128#6%bot->5z34=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5z34=d10->5z34=d11},[<ptr=r2,: 128]
vld1.8 {d10-d11},[r2,: 128]
# qhasm: r3[0,1] += x4[0] unsigned* 5z34[2]; r3[2,3] += x4[1] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r3=reg128#5,<x4=reg128#11%bot,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r3=q4,<x4=d20,<5z34=d11
vmlal.u32 q4,d20,d11
# qhasm: r0 = r0[1]r0[0]r0[3]r0[2]
# asm 1: vrev64.i32 >r0=reg128#8,<r0=reg128#8
# asm 2: vrev64.i32 >r0=q7,<r0=q7
vrev64.i32 q7,q7
# qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
# asm 1: vshll.u32 >r2=reg128#14,<c01=reg128#1%top,#12
# asm 2: vshll.u32 >r2=q13,<c01=d1,#12
vshll.u32 q13,d1,#12
# qhasm: d01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>d01=reg128#12%bot->d01=reg128#12%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>d01=d22->d01=d23},[<input_2=r1]!
vld1.8 {d22-d23},[r1]!
# qhasm: r2[0,1] += x01[0] unsigned* z12[2]; r2[2,3] += x01[1] unsigned* z12[3]
# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%bot,<z12=reg128#3%top
# asm 2: vmlal.u32 <r2=q13,<x01=d16,<z12=d5
vmlal.u32 q13,d16,d5
# qhasm: r2[0,1] += x01[2] unsigned* z12[0]; r2[2,3] += x01[3] unsigned* z12[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%top,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r2=q13,<x01=d17,<z12=d4
vmlal.u32 q13,d17,d4
# qhasm: r2[0,1] += x23[0] unsigned* z0[0]; r2[2,3] += x23[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r2=q13,<x23=d18,<z0=d6
vmlal.u32 q13,d18,d6
# qhasm: r2[0,1] += x23[2] unsigned* 5z34[2]; r2[2,3] += x23[3] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%top,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r2=q13,<x23=d19,<5z34=d11
vmlal.u32 q13,d19,d11
# qhasm: r2[0,1] += x4[0] unsigned* 5z34[0]; r2[2,3] += x4[1] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x4=reg128#11%bot,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r2=q13,<x4=d20,<5z34=d10
vmlal.u32 q13,d20,d10
# qhasm: r0 = r0[0,1]c01[1]r0[2]
# asm 1: vext.32 <r0=reg128#8%top,<c01=reg128#1%bot,<r0=reg128#8%top,#1
# asm 2: vext.32 <r0=d15,<c01=d0,<r0=d15,#1
vext.32 d15,d0,d15,#1
# qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
# asm 1: vshll.u32 >r1=reg128#15,<c23=reg128#2%bot,#6
# asm 2: vshll.u32 >r1=q14,<c23=d2,#6
vshll.u32 q14,d2,#6
# qhasm: r1[0,1] += x01[0] unsigned* z12[0]; r1[2,3] += x01[1] unsigned* z12[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%bot,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r1=q14,<x01=d16,<z12=d4
vmlal.u32 q14,d16,d4
# qhasm: r1[0,1] += x01[2] unsigned* z0[0]; r1[2,3] += x01[3] unsigned* z0[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%top,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r1=q14,<x01=d17,<z0=d6
vmlal.u32 q14,d17,d6
# qhasm: r1[0,1] += x23[0] unsigned* 5z34[2]; r1[2,3] += x23[1] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%bot,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r1=q14,<x23=d18,<5z34=d11
vmlal.u32 q14,d18,d11
# qhasm: r1[0,1] += x23[2] unsigned* 5z34[0]; r1[2,3] += x23[3] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%top,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r1=q14,<x23=d19,<5z34=d10
vmlal.u32 q14,d19,d10
# qhasm: ptr = &5z12_stack
# asm 1: lea >ptr=int32#3,<5z12_stack=stack128#10
# asm 2: lea >ptr=r2,<5z12_stack=[sp,#144]
add r2,sp,#144
# qhasm: 5z12 aligned= mem128[ptr]
# asm 1: vld1.8 {>5z12=reg128#1%bot->5z12=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5z12=d0->5z12=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]
# qhasm: r1[0,1] += x4[0] unsigned* 5z12[2]; r1[2,3] += x4[1] unsigned* 5z12[3]
# asm 1: vmlal.u32 <r1=reg128#15,<x4=reg128#11%bot,<5z12=reg128#1%top
# asm 2: vmlal.u32 <r1=q14,<x4=d20,<5z12=d1
vmlal.u32 q14,d20,d1
# qhasm: d23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>d23=reg128#2%bot->d23=reg128#2%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>d23=d2->d23=d3},[<input_2=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#2,#32
# asm 2: add >input_2=r1,<input_2=r1,#32
add r1,r1,#32
# qhasm: r0[0,1] += x4[0] unsigned* 5z12[0]; r0[2,3] += x4[1] unsigned* 5z12[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x4=reg128#11%bot,<5z12=reg128#1%bot
# asm 2: vmlal.u32 <r0=q7,<x4=d20,<5z12=d0
vmlal.u32 q7,d20,d0
# qhasm: r0[0,1] += x23[0] unsigned* 5z34[0]; r0[2,3] += x23[1] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%bot,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r0=q7,<x23=d18,<5z34=d10
vmlal.u32 q7,d18,d10
# qhasm: d01 d23 = d01[0] d23[0] d01[1] d23[1]
# asm 1: vswp <d23=reg128#2%bot,<d01=reg128#12%top
# asm 2: vswp <d23=d2,<d01=d23
vswp d2,d23
# qhasm: r0[0,1] += x23[2] unsigned* 5z12[2]; r0[2,3] += x23[3] unsigned* 5z12[3]
# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%top,<5z12=reg128#1%top
# asm 2: vmlal.u32 <r0=q7,<x23=d19,<5z12=d1
vmlal.u32 q7,d19,d1
# qhasm: r0[0,1] += x01[0] unsigned* z0[0]; r0[2,3] += x01[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r0=q7,<x01=d16,<z0=d6
vmlal.u32 q7,d16,d6
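# The freshly loaded blocks in d01/d23 are now split into five 26-bit limbs
# (v01, v23, v4); the 2^128 padding bit required by Poly1305 lands in bit 24
# of the top limb and is set with the vorr #0x01000000 below.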
# qhasm: new mid
# qhasm: 2x v4 = d23 unsigned>> 40
# asm 1: vshr.u64 >v4=reg128#4,<d23=reg128#2,#40
# asm 2: vshr.u64 >v4=q3,<d23=q1,#40
vshr.u64 q3,q1,#40
# qhasm: mid = d01[1]d23[0] mid[2,3]
# asm 1: vext.32 <mid=reg128#1%bot,<d01=reg128#12%bot,<d23=reg128#2%bot,#1
# asm 2: vext.32 <mid=d0,<d01=d22,<d23=d2,#1
vext.32 d0,d22,d2,#1
# qhasm: new v23
# qhasm: v23[2] = d23[0,1] unsigned>> 14; v23[3] = d23[2,3] unsigned>> 14
# asm 1: vshrn.u64 <v23=reg128#10%top,<d23=reg128#2,#14
# asm 2: vshrn.u64 <v23=d19,<d23=q1,#14
vshrn.u64 d19,q1,#14
# qhasm: mid = mid[0,1] d01[3]d23[2]
# asm 1: vext.32 <mid=reg128#1%top,<d01=reg128#12%top,<d23=reg128#2%top,#1
# asm 2: vext.32 <mid=d1,<d01=d23,<d23=d3,#1
vext.32 d1,d23,d3,#1
# qhasm: new v01
# qhasm: v01[2] = d01[0,1] unsigned>> 26; v01[3] = d01[2,3] unsigned>> 26
# asm 1: vshrn.u64 <v01=reg128#11%top,<d01=reg128#12,#26
# asm 2: vshrn.u64 <v01=d21,<d01=q11,#26
vshrn.u64 d21,q11,#26
# qhasm: v01 = d01[1]d01[0] v01[2,3]
# asm 1: vext.32 <v01=reg128#11%bot,<d01=reg128#12%bot,<d01=reg128#12%bot,#1
# asm 2: vext.32 <v01=d20,<d01=d22,<d01=d22,#1
vext.32 d20,d22,d22,#1
# qhasm: r0[0,1] += x01[2] unsigned* 5z34[2]; r0[2,3] += x01[3] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%top,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r0=q7,<x01=d17,<5z34=d11
vmlal.u32 q7,d17,d11
# qhasm: v01 = v01[1]d01[2] v01[2,3]
# asm 1: vext.32 <v01=reg128#11%bot,<v01=reg128#11%bot,<d01=reg128#12%top,#1
# asm 2: vext.32 <v01=d20,<v01=d20,<d01=d23,#1
vext.32 d20,d20,d23,#1
# qhasm: v23[0] = mid[0,1] unsigned>> 20; v23[1] = mid[2,3] unsigned>> 20
# asm 1: vshrn.u64 <v23=reg128#10%bot,<mid=reg128#1,#20
# asm 2: vshrn.u64 <v23=d18,<mid=q0,#20
vshrn.u64 d18,q0,#20
# qhasm: v4 = v4[0]v4[2]v4[1]v4[3]
# asm 1: vtrn.32 <v4=reg128#4%bot,<v4=reg128#4%top
# asm 2: vtrn.32 <v4=d6,<v4=d7
vtrn.32 d6,d7
# qhasm: 4x v01 &= 0x03ffffff
# asm 1: vand.i32 <v01=reg128#11,#0x03ffffff
# asm 2: vand.i32 <v01=q10,#0x03ffffff
vand.i32 q10,#0x03ffffff
# qhasm: ptr = &y34_stack
# asm 1: lea >ptr=int32#3,<y34_stack=stack128#4
# asm 2: lea >ptr=r2,<y34_stack=[sp,#48]
add r2,sp,#48
# qhasm: y34 aligned= mem128[ptr]
# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y34=d4->y34=d5},[<ptr=r2,: 128]
vld1.8 {d4-d5},[r2,: 128]
# qhasm: 4x v23 &= 0x03ffffff
# asm 1: vand.i32 <v23=reg128#10,#0x03ffffff
# asm 2: vand.i32 <v23=q9,#0x03ffffff
vand.i32 q9,#0x03ffffff
# qhasm: ptr = &y12_stack
# asm 1: lea >ptr=int32#3,<y12_stack=stack128#3
# asm 2: lea >ptr=r2,<y12_stack=[sp,#32]
add r2,sp,#32
# qhasm: y12 aligned= mem128[ptr]
# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y12=d2->y12=d3},[<ptr=r2,: 128]
vld1.8 {d2-d3},[r2,: 128]
# qhasm: 4x v4 |= 0x01000000
# asm 1: vorr.i32 <v4=reg128#4,#0x01000000
# asm 2: vorr.i32 <v4=q3,#0x01000000
vorr.i32 q3,#0x01000000
# qhasm: ptr = &y0_stack
# asm 1: lea >ptr=int32#3,<y0_stack=stack128#2
# asm 2: lea >ptr=r2,<y0_stack=[sp,#16]
add r2,sp,#16
# qhasm: y0 aligned= mem128[ptr]
# asm 1: vld1.8 {>y0=reg128#1%bot->y0=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y0=d0->y0=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]
# qhasm: r4[0,1] += v01[0] unsigned* y34[2]; r4[2,3] += v01[1] unsigned* y34[3]
# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%bot,<y34=reg128#3%top
# asm 2: vmlal.u32 <r4=q15,<v01=d20,<y34=d5
vmlal.u32 q15,d20,d5
# qhasm: r4[0,1] += v01[2] unsigned* y34[0]; r4[2,3] += v01[3] unsigned* y34[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%top,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r4=q15,<v01=d21,<y34=d4
vmlal.u32 q15,d21,d4
# qhasm: r4[0,1] += v23[0] unsigned* y12[2]; r4[2,3] += v23[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r4=q15,<v23=d18,<y12=d3
vmlal.u32 q15,d18,d3
# qhasm: r4[0,1] += v23[2] unsigned* y12[0]; r4[2,3] += v23[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r4=q15,<v23=d19,<y12=d2
vmlal.u32 q15,d19,d2
# qhasm: r4[0,1] += v4[0] unsigned* y0[0]; r4[2,3] += v4[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v4=reg128#4%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r4=q15,<v4=d6,<y0=d0
vmlal.u32 q15,d6,d0
# qhasm: ptr = &5y34_stack
# asm 1: lea >ptr=int32#3,<5y34_stack=stack128#6
# asm 2: lea >ptr=r2,<5y34_stack=[sp,#80]
add r2,sp,#80
# qhasm: 5y34 aligned= mem128[ptr]
# asm 1: vld1.8 {>5y34=reg128#13%bot->5y34=reg128#13%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5y34=d24->5y34=d25},[<ptr=r2,: 128]
vld1.8 {d24-d25},[r2,: 128]
# qhasm: r3[0,1] += v01[0] unsigned* y34[0]; r3[2,3] += v01[1] unsigned* y34[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%bot,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r3=q4,<v01=d20,<y34=d4
vmlal.u32 q4,d20,d4
# qhasm: r3[0,1] += v01[2] unsigned* y12[2]; r3[2,3] += v01[3] unsigned* y12[3]
# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%top,<y12=reg128#2%top
# asm 2: vmlal.u32 <r3=q4,<v01=d21,<y12=d3
vmlal.u32 q4,d21,d3
# qhasm: r3[0,1] += v23[0] unsigned* y12[0]; r3[2,3] += v23[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r3=q4,<v23=d18,<y12=d2
vmlal.u32 q4,d18,d2
# qhasm: r3[0,1] += v23[2] unsigned* y0[0]; r3[2,3] += v23[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r3=q4,<v23=d19,<y0=d0
vmlal.u32 q4,d19,d0
# qhasm: r3[0,1] += v4[0] unsigned* 5y34[2]; r3[2,3] += v4[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r3=reg128#5,<v4=reg128#4%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r3=q4,<v4=d6,<5y34=d25
vmlal.u32 q4,d6,d25
# qhasm: ptr = &5y12_stack
# asm 1: lea >ptr=int32#3,<5y12_stack=stack128#5
# asm 2: lea >ptr=r2,<5y12_stack=[sp,#64]
add r2,sp,#64
# qhasm: 5y12 aligned= mem128[ptr]
# asm 1: vld1.8 {>5y12=reg128#12%bot->5y12=reg128#12%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5y12=d22->5y12=d23},[<ptr=r2,: 128]
vld1.8 {d22-d23},[r2,: 128]
# qhasm: r0[0,1] += v4[0] unsigned* 5y12[0]; r0[2,3] += v4[1] unsigned* 5y12[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v4=reg128#4%bot,<5y12=reg128#12%bot
# asm 2: vmlal.u32 <r0=q7,<v4=d6,<5y12=d22
vmlal.u32 q7,d6,d22
# qhasm: r0[0,1] += v23[0] unsigned* 5y34[0]; r0[2,3] += v23[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r0=q7,<v23=d18,<5y34=d24
vmlal.u32 q7,d18,d24
# qhasm: r0[0,1] += v23[2] unsigned* 5y12[2]; r0[2,3] += v23[3] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%top,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r0=q7,<v23=d19,<5y12=d23
vmlal.u32 q7,d19,d23
# qhasm: r0[0,1] += v01[0] unsigned* y0[0]; r0[2,3] += v01[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r0=q7,<v01=d20,<y0=d0
vmlal.u32 q7,d20,d0
# qhasm: r0[0,1] += v01[2] unsigned* 5y34[2]; r0[2,3] += v01[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r0=q7,<v01=d21,<5y34=d25
vmlal.u32 q7,d21,d25
# qhasm: r1[0,1] += v01[0] unsigned* y12[0]; r1[2,3] += v01[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r1=q14,<v01=d20,<y12=d2
vmlal.u32 q14,d20,d2
# qhasm: r1[0,1] += v01[2] unsigned* y0[0]; r1[2,3] += v01[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r1=q14,<v01=d21,<y0=d0
vmlal.u32 q14,d21,d0
# qhasm: r1[0,1] += v23[0] unsigned* 5y34[2]; r1[2,3] += v23[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r1=q14,<v23=d18,<5y34=d25
vmlal.u32 q14,d18,d25
# qhasm: r1[0,1] += v23[2] unsigned* 5y34[0]; r1[2,3] += v23[3] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%top,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r1=q14,<v23=d19,<5y34=d24
vmlal.u32 q14,d19,d24
# qhasm: r1[0,1] += v4[0] unsigned* 5y12[2]; r1[2,3] += v4[1] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r1=reg128#15,<v4=reg128#4%bot,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r1=q14,<v4=d6,<5y12=d23
vmlal.u32 q14,d6,d23
# qhasm: r2[0,1] += v01[0] unsigned* y12[2]; r2[2,3] += v01[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r2=q13,<v01=d20,<y12=d3
vmlal.u32 q13,d20,d3
# qhasm: r2[0,1] += v01[2] unsigned* y12[0]; r2[2,3] += v01[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r2=q13,<v01=d21,<y12=d2
vmlal.u32 q13,d21,d2
# qhasm: r2[0,1] += v23[0] unsigned* y0[0]; r2[2,3] += v23[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r2=q13,<v23=d18,<y0=d0
vmlal.u32 q13,d18,d0
# qhasm: r2[0,1] += v23[2] unsigned* 5y34[2]; r2[2,3] += v23[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r2=q13,<v23=d19,<5y34=d25
vmlal.u32 q13,d19,d25
# qhasm: r2[0,1] += v4[0] unsigned* 5y34[0]; r2[2,3] += v4[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v4=reg128#4%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r2=q13,<v4=d6,<5y34=d24
vmlal.u32 q13,d6,d24
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#3,<two24=stack128#1
# asm 2: lea >ptr=r2,<two24=[sp,#0]
add r2,sp,#0
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#4,<r0=reg128#8,#26
# asm 2: vshr.u64 >t1=q3,<r0=q7,#26
vshr.u64 q3,q7,#26
# qhasm: len -= 64
# asm 1: sub >len=int32#4,<len=int32#4,#64
# asm 2: sub >len=r3,<len=r3,#64
sub r3,r3,#64
# qhasm: r0 &= mask
# asm 1: vand >r0=reg128#6,<r0=reg128#8,<mask=reg128#7
# asm 2: vand >r0=q5,<r0=q7,<mask=q6
vand q5,q7,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#15,<t1=reg128#4
# asm 2: vadd.i64 >r1=q3,<r1=q14,<t1=q3
vadd.i64 q3,q14,q3
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#5,#26
# asm 2: vshr.u64 >t4=q7,<r3=q4,#26
vshr.u64 q7,q4,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
# asm 2: vand >r3=q4,<r3=q4,<mask=q6
vand q4,q4,q6
# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#8,<r4=reg128#16,<t4=reg128#8
# asm 2: vadd.i64 >x4=q7,<r4=q15,<t4=q7
vadd.i64 q7,q15,q7
# qhasm: r4 aligned= mem128[ptr]
# asm 1: vld1.8 {>r4=reg128#16%bot->r4=reg128#16%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>r4=d30->r4=d31},[<ptr=r2,: 128]
vld1.8 {d30-d31},[r2,: 128]
# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#9,<r1=reg128#4,#26
# asm 2: vshr.u64 >t2=q8,<r1=q3,#26
vshr.u64 q8,q3,#26
# qhasm: r1 &= mask
# asm 1: vand >r1=reg128#4,<r1=reg128#4,<mask=reg128#7
# asm 2: vand >r1=q3,<r1=q3,<mask=q6
vand q3,q3,q6
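# The carry out of the top limb (t0, computed just below) wraps modulo
# 2^130 - 5: it is added into x01 once, and once more after a left shift
# by 2, i.e. multiplied by 5 in total.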
# qhasm: 2x t0 = x4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#10,<x4=reg128#8,#26
# asm 2: vshr.u64 >t0=q9,<x4=q7,#26
vshr.u64 q9,q7,#26
# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#9,<r2=reg128#14,<t2=reg128#9
# asm 2: vadd.i64 >r2=q8,<r2=q13,<t2=q8
vadd.i64 q8,q13,q8
# qhasm: x4 &= mask
# asm 1: vand >x4=reg128#11,<x4=reg128#8,<mask=reg128#7
# asm 2: vand >x4=q10,<x4=q7,<mask=q6
vand q10,q7,q6
# qhasm: 2x x01 = r0 + t0
# asm 1: vadd.i64 >x01=reg128#6,<r0=reg128#6,<t0=reg128#10
# asm 2: vadd.i64 >x01=q5,<r0=q5,<t0=q9
vadd.i64 q5,q5,q9
# qhasm: r0 aligned= mem128[ptr]
# asm 1: vld1.8 {>r0=reg128#8%bot->r0=reg128#8%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>r0=d14->r0=d15},[<ptr=r2,: 128]
vld1.8 {d14-d15},[r2,: 128]
# qhasm: ptr = &z34_stack
# asm 1: lea >ptr=int32#3,<z34_stack=stack128#9
# asm 2: lea >ptr=r2,<z34_stack=[sp,#128]
add r2,sp,#128
# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#10,<t0=reg128#10,#2
# asm 2: vshl.i64 >t0=q9,<t0=q9,#2
vshl.i64 q9,q9,#2
# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#9,#26
# asm 2: vshr.u64 >t3=q13,<r2=q8,#26
vshr.u64 q13,q8,#26
# qhasm: 2x x01 += t0
# asm 1: vadd.i64 >x01=reg128#15,<x01=reg128#6,<t0=reg128#10
# asm 2: vadd.i64 >x01=q14,<x01=q5,<t0=q9
vadd.i64 q14,q5,q9
# qhasm: z34 aligned= mem128[ptr]
# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z34=d10->z34=d11},[<ptr=r2,: 128]
vld1.8 {d10-d11},[r2,: 128]
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#9,<mask=reg128#7
# asm 2: vand >x23=q9,<r2=q8,<mask=q6
vand q9,q8,q6
# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#5,<r3=reg128#5,<t3=reg128#14
# asm 2: vadd.i64 >r3=q4,<r3=q4,<t3=q13
vadd.i64 q4,q4,q13
# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#2,#32
# asm 2: add >input_2=r1,<input_2=r1,#32
add r1,r1,#32
# qhasm: 2x t1 = x01 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#14,<x01=reg128#15,#26
# asm 2: vshr.u64 >t1=q13,<x01=q14,#26
vshr.u64 q13,q14,#26
# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19
# qhasm: x01 = x01 & mask
# asm 1: vand >x01=reg128#9,<x01=reg128#15,<mask=reg128#7
# asm 2: vand >x01=q8,<x01=q14,<mask=q6
vand q8,q14,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#4,<t1=reg128#14
# asm 2: vadd.i64 >r1=q3,<r1=q3,<t1=q13
vadd.i64 q3,q3,q13
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#14,<r3=reg128#5,#26
# asm 2: vshr.u64 >t4=q13,<r3=q4,#26
vshr.u64 q13,q4,#26
# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
# asm 2: vtrn.32 <x01=d16,<x01=d17
vtrn.32 d16,d17
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
# asm 2: vand >r3=q4,<r3=q4,<mask=q6
vand q4,q4,q6
# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
# asm 2: vtrn.32 <r1=d6,<r1=d7
vtrn.32 d6,d7
# qhasm: 2x x4 += t4
# asm 1: vadd.i64 >x4=reg128#11,<x4=reg128#11,<t4=reg128#14
# asm 2: vadd.i64 >x4=q10,<x4=q10,<t4=q13
vadd.i64 q10,q10,q13
# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#5%bot,<r3=reg128#5%top
# asm 2: vtrn.32 <r3=d8,<r3=d9
vtrn.32 d8,d9
# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
vext.32 d17,d6,d6,#0
# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#5%bot,<r3=reg128#5%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d8,<r3=d8,#0
vext.32 d19,d8,d8,#0
# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
# asm 2: vtrn.32 <x4=d20,<x4=d21
vtrn.32 d20,d21
# qhasm: unsigned>? len - 64
# asm 1: cmp <len=int32#4,#64
# asm 2: cmp <len=r3,#64
cmp r3,#64
# qhasm: goto mainloop2 if unsigned>
bhi ._mainloop2
# qhasm: input_2 -= 32
# asm 1: sub >input_2=int32#3,<input_2=int32#2,#32
# asm 2: sub >input_2=r2,<input_2=r1,#32
sub r2,r1,#32
# qhasm: below64bytes:
._below64bytes:
# qhasm: unsigned>? len - 32
# asm 1: cmp <len=int32#4,#32
# asm 2: cmp <len=r3,#32
cmp r3,#32
# qhasm: goto end if !unsigned>
bls ._end
# qhasm: mainloop:
._mainloop:
# qhasm: new r0
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#2,<two24=stack128#1
# asm 2: lea >ptr=r1,<two24=[sp,#0]
add r1,sp,#0
# qhasm: r4 aligned= mem128[ptr]
# asm 1: vld1.8 {>r4=reg128#5%bot->r4=reg128#5%top},[<ptr=int32#2,: 128]
# asm 2: vld1.8 {>r4=d8->r4=d9},[<ptr=r1,: 128]
vld1.8 {d8-d9},[r1,: 128]
# qhasm: u4 aligned= mem128[ptr]
# asm 1: vld1.8 {>u4=reg128#6%bot->u4=reg128#6%top},[<ptr=int32#2,: 128]
# asm 2: vld1.8 {>u4=d10->u4=d11},[<ptr=r1,: 128]
vld1.8 {d10-d11},[r1,: 128]
# qhasm: c01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c01=reg128#8%bot->c01=reg128#8%top},[<input_2=int32#3]!
# asm 2: vld1.8 {>c01=d14->c01=d15},[<input_2=r2]!
vld1.8 {d14-d15},[r2]!
# qhasm: r4[0,1] += x01[0] unsigned* y34[2]; r4[2,3] += x01[1] unsigned* y34[3]
# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%bot,<y34=reg128#3%top
# asm 2: vmlal.u32 <r4=q4,<x01=d16,<y34=d5
vmlal.u32 q4,d16,d5
# qhasm: c23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_2=int32#3]!
# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_2=r2]!
vld1.8 {d26-d27},[r2]!
# qhasm: r4[0,1] += x01[2] unsigned* y34[0]; r4[2,3] += x01[3] unsigned* y34[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%top,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r4=q4,<x01=d17,<y34=d4
vmlal.u32 q4,d17,d4
# qhasm: r0 = u4[1]c01[0]r0[2,3]
# asm 1: vext.32 <r0=reg128#4%bot,<u4=reg128#6%bot,<c01=reg128#8%bot,#1
# asm 2: vext.32 <r0=d6,<u4=d10,<c01=d14,#1
vext.32 d6,d10,d14,#1
# qhasm: r4[0,1] += x23[0] unsigned* y12[2]; r4[2,3] += x23[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r4=q4,<x23=d18,<y12=d3
vmlal.u32 q4,d18,d3
# qhasm: r0 = r0[0,1]u4[1]c23[0]
# asm 1: vext.32 <r0=reg128#4%top,<u4=reg128#6%bot,<c23=reg128#14%bot,#1
# asm 2: vext.32 <r0=d7,<u4=d10,<c23=d26,#1
vext.32 d7,d10,d26,#1
# qhasm: r4[0,1] += x23[2] unsigned* y12[0]; r4[2,3] += x23[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r4=q4,<x23=d19,<y12=d2
vmlal.u32 q4,d19,d2
# qhasm: r0 = r0[1]r0[0]r0[3]r0[2]
# asm 1: vrev64.i32 >r0=reg128#4,<r0=reg128#4
# asm 2: vrev64.i32 >r0=q3,<r0=q3
vrev64.i32 q3,q3
# qhasm: r4[0,1] += x4[0] unsigned* y0[0]; r4[2,3] += x4[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x4=reg128#11%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r4=q4,<x4=d20,<y0=d0
vmlal.u32 q4,d20,d0
# qhasm: r0[0,1] += x4[0] unsigned* 5y12[0]; r0[2,3] += x4[1] unsigned* 5y12[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x4=reg128#11%bot,<5y12=reg128#12%bot
# asm 2: vmlal.u32 <r0=q3,<x4=d20,<5y12=d22
vmlal.u32 q3,d20,d22
# qhasm: r0[0,1] += x23[0] unsigned* 5y34[0]; r0[2,3] += x23[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r0=q3,<x23=d18,<5y34=d24
vmlal.u32 q3,d18,d24
# qhasm: r0[0,1] += x23[2] unsigned* 5y12[2]; r0[2,3] += x23[3] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%top,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r0=q3,<x23=d19,<5y12=d23
vmlal.u32 q3,d19,d23
# qhasm: c01 c23 = c01[0]c23[0]c01[2]c23[2]c01[1]c23[1]c01[3]c23[3]
# asm 1: vtrn.32 <c01=reg128#8,<c23=reg128#14
# asm 2: vtrn.32 <c01=q7,<c23=q13
vtrn.32 q7,q13
# qhasm: r0[0,1] += x01[0] unsigned* y0[0]; r0[2,3] += x01[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r0=q3,<x01=d16,<y0=d0
vmlal.u32 q3,d16,d0
# qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
# asm 1: vshll.u32 >r3=reg128#6,<c23=reg128#14%top,#18
# asm 2: vshll.u32 >r3=q5,<c23=d27,#18
vshll.u32 q5,d27,#18
# qhasm: r0[0,1] += x01[2] unsigned* 5y34[2]; r0[2,3] += x01[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r0=q3,<x01=d17,<5y34=d25
vmlal.u32 q3,d17,d25
# qhasm: r3[0,1] += x01[0] unsigned* y34[0]; r3[2,3] += x01[1] unsigned* y34[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%bot,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r3=q5,<x01=d16,<y34=d4
vmlal.u32 q5,d16,d4
# qhasm: r3[0,1] += x01[2] unsigned* y12[2]; r3[2,3] += x01[3] unsigned* y12[3]
# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%top,<y12=reg128#2%top
# asm 2: vmlal.u32 <r3=q5,<x01=d17,<y12=d3
vmlal.u32 q5,d17,d3
# qhasm: r3[0,1] += x23[0] unsigned* y12[0]; r3[2,3] += x23[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r3=q5,<x23=d18,<y12=d2
vmlal.u32 q5,d18,d2
# qhasm: r3[0,1] += x23[2] unsigned* y0[0]; r3[2,3] += x23[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r3=q5,<x23=d19,<y0=d0
vmlal.u32 q5,d19,d0
# qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
# asm 1: vshll.u32 >r1=reg128#14,<c23=reg128#14%bot,#6
# asm 2: vshll.u32 >r1=q13,<c23=d26,#6
vshll.u32 q13,d26,#6
# qhasm: r3[0,1] += x4[0] unsigned* 5y34[2]; r3[2,3] += x4[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r3=reg128#6,<x4=reg128#11%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r3=q5,<x4=d20,<5y34=d25
vmlal.u32 q5,d20,d25
# qhasm: r1[0,1] += x01[0] unsigned* y12[0]; r1[2,3] += x01[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r1=q13,<x01=d16,<y12=d2
vmlal.u32 q13,d16,d2
# qhasm: r1[0,1] += x01[2] unsigned* y0[0]; r1[2,3] += x01[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r1=q13,<x01=d17,<y0=d0
vmlal.u32 q13,d17,d0
# qhasm: r1[0,1] += x23[0] unsigned* 5y34[2]; r1[2,3] += x23[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r1=q13,<x23=d18,<5y34=d25
vmlal.u32 q13,d18,d25
# qhasm: r1[0,1] += x23[2] unsigned* 5y34[0]; r1[2,3] += x23[3] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%top,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r1=q13,<x23=d19,<5y34=d24
vmlal.u32 q13,d19,d24
# qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
# asm 1: vshll.u32 >r2=reg128#8,<c01=reg128#8%top,#12
# asm 2: vshll.u32 >r2=q7,<c01=d15,#12
vshll.u32 q7,d15,#12
# qhasm: r1[0,1] += x4[0] unsigned* 5y12[2]; r1[2,3] += x4[1] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r1=reg128#14,<x4=reg128#11%bot,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r1=q13,<x4=d20,<5y12=d23
vmlal.u32 q13,d20,d23
# qhasm: r2[0,1] += x01[0] unsigned* y12[2]; r2[2,3] += x01[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r2=q7,<x01=d16,<y12=d3
vmlal.u32 q7,d16,d3
# qhasm: r2[0,1] += x01[2] unsigned* y12[0]; r2[2,3] += x01[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r2=q7,<x01=d17,<y12=d2
vmlal.u32 q7,d17,d2
# qhasm: r2[0,1] += x23[0] unsigned* y0[0]; r2[2,3] += x23[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r2=q7,<x23=d18,<y0=d0
vmlal.u32 q7,d18,d0
# qhasm: r2[0,1] += x23[2] unsigned* 5y34[2]; r2[2,3] += x23[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r2=q7,<x23=d19,<5y34=d25
vmlal.u32 q7,d19,d25
# qhasm: r2[0,1] += x4[0] unsigned* 5y34[0]; r2[2,3] += x4[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x4=reg128#11%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r2=q7,<x4=d20,<5y34=d24
vmlal.u32 q7,d20,d24
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#9,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q8,<r0=q3,#26
vshr.u64 q8,q3,#26
# qhasm: r0 &= mask
# asm 1: vand >r0=reg128#4,<r0=reg128#4,<mask=reg128#7
# asm 2: vand >r0=q3,<r0=q3,<mask=q6
vand q3,q3,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#9,<r1=reg128#14,<t1=reg128#9
# asm 2: vadd.i64 >r1=q8,<r1=q13,<t1=q8
vadd.i64 q8,q13,q8
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#10,<r3=reg128#6,#26
# asm 2: vshr.u64 >t4=q9,<r3=q5,#26
vshr.u64 q9,q5,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
# asm 2: vand >r3=q5,<r3=q5,<mask=q6
vand q5,q5,q6
# qhasm: 2x r4 += t4
# asm 1: vadd.i64 >r4=reg128#5,<r4=reg128#5,<t4=reg128#10
# asm 2: vadd.i64 >r4=q4,<r4=q4,<t4=q9
vadd.i64 q4,q4,q9
# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#9,#26
# asm 2: vshr.u64 >t2=q9,<r1=q8,#26
vshr.u64 q9,q8,#26
# qhasm: r1 &= mask
# asm 1: vand >r1=reg128#11,<r1=reg128#9,<mask=reg128#7
# asm 2: vand >r1=q10,<r1=q8,<mask=q6
vand q10,q8,q6
# qhasm: 2x t0 = r4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#9,<r4=reg128#5,#26
# asm 2: vshr.u64 >t0=q8,<r4=q4,#26
vshr.u64 q8,q4,#26
# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#8,<r2=reg128#8,<t2=reg128#10
# asm 2: vadd.i64 >r2=q7,<r2=q7,<t2=q9
vadd.i64 q7,q7,q9
# qhasm: r4 &= mask
# asm 1: vand >r4=reg128#5,<r4=reg128#5,<mask=reg128#7
# asm 2: vand >r4=q4,<r4=q4,<mask=q6
vand q4,q4,q6
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
vadd.i64 q3,q3,q8
# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#9,<t0=reg128#9,#2
# asm 2: vshl.i64 >t0=q8,<t0=q8,#2
vshl.i64 q8,q8,#2
# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#8,#26
# asm 2: vshr.u64 >t3=q13,<r2=q7,#26
vshr.u64 q13,q7,#26
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
vadd.i64 q3,q3,q8
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#8,<mask=reg128#7
# asm 2: vand >x23=q9,<r2=q7,<mask=q6
vand q9,q7,q6
# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#6,<r3=reg128#6,<t3=reg128#14
# asm 2: vadd.i64 >r3=q5,<r3=q5,<t3=q13
vadd.i64 q5,q5,q13
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#8,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q7,<r0=q3,#26
vshr.u64 q7,q3,#26
# qhasm: x01 = r0 & mask
# asm 1: vand >x01=reg128#9,<r0=reg128#4,<mask=reg128#7
# asm 2: vand >x01=q8,<r0=q3,<mask=q6
vand q8,q3,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#11,<t1=reg128#8
# asm 2: vadd.i64 >r1=q3,<r1=q10,<t1=q7
vadd.i64 q3,q10,q7
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#6,#26
# asm 2: vshr.u64 >t4=q7,<r3=q5,#26
vshr.u64 q7,q5,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
# asm 2: vand >r3=q5,<r3=q5,<mask=q6
vand q5,q5,q6
# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#11,<r4=reg128#5,<t4=reg128#8
# asm 2: vadd.i64 >x4=q10,<r4=q4,<t4=q7
vadd.i64 q10,q4,q7
# qhasm: len -= 32
# asm 1: sub >len=int32#4,<len=int32#4,#32
# asm 2: sub >len=r3,<len=r3,#32
sub r3,r3,#32
# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
# asm 2: vtrn.32 <x01=d16,<x01=d17
vtrn.32 d16,d17
# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19
# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
# asm 2: vtrn.32 <r1=d6,<r1=d7
vtrn.32 d6,d7
# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#6%bot,<r3=reg128#6%top
# asm 2: vtrn.32 <r3=d10,<r3=d11
vtrn.32 d10,d11
# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
# asm 2: vtrn.32 <x4=d20,<x4=d21
vtrn.32 d20,d21
# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
vext.32 d17,d6,d6,#0
# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#6%bot,<r3=reg128#6%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d10,<r3=d10,#0
vext.32 d19,d10,d10,#0
# qhasm: unsigned>? len - 32
# asm 1: cmp <len=int32#4,#32
# asm 2: cmp <len=r3,#32
cmp r3,#32
# qhasm: goto mainloop if unsigned>
bhi ._mainloop
# qhasm: end:
._end:
# qhasm: mem128[input_0] = x01;input_0+=16
# asm 1: vst1.8 {<x01=reg128#9%bot-<x01=reg128#9%top},[<input_0=int32#1]!
# asm 2: vst1.8 {<x01=d16-<x01=d17},[<input_0=r0]!
vst1.8 {d16-d17},[r0]!
# qhasm: mem128[input_0] = x23;input_0+=16
# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1]!
# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0]!
vst1.8 {d18-d19},[r0]!
# qhasm: mem64[input_0] = x4[0]
# asm 1: vst1.8 <x4=reg128#11%bot,[<input_0=int32#1]
# asm 2: vst1.8 <x4=d20,[<input_0=r0]
vst1.8 d20,[r0]
# qhasm: len = len
# asm 1: mov >len=int32#1,<len=int32#4
# asm 2: mov >len=r0,<len=r3
mov r0,r3
# qhasm: qpopreturn len
mov sp,r12
vpop {q4,q5,q6,q7}
bx lr
# qhasm: int32 input_0
# qhasm: int32 input_1
# qhasm: int32 input_2
# qhasm: int32 input_3
# qhasm: stack32 input_4
# qhasm: stack32 input_5
# qhasm: stack32 input_6
# qhasm: stack32 input_7
# qhasm: int32 caller_r4
# qhasm: int32 caller_r5
# qhasm: int32 caller_r6
# qhasm: int32 caller_r7
# qhasm: int32 caller_r8
# qhasm: int32 caller_r9
# qhasm: int32 caller_r10
# qhasm: int32 caller_r11
# qhasm: int32 caller_r12
# qhasm: int32 caller_r14
# qhasm: reg128 caller_q4
# qhasm: reg128 caller_q5
# qhasm: reg128 caller_q6
# qhasm: reg128 caller_q7
# qhasm: reg128 r0
# qhasm: reg128 r1
# qhasm: reg128 r2
# qhasm: reg128 r3
# qhasm: reg128 r4
# qhasm: reg128 x01
# qhasm: reg128 x23
# qhasm: reg128 x4
# qhasm: reg128 y01
# qhasm: reg128 y23
# qhasm: reg128 y4
# qhasm: reg128 _5y01
# qhasm: reg128 _5y23
# qhasm: reg128 _5y4
# qhasm: reg128 c01
# qhasm: reg128 c23
# qhasm: reg128 c4
# qhasm: reg128 t0
# qhasm: reg128 t1
# qhasm: reg128 t2
# qhasm: reg128 t3
# qhasm: reg128 t4
# qhasm: reg128 mask
# qhasm: enter crypto_onetimeauth_poly1305_neon2_addmulmod
.align 2
.global openssl_poly1305_neon2_addmulmod
.hidden openssl_poly1305_neon2_addmulmod
.type openssl_poly1305_neon2_addmulmod STT_FUNC
openssl_poly1305_neon2_addmulmod:
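# Editorial note, inferred from the code below: this routine computes
# out = (x + c) * y mod 2^130 - 5 on two sets of radix-2^26 Poly1305 limbs
# in parallel, with r0 = out, r1 = x, r2 = y and r3 = c.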
sub sp,sp,#0
# qhasm: 2x mask = 0xffffffff
# asm 1: vmov.i64 >mask=reg128#1,#0xffffffff
# asm 2: vmov.i64 >mask=q0,#0xffffffff
vmov.i64 q0,#0xffffffff
# qhasm: y01 aligned= mem128[input_2];input_2+=16
# asm 1: vld1.8 {>y01=reg128#2%bot->y01=reg128#2%top},[<input_2=int32#3,: 128]!
# asm 2: vld1.8 {>y01=d2->y01=d3},[<input_2=r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
# qhasm: 4x _5y01 = y01 << 2
# asm 1: vshl.i32 >_5y01=reg128#3,<y01=reg128#2,#2
# asm 2: vshl.i32 >_5y01=q2,<y01=q1,#2
vshl.i32 q2,q1,#2
# qhasm: y23 aligned= mem128[input_2];input_2+=16
# asm 1: vld1.8 {>y23=reg128#4%bot->y23=reg128#4%top},[<input_2=int32#3,: 128]!
# asm 2: vld1.8 {>y23=d6->y23=d7},[<input_2=r2,: 128]!
vld1.8 {d6-d7},[r2,: 128]!
# qhasm: 4x _5y23 = y23 << 2
# asm 1: vshl.i32 >_5y23=reg128#9,<y23=reg128#4,#2
# asm 2: vshl.i32 >_5y23=q8,<y23=q3,#2
vshl.i32 q8,q3,#2
# qhasm: y4 aligned= mem64[input_2]y4[1]
# asm 1: vld1.8 {<y4=reg128#10%bot},[<input_2=int32#3,: 64]
# asm 2: vld1.8 {<y4=d18},[<input_2=r2,: 64]
vld1.8 {d18},[r2,: 64]
# qhasm: 4x _5y4 = y4 << 2
# asm 1: vshl.i32 >_5y4=reg128#11,<y4=reg128#10,#2
# asm 2: vshl.i32 >_5y4=q10,<y4=q9,#2
vshl.i32 q10,q9,#2
# qhasm: x01 aligned= mem128[input_1];input_1+=16
# asm 1: vld1.8 {>x01=reg128#12%bot->x01=reg128#12%top},[<input_1=int32#2,: 128]!
# asm 2: vld1.8 {>x01=d22->x01=d23},[<input_1=r1,: 128]!
vld1.8 {d22-d23},[r1,: 128]!
# qhasm: 4x _5y01 += y01
# asm 1: vadd.i32 >_5y01=reg128#3,<_5y01=reg128#3,<y01=reg128#2
# asm 2: vadd.i32 >_5y01=q2,<_5y01=q2,<y01=q1
vadd.i32 q2,q2,q1
# qhasm: x23 aligned= mem128[input_1];input_1+=16
# asm 1: vld1.8 {>x23=reg128#13%bot->x23=reg128#13%top},[<input_1=int32#2,: 128]!
# asm 2: vld1.8 {>x23=d24->x23=d25},[<input_1=r1,: 128]!
vld1.8 {d24-d25},[r1,: 128]!
# qhasm: 4x _5y23 += y23
# asm 1: vadd.i32 >_5y23=reg128#9,<_5y23=reg128#9,<y23=reg128#4
# asm 2: vadd.i32 >_5y23=q8,<_5y23=q8,<y23=q3
vadd.i32 q8,q8,q3
# qhasm: 4x _5y4 += y4
# asm 1: vadd.i32 >_5y4=reg128#11,<_5y4=reg128#11,<y4=reg128#10
# asm 2: vadd.i32 >_5y4=q10,<_5y4=q10,<y4=q9
vadd.i32 q10,q10,q9
# qhasm: c01 aligned= mem128[input_3];input_3+=16
# asm 1: vld1.8 {>c01=reg128#14%bot->c01=reg128#14%top},[<input_3=int32#4,: 128]!
# asm 2: vld1.8 {>c01=d26->c01=d27},[<input_3=r3,: 128]!
vld1.8 {d26-d27},[r3,: 128]!
# qhasm: 4x x01 += c01
# asm 1: vadd.i32 >x01=reg128#12,<x01=reg128#12,<c01=reg128#14
# asm 2: vadd.i32 >x01=q11,<x01=q11,<c01=q13
vadd.i32 q11,q11,q13
# qhasm: c23 aligned= mem128[input_3];input_3+=16
# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_3=int32#4,: 128]!
# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_3=r3,: 128]!
vld1.8 {d26-d27},[r3,: 128]!
# qhasm: 4x x23 += c23
# asm 1: vadd.i32 >x23=reg128#13,<x23=reg128#13,<c23=reg128#14
# asm 2: vadd.i32 >x23=q12,<x23=q12,<c23=q13
vadd.i32 q12,q12,q13
# qhasm: x4 aligned= mem64[input_1]x4[1]
# asm 1: vld1.8 {<x4=reg128#14%bot},[<input_1=int32#2,: 64]
# asm 2: vld1.8 {<x4=d26},[<input_1=r1,: 64]
vld1.8 {d26},[r1,: 64]
# qhasm: 2x mask unsigned>>=6
# asm 1: vshr.u64 >mask=reg128#1,<mask=reg128#1,#6
# asm 2: vshr.u64 >mask=q0,<mask=q0,#6
vshr.u64 q0,q0,#6
# qhasm: c4 aligned= mem64[input_3]c4[1]
# asm 1: vld1.8 {<c4=reg128#15%bot},[<input_3=int32#4,: 64]
# asm 2: vld1.8 {<c4=d28},[<input_3=r3,: 64]
vld1.8 {d28},[r3,: 64]
# qhasm: 4x x4 += c4
# asm 1: vadd.i32 >x4=reg128#14,<x4=reg128#14,<c4=reg128#15
# asm 2: vadd.i32 >x4=q13,<x4=q13,<c4=q14
vadd.i32 q13,q13,q14
# qhasm: r0[0,1] = x01[0] unsigned* y01[0]; r0[2,3] = x01[1] unsigned* y01[1]
# asm 1: vmull.u32 >r0=reg128#15,<x01=reg128#12%bot,<y01=reg128#2%bot
# asm 2: vmull.u32 >r0=q14,<x01=d22,<y01=d2
vmull.u32 q14,d22,d2
# qhasm: r0[0,1] += x01[2] unsigned* _5y4[0]; r0[2,3] += x01[3] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r0=reg128#15,<x01=reg128#12%top,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r0=q14,<x01=d23,<_5y4=d20
vmlal.u32 q14,d23,d20
# qhasm: r0[0,1] += x23[0] unsigned* _5y23[2]; r0[2,3] += x23[1] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%bot,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r0=q14,<x23=d24,<_5y23=d17
vmlal.u32 q14,d24,d17
# qhasm: r0[0,1] += x23[2] unsigned* _5y23[0]; r0[2,3] += x23[3] unsigned* _5y23[1]
# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%top,<_5y23=reg128#9%bot
# asm 2: vmlal.u32 <r0=q14,<x23=d25,<_5y23=d16
vmlal.u32 q14,d25,d16
# qhasm: r0[0,1] += x4[0] unsigned* _5y01[2]; r0[2,3] += x4[1] unsigned* _5y01[3]
# asm 1: vmlal.u32 <r0=reg128#15,<x4=reg128#14%bot,<_5y01=reg128#3%top
# asm 2: vmlal.u32 <r0=q14,<x4=d26,<_5y01=d5
vmlal.u32 q14,d26,d5
# qhasm: r1[0,1] = x01[0] unsigned* y01[2]; r1[2,3] = x01[1] unsigned* y01[3]
# asm 1: vmull.u32 >r1=reg128#3,<x01=reg128#12%bot,<y01=reg128#2%top
# asm 2: vmull.u32 >r1=q2,<x01=d22,<y01=d3
vmull.u32 q2,d22,d3
# qhasm: r1[0,1] += x01[2] unsigned* y01[0]; r1[2,3] += x01[3] unsigned* y01[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x01=reg128#12%top,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r1=q2,<x01=d23,<y01=d2
vmlal.u32 q2,d23,d2
# qhasm: r1[0,1] += x23[0] unsigned* _5y4[0]; r1[2,3] += x23[1] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%bot,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r1=q2,<x23=d24,<_5y4=d20
vmlal.u32 q2,d24,d20
# qhasm: r1[0,1] += x23[2] unsigned* _5y23[2]; r1[2,3] += x23[3] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%top,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r1=q2,<x23=d25,<_5y23=d17
vmlal.u32 q2,d25,d17
# qhasm: r1[0,1] += x4[0] unsigned* _5y23[0]; r1[2,3] += x4[1] unsigned* _5y23[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x4=reg128#14%bot,<_5y23=reg128#9%bot
# asm 2: vmlal.u32 <r1=q2,<x4=d26,<_5y23=d16
vmlal.u32 q2,d26,d16
# qhasm: r2[0,1] = x01[0] unsigned* y23[0]; r2[2,3] = x01[1] unsigned* y23[1]
# asm 1: vmull.u32 >r2=reg128#16,<x01=reg128#12%bot,<y23=reg128#4%bot
# asm 2: vmull.u32 >r2=q15,<x01=d22,<y23=d6
vmull.u32 q15,d22,d6
# qhasm: r2[0,1] += x01[2] unsigned* y01[2]; r2[2,3] += x01[3] unsigned* y01[3]
# asm 1: vmlal.u32 <r2=reg128#16,<x01=reg128#12%top,<y01=reg128#2%top
# asm 2: vmlal.u32 <r2=q15,<x01=d23,<y01=d3
vmlal.u32 q15,d23,d3
# qhasm: r2[0,1] += x23[0] unsigned* y01[0]; r2[2,3] += x23[1] unsigned* y01[1]
# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%bot,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r2=q15,<x23=d24,<y01=d2
vmlal.u32 q15,d24,d2
# qhasm: r2[0,1] += x23[2] unsigned* _5y4[0]; r2[2,3] += x23[3] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%top,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r2=q15,<x23=d25,<_5y4=d20
vmlal.u32 q15,d25,d20
# qhasm: r2[0,1] += x4[0] unsigned* _5y23[2]; r2[2,3] += x4[1] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r2=reg128#16,<x4=reg128#14%bot,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r2=q15,<x4=d26,<_5y23=d17
vmlal.u32 q15,d26,d17
# qhasm: r3[0,1] = x01[0] unsigned* y23[2]; r3[2,3] = x01[1] unsigned* y23[3]
# asm 1: vmull.u32 >r3=reg128#9,<x01=reg128#12%bot,<y23=reg128#4%top
# asm 2: vmull.u32 >r3=q8,<x01=d22,<y23=d7
vmull.u32 q8,d22,d7
# qhasm: r3[0,1] += x01[2] unsigned* y23[0]; r3[2,3] += x01[3] unsigned* y23[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x01=reg128#12%top,<y23=reg128#4%bot
# asm 2: vmlal.u32 <r3=q8,<x01=d23,<y23=d6
vmlal.u32 q8,d23,d6
# qhasm: r3[0,1] += x23[0] unsigned* y01[2]; r3[2,3] += x23[1] unsigned* y01[3]
# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%bot,<y01=reg128#2%top
# asm 2: vmlal.u32 <r3=q8,<x23=d24,<y01=d3
vmlal.u32 q8,d24,d3
# qhasm: r3[0,1] += x23[2] unsigned* y01[0]; r3[2,3] += x23[3] unsigned* y01[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%top,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r3=q8,<x23=d25,<y01=d2
vmlal.u32 q8,d25,d2
# qhasm: r3[0,1] += x4[0] unsigned* _5y4[0]; r3[2,3] += x4[1] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x4=reg128#14%bot,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r3=q8,<x4=d26,<_5y4=d20
vmlal.u32 q8,d26,d20
# qhasm: r4[0,1] = x01[0] unsigned* y4[0]; r4[2,3] = x01[1] unsigned* y4[1]
# asm 1: vmull.u32 >r4=reg128#10,<x01=reg128#12%bot,<y4=reg128#10%bot
# asm 2: vmull.u32 >r4=q9,<x01=d22,<y4=d18
vmull.u32 q9,d22,d18
# qhasm: r4[0,1] += x01[2] unsigned* y23[2]; r4[2,3] += x01[3] unsigned* y23[3]
# asm 1: vmlal.u32 <r4=reg128#10,<x01=reg128#12%top,<y23=reg128#4%top
# asm 2: vmlal.u32 <r4=q9,<x01=d23,<y23=d7
vmlal.u32 q9,d23,d7
# qhasm: r4[0,1] += x23[0] unsigned* y23[0]; r4[2,3] += x23[1] unsigned* y23[1]
# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%bot,<y23=reg128#4%bot
# asm 2: vmlal.u32 <r4=q9,<x23=d24,<y23=d6
vmlal.u32 q9,d24,d6
# qhasm: r4[0,1] += x23[2] unsigned* y01[2]; r4[2,3] += x23[3] unsigned* y01[3]
# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%top,<y01=reg128#2%top
# asm 2: vmlal.u32 <r4=q9,<x23=d25,<y01=d3
vmlal.u32 q9,d25,d3
# qhasm: r4[0,1] += x4[0] unsigned* y01[0]; r4[2,3] += x4[1] unsigned* y01[1]
# asm 1: vmlal.u32 <r4=reg128#10,<x4=reg128#14%bot,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r4=q9,<x4=d26,<y01=d2
vmlal.u32 q9,d26,d2
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#2,<r0=reg128#15,#26
# asm 2: vshr.u64 >t1=q1,<r0=q14,#26
vshr.u64 q1,q14,#26
# qhasm: r0 &= mask
# asm 1: vand >r0=reg128#4,<r0=reg128#15,<mask=reg128#1
# asm 2: vand >r0=q3,<r0=q14,<mask=q0
vand q3,q14,q0
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#3,<t1=reg128#2
# asm 2: vadd.i64 >r1=q1,<r1=q2,<t1=q1
vadd.i64 q1,q2,q1
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#3,<r3=reg128#9,#26
# asm 2: vshr.u64 >t4=q2,<r3=q8,#26
vshr.u64 q2,q8,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#9,<r3=reg128#9,<mask=reg128#1
# asm 2: vand >r3=q8,<r3=q8,<mask=q0
vand q8,q8,q0
# qhasm: 2x r4 += t4
# asm 1: vadd.i64 >r4=reg128#3,<r4=reg128#10,<t4=reg128#3
# asm 2: vadd.i64 >r4=q2,<r4=q9,<t4=q2
vadd.i64 q2,q9,q2
# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#2,#26
# asm 2: vshr.u64 >t2=q9,<r1=q1,#26
vshr.u64 q9,q1,#26
# qhasm: r1 &= mask
# asm 1: vand >r1=reg128#2,<r1=reg128#2,<mask=reg128#1
# asm 2: vand >r1=q1,<r1=q1,<mask=q0
vand q1,q1,q0
# qhasm: 2x t0 = r4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#11,<r4=reg128#3,#26
# asm 2: vshr.u64 >t0=q10,<r4=q2,#26
vshr.u64 q10,q2,#26
# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#10,<r2=reg128#16,<t2=reg128#10
# asm 2: vadd.i64 >r2=q9,<r2=q15,<t2=q9
vadd.i64 q9,q15,q9
# qhasm: r4 &= mask
# asm 1: vand >r4=reg128#3,<r4=reg128#3,<mask=reg128#1
# asm 2: vand >r4=q2,<r4=q2,<mask=q0
vand q2,q2,q0
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10
vadd.i64 q3,q3,q10
# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#11,<t0=reg128#11,#2
# asm 2: vshl.i64 >t0=q10,<t0=q10,#2
vshl.i64 q10,q10,#2
# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#12,<r2=reg128#10,#26
# asm 2: vshr.u64 >t3=q11,<r2=q9,#26
vshr.u64 q11,q9,#26
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10
vadd.i64 q3,q3,q10
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#10,<mask=reg128#1
# asm 2: vand >x23=q9,<r2=q9,<mask=q0
vand q9,q9,q0
# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#9,<r3=reg128#9,<t3=reg128#12
# asm 2: vadd.i64 >r3=q8,<r3=q8,<t3=q11
vadd.i64 q8,q8,q11
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#11,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q10,<r0=q3,#26
vshr.u64 q10,q3,#26
# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19
# qhasm: x01 = r0 & mask
# asm 1: vand >x01=reg128#4,<r0=reg128#4,<mask=reg128#1
# asm 2: vand >x01=q3,<r0=q3,<mask=q0
vand q3,q3,q0
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#2,<t1=reg128#11
# asm 2: vadd.i64 >r1=q1,<r1=q1,<t1=q10
vadd.i64 q1,q1,q10
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#11,<r3=reg128#9,#26
# asm 2: vshr.u64 >t4=q10,<r3=q8,#26
vshr.u64 q10,q8,#26
# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#4%bot,<x01=reg128#4%top
# asm 2: vtrn.32 <x01=d6,<x01=d7
vtrn.32 d6,d7
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#1,<r3=reg128#9,<mask=reg128#1
# asm 2: vand >r3=q0,<r3=q8,<mask=q0
vand q0,q8,q0
# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#2%bot,<r1=reg128#2%top
# asm 2: vtrn.32 <r1=d2,<r1=d3
vtrn.32 d2,d3
# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#3,<r4=reg128#3,<t4=reg128#11
# asm 2: vadd.i64 >x4=q2,<r4=q2,<t4=q10
vadd.i64 q2,q2,q10
# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#1%bot,<r3=reg128#1%top
# asm 2: vtrn.32 <r3=d0,<r3=d1
vtrn.32 d0,d1
# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#4%top,<r1=reg128#2%bot,<r1=reg128#2%bot,#0
# asm 2: vext.32 <x01=d7,<r1=d2,<r1=d2,#0
vext.32 d7,d2,d2,#0
# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#1%bot,<r3=reg128#1%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d0,<r3=d0,#0
vext.32 d19,d0,d0,#0
# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#3%bot,<x4=reg128#3%top
# asm 2: vtrn.32 <x4=d4,<x4=d5
vtrn.32 d4,d5
# qhasm: mem128[input_0] aligned= x01;input_0+=16
# asm 1: vst1.8 {<x01=reg128#4%bot-<x01=reg128#4%top},[<input_0=int32#1,: 128]!
# asm 2: vst1.8 {<x01=d6-<x01=d7},[<input_0=r0,: 128]!
vst1.8 {d6-d7},[r0,: 128]!
# qhasm: mem128[input_0] aligned= x23;input_0+=16
# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1,: 128]!
# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0,: 128]!
vst1.8 {d18-d19},[r0,: 128]!
# qhasm: mem64[input_0] aligned= x4[0]
# asm 1: vst1.8 <x4=reg128#3%bot,[<input_0=int32#1,: 64]
# asm 2: vst1.8 <x4=d4,[<input_0=r0,: 64]
vst1.8 d4,[r0,: 64]
# qhasm: return
add sp,sp,#0
bx lr
#endif /* !OPENSSL_NO_ASM && OPENSSL_ARM && __ELF__ */
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/crypto/hrss/asm/poly_rq_mul.S
// Copyright (c) 2017, the HRSS authors.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_SMALL) && \
defined(__linux__) && !defined(MY_ASSEMBLER_IS_TOO_OLD_FOR_AVX)
// This is the polynomial multiplication function from [HRSS], provided by kind
// permission of the authors.
//
// HRSS: https://eprint.iacr.org/2017/1005
# This file was generated by poly_rq_mul.py
.text
.align 32
const3:
.word 3
.word 3
.word 3
.word 3
.word 3
.word 3
.word 3
.word 3
.word 3
.word 3
.word 3
.word 3
.word 3
.word 3
.word 3
.word 3
const9:
.word 9
.word 9
.word 9
.word 9
.word 9
.word 9
.word 9
.word 9
.word 9
.word 9
.word 9
.word 9
.word 9
.word 9
.word 9
.word 9
const0:
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
const729:
.word 729
.word 729
.word 729
.word 729
.word 729
.word 729
.word 729
.word 729
.word 729
.word 729
.word 729
.word 729
.word 729
.word 729
.word 729
.word 729
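# 43691 is the multiplicative inverse of 3 mod 2^16 (3 * 43691 = 2*65536 + 1).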
const3_inv:
.word 43691
.word 43691
.word 43691
.word 43691
.word 43691
.word 43691
.word 43691
.word 43691
.word 43691
.word 43691
.word 43691
.word 43691
.word 43691
.word 43691
.word 43691
.word 43691
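# 52429 is the multiplicative inverse of 5 mod 2^16 (5 * 52429 = 4*65536 + 1).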
const5_inv:
.word 52429
.word 52429
.word 52429
.word 52429
.word 52429
.word 52429
.word 52429
.word 52429
.word 52429
.word 52429
.word 52429
.word 52429
.word 52429
.word 52429
.word 52429
.word 52429
shuf48_16:
.byte 10
.byte 11
.byte 12
.byte 13
.byte 14
.byte 15
.byte 0
.byte 1
.byte 2
.byte 3
.byte 4
.byte 5
.byte 6
.byte 7
.byte 8
.byte 9
.byte 10
.byte 11
.byte 12
.byte 13
.byte 14
.byte 15
.byte 0
.byte 1
.byte 2
.byte 3
.byte 4
.byte 5
.byte 6
.byte 7
.byte 8
.byte 9
shufmin1_mask3:
.byte 2
.byte 3
.byte 4
.byte 5
.byte 6
.byte 7
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
.byte 255
mask32_to_16:
.word 0xffff
.word 0x0
.word 0xffff
.word 0x0
.word 0xffff
.word 0x0
.word 0xffff
.word 0x0
.word 0xffff
.word 0x0
.word 0xffff
.word 0x0
.word 0xffff
.word 0x0
.word 0xffff
.word 0x0
mask5_3_5_3:
.word 0
.word 0
.word 0
.word 65535
.word 65535
.word 65535
.word 65535
.word 65535
.word 0
.word 0
.word 0
.word 65535
.word 65535
.word 65535
.word 65535
.word 65535
mask3_5_3_5:
.word 65535
.word 65535
.word 65535
.word 0
.word 0
.word 0
.word 0
.word 0
.word 65535
.word 65535
.word 65535
.word 0
.word 0
.word 0
.word 0
.word 0
mask3_5_4_3_1:
.word 65535
.word 65535
.word 65535
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 65535
.word 65535
.word 65535
.word 0
mask_keephigh:
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 65535
.word 65535
.word 65535
.word 65535
.word 65535
.word 65535
.word 65535
.word 65535
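# 8191 = 2^13 - 1: mask for reduction modulo q = 8192 as used by HRSS.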
mask_mod8192:
.word 8191
.word 8191
.word 8191
.word 8191
.word 8191
.word 8191
.word 8191
.word 8191
.word 8191
.word 8191
.word 8191
.word 8191
.word 8191
.word 8191
.word 8191
.word 8191
.text
.global poly_Rq_mul
.hidden poly_Rq_mul
.type poly_Rq_mul, @function
.att_syntax prefix
poly_Rq_mul:
.cfi_startproc
_CET_ENDBR
push %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register rbp
push %r12
.cfi_offset r12, -24
# This function originally used a significant amount of stack space. As an
# alternative, the needed scratch space is now passed in as the 4th argument.
# The amount of scratch space used must thus be kept in sync with
# POLY_MUL_RQ_SCRATCH_SPACE in internal.h.
#
# Setting RSP to point into the given scratch space upsets the ABI tests,
# so all references to RSP have been switched to R8.
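#
# Editorial sketch of the entry point as seen from C (parameter names are
# illustrative only; the real declaration lives alongside
# POLY_MUL_RQ_SCRATCH_SPACE in internal.h):
#   poly_Rq_mul(out /* %rdi */, a /* %rsi */, b /* %rdx */,
#               scratch /* %rcx, POLY_MUL_RQ_SCRATCH_SPACE bytes */)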
mov %rcx, %r8
addq $6144+12288+512+9408+32, %r8
mov %r8, %rax
subq $6144, %r8
mov %r8, %r11
subq $12288, %r8
mov %r8, %r12
subq $512, %r8
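# %rax, %r11, %r12 and %r8 now address disjoint regions of the caller's
# scratch buffer: the stores below fill the %rax region from the first
# operand (%rsi) and the %r11 region from the second (%rdx), while the
# small %r8 region serves as temporary staging space.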
vmovdqa const3(%rip), %ymm3
vmovdqu 0(%rsi), %ymm0
vmovdqu 88(%rsi), %ymm1
vmovdqu 176(%rsi), %ymm2
vmovdqu 264(%rsi), %ymm12
vmovdqu 1056(%rsi), %ymm4
vmovdqu 1144(%rsi), %ymm5
vmovdqu 1232(%rsi), %ymm6
vmovdqu 1320(%rsi), %ymm7
vmovdqu 352(%rsi), %ymm8
vmovdqu 440(%rsi), %ymm9
vmovdqu 528(%rsi), %ymm10
vmovdqu 616(%rsi), %ymm11
vmovdqa %ymm0, 0(%rax)
vmovdqa %ymm1, 96(%rax)
vpaddw %ymm0, %ymm1, %ymm14
vmovdqa %ymm14, 192(%rax)
vmovdqa %ymm2, 288(%rax)
vmovdqa %ymm12, 384(%rax)
vpaddw %ymm2, %ymm12, %ymm14
vmovdqa %ymm14, 480(%rax)
vpaddw %ymm0, %ymm2, %ymm14
vmovdqa %ymm14, 576(%rax)
vpaddw %ymm1, %ymm12, %ymm15
vmovdqa %ymm15, 672(%rax)
vpaddw %ymm14, %ymm15, %ymm14
vmovdqa %ymm14, 768(%rax)
vmovdqa %ymm4, 5184(%rax)
vmovdqa %ymm5, 5280(%rax)
vpaddw %ymm4, %ymm5, %ymm14
vmovdqa %ymm14, 5376(%rax)
vmovdqa %ymm6, 5472(%rax)
vmovdqa %ymm7, 5568(%rax)
vpaddw %ymm6, %ymm7, %ymm14
vmovdqa %ymm14, 5664(%rax)
vpaddw %ymm4, %ymm6, %ymm14
vmovdqa %ymm14, 5760(%rax)
vpaddw %ymm5, %ymm7, %ymm15
vmovdqa %ymm15, 5856(%rax)
vpaddw %ymm14, %ymm15, %ymm14
vmovdqa %ymm14, 5952(%rax)
vmovdqa %ymm0, 0(%r8)
vmovdqa %ymm1, 32(%r8)
vmovdqa %ymm2, 64(%r8)
vmovdqa %ymm12, 96(%r8)
vmovdqa %ymm8, 128(%r8)
vmovdqa %ymm9, 160(%r8)
vmovdqa %ymm10, 192(%r8)
vmovdqa %ymm11, 224(%r8)
vmovdqu 704(%rsi), %ymm0
vpaddw 0(%r8), %ymm0, %ymm1
vpaddw 128(%r8), %ymm4, %ymm2
vpaddw %ymm2, %ymm1, %ymm8
vpsubw %ymm2, %ymm1, %ymm12
vmovdqa %ymm0, 256(%r8)
vmovdqu 792(%rsi), %ymm0
vpaddw 32(%r8), %ymm0, %ymm1
vpaddw 160(%r8), %ymm5, %ymm2
vpaddw %ymm2, %ymm1, %ymm9
vpsubw %ymm2, %ymm1, %ymm13
vmovdqa %ymm0, 288(%r8)
vmovdqu 880(%rsi), %ymm0
vpaddw 64(%r8), %ymm0, %ymm1
vpaddw 192(%r8), %ymm6, %ymm2
vpaddw %ymm2, %ymm1, %ymm10
vpsubw %ymm2, %ymm1, %ymm14
vmovdqa %ymm0, 320(%r8)
vmovdqu 968(%rsi), %ymm0
vpaddw 96(%r8), %ymm0, %ymm1
vpaddw 224(%r8), %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm11
vpsubw %ymm2, %ymm1, %ymm15
vmovdqa %ymm0, 352(%r8)
vmovdqa %ymm8, 864(%rax)
vmovdqa %ymm9, 960(%rax)
vpaddw %ymm8, %ymm9, %ymm0
vmovdqa %ymm0, 1056(%rax)
vmovdqa %ymm10, 1152(%rax)
vmovdqa %ymm11, 1248(%rax)
vpaddw %ymm10, %ymm11, %ymm0
vmovdqa %ymm0, 1344(%rax)
vpaddw %ymm8, %ymm10, %ymm0
vmovdqa %ymm0, 1440(%rax)
vpaddw %ymm9, %ymm11, %ymm1
vmovdqa %ymm1, 1536(%rax)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 1632(%rax)
vmovdqa %ymm12, 1728(%rax)
vmovdqa %ymm13, 1824(%rax)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 1920(%rax)
vmovdqa %ymm14, 2016(%rax)
vmovdqa %ymm15, 2112(%rax)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 2208(%rax)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 2304(%rax)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 2400(%rax)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 2496(%rax)
vmovdqa 256(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 0(%r8), %ymm0, %ymm0
vpsllw $2, %ymm4, %ymm1
vpaddw 128(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm8
vpsubw %ymm1, %ymm0, %ymm12
vmovdqa 288(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 32(%r8), %ymm0, %ymm0
vpsllw $2, %ymm5, %ymm1
vpaddw 160(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm9
vpsubw %ymm1, %ymm0, %ymm13
vmovdqa 320(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 64(%r8), %ymm0, %ymm0
vpsllw $2, %ymm6, %ymm1
vpaddw 192(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm10
vpsubw %ymm1, %ymm0, %ymm14
vmovdqa 352(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 96(%r8), %ymm0, %ymm0
vpsllw $2, %ymm7, %ymm1
vpaddw 224(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm11
vpsubw %ymm1, %ymm0, %ymm15
vmovdqa %ymm8, 2592(%rax)
vmovdqa %ymm9, 2688(%rax)
vpaddw %ymm8, %ymm9, %ymm0
vmovdqa %ymm0, 2784(%rax)
vmovdqa %ymm10, 2880(%rax)
vmovdqa %ymm11, 2976(%rax)
vpaddw %ymm10, %ymm11, %ymm0
vmovdqa %ymm0, 3072(%rax)
vpaddw %ymm8, %ymm10, %ymm0
vmovdqa %ymm0, 3168(%rax)
vpaddw %ymm9, %ymm11, %ymm1
vmovdqa %ymm1, 3264(%rax)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 3360(%rax)
vmovdqa %ymm12, 3456(%rax)
vmovdqa %ymm13, 3552(%rax)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 3648(%rax)
vmovdqa %ymm14, 3744(%rax)
vmovdqa %ymm15, 3840(%rax)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 3936(%rax)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 4032(%rax)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 4128(%rax)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 4224(%rax)
vpmullw %ymm3, %ymm4, %ymm0
vpaddw 256(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 128(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 0(%r8), %ymm0, %ymm12
vpmullw %ymm3, %ymm5, %ymm0
vpaddw 288(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 160(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 32(%r8), %ymm0, %ymm13
vpmullw %ymm3, %ymm6, %ymm0
vpaddw 320(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 192(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 64(%r8), %ymm0, %ymm14
vpmullw %ymm3, %ymm7, %ymm0
vpaddw 352(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 224(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 96(%r8), %ymm0, %ymm15
vmovdqa %ymm12, 4320(%rax)
vmovdqa %ymm13, 4416(%rax)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 4512(%rax)
vmovdqa %ymm14, 4608(%rax)
vmovdqa %ymm15, 4704(%rax)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 4800(%rax)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 4896(%rax)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 4992(%rax)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 5088(%rax)
vmovdqu 32(%rsi), %ymm0
vmovdqu 120(%rsi), %ymm1
vmovdqu 208(%rsi), %ymm2
vmovdqu 296(%rsi), %ymm12
vmovdqu 1088(%rsi), %ymm4
vmovdqu 1176(%rsi), %ymm5
vmovdqu 1264(%rsi), %ymm6
vmovdqu 1352(%rsi), %ymm7
vmovdqu 384(%rsi), %ymm8
vmovdqu 472(%rsi), %ymm9
vmovdqu 560(%rsi), %ymm10
vmovdqu 648(%rsi), %ymm11
vmovdqa %ymm0, 32(%rax)
vmovdqa %ymm1, 128(%rax)
vpaddw %ymm0, %ymm1, %ymm14
vmovdqa %ymm14, 224(%rax)
vmovdqa %ymm2, 320(%rax)
vmovdqa %ymm12, 416(%rax)
vpaddw %ymm2, %ymm12, %ymm14
vmovdqa %ymm14, 512(%rax)
vpaddw %ymm0, %ymm2, %ymm14
vmovdqa %ymm14, 608(%rax)
vpaddw %ymm1, %ymm12, %ymm15
vmovdqa %ymm15, 704(%rax)
vpaddw %ymm14, %ymm15, %ymm14
vmovdqa %ymm14, 800(%rax)
vmovdqa %ymm4, 5216(%rax)
vmovdqa %ymm5, 5312(%rax)
vpaddw %ymm4, %ymm5, %ymm14
vmovdqa %ymm14, 5408(%rax)
vmovdqa %ymm6, 5504(%rax)
vmovdqa %ymm7, 5600(%rax)
vpaddw %ymm6, %ymm7, %ymm14
vmovdqa %ymm14, 5696(%rax)
vpaddw %ymm4, %ymm6, %ymm14
vmovdqa %ymm14, 5792(%rax)
vpaddw %ymm5, %ymm7, %ymm15
vmovdqa %ymm15, 5888(%rax)
vpaddw %ymm14, %ymm15, %ymm14
vmovdqa %ymm14, 5984(%rax)
vmovdqa %ymm0, 0(%r8)
vmovdqa %ymm1, 32(%r8)
vmovdqa %ymm2, 64(%r8)
vmovdqa %ymm12, 96(%r8)
vmovdqa %ymm8, 128(%r8)
vmovdqa %ymm9, 160(%r8)
vmovdqa %ymm10, 192(%r8)
vmovdqa %ymm11, 224(%r8)
vmovdqu 736(%rsi), %ymm0
vpaddw 0(%r8), %ymm0, %ymm1
vpaddw 128(%r8), %ymm4, %ymm2
vpaddw %ymm2, %ymm1, %ymm8
vpsubw %ymm2, %ymm1, %ymm12
vmovdqa %ymm0, 256(%r8)
vmovdqu 824(%rsi), %ymm0
vpaddw 32(%r8), %ymm0, %ymm1
vpaddw 160(%r8), %ymm5, %ymm2
vpaddw %ymm2, %ymm1, %ymm9
vpsubw %ymm2, %ymm1, %ymm13
vmovdqa %ymm0, 288(%r8)
vmovdqu 912(%rsi), %ymm0
vpaddw 64(%r8), %ymm0, %ymm1
vpaddw 192(%r8), %ymm6, %ymm2
vpaddw %ymm2, %ymm1, %ymm10
vpsubw %ymm2, %ymm1, %ymm14
vmovdqa %ymm0, 320(%r8)
vmovdqu 1000(%rsi), %ymm0
vpaddw 96(%r8), %ymm0, %ymm1
vpaddw 224(%r8), %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm11
vpsubw %ymm2, %ymm1, %ymm15
vmovdqa %ymm0, 352(%r8)
vmovdqa %ymm8, 896(%rax)
vmovdqa %ymm9, 992(%rax)
vpaddw %ymm8, %ymm9, %ymm0
vmovdqa %ymm0, 1088(%rax)
vmovdqa %ymm10, 1184(%rax)
vmovdqa %ymm11, 1280(%rax)
vpaddw %ymm10, %ymm11, %ymm0
vmovdqa %ymm0, 1376(%rax)
vpaddw %ymm8, %ymm10, %ymm0
vmovdqa %ymm0, 1472(%rax)
vpaddw %ymm9, %ymm11, %ymm1
vmovdqa %ymm1, 1568(%rax)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 1664(%rax)
vmovdqa %ymm12, 1760(%rax)
vmovdqa %ymm13, 1856(%rax)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 1952(%rax)
vmovdqa %ymm14, 2048(%rax)
vmovdqa %ymm15, 2144(%rax)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 2240(%rax)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 2336(%rax)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 2432(%rax)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 2528(%rax)
vmovdqa 256(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 0(%r8), %ymm0, %ymm0
vpsllw $2, %ymm4, %ymm1
vpaddw 128(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm8
vpsubw %ymm1, %ymm0, %ymm12
vmovdqa 288(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 32(%r8), %ymm0, %ymm0
vpsllw $2, %ymm5, %ymm1
vpaddw 160(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm9
vpsubw %ymm1, %ymm0, %ymm13
vmovdqa 320(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 64(%r8), %ymm0, %ymm0
vpsllw $2, %ymm6, %ymm1
vpaddw 192(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm10
vpsubw %ymm1, %ymm0, %ymm14
vmovdqa 352(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 96(%r8), %ymm0, %ymm0
vpsllw $2, %ymm7, %ymm1
vpaddw 224(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm11
vpsubw %ymm1, %ymm0, %ymm15
vmovdqa %ymm8, 2624(%rax)
vmovdqa %ymm9, 2720(%rax)
vpaddw %ymm8, %ymm9, %ymm0
vmovdqa %ymm0, 2816(%rax)
vmovdqa %ymm10, 2912(%rax)
vmovdqa %ymm11, 3008(%rax)
vpaddw %ymm10, %ymm11, %ymm0
vmovdqa %ymm0, 3104(%rax)
vpaddw %ymm8, %ymm10, %ymm0
vmovdqa %ymm0, 3200(%rax)
vpaddw %ymm9, %ymm11, %ymm1
vmovdqa %ymm1, 3296(%rax)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 3392(%rax)
vmovdqa %ymm12, 3488(%rax)
vmovdqa %ymm13, 3584(%rax)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 3680(%rax)
vmovdqa %ymm14, 3776(%rax)
vmovdqa %ymm15, 3872(%rax)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 3968(%rax)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 4064(%rax)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 4160(%rax)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 4256(%rax)
vpmullw %ymm3, %ymm4, %ymm0
vpaddw 256(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 128(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 0(%r8), %ymm0, %ymm12
vpmullw %ymm3, %ymm5, %ymm0
vpaddw 288(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 160(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 32(%r8), %ymm0, %ymm13
vpmullw %ymm3, %ymm6, %ymm0
vpaddw 320(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 192(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 64(%r8), %ymm0, %ymm14
vpmullw %ymm3, %ymm7, %ymm0
vpaddw 352(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 224(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 96(%r8), %ymm0, %ymm15
vmovdqa %ymm12, 4352(%rax)
vmovdqa %ymm13, 4448(%rax)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 4544(%rax)
vmovdqa %ymm14, 4640(%rax)
vmovdqa %ymm15, 4736(%rax)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 4832(%rax)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 4928(%rax)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 5024(%rax)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 5120(%rax)
vmovdqu 64(%rsi), %ymm0
vmovdqu 152(%rsi), %ymm1
vmovdqu 240(%rsi), %ymm2
vmovdqu 328(%rsi), %ymm12
vmovdqu 1120(%rsi), %ymm4
vmovdqu 1208(%rsi), %ymm5
vmovdqu 1296(%rsi), %ymm6
# Only 18 bytes more can be read, but vmovdqu reads 32.
# Copy 18 bytes to the red zone and zero pad to 32 bytes.
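# The 32-byte block at -32(%rsp) then holds the 18 bytes from 1384(%rsi)
# followed by 14 bytes of zeros.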
xor %r9, %r9
movq %r9, -16(%rsp)
movq %r9, -8(%rsp)
movq 1384(%rsi), %r9
movq %r9, -32(%rsp)
movq 1384+8(%rsi), %r9
movq %r9, -24(%rsp)
movw 1384+16(%rsi), %r9w
movw %r9w, -16(%rsp)
vmovdqu -32(%rsp), %ymm7
vmovdqu 416(%rsi), %ymm8
vmovdqu 504(%rsi), %ymm9
vmovdqu 592(%rsi), %ymm10
vmovdqu 680(%rsi), %ymm11
vmovdqa %ymm0, 64(%rax)
vmovdqa %ymm1, 160(%rax)
vpaddw %ymm0, %ymm1, %ymm14
vmovdqa %ymm14, 256(%rax)
vmovdqa %ymm2, 352(%rax)
vmovdqa %ymm12, 448(%rax)
vpaddw %ymm2, %ymm12, %ymm14
vmovdqa %ymm14, 544(%rax)
vpaddw %ymm0, %ymm2, %ymm14
vmovdqa %ymm14, 640(%rax)
vpaddw %ymm1, %ymm12, %ymm15
vmovdqa %ymm15, 736(%rax)
vpaddw %ymm14, %ymm15, %ymm14
vmovdqa %ymm14, 832(%rax)
vmovdqa %ymm4, 5248(%rax)
vmovdqa %ymm5, 5344(%rax)
vpaddw %ymm4, %ymm5, %ymm14
vmovdqa %ymm14, 5440(%rax)
vmovdqa %ymm6, 5536(%rax)
vmovdqa %ymm7, 5632(%rax)
vpaddw %ymm6, %ymm7, %ymm14
vmovdqa %ymm14, 5728(%rax)
vpaddw %ymm4, %ymm6, %ymm14
vmovdqa %ymm14, 5824(%rax)
vpaddw %ymm5, %ymm7, %ymm15
vmovdqa %ymm15, 5920(%rax)
vpaddw %ymm14, %ymm15, %ymm14
vmovdqa %ymm14, 6016(%rax)
vmovdqa %ymm0, 0(%r8)
vmovdqa %ymm1, 32(%r8)
vmovdqa %ymm2, 64(%r8)
vmovdqa %ymm12, 96(%r8)
vmovdqa %ymm8, 128(%r8)
vmovdqa %ymm9, 160(%r8)
vmovdqa %ymm10, 192(%r8)
vmovdqa %ymm11, 224(%r8)
vmovdqu 768(%rsi), %ymm0
vpaddw 0(%r8), %ymm0, %ymm1
vpaddw 128(%r8), %ymm4, %ymm2
vpaddw %ymm2, %ymm1, %ymm8
vpsubw %ymm2, %ymm1, %ymm12
vmovdqa %ymm0, 256(%r8)
vmovdqu 856(%rsi), %ymm0
vpaddw 32(%r8), %ymm0, %ymm1
vpaddw 160(%r8), %ymm5, %ymm2
vpaddw %ymm2, %ymm1, %ymm9
vpsubw %ymm2, %ymm1, %ymm13
vmovdqa %ymm0, 288(%r8)
vmovdqu 944(%rsi), %ymm0
vpaddw 64(%r8), %ymm0, %ymm1
vpaddw 192(%r8), %ymm6, %ymm2
vpaddw %ymm2, %ymm1, %ymm10
vpsubw %ymm2, %ymm1, %ymm14
vmovdqa %ymm0, 320(%r8)
vmovdqu 1032(%rsi), %ymm0
vpaddw 96(%r8), %ymm0, %ymm1
vpaddw 224(%r8), %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm11
vpsubw %ymm2, %ymm1, %ymm15
vmovdqa %ymm0, 352(%r8)
vmovdqa %ymm8, 928(%rax)
vmovdqa %ymm9, 1024(%rax)
vpaddw %ymm8, %ymm9, %ymm0
vmovdqa %ymm0, 1120(%rax)
vmovdqa %ymm10, 1216(%rax)
vmovdqa %ymm11, 1312(%rax)
vpaddw %ymm10, %ymm11, %ymm0
vmovdqa %ymm0, 1408(%rax)
vpaddw %ymm8, %ymm10, %ymm0
vmovdqa %ymm0, 1504(%rax)
vpaddw %ymm9, %ymm11, %ymm1
vmovdqa %ymm1, 1600(%rax)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 1696(%rax)
vmovdqa %ymm12, 1792(%rax)
vmovdqa %ymm13, 1888(%rax)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 1984(%rax)
vmovdqa %ymm14, 2080(%rax)
vmovdqa %ymm15, 2176(%rax)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 2272(%rax)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 2368(%rax)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 2464(%rax)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 2560(%rax)
vmovdqa 256(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 0(%r8), %ymm0, %ymm0
vpsllw $2, %ymm4, %ymm1
vpaddw 128(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm8
vpsubw %ymm1, %ymm0, %ymm12
vmovdqa 288(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 32(%r8), %ymm0, %ymm0
vpsllw $2, %ymm5, %ymm1
vpaddw 160(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm9
vpsubw %ymm1, %ymm0, %ymm13
vmovdqa 320(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 64(%r8), %ymm0, %ymm0
vpsllw $2, %ymm6, %ymm1
vpaddw 192(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm10
vpsubw %ymm1, %ymm0, %ymm14
vmovdqa 352(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 96(%r8), %ymm0, %ymm0
vpsllw $2, %ymm7, %ymm1
vpaddw 224(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm11
vpsubw %ymm1, %ymm0, %ymm15
vmovdqa %ymm8, 2656(%rax)
vmovdqa %ymm9, 2752(%rax)
vpaddw %ymm8, %ymm9, %ymm0
vmovdqa %ymm0, 2848(%rax)
vmovdqa %ymm10, 2944(%rax)
vmovdqa %ymm11, 3040(%rax)
vpaddw %ymm10, %ymm11, %ymm0
vmovdqa %ymm0, 3136(%rax)
vpaddw %ymm8, %ymm10, %ymm0
vmovdqa %ymm0, 3232(%rax)
vpaddw %ymm9, %ymm11, %ymm1
vmovdqa %ymm1, 3328(%rax)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 3424(%rax)
vmovdqa %ymm12, 3520(%rax)
vmovdqa %ymm13, 3616(%rax)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 3712(%rax)
vmovdqa %ymm14, 3808(%rax)
vmovdqa %ymm15, 3904(%rax)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 4000(%rax)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 4096(%rax)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 4192(%rax)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 4288(%rax)
vpmullw %ymm3, %ymm4, %ymm0
vpaddw 256(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 128(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 0(%r8), %ymm0, %ymm12
vpmullw %ymm3, %ymm5, %ymm0
vpaddw 288(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 160(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 32(%r8), %ymm0, %ymm13
vpmullw %ymm3, %ymm6, %ymm0
vpaddw 320(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 192(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 64(%r8), %ymm0, %ymm14
vpmullw %ymm3, %ymm7, %ymm0
vpaddw 352(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 224(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 96(%r8), %ymm0, %ymm15
vmovdqa %ymm12, 4384(%rax)
vmovdqa %ymm13, 4480(%rax)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 4576(%rax)
vmovdqa %ymm14, 4672(%rax)
vmovdqa %ymm15, 4768(%rax)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 4864(%rax)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 4960(%rax)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 5056(%rax)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 5152(%rax)
vmovdqu 0(%rdx), %ymm0
vmovdqu 88(%rdx), %ymm1
vmovdqu 176(%rdx), %ymm2
vmovdqu 264(%rdx), %ymm12
vmovdqu 1056(%rdx), %ymm4
vmovdqu 1144(%rdx), %ymm5
vmovdqu 1232(%rdx), %ymm6
vmovdqu 1320(%rdx), %ymm7
vmovdqu 352(%rdx), %ymm8
vmovdqu 440(%rdx), %ymm9
vmovdqu 528(%rdx), %ymm10
vmovdqu 616(%rdx), %ymm11
vmovdqa %ymm0, 0(%r11)
vmovdqa %ymm1, 96(%r11)
vpaddw %ymm0, %ymm1, %ymm14
vmovdqa %ymm14, 192(%r11)
vmovdqa %ymm2, 288(%r11)
vmovdqa %ymm12, 384(%r11)
vpaddw %ymm2, %ymm12, %ymm14
vmovdqa %ymm14, 480(%r11)
vpaddw %ymm0, %ymm2, %ymm14
vmovdqa %ymm14, 576(%r11)
vpaddw %ymm1, %ymm12, %ymm15
vmovdqa %ymm15, 672(%r11)
vpaddw %ymm14, %ymm15, %ymm14
vmovdqa %ymm14, 768(%r11)
vmovdqa %ymm4, 5184(%r11)
vmovdqa %ymm5, 5280(%r11)
vpaddw %ymm4, %ymm5, %ymm14
vmovdqa %ymm14, 5376(%r11)
vmovdqa %ymm6, 5472(%r11)
vmovdqa %ymm7, 5568(%r11)
vpaddw %ymm6, %ymm7, %ymm14
vmovdqa %ymm14, 5664(%r11)
vpaddw %ymm4, %ymm6, %ymm14
vmovdqa %ymm14, 5760(%r11)
vpaddw %ymm5, %ymm7, %ymm15
vmovdqa %ymm15, 5856(%r11)
vpaddw %ymm14, %ymm15, %ymm14
vmovdqa %ymm14, 5952(%r11)
vmovdqa %ymm0, 0(%r8)
vmovdqa %ymm1, 32(%r8)
vmovdqa %ymm2, 64(%r8)
vmovdqa %ymm12, 96(%r8)
vmovdqa %ymm8, 128(%r8)
vmovdqa %ymm9, 160(%r8)
vmovdqa %ymm10, 192(%r8)
vmovdqa %ymm11, 224(%r8)
vmovdqu 704(%rdx), %ymm0
vpaddw 0(%r8), %ymm0, %ymm1
vpaddw 128(%r8), %ymm4, %ymm2
vpaddw %ymm2, %ymm1, %ymm8
vpsubw %ymm2, %ymm1, %ymm12
vmovdqa %ymm0, 256(%r8)
vmovdqu 792(%rdx), %ymm0
vpaddw 32(%r8), %ymm0, %ymm1
vpaddw 160(%r8), %ymm5, %ymm2
vpaddw %ymm2, %ymm1, %ymm9
vpsubw %ymm2, %ymm1, %ymm13
vmovdqa %ymm0, 288(%r8)
vmovdqu 880(%rdx), %ymm0
vpaddw 64(%r8), %ymm0, %ymm1
vpaddw 192(%r8), %ymm6, %ymm2
vpaddw %ymm2, %ymm1, %ymm10
vpsubw %ymm2, %ymm1, %ymm14
vmovdqa %ymm0, 320(%r8)
vmovdqu 968(%rdx), %ymm0
vpaddw 96(%r8), %ymm0, %ymm1
vpaddw 224(%r8), %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm11
vpsubw %ymm2, %ymm1, %ymm15
vmovdqa %ymm0, 352(%r8)
vmovdqa %ymm8, 864(%r11)
vmovdqa %ymm9, 960(%r11)
vpaddw %ymm8, %ymm9, %ymm0
vmovdqa %ymm0, 1056(%r11)
vmovdqa %ymm10, 1152(%r11)
vmovdqa %ymm11, 1248(%r11)
vpaddw %ymm10, %ymm11, %ymm0
vmovdqa %ymm0, 1344(%r11)
vpaddw %ymm8, %ymm10, %ymm0
vmovdqa %ymm0, 1440(%r11)
vpaddw %ymm9, %ymm11, %ymm1
vmovdqa %ymm1, 1536(%r11)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 1632(%r11)
vmovdqa %ymm12, 1728(%r11)
vmovdqa %ymm13, 1824(%r11)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 1920(%r11)
vmovdqa %ymm14, 2016(%r11)
vmovdqa %ymm15, 2112(%r11)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 2208(%r11)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 2304(%r11)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 2400(%r11)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 2496(%r11)
vmovdqa 256(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 0(%r8), %ymm0, %ymm0
vpsllw $2, %ymm4, %ymm1
vpaddw 128(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm8
vpsubw %ymm1, %ymm0, %ymm12
vmovdqa 288(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 32(%r8), %ymm0, %ymm0
vpsllw $2, %ymm5, %ymm1
vpaddw 160(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm9
vpsubw %ymm1, %ymm0, %ymm13
vmovdqa 320(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 64(%r8), %ymm0, %ymm0
vpsllw $2, %ymm6, %ymm1
vpaddw 192(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm10
vpsubw %ymm1, %ymm0, %ymm14
vmovdqa 352(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 96(%r8), %ymm0, %ymm0
vpsllw $2, %ymm7, %ymm1
vpaddw 224(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm11
vpsubw %ymm1, %ymm0, %ymm15
vmovdqa %ymm8, 2592(%r11)
vmovdqa %ymm9, 2688(%r11)
vpaddw %ymm8, %ymm9, %ymm0
vmovdqa %ymm0, 2784(%r11)
vmovdqa %ymm10, 2880(%r11)
vmovdqa %ymm11, 2976(%r11)
vpaddw %ymm10, %ymm11, %ymm0
vmovdqa %ymm0, 3072(%r11)
vpaddw %ymm8, %ymm10, %ymm0
vmovdqa %ymm0, 3168(%r11)
vpaddw %ymm9, %ymm11, %ymm1
vmovdqa %ymm1, 3264(%r11)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 3360(%r11)
vmovdqa %ymm12, 3456(%r11)
vmovdqa %ymm13, 3552(%r11)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 3648(%r11)
vmovdqa %ymm14, 3744(%r11)
vmovdqa %ymm15, 3840(%r11)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 3936(%r11)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 4032(%r11)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 4128(%r11)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 4224(%r11)
vpmullw %ymm3, %ymm4, %ymm0
vpaddw 256(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 128(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 0(%r8), %ymm0, %ymm12
vpmullw %ymm3, %ymm5, %ymm0
vpaddw 288(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 160(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 32(%r8), %ymm0, %ymm13
vpmullw %ymm3, %ymm6, %ymm0
vpaddw 320(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 192(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 64(%r8), %ymm0, %ymm14
vpmullw %ymm3, %ymm7, %ymm0
vpaddw 352(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 224(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 96(%r8), %ymm0, %ymm15
vmovdqa %ymm12, 4320(%r11)
vmovdqa %ymm13, 4416(%r11)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 4512(%r11)
vmovdqa %ymm14, 4608(%r11)
vmovdqa %ymm15, 4704(%r11)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 4800(%r11)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 4896(%r11)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 4992(%r11)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 5088(%r11)
vmovdqu 32(%rdx), %ymm0
vmovdqu 120(%rdx), %ymm1
vmovdqu 208(%rdx), %ymm2
vmovdqu 296(%rdx), %ymm12
vmovdqu 1088(%rdx), %ymm4
vmovdqu 1176(%rdx), %ymm5
vmovdqu 1264(%rdx), %ymm6
vmovdqu 1352(%rdx), %ymm7
vmovdqu 384(%rdx), %ymm8
vmovdqu 472(%rdx), %ymm9
vmovdqu 560(%rdx), %ymm10
vmovdqu 648(%rdx), %ymm11
vmovdqa %ymm0, 32(%r11)
vmovdqa %ymm1, 128(%r11)
vpaddw %ymm0, %ymm1, %ymm14
vmovdqa %ymm14, 224(%r11)
vmovdqa %ymm2, 320(%r11)
vmovdqa %ymm12, 416(%r11)
vpaddw %ymm2, %ymm12, %ymm14
vmovdqa %ymm14, 512(%r11)
vpaddw %ymm0, %ymm2, %ymm14
vmovdqa %ymm14, 608(%r11)
vpaddw %ymm1, %ymm12, %ymm15
vmovdqa %ymm15, 704(%r11)
vpaddw %ymm14, %ymm15, %ymm14
vmovdqa %ymm14, 800(%r11)
vmovdqa %ymm4, 5216(%r11)
vmovdqa %ymm5, 5312(%r11)
vpaddw %ymm4, %ymm5, %ymm14
vmovdqa %ymm14, 5408(%r11)
vmovdqa %ymm6, 5504(%r11)
vmovdqa %ymm7, 5600(%r11)
vpaddw %ymm6, %ymm7, %ymm14
vmovdqa %ymm14, 5696(%r11)
vpaddw %ymm4, %ymm6, %ymm14
vmovdqa %ymm14, 5792(%r11)
vpaddw %ymm5, %ymm7, %ymm15
vmovdqa %ymm15, 5888(%r11)
vpaddw %ymm14, %ymm15, %ymm14
vmovdqa %ymm14, 5984(%r11)
vmovdqa %ymm0, 0(%r8)
vmovdqa %ymm1, 32(%r8)
vmovdqa %ymm2, 64(%r8)
vmovdqa %ymm12, 96(%r8)
vmovdqa %ymm8, 128(%r8)
vmovdqa %ymm9, 160(%r8)
vmovdqa %ymm10, 192(%r8)
vmovdqa %ymm11, 224(%r8)
vmovdqu 736(%rdx), %ymm0
vpaddw 0(%r8), %ymm0, %ymm1
vpaddw 128(%r8), %ymm4, %ymm2
vpaddw %ymm2, %ymm1, %ymm8
vpsubw %ymm2, %ymm1, %ymm12
vmovdqa %ymm0, 256(%r8)
vmovdqu 824(%rdx), %ymm0
vpaddw 32(%r8), %ymm0, %ymm1
vpaddw 160(%r8), %ymm5, %ymm2
vpaddw %ymm2, %ymm1, %ymm9
vpsubw %ymm2, %ymm1, %ymm13
vmovdqa %ymm0, 288(%r8)
vmovdqu 912(%rdx), %ymm0
vpaddw 64(%r8), %ymm0, %ymm1
vpaddw 192(%r8), %ymm6, %ymm2
vpaddw %ymm2, %ymm1, %ymm10
vpsubw %ymm2, %ymm1, %ymm14
vmovdqa %ymm0, 320(%r8)
vmovdqu 1000(%rdx), %ymm0
vpaddw 96(%r8), %ymm0, %ymm1
vpaddw 224(%r8), %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm11
vpsubw %ymm2, %ymm1, %ymm15
vmovdqa %ymm0, 352(%r8)
vmovdqa %ymm8, 896(%r11)
vmovdqa %ymm9, 992(%r11)
vpaddw %ymm8, %ymm9, %ymm0
vmovdqa %ymm0, 1088(%r11)
vmovdqa %ymm10, 1184(%r11)
vmovdqa %ymm11, 1280(%r11)
vpaddw %ymm10, %ymm11, %ymm0
vmovdqa %ymm0, 1376(%r11)
vpaddw %ymm8, %ymm10, %ymm0
vmovdqa %ymm0, 1472(%r11)
vpaddw %ymm9, %ymm11, %ymm1
vmovdqa %ymm1, 1568(%r11)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 1664(%r11)
vmovdqa %ymm12, 1760(%r11)
vmovdqa %ymm13, 1856(%r11)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 1952(%r11)
vmovdqa %ymm14, 2048(%r11)
vmovdqa %ymm15, 2144(%r11)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 2240(%r11)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 2336(%r11)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 2432(%r11)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 2528(%r11)
vmovdqa 256(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 0(%r8), %ymm0, %ymm0
vpsllw $2, %ymm4, %ymm1
vpaddw 128(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm8
vpsubw %ymm1, %ymm0, %ymm12
vmovdqa 288(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 32(%r8), %ymm0, %ymm0
vpsllw $2, %ymm5, %ymm1
vpaddw 160(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm9
vpsubw %ymm1, %ymm0, %ymm13
vmovdqa 320(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 64(%r8), %ymm0, %ymm0
vpsllw $2, %ymm6, %ymm1
vpaddw 192(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm10
vpsubw %ymm1, %ymm0, %ymm14
vmovdqa 352(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 96(%r8), %ymm0, %ymm0
vpsllw $2, %ymm7, %ymm1
vpaddw 224(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm11
vpsubw %ymm1, %ymm0, %ymm15
vmovdqa %ymm8, 2624(%r11)
vmovdqa %ymm9, 2720(%r11)
vpaddw %ymm8, %ymm9, %ymm0
vmovdqa %ymm0, 2816(%r11)
vmovdqa %ymm10, 2912(%r11)
vmovdqa %ymm11, 3008(%r11)
vpaddw %ymm10, %ymm11, %ymm0
vmovdqa %ymm0, 3104(%r11)
vpaddw %ymm8, %ymm10, %ymm0
vmovdqa %ymm0, 3200(%r11)
vpaddw %ymm9, %ymm11, %ymm1
vmovdqa %ymm1, 3296(%r11)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 3392(%r11)
vmovdqa %ymm12, 3488(%r11)
vmovdqa %ymm13, 3584(%r11)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 3680(%r11)
vmovdqa %ymm14, 3776(%r11)
vmovdqa %ymm15, 3872(%r11)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 3968(%r11)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 4064(%r11)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 4160(%r11)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 4256(%r11)
vpmullw %ymm3, %ymm4, %ymm0
vpaddw 256(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 128(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 0(%r8), %ymm0, %ymm12
vpmullw %ymm3, %ymm5, %ymm0
vpaddw 288(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 160(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 32(%r8), %ymm0, %ymm13
vpmullw %ymm3, %ymm6, %ymm0
vpaddw 320(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 192(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 64(%r8), %ymm0, %ymm14
vpmullw %ymm3, %ymm7, %ymm0
vpaddw 352(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 224(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 96(%r8), %ymm0, %ymm15
vmovdqa %ymm12, 4352(%r11)
vmovdqa %ymm13, 4448(%r11)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 4544(%r11)
vmovdqa %ymm14, 4640(%r11)
vmovdqa %ymm15, 4736(%r11)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 4832(%r11)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 4928(%r11)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 5024(%r11)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 5120(%r11)
vmovdqu 64(%rdx), %ymm0
vmovdqu 152(%rdx), %ymm1
vmovdqu 240(%rdx), %ymm2
vmovdqu 328(%rdx), %ymm12
vmovdqu 1120(%rdx), %ymm4
vmovdqu 1208(%rdx), %ymm5
vmovdqu 1296(%rdx), %ymm6
# Only 18 bytes more can be read, but vmovdqu reads 32.
# Copy 18 bytes to the red zone and zero pad to 32 bytes.
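# As above: the 32-byte block at -32(%rsp) holds the 18 bytes from 1384(%rdx)
# followed by 14 bytes of zeros.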
xor %r9, %r9
movq %r9, -16(%rsp)
movq %r9, -8(%rsp)
movq 1384(%rdx), %r9
movq %r9, -32(%rsp)
movq 1384+8(%rdx), %r9
movq %r9, -24(%rsp)
movw 1384+16(%rdx), %r9w
movw %r9w, -16(%rsp)
vmovdqu -32(%rsp), %ymm7
vmovdqu 416(%rdx), %ymm8
vmovdqu 504(%rdx), %ymm9
vmovdqu 592(%rdx), %ymm10
vmovdqu 680(%rdx), %ymm11
vmovdqa %ymm0, 64(%r11)
vmovdqa %ymm1, 160(%r11)
vpaddw %ymm0, %ymm1, %ymm14
vmovdqa %ymm14, 256(%r11)
vmovdqa %ymm2, 352(%r11)
vmovdqa %ymm12, 448(%r11)
vpaddw %ymm2, %ymm12, %ymm14
vmovdqa %ymm14, 544(%r11)
vpaddw %ymm0, %ymm2, %ymm14
vmovdqa %ymm14, 640(%r11)
vpaddw %ymm1, %ymm12, %ymm15
vmovdqa %ymm15, 736(%r11)
vpaddw %ymm14, %ymm15, %ymm14
vmovdqa %ymm14, 832(%r11)
vmovdqa %ymm4, 5248(%r11)
vmovdqa %ymm5, 5344(%r11)
vpaddw %ymm4, %ymm5, %ymm14
vmovdqa %ymm14, 5440(%r11)
vmovdqa %ymm6, 5536(%r11)
vmovdqa %ymm7, 5632(%r11)
vpaddw %ymm6, %ymm7, %ymm14
vmovdqa %ymm14, 5728(%r11)
vpaddw %ymm4, %ymm6, %ymm14
vmovdqa %ymm14, 5824(%r11)
vpaddw %ymm5, %ymm7, %ymm15
vmovdqa %ymm15, 5920(%r11)
vpaddw %ymm14, %ymm15, %ymm14
vmovdqa %ymm14, 6016(%r11)
vmovdqa %ymm0, 0(%r8)
vmovdqa %ymm1, 32(%r8)
vmovdqa %ymm2, 64(%r8)
vmovdqa %ymm12, 96(%r8)
vmovdqa %ymm8, 128(%r8)
vmovdqa %ymm9, 160(%r8)
vmovdqa %ymm10, 192(%r8)
vmovdqa %ymm11, 224(%r8)
vmovdqu 768(%rdx), %ymm0
vpaddw 0(%r8), %ymm0, %ymm1
vpaddw 128(%r8), %ymm4, %ymm2
vpaddw %ymm2, %ymm1, %ymm8
vpsubw %ymm2, %ymm1, %ymm12
vmovdqa %ymm0, 256(%r8)
vmovdqu 856(%rdx), %ymm0
vpaddw 32(%r8), %ymm0, %ymm1
vpaddw 160(%r8), %ymm5, %ymm2
vpaddw %ymm2, %ymm1, %ymm9
vpsubw %ymm2, %ymm1, %ymm13
vmovdqa %ymm0, 288(%r8)
vmovdqu 944(%rdx), %ymm0
vpaddw 64(%r8), %ymm0, %ymm1
vpaddw 192(%r8), %ymm6, %ymm2
vpaddw %ymm2, %ymm1, %ymm10
vpsubw %ymm2, %ymm1, %ymm14
vmovdqa %ymm0, 320(%r8)
vmovdqu 1032(%rdx), %ymm0
vpaddw 96(%r8), %ymm0, %ymm1
vpaddw 224(%r8), %ymm7, %ymm2
vpaddw %ymm2, %ymm1, %ymm11
vpsubw %ymm2, %ymm1, %ymm15
vmovdqa %ymm0, 352(%r8)
vmovdqa %ymm8, 928(%r11)
vmovdqa %ymm9, 1024(%r11)
vpaddw %ymm8, %ymm9, %ymm0
vmovdqa %ymm0, 1120(%r11)
vmovdqa %ymm10, 1216(%r11)
vmovdqa %ymm11, 1312(%r11)
vpaddw %ymm10, %ymm11, %ymm0
vmovdqa %ymm0, 1408(%r11)
vpaddw %ymm8, %ymm10, %ymm0
vmovdqa %ymm0, 1504(%r11)
vpaddw %ymm9, %ymm11, %ymm1
vmovdqa %ymm1, 1600(%r11)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 1696(%r11)
vmovdqa %ymm12, 1792(%r11)
vmovdqa %ymm13, 1888(%r11)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 1984(%r11)
vmovdqa %ymm14, 2080(%r11)
vmovdqa %ymm15, 2176(%r11)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 2272(%r11)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 2368(%r11)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 2464(%r11)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 2560(%r11)
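# Evaluation at +2 and -2 using shifts: (b0 + 4*b2) +/- 2*(b1 + 4*b3) per
# 16-bit lane.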
vmovdqa 256(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 0(%r8), %ymm0, %ymm0
vpsllw $2, %ymm4, %ymm1
vpaddw 128(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm8
vpsubw %ymm1, %ymm0, %ymm12
vmovdqa 288(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 32(%r8), %ymm0, %ymm0
vpsllw $2, %ymm5, %ymm1
vpaddw 160(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm9
vpsubw %ymm1, %ymm0, %ymm13
vmovdqa 320(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 64(%r8), %ymm0, %ymm0
vpsllw $2, %ymm6, %ymm1
vpaddw 192(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm10
vpsubw %ymm1, %ymm0, %ymm14
vmovdqa 352(%r8), %ymm0
vpsllw $2, %ymm0, %ymm0
vpaddw 96(%r8), %ymm0, %ymm0
vpsllw $2, %ymm7, %ymm1
vpaddw 224(%r8), %ymm1, %ymm1
vpsllw $1, %ymm1, %ymm1
vpaddw %ymm1, %ymm0, %ymm11
vpsubw %ymm1, %ymm0, %ymm15
vmovdqa %ymm8, 2656(%r11)
vmovdqa %ymm9, 2752(%r11)
vpaddw %ymm8, %ymm9, %ymm0
vmovdqa %ymm0, 2848(%r11)
vmovdqa %ymm10, 2944(%r11)
vmovdqa %ymm11, 3040(%r11)
vpaddw %ymm10, %ymm11, %ymm0
vmovdqa %ymm0, 3136(%r11)
vpaddw %ymm8, %ymm10, %ymm0
vmovdqa %ymm0, 3232(%r11)
vpaddw %ymm9, %ymm11, %ymm1
vmovdqa %ymm1, 3328(%r11)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 3424(%r11)
vmovdqa %ymm12, 3520(%r11)
vmovdqa %ymm13, 3616(%r11)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 3712(%r11)
vmovdqa %ymm14, 3808(%r11)
vmovdqa %ymm15, 3904(%r11)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 4000(%r11)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 4096(%r11)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 4192(%r11)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 4288(%r11)
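# Horner evaluation at the constant broadcast in %ymm3:
# ((b3*c + b2)*c + b1)*c + b0 in each 16-bit lane.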
vpmullw %ymm3, %ymm4, %ymm0
vpaddw 256(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 128(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 0(%r8), %ymm0, %ymm12
vpmullw %ymm3, %ymm5, %ymm0
vpaddw 288(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 160(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 32(%r8), %ymm0, %ymm13
vpmullw %ymm3, %ymm6, %ymm0
vpaddw 320(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 192(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 64(%r8), %ymm0, %ymm14
vpmullw %ymm3, %ymm7, %ymm0
vpaddw 352(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 224(%r8), %ymm0, %ymm0
vpmullw %ymm3, %ymm0, %ymm0
vpaddw 96(%r8), %ymm0, %ymm15
vmovdqa %ymm12, 4384(%r11)
vmovdqa %ymm13, 4480(%r11)
vpaddw %ymm12, %ymm13, %ymm0
vmovdqa %ymm0, 4576(%r11)
vmovdqa %ymm14, 4672(%r11)
vmovdqa %ymm15, 4768(%r11)
vpaddw %ymm14, %ymm15, %ymm0
vmovdqa %ymm0, 4864(%r11)
vpaddw %ymm12, %ymm14, %ymm0
vmovdqa %ymm0, 4960(%r11)
vpaddw %ymm13, %ymm15, %ymm1
vmovdqa %ymm1, 5056(%r11)
vpaddw %ymm0, %ymm1, %ymm0
vmovdqa %ymm0, 5152(%r11)
subq $9408, %r8
mov $4, %ecx
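# Product phase (presumably over the evaluated points): transpose the data at
# %rax and %r11 into the %r9 buffer, then multiply it block by block in
# innerloop below.  The hex suffix on the labels is a generated unique id.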
karatsuba_loop_4eced63f144beffcb0247f9c6f67d165:
mov %r8, %r9
mov %r8, %r10
subq $32, %r8
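# Transpose 16-bit lanes with the unpack ladder (word -> dword -> qword)
# plus vinserti128/vpermq, writing the interleaved result to %r9.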
vmovdqa 0(%rax), %ymm0
vmovdqa 192(%rax), %ymm1
vmovdqa 384(%rax), %ymm2
vmovdqa 576(%rax), %ymm3
vpunpcklwd 96(%rax), %ymm0, %ymm4
vpunpckhwd 96(%rax), %ymm0, %ymm5
vpunpcklwd 288(%rax), %ymm1, %ymm6
vpunpckhwd 288(%rax), %ymm1, %ymm7
vpunpcklwd 480(%rax), %ymm2, %ymm8
vpunpckhwd 480(%rax), %ymm2, %ymm9
vpunpcklwd 672(%rax), %ymm3, %ymm10
vpunpckhwd 672(%rax), %ymm3, %ymm11
vpunpckldq %ymm6, %ymm4, %ymm0
vpunpckhdq %ymm6, %ymm4, %ymm1
vpunpckldq %ymm7, %ymm5, %ymm2
vpunpckhdq %ymm7, %ymm5, %ymm3
vpunpckldq %ymm10, %ymm8, %ymm12
vpunpckhdq %ymm10, %ymm8, %ymm13
vpunpckldq %ymm11, %ymm9, %ymm14
vpunpckhdq %ymm11, %ymm9, %ymm15
vpunpcklqdq %ymm12, %ymm0, %ymm4
vpunpckhqdq %ymm12, %ymm0, %ymm5
vpunpcklqdq %ymm13, %ymm1, %ymm6
vpunpckhqdq %ymm13, %ymm1, %ymm7
vpunpcklqdq %ymm14, %ymm2, %ymm8
vpunpckhqdq %ymm14, %ymm2, %ymm9
vpunpcklqdq %ymm15, %ymm3, %ymm10
vpunpckhqdq %ymm15, %ymm3, %ymm11
vmovdqa 768(%rax), %ymm0
vmovdqa 960(%rax), %ymm1
vmovdqa 1152(%rax), %ymm2
vmovdqa 1344(%rax), %ymm3
vpunpcklwd 864(%rax), %ymm0, %ymm12
vpunpckhwd 864(%rax), %ymm0, %ymm13
vpunpcklwd 1056(%rax), %ymm1, %ymm14
vpunpckhwd 1056(%rax), %ymm1, %ymm15
vpunpcklwd 1248(%rax), %ymm2, %ymm0
vpunpckhwd 1248(%rax), %ymm2, %ymm1
vpunpcklwd 1440(%rax), %ymm3, %ymm2
vpunpckhwd 1440(%rax), %ymm3, %ymm3
vmovdqa %ymm11, 0(%r8)
vpunpckldq %ymm14, %ymm12, %ymm11
vpunpckhdq %ymm14, %ymm12, %ymm12
vpunpckldq %ymm15, %ymm13, %ymm14
vpunpckhdq %ymm15, %ymm13, %ymm15
vpunpckldq %ymm2, %ymm0, %ymm13
vpunpckhdq %ymm2, %ymm0, %ymm0
vpunpckldq %ymm3, %ymm1, %ymm2
vpunpckhdq %ymm3, %ymm1, %ymm1
vpunpcklqdq %ymm13, %ymm11, %ymm3
vpunpckhqdq %ymm13, %ymm11, %ymm13
vpunpcklqdq %ymm0, %ymm12, %ymm11
vpunpckhqdq %ymm0, %ymm12, %ymm0
vpunpcklqdq %ymm2, %ymm14, %ymm12
vpunpckhqdq %ymm2, %ymm14, %ymm2
vpunpcklqdq %ymm1, %ymm15, %ymm14
vpunpckhqdq %ymm1, %ymm15, %ymm1
vinserti128 $1, %xmm3, %ymm4, %ymm15
vmovdqa %ymm15, 0(%r9)
vinserti128 $1, %xmm13, %ymm5, %ymm15
vmovdqa %ymm15, 32(%r9)
vinserti128 $1, %xmm11, %ymm6, %ymm15
vmovdqa %ymm15, 64(%r9)
vinserti128 $1, %xmm0, %ymm7, %ymm15
vmovdqa %ymm15, 96(%r9)
vinserti128 $1, %xmm12, %ymm8, %ymm15
vmovdqa %ymm15, 128(%r9)
vinserti128 $1, %xmm2, %ymm9, %ymm15
vmovdqa %ymm15, 160(%r9)
vinserti128 $1, %xmm14, %ymm10, %ymm15
vmovdqa %ymm15, 192(%r9)
vpermq $78, %ymm4, %ymm4
vpermq $78, %ymm5, %ymm5
vpermq $78, %ymm6, %ymm6
vpermq $78, %ymm7, %ymm7
vpermq $78, %ymm8, %ymm8
vpermq $78, %ymm9, %ymm9
vpermq $78, %ymm10, %ymm10
vinserti128 $0, %xmm4, %ymm3, %ymm15
vmovdqa %ymm15, 256(%r9)
vinserti128 $0, %xmm5, %ymm13, %ymm15
vmovdqa %ymm15, 288(%r9)
vinserti128 $0, %xmm6, %ymm11, %ymm15
vmovdqa %ymm15, 320(%r9)
vinserti128 $0, %xmm7, %ymm0, %ymm15
vmovdqa %ymm15, 352(%r9)
vinserti128 $0, %xmm8, %ymm12, %ymm15
vmovdqa %ymm15, 384(%r9)
vinserti128 $0, %xmm9, %ymm2, %ymm15
vmovdqa %ymm15, 416(%r9)
vinserti128 $0, %xmm10, %ymm14, %ymm15
vmovdqa %ymm15, 448(%r9)
vmovdqa 0(%r8), %ymm11
vinserti128 $1, %xmm1, %ymm11, %ymm14
vmovdqa %ymm14, 224(%r9)
vpermq $78, %ymm11, %ymm11
vinserti128 $0, %xmm11, %ymm1, %ymm1
vmovdqa %ymm1, 480(%r9)
vmovdqa 32(%rax), %ymm0
vmovdqa 224(%rax), %ymm1
vmovdqa 416(%rax), %ymm2
vmovdqa 608(%rax), %ymm3
vpunpcklwd 128(%rax), %ymm0, %ymm4
vpunpckhwd 128(%rax), %ymm0, %ymm5
vpunpcklwd 320(%rax), %ymm1, %ymm6
vpunpckhwd 320(%rax), %ymm1, %ymm7
vpunpcklwd 512(%rax), %ymm2, %ymm8
vpunpckhwd 512(%rax), %ymm2, %ymm9
vpunpcklwd 704(%rax), %ymm3, %ymm10
vpunpckhwd 704(%rax), %ymm3, %ymm11
vpunpckldq %ymm6, %ymm4, %ymm0
vpunpckhdq %ymm6, %ymm4, %ymm1
vpunpckldq %ymm7, %ymm5, %ymm2
vpunpckhdq %ymm7, %ymm5, %ymm3
vpunpckldq %ymm10, %ymm8, %ymm12
vpunpckhdq %ymm10, %ymm8, %ymm13
vpunpckldq %ymm11, %ymm9, %ymm14
vpunpckhdq %ymm11, %ymm9, %ymm15
vpunpcklqdq %ymm12, %ymm0, %ymm4
vpunpckhqdq %ymm12, %ymm0, %ymm5
vpunpcklqdq %ymm13, %ymm1, %ymm6
vpunpckhqdq %ymm13, %ymm1, %ymm7
vpunpcklqdq %ymm14, %ymm2, %ymm8
vpunpckhqdq %ymm14, %ymm2, %ymm9
vpunpcklqdq %ymm15, %ymm3, %ymm10
vpunpckhqdq %ymm15, %ymm3, %ymm11
vmovdqa 800(%rax), %ymm0
vmovdqa 992(%rax), %ymm1
vmovdqa 1184(%rax), %ymm2
vmovdqa 1376(%rax), %ymm3
vpunpcklwd 896(%rax), %ymm0, %ymm12
vpunpckhwd 896(%rax), %ymm0, %ymm13
vpunpcklwd 1088(%rax), %ymm1, %ymm14
vpunpckhwd 1088(%rax), %ymm1, %ymm15
vpunpcklwd 1280(%rax), %ymm2, %ymm0
vpunpckhwd 1280(%rax), %ymm2, %ymm1
vpunpcklwd 1472(%rax), %ymm3, %ymm2
vpunpckhwd 1472(%rax), %ymm3, %ymm3
vmovdqa %ymm11, 0(%r8)
vpunpckldq %ymm14, %ymm12, %ymm11
vpunpckhdq %ymm14, %ymm12, %ymm12
vpunpckldq %ymm15, %ymm13, %ymm14
vpunpckhdq %ymm15, %ymm13, %ymm15
vpunpckldq %ymm2, %ymm0, %ymm13
vpunpckhdq %ymm2, %ymm0, %ymm0
vpunpckldq %ymm3, %ymm1, %ymm2
vpunpckhdq %ymm3, %ymm1, %ymm1
vpunpcklqdq %ymm13, %ymm11, %ymm3
vpunpckhqdq %ymm13, %ymm11, %ymm13
vpunpcklqdq %ymm0, %ymm12, %ymm11
vpunpckhqdq %ymm0, %ymm12, %ymm0
vpunpcklqdq %ymm2, %ymm14, %ymm12
vpunpckhqdq %ymm2, %ymm14, %ymm2
vpunpcklqdq %ymm1, %ymm15, %ymm14
vpunpckhqdq %ymm1, %ymm15, %ymm1
vinserti128 $1, %xmm3, %ymm4, %ymm15
vmovdqa %ymm15, 512(%r9)
vinserti128 $1, %xmm13, %ymm5, %ymm15
vmovdqa %ymm15, 544(%r9)
vinserti128 $1, %xmm11, %ymm6, %ymm15
vmovdqa %ymm15, 576(%r9)
vinserti128 $1, %xmm0, %ymm7, %ymm15
vmovdqa %ymm15, 608(%r9)
vinserti128 $1, %xmm12, %ymm8, %ymm15
vmovdqa %ymm15, 640(%r9)
vinserti128 $1, %xmm2, %ymm9, %ymm15
vmovdqa %ymm15, 672(%r9)
vinserti128 $1, %xmm14, %ymm10, %ymm15
vmovdqa %ymm15, 704(%r9)
vpermq $78, %ymm4, %ymm4
vpermq $78, %ymm5, %ymm5
vpermq $78, %ymm6, %ymm6
vpermq $78, %ymm7, %ymm7
vpermq $78, %ymm8, %ymm8
vpermq $78, %ymm9, %ymm9
vpermq $78, %ymm10, %ymm10
vinserti128 $0, %xmm4, %ymm3, %ymm15
vmovdqa %ymm15, 768(%r9)
vinserti128 $0, %xmm5, %ymm13, %ymm15
vmovdqa %ymm15, 800(%r9)
vinserti128 $0, %xmm6, %ymm11, %ymm15
vmovdqa %ymm15, 832(%r9)
vinserti128 $0, %xmm7, %ymm0, %ymm15
vmovdqa %ymm15, 864(%r9)
vinserti128 $0, %xmm8, %ymm12, %ymm15
vmovdqa %ymm15, 896(%r9)
vinserti128 $0, %xmm9, %ymm2, %ymm15
vmovdqa %ymm15, 928(%r9)
vinserti128 $0, %xmm10, %ymm14, %ymm15
vmovdqa %ymm15, 960(%r9)
vmovdqa 0(%r8), %ymm11
vinserti128 $1, %xmm1, %ymm11, %ymm14
vmovdqa %ymm14, 736(%r9)
vpermq $78, %ymm11, %ymm11
vinserti128 $0, %xmm11, %ymm1, %ymm1
vmovdqa %ymm1, 992(%r9)
vmovdqa 64(%rax), %ymm0
vmovdqa 256(%rax), %ymm1
vmovdqa 448(%rax), %ymm2
vmovdqa 640(%rax), %ymm3
vpunpcklwd 160(%rax), %ymm0, %ymm4
vpunpckhwd 160(%rax), %ymm0, %ymm5
vpunpcklwd 352(%rax), %ymm1, %ymm6
vpunpckhwd 352(%rax), %ymm1, %ymm7
vpunpcklwd 544(%rax), %ymm2, %ymm8
vpunpckhwd 544(%rax), %ymm2, %ymm9
vpunpcklwd 736(%rax), %ymm3, %ymm10
vpunpckhwd 736(%rax), %ymm3, %ymm11
vpunpckldq %ymm6, %ymm4, %ymm0
vpunpckhdq %ymm6, %ymm4, %ymm1
vpunpckldq %ymm7, %ymm5, %ymm2
vpunpckhdq %ymm7, %ymm5, %ymm3
vpunpckldq %ymm10, %ymm8, %ymm12
vpunpckhdq %ymm10, %ymm8, %ymm13
vpunpckldq %ymm11, %ymm9, %ymm14
vpunpckhdq %ymm11, %ymm9, %ymm15
vpunpcklqdq %ymm12, %ymm0, %ymm4
vpunpckhqdq %ymm12, %ymm0, %ymm5
vpunpcklqdq %ymm13, %ymm1, %ymm6
vpunpckhqdq %ymm13, %ymm1, %ymm7
vpunpcklqdq %ymm14, %ymm2, %ymm8
vpunpckhqdq %ymm14, %ymm2, %ymm9
vpunpcklqdq %ymm15, %ymm3, %ymm10
vpunpckhqdq %ymm15, %ymm3, %ymm11
vmovdqa 832(%rax), %ymm0
vmovdqa 1024(%rax), %ymm1
vmovdqa 1216(%rax), %ymm2
vmovdqa 1408(%rax), %ymm3
vpunpcklwd 928(%rax), %ymm0, %ymm12
vpunpckhwd 928(%rax), %ymm0, %ymm13
vpunpcklwd 1120(%rax), %ymm1, %ymm14
vpunpckhwd 1120(%rax), %ymm1, %ymm15
vpunpcklwd 1312(%rax), %ymm2, %ymm0
vpunpckhwd 1312(%rax), %ymm2, %ymm1
vpunpcklwd 1504(%rax), %ymm3, %ymm2
vpunpckhwd 1504(%rax), %ymm3, %ymm3
vmovdqa %ymm11, 0(%r8)
vpunpckldq %ymm14, %ymm12, %ymm11
vpunpckhdq %ymm14, %ymm12, %ymm12
vpunpckldq %ymm15, %ymm13, %ymm14
vpunpckhdq %ymm15, %ymm13, %ymm15
vpunpckldq %ymm2, %ymm0, %ymm13
vpunpckhdq %ymm2, %ymm0, %ymm0
vpunpckldq %ymm3, %ymm1, %ymm2
vpunpckhdq %ymm3, %ymm1, %ymm1
vpunpcklqdq %ymm13, %ymm11, %ymm3
vpunpckhqdq %ymm13, %ymm11, %ymm13
vpunpcklqdq %ymm0, %ymm12, %ymm11
vpunpckhqdq %ymm0, %ymm12, %ymm0
vpunpcklqdq %ymm2, %ymm14, %ymm12
vpunpckhqdq %ymm2, %ymm14, %ymm2
vpunpcklqdq %ymm1, %ymm15, %ymm14
vpunpckhqdq %ymm1, %ymm15, %ymm1
vinserti128 $1, %xmm3, %ymm4, %ymm15
vmovdqa %ymm15, 1024(%r9)
vinserti128 $1, %xmm13, %ymm5, %ymm15
vmovdqa %ymm15, 1056(%r9)
vinserti128 $1, %xmm11, %ymm6, %ymm15
vmovdqa %ymm15, 1088(%r9)
vinserti128 $1, %xmm0, %ymm7, %ymm15
vmovdqa %ymm15, 1120(%r9)
vinserti128 $1, %xmm12, %ymm8, %ymm15
vmovdqa %ymm15, 1152(%r9)
vinserti128 $1, %xmm2, %ymm9, %ymm15
vmovdqa %ymm15, 1184(%r9)
vinserti128 $1, %xmm14, %ymm10, %ymm15
vmovdqa %ymm15, 1216(%r9)
vpermq $78, %ymm4, %ymm4
vpermq $78, %ymm5, %ymm5
vpermq $78, %ymm6, %ymm6
vpermq $78, %ymm7, %ymm7
vpermq $78, %ymm8, %ymm8
vpermq $78, %ymm9, %ymm9
vpermq $78, %ymm10, %ymm10
vinserti128 $0, %xmm4, %ymm3, %ymm15
vmovdqa %ymm15, 1280(%r9)
vinserti128 $0, %xmm5, %ymm13, %ymm15
vmovdqa %ymm15, 1312(%r9)
vinserti128 $0, %xmm6, %ymm11, %ymm15
vmovdqa %ymm15, 1344(%r9)
vinserti128 $0, %xmm7, %ymm0, %ymm15
vmovdqa %ymm15, 1376(%r9)
vmovdqa 0(%r8), %ymm11
vinserti128 $1, %xmm1, %ymm11, %ymm14
vmovdqa %ymm14, 1248(%r9)
vmovdqa 0(%r11), %ymm0
vmovdqa 192(%r11), %ymm1
vmovdqa 384(%r11), %ymm2
vmovdqa 576(%r11), %ymm3
vpunpcklwd 96(%r11), %ymm0, %ymm4
vpunpckhwd 96(%r11), %ymm0, %ymm5
vpunpcklwd 288(%r11), %ymm1, %ymm6
vpunpckhwd 288(%r11), %ymm1, %ymm7
vpunpcklwd 480(%r11), %ymm2, %ymm8
vpunpckhwd 480(%r11), %ymm2, %ymm9
vpunpcklwd 672(%r11), %ymm3, %ymm10
vpunpckhwd 672(%r11), %ymm3, %ymm11
vpunpckldq %ymm6, %ymm4, %ymm0
vpunpckhdq %ymm6, %ymm4, %ymm1
vpunpckldq %ymm7, %ymm5, %ymm2
vpunpckhdq %ymm7, %ymm5, %ymm3
vpunpckldq %ymm10, %ymm8, %ymm12
vpunpckhdq %ymm10, %ymm8, %ymm13
vpunpckldq %ymm11, %ymm9, %ymm14
vpunpckhdq %ymm11, %ymm9, %ymm15
vpunpcklqdq %ymm12, %ymm0, %ymm4
vpunpckhqdq %ymm12, %ymm0, %ymm5
vpunpcklqdq %ymm13, %ymm1, %ymm6
vpunpckhqdq %ymm13, %ymm1, %ymm7
vpunpcklqdq %ymm14, %ymm2, %ymm8
vpunpckhqdq %ymm14, %ymm2, %ymm9
vpunpcklqdq %ymm15, %ymm3, %ymm10
vpunpckhqdq %ymm15, %ymm3, %ymm11
vmovdqa 768(%r11), %ymm0
vmovdqa 960(%r11), %ymm1
vmovdqa 1152(%r11), %ymm2
vmovdqa 1344(%r11), %ymm3
vpunpcklwd 864(%r11), %ymm0, %ymm12
vpunpckhwd 864(%r11), %ymm0, %ymm13
vpunpcklwd 1056(%r11), %ymm1, %ymm14
vpunpckhwd 1056(%r11), %ymm1, %ymm15
vpunpcklwd 1248(%r11), %ymm2, %ymm0
vpunpckhwd 1248(%r11), %ymm2, %ymm1
vpunpcklwd 1440(%r11), %ymm3, %ymm2
vpunpckhwd 1440(%r11), %ymm3, %ymm3
vmovdqa %ymm11, 0(%r8)
vpunpckldq %ymm14, %ymm12, %ymm11
vpunpckhdq %ymm14, %ymm12, %ymm12
vpunpckldq %ymm15, %ymm13, %ymm14
vpunpckhdq %ymm15, %ymm13, %ymm15
vpunpckldq %ymm2, %ymm0, %ymm13
vpunpckhdq %ymm2, %ymm0, %ymm0
vpunpckldq %ymm3, %ymm1, %ymm2
vpunpckhdq %ymm3, %ymm1, %ymm1
vpunpcklqdq %ymm13, %ymm11, %ymm3
vpunpckhqdq %ymm13, %ymm11, %ymm13
vpunpcklqdq %ymm0, %ymm12, %ymm11
vpunpckhqdq %ymm0, %ymm12, %ymm0
vpunpcklqdq %ymm2, %ymm14, %ymm12
vpunpckhqdq %ymm2, %ymm14, %ymm2
vpunpcklqdq %ymm1, %ymm15, %ymm14
vpunpckhqdq %ymm1, %ymm15, %ymm1
vinserti128 $1, %xmm3, %ymm4, %ymm15
vmovdqa %ymm15, 1408(%r9)
vinserti128 $1, %xmm13, %ymm5, %ymm15
vmovdqa %ymm15, 1440(%r9)
vinserti128 $1, %xmm11, %ymm6, %ymm15
vmovdqa %ymm15, 1472(%r9)
vinserti128 $1, %xmm0, %ymm7, %ymm15
vmovdqa %ymm15, 1504(%r9)
vinserti128 $1, %xmm12, %ymm8, %ymm15
vmovdqa %ymm15, 1536(%r9)
vinserti128 $1, %xmm2, %ymm9, %ymm15
vmovdqa %ymm15, 1568(%r9)
vinserti128 $1, %xmm14, %ymm10, %ymm15
vmovdqa %ymm15, 1600(%r9)
vpermq $78, %ymm4, %ymm4
vpermq $78, %ymm5, %ymm5
vpermq $78, %ymm6, %ymm6
vpermq $78, %ymm7, %ymm7
vpermq $78, %ymm8, %ymm8
vpermq $78, %ymm9, %ymm9
vpermq $78, %ymm10, %ymm10
vinserti128 $0, %xmm4, %ymm3, %ymm15
vmovdqa %ymm15, 1664(%r9)
vinserti128 $0, %xmm5, %ymm13, %ymm15
vmovdqa %ymm15, 1696(%r9)
vinserti128 $0, %xmm6, %ymm11, %ymm15
vmovdqa %ymm15, 1728(%r9)
vinserti128 $0, %xmm7, %ymm0, %ymm15
vmovdqa %ymm15, 1760(%r9)
vinserti128 $0, %xmm8, %ymm12, %ymm15
vmovdqa %ymm15, 1792(%r9)
vinserti128 $0, %xmm9, %ymm2, %ymm15
vmovdqa %ymm15, 1824(%r9)
vinserti128 $0, %xmm10, %ymm14, %ymm15
vmovdqa %ymm15, 1856(%r9)
vmovdqa 0(%r8), %ymm11
vinserti128 $1, %xmm1, %ymm11, %ymm14
vmovdqa %ymm14, 1632(%r9)
vpermq $78, %ymm11, %ymm11
vinserti128 $0, %xmm11, %ymm1, %ymm1
vmovdqa %ymm1, 1888(%r9)
vmovdqa 32(%r11), %ymm0
vmovdqa 224(%r11), %ymm1
vmovdqa 416(%r11), %ymm2
vmovdqa 608(%r11), %ymm3
vpunpcklwd 128(%r11), %ymm0, %ymm4
vpunpckhwd 128(%r11), %ymm0, %ymm5
vpunpcklwd 320(%r11), %ymm1, %ymm6
vpunpckhwd 320(%r11), %ymm1, %ymm7
vpunpcklwd 512(%r11), %ymm2, %ymm8
vpunpckhwd 512(%r11), %ymm2, %ymm9
vpunpcklwd 704(%r11), %ymm3, %ymm10
vpunpckhwd 704(%r11), %ymm3, %ymm11
vpunpckldq %ymm6, %ymm4, %ymm0
vpunpckhdq %ymm6, %ymm4, %ymm1
vpunpckldq %ymm7, %ymm5, %ymm2
vpunpckhdq %ymm7, %ymm5, %ymm3
vpunpckldq %ymm10, %ymm8, %ymm12
vpunpckhdq %ymm10, %ymm8, %ymm13
vpunpckldq %ymm11, %ymm9, %ymm14
vpunpckhdq %ymm11, %ymm9, %ymm15
vpunpcklqdq %ymm12, %ymm0, %ymm4
vpunpckhqdq %ymm12, %ymm0, %ymm5
vpunpcklqdq %ymm13, %ymm1, %ymm6
vpunpckhqdq %ymm13, %ymm1, %ymm7
vpunpcklqdq %ymm14, %ymm2, %ymm8
vpunpckhqdq %ymm14, %ymm2, %ymm9
vpunpcklqdq %ymm15, %ymm3, %ymm10
vpunpckhqdq %ymm15, %ymm3, %ymm11
vmovdqa 800(%r11), %ymm0
vmovdqa 992(%r11), %ymm1
vmovdqa 1184(%r11), %ymm2
vmovdqa 1376(%r11), %ymm3
vpunpcklwd 896(%r11), %ymm0, %ymm12
vpunpckhwd 896(%r11), %ymm0, %ymm13
vpunpcklwd 1088(%r11), %ymm1, %ymm14
vpunpckhwd 1088(%r11), %ymm1, %ymm15
vpunpcklwd 1280(%r11), %ymm2, %ymm0
vpunpckhwd 1280(%r11), %ymm2, %ymm1
vpunpcklwd 1472(%r11), %ymm3, %ymm2
vpunpckhwd 1472(%r11), %ymm3, %ymm3
vmovdqa %ymm11, 0(%r8)
vpunpckldq %ymm14, %ymm12, %ymm11
vpunpckhdq %ymm14, %ymm12, %ymm12
vpunpckldq %ymm15, %ymm13, %ymm14
vpunpckhdq %ymm15, %ymm13, %ymm15
vpunpckldq %ymm2, %ymm0, %ymm13
vpunpckhdq %ymm2, %ymm0, %ymm0
vpunpckldq %ymm3, %ymm1, %ymm2
vpunpckhdq %ymm3, %ymm1, %ymm1
vpunpcklqdq %ymm13, %ymm11, %ymm3
vpunpckhqdq %ymm13, %ymm11, %ymm13
vpunpcklqdq %ymm0, %ymm12, %ymm11
vpunpckhqdq %ymm0, %ymm12, %ymm0
vpunpcklqdq %ymm2, %ymm14, %ymm12
vpunpckhqdq %ymm2, %ymm14, %ymm2
vpunpcklqdq %ymm1, %ymm15, %ymm14
vpunpckhqdq %ymm1, %ymm15, %ymm1
vinserti128 $1, %xmm3, %ymm4, %ymm15
vmovdqa %ymm15, 1920(%r9)
vinserti128 $1, %xmm13, %ymm5, %ymm15
vmovdqa %ymm15, 1952(%r9)
vinserti128 $1, %xmm11, %ymm6, %ymm15
vmovdqa %ymm15, 1984(%r9)
vinserti128 $1, %xmm0, %ymm7, %ymm15
vmovdqa %ymm15, 2016(%r9)
vinserti128 $1, %xmm12, %ymm8, %ymm15
vmovdqa %ymm15, 2048(%r9)
vinserti128 $1, %xmm2, %ymm9, %ymm15
vmovdqa %ymm15, 2080(%r9)
vinserti128 $1, %xmm14, %ymm10, %ymm15
vmovdqa %ymm15, 2112(%r9)
vpermq $78, %ymm4, %ymm4
vpermq $78, %ymm5, %ymm5
vpermq $78, %ymm6, %ymm6
vpermq $78, %ymm7, %ymm7
vpermq $78, %ymm8, %ymm8
vpermq $78, %ymm9, %ymm9
vpermq $78, %ymm10, %ymm10
vinserti128 $0, %xmm4, %ymm3, %ymm15
vmovdqa %ymm15, 2176(%r9)
vinserti128 $0, %xmm5, %ymm13, %ymm15
vmovdqa %ymm15, 2208(%r9)
vinserti128 $0, %xmm6, %ymm11, %ymm15
vmovdqa %ymm15, 2240(%r9)
vinserti128 $0, %xmm7, %ymm0, %ymm15
vmovdqa %ymm15, 2272(%r9)
vinserti128 $0, %xmm8, %ymm12, %ymm15
vmovdqa %ymm15, 2304(%r9)
vinserti128 $0, %xmm9, %ymm2, %ymm15
vmovdqa %ymm15, 2336(%r9)
vinserti128 $0, %xmm10, %ymm14, %ymm15
vmovdqa %ymm15, 2368(%r9)
vmovdqa 0(%r8), %ymm11
vinserti128 $1, %xmm1, %ymm11, %ymm14
vmovdqa %ymm14, 2144(%r9)
vpermq $78, %ymm11, %ymm11
vinserti128 $0, %xmm11, %ymm1, %ymm1
vmovdqa %ymm1, 2400(%r9)
vmovdqa 64(%r11), %ymm0
vmovdqa 256(%r11), %ymm1
vmovdqa 448(%r11), %ymm2
vmovdqa 640(%r11), %ymm3
vpunpcklwd 160(%r11), %ymm0, %ymm4
vpunpckhwd 160(%r11), %ymm0, %ymm5
vpunpcklwd 352(%r11), %ymm1, %ymm6
vpunpckhwd 352(%r11), %ymm1, %ymm7
vpunpcklwd 544(%r11), %ymm2, %ymm8
vpunpckhwd 544(%r11), %ymm2, %ymm9
vpunpcklwd 736(%r11), %ymm3, %ymm10
vpunpckhwd 736(%r11), %ymm3, %ymm11
vpunpckldq %ymm6, %ymm4, %ymm0
vpunpckhdq %ymm6, %ymm4, %ymm1
vpunpckldq %ymm7, %ymm5, %ymm2
vpunpckhdq %ymm7, %ymm5, %ymm3
vpunpckldq %ymm10, %ymm8, %ymm12
vpunpckhdq %ymm10, %ymm8, %ymm13
vpunpckldq %ymm11, %ymm9, %ymm14
vpunpckhdq %ymm11, %ymm9, %ymm15
vpunpcklqdq %ymm12, %ymm0, %ymm4
vpunpckhqdq %ymm12, %ymm0, %ymm5
vpunpcklqdq %ymm13, %ymm1, %ymm6
vpunpckhqdq %ymm13, %ymm1, %ymm7
vpunpcklqdq %ymm14, %ymm2, %ymm8
vpunpckhqdq %ymm14, %ymm2, %ymm9
vpunpcklqdq %ymm15, %ymm3, %ymm10
vpunpckhqdq %ymm15, %ymm3, %ymm11
vmovdqa 832(%r11), %ymm0
vmovdqa 1024(%r11), %ymm1
vmovdqa 1216(%r11), %ymm2
vmovdqa 1408(%r11), %ymm3
vpunpcklwd 928(%r11), %ymm0, %ymm12
vpunpckhwd 928(%r11), %ymm0, %ymm13
vpunpcklwd 1120(%r11), %ymm1, %ymm14
vpunpckhwd 1120(%r11), %ymm1, %ymm15
vpunpcklwd 1312(%r11), %ymm2, %ymm0
vpunpckhwd 1312(%r11), %ymm2, %ymm1
vpunpcklwd 1504(%r11), %ymm3, %ymm2
vpunpckhwd 1504(%r11), %ymm3, %ymm3
vmovdqa %ymm11, 0(%r8)
vpunpckldq %ymm14, %ymm12, %ymm11
vpunpckhdq %ymm14, %ymm12, %ymm12
vpunpckldq %ymm15, %ymm13, %ymm14
vpunpckhdq %ymm15, %ymm13, %ymm15
vpunpckldq %ymm2, %ymm0, %ymm13
vpunpckhdq %ymm2, %ymm0, %ymm0
vpunpckldq %ymm3, %ymm1, %ymm2
vpunpckhdq %ymm3, %ymm1, %ymm1
vpunpcklqdq %ymm13, %ymm11, %ymm3
vpunpckhqdq %ymm13, %ymm11, %ymm13
vpunpcklqdq %ymm0, %ymm12, %ymm11
vpunpckhqdq %ymm0, %ymm12, %ymm0
vpunpcklqdq %ymm2, %ymm14, %ymm12
vpunpckhqdq %ymm2, %ymm14, %ymm2
vpunpcklqdq %ymm1, %ymm15, %ymm14
vpunpckhqdq %ymm1, %ymm15, %ymm1
vinserti128 $1, %xmm3, %ymm4, %ymm15
vmovdqa %ymm15, 2432(%r9)
vinserti128 $1, %xmm13, %ymm5, %ymm15
vmovdqa %ymm15, 2464(%r9)
vinserti128 $1, %xmm11, %ymm6, %ymm15
vmovdqa %ymm15, 2496(%r9)
vinserti128 $1, %xmm0, %ymm7, %ymm15
vmovdqa %ymm15, 2528(%r9)
vinserti128 $1, %xmm12, %ymm8, %ymm15
vmovdqa %ymm15, 2560(%r9)
vinserti128 $1, %xmm2, %ymm9, %ymm15
vmovdqa %ymm15, 2592(%r9)
vinserti128 $1, %xmm14, %ymm10, %ymm15
vmovdqa %ymm15, 2624(%r9)
vpermq $78, %ymm4, %ymm4
vpermq $78, %ymm5, %ymm5
vpermq $78, %ymm6, %ymm6
vpermq $78, %ymm7, %ymm7
vpermq $78, %ymm8, %ymm8
vpermq $78, %ymm9, %ymm9
vpermq $78, %ymm10, %ymm10
vinserti128 $0, %xmm4, %ymm3, %ymm15
vmovdqa %ymm15, 2688(%r9)
vinserti128 $0, %xmm5, %ymm13, %ymm15
vmovdqa %ymm15, 2720(%r9)
vinserti128 $0, %xmm6, %ymm11, %ymm15
vmovdqa %ymm15, 2752(%r9)
vinserti128 $0, %xmm7, %ymm0, %ymm15
vmovdqa %ymm15, 2784(%r9)
vmovdqa 0(%r8), %ymm11
vinserti128 $1, %xmm1, %ymm11, %ymm14
vmovdqa %ymm14, 2656(%r9)
addq $32, %r8
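# innerloop: Karatsuba block multiply of the transposed operands.  First the
# schoolbook product of the low six vectors, then of the high five, then a
# middle term from the summed halves, recombined further down.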
innerloop_4eced63f144beffcb0247f9c6f67d165:
vmovdqa 0(%r9), %ymm0
vmovdqa 1408(%r9), %ymm6
vmovdqa 32(%r9), %ymm1
vmovdqa 1440(%r9), %ymm7
vmovdqa 64(%r9), %ymm2
vmovdqa 1472(%r9), %ymm8
vmovdqa 96(%r9), %ymm3
vmovdqa 1504(%r9), %ymm9
vmovdqa 128(%r9), %ymm4
vmovdqa 1536(%r9), %ymm10
vmovdqa 160(%r9), %ymm5
vmovdqa 1568(%r9), %ymm11
vpmullw %ymm0, %ymm6, %ymm12
vmovdqa %ymm12, 2816(%r10)
vpmullw %ymm0, %ymm7, %ymm13
vpmullw %ymm1, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 2848(%r10)
vpmullw %ymm0, %ymm8, %ymm12
vpmullw %ymm1, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 2880(%r10)
vpmullw %ymm0, %ymm9, %ymm13
vpmullw %ymm1, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 2912(%r10)
vpmullw %ymm0, %ymm10, %ymm12
vpmullw %ymm1, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 2944(%r10)
vpmullw %ymm0, %ymm11, %ymm13
vpmullw %ymm1, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm5, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 2976(%r10)
vpmullw %ymm1, %ymm11, %ymm12
vpmullw %ymm2, %ymm10, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm5, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 3008(%r10)
vpmullw %ymm2, %ymm11, %ymm13
vpmullw %ymm3, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm5, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 3040(%r10)
vpmullw %ymm3, %ymm11, %ymm12
vpmullw %ymm4, %ymm10, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm5, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 3072(%r10)
vpmullw %ymm4, %ymm11, %ymm13
vpmullw %ymm5, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 3104(%r10)
vpmullw %ymm5, %ymm11, %ymm12
vmovdqa %ymm12, 3136(%r10)
vmovdqa 192(%r9), %ymm0
vmovdqa 1600(%r9), %ymm6
vmovdqa 224(%r9), %ymm1
vmovdqa 1632(%r9), %ymm7
vmovdqa 256(%r9), %ymm2
vmovdqa 1664(%r9), %ymm8
vmovdqa 288(%r9), %ymm3
vmovdqa 1696(%r9), %ymm9
vmovdqa 320(%r9), %ymm4
vmovdqa 1728(%r9), %ymm10
vpmullw %ymm0, %ymm6, %ymm12
vmovdqa %ymm12, 3200(%r10)
vpmullw %ymm0, %ymm7, %ymm13
vpmullw %ymm1, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 3232(%r10)
vpmullw %ymm0, %ymm8, %ymm12
vpmullw %ymm1, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 3264(%r10)
vpmullw %ymm0, %ymm9, %ymm13
vpmullw %ymm1, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 3296(%r10)
vpmullw %ymm0, %ymm10, %ymm12
vpmullw %ymm1, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 3328(%r10)
vpmullw %ymm1, %ymm10, %ymm13
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 3360(%r10)
vpmullw %ymm2, %ymm10, %ymm12
vpmullw %ymm3, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 3392(%r10)
vpmullw %ymm3, %ymm10, %ymm13
vpmullw %ymm4, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 3424(%r10)
vpmullw %ymm4, %ymm10, %ymm12
vmovdqa %ymm12, 3456(%r10)
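# Karatsuba middle term: add the low and high halves of each operand and
# multiply; the low and high products are subtracted back out during
# recombination.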
vpaddw 0(%r9), %ymm0, %ymm0
vpaddw 1408(%r9), %ymm6, %ymm6
vpaddw 32(%r9), %ymm1, %ymm1
vpaddw 1440(%r9), %ymm7, %ymm7
vpaddw 64(%r9), %ymm2, %ymm2
vpaddw 1472(%r9), %ymm8, %ymm8
vpaddw 96(%r9), %ymm3, %ymm3
vpaddw 1504(%r9), %ymm9, %ymm9
vpaddw 128(%r9), %ymm4, %ymm4
vpaddw 1536(%r9), %ymm10, %ymm10
vpmullw %ymm0, %ymm11, %ymm12
vpmullw %ymm1, %ymm10, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm5, %ymm6, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpsubw 2976(%r10), %ymm12, %ymm12
vpsubw 3360(%r10), %ymm12, %ymm12
vmovdqa %ymm12, 3168(%r10)
vpmullw %ymm5, %ymm7, %ymm12
vpmullw %ymm5, %ymm8, %ymm13
vpmullw %ymm5, %ymm9, %ymm14
vpmullw %ymm5, %ymm10, %ymm15
vpmullw %ymm1, %ymm11, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm2, %ymm10, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm3, %ymm9, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm4, %ymm8, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm2, %ymm11, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm3, %ymm10, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm4, %ymm9, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm3, %ymm11, %ymm5
vpaddw %ymm5, %ymm14, %ymm14
vpmullw %ymm4, %ymm10, %ymm5
vpaddw %ymm5, %ymm14, %ymm14
vpmullw %ymm4, %ymm11, %ymm5
vpaddw %ymm5, %ymm15, %ymm15
vpmullw %ymm0, %ymm10, %ymm11
vpmullw %ymm1, %ymm9, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm2, %ymm8, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm3, %ymm7, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm4, %ymm6, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm0, %ymm9, %ymm10
vpmullw %ymm1, %ymm8, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm2, %ymm7, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm3, %ymm6, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm0, %ymm8, %ymm9
vpmullw %ymm1, %ymm7, %ymm5
vpaddw %ymm5, %ymm9, %ymm9
vpmullw %ymm2, %ymm6, %ymm5
vpaddw %ymm5, %ymm9, %ymm9
vpmullw %ymm0, %ymm7, %ymm8
vpmullw %ymm1, %ymm6, %ymm5
vpaddw %ymm5, %ymm8, %ymm8
vpmullw %ymm0, %ymm6, %ymm7
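# Recombine the three partial products (low, high, middle) in place in the
# %r10 buffer.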
vmovdqa 3008(%r10), %ymm0
vpsubw 3200(%r10), %ymm0, %ymm0
vpsubw %ymm0, %ymm12, %ymm6
vpsubw 3392(%r10), %ymm6, %ymm6
vmovdqa %ymm6, 3200(%r10)
vpaddw %ymm7, %ymm0, %ymm0
vpsubw 2816(%r10), %ymm0, %ymm0
vmovdqa %ymm0, 3008(%r10)
vmovdqa 3040(%r10), %ymm1
vpsubw 3232(%r10), %ymm1, %ymm1
vpsubw %ymm1, %ymm13, %ymm7
vpsubw 3424(%r10), %ymm7, %ymm7
vmovdqa %ymm7, 3232(%r10)
vpaddw %ymm8, %ymm1, %ymm1
vpsubw 2848(%r10), %ymm1, %ymm1
vmovdqa %ymm1, 3040(%r10)
vmovdqa 3072(%r10), %ymm2
vpsubw 3264(%r10), %ymm2, %ymm2
vpsubw %ymm2, %ymm14, %ymm8
vpsubw 3456(%r10), %ymm8, %ymm8
vmovdqa %ymm8, 3264(%r10)
vpaddw %ymm9, %ymm2, %ymm2
vpsubw 2880(%r10), %ymm2, %ymm2
vmovdqa %ymm2, 3072(%r10)
vmovdqa 3104(%r10), %ymm3
vpsubw 3296(%r10), %ymm3, %ymm3
vpsubw %ymm3, %ymm15, %ymm9
vmovdqa %ymm9, 3296(%r10)
vpaddw %ymm10, %ymm3, %ymm3
vpsubw 2912(%r10), %ymm3, %ymm3
vmovdqa %ymm3, 3104(%r10)
vmovdqa 3136(%r10), %ymm4
vpsubw 3328(%r10), %ymm4, %ymm4
vpaddw %ymm11, %ymm4, %ymm4
vpsubw 2944(%r10), %ymm4, %ymm4
vmovdqa %ymm4, 3136(%r10)
vmovdqa 352(%r9), %ymm0
vmovdqa 1760(%r9), %ymm6
vmovdqa 384(%r9), %ymm1
vmovdqa 1792(%r9), %ymm7
vmovdqa 416(%r9), %ymm2
vmovdqa 1824(%r9), %ymm8
vmovdqa 448(%r9), %ymm3
vmovdqa 1856(%r9), %ymm9
vmovdqa 480(%r9), %ymm4
vmovdqa 1888(%r9), %ymm10
vmovdqa 512(%r9), %ymm5
vmovdqa 1920(%r9), %ymm11
vpmullw %ymm0, %ymm6, %ymm12
vmovdqa %ymm12, 3520(%r10)
vpmullw %ymm0, %ymm7, %ymm13
vpmullw %ymm1, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 3552(%r10)
vpmullw %ymm0, %ymm8, %ymm12
vpmullw %ymm1, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 3584(%r10)
vpmullw %ymm0, %ymm9, %ymm13
vpmullw %ymm1, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 3616(%r10)
vpmullw %ymm0, %ymm10, %ymm12
vpmullw %ymm1, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 3648(%r10)
vpmullw %ymm0, %ymm11, %ymm13
vpmullw %ymm1, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm5, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 3680(%r10)
vpmullw %ymm1, %ymm11, %ymm12
vpmullw %ymm2, %ymm10, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm5, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 3712(%r10)
vpmullw %ymm2, %ymm11, %ymm13
vpmullw %ymm3, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm5, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 3744(%r10)
vpmullw %ymm3, %ymm11, %ymm12
vpmullw %ymm4, %ymm10, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm5, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 3776(%r10)
vpmullw %ymm4, %ymm11, %ymm13
vpmullw %ymm5, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 3808(%r10)
vpmullw %ymm5, %ymm11, %ymm12
vmovdqa %ymm12, 3840(%r10)
vmovdqa 544(%r9), %ymm0
vmovdqa 1952(%r9), %ymm6
vmovdqa 576(%r9), %ymm1
vmovdqa 1984(%r9), %ymm7
vmovdqa 608(%r9), %ymm2
vmovdqa 2016(%r9), %ymm8
vmovdqa 640(%r9), %ymm3
vmovdqa 2048(%r9), %ymm9
vmovdqa 672(%r9), %ymm4
vmovdqa 2080(%r9), %ymm10
vpmullw %ymm0, %ymm6, %ymm12
vmovdqa %ymm12, 3904(%r10)
vpmullw %ymm0, %ymm7, %ymm13
vpmullw %ymm1, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 3936(%r10)
vpmullw %ymm0, %ymm8, %ymm12
vpmullw %ymm1, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 3968(%r10)
vpmullw %ymm0, %ymm9, %ymm13
vpmullw %ymm1, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 4000(%r10)
vpmullw %ymm0, %ymm10, %ymm12
vpmullw %ymm1, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 4032(%r10)
vpmullw %ymm1, %ymm10, %ymm13
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 4064(%r10)
vpmullw %ymm2, %ymm10, %ymm12
vpmullw %ymm3, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 4096(%r10)
vpmullw %ymm3, %ymm10, %ymm13
vpmullw %ymm4, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 4128(%r10)
vpmullw %ymm4, %ymm10, %ymm12
vmovdqa %ymm12, 4160(%r10)
vpaddw 352(%r9), %ymm0, %ymm0
vpaddw 1760(%r9), %ymm6, %ymm6
vpaddw 384(%r9), %ymm1, %ymm1
vpaddw 1792(%r9), %ymm7, %ymm7
vpaddw 416(%r9), %ymm2, %ymm2
vpaddw 1824(%r9), %ymm8, %ymm8
vpaddw 448(%r9), %ymm3, %ymm3
vpaddw 1856(%r9), %ymm9, %ymm9
vpaddw 480(%r9), %ymm4, %ymm4
vpaddw 1888(%r9), %ymm10, %ymm10
vpmullw %ymm0, %ymm11, %ymm12
vpmullw %ymm1, %ymm10, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm5, %ymm6, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpsubw 3680(%r10), %ymm12, %ymm12
vpsubw 4064(%r10), %ymm12, %ymm12
vmovdqa %ymm12, 3872(%r10)
vpmullw %ymm5, %ymm7, %ymm12
vpmullw %ymm5, %ymm8, %ymm13
vpmullw %ymm5, %ymm9, %ymm14
vpmullw %ymm5, %ymm10, %ymm15
vpmullw %ymm1, %ymm11, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm2, %ymm10, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm3, %ymm9, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm4, %ymm8, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm2, %ymm11, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm3, %ymm10, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm4, %ymm9, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm3, %ymm11, %ymm5
vpaddw %ymm5, %ymm14, %ymm14
vpmullw %ymm4, %ymm10, %ymm5
vpaddw %ymm5, %ymm14, %ymm14
vpmullw %ymm4, %ymm11, %ymm5
vpaddw %ymm5, %ymm15, %ymm15
vpmullw %ymm0, %ymm10, %ymm11
vpmullw %ymm1, %ymm9, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm2, %ymm8, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm3, %ymm7, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm4, %ymm6, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm0, %ymm9, %ymm10
vpmullw %ymm1, %ymm8, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm2, %ymm7, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm3, %ymm6, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm0, %ymm8, %ymm9
vpmullw %ymm1, %ymm7, %ymm5
vpaddw %ymm5, %ymm9, %ymm9
vpmullw %ymm2, %ymm6, %ymm5
vpaddw %ymm5, %ymm9, %ymm9
vpmullw %ymm0, %ymm7, %ymm8
vpmullw %ymm1, %ymm6, %ymm5
vpaddw %ymm5, %ymm8, %ymm8
vpmullw %ymm0, %ymm6, %ymm7
vmovdqa 3712(%r10), %ymm0
vpsubw 3904(%r10), %ymm0, %ymm0
vpsubw %ymm0, %ymm12, %ymm6
vpsubw 4096(%r10), %ymm6, %ymm6
vmovdqa %ymm6, 3904(%r10)
vpaddw %ymm7, %ymm0, %ymm0
vpsubw 3520(%r10), %ymm0, %ymm0
vmovdqa %ymm0, 3712(%r10)
vmovdqa 3744(%r10), %ymm1
vpsubw 3936(%r10), %ymm1, %ymm1
vpsubw %ymm1, %ymm13, %ymm7
vpsubw 4128(%r10), %ymm7, %ymm7
vmovdqa %ymm7, 3936(%r10)
vpaddw %ymm8, %ymm1, %ymm1
vpsubw 3552(%r10), %ymm1, %ymm1
vmovdqa %ymm1, 3744(%r10)
vmovdqa 3776(%r10), %ymm2
vpsubw 3968(%r10), %ymm2, %ymm2
vpsubw %ymm2, %ymm14, %ymm8
vpsubw 4160(%r10), %ymm8, %ymm8
vmovdqa %ymm8, 3968(%r10)
vpaddw %ymm9, %ymm2, %ymm2
vpsubw 3584(%r10), %ymm2, %ymm2
vmovdqa %ymm2, 3776(%r10)
vmovdqa 3808(%r10), %ymm3
vpsubw 4000(%r10), %ymm3, %ymm3
vpsubw %ymm3, %ymm15, %ymm9
vmovdqa %ymm9, 4000(%r10)
vpaddw %ymm10, %ymm3, %ymm3
vpsubw 3616(%r10), %ymm3, %ymm3
vmovdqa %ymm3, 3808(%r10)
vmovdqa 3840(%r10), %ymm4
vpsubw 4032(%r10), %ymm4, %ymm4
vpaddw %ymm11, %ymm4, %ymm4
vpsubw 3648(%r10), %ymm4, %ymm4
vmovdqa %ymm4, 3840(%r10)
vmovdqa 0(%r9), %ymm0
vmovdqa 1408(%r9), %ymm6
vpaddw 352(%r9), %ymm0, %ymm0
vpaddw 1760(%r9), %ymm6, %ymm6
vmovdqa 32(%r9), %ymm1
vmovdqa 1440(%r9), %ymm7
vpaddw 384(%r9), %ymm1, %ymm1
vpaddw 1792(%r9), %ymm7, %ymm7
vmovdqa 64(%r9), %ymm2
vmovdqa 1472(%r9), %ymm8
vpaddw 416(%r9), %ymm2, %ymm2
vpaddw 1824(%r9), %ymm8, %ymm8
vmovdqa 96(%r9), %ymm3
vmovdqa 1504(%r9), %ymm9
vpaddw 448(%r9), %ymm3, %ymm3
vpaddw 1856(%r9), %ymm9, %ymm9
vmovdqa 128(%r9), %ymm4
vmovdqa 1536(%r9), %ymm10
vpaddw 480(%r9), %ymm4, %ymm4
vpaddw 1888(%r9), %ymm10, %ymm10
vmovdqa 160(%r9), %ymm5
vmovdqa 1568(%r9), %ymm11
vpaddw 512(%r9), %ymm5, %ymm5
vpaddw 1920(%r9), %ymm11, %ymm11
vpmullw %ymm0, %ymm6, %ymm12
vmovdqa %ymm12, 5888(%r8)
vpmullw %ymm0, %ymm7, %ymm13
vpmullw %ymm1, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 5920(%r8)
vpmullw %ymm0, %ymm8, %ymm12
vpmullw %ymm1, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 5952(%r8)
vpmullw %ymm0, %ymm9, %ymm13
vpmullw %ymm1, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 5984(%r8)
vpmullw %ymm0, %ymm10, %ymm12
vpmullw %ymm1, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 6016(%r8)
vpmullw %ymm0, %ymm11, %ymm13
vpmullw %ymm1, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm5, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 6048(%r8)
vpmullw %ymm1, %ymm11, %ymm12
vpmullw %ymm2, %ymm10, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm5, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 6080(%r8)
vpmullw %ymm2, %ymm11, %ymm13
vpmullw %ymm3, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm5, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 6112(%r8)
vpmullw %ymm3, %ymm11, %ymm12
vpmullw %ymm4, %ymm10, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm5, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 6144(%r8)
vpmullw %ymm4, %ymm11, %ymm13
vpmullw %ymm5, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 6176(%r8)
vpmullw %ymm5, %ymm11, %ymm12
vmovdqa %ymm12, 6208(%r8)
vmovdqa 192(%r9), %ymm0
vmovdqa 1600(%r9), %ymm6
vpaddw 544(%r9), %ymm0, %ymm0
vpaddw 1952(%r9), %ymm6, %ymm6
vmovdqa 224(%r9), %ymm1
vmovdqa 1632(%r9), %ymm7
vpaddw 576(%r9), %ymm1, %ymm1
vpaddw 1984(%r9), %ymm7, %ymm7
vmovdqa 256(%r9), %ymm2
vmovdqa 1664(%r9), %ymm8
vpaddw 608(%r9), %ymm2, %ymm2
vpaddw 2016(%r9), %ymm8, %ymm8
vmovdqa 288(%r9), %ymm3
vmovdqa 1696(%r9), %ymm9
vpaddw 640(%r9), %ymm3, %ymm3
vpaddw 2048(%r9), %ymm9, %ymm9
vmovdqa 320(%r9), %ymm4
vmovdqa 1728(%r9), %ymm10
vpaddw 672(%r9), %ymm4, %ymm4
vpaddw 2080(%r9), %ymm10, %ymm10
vpmullw %ymm0, %ymm6, %ymm12
vmovdqa %ymm12, 6272(%r8)
vpmullw %ymm0, %ymm7, %ymm13
vpmullw %ymm1, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 6304(%r8)
vpmullw %ymm0, %ymm8, %ymm12
vpmullw %ymm1, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 6336(%r8)
vpmullw %ymm0, %ymm9, %ymm13
vpmullw %ymm1, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 6368(%r8)
vpmullw %ymm0, %ymm10, %ymm12
vpmullw %ymm1, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 6400(%r8)
vpmullw %ymm1, %ymm10, %ymm13
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 6432(%r8)
vpmullw %ymm2, %ymm10, %ymm12
vpmullw %ymm3, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 6464(%r8)
vpmullw %ymm3, %ymm10, %ymm13
vpmullw %ymm4, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 6496(%r8)
vpmullw %ymm4, %ymm10, %ymm12
vmovdqa %ymm12, 6528(%r8)
vpaddw 0(%r9), %ymm0, %ymm0
vpaddw 1408(%r9), %ymm6, %ymm6
vpaddw 352(%r9), %ymm0, %ymm0
vpaddw 1760(%r9), %ymm6, %ymm6
vpaddw 32(%r9), %ymm1, %ymm1
vpaddw 1440(%r9), %ymm7, %ymm7
vpaddw 384(%r9), %ymm1, %ymm1
vpaddw 1792(%r9), %ymm7, %ymm7
vpaddw 64(%r9), %ymm2, %ymm2
vpaddw 1472(%r9), %ymm8, %ymm8
vpaddw 416(%r9), %ymm2, %ymm2
vpaddw 1824(%r9), %ymm8, %ymm8
vpaddw 96(%r9), %ymm3, %ymm3
vpaddw 1504(%r9), %ymm9, %ymm9
vpaddw 448(%r9), %ymm3, %ymm3
vpaddw 1856(%r9), %ymm9, %ymm9
vpaddw 128(%r9), %ymm4, %ymm4
vpaddw 1536(%r9), %ymm10, %ymm10
vpaddw 480(%r9), %ymm4, %ymm4
vpaddw 1888(%r9), %ymm10, %ymm10
vpmullw %ymm0, %ymm11, %ymm12
vpmullw %ymm1, %ymm10, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm5, %ymm6, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpsubw 6048(%r8), %ymm12, %ymm12
vpsubw 6432(%r8), %ymm12, %ymm12
vmovdqa %ymm12, 6240(%r8)
vpmullw %ymm5, %ymm7, %ymm12
vpmullw %ymm5, %ymm8, %ymm13
vpmullw %ymm5, %ymm9, %ymm14
vpmullw %ymm5, %ymm10, %ymm15
vpmullw %ymm1, %ymm11, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm2, %ymm10, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm3, %ymm9, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm4, %ymm8, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm2, %ymm11, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm3, %ymm10, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm4, %ymm9, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm3, %ymm11, %ymm5
vpaddw %ymm5, %ymm14, %ymm14
vpmullw %ymm4, %ymm10, %ymm5
vpaddw %ymm5, %ymm14, %ymm14
vpmullw %ymm4, %ymm11, %ymm5
vpaddw %ymm5, %ymm15, %ymm15
vpmullw %ymm0, %ymm10, %ymm11
vpmullw %ymm1, %ymm9, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm2, %ymm8, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm3, %ymm7, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm4, %ymm6, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm0, %ymm9, %ymm10
vpmullw %ymm1, %ymm8, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm2, %ymm7, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm3, %ymm6, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm0, %ymm8, %ymm9
vpmullw %ymm1, %ymm7, %ymm5
vpaddw %ymm5, %ymm9, %ymm9
vpmullw %ymm2, %ymm6, %ymm5
vpaddw %ymm5, %ymm9, %ymm9
vpmullw %ymm0, %ymm7, %ymm8
vpmullw %ymm1, %ymm6, %ymm5
vpaddw %ymm5, %ymm8, %ymm8
vpmullw %ymm0, %ymm6, %ymm7
vmovdqa 6080(%r8), %ymm0
vpsubw 6272(%r8), %ymm0, %ymm0
vpsubw %ymm0, %ymm12, %ymm6
vpsubw 6464(%r8), %ymm6, %ymm6
vmovdqa %ymm6, 6272(%r8)
vpaddw %ymm7, %ymm0, %ymm0
vpsubw 5888(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 6080(%r8)
vmovdqa 6112(%r8), %ymm1
vpsubw 6304(%r8), %ymm1, %ymm1
vpsubw %ymm1, %ymm13, %ymm7
vpsubw 6496(%r8), %ymm7, %ymm7
vmovdqa %ymm7, 6304(%r8)
vpaddw %ymm8, %ymm1, %ymm1
vpsubw 5920(%r8), %ymm1, %ymm1
vmovdqa %ymm1, 6112(%r8)
vmovdqa 6144(%r8), %ymm2
vpsubw 6336(%r8), %ymm2, %ymm2
vpsubw %ymm2, %ymm14, %ymm8
vpsubw 6528(%r8), %ymm8, %ymm8
vmovdqa %ymm8, 6336(%r8)
vpaddw %ymm9, %ymm2, %ymm2
vpsubw 5952(%r8), %ymm2, %ymm2
vmovdqa %ymm2, 6144(%r8)
vmovdqa 6176(%r8), %ymm3
vpsubw 6368(%r8), %ymm3, %ymm3
vpsubw %ymm3, %ymm15, %ymm9
vmovdqa %ymm9, 6368(%r8)
vpaddw %ymm10, %ymm3, %ymm3
vpsubw 5984(%r8), %ymm3, %ymm3
vmovdqa %ymm3, 6176(%r8)
vmovdqa 6208(%r8), %ymm4
vpsubw 6400(%r8), %ymm4, %ymm4
vpaddw %ymm11, %ymm4, %ymm4
vpsubw 6016(%r8), %ymm4, %ymm4
vmovdqa %ymm4, 6208(%r8)
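# Outer Karatsuba recombination: combine the two block products in %r10 with
# the product of the summed halves held in the %r8 scratch area.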
vmovdqa 6208(%r8), %ymm0
vpsubw 3136(%r10), %ymm0, %ymm0
vpsubw 3840(%r10), %ymm0, %ymm0
vmovdqa %ymm0, 3488(%r10)
vmovdqa 3168(%r10), %ymm0
vpsubw 3520(%r10), %ymm0, %ymm0
vmovdqa 6240(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 3872(%r10), %ymm1, %ymm1
vpsubw 2816(%r10), %ymm0, %ymm0
vpaddw 5888(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3168(%r10)
vmovdqa %ymm1, 3520(%r10)
vmovdqa 3200(%r10), %ymm0
vpsubw 3552(%r10), %ymm0, %ymm0
vmovdqa 6272(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 3904(%r10), %ymm1, %ymm1
vpsubw 2848(%r10), %ymm0, %ymm0
vpaddw 5920(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3200(%r10)
vmovdqa %ymm1, 3552(%r10)
vmovdqa 3232(%r10), %ymm0
vpsubw 3584(%r10), %ymm0, %ymm0
vmovdqa 6304(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 3936(%r10), %ymm1, %ymm1
vpsubw 2880(%r10), %ymm0, %ymm0
vpaddw 5952(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3232(%r10)
vmovdqa %ymm1, 3584(%r10)
vmovdqa 3264(%r10), %ymm0
vpsubw 3616(%r10), %ymm0, %ymm0
vmovdqa 6336(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 3968(%r10), %ymm1, %ymm1
vpsubw 2912(%r10), %ymm0, %ymm0
vpaddw 5984(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3264(%r10)
vmovdqa %ymm1, 3616(%r10)
vmovdqa 3296(%r10), %ymm0
vpsubw 3648(%r10), %ymm0, %ymm0
vmovdqa 6368(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 4000(%r10), %ymm1, %ymm1
vpsubw 2944(%r10), %ymm0, %ymm0
vpaddw 6016(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3296(%r10)
vmovdqa %ymm1, 3648(%r10)
vmovdqa 3328(%r10), %ymm0
vpsubw 3680(%r10), %ymm0, %ymm0
vmovdqa 6400(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 4032(%r10), %ymm1, %ymm1
vpsubw 2976(%r10), %ymm0, %ymm0
vpaddw 6048(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3328(%r10)
vmovdqa %ymm1, 3680(%r10)
vmovdqa 3360(%r10), %ymm0
vpsubw 3712(%r10), %ymm0, %ymm0
vmovdqa 6432(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 4064(%r10), %ymm1, %ymm1
vpsubw 3008(%r10), %ymm0, %ymm0
vpaddw 6080(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3360(%r10)
vmovdqa %ymm1, 3712(%r10)
vmovdqa 3392(%r10), %ymm0
vpsubw 3744(%r10), %ymm0, %ymm0
vmovdqa 6464(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 4096(%r10), %ymm1, %ymm1
vpsubw 3040(%r10), %ymm0, %ymm0
vpaddw 6112(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3392(%r10)
vmovdqa %ymm1, 3744(%r10)
vmovdqa 3424(%r10), %ymm0
vpsubw 3776(%r10), %ymm0, %ymm0
vmovdqa 6496(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 4128(%r10), %ymm1, %ymm1
vpsubw 3072(%r10), %ymm0, %ymm0
vpaddw 6144(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3424(%r10)
vmovdqa %ymm1, 3776(%r10)
vmovdqa 3456(%r10), %ymm0
vpsubw 3808(%r10), %ymm0, %ymm0
vmovdqa 6528(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 4160(%r10), %ymm1, %ymm1
vpsubw 3104(%r10), %ymm0, %ymm0
vpaddw 6176(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3456(%r10)
vmovdqa %ymm1, 3808(%r10)
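# Loop control: negate the counter; if the result is non-negative we are done,
# otherwise advance %r9 by 704 and %r10 by 1408 bytes and rerun innerloop.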
neg %ecx
jns done_4eced63f144beffcb0247f9c6f67d165
add $704, %r9
add $1408, %r10
jmp innerloop_4eced63f144beffcb0247f9c6f67d165
done_4eced63f144beffcb0247f9c6f67d165:
sub $704, %r9
sub $1408, %r10
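# done: step the pointers back and sum the two halves of each transposed
# operand, staging the sums at 6592..(%r8) and 7296..(%r8) for one more
# Karatsuba middle product.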
vmovdqa 0(%r9), %ymm0
vpaddw 704(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 6592(%r8)
vmovdqa 1408(%r9), %ymm0
vpaddw 2112(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7296(%r8)
vmovdqa 32(%r9), %ymm0
vpaddw 736(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 6624(%r8)
vmovdqa 1440(%r9), %ymm0
vpaddw 2144(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7328(%r8)
vmovdqa 64(%r9), %ymm0
vpaddw 768(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 6656(%r8)
vmovdqa 1472(%r9), %ymm0
vpaddw 2176(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7360(%r8)
vmovdqa 96(%r9), %ymm0
vpaddw 800(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 6688(%r8)
vmovdqa 1504(%r9), %ymm0
vpaddw 2208(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7392(%r8)
vmovdqa 128(%r9), %ymm0
vpaddw 832(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 6720(%r8)
vmovdqa 1536(%r9), %ymm0
vpaddw 2240(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7424(%r8)
vmovdqa 160(%r9), %ymm0
vpaddw 864(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 6752(%r8)
vmovdqa 1568(%r9), %ymm0
vpaddw 2272(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7456(%r8)
vmovdqa 192(%r9), %ymm0
vpaddw 896(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 6784(%r8)
vmovdqa 1600(%r9), %ymm0
vpaddw 2304(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7488(%r8)
vmovdqa 224(%r9), %ymm0
vpaddw 928(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 6816(%r8)
vmovdqa 1632(%r9), %ymm0
vpaddw 2336(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7520(%r8)
vmovdqa 256(%r9), %ymm0
vpaddw 960(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 6848(%r8)
vmovdqa 1664(%r9), %ymm0
vpaddw 2368(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7552(%r8)
vmovdqa 288(%r9), %ymm0
vpaddw 992(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 6880(%r8)
vmovdqa 1696(%r9), %ymm0
vpaddw 2400(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7584(%r8)
vmovdqa 320(%r9), %ymm0
vpaddw 1024(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 6912(%r8)
vmovdqa 1728(%r9), %ymm0
vpaddw 2432(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7616(%r8)
vmovdqa 352(%r9), %ymm0
vpaddw 1056(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 6944(%r8)
vmovdqa 1760(%r9), %ymm0
vpaddw 2464(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7648(%r8)
vmovdqa 384(%r9), %ymm0
vpaddw 1088(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 6976(%r8)
vmovdqa 1792(%r9), %ymm0
vpaddw 2496(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7680(%r8)
vmovdqa 416(%r9), %ymm0
vpaddw 1120(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7008(%r8)
vmovdqa 1824(%r9), %ymm0
vpaddw 2528(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7712(%r8)
vmovdqa 448(%r9), %ymm0
vpaddw 1152(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7040(%r8)
vmovdqa 1856(%r9), %ymm0
vpaddw 2560(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7744(%r8)
vmovdqa 480(%r9), %ymm0
vpaddw 1184(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7072(%r8)
vmovdqa 1888(%r9), %ymm0
vpaddw 2592(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7776(%r8)
vmovdqa 512(%r9), %ymm0
vpaddw 1216(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7104(%r8)
vmovdqa 1920(%r9), %ymm0
vpaddw 2624(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7808(%r8)
vmovdqa 544(%r9), %ymm0
vpaddw 1248(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7136(%r8)
vmovdqa 1952(%r9), %ymm0
vpaddw 2656(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7840(%r8)
vmovdqa 576(%r9), %ymm0
vpaddw 1280(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7168(%r8)
vmovdqa 1984(%r9), %ymm0
vpaddw 2688(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7872(%r8)
vmovdqa 608(%r9), %ymm0
vpaddw 1312(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7200(%r8)
vmovdqa 2016(%r9), %ymm0
vpaddw 2720(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7904(%r8)
vmovdqa 640(%r9), %ymm0
vpaddw 1344(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7232(%r8)
vmovdqa 2048(%r9), %ymm0
vpaddw 2752(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7936(%r8)
vmovdqa 672(%r9), %ymm0
vpaddw 1376(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7264(%r8)
vmovdqa 2080(%r9), %ymm0
vpaddw 2784(%r9), %ymm0, %ymm0
vmovdqa %ymm0, 7968(%r8)
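# Multiply the summed halves with the same 6x6 / 5x5 schoolbook pattern used
# in innerloop, writing the partial products at 8000..(%r8).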
vmovdqa 6592(%r8), %ymm0
vmovdqa 7296(%r8), %ymm6
vmovdqa 6624(%r8), %ymm1
vmovdqa 7328(%r8), %ymm7
vmovdqa 6656(%r8), %ymm2
vmovdqa 7360(%r8), %ymm8
vmovdqa 6688(%r8), %ymm3
vmovdqa 7392(%r8), %ymm9
vmovdqa 6720(%r8), %ymm4
vmovdqa 7424(%r8), %ymm10
vmovdqa 6752(%r8), %ymm5
vmovdqa 7456(%r8), %ymm11
vpmullw %ymm0, %ymm6, %ymm12
vmovdqa %ymm12, 8000(%r8)
vpmullw %ymm0, %ymm7, %ymm13
vpmullw %ymm1, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 8032(%r8)
vpmullw %ymm0, %ymm8, %ymm12
vpmullw %ymm1, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 8064(%r8)
vpmullw %ymm0, %ymm9, %ymm13
vpmullw %ymm1, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 8096(%r8)
vpmullw %ymm0, %ymm10, %ymm12
vpmullw %ymm1, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 8128(%r8)
vpmullw %ymm0, %ymm11, %ymm13
vpmullw %ymm1, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm5, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 8160(%r8)
vpmullw %ymm1, %ymm11, %ymm12
vpmullw %ymm2, %ymm10, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm5, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 8192(%r8)
vpmullw %ymm2, %ymm11, %ymm13
vpmullw %ymm3, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm5, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 8224(%r8)
vpmullw %ymm3, %ymm11, %ymm12
vpmullw %ymm4, %ymm10, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm5, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 8256(%r8)
vpmullw %ymm4, %ymm11, %ymm13
vpmullw %ymm5, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 8288(%r8)
vpmullw %ymm5, %ymm11, %ymm12
vmovdqa %ymm12, 8320(%r8)
vmovdqa 6784(%r8), %ymm0
vmovdqa 7488(%r8), %ymm6
vmovdqa 6816(%r8), %ymm1
vmovdqa 7520(%r8), %ymm7
vmovdqa 6848(%r8), %ymm2
vmovdqa 7552(%r8), %ymm8
vmovdqa 6880(%r8), %ymm3
vmovdqa 7584(%r8), %ymm9
vmovdqa 6912(%r8), %ymm4
vmovdqa 7616(%r8), %ymm10
vpmullw %ymm0, %ymm6, %ymm12
vmovdqa %ymm12, 8384(%r8)
vpmullw %ymm0, %ymm7, %ymm13
vpmullw %ymm1, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 8416(%r8)
vpmullw %ymm0, %ymm8, %ymm12
vpmullw %ymm1, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 8448(%r8)
vpmullw %ymm0, %ymm9, %ymm13
vpmullw %ymm1, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 8480(%r8)
vpmullw %ymm0, %ymm10, %ymm12
vpmullw %ymm1, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 8512(%r8)
vpmullw %ymm1, %ymm10, %ymm13
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 8544(%r8)
vpmullw %ymm2, %ymm10, %ymm12
vpmullw %ymm3, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 8576(%r8)
vpmullw %ymm3, %ymm10, %ymm13
vpmullw %ymm4, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 8608(%r8)
vpmullw %ymm4, %ymm10, %ymm12
vmovdqa %ymm12, 8640(%r8)
vpaddw 6592(%r8), %ymm0, %ymm0
vpaddw 7296(%r8), %ymm6, %ymm6
vpaddw 6624(%r8), %ymm1, %ymm1
vpaddw 7328(%r8), %ymm7, %ymm7
vpaddw 6656(%r8), %ymm2, %ymm2
vpaddw 7360(%r8), %ymm8, %ymm8
vpaddw 6688(%r8), %ymm3, %ymm3
vpaddw 7392(%r8), %ymm9, %ymm9
vpaddw 6720(%r8), %ymm4, %ymm4
vpaddw 7424(%r8), %ymm10, %ymm10
vpmullw %ymm0, %ymm11, %ymm12
vpmullw %ymm1, %ymm10, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm5, %ymm6, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpsubw 8160(%r8), %ymm12, %ymm12
vpsubw 8544(%r8), %ymm12, %ymm12
vmovdqa %ymm12, 8352(%r8)
vpmullw %ymm5, %ymm7, %ymm12
vpmullw %ymm5, %ymm8, %ymm13
vpmullw %ymm5, %ymm9, %ymm14
vpmullw %ymm5, %ymm10, %ymm15
vpmullw %ymm1, %ymm11, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm2, %ymm10, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm3, %ymm9, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm4, %ymm8, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm2, %ymm11, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm3, %ymm10, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm4, %ymm9, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm3, %ymm11, %ymm5
vpaddw %ymm5, %ymm14, %ymm14
vpmullw %ymm4, %ymm10, %ymm5
vpaddw %ymm5, %ymm14, %ymm14
vpmullw %ymm4, %ymm11, %ymm5
vpaddw %ymm5, %ymm15, %ymm15
vpmullw %ymm0, %ymm10, %ymm11
vpmullw %ymm1, %ymm9, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm2, %ymm8, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm3, %ymm7, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm4, %ymm6, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm0, %ymm9, %ymm10
vpmullw %ymm1, %ymm8, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm2, %ymm7, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm3, %ymm6, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm0, %ymm8, %ymm9
vpmullw %ymm1, %ymm7, %ymm5
vpaddw %ymm5, %ymm9, %ymm9
vpmullw %ymm2, %ymm6, %ymm5
vpaddw %ymm5, %ymm9, %ymm9
vpmullw %ymm0, %ymm7, %ymm8
vpmullw %ymm1, %ymm6, %ymm5
vpaddw %ymm5, %ymm8, %ymm8
vpmullw %ymm0, %ymm6, %ymm7
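# Karatsuba recombination of this product's partial results, mirroring the
# recombination done earlier inside innerloop.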
vmovdqa 8192(%r8), %ymm0
vpsubw 8384(%r8), %ymm0, %ymm0
vpsubw %ymm0, %ymm12, %ymm6
vpsubw 8576(%r8), %ymm6, %ymm6
vmovdqa %ymm6, 8384(%r8)
vpaddw %ymm7, %ymm0, %ymm0
vpsubw 8000(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 8192(%r8)
vmovdqa 8224(%r8), %ymm1
vpsubw 8416(%r8), %ymm1, %ymm1
vpsubw %ymm1, %ymm13, %ymm7
vpsubw 8608(%r8), %ymm7, %ymm7
vmovdqa %ymm7, 8416(%r8)
vpaddw %ymm8, %ymm1, %ymm1
vpsubw 8032(%r8), %ymm1, %ymm1
vmovdqa %ymm1, 8224(%r8)
vmovdqa 8256(%r8), %ymm2
vpsubw 8448(%r8), %ymm2, %ymm2
vpsubw %ymm2, %ymm14, %ymm8
vpsubw 8640(%r8), %ymm8, %ymm8
vmovdqa %ymm8, 8448(%r8)
vpaddw %ymm9, %ymm2, %ymm2
vpsubw 8064(%r8), %ymm2, %ymm2
vmovdqa %ymm2, 8256(%r8)
vmovdqa 8288(%r8), %ymm3
vpsubw 8480(%r8), %ymm3, %ymm3
vpsubw %ymm3, %ymm15, %ymm9
vmovdqa %ymm9, 8480(%r8)
vpaddw %ymm10, %ymm3, %ymm3
vpsubw 8096(%r8), %ymm3, %ymm3
vmovdqa %ymm3, 8288(%r8)
vmovdqa 8320(%r8), %ymm4
vpsubw 8512(%r8), %ymm4, %ymm4
vpaddw %ymm11, %ymm4, %ymm4
vpsubw 8128(%r8), %ymm4, %ymm4
vmovdqa %ymm4, 8320(%r8)
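
// Second block: repeat the same product/recombination pattern for the next
// pair of operand halves (vectors at 6944..7104(%r8) times 7648..7808(%r8),
// then 7136..7264 times 7840..7968), with the partial products stored at
// 8704..9344(%r8).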
vmovdqa 6944(%r8), %ymm0
vmovdqa 7648(%r8), %ymm6
vmovdqa 6976(%r8), %ymm1
vmovdqa 7680(%r8), %ymm7
vmovdqa 7008(%r8), %ymm2
vmovdqa 7712(%r8), %ymm8
vmovdqa 7040(%r8), %ymm3
vmovdqa 7744(%r8), %ymm9
vmovdqa 7072(%r8), %ymm4
vmovdqa 7776(%r8), %ymm10
vmovdqa 7104(%r8), %ymm5
vmovdqa 7808(%r8), %ymm11
vpmullw %ymm0, %ymm6, %ymm12
vmovdqa %ymm12, 8704(%r8)
vpmullw %ymm0, %ymm7, %ymm13
vpmullw %ymm1, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 8736(%r8)
vpmullw %ymm0, %ymm8, %ymm12
vpmullw %ymm1, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 8768(%r8)
vpmullw %ymm0, %ymm9, %ymm13
vpmullw %ymm1, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 8800(%r8)
vpmullw %ymm0, %ymm10, %ymm12
vpmullw %ymm1, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 8832(%r8)
vpmullw %ymm0, %ymm11, %ymm13
vpmullw %ymm1, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm5, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 8864(%r8)
vpmullw %ymm1, %ymm11, %ymm12
vpmullw %ymm2, %ymm10, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm5, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 8896(%r8)
vpmullw %ymm2, %ymm11, %ymm13
vpmullw %ymm3, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm5, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 8928(%r8)
vpmullw %ymm3, %ymm11, %ymm12
vpmullw %ymm4, %ymm10, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm5, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 8960(%r8)
vpmullw %ymm4, %ymm11, %ymm13
vpmullw %ymm5, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 8992(%r8)
vpmullw %ymm5, %ymm11, %ymm12
vmovdqa %ymm12, 9024(%r8)
vmovdqa 7136(%r8), %ymm0
vmovdqa 7840(%r8), %ymm6
vmovdqa 7168(%r8), %ymm1
vmovdqa 7872(%r8), %ymm7
vmovdqa 7200(%r8), %ymm2
vmovdqa 7904(%r8), %ymm8
vmovdqa 7232(%r8), %ymm3
vmovdqa 7936(%r8), %ymm9
vmovdqa 7264(%r8), %ymm4
vmovdqa 7968(%r8), %ymm10
vpmullw %ymm0, %ymm6, %ymm12
vmovdqa %ymm12, 9088(%r8)
vpmullw %ymm0, %ymm7, %ymm13
vpmullw %ymm1, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 9120(%r8)
vpmullw %ymm0, %ymm8, %ymm12
vpmullw %ymm1, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 9152(%r8)
vpmullw %ymm0, %ymm9, %ymm13
vpmullw %ymm1, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 9184(%r8)
vpmullw %ymm0, %ymm10, %ymm12
vpmullw %ymm1, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 9216(%r8)
vpmullw %ymm1, %ymm10, %ymm13
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 9248(%r8)
vpmullw %ymm2, %ymm10, %ymm12
vpmullw %ymm3, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 9280(%r8)
vpmullw %ymm3, %ymm10, %ymm13
vpmullw %ymm4, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 9312(%r8)
vpmullw %ymm4, %ymm10, %ymm12
vmovdqa %ymm12, 9344(%r8)
vpaddw 6944(%r8), %ymm0, %ymm0
vpaddw 7648(%r8), %ymm6, %ymm6
vpaddw 6976(%r8), %ymm1, %ymm1
vpaddw 7680(%r8), %ymm7, %ymm7
vpaddw 7008(%r8), %ymm2, %ymm2
vpaddw 7712(%r8), %ymm8, %ymm8
vpaddw 7040(%r8), %ymm3, %ymm3
vpaddw 7744(%r8), %ymm9, %ymm9
vpaddw 7072(%r8), %ymm4, %ymm4
vpaddw 7776(%r8), %ymm10, %ymm10
vpmullw %ymm0, %ymm11, %ymm12
vpmullw %ymm1, %ymm10, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm5, %ymm6, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpsubw 8864(%r8), %ymm12, %ymm12
vpsubw 9248(%r8), %ymm12, %ymm12
vmovdqa %ymm12, 9056(%r8)
vpmullw %ymm5, %ymm7, %ymm12
vpmullw %ymm5, %ymm8, %ymm13
vpmullw %ymm5, %ymm9, %ymm14
vpmullw %ymm5, %ymm10, %ymm15
vpmullw %ymm1, %ymm11, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm2, %ymm10, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm3, %ymm9, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm4, %ymm8, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm2, %ymm11, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm3, %ymm10, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm4, %ymm9, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm3, %ymm11, %ymm5
vpaddw %ymm5, %ymm14, %ymm14
vpmullw %ymm4, %ymm10, %ymm5
vpaddw %ymm5, %ymm14, %ymm14
vpmullw %ymm4, %ymm11, %ymm5
vpaddw %ymm5, %ymm15, %ymm15
vpmullw %ymm0, %ymm10, %ymm11
vpmullw %ymm1, %ymm9, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm2, %ymm8, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm3, %ymm7, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm4, %ymm6, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm0, %ymm9, %ymm10
vpmullw %ymm1, %ymm8, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm2, %ymm7, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm3, %ymm6, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm0, %ymm8, %ymm9
vpmullw %ymm1, %ymm7, %ymm5
vpaddw %ymm5, %ymm9, %ymm9
vpmullw %ymm2, %ymm6, %ymm5
vpaddw %ymm5, %ymm9, %ymm9
vpmullw %ymm0, %ymm7, %ymm8
vpmullw %ymm1, %ymm6, %ymm5
vpaddw %ymm5, %ymm8, %ymm8
vpmullw %ymm0, %ymm6, %ymm7
vmovdqa 8896(%r8), %ymm0
vpsubw 9088(%r8), %ymm0, %ymm0
vpsubw %ymm0, %ymm12, %ymm6
vpsubw 9280(%r8), %ymm6, %ymm6
vmovdqa %ymm6, 9088(%r8)
vpaddw %ymm7, %ymm0, %ymm0
vpsubw 8704(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 8896(%r8)
vmovdqa 8928(%r8), %ymm1
vpsubw 9120(%r8), %ymm1, %ymm1
vpsubw %ymm1, %ymm13, %ymm7
vpsubw 9312(%r8), %ymm7, %ymm7
vmovdqa %ymm7, 9120(%r8)
vpaddw %ymm8, %ymm1, %ymm1
vpsubw 8736(%r8), %ymm1, %ymm1
vmovdqa %ymm1, 8928(%r8)
vmovdqa 8960(%r8), %ymm2
vpsubw 9152(%r8), %ymm2, %ymm2
vpsubw %ymm2, %ymm14, %ymm8
vpsubw 9344(%r8), %ymm8, %ymm8
vmovdqa %ymm8, 9152(%r8)
vpaddw %ymm9, %ymm2, %ymm2
vpsubw 8768(%r8), %ymm2, %ymm2
vmovdqa %ymm2, 8960(%r8)
vmovdqa 8992(%r8), %ymm3
vpsubw 9184(%r8), %ymm3, %ymm3
vpsubw %ymm3, %ymm15, %ymm9
vmovdqa %ymm9, 9184(%r8)
vpaddw %ymm10, %ymm3, %ymm3
vpsubw 8800(%r8), %ymm3, %ymm3
vmovdqa %ymm3, 8992(%r8)
vmovdqa 9024(%r8), %ymm4
vpsubw 9216(%r8), %ymm4, %ymm4
vpaddw %ymm11, %ymm4, %ymm4
vpsubw 8832(%r8), %ymm4, %ymm4
vmovdqa %ymm4, 9024(%r8)
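
// Third block: multiply the sums of the two operand halves used above
// (6592..+6944.., 7296..+7648.., and likewise 6784..+7136.., 7488..+7840..),
// i.e. the middle term of the enclosing Karatsuba level, with the partial
// products stored at 5888..6528(%r8).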
vmovdqa 6592(%r8), %ymm0
vmovdqa 7296(%r8), %ymm6
vpaddw 6944(%r8), %ymm0, %ymm0
vpaddw 7648(%r8), %ymm6, %ymm6
vmovdqa 6624(%r8), %ymm1
vmovdqa 7328(%r8), %ymm7
vpaddw 6976(%r8), %ymm1, %ymm1
vpaddw 7680(%r8), %ymm7, %ymm7
vmovdqa 6656(%r8), %ymm2
vmovdqa 7360(%r8), %ymm8
vpaddw 7008(%r8), %ymm2, %ymm2
vpaddw 7712(%r8), %ymm8, %ymm8
vmovdqa 6688(%r8), %ymm3
vmovdqa 7392(%r8), %ymm9
vpaddw 7040(%r8), %ymm3, %ymm3
vpaddw 7744(%r8), %ymm9, %ymm9
vmovdqa 6720(%r8), %ymm4
vmovdqa 7424(%r8), %ymm10
vpaddw 7072(%r8), %ymm4, %ymm4
vpaddw 7776(%r8), %ymm10, %ymm10
vmovdqa 6752(%r8), %ymm5
vmovdqa 7456(%r8), %ymm11
vpaddw 7104(%r8), %ymm5, %ymm5
vpaddw 7808(%r8), %ymm11, %ymm11
vpmullw %ymm0, %ymm6, %ymm12
vmovdqa %ymm12, 5888(%r8)
vpmullw %ymm0, %ymm7, %ymm13
vpmullw %ymm1, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 5920(%r8)
vpmullw %ymm0, %ymm8, %ymm12
vpmullw %ymm1, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 5952(%r8)
vpmullw %ymm0, %ymm9, %ymm13
vpmullw %ymm1, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 5984(%r8)
vpmullw %ymm0, %ymm10, %ymm12
vpmullw %ymm1, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 6016(%r8)
vpmullw %ymm0, %ymm11, %ymm13
vpmullw %ymm1, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm5, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 6048(%r8)
vpmullw %ymm1, %ymm11, %ymm12
vpmullw %ymm2, %ymm10, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm5, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 6080(%r8)
vpmullw %ymm2, %ymm11, %ymm13
vpmullw %ymm3, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm5, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 6112(%r8)
vpmullw %ymm3, %ymm11, %ymm12
vpmullw %ymm4, %ymm10, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm5, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 6144(%r8)
vpmullw %ymm4, %ymm11, %ymm13
vpmullw %ymm5, %ymm10, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 6176(%r8)
vpmullw %ymm5, %ymm11, %ymm12
vmovdqa %ymm12, 6208(%r8)
vmovdqa 6784(%r8), %ymm0
vmovdqa 7488(%r8), %ymm6
vpaddw 7136(%r8), %ymm0, %ymm0
vpaddw 7840(%r8), %ymm6, %ymm6
vmovdqa 6816(%r8), %ymm1
vmovdqa 7520(%r8), %ymm7
vpaddw 7168(%r8), %ymm1, %ymm1
vpaddw 7872(%r8), %ymm7, %ymm7
vmovdqa 6848(%r8), %ymm2
vmovdqa 7552(%r8), %ymm8
vpaddw 7200(%r8), %ymm2, %ymm2
vpaddw 7904(%r8), %ymm8, %ymm8
vmovdqa 6880(%r8), %ymm3
vmovdqa 7584(%r8), %ymm9
vpaddw 7232(%r8), %ymm3, %ymm3
vpaddw 7936(%r8), %ymm9, %ymm9
vmovdqa 6912(%r8), %ymm4
vmovdqa 7616(%r8), %ymm10
vpaddw 7264(%r8), %ymm4, %ymm4
vpaddw 7968(%r8), %ymm10, %ymm10
vpmullw %ymm0, %ymm6, %ymm12
vmovdqa %ymm12, 6272(%r8)
vpmullw %ymm0, %ymm7, %ymm13
vpmullw %ymm1, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 6304(%r8)
vpmullw %ymm0, %ymm8, %ymm12
vpmullw %ymm1, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 6336(%r8)
vpmullw %ymm0, %ymm9, %ymm13
vpmullw %ymm1, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm2, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm6, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 6368(%r8)
vpmullw %ymm0, %ymm10, %ymm12
vpmullw %ymm1, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm2, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm3, %ymm7, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm6, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 6400(%r8)
vpmullw %ymm1, %ymm10, %ymm13
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 6432(%r8)
vpmullw %ymm2, %ymm10, %ymm12
vpmullw %ymm3, %ymm9, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vpmullw %ymm4, %ymm8, %ymm15
vpaddw %ymm12, %ymm15, %ymm12
vmovdqa %ymm12, 6464(%r8)
vpmullw %ymm3, %ymm10, %ymm13
vpmullw %ymm4, %ymm9, %ymm15
vpaddw %ymm13, %ymm15, %ymm13
vmovdqa %ymm13, 6496(%r8)
vpmullw %ymm4, %ymm10, %ymm12
vmovdqa %ymm12, 6528(%r8)
vpaddw 6592(%r8), %ymm0, %ymm0
vpaddw 7296(%r8), %ymm6, %ymm6
vpaddw 6944(%r8), %ymm0, %ymm0
vpaddw 7648(%r8), %ymm6, %ymm6
vpaddw 6624(%r8), %ymm1, %ymm1
vpaddw 7328(%r8), %ymm7, %ymm7
vpaddw 6976(%r8), %ymm1, %ymm1
vpaddw 7680(%r8), %ymm7, %ymm7
vpaddw 6656(%r8), %ymm2, %ymm2
vpaddw 7360(%r8), %ymm8, %ymm8
vpaddw 7008(%r8), %ymm2, %ymm2
vpaddw 7712(%r8), %ymm8, %ymm8
vpaddw 6688(%r8), %ymm3, %ymm3
vpaddw 7392(%r8), %ymm9, %ymm9
vpaddw 7040(%r8), %ymm3, %ymm3
vpaddw 7744(%r8), %ymm9, %ymm9
vpaddw 6720(%r8), %ymm4, %ymm4
vpaddw 7424(%r8), %ymm10, %ymm10
vpaddw 7072(%r8), %ymm4, %ymm4
vpaddw 7776(%r8), %ymm10, %ymm10
vpmullw %ymm0, %ymm11, %ymm12
vpmullw %ymm1, %ymm10, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm2, %ymm9, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm3, %ymm8, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm4, %ymm7, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpmullw %ymm5, %ymm6, %ymm15
vpaddw %ymm15, %ymm12, %ymm12
vpsubw 6048(%r8), %ymm12, %ymm12
vpsubw 6432(%r8), %ymm12, %ymm12
vmovdqa %ymm12, 6240(%r8)
vpmullw %ymm5, %ymm7, %ymm12
vpmullw %ymm5, %ymm8, %ymm13
vpmullw %ymm5, %ymm9, %ymm14
vpmullw %ymm5, %ymm10, %ymm15
vpmullw %ymm1, %ymm11, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm2, %ymm10, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm3, %ymm9, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm4, %ymm8, %ymm5
vpaddw %ymm5, %ymm12, %ymm12
vpmullw %ymm2, %ymm11, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm3, %ymm10, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm4, %ymm9, %ymm5
vpaddw %ymm5, %ymm13, %ymm13
vpmullw %ymm3, %ymm11, %ymm5
vpaddw %ymm5, %ymm14, %ymm14
vpmullw %ymm4, %ymm10, %ymm5
vpaddw %ymm5, %ymm14, %ymm14
vpmullw %ymm4, %ymm11, %ymm5
vpaddw %ymm5, %ymm15, %ymm15
vpmullw %ymm0, %ymm10, %ymm11
vpmullw %ymm1, %ymm9, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm2, %ymm8, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm3, %ymm7, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm4, %ymm6, %ymm5
vpaddw %ymm5, %ymm11, %ymm11
vpmullw %ymm0, %ymm9, %ymm10
vpmullw %ymm1, %ymm8, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm2, %ymm7, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm3, %ymm6, %ymm5
vpaddw %ymm5, %ymm10, %ymm10
vpmullw %ymm0, %ymm8, %ymm9
vpmullw %ymm1, %ymm7, %ymm5
vpaddw %ymm5, %ymm9, %ymm9
vpmullw %ymm2, %ymm6, %ymm5
vpaddw %ymm5, %ymm9, %ymm9
vpmullw %ymm0, %ymm7, %ymm8
vpmullw %ymm1, %ymm6, %ymm5
vpaddw %ymm5, %ymm8, %ymm8
vpmullw %ymm0, %ymm6, %ymm7
vmovdqa 6080(%r8), %ymm0
vpsubw 6272(%r8), %ymm0, %ymm0
vpsubw %ymm0, %ymm12, %ymm6
vpsubw 6464(%r8), %ymm6, %ymm6
vmovdqa %ymm6, 6272(%r8)
vpaddw %ymm7, %ymm0, %ymm0
vpsubw 5888(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 6080(%r8)
vmovdqa 6112(%r8), %ymm1
vpsubw 6304(%r8), %ymm1, %ymm1
vpsubw %ymm1, %ymm13, %ymm7
vpsubw 6496(%r8), %ymm7, %ymm7
vmovdqa %ymm7, 6304(%r8)
vpaddw %ymm8, %ymm1, %ymm1
vpsubw 5920(%r8), %ymm1, %ymm1
vmovdqa %ymm1, 6112(%r8)
vmovdqa 6144(%r8), %ymm2
vpsubw 6336(%r8), %ymm2, %ymm2
vpsubw %ymm2, %ymm14, %ymm8
vpsubw 6528(%r8), %ymm8, %ymm8
vmovdqa %ymm8, 6336(%r8)
vpaddw %ymm9, %ymm2, %ymm2
vpsubw 5952(%r8), %ymm2, %ymm2
vmovdqa %ymm2, 6144(%r8)
vmovdqa 6176(%r8), %ymm3
vpsubw 6368(%r8), %ymm3, %ymm3
vpsubw %ymm3, %ymm15, %ymm9
vmovdqa %ymm9, 6368(%r8)
vpaddw %ymm10, %ymm3, %ymm3
vpsubw 5984(%r8), %ymm3, %ymm3
vmovdqa %ymm3, 6176(%r8)
vmovdqa 6208(%r8), %ymm4
vpsubw 6400(%r8), %ymm4, %ymm4
vpaddw %ymm11, %ymm4, %ymm4
vpsubw 6016(%r8), %ymm4, %ymm4
vmovdqa %ymm4, 6208(%r8)
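
// Outer-level recombination: combine the three sub-products (the 8000../8352..
// block, the 8704../9056.. block, and the middle block at 5888../6240..)
// column by column; the updated low columns go back to 8352..8640(%r8) and
// the high columns are kept in %ymm6-%ymm15 for the merge into %r10 below.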
vmovdqa 8352(%r8), %ymm0
vpsubw 8704(%r8), %ymm0, %ymm0
vmovdqa 6240(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 9056(%r8), %ymm1, %ymm6
vpsubw 8000(%r8), %ymm0, %ymm0
vpaddw 5888(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 8352(%r8)
vmovdqa 8384(%r8), %ymm0
vpsubw 8736(%r8), %ymm0, %ymm0
vmovdqa 6272(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 9088(%r8), %ymm1, %ymm7
vpsubw 8032(%r8), %ymm0, %ymm0
vpaddw 5920(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 8384(%r8)
vmovdqa 8416(%r8), %ymm0
vpsubw 8768(%r8), %ymm0, %ymm0
vmovdqa 6304(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 9120(%r8), %ymm1, %ymm8
vpsubw 8064(%r8), %ymm0, %ymm0
vpaddw 5952(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 8416(%r8)
vmovdqa 8448(%r8), %ymm0
vpsubw 8800(%r8), %ymm0, %ymm0
vmovdqa 6336(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 9152(%r8), %ymm1, %ymm9
vpsubw 8096(%r8), %ymm0, %ymm0
vpaddw 5984(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 8448(%r8)
vmovdqa 8480(%r8), %ymm0
vpsubw 8832(%r8), %ymm0, %ymm0
vmovdqa 6368(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 9184(%r8), %ymm1, %ymm10
vpsubw 8128(%r8), %ymm0, %ymm0
vpaddw 6016(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 8480(%r8)
vmovdqa 8512(%r8), %ymm0
vpsubw 8864(%r8), %ymm0, %ymm0
vmovdqa 6400(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 9216(%r8), %ymm1, %ymm11
vpsubw 8160(%r8), %ymm0, %ymm0
vpaddw 6048(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 8512(%r8)
vmovdqa 8544(%r8), %ymm0
vpsubw 8896(%r8), %ymm0, %ymm0
vmovdqa 6432(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 9248(%r8), %ymm1, %ymm12
vpsubw 8192(%r8), %ymm0, %ymm0
vpaddw 6080(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 8544(%r8)
vmovdqa 8576(%r8), %ymm0
vpsubw 8928(%r8), %ymm0, %ymm0
vmovdqa 6464(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 9280(%r8), %ymm1, %ymm13
vpsubw 8224(%r8), %ymm0, %ymm0
vpaddw 6112(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 8576(%r8)
vmovdqa 8608(%r8), %ymm0
vpsubw 8960(%r8), %ymm0, %ymm0
vmovdqa 6496(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 9312(%r8), %ymm1, %ymm14
vpsubw 8256(%r8), %ymm0, %ymm0
vpaddw 6144(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 8608(%r8)
vmovdqa 8640(%r8), %ymm0
vpsubw 8992(%r8), %ymm0, %ymm0
vmovdqa 6528(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 9344(%r8), %ymm1, %ymm15
vpsubw 8288(%r8), %ymm0, %ymm0
vpaddw 6176(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 8640(%r8)
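
// Merge this product into the accumulator held in the %r10 buffer: each step
// takes a column pair from %r10 (e.g. 3520../4224..), combines it with the
// fresh columns in %r8 and the values carried in %ymm6-%ymm15, and writes the
// updated columns back into the 3520..4864(%r10) range.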
vmovdqa 6208(%r8), %ymm0
vpsubw 8320(%r8), %ymm0, %ymm0
vpsubw 9024(%r8), %ymm0, %ymm0
vpsubw 3488(%r10), %ymm0, %ymm0
vpsubw 4896(%r10), %ymm0, %ymm0
vmovdqa %ymm0, 4192(%r10)
vmovdqa 3520(%r10), %ymm0
vpsubw 4224(%r10), %ymm0, %ymm0
vpsubw %ymm0, %ymm6, %ymm6
vpsubw 4928(%r10), %ymm6, %ymm6
vpsubw 2816(%r10), %ymm0, %ymm0
vpaddw 8000(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3520(%r10)
vmovdqa %ymm6, 4224(%r10)
vmovdqa 3552(%r10), %ymm0
vpsubw 4256(%r10), %ymm0, %ymm0
vpsubw %ymm0, %ymm7, %ymm7
vpsubw 4960(%r10), %ymm7, %ymm7
vpsubw 2848(%r10), %ymm0, %ymm0
vpaddw 8032(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3552(%r10)
vmovdqa %ymm7, 4256(%r10)
vmovdqa 3584(%r10), %ymm0
vpsubw 4288(%r10), %ymm0, %ymm0
vpsubw %ymm0, %ymm8, %ymm8
vpsubw 4992(%r10), %ymm8, %ymm8
vpsubw 2880(%r10), %ymm0, %ymm0
vpaddw 8064(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3584(%r10)
vmovdqa %ymm8, 4288(%r10)
vmovdqa 3616(%r10), %ymm0
vpsubw 4320(%r10), %ymm0, %ymm0
vpsubw %ymm0, %ymm9, %ymm9
vpsubw 5024(%r10), %ymm9, %ymm9
vpsubw 2912(%r10), %ymm0, %ymm0
vpaddw 8096(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3616(%r10)
vmovdqa %ymm9, 4320(%r10)
vmovdqa 3648(%r10), %ymm0
vpsubw 4352(%r10), %ymm0, %ymm0
vpsubw %ymm0, %ymm10, %ymm10
vpsubw 5056(%r10), %ymm10, %ymm10
vpsubw 2944(%r10), %ymm0, %ymm0
vpaddw 8128(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3648(%r10)
vmovdqa %ymm10, 4352(%r10)
vmovdqa 3680(%r10), %ymm0
vpsubw 4384(%r10), %ymm0, %ymm0
vpsubw %ymm0, %ymm11, %ymm11
vpsubw 5088(%r10), %ymm11, %ymm11
vpsubw 2976(%r10), %ymm0, %ymm0
vpaddw 8160(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3680(%r10)
vmovdqa %ymm11, 4384(%r10)
vmovdqa 3712(%r10), %ymm0
vpsubw 4416(%r10), %ymm0, %ymm0
vpsubw %ymm0, %ymm12, %ymm12
vpsubw 5120(%r10), %ymm12, %ymm12
vpsubw 3008(%r10), %ymm0, %ymm0
vpaddw 8192(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3712(%r10)
vmovdqa %ymm12, 4416(%r10)
vmovdqa 3744(%r10), %ymm0
vpsubw 4448(%r10), %ymm0, %ymm0
vpsubw %ymm0, %ymm13, %ymm13
vpsubw 5152(%r10), %ymm13, %ymm13
vpsubw 3040(%r10), %ymm0, %ymm0
vpaddw 8224(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3744(%r10)
vmovdqa %ymm13, 4448(%r10)
vmovdqa 3776(%r10), %ymm0
vpsubw 4480(%r10), %ymm0, %ymm0
vpsubw %ymm0, %ymm14, %ymm14
vpsubw 5184(%r10), %ymm14, %ymm14
vpsubw 3072(%r10), %ymm0, %ymm0
vpaddw 8256(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3776(%r10)
vmovdqa %ymm14, 4480(%r10)
vmovdqa 3808(%r10), %ymm0
vpsubw 4512(%r10), %ymm0, %ymm0
vpsubw %ymm0, %ymm15, %ymm15
vpsubw 5216(%r10), %ymm15, %ymm15
vpsubw 3104(%r10), %ymm0, %ymm0
vpaddw 8288(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3808(%r10)
vmovdqa %ymm15, 4512(%r10)
vmovdqa 3840(%r10), %ymm0
vpsubw 4544(%r10), %ymm0, %ymm0
vmovdqa 9024(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 5248(%r10), %ymm1, %ymm1
vpsubw 3136(%r10), %ymm0, %ymm0
vpaddw 8320(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3840(%r10)
vmovdqa %ymm1, 4544(%r10)
vmovdqa 3872(%r10), %ymm0
vpsubw 4576(%r10), %ymm0, %ymm0
vmovdqa 9056(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 5280(%r10), %ymm1, %ymm1
vpsubw 3168(%r10), %ymm0, %ymm0
vpaddw 8352(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3872(%r10)
vmovdqa %ymm1, 4576(%r10)
vmovdqa 3904(%r10), %ymm0
vpsubw 4608(%r10), %ymm0, %ymm0
vmovdqa 9088(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 5312(%r10), %ymm1, %ymm1
vpsubw 3200(%r10), %ymm0, %ymm0
vpaddw 8384(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3904(%r10)
vmovdqa %ymm1, 4608(%r10)
vmovdqa 3936(%r10), %ymm0
vpsubw 4640(%r10), %ymm0, %ymm0
vmovdqa 9120(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 5344(%r10), %ymm1, %ymm1
vpsubw 3232(%r10), %ymm0, %ymm0
vpaddw 8416(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3936(%r10)
vmovdqa %ymm1, 4640(%r10)
vmovdqa 3968(%r10), %ymm0
vpsubw 4672(%r10), %ymm0, %ymm0
vmovdqa 9152(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 5376(%r10), %ymm1, %ymm1
vpsubw 3264(%r10), %ymm0, %ymm0
vpaddw 8448(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 3968(%r10)
vmovdqa %ymm1, 4672(%r10)
vmovdqa 4000(%r10), %ymm0
vpsubw 4704(%r10), %ymm0, %ymm0
vmovdqa 9184(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 5408(%r10), %ymm1, %ymm1
vpsubw 3296(%r10), %ymm0, %ymm0
vpaddw 8480(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 4000(%r10)
vmovdqa %ymm1, 4704(%r10)
vmovdqa 4032(%r10), %ymm0
vpsubw 4736(%r10), %ymm0, %ymm0
vmovdqa 9216(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 5440(%r10), %ymm1, %ymm1
vpsubw 3328(%r10), %ymm0, %ymm0
vpaddw 8512(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 4032(%r10)
vmovdqa %ymm1, 4736(%r10)
vmovdqa 4064(%r10), %ymm0
vpsubw 4768(%r10), %ymm0, %ymm0
vmovdqa 9248(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 5472(%r10), %ymm1, %ymm1
vpsubw 3360(%r10), %ymm0, %ymm0
vpaddw 8544(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 4064(%r10)
vmovdqa %ymm1, 4768(%r10)
vmovdqa 4096(%r10), %ymm0
vpsubw 4800(%r10), %ymm0, %ymm0
vmovdqa 9280(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 5504(%r10), %ymm1, %ymm1
vpsubw 3392(%r10), %ymm0, %ymm0
vpaddw 8576(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 4096(%r10)
vmovdqa %ymm1, 4800(%r10)
vmovdqa 4128(%r10), %ymm0
vpsubw 4832(%r10), %ymm0, %ymm0
vmovdqa 9312(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 5536(%r10), %ymm1, %ymm1
vpsubw 3424(%r10), %ymm0, %ymm0
vpaddw 8608(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 4128(%r10)
vmovdqa %ymm1, 4832(%r10)
vmovdqa 4160(%r10), %ymm0
vpsubw 4864(%r10), %ymm0, %ymm0
vmovdqa 9344(%r8), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 5568(%r10), %ymm1, %ymm1
vpsubw 3456(%r10), %ymm0, %ymm0
vpaddw 8640(%r8), %ymm0, %ymm0
vmovdqa %ymm0, 4160(%r10)
vmovdqa %ymm1, 4864(%r10)
vpxor %ymm1, %ymm1, %ymm1
vmovdqa %ymm1, 5600(%r10)
subq $32, %r8
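
// Transpose phase: the subq above repositions %r8 so that 0(%r8) can act as a
// one-vector spill slot (the matching addq after this phase restores it).
// The vpunpck*wd/dq/qdq + vinserti128 sequences below transpose the 16-bit
// coefficient blocks held in the %r10 buffer and scatter the resulting
// columns into %r12 at a 192-byte stride.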
vmovdqa 2816(%r10), %ymm0
vmovdqa 2880(%r10), %ymm1
vmovdqa 2944(%r10), %ymm2
vmovdqa 3008(%r10), %ymm3
vpunpcklwd 2848(%r10), %ymm0, %ymm4
vpunpckhwd 2848(%r10), %ymm0, %ymm5
vpunpcklwd 2912(%r10), %ymm1, %ymm6
vpunpckhwd 2912(%r10), %ymm1, %ymm7
vpunpcklwd 2976(%r10), %ymm2, %ymm8
vpunpckhwd 2976(%r10), %ymm2, %ymm9
vpunpcklwd 3040(%r10), %ymm3, %ymm10
vpunpckhwd 3040(%r10), %ymm3, %ymm11
vpunpckldq %ymm6, %ymm4, %ymm0
vpunpckhdq %ymm6, %ymm4, %ymm1
vpunpckldq %ymm7, %ymm5, %ymm2
vpunpckhdq %ymm7, %ymm5, %ymm3
vpunpckldq %ymm10, %ymm8, %ymm12
vpunpckhdq %ymm10, %ymm8, %ymm13
vpunpckldq %ymm11, %ymm9, %ymm14
vpunpckhdq %ymm11, %ymm9, %ymm15
vpunpcklqdq %ymm12, %ymm0, %ymm4
vpunpckhqdq %ymm12, %ymm0, %ymm5
vpunpcklqdq %ymm13, %ymm1, %ymm6
vpunpckhqdq %ymm13, %ymm1, %ymm7
vpunpcklqdq %ymm14, %ymm2, %ymm8
vpunpckhqdq %ymm14, %ymm2, %ymm9
vpunpcklqdq %ymm15, %ymm3, %ymm10
vpunpckhqdq %ymm15, %ymm3, %ymm11
vmovdqa 3072(%r10), %ymm0
vmovdqa 3136(%r10), %ymm1
vmovdqa 3200(%r10), %ymm2
vmovdqa 3264(%r10), %ymm3
vpunpcklwd 3104(%r10), %ymm0, %ymm12
vpunpckhwd 3104(%r10), %ymm0, %ymm13
vpunpcklwd 3168(%r10), %ymm1, %ymm14
vpunpckhwd 3168(%r10), %ymm1, %ymm15
vpunpcklwd 3232(%r10), %ymm2, %ymm0
vpunpckhwd 3232(%r10), %ymm2, %ymm1
vpunpcklwd 3296(%r10), %ymm3, %ymm2
vpunpckhwd 3296(%r10), %ymm3, %ymm3
vmovdqa %ymm11, 0(%r8)
vpunpckldq %ymm14, %ymm12, %ymm11
vpunpckhdq %ymm14, %ymm12, %ymm12
vpunpckldq %ymm15, %ymm13, %ymm14
vpunpckhdq %ymm15, %ymm13, %ymm15
vpunpckldq %ymm2, %ymm0, %ymm13
vpunpckhdq %ymm2, %ymm0, %ymm0
vpunpckldq %ymm3, %ymm1, %ymm2
vpunpckhdq %ymm3, %ymm1, %ymm1
vpunpcklqdq %ymm13, %ymm11, %ymm3
vpunpckhqdq %ymm13, %ymm11, %ymm13
vpunpcklqdq %ymm0, %ymm12, %ymm11
vpunpckhqdq %ymm0, %ymm12, %ymm0
vpunpcklqdq %ymm2, %ymm14, %ymm12
vpunpckhqdq %ymm2, %ymm14, %ymm2
vpunpcklqdq %ymm1, %ymm15, %ymm14
vpunpckhqdq %ymm1, %ymm15, %ymm1
vinserti128 $1, %xmm3, %ymm4, %ymm15
vmovdqa %ymm15, 0(%r12)
vinserti128 $1, %xmm13, %ymm5, %ymm15
vmovdqa %ymm15, 192(%r12)
vinserti128 $1, %xmm11, %ymm6, %ymm15
vmovdqa %ymm15, 384(%r12)
vinserti128 $1, %xmm0, %ymm7, %ymm15
vmovdqa %ymm15, 576(%r12)
vinserti128 $1, %xmm12, %ymm8, %ymm15
vmovdqa %ymm15, 768(%r12)
vinserti128 $1, %xmm2, %ymm9, %ymm15
vmovdqa %ymm15, 960(%r12)
vinserti128 $1, %xmm14, %ymm10, %ymm15
vmovdqa %ymm15, 1152(%r12)
vpermq $78, %ymm4, %ymm4
vpermq $78, %ymm5, %ymm5
vpermq $78, %ymm6, %ymm6
vpermq $78, %ymm7, %ymm7
vpermq $78, %ymm8, %ymm8
vpermq $78, %ymm9, %ymm9
vpermq $78, %ymm10, %ymm10
vinserti128 $0, %xmm4, %ymm3, %ymm15
vmovdqa %ymm15, 1536(%r12)
vinserti128 $0, %xmm5, %ymm13, %ymm15
vmovdqa %ymm15, 1728(%r12)
vinserti128 $0, %xmm6, %ymm11, %ymm15
vmovdqa %ymm15, 1920(%r12)
vinserti128 $0, %xmm7, %ymm0, %ymm15
vmovdqa %ymm15, 2112(%r12)
vinserti128 $0, %xmm8, %ymm12, %ymm15
vmovdqa %ymm15, 2304(%r12)
vinserti128 $0, %xmm9, %ymm2, %ymm15
vmovdqa %ymm15, 2496(%r12)
vinserti128 $0, %xmm10, %ymm14, %ymm15
vmovdqa %ymm15, 2688(%r12)
vmovdqa 0(%r8), %ymm11
vinserti128 $1, %xmm1, %ymm11, %ymm14
vmovdqa %ymm14, 1344(%r12)
vpermq $78, %ymm11, %ymm11
vinserti128 $0, %xmm11, %ymm1, %ymm1
vmovdqa %ymm1, 2880(%r12)
vmovdqa 3328(%r10), %ymm0
vmovdqa 3392(%r10), %ymm1
vmovdqa 3456(%r10), %ymm2
vmovdqa 3520(%r10), %ymm3
vpunpcklwd 3360(%r10), %ymm0, %ymm4
vpunpckhwd 3360(%r10), %ymm0, %ymm5
vpunpcklwd 3424(%r10), %ymm1, %ymm6
vpunpckhwd 3424(%r10), %ymm1, %ymm7
vpunpcklwd 3488(%r10), %ymm2, %ymm8
vpunpckhwd 3488(%r10), %ymm2, %ymm9
vpunpcklwd 3552(%r10), %ymm3, %ymm10
vpunpckhwd 3552(%r10), %ymm3, %ymm11
vpunpckldq %ymm6, %ymm4, %ymm0
vpunpckhdq %ymm6, %ymm4, %ymm1
vpunpckldq %ymm7, %ymm5, %ymm2
vpunpckhdq %ymm7, %ymm5, %ymm3
vpunpckldq %ymm10, %ymm8, %ymm12
vpunpckhdq %ymm10, %ymm8, %ymm13
vpunpckldq %ymm11, %ymm9, %ymm14
vpunpckhdq %ymm11, %ymm9, %ymm15
vpunpcklqdq %ymm12, %ymm0, %ymm4
vpunpckhqdq %ymm12, %ymm0, %ymm5
vpunpcklqdq %ymm13, %ymm1, %ymm6
vpunpckhqdq %ymm13, %ymm1, %ymm7
vpunpcklqdq %ymm14, %ymm2, %ymm8
vpunpckhqdq %ymm14, %ymm2, %ymm9
vpunpcklqdq %ymm15, %ymm3, %ymm10
vpunpckhqdq %ymm15, %ymm3, %ymm11
vmovdqa 3584(%r10), %ymm0
vmovdqa 3648(%r10), %ymm1
vmovdqa 3712(%r10), %ymm2
vmovdqa 3776(%r10), %ymm3
vpunpcklwd 3616(%r10), %ymm0, %ymm12
vpunpckhwd 3616(%r10), %ymm0, %ymm13
vpunpcklwd 3680(%r10), %ymm1, %ymm14
vpunpckhwd 3680(%r10), %ymm1, %ymm15
vpunpcklwd 3744(%r10), %ymm2, %ymm0
vpunpckhwd 3744(%r10), %ymm2, %ymm1
vpunpcklwd 3808(%r10), %ymm3, %ymm2
vpunpckhwd 3808(%r10), %ymm3, %ymm3
vmovdqa %ymm11, 0(%r8)
vpunpckldq %ymm14, %ymm12, %ymm11
vpunpckhdq %ymm14, %ymm12, %ymm12
vpunpckldq %ymm15, %ymm13, %ymm14
vpunpckhdq %ymm15, %ymm13, %ymm15
vpunpckldq %ymm2, %ymm0, %ymm13
vpunpckhdq %ymm2, %ymm0, %ymm0
vpunpckldq %ymm3, %ymm1, %ymm2
vpunpckhdq %ymm3, %ymm1, %ymm1
vpunpcklqdq %ymm13, %ymm11, %ymm3
vpunpckhqdq %ymm13, %ymm11, %ymm13
vpunpcklqdq %ymm0, %ymm12, %ymm11
vpunpckhqdq %ymm0, %ymm12, %ymm0
vpunpcklqdq %ymm2, %ymm14, %ymm12
vpunpckhqdq %ymm2, %ymm14, %ymm2
vpunpcklqdq %ymm1, %ymm15, %ymm14
vpunpckhqdq %ymm1, %ymm15, %ymm1
vinserti128 $1, %xmm3, %ymm4, %ymm15
vmovdqa %ymm15, 32(%r12)
vinserti128 $1, %xmm13, %ymm5, %ymm15
vmovdqa %ymm15, 224(%r12)
vinserti128 $1, %xmm11, %ymm6, %ymm15
vmovdqa %ymm15, 416(%r12)
vinserti128 $1, %xmm0, %ymm7, %ymm15
vmovdqa %ymm15, 608(%r12)
vinserti128 $1, %xmm12, %ymm8, %ymm15
vmovdqa %ymm15, 800(%r12)
vinserti128 $1, %xmm2, %ymm9, %ymm15
vmovdqa %ymm15, 992(%r12)
vinserti128 $1, %xmm14, %ymm10, %ymm15
vmovdqa %ymm15, 1184(%r12)
vpermq $78, %ymm4, %ymm4
vpermq $78, %ymm5, %ymm5
vpermq $78, %ymm6, %ymm6
vpermq $78, %ymm7, %ymm7
vpermq $78, %ymm8, %ymm8
vpermq $78, %ymm9, %ymm9
vpermq $78, %ymm10, %ymm10
vinserti128 $0, %xmm4, %ymm3, %ymm15
vmovdqa %ymm15, 1568(%r12)
vinserti128 $0, %xmm5, %ymm13, %ymm15
vmovdqa %ymm15, 1760(%r12)
vinserti128 $0, %xmm6, %ymm11, %ymm15
vmovdqa %ymm15, 1952(%r12)
vinserti128 $0, %xmm7, %ymm0, %ymm15
vmovdqa %ymm15, 2144(%r12)
vinserti128 $0, %xmm8, %ymm12, %ymm15
vmovdqa %ymm15, 2336(%r12)
vinserti128 $0, %xmm9, %ymm2, %ymm15
vmovdqa %ymm15, 2528(%r12)
vinserti128 $0, %xmm10, %ymm14, %ymm15
vmovdqa %ymm15, 2720(%r12)
vmovdqa 0(%r8), %ymm11
vinserti128 $1, %xmm1, %ymm11, %ymm14
vmovdqa %ymm14, 1376(%r12)
vpermq $78, %ymm11, %ymm11
vinserti128 $0, %xmm11, %ymm1, %ymm1
vmovdqa %ymm1, 2912(%r12)
vmovdqa 3840(%r10), %ymm0
vmovdqa 3904(%r10), %ymm1
vmovdqa 3968(%r10), %ymm2
vmovdqa 4032(%r10), %ymm3
vpunpcklwd 3872(%r10), %ymm0, %ymm4
vpunpckhwd 3872(%r10), %ymm0, %ymm5
vpunpcklwd 3936(%r10), %ymm1, %ymm6
vpunpckhwd 3936(%r10), %ymm1, %ymm7
vpunpcklwd 4000(%r10), %ymm2, %ymm8
vpunpckhwd 4000(%r10), %ymm2, %ymm9
vpunpcklwd 4064(%r10), %ymm3, %ymm10
vpunpckhwd 4064(%r10), %ymm3, %ymm11
vpunpckldq %ymm6, %ymm4, %ymm0
vpunpckhdq %ymm6, %ymm4, %ymm1
vpunpckldq %ymm7, %ymm5, %ymm2
vpunpckhdq %ymm7, %ymm5, %ymm3
vpunpckldq %ymm10, %ymm8, %ymm12
vpunpckhdq %ymm10, %ymm8, %ymm13
vpunpckldq %ymm11, %ymm9, %ymm14
vpunpckhdq %ymm11, %ymm9, %ymm15
vpunpcklqdq %ymm12, %ymm0, %ymm4
vpunpckhqdq %ymm12, %ymm0, %ymm5
vpunpcklqdq %ymm13, %ymm1, %ymm6
vpunpckhqdq %ymm13, %ymm1, %ymm7
vpunpcklqdq %ymm14, %ymm2, %ymm8
vpunpckhqdq %ymm14, %ymm2, %ymm9
vpunpcklqdq %ymm15, %ymm3, %ymm10
vpunpckhqdq %ymm15, %ymm3, %ymm11
vmovdqa 4096(%r10), %ymm0
vmovdqa 4160(%r10), %ymm1
vmovdqa 4224(%r10), %ymm2
vmovdqa 4288(%r10), %ymm3
vpunpcklwd 4128(%r10), %ymm0, %ymm12
vpunpckhwd 4128(%r10), %ymm0, %ymm13
vpunpcklwd 4192(%r10), %ymm1, %ymm14
vpunpckhwd 4192(%r10), %ymm1, %ymm15
vpunpcklwd 4256(%r10), %ymm2, %ymm0
vpunpckhwd 4256(%r10), %ymm2, %ymm1
vpunpcklwd 4320(%r10), %ymm3, %ymm2
vpunpckhwd 4320(%r10), %ymm3, %ymm3
vmovdqa %ymm11, 0(%r8)
vpunpckldq %ymm14, %ymm12, %ymm11
vpunpckhdq %ymm14, %ymm12, %ymm12
vpunpckldq %ymm15, %ymm13, %ymm14
vpunpckhdq %ymm15, %ymm13, %ymm15
vpunpckldq %ymm2, %ymm0, %ymm13
vpunpckhdq %ymm2, %ymm0, %ymm0
vpunpckldq %ymm3, %ymm1, %ymm2
vpunpckhdq %ymm3, %ymm1, %ymm1
vpunpcklqdq %ymm13, %ymm11, %ymm3
vpunpckhqdq %ymm13, %ymm11, %ymm13
vpunpcklqdq %ymm0, %ymm12, %ymm11
vpunpckhqdq %ymm0, %ymm12, %ymm0
vpunpcklqdq %ymm2, %ymm14, %ymm12
vpunpckhqdq %ymm2, %ymm14, %ymm2
vpunpcklqdq %ymm1, %ymm15, %ymm14
vpunpckhqdq %ymm1, %ymm15, %ymm1
vinserti128 $1, %xmm3, %ymm4, %ymm15
vmovdqa %ymm15, 64(%r12)
vinserti128 $1, %xmm13, %ymm5, %ymm15
vmovdqa %ymm15, 256(%r12)
vinserti128 $1, %xmm11, %ymm6, %ymm15
vmovdqa %ymm15, 448(%r12)
vinserti128 $1, %xmm0, %ymm7, %ymm15
vmovdqa %ymm15, 640(%r12)
vinserti128 $1, %xmm12, %ymm8, %ymm15
vmovdqa %ymm15, 832(%r12)
vinserti128 $1, %xmm2, %ymm9, %ymm15
vmovdqa %ymm15, 1024(%r12)
vinserti128 $1, %xmm14, %ymm10, %ymm15
vmovdqa %ymm15, 1216(%r12)
vpermq $78, %ymm4, %ymm4
vpermq $78, %ymm5, %ymm5
vpermq $78, %ymm6, %ymm6
vpermq $78, %ymm7, %ymm7
vpermq $78, %ymm8, %ymm8
vpermq $78, %ymm9, %ymm9
vpermq $78, %ymm10, %ymm10
vinserti128 $0, %xmm4, %ymm3, %ymm15
vmovdqa %ymm15, 1600(%r12)
vinserti128 $0, %xmm5, %ymm13, %ymm15
vmovdqa %ymm15, 1792(%r12)
vinserti128 $0, %xmm6, %ymm11, %ymm15
vmovdqa %ymm15, 1984(%r12)
vinserti128 $0, %xmm7, %ymm0, %ymm15
vmovdqa %ymm15, 2176(%r12)
vinserti128 $0, %xmm8, %ymm12, %ymm15
vmovdqa %ymm15, 2368(%r12)
vinserti128 $0, %xmm9, %ymm2, %ymm15
vmovdqa %ymm15, 2560(%r12)
vinserti128 $0, %xmm10, %ymm14, %ymm15
vmovdqa %ymm15, 2752(%r12)
vmovdqa 0(%r8), %ymm11
vinserti128 $1, %xmm1, %ymm11, %ymm14
vmovdqa %ymm14, 1408(%r12)
vpermq $78, %ymm11, %ymm11
vinserti128 $0, %xmm11, %ymm1, %ymm1
vmovdqa %ymm1, 2944(%r12)
vmovdqa 4224(%r10), %ymm0
vmovdqa 4288(%r10), %ymm1
vmovdqa 4352(%r10), %ymm2
vmovdqa 4416(%r10), %ymm3
vpunpcklwd 4256(%r10), %ymm0, %ymm4
vpunpckhwd 4256(%r10), %ymm0, %ymm5
vpunpcklwd 4320(%r10), %ymm1, %ymm6
vpunpckhwd 4320(%r10), %ymm1, %ymm7
vpunpcklwd 4384(%r10), %ymm2, %ymm8
vpunpckhwd 4384(%r10), %ymm2, %ymm9
vpunpcklwd 4448(%r10), %ymm3, %ymm10
vpunpckhwd 4448(%r10), %ymm3, %ymm11
vpunpckldq %ymm6, %ymm4, %ymm0
vpunpckhdq %ymm6, %ymm4, %ymm1
vpunpckldq %ymm7, %ymm5, %ymm2
vpunpckhdq %ymm7, %ymm5, %ymm3
vpunpckldq %ymm10, %ymm8, %ymm12
vpunpckhdq %ymm10, %ymm8, %ymm13
vpunpckldq %ymm11, %ymm9, %ymm14
vpunpckhdq %ymm11, %ymm9, %ymm15
vpunpcklqdq %ymm12, %ymm0, %ymm4
vpunpckhqdq %ymm12, %ymm0, %ymm5
vpunpcklqdq %ymm13, %ymm1, %ymm6
vpunpckhqdq %ymm13, %ymm1, %ymm7
vpunpcklqdq %ymm14, %ymm2, %ymm8
vpunpckhqdq %ymm14, %ymm2, %ymm9
vpunpcklqdq %ymm15, %ymm3, %ymm10
vpunpckhqdq %ymm15, %ymm3, %ymm11
vmovdqa 4480(%r10), %ymm0
vmovdqa 4544(%r10), %ymm1
vmovdqa 4608(%r10), %ymm2
vmovdqa 4672(%r10), %ymm3
vpunpcklwd 4512(%r10), %ymm0, %ymm12
vpunpckhwd 4512(%r10), %ymm0, %ymm13
vpunpcklwd 4576(%r10), %ymm1, %ymm14
vpunpckhwd 4576(%r10), %ymm1, %ymm15
vpunpcklwd 4640(%r10), %ymm2, %ymm0
vpunpckhwd 4640(%r10), %ymm2, %ymm1
vpunpcklwd 4704(%r10), %ymm3, %ymm2
vpunpckhwd 4704(%r10), %ymm3, %ymm3
vmovdqa %ymm11, 0(%r8)
vpunpckldq %ymm14, %ymm12, %ymm11
vpunpckhdq %ymm14, %ymm12, %ymm12
vpunpckldq %ymm15, %ymm13, %ymm14
vpunpckhdq %ymm15, %ymm13, %ymm15
vpunpckldq %ymm2, %ymm0, %ymm13
vpunpckhdq %ymm2, %ymm0, %ymm0
vpunpckldq %ymm3, %ymm1, %ymm2
vpunpckhdq %ymm3, %ymm1, %ymm1
vpunpcklqdq %ymm13, %ymm11, %ymm3
vpunpckhqdq %ymm13, %ymm11, %ymm13
vpunpcklqdq %ymm0, %ymm12, %ymm11
vpunpckhqdq %ymm0, %ymm12, %ymm0
vpunpcklqdq %ymm2, %ymm14, %ymm12
vpunpckhqdq %ymm2, %ymm14, %ymm2
vpunpcklqdq %ymm1, %ymm15, %ymm14
vpunpckhqdq %ymm1, %ymm15, %ymm1
vinserti128 $1, %xmm3, %ymm4, %ymm15
vmovdqa %ymm15, 96(%r12)
vinserti128 $1, %xmm13, %ymm5, %ymm15
vmovdqa %ymm15, 288(%r12)
vinserti128 $1, %xmm11, %ymm6, %ymm15
vmovdqa %ymm15, 480(%r12)
vinserti128 $1, %xmm0, %ymm7, %ymm15
vmovdqa %ymm15, 672(%r12)
vinserti128 $1, %xmm12, %ymm8, %ymm15
vmovdqa %ymm15, 864(%r12)
vinserti128 $1, %xmm2, %ymm9, %ymm15
vmovdqa %ymm15, 1056(%r12)
vinserti128 $1, %xmm14, %ymm10, %ymm15
vmovdqa %ymm15, 1248(%r12)
vpermq $78, %ymm4, %ymm4
vpermq $78, %ymm5, %ymm5
vpermq $78, %ymm6, %ymm6
vpermq $78, %ymm7, %ymm7
vpermq $78, %ymm8, %ymm8
vpermq $78, %ymm9, %ymm9
vpermq $78, %ymm10, %ymm10
vinserti128 $0, %xmm4, %ymm3, %ymm15
vmovdqa %ymm15, 1632(%r12)
vinserti128 $0, %xmm5, %ymm13, %ymm15
vmovdqa %ymm15, 1824(%r12)
vinserti128 $0, %xmm6, %ymm11, %ymm15
vmovdqa %ymm15, 2016(%r12)
vinserti128 $0, %xmm7, %ymm0, %ymm15
vmovdqa %ymm15, 2208(%r12)
vinserti128 $0, %xmm8, %ymm12, %ymm15
vmovdqa %ymm15, 2400(%r12)
vinserti128 $0, %xmm9, %ymm2, %ymm15
vmovdqa %ymm15, 2592(%r12)
vinserti128 $0, %xmm10, %ymm14, %ymm15
vmovdqa %ymm15, 2784(%r12)
vmovdqa 0(%r8), %ymm11
vinserti128 $1, %xmm1, %ymm11, %ymm14
vmovdqa %ymm14, 1440(%r12)
vpermq $78, %ymm11, %ymm11
vinserti128 $0, %xmm11, %ymm1, %ymm1
vmovdqa %ymm1, 2976(%r12)
vmovdqa 4736(%r10), %ymm0
vmovdqa 4800(%r10), %ymm1
vmovdqa 4864(%r10), %ymm2
vmovdqa 4928(%r10), %ymm3
vpunpcklwd 4768(%r10), %ymm0, %ymm4
vpunpckhwd 4768(%r10), %ymm0, %ymm5
vpunpcklwd 4832(%r10), %ymm1, %ymm6
vpunpckhwd 4832(%r10), %ymm1, %ymm7
vpunpcklwd 4896(%r10), %ymm2, %ymm8
vpunpckhwd 4896(%r10), %ymm2, %ymm9
vpunpcklwd 4960(%r10), %ymm3, %ymm10
vpunpckhwd 4960(%r10), %ymm3, %ymm11
vpunpckldq %ymm6, %ymm4, %ymm0
vpunpckhdq %ymm6, %ymm4, %ymm1
vpunpckldq %ymm7, %ymm5, %ymm2
vpunpckhdq %ymm7, %ymm5, %ymm3
vpunpckldq %ymm10, %ymm8, %ymm12
vpunpckhdq %ymm10, %ymm8, %ymm13
vpunpckldq %ymm11, %ymm9, %ymm14
vpunpckhdq %ymm11, %ymm9, %ymm15
vpunpcklqdq %ymm12, %ymm0, %ymm4
vpunpckhqdq %ymm12, %ymm0, %ymm5
vpunpcklqdq %ymm13, %ymm1, %ymm6
vpunpckhqdq %ymm13, %ymm1, %ymm7
vpunpcklqdq %ymm14, %ymm2, %ymm8
vpunpckhqdq %ymm14, %ymm2, %ymm9
vpunpcklqdq %ymm15, %ymm3, %ymm10
vpunpckhqdq %ymm15, %ymm3, %ymm11
vmovdqa 4992(%r10), %ymm0
vmovdqa 5056(%r10), %ymm1
vmovdqa 5120(%r10), %ymm2
vmovdqa 5184(%r10), %ymm3
vpunpcklwd 5024(%r10), %ymm0, %ymm12
vpunpckhwd 5024(%r10), %ymm0, %ymm13
vpunpcklwd 5088(%r10), %ymm1, %ymm14
vpunpckhwd 5088(%r10), %ymm1, %ymm15
vpunpcklwd 5152(%r10), %ymm2, %ymm0
vpunpckhwd 5152(%r10), %ymm2, %ymm1
vpunpcklwd 5216(%r10), %ymm3, %ymm2
vpunpckhwd 5216(%r10), %ymm3, %ymm3
vmovdqa %ymm11, 0(%r8)
vpunpckldq %ymm14, %ymm12, %ymm11
vpunpckhdq %ymm14, %ymm12, %ymm12
vpunpckldq %ymm15, %ymm13, %ymm14
vpunpckhdq %ymm15, %ymm13, %ymm15
vpunpckldq %ymm2, %ymm0, %ymm13
vpunpckhdq %ymm2, %ymm0, %ymm0
vpunpckldq %ymm3, %ymm1, %ymm2
vpunpckhdq %ymm3, %ymm1, %ymm1
vpunpcklqdq %ymm13, %ymm11, %ymm3
vpunpckhqdq %ymm13, %ymm11, %ymm13
vpunpcklqdq %ymm0, %ymm12, %ymm11
vpunpckhqdq %ymm0, %ymm12, %ymm0
vpunpcklqdq %ymm2, %ymm14, %ymm12
vpunpckhqdq %ymm2, %ymm14, %ymm2
vpunpcklqdq %ymm1, %ymm15, %ymm14
vpunpckhqdq %ymm1, %ymm15, %ymm1
vinserti128 $1, %xmm3, %ymm4, %ymm15
vmovdqa %ymm15, 128(%r12)
vinserti128 $1, %xmm13, %ymm5, %ymm15
vmovdqa %ymm15, 320(%r12)
vinserti128 $1, %xmm11, %ymm6, %ymm15
vmovdqa %ymm15, 512(%r12)
vinserti128 $1, %xmm0, %ymm7, %ymm15
vmovdqa %ymm15, 704(%r12)
vinserti128 $1, %xmm12, %ymm8, %ymm15
vmovdqa %ymm15, 896(%r12)
vinserti128 $1, %xmm2, %ymm9, %ymm15
vmovdqa %ymm15, 1088(%r12)
vinserti128 $1, %xmm14, %ymm10, %ymm15
vmovdqa %ymm15, 1280(%r12)
vpermq $78, %ymm4, %ymm4
vpermq $78, %ymm5, %ymm5
vpermq $78, %ymm6, %ymm6
vpermq $78, %ymm7, %ymm7
vpermq $78, %ymm8, %ymm8
vpermq $78, %ymm9, %ymm9
vpermq $78, %ymm10, %ymm10
vinserti128 $0, %xmm4, %ymm3, %ymm15
vmovdqa %ymm15, 1664(%r12)
vinserti128 $0, %xmm5, %ymm13, %ymm15
vmovdqa %ymm15, 1856(%r12)
vinserti128 $0, %xmm6, %ymm11, %ymm15
vmovdqa %ymm15, 2048(%r12)
vinserti128 $0, %xmm7, %ymm0, %ymm15
vmovdqa %ymm15, 2240(%r12)
vinserti128 $0, %xmm8, %ymm12, %ymm15
vmovdqa %ymm15, 2432(%r12)
vinserti128 $0, %xmm9, %ymm2, %ymm15
vmovdqa %ymm15, 2624(%r12)
vinserti128 $0, %xmm10, %ymm14, %ymm15
vmovdqa %ymm15, 2816(%r12)
vmovdqa 0(%r8), %ymm11
vinserti128 $1, %xmm1, %ymm11, %ymm14
vmovdqa %ymm14, 1472(%r12)
vpermq $78, %ymm11, %ymm11
vinserti128 $0, %xmm11, %ymm1, %ymm1
vmovdqa %ymm1, 3008(%r12)
vmovdqa 5248(%r10), %ymm0
vmovdqa 5312(%r10), %ymm1
vmovdqa 5376(%r10), %ymm2
vmovdqa 5440(%r10), %ymm3
vpunpcklwd 5280(%r10), %ymm0, %ymm4
vpunpckhwd 5280(%r10), %ymm0, %ymm5
vpunpcklwd 5344(%r10), %ymm1, %ymm6
vpunpckhwd 5344(%r10), %ymm1, %ymm7
vpunpcklwd 5408(%r10), %ymm2, %ymm8
vpunpckhwd 5408(%r10), %ymm2, %ymm9
vpunpcklwd 5472(%r10), %ymm3, %ymm10
vpunpckhwd 5472(%r10), %ymm3, %ymm11
vpunpckldq %ymm6, %ymm4, %ymm0
vpunpckhdq %ymm6, %ymm4, %ymm1
vpunpckldq %ymm7, %ymm5, %ymm2
vpunpckhdq %ymm7, %ymm5, %ymm3
vpunpckldq %ymm10, %ymm8, %ymm12
vpunpckhdq %ymm10, %ymm8, %ymm13
vpunpckldq %ymm11, %ymm9, %ymm14
vpunpckhdq %ymm11, %ymm9, %ymm15
vpunpcklqdq %ymm12, %ymm0, %ymm4
vpunpckhqdq %ymm12, %ymm0, %ymm5
vpunpcklqdq %ymm13, %ymm1, %ymm6
vpunpckhqdq %ymm13, %ymm1, %ymm7
vpunpcklqdq %ymm14, %ymm2, %ymm8
vpunpckhqdq %ymm14, %ymm2, %ymm9
vpunpcklqdq %ymm15, %ymm3, %ymm10
vpunpckhqdq %ymm15, %ymm3, %ymm11
vmovdqa 5504(%r10), %ymm0
vmovdqa 5568(%r10), %ymm1
vmovdqa 5632(%r10), %ymm2
vmovdqa 5696(%r10), %ymm3
vpunpcklwd 5536(%r10), %ymm0, %ymm12
vpunpckhwd 5536(%r10), %ymm0, %ymm13
vpunpcklwd 5600(%r10), %ymm1, %ymm14
vpunpckhwd 5600(%r10), %ymm1, %ymm15
vpunpcklwd 5664(%r10), %ymm2, %ymm0
vpunpckhwd 5664(%r10), %ymm2, %ymm1
vpunpcklwd 5728(%r10), %ymm3, %ymm2
vpunpckhwd 5728(%r10), %ymm3, %ymm3
vmovdqa %ymm11, 0(%r8)
vpunpckldq %ymm14, %ymm12, %ymm11
vpunpckhdq %ymm14, %ymm12, %ymm12
vpunpckldq %ymm15, %ymm13, %ymm14
vpunpckhdq %ymm15, %ymm13, %ymm15
vpunpckldq %ymm2, %ymm0, %ymm13
vpunpckhdq %ymm2, %ymm0, %ymm0
vpunpckldq %ymm3, %ymm1, %ymm2
vpunpckhdq %ymm3, %ymm1, %ymm1
vpunpcklqdq %ymm13, %ymm11, %ymm3
vpunpckhqdq %ymm13, %ymm11, %ymm13
vpunpcklqdq %ymm0, %ymm12, %ymm11
vpunpckhqdq %ymm0, %ymm12, %ymm0
vpunpcklqdq %ymm2, %ymm14, %ymm12
vpunpckhqdq %ymm2, %ymm14, %ymm2
vpunpcklqdq %ymm1, %ymm15, %ymm14
vpunpckhqdq %ymm1, %ymm15, %ymm1
vinserti128 $1, %xmm3, %ymm4, %ymm15
vmovdqa %ymm15, 160(%r12)
vinserti128 $1, %xmm13, %ymm5, %ymm15
vmovdqa %ymm15, 352(%r12)
vinserti128 $1, %xmm11, %ymm6, %ymm15
vmovdqa %ymm15, 544(%r12)
vinserti128 $1, %xmm0, %ymm7, %ymm15
vmovdqa %ymm15, 736(%r12)
vinserti128 $1, %xmm12, %ymm8, %ymm15
vmovdqa %ymm15, 928(%r12)
vinserti128 $1, %xmm2, %ymm9, %ymm15
vmovdqa %ymm15, 1120(%r12)
vinserti128 $1, %xmm14, %ymm10, %ymm15
vmovdqa %ymm15, 1312(%r12)
vpermq $78, %ymm4, %ymm4
vpermq $78, %ymm5, %ymm5
vpermq $78, %ymm6, %ymm6
vpermq $78, %ymm7, %ymm7
vpermq $78, %ymm8, %ymm8
vpermq $78, %ymm9, %ymm9
vpermq $78, %ymm10, %ymm10
vinserti128 $0, %xmm4, %ymm3, %ymm15
vmovdqa %ymm15, 1696(%r12)
vinserti128 $0, %xmm5, %ymm13, %ymm15
vmovdqa %ymm15, 1888(%r12)
vinserti128 $0, %xmm6, %ymm11, %ymm15
vmovdqa %ymm15, 2080(%r12)
vinserti128 $0, %xmm7, %ymm0, %ymm15
vmovdqa %ymm15, 2272(%r12)
vinserti128 $0, %xmm8, %ymm12, %ymm15
vmovdqa %ymm15, 2464(%r12)
vinserti128 $0, %xmm9, %ymm2, %ymm15
vmovdqa %ymm15, 2656(%r12)
vinserti128 $0, %xmm10, %ymm14, %ymm15
vmovdqa %ymm15, 2848(%r12)
vmovdqa 0(%r8), %ymm11
vinserti128 $1, %xmm1, %ymm11, %ymm14
vmovdqa %ymm14, 1504(%r12)
vpermq $78, %ymm11, %ymm11
vinserti128 $0, %xmm11, %ymm1, %ymm1
vmovdqa %ymm1, 3040(%r12)
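
// End of one loop iteration: restore %r8, advance %rax and %r11 by 1536 bytes
// and the transpose output pointer %r12 by 3072 bytes, then iterate.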
addq $32, %r8
add $1536, %rax
add $1536, %r11
add $3072, %r12
dec %ecx
jnz karatsuba_loop_4eced63f144beffcb0247f9c6f67d165
sub $12288, %r12
add $9408-2400, %r8
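
// After the loop: %r12 is rewound over the data written above and %r8 is
// moved to the workspace used by the recombination below. Clear the rows at
// 1792..2784(%r8); the wrap-around words spilled via shuf48_16 further down
// land in this region.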
vpxor %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 1792(%r8)
vmovdqa %ymm0, 1824(%r8)
vmovdqa %ymm0, 1856(%r8)
vmovdqa %ymm0, 1888(%r8)
vmovdqa %ymm0, 1920(%r8)
vmovdqa %ymm0, 1952(%r8)
vmovdqa %ymm0, 1984(%r8)
vmovdqa %ymm0, 2016(%r8)
vmovdqa %ymm0, 2048(%r8)
vmovdqa %ymm0, 2080(%r8)
vmovdqa %ymm0, 2112(%r8)
vmovdqa %ymm0, 2144(%r8)
vmovdqa %ymm0, 2176(%r8)
vmovdqa %ymm0, 2208(%r8)
vmovdqa %ymm0, 2240(%r8)
vmovdqa %ymm0, 2272(%r8)
vmovdqa %ymm0, 2304(%r8)
vmovdqa %ymm0, 2336(%r8)
vmovdqa %ymm0, 2368(%r8)
vmovdqa %ymm0, 2400(%r8)
vmovdqa %ymm0, 2432(%r8)
vmovdqa %ymm0, 2464(%r8)
vmovdqa %ymm0, 2496(%r8)
vmovdqa %ymm0, 2528(%r8)
vmovdqa %ymm0, 2560(%r8)
vmovdqa %ymm0, 2592(%r8)
vmovdqa %ymm0, 2624(%r8)
vmovdqa %ymm0, 2656(%r8)
vmovdqa %ymm0, 2688(%r8)
vmovdqa %ymm0, 2720(%r8)
vmovdqa %ymm0, 2752(%r8)
vmovdqa %ymm0, 2784(%r8)
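
// Constants for the interpolation below: const729 (= 3^6), const3_inv and
// const5_inv (presumably the inverses of 3 and 5 in the 16-bit coefficient
// domain), and const9.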
vmovdqa const729(%rip), %ymm15
vmovdqa const3_inv(%rip), %ymm14
vmovdqa const5_inv(%rip), %ymm13
vmovdqa const9(%rip), %ymm12
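
// Recombine the transposed evaluations: each 1728-byte group in %r12 (18 rows
// of 32 bytes) is folded Karatsuba-style into 8 rows, stored consecutively at
// 0..1760(%r8).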
vmovdqa 96(%r12), %ymm0
vpsubw 192(%r12), %ymm0, %ymm0
vmovdqa 480(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 288(%r12), %ymm1, %ymm1
vpsubw 0(%r12), %ymm0, %ymm0
vpaddw 384(%r12), %ymm0, %ymm0
vmovdqa 672(%r12), %ymm2
vpsubw 768(%r12), %ymm2, %ymm2
vmovdqa 1056(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 864(%r12), %ymm3, %ymm3
vpsubw 576(%r12), %ymm2, %ymm2
vpaddw 960(%r12), %ymm2, %ymm2
vmovdqa 1248(%r12), %ymm4
vpsubw 1344(%r12), %ymm4, %ymm4
vmovdqa 1632(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 1440(%r12), %ymm5, %ymm5
vpsubw 1152(%r12), %ymm4, %ymm4
vpaddw 1536(%r12), %ymm4, %ymm4
vpsubw 576(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 0(%r12), %ymm1, %ymm1
vpaddw 1152(%r12), %ymm1, %ymm1
vmovdqa 288(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 1440(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 864(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 0(%r12), %ymm8
vmovdqa 864(%r12), %ymm9
vmovdqa %ymm8, 0(%r8)
vmovdqa %ymm0, 32(%r8)
vmovdqa %ymm1, 64(%r8)
vmovdqa %ymm7, 96(%r8)
vmovdqa %ymm5, 128(%r8)
vmovdqa %ymm2, 160(%r8)
vmovdqa %ymm3, 192(%r8)
vmovdqa %ymm9, 224(%r8)
vmovdqa 1824(%r12), %ymm0
vpsubw 1920(%r12), %ymm0, %ymm0
vmovdqa 2208(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 2016(%r12), %ymm1, %ymm1
vpsubw 1728(%r12), %ymm0, %ymm0
vpaddw 2112(%r12), %ymm0, %ymm0
vmovdqa 2400(%r12), %ymm2
vpsubw 2496(%r12), %ymm2, %ymm2
vmovdqa 2784(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 2592(%r12), %ymm3, %ymm3
vpsubw 2304(%r12), %ymm2, %ymm2
vpaddw 2688(%r12), %ymm2, %ymm2
vmovdqa 2976(%r12), %ymm4
vpsubw 3072(%r12), %ymm4, %ymm4
vmovdqa 3360(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 3168(%r12), %ymm5, %ymm5
vpsubw 2880(%r12), %ymm4, %ymm4
vpaddw 3264(%r12), %ymm4, %ymm4
vpsubw 2304(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 1728(%r12), %ymm1, %ymm1
vpaddw 2880(%r12), %ymm1, %ymm1
vmovdqa 2016(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 3168(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 2592(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 1728(%r12), %ymm8
vmovdqa 2592(%r12), %ymm9
vmovdqa %ymm8, 256(%r8)
vmovdqa %ymm0, 288(%r8)
vmovdqa %ymm1, 320(%r8)
vmovdqa %ymm7, 352(%r8)
vmovdqa %ymm5, 384(%r8)
vmovdqa %ymm2, 416(%r8)
vmovdqa %ymm3, 448(%r8)
vmovdqa %ymm9, 480(%r8)
vmovdqa 3552(%r12), %ymm0
vpsubw 3648(%r12), %ymm0, %ymm0
vmovdqa 3936(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 3744(%r12), %ymm1, %ymm1
vpsubw 3456(%r12), %ymm0, %ymm0
vpaddw 3840(%r12), %ymm0, %ymm0
vmovdqa 4128(%r12), %ymm2
vpsubw 4224(%r12), %ymm2, %ymm2
vmovdqa 4512(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 4320(%r12), %ymm3, %ymm3
vpsubw 4032(%r12), %ymm2, %ymm2
vpaddw 4416(%r12), %ymm2, %ymm2
vmovdqa 4704(%r12), %ymm4
vpsubw 4800(%r12), %ymm4, %ymm4
vmovdqa 5088(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 4896(%r12), %ymm5, %ymm5
vpsubw 4608(%r12), %ymm4, %ymm4
vpaddw 4992(%r12), %ymm4, %ymm4
vpsubw 4032(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 3456(%r12), %ymm1, %ymm1
vpaddw 4608(%r12), %ymm1, %ymm1
vmovdqa 3744(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 4896(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 4320(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 3456(%r12), %ymm8
vmovdqa 4320(%r12), %ymm9
vmovdqa %ymm8, 512(%r8)
vmovdqa %ymm0, 544(%r8)
vmovdqa %ymm1, 576(%r8)
vmovdqa %ymm7, 608(%r8)
vmovdqa %ymm5, 640(%r8)
vmovdqa %ymm2, 672(%r8)
vmovdqa %ymm3, 704(%r8)
vmovdqa %ymm9, 736(%r8)
vmovdqa 5280(%r12), %ymm0
vpsubw 5376(%r12), %ymm0, %ymm0
vmovdqa 5664(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 5472(%r12), %ymm1, %ymm1
vpsubw 5184(%r12), %ymm0, %ymm0
vpaddw 5568(%r12), %ymm0, %ymm0
vmovdqa 5856(%r12), %ymm2
vpsubw 5952(%r12), %ymm2, %ymm2
vmovdqa 6240(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 6048(%r12), %ymm3, %ymm3
vpsubw 5760(%r12), %ymm2, %ymm2
vpaddw 6144(%r12), %ymm2, %ymm2
vmovdqa 6432(%r12), %ymm4
vpsubw 6528(%r12), %ymm4, %ymm4
vmovdqa 6816(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 6624(%r12), %ymm5, %ymm5
vpsubw 6336(%r12), %ymm4, %ymm4
vpaddw 6720(%r12), %ymm4, %ymm4
vpsubw 5760(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 5184(%r12), %ymm1, %ymm1
vpaddw 6336(%r12), %ymm1, %ymm1
vmovdqa 5472(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 6624(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 6048(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 5184(%r12), %ymm8
vmovdqa 6048(%r12), %ymm9
vmovdqa %ymm8, 768(%r8)
vmovdqa %ymm0, 800(%r8)
vmovdqa %ymm1, 832(%r8)
vmovdqa %ymm7, 864(%r8)
vmovdqa %ymm5, 896(%r8)
vmovdqa %ymm2, 928(%r8)
vmovdqa %ymm3, 960(%r8)
vmovdqa %ymm9, 992(%r8)
vmovdqa 7008(%r12), %ymm0
vpsubw 7104(%r12), %ymm0, %ymm0
vmovdqa 7392(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 7200(%r12), %ymm1, %ymm1
vpsubw 6912(%r12), %ymm0, %ymm0
vpaddw 7296(%r12), %ymm0, %ymm0
vmovdqa 7584(%r12), %ymm2
vpsubw 7680(%r12), %ymm2, %ymm2
vmovdqa 7968(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 7776(%r12), %ymm3, %ymm3
vpsubw 7488(%r12), %ymm2, %ymm2
vpaddw 7872(%r12), %ymm2, %ymm2
vmovdqa 8160(%r12), %ymm4
vpsubw 8256(%r12), %ymm4, %ymm4
vmovdqa 8544(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 8352(%r12), %ymm5, %ymm5
vpsubw 8064(%r12), %ymm4, %ymm4
vpaddw 8448(%r12), %ymm4, %ymm4
vpsubw 7488(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 6912(%r12), %ymm1, %ymm1
vpaddw 8064(%r12), %ymm1, %ymm1
vmovdqa 7200(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 8352(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 7776(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 6912(%r12), %ymm8
vmovdqa 7776(%r12), %ymm9
vmovdqa %ymm8, 1024(%r8)
vmovdqa %ymm0, 1056(%r8)
vmovdqa %ymm1, 1088(%r8)
vmovdqa %ymm7, 1120(%r8)
vmovdqa %ymm5, 1152(%r8)
vmovdqa %ymm2, 1184(%r8)
vmovdqa %ymm3, 1216(%r8)
vmovdqa %ymm9, 1248(%r8)
vmovdqa 8736(%r12), %ymm0
vpsubw 8832(%r12), %ymm0, %ymm0
vmovdqa 9120(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 8928(%r12), %ymm1, %ymm1
vpsubw 8640(%r12), %ymm0, %ymm0
vpaddw 9024(%r12), %ymm0, %ymm0
vmovdqa 9312(%r12), %ymm2
vpsubw 9408(%r12), %ymm2, %ymm2
vmovdqa 9696(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 9504(%r12), %ymm3, %ymm3
vpsubw 9216(%r12), %ymm2, %ymm2
vpaddw 9600(%r12), %ymm2, %ymm2
vmovdqa 9888(%r12), %ymm4
vpsubw 9984(%r12), %ymm4, %ymm4
vmovdqa 10272(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 10080(%r12), %ymm5, %ymm5
vpsubw 9792(%r12), %ymm4, %ymm4
vpaddw 10176(%r12), %ymm4, %ymm4
vpsubw 9216(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 8640(%r12), %ymm1, %ymm1
vpaddw 9792(%r12), %ymm1, %ymm1
vmovdqa 8928(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 10080(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 9504(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 8640(%r12), %ymm8
vmovdqa 9504(%r12), %ymm9
vmovdqa %ymm8, 1280(%r8)
vmovdqa %ymm0, 1312(%r8)
vmovdqa %ymm1, 1344(%r8)
vmovdqa %ymm7, 1376(%r8)
vmovdqa %ymm5, 1408(%r8)
vmovdqa %ymm2, 1440(%r8)
vmovdqa %ymm3, 1472(%r8)
vmovdqa %ymm9, 1504(%r8)
vmovdqa 10464(%r12), %ymm0
vpsubw 10560(%r12), %ymm0, %ymm0
vmovdqa 10848(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 10656(%r12), %ymm1, %ymm1
vpsubw 10368(%r12), %ymm0, %ymm0
vpaddw 10752(%r12), %ymm0, %ymm0
vmovdqa 11040(%r12), %ymm2
vpsubw 11136(%r12), %ymm2, %ymm2
vmovdqa 11424(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 11232(%r12), %ymm3, %ymm3
vpsubw 10944(%r12), %ymm2, %ymm2
vpaddw 11328(%r12), %ymm2, %ymm2
vmovdqa 11616(%r12), %ymm4
vpsubw 11712(%r12), %ymm4, %ymm4
vmovdqa 12000(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 11808(%r12), %ymm5, %ymm5
vpsubw 11520(%r12), %ymm4, %ymm4
vpaddw 11904(%r12), %ymm4, %ymm4
vpsubw 10944(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 10368(%r12), %ymm1, %ymm1
vpaddw 11520(%r12), %ymm1, %ymm1
vmovdqa 10656(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 11808(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 11232(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 10368(%r12), %ymm8
vmovdqa 11232(%r12), %ymm9
vmovdqa %ymm8, 1536(%r8)
vmovdqa %ymm0, 1568(%r8)
vmovdqa %ymm1, 1600(%r8)
vmovdqa %ymm7, 1632(%r8)
vmovdqa %ymm5, 1664(%r8)
vmovdqa %ymm2, 1696(%r8)
vmovdqa %ymm3, 1728(%r8)
vmovdqa %ymm9, 1760(%r8)
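
// Final interpolation and reduction, one 32-byte column group at a time:
// widen the rows at 0/256/512/768/1024/1280/1536(%r8) to 32-bit lanes (via
// const0), solve for the result coefficients with the constants loaded above
// (a Toom-style interpolation, judging by the 3^-1/5^-1/729 factors), fold
// the wrap-around words selected by shuf48_16 and the 3/5 masks back in
// (spilling the leftovers to 2048..2624(%r8)), then mask with mask_mod8192
// and store the four result quarters to the destination at %rdi
// (0/352/704/1056 for the first group, each later group 88 bytes further on).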
vmovdqa 0(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm10
vpunpckhwd const0(%rip), %ymm11, %ymm9
vpslld $1, %ymm10, %ymm10
vpslld $1, %ymm9, %ymm9
vmovdqa 256(%r8), %ymm8
vpunpcklwd const0(%rip), %ymm8, %ymm7
vpunpckhwd const0(%rip), %ymm8, %ymm8
vmovdqa 512(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm5
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm5, %ymm7, %ymm4
vpaddd %ymm6, %ymm8, %ymm3
vpsubd %ymm10, %ymm4, %ymm4
vpsubd %ymm9, %ymm3, %ymm3
vpsubd %ymm5, %ymm7, %ymm5
vpsubd %ymm6, %ymm8, %ymm6
vpsrld $1, %ymm5, %ymm5
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm5, %ymm5
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm5, %ymm6
vmovdqa 1536(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm8
vpunpckhwd const0(%rip), %ymm5, %ymm7
vpslld $1, %ymm8, %ymm8
vpslld $1, %ymm7, %ymm7
vpsubd %ymm8, %ymm4, %ymm4
vpsubd %ymm7, %ymm3, %ymm3
vpsrld $1, %ymm4, %ymm4
vpsrld $1, %ymm3, %ymm3
vpand mask32_to_16(%rip), %ymm4, %ymm4
vpand mask32_to_16(%rip), %ymm3, %ymm3
vpackusdw %ymm3, %ymm4, %ymm3
vmovdqa 768(%r8), %ymm4
vpaddw 1024(%r8), %ymm4, %ymm7
vpsubw 1024(%r8), %ymm4, %ymm4
vpsrlw $2, %ymm4, %ymm4
vpsubw %ymm6, %ymm4, %ymm4
vpmullw %ymm14, %ymm4, %ymm4
vpsllw $1, %ymm11, %ymm8
vpsubw %ymm8, %ymm7, %ymm8
vpsllw $7, %ymm5, %ymm7
vpsubw %ymm7, %ymm8, %ymm7
vpsrlw $3, %ymm7, %ymm7
vpsubw %ymm3, %ymm7, %ymm7
vmovdqa 1280(%r8), %ymm8
vpsubw %ymm11, %ymm8, %ymm8
vpmullw %ymm15, %ymm5, %ymm9
vpsubw %ymm9, %ymm8, %ymm9
vpmullw %ymm14, %ymm7, %ymm7
vpsubw %ymm7, %ymm3, %ymm3
vpmullw %ymm12, %ymm7, %ymm8
vpaddw %ymm8, %ymm3, %ymm8
vpmullw %ymm12, %ymm8, %ymm8
vpsubw %ymm8, %ymm9, %ymm8
vpmullw %ymm14, %ymm8, %ymm8
vpsubw %ymm6, %ymm8, %ymm8
vpsrlw $3, %ymm8, %ymm8
vpsubw %ymm4, %ymm8, %ymm8
vpsubw %ymm8, %ymm4, %ymm4
vpsubw %ymm4, %ymm6, %ymm6
vpmullw %ymm13, %ymm8, %ymm8
vpsubw %ymm8, %ymm6, %ymm6
vpshufb shuf48_16(%rip), %ymm7, %ymm7
vpand mask3_5_3_5(%rip), %ymm7, %ymm9
vpand mask5_3_5_3(%rip), %ymm7, %ymm7
vpermq $206, %ymm9, %ymm9
vpand mask_keephigh(%rip), %ymm9, %ymm10
vpor %ymm10, %ymm7, %ymm7
vpaddw %ymm7, %ymm11, %ymm11
vmovdqa %xmm9, 2048(%r8)
vpshufb shuf48_16(%rip), %ymm8, %ymm8
vpand mask3_5_3_5(%rip), %ymm8, %ymm9
vpand mask5_3_5_3(%rip), %ymm8, %ymm8
vpermq $206, %ymm9, %ymm9
vpand mask_keephigh(%rip), %ymm9, %ymm10
vpor %ymm10, %ymm8, %ymm8
vpaddw %ymm8, %ymm6, %ymm6
vmovdqa %xmm9, 2304(%r8)
vpshufb shuf48_16(%rip), %ymm5, %ymm5
vpand mask3_5_3_5(%rip), %ymm5, %ymm9
vpand mask5_3_5_3(%rip), %ymm5, %ymm5
vpermq $206, %ymm9, %ymm9
vpand mask_keephigh(%rip), %ymm9, %ymm10
vpor %ymm10, %ymm5, %ymm5
vpaddw %ymm5, %ymm3, %ymm3
vmovdqa %xmm9, 2560(%r8)
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 0(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 352(%rdi)
vpand mask_mod8192(%rip), %ymm3, %ymm3
vmovdqu %ymm3, 704(%rdi)
vpand mask_mod8192(%rip), %ymm4, %ymm4
vmovdqu %ymm4, 1056(%rdi)
vmovdqa 32(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm8
vpunpckhwd const0(%rip), %ymm5, %ymm7
vpslld $1, %ymm8, %ymm8
vpslld $1, %ymm7, %ymm7
vmovdqa 288(%r8), %ymm4
vpunpcklwd const0(%rip), %ymm4, %ymm3
vpunpckhwd const0(%rip), %ymm4, %ymm4
vmovdqa 544(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm11
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm11, %ymm3, %ymm9
vpaddd %ymm6, %ymm4, %ymm10
vpsubd %ymm8, %ymm9, %ymm9
vpsubd %ymm7, %ymm10, %ymm10
vpsubd %ymm11, %ymm3, %ymm11
vpsubd %ymm6, %ymm4, %ymm6
vpsrld $1, %ymm11, %ymm11
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm11, %ymm11
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm11, %ymm6
vmovdqa 1568(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm4
vpunpckhwd const0(%rip), %ymm11, %ymm3
vpslld $1, %ymm4, %ymm4
vpslld $1, %ymm3, %ymm3
vpsubd %ymm4, %ymm9, %ymm9
vpsubd %ymm3, %ymm10, %ymm10
vpsrld $1, %ymm9, %ymm9
vpsrld $1, %ymm10, %ymm10
vpand mask32_to_16(%rip), %ymm9, %ymm9
vpand mask32_to_16(%rip), %ymm10, %ymm10
vpackusdw %ymm10, %ymm9, %ymm10
vmovdqa 800(%r8), %ymm9
vpaddw 1056(%r8), %ymm9, %ymm3
vpsubw 1056(%r8), %ymm9, %ymm9
vpsrlw $2, %ymm9, %ymm9
vpsubw %ymm6, %ymm9, %ymm9
vpmullw %ymm14, %ymm9, %ymm9
vpsllw $1, %ymm5, %ymm4
vpsubw %ymm4, %ymm3, %ymm4
vpsllw $7, %ymm11, %ymm3
vpsubw %ymm3, %ymm4, %ymm3
vpsrlw $3, %ymm3, %ymm3
vpsubw %ymm10, %ymm3, %ymm3
vmovdqa 1312(%r8), %ymm4
vpsubw %ymm5, %ymm4, %ymm4
vpmullw %ymm15, %ymm11, %ymm7
vpsubw %ymm7, %ymm4, %ymm7
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm3, %ymm10, %ymm10
vpmullw %ymm12, %ymm3, %ymm4
vpaddw %ymm4, %ymm10, %ymm4
vpmullw %ymm12, %ymm4, %ymm4
vpsubw %ymm4, %ymm7, %ymm4
vpmullw %ymm14, %ymm4, %ymm4
vpsubw %ymm6, %ymm4, %ymm4
vpsrlw $3, %ymm4, %ymm4
vpsubw %ymm9, %ymm4, %ymm4
vpsubw %ymm4, %ymm9, %ymm9
vpsubw %ymm9, %ymm6, %ymm6
vpmullw %ymm13, %ymm4, %ymm4
vpsubw %ymm4, %ymm6, %ymm6
vpshufb shuf48_16(%rip), %ymm3, %ymm3
vpand mask3_5_3_5(%rip), %ymm3, %ymm7
vpand mask5_3_5_3(%rip), %ymm3, %ymm3
vpermq $206, %ymm7, %ymm7
vpand mask_keephigh(%rip), %ymm7, %ymm8
vpor %ymm8, %ymm3, %ymm3
vpaddw %ymm3, %ymm5, %ymm5
vmovdqa %xmm7, 2080(%r8)
vpshufb shuf48_16(%rip), %ymm4, %ymm4
vpand mask3_5_3_5(%rip), %ymm4, %ymm7
vpand mask5_3_5_3(%rip), %ymm4, %ymm4
vpermq $206, %ymm7, %ymm7
vpand mask_keephigh(%rip), %ymm7, %ymm8
vpor %ymm8, %ymm4, %ymm4
vpaddw %ymm4, %ymm6, %ymm6
vmovdqa %xmm7, 2336(%r8)
vpshufb shuf48_16(%rip), %ymm11, %ymm11
vpand mask3_5_3_5(%rip), %ymm11, %ymm7
vpand mask5_3_5_3(%rip), %ymm11, %ymm11
vpermq $206, %ymm7, %ymm7
vpand mask_keephigh(%rip), %ymm7, %ymm8
vpor %ymm8, %ymm11, %ymm11
vpaddw %ymm11, %ymm10, %ymm10
vmovdqa %xmm7, 2592(%r8)
vpand mask_mod8192(%rip), %ymm5, %ymm5
vmovdqu %ymm5, 88(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 440(%rdi)
vpand mask_mod8192(%rip), %ymm10, %ymm10
vmovdqu %ymm10, 792(%rdi)
vpand mask_mod8192(%rip), %ymm9, %ymm9
vmovdqu %ymm9, 1144(%rdi)
vmovdqa 64(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm4
vpunpckhwd const0(%rip), %ymm11, %ymm3
vpslld $1, %ymm4, %ymm4
vpslld $1, %ymm3, %ymm3
vmovdqa 320(%r8), %ymm9
vpunpcklwd const0(%rip), %ymm9, %ymm10
vpunpckhwd const0(%rip), %ymm9, %ymm9
vmovdqa 576(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm5
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm5, %ymm10, %ymm7
vpaddd %ymm6, %ymm9, %ymm8
vpsubd %ymm4, %ymm7, %ymm7
vpsubd %ymm3, %ymm8, %ymm8
vpsubd %ymm5, %ymm10, %ymm5
vpsubd %ymm6, %ymm9, %ymm6
vpsrld $1, %ymm5, %ymm5
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm5, %ymm5
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm5, %ymm6
vmovdqa 1600(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm9
vpunpckhwd const0(%rip), %ymm5, %ymm10
vpslld $1, %ymm9, %ymm9
vpslld $1, %ymm10, %ymm10
vpsubd %ymm9, %ymm7, %ymm7
vpsubd %ymm10, %ymm8, %ymm8
vpsrld $1, %ymm7, %ymm7
vpsrld $1, %ymm8, %ymm8
vpand mask32_to_16(%rip), %ymm7, %ymm7
vpand mask32_to_16(%rip), %ymm8, %ymm8
vpackusdw %ymm8, %ymm7, %ymm8
vmovdqa 832(%r8), %ymm7
vpaddw 1088(%r8), %ymm7, %ymm10
vpsubw 1088(%r8), %ymm7, %ymm7
vpsrlw $2, %ymm7, %ymm7
vpsubw %ymm6, %ymm7, %ymm7
vpmullw %ymm14, %ymm7, %ymm7
vpsllw $1, %ymm11, %ymm9
vpsubw %ymm9, %ymm10, %ymm9
vpsllw $7, %ymm5, %ymm10
vpsubw %ymm10, %ymm9, %ymm10
vpsrlw $3, %ymm10, %ymm10
vpsubw %ymm8, %ymm10, %ymm10
vmovdqa 1344(%r8), %ymm9
vpsubw %ymm11, %ymm9, %ymm9
vpmullw %ymm15, %ymm5, %ymm3
vpsubw %ymm3, %ymm9, %ymm3
vpmullw %ymm14, %ymm10, %ymm10
vpsubw %ymm10, %ymm8, %ymm8
vpmullw %ymm12, %ymm10, %ymm9
vpaddw %ymm9, %ymm8, %ymm9
vpmullw %ymm12, %ymm9, %ymm9
vpsubw %ymm9, %ymm3, %ymm9
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm6, %ymm9, %ymm9
vpsrlw $3, %ymm9, %ymm9
vpsubw %ymm7, %ymm9, %ymm9
vpsubw %ymm9, %ymm7, %ymm7
vpsubw %ymm7, %ymm6, %ymm6
vpmullw %ymm13, %ymm9, %ymm9
vpsubw %ymm9, %ymm6, %ymm6
vpshufb shuf48_16(%rip), %ymm10, %ymm10
vpand mask3_5_3_5(%rip), %ymm10, %ymm3
vpand mask5_3_5_3(%rip), %ymm10, %ymm10
vpermq $206, %ymm3, %ymm3
vpand mask_keephigh(%rip), %ymm3, %ymm4
vpor %ymm4, %ymm10, %ymm10
vpaddw %ymm10, %ymm11, %ymm11
vmovdqa %xmm3, 2112(%r8)
vpshufb shuf48_16(%rip), %ymm9, %ymm9
vpand mask3_5_3_5(%rip), %ymm9, %ymm3
vpand mask5_3_5_3(%rip), %ymm9, %ymm9
vpermq $206, %ymm3, %ymm3
vpand mask_keephigh(%rip), %ymm3, %ymm4
vpor %ymm4, %ymm9, %ymm9
vpaddw %ymm9, %ymm6, %ymm6
vmovdqa %xmm3, 2368(%r8)
vpshufb shuf48_16(%rip), %ymm5, %ymm5
vpand mask3_5_3_5(%rip), %ymm5, %ymm3
vpand mask5_3_5_3(%rip), %ymm5, %ymm5
vpermq $206, %ymm3, %ymm3
vpand mask_keephigh(%rip), %ymm3, %ymm4
vpor %ymm4, %ymm5, %ymm5
vpaddw %ymm5, %ymm8, %ymm8
vmovdqa %xmm3, 2624(%r8)
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 176(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 528(%rdi)
vpand mask_mod8192(%rip), %ymm8, %ymm8
vmovdqu %ymm8, 880(%rdi)
vpand mask_mod8192(%rip), %ymm7, %ymm7
vmovdqu %ymm7, 1232(%rdi)
vmovdqa 96(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm9
vpunpckhwd const0(%rip), %ymm5, %ymm10
vpslld $1, %ymm9, %ymm9
vpslld $1, %ymm10, %ymm10
vmovdqa 352(%r8), %ymm7
vpunpcklwd const0(%rip), %ymm7, %ymm8
vpunpckhwd const0(%rip), %ymm7, %ymm7
vmovdqa 608(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm11
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm11, %ymm8, %ymm3
vpaddd %ymm6, %ymm7, %ymm4
vpsubd %ymm9, %ymm3, %ymm3
vpsubd %ymm10, %ymm4, %ymm4
vpsubd %ymm11, %ymm8, %ymm11
vpsubd %ymm6, %ymm7, %ymm6
vpsrld $1, %ymm11, %ymm11
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm11, %ymm11
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm11, %ymm6
vmovdqa 1632(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm7
vpunpckhwd const0(%rip), %ymm11, %ymm8
vpslld $1, %ymm7, %ymm7
vpslld $1, %ymm8, %ymm8
vpsubd %ymm7, %ymm3, %ymm3
vpsubd %ymm8, %ymm4, %ymm4
vpsrld $1, %ymm3, %ymm3
vpsrld $1, %ymm4, %ymm4
vpand mask32_to_16(%rip), %ymm3, %ymm3
vpand mask32_to_16(%rip), %ymm4, %ymm4
vpackusdw %ymm4, %ymm3, %ymm4
vmovdqa 864(%r8), %ymm3
vpaddw 1120(%r8), %ymm3, %ymm8
vpsubw 1120(%r8), %ymm3, %ymm3
vpsrlw $2, %ymm3, %ymm3
vpsubw %ymm6, %ymm3, %ymm3
vpmullw %ymm14, %ymm3, %ymm3
vpsllw $1, %ymm5, %ymm7
vpsubw %ymm7, %ymm8, %ymm7
vpsllw $7, %ymm11, %ymm8
vpsubw %ymm8, %ymm7, %ymm8
vpsrlw $3, %ymm8, %ymm8
vpsubw %ymm4, %ymm8, %ymm8
vmovdqa 1376(%r8), %ymm7
vpsubw %ymm5, %ymm7, %ymm7
vpmullw %ymm15, %ymm11, %ymm10
vpsubw %ymm10, %ymm7, %ymm10
vpmullw %ymm14, %ymm8, %ymm8
vpsubw %ymm8, %ymm4, %ymm4
vpmullw %ymm12, %ymm8, %ymm7
vpaddw %ymm7, %ymm4, %ymm7
vpmullw %ymm12, %ymm7, %ymm7
vpsubw %ymm7, %ymm10, %ymm7
vpmullw %ymm14, %ymm7, %ymm7
vpsubw %ymm6, %ymm7, %ymm7
vpsrlw $3, %ymm7, %ymm7
vpsubw %ymm3, %ymm7, %ymm7
vpsubw %ymm7, %ymm3, %ymm3
vpsubw %ymm3, %ymm6, %ymm6
vpmullw %ymm13, %ymm7, %ymm7
vpsubw %ymm7, %ymm6, %ymm6
vpshufb shuf48_16(%rip), %ymm8, %ymm8
vpand mask3_5_3_5(%rip), %ymm8, %ymm10
vpand mask5_3_5_3(%rip), %ymm8, %ymm8
vpermq $206, %ymm10, %ymm10
vpand mask_keephigh(%rip), %ymm10, %ymm9
vpor %ymm9, %ymm8, %ymm8
vpaddw %ymm8, %ymm5, %ymm5
vmovdqa %xmm10, 2144(%r8)
vpshufb shuf48_16(%rip), %ymm7, %ymm7
vpand mask3_5_3_5(%rip), %ymm7, %ymm10
vpand mask5_3_5_3(%rip), %ymm7, %ymm7
vpermq $206, %ymm10, %ymm10
vpand mask_keephigh(%rip), %ymm10, %ymm9
vpor %ymm9, %ymm7, %ymm7
vpaddw %ymm7, %ymm6, %ymm6
vmovdqa %xmm10, 2400(%r8)
vpshufb shuf48_16(%rip), %ymm11, %ymm11
vpand mask3_5_3_5(%rip), %ymm11, %ymm10
vpand mask5_3_5_3(%rip), %ymm11, %ymm11
vpermq $206, %ymm10, %ymm10
vpand mask_keephigh(%rip), %ymm10, %ymm9
vpor %ymm9, %ymm11, %ymm11
vpaddw %ymm11, %ymm4, %ymm4
vmovdqa %xmm10, 2656(%r8)
vpand mask_mod8192(%rip), %ymm5, %ymm5
vmovdqu %ymm5, 264(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 616(%rdi)
vpand mask_mod8192(%rip), %ymm4, %ymm4
vmovdqu %ymm4, 968(%rdi)
vpand mask_mod8192(%rip), %ymm3, %ymm3
vmovdqu %ymm3, 1320(%rdi)
vmovdqa 128(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm7
vpunpckhwd const0(%rip), %ymm11, %ymm8
vpslld $1, %ymm7, %ymm7
vpslld $1, %ymm8, %ymm8
vmovdqa 384(%r8), %ymm3
vpunpcklwd const0(%rip), %ymm3, %ymm4
vpunpckhwd const0(%rip), %ymm3, %ymm3
vmovdqa 640(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm5
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm5, %ymm4, %ymm10
vpaddd %ymm6, %ymm3, %ymm9
vpsubd %ymm7, %ymm10, %ymm10
vpsubd %ymm8, %ymm9, %ymm9
vpsubd %ymm5, %ymm4, %ymm5
vpsubd %ymm6, %ymm3, %ymm6
vpsrld $1, %ymm5, %ymm5
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm5, %ymm5
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm5, %ymm6
vmovdqa 1664(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm3
vpunpckhwd const0(%rip), %ymm5, %ymm4
vpslld $1, %ymm3, %ymm3
vpslld $1, %ymm4, %ymm4
vpsubd %ymm3, %ymm10, %ymm10
vpsubd %ymm4, %ymm9, %ymm9
vpsrld $1, %ymm10, %ymm10
vpsrld $1, %ymm9, %ymm9
vpand mask32_to_16(%rip), %ymm10, %ymm10
vpand mask32_to_16(%rip), %ymm9, %ymm9
vpackusdw %ymm9, %ymm10, %ymm9
vmovdqa 896(%r8), %ymm10
vpaddw 1152(%r8), %ymm10, %ymm4
vpsubw 1152(%r8), %ymm10, %ymm10
vpsrlw $2, %ymm10, %ymm10
vpsubw %ymm6, %ymm10, %ymm10
vpmullw %ymm14, %ymm10, %ymm10
vpsllw $1, %ymm11, %ymm3
vpsubw %ymm3, %ymm4, %ymm3
vpsllw $7, %ymm5, %ymm4
vpsubw %ymm4, %ymm3, %ymm4
vpsrlw $3, %ymm4, %ymm4
vpsubw %ymm9, %ymm4, %ymm4
vmovdqa 1408(%r8), %ymm3
vpsubw %ymm11, %ymm3, %ymm3
vpmullw %ymm15, %ymm5, %ymm8
vpsubw %ymm8, %ymm3, %ymm8
vpmullw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm9, %ymm9
vpmullw %ymm12, %ymm4, %ymm3
vpaddw %ymm3, %ymm9, %ymm3
vpmullw %ymm12, %ymm3, %ymm3
vpsubw %ymm3, %ymm8, %ymm3
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm6, %ymm3, %ymm3
vpsrlw $3, %ymm3, %ymm3
vpsubw %ymm10, %ymm3, %ymm3
vpsubw %ymm3, %ymm10, %ymm10
vpsubw %ymm10, %ymm6, %ymm6
vpmullw %ymm13, %ymm3, %ymm3
vpsubw %ymm3, %ymm6, %ymm6
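// This row overlaps output rows written earlier, so instead of plain
// stores the code reloads 0/352/704/1056(%rdi), accumulates the freshly
// interpolated values into them and writes them back, still reduced
// mod 8192.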
vmovdqu 352(%rdi), %ymm8
vmovdqu 704(%rdi), %ymm7
vmovdqu 1056(%rdi), %ymm2
vpaddw %ymm11, %ymm8, %ymm11
vpaddw %ymm6, %ymm7, %ymm6
vpaddw %ymm9, %ymm2, %ymm9
vpshufb shuf48_16(%rip), %ymm10, %ymm10
vpand mask3_5_3_5(%rip), %ymm10, %ymm2
vpand mask5_3_5_3(%rip), %ymm10, %ymm10
vpermq $206, %ymm2, %ymm2
vpand mask_keephigh(%rip), %ymm2, %ymm7
vpor %ymm7, %ymm10, %ymm10
vmovdqu 0(%rdi), %ymm7
vpaddw %ymm10, %ymm7, %ymm7
vpand mask_mod8192(%rip), %ymm7, %ymm7
vmovdqu %ymm7, 0(%rdi)
vmovdqa %xmm2, 1920(%r8)
vpshufb shuf48_16(%rip), %ymm4, %ymm4
vpand mask3_5_3_5(%rip), %ymm4, %ymm2
vpand mask5_3_5_3(%rip), %ymm4, %ymm4
vpermq $206, %ymm2, %ymm2
vpand mask_keephigh(%rip), %ymm2, %ymm7
vpor %ymm7, %ymm4, %ymm4
vpaddw %ymm4, %ymm11, %ymm11
vmovdqa %xmm2, 2176(%r8)
vpshufb shuf48_16(%rip), %ymm3, %ymm3
vpand mask3_5_3_5(%rip), %ymm3, %ymm2
vpand mask5_3_5_3(%rip), %ymm3, %ymm3
vpermq $206, %ymm2, %ymm2
vpand mask_keephigh(%rip), %ymm2, %ymm7
vpor %ymm7, %ymm3, %ymm3
vpaddw %ymm3, %ymm6, %ymm6
vmovdqa %xmm2, 2432(%r8)
vpshufb shuf48_16(%rip), %ymm5, %ymm5
vpand mask3_5_3_5(%rip), %ymm5, %ymm2
vpand mask5_3_5_3(%rip), %ymm5, %ymm5
vpermq $206, %ymm2, %ymm2
vpand mask_keephigh(%rip), %ymm2, %ymm7
vpor %ymm7, %ymm5, %ymm5
vpaddw %ymm5, %ymm9, %ymm9
vmovdqa %xmm2, 2688(%r8)
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 352(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 704(%rdi)
vpand mask_mod8192(%rip), %ymm9, %ymm9
vmovdqu %ymm9, 1056(%rdi)
vmovdqa 160(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm3
vpunpckhwd const0(%rip), %ymm5, %ymm4
vpslld $1, %ymm3, %ymm3
vpslld $1, %ymm4, %ymm4
vmovdqa 416(%r8), %ymm10
vpunpcklwd const0(%rip), %ymm10, %ymm9
vpunpckhwd const0(%rip), %ymm10, %ymm10
vmovdqa 672(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm11
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm11, %ymm9, %ymm2
vpaddd %ymm6, %ymm10, %ymm7
vpsubd %ymm3, %ymm2, %ymm2
vpsubd %ymm4, %ymm7, %ymm7
vpsubd %ymm11, %ymm9, %ymm11
vpsubd %ymm6, %ymm10, %ymm6
vpsrld $1, %ymm11, %ymm11
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm11, %ymm11
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm11, %ymm6
vmovdqa 1696(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm10
vpunpckhwd const0(%rip), %ymm11, %ymm9
vpslld $1, %ymm10, %ymm10
vpslld $1, %ymm9, %ymm9
vpsubd %ymm10, %ymm2, %ymm2
vpsubd %ymm9, %ymm7, %ymm7
vpsrld $1, %ymm2, %ymm2
vpsrld $1, %ymm7, %ymm7
vpand mask32_to_16(%rip), %ymm2, %ymm2
vpand mask32_to_16(%rip), %ymm7, %ymm7
vpackusdw %ymm7, %ymm2, %ymm7
vmovdqa 928(%r8), %ymm2
vpaddw 1184(%r8), %ymm2, %ymm9
vpsubw 1184(%r8), %ymm2, %ymm2
vpsrlw $2, %ymm2, %ymm2
vpsubw %ymm6, %ymm2, %ymm2
vpmullw %ymm14, %ymm2, %ymm2
vpsllw $1, %ymm5, %ymm10
vpsubw %ymm10, %ymm9, %ymm10
vpsllw $7, %ymm11, %ymm9
vpsubw %ymm9, %ymm10, %ymm9
vpsrlw $3, %ymm9, %ymm9
vpsubw %ymm7, %ymm9, %ymm9
vmovdqa 1440(%r8), %ymm10
vpsubw %ymm5, %ymm10, %ymm10
vpmullw %ymm15, %ymm11, %ymm4
vpsubw %ymm4, %ymm10, %ymm4
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm9, %ymm7, %ymm7
vpmullw %ymm12, %ymm9, %ymm10
vpaddw %ymm10, %ymm7, %ymm10
vpmullw %ymm12, %ymm10, %ymm10
vpsubw %ymm10, %ymm4, %ymm10
vpmullw %ymm14, %ymm10, %ymm10
vpsubw %ymm6, %ymm10, %ymm10
vpsrlw $3, %ymm10, %ymm10
vpsubw %ymm2, %ymm10, %ymm10
vpsubw %ymm10, %ymm2, %ymm2
vpsubw %ymm2, %ymm6, %ymm6
vpmullw %ymm13, %ymm10, %ymm10
vpsubw %ymm10, %ymm6, %ymm6
vmovdqu 440(%rdi), %ymm4
vmovdqu 792(%rdi), %ymm3
vmovdqu 1144(%rdi), %ymm8
vpaddw %ymm5, %ymm4, %ymm5
vpaddw %ymm6, %ymm3, %ymm6
vpaddw %ymm7, %ymm8, %ymm7
vpshufb shuf48_16(%rip), %ymm2, %ymm2
vpand mask3_5_3_5(%rip), %ymm2, %ymm8
vpand mask5_3_5_3(%rip), %ymm2, %ymm2
vpermq $206, %ymm8, %ymm8
vpand mask_keephigh(%rip), %ymm8, %ymm3
vpor %ymm3, %ymm2, %ymm2
vmovdqu 88(%rdi), %ymm3
vpaddw %ymm2, %ymm3, %ymm3
vpand mask_mod8192(%rip), %ymm3, %ymm3
vmovdqu %ymm3, 88(%rdi)
vmovdqa %xmm8, 1952(%r8)
vpshufb shuf48_16(%rip), %ymm9, %ymm9
vpand mask3_5_3_5(%rip), %ymm9, %ymm8
vpand mask5_3_5_3(%rip), %ymm9, %ymm9
vpermq $206, %ymm8, %ymm8
vpand mask_keephigh(%rip), %ymm8, %ymm3
vpor %ymm3, %ymm9, %ymm9
vpaddw %ymm9, %ymm5, %ymm5
vmovdqa %xmm8, 2208(%r8)
vpshufb shuf48_16(%rip), %ymm10, %ymm10
vpand mask3_5_3_5(%rip), %ymm10, %ymm8
vpand mask5_3_5_3(%rip), %ymm10, %ymm10
vpermq $206, %ymm8, %ymm8
vpand mask_keephigh(%rip), %ymm8, %ymm3
vpor %ymm3, %ymm10, %ymm10
vpaddw %ymm10, %ymm6, %ymm6
vmovdqa %xmm8, 2464(%r8)
vpshufb shuf48_16(%rip), %ymm11, %ymm11
vpand mask3_5_3_5(%rip), %ymm11, %ymm8
vpand mask5_3_5_3(%rip), %ymm11, %ymm11
vpermq $206, %ymm8, %ymm8
vpand mask_keephigh(%rip), %ymm8, %ymm3
vpor %ymm3, %ymm11, %ymm11
vpaddw %ymm11, %ymm7, %ymm7
vmovdqa %xmm8, 2720(%r8)
vpand mask_mod8192(%rip), %ymm5, %ymm5
vmovdqu %ymm5, 440(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 792(%rdi)
vpand mask_mod8192(%rip), %ymm7, %ymm7
vmovdqu %ymm7, 1144(%rdi)
vmovdqa 192(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm10
vpunpckhwd const0(%rip), %ymm11, %ymm9
vpslld $1, %ymm10, %ymm10
vpslld $1, %ymm9, %ymm9
vmovdqa 448(%r8), %ymm2
vpunpcklwd const0(%rip), %ymm2, %ymm7
vpunpckhwd const0(%rip), %ymm2, %ymm2
vmovdqa 704(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm5
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm5, %ymm7, %ymm8
vpaddd %ymm6, %ymm2, %ymm3
vpsubd %ymm10, %ymm8, %ymm8
vpsubd %ymm9, %ymm3, %ymm3
vpsubd %ymm5, %ymm7, %ymm5
vpsubd %ymm6, %ymm2, %ymm6
vpsrld $1, %ymm5, %ymm5
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm5, %ymm5
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm5, %ymm6
vmovdqa 1728(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm2
vpunpckhwd const0(%rip), %ymm5, %ymm7
vpslld $1, %ymm2, %ymm2
vpslld $1, %ymm7, %ymm7
vpsubd %ymm2, %ymm8, %ymm8
vpsubd %ymm7, %ymm3, %ymm3
vpsrld $1, %ymm8, %ymm8
vpsrld $1, %ymm3, %ymm3
vpand mask32_to_16(%rip), %ymm8, %ymm8
vpand mask32_to_16(%rip), %ymm3, %ymm3
vpackusdw %ymm3, %ymm8, %ymm3
vmovdqa 960(%r8), %ymm8
vpaddw 1216(%r8), %ymm8, %ymm7
vpsubw 1216(%r8), %ymm8, %ymm8
vpsrlw $2, %ymm8, %ymm8
vpsubw %ymm6, %ymm8, %ymm8
vpmullw %ymm14, %ymm8, %ymm8
vpsllw $1, %ymm11, %ymm2
vpsubw %ymm2, %ymm7, %ymm2
vpsllw $7, %ymm5, %ymm7
vpsubw %ymm7, %ymm2, %ymm7
vpsrlw $3, %ymm7, %ymm7
vpsubw %ymm3, %ymm7, %ymm7
vmovdqa 1472(%r8), %ymm2
vpsubw %ymm11, %ymm2, %ymm2
vpmullw %ymm15, %ymm5, %ymm9
vpsubw %ymm9, %ymm2, %ymm9
vpmullw %ymm14, %ymm7, %ymm7
vpsubw %ymm7, %ymm3, %ymm3
vpmullw %ymm12, %ymm7, %ymm2
vpaddw %ymm2, %ymm3, %ymm2
vpmullw %ymm12, %ymm2, %ymm2
vpsubw %ymm2, %ymm9, %ymm2
vpmullw %ymm14, %ymm2, %ymm2
vpsubw %ymm6, %ymm2, %ymm2
vpsrlw $3, %ymm2, %ymm2
vpsubw %ymm8, %ymm2, %ymm2
vpsubw %ymm2, %ymm8, %ymm8
vpsubw %ymm8, %ymm6, %ymm6
vpmullw %ymm13, %ymm2, %ymm2
vpsubw %ymm2, %ymm6, %ymm6
vmovdqu 528(%rdi), %ymm9
vmovdqu 880(%rdi), %ymm10
vmovdqu 1232(%rdi), %ymm4
vpaddw %ymm11, %ymm9, %ymm11
vpaddw %ymm6, %ymm10, %ymm6
vpaddw %ymm3, %ymm4, %ymm3
vpshufb shuf48_16(%rip), %ymm8, %ymm8
vpand mask3_5_3_5(%rip), %ymm8, %ymm4
vpand mask5_3_5_3(%rip), %ymm8, %ymm8
vpermq $206, %ymm4, %ymm4
vpand mask_keephigh(%rip), %ymm4, %ymm10
vpor %ymm10, %ymm8, %ymm8
vmovdqu 176(%rdi), %ymm10
vpaddw %ymm8, %ymm10, %ymm10
vpand mask_mod8192(%rip), %ymm10, %ymm10
vmovdqu %ymm10, 176(%rdi)
vmovdqa %xmm4, 1984(%r8)
vpshufb shuf48_16(%rip), %ymm7, %ymm7
vpand mask3_5_3_5(%rip), %ymm7, %ymm4
vpand mask5_3_5_3(%rip), %ymm7, %ymm7
vpermq $206, %ymm4, %ymm4
vpand mask_keephigh(%rip), %ymm4, %ymm10
vpor %ymm10, %ymm7, %ymm7
vpaddw %ymm7, %ymm11, %ymm11
vmovdqa %xmm4, 2240(%r8)
vpshufb shuf48_16(%rip), %ymm2, %ymm2
vpand mask3_5_3_5(%rip), %ymm2, %ymm4
vpand mask5_3_5_3(%rip), %ymm2, %ymm2
vpermq $206, %ymm4, %ymm4
vpand mask_keephigh(%rip), %ymm4, %ymm10
vpor %ymm10, %ymm2, %ymm2
vpaddw %ymm2, %ymm6, %ymm6
vmovdqa %xmm4, 2496(%r8)
vpshufb shuf48_16(%rip), %ymm5, %ymm5
vpand mask3_5_3_5(%rip), %ymm5, %ymm4
vpand mask5_3_5_3(%rip), %ymm5, %ymm5
vpermq $206, %ymm4, %ymm4
vpand mask_keephigh(%rip), %ymm4, %ymm10
vpor %ymm10, %ymm5, %ymm5
vpaddw %ymm5, %ymm3, %ymm3
vmovdqa %xmm4, 2752(%r8)
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 528(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 880(%rdi)
vpand mask_mod8192(%rip), %ymm3, %ymm3
vmovdqu %ymm3, 1232(%rdi)
vmovdqa 224(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm2
vpunpckhwd const0(%rip), %ymm5, %ymm7
vpslld $1, %ymm2, %ymm2
vpslld $1, %ymm7, %ymm7
vmovdqa 480(%r8), %ymm8
vpunpcklwd const0(%rip), %ymm8, %ymm3
vpunpckhwd const0(%rip), %ymm8, %ymm8
vmovdqa 736(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm11
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm11, %ymm3, %ymm4
vpaddd %ymm6, %ymm8, %ymm10
vpsubd %ymm2, %ymm4, %ymm4
vpsubd %ymm7, %ymm10, %ymm10
vpsubd %ymm11, %ymm3, %ymm11
vpsubd %ymm6, %ymm8, %ymm6
vpsrld $1, %ymm11, %ymm11
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm11, %ymm11
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm11, %ymm6
vmovdqa 1760(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm8
vpunpckhwd const0(%rip), %ymm11, %ymm3
vpslld $1, %ymm8, %ymm8
vpslld $1, %ymm3, %ymm3
vpsubd %ymm8, %ymm4, %ymm4
vpsubd %ymm3, %ymm10, %ymm10
vpsrld $1, %ymm4, %ymm4
vpsrld $1, %ymm10, %ymm10
vpand mask32_to_16(%rip), %ymm4, %ymm4
vpand mask32_to_16(%rip), %ymm10, %ymm10
vpackusdw %ymm10, %ymm4, %ymm10
vmovdqa 992(%r8), %ymm4
vpaddw 1248(%r8), %ymm4, %ymm3
vpsubw 1248(%r8), %ymm4, %ymm4
vpsrlw $2, %ymm4, %ymm4
vpsubw %ymm6, %ymm4, %ymm4
vpmullw %ymm14, %ymm4, %ymm4
vpsllw $1, %ymm5, %ymm8
vpsubw %ymm8, %ymm3, %ymm8
vpsllw $7, %ymm11, %ymm3
vpsubw %ymm3, %ymm8, %ymm3
vpsrlw $3, %ymm3, %ymm3
vpsubw %ymm10, %ymm3, %ymm3
vmovdqa 1504(%r8), %ymm8
vpsubw %ymm5, %ymm8, %ymm8
vpmullw %ymm15, %ymm11, %ymm7
vpsubw %ymm7, %ymm8, %ymm7
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm3, %ymm10, %ymm10
vpmullw %ymm12, %ymm3, %ymm8
vpaddw %ymm8, %ymm10, %ymm8
vpmullw %ymm12, %ymm8, %ymm8
vpsubw %ymm8, %ymm7, %ymm8
vpmullw %ymm14, %ymm8, %ymm8
vpsubw %ymm6, %ymm8, %ymm8
vpsrlw $3, %ymm8, %ymm8
vpsubw %ymm4, %ymm8, %ymm8
vpsubw %ymm8, %ymm4, %ymm4
vpsubw %ymm4, %ymm6, %ymm6
vpmullw %ymm13, %ymm8, %ymm8
vpsubw %ymm8, %ymm6, %ymm6
vmovdqu 616(%rdi), %ymm7
vmovdqu 968(%rdi), %ymm2
vmovdqu 1320(%rdi), %ymm9
vpaddw %ymm5, %ymm7, %ymm5
vpaddw %ymm6, %ymm2, %ymm6
vpaddw %ymm10, %ymm9, %ymm10
vpshufb shuf48_16(%rip), %ymm4, %ymm4
vpand mask3_5_3_5(%rip), %ymm4, %ymm9
vpand mask5_3_5_3(%rip), %ymm4, %ymm4
vpermq $206, %ymm9, %ymm9
vpand mask_keephigh(%rip), %ymm9, %ymm2
vpor %ymm2, %ymm4, %ymm4
vmovdqu 264(%rdi), %ymm2
vpaddw %ymm4, %ymm2, %ymm2
vpand mask_mod8192(%rip), %ymm2, %ymm2
vmovdqu %ymm2, 264(%rdi)
vmovdqa %xmm9, 2016(%r8)
vpshufb shuf48_16(%rip), %ymm3, %ymm3
vpand mask3_5_3_5(%rip), %ymm3, %ymm9
vpand mask5_3_5_3(%rip), %ymm3, %ymm3
vpermq $206, %ymm9, %ymm9
vpand mask_keephigh(%rip), %ymm9, %ymm2
vpor %ymm2, %ymm3, %ymm3
vpaddw %ymm3, %ymm5, %ymm5
vmovdqa %xmm9, 2272(%r8)
vpshufb shuf48_16(%rip), %ymm8, %ymm8
vpand mask3_5_3_5(%rip), %ymm8, %ymm9
vpand mask5_3_5_3(%rip), %ymm8, %ymm8
vpermq $206, %ymm9, %ymm9
vpand mask_keephigh(%rip), %ymm9, %ymm2
vpor %ymm2, %ymm8, %ymm8
vpaddw %ymm8, %ymm6, %ymm6
vmovdqa %xmm9, 2528(%r8)
vpshufb shuf48_16(%rip), %ymm11, %ymm11
vpand mask3_5_3_5(%rip), %ymm11, %ymm9
vpand mask5_3_5_3(%rip), %ymm11, %ymm11
vpermq $206, %ymm9, %ymm9
vpand mask_keephigh(%rip), %ymm9, %ymm2
vpor %ymm2, %ymm11, %ymm11
vpaddw %ymm11, %ymm10, %ymm10
vmovdqa %xmm9, 2784(%r8)
vpand mask_mod8192(%rip), %ymm5, %ymm5
vmovdqu %ymm5, 616(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 968(%rdi)
vpand mask_mod8192(%rip), %ymm10, %ymm10
vmovdqu %ymm10, 1320(%rdi)
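// Next phase: partial products are reloaded from the large scratch buffer
// at (%r12) and recombined with the add/subtract pattern below (this
// looks like a Karatsuba/Toom-style recombination of the evaluation
// points), producing eight 32-byte rows at a time that are stored into
// (%r8) for the interpolation pass that follows.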
vmovdqa 128(%r12), %ymm0
vpsubw 224(%r12), %ymm0, %ymm0
vmovdqa 512(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 320(%r12), %ymm1, %ymm1
vpsubw 32(%r12), %ymm0, %ymm0
vpaddw 416(%r12), %ymm0, %ymm0
vmovdqa 704(%r12), %ymm2
vpsubw 800(%r12), %ymm2, %ymm2
vmovdqa 1088(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 896(%r12), %ymm3, %ymm3
vpsubw 608(%r12), %ymm2, %ymm2
vpaddw 992(%r12), %ymm2, %ymm2
vmovdqa 1280(%r12), %ymm4
vpsubw 1376(%r12), %ymm4, %ymm4
vmovdqa 1664(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 1472(%r12), %ymm5, %ymm5
vpsubw 1184(%r12), %ymm4, %ymm4
vpaddw 1568(%r12), %ymm4, %ymm4
vpsubw 608(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 32(%r12), %ymm1, %ymm1
vpaddw 1184(%r12), %ymm1, %ymm1
vmovdqa 320(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 1472(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 896(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 32(%r12), %ymm8
vmovdqa 896(%r12), %ymm9
vmovdqa %ymm8, 0(%r8)
vmovdqa %ymm0, 32(%r8)
vmovdqa %ymm1, 64(%r8)
vmovdqa %ymm7, 96(%r8)
vmovdqa %ymm5, 128(%r8)
vmovdqa %ymm2, 160(%r8)
vmovdqa %ymm3, 192(%r8)
vmovdqa %ymm9, 224(%r8)
vmovdqa 1856(%r12), %ymm0
vpsubw 1952(%r12), %ymm0, %ymm0
vmovdqa 2240(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 2048(%r12), %ymm1, %ymm1
vpsubw 1760(%r12), %ymm0, %ymm0
vpaddw 2144(%r12), %ymm0, %ymm0
vmovdqa 2432(%r12), %ymm2
vpsubw 2528(%r12), %ymm2, %ymm2
vmovdqa 2816(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 2624(%r12), %ymm3, %ymm3
vpsubw 2336(%r12), %ymm2, %ymm2
vpaddw 2720(%r12), %ymm2, %ymm2
vmovdqa 3008(%r12), %ymm4
vpsubw 3104(%r12), %ymm4, %ymm4
vmovdqa 3392(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 3200(%r12), %ymm5, %ymm5
vpsubw 2912(%r12), %ymm4, %ymm4
vpaddw 3296(%r12), %ymm4, %ymm4
vpsubw 2336(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 1760(%r12), %ymm1, %ymm1
vpaddw 2912(%r12), %ymm1, %ymm1
vmovdqa 2048(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 3200(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 2624(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 1760(%r12), %ymm8
vmovdqa 2624(%r12), %ymm9
vmovdqa %ymm8, 256(%r8)
vmovdqa %ymm0, 288(%r8)
vmovdqa %ymm1, 320(%r8)
vmovdqa %ymm7, 352(%r8)
vmovdqa %ymm5, 384(%r8)
vmovdqa %ymm2, 416(%r8)
vmovdqa %ymm3, 448(%r8)
vmovdqa %ymm9, 480(%r8)
vmovdqa 3584(%r12), %ymm0
vpsubw 3680(%r12), %ymm0, %ymm0
vmovdqa 3968(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 3776(%r12), %ymm1, %ymm1
vpsubw 3488(%r12), %ymm0, %ymm0
vpaddw 3872(%r12), %ymm0, %ymm0
vmovdqa 4160(%r12), %ymm2
vpsubw 4256(%r12), %ymm2, %ymm2
vmovdqa 4544(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 4352(%r12), %ymm3, %ymm3
vpsubw 4064(%r12), %ymm2, %ymm2
vpaddw 4448(%r12), %ymm2, %ymm2
vmovdqa 4736(%r12), %ymm4
vpsubw 4832(%r12), %ymm4, %ymm4
vmovdqa 5120(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 4928(%r12), %ymm5, %ymm5
vpsubw 4640(%r12), %ymm4, %ymm4
vpaddw 5024(%r12), %ymm4, %ymm4
vpsubw 4064(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 3488(%r12), %ymm1, %ymm1
vpaddw 4640(%r12), %ymm1, %ymm1
vmovdqa 3776(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 4928(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 4352(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 3488(%r12), %ymm8
vmovdqa 4352(%r12), %ymm9
vmovdqa %ymm8, 512(%r8)
vmovdqa %ymm0, 544(%r8)
vmovdqa %ymm1, 576(%r8)
vmovdqa %ymm7, 608(%r8)
vmovdqa %ymm5, 640(%r8)
vmovdqa %ymm2, 672(%r8)
vmovdqa %ymm3, 704(%r8)
vmovdqa %ymm9, 736(%r8)
vmovdqa 5312(%r12), %ymm0
vpsubw 5408(%r12), %ymm0, %ymm0
vmovdqa 5696(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 5504(%r12), %ymm1, %ymm1
vpsubw 5216(%r12), %ymm0, %ymm0
vpaddw 5600(%r12), %ymm0, %ymm0
vmovdqa 5888(%r12), %ymm2
vpsubw 5984(%r12), %ymm2, %ymm2
vmovdqa 6272(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 6080(%r12), %ymm3, %ymm3
vpsubw 5792(%r12), %ymm2, %ymm2
vpaddw 6176(%r12), %ymm2, %ymm2
vmovdqa 6464(%r12), %ymm4
vpsubw 6560(%r12), %ymm4, %ymm4
vmovdqa 6848(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 6656(%r12), %ymm5, %ymm5
vpsubw 6368(%r12), %ymm4, %ymm4
vpaddw 6752(%r12), %ymm4, %ymm4
vpsubw 5792(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 5216(%r12), %ymm1, %ymm1
vpaddw 6368(%r12), %ymm1, %ymm1
vmovdqa 5504(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 6656(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 6080(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 5216(%r12), %ymm8
vmovdqa 6080(%r12), %ymm9
vmovdqa %ymm8, 768(%r8)
vmovdqa %ymm0, 800(%r8)
vmovdqa %ymm1, 832(%r8)
vmovdqa %ymm7, 864(%r8)
vmovdqa %ymm5, 896(%r8)
vmovdqa %ymm2, 928(%r8)
vmovdqa %ymm3, 960(%r8)
vmovdqa %ymm9, 992(%r8)
vmovdqa 7040(%r12), %ymm0
vpsubw 7136(%r12), %ymm0, %ymm0
vmovdqa 7424(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 7232(%r12), %ymm1, %ymm1
vpsubw 6944(%r12), %ymm0, %ymm0
vpaddw 7328(%r12), %ymm0, %ymm0
vmovdqa 7616(%r12), %ymm2
vpsubw 7712(%r12), %ymm2, %ymm2
vmovdqa 8000(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 7808(%r12), %ymm3, %ymm3
vpsubw 7520(%r12), %ymm2, %ymm2
vpaddw 7904(%r12), %ymm2, %ymm2
vmovdqa 8192(%r12), %ymm4
vpsubw 8288(%r12), %ymm4, %ymm4
vmovdqa 8576(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 8384(%r12), %ymm5, %ymm5
vpsubw 8096(%r12), %ymm4, %ymm4
vpaddw 8480(%r12), %ymm4, %ymm4
vpsubw 7520(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 6944(%r12), %ymm1, %ymm1
vpaddw 8096(%r12), %ymm1, %ymm1
vmovdqa 7232(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 8384(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 7808(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 6944(%r12), %ymm8
vmovdqa 7808(%r12), %ymm9
vmovdqa %ymm8, 1024(%r8)
vmovdqa %ymm0, 1056(%r8)
vmovdqa %ymm1, 1088(%r8)
vmovdqa %ymm7, 1120(%r8)
vmovdqa %ymm5, 1152(%r8)
vmovdqa %ymm2, 1184(%r8)
vmovdqa %ymm3, 1216(%r8)
vmovdqa %ymm9, 1248(%r8)
vmovdqa 8768(%r12), %ymm0
vpsubw 8864(%r12), %ymm0, %ymm0
vmovdqa 9152(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 8960(%r12), %ymm1, %ymm1
vpsubw 8672(%r12), %ymm0, %ymm0
vpaddw 9056(%r12), %ymm0, %ymm0
vmovdqa 9344(%r12), %ymm2
vpsubw 9440(%r12), %ymm2, %ymm2
vmovdqa 9728(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 9536(%r12), %ymm3, %ymm3
vpsubw 9248(%r12), %ymm2, %ymm2
vpaddw 9632(%r12), %ymm2, %ymm2
vmovdqa 9920(%r12), %ymm4
vpsubw 10016(%r12), %ymm4, %ymm4
vmovdqa 10304(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 10112(%r12), %ymm5, %ymm5
vpsubw 9824(%r12), %ymm4, %ymm4
vpaddw 10208(%r12), %ymm4, %ymm4
vpsubw 9248(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 8672(%r12), %ymm1, %ymm1
vpaddw 9824(%r12), %ymm1, %ymm1
vmovdqa 8960(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 10112(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 9536(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 8672(%r12), %ymm8
vmovdqa 9536(%r12), %ymm9
vmovdqa %ymm8, 1280(%r8)
vmovdqa %ymm0, 1312(%r8)
vmovdqa %ymm1, 1344(%r8)
vmovdqa %ymm7, 1376(%r8)
vmovdqa %ymm5, 1408(%r8)
vmovdqa %ymm2, 1440(%r8)
vmovdqa %ymm3, 1472(%r8)
vmovdqa %ymm9, 1504(%r8)
vmovdqa 10496(%r12), %ymm0
vpsubw 10592(%r12), %ymm0, %ymm0
vmovdqa 10880(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 10688(%r12), %ymm1, %ymm1
vpsubw 10400(%r12), %ymm0, %ymm0
vpaddw 10784(%r12), %ymm0, %ymm0
vmovdqa 11072(%r12), %ymm2
vpsubw 11168(%r12), %ymm2, %ymm2
vmovdqa 11456(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 11264(%r12), %ymm3, %ymm3
vpsubw 10976(%r12), %ymm2, %ymm2
vpaddw 11360(%r12), %ymm2, %ymm2
vmovdqa 11648(%r12), %ymm4
vpsubw 11744(%r12), %ymm4, %ymm4
vmovdqa 12032(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 11840(%r12), %ymm5, %ymm5
vpsubw 11552(%r12), %ymm4, %ymm4
vpaddw 11936(%r12), %ymm4, %ymm4
vpsubw 10976(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 10400(%r12), %ymm1, %ymm1
vpaddw 11552(%r12), %ymm1, %ymm1
vmovdqa 10688(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 11840(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 11264(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 10400(%r12), %ymm8
vmovdqa 11264(%r12), %ymm9
vmovdqa %ymm8, 1536(%r8)
vmovdqa %ymm0, 1568(%r8)
vmovdqa %ymm1, 1600(%r8)
vmovdqa %ymm7, 1632(%r8)
vmovdqa %ymm5, 1664(%r8)
vmovdqa %ymm2, 1696(%r8)
vmovdqa %ymm3, 1728(%r8)
vmovdqa %ymm9, 1760(%r8)
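// Second interpolation pass over the rows just assembled in (%r8).  The
// arithmetic mirrors the pass above, except that the spill words saved
// earlier at 2048(%r8) and up are first added back in (vpaddw 2048(%r8),
// ...) before each row is reduced mod 8192 and stored to (%rdi).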
vmovdqa 0(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm8
vpunpckhwd const0(%rip), %ymm11, %ymm3
vpslld $1, %ymm8, %ymm8
vpslld $1, %ymm3, %ymm3
vmovdqa 256(%r8), %ymm4
vpunpcklwd const0(%rip), %ymm4, %ymm10
vpunpckhwd const0(%rip), %ymm4, %ymm4
vmovdqa 512(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm5
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm5, %ymm10, %ymm9
vpaddd %ymm6, %ymm4, %ymm2
vpsubd %ymm8, %ymm9, %ymm9
vpsubd %ymm3, %ymm2, %ymm2
vpsubd %ymm5, %ymm10, %ymm5
vpsubd %ymm6, %ymm4, %ymm6
vpsrld $1, %ymm5, %ymm5
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm5, %ymm5
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm5, %ymm6
vmovdqa 1536(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm4
vpunpckhwd const0(%rip), %ymm5, %ymm10
vpslld $1, %ymm4, %ymm4
vpslld $1, %ymm10, %ymm10
vpsubd %ymm4, %ymm9, %ymm9
vpsubd %ymm10, %ymm2, %ymm2
vpsrld $1, %ymm9, %ymm9
vpsrld $1, %ymm2, %ymm2
vpand mask32_to_16(%rip), %ymm9, %ymm9
vpand mask32_to_16(%rip), %ymm2, %ymm2
vpackusdw %ymm2, %ymm9, %ymm2
vmovdqa 768(%r8), %ymm9
vpaddw 1024(%r8), %ymm9, %ymm10
vpsubw 1024(%r8), %ymm9, %ymm9
vpsrlw $2, %ymm9, %ymm9
vpsubw %ymm6, %ymm9, %ymm9
vpmullw %ymm14, %ymm9, %ymm9
vpsllw $1, %ymm11, %ymm4
vpsubw %ymm4, %ymm10, %ymm4
vpsllw $7, %ymm5, %ymm10
vpsubw %ymm10, %ymm4, %ymm10
vpsrlw $3, %ymm10, %ymm10
vpsubw %ymm2, %ymm10, %ymm10
vmovdqa 1280(%r8), %ymm4
vpsubw %ymm11, %ymm4, %ymm4
vpmullw %ymm15, %ymm5, %ymm3
vpsubw %ymm3, %ymm4, %ymm3
vpmullw %ymm14, %ymm10, %ymm10
vpsubw %ymm10, %ymm2, %ymm2
vpmullw %ymm12, %ymm10, %ymm4
vpaddw %ymm4, %ymm2, %ymm4
vpmullw %ymm12, %ymm4, %ymm4
vpsubw %ymm4, %ymm3, %ymm4
vpmullw %ymm14, %ymm4, %ymm4
vpsubw %ymm6, %ymm4, %ymm4
vpsrlw $3, %ymm4, %ymm4
vpsubw %ymm9, %ymm4, %ymm4
vpsubw %ymm4, %ymm9, %ymm9
vpsubw %ymm9, %ymm6, %ymm6
vpmullw %ymm13, %ymm4, %ymm4
vpsubw %ymm4, %ymm6, %ymm6
vpshufb shuf48_16(%rip), %ymm10, %ymm10
vpand mask3_5_3_5(%rip), %ymm10, %ymm3
vpand mask5_3_5_3(%rip), %ymm10, %ymm10
vpermq $206, %ymm3, %ymm3
vpand mask_keephigh(%rip), %ymm3, %ymm8
vpor %ymm8, %ymm10, %ymm10
vpaddw 2048(%r8), %ymm11, %ymm11
vpaddw %ymm10, %ymm11, %ymm11
vmovdqa %xmm3, 2048(%r8)
vpshufb shuf48_16(%rip), %ymm4, %ymm4
vpand mask3_5_3_5(%rip), %ymm4, %ymm3
vpand mask5_3_5_3(%rip), %ymm4, %ymm4
vpermq $206, %ymm3, %ymm3
vpand mask_keephigh(%rip), %ymm3, %ymm8
vpor %ymm8, %ymm4, %ymm4
vpaddw 2304(%r8), %ymm6, %ymm6
vpaddw %ymm4, %ymm6, %ymm6
vmovdqa %xmm3, 2304(%r8)
vpshufb shuf48_16(%rip), %ymm5, %ymm5
vpand mask3_5_3_5(%rip), %ymm5, %ymm3
vpand mask5_3_5_3(%rip), %ymm5, %ymm5
vpermq $206, %ymm3, %ymm3
vpand mask_keephigh(%rip), %ymm3, %ymm8
vpor %ymm8, %ymm5, %ymm5
vpaddw 2560(%r8), %ymm2, %ymm2
vpaddw %ymm5, %ymm2, %ymm2
vmovdqa %xmm3, 2560(%r8)
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 32(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 384(%rdi)
vpand mask_mod8192(%rip), %ymm2, %ymm2
vmovdqu %ymm2, 736(%rdi)
vpand mask_mod8192(%rip), %ymm9, %ymm9
vmovdqu %ymm9, 1088(%rdi)
vmovdqa 32(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm4
vpunpckhwd const0(%rip), %ymm5, %ymm10
vpslld $1, %ymm4, %ymm4
vpslld $1, %ymm10, %ymm10
vmovdqa 288(%r8), %ymm9
vpunpcklwd const0(%rip), %ymm9, %ymm2
vpunpckhwd const0(%rip), %ymm9, %ymm9
vmovdqa 544(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm11
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm11, %ymm2, %ymm3
vpaddd %ymm6, %ymm9, %ymm8
vpsubd %ymm4, %ymm3, %ymm3
vpsubd %ymm10, %ymm8, %ymm8
vpsubd %ymm11, %ymm2, %ymm11
vpsubd %ymm6, %ymm9, %ymm6
vpsrld $1, %ymm11, %ymm11
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm11, %ymm11
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm11, %ymm6
vmovdqa 1568(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm9
vpunpckhwd const0(%rip), %ymm11, %ymm2
vpslld $1, %ymm9, %ymm9
vpslld $1, %ymm2, %ymm2
vpsubd %ymm9, %ymm3, %ymm3
vpsubd %ymm2, %ymm8, %ymm8
vpsrld $1, %ymm3, %ymm3
vpsrld $1, %ymm8, %ymm8
vpand mask32_to_16(%rip), %ymm3, %ymm3
vpand mask32_to_16(%rip), %ymm8, %ymm8
vpackusdw %ymm8, %ymm3, %ymm8
vmovdqa 800(%r8), %ymm3
vpaddw 1056(%r8), %ymm3, %ymm2
vpsubw 1056(%r8), %ymm3, %ymm3
vpsrlw $2, %ymm3, %ymm3
vpsubw %ymm6, %ymm3, %ymm3
vpmullw %ymm14, %ymm3, %ymm3
vpsllw $1, %ymm5, %ymm9
vpsubw %ymm9, %ymm2, %ymm9
vpsllw $7, %ymm11, %ymm2
vpsubw %ymm2, %ymm9, %ymm2
vpsrlw $3, %ymm2, %ymm2
vpsubw %ymm8, %ymm2, %ymm2
vmovdqa 1312(%r8), %ymm9
vpsubw %ymm5, %ymm9, %ymm9
vpmullw %ymm15, %ymm11, %ymm10
vpsubw %ymm10, %ymm9, %ymm10
vpmullw %ymm14, %ymm2, %ymm2
vpsubw %ymm2, %ymm8, %ymm8
vpmullw %ymm12, %ymm2, %ymm9
vpaddw %ymm9, %ymm8, %ymm9
vpmullw %ymm12, %ymm9, %ymm9
vpsubw %ymm9, %ymm10, %ymm9
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm6, %ymm9, %ymm9
vpsrlw $3, %ymm9, %ymm9
vpsubw %ymm3, %ymm9, %ymm9
vpsubw %ymm9, %ymm3, %ymm3
vpsubw %ymm3, %ymm6, %ymm6
vpmullw %ymm13, %ymm9, %ymm9
vpsubw %ymm9, %ymm6, %ymm6
vpshufb shuf48_16(%rip), %ymm2, %ymm2
vpand mask3_5_3_5(%rip), %ymm2, %ymm10
vpand mask5_3_5_3(%rip), %ymm2, %ymm2
vpermq $206, %ymm10, %ymm10
vpand mask_keephigh(%rip), %ymm10, %ymm4
vpor %ymm4, %ymm2, %ymm2
vpaddw 2080(%r8), %ymm5, %ymm5
vpaddw %ymm2, %ymm5, %ymm5
vmovdqa %xmm10, 2080(%r8)
vpshufb shuf48_16(%rip), %ymm9, %ymm9
vpand mask3_5_3_5(%rip), %ymm9, %ymm10
vpand mask5_3_5_3(%rip), %ymm9, %ymm9
vpermq $206, %ymm10, %ymm10
vpand mask_keephigh(%rip), %ymm10, %ymm4
vpor %ymm4, %ymm9, %ymm9
vpaddw 2336(%r8), %ymm6, %ymm6
vpaddw %ymm9, %ymm6, %ymm6
vmovdqa %xmm10, 2336(%r8)
vpshufb shuf48_16(%rip), %ymm11, %ymm11
vpand mask3_5_3_5(%rip), %ymm11, %ymm10
vpand mask5_3_5_3(%rip), %ymm11, %ymm11
vpermq $206, %ymm10, %ymm10
vpand mask_keephigh(%rip), %ymm10, %ymm4
vpor %ymm4, %ymm11, %ymm11
vpaddw 2592(%r8), %ymm8, %ymm8
vpaddw %ymm11, %ymm8, %ymm8
vmovdqa %xmm10, 2592(%r8)
vpand mask_mod8192(%rip), %ymm5, %ymm5
vmovdqu %ymm5, 120(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 472(%rdi)
vpand mask_mod8192(%rip), %ymm8, %ymm8
vmovdqu %ymm8, 824(%rdi)
vpand mask_mod8192(%rip), %ymm3, %ymm3
vmovdqu %ymm3, 1176(%rdi)
vmovdqa 64(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm9
vpunpckhwd const0(%rip), %ymm11, %ymm2
vpslld $1, %ymm9, %ymm9
vpslld $1, %ymm2, %ymm2
vmovdqa 320(%r8), %ymm3
vpunpcklwd const0(%rip), %ymm3, %ymm8
vpunpckhwd const0(%rip), %ymm3, %ymm3
vmovdqa 576(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm5
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm5, %ymm8, %ymm10
vpaddd %ymm6, %ymm3, %ymm4
vpsubd %ymm9, %ymm10, %ymm10
vpsubd %ymm2, %ymm4, %ymm4
vpsubd %ymm5, %ymm8, %ymm5
vpsubd %ymm6, %ymm3, %ymm6
vpsrld $1, %ymm5, %ymm5
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm5, %ymm5
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm5, %ymm6
vmovdqa 1600(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm3
vpunpckhwd const0(%rip), %ymm5, %ymm8
vpslld $1, %ymm3, %ymm3
vpslld $1, %ymm8, %ymm8
vpsubd %ymm3, %ymm10, %ymm10
vpsubd %ymm8, %ymm4, %ymm4
vpsrld $1, %ymm10, %ymm10
vpsrld $1, %ymm4, %ymm4
vpand mask32_to_16(%rip), %ymm10, %ymm10
vpand mask32_to_16(%rip), %ymm4, %ymm4
vpackusdw %ymm4, %ymm10, %ymm4
vmovdqa 832(%r8), %ymm10
vpaddw 1088(%r8), %ymm10, %ymm8
vpsubw 1088(%r8), %ymm10, %ymm10
vpsrlw $2, %ymm10, %ymm10
vpsubw %ymm6, %ymm10, %ymm10
vpmullw %ymm14, %ymm10, %ymm10
vpsllw $1, %ymm11, %ymm3
vpsubw %ymm3, %ymm8, %ymm3
vpsllw $7, %ymm5, %ymm8
vpsubw %ymm8, %ymm3, %ymm8
vpsrlw $3, %ymm8, %ymm8
vpsubw %ymm4, %ymm8, %ymm8
vmovdqa 1344(%r8), %ymm3
vpsubw %ymm11, %ymm3, %ymm3
vpmullw %ymm15, %ymm5, %ymm2
vpsubw %ymm2, %ymm3, %ymm2
vpmullw %ymm14, %ymm8, %ymm8
vpsubw %ymm8, %ymm4, %ymm4
vpmullw %ymm12, %ymm8, %ymm3
vpaddw %ymm3, %ymm4, %ymm3
vpmullw %ymm12, %ymm3, %ymm3
vpsubw %ymm3, %ymm2, %ymm3
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm6, %ymm3, %ymm3
vpsrlw $3, %ymm3, %ymm3
vpsubw %ymm10, %ymm3, %ymm3
vpsubw %ymm3, %ymm10, %ymm10
vpsubw %ymm10, %ymm6, %ymm6
vpmullw %ymm13, %ymm3, %ymm3
vpsubw %ymm3, %ymm6, %ymm6
vpshufb shuf48_16(%rip), %ymm8, %ymm8
vpand mask3_5_3_5(%rip), %ymm8, %ymm2
vpand mask5_3_5_3(%rip), %ymm8, %ymm8
vpermq $206, %ymm2, %ymm2
vpand mask_keephigh(%rip), %ymm2, %ymm9
vpor %ymm9, %ymm8, %ymm8
vpaddw 2112(%r8), %ymm11, %ymm11
vpaddw %ymm8, %ymm11, %ymm11
vmovdqa %xmm2, 2112(%r8)
vpshufb shuf48_16(%rip), %ymm3, %ymm3
vpand mask3_5_3_5(%rip), %ymm3, %ymm2
vpand mask5_3_5_3(%rip), %ymm3, %ymm3
vpermq $206, %ymm2, %ymm2
vpand mask_keephigh(%rip), %ymm2, %ymm9
vpor %ymm9, %ymm3, %ymm3
vpaddw 2368(%r8), %ymm6, %ymm6
vpaddw %ymm3, %ymm6, %ymm6
vmovdqa %xmm2, 2368(%r8)
vpshufb shuf48_16(%rip), %ymm5, %ymm5
vpand mask3_5_3_5(%rip), %ymm5, %ymm2
vpand mask5_3_5_3(%rip), %ymm5, %ymm5
vpermq $206, %ymm2, %ymm2
vpand mask_keephigh(%rip), %ymm2, %ymm9
vpor %ymm9, %ymm5, %ymm5
vpaddw 2624(%r8), %ymm4, %ymm4
vpaddw %ymm5, %ymm4, %ymm4
vmovdqa %xmm2, 2624(%r8)
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 208(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 560(%rdi)
vpand mask_mod8192(%rip), %ymm4, %ymm4
vmovdqu %ymm4, 912(%rdi)
vpand mask_mod8192(%rip), %ymm10, %ymm10
vmovdqu %ymm10, 1264(%rdi)
vmovdqa 96(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm3
vpunpckhwd const0(%rip), %ymm5, %ymm8
vpslld $1, %ymm3, %ymm3
vpslld $1, %ymm8, %ymm8
vmovdqa 352(%r8), %ymm10
vpunpcklwd const0(%rip), %ymm10, %ymm4
vpunpckhwd const0(%rip), %ymm10, %ymm10
vmovdqa 608(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm11
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm11, %ymm4, %ymm2
vpaddd %ymm6, %ymm10, %ymm9
vpsubd %ymm3, %ymm2, %ymm2
vpsubd %ymm8, %ymm9, %ymm9
vpsubd %ymm11, %ymm4, %ymm11
vpsubd %ymm6, %ymm10, %ymm6
vpsrld $1, %ymm11, %ymm11
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm11, %ymm11
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm11, %ymm6
vmovdqa 1632(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm10
vpunpckhwd const0(%rip), %ymm11, %ymm4
vpslld $1, %ymm10, %ymm10
vpslld $1, %ymm4, %ymm4
vpsubd %ymm10, %ymm2, %ymm2
vpsubd %ymm4, %ymm9, %ymm9
vpsrld $1, %ymm2, %ymm2
vpsrld $1, %ymm9, %ymm9
vpand mask32_to_16(%rip), %ymm2, %ymm2
vpand mask32_to_16(%rip), %ymm9, %ymm9
vpackusdw %ymm9, %ymm2, %ymm9
vmovdqa 864(%r8), %ymm2
vpaddw 1120(%r8), %ymm2, %ymm4
vpsubw 1120(%r8), %ymm2, %ymm2
vpsrlw $2, %ymm2, %ymm2
vpsubw %ymm6, %ymm2, %ymm2
vpmullw %ymm14, %ymm2, %ymm2
vpsllw $1, %ymm5, %ymm10
vpsubw %ymm10, %ymm4, %ymm10
vpsllw $7, %ymm11, %ymm4
vpsubw %ymm4, %ymm10, %ymm4
vpsrlw $3, %ymm4, %ymm4
vpsubw %ymm9, %ymm4, %ymm4
vmovdqa 1376(%r8), %ymm10
vpsubw %ymm5, %ymm10, %ymm10
vpmullw %ymm15, %ymm11, %ymm8
vpsubw %ymm8, %ymm10, %ymm8
vpmullw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm9, %ymm9
vpmullw %ymm12, %ymm4, %ymm10
vpaddw %ymm10, %ymm9, %ymm10
vpmullw %ymm12, %ymm10, %ymm10
vpsubw %ymm10, %ymm8, %ymm10
vpmullw %ymm14, %ymm10, %ymm10
vpsubw %ymm6, %ymm10, %ymm10
vpsrlw $3, %ymm10, %ymm10
vpsubw %ymm2, %ymm10, %ymm10
vpsubw %ymm10, %ymm2, %ymm2
vpsubw %ymm2, %ymm6, %ymm6
vpmullw %ymm13, %ymm10, %ymm10
vpsubw %ymm10, %ymm6, %ymm6
vpshufb shuf48_16(%rip), %ymm4, %ymm4
vpand mask3_5_3_5(%rip), %ymm4, %ymm8
vpand mask5_3_5_3(%rip), %ymm4, %ymm4
vpermq $206, %ymm8, %ymm8
vpand mask_keephigh(%rip), %ymm8, %ymm3
vpor %ymm3, %ymm4, %ymm4
vpaddw 2144(%r8), %ymm5, %ymm5
vpaddw %ymm4, %ymm5, %ymm5
vmovdqa %xmm8, 2144(%r8)
vpshufb shuf48_16(%rip), %ymm10, %ymm10
vpand mask3_5_3_5(%rip), %ymm10, %ymm8
vpand mask5_3_5_3(%rip), %ymm10, %ymm10
vpermq $206, %ymm8, %ymm8
vpand mask_keephigh(%rip), %ymm8, %ymm3
vpor %ymm3, %ymm10, %ymm10
vpaddw 2400(%r8), %ymm6, %ymm6
vpaddw %ymm10, %ymm6, %ymm6
vmovdqa %xmm8, 2400(%r8)
vpshufb shuf48_16(%rip), %ymm11, %ymm11
vpand mask3_5_3_5(%rip), %ymm11, %ymm8
vpand mask5_3_5_3(%rip), %ymm11, %ymm11
vpermq $206, %ymm8, %ymm8
vpand mask_keephigh(%rip), %ymm8, %ymm3
vpor %ymm3, %ymm11, %ymm11
vpaddw 2656(%r8), %ymm9, %ymm9
vpaddw %ymm11, %ymm9, %ymm9
vmovdqa %xmm8, 2656(%r8)
vpand mask_mod8192(%rip), %ymm5, %ymm5
vmovdqu %ymm5, 296(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 648(%rdi)
vpand mask_mod8192(%rip), %ymm9, %ymm9
vmovdqu %ymm9, 1000(%rdi)
vpand mask_mod8192(%rip), %ymm2, %ymm2
vmovdqu %ymm2, 1352(%rdi)
vmovdqa 128(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm10
vpunpckhwd const0(%rip), %ymm11, %ymm4
vpslld $1, %ymm10, %ymm10
vpslld $1, %ymm4, %ymm4
vmovdqa 384(%r8), %ymm2
vpunpcklwd const0(%rip), %ymm2, %ymm9
vpunpckhwd const0(%rip), %ymm2, %ymm2
vmovdqa 640(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm5
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm5, %ymm9, %ymm8
vpaddd %ymm6, %ymm2, %ymm3
vpsubd %ymm10, %ymm8, %ymm8
vpsubd %ymm4, %ymm3, %ymm3
vpsubd %ymm5, %ymm9, %ymm5
vpsubd %ymm6, %ymm2, %ymm6
vpsrld $1, %ymm5, %ymm5
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm5, %ymm5
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm5, %ymm6
vmovdqa 1664(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm2
vpunpckhwd const0(%rip), %ymm5, %ymm9
vpslld $1, %ymm2, %ymm2
vpslld $1, %ymm9, %ymm9
vpsubd %ymm2, %ymm8, %ymm8
vpsubd %ymm9, %ymm3, %ymm3
vpsrld $1, %ymm8, %ymm8
vpsrld $1, %ymm3, %ymm3
vpand mask32_to_16(%rip), %ymm8, %ymm8
vpand mask32_to_16(%rip), %ymm3, %ymm3
vpackusdw %ymm3, %ymm8, %ymm3
vmovdqa 896(%r8), %ymm8
vpaddw 1152(%r8), %ymm8, %ymm9
vpsubw 1152(%r8), %ymm8, %ymm8
vpsrlw $2, %ymm8, %ymm8
vpsubw %ymm6, %ymm8, %ymm8
vpmullw %ymm14, %ymm8, %ymm8
vpsllw $1, %ymm11, %ymm2
vpsubw %ymm2, %ymm9, %ymm2
vpsllw $7, %ymm5, %ymm9
vpsubw %ymm9, %ymm2, %ymm9
vpsrlw $3, %ymm9, %ymm9
vpsubw %ymm3, %ymm9, %ymm9
vmovdqa 1408(%r8), %ymm2
vpsubw %ymm11, %ymm2, %ymm2
vpmullw %ymm15, %ymm5, %ymm4
vpsubw %ymm4, %ymm2, %ymm4
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm9, %ymm3, %ymm3
vpmullw %ymm12, %ymm9, %ymm2
vpaddw %ymm2, %ymm3, %ymm2
vpmullw %ymm12, %ymm2, %ymm2
vpsubw %ymm2, %ymm4, %ymm2
vpmullw %ymm14, %ymm2, %ymm2
vpsubw %ymm6, %ymm2, %ymm2
vpsrlw $3, %ymm2, %ymm2
vpsubw %ymm8, %ymm2, %ymm2
vpsubw %ymm2, %ymm8, %ymm8
vpsubw %ymm8, %ymm6, %ymm6
vpmullw %ymm13, %ymm2, %ymm2
vpsubw %ymm2, %ymm6, %ymm6
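// As in the first pass, this row overlaps output already written, so the
// code accumulates into 32/384/736/1088(%rdi) and also folds in the
// carry rows saved earlier (1920(%r8), 2176(%r8), and so on).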
vmovdqu 384(%rdi), %ymm4
vmovdqu 736(%rdi), %ymm10
vmovdqu 1088(%rdi), %ymm7
vpaddw %ymm11, %ymm4, %ymm11
vpaddw %ymm6, %ymm10, %ymm6
vpaddw %ymm3, %ymm7, %ymm3
vpshufb shuf48_16(%rip), %ymm8, %ymm8
vpand mask3_5_3_5(%rip), %ymm8, %ymm7
vpand mask5_3_5_3(%rip), %ymm8, %ymm8
vpermq $206, %ymm7, %ymm7
vpand mask_keephigh(%rip), %ymm7, %ymm10
vpor %ymm10, %ymm8, %ymm8
vmovdqu 32(%rdi), %ymm10
vpaddw 1920(%r8), %ymm10, %ymm10
vpaddw %ymm8, %ymm10, %ymm10
vpand mask_mod8192(%rip), %ymm10, %ymm10
vmovdqu %ymm10, 32(%rdi)
vmovdqa %xmm7, 1920(%r8)
vpshufb shuf48_16(%rip), %ymm9, %ymm9
vpand mask3_5_3_5(%rip), %ymm9, %ymm7
vpand mask5_3_5_3(%rip), %ymm9, %ymm9
vpermq $206, %ymm7, %ymm7
vpand mask_keephigh(%rip), %ymm7, %ymm10
vpor %ymm10, %ymm9, %ymm9
vpaddw 2176(%r8), %ymm11, %ymm11
vpaddw %ymm9, %ymm11, %ymm11
vmovdqa %xmm7, 2176(%r8)
vpshufb shuf48_16(%rip), %ymm2, %ymm2
vpand mask3_5_3_5(%rip), %ymm2, %ymm7
vpand mask5_3_5_3(%rip), %ymm2, %ymm2
vpermq $206, %ymm7, %ymm7
vpand mask_keephigh(%rip), %ymm7, %ymm10
vpor %ymm10, %ymm2, %ymm2
vpaddw 2432(%r8), %ymm6, %ymm6
vpaddw %ymm2, %ymm6, %ymm6
vmovdqa %xmm7, 2432(%r8)
vpshufb shuf48_16(%rip), %ymm5, %ymm5
vpand mask3_5_3_5(%rip), %ymm5, %ymm7
vpand mask5_3_5_3(%rip), %ymm5, %ymm5
vpermq $206, %ymm7, %ymm7
vpand mask_keephigh(%rip), %ymm7, %ymm10
vpor %ymm10, %ymm5, %ymm5
vpaddw 2688(%r8), %ymm3, %ymm3
vpaddw %ymm5, %ymm3, %ymm3
vmovdqa %xmm7, 2688(%r8)
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 384(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 736(%rdi)
vpand mask_mod8192(%rip), %ymm3, %ymm3
vmovdqu %ymm3, 1088(%rdi)
vmovdqa 160(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm2
vpunpckhwd const0(%rip), %ymm5, %ymm9
vpslld $1, %ymm2, %ymm2
vpslld $1, %ymm9, %ymm9
vmovdqa 416(%r8), %ymm8
vpunpcklwd const0(%rip), %ymm8, %ymm3
vpunpckhwd const0(%rip), %ymm8, %ymm8
vmovdqa 672(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm11
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm11, %ymm3, %ymm7
vpaddd %ymm6, %ymm8, %ymm10
vpsubd %ymm2, %ymm7, %ymm7
vpsubd %ymm9, %ymm10, %ymm10
vpsubd %ymm11, %ymm3, %ymm11
vpsubd %ymm6, %ymm8, %ymm6
vpsrld $1, %ymm11, %ymm11
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm11, %ymm11
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm11, %ymm6
vmovdqa 1696(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm8
vpunpckhwd const0(%rip), %ymm11, %ymm3
vpslld $1, %ymm8, %ymm8
vpslld $1, %ymm3, %ymm3
vpsubd %ymm8, %ymm7, %ymm7
vpsubd %ymm3, %ymm10, %ymm10
vpsrld $1, %ymm7, %ymm7
vpsrld $1, %ymm10, %ymm10
vpand mask32_to_16(%rip), %ymm7, %ymm7
vpand mask32_to_16(%rip), %ymm10, %ymm10
vpackusdw %ymm10, %ymm7, %ymm10
vmovdqa 928(%r8), %ymm7
vpaddw 1184(%r8), %ymm7, %ymm3
vpsubw 1184(%r8), %ymm7, %ymm7
vpsrlw $2, %ymm7, %ymm7
vpsubw %ymm6, %ymm7, %ymm7
vpmullw %ymm14, %ymm7, %ymm7
vpsllw $1, %ymm5, %ymm8
vpsubw %ymm8, %ymm3, %ymm8
vpsllw $7, %ymm11, %ymm3
vpsubw %ymm3, %ymm8, %ymm3
vpsrlw $3, %ymm3, %ymm3
vpsubw %ymm10, %ymm3, %ymm3
vmovdqa 1440(%r8), %ymm8
vpsubw %ymm5, %ymm8, %ymm8
vpmullw %ymm15, %ymm11, %ymm9
vpsubw %ymm9, %ymm8, %ymm9
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm3, %ymm10, %ymm10
vpmullw %ymm12, %ymm3, %ymm8
vpaddw %ymm8, %ymm10, %ymm8
vpmullw %ymm12, %ymm8, %ymm8
vpsubw %ymm8, %ymm9, %ymm8
vpmullw %ymm14, %ymm8, %ymm8
vpsubw %ymm6, %ymm8, %ymm8
vpsrlw $3, %ymm8, %ymm8
vpsubw %ymm7, %ymm8, %ymm8
vpsubw %ymm8, %ymm7, %ymm7
vpsubw %ymm7, %ymm6, %ymm6
vpmullw %ymm13, %ymm8, %ymm8
vpsubw %ymm8, %ymm6, %ymm6
vmovdqu 472(%rdi), %ymm9
vmovdqu 824(%rdi), %ymm2
vmovdqu 1176(%rdi), %ymm4
vpaddw %ymm5, %ymm9, %ymm5
vpaddw %ymm6, %ymm2, %ymm6
vpaddw %ymm10, %ymm4, %ymm10
vpshufb shuf48_16(%rip), %ymm7, %ymm7
vpand mask3_5_3_5(%rip), %ymm7, %ymm4
vpand mask5_3_5_3(%rip), %ymm7, %ymm7
vpermq $206, %ymm4, %ymm4
vpand mask_keephigh(%rip), %ymm4, %ymm2
vpor %ymm2, %ymm7, %ymm7
vmovdqu 120(%rdi), %ymm2
vpaddw 1952(%r8), %ymm2, %ymm2
vpaddw %ymm7, %ymm2, %ymm2
vpand mask_mod8192(%rip), %ymm2, %ymm2
vmovdqu %ymm2, 120(%rdi)
vmovdqa %xmm4, 1952(%r8)
vpshufb shuf48_16(%rip), %ymm3, %ymm3
vpand mask3_5_3_5(%rip), %ymm3, %ymm4
vpand mask5_3_5_3(%rip), %ymm3, %ymm3
vpermq $206, %ymm4, %ymm4
vpand mask_keephigh(%rip), %ymm4, %ymm2
vpor %ymm2, %ymm3, %ymm3
vpaddw 2208(%r8), %ymm5, %ymm5
vpaddw %ymm3, %ymm5, %ymm5
vmovdqa %xmm4, 2208(%r8)
vpshufb shuf48_16(%rip), %ymm8, %ymm8
vpand mask3_5_3_5(%rip), %ymm8, %ymm4
vpand mask5_3_5_3(%rip), %ymm8, %ymm8
vpermq $206, %ymm4, %ymm4
vpand mask_keephigh(%rip), %ymm4, %ymm2
vpor %ymm2, %ymm8, %ymm8
vpaddw 2464(%r8), %ymm6, %ymm6
vpaddw %ymm8, %ymm6, %ymm6
vmovdqa %xmm4, 2464(%r8)
vpshufb shuf48_16(%rip), %ymm11, %ymm11
vpand mask3_5_3_5(%rip), %ymm11, %ymm4
vpand mask5_3_5_3(%rip), %ymm11, %ymm11
vpermq $206, %ymm4, %ymm4
vpand mask_keephigh(%rip), %ymm4, %ymm2
vpor %ymm2, %ymm11, %ymm11
vpaddw 2720(%r8), %ymm10, %ymm10
vpaddw %ymm11, %ymm10, %ymm10
vmovdqa %xmm4, 2720(%r8)
vpand mask_mod8192(%rip), %ymm5, %ymm5
vmovdqu %ymm5, 472(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 824(%rdi)
vpand mask_mod8192(%rip), %ymm10, %ymm10
vmovdqu %ymm10, 1176(%rdi)
vmovdqa 192(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm8
vpunpckhwd const0(%rip), %ymm11, %ymm3
vpslld $1, %ymm8, %ymm8
vpslld $1, %ymm3, %ymm3
vmovdqa 448(%r8), %ymm7
vpunpcklwd const0(%rip), %ymm7, %ymm10
vpunpckhwd const0(%rip), %ymm7, %ymm7
vmovdqa 704(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm5
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm5, %ymm10, %ymm4
vpaddd %ymm6, %ymm7, %ymm2
vpsubd %ymm8, %ymm4, %ymm4
vpsubd %ymm3, %ymm2, %ymm2
vpsubd %ymm5, %ymm10, %ymm5
vpsubd %ymm6, %ymm7, %ymm6
vpsrld $1, %ymm5, %ymm5
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm5, %ymm5
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm5, %ymm6
vmovdqa 1728(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm7
vpunpckhwd const0(%rip), %ymm5, %ymm10
vpslld $1, %ymm7, %ymm7
vpslld $1, %ymm10, %ymm10
vpsubd %ymm7, %ymm4, %ymm4
vpsubd %ymm10, %ymm2, %ymm2
vpsrld $1, %ymm4, %ymm4
vpsrld $1, %ymm2, %ymm2
vpand mask32_to_16(%rip), %ymm4, %ymm4
vpand mask32_to_16(%rip), %ymm2, %ymm2
vpackusdw %ymm2, %ymm4, %ymm2
vmovdqa 960(%r8), %ymm4
vpaddw 1216(%r8), %ymm4, %ymm10
vpsubw 1216(%r8), %ymm4, %ymm4
vpsrlw $2, %ymm4, %ymm4
vpsubw %ymm6, %ymm4, %ymm4
vpmullw %ymm14, %ymm4, %ymm4
vpsllw $1, %ymm11, %ymm7
vpsubw %ymm7, %ymm10, %ymm7
vpsllw $7, %ymm5, %ymm10
vpsubw %ymm10, %ymm7, %ymm10
vpsrlw $3, %ymm10, %ymm10
vpsubw %ymm2, %ymm10, %ymm10
vmovdqa 1472(%r8), %ymm7
vpsubw %ymm11, %ymm7, %ymm7
vpmullw %ymm15, %ymm5, %ymm3
vpsubw %ymm3, %ymm7, %ymm3
vpmullw %ymm14, %ymm10, %ymm10
vpsubw %ymm10, %ymm2, %ymm2
vpmullw %ymm12, %ymm10, %ymm7
vpaddw %ymm7, %ymm2, %ymm7
vpmullw %ymm12, %ymm7, %ymm7
vpsubw %ymm7, %ymm3, %ymm7
vpmullw %ymm14, %ymm7, %ymm7
vpsubw %ymm6, %ymm7, %ymm7
vpsrlw $3, %ymm7, %ymm7
vpsubw %ymm4, %ymm7, %ymm7
vpsubw %ymm7, %ymm4, %ymm4
vpsubw %ymm4, %ymm6, %ymm6
vpmullw %ymm13, %ymm7, %ymm7
vpsubw %ymm7, %ymm6, %ymm6
vmovdqu 560(%rdi), %ymm3
vmovdqu 912(%rdi), %ymm8
vmovdqu 1264(%rdi), %ymm9
vpaddw %ymm11, %ymm3, %ymm11
vpaddw %ymm6, %ymm8, %ymm6
vpaddw %ymm2, %ymm9, %ymm2
vpshufb shuf48_16(%rip), %ymm4, %ymm4
vpand mask3_5_3_5(%rip), %ymm4, %ymm9
vpand mask5_3_5_3(%rip), %ymm4, %ymm4
vpermq $206, %ymm9, %ymm9
vpand mask_keephigh(%rip), %ymm9, %ymm8
vpor %ymm8, %ymm4, %ymm4
vmovdqu 208(%rdi), %ymm8
vpaddw 1984(%r8), %ymm8, %ymm8
vpaddw %ymm4, %ymm8, %ymm8
vpand mask_mod8192(%rip), %ymm8, %ymm8
vmovdqu %ymm8, 208(%rdi)
vmovdqa %xmm9, 1984(%r8)
vpshufb shuf48_16(%rip), %ymm10, %ymm10
vpand mask3_5_3_5(%rip), %ymm10, %ymm9
vpand mask5_3_5_3(%rip), %ymm10, %ymm10
vpermq $206, %ymm9, %ymm9
vpand mask_keephigh(%rip), %ymm9, %ymm8
vpor %ymm8, %ymm10, %ymm10
vpaddw 2240(%r8), %ymm11, %ymm11
vpaddw %ymm10, %ymm11, %ymm11
vmovdqa %xmm9, 2240(%r8)
vpshufb shuf48_16(%rip), %ymm7, %ymm7
vpand mask3_5_3_5(%rip), %ymm7, %ymm9
vpand mask5_3_5_3(%rip), %ymm7, %ymm7
vpermq $206, %ymm9, %ymm9
vpand mask_keephigh(%rip), %ymm9, %ymm8
vpor %ymm8, %ymm7, %ymm7
vpaddw 2496(%r8), %ymm6, %ymm6
vpaddw %ymm7, %ymm6, %ymm6
vmovdqa %xmm9, 2496(%r8)
vpshufb shuf48_16(%rip), %ymm5, %ymm5
vpand mask3_5_3_5(%rip), %ymm5, %ymm9
vpand mask5_3_5_3(%rip), %ymm5, %ymm5
vpermq $206, %ymm9, %ymm9
vpand mask_keephigh(%rip), %ymm9, %ymm8
vpor %ymm8, %ymm5, %ymm5
vpaddw 2752(%r8), %ymm2, %ymm2
vpaddw %ymm5, %ymm2, %ymm2
vmovdqa %xmm9, 2752(%r8)
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 560(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 912(%rdi)
vpand mask_mod8192(%rip), %ymm2, %ymm2
vmovdqu %ymm2, 1264(%rdi)
vmovdqa 224(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm7
vpunpckhwd const0(%rip), %ymm5, %ymm10
vpslld $1, %ymm7, %ymm7
vpslld $1, %ymm10, %ymm10
vmovdqa 480(%r8), %ymm4
vpunpcklwd const0(%rip), %ymm4, %ymm2
vpunpckhwd const0(%rip), %ymm4, %ymm4
vmovdqa 736(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm11
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm11, %ymm2, %ymm9
vpaddd %ymm6, %ymm4, %ymm8
vpsubd %ymm7, %ymm9, %ymm9
vpsubd %ymm10, %ymm8, %ymm8
vpsubd %ymm11, %ymm2, %ymm11
vpsubd %ymm6, %ymm4, %ymm6
vpsrld $1, %ymm11, %ymm11
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm11, %ymm11
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm11, %ymm6
vmovdqa 1760(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm4
vpunpckhwd const0(%rip), %ymm11, %ymm2
vpslld $1, %ymm4, %ymm4
vpslld $1, %ymm2, %ymm2
vpsubd %ymm4, %ymm9, %ymm9
vpsubd %ymm2, %ymm8, %ymm8
vpsrld $1, %ymm9, %ymm9
vpsrld $1, %ymm8, %ymm8
vpand mask32_to_16(%rip), %ymm9, %ymm9
vpand mask32_to_16(%rip), %ymm8, %ymm8
vpackusdw %ymm8, %ymm9, %ymm8
vmovdqa 992(%r8), %ymm9
vpaddw 1248(%r8), %ymm9, %ymm2
vpsubw 1248(%r8), %ymm9, %ymm9
vpsrlw $2, %ymm9, %ymm9
vpsubw %ymm6, %ymm9, %ymm9
vpmullw %ymm14, %ymm9, %ymm9
vpsllw $1, %ymm5, %ymm4
vpsubw %ymm4, %ymm2, %ymm4
vpsllw $7, %ymm11, %ymm2
vpsubw %ymm2, %ymm4, %ymm2
vpsrlw $3, %ymm2, %ymm2
vpsubw %ymm8, %ymm2, %ymm2
vmovdqa 1504(%r8), %ymm4
vpsubw %ymm5, %ymm4, %ymm4
vpmullw %ymm15, %ymm11, %ymm10
vpsubw %ymm10, %ymm4, %ymm10
vpmullw %ymm14, %ymm2, %ymm2
vpsubw %ymm2, %ymm8, %ymm8
vpmullw %ymm12, %ymm2, %ymm4
vpaddw %ymm4, %ymm8, %ymm4
vpmullw %ymm12, %ymm4, %ymm4
vpsubw %ymm4, %ymm10, %ymm4
vpmullw %ymm14, %ymm4, %ymm4
vpsubw %ymm6, %ymm4, %ymm4
vpsrlw $3, %ymm4, %ymm4
vpsubw %ymm9, %ymm4, %ymm4
vpsubw %ymm4, %ymm9, %ymm9
vpsubw %ymm9, %ymm6, %ymm6
vpmullw %ymm13, %ymm4, %ymm4
vpsubw %ymm4, %ymm6, %ymm6
vmovdqu 648(%rdi), %ymm10
vmovdqu 1000(%rdi), %ymm7
vmovdqu 1352(%rdi), %ymm3
vpaddw %ymm5, %ymm10, %ymm5
vpaddw %ymm6, %ymm7, %ymm6
vpaddw %ymm8, %ymm3, %ymm8
vpshufb shuf48_16(%rip), %ymm9, %ymm9
vpand mask3_5_3_5(%rip), %ymm9, %ymm3
vpand mask5_3_5_3(%rip), %ymm9, %ymm9
vpermq $206, %ymm3, %ymm3
vpand mask_keephigh(%rip), %ymm3, %ymm7
vpor %ymm7, %ymm9, %ymm9
vmovdqu 296(%rdi), %ymm7
vpaddw 2016(%r8), %ymm7, %ymm7
vpaddw %ymm9, %ymm7, %ymm7
vpand mask_mod8192(%rip), %ymm7, %ymm7
vmovdqu %ymm7, 296(%rdi)
vmovdqa %xmm3, 2016(%r8)
vpshufb shuf48_16(%rip), %ymm2, %ymm2
vpand mask3_5_3_5(%rip), %ymm2, %ymm3
vpand mask5_3_5_3(%rip), %ymm2, %ymm2
vpermq $206, %ymm3, %ymm3
vpand mask_keephigh(%rip), %ymm3, %ymm7
vpor %ymm7, %ymm2, %ymm2
vpaddw 2272(%r8), %ymm5, %ymm5
vpaddw %ymm2, %ymm5, %ymm5
vmovdqa %xmm3, 2272(%r8)
vpshufb shuf48_16(%rip), %ymm4, %ymm4
vpand mask3_5_3_5(%rip), %ymm4, %ymm3
vpand mask5_3_5_3(%rip), %ymm4, %ymm4
vpermq $206, %ymm3, %ymm3
vpand mask_keephigh(%rip), %ymm3, %ymm7
vpor %ymm7, %ymm4, %ymm4
vpaddw 2528(%r8), %ymm6, %ymm6
vpaddw %ymm4, %ymm6, %ymm6
vmovdqa %xmm3, 2528(%r8)
vpshufb shuf48_16(%rip), %ymm11, %ymm11
vpand mask3_5_3_5(%rip), %ymm11, %ymm3
vpand mask5_3_5_3(%rip), %ymm11, %ymm11
vpermq $206, %ymm3, %ymm3
vpand mask_keephigh(%rip), %ymm3, %ymm7
vpor %ymm7, %ymm11, %ymm11
vpaddw 2784(%r8), %ymm8, %ymm8
vpaddw %ymm11, %ymm8, %ymm8
vmovdqa %xmm3, 2784(%r8)
vpand mask_mod8192(%rip), %ymm5, %ymm5
vmovdqu %ymm5, 648(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %ymm6, 1000(%rdi)
vpand mask_mod8192(%rip), %ymm8, %ymm8
vmovdqu %ymm8, 1352(%rdi)
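// Same recombination as above, advanced 32 bytes into each partial
// product held at (%r12), rebuilding the next set of eight rows in (%r8)
// for the following interpolation pass.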
vmovdqa 160(%r12), %ymm0
vpsubw 256(%r12), %ymm0, %ymm0
vmovdqa 544(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 352(%r12), %ymm1, %ymm1
vpsubw 64(%r12), %ymm0, %ymm0
vpaddw 448(%r12), %ymm0, %ymm0
vmovdqa 736(%r12), %ymm2
vpsubw 832(%r12), %ymm2, %ymm2
vmovdqa 1120(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 928(%r12), %ymm3, %ymm3
vpsubw 640(%r12), %ymm2, %ymm2
vpaddw 1024(%r12), %ymm2, %ymm2
vmovdqa 1312(%r12), %ymm4
vpsubw 1408(%r12), %ymm4, %ymm4
vmovdqa 1696(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 1504(%r12), %ymm5, %ymm5
vpsubw 1216(%r12), %ymm4, %ymm4
vpaddw 1600(%r12), %ymm4, %ymm4
vpsubw 640(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 64(%r12), %ymm1, %ymm1
vpaddw 1216(%r12), %ymm1, %ymm1
vmovdqa 352(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 1504(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 928(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 64(%r12), %ymm8
vmovdqa 928(%r12), %ymm9
vmovdqa %ymm8, 0(%r8)
vmovdqa %ymm0, 32(%r8)
vmovdqa %ymm1, 64(%r8)
vmovdqa %ymm7, 96(%r8)
vmovdqa %ymm5, 128(%r8)
vmovdqa %ymm2, 160(%r8)
vmovdqa %ymm3, 192(%r8)
vmovdqa %ymm9, 224(%r8)
vmovdqa 1888(%r12), %ymm0
vpsubw 1984(%r12), %ymm0, %ymm0
vmovdqa 2272(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 2080(%r12), %ymm1, %ymm1
vpsubw 1792(%r12), %ymm0, %ymm0
vpaddw 2176(%r12), %ymm0, %ymm0
vmovdqa 2464(%r12), %ymm2
vpsubw 2560(%r12), %ymm2, %ymm2
vmovdqa 2848(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 2656(%r12), %ymm3, %ymm3
vpsubw 2368(%r12), %ymm2, %ymm2
vpaddw 2752(%r12), %ymm2, %ymm2
vmovdqa 3040(%r12), %ymm4
vpsubw 3136(%r12), %ymm4, %ymm4
vmovdqa 3424(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 3232(%r12), %ymm5, %ymm5
vpsubw 2944(%r12), %ymm4, %ymm4
vpaddw 3328(%r12), %ymm4, %ymm4
vpsubw 2368(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 1792(%r12), %ymm1, %ymm1
vpaddw 2944(%r12), %ymm1, %ymm1
vmovdqa 2080(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 3232(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 2656(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 1792(%r12), %ymm8
vmovdqa 2656(%r12), %ymm9
vmovdqa %ymm8, 256(%r8)
vmovdqa %ymm0, 288(%r8)
vmovdqa %ymm1, 320(%r8)
vmovdqa %ymm7, 352(%r8)
vmovdqa %ymm5, 384(%r8)
vmovdqa %ymm2, 416(%r8)
vmovdqa %ymm3, 448(%r8)
vmovdqa %ymm9, 480(%r8)
vmovdqa 3616(%r12), %ymm0
vpsubw 3712(%r12), %ymm0, %ymm0
vmovdqa 4000(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 3808(%r12), %ymm1, %ymm1
vpsubw 3520(%r12), %ymm0, %ymm0
vpaddw 3904(%r12), %ymm0, %ymm0
vmovdqa 4192(%r12), %ymm2
vpsubw 4288(%r12), %ymm2, %ymm2
vmovdqa 4576(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 4384(%r12), %ymm3, %ymm3
vpsubw 4096(%r12), %ymm2, %ymm2
vpaddw 4480(%r12), %ymm2, %ymm2
vmovdqa 4768(%r12), %ymm4
vpsubw 4864(%r12), %ymm4, %ymm4
vmovdqa 5152(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 4960(%r12), %ymm5, %ymm5
vpsubw 4672(%r12), %ymm4, %ymm4
vpaddw 5056(%r12), %ymm4, %ymm4
vpsubw 4096(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 3520(%r12), %ymm1, %ymm1
vpaddw 4672(%r12), %ymm1, %ymm1
vmovdqa 3808(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 4960(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 4384(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 3520(%r12), %ymm8
vmovdqa 4384(%r12), %ymm9
vmovdqa %ymm8, 512(%r8)
vmovdqa %ymm0, 544(%r8)
vmovdqa %ymm1, 576(%r8)
vmovdqa %ymm7, 608(%r8)
vmovdqa %ymm5, 640(%r8)
vmovdqa %ymm2, 672(%r8)
vmovdqa %ymm3, 704(%r8)
vmovdqa %ymm9, 736(%r8)
vmovdqa 5344(%r12), %ymm0
vpsubw 5440(%r12), %ymm0, %ymm0
vmovdqa 5728(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 5536(%r12), %ymm1, %ymm1
vpsubw 5248(%r12), %ymm0, %ymm0
vpaddw 5632(%r12), %ymm0, %ymm0
vmovdqa 5920(%r12), %ymm2
vpsubw 6016(%r12), %ymm2, %ymm2
vmovdqa 6304(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 6112(%r12), %ymm3, %ymm3
vpsubw 5824(%r12), %ymm2, %ymm2
vpaddw 6208(%r12), %ymm2, %ymm2
vmovdqa 6496(%r12), %ymm4
vpsubw 6592(%r12), %ymm4, %ymm4
vmovdqa 6880(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 6688(%r12), %ymm5, %ymm5
vpsubw 6400(%r12), %ymm4, %ymm4
vpaddw 6784(%r12), %ymm4, %ymm4
vpsubw 5824(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 5248(%r12), %ymm1, %ymm1
vpaddw 6400(%r12), %ymm1, %ymm1
vmovdqa 5536(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 6688(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 6112(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 5248(%r12), %ymm8
vmovdqa 6112(%r12), %ymm9
vmovdqa %ymm8, 768(%r8)
vmovdqa %ymm0, 800(%r8)
vmovdqa %ymm1, 832(%r8)
vmovdqa %ymm7, 864(%r8)
vmovdqa %ymm5, 896(%r8)
vmovdqa %ymm2, 928(%r8)
vmovdqa %ymm3, 960(%r8)
vmovdqa %ymm9, 992(%r8)
vmovdqa 7072(%r12), %ymm0
vpsubw 7168(%r12), %ymm0, %ymm0
vmovdqa 7456(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 7264(%r12), %ymm1, %ymm1
vpsubw 6976(%r12), %ymm0, %ymm0
vpaddw 7360(%r12), %ymm0, %ymm0
vmovdqa 7648(%r12), %ymm2
vpsubw 7744(%r12), %ymm2, %ymm2
vmovdqa 8032(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 7840(%r12), %ymm3, %ymm3
vpsubw 7552(%r12), %ymm2, %ymm2
vpaddw 7936(%r12), %ymm2, %ymm2
vmovdqa 8224(%r12), %ymm4
vpsubw 8320(%r12), %ymm4, %ymm4
vmovdqa 8608(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 8416(%r12), %ymm5, %ymm5
vpsubw 8128(%r12), %ymm4, %ymm4
vpaddw 8512(%r12), %ymm4, %ymm4
vpsubw 7552(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 6976(%r12), %ymm1, %ymm1
vpaddw 8128(%r12), %ymm1, %ymm1
vmovdqa 7264(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 8416(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 7840(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 6976(%r12), %ymm8
vmovdqa 7840(%r12), %ymm9
vmovdqa %ymm8, 1024(%r8)
vmovdqa %ymm0, 1056(%r8)
vmovdqa %ymm1, 1088(%r8)
vmovdqa %ymm7, 1120(%r8)
vmovdqa %ymm5, 1152(%r8)
vmovdqa %ymm2, 1184(%r8)
vmovdqa %ymm3, 1216(%r8)
vmovdqa %ymm9, 1248(%r8)
vmovdqa 8800(%r12), %ymm0
vpsubw 8896(%r12), %ymm0, %ymm0
vmovdqa 9184(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 8992(%r12), %ymm1, %ymm1
vpsubw 8704(%r12), %ymm0, %ymm0
vpaddw 9088(%r12), %ymm0, %ymm0
vmovdqa 9376(%r12), %ymm2
vpsubw 9472(%r12), %ymm2, %ymm2
vmovdqa 9760(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 9568(%r12), %ymm3, %ymm3
vpsubw 9280(%r12), %ymm2, %ymm2
vpaddw 9664(%r12), %ymm2, %ymm2
vmovdqa 9952(%r12), %ymm4
vpsubw 10048(%r12), %ymm4, %ymm4
vmovdqa 10336(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 10144(%r12), %ymm5, %ymm5
vpsubw 9856(%r12), %ymm4, %ymm4
vpaddw 10240(%r12), %ymm4, %ymm4
vpsubw 9280(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 8704(%r12), %ymm1, %ymm1
vpaddw 9856(%r12), %ymm1, %ymm1
vmovdqa 8992(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 10144(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 9568(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 8704(%r12), %ymm8
vmovdqa 9568(%r12), %ymm9
vmovdqa %ymm8, 1280(%r8)
vmovdqa %ymm0, 1312(%r8)
vmovdqa %ymm1, 1344(%r8)
vmovdqa %ymm7, 1376(%r8)
vmovdqa %ymm5, 1408(%r8)
vmovdqa %ymm2, 1440(%r8)
vmovdqa %ymm3, 1472(%r8)
vmovdqa %ymm9, 1504(%r8)
vmovdqa 10528(%r12), %ymm0
vpsubw 10624(%r12), %ymm0, %ymm0
vmovdqa 10912(%r12), %ymm1
vpsubw %ymm0, %ymm1, %ymm1
vpsubw 10720(%r12), %ymm1, %ymm1
vpsubw 10432(%r12), %ymm0, %ymm0
vpaddw 10816(%r12), %ymm0, %ymm0
vmovdqa 11104(%r12), %ymm2
vpsubw 11200(%r12), %ymm2, %ymm2
vmovdqa 11488(%r12), %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw 11296(%r12), %ymm3, %ymm3
vpsubw 11008(%r12), %ymm2, %ymm2
vpaddw 11392(%r12), %ymm2, %ymm2
vmovdqa 11680(%r12), %ymm4
vpsubw 11776(%r12), %ymm4, %ymm4
vmovdqa 12064(%r12), %ymm5
vpsubw %ymm4, %ymm5, %ymm5
vpsubw 11872(%r12), %ymm5, %ymm5
vpsubw 11584(%r12), %ymm4, %ymm4
vpaddw 11968(%r12), %ymm4, %ymm4
vpsubw 11008(%r12), %ymm1, %ymm1
vpsubw %ymm1, %ymm5, %ymm5
vpsubw %ymm3, %ymm5, %ymm5
vpsubw 10432(%r12), %ymm1, %ymm1
vpaddw 11584(%r12), %ymm1, %ymm1
vmovdqa 10720(%r12), %ymm6
vpsubw %ymm2, %ymm6, %ymm7
vmovdqa 11872(%r12), %ymm2
vpsubw %ymm7, %ymm2, %ymm2
vpsubw 11296(%r12), %ymm2, %ymm2
vpsubw %ymm0, %ymm7, %ymm7
vpaddw %ymm4, %ymm7, %ymm7
vmovdqa 10432(%r12), %ymm8
vmovdqa 11296(%r12), %ymm9
vmovdqa %ymm8, 1536(%r8)
vmovdqa %ymm0, 1568(%r8)
vmovdqa %ymm1, 1600(%r8)
vmovdqa %ymm7, 1632(%r8)
vmovdqa %ymm5, 1664(%r8)
vmovdqa %ymm2, 1696(%r8)
vmovdqa %ymm3, 1728(%r8)
vmovdqa %ymm9, 1760(%r8)
vmovdqa 0(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm4
vpunpckhwd const0(%rip), %ymm11, %ymm2
vpslld $1, %ymm4, %ymm4
vpslld $1, %ymm2, %ymm2
vmovdqa 256(%r8), %ymm9
vpunpcklwd const0(%rip), %ymm9, %ymm8
vpunpckhwd const0(%rip), %ymm9, %ymm9
vmovdqa 512(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm5
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm5, %ymm8, %ymm3
vpaddd %ymm6, %ymm9, %ymm7
vpsubd %ymm4, %ymm3, %ymm3
vpsubd %ymm2, %ymm7, %ymm7
vpsubd %ymm5, %ymm8, %ymm5
vpsubd %ymm6, %ymm9, %ymm6
vpsrld $1, %ymm5, %ymm5
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm5, %ymm5
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm5, %ymm6
vmovdqa 1536(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm9
vpunpckhwd const0(%rip), %ymm5, %ymm8
vpslld $1, %ymm9, %ymm9
vpslld $1, %ymm8, %ymm8
vpsubd %ymm9, %ymm3, %ymm3
vpsubd %ymm8, %ymm7, %ymm7
vpsrld $1, %ymm3, %ymm3
vpsrld $1, %ymm7, %ymm7
vpand mask32_to_16(%rip), %ymm3, %ymm3
vpand mask32_to_16(%rip), %ymm7, %ymm7
vpackusdw %ymm7, %ymm3, %ymm7
vmovdqa 768(%r8), %ymm3
vpaddw 1024(%r8), %ymm3, %ymm8
vpsubw 1024(%r8), %ymm3, %ymm3
vpsrlw $2, %ymm3, %ymm3
vpsubw %ymm6, %ymm3, %ymm3
vpmullw %ymm14, %ymm3, %ymm3
vpsllw $1, %ymm11, %ymm9
vpsubw %ymm9, %ymm8, %ymm9
vpsllw $7, %ymm5, %ymm8
vpsubw %ymm8, %ymm9, %ymm8
vpsrlw $3, %ymm8, %ymm8
vpsubw %ymm7, %ymm8, %ymm8
vmovdqa 1280(%r8), %ymm9
vpsubw %ymm11, %ymm9, %ymm9
vpmullw %ymm15, %ymm5, %ymm2
vpsubw %ymm2, %ymm9, %ymm2
vpmullw %ymm14, %ymm8, %ymm8
vpsubw %ymm8, %ymm7, %ymm7
vpmullw %ymm12, %ymm8, %ymm9
vpaddw %ymm9, %ymm7, %ymm9
vpmullw %ymm12, %ymm9, %ymm9
vpsubw %ymm9, %ymm2, %ymm9
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm6, %ymm9, %ymm9
vpsrlw $3, %ymm9, %ymm9
vpsubw %ymm3, %ymm9, %ymm9
vpsubw %ymm9, %ymm3, %ymm3
vpsubw %ymm3, %ymm6, %ymm6
vpmullw %ymm13, %ymm9, %ymm9
vpsubw %ymm9, %ymm6, %ymm6
vpshufb shuf48_16(%rip), %ymm8, %ymm8
vpand mask3_5_4_3_1(%rip), %ymm8, %ymm2
vpand mask5_3_5_3(%rip), %ymm8, %ymm8
vpermq $139, %ymm2, %ymm2
vpand mask_keephigh(%rip), %ymm2, %ymm4
vpor %ymm4, %ymm8, %ymm8
vpaddw 2048(%r8), %ymm11, %ymm11
vpaddw %ymm8, %ymm11, %ymm11
vmovdqa %xmm2, 2048(%r8)
vpshufb shuf48_16(%rip), %ymm9, %ymm9
vpand mask3_5_4_3_1(%rip), %ymm9, %ymm2
vpand mask5_3_5_3(%rip), %ymm9, %ymm9
vpermq $139, %ymm2, %ymm2
vpand mask_keephigh(%rip), %ymm2, %ymm4
vpor %ymm4, %ymm9, %ymm9
vpaddw 2304(%r8), %ymm6, %ymm6
vpaddw %ymm9, %ymm6, %ymm6
vmovdqa %xmm2, 2304(%r8)
vpshufb shuf48_16(%rip), %ymm5, %ymm5
vpand mask3_5_4_3_1(%rip), %ymm5, %ymm2
vpand mask5_3_5_3(%rip), %ymm5, %ymm5
vpermq $139, %ymm2, %ymm2
vpand mask_keephigh(%rip), %ymm2, %ymm4
vpor %ymm4, %ymm5, %ymm5
vpaddw 2560(%r8), %ymm7, %ymm7
vpaddw %ymm5, %ymm7, %ymm7
vmovdqa %xmm2, 2560(%r8)
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %xmm11, 64(%rdi)
vextracti128 $1, %ymm11, %xmm11
vmovq %xmm11, 80(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %xmm6, 416(%rdi)
vextracti128 $1, %ymm6, %xmm6
vmovq %xmm6, 432(%rdi)
vpand mask_mod8192(%rip), %ymm7, %ymm7
vmovdqu %xmm7, 768(%rdi)
vextracti128 $1, %ymm7, %xmm7
vmovq %xmm7, 784(%rdi)
vpand mask_mod8192(%rip), %ymm3, %ymm3
vmovdqu %xmm3, 1120(%rdi)
vextracti128 $1, %ymm3, %xmm3
vmovq %xmm3, 1136(%rdi)
vmovdqa 32(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm9
vpunpckhwd const0(%rip), %ymm5, %ymm8
vpslld $1, %ymm9, %ymm9
vpslld $1, %ymm8, %ymm8
vmovdqa 288(%r8), %ymm3
vpunpcklwd const0(%rip), %ymm3, %ymm7
vpunpckhwd const0(%rip), %ymm3, %ymm3
vmovdqa 544(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm11
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm11, %ymm7, %ymm2
vpaddd %ymm6, %ymm3, %ymm4
vpsubd %ymm9, %ymm2, %ymm2
vpsubd %ymm8, %ymm4, %ymm4
vpsubd %ymm11, %ymm7, %ymm11
vpsubd %ymm6, %ymm3, %ymm6
vpsrld $1, %ymm11, %ymm11
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm11, %ymm11
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm11, %ymm6
vmovdqa 1568(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm3
vpunpckhwd const0(%rip), %ymm11, %ymm7
vpslld $1, %ymm3, %ymm3
vpslld $1, %ymm7, %ymm7
vpsubd %ymm3, %ymm2, %ymm2
vpsubd %ymm7, %ymm4, %ymm4
vpsrld $1, %ymm2, %ymm2
vpsrld $1, %ymm4, %ymm4
vpand mask32_to_16(%rip), %ymm2, %ymm2
vpand mask32_to_16(%rip), %ymm4, %ymm4
vpackusdw %ymm4, %ymm2, %ymm4
vmovdqa 800(%r8), %ymm2
vpaddw 1056(%r8), %ymm2, %ymm7
vpsubw 1056(%r8), %ymm2, %ymm2
vpsrlw $2, %ymm2, %ymm2
vpsubw %ymm6, %ymm2, %ymm2
vpmullw %ymm14, %ymm2, %ymm2
vpsllw $1, %ymm5, %ymm3
vpsubw %ymm3, %ymm7, %ymm3
vpsllw $7, %ymm11, %ymm7
vpsubw %ymm7, %ymm3, %ymm7
vpsrlw $3, %ymm7, %ymm7
vpsubw %ymm4, %ymm7, %ymm7
vmovdqa 1312(%r8), %ymm3
vpsubw %ymm5, %ymm3, %ymm3
vpmullw %ymm15, %ymm11, %ymm8
vpsubw %ymm8, %ymm3, %ymm8
vpmullw %ymm14, %ymm7, %ymm7
vpsubw %ymm7, %ymm4, %ymm4
vpmullw %ymm12, %ymm7, %ymm3
vpaddw %ymm3, %ymm4, %ymm3
vpmullw %ymm12, %ymm3, %ymm3
vpsubw %ymm3, %ymm8, %ymm3
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm6, %ymm3, %ymm3
vpsrlw $3, %ymm3, %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vpsubw %ymm3, %ymm2, %ymm2
vpsubw %ymm2, %ymm6, %ymm6
vpmullw %ymm13, %ymm3, %ymm3
vpsubw %ymm3, %ymm6, %ymm6
vpshufb shuf48_16(%rip), %ymm7, %ymm7
vpand mask3_5_4_3_1(%rip), %ymm7, %ymm8
vpand mask5_3_5_3(%rip), %ymm7, %ymm7
vpermq $139, %ymm8, %ymm8
vpand mask_keephigh(%rip), %ymm8, %ymm9
vpor %ymm9, %ymm7, %ymm7
vpaddw 2080(%r8), %ymm5, %ymm5
vpaddw %ymm7, %ymm5, %ymm5
vmovdqa %xmm8, 2080(%r8)
vpshufb shuf48_16(%rip), %ymm3, %ymm3
vpand mask3_5_4_3_1(%rip), %ymm3, %ymm8
vpand mask5_3_5_3(%rip), %ymm3, %ymm3
vpermq $139, %ymm8, %ymm8
vpand mask_keephigh(%rip), %ymm8, %ymm9
vpor %ymm9, %ymm3, %ymm3
vpaddw 2336(%r8), %ymm6, %ymm6
vpaddw %ymm3, %ymm6, %ymm6
vmovdqa %xmm8, 2336(%r8)
vpshufb shuf48_16(%rip), %ymm11, %ymm11
vpand mask3_5_4_3_1(%rip), %ymm11, %ymm8
vpand mask5_3_5_3(%rip), %ymm11, %ymm11
vpermq $139, %ymm8, %ymm8
vpand mask_keephigh(%rip), %ymm8, %ymm9
vpor %ymm9, %ymm11, %ymm11
vpaddw 2592(%r8), %ymm4, %ymm4
vpaddw %ymm11, %ymm4, %ymm4
vmovdqa %xmm8, 2592(%r8)
vpand mask_mod8192(%rip), %ymm5, %ymm5
vmovdqu %xmm5, 152(%rdi)
vextracti128 $1, %ymm5, %xmm5
vmovq %xmm5, 168(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %xmm6, 504(%rdi)
vextracti128 $1, %ymm6, %xmm6
vmovq %xmm6, 520(%rdi)
vpand mask_mod8192(%rip), %ymm4, %ymm4
vmovdqu %xmm4, 856(%rdi)
vextracti128 $1, %ymm4, %xmm4
vmovq %xmm4, 872(%rdi)
vpand mask_mod8192(%rip), %ymm2, %ymm2
vmovdqu %xmm2, 1208(%rdi)
vextracti128 $1, %ymm2, %xmm2
vmovq %xmm2, 1224(%rdi)
vmovdqa 64(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm3
vpunpckhwd const0(%rip), %ymm11, %ymm7
vpslld $1, %ymm3, %ymm3
vpslld $1, %ymm7, %ymm7
vmovdqa 320(%r8), %ymm2
vpunpcklwd const0(%rip), %ymm2, %ymm4
vpunpckhwd const0(%rip), %ymm2, %ymm2
vmovdqa 576(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm5
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm5, %ymm4, %ymm8
vpaddd %ymm6, %ymm2, %ymm9
vpsubd %ymm3, %ymm8, %ymm8
vpsubd %ymm7, %ymm9, %ymm9
vpsubd %ymm5, %ymm4, %ymm5
vpsubd %ymm6, %ymm2, %ymm6
vpsrld $1, %ymm5, %ymm5
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm5, %ymm5
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm5, %ymm6
vmovdqa 1600(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm2
vpunpckhwd const0(%rip), %ymm5, %ymm4
vpslld $1, %ymm2, %ymm2
vpslld $1, %ymm4, %ymm4
vpsubd %ymm2, %ymm8, %ymm8
vpsubd %ymm4, %ymm9, %ymm9
vpsrld $1, %ymm8, %ymm8
vpsrld $1, %ymm9, %ymm9
vpand mask32_to_16(%rip), %ymm8, %ymm8
vpand mask32_to_16(%rip), %ymm9, %ymm9
vpackusdw %ymm9, %ymm8, %ymm9
vmovdqa 832(%r8), %ymm8
vpaddw 1088(%r8), %ymm8, %ymm4
vpsubw 1088(%r8), %ymm8, %ymm8
vpsrlw $2, %ymm8, %ymm8
vpsubw %ymm6, %ymm8, %ymm8
vpmullw %ymm14, %ymm8, %ymm8
vpsllw $1, %ymm11, %ymm2
vpsubw %ymm2, %ymm4, %ymm2
vpsllw $7, %ymm5, %ymm4
vpsubw %ymm4, %ymm2, %ymm4
vpsrlw $3, %ymm4, %ymm4
vpsubw %ymm9, %ymm4, %ymm4
vmovdqa 1344(%r8), %ymm2
vpsubw %ymm11, %ymm2, %ymm2
vpmullw %ymm15, %ymm5, %ymm7
vpsubw %ymm7, %ymm2, %ymm7
vpmullw %ymm14, %ymm4, %ymm4
vpsubw %ymm4, %ymm9, %ymm9
vpmullw %ymm12, %ymm4, %ymm2
vpaddw %ymm2, %ymm9, %ymm2
vpmullw %ymm12, %ymm2, %ymm2
vpsubw %ymm2, %ymm7, %ymm2
vpmullw %ymm14, %ymm2, %ymm2
vpsubw %ymm6, %ymm2, %ymm2
vpsrlw $3, %ymm2, %ymm2
vpsubw %ymm8, %ymm2, %ymm2
vpsubw %ymm2, %ymm8, %ymm8
vpsubw %ymm8, %ymm6, %ymm6
vpmullw %ymm13, %ymm2, %ymm2
vpsubw %ymm2, %ymm6, %ymm6
vpshufb shuf48_16(%rip), %ymm4, %ymm4
vpand mask3_5_4_3_1(%rip), %ymm4, %ymm7
vpand mask5_3_5_3(%rip), %ymm4, %ymm4
vpermq $139, %ymm7, %ymm7
vpand mask_keephigh(%rip), %ymm7, %ymm3
vpor %ymm3, %ymm4, %ymm4
vpaddw 2112(%r8), %ymm11, %ymm11
vpaddw %ymm4, %ymm11, %ymm11
vmovdqa %xmm7, 2112(%r8)
vpshufb shuf48_16(%rip), %ymm2, %ymm2
vpand mask3_5_4_3_1(%rip), %ymm2, %ymm7
vpand mask5_3_5_3(%rip), %ymm2, %ymm2
vpermq $139, %ymm7, %ymm7
vpand mask_keephigh(%rip), %ymm7, %ymm3
vpor %ymm3, %ymm2, %ymm2
vpaddw 2368(%r8), %ymm6, %ymm6
vpaddw %ymm2, %ymm6, %ymm6
vmovdqa %xmm7, 2368(%r8)
vpshufb shuf48_16(%rip), %ymm5, %ymm5
vpand mask3_5_4_3_1(%rip), %ymm5, %ymm7
vpand mask5_3_5_3(%rip), %ymm5, %ymm5
vpermq $139, %ymm7, %ymm7
vpand mask_keephigh(%rip), %ymm7, %ymm3
vpor %ymm3, %ymm5, %ymm5
vpaddw 2624(%r8), %ymm9, %ymm9
vpaddw %ymm5, %ymm9, %ymm9
vmovdqa %xmm7, 2624(%r8)
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %xmm11, 240(%rdi)
vextracti128 $1, %ymm11, %xmm11
vmovq %xmm11, 256(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %xmm6, 592(%rdi)
vextracti128 $1, %ymm6, %xmm6
vmovq %xmm6, 608(%rdi)
vpand mask_mod8192(%rip), %ymm9, %ymm9
vmovdqu %xmm9, 944(%rdi)
vextracti128 $1, %ymm9, %xmm9
vmovq %xmm9, 960(%rdi)
vpand mask_mod8192(%rip), %ymm8, %ymm8
vmovdqu %xmm8, 1296(%rdi)
vextracti128 $1, %ymm8, %xmm8
vmovq %xmm8, 1312(%rdi)
vmovdqa 96(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm2
vpunpckhwd const0(%rip), %ymm5, %ymm4
vpslld $1, %ymm2, %ymm2
vpslld $1, %ymm4, %ymm4
vmovdqa 352(%r8), %ymm8
vpunpcklwd const0(%rip), %ymm8, %ymm9
vpunpckhwd const0(%rip), %ymm8, %ymm8
vmovdqa 608(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm11
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm11, %ymm9, %ymm7
vpaddd %ymm6, %ymm8, %ymm3
vpsubd %ymm2, %ymm7, %ymm7
vpsubd %ymm4, %ymm3, %ymm3
vpsubd %ymm11, %ymm9, %ymm11
vpsubd %ymm6, %ymm8, %ymm6
vpsrld $1, %ymm11, %ymm11
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm11, %ymm11
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm11, %ymm6
vmovdqa 1632(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm8
vpunpckhwd const0(%rip), %ymm11, %ymm9
vpslld $1, %ymm8, %ymm8
vpslld $1, %ymm9, %ymm9
vpsubd %ymm8, %ymm7, %ymm7
vpsubd %ymm9, %ymm3, %ymm3
vpsrld $1, %ymm7, %ymm7
vpsrld $1, %ymm3, %ymm3
vpand mask32_to_16(%rip), %ymm7, %ymm7
vpand mask32_to_16(%rip), %ymm3, %ymm3
vpackusdw %ymm3, %ymm7, %ymm3
vmovdqa 864(%r8), %ymm7
vpaddw 1120(%r8), %ymm7, %ymm9
vpsubw 1120(%r8), %ymm7, %ymm7
vpsrlw $2, %ymm7, %ymm7
vpsubw %ymm6, %ymm7, %ymm7
vpmullw %ymm14, %ymm7, %ymm7
vpsllw $1, %ymm5, %ymm8
vpsubw %ymm8, %ymm9, %ymm8
vpsllw $7, %ymm11, %ymm9
vpsubw %ymm9, %ymm8, %ymm9
vpsrlw $3, %ymm9, %ymm9
vpsubw %ymm3, %ymm9, %ymm9
vmovdqa 1376(%r8), %ymm8
vpsubw %ymm5, %ymm8, %ymm8
vpmullw %ymm15, %ymm11, %ymm4
vpsubw %ymm4, %ymm8, %ymm4
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm9, %ymm3, %ymm3
vpmullw %ymm12, %ymm9, %ymm8
vpaddw %ymm8, %ymm3, %ymm8
vpmullw %ymm12, %ymm8, %ymm8
vpsubw %ymm8, %ymm4, %ymm8
vpmullw %ymm14, %ymm8, %ymm8
vpsubw %ymm6, %ymm8, %ymm8
vpsrlw $3, %ymm8, %ymm8
vpsubw %ymm7, %ymm8, %ymm8
vpsubw %ymm8, %ymm7, %ymm7
vpsubw %ymm7, %ymm6, %ymm6
vpmullw %ymm13, %ymm8, %ymm8
vpsubw %ymm8, %ymm6, %ymm6
vpshufb shuf48_16(%rip), %ymm9, %ymm9
vpand mask3_5_4_3_1(%rip), %ymm9, %ymm4
vpand mask5_3_5_3(%rip), %ymm9, %ymm9
vpermq $139, %ymm4, %ymm4
vpand mask_keephigh(%rip), %ymm4, %ymm2
vpor %ymm2, %ymm9, %ymm9
vpaddw 2144(%r8), %ymm5, %ymm5
vpaddw %ymm9, %ymm5, %ymm5
vmovdqa %xmm4, 2144(%r8)
vpshufb shuf48_16(%rip), %ymm8, %ymm8
vpand mask3_5_4_3_1(%rip), %ymm8, %ymm4
vpand mask5_3_5_3(%rip), %ymm8, %ymm8
vpermq $139, %ymm4, %ymm4
vpand mask_keephigh(%rip), %ymm4, %ymm2
vpor %ymm2, %ymm8, %ymm8
vpaddw 2400(%r8), %ymm6, %ymm6
vpaddw %ymm8, %ymm6, %ymm6
vmovdqa %xmm4, 2400(%r8)
vpshufb shuf48_16(%rip), %ymm11, %ymm11
vpand mask3_5_4_3_1(%rip), %ymm11, %ymm4
vpand mask5_3_5_3(%rip), %ymm11, %ymm11
vpermq $139, %ymm4, %ymm4
vpand mask_keephigh(%rip), %ymm4, %ymm2
vpor %ymm2, %ymm11, %ymm11
vpaddw 2656(%r8), %ymm3, %ymm3
vpaddw %ymm11, %ymm3, %ymm3
vmovdqa %xmm4, 2656(%r8)
vpand mask_mod8192(%rip), %ymm5, %ymm5
vmovdqu %xmm5, 328(%rdi)
vextracti128 $1, %ymm5, %xmm5
vmovq %xmm5, 344(%rdi)
vpshufb shufmin1_mask3(%rip), %ymm5, %ymm5
vmovdqa %xmm5, 1792(%r8)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %xmm6, 680(%rdi)
vextracti128 $1, %ymm6, %xmm6
vmovq %xmm6, 696(%rdi)
vpshufb shufmin1_mask3(%rip), %ymm6, %ymm6
vmovdqa %xmm6, 1824(%r8)
vpand mask_mod8192(%rip), %ymm3, %ymm3
vmovdqu %xmm3, 1032(%rdi)
vextracti128 $1, %ymm3, %xmm3
vmovq %xmm3, 1048(%rdi)
vpshufb shufmin1_mask3(%rip), %ymm3, %ymm3
vmovdqa %xmm3, 1856(%r8)
vpand mask_mod8192(%rip), %ymm7, %ymm7
vmovdqu %xmm7, 1384(%rdi)
vextracti128 $1, %ymm7, %xmm7
vpextrw $0, %xmm7, 1400(%rdi)
vpshufb shufmin1_mask3(%rip), %ymm7, %ymm7
vmovdqa %xmm7, 1888(%r8)
vmovdqa 128(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm8
vpunpckhwd const0(%rip), %ymm11, %ymm9
vpslld $1, %ymm8, %ymm8
vpslld $1, %ymm9, %ymm9
vmovdqa 384(%r8), %ymm7
vpunpcklwd const0(%rip), %ymm7, %ymm3
vpunpckhwd const0(%rip), %ymm7, %ymm7
vmovdqa 640(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm5
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm5, %ymm3, %ymm4
vpaddd %ymm6, %ymm7, %ymm2
vpsubd %ymm8, %ymm4, %ymm4
vpsubd %ymm9, %ymm2, %ymm2
vpsubd %ymm5, %ymm3, %ymm5
vpsubd %ymm6, %ymm7, %ymm6
vpsrld $1, %ymm5, %ymm5
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm5, %ymm5
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm5, %ymm6
vmovdqa 1664(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm7
vpunpckhwd const0(%rip), %ymm5, %ymm3
vpslld $1, %ymm7, %ymm7
vpslld $1, %ymm3, %ymm3
vpsubd %ymm7, %ymm4, %ymm4
vpsubd %ymm3, %ymm2, %ymm2
vpsrld $1, %ymm4, %ymm4
vpsrld $1, %ymm2, %ymm2
vpand mask32_to_16(%rip), %ymm4, %ymm4
vpand mask32_to_16(%rip), %ymm2, %ymm2
vpackusdw %ymm2, %ymm4, %ymm2
vmovdqa 896(%r8), %ymm4
vpaddw 1152(%r8), %ymm4, %ymm3
vpsubw 1152(%r8), %ymm4, %ymm4
vpsrlw $2, %ymm4, %ymm4
vpsubw %ymm6, %ymm4, %ymm4
vpmullw %ymm14, %ymm4, %ymm4
vpsllw $1, %ymm11, %ymm7
vpsubw %ymm7, %ymm3, %ymm7
vpsllw $7, %ymm5, %ymm3
vpsubw %ymm3, %ymm7, %ymm3
vpsrlw $3, %ymm3, %ymm3
vpsubw %ymm2, %ymm3, %ymm3
vmovdqa 1408(%r8), %ymm7
vpsubw %ymm11, %ymm7, %ymm7
vpmullw %ymm15, %ymm5, %ymm9
vpsubw %ymm9, %ymm7, %ymm9
vpmullw %ymm14, %ymm3, %ymm3
vpsubw %ymm3, %ymm2, %ymm2
vpmullw %ymm12, %ymm3, %ymm7
vpaddw %ymm7, %ymm2, %ymm7
vpmullw %ymm12, %ymm7, %ymm7
vpsubw %ymm7, %ymm9, %ymm7
vpmullw %ymm14, %ymm7, %ymm7
vpsubw %ymm6, %ymm7, %ymm7
vpsrlw $3, %ymm7, %ymm7
vpsubw %ymm4, %ymm7, %ymm7
vpsubw %ymm7, %ymm4, %ymm4
vpsubw %ymm4, %ymm6, %ymm6
vpmullw %ymm13, %ymm7, %ymm7
vpsubw %ymm7, %ymm6, %ymm6
vmovdqu 416(%rdi), %ymm9
vmovdqu 768(%rdi), %ymm8
vmovdqu 1120(%rdi), %ymm10
vpaddw %ymm11, %ymm9, %ymm11
vpaddw %ymm6, %ymm8, %ymm6
vpaddw %ymm2, %ymm10, %ymm2
vpshufb shuf48_16(%rip), %ymm4, %ymm4
vpand mask3_5_4_3_1(%rip), %ymm4, %ymm10
vpand mask5_3_5_3(%rip), %ymm4, %ymm4
vpermq $139, %ymm10, %ymm10
vpand mask_keephigh(%rip), %ymm10, %ymm8
vpor %ymm8, %ymm4, %ymm4
vmovdqu 64(%rdi), %ymm8
vpaddw 1920(%r8), %ymm8, %ymm8
vpaddw %ymm4, %ymm8, %ymm8
vpand mask_mod8192(%rip), %ymm8, %ymm8
vmovdqu %xmm8, 64(%rdi)
vextracti128 $1, %ymm8, %xmm8
vmovq %xmm8, 80(%rdi)
vmovdqa %xmm10, 1920(%r8)
vpshufb shuf48_16(%rip), %ymm3, %ymm3
vpand mask3_5_4_3_1(%rip), %ymm3, %ymm10
vpand mask5_3_5_3(%rip), %ymm3, %ymm3
vpermq $139, %ymm10, %ymm10
vpand mask_keephigh(%rip), %ymm10, %ymm8
vpor %ymm8, %ymm3, %ymm3
vpaddw 2176(%r8), %ymm11, %ymm11
vpaddw %ymm3, %ymm11, %ymm11
vmovdqa %xmm10, 2176(%r8)
vpshufb shuf48_16(%rip), %ymm7, %ymm7
vpand mask3_5_4_3_1(%rip), %ymm7, %ymm10
vpand mask5_3_5_3(%rip), %ymm7, %ymm7
vpermq $139, %ymm10, %ymm10
vpand mask_keephigh(%rip), %ymm10, %ymm8
vpor %ymm8, %ymm7, %ymm7
vpaddw 2432(%r8), %ymm6, %ymm6
vpaddw %ymm7, %ymm6, %ymm6
vmovdqa %xmm10, 2432(%r8)
vpshufb shuf48_16(%rip), %ymm5, %ymm5
vpand mask3_5_4_3_1(%rip), %ymm5, %ymm10
vpand mask5_3_5_3(%rip), %ymm5, %ymm5
vpermq $139, %ymm10, %ymm10
vpand mask_keephigh(%rip), %ymm10, %ymm8
vpor %ymm8, %ymm5, %ymm5
vpaddw 2688(%r8), %ymm2, %ymm2
vpaddw %ymm5, %ymm2, %ymm2
vmovdqa %xmm10, 2688(%r8)
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %xmm11, 416(%rdi)
vextracti128 $1, %ymm11, %xmm11
vmovq %xmm11, 432(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %xmm6, 768(%rdi)
vextracti128 $1, %ymm6, %xmm6
vmovq %xmm6, 784(%rdi)
vpand mask_mod8192(%rip), %ymm2, %ymm2
vmovdqu %xmm2, 1120(%rdi)
vextracti128 $1, %ymm2, %xmm2
vmovq %xmm2, 1136(%rdi)
vmovdqa 160(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm7
vpunpckhwd const0(%rip), %ymm5, %ymm3
vpslld $1, %ymm7, %ymm7
vpslld $1, %ymm3, %ymm3
vmovdqa 416(%r8), %ymm4
vpunpcklwd const0(%rip), %ymm4, %ymm2
vpunpckhwd const0(%rip), %ymm4, %ymm4
vmovdqa 672(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm11
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm11, %ymm2, %ymm10
vpaddd %ymm6, %ymm4, %ymm8
vpsubd %ymm7, %ymm10, %ymm10
vpsubd %ymm3, %ymm8, %ymm8
vpsubd %ymm11, %ymm2, %ymm11
vpsubd %ymm6, %ymm4, %ymm6
vpsrld $1, %ymm11, %ymm11
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm11, %ymm11
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm11, %ymm6
vmovdqa 1696(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm4
vpunpckhwd const0(%rip), %ymm11, %ymm2
vpslld $1, %ymm4, %ymm4
vpslld $1, %ymm2, %ymm2
vpsubd %ymm4, %ymm10, %ymm10
vpsubd %ymm2, %ymm8, %ymm8
vpsrld $1, %ymm10, %ymm10
vpsrld $1, %ymm8, %ymm8
vpand mask32_to_16(%rip), %ymm10, %ymm10
vpand mask32_to_16(%rip), %ymm8, %ymm8
vpackusdw %ymm8, %ymm10, %ymm8
vmovdqa 928(%r8), %ymm10
vpaddw 1184(%r8), %ymm10, %ymm2
vpsubw 1184(%r8), %ymm10, %ymm10
vpsrlw $2, %ymm10, %ymm10
vpsubw %ymm6, %ymm10, %ymm10
vpmullw %ymm14, %ymm10, %ymm10
vpsllw $1, %ymm5, %ymm4
vpsubw %ymm4, %ymm2, %ymm4
vpsllw $7, %ymm11, %ymm2
vpsubw %ymm2, %ymm4, %ymm2
vpsrlw $3, %ymm2, %ymm2
vpsubw %ymm8, %ymm2, %ymm2
vmovdqa 1440(%r8), %ymm4
vpsubw %ymm5, %ymm4, %ymm4
vpmullw %ymm15, %ymm11, %ymm3
vpsubw %ymm3, %ymm4, %ymm3
vpmullw %ymm14, %ymm2, %ymm2
vpsubw %ymm2, %ymm8, %ymm8
vpmullw %ymm12, %ymm2, %ymm4
vpaddw %ymm4, %ymm8, %ymm4
vpmullw %ymm12, %ymm4, %ymm4
vpsubw %ymm4, %ymm3, %ymm4
vpmullw %ymm14, %ymm4, %ymm4
vpsubw %ymm6, %ymm4, %ymm4
vpsrlw $3, %ymm4, %ymm4
vpsubw %ymm10, %ymm4, %ymm4
vpsubw %ymm4, %ymm10, %ymm10
vpsubw %ymm10, %ymm6, %ymm6
vpmullw %ymm13, %ymm4, %ymm4
vpsubw %ymm4, %ymm6, %ymm6
vmovdqu 504(%rdi), %ymm3
vmovdqu 856(%rdi), %ymm7
vmovdqu 1208(%rdi), %ymm9
vpaddw %ymm5, %ymm3, %ymm5
vpaddw %ymm6, %ymm7, %ymm6
vpaddw %ymm8, %ymm9, %ymm8
vpshufb shuf48_16(%rip), %ymm10, %ymm10
vpand mask3_5_4_3_1(%rip), %ymm10, %ymm9
vpand mask5_3_5_3(%rip), %ymm10, %ymm10
vpermq $139, %ymm9, %ymm9
vpand mask_keephigh(%rip), %ymm9, %ymm7
vpor %ymm7, %ymm10, %ymm10
vmovdqu 152(%rdi), %ymm7
vpaddw 1952(%r8), %ymm7, %ymm7
vpaddw %ymm10, %ymm7, %ymm7
vpand mask_mod8192(%rip), %ymm7, %ymm7
vmovdqu %xmm7, 152(%rdi)
vextracti128 $1, %ymm7, %xmm7
vmovq %xmm7, 168(%rdi)
vmovdqa %xmm9, 1952(%r8)
vpshufb shuf48_16(%rip), %ymm2, %ymm2
vpand mask3_5_4_3_1(%rip), %ymm2, %ymm9
vpand mask5_3_5_3(%rip), %ymm2, %ymm2
vpermq $139, %ymm9, %ymm9
vpand mask_keephigh(%rip), %ymm9, %ymm7
vpor %ymm7, %ymm2, %ymm2
vpaddw 2208(%r8), %ymm5, %ymm5
vpaddw %ymm2, %ymm5, %ymm5
vmovdqa %xmm9, 2208(%r8)
vpshufb shuf48_16(%rip), %ymm4, %ymm4
vpand mask3_5_4_3_1(%rip), %ymm4, %ymm9
vpand mask5_3_5_3(%rip), %ymm4, %ymm4
vpermq $139, %ymm9, %ymm9
vpand mask_keephigh(%rip), %ymm9, %ymm7
vpor %ymm7, %ymm4, %ymm4
vpaddw 2464(%r8), %ymm6, %ymm6
vpaddw %ymm4, %ymm6, %ymm6
vmovdqa %xmm9, 2464(%r8)
vpshufb shuf48_16(%rip), %ymm11, %ymm11
vpand mask3_5_4_3_1(%rip), %ymm11, %ymm9
vpand mask5_3_5_3(%rip), %ymm11, %ymm11
vpermq $139, %ymm9, %ymm9
vpand mask_keephigh(%rip), %ymm9, %ymm7
vpor %ymm7, %ymm11, %ymm11
vpaddw 2720(%r8), %ymm8, %ymm8
vpaddw %ymm11, %ymm8, %ymm8
vmovdqa %xmm9, 2720(%r8)
vpand mask_mod8192(%rip), %ymm5, %ymm5
vmovdqu %xmm5, 504(%rdi)
vextracti128 $1, %ymm5, %xmm5
vmovq %xmm5, 520(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %xmm6, 856(%rdi)
vextracti128 $1, %ymm6, %xmm6
vmovq %xmm6, 872(%rdi)
vpand mask_mod8192(%rip), %ymm8, %ymm8
vmovdqu %xmm8, 1208(%rdi)
vextracti128 $1, %ymm8, %xmm8
vmovq %xmm8, 1224(%rdi)
vmovdqa 192(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm4
vpunpckhwd const0(%rip), %ymm11, %ymm2
vpslld $1, %ymm4, %ymm4
vpslld $1, %ymm2, %ymm2
vmovdqa 448(%r8), %ymm10
vpunpcklwd const0(%rip), %ymm10, %ymm8
vpunpckhwd const0(%rip), %ymm10, %ymm10
vmovdqa 704(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm5
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm5, %ymm8, %ymm9
vpaddd %ymm6, %ymm10, %ymm7
vpsubd %ymm4, %ymm9, %ymm9
vpsubd %ymm2, %ymm7, %ymm7
vpsubd %ymm5, %ymm8, %ymm5
vpsubd %ymm6, %ymm10, %ymm6
vpsrld $1, %ymm5, %ymm5
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm5, %ymm5
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm5, %ymm6
vmovdqa 1728(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm10
vpunpckhwd const0(%rip), %ymm5, %ymm8
vpslld $1, %ymm10, %ymm10
vpslld $1, %ymm8, %ymm8
vpsubd %ymm10, %ymm9, %ymm9
vpsubd %ymm8, %ymm7, %ymm7
vpsrld $1, %ymm9, %ymm9
vpsrld $1, %ymm7, %ymm7
vpand mask32_to_16(%rip), %ymm9, %ymm9
vpand mask32_to_16(%rip), %ymm7, %ymm7
vpackusdw %ymm7, %ymm9, %ymm7
vmovdqa 960(%r8), %ymm9
vpaddw 1216(%r8), %ymm9, %ymm8
vpsubw 1216(%r8), %ymm9, %ymm9
vpsrlw $2, %ymm9, %ymm9
vpsubw %ymm6, %ymm9, %ymm9
vpmullw %ymm14, %ymm9, %ymm9
vpsllw $1, %ymm11, %ymm10
vpsubw %ymm10, %ymm8, %ymm10
vpsllw $7, %ymm5, %ymm8
vpsubw %ymm8, %ymm10, %ymm8
vpsrlw $3, %ymm8, %ymm8
vpsubw %ymm7, %ymm8, %ymm8
vmovdqa 1472(%r8), %ymm10
vpsubw %ymm11, %ymm10, %ymm10
vpmullw %ymm15, %ymm5, %ymm2
vpsubw %ymm2, %ymm10, %ymm2
vpmullw %ymm14, %ymm8, %ymm8
vpsubw %ymm8, %ymm7, %ymm7
vpmullw %ymm12, %ymm8, %ymm10
vpaddw %ymm10, %ymm7, %ymm10
vpmullw %ymm12, %ymm10, %ymm10
vpsubw %ymm10, %ymm2, %ymm10
vpmullw %ymm14, %ymm10, %ymm10
vpsubw %ymm6, %ymm10, %ymm10
vpsrlw $3, %ymm10, %ymm10
vpsubw %ymm9, %ymm10, %ymm10
vpsubw %ymm10, %ymm9, %ymm9
vpsubw %ymm9, %ymm6, %ymm6
vpmullw %ymm13, %ymm10, %ymm10
vpsubw %ymm10, %ymm6, %ymm6
vmovdqu 592(%rdi), %ymm2
vmovdqu 944(%rdi), %ymm4
vmovdqu 1296(%rdi), %ymm3
vpaddw %ymm11, %ymm2, %ymm11
vpaddw %ymm6, %ymm4, %ymm6
vpaddw %ymm7, %ymm3, %ymm7
vpshufb shuf48_16(%rip), %ymm9, %ymm9
vpand mask3_5_4_3_1(%rip), %ymm9, %ymm3
vpand mask5_3_5_3(%rip), %ymm9, %ymm9
vpermq $139, %ymm3, %ymm3
vpand mask_keephigh(%rip), %ymm3, %ymm4
vpor %ymm4, %ymm9, %ymm9
vmovdqu 240(%rdi), %ymm4
vpaddw 1984(%r8), %ymm4, %ymm4
vpaddw %ymm9, %ymm4, %ymm4
vpand mask_mod8192(%rip), %ymm4, %ymm4
vmovdqu %xmm4, 240(%rdi)
vextracti128 $1, %ymm4, %xmm4
vmovq %xmm4, 256(%rdi)
vmovdqa %xmm3, 1984(%r8)
vpshufb shuf48_16(%rip), %ymm8, %ymm8
vpand mask3_5_4_3_1(%rip), %ymm8, %ymm3
vpand mask5_3_5_3(%rip), %ymm8, %ymm8
vpermq $139, %ymm3, %ymm3
vpand mask_keephigh(%rip), %ymm3, %ymm4
vpor %ymm4, %ymm8, %ymm8
vpaddw 2240(%r8), %ymm11, %ymm11
vpaddw %ymm8, %ymm11, %ymm11
vmovdqa %xmm3, 2240(%r8)
vpshufb shuf48_16(%rip), %ymm10, %ymm10
vpand mask3_5_4_3_1(%rip), %ymm10, %ymm3
vpand mask5_3_5_3(%rip), %ymm10, %ymm10
vpermq $139, %ymm3, %ymm3
vpand mask_keephigh(%rip), %ymm3, %ymm4
vpor %ymm4, %ymm10, %ymm10
vpaddw 2496(%r8), %ymm6, %ymm6
vpaddw %ymm10, %ymm6, %ymm6
vmovdqa %xmm3, 2496(%r8)
vpshufb shuf48_16(%rip), %ymm5, %ymm5
vpand mask3_5_4_3_1(%rip), %ymm5, %ymm3
vpand mask5_3_5_3(%rip), %ymm5, %ymm5
vpermq $139, %ymm3, %ymm3
vpand mask_keephigh(%rip), %ymm3, %ymm4
vpor %ymm4, %ymm5, %ymm5
vpaddw 2752(%r8), %ymm7, %ymm7
vpaddw %ymm5, %ymm7, %ymm7
vmovdqa %xmm3, 2752(%r8)
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %xmm11, 592(%rdi)
vextracti128 $1, %ymm11, %xmm11
vmovq %xmm11, 608(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %xmm6, 944(%rdi)
vextracti128 $1, %ymm6, %xmm6
vmovq %xmm6, 960(%rdi)
vpand mask_mod8192(%rip), %ymm7, %ymm7
vmovdqu %xmm7, 1296(%rdi)
vextracti128 $1, %ymm7, %xmm7
vmovq %xmm7, 1312(%rdi)
vmovdqa 224(%r8), %ymm5
vpunpcklwd const0(%rip), %ymm5, %ymm10
vpunpckhwd const0(%rip), %ymm5, %ymm8
vpslld $1, %ymm10, %ymm10
vpslld $1, %ymm8, %ymm8
vmovdqa 480(%r8), %ymm9
vpunpcklwd const0(%rip), %ymm9, %ymm7
vpunpckhwd const0(%rip), %ymm9, %ymm9
vmovdqa 736(%r8), %ymm6
vpunpcklwd const0(%rip), %ymm6, %ymm11
vpunpckhwd const0(%rip), %ymm6, %ymm6
vpaddd %ymm11, %ymm7, %ymm3
vpaddd %ymm6, %ymm9, %ymm4
vpsubd %ymm10, %ymm3, %ymm3
vpsubd %ymm8, %ymm4, %ymm4
vpsubd %ymm11, %ymm7, %ymm11
vpsubd %ymm6, %ymm9, %ymm6
vpsrld $1, %ymm11, %ymm11
vpsrld $1, %ymm6, %ymm6
vpand mask32_to_16(%rip), %ymm11, %ymm11
vpand mask32_to_16(%rip), %ymm6, %ymm6
vpackusdw %ymm6, %ymm11, %ymm6
vmovdqa 1760(%r8), %ymm11
vpunpcklwd const0(%rip), %ymm11, %ymm9
vpunpckhwd const0(%rip), %ymm11, %ymm7
vpslld $1, %ymm9, %ymm9
vpslld $1, %ymm7, %ymm7
vpsubd %ymm9, %ymm3, %ymm3
vpsubd %ymm7, %ymm4, %ymm4
vpsrld $1, %ymm3, %ymm3
vpsrld $1, %ymm4, %ymm4
vpand mask32_to_16(%rip), %ymm3, %ymm3
vpand mask32_to_16(%rip), %ymm4, %ymm4
vpackusdw %ymm4, %ymm3, %ymm4
vmovdqa 992(%r8), %ymm3
vpaddw 1248(%r8), %ymm3, %ymm7
vpsubw 1248(%r8), %ymm3, %ymm3
vpsrlw $2, %ymm3, %ymm3
vpsubw %ymm6, %ymm3, %ymm3
vpmullw %ymm14, %ymm3, %ymm3
vpsllw $1, %ymm5, %ymm9
vpsubw %ymm9, %ymm7, %ymm9
vpsllw $7, %ymm11, %ymm7
vpsubw %ymm7, %ymm9, %ymm7
vpsrlw $3, %ymm7, %ymm7
vpsubw %ymm4, %ymm7, %ymm7
vmovdqa 1504(%r8), %ymm9
vpsubw %ymm5, %ymm9, %ymm9
vpmullw %ymm15, %ymm11, %ymm8
vpsubw %ymm8, %ymm9, %ymm8
vpmullw %ymm14, %ymm7, %ymm7
vpsubw %ymm7, %ymm4, %ymm4
vpmullw %ymm12, %ymm7, %ymm9
vpaddw %ymm9, %ymm4, %ymm9
vpmullw %ymm12, %ymm9, %ymm9
vpsubw %ymm9, %ymm8, %ymm9
vpmullw %ymm14, %ymm9, %ymm9
vpsubw %ymm6, %ymm9, %ymm9
vpsrlw $3, %ymm9, %ymm9
vpsubw %ymm3, %ymm9, %ymm9
vpsubw %ymm9, %ymm3, %ymm3
vpsubw %ymm3, %ymm6, %ymm6
vpmullw %ymm13, %ymm9, %ymm9
vpsubw %ymm9, %ymm6, %ymm6
vextracti128 $1, %ymm4, %xmm8
vpshufb shufmin1_mask3(%rip), %ymm8, %ymm8
vmovdqa %ymm8, 2816(%r8)
vextracti128 $1, %ymm3, %xmm8
vpshufb shufmin1_mask3(%rip), %ymm8, %ymm8
vmovdqa %ymm8, 2848(%r8)
vextracti128 $1, %ymm7, %xmm8
vpshufb shufmin1_mask3(%rip), %ymm8, %ymm8
vmovdqa %ymm8, 2880(%r8)
vmovdqu 680(%rdi), %ymm8
vmovdqu 1032(%rdi), %ymm10
# Only 18 bytes can be read at offset 1384(%rdi), but vmovdqu reads 32.
# Copy 18 bytes to the red zone and zero pad to 32 bytes.
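# The 32-byte scratch area at -32(%rsp) sits in the 128-byte red zone below
# %rsp defined by the System V AMD64 ABI, so it can be used without adjusting
# the stack pointer; no call intervenes before the vmovdqu that reads it back.
# After the copy, the low 18 bytes hold the data from 1384(%rdi) and the upper
# 14 bytes are zero.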
xor %r9, %r9
movq %r9, -16(%rsp)
movq %r9, -8(%rsp)
movq 1384(%rdi), %r9
movq %r9, -32(%rsp)
movq 1384+8(%rdi), %r9
movq %r9, -24(%rsp)
movw 1384+16(%rdi), %r9w
movw %r9w, -16(%rsp)
vmovdqu -32(%rsp), %ymm2
vpaddw %ymm5, %ymm8, %ymm5
vpaddw %ymm6, %ymm10, %ymm6
vpaddw %ymm4, %ymm2, %ymm4
vpshufb shuf48_16(%rip), %ymm3, %ymm3
vpand mask3_5_4_3_1(%rip), %ymm3, %ymm2
vpand mask5_3_5_3(%rip), %ymm3, %ymm3
vpermq $139, %ymm2, %ymm2
vpand mask_keephigh(%rip), %ymm2, %ymm10
vpor %ymm10, %ymm3, %ymm3
vmovdqu 328(%rdi), %ymm10
vpaddw 2016(%r8), %ymm10, %ymm10
vpaddw %ymm3, %ymm10, %ymm10
vpand mask_mod8192(%rip), %ymm10, %ymm10
vmovdqu %xmm10, 328(%rdi)
vextracti128 $1, %ymm10, %xmm10
vmovq %xmm10, 344(%rdi)
vpshufb shufmin1_mask3(%rip), %ymm10, %ymm10
vmovdqa %xmm10, 1792(%r8)
vmovdqa %xmm2, 2016(%r8)
vpshufb shuf48_16(%rip), %ymm7, %ymm7
vpand mask3_5_4_3_1(%rip), %ymm7, %ymm2
vpand mask5_3_5_3(%rip), %ymm7, %ymm7
vpermq $139, %ymm2, %ymm2
vpand mask_keephigh(%rip), %ymm2, %ymm10
vpor %ymm10, %ymm7, %ymm7
vpaddw 2272(%r8), %ymm5, %ymm5
vpaddw %ymm7, %ymm5, %ymm5
vmovdqa %xmm2, 2272(%r8)
vpshufb shuf48_16(%rip), %ymm9, %ymm9
vpand mask3_5_4_3_1(%rip), %ymm9, %ymm2
vpand mask5_3_5_3(%rip), %ymm9, %ymm9
vpermq $139, %ymm2, %ymm2
vpand mask_keephigh(%rip), %ymm2, %ymm10
vpor %ymm10, %ymm9, %ymm9
vpaddw 2528(%r8), %ymm6, %ymm6
vpaddw %ymm9, %ymm6, %ymm6
vmovdqa %xmm2, 2528(%r8)
vpshufb shuf48_16(%rip), %ymm11, %ymm11
vpand mask3_5_4_3_1(%rip), %ymm11, %ymm2
vpand mask5_3_5_3(%rip), %ymm11, %ymm11
vpermq $139, %ymm2, %ymm2
vpand mask_keephigh(%rip), %ymm2, %ymm10
vpor %ymm10, %ymm11, %ymm11
vpaddw 2784(%r8), %ymm4, %ymm4
vpaddw %ymm11, %ymm4, %ymm4
vmovdqa %xmm2, 2784(%r8)
vpand mask_mod8192(%rip), %ymm5, %ymm5
vmovdqu %xmm5, 680(%rdi)
vextracti128 $1, %ymm5, %xmm5
vmovq %xmm5, 696(%rdi)
vpand mask_mod8192(%rip), %ymm6, %ymm6
vmovdqu %xmm6, 1032(%rdi)
vextracti128 $1, %ymm6, %xmm6
vmovq %xmm6, 1048(%rdi)
vpand mask_mod8192(%rip), %ymm4, %ymm4
vmovdqu %xmm4, 1384(%rdi)
vextracti128 $1, %ymm4, %xmm4
vpextrw $0, %xmm4, 1400(%rdi)
vmovdqu 0(%rdi), %ymm11
vpaddw 1888(%r8), %ymm11, %ymm11
vpaddw 2816(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 0(%rdi)
vmovdqu 352(%rdi), %ymm11
vpaddw 2528(%r8), %ymm11, %ymm11
vpaddw 2848(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 352(%rdi)
vmovdqu 704(%rdi), %ymm11
vpaddw 2784(%r8), %ymm11, %ymm11
vpaddw 2880(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 704(%rdi)
vmovdqu 88(%rdi), %ymm11
vpaddw 2048(%r8), %ymm11, %ymm11
vpaddw 1920(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 88(%rdi)
vmovdqu 440(%rdi), %ymm11
vpaddw 2304(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 440(%rdi)
vmovdqu 792(%rdi), %ymm11
vpaddw 2560(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 792(%rdi)
vmovdqu 176(%rdi), %ymm11
vpaddw 2080(%r8), %ymm11, %ymm11
vpaddw 1952(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 176(%rdi)
vmovdqu 528(%rdi), %ymm11
vpaddw 2336(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 528(%rdi)
vmovdqu 880(%rdi), %ymm11
vpaddw 2592(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 880(%rdi)
vmovdqu 264(%rdi), %ymm11
vpaddw 2112(%r8), %ymm11, %ymm11
vpaddw 1984(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 264(%rdi)
vmovdqu 616(%rdi), %ymm11
vpaddw 2368(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 616(%rdi)
vmovdqu 968(%rdi), %ymm11
vpaddw 2624(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 968(%rdi)
vmovdqu 352(%rdi), %ymm11
vpaddw 2144(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 352(%rdi)
vmovdqu 704(%rdi), %ymm11
vpaddw 2400(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 704(%rdi)
vmovdqu 1056(%rdi), %ymm11
vpaddw 2656(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 1056(%rdi)
vmovdqu 440(%rdi), %ymm11
vpaddw 2176(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 440(%rdi)
vmovdqu 792(%rdi), %ymm11
vpaddw 2432(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 792(%rdi)
vmovdqu 1144(%rdi), %ymm11
vpaddw 2688(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 1144(%rdi)
vmovdqu 528(%rdi), %ymm11
vpaddw 2208(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 528(%rdi)
vmovdqu 880(%rdi), %ymm11
vpaddw 2464(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 880(%rdi)
vmovdqu 1232(%rdi), %ymm11
vpaddw 2720(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 1232(%rdi)
vmovdqu 616(%rdi), %ymm11
vpaddw 2240(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 616(%rdi)
vmovdqu 968(%rdi), %ymm11
vpaddw 2496(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 968(%rdi)
vmovdqu 1320(%rdi), %ymm11
vpaddw 2752(%r8), %ymm11, %ymm11
vpand mask_mod8192(%rip), %ymm11, %ymm11
vmovdqu %ymm11, 1320(%rdi)
pop %r12
.cfi_restore r12
pop %rbp
.cfi_restore rbp
.cfi_def_cfa_register rsp
.cfi_adjust_cfa_offset -8
ret
.cfi_endproc
.size poly_Rq_mul,.-poly_Rq_mul
#endif
|
marvin-hansen/iggy-streaming-system
| 5,330
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-x86/crypto/test/trampoline-x86.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl abi_test_trampoline
.hidden abi_test_trampoline
.type abi_test_trampoline,@function
.align 16
abi_test_trampoline:
.L_abi_test_trampoline_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 24(%esp),%ecx
movl (%ecx),%esi
movl 4(%ecx),%edi
movl 8(%ecx),%ebx
movl 12(%ecx),%ebp
subl $44,%esp
movl 72(%esp),%eax
xorl %ecx,%ecx
.L000loop:
cmpl 76(%esp),%ecx
jae .L001loop_done
movl (%eax,%ecx,4),%edx
movl %edx,(%esp,%ecx,4)
addl $1,%ecx
jmp .L000loop
.L001loop_done:
call *64(%esp)
addl $44,%esp
movl 24(%esp),%ecx
movl %esi,(%ecx)
movl %edi,4(%ecx)
movl %ebx,8(%ecx)
movl %ebp,12(%ecx)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size abi_test_trampoline,.-.L_abi_test_trampoline_begin
.globl abi_test_get_and_clear_direction_flag
.hidden abi_test_get_and_clear_direction_flag
.type abi_test_get_and_clear_direction_flag,@function
.align 16
abi_test_get_and_clear_direction_flag:
.L_abi_test_get_and_clear_direction_flag_begin:
pushfl
popl %eax
andl $1024,%eax
shrl $10,%eax
cld
ret
.size abi_test_get_and_clear_direction_flag,.-.L_abi_test_get_and_clear_direction_flag_begin
.globl abi_test_set_direction_flag
.hidden abi_test_set_direction_flag
.type abi_test_set_direction_flag,@function
.align 16
abi_test_set_direction_flag:
.L_abi_test_set_direction_flag_begin:
std
ret
.size abi_test_set_direction_flag,.-.L_abi_test_set_direction_flag_begin
.globl abi_test_clobber_eax
.hidden abi_test_clobber_eax
.type abi_test_clobber_eax,@function
.align 16
abi_test_clobber_eax:
.L_abi_test_clobber_eax_begin:
xorl %eax,%eax
ret
.size abi_test_clobber_eax,.-.L_abi_test_clobber_eax_begin
.globl abi_test_clobber_ebx
.hidden abi_test_clobber_ebx
.type abi_test_clobber_ebx,@function
.align 16
abi_test_clobber_ebx:
.L_abi_test_clobber_ebx_begin:
xorl %ebx,%ebx
ret
.size abi_test_clobber_ebx,.-.L_abi_test_clobber_ebx_begin
.globl abi_test_clobber_ecx
.hidden abi_test_clobber_ecx
.type abi_test_clobber_ecx,@function
.align 16
abi_test_clobber_ecx:
.L_abi_test_clobber_ecx_begin:
xorl %ecx,%ecx
ret
.size abi_test_clobber_ecx,.-.L_abi_test_clobber_ecx_begin
.globl abi_test_clobber_edx
.hidden abi_test_clobber_edx
.type abi_test_clobber_edx,@function
.align 16
abi_test_clobber_edx:
.L_abi_test_clobber_edx_begin:
xorl %edx,%edx
ret
.size abi_test_clobber_edx,.-.L_abi_test_clobber_edx_begin
.globl abi_test_clobber_edi
.hidden abi_test_clobber_edi
.type abi_test_clobber_edi,@function
.align 16
abi_test_clobber_edi:
.L_abi_test_clobber_edi_begin:
xorl %edi,%edi
ret
.size abi_test_clobber_edi,.-.L_abi_test_clobber_edi_begin
.globl abi_test_clobber_esi
.hidden abi_test_clobber_esi
.type abi_test_clobber_esi,@function
.align 16
abi_test_clobber_esi:
.L_abi_test_clobber_esi_begin:
xorl %esi,%esi
ret
.size abi_test_clobber_esi,.-.L_abi_test_clobber_esi_begin
.globl abi_test_clobber_ebp
.hidden abi_test_clobber_ebp
.type abi_test_clobber_ebp,@function
.align 16
abi_test_clobber_ebp:
.L_abi_test_clobber_ebp_begin:
xorl %ebp,%ebp
ret
.size abi_test_clobber_ebp,.-.L_abi_test_clobber_ebp_begin
.globl abi_test_clobber_xmm0
.hidden abi_test_clobber_xmm0
.type abi_test_clobber_xmm0,@function
.align 16
abi_test_clobber_xmm0:
.L_abi_test_clobber_xmm0_begin:
pxor %xmm0,%xmm0
ret
.size abi_test_clobber_xmm0,.-.L_abi_test_clobber_xmm0_begin
.globl abi_test_clobber_xmm1
.hidden abi_test_clobber_xmm1
.type abi_test_clobber_xmm1,@function
.align 16
abi_test_clobber_xmm1:
.L_abi_test_clobber_xmm1_begin:
pxor %xmm1,%xmm1
ret
.size abi_test_clobber_xmm1,.-.L_abi_test_clobber_xmm1_begin
.globl abi_test_clobber_xmm2
.hidden abi_test_clobber_xmm2
.type abi_test_clobber_xmm2,@function
.align 16
abi_test_clobber_xmm2:
.L_abi_test_clobber_xmm2_begin:
pxor %xmm2,%xmm2
ret
.size abi_test_clobber_xmm2,.-.L_abi_test_clobber_xmm2_begin
.globl abi_test_clobber_xmm3
.hidden abi_test_clobber_xmm3
.type abi_test_clobber_xmm3,@function
.align 16
abi_test_clobber_xmm3:
.L_abi_test_clobber_xmm3_begin:
pxor %xmm3,%xmm3
ret
.size abi_test_clobber_xmm3,.-.L_abi_test_clobber_xmm3_begin
.globl abi_test_clobber_xmm4
.hidden abi_test_clobber_xmm4
.type abi_test_clobber_xmm4,@function
.align 16
abi_test_clobber_xmm4:
.L_abi_test_clobber_xmm4_begin:
pxor %xmm4,%xmm4
ret
.size abi_test_clobber_xmm4,.-.L_abi_test_clobber_xmm4_begin
.globl abi_test_clobber_xmm5
.hidden abi_test_clobber_xmm5
.type abi_test_clobber_xmm5,@function
.align 16
abi_test_clobber_xmm5:
.L_abi_test_clobber_xmm5_begin:
pxor %xmm5,%xmm5
ret
.size abi_test_clobber_xmm5,.-.L_abi_test_clobber_xmm5_begin
.globl abi_test_clobber_xmm6
.hidden abi_test_clobber_xmm6
.type abi_test_clobber_xmm6,@function
.align 16
abi_test_clobber_xmm6:
.L_abi_test_clobber_xmm6_begin:
pxor %xmm6,%xmm6
ret
.size abi_test_clobber_xmm6,.-.L_abi_test_clobber_xmm6_begin
.globl abi_test_clobber_xmm7
.hidden abi_test_clobber_xmm7
.type abi_test_clobber_xmm7,@function
.align 16
abi_test_clobber_xmm7:
.L_abi_test_clobber_xmm7_begin:
pxor %xmm7,%xmm7
ret
.size abi_test_clobber_xmm7,.-.L_abi_test_clobber_xmm7_begin
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
|
marvin-hansen/iggy-streaming-system
| 98,892
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-x86/crypto/fipsmodule/sha256-586.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl sha256_block_data_order_nohw
.hidden sha256_block_data_order_nohw
.type sha256_block_data_order_nohw,@function
.align 16
sha256_block_data_order_nohw:
.L_sha256_block_data_order_nohw_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl %esp,%ebx
call .L000pic_point
.L000pic_point:
popl %ebp
leal .LK256-.L000pic_point(%ebp),%ebp
subl $16,%esp
andl $-64,%esp
shll $6,%eax
addl %edi,%eax
movl %esi,(%esp)
movl %edi,4(%esp)
movl %eax,8(%esp)
movl %ebx,12(%esp)
.L001no_xmm:
subl %edi,%eax
cmpl $256,%eax
jae .L002unrolled
jmp .L003loop
.align 16
.L003loop:
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
bswap %eax
movl 12(%edi),%edx
bswap %ebx
pushl %eax
bswap %ecx
pushl %ebx
bswap %edx
pushl %ecx
pushl %edx
movl 16(%edi),%eax
movl 20(%edi),%ebx
movl 24(%edi),%ecx
bswap %eax
movl 28(%edi),%edx
bswap %ebx
pushl %eax
bswap %ecx
pushl %ebx
bswap %edx
pushl %ecx
pushl %edx
movl 32(%edi),%eax
movl 36(%edi),%ebx
movl 40(%edi),%ecx
bswap %eax
movl 44(%edi),%edx
bswap %ebx
pushl %eax
bswap %ecx
pushl %ebx
bswap %edx
pushl %ecx
pushl %edx
movl 48(%edi),%eax
movl 52(%edi),%ebx
movl 56(%edi),%ecx
bswap %eax
movl 60(%edi),%edx
bswap %ebx
pushl %eax
bswap %ecx
pushl %ebx
bswap %edx
pushl %ecx
pushl %edx
addl $64,%edi
leal -36(%esp),%esp
movl %edi,104(%esp)
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edi
movl %ebx,8(%esp)
xorl %ecx,%ebx
movl %ecx,12(%esp)
movl %edi,16(%esp)
movl %ebx,(%esp)
movl 16(%esi),%edx
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edi
movl %ebx,24(%esp)
movl %ecx,28(%esp)
movl %edi,32(%esp)
.align 16
.L00400_15:
movl %edx,%ecx
movl 24(%esp),%esi
rorl $14,%ecx
movl 28(%esp),%edi
xorl %edx,%ecx
xorl %edi,%esi
movl 96(%esp),%ebx
rorl $5,%ecx
andl %edx,%esi
movl %edx,20(%esp)
xorl %ecx,%edx
addl 32(%esp),%ebx
xorl %edi,%esi
rorl $6,%edx
movl %eax,%ecx
addl %esi,%ebx
rorl $9,%ecx
addl %edx,%ebx
movl 8(%esp),%edi
xorl %eax,%ecx
movl %eax,4(%esp)
leal -4(%esp),%esp
rorl $11,%ecx
movl (%ebp),%esi
xorl %eax,%ecx
movl 20(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %esi,%ebx
movl %eax,(%esp)
addl %ebx,%edx
andl 4(%esp),%eax
addl %ecx,%ebx
xorl %edi,%eax
addl $4,%ebp
addl %ebx,%eax
cmpl $3248222580,%esi
jne .L00400_15
movl 156(%esp),%ecx
jmp .L00516_63
.align 16
.L00516_63:
movl %ecx,%ebx
movl 104(%esp),%esi
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 160(%esp),%ebx
shrl $10,%edi
addl 124(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 24(%esp),%esi
rorl $14,%ecx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %edx,%ecx
xorl %edi,%esi
movl %ebx,96(%esp)
rorl $5,%ecx
andl %edx,%esi
movl %edx,20(%esp)
xorl %ecx,%edx
addl 32(%esp),%ebx
xorl %edi,%esi
rorl $6,%edx
movl %eax,%ecx
addl %esi,%ebx
rorl $9,%ecx
addl %edx,%ebx
movl 8(%esp),%edi
xorl %eax,%ecx
movl %eax,4(%esp)
leal -4(%esp),%esp
rorl $11,%ecx
movl (%ebp),%esi
xorl %eax,%ecx
movl 20(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %esi,%ebx
movl %eax,(%esp)
addl %ebx,%edx
andl 4(%esp),%eax
addl %ecx,%ebx
xorl %edi,%eax
movl 156(%esp),%ecx
addl $4,%ebp
addl %ebx,%eax
cmpl $3329325298,%esi
jne .L00516_63
movl 356(%esp),%esi
movl 8(%esp),%ebx
movl 16(%esp),%ecx
addl (%esi),%eax
addl 4(%esi),%ebx
addl 8(%esi),%edi
addl 12(%esi),%ecx
movl %eax,(%esi)
movl %ebx,4(%esi)
movl %edi,8(%esi)
movl %ecx,12(%esi)
movl 24(%esp),%eax
movl 28(%esp),%ebx
movl 32(%esp),%ecx
movl 360(%esp),%edi
addl 16(%esi),%edx
addl 20(%esi),%eax
addl 24(%esi),%ebx
addl 28(%esi),%ecx
movl %edx,16(%esi)
movl %eax,20(%esi)
movl %ebx,24(%esi)
movl %ecx,28(%esi)
leal 356(%esp),%esp
subl $256,%ebp
cmpl 8(%esp),%edi
jb .L003loop
movl 12(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 64
.LK256:
.long 1116352408,1899447441,3049323471,3921009573,961987163,1508970993,2453635748,2870763221,3624381080,310598401,607225278,1426881987,1925078388,2162078206,2614888103,3248222580,3835390401,4022224774,264347078,604807628,770255983,1249150122,1555081692,1996064986,2554220882,2821834349,2952996808,3210313671,3336571891,3584528711,113926993,338241895,666307205,773529912,1294757372,1396182291,1695183700,1986661051,2177026350,2456956037,2730485921,2820302411,3259730800,3345764771,3516065817,3600352804,4094571909,275423344,430227734,506948616,659060556,883997877,958139571,1322822218,1537002063,1747873779,1955562222,2024104815,2227730452,2361852424,2428436474,2756734187,3204031479,3329325298
.long 66051,67438087,134810123,202182159
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97
.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32
.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97
.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103
.byte 62,0
.align 16
.L002unrolled:
leal -96(%esp),%esp
movl (%esi),%eax
movl 4(%esi),%ebp
movl 8(%esi),%ecx
movl 12(%esi),%ebx
movl %ebp,4(%esp)
xorl %ecx,%ebp
movl %ecx,8(%esp)
movl %ebx,12(%esp)
movl 16(%esi),%edx
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%esi
movl %ebx,20(%esp)
movl %ecx,24(%esp)
movl %esi,28(%esp)
jmp .L006grand_loop
.align 16
.L006grand_loop:
movl (%edi),%ebx
movl 4(%edi),%ecx
bswap %ebx
movl 8(%edi),%esi
bswap %ecx
movl %ebx,32(%esp)
bswap %esi
movl %ecx,36(%esp)
movl %esi,40(%esp)
movl 12(%edi),%ebx
movl 16(%edi),%ecx
bswap %ebx
movl 20(%edi),%esi
bswap %ecx
movl %ebx,44(%esp)
bswap %esi
movl %ecx,48(%esp)
movl %esi,52(%esp)
movl 24(%edi),%ebx
movl 28(%edi),%ecx
bswap %ebx
movl 32(%edi),%esi
bswap %ecx
movl %ebx,56(%esp)
bswap %esi
movl %ecx,60(%esp)
movl %esi,64(%esp)
movl 36(%edi),%ebx
movl 40(%edi),%ecx
bswap %ebx
movl 44(%edi),%esi
bswap %ecx
movl %ebx,68(%esp)
bswap %esi
movl %ecx,72(%esp)
movl %esi,76(%esp)
movl 48(%edi),%ebx
movl 52(%edi),%ecx
bswap %ebx
movl 56(%edi),%esi
bswap %ecx
movl %ebx,80(%esp)
bswap %esi
movl %ecx,84(%esp)
movl %esi,88(%esp)
movl 60(%edi),%ebx
addl $64,%edi
bswap %ebx
movl %edi,100(%esp)
movl %ebx,92(%esp)
movl %edx,%ecx
movl 20(%esp),%esi
rorl $14,%edx
movl 24(%esp),%edi
xorl %ecx,%edx
movl 32(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1116352408(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 16(%esp),%ecx
rorl $14,%edx
movl 20(%esp),%edi
xorl %esi,%edx
movl 36(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1899447441(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 12(%esp),%esi
rorl $14,%edx
movl 16(%esp),%edi
xorl %ecx,%edx
movl 40(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3049323471(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 8(%esp),%ecx
rorl $14,%edx
movl 12(%esp),%edi
xorl %esi,%edx
movl 44(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3921009573(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 4(%esp),%esi
rorl $14,%edx
movl 8(%esp),%edi
xorl %ecx,%edx
movl 48(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 961987163(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl (%esp),%ecx
rorl $14,%edx
movl 4(%esp),%edi
xorl %esi,%edx
movl 52(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1508970993(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 28(%esp),%esi
rorl $14,%edx
movl (%esp),%edi
xorl %ecx,%edx
movl 56(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2453635748(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 24(%esp),%ecx
rorl $14,%edx
movl 28(%esp),%edi
xorl %esi,%edx
movl 60(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2870763221(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 20(%esp),%esi
rorl $14,%edx
movl 24(%esp),%edi
xorl %ecx,%edx
movl 64(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3624381080(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 16(%esp),%ecx
rorl $14,%edx
movl 20(%esp),%edi
xorl %esi,%edx
movl 68(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 310598401(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 12(%esp),%esi
rorl $14,%edx
movl 16(%esp),%edi
xorl %ecx,%edx
movl 72(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 607225278(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 8(%esp),%ecx
rorl $14,%edx
movl 12(%esp),%edi
xorl %esi,%edx
movl 76(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1426881987(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 4(%esp),%esi
rorl $14,%edx
movl 8(%esp),%edi
xorl %ecx,%edx
movl 80(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1925078388(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl (%esp),%ecx
rorl $14,%edx
movl 4(%esp),%edi
xorl %esi,%edx
movl 84(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2162078206(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl %edx,%ecx
movl 28(%esp),%esi
rorl $14,%edx
movl (%esp),%edi
xorl %ecx,%edx
movl 88(%esp),%ebx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2614888103(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl %edx,%esi
movl 24(%esp),%ecx
rorl $14,%edx
movl 28(%esp),%edi
xorl %esi,%edx
movl 92(%esp),%ebx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3248222580(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
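/* Rounds 16..63: each round also expands one message word in place,
   W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16], with
   sigma0 = ROTR7 ^ ROTR18 ^ SHR3 (the rorl $11 / rorl $7 / shrl $3 steps)
   and sigma1 = ROTR17 ^ ROTR19 ^ SHR10 (the rorl $2 / rorl $17 / shrl $10
   steps).  The 16-word schedule lives in 32(%esp)..92(%esp). */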
movl 36(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 88(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 32(%esp),%ebx
shrl $10,%edi
addl 68(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,32(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3835390401(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 40(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 92(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 36(%esp),%ebx
shrl $10,%edi
addl 72(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,36(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 4022224774(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 44(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 32(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 40(%esp),%ebx
shrl $10,%edi
addl 76(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,40(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 264347078(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 48(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 36(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 44(%esp),%ebx
shrl $10,%edi
addl 80(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,44(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 604807628(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 52(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 40(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 48(%esp),%ebx
shrl $10,%edi
addl 84(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,48(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 770255983(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 56(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 44(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 52(%esp),%ebx
shrl $10,%edi
addl 88(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,52(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1249150122(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 60(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 48(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 56(%esp),%ebx
shrl $10,%edi
addl 92(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
movl %ebx,56(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1555081692(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 64(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 52(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 60(%esp),%ebx
shrl $10,%edi
addl 32(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
movl %ebx,60(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1996064986(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 68(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 56(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 64(%esp),%ebx
shrl $10,%edi
addl 36(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,64(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2554220882(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 72(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 60(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 68(%esp),%ebx
shrl $10,%edi
addl 40(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,68(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2821834349(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 76(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 64(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 72(%esp),%ebx
shrl $10,%edi
addl 44(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,72(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2952996808(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 80(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 68(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 76(%esp),%ebx
shrl $10,%edi
addl 48(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,76(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3210313671(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 84(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 72(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 80(%esp),%ebx
shrl $10,%edi
addl 52(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,80(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3336571891(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 88(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 76(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 84(%esp),%ebx
shrl $10,%edi
addl 56(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,84(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3584528711(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 92(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 80(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 88(%esp),%ebx
shrl $10,%edi
addl 60(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
movl %ebx,88(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 113926993(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 32(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 84(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 92(%esp),%ebx
shrl $10,%edi
addl 64(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
movl %ebx,92(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 338241895(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 36(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 88(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 32(%esp),%ebx
shrl $10,%edi
addl 68(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,32(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 666307205(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 40(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 92(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 36(%esp),%ebx
shrl $10,%edi
addl 72(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,36(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 773529912(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 44(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 32(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 40(%esp),%ebx
shrl $10,%edi
addl 76(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,40(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1294757372(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 48(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 36(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 44(%esp),%ebx
shrl $10,%edi
addl 80(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,44(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1396182291(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 52(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 40(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 48(%esp),%ebx
shrl $10,%edi
addl 84(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,48(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1695183700(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 56(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 44(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 52(%esp),%ebx
shrl $10,%edi
addl 88(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,52(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1986661051(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 60(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 48(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 56(%esp),%ebx
shrl $10,%edi
addl 92(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
movl %ebx,56(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2177026350(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 64(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 52(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 60(%esp),%ebx
shrl $10,%edi
addl 32(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
movl %ebx,60(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2456956037(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 68(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 56(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 64(%esp),%ebx
shrl $10,%edi
addl 36(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,64(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2730485921(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 72(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 60(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 68(%esp),%ebx
shrl $10,%edi
addl 40(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,68(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2820302411(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 76(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 64(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 72(%esp),%ebx
shrl $10,%edi
addl 44(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,72(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3259730800(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 80(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 68(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 76(%esp),%ebx
shrl $10,%edi
addl 48(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,76(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3345764771(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 84(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 72(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 80(%esp),%ebx
shrl $10,%edi
addl 52(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,80(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3516065817(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 88(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 76(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 84(%esp),%ebx
shrl $10,%edi
addl 56(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,84(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3600352804(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 92(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 80(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 88(%esp),%ebx
shrl $10,%edi
addl 60(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
movl %ebx,88(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 4094571909(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 32(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 84(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 92(%esp),%ebx
shrl $10,%edi
addl 64(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
movl %ebx,92(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 275423344(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 36(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 88(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 32(%esp),%ebx
shrl $10,%edi
addl 68(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,32(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 430227734(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 40(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 92(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 36(%esp),%ebx
shrl $10,%edi
addl 72(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,36(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 506948616(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 44(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 32(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 40(%esp),%ebx
shrl $10,%edi
addl 76(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,40(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 659060556(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 48(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 36(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 44(%esp),%ebx
shrl $10,%edi
addl 80(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,44(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 883997877(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 52(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 40(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 48(%esp),%ebx
shrl $10,%edi
addl 84(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,48(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 958139571(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 56(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 44(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 52(%esp),%ebx
shrl $10,%edi
addl 88(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,52(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1322822218(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 60(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 48(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 56(%esp),%ebx
shrl $10,%edi
addl 92(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
movl %ebx,56(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1537002063(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 64(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 52(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 60(%esp),%ebx
shrl $10,%edi
addl 32(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
movl %ebx,60(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 1747873779(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 68(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
movl 56(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 64(%esp),%ebx
shrl $10,%edi
addl 36(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 20(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 24(%esp),%edi
xorl %ecx,%edx
movl %ebx,64(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
addl 28(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 4(%esp),%edi
xorl %eax,%ecx
movl %eax,(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 1955562222(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 72(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 12(%esp),%edx
addl %ecx,%ebp
movl 60(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 68(%esp),%ebx
shrl $10,%edi
addl 40(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 16(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 20(%esp),%edi
xorl %esi,%edx
movl %ebx,68(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,12(%esp)
xorl %esi,%edx
addl 24(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl (%esp),%edi
xorl %ebp,%esi
movl %ebp,28(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2024104815(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 76(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 8(%esp),%edx
addl %esi,%eax
movl 64(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 72(%esp),%ebx
shrl $10,%edi
addl 44(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 12(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 16(%esp),%edi
xorl %ecx,%edx
movl %ebx,72(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
addl 20(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 28(%esp),%edi
xorl %eax,%ecx
movl %eax,24(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2227730452(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 80(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 4(%esp),%edx
addl %ecx,%ebp
movl 68(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 76(%esp),%ebx
shrl $10,%edi
addl 48(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 8(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 12(%esp),%edi
xorl %esi,%edx
movl %ebx,76(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,4(%esp)
xorl %esi,%edx
addl 16(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 24(%esp),%edi
xorl %ebp,%esi
movl %ebp,20(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2361852424(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 84(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl (%esp),%edx
addl %esi,%eax
movl 72(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 80(%esp),%ebx
shrl $10,%edi
addl 52(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 4(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl 8(%esp),%edi
xorl %ecx,%edx
movl %ebx,80(%esp)
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
addl 12(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 20(%esp),%edi
xorl %eax,%ecx
movl %eax,16(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 2428436474(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 88(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 28(%esp),%edx
addl %ecx,%ebp
movl 76(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 84(%esp),%ebx
shrl $10,%edi
addl 56(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl (%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 4(%esp),%edi
xorl %esi,%edx
movl %ebx,84(%esp)
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,28(%esp)
xorl %esi,%edx
addl 8(%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 16(%esp),%edi
xorl %ebp,%esi
movl %ebp,12(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 2756734187(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
movl 92(%esp),%ecx
rorl $2,%esi
addl %edx,%eax
addl 24(%esp),%edx
addl %esi,%eax
movl 80(%esp),%esi
movl %ecx,%ebx
rorl $11,%ecx
movl %esi,%edi
rorl $2,%esi
xorl %ebx,%ecx
shrl $3,%ebx
rorl $7,%ecx
xorl %edi,%esi
xorl %ecx,%ebx
rorl $17,%esi
addl 88(%esp),%ebx
shrl $10,%edi
addl 60(%esp),%ebx
movl %edx,%ecx
xorl %esi,%edi
movl 28(%esp),%esi
rorl $14,%edx
addl %edi,%ebx
movl (%esp),%edi
xorl %ecx,%edx
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
addl 4(%esp),%ebx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%ebx
rorl $9,%ecx
movl %eax,%esi
movl 12(%esp),%edi
xorl %eax,%ecx
movl %eax,8(%esp)
xorl %edi,%eax
rorl $11,%ecx
andl %eax,%ebp
leal 3204031479(%ebx,%edx,1),%edx
xorl %esi,%ecx
xorl %edi,%ebp
movl 32(%esp),%esi
rorl $2,%ecx
addl %edx,%ebp
addl 20(%esp),%edx
addl %ecx,%ebp
movl 84(%esp),%ecx
movl %esi,%ebx
rorl $11,%esi
movl %ecx,%edi
rorl $2,%ecx
xorl %ebx,%esi
shrl $3,%ebx
rorl $7,%esi
xorl %edi,%ecx
xorl %esi,%ebx
rorl $17,%ecx
addl 92(%esp),%ebx
shrl $10,%edi
addl 64(%esp),%ebx
movl %edx,%esi
xorl %ecx,%edi
movl 24(%esp),%ecx
rorl $14,%edx
addl %edi,%ebx
movl 28(%esp),%edi
xorl %esi,%edx
xorl %edi,%ecx
rorl $5,%edx
andl %esi,%ecx
movl %esi,20(%esp)
xorl %esi,%edx
addl (%esp),%ebx
xorl %ecx,%edi
rorl $6,%edx
movl %ebp,%esi
addl %edi,%ebx
rorl $9,%esi
movl %ebp,%ecx
movl 8(%esp),%edi
xorl %ebp,%esi
movl %ebp,4(%esp)
xorl %edi,%ebp
rorl $11,%esi
andl %ebp,%eax
leal 3329325298(%ebx,%edx,1),%edx
xorl %ecx,%esi
xorl %edi,%eax
rorl $2,%esi
addl %edx,%eax
addl 16(%esp),%edx
addl %esi,%eax
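/* All 64 rounds done: reload the state pointer from 96(%esp), add the
   working variables back into the hash state (feed-forward), then loop
   while the data pointer at 100(%esp) is still below the limit at
   104(%esp). */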
movl 96(%esp),%esi
xorl %edi,%ebp
movl 12(%esp),%ecx
addl (%esi),%eax
addl 4(%esi),%ebp
addl 8(%esi),%edi
addl 12(%esi),%ecx
movl %eax,(%esi)
movl %ebp,4(%esi)
movl %edi,8(%esi)
movl %ecx,12(%esi)
movl %ebp,4(%esp)
xorl %edi,%ebp
movl %edi,8(%esp)
movl %ecx,12(%esp)
movl 20(%esp),%edi
movl 24(%esp),%ebx
movl 28(%esp),%ecx
addl 16(%esi),%edx
addl 20(%esi),%edi
addl 24(%esi),%ebx
addl 28(%esi),%ecx
movl %edx,16(%esi)
movl %edi,20(%esi)
movl %ebx,24(%esi)
movl %ecx,28(%esi)
movl %edi,20(%esp)
movl 100(%esp),%edi
movl %ebx,24(%esp)
movl %ecx,28(%esp)
cmpl 104(%esp),%edi
jb .L006grand_loop
movl 108(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size sha256_block_data_order_nohw,.-.L_sha256_block_data_order_nohw_begin
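/* SSSE3 code path: same SHA-256 compression function, but the message
   schedule is computed four words at a time in XMM registers while the
   rounds themselves stay scalar.  The caller is expected to pick this
   entry point only when SSSE3 is available. */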
.globl sha256_block_data_order_ssse3
.hidden sha256_block_data_order_ssse3
.type sha256_block_data_order_ssse3,@function
.align 16
sha256_block_data_order_ssse3:
.L_sha256_block_data_order_ssse3_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl %esp,%ebx
call .L007pic_point
.L007pic_point:
popl %ebp
leal .LK256-.L007pic_point(%ebp),%ebp
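/* position-independent load: %ebp now points at the .LK256 constant table */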
subl $16,%esp
andl $-64,%esp
shll $6,%eax
addl %edi,%eax
movl %esi,(%esp)
movl %edi,4(%esp)
movl %eax,8(%esp)
movl %ebx,12(%esp)
leal -96(%esp),%esp
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edi
movl %ebx,4(%esp)
xorl %ecx,%ebx
movl %ecx,8(%esp)
movl %edi,12(%esp)
movl 16(%esi),%edx
movl 20(%esi),%edi
movl 24(%esi),%ecx
movl 28(%esi),%esi
movl %edi,20(%esp)
movl 100(%esp),%edi
movl %ecx,24(%esp)
movl %esi,28(%esp)
movdqa 256(%ebp),%xmm7
jmp .L008grand_ssse3
.align 16
.L008grand_ssse3:
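/* Per-block loop: load 64 bytes of input, byte-swap them to big-endian
   words with pshufb (%xmm7 holds the swap mask loaded from 256(%ebp)),
   pre-add the first 16 round constants with paddd, and stage
   W[0..15]+K[0..15] in the four 16-byte slots at 32/48/64/80(%esp). */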
movdqu (%edi),%xmm0
movdqu 16(%edi),%xmm1
movdqu 32(%edi),%xmm2
movdqu 48(%edi),%xmm3
addl $64,%edi
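/* the .byte sequences below are raw encodings of pshufb %xmm7,%xmm0..%xmm3 */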
.byte 102,15,56,0,199
movl %edi,100(%esp)
.byte 102,15,56,0,207
movdqa (%ebp),%xmm4
.byte 102,15,56,0,215
movdqa 16(%ebp),%xmm5
paddd %xmm0,%xmm4
.byte 102,15,56,0,223
movdqa 32(%ebp),%xmm6
paddd %xmm1,%xmm5
movdqa 48(%ebp),%xmm7
movdqa %xmm4,32(%esp)
paddd %xmm2,%xmm6
movdqa %xmm5,48(%esp)
paddd %xmm3,%xmm7
movdqa %xmm6,64(%esp)
movdqa %xmm7,80(%esp)
jmp .L009ssse3_00_47
.align 16
.L009ssse3_00_47:
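/* Rounds 0..47: each pass of this block does 16 scalar rounds while
   expanding the next 16 message words with the vectorized sigma0/sigma1
   (the psrld/pslld/psrlq/pshufd/pxor sequences); %ebp advances 64 bytes
   per pass through the K256 table. */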
addl $64,%ebp
movl %edx,%ecx
movdqa %xmm1,%xmm4
rorl $14,%edx
movl 20(%esp),%esi
movdqa %xmm3,%xmm7
xorl %ecx,%edx
movl 24(%esp),%edi
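/* .byte 102,15,58,15,...: raw encodings of palignr $4 (SSSE3), forming
   the W[i-15] and W[i-7] vectors from adjacent schedule registers */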
.byte 102,15,58,15,224,4
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
.byte 102,15,58,15,250,4
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
movdqa %xmm4,%xmm5
rorl $6,%edx
movl %eax,%ecx
movdqa %xmm4,%xmm6
addl %edi,%edx
movl 4(%esp),%edi
psrld $3,%xmm4
movl %eax,%esi
rorl $9,%ecx
paddd %xmm7,%xmm0
movl %eax,(%esp)
xorl %eax,%ecx
psrld $7,%xmm6
xorl %edi,%eax
addl 28(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
pshufd $250,%xmm3,%xmm7
xorl %esi,%ecx
addl 32(%esp),%edx
pslld $14,%xmm5
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm4
addl %edx,%ebx
addl 12(%esp),%edx
psrld $11,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm5,%xmm4
movl 16(%esp),%esi
xorl %ecx,%edx
pslld $11,%xmm5
movl 20(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
pxor %xmm6,%xmm4
andl %ecx,%esi
movl %ecx,12(%esp)
movdqa %xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
pxor %xmm5,%xmm4
movl %ebx,%ecx
addl %edi,%edx
psrld $10,%xmm7
movl (%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm4,%xmm0
movl %ebx,28(%esp)
xorl %ebx,%ecx
psrlq $17,%xmm6
xorl %edi,%ebx
addl 24(%esp),%edx
rorl $11,%ecx
pxor %xmm6,%xmm7
andl %ebx,%eax
xorl %esi,%ecx
psrlq $2,%xmm6
addl 36(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%eax
addl 8(%esp),%edx
pshufd $128,%xmm7,%xmm7
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
psrldq $8,%xmm7
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
paddd %xmm7,%xmm0
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,24(%esp)
pshufd $80,%xmm0,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
movdqa %xmm7,%xmm6
rorl $11,%ecx
psrld $10,%xmm7
andl %eax,%ebx
psrlq $17,%xmm6
xorl %esi,%ecx
addl 40(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%ebx
addl 4(%esp),%edx
psrlq $2,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm6,%xmm7
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
pshufd $8,%xmm7,%xmm7
xorl %edi,%esi
rorl $5,%edx
movdqa (%ebp),%xmm6
andl %ecx,%esi
movl %ecx,4(%esp)
pslldq $8,%xmm7
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm7,%xmm0
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
paddd %xmm0,%xmm6
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 44(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movdqa %xmm6,32(%esp)
movl %edx,%ecx
movdqa %xmm2,%xmm4
rorl $14,%edx
movl 4(%esp),%esi
movdqa %xmm0,%xmm7
xorl %ecx,%edx
movl 8(%esp),%edi
.byte 102,15,58,15,225,4
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
.byte 102,15,58,15,251,4
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
movdqa %xmm4,%xmm5
rorl $6,%edx
movl %eax,%ecx
movdqa %xmm4,%xmm6
addl %edi,%edx
movl 20(%esp),%edi
psrld $3,%xmm4
movl %eax,%esi
rorl $9,%ecx
paddd %xmm7,%xmm1
movl %eax,16(%esp)
xorl %eax,%ecx
psrld $7,%xmm6
xorl %edi,%eax
addl 12(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
pshufd $250,%xmm0,%xmm7
xorl %esi,%ecx
addl 48(%esp),%edx
pslld $14,%xmm5
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm4
addl %edx,%ebx
addl 28(%esp),%edx
psrld $11,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm5,%xmm4
movl (%esp),%esi
xorl %ecx,%edx
pslld $11,%xmm5
movl 4(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
pxor %xmm6,%xmm4
andl %ecx,%esi
movl %ecx,28(%esp)
movdqa %xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
pxor %xmm5,%xmm4
movl %ebx,%ecx
addl %edi,%edx
psrld $10,%xmm7
movl 16(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm4,%xmm1
movl %ebx,12(%esp)
xorl %ebx,%ecx
psrlq $17,%xmm6
xorl %edi,%ebx
addl 8(%esp),%edx
rorl $11,%ecx
pxor %xmm6,%xmm7
andl %ebx,%eax
xorl %esi,%ecx
psrlq $2,%xmm6
addl 52(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%eax
addl 24(%esp),%edx
pshufd $128,%xmm7,%xmm7
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
psrldq $8,%xmm7
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
paddd %xmm7,%xmm1
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,8(%esp)
pshufd $80,%xmm1,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
movdqa %xmm7,%xmm6
rorl $11,%ecx
psrld $10,%xmm7
andl %eax,%ebx
psrlq $17,%xmm6
xorl %esi,%ecx
addl 56(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%ebx
addl 20(%esp),%edx
psrlq $2,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm6,%xmm7
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
pshufd $8,%xmm7,%xmm7
xorl %edi,%esi
rorl $5,%edx
movdqa 16(%ebp),%xmm6
andl %ecx,%esi
movl %ecx,20(%esp)
pslldq $8,%xmm7
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm7,%xmm1
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
paddd %xmm1,%xmm6
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 60(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
movdqa %xmm6,48(%esp)
movl %edx,%ecx
movdqa %xmm3,%xmm4
rorl $14,%edx
movl 20(%esp),%esi
movdqa %xmm1,%xmm7
xorl %ecx,%edx
movl 24(%esp),%edi
.byte 102,15,58,15,226,4
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
.byte 102,15,58,15,248,4
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
movdqa %xmm4,%xmm5
rorl $6,%edx
movl %eax,%ecx
movdqa %xmm4,%xmm6
addl %edi,%edx
movl 4(%esp),%edi
psrld $3,%xmm4
movl %eax,%esi
rorl $9,%ecx
paddd %xmm7,%xmm2
movl %eax,(%esp)
xorl %eax,%ecx
psrld $7,%xmm6
xorl %edi,%eax
addl 28(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
pshufd $250,%xmm1,%xmm7
xorl %esi,%ecx
addl 64(%esp),%edx
pslld $14,%xmm5
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm4
addl %edx,%ebx
addl 12(%esp),%edx
psrld $11,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm5,%xmm4
movl 16(%esp),%esi
xorl %ecx,%edx
pslld $11,%xmm5
movl 20(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
pxor %xmm6,%xmm4
andl %ecx,%esi
movl %ecx,12(%esp)
movdqa %xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
pxor %xmm5,%xmm4
movl %ebx,%ecx
addl %edi,%edx
psrld $10,%xmm7
movl (%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm4,%xmm2
movl %ebx,28(%esp)
xorl %ebx,%ecx
psrlq $17,%xmm6
xorl %edi,%ebx
addl 24(%esp),%edx
rorl $11,%ecx
pxor %xmm6,%xmm7
andl %ebx,%eax
xorl %esi,%ecx
psrlq $2,%xmm6
addl 68(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%eax
addl 8(%esp),%edx
pshufd $128,%xmm7,%xmm7
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
psrldq $8,%xmm7
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
paddd %xmm7,%xmm2
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,24(%esp)
pshufd $80,%xmm2,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
movdqa %xmm7,%xmm6
rorl $11,%ecx
psrld $10,%xmm7
andl %eax,%ebx
psrlq $17,%xmm6
xorl %esi,%ecx
addl 72(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%ebx
addl 4(%esp),%edx
psrlq $2,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm6,%xmm7
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
pshufd $8,%xmm7,%xmm7
xorl %edi,%esi
rorl $5,%edx
movdqa 32(%ebp),%xmm6
andl %ecx,%esi
movl %ecx,4(%esp)
pslldq $8,%xmm7
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm7,%xmm2
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
paddd %xmm2,%xmm6
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 76(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movdqa %xmm6,64(%esp)
movl %edx,%ecx
movdqa %xmm0,%xmm4
rorl $14,%edx
movl 4(%esp),%esi
movdqa %xmm2,%xmm7
xorl %ecx,%edx
movl 8(%esp),%edi
.byte 102,15,58,15,227,4
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
.byte 102,15,58,15,249,4
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
movdqa %xmm4,%xmm5
rorl $6,%edx
movl %eax,%ecx
movdqa %xmm4,%xmm6
addl %edi,%edx
movl 20(%esp),%edi
psrld $3,%xmm4
movl %eax,%esi
rorl $9,%ecx
paddd %xmm7,%xmm3
movl %eax,16(%esp)
xorl %eax,%ecx
psrld $7,%xmm6
xorl %edi,%eax
addl 12(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
pshufd $250,%xmm2,%xmm7
xorl %esi,%ecx
addl 80(%esp),%edx
pslld $14,%xmm5
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm4
addl %edx,%ebx
addl 28(%esp),%edx
psrld $11,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm5,%xmm4
movl (%esp),%esi
xorl %ecx,%edx
pslld $11,%xmm5
movl 4(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
pxor %xmm6,%xmm4
andl %ecx,%esi
movl %ecx,28(%esp)
movdqa %xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
pxor %xmm5,%xmm4
movl %ebx,%ecx
addl %edi,%edx
psrld $10,%xmm7
movl 16(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm4,%xmm3
movl %ebx,12(%esp)
xorl %ebx,%ecx
psrlq $17,%xmm6
xorl %edi,%ebx
addl 8(%esp),%edx
rorl $11,%ecx
pxor %xmm6,%xmm7
andl %ebx,%eax
xorl %esi,%ecx
psrlq $2,%xmm6
addl 84(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%eax
addl 24(%esp),%edx
pshufd $128,%xmm7,%xmm7
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
psrldq $8,%xmm7
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
paddd %xmm7,%xmm3
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,8(%esp)
pshufd $80,%xmm3,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
movdqa %xmm7,%xmm6
rorl $11,%ecx
psrld $10,%xmm7
andl %eax,%ebx
psrlq $17,%xmm6
xorl %esi,%ecx
addl 88(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
pxor %xmm6,%xmm7
addl %edx,%ebx
addl 20(%esp),%edx
psrlq $2,%xmm6
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
pxor %xmm6,%xmm7
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
pshufd $8,%xmm7,%xmm7
xorl %edi,%esi
rorl $5,%edx
movdqa 48(%ebp),%xmm6
andl %ecx,%esi
movl %ecx,20(%esp)
pslldq $8,%xmm7
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
paddd %xmm7,%xmm3
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
paddd %xmm3,%xmm6
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 92(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
movdqa %xmm6,80(%esp)
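/* 66051 = 0x00010203, the first word of the byte-swap mask stored just
   past the K256 table; reaching it means all 64 constants have been
   consumed. */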
cmpl $66051,64(%ebp)
jne .L009ssse3_00_47
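/* Rounds 48..63: W+K for the last 16 rounds is already staged on the
   stack, so no further message expansion is needed. */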
movl %edx,%ecx
rorl $14,%edx
movl 20(%esp),%esi
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 32(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 16(%esp),%esi
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,28(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 36(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,24(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 40(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 44(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 4(%esp),%esi
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,16(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 48(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl (%esp),%esi
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,12(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 52(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,8(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 56(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 60(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 20(%esp),%esi
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 64(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 16(%esp),%esi
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,28(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 68(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,24(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 72(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 76(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 4(%esp),%esi
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,16(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 80(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl (%esp),%esi
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,12(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 84(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
rorl $14,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
rorl $9,%ecx
movl %eax,8(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
rorl $11,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 88(%esp),%edx
xorl %edi,%ebx
rorl $2,%ecx
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
rorl $14,%edx
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
rorl $5,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
rorl $6,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
rorl $9,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
rorl $11,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 92(%esp),%edx
xorl %edi,%eax
rorl $2,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
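// All 64 rounds of this block are done: reload the context pointer from
// 96(%esp), add the eight working variables (held in registers and in
// 4..28(%esp)) into the hash state, and re-spill the updated values so the
// next block starts from them.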
movl 96(%esp),%esi
xorl %edi,%ebx
movl 12(%esp),%ecx
addl (%esi),%eax
addl 4(%esi),%ebx
addl 8(%esi),%edi
addl 12(%esi),%ecx
movl %eax,(%esi)
movl %ebx,4(%esi)
movl %edi,8(%esi)
movl %ecx,12(%esi)
movl %ebx,4(%esp)
xorl %edi,%ebx
movl %edi,8(%esp)
movl %ecx,12(%esp)
movl 20(%esp),%edi
movl 24(%esp),%ecx
addl 16(%esi),%edx
addl 20(%esi),%edi
addl 24(%esi),%ecx
movl %edx,16(%esi)
movl %edi,20(%esi)
movl %edi,20(%esp)
movl 28(%esp),%edi
movl %ecx,24(%esi)
addl 28(%esi),%edi
movl %ecx,24(%esp)
movl %edi,28(%esi)
movl %edi,28(%esp)
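// Block finished: reload the input pointer from 100(%esp), restore %xmm7 from
// 64(%ebp) (the byte-swap mask stored just past the K256 table, since %ebp is
// 192 bytes into it at this point), rewind %ebp to the start of the table and
// loop while the input pointer is below the end marker at 104(%esp).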
movl 100(%esp),%edi
movdqa 64(%ebp),%xmm7
subl $192,%ebp
cmpl 104(%esp),%edi
jb .L008grand_ssse3
movl 108(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size sha256_block_data_order_ssse3,.-.L_sha256_block_data_order_ssse3_begin
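// AVX variant of the SHA-256 block function. The structure mirrors the SSSE3
// code above: the message-schedule expansion uses VEX-encoded vector
// instructions, the scalar rounds use shrdl for the rotates, and the AVX
// state is cleared with vzeroall on entry and before returning.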
.globl sha256_block_data_order_avx
.hidden sha256_block_data_order_avx
.type sha256_block_data_order_avx,@function
.align 16
sha256_block_data_order_avx:
.L_sha256_block_data_order_avx_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl %esp,%ebx
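// Position-independent addressing: the call/pop pair leaves the address of
// .L010pic_point in %ebp, from which the K256 constant table is reached with
// a PC-relative lea.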
call .L010pic_point
.L010pic_point:
popl %ebp
leal .LK256-.L010pic_point(%ebp),%ebp
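// Build a 64-byte-aligned frame: the context, input and end-of-input pointers
// plus the caller's %esp are stored first, then %esp drops a further 96 bytes
// so that the eight working variables live at 0..28(%esp), the W+K values at
// 32(%esp) upwards, and the saved pointers end up at 96..108(%esp).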
subl $16,%esp
andl $-64,%esp
shll $6,%eax
addl %edi,%eax
movl %esi,(%esp)
movl %edi,4(%esp)
movl %eax,8(%esp)
movl %ebx,12(%esp)
leal -96(%esp),%esp
vzeroall
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edi
movl %ebx,4(%esp)
xorl %ecx,%ebx
movl %ecx,8(%esp)
movl %edi,12(%esp)
movl 16(%esi),%edx
movl 20(%esi),%edi
movl 24(%esi),%ecx
movl 28(%esi),%esi
movl %edi,20(%esp)
movl 100(%esp),%edi
movl %ecx,24(%esp)
movl %esi,28(%esp)
vmovdqa 256(%ebp),%xmm7
jmp .L011grand_avx
.align 32
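// Outer loop: one 64-byte input block per iteration. The block is loaded,
// byte-swapped with vpshufb, pre-added with the first 16 round constants and
// stored as W+K at 32..80(%esp); the advanced input pointer is kept at
// 100(%esp).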
.L011grand_avx:
vmovdqu (%edi),%xmm0
vmovdqu 16(%edi),%xmm1
vmovdqu 32(%edi),%xmm2
vmovdqu 48(%edi),%xmm3
addl $64,%edi
vpshufb %xmm7,%xmm0,%xmm0
movl %edi,100(%esp)
vpshufb %xmm7,%xmm1,%xmm1
vpshufb %xmm7,%xmm2,%xmm2
vpaddd (%ebp),%xmm0,%xmm4
vpshufb %xmm7,%xmm3,%xmm3
vpaddd 16(%ebp),%xmm1,%xmm5
vpaddd 32(%ebp),%xmm2,%xmm6
vpaddd 48(%ebp),%xmm3,%xmm7
vmovdqa %xmm4,32(%esp)
vmovdqa %xmm5,48(%esp)
vmovdqa %xmm6,64(%esp)
vmovdqa %xmm7,80(%esp)
jmp .L012avx_00_47
.align 16
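// Rounds 0-47: each pass advances %ebp by 64 bytes into K256, expands the
// next 16 message words with the sigma0/sigma1 vector arithmetic on
// %xmm0..%xmm3, and runs 16 scalar rounds interleaved with that expansion,
// storing the freshly computed W+K back to 32..80(%esp).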
.L012avx_00_47:
addl $64,%ebp
vpalignr $4,%xmm0,%xmm1,%xmm4
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 20(%esp),%esi
vpalignr $4,%xmm2,%xmm3,%xmm7
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
vpsrld $7,%xmm4,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
vpaddd %xmm7,%xmm0,%xmm0
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrld $3,%xmm4,%xmm7
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
vpslld $14,%xmm4,%xmm5
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,(%esp)
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
vpshufd $250,%xmm3,%xmm7
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpsrld $11,%xmm6,%xmm6
addl 32(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpxor %xmm5,%xmm4,%xmm4
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
vpslld $11,%xmm5,%xmm5
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 16(%esp),%esi
vpxor %xmm6,%xmm4,%xmm4
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
vpsrld $10,%xmm7,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
vpxor %xmm5,%xmm4,%xmm4
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
vpaddd %xmm4,%xmm0,%xmm0
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,28(%esp)
vpxor %xmm5,%xmm6,%xmm6
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
vpsrlq $19,%xmm7,%xmm7
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
vpxor %xmm7,%xmm6,%xmm6
addl 36(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
vpshufd $132,%xmm6,%xmm7
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
vpsrldq $8,%xmm7,%xmm7
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 12(%esp),%esi
vpaddd %xmm7,%xmm0,%xmm0
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
vpshufd $80,%xmm0,%xmm7
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
vpsrld $10,%xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
vpxor %xmm5,%xmm6,%xmm6
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,24(%esp)
vpsrlq $19,%xmm7,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
vpxor %xmm7,%xmm6,%xmm6
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpshufd $232,%xmm6,%xmm7
addl 40(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpslldq $8,%xmm7,%xmm7
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
vpaddd %xmm7,%xmm0,%xmm0
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 8(%esp),%esi
vpaddd (%ebp),%xmm0,%xmm6
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 44(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
vmovdqa %xmm6,32(%esp)
vpalignr $4,%xmm1,%xmm2,%xmm4
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 4(%esp),%esi
vpalignr $4,%xmm3,%xmm0,%xmm7
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
vpsrld $7,%xmm4,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,(%esp)
vpaddd %xmm7,%xmm1,%xmm1
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrld $3,%xmm4,%xmm7
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
vpslld $14,%xmm4,%xmm5
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,16(%esp)
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
vpshufd $250,%xmm0,%xmm7
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpsrld $11,%xmm6,%xmm6
addl 48(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpxor %xmm5,%xmm4,%xmm4
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
vpslld $11,%xmm5,%xmm5
movl %edx,%ecx
shrdl $14,%edx,%edx
movl (%esp),%esi
vpxor %xmm6,%xmm4,%xmm4
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
vpsrld $10,%xmm7,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
vpxor %xmm5,%xmm4,%xmm4
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
vpaddd %xmm4,%xmm1,%xmm1
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,12(%esp)
vpxor %xmm5,%xmm6,%xmm6
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
vpsrlq $19,%xmm7,%xmm7
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
vpxor %xmm7,%xmm6,%xmm6
addl 52(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
vpshufd $132,%xmm6,%xmm7
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
vpsrldq $8,%xmm7,%xmm7
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 28(%esp),%esi
vpaddd %xmm7,%xmm1,%xmm1
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
vpshufd $80,%xmm1,%xmm7
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
vpsrld $10,%xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
vpxor %xmm5,%xmm6,%xmm6
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,8(%esp)
vpsrlq $19,%xmm7,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
vpxor %xmm7,%xmm6,%xmm6
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpshufd $232,%xmm6,%xmm7
addl 56(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpslldq $8,%xmm7,%xmm7
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
vpaddd %xmm7,%xmm1,%xmm1
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 24(%esp),%esi
vpaddd 16(%ebp),%xmm1,%xmm6
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 60(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
vmovdqa %xmm6,48(%esp)
vpalignr $4,%xmm2,%xmm3,%xmm4
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 20(%esp),%esi
vpalignr $4,%xmm0,%xmm1,%xmm7
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
vpsrld $7,%xmm4,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
vpaddd %xmm7,%xmm2,%xmm2
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrld $3,%xmm4,%xmm7
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
vpslld $14,%xmm4,%xmm5
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,(%esp)
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
vpshufd $250,%xmm1,%xmm7
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpsrld $11,%xmm6,%xmm6
addl 64(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpxor %xmm5,%xmm4,%xmm4
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
vpslld $11,%xmm5,%xmm5
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 16(%esp),%esi
vpxor %xmm6,%xmm4,%xmm4
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
vpsrld $10,%xmm7,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
vpxor %xmm5,%xmm4,%xmm4
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
vpaddd %xmm4,%xmm2,%xmm2
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,28(%esp)
vpxor %xmm5,%xmm6,%xmm6
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
vpsrlq $19,%xmm7,%xmm7
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
vpxor %xmm7,%xmm6,%xmm6
addl 68(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
vpshufd $132,%xmm6,%xmm7
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
vpsrldq $8,%xmm7,%xmm7
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 12(%esp),%esi
vpaddd %xmm7,%xmm2,%xmm2
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
vpshufd $80,%xmm2,%xmm7
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
vpsrld $10,%xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
vpxor %xmm5,%xmm6,%xmm6
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,24(%esp)
vpsrlq $19,%xmm7,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
vpxor %xmm7,%xmm6,%xmm6
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpshufd $232,%xmm6,%xmm7
addl 72(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpslldq $8,%xmm7,%xmm7
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
vpaddd %xmm7,%xmm2,%xmm2
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 8(%esp),%esi
vpaddd 32(%ebp),%xmm2,%xmm6
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 76(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
vmovdqa %xmm6,64(%esp)
vpalignr $4,%xmm3,%xmm0,%xmm4
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 4(%esp),%esi
vpalignr $4,%xmm1,%xmm2,%xmm7
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
vpsrld $7,%xmm4,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,(%esp)
vpaddd %xmm7,%xmm3,%xmm3
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrld $3,%xmm4,%xmm7
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
vpslld $14,%xmm4,%xmm5
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,16(%esp)
vpxor %xmm6,%xmm7,%xmm4
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
vpshufd $250,%xmm2,%xmm7
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpsrld $11,%xmm6,%xmm6
addl 80(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpxor %xmm5,%xmm4,%xmm4
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
vpslld $11,%xmm5,%xmm5
movl %edx,%ecx
shrdl $14,%edx,%edx
movl (%esp),%esi
vpxor %xmm6,%xmm4,%xmm4
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
vpsrld $10,%xmm7,%xmm6
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
vpxor %xmm5,%xmm4,%xmm4
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
vpaddd %xmm4,%xmm3,%xmm3
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,12(%esp)
vpxor %xmm5,%xmm6,%xmm6
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
vpsrlq $19,%xmm7,%xmm7
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
vpxor %xmm7,%xmm6,%xmm6
addl 84(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
vpshufd $132,%xmm6,%xmm7
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
vpsrldq $8,%xmm7,%xmm7
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 28(%esp),%esi
vpaddd %xmm7,%xmm3,%xmm3
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
vpshufd $80,%xmm3,%xmm7
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
vpsrld $10,%xmm7,%xmm6
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
vpsrlq $17,%xmm7,%xmm5
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
vpxor %xmm5,%xmm6,%xmm6
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,8(%esp)
vpsrlq $19,%xmm7,%xmm7
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
vpxor %xmm7,%xmm6,%xmm6
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
vpshufd $232,%xmm6,%xmm7
addl 88(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
vpslldq $8,%xmm7,%xmm7
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
vpaddd %xmm7,%xmm3,%xmm3
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 24(%esp),%esi
vpaddd 48(%ebp),%xmm3,%xmm6
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 92(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
vmovdqa %xmm6,80(%esp)
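// Loop control for the expansion passes: the dword at 64(%ebp) is compared
// against 66051 (0x00010203), the first word of the byte-order mask that sits
// just past the K256 table, so after three passes (48 rounds) the loop falls
// through to the final 16 rounds, which consume the W+K values already stored
// at 32..92(%esp).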
cmpl $66051,64(%ebp)
jne .L012avx_00_47
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 20(%esp),%esi
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 32(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 16(%esp),%esi
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,28(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 36(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,24(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 40(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 44(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 4(%esp),%esi
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,16(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 48(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl (%esp),%esi
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,12(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 52(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,8(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 56(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 60(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 20(%esp),%esi
xorl %ecx,%edx
movl 24(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,16(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 4(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 28(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 64(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 12(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 16(%esp),%esi
xorl %ecx,%edx
movl 20(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,12(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl (%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,28(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 24(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 68(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 8(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 12(%esp),%esi
xorl %ecx,%edx
movl 16(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,8(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 28(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,24(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 20(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 72(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 4(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 8(%esp),%esi
xorl %ecx,%edx
movl 12(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,4(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 24(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,20(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 16(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 76(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl (%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 4(%esp),%esi
xorl %ecx,%edx
movl 8(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 20(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,16(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 12(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 80(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 28(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl (%esp),%esi
xorl %ecx,%edx
movl 4(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,28(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 16(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,12(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl 8(%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 84(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 24(%esp),%edx
addl %ecx,%eax
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 28(%esp),%esi
xorl %ecx,%edx
movl (%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,24(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %eax,%ecx
addl %edi,%edx
movl 12(%esp),%edi
movl %eax,%esi
shrdl $9,%ecx,%ecx
movl %eax,8(%esp)
xorl %eax,%ecx
xorl %edi,%eax
addl 4(%esp),%edx
shrdl $11,%ecx,%ecx
andl %eax,%ebx
xorl %esi,%ecx
addl 88(%esp),%edx
xorl %edi,%ebx
shrdl $2,%ecx,%ecx
addl %edx,%ebx
addl 20(%esp),%edx
addl %ecx,%ebx
movl %edx,%ecx
shrdl $14,%edx,%edx
movl 24(%esp),%esi
xorl %ecx,%edx
movl 28(%esp),%edi
xorl %edi,%esi
shrdl $5,%edx,%edx
andl %ecx,%esi
movl %ecx,20(%esp)
xorl %ecx,%edx
xorl %esi,%edi
shrdl $6,%edx,%edx
movl %ebx,%ecx
addl %edi,%edx
movl 8(%esp),%edi
movl %ebx,%esi
shrdl $9,%ecx,%ecx
movl %ebx,4(%esp)
xorl %ebx,%ecx
xorl %edi,%ebx
addl (%esp),%edx
shrdl $11,%ecx,%ecx
andl %ebx,%eax
xorl %esi,%ecx
addl 92(%esp),%edx
xorl %edi,%eax
shrdl $2,%ecx,%ecx
addl %edx,%eax
addl 16(%esp),%edx
addl %ecx,%eax
movl 96(%esp),%esi
xorl %edi,%ebx
movl 12(%esp),%ecx
addl (%esi),%eax
addl 4(%esi),%ebx
addl 8(%esi),%edi
addl 12(%esi),%ecx
movl %eax,(%esi)
movl %ebx,4(%esi)
movl %edi,8(%esi)
movl %ecx,12(%esi)
movl %ebx,4(%esp)
xorl %edi,%ebx
movl %edi,8(%esp)
movl %ecx,12(%esp)
movl 20(%esp),%edi
movl 24(%esp),%ecx
addl 16(%esi),%edx
addl 20(%esi),%edi
addl 24(%esi),%ecx
movl %edx,16(%esi)
movl %edi,20(%esi)
movl %edi,20(%esp)
movl 28(%esp),%edi
movl %ecx,24(%esi)
addl 28(%esi),%edi
movl %ecx,24(%esp)
movl %edi,28(%esi)
movl %edi,28(%esp)
movl 100(%esp),%edi
vmovdqa 64(%ebp),%xmm7
subl $192,%ebp
cmpl 104(%esp),%edi
jb .L011grand_avx
movl 108(%esp),%esp
vzeroall
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size sha256_block_data_order_avx,.-.L_sha256_block_data_order_avx_begin
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-x86/crypto/fipsmodule/sha1-586.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl sha1_block_data_order_nohw
.hidden sha1_block_data_order_nohw
.type sha1_block_data_order_nohw,@function
.align 16
sha1_block_data_order_nohw:
.L_sha1_block_data_order_nohw_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%ebp
movl 24(%esp),%esi
movl 28(%esp),%eax
subl $76,%esp
shll $6,%eax
addl %esi,%eax
movl %eax,104(%esp)
movl 16(%ebp),%edi
jmp .L000loop
.align 16
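// Portable (non-SIMD) SHA-1 compression loop: each iteration byte-swaps one
// 64-byte block into 0..60(%esp), saves the input pointer at 100(%esp), and
// runs the 80 rounds fully unrolled with the round constants 0x5a827999,
// 0x6ed9eba1, 0x8f1bbcdc and 0xca62c1d6.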
.L000loop:
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
movl %eax,(%esp)
movl %ebx,4(%esp)
movl %ecx,8(%esp)
movl %edx,12(%esp)
movl 16(%esi),%eax
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
movl %eax,16(%esp)
movl %ebx,20(%esp)
movl %ecx,24(%esp)
movl %edx,28(%esp)
movl 32(%esi),%eax
movl 36(%esi),%ebx
movl 40(%esi),%ecx
movl 44(%esi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
movl %eax,32(%esp)
movl %ebx,36(%esp)
movl %ecx,40(%esp)
movl %edx,44(%esp)
movl 48(%esi),%eax
movl 52(%esi),%ebx
movl 56(%esi),%ecx
movl 60(%esi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
movl %eax,48(%esp)
movl %ebx,52(%esp)
movl %ecx,56(%esp)
movl %edx,60(%esp)
movl %esi,100(%esp)
movl (%ebp),%eax
movl 4(%ebp),%ebx
movl 8(%ebp),%ecx
movl 12(%ebp),%edx
movl %ecx,%esi
movl %eax,%ebp
roll $5,%ebp
xorl %edx,%esi
addl %edi,%ebp
movl (%esp),%edi
andl %ebx,%esi
rorl $2,%ebx
xorl %edx,%esi
leal 1518500249(%ebp,%edi,1),%ebp
addl %esi,%ebp
movl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
xorl %ecx,%edi
addl %edx,%ebp
movl 4(%esp),%edx
andl %eax,%edi
rorl $2,%eax
xorl %ecx,%edi
leal 1518500249(%ebp,%edx,1),%ebp
addl %edi,%ebp
movl %eax,%edx
movl %ebp,%edi
roll $5,%ebp
xorl %ebx,%edx
addl %ecx,%ebp
movl 8(%esp),%ecx
andl %esi,%edx
rorl $2,%esi
xorl %ebx,%edx
leal 1518500249(%ebp,%ecx,1),%ebp
addl %edx,%ebp
movl %esi,%ecx
movl %ebp,%edx
roll $5,%ebp
xorl %eax,%ecx
addl %ebx,%ebp
movl 12(%esp),%ebx
andl %edi,%ecx
rorl $2,%edi
xorl %eax,%ecx
leal 1518500249(%ebp,%ebx,1),%ebp
addl %ecx,%ebp
movl %edi,%ebx
movl %ebp,%ecx
roll $5,%ebp
xorl %esi,%ebx
addl %eax,%ebp
movl 16(%esp),%eax
andl %edx,%ebx
rorl $2,%edx
xorl %esi,%ebx
leal 1518500249(%ebp,%eax,1),%ebp
addl %ebx,%ebp
movl %edx,%eax
movl %ebp,%ebx
roll $5,%ebp
xorl %edi,%eax
addl %esi,%ebp
movl 20(%esp),%esi
andl %ecx,%eax
rorl $2,%ecx
xorl %edi,%eax
leal 1518500249(%ebp,%esi,1),%ebp
addl %eax,%ebp
movl %ecx,%esi
movl %ebp,%eax
roll $5,%ebp
xorl %edx,%esi
addl %edi,%ebp
movl 24(%esp),%edi
andl %ebx,%esi
rorl $2,%ebx
xorl %edx,%esi
leal 1518500249(%ebp,%edi,1),%ebp
addl %esi,%ebp
movl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
xorl %ecx,%edi
addl %edx,%ebp
movl 28(%esp),%edx
andl %eax,%edi
rorl $2,%eax
xorl %ecx,%edi
leal 1518500249(%ebp,%edx,1),%ebp
addl %edi,%ebp
movl %eax,%edx
movl %ebp,%edi
roll $5,%ebp
xorl %ebx,%edx
addl %ecx,%ebp
movl 32(%esp),%ecx
andl %esi,%edx
rorl $2,%esi
xorl %ebx,%edx
leal 1518500249(%ebp,%ecx,1),%ebp
addl %edx,%ebp
movl %esi,%ecx
movl %ebp,%edx
roll $5,%ebp
xorl %eax,%ecx
addl %ebx,%ebp
movl 36(%esp),%ebx
andl %edi,%ecx
rorl $2,%edi
xorl %eax,%ecx
leal 1518500249(%ebp,%ebx,1),%ebp
addl %ecx,%ebp
movl %edi,%ebx
movl %ebp,%ecx
roll $5,%ebp
xorl %esi,%ebx
addl %eax,%ebp
movl 40(%esp),%eax
andl %edx,%ebx
rorl $2,%edx
xorl %esi,%ebx
leal 1518500249(%ebp,%eax,1),%ebp
addl %ebx,%ebp
movl %edx,%eax
movl %ebp,%ebx
roll $5,%ebp
xorl %edi,%eax
addl %esi,%ebp
movl 44(%esp),%esi
andl %ecx,%eax
rorl $2,%ecx
xorl %edi,%eax
leal 1518500249(%ebp,%esi,1),%ebp
addl %eax,%ebp
movl %ecx,%esi
movl %ebp,%eax
roll $5,%ebp
xorl %edx,%esi
addl %edi,%ebp
movl 48(%esp),%edi
andl %ebx,%esi
rorl $2,%ebx
xorl %edx,%esi
leal 1518500249(%ebp,%edi,1),%ebp
addl %esi,%ebp
movl %ebx,%edi
movl %ebp,%esi
roll $5,%ebp
xorl %ecx,%edi
addl %edx,%ebp
movl 52(%esp),%edx
andl %eax,%edi
rorl $2,%eax
xorl %ecx,%edi
leal 1518500249(%ebp,%edx,1),%ebp
addl %edi,%ebp
movl %eax,%edx
movl %ebp,%edi
roll $5,%ebp
xorl %ebx,%edx
addl %ecx,%ebp
movl 56(%esp),%ecx
andl %esi,%edx
rorl $2,%esi
xorl %ebx,%edx
leal 1518500249(%ebp,%ecx,1),%ebp
addl %edx,%ebp
movl %esi,%ecx
movl %ebp,%edx
roll $5,%ebp
xorl %eax,%ecx
addl %ebx,%ebp
movl 60(%esp),%ebx
andl %edi,%ecx
rorl $2,%edi
xorl %eax,%ecx
leal 1518500249(%ebp,%ebx,1),%ebp
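// From here the message schedule is extended in place: each new word is
// computed as rol1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]) over the 16-word
// window at 0..60(%esp), interleaved with the tail of the preceding round.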
movl (%esp),%ebx
addl %ebp,%ecx
movl %edi,%ebp
xorl 8(%esp),%ebx
xorl %esi,%ebp
xorl 32(%esp),%ebx
andl %edx,%ebp
xorl 52(%esp),%ebx
roll $1,%ebx
xorl %esi,%ebp
addl %ebp,%eax
movl %ecx,%ebp
rorl $2,%edx
movl %ebx,(%esp)
roll $5,%ebp
leal 1518500249(%ebx,%eax,1),%ebx
movl 4(%esp),%eax
addl %ebp,%ebx
movl %edx,%ebp
xorl 12(%esp),%eax
xorl %edi,%ebp
xorl 36(%esp),%eax
andl %ecx,%ebp
xorl 56(%esp),%eax
roll $1,%eax
xorl %edi,%ebp
addl %ebp,%esi
movl %ebx,%ebp
rorl $2,%ecx
movl %eax,4(%esp)
roll $5,%ebp
leal 1518500249(%eax,%esi,1),%eax
movl 8(%esp),%esi
addl %ebp,%eax
movl %ecx,%ebp
xorl 16(%esp),%esi
xorl %edx,%ebp
xorl 40(%esp),%esi
andl %ebx,%ebp
xorl 60(%esp),%esi
roll $1,%esi
xorl %edx,%ebp
addl %ebp,%edi
movl %eax,%ebp
rorl $2,%ebx
movl %esi,8(%esp)
roll $5,%ebp
leal 1518500249(%esi,%edi,1),%esi
movl 12(%esp),%edi
addl %ebp,%esi
movl %ebx,%ebp
xorl 20(%esp),%edi
xorl %ecx,%ebp
xorl 44(%esp),%edi
andl %eax,%ebp
xorl (%esp),%edi
roll $1,%edi
xorl %ecx,%ebp
addl %ebp,%edx
movl %esi,%ebp
rorl $2,%eax
movl %edi,12(%esp)
roll $5,%ebp
leal 1518500249(%edi,%edx,1),%edi
movl 16(%esp),%edx
addl %ebp,%edi
movl %esi,%ebp
xorl 24(%esp),%edx
xorl %eax,%ebp
xorl 48(%esp),%edx
xorl %ebx,%ebp
xorl 4(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,16(%esp)
leal 1859775393(%edx,%ecx,1),%edx
movl 20(%esp),%ecx
addl %ebp,%edx
movl %edi,%ebp
xorl 28(%esp),%ecx
xorl %esi,%ebp
xorl 52(%esp),%ecx
xorl %eax,%ebp
xorl 8(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,20(%esp)
leal 1859775393(%ecx,%ebx,1),%ecx
movl 24(%esp),%ebx
addl %ebp,%ecx
movl %edx,%ebp
xorl 32(%esp),%ebx
xorl %edi,%ebp
xorl 56(%esp),%ebx
xorl %esi,%ebp
xorl 12(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,24(%esp)
leal 1859775393(%ebx,%eax,1),%ebx
movl 28(%esp),%eax
addl %ebp,%ebx
movl %ecx,%ebp
xorl 36(%esp),%eax
xorl %edx,%ebp
xorl 60(%esp),%eax
xorl %edi,%ebp
xorl 16(%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
movl %eax,28(%esp)
leal 1859775393(%eax,%esi,1),%eax
movl 32(%esp),%esi
addl %ebp,%eax
movl %ebx,%ebp
xorl 40(%esp),%esi
xorl %ecx,%ebp
xorl (%esp),%esi
xorl %edx,%ebp
xorl 20(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,32(%esp)
leal 1859775393(%esi,%edi,1),%esi
movl 36(%esp),%edi
addl %ebp,%esi
movl %eax,%ebp
xorl 44(%esp),%edi
xorl %ebx,%ebp
xorl 4(%esp),%edi
xorl %ecx,%ebp
xorl 24(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,36(%esp)
leal 1859775393(%edi,%edx,1),%edi
movl 40(%esp),%edx
addl %ebp,%edi
movl %esi,%ebp
xorl 48(%esp),%edx
xorl %eax,%ebp
xorl 8(%esp),%edx
xorl %ebx,%ebp
xorl 28(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,40(%esp)
leal 1859775393(%edx,%ecx,1),%edx
movl 44(%esp),%ecx
addl %ebp,%edx
movl %edi,%ebp
xorl 52(%esp),%ecx
xorl %esi,%ebp
xorl 12(%esp),%ecx
xorl %eax,%ebp
xorl 32(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,44(%esp)
leal 1859775393(%ecx,%ebx,1),%ecx
movl 48(%esp),%ebx
addl %ebp,%ecx
movl %edx,%ebp
xorl 56(%esp),%ebx
xorl %edi,%ebp
xorl 16(%esp),%ebx
xorl %esi,%ebp
xorl 36(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,48(%esp)
leal 1859775393(%ebx,%eax,1),%ebx
movl 52(%esp),%eax
addl %ebp,%ebx
movl %ecx,%ebp
xorl 60(%esp),%eax
xorl %edx,%ebp
xorl 20(%esp),%eax
xorl %edi,%ebp
xorl 40(%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
movl %eax,52(%esp)
leal 1859775393(%eax,%esi,1),%eax
movl 56(%esp),%esi
addl %ebp,%eax
movl %ebx,%ebp
xorl (%esp),%esi
xorl %ecx,%ebp
xorl 24(%esp),%esi
xorl %edx,%ebp
xorl 44(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,56(%esp)
leal 1859775393(%esi,%edi,1),%esi
movl 60(%esp),%edi
addl %ebp,%esi
movl %eax,%ebp
xorl 4(%esp),%edi
xorl %ebx,%ebp
xorl 28(%esp),%edi
xorl %ecx,%ebp
xorl 48(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,60(%esp)
leal 1859775393(%edi,%edx,1),%edi
movl (%esp),%edx
addl %ebp,%edi
movl %esi,%ebp
xorl 8(%esp),%edx
xorl %eax,%ebp
xorl 32(%esp),%edx
xorl %ebx,%ebp
xorl 52(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,(%esp)
leal 1859775393(%edx,%ecx,1),%edx
movl 4(%esp),%ecx
addl %ebp,%edx
movl %edi,%ebp
xorl 12(%esp),%ecx
xorl %esi,%ebp
xorl 36(%esp),%ecx
xorl %eax,%ebp
xorl 56(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,4(%esp)
leal 1859775393(%ecx,%ebx,1),%ecx
movl 8(%esp),%ebx
addl %ebp,%ecx
movl %edx,%ebp
xorl 16(%esp),%ebx
xorl %edi,%ebp
xorl 40(%esp),%ebx
xorl %esi,%ebp
xorl 60(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,8(%esp)
leal 1859775393(%ebx,%eax,1),%ebx
movl 12(%esp),%eax
addl %ebp,%ebx
movl %ecx,%ebp
xorl 20(%esp),%eax
xorl %edx,%ebp
xorl 44(%esp),%eax
xorl %edi,%ebp
xorl (%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
movl %eax,12(%esp)
leal 1859775393(%eax,%esi,1),%eax
movl 16(%esp),%esi
addl %ebp,%eax
movl %ebx,%ebp
xorl 24(%esp),%esi
xorl %ecx,%ebp
xorl 48(%esp),%esi
xorl %edx,%ebp
xorl 4(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,16(%esp)
leal 1859775393(%esi,%edi,1),%esi
movl 20(%esp),%edi
addl %ebp,%esi
movl %eax,%ebp
xorl 28(%esp),%edi
xorl %ebx,%ebp
xorl 52(%esp),%edi
xorl %ecx,%ebp
xorl 8(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,20(%esp)
leal 1859775393(%edi,%edx,1),%edi
movl 24(%esp),%edx
addl %ebp,%edi
movl %esi,%ebp
xorl 32(%esp),%edx
xorl %eax,%ebp
xorl 56(%esp),%edx
xorl %ebx,%ebp
xorl 12(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,24(%esp)
leal 1859775393(%edx,%ecx,1),%edx
movl 28(%esp),%ecx
addl %ebp,%edx
movl %edi,%ebp
xorl 36(%esp),%ecx
xorl %esi,%ebp
xorl 60(%esp),%ecx
xorl %eax,%ebp
xorl 16(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,28(%esp)
leal 1859775393(%ecx,%ebx,1),%ecx
movl 32(%esp),%ebx
addl %ebp,%ecx
movl %edi,%ebp
xorl 40(%esp),%ebx
xorl %esi,%ebp
xorl (%esp),%ebx
andl %edx,%ebp
xorl 20(%esp),%ebx
roll $1,%ebx
addl %eax,%ebp
rorl $2,%edx
movl %ecx,%eax
roll $5,%eax
movl %ebx,32(%esp)
leal 2400959708(%ebx,%ebp,1),%ebx
movl %edi,%ebp
addl %eax,%ebx
andl %esi,%ebp
movl 36(%esp),%eax
addl %ebp,%ebx
movl %edx,%ebp
xorl 44(%esp),%eax
xorl %edi,%ebp
xorl 4(%esp),%eax
andl %ecx,%ebp
xorl 24(%esp),%eax
roll $1,%eax
addl %esi,%ebp
rorl $2,%ecx
movl %ebx,%esi
roll $5,%esi
movl %eax,36(%esp)
leal 2400959708(%eax,%ebp,1),%eax
movl %edx,%ebp
addl %esi,%eax
andl %edi,%ebp
movl 40(%esp),%esi
addl %ebp,%eax
movl %ecx,%ebp
xorl 48(%esp),%esi
xorl %edx,%ebp
xorl 8(%esp),%esi
andl %ebx,%ebp
xorl 28(%esp),%esi
roll $1,%esi
addl %edi,%ebp
rorl $2,%ebx
movl %eax,%edi
roll $5,%edi
movl %esi,40(%esp)
leal 2400959708(%esi,%ebp,1),%esi
movl %ecx,%ebp
addl %edi,%esi
andl %edx,%ebp
movl 44(%esp),%edi
addl %ebp,%esi
movl %ebx,%ebp
xorl 52(%esp),%edi
xorl %ecx,%ebp
xorl 12(%esp),%edi
andl %eax,%ebp
xorl 32(%esp),%edi
roll $1,%edi
addl %edx,%ebp
rorl $2,%eax
movl %esi,%edx
roll $5,%edx
movl %edi,44(%esp)
leal 2400959708(%edi,%ebp,1),%edi
movl %ebx,%ebp
addl %edx,%edi
andl %ecx,%ebp
movl 48(%esp),%edx
addl %ebp,%edi
movl %eax,%ebp
xorl 56(%esp),%edx
xorl %ebx,%ebp
xorl 16(%esp),%edx
andl %esi,%ebp
xorl 36(%esp),%edx
roll $1,%edx
addl %ecx,%ebp
rorl $2,%esi
movl %edi,%ecx
roll $5,%ecx
movl %edx,48(%esp)
leal 2400959708(%edx,%ebp,1),%edx
movl %eax,%ebp
addl %ecx,%edx
andl %ebx,%ebp
movl 52(%esp),%ecx
addl %ebp,%edx
movl %esi,%ebp
xorl 60(%esp),%ecx
xorl %eax,%ebp
xorl 20(%esp),%ecx
andl %edi,%ebp
xorl 40(%esp),%ecx
roll $1,%ecx
addl %ebx,%ebp
rorl $2,%edi
movl %edx,%ebx
roll $5,%ebx
movl %ecx,52(%esp)
leal 2400959708(%ecx,%ebp,1),%ecx
movl %esi,%ebp
addl %ebx,%ecx
andl %eax,%ebp
movl 56(%esp),%ebx
addl %ebp,%ecx
movl %edi,%ebp
xorl (%esp),%ebx
xorl %esi,%ebp
xorl 24(%esp),%ebx
andl %edx,%ebp
xorl 44(%esp),%ebx
roll $1,%ebx
addl %eax,%ebp
rorl $2,%edx
movl %ecx,%eax
roll $5,%eax
movl %ebx,56(%esp)
leal 2400959708(%ebx,%ebp,1),%ebx
movl %edi,%ebp
addl %eax,%ebx
andl %esi,%ebp
movl 60(%esp),%eax
addl %ebp,%ebx
movl %edx,%ebp
xorl 4(%esp),%eax
xorl %edi,%ebp
xorl 28(%esp),%eax
andl %ecx,%ebp
xorl 48(%esp),%eax
roll $1,%eax
addl %esi,%ebp
rorl $2,%ecx
movl %ebx,%esi
roll $5,%esi
movl %eax,60(%esp)
leal 2400959708(%eax,%ebp,1),%eax
movl %edx,%ebp
addl %esi,%eax
andl %edi,%ebp
movl (%esp),%esi
addl %ebp,%eax
movl %ecx,%ebp
xorl 8(%esp),%esi
xorl %edx,%ebp
xorl 32(%esp),%esi
andl %ebx,%ebp
xorl 52(%esp),%esi
roll $1,%esi
addl %edi,%ebp
rorl $2,%ebx
movl %eax,%edi
roll $5,%edi
movl %esi,(%esp)
leal 2400959708(%esi,%ebp,1),%esi
movl %ecx,%ebp
addl %edi,%esi
andl %edx,%ebp
movl 4(%esp),%edi
addl %ebp,%esi
movl %ebx,%ebp
xorl 12(%esp),%edi
xorl %ecx,%ebp
xorl 36(%esp),%edi
andl %eax,%ebp
xorl 56(%esp),%edi
roll $1,%edi
addl %edx,%ebp
rorl $2,%eax
movl %esi,%edx
roll $5,%edx
movl %edi,4(%esp)
leal 2400959708(%edi,%ebp,1),%edi
movl %ebx,%ebp
addl %edx,%edi
andl %ecx,%ebp
movl 8(%esp),%edx
addl %ebp,%edi
movl %eax,%ebp
xorl 16(%esp),%edx
xorl %ebx,%ebp
xorl 40(%esp),%edx
andl %esi,%ebp
xorl 60(%esp),%edx
roll $1,%edx
addl %ecx,%ebp
rorl $2,%esi
movl %edi,%ecx
roll $5,%ecx
movl %edx,8(%esp)
leal 2400959708(%edx,%ebp,1),%edx
movl %eax,%ebp
addl %ecx,%edx
andl %ebx,%ebp
movl 12(%esp),%ecx
addl %ebp,%edx
movl %esi,%ebp
xorl 20(%esp),%ecx
xorl %eax,%ebp
xorl 44(%esp),%ecx
andl %edi,%ebp
xorl (%esp),%ecx
roll $1,%ecx
addl %ebx,%ebp
rorl $2,%edi
movl %edx,%ebx
roll $5,%ebx
movl %ecx,12(%esp)
leal 2400959708(%ecx,%ebp,1),%ecx
movl %esi,%ebp
addl %ebx,%ecx
andl %eax,%ebp
movl 16(%esp),%ebx
addl %ebp,%ecx
movl %edi,%ebp
xorl 24(%esp),%ebx
xorl %esi,%ebp
xorl 48(%esp),%ebx
andl %edx,%ebp
xorl 4(%esp),%ebx
roll $1,%ebx
addl %eax,%ebp
rorl $2,%edx
movl %ecx,%eax
roll $5,%eax
movl %ebx,16(%esp)
leal 2400959708(%ebx,%ebp,1),%ebx
movl %edi,%ebp
addl %eax,%ebx
andl %esi,%ebp
movl 20(%esp),%eax
addl %ebp,%ebx
movl %edx,%ebp
xorl 28(%esp),%eax
xorl %edi,%ebp
xorl 52(%esp),%eax
andl %ecx,%ebp
xorl 8(%esp),%eax
roll $1,%eax
addl %esi,%ebp
rorl $2,%ecx
movl %ebx,%esi
roll $5,%esi
movl %eax,20(%esp)
leal 2400959708(%eax,%ebp,1),%eax
movl %edx,%ebp
addl %esi,%eax
andl %edi,%ebp
movl 24(%esp),%esi
addl %ebp,%eax
movl %ecx,%ebp
xorl 32(%esp),%esi
xorl %edx,%ebp
xorl 56(%esp),%esi
andl %ebx,%ebp
xorl 12(%esp),%esi
roll $1,%esi
addl %edi,%ebp
rorl $2,%ebx
movl %eax,%edi
roll $5,%edi
movl %esi,24(%esp)
leal 2400959708(%esi,%ebp,1),%esi
movl %ecx,%ebp
addl %edi,%esi
andl %edx,%ebp
movl 28(%esp),%edi
addl %ebp,%esi
movl %ebx,%ebp
xorl 36(%esp),%edi
xorl %ecx,%ebp
xorl 60(%esp),%edi
andl %eax,%ebp
xorl 16(%esp),%edi
roll $1,%edi
addl %edx,%ebp
rorl $2,%eax
movl %esi,%edx
roll $5,%edx
movl %edi,28(%esp)
leal 2400959708(%edi,%ebp,1),%edi
movl %ebx,%ebp
addl %edx,%edi
andl %ecx,%ebp
movl 32(%esp),%edx
addl %ebp,%edi
movl %eax,%ebp
xorl 40(%esp),%edx
xorl %ebx,%ebp
xorl (%esp),%edx
andl %esi,%ebp
xorl 20(%esp),%edx
roll $1,%edx
addl %ecx,%ebp
rorl $2,%esi
movl %edi,%ecx
roll $5,%ecx
movl %edx,32(%esp)
leal 2400959708(%edx,%ebp,1),%edx
movl %eax,%ebp
addl %ecx,%edx
andl %ebx,%ebp
movl 36(%esp),%ecx
addl %ebp,%edx
movl %esi,%ebp
xorl 44(%esp),%ecx
xorl %eax,%ebp
xorl 4(%esp),%ecx
andl %edi,%ebp
xorl 24(%esp),%ecx
roll $1,%ecx
addl %ebx,%ebp
rorl $2,%edi
movl %edx,%ebx
roll $5,%ebx
movl %ecx,36(%esp)
leal 2400959708(%ecx,%ebp,1),%ecx
movl %esi,%ebp
addl %ebx,%ecx
andl %eax,%ebp
movl 40(%esp),%ebx
addl %ebp,%ecx
movl %edi,%ebp
xorl 48(%esp),%ebx
xorl %esi,%ebp
xorl 8(%esp),%ebx
andl %edx,%ebp
xorl 28(%esp),%ebx
roll $1,%ebx
addl %eax,%ebp
rorl $2,%edx
movl %ecx,%eax
roll $5,%eax
movl %ebx,40(%esp)
leal 2400959708(%ebx,%ebp,1),%ebx
movl %edi,%ebp
addl %eax,%ebx
andl %esi,%ebp
movl 44(%esp),%eax
addl %ebp,%ebx
movl %edx,%ebp
xorl 52(%esp),%eax
xorl %edi,%ebp
xorl 12(%esp),%eax
andl %ecx,%ebp
xorl 32(%esp),%eax
roll $1,%eax
addl %esi,%ebp
rorl $2,%ecx
movl %ebx,%esi
roll $5,%esi
movl %eax,44(%esp)
leal 2400959708(%eax,%ebp,1),%eax
movl %edx,%ebp
addl %esi,%eax
andl %edi,%ebp
movl 48(%esp),%esi
addl %ebp,%eax
movl %ebx,%ebp
xorl 56(%esp),%esi
xorl %ecx,%ebp
xorl 16(%esp),%esi
xorl %edx,%ebp
xorl 36(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,48(%esp)
leal 3395469782(%esi,%edi,1),%esi
movl 52(%esp),%edi
addl %ebp,%esi
movl %eax,%ebp
xorl 60(%esp),%edi
xorl %ebx,%ebp
xorl 20(%esp),%edi
xorl %ecx,%ebp
xorl 40(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,52(%esp)
leal 3395469782(%edi,%edx,1),%edi
movl 56(%esp),%edx
addl %ebp,%edi
movl %esi,%ebp
xorl (%esp),%edx
xorl %eax,%ebp
xorl 24(%esp),%edx
xorl %ebx,%ebp
xorl 44(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,56(%esp)
leal 3395469782(%edx,%ecx,1),%edx
movl 60(%esp),%ecx
addl %ebp,%edx
movl %edi,%ebp
xorl 4(%esp),%ecx
xorl %esi,%ebp
xorl 28(%esp),%ecx
xorl %eax,%ebp
xorl 48(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,60(%esp)
leal 3395469782(%ecx,%ebx,1),%ecx
movl (%esp),%ebx
addl %ebp,%ecx
movl %edx,%ebp
xorl 8(%esp),%ebx
xorl %edi,%ebp
xorl 32(%esp),%ebx
xorl %esi,%ebp
xorl 52(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,(%esp)
leal 3395469782(%ebx,%eax,1),%ebx
movl 4(%esp),%eax
addl %ebp,%ebx
movl %ecx,%ebp
xorl 12(%esp),%eax
xorl %edx,%ebp
xorl 36(%esp),%eax
xorl %edi,%ebp
xorl 56(%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
movl %eax,4(%esp)
leal 3395469782(%eax,%esi,1),%eax
movl 8(%esp),%esi
addl %ebp,%eax
movl %ebx,%ebp
xorl 16(%esp),%esi
xorl %ecx,%ebp
xorl 40(%esp),%esi
xorl %edx,%ebp
xorl 60(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,8(%esp)
leal 3395469782(%esi,%edi,1),%esi
movl 12(%esp),%edi
addl %ebp,%esi
movl %eax,%ebp
xorl 20(%esp),%edi
xorl %ebx,%ebp
xorl 44(%esp),%edi
xorl %ecx,%ebp
xorl (%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,12(%esp)
leal 3395469782(%edi,%edx,1),%edi
movl 16(%esp),%edx
addl %ebp,%edi
movl %esi,%ebp
xorl 24(%esp),%edx
xorl %eax,%ebp
xorl 48(%esp),%edx
xorl %ebx,%ebp
xorl 4(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,16(%esp)
leal 3395469782(%edx,%ecx,1),%edx
movl 20(%esp),%ecx
addl %ebp,%edx
movl %edi,%ebp
xorl 28(%esp),%ecx
xorl %esi,%ebp
xorl 52(%esp),%ecx
xorl %eax,%ebp
xorl 8(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,20(%esp)
leal 3395469782(%ecx,%ebx,1),%ecx
movl 24(%esp),%ebx
addl %ebp,%ecx
movl %edx,%ebp
xorl 32(%esp),%ebx
xorl %edi,%ebp
xorl 56(%esp),%ebx
xorl %esi,%ebp
xorl 12(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,24(%esp)
leal 3395469782(%ebx,%eax,1),%ebx
movl 28(%esp),%eax
addl %ebp,%ebx
movl %ecx,%ebp
xorl 36(%esp),%eax
xorl %edx,%ebp
xorl 60(%esp),%eax
xorl %edi,%ebp
xorl 16(%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
movl %eax,28(%esp)
leal 3395469782(%eax,%esi,1),%eax
movl 32(%esp),%esi
addl %ebp,%eax
movl %ebx,%ebp
xorl 40(%esp),%esi
xorl %ecx,%ebp
xorl (%esp),%esi
xorl %edx,%ebp
xorl 20(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
movl %esi,32(%esp)
leal 3395469782(%esi,%edi,1),%esi
movl 36(%esp),%edi
addl %ebp,%esi
movl %eax,%ebp
xorl 44(%esp),%edi
xorl %ebx,%ebp
xorl 4(%esp),%edi
xorl %ecx,%ebp
xorl 24(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
movl %edi,36(%esp)
leal 3395469782(%edi,%edx,1),%edi
movl 40(%esp),%edx
addl %ebp,%edi
movl %esi,%ebp
xorl 48(%esp),%edx
xorl %eax,%ebp
xorl 8(%esp),%edx
xorl %ebx,%ebp
xorl 28(%esp),%edx
roll $1,%edx
addl %ebp,%ecx
rorl $2,%esi
movl %edi,%ebp
roll $5,%ebp
movl %edx,40(%esp)
leal 3395469782(%edx,%ecx,1),%edx
movl 44(%esp),%ecx
addl %ebp,%edx
movl %edi,%ebp
xorl 52(%esp),%ecx
xorl %esi,%ebp
xorl 12(%esp),%ecx
xorl %eax,%ebp
xorl 32(%esp),%ecx
roll $1,%ecx
addl %ebp,%ebx
rorl $2,%edi
movl %edx,%ebp
roll $5,%ebp
movl %ecx,44(%esp)
leal 3395469782(%ecx,%ebx,1),%ecx
movl 48(%esp),%ebx
addl %ebp,%ecx
movl %edx,%ebp
xorl 56(%esp),%ebx
xorl %edi,%ebp
xorl 16(%esp),%ebx
xorl %esi,%ebp
xorl 36(%esp),%ebx
roll $1,%ebx
addl %ebp,%eax
rorl $2,%edx
movl %ecx,%ebp
roll $5,%ebp
movl %ebx,48(%esp)
leal 3395469782(%ebx,%eax,1),%ebx
movl 52(%esp),%eax
addl %ebp,%ebx
movl %ecx,%ebp
xorl 60(%esp),%eax
xorl %edx,%ebp
xorl 20(%esp),%eax
xorl %edi,%ebp
xorl 40(%esp),%eax
roll $1,%eax
addl %ebp,%esi
rorl $2,%ecx
movl %ebx,%ebp
roll $5,%ebp
leal 3395469782(%eax,%esi,1),%eax
movl 56(%esp),%esi
addl %ebp,%eax
movl %ebx,%ebp
xorl (%esp),%esi
xorl %ecx,%ebp
xorl 24(%esp),%esi
xorl %edx,%ebp
xorl 44(%esp),%esi
roll $1,%esi
addl %ebp,%edi
rorl $2,%ebx
movl %eax,%ebp
roll $5,%ebp
leal 3395469782(%esi,%edi,1),%esi
movl 60(%esp),%edi
addl %ebp,%esi
movl %eax,%ebp
xorl 4(%esp),%edi
xorl %ebx,%ebp
xorl 28(%esp),%edi
xorl %ecx,%ebp
xorl 48(%esp),%edi
roll $1,%edi
addl %ebp,%edx
rorl $2,%eax
movl %esi,%ebp
roll $5,%ebp
leal 3395469782(%edi,%edx,1),%edi
addl %ebp,%edi
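// Round 79 done: reload the state pointer (96(%esp)) and the current input
// pointer (100(%esp)), fold the five working variables back into the state,
// advance the input by 64 bytes and loop while it is below the end pointer at
// 104(%esp).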
movl 96(%esp),%ebp
movl 100(%esp),%edx
addl (%ebp),%edi
addl 4(%ebp),%esi
addl 8(%ebp),%eax
addl 12(%ebp),%ebx
addl 16(%ebp),%ecx
movl %edi,(%ebp)
addl $64,%edx
movl %esi,4(%ebp)
cmpl 104(%esp),%edx
movl %eax,8(%ebp)
movl %ecx,%edi
movl %ebx,12(%ebp)
movl %edx,%esi
movl %ecx,16(%ebp)
jb .L000loop
addl $76,%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size sha1_block_data_order_nohw,.-.L_sha1_block_data_order_nohw_begin
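// SSSE3 variant of the SHA-1 block function: the message schedule is held in
// XMM registers and updated four words at a time, with the round constant
// pre-added and the result spilled to the stack for the scalar rounds.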
.globl sha1_block_data_order_ssse3
.hidden sha1_block_data_order_ssse3
.type sha1_block_data_order_ssse3,@function
.align 16
sha1_block_data_order_ssse3:
.L_sha1_block_data_order_ssse3_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
call .L001pic_point
.L001pic_point:
popl %ebp
leal .LK_XX_XX-.L001pic_point(%ebp),%ebp
movdqa (%ebp),%xmm7
movdqa 16(%ebp),%xmm0
movdqa 32(%ebp),%xmm1
movdqa 48(%ebp),%xmm2
movdqa 64(%ebp),%xmm6
movl 20(%esp),%edi
movl 24(%esp),%ebp
movl 28(%esp),%edx
movl %esp,%esi
subl $208,%esp
andl $-64,%esp
movdqa %xmm0,112(%esp)
movdqa %xmm1,128(%esp)
movdqa %xmm2,144(%esp)
shll $6,%edx
movdqa %xmm7,160(%esp)
addl %ebp,%edx
movdqa %xmm6,176(%esp)
addl $64,%ebp
movl %edi,192(%esp)
movl %ebp,196(%esp)
movl %edx,200(%esp)
movl %esi,204(%esp)
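// Frame layout: W+K for the current 16 words at 0..48(%esp), the four round
// constants and the byte-swap mask copied to 112..176(%esp), and the state
// pointer, advanced input pointer, end-of-input pointer and the caller's
// %esp at 192..204(%esp).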
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
movl 16(%edi),%edi
movl %ebx,%esi
movdqu -64(%ebp),%xmm0
movdqu -48(%ebp),%xmm1
movdqu -32(%ebp),%xmm2
movdqu -16(%ebp),%xmm3
.byte 102,15,56,0,198
.byte 102,15,56,0,206
.byte 102,15,56,0,214
movdqa %xmm7,96(%esp)
.byte 102,15,56,0,222
paddd %xmm7,%xmm0
paddd %xmm7,%xmm1
paddd %xmm7,%xmm2
movdqa %xmm0,(%esp)
psubd %xmm7,%xmm0
movdqa %xmm1,16(%esp)
psubd %xmm7,%xmm1
movdqa %xmm2,32(%esp)
movl %ecx,%ebp
psubd %xmm7,%xmm2
xorl %edx,%ebp
pshufd $238,%xmm0,%xmm4
andl %ebp,%esi
jmp .L002loop
.align 16
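// Per-block round loop: the scalar rounds run interleaved with the SSSE3
// schedule update (punpcklqdq/psrldq/pxor plus a left-rotate by one built
// from shifts), and each updated four-word group is pre-added with its round
// constant and written back to the stack for later rounds.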
.L002loop:
rorl $2,%ebx
xorl %edx,%esi
movl %eax,%ebp
punpcklqdq %xmm1,%xmm4
movdqa %xmm3,%xmm6
addl (%esp),%edi
xorl %ecx,%ebx
paddd %xmm3,%xmm7
movdqa %xmm0,64(%esp)
roll $5,%eax
addl %esi,%edi
psrldq $4,%xmm6
andl %ebx,%ebp
xorl %ecx,%ebx
pxor %xmm0,%xmm4
addl %eax,%edi
rorl $7,%eax
pxor %xmm2,%xmm6
xorl %ecx,%ebp
movl %edi,%esi
addl 4(%esp),%edx
pxor %xmm6,%xmm4
xorl %ebx,%eax
roll $5,%edi
movdqa %xmm7,48(%esp)
addl %ebp,%edx
andl %eax,%esi
movdqa %xmm4,%xmm0
xorl %ebx,%eax
addl %edi,%edx
rorl $7,%edi
movdqa %xmm4,%xmm6
xorl %ebx,%esi
pslldq $12,%xmm0
paddd %xmm4,%xmm4
movl %edx,%ebp
addl 8(%esp),%ecx
psrld $31,%xmm6
xorl %eax,%edi
roll $5,%edx
movdqa %xmm0,%xmm7
addl %esi,%ecx
andl %edi,%ebp
xorl %eax,%edi
psrld $30,%xmm0
addl %edx,%ecx
rorl $7,%edx
por %xmm6,%xmm4
xorl %eax,%ebp
movl %ecx,%esi
addl 12(%esp),%ebx
pslld $2,%xmm7
xorl %edi,%edx
roll $5,%ecx
pxor %xmm0,%xmm4
movdqa 96(%esp),%xmm0
addl %ebp,%ebx
andl %edx,%esi
pxor %xmm7,%xmm4
pshufd $238,%xmm1,%xmm5
xorl %edi,%edx
addl %ecx,%ebx
rorl $7,%ecx
xorl %edi,%esi
movl %ebx,%ebp
punpcklqdq %xmm2,%xmm5
movdqa %xmm4,%xmm7
addl 16(%esp),%eax
xorl %edx,%ecx
paddd %xmm4,%xmm0
movdqa %xmm1,80(%esp)
roll $5,%ebx
addl %esi,%eax
psrldq $4,%xmm7
andl %ecx,%ebp
xorl %edx,%ecx
pxor %xmm1,%xmm5
addl %ebx,%eax
rorl $7,%ebx
pxor %xmm3,%xmm7
xorl %edx,%ebp
movl %eax,%esi
addl 20(%esp),%edi
pxor %xmm7,%xmm5
xorl %ecx,%ebx
roll $5,%eax
movdqa %xmm0,(%esp)
addl %ebp,%edi
andl %ebx,%esi
movdqa %xmm5,%xmm1
xorl %ecx,%ebx
addl %eax,%edi
rorl $7,%eax
movdqa %xmm5,%xmm7
xorl %ecx,%esi
pslldq $12,%xmm1
paddd %xmm5,%xmm5
movl %edi,%ebp
addl 24(%esp),%edx
psrld $31,%xmm7
xorl %ebx,%eax
roll $5,%edi
movdqa %xmm1,%xmm0
addl %esi,%edx
andl %eax,%ebp
xorl %ebx,%eax
psrld $30,%xmm1
addl %edi,%edx
rorl $7,%edi
por %xmm7,%xmm5
xorl %ebx,%ebp
movl %edx,%esi
addl 28(%esp),%ecx
pslld $2,%xmm0
xorl %eax,%edi
roll $5,%edx
pxor %xmm1,%xmm5
movdqa 112(%esp),%xmm1
addl %ebp,%ecx
andl %edi,%esi
pxor %xmm0,%xmm5
pshufd $238,%xmm2,%xmm6
xorl %eax,%edi
addl %edx,%ecx
rorl $7,%edx
xorl %eax,%esi
movl %ecx,%ebp
punpcklqdq %xmm3,%xmm6
movdqa %xmm5,%xmm0
addl 32(%esp),%ebx
xorl %edi,%edx
paddd %xmm5,%xmm1
movdqa %xmm2,96(%esp)
roll $5,%ecx
addl %esi,%ebx
psrldq $4,%xmm0
andl %edx,%ebp
xorl %edi,%edx
pxor %xmm2,%xmm6
addl %ecx,%ebx
rorl $7,%ecx
pxor %xmm4,%xmm0
xorl %edi,%ebp
movl %ebx,%esi
addl 36(%esp),%eax
pxor %xmm0,%xmm6
xorl %edx,%ecx
roll $5,%ebx
movdqa %xmm1,16(%esp)
addl %ebp,%eax
andl %ecx,%esi
movdqa %xmm6,%xmm2
xorl %edx,%ecx
addl %ebx,%eax
rorl $7,%ebx
movdqa %xmm6,%xmm0
xorl %edx,%esi
pslldq $12,%xmm2
paddd %xmm6,%xmm6
movl %eax,%ebp
addl 40(%esp),%edi
psrld $31,%xmm0
xorl %ecx,%ebx
roll $5,%eax
movdqa %xmm2,%xmm1
addl %esi,%edi
andl %ebx,%ebp
xorl %ecx,%ebx
psrld $30,%xmm2
addl %eax,%edi
rorl $7,%eax
por %xmm0,%xmm6
xorl %ecx,%ebp
movdqa 64(%esp),%xmm0
movl %edi,%esi
addl 44(%esp),%edx
pslld $2,%xmm1
xorl %ebx,%eax
roll $5,%edi
pxor %xmm2,%xmm6
movdqa 112(%esp),%xmm2
addl %ebp,%edx
andl %eax,%esi
pxor %xmm1,%xmm6
pshufd $238,%xmm3,%xmm7
xorl %ebx,%eax
addl %edi,%edx
rorl $7,%edi
xorl %ebx,%esi
movl %edx,%ebp
punpcklqdq %xmm4,%xmm7
movdqa %xmm6,%xmm1
addl 48(%esp),%ecx
xorl %eax,%edi
paddd %xmm6,%xmm2
movdqa %xmm3,64(%esp)
roll $5,%edx
addl %esi,%ecx
psrldq $4,%xmm1
andl %edi,%ebp
xorl %eax,%edi
pxor %xmm3,%xmm7
addl %edx,%ecx
rorl $7,%edx
pxor %xmm5,%xmm1
xorl %eax,%ebp
movl %ecx,%esi
addl 52(%esp),%ebx
pxor %xmm1,%xmm7
xorl %edi,%edx
roll $5,%ecx
movdqa %xmm2,32(%esp)
addl %ebp,%ebx
andl %edx,%esi
movdqa %xmm7,%xmm3
xorl %edi,%edx
addl %ecx,%ebx
rorl $7,%ecx
movdqa %xmm7,%xmm1
xorl %edi,%esi
pslldq $12,%xmm3
paddd %xmm7,%xmm7
movl %ebx,%ebp
addl 56(%esp),%eax
psrld $31,%xmm1
xorl %edx,%ecx
roll $5,%ebx
movdqa %xmm3,%xmm2
addl %esi,%eax
andl %ecx,%ebp
xorl %edx,%ecx
psrld $30,%xmm3
addl %ebx,%eax
rorl $7,%ebx
por %xmm1,%xmm7
xorl %edx,%ebp
movdqa 80(%esp),%xmm1
movl %eax,%esi
addl 60(%esp),%edi
pslld $2,%xmm2
xorl %ecx,%ebx
roll $5,%eax
pxor %xmm3,%xmm7
movdqa 112(%esp),%xmm3
addl %ebp,%edi
andl %ebx,%esi
pxor %xmm2,%xmm7
pshufd $238,%xmm6,%xmm2
xorl %ecx,%ebx
addl %eax,%edi
rorl $7,%eax
pxor %xmm4,%xmm0
punpcklqdq %xmm7,%xmm2
xorl %ecx,%esi
movl %edi,%ebp
addl (%esp),%edx
pxor %xmm1,%xmm0
movdqa %xmm4,80(%esp)
xorl %ebx,%eax
roll $5,%edi
movdqa %xmm3,%xmm4
addl %esi,%edx
paddd %xmm7,%xmm3
andl %eax,%ebp
pxor %xmm2,%xmm0
xorl %ebx,%eax
addl %edi,%edx
rorl $7,%edi
xorl %ebx,%ebp
movdqa %xmm0,%xmm2
movdqa %xmm3,48(%esp)
movl %edx,%esi
addl 4(%esp),%ecx
xorl %eax,%edi
roll $5,%edx
pslld $2,%xmm0
addl %ebp,%ecx
andl %edi,%esi
psrld $30,%xmm2
xorl %eax,%edi
addl %edx,%ecx
rorl $7,%edx
xorl %eax,%esi
movl %ecx,%ebp
addl 8(%esp),%ebx
xorl %edi,%edx
roll $5,%ecx
por %xmm2,%xmm0
addl %esi,%ebx
andl %edx,%ebp
movdqa 96(%esp),%xmm2
xorl %edi,%edx
addl %ecx,%ebx
addl 12(%esp),%eax
xorl %edi,%ebp
movl %ebx,%esi
pshufd $238,%xmm7,%xmm3
roll $5,%ebx
addl %ebp,%eax
xorl %edx,%esi
rorl $7,%ecx
addl %ebx,%eax
addl 16(%esp),%edi
pxor %xmm5,%xmm1
punpcklqdq %xmm0,%xmm3
xorl %ecx,%esi
movl %eax,%ebp
roll $5,%eax
pxor %xmm2,%xmm1
movdqa %xmm5,96(%esp)
addl %esi,%edi
xorl %ecx,%ebp
movdqa %xmm4,%xmm5
rorl $7,%ebx
paddd %xmm0,%xmm4
addl %eax,%edi
pxor %xmm3,%xmm1
addl 20(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
movdqa %xmm1,%xmm3
movdqa %xmm4,(%esp)
addl %ebp,%edx
xorl %ebx,%esi
rorl $7,%eax
addl %edi,%edx
pslld $2,%xmm1
addl 24(%esp),%ecx
xorl %eax,%esi
psrld $30,%xmm3
movl %edx,%ebp
roll $5,%edx
addl %esi,%ecx
xorl %eax,%ebp
rorl $7,%edi
addl %edx,%ecx
por %xmm3,%xmm1
addl 28(%esp),%ebx
xorl %edi,%ebp
movdqa 64(%esp),%xmm3
movl %ecx,%esi
roll $5,%ecx
addl %ebp,%ebx
xorl %edi,%esi
rorl $7,%edx
pshufd $238,%xmm0,%xmm4
addl %ecx,%ebx
addl 32(%esp),%eax
pxor %xmm6,%xmm2
punpcklqdq %xmm1,%xmm4
xorl %edx,%esi
movl %ebx,%ebp
roll $5,%ebx
pxor %xmm3,%xmm2
movdqa %xmm6,64(%esp)
addl %esi,%eax
xorl %edx,%ebp
movdqa 128(%esp),%xmm6
rorl $7,%ecx
paddd %xmm1,%xmm5
addl %ebx,%eax
pxor %xmm4,%xmm2
addl 36(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
roll $5,%eax
movdqa %xmm2,%xmm4
movdqa %xmm5,16(%esp)
addl %ebp,%edi
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%edi
pslld $2,%xmm2
addl 40(%esp),%edx
xorl %ebx,%esi
psrld $30,%xmm4
movl %edi,%ebp
roll $5,%edi
addl %esi,%edx
xorl %ebx,%ebp
rorl $7,%eax
addl %edi,%edx
por %xmm4,%xmm2
addl 44(%esp),%ecx
xorl %eax,%ebp
movdqa 80(%esp),%xmm4
movl %edx,%esi
roll $5,%edx
addl %ebp,%ecx
xorl %eax,%esi
rorl $7,%edi
pshufd $238,%xmm1,%xmm5
addl %edx,%ecx
addl 48(%esp),%ebx
pxor %xmm7,%xmm3
punpcklqdq %xmm2,%xmm5
xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
pxor %xmm4,%xmm3
movdqa %xmm7,80(%esp)
addl %esi,%ebx
xorl %edi,%ebp
movdqa %xmm6,%xmm7
rorl $7,%edx
paddd %xmm2,%xmm6
addl %ecx,%ebx
pxor %xmm5,%xmm3
addl 52(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
movdqa %xmm3,%xmm5
movdqa %xmm6,32(%esp)
addl %ebp,%eax
xorl %edx,%esi
rorl $7,%ecx
addl %ebx,%eax
pslld $2,%xmm3
addl 56(%esp),%edi
xorl %ecx,%esi
psrld $30,%xmm5
movl %eax,%ebp
roll $5,%eax
addl %esi,%edi
xorl %ecx,%ebp
rorl $7,%ebx
addl %eax,%edi
por %xmm5,%xmm3
addl 60(%esp),%edx
xorl %ebx,%ebp
movdqa 96(%esp),%xmm5
movl %edi,%esi
roll $5,%edi
addl %ebp,%edx
xorl %ebx,%esi
rorl $7,%eax
pshufd $238,%xmm2,%xmm6
addl %edi,%edx
addl (%esp),%ecx
pxor %xmm0,%xmm4
punpcklqdq %xmm3,%xmm6
xorl %eax,%esi
movl %edx,%ebp
roll $5,%edx
pxor %xmm5,%xmm4
movdqa %xmm0,96(%esp)
addl %esi,%ecx
xorl %eax,%ebp
movdqa %xmm7,%xmm0
rorl $7,%edi
paddd %xmm3,%xmm7
addl %edx,%ecx
pxor %xmm6,%xmm4
addl 4(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
roll $5,%ecx
movdqa %xmm4,%xmm6
movdqa %xmm7,48(%esp)
addl %ebp,%ebx
xorl %edi,%esi
rorl $7,%edx
addl %ecx,%ebx
pslld $2,%xmm4
addl 8(%esp),%eax
xorl %edx,%esi
psrld $30,%xmm6
movl %ebx,%ebp
roll $5,%ebx
addl %esi,%eax
xorl %edx,%ebp
rorl $7,%ecx
addl %ebx,%eax
por %xmm6,%xmm4
addl 12(%esp),%edi
xorl %ecx,%ebp
movdqa 64(%esp),%xmm6
movl %eax,%esi
roll $5,%eax
addl %ebp,%edi
xorl %ecx,%esi
rorl $7,%ebx
pshufd $238,%xmm3,%xmm7
addl %eax,%edi
addl 16(%esp),%edx
pxor %xmm1,%xmm5
punpcklqdq %xmm4,%xmm7
xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
pxor %xmm6,%xmm5
movdqa %xmm1,64(%esp)
addl %esi,%edx
xorl %ebx,%ebp
movdqa %xmm0,%xmm1
rorl $7,%eax
paddd %xmm4,%xmm0
addl %edi,%edx
pxor %xmm7,%xmm5
addl 20(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
roll $5,%edx
movdqa %xmm5,%xmm7
movdqa %xmm0,(%esp)
addl %ebp,%ecx
xorl %eax,%esi
rorl $7,%edi
addl %edx,%ecx
pslld $2,%xmm5
addl 24(%esp),%ebx
xorl %edi,%esi
psrld $30,%xmm7
movl %ecx,%ebp
roll $5,%ecx
addl %esi,%ebx
xorl %edi,%ebp
rorl $7,%edx
addl %ecx,%ebx
por %xmm7,%xmm5
addl 28(%esp),%eax
movdqa 80(%esp),%xmm7
rorl $7,%ecx
movl %ebx,%esi
xorl %edx,%ebp
roll $5,%ebx
pshufd $238,%xmm4,%xmm0
addl %ebp,%eax
xorl %ecx,%esi
xorl %edx,%ecx
addl %ebx,%eax
addl 32(%esp),%edi
pxor %xmm2,%xmm6
punpcklqdq %xmm5,%xmm0
andl %ecx,%esi
xorl %edx,%ecx
rorl $7,%ebx
pxor %xmm7,%xmm6
movdqa %xmm2,80(%esp)
movl %eax,%ebp
xorl %ecx,%esi
roll $5,%eax
movdqa %xmm1,%xmm2
addl %esi,%edi
paddd %xmm5,%xmm1
xorl %ebx,%ebp
pxor %xmm0,%xmm6
xorl %ecx,%ebx
addl %eax,%edi
addl 36(%esp),%edx
andl %ebx,%ebp
movdqa %xmm6,%xmm0
movdqa %xmm1,16(%esp)
xorl %ecx,%ebx
rorl $7,%eax
movl %edi,%esi
xorl %ebx,%ebp
roll $5,%edi
pslld $2,%xmm6
addl %ebp,%edx
xorl %eax,%esi
psrld $30,%xmm0
xorl %ebx,%eax
addl %edi,%edx
addl 40(%esp),%ecx
andl %eax,%esi
xorl %ebx,%eax
rorl $7,%edi
por %xmm0,%xmm6
movl %edx,%ebp
xorl %eax,%esi
movdqa 96(%esp),%xmm0
roll $5,%edx
addl %esi,%ecx
xorl %edi,%ebp
xorl %eax,%edi
addl %edx,%ecx
pshufd $238,%xmm5,%xmm1
addl 44(%esp),%ebx
andl %edi,%ebp
xorl %eax,%edi
rorl $7,%edx
movl %ecx,%esi
xorl %edi,%ebp
roll $5,%ecx
addl %ebp,%ebx
xorl %edx,%esi
xorl %edi,%edx
addl %ecx,%ebx
addl 48(%esp),%eax
pxor %xmm3,%xmm7
punpcklqdq %xmm6,%xmm1
andl %edx,%esi
xorl %edi,%edx
rorl $7,%ecx
pxor %xmm0,%xmm7
movdqa %xmm3,96(%esp)
movl %ebx,%ebp
xorl %edx,%esi
roll $5,%ebx
movdqa 144(%esp),%xmm3
addl %esi,%eax
paddd %xmm6,%xmm2
xorl %ecx,%ebp
pxor %xmm1,%xmm7
xorl %edx,%ecx
addl %ebx,%eax
addl 52(%esp),%edi
andl %ecx,%ebp
movdqa %xmm7,%xmm1
movdqa %xmm2,32(%esp)
xorl %edx,%ecx
rorl $7,%ebx
movl %eax,%esi
xorl %ecx,%ebp
roll $5,%eax
pslld $2,%xmm7
addl %ebp,%edi
xorl %ebx,%esi
psrld $30,%xmm1
xorl %ecx,%ebx
addl %eax,%edi
addl 56(%esp),%edx
andl %ebx,%esi
xorl %ecx,%ebx
rorl $7,%eax
por %xmm1,%xmm7
movl %edi,%ebp
xorl %ebx,%esi
movdqa 64(%esp),%xmm1
roll $5,%edi
addl %esi,%edx
xorl %eax,%ebp
xorl %ebx,%eax
addl %edi,%edx
pshufd $238,%xmm6,%xmm2
addl 60(%esp),%ecx
andl %eax,%ebp
xorl %ebx,%eax
rorl $7,%edi
movl %edx,%esi
xorl %eax,%ebp
roll $5,%edx
addl %ebp,%ecx
xorl %edi,%esi
xorl %eax,%edi
addl %edx,%ecx
addl (%esp),%ebx
pxor %xmm4,%xmm0
punpcklqdq %xmm7,%xmm2
andl %edi,%esi
xorl %eax,%edi
rorl $7,%edx
pxor %xmm1,%xmm0
movdqa %xmm4,64(%esp)
movl %ecx,%ebp
xorl %edi,%esi
roll $5,%ecx
movdqa %xmm3,%xmm4
addl %esi,%ebx
paddd %xmm7,%xmm3
xorl %edx,%ebp
pxor %xmm2,%xmm0
xorl %edi,%edx
addl %ecx,%ebx
addl 4(%esp),%eax
andl %edx,%ebp
movdqa %xmm0,%xmm2
movdqa %xmm3,48(%esp)
xorl %edi,%edx
rorl $7,%ecx
movl %ebx,%esi
xorl %edx,%ebp
roll $5,%ebx
pslld $2,%xmm0
addl %ebp,%eax
xorl %ecx,%esi
psrld $30,%xmm2
xorl %edx,%ecx
addl %ebx,%eax
addl 8(%esp),%edi
andl %ecx,%esi
xorl %edx,%ecx
rorl $7,%ebx
por %xmm2,%xmm0
movl %eax,%ebp
xorl %ecx,%esi
movdqa 80(%esp),%xmm2
roll $5,%eax
addl %esi,%edi
xorl %ebx,%ebp
xorl %ecx,%ebx
addl %eax,%edi
pshufd $238,%xmm7,%xmm3
addl 12(%esp),%edx
andl %ebx,%ebp
xorl %ecx,%ebx
rorl $7,%eax
movl %edi,%esi
xorl %ebx,%ebp
roll $5,%edi
addl %ebp,%edx
xorl %eax,%esi
xorl %ebx,%eax
addl %edi,%edx
addl 16(%esp),%ecx
pxor %xmm5,%xmm1
punpcklqdq %xmm0,%xmm3
andl %eax,%esi
xorl %ebx,%eax
rorl $7,%edi
pxor %xmm2,%xmm1
movdqa %xmm5,80(%esp)
movl %edx,%ebp
xorl %eax,%esi
roll $5,%edx
movdqa %xmm4,%xmm5
addl %esi,%ecx
paddd %xmm0,%xmm4
xorl %edi,%ebp
pxor %xmm3,%xmm1
xorl %eax,%edi
addl %edx,%ecx
addl 20(%esp),%ebx
andl %edi,%ebp
movdqa %xmm1,%xmm3
movdqa %xmm4,(%esp)
xorl %eax,%edi
rorl $7,%edx
movl %ecx,%esi
xorl %edi,%ebp
roll $5,%ecx
pslld $2,%xmm1
addl %ebp,%ebx
xorl %edx,%esi
psrld $30,%xmm3
xorl %edi,%edx
addl %ecx,%ebx
addl 24(%esp),%eax
andl %edx,%esi
xorl %edi,%edx
rorl $7,%ecx
por %xmm3,%xmm1
movl %ebx,%ebp
xorl %edx,%esi
movdqa 96(%esp),%xmm3
roll $5,%ebx
addl %esi,%eax
xorl %ecx,%ebp
xorl %edx,%ecx
addl %ebx,%eax
pshufd $238,%xmm0,%xmm4
addl 28(%esp),%edi
andl %ecx,%ebp
xorl %edx,%ecx
rorl $7,%ebx
movl %eax,%esi
xorl %ecx,%ebp
roll $5,%eax
addl %ebp,%edi
xorl %ebx,%esi
xorl %ecx,%ebx
addl %eax,%edi
addl 32(%esp),%edx
pxor %xmm6,%xmm2
punpcklqdq %xmm1,%xmm4
andl %ebx,%esi
xorl %ecx,%ebx
rorl $7,%eax
pxor %xmm3,%xmm2
movdqa %xmm6,96(%esp)
movl %edi,%ebp
xorl %ebx,%esi
roll $5,%edi
movdqa %xmm5,%xmm6
addl %esi,%edx
paddd %xmm1,%xmm5
xorl %eax,%ebp
pxor %xmm4,%xmm2
xorl %ebx,%eax
addl %edi,%edx
addl 36(%esp),%ecx
andl %eax,%ebp
movdqa %xmm2,%xmm4
movdqa %xmm5,16(%esp)
xorl %ebx,%eax
rorl $7,%edi
movl %edx,%esi
xorl %eax,%ebp
roll $5,%edx
pslld $2,%xmm2
addl %ebp,%ecx
xorl %edi,%esi
psrld $30,%xmm4
xorl %eax,%edi
addl %edx,%ecx
addl 40(%esp),%ebx
andl %edi,%esi
xorl %eax,%edi
rorl $7,%edx
por %xmm4,%xmm2
movl %ecx,%ebp
xorl %edi,%esi
movdqa 64(%esp),%xmm4
roll $5,%ecx
addl %esi,%ebx
xorl %edx,%ebp
xorl %edi,%edx
addl %ecx,%ebx
pshufd $238,%xmm1,%xmm5
addl 44(%esp),%eax
andl %edx,%ebp
xorl %edi,%edx
rorl $7,%ecx
movl %ebx,%esi
xorl %edx,%ebp
roll $5,%ebx
addl %ebp,%eax
xorl %edx,%esi
addl %ebx,%eax
addl 48(%esp),%edi
pxor %xmm7,%xmm3
punpcklqdq %xmm2,%xmm5
xorl %ecx,%esi
movl %eax,%ebp
roll $5,%eax
pxor %xmm4,%xmm3
movdqa %xmm7,64(%esp)
addl %esi,%edi
xorl %ecx,%ebp
movdqa %xmm6,%xmm7
rorl $7,%ebx
paddd %xmm2,%xmm6
addl %eax,%edi
pxor %xmm5,%xmm3
addl 52(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
movdqa %xmm3,%xmm5
movdqa %xmm6,32(%esp)
addl %ebp,%edx
xorl %ebx,%esi
rorl $7,%eax
addl %edi,%edx
pslld $2,%xmm3
addl 56(%esp),%ecx
xorl %eax,%esi
psrld $30,%xmm5
movl %edx,%ebp
roll $5,%edx
addl %esi,%ecx
xorl %eax,%ebp
rorl $7,%edi
addl %edx,%ecx
por %xmm5,%xmm3
addl 60(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
roll $5,%ecx
addl %ebp,%ebx
xorl %edi,%esi
rorl $7,%edx
addl %ecx,%ebx
addl (%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
roll $5,%ebx
addl %esi,%eax
xorl %edx,%ebp
rorl $7,%ecx
paddd %xmm3,%xmm7
addl %ebx,%eax
addl 4(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
movdqa %xmm7,48(%esp)
roll $5,%eax
addl %ebp,%edi
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%edi
addl 8(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
addl %esi,%edx
xorl %ebx,%ebp
rorl $7,%eax
addl %edi,%edx
addl 12(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
roll $5,%edx
addl %ebp,%ecx
xorl %eax,%esi
rorl $7,%edi
addl %edx,%ecx
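// 196(%esp) and 200(%esp) appear to hold the running input pointer and the
// end-of-input pointer; once the last 64-byte block has been scheduled the
// code jumps to .L003done below, otherwise the next block is loaded and
// byte-swapped while the closing rounds of the current block are computed.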
movl 196(%esp),%ebp
cmpl 200(%esp),%ebp
je .L003done
movdqa 160(%esp),%xmm7
movdqa 176(%esp),%xmm6
movdqu (%ebp),%xmm0
movdqu 16(%ebp),%xmm1
movdqu 32(%ebp),%xmm2
movdqu 48(%ebp),%xmm3
addl $64,%ebp
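// The ".byte 102,15,56,0,..." sequences below encode pshufb instructions
// (emitted as raw opcode bytes, presumably for older-assembler
// compatibility); they apply the byte-swap mask kept in %xmm6 to the freshly
// loaded message words.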
.byte 102,15,56,0,198
movl %ebp,196(%esp)
movdqa %xmm7,96(%esp)
addl 16(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
addl %esi,%ebx
xorl %edi,%ebp
rorl $7,%edx
.byte 102,15,56,0,206
addl %ecx,%ebx
addl 20(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
paddd %xmm7,%xmm0
roll $5,%ebx
addl %ebp,%eax
xorl %edx,%esi
rorl $7,%ecx
movdqa %xmm0,(%esp)
addl %ebx,%eax
addl 24(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
psubd %xmm7,%xmm0
roll $5,%eax
addl %esi,%edi
xorl %ecx,%ebp
rorl $7,%ebx
addl %eax,%edi
addl 28(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
addl %ebp,%edx
xorl %ebx,%esi
rorl $7,%eax
addl %edi,%edx
addl 32(%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
roll $5,%edx
addl %esi,%ecx
xorl %eax,%ebp
rorl $7,%edi
.byte 102,15,56,0,214
addl %edx,%ecx
addl 36(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
paddd %xmm7,%xmm1
roll $5,%ecx
addl %ebp,%ebx
xorl %edi,%esi
rorl $7,%edx
movdqa %xmm1,16(%esp)
addl %ecx,%ebx
addl 40(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
psubd %xmm7,%xmm1
roll $5,%ebx
addl %esi,%eax
xorl %edx,%ebp
rorl $7,%ecx
addl %ebx,%eax
addl 44(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
roll $5,%eax
addl %ebp,%edi
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%edi
addl 48(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
addl %esi,%edx
xorl %ebx,%ebp
rorl $7,%eax
.byte 102,15,56,0,222
addl %edi,%edx
addl 52(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
paddd %xmm7,%xmm2
roll $5,%edx
addl %ebp,%ecx
xorl %eax,%esi
rorl $7,%edi
movdqa %xmm2,32(%esp)
addl %edx,%ecx
addl 56(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
psubd %xmm7,%xmm2
roll $5,%ecx
addl %esi,%ebx
xorl %edi,%ebp
rorl $7,%edx
addl %ecx,%ebx
addl 60(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
addl %ebp,%eax
rorl $7,%ecx
addl %ebx,%eax
movl 192(%esp),%ebp
addl (%ebp),%eax
addl 4(%ebp),%esi
addl 8(%ebp),%ecx
movl %eax,(%ebp)
addl 12(%ebp),%edx
movl %esi,4(%ebp)
addl 16(%ebp),%edi
movl %ecx,8(%ebp)
movl %ecx,%ebx
movl %edx,12(%ebp)
xorl %edx,%ebx
movl %edi,16(%ebp)
movl %esi,%ebp
pshufd $238,%xmm0,%xmm4
andl %ebx,%esi
movl %ebp,%ebx
jmp .L002loop
.align 16
.L003done:
addl 16(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
addl %esi,%ebx
xorl %edi,%ebp
rorl $7,%edx
addl %ecx,%ebx
addl 20(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
addl %ebp,%eax
xorl %edx,%esi
rorl $7,%ecx
addl %ebx,%eax
addl 24(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
roll $5,%eax
addl %esi,%edi
xorl %ecx,%ebp
rorl $7,%ebx
addl %eax,%edi
addl 28(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
roll $5,%edi
addl %ebp,%edx
xorl %ebx,%esi
rorl $7,%eax
addl %edi,%edx
addl 32(%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
roll $5,%edx
addl %esi,%ecx
xorl %eax,%ebp
rorl $7,%edi
addl %edx,%ecx
addl 36(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
roll $5,%ecx
addl %ebp,%ebx
xorl %edi,%esi
rorl $7,%edx
addl %ecx,%ebx
addl 40(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
roll $5,%ebx
addl %esi,%eax
xorl %edx,%ebp
rorl $7,%ecx
addl %ebx,%eax
addl 44(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
roll $5,%eax
addl %ebp,%edi
xorl %ecx,%esi
rorl $7,%ebx
addl %eax,%edi
addl 48(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
roll $5,%edi
addl %esi,%edx
xorl %ebx,%ebp
rorl $7,%eax
addl %edi,%edx
addl 52(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
roll $5,%edx
addl %ebp,%ecx
xorl %eax,%esi
rorl $7,%edi
addl %edx,%ecx
addl 56(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
roll $5,%ecx
addl %esi,%ebx
xorl %edi,%ebp
rorl $7,%edx
addl %ecx,%ebx
addl 60(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
roll $5,%ebx
addl %ebp,%eax
rorl $7,%ecx
addl %ebx,%eax
movl 192(%esp),%ebp
addl (%ebp),%eax
movl 204(%esp),%esp
addl 4(%ebp),%esi
addl 8(%ebp),%ecx
movl %eax,(%ebp)
addl 12(%ebp),%edx
movl %esi,4(%ebp)
addl 16(%ebp),%edi
movl %ecx,8(%ebp)
movl %edx,12(%ebp)
movl %edi,16(%ebp)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size sha1_block_data_order_ssse3,.-.L_sha1_block_data_order_ssse3_begin
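// sha1_block_data_order_avx: the same SHA-1 block routine using VEX-encoded
// (AVX) instructions. The prologue below appears to stash the round constants
// and the byte-swap mask at 112-176(%esp), the state pointer at 192(%esp),
// the input and end-of-input pointers at 196/200(%esp), and the caller's
// %esp at 204(%esp).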
.globl sha1_block_data_order_avx
.hidden sha1_block_data_order_avx
.type sha1_block_data_order_avx,@function
.align 16
sha1_block_data_order_avx:
.L_sha1_block_data_order_avx_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
call .L004pic_point
.L004pic_point:
popl %ebp
leal .LK_XX_XX-.L004pic_point(%ebp),%ebp
vzeroall
vmovdqa (%ebp),%xmm7
vmovdqa 16(%ebp),%xmm0
vmovdqa 32(%ebp),%xmm1
vmovdqa 48(%ebp),%xmm2
vmovdqa 64(%ebp),%xmm6
movl 20(%esp),%edi
movl 24(%esp),%ebp
movl 28(%esp),%edx
movl %esp,%esi
subl $208,%esp
andl $-64,%esp
vmovdqa %xmm0,112(%esp)
vmovdqa %xmm1,128(%esp)
vmovdqa %xmm2,144(%esp)
shll $6,%edx
vmovdqa %xmm7,160(%esp)
addl %ebp,%edx
vmovdqa %xmm6,176(%esp)
addl $64,%ebp
movl %edi,192(%esp)
movl %ebp,196(%esp)
movl %edx,200(%esp)
movl %esi,204(%esp)
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
movl 16(%edi),%edi
movl %ebx,%esi
vmovdqu -64(%ebp),%xmm0
vmovdqu -48(%ebp),%xmm1
vmovdqu -32(%ebp),%xmm2
vmovdqu -16(%ebp),%xmm3
vpshufb %xmm6,%xmm0,%xmm0
vpshufb %xmm6,%xmm1,%xmm1
vpshufb %xmm6,%xmm2,%xmm2
vmovdqa %xmm7,96(%esp)
vpshufb %xmm6,%xmm3,%xmm3
vpaddd %xmm7,%xmm0,%xmm4
vpaddd %xmm7,%xmm1,%xmm5
vpaddd %xmm7,%xmm2,%xmm6
vmovdqa %xmm4,(%esp)
movl %ecx,%ebp
vmovdqa %xmm5,16(%esp)
xorl %edx,%ebp
vmovdqa %xmm6,32(%esp)
andl %ebp,%esi
jmp .L005loop
.align 16
.L005loop:
shrdl $2,%ebx,%ebx
xorl %edx,%esi
vpalignr $8,%xmm0,%xmm1,%xmm4
movl %eax,%ebp
addl (%esp),%edi
vpaddd %xmm3,%xmm7,%xmm7
vmovdqa %xmm0,64(%esp)
xorl %ecx,%ebx
shldl $5,%eax,%eax
vpsrldq $4,%xmm3,%xmm6
addl %esi,%edi
andl %ebx,%ebp
vpxor %xmm0,%xmm4,%xmm4
xorl %ecx,%ebx
addl %eax,%edi
vpxor %xmm2,%xmm6,%xmm6
shrdl $7,%eax,%eax
xorl %ecx,%ebp
vmovdqa %xmm7,48(%esp)
movl %edi,%esi
addl 4(%esp),%edx
vpxor %xmm6,%xmm4,%xmm4
xorl %ebx,%eax
shldl $5,%edi,%edi
addl %ebp,%edx
andl %eax,%esi
vpsrld $31,%xmm4,%xmm6
xorl %ebx,%eax
addl %edi,%edx
shrdl $7,%edi,%edi
xorl %ebx,%esi
vpslldq $12,%xmm4,%xmm0
vpaddd %xmm4,%xmm4,%xmm4
movl %edx,%ebp
addl 8(%esp),%ecx
xorl %eax,%edi
shldl $5,%edx,%edx
vpsrld $30,%xmm0,%xmm7
vpor %xmm6,%xmm4,%xmm4
addl %esi,%ecx
andl %edi,%ebp
xorl %eax,%edi
addl %edx,%ecx
vpslld $2,%xmm0,%xmm0
shrdl $7,%edx,%edx
xorl %eax,%ebp
vpxor %xmm7,%xmm4,%xmm4
movl %ecx,%esi
addl 12(%esp),%ebx
xorl %edi,%edx
shldl $5,%ecx,%ecx
vpxor %xmm0,%xmm4,%xmm4
addl %ebp,%ebx
andl %edx,%esi
vmovdqa 96(%esp),%xmm0
xorl %edi,%edx
addl %ecx,%ebx
shrdl $7,%ecx,%ecx
xorl %edi,%esi
vpalignr $8,%xmm1,%xmm2,%xmm5
movl %ebx,%ebp
addl 16(%esp),%eax
vpaddd %xmm4,%xmm0,%xmm0
vmovdqa %xmm1,80(%esp)
xorl %edx,%ecx
shldl $5,%ebx,%ebx
vpsrldq $4,%xmm4,%xmm7
addl %esi,%eax
andl %ecx,%ebp
vpxor %xmm1,%xmm5,%xmm5
xorl %edx,%ecx
addl %ebx,%eax
vpxor %xmm3,%xmm7,%xmm7
shrdl $7,%ebx,%ebx
xorl %edx,%ebp
vmovdqa %xmm0,(%esp)
movl %eax,%esi
addl 20(%esp),%edi
vpxor %xmm7,%xmm5,%xmm5
xorl %ecx,%ebx
shldl $5,%eax,%eax
addl %ebp,%edi
andl %ebx,%esi
vpsrld $31,%xmm5,%xmm7
xorl %ecx,%ebx
addl %eax,%edi
shrdl $7,%eax,%eax
xorl %ecx,%esi
vpslldq $12,%xmm5,%xmm1
vpaddd %xmm5,%xmm5,%xmm5
movl %edi,%ebp
addl 24(%esp),%edx
xorl %ebx,%eax
shldl $5,%edi,%edi
vpsrld $30,%xmm1,%xmm0
vpor %xmm7,%xmm5,%xmm5
addl %esi,%edx
andl %eax,%ebp
xorl %ebx,%eax
addl %edi,%edx
vpslld $2,%xmm1,%xmm1
shrdl $7,%edi,%edi
xorl %ebx,%ebp
vpxor %xmm0,%xmm5,%xmm5
movl %edx,%esi
addl 28(%esp),%ecx
xorl %eax,%edi
shldl $5,%edx,%edx
vpxor %xmm1,%xmm5,%xmm5
addl %ebp,%ecx
andl %edi,%esi
vmovdqa 112(%esp),%xmm1
xorl %eax,%edi
addl %edx,%ecx
shrdl $7,%edx,%edx
xorl %eax,%esi
vpalignr $8,%xmm2,%xmm3,%xmm6
movl %ecx,%ebp
addl 32(%esp),%ebx
vpaddd %xmm5,%xmm1,%xmm1
vmovdqa %xmm2,96(%esp)
xorl %edi,%edx
shldl $5,%ecx,%ecx
vpsrldq $4,%xmm5,%xmm0
addl %esi,%ebx
andl %edx,%ebp
vpxor %xmm2,%xmm6,%xmm6
xorl %edi,%edx
addl %ecx,%ebx
vpxor %xmm4,%xmm0,%xmm0
shrdl $7,%ecx,%ecx
xorl %edi,%ebp
vmovdqa %xmm1,16(%esp)
movl %ebx,%esi
addl 36(%esp),%eax
vpxor %xmm0,%xmm6,%xmm6
xorl %edx,%ecx
shldl $5,%ebx,%ebx
addl %ebp,%eax
andl %ecx,%esi
vpsrld $31,%xmm6,%xmm0
xorl %edx,%ecx
addl %ebx,%eax
shrdl $7,%ebx,%ebx
xorl %edx,%esi
vpslldq $12,%xmm6,%xmm2
vpaddd %xmm6,%xmm6,%xmm6
movl %eax,%ebp
addl 40(%esp),%edi
xorl %ecx,%ebx
shldl $5,%eax,%eax
vpsrld $30,%xmm2,%xmm1
vpor %xmm0,%xmm6,%xmm6
addl %esi,%edi
andl %ebx,%ebp
xorl %ecx,%ebx
addl %eax,%edi
vpslld $2,%xmm2,%xmm2
vmovdqa 64(%esp),%xmm0
shrdl $7,%eax,%eax
xorl %ecx,%ebp
vpxor %xmm1,%xmm6,%xmm6
movl %edi,%esi
addl 44(%esp),%edx
xorl %ebx,%eax
shldl $5,%edi,%edi
vpxor %xmm2,%xmm6,%xmm6
addl %ebp,%edx
andl %eax,%esi
vmovdqa 112(%esp),%xmm2
xorl %ebx,%eax
addl %edi,%edx
shrdl $7,%edi,%edi
xorl %ebx,%esi
vpalignr $8,%xmm3,%xmm4,%xmm7
movl %edx,%ebp
addl 48(%esp),%ecx
vpaddd %xmm6,%xmm2,%xmm2
vmovdqa %xmm3,64(%esp)
xorl %eax,%edi
shldl $5,%edx,%edx
vpsrldq $4,%xmm6,%xmm1
addl %esi,%ecx
andl %edi,%ebp
vpxor %xmm3,%xmm7,%xmm7
xorl %eax,%edi
addl %edx,%ecx
vpxor %xmm5,%xmm1,%xmm1
shrdl $7,%edx,%edx
xorl %eax,%ebp
vmovdqa %xmm2,32(%esp)
movl %ecx,%esi
addl 52(%esp),%ebx
vpxor %xmm1,%xmm7,%xmm7
xorl %edi,%edx
shldl $5,%ecx,%ecx
addl %ebp,%ebx
andl %edx,%esi
vpsrld $31,%xmm7,%xmm1
xorl %edi,%edx
addl %ecx,%ebx
shrdl $7,%ecx,%ecx
xorl %edi,%esi
vpslldq $12,%xmm7,%xmm3
vpaddd %xmm7,%xmm7,%xmm7
movl %ebx,%ebp
addl 56(%esp),%eax
xorl %edx,%ecx
shldl $5,%ebx,%ebx
vpsrld $30,%xmm3,%xmm2
vpor %xmm1,%xmm7,%xmm7
addl %esi,%eax
andl %ecx,%ebp
xorl %edx,%ecx
addl %ebx,%eax
vpslld $2,%xmm3,%xmm3
vmovdqa 80(%esp),%xmm1
shrdl $7,%ebx,%ebx
xorl %edx,%ebp
vpxor %xmm2,%xmm7,%xmm7
movl %eax,%esi
addl 60(%esp),%edi
xorl %ecx,%ebx
shldl $5,%eax,%eax
vpxor %xmm3,%xmm7,%xmm7
addl %ebp,%edi
andl %ebx,%esi
vmovdqa 112(%esp),%xmm3
xorl %ecx,%ebx
addl %eax,%edi
vpalignr $8,%xmm6,%xmm7,%xmm2
vpxor %xmm4,%xmm0,%xmm0
shrdl $7,%eax,%eax
xorl %ecx,%esi
movl %edi,%ebp
addl (%esp),%edx
vpxor %xmm1,%xmm0,%xmm0
vmovdqa %xmm4,80(%esp)
xorl %ebx,%eax
shldl $5,%edi,%edi
vmovdqa %xmm3,%xmm4
vpaddd %xmm7,%xmm3,%xmm3
addl %esi,%edx
andl %eax,%ebp
vpxor %xmm2,%xmm0,%xmm0
xorl %ebx,%eax
addl %edi,%edx
shrdl $7,%edi,%edi
xorl %ebx,%ebp
vpsrld $30,%xmm0,%xmm2
vmovdqa %xmm3,48(%esp)
movl %edx,%esi
addl 4(%esp),%ecx
xorl %eax,%edi
shldl $5,%edx,%edx
vpslld $2,%xmm0,%xmm0
addl %ebp,%ecx
andl %edi,%esi
xorl %eax,%edi
addl %edx,%ecx
shrdl $7,%edx,%edx
xorl %eax,%esi
movl %ecx,%ebp
addl 8(%esp),%ebx
vpor %xmm2,%xmm0,%xmm0
xorl %edi,%edx
shldl $5,%ecx,%ecx
vmovdqa 96(%esp),%xmm2
addl %esi,%ebx
andl %edx,%ebp
xorl %edi,%edx
addl %ecx,%ebx
addl 12(%esp),%eax
xorl %edi,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpalignr $8,%xmm7,%xmm0,%xmm3
vpxor %xmm5,%xmm1,%xmm1
addl 16(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
shldl $5,%eax,%eax
vpxor %xmm2,%xmm1,%xmm1
vmovdqa %xmm5,96(%esp)
addl %esi,%edi
xorl %ecx,%ebp
vmovdqa %xmm4,%xmm5
vpaddd %xmm0,%xmm4,%xmm4
shrdl $7,%ebx,%ebx
addl %eax,%edi
vpxor %xmm3,%xmm1,%xmm1
addl 20(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
shldl $5,%edi,%edi
vpsrld $30,%xmm1,%xmm3
vmovdqa %xmm4,(%esp)
addl %ebp,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %edi,%edx
vpslld $2,%xmm1,%xmm1
addl 24(%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
shldl $5,%edx,%edx
addl %esi,%ecx
xorl %eax,%ebp
shrdl $7,%edi,%edi
addl %edx,%ecx
vpor %xmm3,%xmm1,%xmm1
addl 28(%esp),%ebx
xorl %edi,%ebp
vmovdqa 64(%esp),%xmm3
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edi,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpalignr $8,%xmm0,%xmm1,%xmm4
vpxor %xmm6,%xmm2,%xmm2
addl 32(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
shldl $5,%ebx,%ebx
vpxor %xmm3,%xmm2,%xmm2
vmovdqa %xmm6,64(%esp)
addl %esi,%eax
xorl %edx,%ebp
vmovdqa 128(%esp),%xmm6
vpaddd %xmm1,%xmm5,%xmm5
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpxor %xmm4,%xmm2,%xmm2
addl 36(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
shldl $5,%eax,%eax
vpsrld $30,%xmm2,%xmm4
vmovdqa %xmm5,16(%esp)
addl %ebp,%edi
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%edi
vpslld $2,%xmm2,%xmm2
addl 40(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
shldl $5,%edi,%edi
addl %esi,%edx
xorl %ebx,%ebp
shrdl $7,%eax,%eax
addl %edi,%edx
vpor %xmm4,%xmm2,%xmm2
addl 44(%esp),%ecx
xorl %eax,%ebp
vmovdqa 80(%esp),%xmm4
movl %edx,%esi
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %eax,%esi
shrdl $7,%edi,%edi
addl %edx,%ecx
vpalignr $8,%xmm1,%xmm2,%xmm5
vpxor %xmm7,%xmm3,%xmm3
addl 48(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
shldl $5,%ecx,%ecx
vpxor %xmm4,%xmm3,%xmm3
vmovdqa %xmm7,80(%esp)
addl %esi,%ebx
xorl %edi,%ebp
vmovdqa %xmm6,%xmm7
vpaddd %xmm2,%xmm6,%xmm6
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpxor %xmm5,%xmm3,%xmm3
addl 52(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
vpsrld $30,%xmm3,%xmm5
vmovdqa %xmm6,32(%esp)
addl %ebp,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpslld $2,%xmm3,%xmm3
addl 56(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
shldl $5,%eax,%eax
addl %esi,%edi
xorl %ecx,%ebp
shrdl $7,%ebx,%ebx
addl %eax,%edi
vpor %xmm5,%xmm3,%xmm3
addl 60(%esp),%edx
xorl %ebx,%ebp
vmovdqa 96(%esp),%xmm5
movl %edi,%esi
shldl $5,%edi,%edi
addl %ebp,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %edi,%edx
vpalignr $8,%xmm2,%xmm3,%xmm6
vpxor %xmm0,%xmm4,%xmm4
addl (%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
shldl $5,%edx,%edx
vpxor %xmm5,%xmm4,%xmm4
vmovdqa %xmm0,96(%esp)
addl %esi,%ecx
xorl %eax,%ebp
vmovdqa %xmm7,%xmm0
vpaddd %xmm3,%xmm7,%xmm7
shrdl $7,%edi,%edi
addl %edx,%ecx
vpxor %xmm6,%xmm4,%xmm4
addl 4(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
shldl $5,%ecx,%ecx
vpsrld $30,%xmm4,%xmm6
vmovdqa %xmm7,48(%esp)
addl %ebp,%ebx
xorl %edi,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpslld $2,%xmm4,%xmm4
addl 8(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %edx,%ebp
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vpor %xmm6,%xmm4,%xmm4
addl 12(%esp),%edi
xorl %ecx,%ebp
vmovdqa 64(%esp),%xmm6
movl %eax,%esi
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%edi
vpalignr $8,%xmm3,%xmm4,%xmm7
vpxor %xmm1,%xmm5,%xmm5
addl 16(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
shldl $5,%edi,%edi
vpxor %xmm6,%xmm5,%xmm5
vmovdqa %xmm1,64(%esp)
addl %esi,%edx
xorl %ebx,%ebp
vmovdqa %xmm0,%xmm1
vpaddd %xmm4,%xmm0,%xmm0
shrdl $7,%eax,%eax
addl %edi,%edx
vpxor %xmm7,%xmm5,%xmm5
addl 20(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
shldl $5,%edx,%edx
vpsrld $30,%xmm5,%xmm7
vmovdqa %xmm0,(%esp)
addl %ebp,%ecx
xorl %eax,%esi
shrdl $7,%edi,%edi
addl %edx,%ecx
vpslld $2,%xmm5,%xmm5
addl 24(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edi,%ebp
shrdl $7,%edx,%edx
addl %ecx,%ebx
vpor %xmm7,%xmm5,%xmm5
addl 28(%esp),%eax
vmovdqa 80(%esp),%xmm7
shrdl $7,%ecx,%ecx
movl %ebx,%esi
xorl %edx,%ebp
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %ecx,%esi
xorl %edx,%ecx
addl %ebx,%eax
vpalignr $8,%xmm4,%xmm5,%xmm0
vpxor %xmm2,%xmm6,%xmm6
addl 32(%esp),%edi
andl %ecx,%esi
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
vpxor %xmm7,%xmm6,%xmm6
vmovdqa %xmm2,80(%esp)
movl %eax,%ebp
xorl %ecx,%esi
vmovdqa %xmm1,%xmm2
vpaddd %xmm5,%xmm1,%xmm1
shldl $5,%eax,%eax
addl %esi,%edi
vpxor %xmm0,%xmm6,%xmm6
xorl %ebx,%ebp
xorl %ecx,%ebx
addl %eax,%edi
addl 36(%esp),%edx
vpsrld $30,%xmm6,%xmm0
vmovdqa %xmm1,16(%esp)
andl %ebx,%ebp
xorl %ecx,%ebx
shrdl $7,%eax,%eax
movl %edi,%esi
vpslld $2,%xmm6,%xmm6
xorl %ebx,%ebp
shldl $5,%edi,%edi
addl %ebp,%edx
xorl %eax,%esi
xorl %ebx,%eax
addl %edi,%edx
addl 40(%esp),%ecx
andl %eax,%esi
vpor %xmm0,%xmm6,%xmm6
xorl %ebx,%eax
shrdl $7,%edi,%edi
vmovdqa 96(%esp),%xmm0
movl %edx,%ebp
xorl %eax,%esi
shldl $5,%edx,%edx
addl %esi,%ecx
xorl %edi,%ebp
xorl %eax,%edi
addl %edx,%ecx
addl 44(%esp),%ebx
andl %edi,%ebp
xorl %eax,%edi
shrdl $7,%edx,%edx
movl %ecx,%esi
xorl %edi,%ebp
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edx,%esi
xorl %edi,%edx
addl %ecx,%ebx
vpalignr $8,%xmm5,%xmm6,%xmm1
vpxor %xmm3,%xmm7,%xmm7
addl 48(%esp),%eax
andl %edx,%esi
xorl %edi,%edx
shrdl $7,%ecx,%ecx
vpxor %xmm0,%xmm7,%xmm7
vmovdqa %xmm3,96(%esp)
movl %ebx,%ebp
xorl %edx,%esi
vmovdqa 144(%esp),%xmm3
vpaddd %xmm6,%xmm2,%xmm2
shldl $5,%ebx,%ebx
addl %esi,%eax
vpxor %xmm1,%xmm7,%xmm7
xorl %ecx,%ebp
xorl %edx,%ecx
addl %ebx,%eax
addl 52(%esp),%edi
vpsrld $30,%xmm7,%xmm1
vmovdqa %xmm2,32(%esp)
andl %ecx,%ebp
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
movl %eax,%esi
vpslld $2,%xmm7,%xmm7
xorl %ecx,%ebp
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ebx,%esi
xorl %ecx,%ebx
addl %eax,%edi
addl 56(%esp),%edx
andl %ebx,%esi
vpor %xmm1,%xmm7,%xmm7
xorl %ecx,%ebx
shrdl $7,%eax,%eax
vmovdqa 64(%esp),%xmm1
movl %edi,%ebp
xorl %ebx,%esi
shldl $5,%edi,%edi
addl %esi,%edx
xorl %eax,%ebp
xorl %ebx,%eax
addl %edi,%edx
addl 60(%esp),%ecx
andl %eax,%ebp
xorl %ebx,%eax
shrdl $7,%edi,%edi
movl %edx,%esi
xorl %eax,%ebp
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %edi,%esi
xorl %eax,%edi
addl %edx,%ecx
vpalignr $8,%xmm6,%xmm7,%xmm2
vpxor %xmm4,%xmm0,%xmm0
addl (%esp),%ebx
andl %edi,%esi
xorl %eax,%edi
shrdl $7,%edx,%edx
vpxor %xmm1,%xmm0,%xmm0
vmovdqa %xmm4,64(%esp)
movl %ecx,%ebp
xorl %edi,%esi
vmovdqa %xmm3,%xmm4
vpaddd %xmm7,%xmm3,%xmm3
shldl $5,%ecx,%ecx
addl %esi,%ebx
vpxor %xmm2,%xmm0,%xmm0
xorl %edx,%ebp
xorl %edi,%edx
addl %ecx,%ebx
addl 4(%esp),%eax
vpsrld $30,%xmm0,%xmm2
vmovdqa %xmm3,48(%esp)
andl %edx,%ebp
xorl %edi,%edx
shrdl $7,%ecx,%ecx
movl %ebx,%esi
vpslld $2,%xmm0,%xmm0
xorl %edx,%ebp
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %ecx,%esi
xorl %edx,%ecx
addl %ebx,%eax
addl 8(%esp),%edi
andl %ecx,%esi
vpor %xmm2,%xmm0,%xmm0
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
vmovdqa 80(%esp),%xmm2
movl %eax,%ebp
xorl %ecx,%esi
shldl $5,%eax,%eax
addl %esi,%edi
xorl %ebx,%ebp
xorl %ecx,%ebx
addl %eax,%edi
addl 12(%esp),%edx
andl %ebx,%ebp
xorl %ecx,%ebx
shrdl $7,%eax,%eax
movl %edi,%esi
xorl %ebx,%ebp
shldl $5,%edi,%edi
addl %ebp,%edx
xorl %eax,%esi
xorl %ebx,%eax
addl %edi,%edx
vpalignr $8,%xmm7,%xmm0,%xmm3
vpxor %xmm5,%xmm1,%xmm1
addl 16(%esp),%ecx
andl %eax,%esi
xorl %ebx,%eax
shrdl $7,%edi,%edi
vpxor %xmm2,%xmm1,%xmm1
vmovdqa %xmm5,80(%esp)
movl %edx,%ebp
xorl %eax,%esi
vmovdqa %xmm4,%xmm5
vpaddd %xmm0,%xmm4,%xmm4
shldl $5,%edx,%edx
addl %esi,%ecx
vpxor %xmm3,%xmm1,%xmm1
xorl %edi,%ebp
xorl %eax,%edi
addl %edx,%ecx
addl 20(%esp),%ebx
vpsrld $30,%xmm1,%xmm3
vmovdqa %xmm4,(%esp)
andl %edi,%ebp
xorl %eax,%edi
shrdl $7,%edx,%edx
movl %ecx,%esi
vpslld $2,%xmm1,%xmm1
xorl %edi,%ebp
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edx,%esi
xorl %edi,%edx
addl %ecx,%ebx
addl 24(%esp),%eax
andl %edx,%esi
vpor %xmm3,%xmm1,%xmm1
xorl %edi,%edx
shrdl $7,%ecx,%ecx
vmovdqa 96(%esp),%xmm3
movl %ebx,%ebp
xorl %edx,%esi
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %ecx,%ebp
xorl %edx,%ecx
addl %ebx,%eax
addl 28(%esp),%edi
andl %ecx,%ebp
xorl %edx,%ecx
shrdl $7,%ebx,%ebx
movl %eax,%esi
xorl %ecx,%ebp
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ebx,%esi
xorl %ecx,%ebx
addl %eax,%edi
vpalignr $8,%xmm0,%xmm1,%xmm4
vpxor %xmm6,%xmm2,%xmm2
addl 32(%esp),%edx
andl %ebx,%esi
xorl %ecx,%ebx
shrdl $7,%eax,%eax
vpxor %xmm3,%xmm2,%xmm2
vmovdqa %xmm6,96(%esp)
movl %edi,%ebp
xorl %ebx,%esi
vmovdqa %xmm5,%xmm6
vpaddd %xmm1,%xmm5,%xmm5
shldl $5,%edi,%edi
addl %esi,%edx
vpxor %xmm4,%xmm2,%xmm2
xorl %eax,%ebp
xorl %ebx,%eax
addl %edi,%edx
addl 36(%esp),%ecx
vpsrld $30,%xmm2,%xmm4
vmovdqa %xmm5,16(%esp)
andl %eax,%ebp
xorl %ebx,%eax
shrdl $7,%edi,%edi
movl %edx,%esi
vpslld $2,%xmm2,%xmm2
xorl %eax,%ebp
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %edi,%esi
xorl %eax,%edi
addl %edx,%ecx
addl 40(%esp),%ebx
andl %edi,%esi
vpor %xmm4,%xmm2,%xmm2
xorl %eax,%edi
shrdl $7,%edx,%edx
vmovdqa 64(%esp),%xmm4
movl %ecx,%ebp
xorl %edi,%esi
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edx,%ebp
xorl %edi,%edx
addl %ecx,%ebx
addl 44(%esp),%eax
andl %edx,%ebp
xorl %edi,%edx
shrdl $7,%ecx,%ecx
movl %ebx,%esi
xorl %edx,%ebp
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %edx,%esi
addl %ebx,%eax
vpalignr $8,%xmm1,%xmm2,%xmm5
vpxor %xmm7,%xmm3,%xmm3
addl 48(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
shldl $5,%eax,%eax
vpxor %xmm4,%xmm3,%xmm3
vmovdqa %xmm7,64(%esp)
addl %esi,%edi
xorl %ecx,%ebp
vmovdqa %xmm6,%xmm7
vpaddd %xmm2,%xmm6,%xmm6
shrdl $7,%ebx,%ebx
addl %eax,%edi
vpxor %xmm5,%xmm3,%xmm3
addl 52(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
shldl $5,%edi,%edi
vpsrld $30,%xmm3,%xmm5
vmovdqa %xmm6,32(%esp)
addl %ebp,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %edi,%edx
vpslld $2,%xmm3,%xmm3
addl 56(%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
shldl $5,%edx,%edx
addl %esi,%ecx
xorl %eax,%ebp
shrdl $7,%edi,%edi
addl %edx,%ecx
vpor %xmm5,%xmm3,%xmm3
addl 60(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edi,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl (%esp),%eax
vpaddd %xmm3,%xmm7,%xmm7
xorl %edx,%esi
movl %ebx,%ebp
shldl $5,%ebx,%ebx
addl %esi,%eax
vmovdqa %xmm7,48(%esp)
xorl %edx,%ebp
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 4(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%edi
addl 8(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
shldl $5,%edi,%edi
addl %esi,%edx
xorl %ebx,%ebp
shrdl $7,%eax,%eax
addl %edi,%edx
addl 12(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %eax,%esi
shrdl $7,%edi,%edi
addl %edx,%ecx
movl 196(%esp),%ebp
cmpl 200(%esp),%ebp
je .L006done
vmovdqa 160(%esp),%xmm7
vmovdqa 176(%esp),%xmm6
vmovdqu (%ebp),%xmm0
vmovdqu 16(%ebp),%xmm1
vmovdqu 32(%ebp),%xmm2
vmovdqu 48(%ebp),%xmm3
addl $64,%ebp
vpshufb %xmm6,%xmm0,%xmm0
movl %ebp,196(%esp)
vmovdqa %xmm7,96(%esp)
addl 16(%esp),%ebx
xorl %edi,%esi
vpshufb %xmm6,%xmm1,%xmm1
movl %ecx,%ebp
shldl $5,%ecx,%ecx
vpaddd %xmm7,%xmm0,%xmm4
addl %esi,%ebx
xorl %edi,%ebp
shrdl $7,%edx,%edx
addl %ecx,%ebx
vmovdqa %xmm4,(%esp)
addl 20(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 24(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
shldl $5,%eax,%eax
addl %esi,%edi
xorl %ecx,%ebp
shrdl $7,%ebx,%ebx
addl %eax,%edi
addl 28(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
shldl $5,%edi,%edi
addl %ebp,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %edi,%edx
addl 32(%esp),%ecx
xorl %eax,%esi
vpshufb %xmm6,%xmm2,%xmm2
movl %edx,%ebp
shldl $5,%edx,%edx
vpaddd %xmm7,%xmm1,%xmm5
addl %esi,%ecx
xorl %eax,%ebp
shrdl $7,%edi,%edi
addl %edx,%ecx
vmovdqa %xmm5,16(%esp)
addl 36(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edi,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 40(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %edx,%ebp
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 44(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%edi
addl 48(%esp),%edx
xorl %ebx,%esi
vpshufb %xmm6,%xmm3,%xmm3
movl %edi,%ebp
shldl $5,%edi,%edi
vpaddd %xmm7,%xmm2,%xmm6
addl %esi,%edx
xorl %ebx,%ebp
shrdl $7,%eax,%eax
addl %edi,%edx
vmovdqa %xmm6,32(%esp)
addl 52(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %eax,%esi
shrdl $7,%edi,%edi
addl %edx,%ecx
addl 56(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edi,%ebp
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 60(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %ebp,%eax
shrdl $7,%ecx,%ecx
addl %ebx,%eax
movl 192(%esp),%ebp
addl (%ebp),%eax
addl 4(%ebp),%esi
addl 8(%ebp),%ecx
movl %eax,(%ebp)
addl 12(%ebp),%edx
movl %esi,4(%ebp)
addl 16(%ebp),%edi
movl %ecx,%ebx
movl %ecx,8(%ebp)
xorl %edx,%ebx
movl %edx,12(%ebp)
movl %edi,16(%ebp)
movl %esi,%ebp
andl %ebx,%esi
movl %ebp,%ebx
jmp .L005loop
.align 16
.L006done:
addl 16(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edi,%ebp
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 20(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %ebp,%eax
xorl %edx,%esi
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 24(%esp),%edi
xorl %ecx,%esi
movl %eax,%ebp
shldl $5,%eax,%eax
addl %esi,%edi
xorl %ecx,%ebp
shrdl $7,%ebx,%ebx
addl %eax,%edi
addl 28(%esp),%edx
xorl %ebx,%ebp
movl %edi,%esi
shldl $5,%edi,%edi
addl %ebp,%edx
xorl %ebx,%esi
shrdl $7,%eax,%eax
addl %edi,%edx
addl 32(%esp),%ecx
xorl %eax,%esi
movl %edx,%ebp
shldl $5,%edx,%edx
addl %esi,%ecx
xorl %eax,%ebp
shrdl $7,%edi,%edi
addl %edx,%ecx
addl 36(%esp),%ebx
xorl %edi,%ebp
movl %ecx,%esi
shldl $5,%ecx,%ecx
addl %ebp,%ebx
xorl %edi,%esi
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 40(%esp),%eax
xorl %edx,%esi
movl %ebx,%ebp
shldl $5,%ebx,%ebx
addl %esi,%eax
xorl %edx,%ebp
shrdl $7,%ecx,%ecx
addl %ebx,%eax
addl 44(%esp),%edi
xorl %ecx,%ebp
movl %eax,%esi
shldl $5,%eax,%eax
addl %ebp,%edi
xorl %ecx,%esi
shrdl $7,%ebx,%ebx
addl %eax,%edi
addl 48(%esp),%edx
xorl %ebx,%esi
movl %edi,%ebp
shldl $5,%edi,%edi
addl %esi,%edx
xorl %ebx,%ebp
shrdl $7,%eax,%eax
addl %edi,%edx
addl 52(%esp),%ecx
xorl %eax,%ebp
movl %edx,%esi
shldl $5,%edx,%edx
addl %ebp,%ecx
xorl %eax,%esi
shrdl $7,%edi,%edi
addl %edx,%ecx
addl 56(%esp),%ebx
xorl %edi,%esi
movl %ecx,%ebp
shldl $5,%ecx,%ecx
addl %esi,%ebx
xorl %edi,%ebp
shrdl $7,%edx,%edx
addl %ecx,%ebx
addl 60(%esp),%eax
xorl %edx,%ebp
movl %ebx,%esi
shldl $5,%ebx,%ebx
addl %ebp,%eax
shrdl $7,%ecx,%ecx
addl %ebx,%eax
vzeroall
movl 192(%esp),%ebp
addl (%ebp),%eax
movl 204(%esp),%esp
addl 4(%ebp),%esi
addl 8(%ebp),%ecx
movl %eax,(%ebp)
addl 12(%ebp),%edx
movl %esi,4(%ebp)
addl 16(%ebp),%edi
movl %ecx,8(%ebp)
movl %edx,12(%ebp)
movl %edi,16(%ebp)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size sha1_block_data_order_avx,.-.L_sha1_block_data_order_avx_begin
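// .LK_XX_XX below holds the constants shared by the SSSE3 and AVX paths:
// the four SHA-1 round constants (0x5a827999, 0x6ed9eba1, 0x8f1bbcdc,
// 0xca62c1d6), each broadcast across a 128-bit lane, followed by byte-swap
// shuffle masks for loading big-endian message words and the CRYPTOGAMS
// banner string.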
.align 64
.LK_XX_XX:
.long 1518500249,1518500249,1518500249,1518500249
.long 1859775393,1859775393,1859775393,1859775393
.long 2400959708,2400959708,2400959708,2400959708
.long 3395469782,3395469782,3395469782,3395469782
.long 66051,67438087,134810123,202182159
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115
.byte 102,111,114,109,32,102,111,114,32,120,56,54,44,32,67,82
.byte 89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112
.byte 114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// Begin file: thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-x86/crypto/fipsmodule/vpaes-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
#ifdef BORINGSSL_DISPATCH_TEST
#endif
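// .L_vpaes_consts: the lookup tables used by this vector-permutation AES
// implementation (per the banner bytes below, Mike Hamburg's "Vector
// Permutation AES"); they appear to contain the nibble masks, inversion and
// S-box tables, MixColumns/ShiftRows permutations, and key-schedule
// constants that the cores index from %ebp.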
.align 64
.L_vpaes_consts:
.long 218628480,235210255,168496130,67568393
.long 252381056,17041926,33884169,51187212
.long 252645135,252645135,252645135,252645135
.long 1512730624,3266504856,1377990664,3401244816
.long 830229760,1275146365,2969422977,3447763452
.long 3411033600,2979783055,338359620,2782886510
.long 4209124096,907596821,221174255,1006095553
.long 191964160,3799684038,3164090317,1589111125
.long 182528256,1777043520,2877432650,3265356744
.long 1874708224,3503451415,3305285752,363511674
.long 1606117888,3487855781,1093350906,2384367825
.long 197121,67569157,134941193,202313229
.long 67569157,134941193,202313229,197121
.long 134941193,202313229,197121,67569157
.long 202313229,197121,67569157,134941193
.long 33619971,100992007,168364043,235736079
.long 235736079,33619971,100992007,168364043
.long 168364043,235736079,33619971,100992007
.long 100992007,168364043,235736079,33619971
.long 50462976,117835012,185207048,252579084
.long 252314880,51251460,117574920,184942860
.long 184682752,252054788,50987272,118359308
.long 118099200,185467140,251790600,50727180
.long 2946363062,528716217,1300004225,1881839624
.long 1532713819,1532713819,1532713819,1532713819
.long 3602276352,4288629033,3737020424,4153884961
.long 1354558464,32357713,2958822624,3775749553
.long 1201988352,132424512,1572796698,503232858
.long 2213177600,1597421020,4103937655,675398315
.long 2749646592,4273543773,1511898873,121693092
.long 3040248576,1103263732,2871565598,1608280554
.long 2236667136,2588920351,482954393,64377734
.long 3069987328,291237287,2117370568,3650299247
.long 533321216,3573750986,2572112006,1401264716
.long 1339849704,2721158661,548607111,3445553514
.long 2128193280,3054596040,2183486460,1257083700
.long 655635200,1165381986,3923443150,2344132524
.long 190078720,256924420,290342170,357187870
.long 1610966272,2263057382,4103205268,309794674
.long 2592527872,2233205587,1335446729,3402964816
.long 3973531904,3225098121,3002836325,1918774430
.long 3870401024,2102906079,2284471353,4117666579
.long 617007872,1021508343,366931923,691083277
.long 2528395776,3491914898,2968704004,1613121270
.long 3445188352,3247741094,844474987,4093578302
.long 651481088,1190302358,1689581232,574775300
.long 4289380608,206939853,2555985458,2489840491
.long 2130264064,327674451,3566485037,3349835193
.long 2470714624,316102159,3636825756,3393945945
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105
.byte 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83
.byte 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117
.byte 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105
.byte 118,101,114,115,105,116,121,41,0
.align 64
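// _vpaes_preheat: appears to turn %ebp into an absolute pointer to the
// constant pool (by adding the return address pushed by the PIC call) and to
// cache two hot constants: an inversion-table row in %xmm7 and the 0x0f
// low-nibble mask in %xmm6.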
.hidden _vpaes_preheat
.type _vpaes_preheat,@function
.align 16
_vpaes_preheat:
addl (%esp),%ebp
movdqa -48(%ebp),%xmm7
movdqa -16(%ebp),%xmm6
ret
.size _vpaes_preheat,.-_vpaes_preheat
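// _vpaes_encrypt_core: encrypts the single block in %xmm0 with the key
// schedule at %edx (round count at 240(%edx)), leaving the result in %xmm0.
// The ".byte 102,15,56,0,..." sequences are pshufb instructions emitted as
// raw bytes; each round is built from nibble-indexed table lookups, the
// point of the vector-permutation approach being to keep memory accesses
// independent of the secret data.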
.hidden _vpaes_encrypt_core
.type _vpaes_encrypt_core,@function
.align 16
_vpaes_encrypt_core:
movl $16,%ecx
movl 240(%edx),%eax
movdqa %xmm6,%xmm1
movdqa (%ebp),%xmm2
pandn %xmm0,%xmm1
pand %xmm6,%xmm0
movdqu (%edx),%xmm5
.byte 102,15,56,0,208
movdqa 16(%ebp),%xmm0
pxor %xmm5,%xmm2
psrld $4,%xmm1
addl $16,%edx
.byte 102,15,56,0,193
leal 192(%ebp),%ebx
pxor %xmm2,%xmm0
jmp .L000enc_entry
.align 16
.L001enc_loop:
movdqa 32(%ebp),%xmm4
movdqa 48(%ebp),%xmm0
.byte 102,15,56,0,226
.byte 102,15,56,0,195
pxor %xmm5,%xmm4
movdqa 64(%ebp),%xmm5
pxor %xmm4,%xmm0
movdqa -64(%ebx,%ecx,1),%xmm1
.byte 102,15,56,0,234
movdqa 80(%ebp),%xmm2
movdqa (%ebx,%ecx,1),%xmm4
.byte 102,15,56,0,211
movdqa %xmm0,%xmm3
pxor %xmm5,%xmm2
.byte 102,15,56,0,193
addl $16,%edx
pxor %xmm2,%xmm0
.byte 102,15,56,0,220
addl $16,%ecx
pxor %xmm0,%xmm3
.byte 102,15,56,0,193
andl $48,%ecx
subl $1,%eax
pxor %xmm3,%xmm0
.L000enc_entry:
movdqa %xmm6,%xmm1
movdqa -32(%ebp),%xmm5
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm6,%xmm0
.byte 102,15,56,0,232
movdqa %xmm7,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm7,%xmm4
pxor %xmm5,%xmm3
.byte 102,15,56,0,224
movdqa %xmm7,%xmm2
pxor %xmm5,%xmm4
.byte 102,15,56,0,211
movdqa %xmm7,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%edx),%xmm5
pxor %xmm1,%xmm3
jnz .L001enc_loop
movdqa 96(%ebp),%xmm4
movdqa 112(%ebp),%xmm0
.byte 102,15,56,0,226
pxor %xmm5,%xmm4
.byte 102,15,56,0,195
movdqa 64(%ebx,%ecx,1),%xmm1
pxor %xmm4,%xmm0
.byte 102,15,56,0,193
ret
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
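// _vpaes_decrypt_core: same calling convention as the encrypt core, but it
// indexes the decryption tables 608 bytes into the constant pool (the
// "leal 608(%ebp),%ebx" below) and runs the inverse cipher.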
.hidden _vpaes_decrypt_core
.type _vpaes_decrypt_core,@function
.align 16
_vpaes_decrypt_core:
leal 608(%ebp),%ebx
movl 240(%edx),%eax
movdqa %xmm6,%xmm1
movdqa -64(%ebx),%xmm2
pandn %xmm0,%xmm1
movl %eax,%ecx
psrld $4,%xmm1
movdqu (%edx),%xmm5
shll $4,%ecx
pand %xmm6,%xmm0
.byte 102,15,56,0,208
movdqa -48(%ebx),%xmm0
xorl $48,%ecx
.byte 102,15,56,0,193
andl $48,%ecx
pxor %xmm5,%xmm2
movdqa 176(%ebp),%xmm5
pxor %xmm2,%xmm0
addl $16,%edx
leal -352(%ebx,%ecx,1),%ecx
jmp .L002dec_entry
.align 16
.L003dec_loop:
movdqa -32(%ebx),%xmm4
movdqa -16(%ebx),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
movdqa (%ebx),%xmm4
pxor %xmm1,%xmm0
movdqa 16(%ebx),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,197
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
movdqa 32(%ebx),%xmm4
pxor %xmm1,%xmm0
movdqa 48(%ebx),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,197
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
movdqa 64(%ebx),%xmm4
pxor %xmm1,%xmm0
movdqa 80(%ebx),%xmm1
.byte 102,15,56,0,226
.byte 102,15,56,0,197
.byte 102,15,56,0,203
pxor %xmm4,%xmm0
addl $16,%edx
.byte 102,15,58,15,237,12
pxor %xmm1,%xmm0
subl $1,%eax
.L002dec_entry:
movdqa %xmm6,%xmm1
movdqa -32(%ebp),%xmm2
pandn %xmm0,%xmm1
pand %xmm6,%xmm0
psrld $4,%xmm1
.byte 102,15,56,0,208
movdqa %xmm7,%xmm3
pxor %xmm1,%xmm0
.byte 102,15,56,0,217
movdqa %xmm7,%xmm4
pxor %xmm2,%xmm3
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm7,%xmm2
.byte 102,15,56,0,211
movdqa %xmm7,%xmm3
pxor %xmm0,%xmm2
.byte 102,15,56,0,220
movdqu (%edx),%xmm0
pxor %xmm1,%xmm3
jnz .L003dec_loop
movdqa 96(%ebx),%xmm4
.byte 102,15,56,0,226
pxor %xmm0,%xmm4
movdqa 112(%ebx),%xmm0
movdqa (%ecx),%xmm2
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
.byte 102,15,56,0,194
ret
.size _vpaes_decrypt_core,.-_vpaes_decrypt_core
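// _vpaes_schedule_core: expands the user key at (%esi) into a vpaes key
// schedule at (%edx); %eax carries the key size in bits (128/192/256,
// dispatched at .L005schedule_go) and a non-zero %edi selects the decryption
// schedule.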
.hidden _vpaes_schedule_core
.type _vpaes_schedule_core,@function
.align 16
_vpaes_schedule_core:
addl (%esp),%ebp
movdqu (%esi),%xmm0
movdqa 320(%ebp),%xmm2
movdqa %xmm0,%xmm3
leal (%ebp),%ebx
movdqa %xmm2,4(%esp)
call _vpaes_schedule_transform
movdqa %xmm0,%xmm7
testl %edi,%edi
jnz .L004schedule_am_decrypting
movdqu %xmm0,(%edx)
jmp .L005schedule_go
.L004schedule_am_decrypting:
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,217
movdqu %xmm3,(%edx)
xorl $48,%ecx
.L005schedule_go:
cmpl $192,%eax
ja .L006schedule_256
je .L007schedule_192
.L008schedule_128:
movl $10,%eax
.L009loop_schedule_128:
call _vpaes_schedule_round
decl %eax
jz .L010schedule_mangle_last
call _vpaes_schedule_mangle
jmp .L009loop_schedule_128
.align 16
.L007schedule_192:
movdqu 8(%esi),%xmm0
call _vpaes_schedule_transform
movdqa %xmm0,%xmm6
pxor %xmm4,%xmm4
movhlps %xmm4,%xmm6
movl $4,%eax
.L011loop_schedule_192:
call _vpaes_schedule_round
.byte 102,15,58,15,198,8
call _vpaes_schedule_mangle
call _vpaes_schedule_192_smear
call _vpaes_schedule_mangle
call _vpaes_schedule_round
decl %eax
jz .L010schedule_mangle_last
call _vpaes_schedule_mangle
call _vpaes_schedule_192_smear
jmp .L011loop_schedule_192
.align 16
.L006schedule_256:
movdqu 16(%esi),%xmm0
call _vpaes_schedule_transform
movl $7,%eax
.L012loop_schedule_256:
call _vpaes_schedule_mangle
movdqa %xmm0,%xmm6
call _vpaes_schedule_round
decl %eax
jz .L010schedule_mangle_last
call _vpaes_schedule_mangle
pshufd $255,%xmm0,%xmm0
movdqa %xmm7,20(%esp)
movdqa %xmm6,%xmm7
call .L_vpaes_schedule_low_round
movdqa 20(%esp),%xmm7
jmp .L012loop_schedule_256
.align 16
.L010schedule_mangle_last:
leal 384(%ebp),%ebx
testl %edi,%edi
jnz .L013schedule_mangle_last_dec
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,193
leal 352(%ebp),%ebx
addl $32,%edx
.L013schedule_mangle_last_dec:
addl $-16,%edx
pxor 336(%ebp),%xmm0
call _vpaes_schedule_transform
movdqu %xmm0,(%edx)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
ret
.size _vpaes_schedule_core,.-_vpaes_schedule_core
.hidden _vpaes_schedule_192_smear
.type _vpaes_schedule_192_smear,@function
.align 16
_vpaes_schedule_192_smear:
pshufd $128,%xmm6,%xmm1
pshufd $254,%xmm7,%xmm0
pxor %xmm1,%xmm6
pxor %xmm1,%xmm1
pxor %xmm0,%xmm6
movdqa %xmm6,%xmm0
movhlps %xmm1,%xmm6
ret
.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
.hidden _vpaes_schedule_round
.type _vpaes_schedule_round,@function
.align 16
_vpaes_schedule_round:
movdqa 8(%esp),%xmm2
pxor %xmm1,%xmm1
.byte 102,15,58,15,202,15
.byte 102,15,58,15,210,15
pxor %xmm1,%xmm7
pshufd $255,%xmm0,%xmm0
.byte 102,15,58,15,192,1
movdqa %xmm2,8(%esp)
.L_vpaes_schedule_low_round:
movdqa %xmm7,%xmm1
pslldq $4,%xmm7
pxor %xmm1,%xmm7
movdqa %xmm7,%xmm1
pslldq $8,%xmm7
pxor %xmm1,%xmm7
pxor 336(%ebp),%xmm7
movdqa -16(%ebp),%xmm4
movdqa -48(%ebp),%xmm5
movdqa %xmm4,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm4,%xmm0
movdqa -32(%ebp),%xmm2
.byte 102,15,56,0,208
pxor %xmm1,%xmm0
movdqa %xmm5,%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
movdqa %xmm5,%xmm4
.byte 102,15,56,0,224
pxor %xmm2,%xmm4
movdqa %xmm5,%xmm2
.byte 102,15,56,0,211
pxor %xmm0,%xmm2
movdqa %xmm5,%xmm3
.byte 102,15,56,0,220
pxor %xmm1,%xmm3
movdqa 32(%ebp),%xmm4
.byte 102,15,56,0,226
movdqa 48(%ebp),%xmm0
.byte 102,15,56,0,195
pxor %xmm4,%xmm0
pxor %xmm7,%xmm0
movdqa %xmm0,%xmm7
ret
.size _vpaes_schedule_round,.-_vpaes_schedule_round
.hidden _vpaes_schedule_transform
.type _vpaes_schedule_transform,@function
.align 16
_vpaes_schedule_transform:
movdqa -16(%ebp),%xmm2
movdqa %xmm2,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm0
movdqa (%ebx),%xmm2
.byte 102,15,56,0,208
movdqa 16(%ebx),%xmm0
.byte 102,15,56,0,193
pxor %xmm2,%xmm0
ret
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
.hidden _vpaes_schedule_mangle
.type _vpaes_schedule_mangle,@function
.align 16
_vpaes_schedule_mangle:
movdqa %xmm0,%xmm4
movdqa 128(%ebp),%xmm5
testl %edi,%edi
jnz .L014schedule_mangle_dec
addl $16,%edx
pxor 336(%ebp),%xmm4
.byte 102,15,56,0,229
movdqa %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
.byte 102,15,56,0,229
pxor %xmm4,%xmm3
jmp .L015schedule_mangle_both
.align 16
.L014schedule_mangle_dec:
movdqa -16(%ebp),%xmm2
leal 416(%ebp),%esi
movdqa %xmm2,%xmm1
pandn %xmm4,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm4
movdqa (%esi),%xmm2
.byte 102,15,56,0,212
movdqa 16(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 32(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 48(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 64(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 80(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
.byte 102,15,56,0,221
movdqa 96(%esi),%xmm2
.byte 102,15,56,0,212
pxor %xmm3,%xmm2
movdqa 112(%esi),%xmm3
.byte 102,15,56,0,217
pxor %xmm2,%xmm3
addl $-16,%edx
.L015schedule_mangle_both:
movdqa 256(%ebp,%ecx,1),%xmm1
.byte 102,15,56,0,217
addl $-16,%ecx
andl $48,%ecx
movdqu %xmm3,(%edx)
ret
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
.globl vpaes_set_encrypt_key
.hidden vpaes_set_encrypt_key
.type vpaes_set_encrypt_key,@function
.align 16
vpaes_set_encrypt_key:
.L_vpaes_set_encrypt_key_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L016pic
.L016pic:
popl %ebx
leal BORINGSSL_function_hit+5-.L016pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%eax
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movl %eax,%ebx
shrl $5,%ebx
addl $5,%ebx
movl %ebx,240(%edx)
movl $48,%ecx
movl $0,%edi
leal .L_vpaes_consts+0x30-.L017pic_point,%ebp
call _vpaes_schedule_core
.L017pic_point:
movl 48(%esp),%esp
xorl %eax,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_set_encrypt_key,.-.L_vpaes_set_encrypt_key_begin
.globl vpaes_set_decrypt_key
.hidden vpaes_set_decrypt_key
.type vpaes_set_decrypt_key,@function
.align 16
vpaes_set_decrypt_key:
.L_vpaes_set_decrypt_key_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%eax
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movl %eax,%ebx
shrl $5,%ebx
addl $5,%ebx
movl %ebx,240(%edx)
shll $4,%ebx
leal 16(%edx,%ebx,1),%edx
movl $1,%edi
movl %eax,%ecx
shrl $1,%ecx
andl $32,%ecx
xorl $32,%ecx
leal .L_vpaes_consts+0x30-.L018pic_point,%ebp
call _vpaes_schedule_core
.L018pic_point:
movl 48(%esp),%esp
xorl %eax,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_set_decrypt_key,.-.L_vpaes_set_decrypt_key_begin
.globl vpaes_encrypt
.hidden vpaes_encrypt
.type vpaes_encrypt,@function
.align 16
vpaes_encrypt:
.L_vpaes_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L019pic
.L019pic:
popl %ebx
leal BORINGSSL_function_hit+4-.L019pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
leal .L_vpaes_consts+0x30-.L020pic_point,%ebp
call _vpaes_preheat
.L020pic_point:
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%edi
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movdqu (%esi),%xmm0
call _vpaes_encrypt_core
movdqu %xmm0,(%edi)
movl 48(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_encrypt,.-.L_vpaes_encrypt_begin
.globl vpaes_decrypt
.hidden vpaes_decrypt
.type vpaes_decrypt,@function
.align 16
vpaes_decrypt:
.L_vpaes_decrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
leal .L_vpaes_consts+0x30-.L021pic_point,%ebp
call _vpaes_preheat
.L021pic_point:
movl 20(%esp),%esi
leal -56(%esp),%ebx
movl 24(%esp),%edi
andl $-16,%ebx
movl 28(%esp),%edx
xchgl %esp,%ebx
movl %ebx,48(%esp)
movdqu (%esi),%xmm0
call _vpaes_decrypt_core
movdqu %xmm0,(%edi)
movl 48(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_decrypt,.-.L_vpaes_decrypt_begin
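// vpaes_cbc_encrypt: CBC mode built on the single-block cores, apparently
// with the usual (in, out, length, key, ivec, enc) argument order. The IV is
// chained through %xmm1, inputs shorter than 16 bytes are rejected up front
// ("subl $16,%eax; jc"), and the final chaining value is written back to the
// ivec buffer on exit.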
.globl vpaes_cbc_encrypt
.hidden vpaes_cbc_encrypt
.type vpaes_cbc_encrypt,@function
.align 16
vpaes_cbc_encrypt:
.L_vpaes_cbc_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
subl $16,%eax
jc .L022cbc_abort
leal -56(%esp),%ebx
movl 36(%esp),%ebp
andl $-16,%ebx
movl 40(%esp),%ecx
xchgl %esp,%ebx
movdqu (%ebp),%xmm1
subl %esi,%edi
movl %ebx,48(%esp)
movl %edi,(%esp)
movl %edx,4(%esp)
movl %ebp,8(%esp)
movl %eax,%edi
leal .L_vpaes_consts+0x30-.L023pic_point,%ebp
call _vpaes_preheat
.L023pic_point:
cmpl $0,%ecx
je .L024cbc_dec_loop
jmp .L025cbc_enc_loop
.align 16
.L025cbc_enc_loop:
movdqu (%esi),%xmm0
pxor %xmm1,%xmm0
call _vpaes_encrypt_core
movl (%esp),%ebx
movl 4(%esp),%edx
movdqa %xmm0,%xmm1
movdqu %xmm0,(%ebx,%esi,1)
leal 16(%esi),%esi
subl $16,%edi
jnc .L025cbc_enc_loop
jmp .L026cbc_done
.align 16
.L024cbc_dec_loop:
movdqu (%esi),%xmm0
movdqa %xmm1,16(%esp)
movdqa %xmm0,32(%esp)
call _vpaes_decrypt_core
movl (%esp),%ebx
movl 4(%esp),%edx
pxor 16(%esp),%xmm0
movdqa 32(%esp),%xmm1
movdqu %xmm0,(%ebx,%esi,1)
leal 16(%esi),%esi
subl $16,%edi
jnc .L024cbc_dec_loop
.L026cbc_done:
movl 8(%esp),%ebx
movl 48(%esp),%esp
movdqu %xmm1,(%ebx)
.L022cbc_abort:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size vpaes_cbc_encrypt,.-.L_vpaes_cbc_encrypt_begin
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// Begin file: thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-x86/crypto/fipsmodule/co-586.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
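// bn_mul_comba8(r, a, b): 8x8-limb (256-bit by 256-bit) multiplication using
// the Comba, i.e. column-wise, method: each 32-bit result limb is one
// diagonal of partial products, accumulated in the rotating %ebx/%ecx/%ebp
// triple before being stored. As an illustrative C sketch (not the generated
// code; the accumulator must be wider than 64 bits):
//   for (k = 0; k < 15; k++) {
//     for (i = (k > 7 ? k - 7 : 0); i <= (k < 7 ? k : 7); i++)
//       acc += (uint64_t)a[i] * b[k - i];
//     r[k] = (uint32_t)acc; acc >>= 32;
//   }
//   r[15] = (uint32_t)acc;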
.globl bn_mul_comba8
.hidden bn_mul_comba8
.type bn_mul_comba8,@function
.align 16
bn_mul_comba8:
.L_bn_mul_comba8_begin:
pushl %esi
movl 12(%esp),%esi
pushl %edi
movl 20(%esp),%edi
pushl %ebp
pushl %ebx
xorl %ebx,%ebx
movl (%esi),%eax
xorl %ecx,%ecx
movl (%edi),%edx
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl (%edi),%edx
adcl $0,%ebp
movl %ebx,(%eax)
movl 4(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%ecx
movl (%esi),%eax
adcl %edx,%ebp
movl 4(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl (%edi),%edx
adcl $0,%ebx
movl %ecx,4(%eax)
movl 8(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%ebp
movl 4(%esi),%eax
adcl %edx,%ebx
movl 4(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl (%esi),%eax
adcl %edx,%ebx
movl 8(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl (%edi),%edx
adcl $0,%ecx
movl %ebp,8(%eax)
movl 12(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 8(%esi),%eax
adcl %edx,%ecx
movl 4(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 4(%esi),%eax
adcl %edx,%ecx
movl 8(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl (%esi),%eax
adcl %edx,%ecx
movl 12(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl (%edi),%edx
adcl $0,%ebp
movl %ebx,12(%eax)
movl 16(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%ecx
movl 12(%esi),%eax
adcl %edx,%ebp
movl 4(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 8(%esi),%eax
adcl %edx,%ebp
movl 8(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 4(%esi),%eax
adcl %edx,%ebp
movl 12(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl (%esi),%eax
adcl %edx,%ebp
movl 16(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl (%edi),%edx
adcl $0,%ebx
movl %ecx,16(%eax)
movl 20(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%ebp
movl 16(%esi),%eax
adcl %edx,%ebx
movl 4(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 12(%esi),%eax
adcl %edx,%ebx
movl 8(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 8(%esi),%eax
adcl %edx,%ebx
movl 12(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 4(%esi),%eax
adcl %edx,%ebx
movl 16(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl (%esi),%eax
adcl %edx,%ebx
movl 20(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl (%edi),%edx
adcl $0,%ecx
movl %ebp,20(%eax)
movl 24(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esi),%eax
adcl %edx,%ecx
movl 4(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 16(%esi),%eax
adcl %edx,%ecx
movl 8(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 12(%esi),%eax
adcl %edx,%ecx
movl 12(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 8(%esi),%eax
adcl %edx,%ecx
movl 16(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 4(%esi),%eax
adcl %edx,%ecx
movl 20(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl (%esi),%eax
adcl %edx,%ecx
movl 24(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl (%edi),%edx
adcl $0,%ebp
movl %ebx,24(%eax)
movl 28(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%ecx
movl 24(%esi),%eax
adcl %edx,%ebp
movl 4(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esi),%eax
adcl %edx,%ebp
movl 8(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 16(%esi),%eax
adcl %edx,%ebp
movl 12(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 12(%esi),%eax
adcl %edx,%ebp
movl 16(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 8(%esi),%eax
adcl %edx,%ebp
movl 20(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 4(%esi),%eax
adcl %edx,%ebp
movl 24(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl (%esi),%eax
adcl %edx,%ebp
movl 28(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl 4(%edi),%edx
adcl $0,%ebx
movl %ecx,28(%eax)
movl 28(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%ebp
movl 24(%esi),%eax
adcl %edx,%ebx
movl 8(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esi),%eax
adcl %edx,%ebx
movl 12(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 16(%esi),%eax
adcl %edx,%ebx
movl 16(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 12(%esi),%eax
adcl %edx,%ebx
movl 20(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 8(%esi),%eax
adcl %edx,%ebx
movl 24(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 4(%esi),%eax
adcl %edx,%ebx
movl 28(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl 8(%edi),%edx
adcl $0,%ecx
movl %ebp,32(%eax)
movl 28(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 24(%esi),%eax
adcl %edx,%ecx
movl 12(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esi),%eax
adcl %edx,%ecx
movl 16(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 16(%esi),%eax
adcl %edx,%ecx
movl 20(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 12(%esi),%eax
adcl %edx,%ecx
movl 24(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 8(%esi),%eax
adcl %edx,%ecx
movl 28(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl 12(%edi),%edx
adcl $0,%ebp
movl %ebx,36(%eax)
movl 28(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%ecx
movl 24(%esi),%eax
adcl %edx,%ebp
movl 16(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esi),%eax
adcl %edx,%ebp
movl 20(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 16(%esi),%eax
adcl %edx,%ebp
movl 24(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 12(%esi),%eax
adcl %edx,%ebp
movl 28(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl 16(%edi),%edx
adcl $0,%ebx
movl %ecx,40(%eax)
movl 28(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%ebp
movl 24(%esi),%eax
adcl %edx,%ebx
movl 20(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esi),%eax
adcl %edx,%ebx
movl 24(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 16(%esi),%eax
adcl %edx,%ebx
movl 28(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl 20(%edi),%edx
adcl $0,%ecx
movl %ebp,44(%eax)
movl 28(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 24(%esi),%eax
adcl %edx,%ecx
movl 24(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esi),%eax
adcl %edx,%ecx
movl 28(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl 24(%edi),%edx
adcl $0,%ebp
movl %ebx,48(%eax)
movl 28(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%ecx
movl 24(%esi),%eax
adcl %edx,%ebp
movl 28(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl 28(%edi),%edx
adcl $0,%ebx
movl %ecx,52(%eax)
movl 28(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
adcl $0,%ecx
movl %ebp,56(%eax)
movl %ebx,60(%eax)
popl %ebx
popl %ebp
popl %edi
popl %esi
ret
.size bn_mul_comba8,.-.L_bn_mul_comba8_begin
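// bn_mul_comba4: the 4x4-limb variant of the same column-wise multiplication,
// producing an 8-limb result.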
.globl bn_mul_comba4
.hidden bn_mul_comba4
.type bn_mul_comba4,@function
.align 16
bn_mul_comba4:
.L_bn_mul_comba4_begin:
pushl %esi
movl 12(%esp),%esi
pushl %edi
movl 20(%esp),%edi
pushl %ebp
pushl %ebx
xorl %ebx,%ebx
movl (%esi),%eax
xorl %ecx,%ecx
movl (%edi),%edx
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl (%edi),%edx
adcl $0,%ebp
movl %ebx,(%eax)
movl 4(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%ecx
movl (%esi),%eax
adcl %edx,%ebp
movl 4(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl (%edi),%edx
adcl $0,%ebx
movl %ecx,4(%eax)
movl 8(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%ebp
movl 4(%esi),%eax
adcl %edx,%ebx
movl 4(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl (%esi),%eax
adcl %edx,%ebx
movl 8(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl (%edi),%edx
adcl $0,%ecx
movl %ebp,8(%eax)
movl 12(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 8(%esi),%eax
adcl %edx,%ecx
movl 4(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 4(%esi),%eax
adcl %edx,%ecx
movl 8(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl (%esi),%eax
adcl %edx,%ecx
movl 12(%edi),%edx
adcl $0,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
movl 4(%edi),%edx
adcl $0,%ebp
movl %ebx,12(%eax)
movl 12(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%ecx
movl 8(%esi),%eax
adcl %edx,%ebp
movl 8(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 4(%esi),%eax
adcl %edx,%ebp
movl 12(%edi),%edx
adcl $0,%ebx
mull %edx
addl %eax,%ecx
movl 20(%esp),%eax
adcl %edx,%ebp
movl 8(%edi),%edx
adcl $0,%ebx
movl %ecx,16(%eax)
movl 12(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%ebp
movl 8(%esi),%eax
adcl %edx,%ebx
movl 12(%edi),%edx
adcl $0,%ecx
mull %edx
addl %eax,%ebp
movl 20(%esp),%eax
adcl %edx,%ebx
movl 12(%edi),%edx
adcl $0,%ecx
movl %ebp,20(%eax)
movl 12(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%ebx
movl 20(%esp),%eax
adcl %edx,%ecx
adcl $0,%ebp
movl %ebx,24(%eax)
movl %ecx,28(%eax)
popl %ebx
popl %ebp
popl %edi
popl %esi
ret
.size bn_mul_comba4,.-.L_bn_mul_comba4_begin
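// bn_sqr_comba8: Comba squaring of an 8-limb input. Off-diagonal products
// a[i]*a[j] with i != j are needed twice, so they are doubled in place (the
// "addl %eax,%eax; adcl %edx,%edx" pairs) before joining the column sum;
// diagonal terms a[i]*a[i] are added once.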
.globl bn_sqr_comba8
.hidden bn_sqr_comba8
.type bn_sqr_comba8,@function
.align 16
bn_sqr_comba8:
.L_bn_sqr_comba8_begin:
pushl %esi
pushl %edi
pushl %ebp
pushl %ebx
movl 20(%esp),%edi
movl 24(%esp),%esi
xorl %ebx,%ebx
xorl %ecx,%ecx
movl (%esi),%eax
xorl %ebp,%ebp
mull %eax
addl %eax,%ebx
adcl %edx,%ecx
movl (%esi),%edx
adcl $0,%ebp
movl %ebx,(%edi)
movl 4(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 8(%esi),%eax
adcl $0,%ebx
movl %ecx,4(%edi)
movl (%esi),%edx
xorl %ecx,%ecx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 4(%esi),%eax
adcl $0,%ecx
mull %eax
addl %eax,%ebp
adcl %edx,%ebx
movl (%esi),%edx
adcl $0,%ecx
movl %ebp,8(%edi)
movl 12(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 8(%esi),%eax
adcl $0,%ebp
movl 4(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 16(%esi),%eax
adcl $0,%ebp
movl %ebx,12(%edi)
movl (%esi),%edx
xorl %ebx,%ebx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 12(%esi),%eax
adcl $0,%ebx
movl 4(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 8(%esi),%eax
adcl $0,%ebx
mull %eax
addl %eax,%ecx
adcl %edx,%ebp
movl (%esi),%edx
adcl $0,%ebx
movl %ecx,16(%edi)
movl 20(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 16(%esi),%eax
adcl $0,%ecx
movl 4(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 12(%esi),%eax
adcl $0,%ecx
movl 8(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 24(%esi),%eax
adcl $0,%ecx
movl %ebp,20(%edi)
movl (%esi),%edx
xorl %ebp,%ebp
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 20(%esi),%eax
adcl $0,%ebp
movl 4(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 16(%esi),%eax
adcl $0,%ebp
movl 8(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 12(%esi),%eax
adcl $0,%ebp
mull %eax
addl %eax,%ebx
adcl %edx,%ecx
movl (%esi),%edx
adcl $0,%ebp
movl %ebx,24(%edi)
movl 28(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 24(%esi),%eax
adcl $0,%ebx
movl 4(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 20(%esi),%eax
adcl $0,%ebx
movl 8(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 16(%esi),%eax
adcl $0,%ebx
movl 12(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 28(%esi),%eax
adcl $0,%ebx
movl %ecx,28(%edi)
movl 4(%esi),%edx
xorl %ecx,%ecx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 24(%esi),%eax
adcl $0,%ecx
movl 8(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 20(%esi),%eax
adcl $0,%ecx
movl 12(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 16(%esi),%eax
adcl $0,%ecx
mull %eax
addl %eax,%ebp
adcl %edx,%ebx
movl 8(%esi),%edx
adcl $0,%ecx
movl %ebp,32(%edi)
movl 28(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 24(%esi),%eax
adcl $0,%ebp
movl 12(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 20(%esi),%eax
adcl $0,%ebp
movl 16(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 28(%esi),%eax
adcl $0,%ebp
movl %ebx,36(%edi)
movl 12(%esi),%edx
xorl %ebx,%ebx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 24(%esi),%eax
adcl $0,%ebx
movl 16(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 20(%esi),%eax
adcl $0,%ebx
mull %eax
addl %eax,%ecx
adcl %edx,%ebp
movl 16(%esi),%edx
adcl $0,%ebx
movl %ecx,40(%edi)
movl 28(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 24(%esi),%eax
adcl $0,%ecx
movl 20(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 28(%esi),%eax
adcl $0,%ecx
movl %ebp,44(%edi)
movl 20(%esi),%edx
xorl %ebp,%ebp
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 24(%esi),%eax
adcl $0,%ebp
mull %eax
addl %eax,%ebx
adcl %edx,%ecx
movl 24(%esi),%edx
adcl $0,%ebp
movl %ebx,48(%edi)
movl 28(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 28(%esi),%eax
adcl $0,%ebx
movl %ecx,52(%edi)
xorl %ecx,%ecx
mull %eax
addl %eax,%ebp
adcl %edx,%ebx
adcl $0,%ecx
movl %ebp,56(%edi)
movl %ebx,60(%edi)
popl %ebx
popl %ebp
popl %edi
popl %esi
ret
.size bn_sqr_comba8,.-.L_bn_sqr_comba8_begin
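// bn_sqr_comba4: r[0..7] = a[0..3]^2, the 4-word variant of the squaring
// routine above.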
.globl bn_sqr_comba4
.hidden bn_sqr_comba4
.type bn_sqr_comba4,@function
.align 16
bn_sqr_comba4:
.L_bn_sqr_comba4_begin:
pushl %esi
pushl %edi
pushl %ebp
pushl %ebx
movl 20(%esp),%edi
movl 24(%esp),%esi
xorl %ebx,%ebx
xorl %ecx,%ecx
movl (%esi),%eax
xorl %ebp,%ebp
mull %eax
addl %eax,%ebx
adcl %edx,%ecx
movl (%esi),%edx
adcl $0,%ebp
movl %ebx,(%edi)
movl 4(%esi),%eax
xorl %ebx,%ebx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 8(%esi),%eax
adcl $0,%ebx
movl %ecx,4(%edi)
movl (%esi),%edx
xorl %ecx,%ecx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 4(%esi),%eax
adcl $0,%ecx
mull %eax
addl %eax,%ebp
adcl %edx,%ebx
movl (%esi),%edx
adcl $0,%ecx
movl %ebp,8(%edi)
movl 12(%esi),%eax
xorl %ebp,%ebp
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 8(%esi),%eax
adcl $0,%ebp
movl 4(%esi),%edx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebp
addl %eax,%ebx
adcl %edx,%ecx
movl 12(%esi),%eax
adcl $0,%ebp
movl %ebx,12(%edi)
movl 4(%esi),%edx
xorl %ebx,%ebx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ebx
addl %eax,%ecx
adcl %edx,%ebp
movl 8(%esi),%eax
adcl $0,%ebx
mull %eax
addl %eax,%ecx
adcl %edx,%ebp
movl 8(%esi),%edx
adcl $0,%ebx
movl %ecx,16(%edi)
movl 12(%esi),%eax
xorl %ecx,%ecx
mull %edx
addl %eax,%eax
adcl %edx,%edx
adcl $0,%ecx
addl %eax,%ebp
adcl %edx,%ebx
movl 12(%esi),%eax
adcl $0,%ecx
movl %ebp,20(%edi)
xorl %ebp,%ebp
mull %eax
addl %eax,%ebx
adcl %edx,%ecx
adcl $0,%ebp
movl %ebx,24(%edi)
movl %ecx,28(%edi)
popl %ebx
popl %ebp
popl %edi
popl %esi
ret
.size bn_sqr_comba4,.-.L_bn_sqr_comba4_begin
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
marvin-hansen/iggy-streaming-system | 11,526 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-x86/crypto/fipsmodule/md5-586.S

// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
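// md5_block_asm_data_order(state, data, num): runs the MD5 compression
// function over `num` 64-byte blocks (shll $6 converts the block count into
// the byte length used as the loop bound).  The chaining values A,B,C,D live
// in %eax,%ebx,%ecx,%edx; the 32-bit immediates folded in via leal are the
// MD5 additive constants T[i] = floor(abs(sin(i)) * 2^32), and the roll
// counts (7,12,17,22 / 5,9,14,20 / 4,11,16,23 / 6,10,15,21) are the
// per-round rotation amounts from RFC 1321.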
.globl md5_block_asm_data_order
.hidden md5_block_asm_data_order
.type md5_block_asm_data_order,@function
.align 16
md5_block_asm_data_order:
.L_md5_block_asm_data_order_begin:
pushl %esi
pushl %edi
movl 12(%esp),%edi
movl 16(%esp),%esi
movl 20(%esp),%ecx
pushl %ebp
shll $6,%ecx
pushl %ebx
addl %esi,%ecx
subl $64,%ecx
movl (%edi),%eax
pushl %ecx
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
.L000start:
movl %ecx,%edi
movl (%esi),%ebp
xorl %edx,%edi
andl %ebx,%edi
leal 3614090360(%eax,%ebp,1),%eax
xorl %edx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $7,%eax
movl 4(%esi),%ebp
addl %ebx,%eax
xorl %ecx,%edi
andl %eax,%edi
leal 3905402710(%edx,%ebp,1),%edx
xorl %ecx,%edi
addl %edi,%edx
movl %eax,%edi
roll $12,%edx
movl 8(%esi),%ebp
addl %eax,%edx
xorl %ebx,%edi
andl %edx,%edi
leal 606105819(%ecx,%ebp,1),%ecx
xorl %ebx,%edi
addl %edi,%ecx
movl %edx,%edi
roll $17,%ecx
movl 12(%esi),%ebp
addl %edx,%ecx
xorl %eax,%edi
andl %ecx,%edi
leal 3250441966(%ebx,%ebp,1),%ebx
xorl %eax,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $22,%ebx
movl 16(%esi),%ebp
addl %ecx,%ebx
xorl %edx,%edi
andl %ebx,%edi
leal 4118548399(%eax,%ebp,1),%eax
xorl %edx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $7,%eax
movl 20(%esi),%ebp
addl %ebx,%eax
xorl %ecx,%edi
andl %eax,%edi
leal 1200080426(%edx,%ebp,1),%edx
xorl %ecx,%edi
addl %edi,%edx
movl %eax,%edi
roll $12,%edx
movl 24(%esi),%ebp
addl %eax,%edx
xorl %ebx,%edi
andl %edx,%edi
leal 2821735955(%ecx,%ebp,1),%ecx
xorl %ebx,%edi
addl %edi,%ecx
movl %edx,%edi
roll $17,%ecx
movl 28(%esi),%ebp
addl %edx,%ecx
xorl %eax,%edi
andl %ecx,%edi
leal 4249261313(%ebx,%ebp,1),%ebx
xorl %eax,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $22,%ebx
movl 32(%esi),%ebp
addl %ecx,%ebx
xorl %edx,%edi
andl %ebx,%edi
leal 1770035416(%eax,%ebp,1),%eax
xorl %edx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $7,%eax
movl 36(%esi),%ebp
addl %ebx,%eax
xorl %ecx,%edi
andl %eax,%edi
leal 2336552879(%edx,%ebp,1),%edx
xorl %ecx,%edi
addl %edi,%edx
movl %eax,%edi
roll $12,%edx
movl 40(%esi),%ebp
addl %eax,%edx
xorl %ebx,%edi
andl %edx,%edi
leal 4294925233(%ecx,%ebp,1),%ecx
xorl %ebx,%edi
addl %edi,%ecx
movl %edx,%edi
roll $17,%ecx
movl 44(%esi),%ebp
addl %edx,%ecx
xorl %eax,%edi
andl %ecx,%edi
leal 2304563134(%ebx,%ebp,1),%ebx
xorl %eax,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $22,%ebx
movl 48(%esi),%ebp
addl %ecx,%ebx
xorl %edx,%edi
andl %ebx,%edi
leal 1804603682(%eax,%ebp,1),%eax
xorl %edx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $7,%eax
movl 52(%esi),%ebp
addl %ebx,%eax
xorl %ecx,%edi
andl %eax,%edi
leal 4254626195(%edx,%ebp,1),%edx
xorl %ecx,%edi
addl %edi,%edx
movl %eax,%edi
roll $12,%edx
movl 56(%esi),%ebp
addl %eax,%edx
xorl %ebx,%edi
andl %edx,%edi
leal 2792965006(%ecx,%ebp,1),%ecx
xorl %ebx,%edi
addl %edi,%ecx
movl %edx,%edi
roll $17,%ecx
movl 60(%esi),%ebp
addl %edx,%ecx
xorl %eax,%edi
andl %ecx,%edi
leal 1236535329(%ebx,%ebp,1),%ebx
xorl %eax,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $22,%ebx
movl 4(%esi),%ebp
addl %ecx,%ebx
leal 4129170786(%eax,%ebp,1),%eax
xorl %ebx,%edi
andl %edx,%edi
movl 24(%esi),%ebp
xorl %ecx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $5,%eax
addl %ebx,%eax
leal 3225465664(%edx,%ebp,1),%edx
xorl %eax,%edi
andl %ecx,%edi
movl 44(%esi),%ebp
xorl %ebx,%edi
addl %edi,%edx
movl %eax,%edi
roll $9,%edx
addl %eax,%edx
leal 643717713(%ecx,%ebp,1),%ecx
xorl %edx,%edi
andl %ebx,%edi
movl (%esi),%ebp
xorl %eax,%edi
addl %edi,%ecx
movl %edx,%edi
roll $14,%ecx
addl %edx,%ecx
leal 3921069994(%ebx,%ebp,1),%ebx
xorl %ecx,%edi
andl %eax,%edi
movl 20(%esi),%ebp
xorl %edx,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $20,%ebx
addl %ecx,%ebx
leal 3593408605(%eax,%ebp,1),%eax
xorl %ebx,%edi
andl %edx,%edi
movl 40(%esi),%ebp
xorl %ecx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $5,%eax
addl %ebx,%eax
leal 38016083(%edx,%ebp,1),%edx
xorl %eax,%edi
andl %ecx,%edi
movl 60(%esi),%ebp
xorl %ebx,%edi
addl %edi,%edx
movl %eax,%edi
roll $9,%edx
addl %eax,%edx
leal 3634488961(%ecx,%ebp,1),%ecx
xorl %edx,%edi
andl %ebx,%edi
movl 16(%esi),%ebp
xorl %eax,%edi
addl %edi,%ecx
movl %edx,%edi
roll $14,%ecx
addl %edx,%ecx
leal 3889429448(%ebx,%ebp,1),%ebx
xorl %ecx,%edi
andl %eax,%edi
movl 36(%esi),%ebp
xorl %edx,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $20,%ebx
addl %ecx,%ebx
leal 568446438(%eax,%ebp,1),%eax
xorl %ebx,%edi
andl %edx,%edi
movl 56(%esi),%ebp
xorl %ecx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $5,%eax
addl %ebx,%eax
leal 3275163606(%edx,%ebp,1),%edx
xorl %eax,%edi
andl %ecx,%edi
movl 12(%esi),%ebp
xorl %ebx,%edi
addl %edi,%edx
movl %eax,%edi
roll $9,%edx
addl %eax,%edx
leal 4107603335(%ecx,%ebp,1),%ecx
xorl %edx,%edi
andl %ebx,%edi
movl 32(%esi),%ebp
xorl %eax,%edi
addl %edi,%ecx
movl %edx,%edi
roll $14,%ecx
addl %edx,%ecx
leal 1163531501(%ebx,%ebp,1),%ebx
xorl %ecx,%edi
andl %eax,%edi
movl 52(%esi),%ebp
xorl %edx,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $20,%ebx
addl %ecx,%ebx
leal 2850285829(%eax,%ebp,1),%eax
xorl %ebx,%edi
andl %edx,%edi
movl 8(%esi),%ebp
xorl %ecx,%edi
addl %edi,%eax
movl %ebx,%edi
roll $5,%eax
addl %ebx,%eax
leal 4243563512(%edx,%ebp,1),%edx
xorl %eax,%edi
andl %ecx,%edi
movl 28(%esi),%ebp
xorl %ebx,%edi
addl %edi,%edx
movl %eax,%edi
roll $9,%edx
addl %eax,%edx
leal 1735328473(%ecx,%ebp,1),%ecx
xorl %edx,%edi
andl %ebx,%edi
movl 48(%esi),%ebp
xorl %eax,%edi
addl %edi,%ecx
movl %edx,%edi
roll $14,%ecx
addl %edx,%ecx
leal 2368359562(%ebx,%ebp,1),%ebx
xorl %ecx,%edi
andl %eax,%edi
movl 20(%esi),%ebp
xorl %edx,%edi
addl %edi,%ebx
movl %ecx,%edi
roll $20,%ebx
addl %ecx,%ebx
xorl %edx,%edi
xorl %ebx,%edi
leal 4294588738(%eax,%ebp,1),%eax
addl %edi,%eax
roll $4,%eax
movl 32(%esi),%ebp
movl %ebx,%edi
leal 2272392833(%edx,%ebp,1),%edx
addl %ebx,%eax
xorl %ecx,%edi
xorl %eax,%edi
movl 44(%esi),%ebp
addl %edi,%edx
movl %eax,%edi
roll $11,%edx
addl %eax,%edx
xorl %ebx,%edi
xorl %edx,%edi
leal 1839030562(%ecx,%ebp,1),%ecx
addl %edi,%ecx
roll $16,%ecx
movl 56(%esi),%ebp
movl %edx,%edi
leal 4259657740(%ebx,%ebp,1),%ebx
addl %edx,%ecx
xorl %eax,%edi
xorl %ecx,%edi
movl 4(%esi),%ebp
addl %edi,%ebx
movl %ecx,%edi
roll $23,%ebx
addl %ecx,%ebx
xorl %edx,%edi
xorl %ebx,%edi
leal 2763975236(%eax,%ebp,1),%eax
addl %edi,%eax
roll $4,%eax
movl 16(%esi),%ebp
movl %ebx,%edi
leal 1272893353(%edx,%ebp,1),%edx
addl %ebx,%eax
xorl %ecx,%edi
xorl %eax,%edi
movl 28(%esi),%ebp
addl %edi,%edx
movl %eax,%edi
roll $11,%edx
addl %eax,%edx
xorl %ebx,%edi
xorl %edx,%edi
leal 4139469664(%ecx,%ebp,1),%ecx
addl %edi,%ecx
roll $16,%ecx
movl 40(%esi),%ebp
movl %edx,%edi
leal 3200236656(%ebx,%ebp,1),%ebx
addl %edx,%ecx
xorl %eax,%edi
xorl %ecx,%edi
movl 52(%esi),%ebp
addl %edi,%ebx
movl %ecx,%edi
roll $23,%ebx
addl %ecx,%ebx
xorl %edx,%edi
xorl %ebx,%edi
leal 681279174(%eax,%ebp,1),%eax
addl %edi,%eax
roll $4,%eax
movl (%esi),%ebp
movl %ebx,%edi
leal 3936430074(%edx,%ebp,1),%edx
addl %ebx,%eax
xorl %ecx,%edi
xorl %eax,%edi
movl 12(%esi),%ebp
addl %edi,%edx
movl %eax,%edi
roll $11,%edx
addl %eax,%edx
xorl %ebx,%edi
xorl %edx,%edi
leal 3572445317(%ecx,%ebp,1),%ecx
addl %edi,%ecx
roll $16,%ecx
movl 24(%esi),%ebp
movl %edx,%edi
leal 76029189(%ebx,%ebp,1),%ebx
addl %edx,%ecx
xorl %eax,%edi
xorl %ecx,%edi
movl 36(%esi),%ebp
addl %edi,%ebx
movl %ecx,%edi
roll $23,%ebx
addl %ecx,%ebx
xorl %edx,%edi
xorl %ebx,%edi
leal 3654602809(%eax,%ebp,1),%eax
addl %edi,%eax
roll $4,%eax
movl 48(%esi),%ebp
movl %ebx,%edi
leal 3873151461(%edx,%ebp,1),%edx
addl %ebx,%eax
xorl %ecx,%edi
xorl %eax,%edi
movl 60(%esi),%ebp
addl %edi,%edx
movl %eax,%edi
roll $11,%edx
addl %eax,%edx
xorl %ebx,%edi
xorl %edx,%edi
leal 530742520(%ecx,%ebp,1),%ecx
addl %edi,%ecx
roll $16,%ecx
movl 8(%esi),%ebp
movl %edx,%edi
leal 3299628645(%ebx,%ebp,1),%ebx
addl %edx,%ecx
xorl %eax,%edi
xorl %ecx,%edi
movl (%esi),%ebp
addl %edi,%ebx
movl $-1,%edi
roll $23,%ebx
addl %ecx,%ebx
xorl %edx,%edi
orl %ebx,%edi
leal 4096336452(%eax,%ebp,1),%eax
xorl %ecx,%edi
movl 28(%esi),%ebp
addl %edi,%eax
movl $-1,%edi
roll $6,%eax
xorl %ecx,%edi
addl %ebx,%eax
orl %eax,%edi
leal 1126891415(%edx,%ebp,1),%edx
xorl %ebx,%edi
movl 56(%esi),%ebp
addl %edi,%edx
movl $-1,%edi
roll $10,%edx
xorl %ebx,%edi
addl %eax,%edx
orl %edx,%edi
leal 2878612391(%ecx,%ebp,1),%ecx
xorl %eax,%edi
movl 20(%esi),%ebp
addl %edi,%ecx
movl $-1,%edi
roll $15,%ecx
xorl %eax,%edi
addl %edx,%ecx
orl %ecx,%edi
leal 4237533241(%ebx,%ebp,1),%ebx
xorl %edx,%edi
movl 48(%esi),%ebp
addl %edi,%ebx
movl $-1,%edi
roll $21,%ebx
xorl %edx,%edi
addl %ecx,%ebx
orl %ebx,%edi
leal 1700485571(%eax,%ebp,1),%eax
xorl %ecx,%edi
movl 12(%esi),%ebp
addl %edi,%eax
movl $-1,%edi
roll $6,%eax
xorl %ecx,%edi
addl %ebx,%eax
orl %eax,%edi
leal 2399980690(%edx,%ebp,1),%edx
xorl %ebx,%edi
movl 40(%esi),%ebp
addl %edi,%edx
movl $-1,%edi
roll $10,%edx
xorl %ebx,%edi
addl %eax,%edx
orl %edx,%edi
leal 4293915773(%ecx,%ebp,1),%ecx
xorl %eax,%edi
movl 4(%esi),%ebp
addl %edi,%ecx
movl $-1,%edi
roll $15,%ecx
xorl %eax,%edi
addl %edx,%ecx
orl %ecx,%edi
leal 2240044497(%ebx,%ebp,1),%ebx
xorl %edx,%edi
movl 32(%esi),%ebp
addl %edi,%ebx
movl $-1,%edi
roll $21,%ebx
xorl %edx,%edi
addl %ecx,%ebx
orl %ebx,%edi
leal 1873313359(%eax,%ebp,1),%eax
xorl %ecx,%edi
movl 60(%esi),%ebp
addl %edi,%eax
movl $-1,%edi
roll $6,%eax
xorl %ecx,%edi
addl %ebx,%eax
orl %eax,%edi
leal 4264355552(%edx,%ebp,1),%edx
xorl %ebx,%edi
movl 24(%esi),%ebp
addl %edi,%edx
movl $-1,%edi
roll $10,%edx
xorl %ebx,%edi
addl %eax,%edx
orl %edx,%edi
leal 2734768916(%ecx,%ebp,1),%ecx
xorl %eax,%edi
movl 52(%esi),%ebp
addl %edi,%ecx
movl $-1,%edi
roll $15,%ecx
xorl %eax,%edi
addl %edx,%ecx
orl %ecx,%edi
leal 1309151649(%ebx,%ebp,1),%ebx
xorl %edx,%edi
movl 16(%esi),%ebp
addl %edi,%ebx
movl $-1,%edi
roll $21,%ebx
xorl %edx,%edi
addl %ecx,%ebx
orl %ebx,%edi
leal 4149444226(%eax,%ebp,1),%eax
xorl %ecx,%edi
movl 44(%esi),%ebp
addl %edi,%eax
movl $-1,%edi
roll $6,%eax
xorl %ecx,%edi
addl %ebx,%eax
orl %eax,%edi
leal 3174756917(%edx,%ebp,1),%edx
xorl %ebx,%edi
movl 8(%esi),%ebp
addl %edi,%edx
movl $-1,%edi
roll $10,%edx
xorl %ebx,%edi
addl %eax,%edx
orl %edx,%edi
leal 718787259(%ecx,%ebp,1),%ecx
xorl %eax,%edi
movl 36(%esi),%ebp
addl %edi,%ecx
movl $-1,%edi
roll $15,%ecx
xorl %eax,%edi
addl %edx,%ecx
orl %ecx,%edi
leal 3951481745(%ebx,%ebp,1),%ebx
xorl %edx,%edi
movl 24(%esp),%ebp
addl %edi,%ebx
addl $64,%esi
roll $21,%ebx
movl (%ebp),%edi
addl %ecx,%ebx
addl %edi,%eax
movl 4(%ebp),%edi
addl %edi,%ebx
movl 8(%ebp),%edi
addl %edi,%ecx
movl 12(%ebp),%edi
addl %edi,%edx
movl %eax,(%ebp)
movl %ebx,4(%ebp)
movl (%esp),%edi
movl %ecx,8(%ebp)
movl %edx,12(%ebp)
cmpl %esi,%edi
jae .L000start
popl %eax
popl %ebx
popl %ebp
popl %edi
popl %esi
ret
.size md5_block_asm_data_order,.-.L_md5_block_asm_data_order_begin
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
marvin-hansen/iggy-streaming-system | 51,315 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-x86/crypto/fipsmodule/aesni-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
#ifdef BORINGSSL_DISPATCH_TEST
#endif
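// The AES-NI and SSSE3/SSE4.1 instructions below are emitted as raw .byte
// sequences (e.g. 102,15,56,220,209 = aesenc %xmm1,%xmm2; 221 = aesenclast,
// 222 = aesdec, 223 = aesdeclast) so the file assembles on toolchains that
// predate those mnemonics.  aes_hw_encrypt/aes_hw_decrypt process a single
// 16-byte block; the AES_KEY pointer is the third argument and its round
// count is read from byte offset 240.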
.globl aes_hw_encrypt
.hidden aes_hw_encrypt
.type aes_hw_encrypt,@function
.align 16
aes_hw_encrypt:
.L_aes_hw_encrypt_begin:
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L000pic
.L000pic:
popl %ebx
leal BORINGSSL_function_hit+1-.L000pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 4(%esp),%eax
movl 12(%esp),%edx
movups (%eax),%xmm2
movl 240(%edx),%ecx
movl 8(%esp),%eax
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L001enc1_loop_1:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L001enc1_loop_1
.byte 102,15,56,221,209
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
movups %xmm2,(%eax)
pxor %xmm2,%xmm2
ret
.size aes_hw_encrypt,.-.L_aes_hw_encrypt_begin
.globl aes_hw_decrypt
.hidden aes_hw_decrypt
.type aes_hw_decrypt,@function
.align 16
aes_hw_decrypt:
.L_aes_hw_decrypt_begin:
movl 4(%esp),%eax
movl 12(%esp),%edx
movups (%eax),%xmm2
movl 240(%edx),%ecx
movl 8(%esp),%eax
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L002dec1_loop_2:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L002dec1_loop_2
.byte 102,15,56,223,209
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
movups %xmm2,(%eax)
pxor %xmm2,%xmm2
ret
.size aes_hw_decrypt,.-.L_aes_hw_decrypt_begin
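// _aesni_encrypt2/3/4/6 and _aesni_decrypt2/3/4/6: internal helpers that run
// 2-6 blocks (held in %xmm2..%xmm7) through the cipher in parallel,
// interleaving one aesenc/aesdec per block per round to hide instruction
// latency.  On entry %edx points at the key schedule and %ecx holds the
// round count; the occasional ".byte 15,31,64,0" is simply a 4-byte nop
// (nopl 0(%eax)).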
.hidden _aesni_encrypt2
.type _aesni_encrypt2,@function
.align 16
_aesni_encrypt2:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
.L003enc2_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%edx,%ecx,1),%xmm0
jnz .L003enc2_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,221,208
.byte 102,15,56,221,216
ret
.size _aesni_encrypt2,.-_aesni_encrypt2
.hidden _aesni_decrypt2
.type _aesni_decrypt2,@function
.align 16
_aesni_decrypt2:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
.L004dec2_loop:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,222,208
.byte 102,15,56,222,216
movups -16(%edx,%ecx,1),%xmm0
jnz .L004dec2_loop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,223,208
.byte 102,15,56,223,216
ret
.size _aesni_decrypt2,.-_aesni_decrypt2
.hidden _aesni_encrypt3
.type _aesni_encrypt3,@function
.align 16
_aesni_encrypt3:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
.L005enc3_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
movups -16(%edx,%ecx,1),%xmm0
jnz .L005enc3_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
ret
.size _aesni_encrypt3,.-_aesni_encrypt3
.hidden _aesni_decrypt3
.type _aesni_decrypt3,@function
.align 16
_aesni_decrypt3:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
addl $16,%ecx
.L006dec3_loop:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,222,208
.byte 102,15,56,222,216
.byte 102,15,56,222,224
movups -16(%edx,%ecx,1),%xmm0
jnz .L006dec3_loop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
.byte 102,15,56,223,208
.byte 102,15,56,223,216
.byte 102,15,56,223,224
ret
.size _aesni_decrypt3,.-_aesni_decrypt3
.hidden _aesni_encrypt4
.type _aesni_encrypt4,@function
.align 16
_aesni_encrypt4:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
shll $4,%ecx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
.byte 15,31,64,0
addl $16,%ecx
.L007enc4_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
movups -16(%edx,%ecx,1),%xmm0
jnz .L007enc4_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
ret
.size _aesni_encrypt4,.-_aesni_encrypt4
.hidden _aesni_decrypt4
.type _aesni_decrypt4,@function
.align 16
_aesni_decrypt4:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
shll $4,%ecx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
movups 32(%edx),%xmm0
leal 32(%edx,%ecx,1),%edx
negl %ecx
.byte 15,31,64,0
addl $16,%ecx
.L008dec4_loop:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
.byte 102,15,56,222,233
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,222,208
.byte 102,15,56,222,216
.byte 102,15,56,222,224
.byte 102,15,56,222,232
movups -16(%edx,%ecx,1),%xmm0
jnz .L008dec4_loop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
.byte 102,15,56,222,233
.byte 102,15,56,223,208
.byte 102,15,56,223,216
.byte 102,15,56,223,224
.byte 102,15,56,223,232
ret
.size _aesni_decrypt4,.-_aesni_decrypt4
.hidden _aesni_encrypt6
.type _aesni_encrypt6,@function
.align 16
_aesni_encrypt6:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
.byte 102,15,56,220,209
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
.byte 102,15,56,220,217
leal 32(%edx,%ecx,1),%edx
negl %ecx
.byte 102,15,56,220,225
pxor %xmm0,%xmm7
movups (%edx,%ecx,1),%xmm0
addl $16,%ecx
jmp .L009_aesni_encrypt6_inner
.align 16
.L010enc6_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.L009_aesni_encrypt6_inner:
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.L_aesni_encrypt6_enter:
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
movups -16(%edx,%ecx,1),%xmm0
jnz .L010enc6_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,15,56,221,208
.byte 102,15,56,221,216
.byte 102,15,56,221,224
.byte 102,15,56,221,232
.byte 102,15,56,221,240
.byte 102,15,56,221,248
ret
.size _aesni_encrypt6,.-_aesni_encrypt6
.hidden _aesni_decrypt6
.type _aesni_decrypt6,@function
.align 16
_aesni_decrypt6:
movups (%edx),%xmm0
shll $4,%ecx
movups 16(%edx),%xmm1
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
pxor %xmm0,%xmm4
.byte 102,15,56,222,209
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
.byte 102,15,56,222,217
leal 32(%edx,%ecx,1),%edx
negl %ecx
.byte 102,15,56,222,225
pxor %xmm0,%xmm7
movups (%edx,%ecx,1),%xmm0
addl $16,%ecx
jmp .L011_aesni_decrypt6_inner
.align 16
.L012dec6_loop:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
.L011_aesni_decrypt6_inner:
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
.L_aesni_decrypt6_enter:
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,222,208
.byte 102,15,56,222,216
.byte 102,15,56,222,224
.byte 102,15,56,222,232
.byte 102,15,56,222,240
.byte 102,15,56,222,248
movups -16(%edx,%ecx,1),%xmm0
jnz .L012dec6_loop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
.byte 102,15,56,223,208
.byte 102,15,56,223,216
.byte 102,15,56,223,224
.byte 102,15,56,223,232
.byte 102,15,56,223,240
.byte 102,15,56,223,248
ret
.size _aesni_decrypt6,.-_aesni_decrypt6
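// aes_hw_ecb_encrypt(in, out, length, key, enc): ECB mode for both
// directions.  length is rounded down to a multiple of 16 (andl $-16), a
// non-zero enc selects encryption and zero selects decryption, and the main
// loop handles six blocks per iteration via _aesni_encrypt6/_aesni_decrypt6
// with a 1-5 block tail.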
.globl aes_hw_ecb_encrypt
.hidden aes_hw_ecb_encrypt
.type aes_hw_ecb_encrypt,@function
.align 16
aes_hw_ecb_encrypt:
.L_aes_hw_ecb_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl 36(%esp),%ebx
andl $-16,%eax
jz .L013ecb_ret
movl 240(%edx),%ecx
testl %ebx,%ebx
jz .L014ecb_decrypt
movl %edx,%ebp
movl %ecx,%ebx
cmpl $96,%eax
jb .L015ecb_enc_tail
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
movdqu 48(%esi),%xmm5
movdqu 64(%esi),%xmm6
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
subl $96,%eax
jmp .L016ecb_enc_loop6_enter
.align 16
.L017ecb_enc_loop6:
movups %xmm2,(%edi)
movdqu (%esi),%xmm2
movups %xmm3,16(%edi)
movdqu 16(%esi),%xmm3
movups %xmm4,32(%edi)
movdqu 32(%esi),%xmm4
movups %xmm5,48(%edi)
movdqu 48(%esi),%xmm5
movups %xmm6,64(%edi)
movdqu 64(%esi),%xmm6
movups %xmm7,80(%edi)
leal 96(%edi),%edi
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
.L016ecb_enc_loop6_enter:
call _aesni_encrypt6
movl %ebp,%edx
movl %ebx,%ecx
subl $96,%eax
jnc .L017ecb_enc_loop6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
movups %xmm7,80(%edi)
leal 96(%edi),%edi
addl $96,%eax
jz .L013ecb_ret
.L015ecb_enc_tail:
movups (%esi),%xmm2
cmpl $32,%eax
jb .L018ecb_enc_one
movups 16(%esi),%xmm3
je .L019ecb_enc_two
movups 32(%esi),%xmm4
cmpl $64,%eax
jb .L020ecb_enc_three
movups 48(%esi),%xmm5
je .L021ecb_enc_four
movups 64(%esi),%xmm6
xorps %xmm7,%xmm7
call _aesni_encrypt6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
jmp .L013ecb_ret
.align 16
.L018ecb_enc_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L022enc1_loop_3:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L022enc1_loop_3
.byte 102,15,56,221,209
movups %xmm2,(%edi)
jmp .L013ecb_ret
.align 16
.L019ecb_enc_two:
call _aesni_encrypt2
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
jmp .L013ecb_ret
.align 16
.L020ecb_enc_three:
call _aesni_encrypt3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
jmp .L013ecb_ret
.align 16
.L021ecb_enc_four:
call _aesni_encrypt4
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
jmp .L013ecb_ret
.align 16
.L014ecb_decrypt:
movl %edx,%ebp
movl %ecx,%ebx
cmpl $96,%eax
jb .L023ecb_dec_tail
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
movdqu 48(%esi),%xmm5
movdqu 64(%esi),%xmm6
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
subl $96,%eax
jmp .L024ecb_dec_loop6_enter
.align 16
.L025ecb_dec_loop6:
movups %xmm2,(%edi)
movdqu (%esi),%xmm2
movups %xmm3,16(%edi)
movdqu 16(%esi),%xmm3
movups %xmm4,32(%edi)
movdqu 32(%esi),%xmm4
movups %xmm5,48(%edi)
movdqu 48(%esi),%xmm5
movups %xmm6,64(%edi)
movdqu 64(%esi),%xmm6
movups %xmm7,80(%edi)
leal 96(%edi),%edi
movdqu 80(%esi),%xmm7
leal 96(%esi),%esi
.L024ecb_dec_loop6_enter:
call _aesni_decrypt6
movl %ebp,%edx
movl %ebx,%ecx
subl $96,%eax
jnc .L025ecb_dec_loop6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
movups %xmm7,80(%edi)
leal 96(%edi),%edi
addl $96,%eax
jz .L013ecb_ret
.L023ecb_dec_tail:
movups (%esi),%xmm2
cmpl $32,%eax
jb .L026ecb_dec_one
movups 16(%esi),%xmm3
je .L027ecb_dec_two
movups 32(%esi),%xmm4
cmpl $64,%eax
jb .L028ecb_dec_three
movups 48(%esi),%xmm5
je .L029ecb_dec_four
movups 64(%esi),%xmm6
xorps %xmm7,%xmm7
call _aesni_decrypt6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
jmp .L013ecb_ret
.align 16
.L026ecb_dec_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L030dec1_loop_4:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L030dec1_loop_4
.byte 102,15,56,223,209
movups %xmm2,(%edi)
jmp .L013ecb_ret
.align 16
.L027ecb_dec_two:
call _aesni_decrypt2
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
jmp .L013ecb_ret
.align 16
.L028ecb_dec_three:
call _aesni_decrypt3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
jmp .L013ecb_ret
.align 16
.L029ecb_dec_four:
call _aesni_decrypt4
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
.L013ecb_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_ecb_encrypt,.-.L_aes_hw_ecb_encrypt_begin
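// aes_hw_ccm64_encrypt_blocks / aes_hw_ccm64_decrypt_blocks: CCM with a
// 64-bit counter field.  Each iteration encrypts the counter block (CTR
// keystream) and folds the data into a running CBC-MAC, which is written back
// through the final argument on exit.  The four movl stores (0x0C0D0E0F,
// 0x08090A0B, 0x04050607, 0x00010203) build the byte-swap shuffle mask that
// pshufb applies to the counter block, and paddq 16(%esp) increments the
// 64-bit counter lane.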
.globl aes_hw_ccm64_encrypt_blocks
.hidden aes_hw_ccm64_encrypt_blocks
.type aes_hw_ccm64_encrypt_blocks,@function
.align 16
aes_hw_ccm64_encrypt_blocks:
.L_aes_hw_ccm64_encrypt_blocks_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl 40(%esp),%ecx
movl %esp,%ebp
subl $60,%esp
andl $-16,%esp
movl %ebp,48(%esp)
movdqu (%ebx),%xmm7
movdqu (%ecx),%xmm3
movl 240(%edx),%ecx
movl $202182159,(%esp)
movl $134810123,4(%esp)
movl $67438087,8(%esp)
movl $66051,12(%esp)
movl $1,%ebx
xorl %ebp,%ebp
movl %ebx,16(%esp)
movl %ebp,20(%esp)
movl %ebp,24(%esp)
movl %ebp,28(%esp)
shll $4,%ecx
movl $16,%ebx
leal (%edx),%ebp
movdqa (%esp),%xmm5
movdqa %xmm7,%xmm2
leal 32(%edx,%ecx,1),%edx
subl %ecx,%ebx
.byte 102,15,56,0,253
.L031ccm64_enc_outer:
movups (%ebp),%xmm0
movl %ebx,%ecx
movups (%esi),%xmm6
xorps %xmm0,%xmm2
movups 16(%ebp),%xmm1
xorps %xmm6,%xmm0
xorps %xmm0,%xmm3
movups 32(%ebp),%xmm0
.L032ccm64_enc2_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%edx,%ecx,1),%xmm0
jnz .L032ccm64_enc2_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
paddq 16(%esp),%xmm7
decl %eax
.byte 102,15,56,221,208
.byte 102,15,56,221,216
leal 16(%esi),%esi
xorps %xmm2,%xmm6
movdqa %xmm7,%xmm2
movups %xmm6,(%edi)
.byte 102,15,56,0,213
leal 16(%edi),%edi
jnz .L031ccm64_enc_outer
movl 48(%esp),%esp
movl 40(%esp),%edi
movups %xmm3,(%edi)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_ccm64_encrypt_blocks,.-.L_aes_hw_ccm64_encrypt_blocks_begin
.globl aes_hw_ccm64_decrypt_blocks
.hidden aes_hw_ccm64_decrypt_blocks
.type aes_hw_ccm64_decrypt_blocks,@function
.align 16
aes_hw_ccm64_decrypt_blocks:
.L_aes_hw_ccm64_decrypt_blocks_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl 40(%esp),%ecx
movl %esp,%ebp
subl $60,%esp
andl $-16,%esp
movl %ebp,48(%esp)
movdqu (%ebx),%xmm7
movdqu (%ecx),%xmm3
movl 240(%edx),%ecx
movl $202182159,(%esp)
movl $134810123,4(%esp)
movl $67438087,8(%esp)
movl $66051,12(%esp)
movl $1,%ebx
xorl %ebp,%ebp
movl %ebx,16(%esp)
movl %ebp,20(%esp)
movl %ebp,24(%esp)
movl %ebp,28(%esp)
movdqa (%esp),%xmm5
movdqa %xmm7,%xmm2
movl %edx,%ebp
movl %ecx,%ebx
.byte 102,15,56,0,253
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L033enc1_loop_5:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L033enc1_loop_5
.byte 102,15,56,221,209
shll $4,%ebx
movl $16,%ecx
movups (%esi),%xmm6
paddq 16(%esp),%xmm7
leal 16(%esi),%esi
subl %ebx,%ecx
leal 32(%ebp,%ebx,1),%edx
movl %ecx,%ebx
jmp .L034ccm64_dec_outer
.align 16
.L034ccm64_dec_outer:
xorps %xmm2,%xmm6
movdqa %xmm7,%xmm2
movups %xmm6,(%edi)
leal 16(%edi),%edi
.byte 102,15,56,0,213
subl $1,%eax
jz .L035ccm64_dec_break
movups (%ebp),%xmm0
movl %ebx,%ecx
movups 16(%ebp),%xmm1
xorps %xmm0,%xmm6
xorps %xmm0,%xmm2
xorps %xmm6,%xmm3
movups 32(%ebp),%xmm0
.L036ccm64_dec2_loop:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
movups (%edx,%ecx,1),%xmm1
addl $32,%ecx
.byte 102,15,56,220,208
.byte 102,15,56,220,216
movups -16(%edx,%ecx,1),%xmm0
jnz .L036ccm64_dec2_loop
movups (%esi),%xmm6
paddq 16(%esp),%xmm7
.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,221,208
.byte 102,15,56,221,216
leal 16(%esi),%esi
jmp .L034ccm64_dec_outer
.align 16
.L035ccm64_dec_break:
movl 240(%ebp),%ecx
movl %ebp,%edx
movups (%edx),%xmm0
movups 16(%edx),%xmm1
xorps %xmm0,%xmm6
leal 32(%edx),%edx
xorps %xmm6,%xmm3
.L037enc1_loop_6:
.byte 102,15,56,220,217
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L037enc1_loop_6
.byte 102,15,56,221,217
movl 48(%esp),%esp
movl 40(%esp),%edi
movups %xmm3,(%edi)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
pxor %xmm7,%xmm7
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_ccm64_decrypt_blocks,.-.L_aes_hw_ccm64_decrypt_blocks_begin
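// aes_hw_ctr32_encrypt_blocks(in, out, blocks, key, ivec): CTR mode that
// increments only the low 32 bits of the counter (big-endian, hence the
// bswap).  Six counter blocks are constructed per iteration and pipelined
// through _aesni_encrypt6; the .byte sequences 102,15,58,22/34 encode
// pextrd/pinsrd and 102,15,56,0 encodes pshufb.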
.globl aes_hw_ctr32_encrypt_blocks
.hidden aes_hw_ctr32_encrypt_blocks
.type aes_hw_ctr32_encrypt_blocks,@function
.align 16
aes_hw_ctr32_encrypt_blocks:
.L_aes_hw_ctr32_encrypt_blocks_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L038pic
.L038pic:
popl %ebx
leal BORINGSSL_function_hit+0-.L038pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl %esp,%ebp
subl $88,%esp
andl $-16,%esp
movl %ebp,80(%esp)
cmpl $1,%eax
jb .L039ctr32_ret
je .L040ctr32_one_shortcut
movdqu (%ebx),%xmm7
movl $202182159,(%esp)
movl $134810123,4(%esp)
movl $67438087,8(%esp)
movl $66051,12(%esp)
movl $6,%ecx
xorl %ebp,%ebp
movl %ecx,16(%esp)
movl %ecx,20(%esp)
movl %ecx,24(%esp)
movl %ebp,28(%esp)
.byte 102,15,58,22,251,3
.byte 102,15,58,34,253,3
movl 240(%edx),%ecx
bswap %ebx
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
movdqa (%esp),%xmm2
.byte 102,15,58,34,195,0
leal 3(%ebx),%ebp
.byte 102,15,58,34,205,0
incl %ebx
.byte 102,15,58,34,195,1
incl %ebp
.byte 102,15,58,34,205,1
incl %ebx
.byte 102,15,58,34,195,2
incl %ebp
.byte 102,15,58,34,205,2
movdqa %xmm0,48(%esp)
.byte 102,15,56,0,194
movdqu (%edx),%xmm6
movdqa %xmm1,64(%esp)
.byte 102,15,56,0,202
pshufd $192,%xmm0,%xmm2
pshufd $128,%xmm0,%xmm3
cmpl $6,%eax
jb .L041ctr32_tail
pxor %xmm6,%xmm7
shll $4,%ecx
movl $16,%ebx
movdqa %xmm7,32(%esp)
movl %edx,%ebp
subl %ecx,%ebx
leal 32(%edx,%ecx,1),%edx
subl $6,%eax
jmp .L042ctr32_loop6
.align 16
.L042ctr32_loop6:
pshufd $64,%xmm0,%xmm4
movdqa 32(%esp),%xmm0
pshufd $192,%xmm1,%xmm5
pxor %xmm0,%xmm2
pshufd $128,%xmm1,%xmm6
pxor %xmm0,%xmm3
pshufd $64,%xmm1,%xmm7
movups 16(%ebp),%xmm1
pxor %xmm0,%xmm4
pxor %xmm0,%xmm5
.byte 102,15,56,220,209
pxor %xmm0,%xmm6
pxor %xmm0,%xmm7
.byte 102,15,56,220,217
movups 32(%ebp),%xmm0
movl %ebx,%ecx
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
call .L_aesni_encrypt6_enter
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps %xmm1,%xmm2
movups 32(%esi),%xmm1
xorps %xmm0,%xmm3
movups %xmm2,(%edi)
movdqa 16(%esp),%xmm0
xorps %xmm1,%xmm4
movdqa 64(%esp),%xmm1
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
paddd %xmm0,%xmm1
paddd 48(%esp),%xmm0
movdqa (%esp),%xmm2
movups 48(%esi),%xmm3
movups 64(%esi),%xmm4
xorps %xmm3,%xmm5
movups 80(%esi),%xmm3
leal 96(%esi),%esi
movdqa %xmm0,48(%esp)
.byte 102,15,56,0,194
xorps %xmm4,%xmm6
movups %xmm5,48(%edi)
xorps %xmm3,%xmm7
movdqa %xmm1,64(%esp)
.byte 102,15,56,0,202
movups %xmm6,64(%edi)
pshufd $192,%xmm0,%xmm2
movups %xmm7,80(%edi)
leal 96(%edi),%edi
pshufd $128,%xmm0,%xmm3
subl $6,%eax
jnc .L042ctr32_loop6
addl $6,%eax
jz .L039ctr32_ret
movdqu (%ebp),%xmm7
movl %ebp,%edx
pxor 32(%esp),%xmm7
movl 240(%ebp),%ecx
.L041ctr32_tail:
por %xmm7,%xmm2
cmpl $2,%eax
jb .L043ctr32_one
pshufd $64,%xmm0,%xmm4
por %xmm7,%xmm3
je .L044ctr32_two
pshufd $192,%xmm1,%xmm5
por %xmm7,%xmm4
cmpl $4,%eax
jb .L045ctr32_three
pshufd $128,%xmm1,%xmm6
por %xmm7,%xmm5
je .L046ctr32_four
por %xmm7,%xmm6
call _aesni_encrypt6
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps %xmm1,%xmm2
movups 32(%esi),%xmm1
xorps %xmm0,%xmm3
movups 48(%esi),%xmm0
xorps %xmm1,%xmm4
movups 64(%esi),%xmm1
xorps %xmm0,%xmm5
movups %xmm2,(%edi)
xorps %xmm1,%xmm6
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
jmp .L039ctr32_ret
.align 16
.L040ctr32_one_shortcut:
movups (%ebx),%xmm2
movl 240(%edx),%ecx
.L043ctr32_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L047enc1_loop_7:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L047enc1_loop_7
.byte 102,15,56,221,209
movups (%esi),%xmm6
xorps %xmm2,%xmm6
movups %xmm6,(%edi)
jmp .L039ctr32_ret
.align 16
.L044ctr32_two:
call _aesni_encrypt2
movups (%esi),%xmm5
movups 16(%esi),%xmm6
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
jmp .L039ctr32_ret
.align 16
.L045ctr32_three:
call _aesni_encrypt3
movups (%esi),%xmm5
movups 16(%esi),%xmm6
xorps %xmm5,%xmm2
movups 32(%esi),%xmm7
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
xorps %xmm7,%xmm4
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
jmp .L039ctr32_ret
.align 16
.L046ctr32_four:
call _aesni_encrypt4
movups (%esi),%xmm6
movups 16(%esi),%xmm7
movups 32(%esi),%xmm1
xorps %xmm6,%xmm2
movups 48(%esi),%xmm0
xorps %xmm7,%xmm3
movups %xmm2,(%edi)
xorps %xmm1,%xmm4
movups %xmm3,16(%edi)
xorps %xmm0,%xmm5
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
.L039ctr32_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
movdqa %xmm0,32(%esp)
pxor %xmm5,%xmm5
movdqa %xmm0,48(%esp)
pxor %xmm6,%xmm6
movdqa %xmm0,64(%esp)
pxor %xmm7,%xmm7
movl 80(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_ctr32_encrypt_blocks,.-.L_aes_hw_ctr32_encrypt_blocks_begin
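// aes_hw_xts_encrypt(in, out, length, key1, key2, iv): XTS encryption.  The
// initial tweak is AES-encrypt(iv) under key2 (the first enc1 loop), data
// blocks are XORed with their tweak before and after encryption under key1,
// and successive tweaks are obtained by multiplying by x in GF(2^128) using
// the reduction constant 0x87 stored at 96(%esp) (movl $135,...).  The main
// loop covers six blocks; the xts_enc_steal path performs ciphertext
// stealing when length is not a multiple of 16.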
.globl aes_hw_xts_encrypt
.hidden aes_hw_xts_encrypt
.type aes_hw_xts_encrypt,@function
.align 16
aes_hw_xts_encrypt:
.L_aes_hw_xts_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 36(%esp),%edx
movl 40(%esp),%esi
movl 240(%edx),%ecx
movups (%esi),%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L048enc1_loop_8:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L048enc1_loop_8
.byte 102,15,56,221,209
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl %esp,%ebp
subl $120,%esp
movl 240(%edx),%ecx
andl $-16,%esp
movl $135,96(%esp)
movl $0,100(%esp)
movl $1,104(%esp)
movl $0,108(%esp)
movl %eax,112(%esp)
movl %ebp,116(%esp)
movdqa %xmm2,%xmm1
pxor %xmm0,%xmm0
movdqa 96(%esp),%xmm3
pcmpgtd %xmm1,%xmm0
andl $-16,%eax
movl %edx,%ebp
movl %ecx,%ebx
subl $96,%eax
jc .L049xts_enc_short
shll $4,%ecx
movl $16,%ebx
subl %ecx,%ebx
leal 32(%edx,%ecx,1),%edx
jmp .L050xts_enc_loop6
.align 16
.L050xts_enc_loop6:
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,16(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,32(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,48(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm7
movdqa %xmm1,64(%esp)
paddq %xmm1,%xmm1
movups (%ebp),%xmm0
pand %xmm3,%xmm7
movups (%esi),%xmm2
pxor %xmm1,%xmm7
movl %ebx,%ecx
movdqu 16(%esi),%xmm3
xorps %xmm0,%xmm2
movdqu 32(%esi),%xmm4
pxor %xmm0,%xmm3
movdqu 48(%esi),%xmm5
pxor %xmm0,%xmm4
movdqu 64(%esi),%xmm6
pxor %xmm0,%xmm5
movdqu 80(%esi),%xmm1
pxor %xmm0,%xmm6
leal 96(%esi),%esi
pxor (%esp),%xmm2
movdqa %xmm7,80(%esp)
pxor %xmm1,%xmm7
movups 16(%ebp),%xmm1
pxor 16(%esp),%xmm3
pxor 32(%esp),%xmm4
.byte 102,15,56,220,209
pxor 48(%esp),%xmm5
pxor 64(%esp),%xmm6
.byte 102,15,56,220,217
pxor %xmm0,%xmm7
movups 32(%ebp),%xmm0
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
call .L_aesni_encrypt6_enter
movdqa 80(%esp),%xmm1
pxor %xmm0,%xmm0
xorps (%esp),%xmm2
pcmpgtd %xmm1,%xmm0
xorps 16(%esp),%xmm3
movups %xmm2,(%edi)
xorps 32(%esp),%xmm4
movups %xmm3,16(%edi)
xorps 48(%esp),%xmm5
movups %xmm4,32(%edi)
xorps 64(%esp),%xmm6
movups %xmm5,48(%edi)
xorps %xmm1,%xmm7
movups %xmm6,64(%edi)
pshufd $19,%xmm0,%xmm2
movups %xmm7,80(%edi)
leal 96(%edi),%edi
movdqa 96(%esp),%xmm3
pxor %xmm0,%xmm0
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
subl $96,%eax
jnc .L050xts_enc_loop6
movl 240(%ebp),%ecx
movl %ebp,%edx
movl %ecx,%ebx
.L049xts_enc_short:
addl $96,%eax
jz .L051xts_enc_done6x
movdqa %xmm1,%xmm5
cmpl $32,%eax
jb .L052xts_enc_one
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
je .L053xts_enc_two
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm6
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
cmpl $64,%eax
jb .L054xts_enc_three
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm7
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
movdqa %xmm5,(%esp)
movdqa %xmm6,16(%esp)
je .L055xts_enc_four
movdqa %xmm7,32(%esp)
pshufd $19,%xmm0,%xmm7
movdqa %xmm1,48(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm7
pxor %xmm1,%xmm7
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
pxor (%esp),%xmm2
movdqu 48(%esi),%xmm5
pxor 16(%esp),%xmm3
movdqu 64(%esi),%xmm6
pxor 32(%esp),%xmm4
leal 80(%esi),%esi
pxor 48(%esp),%xmm5
movdqa %xmm7,64(%esp)
pxor %xmm7,%xmm6
call _aesni_encrypt6
movaps 64(%esp),%xmm1
xorps (%esp),%xmm2
xorps 16(%esp),%xmm3
xorps 32(%esp),%xmm4
movups %xmm2,(%edi)
xorps 48(%esp),%xmm5
movups %xmm3,16(%edi)
xorps %xmm1,%xmm6
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
leal 80(%edi),%edi
jmp .L056xts_enc_done
.align 16
.L052xts_enc_one:
movups (%esi),%xmm2
leal 16(%esi),%esi
xorps %xmm5,%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L057enc1_loop_9:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L057enc1_loop_9
.byte 102,15,56,221,209
xorps %xmm5,%xmm2
movups %xmm2,(%edi)
leal 16(%edi),%edi
movdqa %xmm5,%xmm1
jmp .L056xts_enc_done
.align 16
.L053xts_enc_two:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
leal 32(%esi),%esi
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
call _aesni_encrypt2
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
leal 32(%edi),%edi
movdqa %xmm6,%xmm1
jmp .L056xts_enc_done
.align 16
.L054xts_enc_three:
movaps %xmm1,%xmm7
movups (%esi),%xmm2
movups 16(%esi),%xmm3
movups 32(%esi),%xmm4
leal 48(%esi),%esi
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
xorps %xmm7,%xmm4
call _aesni_encrypt3
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
xorps %xmm7,%xmm4
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
leal 48(%edi),%edi
movdqa %xmm7,%xmm1
jmp .L056xts_enc_done
.align 16
.L055xts_enc_four:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
movups 32(%esi),%xmm4
xorps (%esp),%xmm2
movups 48(%esi),%xmm5
leal 64(%esi),%esi
xorps 16(%esp),%xmm3
xorps %xmm7,%xmm4
xorps %xmm6,%xmm5
call _aesni_encrypt4
xorps (%esp),%xmm2
xorps 16(%esp),%xmm3
xorps %xmm7,%xmm4
movups %xmm2,(%edi)
xorps %xmm6,%xmm5
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
leal 64(%edi),%edi
movdqa %xmm6,%xmm1
jmp .L056xts_enc_done
.align 16
.L051xts_enc_done6x:
movl 112(%esp),%eax
andl $15,%eax
jz .L058xts_enc_ret
movdqa %xmm1,%xmm5
movl %eax,112(%esp)
jmp .L059xts_enc_steal
.align 16
.L056xts_enc_done:
movl 112(%esp),%eax
pxor %xmm0,%xmm0
andl $15,%eax
jz .L058xts_enc_ret
pcmpgtd %xmm1,%xmm0
movl %eax,112(%esp)
pshufd $19,%xmm0,%xmm5
paddq %xmm1,%xmm1
pand 96(%esp),%xmm5
pxor %xmm1,%xmm5
.L059xts_enc_steal:
movzbl (%esi),%ecx
movzbl -16(%edi),%edx
leal 1(%esi),%esi
movb %cl,-16(%edi)
movb %dl,(%edi)
leal 1(%edi),%edi
subl $1,%eax
jnz .L059xts_enc_steal
subl 112(%esp),%edi
movl %ebp,%edx
movl %ebx,%ecx
movups -16(%edi),%xmm2
xorps %xmm5,%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L060enc1_loop_10:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L060enc1_loop_10
.byte 102,15,56,221,209
xorps %xmm5,%xmm2
movups %xmm2,-16(%edi)
.L058xts_enc_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
movdqa %xmm0,(%esp)
pxor %xmm3,%xmm3
movdqa %xmm0,16(%esp)
pxor %xmm4,%xmm4
movdqa %xmm0,32(%esp)
pxor %xmm5,%xmm5
movdqa %xmm0,48(%esp)
pxor %xmm6,%xmm6
movdqa %xmm0,64(%esp)
pxor %xmm7,%xmm7
movdqa %xmm0,80(%esp)
movl 116(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_xts_encrypt,.-.L_aes_hw_xts_encrypt_begin
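// aes_hw_xts_decrypt mirrors the encrypt path.  When length is not a
// multiple of 16, the prologue (setnz / shll $4 / subl) holds back one full
// block so the trailing partial block can be handled by the
// ciphertext-stealing code at xts_dec_steal.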
.globl aes_hw_xts_decrypt
.hidden aes_hw_xts_decrypt
.type aes_hw_xts_decrypt,@function
.align 16
aes_hw_xts_decrypt:
.L_aes_hw_xts_decrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 36(%esp),%edx
movl 40(%esp),%esi
movl 240(%edx),%ecx
movups (%esi),%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L061enc1_loop_11:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L061enc1_loop_11
.byte 102,15,56,221,209
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl 32(%esp),%edx
movl %esp,%ebp
subl $120,%esp
andl $-16,%esp
xorl %ebx,%ebx
testl $15,%eax
setnz %bl
shll $4,%ebx
subl %ebx,%eax
movl $135,96(%esp)
movl $0,100(%esp)
movl $1,104(%esp)
movl $0,108(%esp)
movl %eax,112(%esp)
movl %ebp,116(%esp)
movl 240(%edx),%ecx
movl %edx,%ebp
movl %ecx,%ebx
movdqa %xmm2,%xmm1
pxor %xmm0,%xmm0
movdqa 96(%esp),%xmm3
pcmpgtd %xmm1,%xmm0
andl $-16,%eax
subl $96,%eax
jc .L062xts_dec_short
shll $4,%ecx
movl $16,%ebx
subl %ecx,%ebx
leal 32(%edx,%ecx,1),%edx
jmp .L063xts_dec_loop6
.align 16
.L063xts_dec_loop6:
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,16(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,32(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,48(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
pshufd $19,%xmm0,%xmm7
movdqa %xmm1,64(%esp)
paddq %xmm1,%xmm1
movups (%ebp),%xmm0
pand %xmm3,%xmm7
movups (%esi),%xmm2
pxor %xmm1,%xmm7
movl %ebx,%ecx
movdqu 16(%esi),%xmm3
xorps %xmm0,%xmm2
movdqu 32(%esi),%xmm4
pxor %xmm0,%xmm3
movdqu 48(%esi),%xmm5
pxor %xmm0,%xmm4
movdqu 64(%esi),%xmm6
pxor %xmm0,%xmm5
movdqu 80(%esi),%xmm1
pxor %xmm0,%xmm6
leal 96(%esi),%esi
pxor (%esp),%xmm2
movdqa %xmm7,80(%esp)
pxor %xmm1,%xmm7
movups 16(%ebp),%xmm1
pxor 16(%esp),%xmm3
pxor 32(%esp),%xmm4
.byte 102,15,56,222,209
pxor 48(%esp),%xmm5
pxor 64(%esp),%xmm6
.byte 102,15,56,222,217
pxor %xmm0,%xmm7
movups 32(%ebp),%xmm0
.byte 102,15,56,222,225
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
call .L_aesni_decrypt6_enter
movdqa 80(%esp),%xmm1
pxor %xmm0,%xmm0
xorps (%esp),%xmm2
pcmpgtd %xmm1,%xmm0
xorps 16(%esp),%xmm3
movups %xmm2,(%edi)
xorps 32(%esp),%xmm4
movups %xmm3,16(%edi)
xorps 48(%esp),%xmm5
movups %xmm4,32(%edi)
xorps 64(%esp),%xmm6
movups %xmm5,48(%edi)
xorps %xmm1,%xmm7
movups %xmm6,64(%edi)
pshufd $19,%xmm0,%xmm2
movups %xmm7,80(%edi)
leal 96(%edi),%edi
movdqa 96(%esp),%xmm3
pxor %xmm0,%xmm0
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
subl $96,%eax
jnc .L063xts_dec_loop6
movl 240(%ebp),%ecx
movl %ebp,%edx
movl %ecx,%ebx
.L062xts_dec_short:
addl $96,%eax
jz .L064xts_dec_done6x
movdqa %xmm1,%xmm5
cmpl $32,%eax
jb .L065xts_dec_one
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
je .L066xts_dec_two
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm6
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
cmpl $64,%eax
jb .L067xts_dec_three
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa %xmm1,%xmm7
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
movdqa %xmm5,(%esp)
movdqa %xmm6,16(%esp)
je .L068xts_dec_four
movdqa %xmm7,32(%esp)
pshufd $19,%xmm0,%xmm7
movdqa %xmm1,48(%esp)
paddq %xmm1,%xmm1
pand %xmm3,%xmm7
pxor %xmm1,%xmm7
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
pxor (%esp),%xmm2
movdqu 48(%esi),%xmm5
pxor 16(%esp),%xmm3
movdqu 64(%esi),%xmm6
pxor 32(%esp),%xmm4
leal 80(%esi),%esi
pxor 48(%esp),%xmm5
movdqa %xmm7,64(%esp)
pxor %xmm7,%xmm6
call _aesni_decrypt6
movaps 64(%esp),%xmm1
xorps (%esp),%xmm2
xorps 16(%esp),%xmm3
xorps 32(%esp),%xmm4
movups %xmm2,(%edi)
xorps 48(%esp),%xmm5
movups %xmm3,16(%edi)
xorps %xmm1,%xmm6
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
movups %xmm6,64(%edi)
leal 80(%edi),%edi
jmp .L069xts_dec_done
.align 16
.L065xts_dec_one:
movups (%esi),%xmm2
leal 16(%esi),%esi
xorps %xmm5,%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L070dec1_loop_12:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L070dec1_loop_12
.byte 102,15,56,223,209
xorps %xmm5,%xmm2
movups %xmm2,(%edi)
leal 16(%edi),%edi
movdqa %xmm5,%xmm1
jmp .L069xts_dec_done
.align 16
.L066xts_dec_two:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
leal 32(%esi),%esi
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
call _aesni_decrypt2
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
leal 32(%edi),%edi
movdqa %xmm6,%xmm1
jmp .L069xts_dec_done
.align 16
.L067xts_dec_three:
movaps %xmm1,%xmm7
movups (%esi),%xmm2
movups 16(%esi),%xmm3
movups 32(%esi),%xmm4
leal 48(%esi),%esi
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
xorps %xmm7,%xmm4
call _aesni_decrypt3
xorps %xmm5,%xmm2
xorps %xmm6,%xmm3
xorps %xmm7,%xmm4
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
leal 48(%edi),%edi
movdqa %xmm7,%xmm1
jmp .L069xts_dec_done
.align 16
.L068xts_dec_four:
movaps %xmm1,%xmm6
movups (%esi),%xmm2
movups 16(%esi),%xmm3
movups 32(%esi),%xmm4
xorps (%esp),%xmm2
movups 48(%esi),%xmm5
leal 64(%esi),%esi
xorps 16(%esp),%xmm3
xorps %xmm7,%xmm4
xorps %xmm6,%xmm5
call _aesni_decrypt4
xorps (%esp),%xmm2
xorps 16(%esp),%xmm3
xorps %xmm7,%xmm4
movups %xmm2,(%edi)
xorps %xmm6,%xmm5
movups %xmm3,16(%edi)
movups %xmm4,32(%edi)
movups %xmm5,48(%edi)
leal 64(%edi),%edi
movdqa %xmm6,%xmm1
jmp .L069xts_dec_done
.align 16
.L064xts_dec_done6x:
movl 112(%esp),%eax
andl $15,%eax
jz .L071xts_dec_ret
movl %eax,112(%esp)
jmp .L072xts_dec_only_one_more
.align 16
.L069xts_dec_done:
movl 112(%esp),%eax
pxor %xmm0,%xmm0
andl $15,%eax
jz .L071xts_dec_ret
pcmpgtd %xmm1,%xmm0
movl %eax,112(%esp)
pshufd $19,%xmm0,%xmm2
pxor %xmm0,%xmm0
movdqa 96(%esp),%xmm3
paddq %xmm1,%xmm1
pand %xmm3,%xmm2
pcmpgtd %xmm1,%xmm0
pxor %xmm2,%xmm1
.L072xts_dec_only_one_more:
pshufd $19,%xmm0,%xmm5
movdqa %xmm1,%xmm6
paddq %xmm1,%xmm1
pand %xmm3,%xmm5
pxor %xmm1,%xmm5
movl %ebp,%edx
movl %ebx,%ecx
movups (%esi),%xmm2
xorps %xmm5,%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L073dec1_loop_13:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L073dec1_loop_13
.byte 102,15,56,223,209
xorps %xmm5,%xmm2
movups %xmm2,(%edi)
.L074xts_dec_steal:
movzbl 16(%esi),%ecx
movzbl (%edi),%edx
leal 1(%esi),%esi
movb %cl,(%edi)
movb %dl,16(%edi)
leal 1(%edi),%edi
subl $1,%eax
jnz .L074xts_dec_steal
subl 112(%esp),%edi
movl %ebp,%edx
movl %ebx,%ecx
movups (%edi),%xmm2
xorps %xmm6,%xmm2
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L075dec1_loop_14:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L075dec1_loop_14
.byte 102,15,56,223,209
xorps %xmm6,%xmm2
movups %xmm2,(%edi)
.L071xts_dec_ret:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
movdqa %xmm0,(%esp)
pxor %xmm3,%xmm3
movdqa %xmm0,16(%esp)
pxor %xmm4,%xmm4
movdqa %xmm0,32(%esp)
pxor %xmm5,%xmm5
movdqa %xmm0,48(%esp)
pxor %xmm6,%xmm6
movdqa %xmm0,64(%esp)
pxor %xmm7,%xmm7
movdqa %xmm0,80(%esp)
movl 116(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_xts_decrypt,.-.L_aes_hw_xts_decrypt_begin
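// aes_hw_cbc_encrypt(in, out, length, key, ivec, enc): the last argument
// selects the direction.  CBC encryption is inherently serial, so it runs
// one block at a time; decryption runs six blocks in parallel and XORs in
// the previous ciphertext afterwards.  The .long data words 2767451785 and
// 2868115081 used in the tail handling encode short "rep movsb" / "rep stosb"
// sequences emitted as data.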
.globl aes_hw_cbc_encrypt
.hidden aes_hw_cbc_encrypt
.type aes_hw_cbc_encrypt,@function
.align 16
aes_hw_cbc_encrypt:
.L_aes_hw_cbc_encrypt_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl %esp,%ebx
movl 24(%esp),%edi
subl $24,%ebx
movl 28(%esp),%eax
andl $-16,%ebx
movl 32(%esp),%edx
movl 36(%esp),%ebp
testl %eax,%eax
jz .L076cbc_abort
cmpl $0,40(%esp)
xchgl %esp,%ebx
movups (%ebp),%xmm7
movl 240(%edx),%ecx
movl %edx,%ebp
movl %ebx,16(%esp)
movl %ecx,%ebx
je .L077cbc_decrypt
movaps %xmm7,%xmm2
cmpl $16,%eax
jb .L078cbc_enc_tail
subl $16,%eax
jmp .L079cbc_enc_loop
.align 16
.L079cbc_enc_loop:
movups (%esi),%xmm7
leal 16(%esi),%esi
movups (%edx),%xmm0
movups 16(%edx),%xmm1
xorps %xmm0,%xmm7
leal 32(%edx),%edx
xorps %xmm7,%xmm2
.L080enc1_loop_15:
.byte 102,15,56,220,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L080enc1_loop_15
.byte 102,15,56,221,209
movl %ebx,%ecx
movl %ebp,%edx
movups %xmm2,(%edi)
leal 16(%edi),%edi
subl $16,%eax
jnc .L079cbc_enc_loop
addl $16,%eax
jnz .L078cbc_enc_tail
movaps %xmm2,%xmm7
pxor %xmm2,%xmm2
jmp .L081cbc_ret
.L078cbc_enc_tail:
movl %eax,%ecx
.long 2767451785
movl $16,%ecx
subl %eax,%ecx
xorl %eax,%eax
.long 2868115081
leal -16(%edi),%edi
movl %ebx,%ecx
movl %edi,%esi
movl %ebp,%edx
jmp .L079cbc_enc_loop
.align 16
.L077cbc_decrypt:
cmpl $80,%eax
jbe .L082cbc_dec_tail
movaps %xmm7,(%esp)
subl $80,%eax
jmp .L083cbc_dec_loop6_enter
.align 16
.L084cbc_dec_loop6:
movaps %xmm0,(%esp)
movups %xmm7,(%edi)
leal 16(%edi),%edi
.L083cbc_dec_loop6_enter:
movdqu (%esi),%xmm2
movdqu 16(%esi),%xmm3
movdqu 32(%esi),%xmm4
movdqu 48(%esi),%xmm5
movdqu 64(%esi),%xmm6
movdqu 80(%esi),%xmm7
call _aesni_decrypt6
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps (%esp),%xmm2
xorps %xmm1,%xmm3
movups 32(%esi),%xmm1
xorps %xmm0,%xmm4
movups 48(%esi),%xmm0
xorps %xmm1,%xmm5
movups 64(%esi),%xmm1
xorps %xmm0,%xmm6
movups 80(%esi),%xmm0
xorps %xmm1,%xmm7
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
leal 96(%esi),%esi
movups %xmm4,32(%edi)
movl %ebx,%ecx
movups %xmm5,48(%edi)
movl %ebp,%edx
movups %xmm6,64(%edi)
leal 80(%edi),%edi
subl $96,%eax
ja .L084cbc_dec_loop6
movaps %xmm7,%xmm2
movaps %xmm0,%xmm7
addl $80,%eax
jle .L085cbc_dec_clear_tail_collected
movups %xmm2,(%edi)
leal 16(%edi),%edi
.L082cbc_dec_tail:
movups (%esi),%xmm2
movaps %xmm2,%xmm6
cmpl $16,%eax
jbe .L086cbc_dec_one
movups 16(%esi),%xmm3
movaps %xmm3,%xmm5
cmpl $32,%eax
jbe .L087cbc_dec_two
movups 32(%esi),%xmm4
cmpl $48,%eax
jbe .L088cbc_dec_three
movups 48(%esi),%xmm5
cmpl $64,%eax
jbe .L089cbc_dec_four
movups 64(%esi),%xmm6
movaps %xmm7,(%esp)
movups (%esi),%xmm2
xorps %xmm7,%xmm7
call _aesni_decrypt6
movups (%esi),%xmm1
movups 16(%esi),%xmm0
xorps (%esp),%xmm2
xorps %xmm1,%xmm3
movups 32(%esi),%xmm1
xorps %xmm0,%xmm4
movups 48(%esi),%xmm0
xorps %xmm1,%xmm5
movups 64(%esi),%xmm7
xorps %xmm0,%xmm6
movups %xmm2,(%edi)
movups %xmm3,16(%edi)
pxor %xmm3,%xmm3
movups %xmm4,32(%edi)
pxor %xmm4,%xmm4
movups %xmm5,48(%edi)
pxor %xmm5,%xmm5
leal 64(%edi),%edi
movaps %xmm6,%xmm2
pxor %xmm6,%xmm6
subl $80,%eax
jmp .L090cbc_dec_tail_collected
.align 16
.L086cbc_dec_one:
movups (%edx),%xmm0
movups 16(%edx),%xmm1
leal 32(%edx),%edx
xorps %xmm0,%xmm2
.L091dec1_loop_16:
.byte 102,15,56,222,209
decl %ecx
movups (%edx),%xmm1
leal 16(%edx),%edx
jnz .L091dec1_loop_16
.byte 102,15,56,223,209
xorps %xmm7,%xmm2
movaps %xmm6,%xmm7
subl $16,%eax
jmp .L090cbc_dec_tail_collected
.align 16
.L087cbc_dec_two:
call _aesni_decrypt2
xorps %xmm7,%xmm2
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
movaps %xmm3,%xmm2
pxor %xmm3,%xmm3
leal 16(%edi),%edi
movaps %xmm5,%xmm7
subl $32,%eax
jmp .L090cbc_dec_tail_collected
.align 16
.L088cbc_dec_three:
call _aesni_decrypt3
xorps %xmm7,%xmm2
xorps %xmm6,%xmm3
xorps %xmm5,%xmm4
movups %xmm2,(%edi)
movaps %xmm4,%xmm2
pxor %xmm4,%xmm4
movups %xmm3,16(%edi)
pxor %xmm3,%xmm3
leal 32(%edi),%edi
movups 32(%esi),%xmm7
subl $48,%eax
jmp .L090cbc_dec_tail_collected
.align 16
.L089cbc_dec_four:
call _aesni_decrypt4
movups 16(%esi),%xmm1
movups 32(%esi),%xmm0
xorps %xmm7,%xmm2
movups 48(%esi),%xmm7
xorps %xmm6,%xmm3
movups %xmm2,(%edi)
xorps %xmm1,%xmm4
movups %xmm3,16(%edi)
pxor %xmm3,%xmm3
xorps %xmm0,%xmm5
movups %xmm4,32(%edi)
pxor %xmm4,%xmm4
leal 48(%edi),%edi
movaps %xmm5,%xmm2
pxor %xmm5,%xmm5
subl $64,%eax
jmp .L090cbc_dec_tail_collected
.align 16
.L085cbc_dec_clear_tail_collected:
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
.L090cbc_dec_tail_collected:
andl $15,%eax
jnz .L092cbc_dec_tail_partial
movups %xmm2,(%edi)
pxor %xmm0,%xmm0
jmp .L081cbc_ret
.align 16
.L092cbc_dec_tail_partial:
movaps %xmm2,(%esp)
pxor %xmm0,%xmm0
movl $16,%ecx
movl %esp,%esi
subl %eax,%ecx
.long 2767451785
movdqa %xmm2,(%esp)
.L081cbc_ret:
movl 16(%esp),%esp
movl 36(%esp),%ebp
pxor %xmm2,%xmm2
pxor %xmm1,%xmm1
movups %xmm7,(%ebp)
pxor %xmm7,%xmm7
.L076cbc_abort:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size aes_hw_cbc_encrypt,.-.L_aes_hw_cbc_encrypt_begin
.hidden _aesni_set_encrypt_key
.type _aesni_set_encrypt_key,@function
.align 16
_aesni_set_encrypt_key:
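// Key-expansion helper: %eax = user key, %ecx = key length in bits, %edx = schedule.
// Returns 0 in %eax on success, -1 for a NULL key or schedule pointer, and -2 for
// key lengths other than 128, 192 or 256 bits.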
pushl %ebp
pushl %ebx
testl %eax,%eax
jz .L093bad_pointer
testl %edx,%edx
jz .L093bad_pointer
call .L094pic
.L094pic:
popl %ebx
leal .Lkey_const-.L094pic(%ebx),%ebx
leal OPENSSL_ia32cap_P-.Lkey_const(%ebx),%ebp
movups (%eax),%xmm0
xorps %xmm4,%xmm4
movl 4(%ebp),%ebp
leal 16(%edx),%edx
andl $268437504,%ebp
cmpl $256,%ecx
je .L09514rounds
cmpl $192,%ecx
je .L09612rounds
cmpl $128,%ecx
jne .L097bad_keybits
.align 16
.L09810rounds:
cmpl $268435456,%ebp
je .L09910rounds_alt
movl $9,%ecx
movups %xmm0,-16(%edx)
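// Each ".byte 102,15,58,223,200,rcon" below encodes aeskeygenassist $rcon,%xmm0,%xmm1;
// .L101key_128/.L100key_128_cold fold the result into the next 128-bit round key.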
.byte 102,15,58,223,200,1
call .L100key_128_cold
.byte 102,15,58,223,200,2
call .L101key_128
.byte 102,15,58,223,200,4
call .L101key_128
.byte 102,15,58,223,200,8
call .L101key_128
.byte 102,15,58,223,200,16
call .L101key_128
.byte 102,15,58,223,200,32
call .L101key_128
.byte 102,15,58,223,200,64
call .L101key_128
.byte 102,15,58,223,200,128
call .L101key_128
.byte 102,15,58,223,200,27
call .L101key_128
.byte 102,15,58,223,200,54
call .L101key_128
movups %xmm0,(%edx)
movl %ecx,80(%edx)
jmp .L102good_key
.align 16
.L101key_128:
movups %xmm0,(%edx)
leal 16(%edx),%edx
.L100key_128_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.align 16
.L09910rounds_alt:
movdqa (%ebx),%xmm5
movl $8,%ecx
movdqa 32(%ebx),%xmm4
movdqa %xmm0,%xmm2
movdqu %xmm0,-16(%edx)
.L103loop_key128:
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
leal 16(%edx),%edx
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,-16(%edx)
movdqa %xmm0,%xmm2
decl %ecx
jnz .L103loop_key128
movdqa 48(%ebx),%xmm4
.byte 102,15,56,0,197
.byte 102,15,56,221,196
pslld $1,%xmm4
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,(%edx)
movdqa %xmm0,%xmm2
.byte 102,15,56,0,197
.byte 102,15,56,221,196
movdqa %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm2,%xmm3
pslldq $4,%xmm2
pxor %xmm3,%xmm2
pxor %xmm2,%xmm0
movdqu %xmm0,16(%edx)
movl $9,%ecx
movl %ecx,96(%edx)
jmp .L102good_key
.align 16
.L09612rounds:
movq 16(%eax),%xmm2
cmpl $268435456,%ebp
je .L10412rounds_alt
movl $11,%ecx
movups %xmm0,-16(%edx)
.byte 102,15,58,223,202,1
call .L105key_192a_cold
.byte 102,15,58,223,202,2
call .L106key_192b
.byte 102,15,58,223,202,4
call .L107key_192a
.byte 102,15,58,223,202,8
call .L106key_192b
.byte 102,15,58,223,202,16
call .L107key_192a
.byte 102,15,58,223,202,32
call .L106key_192b
.byte 102,15,58,223,202,64
call .L107key_192a
.byte 102,15,58,223,202,128
call .L106key_192b
movups %xmm0,(%edx)
movl %ecx,48(%edx)
jmp .L102good_key
.align 16
.L107key_192a:
movups %xmm0,(%edx)
leal 16(%edx),%edx
.align 16
.L105key_192a_cold:
movaps %xmm2,%xmm5
.L108key_192b_warm:
shufps $16,%xmm0,%xmm4
movdqa %xmm2,%xmm3
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
pslldq $4,%xmm3
xorps %xmm4,%xmm0
pshufd $85,%xmm1,%xmm1
pxor %xmm3,%xmm2
pxor %xmm1,%xmm0
pshufd $255,%xmm0,%xmm3
pxor %xmm3,%xmm2
ret
.align 16
.L106key_192b:
movaps %xmm0,%xmm3
shufps $68,%xmm0,%xmm5
movups %xmm5,(%edx)
shufps $78,%xmm2,%xmm3
movups %xmm3,16(%edx)
leal 32(%edx),%edx
jmp .L108key_192b_warm
.align 16
.L10412rounds_alt:
movdqa 16(%ebx),%xmm5
movdqa 32(%ebx),%xmm4
movl $8,%ecx
movdqu %xmm0,-16(%edx)
.L109loop_key192:
movq %xmm2,(%edx)
movdqa %xmm2,%xmm1
.byte 102,15,56,0,213
.byte 102,15,56,221,212
pslld $1,%xmm4
leal 24(%edx),%edx
movdqa %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm3,%xmm0
pshufd $255,%xmm0,%xmm3
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pxor %xmm2,%xmm0
pxor %xmm3,%xmm2
movdqu %xmm0,-16(%edx)
decl %ecx
jnz .L109loop_key192
movl $11,%ecx
movl %ecx,32(%edx)
jmp .L102good_key
.align 16
.L09514rounds:
movups 16(%eax),%xmm2
leal 16(%edx),%edx
cmpl $268435456,%ebp
je .L11014rounds_alt
movl $13,%ecx
movups %xmm0,-32(%edx)
movups %xmm2,-16(%edx)
.byte 102,15,58,223,202,1
call .L111key_256a_cold
.byte 102,15,58,223,200,1
call .L112key_256b
.byte 102,15,58,223,202,2
call .L113key_256a
.byte 102,15,58,223,200,2
call .L112key_256b
.byte 102,15,58,223,202,4
call .L113key_256a
.byte 102,15,58,223,200,4
call .L112key_256b
.byte 102,15,58,223,202,8
call .L113key_256a
.byte 102,15,58,223,200,8
call .L112key_256b
.byte 102,15,58,223,202,16
call .L113key_256a
.byte 102,15,58,223,200,16
call .L112key_256b
.byte 102,15,58,223,202,32
call .L113key_256a
.byte 102,15,58,223,200,32
call .L112key_256b
.byte 102,15,58,223,202,64
call .L113key_256a
movups %xmm0,(%edx)
movl %ecx,16(%edx)
xorl %eax,%eax
jmp .L102good_key
.align 16
.L113key_256a:
movups %xmm2,(%edx)
leal 16(%edx),%edx
.L111key_256a_cold:
shufps $16,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $140,%xmm0,%xmm4
xorps %xmm4,%xmm0
shufps $255,%xmm1,%xmm1
xorps %xmm1,%xmm0
ret
.align 16
.L112key_256b:
movups %xmm0,(%edx)
leal 16(%edx),%edx
shufps $16,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $140,%xmm2,%xmm4
xorps %xmm4,%xmm2
shufps $170,%xmm1,%xmm1
xorps %xmm1,%xmm2
ret
.align 16
.L11014rounds_alt:
movdqa (%ebx),%xmm5
movdqa 32(%ebx),%xmm4
movl $7,%ecx
movdqu %xmm0,-32(%edx)
movdqa %xmm2,%xmm1
movdqu %xmm2,-16(%edx)
.L114loop_key256:
.byte 102,15,56,0,213
.byte 102,15,56,221,212
movdqa %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm0,%xmm3
pslldq $4,%xmm0
pxor %xmm3,%xmm0
pslld $1,%xmm4
pxor %xmm2,%xmm0
movdqu %xmm0,(%edx)
decl %ecx
jz .L115done_key256
pshufd $255,%xmm0,%xmm2
pxor %xmm3,%xmm3
.byte 102,15,56,221,211
movdqa %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm1,%xmm3
pslldq $4,%xmm1
pxor %xmm3,%xmm1
pxor %xmm1,%xmm2
movdqu %xmm2,16(%edx)
leal 32(%edx),%edx
movdqa %xmm2,%xmm1
jmp .L114loop_key256
.L115done_key256:
movl $13,%ecx
movl %ecx,16(%edx)
.L102good_key:
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
xorl %eax,%eax
popl %ebx
popl %ebp
ret
.align 4
.L093bad_pointer:
movl $-1,%eax
popl %ebx
popl %ebp
ret
.align 4
.L097bad_keybits:
pxor %xmm0,%xmm0
movl $-2,%eax
popl %ebx
popl %ebp
ret
.size _aesni_set_encrypt_key,.-_aesni_set_encrypt_key
.globl aes_hw_set_encrypt_key
.hidden aes_hw_set_encrypt_key
.type aes_hw_set_encrypt_key,@function
.align 16
aes_hw_set_encrypt_key:
.L_aes_hw_set_encrypt_key_begin:
#ifdef BORINGSSL_DISPATCH_TEST
pushl %ebx
pushl %edx
call .L116pic
.L116pic:
popl %ebx
leal BORINGSSL_function_hit+3-.L116pic(%ebx),%ebx
movl $1,%edx
movb %dl,(%ebx)
popl %edx
popl %ebx
#endif
movl 4(%esp),%eax
movl 8(%esp),%ecx
movl 12(%esp),%edx
call _aesni_set_encrypt_key
ret
.size aes_hw_set_encrypt_key,.-.L_aes_hw_set_encrypt_key_begin
.globl aes_hw_set_decrypt_key
.hidden aes_hw_set_decrypt_key
.type aes_hw_set_decrypt_key,@function
.align 16
aes_hw_set_decrypt_key:
.L_aes_hw_set_decrypt_key_begin:
movl 4(%esp),%eax
movl 8(%esp),%ecx
movl 12(%esp),%edx
call _aesni_set_encrypt_key
movl 12(%esp),%edx
shll $4,%ecx
testl %eax,%eax
jnz .L117dec_key_ret
leal 16(%edx,%ecx,1),%eax
movups (%edx),%xmm0
movups (%eax),%xmm1
movups %xmm0,(%eax)
movups %xmm1,(%edx)
leal 16(%edx),%edx
leal -16(%eax),%eax
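// Build the decryption schedule: swap the round keys end-for-end and run AESIMC
// (the ".byte 102,15,56,219,..." sequences) over the middle keys.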
.L118dec_key_inverse:
movups (%edx),%xmm0
movups (%eax),%xmm1
.byte 102,15,56,219,192
.byte 102,15,56,219,201
leal 16(%edx),%edx
leal -16(%eax),%eax
movups %xmm0,16(%eax)
movups %xmm1,-16(%edx)
cmpl %edx,%eax
ja .L118dec_key_inverse
movups (%edx),%xmm0
.byte 102,15,56,219,192
movups %xmm0,(%edx)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
xorl %eax,%eax
.L117dec_key_ret:
ret
.size aes_hw_set_decrypt_key,.-.L_aes_hw_set_decrypt_key_begin
.align 64
.Lkey_const:
.long 202313229,202313229,202313229,202313229
.long 67569157,67569157,67569157,67569157
.long 1,1,1,1
.long 27,27,27,27
.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69
.byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83
.byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115
.byte 115,108,46,111,114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-x86/crypto/fipsmodule/ghash-ssse3-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl gcm_gmult_ssse3
.hidden gcm_gmult_ssse3
.type gcm_gmult_ssse3,@function
.align 16
gcm_gmult_ssse3:
.L_gcm_gmult_ssse3_begin:
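// gcm_gmult_ssse3(Xi, Htable): multiplies the 16-byte GHASH state (first argument)
// by H using the 16-entry table (second argument). The three 5+5+6 row loops below
// use pshufb-based nibble lookups, avoiding data-dependent memory accesses.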
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%edi
movl 24(%esp),%esi
movdqu (%edi),%xmm0
call .L000pic_point
.L000pic_point:
popl %eax
movdqa .Lreverse_bytes-.L000pic_point(%eax),%xmm7
movdqa .Llow4_mask-.L000pic_point(%eax),%xmm2
.byte 102,15,56,0,199
movdqa %xmm2,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm0
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
movl $5,%eax
.L001loop_row_1:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz .L001loop_row_1
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movl $5,%eax
.L002loop_row_2:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz .L002loop_row_2
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movl $6,%eax
.L003loop_row_3:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz .L003loop_row_3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
.byte 102,15,56,0,215
movdqu %xmm2,(%edi)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size gcm_gmult_ssse3,.-.L_gcm_gmult_ssse3_begin
.globl gcm_ghash_ssse3
.hidden gcm_ghash_ssse3
.type gcm_ghash_ssse3,@function
.align 16
gcm_ghash_ssse3:
.L_gcm_ghash_ssse3_begin:
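// gcm_ghash_ssse3(Xi, Htable, in, len): XORs each 16-byte input block into the state
// and multiplies by H as in gcm_gmult_ssse3; len is rounded down to a multiple of 16
// and the table pointer is rewound by 256 bytes between blocks.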
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%edi
movl 24(%esp),%esi
movl 28(%esp),%edx
movl 32(%esp),%ecx
movdqu (%edi),%xmm0
call .L004pic_point
.L004pic_point:
popl %ebx
movdqa .Lreverse_bytes-.L004pic_point(%ebx),%xmm7
andl $-16,%ecx
.byte 102,15,56,0,199
pxor %xmm3,%xmm3
.L005loop_ghash:
movdqa .Llow4_mask-.L004pic_point(%ebx),%xmm2
movdqu (%edx),%xmm1
.byte 102,15,56,0,207
pxor %xmm1,%xmm0
movdqa %xmm2,%xmm1
pandn %xmm0,%xmm1
psrld $4,%xmm1
pand %xmm2,%xmm0
pxor %xmm2,%xmm2
movl $5,%eax
.L006loop_row_4:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz .L006loop_row_4
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movl $5,%eax
.L007loop_row_5:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz .L007loop_row_5
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movl $6,%eax
.L008loop_row_6:
movdqa (%esi),%xmm4
leal 16(%esi),%esi
movdqa %xmm2,%xmm6
.byte 102,15,58,15,243,1
movdqa %xmm6,%xmm3
psrldq $1,%xmm2
movdqa %xmm4,%xmm5
.byte 102,15,56,0,224
.byte 102,15,56,0,233
pxor %xmm5,%xmm2
movdqa %xmm4,%xmm5
psllq $60,%xmm5
movdqa %xmm5,%xmm6
pslldq $8,%xmm6
pxor %xmm6,%xmm3
psrldq $8,%xmm5
pxor %xmm5,%xmm2
psrlq $4,%xmm4
pxor %xmm4,%xmm2
subl $1,%eax
jnz .L008loop_row_6
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $1,%xmm3
pxor %xmm3,%xmm2
psrlq $5,%xmm3
pxor %xmm3,%xmm2
pxor %xmm3,%xmm3
movdqa %xmm2,%xmm0
leal -256(%esi),%esi
leal 16(%edx),%edx
subl $16,%ecx
jnz .L005loop_ghash
.byte 102,15,56,0,199
movdqu %xmm0,(%edi)
pxor %xmm0,%xmm0
pxor %xmm1,%xmm1
pxor %xmm2,%xmm2
pxor %xmm3,%xmm3
pxor %xmm4,%xmm4
pxor %xmm5,%xmm5
pxor %xmm6,%xmm6
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size gcm_ghash_ssse3,.-.L_gcm_ghash_ssse3_begin
.align 16
.Lreverse_bytes:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.align 16
.Llow4_mask:
.long 252645135,252645135,252645135,252645135
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-x86/crypto/fipsmodule/bn-586.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl bn_mul_add_words
.hidden bn_mul_add_words
.type bn_mul_add_words,@function
.align 16
bn_mul_add_words:
.L_bn_mul_add_words_begin:
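// bn_mul_add_words(r, a, n, w): r[i] += a[i]*w for n words with carry propagation;
// returns the final carry word in %eax. The SSE2 (pmuludq) path is used when bit 26
// of OPENSSL_ia32cap_P is set.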
call .L000PIC_me_up
.L000PIC_me_up:
popl %eax
leal OPENSSL_ia32cap_P-.L000PIC_me_up(%eax),%eax
btl $26,(%eax)
jnc .L001maw_non_sse2
movl 4(%esp),%eax
movl 8(%esp),%edx
movl 12(%esp),%ecx
movd 16(%esp),%mm0
pxor %mm1,%mm1
jmp .L002maw_sse2_entry
.align 16
.L003maw_sse2_unrolled:
movd (%eax),%mm3
paddq %mm3,%mm1
movd (%edx),%mm2
pmuludq %mm0,%mm2
movd 4(%edx),%mm4
pmuludq %mm0,%mm4
movd 8(%edx),%mm6
pmuludq %mm0,%mm6
movd 12(%edx),%mm7
pmuludq %mm0,%mm7
paddq %mm2,%mm1
movd 4(%eax),%mm3
paddq %mm4,%mm3
movd 8(%eax),%mm5
paddq %mm6,%mm5
movd 12(%eax),%mm4
paddq %mm4,%mm7
movd %mm1,(%eax)
movd 16(%edx),%mm2
pmuludq %mm0,%mm2
psrlq $32,%mm1
movd 20(%edx),%mm4
pmuludq %mm0,%mm4
paddq %mm3,%mm1
movd 24(%edx),%mm6
pmuludq %mm0,%mm6
movd %mm1,4(%eax)
psrlq $32,%mm1
movd 28(%edx),%mm3
addl $32,%edx
pmuludq %mm0,%mm3
paddq %mm5,%mm1
movd 16(%eax),%mm5
paddq %mm5,%mm2
movd %mm1,8(%eax)
psrlq $32,%mm1
paddq %mm7,%mm1
movd 20(%eax),%mm5
paddq %mm5,%mm4
movd %mm1,12(%eax)
psrlq $32,%mm1
paddq %mm2,%mm1
movd 24(%eax),%mm5
paddq %mm5,%mm6
movd %mm1,16(%eax)
psrlq $32,%mm1
paddq %mm4,%mm1
movd 28(%eax),%mm5
paddq %mm5,%mm3
movd %mm1,20(%eax)
psrlq $32,%mm1
paddq %mm6,%mm1
movd %mm1,24(%eax)
psrlq $32,%mm1
paddq %mm3,%mm1
movd %mm1,28(%eax)
leal 32(%eax),%eax
psrlq $32,%mm1
subl $8,%ecx
jz .L004maw_sse2_exit
.L002maw_sse2_entry:
testl $4294967288,%ecx
jnz .L003maw_sse2_unrolled
.align 4
.L005maw_sse2_loop:
movd (%edx),%mm2
movd (%eax),%mm3
pmuludq %mm0,%mm2
leal 4(%edx),%edx
paddq %mm3,%mm1
paddq %mm2,%mm1
movd %mm1,(%eax)
subl $1,%ecx
psrlq $32,%mm1
leal 4(%eax),%eax
jnz .L005maw_sse2_loop
.L004maw_sse2_exit:
movd %mm1,%eax
emms
ret
.align 16
.L001maw_non_sse2:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %esi,%esi
movl 20(%esp),%edi
movl 28(%esp),%ecx
movl 24(%esp),%ebx
andl $4294967288,%ecx
movl 32(%esp),%ebp
pushl %ecx
jz .L006maw_finish
.align 16
.L007maw_loop:
movl (%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl (%edi),%eax
adcl $0,%edx
movl %eax,(%edi)
movl %edx,%esi
movl 4(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 4(%edi),%eax
adcl $0,%edx
movl %eax,4(%edi)
movl %edx,%esi
movl 8(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 8(%edi),%eax
adcl $0,%edx
movl %eax,8(%edi)
movl %edx,%esi
movl 12(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 12(%edi),%eax
adcl $0,%edx
movl %eax,12(%edi)
movl %edx,%esi
movl 16(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 16(%edi),%eax
adcl $0,%edx
movl %eax,16(%edi)
movl %edx,%esi
movl 20(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 20(%edi),%eax
adcl $0,%edx
movl %eax,20(%edi)
movl %edx,%esi
movl 24(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 24(%edi),%eax
adcl $0,%edx
movl %eax,24(%edi)
movl %edx,%esi
movl 28(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 28(%edi),%eax
adcl $0,%edx
movl %eax,28(%edi)
movl %edx,%esi
subl $8,%ecx
leal 32(%ebx),%ebx
leal 32(%edi),%edi
jnz .L007maw_loop
.L006maw_finish:
movl 32(%esp),%ecx
andl $7,%ecx
jnz .L008maw_finish2
jmp .L009maw_end
.L008maw_finish2:
movl (%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl (%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,(%edi)
movl %edx,%esi
jz .L009maw_end
movl 4(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 4(%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,4(%edi)
movl %edx,%esi
jz .L009maw_end
movl 8(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 8(%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,8(%edi)
movl %edx,%esi
jz .L009maw_end
movl 12(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 12(%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,12(%edi)
movl %edx,%esi
jz .L009maw_end
movl 16(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 16(%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,16(%edi)
movl %edx,%esi
jz .L009maw_end
movl 20(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 20(%edi),%eax
adcl $0,%edx
decl %ecx
movl %eax,20(%edi)
movl %edx,%esi
jz .L009maw_end
movl 24(%ebx),%eax
mull %ebp
addl %esi,%eax
adcl $0,%edx
addl 24(%edi),%eax
adcl $0,%edx
movl %eax,24(%edi)
movl %edx,%esi
.L009maw_end:
movl %esi,%eax
popl %ecx
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_mul_add_words,.-.L_bn_mul_add_words_begin
.globl bn_mul_words
.hidden bn_mul_words
.type bn_mul_words,@function
.align 16
bn_mul_words:
.L_bn_mul_words_begin:
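// bn_mul_words(r, a, n, w): r[i] = a[i]*w plus the running carry, for n words;
// returns the final carry word in %eax.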
call .L010PIC_me_up
.L010PIC_me_up:
popl %eax
leal OPENSSL_ia32cap_P-.L010PIC_me_up(%eax),%eax
btl $26,(%eax)
jnc .L011mw_non_sse2
movl 4(%esp),%eax
movl 8(%esp),%edx
movl 12(%esp),%ecx
movd 16(%esp),%mm0
pxor %mm1,%mm1
.align 16
.L012mw_sse2_loop:
movd (%edx),%mm2
pmuludq %mm0,%mm2
leal 4(%edx),%edx
paddq %mm2,%mm1
movd %mm1,(%eax)
subl $1,%ecx
psrlq $32,%mm1
leal 4(%eax),%eax
jnz .L012mw_sse2_loop
movd %mm1,%eax
emms
ret
.align 16
.L011mw_non_sse2:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %esi,%esi
movl 20(%esp),%edi
movl 24(%esp),%ebx
movl 28(%esp),%ebp
movl 32(%esp),%ecx
andl $4294967288,%ebp
jz .L013mw_finish
.L014mw_loop:
movl (%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,(%edi)
movl %edx,%esi
movl 4(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,4(%edi)
movl %edx,%esi
movl 8(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,8(%edi)
movl %edx,%esi
movl 12(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,12(%edi)
movl %edx,%esi
movl 16(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,16(%edi)
movl %edx,%esi
movl 20(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,20(%edi)
movl %edx,%esi
movl 24(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,24(%edi)
movl %edx,%esi
movl 28(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,28(%edi)
movl %edx,%esi
addl $32,%ebx
addl $32,%edi
subl $8,%ebp
jz .L013mw_finish
jmp .L014mw_loop
.L013mw_finish:
movl 28(%esp),%ebp
andl $7,%ebp
jnz .L015mw_finish2
jmp .L016mw_end
.L015mw_finish2:
movl (%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,(%edi)
movl %edx,%esi
decl %ebp
jz .L016mw_end
movl 4(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,4(%edi)
movl %edx,%esi
decl %ebp
jz .L016mw_end
movl 8(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,8(%edi)
movl %edx,%esi
decl %ebp
jz .L016mw_end
movl 12(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,12(%edi)
movl %edx,%esi
decl %ebp
jz .L016mw_end
movl 16(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,16(%edi)
movl %edx,%esi
decl %ebp
jz .L016mw_end
movl 20(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,20(%edi)
movl %edx,%esi
decl %ebp
jz .L016mw_end
movl 24(%ebx),%eax
mull %ecx
addl %esi,%eax
adcl $0,%edx
movl %eax,24(%edi)
movl %edx,%esi
.L016mw_end:
movl %esi,%eax
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_mul_words,.-.L_bn_mul_words_begin
.globl bn_sqr_words
.hidden bn_sqr_words
.type bn_sqr_words,@function
.align 16
bn_sqr_words:
.L_bn_sqr_words_begin:
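// bn_sqr_words(r, a, n): stores the 64-bit square of each of the n input words
// into consecutive 64-bit slots of r.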
call .L017PIC_me_up
.L017PIC_me_up:
popl %eax
leal OPENSSL_ia32cap_P-.L017PIC_me_up(%eax),%eax
btl $26,(%eax)
jnc .L018sqr_non_sse2
movl 4(%esp),%eax
movl 8(%esp),%edx
movl 12(%esp),%ecx
.align 16
.L019sqr_sse2_loop:
movd (%edx),%mm0
pmuludq %mm0,%mm0
leal 4(%edx),%edx
movq %mm0,(%eax)
subl $1,%ecx
leal 8(%eax),%eax
jnz .L019sqr_sse2_loop
emms
ret
.align 16
.L018sqr_non_sse2:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%ebx
andl $4294967288,%ebx
jz .L020sw_finish
.L021sw_loop:
movl (%edi),%eax
mull %eax
movl %eax,(%esi)
movl %edx,4(%esi)
movl 4(%edi),%eax
mull %eax
movl %eax,8(%esi)
movl %edx,12(%esi)
movl 8(%edi),%eax
mull %eax
movl %eax,16(%esi)
movl %edx,20(%esi)
movl 12(%edi),%eax
mull %eax
movl %eax,24(%esi)
movl %edx,28(%esi)
movl 16(%edi),%eax
mull %eax
movl %eax,32(%esi)
movl %edx,36(%esi)
movl 20(%edi),%eax
mull %eax
movl %eax,40(%esi)
movl %edx,44(%esi)
movl 24(%edi),%eax
mull %eax
movl %eax,48(%esi)
movl %edx,52(%esi)
movl 28(%edi),%eax
mull %eax
movl %eax,56(%esi)
movl %edx,60(%esi)
addl $32,%edi
addl $64,%esi
subl $8,%ebx
jnz .L021sw_loop
.L020sw_finish:
movl 28(%esp),%ebx
andl $7,%ebx
jz .L022sw_end
movl (%edi),%eax
mull %eax
movl %eax,(%esi)
decl %ebx
movl %edx,4(%esi)
jz .L022sw_end
movl 4(%edi),%eax
mull %eax
movl %eax,8(%esi)
decl %ebx
movl %edx,12(%esi)
jz .L022sw_end
movl 8(%edi),%eax
mull %eax
movl %eax,16(%esi)
decl %ebx
movl %edx,20(%esi)
jz .L022sw_end
movl 12(%edi),%eax
mull %eax
movl %eax,24(%esi)
decl %ebx
movl %edx,28(%esi)
jz .L022sw_end
movl 16(%edi),%eax
mull %eax
movl %eax,32(%esi)
decl %ebx
movl %edx,36(%esi)
jz .L022sw_end
movl 20(%edi),%eax
mull %eax
movl %eax,40(%esi)
decl %ebx
movl %edx,44(%esi)
jz .L022sw_end
movl 24(%edi),%eax
mull %eax
movl %eax,48(%esi)
movl %edx,52(%esi)
.L022sw_end:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_sqr_words,.-.L_bn_sqr_words_begin
.globl bn_div_words
.hidden bn_div_words
.type bn_div_words,@function
.align 16
bn_div_words:
.L_bn_div_words_begin:
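// bn_div_words(h, l, d): divides the 64-bit value h:l (%edx:%eax) by d and
// returns the 32-bit quotient in %eax.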
movl 4(%esp),%edx
movl 8(%esp),%eax
movl 12(%esp),%ecx
divl %ecx
ret
.size bn_div_words,.-.L_bn_div_words_begin
.globl bn_add_words
.hidden bn_add_words
.type bn_add_words,@function
.align 16
bn_add_words:
.L_bn_add_words_begin:
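// bn_add_words(r, a, b, n): r[i] = a[i] + b[i] with carry propagation across
// n words; returns the final carry (0 or 1) in %eax.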
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%ebx
movl 24(%esp),%esi
movl 28(%esp),%edi
movl 32(%esp),%ebp
xorl %eax,%eax
andl $4294967288,%ebp
jz .L023aw_finish
.L024aw_loop:
movl (%esi),%ecx
movl (%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,(%ebx)
movl 4(%esi),%ecx
movl 4(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,4(%ebx)
movl 8(%esi),%ecx
movl 8(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,8(%ebx)
movl 12(%esi),%ecx
movl 12(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,12(%ebx)
movl 16(%esi),%ecx
movl 16(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,16(%ebx)
movl 20(%esi),%ecx
movl 20(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,20(%ebx)
movl 24(%esi),%ecx
movl 24(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,24(%ebx)
movl 28(%esi),%ecx
movl 28(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,28(%ebx)
addl $32,%esi
addl $32,%edi
addl $32,%ebx
subl $8,%ebp
jnz .L024aw_loop
.L023aw_finish:
movl 32(%esp),%ebp
andl $7,%ebp
jz .L025aw_end
movl (%esi),%ecx
movl (%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,(%ebx)
jz .L025aw_end
movl 4(%esi),%ecx
movl 4(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,4(%ebx)
jz .L025aw_end
movl 8(%esi),%ecx
movl 8(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,8(%ebx)
jz .L025aw_end
movl 12(%esi),%ecx
movl 12(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,12(%ebx)
jz .L025aw_end
movl 16(%esi),%ecx
movl 16(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,16(%ebx)
jz .L025aw_end
movl 20(%esi),%ecx
movl 20(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,20(%ebx)
jz .L025aw_end
movl 24(%esi),%ecx
movl 24(%edi),%edx
addl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
addl %edx,%ecx
adcl $0,%eax
movl %ecx,24(%ebx)
.L025aw_end:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_add_words,.-.L_bn_add_words_begin
.globl bn_sub_words
.hidden bn_sub_words
.type bn_sub_words,@function
.align 16
bn_sub_words:
.L_bn_sub_words_begin:
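// bn_sub_words(r, a, b, n): r[i] = a[i] - b[i] with borrow propagation across
// n words; returns the final borrow (0 or 1) in %eax.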
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%ebx
movl 24(%esp),%esi
movl 28(%esp),%edi
movl 32(%esp),%ebp
xorl %eax,%eax
andl $4294967288,%ebp
jz .L026aw_finish
.L027aw_loop:
movl (%esi),%ecx
movl (%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,(%ebx)
movl 4(%esi),%ecx
movl 4(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,4(%ebx)
movl 8(%esi),%ecx
movl 8(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,8(%ebx)
movl 12(%esi),%ecx
movl 12(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,12(%ebx)
movl 16(%esi),%ecx
movl 16(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,16(%ebx)
movl 20(%esi),%ecx
movl 20(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,20(%ebx)
movl 24(%esi),%ecx
movl 24(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,24(%ebx)
movl 28(%esi),%ecx
movl 28(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,28(%ebx)
addl $32,%esi
addl $32,%edi
addl $32,%ebx
subl $8,%ebp
jnz .L027aw_loop
.L026aw_finish:
movl 32(%esp),%ebp
andl $7,%ebp
jz .L028aw_end
movl (%esi),%ecx
movl (%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,(%ebx)
jz .L028aw_end
movl 4(%esi),%ecx
movl 4(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,4(%ebx)
jz .L028aw_end
movl 8(%esi),%ecx
movl 8(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,8(%ebx)
jz .L028aw_end
movl 12(%esi),%ecx
movl 12(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,12(%ebx)
jz .L028aw_end
movl 16(%esi),%ecx
movl 16(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,16(%ebx)
jz .L028aw_end
movl 20(%esi),%ecx
movl 20(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
decl %ebp
movl %ecx,20(%ebx)
jz .L028aw_end
movl 24(%esi),%ecx
movl 24(%edi),%edx
subl %eax,%ecx
movl $0,%eax
adcl %eax,%eax
subl %edx,%ecx
adcl $0,%eax
movl %ecx,24(%ebx)
.L028aw_end:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_sub_words,.-.L_bn_sub_words_begin
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-x86/crypto/fipsmodule/x86-mont.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl bn_mul_mont
.hidden bn_mul_mont
.type bn_mul_mont,@function
.align 16
bn_mul_mont:
.L_bn_mul_mont_begin:
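// bn_mul_mont(rp, ap, bp, np, n0, num): Montgomery multiplication
// rp = ap*bp*R^-1 mod np with R = 2^(32*num); the fifth argument points to the
// Montgomery constant n0. Returns 1 on success, or 0 without doing any work when
// num < 4 so the caller can fall back to a generic implementation.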
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
xorl %eax,%eax
movl 40(%esp),%edi
cmpl $4,%edi
jl .L000just_leave
leal 20(%esp),%esi
leal 24(%esp),%edx
addl $2,%edi
negl %edi
leal -32(%esp,%edi,4),%ebp
negl %edi
movl %ebp,%eax
subl %edx,%eax
andl $2047,%eax
subl %eax,%ebp
xorl %ebp,%edx
andl $2048,%edx
xorl $2048,%edx
subl %edx,%ebp
andl $-64,%ebp
movl %esp,%eax
subl %ebp,%eax
andl $-4096,%eax
movl %esp,%edx
leal (%ebp,%eax,1),%esp
movl (%esp),%eax
cmpl %ebp,%esp
ja .L001page_walk
jmp .L002page_walk_done
.align 16
.L001page_walk:
leal -4096(%esp),%esp
movl (%esp),%eax
cmpl %ebp,%esp
ja .L001page_walk
.L002page_walk_done:
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%ebp
movl 16(%esi),%esi
movl (%esi),%esi
movl %eax,4(%esp)
movl %ebx,8(%esp)
movl %ecx,12(%esp)
movl %ebp,16(%esp)
movl %esi,20(%esp)
leal -3(%edi),%ebx
movl %edx,24(%esp)
call .L003PIC_me_up
.L003PIC_me_up:
popl %eax
leal OPENSSL_ia32cap_P-.L003PIC_me_up(%eax),%eax
btl $26,(%eax)
jnc .L004non_sse2
movl $-1,%eax
movd %eax,%mm7
movl 8(%esp),%esi
movl 12(%esp),%edi
movl 16(%esp),%ebp
xorl %edx,%edx
xorl %ecx,%ecx
movd (%edi),%mm4
movd (%esi),%mm5
movd (%ebp),%mm3
pmuludq %mm4,%mm5
movq %mm5,%mm2
movq %mm5,%mm0
pand %mm7,%mm0
pmuludq 20(%esp),%mm5
pmuludq %mm5,%mm3
paddq %mm0,%mm3
movd 4(%ebp),%mm1
movd 4(%esi),%mm0
psrlq $32,%mm2
psrlq $32,%mm3
incl %ecx
.align 16
.L0051st:
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
movd 4(%ebp,%ecx,4),%mm1
paddq %mm0,%mm3
movd 4(%esi,%ecx,4),%mm0
psrlq $32,%mm2
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm3
leal 1(%ecx),%ecx
cmpl %ebx,%ecx
jl .L0051st
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
paddq %mm0,%mm3
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm2
psrlq $32,%mm3
paddq %mm2,%mm3
movq %mm3,32(%esp,%ebx,4)
incl %edx
.L006outer:
xorl %ecx,%ecx
movd (%edi,%edx,4),%mm4
movd (%esi),%mm5
movd 32(%esp),%mm6
movd (%ebp),%mm3
pmuludq %mm4,%mm5
paddq %mm6,%mm5
movq %mm5,%mm0
movq %mm5,%mm2
pand %mm7,%mm0
pmuludq 20(%esp),%mm5
pmuludq %mm5,%mm3
paddq %mm0,%mm3
movd 36(%esp),%mm6
movd 4(%ebp),%mm1
movd 4(%esi),%mm0
psrlq $32,%mm2
psrlq $32,%mm3
paddq %mm6,%mm2
incl %ecx
decl %ebx
.L007inner:
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
movd 36(%esp,%ecx,4),%mm6
pand %mm7,%mm0
movd 4(%ebp,%ecx,4),%mm1
paddq %mm0,%mm3
movd 4(%esi,%ecx,4),%mm0
psrlq $32,%mm2
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm3
paddq %mm6,%mm2
decl %ebx
leal 1(%ecx),%ecx
jnz .L007inner
movl %ecx,%ebx
pmuludq %mm4,%mm0
pmuludq %mm5,%mm1
paddq %mm0,%mm2
paddq %mm1,%mm3
movq %mm2,%mm0
pand %mm7,%mm0
paddq %mm0,%mm3
movd %mm3,28(%esp,%ecx,4)
psrlq $32,%mm2
psrlq $32,%mm3
movd 36(%esp,%ebx,4),%mm6
paddq %mm2,%mm3
paddq %mm6,%mm3
movq %mm3,32(%esp,%ebx,4)
leal 1(%edx),%edx
cmpl %ebx,%edx
jle .L006outer
emms
jmp .L008common_tail
.align 16
.L004non_sse2:
movl 8(%esp),%esi
leal 1(%ebx),%ebp
movl 12(%esp),%edi
xorl %ecx,%ecx
movl %esi,%edx
andl $1,%ebp
subl %edi,%edx
leal 4(%edi,%ebx,4),%eax
orl %edx,%ebp
movl (%edi),%edi
jz .L009bn_sqr_mont
movl %eax,28(%esp)
movl (%esi),%eax
xorl %edx,%edx
.align 16
.L010mull:
movl %edx,%ebp
mull %edi
addl %eax,%ebp
leal 1(%ecx),%ecx
adcl $0,%edx
movl (%esi,%ecx,4),%eax
cmpl %ebx,%ecx
movl %ebp,28(%esp,%ecx,4)
jl .L010mull
movl %edx,%ebp
mull %edi
movl 20(%esp),%edi
addl %ebp,%eax
movl 16(%esp),%esi
adcl $0,%edx
imull 32(%esp),%edi
movl %eax,32(%esp,%ebx,4)
xorl %ecx,%ecx
movl %edx,36(%esp,%ebx,4)
movl %ecx,40(%esp,%ebx,4)
movl (%esi),%eax
mull %edi
addl 32(%esp),%eax
movl 4(%esi),%eax
adcl $0,%edx
incl %ecx
jmp .L0112ndmadd
.align 16
.L0121stmadd:
movl %edx,%ebp
mull %edi
addl 32(%esp,%ecx,4),%ebp
leal 1(%ecx),%ecx
adcl $0,%edx
addl %eax,%ebp
movl (%esi,%ecx,4),%eax
adcl $0,%edx
cmpl %ebx,%ecx
movl %ebp,28(%esp,%ecx,4)
jl .L0121stmadd
movl %edx,%ebp
mull %edi
addl 32(%esp,%ebx,4),%eax
movl 20(%esp),%edi
adcl $0,%edx
movl 16(%esp),%esi
addl %eax,%ebp
adcl $0,%edx
imull 32(%esp),%edi
xorl %ecx,%ecx
addl 36(%esp,%ebx,4),%edx
movl %ebp,32(%esp,%ebx,4)
adcl $0,%ecx
movl (%esi),%eax
movl %edx,36(%esp,%ebx,4)
movl %ecx,40(%esp,%ebx,4)
mull %edi
addl 32(%esp),%eax
movl 4(%esi),%eax
adcl $0,%edx
movl $1,%ecx
.align 16
.L0112ndmadd:
movl %edx,%ebp
mull %edi
addl 32(%esp,%ecx,4),%ebp
leal 1(%ecx),%ecx
adcl $0,%edx
addl %eax,%ebp
movl (%esi,%ecx,4),%eax
adcl $0,%edx
cmpl %ebx,%ecx
movl %ebp,24(%esp,%ecx,4)
jl .L0112ndmadd
movl %edx,%ebp
mull %edi
addl 32(%esp,%ebx,4),%ebp
adcl $0,%edx
addl %eax,%ebp
adcl $0,%edx
movl %ebp,28(%esp,%ebx,4)
xorl %eax,%eax
movl 12(%esp),%ecx
addl 36(%esp,%ebx,4),%edx
adcl 40(%esp,%ebx,4),%eax
leal 4(%ecx),%ecx
movl %edx,32(%esp,%ebx,4)
cmpl 28(%esp),%ecx
movl %eax,36(%esp,%ebx,4)
je .L008common_tail
movl (%ecx),%edi
movl 8(%esp),%esi
movl %ecx,12(%esp)
xorl %ecx,%ecx
xorl %edx,%edx
movl (%esi),%eax
jmp .L0121stmadd
.align 16
.L009bn_sqr_mont:
movl %ebx,(%esp)
movl %ecx,12(%esp)
movl %edi,%eax
mull %edi
movl %eax,32(%esp)
movl %edx,%ebx
shrl $1,%edx
andl $1,%ebx
incl %ecx
.align 16
.L013sqr:
movl (%esi,%ecx,4),%eax
movl %edx,%ebp
mull %edi
addl %ebp,%eax
leal 1(%ecx),%ecx
adcl $0,%edx
leal (%ebx,%eax,2),%ebp
shrl $31,%eax
cmpl (%esp),%ecx
movl %eax,%ebx
movl %ebp,28(%esp,%ecx,4)
jl .L013sqr
movl (%esi,%ecx,4),%eax
movl %edx,%ebp
mull %edi
addl %ebp,%eax
movl 20(%esp),%edi
adcl $0,%edx
movl 16(%esp),%esi
leal (%ebx,%eax,2),%ebp
imull 32(%esp),%edi
shrl $31,%eax
movl %ebp,32(%esp,%ecx,4)
leal (%eax,%edx,2),%ebp
movl (%esi),%eax
shrl $31,%edx
movl %ebp,36(%esp,%ecx,4)
movl %edx,40(%esp,%ecx,4)
mull %edi
addl 32(%esp),%eax
movl %ecx,%ebx
adcl $0,%edx
movl 4(%esi),%eax
movl $1,%ecx
.align 16
.L0143rdmadd:
movl %edx,%ebp
mull %edi
addl 32(%esp,%ecx,4),%ebp
adcl $0,%edx
addl %eax,%ebp
movl 4(%esi,%ecx,4),%eax
adcl $0,%edx
movl %ebp,28(%esp,%ecx,4)
movl %edx,%ebp
mull %edi
addl 36(%esp,%ecx,4),%ebp
leal 2(%ecx),%ecx
adcl $0,%edx
addl %eax,%ebp
movl (%esi,%ecx,4),%eax
adcl $0,%edx
cmpl %ebx,%ecx
movl %ebp,24(%esp,%ecx,4)
jl .L0143rdmadd
movl %edx,%ebp
mull %edi
addl 32(%esp,%ebx,4),%ebp
adcl $0,%edx
addl %eax,%ebp
adcl $0,%edx
movl %ebp,28(%esp,%ebx,4)
movl 12(%esp),%ecx
xorl %eax,%eax
movl 8(%esp),%esi
addl 36(%esp,%ebx,4),%edx
adcl 40(%esp,%ebx,4),%eax
movl %edx,32(%esp,%ebx,4)
cmpl %ebx,%ecx
movl %eax,36(%esp,%ebx,4)
je .L008common_tail
movl 4(%esi,%ecx,4),%edi
leal 1(%ecx),%ecx
movl %edi,%eax
movl %ecx,12(%esp)
mull %edi
addl 32(%esp,%ecx,4),%eax
adcl $0,%edx
movl %eax,32(%esp,%ecx,4)
xorl %ebp,%ebp
cmpl %ebx,%ecx
leal 1(%ecx),%ecx
je .L015sqrlast
movl %edx,%ebx
shrl $1,%edx
andl $1,%ebx
.align 16
.L016sqradd:
movl (%esi,%ecx,4),%eax
movl %edx,%ebp
mull %edi
addl %ebp,%eax
leal (%eax,%eax,1),%ebp
adcl $0,%edx
shrl $31,%eax
addl 32(%esp,%ecx,4),%ebp
leal 1(%ecx),%ecx
adcl $0,%eax
addl %ebx,%ebp
adcl $0,%eax
cmpl (%esp),%ecx
movl %ebp,28(%esp,%ecx,4)
movl %eax,%ebx
jle .L016sqradd
movl %edx,%ebp
addl %edx,%edx
shrl $31,%ebp
addl %ebx,%edx
adcl $0,%ebp
.L015sqrlast:
movl 20(%esp),%edi
movl 16(%esp),%esi
imull 32(%esp),%edi
addl 32(%esp,%ecx,4),%edx
movl (%esi),%eax
adcl $0,%ebp
movl %edx,32(%esp,%ecx,4)
movl %ebp,36(%esp,%ecx,4)
mull %edi
addl 32(%esp),%eax
leal -1(%ecx),%ebx
adcl $0,%edx
movl $1,%ecx
movl 4(%esi),%eax
jmp .L0143rdmadd
.align 16
.L008common_tail:
movl 16(%esp),%ebp
movl 4(%esp),%edi
leal 32(%esp),%esi
movl (%esi),%eax
movl %ebx,%ecx
xorl %edx,%edx
.align 16
.L017sub:
sbbl (%ebp,%edx,4),%eax
movl %eax,(%edi,%edx,4)
decl %ecx
movl 4(%esi,%edx,4),%eax
leal 1(%edx),%edx
jge .L017sub
sbbl $0,%eax
movl $-1,%edx
xorl %eax,%edx
jmp .L018copy
.align 16
.L018copy:
movl 32(%esp,%ebx,4),%esi
movl (%edi,%ebx,4),%ebp
movl %ecx,32(%esp,%ebx,4)
andl %eax,%esi
andl %edx,%ebp
orl %esi,%ebp
movl %ebp,(%edi,%ebx,4)
decl %ebx
jge .L018copy
movl 24(%esp),%esp
movl $1,%eax
.L000just_leave:
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size bn_mul_mont,.-.L_bn_mul_mont_begin
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105
.byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56
.byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121
.byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46
.byte 111,114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
// thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-x86/crypto/fipsmodule/sha512-586.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl sha512_block_data_order
.hidden sha512_block_data_order
.type sha512_block_data_order,@function
.align 16
sha512_block_data_order:
.L_sha512_block_data_order_begin:
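// sha512_block_data_order(state, data, num): processes num 128-byte blocks from
// data into the eight 64-bit chaining values at state, choosing between the plain
// x86, SSE2 and SSSE3 code paths according to OPENSSL_ia32cap_P.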
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%esi
movl 24(%esp),%edi
movl 28(%esp),%eax
movl %esp,%ebx
call .L000pic_point
.L000pic_point:
popl %ebp
leal .L001K512-.L000pic_point(%ebp),%ebp
subl $16,%esp
andl $-64,%esp
shll $7,%eax
addl %edi,%eax
movl %esi,(%esp)
movl %edi,4(%esp)
movl %eax,8(%esp)
movl %ebx,12(%esp)
leal OPENSSL_ia32cap_P-.L001K512(%ebp),%edx
movl (%edx),%ecx
testl $67108864,%ecx
jz .L002loop_x86
movl 4(%edx),%edx
movq (%esi),%mm0
andl $16777216,%ecx
movq 8(%esi),%mm1
andl $512,%edx
movq 16(%esi),%mm2
orl %edx,%ecx
movq 24(%esi),%mm3
movq 32(%esi),%mm4
movq 40(%esi),%mm5
movq 48(%esi),%mm6
movq 56(%esi),%mm7
cmpl $16777728,%ecx
je .L003SSSE3
subl $80,%esp
jmp .L004loop_sse2
.align 16
.L004loop_sse2:
movq %mm1,8(%esp)
movq %mm2,16(%esp)
movq %mm3,24(%esp)
movq %mm5,40(%esp)
movq %mm6,48(%esp)
pxor %mm1,%mm2
movq %mm7,56(%esp)
movq %mm0,%mm3
movl (%edi),%eax
movl 4(%edi),%ebx
addl $8,%edi
movl $15,%edx
bswap %eax
bswap %ebx
jmp .L00500_14_sse2
.align 16
.L00500_14_sse2:
movd %eax,%mm1
movl (%edi),%eax
movd %ebx,%mm7
movl 4(%edi),%ebx
addl $8,%edi
bswap %eax
bswap %ebx
punpckldq %mm1,%mm7
movq %mm4,%mm1
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
movq %mm3,%mm0
movq %mm7,72(%esp)
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
paddq (%ebp),%mm7
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
subl $8,%esp
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 40(%esp),%mm5
paddq %mm2,%mm3
movq %mm0,%mm2
addl $8,%ebp
paddq %mm6,%mm3
movq 48(%esp),%mm6
decl %edx
jnz .L00500_14_sse2
movd %eax,%mm1
movd %ebx,%mm7
punpckldq %mm1,%mm7
movq %mm4,%mm1
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
movq %mm3,%mm0
movq %mm7,72(%esp)
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
paddq (%ebp),%mm7
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
subl $8,%esp
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 192(%esp),%mm7
paddq %mm2,%mm3
movq %mm0,%mm2
addl $8,%ebp
paddq %mm6,%mm3
pxor %mm0,%mm0
movl $32,%edx
jmp .L00616_79_sse2
.align 16
.L00616_79_sse2:
movq 88(%esp),%mm5
movq %mm7,%mm1
psrlq $1,%mm7
movq %mm5,%mm6
psrlq $6,%mm5
psllq $56,%mm1
paddq %mm3,%mm0
movq %mm7,%mm3
psrlq $6,%mm7
pxor %mm1,%mm3
psllq $7,%mm1
pxor %mm7,%mm3
psrlq $1,%mm7
pxor %mm1,%mm3
movq %mm5,%mm1
psrlq $13,%mm5
pxor %mm3,%mm7
psllq $3,%mm6
pxor %mm5,%mm1
paddq 200(%esp),%mm7
pxor %mm6,%mm1
psrlq $42,%mm5
paddq 128(%esp),%mm7
pxor %mm5,%mm1
psllq $42,%mm6
movq 40(%esp),%mm5
pxor %mm6,%mm1
movq 48(%esp),%mm6
paddq %mm1,%mm7
movq %mm4,%mm1
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
movq %mm7,72(%esp)
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
paddq (%ebp),%mm7
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
subl $8,%esp
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 192(%esp),%mm7
paddq %mm6,%mm2
addl $8,%ebp
movq 88(%esp),%mm5
movq %mm7,%mm1
psrlq $1,%mm7
movq %mm5,%mm6
psrlq $6,%mm5
psllq $56,%mm1
paddq %mm3,%mm2
movq %mm7,%mm3
psrlq $6,%mm7
pxor %mm1,%mm3
psllq $7,%mm1
pxor %mm7,%mm3
psrlq $1,%mm7
pxor %mm1,%mm3
movq %mm5,%mm1
psrlq $13,%mm5
pxor %mm3,%mm7
psllq $3,%mm6
pxor %mm5,%mm1
paddq 200(%esp),%mm7
pxor %mm6,%mm1
psrlq $42,%mm5
paddq 128(%esp),%mm7
pxor %mm5,%mm1
psllq $42,%mm6
movq 40(%esp),%mm5
pxor %mm6,%mm1
movq 48(%esp),%mm6
paddq %mm1,%mm7
movq %mm4,%mm1
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
movq %mm7,72(%esp)
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
paddq (%ebp),%mm7
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
subl $8,%esp
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 192(%esp),%mm7
paddq %mm6,%mm0
addl $8,%ebp
decl %edx
jnz .L00616_79_sse2
paddq %mm3,%mm0
movq 8(%esp),%mm1
movq 24(%esp),%mm3
movq 40(%esp),%mm5
movq 48(%esp),%mm6
movq 56(%esp),%mm7
pxor %mm1,%mm2
paddq (%esi),%mm0
paddq 8(%esi),%mm1
paddq 16(%esi),%mm2
paddq 24(%esi),%mm3
paddq 32(%esi),%mm4
paddq 40(%esi),%mm5
paddq 48(%esi),%mm6
paddq 56(%esi),%mm7
movl $640,%eax
movq %mm0,(%esi)
movq %mm1,8(%esi)
movq %mm2,16(%esi)
movq %mm3,24(%esi)
movq %mm4,32(%esi)
movq %mm5,40(%esi)
movq %mm6,48(%esi)
movq %mm7,56(%esi)
leal (%esp,%eax,1),%esp
subl %eax,%ebp
cmpl 88(%esp),%edi
jb .L004loop_sse2
movl 92(%esp),%esp
emms
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 32
.L003SSSE3:
leal -64(%esp),%edx
subl $256,%esp
movdqa 640(%ebp),%xmm1
movdqu (%edi),%xmm0
.byte 102,15,56,0,193
movdqa (%ebp),%xmm3
movdqa %xmm1,%xmm2
movdqu 16(%edi),%xmm1
paddq %xmm0,%xmm3
.byte 102,15,56,0,202
movdqa %xmm3,-128(%edx)
movdqa 16(%ebp),%xmm4
movdqa %xmm2,%xmm3
movdqu 32(%edi),%xmm2
paddq %xmm1,%xmm4
.byte 102,15,56,0,211
movdqa %xmm4,-112(%edx)
movdqa 32(%ebp),%xmm5
movdqa %xmm3,%xmm4
movdqu 48(%edi),%xmm3
paddq %xmm2,%xmm5
.byte 102,15,56,0,220
movdqa %xmm5,-96(%edx)
movdqa 48(%ebp),%xmm6
movdqa %xmm4,%xmm5
movdqu 64(%edi),%xmm4
paddq %xmm3,%xmm6
.byte 102,15,56,0,229
movdqa %xmm6,-80(%edx)
movdqa 64(%ebp),%xmm7
movdqa %xmm5,%xmm6
movdqu 80(%edi),%xmm5
paddq %xmm4,%xmm7
.byte 102,15,56,0,238
movdqa %xmm7,-64(%edx)
movdqa %xmm0,(%edx)
movdqa 80(%ebp),%xmm0
movdqa %xmm6,%xmm7
movdqu 96(%edi),%xmm6
paddq %xmm5,%xmm0
.byte 102,15,56,0,247
movdqa %xmm0,-48(%edx)
movdqa %xmm1,16(%edx)
movdqa 96(%ebp),%xmm1
movdqa %xmm7,%xmm0
movdqu 112(%edi),%xmm7
paddq %xmm6,%xmm1
.byte 102,15,56,0,248
movdqa %xmm1,-32(%edx)
movdqa %xmm2,32(%edx)
movdqa 112(%ebp),%xmm2
movdqa (%edx),%xmm0
paddq %xmm7,%xmm2
movdqa %xmm2,-16(%edx)
nop
.align 32
.L007loop_ssse3:
movdqa 16(%edx),%xmm2
movdqa %xmm3,48(%edx)
leal 128(%ebp),%ebp
movq %mm1,8(%esp)
movl %edi,%ebx
movq %mm2,16(%esp)
leal 128(%edi),%edi
movq %mm3,24(%esp)
cmpl %eax,%edi
movq %mm5,40(%esp)
cmovbl %edi,%ebx
movq %mm6,48(%esp)
movl $4,%ecx
pxor %mm1,%mm2
movq %mm7,56(%esp)
pxor %mm3,%mm3
jmp .L00800_47_ssse3
.align 32
.L00800_47_ssse3:
movdqa %xmm5,%xmm3
movdqa %xmm2,%xmm1
.byte 102,15,58,15,208,8
movdqa %xmm4,(%edx)
.byte 102,15,58,15,220,8
movdqa %xmm2,%xmm4
psrlq $7,%xmm2
paddq %xmm3,%xmm0
movdqa %xmm4,%xmm3
psrlq $1,%xmm4
psllq $56,%xmm3
pxor %xmm4,%xmm2
psrlq $7,%xmm4
pxor %xmm3,%xmm2
psllq $7,%xmm3
pxor %xmm4,%xmm2
movdqa %xmm7,%xmm4
pxor %xmm3,%xmm2
movdqa %xmm7,%xmm3
psrlq $6,%xmm4
paddq %xmm2,%xmm0
movdqa %xmm7,%xmm2
psrlq $19,%xmm3
psllq $3,%xmm2
pxor %xmm3,%xmm4
psrlq $42,%xmm3
pxor %xmm2,%xmm4
psllq $42,%xmm2
pxor %xmm3,%xmm4
movdqa 32(%edx),%xmm3
pxor %xmm2,%xmm4
movdqa (%ebp),%xmm2
movq %mm4,%mm1
paddq %xmm4,%xmm0
movq -128(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
paddq %xmm0,%xmm2
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 32(%esp),%mm5
paddq %mm6,%mm2
movq 40(%esp),%mm6
movq %mm4,%mm1
movq -120(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,24(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,56(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 48(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 16(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq (%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 24(%esp),%mm5
paddq %mm6,%mm0
movq 32(%esp),%mm6
movdqa %xmm2,-128(%edx)
movdqa %xmm6,%xmm4
movdqa %xmm3,%xmm2
.byte 102,15,58,15,217,8
movdqa %xmm5,16(%edx)
.byte 102,15,58,15,229,8
movdqa %xmm3,%xmm5
psrlq $7,%xmm3
paddq %xmm4,%xmm1
movdqa %xmm5,%xmm4
psrlq $1,%xmm5
psllq $56,%xmm4
pxor %xmm5,%xmm3
psrlq $7,%xmm5
pxor %xmm4,%xmm3
psllq $7,%xmm4
pxor %xmm5,%xmm3
movdqa %xmm0,%xmm5
pxor %xmm4,%xmm3
movdqa %xmm0,%xmm4
psrlq $6,%xmm5
paddq %xmm3,%xmm1
movdqa %xmm0,%xmm3
psrlq $19,%xmm4
psllq $3,%xmm3
pxor %xmm4,%xmm5
psrlq $42,%xmm4
pxor %xmm3,%xmm5
psllq $42,%xmm3
pxor %xmm4,%xmm5
movdqa 48(%edx),%xmm4
pxor %xmm3,%xmm5
movdqa 16(%ebp),%xmm3
movq %mm4,%mm1
paddq %xmm5,%xmm1
movq -112(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,16(%esp)
paddq %xmm1,%xmm3
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,48(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 40(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 8(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 56(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 16(%esp),%mm5
paddq %mm6,%mm2
movq 24(%esp),%mm6
movq %mm4,%mm1
movq -104(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,8(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,40(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 32(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq (%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 48(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 8(%esp),%mm5
paddq %mm6,%mm0
movq 16(%esp),%mm6
movdqa %xmm3,-112(%edx)
movdqa %xmm7,%xmm5
movdqa %xmm4,%xmm3
.byte 102,15,58,15,226,8
movdqa %xmm6,32(%edx)
.byte 102,15,58,15,238,8
movdqa %xmm4,%xmm6
psrlq $7,%xmm4
paddq %xmm5,%xmm2
movdqa %xmm6,%xmm5
psrlq $1,%xmm6
psllq $56,%xmm5
pxor %xmm6,%xmm4
psrlq $7,%xmm6
pxor %xmm5,%xmm4
psllq $7,%xmm5
pxor %xmm6,%xmm4
movdqa %xmm1,%xmm6
pxor %xmm5,%xmm4
movdqa %xmm1,%xmm5
psrlq $6,%xmm6
paddq %xmm4,%xmm2
movdqa %xmm1,%xmm4
psrlq $19,%xmm5
psllq $3,%xmm4
pxor %xmm5,%xmm6
psrlq $42,%xmm5
pxor %xmm4,%xmm6
psllq $42,%xmm4
pxor %xmm5,%xmm6
movdqa (%edx),%xmm5
pxor %xmm4,%xmm6
movdqa 32(%ebp),%xmm4
movq %mm4,%mm1
paddq %xmm6,%xmm2
movq -96(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,(%esp)
paddq %xmm2,%xmm4
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,32(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 24(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 56(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 40(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq (%esp),%mm5
paddq %mm6,%mm2
movq 8(%esp),%mm6
movq %mm4,%mm1
movq -88(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,56(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,24(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 16(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 48(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 32(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 56(%esp),%mm5
paddq %mm6,%mm0
movq (%esp),%mm6
movdqa %xmm4,-96(%edx)
movdqa %xmm0,%xmm6
movdqa %xmm5,%xmm4
.byte 102,15,58,15,235,8
movdqa %xmm7,48(%edx)
.byte 102,15,58,15,247,8
movdqa %xmm5,%xmm7
psrlq $7,%xmm5
paddq %xmm6,%xmm3
movdqa %xmm7,%xmm6
psrlq $1,%xmm7
psllq $56,%xmm6
pxor %xmm7,%xmm5
psrlq $7,%xmm7
pxor %xmm6,%xmm5
psllq $7,%xmm6
pxor %xmm7,%xmm5
movdqa %xmm2,%xmm7
pxor %xmm6,%xmm5
movdqa %xmm2,%xmm6
psrlq $6,%xmm7
paddq %xmm5,%xmm3
movdqa %xmm2,%xmm5
psrlq $19,%xmm6
psllq $3,%xmm5
pxor %xmm6,%xmm7
psrlq $42,%xmm6
pxor %xmm5,%xmm7
psllq $42,%xmm5
pxor %xmm6,%xmm7
movdqa 16(%edx),%xmm6
pxor %xmm5,%xmm7
movdqa 48(%ebp),%xmm5
movq %mm4,%mm1
paddq %xmm7,%xmm3
movq -80(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,48(%esp)
paddq %xmm3,%xmm5
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,16(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 8(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 40(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 24(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 48(%esp),%mm5
paddq %mm6,%mm2
movq 56(%esp),%mm6
movq %mm4,%mm1
movq -72(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,40(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,8(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq (%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 32(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 16(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 40(%esp),%mm5
paddq %mm6,%mm0
movq 48(%esp),%mm6
movdqa %xmm5,-80(%edx)
movdqa %xmm1,%xmm7
movdqa %xmm6,%xmm5
.byte 102,15,58,15,244,8
movdqa %xmm0,(%edx)
.byte 102,15,58,15,248,8
movdqa %xmm6,%xmm0
psrlq $7,%xmm6
paddq %xmm7,%xmm4
movdqa %xmm0,%xmm7
psrlq $1,%xmm0
psllq $56,%xmm7
pxor %xmm0,%xmm6
psrlq $7,%xmm0
pxor %xmm7,%xmm6
psllq $7,%xmm7
pxor %xmm0,%xmm6
movdqa %xmm3,%xmm0
pxor %xmm7,%xmm6
movdqa %xmm3,%xmm7
psrlq $6,%xmm0
paddq %xmm6,%xmm4
movdqa %xmm3,%xmm6
psrlq $19,%xmm7
psllq $3,%xmm6
pxor %xmm7,%xmm0
psrlq $42,%xmm7
pxor %xmm6,%xmm0
psllq $42,%xmm6
pxor %xmm7,%xmm0
movdqa 32(%edx),%xmm7
pxor %xmm6,%xmm0
movdqa 64(%ebp),%xmm6
movq %mm4,%mm1
paddq %xmm0,%xmm4
movq -64(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
paddq %xmm4,%xmm6
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 32(%esp),%mm5
paddq %mm6,%mm2
movq 40(%esp),%mm6
movq %mm4,%mm1
movq -56(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,24(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,56(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 48(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 16(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq (%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 24(%esp),%mm5
paddq %mm6,%mm0
movq 32(%esp),%mm6
movdqa %xmm6,-64(%edx)
movdqa %xmm2,%xmm0
movdqa %xmm7,%xmm6
.byte 102,15,58,15,253,8
movdqa %xmm1,16(%edx)
.byte 102,15,58,15,193,8
movdqa %xmm7,%xmm1
psrlq $7,%xmm7
paddq %xmm0,%xmm5
movdqa %xmm1,%xmm0
psrlq $1,%xmm1
psllq $56,%xmm0
pxor %xmm1,%xmm7
psrlq $7,%xmm1
pxor %xmm0,%xmm7
psllq $7,%xmm0
pxor %xmm1,%xmm7
movdqa %xmm4,%xmm1
pxor %xmm0,%xmm7
movdqa %xmm4,%xmm0
psrlq $6,%xmm1
paddq %xmm7,%xmm5
movdqa %xmm4,%xmm7
psrlq $19,%xmm0
psllq $3,%xmm7
pxor %xmm0,%xmm1
psrlq $42,%xmm0
pxor %xmm7,%xmm1
psllq $42,%xmm7
pxor %xmm0,%xmm1
movdqa 48(%edx),%xmm0
pxor %xmm7,%xmm1
movdqa 80(%ebp),%xmm7
movq %mm4,%mm1
paddq %xmm1,%xmm5
movq -48(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,16(%esp)
paddq %xmm5,%xmm7
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,48(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 40(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 8(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 56(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 16(%esp),%mm5
paddq %mm6,%mm2
movq 24(%esp),%mm6
movq %mm4,%mm1
movq -40(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,8(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,40(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 32(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq (%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 48(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 8(%esp),%mm5
paddq %mm6,%mm0
movq 16(%esp),%mm6
movdqa %xmm7,-48(%edx)
movdqa %xmm3,%xmm1
movdqa %xmm0,%xmm7
.byte 102,15,58,15,198,8
movdqa %xmm2,32(%edx)
.byte 102,15,58,15,202,8
movdqa %xmm0,%xmm2
psrlq $7,%xmm0
paddq %xmm1,%xmm6
movdqa %xmm2,%xmm1
psrlq $1,%xmm2
psllq $56,%xmm1
pxor %xmm2,%xmm0
psrlq $7,%xmm2
pxor %xmm1,%xmm0
psllq $7,%xmm1
pxor %xmm2,%xmm0
movdqa %xmm5,%xmm2
pxor %xmm1,%xmm0
movdqa %xmm5,%xmm1
psrlq $6,%xmm2
paddq %xmm0,%xmm6
movdqa %xmm5,%xmm0
psrlq $19,%xmm1
psllq $3,%xmm0
pxor %xmm1,%xmm2
psrlq $42,%xmm1
pxor %xmm0,%xmm2
psllq $42,%xmm0
pxor %xmm1,%xmm2
movdqa (%edx),%xmm1
pxor %xmm0,%xmm2
movdqa 96(%ebp),%xmm0
movq %mm4,%mm1
paddq %xmm2,%xmm6
movq -32(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,(%esp)
paddq %xmm6,%xmm0
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,32(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 24(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 56(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 40(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq (%esp),%mm5
paddq %mm6,%mm2
movq 8(%esp),%mm6
movq %mm4,%mm1
movq -24(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,56(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,24(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 16(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 48(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 32(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 56(%esp),%mm5
paddq %mm6,%mm0
movq (%esp),%mm6
movdqa %xmm0,-32(%edx)
movdqa %xmm4,%xmm2
movdqa %xmm1,%xmm0
.byte 102,15,58,15,207,8
movdqa %xmm3,48(%edx)
.byte 102,15,58,15,211,8
movdqa %xmm1,%xmm3
psrlq $7,%xmm1
paddq %xmm2,%xmm7
movdqa %xmm3,%xmm2
psrlq $1,%xmm3
psllq $56,%xmm2
pxor %xmm3,%xmm1
psrlq $7,%xmm3
pxor %xmm2,%xmm1
psllq $7,%xmm2
pxor %xmm3,%xmm1
movdqa %xmm6,%xmm3
pxor %xmm2,%xmm1
movdqa %xmm6,%xmm2
psrlq $6,%xmm3
paddq %xmm1,%xmm7
movdqa %xmm6,%xmm1
psrlq $19,%xmm2
psllq $3,%xmm1
pxor %xmm2,%xmm3
psrlq $42,%xmm2
pxor %xmm1,%xmm3
psllq $42,%xmm1
pxor %xmm2,%xmm3
movdqa 16(%edx),%xmm2
pxor %xmm1,%xmm3
movdqa 112(%ebp),%xmm1
movq %mm4,%mm1
paddq %xmm3,%xmm7
movq -16(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,48(%esp)
paddq %xmm7,%xmm1
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,16(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 8(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 40(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 24(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 48(%esp),%mm5
paddq %mm6,%mm2
movq 56(%esp),%mm6
movq %mm4,%mm1
movq -8(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,40(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,8(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq (%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 32(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 16(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 40(%esp),%mm5
paddq %mm6,%mm0
movq 48(%esp),%mm6
movdqa %xmm1,-16(%edx)
leal 128(%ebp),%ebp
decl %ecx
jnz .L00800_47_ssse3
movdqa (%ebp),%xmm1
leal -640(%ebp),%ebp
movdqu (%ebx),%xmm0
.byte 102,15,56,0,193
movdqa (%ebp),%xmm3
movdqa %xmm1,%xmm2
movdqu 16(%ebx),%xmm1
paddq %xmm0,%xmm3
.byte 102,15,56,0,202
movq %mm4,%mm1
movq -128(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 32(%esp),%mm5
paddq %mm6,%mm2
movq 40(%esp),%mm6
movq %mm4,%mm1
movq -120(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,24(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,56(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 48(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 16(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq (%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 24(%esp),%mm5
paddq %mm6,%mm0
movq 32(%esp),%mm6
movdqa %xmm3,-128(%edx)
movdqa 16(%ebp),%xmm4
movdqa %xmm2,%xmm3
movdqu 32(%ebx),%xmm2
paddq %xmm1,%xmm4
.byte 102,15,56,0,211
movq %mm4,%mm1
movq -112(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,16(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,48(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 40(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 8(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 56(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 16(%esp),%mm5
paddq %mm6,%mm2
movq 24(%esp),%mm6
movq %mm4,%mm1
movq -104(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,8(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,40(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 32(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq (%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 48(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 8(%esp),%mm5
paddq %mm6,%mm0
movq 16(%esp),%mm6
movdqa %xmm4,-112(%edx)
movdqa 32(%ebp),%xmm5
movdqa %xmm3,%xmm4
movdqu 48(%ebx),%xmm3
paddq %xmm2,%xmm5
.byte 102,15,56,0,220
movq %mm4,%mm1
movq -96(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,32(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 24(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 56(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 40(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq (%esp),%mm5
paddq %mm6,%mm2
movq 8(%esp),%mm6
movq %mm4,%mm1
movq -88(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,56(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,24(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 16(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 48(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 32(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 56(%esp),%mm5
paddq %mm6,%mm0
movq (%esp),%mm6
movdqa %xmm5,-96(%edx)
movdqa 48(%ebp),%xmm6
movdqa %xmm4,%xmm5
movdqu 64(%ebx),%xmm4
paddq %xmm3,%xmm6
.byte 102,15,56,0,229
movq %mm4,%mm1
movq -80(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,48(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,16(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 8(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 40(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 24(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 48(%esp),%mm5
paddq %mm6,%mm2
movq 56(%esp),%mm6
movq %mm4,%mm1
movq -72(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,40(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,8(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq (%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 32(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 16(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 40(%esp),%mm5
paddq %mm6,%mm0
movq 48(%esp),%mm6
movdqa %xmm6,-80(%edx)
movdqa 64(%ebp),%xmm7
movdqa %xmm5,%xmm6
movdqu 80(%ebx),%xmm5
paddq %xmm4,%xmm7
.byte 102,15,56,0,238
movq %mm4,%mm1
movq -64(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,32(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 56(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 24(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 8(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 32(%esp),%mm5
paddq %mm6,%mm2
movq 40(%esp),%mm6
movq %mm4,%mm1
movq -56(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,24(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,56(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 48(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 16(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq (%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 24(%esp),%mm5
paddq %mm6,%mm0
movq 32(%esp),%mm6
movdqa %xmm7,-64(%edx)
movdqa %xmm0,(%edx)
movdqa 80(%ebp),%xmm0
movdqa %xmm6,%xmm7
movdqu 96(%ebx),%xmm6
paddq %xmm5,%xmm0
.byte 102,15,56,0,247
movq %mm4,%mm1
movq -48(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,16(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,48(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 40(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 8(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 56(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 16(%esp),%mm5
paddq %mm6,%mm2
movq 24(%esp),%mm6
movq %mm4,%mm1
movq -40(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,8(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,40(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 32(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq (%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 48(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 8(%esp),%mm5
paddq %mm6,%mm0
movq 16(%esp),%mm6
movdqa %xmm0,-48(%edx)
movdqa %xmm1,16(%edx)
movdqa 96(%ebp),%xmm1
movdqa %xmm7,%xmm0
movdqu 112(%ebx),%xmm7
paddq %xmm6,%xmm1
.byte 102,15,56,0,248
movq %mm4,%mm1
movq -32(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,32(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 24(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 56(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 40(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq (%esp),%mm5
paddq %mm6,%mm2
movq 8(%esp),%mm6
movq %mm4,%mm1
movq -24(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,56(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,24(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 16(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 48(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 32(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 56(%esp),%mm5
paddq %mm6,%mm0
movq (%esp),%mm6
movdqa %xmm1,-32(%edx)
movdqa %xmm2,32(%edx)
movdqa 112(%ebp),%xmm2
movdqa (%edx),%xmm0
paddq %xmm7,%xmm2
movq %mm4,%mm1
movq -16(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,48(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm0
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm0,16(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq 8(%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 40(%esp),%mm4
paddq %mm7,%mm3
movq %mm0,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm0,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 24(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm0,%mm2
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
pxor %mm7,%mm6
movq 48(%esp),%mm5
paddq %mm6,%mm2
movq 56(%esp),%mm6
movq %mm4,%mm1
movq -8(%edx),%mm7
pxor %mm6,%mm5
psrlq $14,%mm1
movq %mm4,40(%esp)
pand %mm4,%mm5
psllq $23,%mm4
paddq %mm3,%mm2
movq %mm1,%mm3
psrlq $4,%mm1
pxor %mm6,%mm5
pxor %mm4,%mm3
psllq $23,%mm4
pxor %mm1,%mm3
movq %mm2,8(%esp)
paddq %mm5,%mm7
pxor %mm4,%mm3
psrlq $23,%mm1
paddq (%esp),%mm7
pxor %mm1,%mm3
psllq $4,%mm4
pxor %mm4,%mm3
movq 32(%esp),%mm4
paddq %mm7,%mm3
movq %mm2,%mm5
psrlq $28,%mm5
paddq %mm3,%mm4
movq %mm2,%mm6
movq %mm5,%mm7
psllq $25,%mm6
movq 16(%esp),%mm1
psrlq $6,%mm5
pxor %mm6,%mm7
psllq $5,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm2
psrlq $5,%mm5
pxor %mm6,%mm7
pand %mm2,%mm0
psllq $6,%mm6
pxor %mm5,%mm7
pxor %mm1,%mm0
pxor %mm7,%mm6
movq 40(%esp),%mm5
paddq %mm6,%mm0
movq 48(%esp),%mm6
movdqa %xmm2,-16(%edx)
movq 8(%esp),%mm1
paddq %mm3,%mm0
movq 24(%esp),%mm3
movq 56(%esp),%mm7
pxor %mm1,%mm2
paddq (%esi),%mm0
paddq 8(%esi),%mm1
paddq 16(%esi),%mm2
paddq 24(%esi),%mm3
paddq 32(%esi),%mm4
paddq 40(%esi),%mm5
paddq 48(%esi),%mm6
paddq 56(%esi),%mm7
movq %mm0,(%esi)
movq %mm1,8(%esi)
movq %mm2,16(%esi)
movq %mm3,24(%esi)
movq %mm4,32(%esi)
movq %mm5,40(%esi)
movq %mm6,48(%esi)
movq %mm7,56(%esi)
cmpl %eax,%edi
jb .L007loop_ssse3
movl 76(%edx),%esp
emms
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 16
.L002loop_x86:
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 16(%edi),%eax
movl 20(%edi),%ebx
movl 24(%edi),%ecx
movl 28(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 32(%edi),%eax
movl 36(%edi),%ebx
movl 40(%edi),%ecx
movl 44(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 48(%edi),%eax
movl 52(%edi),%ebx
movl 56(%edi),%ecx
movl 60(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 64(%edi),%eax
movl 68(%edi),%ebx
movl 72(%edi),%ecx
movl 76(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 80(%edi),%eax
movl 84(%edi),%ebx
movl 88(%edi),%ecx
movl 92(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 96(%edi),%eax
movl 100(%edi),%ebx
movl 104(%edi),%ecx
movl 108(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
movl 112(%edi),%eax
movl 116(%edi),%ebx
movl 120(%edi),%ecx
movl 124(%edi),%edx
bswap %eax
bswap %ebx
bswap %ecx
bswap %edx
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
addl $128,%edi
subl $72,%esp
movl %edi,204(%esp)
leal 8(%esp),%edi
movl $16,%ecx
.long 2784229001
.align 16
.L00900_15_x86:
movl 40(%esp),%ecx
movl 44(%esp),%edx
movl %ecx,%esi
shrl $9,%ecx
movl %edx,%edi
shrl $9,%edx
movl %ecx,%ebx
shll $14,%esi
movl %edx,%eax
shll $14,%edi
xorl %esi,%ebx
shrl $5,%ecx
xorl %edi,%eax
shrl $5,%edx
xorl %ecx,%eax
shll $4,%esi
xorl %edx,%ebx
shll $4,%edi
xorl %esi,%ebx
shrl $4,%ecx
xorl %edi,%eax
shrl $4,%edx
xorl %ecx,%eax
shll $5,%esi
xorl %edx,%ebx
shll $5,%edi
xorl %esi,%eax
xorl %edi,%ebx
movl 48(%esp),%ecx
movl 52(%esp),%edx
movl 56(%esp),%esi
movl 60(%esp),%edi
addl 64(%esp),%eax
adcl 68(%esp),%ebx
xorl %esi,%ecx
xorl %edi,%edx
andl 40(%esp),%ecx
andl 44(%esp),%edx
addl 192(%esp),%eax
adcl 196(%esp),%ebx
xorl %esi,%ecx
xorl %edi,%edx
movl (%ebp),%esi
movl 4(%ebp),%edi
addl %ecx,%eax
adcl %edx,%ebx
movl 32(%esp),%ecx
movl 36(%esp),%edx
addl %esi,%eax
adcl %edi,%ebx
movl %eax,(%esp)
movl %ebx,4(%esp)
addl %ecx,%eax
adcl %edx,%ebx
movl 8(%esp),%ecx
movl 12(%esp),%edx
movl %eax,32(%esp)
movl %ebx,36(%esp)
movl %ecx,%esi
shrl $2,%ecx
movl %edx,%edi
shrl $2,%edx
movl %ecx,%ebx
shll $4,%esi
movl %edx,%eax
shll $4,%edi
xorl %esi,%ebx
shrl $5,%ecx
xorl %edi,%eax
shrl $5,%edx
xorl %ecx,%ebx
shll $21,%esi
xorl %edx,%eax
shll $21,%edi
xorl %esi,%eax
shrl $21,%ecx
xorl %edi,%ebx
shrl $21,%edx
xorl %ecx,%eax
shll $5,%esi
xorl %edx,%ebx
shll $5,%edi
xorl %esi,%eax
xorl %edi,%ebx
movl 8(%esp),%ecx
movl 12(%esp),%edx
movl 16(%esp),%esi
movl 20(%esp),%edi
addl (%esp),%eax
adcl 4(%esp),%ebx
orl %esi,%ecx
orl %edi,%edx
andl 24(%esp),%ecx
andl 28(%esp),%edx
andl 8(%esp),%esi
andl 12(%esp),%edi
orl %esi,%ecx
orl %edi,%edx
addl %ecx,%eax
adcl %edx,%ebx
movl %eax,(%esp)
movl %ebx,4(%esp)
movb (%ebp),%dl
subl $8,%esp
leal 8(%ebp),%ebp
cmpb $148,%dl
jne .L00900_15_x86
.align 16
.L01016_79_x86:
movl 312(%esp),%ecx
movl 316(%esp),%edx
movl %ecx,%esi
shrl $1,%ecx
movl %edx,%edi
shrl $1,%edx
movl %ecx,%eax
shll $24,%esi
movl %edx,%ebx
shll $24,%edi
xorl %esi,%ebx
shrl $6,%ecx
xorl %edi,%eax
shrl $6,%edx
xorl %ecx,%eax
shll $7,%esi
xorl %edx,%ebx
shll $1,%edi
xorl %esi,%ebx
shrl $1,%ecx
xorl %edi,%eax
shrl $1,%edx
xorl %ecx,%eax
shll $6,%edi
xorl %edx,%ebx
xorl %edi,%eax
movl %eax,(%esp)
movl %ebx,4(%esp)
movl 208(%esp),%ecx
movl 212(%esp),%edx
movl %ecx,%esi
shrl $6,%ecx
movl %edx,%edi
shrl $6,%edx
movl %ecx,%eax
shll $3,%esi
movl %edx,%ebx
shll $3,%edi
xorl %esi,%eax
shrl $13,%ecx
xorl %edi,%ebx
shrl $13,%edx
xorl %ecx,%eax
shll $10,%esi
xorl %edx,%ebx
shll $10,%edi
xorl %esi,%ebx
shrl $10,%ecx
xorl %edi,%eax
shrl $10,%edx
xorl %ecx,%ebx
shll $13,%edi
xorl %edx,%eax
xorl %edi,%eax
movl 320(%esp),%ecx
movl 324(%esp),%edx
addl (%esp),%eax
adcl 4(%esp),%ebx
movl 248(%esp),%esi
movl 252(%esp),%edi
addl %ecx,%eax
adcl %edx,%ebx
addl %esi,%eax
adcl %edi,%ebx
movl %eax,192(%esp)
movl %ebx,196(%esp)
movl 40(%esp),%ecx
movl 44(%esp),%edx
movl %ecx,%esi
shrl $9,%ecx
movl %edx,%edi
shrl $9,%edx
movl %ecx,%ebx
shll $14,%esi
movl %edx,%eax
shll $14,%edi
xorl %esi,%ebx
shrl $5,%ecx
xorl %edi,%eax
shrl $5,%edx
xorl %ecx,%eax
shll $4,%esi
xorl %edx,%ebx
shll $4,%edi
xorl %esi,%ebx
shrl $4,%ecx
xorl %edi,%eax
shrl $4,%edx
xorl %ecx,%eax
shll $5,%esi
xorl %edx,%ebx
shll $5,%edi
xorl %esi,%eax
xorl %edi,%ebx
movl 48(%esp),%ecx
movl 52(%esp),%edx
movl 56(%esp),%esi
movl 60(%esp),%edi
addl 64(%esp),%eax
adcl 68(%esp),%ebx
xorl %esi,%ecx
xorl %edi,%edx
andl 40(%esp),%ecx
andl 44(%esp),%edx
addl 192(%esp),%eax
adcl 196(%esp),%ebx
xorl %esi,%ecx
xorl %edi,%edx
movl (%ebp),%esi
movl 4(%ebp),%edi
addl %ecx,%eax
adcl %edx,%ebx
movl 32(%esp),%ecx
movl 36(%esp),%edx
addl %esi,%eax
adcl %edi,%ebx
movl %eax,(%esp)
movl %ebx,4(%esp)
addl %ecx,%eax
adcl %edx,%ebx
movl 8(%esp),%ecx
movl 12(%esp),%edx
movl %eax,32(%esp)
movl %ebx,36(%esp)
movl %ecx,%esi
shrl $2,%ecx
movl %edx,%edi
shrl $2,%edx
movl %ecx,%ebx
shll $4,%esi
movl %edx,%eax
shll $4,%edi
xorl %esi,%ebx
shrl $5,%ecx
xorl %edi,%eax
shrl $5,%edx
xorl %ecx,%ebx
shll $21,%esi
xorl %edx,%eax
shll $21,%edi
xorl %esi,%eax
shrl $21,%ecx
xorl %edi,%ebx
shrl $21,%edx
xorl %ecx,%eax
shll $5,%esi
xorl %edx,%ebx
shll $5,%edi
xorl %esi,%eax
xorl %edi,%ebx
movl 8(%esp),%ecx
movl 12(%esp),%edx
movl 16(%esp),%esi
movl 20(%esp),%edi
addl (%esp),%eax
adcl 4(%esp),%ebx
orl %esi,%ecx
orl %edi,%edx
andl 24(%esp),%ecx
andl 28(%esp),%edx
andl 8(%esp),%esi
andl 12(%esp),%edi
orl %esi,%ecx
orl %edi,%edx
addl %ecx,%eax
adcl %edx,%ebx
movl %eax,(%esp)
movl %ebx,4(%esp)
movb (%ebp),%dl
subl $8,%esp
leal 8(%ebp),%ebp
cmpb $23,%dl
jne .L01016_79_x86
movl 840(%esp),%esi
movl 844(%esp),%edi
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edx
addl 8(%esp),%eax
adcl 12(%esp),%ebx
movl %eax,(%esi)
movl %ebx,4(%esi)
addl 16(%esp),%ecx
adcl 20(%esp),%edx
movl %ecx,8(%esi)
movl %edx,12(%esi)
movl 16(%esi),%eax
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edx
addl 24(%esp),%eax
adcl 28(%esp),%ebx
movl %eax,16(%esi)
movl %ebx,20(%esi)
addl 32(%esp),%ecx
adcl 36(%esp),%edx
movl %ecx,24(%esi)
movl %edx,28(%esi)
movl 32(%esi),%eax
movl 36(%esi),%ebx
movl 40(%esi),%ecx
movl 44(%esi),%edx
addl 40(%esp),%eax
adcl 44(%esp),%ebx
movl %eax,32(%esi)
movl %ebx,36(%esi)
addl 48(%esp),%ecx
adcl 52(%esp),%edx
movl %ecx,40(%esi)
movl %edx,44(%esi)
movl 48(%esi),%eax
movl 52(%esi),%ebx
movl 56(%esi),%ecx
movl 60(%esi),%edx
addl 56(%esp),%eax
adcl 60(%esp),%ebx
movl %eax,48(%esi)
movl %ebx,52(%esi)
addl 64(%esp),%ecx
adcl 68(%esp),%edx
movl %ecx,56(%esi)
movl %edx,60(%esi)
addl $840,%esp
subl $640,%ebp
cmpl 8(%esp),%edi
jb .L002loop_x86
movl 12(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.align 64
.L001K512:
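// The 80 SHA-512 round constants K[0..79]; each pair of .long values below
// stores one 64-bit constant as (low 32 bits, high 32 bits).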
.long 3609767458,1116352408
.long 602891725,1899447441
.long 3964484399,3049323471
.long 2173295548,3921009573
.long 4081628472,961987163
.long 3053834265,1508970993
.long 2937671579,2453635748
.long 3664609560,2870763221
.long 2734883394,3624381080
.long 1164996542,310598401
.long 1323610764,607225278
.long 3590304994,1426881987
.long 4068182383,1925078388
.long 991336113,2162078206
.long 633803317,2614888103
.long 3479774868,3248222580
.long 2666613458,3835390401
.long 944711139,4022224774
.long 2341262773,264347078
.long 2007800933,604807628
.long 1495990901,770255983
.long 1856431235,1249150122
.long 3175218132,1555081692
.long 2198950837,1996064986
.long 3999719339,2554220882
.long 766784016,2821834349
.long 2566594879,2952996808
.long 3203337956,3210313671
.long 1034457026,3336571891
.long 2466948901,3584528711
.long 3758326383,113926993
.long 168717936,338241895
.long 1188179964,666307205
.long 1546045734,773529912
.long 1522805485,1294757372
.long 2643833823,1396182291
.long 2343527390,1695183700
.long 1014477480,1986661051
.long 1206759142,2177026350
.long 344077627,2456956037
.long 1290863460,2730485921
.long 3158454273,2820302411
.long 3505952657,3259730800
.long 106217008,3345764771
.long 3606008344,3516065817
.long 1432725776,3600352804
.long 1467031594,4094571909
.long 851169720,275423344
.long 3100823752,430227734
.long 1363258195,506948616
.long 3750685593,659060556
.long 3785050280,883997877
.long 3318307427,958139571
.long 3812723403,1322822218
.long 2003034995,1537002063
.long 3602036899,1747873779
.long 1575990012,1955562222
.long 1125592928,2024104815
.long 2716904306,2227730452
.long 442776044,2361852424
.long 593698344,2428436474
.long 3733110249,2756734187
.long 2999351573,3204031479
.long 3815920427,3329325298
.long 3928383900,3391569614
.long 566280711,3515267271
.long 3454069534,3940187606
.long 4000239992,4118630271
.long 1914138554,116418474
.long 2731055270,174292421
.long 3203993006,289380356
.long 320620315,460393269
.long 587496836,685471733
.long 1086792851,852142971
.long 365543100,1017036298
.long 2618297676,1126000580
.long 3409855158,1288033470
.long 4234509866,1501505948
.long 987167468,1607167915
.long 1246189591,1816402316
.long 67438087,66051
.long 202182159,134810123
.size sha512_block_data_order,.-.L_sha512_block_data_order_begin
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97
.byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32
.byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97
.byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103
.byte 62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
marvin-hansen/iggy-streaming-system | 6,610 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-x86/crypto/fipsmodule/ghash-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl gcm_init_clmul
.hidden gcm_init_clmul
.type gcm_init_clmul,@function
.align 16
gcm_init_clmul:
.L_gcm_init_clmul_begin:
movl 4(%esp),%edx
movl 8(%esp),%eax
call .L000pic
.L000pic:
popl %ecx
leal .Lbswap-.L000pic(%ecx),%ecx
movdqu (%eax),%xmm2
pshufd $78,%xmm2,%xmm2
pshufd $255,%xmm2,%xmm4
movdqa %xmm2,%xmm3
psllq $1,%xmm2
pxor %xmm5,%xmm5
psrlq $63,%xmm3
pcmpgtd %xmm4,%xmm5
pslldq $8,%xmm3
por %xmm3,%xmm2
pand 16(%ecx),%xmm5
pxor %xmm5,%xmm2
movdqa %xmm2,%xmm0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pshufd $78,%xmm2,%xmm4
pxor %xmm0,%xmm3
pxor %xmm2,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,220,0
xorps %xmm0,%xmm3
xorps %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
pshufd $78,%xmm2,%xmm3
pshufd $78,%xmm0,%xmm4
pxor %xmm2,%xmm3
movdqu %xmm2,(%edx)
pxor %xmm0,%xmm4
movdqu %xmm0,16(%edx)
.byte 102,15,58,15,227,8
movdqu %xmm4,32(%edx)
ret
.size gcm_init_clmul,.-.L_gcm_init_clmul_begin
.globl gcm_gmult_clmul
.hidden gcm_gmult_clmul
.type gcm_gmult_clmul,@function
.align 16
gcm_gmult_clmul:
.L_gcm_gmult_clmul_begin:
movl 4(%esp),%eax
movl 8(%esp),%edx
call .L001pic
.L001pic:
popl %ecx
leal .Lbswap-.L001pic(%ecx),%ecx
movdqu (%eax),%xmm0
movdqa (%ecx),%xmm5
movups (%edx),%xmm2
.byte 102,15,56,0,197
movups 32(%edx),%xmm4
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pxor %xmm0,%xmm3
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,220,0
xorps %xmm0,%xmm3
xorps %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
.byte 102,15,56,0,197
movdqu %xmm0,(%eax)
ret
.size gcm_gmult_clmul,.-.L_gcm_gmult_clmul_begin
.globl gcm_ghash_clmul
.hidden gcm_ghash_clmul
.type gcm_ghash_clmul,@function
.align 16
gcm_ghash_clmul:
.L_gcm_ghash_clmul_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 20(%esp),%eax
movl 24(%esp),%edx
movl 28(%esp),%esi
movl 32(%esp),%ebx
call .L002pic
.L002pic:
popl %ecx
leal .Lbswap-.L002pic(%ecx),%ecx
movdqu (%eax),%xmm0
movdqa (%ecx),%xmm5
movdqu (%edx),%xmm2
.byte 102,15,56,0,197
subl $16,%ebx
jz .L003odd_tail
movdqu (%esi),%xmm3
movdqu 16(%esi),%xmm6
.byte 102,15,56,0,221
.byte 102,15,56,0,245
movdqu 32(%edx),%xmm5
pxor %xmm3,%xmm0
pshufd $78,%xmm6,%xmm3
movdqa %xmm6,%xmm7
pxor %xmm6,%xmm3
leal 32(%esi),%esi
.byte 102,15,58,68,242,0
.byte 102,15,58,68,250,17
.byte 102,15,58,68,221,0
movups 16(%edx),%xmm2
nop
subl $32,%ebx
jbe .L004even_tail
jmp .L005mod_loop
.align 32
.L005mod_loop:
pshufd $78,%xmm0,%xmm4
movdqa %xmm0,%xmm1
pxor %xmm0,%xmm4
nop
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,229,16
movups (%edx),%xmm2
xorps %xmm6,%xmm0
movdqa (%ecx),%xmm5
xorps %xmm7,%xmm1
movdqu (%esi),%xmm7
pxor %xmm0,%xmm3
movdqu 16(%esi),%xmm6
pxor %xmm1,%xmm3
.byte 102,15,56,0,253
pxor %xmm3,%xmm4
movdqa %xmm4,%xmm3
psrldq $8,%xmm4
pslldq $8,%xmm3
pxor %xmm4,%xmm1
pxor %xmm3,%xmm0
.byte 102,15,56,0,245
pxor %xmm7,%xmm1
movdqa %xmm6,%xmm7
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
.byte 102,15,58,68,242,0
movups 32(%edx),%xmm5
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
pshufd $78,%xmm7,%xmm3
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm7,%xmm3
pxor %xmm4,%xmm1
.byte 102,15,58,68,250,17
movups 16(%edx),%xmm2
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
.byte 102,15,58,68,221,0
leal 32(%esi),%esi
subl $32,%ebx
ja .L005mod_loop
.L004even_tail:
pshufd $78,%xmm0,%xmm4
movdqa %xmm0,%xmm1
pxor %xmm0,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,229,16
movdqa (%ecx),%xmm5
xorps %xmm6,%xmm0
xorps %xmm7,%xmm1
pxor %xmm0,%xmm3
pxor %xmm1,%xmm3
pxor %xmm3,%xmm4
movdqa %xmm4,%xmm3
psrldq $8,%xmm4
pslldq $8,%xmm3
pxor %xmm4,%xmm1
pxor %xmm3,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
testl %ebx,%ebx
jnz .L006done
movups (%edx),%xmm2
.L003odd_tail:
movdqu (%esi),%xmm3
.byte 102,15,56,0,221
pxor %xmm3,%xmm0
movdqa %xmm0,%xmm1
pshufd $78,%xmm0,%xmm3
pshufd $78,%xmm2,%xmm4
pxor %xmm0,%xmm3
pxor %xmm2,%xmm4
.byte 102,15,58,68,194,0
.byte 102,15,58,68,202,17
.byte 102,15,58,68,220,0
xorps %xmm0,%xmm3
xorps %xmm1,%xmm3
movdqa %xmm3,%xmm4
psrldq $8,%xmm3
pslldq $8,%xmm4
pxor %xmm3,%xmm1
pxor %xmm4,%xmm0
movdqa %xmm0,%xmm4
movdqa %xmm0,%xmm3
psllq $5,%xmm0
pxor %xmm0,%xmm3
psllq $1,%xmm0
pxor %xmm3,%xmm0
psllq $57,%xmm0
movdqa %xmm0,%xmm3
pslldq $8,%xmm0
psrldq $8,%xmm3
pxor %xmm4,%xmm0
pxor %xmm3,%xmm1
movdqa %xmm0,%xmm4
psrlq $1,%xmm0
pxor %xmm4,%xmm1
pxor %xmm0,%xmm4
psrlq $5,%xmm0
pxor %xmm4,%xmm0
psrlq $1,%xmm0
pxor %xmm1,%xmm0
.L006done:
.byte 102,15,56,0,197
movdqu %xmm0,(%eax)
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size gcm_ghash_clmul,.-.L_gcm_ghash_clmul_begin
.align 64
.Lbswap:
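// pshufb byte-reversal mask, followed by the polynomial constant applied
// during key setup in gcm_init_clmul.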
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,194
.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,44,32,67
.byte 82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112
.byte 112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62
.byte 0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
marvin-hansen/iggy-streaming-system | 19,257 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-x86/crypto/chacha/chacha-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
.text
.globl ChaCha20_ctr32_nohw
.hidden ChaCha20_ctr32_nohw
.type ChaCha20_ctr32_nohw,@function
.align 16
ChaCha20_ctr32_nohw:
.L_ChaCha20_ctr32_nohw_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
movl 32(%esp),%esi
movl 36(%esp),%edi
subl $132,%esp
movl (%esi),%eax
movl 4(%esi),%ebx
movl 8(%esi),%ecx
movl 12(%esi),%edx
movl %eax,80(%esp)
movl %ebx,84(%esp)
movl %ecx,88(%esp)
movl %edx,92(%esp)
movl 16(%esi),%eax
movl 20(%esi),%ebx
movl 24(%esi),%ecx
movl 28(%esi),%edx
movl %eax,96(%esp)
movl %ebx,100(%esp)
movl %ecx,104(%esp)
movl %edx,108(%esp)
movl (%edi),%eax
movl 4(%edi),%ebx
movl 8(%edi),%ecx
movl 12(%edi),%edx
subl $1,%eax
movl %eax,112(%esp)
movl %ebx,116(%esp)
movl %ecx,120(%esp)
movl %edx,124(%esp)
jmp .L000entry
.align 16
.L001outer_loop:
movl %ebx,156(%esp)
movl %eax,152(%esp)
movl %ecx,160(%esp)
.L000entry:
movl $1634760805,%eax
movl $857760878,4(%esp)
movl $2036477234,8(%esp)
movl $1797285236,12(%esp)
movl 84(%esp),%ebx
movl 88(%esp),%ebp
movl 104(%esp),%ecx
movl 108(%esp),%esi
movl 116(%esp),%edx
movl 120(%esp),%edi
movl %ebx,20(%esp)
movl %ebp,24(%esp)
movl %ecx,40(%esp)
movl %esi,44(%esp)
movl %edx,52(%esp)
movl %edi,56(%esp)
movl 92(%esp),%ebx
movl 124(%esp),%edi
movl 112(%esp),%edx
movl 80(%esp),%ebp
movl 96(%esp),%ecx
movl 100(%esp),%esi
addl $1,%edx
movl %ebx,28(%esp)
movl %edi,60(%esp)
movl %edx,112(%esp)
movl $10,%ebx
jmp .L002loop
.align 16
.L002loop:
addl %ebp,%eax
movl %ebx,128(%esp)
movl %ebp,%ebx
xorl %eax,%edx
roll $16,%edx
addl %edx,%ecx
xorl %ecx,%ebx
movl 52(%esp),%edi
roll $12,%ebx
movl 20(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,(%esp)
roll $8,%edx
movl 4(%esp),%eax
addl %edx,%ecx
movl %edx,48(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
movl %ecx,32(%esp)
roll $16,%edi
movl %ebx,16(%esp)
addl %edi,%esi
movl 40(%esp),%ecx
xorl %esi,%ebp
movl 56(%esp),%edx
roll $12,%ebp
movl 24(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,4(%esp)
roll $8,%edi
movl 8(%esp),%eax
addl %edi,%esi
movl %edi,52(%esp)
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
movl %esi,36(%esp)
roll $16,%edx
movl %ebp,20(%esp)
addl %edx,%ecx
movl 44(%esp),%esi
xorl %ecx,%ebx
movl 60(%esp),%edi
roll $12,%ebx
movl 28(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,8(%esp)
roll $8,%edx
movl 12(%esp),%eax
addl %edx,%ecx
movl %edx,56(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
roll $16,%edi
movl %ebx,24(%esp)
addl %edi,%esi
xorl %esi,%ebp
roll $12,%ebp
movl 20(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,12(%esp)
roll $8,%edi
movl (%esp),%eax
addl %edi,%esi
movl %edi,%edx
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
roll $16,%edx
movl %ebp,28(%esp)
addl %edx,%ecx
xorl %ecx,%ebx
movl 48(%esp),%edi
roll $12,%ebx
movl 24(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,(%esp)
roll $8,%edx
movl 4(%esp),%eax
addl %edx,%ecx
movl %edx,60(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
movl %ecx,40(%esp)
roll $16,%edi
movl %ebx,20(%esp)
addl %edi,%esi
movl 32(%esp),%ecx
xorl %esi,%ebp
movl 52(%esp),%edx
roll $12,%ebp
movl 28(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,4(%esp)
roll $8,%edi
movl 8(%esp),%eax
addl %edi,%esi
movl %edi,48(%esp)
xorl %esi,%ebp
addl %ebx,%eax
roll $7,%ebp
xorl %eax,%edx
movl %esi,44(%esp)
roll $16,%edx
movl %ebp,24(%esp)
addl %edx,%ecx
movl 36(%esp),%esi
xorl %ecx,%ebx
movl 56(%esp),%edi
roll $12,%ebx
movl 16(%esp),%ebp
addl %ebx,%eax
xorl %eax,%edx
movl %eax,8(%esp)
roll $8,%edx
movl 12(%esp),%eax
addl %edx,%ecx
movl %edx,52(%esp)
xorl %ecx,%ebx
addl %ebp,%eax
roll $7,%ebx
xorl %eax,%edi
roll $16,%edi
movl %ebx,28(%esp)
addl %edi,%esi
xorl %esi,%ebp
movl 48(%esp),%edx
roll $12,%ebp
movl 128(%esp),%ebx
addl %ebp,%eax
xorl %eax,%edi
movl %eax,12(%esp)
roll $8,%edi
movl (%esp),%eax
addl %edi,%esi
movl %edi,56(%esp)
xorl %esi,%ebp
roll $7,%ebp
decl %ebx
jnz .L002loop
movl 160(%esp),%ebx
addl $1634760805,%eax
addl 80(%esp),%ebp
addl 96(%esp),%ecx
addl 100(%esp),%esi
cmpl $64,%ebx
jb .L003tail
movl 156(%esp),%ebx
addl 112(%esp),%edx
addl 120(%esp),%edi
xorl (%ebx),%eax
xorl 16(%ebx),%ebp
movl %eax,(%esp)
movl 152(%esp),%eax
xorl 32(%ebx),%ecx
xorl 36(%ebx),%esi
xorl 48(%ebx),%edx
xorl 56(%ebx),%edi
movl %ebp,16(%eax)
movl %ecx,32(%eax)
movl %esi,36(%eax)
movl %edx,48(%eax)
movl %edi,56(%eax)
movl 4(%esp),%ebp
movl 8(%esp),%ecx
movl 12(%esp),%esi
movl 20(%esp),%edx
movl 24(%esp),%edi
addl $857760878,%ebp
addl $2036477234,%ecx
addl $1797285236,%esi
addl 84(%esp),%edx
addl 88(%esp),%edi
xorl 4(%ebx),%ebp
xorl 8(%ebx),%ecx
xorl 12(%ebx),%esi
xorl 20(%ebx),%edx
xorl 24(%ebx),%edi
movl %ebp,4(%eax)
movl %ecx,8(%eax)
movl %esi,12(%eax)
movl %edx,20(%eax)
movl %edi,24(%eax)
movl 28(%esp),%ebp
movl 40(%esp),%ecx
movl 44(%esp),%esi
movl 52(%esp),%edx
movl 60(%esp),%edi
addl 92(%esp),%ebp
addl 104(%esp),%ecx
addl 108(%esp),%esi
addl 116(%esp),%edx
addl 124(%esp),%edi
xorl 28(%ebx),%ebp
xorl 40(%ebx),%ecx
xorl 44(%ebx),%esi
xorl 52(%ebx),%edx
xorl 60(%ebx),%edi
leal 64(%ebx),%ebx
movl %ebp,28(%eax)
movl (%esp),%ebp
movl %ecx,40(%eax)
movl 160(%esp),%ecx
movl %esi,44(%eax)
movl %edx,52(%eax)
movl %edi,60(%eax)
movl %ebp,(%eax)
leal 64(%eax),%eax
subl $64,%ecx
jnz .L001outer_loop
jmp .L004done
.L003tail:
addl 112(%esp),%edx
addl 120(%esp),%edi
movl %eax,(%esp)
movl %ebp,16(%esp)
movl %ecx,32(%esp)
movl %esi,36(%esp)
movl %edx,48(%esp)
movl %edi,56(%esp)
movl 4(%esp),%ebp
movl 8(%esp),%ecx
movl 12(%esp),%esi
movl 20(%esp),%edx
movl 24(%esp),%edi
addl $857760878,%ebp
addl $2036477234,%ecx
addl $1797285236,%esi
addl 84(%esp),%edx
addl 88(%esp),%edi
movl %ebp,4(%esp)
movl %ecx,8(%esp)
movl %esi,12(%esp)
movl %edx,20(%esp)
movl %edi,24(%esp)
movl 28(%esp),%ebp
movl 40(%esp),%ecx
movl 44(%esp),%esi
movl 52(%esp),%edx
movl 60(%esp),%edi
addl 92(%esp),%ebp
addl 104(%esp),%ecx
addl 108(%esp),%esi
addl 116(%esp),%edx
addl 124(%esp),%edi
movl %ebp,28(%esp)
movl 156(%esp),%ebp
movl %ecx,40(%esp)
movl 152(%esp),%ecx
movl %esi,44(%esp)
xorl %esi,%esi
movl %edx,52(%esp)
movl %edi,60(%esp)
xorl %eax,%eax
xorl %edx,%edx
.L005tail_loop:
movb (%esi,%ebp,1),%al
movb (%esp,%esi,1),%dl
leal 1(%esi),%esi
xorb %dl,%al
movb %al,-1(%ecx,%esi,1)
decl %ebx
jnz .L005tail_loop
.L004done:
addl $132,%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size ChaCha20_ctr32_nohw,.-.L_ChaCha20_ctr32_nohw_begin
.globl ChaCha20_ctr32_ssse3
.hidden ChaCha20_ctr32_ssse3
.type ChaCha20_ctr32_ssse3,@function
.align 16
ChaCha20_ctr32_ssse3:
.L_ChaCha20_ctr32_ssse3_begin:
pushl %ebp
pushl %ebx
pushl %esi
pushl %edi
call .Lpic_point
.Lpic_point:
popl %eax
movl 20(%esp),%edi
movl 24(%esp),%esi
movl 28(%esp),%ecx
movl 32(%esp),%edx
movl 36(%esp),%ebx
movl %esp,%ebp
subl $524,%esp
andl $-64,%esp
movl %ebp,512(%esp)
leal .Lssse3_data-.Lpic_point(%eax),%eax
movdqu (%ebx),%xmm3
cmpl $256,%ecx
jb .L0061x
movl %edx,516(%esp)
movl %ebx,520(%esp)
subl $256,%ecx
leal 384(%esp),%ebp
movdqu (%edx),%xmm7
pshufd $0,%xmm3,%xmm0
pshufd $85,%xmm3,%xmm1
pshufd $170,%xmm3,%xmm2
pshufd $255,%xmm3,%xmm3
paddd 48(%eax),%xmm0
pshufd $0,%xmm7,%xmm4
pshufd $85,%xmm7,%xmm5
psubd 64(%eax),%xmm0
pshufd $170,%xmm7,%xmm6
pshufd $255,%xmm7,%xmm7
movdqa %xmm0,64(%ebp)
movdqa %xmm1,80(%ebp)
movdqa %xmm2,96(%ebp)
movdqa %xmm3,112(%ebp)
movdqu 16(%edx),%xmm3
movdqa %xmm4,-64(%ebp)
movdqa %xmm5,-48(%ebp)
movdqa %xmm6,-32(%ebp)
movdqa %xmm7,-16(%ebp)
movdqa 32(%eax),%xmm7
leal 128(%esp),%ebx
pshufd $0,%xmm3,%xmm0
pshufd $85,%xmm3,%xmm1
pshufd $170,%xmm3,%xmm2
pshufd $255,%xmm3,%xmm3
pshufd $0,%xmm7,%xmm4
pshufd $85,%xmm7,%xmm5
pshufd $170,%xmm7,%xmm6
pshufd $255,%xmm7,%xmm7
movdqa %xmm0,(%ebp)
movdqa %xmm1,16(%ebp)
movdqa %xmm2,32(%ebp)
movdqa %xmm3,48(%ebp)
movdqa %xmm4,-128(%ebp)
movdqa %xmm5,-112(%ebp)
movdqa %xmm6,-96(%ebp)
movdqa %xmm7,-80(%ebp)
leal 128(%esi),%esi
leal 128(%edi),%edi
jmp .L007outer_loop
.align 16
.L007outer_loop:
movdqa -112(%ebp),%xmm1
movdqa -96(%ebp),%xmm2
movdqa -80(%ebp),%xmm3
movdqa -48(%ebp),%xmm5
movdqa -32(%ebp),%xmm6
movdqa -16(%ebp),%xmm7
movdqa %xmm1,-112(%ebx)
movdqa %xmm2,-96(%ebx)
movdqa %xmm3,-80(%ebx)
movdqa %xmm5,-48(%ebx)
movdqa %xmm6,-32(%ebx)
movdqa %xmm7,-16(%ebx)
movdqa 32(%ebp),%xmm2
movdqa 48(%ebp),%xmm3
movdqa 64(%ebp),%xmm4
movdqa 80(%ebp),%xmm5
movdqa 96(%ebp),%xmm6
movdqa 112(%ebp),%xmm7
paddd 64(%eax),%xmm4
movdqa %xmm2,32(%ebx)
movdqa %xmm3,48(%ebx)
movdqa %xmm4,64(%ebx)
movdqa %xmm5,80(%ebx)
movdqa %xmm6,96(%ebx)
movdqa %xmm7,112(%ebx)
movdqa %xmm4,64(%ebp)
movdqa -128(%ebp),%xmm0
movdqa %xmm4,%xmm6
movdqa -64(%ebp),%xmm3
movdqa (%ebp),%xmm4
movdqa 16(%ebp),%xmm5
movl $10,%edx
nop
.align 16
.L008loop:
paddd %xmm3,%xmm0
movdqa %xmm3,%xmm2
pxor %xmm0,%xmm6
pshufb (%eax),%xmm6
paddd %xmm6,%xmm4
pxor %xmm4,%xmm2
movdqa -48(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -112(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 80(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-128(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,64(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
movdqa %xmm4,(%ebx)
pshufb (%eax),%xmm7
movdqa %xmm2,-64(%ebx)
paddd %xmm7,%xmm5
movdqa 32(%ebx),%xmm4
pxor %xmm5,%xmm3
movdqa -32(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -96(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 96(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-112(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,80(%ebx)
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
movdqa %xmm5,16(%ebx)
pshufb (%eax),%xmm6
movdqa %xmm3,-48(%ebx)
paddd %xmm6,%xmm4
movdqa 48(%ebx),%xmm5
pxor %xmm4,%xmm2
movdqa -16(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -80(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 112(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-96(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,96(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
pshufb (%eax),%xmm7
movdqa %xmm2,-32(%ebx)
paddd %xmm7,%xmm5
pxor %xmm5,%xmm3
movdqa -48(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -128(%ebx),%xmm0
paddd %xmm3,%xmm1
pxor %xmm1,%xmm7
movdqa %xmm1,-80(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,%xmm6
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
pshufb (%eax),%xmm6
movdqa %xmm3,-16(%ebx)
paddd %xmm6,%xmm4
pxor %xmm4,%xmm2
movdqa -32(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -112(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 64(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-128(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,112(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
movdqa %xmm4,32(%ebx)
pshufb (%eax),%xmm7
movdqa %xmm2,-48(%ebx)
paddd %xmm7,%xmm5
movdqa (%ebx),%xmm4
pxor %xmm5,%xmm3
movdqa -16(%ebx),%xmm2
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -96(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 80(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-112(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,64(%ebx)
pxor %xmm5,%xmm3
paddd %xmm2,%xmm0
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
pxor %xmm0,%xmm6
por %xmm1,%xmm3
movdqa %xmm5,48(%ebx)
pshufb (%eax),%xmm6
movdqa %xmm3,-32(%ebx)
paddd %xmm6,%xmm4
movdqa 16(%ebx),%xmm5
pxor %xmm4,%xmm2
movdqa -64(%ebx),%xmm3
movdqa %xmm2,%xmm1
pslld $12,%xmm2
psrld $20,%xmm1
por %xmm1,%xmm2
movdqa -80(%ebx),%xmm1
paddd %xmm2,%xmm0
movdqa 96(%ebx),%xmm7
pxor %xmm0,%xmm6
movdqa %xmm0,-96(%ebx)
pshufb 16(%eax),%xmm6
paddd %xmm6,%xmm4
movdqa %xmm6,80(%ebx)
pxor %xmm4,%xmm2
paddd %xmm3,%xmm1
movdqa %xmm2,%xmm0
pslld $7,%xmm2
psrld $25,%xmm0
pxor %xmm1,%xmm7
por %xmm0,%xmm2
pshufb (%eax),%xmm7
movdqa %xmm2,-16(%ebx)
paddd %xmm7,%xmm5
pxor %xmm5,%xmm3
movdqa %xmm3,%xmm0
pslld $12,%xmm3
psrld $20,%xmm0
por %xmm0,%xmm3
movdqa -128(%ebx),%xmm0
paddd %xmm3,%xmm1
movdqa 64(%ebx),%xmm6
pxor %xmm1,%xmm7
movdqa %xmm1,-80(%ebx)
pshufb 16(%eax),%xmm7
paddd %xmm7,%xmm5
movdqa %xmm7,96(%ebx)
pxor %xmm5,%xmm3
movdqa %xmm3,%xmm1
pslld $7,%xmm3
psrld $25,%xmm1
por %xmm1,%xmm3
decl %edx
jnz .L008loop
movdqa %xmm3,-64(%ebx)
movdqa %xmm4,(%ebx)
movdqa %xmm5,16(%ebx)
movdqa %xmm6,64(%ebx)
movdqa %xmm7,96(%ebx)
movdqa -112(%ebx),%xmm1
movdqa -96(%ebx),%xmm2
movdqa -80(%ebx),%xmm3
paddd -128(%ebp),%xmm0
paddd -112(%ebp),%xmm1
paddd -96(%ebp),%xmm2
paddd -80(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa -64(%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa -48(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa -32(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa -16(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd -64(%ebp),%xmm0
paddd -48(%ebp),%xmm1
paddd -32(%ebp),%xmm2
paddd -16(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa (%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa 16(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa 32(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa 48(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd (%ebp),%xmm0
paddd 16(%ebp),%xmm1
paddd 32(%ebp),%xmm2
paddd 48(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 16(%esi),%esi
pxor %xmm0,%xmm4
movdqa 64(%ebx),%xmm0
pxor %xmm1,%xmm5
movdqa 80(%ebx),%xmm1
pxor %xmm2,%xmm6
movdqa 96(%ebx),%xmm2
pxor %xmm3,%xmm7
movdqa 112(%ebx),%xmm3
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 16(%edi),%edi
paddd 64(%ebp),%xmm0
paddd 80(%ebp),%xmm1
paddd 96(%ebp),%xmm2
paddd 112(%ebp),%xmm3
movdqa %xmm0,%xmm6
punpckldq %xmm1,%xmm0
movdqa %xmm2,%xmm7
punpckldq %xmm3,%xmm2
punpckhdq %xmm1,%xmm6
punpckhdq %xmm3,%xmm7
movdqa %xmm0,%xmm1
punpcklqdq %xmm2,%xmm0
movdqa %xmm6,%xmm3
punpcklqdq %xmm7,%xmm6
punpckhqdq %xmm2,%xmm1
punpckhqdq %xmm7,%xmm3
movdqu -128(%esi),%xmm4
movdqu -64(%esi),%xmm5
movdqu (%esi),%xmm2
movdqu 64(%esi),%xmm7
leal 208(%esi),%esi
pxor %xmm0,%xmm4
pxor %xmm1,%xmm5
pxor %xmm2,%xmm6
pxor %xmm3,%xmm7
movdqu %xmm4,-128(%edi)
movdqu %xmm5,-64(%edi)
movdqu %xmm6,(%edi)
movdqu %xmm7,64(%edi)
leal 208(%edi),%edi
subl $256,%ecx
jnc .L007outer_loop
addl $256,%ecx
jz .L009done
movl 520(%esp),%ebx
leal -128(%esi),%esi
movl 516(%esp),%edx
leal -128(%edi),%edi
movd 64(%ebp),%xmm2
movdqu (%ebx),%xmm3
paddd 96(%eax),%xmm2
pand 112(%eax),%xmm3
por %xmm2,%xmm3
.L0061x:
movdqa 32(%eax),%xmm0
movdqu (%edx),%xmm1
movdqu 16(%edx),%xmm2
movdqa (%eax),%xmm6
movdqa 16(%eax),%xmm7
movl %ebp,48(%esp)
movdqa %xmm0,(%esp)
movdqa %xmm1,16(%esp)
movdqa %xmm2,32(%esp)
movdqa %xmm3,48(%esp)
movl $10,%edx
jmp .L010loop1x
.align 16
.L011outer1x:
movdqa 80(%eax),%xmm3
movdqa (%esp),%xmm0
movdqa 16(%esp),%xmm1
movdqa 32(%esp),%xmm2
paddd 48(%esp),%xmm3
movl $10,%edx
movdqa %xmm3,48(%esp)
jmp .L010loop1x
.align 16
.L010loop1x:
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,222
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $20,%xmm1
pslld $12,%xmm4
por %xmm4,%xmm1
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,223
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $25,%xmm1
pslld $7,%xmm4
por %xmm4,%xmm1
pshufd $78,%xmm2,%xmm2
pshufd $57,%xmm1,%xmm1
pshufd $147,%xmm3,%xmm3
nop
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,222
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $20,%xmm1
pslld $12,%xmm4
por %xmm4,%xmm1
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
.byte 102,15,56,0,223
paddd %xmm3,%xmm2
pxor %xmm2,%xmm1
movdqa %xmm1,%xmm4
psrld $25,%xmm1
pslld $7,%xmm4
por %xmm4,%xmm1
pshufd $78,%xmm2,%xmm2
pshufd $147,%xmm1,%xmm1
pshufd $57,%xmm3,%xmm3
decl %edx
jnz .L010loop1x
paddd (%esp),%xmm0
paddd 16(%esp),%xmm1
paddd 32(%esp),%xmm2
paddd 48(%esp),%xmm3
cmpl $64,%ecx
jb .L012tail
movdqu (%esi),%xmm4
movdqu 16(%esi),%xmm5
pxor %xmm4,%xmm0
movdqu 32(%esi),%xmm4
pxor %xmm5,%xmm1
movdqu 48(%esi),%xmm5
pxor %xmm4,%xmm2
pxor %xmm5,%xmm3
leal 64(%esi),%esi
movdqu %xmm0,(%edi)
movdqu %xmm1,16(%edi)
movdqu %xmm2,32(%edi)
movdqu %xmm3,48(%edi)
leal 64(%edi),%edi
subl $64,%ecx
jnz .L011outer1x
jmp .L009done
.L012tail:
movdqa %xmm0,(%esp)
movdqa %xmm1,16(%esp)
movdqa %xmm2,32(%esp)
movdqa %xmm3,48(%esp)
xorl %eax,%eax
xorl %edx,%edx
xorl %ebp,%ebp
.L013tail_loop:
movb (%esp,%ebp,1),%al
movb (%esi,%ebp,1),%dl
leal 1(%ebp),%ebp
xorb %dl,%al
movb %al,-1(%edi,%ebp,1)
decl %ecx
jnz .L013tail_loop
.L009done:
movl 512(%esp),%esp
popl %edi
popl %esi
popl %ebx
popl %ebp
ret
.size ChaCha20_ctr32_ssse3,.-.L_ChaCha20_ctr32_ssse3_begin
.align 64
.Lssse3_data:
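// pshufb masks for 16-bit and 8-bit left rotates, the ChaCha sigma constants
// ("expand 32-byte k"), and the block-counter increment/mask vectors.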
.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13
.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14
.long 1634760805,857760878,2036477234,1797285236
.long 0,1,2,3
.long 4,4,4,4
.long 1,0,0,0
.long 4,0,0,0
.long 0,-1,-1,-1
.align 64
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54
.byte 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32
.byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111
.byte 114,103,62,0
#endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
marvin-hansen/iggy-streaming-system | 10,917 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/test/trampoline-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
.text
// abi_test_trampoline loads callee-saved registers from |state|, calls |func|
// with |argv|, then saves the callee-saved registers into |state|. It returns
// the result of |func|. The |unwind| argument is unused.
// uint64_t abi_test_trampoline(void (*func)(...), CallerState *state,
// const uint64_t *argv, size_t argc,
// uint64_t unwind);
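//
// A minimal usage sketch (not part of this file) of how a C caller might
// drive the trampoline; |my_func| and the pre-seeded |state| are hypothetical
// here, the real callers live in BoringSSL's abi_test harness:
//
//   extern uint64_t my_func(uint64_t a, uint64_t b);  // hypothetical callee
//   CallerState state;           // seeded with the expected register values
//   uint64_t argv[2] = {1, 2};
//   uint64_t ret = abi_test_trampoline((void (*)(...))my_func, &state,
//                                      argv, 2, /*unwind=*/0);
//   // |state| now holds the callee-saved registers as |my_func| left them.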
.globl abi_test_trampoline
.align 4
abi_test_trampoline:
Labi_test_trampoline_begin:
AARCH64_SIGN_LINK_REGISTER
// Stack layout (low to high addresses)
// x29,x30 (16 bytes)
// d8-d15 (64 bytes)
// x19-x28 (80 bytes)
// x1 (8 bytes)
// padding (8 bytes)
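// Total: 16 + 64 + 80 + 8 + 8 = 176 bytes, matching the pre-indexed #-176
// allocation below.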
stp x29, x30, [sp, #-176]!
mov x29, sp
// Save callee-saved registers and |state|.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
stp x19, x20, [sp, #80]
stp x21, x22, [sp, #96]
stp x23, x24, [sp, #112]
stp x25, x26, [sp, #128]
stp x27, x28, [sp, #144]
str x1, [sp, #160]
// Load registers from |state|, with the exception of x29. x29 is the
// frame pointer and also callee-saved, but AAPCS64 allows platforms to
// mandate that x29 always point to a frame. iOS64 does so, which means
// we cannot fill x29 with entropy without violating ABI rules
// ourselves. x29 is tested separately below.
ldp d8, d9, [x1], #16
ldp d10, d11, [x1], #16
ldp d12, d13, [x1], #16
ldp d14, d15, [x1], #16
ldp x19, x20, [x1], #16
ldp x21, x22, [x1], #16
ldp x23, x24, [x1], #16
ldp x25, x26, [x1], #16
ldp x27, x28, [x1], #16
// Move parameters into temporary registers.
mov x9, x0
mov x10, x2
mov x11, x3
// Load parameters into registers.
cbz x11, Largs_done
ldr x0, [x10], #8
subs x11, x11, #1
b.eq Largs_done
ldr x1, [x10], #8
subs x11, x11, #1
b.eq Largs_done
ldr x2, [x10], #8
subs x11, x11, #1
b.eq Largs_done
ldr x3, [x10], #8
subs x11, x11, #1
b.eq Largs_done
ldr x4, [x10], #8
subs x11, x11, #1
b.eq Largs_done
ldr x5, [x10], #8
subs x11, x11, #1
b.eq Largs_done
ldr x6, [x10], #8
subs x11, x11, #1
b.eq Largs_done
ldr x7, [x10], #8
Largs_done:
blr x9
// Reload |state| and store registers.
ldr x1, [sp, #160]
stp d8, d9, [x1], #16
stp d10, d11, [x1], #16
stp d12, d13, [x1], #16
stp d14, d15, [x1], #16
stp x19, x20, [x1], #16
stp x21, x22, [x1], #16
stp x23, x24, [x1], #16
stp x25, x26, [x1], #16
stp x27, x28, [x1], #16
// |func| is required to preserve x29, the frame pointer. We cannot load
// random values into x29 (see comment above), so compare it against the
// expected value and zero the field of |state| if corrupted.
mov x9, sp
cmp x29, x9
b.eq Lx29_ok
str xzr, [x1]
Lx29_ok:
// Restore callee-saved registers.
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
ldp x19, x20, [sp, #80]
ldp x21, x22, [sp, #96]
ldp x23, x24, [sp, #112]
ldp x25, x26, [sp, #128]
ldp x27, x28, [sp, #144]
ldp x29, x30, [sp], #176
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl abi_test_clobber_x0
.align 4
abi_test_clobber_x0:
AARCH64_VALID_CALL_TARGET
mov x0, xzr
ret
.globl abi_test_clobber_x1
.align 4
abi_test_clobber_x1:
AARCH64_VALID_CALL_TARGET
mov x1, xzr
ret
.globl abi_test_clobber_x2
.align 4
abi_test_clobber_x2:
AARCH64_VALID_CALL_TARGET
mov x2, xzr
ret
.globl abi_test_clobber_x3
.align 4
abi_test_clobber_x3:
AARCH64_VALID_CALL_TARGET
mov x3, xzr
ret
.globl abi_test_clobber_x4
.align 4
abi_test_clobber_x4:
AARCH64_VALID_CALL_TARGET
mov x4, xzr
ret
.globl abi_test_clobber_x5
.align 4
abi_test_clobber_x5:
AARCH64_VALID_CALL_TARGET
mov x5, xzr
ret
.globl abi_test_clobber_x6
.align 4
abi_test_clobber_x6:
AARCH64_VALID_CALL_TARGET
mov x6, xzr
ret
.globl abi_test_clobber_x7
.align 4
abi_test_clobber_x7:
AARCH64_VALID_CALL_TARGET
mov x7, xzr
ret
.globl abi_test_clobber_x8
.align 4
abi_test_clobber_x8:
AARCH64_VALID_CALL_TARGET
mov x8, xzr
ret
.globl abi_test_clobber_x9
.align 4
abi_test_clobber_x9:
AARCH64_VALID_CALL_TARGET
mov x9, xzr
ret
.globl abi_test_clobber_x10
.align 4
abi_test_clobber_x10:
AARCH64_VALID_CALL_TARGET
mov x10, xzr
ret
.globl abi_test_clobber_x11
.align 4
abi_test_clobber_x11:
AARCH64_VALID_CALL_TARGET
mov x11, xzr
ret
.globl abi_test_clobber_x12
.align 4
abi_test_clobber_x12:
AARCH64_VALID_CALL_TARGET
mov x12, xzr
ret
.globl abi_test_clobber_x13
.align 4
abi_test_clobber_x13:
AARCH64_VALID_CALL_TARGET
mov x13, xzr
ret
.globl abi_test_clobber_x14
.align 4
abi_test_clobber_x14:
AARCH64_VALID_CALL_TARGET
mov x14, xzr
ret
.globl abi_test_clobber_x15
.align 4
abi_test_clobber_x15:
AARCH64_VALID_CALL_TARGET
mov x15, xzr
ret
.globl abi_test_clobber_x16
.align 4
abi_test_clobber_x16:
AARCH64_VALID_CALL_TARGET
mov x16, xzr
ret
.globl abi_test_clobber_x17
.align 4
abi_test_clobber_x17:
AARCH64_VALID_CALL_TARGET
mov x17, xzr
ret
.globl abi_test_clobber_x19
.align 4
abi_test_clobber_x19:
AARCH64_VALID_CALL_TARGET
mov x19, xzr
ret
.globl abi_test_clobber_x20
.align 4
abi_test_clobber_x20:
AARCH64_VALID_CALL_TARGET
mov x20, xzr
ret
.globl abi_test_clobber_x21
.align 4
abi_test_clobber_x21:
AARCH64_VALID_CALL_TARGET
mov x21, xzr
ret
.globl abi_test_clobber_x22
.align 4
abi_test_clobber_x22:
AARCH64_VALID_CALL_TARGET
mov x22, xzr
ret
.globl abi_test_clobber_x23
.align 4
abi_test_clobber_x23:
AARCH64_VALID_CALL_TARGET
mov x23, xzr
ret
.globl abi_test_clobber_x24
.align 4
abi_test_clobber_x24:
AARCH64_VALID_CALL_TARGET
mov x24, xzr
ret
.globl abi_test_clobber_x25
.align 4
abi_test_clobber_x25:
AARCH64_VALID_CALL_TARGET
mov x25, xzr
ret
.globl abi_test_clobber_x26
.align 4
abi_test_clobber_x26:
AARCH64_VALID_CALL_TARGET
mov x26, xzr
ret
.globl abi_test_clobber_x27
.align 4
abi_test_clobber_x27:
AARCH64_VALID_CALL_TARGET
mov x27, xzr
ret
.globl abi_test_clobber_x28
.align 4
abi_test_clobber_x28:
AARCH64_VALID_CALL_TARGET
mov x28, xzr
ret
.globl abi_test_clobber_x29
.align 4
abi_test_clobber_x29:
AARCH64_VALID_CALL_TARGET
mov x29, xzr
ret
.globl abi_test_clobber_d0
.align 4
abi_test_clobber_d0:
AARCH64_VALID_CALL_TARGET
fmov d0, xzr
ret
.globl abi_test_clobber_d1
.align 4
abi_test_clobber_d1:
AARCH64_VALID_CALL_TARGET
fmov d1, xzr
ret
.globl abi_test_clobber_d2
.align 4
abi_test_clobber_d2:
AARCH64_VALID_CALL_TARGET
fmov d2, xzr
ret
.globl abi_test_clobber_d3
.align 4
abi_test_clobber_d3:
AARCH64_VALID_CALL_TARGET
fmov d3, xzr
ret
.globl abi_test_clobber_d4
.align 4
abi_test_clobber_d4:
AARCH64_VALID_CALL_TARGET
fmov d4, xzr
ret
.globl abi_test_clobber_d5
.align 4
abi_test_clobber_d5:
AARCH64_VALID_CALL_TARGET
fmov d5, xzr
ret
.globl abi_test_clobber_d6
.align 4
abi_test_clobber_d6:
AARCH64_VALID_CALL_TARGET
fmov d6, xzr
ret
.globl abi_test_clobber_d7
.align 4
abi_test_clobber_d7:
AARCH64_VALID_CALL_TARGET
fmov d7, xzr
ret
.globl abi_test_clobber_d8
.align 4
abi_test_clobber_d8:
AARCH64_VALID_CALL_TARGET
fmov d8, xzr
ret
.globl abi_test_clobber_d9
.align 4
abi_test_clobber_d9:
AARCH64_VALID_CALL_TARGET
fmov d9, xzr
ret
.globl abi_test_clobber_d10
.align 4
abi_test_clobber_d10:
AARCH64_VALID_CALL_TARGET
fmov d10, xzr
ret
.globl abi_test_clobber_d11
.align 4
abi_test_clobber_d11:
AARCH64_VALID_CALL_TARGET
fmov d11, xzr
ret
.globl abi_test_clobber_d12
.align 4
abi_test_clobber_d12:
AARCH64_VALID_CALL_TARGET
fmov d12, xzr
ret
.globl abi_test_clobber_d13
.align 4
abi_test_clobber_d13:
AARCH64_VALID_CALL_TARGET
fmov d13, xzr
ret
.globl abi_test_clobber_d14
.align 4
abi_test_clobber_d14:
AARCH64_VALID_CALL_TARGET
fmov d14, xzr
ret
.globl abi_test_clobber_d15
.align 4
abi_test_clobber_d15:
AARCH64_VALID_CALL_TARGET
fmov d15, xzr
ret
.globl abi_test_clobber_d16
.align 4
abi_test_clobber_d16:
AARCH64_VALID_CALL_TARGET
fmov d16, xzr
ret
.globl abi_test_clobber_d17
.align 4
abi_test_clobber_d17:
AARCH64_VALID_CALL_TARGET
fmov d17, xzr
ret
.globl abi_test_clobber_d18
.align 4
abi_test_clobber_d18:
AARCH64_VALID_CALL_TARGET
fmov d18, xzr
ret
.globl abi_test_clobber_d19
.align 4
abi_test_clobber_d19:
AARCH64_VALID_CALL_TARGET
fmov d19, xzr
ret
.globl abi_test_clobber_d20
.align 4
abi_test_clobber_d20:
AARCH64_VALID_CALL_TARGET
fmov d20, xzr
ret
.globl abi_test_clobber_d21
.align 4
abi_test_clobber_d21:
AARCH64_VALID_CALL_TARGET
fmov d21, xzr
ret
.globl abi_test_clobber_d22
.align 4
abi_test_clobber_d22:
AARCH64_VALID_CALL_TARGET
fmov d22, xzr
ret
.globl abi_test_clobber_d23
.align 4
abi_test_clobber_d23:
AARCH64_VALID_CALL_TARGET
fmov d23, xzr
ret
.globl abi_test_clobber_d24
.align 4
abi_test_clobber_d24:
AARCH64_VALID_CALL_TARGET
fmov d24, xzr
ret
.globl abi_test_clobber_d25
.align 4
abi_test_clobber_d25:
AARCH64_VALID_CALL_TARGET
fmov d25, xzr
ret
.globl abi_test_clobber_d26
.align 4
abi_test_clobber_d26:
AARCH64_VALID_CALL_TARGET
fmov d26, xzr
ret
.globl abi_test_clobber_d27
.align 4
abi_test_clobber_d27:
AARCH64_VALID_CALL_TARGET
fmov d27, xzr
ret
.globl abi_test_clobber_d28
.align 4
abi_test_clobber_d28:
AARCH64_VALID_CALL_TARGET
fmov d28, xzr
ret
.globl abi_test_clobber_d29
.align 4
abi_test_clobber_d29:
AARCH64_VALID_CALL_TARGET
fmov d29, xzr
ret
.globl abi_test_clobber_d30
.align 4
abi_test_clobber_d30:
AARCH64_VALID_CALL_TARGET
fmov d30, xzr
ret
.globl abi_test_clobber_d31
.align 4
abi_test_clobber_d31:
AARCH64_VALID_CALL_TARGET
fmov d31, xzr
ret
.globl abi_test_clobber_v8_upper
.align 4
abi_test_clobber_v8_upper:
AARCH64_VALID_CALL_TARGET
fmov v8.d[1], xzr
ret
.globl abi_test_clobber_v9_upper
.align 4
abi_test_clobber_v9_upper:
AARCH64_VALID_CALL_TARGET
fmov v9.d[1], xzr
ret
.globl abi_test_clobber_v10_upper
.align 4
abi_test_clobber_v10_upper:
AARCH64_VALID_CALL_TARGET
fmov v10.d[1], xzr
ret
.globl abi_test_clobber_v11_upper
.align 4
abi_test_clobber_v11_upper:
AARCH64_VALID_CALL_TARGET
fmov v11.d[1], xzr
ret
.globl abi_test_clobber_v12_upper
.align 4
abi_test_clobber_v12_upper:
AARCH64_VALID_CALL_TARGET
fmov v12.d[1], xzr
ret
.globl abi_test_clobber_v13_upper
.align 4
abi_test_clobber_v13_upper:
AARCH64_VALID_CALL_TARGET
fmov v13.d[1], xzr
ret
.globl abi_test_clobber_v14_upper
.align 4
abi_test_clobber_v14_upper:
AARCH64_VALID_CALL_TARGET
fmov v14.d[1], xzr
ret
.globl abi_test_clobber_v15_upper
.align 4
abi_test_clobber_v15_upper:
AARCH64_VALID_CALL_TARGET
fmov v15.d[1], xzr
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/aesv8-armx.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.section .rodata
.align 5
Lrcon:
.long 0x01,0x01,0x01,0x01
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b
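// Lrcon holds the initial AES round constant 0x01 (doubled each round by a
// byte-wide shl), the "rotate-n-splat" byte-permutation mask used with tbl
// for the RotWord step (aese with an all-zero round key then reduces to
// SubBytes, since ShiftRows is a no-op on a splatted state), and 0x1b, which
// is reloaded once the shifted constant would wrap a byte.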
.text
.globl aes_hw_set_encrypt_key
.def aes_hw_set_encrypt_key
.type 32
.endef
.align 5
aes_hw_set_encrypt_key:
Lenc_key:
#ifdef BORINGSSL_DISPATCH_TEST
adrp x9,BORINGSSL_function_hit
add x9, x9, :lo12:BORINGSSL_function_hit
mov w10, #1
strb w10, [x9,#3] // kFlag_aes_hw_set_encrypt_key
#endif
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x3,#-1
cmp x0,#0
b.eq Lenc_key_abort
cmp x2,#0
b.eq Lenc_key_abort
mov x3,#-2
cmp w1,#128
b.lt Lenc_key_abort
cmp w1,#256
b.gt Lenc_key_abort
tst w1,#0x3f
b.ne Lenc_key_abort
adrp x3,Lrcon
add x3,x3,:lo12:Lrcon
cmp w1,#192
eor v0.16b,v0.16b,v0.16b
ld1 {v3.16b},[x0],#16
mov w1,#8 // reuse w1
ld1 {v1.4s,v2.4s},[x3],#32
b.lt Loop128
b.eq L192
b L256
.align 4
Loop128:
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
b.ne Loop128
ld1 {v1.4s},[x3]
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
tbl v6.16b,{v3.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v3.4s},[x2],#16
aese v6.16b,v0.16b
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2]
add x2,x2,#0x50
mov w12,#10
b Ldone
.align 4
L192:
ld1 {v4.8b},[x0],#8
movi v6.16b,#8 // borrow v6.16b
st1 {v3.4s},[x2],#16
sub v2.16b,v2.16b,v6.16b // adjust the mask
Loop192:
tbl v6.16b,{v4.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v4.8b},[x2],#8
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
dup v5.4s,v3.s[3]
eor v5.16b,v5.16b,v4.16b
eor v6.16b,v6.16b,v1.16b
ext v4.16b,v0.16b,v4.16b,#12
shl v1.16b,v1.16b,#1
eor v4.16b,v4.16b,v5.16b
eor v3.16b,v3.16b,v6.16b
eor v4.16b,v4.16b,v6.16b
st1 {v3.4s},[x2],#16
b.ne Loop192
mov w12,#12
add x2,x2,#0x20
b Ldone
.align 4
L256:
ld1 {v4.16b},[x0]
mov w1,#7
mov w12,#14
st1 {v3.4s},[x2],#16
Loop256:
tbl v6.16b,{v4.16b},v2.16b
ext v5.16b,v0.16b,v3.16b,#12
st1 {v4.4s},[x2],#16
aese v6.16b,v0.16b
subs w1,w1,#1
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v3.16b,v3.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v6.16b,v6.16b,v1.16b
eor v3.16b,v3.16b,v5.16b
shl v1.16b,v1.16b,#1
eor v3.16b,v3.16b,v6.16b
st1 {v3.4s},[x2],#16
b.eq Ldone
dup v6.4s,v3.s[3] // just splat
ext v5.16b,v0.16b,v4.16b,#12
aese v6.16b,v0.16b
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
ext v5.16b,v0.16b,v5.16b,#12
eor v4.16b,v4.16b,v5.16b
eor v4.16b,v4.16b,v6.16b
b Loop256
Ldone:
str w12,[x2]
mov x3,#0
Lenc_key_abort:
mov x0,x3 // return value
ldr x29,[sp],#16
ret
.globl aes_hw_set_decrypt_key
.def aes_hw_set_decrypt_key
.type 32
.endef
.align 5
aes_hw_set_decrypt_key:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
bl Lenc_key
cmp x0,#0
b.ne Ldec_key_abort
sub x2,x2,#240 // restore original x2
mov x4,#-16
add x0,x2,x12,lsl#4 // end of key schedule
ld1 {v0.4s},[x2]
ld1 {v1.4s},[x0]
st1 {v0.4s},[x0],x4
st1 {v1.4s},[x2],#16
Loop_imc:
ld1 {v0.4s},[x2]
ld1 {v1.4s},[x0]
aesimc v0.16b,v0.16b
aesimc v1.16b,v1.16b
st1 {v0.4s},[x0],x4
st1 {v1.4s},[x2],#16
cmp x0,x2
b.hi Loop_imc
ld1 {v0.4s},[x2]
aesimc v0.16b,v0.16b
st1 {v0.4s},[x0]
eor x0,x0,x0 // return value
Ldec_key_abort:
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl aes_hw_encrypt
.def aes_hw_encrypt
.type 32
.endef
.align 5
aes_hw_encrypt:
#ifdef BORINGSSL_DISPATCH_TEST
adrp x9,BORINGSSL_function_hit
add x9, x9, :lo12:BORINGSSL_function_hit
mov w10, #1
strb w10, [x9,#1] // kFlag_aes_hw_encrypt
#endif
AARCH64_VALID_CALL_TARGET
ldr w3,[x2,#240]
ld1 {v0.4s},[x2],#16
ld1 {v2.16b},[x0]
sub w3,w3,#2
ld1 {v1.4s},[x2],#16
Loop_enc:
aese v2.16b,v0.16b
aesmc v2.16b,v2.16b
ld1 {v0.4s},[x2],#16
subs w3,w3,#2
aese v2.16b,v1.16b
aesmc v2.16b,v2.16b
ld1 {v1.4s},[x2],#16
b.gt Loop_enc
aese v2.16b,v0.16b
aesmc v2.16b,v2.16b
ld1 {v0.4s},[x2]
aese v2.16b,v1.16b
eor v2.16b,v2.16b,v0.16b
st1 {v2.16b},[x1]
ret
.globl aes_hw_decrypt
.def aes_hw_decrypt
.type 32
.endef
.align 5
aes_hw_decrypt:
#ifdef BORINGSSL_DISPATCH_TEST
adrp x9,BORINGSSL_function_hit
add x9, x9, :lo12:BORINGSSL_function_hit
mov w10, #1
strb w10, [x9,#1] // kFlag_aes_hw_encrypt
#endif
AARCH64_VALID_CALL_TARGET
ldr w3,[x2,#240]
ld1 {v0.4s},[x2],#16
ld1 {v2.16b},[x0]
sub w3,w3,#2
ld1 {v1.4s},[x2],#16
Loop_dec:
aesd v2.16b,v0.16b
aesimc v2.16b,v2.16b
ld1 {v0.4s},[x2],#16
subs w3,w3,#2
aesd v2.16b,v1.16b
aesimc v2.16b,v2.16b
ld1 {v1.4s},[x2],#16
b.gt Loop_dec
aesd v2.16b,v0.16b
aesimc v2.16b,v2.16b
ld1 {v0.4s},[x2]
aesd v2.16b,v1.16b
eor v2.16b,v2.16b,v0.16b
st1 {v2.16b},[x1]
ret
.globl aes_hw_cbc_encrypt
.def aes_hw_cbc_encrypt
.type 32
.endef
.align 5
aes_hw_cbc_encrypt:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
subs x2,x2,#16
mov x8,#16
b.lo Lcbc_abort
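// x8 is the post-increment step for the input loads; it is zeroed when the
// last block is reached so the input pointer never advances past the end.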
csel x8,xzr,x8,eq
cmp w5,#0 // en- or decrypting?
ldr w5,[x3,#240]
and x2,x2,#-16
ld1 {v6.16b},[x4]
ld1 {v0.16b},[x0],x8
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
sub w5,w5,#6
add x7,x3,x5,lsl#4 // pointer to last 7 round keys
sub w5,w5,#2
ld1 {v18.4s,v19.4s},[x7],#32
ld1 {v20.4s,v21.4s},[x7],#32
ld1 {v22.4s,v23.4s},[x7],#32
ld1 {v7.4s},[x7]
add x7,x3,#32
mov w6,w5
b.eq Lcbc_dec
cmp w5,#2
eor v0.16b,v0.16b,v6.16b
eor v5.16b,v16.16b,v7.16b
b.eq Lcbc_enc128
ld1 {v2.4s,v3.4s},[x7]
add x7,x3,#16
add x6,x3,#16*4
add x12,x3,#16*5
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
add x14,x3,#16*6
add x3,x3,#16*7
b Lenter_cbc_enc
.align 4
Loop_cbc_enc:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
st1 {v6.16b},[x1],#16
Lenter_cbc_enc:
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v0.16b,v2.16b
aesmc v0.16b,v0.16b
ld1 {v16.4s},[x6]
cmp w5,#4
aese v0.16b,v3.16b
aesmc v0.16b,v0.16b
ld1 {v17.4s},[x12]
b.eq Lcbc_enc192
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
ld1 {v16.4s},[x14]
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
ld1 {v17.4s},[x3]
nop
Lcbc_enc192:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
subs x2,x2,#16
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
csel x8,xzr,x8,eq
aese v0.16b,v18.16b
aesmc v0.16b,v0.16b
aese v0.16b,v19.16b
aesmc v0.16b,v0.16b
ld1 {v16.16b},[x0],x8
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
eor v16.16b,v16.16b,v5.16b
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
ld1 {v17.4s},[x7] // re-pre-load rndkey[1]
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
aese v0.16b,v23.16b
eor v6.16b,v0.16b,v7.16b
b.hs Loop_cbc_enc
st1 {v6.16b},[x1],#16
b Lcbc_done
.align 5
Lcbc_enc128:
ld1 {v2.4s,v3.4s},[x7]
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
b Lenter_cbc_enc128
Loop_cbc_enc128:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
st1 {v6.16b},[x1],#16
Lenter_cbc_enc128:
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
subs x2,x2,#16
aese v0.16b,v2.16b
aesmc v0.16b,v0.16b
csel x8,xzr,x8,eq
aese v0.16b,v3.16b
aesmc v0.16b,v0.16b
aese v0.16b,v18.16b
aesmc v0.16b,v0.16b
aese v0.16b,v19.16b
aesmc v0.16b,v0.16b
ld1 {v16.16b},[x0],x8
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
eor v16.16b,v16.16b,v5.16b
aese v0.16b,v23.16b
eor v6.16b,v0.16b,v7.16b
b.hs Loop_cbc_enc128
st1 {v6.16b},[x1],#16
b Lcbc_done
.align 5
Lcbc_dec:
ld1 {v18.16b},[x0],#16
subs x2,x2,#32 // bias
add w6,w5,#2
orr v3.16b,v0.16b,v0.16b
orr v1.16b,v0.16b,v0.16b
orr v19.16b,v18.16b,v18.16b
b.lo Lcbc_dec_tail
orr v1.16b,v18.16b,v18.16b
ld1 {v18.16b},[x0],#16
orr v2.16b,v0.16b,v0.16b
orr v3.16b,v1.16b,v1.16b
orr v19.16b,v18.16b,v18.16b
Loop3x_cbc_dec:
aesd v0.16b,v16.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v16.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v16.16b
aesimc v18.16b,v18.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aesd v0.16b,v17.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v17.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v17.16b
aesimc v18.16b,v18.16b
ld1 {v17.4s},[x7],#16
b.gt Loop3x_cbc_dec
aesd v0.16b,v16.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v16.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v16.16b
aesimc v18.16b,v18.16b
eor v4.16b,v6.16b,v7.16b
subs x2,x2,#0x30
eor v5.16b,v2.16b,v7.16b
csel x6,x2,x6,lo // x6, w6, is zero at this point
aesd v0.16b,v17.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v17.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v17.16b
aesimc v18.16b,v18.16b
eor v17.16b,v3.16b,v7.16b
add x0,x0,x6 // x0 is adjusted in such a way that
// at exit from the loop v1.16b-v18.16b
// are loaded with last "words"
orr v6.16b,v19.16b,v19.16b
mov x7,x3
aesd v0.16b,v20.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v20.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v20.16b
aesimc v18.16b,v18.16b
ld1 {v2.16b},[x0],#16
aesd v0.16b,v21.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v21.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v21.16b
aesimc v18.16b,v18.16b
ld1 {v3.16b},[x0],#16
aesd v0.16b,v22.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v22.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v22.16b
aesimc v18.16b,v18.16b
ld1 {v19.16b},[x0],#16
aesd v0.16b,v23.16b
aesd v1.16b,v23.16b
aesd v18.16b,v23.16b
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
add w6,w5,#2
eor v4.16b,v4.16b,v0.16b
eor v5.16b,v5.16b,v1.16b
eor v18.16b,v18.16b,v17.16b
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
st1 {v4.16b},[x1],#16
orr v0.16b,v2.16b,v2.16b
st1 {v5.16b},[x1],#16
orr v1.16b,v3.16b,v3.16b
st1 {v18.16b},[x1],#16
orr v18.16b,v19.16b,v19.16b
b.hs Loop3x_cbc_dec
cmn x2,#0x30
b.eq Lcbc_done
nop
Lcbc_dec_tail:
aesd v1.16b,v16.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v16.16b
aesimc v18.16b,v18.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aesd v1.16b,v17.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v17.16b
aesimc v18.16b,v18.16b
ld1 {v17.4s},[x7],#16
b.gt Lcbc_dec_tail
aesd v1.16b,v16.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v16.16b
aesimc v18.16b,v18.16b
aesd v1.16b,v17.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v17.16b
aesimc v18.16b,v18.16b
aesd v1.16b,v20.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v20.16b
aesimc v18.16b,v18.16b
cmn x2,#0x20
aesd v1.16b,v21.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v21.16b
aesimc v18.16b,v18.16b
eor v5.16b,v6.16b,v7.16b
aesd v1.16b,v22.16b
aesimc v1.16b,v1.16b
aesd v18.16b,v22.16b
aesimc v18.16b,v18.16b
eor v17.16b,v3.16b,v7.16b
aesd v1.16b,v23.16b
aesd v18.16b,v23.16b
b.eq Lcbc_dec_one
eor v5.16b,v5.16b,v1.16b
eor v17.16b,v17.16b,v18.16b
orr v6.16b,v19.16b,v19.16b
st1 {v5.16b},[x1],#16
st1 {v17.16b},[x1],#16
b Lcbc_done
Lcbc_dec_one:
eor v5.16b,v5.16b,v18.16b
orr v6.16b,v19.16b,v19.16b
st1 {v5.16b},[x1],#16
Lcbc_done:
st1 {v6.16b},[x4]
Lcbc_abort:
ldr x29,[sp],#16
ret
.globl aes_hw_ctr32_encrypt_blocks
.def aes_hw_ctr32_encrypt_blocks
.type 32
.endef
.align 5
aes_hw_ctr32_encrypt_blocks:
#ifdef BORINGSSL_DISPATCH_TEST
adrp x9,BORINGSSL_function_hit
add x9, x9, :lo12:BORINGSSL_function_hit
mov w10, #1
strb w10, [x9] // kFlag_aes_hw_ctr32_encrypt_blocks
#endif
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ldr w5,[x3,#240]
ldr w8, [x4, #12]
ld1 {v0.4s},[x4]
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
sub w5,w5,#4
mov x12,#16
cmp x2,#2
add x7,x3,x5,lsl#4 // pointer to last 5 round keys
sub w5,w5,#2
ld1 {v20.4s,v21.4s},[x7],#32
ld1 {v22.4s,v23.4s},[x7],#32
ld1 {v7.4s},[x7]
add x7,x3,#32
mov w6,w5
// ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
// affected by silicon errata #1742098 [0] and #1655431 [1],
// respectively, where the second instruction of an aese/aesmc
// instruction pair may execute twice if an interrupt is taken right
// after the first instruction consumes an input register of which a
// single 32-bit lane has been updated the last time it was modified.
//
// This function uses a counter in one 32-bit lane. The vmov lines
// could write to v1.16b and v18.16b directly, but that trips this bug.
// We write to v6.16b and copy to the final register as a workaround.
//
// [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
// [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
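// The 32-bit block counter is the last word of the IV and is stored
// big-endian; on little-endian hosts it is byte-reversed (rev) so it can be
// incremented with plain adds, then reversed again before being moved back
// into lane 3 of the vector.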
#ifndef __AARCH64EB__
rev w8, w8
#endif
add w10, w8, #1
orr v6.16b,v0.16b,v0.16b
rev w10, w10
mov v6.s[3],w10
add w8, w8, #2
orr v1.16b,v6.16b,v6.16b
b.ls Lctr32_tail
rev w12, w8
mov v6.s[3],w12
sub x2,x2,#3 // bias
orr v18.16b,v6.16b,v6.16b
b Loop3x_ctr32
.align 4
Loop3x_ctr32:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
aese v18.16b,v17.16b
aesmc v18.16b,v18.16b
ld1 {v17.4s},[x7],#16
b.gt Loop3x_ctr32
aese v0.16b,v16.16b
aesmc v4.16b,v0.16b
aese v1.16b,v16.16b
aesmc v5.16b,v1.16b
ld1 {v2.16b},[x0],#16
add w9,w8,#1
aese v18.16b,v16.16b
aesmc v18.16b,v18.16b
ld1 {v3.16b},[x0],#16
rev w9,w9
aese v4.16b,v17.16b
aesmc v4.16b,v4.16b
aese v5.16b,v17.16b
aesmc v5.16b,v5.16b
ld1 {v19.16b},[x0],#16
mov x7,x3
aese v18.16b,v17.16b
aesmc v17.16b,v18.16b
aese v4.16b,v20.16b
aesmc v4.16b,v4.16b
aese v5.16b,v20.16b
aesmc v5.16b,v5.16b
eor v2.16b,v2.16b,v7.16b
add w10,w8,#2
aese v17.16b,v20.16b
aesmc v17.16b,v17.16b
eor v3.16b,v3.16b,v7.16b
add w8,w8,#3
aese v4.16b,v21.16b
aesmc v4.16b,v4.16b
aese v5.16b,v21.16b
aesmc v5.16b,v5.16b
// Note the logic to update v0.16b, v1.16b, and v18.16b is written to work
// around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
// 32-bit mode. See the comment above.
eor v19.16b,v19.16b,v7.16b
mov v6.s[3], w9
aese v17.16b,v21.16b
aesmc v17.16b,v17.16b
orr v0.16b,v6.16b,v6.16b
rev w10,w10
aese v4.16b,v22.16b
aesmc v4.16b,v4.16b
mov v6.s[3], w10
rev w12,w8
aese v5.16b,v22.16b
aesmc v5.16b,v5.16b
orr v1.16b,v6.16b,v6.16b
mov v6.s[3], w12
aese v17.16b,v22.16b
aesmc v17.16b,v17.16b
orr v18.16b,v6.16b,v6.16b
subs x2,x2,#3
aese v4.16b,v23.16b
aese v5.16b,v23.16b
aese v17.16b,v23.16b
eor v2.16b,v2.16b,v4.16b
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
st1 {v2.16b},[x1],#16
eor v3.16b,v3.16b,v5.16b
mov w6,w5
st1 {v3.16b},[x1],#16
eor v19.16b,v19.16b,v17.16b
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
st1 {v19.16b},[x1],#16
b.hs Loop3x_ctr32
adds x2,x2,#3
b.eq Lctr32_done
Lctr32_tail:
cmp x2,#1
b.lt Lctr32_done // if len = 0, go to done
mov x12,#16
csel x12,xzr,x12,eq
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v17.4s},[x7],#16
b.gt Lctr32_tail
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
ld1 {v2.16b},[x0],x12
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
aese v1.16b,v20.16b
aesmc v1.16b,v1.16b
ld1 {v3.16b},[x0]
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
aese v1.16b,v21.16b
aesmc v1.16b,v1.16b
eor v2.16b,v2.16b,v7.16b
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
aese v1.16b,v22.16b
aesmc v1.16b,v1.16b
eor v3.16b,v3.16b,v7.16b
aese v0.16b,v23.16b
aese v1.16b,v23.16b
eor v2.16b,v2.16b,v0.16b
eor v3.16b,v3.16b,v1.16b
st1 {v2.16b},[x1],#16
cbz x12,Lctr32_done // if step = 0 (len = 1), go to done
st1 {v3.16b},[x1]
Lctr32_done:
ldr x29,[sp],#16
ret
.globl aes_hw_xts_encrypt
.def aes_hw_xts_encrypt
.type 32
.endef
.align 5
aes_hw_xts_encrypt:
AARCH64_VALID_CALL_TARGET
cmp x2,#16
// If the input size is not exactly 16 bytes, jump to the big-size path.
b.ne Lxts_enc_big_size
// Encrypt the iv with key2, as the first XEX iv.
ldr w6,[x4,#240]
ld1 {v0.16b},[x4],#16
ld1 {v6.16b},[x5]
sub w6,w6,#2
ld1 {v1.16b},[x4],#16
Loop_enc_iv_enc:
aese v6.16b,v0.16b
aesmc v6.16b,v6.16b
ld1 {v0.4s},[x4],#16
subs w6,w6,#2
aese v6.16b,v1.16b
aesmc v6.16b,v6.16b
ld1 {v1.4s},[x4],#16
b.gt Loop_enc_iv_enc
aese v6.16b,v0.16b
aesmc v6.16b,v6.16b
ld1 {v0.4s},[x4]
aese v6.16b,v1.16b
eor v6.16b,v6.16b,v0.16b
ld1 {v0.16b},[x0]
eor v0.16b,v6.16b,v0.16b
ldr w6,[x3,#240]
ld1 {v28.4s,v29.4s},[x3],#32 // load key schedule...
aese v0.16b,v28.16b
aesmc v0.16b,v0.16b
ld1 {v16.4s,v17.4s},[x3],#32 // load key schedule...
aese v0.16b,v29.16b
aesmc v0.16b,v0.16b
subs w6,w6,#10 // if rounds==10, jump to aes-128-xts processing
b.eq Lxts_128_enc
Lxts_enc_round_loop:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
ld1 {v16.4s},[x3],#16 // load key schedule...
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
ld1 {v17.4s},[x3],#16 // load key schedule...
subs w6,w6,#2 // bias
b.gt Lxts_enc_round_loop
Lxts_128_enc:
ld1 {v18.4s,v19.4s},[x3],#32 // load key schedule...
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
ld1 {v20.4s,v21.4s},[x3],#32 // load key schedule...
aese v0.16b,v18.16b
aesmc v0.16b,v0.16b
aese v0.16b,v19.16b
aesmc v0.16b,v0.16b
ld1 {v22.4s,v23.4s},[x3],#32 // load key schedule...
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
ld1 {v7.4s},[x3]
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
aese v0.16b,v23.16b
eor v0.16b,v0.16b,v7.16b
eor v0.16b,v0.16b,v6.16b
st1 {v0.16b},[x1]
b Lxts_enc_final_abort
.align 4
Lxts_enc_big_size:
// Encrypt input size > 16 bytes
stp x19,x20,[sp,#-64]!
stp x21,x22,[sp,#48]
stp d8,d9,[sp,#32]
stp d10,d11,[sp,#16]
// tailcnt stores the tail value of length % 16.
and x21,x2,#0xf
and x2,x2,#-16 // len &= 0x1..110000, now divisible by 16
subs x2,x2,#16
mov x8,#16
b.lo Lxts_abort // if len < 16: error
csel x8,xzr,x8,eq // if (len == 16): step = 0
// Firstly, encrypt the iv with key2, as the first iv of XEX.
ldr w6,[x4,#240]
ld1 {v0.4s},[x4],#16
ld1 {v6.16b},[x5]
sub w6,w6,#2
ld1 {v1.4s},[x4],#16
Loop_iv_enc:
aese v6.16b,v0.16b
aesmc v6.16b,v6.16b
ld1 {v0.4s},[x4],#16
subs w6,w6,#2
aese v6.16b,v1.16b
aesmc v6.16b,v6.16b
ld1 {v1.4s},[x4],#16
b.gt Loop_iv_enc
aese v6.16b,v0.16b
aesmc v6.16b,v6.16b
ld1 {v0.4s},[x4]
aese v6.16b,v1.16b
eor v6.16b,v6.16b,v0.16b
// The iv for second block
// x9- iv(low), x10 - iv(high)
// the five ivs stored into, v6.16b,v8.16b,v9.16b,v10.16b,v11.16b
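// Each subsequent tweak is the previous one multiplied by x in GF(2^128):
// shift the 128-bit value (x10:x9) left by one bit and, if the top bit was
// set, xor 0x87 (from the reduction polynomial x^128+x^7+x^2+x+1) into the
// low byte. The extr/and/eor sequences below implement exactly that.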
fmov x9,d6
fmov x10,v6.d[1]
mov w19,#0x87
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr#31
eor x9,x11,x9,lsl#1
fmov d8,x9
fmov v8.d[1],x10
ldr w5,[x3,#240] // next starting point
ld1 {v0.16b},[x0],x8
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
sub w5,w5,#6
add x7,x3,x5,lsl#4 // pointer to last 7 round keys
sub w5,w5,#2
ld1 {v18.4s,v19.4s},[x7],#32
ld1 {v20.4s,v21.4s},[x7],#32
ld1 {v22.4s,v23.4s},[x7],#32
ld1 {v7.4s},[x7]
add x7,x3,#32
mov w6,w5
// Encryption
Lxts_enc:
ld1 {v24.16b},[x0],#16
subs x2,x2,#32 // bias
add w6,w5,#2
orr v3.16b,v0.16b,v0.16b
orr v1.16b,v0.16b,v0.16b
orr v28.16b,v0.16b,v0.16b
orr v27.16b,v24.16b,v24.16b
orr v29.16b,v24.16b,v24.16b
b.lo Lxts_inner_enc_tail // when input size % 5 = 1 or 2
// (with tail or not)
eor v0.16b,v0.16b,v6.16b // before encryption, xor with iv
eor v24.16b,v24.16b,v8.16b
// The iv for third block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr#31
eor x9,x11,x9,lsl#1
fmov d9,x9
fmov v9.d[1],x10
orr v1.16b,v24.16b,v24.16b
ld1 {v24.16b},[x0],#16
orr v2.16b,v0.16b,v0.16b
orr v3.16b,v1.16b,v1.16b
eor v27.16b,v24.16b,v9.16b // the third block
eor v24.16b,v24.16b,v9.16b
cmp x2,#32
b.lo Lxts_outer_enc_tail
// The iv for fourth block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr#31
eor x9,x11,x9,lsl#1
fmov d10,x9
fmov v10.d[1],x10
ld1 {v25.16b},[x0],#16
// The iv for fifth block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr#31
eor x9,x11,x9,lsl#1
fmov d11,x9
fmov v11.d[1],x10
ld1 {v26.16b},[x0],#16
eor v25.16b,v25.16b,v10.16b // the fourth block
eor v26.16b,v26.16b,v11.16b
sub x2,x2,#32 // bias
mov w6,w5
b Loop5x_xts_enc
.align 4
Loop5x_xts_enc:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v24.16b,v16.16b
aesmc v24.16b,v24.16b
aese v25.16b,v16.16b
aesmc v25.16b,v25.16b
aese v26.16b,v16.16b
aesmc v26.16b,v26.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
aese v24.16b,v17.16b
aesmc v24.16b,v24.16b
aese v25.16b,v17.16b
aesmc v25.16b,v25.16b
aese v26.16b,v17.16b
aesmc v26.16b,v26.16b
ld1 {v17.4s},[x7],#16
b.gt Loop5x_xts_enc
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v24.16b,v16.16b
aesmc v24.16b,v24.16b
aese v25.16b,v16.16b
aesmc v25.16b,v25.16b
aese v26.16b,v16.16b
aesmc v26.16b,v26.16b
subs x2,x2,#0x50 // because Lxts_enc_tail4x
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
aese v24.16b,v17.16b
aesmc v24.16b,v24.16b
aese v25.16b,v17.16b
aesmc v25.16b,v25.16b
aese v26.16b,v17.16b
aesmc v26.16b,v26.16b
csel x6,xzr,x2,gt // borrow x6, w6, "gt" is not typo
mov x7,x3
aese v0.16b,v18.16b
aesmc v0.16b,v0.16b
aese v1.16b,v18.16b
aesmc v1.16b,v1.16b
aese v24.16b,v18.16b
aesmc v24.16b,v24.16b
aese v25.16b,v18.16b
aesmc v25.16b,v25.16b
aese v26.16b,v18.16b
aesmc v26.16b,v26.16b
add x0,x0,x6 // x0 is adjusted in such a way that
// at exit from the loop v1.16b-v26.16b
// are loaded with last "words"
add x6,x2,#0x60 // because Lxts_enc_tail4x
aese v0.16b,v19.16b
aesmc v0.16b,v0.16b
aese v1.16b,v19.16b
aesmc v1.16b,v1.16b
aese v24.16b,v19.16b
aesmc v24.16b,v24.16b
aese v25.16b,v19.16b
aesmc v25.16b,v25.16b
aese v26.16b,v19.16b
aesmc v26.16b,v26.16b
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
aese v1.16b,v20.16b
aesmc v1.16b,v1.16b
aese v24.16b,v20.16b
aesmc v24.16b,v24.16b
aese v25.16b,v20.16b
aesmc v25.16b,v25.16b
aese v26.16b,v20.16b
aesmc v26.16b,v26.16b
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
aese v1.16b,v21.16b
aesmc v1.16b,v1.16b
aese v24.16b,v21.16b
aesmc v24.16b,v24.16b
aese v25.16b,v21.16b
aesmc v25.16b,v25.16b
aese v26.16b,v21.16b
aesmc v26.16b,v26.16b
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
aese v1.16b,v22.16b
aesmc v1.16b,v1.16b
aese v24.16b,v22.16b
aesmc v24.16b,v24.16b
aese v25.16b,v22.16b
aesmc v25.16b,v25.16b
aese v26.16b,v22.16b
aesmc v26.16b,v26.16b
eor v4.16b,v7.16b,v6.16b
aese v0.16b,v23.16b
// The iv for first block of one iteration
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr#31
eor x9,x11,x9,lsl#1
fmov d6,x9
fmov v6.d[1],x10
eor v5.16b,v7.16b,v8.16b
ld1 {v2.16b},[x0],#16
aese v1.16b,v23.16b
// The iv for second block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr#31
eor x9,x11,x9,lsl#1
fmov d8,x9
fmov v8.d[1],x10
eor v17.16b,v7.16b,v9.16b
ld1 {v3.16b},[x0],#16
aese v24.16b,v23.16b
// The iv for third block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr#31
eor x9,x11,x9,lsl#1
fmov d9,x9
fmov v9.d[1],x10
eor v30.16b,v7.16b,v10.16b
ld1 {v27.16b},[x0],#16
aese v25.16b,v23.16b
// The iv for fourth block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr#31
eor x9,x11,x9,lsl#1
fmov d10,x9
fmov v10.d[1],x10
eor v31.16b,v7.16b,v11.16b
ld1 {v28.16b},[x0],#16
aese v26.16b,v23.16b
// The iv for fifth block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr #31
eor x9,x11,x9,lsl #1
fmov d11,x9
fmov v11.d[1],x10
ld1 {v29.16b},[x0],#16
cbz x6,Lxts_enc_tail4x
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
eor v4.16b,v4.16b,v0.16b
eor v0.16b,v2.16b,v6.16b
eor v5.16b,v5.16b,v1.16b
eor v1.16b,v3.16b,v8.16b
eor v17.16b,v17.16b,v24.16b
eor v24.16b,v27.16b,v9.16b
eor v30.16b,v30.16b,v25.16b
eor v25.16b,v28.16b,v10.16b
eor v31.16b,v31.16b,v26.16b
st1 {v4.16b},[x1],#16
eor v26.16b,v29.16b,v11.16b
st1 {v5.16b},[x1],#16
mov w6,w5
st1 {v17.16b},[x1],#16
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
st1 {v30.16b},[x1],#16
st1 {v31.16b},[x1],#16
b.hs Loop5x_xts_enc
// If 4 blocks are left, borrow the five-block processing. That is the
// case when (x2 + 1 block) == 0, i.e. when the input size % 5 == 4:
// run one more iteration of Loop5x_xts_enc, which will then exit via
// cbz x6,Lxts_enc_tail4x.
// Otherwise this is the end of the loop; continue processing the
// remaining 0, 1, 2 or 3 blocks (with or without tail) starting at
// Loop5x_enc_after.
cmn x2,#0x10
b.ne Loop5x_enc_after
orr v11.16b,v10.16b,v10.16b
orr v10.16b,v9.16b,v9.16b
orr v9.16b,v8.16b,v8.16b
orr v8.16b,v6.16b,v6.16b
fmov x9,d11
fmov x10,v11.d[1]
eor v0.16b,v6.16b,v2.16b
eor v1.16b,v8.16b,v3.16b
eor v24.16b,v27.16b,v9.16b
eor v25.16b,v28.16b,v10.16b
eor v26.16b,v29.16b,v11.16b
b.eq Loop5x_xts_enc
Loop5x_enc_after:
add x2,x2,#0x50
cbz x2,Lxts_enc_done // no blocks left
add w6,w5,#2
subs x2,x2,#0x30
b.lo Lxts_inner_enc_tail // 1 or 2 blocks left
// (with tail or not)
eor v0.16b,v6.16b,v27.16b // 3 blocks left
eor v1.16b,v8.16b,v28.16b
eor v24.16b,v29.16b,v9.16b
b Lxts_outer_enc_tail
.align 4
Lxts_enc_tail4x:
add x0,x0,#16
eor v5.16b,v1.16b,v5.16b
st1 {v5.16b},[x1],#16
eor v17.16b,v24.16b,v17.16b
st1 {v17.16b},[x1],#16
eor v30.16b,v25.16b,v30.16b
eor v31.16b,v26.16b,v31.16b
st1 {v30.16b,v31.16b},[x1],#32
b Lxts_enc_done
.align 4
Lxts_outer_enc_tail:
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v24.16b,v16.16b
aesmc v24.16b,v24.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
aese v24.16b,v17.16b
aesmc v24.16b,v24.16b
ld1 {v17.4s},[x7],#16
b.gt Lxts_outer_enc_tail
aese v0.16b,v16.16b
aesmc v0.16b,v0.16b
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v24.16b,v16.16b
aesmc v24.16b,v24.16b
eor v4.16b,v6.16b,v7.16b
subs x2,x2,#0x30
// The iv for first block
fmov x9,d9
fmov x10,v9.d[1]
//mov w19,#0x87
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr#31
eor x9,x11,x9,lsl#1
fmov d6,x9
fmov v6.d[1],x10
eor v5.16b,v8.16b,v7.16b
csel x6,x2,x6,lo // x6, w6, is zero at this point
aese v0.16b,v17.16b
aesmc v0.16b,v0.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
aese v24.16b,v17.16b
aesmc v24.16b,v24.16b
eor v17.16b,v9.16b,v7.16b
add x6,x6,#0x20
add x0,x0,x6
mov x7,x3
aese v0.16b,v20.16b
aesmc v0.16b,v0.16b
aese v1.16b,v20.16b
aesmc v1.16b,v1.16b
aese v24.16b,v20.16b
aesmc v24.16b,v24.16b
aese v0.16b,v21.16b
aesmc v0.16b,v0.16b
aese v1.16b,v21.16b
aesmc v1.16b,v1.16b
aese v24.16b,v21.16b
aesmc v24.16b,v24.16b
aese v0.16b,v22.16b
aesmc v0.16b,v0.16b
aese v1.16b,v22.16b
aesmc v1.16b,v1.16b
aese v24.16b,v22.16b
aesmc v24.16b,v24.16b
aese v0.16b,v23.16b
aese v1.16b,v23.16b
aese v24.16b,v23.16b
ld1 {v27.16b},[x0],#16
add w6,w5,#2
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
eor v4.16b,v4.16b,v0.16b
eor v5.16b,v5.16b,v1.16b
eor v24.16b,v24.16b,v17.16b
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
st1 {v4.16b},[x1],#16
st1 {v5.16b},[x1],#16
st1 {v24.16b},[x1],#16
cmn x2,#0x30
b.eq Lxts_enc_done
Lxts_encxor_one:
orr v28.16b,v3.16b,v3.16b
orr v29.16b,v27.16b,v27.16b
nop
Lxts_inner_enc_tail:
cmn x2,#0x10
eor v1.16b,v28.16b,v6.16b
eor v24.16b,v29.16b,v8.16b
b.eq Lxts_enc_tail_loop
eor v24.16b,v29.16b,v6.16b
Lxts_enc_tail_loop:
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v24.16b,v16.16b
aesmc v24.16b,v24.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
aese v24.16b,v17.16b
aesmc v24.16b,v24.16b
ld1 {v17.4s},[x7],#16
b.gt Lxts_enc_tail_loop
aese v1.16b,v16.16b
aesmc v1.16b,v1.16b
aese v24.16b,v16.16b
aesmc v24.16b,v24.16b
aese v1.16b,v17.16b
aesmc v1.16b,v1.16b
aese v24.16b,v17.16b
aesmc v24.16b,v24.16b
aese v1.16b,v20.16b
aesmc v1.16b,v1.16b
aese v24.16b,v20.16b
aesmc v24.16b,v24.16b
cmn x2,#0x20
aese v1.16b,v21.16b
aesmc v1.16b,v1.16b
aese v24.16b,v21.16b
aesmc v24.16b,v24.16b
eor v5.16b,v6.16b,v7.16b
aese v1.16b,v22.16b
aesmc v1.16b,v1.16b
aese v24.16b,v22.16b
aesmc v24.16b,v24.16b
eor v17.16b,v8.16b,v7.16b
aese v1.16b,v23.16b
aese v24.16b,v23.16b
b.eq Lxts_enc_one
eor v5.16b,v5.16b,v1.16b
st1 {v5.16b},[x1],#16
eor v17.16b,v17.16b,v24.16b
orr v6.16b,v8.16b,v8.16b
st1 {v17.16b},[x1],#16
fmov x9,d8
fmov x10,v8.d[1]
mov w19,#0x87
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr #31
eor x9,x11,x9,lsl #1
fmov d6,x9
fmov v6.d[1],x10
b Lxts_enc_done
Lxts_enc_one:
eor v5.16b,v5.16b,v24.16b
orr v6.16b,v6.16b,v6.16b
st1 {v5.16b},[x1],#16
fmov x9,d6
fmov x10,v6.d[1]
mov w19,#0x87
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr #31
eor x9,x11,x9,lsl #1
fmov d6,x9
fmov v6.d[1],x10
b Lxts_enc_done
.align 5
Lxts_enc_done:
// Process the tail block with cipher stealing.
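// The byte loop below steals the leading bytes of the block just written as
// the final partial ciphertext and splices the remaining plaintext tail into
// that block in their place; the composite block is then xored with the
// tweak in v6 and encrypted.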
tst x21,#0xf
b.eq Lxts_abort
mov x20,x0
mov x13,x1
sub x1,x1,#16
.composite_enc_loop:
subs x21,x21,#1
ldrb w15,[x1,x21]
ldrb w14,[x20,x21]
strb w15,[x13,x21]
strb w14,[x1,x21]
b.gt .composite_enc_loop
Lxts_enc_load_done:
ld1 {v26.16b},[x1]
eor v26.16b,v26.16b,v6.16b
// Encrypt the composite block to get the second-to-last ciphertext block
ldr w6,[x3,#240] // load key schedule...
ld1 {v0.16b},[x3],#16
sub w6,w6,#2
ld1 {v1.16b},[x3],#16 // load key schedule...
Loop_final_enc:
aese v26.16b,v0.16b
aesmc v26.16b,v26.16b
ld1 {v0.4s},[x3],#16
subs w6,w6,#2
aese v26.16b,v1.16b
aesmc v26.16b,v26.16b
ld1 {v1.4s},[x3],#16
b.gt Loop_final_enc
aese v26.16b,v0.16b
aesmc v26.16b,v26.16b
ld1 {v0.4s},[x3]
aese v26.16b,v1.16b
eor v26.16b,v26.16b,v0.16b
eor v26.16b,v26.16b,v6.16b
st1 {v26.16b},[x1]
Lxts_abort:
ldp x21,x22,[sp,#48]
ldp d8,d9,[sp,#32]
ldp d10,d11,[sp,#16]
ldp x19,x20,[sp],#64
Lxts_enc_final_abort:
ret
.globl aes_hw_xts_decrypt
.def aes_hw_xts_decrypt
.type 32
.endef
.align 5
aes_hw_xts_decrypt:
AARCH64_VALID_CALL_TARGET
cmp x2,#16
// If the input size is not exactly 16 bytes, jump to the big-size path.
b.ne Lxts_dec_big_size
// Encrypt the iv with key2, as the first XEX iv.
ldr w6,[x4,#240]
ld1 {v0.16b},[x4],#16
ld1 {v6.16b},[x5]
sub w6,w6,#2
ld1 {v1.16b},[x4],#16
Loop_dec_small_iv_enc:
aese v6.16b,v0.16b
aesmc v6.16b,v6.16b
ld1 {v0.4s},[x4],#16
subs w6,w6,#2
aese v6.16b,v1.16b
aesmc v6.16b,v6.16b
ld1 {v1.4s},[x4],#16
b.gt Loop_dec_small_iv_enc
aese v6.16b,v0.16b
aesmc v6.16b,v6.16b
ld1 {v0.4s},[x4]
aese v6.16b,v1.16b
eor v6.16b,v6.16b,v0.16b
ld1 {v0.16b},[x0]
eor v0.16b,v6.16b,v0.16b
ldr w6,[x3,#240]
ld1 {v28.4s,v29.4s},[x3],#32 // load key schedule...
aesd v0.16b,v28.16b
aesimc v0.16b,v0.16b
ld1 {v16.4s,v17.4s},[x3],#32 // load key schedule...
aesd v0.16b,v29.16b
aesimc v0.16b,v0.16b
subs w6,w6,#10 // bias
b.eq Lxts_128_dec
Lxts_dec_round_loop:
aesd v0.16b,v16.16b
aesimc v0.16b,v0.16b
ld1 {v16.4s},[x3],#16 // load key schedule...
aesd v0.16b,v17.16b
aesimc v0.16b,v0.16b
ld1 {v17.4s},[x3],#16 // load key schedule...
subs w6,w6,#2 // bias
b.gt Lxts_dec_round_loop
Lxts_128_dec:
ld1 {v18.4s,v19.4s},[x3],#32 // load key schedule...
aesd v0.16b,v16.16b
aesimc v0.16b,v0.16b
aesd v0.16b,v17.16b
aesimc v0.16b,v0.16b
ld1 {v20.4s,v21.4s},[x3],#32 // load key schedule...
aesd v0.16b,v18.16b
aesimc v0.16b,v0.16b
aesd v0.16b,v19.16b
aesimc v0.16b,v0.16b
ld1 {v22.4s,v23.4s},[x3],#32 // load key schedule...
aesd v0.16b,v20.16b
aesimc v0.16b,v0.16b
aesd v0.16b,v21.16b
aesimc v0.16b,v0.16b
ld1 {v7.4s},[x3]
aesd v0.16b,v22.16b
aesimc v0.16b,v0.16b
aesd v0.16b,v23.16b
eor v0.16b,v0.16b,v7.16b
eor v0.16b,v6.16b,v0.16b
st1 {v0.16b},[x1]
b Lxts_dec_final_abort
Lxts_dec_big_size:
stp x19,x20,[sp,#-64]!
stp x21,x22,[sp,#48]
stp d8,d9,[sp,#32]
stp d10,d11,[sp,#16]
and x21,x2,#0xf
and x2,x2,#-16
subs x2,x2,#16
mov x8,#16
b.lo Lxts_dec_abort
// Encrypt the iv with key2, as the first XEX iv
ldr w6,[x4,#240]
ld1 {v0.16b},[x4],#16
ld1 {v6.16b},[x5]
sub w6,w6,#2
ld1 {v1.16b},[x4],#16
Loop_dec_iv_enc:
aese v6.16b,v0.16b
aesmc v6.16b,v6.16b
ld1 {v0.4s},[x4],#16
subs w6,w6,#2
aese v6.16b,v1.16b
aesmc v6.16b,v6.16b
ld1 {v1.4s},[x4],#16
b.gt Loop_dec_iv_enc
aese v6.16b,v0.16b
aesmc v6.16b,v6.16b
ld1 {v0.4s},[x4]
aese v6.16b,v1.16b
eor v6.16b,v6.16b,v0.16b
// The iv for second block
// x9- iv(low), x10 - iv(high)
// the five ivs stored into, v6.16b,v8.16b,v9.16b,v10.16b,v11.16b
fmov x9,d6
fmov x10,v6.d[1]
mov w19,#0x87
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr #31
eor x9,x11,x9,lsl #1
fmov d8,x9
fmov v8.d[1],x10
ldr w5,[x3,#240] // load rounds number
// The iv for third block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr #31
eor x9,x11,x9,lsl #1
fmov d9,x9
fmov v9.d[1],x10
ld1 {v16.4s,v17.4s},[x3] // load key schedule...
sub w5,w5,#6
add x7,x3,x5,lsl#4 // pointer to last 7 round keys
sub w5,w5,#2
ld1 {v18.4s,v19.4s},[x7],#32 // load key schedule...
ld1 {v20.4s,v21.4s},[x7],#32
ld1 {v22.4s,v23.4s},[x7],#32
ld1 {v7.4s},[x7]
// The iv for fourth block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr #31
eor x9,x11,x9,lsl #1
fmov d10,x9
fmov v10.d[1],x10
add x7,x3,#32
mov w6,w5
b Lxts_dec
// Decryption
.align 5
Lxts_dec:
tst x21,#0xf
b.eq Lxts_dec_begin
subs x2,x2,#16
csel x8,xzr,x8,eq
ld1 {v0.16b},[x0],#16
b.lo Lxts_done
sub x0,x0,#16
Lxts_dec_begin:
ld1 {v0.16b},[x0],x8
subs x2,x2,#32 // bias
add w6,w5,#2
orr v3.16b,v0.16b,v0.16b
orr v1.16b,v0.16b,v0.16b
orr v28.16b,v0.16b,v0.16b
ld1 {v24.16b},[x0],#16
orr v27.16b,v24.16b,v24.16b
orr v29.16b,v24.16b,v24.16b
b.lo Lxts_inner_dec_tail
eor v0.16b,v0.16b,v6.16b // before decrypt, xor with iv
eor v24.16b,v24.16b,v8.16b
orr v1.16b,v24.16b,v24.16b
ld1 {v24.16b},[x0],#16
orr v2.16b,v0.16b,v0.16b
orr v3.16b,v1.16b,v1.16b
eor v27.16b,v24.16b,v9.16b // third block xor with third iv
eor v24.16b,v24.16b,v9.16b
cmp x2,#32
b.lo Lxts_outer_dec_tail
ld1 {v25.16b},[x0],#16
// The iv for fifth block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr #31
eor x9,x11,x9,lsl #1
fmov d11,x9
fmov v11.d[1],x10
ld1 {v26.16b},[x0],#16
eor v25.16b,v25.16b,v10.16b // the fourth block
eor v26.16b,v26.16b,v11.16b
sub x2,x2,#32 // bias
mov w6,w5
b Loop5x_xts_dec
.align 4
Loop5x_xts_dec:
aesd v0.16b,v16.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v16.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v16.16b
aesimc v24.16b,v24.16b
aesd v25.16b,v16.16b
aesimc v25.16b,v25.16b
aesd v26.16b,v16.16b
aesimc v26.16b,v26.16b
ld1 {v16.4s},[x7],#16 // load key schedule...
subs w6,w6,#2
aesd v0.16b,v17.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v17.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v17.16b
aesimc v24.16b,v24.16b
aesd v25.16b,v17.16b
aesimc v25.16b,v25.16b
aesd v26.16b,v17.16b
aesimc v26.16b,v26.16b
ld1 {v17.4s},[x7],#16 // load key schedule...
b.gt Loop5x_xts_dec
aesd v0.16b,v16.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v16.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v16.16b
aesimc v24.16b,v24.16b
aesd v25.16b,v16.16b
aesimc v25.16b,v25.16b
aesd v26.16b,v16.16b
aesimc v26.16b,v26.16b
subs x2,x2,#0x50 // because Lxts_dec_tail4x
aesd v0.16b,v17.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v17.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v17.16b
aesimc v24.16b,v24.16b
aesd v25.16b,v17.16b
aesimc v25.16b,v25.16b
aesd v26.16b,v17.16b
aesimc v26.16b,v26.16b
csel x6,xzr,x2,gt // borrow x6, w6, "gt" is not typo
mov x7,x3
aesd v0.16b,v18.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v18.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v18.16b
aesimc v24.16b,v24.16b
aesd v25.16b,v18.16b
aesimc v25.16b,v25.16b
aesd v26.16b,v18.16b
aesimc v26.16b,v26.16b
add x0,x0,x6 // x0 is adjusted in such a way that
// at exit from the loop v1.16b-v26.16b
// are loaded with last "words"
add x6,x2,#0x60 // because Lxts_dec_tail4x
aesd v0.16b,v19.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v19.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v19.16b
aesimc v24.16b,v24.16b
aesd v25.16b,v19.16b
aesimc v25.16b,v25.16b
aesd v26.16b,v19.16b
aesimc v26.16b,v26.16b
aesd v0.16b,v20.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v20.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v20.16b
aesimc v24.16b,v24.16b
aesd v25.16b,v20.16b
aesimc v25.16b,v25.16b
aesd v26.16b,v20.16b
aesimc v26.16b,v26.16b
aesd v0.16b,v21.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v21.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v21.16b
aesimc v24.16b,v24.16b
aesd v25.16b,v21.16b
aesimc v25.16b,v25.16b
aesd v26.16b,v21.16b
aesimc v26.16b,v26.16b
aesd v0.16b,v22.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v22.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v22.16b
aesimc v24.16b,v24.16b
aesd v25.16b,v22.16b
aesimc v25.16b,v25.16b
aesd v26.16b,v22.16b
aesimc v26.16b,v26.16b
eor v4.16b,v7.16b,v6.16b
aesd v0.16b,v23.16b
// The iv for first block of next iteration.
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr #31
eor x9,x11,x9,lsl #1
fmov d6,x9
fmov v6.d[1],x10
eor v5.16b,v7.16b,v8.16b
ld1 {v2.16b},[x0],#16
aesd v1.16b,v23.16b
// The iv for second block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr #31
eor x9,x11,x9,lsl #1
fmov d8,x9
fmov v8.d[1],x10
eor v17.16b,v7.16b,v9.16b
ld1 {v3.16b},[x0],#16
aesd v24.16b,v23.16b
// The iv for third block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr #31
eor x9,x11,x9,lsl #1
fmov d9,x9
fmov v9.d[1],x10
eor v30.16b,v7.16b,v10.16b
ld1 {v27.16b},[x0],#16
aesd v25.16b,v23.16b
// The iv for fourth block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr #31
eor x9,x11,x9,lsl #1
fmov d10,x9
fmov v10.d[1],x10
eor v31.16b,v7.16b,v11.16b
ld1 {v28.16b},[x0],#16
aesd v26.16b,v23.16b
// The iv for fifth block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr #31
eor x9,x11,x9,lsl #1
fmov d11,x9
fmov v11.d[1],x10
ld1 {v29.16b},[x0],#16
cbz x6,Lxts_dec_tail4x
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
eor v4.16b,v4.16b,v0.16b
eor v0.16b,v2.16b,v6.16b
eor v5.16b,v5.16b,v1.16b
eor v1.16b,v3.16b,v8.16b
eor v17.16b,v17.16b,v24.16b
eor v24.16b,v27.16b,v9.16b
eor v30.16b,v30.16b,v25.16b
eor v25.16b,v28.16b,v10.16b
eor v31.16b,v31.16b,v26.16b
st1 {v4.16b},[x1],#16
eor v26.16b,v29.16b,v11.16b
st1 {v5.16b},[x1],#16
mov w6,w5
st1 {v17.16b},[x1],#16
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
st1 {v30.16b},[x1],#16
st1 {v31.16b},[x1],#16
b.hs Loop5x_xts_dec
cmn x2,#0x10
b.ne Loop5x_dec_after
// If x2 equals -0x10, 4 blocks are left.
// After this special handling, the five-block processing is reused.
// It will use the following IVs: v6.16b,v6.16b,v8.16b,v9.16b,v10.16b.
orr v11.16b,v10.16b,v10.16b
orr v10.16b,v9.16b,v9.16b
orr v9.16b,v8.16b,v8.16b
orr v8.16b,v6.16b,v6.16b
fmov x9,d11
fmov x10,v11.d[1]
eor v0.16b,v6.16b,v2.16b
eor v1.16b,v8.16b,v3.16b
eor v24.16b,v27.16b,v9.16b
eor v25.16b,v28.16b,v10.16b
eor v26.16b,v29.16b,v11.16b
b.eq Loop5x_xts_dec
Loop5x_dec_after:
add x2,x2,#0x50
cbz x2,Lxts_done
add w6,w5,#2
subs x2,x2,#0x30
b.lo Lxts_inner_dec_tail
eor v0.16b,v6.16b,v27.16b
eor v1.16b,v8.16b,v28.16b
eor v24.16b,v29.16b,v9.16b
b Lxts_outer_dec_tail
.align 4
Lxts_dec_tail4x:
add x0,x0,#16
tst x21,#0xf
eor v5.16b,v1.16b,v4.16b
st1 {v5.16b},[x1],#16
eor v17.16b,v24.16b,v17.16b
st1 {v17.16b},[x1],#16
eor v30.16b,v25.16b,v30.16b
eor v31.16b,v26.16b,v31.16b
st1 {v30.16b,v31.16b},[x1],#32
b.eq Lxts_dec_abort
ld1 {v0.4s},[x0],#16
b Lxts_done
.align 4
Lxts_outer_dec_tail:
aesd v0.16b,v16.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v16.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v16.16b
aesimc v24.16b,v24.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aesd v0.16b,v17.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v17.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v17.16b
aesimc v24.16b,v24.16b
ld1 {v17.4s},[x7],#16
b.gt Lxts_outer_dec_tail
aesd v0.16b,v16.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v16.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v16.16b
aesimc v24.16b,v24.16b
eor v4.16b,v6.16b,v7.16b
subs x2,x2,#0x30
// The iv for first block
fmov x9,d9
fmov x10,v9.d[1]
mov w19,#0x87
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr #31
eor x9,x11,x9,lsl #1
fmov d6,x9
fmov v6.d[1],x10
eor v5.16b,v8.16b,v7.16b
csel x6,x2,x6,lo // x6, w6, is zero at this point
aesd v0.16b,v17.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v17.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v17.16b
aesimc v24.16b,v24.16b
eor v17.16b,v9.16b,v7.16b
// The iv for second block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr #31
eor x9,x11,x9,lsl #1
fmov d8,x9
fmov v8.d[1],x10
add x6,x6,#0x20
add x0,x0,x6 // x0 is adjusted to the last data
mov x7,x3
// The iv for third block
extr x22,x10,x10,#32
extr x10,x10,x9,#63
and w11,w19,w22,asr #31
eor x9,x11,x9,lsl #1
fmov d9,x9
fmov v9.d[1],x10
aesd v0.16b,v20.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v20.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v20.16b
aesimc v24.16b,v24.16b
aesd v0.16b,v21.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v21.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v21.16b
aesimc v24.16b,v24.16b
aesd v0.16b,v22.16b
aesimc v0.16b,v0.16b
aesd v1.16b,v22.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v22.16b
aesimc v24.16b,v24.16b
ld1 {v27.16b},[x0],#16
aesd v0.16b,v23.16b
aesd v1.16b,v23.16b
aesd v24.16b,v23.16b
ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
add w6,w5,#2
eor v4.16b,v4.16b,v0.16b
eor v5.16b,v5.16b,v1.16b
eor v24.16b,v24.16b,v17.16b
ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
st1 {v4.16b},[x1],#16
st1 {v5.16b},[x1],#16
st1 {v24.16b},[x1],#16
cmn x2,#0x30
add x2,x2,#0x30
b.eq Lxts_done
sub x2,x2,#0x30
orr v28.16b,v3.16b,v3.16b
orr v29.16b,v27.16b,v27.16b
nop
Lxts_inner_dec_tail:
// x2 == -0x10 means two blocks left.
cmn x2,#0x10
eor v1.16b,v28.16b,v6.16b
eor v24.16b,v29.16b,v8.16b
b.eq Lxts_dec_tail_loop
eor v24.16b,v29.16b,v6.16b
Lxts_dec_tail_loop:
aesd v1.16b,v16.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v16.16b
aesimc v24.16b,v24.16b
ld1 {v16.4s},[x7],#16
subs w6,w6,#2
aesd v1.16b,v17.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v17.16b
aesimc v24.16b,v24.16b
ld1 {v17.4s},[x7],#16
b.gt Lxts_dec_tail_loop
aesd v1.16b,v16.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v16.16b
aesimc v24.16b,v24.16b
aesd v1.16b,v17.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v17.16b
aesimc v24.16b,v24.16b
aesd v1.16b,v20.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v20.16b
aesimc v24.16b,v24.16b
cmn x2,#0x20
aesd v1.16b,v21.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v21.16b
aesimc v24.16b,v24.16b
eor v5.16b,v6.16b,v7.16b
aesd v1.16b,v22.16b
aesimc v1.16b,v1.16b
aesd v24.16b,v22.16b
aesimc v24.16b,v24.16b
eor v17.16b,v8.16b,v7.16b
aesd v1.16b,v23.16b
aesd v24.16b,v23.16b
b.eq Lxts_dec_one
eor v5.16b,v5.16b,v1.16b
eor v17.16b,v17.16b,v24.16b
orr v6.16b,v9.16b,v9.16b
orr v8.16b,v10.16b,v10.16b
st1 {v5.16b},[x1],#16
st1 {v17.16b},[x1],#16
add x2,x2,#16
b Lxts_done
Lxts_dec_one:
eor v5.16b,v5.16b,v24.16b
orr v6.16b,v8.16b,v8.16b
orr v8.16b,v9.16b,v9.16b
st1 {v5.16b},[x1],#16
add x2,x2,#32
Lxts_done:
tst x21,#0xf
b.eq Lxts_dec_abort
// Processing the last two blocks with cipher stealing.
mov x7,x3
cbnz x2,Lxts_dec_1st_done
ld1 {v0.4s},[x0],#16
// Decrypt the second-to-last block to get the last plaintext block
Lxts_dec_1st_done:
eor v26.16b,v0.16b,v8.16b
ldr w6,[x3,#240]
ld1 {v0.4s},[x3],#16
sub w6,w6,#2
ld1 {v1.4s},[x3],#16
Loop_final_2nd_dec:
aesd v26.16b,v0.16b
aesimc v26.16b,v26.16b
ld1 {v0.4s},[x3],#16 // load key schedule...
subs w6,w6,#2
aesd v26.16b,v1.16b
aesimc v26.16b,v26.16b
ld1 {v1.4s},[x3],#16 // load key schedule...
b.gt Loop_final_2nd_dec
aesd v26.16b,v0.16b
aesimc v26.16b,v26.16b
ld1 {v0.4s},[x3]
aesd v26.16b,v1.16b
eor v26.16b,v26.16b,v0.16b
eor v26.16b,v26.16b,v8.16b
st1 {v26.16b},[x1]
mov x20,x0
add x13,x1,#16
// Splice the tailcnt bytes (the 16-byte-unaligned tail) into the second-to-last
// plain block to form the last composite block to decrypt.
.composite_dec_loop:
subs x21,x21,#1
ldrb w15,[x1,x21]
ldrb w14,[x20,x21]
strb w15,[x13,x21]
strb w14,[x1,x21]
b.gt .composite_dec_loop
Lxts_dec_load_done:
ld1 {v26.16b},[x1]
eor v26.16b,v26.16b,v6.16b
// Decrypt the composite block to get the second-to-last plaintext block
ldr w6,[x7,#240]
ld1 {v0.16b},[x7],#16
sub w6,w6,#2
ld1 {v1.16b},[x7],#16
Loop_final_dec:
aesd v26.16b,v0.16b
aesimc v26.16b,v26.16b
ld1 {v0.4s},[x7],#16 // load key schedule...
subs w6,w6,#2
aesd v26.16b,v1.16b
aesimc v26.16b,v26.16b
ld1 {v1.4s},[x7],#16 // load key schedule...
b.gt Loop_final_dec
aesd v26.16b,v0.16b
aesimc v26.16b,v26.16b
ld1 {v0.4s},[x7]
aesd v26.16b,v1.16b
eor v26.16b,v26.16b,v0.16b
eor v26.16b,v26.16b,v6.16b
st1 {v26.16b},[x1]
Lxts_dec_abort:
ldp x21,x22,[sp,#48]
ldp d8,d9,[sp,#32]
ldp d10,d11,[sp,#16]
ldp x19,x20,[sp],#64
Lxts_dec_final_abort:
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/sha256-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the OpenSSL license (the "License"). You may not use
// this file except in compliance with the License. You can obtain a copy
// in the file LICENSE in the source distribution or at
// https://www.openssl.org/source/license.html
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project. The module is, however, dual licensed under OpenSSL and
// CRYPTOGAMS licenses depending on where you obtain it. For further
// details see http://www.openssl.org/~appro/cryptogams/.
//
// Permission to use under GPLv2 terms is granted.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
// SHA256-hw SHA256(*) SHA512
// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
// Denver 2.01 10.5 (+26%) 6.70 (+8%)
// X-Gene 20.0 (+100%) 12.8 (+300%(***))
// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
// Kryo 1.92 17.4 (+30%) 11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are
// indication of some compiler "pathology", most notably code
// generated with -mgeneral-regs-only is significantly faster
// and the gap is only 40-90%.
#ifndef __KERNEL__
# include <openssl/arm_arch.h>
#endif
.text
.globl sha256_block_data_order_nohw
.def sha256_block_data_order_nohw
.type 32
.endef
.align 6
sha256_block_data_order_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*4
ldp w20,w21,[x0] // load context
ldp w22,w23,[x0,#2*4]
ldp w24,w25,[x0,#4*4]
add x2,x1,x2,lsl#6 // end of input
ldp w26,w27,[x0,#6*4]
adrp x30,LK256
add x30,x30,:lo12:LK256
stp x0,x2,[x29,#96]
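// The unrolled rounds below implement the FIPS 180-4 compression function:
//   T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i],  T2 = Sigma0(a) + Maj(a,b,c)
// with Sigma1(x) = ROTR6(x)^ROTR11(x)^ROTR25(x) and
//      Sigma0(x) = ROTR2(x)^ROTR13(x)^ROTR22(x);
// the eor-with-ror trick folds ROTR11^ROTR25 (resp. ROTR13^ROTR22) into two
// instructions, and Maj is computed incrementally via ((a^b)&(b^c))^b.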
Loop:
ldp w3,w4,[x1],#2*4
ldr w19,[x30],#4 // *K++
eor w28,w21,w22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev w3,w3 // 0
#endif
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w6,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w3 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w4,w4 // 1
#endif
ldp w5,w6,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w7,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w4 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w5,w5 // 2
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w8,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w5 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w6,w6 // 3
#endif
ldp w7,w8,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w9,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w6 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w7,w7 // 4
#endif
add w24,w24,w17 // h+=Sigma0(a)
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w10,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w7 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w10,ror#11 // Sigma1(e)
ror w10,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w10,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w8,w8 // 5
#endif
ldp w9,w10,[x1],#2*4
add w23,w23,w17 // h+=Sigma0(a)
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w11,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w8 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w11,ror#11 // Sigma1(e)
ror w11,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w11,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w9,w9 // 6
#endif
add w22,w22,w17 // h+=Sigma0(a)
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w12,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w9 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w12,ror#11 // Sigma1(e)
ror w12,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w12,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w10,w10 // 7
#endif
ldp w11,w12,[x1],#2*4
add w21,w21,w17 // h+=Sigma0(a)
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
eor w13,w25,w25,ror#14
and w17,w26,w25
bic w28,w27,w25
add w20,w20,w10 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w13,ror#11 // Sigma1(e)
ror w13,w21,#2
add w20,w20,w17 // h+=Ch(e,f,g)
eor w17,w21,w21,ror#9
add w20,w20,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w24,w24,w20 // d+=h
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w13,w17,ror#13 // Sigma0(a)
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w20,w20,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w11,w11 // 8
#endif
add w20,w20,w17 // h+=Sigma0(a)
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
eor w14,w24,w24,ror#14
and w17,w25,w24
bic w19,w26,w24
add w27,w27,w11 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w14,ror#11 // Sigma1(e)
ror w14,w20,#2
add w27,w27,w17 // h+=Ch(e,f,g)
eor w17,w20,w20,ror#9
add w27,w27,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w23,w23,w27 // d+=h
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w14,w17,ror#13 // Sigma0(a)
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w27,w27,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w12,w12 // 9
#endif
ldp w13,w14,[x1],#2*4
add w27,w27,w17 // h+=Sigma0(a)
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
eor w15,w23,w23,ror#14
and w17,w24,w23
bic w28,w25,w23
add w26,w26,w12 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w15,ror#11 // Sigma1(e)
ror w15,w27,#2
add w26,w26,w17 // h+=Ch(e,f,g)
eor w17,w27,w27,ror#9
add w26,w26,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w22,w22,w26 // d+=h
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w15,w17,ror#13 // Sigma0(a)
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w26,w26,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w13,w13 // 10
#endif
add w26,w26,w17 // h+=Sigma0(a)
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
eor w0,w22,w22,ror#14
and w17,w23,w22
bic w19,w24,w22
add w25,w25,w13 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w0,ror#11 // Sigma1(e)
ror w0,w26,#2
add w25,w25,w17 // h+=Ch(e,f,g)
eor w17,w26,w26,ror#9
add w25,w25,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w21,w21,w25 // d+=h
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w0,w17,ror#13 // Sigma0(a)
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w25,w25,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w14,w14 // 11
#endif
ldp w15,w0,[x1],#2*4
add w25,w25,w17 // h+=Sigma0(a)
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
eor w6,w21,w21,ror#14
and w17,w22,w21
bic w28,w23,w21
add w24,w24,w14 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w6,ror#11 // Sigma1(e)
ror w6,w25,#2
add w24,w24,w17 // h+=Ch(e,f,g)
eor w17,w25,w25,ror#9
add w24,w24,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w20,w20,w24 // d+=h
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w17,ror#13 // Sigma0(a)
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w24,w24,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w15,w15 // 12
#endif
add w24,w24,w17 // h+=Sigma0(a)
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
eor w7,w20,w20,ror#14
and w17,w21,w20
bic w19,w22,w20
add w23,w23,w15 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w7,ror#11 // Sigma1(e)
ror w7,w24,#2
add w23,w23,w17 // h+=Ch(e,f,g)
eor w17,w24,w24,ror#9
add w23,w23,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w27,w27,w23 // d+=h
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w17,ror#13 // Sigma0(a)
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w23,w23,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w0,w0 // 13
#endif
ldp w1,w2,[x1]
add w23,w23,w17 // h+=Sigma0(a)
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
eor w8,w27,w27,ror#14
and w17,w20,w27
bic w28,w21,w27
add w22,w22,w0 // h+=X[i]
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w8,ror#11 // Sigma1(e)
ror w8,w23,#2
add w22,w22,w17 // h+=Ch(e,f,g)
eor w17,w23,w23,ror#9
add w22,w22,w16 // h+=Sigma1(e)
and w19,w19,w28 // (b^c)&=(a^b)
add w26,w26,w22 // d+=h
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w17,ror#13 // Sigma0(a)
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
//add w22,w22,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w1,w1 // 14
#endif
ldr w6,[sp,#12]
add w22,w22,w17 // h+=Sigma0(a)
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
eor w9,w26,w26,ror#14
and w17,w27,w26
bic w19,w20,w26
add w21,w21,w1 // h+=X[i]
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w9,ror#11 // Sigma1(e)
ror w9,w22,#2
add w21,w21,w17 // h+=Ch(e,f,g)
eor w17,w22,w22,ror#9
add w21,w21,w16 // h+=Sigma1(e)
and w28,w28,w19 // (b^c)&=(a^b)
add w25,w25,w21 // d+=h
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w17,ror#13 // Sigma0(a)
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
//add w21,w21,w17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev w2,w2 // 15
#endif
ldr w7,[sp,#0]
add w21,w21,w17 // h+=Sigma0(a)
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
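// Rounds 16-63: the message schedule is extended on the fly as
//   W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]
// (sigma0 = ror7 ^ ror18 ^ shr3, sigma1 = ror17 ^ ror19 ^ shr10), with the
// 16-word window kept in registers plus the four 32-bit spill slots at sp+0..sp+12.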
Loop_16_xx:
ldr w8,[sp,#4]
str w11,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w10,w5,#7
and w17,w25,w24
ror w9,w2,#17
bic w19,w26,w24
ror w11,w20,#2
add w27,w27,w3 // h+=X[i]
eor w16,w16,w24,ror#11
eor w10,w10,w5,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w11,w11,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w9,w9,w2,ror#19
eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w11,w20,ror#22 // Sigma0(a)
eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
add w4,w4,w13
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w4,w4,w10
add w27,w27,w17 // h+=Sigma0(a)
add w4,w4,w9
ldr w9,[sp,#8]
str w12,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w11,w6,#7
and w17,w24,w23
ror w10,w3,#17
bic w28,w25,w23
ror w12,w27,#2
add w26,w26,w4 // h+=X[i]
eor w16,w16,w23,ror#11
eor w11,w11,w6,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w12,w12,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w10,w10,w3,ror#19
eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w12,w27,ror#22 // Sigma0(a)
eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
add w5,w5,w14
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w5,w5,w11
add w26,w26,w17 // h+=Sigma0(a)
add w5,w5,w10
ldr w10,[sp,#12]
str w13,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w12,w7,#7
and w17,w23,w22
ror w11,w4,#17
bic w19,w24,w22
ror w13,w26,#2
add w25,w25,w5 // h+=X[i]
eor w16,w16,w22,ror#11
eor w12,w12,w7,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w13,w13,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w11,w11,w4,ror#19
eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w13,w26,ror#22 // Sigma0(a)
eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
add w6,w6,w15
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w6,w6,w12
add w25,w25,w17 // h+=Sigma0(a)
add w6,w6,w11
ldr w11,[sp,#0]
str w14,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w13,w8,#7
and w17,w22,w21
ror w12,w5,#17
bic w28,w23,w21
ror w14,w25,#2
add w24,w24,w6 // h+=X[i]
eor w16,w16,w21,ror#11
eor w13,w13,w8,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w14,w14,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w12,w12,w5,ror#19
eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w14,w25,ror#22 // Sigma0(a)
eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
add w7,w7,w0
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w7,w7,w13
add w24,w24,w17 // h+=Sigma0(a)
add w7,w7,w12
ldr w12,[sp,#4]
str w15,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w14,w9,#7
and w17,w21,w20
ror w13,w6,#17
bic w19,w22,w20
ror w15,w24,#2
add w23,w23,w7 // h+=X[i]
eor w16,w16,w20,ror#11
eor w14,w14,w9,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w15,w15,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w13,w13,w6,ror#19
eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w15,w24,ror#22 // Sigma0(a)
eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
add w8,w8,w1
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w8,w8,w14
add w23,w23,w17 // h+=Sigma0(a)
add w8,w8,w13
ldr w13,[sp,#8]
str w0,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w15,w10,#7
and w17,w20,w27
ror w14,w7,#17
bic w28,w21,w27
ror w0,w23,#2
add w22,w22,w8 // h+=X[i]
eor w16,w16,w27,ror#11
eor w15,w15,w10,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w0,w0,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w14,w14,w7,ror#19
eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w0,w23,ror#22 // Sigma0(a)
eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
add w9,w9,w2
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w9,w9,w15
add w22,w22,w17 // h+=Sigma0(a)
add w9,w9,w14
ldr w14,[sp,#12]
str w1,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w0,w11,#7
and w17,w27,w26
ror w15,w8,#17
bic w19,w20,w26
ror w1,w22,#2
add w21,w21,w9 // h+=X[i]
eor w16,w16,w26,ror#11
eor w0,w0,w11,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w1,w1,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w15,w15,w8,ror#19
eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w1,w22,ror#22 // Sigma0(a)
eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
add w10,w10,w3
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w10,w10,w0
add w21,w21,w17 // h+=Sigma0(a)
add w10,w10,w15
ldr w15,[sp,#0]
str w2,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w1,w12,#7
and w17,w26,w25
ror w0,w9,#17
bic w28,w27,w25
ror w2,w21,#2
add w20,w20,w10 // h+=X[i]
eor w16,w16,w25,ror#11
eor w1,w1,w12,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w2,w2,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w0,w0,w9,ror#19
eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w2,w21,ror#22 // Sigma0(a)
eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
add w11,w11,w4
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w11,w11,w1
add w20,w20,w17 // h+=Sigma0(a)
add w11,w11,w0
ldr w0,[sp,#4]
str w3,[sp,#0]
ror w16,w24,#6
add w27,w27,w19 // h+=K[i]
ror w2,w13,#7
and w17,w25,w24
ror w1,w10,#17
bic w19,w26,w24
ror w3,w20,#2
add w27,w27,w11 // h+=X[i]
eor w16,w16,w24,ror#11
eor w2,w2,w13,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w20,w21 // a^b, b^c in next round
eor w16,w16,w24,ror#25 // Sigma1(e)
eor w3,w3,w20,ror#13
add w27,w27,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w1,w1,w10,ror#19
eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
add w27,w27,w16 // h+=Sigma1(e)
eor w28,w28,w21 // Maj(a,b,c)
eor w17,w3,w20,ror#22 // Sigma0(a)
eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
add w12,w12,w5
add w23,w23,w27 // d+=h
add w27,w27,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w12,w12,w2
add w27,w27,w17 // h+=Sigma0(a)
add w12,w12,w1
ldr w1,[sp,#8]
str w4,[sp,#4]
ror w16,w23,#6
add w26,w26,w28 // h+=K[i]
ror w3,w14,#7
and w17,w24,w23
ror w2,w11,#17
bic w28,w25,w23
ror w4,w27,#2
add w26,w26,w12 // h+=X[i]
eor w16,w16,w23,ror#11
eor w3,w3,w14,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w27,w20 // a^b, b^c in next round
eor w16,w16,w23,ror#25 // Sigma1(e)
eor w4,w4,w27,ror#13
add w26,w26,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w2,w2,w11,ror#19
eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
add w26,w26,w16 // h+=Sigma1(e)
eor w19,w19,w20 // Maj(a,b,c)
eor w17,w4,w27,ror#22 // Sigma0(a)
eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
add w13,w13,w6
add w22,w22,w26 // d+=h
add w26,w26,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w13,w13,w3
add w26,w26,w17 // h+=Sigma0(a)
add w13,w13,w2
ldr w2,[sp,#12]
str w5,[sp,#8]
ror w16,w22,#6
add w25,w25,w19 // h+=K[i]
ror w4,w15,#7
and w17,w23,w22
ror w3,w12,#17
bic w19,w24,w22
ror w5,w26,#2
add w25,w25,w13 // h+=X[i]
eor w16,w16,w22,ror#11
eor w4,w4,w15,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w26,w27 // a^b, b^c in next round
eor w16,w16,w22,ror#25 // Sigma1(e)
eor w5,w5,w26,ror#13
add w25,w25,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w3,w3,w12,ror#19
eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
add w25,w25,w16 // h+=Sigma1(e)
eor w28,w28,w27 // Maj(a,b,c)
eor w17,w5,w26,ror#22 // Sigma0(a)
eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
add w14,w14,w7
add w21,w21,w25 // d+=h
add w25,w25,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w14,w14,w4
add w25,w25,w17 // h+=Sigma0(a)
add w14,w14,w3
ldr w3,[sp,#0]
str w6,[sp,#12]
ror w16,w21,#6
add w24,w24,w28 // h+=K[i]
ror w5,w0,#7
and w17,w22,w21
ror w4,w13,#17
bic w28,w23,w21
ror w6,w25,#2
add w24,w24,w14 // h+=X[i]
eor w16,w16,w21,ror#11
eor w5,w5,w0,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w25,w26 // a^b, b^c in next round
eor w16,w16,w21,ror#25 // Sigma1(e)
eor w6,w6,w25,ror#13
add w24,w24,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w4,w4,w13,ror#19
eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
add w24,w24,w16 // h+=Sigma1(e)
eor w19,w19,w26 // Maj(a,b,c)
eor w17,w6,w25,ror#22 // Sigma0(a)
eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
add w15,w15,w8
add w20,w20,w24 // d+=h
add w24,w24,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w15,w15,w5
add w24,w24,w17 // h+=Sigma0(a)
add w15,w15,w4
ldr w4,[sp,#4]
str w7,[sp,#0]
ror w16,w20,#6
add w23,w23,w19 // h+=K[i]
ror w6,w1,#7
and w17,w21,w20
ror w5,w14,#17
bic w19,w22,w20
ror w7,w24,#2
add w23,w23,w15 // h+=X[i]
eor w16,w16,w20,ror#11
eor w6,w6,w1,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w24,w25 // a^b, b^c in next round
eor w16,w16,w20,ror#25 // Sigma1(e)
eor w7,w7,w24,ror#13
add w23,w23,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w5,w5,w14,ror#19
eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
add w23,w23,w16 // h+=Sigma1(e)
eor w28,w28,w25 // Maj(a,b,c)
eor w17,w7,w24,ror#22 // Sigma0(a)
eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
add w0,w0,w9
add w27,w27,w23 // d+=h
add w23,w23,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w0,w0,w6
add w23,w23,w17 // h+=Sigma0(a)
add w0,w0,w5
ldr w5,[sp,#8]
str w8,[sp,#4]
ror w16,w27,#6
add w22,w22,w28 // h+=K[i]
ror w7,w2,#7
and w17,w20,w27
ror w6,w15,#17
bic w28,w21,w27
ror w8,w23,#2
add w22,w22,w0 // h+=X[i]
eor w16,w16,w27,ror#11
eor w7,w7,w2,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w23,w24 // a^b, b^c in next round
eor w16,w16,w27,ror#25 // Sigma1(e)
eor w8,w8,w23,ror#13
add w22,w22,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w6,w6,w15,ror#19
eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
add w22,w22,w16 // h+=Sigma1(e)
eor w19,w19,w24 // Maj(a,b,c)
eor w17,w8,w23,ror#22 // Sigma0(a)
eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
add w1,w1,w10
add w26,w26,w22 // d+=h
add w22,w22,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w1,w1,w7
add w22,w22,w17 // h+=Sigma0(a)
add w1,w1,w6
ldr w6,[sp,#12]
str w9,[sp,#8]
ror w16,w26,#6
add w21,w21,w19 // h+=K[i]
ror w8,w3,#7
and w17,w27,w26
ror w7,w0,#17
bic w19,w20,w26
ror w9,w22,#2
add w21,w21,w1 // h+=X[i]
eor w16,w16,w26,ror#11
eor w8,w8,w3,ror#18
orr w17,w17,w19 // Ch(e,f,g)
eor w19,w22,w23 // a^b, b^c in next round
eor w16,w16,w26,ror#25 // Sigma1(e)
eor w9,w9,w22,ror#13
add w21,w21,w17 // h+=Ch(e,f,g)
and w28,w28,w19 // (b^c)&=(a^b)
eor w7,w7,w0,ror#19
eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
add w21,w21,w16 // h+=Sigma1(e)
eor w28,w28,w23 // Maj(a,b,c)
eor w17,w9,w22,ror#22 // Sigma0(a)
eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
add w2,w2,w11
add w25,w25,w21 // d+=h
add w21,w21,w28 // h+=Maj(a,b,c)
ldr w28,[x30],#4 // *K++, w19 in next round
add w2,w2,w8
add w21,w21,w17 // h+=Sigma0(a)
add w2,w2,w7
ldr w7,[sp,#0]
str w10,[sp,#12]
ror w16,w25,#6
add w20,w20,w28 // h+=K[i]
ror w9,w4,#7
and w17,w26,w25
ror w8,w1,#17
bic w28,w27,w25
ror w10,w21,#2
add w20,w20,w2 // h+=X[i]
eor w16,w16,w25,ror#11
eor w9,w9,w4,ror#18
orr w17,w17,w28 // Ch(e,f,g)
eor w28,w21,w22 // a^b, b^c in next round
eor w16,w16,w25,ror#25 // Sigma1(e)
eor w10,w10,w21,ror#13
add w20,w20,w17 // h+=Ch(e,f,g)
and w19,w19,w28 // (b^c)&=(a^b)
eor w8,w8,w1,ror#19
eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
add w20,w20,w16 // h+=Sigma1(e)
eor w19,w19,w22 // Maj(a,b,c)
eor w17,w10,w21,ror#22 // Sigma0(a)
eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
add w3,w3,w12
add w24,w24,w20 // d+=h
add w20,w20,w19 // h+=Maj(a,b,c)
ldr w19,[x30],#4 // *K++, w28 in next round
add w3,w3,w9
add w20,w20,w17 // h+=Sigma0(a)
add w3,w3,w8
cbnz w19,Loop_16_xx
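// The LK256 table ends in a zero word: loading it clears w19 and terminates
// Loop_16_xx after 64 rounds (65 loads = 260 bytes, rewound below). The
// working variables are then added back into the state and the outer Loop
// repeats until the end-of-input pointer saved in x2 is reached.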
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#260 // rewind
ldp w3,w4,[x0]
ldp w5,w6,[x0,#2*4]
add x1,x1,#14*4 // advance input pointer
ldp w7,w8,[x0,#4*4]
add w20,w20,w3
ldp w9,w10,[x0,#6*4]
add w21,w21,w4
add w22,w22,w5
add w23,w23,w6
stp w20,w21,[x0]
add w24,w24,w7
add w25,w25,w8
stp w22,w23,[x0,#2*4]
add w26,w26,w9
add w27,w27,w10
cmp x1,x2
stp w24,w25,[x0,#4*4]
stp w26,w27,[x0,#6*4]
b.ne Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*4
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.section .rodata
.align 6
LK256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.long 0 //terminator
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
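// (The .byte string above spells out the CRYPTOGAMS attribution:
//  "SHA256 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>".)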
.align 2
.align 2
.text
#ifndef __KERNEL__
.globl sha256_block_data_order_hw
.def sha256_block_data_order_hw
.type 32
.endef
.align 6
sha256_block_data_order_hw:
#ifdef BORINGSSL_DISPATCH_TEST
adrp x9,BORINGSSL_function_hit
add x9, x9, :lo12:BORINGSSL_function_hit
mov w10, #1
strb w10, [x9,#6] // kFlag_sha256_hw
#endif
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v0.4s,v1.4s},[x0]
adrp x3,LK256
add x3,x3,:lo12:LK256
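// Hardware (SHA-2 extension) path, a sketch of the structure below:
// v0/v1 hold the eight state words (abcd / efgh), v4-v7 hold the 16-word
// message schedule, and v16/v17 stage four K constants at a time from LK256
// (walked by x3). The .long-encoded opcodes are sha256su0/sha256su1 (schedule
// expansion) and sha256h/sha256h2 (four rounds per pair); they are emitted as
// raw words, presumably so assemblers without SHA-2 support still accept the
// file. x2 counts the 64-byte blocks remaining.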
Loop_hw:
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
sub x2,x2,#1
ld1 {v16.4s},[x3],#16
rev32 v4.16b,v4.16b
rev32 v5.16b,v5.16b
rev32 v6.16b,v6.16b
rev32 v7.16b,v7.16b
orr v18.16b,v0.16b,v0.16b // offload
orr v19.16b,v1.16b,v1.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
.long 0x5e2828a4 //sha256su0 v4.16b,v5.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
.long 0x5e2828c5 //sha256su0 v5.16b,v6.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v6.4s
.long 0x5e2828e6 //sha256su0 v6.16b,v7.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
.long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v7.4s
.long 0x5e282887 //sha256su0 v7.16b,v4.16b
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
.long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
ld1 {v17.4s},[x3],#16
add v16.4s,v16.4s,v4.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
ld1 {v16.4s},[x3],#16
add v17.4s,v17.4s,v5.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
ld1 {v17.4s},[x3]
add v16.4s,v16.4s,v6.4s
sub x3,x3,#64*4-16 // rewind
orr v2.16b,v0.16b,v0.16b
.long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
.long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
add v17.4s,v17.4s,v7.4s
orr v2.16b,v0.16b,v0.16b
.long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
.long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
add v0.4s,v0.4s,v18.4s
add v1.4s,v1.4s,v19.4s
cbnz x2,Loop_hw
st1 {v0.4s,v1.4s},[x0]
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
// File: thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/aesv8-gcm-armv8-unroll8.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include "openssl/arm_arch.h"
#if __ARM_MAX_ARCH__>=8
.text
.arch armv8.2-a+crypto
.globl aesv8_gcm_8x_enc_128
.def aesv8_gcm_8x_enc_128
.type 32
.endef
.align 4
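// AES-128-GCM encryption, 8x unrolled (interleaved AES-CTR and GHASH).
// Register usage inferred from the code below (not an authoritative prototype):
//   x0 = plaintext input, x1 = length in bits, x2 = ciphertext output,
//   x3 = current GHASH accumulator Xi, x4 = initial counter block,
//   x5 = AES-128 round keys rk0..rk10, x6 = table of GHASH key powers
//        (H^1..H^8 plus the precomputed Karatsuba "hk" combinations).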
aesv8_gcm_8x_enc_128:
#ifdef BORINGSSL_DISPATCH_TEST
adrp x9,BORINGSSL_function_hit
add x9, x9, :lo12:BORINGSSL_function_hit
mov w10, #1
strb w10, [x9,#7] // kFlag_aesv8_gcm_8x_enc_128
#endif
AARCH64_VALID_CALL_TARGET
cbz x1, L128_enc_ret
stp d8, d9, [sp, #-80]!
lsr x9, x1, #3
mov x16, x4
mov x11, x5
stp d10, d11, [sp, #16]
stp d12, d13, [sp, #32]
stp d14, d15, [sp, #48]
mov x5, #0xc200000000000000
stp x5, xzr, [sp, #64]
add x10, sp, #64
mov x15, #0x100000000 //set up counter increment
movi v31.16b, #0x0
mov v31.d[1], x15
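// v31 = {0,0,0,1} when viewed as 32-bit lanes: after the rev32 below, each
// CTR block's big-endian counter word sits byte-reversed in lane 3, so adding
// v31 advances the counter by one per block.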
mov x5, x9
ld1 { v0.16b}, [x16] //CTR block 0
sub x5, x5, #1 //byte_len - 1
and x5, x5, #0xffffffffffffff80 //number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
rev32 v30.16b, v0.16b //set up reversed counter
add v30.4s, v30.4s, v31.4s //CTR block 0
rev32 v1.16b, v30.16b //CTR block 1
add v30.4s, v30.4s, v31.4s //CTR block 1
rev32 v2.16b, v30.16b //CTR block 2
add v30.4s, v30.4s, v31.4s //CTR block 2
rev32 v3.16b, v30.16b //CTR block 3
add v30.4s, v30.4s, v31.4s //CTR block 3
rev32 v4.16b, v30.16b //CTR block 4
add v30.4s, v30.4s, v31.4s //CTR block 4
rev32 v5.16b, v30.16b //CTR block 5
add v30.4s, v30.4s, v31.4s //CTR block 5
ldp q26, q27, [x11, #0] //load rk0, rk1
rev32 v6.16b, v30.16b //CTR block 6
add v30.4s, v30.4s, v31.4s //CTR block 6
rev32 v7.16b, v30.16b //CTR block 7
add v30.4s, v30.4s, v31.4s //CTR block 7
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 0
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 0
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 0
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 0
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 0
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 0
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 0
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 0
ldp q28, q26, [x11, #32] //load rk2, rk3
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 1
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 1
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 1
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 1
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 1
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 1
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 1
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 2
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 1
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 2
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 2
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 2
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 2
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 2
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 2
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 2
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 3
ldp q27, q28, [x11, #64] //load rk4, rk5
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 3
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 3
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 3
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 3
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 3
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 3
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 4
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 3
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 4
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 4
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 4
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 4
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 4
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 4
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 5
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 5
ldp q26, q27, [x11, #96] //load rk6, rk7
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 5
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 5
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 5
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 5
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 5
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 5
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 6
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 6
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 6
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 6
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 6
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 6
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 6
ldp q28, q26, [x11, #128] //load rk8, rk9
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 7
ld1 { v19.16b}, [x3]
ext v19.16b, v19.16b, v19.16b, #8
rev64 v19.16b, v19.16b
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 7
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 7
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 7
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 7
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 7
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 7
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 7
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 8
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 8
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 8
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 8
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 8
ldr q27, [x11, #160] //load rk10
aese v3.16b, v26.16b //AES block 8k+11 - round 9
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 8
aese v2.16b, v26.16b //AES block 8k+10 - round 9
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 8
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 8
aese v6.16b, v26.16b //AES block 8k+14 - round 9
aese v4.16b, v26.16b //AES block 8k+12 - round 9
add x5, x5, x0 //end of main-loop input (input ptr + main-loop byte count)
aese v0.16b, v26.16b //AES block 8k+8 - round 9
aese v7.16b, v26.16b //AES block 8k+15 - round 9
aese v5.16b, v26.16b //AES block 8k+13 - round 9
aese v1.16b, v26.16b //AES block 8k+9 - round 9
add x4, x0, x1, lsr #3 //end_input_ptr
cmp x0, x5 //check if we have <= 8 blocks
b.ge L128_enc_tail //handle tail
ldp q8, q9, [x0], #32 //AES block 0, 1 - load plaintext
ldp q10, q11, [x0], #32 //AES block 2, 3 - load plaintext
ldp q12, q13, [x0], #32 //AES block 4, 5 - load plaintext
ldp q14, q15, [x0], #32 //AES block 6, 7 - load plaintext
cmp x0, x5 //check if we have <= 8 blocks
.long 0xce006d08 //eor3 v8.16b, v8.16b, v0.16b, v27.16b //AES block 0 - result
rev32 v0.16b, v30.16b //CTR block 8
add v30.4s, v30.4s, v31.4s //CTR block 8
.long 0xce016d29 //eor3 v9.16b, v9.16b, v1.16b, v27.16b //AES block 1 - result
stp q8, q9, [x2], #32 //AES block 0, 1 - store result
rev32 v1.16b, v30.16b //CTR block 9
.long 0xce056dad //eor3 v13.16b, v13.16b, v5.16b, v27.16b //AES block 5 - result
add v30.4s, v30.4s, v31.4s //CTR block 9
.long 0xce026d4a //eor3 v10.16b, v10.16b, v2.16b, v27.16b //AES block 2 - result
.long 0xce066dce //eor3 v14.16b, v14.16b, v6.16b, v27.16b //AES block 6 - result
.long 0xce046d8c //eor3 v12.16b, v12.16b, v4.16b, v27.16b //AES block 4 - result
rev32 v2.16b, v30.16b //CTR block 10
add v30.4s, v30.4s, v31.4s //CTR block 10
.long 0xce036d6b //eor3 v11.16b, v11.16b, v3.16b, v27.16b //AES block 3 - result
.long 0xce076def //eor3 v15.16b, v15.16b, v7.16b,v27.16b //AES block 7 - result
stp q10, q11, [x2], #32 //AES block 2, 3 - store result
rev32 v3.16b, v30.16b //CTR block 11
add v30.4s, v30.4s, v31.4s //CTR block 11
stp q12, q13, [x2], #32 //AES block 4, 5 - store result
stp q14, q15, [x2], #32 //AES block 6, 7 - store result
rev32 v4.16b, v30.16b //CTR block 12
add v30.4s, v30.4s, v31.4s //CTR block 12
b.ge L128_enc_prepretail //do prepretail
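// Main loop (sketch): each iteration encrypts eight counter blocks (AES-128
// rounds 0-9 interleaved below) while folding the previous eight ciphertext
// blocks into the GHASH accumulators v17/v18/v19 (high/mid/low, Karatsuba
// form) using the key powers H^1..H^8 from the x6 table, then reduces the
// result modulo the GHASH polynomial via the 0xC2...00 constant held at sp+64.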
L128_enc_main_loop: //main loop start
rev32 v5.16b, v30.16b //CTR block 8k+13
ldr q20, [x6, #96] //load h5l | h5h
ldr q22, [x6, #128] //load h6l | h6h
add v30.4s, v30.4s, v31.4s //CTR block 8k+13
rev64 v9.16b, v9.16b //GHASH block 8k+1
rev64 v8.16b, v8.16b //GHASH block 8k
ldr q23, [x6, #144] //load h7l | h7h
ldr q25, [x6, #176] //load h8l | h8h
rev32 v6.16b, v30.16b //CTR block 8k+14
add v30.4s, v30.4s, v31.4s //CTR block 8k+14
ext v19.16b, v19.16b, v19.16b, #8 //PRE 0
ldr q21, [x6, #112] //load h6k | h5k
ldr q24, [x6, #160] //load h8k | h7k
rev64 v13.16b, v13.16b //GHASH block 8k+5 (t0, t1, t2 and t3 free)
rev64 v11.16b, v11.16b //GHASH block 8k+3
ldp q26, q27, [x11, #0] //load rk0, rk1
eor v8.16b, v8.16b, v19.16b //PRE 1
rev32 v7.16b, v30.16b //CTR block 8k+15
rev64 v15.16b, v15.16b //GHASH block 8k+7 (t0, t1, t2 and t3 free)
pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high
rev64 v10.16b, v10.16b //GHASH block 8k+2
pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high
pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low
trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low
trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high
pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high
eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low
ldr q23, [x6, #48] //load h3l | h3h
ldr q25, [x6, #80] //load h4l | h4h
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 0
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 0
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 0
eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high
add v30.4s, v30.4s, v31.4s //CTR block 8k+15
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 0
eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 0
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 1
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 0
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 1
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 0
pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 1
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 0
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 1
.long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b,v9.16b //GHASH block 8k+2, 8k+3 - high
trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
ldp q28, q26, [x11, #32] //load rk2, rk3
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 1
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 1
pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 1
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 1
pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid
eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid
rev64 v14.16b, v14.16b //GHASH block 8k+6 (t0, t1, and t2 free)
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low
pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid
eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid
pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 2
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 2
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 2
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 2
.long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 2
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 2
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 2
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 2
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 3
ldr q21, [x6, #16] //load h2k | h1k
ldr q24, [x6, #64] //load h4k | h3k
rev64 v12.16b, v12.16b //GHASH block 8k+4 (t0, t1, and t2 free)
ldp q27, q28, [x11, #64] //load rk4, rk5
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 3
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 3
ldr q20, [x6] //load h1l | h1h
ldr q22, [x6, #32] //load h2l | h2h
pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high
pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low
trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 3
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 3
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 3
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 3
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 3
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 4
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 4
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 4
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 4
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 4
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 4
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 4
pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high
eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 4
ldp q26, q27, [x11, #96] //load rk6, rk7
trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid
pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid
pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high
pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 5
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 5
pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low
.long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high
trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
.long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 5
eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 5
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 5
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 5
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 5
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 5
.long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
ldr d16, [x10] //MODULO - load modulo constant
pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 6
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 6
pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 6
pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low
ldp q8, q9, [x0], #32 //AES block 8k+8, 8k+9 - load plaintext
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 6
rev32 v20.16b, v30.16b //CTR block 8k+16
add v30.4s, v30.4s, v31.4s //CTR block 8k+16
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 6
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 6
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 6
.long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
ldp q28, q26, [x11, #128] //load rk8, rk9
.long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 7
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 7
ldp q10, q11, [x0], #32 //AES block 8k+10, 8k+11 - load plaintext
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 7
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 7
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 7
pmull v21.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 7
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 7
rev32 v22.16b, v30.16b //CTR block 8k+17
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 7
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 8
ldp q12, q13, [x0], #32 //AES block 8k+12, 8k+13 - load plaintext
add v30.4s, v30.4s, v31.4s //CTR block 8k+17
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 8
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 8
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 8
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 8
.long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up
ldr q27, [x11, #160] //load rk10
ext v29.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
rev32 v23.16b, v30.16b //CTR block 8k+18
add v30.4s, v30.4s, v31.4s //CTR block 8k+18
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 8
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 8
.long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 8
aese v2.16b, v26.16b //AES block 8k+10 - round 9
aese v4.16b, v26.16b //AES block 8k+12 - round 9
aese v1.16b, v26.16b //AES block 8k+9 - round 9
ldp q14, q15, [x0], #32 //AES block 8k+14, 8k+15 - load plaintext
rev32 v25.16b, v30.16b //CTR block 8k+19
add v30.4s, v30.4s, v31.4s //CTR block 8k+19
cmp x0, x5 //LOOP CONTROL
.long 0xce046d8c //eor3 v12.16b, v12.16b, v4.16b, v27.16b //AES block 8k+12 - result
aese v7.16b, v26.16b //AES block 8k+15 - round 9
aese v6.16b, v26.16b //AES block 8k+14 - round 9
aese v3.16b, v26.16b //AES block 8k+11 - round 9
.long 0xce026d4a //eor3 v10.16b, v10.16b, v2.16b, v27.16b //AES block 8k+10 - result
mov v2.16b, v23.16b //CTR block 8k+18
aese v0.16b, v26.16b //AES block 8k+8 - round 9
rev32 v4.16b, v30.16b //CTR block 8k+20
add v30.4s, v30.4s, v31.4s //CTR block 8k+20
.long 0xce076def //eor3 v15.16b, v15.16b, v7.16b, v27.16b //AES block 8k+15 - result
aese v5.16b, v26.16b //AES block 8k+13 - round 9
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
.long 0xce016d29 //eor3 v9.16b, v9.16b, v1.16b, v27.16b //AES block 8k+9 - result
.long 0xce036d6b //eor3 v11.16b, v11.16b, v3.16b, v27.16b //AES block 8k+11 - result
mov v3.16b, v25.16b //CTR block 8k+19
ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
.long 0xce056dad //eor3 v13.16b, v13.16b, v5.16b, v29.16b //AES block 8k+13 - result
mov v1.16b, v22.16b //CTR block 8k+17
.long 0xce006d08 //eor3 v8.16b, v8.16b, v0.16b, v27.16b //AES block 8k+8 - result
mov v0.16b, v20.16b //CTR block 8k+16
stp q8, q9, [x2], #32 //AES block 8k+8, 8k+9 - store result
stp q10, q11, [x2], #32 //AES block 8k+10, 8k+11 - store result
.long 0xce066dce //eor3 v14.16b, v14.16b, v6.16b, v27.16b //AES block 8k+14 - result
stp q12, q13, [x2], #32 //AES block 8k+12, 8k+13 - store result
.long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low
stp q14, q15, [x2], #32 //AES block 8k+14, 8k+15 - store result
b.lt L128_enc_main_loop
L128_enc_prepretail: //PREPRETAIL
rev32 v5.16b, v30.16b //CTR block 8k+13
ldr q23, [x6, #144] //load h7l | h7h
ldr q25, [x6, #176] //load h8l | h8h
ext v19.16b, v19.16b, v19.16b, #8 //PRE 0
ldr q20, [x6, #96] //load h5l | h5h
ldr q22, [x6, #128] //load h6l | h6h
rev64 v8.16b, v8.16b //GHASH block 8k
rev64 v9.16b, v9.16b //GHASH block 8k+1
ldr q21, [x6, #112] //load h6k | h5k
ldr q24, [x6, #160] //load h8k | h7k
add v30.4s, v30.4s, v31.4s //CTR block 8k+13
rev64 v11.16b, v11.16b //GHASH block 8k+3
rev64 v10.16b, v10.16b //GHASH block 8k+2
eor v8.16b, v8.16b, v19.16b //PRE 1
rev32 v6.16b, v30.16b //CTR block 8k+14
pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high
pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low
pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high
rev64 v13.16b, v13.16b //GHASH block 8k+5 (t0, t1, t2 and t3 free)
trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low
eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high
trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low
eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid
ldp q26, q27, [x11, #0] //load rk0, rk1
add v30.4s, v30.4s, v31.4s //CTR block 8k+14
pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid
pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid
rev64 v12.16b, v12.16b //GHASH block 8k+4 (t0, t1, and t2 free)
rev64 v15.16b, v15.16b //GHASH block 8k+7 (t0, t1, t2 and t3 free)
eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid
rev32 v7.16b, v30.16b //CTR block 8k+15
rev64 v14.16b, v14.16b //GHASH block 8k+6 (t0, t1, and t2 free)
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 0
pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high
pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 0
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 0
pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 0
.long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high
trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 0
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 0
eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 0
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 0
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 1
pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low
ldr q23, [x6, #48] //load h3l | h3h
ldr q25, [x6, #80] //load h4l | h4h
ldp q28, q26, [x11, #32] //load rk2, rk3
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 1
pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low
pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 1
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 1
.long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
ldr q21, [x6, #16] //load h2k | h1k
ldr q24, [x6, #64] //load h4k | h3k
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 1
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 1
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 1
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 2
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 2
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 2
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 2
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 1
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 2
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 3
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 2
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 2
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 2
ldp q27, q28, [x11, #64] //load rk4, rk5
ldr q20, [x6] //load h1l | h1h
ldr q22, [x6, #32] //load h2l | h2h
trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 3
pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 3
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 3
pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low
trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 3
add v30.4s, v30.4s, v31.4s //CTR block 8k+15
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 3
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 3
eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 3
pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high
trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low
trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 4
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 4
.long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high
.long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low
eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 5
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 4
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 4
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 4
pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 4
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 4
pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high
ldp q26, q27, [x11, #96] //load rk6, rk7
pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low
.long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid
pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 5
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 5
ldr d16, [x10] //MODULO - load modulo constant
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 5
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 5
.long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 5
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 5
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 5
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 6
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 6
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 6
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 6
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low
.long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 6
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 6
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 6
pmull v21.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
.long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up
ldp q28, q26, [x11, #128] //load rk8, rk9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 7
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 7
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 7
ext v29.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 7
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 7
.long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 7
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 7
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 7
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 8
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 8
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 8
ext v18.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 8
.long 0xce114a73 //eor3 v19.16b, v19.16b, v17.16b, v18.16b //MODULO - fold into low
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 8
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 8
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 8
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 8
ldr q27, [x11, #160] //load rk10
aese v6.16b, v26.16b //AES block 8k+14 - round 9
aese v2.16b, v26.16b //AES block 8k+10 - round 9
aese v0.16b, v26.16b //AES block 8k+8 - round 9
aese v1.16b, v26.16b //AES block 8k+9 - round 9
aese v3.16b, v26.16b //AES block 8k+11 - round 9
aese v5.16b, v26.16b //AES block 8k+13 - round 9
aese v4.16b, v26.16b //AES block 8k+12 - round 9
aese v7.16b, v26.16b //AES block 8k+15 - round 9
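// --------------------------------------------------------------------------
// Tail handling (editorial sketch, not part of the generated comments):
// on entry v0..v7 hold the keystream for up to eight blocks (the last round
// key, copied into v29 below, is folded in together with the plaintext by
// the eor3 instructions). The chain of "blocks left > N" comparisons that
// follows shifts the unused keystream registers down (v7 <- v6 <- ...) and
// rolls the counter in v30 back by one increment per unused block, so that
// v7 always ends up holding the keystream for the final, possibly partial,
// block. Roughly, with illustrative names only:
//
//   remaining = end_input_ptr - in;        // 1..128 bytes
//   blocks    = (remaining + 15) / 16;     // 1..8
//   for (i = blocks; i < 8; i++) ctr--;    // give back unused counter values
//   // then fall through the "> 7", "> 6", ... sections, one per block
// --------------------------------------------------------------------------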
L128_enc_tail: //TAIL
sub x5, x4, x0 //number of bytes left to process (end of input minus current input pointer)
ldr q8, [x0], #16 //AES block 8k+8 - load plaintext
mov v29.16b, v27.16b
ldp q20, q21, [x6, #96] //load h5l | h5h
.long 0xce007509 //eor3 v9.16b, v8.16b, v0.16b, v29.16b //AES block 8k+8 - result
ext v16.16b, v19.16b, v19.16b, #8 //prepare final partial tag
ldp q22, q23, [x6, #128] //load h6l | h6h
ldp q24, q25, [x6, #160] //load h8k | h7k
cmp x5, #112
b.gt L128_enc_blocks_more_than_7
mov v7.16b, v6.16b
mov v6.16b, v5.16b
movi v17.8b, #0
cmp x5, #96
sub v30.4s, v30.4s, v31.4s
mov v5.16b, v4.16b
mov v4.16b, v3.16b
mov v3.16b, v2.16b
mov v2.16b, v1.16b
movi v19.8b, #0
movi v18.8b, #0
b.gt L128_enc_blocks_more_than_6
mov v7.16b, v6.16b
cmp x5, #80
sub v30.4s, v30.4s, v31.4s
mov v6.16b, v5.16b
mov v5.16b, v4.16b
mov v4.16b, v3.16b
mov v3.16b, v1.16b
b.gt L128_enc_blocks_more_than_5
cmp x5, #64
sub v30.4s, v30.4s, v31.4s
mov v7.16b, v6.16b
mov v6.16b, v5.16b
mov v5.16b, v4.16b
mov v4.16b, v1.16b
b.gt L128_enc_blocks_more_than_4
mov v7.16b, v6.16b
sub v30.4s, v30.4s, v31.4s
mov v6.16b, v5.16b
mov v5.16b, v1.16b
cmp x5, #48
b.gt L128_enc_blocks_more_than_3
sub v30.4s, v30.4s, v31.4s
mov v7.16b, v6.16b
mov v6.16b, v1.16b
cmp x5, #32
ldr q24, [x6, #64] //load h4k | h3k
b.gt L128_enc_blocks_more_than_2
cmp x5, #16
sub v30.4s, v30.4s, v31.4s
mov v7.16b, v1.16b
b.gt L128_enc_blocks_more_than_1
ldr q21, [x6, #16] //load h2k | h1k
sub v30.4s, v30.4s, v31.4s
b L128_enc_blocks_less_than_1
L128_enc_blocks_more_than_7: //blocks left > 7
st1 { v9.16b}, [x2], #16 //AES final-7 block - store result
rev64 v8.16b, v9.16b //GHASH final-7 block
ldr q9, [x0], #16 //AES final-6 block - load plaintext
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-7 block - mid
pmull2 v17.1q, v8.2d, v25.2d //GHASH final-7 block - high
ins v18.d[0], v24.d[1] //GHASH final-7 block - mid
eor v27.8b, v27.8b, v8.8b //GHASH final-7 block - mid
movi v16.8b, #0 //suppress further partial tag feed in
.long 0xce017529 //eor3 v9.16b, v9.16b, v1.16b, v29.16b //AES final-6 block - result
pmull v18.1q, v27.1d, v18.1d //GHASH final-7 block - mid
pmull v19.1q, v8.1d, v25.1d //GHASH final-7 block - low
L128_enc_blocks_more_than_6: //blocks left > 6
st1 { v9.16b}, [x2], #16 //AES final-6 block - store result
rev64 v8.16b, v9.16b //GHASH final-6 block
ldr q9, [x0], #16 //AES final-5 block - load plaintext
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-6 block - mid
.long 0xce027529 //eor3 v9.16b, v9.16b, v2.16b, v29.16b //AES final-5 block - result
pmull v26.1q, v8.1d, v23.1d //GHASH final-6 block - low
eor v27.8b, v27.8b, v8.8b //GHASH final-6 block - mid
movi v16.8b, #0 //suppress further partial tag feed in
pmull v27.1q, v27.1d, v24.1d //GHASH final-6 block - mid
pmull2 v28.1q, v8.2d, v23.2d //GHASH final-6 block - high
eor v19.16b, v19.16b, v26.16b //GHASH final-6 block - low
eor v18.16b, v18.16b, v27.16b //GHASH final-6 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-6 block - high
L128_enc_blocks_more_than_5: //blocks left > 5
st1 { v9.16b}, [x2], #16 //AES final-5 block - store result
rev64 v8.16b, v9.16b //GHASH final-5 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-5 block - mid
ldr q9, [x0], #16 //AES final-4 block - load plaintext
pmull2 v28.1q, v8.2d, v22.2d //GHASH final-5 block - high
eor v17.16b, v17.16b, v28.16b //GHASH final-5 block - high
eor v27.8b, v27.8b, v8.8b //GHASH final-5 block - mid
ins v27.d[1], v27.d[0] //GHASH final-5 block - mid
.long 0xce037529 //eor3 v9.16b, v9.16b, v3.16b, v29.16b //AES final-4 block - result
pmull v26.1q, v8.1d, v22.1d //GHASH final-5 block - low
movi v16.8b, #0 //suppress further partial tag feed in
pmull2 v27.1q, v27.2d, v21.2d //GHASH final-5 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-5 block - low
eor v18.16b, v18.16b, v27.16b //GHASH final-5 block - mid
L128_enc_blocks_more_than_4: //blocks left > 4
st1 { v9.16b}, [x2], #16 //AES final-4 block - store result
rev64 v8.16b, v9.16b //GHASH final-4 block
ldr q9, [x0], #16 //AES final-3 block - load plaintext
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-4 block - mid
movi v16.8b, #0 //suppress further partial tag feed in
pmull2 v28.1q, v8.2d, v20.2d //GHASH final-4 block - high
eor v27.8b, v27.8b, v8.8b //GHASH final-4 block - mid
pmull v26.1q, v8.1d, v20.1d //GHASH final-4 block - low
eor v17.16b, v17.16b, v28.16b //GHASH final-4 block - high
pmull v27.1q, v27.1d, v21.1d //GHASH final-4 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-4 block - low
.long 0xce047529 //eor3 v9.16b, v9.16b, v4.16b, v29.16b //AES final-3 block - result
eor v18.16b, v18.16b, v27.16b //GHASH final-4 block - mid
L128_enc_blocks_more_than_3: //blocks left > 3
st1 { v9.16b}, [x2], #16 //AES final-3 block - store result
ldr q25, [x6, #80] //load h4l | h4h
rev64 v8.16b, v9.16b //GHASH final-3 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
movi v16.8b, #0 //suppress further partial tag feed in
ins v27.d[0], v8.d[1] //GHASH final-3 block - mid
ldr q24, [x6, #64] //load h4k | h3k
pmull v26.1q, v8.1d, v25.1d //GHASH final-3 block - low
ldr q9, [x0], #16 //AES final-2 block - load plaintext
eor v27.8b, v27.8b, v8.8b //GHASH final-3 block - mid
ins v27.d[1], v27.d[0] //GHASH final-3 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-3 block - low
.long 0xce057529 //eor3 v9.16b, v9.16b, v5.16b, v29.16b //AES final-2 block - result
pmull2 v27.1q, v27.2d, v24.2d //GHASH final-3 block - mid
pmull2 v28.1q, v8.2d, v25.2d //GHASH final-3 block - high
eor v18.16b, v18.16b, v27.16b //GHASH final-3 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-3 block - high
L128_enc_blocks_more_than_2: //blocks left > 2
st1 { v9.16b}, [x2], #16 //AES final-2 block - store result
rev64 v8.16b, v9.16b //GHASH final-2 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ldr q9, [x0], #16 //AES final-1 block - load plaintext
ins v27.d[0], v8.d[1] //GHASH final-2 block - mid
ldr q23, [x6, #48] //load h3l | h3h
movi v16.8b, #0 //suppress further partial tag feed in
eor v27.8b, v27.8b, v8.8b //GHASH final-2 block - mid
.long 0xce067529 //eor3 v9.16b, v9.16b, v6.16b, v29.16b //AES final-1 block - result
pmull2 v28.1q, v8.2d, v23.2d //GHASH final-2 block - high
pmull v26.1q, v8.1d, v23.1d //GHASH final-2 block - low
pmull v27.1q, v27.1d, v24.1d //GHASH final-2 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-2 block - high
eor v18.16b, v18.16b, v27.16b //GHASH final-2 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-2 block - low
L128_enc_blocks_more_than_1: //blocks left > 1
st1 { v9.16b}, [x2], #16 //AES final-1 block - store result
ldr q22, [x6, #32] //load h2l | h2h
rev64 v8.16b, v9.16b //GHASH final-1 block
ldr q9, [x0], #16 //AES final block - load plaintext
eor v8.16b, v8.16b, v16.16b //feed in partial tag
movi v16.8b, #0 //suppress further partial tag feed in
ins v27.d[0], v8.d[1] //GHASH final-1 block - mid
.long 0xce077529 //eor3 v9.16b, v9.16b, v7.16b, v29.16b //AES final block - result
pmull2 v28.1q, v8.2d, v22.2d //GHASH final-1 block - high
eor v27.8b, v27.8b, v8.8b //GHASH final-1 block - mid
ldr q21, [x6, #16] //load h2k | h1k
ins v27.d[1], v27.d[0] //GHASH final-1 block - mid
pmull v26.1q, v8.1d, v22.1d //GHASH final-1 block - low
pmull2 v27.1q, v27.2d, v21.2d //GHASH final-1 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-1 block - high
eor v18.16b, v18.16b, v27.16b //GHASH final-1 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-1 block - low
L128_enc_blocks_less_than_1: //blocks left <= 1
rev32 v30.16b, v30.16b
str q30, [x16] //store the updated counter
and x1, x1, #127 //bit_length %= 128
sub x1, x1, #128 //bit_length -= 128
neg x1, x1 //bit_length = 128 - #bits in input (in range [1,128])
mvn x7, xzr //temp0_x = 0xffffffffffffffff
ld1 { v26.16b}, [x2] //load existing bytes where the possibly partial last block is to be stored
and x1, x1, #127 //bit_length %= 128
lsr x7, x7, x1 //temp0_x is mask for top 64b of last block
mvn x8, xzr //temp1_x = 0xffffffffffffffff
cmp x1, #64
csel x13, x8, x7, lt
csel x14, x7, xzr, lt
mov v0.d[1], x14
mov v0.d[0], x13 //ctr0b is mask for last block
and v9.16b, v9.16b, v0.16b //possibly partial last block has zeroes in highest bits
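// Worked example for the mask above (editorial note; the variable lsr uses
// its shift amount modulo 64): if the final block carries 4 bytes (32 bits),
// then x1 = (128 - 32) & 127 = 96, the lsr shifts by 96 mod 64 = 32 giving
// x7 = 0x00000000ffffffff, and since x1 >= 64 the csels select x13 = x7 and
// x14 = 0, so the mask in v0 keeps only bytes 0-3 of the last block.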
rev64 v8.16b, v9.16b //GHASH final block
bif v9.16b, v26.16b, v0.16b //insert existing bytes in top end of result before storing
st1 { v9.16b}, [x2] //store all 16B
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v16.d[0], v8.d[1] //GHASH final block - mid
eor v16.8b, v16.8b, v8.8b //GHASH final block - mid
ldr q20, [x6] //load h1l | h1h
pmull v16.1q, v16.1d, v21.1d //GHASH final block - mid
pmull2 v28.1q, v8.2d, v20.2d //GHASH final block - high
eor v18.16b, v18.16b, v16.16b //GHASH final block - mid
ldr d16, [x10] //MODULO - load modulo constant
pmull v26.1q, v8.1d, v20.1d //GHASH final block - low
eor v17.16b, v17.16b, v28.16b //GHASH final block - high
eor v19.16b, v19.16b, v26.16b //GHASH final block - low
ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
.long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - Karatsuba tidy up
.long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
.long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low
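// Note on the MODULO sequence above (editorial sketch): v17/v18/v19 hold the
// high/middle/low 128-bit parts of the unreduced GHASH product, and the
// constant loaded from [x10] is 0xc200000000000000, which encodes the GCM
// reduction polynomial x^128 + x^7 + x^2 + x + 1 in the bit-reflected
// representation used by this kernel. The two pmull-by-constant steps fold
// the high part into the middle and then the middle into the low part,
// leaving the reduced 128-bit GHASH accumulator in v19.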
ext v19.16b, v19.16b, v19.16b, #8
rev64 v19.16b, v19.16b
st1 { v19.16b }, [x3]
mov x0, x9
ldp d10, d11, [sp, #16]
ldp d12, d13, [sp, #32]
ldp d14, d15, [sp, #48]
ldp d8, d9, [sp], #80
ret
L128_enc_ret:
mov w0, #0x0
ret
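// --------------------------------------------------------------------------
// aesv8_gcm_8x_dec_128: 8x interleaved AES-128-GCM decryption kernel using
// the AES, PMULL and SHA3 (EOR3) instructions. Register usage, as read from
// the code below (editorial summary, not a formal interface description):
// x0 = ciphertext in, x1 = input length in bits, x2 = plaintext out,
// x3 = current GHASH state Xi, x4 = counter block (updated before return),
// x5 = AES round-key schedule, x6 = precomputed table of powers of H;
// the byte length (x1 >> 3) is returned in x0.
// --------------------------------------------------------------------------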
.globl aesv8_gcm_8x_dec_128
.def aesv8_gcm_8x_dec_128
.type 32
.endef
.align 4
aesv8_gcm_8x_dec_128:
AARCH64_VALID_CALL_TARGET
cbz x1, L128_dec_ret
stp d8, d9, [sp, #-80]!
lsr x9, x1, #3
mov x16, x4
mov x11, x5
stp d10, d11, [sp, #16]
stp d12, d13, [sp, #32]
stp d14, d15, [sp, #48]
mov x5, #0xc200000000000000
stp x5, xzr, [sp, #64]
add x10, sp, #64
mov x5, x9
ld1 { v0.16b}, [x16] //CTR block 0
ldp q26, q27, [x11, #0] //load rk0, rk1
sub x5, x5, #1 //byte_len - 1
mov x15, #0x100000000 //set up counter increment
movi v31.16b, #0x0
mov v31.d[1], x15
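// Counter handling (editorial note): the counter block is kept with each
// 32-bit word byte-reversed in v30 (see the rev32 below), so its big-endian
// 32-bit counter field becomes a native word in lane 3. Setting
// v31.d[1] = 0x100000000 places the value 1 in that same lane, so every
// "add v30.4s, v30.4s, v31.4s" advances the counter by one block.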
ld1 { v19.16b}, [x3]
ext v19.16b, v19.16b, v19.16b, #8
rev64 v19.16b, v19.16b
rev32 v30.16b, v0.16b //set up reversed counter
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 0
add v30.4s, v30.4s, v31.4s //CTR block 0
rev32 v1.16b, v30.16b //CTR block 1
add v30.4s, v30.4s, v31.4s //CTR block 1
and x5, x5, #0xffffffffffffff80 //number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
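// For example (editorial note): with byte_len = 256, the "sub x5, x5, #1"
// above and this mask give x5 = (256 - 1) & ~127 = 128, so the first 128
// bytes go through the unrolled 8-block path and the remaining 128 bytes
// are left for the prepretail/tail code, which always gets at least 1 byte.
// The later "add x5, x5, x0" turns this byte count into an end pointer.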
rev32 v2.16b, v30.16b //CTR block 2
add v30.4s, v30.4s, v31.4s //CTR block 2
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 0
rev32 v3.16b, v30.16b //CTR block 3
add v30.4s, v30.4s, v31.4s //CTR block 3
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 1
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 1
rev32 v4.16b, v30.16b //CTR block 4
add v30.4s, v30.4s, v31.4s //CTR block 4
rev32 v5.16b, v30.16b //CTR block 5
add v30.4s, v30.4s, v31.4s //CTR block 5
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 0
rev32 v6.16b, v30.16b //CTR block 6
add v30.4s, v30.4s, v31.4s //CTR block 6
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 0
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 0
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 0
rev32 v7.16b, v30.16b //CTR block 7
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 0
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 1
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 0
ldp q28, q26, [x11, #32] //load rk2, rk3
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 1
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 1
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 1
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 1
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 2
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 2
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 1
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 2
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 2
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 2
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 2
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 2
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 2
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 3
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 3
ldp q27, q28, [x11, #64] //load rk4, rk5
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 3
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 3
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 3
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 3
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 3
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 4
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 4
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 3
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 4
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 4
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 4
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 4
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 4
ldp q26, q27, [x11, #96] //load rk6, rk7
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 5
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 5
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 5
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 5
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 5
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 5
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 5
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 6
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 5
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 6
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 6
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 6
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 6
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 6
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 6
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 7
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 7
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 7
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 7
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 7
ldp q28, q26, [x11, #128] //load rk8, rk9
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 7
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 7
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 7
add x5, x5, x0
add v30.4s, v30.4s, v31.4s //CTR block 7
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 8
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 8
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 8
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 8
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 8
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 8
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 8
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 8
aese v0.16b, v26.16b //AES block 0 - round 9
aese v1.16b, v26.16b //AES block 1 - round 9
aese v6.16b, v26.16b //AES block 6 - round 9
ldr q27, [x11, #160] //load rk10
aese v4.16b, v26.16b //AES block 4 - round 9
aese v3.16b, v26.16b //AES block 3 - round 9
aese v2.16b, v26.16b //AES block 2 - round 9
aese v5.16b, v26.16b //AES block 5 - round 9
aese v7.16b, v26.16b //AES block 7 - round 9
add x4, x0, x1, lsr #3 //end_input_ptr
cmp x0, x5 //check if we have <= 8 blocks
b.ge L128_dec_tail //handle tail
ldp q8, q9, [x0], #32 //AES block 0, 1 - load ciphertext
.long 0xce006d00 //eor3 v0.16b, v8.16b, v0.16b, v27.16b //AES block 0 - result
.long 0xce016d21 //eor3 v1.16b, v9.16b, v1.16b, v27.16b //AES block 1 - result
stp q0, q1, [x2], #32 //AES block 0, 1 - store result
rev32 v0.16b, v30.16b //CTR block 8
add v30.4s, v30.4s, v31.4s //CTR block 8
ldp q10, q11, [x0], #32 //AES block 2, 3 - load ciphertext
ldp q12, q13, [x0], #32 //AES block 4, 5 - load ciphertext
rev32 v1.16b, v30.16b //CTR block 9
add v30.4s, v30.4s, v31.4s //CTR block 9
ldp q14, q15, [x0], #32 //AES block 6, 7 - load ciphertext
.long 0xce036d63 //eor3 v3.16b, v11.16b, v3.16b, v27.16b //AES block 3 - result
.long 0xce026d42 //eor3 v2.16b, v10.16b, v2.16b, v27.16b //AES block 2 - result
stp q2, q3, [x2], #32 //AES block 2, 3 - store result
rev32 v2.16b, v30.16b //CTR block 10
add v30.4s, v30.4s, v31.4s //CTR block 10
.long 0xce066dc6 //eor3 v6.16b, v14.16b, v6.16b, v27.16b //AES block 6 - result
rev32 v3.16b, v30.16b //CTR block 11
add v30.4s, v30.4s, v31.4s //CTR block 11
.long 0xce046d84 //eor3 v4.16b, v12.16b, v4.16b, v27.16b //AES block 4 - result
.long 0xce056da5 //eor3 v5.16b, v13.16b, v5.16b, v27.16b //AES block 5 - result
stp q4, q5, [x2], #32 //AES block 4, 5 - store result
.long 0xce076de7 //eor3 v7.16b, v15.16b, v7.16b, v27.16b //AES block 7 - result
stp q6, q7, [x2], #32 //AES block 6, 7 - store result
rev32 v4.16b, v30.16b //CTR block 12
cmp x0, x5 //check if we have <= 8 blocks
add v30.4s, v30.4s, v31.4s //CTR block 12
b.ge L128_dec_prepretail //do prepretail
L128_dec_main_loop: //main loop start
ldr q23, [x6, #144] //load h7l | h7h
ldr q25, [x6, #176] //load h8l | h8h
rev64 v9.16b, v9.16b //GHASH block 8k+1
rev64 v8.16b, v8.16b //GHASH block 8k
ext v19.16b, v19.16b, v19.16b, #8 //PRE 0
rev64 v14.16b, v14.16b //GHASH block 8k+6
ldr q20, [x6, #96] //load h5l | h5h
ldr q22, [x6, #128] //load h6l | h6h
eor v8.16b, v8.16b, v19.16b //PRE 1
rev32 v5.16b, v30.16b //CTR block 8k+13
add v30.4s, v30.4s, v31.4s //CTR block 8k+13
rev64 v10.16b, v10.16b //GHASH block 8k+2
rev64 v12.16b, v12.16b //GHASH block 8k+4
ldp q26, q27, [x11, #0] //load rk0, rk1
rev32 v6.16b, v30.16b //CTR block 8k+14
add v30.4s, v30.4s, v31.4s //CTR block 8k+14
ldr q21, [x6, #112] //load h6k | h5k
ldr q24, [x6, #160] //load h8k | h7k
pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high
pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high
rev64 v11.16b, v11.16b //GHASH block 8k+3
rev32 v7.16b, v30.16b //CTR block 8k+15
trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
rev64 v13.16b, v13.16b //GHASH block 8k+5
pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low
pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low
trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 0
pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 0
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 0
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 0
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 0
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 0
eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 0
eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 0
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 1
eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low
.long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high
ldp q28, q26, [x11, #32] //load rk2, rk3
trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 1
pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low
trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid
ldr q23, [x6, #48] //load h3l | h3h
ldr q25, [x6, #80] //load h4l | h4h
pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 1
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 1
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 1
pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 1
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 1
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 1
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 2
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 2
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 2
eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
ldr q20, [x6] //load h1l | h1h
ldr q22, [x6, #32] //load h2l | h2h
eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 2
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 2
trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 2
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 2
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 2
pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid
pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 3
rev64 v15.16b, v15.16b //GHASH block 8k+7
pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high
ldp q27, q28, [x11, #64] //load rk4, rk5
pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low
.long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
ldr q21, [x6, #16] //load h2k | h1k
ldr q24, [x6, #64] //load h4k | h3k
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 3
trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 3
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 3
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 3
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 3
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 3
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 3
pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high
pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low
pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high
pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 4
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 4
eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 4
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 4
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 4
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 4
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 4
trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
ldp q26, q27, [x11, #96] //load rk6, rk7
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 5
pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 5
eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 5
pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 5
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 5
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 5
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 5
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 5
pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high
.long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
.long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 6
.long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 6
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 6
pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 6
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 6
pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low
pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 6
add v30.4s, v30.4s, v31.4s //CTR block 8k+15
.long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 6
ldp q28, q26, [x11, #128] //load rk8, rk9
ldr d16, [x10] //MODULO - load modulo constant
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 7
rev32 v20.16b, v30.16b //CTR block 8k+16
.long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
add v30.4s, v30.4s, v31.4s //CTR block 8k+16
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 7
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 7
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 7
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 7
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 7
rev32 v22.16b, v30.16b //CTR block 8k+17
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 7
ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
.long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - Karatsuba tidy up
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 7
add v30.4s, v30.4s, v31.4s //CTR block 8k+17
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 8
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 8
ldp q8, q9, [x0], #32 //AES block 8k+8, 8k+9 - load ciphertext
ldp q10, q11, [x0], #32 //AES block 8k+10, 8k+11 - load ciphertext
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 8
rev32 v23.16b, v30.16b //CTR block 8k+18
ldp q12, q13, [x0], #32 //AES block 8k+12, 8k+13 - load ciphertext
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 8
.long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
ldp q14, q15, [x0], #32 //AES block 8k+14, 8k+15 - load ciphertext
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 8
add v30.4s, v30.4s, v31.4s //CTR block 8k+18
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 8
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 8
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 8
aese v0.16b, v26.16b //AES block 8k+8 - round 9
aese v1.16b, v26.16b //AES block 8k+9 - round 9
ldr q27, [x11, #160] //load rk10
aese v6.16b, v26.16b //AES block 8k+14 - round 9
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
aese v2.16b, v26.16b //AES block 8k+10 - round 9
aese v7.16b, v26.16b //AES block 8k+15 - round 9
aese v4.16b, v26.16b //AES block 8k+12 - round 9
ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
rev32 v25.16b, v30.16b //CTR block 8k+19
add v30.4s, v30.4s, v31.4s //CTR block 8k+19
aese v3.16b, v26.16b //AES block 8k+11 - round 9
aese v5.16b, v26.16b //AES block 8k+13 - round 9
.long 0xce016d21 //eor3 v1.16b, v9.16b, v1.16b, v27.16b //AES block 8k+9 - result
.long 0xce006d00 //eor3 v0.16b, v8.16b, v0.16b, v27.16b //AES block 8k+8 - result
.long 0xce076de7 //eor3 v7.16b, v15.16b, v7.16b, v27.16b //AES block 8k+15 - result
.long 0xce066dc6 //eor3 v6.16b, v14.16b, v6.16b, v27.16b //AES block 8k+14 - result
.long 0xce026d42 //eor3 v2.16b, v10.16b, v2.16b, v27.16b //AES block 8k+10 - result
stp q0, q1, [x2], #32 //AES block 8k+8, 8k+9 - store result
mov v1.16b, v22.16b //CTR block 8k+17
.long 0xce046d84 //eor3 v4.16b, v12.16b, v4.16b, v27.16b //AES block 8k+12 - result
.long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low
mov v0.16b, v20.16b //CTR block 8k+16
.long 0xce036d63 //eor3 v3.16b, v11.16b, v3.16b, v27.16b //AES block 8k+11 - result
cmp x0, x5 //LOOP CONTROL
stp q2, q3, [x2], #32 //AES block 8k+10, 8k+11 - store result
.long 0xce056da5 //eor3 v5.16b, v13.16b, v5.16b, v27.16b //AES block 8k+13 - result
mov v2.16b, v23.16b //CTR block 8k+18
stp q4, q5, [x2], #32 //AES block 8k+12, 8k+13 - store result
rev32 v4.16b, v30.16b //CTR block 8k+20
add v30.4s, v30.4s, v31.4s //CTR block 8k+20
stp q6, q7, [x2], #32 //AES block 8k+14, 8k+15 - store result
mov v3.16b, v25.16b //CTR block 8k+19
b.lt L128_dec_main_loop
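// --------------------------------------------------------------------------
// Prepretail (editorial summary): a final pass that folds the eight most
// recently loaded ciphertext blocks (still in v8..v15) into the GHASH
// accumulator while running the AES rounds for the eight pending counter
// blocks in v0..v7, without loading or storing any further data; the tail
// code below then consumes those keystream blocks for the last 1..128 bytes.
// --------------------------------------------------------------------------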
L128_dec_prepretail: //PREPRETAIL
rev64 v11.16b, v11.16b //GHASH block 8k+3
ext v19.16b, v19.16b, v19.16b, #8 //PRE 0
rev64 v8.16b, v8.16b //GHASH block 8k
rev64 v10.16b, v10.16b //GHASH block 8k+2
rev32 v5.16b, v30.16b //CTR block 8k+13
ldp q26, q27, [x11, #0] //load rk0, rk1
ldr q23, [x6, #144] //load h7l | h7h
ldr q25, [x6, #176] //load h8l | h8h
eor v8.16b, v8.16b, v19.16b //PRE 1
rev64 v9.16b, v9.16b //GHASH block 8k+1
add v30.4s, v30.4s, v31.4s //CTR block 8k+13
ldr q20, [x6, #96] //load h5l | h5h
ldr q22, [x6, #128] //load h6l | h6h
rev64 v13.16b, v13.16b //GHASH block 8k+5
rev64 v12.16b, v12.16b //GHASH block 8k+4
rev64 v14.16b, v14.16b //GHASH block 8k+6
ldr q21, [x6, #112] //load h6k | h5k
ldr q24, [x6, #160] //load h8k | h7k
rev32 v6.16b, v30.16b //CTR block 8k+14
add v30.4s, v30.4s, v31.4s //CTR block 8k+14
pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high
pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low
pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high
trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high
pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low
pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 0
eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 0
eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid
pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low
rev32 v7.16b, v30.16b //CTR block 8k+15
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 0
.long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high
trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 0
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 0
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 0
pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid
pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid
pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 1
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 0
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 0
eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low
eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 1
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 1
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 1
ldp q28, q26, [x11, #32] //load rk2, rk3
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low
pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid
ldr q23, [x6, #48] //load h3l | h3h
ldr q25, [x6, #80] //load h4l | h4h
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 1
pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 1
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 1
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 1
ldr q20, [x6] //load h1l | h1h
ldr q22, [x6, #32] //load h2l | h2h
.long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 2
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 2
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 2
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 2
trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 2
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 2
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 2
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 2
pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high
pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low
trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
ldp q27, q28, [x11, #64] //load rk4, rk5
rev64 v15.16b, v15.16b //GHASH block 8k+7
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 3
ldr q21, [x6, #16] //load h2k | h1k
ldr q24, [x6, #64] //load h4k | h3k
pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high
pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 3
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 3
trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high
pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low
trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 3
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 3
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 3
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 3
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 3
eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
.long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 4
eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 4
pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 4
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 4
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 4
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 4
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 4
pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid
pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high
pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid
pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid
ldp q26, q27, [x11, #96] //load rk6, rk7
.long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 5
ldr d16, [x10] //MODULO - load modulo constant
pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low
.long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 5
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 5
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 5
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 5
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 5
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 5
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 5
.long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 6
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 6
.long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 6
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 6
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 6
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 6
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 6
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 7
.long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - Karatsuba tidy up
ldp q28, q26, [x11, #128] //load rk8, rk9
pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 7
ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 7
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 7
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 7
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 7
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 7
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 7
.long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
ldr q27, [x11, #160] //load rk10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 8
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 8
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 8
ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 8
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 8
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 8
aese v6.16b, v26.16b //AES block 8k+14 - round 9
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 8
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 8
.long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low
add v30.4s, v30.4s, v31.4s //CTR block 8k+15
aese v2.16b, v26.16b //AES block 8k+10 - round 9
aese v3.16b, v26.16b //AES block 8k+11 - round 9
aese v5.16b, v26.16b //AES block 8k+13 - round 9
aese v0.16b, v26.16b //AES block 8k+8 - round 9
aese v4.16b, v26.16b //AES block 8k+12 - round 9
aese v1.16b, v26.16b //AES block 8k+9 - round 9
aese v7.16b, v26.16b //AES block 8k+15 - round 9
L128_dec_tail: //TAIL
mov v29.16b, v27.16b
sub x5, x4, x0 //number of bytes left to process (end of input minus current input pointer)
cmp x5, #112
ldp q24, q25, [x6, #160] //load h8k | h7k
ldr q9, [x0], #16 //AES block 8k+8 - load ciphertext
ldp q20, q21, [x6, #96] //load h5l | h5h
ext v16.16b, v19.16b, v19.16b, #8 //prepare final partial tag
ldp q22, q23, [x6, #128] //load h6l | h6h
.long 0xce00752c //eor3 v12.16b, v9.16b, v0.16b, v29.16b //AES block 8k+8 - result
b.gt L128_dec_blocks_more_than_7
cmp x5, #96
mov v7.16b, v6.16b
movi v19.8b, #0
movi v17.8b, #0
mov v6.16b, v5.16b
mov v5.16b, v4.16b
mov v4.16b, v3.16b
mov v3.16b, v2.16b
mov v2.16b, v1.16b
movi v18.8b, #0
sub v30.4s, v30.4s, v31.4s
b.gt L128_dec_blocks_more_than_6
cmp x5, #80
sub v30.4s, v30.4s, v31.4s
mov v7.16b, v6.16b
mov v6.16b, v5.16b
mov v5.16b, v4.16b
mov v4.16b, v3.16b
mov v3.16b, v1.16b
b.gt L128_dec_blocks_more_than_5
cmp x5, #64
mov v7.16b, v6.16b
mov v6.16b, v5.16b
mov v5.16b, v4.16b
mov v4.16b, v1.16b
sub v30.4s, v30.4s, v31.4s
b.gt L128_dec_blocks_more_than_4
sub v30.4s, v30.4s, v31.4s
mov v7.16b, v6.16b
mov v6.16b, v5.16b
mov v5.16b, v1.16b
cmp x5, #48
b.gt L128_dec_blocks_more_than_3
sub v30.4s, v30.4s, v31.4s
mov v7.16b, v6.16b
cmp x5, #32
ldr q24, [x6, #64] //load h4k | h3k
mov v6.16b, v1.16b
b.gt L128_dec_blocks_more_than_2
cmp x5, #16
mov v7.16b, v1.16b
sub v30.4s, v30.4s, v31.4s
b.gt L128_dec_blocks_more_than_1
sub v30.4s, v30.4s, v31.4s
ldr q21, [x6, #16] //load h2k | h1k
b L128_dec_blocks_less_than_1
L128_dec_blocks_more_than_7: //blocks left > 7
rev64 v8.16b, v9.16b //GHASH final-7 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v18.d[0], v24.d[1] //GHASH final-7 block - mid
pmull v19.1q, v8.1d, v25.1d //GHASH final-7 block - low
ins v27.d[0], v8.d[1] //GHASH final-7 block - mid
movi v16.8b, #0 //suppress further partial tag feed in
ldr q9, [x0], #16 //AES final-6 block - load ciphertext
eor v27.8b, v27.8b, v8.8b //GHASH final-7 block - mid
pmull2 v17.1q, v8.2d, v25.2d //GHASH final-7 block - high
st1 { v12.16b}, [x2], #16 //AES final-7 block - store result
.long 0xce01752c //eor3 v12.16b, v9.16b, v1.16b, v29.16b //AES final-6 block - result
pmull v18.1q, v27.1d, v18.1d //GHASH final-7 block - mid
L128_dec_blocks_more_than_6: //blocks left > 6
rev64 v8.16b, v9.16b //GHASH final-6 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-6 block - mid
eor v27.8b, v27.8b, v8.8b //GHASH final-6 block - mid
pmull v26.1q, v8.1d, v23.1d //GHASH final-6 block - low
ldr q9, [x0], #16 //AES final-5 block - load ciphertext
movi v16.8b, #0 //suppress further partial tag feed in
pmull v27.1q, v27.1d, v24.1d //GHASH final-6 block - mid
st1 { v12.16b}, [x2], #16 //AES final-6 block - store result
pmull2 v28.1q, v8.2d, v23.2d //GHASH final-6 block - high
eor v19.16b, v19.16b, v26.16b //GHASH final-6 block - low
eor v17.16b, v17.16b, v28.16b //GHASH final-6 block - high
eor v18.16b, v18.16b, v27.16b //GHASH final-6 block - mid
.long 0xce02752c //eor3 v12.16b, v9.16b, v2.16b, v29.16b //AES final-5 block - result
L128_dec_blocks_more_than_5: //blocks left > 5
rev64 v8.16b, v9.16b //GHASH final-5 block
ldr q9, [x0], #16 //AES final-4 block - load ciphertext
st1 { v12.16b}, [x2], #16 //AES final-5 block - store result
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-5 block - mid
.long 0xce03752c //eor3 v12.16b, v9.16b, v3.16b, v29.16b //AES final-4 block - result
eor v27.8b, v27.8b, v8.8b //GHASH final-5 block - mid
ins v27.d[1], v27.d[0] //GHASH final-5 block - mid
pmull v26.1q, v8.1d, v22.1d //GHASH final-5 block - low
movi v16.8b, #0 //suppress further partial tag feed in
pmull2 v27.1q, v27.2d, v21.2d //GHASH final-5 block - mid
pmull2 v28.1q, v8.2d, v22.2d //GHASH final-5 block - high
eor v19.16b, v19.16b, v26.16b //GHASH final-5 block - low
eor v18.16b, v18.16b, v27.16b //GHASH final-5 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-5 block - high
L128_dec_blocks_more_than_4: //blocks left > 4
rev64 v8.16b, v9.16b //GHASH final-4 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ldr q9, [x0], #16 //AES final-3 block - load ciphertext
ins v27.d[0], v8.d[1] //GHASH final-4 block - mid
movi v16.8b, #0 //suppress further partial tag feed in
pmull2 v28.1q, v8.2d, v20.2d //GHASH final-4 block - high
pmull v26.1q, v8.1d, v20.1d //GHASH final-4 block - low
eor v17.16b, v17.16b, v28.16b //GHASH final-4 block - high
st1 { v12.16b}, [x2], #16 //AES final-4 block - store result
eor v27.8b, v27.8b, v8.8b //GHASH final-4 block - mid
.long 0xce04752c //eor3 v12.16b, v9.16b, v4.16b, v29.16b //AES final-3 block - result
eor v19.16b, v19.16b, v26.16b //GHASH final-4 block - low
pmull v27.1q, v27.1d, v21.1d //GHASH final-4 block - mid
eor v18.16b, v18.16b, v27.16b //GHASH final-4 block - mid
L128_dec_blocks_more_than_3: //blocks left > 3
st1 { v12.16b}, [x2], #16 //AES final-3 block - store result
rev64 v8.16b, v9.16b //GHASH final-3 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-3 block - mid
ldr q25, [x6, #80] //load h4l | h4h
ldr q24, [x6, #64] //load h4k | h3k
eor v27.8b, v27.8b, v8.8b //GHASH final-3 block - mid
ldr q9, [x0], #16 //AES final-2 block - load ciphertext
ins v27.d[1], v27.d[0] //GHASH final-3 block - mid
pmull v26.1q, v8.1d, v25.1d //GHASH final-3 block - low
pmull2 v28.1q, v8.2d, v25.2d //GHASH final-3 block - high
movi v16.8b, #0 //suppress further partial tag feed in
.long 0xce05752c //eor3 v12.16b, v9.16b, v5.16b, v29.16b //AES final-2 block - result
eor v19.16b, v19.16b, v26.16b //GHASH final-3 block - low
pmull2 v27.1q, v27.2d, v24.2d //GHASH final-3 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-3 block - high
eor v18.16b, v18.16b, v27.16b //GHASH final-3 block - mid
L128_dec_blocks_more_than_2: //blocks left > 2
rev64 v8.16b, v9.16b //GHASH final-2 block
st1 { v12.16b}, [x2], #16 //AES final-2 block - store result
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ldr q23, [x6, #48] //load h3l | h3h
movi v16.8b, #0 //suppress further partial tag feed in
ins v27.d[0], v8.d[1] //GHASH final-2 block - mid
eor v27.8b, v27.8b, v8.8b //GHASH final-2 block - mid
pmull v26.1q, v8.1d, v23.1d //GHASH final-2 block - low
pmull2 v28.1q, v8.2d, v23.2d //GHASH final-2 block - high
pmull v27.1q, v27.1d, v24.1d //GHASH final-2 block - mid
ldr q9, [x0], #16 //AES final-1 block - load ciphertext
eor v18.16b, v18.16b, v27.16b //GHASH final-2 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-2 block - low
.long 0xce06752c //eor3 v12.16b, v9.16b, v6.16b, v29.16b //AES final-1 block - result
eor v17.16b, v17.16b, v28.16b //GHASH final-2 block - high
L128_dec_blocks_more_than_1: //blocks left > 1
st1 { v12.16b}, [x2], #16 //AES final-1 block - store result
rev64 v8.16b, v9.16b //GHASH final-1 block
ldr q22, [x6, #32] //load h2l | h2h
eor v8.16b, v8.16b, v16.16b //feed in partial tag
movi v16.8b, #0 //suppress further partial tag feed in
ins v27.d[0], v8.d[1] //GHASH final-1 block - mid
ldr q9, [x0], #16 //AES final block - load ciphertext
pmull2 v28.1q, v8.2d, v22.2d //GHASH final-1 block - high
eor v27.8b, v27.8b, v8.8b //GHASH final-1 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-1 block - high
ldr q21, [x6, #16] //load h2k | h1k
ins v27.d[1], v27.d[0] //GHASH final-1 block - mid
.long 0xce07752c //eor3 v12.16b, v9.16b, v7.16b, v29.16b //AES final block - result
pmull v26.1q, v8.1d, v22.1d //GHASH final-1 block - low
pmull2 v27.1q, v27.2d, v21.2d //GHASH final-1 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-1 block - low
eor v18.16b, v18.16b, v27.16b //GHASH final-1 block - mid
L128_dec_blocks_less_than_1: //blocks left <= 1
and x1, x1, #127 //bit_length %= 128
sub x1, x1, #128 //bit_length -= 128
neg x1, x1 //bit_length = 128 - #bits in input (in range [1,128])
mvn x7, xzr //temp0_x = 0xffffffffffffffff
and x1, x1, #127 //bit_length %= 128
lsr x7, x7, x1 //temp0_x is mask for top 64b of last block
cmp x1, #64
mvn x8, xzr //temp1_x = 0xffffffffffffffff
csel x13, x8, x7, lt
csel x14, x7, xzr, lt
mov v0.d[1], x14
mov v0.d[0], x13 //ctr0b is mask for last block
ldr q20, [x6] //load h1l | h1h
ld1 { v26.16b}, [x2] //load existing bytes where the possibly partial last block is to be stored
and v9.16b, v9.16b, v0.16b //possibly partial last block has zeroes in highest bits
rev64 v8.16b, v9.16b //GHASH final block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
pmull2 v28.1q, v8.2d, v20.2d //GHASH final block - high
ins v16.d[0], v8.d[1] //GHASH final block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final block - high
eor v16.8b, v16.8b, v8.8b //GHASH final block - mid
bif v12.16b, v26.16b, v0.16b //insert existing bytes in top end of result before storing
pmull v16.1q, v16.1d, v21.1d //GHASH final block - mid
st1 { v12.16b}, [x2] //store all 16B
pmull v26.1q, v8.1d, v20.1d //GHASH final block - low
eor v18.16b, v18.16b, v16.16b //GHASH final block - mid
ldr d16, [x10] //MODULO - load modulo constant
eor v19.16b, v19.16b, v26.16b //GHASH final block - low
eor v14.16b, v17.16b, v19.16b //MODULO - Karatsuba tidy up
pmull v21.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
ext v17.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
eor v18.16b, v18.16b, v14.16b //MODULO - Karatsuba tidy up
.long 0xce115652 //eor3 v18.16b, v18.16b, v17.16b, v21.16b //MODULO - fold into mid
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
ext v18.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
.long 0xce124673 //eor3 v19.16b, v19.16b, v18.16b, v17.16b //MODULO - fold into low
ext v19.16b, v19.16b, v19.16b, #8
rev64 v19.16b, v19.16b
st1 { v19.16b }, [x3]
rev32 v30.16b, v30.16b
str q30, [x16] //store the updated counter
mov x0, x9
ldp d10, d11, [sp, #16]
ldp d12, d13, [sp, #32]
ldp d14, d15, [sp, #48]
ldp d8, d9, [sp], #80
ret
L128_dec_ret:
mov w0, #0x0
ret
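// --------------------------------------------------------------------------
// aesv8_gcm_8x_enc_192 (editorial note): same structure as the 128-bit
// encrypt kernel above, but with an AES-192 schedule: twelve AESE rounds
// (round keys rk0..rk11, the last one without AESMC) followed by a final
// XOR with rk12, matching the loads of rk10/rk11 at offset #160 and rk12 at
// offset #192 below.
// --------------------------------------------------------------------------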
.globl aesv8_gcm_8x_enc_192
.def aesv8_gcm_8x_enc_192
.type 32
.endef
.align 4
aesv8_gcm_8x_enc_192:
AARCH64_VALID_CALL_TARGET
cbz x1, L192_enc_ret
stp d8, d9, [sp, #-80]!
lsr x9, x1, #3
mov x16, x4
mov x11, x5
stp d10, d11, [sp, #16]
stp d12, d13, [sp, #32]
stp d14, d15, [sp, #48]
mov x5, #0xc200000000000000
stp x5, xzr, [sp, #64]
add x10, sp, #64
mov x5, x9
ld1 { v0.16b}, [x16] //CTR block 0
mov x15, #0x100000000 //set up counter increment
movi v31.16b, #0x0
mov v31.d[1], x15
rev32 v30.16b, v0.16b //set up reversed counter
add v30.4s, v30.4s, v31.4s //CTR block 0
rev32 v1.16b, v30.16b //CTR block 1
add v30.4s, v30.4s, v31.4s //CTR block 1
rev32 v2.16b, v30.16b //CTR block 2
add v30.4s, v30.4s, v31.4s //CTR block 2
rev32 v3.16b, v30.16b //CTR block 3
add v30.4s, v30.4s, v31.4s //CTR block 3
rev32 v4.16b, v30.16b //CTR block 4
add v30.4s, v30.4s, v31.4s //CTR block 4
sub x5, x5, #1 //byte_len - 1
and x5, x5, #0xffffffffffffff80 //number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
rev32 v5.16b, v30.16b //CTR block 5
add v30.4s, v30.4s, v31.4s //CTR block 5
ldp q26, q27, [x11, #0] //load rk0, rk1
add x5, x5, x0
rev32 v6.16b, v30.16b //CTR block 6
add v30.4s, v30.4s, v31.4s //CTR block 6
rev32 v7.16b, v30.16b //CTR block 7
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 0
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 0
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 0
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 0
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 0
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 0
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 0
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 0
ldp q28, q26, [x11, #32] //load rk2, rk3
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 1
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 1
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 1
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 1
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 1
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 2
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 1
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 1
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 1
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 2
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 2
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 2
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 2
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 2
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 2
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 2
ldp q27, q28, [x11, #64] //load rk4, rk5
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 3
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 3
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 3
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 3
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 3
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 3
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 3
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 4
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 4
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 3
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 4
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 4
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 4
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 4
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 4
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 5
ldp q26, q27, [x11, #96] //load rk6, rk7
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 5
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 5
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 5
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 5
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 5
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 5
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 5
add v30.4s, v30.4s, v31.4s //CTR block 7
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 6
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 6
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 6
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 6
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 6
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 6
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 6
ldp q28, q26, [x11, #128] //load rk8, rk9
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 7
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 7
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 7
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 7
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 7
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 7
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 7
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 7
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 8
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 8
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 8
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 8
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 8
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 8
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 8
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 8
add x4, x0, x1, lsr #3 //end_input_ptr
cmp x0, x5 //check if we have <= 8 blocks
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 9
ld1 { v19.16b}, [x3]
ext v19.16b, v19.16b, v19.16b, #8
rev64 v19.16b, v19.16b
ldp q27, q28, [x11, #160] //load rk10, rk11
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 9
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 9
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 9
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 9
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 9
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 9
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 10
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 10
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 10
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 10
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 10
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 10
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 10
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 10
aese v6.16b, v28.16b //AES block 6 - round 11
aese v3.16b, v28.16b //AES block 3 - round 11
aese v4.16b, v28.16b //AES block 4 - round 11
aese v7.16b, v28.16b //AES block 7 - round 11
ldr q26, [x11, #192] //load rk12
aese v1.16b, v28.16b //AES block 1 - round 11
aese v5.16b, v28.16b //AES block 5 - round 11
aese v2.16b, v28.16b //AES block 2 - round 11
aese v0.16b, v28.16b //AES block 0 - round 11
b.ge L192_enc_tail //handle tail
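//encrypt the first 8 blocks up front: keystream in v0-v7 is combined with the
//plaintext and the last round key rk12 via eor3, then stored; GHASH of these
//ciphertext blocks is deferred to the first main loop iteration (or to the
//prepretail path if no further full blocks remain)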
ldp q8, q9, [x0], #32 //AES block 0, 1 - load plaintext
ldp q10, q11, [x0], #32 //AES block 2, 3 - load plaintext
ldp q12, q13, [x0], #32 //AES block 4, 5 - load plaintext
ldp q14, q15, [x0], #32 //AES block 6, 7 - load plaintext
.long 0xce006908 //eor3 v8.16b, v8.16b, v0.16b, v26.16b //AES block 0 - result
rev32 v0.16b, v30.16b //CTR block 8
add v30.4s, v30.4s, v31.4s //CTR block 8
.long 0xce03696b //eor3 v11.16b, v11.16b, v3.16b, v26.16b //AES block 3 - result
.long 0xce016929 //eor3 v9.16b, v9.16b, v1.16b, v26.16b //AES block 1 - result
rev32 v1.16b, v30.16b //CTR block 9
add v30.4s, v30.4s, v31.4s //CTR block 9
.long 0xce04698c //eor3 v12.16b, v12.16b, v4.16b, v26.16b //AES block 4 - result
.long 0xce0569ad //eor3 v13.16b, v13.16b, v5.16b, v26.16b //AES block 5 - result
.long 0xce0769ef //eor3 v15.16b, v15.16b, v7.16b, v26.16b //AES block 7 - result
stp q8, q9, [x2], #32 //AES block 0, 1 - store result
.long 0xce02694a //eor3 v10.16b, v10.16b, v2.16b, v26.16b //AES block 2 - result
rev32 v2.16b, v30.16b //CTR block 10
add v30.4s, v30.4s, v31.4s //CTR block 10
stp q10, q11, [x2], #32 //AES block 2, 3 - store result
cmp x0, x5 //check if we have <= 8 blocks
rev32 v3.16b, v30.16b //CTR block 11
add v30.4s, v30.4s, v31.4s //CTR block 11
.long 0xce0669ce //eor3 v14.16b, v14.16b, v6.16b, v26.16b //AES block 6 - result
stp q12, q13, [x2], #32 //AES block 4, 5 - store result
rev32 v4.16b, v30.16b //CTR block 12
stp q14, q15, [x2], #32 //AES block 6, 7 - store result
add v30.4s, v30.4s, v31.4s //CTR block 12
b.ge L192_enc_prepretail //do prepretail
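//main loop: each iteration GHASHes the 8 ciphertext blocks produced by the
//previous iteration (Karatsuba-style low/mid/high accumulation in v19/v18/v17,
//folded back with the GHASH modulo constant) while the next 8 counter blocks
//run through the 12 AES-192 rounds; 8 new plaintext blocks are then loaded,
//encrypted and stored, with AES and pmull instructions interleaved to hide
//latency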
L192_enc_main_loop: //main loop start
rev64 v12.16b, v12.16b //GHASH block 8k+4 (t0, t1, and t2 free)
ldp q26, q27, [x11, #0] //load rk0, rk1
rev64 v10.16b, v10.16b //GHASH block 8k+2
rev32 v5.16b, v30.16b //CTR block 8k+13
add v30.4s, v30.4s, v31.4s //CTR block 8k+13
ldr q23, [x6, #144] //load h7l | h7h
ldr q25, [x6, #176] //load h8l | h8h
ext v19.16b, v19.16b, v19.16b, #8 //PRE 0
rev64 v8.16b, v8.16b //GHASH block 8k
ldr q20, [x6, #96] //load h5l | h5h
ldr q22, [x6, #128] //load h6l | h6h
rev64 v9.16b, v9.16b //GHASH block 8k+1
rev32 v6.16b, v30.16b //CTR block 8k+14
add v30.4s, v30.4s, v31.4s //CTR block 8k+14
eor v8.16b, v8.16b, v19.16b //PRE 1
rev64 v11.16b, v11.16b //GHASH block 8k+3
rev64 v13.16b, v13.16b //GHASH block 8k+5 (t0, t1, t2 and t3 free)
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 0
rev32 v7.16b, v30.16b //CTR block 8k+15
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 0
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 0
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 0
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 0
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 0
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 0
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 0
ldp q28, q26, [x11, #32] //load rk2, rk3
pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 1
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 1
pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high
pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low
trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 1
ldr q21, [x6, #112] //load h6k | h5k
ldr q24, [x6, #160] //load h8k | h7k
pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high
pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low
trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 1
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 1
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 1
eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 1
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 1
pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high
eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 2
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 2
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 2
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 2
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 2
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 3
.long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high
pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 2
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 3
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 2
trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 2
trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 3
ldp q27, q28, [x11, #64] //load rk4, rk5
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 3
eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low
ldr q23, [x6, #48] //load h3l | h3h
ldr q25, [x6, #80] //load h4l | h4h
pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid
pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid
pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 3
eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 3
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 4
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 4
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 3
pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 4
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 3
pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 4
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 4
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 4
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 4
.long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 5
ldr q20, [x6] //load h1l | h1h
ldr q22, [x6, #32] //load h2l | h2h
ldp q26, q27, [x11, #96] //load rk6, rk7
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 5
rev64 v15.16b, v15.16b //GHASH block 8k+7 (t0, t1, t2 and t3 free)
rev64 v14.16b, v14.16b //GHASH block 8k+6 (t0, t1, and t2 free)
pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high
pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 5
trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 5
ldr q21, [x6, #16] //load h2k | h1k
ldr q24, [x6, #64] //load h4k | h3k
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 5
pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high
eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 5
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 5
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 5
pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 6
trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 6
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 6
pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high
pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low
trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 6
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 6
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 6
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 6
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 7
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 6
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 7
eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid
ldp q28, q26, [x11, #128] //load rk8, rk9
pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 7
pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 7
.long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 7
add v30.4s, v30.4s, v31.4s //CTR block 8k+15
ldr d16, [x10] //MODULO - load modulo constant
.long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 7
pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid
pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 7
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 8
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 8
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 8
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 8
.long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 7
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 8
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 8
pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 8
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 8
ldp q27, q28, [x11, #160] //load rk10, rk11
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low
rev32 v20.16b, v30.16b //CTR block 8k+16
add v30.4s, v30.4s, v31.4s //CTR block 8k+16
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 9
.long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
.long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 9
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 9
ldp q8, q9, [x0], #32 //AES block 8k+8, 8k+9 - load plaintext
pmull v21.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
rev32 v22.16b, v30.16b //CTR block 8k+17
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 9
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 9
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 9
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 9
.long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 9
add v30.4s, v30.4s, v31.4s //CTR block 8k+17
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 10
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 10
ldr q26, [x11, #192] //load rk12
ext v29.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 10
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 10
ldp q10, q11, [x0], #32 //AES block 8k+10, 8k+11 - load plaintext
aese v4.16b, v28.16b //AES block 8k+12 - round 11
.long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
ldp q12, q13, [x0], #32 //AES block 8k+12, 8k+13 - load plaintext
ldp q14, q15, [x0], #32 //AES block 8k+14, 8k+15 - load plaintext
aese v2.16b, v28.16b //AES block 8k+10 - round 11
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 10
rev32 v23.16b, v30.16b //CTR block 8k+18
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 10
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 10
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 10
aese v5.16b, v28.16b //AES block 8k+13 - round 11
add v30.4s, v30.4s, v31.4s //CTR block 8k+18
aese v7.16b, v28.16b //AES block 8k+15 - round 11
aese v0.16b, v28.16b //AES block 8k+8 - round 11
.long 0xce04698c //eor3 v12.16b, v12.16b, v4.16b, v26.16b //AES block 8k+12 - result
aese v6.16b, v28.16b //AES block 8k+14 - round 11
aese v3.16b, v28.16b //AES block 8k+11 - round 11
aese v1.16b, v28.16b //AES block 8k+9 - round 11
rev32 v25.16b, v30.16b //CTR block 8k+19
add v30.4s, v30.4s, v31.4s //CTR block 8k+19
.long 0xce0769ef //eor3 v15.16b, v15.16b, v7.16b, v26.16b //AES block 8k+15 - result
.long 0xce02694a //eor3 v10.16b, v10.16b, v2.16b, v26.16b //AES block 8k+10 - result
.long 0xce006908 //eor3 v8.16b, v8.16b, v0.16b, v26.16b //AES block 8k+8 - result
mov v2.16b, v23.16b //CTR block 8k+18
.long 0xce016929 //eor3 v9.16b, v9.16b, v1.16b, v26.16b //AES block 8k+9 - result
mov v1.16b, v22.16b //CTR block 8k+17
stp q8, q9, [x2], #32 //AES block 8k+8, 8k+9 - store result
ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
.long 0xce0669ce //eor3 v14.16b, v14.16b, v6.16b, v26.16b //AES block 8k+14 - result
mov v0.16b, v20.16b //CTR block 8k+16
rev32 v4.16b, v30.16b //CTR block 8k+20
add v30.4s, v30.4s, v31.4s //CTR block 8k+20
.long 0xce0569ad //eor3 v13.16b, v13.16b, v5.16b, v26.16b //AES block 8k+13 - result
.long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low
.long 0xce03696b //eor3 v11.16b, v11.16b, v3.16b, v26.16b //AES block 8k+11 - result
mov v3.16b, v25.16b //CTR block 8k+19
stp q10, q11, [x2], #32 //AES block 8k+10, 8k+11 - store result
stp q12, q13, [x2], #32 //AES block 8k+12, 8k+13 - store result
cmp x0, x5 //LOOP CONTROL
stp q14, q15, [x2], #32 //AES block 8k+14, 8k+15 - store result
b.lt L192_enc_main_loop
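//prepretail: GHASH the final 8 full ciphertext blocks still held in v8-v15
//and finish the keystream for up to 8 further blocks in v0-v7, without
//loading or storing any data; the tail code below consumes that keystream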
L192_enc_prepretail: //PREPRETAIL
rev32 v5.16b, v30.16b //CTR block 8k+13
ldp q26, q27, [x11, #0] //load rk0, rk1
add v30.4s, v30.4s, v31.4s //CTR block 8k+13
ldr q23, [x6, #144] //load h7l | h7h
ldr q25, [x6, #176] //load h8l | h8h
rev64 v8.16b, v8.16b //GHASH block 8k
ext v19.16b, v19.16b, v19.16b, #8 //PRE 0
rev32 v6.16b, v30.16b //CTR block 8k+14
add v30.4s, v30.4s, v31.4s //CTR block 8k+14
ldr q21, [x6, #112] //load h6k | h5k
ldr q24, [x6, #160] //load h8k | h7k
rev64 v11.16b, v11.16b //GHASH block 8k+3
rev64 v10.16b, v10.16b //GHASH block 8k+2
ldr q20, [x6, #96] //load h5l | h5h
ldr q22, [x6, #128] //load h6l | h6h
eor v8.16b, v8.16b, v19.16b //PRE 1
rev32 v7.16b, v30.16b //CTR block 8k+15
rev64 v9.16b, v9.16b //GHASH block 8k+1
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 0
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 0
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 0
pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 0
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 0
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 0
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 0
pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 1
pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low
trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 0
ldp q28, q26, [x11, #32] //load rk2, rk3
pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low
eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 1
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 1
eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 1
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 1
pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high
pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 1
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 1
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 1
pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 2
eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low
pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 2
.long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 3
trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 2
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 2
pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid
trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 2
rev64 v13.16b, v13.16b //GHASH block 8k+5 (t0, t1, t2 and t3 free)
rev64 v14.16b, v14.16b //GHASH block 8k+6 (t0, t1, and t2 free)
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 2
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 2
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 2
eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid
ldp q27, q28, [x11, #64] //load rk4, rk5
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 3
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 3
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 3
eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 3
ldr q23, [x6, #48] //load h3l | h3h
ldr q25, [x6, #80] //load h4l | h4h
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 3
pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid
ldr q20, [x6] //load h1l | h1h
ldr q22, [x6, #32] //load h2l | h2h
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 3
rev64 v12.16b, v12.16b //GHASH block 8k+4 (t0, t1, and t2 free)
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 3
pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 4
trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 4
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 4
.long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 4
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 4
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 4
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 4
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 5
rev64 v15.16b, v15.16b //GHASH block 8k+7 (t0, t1, t2 and t3 free)
ldr q21, [x6, #16] //load h2k | h1k
ldr q24, [x6, #64] //load h4k | h3k
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 5
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 5
ldp q26, q27, [x11, #96] //load rk6, rk7
pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high
pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high
pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 5
trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high
pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low
pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low
trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 5
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 6
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 5
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 5
eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 5
pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid
pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 6
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 6
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 7
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 6
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 6
.long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 6
.long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 7
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 6
ldr d16, [x10] //MODULO - load modulo constant
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 6
pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 7
.long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low
pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high
pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid
pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 7
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 7
ldp q28, q26, [x11, #128] //load rk8, rk9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 7
.long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low
.long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high
.long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up
ext v29.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 7
pmull v21.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 8
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 8
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 7
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 8
.long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 8
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 9
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 8
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 8
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 8
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 8
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 9
ldp q27, q28, [x11, #160] //load rk10, rk11
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 9
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 9
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 9
ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 9
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 9
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 9
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
ldr q26, [x11, #192] //load rk12
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 10
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 10
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 10
.long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 10
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 10
aese v1.16b, v28.16b //AES block 8k+9 - round 11
aese v7.16b, v28.16b //AES block 8k+15 - round 11
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 10
aese v3.16b, v28.16b //AES block 8k+11 - round 11
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 10
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 10
add v30.4s, v30.4s, v31.4s //CTR block 8k+15
aese v2.16b, v28.16b //AES block 8k+10 - round 11
aese v0.16b, v28.16b //AES block 8k+8 - round 11
aese v6.16b, v28.16b //AES block 8k+14 - round 11
aese v4.16b, v28.16b //AES block 8k+12 - round 11
aese v5.16b, v28.16b //AES block 8k+13 - round 11
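//tail: 1 to 8 blocks remain, the last possibly partial; the compare chain
//below shuffles the unused keystream registers down so that v7 ends up
//holding the keystream for the final block (rewinding the counter once per
//unused block), and each full block is encrypted, stored and GHASHed in turn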
L192_enc_tail: //TAIL
ldp q20, q21, [x6, #96] //load h5l | h5h, h6k | h5k
sub x5, x4, x0 //main_end_input_ptr is number of bytes left to process
ldr q8, [x0], #16 //AES block 8k+8 - load plaintext
ldp q24, q25, [x6, #160] //load h8k | h7k, h8l | h8h
mov v29.16b, v26.16b //preserve last round key rk12 (v26 is reused as scratch below)
ldp q22, q23, [x6, #128] //load h6l | h6h, h7l | h7h
cmp x5, #112
.long 0xce007509 //eor3 v9.16b, v8.16b, v0.16b, v29.16b //AES block 8k+8 - result
ext v16.16b, v19.16b, v19.16b, #8 //prepare final partial tag
b.gt L192_enc_blocks_more_than_7
cmp x5, #96
mov v7.16b, v6.16b
movi v17.8b, #0
mov v6.16b, v5.16b
movi v19.8b, #0
sub v30.4s, v30.4s, v31.4s
mov v5.16b, v4.16b
mov v4.16b, v3.16b
mov v3.16b, v2.16b
mov v2.16b, v1.16b
movi v18.8b, #0
b.gt L192_enc_blocks_more_than_6
mov v7.16b, v6.16b
cmp x5, #80
mov v6.16b, v5.16b
mov v5.16b, v4.16b
mov v4.16b, v3.16b
mov v3.16b, v1.16b
sub v30.4s, v30.4s, v31.4s
b.gt L192_enc_blocks_more_than_5
cmp x5, #64
sub v30.4s, v30.4s, v31.4s
mov v7.16b, v6.16b
mov v6.16b, v5.16b
mov v5.16b, v4.16b
mov v4.16b, v1.16b
b.gt L192_enc_blocks_more_than_4
mov v7.16b, v6.16b
mov v6.16b, v5.16b
mov v5.16b, v1.16b
sub v30.4s, v30.4s, v31.4s
cmp x5, #48
b.gt L192_enc_blocks_more_than_3
mov v7.16b, v6.16b
mov v6.16b, v1.16b
sub v30.4s, v30.4s, v31.4s
ldr q24, [x6, #64] //load h4k | h3k
cmp x5, #32
b.gt L192_enc_blocks_more_than_2
sub v30.4s, v30.4s, v31.4s
cmp x5, #16
mov v7.16b, v1.16b
b.gt L192_enc_blocks_more_than_1
sub v30.4s, v30.4s, v31.4s
ldr q21, [x6, #16] //load h2k | h1k
b L192_enc_blocks_less_than_1
L192_enc_blocks_more_than_7: //blocks left > 7
st1 { v9.16b}, [x2], #16 //AES final-7 block - store result
rev64 v8.16b, v9.16b //GHASH final-7 block
ins v18.d[0], v24.d[1] //GHASH final-7 block - mid
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-7 block - mid
ldr q9, [x0], #16 //AES final-6 block - load plaintext
eor v27.8b, v27.8b, v8.8b //GHASH final-7 block - mid
movi v16.8b, #0 //suppress further partial tag feed in
pmull v19.1q, v8.1d, v25.1d //GHASH final-7 block - low
pmull2 v17.1q, v8.2d, v25.2d //GHASH final-7 block - high
pmull v18.1q, v27.1d, v18.1d //GHASH final-7 block - mid
.long 0xce017529 //eor3 v9.16b, v9.16b, v1.16b, v29.16b //AES final-6 block - result
L192_enc_blocks_more_than_6: //blocks left > 6
st1 { v9.16b}, [x2], #16 //AES final-6 block - store result
rev64 v8.16b, v9.16b //GHASH final-6 block
ldr q9, [x0], #16 //AES final-5 block - load plaintext
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-6 block - mid
pmull v26.1q, v8.1d, v23.1d //GHASH final-6 block - low
.long 0xce027529 //eor3 v9.16b, v9.16b, v2.16b, v29.16b //AES final-5 block - result
movi v16.8b, #0 //suppress further partial tag feed in
pmull2 v28.1q, v8.2d, v23.2d //GHASH final-6 block - high
eor v27.8b, v27.8b, v8.8b //GHASH final-6 block - mid
pmull v27.1q, v27.1d, v24.1d //GHASH final-6 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-6 block - high
eor v19.16b, v19.16b, v26.16b //GHASH final-6 block - low
eor v18.16b, v18.16b, v27.16b //GHASH final-6 block - mid
L192_enc_blocks_more_than_5: //blocks left > 5
st1 { v9.16b}, [x2], #16 //AES final-5 block - store result
rev64 v8.16b, v9.16b //GHASH final-5 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-5 block - mid
ldr q9, [x0], #16 //AES final-4 block - load plaintext
pmull2 v28.1q, v8.2d, v22.2d //GHASH final-5 block - high
eor v27.8b, v27.8b, v8.8b //GHASH final-5 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-5 block - high
ins v27.d[1], v27.d[0] //GHASH final-5 block - mid
pmull v26.1q, v8.1d, v22.1d //GHASH final-5 block - low
eor v19.16b, v19.16b, v26.16b //GHASH final-5 block - low
pmull2 v27.1q, v27.2d, v21.2d //GHASH final-5 block - mid
.long 0xce037529 //eor3 v9.16b, v9.16b, v3.16b, v29.16b //AES final-4 block - result
movi v16.8b, #0 //suppress further partial tag feed in
eor v18.16b, v18.16b, v27.16b //GHASH final-5 block - mid
L192_enc_blocks_more_than_4: //blocks left > 4
st1 { v9.16b}, [x2], #16 //AES final-4 block - store result
rev64 v8.16b, v9.16b //GHASH final-4 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ldr q9, [x0], #16 //AES final-3 block - load plaintext
pmull2 v28.1q, v8.2d, v20.2d //GHASH final-4 block - high
ins v27.d[0], v8.d[1] //GHASH final-4 block - mid
pmull v26.1q, v8.1d, v20.1d //GHASH final-4 block - low
eor v17.16b, v17.16b, v28.16b //GHASH final-4 block - high
eor v27.8b, v27.8b, v8.8b //GHASH final-4 block - mid
movi v16.8b, #0 //suppress further partial tag feed in
eor v19.16b, v19.16b, v26.16b //GHASH final-4 block - low
pmull v27.1q, v27.1d, v21.1d //GHASH final-4 block - mid
eor v18.16b, v18.16b, v27.16b //GHASH final-4 block - mid
.long 0xce047529 //eor3 v9.16b, v9.16b, v4.16b, v29.16b //AES final-3 block - result
L192_enc_blocks_more_than_3: //blocks left > 3
ldr q24, [x6, #64] //load h4k | h3k
st1 { v9.16b}, [x2], #16 //AES final-3 block - store result
rev64 v8.16b, v9.16b //GHASH final-3 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
movi v16.8b, #0 //suppress further partial tag feed in
ldr q9, [x0], #16 //AES final-2 block - load plaintext
ldr q25, [x6, #80] //load h4l | h4h
ins v27.d[0], v8.d[1] //GHASH final-3 block - mid
.long 0xce057529 //eor3 v9.16b, v9.16b, v5.16b, v29.16b //AES final-2 block - result
eor v27.8b, v27.8b, v8.8b //GHASH final-3 block - mid
ins v27.d[1], v27.d[0] //GHASH final-3 block - mid
pmull v26.1q, v8.1d, v25.1d //GHASH final-3 block - low
pmull2 v28.1q, v8.2d, v25.2d //GHASH final-3 block - high
pmull2 v27.1q, v27.2d, v24.2d //GHASH final-3 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-3 block - low
eor v18.16b, v18.16b, v27.16b //GHASH final-3 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-3 block - high
L192_enc_blocks_more_than_2: //blocks left > 2
st1 { v9.16b}, [x2], #16 //AES final-2 block - store result
rev64 v8.16b, v9.16b //GHASH final-2 block
ldr q23, [x6, #48] //load h3l | h3h
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ldr q9, [x0], #16 //AES final-1 block - load plaintext
ins v27.d[0], v8.d[1] //GHASH final-2 block - mid
eor v27.8b, v27.8b, v8.8b //GHASH final-2 block - mid
pmull v26.1q, v8.1d, v23.1d //GHASH final-2 block - low
pmull2 v28.1q, v8.2d, v23.2d //GHASH final-2 block - high
movi v16.8b, #0 //suppress further partial tag feed in
pmull v27.1q, v27.1d, v24.1d //GHASH final-2 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-2 block - low
eor v17.16b, v17.16b, v28.16b //GHASH final-2 block - high
eor v18.16b, v18.16b, v27.16b //GHASH final-2 block - mid
.long 0xce067529 //eor3 v9.16b, v9.16b, v6.16b, v29.16b //AES final-1 block - result
L192_enc_blocks_more_than_1: //blocks left > 1
ldr q22, [x6, #32] //load h2l | h2h
st1 { v9.16b}, [x2], #16 //AES final-1 block - store result
rev64 v8.16b, v9.16b //GHASH final-1 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-1 block - mid
pmull v26.1q, v8.1d, v22.1d //GHASH final-1 block - low
eor v19.16b, v19.16b, v26.16b //GHASH final-1 block - low
pmull2 v28.1q, v8.2d, v22.2d //GHASH final-1 block - high
eor v27.8b, v27.8b, v8.8b //GHASH final-1 block - mid
ldr q9, [x0], #16 //AES final block - load plaintext
ldr q21, [x6, #16] //load h2k | h1k
ins v27.d[1], v27.d[0] //GHASH final-1 block - mid
.long 0xce077529 //eor3 v9.16b, v9.16b, v7.16b, v29.16b //AES final block - result
pmull2 v27.1q, v27.2d, v21.2d //GHASH final-1 block - mid
movi v16.8b, #0 //suppress further partial tag feed in
eor v18.16b, v18.16b, v27.16b //GHASH final-1 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-1 block - high
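//final (possibly partial) block: build a byte mask from the remaining bit
//length, zero the unused high bytes of the encrypted block, merge the bytes
//already present at the destination back in with bif before storing 16 bytes,
//then fold the masked block into GHASH, perform the final reduction and store
//the updated counter and tag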
L192_enc_blocks_less_than_1: //blocks left <= 1
mvn x7, xzr //temp0_x = 0xffffffffffffffff
and x1, x1, #127 //bit_length %= 128
sub x1, x1, #128 //bit_length -= 128
neg x1, x1 //bit_length = 128 - #bits in input (in range [1,128])
and x1, x1, #127 //bit_length %= 128
lsr x7, x7, x1 //temp0_x is mask for top 64b of last block
cmp x1, #64
mvn x8, xzr //temp1_x = 0xffffffffffffffff
csel x13, x8, x7, lt
csel x14, x7, xzr, lt
mov v0.d[1], x14
ldr q20, [x6] //load h1l | h1h
ld1 { v26.16b}, [x2] //load existing bytes where the possibly partial last block is to be stored
mov v0.d[0], x13 //ctr0b is mask for last block
and v9.16b, v9.16b, v0.16b //possibly partial last block has zeroes in highest bits
rev64 v8.16b, v9.16b //GHASH final block
bif v9.16b, v26.16b, v0.16b //insert existing bytes in top end of result before storing
st1 { v9.16b}, [x2] //store all 16B
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v16.d[0], v8.d[1] //GHASH final block - mid
pmull2 v28.1q, v8.2d, v20.2d //GHASH final block - high
eor v17.16b, v17.16b, v28.16b //GHASH final block - high
pmull v26.1q, v8.1d, v20.1d //GHASH final block - low
eor v16.8b, v16.8b, v8.8b //GHASH final block - mid
pmull v16.1q, v16.1d, v21.1d //GHASH final block - mid
eor v18.16b, v18.16b, v16.16b //GHASH final block - mid
ldr d16, [x10] //MODULO - load modulo constant
eor v19.16b, v19.16b, v26.16b //GHASH final block - low
ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
rev32 v30.16b, v30.16b
str q30, [x16] //store the updated counter
.long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up
pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
.long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
.long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low
ext v19.16b, v19.16b, v19.16b, #8
rev64 v19.16b, v19.16b
st1 { v19.16b }, [x3]
mov x0, x9 //return sizes
ldp d10, d11, [sp, #16]
ldp d12, d13, [sp, #32]
ldp d14, d15, [sp, #48]
ldp d8, d9, [sp], #80
ret
L192_enc_ret:
mov w0, #0x0
ret
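//aesv8_gcm_8x_dec_192: 8x interleaved AES-192-GCM decryption; the structure
//mirrors the encryption path above, except that GHASH is computed over the
//ciphertext blocks as they are loaded rather than over freshly produced
//ciphertext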
.globl aesv8_gcm_8x_dec_192
.def aesv8_gcm_8x_dec_192
.type 32
.endef
.align 4
aesv8_gcm_8x_dec_192:
AARCH64_VALID_CALL_TARGET
cbz x1, L192_dec_ret
stp d8, d9, [sp, #-80]!
lsr x9, x1, #3
mov x16, x4
mov x11, x5
stp d10, d11, [sp, #16]
stp d12, d13, [sp, #32]
stp d14, d15, [sp, #48]
mov x5, #0xc200000000000000
stp x5, xzr, [sp, #64]
add x10, sp, #64
mov x5, x9
ld1 { v0.16b}, [x16] //CTR block 0
ld1 { v19.16b}, [x3]
mov x15, #0x100000000 //set up counter increment
movi v31.16b, #0x0
mov v31.d[1], x15
rev32 v30.16b, v0.16b //set up reversed counter
add v30.4s, v30.4s, v31.4s //CTR block 0
rev32 v1.16b, v30.16b //CTR block 1
add v30.4s, v30.4s, v31.4s //CTR block 1
rev32 v2.16b, v30.16b //CTR block 2
add v30.4s, v30.4s, v31.4s //CTR block 2
rev32 v3.16b, v30.16b //CTR block 3
add v30.4s, v30.4s, v31.4s //CTR block 3
rev32 v4.16b, v30.16b //CTR block 4
add v30.4s, v30.4s, v31.4s //CTR block 4
rev32 v5.16b, v30.16b //CTR block 5
add v30.4s, v30.4s, v31.4s //CTR block 5
ldp q26, q27, [x11, #0] //load rk0, rk1
rev32 v6.16b, v30.16b //CTR block 6
add v30.4s, v30.4s, v31.4s //CTR block 6
rev32 v7.16b, v30.16b //CTR block 7
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 0
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 0
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 0
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 0
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 0
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 0
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 0
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 0
ldp q28, q26, [x11, #32] //load rk2, rk3
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 1
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 1
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 1
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 1
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 1
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 1
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 1
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 2
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 2
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 1
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 2
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 2
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 2
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 2
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 2
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 2
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 3
ldp q27, q28, [x11, #64] //load rk4, rk5
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 3
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 3
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 3
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 3
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 3
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 3
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 3
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 4
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 4
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 4
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 4
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 4
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 4
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 5
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 4
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 5
ldp q26, q27, [x11, #96] //load rk6, rk7
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 5
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 5
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 5
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 5
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 5
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 5
sub x5, x5, #1 //byte_len - 1
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 6
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 6
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 6
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 6
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 6
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 6
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 6
ldp q28, q26, [x11, #128] //load rk8, rk9
add v30.4s, v30.4s, v31.4s //CTR block 7
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 7
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 7
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 7
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 7
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 7
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 7
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 7
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 7
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 8
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 8
and x5, x5, #0xffffffffffffff80 //number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 8
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 8
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 8
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 8
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 8
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 8
add x4, x0, x1, lsr #3 //end_input_ptr
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 9
ld1 { v19.16b}, [x3]
ext v19.16b, v19.16b, v19.16b, #8
rev64 v19.16b, v19.16b
ldp q27, q28, [x11, #160] //load rk10, rk11
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 9
add x5, x5, x0 //main_end_input_ptr
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 9
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 9
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 9
cmp x0, x5 //check if we have <= 8 blocks
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 9
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 9
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 10
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 10
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 10
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 10
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 10
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 10
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 10
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 10
ldr q26, [x11, #192] //load rk12
aese v0.16b, v28.16b //AES block 0 - round 11
aese v1.16b, v28.16b //AES block 1 - round 11
aese v4.16b, v28.16b //AES block 4 - round 11
aese v6.16b, v28.16b //AES block 6 - round 11
aese v5.16b, v28.16b //AES block 5 - round 11
aese v7.16b, v28.16b //AES block 7 - round 11
aese v2.16b, v28.16b //AES block 2 - round 11
aese v3.16b, v28.16b //AES block 3 - round 11
b.ge L192_dec_tail //handle tail
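//decrypt the first 8 blocks up front; the loaded ciphertext is kept in
//v8-v15 so that the first main loop iteration (or prepretail) can GHASH it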
ldp q8, q9, [x0], #32 //AES block 0, 1 - load ciphertext
ldp q10, q11, [x0], #32 //AES block 2, 3 - load ciphertext
ldp q12, q13, [x0], #32 //AES block 4, 5 - load ciphertext
.long 0xce016921 //eor3 v1.16b, v9.16b, v1.16b, v26.16b //AES block 1 - result
.long 0xce006900 //eor3 v0.16b, v8.16b, v0.16b, v26.16b //AES block 0 - result
stp q0, q1, [x2], #32 //AES block 0, 1 - store result
rev32 v0.16b, v30.16b //CTR block 8
add v30.4s, v30.4s, v31.4s //CTR block 8
rev32 v1.16b, v30.16b //CTR block 9
add v30.4s, v30.4s, v31.4s //CTR block 9
.long 0xce036963 //eor3 v3.16b, v11.16b, v3.16b, v26.16b //AES block 3 - result
.long 0xce026942 //eor3 v2.16b, v10.16b, v2.16b, v26.16b //AES block 2 - result
stp q2, q3, [x2], #32 //AES block 2, 3 - store result
ldp q14, q15, [x0], #32 //AES block 6, 7 - load ciphertext
rev32 v2.16b, v30.16b //CTR block 10
add v30.4s, v30.4s, v31.4s //CTR block 10
.long 0xce046984 //eor3 v4.16b, v12.16b, v4.16b, v26.16b //AES block 4 - result
rev32 v3.16b, v30.16b //CTR block 11
add v30.4s, v30.4s, v31.4s //CTR block 11
.long 0xce0569a5 //eor3 v5.16b, v13.16b, v5.16b, v26.16b //AES block 5 - result
stp q4, q5, [x2], #32 //AES block 4, 5 - store result
cmp x0, x5 //check if we have <= 8 blocks
.long 0xce0669c6 //eor3 v6.16b, v14.16b, v6.16b, v26.16b //AES block 6 - result
.long 0xce0769e7 //eor3 v7.16b, v15.16b, v7.16b, v26.16b //AES block 7 - result
rev32 v4.16b, v30.16b //CTR block 12
add v30.4s, v30.4s, v31.4s //CTR block 12
stp q6, q7, [x2], #32 //AES block 6, 7 - store result
b.ge L192_dec_prepretail //do prepretail
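//decrypt main loop: GHASH the 8 ciphertext blocks loaded in the previous
//iteration while the next 8 counter blocks run through the AES rounds, then
//load 8 more ciphertext blocks and xor the keystream (together with rk12,
//via eor3) into them to produce and store the plaintext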
L192_dec_main_loop: //main loop start
rev64 v9.16b, v9.16b //GHASH block 8k+1
ldp q26, q27, [x11, #0] //load rk0, rk1
ext v19.16b, v19.16b, v19.16b, #8 //PRE 0
rev64 v8.16b, v8.16b //GHASH block 8k
rev32 v5.16b, v30.16b //CTR block 8k+13
add v30.4s, v30.4s, v31.4s //CTR block 8k+13
ldr q23, [x6, #144] //load h7l | h7h
ldr q25, [x6, #176] //load h8l | h8h
rev64 v12.16b, v12.16b //GHASH block 8k+4
rev64 v11.16b, v11.16b //GHASH block 8k+3
eor v8.16b, v8.16b, v19.16b //PRE 1
rev32 v6.16b, v30.16b //CTR block 8k+14
add v30.4s, v30.4s, v31.4s //CTR block 8k+14
rev64 v13.16b, v13.16b //GHASH block 8k+5
rev32 v7.16b, v30.16b //CTR block 8k+15
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 0
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 0
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 0
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 0
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 0
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 0
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 0
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 0
pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low
pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high
ldp q28, q26, [x11, #32] //load rk2, rk3
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 1
pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low
ldr q20, [x6, #96] //load h5l | h5h
ldr q22, [x6, #128] //load h6l | h6h
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 1
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 1
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 1
pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 1
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 1
trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
rev64 v10.16b, v10.16b //GHASH block 8k+2
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 1
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 1
ldr q21, [x6, #112] //load h6k | h5k
ldr q24, [x6, #160] //load h8k | h7k
trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high
pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high
pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high
eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid
eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 2
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 2
pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low
.long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 2
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 3
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 2
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 2
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 2
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 2
ldr q23, [x6, #48] //load h3l | h3h
ldr q25, [x6, #80] //load h4l | h4h
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 2
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 3
pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low
trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 3
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 3
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 3
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 3
ldp q27, q28, [x11, #64] //load rk4, rk5
eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 3
trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
add v30.4s, v30.4s, v31.4s //CTR block 8k+15
pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid
pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid
pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 3
pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid
pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 4
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 4
eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 4
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 4
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 4
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 4
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 4
ldr q20, [x6] //load h1l | h1h
ldr q22, [x6, #32] //load h2l | h2h
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 5
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 5
ldp q26, q27, [x11, #96] //load rk6, rk7
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 5
rev64 v15.16b, v15.16b //GHASH block 8k+7
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 5
.long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 5
pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low
trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 5
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 5
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 5
rev64 v14.16b, v14.16b //GHASH block 8k+6
ldr q21, [x6, #16] //load h2k | h1k
ldr q24, [x6, #64] //load h4k | h3k
pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high
pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 6
eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 6
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 6
pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 6
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 6
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 7
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 7
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 6
pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid
.long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high
.long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low
pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low
trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 6
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 7
ldp q28, q26, [x11, #128] //load rk8, rk9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 7
eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 7
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 7
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 7
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 7
.long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid
pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high
pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid
ldr d16, [x10] //MODULO - load modulo constant
pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 8
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 8
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 8
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 8
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 8
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 8
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 8
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 8
.long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high
rev32 v20.16b, v30.16b //CTR block 8k+16
add v30.4s, v30.4s, v31.4s //CTR block 8k+16
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 9
.long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 9
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 9
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 9
ldp q27, q28, [x11, #160] //load rk10, rk11
.long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up
ldp q8, q9, [x0], #32 //AES block 8k+8, 8k+9 - load ciphertext
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 9
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 9
ldp q10, q11, [x0], #32 //AES block 8k+10, 8k+11 - load ciphertext
rev32 v22.16b, v30.16b //CTR block 8k+17
pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
add v30.4s, v30.4s, v31.4s //CTR block 8k+17
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 9
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 9
ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 10
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 10
ldp q12, q13, [x0], #32 //AES block 8k+12, 8k+13 - load ciphertext
rev32 v23.16b, v30.16b //CTR block 8k+18
add v30.4s, v30.4s, v31.4s //CTR block 8k+18
.long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 10
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 10
ldr q26, [x11, #192] //load rk12
ldp q14, q15, [x0], #32 //AES block 8k+14, 8k+15 - load ciphertext
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 10
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 10
aese v0.16b, v28.16b //AES block 8k+8 - round 11
ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
aese v1.16b, v28.16b //AES block 8k+9 - round 11
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 10
aese v6.16b, v28.16b //AES block 8k+14 - round 11
aese v3.16b, v28.16b //AES block 8k+11 - round 11
.long 0xce006900 //eor3 v0.16b, v8.16b, v0.16b, v26.16b //AES block 8k+8 - result
rev32 v25.16b, v30.16b //CTR block 8k+19
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 10
aese v4.16b, v28.16b //AES block 8k+12 - round 11
aese v2.16b, v28.16b //AES block 8k+10 - round 11
add v30.4s, v30.4s, v31.4s //CTR block 8k+19
aese v7.16b, v28.16b //AES block 8k+15 - round 11
aese v5.16b, v28.16b //AES block 8k+13 - round 11
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
.long 0xce016921 //eor3 v1.16b, v9.16b, v1.16b, v26.16b //AES block 8k+9 - result
stp q0, q1, [x2], #32 //AES block 8k+8, 8k+9 - store result
.long 0xce036963 //eor3 v3.16b, v11.16b, v3.16b, v26.16b //AES block 8k+11 - result
.long 0xce026942 //eor3 v2.16b, v10.16b, v2.16b, v26.16b //AES block 8k+10 - result
.long 0xce0769e7 //eor3 v7.16b, v15.16b, v7.16b, v26.16b //AES block 8k+15 - result
stp q2, q3, [x2], #32 //AES block 8k+10, 8k+11 - store result
.long 0xce0569a5 //eor3 v5.16b, v13.16b, v5.16b, v26.16b //AES block 8k+13 - result
.long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low
mov v3.16b, v25.16b //CTR block 8k+19
.long 0xce046984 //eor3 v4.16b, v12.16b, v4.16b, v26.16b //AES block 8k+12 - result
stp q4, q5, [x2], #32 //AES block 8k+12, 8k+13 - store result
cmp x0, x5 //LOOP CONTROL
.long 0xce0669c6 //eor3 v6.16b, v14.16b, v6.16b, v26.16b //AES block 8k+14 - result
stp q6, q7, [x2], #32 //AES block 8k+14, 8k+15 - store result
mov v0.16b, v20.16b //CTR block 8k+16
mov v1.16b, v22.16b //CTR block 8k+17
mov v2.16b, v23.16b //CTR block 8k+18
rev32 v4.16b, v30.16b //CTR block 8k+20
add v30.4s, v30.4s, v31.4s //CTR block 8k+20
b.lt L192_dec_main_loop
L192_dec_prepretail: //PREPRETAIL
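//Prepretail: compute GHASH over the eight ciphertext blocks already held in
//q8..q15 and run the AES rounds for the next eight counter blocks, but store
//no plaintext here - the tail below produces the remaining output. The .long
//words throughout are pre-encoded eor3 (SHA3) instructions kept as raw opcodes
//(presumably for assembler compatibility); the intended mnemonic follows in
//each comment.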
ldp q26, q27, [x11, #0] //load rk0, rk1
rev32 v5.16b, v30.16b //CTR block 8k+13
add v30.4s, v30.4s, v31.4s //CTR block 8k+13
ldr q23, [x6, #144] //load h7l | h7h
ldr q25, [x6, #176] //load h8l | h8h
rev64 v8.16b, v8.16b //GHASH block 8k
ext v19.16b, v19.16b, v19.16b, #8 //PRE 0
rev64 v11.16b, v11.16b //GHASH block 8k+3
rev32 v6.16b, v30.16b //CTR block 8k+14
add v30.4s, v30.4s, v31.4s //CTR block 8k+14
eor v8.16b, v8.16b, v19.16b //PRE 1
rev64 v10.16b, v10.16b //GHASH block 8k+2
rev64 v9.16b, v9.16b //GHASH block 8k+1
ldr q20, [x6, #96] //load h5l | h5h
ldr q22, [x6, #128] //load h6l | h6h
rev32 v7.16b, v30.16b //CTR block 8k+15
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 0
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 0
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 0
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 0
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 0
pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 0
pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 0
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 1
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 0
ldp q28, q26, [x11, #32] //load rk2, rk3
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 1
pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high
pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low
pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low
eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 1
pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 1
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 1
trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 1
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 1
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 1
ldr q21, [x6, #112] //load h6k | h5k
ldr q24, [x6, #160] //load h8k | h7k
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 2
eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 2
rev64 v13.16b, v13.16b //GHASH block 8k+5
pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low
.long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 2
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 2
trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 3
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 2
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 2
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 2
trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 2
pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 3
eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 3
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 3
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 3
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low
ldp q27, q28, [x11, #64] //load rk4, rk5
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 3
ldr q23, [x6, #48] //load h3l | h3h
ldr q25, [x6, #80] //load h4l | h4h
pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid
pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid
ldr q20, [x6] //load h1l | h1h
ldr q22, [x6, #32] //load h2l | h2h
eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 3
rev64 v15.16b, v15.16b //GHASH block 8k+7
.long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
rev64 v12.16b, v12.16b //GHASH block 8k+4
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 4
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 4
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 3
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 4
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 4
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 4
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 4
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 4
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 4
rev64 v14.16b, v14.16b //GHASH block 8k+6
ldr q21, [x6, #16] //load h2k | h1k
ldr q24, [x6, #64] //load h4k | h3k
trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 5
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 5
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 5
ldp q26, q27, [x11, #96] //load rk6, rk7
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 5
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 5
pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high
pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high
pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 5
pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low
trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high
pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low
trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 5
trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 5
eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 6
eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 6
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 6
pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid
pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 6
pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 6
pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high
.long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 7
.long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 6
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 6
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 7
ldp q28, q26, [x11, #128] //load rk8, rk9
pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 7
ldr d16, [x10] //MODULO - load modulo constant
.long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high
pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 7
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 7
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 7
.long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low
.long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 7
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 7
.long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up
ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 8
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 8
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 8
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 8
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 8
pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 8
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 8
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 8
ldp q27, q28, [x11, #160] //load rk10, rk11
.long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 9
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 9
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 9
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 9
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 9
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 9
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 9
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 9
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
ldr q26, [x11, #192] //load rk12
ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 10
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 10
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 10
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 10
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 10
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 10
aese v0.16b, v28.16b //AES block 8k+8 - round 11
.long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low
aese v5.16b, v28.16b //AES block 8k+13 - round 11
aese v2.16b, v28.16b //AES block 8k+10 - round 11
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 10
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 10
aese v6.16b, v28.16b //AES block 8k+14 - round 11
aese v4.16b, v28.16b //AES block 8k+12 - round 11
add v30.4s, v30.4s, v31.4s //CTR block 8k+15
aese v3.16b, v28.16b //AES block 8k+11 - round 11
aese v1.16b, v28.16b //AES block 8k+9 - round 11
aese v7.16b, v28.16b //AES block 8k+15 - round 11
L192_dec_tail: //TAIL
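//Tail: x5 holds the number of bytes still to be processed (1..128). The AES
//processed counter blocks are already in v0..v7 (the final round key, kept in
//v29, is folded in by the eor3 instructions below), and each remaining
//ciphertext block is folded into the GHASH accumulators v17 (high), v18 (mid)
//and v19 (low).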
sub x5, x4, x0 //bytes left to process = end_input_ptr - current input pointer
ldp q20, q21, [x6, #96] //load h5l | h5h
ldr q9, [x0], #16 //AES block 8k+8 - load ciphertext
ldp q24, q25, [x6, #160] //load h8k | h7k
mov v29.16b, v26.16b //keep final round key (rk12) for the tail
ldp q22, q23, [x6, #128] //load h6l | h6h
ext v16.16b, v19.16b, v19.16b, #8 //prepare final partial tag
.long 0xce00752c //eor3 v12.16b, v9.16b, v0.16b, v29.16b //AES block 8k+8 - result
cmp x5, #112
b.gt L192_dec_blocks_more_than_7
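//Fewer than eight blocks remain: zero the GHASH accumulators, step the counter
//back for each block that will not be produced, and shuffle the precomputed
//keystream registers so that the remaining blocks line up with the per-block
//code below (the final block is always processed against v7, or v0 when only
//one block is left).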
mov v7.16b, v6.16b
movi v17.8b, #0
sub v30.4s, v30.4s, v31.4s
mov v6.16b, v5.16b
mov v5.16b, v4.16b
mov v4.16b, v3.16b
cmp x5, #96
movi v19.8b, #0
mov v3.16b, v2.16b
mov v2.16b, v1.16b
movi v18.8b, #0
b.gt L192_dec_blocks_more_than_6
mov v7.16b, v6.16b
mov v6.16b, v5.16b
mov v5.16b, v4.16b
mov v4.16b, v3.16b
mov v3.16b, v1.16b
sub v30.4s, v30.4s, v31.4s
cmp x5, #80
b.gt L192_dec_blocks_more_than_5
mov v7.16b, v6.16b
mov v6.16b, v5.16b
mov v5.16b, v4.16b
mov v4.16b, v1.16b
cmp x5, #64
sub v30.4s, v30.4s, v31.4s
b.gt L192_dec_blocks_more_than_4
sub v30.4s, v30.4s, v31.4s
mov v7.16b, v6.16b
mov v6.16b, v5.16b
mov v5.16b, v1.16b
cmp x5, #48
b.gt L192_dec_blocks_more_than_3
sub v30.4s, v30.4s, v31.4s
mov v7.16b, v6.16b
cmp x5, #32
mov v6.16b, v1.16b
ldr q24, [x6, #64] //load h4k | h3k
b.gt L192_dec_blocks_more_than_2
sub v30.4s, v30.4s, v31.4s
mov v7.16b, v1.16b
cmp x5, #16
b.gt L192_dec_blocks_more_than_1
sub v30.4s, v30.4s, v31.4s
ldr q21, [x6, #16] //load h2k | h1k
b L192_dec_blocks_less_than_1
L192_dec_blocks_more_than_7: //blocks left > 7
rev64 v8.16b, v9.16b //GHASH final-7 block
ins v18.d[0], v24.d[1] //GHASH final-7 block - mid
eor v8.16b, v8.16b, v16.16b //feed in partial tag
pmull2 v17.1q, v8.2d, v25.2d //GHASH final-7 block - high
ins v27.d[0], v8.d[1] //GHASH final-7 block - mid
ldr q9, [x0], #16 //AES final-6 block - load ciphertext
pmull v19.1q, v8.1d, v25.1d //GHASH final-7 block - low
eor v27.8b, v27.8b, v8.8b //GHASH final-7 block - mid
st1 { v12.16b}, [x2], #16 //AES final-7 block - store result
.long 0xce01752c //eor3 v12.16b, v9.16b, v1.16b, v29.16b //AES final-6 block - result
pmull v18.1q, v27.1d, v18.1d //GHASH final-7 block - mid
movi v16.8b, #0 //suppress further partial tag feed in
L192_dec_blocks_more_than_6: //blocks left > 6
rev64 v8.16b, v9.16b //GHASH final-6 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ldr q9, [x0], #16 //AES final-5 block - load ciphertext
ins v27.d[0], v8.d[1] //GHASH final-6 block - mid
eor v27.8b, v27.8b, v8.8b //GHASH final-6 block - mid
movi v16.8b, #0 //suppress further partial tag feed in
pmull2 v28.1q, v8.2d, v23.2d //GHASH final-6 block - high
st1 { v12.16b}, [x2], #16 //AES final-6 block - store result
.long 0xce02752c //eor3 v12.16b, v9.16b, v2.16b, v29.16b //AES final-5 block - result
eor v17.16b, v17.16b, v28.16b //GHASH final-6 block - high
pmull v27.1q, v27.1d, v24.1d //GHASH final-6 block - mid
pmull v26.1q, v8.1d, v23.1d //GHASH final-6 block - low
eor v18.16b, v18.16b, v27.16b //GHASH final-6 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-6 block - low
L192_dec_blocks_more_than_5: //blocks left > 5
rev64 v8.16b, v9.16b //GHASH final-5 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-5 block - mid
eor v27.8b, v27.8b, v8.8b //GHASH final-5 block - mid
ins v27.d[1], v27.d[0] //GHASH final-5 block - mid
pmull2 v28.1q, v8.2d, v22.2d //GHASH final-5 block - high
ldr q9, [x0], #16 //AES final-4 block - load ciphertext
eor v17.16b, v17.16b, v28.16b //GHASH final-5 block - high
pmull v26.1q, v8.1d, v22.1d //GHASH final-5 block - low
pmull2 v27.1q, v27.2d, v21.2d //GHASH final-5 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-5 block - low
movi v16.8b, #0 //suppress further partial tag feed in
st1 { v12.16b}, [x2], #16 //AES final-5 block - store result
eor v18.16b, v18.16b, v27.16b //GHASH final-5 block - mid
.long 0xce03752c //eor3 v12.16b, v9.16b, v3.16b, v29.16b //AES final-4 block - result
L192_dec_blocks_more_than_4: //blocks left > 4
rev64 v8.16b, v9.16b //GHASH final-4 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
movi v16.8b, #0 //suppress further partial tag feed in
ldr q9, [x0], #16 //AES final-3 block - load ciphertext
ins v27.d[0], v8.d[1] //GHASH final-4 block - mid
pmull v26.1q, v8.1d, v20.1d //GHASH final-4 block - low
eor v27.8b, v27.8b, v8.8b //GHASH final-4 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-4 block - low
pmull v27.1q, v27.1d, v21.1d //GHASH final-4 block - mid
st1 { v12.16b}, [x2], #16 //AES final-4 block - store result
pmull2 v28.1q, v8.2d, v20.2d //GHASH final-4 block - high
.long 0xce04752c //eor3 v12.16b, v9.16b, v4.16b, v29.16b //AES final-3 block - result
eor v18.16b, v18.16b, v27.16b //GHASH final-4 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-4 block - high
L192_dec_blocks_more_than_3: //blocks left > 3
ldr q25, [x6, #80] //load h4l | h4h
rev64 v8.16b, v9.16b //GHASH final-3 block
ldr q9, [x0], #16 //AES final-2 block - load ciphertext
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-3 block - mid
pmull2 v28.1q, v8.2d, v25.2d //GHASH final-3 block - high
eor v17.16b, v17.16b, v28.16b //GHASH final-3 block - high
movi v16.8b, #0 //suppress further partial tag feed in
pmull v26.1q, v8.1d, v25.1d //GHASH final-3 block - low
st1 { v12.16b}, [x2], #16 //AES final-3 block - store result
eor v27.8b, v27.8b, v8.8b //GHASH final-3 block - mid
.long 0xce05752c //eor3 v12.16b, v9.16b, v5.16b, v29.16b //AES final-2 block - result
eor v19.16b, v19.16b, v26.16b //GHASH final-3 block - low
ldr q24, [x6, #64] //load h4k | h3k
ins v27.d[1], v27.d[0] //GHASH final-3 block - mid
pmull2 v27.1q, v27.2d, v24.2d //GHASH final-3 block - mid
eor v18.16b, v18.16b, v27.16b //GHASH final-3 block - mid
L192_dec_blocks_more_than_2: //blocks left > 2
rev64 v8.16b, v9.16b //GHASH final-2 block
ldr q23, [x6, #48] //load h3l | h3h
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-2 block - mid
ldr q9, [x0], #16 //AES final-1 block - load ciphertext
pmull2 v28.1q, v8.2d, v23.2d //GHASH final-2 block - high
eor v27.8b, v27.8b, v8.8b //GHASH final-2 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-2 block - high
pmull v26.1q, v8.1d, v23.1d //GHASH final-2 block - low
pmull v27.1q, v27.1d, v24.1d //GHASH final-2 block - mid
movi v16.8b, #0 //suppress further partial tag feed in
eor v19.16b, v19.16b, v26.16b //GHASH final-2 block - low
st1 { v12.16b}, [x2], #16 //AES final-2 block - store result
eor v18.16b, v18.16b, v27.16b //GHASH final-2 block - mid
.long 0xce06752c //eor3 v12.16b, v9.16b, v6.16b, v29.16b //AES final-1 block - result
L192_dec_blocks_more_than_1: //blocks left > 1
rev64 v8.16b, v9.16b //GHASH final-1 block
ldr q9, [x0], #16 //AES final block - load ciphertext
ldr q22, [x6, #32] //load h1l | h1h
eor v8.16b, v8.16b, v16.16b //feed in partial tag
movi v16.8b, #0 //suppress further partial tag feed in
ldr q21, [x6, #16] //load h2k | h1k
pmull v26.1q, v8.1d, v22.1d //GHASH final-1 block - low
ins v27.d[0], v8.d[1] //GHASH final-1 block - mid
st1 { v12.16b}, [x2], #16 //AES final-1 block - store result
pmull2 v28.1q, v8.2d, v22.2d //GHASH final-1 block - high
.long 0xce07752c //eor3 v12.16b, v9.16b, v7.16b, v29.16b //AES final block - result
eor v27.8b, v27.8b, v8.8b //GHASH final-1 block - mid
ins v27.d[1], v27.d[0] //GHASH final-1 block - mid
pmull2 v27.1q, v27.2d, v21.2d //GHASH final-1 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-1 block - low
eor v18.16b, v18.16b, v27.16b //GHASH final-1 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-1 block - high
L192_dec_blocks_less_than_1: //blocks left <= 1
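//Final (possibly partial) block: build a byte mask from the bit length in x1,
//zero the unused tail of the last ciphertext block before it enters GHASH,
//merge the decrypted bytes with the existing bytes at the output, then perform
//the final GHASH accumulation and polynomial reduction and store the tag and
//the updated counter.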
rev32 v30.16b, v30.16b //restore the counter to stored byte order
and x1, x1, #127 //bit_length %= 128
sub x1, x1, #128 //bit_length -= 128
str q30, [x16] //store the updated counter
neg x1, x1 //bit_length = 128 - #bits in input (in range [1,128])
mvn x7, xzr //temp0_x = 0xffffffffffffffff
and x1, x1, #127 //bit_length %= 128
mvn x8, xzr //temp1_x = 0xffffffffffffffff
lsr x7, x7, x1 //temp0_x is mask for top 64b of last block
cmp x1, #64
csel x13, x8, x7, lt //mask for low 64b of last block (all-ones if more than 64 bits remain)
csel x14, x7, xzr, lt //mask for high 64b of last block (zero if 64 bits or fewer remain)
ldr q20, [x6] //load h1l | h1h
mov v0.d[1], x14
ld1 { v26.16b}, [x2] //load existing bytes where the possibly partial last block is to be stored
mov v0.d[0], x13 //ctr0b is mask for last block
and v9.16b, v9.16b, v0.16b //possibly partial last block has zeroes in highest bits
bif v12.16b, v26.16b, v0.16b //insert existing bytes in top end of result before storing
rev64 v8.16b, v9.16b //GHASH final block
st1 { v12.16b}, [x2] //store all 16B
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v16.d[0], v8.d[1] //GHASH final block - mid
pmull v26.1q, v8.1d, v20.1d //GHASH final block - low
eor v16.8b, v16.8b, v8.8b //GHASH final block - mid
pmull2 v28.1q, v8.2d, v20.2d //GHASH final block - high
eor v19.16b, v19.16b, v26.16b //GHASH final block - low
pmull v16.1q, v16.1d, v21.1d //GHASH final block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final block - high
eor v14.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up
eor v18.16b, v18.16b, v16.16b //GHASH final block - mid
ldr d16, [x10] //MODULO - load modulo constant
pmull v21.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
ext v17.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
eor v18.16b, v18.16b, v14.16b //MODULO - karatsuba tidy up
.long 0xce115652 //eor3 v18.16b, v18.16b, v17.16b, v21.16b //MODULO - fold into mid
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
ext v18.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
.long 0xce124673 //eor3 v19.16b, v19.16b, v18.16b, v17.16b //MODULO - fold into low
ext v19.16b, v19.16b, v19.16b, #8 //convert tag back to stored byte order
rev64 v19.16b, v19.16b
st1 { v19.16b }, [x3] //store updated GHASH tag
mov x0, x9 //return number of bytes processed
ldp d10, d11, [sp, #16]
ldp d12, d13, [sp, #32]
ldp d14, d15, [sp, #48]
ldp d8, d9, [sp], #80
ret
L192_dec_ret:
mov w0, #0x0
ret
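//AES-256-GCM, 8-way interleaved encryption kernel. As used below: x0 = input,
//x1 = length in bits, x2 = output, x3 = running GHASH tag (read at entry,
//updated at exit), x4 = counter block, x5 = expanded AES-256 round keys
//(rk0..rk14), x6 = table of precomputed GHASH key powers (h1..h8) and their
//middle-term constants (h1k..h8k).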
.globl aesv8_gcm_8x_enc_256
.def aesv8_gcm_8x_enc_256
.type 32
.endef
.align 4
aesv8_gcm_8x_enc_256:
AARCH64_VALID_CALL_TARGET
cbz x1, L256_enc_ret
stp d8, d9, [sp, #-80]!
lsr x9, x1, #3
mov x16, x4
mov x11, x5
stp d10, d11, [sp, #16]
stp d12, d13, [sp, #32]
stp d14, d15, [sp, #48]
mov x5, #0xc200000000000000
stp x5, xzr, [sp, #64]
add x10, sp, #64
ld1 { v0.16b}, [x16] //CTR block 0
mov x5, x9
mov x15, #0x100000000 //set up counter increment
movi v31.16b, #0x0
mov v31.d[1], x15
sub x5, x5, #1 //byte_len - 1
and x5, x5, #0xffffffffffffff80 //number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
add x5, x5, x0
rev32 v30.16b, v0.16b //set up reversed counter
add v30.4s, v30.4s, v31.4s //CTR block 0
rev32 v1.16b, v30.16b //CTR block 1
add v30.4s, v30.4s, v31.4s //CTR block 1
rev32 v2.16b, v30.16b //CTR block 2
add v30.4s, v30.4s, v31.4s //CTR block 2
rev32 v3.16b, v30.16b //CTR block 3
add v30.4s, v30.4s, v31.4s //CTR block 3
rev32 v4.16b, v30.16b //CTR block 4
add v30.4s, v30.4s, v31.4s //CTR block 4
rev32 v5.16b, v30.16b //CTR block 5
add v30.4s, v30.4s, v31.4s //CTR block 5
ldp q26, q27, [x11, #0] //load rk0, rk1
rev32 v6.16b, v30.16b //CTR block 6
add v30.4s, v30.4s, v31.4s //CTR block 6
rev32 v7.16b, v30.16b //CTR block 7
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 0
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 0
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 0
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 0
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 0
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 0
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 0
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 0
ldp q28, q26, [x11, #32] //load rk2, rk3
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 1
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 1
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 1
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 1
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 1
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 1
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 1
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 2
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 2
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 1
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 2
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 2
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 2
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 2
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 2
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 2
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 3
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 3
ldp q27, q28, [x11, #64] //load rk4, rk5
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 3
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 3
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 3
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 3
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 3
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 3
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 4
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 4
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 4
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 4
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 4
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 4
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 4
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 5
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 5
ldp q26, q27, [x11, #96] //load rk6, rk7
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 5
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 5
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 5
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 5
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 5
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 5
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 6
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 6
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 6
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 6
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 6
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 6
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 6
ldp q28, q26, [x11, #128] //load rk8, rk9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 7
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 7
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 7
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 7
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 7
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 7
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 7
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 7
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 8
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 8
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 8
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 8
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 8
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 8
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 8
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 8
ld1 { v19.16b}, [x3] //load current GHASH tag
ext v19.16b, v19.16b, v19.16b, #8 //convert tag to the byte order used internally
rev64 v19.16b, v19.16b
ldp q27, q28, [x11, #160] //load rk10, rk11
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 9
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 9
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 9
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 9
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 9
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 9
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 9
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 10
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 10
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 10
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 10
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 10
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 10
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 10
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 10
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 11
ldp q26, q27, [x11, #192] //load rk12, rk13
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 11
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 11
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 11
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 11
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 11
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 11
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 11
add v30.4s, v30.4s, v31.4s //CTR block 7
ldr q28, [x11, #224] //load rk14
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 12
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 12
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 12
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 12
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 12
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 12
aese v2.16b, v27.16b //AES block 2 - round 13
aese v1.16b, v27.16b //AES block 1 - round 13
aese v4.16b, v27.16b //AES block 4 - round 13
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 12
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 12
aese v0.16b, v27.16b //AES block 0 - round 13
aese v5.16b, v27.16b //AES block 5 - round 13
aese v6.16b, v27.16b //AES block 6 - round 13
aese v7.16b, v27.16b //AES block 7 - round 13
aese v3.16b, v27.16b //AES block 3 - round 13
add x4, x0, x1, lsr #3 //end_input_ptr
cmp x0, x5 //check if we have <= 8 blocks
b.ge L256_enc_tail //handle tail
ldp q8, q9, [x0], #32 //AES block 0, 1 - load plaintext
ldp q10, q11, [x0], #32 //AES block 2, 3 - load plaintext
.long 0xce007108 //eor3 v8.16b, v8.16b, v0.16b, v28.16b //AES block 0 - result
rev32 v0.16b, v30.16b //CTR block 8
add v30.4s, v30.4s, v31.4s //CTR block 8
.long 0xce017129 //eor3 v9.16b, v9.16b, v1.16b, v28.16b //AES block 1 - result
.long 0xce03716b //eor3 v11.16b, v11.16b, v3.16b, v28.16b //AES block 3 - result
rev32 v1.16b, v30.16b //CTR block 9
add v30.4s, v30.4s, v31.4s //CTR block 9
ldp q12, q13, [x0], #32 //AES block 4, 5 - load plaintext
ldp q14, q15, [x0], #32 //AES block 6, 7 - load plaintext
.long 0xce02714a //eor3 v10.16b, v10.16b, v2.16b, v28.16b //AES block 2 - result
cmp x0, x5 //check if we have <= 8 blocks
rev32 v2.16b, v30.16b //CTR block 10
add v30.4s, v30.4s, v31.4s //CTR block 10
stp q8, q9, [x2], #32 //AES block 0, 1 - store result
stp q10, q11, [x2], #32 //AES block 2, 3 - store result
rev32 v3.16b, v30.16b //CTR block 11
add v30.4s, v30.4s, v31.4s //CTR block 11
.long 0xce04718c //eor3 v12.16b, v12.16b, v4.16b, v28.16b //AES block 4 - result
.long 0xce0771ef //eor3 v15.16b, v15.16b, v7.16b, v28.16b //AES block 7 - result
.long 0xce0671ce //eor3 v14.16b, v14.16b, v6.16b, v28.16b //AES block 6 - result
.long 0xce0571ad //eor3 v13.16b, v13.16b, v5.16b, v28.16b //AES block 5 - result
stp q12, q13, [x2], #32 //AES block 4, 5 - store result
rev32 v4.16b, v30.16b //CTR block 12
stp q14, q15, [x2], #32 //AES block 6, 7 - store result
add v30.4s, v30.4s, v31.4s //CTR block 12
b.ge L256_enc_prepretail //do prepretail
L256_enc_main_loop: //main loop start
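//Main loop: each iteration encrypts eight counter blocks, XORs them with the
//next eight plaintext blocks and stores the ciphertext, while folding the
//eight ciphertext blocks produced by the previous iteration into the GHASH
//accumulators (v17 high, v18 mid, v19 low) and finishing with the modulo
//(polynomial reduction) step.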
ldp q26, q27, [x11, #0] //load rk0, rk1
rev32 v5.16b, v30.16b //CTR block 8k+13
add v30.4s, v30.4s, v31.4s //CTR block 8k+13
ldr q21, [x6, #112] //load h6k | h5k
ldr q24, [x6, #160] //load h8k | h7k
rev64 v11.16b, v11.16b //GHASH block 8k+3
ldr q20, [x6, #96] //load h5l | h5h
ldr q22, [x6, #128] //load h6l | h6h
rev64 v9.16b, v9.16b //GHASH block 8k+1
rev32 v6.16b, v30.16b //CTR block 8k+14
add v30.4s, v30.4s, v31.4s //CTR block 8k+14
rev64 v8.16b, v8.16b //GHASH block 8k
rev64 v12.16b, v12.16b //GHASH block 8k+4
ext v19.16b, v19.16b, v19.16b, #8 //PRE 0
ldr q23, [x6, #144] //load h7l | h7h
ldr q25, [x6, #176] //load h8l | h8h
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 0
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 0
rev32 v7.16b, v30.16b //CTR block 8k+15
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 0
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 0
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 0
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 0
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 0
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 0
ldp q28, q26, [x11, #32] //load rk2, rk3
eor v8.16b, v8.16b, v19.16b //PRE 1
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 1
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 1
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 1
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 1
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 1
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 1
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 1
pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high
pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low
pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high
trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 1
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 2
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 2
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 2
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 2
pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 2
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 3
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 3
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 2
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 3
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 2
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 2
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 3
rev64 v14.16b, v14.16b //GHASH block 8k+6
pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 3
ldp q27, q28, [x11, #64] //load rk4, rk5
rev64 v10.16b, v10.16b //GHASH block 8k+2
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 3
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 3
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 3
eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high
pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high
rev64 v13.16b, v13.16b //GHASH block 8k+5
pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low
eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low
ldr q23, [x6, #48] //load h3l | h3h
ldr q25, [x6, #80] //load h4l | h4h
trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
.long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high
pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 4
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 4
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 4
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 4
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 4
trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 4
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 4
trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid
ldp q26, q27, [x11, #96] //load rk6, rk7
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 5
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 5
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 5
eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 5
rev64 v15.16b, v15.16b //GHASH block 8k+7
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 5
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 5
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 5
pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid
pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 5
pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 6
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 6
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 6
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 6
eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid
pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 6
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 6
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 6
ldp q28, q26, [x11, #128] //load rk8, rk9
pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 7
ldr q20, [x6] //load h1l | h1h
ldr q22, [x6, #32] //load h2l | h2h
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 7
.long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
ldr q21, [x6, #16] //load h2k | h1k
ldr q24, [x6, #64] //load h4k | h3k
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 7
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 7
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 7
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 7
pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low
trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 7
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 7
pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 8
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 8
pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low
trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 9
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 8
pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid
pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 8
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 8
pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high
pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 8
trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 8
.long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 9
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 9
eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 9
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 9
ldp q27, q28, [x11, #160] //load rk10, rk11
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 9
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 9
pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high
.long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low
pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low
ldr d16, [x10] //MODULO - load modulo constant
pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid
pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 9
.long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low
.long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 10
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 10
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 10
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 10
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 10
add v30.4s, v30.4s, v31.4s //CTR block 8k+15
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 10
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 10
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 10
.long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high
ldp q26, q27, [x11, #192] //load rk12, rk13
rev32 v20.16b, v30.16b //CTR block 8k+16
ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
ldp q8, q9, [x0], #32 //AES block 8k+8, 8k+9 - load plaintext
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 11
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 11
add v30.4s, v30.4s, v31.4s //CTR block 8k+16
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 11
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 11
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 11
pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 11
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 12
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 11
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 12
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 12
rev32 v22.16b, v30.16b //CTR block 8k+17
add v30.4s, v30.4s, v31.4s //CTR block 8k+17
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 11
.long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 12
ldr q28, [x11, #224] //load rk14
aese v7.16b, v27.16b //AES block 8k+15 - round 13
ldp q10, q11, [x0], #32 //AES block 8k+10, 8k+11 - load plaintext
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 12
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 12
.long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 12
ldp q12, q13, [x0], #32 //AES block 8k+12, 8k+13 - load plaintext
ldp q14, q15, [x0], #32 //AES block 8k+14, 8k+15 - load plaintext
aese v2.16b, v27.16b //AES block 8k+10 - round 13
aese v4.16b, v27.16b //AES block 8k+12 - round 13
rev32 v23.16b, v30.16b //CTR block 8k+18
add v30.4s, v30.4s, v31.4s //CTR block 8k+18
aese v5.16b, v27.16b //AES block 8k+13 - round 13
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 12
aese v3.16b, v27.16b //AES block 8k+11 - round 13
cmp x0, x5 //LOOP CONTROL
.long 0xce02714a //eor3 v10.16b, v10.16b, v2.16b, v28.16b //AES block 8k+10 - result
rev32 v25.16b, v30.16b //CTR block 8k+19
add v30.4s, v30.4s, v31.4s //CTR block 8k+19
aese v0.16b, v27.16b //AES block 8k+8 - round 13
aese v6.16b, v27.16b //AES block 8k+14 - round 13
.long 0xce0571ad //eor3 v13.16b, v13.16b, v5.16b, v28.16b //AES block 8k+13 - result
ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
aese v1.16b, v27.16b //AES block 8k+9 - round 13
.long 0xce04718c //eor3 v12.16b, v12.16b, v4.16b, v28.16b //AES block 8k+12 - result
rev32 v4.16b, v30.16b //CTR block 8k+20
.long 0xce03716b //eor3 v11.16b, v11.16b, v3.16b, v28.16b //AES block 8k+11 - result
mov v3.16b, v25.16b //CTR block 8k+19
.long 0xce017129 //eor3 v9.16b, v9.16b, v1.16b, v28.16b //AES block 8k+9 - result
.long 0xce007108 //eor3 v8.16b, v8.16b, v0.16b, v28.16b //AES block 8k+8 - result
add v30.4s, v30.4s, v31.4s //CTR block 8k+20
stp q8, q9, [x2], #32 //AES block 8k+8, 8k+9 - store result
mov v2.16b, v23.16b //CTR block 8k+18
.long 0xce0771ef //eor3 v15.16b, v15.16b, v7.16b, v28.16b //AES block 8k+15 - result
.long 0xce154673 //eor3 v19.16b, v19.16b, v21.16b, v17.16b //MODULO - fold into low
stp q10, q11, [x2], #32 //AES block 8k+10, 8k+11 - store result
.long 0xce0671ce //eor3 v14.16b, v14.16b, v6.16b, v28.16b //AES block 8k+14 - result
mov v1.16b, v22.16b //CTR block 8k+17
stp q12, q13, [x2], #32 //AES block 8k+12, 8k+13 - store result
stp q14, q15, [x2], #32 //AES block 8k+14, 8k+15 - store result
mov v0.16b, v20.16b //CTR block 8k+16
b.lt L256_enc_main_loop
L256_enc_prepretail: //PREPRETAIL
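//Prepretail: fold the eight ciphertext blocks produced by the last main-loop
//iteration into GHASH while running the AES rounds for the tail's counter
//blocks; the tail then consumes the resulting keystream.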
rev32 v5.16b, v30.16b //CTR block 8k+13
ldp q26, q27, [x11, #0] //load rk0, rk1
add v30.4s, v30.4s, v31.4s //CTR block 8k+13
rev64 v10.16b, v10.16b //GHASH block 8k+2
rev32 v6.16b, v30.16b //CTR block 8k+14
add v30.4s, v30.4s, v31.4s //CTR block 8k+14
rev64 v13.16b, v13.16b //GHASH block 8k+5
ldr q21, [x6, #112] //load h6k | h5k
ldr q24, [x6, #160] //load h8k | h7k
rev32 v7.16b, v30.16b //CTR block 8k+15
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 0
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 0
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 0
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 0
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 0
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 0
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 0
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 0
ext v19.16b, v19.16b, v19.16b, #8 //PRE 0
rev64 v8.16b, v8.16b //GHASH block 8k
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 1
rev64 v9.16b, v9.16b //GHASH block 8k+1
ldp q28, q26, [x11, #32] //load rk2, rk3
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 1
ldr q23, [x6, #144] //load h7l | h7h
ldr q25, [x6, #176] //load h8l | h8h
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 1
ldr q20, [x6, #96] //load h5l | h5h
ldr q22, [x6, #128] //load h6l | h6h
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 1
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 1
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 1
eor v8.16b, v8.16b, v19.16b //PRE 1
rev64 v11.16b, v11.16b //GHASH block 8k+3
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 1
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 2
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 2
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 1
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 2
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 2
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 2
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 2
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 2
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 2
ldp q27, q28, [x11, #64] //load rk4, rk5
trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high
rev64 v14.16b, v14.16b //GHASH block 8k+6
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 3
pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 3
pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low
trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 3
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 3
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 3
eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high
pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low
pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 3
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 3
eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 3
pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 4
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 4
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 4
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 4
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 5
pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid
.long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 4
trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 4
eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 4
pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low
pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid
eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
rev64 v12.16b, v12.16b //GHASH block 8k+4
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 5
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 5
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 5
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 5
ldp q26, q27, [x11, #96] //load rk6, rk7
ldr q23, [x6, #48] //load h3l | h3h
ldr q25, [x6, #80] //load h4l | h4h
pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid
pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low
eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 5
rev64 v15.16b, v15.16b //GHASH block 8k+7
trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 5
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 5
.long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 6
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 6
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 6
ldr q21, [x6, #16] //load h2k | h1k
ldr q24, [x6, #64] //load h4k | h3k
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 6
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 6
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 6
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 6
pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high
pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low
ldr q20, [x6] //load h1l | h1h
ldr q22, [x6, #32] //load h2l | h2h
ldp q28, q26, [x11, #128] //load rk8, rk9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 7
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 7
pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high
trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 7
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 7
pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 7
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 7
eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high
pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 7
trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 7
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 8
.long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 8
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 8
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 8
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 8
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 8
eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 8
pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid
pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 8
pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high
pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid
pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid
pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low
.long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
.long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high
ldp q27, q28, [x11, #160] //load rk10, rk11
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 9
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 9
.long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high
.long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
ldr d16, [x10] //MODULO - load modulo constant
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 9
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 9
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 9
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 9
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 9
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 10
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 10
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 9
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 10
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 10
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 10
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 10
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 10
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 10
pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
.long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 11
ldp q26, q27, [x11, #192] //load rk12, rk13
ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 11
.long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 11
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 11
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 11
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 11
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 11
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 11
ldr q28, [x11, #224] //load rk14
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 12
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 12
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 12
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 12
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 12
ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 12
add v30.4s, v30.4s, v31.4s //CTR block 8k+15
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 12
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 12
aese v0.16b, v27.16b //AES block 8k+8 - round 13
.long 0xce154673 //eor3 v19.16b, v19.16b, v21.16b, v17.16b //MODULO - fold into low
aese v5.16b, v27.16b //AES block 8k+13 - round 13
aese v1.16b, v27.16b //AES block 8k+9 - round 13
aese v3.16b, v27.16b //AES block 8k+11 - round 13
aese v4.16b, v27.16b //AES block 8k+12 - round 13
aese v7.16b, v27.16b //AES block 8k+15 - round 13
aese v2.16b, v27.16b //AES block 8k+10 - round 13
aese v6.16b, v27.16b //AES block 8k+14 - round 13
L256_enc_tail: //TAIL
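// Encrypt tail: 1 to 8 blocks remain (x5 = bytes left). v0-v7 hold the
// round-13 AES states for the remaining counter blocks; the final round key
// is folded in via v29 at result time. The cmp/mov ladder below shifts the
// keystream registers down so the keystream for the last, possibly partial,
// block ends up in v7, winding the counter back for each unused block.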
ldp q24, q25, [x6, #160] //load h8k | h7k and h8l | h8h
sub x5, x4, x0 //number of bytes left to process
ldr q8, [x0], #16 //AES block 8k+8 - load plaintext
ldp q20, q21, [x6, #96] //load h5l | h5h and h6k | h5k
ext v16.16b, v19.16b, v19.16b, #8 //prepare final partial tag
ldp q22, q23, [x6, #128] //load h6l | h6h and h7l | h7h
mov v29.16b, v28.16b
cmp x5, #112
.long 0xce007509 //eor3 v9.16b, v8.16b, v0.16b, v29.16b //AES block 8k+8 - result
b.gt L256_enc_blocks_more_than_7
movi v19.8b, #0
mov v7.16b, v6.16b
movi v17.8b, #0
mov v6.16b, v5.16b
mov v5.16b, v4.16b
mov v4.16b, v3.16b
mov v3.16b, v2.16b
sub v30.4s, v30.4s, v31.4s
mov v2.16b, v1.16b
movi v18.8b, #0
cmp x5, #96
b.gt L256_enc_blocks_more_than_6
mov v7.16b, v6.16b
mov v6.16b, v5.16b
cmp x5, #80
mov v5.16b, v4.16b
mov v4.16b, v3.16b
mov v3.16b, v1.16b
sub v30.4s, v30.4s, v31.4s
b.gt L256_enc_blocks_more_than_5
mov v7.16b, v6.16b
sub v30.4s, v30.4s, v31.4s
mov v6.16b, v5.16b
mov v5.16b, v4.16b
cmp x5, #64
mov v4.16b, v1.16b
b.gt L256_enc_blocks_more_than_4
cmp x5, #48
mov v7.16b, v6.16b
mov v6.16b, v5.16b
mov v5.16b, v1.16b
sub v30.4s, v30.4s, v31.4s
b.gt L256_enc_blocks_more_than_3
cmp x5, #32
mov v7.16b, v6.16b
ldr q24, [x6, #64] //load h4k | h3k
mov v6.16b, v1.16b
sub v30.4s, v30.4s, v31.4s
b.gt L256_enc_blocks_more_than_2
mov v7.16b, v1.16b
sub v30.4s, v30.4s, v31.4s
cmp x5, #16
b.gt L256_enc_blocks_more_than_1
sub v30.4s, v30.4s, v31.4s
ldr q21, [x6, #16] //load h2k | h1k
b L256_enc_blocks_less_than_1
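// Each handler below encrypts one more block with the next keystream
// register, stores the ciphertext, and folds it into the GHASH state with
// the matching power of H, then falls through to the next handler.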
L256_enc_blocks_more_than_7: //blocks left > 7
st1 { v9.16b}, [x2], #16 //AES final-7 block - store result
rev64 v8.16b, v9.16b //GHASH final-7 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ldr q9, [x0], #16 //AES final-6 block - load plaintext
pmull2 v17.1q, v8.2d, v25.2d //GHASH final-7 block - high
ins v27.d[0], v8.d[1] //GHASH final-7 block - mid
ins v18.d[0], v24.d[1] //GHASH final-7 block - mid
movi v16.8b, #0 //suppress further partial tag feed in
eor v27.8b, v27.8b, v8.8b //GHASH final-7 block - mid
.long 0xce017529 //eor3 v9.16b, v9.16b, v1.16b, v29.16b //AES final-6 block - result
pmull v18.1q, v27.1d, v18.1d //GHASH final-7 block - mid
pmull v19.1q, v8.1d, v25.1d //GHASH final-7 block - low
L256_enc_blocks_more_than_6: //blocks left > 6
st1 { v9.16b}, [x2], #16 //AES final-6 block - store result
rev64 v8.16b, v9.16b //GHASH final-6 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
pmull v26.1q, v8.1d, v23.1d //GHASH final-6 block - low
ins v27.d[0], v8.d[1] //GHASH final-6 block - mid
pmull2 v28.1q, v8.2d, v23.2d //GHASH final-6 block - high
ldr q9, [x0], #16 //AES final-5 block - load plaintext
eor v19.16b, v19.16b, v26.16b //GHASH final-6 block - low
eor v27.8b, v27.8b, v8.8b //GHASH final-6 block - mid
pmull v27.1q, v27.1d, v24.1d //GHASH final-6 block - mid
.long 0xce027529 //eor3 v9.16b, v9.16b, v2.16b, v29.16b //AES final-5 block - result
movi v16.8b, #0 //suppress further partial tag feed in
eor v18.16b, v18.16b, v27.16b //GHASH final-6 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-6 block - high
L256_enc_blocks_more_than_5: //blocks left > 5
st1 { v9.16b}, [x2], #16 //AES final-5 block - store result
rev64 v8.16b, v9.16b //GHASH final-5 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-5 block - mid
pmull2 v28.1q, v8.2d, v22.2d //GHASH final-5 block - high
eor v17.16b, v17.16b, v28.16b //GHASH final-5 block - high
eor v27.8b, v27.8b, v8.8b //GHASH final-5 block - mid
ins v27.d[1], v27.d[0] //GHASH final-5 block - mid
ldr q9, [x0], #16 //AES final-4 block - load plaintext
pmull v26.1q, v8.1d, v22.1d //GHASH final-5 block - low
pmull2 v27.1q, v27.2d, v21.2d //GHASH final-5 block - mid
movi v16.8b, #0 //suppress further partial tag feed in
eor v19.16b, v19.16b, v26.16b //GHASH final-5 block - low
eor v18.16b, v18.16b, v27.16b //GHASH final-5 block - mid
.long 0xce037529 //eor3 v9.16b, v9.16b, v3.16b, v29.16b //AES final-4 block - result
L256_enc_blocks_more_than_4: //blocks left > 4
st1 { v9.16b}, [x2], #16 //AES final-4 block - store result
rev64 v8.16b, v9.16b //GHASH final-4 block
ldr q9, [x0], #16 //AES final-3 block - load plaintext
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-4 block - mid
pmull2 v28.1q, v8.2d, v20.2d //GHASH final-4 block - high
.long 0xce047529 //eor3 v9.16b, v9.16b, v4.16b, v29.16b //AES final-3 block - result
pmull v26.1q, v8.1d, v20.1d //GHASH final-4 block - low
eor v27.8b, v27.8b, v8.8b //GHASH final-4 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-4 block - low
pmull v27.1q, v27.1d, v21.1d //GHASH final-4 block - mid
movi v16.8b, #0 //suppress further partial tag feed in
eor v18.16b, v18.16b, v27.16b //GHASH final-4 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-4 block - high
L256_enc_blocks_more_than_3: //blocks left > 3
st1 { v9.16b}, [x2], #16 //AES final-3 block - store result
ldr q25, [x6, #80] //load h4l | h4h
rev64 v8.16b, v9.16b //GHASH final-3 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-3 block - mid
pmull2 v28.1q, v8.2d, v25.2d //GHASH final-3 block - high
eor v17.16b, v17.16b, v28.16b //GHASH final-3 block - high
eor v27.8b, v27.8b, v8.8b //GHASH final-3 block - mid
ldr q24, [x6, #64] //load h4k | h3k
ins v27.d[1], v27.d[0] //GHASH final-3 block - mid
ldr q9, [x0], #16 //AES final-2 block - load plaintext
pmull2 v27.1q, v27.2d, v24.2d //GHASH final-3 block - mid
pmull v26.1q, v8.1d, v25.1d //GHASH final-3 block - low
.long 0xce057529 //eor3 v9.16b, v9.16b, v5.16b, v29.16b //AES final-2 block - result
movi v16.8b, #0 //suppress further partial tag feed in
eor v18.16b, v18.16b, v27.16b //GHASH final-3 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-3 block - low
L256_enc_blocks_more_than_2: //blocks left > 2
ldr q23, [x6, #48] //load h3l | h3h
st1 { v9.16b}, [x2], #16 //AES final-2 block - store result
rev64 v8.16b, v9.16b //GHASH final-2 block
ldr q9, [x0], #16 //AES final-1 block - load plaintext
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-2 block - mid
movi v16.8b, #0 //suppress further partial tag feed in
pmull2 v28.1q, v8.2d, v23.2d //GHASH final-2 block - high
.long 0xce067529 //eor3 v9.16b, v9.16b, v6.16b, v29.16b //AES final-1 block - result
eor v27.8b, v27.8b, v8.8b //GHASH final-2 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-2 block - high
pmull v27.1q, v27.1d, v24.1d //GHASH final-2 block - mid
pmull v26.1q, v8.1d, v23.1d //GHASH final-2 block - low
eor v18.16b, v18.16b, v27.16b //GHASH final-2 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-2 block - low
L256_enc_blocks_more_than_1: //blocks left > 1
st1 { v9.16b}, [x2], #16 //AES final-1 block - store result
ldr q22, [x6, #32] //load h2l | h2h
rev64 v8.16b, v9.16b //GHASH final-1 block
ldr q9, [x0], #16 //AES final block - load plaintext
eor v8.16b, v8.16b, v16.16b //feed in partial tag
movi v16.8b, #0 //suppress further partial tag feed in
ins v27.d[0], v8.d[1] //GHASH final-1 block - mid
pmull2 v28.1q, v8.2d, v22.2d //GHASH final-1 block - high
.long 0xce077529 //eor3 v9.16b, v9.16b, v7.16b, v29.16b //AES final block - result
eor v17.16b, v17.16b, v28.16b //GHASH final-1 block - high
pmull v26.1q, v8.1d, v22.1d //GHASH final-1 block - low
eor v27.8b, v27.8b, v8.8b //GHASH final-1 block - mid
ldr q21, [x6, #16] //load h2k | h1k
eor v19.16b, v19.16b, v26.16b //GHASH final-1 block - low
ins v27.d[1], v27.d[0] //GHASH final-1 block - mid
pmull2 v27.1q, v27.2d, v21.2d //GHASH final-1 block - mid
eor v18.16b, v18.16b, v27.16b //GHASH final-1 block - mid
L256_enc_blocks_less_than_1: //blocks left <= 1
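// The last block may be partial: build a byte mask from the bit length,
// zero the unused bytes of the encrypted block, merge the untouched
// destination bytes back in before storing, write back the updated counter,
// then fold the masked block into GHASH and reduce to the final tag.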
and x1, x1, #127 //bit_length %= 128
sub x1, x1, #128 //bit_length -= 128
neg x1, x1 //bit_length = 128 - #bits in input (in range [1,128])
mvn x7, xzr //temp0_x = 0xffffffffffffffff
and x1, x1, #127 //bit_length %= 128
lsr x7, x7, x1 //temp0_x is mask for top 64b of last block
cmp x1, #64
mvn x8, xzr //temp1_x = 0xffffffffffffffff
csel x14, x7, xzr, lt
csel x13, x8, x7, lt
mov v0.d[0], x13 //ctr0b is mask for last block
ldr q20, [x6] //load h1l | h1h
ld1 { v26.16b}, [x2] //load existing bytes where the possibly partial last block is to be stored
mov v0.d[1], x14
and v9.16b, v9.16b, v0.16b //possibly partial last block has zeroes in highest bits
rev64 v8.16b, v9.16b //GHASH final block
rev32 v30.16b, v30.16b
bif v9.16b, v26.16b, v0.16b //insert existing bytes in top end of result before storing
str q30, [x16] //store the updated counter
eor v8.16b, v8.16b, v16.16b //feed in partial tag
st1 { v9.16b}, [x2] //store all 16B
ins v16.d[0], v8.d[1] //GHASH final block - mid
pmull2 v28.1q, v8.2d, v20.2d //GHASH final block - high
pmull v26.1q, v8.1d, v20.1d //GHASH final block - low
eor v17.16b, v17.16b, v28.16b //GHASH final block - high
eor v19.16b, v19.16b, v26.16b //GHASH final block - low
eor v16.8b, v16.8b, v8.8b //GHASH final block - mid
pmull v16.1q, v16.1d, v21.1d //GHASH final block - mid
eor v18.16b, v18.16b, v16.16b //GHASH final block - mid
ldr d16, [x10] //MODULO - load modulo constant
ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
.long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up
pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
.long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
.long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low
ext v19.16b, v19.16b, v19.16b, #8
rev64 v19.16b, v19.16b
st1 { v19.16b }, [x3]
mov x0, x9 //return sizes
ldp d10, d11, [sp, #16]
ldp d12, d13, [sp, #32]
ldp d14, d15, [sp, #48]
ldp d8, d9, [sp], #80
ret
L256_enc_ret:
mov w0, #0x0
ret
.globl aesv8_gcm_8x_dec_256
.def aesv8_gcm_8x_dec_256
.type 32
.endef
.align 4
aesv8_gcm_8x_dec_256:
AARCH64_VALID_CALL_TARGET
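// AES-256-GCM decryption, 8 blocks per iteration. Same structure as the
// encryption path above, except GHASH is computed over the incoming
// ciphertext rather than over the freshly produced ciphertext.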
cbz x1, L256_dec_ret
stp d8, d9, [sp, #-80]!
lsr x9, x1, #3
mov x16, x4
mov x11, x5
stp d10, d11, [sp, #16]
stp d12, d13, [sp, #32]
stp d14, d15, [sp, #48]
mov x5, #0xc200000000000000
stp x5, xzr, [sp, #64]
add x10, sp, #64
ld1 { v0.16b}, [x16] //CTR block 0
mov x15, #0x100000000 //set up counter increment
movi v31.16b, #0x0
mov v31.d[1], x15
mov x5, x9
sub x5, x5, #1 //byte_len - 1
rev32 v30.16b, v0.16b //set up reversed counter
add v30.4s, v30.4s, v31.4s //CTR block 0
rev32 v1.16b, v30.16b //CTR block 1
add v30.4s, v30.4s, v31.4s //CTR block 1
rev32 v2.16b, v30.16b //CTR block 2
add v30.4s, v30.4s, v31.4s //CTR block 2
ldp q26, q27, [x11, #0] //load rk0, rk1
rev32 v3.16b, v30.16b //CTR block 3
add v30.4s, v30.4s, v31.4s //CTR block 3
rev32 v4.16b, v30.16b //CTR block 4
add v30.4s, v30.4s, v31.4s //CTR block 4
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 0
rev32 v5.16b, v30.16b //CTR block 5
add v30.4s, v30.4s, v31.4s //CTR block 5
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 0
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 0
rev32 v6.16b, v30.16b //CTR block 6
add v30.4s, v30.4s, v31.4s //CTR block 6
rev32 v7.16b, v30.16b //CTR block 7
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 0
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 0
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 0
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 0
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 0
ldp q28, q26, [x11, #32] //load rk2, rk3
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 1
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 1
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 1
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 1
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 1
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 1
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 1
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 1
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 2
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 2
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 2
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 2
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 2
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 2
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 2
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 2
ldp q27, q28, [x11, #64] //load rk4, rk5
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 3
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 3
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 3
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 3
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 3
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 3
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 3
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 3
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 4
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 4
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 4
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 4
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 4
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 4
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 4
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 5
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 5
ldp q26, q27, [x11, #96] //load rk6, rk7
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 5
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 5
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 5
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 5
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 5
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 5
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 6
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 6
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 6
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 6
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 6
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 6
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 6
ldp q28, q26, [x11, #128] //load rk8, rk9
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 7
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 7
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 7
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 7
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 7
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 7
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 7
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 7
and x5, x5, #0xffffffffffffff80 //number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 8
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 8
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 8
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 8
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 8
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 8
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 8
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 8
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 9
ld1 { v19.16b}, [x3]
ext v19.16b, v19.16b, v19.16b, #8
rev64 v19.16b, v19.16b
ldp q27, q28, [x11, #160] //load rk10, rk11
add x4, x0, x1, lsr #3 //end_input_ptr
add x5, x5, x0
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 9
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 9
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 9
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 9
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 9
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 9
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 9
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 4 - round 10
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 7 - round 10
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 5 - round 10
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 1 - round 10
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 2 - round 10
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 0 - round 10
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 6 - round 10
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 3 - round 10
ldp q26, q27, [x11, #192] //load rk12, rk13
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 0 - round 11
add v30.4s, v30.4s, v31.4s //CTR block 7
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 7 - round 11
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 3 - round 11
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 1 - round 11
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 5 - round 11
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 4 - round 11
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 2 - round 11
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 6 - round 11
ldr q28, [x11, #224] //load rk14
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 1 - round 12
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 4 - round 12
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 5 - round 12
cmp x0, x5 //check if we have <= 8 blocks
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 3 - round 12
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 2 - round 12
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 6 - round 12
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 0 - round 12
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 7 - round 12
aese v5.16b, v27.16b //AES block 5 - round 13
aese v1.16b, v27.16b //AES block 1 - round 13
aese v2.16b, v27.16b //AES block 2 - round 13
aese v0.16b, v27.16b //AES block 0 - round 13
aese v4.16b, v27.16b //AES block 4 - round 13
aese v6.16b, v27.16b //AES block 6 - round 13
aese v3.16b, v27.16b //AES block 3 - round 13
aese v7.16b, v27.16b //AES block 7 - round 13
b.ge L256_dec_tail //handle tail
ldp q8, q9, [x0], #32 //AES block 0, 1 - load ciphertext
ldp q10, q11, [x0], #32 //AES block 2, 3 - load ciphertext
ldp q12, q13, [x0], #32 //AES block 4, 5 - load ciphertext
ldp q14, q15, [x0], #32 //AES block 6, 7 - load ciphertext
cmp x0, x5 //check if we have <= 8 blocks
.long 0xce017121 //eor3 v1.16b, v9.16b, v1.16b, v28.16b //AES block 1 - result
.long 0xce007100 //eor3 v0.16b, v8.16b, v0.16b, v28.16b //AES block 0 - result
stp q0, q1, [x2], #32 //AES block 0, 1 - store result
rev32 v0.16b, v30.16b //CTR block 8
add v30.4s, v30.4s, v31.4s //CTR block 8
.long 0xce037163 //eor3 v3.16b, v11.16b, v3.16b, v28.16b //AES block 3 - result
.long 0xce0571a5 //eor3 v5.16b, v13.16b, v5.16b, v28.16b //AES block 5 - result
.long 0xce047184 //eor3 v4.16b, v12.16b, v4.16b, v28.16b //AES block 4 - result
rev32 v1.16b, v30.16b //CTR block 9
add v30.4s, v30.4s, v31.4s //CTR block 9
.long 0xce027142 //eor3 v2.16b, v10.16b, v2.16b, v28.16b //AES block 2 - result
stp q2, q3, [x2], #32 //AES block 2, 3 - store result
rev32 v2.16b, v30.16b //CTR block 10
add v30.4s, v30.4s, v31.4s //CTR block 10
.long 0xce0671c6 //eor3 v6.16b, v14.16b, v6.16b, v28.16b //AES block 6 - result
rev32 v3.16b, v30.16b //CTR block 11
add v30.4s, v30.4s, v31.4s //CTR block 11
stp q4, q5, [x2], #32 //AES block 4, 5 - store result
.long 0xce0771e7 //eor3 v7.16b, v15.16b, v7.16b, v28.16b //AES block 7 - result
stp q6, q7, [x2], #32 //AES block 6, 7 - store result
rev32 v4.16b, v30.16b //CTR block 12
add v30.4s, v30.4s, v31.4s //CTR block 12
b.ge L256_dec_prepretail //do prepretail
L256_dec_main_loop: //main loop start
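// Main decrypt loop: run the AES-256 rounds for the next 8 counter blocks
// (v0-v7) interleaved with the GHASH update over the 8 ciphertext blocks
// loaded in the previous iteration (v8-v15), then load the next 8
// ciphertext blocks, xor with the keystream and store the plaintext.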
rev32 v5.16b, v30.16b //CTR block 8k+13
ldp q26, q27, [x11, #0] //load rk0, rk1
add v30.4s, v30.4s, v31.4s //CTR block 8k+13
rev64 v9.16b, v9.16b //GHASH block 8k+1
ldr q23, [x6, #144] //load h7l | h7h
ldr q25, [x6, #176] //load h8l | h8h
rev32 v6.16b, v30.16b //CTR block 8k+14
add v30.4s, v30.4s, v31.4s //CTR block 8k+14
rev64 v8.16b, v8.16b //GHASH block 8k
ext v19.16b, v19.16b, v19.16b, #8 //PRE 0
rev64 v12.16b, v12.16b //GHASH block 8k+4
rev64 v11.16b, v11.16b //GHASH block 8k+3
rev32 v7.16b, v30.16b //CTR block 8k+15
rev64 v15.16b, v15.16b //GHASH block 8k+7
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 0
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 0
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 0
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 0
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 0
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 0
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 0
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 0
ldp q28, q26, [x11, #32] //load rk2, rk3
eor v8.16b, v8.16b, v19.16b //PRE 1
ldr q20, [x6, #96] //load h5l | h5h
ldr q22, [x6, #128] //load h6l | h6h
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 1
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 1
rev64 v10.16b, v10.16b //GHASH block 8k+2
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 1
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 1
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 1
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 1
trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 1
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 1
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 2
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 2
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 2
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 2
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 2
pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 2
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 2
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 2
ldp q27, q28, [x11, #64] //load rk4, rk5
pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 3
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 3
pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high
pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 3
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 3
pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 3
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 3
trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 3
eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 4
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 3
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 4
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 4
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 4
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 4
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 4
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 4
ldr q21, [x6, #112] //load h6k | h5k
ldr q24, [x6, #160] //load h8k | h7k
eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid
pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low
ldp q26, q27, [x11, #96] //load rk6, rk7
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 5
eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 5
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 5
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 5
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 5
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 5
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 5
.long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high
trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
rev64 v13.16b, v13.16b //GHASH block 8k+5
pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid
pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid
trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 6
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 6
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 5
trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 6
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 6
eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 6
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 6
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 6
pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid
pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low
ldr q23, [x6, #48] //load h3l | h3h
ldr q25, [x6, #80] //load h4l | h4h
rev64 v14.16b, v14.16b //GHASH block 8k+6
eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 7
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 7
ldp q28, q26, [x11, #128] //load rk8, rk9
ldr q20, [x6] //load h1l | h1h
ldr q22, [x6, #32] //load h2l | h2h
.long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 7
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 7
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 7
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 7
ldr q21, [x6, #16] //load h2k | h1k
ldr q24, [x6, #64] //load h4k | h3k
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 7
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 7
pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high
pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low
trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 8
pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 8
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 8
pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 8
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 8
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 8
pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high
trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 8
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 8
ldp q27, q28, [x11, #160] //load rk10, rk11
pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low
trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
add v30.4s, v30.4s, v31.4s //CTR block 8k+15
.long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 9
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 9
eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 9
ldp q8, q9, [x0], #32 //AES block 8k+8, 8k+9 - load ciphertext
eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 9
pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 9
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 9
pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid
pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid
pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high
pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 10
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 10
pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 9
.long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 9
.long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
.long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 10
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 10
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 10
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 10
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 10
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 10
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low
rev32 v20.16b, v30.16b //CTR block 8k+16
ldr d16, [x10] //MODULO - load modulo constant
add v30.4s, v30.4s, v31.4s //CTR block 8k+16
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 11
ldp q26, q27, [x11, #192] //load rk12, rk13
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 11
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 11
.long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
rev32 v22.16b, v30.16b //CTR block 8k+17
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 11
ldp q10, q11, [x0], #32 //AES block 8k+10, 8k+11 - load ciphertext
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 11
ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 11
add v30.4s, v30.4s, v31.4s //CTR block 8k+17
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 11
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 12
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 12
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 12
rev32 v23.16b, v30.16b //CTR block 8k+18
add v30.4s, v30.4s, v31.4s //CTR block 8k+18
pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
.long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 12
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 11
ldr q28, [x11, #224] //load rk14
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 12
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 12
.long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 12
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 12
ldp q12, q13, [x0], #32 //AES block 8k+12, 8k+13 - load ciphertext
aese v1.16b, v27.16b //AES block 8k+9 - round 13
aese v2.16b, v27.16b //AES block 8k+10 - round 13
ldp q14, q15, [x0], #32 //AES block 8k+14, 8k+15 - load ciphertext
aese v0.16b, v27.16b //AES block 8k+8 - round 13
aese v5.16b, v27.16b //AES block 8k+13 - round 13
rev32 v25.16b, v30.16b //CTR block 8k+19
.long 0xce027142 //eor3 v2.16b, v10.16b, v2.16b, v28.16b //AES block 8k+10 - result
.long 0xce017121 //eor3 v1.16b, v9.16b, v1.16b, v28.16b //AES block 8k+9 - result
ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
aese v7.16b, v27.16b //AES block 8k+15 - round 13
add v30.4s, v30.4s, v31.4s //CTR block 8k+19
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
aese v4.16b, v27.16b //AES block 8k+12 - round 13
.long 0xce0571a5 //eor3 v5.16b, v13.16b, v5.16b, v28.16b //AES block 8k+13 - result
.long 0xce007100 //eor3 v0.16b, v8.16b, v0.16b, v28.16b //AES block 8k+8 - result
aese v3.16b, v27.16b //AES block 8k+11 - round 13
stp q0, q1, [x2], #32 //AES block 8k+8, 8k+9 - store result
mov v0.16b, v20.16b //CTR block 8k+16
.long 0xce047184 //eor3 v4.16b, v12.16b, v4.16b, v28.16b //AES block 8k+12 - result
.long 0xce154673 //eor3 v19.16b, v19.16b, v21.16b, v17.16b //MODULO - fold into low
.long 0xce037163 //eor3 v3.16b, v11.16b, v3.16b, v28.16b //AES block 8k+11 - result
stp q2, q3, [x2], #32 //AES block 8k+10, 8k+11 - store result
mov v3.16b, v25.16b //CTR block 8k+19
mov v2.16b, v23.16b //CTR block 8k+18
aese v6.16b, v27.16b //AES block 8k+14 - round 13
mov v1.16b, v22.16b //CTR block 8k+17
stp q4, q5, [x2], #32 //AES block 8k+12, 8k+13 - store result
.long 0xce0771e7 //eor3 v7.16b, v15.16b, v7.16b, v28.16b //AES block 8k+15 - result
.long 0xce0671c6 //eor3 v6.16b, v14.16b, v6.16b, v28.16b //AES block 8k+14 - result
rev32 v4.16b, v30.16b //CTR block 8k+20
add v30.4s, v30.4s, v31.4s //CTR block 8k+20
cmp x0, x5 //LOOP CONTROL
stp q6, q7, [x2], #32 //AES block 8k+14, 8k+15 - store result
b.lt L256_dec_main_loop
L256_dec_prepretail: //PREPRETAIL
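// Pre-pretail: finish the GHASH of the 8 ciphertext blocks already loaded
// and run the AES rounds for the final 8 counter blocks; no ciphertext is
// loaded or stored here, that is left to the tail.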
ldp q26, q27, [x11, #0] //load rk0, rk1
rev32 v5.16b, v30.16b //CTR block 8k+13
add v30.4s, v30.4s, v31.4s //CTR block 8k+13
rev64 v12.16b, v12.16b //GHASH block 8k+4
ldr q21, [x6, #112] //load h6k | h5k
ldr q24, [x6, #160] //load h8k | h7k
rev32 v6.16b, v30.16b //CTR block 8k+14
rev64 v8.16b, v8.16b //GHASH block 8k
add v30.4s, v30.4s, v31.4s //CTR block 8k+14
ext v19.16b, v19.16b, v19.16b, #8 //PRE 0
ldr q23, [x6, #144] //load h7l | h7h
ldr q25, [x6, #176] //load h8l | h8h
rev64 v9.16b, v9.16b //GHASH block 8k+1
rev32 v7.16b, v30.16b //CTR block 8k+15
rev64 v10.16b, v10.16b //GHASH block 8k+2
ldr q20, [x6, #96] //load h5l | h5h
ldr q22, [x6, #128] //load h6l | h6h
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 0
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 0
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 0
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 0
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 0
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 0
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 1
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 0
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 0
ldp q28, q26, [x11, #32] //load rk2, rk3
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 1
eor v8.16b, v8.16b, v19.16b //PRE 1
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 1
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 1
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 1
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 1
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 1
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 1
pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high
trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low
rev64 v11.16b, v11.16b //GHASH block 8k+3
pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 2
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 2
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 2
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 2
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 2
pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 2
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 3
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 3
rev64 v14.16b, v14.16b //GHASH block 8k+6
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 3
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 2
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 3
pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high
trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 2
ldp q27, q28, [x11, #64] //load rk4, rk5
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 3
pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 3
eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high
eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 3
pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 3
.long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high
trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid
pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid
pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low
eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low
pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 4
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 4
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low
ldr q20, [x6] //load h1l | h1h
ldr q22, [x6, #32] //load h2l | h2h
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 4
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 4
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 4
eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid
eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 5
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 4
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 5
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 4
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 4
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 5
pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 5
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 5
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 5
pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 5
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 5
ldp q26, q27, [x11, #96] //load rk6, rk7
ldr q23, [x6, #48] //load h3l | h3h
ldr q25, [x6, #80] //load h4l | h4h
rev64 v15.16b, v15.16b //GHASH block 8k+7
rev64 v13.16b, v13.16b //GHASH block 8k+5
.long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid
trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 6
ldr q21, [x6, #16] //load h2k | h1k
ldr q24, [x6, #64] //load h4k | h3k
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 6
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 6
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 6
pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high
pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high
pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low
trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid
pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low
trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 7
pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 6
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 6
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 6
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 6
ldp q28, q26, [x11, #128] //load rk8, rk9
pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 7
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 7
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 7
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 7
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 7
.long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 7
trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 7
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 8
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 8
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 8
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 8
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 8
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 8
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 8
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 9
eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 9
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 9
eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 9
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 9
pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 8
pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid
pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high
pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid
pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid
pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low
ldp q27, q28, [x11, #160] //load rk10, rk11
.long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low
.long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 9
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 9
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 9
.long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high
.long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low
ldr d16, [x10] //MODULO - load modulo constant
.long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid
aese v4.16b, v27.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 10
aese v6.16b, v27.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 10
aese v5.16b, v27.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 10
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 10
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 10
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 10
.long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up
aese v7.16b, v27.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 10
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 10
ldp q26, q27, [x11, #192] //load rk12, rk13
ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 11
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 11
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 11
pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 11
aese v7.16b, v28.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 11
aese v6.16b, v28.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 11
aese v4.16b, v28.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 11
aese v5.16b, v28.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 11
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b //AES block 8k+11 - round 12
.long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
aese v3.16b, v27.16b //AES block 8k+11 - round 13
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b //AES block 8k+10 - round 12
aese v6.16b, v26.16b
aesmc v6.16b, v6.16b //AES block 8k+14 - round 12
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
aese v4.16b, v26.16b
aesmc v4.16b, v4.16b //AES block 8k+12 - round 12
aese v7.16b, v26.16b
aesmc v7.16b, v7.16b //AES block 8k+15 - round 12
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b //AES block 8k+8 - round 12
ldr q28, [x11, #224] //load rk14
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b //AES block 8k+9 - round 12
aese v4.16b, v27.16b //AES block 8k+12 - round 13
ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
aese v5.16b, v26.16b
aesmc v5.16b, v5.16b //AES block 8k+13 - round 12
aese v6.16b, v27.16b //AES block 8k+14 - round 13
aese v2.16b, v27.16b //AES block 8k+10 - round 13
aese v1.16b, v27.16b //AES block 8k+9 - round 13
aese v5.16b, v27.16b //AES block 8k+13 - round 13
.long 0xce154673 //eor3 v19.16b, v19.16b, v21.16b, v17.16b //MODULO - fold into low
add v30.4s, v30.4s, v31.4s //CTR block 8k+15
aese v7.16b, v27.16b //AES block 8k+15 - round 13
aese v0.16b, v27.16b //AES block 8k+8 - round 13
L256_dec_tail: //TAIL
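// Decrypt tail: 1 to 8 blocks remain (x5 = bytes left). As in the encrypt
// tail, the cmp/mov ladder shifts the precomputed keystream blocks down so
// the keystream for the last, possibly partial, block ends up in v7,
// winding the counter back for each unused block.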
ext v16.16b, v19.16b, v19.16b, #8 //prepare final partial tag
sub x5, x4, x0 //number of bytes left to process
cmp x5, #112
ldr q9, [x0], #16 //AES block 8k+8 - load ciphertext
ldp q24, q25, [x6, #160] //load h8k | h7k and h8l | h8h
mov v29.16b, v28.16b
ldp q20, q21, [x6, #96] //load h5l | h5h and h6k | h5k
.long 0xce00752c //eor3 v12.16b, v9.16b, v0.16b, v29.16b //AES block 8k+8 - result
ldp q22, q23, [x6, #128] //load h6l | h6h and h7l | h7h
b.gt L256_dec_blocks_more_than_7
mov v7.16b, v6.16b
sub v30.4s, v30.4s, v31.4s
mov v6.16b, v5.16b
mov v5.16b, v4.16b
mov v4.16b, v3.16b
movi v19.8b, #0
movi v17.8b, #0
movi v18.8b, #0
mov v3.16b, v2.16b
cmp x5, #96
mov v2.16b, v1.16b
b.gt L256_dec_blocks_more_than_6
mov v7.16b, v6.16b
mov v6.16b, v5.16b
mov v5.16b, v4.16b
cmp x5, #80
sub v30.4s, v30.4s, v31.4s
mov v4.16b, v3.16b
mov v3.16b, v1.16b
b.gt L256_dec_blocks_more_than_5
cmp x5, #64
mov v7.16b, v6.16b
sub v30.4s, v30.4s, v31.4s
mov v6.16b, v5.16b
mov v5.16b, v4.16b
mov v4.16b, v1.16b
b.gt L256_dec_blocks_more_than_4
sub v30.4s, v30.4s, v31.4s
mov v7.16b, v6.16b
cmp x5, #48
mov v6.16b, v5.16b
mov v5.16b, v1.16b
b.gt L256_dec_blocks_more_than_3
ldr q24, [x6, #64] //load h4k | h3k
sub v30.4s, v30.4s, v31.4s
mov v7.16b, v6.16b
cmp x5, #32
mov v6.16b, v1.16b
b.gt L256_dec_blocks_more_than_2
sub v30.4s, v30.4s, v31.4s
mov v7.16b, v1.16b
cmp x5, #16
b.gt L256_dec_blocks_more_than_1
sub v30.4s, v30.4s, v31.4s
ldr q21, [x6, #16] //load h2k | h1k
b L256_dec_blocks_less_than_1
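// Each handler below decrypts one more block with the next keystream
// register, stores the plaintext, and folds the incoming ciphertext into
// the GHASH state with the matching power of H, then falls through to the
// next handler.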
L256_dec_blocks_more_than_7: //blocks left > 7
rev64 v8.16b, v9.16b //GHASH final-7 block
ldr q9, [x0], #16 //AES final-6 block - load ciphertext
st1 { v12.16b}, [x2], #16 //AES final-7 block - store result
ins v18.d[0], v24.d[1] //GHASH final-7 block - mid
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-7 block - mid
.long 0xce01752c //eor3 v12.16b, v9.16b, v1.16b, v29.16b //AES final-6 block - result
pmull2 v17.1q, v8.2d, v25.2d //GHASH final-7 block - high
eor v27.8b, v27.8b, v8.8b //GHASH final-7 block - mid
	movi	v16.8b, #0                        //suppress further partial tag feed in
pmull v19.1q, v8.1d, v25.1d //GHASH final-7 block - low
pmull v18.1q, v27.1d, v18.1d //GHASH final-7 block - mid
L256_dec_blocks_more_than_6: //blocks left > 6
rev64 v8.16b, v9.16b //GHASH final-6 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ldr q9, [x0], #16 //AES final-5 block - load ciphertext
	movi	v16.8b, #0                        //suppress further partial tag feed in
ins v27.d[0], v8.d[1] //GHASH final-6 block - mid
st1 { v12.16b}, [x2], #16 //AES final-6 block - store result
pmull2 v28.1q, v8.2d, v23.2d //GHASH final-6 block - high
pmull v26.1q, v8.1d, v23.1d //GHASH final-6 block - low
.long 0xce02752c //eor3 v12.16b, v9.16b, v2.16b, v29.16b //AES final-5 block - result
eor v19.16b, v19.16b, v26.16b //GHASH final-6 block - low
eor v27.8b, v27.8b, v8.8b //GHASH final-6 block - mid
pmull v27.1q, v27.1d, v24.1d //GHASH final-6 block - mid
eor v18.16b, v18.16b, v27.16b //GHASH final-6 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-6 block - high
L256_dec_blocks_more_than_5: //blocks left > 5
rev64 v8.16b, v9.16b //GHASH final-5 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
pmull2 v28.1q, v8.2d, v22.2d //GHASH final-5 block - high
ins v27.d[0], v8.d[1] //GHASH final-5 block - mid
ldr q9, [x0], #16 //AES final-4 block - load ciphertext
eor v27.8b, v27.8b, v8.8b //GHASH final-5 block - mid
st1 { v12.16b}, [x2], #16 //AES final-5 block - store result
pmull v26.1q, v8.1d, v22.1d //GHASH final-5 block - low
ins v27.d[1], v27.d[0] //GHASH final-5 block - mid
pmull2 v27.1q, v27.2d, v21.2d //GHASH final-5 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-5 block - high
.long 0xce03752c //eor3 v12.16b, v9.16b, v3.16b, v29.16b //AES final-4 block - result
eor v19.16b, v19.16b, v26.16b //GHASH final-5 block - low
eor v18.16b, v18.16b, v27.16b //GHASH final-5 block - mid
	movi	v16.8b, #0                        //suppress further partial tag feed in
L256_dec_blocks_more_than_4: //blocks left > 4
rev64 v8.16b, v9.16b //GHASH final-4 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-4 block - mid
ldr q9, [x0], #16 //AES final-3 block - load ciphertext
	movi	v16.8b, #0                        //suppress further partial tag feed in
pmull v26.1q, v8.1d, v20.1d //GHASH final-4 block - low
pmull2 v28.1q, v8.2d, v20.2d //GHASH final-4 block - high
eor v27.8b, v27.8b, v8.8b //GHASH final-4 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-4 block - high
pmull v27.1q, v27.1d, v21.1d //GHASH final-4 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-4 block - low
st1 { v12.16b}, [x2], #16 //AES final-4 block - store result
eor v18.16b, v18.16b, v27.16b //GHASH final-4 block - mid
.long 0xce04752c //eor3 v12.16b, v9.16b, v4.16b, v29.16b //AES final-3 block - result
L256_dec_blocks_more_than_3: //blocks left > 3
ldr q25, [x6, #80] //load h4l | h4h
rev64 v8.16b, v9.16b //GHASH final-3 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ldr q9, [x0], #16 //AES final-2 block - load ciphertext
ldr q24, [x6, #64] //load h4k | h3k
ins v27.d[0], v8.d[1] //GHASH final-3 block - mid
st1 { v12.16b}, [x2], #16 //AES final-3 block - store result
.long 0xce05752c //eor3 v12.16b, v9.16b, v5.16b, v29.16b //AES final-2 block - result
eor v27.8b, v27.8b, v8.8b //GHASH final-3 block - mid
ins v27.d[1], v27.d[0] //GHASH final-3 block - mid
pmull v26.1q, v8.1d, v25.1d //GHASH final-3 block - low
pmull2 v28.1q, v8.2d, v25.2d //GHASH final-3 block - high
	movi	v16.8b, #0                        //suppress further partial tag feed in
pmull2 v27.1q, v27.2d, v24.2d //GHASH final-3 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-3 block - low
eor v17.16b, v17.16b, v28.16b //GHASH final-3 block - high
eor v18.16b, v18.16b, v27.16b //GHASH final-3 block - mid
L256_dec_blocks_more_than_2: //blocks left > 2
rev64 v8.16b, v9.16b //GHASH final-2 block
ldr q23, [x6, #48] //load h3l | h3h
ldr q9, [x0], #16 //AES final-1 block - load ciphertext
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-2 block - mid
pmull v26.1q, v8.1d, v23.1d //GHASH final-2 block - low
st1 { v12.16b}, [x2], #16 //AES final-2 block - store result
.long 0xce06752c //eor3 v12.16b, v9.16b, v6.16b, v29.16b //AES final-1 block - result
eor v27.8b, v27.8b, v8.8b //GHASH final-2 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-2 block - low
	movi	v16.8b, #0                        //suppress further partial tag feed in
pmull v27.1q, v27.1d, v24.1d //GHASH final-2 block - mid
pmull2 v28.1q, v8.2d, v23.2d //GHASH final-2 block - high
eor v18.16b, v18.16b, v27.16b //GHASH final-2 block - mid
eor v17.16b, v17.16b, v28.16b //GHASH final-2 block - high
L256_dec_blocks_more_than_1: //blocks left > 1
rev64 v8.16b, v9.16b //GHASH final-1 block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v27.d[0], v8.d[1] //GHASH final-1 block - mid
ldr q22, [x6, #32] //load h2l | h2h
eor v27.8b, v27.8b, v8.8b //GHASH final-1 block - mid
ldr q9, [x0], #16 //AES final block - load ciphertext
st1 { v12.16b}, [x2], #16 //AES final-1 block - store result
ldr q21, [x6, #16] //load h2k | h1k
pmull v26.1q, v8.1d, v22.1d //GHASH final-1 block - low
ins v27.d[1], v27.d[0] //GHASH final-1 block - mid
eor v19.16b, v19.16b, v26.16b //GHASH final-1 block - low
.long 0xce07752c //eor3 v12.16b, v9.16b, v7.16b, v29.16b //AES final block - result
pmull2 v28.1q, v8.2d, v22.2d //GHASH final-1 block - high
pmull2 v27.1q, v27.2d, v21.2d //GHASH final-1 block - mid
	movi	v16.8b, #0                        //suppress further partial tag feed in
eor v17.16b, v17.16b, v28.16b //GHASH final-1 block - high
eor v18.16b, v18.16b, v27.16b //GHASH final-1 block - mid
L256_dec_blocks_less_than_1: //blocks left <= 1
ld1 { v26.16b}, [x2] //load existing bytes where the possibly partial last block is to be stored
mvn x7, xzr //temp0_x = 0xffffffffffffffff
and x1, x1, #127 //bit_length %= 128
sub x1, x1, #128 //bit_length -= 128
rev32 v30.16b, v30.16b
str q30, [x16] //store the updated counter
neg x1, x1 //bit_length = 128 - #bits in input (in range [1,128])
and x1, x1, #127 //bit_length %= 128
lsr x7, x7, x1 //temp0_x is mask for top 64b of last block
cmp x1, #64
mvn x8, xzr //temp1_x = 0xffffffffffffffff
csel x14, x7, xzr, lt
csel x13, x8, x7, lt
mov v0.d[0], x13 //ctr0b is mask for last block
mov v0.d[1], x14
and v9.16b, v9.16b, v0.16b //possibly partial last block has zeroes in highest bits
ldr q20, [x6] //load h1l | h1h
bif v12.16b, v26.16b, v0.16b //insert existing bytes in top end of result before storing
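//Illustrative C for the mask construction above (editor sketch, not the
//generated code; it assumes x1 holds the total input bit length, and relies
//on the AArch64 register-form lsr shifting modulo 64 in the >=64 case):
//
//    uint64_t missing = (128 - (total_bits & 127)) & 127; // bits to mask off
//    uint64_t lo_mask, hi_mask;
//    if (missing < 64) {                     // more than 8 valid bytes
//        lo_mask = ~0ULL;
//        hi_mask = ~0ULL >> missing;
//    } else {                                // at most 8 valid bytes
//        lo_mask = ~0ULL >> (missing - 64);
//        hi_mask = 0;
//    }
//    ciphertext &= mask;                               // the "and" above
//    result = (result & mask) | (existing & ~mask);    // the "bif" above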
rev64 v8.16b, v9.16b //GHASH final block
eor v8.16b, v8.16b, v16.16b //feed in partial tag
ins v16.d[0], v8.d[1] //GHASH final block - mid
pmull2 v28.1q, v8.2d, v20.2d //GHASH final block - high
eor v16.8b, v16.8b, v8.8b //GHASH final block - mid
pmull v26.1q, v8.1d, v20.1d //GHASH final block - low
eor v17.16b, v17.16b, v28.16b //GHASH final block - high
pmull v16.1q, v16.1d, v21.1d //GHASH final block - mid
eor v18.16b, v18.16b, v16.16b //GHASH final block - mid
ldr d16, [x10] //MODULO - load modulo constant
eor v19.16b, v19.16b, v26.16b //GHASH final block - low
pmull v21.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
eor v14.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up
ext v17.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
st1 { v12.16b}, [x2] //store all 16B
eor v18.16b, v18.16b, v14.16b //MODULO - karatsuba tidy up
eor v21.16b, v17.16b, v21.16b //MODULO - fold into mid
eor v18.16b, v18.16b, v21.16b //MODULO - fold into mid
pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
ext v18.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
eor v19.16b, v19.16b, v17.16b //MODULO - fold into low
eor v19.16b, v19.16b, v18.16b //MODULO - fold into low
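//The MODULO steps above fold the wide GHASH accumulator (v17 = high part,
//v18 = middle/Karatsuba term, v19 = low part) back to 128 bits modulo the
//GHASH polynomial
//    g(x) = x^128 + x^127 + x^126 + x^121 + 1.
//In the bit-reflected representation used by GHASH this is done with two
//carry-less multiplications by the 64-bit reduction constant loaded into d16
//from [x10] (conventionally 0xC200000000000000): once to fold the high half
//into the middle, and once to fold the middle into the low half.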
ext v19.16b, v19.16b, v19.16b, #8
rev64 v19.16b, v19.16b
st1 { v19.16b }, [x3]
mov x0, x9
ldp d10, d11, [sp, #16]
ldp d12, d13, [sp, #32]
ldp d14, d15, [sp, #48]
ldp d8, d9, [sp], #80
ret
L256_dec_ret:
mov w0, #0x0
ret
.byte 65,69,83,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,65,82,77,118,56,44,32,83,80,68,88,32,66,83,68,45,51,45,67,108,97,117,115,101,32,98,121,32,60,120,105,97,111,107,97,110,103,46,113,105,97,110,64,97,114,109,46,99,111,109,62,0
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
File: thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/p256_beeu-armv8-asm.S (7,433 bytes)
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include "openssl/arm_arch.h"
.text
.globl beeu_mod_inverse_vartime
.align 4
beeu_mod_inverse_vartime:
// Reserve enough space for 14 8-byte registers on the stack
// in the first stp call for x29, x30.
// Then store the remaining callee-saved registers.
//
// | x29 | x30 | x19 | x20 | ... | x27 | x28 | x0 | x2 |
// ^ ^
// sp <------------------- 112 bytes ----------------> old sp
// x29 (FP)
//
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-112]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
stp x0,x2,[sp,#96]
// B = b3..b0 := a
ldp x25,x26,[x1]
ldp x27,x28,[x1,#16]
// n3..n0 := n
	// Note: the values of the input parameters are changed below.
ldp x0,x1,[x2]
ldp x2,x30,[x2,#16]
// A = a3..a0 := n
mov x21, x0
mov x22, x1
mov x23, x2
mov x24, x30
// X = x4..x0 := 1
mov x3, #1
eor x4, x4, x4
eor x5, x5, x5
eor x6, x6, x6
eor x7, x7, x7
// Y = y4..y0 := 0
eor x8, x8, x8
eor x9, x9, x9
eor x10, x10, x10
eor x11, x11, x11
eor x12, x12, x12
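	// Illustrative C sketch of the binary extended-Euclidean loop that
	// follows (editor addition, not generated code).  For clarity the
	// values are shown as single 64-bit words with n < 2^62 so the sums
	// cannot overflow; the assembly keeps A/B in four registers (256 bits)
	// and X/Y in five (320 bits), and strips several trailing zero bits per
	// iteration instead of one.
	// Loop invariants:  X*a == B (mod n)  and  (-Y)*a == A (mod n).
	//
	//   uint64_t beeu_mod_inverse(uint64_t a, uint64_t n) { // n odd, 0 < a < n
	//       uint64_t A = n, B = a, X = 1, Y = 0;
	//       while (B != 0) {
	//           while ((B & 1) == 0) {                 // halve B ...
	//               B >>= 1;
	//               X = (X & 1) ? (X >> 1) + (n >> 1) + 1 : X >> 1; // ... and X mod n
	//           }
	//           while ((A & 1) == 0) {                 // halve A ...
	//               A >>= 1;
	//               Y = (Y & 1) ? (Y >> 1) + (n >> 1) + 1 : Y >> 1; // ... and Y mod n
	//           }
	//           if (B >= A) { B -= A; X += Y; }        // Lbeeu_B_greater_than_A
	//           else        { A -= B; Y += X; }
	//       }
	//       if (A != 1) return 0;                      // a and n are not coprime
	//       return (n - (Y % n)) % n;                  // out = -Y mod n
	//   }
	//
	// (The assembly writes the result through the saved out pointer and
	// returns 1 on success, 0 on error, instead of returning the value.)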
Lbeeu_loop:
	// if B == 0, jump to Lbeeu_loop_end
orr x14, x25, x26
orr x14, x14, x27
// reverse the bit order of x25. This is needed for clz after this macro
rbit x15, x25
orr x14, x14, x28
cbz x14,Lbeeu_loop_end
// 0 < B < |n|,
// 0 < A <= |n|,
// (1) X*a == B (mod |n|),
// (2) (-1)*Y*a == A (mod |n|)
// Now divide B by the maximum possible power of two in the
// integers, and divide X by the same value mod |n|.
// When we're done, (1) still holds.
// shift := number of trailing 0s in x25
	// ( = number of leading 0s in x15; see the "rbit" instruction above)
clz x13, x15
// If there is no shift, goto shift_A_Y
cbz x13, Lbeeu_shift_A_Y
// Shift B right by "x13" bits
neg x14, x13
lsr x25, x25, x13
lsl x15, x26, x14
lsr x26, x26, x13
lsl x19, x27, x14
orr x25, x25, x15
lsr x27, x27, x13
lsl x20, x28, x14
orr x26, x26, x19
lsr x28, x28, x13
orr x27, x27, x20
// Shift X right by "x13" bits, adding n whenever X becomes odd.
// x13--;
// x14 := 0; needed in the addition to the most significant word in SHIFT1
eor x14, x14, x14
Lbeeu_shift_loop_X:
tbz x3, #0, Lshift1_0
adds x3, x3, x0
adcs x4, x4, x1
adcs x5, x5, x2
adcs x6, x6, x30
adc x7, x7, x14
Lshift1_0:
// var0 := [var1|var0]<64..1>;
// i.e. concatenate var1 and var0,
// extract bits <64..1> from the resulting 128-bit value
// and put them in var0
extr x3, x4, x3, #1
extr x4, x5, x4, #1
extr x5, x6, x5, #1
extr x6, x7, x6, #1
lsr x7, x7, #1
subs x13, x13, #1
bne Lbeeu_shift_loop_X
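	// The extr/lsr sequence in Lshift1_0 is a one-bit right shift of the
	// 320-bit value x7:x6:x5:x4:x3.  Roughly equivalent C (editor sketch,
	// illustration only):
	//
	//   x3 = (x3 >> 1) | (x4 << 63);
	//   x4 = (x4 >> 1) | (x5 << 63);
	//   x5 = (x5 >> 1) | (x6 << 63);
	//   x6 = (x6 >> 1) | (x7 << 63);
	//   x7 >>= 1;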
// Note: the steps above perform the same sequence as in p256_beeu-x86_64-asm.pl
// with the following differences:
// - "x13" is set directly to the number of trailing 0s in B
// (using rbit and clz instructions)
// - The loop is only used to call SHIFT1(X)
// and x13 is decreased while executing the X loop.
// - SHIFT256(B, x13) is performed before right-shifting X; they are independent
Lbeeu_shift_A_Y:
// Same for A and Y.
// Afterwards, (2) still holds.
// Reverse the bit order of x21
// x13 := number of trailing 0s in x21 (= number of leading 0s in x15)
rbit x15, x21
clz x13, x15
// If there is no shift, goto |B-A|, X+Y update
cbz x13, Lbeeu_update_B_X_or_A_Y
// Shift A right by "x13" bits
neg x14, x13
lsr x21, x21, x13
lsl x15, x22, x14
lsr x22, x22, x13
lsl x19, x23, x14
orr x21, x21, x15
lsr x23, x23, x13
lsl x20, x24, x14
orr x22, x22, x19
lsr x24, x24, x13
orr x23, x23, x20
// Shift Y right by "x13" bits, adding n whenever Y becomes odd.
// x13--;
// x14 := 0; needed in the addition to the most significant word in SHIFT1
eor x14, x14, x14
Lbeeu_shift_loop_Y:
tbz x8, #0, Lshift1_1
adds x8, x8, x0
adcs x9, x9, x1
adcs x10, x10, x2
adcs x11, x11, x30
adc x12, x12, x14
Lshift1_1:
// var0 := [var1|var0]<64..1>;
// i.e. concatenate var1 and var0,
// extract bits <64..1> from the resulting 128-bit value
// and put them in var0
extr x8, x9, x8, #1
extr x9, x10, x9, #1
extr x10, x11, x10, #1
extr x11, x12, x11, #1
lsr x12, x12, #1
subs x13, x13, #1
bne Lbeeu_shift_loop_Y
Lbeeu_update_B_X_or_A_Y:
// Try T := B - A; if cs, continue with B > A (cs: carry set = no borrow)
// Note: this is a case of unsigned arithmetic, where T fits in 4 64-bit words
// without taking a sign bit if generated. The lack of a carry would
// indicate a negative result. See, for example,
// https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/condition-codes-1-condition-flags-and-codes
subs x14, x25, x21
sbcs x15, x26, x22
sbcs x19, x27, x23
sbcs x20, x28, x24
bcs Lbeeu_B_greater_than_A
// Else A > B =>
// A := A - B; Y := Y + X; goto beginning of the loop
subs x21, x21, x25
sbcs x22, x22, x26
sbcs x23, x23, x27
sbcs x24, x24, x28
adds x8, x8, x3
adcs x9, x9, x4
adcs x10, x10, x5
adcs x11, x11, x6
adc x12, x12, x7
b Lbeeu_loop
Lbeeu_B_greater_than_A:
// Continue with B > A =>
// B := B - A; X := X + Y; goto beginning of the loop
mov x25, x14
mov x26, x15
mov x27, x19
mov x28, x20
adds x3, x3, x8
adcs x4, x4, x9
adcs x5, x5, x10
adcs x6, x6, x11
adc x7, x7, x12
b Lbeeu_loop
Lbeeu_loop_end:
	// The Euclidean algorithm loop ends when A == gcd(a,n);
	// this is 1 when a and n are coprime (i.e. have no common factor).
// Since (-1)*Y*a == A (mod |n|), Y>0
// then out = -Y mod n
// Verify that A = 1 ==> (-1)*Y*a = A = 1 (mod |n|)
// Is A-1 == 0?
// If not, fail.
sub x14, x21, #1
orr x14, x14, x22
orr x14, x14, x23
orr x14, x14, x24
cbnz x14, Lbeeu_err
// If Y>n ==> Y:=Y-n
Lbeeu_reduction_loop:
// x_i := y_i - n_i (X is no longer needed, use it as temp)
// (x14 = 0 from above)
subs x3, x8, x0
sbcs x4, x9, x1
sbcs x5, x10, x2
sbcs x6, x11, x30
sbcs x7, x12, x14
// If result is non-negative (i.e., cs = carry set = no borrow),
// y_i := x_i; goto reduce again
// else
// y_i := y_i; continue
csel x8, x3, x8, cs
csel x9, x4, x9, cs
csel x10, x5, x10, cs
csel x11, x6, x11, cs
csel x12, x7, x12, cs
bcs Lbeeu_reduction_loop
// Now Y < n (Y cannot be equal to n, since the inverse cannot be 0)
// out = -Y = n-Y
subs x8, x0, x8
sbcs x9, x1, x9
sbcs x10, x2, x10
sbcs x11, x30, x11
// Save Y in output (out (x0) was saved on the stack)
ldr x3, [sp,#96]
stp x8, x9, [x3]
stp x10, x11, [x3,#16]
// return 1 (success)
mov x0, #1
b Lbeeu_finish
Lbeeu_err:
// return 0 (error)
eor x0, x0, x0
Lbeeu_finish:
// Restore callee-saved registers, except x0, x2
add sp,x29,#0
ldp x19,x20,[sp,#16]
ldp x21,x22,[sp,#32]
ldp x23,x24,[sp,#48]
ldp x25,x26,[sp,#64]
ldp x27,x28,[sp,#80]
ldp x29,x30,[sp],#112
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
File: thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/p256-armv8-asm.S (37,795 bytes)
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include "openssl/arm_arch.h"
.section .rodata
.align 5
Lpoly:
.quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001
LRR: // 2^512 mod P precomputed for NIST P256 polynomial
.quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd
Lone_mont:
.quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe
Lone:
.quad 1,0,0,0
Lord:
.quad 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000
LordK:
.quad 0xccd1c8aaee00bc4f
.byte 69,67,80,95,78,73,83,84,90,50,53,54,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.text
// void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4],
// const BN_ULONG x2[4]);
.globl ecp_nistz256_mul_mont
.def ecp_nistz256_mul_mont
.type 32
.endef
.align 4
ecp_nistz256_mul_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-32]!
add x29,sp,#0
stp x19,x20,[sp,#16]
ldr x3,[x2] // bp[0]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_mul_mont
ldp x19,x20,[sp,#16]
ldp x29,x30,[sp],#32
AARCH64_VALIDATE_LINK_REGISTER
ret
// void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
.globl ecp_nistz256_sqr_mont
.def ecp_nistz256_sqr_mont
.type 32
.endef
.align 4
ecp_nistz256_sqr_mont:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-32]!
add x29,sp,#0
stp x19,x20,[sp,#16]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_sqr_mont
ldp x19,x20,[sp,#16]
ldp x29,x30,[sp],#32
AARCH64_VALIDATE_LINK_REGISTER
ret
// void ecp_nistz256_div_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]);
.globl ecp_nistz256_div_by_2
.def ecp_nistz256_div_by_2
.type 32
.endef
.align 4
ecp_nistz256_div_by_2:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ldp x14,x15,[x1]
ldp x16,x17,[x1,#16]
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_div_by_2
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
// void ecp_nistz256_mul_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]);
.globl ecp_nistz256_mul_by_2
.def ecp_nistz256_mul_by_2
.type 32
.endef
.align 4
ecp_nistz256_mul_by_2:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ldp x14,x15,[x1]
ldp x16,x17,[x1,#16]
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
mov x8,x14
mov x9,x15
mov x10,x16
mov x11,x17
bl __ecp_nistz256_add_to // ret = a+a // 2*a
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
// void ecp_nistz256_mul_by_3(BN_ULONG x0[4],const BN_ULONG x1[4]);
.globl ecp_nistz256_mul_by_3
.def ecp_nistz256_mul_by_3
.type 32
.endef
.align 4
ecp_nistz256_mul_by_3:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ldp x14,x15,[x1]
ldp x16,x17,[x1,#16]
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
mov x8,x14
mov x9,x15
mov x10,x16
mov x11,x17
mov x4,x14
mov x5,x15
mov x6,x16
mov x7,x17
bl __ecp_nistz256_add_to // ret = a+a // 2*a
mov x8,x4
mov x9,x5
mov x10,x6
mov x11,x7
bl __ecp_nistz256_add_to // ret += a // 2*a+a=3*a
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
// void ecp_nistz256_sub(BN_ULONG x0[4],const BN_ULONG x1[4],
// const BN_ULONG x2[4]);
.globl ecp_nistz256_sub
.def ecp_nistz256_sub
.type 32
.endef
.align 4
ecp_nistz256_sub:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ldp x14,x15,[x1]
ldp x16,x17,[x1,#16]
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_sub_from
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
// void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]);
.globl ecp_nistz256_neg
.def ecp_nistz256_neg
.type 32
.endef
.align 4
ecp_nistz256_neg:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x2,x1
mov x14,xzr // a = 0
mov x15,xzr
mov x16,xzr
mov x17,xzr
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
bl __ecp_nistz256_sub_from
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
// note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded
// to x4-x7 and b[0] - to x3
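// Editor sketch of one step of the word-by-word Montgomery multiplication
// performed below, for illustration only (the generated code interleaves
// these operations):
//
//   // p = 2^256 - 2^224 + 2^192 + 2^96 - 1, so p mod 2^64 == -1 and the
//   // Montgomery factor -p^(-1) mod 2^64 == 1, i.e. m is simply acc[0].
//   acc += a * b[i];              // 256x64-bit multiply-accumulate
//   m    = acc[0];
//   acc += m * p;                 // low limb cancels ("omit acc[0]")
//   acc >>= 64;
//
// Because p's limbs are 2^64-1, 2^32-1, 0 and 2^64-2^32+1, the m*p addition
// is built from 32-bit shifts of m (the lsl/lsr #32 and "*0xffff0001" lines)
// rather than real multiplications.  Four such steps are followed by one
// conditional subtraction of p.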
.def __ecp_nistz256_mul_mont
.type 32
.endef
.align 4
__ecp_nistz256_mul_mont:
mul x14,x4,x3 // a[0]*b[0]
umulh x8,x4,x3
mul x15,x5,x3 // a[1]*b[0]
umulh x9,x5,x3
mul x16,x6,x3 // a[2]*b[0]
umulh x10,x6,x3
mul x17,x7,x3 // a[3]*b[0]
umulh x11,x7,x3
ldr x3,[x2,#8] // b[1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adc x19,xzr,x11
mov x20,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
ldr x3,[x2,#8*(1+1)] // b[1+1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
ldr x3,[x2,#8*(2+1)] // b[2+1]
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
mul x8,x4,x3 // lo(a[0]*b[i])
adcs x15,x16,x9
mul x9,x5,x3 // lo(a[1]*b[i])
adcs x16,x17,x10 // +=acc[0]*0xffff0001
mul x10,x6,x3 // lo(a[2]*b[i])
adcs x17,x19,x11
mul x11,x7,x3 // lo(a[3]*b[i])
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts of multiplication
umulh x8,x4,x3 // hi(a[0]*b[i])
adcs x15,x15,x9
umulh x9,x5,x3 // hi(a[1]*b[i])
adcs x16,x16,x10
umulh x10,x6,x3 // hi(a[2]*b[i])
adcs x17,x17,x11
umulh x11,x7,x3 // hi(a[3]*b[i])
adc x19,x19,xzr
adds x15,x15,x8 // accumulate high parts of multiplication
lsl x8,x14,#32
adcs x16,x16,x9
lsr x9,x14,#32
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
// last reduction
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
adcs x16,x17,x10 // +=acc[0]*0xffff0001
adcs x17,x19,x11
adc x19,x20,xzr
adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x19,xzr // did it borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
// note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded
// to x4-x7
.def __ecp_nistz256_sqr_mont
.type 32
.endef
.align 4
__ecp_nistz256_sqr_mont:
// | | | | | |a1*a0| |
// | | | | |a2*a0| | |
// | |a3*a2|a3*a0| | | |
// | | | |a2*a1| | | |
// | | |a3*a1| | | | |
// *| | | | | | | | 2|
// +|a3*a3|a2*a2|a1*a1|a0*a0|
// |--+--+--+--+--+--+--+--|
	// |A7|A6|A5|A4|A3|A2|A1|A0|, where each Ax is a 64-bit word of the result
	//
	//  "can't overflow" below marks carries into the high part of a
	//  multiplication result, which cannot overflow because that high part
	//  can never be all ones.
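	// i.e. (editor illustration only):
	//   a^2 = sum_i a[i]^2 * 2^(128*i) + 2 * sum_{i<j} a[i]*a[j] * 2^(64*(i+j))
	// The cross products are accumulated once, doubled ("acc[1-6]*=2"), and
	// then the squares a[i]^2 are added in.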
mul x15,x5,x4 // a[1]*a[0]
umulh x9,x5,x4
mul x16,x6,x4 // a[2]*a[0]
umulh x10,x6,x4
mul x17,x7,x4 // a[3]*a[0]
umulh x19,x7,x4
adds x16,x16,x9 // accumulate high parts of multiplication
mul x8,x6,x5 // a[2]*a[1]
umulh x9,x6,x5
adcs x17,x17,x10
mul x10,x7,x5 // a[3]*a[1]
umulh x11,x7,x5
adc x19,x19,xzr // can't overflow
mul x20,x7,x6 // a[3]*a[2]
umulh x1,x7,x6
adds x9,x9,x10 // accumulate high parts of multiplication
mul x14,x4,x4 // a[0]*a[0]
adc x10,x11,xzr // can't overflow
adds x17,x17,x8 // accumulate low parts of multiplication
umulh x4,x4,x4
adcs x19,x19,x9
mul x9,x5,x5 // a[1]*a[1]
adcs x20,x20,x10
umulh x5,x5,x5
adc x1,x1,xzr // can't overflow
adds x15,x15,x15 // acc[1-6]*=2
mul x10,x6,x6 // a[2]*a[2]
adcs x16,x16,x16
umulh x6,x6,x6
adcs x17,x17,x17
mul x11,x7,x7 // a[3]*a[3]
adcs x19,x19,x19
umulh x7,x7,x7
adcs x20,x20,x20
adcs x1,x1,x1
adc x2,xzr,xzr
adds x15,x15,x4 // +a[i]*a[i]
adcs x16,x16,x9
adcs x17,x17,x5
adcs x19,x19,x10
adcs x20,x20,x6
lsl x8,x14,#32
adcs x1,x1,x11
lsr x9,x14,#32
adc x2,x2,x7
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
lsl x8,x14,#32
adcs x16,x17,x10 // +=acc[0]*0xffff0001
lsr x9,x14,#32
adc x17,x11,xzr // can't overflow
subs x10,x14,x8 // "*0xffff0001"
sbc x11,x14,x9
adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
adcs x15,x16,x9
adcs x16,x17,x10 // +=acc[0]*0xffff0001
adc x17,x11,xzr // can't overflow
adds x14,x14,x19 // accumulate upper half
adcs x15,x15,x20
adcs x16,x16,x1
adcs x17,x17,x2
adc x19,xzr,xzr
adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x19,xzr // did it borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
// Note that __ecp_nistz256_add_to expects both input vectors pre-loaded to
// x4-x7 and x8-x11. This is done because it's used in multiple
// contexts, e.g. in multiplication by 2 and 3...
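// Illustrative C for the add-then-reduce pattern used by this helper (editor
// sketch, not the generated code; p is the P-256 prime):
//
//   acc = a + b;                      // 257-bit sum, extra bit kept in x1
//   tmp = acc - p;                    // "adds x8,x14,#1" subtracts p[0]==2^64-1
//   ret = borrow ? acc : tmp;         // constant-time csel on the borrow flag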
.def __ecp_nistz256_add_to
.type 32
.endef
.align 4
__ecp_nistz256_add_to:
adds x14,x14,x8 // ret = a+b
adcs x15,x15,x9
adcs x16,x16,x10
adcs x17,x17,x11
adc x1,xzr,xzr // zap x1
adds x8,x14,#1 // subs x8,x4,#-1 // tmp = ret-modulus
sbcs x9,x15,x12
sbcs x10,x16,xzr
sbcs x11,x17,x13
sbcs xzr,x1,xzr // did subtraction borrow?
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ret
.def __ecp_nistz256_sub_from
.type 32
.endef
.align 4
__ecp_nistz256_sub_from:
ldp x8,x9,[x2]
ldp x10,x11,[x2,#16]
subs x14,x14,x8 // ret = a-b
sbcs x15,x15,x9
sbcs x16,x16,x10
sbcs x17,x17,x11
sbc x1,xzr,xzr // zap x1
subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adc x11,x17,x13
cmp x1,xzr // did subtraction borrow?
csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret
csel x15,x15,x9,eq
csel x16,x16,x10,eq
stp x14,x15,[x0]
csel x17,x17,x11,eq
stp x16,x17,[x0,#16]
ret
.def __ecp_nistz256_sub_morf
.type 32
.endef
.align 4
__ecp_nistz256_sub_morf:
ldp x8,x9,[x2]
ldp x10,x11,[x2,#16]
subs x14,x8,x14 // ret = b-a
sbcs x15,x9,x15
sbcs x16,x10,x16
sbcs x17,x11,x17
sbc x1,xzr,xzr // zap x1
subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adc x11,x17,x13
cmp x1,xzr // did subtraction borrow?
csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret
csel x15,x15,x9,eq
csel x16,x16,x10,eq
stp x14,x15,[x0]
csel x17,x17,x11,eq
stp x16,x17,[x0,#16]
ret
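// The helper below computes a/2 mod p.  Illustrative C (editor sketch, not
// the generated code):
//
//   if (a & 1) a += p;                // a + p is even because p is odd
//   a >>= 1;                          // 257-bit shift; the carry bit comes in on top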
.def __ecp_nistz256_div_by_2
.type 32
.endef
.align 4
__ecp_nistz256_div_by_2:
subs x8,x14,#1 // adds x8,x4,#-1 // tmp = a+modulus
adcs x9,x15,x12
adcs x10,x16,xzr
adcs x11,x17,x13
adc x1,xzr,xzr // zap x1
tst x14,#1 // is a even?
csel x14,x14,x8,eq // ret = even ? a : a+modulus
csel x15,x15,x9,eq
csel x16,x16,x10,eq
csel x17,x17,x11,eq
csel x1,xzr,x1,eq
lsr x14,x14,#1 // ret >>= 1
orr x14,x14,x15,lsl#63
lsr x15,x15,#1
orr x15,x15,x16,lsl#63
lsr x16,x16,#1
orr x16,x16,x17,lsl#63
lsr x17,x17,#1
stp x14,x15,[x0]
orr x17,x17,x1,lsl#63
stp x16,x17,[x0,#16]
ret
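// The point-doubling routine below evaluates the standard Jacobian doubling
// formulas for a = -3 (editor illustration of what the sequence of p256_*
// calls in the comments computes):
//
//   S  = 4*X*Y^2
//   M  = 3*(X - Z^2)*(X + Z^2)
//   X' = M^2 - 2*S
//   Y' = M*(S - X') - 8*Y^4
//   Z' = 2*Y*Z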
.globl ecp_nistz256_point_double
.def ecp_nistz256_point_double
.type 32
.endef
.align 5
ecp_nistz256_point_double:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
sub sp,sp,#32*4
Ldouble_shortcut:
ldp x14,x15,[x1,#32]
mov x21,x0
ldp x16,x17,[x1,#48]
mov x22,x1
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
mov x8,x14
ldr x13,[x13,#24]
mov x9,x15
ldp x4,x5,[x22,#64] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[x22,#64+16]
add x0,sp,#0
bl __ecp_nistz256_add_to // p256_mul_by_2(S, in_y);
add x0,sp,#64
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z);
ldp x8,x9,[x22]
ldp x10,x11,[x22,#16]
mov x4,x14 // put Zsqr aside for p256_sub
mov x5,x15
mov x6,x16
mov x7,x17
add x0,sp,#32
bl __ecp_nistz256_add_to // p256_add(M, Zsqr, in_x);
add x2,x22,#0
mov x14,x4 // restore Zsqr
mov x15,x5
ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont
mov x16,x6
mov x17,x7
ldp x6,x7,[sp,#0+16]
add x0,sp,#64
bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr);
add x0,sp,#0
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S);
ldr x3,[x22,#32]
ldp x4,x5,[x22,#64]
ldp x6,x7,[x22,#64+16]
add x2,x22,#32
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y);
mov x8,x14
mov x9,x15
ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[sp,#0+16]
add x0,x21,#64
bl __ecp_nistz256_add_to // p256_mul_by_2(res_z, tmp0);
add x0,sp,#96
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S);
ldr x3,[sp,#64] // forward load for p256_mul_mont
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x0,x21,#32
bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0);
add x2,sp,#64
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr);
mov x8,x14 // duplicate M
mov x9,x15
mov x10,x16
mov x11,x17
mov x4,x14 // put M aside
mov x5,x15
mov x6,x16
mov x7,x17
add x0,sp,#32
bl __ecp_nistz256_add_to
mov x8,x4 // restore M
mov x9,x5
ldr x3,[x22] // forward load for p256_mul_mont
mov x10,x6
ldp x4,x5,[sp,#0]
mov x11,x7
ldp x6,x7,[sp,#0+16]
bl __ecp_nistz256_add_to // p256_mul_by_3(M, M);
add x2,x22,#0
add x0,sp,#0
bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x);
mov x8,x14
mov x9,x15
ldp x4,x5,[sp,#32] // forward load for p256_sqr_mont
mov x10,x16
mov x11,x17
ldp x6,x7,[sp,#32+16]
add x0,sp,#96
bl __ecp_nistz256_add_to // p256_mul_by_2(tmp0, S);
add x0,x21,#0
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M);
add x2,sp,#96
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0);
add x2,sp,#0
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x);
ldr x3,[sp,#32]
mov x4,x14 // copy S
mov x5,x15
mov x6,x16
mov x7,x17
add x2,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M);
add x2,x21,#32
add x0,x21,#32
bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y);
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
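// The point-addition routine below evaluates the standard Jacobian addition
// formulas (editor illustration of what the p256_* calls in the comments
// compute):
//
//   U1 = X1*Z2^2,  U2 = X2*Z1^2,  S1 = Y1*Z2^3,  S2 = Y2*Z1^3
//   H  = U2 - U1,  R  = S2 - S1
//   X3 = R^2 - H^3 - 2*U1*H^2
//   Y3 = R*(U1*H^2 - X3) - S1*H^3
//   Z3 = Z1*Z2*H
//
// If the inputs are the same finite point (H == 0 and R == 0) the code falls
// through to Ldouble_shortcut instead, and the infinity masks select the
// correct result words at the end.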
.globl ecp_nistz256_point_add
.def ecp_nistz256_point_add
.type 32
.endef
.align 5
ecp_nistz256_point_add:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#32*12
ldp x4,x5,[x2,#64] // in2_z
ldp x6,x7,[x2,#64+16]
mov x21,x0
mov x22,x1
mov x23,x2
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
orr x8,x4,x5
orr x10,x6,x7
orr x25,x8,x10
cmp x25,#0
csetm x25,ne // ~in2infty
add x0,sp,#192
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z);
ldp x4,x5,[x22,#64] // in1_z
ldp x6,x7,[x22,#64+16]
orr x8,x4,x5
orr x10,x6,x7
orr x24,x8,x10
cmp x24,#0
csetm x24,ne // ~in1infty
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
ldr x3,[x23,#64]
ldp x4,x5,[sp,#192]
ldp x6,x7,[sp,#192+16]
add x2,x23,#64
add x0,sp,#320
bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,x22,#64
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
ldr x3,[x22,#32]
ldp x4,x5,[sp,#320]
ldp x6,x7,[sp,#320+16]
add x2,x22,#32
add x0,sp,#320
bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y);
ldr x3,[x23,#32]
ldp x4,x5,[sp,#352]
ldp x6,x7,[sp,#352+16]
add x2,x23,#32
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
add x2,sp,#320
ldr x3,[sp,#192] // forward load for p256_mul_mont
ldp x4,x5,[x22]
ldp x6,x7,[x22,#16]
add x0,sp,#160
bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1);
orr x14,x14,x15 // see if result is zero
orr x16,x16,x17
orr x26,x14,x16 // ~is_equal(S1,S2)
add x2,sp,#192
add x0,sp,#256
bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr);
ldr x3,[sp,#128]
ldp x4,x5,[x23]
ldp x6,x7,[x23,#16]
add x2,sp,#128
add x0,sp,#288
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr);
add x2,sp,#256
ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont
ldp x6,x7,[sp,#160+16]
add x0,sp,#96
bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1);
orr x14,x14,x15 // see if result is zero
orr x16,x16,x17
orr x14,x14,x16 // ~is_equal(U1,U2)
mvn x27,x24 // -1/0 -> 0/-1
mvn x28,x25 // -1/0 -> 0/-1
orr x14,x14,x27
orr x14,x14,x28
orr x14,x14,x26
cbnz x14,Ladd_proceed // if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2))
Ladd_double:
mov x1,x22
mov x0,x21
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
add sp,sp,#256 // #256 is from #32*(12-4). difference in stack frames
b Ldouble_shortcut
.align 4
Ladd_proceed:
add x0,sp,#192
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#96]
ldp x6,x7,[sp,#96+16]
add x2,x22,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
ldp x4,x5,[sp,#96]
ldp x6,x7,[sp,#96+16]
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
ldr x3,[x23,#64]
ldp x4,x5,[sp,#64]
ldp x6,x7,[sp,#64+16]
add x2,x23,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z);
ldr x3,[sp,#96]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,sp,#96
add x0,sp,#224
bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
ldr x3,[sp,#128]
ldp x4,x5,[sp,#256]
ldp x6,x7,[sp,#256+16]
add x2,sp,#128
add x0,sp,#288
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr);
mov x8,x14
mov x9,x15
mov x10,x16
mov x11,x17
add x0,sp,#128
bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2);
add x2,sp,#192
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
add x2,sp,#224
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
add x2,sp,#288
ldr x3,[sp,#224] // forward load for p256_mul_mont
ldp x4,x5,[sp,#320]
ldp x6,x7,[sp,#320+16]
add x0,sp,#32
bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
add x2,sp,#224
add x0,sp,#352
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub);
ldr x3,[sp,#160]
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x2,sp,#160
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
add x2,sp,#352
bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
ldp x4,x5,[sp,#0] // res
ldp x6,x7,[sp,#0+16]
ldp x8,x9,[x23] // in2
ldp x10,x11,[x23,#16]
ldp x14,x15,[x22,#0] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#0+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+0+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+0+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#0+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#0+48]
stp x14,x15,[x21,#0]
stp x16,x17,[x21,#0+16]
ldp x14,x15,[x22,#32] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#32+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+32+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+32+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#32+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#32+48]
stp x14,x15,[x21,#32]
stp x16,x17,[x21,#32+16]
ldp x14,x15,[x22,#64] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#64+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
csel x14,x8,x14,ne
csel x15,x9,x15,ne
csel x16,x10,x16,ne
csel x17,x11,x17,ne
stp x14,x15,[x21,#64]
stp x16,x17,[x21,#64+16]
Ladd_done:
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl ecp_nistz256_point_add_affine
.def ecp_nistz256_point_add_affine
.type 32
.endef
.align 5
ecp_nistz256_point_add_affine:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-80]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
sub sp,sp,#32*10
mov x21,x0
mov x22,x1
mov x23,x2
adrp x13,Lpoly
add x13,x13,:lo12:Lpoly
ldr x12,[x13,#8]
ldr x13,[x13,#24]
ldp x4,x5,[x1,#64] // in1_z
ldp x6,x7,[x1,#64+16]
orr x8,x4,x5
orr x10,x6,x7
orr x24,x8,x10
cmp x24,#0
csetm x24,ne // ~in1infty
ldp x14,x15,[x2] // in2_x
ldp x16,x17,[x2,#16]
ldp x8,x9,[x2,#32] // in2_y
ldp x10,x11,[x2,#48]
orr x14,x14,x15
orr x16,x16,x17
orr x8,x8,x9
orr x10,x10,x11
orr x14,x14,x16
orr x8,x8,x10
orr x25,x14,x8
cmp x25,#0
csetm x25,ne // ~in2infty
add x0,sp,#128
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
mov x4,x14
mov x5,x15
mov x6,x16
mov x7,x17
ldr x3,[x23]
add x2,x23,#0
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x);
add x2,x22,#0
ldr x3,[x22,#64] // forward load for p256_mul_mont
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x0,sp,#160
bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x);
add x2,x22,#64
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
ldr x3,[x22,#64]
ldp x4,x5,[sp,#160]
ldp x6,x7,[sp,#160+16]
add x2,x22,#64
add x0,sp,#64
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
ldr x3,[x23,#32]
ldp x4,x5,[sp,#128]
ldp x6,x7,[sp,#128+16]
add x2,x23,#32
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
add x2,x22,#32
ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont
ldp x6,x7,[sp,#160+16]
add x0,sp,#192
bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y);
add x0,sp,#224
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
ldp x4,x5,[sp,#192]
ldp x6,x7,[sp,#192+16]
add x0,sp,#288
bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
ldr x3,[sp,#160]
ldp x4,x5,[sp,#224]
ldp x6,x7,[sp,#224+16]
add x2,sp,#160
add x0,sp,#256
bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
ldr x3,[x22]
ldp x4,x5,[sp,#224]
ldp x6,x7,[sp,#224+16]
add x2,x22,#0
add x0,sp,#96
bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in1_x, Hsqr);
mov x8,x14
mov x9,x15
mov x10,x16
mov x11,x17
add x0,sp,#224
bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2);
add x2,sp,#288
add x0,sp,#0
bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
add x2,sp,#256
bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
add x2,sp,#96
ldr x3,[x22,#32] // forward load for p256_mul_mont
ldp x4,x5,[sp,#256]
ldp x6,x7,[sp,#256+16]
add x0,sp,#32
bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
add x2,x22,#32
add x0,sp,#128
bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub);
ldr x3,[sp,#192]
ldp x4,x5,[sp,#32]
ldp x6,x7,[sp,#32+16]
add x2,sp,#192
add x0,sp,#32
bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
add x2,sp,#128
bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
ldp x4,x5,[sp,#0] // res
ldp x6,x7,[sp,#0+16]
ldp x8,x9,[x23] // in2
ldp x10,x11,[x23,#16]
ldp x14,x15,[x22,#0] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#0+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+0+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+0+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#0+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#0+48]
stp x14,x15,[x21,#0]
stp x16,x17,[x21,#0+16]
adrp x23,Lone_mont-64
add x23,x23,:lo12:Lone_mont-64
ldp x14,x15,[x22,#32] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#32+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
ldp x4,x5,[sp,#0+32+32] // res
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
ldp x6,x7,[sp,#0+32+48]
csel x14,x8,x14,ne
csel x15,x9,x15,ne
ldp x8,x9,[x23,#32+32] // in2
csel x16,x10,x16,ne
csel x17,x11,x17,ne
ldp x10,x11,[x23,#32+48]
stp x14,x15,[x21,#32]
stp x16,x17,[x21,#32+16]
ldp x14,x15,[x22,#64] // in1
cmp x24,#0 // ~, remember?
ldp x16,x17,[x22,#64+16]
csel x8,x4,x8,ne
csel x9,x5,x9,ne
csel x10,x6,x10,ne
csel x11,x7,x11,ne
cmp x25,#0 // ~, remember?
csel x14,x8,x14,ne
csel x15,x9,x15,ne
csel x16,x10,x16,ne
csel x17,x11,x17,ne
stp x14,x15,[x21,#64]
stp x16,x17,[x21,#64+16]
add sp,x29,#0 // destroy frame
ldp x19,x20,[x29,#16]
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x29,x30,[sp],#80
AARCH64_VALIDATE_LINK_REGISTER
ret
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4],
// uint64_t b[4]);
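// Editor sketch of one Montgomery step with the group order n (illustration
// only; the generated code below interleaves these operations):
//
//   acc += a * b[i];
//   m    = acc[0] * 0xccd1c8aaee00bc4f;   // LordK == -n^(-1) mod 2^64
//   acc += m * n;                         // low limb cancels
//   acc >>= 64;
//
// Unlike the field prime, the order needs real multiplies for n[0] and n[1],
// while the upper limbs (n[2] == 2^64-1, n[3] == 2^64-2^32) are handled with
// 32-bit shift tricks.  Four such steps are followed by one conditional
// subtraction of n.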
.globl ecp_nistz256_ord_mul_mont
.def ecp_nistz256_ord_mul_mont
.type 32
.endef
.align 4
ecp_nistz256_ord_mul_mont:
AARCH64_VALID_CALL_TARGET
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
adrp x23,Lord
add x23,x23,:lo12:Lord
ldr x3,[x2] // bp[0]
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
ldp x12,x13,[x23,#0]
ldp x21,x22,[x23,#16]
ldr x23,[x23,#32]
mul x14,x4,x3 // a[0]*b[0]
umulh x8,x4,x3
mul x15,x5,x3 // a[1]*b[0]
umulh x9,x5,x3
mul x16,x6,x3 // a[2]*b[0]
umulh x10,x6,x3
mul x17,x7,x3 // a[3]*b[0]
umulh x19,x7,x3
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts of multiplication
adcs x16,x16,x9
adcs x17,x17,x10
adc x19,x19,xzr
mov x20,xzr
ldr x3,[x2,#8*1] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
ldr x3,[x2,#8*2] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
ldr x3,[x2,#8*3] // b[i]
lsl x8,x24,#32
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
mul x8,x4,x3
adc x11,x11,xzr
mul x9,x5,x3
adds x14,x15,x10
mul x10,x6,x3
adcs x15,x16,x11
mul x11,x7,x3
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
adds x14,x14,x8 // accumulate low parts
umulh x8,x4,x3
adcs x15,x15,x9
umulh x9,x5,x3
adcs x16,x16,x10
umulh x10,x6,x3
adcs x17,x17,x11
umulh x11,x7,x3
adc x19,x19,xzr
mul x24,x14,x23
adds x15,x15,x8 // accumulate high parts
adcs x16,x16,x9
adcs x17,x17,x10
adcs x19,x19,x11
adc x20,xzr,xzr
lsl x8,x24,#32 // last reduction
subs x16,x16,x24
lsr x9,x24,#32
sbcs x17,x17,x8
sbcs x19,x19,x9
sbc x20,x20,xzr
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adcs x17,x19,x24
adc x19,x20,xzr
subs x8,x14,x12 // ret -= modulus
sbcs x9,x15,x13
sbcs x10,x16,x21
sbcs x11,x17,x22
sbcs xzr,x19,xzr
csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x15,x15,x9,lo
csel x16,x16,x10,lo
stp x14,x15,[x0]
csel x17,x17,x11,lo
stp x16,x17,[x0,#16]
ldp x19,x20,[sp,#16]
ldp x21,x22,[sp,#32]
ldp x23,x24,[sp,#48]
ldr x29,[sp],#64
ret
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4],
// uint64_t rep);
.globl ecp_nistz256_ord_sqr_mont
.def ecp_nistz256_ord_sqr_mont
.type 32
.endef
.align 4
ecp_nistz256_ord_sqr_mont:
AARCH64_VALID_CALL_TARGET
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
adrp x23,Lord
add x23,x23,:lo12:Lord
ldp x4,x5,[x1]
ldp x6,x7,[x1,#16]
ldp x12,x13,[x23,#0]
ldp x21,x22,[x23,#16]
ldr x23,[x23,#32]
b Loop_ord_sqr
.align 4
Loop_ord_sqr:
sub x2,x2,#1
////////////////////////////////////////////////////////////////
// | | | | | |a1*a0| |
// | | | | |a2*a0| | |
// | |a3*a2|a3*a0| | | |
// | | | |a2*a1| | | |
// | | |a3*a1| | | | |
// *| | | | | | | | 2|
// +|a3*a3|a2*a2|a1*a1|a0*a0|
// |--+--+--+--+--+--+--+--|
	// |A7|A6|A5|A4|A3|A2|A1|A0|, where each Ax is a 64-bit word of the result
	//
	//  "can't overflow" below marks carries into the high part of a
	//  multiplication result, which cannot overflow because that high part
	//  can never be all ones.
mul x15,x5,x4 // a[1]*a[0]
umulh x9,x5,x4
mul x16,x6,x4 // a[2]*a[0]
umulh x10,x6,x4
mul x17,x7,x4 // a[3]*a[0]
umulh x19,x7,x4
adds x16,x16,x9 // accumulate high parts of multiplication
mul x8,x6,x5 // a[2]*a[1]
umulh x9,x6,x5
adcs x17,x17,x10
mul x10,x7,x5 // a[3]*a[1]
umulh x11,x7,x5
adc x19,x19,xzr // can't overflow
mul x20,x7,x6 // a[3]*a[2]
umulh x1,x7,x6
adds x9,x9,x10 // accumulate high parts of multiplication
mul x14,x4,x4 // a[0]*a[0]
adc x10,x11,xzr // can't overflow
adds x17,x17,x8 // accumulate low parts of multiplication
umulh x4,x4,x4
adcs x19,x19,x9
mul x9,x5,x5 // a[1]*a[1]
adcs x20,x20,x10
umulh x5,x5,x5
adc x1,x1,xzr // can't overflow
adds x15,x15,x15 // acc[1-6]*=2
mul x10,x6,x6 // a[2]*a[2]
adcs x16,x16,x16
umulh x6,x6,x6
adcs x17,x17,x17
mul x11,x7,x7 // a[3]*a[3]
adcs x19,x19,x19
umulh x7,x7,x7
adcs x20,x20,x20
adcs x1,x1,x1
adc x3,xzr,xzr
adds x15,x15,x4 // +a[i]*a[i]
mul x24,x14,x23
adcs x16,x16,x9
adcs x17,x17,x5
adcs x19,x19,x10
adcs x20,x20,x6
adcs x1,x1,x11
adc x3,x3,x7
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adc x17,xzr,x24 // can't overflow
mul x11,x14,x23
lsl x8,x24,#32
subs x15,x15,x24
lsr x9,x24,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x11
mul x10,x13,x11
umulh x24,x13,x11
adcs x10,x10,x9
adc x24,x24,xzr
adds x14,x15,x10
adcs x15,x16,x24
adcs x16,x17,x11
adc x17,xzr,x11 // can't overflow
mul x24,x14,x23
lsl x8,x11,#32
subs x15,x15,x11
lsr x9,x11,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x24
mul x10,x13,x24
umulh x11,x13,x24
adcs x10,x10,x9
adc x11,x11,xzr
adds x14,x15,x10
adcs x15,x16,x11
adcs x16,x17,x24
adc x17,xzr,x24 // can't overflow
mul x11,x14,x23
lsl x8,x24,#32
subs x15,x15,x24
lsr x9,x24,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
subs xzr,x14,#1
umulh x9,x12,x11
mul x10,x13,x11
umulh x24,x13,x11
adcs x10,x10,x9
adc x24,x24,xzr
adds x14,x15,x10
adcs x15,x16,x24
adcs x16,x17,x11
adc x17,xzr,x11 // can't overflow
lsl x8,x11,#32
subs x15,x15,x11
lsr x9,x11,#32
sbcs x16,x16,x8
sbc x17,x17,x9 // can't borrow
adds x14,x14,x19 // accumulate upper half
adcs x15,x15,x20
adcs x16,x16,x1
adcs x17,x17,x3
adc x19,xzr,xzr
subs x8,x14,x12 // ret -= modulus
sbcs x9,x15,x13
sbcs x10,x16,x21
sbcs x11,x17,x22
sbcs xzr,x19,xzr
csel x4,x14,x8,lo // ret = borrow ? ret : ret-modulus
csel x5,x15,x9,lo
csel x6,x16,x10,lo
csel x7,x17,x11,lo
cbnz x2,Loop_ord_sqr
stp x4,x5,[x0]
stp x6,x7,[x0,#16]
ldp x19,x20,[sp,#16]
ldp x21,x22,[sp,#32]
ldp x23,x24,[sp,#48]
ldr x29,[sp],#64
ret
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_select_w5(uint64_t *val, uint64_t *in_t, int index);
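// Illustrative C for the constant-time selection performed below (editor
// sketch, not the generated code).  The table holds 16 entries of 3*256 bits
// (96 bytes) each; every entry is read regardless of index, so the memory
// access pattern does not depend on the secret value:
//
//   memset(val, 0, 96);
//   for (unsigned i = 1; i <= 16; i++, in_t += 96) {
//       uint8_t mask = (i == (unsigned)index) ? 0xff : 0x00;    // csetm
//       for (unsigned j = 0; j < 96; j++)
//           val[j] |= in_t[j] & mask;                            // bit
//   }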
.globl ecp_nistz256_select_w5
.def ecp_nistz256_select_w5
.type 32
.endef
.align 4
ecp_nistz256_select_w5:
AARCH64_VALID_CALL_TARGET
// x10 := x0
// w9 := 0; loop counter and incremented internal index
mov x10, x0
mov w9, #0
// [v16-v21] := 0
movi v16.16b, #0
movi v17.16b, #0
movi v18.16b, #0
movi v19.16b, #0
movi v20.16b, #0
movi v21.16b, #0
Lselect_w5_loop:
// Loop 16 times.
// Increment index (loop counter); tested at the end of the loop
add w9, w9, #1
// [v22-v27] := Load a (3*256-bit = 6*128-bit) table entry starting at x1
// and advance x1 to point to the next entry
ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64
// x11 := (w9 == w2)? All 1s : All 0s
cmp w9, w2
csetm x11, eq
// continue loading ...
ld1 {v26.2d, v27.2d}, [x1],#32
// duplicate mask_64 into Mask (all 0s or all 1s)
dup v3.2d, x11
// [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19]
// i.e., values in output registers will remain the same if w9 != w2
bit v16.16b, v22.16b, v3.16b
bit v17.16b, v23.16b, v3.16b
bit v18.16b, v24.16b, v3.16b
bit v19.16b, v25.16b, v3.16b
bit v20.16b, v26.16b, v3.16b
bit v21.16b, v27.16b, v3.16b
	// If bit #4 is 0 (i.e. idx_ctr < 16) loop back
tbz w9, #4, Lselect_w5_loop
// Write [v16-v21] to memory at the output pointer
st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x10],#64
st1 {v20.2d, v21.2d}, [x10]
ret
////////////////////////////////////////////////////////////////////////
// void ecp_nistz256_select_w7(uint64_t *val, uint64_t *in_t, int index);
.globl ecp_nistz256_select_w7
.def ecp_nistz256_select_w7
.type 32
.endef
.align 4
ecp_nistz256_select_w7:
AARCH64_VALID_CALL_TARGET
// w9 := 0; loop counter and incremented internal index
mov w9, #0
	// [v16-v19] := 0
movi v16.16b, #0
movi v17.16b, #0
movi v18.16b, #0
movi v19.16b, #0
Lselect_w7_loop:
// Loop 64 times.
// Increment index (loop counter); tested at the end of the loop
add w9, w9, #1
// [v22-v25] := Load a (2*256-bit = 4*128-bit) table entry starting at x1
// and advance x1 to point to the next entry
ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64
// x11 := (w9 == w2)? All 1s : All 0s
cmp w9, w2
csetm x11, eq
// duplicate mask_64 into Mask (all 0s or all 1s)
dup v3.2d, x11
// [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19]
// i.e., values in output registers will remain the same if w9 != w2
bit v16.16b, v22.16b, v3.16b
bit v17.16b, v23.16b, v3.16b
bit v18.16b, v24.16b, v3.16b
bit v19.16b, v25.16b, v3.16b
	// If bit #6 is 0 (i.e. idx_ctr < 64) loop back
tbz w9, #6, Lselect_w7_loop
// Write [v16-v19] to memory at the output pointer
st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x0]
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
File: thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/md5-armv8.S (45,099 bytes)
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
.text
.globl md5_block_asm_data_order
md5_block_asm_data_order:
// Save all callee-saved registers
stp x19,x20,[sp,#-80]!
stp x21,x22,[sp,#16]
stp x23,x24,[sp,#32]
stp x25,x26,[sp,#48]
stp x27,x28,[sp,#64]
ldp w10, w11, [x0, #0] // Load MD5 state->A and state->B
ldp w12, w13, [x0, #8] // Load MD5 state->C and state->D
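	// Each round-1 step in the block below implements, in illustrative C
	// (editor sketch, not the generated code):
	//
	//   F(x,y,z) = ((y ^ z) & x) ^ z;               // == (x & y) | (~x & z)
	//   a = b + rotl32(a + F(b,c,d) + K[i] + M[g], s);
	//
	// The "ror #(32-s)" instructions perform the rotate-left by s bits.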
.align 5
md5_blocks_loop:
eor x17, x12, x13 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
and x16, x17, x11 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
ldp x15, x3, [x1] // Load 4 words of input data0 M[0]/0
eor x14, x16, x13 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x9, #0xa478 // Load lower half of constant 0xd76aa478
movk x9, #0xd76a, lsl #16 // Load upper half of constant 0xd76aa478
add w8, w10, w15 // Add dest value
add w7, w8, w9 // Add constant 0xd76aa478
add w6, w7, w14 // Add aux function result
ror w6, w6, #25 // Rotate left s=7 bits
eor x5, x11, x12 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
add w4, w11, w6 // Add X parameter round 1 A=FF(A, B, C, D, 0xd76aa478, s=7, M[0])
and x8, x5, x4 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
eor x17, x8, x12 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x16, #0xb756 // Load lower half of constant 0xe8c7b756
movk x16, #0xe8c7, lsl #16 // Load upper half of constant 0xe8c7b756
lsr x20, x15, #32 // Right shift high input value containing M[1]
add w9, w13, w20 // Add dest value
add w7, w9, w16 // Add constant 0xe8c7b756
add w14, w7, w17 // Add aux function result
ror w14, w14, #20 // Rotate left s=12 bits
eor x6, x4, x11 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
add w5, w4, w14 // Add X parameter round 1 D=FF(D, A, B, C, 0xe8c7b756, s=12, M[1])
and x8, x6, x5 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
eor x9, x8, x11 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x16, #0x70db // Load lower half of constant 0x242070db
movk x16, #0x2420, lsl #16 // Load upper half of constant 0x242070db
add w7, w12, w3 // Add dest value
add w17, w7, w16 // Add constant 0x242070db
add w14, w17, w9 // Add aux function result
ror w14, w14, #15 // Rotate left s=17 bits
eor x6, x5, x4 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
add w8, w5, w14 // Add X parameter round 1 C=FF(C, D, A, B, 0x242070db, s=17, M[2])
and x7, x6, x8 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
eor x16, x7, x4 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x9, #0xceee // Load lower half of constant 0xc1bdceee
movk x9, #0xc1bd, lsl #16 // Load upper half of constant 0xc1bdceee
lsr x21, x3, #32 // Right shift high input value containing M[3]
add w14, w11, w21 // Add dest value
add w6, w14, w9 // Add constant 0xc1bdceee
add w7, w6, w16 // Add aux function result
ror w7, w7, #10 // Rotate left s=22 bits
eor x17, x8, x5 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
add w9, w8, w7 // Add X parameter round 1 B=FF(B, C, D, A, 0xc1bdceee, s=22, M[3])
ldp x14, x7, [x1, #16] // Load 4 words of input data0 M[4]/0w
and x16, x17, x9 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
eor x6, x16, x5 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x16, #0xfaf // Load lower half of constant 0xf57c0faf
movk x16, #0xf57c, lsl #16 // Load upper half of constant 0xf57c0faf
add w17, w4, w14 // Add dest value
add w16, w17, w16 // Add constant 0xf57c0faf
add w4, w16, w6 // Add aux function result
ror w4, w4, #25 // Rotate left s=7 bits
eor x16, x9, x8 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
add w17, w9, w4 // Add X parameter round 1 A=FF(A, B, C, D, 0xf57c0faf, s=7, M[4])
and x16, x16, x17 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
eor x6, x16, x8 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x4, #0xc62a // Load lower half of constant 0x4787c62a
movk x4, #0x4787, lsl #16 // Load upper half of constant 0x4787c62a
lsr x22, x14, #32 // Right shift high input value containing M[5]
add w16, w5, w22 // Add dest value
add w16, w16, w4 // Add constant 0x4787c62a
add w5, w16, w6 // Add aux function result
ror w5, w5, #20 // Rotate left s=12 bits
eor x4, x17, x9 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
add w19, w17, w5 // Add X parameter round 1 D=FF(D, A, B, C, 0x4787c62a, s=12, M[5])
and x6, x4, x19 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
eor x5, x6, x9 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x4, #0x4613 // Load lower half of constant 0xa8304613
movk x4, #0xa830, lsl #16 // Load upper half of constant 0xa8304613
add w6, w8, w7 // Add dest value
add w8, w6, w4 // Add constant 0xa8304613
add w4, w8, w5 // Add aux function result
ror w4, w4, #15 // Rotate left s=17 bits
eor x6, x19, x17 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
add w8, w19, w4 // Add X parameter round 1 C=FF(C, D, A, B, 0xa8304613, s=17, M[6])
and x5, x6, x8 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
eor x4, x5, x17 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x6, #0x9501 // Load lower half of constant 0xfd469501
movk x6, #0xfd46, lsl #16 // Load upper half of constant 0xfd469501
lsr x23, x7, #32 // Right shift high input value containing M[7]
add w9, w9, w23 // Add dest value
add w5, w9, w6 // Add constant 0xfd469501
add w9, w5, w4 // Add aux function result
ror w9, w9, #10 // Rotate left s=22 bits
eor x6, x8, x19 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
add w4, w8, w9 // Add X parameter round 1 B=FF(B, C, D, A, 0xfd469501, s=22, M[7])
ldp x5, x16, [x1, #32]                      // Load 4 words of input data: M[8]..M[11]
and x9, x6, x4 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
eor x6, x9, x19 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x9, #0x98d8 // Load lower half of constant 0x698098d8
movk x9, #0x6980, lsl #16 // Load upper half of constant 0x698098d8
add w17, w17, w5 // Add dest value
add w9, w17, w9 // Add constant 0x698098d8
add w17, w9, w6 // Add aux function result
ror w17, w17, #25 // Rotate left s=7 bits
eor x9, x4, x8 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
add w6, w4, w17 // Add X parameter round 1 A=FF(A, B, C, D, 0x698098d8, s=7, M[8])
and x17, x9, x6 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
eor x9, x17, x8 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x17, #0xf7af // Load lower half of constant 0x8b44f7af
movk x17, #0x8b44, lsl #16 // Load upper half of constant 0x8b44f7af
lsr x24, x5, #32 // Right shift high input value containing M[9]
add w19, w19, w24 // Add dest value
add w17, w19, w17 // Add constant 0x8b44f7af
add w19, w17, w9 // Add aux function result
ror w19, w19, #20 // Rotate left s=12 bits
eor x9, x6, x4 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
add w17, w6, w19 // Add X parameter round 1 D=FF(D, A, B, C, 0x8b44f7af, s=12, M[9])
and x9, x9, x17 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
eor x9, x9, x4 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x11, #0x5bb1 // Load lower half of constant 0xffff5bb1
movk x11, #0xffff, lsl #16 // Load upper half of constant 0xffff5bb1
add w8, w8, w16 // Add dest value
add w8, w8, w11 // Add constant 0xffff5bb1
add w8, w8, w9 // Add aux function result
ror w8, w8, #15 // Rotate left s=17 bits
eor x9, x17, x6 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
add w8, w17, w8 // Add X parameter round 1 C=FF(C, D, A, B, 0xffff5bb1, s=17, M[10])
and x9, x9, x8 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
eor x9, x9, x6 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x11, #0xd7be // Load lower half of constant 0x895cd7be
movk x11, #0x895c, lsl #16 // Load upper half of constant 0x895cd7be
lsr x25, x16, #32 // Right shift high input value containing M[11]
add w4, w4, w25 // Add dest value
add w4, w4, w11 // Add constant 0x895cd7be
add w9, w4, w9 // Add aux function result
ror w9, w9, #10 // Rotate left s=22 bits
eor x4, x8, x17 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
add w9, w8, w9 // Add X parameter round 1 B=FF(B, C, D, A, 0x895cd7be, s=22, M[11])
ldp x11, x12, [x1, #48]                     // Load 4 words of input data: M[12]..M[15]
and x4, x4, x9 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
eor x4, x4, x17 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x19, #0x1122 // Load lower half of constant 0x6b901122
movk x19, #0x6b90, lsl #16 // Load upper half of constant 0x6b901122
add w6, w6, w11 // Add dest value
add w6, w6, w19 // Add constant 0x6b901122
add w4, w6, w4 // Add aux function result
ror w4, w4, #25 // Rotate left s=7 bits
eor x6, x9, x8 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
add w4, w9, w4 // Add X parameter round 1 A=FF(A, B, C, D, 0x6b901122, s=7, M[12])
and x6, x6, x4 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
eor x6, x6, x8 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x19, #0x7193 // Load lower half of constant 0xfd987193
movk x19, #0xfd98, lsl #16 // Load upper half of constant 0xfd987193
lsr x26, x11, #32 // Right shift high input value containing M[13]
add w17, w17, w26 // Add dest value
add w17, w17, w19 // Add constant 0xfd987193
add w17, w17, w6 // Add aux function result
ror w17, w17, #20 // Rotate left s=12 bits
eor x6, x4, x9 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
add w17, w4, w17 // Add X parameter round 1 D=FF(D, A, B, C, 0xfd987193, s=12, M[13])
and x6, x6, x17 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
eor x6, x6, x9 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x13, #0x438e // Load lower half of constant 0xa679438e
movk x13, #0xa679, lsl #16 // Load upper half of constant 0xa679438e
add w8, w8, w12 // Add dest value
add w8, w8, w13 // Add constant 0xa679438e
add w8, w8, w6 // Add aux function result
ror w8, w8, #15 // Rotate left s=17 bits
eor x6, x17, x4 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z)
add w8, w17, w8 // Add X parameter round 1 C=FF(C, D, A, B, 0xa679438e, s=17, M[14])
and x6, x6, x8 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z)
eor x6, x6, x4 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z)
movz x13, #0x821 // Load lower half of constant 0x49b40821
movk x13, #0x49b4, lsl #16 // Load upper half of constant 0x49b40821
lsr x27, x12, #32 // Right shift high input value containing M[15]
add w9, w9, w27 // Add dest value
add w9, w9, w13 // Add constant 0x49b40821
add w9, w9, w6 // Add aux function result
ror w9, w9, #10 // Rotate left s=22 bits
bic x6, x8, x17 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w9, w8, w9 // Add X parameter round 1 B=FF(B, C, D, A, 0x49b40821, s=22, M[15])
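// For reference, every MD5 step above and below follows the same pattern
// (a pseudocode sketch; the rotate-left by s is realized as "ror #(32-s)"):
//   a = b + rotl32(a + Aux(b,c,d) + M[k] + K[i], s)
// Round 1 uses F(x,y,z) = (x&y)|(~x&z), computed here as ((y^z)&x)^z.
// Round 2, whose first G computation is interleaved just above, uses
// G(x,y,z) = (x&z)|(y&~z) built from the bic/and/orr sequences, with
// shifts s = 5,9,14,20 and the message index advancing by 5 modulo 16.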
and x13, x9, x17 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0x2562 // Load lower half of constant 0xf61e2562
movk x13, #0xf61e, lsl #16 // Load upper half of constant 0xf61e2562
add w4, w4, w20 // Add dest value
add w4, w4, w13 // Add constant 0xf61e2562
add w4, w4, w6 // Add aux function result
ror w4, w4, #27 // Rotate left s=5 bits
bic x6, x9, x8 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w4, w9, w4 // Add X parameter round 2 A=GG(A, B, C, D, 0xf61e2562, s=5, M[1])
and x13, x4, x8 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0xb340 // Load lower half of constant 0xc040b340
movk x13, #0xc040, lsl #16 // Load upper half of constant 0xc040b340
add w17, w17, w7 // Add dest value
add w17, w17, w13 // Add constant 0xc040b340
add w17, w17, w6 // Add aux function result
ror w17, w17, #23 // Rotate left s=9 bits
bic x6, x4, x9 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w17, w4, w17 // Add X parameter round 2 D=GG(D, A, B, C, 0xc040b340, s=9, M[6])
and x13, x17, x9 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0x5a51 // Load lower half of constant 0x265e5a51
movk x13, #0x265e, lsl #16 // Load upper half of constant 0x265e5a51
add w8, w8, w25 // Add dest value
add w8, w8, w13 // Add constant 0x265e5a51
add w8, w8, w6 // Add aux function result
ror w8, w8, #18 // Rotate left s=14 bits
bic x6, x17, x4 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w8, w17, w8 // Add X parameter round 2 C=GG(C, D, A, B, 0x265e5a51, s=14, M[11])
and x13, x8, x4 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0xc7aa // Load lower half of constant 0xe9b6c7aa
movk x13, #0xe9b6, lsl #16 // Load upper half of constant 0xe9b6c7aa
add w9, w9, w15 // Add dest value
add w9, w9, w13 // Add constant 0xe9b6c7aa
add w9, w9, w6 // Add aux function result
ror w9, w9, #12 // Rotate left s=20 bits
bic x6, x8, x17 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w9, w8, w9 // Add X parameter round 2 B=GG(B, C, D, A, 0xe9b6c7aa, s=20, M[0])
and x13, x9, x17 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0x105d // Load lower half of constant 0xd62f105d
movk x13, #0xd62f, lsl #16 // Load upper half of constant 0xd62f105d
add w4, w4, w22 // Add dest value
add w4, w4, w13 // Add constant 0xd62f105d
add w4, w4, w6 // Add aux function result
ror w4, w4, #27 // Rotate left s=5 bits
bic x6, x9, x8 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w4, w9, w4 // Add X parameter round 2 A=GG(A, B, C, D, 0xd62f105d, s=5, M[5])
and x13, x4, x8 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0x1453 // Load lower half of constant 0x2441453
movk x13, #0x244, lsl #16 // Load upper half of constant 0x2441453
add w17, w17, w16 // Add dest value
add w17, w17, w13 // Add constant 0x2441453
add w17, w17, w6 // Add aux function result
ror w17, w17, #23 // Rotate left s=9 bits
bic x6, x4, x9 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w17, w4, w17 // Add X parameter round 2 D=GG(D, A, B, C, 0x2441453, s=9, M[10])
and x13, x17, x9 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0xe681 // Load lower half of constant 0xd8a1e681
movk x13, #0xd8a1, lsl #16 // Load upper half of constant 0xd8a1e681
add w8, w8, w27 // Add dest value
add w8, w8, w13 // Add constant 0xd8a1e681
add w8, w8, w6 // Add aux function result
ror w8, w8, #18 // Rotate left s=14 bits
bic x6, x17, x4 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w8, w17, w8 // Add X parameter round 2 C=GG(C, D, A, B, 0xd8a1e681, s=14, M[15])
and x13, x8, x4 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0xfbc8 // Load lower half of constant 0xe7d3fbc8
movk x13, #0xe7d3, lsl #16 // Load upper half of constant 0xe7d3fbc8
add w9, w9, w14 // Add dest value
add w9, w9, w13 // Add constant 0xe7d3fbc8
add w9, w9, w6 // Add aux function result
ror w9, w9, #12 // Rotate left s=20 bits
bic x6, x8, x17 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w9, w8, w9 // Add X parameter round 2 B=GG(B, C, D, A, 0xe7d3fbc8, s=20, M[4])
and x13, x9, x17 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0xcde6 // Load lower half of constant 0x21e1cde6
movk x13, #0x21e1, lsl #16 // Load upper half of constant 0x21e1cde6
add w4, w4, w24 // Add dest value
add w4, w4, w13 // Add constant 0x21e1cde6
add w4, w4, w6 // Add aux function result
ror w4, w4, #27 // Rotate left s=5 bits
bic x6, x9, x8 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w4, w9, w4 // Add X parameter round 2 A=GG(A, B, C, D, 0x21e1cde6, s=5, M[9])
and x13, x4, x8 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0x7d6 // Load lower half of constant 0xc33707d6
movk x13, #0xc337, lsl #16 // Load upper half of constant 0xc33707d6
add w17, w17, w12 // Add dest value
add w17, w17, w13 // Add constant 0xc33707d6
add w17, w17, w6 // Add aux function result
ror w17, w17, #23 // Rotate left s=9 bits
bic x6, x4, x9 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w17, w4, w17 // Add X parameter round 2 D=GG(D, A, B, C, 0xc33707d6, s=9, M[14])
and x13, x17, x9 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0xd87 // Load lower half of constant 0xf4d50d87
movk x13, #0xf4d5, lsl #16 // Load upper half of constant 0xf4d50d87
add w8, w8, w21 // Add dest value
add w8, w8, w13 // Add constant 0xf4d50d87
add w8, w8, w6 // Add aux function result
ror w8, w8, #18 // Rotate left s=14 bits
bic x6, x17, x4 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w8, w17, w8 // Add X parameter round 2 C=GG(C, D, A, B, 0xf4d50d87, s=14, M[3])
and x13, x8, x4 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0x14ed // Load lower half of constant 0x455a14ed
movk x13, #0x455a, lsl #16 // Load upper half of constant 0x455a14ed
add w9, w9, w5 // Add dest value
add w9, w9, w13 // Add constant 0x455a14ed
add w9, w9, w6 // Add aux function result
ror w9, w9, #12 // Rotate left s=20 bits
bic x6, x8, x17 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w9, w8, w9 // Add X parameter round 2 B=GG(B, C, D, A, 0x455a14ed, s=20, M[8])
and x13, x9, x17 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0xe905 // Load lower half of constant 0xa9e3e905
movk x13, #0xa9e3, lsl #16 // Load upper half of constant 0xa9e3e905
add w4, w4, w26 // Add dest value
add w4, w4, w13 // Add constant 0xa9e3e905
add w4, w4, w6 // Add aux function result
ror w4, w4, #27 // Rotate left s=5 bits
bic x6, x9, x8 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w4, w9, w4 // Add X parameter round 2 A=GG(A, B, C, D, 0xa9e3e905, s=5, M[13])
and x13, x4, x8 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0xa3f8 // Load lower half of constant 0xfcefa3f8
movk x13, #0xfcef, lsl #16 // Load upper half of constant 0xfcefa3f8
add w17, w17, w3 // Add dest value
add w17, w17, w13 // Add constant 0xfcefa3f8
add w17, w17, w6 // Add aux function result
ror w17, w17, #23 // Rotate left s=9 bits
bic x6, x4, x9 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w17, w4, w17 // Add X parameter round 2 D=GG(D, A, B, C, 0xfcefa3f8, s=9, M[2])
and x13, x17, x9 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0x2d9 // Load lower half of constant 0x676f02d9
movk x13, #0x676f, lsl #16 // Load upper half of constant 0x676f02d9
add w8, w8, w23 // Add dest value
add w8, w8, w13 // Add constant 0x676f02d9
add w8, w8, w6 // Add aux function result
ror w8, w8, #18 // Rotate left s=14 bits
bic x6, x17, x4 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
add w8, w17, w8 // Add X parameter round 2 C=GG(C, D, A, B, 0x676f02d9, s=14, M[7])
and x13, x8, x4 // Aux function round 2 G(x,y,z)=((x&z)|(~z&y))
orr x6, x6, x13 // End aux function round 2 G(x,y,z)=((x&z)|(~z&y))
movz x13, #0x4c8a // Load lower half of constant 0x8d2a4c8a
movk x13, #0x8d2a, lsl #16 // Load upper half of constant 0x8d2a4c8a
add w9, w9, w11 // Add dest value
add w9, w9, w13 // Add constant 0x8d2a4c8a
add w9, w9, w6 // Add aux function result
eor x6, x8, x17 // Begin aux function round 3 H(x,y,z)=(x^y^z)
ror w9, w9, #12 // Rotate left s=20 bits
movz x10, #0x3942 // Load lower half of constant 0xfffa3942
add w9, w8, w9 // Add X parameter round 2 B=GG(B, C, D, A, 0x8d2a4c8a, s=20, M[12])
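// Round 3 (interleaved with the tail of round 2 above) uses the simpler
// H(x,y,z) = x^y^z, so only two eor instructions are needed per step;
// shifts cycle through s = 4,11,16,23 and the message index advances by
// 3 modulo 16, as the per-step comments show.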
movk x10, #0xfffa, lsl #16 // Load upper half of constant 0xfffa3942
add w4, w4, w22 // Add dest value
eor x6, x6, x9 // End aux function round 3 H(x,y,z)=(x^y^z)
add w4, w4, w10 // Add constant 0xfffa3942
add w4, w4, w6 // Add aux function result
ror w4, w4, #28 // Rotate left s=4 bits
eor x6, x9, x8 // Begin aux function round 3 H(x,y,z)=(x^y^z)
movz x10, #0xf681 // Load lower half of constant 0x8771f681
add w4, w9, w4 // Add X parameter round 3 A=HH(A, B, C, D, 0xfffa3942, s=4, M[5])
movk x10, #0x8771, lsl #16 // Load upper half of constant 0x8771f681
add w17, w17, w5 // Add dest value
eor x6, x6, x4 // End aux function round 3 H(x,y,z)=(x^y^z)
add w17, w17, w10 // Add constant 0x8771f681
add w17, w17, w6 // Add aux function result
eor x6, x4, x9 // Begin aux function round 3 H(x,y,z)=(x^y^z)
ror w17, w17, #21 // Rotate left s=11 bits
movz x13, #0x6122 // Load lower half of constant 0x6d9d6122
add w17, w4, w17 // Add X parameter round 3 D=HH(D, A, B, C, 0x8771f681, s=11, M[8])
movk x13, #0x6d9d, lsl #16 // Load upper half of constant 0x6d9d6122
add w8, w8, w25 // Add dest value
eor x6, x6, x17 // End aux function round 3 H(x,y,z)=(x^y^z)
add w8, w8, w13 // Add constant 0x6d9d6122
add w8, w8, w6 // Add aux function result
ror w8, w8, #16 // Rotate left s=16 bits
eor x6, x17, x4 // Begin aux function round 3 H(x,y,z)=(x^y^z)
movz x13, #0x380c // Load lower half of constant 0xfde5380c
add w8, w17, w8 // Add X parameter round 3 C=HH(C, D, A, B, 0x6d9d6122, s=16, M[11])
movk x13, #0xfde5, lsl #16 // Load upper half of constant 0xfde5380c
add w9, w9, w12 // Add dest value
eor x6, x6, x8 // End aux function round 3 H(x,y,z)=(x^y^z)
add w9, w9, w13 // Add constant 0xfde5380c
add w9, w9, w6 // Add aux function result
eor x6, x8, x17 // Begin aux function round 3 H(x,y,z)=(x^y^z)
ror w9, w9, #9 // Rotate left s=23 bits
movz x10, #0xea44 // Load lower half of constant 0xa4beea44
add w9, w8, w9 // Add X parameter round 3 B=HH(B, C, D, A, 0xfde5380c, s=23, M[14])
movk x10, #0xa4be, lsl #16 // Load upper half of constant 0xa4beea44
add w4, w4, w20 // Add dest value
eor x6, x6, x9 // End aux function round 3 H(x,y,z)=(x^y^z)
add w4, w4, w10 // Add constant 0xa4beea44
add w4, w4, w6 // Add aux function result
ror w4, w4, #28 // Rotate left s=4 bits
eor x6, x9, x8 // Begin aux function round 3 H(x,y,z)=(x^y^z)
movz x10, #0xcfa9 // Load lower half of constant 0x4bdecfa9
add w4, w9, w4 // Add X parameter round 3 A=HH(A, B, C, D, 0xa4beea44, s=4, M[1])
movk x10, #0x4bde, lsl #16 // Load upper half of constant 0x4bdecfa9
add w17, w17, w14 // Add dest value
eor x6, x6, x4 // End aux function round 3 H(x,y,z)=(x^y^z)
add w17, w17, w10 // Add constant 0x4bdecfa9
add w17, w17, w6 // Add aux function result
eor x6, x4, x9 // Begin aux function round 3 H(x,y,z)=(x^y^z)
ror w17, w17, #21 // Rotate left s=11 bits
movz x13, #0x4b60 // Load lower half of constant 0xf6bb4b60
add w17, w4, w17 // Add X parameter round 3 D=HH(D, A, B, C, 0x4bdecfa9, s=11, M[4])
movk x13, #0xf6bb, lsl #16 // Load upper half of constant 0xf6bb4b60
add w8, w8, w23 // Add dest value
eor x6, x6, x17 // End aux function round 3 H(x,y,z)=(x^y^z)
add w8, w8, w13 // Add constant 0xf6bb4b60
add w8, w8, w6 // Add aux function result
ror w8, w8, #16 // Rotate left s=16 bits
eor x6, x17, x4 // Begin aux function round 3 H(x,y,z)=(x^y^z)
movz x13, #0xbc70 // Load lower half of constant 0xbebfbc70
add w8, w17, w8 // Add X parameter round 3 C=HH(C, D, A, B, 0xf6bb4b60, s=16, M[7])
movk x13, #0xbebf, lsl #16 // Load upper half of constant 0xbebfbc70
add w9, w9, w16 // Add dest value
eor x6, x6, x8 // End aux function round 3 H(x,y,z)=(x^y^z)
add w9, w9, w13 // Add constant 0xbebfbc70
add w9, w9, w6 // Add aux function result
eor x6, x8, x17 // Begin aux function round 3 H(x,y,z)=(x^y^z)
ror w9, w9, #9 // Rotate left s=23 bits
movz x10, #0x7ec6 // Load lower half of constant 0x289b7ec6
add w9, w8, w9 // Add X parameter round 3 B=HH(B, C, D, A, 0xbebfbc70, s=23, M[10])
movk x10, #0x289b, lsl #16 // Load upper half of constant 0x289b7ec6
add w4, w4, w26 // Add dest value
eor x6, x6, x9 // End aux function round 3 H(x,y,z)=(x^y^z)
add w4, w4, w10 // Add constant 0x289b7ec6
add w4, w4, w6 // Add aux function result
ror w4, w4, #28 // Rotate left s=4 bits
eor x6, x9, x8 // Begin aux function round 3 H(x,y,z)=(x^y^z)
movz x10, #0x27fa // Load lower half of constant 0xeaa127fa
add w4, w9, w4 // Add X parameter round 3 A=HH(A, B, C, D, 0x289b7ec6, s=4, M[13])
movk x10, #0xeaa1, lsl #16 // Load upper half of constant 0xeaa127fa
add w17, w17, w15 // Add dest value
eor x6, x6, x4 // End aux function round 3 H(x,y,z)=(x^y^z)
add w17, w17, w10 // Add constant 0xeaa127fa
add w17, w17, w6 // Add aux function result
eor x6, x4, x9 // Begin aux function round 3 H(x,y,z)=(x^y^z)
ror w17, w17, #21 // Rotate left s=11 bits
movz x13, #0x3085 // Load lower half of constant 0xd4ef3085
add w17, w4, w17 // Add X parameter round 3 D=HH(D, A, B, C, 0xeaa127fa, s=11, M[0])
movk x13, #0xd4ef, lsl #16 // Load upper half of constant 0xd4ef3085
add w8, w8, w21 // Add dest value
eor x6, x6, x17 // End aux function round 3 H(x,y,z)=(x^y^z)
add w8, w8, w13 // Add constant 0xd4ef3085
add w8, w8, w6 // Add aux function result
ror w8, w8, #16 // Rotate left s=16 bits
eor x6, x17, x4 // Begin aux function round 3 H(x,y,z)=(x^y^z)
movz x13, #0x1d05 // Load lower half of constant 0x4881d05
add w8, w17, w8 // Add X parameter round 3 C=HH(C, D, A, B, 0xd4ef3085, s=16, M[3])
movk x13, #0x488, lsl #16 // Load upper half of constant 0x4881d05
add w9, w9, w7 // Add dest value
eor x6, x6, x8 // End aux function round 3 H(x,y,z)=(x^y^z)
add w9, w9, w13 // Add constant 0x4881d05
add w9, w9, w6 // Add aux function result
eor x6, x8, x17 // Begin aux function round 3 H(x,y,z)=(x^y^z)
ror w9, w9, #9 // Rotate left s=23 bits
movz x10, #0xd039 // Load lower half of constant 0xd9d4d039
add w9, w8, w9 // Add X parameter round 3 B=HH(B, C, D, A, 0x4881d05, s=23, M[6])
movk x10, #0xd9d4, lsl #16 // Load upper half of constant 0xd9d4d039
add w4, w4, w24 // Add dest value
eor x6, x6, x9 // End aux function round 3 H(x,y,z)=(x^y^z)
add w4, w4, w10 // Add constant 0xd9d4d039
add w4, w4, w6 // Add aux function result
ror w4, w4, #28 // Rotate left s=4 bits
eor x6, x9, x8 // Begin aux function round 3 H(x,y,z)=(x^y^z)
movz x10, #0x99e5 // Load lower half of constant 0xe6db99e5
add w4, w9, w4 // Add X parameter round 3 A=HH(A, B, C, D, 0xd9d4d039, s=4, M[9])
movk x10, #0xe6db, lsl #16 // Load upper half of constant 0xe6db99e5
add w17, w17, w11 // Add dest value
eor x6, x6, x4 // End aux function round 3 H(x,y,z)=(x^y^z)
add w17, w17, w10 // Add constant 0xe6db99e5
add w17, w17, w6 // Add aux function result
eor x6, x4, x9 // Begin aux function round 3 H(x,y,z)=(x^y^z)
ror w17, w17, #21 // Rotate left s=11 bits
movz x13, #0x7cf8 // Load lower half of constant 0x1fa27cf8
add w17, w4, w17 // Add X parameter round 3 D=HH(D, A, B, C, 0xe6db99e5, s=11, M[12])
movk x13, #0x1fa2, lsl #16 // Load upper half of constant 0x1fa27cf8
add w8, w8, w27 // Add dest value
eor x6, x6, x17 // End aux function round 3 H(x,y,z)=(x^y^z)
add w8, w8, w13 // Add constant 0x1fa27cf8
add w8, w8, w6 // Add aux function result
ror w8, w8, #16 // Rotate left s=16 bits
eor x6, x17, x4 // Begin aux function round 3 H(x,y,z)=(x^y^z)
movz x13, #0x5665 // Load lower half of constant 0xc4ac5665
add w8, w17, w8 // Add X parameter round 3 C=HH(C, D, A, B, 0x1fa27cf8, s=16, M[15])
movk x13, #0xc4ac, lsl #16 // Load upper half of constant 0xc4ac5665
add w9, w9, w3 // Add dest value
eor x6, x6, x8 // End aux function round 3 H(x,y,z)=(x^y^z)
add w9, w9, w13 // Add constant 0xc4ac5665
add w9, w9, w6 // Add aux function result
ror w9, w9, #9 // Rotate left s=23 bits
movz x6, #0x2244 // Load lower half of constant 0xf4292244
movk x6, #0xf429, lsl #16 // Load upper half of constant 0xf4292244
add w9, w8, w9 // Add X parameter round 3 B=HH(B, C, D, A, 0xc4ac5665, s=23, M[2])
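// Round 4 (interleaved below) uses I(x,y,z) = y^(x|~z), formed with
// orn/eor; shifts cycle through s = 6,10,15,21 and the message index
// advances by 7 modulo 16 (M[0], M[7], M[14], ...), matching the
// per-step comments.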
add w4, w4, w15 // Add dest value
orn x13, x9, x17 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w4, w4, w6 // Add constant 0xf4292244
eor x6, x8, x13 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w4, w4, w6 // Add aux function result
ror w4, w4, #26 // Rotate left s=6 bits
movz x6, #0xff97 // Load lower half of constant 0x432aff97
movk x6, #0x432a, lsl #16 // Load upper half of constant 0x432aff97
add w4, w9, w4 // Add X parameter round 4 A=II(A, B, C, D, 0xf4292244, s=6, M[0])
orn x10, x4, x8 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w17, w17, w23 // Add dest value
eor x10, x9, x10 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w17, w17, w6 // Add constant 0x432aff97
add w6, w17, w10 // Add aux function result
ror w6, w6, #22 // Rotate left s=10 bits
movz x17, #0x23a7 // Load lower half of constant 0xab9423a7
movk x17, #0xab94, lsl #16 // Load upper half of constant 0xab9423a7
add w6, w4, w6 // Add X parameter round 4 D=II(D, A, B, C, 0x432aff97, s=10, M[7])
add w8, w8, w12 // Add dest value
orn x10, x6, x9 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w8, w8, w17 // Add constant 0xab9423a7
eor x17, x4, x10 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w8, w8, w17 // Add aux function result
ror w8, w8, #17 // Rotate left s=15 bits
movz x17, #0xa039 // Load lower half of constant 0xfc93a039
movk x17, #0xfc93, lsl #16 // Load upper half of constant 0xfc93a039
add w8, w6, w8 // Add X parameter round 4 C=II(C, D, A, B, 0xab9423a7, s=15, M[14])
orn x13, x8, x4 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w9, w9, w22 // Add dest value
eor x13, x6, x13 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w9, w9, w17 // Add constant 0xfc93a039
add w17, w9, w13 // Add aux function result
ror w17, w17, #11 // Rotate left s=21 bits
movz x9, #0x59c3 // Load lower half of constant 0x655b59c3
movk x9, #0x655b, lsl #16 // Load upper half of constant 0x655b59c3
add w17, w8, w17 // Add X parameter round 4 B=II(B, C, D, A, 0xfc93a039, s=21, M[5])
add w4, w4, w11 // Add dest value
orn x13, x17, x6 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w9, w4, w9 // Add constant 0x655b59c3
eor x4, x8, x13 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w9, w9, w4 // Add aux function result
ror w9, w9, #26 // Rotate left s=6 bits
movz x4, #0xcc92 // Load lower half of constant 0x8f0ccc92
movk x4, #0x8f0c, lsl #16 // Load upper half of constant 0x8f0ccc92
add w9, w17, w9 // Add X parameter round 4 A=II(A, B, C, D, 0x655b59c3, s=6, M[12])
orn x10, x9, x8 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w6, w6, w21 // Add dest value
eor x10, x17, x10 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w4, w6, w4 // Add constant 0x8f0ccc92
add w6, w4, w10 // Add aux function result
ror w6, w6, #22 // Rotate left s=10 bits
movz x4, #0xf47d // Load lower half of constant 0xffeff47d
movk x4, #0xffef, lsl #16 // Load upper half of constant 0xffeff47d
add w6, w9, w6 // Add X parameter round 4 D=II(D, A, B, C, 0x8f0ccc92, s=10, M[3])
add w8, w8, w16 // Add dest value
orn x10, x6, x17 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w8, w8, w4 // Add constant 0xffeff47d
eor x4, x9, x10 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w8, w8, w4 // Add aux function result
ror w8, w8, #17 // Rotate left s=15 bits
movz x4, #0x5dd1 // Load lower half of constant 0x85845dd1
movk x4, #0x8584, lsl #16 // Load upper half of constant 0x85845dd1
add w8, w6, w8 // Add X parameter round 4 C=II(C, D, A, B, 0xffeff47d, s=15, M[10])
orn x10, x8, x9 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w15, w17, w20 // Add dest value
eor x17, x6, x10 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w15, w15, w4 // Add constant 0x85845dd1
add w4, w15, w17 // Add aux function result
ror w4, w4, #11 // Rotate left s=21 bits
movz x15, #0x7e4f // Load lower half of constant 0x6fa87e4f
movk x15, #0x6fa8, lsl #16 // Load upper half of constant 0x6fa87e4f
add w17, w8, w4 // Add X parameter round 4 B=II(B, C, D, A, 0x85845dd1, s=21, M[1])
add w4, w9, w5 // Add dest value
orn x9, x17, x6 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w15, w4, w15 // Add constant 0x6fa87e4f
eor x4, x8, x9 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w9, w15, w4 // Add aux function result
ror w9, w9, #26 // Rotate left s=6 bits
movz x15, #0xe6e0 // Load lower half of constant 0xfe2ce6e0
movk x15, #0xfe2c, lsl #16 // Load upper half of constant 0xfe2ce6e0
add w4, w17, w9 // Add X parameter round 4 A=II(A, B, C, D, 0x6fa87e4f, s=6, M[8])
orn x9, x4, x8 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w6, w6, w27 // Add dest value
eor x9, x17, x9 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w15, w6, w15 // Add constant 0xfe2ce6e0
add w6, w15, w9 // Add aux function result
ror w6, w6, #22 // Rotate left s=10 bits
movz x9, #0x4314 // Load lower half of constant 0xa3014314
movk x9, #0xa301, lsl #16 // Load upper half of constant 0xa3014314
add w15, w4, w6 // Add X parameter round 4 D=II(D, A, B, C, 0xfe2ce6e0, s=10, M[15])
add w6, w8, w7 // Add dest value
orn x7, x15, x17 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w8, w6, w9 // Add constant 0xa3014314
eor x9, x4, x7 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w6, w8, w9 // Add aux function result
ror w6, w6, #17 // Rotate left s=15 bits
movz x7, #0x11a1 // Load lower half of constant 0x4e0811a1
movk x7, #0x4e08, lsl #16 // Load upper half of constant 0x4e0811a1
add w8, w15, w6 // Add X parameter round 4 C=II(C, D, A, B, 0xa3014314, s=15, M[6])
orn x9, x8, x4 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w6, w17, w26 // Add dest value
eor x17, x15, x9 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w9, w6, w7 // Add constant 0x4e0811a1
add w7, w9, w17 // Add aux function result
ror w7, w7, #11 // Rotate left s=21 bits
movz x6, #0x7e82 // Load lower half of constant 0xf7537e82
movk x6, #0xf753, lsl #16 // Load upper half of constant 0xf7537e82
add w9, w8, w7 // Add X parameter round 4 B=II(B, C, D, A, 0x4e0811a1, s=21, M[13])
add w17, w4, w14 // Add dest value
orn x7, x9, x15 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w14, w17, w6 // Add constant 0xf7537e82
eor x4, x8, x7 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w17, w14, w4 // Add aux function result
ror w17, w17, #26 // Rotate left s=6 bits
movz x6, #0xf235 // Load lower half of constant 0xbd3af235
movk x6, #0xbd3a, lsl #16 // Load upper half of constant 0xbd3af235
add w7, w9, w17 // Add X parameter round 4 A=II(A, B, C, D, 0xf7537e82, s=6, M[4])
orn x14, x7, x8 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w4, w15, w25 // Add dest value
eor x17, x9, x14 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w15, w4, w6 // Add constant 0xbd3af235
add w16, w15, w17 // Add aux function result
ror w16, w16, #22 // Rotate left s=10 bits
movz x14, #0xd2bb // Load lower half of constant 0x2ad7d2bb
movk x14, #0x2ad7, lsl #16 // Load upper half of constant 0x2ad7d2bb
add w4, w7, w16 // Add X parameter round 4 D=II(D, A, B, C, 0xbd3af235, s=10, M[11])
add w6, w8, w3 // Add dest value
orn x15, x4, x9 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w17, w6, w14 // Add constant 0x2ad7d2bb
eor x16, x7, x15 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w8, w17, w16 // Add aux function result
ror w8, w8, #17 // Rotate left s=15 bits
movz x3, #0xd391 // Load lower half of constant 0xeb86d391
movk x3, #0xeb86, lsl #16 // Load upper half of constant 0xeb86d391
add w14, w4, w8 // Add X parameter round 4 C=II(C, D, A, B, 0x2ad7d2bb, s=15, M[2])
orn x6, x14, x7 // Begin aux function round 4 I(x,y,z)=((~z|x)^y)
add w15, w9, w24 // Add dest value
eor x17, x4, x6 // End aux function round 4 I(x,y,z)=((~z|x)^y)
add w16, w15, w3 // Add constant 0xeb86d391
add w8, w16, w17 // Add aux function result
ror w8, w8, #11 // Rotate left s=21 bits
ldp w6, w15, [x0] // Reload MD5 state->A and state->B
ldp w5, w9, [x0, #8] // Reload MD5 state->C and state->D
add w3, w14, w8 // Add X parameter round 4 B=II(B, C, D, A, 0xeb86d391, s=21, M[9])
add w13, w4, w9 // Add result of MD5 rounds to state->D
add w12, w14, w5 // Add result of MD5 rounds to state->C
add w10, w7, w6 // Add result of MD5 rounds to state->A
add w11, w3, w15 // Add result of MD5 rounds to state->B
stp w12, w13, [x0, #8] // Store MD5 states C,D
stp w10, w11, [x0] // Store MD5 states A,B
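// For reference: this is the standard per-block feed-forward. The four
// working values produced by the 64 steps are added into the saved state
// words A..D, the data pointer advances one 64-byte block, and the loop
// repeats until the block counter in w2 reaches zero.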
add x1, x1, #64 // Increment data pointer
subs w2, w2, #1 // Decrement block counter
b.ne md5_blocks_loop
ldp x21,x22,[sp,#16]
ldp x23,x24,[sp,#32]
ldp x25,x26,[sp,#48]
ldp x27,x28,[sp,#64]
ldp x19,x20,[sp],#80
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/sha512-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the OpenSSL license (the "License"). You may not use
// this file except in compliance with the License. You can obtain a copy
// in the file LICENSE in the source distribution or at
// https://www.openssl.org/source/license.html
// ====================================================================
// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
// project. The module is, however, dual licensed under OpenSSL and
// CRYPTOGAMS licenses depending on where you obtain it. For further
// details see http://www.openssl.org/~appro/cryptogams/.
//
// Permission to use under GPLv2 terms is granted.
// ====================================================================
//
// SHA256/512 for ARMv8.
//
// Performance in cycles per processed byte and improvement coefficient
// over code generated with "default" compiler:
//
// SHA256-hw SHA256(*) SHA512
// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
// Denver 2.01 10.5 (+26%) 6.70 (+8%)
// X-Gene 20.0 (+100%) 12.8 (+300%(***))
// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
// Kryo 1.92 17.4 (+30%) 11.2 (+8%)
//
// (*) Software SHA256 results are of lesser relevance, presented
// mostly for informational purposes.
// (**) The result is a trade-off: it's possible to improve it by
// 10% (or by 1 cycle per round), but at the cost of 20% loss
// on Cortex-A53 (or by 4 cycles per round).
// (***) Super-impressive coefficients over gcc-generated code are
// indication of some compiler "pathology", most notably code
// generated with -mgeneral-regs-only is significantly faster
// and the gap is only 40-90%.
#ifndef __KERNEL__
# include <openssl/arm_arch.h>
#endif
.text
.globl sha512_block_data_order_nohw
.def sha512_block_data_order_nohw
.type 32
.endef
.align 6
sha512_block_data_order_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#4*8
ldp x20,x21,[x0] // load context
ldp x22,x23,[x0,#2*8]
ldp x24,x25,[x0,#4*8]
add x2,x1,x2,lsl#7 // end of input
ldp x26,x27,[x0,#6*8]
adrp x30,LK512
add x30,x30,:lo12:LK512
stp x0,x2,[x29,#96]
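// For reference, each of the 80 rounds below implements (FIPS 180-4):
//   T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i]
//   T2 = Sigma0(a) + Maj(a,b,c);  d += T1;  h = T1 + T2
// Sigma1(x) = ror(x,14)^ror(x,18)^ror(x,41) is built as
//   ror(x,14) ^ ror(x ^ ror(x,23), 18),
// and Sigma0(x) = ror(x,28)^ror(x,34)^ror(x,39) as
//   ror(x,28) ^ ror(x ^ ror(x,5), 34).
// Ch(e,f,g) = (e&f)^(~e&g) appears as and/bic/orr since the two terms
// never overlap, and Maj(a,b,c) is computed incrementally as
// b ^ ((a^b)&(b^c)), with the a^b value carried into the next round.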
Loop:
ldp x3,x4,[x1],#2*8
ldr x19,[x30],#8 // *K++
eor x28,x21,x22 // magic seed
str x1,[x29,#112]
#ifndef __AARCH64EB__
rev x3,x3 // 0
#endif
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x6,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x3 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x4,x4 // 1
#endif
ldp x5,x6,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x7,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x4 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x5,x5 // 2
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x8,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x5 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x6,x6 // 3
#endif
ldp x7,x8,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x9,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x6 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x7,x7 // 4
#endif
add x24,x24,x17 // h+=Sigma0(a)
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x10,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x7 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x10,ror#18 // Sigma1(e)
ror x10,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x10,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x8,x8 // 5
#endif
ldp x9,x10,[x1],#2*8
add x23,x23,x17 // h+=Sigma0(a)
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x11,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x8 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x11,ror#18 // Sigma1(e)
ror x11,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x11,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x9,x9 // 6
#endif
add x22,x22,x17 // h+=Sigma0(a)
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x12,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x9 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x12,ror#18 // Sigma1(e)
ror x12,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x12,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x10,x10 // 7
#endif
ldp x11,x12,[x1],#2*8
add x21,x21,x17 // h+=Sigma0(a)
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
eor x13,x25,x25,ror#23
and x17,x26,x25
bic x28,x27,x25
add x20,x20,x10 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x13,ror#18 // Sigma1(e)
ror x13,x21,#28
add x20,x20,x17 // h+=Ch(e,f,g)
eor x17,x21,x21,ror#5
add x20,x20,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x24,x24,x20 // d+=h
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x13,x17,ror#34 // Sigma0(a)
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x20,x20,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x11,x11 // 8
#endif
add x20,x20,x17 // h+=Sigma0(a)
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
eor x14,x24,x24,ror#23
and x17,x25,x24
bic x19,x26,x24
add x27,x27,x11 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x14,ror#18 // Sigma1(e)
ror x14,x20,#28
add x27,x27,x17 // h+=Ch(e,f,g)
eor x17,x20,x20,ror#5
add x27,x27,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x23,x23,x27 // d+=h
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x14,x17,ror#34 // Sigma0(a)
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x27,x27,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x12,x12 // 9
#endif
ldp x13,x14,[x1],#2*8
add x27,x27,x17 // h+=Sigma0(a)
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
eor x15,x23,x23,ror#23
and x17,x24,x23
bic x28,x25,x23
add x26,x26,x12 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x15,ror#18 // Sigma1(e)
ror x15,x27,#28
add x26,x26,x17 // h+=Ch(e,f,g)
eor x17,x27,x27,ror#5
add x26,x26,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x22,x22,x26 // d+=h
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x15,x17,ror#34 // Sigma0(a)
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x26,x26,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x13,x13 // 10
#endif
add x26,x26,x17 // h+=Sigma0(a)
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
eor x0,x22,x22,ror#23
and x17,x23,x22
bic x19,x24,x22
add x25,x25,x13 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x0,ror#18 // Sigma1(e)
ror x0,x26,#28
add x25,x25,x17 // h+=Ch(e,f,g)
eor x17,x26,x26,ror#5
add x25,x25,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x21,x21,x25 // d+=h
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x0,x17,ror#34 // Sigma0(a)
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x25,x25,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x14,x14 // 11
#endif
ldp x15,x0,[x1],#2*8
add x25,x25,x17 // h+=Sigma0(a)
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
eor x6,x21,x21,ror#23
and x17,x22,x21
bic x28,x23,x21
add x24,x24,x14 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x6,ror#18 // Sigma1(e)
ror x6,x25,#28
add x24,x24,x17 // h+=Ch(e,f,g)
eor x17,x25,x25,ror#5
add x24,x24,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x20,x20,x24 // d+=h
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x17,ror#34 // Sigma0(a)
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x24,x24,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x15,x15 // 12
#endif
add x24,x24,x17 // h+=Sigma0(a)
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
eor x7,x20,x20,ror#23
and x17,x21,x20
bic x19,x22,x20
add x23,x23,x15 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x7,ror#18 // Sigma1(e)
ror x7,x24,#28
add x23,x23,x17 // h+=Ch(e,f,g)
eor x17,x24,x24,ror#5
add x23,x23,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x27,x27,x23 // d+=h
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x17,ror#34 // Sigma0(a)
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x23,x23,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x0,x0 // 13
#endif
ldp x1,x2,[x1]
add x23,x23,x17 // h+=Sigma0(a)
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
eor x8,x27,x27,ror#23
and x17,x20,x27
bic x28,x21,x27
add x22,x22,x0 // h+=X[i]
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x8,ror#18 // Sigma1(e)
ror x8,x23,#28
add x22,x22,x17 // h+=Ch(e,f,g)
eor x17,x23,x23,ror#5
add x22,x22,x16 // h+=Sigma1(e)
and x19,x19,x28 // (b^c)&=(a^b)
add x26,x26,x22 // d+=h
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x17,ror#34 // Sigma0(a)
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
//add x22,x22,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x1,x1 // 14
#endif
ldr x6,[sp,#24]
add x22,x22,x17 // h+=Sigma0(a)
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
eor x9,x26,x26,ror#23
and x17,x27,x26
bic x19,x20,x26
add x21,x21,x1 // h+=X[i]
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x9,ror#18 // Sigma1(e)
ror x9,x22,#28
add x21,x21,x17 // h+=Ch(e,f,g)
eor x17,x22,x22,ror#5
add x21,x21,x16 // h+=Sigma1(e)
and x28,x28,x19 // (b^c)&=(a^b)
add x25,x25,x21 // d+=h
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x17,ror#34 // Sigma0(a)
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
//add x21,x21,x17 // h+=Sigma0(a)
#ifndef __AARCH64EB__
rev x2,x2 // 15
#endif
ldr x7,[sp,#0]
add x21,x21,x17 // h+=Sigma0(a)
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
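// For reference: from round 16 onward the message schedule is extended as
//   W[i] = W[i-16] + sigma0(W[i-15]) + W[i-7] + sigma1(W[i-2])
// with sigma0(x) = ror(x,1)^ror(x,8)^(x>>7) and
//      sigma1(x) = ror(x,19)^ror(x,61)^(x>>6).
// The 16-word window is kept in general-purpose registers, with four of
// the words parked in the stack slots at [sp,#0..#24] at any given time,
// hence the ldr/str pair at the top of each round below.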
Loop_16_xx:
ldr x8,[sp,#8]
str x11,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x10,x5,#1
and x17,x25,x24
ror x9,x2,#19
bic x19,x26,x24
ror x11,x20,#28
add x27,x27,x3 // h+=X[i]
eor x16,x16,x24,ror#18
eor x10,x10,x5,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x11,x11,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x9,x9,x2,ror#61
eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x11,x20,ror#39 // Sigma0(a)
eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
add x4,x4,x13
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x4,x4,x10
add x27,x27,x17 // h+=Sigma0(a)
add x4,x4,x9
ldr x9,[sp,#16]
str x12,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x11,x6,#1
and x17,x24,x23
ror x10,x3,#19
bic x28,x25,x23
ror x12,x27,#28
add x26,x26,x4 // h+=X[i]
eor x16,x16,x23,ror#18
eor x11,x11,x6,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x12,x12,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x10,x10,x3,ror#61
eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x12,x27,ror#39 // Sigma0(a)
eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
add x5,x5,x14
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x5,x5,x11
add x26,x26,x17 // h+=Sigma0(a)
add x5,x5,x10
ldr x10,[sp,#24]
str x13,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x12,x7,#1
and x17,x23,x22
ror x11,x4,#19
bic x19,x24,x22
ror x13,x26,#28
add x25,x25,x5 // h+=X[i]
eor x16,x16,x22,ror#18
eor x12,x12,x7,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x13,x13,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x11,x11,x4,ror#61
eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x13,x26,ror#39 // Sigma0(a)
eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
add x6,x6,x15
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x6,x6,x12
add x25,x25,x17 // h+=Sigma0(a)
add x6,x6,x11
ldr x11,[sp,#0]
str x14,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x13,x8,#1
and x17,x22,x21
ror x12,x5,#19
bic x28,x23,x21
ror x14,x25,#28
add x24,x24,x6 // h+=X[i]
eor x16,x16,x21,ror#18
eor x13,x13,x8,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x14,x14,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x12,x12,x5,ror#61
eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x14,x25,ror#39 // Sigma0(a)
eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
add x7,x7,x0
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x7,x7,x13
add x24,x24,x17 // h+=Sigma0(a)
add x7,x7,x12
ldr x12,[sp,#8]
str x15,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x14,x9,#1
and x17,x21,x20
ror x13,x6,#19
bic x19,x22,x20
ror x15,x24,#28
add x23,x23,x7 // h+=X[i]
eor x16,x16,x20,ror#18
eor x14,x14,x9,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x15,x15,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x13,x13,x6,ror#61
eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x15,x24,ror#39 // Sigma0(a)
eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
add x8,x8,x1
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x8,x8,x14
add x23,x23,x17 // h+=Sigma0(a)
add x8,x8,x13
ldr x13,[sp,#16]
str x0,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x15,x10,#1
and x17,x20,x27
ror x14,x7,#19
bic x28,x21,x27
ror x0,x23,#28
add x22,x22,x8 // h+=X[i]
eor x16,x16,x27,ror#18
eor x15,x15,x10,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x0,x0,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x14,x14,x7,ror#61
eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x0,x23,ror#39 // Sigma0(a)
eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
add x9,x9,x2
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x9,x9,x15
add x22,x22,x17 // h+=Sigma0(a)
add x9,x9,x14
ldr x14,[sp,#24]
str x1,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x0,x11,#1
and x17,x27,x26
ror x15,x8,#19
bic x19,x20,x26
ror x1,x22,#28
add x21,x21,x9 // h+=X[i]
eor x16,x16,x26,ror#18
eor x0,x0,x11,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x1,x1,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x15,x15,x8,ror#61
eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x1,x22,ror#39 // Sigma0(a)
eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
add x10,x10,x3
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x10,x10,x0
add x21,x21,x17 // h+=Sigma0(a)
add x10,x10,x15
ldr x15,[sp,#0]
str x2,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x1,x12,#1
and x17,x26,x25
ror x0,x9,#19
bic x28,x27,x25
ror x2,x21,#28
add x20,x20,x10 // h+=X[i]
eor x16,x16,x25,ror#18
eor x1,x1,x12,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x2,x2,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x0,x0,x9,ror#61
eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x2,x21,ror#39 // Sigma0(a)
eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
add x11,x11,x4
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x11,x11,x1
add x20,x20,x17 // h+=Sigma0(a)
add x11,x11,x0
ldr x0,[sp,#8]
str x3,[sp,#0]
ror x16,x24,#14
add x27,x27,x19 // h+=K[i]
ror x2,x13,#1
and x17,x25,x24
ror x1,x10,#19
bic x19,x26,x24
ror x3,x20,#28
add x27,x27,x11 // h+=X[i]
eor x16,x16,x24,ror#18
eor x2,x2,x13,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x20,x21 // a^b, b^c in next round
eor x16,x16,x24,ror#41 // Sigma1(e)
eor x3,x3,x20,ror#34
add x27,x27,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x1,x1,x10,ror#61
eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
add x27,x27,x16 // h+=Sigma1(e)
eor x28,x28,x21 // Maj(a,b,c)
eor x17,x3,x20,ror#39 // Sigma0(a)
eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
add x12,x12,x5
add x23,x23,x27 // d+=h
add x27,x27,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x12,x12,x2
add x27,x27,x17 // h+=Sigma0(a)
add x12,x12,x1
ldr x1,[sp,#16]
str x4,[sp,#8]
ror x16,x23,#14
add x26,x26,x28 // h+=K[i]
ror x3,x14,#1
and x17,x24,x23
ror x2,x11,#19
bic x28,x25,x23
ror x4,x27,#28
add x26,x26,x12 // h+=X[i]
eor x16,x16,x23,ror#18
eor x3,x3,x14,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x27,x20 // a^b, b^c in next round
eor x16,x16,x23,ror#41 // Sigma1(e)
eor x4,x4,x27,ror#34
add x26,x26,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x2,x2,x11,ror#61
eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
add x26,x26,x16 // h+=Sigma1(e)
eor x19,x19,x20 // Maj(a,b,c)
eor x17,x4,x27,ror#39 // Sigma0(a)
eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
add x13,x13,x6
add x22,x22,x26 // d+=h
add x26,x26,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x13,x13,x3
add x26,x26,x17 // h+=Sigma0(a)
add x13,x13,x2
ldr x2,[sp,#24]
str x5,[sp,#16]
ror x16,x22,#14
add x25,x25,x19 // h+=K[i]
ror x4,x15,#1
and x17,x23,x22
ror x3,x12,#19
bic x19,x24,x22
ror x5,x26,#28
add x25,x25,x13 // h+=X[i]
eor x16,x16,x22,ror#18
eor x4,x4,x15,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x26,x27 // a^b, b^c in next round
eor x16,x16,x22,ror#41 // Sigma1(e)
eor x5,x5,x26,ror#34
add x25,x25,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x3,x3,x12,ror#61
eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
add x25,x25,x16 // h+=Sigma1(e)
eor x28,x28,x27 // Maj(a,b,c)
eor x17,x5,x26,ror#39 // Sigma0(a)
eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
add x14,x14,x7
add x21,x21,x25 // d+=h
add x25,x25,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x14,x14,x4
add x25,x25,x17 // h+=Sigma0(a)
add x14,x14,x3
ldr x3,[sp,#0]
str x6,[sp,#24]
ror x16,x21,#14
add x24,x24,x28 // h+=K[i]
ror x5,x0,#1
and x17,x22,x21
ror x4,x13,#19
bic x28,x23,x21
ror x6,x25,#28
add x24,x24,x14 // h+=X[i]
eor x16,x16,x21,ror#18
eor x5,x5,x0,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x25,x26 // a^b, b^c in next round
eor x16,x16,x21,ror#41 // Sigma1(e)
eor x6,x6,x25,ror#34
add x24,x24,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x4,x4,x13,ror#61
eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
add x24,x24,x16 // h+=Sigma1(e)
eor x19,x19,x26 // Maj(a,b,c)
eor x17,x6,x25,ror#39 // Sigma0(a)
eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
add x15,x15,x8
add x20,x20,x24 // d+=h
add x24,x24,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x15,x15,x5
add x24,x24,x17 // h+=Sigma0(a)
add x15,x15,x4
ldr x4,[sp,#8]
str x7,[sp,#0]
ror x16,x20,#14
add x23,x23,x19 // h+=K[i]
ror x6,x1,#1
and x17,x21,x20
ror x5,x14,#19
bic x19,x22,x20
ror x7,x24,#28
add x23,x23,x15 // h+=X[i]
eor x16,x16,x20,ror#18
eor x6,x6,x1,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x24,x25 // a^b, b^c in next round
eor x16,x16,x20,ror#41 // Sigma1(e)
eor x7,x7,x24,ror#34
add x23,x23,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x5,x5,x14,ror#61
eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
add x23,x23,x16 // h+=Sigma1(e)
eor x28,x28,x25 // Maj(a,b,c)
eor x17,x7,x24,ror#39 // Sigma0(a)
eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
add x0,x0,x9
add x27,x27,x23 // d+=h
add x23,x23,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x0,x0,x6
add x23,x23,x17 // h+=Sigma0(a)
add x0,x0,x5
ldr x5,[sp,#16]
str x8,[sp,#8]
ror x16,x27,#14
add x22,x22,x28 // h+=K[i]
ror x7,x2,#1
and x17,x20,x27
ror x6,x15,#19
bic x28,x21,x27
ror x8,x23,#28
add x22,x22,x0 // h+=X[i]
eor x16,x16,x27,ror#18
eor x7,x7,x2,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x23,x24 // a^b, b^c in next round
eor x16,x16,x27,ror#41 // Sigma1(e)
eor x8,x8,x23,ror#34
add x22,x22,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x6,x6,x15,ror#61
eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
add x22,x22,x16 // h+=Sigma1(e)
eor x19,x19,x24 // Maj(a,b,c)
eor x17,x8,x23,ror#39 // Sigma0(a)
eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
add x1,x1,x10
add x26,x26,x22 // d+=h
add x22,x22,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x1,x1,x7
add x22,x22,x17 // h+=Sigma0(a)
add x1,x1,x6
ldr x6,[sp,#24]
str x9,[sp,#16]
ror x16,x26,#14
add x21,x21,x19 // h+=K[i]
ror x8,x3,#1
and x17,x27,x26
ror x7,x0,#19
bic x19,x20,x26
ror x9,x22,#28
add x21,x21,x1 // h+=X[i]
eor x16,x16,x26,ror#18
eor x8,x8,x3,ror#8
orr x17,x17,x19 // Ch(e,f,g)
eor x19,x22,x23 // a^b, b^c in next round
eor x16,x16,x26,ror#41 // Sigma1(e)
eor x9,x9,x22,ror#34
add x21,x21,x17 // h+=Ch(e,f,g)
and x28,x28,x19 // (b^c)&=(a^b)
eor x7,x7,x0,ror#61
eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
add x21,x21,x16 // h+=Sigma1(e)
eor x28,x28,x23 // Maj(a,b,c)
eor x17,x9,x22,ror#39 // Sigma0(a)
eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
add x2,x2,x11
add x25,x25,x21 // d+=h
add x21,x21,x28 // h+=Maj(a,b,c)
ldr x28,[x30],#8 // *K++, x19 in next round
add x2,x2,x8
add x21,x21,x17 // h+=Sigma0(a)
add x2,x2,x7
ldr x7,[sp,#0]
str x10,[sp,#24]
ror x16,x25,#14
add x20,x20,x28 // h+=K[i]
ror x9,x4,#1
and x17,x26,x25
ror x8,x1,#19
bic x28,x27,x25
ror x10,x21,#28
add x20,x20,x2 // h+=X[i]
eor x16,x16,x25,ror#18
eor x9,x9,x4,ror#8
orr x17,x17,x28 // Ch(e,f,g)
eor x28,x21,x22 // a^b, b^c in next round
eor x16,x16,x25,ror#41 // Sigma1(e)
eor x10,x10,x21,ror#34
add x20,x20,x17 // h+=Ch(e,f,g)
and x19,x19,x28 // (b^c)&=(a^b)
eor x8,x8,x1,ror#61
eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
add x20,x20,x16 // h+=Sigma1(e)
eor x19,x19,x22 // Maj(a,b,c)
eor x17,x10,x21,ror#39 // Sigma0(a)
eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
add x3,x3,x12
add x24,x24,x20 // d+=h
add x20,x20,x19 // h+=Maj(a,b,c)
ldr x19,[x30],#8 // *K++, x28 in next round
add x3,x3,x9
add x20,x20,x17 // h+=Sigma0(a)
add x3,x3,x8
cbnz x19,Loop_16_xx
ldp x0,x2,[x29,#96]
ldr x1,[x29,#112]
sub x30,x30,#648 // rewind
ldp x3,x4,[x0]
ldp x5,x6,[x0,#2*8]
add x1,x1,#14*8 // advance input pointer
ldp x7,x8,[x0,#4*8]
add x20,x20,x3
ldp x9,x10,[x0,#6*8]
add x21,x21,x4
add x22,x22,x5
add x23,x23,x6
stp x20,x21,[x0]
add x24,x24,x7
add x25,x25,x8
stp x22,x23,[x0,#2*8]
add x26,x26,x9
add x27,x27,x10
cmp x1,x2
stp x24,x25,[x0,#4*8]
stp x26,x27,[x0,#6*8]
b.ne Loop
ldp x19,x20,[x29,#16]
add sp,sp,#4*8
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.section .rodata
.align 6
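// SHA-512 round-constant table: the 80 64-bit constants K[0..79] from
// FIPS 180-4, followed by a zero terminator that the scalar round loop
// uses (via cbnz) to detect the end of the schedule.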
LK512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd
.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad 0x3956c25bf348b538,0x59f111f1b605d019
.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad 0xd807aa98a3030242,0x12835b0145706fbe
.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad 0x9bdc06a725c71235,0xc19bf174cf692694
.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad 0x983e5152ee66dfab,0xa831c66d2db43210
.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad 0x06ca6351e003826f,0x142929670a0e6e70
.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
.quad 0x81c2c92e47edaee6,0x92722c851482353b
.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
.quad 0xd192e819d6ef5218,0xd69906245565a910
.quad 0xf40e35855771202a,0x106aa07032bbd1b8
.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad 0x90befffa23631e28,0xa4506cebde82bde9
.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad 0xca273eceea26619c,0xd186b8c721c0c207
.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad 0x113f9804bef90dae,0x1b710b35131c471b
.quad 0x28db77f523047d84,0x32caab7b40c72493
.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.quad 0 // terminator
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
.text
#ifndef __KERNEL__
.globl sha512_block_data_order_hw
.def sha512_block_data_order_hw
.type 32
.endef
.align 6
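// Hardware-accelerated SHA-512 block function (ARMv8.2 SHA-512 extension).
// Register usage on entry: x0 = hash state (eight 64-bit words),
// x1 = input data, x2 = number of 128-byte blocks to process.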
sha512_block_data_order_hw:
#ifdef BORINGSSL_DISPATCH_TEST
adrp x9,BORINGSSL_function_hit
add x9, x9, :lo12:BORINGSSL_function_hit
mov w10, #1
strb w10, [x9,#8] // kFlag_sha512_hw
#endif
// Armv8.3-A PAuth: even though x30 is pushed to the stack, it is never popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context
adrp x3,LK512
add x3,x3,:lo12:LK512
rev64 v16.16b,v16.16b
rev64 v17.16b,v17.16b
rev64 v18.16b,v18.16b
rev64 v19.16b,v19.16b
rev64 v20.16b,v20.16b
rev64 v21.16b,v21.16b
rev64 v22.16b,v22.16b
rev64 v23.16b,v23.16b
b Loop_hw
.align 4
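// Each iteration of Loop_hw processes one 128-byte block. v0-v3 hold the
// eight state words (two per register) and are offloaded to v26-v29 at the
// top of the loop; v16-v23 hold the 16-word message schedule; v24/v25 carry
// the round constants streamed from LK512 via x3. The .long words below are
// the sha512su0/sha512su1/sha512h/sha512h2 instructions emitted as raw
// encodings, with the mnemonic shown in each trailing comment.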
Loop_hw:
ld1 {v24.2d},[x3],#16
subs x2,x2,#1
sub x4,x1,#128
orr v26.16b,v0.16b,v0.16b // offload
orr v27.16b,v1.16b,v1.16b
orr v28.16b,v2.16b,v2.16b
orr v29.16b,v3.16b,v3.16b
csel x1,x1,x4,ne // conditional rewind
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v24.2d,v24.2d,v16.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08230 //sha512su0 v16.16b,v17.16b
ext v7.16b,v20.16b,v21.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v25.2d,v25.2d,v17.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08251 //sha512su0 v17.16b,v18.16b
ext v7.16b,v21.16b,v22.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v24.2d,v24.2d,v18.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec08272 //sha512su0 v18.16b,v19.16b
ext v7.16b,v22.16b,v23.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
add v25.2d,v25.2d,v19.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08293 //sha512su0 v19.16b,v20.16b
ext v7.16b,v23.16b,v16.16b,#8
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
add v24.2d,v24.2d,v20.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082b4 //sha512su0 v20.16b,v21.16b
ext v7.16b,v16.16b,v17.16b,#8
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
add v25.2d,v25.2d,v21.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec082d5 //sha512su0 v21.16b,v22.16b
ext v7.16b,v17.16b,v18.16b,#8
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v24.2d,v24.2d,v22.2d
ld1 {v25.2d},[x3],#16
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
.long 0xcec082f6 //sha512su0 v22.16b,v23.16b
ext v7.16b,v18.16b,v19.16b,#8
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
add v25.2d,v25.2d,v23.2d
ld1 {v24.2d},[x3],#16
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]"
.long 0xcec08217 //sha512su0 v23.16b,v16.16b
ext v7.16b,v19.16b,v20.16b,#8
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v16.2d
ld1 {v16.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v16.16b,v16.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v17.2d
ld1 {v17.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v17.16b,v17.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v18.2d
ld1 {v18.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v18.16b,v18.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v19.2d
ld1 {v19.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v2.16b,v3.16b,#8
ext v6.16b,v1.16b,v2.16b,#8
add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b
rev64 v19.16b,v19.16b
add v4.2d,v1.2d,v3.2d // "D + T1"
.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v20.2d
ld1 {v20.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v4.16b,v2.16b,#8
ext v6.16b,v0.16b,v4.16b,#8
add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b
rev64 v20.16b,v20.16b
add v1.2d,v0.2d,v2.2d // "D + T1"
.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b
ld1 {v24.2d},[x3],#16
add v25.2d,v25.2d,v21.2d
ld1 {v21.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v1.16b,v4.16b,#8
ext v6.16b,v3.16b,v1.16b,#8
add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b
rev64 v21.16b,v21.16b
add v0.2d,v3.2d,v4.2d // "D + T1"
.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b
ld1 {v25.2d},[x3],#16
add v24.2d,v24.2d,v22.2d
ld1 {v22.16b},[x1],#16 // load next input
ext v24.16b,v24.16b,v24.16b,#8
ext v5.16b,v0.16b,v1.16b,#8
ext v6.16b,v2.16b,v0.16b,#8
add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]"
.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b
rev64 v22.16b,v22.16b
add v3.2d,v2.2d,v1.2d // "D + T1"
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b
sub x3,x3,#80*8 // rewind
add v25.2d,v25.2d,v23.2d
ld1 {v23.16b},[x1],#16 // load next input
ext v25.16b,v25.16b,v25.16b,#8
ext v5.16b,v3.16b,v0.16b,#8
ext v6.16b,v4.16b,v3.16b,#8
add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]"
.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b
rev64 v23.16b,v23.16b
add v2.2d,v4.2d,v0.2d // "D + T1"
.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b
add v0.2d,v0.2d,v26.2d // accumulate
add v1.2d,v1.2d,v27.2d
add v2.2d,v2.2d,v28.2d
add v3.2d,v3.2d,v29.2d
cbnz x2,Loop_hw
st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context
ldr x29,[sp],#16
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
marvin-hansen/iggy-streaming-system
| 17,297
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/ghashv8-armx.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.globl gcm_init_v8
.def gcm_init_v8
.type 32
.endef
.align 4
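// gcm_init_v8: x0 = Htable (output), x1 = hash subkey H (16 bytes).
// Builds the table used by gcm_gmult_v8/gcm_ghash_v8: the "twisted" H
// together with the powers H^2..H^8 and their pre-XORed Karatsuba halves,
// as noted in the store comments below.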
gcm_init_v8:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x1] //load input H
movi v19.16b,#0xe1
shl v19.2d,v19.2d,#57 //0xc2.0
ext v3.16b,v17.16b,v17.16b,#8
ushr v18.2d,v19.2d,#63
dup v17.4s,v17.s[1]
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
ushr v18.2d,v3.2d,#63
sshr v17.4s,v17.4s,#31 //broadcast carry bit
and v18.16b,v18.16b,v16.16b
shl v3.2d,v3.2d,#1
ext v18.16b,v18.16b,v18.16b,#8
and v16.16b,v16.16b,v17.16b
orr v3.16b,v3.16b,v18.16b //H<<<=1
eor v20.16b,v3.16b,v16.16b //twisted H
ext v20.16b, v20.16b, v20.16b, #8
st1 {v20.2d},[x0],#16 //store Htable[0]
//calculate H^2
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
pmull2 v0.1q,v20.2d,v20.2d
eor v16.16b,v16.16b,v20.16b
pmull v2.1q,v20.1d,v20.1d
pmull v1.1q,v16.1d,v16.1d
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v17.16b,v0.16b,v18.16b
ext v22.16b,v17.16b,v17.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d},[x0],#16 //store Htable[1..2]
st1 {v22.2d},[x0],#16 //store Htable[1..2]
//calculate H^3 and H^4
pmull2 v0.1q,v20.2d, v22.2d
pmull2 v5.1q,v22.2d,v22.2d
pmull v2.1q,v20.1d, v22.1d
pmull v7.1q,v22.1d,v22.1d
pmull v1.1q,v16.1d,v17.1d
pmull v6.1q,v17.1d,v17.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v16.16b, v0.16b,v18.16b //H^3
eor v17.16b, v5.16b,v4.16b //H^4
ext v23.16b,v16.16b,v16.16b,#8 //Karatsuba pre-processing
ext v25.16b,v17.16b,v17.16b,#8
ext v18.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v23.16b
eor v17.16b,v17.16b,v25.16b
eor v18.16b,v18.16b,v22.16b
ext v24.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v23.2d,v24.2d,v25.2d},[x0],#48 //store Htable[3..5]
//calculate H^5 and H^6
pmull2 v0.1q,v22.2d, v23.2d
pmull2 v5.1q,v23.2d,v23.2d
pmull v2.1q,v22.1d, v23.1d
pmull v7.1q,v23.1d,v23.1d
pmull v1.1q,v16.1d,v18.1d
pmull v6.1q,v16.1d,v16.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v16.16b,v0.16b,v18.16b //H^5
eor v17.16b,v5.16b,v4.16b //H^6
ext v26.16b, v16.16b, v16.16b,#8 //Karatsuba pre-processing
ext v28.16b, v17.16b, v17.16b,#8
ext v18.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v26.16b
eor v17.16b,v17.16b,v28.16b
eor v18.16b,v18.16b,v22.16b
ext v27.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v26.2d,v27.2d,v28.2d},[x0],#48 //store Htable[6..8]
//calculate H^7 and H^8
pmull2 v0.1q,v22.2d,v26.2d
pmull2 v5.1q,v22.2d,v28.2d
pmull v2.1q,v22.1d,v26.1d
pmull v7.1q,v22.1d,v28.1d
pmull v1.1q,v16.1d,v18.1d
pmull v6.1q,v17.1d,v18.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v16.16b,v0.16b,v18.16b //H^7
eor v17.16b,v5.16b,v4.16b //H^8
ext v29.16b,v16.16b,v16.16b,#8 //Karatsuba pre-processing
ext v31.16b,v17.16b,v17.16b,#8
eor v16.16b,v16.16b,v29.16b
eor v17.16b,v17.16b,v31.16b
ext v30.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v29.2d,v30.2d,v31.2d},[x0] //store Htable[9..11]
ret
.globl gcm_gmult_v8
.def gcm_gmult_v8
.type 32
.endef
.align 4
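// gcm_gmult_v8: x0 = Xi (16 bytes, updated in place), x1 = Htable.
// Performs a single GHASH multiplication Xi = Xi * H using pmull/pmull2
// with Karatsuba middle-term folding and the usual two-phase reduction.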
gcm_gmult_v8:
AARCH64_VALID_CALL_TARGET
ld1 {v17.2d},[x0] //load Xi
movi v19.16b,#0xe1
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
ext v20.16b,v20.16b,v20.16b,#8
shl v19.2d,v19.2d,#57
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ext v3.16b,v17.16b,v17.16b,#8
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.globl gcm_ghash_v8
.def gcm_ghash_v8
.type 32
.endef
.align 4
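// gcm_ghash_v8: x0 = Xi (updated in place), x1 = Htable, x2 = input,
// x3 = length in bytes (whole 16-byte blocks). Inputs of 64 bytes or more
// take the 4x-aggregated path below; shorter inputs use a 2x
// modulo-scheduled loop plus an odd-block tail.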
gcm_ghash_v8:
AARCH64_VALID_CALL_TARGET
cmp x3,#64
b.hs Lgcm_ghash_v8_4x
ld1 {v0.2d},[x0] //load [rotated] Xi
//"[rotated]" means that
//loaded value would have
//to be rotated in order to
//make it appear as in
//algorithm specification
subs x3,x3,#32 //see if x3 is 32 or larger
mov x12,#16 //x12 is used as post-
//increment for input pointer;
//as loop is modulo-scheduled
//x12 is zeroed just in time
//to preclude overstepping
//inp[len], which means that
//last block[s] are actually
//loaded twice, but last
//copy is not processed
ld1 {v20.2d,v21.2d},[x1],#32 //load twisted H, ..., H^2
ext v20.16b,v20.16b,v20.16b,#8
movi v19.16b,#0xe1
ld1 {v22.2d},[x1]
ext v22.16b,v22.16b,v22.16b,#8
csel x12,xzr,x12,eq //is it time to zero x12?
ext v0.16b,v0.16b,v0.16b,#8 //rotate Xi
ld1 {v16.2d},[x2],#16 //load [rotated] I[0]
shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant
#ifndef __AARCH64EB__
rev64 v16.16b,v16.16b
rev64 v0.16b,v0.16b
#endif
ext v3.16b,v16.16b,v16.16b,#8 //rotate I[0]
b.lo Lodd_tail_v8 //x3 was less than 32
ld1 {v17.2d},[x2],x12 //load [rotated] I[1]
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ext v7.16b,v17.16b,v17.16b,#8
eor v3.16b,v3.16b,v0.16b //I[i]^=Xi
pmull v4.1q,v20.1d,v7.1d //H·Ii+1
eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing
pmull2 v6.1q,v20.2d,v7.2d
b Loop_mod2x_v8
.align 4
Loop_mod2x_v8:
ext v18.16b,v3.16b,v3.16b,#8
subs x3,x3,#32 //is there more data?
pmull v0.1q,v22.1d,v3.1d //H^2.lo·Xi.lo
csel x12,xzr,x12,lo //is it time to zero x12?
pmull v5.1q,v21.1d,v17.1d
eor v18.16b,v18.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v22.2d,v3.2d //H^2.hi·Xi.hi
eor v0.16b,v0.16b,v4.16b //accumulate
pmull2 v1.1q,v21.2d,v18.2d //(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
ld1 {v16.2d},[x2],x12 //load [rotated] I[i+2]
eor v2.16b,v2.16b,v6.16b
csel x12,xzr,x12,eq //is it time to zero x12?
eor v1.16b,v1.16b,v5.16b
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v17.2d},[x2],x12 //load [rotated] I[i+3]
#ifndef __AARCH64EB__
rev64 v16.16b,v16.16b
#endif
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
#ifndef __AARCH64EB__
rev64 v17.16b,v17.16b
#endif
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v7.16b,v17.16b,v17.16b,#8
ext v3.16b,v16.16b,v16.16b,#8
eor v0.16b,v1.16b,v18.16b
pmull v4.1q,v20.1d,v7.1d //H·Ii+1
eor v3.16b,v3.16b,v2.16b //accumulate v3.16b early
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v3.16b,v3.16b,v18.16b
eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing
eor v3.16b,v3.16b,v0.16b
pmull2 v6.1q,v20.2d,v7.2d
b.hs Loop_mod2x_v8 //there was at least 32 more bytes
eor v2.16b,v2.16b,v18.16b
ext v3.16b,v16.16b,v16.16b,#8 //re-construct v3.16b
adds x3,x3,#32 //re-construct x3
eor v0.16b,v0.16b,v2.16b //re-construct v0.16b
b.eq Ldone_v8 //is x3 zero?
Lodd_tail_v8:
ext v18.16b,v0.16b,v0.16b,#8
eor v3.16b,v3.16b,v0.16b //inp^=Xi
eor v17.16b,v16.16b,v18.16b //v17.16b is rotated inp^Xi
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
Ldone_v8:
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.def gcm_ghash_v8_4x
.type 32
.endef
.align 4
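// 4x-aggregated GHASH: four input blocks are multiplied by H^4..H
// respectively and the partial products are summed before a single
// reduction, using the Htable entries for H, H^2 (v20-v22) and
// H^3, H^4 (v26-v28) loaded below.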
gcm_ghash_v8_4x:
Lgcm_ghash_v8_4x:
ld1 {v0.2d},[x0] //load [rotated] Xi
ld1 {v20.2d,v21.2d,v22.2d},[x1],#48 //load twisted H, ..., H^2
ext v20.16b,v20.16b,v20.16b,#8
ext v22.16b,v22.16b,v22.16b,#8
movi v19.16b,#0xe1
ld1 {v26.2d,v27.2d,v28.2d},[x1] //load twisted H^3, ..., H^4
ext v26.16b,v26.16b,v26.16b,#8
ext v28.16b,v28.16b,v28.16b,#8
shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant
ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v7.16b,v7.16b
rev64 v4.16b,v4.16b
#endif
ext v25.16b,v7.16b,v7.16b,#8
ext v24.16b,v6.16b,v6.16b,#8
ext v23.16b,v5.16b,v5.16b,#8
pmull v29.1q,v20.1d,v25.1d //H·Ii+3
eor v7.16b,v7.16b,v25.16b
pmull2 v31.1q,v20.2d,v25.2d
pmull v30.1q,v21.1d,v7.1d
pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2
eor v6.16b,v6.16b,v24.16b
pmull2 v24.1q,v22.2d,v24.2d
pmull2 v6.1q,v21.2d,v6.2d
eor v29.16b,v29.16b,v16.16b
eor v31.16b,v31.16b,v24.16b
eor v30.16b,v30.16b,v6.16b
pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1
eor v5.16b,v5.16b,v23.16b
pmull2 v23.1q,v26.2d,v23.2d
pmull v5.1q,v27.1d,v5.1d
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
eor v30.16b,v30.16b,v5.16b
subs x3,x3,#128
b.lo Ltail4x
b Loop4x
.align 4
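// Main 4x loop: each iteration folds 64 bytes. The reduction of the
// previous iteration's accumulator is interleaved with the pmull work for
// the next four blocks to hide multiplier latency.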
Loop4x:
eor v16.16b,v4.16b,v0.16b
ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64
ext v3.16b,v16.16b,v16.16b,#8
#ifndef __AARCH64EB__
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v7.16b,v7.16b
rev64 v4.16b,v4.16b
#endif
pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v28.2d,v3.2d
ext v25.16b,v7.16b,v7.16b,#8
pmull2 v1.1q,v27.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
ext v24.16b,v6.16b,v6.16b,#8
eor v1.16b,v1.16b,v30.16b
ext v23.16b,v5.16b,v5.16b,#8
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
pmull v29.1q,v20.1d,v25.1d //H·Ii+3
eor v7.16b,v7.16b,v25.16b
eor v1.16b,v1.16b,v17.16b
pmull2 v31.1q,v20.2d,v25.2d
eor v1.16b,v1.16b,v18.16b
pmull v30.1q,v21.1d,v7.1d
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2
eor v6.16b,v6.16b,v24.16b
pmull2 v24.1q,v22.2d,v24.2d
eor v0.16b,v1.16b,v18.16b
pmull2 v6.1q,v21.2d,v6.2d
eor v29.16b,v29.16b,v16.16b
eor v31.16b,v31.16b,v24.16b
eor v30.16b,v30.16b,v6.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1
eor v5.16b,v5.16b,v23.16b
eor v18.16b,v18.16b,v2.16b
pmull2 v23.1q,v26.2d,v23.2d
pmull v5.1q,v27.1d,v5.1d
eor v0.16b,v0.16b,v18.16b
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
ext v0.16b,v0.16b,v0.16b,#8
eor v30.16b,v30.16b,v5.16b
subs x3,x3,#64
b.hs Loop4x
Ltail4x:
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v28.2d,v3.2d
pmull2 v1.1q,v27.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
adds x3,x3,#64
b.eq Ldone4x
cmp x3,#32
b.lo Lone
b.eq Ltwo
Lthree:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d,v5.2d,v6.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v24.16b,v6.16b,v6.16b,#8
ext v23.16b,v5.16b,v5.16b,#8
eor v0.16b,v1.16b,v18.16b
pmull v29.1q,v20.1d,v24.1d //H·Ii+2
eor v6.16b,v6.16b,v24.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
pmull2 v31.1q,v20.2d,v24.2d
pmull v30.1q,v21.1d,v6.1d
eor v0.16b,v0.16b,v18.16b
pmull v7.1q,v22.1d,v23.1d //H^2·Ii+1
eor v5.16b,v5.16b,v23.16b
ext v0.16b,v0.16b,v0.16b,#8
pmull2 v23.1q,v22.2d,v23.2d
eor v16.16b,v4.16b,v0.16b
pmull2 v5.1q,v21.2d,v5.2d
ext v3.16b,v16.16b,v16.16b,#8
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
eor v30.16b,v30.16b,v5.16b
pmull v0.1q,v26.1d,v3.1d //H^3·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v26.2d,v3.2d
pmull v1.1q,v27.1d,v16.1d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
b Ldone4x
.align 4
Ltwo:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d,v5.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v5.16b,v5.16b
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v23.16b,v5.16b,v5.16b,#8
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
pmull v29.1q,v20.1d,v23.1d //H·Ii+1
eor v5.16b,v5.16b,v23.16b
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull2 v31.1q,v20.2d,v23.2d
pmull v30.1q,v21.1d,v5.1d
pmull v0.1q,v22.1d,v3.1d //H^2·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v22.2d,v3.2d
pmull2 v1.1q,v21.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
b Ldone4x
.align 4
Lone:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __AARCH64EB__
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull v0.1q,v20.1d,v3.1d
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v20.2d,v3.2d
pmull v1.1q,v21.1d,v16.1d
Ldone4x:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
#ifndef __AARCH64EB__
rev64 v0.16b,v0.16b
#endif
st1 {v0.2d},[x0] //write out Xi
ret
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
marvin-hansen/iggy-streaming-system
| 42,639
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/vpaes-armv8.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
.section .rodata
.align 7 // totally strategic alignment
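// Constant tables for the vector-permutation (vpaes) AES implementation:
// shift-rows/mix-columns permutations, inverse-S-box helpers and key
// schedule constants, all consumed via tbl lookups so no data-dependent
// memory accesses are performed.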
_vpaes_consts:
Lk_mc_forward: // mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
Lk_mc_backward: // mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
Lk_sr: // sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
//
// "Hot" constants
//
Lk_inv: // inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
Lk_ipt: // input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
Lk_sbo: // sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
Lk_sb1: // sb1u, sb1t
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
Lk_sb2: // sb2u, sb2t
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
//
// Decryption stuff
//
Lk_dipt: // decryption input transform
.quad 0x0F505B040B545F00, 0x154A411E114E451A
.quad 0x86E383E660056500, 0x12771772F491F194
Lk_dsbo: // decryption sbox final output
.quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
.quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
Lk_dsb9: // decryption sbox output *9*u, *9*t
.quad 0x851C03539A86D600, 0xCAD51F504F994CC9
.quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
Lk_dsbd: // decryption sbox output *D*u, *D*t
.quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
.quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
Lk_dsbb: // decryption sbox output *B*u, *B*t
.quad 0xD022649296B44200, 0x602646F6B0F2D404
.quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
Lk_dsbe: // decryption sbox output *E*u, *E*t
.quad 0x46F2929626D4D000, 0x2242600464B4F6B0
.quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32
//
// Key schedule constants
//
Lk_dksd: // decryption key schedule: invskew x*D
.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
Lk_dksb: // decryption key schedule: invskew x*B
.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
Lk_dkse: // decryption key schedule: invskew x*E + 0x63
.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
Lk_dks9: // decryption key schedule: invskew x*9
.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
Lk_rcon: // rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
Lk_opt: // output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
Lk_deskew: // deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 2
.align 6
.text
##
## _aes_preheat
##
## Fills register %r10 -> .aes_consts (so you can -fPIC)
## and %xmm9-%xmm15 as specified below.
##
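// AArch64 mapping: the table base is taken in x10 and the constants are
// preloaded into v17-v27 (the %r10/%xmm register names in the comments
// above are retained from the original x86 implementation).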
.def _vpaes_encrypt_preheat
.type 32
.endef
.align 4
_vpaes_encrypt_preheat:
adrp x10, Lk_inv
add x10, x10, :lo12:Lk_inv
movi v17.16b, #0x0f
ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv
ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // Lk_ipt, Lk_sbo
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // Lk_sb1, Lk_sb2
ret
##
## _aes_encrypt_core
##
## AES-encrypt %xmm0.
##
## Inputs:
## %xmm0 = input
## %xmm9-%xmm15 as in _vpaes_preheat
## (%rdx) = scheduled keys
##
## Output in %xmm0
## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
## Preserves %xmm6 - %xmm8 so you get some local vectors
##
##
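// AArch64 mapping for the core: plaintext block in v7, key schedule
// pointer in x2 (round count at [x2,#240]), result left in v0; the v1-v5
// temporaries correspond to the %xmm registers named in the per-line
// comments below.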
.def _vpaes_encrypt_core
.type 32
.endef
.align 4
_vpaes_encrypt_core:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, Lk_mc_forward+16
add x11, x11, :lo12:Lk_mc_forward+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
b Lenc_entry
.align 4
Lenc_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
sub w8, w8, #1 // nr--
Lenc_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, Lenc_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0
ret
.globl vpaes_encrypt
.def vpaes_encrypt
.type 32
.endef
.align 4
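// vpaes_encrypt: x0 = input block (16 bytes), x1 = output block,
// x2 = expanded AES key. Encrypts a single block with the constant-time
// vector-permutation core above.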
vpaes_encrypt:
#ifdef BORINGSSL_DISPATCH_TEST
adrp x9,BORINGSSL_function_hit
add x9, x9, :lo12:BORINGSSL_function_hit
mov w10, #1
strb w10, [x9,#4] // kFlag_vpaes_encrypt
#endif
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v7.16b}, [x0]
bl _vpaes_encrypt_preheat
bl _vpaes_encrypt_core
st1 {v0.16b}, [x1]
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
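// _vpaes_encrypt_2x: same transformation as _vpaes_encrypt_core but for
// two blocks at once; inputs in v14/v15, outputs in v0/v1, with v8-v13
// shadowing the single-block temporaries.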
.def _vpaes_encrypt_2x
.type 32
.endef
.align 4
_vpaes_encrypt_2x:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
adrp x11, Lk_mc_forward+16
add x11, x11, :lo12:Lk_mc_forward+16
// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0
and v9.16b, v15.16b, v17.16b
ushr v8.16b, v15.16b, #4
tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
tbl v9.16b, {v20.16b}, v9.16b
// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
tbl v10.16b, {v21.16b}, v8.16b
eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
eor v8.16b, v9.16b, v16.16b
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
eor v8.16b, v8.16b, v10.16b
b Lenc_2x_entry
.align 4
Lenc_2x_loop:
// middle of middle round
add x10, x11, #0x40
tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
tbl v12.16b, {v25.16b}, v10.16b
ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[]
tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
tbl v8.16b, {v24.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
tbl v13.16b, {v27.16b}, v10.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
tbl v10.16b, {v26.16b}, v11.16b
ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[]
tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
tbl v11.16b, {v8.16b}, v1.16b
eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
eor v10.16b, v10.16b, v13.16b
tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
tbl v8.16b, {v8.16b}, v4.16b
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
eor v11.16b, v11.16b, v10.16b
tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
tbl v12.16b, {v11.16b},v1.16b
eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
eor v8.16b, v8.16b, v11.16b
and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
eor v8.16b, v8.16b, v12.16b
sub w8, w8, #1 // nr--
Lenc_2x_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
and v9.16b, v8.16b, v17.16b
ushr v8.16b, v8.16b, #4
tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
tbl v13.16b, {v19.16b},v9.16b
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
eor v9.16b, v9.16b, v8.16b
tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v11.16b, {v18.16b},v8.16b
tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
tbl v12.16b, {v18.16b},v9.16b
eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v11.16b, v11.16b, v13.16b
eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
eor v12.16b, v12.16b, v13.16b
tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v10.16b, {v18.16b},v11.16b
tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
tbl v11.16b, {v18.16b},v12.16b
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v10.16b, v10.16b, v9.16b
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
eor v11.16b, v11.16b, v8.16b
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
cbnz w8, Lenc_2x_loop
// middle of last round
add x10, x11, #0x80
// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
tbl v12.16b, {v22.16b}, v10.16b
ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[]
tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
tbl v8.16b, {v23.16b}, v11.16b
eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
eor v8.16b, v8.16b, v12.16b
tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0
tbl v1.16b, {v8.16b},v1.16b
ret
.def _vpaes_decrypt_preheat
.type 32
.endef
.align 4
_vpaes_decrypt_preheat:
adrp x10, Lk_inv
add x10, x10, :lo12:Lk_inv
movi v17.16b, #0x0f
adrp x11, Lk_dipt
add x11, x11, :lo12:Lk_dipt
ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv
ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x11],#64 // Lk_dipt, Lk_dsbo
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x11],#64 // Lk_dsb9, Lk_dsbd
ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x11] // Lk_dsbb, Lk_dsbe
ret
##
## Decryption core
##
## Same API as encryption core.
##
.def _vpaes_decrypt_core
.type 32
.endef
.align 4
_vpaes_decrypt_core:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
// vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11
eor x11, x11, #0x30 // xor $0x30, %r11
adrp x10, Lk_sr
add x10, x10, :lo12:Lk_sr
and x11, x11, #0x30 // and $0x30, %r11
add x11, x11, x10
adrp x10, Lk_mc_forward+48
add x10, x10, :lo12:Lk_mc_forward+48
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key
and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
ld1 {v5.2d}, [x10] // vmovdqa Lk_mc_forward+48(%rip), %xmm5
// vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
b Ldec_entry
.align 4
Ldec_loop:
//
// Inverse mix columns
//
// vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
// vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
tbl v1.16b, {v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0
// vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
// vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt
tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
// vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
// vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt
tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
tbl v1.16b, {v29.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
// vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu
eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
// vmovdqa 0x50(%r10), %xmm1 # 0 : sbet
tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
tbl v1.16b, {v31.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5
eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
sub w8, w8, #1 // sub $1,%rax # nr--
Ldec_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0
cbnz w8, Ldec_loop
// middle of last round
// vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
// vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # Lk_sr-Lk_dsbd=-0x160
tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A
tbl v0.16b, {v0.16b}, v2.16b // vpshufb %xmm2, %xmm0, %xmm0
ret
.globl vpaes_decrypt
.def vpaes_decrypt
.type 32
.endef
.align 4
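// vpaes_decrypt: x0 = input block (16 bytes), x1 = output block,
// x2 = expanded AES decryption key. Single-block decryption using the
// decryption core above.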
vpaes_decrypt:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
ld1 {v7.16b}, [x0]
bl _vpaes_decrypt_preheat
bl _vpaes_decrypt_core
st1 {v0.16b}, [x1]
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
// v14-v15 input, v0-v1 output
.def _vpaes_decrypt_2x
.type 32
.endef
.align 4
_vpaes_decrypt_2x:
mov x9, x2
ldr w8, [x2,#240] // pull rounds
// vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11
eor x11, x11, #0x30 // xor $0x30, %r11
adrp x10, Lk_sr
add x10, x10, :lo12:Lk_sr
and x11, x11, #0x30 // and $0x30, %r11
add x11, x11, x10
adrp x10, Lk_mc_forward+48
add x10, x10, :lo12:Lk_mc_forward+48
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key
and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0
and v9.16b, v15.16b, v17.16b
ushr v8.16b, v15.16b, #4
tbl v2.16b, {v20.16b},v1.16b // vpshufb %xmm1, %xmm2, %xmm2
tbl v10.16b, {v20.16b},v9.16b
ld1 {v5.2d}, [x10] // vmovdqa Lk_mc_forward+48(%rip), %xmm5
// vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
tbl v0.16b, {v21.16b},v0.16b // vpshufb %xmm0, %xmm1, %xmm0
tbl v8.16b, {v21.16b},v8.16b
eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2
eor v10.16b, v10.16b, v16.16b
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
eor v8.16b, v8.16b, v10.16b
b Ldec_2x_entry
.align 4
Ldec_2x_loop:
//
// Inverse mix columns
//
// vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
// vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
tbl v12.16b, {v24.16b}, v10.16b
tbl v1.16b, {v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
tbl v9.16b, {v25.16b}, v11.16b
eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0
eor v8.16b, v12.16b, v16.16b
// vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
eor v8.16b, v8.16b, v9.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
// vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt
tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
tbl v12.16b, {v26.16b}, v10.16b
tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
tbl v8.16b, {v8.16b},v5.16b
tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
tbl v9.16b, {v27.16b}, v11.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
eor v8.16b, v8.16b, v12.16b
// vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
eor v8.16b, v8.16b, v9.16b
// vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt
tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
tbl v12.16b, {v28.16b}, v10.16b
tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
tbl v8.16b, {v8.16b},v5.16b
tbl v1.16b, {v29.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
tbl v9.16b, {v29.16b}, v11.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
eor v8.16b, v8.16b, v12.16b
// vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu
eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
eor v8.16b, v8.16b, v9.16b
// vmovdqa 0x50(%r10), %xmm1 # 0 : sbet
tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
tbl v12.16b, {v30.16b}, v10.16b
tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
tbl v8.16b, {v8.16b},v5.16b
tbl v1.16b, {v31.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
tbl v9.16b, {v31.16b}, v11.16b
eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
eor v8.16b, v8.16b, v12.16b
ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5
eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
eor v8.16b, v8.16b, v9.16b
sub w8, w8, #1 // sub $1,%rax # nr--
Ldec_2x_entry:
// top of round
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
and v9.16b, v8.16b, v17.16b
ushr v8.16b, v8.16b, #4
tbl v2.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
tbl v10.16b, {v19.16b},v9.16b
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
eor v9.16b, v9.16b, v8.16b
tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
tbl v11.16b, {v18.16b},v8.16b
tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
tbl v12.16b, {v18.16b},v9.16b
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
eor v11.16b, v11.16b, v10.16b
eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
eor v12.16b, v12.16b, v10.16b
tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
tbl v10.16b, {v18.16b},v11.16b
tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
tbl v11.16b, {v18.16b},v12.16b
eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
eor v10.16b, v10.16b, v9.16b
eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
eor v11.16b, v11.16b, v8.16b
ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0
cbnz w8, Ldec_2x_loop
// middle of last round
// vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
tbl v12.16b, {v22.16b}, v10.16b
// vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
tbl v9.16b, {v23.16b}, v11.16b
ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # Lk_sr-Lk_dsbd=-0x160
eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
eor v12.16b, v12.16b, v16.16b
eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A
eor v8.16b, v9.16b, v12.16b
tbl v0.16b, {v0.16b},v2.16b // vpshufb %xmm2, %xmm0, %xmm0
tbl v1.16b, {v8.16b},v2.16b
ret
########################################################
## ##
## AES key schedule ##
## ##
########################################################
.def _vpaes_key_preheat
.type 32
.endef
.align 4
_vpaes_key_preheat:
adrp x10, Lk_inv
add x10, x10, :lo12:Lk_inv
movi v16.16b, #0x5b // Lk_s63
adrp x11, Lk_sb1
add x11, x11, :lo12:Lk_sb1
movi v17.16b, #0x0f // Lk_s0F
ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // Lk_inv, Lk_ipt
adrp x10, Lk_dksd
add x10, x10, :lo12:Lk_dksd
ld1 {v22.2d,v23.2d}, [x11] // Lk_sb1
adrp x11, Lk_mc_forward
add x11, x11, :lo12:Lk_mc_forward
ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // Lk_dksd, Lk_dksb
ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // Lk_dkse, Lk_dks9
ld1 {v8.2d}, [x10] // Lk_rcon
ld1 {v9.2d}, [x11] // Lk_mc_forward[0]
ret
.def _vpaes_schedule_core
.type 32
.endef
.align 4
_vpaes_schedule_core:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp,#-16]!
add x29,sp,#0
bl _vpaes_key_preheat // load the tables
ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned)
// input transform
mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3
bl _vpaes_schedule_transform
mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7
adrp x10, Lk_sr // lea Lk_sr(%rip),%r10
add x10, x10, :lo12:Lk_sr
add x8, x8, x10
cbnz w3, Lschedule_am_decrypting
// encrypting, output zeroth round key after transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx)
b Lschedule_go
Lschedule_am_decrypting:
// decrypting, output zeroth round key after shiftrows
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx)
eor x8, x8, #0x30 // xor $0x30, %r8
Lschedule_go:
cmp w1, #192 // cmp $192, %esi
b.hi Lschedule_256
b.eq Lschedule_192
// 128: fall through
##
## .schedule_128
##
## 128-bit specific part of key schedule.
##
## This schedule is really simple, because all its parts
## are accomplished by the subroutines.
##
Lschedule_128:
mov x0, #10 // mov $10, %esi
Loop_schedule_128:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle // write output
b Loop_schedule_128
##
## .aes_schedule_192
##
## 192-bit specific part of key schedule.
##
## The main body of this schedule is the same as the 128-bit
## schedule, but with more smearing. The long, high side is
## stored in %xmm7 as before, and the short, low side is in
## the high bits of %xmm6.
##
## This schedule is somewhat nastier, however, because each
## round produces 192 bits of key material, or 1.5 round keys.
## Therefore, on each cycle we do 2 rounds and produce 3 round
## keys.
##
.align 4
Lschedule_192:
sub x0, x0, #8
ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
bl _vpaes_schedule_transform // input transform
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4
ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
mov x0, #4 // mov $4, %esi
Loop_schedule_192:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_round
ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0
bl _vpaes_schedule_mangle // save key n
bl _vpaes_schedule_192_smear
bl _vpaes_schedule_mangle // save key n+1
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle // save key n+2
bl _vpaes_schedule_192_smear
b Loop_schedule_192
##
## .aes_schedule_256
##
## 256-bit specific part of key schedule.
##
## The structure here is very similar to the 128-bit
## schedule, but with an additional "low side" in
## %xmm6. The low side's rounds are the same as the
## high side's, except no rcon and no rotation.
##
.align 4
Lschedule_256:
ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
bl _vpaes_schedule_transform // input transform
mov x0, #7 // mov $7, %esi
Loop_schedule_256:
sub x0, x0, #1 // dec %esi
bl _vpaes_schedule_mangle // output low result
mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
// high round
bl _vpaes_schedule_round
cbz x0, Lschedule_mangle_last
bl _vpaes_schedule_mangle
// low round. swap xmm7 and xmm6
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
movi v4.16b, #0
mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5
mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7
bl _vpaes_schedule_low_round
mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7
b Loop_schedule_256
##
## .aes_schedule_mangle_last
##
## Mangler for last round of key schedule
## Mangles %xmm0
## when encrypting, outputs out(%xmm0) ^ 63
## when decrypting, outputs unskew(%xmm0)
##
## Always called right before return... jumps to cleanup and exits
##
.align 4
Lschedule_mangle_last:
// schedule last round key from xmm0
adrp x11, Lk_deskew // lea Lk_deskew(%rip),%r11 # prepare to deskew
add x11, x11, :lo12:Lk_deskew
cbnz w3, Lschedule_mangle_last_dec
// encrypting
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1
adrp x11, Lk_opt // lea Lk_opt(%rip), %r11 # prepare to output transform
add x11, x11, :lo12:Lk_opt
add x2, x2, #32 // add $32, %rdx
tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute
Lschedule_mangle_last_dec:
ld1 {v20.2d,v21.2d}, [x11] // reload constants
sub x2, x2, #16 // add $-16, %rdx
eor v0.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm0
bl _vpaes_schedule_transform // output transform
st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key
// cleanup
eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2
eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3
eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4
eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5
eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6
eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7
ldp x29, x30, [sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
##
## .aes_schedule_192_smear
##
## Smear the short, low side in the 192-bit key schedule.
##
## Inputs:
## %xmm7: high side, b a x y
## %xmm6: low side, d c 0 0
## %xmm13: 0
##
## Outputs:
## %xmm6: b+c+d b+c 0 0
## %xmm0: b+c+d b+c b a
##
.def _vpaes_schedule_192_smear
.type 32
.endef
.align 4
_vpaes_schedule_192_smear:
movi v1.16b, #0
dup v0.4s, v7.s[3]
ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0
ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
ret
##
## .aes_schedule_round
##
## Runs one main round of the key schedule on %xmm0, %xmm7
##
## Specifically, runs subbytes on the high dword of %xmm0
## then rotates it by one byte and xors into the low dword of
## %xmm7.
##
## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
## next rcon.
##
## Smears the dwords of %xmm7 by xoring the low into the
## second low, result into third, result into highest.
##
## Returns results in %xmm7 = %xmm0.
## Clobbers %xmm1-%xmm4, %r11.
##
.def _vpaes_schedule_round
.type 32
.endef
.align 4
_vpaes_schedule_round:
// extract rcon from xmm8
movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4
ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1
ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
// rotate
dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0
// fall through...
// low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
// smear xmm7
ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1
eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4
// subbytes
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7
tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
eor v7.16b, v7.16b, v16.16b // vpxor Lk_s63(%rip), %xmm7, %xmm7
tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io
eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
// add in smeared stuff
eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0
eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7
ret
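The comments on _vpaes_schedule_round above describe the same work that the textbook AES key expansion does one 32-bit word at a time: SubWord/RotWord/rcon on the last word, then a running xor ("smear") across the other three. A rough C model of that recurrence for the 128-bit path is sketched below; SubWord() is an assumed helper (AES S-box applied per byte) and schedule_round is an illustrative name only, the vectorized code above computes the same values with tbl lookups on low/high nibbles.

#include <stdint.h>

uint32_t SubWord(uint32_t w);              /* assumed helper: S-box applied to each byte */

static uint32_t RotWord(uint32_t w) {      /* FIPS-197 RotWord, word viewed big-endian   */
    return (w << 8) | (w >> 24);
}

/* prev[4] = previous round key, next[4] = the round key produced by this call. */
static void schedule_round(uint32_t next[4], const uint32_t prev[4], uint32_t rcon) {
    uint32_t t = SubWord(RotWord(prev[3])) ^ rcon;  /* rotate, subbytes, add rcon  */
    next[0] = prev[0] ^ t;                          /* smear: each word folds in   */
    next[1] = prev[1] ^ next[0];                    /* the one computed before it  */
    next[2] = prev[2] ^ next[1];
    next[3] = prev[3] ^ next[2];
}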
##
## .aes_schedule_transform
##
## Linear-transform %xmm0 according to tables at (%r11)
##
## Requires that %xmm9 = 0x0F0F... as in preheat
## Output in %xmm0
## Clobbers %xmm1, %xmm2
##
.def _vpaes_schedule_transform
.type 32
.endef
.align 4
_vpaes_schedule_transform:
and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0
// vmovdqa (%r11), %xmm2 # lo
tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
// vmovdqa 16(%r11), %xmm1 # hi
tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
ret
##
## .aes_schedule_mangle
##
## Mangle xmm0 from (basis-transformed) standard version
## to our version.
##
## On encrypt,
## xor with 0x63
## multiply by circulant 0,1,1,1
## apply shiftrows transform
##
## On decrypt,
## xor with 0x63
## multiply by "inverse mixcolumns" circulant E,B,D,9
## deskew
## apply shiftrows transform
##
##
## Writes out to (%rdx), and increments or decrements it
## Keeps track of round number mod 4 in %r8
## Preserves xmm0
## Clobbers xmm1-xmm5
##
.def _vpaes_schedule_mangle
.type 32
.endef
.align 4
_vpaes_schedule_mangle:
mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later
// vmovdqa .Lk_mc_forward(%rip),%xmm5
cbnz w3, Lschedule_mangle_dec
// encrypting
eor v4.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm4
add x2, x2, #16 // add $16, %rdx
tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4
tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1
tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3
eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3
b Lschedule_mangle_both
.align 4
Lschedule_mangle_dec:
// inverse mix columns
// lea .Lk_dksd(%rip),%r11
ushr v1.16b, v4.16b, #4 // vpsrlb $4, %xmm4, %xmm1 # 1 = hi
and v4.16b, v4.16b, v17.16b // vpand %xmm9, %xmm4, %xmm4 # 4 = lo
// vmovdqa 0x00(%r11), %xmm2
tbl v2.16b, {v24.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
// vmovdqa 0x10(%r11), %xmm3
tbl v3.16b, {v25.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3
tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3
// vmovdqa 0x20(%r11), %xmm2
tbl v2.16b, {v26.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2
// vmovdqa 0x30(%r11), %xmm3
tbl v3.16b, {v27.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3
tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3
// vmovdqa 0x40(%r11), %xmm2
tbl v2.16b, {v28.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2
// vmovdqa 0x50(%r11), %xmm3
tbl v3.16b, {v29.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3
// vmovdqa 0x60(%r11), %xmm2
tbl v2.16b, {v30.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3
// vmovdqa 0x70(%r11), %xmm4
tbl v4.16b, {v31.16b}, v1.16b // vpshufb %xmm1, %xmm4, %xmm4
ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2
eor v3.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm3
sub x2, x2, #16 // add $-16, %rdx
Lschedule_mangle_both:
tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
add x8, x8, #48 // add $-16, %r8
and x8, x8, #~(1<<6) // and $0x30, %r8
st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx)
ret
.globl vpaes_set_encrypt_key
.def vpaes_set_encrypt_key
.type 32
.endef
.align 4
vpaes_set_encrypt_key:
#ifdef BORINGSSL_DISPATCH_TEST
adrp x9,BORINGSSL_function_hit
add x9, x9, :lo12:BORINGSSL_function_hit
mov w10, #1
strb w10, [x9,#5] // kFlag_vpaes_set_encrypt_key
#endif
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
lsr w9, w1, #5 // shr $5,%eax
add w9, w9, #5 // add $5,%eax
str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
mov w3, #0 // mov $0,%ecx
mov x8, #0x30 // mov $0x30,%r8d
bl _vpaes_schedule_core
eor x0, x0, x0
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
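As the comment on the str above notes, the round count written to AES_KEY->rounds is nbits/32 + 5, i.e. 10, 12 or 14 rounds for 128-, 192- and 256-bit keys. A trivial standalone check of that formula (aes_rounds is an illustrative name only):

#include <assert.h>

static unsigned aes_rounds(unsigned nbits) {
    return nbits / 32 + 5;          /* same arithmetic as the lsr/add pair above */
}

int main(void) {
    assert(aes_rounds(128) == 10);
    assert(aes_rounds(192) == 12);
    assert(aes_rounds(256) == 14);
    return 0;
}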
.globl vpaes_set_decrypt_key
.def vpaes_set_decrypt_key
.type 32
.endef
.align 4
vpaes_set_decrypt_key:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
lsr w9, w1, #5 // shr $5,%eax
add w9, w9, #5 // add $5,%eax
str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
lsl w9, w9, #4 // shl $4,%eax
add x2, x2, #16 // lea 16(%rdx,%rax),%rdx
add x2, x2, x9
mov w3, #1 // mov $1,%ecx
lsr w8, w1, #1 // shr $1,%r8d
and x8, x8, #32 // and $32,%r8d
eor x8, x8, #32 // xor $32,%r8d # nbits==192?0:32
bl _vpaes_schedule_core
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl vpaes_cbc_encrypt
.def vpaes_cbc_encrypt
.type 32
.endef
.align 4
vpaes_cbc_encrypt:
AARCH64_SIGN_LINK_REGISTER
cbz x2, Lcbc_abort
cmp w5, #0 // check direction
b.eq vpaes_cbc_decrypt
stp x29,x30,[sp,#-16]!
add x29,sp,#0
mov x17, x2 // reassign
mov x2, x3 // reassign
ld1 {v0.16b}, [x4] // load ivec
bl _vpaes_encrypt_preheat
b Lcbc_enc_loop
.align 4
Lcbc_enc_loop:
ld1 {v7.16b}, [x0],#16 // load input
eor v7.16b, v7.16b, v0.16b // xor with ivec
bl _vpaes_encrypt_core
st1 {v0.16b}, [x1],#16 // save output
subs x17, x17, #16
b.hi Lcbc_enc_loop
st1 {v0.16b}, [x4] // write ivec
ldp x29,x30,[sp],#16
Lcbc_abort:
AARCH64_VALIDATE_LINK_REGISTER
ret
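Lcbc_enc_loop above is plain CBC chaining: xor the plaintext block with the running IV, encrypt, store the ciphertext and reuse it as the next IV. A minimal C sketch of that structure, assuming a whole-block length and a hypothetical aes_encrypt_block() helper in place of _vpaes_encrypt_core:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

void aes_encrypt_block(uint8_t out[16], const uint8_t in[16],
                       const void *key);                  /* assumed helper */

static void cbc_encrypt(uint8_t *out, const uint8_t *in, size_t len,
                        const void *key, uint8_t iv[16]) {
    uint8_t block[16];
    for (size_t off = 0; off < len; off += 16) {          /* len assumed multiple of 16 */
        for (int i = 0; i < 16; i++)
            block[i] = in[off + i] ^ iv[i];               /* xor with ivec              */
        aes_encrypt_block(block, block, key);             /* encrypt                    */
        memcpy(out + off, block, 16);                     /* save output                */
        memcpy(iv, block, 16);                            /* ciphertext is next IV      */
    }
}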
.def vpaes_cbc_decrypt
.type 32
.endef
.align 4
vpaes_cbc_decrypt:
// Not adding AARCH64_SIGN_LINK_REGISTER here because vpaes_cbc_decrypt is jumped to
// only from vpaes_cbc_encrypt which has already signed the return address.
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
stp d10,d11,[sp,#-16]!
stp d12,d13,[sp,#-16]!
stp d14,d15,[sp,#-16]!
mov x17, x2 // reassign
mov x2, x3 // reassign
ld1 {v6.16b}, [x4] // load ivec
bl _vpaes_decrypt_preheat
tst x17, #16
b.eq Lcbc_dec_loop2x
ld1 {v7.16b}, [x0], #16 // load input
bl _vpaes_decrypt_core
eor v0.16b, v0.16b, v6.16b // xor with ivec
orr v6.16b, v7.16b, v7.16b // next ivec value
st1 {v0.16b}, [x1], #16
subs x17, x17, #16
b.ls Lcbc_dec_done
.align 4
Lcbc_dec_loop2x:
ld1 {v14.16b,v15.16b}, [x0], #32
bl _vpaes_decrypt_2x
eor v0.16b, v0.16b, v6.16b // xor with ivec
eor v1.16b, v1.16b, v14.16b
orr v6.16b, v15.16b, v15.16b
st1 {v0.16b,v1.16b}, [x1], #32
subs x17, x17, #32
b.hi Lcbc_dec_loop2x
Lcbc_dec_done:
st1 {v6.16b}, [x4]
ldp d14,d15,[sp],#16
ldp d12,d13,[sp],#16
ldp d10,d11,[sp],#16
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl vpaes_ctr32_encrypt_blocks
.def vpaes_ctr32_encrypt_blocks
.type 32
.endef
.align 4
vpaes_ctr32_encrypt_blocks:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
stp d8,d9,[sp,#-16]! // ABI spec says so
stp d10,d11,[sp,#-16]!
stp d12,d13,[sp,#-16]!
stp d14,d15,[sp,#-16]!
cbz x2, Lctr32_done
// Note, unlike the other functions, x2 here is measured in blocks,
// not bytes.
mov x17, x2
mov x2, x3
// Load the IV and counter portion.
ldr w6, [x4, #12]
ld1 {v7.16b}, [x4]
bl _vpaes_encrypt_preheat
tst x17, #1
rev w6, w6 // The counter is big-endian.
b.eq Lctr32_prep_loop
// Handle one block so the remaining block count is even for
// _vpaes_encrypt_2x.
ld1 {v6.16b}, [x0], #16 // Load input ahead of time
bl _vpaes_encrypt_core
eor v0.16b, v0.16b, v6.16b // XOR input and result
st1 {v0.16b}, [x1], #16
subs x17, x17, #1
// Update the counter.
add w6, w6, #1
rev w7, w6
mov v7.s[3], w7
b.ls Lctr32_done
Lctr32_prep_loop:
// _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x
// uses v14 and v15.
mov v15.16b, v7.16b
mov v14.16b, v7.16b
add w6, w6, #1
rev w7, w6
mov v15.s[3], w7
Lctr32_loop:
ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time
bl _vpaes_encrypt_2x
eor v0.16b, v0.16b, v6.16b // XOR input and result
eor v1.16b, v1.16b, v7.16b // XOR input and result (#2)
st1 {v0.16b,v1.16b}, [x1], #32
subs x17, x17, #2
// Update the counter.
add w7, w6, #1
add w6, w6, #2
rev w7, w7
mov v14.s[3], w7
rev w7, w6
mov v15.s[3], w7
b.hi Lctr32_loop
Lctr32_done:
ldp d14,d15,[sp],#16
ldp d12,d13,[sp],#16
ldp d10,d11,[sp],#16
ldp d8,d9,[sp],#16
ldp x29,x30,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
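vpaes_ctr32_encrypt_blocks treats only the last four bytes of the 16-byte IV as a big-endian counter and bumps it once per block; that is what the rev/mov v.s[3] pairs above maintain, with the upper 96 bits of the IV left untouched. A small C sketch of that counter update (ctr32_inc is an illustrative name, not part of this file):

#include <stdint.h>

static void ctr32_inc(uint8_t ivec[16]) {
    uint32_t ctr = ((uint32_t)ivec[12] << 24) | ((uint32_t)ivec[13] << 16) |
                   ((uint32_t)ivec[14] << 8)  |  (uint32_t)ivec[15];
    ctr++;                                  /* wraps modulo 2^32                 */
    ivec[12] = (uint8_t)(ctr >> 24);        /* write back in big-endian order    */
    ivec[13] = (uint8_t)(ctr >> 16);
    ivec[14] = (uint8_t)(ctr >> 8);
    ivec[15] = (uint8_t)ctr;
}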
|
marvin-hansen/iggy-streaming-system
| 10,892
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/ghash-neon-armv8.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
.text
.globl gcm_init_neon
.def gcm_init_neon
.type 32
.endef
.align 4
gcm_init_neon:
AARCH64_VALID_CALL_TARGET
// This function is adapted from gcm_init_v8. xC2 is t3.
ld1 {v17.2d}, [x1] // load H
movi v19.16b, #0xe1
shl v19.2d, v19.2d, #57 // 0xc2.0
ext v3.16b, v17.16b, v17.16b, #8
ushr v18.2d, v19.2d, #63
dup v17.4s, v17.s[1]
ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01
ushr v18.2d, v3.2d, #63
sshr v17.4s, v17.4s, #31 // broadcast carry bit
and v18.16b, v18.16b, v16.16b
shl v3.2d, v3.2d, #1
ext v18.16b, v18.16b, v18.16b, #8
and v16.16b, v16.16b, v17.16b
orr v3.16b, v3.16b, v18.16b // H<<<=1
eor v5.16b, v3.16b, v16.16b // twisted H
st1 {v5.2d}, [x0] // store Htable[0]
ret
.globl gcm_gmult_neon
.def gcm_gmult_neon
.type 32
.endef
.align 4
gcm_gmult_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v3.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks // load constants
add x9, x9, :lo12:Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v3.16b, v3.16b // byteswap Xi
ext v3.16b, v3.16b, v3.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
mov x3, #16
b Lgmult_neon
.globl gcm_ghash_neon
.def gcm_ghash_neon
.type 32
.endef
.align 4
gcm_ghash_neon:
AARCH64_VALID_CALL_TARGET
ld1 {v0.16b}, [x0] // load Xi
ld1 {v5.1d}, [x1], #8 // load twisted H
ld1 {v6.1d}, [x1]
adrp x9, Lmasks // load constants
add x9, x9, :lo12:Lmasks
ld1 {v24.2d, v25.2d}, [x9]
rev64 v0.16b, v0.16b // byteswap Xi
ext v0.16b, v0.16b, v0.16b, #8
eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing
Loop_neon:
ld1 {v3.16b}, [x2], #16 // load inp
rev64 v3.16b, v3.16b // byteswap inp
ext v3.16b, v3.16b, v3.16b, #8
eor v3.16b, v3.16b, v0.16b // inp ^= Xi
Lgmult_neon:
// Split the input into v3 and v4. (The upper halves are unused,
// so it is okay to leave them alone.)
ins v4.d[0], v3.d[1]
ext v16.8b, v5.8b, v5.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v0.8b, v3.8b, v3.8b, #1 // B1
pmull v0.8h, v5.8b, v0.8b // E = A*B1
ext v17.8b, v5.8b, v5.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v5.8b, v19.8b // G = A*B2
ext v18.8b, v5.8b, v5.8b, #3 // A3
eor v16.16b, v16.16b, v0.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v0.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v0.8h, v5.8b, v0.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v0.16b // N = I + J
pmull v19.8h, v5.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v0.8h, v5.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v0.16b, v0.16b, v16.16b
eor v0.16b, v0.16b, v18.16b
eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing
ext v16.8b, v7.8b, v7.8b, #1 // A1
pmull v16.8h, v16.8b, v3.8b // F = A1*B
ext v1.8b, v3.8b, v3.8b, #1 // B1
pmull v1.8h, v7.8b, v1.8b // E = A*B1
ext v17.8b, v7.8b, v7.8b, #2 // A2
pmull v17.8h, v17.8b, v3.8b // H = A2*B
ext v19.8b, v3.8b, v3.8b, #2 // B2
pmull v19.8h, v7.8b, v19.8b // G = A*B2
ext v18.8b, v7.8b, v7.8b, #3 // A3
eor v16.16b, v16.16b, v1.16b // L = E + F
pmull v18.8h, v18.8b, v3.8b // J = A3*B
ext v1.8b, v3.8b, v3.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v1.8h, v7.8b, v1.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v3.8b, v3.8b, #4 // B4
eor v18.16b, v18.16b, v1.16b // N = I + J
pmull v19.8h, v7.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v1.8h, v7.8b, v3.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v1.16b, v1.16b, v16.16b
eor v1.16b, v1.16b, v18.16b
ext v16.8b, v6.8b, v6.8b, #1 // A1
pmull v16.8h, v16.8b, v4.8b // F = A1*B
ext v2.8b, v4.8b, v4.8b, #1 // B1
pmull v2.8h, v6.8b, v2.8b // E = A*B1
ext v17.8b, v6.8b, v6.8b, #2 // A2
pmull v17.8h, v17.8b, v4.8b // H = A2*B
ext v19.8b, v4.8b, v4.8b, #2 // B2
pmull v19.8h, v6.8b, v19.8b // G = A*B2
ext v18.8b, v6.8b, v6.8b, #3 // A3
eor v16.16b, v16.16b, v2.16b // L = E + F
pmull v18.8h, v18.8b, v4.8b // J = A3*B
ext v2.8b, v4.8b, v4.8b, #3 // B3
eor v17.16b, v17.16b, v19.16b // M = G + H
pmull v2.8h, v6.8b, v2.8b // I = A*B3
// Here we diverge from the 32-bit version. It computes the following
// (instructions reordered for clarity):
//
// veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L)
// vand $t0#hi, $t0#hi, $k48
// veor $t0#lo, $t0#lo, $t0#hi
//
// veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M)
// vand $t1#hi, $t1#hi, $k32
// veor $t1#lo, $t1#lo, $t1#hi
//
// veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N)
// vand $t2#hi, $t2#hi, $k16
// veor $t2#lo, $t2#lo, $t2#hi
//
// veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K)
// vmov.i64 $t3#hi, #0
//
// $kN is a mask with the bottom N bits set. AArch64 cannot compute on
// upper halves of SIMD registers, so we must split each half into
// separate registers. To compensate, we pair computations up and
// parallelize.
ext v19.8b, v4.8b, v4.8b, #4 // B4
eor v18.16b, v18.16b, v2.16b // N = I + J
pmull v19.8h, v6.8b, v19.8b // K = A*B4
// This can probably be scheduled more efficiently. For now, we just
// pair up independent instructions.
zip1 v20.2d, v16.2d, v17.2d
zip1 v22.2d, v18.2d, v19.2d
zip2 v21.2d, v16.2d, v17.2d
zip2 v23.2d, v18.2d, v19.2d
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
and v21.16b, v21.16b, v24.16b
and v23.16b, v23.16b, v25.16b
eor v20.16b, v20.16b, v21.16b
eor v22.16b, v22.16b, v23.16b
zip1 v16.2d, v20.2d, v21.2d
zip1 v18.2d, v22.2d, v23.2d
zip2 v17.2d, v20.2d, v21.2d
zip2 v19.2d, v22.2d, v23.2d
ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8
ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16
pmull v2.8h, v6.8b, v4.8b // D = A*B
ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32
ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24
eor v16.16b, v16.16b, v17.16b
eor v18.16b, v18.16b, v19.16b
eor v2.16b, v2.16b, v16.16b
eor v2.16b, v2.16b, v18.16b
ext v16.16b, v0.16b, v2.16b, #8
eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing
eor v1.16b, v1.16b, v2.16b
eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi
ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result
// This is a no-op due to the ins instruction below.
// ins v2.d[0], v1.d[1]
// equivalent of reduction_avx from ghash-x86_64.pl
shl v17.2d, v0.2d, #57 // 1st phase
shl v18.2d, v0.2d, #62
eor v18.16b, v18.16b, v17.16b //
shl v17.2d, v0.2d, #63
eor v18.16b, v18.16b, v17.16b //
// Note Xm contains {Xl.d[1], Xh.d[0]}.
eor v18.16b, v18.16b, v1.16b
ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0]
ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1]
ushr v18.2d, v0.2d, #1 // 2nd phase
eor v2.16b, v2.16b,v0.16b
eor v0.16b, v0.16b,v18.16b //
ushr v18.2d, v18.2d, #6
ushr v0.2d, v0.2d, #1 //
eor v0.16b, v0.16b, v2.16b //
eor v0.16b, v0.16b, v18.16b //
subs x3, x3, #16
bne Loop_neon
rev64 v0.16b, v0.16b // byteswap Xi and write
ext v0.16b, v0.16b, v0.16b, #8
st1 {v0.16b}, [x0]
ret
.section .rodata
.align 4
Lmasks:
.quad 0x0000ffffffffffff // k48
.quad 0x00000000ffffffff // k32
.quad 0x000000000000ffff // k16
.quad 0x0000000000000000 // k0
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
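Per 16-byte block, the Loop_neon/Lgmult_neon code above computes Xi = (Xi xor inp) * H in GF(2^128); the pmull/Karatsuba splits and the shift-based reduction at the end implement that field multiplication for the bit-reflected GHASH polynomial. A structural C sketch under that reading; gf128_mul() is a hypothetical helper, and the real code additionally byte-swaps Xi and works from the "twisted" H stored by gcm_init_neon:

#include <stddef.h>
#include <stdint.h>

void gf128_mul(uint8_t out[16], const uint8_t a[16],
               const uint8_t b[16]);                      /* assumed helper */

static void ghash_blocks(uint8_t Xi[16], const uint8_t H[16],
                         const uint8_t *inp, size_t len) {
    for (size_t off = 0; off + 16 <= len; off += 16) {
        for (int i = 0; i < 16; i++)
            Xi[i] ^= inp[off + i];                        /* inp ^= Xi      */
        gf128_mul(Xi, Xi, H);                             /* Xi *= H        */
    }
}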
|
marvin-hansen/iggy-streaming-system
| 28,215
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/sha1-armv8.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
.text
.globl sha1_block_data_order_nohw
.def sha1_block_data_order_nohw
.type 32
.endef
.align 6
sha1_block_data_order_nohw:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-96]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
ldp w20,w21,[x0]
ldp w22,w23,[x0,#8]
ldr w24,[x0,#16]
Loop:
ldr x3,[x1],#64
movz w28,#0x7999
sub x2,x2,#1
movk w28,#0x5a82,lsl#16
#ifdef __AARCH64EB__
ror x3,x3,#32
#else
rev32 x3,x3
#endif
add w24,w24,w28 // warm it up
add w24,w24,w3
lsr x4,x3,#32
ldr x5,[x1,#-56]
bic w25,w23,w21
and w26,w22,w21
ror w27,w20,#27
add w23,w23,w28 // future e+=K
orr w25,w25,w26
add w24,w24,w27 // e+=rot(a,5)
ror w21,w21,#2
add w23,w23,w4 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
#ifdef __AARCH64EB__
ror x5,x5,#32
#else
rev32 x5,x5
#endif
bic w25,w22,w20
and w26,w21,w20
ror w27,w24,#27
add w22,w22,w28 // future e+=K
orr w25,w25,w26
add w23,w23,w27 // e+=rot(a,5)
ror w20,w20,#2
add w22,w22,w5 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
lsr x6,x5,#32
ldr x7,[x1,#-48]
bic w25,w21,w24
and w26,w20,w24
ror w27,w23,#27
add w21,w21,w28 // future e+=K
orr w25,w25,w26
add w22,w22,w27 // e+=rot(a,5)
ror w24,w24,#2
add w21,w21,w6 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
#ifdef __AARCH64EB__
ror x7,x7,#32
#else
rev32 x7,x7
#endif
bic w25,w20,w23
and w26,w24,w23
ror w27,w22,#27
add w20,w20,w28 // future e+=K
orr w25,w25,w26
add w21,w21,w27 // e+=rot(a,5)
ror w23,w23,#2
add w20,w20,w7 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
lsr x8,x7,#32
ldr x9,[x1,#-40]
bic w25,w24,w22
and w26,w23,w22
ror w27,w21,#27
add w24,w24,w28 // future e+=K
orr w25,w25,w26
add w20,w20,w27 // e+=rot(a,5)
ror w22,w22,#2
add w24,w24,w8 // future e+=X[i]
add w20,w20,w25 // e+=F(b,c,d)
#ifdef __AARCH64EB__
ror x9,x9,#32
#else
rev32 x9,x9
#endif
bic w25,w23,w21
and w26,w22,w21
ror w27,w20,#27
add w23,w23,w28 // future e+=K
orr w25,w25,w26
add w24,w24,w27 // e+=rot(a,5)
ror w21,w21,#2
add w23,w23,w9 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
lsr x10,x9,#32
ldr x11,[x1,#-32]
bic w25,w22,w20
and w26,w21,w20
ror w27,w24,#27
add w22,w22,w28 // future e+=K
orr w25,w25,w26
add w23,w23,w27 // e+=rot(a,5)
ror w20,w20,#2
add w22,w22,w10 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
#ifdef __AARCH64EB__
ror x11,x11,#32
#else
rev32 x11,x11
#endif
bic w25,w21,w24
and w26,w20,w24
ror w27,w23,#27
add w21,w21,w28 // future e+=K
orr w25,w25,w26
add w22,w22,w27 // e+=rot(a,5)
ror w24,w24,#2
add w21,w21,w11 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
lsr x12,x11,#32
ldr x13,[x1,#-24]
bic w25,w20,w23
and w26,w24,w23
ror w27,w22,#27
add w20,w20,w28 // future e+=K
orr w25,w25,w26
add w21,w21,w27 // e+=rot(a,5)
ror w23,w23,#2
add w20,w20,w12 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
#ifdef __AARCH64EB__
ror x13,x13,#32
#else
rev32 x13,x13
#endif
bic w25,w24,w22
and w26,w23,w22
ror w27,w21,#27
add w24,w24,w28 // future e+=K
orr w25,w25,w26
add w20,w20,w27 // e+=rot(a,5)
ror w22,w22,#2
add w24,w24,w13 // future e+=X[i]
add w20,w20,w25 // e+=F(b,c,d)
lsr x14,x13,#32
ldr x15,[x1,#-16]
bic w25,w23,w21
and w26,w22,w21
ror w27,w20,#27
add w23,w23,w28 // future e+=K
orr w25,w25,w26
add w24,w24,w27 // e+=rot(a,5)
ror w21,w21,#2
add w23,w23,w14 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
#ifdef __AARCH64EB__
ror x15,x15,#32
#else
rev32 x15,x15
#endif
bic w25,w22,w20
and w26,w21,w20
ror w27,w24,#27
add w22,w22,w28 // future e+=K
orr w25,w25,w26
add w23,w23,w27 // e+=rot(a,5)
ror w20,w20,#2
add w22,w22,w15 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
lsr x16,x15,#32
ldr x17,[x1,#-8]
bic w25,w21,w24
and w26,w20,w24
ror w27,w23,#27
add w21,w21,w28 // future e+=K
orr w25,w25,w26
add w22,w22,w27 // e+=rot(a,5)
ror w24,w24,#2
add w21,w21,w16 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
#ifdef __AARCH64EB__
ror x17,x17,#32
#else
rev32 x17,x17
#endif
bic w25,w20,w23
and w26,w24,w23
ror w27,w22,#27
add w20,w20,w28 // future e+=K
orr w25,w25,w26
add w21,w21,w27 // e+=rot(a,5)
ror w23,w23,#2
add w20,w20,w17 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
lsr x19,x17,#32
eor w3,w3,w5
bic w25,w24,w22
and w26,w23,w22
ror w27,w21,#27
eor w3,w3,w11
add w24,w24,w28 // future e+=K
orr w25,w25,w26
add w20,w20,w27 // e+=rot(a,5)
eor w3,w3,w16
ror w22,w22,#2
add w24,w24,w19 // future e+=X[i]
add w20,w20,w25 // e+=F(b,c,d)
ror w3,w3,#31
eor w4,w4,w6
bic w25,w23,w21
and w26,w22,w21
ror w27,w20,#27
eor w4,w4,w12
add w23,w23,w28 // future e+=K
orr w25,w25,w26
add w24,w24,w27 // e+=rot(a,5)
eor w4,w4,w17
ror w21,w21,#2
add w23,w23,w3 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
ror w4,w4,#31
eor w5,w5,w7
bic w25,w22,w20
and w26,w21,w20
ror w27,w24,#27
eor w5,w5,w13
add w22,w22,w28 // future e+=K
orr w25,w25,w26
add w23,w23,w27 // e+=rot(a,5)
eor w5,w5,w19
ror w20,w20,#2
add w22,w22,w4 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
ror w5,w5,#31
eor w6,w6,w8
bic w25,w21,w24
and w26,w20,w24
ror w27,w23,#27
eor w6,w6,w14
add w21,w21,w28 // future e+=K
orr w25,w25,w26
add w22,w22,w27 // e+=rot(a,5)
eor w6,w6,w3
ror w24,w24,#2
add w21,w21,w5 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
ror w6,w6,#31
eor w7,w7,w9
bic w25,w20,w23
and w26,w24,w23
ror w27,w22,#27
eor w7,w7,w15
add w20,w20,w28 // future e+=K
orr w25,w25,w26
add w21,w21,w27 // e+=rot(a,5)
eor w7,w7,w4
ror w23,w23,#2
add w20,w20,w6 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
ror w7,w7,#31
movz w28,#0xeba1
movk w28,#0x6ed9,lsl#16
eor w8,w8,w10
bic w25,w24,w22
and w26,w23,w22
ror w27,w21,#27
eor w8,w8,w16
add w24,w24,w28 // future e+=K
orr w25,w25,w26
add w20,w20,w27 // e+=rot(a,5)
eor w8,w8,w5
ror w22,w22,#2
add w24,w24,w7 // future e+=X[i]
add w20,w20,w25 // e+=F(b,c,d)
ror w8,w8,#31
eor w9,w9,w11
eor w25,w23,w21
ror w27,w20,#27
add w23,w23,w28 // future e+=K
eor w9,w9,w17
eor w25,w25,w22
add w24,w24,w27 // e+=rot(a,5)
ror w21,w21,#2
eor w9,w9,w6
add w23,w23,w8 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
ror w9,w9,#31
eor w10,w10,w12
eor w25,w22,w20
ror w27,w24,#27
add w22,w22,w28 // future e+=K
eor w10,w10,w19
eor w25,w25,w21
add w23,w23,w27 // e+=rot(a,5)
ror w20,w20,#2
eor w10,w10,w7
add w22,w22,w9 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
ror w10,w10,#31
eor w11,w11,w13
eor w25,w21,w24
ror w27,w23,#27
add w21,w21,w28 // future e+=K
eor w11,w11,w3
eor w25,w25,w20
add w22,w22,w27 // e+=rot(a,5)
ror w24,w24,#2
eor w11,w11,w8
add w21,w21,w10 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
ror w11,w11,#31
eor w12,w12,w14
eor w25,w20,w23
ror w27,w22,#27
add w20,w20,w28 // future e+=K
eor w12,w12,w4
eor w25,w25,w24
add w21,w21,w27 // e+=rot(a,5)
ror w23,w23,#2
eor w12,w12,w9
add w20,w20,w11 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
ror w12,w12,#31
eor w13,w13,w15
eor w25,w24,w22
ror w27,w21,#27
add w24,w24,w28 // future e+=K
eor w13,w13,w5
eor w25,w25,w23
add w20,w20,w27 // e+=rot(a,5)
ror w22,w22,#2
eor w13,w13,w10
add w24,w24,w12 // future e+=X[i]
add w20,w20,w25 // e+=F(b,c,d)
ror w13,w13,#31
eor w14,w14,w16
eor w25,w23,w21
ror w27,w20,#27
add w23,w23,w28 // future e+=K
eor w14,w14,w6
eor w25,w25,w22
add w24,w24,w27 // e+=rot(a,5)
ror w21,w21,#2
eor w14,w14,w11
add w23,w23,w13 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
ror w14,w14,#31
eor w15,w15,w17
eor w25,w22,w20
ror w27,w24,#27
add w22,w22,w28 // future e+=K
eor w15,w15,w7
eor w25,w25,w21
add w23,w23,w27 // e+=rot(a,5)
ror w20,w20,#2
eor w15,w15,w12
add w22,w22,w14 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
ror w15,w15,#31
eor w16,w16,w19
eor w25,w21,w24
ror w27,w23,#27
add w21,w21,w28 // future e+=K
eor w16,w16,w8
eor w25,w25,w20
add w22,w22,w27 // e+=rot(a,5)
ror w24,w24,#2
eor w16,w16,w13
add w21,w21,w15 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
ror w16,w16,#31
eor w17,w17,w3
eor w25,w20,w23
ror w27,w22,#27
add w20,w20,w28 // future e+=K
eor w17,w17,w9
eor w25,w25,w24
add w21,w21,w27 // e+=rot(a,5)
ror w23,w23,#2
eor w17,w17,w14
add w20,w20,w16 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
ror w17,w17,#31
eor w19,w19,w4
eor w25,w24,w22
ror w27,w21,#27
add w24,w24,w28 // future e+=K
eor w19,w19,w10
eor w25,w25,w23
add w20,w20,w27 // e+=rot(a,5)
ror w22,w22,#2
eor w19,w19,w15
add w24,w24,w17 // future e+=X[i]
add w20,w20,w25 // e+=F(b,c,d)
ror w19,w19,#31
eor w3,w3,w5
eor w25,w23,w21
ror w27,w20,#27
add w23,w23,w28 // future e+=K
eor w3,w3,w11
eor w25,w25,w22
add w24,w24,w27 // e+=rot(a,5)
ror w21,w21,#2
eor w3,w3,w16
add w23,w23,w19 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
ror w3,w3,#31
eor w4,w4,w6
eor w25,w22,w20
ror w27,w24,#27
add w22,w22,w28 // future e+=K
eor w4,w4,w12
eor w25,w25,w21
add w23,w23,w27 // e+=rot(a,5)
ror w20,w20,#2
eor w4,w4,w17
add w22,w22,w3 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
ror w4,w4,#31
eor w5,w5,w7
eor w25,w21,w24
ror w27,w23,#27
add w21,w21,w28 // future e+=K
eor w5,w5,w13
eor w25,w25,w20
add w22,w22,w27 // e+=rot(a,5)
ror w24,w24,#2
eor w5,w5,w19
add w21,w21,w4 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
ror w5,w5,#31
eor w6,w6,w8
eor w25,w20,w23
ror w27,w22,#27
add w20,w20,w28 // future e+=K
eor w6,w6,w14
eor w25,w25,w24
add w21,w21,w27 // e+=rot(a,5)
ror w23,w23,#2
eor w6,w6,w3
add w20,w20,w5 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
ror w6,w6,#31
eor w7,w7,w9
eor w25,w24,w22
ror w27,w21,#27
add w24,w24,w28 // future e+=K
eor w7,w7,w15
eor w25,w25,w23
add w20,w20,w27 // e+=rot(a,5)
ror w22,w22,#2
eor w7,w7,w4
add w24,w24,w6 // future e+=X[i]
add w20,w20,w25 // e+=F(b,c,d)
ror w7,w7,#31
eor w8,w8,w10
eor w25,w23,w21
ror w27,w20,#27
add w23,w23,w28 // future e+=K
eor w8,w8,w16
eor w25,w25,w22
add w24,w24,w27 // e+=rot(a,5)
ror w21,w21,#2
eor w8,w8,w5
add w23,w23,w7 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
ror w8,w8,#31
eor w9,w9,w11
eor w25,w22,w20
ror w27,w24,#27
add w22,w22,w28 // future e+=K
eor w9,w9,w17
eor w25,w25,w21
add w23,w23,w27 // e+=rot(a,5)
ror w20,w20,#2
eor w9,w9,w6
add w22,w22,w8 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
ror w9,w9,#31
eor w10,w10,w12
eor w25,w21,w24
ror w27,w23,#27
add w21,w21,w28 // future e+=K
eor w10,w10,w19
eor w25,w25,w20
add w22,w22,w27 // e+=rot(a,5)
ror w24,w24,#2
eor w10,w10,w7
add w21,w21,w9 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
ror w10,w10,#31
eor w11,w11,w13
eor w25,w20,w23
ror w27,w22,#27
add w20,w20,w28 // future e+=K
eor w11,w11,w3
eor w25,w25,w24
add w21,w21,w27 // e+=rot(a,5)
ror w23,w23,#2
eor w11,w11,w8
add w20,w20,w10 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
ror w11,w11,#31
movz w28,#0xbcdc
movk w28,#0x8f1b,lsl#16
eor w12,w12,w14
eor w25,w24,w22
ror w27,w21,#27
add w24,w24,w28 // future e+=K
eor w12,w12,w4
eor w25,w25,w23
add w20,w20,w27 // e+=rot(a,5)
ror w22,w22,#2
eor w12,w12,w9
add w24,w24,w11 // future e+=X[i]
add w20,w20,w25 // e+=F(b,c,d)
ror w12,w12,#31
orr w25,w21,w22
and w26,w21,w22
eor w13,w13,w15
ror w27,w20,#27
and w25,w25,w23
add w23,w23,w28 // future e+=K
eor w13,w13,w5
add w24,w24,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w21,w21,#2
eor w13,w13,w10
add w23,w23,w12 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
ror w13,w13,#31
orr w25,w20,w21
and w26,w20,w21
eor w14,w14,w16
ror w27,w24,#27
and w25,w25,w22
add w22,w22,w28 // future e+=K
eor w14,w14,w6
add w23,w23,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w20,w20,#2
eor w14,w14,w11
add w22,w22,w13 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
ror w14,w14,#31
orr w25,w24,w20
and w26,w24,w20
eor w15,w15,w17
ror w27,w23,#27
and w25,w25,w21
add w21,w21,w28 // future e+=K
eor w15,w15,w7
add w22,w22,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w24,w24,#2
eor w15,w15,w12
add w21,w21,w14 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
ror w15,w15,#31
orr w25,w23,w24
and w26,w23,w24
eor w16,w16,w19
ror w27,w22,#27
and w25,w25,w20
add w20,w20,w28 // future e+=K
eor w16,w16,w8
add w21,w21,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w23,w23,#2
eor w16,w16,w13
add w20,w20,w15 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
ror w16,w16,#31
orr w25,w22,w23
and w26,w22,w23
eor w17,w17,w3
ror w27,w21,#27
and w25,w25,w24
add w24,w24,w28 // future e+=K
eor w17,w17,w9
add w20,w20,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w22,w22,#2
eor w17,w17,w14
add w24,w24,w16 // future e+=X[i]
add w20,w20,w25 // e+=F(b,c,d)
ror w17,w17,#31
orr w25,w21,w22
and w26,w21,w22
eor w19,w19,w4
ror w27,w20,#27
and w25,w25,w23
add w23,w23,w28 // future e+=K
eor w19,w19,w10
add w24,w24,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w21,w21,#2
eor w19,w19,w15
add w23,w23,w17 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
ror w19,w19,#31
orr w25,w20,w21
and w26,w20,w21
eor w3,w3,w5
ror w27,w24,#27
and w25,w25,w22
add w22,w22,w28 // future e+=K
eor w3,w3,w11
add w23,w23,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w20,w20,#2
eor w3,w3,w16
add w22,w22,w19 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
ror w3,w3,#31
orr w25,w24,w20
and w26,w24,w20
eor w4,w4,w6
ror w27,w23,#27
and w25,w25,w21
add w21,w21,w28 // future e+=K
eor w4,w4,w12
add w22,w22,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w24,w24,#2
eor w4,w4,w17
add w21,w21,w3 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
ror w4,w4,#31
orr w25,w23,w24
and w26,w23,w24
eor w5,w5,w7
ror w27,w22,#27
and w25,w25,w20
add w20,w20,w28 // future e+=K
eor w5,w5,w13
add w21,w21,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w23,w23,#2
eor w5,w5,w19
add w20,w20,w4 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
ror w5,w5,#31
orr w25,w22,w23
and w26,w22,w23
eor w6,w6,w8
ror w27,w21,#27
and w25,w25,w24
add w24,w24,w28 // future e+=K
eor w6,w6,w14
add w20,w20,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w22,w22,#2
eor w6,w6,w3
add w24,w24,w5 // future e+=X[i]
add w20,w20,w25 // e+=F(b,c,d)
ror w6,w6,#31
orr w25,w21,w22
and w26,w21,w22
eor w7,w7,w9
ror w27,w20,#27
and w25,w25,w23
add w23,w23,w28 // future e+=K
eor w7,w7,w15
add w24,w24,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w21,w21,#2
eor w7,w7,w4
add w23,w23,w6 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
ror w7,w7,#31
orr w25,w20,w21
and w26,w20,w21
eor w8,w8,w10
ror w27,w24,#27
and w25,w25,w22
add w22,w22,w28 // future e+=K
eor w8,w8,w16
add w23,w23,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w20,w20,#2
eor w8,w8,w5
add w22,w22,w7 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
ror w8,w8,#31
orr w25,w24,w20
and w26,w24,w20
eor w9,w9,w11
ror w27,w23,#27
and w25,w25,w21
add w21,w21,w28 // future e+=K
eor w9,w9,w17
add w22,w22,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w24,w24,#2
eor w9,w9,w6
add w21,w21,w8 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
ror w9,w9,#31
orr w25,w23,w24
and w26,w23,w24
eor w10,w10,w12
ror w27,w22,#27
and w25,w25,w20
add w20,w20,w28 // future e+=K
eor w10,w10,w19
add w21,w21,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w23,w23,#2
eor w10,w10,w7
add w20,w20,w9 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
ror w10,w10,#31
orr w25,w22,w23
and w26,w22,w23
eor w11,w11,w13
ror w27,w21,#27
and w25,w25,w24
add w24,w24,w28 // future e+=K
eor w11,w11,w3
add w20,w20,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w22,w22,#2
eor w11,w11,w8
add w24,w24,w10 // future e+=X[i]
add w20,w20,w25 // e+=F(b,c,d)
ror w11,w11,#31
orr w25,w21,w22
and w26,w21,w22
eor w12,w12,w14
ror w27,w20,#27
and w25,w25,w23
add w23,w23,w28 // future e+=K
eor w12,w12,w4
add w24,w24,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w21,w21,#2
eor w12,w12,w9
add w23,w23,w11 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
ror w12,w12,#31
orr w25,w20,w21
and w26,w20,w21
eor w13,w13,w15
ror w27,w24,#27
and w25,w25,w22
add w22,w22,w28 // future e+=K
eor w13,w13,w5
add w23,w23,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w20,w20,#2
eor w13,w13,w10
add w22,w22,w12 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
ror w13,w13,#31
orr w25,w24,w20
and w26,w24,w20
eor w14,w14,w16
ror w27,w23,#27
and w25,w25,w21
add w21,w21,w28 // future e+=K
eor w14,w14,w6
add w22,w22,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w24,w24,#2
eor w14,w14,w11
add w21,w21,w13 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
ror w14,w14,#31
orr w25,w23,w24
and w26,w23,w24
eor w15,w15,w17
ror w27,w22,#27
and w25,w25,w20
add w20,w20,w28 // future e+=K
eor w15,w15,w7
add w21,w21,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w23,w23,#2
eor w15,w15,w12
add w20,w20,w14 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
ror w15,w15,#31
movz w28,#0xc1d6
movk w28,#0xca62,lsl#16
orr w25,w22,w23
and w26,w22,w23
eor w16,w16,w19
ror w27,w21,#27
and w25,w25,w24
add w24,w24,w28 // future e+=K
eor w16,w16,w8
add w20,w20,w27 // e+=rot(a,5)
orr w25,w25,w26
ror w22,w22,#2
eor w16,w16,w13
add w24,w24,w15 // future e+=X[i]
add w20,w20,w25 // e+=F(b,c,d)
ror w16,w16,#31
eor w17,w17,w3
eor w25,w23,w21
ror w27,w20,#27
add w23,w23,w28 // future e+=K
eor w17,w17,w9
eor w25,w25,w22
add w24,w24,w27 // e+=rot(a,5)
ror w21,w21,#2
eor w17,w17,w14
add w23,w23,w16 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
ror w17,w17,#31
eor w19,w19,w4
eor w25,w22,w20
ror w27,w24,#27
add w22,w22,w28 // future e+=K
eor w19,w19,w10
eor w25,w25,w21
add w23,w23,w27 // e+=rot(a,5)
ror w20,w20,#2
eor w19,w19,w15
add w22,w22,w17 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
ror w19,w19,#31
eor w3,w3,w5
eor w25,w21,w24
ror w27,w23,#27
add w21,w21,w28 // future e+=K
eor w3,w3,w11
eor w25,w25,w20
add w22,w22,w27 // e+=rot(a,5)
ror w24,w24,#2
eor w3,w3,w16
add w21,w21,w19 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
ror w3,w3,#31
eor w4,w4,w6
eor w25,w20,w23
ror w27,w22,#27
add w20,w20,w28 // future e+=K
eor w4,w4,w12
eor w25,w25,w24
add w21,w21,w27 // e+=rot(a,5)
ror w23,w23,#2
eor w4,w4,w17
add w20,w20,w3 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
ror w4,w4,#31
eor w5,w5,w7
eor w25,w24,w22
ror w27,w21,#27
add w24,w24,w28 // future e+=K
eor w5,w5,w13
eor w25,w25,w23
add w20,w20,w27 // e+=rot(a,5)
ror w22,w22,#2
eor w5,w5,w19
add w24,w24,w4 // future e+=X[i]
add w20,w20,w25 // e+=F(b,c,d)
ror w5,w5,#31
eor w6,w6,w8
eor w25,w23,w21
ror w27,w20,#27
add w23,w23,w28 // future e+=K
eor w6,w6,w14
eor w25,w25,w22
add w24,w24,w27 // e+=rot(a,5)
ror w21,w21,#2
eor w6,w6,w3
add w23,w23,w5 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
ror w6,w6,#31
eor w7,w7,w9
eor w25,w22,w20
ror w27,w24,#27
add w22,w22,w28 // future e+=K
eor w7,w7,w15
eor w25,w25,w21
add w23,w23,w27 // e+=rot(a,5)
ror w20,w20,#2
eor w7,w7,w4
add w22,w22,w6 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
ror w7,w7,#31
eor w8,w8,w10
eor w25,w21,w24
ror w27,w23,#27
add w21,w21,w28 // future e+=K
eor w8,w8,w16
eor w25,w25,w20
add w22,w22,w27 // e+=rot(a,5)
ror w24,w24,#2
eor w8,w8,w5
add w21,w21,w7 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
ror w8,w8,#31
eor w9,w9,w11
eor w25,w20,w23
ror w27,w22,#27
add w20,w20,w28 // future e+=K
eor w9,w9,w17
eor w25,w25,w24
add w21,w21,w27 // e+=rot(a,5)
ror w23,w23,#2
eor w9,w9,w6
add w20,w20,w8 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
ror w9,w9,#31
eor w10,w10,w12
eor w25,w24,w22
ror w27,w21,#27
add w24,w24,w28 // future e+=K
eor w10,w10,w19
eor w25,w25,w23
add w20,w20,w27 // e+=rot(a,5)
ror w22,w22,#2
eor w10,w10,w7
add w24,w24,w9 // future e+=X[i]
add w20,w20,w25 // e+=F(b,c,d)
ror w10,w10,#31
eor w11,w11,w13
eor w25,w23,w21
ror w27,w20,#27
add w23,w23,w28 // future e+=K
eor w11,w11,w3
eor w25,w25,w22
add w24,w24,w27 // e+=rot(a,5)
ror w21,w21,#2
eor w11,w11,w8
add w23,w23,w10 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
ror w11,w11,#31
eor w12,w12,w14
eor w25,w22,w20
ror w27,w24,#27
add w22,w22,w28 // future e+=K
eor w12,w12,w4
eor w25,w25,w21
add w23,w23,w27 // e+=rot(a,5)
ror w20,w20,#2
eor w12,w12,w9
add w22,w22,w11 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
ror w12,w12,#31
eor w13,w13,w15
eor w25,w21,w24
ror w27,w23,#27
add w21,w21,w28 // future e+=K
eor w13,w13,w5
eor w25,w25,w20
add w22,w22,w27 // e+=rot(a,5)
ror w24,w24,#2
eor w13,w13,w10
add w21,w21,w12 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
ror w13,w13,#31
eor w14,w14,w16
eor w25,w20,w23
ror w27,w22,#27
add w20,w20,w28 // future e+=K
eor w14,w14,w6
eor w25,w25,w24
add w21,w21,w27 // e+=rot(a,5)
ror w23,w23,#2
eor w14,w14,w11
add w20,w20,w13 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
ror w14,w14,#31
eor w15,w15,w17
eor w25,w24,w22
ror w27,w21,#27
add w24,w24,w28 // future e+=K
eor w15,w15,w7
eor w25,w25,w23
add w20,w20,w27 // e+=rot(a,5)
ror w22,w22,#2
eor w15,w15,w12
add w24,w24,w14 // future e+=X[i]
add w20,w20,w25 // e+=F(b,c,d)
ror w15,w15,#31
eor w16,w16,w19
eor w25,w23,w21
ror w27,w20,#27
add w23,w23,w28 // future e+=K
eor w16,w16,w8
eor w25,w25,w22
add w24,w24,w27 // e+=rot(a,5)
ror w21,w21,#2
eor w16,w16,w13
add w23,w23,w15 // future e+=X[i]
add w24,w24,w25 // e+=F(b,c,d)
ror w16,w16,#31
eor w17,w17,w3
eor w25,w22,w20
ror w27,w24,#27
add w22,w22,w28 // future e+=K
eor w17,w17,w9
eor w25,w25,w21
add w23,w23,w27 // e+=rot(a,5)
ror w20,w20,#2
eor w17,w17,w14
add w22,w22,w16 // future e+=X[i]
add w23,w23,w25 // e+=F(b,c,d)
ror w17,w17,#31
eor w19,w19,w4
eor w25,w21,w24
ror w27,w23,#27
add w21,w21,w28 // future e+=K
eor w19,w19,w10
eor w25,w25,w20
add w22,w22,w27 // e+=rot(a,5)
ror w24,w24,#2
eor w19,w19,w15
add w21,w21,w17 // future e+=X[i]
add w22,w22,w25 // e+=F(b,c,d)
ror w19,w19,#31
ldp w4,w5,[x0]
eor w25,w20,w23
ror w27,w22,#27
add w20,w20,w28 // future e+=K
eor w25,w25,w24
add w21,w21,w27 // e+=rot(a,5)
ror w23,w23,#2
add w20,w20,w19 // future e+=X[i]
add w21,w21,w25 // e+=F(b,c,d)
ldp w6,w7,[x0,#8]
eor w25,w24,w22
ror w27,w21,#27
eor w25,w25,w23
add w20,w20,w27 // e+=rot(a,5)
ror w22,w22,#2
ldr w8,[x0,#16]
add w20,w20,w25 // e+=F(b,c,d)
add w21,w21,w5
add w22,w22,w6
add w20,w20,w4
add w23,w23,w7
add w24,w24,w8
stp w20,w21,[x0]
stp w22,w23,[x0,#8]
str w24,[x0,#16]
cbnz x2,Loop
ldp x19,x20,[sp,#16]
ldp x21,x22,[sp,#32]
ldp x23,x24,[sp,#48]
ldp x25,x26,[sp,#64]
ldp x27,x28,[sp,#80]
ldr x29,[sp],#96
ret
.globl sha1_block_data_order_hw
.def sha1_block_data_order_hw
.type 32
.endef
.align 6
sha1_block_data_order_hw:
// Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later.
AARCH64_VALID_CALL_TARGET
stp x29,x30,[sp,#-16]!
add x29,sp,#0
adrp x4,Lconst
add x4,x4,:lo12:Lconst
eor v1.16b,v1.16b,v1.16b
ld1 {v0.4s},[x0],#16
ld1 {v1.s}[0],[x0]
sub x0,x0,#16
ld1 {v16.4s,v17.4s,v18.4s,v19.4s},[x4]
Loop_hw:
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
sub x2,x2,#1
rev32 v4.16b,v4.16b
rev32 v5.16b,v5.16b
add v20.4s,v16.4s,v4.4s
rev32 v6.16b,v6.16b
orr v22.16b,v0.16b,v0.16b // offload
add v21.4s,v16.4s,v5.4s
rev32 v7.16b,v7.16b
.long 0x5e280803 //sha1h v3.16b,v0.16b
.long 0x5e140020 //sha1c v0.16b,v1.16b,v20.4s // 0
add v20.4s,v16.4s,v6.4s
.long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b
.long 0x5e280802 //sha1h v2.16b,v0.16b // 1
.long 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s
add v21.4s,v16.4s,v7.4s
.long 0x5e2818e4 //sha1su1 v4.16b,v7.16b
.long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b
.long 0x5e280803 //sha1h v3.16b,v0.16b // 2
.long 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s
add v20.4s,v16.4s,v4.4s
.long 0x5e281885 //sha1su1 v5.16b,v4.16b
.long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b
.long 0x5e280802 //sha1h v2.16b,v0.16b // 3
.long 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s
add v21.4s,v17.4s,v5.4s
.long 0x5e2818a6 //sha1su1 v6.16b,v5.16b
.long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b
.long 0x5e280803 //sha1h v3.16b,v0.16b // 4
.long 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s
add v20.4s,v17.4s,v6.4s
.long 0x5e2818c7 //sha1su1 v7.16b,v6.16b
.long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b
.long 0x5e280802 //sha1h v2.16b,v0.16b // 5
.long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s
add v21.4s,v17.4s,v7.4s
.long 0x5e2818e4 //sha1su1 v4.16b,v7.16b
.long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b
.long 0x5e280803 //sha1h v3.16b,v0.16b // 6
.long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s
add v20.4s,v17.4s,v4.4s
.long 0x5e281885 //sha1su1 v5.16b,v4.16b
.long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b
.long 0x5e280802 //sha1h v2.16b,v0.16b // 7
.long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s
add v21.4s,v17.4s,v5.4s
.long 0x5e2818a6 //sha1su1 v6.16b,v5.16b
.long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b
.long 0x5e280803 //sha1h v3.16b,v0.16b // 8
.long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s
add v20.4s,v18.4s,v6.4s
.long 0x5e2818c7 //sha1su1 v7.16b,v6.16b
.long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b
.long 0x5e280802 //sha1h v2.16b,v0.16b // 9
.long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s
add v21.4s,v18.4s,v7.4s
.long 0x5e2818e4 //sha1su1 v4.16b,v7.16b
.long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b
.long 0x5e280803 //sha1h v3.16b,v0.16b // 10
.long 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s
add v20.4s,v18.4s,v4.4s
.long 0x5e281885 //sha1su1 v5.16b,v4.16b
.long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b
.long 0x5e280802 //sha1h v2.16b,v0.16b // 11
.long 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s
add v21.4s,v18.4s,v5.4s
.long 0x5e2818a6 //sha1su1 v6.16b,v5.16b
.long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b
.long 0x5e280803 //sha1h v3.16b,v0.16b // 12
.long 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s
add v20.4s,v18.4s,v6.4s
.long 0x5e2818c7 //sha1su1 v7.16b,v6.16b
.long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b
.long 0x5e280802 //sha1h v2.16b,v0.16b // 13
.long 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s
add v21.4s,v19.4s,v7.4s
.long 0x5e2818e4 //sha1su1 v4.16b,v7.16b
.long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b
.long 0x5e280803 //sha1h v3.16b,v0.16b // 14
.long 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s
add v20.4s,v19.4s,v4.4s
.long 0x5e281885 //sha1su1 v5.16b,v4.16b
.long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b
.long 0x5e280802 //sha1h v2.16b,v0.16b // 15
.long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s
add v21.4s,v19.4s,v5.4s
.long 0x5e2818a6 //sha1su1 v6.16b,v5.16b
.long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b
.long 0x5e280803 //sha1h v3.16b,v0.16b // 16
.long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s
add v20.4s,v19.4s,v6.4s
.long 0x5e2818c7 //sha1su1 v7.16b,v6.16b
.long 0x5e280802 //sha1h v2.16b,v0.16b // 17
.long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s
add v21.4s,v19.4s,v7.4s
.long 0x5e280803 //sha1h v3.16b,v0.16b // 18
.long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s
.long 0x5e280802 //sha1h v2.16b,v0.16b // 19
.long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s
add v1.4s,v1.4s,v2.4s
add v0.4s,v0.4s,v22.4s
cbnz x2,Loop_hw
st1 {v0.4s},[x0],#16
st1 {v1.s}[0],[x0]
ldr x29,[sp],#16
ret
.section .rodata
.align 6
Lconst:
.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 //K_00_19
.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 //K_20_39
.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc //K_40_59
.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 //K_60_79
.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
marvin-hansen/iggy-streaming-system | 80,057 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/aesv8-gcm-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
#if __ARM_MAX_ARCH__ >= 8
.arch armv8-a+crypto
.text
.globl aes_gcm_enc_kernel
.def aes_gcm_enc_kernel
.type 32
.endef
.align 4
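// Register interface, as used by the code below (the authoritative
// description lives in the generating Perl script):
//   x0  input pointer (plaintext to encrypt)
//   x1  input length in bits (converted to bytes with lsr #3)
//   x2  output pointer (ciphertext)
//   x3  current GHASH tag Xi (loaded into v11, written back on exit)
//   x4  counter/IV block (copied to x16; the 32-bit counter word is
//       stored back at offset 12 on exit)
//   x5  AES key schedule (copied to x8; round count at offset 240)
//   x6  GHASH table holding the hash-key powers H^1..H^4
// On return x0 holds the byte length that was processed.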
aes_gcm_enc_kernel:
#ifdef BORINGSSL_DISPATCH_TEST
adrp x9,BORINGSSL_function_hit
add x9, x9, :lo12:BORINGSSL_function_hit
mov w10, #1
strb w10, [x9,#2] // kFlag_aes_gcm_enc_kernel
#endif
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-128]!
mov x29, sp
stp x19, x20, [sp, #16]
mov x16, x4
mov x8, x5
stp x21, x22, [sp, #32]
stp x23, x24, [sp, #48]
stp d8, d9, [sp, #64]
stp d10, d11, [sp, #80]
stp d12, d13, [sp, #96]
stp d14, d15, [sp, #112]
ldr w17, [x8, #240]
add x19, x8, x17, lsl #4 // borrow input_l1 for last key
ldp x13, x14, [x19] // load round N keys
ldr q31, [x19, #-16] // load round N-1 keys
add x4, x0, x1, lsr #3 // end_input_ptr
lsr x5, x1, #3 // byte_len
mov x15, x5
ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible
sub x5, x5, #1 // byte_len - 1
ldr q18, [x8, #0] // load rk0
and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
ldr q25, [x8, #112] // load rk7
add x5, x5, x0
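// After the mask above, x5 = in + ((byte_len - 1) & ~63): the main loop
// consumes whole groups of four 16-byte blocks and always leaves between
// 1 and 64 bytes for the tail code.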
lsr x12, x11, #32
fmov d2, x10 // CTR block 2
orr w11, w11, w11
rev w12, w12 // rev_ctr32
fmov d1, x10 // CTR block 1
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 0 - round 0
add w12, w12, #1 // increment rev_ctr32
rev w9, w12 // CTR block 1
fmov d3, x10 // CTR block 3
orr x9, x11, x9, lsl #32 // CTR block 1
add w12, w12, #1 // CTR block 1
ldr q19, [x8, #16] // load rk1
fmov v1.d[1], x9 // CTR block 1
rev w9, w12 // CTR block 2
add w12, w12, #1 // CTR block 2
orr x9, x11, x9, lsl #32 // CTR block 2
ldr q20, [x8, #32] // load rk2
fmov v2.d[1], x9 // CTR block 2
rev w9, w12 // CTR block 3
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 0 - round 1
orr x9, x11, x9, lsl #32 // CTR block 3
fmov v3.d[1], x9 // CTR block 3
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 1 - round 0
ldr q21, [x8, #48] // load rk3
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 0 - round 2
ldr q24, [x8, #96] // load rk6
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 2 - round 0
ldr q23, [x8, #80] // load rk5
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 1 - round 1
ldr q14, [x6, #48] // load h3l | h3h
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 3 - round 0
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 2 - round 1
ldr q22, [x8, #64] // load rk4
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 1 - round 2
ldr q13, [x6, #32] // load h2l | h2h
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 3 - round 1
ldr q30, [x8, #192] // load rk12
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 2 - round 2
ldr q15, [x6, #80] // load h4l | h4h
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 1 - round 3
ldr q29, [x8, #176] // load rk11
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 3 - round 2
ldr q26, [x8, #128] // load rk8
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 2 - round 3
add w12, w12, #1 // CTR block 3
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 0 - round 3
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 3 - round 3
ld1 { v11.16b}, [x3]
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 2 - round 4
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 0 - round 4
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 1 - round 4
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 3 - round 4
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 0 - round 5
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 1 - round 5
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 3 - round 5
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 2 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 1 - round 6
trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 3 - round 6
ldr q27, [x8, #144] // load rk9
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 0 - round 6
ldr q12, [x6] // load h1l | h1h
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 2 - round 6
ldr q28, [x8, #160] // load rk10
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 1 - round 7
trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 0 - round 7
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 2 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 3 - round 7
trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 1 - round 8
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 2 - round 8
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 3 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 0 - round 8
b.lt Lenc_finish_first_blocks // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 1 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 2 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 3 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 0 - round 9
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 1 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 2 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 3 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 0 - round 10
b.eq Lenc_finish_first_blocks // branch if AES-192
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 1 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 2 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 0 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 3 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 1 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 2 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 0 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 3 - round 12
Lenc_finish_first_blocks:
cmp x0, x5 // check if we have <= 4 blocks
eor v17.16b, v17.16b, v9.16b // h4k | h3k
aese v2.16b, v31.16b // AES block 2 - round N-1
trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
aese v1.16b, v31.16b // AES block 1 - round N-1
aese v0.16b, v31.16b // AES block 0 - round N-1
aese v3.16b, v31.16b // AES block 3 - round N-1
eor v16.16b, v16.16b, v8.16b // h2k | h1k
b.ge Lenc_tail // handle tail
ldp x19, x20, [x0, #16] // AES block 1 - load plaintext
rev w9, w12 // CTR block 4
ldp x6, x7, [x0, #0] // AES block 0 - load plaintext
ldp x23, x24, [x0, #48] // AES block 3 - load plaintext
ldp x21, x22, [x0, #32] // AES block 2 - load plaintext
add x0, x0, #64 // AES input_ptr update
eor x19, x19, x13 // AES block 1 - round N low
eor x20, x20, x14 // AES block 1 - round N high
fmov d5, x19 // AES block 1 - mov low
eor x6, x6, x13 // AES block 0 - round N low
eor x7, x7, x14 // AES block 0 - round N high
eor x24, x24, x14 // AES block 3 - round N high
fmov d4, x6 // AES block 0 - mov low
cmp x0, x5 // check if we have <= 8 blocks
fmov v4.d[1], x7 // AES block 0 - mov high
eor x23, x23, x13 // AES block 3 - round N low
eor x21, x21, x13 // AES block 2 - round N low
fmov v5.d[1], x20 // AES block 1 - mov high
fmov d6, x21 // AES block 2 - mov low
add w12, w12, #1 // CTR block 4
orr x9, x11, x9, lsl #32 // CTR block 4
fmov d7, x23 // AES block 3 - mov low
eor x22, x22, x14 // AES block 2 - round N high
fmov v6.d[1], x22 // AES block 2 - mov high
eor v4.16b, v4.16b, v0.16b // AES block 0 - result
fmov d0, x10 // CTR block 4
fmov v0.d[1], x9 // CTR block 4
rev w9, w12 // CTR block 5
add w12, w12, #1 // CTR block 5
eor v5.16b, v5.16b, v1.16b // AES block 1 - result
fmov d1, x10 // CTR block 5
orr x9, x11, x9, lsl #32 // CTR block 5
fmov v1.d[1], x9 // CTR block 5
rev w9, w12 // CTR block 6
st1 { v4.16b}, [x2], #16 // AES block 0 - store result
fmov v7.d[1], x24 // AES block 3 - mov high
orr x9, x11, x9, lsl #32 // CTR block 6
eor v6.16b, v6.16b, v2.16b // AES block 2 - result
st1 { v5.16b}, [x2], #16 // AES block 1 - store result
add w12, w12, #1 // CTR block 6
fmov d2, x10 // CTR block 6
fmov v2.d[1], x9 // CTR block 6
st1 { v6.16b}, [x2], #16 // AES block 2 - store result
rev w9, w12 // CTR block 7
orr x9, x11, x9, lsl #32 // CTR block 7
eor v7.16b, v7.16b, v3.16b // AES block 3 - result
st1 { v7.16b}, [x2], #16 // AES block 3 - store result
b.ge Lenc_prepretail // do prepretail
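// Main encryption loop: each iteration runs the AES rounds for counter
// blocks 4k+4..4k+7 while folding the four ciphertext blocks produced in
// the previous iteration into the GHASH state, multiplying them by the
// key powers H^4 down to H^1 and accumulating the high/low/mid Karatsuba
// partial products in v9/v11/v10.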
Lenc_main_loop: // main loop start
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d3, x10 // CTR block 4k+3
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
fmov v3.d[1], x9 // CTR block 4k+3
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
ldp x21, x22, [x0, #32] // AES block 4k+6 - load plaintext
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
eor v4.16b, v4.16b, v11.16b // PRE 1
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
eor x23, x23, x13 // AES block 4k+7 - round N low
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
mov d10, v17.d[1] // GHASH block 4k - mid
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
eor x22, x22, x14 // AES block 4k+6 - round N high
mov d8, v4.d[1] // GHASH block 4k - mid
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
mov d4, v7.d[1] // GHASH block 4k+3 - mid
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor x19, x19, x13 // AES block 4k+5 - round N low
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
eor x21, x21, x13 // AES block 4k+6 - round N low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
movi v8.8b, #0xc2
pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
cmp x17, #12 // setup flags for AES-128/192/256 check
fmov d5, x19 // AES block 4k+5 - mov low
ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext
b.lt Lenc_main_loop_continue // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
b.eq Lenc_main_loop_continue // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Lenc_main_loop_continue:
shl d8, d8, #56 // mod_constant
eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
add w12, w12, #1 // CTR block 4k+3
eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
add x0, x0, #64 // AES input_ptr update
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
rev w9, w12 // CTR block 4k+8
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor x6, x6, x13 // AES block 4k+4 - round N low
eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
eor x7, x7, x14 // AES block 4k+4 - round N high
fmov d4, x6 // AES block 4k+4 - mov low
orr x9, x11, x9, lsl #32 // CTR block 4k+8
eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid
eor x20, x20, x14 // AES block 4k+5 - round N high
eor x24, x24, x14 // AES block 4k+7 - round N high
add w12, w12, #1 // CTR block 4k+8
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
fmov v4.d[1], x7 // AES block 4k+4 - mov high
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
fmov d7, x23 // AES block 4k+7 - mov low
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
fmov v5.d[1], x20 // AES block 4k+5 - mov high
fmov d6, x21 // AES block 4k+6 - mov low
cmp x0, x5 // LOOP CONTROL
fmov v6.d[1], x22 // AES block 4k+6 - mov high
pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result
fmov d0, x10 // CTR block 4k+8
fmov v0.d[1], x9 // CTR block 4k+8
rev w9, w12 // CTR block 4k+9
add w12, w12, #1 // CTR block 4k+9
eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result
fmov d1, x10 // CTR block 4k+9
orr x9, x11, x9, lsl #32 // CTR block 4k+9
fmov v1.d[1], x9 // CTR block 4k+9
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
rev w9, w12 // CTR block 4k+10
st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result
orr x9, x11, x9, lsl #32 // CTR block 4k+10
eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
fmov v7.d[1], x24 // AES block 4k+7 - mov high
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result
add w12, w12, #1 // CTR block 4k+10
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result
fmov d2, x10 // CTR block 4k+10
st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result
fmov v2.d[1], x9 // CTR block 4k+10
rev w9, w12 // CTR block 4k+11
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
orr x9, x11, x9, lsl #32 // CTR block 4k+11
eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result
st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result
b.lt Lenc_main_loop
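// Pre-pretail: encrypt the last four counter blocks and absorb the final
// four ciphertext blocks from the main loop into the GHASH state,
// including the modular reduction, without loading any new input.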
Lenc_prepretail: // PREPRETAIL
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free)
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
fmov d3, x10 // CTR block 4k+3
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free)
fmov v3.d[1], x9 // CTR block 4k+3
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
eor v4.16b, v4.16b, v11.16b // PRE 1
rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free)
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
mov d10, v17.d[1] // GHASH block 4k - mid
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
mov d8, v4.d[1] // GHASH block 4k - mid
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free)
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
add w12, w12, #1 // CTR block 4k+3
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
mov d4, v7.d[1] // GHASH block 4k+3 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
movi v8.8b, #0xc2
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
shl d8, d8, #56 // mod_constant
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid
pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v10.16b, v10.16b, v9.16b // karatsuba tidy up
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
pmull v4.1q, v9.1d, v8.1d
ext v9.16b, v9.16b, v9.16b, #8
eor v10.16b, v10.16b, v11.16b
b.lt Lenc_finish_prepretail // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
b.eq Lenc_finish_prepretail // branch if AES-192
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
Lenc_finish_prepretail:
eor v10.16b, v10.16b, v4.16b
eor v10.16b, v10.16b, v9.16b
pmull v4.1q, v10.1d, v8.1d
ext v10.16b, v10.16b, v10.16b, #8
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
eor v11.16b, v11.16b, v4.16b
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
eor v11.16b, v11.16b, v10.16b
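// Tail: between one and four 16-byte blocks remain. The code branches on
// the remaining byte count (48/32/16) and falls through the per-block
// cases, hashing each stored ciphertext block before the final reduction,
// then writes back the updated counter and tag.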
Lenc_tail: // TAIL
ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process
ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
cmp x5, #48
fmov d4, x6 // AES block 4k+4 - mov low
fmov v4.d[1], x7 // AES block 4k+4 - mov high
eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result
b.gt Lenc_blocks_4_remaining
cmp x5, #32
mov v3.16b, v2.16b
movi v11.8b, #0
movi v9.8b, #0
sub w12, w12, #1
mov v2.16b, v1.16b
movi v10.8b, #0
b.gt Lenc_blocks_3_remaining
mov v3.16b, v1.16b
sub w12, w12, #1
cmp x5, #16
b.gt Lenc_blocks_2_remaining
sub w12, w12, #1
b Lenc_blocks_1_remaining
Lenc_blocks_4_remaining: // blocks left = 4
st1 { v5.16b}, [x2], #16 // AES final-3 block - store result
ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high
rev64 v4.16b, v5.16b // GHASH final-3 block
eor x6, x6, x13 // AES final-2 block - round N low
eor v4.16b, v4.16b, v8.16b // feed in partial tag
eor x7, x7, x14 // AES final-2 block - round N high
mov d22, v4.d[1] // GHASH final-3 block - mid
fmov d5, x6 // AES final-2 block - mov low
fmov v5.d[1], x7 // AES final-2 block - mov high
eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
movi v8.8b, #0 // suppress further partial tag feed in
mov d10, v17.d[1] // GHASH final-3 block - mid
pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
eor v5.16b, v5.16b, v1.16b // AES final-2 block - result
Lenc_blocks_3_remaining: // blocks left = 3
st1 { v5.16b}, [x2], #16 // AES final-2 block - store result
ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high
rev64 v4.16b, v5.16b // GHASH final-2 block
eor x6, x6, x13 // AES final-1 block - round N low
eor v4.16b, v4.16b, v8.16b // feed in partial tag
fmov d5, x6 // AES final-1 block - mov low
eor x7, x7, x14 // AES final-1 block - round N high
fmov v5.d[1], x7 // AES final-1 block - mov high
movi v8.8b, #0 // suppress further partial tag feed in
pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
mov d22, v4.d[1] // GHASH final-2 block - mid
pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
eor v5.16b, v5.16b, v2.16b // AES final-1 block - result
eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
Lenc_blocks_2_remaining: // blocks left = 2
st1 { v5.16b}, [x2], #16 // AES final-1 block - store result
rev64 v4.16b, v5.16b // GHASH final-1 block
ldp x6, x7, [x0], #16 // AES final block - load input low & high
eor v4.16b, v4.16b, v8.16b // feed in partial tag
movi v8.8b, #0 // suppress further partial tag feed in
eor x6, x6, x13 // AES final block - round N low
mov d22, v4.d[1] // GHASH final-1 block - mid
pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
eor x7, x7, x14 // AES final block - round N high
eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
fmov d5, x6 // AES final block - mov low
fmov v5.d[1], x7 // AES final block - mov high
pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
eor v5.16b, v5.16b, v3.16b // AES final block - result
eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
Lenc_blocks_1_remaining: // blocks_left = 1
rev64 v4.16b, v5.16b // GHASH final block
eor v4.16b, v4.16b, v8.16b // feed in partial tag
pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
mov d8, v4.d[1] // GHASH final block - mid
rev w9, w12
pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
eor v9.16b, v9.16b, v20.16b // GHASH final block - high
eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
eor v11.16b, v11.16b, v21.16b // GHASH final block - low
eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
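// Final GHASH reduction. 0xC2 shifted into the top byte gives the 64-bit
// constant 0xC200000000000000, the upper half of the bit-reflected GCM
// reduction polynomial; two pmull-by-constant folds bring the 256-bit
// Karatsuba result back down to a 128-bit tag.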
movi v8.8b, #0xc2
eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
shl d8, d8, #56 // mod_constant
eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
str w9, [x16, #12] // store the updated counter
st1 { v5.16b}, [x2] // store all 16B
eor v11.16b, v11.16b, v9.16b // MODULO - fold into low
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
mov x0, x15
st1 { v11.16b }, [x3]
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]
ldp x23, x24, [sp, #48]
ldp d8, d9, [sp, #64]
ldp d10, d11, [sp, #80]
ldp d12, d13, [sp, #96]
ldp d14, d15, [sp, #112]
ldp x29, x30, [sp], #128
AARCH64_VALIDATE_LINK_REGISTER
ret
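// The decryption kernel below mirrors the structure of the encryption
// kernel, with the same argument registers and four-block AES-CTR
// pipeline; GHASH is computed over the ciphertext as it is loaded, and
// the decrypted 64-bit halves are stored with stp rather than vector
// stores.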
.globl aes_gcm_dec_kernel
.def aes_gcm_dec_kernel
.type 32
.endef
.align 4
aes_gcm_dec_kernel:
AARCH64_SIGN_LINK_REGISTER
stp x29, x30, [sp, #-128]!
mov x29, sp
stp x19, x20, [sp, #16]
mov x16, x4
mov x8, x5
stp x21, x22, [sp, #32]
stp x23, x24, [sp, #48]
stp d8, d9, [sp, #64]
stp d10, d11, [sp, #80]
stp d12, d13, [sp, #96]
stp d14, d15, [sp, #112]
ldr w17, [x8, #240]
add x19, x8, x17, lsl #4 // borrow input_l1 for last key
ldp x13, x14, [x19] // load round N keys
ldr q31, [x19, #-16] // load round N-1 keys
lsr x5, x1, #3 // byte_len
mov x15, x5
ldp x10, x11, [x16] // ctr96_b64, ctr96_t32
ldr q26, [x8, #128] // load rk8
sub x5, x5, #1 // byte_len - 1
ldr q25, [x8, #112] // load rk7
and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail)
add x4, x0, x1, lsr #3 // end_input_ptr
ldr q24, [x8, #96] // load rk6
lsr x12, x11, #32
ldr q23, [x8, #80] // load rk5
orr w11, w11, w11
ldr q21, [x8, #48] // load rk3
add x5, x5, x0
rev w12, w12 // rev_ctr32
add w12, w12, #1 // increment rev_ctr32
fmov d3, x10 // CTR block 3
rev w9, w12 // CTR block 1
add w12, w12, #1 // CTR block 1
fmov d1, x10 // CTR block 1
orr x9, x11, x9, lsl #32 // CTR block 1
ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible
fmov v1.d[1], x9 // CTR block 1
rev w9, w12 // CTR block 2
add w12, w12, #1 // CTR block 2
fmov d2, x10 // CTR block 2
orr x9, x11, x9, lsl #32 // CTR block 2
fmov v2.d[1], x9 // CTR block 2
rev w9, w12 // CTR block 3
orr x9, x11, x9, lsl #32 // CTR block 3
ldr q18, [x8, #0] // load rk0
fmov v3.d[1], x9 // CTR block 3
add w12, w12, #1 // CTR block 3
ldr q22, [x8, #64] // load rk4
ldr q19, [x8, #16] // load rk1
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 0 - round 0
ldr q14, [x6, #48] // load h3l | h3h
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 3 - round 0
ldr q15, [x6, #80] // load h4l | h4h
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 1 - round 0
ldr q13, [x6, #32] // load h2l | h2h
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 2 - round 0
ldr q20, [x8, #32] // load rk2
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 0 - round 1
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 1 - round 1
ld1 { v11.16b}, [x3]
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 2 - round 1
ldr q27, [x8, #144] // load rk9
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 3 - round 1
ldr q30, [x8, #192] // load rk12
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 0 - round 2
ldr q12, [x6] // load h1l | h1h
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 2 - round 2
ldr q28, [x8, #160] // load rk10
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 3 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 0 - round 3
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 1 - round 2
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 3 - round 3
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 0 - round 4
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 2 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 1 - round 3
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 3 - round 4
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 2 - round 4
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 1 - round 4
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 3 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 0 - round 5
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 1 - round 5
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 2 - round 5
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 0 - round 6
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 3 - round 6
cmp x17, #12 // setup flags for AES-128/192/256 check
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 1 - round 6
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 2 - round 6
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 0 - round 7
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 1 - round 7
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 3 - round 7
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 0 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 2 - round 7
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 3 - round 8
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 1 - round 8
ldr q29, [x8, #176] // load rk11
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 2 - round 8
b.lt Ldec_finish_first_blocks // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 0 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 1 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 3 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 2 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 0 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 1 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 3 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 2 - round 10
b.eq Ldec_finish_first_blocks // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 0 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 3 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 1 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 2 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 1 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 0 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 2 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 3 - round 12
Ldec_finish_first_blocks:
cmp x0, x5 // check if we have <= 4 blocks
trn1 v9.2d, v14.2d, v15.2d // h4h | h3h
trn2 v17.2d, v14.2d, v15.2d // h4l | h3l
trn1 v8.2d, v12.2d, v13.2d // h2h | h1h
trn2 v16.2d, v12.2d, v13.2d // h2l | h1l
eor v17.16b, v17.16b, v9.16b // h4k | h3k
aese v1.16b, v31.16b // AES block 1 - round N-1
aese v2.16b, v31.16b // AES block 2 - round N-1
eor v16.16b, v16.16b, v8.16b // h2k | h1k
aese v3.16b, v31.16b // AES block 3 - round N-1
aese v0.16b, v31.16b // AES block 0 - round N-1
b.ge Ldec_tail // handle tail
ldr q4, [x0, #0] // AES block 0 - load ciphertext
ldr q5, [x0, #16] // AES block 1 - load ciphertext
rev w9, w12 // CTR block 4
eor v0.16b, v4.16b, v0.16b // AES block 0 - result
eor v1.16b, v5.16b, v1.16b // AES block 1 - result
rev64 v5.16b, v5.16b // GHASH block 1
ldr q7, [x0, #48] // AES block 3 - load ciphertext
mov x7, v0.d[1] // AES block 0 - mov high
mov x6, v0.d[0] // AES block 0 - mov low
rev64 v4.16b, v4.16b // GHASH block 0
add w12, w12, #1 // CTR block 4
fmov d0, x10 // CTR block 4
orr x9, x11, x9, lsl #32 // CTR block 4
fmov v0.d[1], x9 // CTR block 4
rev w9, w12 // CTR block 5
add w12, w12, #1 // CTR block 5
mov x19, v1.d[0] // AES block 1 - mov low
orr x9, x11, x9, lsl #32 // CTR block 5
mov x20, v1.d[1] // AES block 1 - mov high
eor x7, x7, x14 // AES block 0 - round N high
eor x6, x6, x13 // AES block 0 - round N low
stp x6, x7, [x2], #16 // AES block 0 - store result
fmov d1, x10 // CTR block 5
ldr q6, [x0, #32] // AES block 2 - load ciphertext
add x0, x0, #64 // AES input_ptr update
fmov v1.d[1], x9 // CTR block 5
rev w9, w12 // CTR block 6
add w12, w12, #1 // CTR block 6
eor x19, x19, x13 // AES block 1 - round N low
orr x9, x11, x9, lsl #32 // CTR block 6
eor x20, x20, x14 // AES block 1 - round N high
stp x19, x20, [x2], #16 // AES block 1 - store result
eor v2.16b, v6.16b, v2.16b // AES block 2 - result
cmp x0, x5 // check if we have <= 8 blocks
b.ge Ldec_prepretail // do prepretail
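// Main decryption loop: same interleaving as the encryption path, with
// the GHASH inputs taken from the ciphertext blocks loaded in the
// previous iteration.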
Ldec_main_loop: // main loop start
mov x21, v2.d[0] // AES block 4k+2 - mov low
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
eor v4.16b, v4.16b, v11.16b // PRE 1
rev w9, w12 // CTR block 4k+7
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x23, v3.d[0] // AES block 4k+3 - mov low
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
fmov v3.d[1], x9 // CTR block 4k+7
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
eor x22, x22, x14 // AES block 4k+2 - round N high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
mov d10, v17.d[1] // GHASH block 4k - mid
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
rev64 v6.16b, v6.16b // GHASH block 4k+2
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
eor x21, x21, x13 // AES block 4k+2 - round N low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
rev64 v7.16b, v7.16b // GHASH block 4k+3
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor x23, x23, x13 // AES block 4k+3 - round N low
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
eor x24, x24, x14 // AES block 4k+3 - round N high
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
add w12, w12, #1 // CTR block 4k+7
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
rev w9, w12 // CTR block 4k+8
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
add w12, w12, #1 // CTR block 4k+8
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
orr x9, x11, x9, lsl #32 // CTR block 4k+8
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
movi v8.8b, #0xc2
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
b.lt Ldec_main_loop_continue // branch if AES-128
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
b.eq Ldec_main_loop_continue // branch if AES-192
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Ldec_main_loop_continue:
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext
eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext
ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext
mov x7, v0.d[1] // AES block 4k+4 - mov high
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
add x0, x0, #64 // AES input_ptr update
mov x6, v0.d[0] // AES block 4k+4 - mov low
fmov d0, x10 // CTR block 4k+8
fmov v0.d[1], x9 // CTR block 4k+8
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result
rev w9, w12 // CTR block 4k+9
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+9
cmp x0, x5 // LOOP CONTROL
add w12, w12, #1 // CTR block 4k+9
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
mov x20, v1.d[1] // AES block 4k+5 - mov high
eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
mov x19, v1.d[0] // AES block 4k+5 - mov low
fmov d1, x10 // CTR block 4k+9
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
fmov v1.d[1], x9 // CTR block 4k+9
rev w9, w12 // CTR block 4k+10
add w12, w12, #1 // CTR block 4k+10
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
orr x9, x11, x9, lsl #32 // CTR block 4k+10
rev64 v5.16b, v5.16b // GHASH block 4k+5
eor x20, x20, x14 // AES block 4k+5 - round N high
stp x6, x7, [x2], #16 // AES block 4k+4 - store result
eor x19, x19, x13 // AES block 4k+5 - round N low
stp x19, x20, [x2], #16 // AES block 4k+5 - store result
rev64 v4.16b, v4.16b // GHASH block 4k+4
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
b.lt Ldec_main_loop
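// Pre-pretail for decryption: finish the AES rounds for the last four
// counter blocks and fold the last four ciphertext blocks into the GHASH
// state (with reduction) before entering the tail.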
Ldec_prepretail: // PREPRETAIL
ext v11.16b, v11.16b, v11.16b, #8 // PRE 0
mov x21, v2.d[0] // AES block 4k+2 - mov low
eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result
aese v0.16b, v18.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 0
mov x22, v2.d[1] // AES block 4k+2 - mov high
aese v1.16b, v18.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 0
fmov d2, x10 // CTR block 4k+6
fmov v2.d[1], x9 // CTR block 4k+6
rev w9, w12 // CTR block 4k+7
eor v4.16b, v4.16b, v11.16b // PRE 1
rev64 v6.16b, v6.16b // GHASH block 4k+2
orr x9, x11, x9, lsl #32 // CTR block 4k+7
mov x23, v3.d[0] // AES block 4k+3 - mov low
aese v1.16b, v19.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 1
mov x24, v3.d[1] // AES block 4k+3 - mov high
pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low
mov d8, v4.d[1] // GHASH block 4k - mid
fmov d3, x10 // CTR block 4k+7
pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high
fmov v3.d[1], x9 // CTR block 4k+7
aese v2.16b, v18.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 0
mov d10, v17.d[1] // GHASH block 4k - mid
aese v0.16b, v19.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 1
eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid
pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high
aese v2.16b, v19.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 1
rev64 v7.16b, v7.16b // GHASH block 4k+3
aese v3.16b, v18.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 0
pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high
pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low
aese v3.16b, v19.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 1
mov d4, v5.d[1] // GHASH block 4k+1 - mid
aese v0.16b, v20.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 2
aese v1.16b, v20.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 2
eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low
aese v2.16b, v20.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 2
aese v0.16b, v21.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 3
mov d8, v6.d[1] // GHASH block 4k+2 - mid
aese v3.16b, v20.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 2
eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid
pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low
aese v0.16b, v22.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 4
aese v3.16b, v21.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 3
eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid
pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid
aese v0.16b, v23.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 5
eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low
aese v3.16b, v22.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 4
pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high
eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid
pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high
aese v3.16b, v23.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 5
ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid
aese v2.16b, v21.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 3
aese v1.16b, v21.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 3
eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high
pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low
aese v2.16b, v22.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 4
mov d6, v7.d[1] // GHASH block 4k+3 - mid
aese v1.16b, v22.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 4
pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid
aese v2.16b, v23.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 5
eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid
aese v1.16b, v23.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 5
aese v3.16b, v24.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 6
eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid
aese v2.16b, v24.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 6
aese v0.16b, v24.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 6
movi v8.8b, #0xc2
aese v1.16b, v24.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 6
eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low
pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid
aese v3.16b, v25.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 7
cmp x17, #12 // setup flags for AES-128/192/256 check
eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high
aese v1.16b, v25.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 7
aese v0.16b, v25.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 7
eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid
aese v3.16b, v26.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 8
aese v2.16b, v25.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 7
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
aese v1.16b, v26.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 8
aese v0.16b, v26.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 8
shl d8, d8, #56 // mod_constant
aese v2.16b, v26.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 8
b.lt Ldec_finish_prepretail // branch if AES-128
aese v1.16b, v27.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 9
aese v2.16b, v27.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 9
aese v3.16b, v27.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 9
aese v0.16b, v27.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 9
aese v2.16b, v28.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 10
aese v3.16b, v28.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 10
aese v0.16b, v28.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 10
aese v1.16b, v28.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 10
b.eq Ldec_finish_prepretail // branch if AES-192
aese v2.16b, v29.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 11
aese v0.16b, v29.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 11
aese v1.16b, v29.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 11
aese v2.16b, v30.16b
aesmc v2.16b, v2.16b // AES block 4k+6 - round 12
aese v3.16b, v29.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 11
aese v1.16b, v30.16b
aesmc v1.16b, v1.16b // AES block 4k+5 - round 12
aese v0.16b, v30.16b
aesmc v0.16b, v0.16b // AES block 4k+4 - round 12
aese v3.16b, v30.16b
aesmc v3.16b, v3.16b // AES block 4k+7 - round 12
Ldec_finish_prepretail:
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor x22, x22, x14 // AES block 4k+2 - round N high
eor x23, x23, x13 // AES block 4k+3 - round N low
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
add w12, w12, #1 // CTR block 4k+7
eor x21, x21, x13 // AES block 4k+2 - round N low
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
eor x24, x24, x14 // AES block 4k+3 - round N high
stp x21, x22, [x2], #16 // AES block 4k+2 - store result
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
stp x23, x24, [x2], #16 // AES block 4k+3 - store result
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
aese v1.16b, v31.16b // AES block 4k+5 - round N-1
aese v0.16b, v31.16b // AES block 4k+4 - round N-1
aese v3.16b, v31.16b // AES block 4k+7 - round N-1
aese v2.16b, v31.16b // AES block 4k+6 - round N-1
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
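// Decryption tail: handle the remaining one to four blocks, hashing each
// ciphertext block as it is consumed, exactly as in the encryption tail.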
Ldec_tail: // TAIL
sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process
ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext
eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result
mov x6, v0.d[0] // AES block 4k+4 - mov low
mov x7, v0.d[1] // AES block 4k+4 - mov high
ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag
cmp x5, #48
eor x6, x6, x13 // AES block 4k+4 - round N low
eor x7, x7, x14 // AES block 4k+4 - round N high
b.gt Ldec_blocks_4_remaining
sub w12, w12, #1
mov v3.16b, v2.16b
movi v10.8b, #0
movi v11.8b, #0
cmp x5, #32
movi v9.8b, #0
mov v2.16b, v1.16b
b.gt Ldec_blocks_3_remaining
sub w12, w12, #1
mov v3.16b, v1.16b
cmp x5, #16
b.gt Ldec_blocks_2_remaining
sub w12, w12, #1
b Ldec_blocks_1_remaining
Ldec_blocks_4_remaining: // blocks left = 4
rev64 v4.16b, v5.16b // GHASH final-3 block
ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext
stp x6, x7, [x2], #16 // AES final-3 block - store result
mov d10, v17.d[1] // GHASH final-3 block - mid
eor v4.16b, v4.16b, v8.16b // feed in partial tag
eor v0.16b, v5.16b, v1.16b // AES final-2 block - result
mov d22, v4.d[1] // GHASH final-3 block - mid
mov x6, v0.d[0] // AES final-2 block - mov low
mov x7, v0.d[1] // AES final-2 block - mov high
eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid
movi v8.8b, #0 // suppress further partial tag feed in
pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high
pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid
eor x6, x6, x13 // AES final-2 block - round N low
pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low
eor x7, x7, x14 // AES final-2 block - round N high
Ldec_blocks_3_remaining: // blocks left = 3
rev64 v4.16b, v5.16b // GHASH final-2 block
ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
stp x6, x7, [x2], #16 // AES final-2 block - store result
eor v0.16b, v5.16b, v2.16b // AES final-1 block - result
mov d22, v4.d[1] // GHASH final-2 block - mid
pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low
pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid
mov x6, v0.d[0] // AES final-1 block - mov low
mov x7, v0.d[1] // AES final-1 block - mov high
eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low
movi v8.8b, #0 // suppress further partial tag feed in
pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high
eor x6, x6, x13 // AES final-1 block - round N low
eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid
eor x7, x7, x14 // AES final-1 block - round N high
Ldec_blocks_2_remaining: // blocks left = 2
stp x6, x7, [x2], #16 // AES final-1 block - store result
rev64 v4.16b, v5.16b // GHASH final-1 block
ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext
eor v4.16b, v4.16b, v8.16b // feed in partial tag
movi v8.8b, #0 // suppress further partial tag feed in
mov d22, v4.d[1] // GHASH final-1 block - mid
eor v0.16b, v5.16b, v3.16b // AES final block - result
pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high
eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid
pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low
mov x6, v0.d[0] // AES final block - mov low
ins v22.d[1], v22.d[0] // GHASH final-1 block - mid
mov x7, v0.d[1] // AES final block - mov high
pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid
eor x6, x6, x13 // AES final block - round N low
eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low
eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high
eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid
eor x7, x7, x14 // AES final block - round N high
Ldec_blocks_1_remaining: // blocks left = 1
rev w9, w12
rev64 v4.16b, v5.16b // GHASH final block
eor v4.16b, v4.16b, v8.16b // feed in partial tag
pmull v21.1q, v4.1d, v12.1d // GHASH final block - low
mov d8, v4.d[1] // GHASH final block - mid
eor v8.8b, v8.8b, v4.8b // GHASH final block - mid
pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high
pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid
eor v9.16b, v9.16b, v20.16b // GHASH final block - high
eor v11.16b, v11.16b, v21.16b // GHASH final block - low
eor v10.16b, v10.16b, v8.16b // GHASH final block - mid
movi v8.8b, #0xc2
eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up
shl d8, d8, #56 // mod_constant
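// d8 now holds 0xC200000000000000, the usual reduction constant for GHASH's
// field polynomial x^128 + x^7 + x^2 + x + 1 in the bit-reflected domain used
// by these pmull-based multiplications.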
eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up
pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid
ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment
eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid
eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid
pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low
ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment
eor v11.16b, v11.16b, v8.16b // MODULO - fold into low
stp x6, x7, [x2]
str w9, [x16, #12] // store the updated counter
eor v11.16b, v11.16b, v10.16b // MODULO - fold into low
ext v11.16b, v11.16b, v11.16b, #8
rev64 v11.16b, v11.16b
mov x0, x15
st1 { v11.16b }, [x3]
ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32]
ldp x23, x24, [sp, #48]
ldp d8, d9, [sp, #64]
ldp d10, d11, [sp, #80]
ldp d12, d13, [sp, #96]
ldp d14, d15, [sp, #112]
ldp x29, x30, [sp], #128
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/keccak1600-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
.text
.align 8 // strategic alignment and padding that allow the
 // address value to be used as the loop termination condition...
.quad 0,0,0,0,0,0,0,0
iotas:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808a
.quad 0x8000000080008000
.quad 0x000000000000808b
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008a
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000a
.quad 0x000000008000808b
.quad 0x800000000000008b
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800a
.quad 0x800000008000000a
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
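// The 24 quadwords above are the Keccak-f[1600] iota round constants. The
// zero padding before them is there so that the table ends on a 256-byte
// boundary (64 + 24*8 = 256 bytes); the round loop's 'tst x27,#255' relies on
// the pointer's low byte wrapping to zero to terminate.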
.def KeccakF1600_int
.type 32
.endef
.align 5
KeccakF1600_int:
AARCH64_SIGN_LINK_REGISTER
adr x28,iotas
stp x28,x30,[sp,#16] // 32 bytes on top are mine
b Loop
.align 4
Loop:
////////////////////////////////////////// Theta
eor x26,x0,x5
stp x4,x9,[sp,#0] // offload pair...
eor x27,x1,x6
eor x28,x2,x7
eor x30,x3,x8
eor x4,x4,x9
eor x26,x26,x10
eor x27,x27,x11
eor x28,x28,x12
eor x30,x30,x13
eor x4,x4,x14
eor x26,x26,x15
eor x27,x27,x16
eor x28,x28,x17
eor x30,x30,x25
eor x4,x4,x19
eor x26,x26,x20
eor x28,x28,x22
eor x27,x27,x21
eor x30,x30,x23
eor x4,x4,x24
eor x9,x26,x28,ror#63
eor x1,x1,x9
eor x6,x6,x9
eor x11,x11,x9
eor x16,x16,x9
eor x21,x21,x9
eor x9,x27,x30,ror#63
eor x28,x28,x4,ror#63
eor x30,x30,x26,ror#63
eor x4,x4,x27,ror#63
eor x27, x2,x9 // mov x27,x2
eor x7,x7,x9
eor x12,x12,x9
eor x17,x17,x9
eor x22,x22,x9
eor x0,x0,x4
eor x5,x5,x4
eor x10,x10,x4
eor x15,x15,x4
eor x20,x20,x4
ldp x4,x9,[sp,#0] // re-load offloaded data
eor x26, x3,x28 // mov x26,x3
eor x8,x8,x28
eor x13,x13,x28
eor x25,x25,x28
eor x23,x23,x28
eor x28, x4,x30 // mov x28,x4
eor x9,x9,x30
eor x14,x14,x30
eor x19,x19,x30
eor x24,x24,x30
////////////////////////////////////////// Rho+Pi
mov x30,x1
ror x1,x6,#20
//mov x27,x2
ror x2,x12,#21
//mov x26,x3
ror x3,x25,#43
//mov x28,x4
ror x4,x24,#50
ror x6,x9,#44
ror x12,x13,#39
ror x25,x17,#49
ror x24,x21,#62
ror x9,x22,#3
ror x13,x19,#56
ror x17,x11,#54
ror x21,x8,#9
ror x22,x14,#25
ror x19,x23,#8
ror x11,x7,#58
ror x8,x16,#19
ror x14,x20,#46
ror x23,x15,#23
ror x7,x10,#61
ror x16,x5,#28
ror x5,x26,#36
ror x10,x30,#63
ror x15,x28,#37
ror x20,x27,#2
////////////////////////////////////////// Chi+Iota
bic x26,x2,x1
bic x27,x3,x2
bic x28,x0,x4
bic x30,x1,x0
eor x0,x0,x26
bic x26,x4,x3
eor x1,x1,x27
ldr x27,[sp,#16]
eor x3,x3,x28
eor x4,x4,x30
eor x2,x2,x26
ldr x30,[x27],#8 // Iota[i++]
bic x26,x7,x6
tst x27,#255 // are we done?
str x27,[sp,#16]
bic x27,x8,x7
bic x28,x5,x9
eor x0,x0,x30 // A[0][0] ^= Iota
bic x30,x6,x5
eor x5,x5,x26
bic x26,x9,x8
eor x6,x6,x27
eor x8,x8,x28
eor x9,x9,x30
eor x7,x7,x26
bic x26,x12,x11
bic x27,x13,x12
bic x28,x10,x14
bic x30,x11,x10
eor x10,x10,x26
bic x26,x14,x13
eor x11,x11,x27
eor x13,x13,x28
eor x14,x14,x30
eor x12,x12,x26
bic x26,x17,x16
bic x27,x25,x17
bic x28,x15,x19
bic x30,x16,x15
eor x15,x15,x26
bic x26,x19,x25
eor x16,x16,x27
eor x25,x25,x28
eor x19,x19,x30
eor x17,x17,x26
bic x26,x22,x21
bic x27,x23,x22
bic x28,x20,x24
bic x30,x21,x20
eor x20,x20,x26
bic x26,x24,x23
eor x21,x21,x27
eor x23,x23,x28
eor x24,x24,x30
eor x22,x22,x26
bne Loop
ldr x30,[sp,#24]
AARCH64_VALIDATE_LINK_REGISTER
ret
.def KeccakF1600
.type 32
.endef
.align 5
KeccakF1600:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#48
str x0,[sp,#32] // offload argument
mov x26,x0
ldp x0,x1,[x0,#16*0]
ldp x2,x3,[x26,#16*1]
ldp x4,x5,[x26,#16*2]
ldp x6,x7,[x26,#16*3]
ldp x8,x9,[x26,#16*4]
ldp x10,x11,[x26,#16*5]
ldp x12,x13,[x26,#16*6]
ldp x14,x15,[x26,#16*7]
ldp x16,x17,[x26,#16*8]
ldp x25,x19,[x26,#16*9]
ldp x20,x21,[x26,#16*10]
ldp x22,x23,[x26,#16*11]
ldr x24,[x26,#16*12]
bl KeccakF1600_int
ldr x26,[sp,#32]
stp x0,x1,[x26,#16*0]
stp x2,x3,[x26,#16*1]
stp x4,x5,[x26,#16*2]
stp x6,x7,[x26,#16*3]
stp x8,x9,[x26,#16*4]
stp x10,x11,[x26,#16*5]
stp x12,x13,[x26,#16*6]
stp x14,x15,[x26,#16*7]
stp x16,x17,[x26,#16*8]
stp x25,x19,[x26,#16*9]
stp x20,x21,[x26,#16*10]
stp x22,x23,[x26,#16*11]
str x24,[x26,#16*12]
ldp x19,x20,[x29,#16]
add sp,sp,#48
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl SHA3_Absorb_hw
.def SHA3_Absorb_hw
.type 32
.endef
.align 5
SHA3_Absorb_hw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#64
stp x0,x1,[sp,#32] // offload arguments
stp x2,x3,[sp,#48]
mov x26,x0 // uint64_t A[5][5]
mov x27,x1 // const void *inp
mov x28,x2 // size_t len
mov x30,x3 // size_t bsz
ldp x0,x1,[x26,#16*0]
ldp x2,x3,[x26,#16*1]
ldp x4,x5,[x26,#16*2]
ldp x6,x7,[x26,#16*3]
ldp x8,x9,[x26,#16*4]
ldp x10,x11,[x26,#16*5]
ldp x12,x13,[x26,#16*6]
ldp x14,x15,[x26,#16*7]
ldp x16,x17,[x26,#16*8]
ldp x25,x19,[x26,#16*9]
ldp x20,x21,[x26,#16*10]
ldp x22,x23,[x26,#16*11]
ldr x24,[x26,#16*12]
b Loop_absorb
.align 4
Loop_absorb:
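// One iteration absorbs one bsz-byte block: XOR bsz/8 input words into the
// leading state lanes, run the permutation, and repeat while at least bsz
// bytes remain. The leftover byte count ends up in x28 and is returned.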
subs x26,x28,x30 // len - bsz
blo Labsorbed
str x26,[sp,#48] // save len - bsz
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x0,x0,x26
cmp x30,#8*(0+2)
blo Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x1,x1,x26
beq Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x2,x2,x26
cmp x30,#8*(2+2)
blo Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x3,x3,x26
beq Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x4,x4,x26
cmp x30,#8*(4+2)
blo Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x5,x5,x26
beq Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x6,x6,x26
cmp x30,#8*(6+2)
blo Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x7,x7,x26
beq Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x8,x8,x26
cmp x30,#8*(8+2)
blo Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x9,x9,x26
beq Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x10,x10,x26
cmp x30,#8*(10+2)
blo Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x11,x11,x26
beq Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x12,x12,x26
cmp x30,#8*(12+2)
blo Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x13,x13,x26
beq Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x14,x14,x26
cmp x30,#8*(14+2)
blo Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x15,x15,x26
beq Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x16,x16,x26
cmp x30,#8*(16+2)
blo Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x17,x17,x26
beq Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x25,x25,x26
cmp x30,#8*(18+2)
blo Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x19,x19,x26
beq Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x20,x20,x26
cmp x30,#8*(20+2)
blo Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x21,x21,x26
beq Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x22,x22,x26
cmp x30,#8*(22+2)
blo Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x23,x23,x26
beq Lprocess_block
ldr x26,[x27],#8 // *inp++
#ifdef __AARCH64EB__
rev x26,x26
#endif
eor x24,x24,x26
Lprocess_block:
str x27,[sp,#40] // save inp
bl KeccakF1600_int
ldr x27,[sp,#40] // restore arguments
ldp x28,x30,[sp,#48]
b Loop_absorb
.align 4
Labsorbed:
ldr x27,[sp,#32]
stp x0,x1,[x27,#16*0]
stp x2,x3,[x27,#16*1]
stp x4,x5,[x27,#16*2]
stp x6,x7,[x27,#16*3]
stp x8,x9,[x27,#16*4]
stp x10,x11,[x27,#16*5]
stp x12,x13,[x27,#16*6]
stp x14,x15,[x27,#16*7]
stp x16,x17,[x27,#16*8]
stp x25,x19,[x27,#16*9]
stp x20,x21,[x27,#16*10]
stp x22,x23,[x27,#16*11]
str x24,[x27,#16*12]
mov x0,x28 // return value
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#128
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl SHA3_Squeeze_hw
.def SHA3_Squeeze_hw
.type 32
.endef
.align 5
SHA3_Squeeze_hw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-48]!
add x29,sp,#0
cmp x2,#0
beq Lsqueeze_abort
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
mov x19,x0 // put aside arguments
mov x20,x1
mov x21,x2
mov x22,x3
cmp x4, #0 // x4 = 'padded' argument; if !=0, perform Keccak first
bne Lnext_block
Loop_squeeze:
ldr x4,[x0],#8
cmp x21,#8
blo Lsqueeze_tail
#ifdef __AARCH64EB__
rev x4,x4
#endif
str x4,[x20],#8
subs x21,x21,#8
beq Lsqueeze_done
subs x3,x3,#8
bhi Loop_squeeze
Lnext_block:
mov x0,x19
bl KeccakF1600
mov x0,x19
mov x3,x22
b Loop_squeeze
.align 4
Lsqueeze_tail:
strb w4,[x20],#1
lsr x4,x4,#8
subs x21,x21,#1
beq Lsqueeze_done
strb w4,[x20],#1
lsr x4,x4,#8
subs x21,x21,#1
beq Lsqueeze_done
strb w4,[x20],#1
lsr x4,x4,#8
subs x21,x21,#1
beq Lsqueeze_done
strb w4,[x20],#1
lsr x4,x4,#8
subs x21,x21,#1
beq Lsqueeze_done
strb w4,[x20],#1
lsr x4,x4,#8
subs x21,x21,#1
beq Lsqueeze_done
strb w4,[x20],#1
lsr x4,x4,#8
subs x21,x21,#1
beq Lsqueeze_done
strb w4,[x20],#1
Lsqueeze_done:
ldp x19,x20,[sp,#16]
ldp x21,x22,[sp,#32]
Lsqueeze_abort:
ldp x29,x30,[sp],#48
AARCH64_VALIDATE_LINK_REGISTER
ret
.def KeccakF1600_ce
.type 32
.endef
.align 5
KeccakF1600_ce:
mov x9,#24
adr x10,iotas
b Loop_ce
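// The .long directives below hand-encode the ARMv8.2 SHA-3 extension
// instructions (eor3, rax1, xar, bcax); the intended mnemonic and operands
// are given in each trailing comment.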
.align 4
Loop_ce:
////////////////////////////////////////////////// Theta
.long 0xce0f2a99 //eor3 v25.16b,v20.16b,v15.16b,v10.16b
.long 0xce102eba //eor3 v26.16b,v21.16b,v16.16b,v11.16b
.long 0xce1132db //eor3 v27.16b,v22.16b,v17.16b,v12.16b
.long 0xce1236fc //eor3 v28.16b,v23.16b,v18.16b,v13.16b
.long 0xce133b1d //eor3 v29.16b,v24.16b,v19.16b,v14.16b
.long 0xce050339 //eor3 v25.16b,v25.16b, v5.16b,v0.16b
.long 0xce06075a //eor3 v26.16b,v26.16b, v6.16b,v1.16b
.long 0xce070b7b //eor3 v27.16b,v27.16b, v7.16b,v2.16b
.long 0xce080f9c //eor3 v28.16b,v28.16b, v8.16b,v3.16b
.long 0xce0913bd //eor3 v29.16b,v29.16b, v9.16b,v4.16b
.long 0xce7b8f3e //rax1 v30.16b,v25.16b,v27.16b // D[1]
.long 0xce7c8f5f //rax1 v31.16b,v26.16b,v28.16b // D[2]
.long 0xce7d8f7b //rax1 v27.16b,v27.16b,v29.16b // D[3]
.long 0xce798f9c //rax1 v28.16b,v28.16b,v25.16b // D[4]
.long 0xce7a8fbd //rax1 v29.16b,v29.16b,v26.16b // D[0]
////////////////////////////////////////////////// Theta+Rho+Pi
.long 0xce9efc39 //xar v25.16b, v1.16b,v30.16b,#63 // C[0]=A[2][0]
.long 0xce9e50c1 //xar v1.16b,v6.16b,v30.16b,#20
.long 0xce9cb126 //xar v6.16b,v9.16b,v28.16b,#44
.long 0xce9f0ec9 //xar v9.16b,v22.16b,v31.16b,#3
.long 0xce9c65d6 //xar v22.16b,v14.16b,v28.16b,#25
.long 0xce9dba8e //xar v14.16b,v20.16b,v29.16b,#46
.long 0xce9f085a //xar v26.16b, v2.16b,v31.16b,#2 // C[1]=A[4][0]
.long 0xce9f5582 //xar v2.16b,v12.16b,v31.16b,#21
.long 0xce9b9dac //xar v12.16b,v13.16b,v27.16b,#39
.long 0xce9ce26d //xar v13.16b,v19.16b,v28.16b,#56
.long 0xce9b22f3 //xar v19.16b,v23.16b,v27.16b,#8
.long 0xce9d5df7 //xar v23.16b,v15.16b,v29.16b,#23
.long 0xce9c948f //xar v15.16b,v4.16b,v28.16b,#37
.long 0xce9ccb1c //xar v28.16b, v24.16b,v28.16b,#50 // D[4]=A[0][4]
.long 0xce9efab8 //xar v24.16b,v21.16b,v30.16b,#62
.long 0xce9b2508 //xar v8.16b,v8.16b,v27.16b,#9 // A[1][3]=A[4][1]
.long 0xce9e4e04 //xar v4.16b,v16.16b,v30.16b,#19 // A[0][4]=A[1][3]
.long 0xce9d70b0 //xar v16.16b,v5.16b,v29.16b,#28
.long 0xce9b9065 //xar v5.16b,v3.16b,v27.16b,#36
eor v0.16b,v0.16b,v29.16b
.long 0xce9bae5b //xar v27.16b, v18.16b,v27.16b,#43 // D[3]=A[0][3]
.long 0xce9fc623 //xar v3.16b,v17.16b,v31.16b,#49 // A[0][3]=A[3][3]
.long 0xce9ed97e //xar v30.16b, v11.16b,v30.16b,#54 // D[1]=A[3][2]
.long 0xce9fe8ff //xar v31.16b, v7.16b,v31.16b,#58 // D[2]=A[2][1]
.long 0xce9df55d //xar v29.16b, v10.16b,v29.16b,#61 // D[0]=A[1][2]
////////////////////////////////////////////////// Chi+Iota
.long 0xce362354 //bcax v20.16b,v26.16b, v22.16b,v8.16b // A[1][3]=A[4][1]
.long 0xce375915 //bcax v21.16b,v8.16b,v23.16b,v22.16b // A[1][3]=A[4][1]
.long 0xce385ed6 //bcax v22.16b,v22.16b,v24.16b,v23.16b
.long 0xce3a62f7 //bcax v23.16b,v23.16b,v26.16b, v24.16b
.long 0xce286b18 //bcax v24.16b,v24.16b,v8.16b,v26.16b // A[1][3]=A[4][1]
ld1r {v26.2d},[x10],#8
.long 0xce330fd1 //bcax v17.16b,v30.16b, v19.16b,v3.16b // A[0][3]=A[3][3]
.long 0xce2f4c72 //bcax v18.16b,v3.16b,v15.16b,v19.16b // A[0][3]=A[3][3]
.long 0xce303e73 //bcax v19.16b,v19.16b,v16.16b,v15.16b
.long 0xce3e41ef //bcax v15.16b,v15.16b,v30.16b, v16.16b
.long 0xce237a10 //bcax v16.16b,v16.16b,v3.16b,v30.16b // A[0][3]=A[3][3]
.long 0xce2c7f2a //bcax v10.16b,v25.16b, v12.16b,v31.16b
.long 0xce2d33eb //bcax v11.16b,v31.16b, v13.16b,v12.16b
.long 0xce2e358c //bcax v12.16b,v12.16b,v14.16b,v13.16b
.long 0xce3939ad //bcax v13.16b,v13.16b,v25.16b, v14.16b
.long 0xce3f65ce //bcax v14.16b,v14.16b,v31.16b, v25.16b
.long 0xce2913a7 //bcax v7.16b,v29.16b, v9.16b,v4.16b // A[0][4]=A[1][3]
.long 0xce252488 //bcax v8.16b,v4.16b,v5.16b,v9.16b // A[0][4]=A[1][3]
.long 0xce261529 //bcax v9.16b,v9.16b,v6.16b,v5.16b
.long 0xce3d18a5 //bcax v5.16b,v5.16b,v29.16b, v6.16b
.long 0xce2474c6 //bcax v6.16b,v6.16b,v4.16b,v29.16b // A[0][4]=A[1][3]
.long 0xce207363 //bcax v3.16b,v27.16b, v0.16b,v28.16b
.long 0xce210384 //bcax v4.16b,v28.16b, v1.16b,v0.16b
.long 0xce220400 //bcax v0.16b,v0.16b,v2.16b,v1.16b
.long 0xce3b0821 //bcax v1.16b,v1.16b,v27.16b, v2.16b
.long 0xce3c6c42 //bcax v2.16b,v2.16b,v28.16b, v27.16b
eor v0.16b,v0.16b,v26.16b
subs x9,x9,#1
bne Loop_ce
ret
.def KeccakF1600_cext
.type 32
.endef
.align 5
KeccakF1600_cext:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-80]!
add x29,sp,#0
stp d8,d9,[sp,#16] // per ABI requirement
stp d10,d11,[sp,#32]
stp d12,d13,[sp,#48]
stp d14,d15,[sp,#64]
ldp d0,d1,[x0,#8*0]
ldp d2,d3,[x0,#8*2]
ldp d4,d5,[x0,#8*4]
ldp d6,d7,[x0,#8*6]
ldp d8,d9,[x0,#8*8]
ldp d10,d11,[x0,#8*10]
ldp d12,d13,[x0,#8*12]
ldp d14,d15,[x0,#8*14]
ldp d16,d17,[x0,#8*16]
ldp d18,d19,[x0,#8*18]
ldp d20,d21,[x0,#8*20]
ldp d22,d23,[x0,#8*22]
ldr d24,[x0,#8*24]
bl KeccakF1600_ce
ldr x30,[sp,#8]
stp d0,d1,[x0,#8*0]
stp d2,d3,[x0,#8*2]
stp d4,d5,[x0,#8*4]
stp d6,d7,[x0,#8*6]
stp d8,d9,[x0,#8*8]
stp d10,d11,[x0,#8*10]
stp d12,d13,[x0,#8*12]
stp d14,d15,[x0,#8*14]
stp d16,d17,[x0,#8*16]
stp d18,d19,[x0,#8*18]
stp d20,d21,[x0,#8*20]
stp d22,d23,[x0,#8*22]
str d24,[x0,#8*24]
ldp d8,d9,[sp,#16]
ldp d10,d11,[sp,#32]
ldp d12,d13,[sp,#48]
ldp d14,d15,[sp,#64]
ldr x29,[sp],#80
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl SHA3_Absorb_cext
.def SHA3_Absorb_cext
.type 32
.endef
.align 5
SHA3_Absorb_cext:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-80]!
add x29,sp,#0
stp d8,d9,[sp,#16] // per ABI requirement
stp d10,d11,[sp,#32]
stp d12,d13,[sp,#48]
stp d14,d15,[sp,#64]
ldp d0,d1,[x0,#8*0]
ldp d2,d3,[x0,#8*2]
ldp d4,d5,[x0,#8*4]
ldp d6,d7,[x0,#8*6]
ldp d8,d9,[x0,#8*8]
ldp d10,d11,[x0,#8*10]
ldp d12,d13,[x0,#8*12]
ldp d14,d15,[x0,#8*14]
ldp d16,d17,[x0,#8*16]
ldp d18,d19,[x0,#8*18]
ldp d20,d21,[x0,#8*20]
ldp d22,d23,[x0,#8*22]
ldr d24,[x0,#8*24]
b Loop_absorb_ce
.align 4
Loop_absorb_ce:
subs x2,x2,x3 // len - bsz
blo Labsorbed_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v0.16b,v0.16b,v31.16b
cmp x3,#8*(0+2)
blo Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v1.16b,v1.16b,v31.16b
beq Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v2.16b,v2.16b,v31.16b
cmp x3,#8*(2+2)
blo Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v3.16b,v3.16b,v31.16b
beq Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v4.16b,v4.16b,v31.16b
cmp x3,#8*(4+2)
blo Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v5.16b,v5.16b,v31.16b
beq Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v6.16b,v6.16b,v31.16b
cmp x3,#8*(6+2)
blo Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v7.16b,v7.16b,v31.16b
beq Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v8.16b,v8.16b,v31.16b
cmp x3,#8*(8+2)
blo Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v9.16b,v9.16b,v31.16b
beq Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v10.16b,v10.16b,v31.16b
cmp x3,#8*(10+2)
blo Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v11.16b,v11.16b,v31.16b
beq Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v12.16b,v12.16b,v31.16b
cmp x3,#8*(12+2)
blo Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v13.16b,v13.16b,v31.16b
beq Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v14.16b,v14.16b,v31.16b
cmp x3,#8*(14+2)
blo Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v15.16b,v15.16b,v31.16b
beq Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v16.16b,v16.16b,v31.16b
cmp x3,#8*(16+2)
blo Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v17.16b,v17.16b,v31.16b
beq Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v18.16b,v18.16b,v31.16b
cmp x3,#8*(18+2)
blo Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v19.16b,v19.16b,v31.16b
beq Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v20.16b,v20.16b,v31.16b
cmp x3,#8*(20+2)
blo Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v21.16b,v21.16b,v31.16b
beq Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v22.16b,v22.16b,v31.16b
cmp x3,#8*(22+2)
blo Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v23.16b,v23.16b,v31.16b
beq Lprocess_block_ce
ldr d31,[x1],#8 // *inp++
#ifdef __AARCH64EB__
rev64 v31.16b,v31.16b
#endif
eor v24.16b,v24.16b,v31.16b
Lprocess_block_ce:
bl KeccakF1600_ce
b Loop_absorb_ce
.align 4
Labsorbed_ce:
stp d0,d1,[x0,#8*0]
stp d2,d3,[x0,#8*2]
stp d4,d5,[x0,#8*4]
stp d6,d7,[x0,#8*6]
stp d8,d9,[x0,#8*8]
stp d10,d11,[x0,#8*10]
stp d12,d13,[x0,#8*12]
stp d14,d15,[x0,#8*14]
stp d16,d17,[x0,#8*16]
stp d18,d19,[x0,#8*18]
stp d20,d21,[x0,#8*20]
stp d22,d23,[x0,#8*22]
str d24,[x0,#8*24]
add x0,x2,x3 // return value
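// (x2 still holds the wrapped len - bsz from the final subs, so this adds
// bsz back and returns the number of unabsorbed leftover bytes.)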
ldp d8,d9,[sp,#16]
ldp d10,d11,[sp,#32]
ldp d12,d13,[sp,#48]
ldp d14,d15,[sp,#64]
ldp x29,x30,[sp],#80
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl SHA3_Squeeze_cext
.def SHA3_Squeeze_cext
.type 32
.endef
.align 5
SHA3_Squeeze_cext:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-16]!
add x29,sp,#0
cmp x2,#0
beq Lsqueeze_done_ce
mov x9,x0
mov x10,x3
Loop_squeeze_ce:
ldr x4,[x9],#8
cmp x2,#8
blo Lsqueeze_tail_ce
#ifdef __AARCH64EB__
rev x4,x4
#endif
str x4,[x1],#8
beq Lsqueeze_done_ce
sub x2,x2,#8
subs x10,x10,#8
bhi Loop_squeeze_ce
bl KeccakF1600_cext
ldr x30,[sp,#8]
mov x9,x0
mov x10,x3
b Loop_squeeze_ce
.align 4
Lsqueeze_tail_ce:
strb w4,[x1],#1
lsr x4,x4,#8
subs x2,x2,#1
beq Lsqueeze_done_ce
strb w4,[x1],#1
lsr x4,x4,#8
subs x2,x2,#1
beq Lsqueeze_done_ce
strb w4,[x1],#1
lsr x4,x4,#8
subs x2,x2,#1
beq Lsqueeze_done_ce
strb w4,[x1],#1
lsr x4,x4,#8
subs x2,x2,#1
beq Lsqueeze_done_ce
strb w4,[x1],#1
lsr x4,x4,#8
subs x2,x2,#1
beq Lsqueeze_done_ce
strb w4,[x1],#1
lsr x4,x4,#8
subs x2,x2,#1
beq Lsqueeze_done_ce
strb w4,[x1],#1
Lsqueeze_done_ce:
ldr x29,[sp],#16
AARCH64_VALIDATE_LINK_REGISTER
ret
.byte 75,101,99,99,97,107,45,49,54,48,48,32,97,98,115,111,114,98,32,97,110,100,32,115,113,117,101,101,122,101,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/bn-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
.text
// BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
// size_t num);
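//
// Rough C sketch of the semantics (editorial; BN_ULONG is assumed here to be
// the 64-bit unsigned word this aarch64 build uses):
//
//   BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap,
//                         const BN_ULONG *bp, size_t num) {
//     BN_ULONG c = 0;                  // running carry, 0 or 1
//     for (size_t i = 0; i < num; i++) {
//       BN_ULONG t = ap[i] + c;
//       c = (t < c);                   // carry out of ap[i] + c
//       rp[i] = t + bp[i];
//       c += (rp[i] < t);              // carry out of t + bp[i]
//     }
//     return c;                        // final carry
//   }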
.globl bn_add_words
.align 4
bn_add_words:
AARCH64_VALID_CALL_TARGET
# Clear the carry flag.
cmn xzr, xzr
# aarch64 can load two registers at a time, so we do two loop iterations
# at a time. Split x3 = 2 * x8 + x3. This allows loop
# operations to use CBNZ without clobbering the carry flag.
lsr x8, x3, #1
and x3, x3, #1
cbz x8, Ladd_tail
Ladd_loop:
ldp x4, x5, [x1], #16
ldp x6, x7, [x2], #16
sub x8, x8, #1
adcs x4, x4, x6
adcs x5, x5, x7
stp x4, x5, [x0], #16
cbnz x8, Ladd_loop
Ladd_tail:
cbz x3, Ladd_exit
ldr x4, [x1], #8
ldr x6, [x2], #8
adcs x4, x4, x6
str x4, [x0], #8
Ladd_exit:
cset x0, cs
ret
// BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
// size_t num);
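//
// This is the subtraction counterpart: rp[i] = ap[i] - bp[i] - borrow, and the
// return value is the final borrow (1 when ap < bp as num-word integers).
// AArch64 inverts the borrow in the carry flag, hence 'cset x0, cc' below.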
.globl bn_sub_words
.align 4
bn_sub_words:
AARCH64_VALID_CALL_TARGET
# Set the carry flag. Arm's borrow bit is flipped from the carry flag,
# so we want C = 1 here.
cmp xzr, xzr
# aarch64 can load two registers at a time, so we do two loop iterations
# at a time. Split x3 = 2 * x8 + x3. This allows loop
# operations to use CBNZ without clobbering the carry flag.
lsr x8, x3, #1
and x3, x3, #1
cbz x8, Lsub_tail
Lsub_loop:
ldp x4, x5, [x1], #16
ldp x6, x7, [x2], #16
sub x8, x8, #1
sbcs x4, x4, x6
sbcs x5, x5, x7
stp x4, x5, [x0], #16
cbnz x8, Lsub_loop
Lsub_tail:
cbz x3, Lsub_exit
ldr x4, [x1], #8
ldr x6, [x2], #8
sbcs x4, x4, x6
str x4, [x0], #8
Lsub_exit:
cset x0, cc
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/armv8-mont.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
.text
.globl bn_mul_mont
.def bn_mul_mont
.type 32
.endef
.align 5
bn_mul_mont:
AARCH64_SIGN_LINK_REGISTER
tst x5,#7
b.eq __bn_sqr8x_mont
tst x5,#3
b.eq __bn_mul4x_mont
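// Dispatch: a word count divisible by 8 takes the sqr8x path, one divisible
// by 4 takes the mul4x path, and anything else falls through to the generic
// word-by-word Montgomery multiplication below.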
Lmul_mont:
stp x29,x30,[sp,#-64]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
ldr x9,[x2],#8 // bp[0]
sub x22,sp,x5,lsl#3
ldp x7,x8,[x1],#16 // ap[0..1]
lsl x5,x5,#3
ldr x4,[x4] // *n0
and x22,x22,#-16 // ABI says so
ldp x13,x14,[x3],#16 // np[0..1]
mul x6,x7,x9 // ap[0]*bp[0]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
mul x10,x8,x9 // ap[1]*bp[0]
umulh x11,x8,x9
mul x15,x6,x4 // "tp[0]"*n0
mov sp,x22 // alloca
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6 // discarded
// (*) As for the removal of the first multiplication and addition
//	instructions: the outcome of the first addition is
//	guaranteed to be zero, which leaves two computationally
//	significant outcomes: it either carries or it does not. So
//	when does it carry, and is there another way to deduce it?
//	If you follow the operations, you can observe that the
//	condition for a carry is quite simple: x6 being non-zero.
//	The carry can therefore be calculated by adding -1 to x6,
//	which is what the next instruction does.
subs xzr,x6,#1 // (*)
umulh x17,x14,x15
adc x13,x13,xzr
cbz x21,L1st_skip
L1st:
ldr x8,[x1],#8
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
ldr x14,[x3],#8
adds x12,x16,x13
mul x10,x8,x9 // ap[j]*bp[0]
adc x13,x17,xzr
umulh x11,x8,x9
adds x12,x12,x6
mul x16,x14,x15 // np[j]*m1
adc x13,x13,xzr
umulh x17,x14,x15
str x12,[x22],#8 // tp[j-1]
cbnz x21,L1st
L1st_skip:
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adc x13,x17,xzr
adds x12,x12,x6
sub x20,x5,#8 // i=num-1
adcs x13,x13,x7
adc x19,xzr,xzr // upmost overflow bit
stp x12,x13,[x22]
Louter:
ldr x9,[x2],#8 // bp[i]
ldp x7,x8,[x1],#16
ldr x23,[sp] // tp[0]
add x22,sp,#8
mul x6,x7,x9 // ap[0]*bp[i]
sub x21,x5,#16 // j=num-2
umulh x7,x7,x9
ldp x13,x14,[x3],#16
mul x10,x8,x9 // ap[1]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x15,x6,x4
sub x20,x20,#8 // i--
// (*) mul x12,x13,x15 // np[0]*m1
umulh x13,x13,x15
mul x16,x14,x15 // np[1]*m1
// (*) adds x12,x12,x6
subs xzr,x6,#1 // (*)
umulh x17,x14,x15
cbz x21,Linner_skip
Linner:
ldr x8,[x1],#8
adc x13,x13,xzr
ldr x23,[x22],#8 // tp[j]
adds x6,x10,x7
sub x21,x21,#8 // j--
adc x7,x11,xzr
adds x12,x16,x13
ldr x14,[x3],#8
adc x13,x17,xzr
mul x10,x8,x9 // ap[j]*bp[i]
adds x6,x6,x23
umulh x11,x8,x9
adc x7,x7,xzr
mul x16,x14,x15 // np[j]*m1
adds x12,x12,x6
umulh x17,x14,x15
str x12,[x22,#-16] // tp[j-1]
cbnz x21,Linner
Linner_skip:
ldr x23,[x22],#8 // tp[j]
adc x13,x13,xzr
adds x6,x10,x7
sub x1,x1,x5 // rewind x1
adc x7,x11,xzr
adds x12,x16,x13
sub x3,x3,x5 // rewind x3
adcs x13,x17,x19
adc x19,xzr,xzr
adds x6,x6,x23
adc x7,x7,xzr
adds x12,x12,x6
adcs x13,x13,x7
adc x19,x19,xzr // upmost overflow bit
stp x12,x13,[x22,#-16]
cbnz x20,Louter
// Final step. We see if result is larger than modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction. So we subtract modulus, see if it borrowed,
// and conditionally copy original value.
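// Concretely: Lsub stores tp - np into rp word by word, and Lcond_copy then
// uses csel with the 'lo' condition to put the original tp value back
// wherever the subtraction borrowed, i.e. rp = (tp < np) ? tp : tp - np,
// with no data-dependent branch.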
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x14,[x3],#8 // np[0]
subs x21,x5,#8 // j=num-1 and clear borrow
mov x1,x0
Lsub:
sbcs x8,x23,x14 // tp[j]-np[j]
ldr x23,[x22],#8
sub x21,x21,#8 // j--
ldr x14,[x3],#8
str x8,[x1],#8 // rp[j]=tp[j]-np[j]
cbnz x21,Lsub
sbcs x8,x23,x14
sbcs x19,x19,xzr // did it borrow?
str x8,[x1],#8 // rp[num-1]
ldr x23,[sp] // tp[0]
add x22,sp,#8
ldr x8,[x0],#8 // rp[0]
sub x5,x5,#8 // num--
nop
Lcond_copy:
sub x5,x5,#8 // num--
csel x14,x23,x8,lo // did it borrow?
ldr x23,[x22],#8
ldr x8,[x0],#8
str xzr,[x22,#-16] // wipe tp
str x14,[x0,#-16]
cbnz x5,Lcond_copy
csel x14,x23,x8,lo
str xzr,[x22,#-8] // wipe tp
str x14,[x0,#-8]
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldr x29,[sp],#64
AARCH64_VALIDATE_LINK_REGISTER
ret
.def __bn_sqr8x_mont
.type 32
.endef
.align 5
__bn_sqr8x_mont:
// Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_sqr8x_mont is jumped to
// only from bn_mul_mont which has already signed the return address.
cmp x1,x2
b.ne __bn_mul4x_mont
Lsqr8x_mont:
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
stp x0,x3,[sp,#96] // offload rp and np
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
ldp x12,x13,[x1,#8*6]
sub x2,sp,x5,lsl#4
lsl x5,x5,#3
ldr x4,[x4] // *n0
mov sp,x2 // alloca
sub x27,x5,#8*8
b Lsqr8x_zero_start
Lsqr8x_zero:
sub x27,x27,#8*8
stp xzr,xzr,[x2,#8*0]
stp xzr,xzr,[x2,#8*2]
stp xzr,xzr,[x2,#8*4]
stp xzr,xzr,[x2,#8*6]
Lsqr8x_zero_start:
stp xzr,xzr,[x2,#8*8]
stp xzr,xzr,[x2,#8*10]
stp xzr,xzr,[x2,#8*12]
stp xzr,xzr,[x2,#8*14]
add x2,x2,#8*16
cbnz x27,Lsqr8x_zero
add x3,x1,x5
add x1,x1,#8*8
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
mov x23,xzr
mov x24,xzr
mov x25,xzr
mov x26,xzr
mov x2,sp
str x4,[x29,#112] // offload n0
// Multiply everything but a[i]*a[i]
.align 4
Lsqr8x_outer_loop:
// a[1]a[0] (i)
// a[2]a[0]
// a[3]a[0]
// a[4]a[0]
// a[5]a[0]
// a[6]a[0]
// a[7]a[0]
// a[2]a[1] (ii)
// a[3]a[1]
// a[4]a[1]
// a[5]a[1]
// a[6]a[1]
// a[7]a[1]
// a[3]a[2] (iii)
// a[4]a[2]
// a[5]a[2]
// a[6]a[2]
// a[7]a[2]
// a[4]a[3] (iv)
// a[5]a[3]
// a[6]a[3]
// a[7]a[3]
// a[5]a[4] (v)
// a[6]a[4]
// a[7]a[4]
// a[6]a[5] (vi)
// a[7]a[5]
// a[7]a[6] (vii)
mul x14,x7,x6 // lo(a[1..7]*a[0]) (i)
mul x15,x8,x6
mul x16,x9,x6
mul x17,x10,x6
adds x20,x20,x14 // t[1]+lo(a[1]*a[0])
mul x14,x11,x6
adcs x21,x21,x15
mul x15,x12,x6
adcs x22,x22,x16
mul x16,x13,x6
adcs x23,x23,x17
umulh x17,x7,x6 // hi(a[1..7]*a[0])
adcs x24,x24,x14
umulh x14,x8,x6
adcs x25,x25,x15
umulh x15,x9,x6
adcs x26,x26,x16
umulh x16,x10,x6
stp x19,x20,[x2],#8*2 // t[0..1]
adc x19,xzr,xzr // t[8]
adds x21,x21,x17 // t[2]+lo(a[1]*a[0])
umulh x17,x11,x6
adcs x22,x22,x14
umulh x14,x12,x6
adcs x23,x23,x15
umulh x15,x13,x6
adcs x24,x24,x16
mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii)
adcs x25,x25,x17
mul x17,x9,x7
adcs x26,x26,x14
mul x14,x10,x7
adc x19,x19,x15
mul x15,x11,x7
adds x22,x22,x16
mul x16,x12,x7
adcs x23,x23,x17
mul x17,x13,x7
adcs x24,x24,x14
umulh x14,x8,x7 // hi(a[2..7]*a[1])
adcs x25,x25,x15
umulh x15,x9,x7
adcs x26,x26,x16
umulh x16,x10,x7
adcs x19,x19,x17
umulh x17,x11,x7
stp x21,x22,[x2],#8*2 // t[2..3]
adc x20,xzr,xzr // t[9]
adds x23,x23,x14
umulh x14,x12,x7
adcs x24,x24,x15
umulh x15,x13,x7
adcs x25,x25,x16
mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii)
adcs x26,x26,x17
mul x17,x10,x8
adcs x19,x19,x14
mul x14,x11,x8
adc x20,x20,x15
mul x15,x12,x8
adds x24,x24,x16
mul x16,x13,x8
adcs x25,x25,x17
umulh x17,x9,x8 // hi(a[3..7]*a[2])
adcs x26,x26,x14
umulh x14,x10,x8
adcs x19,x19,x15
umulh x15,x11,x8
adcs x20,x20,x16
umulh x16,x12,x8
stp x23,x24,[x2],#8*2 // t[4..5]
adc x21,xzr,xzr // t[10]
adds x25,x25,x17
umulh x17,x13,x8
adcs x26,x26,x14
mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv)
adcs x19,x19,x15
mul x15,x11,x9
adcs x20,x20,x16
mul x16,x12,x9
adc x21,x21,x17
mul x17,x13,x9
adds x26,x26,x14
umulh x14,x10,x9 // hi(a[4..7]*a[3])
adcs x19,x19,x15
umulh x15,x11,x9
adcs x20,x20,x16
umulh x16,x12,x9
adcs x21,x21,x17
umulh x17,x13,x9
stp x25,x26,[x2],#8*2 // t[6..7]
adc x22,xzr,xzr // t[11]
adds x19,x19,x14
mul x14,x11,x10 // lo(a[5..7]*a[4]) (v)
adcs x20,x20,x15
mul x15,x12,x10
adcs x21,x21,x16
mul x16,x13,x10
adc x22,x22,x17
umulh x17,x11,x10 // hi(a[5..7]*a[4])
adds x20,x20,x14
umulh x14,x12,x10
adcs x21,x21,x15
umulh x15,x13,x10
adcs x22,x22,x16
mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi)
adc x23,xzr,xzr // t[12]
adds x21,x21,x17
mul x17,x13,x11
adcs x22,x22,x14
umulh x14,x12,x11 // hi(a[6..7]*a[5])
adc x23,x23,x15
umulh x15,x13,x11
adds x22,x22,x16
mul x16,x13,x12 // lo(a[7]*a[6]) (vii)
adcs x23,x23,x17
umulh x17,x13,x12 // hi(a[7]*a[6])
adc x24,xzr,xzr // t[13]
adds x23,x23,x14
sub x27,x3,x1 // done yet?
adc x24,x24,x15
adds x24,x24,x16
sub x14,x3,x5 // rewound ap
adc x25,xzr,xzr // t[14]
add x25,x25,x17
cbz x27,Lsqr8x_outer_break
mov x4,x6
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x0,x1
adcs x26,xzr,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved below
mov x27,#-8*8
// a[8]a[0]
// a[9]a[0]
// a[a]a[0]
// a[b]a[0]
// a[c]a[0]
// a[d]a[0]
// a[e]a[0]
// a[f]a[0]
// a[8]a[1]
// a[f]a[1]........................
// a[8]a[2]
// a[f]a[2]........................
// a[8]a[3]
// a[f]a[3]........................
// a[8]a[4]
// a[f]a[4]........................
// a[8]a[5]
// a[f]a[5]........................
// a[8]a[6]
// a[f]a[6]........................
// a[8]a[7]
// a[f]a[7]........................
Lsqr8x_mul:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_mul
// note that carry flag is guaranteed
// to be zero at this point
cmp x1,x3 // done yet?
b.eq Lsqr8x_break
ldp x6,x7,[x2,#8*0]
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
adds x19,x19,x6
ldr x4,[x0,#-8*8]
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_mul
.align 4
Lsqr8x_break:
ldp x6,x7,[x0,#8*0]
add x1,x0,#8*8
ldp x8,x9,[x0,#8*2]
sub x14,x3,x1 // is it last iteration?
ldp x10,x11,[x0,#8*4]
sub x15,x2,x14
ldp x12,x13,[x0,#8*6]
cbz x14,Lsqr8x_outer_loop
stp x19,x20,[x2,#8*0]
ldp x19,x20,[x15,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x15,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x15,#8*4]
stp x25,x26,[x2,#8*6]
mov x2,x15
ldp x25,x26,[x15,#8*6]
b Lsqr8x_outer_loop
.align 4
Lsqr8x_outer_break:
// Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0]
ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0]
ldp x15,x16,[sp,#8*1]
ldp x11,x13,[x14,#8*2]
add x1,x14,#8*4
ldp x17,x14,[sp,#8*3]
stp x19,x20,[x2,#8*0]
mul x19,x7,x7
stp x21,x22,[x2,#8*2]
umulh x7,x7,x7
stp x23,x24,[x2,#8*4]
mul x8,x9,x9
stp x25,x26,[x2,#8*6]
mov x2,sp
umulh x9,x9,x9
adds x20,x7,x15,lsl#1
extr x15,x16,x15,#63
sub x27,x5,#8*4
Lsqr4x_shift_n_add:
adcs x21,x8,x15
extr x16,x17,x16,#63
sub x27,x27,#8*4
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
ldp x7,x9,[x1],#8*2
umulh x11,x11,x11
mul x12,x13,x13
umulh x13,x13,x13
extr x17,x14,x17,#63
stp x19,x20,[x2,#8*0]
adcs x23,x10,x17
extr x14,x15,x14,#63
stp x21,x22,[x2,#8*2]
adcs x24,x11,x14
ldp x17,x14,[x2,#8*7]
extr x15,x16,x15,#63
adcs x25,x12,x15
extr x16,x17,x16,#63
adcs x26,x13,x16
ldp x15,x16,[x2,#8*9]
mul x6,x7,x7
ldp x11,x13,[x1],#8*2
umulh x7,x7,x7
mul x8,x9,x9
umulh x9,x9,x9
stp x23,x24,[x2,#8*4]
extr x17,x14,x17,#63
stp x25,x26,[x2,#8*6]
add x2,x2,#8*8
adcs x19,x6,x17
extr x14,x15,x14,#63
adcs x20,x7,x14
ldp x17,x14,[x2,#8*3]
extr x15,x16,x15,#63
cbnz x27,Lsqr4x_shift_n_add
ldp x1,x4,[x29,#104] // pull np and n0
adcs x21,x8,x15
extr x16,x17,x16,#63
adcs x22,x9,x16
ldp x15,x16,[x2,#8*5]
mul x10,x11,x11
umulh x11,x11,x11
stp x19,x20,[x2,#8*0]
mul x12,x13,x13
umulh x13,x13,x13
stp x21,x22,[x2,#8*2]
extr x17,x14,x17,#63
adcs x23,x10,x17
extr x14,x15,x14,#63
ldp x19,x20,[sp,#8*0]
adcs x24,x11,x14
extr x15,x16,x15,#63
ldp x6,x7,[x1,#8*0]
adcs x25,x12,x15
extr x16,xzr,x16,#63
ldp x8,x9,[x1,#8*2]
adc x26,x13,x16
ldp x10,x11,[x1,#8*4]
// Reduce by 512 bits per iteration
mul x28,x4,x19 // t[0]*n0
ldp x12,x13,[x1,#8*6]
add x3,x1,x5
ldp x21,x22,[sp,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[sp,#8*4]
stp x25,x26,[x2,#8*6]
ldp x25,x26,[sp,#8*6]
add x1,x1,#8*8
mov x30,xzr // initial top-most carry
mov x2,sp
mov x27,#8
Lsqr8x_reduction:
// (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0)
mul x15,x7,x28
sub x27,x27,#1
mul x16,x8,x28
str x28,[x2],#8 // put aside t[0]*n0 for tail processing
mul x17,x9,x28
// (*) adds xzr,x19,x14
subs xzr,x19,#1 // (*)
mul x14,x10,x28
adcs x19,x20,x15
mul x15,x11,x28
adcs x20,x21,x16
mul x16,x12,x28
adcs x21,x22,x17
mul x17,x13,x28
adcs x22,x23,x14
umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0)
adcs x23,x24,x15
umulh x15,x7,x28
adcs x24,x25,x16
umulh x16,x8,x28
adcs x25,x26,x17
umulh x17,x9,x28
adc x26,xzr,xzr
adds x19,x19,x14
umulh x14,x10,x28
adcs x20,x20,x15
umulh x15,x11,x28
adcs x21,x21,x16
umulh x16,x12,x28
adcs x22,x22,x17
umulh x17,x13,x28
mul x28,x4,x19 // next t[0]*n0
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adc x26,x26,x17
cbnz x27,Lsqr8x_reduction
ldp x14,x15,[x2,#8*0]
ldp x16,x17,[x2,#8*2]
mov x0,x2
sub x27,x3,x1 // done yet?
adds x19,x19,x14
adcs x20,x20,x15
ldp x14,x15,[x2,#8*4]
adcs x21,x21,x16
adcs x22,x22,x17
ldp x16,x17,[x2,#8*6]
adcs x23,x23,x14
adcs x24,x24,x15
adcs x25,x25,x16
adcs x26,x26,x17
//adc x28,xzr,xzr // moved below
cbz x27,Lsqr8x8_post_condition
ldr x4,[x2,#-8*8]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
ldp x10,x11,[x1,#8*4]
mov x27,#-8*8
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
Lsqr8x_tail:
mul x14,x6,x4
adc x28,xzr,xzr // carry bit, modulo-scheduled
mul x15,x7,x4
add x27,x27,#8
mul x16,x8,x4
mul x17,x9,x4
adds x19,x19,x14
mul x14,x10,x4
adcs x20,x20,x15
mul x15,x11,x4
adcs x21,x21,x16
mul x16,x12,x4
adcs x22,x22,x17
mul x17,x13,x4
adcs x23,x23,x14
umulh x14,x6,x4
adcs x24,x24,x15
umulh x15,x7,x4
adcs x25,x25,x16
umulh x16,x8,x4
adcs x26,x26,x17
umulh x17,x9,x4
adc x28,x28,xzr
str x19,[x2],#8
adds x19,x20,x14
umulh x14,x10,x4
adcs x20,x21,x15
umulh x15,x11,x4
adcs x21,x22,x16
umulh x16,x12,x4
adcs x22,x23,x17
umulh x17,x13,x4
ldr x4,[x0,x27]
adcs x23,x24,x14
adcs x24,x25,x15
adcs x25,x26,x16
adcs x26,x28,x17
//adc x28,xzr,xzr // moved above
cbnz x27,Lsqr8x_tail
// note that carry flag is guaranteed
// to be zero at this point
ldp x6,x7,[x2,#8*0]
sub x27,x3,x1 // done yet?
sub x16,x3,x5 // rewound np
ldp x8,x9,[x2,#8*2]
ldp x10,x11,[x2,#8*4]
ldp x12,x13,[x2,#8*6]
cbz x27,Lsqr8x_tail_break
ldr x4,[x0,#-8*8]
adds x19,x19,x6
adcs x20,x20,x7
ldp x6,x7,[x1,#8*0]
adcs x21,x21,x8
adcs x22,x22,x9
ldp x8,x9,[x1,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x1,#8*4]
adcs x25,x25,x12
mov x27,#-8*8
adcs x26,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
//adc x28,xzr,xzr // moved above
b Lsqr8x_tail
.align 4
Lsqr8x_tail_break:
ldr x4,[x29,#112] // pull n0
add x27,x2,#8*8 // end of current t[num] window
subs xzr,x30,#1 // "move" top-most carry to carry bit
adcs x14,x19,x6
adcs x15,x20,x7
ldp x19,x20,[x0,#8*0]
adcs x21,x21,x8
ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0]
adcs x22,x22,x9
ldp x8,x9,[x16,#8*2]
adcs x23,x23,x10
adcs x24,x24,x11
ldp x10,x11,[x16,#8*4]
adcs x25,x25,x12
adcs x26,x26,x13
ldp x12,x13,[x16,#8*6]
add x1,x16,#8*8
adc x30,xzr,xzr // top-most carry
mul x28,x4,x19
stp x14,x15,[x2,#8*0]
stp x21,x22,[x2,#8*2]
ldp x21,x22,[x0,#8*2]
stp x23,x24,[x2,#8*4]
ldp x23,x24,[x0,#8*4]
cmp x27,x29 // did we hit the bottom?
stp x25,x26,[x2,#8*6]
mov x2,x0 // slide the window
ldp x25,x26,[x0,#8*6]
mov x27,#8
b.ne Lsqr8x_reduction
// Final step. We see if result is larger than modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction. So we subtract modulus, see if it borrowed,
// and conditionally copy original value.
ldr x0,[x29,#96] // pull rp
add x2,x2,#8*8
subs x14,x19,x6
sbcs x15,x20,x7
sub x27,x5,#8*8
mov x3,x0 // x0 copy
Lsqr8x_sub:
sbcs x16,x21,x8
ldp x6,x7,[x1,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x1,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x10,x11,[x1,#8*4]
sbcs x17,x26,x13
ldp x12,x13,[x1,#8*6]
add x1,x1,#8*8
ldp x19,x20,[x2,#8*0]
sub x27,x27,#8*8
ldp x21,x22,[x2,#8*2]
ldp x23,x24,[x2,#8*4]
ldp x25,x26,[x2,#8*6]
add x2,x2,#8*8
stp x14,x15,[x0,#8*4]
sbcs x14,x19,x6
stp x16,x17,[x0,#8*6]
add x0,x0,#8*8
sbcs x15,x20,x7
cbnz x27,Lsqr8x_sub
sbcs x16,x21,x8
mov x2,sp
add x1,sp,x5
ldp x6,x7,[x3,#8*0]
sbcs x17,x22,x9
stp x14,x15,[x0,#8*0]
sbcs x14,x23,x10
ldp x8,x9,[x3,#8*2]
sbcs x15,x24,x11
stp x16,x17,[x0,#8*2]
sbcs x16,x25,x12
ldp x19,x20,[x1,#8*0]
sbcs x17,x26,x13
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
stp x14,x15,[x0,#8*4]
stp x16,x17,[x0,#8*6]
sub x27,x5,#8*4
Lsqr4x_cond_copy:
sub x27,x27,#8*4
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
ldp x6,x7,[x3,#8*4]
ldp x19,x20,[x1,#8*4]
csel x16,x21,x8,lo
stp xzr,xzr,[x2,#8*2]
add x2,x2,#8*4
csel x17,x22,x9,lo
ldp x8,x9,[x3,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
add x3,x3,#8*4
stp xzr,xzr,[x1,#8*0]
stp xzr,xzr,[x1,#8*2]
cbnz x27,Lsqr4x_cond_copy
csel x14,x19,x6,lo
stp xzr,xzr,[x2,#8*0]
csel x15,x20,x7,lo
stp xzr,xzr,[x2,#8*2]
csel x16,x21,x8,lo
csel x17,x22,x9,lo
stp x14,x15,[x3,#8*0]
stp x16,x17,[x3,#8*2]
b Lsqr8x_done
.align 4
Lsqr8x8_post_condition:
adc x28,xzr,xzr
ldr x30,[x29,#8] // pull return address
// x19-x26,x28 hold result, x6-x13 hold modulus
subs x6,x19,x6
ldr x1,[x29,#96] // pull rp
sbcs x7,x20,x7
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x8
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x9
stp xzr,xzr,[sp,#8*4]
sbcs x10,x23,x10
stp xzr,xzr,[sp,#8*6]
sbcs x11,x24,x11
stp xzr,xzr,[sp,#8*8]
sbcs x12,x25,x12
stp xzr,xzr,[sp,#8*10]
sbcs x13,x26,x13
stp xzr,xzr,[sp,#8*12]
sbcs x28,x28,xzr // did it borrow?
stp xzr,xzr,[sp,#8*14]
// x6-x13 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
csel x10,x23,x10,lo
csel x11,x24,x11,lo
stp x8,x9,[x1,#8*2]
csel x12,x25,x12,lo
csel x13,x26,x13,lo
stp x10,x11,[x1,#8*4]
stp x12,x13,[x1,#8*6]
Lsqr8x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.def __bn_mul4x_mont
.type 32
.endef
.align 5
__bn_mul4x_mont:
// Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_mul4x_mont is jumped to
// only from bn_mul_mont or __bn_mul8x_mont which have already signed the
// return address.
stp x29,x30,[sp,#-128]!
add x29,sp,#0
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub x26,sp,x5,lsl#3
lsl x5,x5,#3
ldr x4,[x4] // *n0
sub sp,x26,#8*4 // alloca
add x10,x2,x5
add x27,x1,x5
stp x0,x10,[x29,#96] // offload rp and &b[num]
ldr x24,[x2,#8*0] // b[0]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
mov x19,xzr
mov x20,xzr
mov x21,xzr
mov x22,xzr
ldp x14,x15,[x3,#8*0] // n[0..3]
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
mov x28,#0
mov x26,sp
Loop_mul4x_1st_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[0])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[0])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
// (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0)
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
sub x10,x27,x1
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_reduction
cbz x10,Lmul4x4_post_condition
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldr x25,[sp] // a[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
Loop_mul4x_1st_tail:
mul x10,x6,x24 // lo(a[4..7]*b[i])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[i])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i] (or b[0])
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*a[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
adcs x23,x23,x0
umulh x13,x17,x25
adc x0,xzr,xzr
ldr x25,[sp,x28] // next t[0]*n0
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_1st_tail
sub x11,x27,x5 // rewound x1
cbz x10,Lmul4x_proceed
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_1st_tail
.align 5
Lmul4x_proceed:
ldr x24,[x2,#8*4]! // *++b
adc x30,x0,xzr
ldp x6,x7,[x11,#8*0] // a[0..3]
sub x3,x3,x5 // rewind np
ldp x8,x9,[x11,#8*2]
add x1,x11,#8*4
stp x19,x20,[x26,#8*0] // result!!!
ldp x19,x20,[sp,#8*4] // t[0..3]
stp x21,x22,[x26,#8*2] // result!!!
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x3,#8*0] // n[0..3]
mov x26,sp
ldp x16,x17,[x3,#8*2]
adds x3,x3,#8*4 // clear carry bit
mov x0,xzr
.align 4
Loop_mul4x_reduction:
mul x10,x6,x24 // lo(a[0..3]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[0..3]*b[4])
adcs x20,x20,x11
mul x25,x19,x4 // t[0]*n0
adcs x21,x21,x12
umulh x11,x7,x24
adcs x22,x22,x13
umulh x12,x8,x24
adc x23,xzr,xzr
umulh x13,x9,x24
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
// (*) mul x10,x14,x25
str x25,[x26],#8 // put aside t[0]*n0 for tail processing
adcs x21,x21,x11
mul x11,x15,x25 // lo(n[0..3]*t[0]*n0)
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
// (*) adds xzr,x19,x10
subs xzr,x19,#1 // (*)
umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0)
adcs x19,x20,x11
umulh x11,x15,x25
adcs x20,x21,x12
umulh x12,x16,x25
adcs x21,x22,x13
umulh x13,x17,x25
adcs x22,x23,x0
adc x0,xzr,xzr
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_reduction
adc x0,x0,xzr
ldp x10,x11,[x26,#8*4] // t[4..7]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0] // a[4..7]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldr x25,[sp] // t[0]*n0
ldp x14,x15,[x3,#8*0] // n[4..7]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
.align 4
Loop_mul4x_tail:
mul x10,x6,x24 // lo(a[4..7]*b[4])
adc x0,x0,xzr // modulo-scheduled
mul x11,x7,x24
add x28,x28,#8
mul x12,x8,x24
and x28,x28,#31
mul x13,x9,x24
adds x19,x19,x10
umulh x10,x6,x24 // hi(a[4..7]*b[4])
adcs x20,x20,x11
umulh x11,x7,x24
adcs x21,x21,x12
umulh x12,x8,x24
adcs x22,x22,x13
umulh x13,x9,x24
adc x23,xzr,xzr
ldr x24,[x2,x28] // next b[i]
adds x20,x20,x10
mul x10,x14,x25 // lo(n[4..7]*t[0]*n0)
adcs x21,x21,x11
mul x11,x15,x25
adcs x22,x22,x12
mul x12,x16,x25
adc x23,x23,x13 // can't overflow
mul x13,x17,x25
adds x19,x19,x10
umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0)
adcs x20,x20,x11
umulh x11,x15,x25
adcs x21,x21,x12
umulh x12,x16,x25
adcs x22,x22,x13
umulh x13,x17,x25
adcs x23,x23,x0
ldr x25,[sp,x28] // next a[0]*n0
adc x0,xzr,xzr
str x19,[x26],#8 // result!!!
adds x19,x20,x10
sub x10,x27,x1 // done yet?
adcs x20,x21,x11
adcs x21,x22,x12
adcs x22,x23,x13
//adc x0,x0,xzr
cbnz x28,Loop_mul4x_tail
sub x11,x3,x5 // rewound np?
adc x0,x0,xzr
cbz x10,Loop_mul4x_break
ldp x10,x11,[x26,#8*4]
ldp x12,x13,[x26,#8*6]
ldp x6,x7,[x1,#8*0]
ldp x8,x9,[x1,#8*2]
add x1,x1,#8*4
adds x19,x19,x10
adcs x20,x20,x11
adcs x21,x21,x12
adcs x22,x22,x13
//adc x0,x0,xzr
ldp x14,x15,[x3,#8*0]
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
b Loop_mul4x_tail
.align 4
Loop_mul4x_break:
ldp x12,x13,[x29,#96] // pull rp and &b[num]
adds x19,x19,x30
add x2,x2,#8*4 // bp++
adcs x20,x20,xzr
sub x1,x1,x5 // rewind ap
adcs x21,x21,xzr
stp x19,x20,[x26,#8*0] // result!!!
adcs x22,x22,xzr
ldp x19,x20,[sp,#8*4] // t[0..3]
adc x30,x0,xzr
stp x21,x22,[x26,#8*2] // result!!!
cmp x2,x13 // done yet?
ldp x21,x22,[sp,#8*6]
ldp x14,x15,[x11,#8*0] // n[0..3]
ldp x16,x17,[x11,#8*2]
add x3,x11,#8*4
b.eq Lmul4x_post
ldr x24,[x2]
ldp x6,x7,[x1,#8*0] // a[0..3]
ldp x8,x9,[x1,#8*2]
adds x1,x1,#8*4 // clear carry bit
mov x0,xzr
mov x26,sp
b Loop_mul4x_reduction
.align 4
Lmul4x_post:
// Final step. We see if result is larger than modulus, and
// if it is, subtract the modulus. But comparison implies
// subtraction. So we subtract modulus, see if it borrowed,
// and conditionally copy original value.
mov x0,x12
mov x27,x12 // x0 copy
subs x10,x19,x14
add x26,sp,#8*8
sbcs x11,x20,x15
sub x28,x5,#8*4
Lmul4x_sub:
sbcs x12,x21,x16
ldp x14,x15,[x3,#8*0]
sub x28,x28,#8*4
ldp x19,x20,[x26,#8*0]
sbcs x13,x22,x17
ldp x16,x17,[x3,#8*2]
add x3,x3,#8*4
ldp x21,x22,[x26,#8*2]
add x26,x26,#8*4
stp x10,x11,[x0,#8*0]
sbcs x10,x19,x14
stp x12,x13,[x0,#8*2]
add x0,x0,#8*4
sbcs x11,x20,x15
cbnz x28,Lmul4x_sub
sbcs x12,x21,x16
mov x26,sp
add x1,sp,#8*4
ldp x6,x7,[x27,#8*0]
sbcs x13,x22,x17
stp x10,x11,[x0,#8*0]
ldp x8,x9,[x27,#8*2]
stp x12,x13,[x0,#8*2]
ldp x19,x20,[x1,#8*0]
ldp x21,x22,[x1,#8*2]
sbcs xzr,x30,xzr // did it borrow?
ldr x30,[x29,#8] // pull return address
sub x28,x5,#8*4
Lmul4x_cond_copy:
sub x28,x28,#8*4
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
ldp x6,x7,[x27,#8*4]
ldp x19,x20,[x1,#8*4]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*2]
add x26,x26,#8*4
csel x13,x22,x9,lo
ldp x8,x9,[x27,#8*6]
ldp x21,x22,[x1,#8*6]
add x1,x1,#8*4
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
add x27,x27,#8*4
cbnz x28,Lmul4x_cond_copy
csel x10,x19,x6,lo
stp xzr,xzr,[x26,#8*0]
csel x11,x20,x7,lo
stp xzr,xzr,[x26,#8*2]
csel x12,x21,x8,lo
stp xzr,xzr,[x26,#8*3]
csel x13,x22,x9,lo
stp xzr,xzr,[x26,#8*4]
stp x10,x11,[x27,#8*0]
stp x12,x13,[x27,#8*2]
b Lmul4x_done
.align 4
Lmul4x4_post_condition:
adc x0,x0,xzr
ldr x1,[x29,#96] // pull rp
// x19-x22,x0 hold result, x14-x17 hold modulus
subs x6,x19,x14
ldr x30,[x29,#8] // pull return address
sbcs x7,x20,x15
stp xzr,xzr,[sp,#8*0]
sbcs x8,x21,x16
stp xzr,xzr,[sp,#8*2]
sbcs x9,x22,x17
stp xzr,xzr,[sp,#8*4]
sbcs xzr,x0,xzr // did it borrow?
stp xzr,xzr,[sp,#8*6]
// x6-x9 hold result-modulus
csel x6,x19,x6,lo
csel x7,x20,x7,lo
csel x8,x21,x8,lo
csel x9,x22,x9,lo
stp x6,x7,[x1,#8*0]
stp x8,x9,[x1,#8*2]
Lmul4x_done:
ldp x19,x20,[x29,#16]
mov sp,x29
ldp x21,x22,[x29,#32]
mov x0,#1
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldr x29,[sp],#128
// x30 is popped earlier
AARCH64_VALIDATE_LINK_REGISTER
ret
.byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 4
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/chacha/chacha-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
.section .rodata
.align 5
Lsigma:
.quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral
Lone:
.long 1,0,0,0
.byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.text
.globl ChaCha20_ctr32_nohw
.def ChaCha20_ctr32_nohw
.type 32
.endef
.align 5
ChaCha20_ctr32_nohw:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma
add x5,x5,:lo12:Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ldp x28,x30,[x4] // load counter
#ifdef __AARCH64EB__
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
Loop_outer:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov w7,w23
lsr x8,x23,#32
mov w9,w24
lsr x10,x24,#32
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#64
Loop:
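// Each pass through Loop is one ChaCha20 double round on the 16-word state
// held in w5-w17 and w19-w21 (w18 is skipped; x18 is reserved on some
// platforms): the four column quarter-rounds, then the four diagonal ones.
// A quarter-round is a+=b; d^=a; d=rotl(d,16); c+=d; b^=c; b=rotl(b,12);
// a+=b; d^=a; d=rotl(d,8); c+=d; b^=c; b=rotl(b,7); the ror #16/#20/#24/#25
// below are the equivalent right-rotations. x4 counts the 10 double rounds.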
sub x4,x4,#1
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
ror w21,w21,#16
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#20
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
add w5,w5,w9
add w6,w6,w10
add w7,w7,w11
add w8,w8,w12
eor w17,w17,w5
eor w19,w19,w6
eor w20,w20,w7
eor w21,w21,w8
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
ror w21,w21,#24
add w13,w13,w17
add w14,w14,w19
add w15,w15,w20
add w16,w16,w21
eor w9,w9,w13
eor w10,w10,w14
eor w11,w11,w15
eor w12,w12,w16
ror w9,w9,#25
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#16
ror w17,w17,#16
ror w19,w19,#16
ror w20,w20,#16
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#20
ror w11,w11,#20
ror w12,w12,#20
ror w9,w9,#20
add w5,w5,w10
add w6,w6,w11
add w7,w7,w12
add w8,w8,w9
eor w21,w21,w5
eor w17,w17,w6
eor w19,w19,w7
eor w20,w20,w8
ror w21,w21,#24
ror w17,w17,#24
ror w19,w19,#24
ror w20,w20,#24
add w15,w15,w21
add w16,w16,w17
add w13,w13,w19
add w14,w14,w20
eor w10,w10,w15
eor w11,w11,w16
eor w12,w12,w13
eor w9,w9,w14
ror w10,w10,#25
ror w11,w11,#25
ror w12,w12,#25
ror w9,w9,#25
cbnz x4,Loop
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
b.lo Ltail
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.hi Loop_outer
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.align 4
Ltail:
add x2,x2,#64
Less_than_64:
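// Final partial block: the freshly accumulated keystream block is packed
// into x5...x20, spilled to the stack, and XORed into the output one byte at
// a time by Loop_tail. x0 is pre-decremented because x2 is incremented
// before each strb, and x2 runs from -(remaining bytes) up to zero.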
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
stp x5,x7,[sp,#0]
stp x9,x11,[sp,#16]
stp x13,x15,[sp,#32]
stp x17,x20,[sp,#48]
Loop_tail:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,Loop_tail
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.globl ChaCha20_ctr32_neon
.def ChaCha20_ctr32_neon
.type 32
.endef
.align 5
ChaCha20_ctr32_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma
add x5,x5,:lo12:Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
cmp x2,#512
b.hs L512_or_more_neon
sub sp,sp,#64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
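// The NEON path below produces four 64-byte blocks per outer iteration
// (256 bytes): w5-w21 compute the block for the base counter exactly as in
// the scalar path, while v0-v3, v4-v7 and v16-v19 carry three further blocks
// whose counters (base+1, +2, +3) sit in v27, v28 and v29. The counter lane
// of v31 now holds 4 so the vector counters step four blocks at a time.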
Loop_outer_neon:
mov w5,w22 // unpack key block
lsr x6,x22,#32
mov v0.16b,v24.16b
mov w7,w23
lsr x8,x23,#32
mov v4.16b,v24.16b
mov w9,w24
lsr x10,x24,#32
mov v16.16b,v24.16b
mov w11,w25
mov v1.16b,v25.16b
lsr x12,x25,#32
mov v5.16b,v25.16b
mov w13,w26
mov v17.16b,v25.16b
lsr x14,x26,#32
mov v3.16b,v27.16b
mov w15,w27
mov v7.16b,v28.16b
lsr x16,x27,#32
mov v19.16b,v29.16b
mov w17,w28
mov v2.16b,v26.16b
lsr x19,x28,#32
mov v6.16b,v26.16b
mov w20,w30
mov v18.16b,v26.16b
lsr x21,x30,#32
mov x4,#10
subs x2,x2,#256
Loop_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v16.4s,v16.4s,v17.4s
add w7,w7,w11
eor v3.16b,v3.16b,v0.16b
add w8,w8,w12
eor v7.16b,v7.16b,v4.16b
eor w17,w17,w5
eor v19.16b,v19.16b,v16.16b
eor w19,w19,w6
rev32 v3.8h,v3.8h
eor w20,w20,w7
rev32 v7.8h,v7.8h
eor w21,w21,w8
rev32 v19.8h,v19.8h
ror w17,w17,#16
add v2.4s,v2.4s,v3.4s
ror w19,w19,#16
add v6.4s,v6.4s,v7.4s
ror w20,w20,#16
add v18.4s,v18.4s,v19.4s
ror w21,w21,#16
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#20
add w16,w16,w21
ushr v5.4s,v21.4s,#20
eor w9,w9,w13
ushr v17.4s,v22.4s,#20
eor w10,w10,w14
sli v1.4s,v20.4s,#12
eor w11,w11,w15
sli v5.4s,v21.4s,#12
eor w12,w12,w16
sli v17.4s,v22.4s,#12
ror w9,w9,#20
add v0.4s,v0.4s,v1.4s
ror w10,w10,#20
add v4.4s,v4.4s,v5.4s
ror w11,w11,#20
add v16.4s,v16.4s,v17.4s
ror w12,w12,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w9
eor v21.16b,v7.16b,v4.16b
add w6,w6,w10
eor v22.16b,v19.16b,v16.16b
add w7,w7,w11
ushr v3.4s,v20.4s,#24
add w8,w8,w12
ushr v7.4s,v21.4s,#24
eor w17,w17,w5
ushr v19.4s,v22.4s,#24
eor w19,w19,w6
sli v3.4s,v20.4s,#8
eor w20,w20,w7
sli v7.4s,v21.4s,#8
eor w21,w21,w8
sli v19.4s,v22.4s,#8
ror w17,w17,#24
add v2.4s,v2.4s,v3.4s
ror w19,w19,#24
add v6.4s,v6.4s,v7.4s
ror w20,w20,#24
add v18.4s,v18.4s,v19.4s
ror w21,w21,#24
eor v20.16b,v1.16b,v2.16b
add w13,w13,w17
eor v21.16b,v5.16b,v6.16b
add w14,w14,w19
eor v22.16b,v17.16b,v18.16b
add w15,w15,w20
ushr v1.4s,v20.4s,#25
add w16,w16,w21
ushr v5.4s,v21.4s,#25
eor w9,w9,w13
ushr v17.4s,v22.4s,#25
eor w10,w10,w14
sli v1.4s,v20.4s,#7
eor w11,w11,w15
sli v5.4s,v21.4s,#7
eor w12,w12,w16
sli v17.4s,v22.4s,#7
ror w9,w9,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w10,w10,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w10
add v4.4s,v4.4s,v5.4s
add w6,w6,w11
add v16.4s,v16.4s,v17.4s
add w7,w7,w12
eor v3.16b,v3.16b,v0.16b
add w8,w8,w9
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w5
eor v19.16b,v19.16b,v16.16b
eor w17,w17,w6
rev32 v3.8h,v3.8h
eor w19,w19,w7
rev32 v7.8h,v7.8h
eor w20,w20,w8
rev32 v19.8h,v19.8h
ror w21,w21,#16
add v2.4s,v2.4s,v3.4s
ror w17,w17,#16
add v6.4s,v6.4s,v7.4s
ror w19,w19,#16
add v18.4s,v18.4s,v19.4s
ror w20,w20,#16
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#20
add w14,w14,w20
ushr v5.4s,v21.4s,#20
eor w10,w10,w15
ushr v17.4s,v22.4s,#20
eor w11,w11,w16
sli v1.4s,v20.4s,#12
eor w12,w12,w13
sli v5.4s,v21.4s,#12
eor w9,w9,w14
sli v17.4s,v22.4s,#12
ror w10,w10,#20
add v0.4s,v0.4s,v1.4s
ror w11,w11,#20
add v4.4s,v4.4s,v5.4s
ror w12,w12,#20
add v16.4s,v16.4s,v17.4s
ror w9,w9,#20
eor v20.16b,v3.16b,v0.16b
add w5,w5,w10
eor v21.16b,v7.16b,v4.16b
add w6,w6,w11
eor v22.16b,v19.16b,v16.16b
add w7,w7,w12
ushr v3.4s,v20.4s,#24
add w8,w8,w9
ushr v7.4s,v21.4s,#24
eor w21,w21,w5
ushr v19.4s,v22.4s,#24
eor w17,w17,w6
sli v3.4s,v20.4s,#8
eor w19,w19,w7
sli v7.4s,v21.4s,#8
eor w20,w20,w8
sli v19.4s,v22.4s,#8
ror w21,w21,#24
add v2.4s,v2.4s,v3.4s
ror w17,w17,#24
add v6.4s,v6.4s,v7.4s
ror w19,w19,#24
add v18.4s,v18.4s,v19.4s
ror w20,w20,#24
eor v20.16b,v1.16b,v2.16b
add w15,w15,w21
eor v21.16b,v5.16b,v6.16b
add w16,w16,w17
eor v22.16b,v17.16b,v18.16b
add w13,w13,w19
ushr v1.4s,v20.4s,#25
add w14,w14,w20
ushr v5.4s,v21.4s,#25
eor w10,w10,w15
ushr v17.4s,v22.4s,#25
eor w11,w11,w16
sli v1.4s,v20.4s,#7
eor w12,w12,w13
sli v5.4s,v21.4s,#7
eor w9,w9,w14
sli v17.4s,v22.4s,#7
ror w10,w10,#25
ext v2.16b,v2.16b,v2.16b,#8
ror w11,w11,#25
ext v6.16b,v6.16b,v6.16b,#8
ror w12,w12,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
cbnz x4,Loop_neon
add w5,w5,w22 // accumulate key block
add v0.4s,v0.4s,v24.4s
add x6,x6,x22,lsr#32
add v4.4s,v4.4s,v24.4s
add w7,w7,w23
add v16.4s,v16.4s,v24.4s
add x8,x8,x23,lsr#32
add v2.4s,v2.4s,v26.4s
add w9,w9,w24
add v6.4s,v6.4s,v26.4s
add x10,x10,x24,lsr#32
add v18.4s,v18.4s,v26.4s
add w11,w11,w25
add v3.4s,v3.4s,v27.4s
add x12,x12,x25,lsr#32
add w13,w13,w26
add v7.4s,v7.4s,v28.4s
add x14,x14,x26,lsr#32
add w15,w15,w27
add v19.4s,v19.4s,v29.4s
add x16,x16,x27,lsr#32
add w17,w17,w28
add v1.4s,v1.4s,v25.4s
add x19,x19,x28,lsr#32
add w20,w20,w30
add v5.4s,v5.4s,v25.4s
add x21,x21,x30,lsr#32
add v17.4s,v17.4s,v25.4s
b.lo Ltail_neon
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v20.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v21.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v22.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v23.16b
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
add v27.4s,v27.4s,v31.4s // += 4
stp x13,x15,[x0,#32]
add v28.4s,v28.4s,v31.4s
stp x17,x20,[x0,#48]
add v29.4s,v29.4s,v31.4s
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
eor v16.16b,v16.16b,v0.16b
eor v17.16b,v17.16b,v1.16b
eor v18.16b,v18.16b,v2.16b
eor v19.16b,v19.16b,v3.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
b.hi Loop_outer_neon
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
Ltail_neon:
add x2,x2,#256
cmp x2,#64
b.lo Less_than_64
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#4 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
b.eq Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo Less_than_128
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v0.16b,v0.16b,v20.16b
eor v1.16b,v1.16b,v21.16b
eor v2.16b,v2.16b,v22.16b
eor v3.16b,v3.16b,v23.16b
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
b.eq Ldone_neon
sub x2,x2,#64
cmp x2,#64
b.lo Less_than_192
ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64
eor v4.16b,v4.16b,v20.16b
eor v5.16b,v5.16b,v21.16b
eor v6.16b,v6.16b,v22.16b
eor v7.16b,v7.16b,v23.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
b.eq Ldone_neon
sub x2,x2,#64
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp]
b Last_neon
Less_than_128:
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp]
b Last_neon
Less_than_192:
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp]
b Last_neon
.align 4
Last_neon:
sub x0,x0,#1
add x1,x1,x2
add x0,x0,x2
add x4,sp,x2
neg x2,x2
Loop_tail_neon:
ldrb w10,[x1,x2]
ldrb w11,[x4,x2]
add x2,x2,#1
eor w10,w10,w11
strb w10,[x0,x2]
cbnz x2,Loop_tail_neon
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
Ldone_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
.def ChaCha20_512_neon
.type 32
.endef
.align 5
ChaCha20_512_neon:
AARCH64_SIGN_LINK_REGISTER
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adrp x5,Lsigma
add x5,x5,:lo12:Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
L512_or_more_neon:
sub sp,sp,#128+64
ldp x22,x23,[x5] // load sigma
ld1 {v24.4s},[x5],#16
ldp x24,x25,[x3] // load key
ldp x26,x27,[x3,#16]
ld1 {v25.4s,v26.4s},[x3]
ldp x28,x30,[x4] // load counter
ld1 {v27.4s},[x4]
ld1 {v31.4s},[x5]
#ifdef __AARCH64EB__
rev64 v24.4s,v24.4s
ror x24,x24,#32
ror x25,x25,#32
ror x26,x26,#32
ror x27,x27,#32
ror x28,x28,#32
ror x30,x30,#32
#endif
add v27.4s,v27.4s,v31.4s // += 1
stp q24,q25,[sp,#0] // off-load key block, invariant part
add v27.4s,v27.4s,v31.4s // not typo
str q26,[sp,#32]
add v28.4s,v27.4s,v31.4s
add v29.4s,v28.4s,v31.4s
add v30.4s,v29.4s,v31.4s
shl v31.4s,v31.4s,#2 // 1 -> 4
stp d8,d9,[sp,#128+0] // meet ABI requirements
stp d10,d11,[sp,#128+16]
stp d12,d13,[sp,#128+32]
stp d14,d15,[sp,#128+48]
sub x2,x2,#512 // not typo
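// 512-byte path: six blocks are carried in NEON (v0-v3, v4-v7, v8-v11,
// v12-v15, v16-v19, v20-v23) while the general registers are cycled twice
// per outer iteration. Each pass of Loop_upper_neon/Loop_lower_neon performs
// two scalar double rounds but only one NEON double round, so after the
// 5 + 5 passes the integer pipeline has finished two complete blocks and the
// NEON blocks have had their 20 rounds: 8 blocks = 512 bytes per iteration,
// with the counter advancing by 1 + 7 = 8.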
Loop_outer_512_neon:
mov v0.16b,v24.16b
mov v4.16b,v24.16b
mov v8.16b,v24.16b
mov v12.16b,v24.16b
mov v16.16b,v24.16b
mov v20.16b,v24.16b
mov v1.16b,v25.16b
mov w5,w22 // unpack key block
mov v5.16b,v25.16b
lsr x6,x22,#32
mov v9.16b,v25.16b
mov w7,w23
mov v13.16b,v25.16b
lsr x8,x23,#32
mov v17.16b,v25.16b
mov w9,w24
mov v21.16b,v25.16b
lsr x10,x24,#32
mov v3.16b,v27.16b
mov w11,w25
mov v7.16b,v28.16b
lsr x12,x25,#32
mov v11.16b,v29.16b
mov w13,w26
mov v15.16b,v30.16b
lsr x14,x26,#32
mov v2.16b,v26.16b
mov w15,w27
mov v6.16b,v26.16b
lsr x16,x27,#32
add v19.4s,v3.4s,v31.4s // +4
mov w17,w28
add v23.4s,v7.4s,v31.4s // +4
lsr x19,x28,#32
mov v10.16b,v26.16b
mov w20,w30
mov v14.16b,v26.16b
lsr x21,x30,#32
mov v18.16b,v26.16b
stp q27,q28,[sp,#48] // off-load key block, variable part
mov v22.16b,v26.16b
str q29,[sp,#80]
mov x4,#5
subs x2,x2,#512
Loop_upper_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,Loop_upper_neon
add w5,w5,w22 // accumulate key block
add x6,x6,x22,lsr#32
add w7,w7,w23
add x8,x8,x23,lsr#32
add w9,w9,w24
add x10,x10,x24,lsr#32
add w11,w11,w25
add x12,x12,x25,lsr#32
add w13,w13,w26
add x14,x14,x26,lsr#32
add w15,w15,w27
add x16,x16,x27,lsr#32
add w17,w17,w28
add x19,x19,x28,lsr#32
add w20,w20,w30
add x21,x21,x30,lsr#32
add x5,x5,x6,lsl#32 // pack
add x7,x7,x8,lsl#32
ldp x6,x8,[x1,#0] // load input
add x9,x9,x10,lsl#32
add x11,x11,x12,lsl#32
ldp x10,x12,[x1,#16]
add x13,x13,x14,lsl#32
add x15,x15,x16,lsl#32
ldp x14,x16,[x1,#32]
add x17,x17,x19,lsl#32
add x20,x20,x21,lsl#32
ldp x19,x21,[x1,#48]
add x1,x1,#64
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor x15,x15,x16
eor x17,x17,x19
eor x20,x20,x21
stp x5,x7,[x0,#0] // store output
add x28,x28,#1 // increment counter
mov w5,w22 // unpack key block
lsr x6,x22,#32
stp x9,x11,[x0,#16]
mov w7,w23
lsr x8,x23,#32
stp x13,x15,[x0,#32]
mov w9,w24
lsr x10,x24,#32
stp x17,x20,[x0,#48]
add x0,x0,#64
mov w11,w25
lsr x12,x25,#32
mov w13,w26
lsr x14,x26,#32
mov w15,w27
lsr x16,x27,#32
mov w17,w28
lsr x19,x28,#32
mov w20,w30
lsr x21,x30,#32
mov x4,#5
Loop_lower_neon:
sub x4,x4,#1
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#12
ext v7.16b,v7.16b,v7.16b,#12
ext v11.16b,v11.16b,v11.16b,#12
ext v15.16b,v15.16b,v15.16b,#12
ext v19.16b,v19.16b,v19.16b,#12
ext v23.16b,v23.16b,v23.16b,#12
ext v1.16b,v1.16b,v1.16b,#4
ext v5.16b,v5.16b,v5.16b,#4
ext v9.16b,v9.16b,v9.16b,#4
ext v13.16b,v13.16b,v13.16b,#4
ext v17.16b,v17.16b,v17.16b,#4
ext v21.16b,v21.16b,v21.16b,#4
add v0.4s,v0.4s,v1.4s
add w5,w5,w9
add v4.4s,v4.4s,v5.4s
add w6,w6,w10
add v8.4s,v8.4s,v9.4s
add w7,w7,w11
add v12.4s,v12.4s,v13.4s
add w8,w8,w12
add v16.4s,v16.4s,v17.4s
eor w17,w17,w5
add v20.4s,v20.4s,v21.4s
eor w19,w19,w6
eor v3.16b,v3.16b,v0.16b
eor w20,w20,w7
eor v7.16b,v7.16b,v4.16b
eor w21,w21,w8
eor v11.16b,v11.16b,v8.16b
ror w17,w17,#16
eor v15.16b,v15.16b,v12.16b
ror w19,w19,#16
eor v19.16b,v19.16b,v16.16b
ror w20,w20,#16
eor v23.16b,v23.16b,v20.16b
ror w21,w21,#16
rev32 v3.8h,v3.8h
add w13,w13,w17
rev32 v7.8h,v7.8h
add w14,w14,w19
rev32 v11.8h,v11.8h
add w15,w15,w20
rev32 v15.8h,v15.8h
add w16,w16,w21
rev32 v19.8h,v19.8h
eor w9,w9,w13
rev32 v23.8h,v23.8h
eor w10,w10,w14
add v2.4s,v2.4s,v3.4s
eor w11,w11,w15
add v6.4s,v6.4s,v7.4s
eor w12,w12,w16
add v10.4s,v10.4s,v11.4s
ror w9,w9,#20
add v14.4s,v14.4s,v15.4s
ror w10,w10,#20
add v18.4s,v18.4s,v19.4s
ror w11,w11,#20
add v22.4s,v22.4s,v23.4s
ror w12,w12,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w9
eor v25.16b,v5.16b,v6.16b
add w6,w6,w10
eor v26.16b,v9.16b,v10.16b
add w7,w7,w11
eor v27.16b,v13.16b,v14.16b
add w8,w8,w12
eor v28.16b,v17.16b,v18.16b
eor w17,w17,w5
eor v29.16b,v21.16b,v22.16b
eor w19,w19,w6
ushr v1.4s,v24.4s,#20
eor w20,w20,w7
ushr v5.4s,v25.4s,#20
eor w21,w21,w8
ushr v9.4s,v26.4s,#20
ror w17,w17,#24
ushr v13.4s,v27.4s,#20
ror w19,w19,#24
ushr v17.4s,v28.4s,#20
ror w20,w20,#24
ushr v21.4s,v29.4s,#20
ror w21,w21,#24
sli v1.4s,v24.4s,#12
add w13,w13,w17
sli v5.4s,v25.4s,#12
add w14,w14,w19
sli v9.4s,v26.4s,#12
add w15,w15,w20
sli v13.4s,v27.4s,#12
add w16,w16,w21
sli v17.4s,v28.4s,#12
eor w9,w9,w13
sli v21.4s,v29.4s,#12
eor w10,w10,w14
add v0.4s,v0.4s,v1.4s
eor w11,w11,w15
add v4.4s,v4.4s,v5.4s
eor w12,w12,w16
add v8.4s,v8.4s,v9.4s
ror w9,w9,#25
add v12.4s,v12.4s,v13.4s
ror w10,w10,#25
add v16.4s,v16.4s,v17.4s
ror w11,w11,#25
add v20.4s,v20.4s,v21.4s
ror w12,w12,#25
eor v24.16b,v3.16b,v0.16b
add w5,w5,w10
eor v25.16b,v7.16b,v4.16b
add w6,w6,w11
eor v26.16b,v11.16b,v8.16b
add w7,w7,w12
eor v27.16b,v15.16b,v12.16b
add w8,w8,w9
eor v28.16b,v19.16b,v16.16b
eor w21,w21,w5
eor v29.16b,v23.16b,v20.16b
eor w17,w17,w6
ushr v3.4s,v24.4s,#24
eor w19,w19,w7
ushr v7.4s,v25.4s,#24
eor w20,w20,w8
ushr v11.4s,v26.4s,#24
ror w21,w21,#16
ushr v15.4s,v27.4s,#24
ror w17,w17,#16
ushr v19.4s,v28.4s,#24
ror w19,w19,#16
ushr v23.4s,v29.4s,#24
ror w20,w20,#16
sli v3.4s,v24.4s,#8
add w15,w15,w21
sli v7.4s,v25.4s,#8
add w16,w16,w17
sli v11.4s,v26.4s,#8
add w13,w13,w19
sli v15.4s,v27.4s,#8
add w14,w14,w20
sli v19.4s,v28.4s,#8
eor w10,w10,w15
sli v23.4s,v29.4s,#8
eor w11,w11,w16
add v2.4s,v2.4s,v3.4s
eor w12,w12,w13
add v6.4s,v6.4s,v7.4s
eor w9,w9,w14
add v10.4s,v10.4s,v11.4s
ror w10,w10,#20
add v14.4s,v14.4s,v15.4s
ror w11,w11,#20
add v18.4s,v18.4s,v19.4s
ror w12,w12,#20
add v22.4s,v22.4s,v23.4s
ror w9,w9,#20
eor v24.16b,v1.16b,v2.16b
add w5,w5,w10
eor v25.16b,v5.16b,v6.16b
add w6,w6,w11
eor v26.16b,v9.16b,v10.16b
add w7,w7,w12
eor v27.16b,v13.16b,v14.16b
add w8,w8,w9
eor v28.16b,v17.16b,v18.16b
eor w21,w21,w5
eor v29.16b,v21.16b,v22.16b
eor w17,w17,w6
ushr v1.4s,v24.4s,#25
eor w19,w19,w7
ushr v5.4s,v25.4s,#25
eor w20,w20,w8
ushr v9.4s,v26.4s,#25
ror w21,w21,#24
ushr v13.4s,v27.4s,#25
ror w17,w17,#24
ushr v17.4s,v28.4s,#25
ror w19,w19,#24
ushr v21.4s,v29.4s,#25
ror w20,w20,#24
sli v1.4s,v24.4s,#7
add w15,w15,w21
sli v5.4s,v25.4s,#7
add w16,w16,w17
sli v9.4s,v26.4s,#7
add w13,w13,w19
sli v13.4s,v27.4s,#7
add w14,w14,w20
sli v17.4s,v28.4s,#7
eor w10,w10,w15
sli v21.4s,v29.4s,#7
eor w11,w11,w16
ext v2.16b,v2.16b,v2.16b,#8
eor w12,w12,w13
ext v6.16b,v6.16b,v6.16b,#8
eor w9,w9,w14
ext v10.16b,v10.16b,v10.16b,#8
ror w10,w10,#25
ext v14.16b,v14.16b,v14.16b,#8
ror w11,w11,#25
ext v18.16b,v18.16b,v18.16b,#8
ror w12,w12,#25
ext v22.16b,v22.16b,v22.16b,#8
ror w9,w9,#25
ext v3.16b,v3.16b,v3.16b,#4
ext v7.16b,v7.16b,v7.16b,#4
ext v11.16b,v11.16b,v11.16b,#4
ext v15.16b,v15.16b,v15.16b,#4
ext v19.16b,v19.16b,v19.16b,#4
ext v23.16b,v23.16b,v23.16b,#4
ext v1.16b,v1.16b,v1.16b,#12
ext v5.16b,v5.16b,v5.16b,#12
ext v9.16b,v9.16b,v9.16b,#12
ext v13.16b,v13.16b,v13.16b,#12
ext v17.16b,v17.16b,v17.16b,#12
ext v21.16b,v21.16b,v21.16b,#12
cbnz x4,Loop_lower_neon
add w5,w5,w22 // accumulate key block
ldp q24,q25,[sp,#0]
add x6,x6,x22,lsr#32
ldp q26,q27,[sp,#32]
add w7,w7,w23
ldp q28,q29,[sp,#64]
add x8,x8,x23,lsr#32
add v0.4s,v0.4s,v24.4s
add w9,w9,w24
add v4.4s,v4.4s,v24.4s
add x10,x10,x24,lsr#32
add v8.4s,v8.4s,v24.4s
add w11,w11,w25
add v12.4s,v12.4s,v24.4s
add x12,x12,x25,lsr#32
add v16.4s,v16.4s,v24.4s
add w13,w13,w26
add v20.4s,v20.4s,v24.4s
add x14,x14,x26,lsr#32
add v2.4s,v2.4s,v26.4s
add w15,w15,w27
add v6.4s,v6.4s,v26.4s
add x16,x16,x27,lsr#32
add v10.4s,v10.4s,v26.4s
add w17,w17,w28
add v14.4s,v14.4s,v26.4s
add x19,x19,x28,lsr#32
add v18.4s,v18.4s,v26.4s
add w20,w20,w30
add v22.4s,v22.4s,v26.4s
add x21,x21,x30,lsr#32
add v19.4s,v19.4s,v31.4s // +4
add x5,x5,x6,lsl#32 // pack
add v23.4s,v23.4s,v31.4s // +4
add x7,x7,x8,lsl#32
add v3.4s,v3.4s,v27.4s
ldp x6,x8,[x1,#0] // load input
add v7.4s,v7.4s,v28.4s
add x9,x9,x10,lsl#32
add v11.4s,v11.4s,v29.4s
add x11,x11,x12,lsl#32
add v15.4s,v15.4s,v30.4s
ldp x10,x12,[x1,#16]
add v19.4s,v19.4s,v27.4s
add x13,x13,x14,lsl#32
add v23.4s,v23.4s,v28.4s
add x15,x15,x16,lsl#32
add v1.4s,v1.4s,v25.4s
ldp x14,x16,[x1,#32]
add v5.4s,v5.4s,v25.4s
add x17,x17,x19,lsl#32
add v9.4s,v9.4s,v25.4s
add x20,x20,x21,lsl#32
add v13.4s,v13.4s,v25.4s
ldp x19,x21,[x1,#48]
add v17.4s,v17.4s,v25.4s
add x1,x1,#64
add v21.4s,v21.4s,v25.4s
#ifdef __AARCH64EB__
rev x5,x5
rev x7,x7
rev x9,x9
rev x11,x11
rev x13,x13
rev x15,x15
rev x17,x17
rev x20,x20
#endif
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
eor x5,x5,x6
eor x7,x7,x8
eor x9,x9,x10
eor x11,x11,x12
eor x13,x13,x14
eor v0.16b,v0.16b,v24.16b
eor x15,x15,x16
eor v1.16b,v1.16b,v25.16b
eor x17,x17,x19
eor v2.16b,v2.16b,v26.16b
eor x20,x20,x21
eor v3.16b,v3.16b,v27.16b
ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64
stp x5,x7,[x0,#0] // store output
add x28,x28,#7 // increment counter
stp x9,x11,[x0,#16]
stp x13,x15,[x0,#32]
stp x17,x20,[x0,#48]
add x0,x0,#64
st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64
ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64
eor v4.16b,v4.16b,v24.16b
eor v5.16b,v5.16b,v25.16b
eor v6.16b,v6.16b,v26.16b
eor v7.16b,v7.16b,v27.16b
st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64
ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64
eor v8.16b,v8.16b,v0.16b
ldp q24,q25,[sp,#0]
eor v9.16b,v9.16b,v1.16b
ldp q26,q27,[sp,#32]
eor v10.16b,v10.16b,v2.16b
eor v11.16b,v11.16b,v3.16b
st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64
ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64
eor v12.16b,v12.16b,v4.16b
eor v13.16b,v13.16b,v5.16b
eor v14.16b,v14.16b,v6.16b
eor v15.16b,v15.16b,v7.16b
st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64
ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64
eor v16.16b,v16.16b,v8.16b
eor v17.16b,v17.16b,v9.16b
eor v18.16b,v18.16b,v10.16b
eor v19.16b,v19.16b,v11.16b
st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64
shl v0.4s,v31.4s,#1 // 4 -> 8
eor v20.16b,v20.16b,v12.16b
eor v21.16b,v21.16b,v13.16b
eor v22.16b,v22.16b,v14.16b
eor v23.16b,v23.16b,v15.16b
st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64
add v27.4s,v27.4s,v0.4s // += 8
add v28.4s,v28.4s,v0.4s
add v29.4s,v29.4s,v0.4s
add v30.4s,v30.4s,v0.4s
b.hs Loop_outer_512_neon
adds x2,x2,#512
ushr v0.4s,v31.4s,#2 // 4 -> 1
ldp d8,d9,[sp,#128+0] // meet ABI requirements
ldp d10,d11,[sp,#128+16]
ldp d12,d13,[sp,#128+32]
ldp d14,d15,[sp,#128+48]
stp q24,q31,[sp,#0] // wipe off-load area
stp q24,q31,[sp,#32]
stp q24,q31,[sp,#64]
b.eq Ldone_512_neon
cmp x2,#192
sub v27.4s,v27.4s,v0.4s // -= 1
sub v28.4s,v28.4s,v0.4s
sub v29.4s,v29.4s,v0.4s
add sp,sp,#128
b.hs Loop_outer_neon
eor v25.16b,v25.16b,v25.16b
eor v26.16b,v26.16b,v26.16b
eor v27.16b,v27.16b,v27.16b
eor v28.16b,v28.16b,v28.16b
eor v29.16b,v29.16b,v29.16b
eor v30.16b,v30.16b,v30.16b
b Loop_outer
Ldone_512_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#128+64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
AARCH64_VALIDATE_LINK_REGISTER
ret
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
marvin-hansen/iggy-streaming-system | 74,015 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/win-aarch64/crypto/cipher_extra/chacha20_poly1305_armv8.S |
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
.section .rodata
.align 7
Lchacha20_consts:
.byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k'
Linc:
.long 1,2,3,4
Lrol8:
.byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14
Lclamp:
.quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC
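// Lrol8 is a tbl permutation that rotates each 32-bit lane left by 8 bits;
// Lclamp is the Poly1305 "r" clamping mask from RFC 8439.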
.text
.def Lpoly_hash_ad_internal
.type 32
.endef
.align 6
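// Absorb the additional data into the Poly1305 state. As used by the callers
// in this file: x3 = AAD pointer, x4 = AAD length, accumulator in x8:x9:x10,
// clamped key r in x16:x17, and x15 = 1, added into the top limb as the
// 2^128 padding bit for each block. Every 16-byte block is added to the
// accumulator and the sum multiplied by r modulo 2^130 - 5; the bits above
// 2^130 are folded back in as 5 = 4 + 1, which is what the "and #-4" /
// "lsr #2" sequences below implement.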
Lpoly_hash_ad_internal:
.cfi_startproc
cbnz x4, Lpoly_hash_intro
ret
Lpoly_hash_intro:
cmp x4, #16
b.lt Lpoly_hash_ad_tail
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #16
b Lpoly_hash_ad_internal
Lpoly_hash_ad_tail:
cbz x4, Lpoly_hash_ad_ret
eor v20.16b, v20.16b, v20.16b // Use T0 to load the AAD
sub x4, x4, #1
Lpoly_hash_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, x4]
mov v20.b[0], w11
subs x4, x4, #1
b.ge Lpoly_hash_tail_16_compose
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lpoly_hash_ad_ret:
ret
.cfi_endproc
/////////////////////////////////
//
// void chacha20_poly1305_seal(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *seal_data);
//
.globl chacha20_poly1305_seal
.def chacha20_poly1305_seal
.type 32
.endef
.align 6
chacha20_poly1305_seal:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]!
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5]
mov x15, #1 // Prepare the Poly1305 state
mov x8, #0
mov x9, #0
mov x10, #0
ldr x12, [x5, #56] // The total cipher text length includes extra_in_len
add x12, x12, x2
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x12
cmp x2, #128
b.le Lseal_128 // Optimization for smaller buffers
// Initially we prepare 5 ChaCha20 blocks. Four to encrypt up to 4 blocks (256 bytes) of plaintext,
// and one for the Poly1305 R and S keys. The first four blocks (A0-A3..D0-D3) are computed vertically,
// the fifth block (A4-D4) horizontally.
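// "Vertically" means the state is transposed: the ld4r loads below give each
// of v0-v18 one state word across the four blocks (v15 also gets the Linc
// vector so the four counters differ), letting a single NEON instruction
// advance the same word of all four blocks; the zip1/zip2 sequence after the
// rounds transposes back. The fifth block (v4, v9, v14, v19) stays in normal
// row layout and later supplies the Poly1305 r and s keys.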
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
sub x5, x5, #32
mov x6, #10
.align 5
Lseal_init_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.hi Lseal_init_rounds
add v15.4s, v15.4s, v25.4s
mov x11, #4
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
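// v27 still holds the Lclamp mask, so the next instruction clamps the
// Poly1305 r key kept in v4 (the first 16 bytes of the fifth block) before
// it is moved to x16:x17 below.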
and v4.16b, v4.16b, v27.16b
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
mov x16, v4.d[0] // Move the R key to GPRs
mov x17, v4.d[1]
mov v27.16b, v9.16b // Store the S key
bl Lpoly_hash_ad_internal
mov x3, x0
cmp x2, #256
b.le Lseal_tail
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #256
mov x6, #4 // In the first run of the loop we need to hash 256 bytes, therefore we hash one block for the first 4 rounds
mov x7, #6 // and two blocks for the remaining 6, for a total of (1 * 4 + 2 * 6) * 16 = 256
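// In other words, the Poly1305 update of ciphertext written by the previous
// pass (read via x3) is interleaved with the ChaCha20 rounds that generate
// the next 320 bytes of keystream.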
Lseal_main_loop:
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
sub x5, x5, #32
.align 5
Lseal_main_loop_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x6, x6, #1
b.ge Lseal_main_loop_rounds
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
subs x7, x7, #1
b.gt Lseal_main_loop_rounds
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
cmp x2, #320
b.le Lseal_tail
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #320
mov x6, #0
mov x7, #10 // For the remainder of the loop we always hash and encrypt 320 bytes per iteration
b Lseal_main_loop
Lseal_tail:
// This part of the function handles the storage and authentication of the last [0,320) bytes
// We assume A0-A4 ... D0-D4 hold at least inl (320 max) bytes of the stream data.
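// Blocks are consumed 64 bytes at a time (Lseal_tail), then 16 bytes at a time
// (Lseal_tail_64), and a final partial block is padded with extra_in (Lseal_tail_16).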
cmp x2, #64
b.lt Lseal_tail_64
// Store and authenticate 64B blocks per iteration
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
// Shift the state left by 64 bytes for the next iteration of the loop
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
mov v1.16b, v2.16b
mov v6.16b, v7.16b
mov v11.16b, v12.16b
mov v16.16b, v17.16b
mov v2.16b, v3.16b
mov v7.16b, v8.16b
mov v12.16b, v13.16b
mov v17.16b, v18.16b
mov v3.16b, v4.16b
mov v8.16b, v9.16b
mov v13.16b, v14.16b
mov v18.16b, v19.16b
b Lseal_tail
Lseal_tail_64:
	ldp	x3, x4, [x5, #48] // extra_in_ptr (x3) and extra_in_len (x4)
// Here we handle the last [0,64) bytes of plaintext
cmp x2, #16
b.lt Lseal_tail_16
// Each iteration encrypt and authenticate a 16B block
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
st1 {v20.16b}, [x0], #16
sub x2, x2, #16
// Shift the state left by 16 bytes for the next iteration of the loop
mov v0.16b, v5.16b
mov v5.16b, v10.16b
mov v10.16b, v15.16b
b Lseal_tail_64
Lseal_tail_16:
// Here we handle the last [0,16) bytes of ciphertext that require a padded block
cbz x2, Lseal_hash_extra
eor v20.16b, v20.16b, v20.16b // Use T0 to load the plaintext/extra in
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask that will only mask the ciphertext bytes
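// v21 ends up with 0xff in the positions holding real plaintext bytes, so only
// those bytes of the keystream are applied; any extra_in bytes in the same block
// stay unencrypted and are hashed together with the ciphertext.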
not v22.16b, v20.16b
mov x6, x2
add x1, x1, x2
cbz x4, Lseal_tail_16_compose // No extra data to pad with, zero padding
mov x7, #16 // We need to load some extra_in first for padding
sub x7, x7, x2
cmp x4, x7
csel x7, x4, x7, lt // Load the minimum of extra_in_len and the amount needed to fill the register
mov x12, x7
add x3, x3, x7
sub x4, x4, x7
Lseal_tail16_compose_extra_in:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x7, x7, #1
b.gt Lseal_tail16_compose_extra_in
add x3, x3, x12
Lseal_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x1, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt Lseal_tail_16_compose
and v0.16b, v0.16b, v21.16b
eor v20.16b, v20.16b, v0.16b
mov v21.16b, v20.16b
Lseal_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt Lseal_tail_16_store
// Hash in the final ct block concatenated with extra_in
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lseal_hash_extra:
cbz x4, Lseal_finalize
Lseal_hash_extra_loop:
cmp x4, #16
b.lt Lseal_hash_extra_tail
ld1 {v20.16b}, [x3], #16
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #16
b Lseal_hash_extra_loop
Lseal_hash_extra_tail:
cbz x4, Lseal_finalize
eor v20.16b, v20.16b, v20.16b // Use T0 to load the remaining extra ciphertext
add x3, x3, x4
Lseal_hash_extra_load:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x3, #-1]!
mov v20.b[0], w11
subs x4, x4, #1
b.gt Lseal_hash_extra_load
// Hash in the final padded extra_in block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lseal_finalize:
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
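// Subtract p = 2^130 - 5, whose 192-bit limbs are [3 : 0xffff..ffff : 0xffff..fffb];
// if there is no borrow (carry set), acc >= p and the reduced value is selected below.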
sub x12, xzr, x15
orr x13, xzr, #3
subs x11, x8, #-5
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs
csel x9, x12, x9, cs
csel x10, x13, x10, cs
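// Add the second half of the one-time key (S, kept in v27) and write out the 16-byte tag.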
mov x11, v27.d[0]
mov x12, v27.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
stp x8, x9, [x5]
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
Lseal_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
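// Generate just three blocks instead: the block with counter 0 supplies the Poly1305
// R and S keys, and the blocks with counters 1 and 2 encrypt up to 128 bytes.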
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
mov x6, #10
Lseal_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi Lseal_128_rounds
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
// Only the first 32 bytes of the third block (counter = 0) are needed,
// so skip updating v12 and v17.
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
bl Lpoly_hash_ad_internal
b Lseal_tail
.cfi_endproc
/////////////////////////////////
//
// void chacha20_poly1305_open(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *aead_data);
//
.globl chacha20_poly1305_open
.def chacha20_poly1305_open
.type 32
.endef
.align 6
chacha20_poly1305_open:
AARCH64_SIGN_LINK_REGISTER
.cfi_startproc
stp x29, x30, [sp, #-80]!
.cfi_def_cfa_offset 80
.cfi_offset w30, -72
.cfi_offset w29, -80
mov x29, sp
// We probably could do .cfi_def_cfa w29, 80 at this point, but since
// we don't actually use the frame pointer like that, it's probably not
// worth bothering.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
.cfi_offset b15, -8
.cfi_offset b14, -16
.cfi_offset b13, -24
.cfi_offset b12, -32
.cfi_offset b11, -40
.cfi_offset b10, -48
.cfi_offset b9, -56
.cfi_offset b8, -64
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values
ld1 {v28.16b - v30.16b}, [x5]
mov x15, #1 // Prepare the Poly1305 state
mov x8, #0
mov x9, #0
mov x10, #0
mov v31.d[0], x4 // Store the input and aad lengths
mov v31.d[1], x2
cmp x2, #128
b.le Lopen_128 // Optimization for smaller buffers
// Initially we prepare a single ChaCha20 block for the Poly1305 R and S keys
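// The first 16 bytes of that block are clamped to form R; the next 16 bytes become S.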
mov v0.16b, v24.16b
mov v5.16b, v28.16b
mov v10.16b, v29.16b
mov v15.16b, v30.16b
mov x6, #10
.align 5
Lopen_init_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.hi Lopen_init_rounds
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
and v0.16b, v0.16b, v27.16b
mov x16, v0.d[0] // Move the R key to GPRs
mov x17, v0.d[1]
mov v27.16b, v5.16b // Store the S key
bl Lpoly_hash_ad_internal
Lopen_ad_done:
mov x3, x1
// Each iteration of the loop hashes 320 bytes and prepares the stream for 320 bytes
Lopen_main_loop:
cmp x2, #192
b.lt Lopen_tail
adrp x11, Lchacha20_consts
add x11, x11, :lo12:Lchacha20_consts
ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11]
mov v4.16b, v24.16b
ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16
mov v9.16b, v28.16b
ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16
mov v14.16b, v29.16b
ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5]
sub x5, x5, #32
add v15.4s, v15.4s, v25.4s
mov v19.16b, v30.16b
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
	lsr	x4, x2, #4 // How many whole blocks we have to hash; this will always be at least 12
sub x4, x4, #10
mov x7, #10
subs x6, x7, x4
subs x6, x7, x4 // itr1 can be negative if we have more than 320 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are full
cbz x7, Lopen_main_loop_rounds_short
.align 5
Lopen_main_loop_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lopen_main_loop_rounds_short:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v9.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v18.8h, v18.8h
rev32 v19.8h, v19.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
eor v8.16b, v8.16b, v13.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v9.4s, #20
sli v8.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
add v3.4s, v3.4s, v7.4s
add v4.4s, v4.4s, v8.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
eor v18.16b, v18.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
add v13.4s, v13.4s, v18.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v14.16b
ushr v9.4s, v8.4s, #25
sli v9.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #4
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #12
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
add v0.4s, v0.4s, v6.4s
add v1.4s, v1.4s, v7.4s
add v2.4s, v2.4s, v8.4s
add v3.4s, v3.4s, v5.4s
add v4.4s, v4.4s, v9.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
rev32 v18.8h, v18.8h
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
rev32 v19.8h, v19.8h
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v6.16b, v6.16b, v12.16b
eor v7.16b, v7.16b, v13.16b
eor v8.16b, v8.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v9.16b, v9.16b, v14.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
ushr v7.4s, v8.4s, #20
sli v7.4s, v8.4s, #12
ushr v8.4s, v5.4s, #20
sli v8.4s, v5.4s, #12
ushr v5.4s, v9.4s, #20
sli v5.4s, v9.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
add v3.4s, v3.4s, v8.4s
add v4.4s, v4.4s, v5.4s
eor v18.16b, v18.16b, v0.16b
eor v15.16b, v15.16b, v1.16b
eor v16.16b, v16.16b, v2.16b
eor v17.16b, v17.16b, v3.16b
eor v19.16b, v19.16b, v4.16b
tbl v18.16b, {v18.16b}, v26.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
tbl v19.16b, {v19.16b}, v26.16b
add v12.4s, v12.4s, v18.4s
add v13.4s, v13.4s, v15.4s
add v10.4s, v10.4s, v16.4s
add v11.4s, v11.4s, v17.4s
add v14.4s, v14.4s, v19.4s
eor v20.16b, v20.16b, v12.16b
eor v6.16b, v6.16b, v13.16b
eor v7.16b, v7.16b, v10.16b
eor v8.16b, v8.16b, v11.16b
eor v5.16b, v5.16b, v14.16b
ushr v9.4s, v5.4s, #25
sli v9.4s, v5.4s, #7
ushr v5.4s, v8.4s, #25
sli v5.4s, v8.4s, #7
ushr v8.4s, v7.4s, #25
sli v8.4s, v7.4s, #7
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v9.16b, v9.16b, v9.16b, #12
ext v14.16b, v14.16b, v14.16b, #8
ext v19.16b, v19.16b, v19.16b, #4
subs x7, x7, #1
b.gt Lopen_main_loop_rounds
subs x6, x6, #1
b.ge Lopen_main_loop_rounds_short
eor v20.16b, v20.16b, v20.16b //zero
not v21.16b, v20.16b // -1
sub v21.4s, v25.4s, v21.4s // Add +1
ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter)
add v19.4s, v19.4s, v20.4s
add v15.4s, v15.4s, v25.4s
mov x11, #5
dup v20.4s, w11
add v25.4s, v25.4s, v20.4s
zip1 v20.4s, v0.4s, v1.4s
zip2 v21.4s, v0.4s, v1.4s
zip1 v22.4s, v2.4s, v3.4s
zip2 v23.4s, v2.4s, v3.4s
zip1 v0.2d, v20.2d, v22.2d
zip2 v1.2d, v20.2d, v22.2d
zip1 v2.2d, v21.2d, v23.2d
zip2 v3.2d, v21.2d, v23.2d
zip1 v20.4s, v5.4s, v6.4s
zip2 v21.4s, v5.4s, v6.4s
zip1 v22.4s, v7.4s, v8.4s
zip2 v23.4s, v7.4s, v8.4s
zip1 v5.2d, v20.2d, v22.2d
zip2 v6.2d, v20.2d, v22.2d
zip1 v7.2d, v21.2d, v23.2d
zip2 v8.2d, v21.2d, v23.2d
zip1 v20.4s, v10.4s, v11.4s
zip2 v21.4s, v10.4s, v11.4s
zip1 v22.4s, v12.4s, v13.4s
zip2 v23.4s, v12.4s, v13.4s
zip1 v10.2d, v20.2d, v22.2d
zip2 v11.2d, v20.2d, v22.2d
zip1 v12.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
zip1 v20.4s, v15.4s, v16.4s
zip2 v21.4s, v15.4s, v16.4s
zip1 v22.4s, v17.4s, v18.4s
zip2 v23.4s, v17.4s, v18.4s
zip1 v15.2d, v20.2d, v22.2d
zip2 v16.2d, v20.2d, v22.2d
zip1 v17.2d, v21.2d, v23.2d
zip2 v18.2d, v21.2d, v23.2d
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v1.4s, v1.4s, v24.4s
add v6.4s, v6.4s, v28.4s
add v11.4s, v11.4s, v29.4s
add v16.4s, v16.4s, v30.4s
add v2.4s, v2.4s, v24.4s
add v7.4s, v7.4s, v28.4s
add v12.4s, v12.4s, v29.4s
add v17.4s, v17.4s, v30.4s
add v3.4s, v3.4s, v24.4s
add v8.4s, v8.4s, v28.4s
add v13.4s, v13.4s, v29.4s
add v18.4s, v18.4s, v30.4s
add v4.4s, v4.4s, v24.4s
add v9.4s, v9.4s, v28.4s
add v14.4s, v14.4s, v29.4s
add v19.4s, v19.4s, v30.4s
// We can always safely store 192 bytes
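// (The main loop is only entered with at least 192 bytes remaining; see the cmp at Lopen_main_loop.)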
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #192
mov v0.16b, v3.16b
mov v5.16b, v8.16b
mov v10.16b, v13.16b
mov v15.16b, v18.16b
cmp x2, #64
b.lt Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v3.16b
eor v21.16b, v21.16b, v8.16b
eor v22.16b, v22.16b, v13.16b
eor v23.16b, v23.16b, v18.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v4.16b
mov v5.16b, v9.16b
mov v10.16b, v14.16b
mov v15.16b, v19.16b
cmp x2, #64
b.lt Lopen_tail_64_store
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v9.16b
eor v22.16b, v22.16b, v14.16b
eor v23.16b, v23.16b, v19.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b Lopen_main_loop
Lopen_tail:
cbz x2, Lopen_finalize
lsr x4, x2, #4 // How many whole blocks we have to hash
cmp x2, #64
b.le Lopen_tail_64
cmp x2, #128
b.le Lopen_tail_128
Lopen_tail_192:
// We need three more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
mov v17.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v21.16b, v21.16b, v21.16b
ins v23.s[0], v25.s[0]
ins v21.d[0], x15
add v22.4s, v23.4s, v21.4s
add v21.4s, v22.4s, v21.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
mov x7, #10
subs x6, x7, x4 // itr1 can be negative if we have more than 160 bytes to hash
csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are hashing
sub x4, x4, x7
cbz x7, Lopen_tail_192_rounds_no_hash
Lopen_tail_192_rounds:
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
Lopen_tail_192_rounds_no_hash:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x7, x7, #1
b.gt Lopen_tail_192_rounds
subs x6, x6, #1
b.ge Lopen_tail_192_rounds_no_hash
// We hashed at most 160 bytes, so up to 32 bytes may still be left
Lopen_tail_192_hash:
cbz x4, Lopen_tail_192_hash_done
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b Lopen_tail_192_hash
Lopen_tail_192_hash_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v12.4s, v12.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v17.4s, v17.4s, v30.4s
add v15.4s, v15.4s, v21.4s
add v16.4s, v16.4s, v23.4s
add v17.4s, v17.4s, v22.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v2.16b
eor v21.16b, v21.16b, v7.16b
eor v22.16b, v22.16b, v12.16b
eor v23.16b, v23.16b, v17.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #128
b Lopen_tail_64_store
Lopen_tail_128:
// We need two more blocks
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v15.16b, v30.16b
mov v16.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
eor v22.16b, v22.16b, v22.16b
ins v23.s[0], v25.s[0]
ins v22.d[0], x15
add v22.4s, v22.4s, v23.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
mov x6, #10
sub x6, x6, x4
Lopen_tail_128_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #4
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
add v1.4s, v1.4s, v6.4s
eor v16.16b, v16.16b, v1.16b
rev32 v16.8h, v16.8h
add v11.4s, v11.4s, v16.4s
eor v6.16b, v6.16b, v11.16b
ushr v20.4s, v6.4s, #20
sli v20.4s, v6.4s, #12
add v1.4s, v1.4s, v20.4s
eor v16.16b, v16.16b, v1.16b
tbl v16.16b, {v16.16b}, v26.16b
add v11.4s, v11.4s, v16.4s
eor v20.16b, v20.16b, v11.16b
ushr v6.4s, v20.4s, #25
sli v6.4s, v20.4s, #7
ext v6.16b, v6.16b, v6.16b, #12
ext v11.16b, v11.16b, v11.16b, #8
ext v16.16b, v16.16b, v16.16b, #4
subs x6, x6, #1
b.gt Lopen_tail_128_rounds
cbz x4, Lopen_tail_128_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b Lopen_tail_128_rounds
Lopen_tail_128_rounds_done:
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v16.4s, v16.4s, v30.4s
add v15.4s, v15.4s, v22.4s
add v16.4s, v16.4s, v23.4s
ld1 {v20.16b - v23.16b}, [x1], #64
eor v20.16b, v20.16b, v1.16b
eor v21.16b, v21.16b, v6.16b
eor v22.16b, v22.16b, v11.16b
eor v23.16b, v23.16b, v16.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
b Lopen_tail_64_store
Lopen_tail_64:
// We just need a single block
mov v0.16b, v24.16b
mov v5.16b, v28.16b
mov v10.16b, v29.16b
mov v15.16b, v30.16b
eor v23.16b, v23.16b, v23.16b
ins v23.s[0], v25.s[0]
add v15.4s, v15.4s, v23.4s
mov x6, #10
sub x6, x6, x4
Lopen_tail_64_rounds:
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
add v0.4s, v0.4s, v5.4s
eor v15.16b, v15.16b, v0.16b
rev32 v15.8h, v15.8h
add v10.4s, v10.4s, v15.4s
eor v5.16b, v5.16b, v10.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
add v0.4s, v0.4s, v20.4s
eor v15.16b, v15.16b, v0.16b
tbl v15.16b, {v15.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
eor v20.16b, v20.16b, v10.16b
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
subs x6, x6, #1
b.gt Lopen_tail_64_rounds
cbz x4, Lopen_tail_64_rounds_done
subs x4, x4, #1
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
b Lopen_tail_64_rounds
Lopen_tail_64_rounds_done:
add v0.4s, v0.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v15.4s, v15.4s, v30.4s
add v15.4s, v15.4s, v23.4s
Lopen_tail_64_store:
cmp x2, #16
b.lt Lopen_tail_16
ld1 {v20.16b}, [x1], #16
eor v20.16b, v20.16b, v0.16b
st1 {v20.16b}, [x0], #16
mov v0.16b, v5.16b
mov v5.16b, v10.16b
mov v10.16b, v15.16b
sub x2, x2, #16
b Lopen_tail_64_store
Lopen_tail_16:
// Here we handle the last [0,16) bytes that require a padded block
cbz x2, Lopen_finalize
eor v20.16b, v20.16b, v20.16b // Use T0 to load the ciphertext
eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask
not v22.16b, v20.16b
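// v21 ends up with 0xff over the remaining ciphertext byte positions; masking v20
// with it zero-pads the partial block before it is hashed.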
add x7, x1, x2
mov x6, x2
Lopen_tail_16_compose:
ext v20.16b, v20.16b, v20.16b, #15
ldrb w11, [x7, #-1]!
mov v20.b[0], w11
ext v21.16b, v22.16b, v21.16b, #15
subs x2, x2, #1
b.gt Lopen_tail_16_compose
and v20.16b, v20.16b, v21.16b
// Hash in the final padded block
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
Lopen_tail_16_store:
umov w11, v20.b[0]
strb w11, [x0], #1
ext v20.16b, v20.16b, v20.16b, #1
subs x6, x6, #1
b.gt Lopen_tail_16_store
Lopen_finalize:
mov x11, v31.d[0]
mov x12, v31.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
// Final reduction step
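// As in the seal path: conditionally subtract p = 2^130 - 5 when acc >= p.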
sub x12, xzr, x15
orr x13, xzr, #3
subs x11, x8, #-5
sbcs x12, x9, x12
sbcs x13, x10, x13
csel x8, x11, x8, cs
csel x9, x12, x9, cs
csel x10, x13, x10, cs
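// Add the S key from v27 and write out the 16-byte tag, as in the seal path.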
mov x11, v27.d[0]
mov x12, v27.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
stp x8, x9, [x5]
ldp d8, d9, [sp, #16]
ldp d10, d11, [sp, #32]
ldp d12, d13, [sp, #48]
ldp d14, d15, [sp, #64]
.cfi_restore b15
.cfi_restore b14
.cfi_restore b13
.cfi_restore b12
.cfi_restore b11
.cfi_restore b10
.cfi_restore b9
.cfi_restore b8
ldp x29, x30, [sp], 80
.cfi_restore w29
.cfi_restore w30
.cfi_def_cfa_offset 0
AARCH64_VALIDATE_LINK_REGISTER
ret
Lopen_128:
// On some architectures preparing 5 blocks for small buffers is wasteful
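// As in Lseal_128, three blocks suffice: counter 0 supplies the Poly1305 keys,
// counters 1 and 2 decrypt up to 128 bytes of data.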
eor v25.16b, v25.16b, v25.16b
mov x11, #1
mov v25.s[0], w11
mov v0.16b, v24.16b
mov v1.16b, v24.16b
mov v2.16b, v24.16b
mov v5.16b, v28.16b
mov v6.16b, v28.16b
mov v7.16b, v28.16b
mov v10.16b, v29.16b
mov v11.16b, v29.16b
mov v12.16b, v29.16b
mov v17.16b, v30.16b
add v15.4s, v17.4s, v25.4s
add v16.4s, v15.4s, v25.4s
mov x6, #10
Lopen_128_rounds:
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #4
ext v6.16b, v6.16b, v6.16b, #4
ext v7.16b, v7.16b, v7.16b, #4
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #12
ext v16.16b, v16.16b, v16.16b, #12
ext v17.16b, v17.16b, v17.16b, #12
add v0.4s, v0.4s, v5.4s
add v1.4s, v1.4s, v6.4s
add v2.4s, v2.4s, v7.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
rev32 v15.8h, v15.8h
rev32 v16.8h, v16.8h
rev32 v17.8h, v17.8h
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v5.16b, v5.16b, v10.16b
eor v6.16b, v6.16b, v11.16b
eor v7.16b, v7.16b, v12.16b
ushr v20.4s, v5.4s, #20
sli v20.4s, v5.4s, #12
ushr v5.4s, v6.4s, #20
sli v5.4s, v6.4s, #12
ushr v6.4s, v7.4s, #20
sli v6.4s, v7.4s, #12
add v0.4s, v0.4s, v20.4s
add v1.4s, v1.4s, v5.4s
add v2.4s, v2.4s, v6.4s
eor v15.16b, v15.16b, v0.16b
eor v16.16b, v16.16b, v1.16b
eor v17.16b, v17.16b, v2.16b
tbl v15.16b, {v15.16b}, v26.16b
tbl v16.16b, {v16.16b}, v26.16b
tbl v17.16b, {v17.16b}, v26.16b
add v10.4s, v10.4s, v15.4s
add v11.4s, v11.4s, v16.4s
add v12.4s, v12.4s, v17.4s
eor v20.16b, v20.16b, v10.16b
eor v5.16b, v5.16b, v11.16b
eor v6.16b, v6.16b, v12.16b
ushr v7.4s, v6.4s, #25
sli v7.4s, v6.4s, #7
ushr v6.4s, v5.4s, #25
sli v6.4s, v5.4s, #7
ushr v5.4s, v20.4s, #25
sli v5.4s, v20.4s, #7
ext v5.16b, v5.16b, v5.16b, #12
ext v6.16b, v6.16b, v6.16b, #12
ext v7.16b, v7.16b, v7.16b, #12
ext v10.16b, v10.16b, v10.16b, #8
ext v11.16b, v11.16b, v11.16b, #8
ext v12.16b, v12.16b, v12.16b, #8
ext v15.16b, v15.16b, v15.16b, #4
ext v16.16b, v16.16b, v16.16b, #4
ext v17.16b, v17.16b, v17.16b, #4
subs x6, x6, #1
b.hi Lopen_128_rounds
add v0.4s, v0.4s, v24.4s
add v1.4s, v1.4s, v24.4s
add v2.4s, v2.4s, v24.4s
add v5.4s, v5.4s, v28.4s
add v6.4s, v6.4s, v28.4s
add v7.4s, v7.4s, v28.4s
add v10.4s, v10.4s, v29.4s
add v11.4s, v11.4s, v29.4s
add v30.4s, v30.4s, v25.4s
add v15.4s, v15.4s, v30.4s
add v30.4s, v30.4s, v25.4s
add v16.4s, v16.4s, v30.4s
and v2.16b, v2.16b, v27.16b
mov x16, v2.d[0] // Move the R key to GPRs
mov x17, v2.d[1]
mov v27.16b, v7.16b // Store the S key
bl Lpoly_hash_ad_internal
Lopen_128_store:
cmp x2, #64
b.lt Lopen_128_store_64
ld1 {v20.16b - v23.16b}, [x1], #64
mov x11, v20.d[0]
mov x12, v20.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v21.d[0]
mov x12, v21.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v22.d[0]
mov x12, v22.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
mov x11, v23.d[0]
mov x12, v23.d[1]
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
eor v20.16b, v20.16b, v0.16b
eor v21.16b, v21.16b, v5.16b
eor v22.16b, v22.16b, v10.16b
eor v23.16b, v23.16b, v15.16b
st1 {v20.16b - v23.16b}, [x0], #64
sub x2, x2, #64
mov v0.16b, v1.16b
mov v5.16b, v6.16b
mov v10.16b, v11.16b
mov v15.16b, v16.16b
Lopen_128_store_64:
lsr x4, x2, #4
mov x3, x1
Lopen_128_hash_64:
cbz x4, Lopen_tail_64_store
ldp x11, x12, [x3], 16
adds x8, x8, x11
adcs x9, x9, x12
adc x10, x10, x15
mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0
umulh x12, x8, x16
mul x13, x9, x16
umulh x14, x9, x16
adds x12, x12, x13
mul x13, x10, x16
adc x13, x13, x14
mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0]
umulh x8, x8, x17
adds x12, x12, x14
mul x14, x9, x17
umulh x9, x9, x17
adcs x14, x14, x8
mul x10, x10, x17
adc x10, x10, x9
adds x13, x13, x14
adc x14, x10, xzr
and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3)
and x8, x13, #-4
extr x13, x14, x13, #2
adds x8, x8, x11
lsr x11, x14, #2
adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits
adds x8, x8, x13
adcs x9, x9, x12
adc x10, x10, xzr // At this point acc2 has the value of 4 at most
sub x4, x4, #1
b Lopen_128_hash_64
.cfi_endproc
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
|
marvin-hansen/iggy-streaming-system
| 9,066
|
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-arm/crypto/test/trampoline-armv4.S
|
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
.syntax unified
.arch armv7-a
.fpu vfp
.text
@ abi_test_trampoline loads callee-saved registers from |state|, calls |func|
@ with |argv|, then saves the callee-saved registers into |state|. It returns
@ the result of |func|. The |unwind| argument is unused.
@ uint32_t abi_test_trampoline(void (*func)(...), CallerState *state,
@ const uint32_t *argv, size_t argc,
@ int unwind);
.type abi_test_trampoline, %function
.globl abi_test_trampoline
.hidden abi_test_trampoline
.align 4
abi_test_trampoline:
@ Save parameters and all callee-saved registers. For convenience, we
@ save r9 on iOS even though it's volatile.
vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
stmdb sp!, {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,lr}
@ Reserve stack space for six (10-4) stack parameters, plus an extra 4
@ bytes to keep it 8-byte-aligned (see AAPCS, section 5.3).
sub sp, sp, #28
	@ Every register in AAPCS is either non-volatile or a parameter (except
	@ r9 on iOS), so by the time of the actual call this code has no scratch
	@ registers left. First fill in stack parameters while there are
	@ registers to spare.
cmp r3, #4
bls .Lstack_args_done
mov r4, sp @ r4 is the output pointer.
add r5, r2, r3, lsl #2 @ Set r5 to the end of argv.
add r2, r2, #16 @ Skip four arguments.
.Lstack_args_loop:
ldr r6, [r2], #4
cmp r2, r5
str r6, [r4], #4
bne .Lstack_args_loop
.Lstack_args_done:
@ Load registers from |r1|.
vldmia r1!, {d8,d9,d10,d11,d12,d13,d14,d15}
#if defined(__APPLE__)
@ r9 is not volatile on iOS.
ldmia r1!, {r4,r5,r6,r7,r8,r10-r11}
#else
ldmia r1!, {r4,r5,r6,r7,r8,r9,r10,r11}
#endif
@ Load register parameters. This uses up our remaining registers, so we
@ repurpose lr as scratch space.
ldr r3, [sp, #40] @ Reload argc.
	ldr	lr, [sp, #36]	@ Load argv into lr.
cmp r3, #3
bhi .Larg_r3
beq .Larg_r2
cmp r3, #1
bhi .Larg_r1
beq .Larg_r0
b .Largs_done
.Larg_r3:
ldr r3, [lr, #12] @ argv[3]
.Larg_r2:
ldr r2, [lr, #8] @ argv[2]
.Larg_r1:
ldr r1, [lr, #4] @ argv[1]
.Larg_r0:
ldr r0, [lr] @ argv[0]
.Largs_done:
@ With every other register in use, load the function pointer into lr
@ and call the function.
ldr lr, [sp, #28]
blx lr
@ r1-r3 are free for use again. The trampoline only supports
@ single-return functions. Pass r4-r11 to the caller.
ldr r1, [sp, #32]
vstmia r1!, {d8,d9,d10,d11,d12,d13,d14,d15}
#if defined(__APPLE__)
@ r9 is not volatile on iOS.
stmia r1!, {r4,r5,r6,r7,r8,r10-r11}
#else
stmia r1!, {r4,r5,r6,r7,r8,r9,r10,r11}
#endif
@ Unwind the stack and restore registers.
add sp, sp, #44 @ 44 = 28+16
ldmia sp!, {r4,r5,r6,r7,r8,r9,r10,r11,lr} @ Skip r0-r3 (see +16 above).
vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
bx lr
.size abi_test_trampoline,.-abi_test_trampoline
.type abi_test_clobber_r0, %function
.globl abi_test_clobber_r0
.hidden abi_test_clobber_r0
.align 4
abi_test_clobber_r0:
mov r0, #0
bx lr
.size abi_test_clobber_r0,.-abi_test_clobber_r0
.type abi_test_clobber_r1, %function
.globl abi_test_clobber_r1
.hidden abi_test_clobber_r1
.align 4
abi_test_clobber_r1:
mov r1, #0
bx lr
.size abi_test_clobber_r1,.-abi_test_clobber_r1
.type abi_test_clobber_r2, %function
.globl abi_test_clobber_r2
.hidden abi_test_clobber_r2
.align 4
abi_test_clobber_r2:
mov r2, #0
bx lr
.size abi_test_clobber_r2,.-abi_test_clobber_r2
.type abi_test_clobber_r3, %function
.globl abi_test_clobber_r3
.hidden abi_test_clobber_r3
.align 4
abi_test_clobber_r3:
mov r3, #0
bx lr
.size abi_test_clobber_r3,.-abi_test_clobber_r3
.type abi_test_clobber_r4, %function
.globl abi_test_clobber_r4
.hidden abi_test_clobber_r4
.align 4
abi_test_clobber_r4:
mov r4, #0
bx lr
.size abi_test_clobber_r4,.-abi_test_clobber_r4
.type abi_test_clobber_r5, %function
.globl abi_test_clobber_r5
.hidden abi_test_clobber_r5
.align 4
abi_test_clobber_r5:
mov r5, #0
bx lr
.size abi_test_clobber_r5,.-abi_test_clobber_r5
.type abi_test_clobber_r6, %function
.globl abi_test_clobber_r6
.hidden abi_test_clobber_r6
.align 4
abi_test_clobber_r6:
mov r6, #0
bx lr
.size abi_test_clobber_r6,.-abi_test_clobber_r6
.type abi_test_clobber_r7, %function
.globl abi_test_clobber_r7
.hidden abi_test_clobber_r7
.align 4
abi_test_clobber_r7:
mov r7, #0
bx lr
.size abi_test_clobber_r7,.-abi_test_clobber_r7
.type abi_test_clobber_r8, %function
.globl abi_test_clobber_r8
.hidden abi_test_clobber_r8
.align 4
abi_test_clobber_r8:
mov r8, #0
bx lr
.size abi_test_clobber_r8,.-abi_test_clobber_r8
.type abi_test_clobber_r9, %function
.globl abi_test_clobber_r9
.hidden abi_test_clobber_r9
.align 4
abi_test_clobber_r9:
mov r9, #0
bx lr
.size abi_test_clobber_r9,.-abi_test_clobber_r9
.type abi_test_clobber_r10, %function
.globl abi_test_clobber_r10
.hidden abi_test_clobber_r10
.align 4
abi_test_clobber_r10:
mov r10, #0
bx lr
.size abi_test_clobber_r10,.-abi_test_clobber_r10
.type abi_test_clobber_r11, %function
.globl abi_test_clobber_r11
.hidden abi_test_clobber_r11
.align 4
abi_test_clobber_r11:
mov r11, #0
bx lr
.size abi_test_clobber_r11,.-abi_test_clobber_r11
.type abi_test_clobber_r12, %function
.globl abi_test_clobber_r12
.hidden abi_test_clobber_r12
.align 4
abi_test_clobber_r12:
mov r12, #0
bx lr
.size abi_test_clobber_r12,.-abi_test_clobber_r12
.type abi_test_clobber_d0, %function
.globl abi_test_clobber_d0
.hidden abi_test_clobber_d0
.align 4
abi_test_clobber_d0:
mov r0, #0
vmov s0, r0
vmov s1, r0
bx lr
.size abi_test_clobber_d0,.-abi_test_clobber_d0
.type abi_test_clobber_d1, %function
.globl abi_test_clobber_d1
.hidden abi_test_clobber_d1
.align 4
abi_test_clobber_d1:
mov r0, #0
vmov s2, r0
vmov s3, r0
bx lr
.size abi_test_clobber_d1,.-abi_test_clobber_d1
.type abi_test_clobber_d2, %function
.globl abi_test_clobber_d2
.hidden abi_test_clobber_d2
.align 4
abi_test_clobber_d2:
mov r0, #0
vmov s4, r0
vmov s5, r0
bx lr
.size abi_test_clobber_d2,.-abi_test_clobber_d2
.type abi_test_clobber_d3, %function
.globl abi_test_clobber_d3
.hidden abi_test_clobber_d3
.align 4
abi_test_clobber_d3:
mov r0, #0
vmov s6, r0
vmov s7, r0
bx lr
.size abi_test_clobber_d3,.-abi_test_clobber_d3
.type abi_test_clobber_d4, %function
.globl abi_test_clobber_d4
.hidden abi_test_clobber_d4
.align 4
abi_test_clobber_d4:
mov r0, #0
vmov s8, r0
vmov s9, r0
bx lr
.size abi_test_clobber_d4,.-abi_test_clobber_d4
.type abi_test_clobber_d5, %function
.globl abi_test_clobber_d5
.hidden abi_test_clobber_d5
.align 4
abi_test_clobber_d5:
mov r0, #0
vmov s10, r0
vmov s11, r0
bx lr
.size abi_test_clobber_d5,.-abi_test_clobber_d5
.type abi_test_clobber_d6, %function
.globl abi_test_clobber_d6
.hidden abi_test_clobber_d6
.align 4
abi_test_clobber_d6:
mov r0, #0
vmov s12, r0
vmov s13, r0
bx lr
.size abi_test_clobber_d6,.-abi_test_clobber_d6
.type abi_test_clobber_d7, %function
.globl abi_test_clobber_d7
.hidden abi_test_clobber_d7
.align 4
abi_test_clobber_d7:
mov r0, #0
vmov s14, r0
vmov s15, r0
bx lr
.size abi_test_clobber_d7,.-abi_test_clobber_d7
.type abi_test_clobber_d8, %function
.globl abi_test_clobber_d8
.hidden abi_test_clobber_d8
.align 4
abi_test_clobber_d8:
mov r0, #0
vmov s16, r0
vmov s17, r0
bx lr
.size abi_test_clobber_d8,.-abi_test_clobber_d8
.type abi_test_clobber_d9, %function
.globl abi_test_clobber_d9
.hidden abi_test_clobber_d9
.align 4
abi_test_clobber_d9:
mov r0, #0
vmov s18, r0
vmov s19, r0
bx lr
.size abi_test_clobber_d9,.-abi_test_clobber_d9
.type abi_test_clobber_d10, %function
.globl abi_test_clobber_d10
.hidden abi_test_clobber_d10
.align 4
abi_test_clobber_d10:
mov r0, #0
vmov s20, r0
vmov s21, r0
bx lr
.size abi_test_clobber_d10,.-abi_test_clobber_d10
.type abi_test_clobber_d11, %function
.globl abi_test_clobber_d11
.hidden abi_test_clobber_d11
.align 4
abi_test_clobber_d11:
mov r0, #0
vmov s22, r0
vmov s23, r0
bx lr
.size abi_test_clobber_d11,.-abi_test_clobber_d11
.type abi_test_clobber_d12, %function
.globl abi_test_clobber_d12
.hidden abi_test_clobber_d12
.align 4
abi_test_clobber_d12:
mov r0, #0
vmov s24, r0
vmov s25, r0
bx lr
.size abi_test_clobber_d12,.-abi_test_clobber_d12
.type abi_test_clobber_d13, %function
.globl abi_test_clobber_d13
.hidden abi_test_clobber_d13
.align 4
abi_test_clobber_d13:
mov r0, #0
vmov s26, r0
vmov s27, r0
bx lr
.size abi_test_clobber_d13,.-abi_test_clobber_d13
.type abi_test_clobber_d14, %function
.globl abi_test_clobber_d14
.hidden abi_test_clobber_d14
.align 4
abi_test_clobber_d14:
mov r0, #0
vmov s28, r0
vmov s29, r0
bx lr
.size abi_test_clobber_d14,.-abi_test_clobber_d14
.type abi_test_clobber_d15, %function
.globl abi_test_clobber_d15
.hidden abi_test_clobber_d15
.align 4
abi_test_clobber_d15:
mov r0, #0
vmov s30, r0
vmov s31, r0
bx lr
.size abi_test_clobber_d15,.-abi_test_clobber_d15
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
marvin-hansen/iggy-streaming-system | 19,390 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-arm/crypto/fipsmodule/aesv8-armx.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
#include <openssl/arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.arch armv7-a @ don't confuse not-so-latest binutils with armv8 :-)
.fpu neon
.code 32
#undef __thumb2__
.align 5
.Lrcon:
.long 0x01,0x01,0x01,0x01
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d @ rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b
.text
.globl aes_hw_set_encrypt_key
.hidden aes_hw_set_encrypt_key
.type aes_hw_set_encrypt_key,%function
.align 5
aes_hw_set_encrypt_key:
.Lenc_key:
mov r3,#-1
cmp r0,#0
beq .Lenc_key_abort
cmp r2,#0
beq .Lenc_key_abort
mov r3,#-2
cmp r1,#128
blt .Lenc_key_abort
cmp r1,#256
bgt .Lenc_key_abort
tst r1,#0x3f
bne .Lenc_key_abort
adr r3,.Lrcon
cmp r1,#192
veor q0,q0,q0
vld1.8 {q3},[r0]!
mov r1,#8 @ reuse r1
vld1.32 {q1,q2},[r3]!
blt .Loop128
beq .L192
b .L256
.align 4
.Loop128:
vtbl.8 d20,{q3},d4
vtbl.8 d21,{q3},d5
vext.8 q9,q0,q3,#12
vst1.32 {q3},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
subs r1,r1,#1
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q10,q10,q1
veor q3,q3,q9
vshl.u8 q1,q1,#1
veor q3,q3,q10
bne .Loop128
vld1.32 {q1},[r3]
vtbl.8 d20,{q3},d4
vtbl.8 d21,{q3},d5
vext.8 q9,q0,q3,#12
vst1.32 {q3},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q10,q10,q1
veor q3,q3,q9
vshl.u8 q1,q1,#1
veor q3,q3,q10
vtbl.8 d20,{q3},d4
vtbl.8 d21,{q3},d5
vext.8 q9,q0,q3,#12
vst1.32 {q3},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q10,q10,q1
veor q3,q3,q9
veor q3,q3,q10
vst1.32 {q3},[r2]
add r2,r2,#0x50
mov r12,#10
b .Ldone
.align 4
.L192:
vld1.8 {d16},[r0]!
vmov.i8 q10,#8 @ borrow q10
vst1.32 {q3},[r2]!
vsub.i8 q2,q2,q10 @ adjust the mask
.Loop192:
vtbl.8 d20,{q8},d4
vtbl.8 d21,{q8},d5
vext.8 q9,q0,q3,#12
vst1.32 {d16},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
subs r1,r1,#1
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vdup.32 q9,d7[1]
veor q9,q9,q8
veor q10,q10,q1
vext.8 q8,q0,q8,#12
vshl.u8 q1,q1,#1
veor q8,q8,q9
veor q3,q3,q10
veor q8,q8,q10
vst1.32 {q3},[r2]!
bne .Loop192
mov r12,#12
add r2,r2,#0x20
b .Ldone
.align 4
.L256:
vld1.8 {q8},[r0]
mov r1,#7
mov r12,#14
vst1.32 {q3},[r2]!
.Loop256:
vtbl.8 d20,{q8},d4
vtbl.8 d21,{q8},d5
vext.8 q9,q0,q3,#12
vst1.32 {q8},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
subs r1,r1,#1
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q3,q3,q9
vext.8 q9,q0,q9,#12
veor q10,q10,q1
veor q3,q3,q9
vshl.u8 q1,q1,#1
veor q3,q3,q10
vst1.32 {q3},[r2]!
beq .Ldone
vdup.32 q10,d7[1]
vext.8 q9,q0,q8,#12
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
veor q8,q8,q9
vext.8 q9,q0,q9,#12
veor q8,q8,q9
vext.8 q9,q0,q9,#12
veor q8,q8,q9
veor q8,q8,q10
b .Loop256
.Ldone:
str r12,[r2]
mov r3,#0
.Lenc_key_abort:
mov r0,r3 @ return value
bx lr
.size aes_hw_set_encrypt_key,.-aes_hw_set_encrypt_key
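@ Hedged usage sketch for the key-setup entry point above. The register use
@ (r0 = user key bytes, r1 = key length in bits, r2 = output AES_KEY) suggests
@ the conventional BoringSSL-style prototype below; this is an assumption drawn
@ from the assembly, so verify it against the crate's C headers before relying
@ on it:
@     int aes_hw_set_encrypt_key(const uint8_t *user_key, int bits, AES_KEY *key);
@ Return value (via r3/r0): 0 on success, -1 for a NULL key or schedule pointer,
@ -2 for a key length other than 128, 192 or 256 bits.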
.globl aes_hw_set_decrypt_key
.hidden aes_hw_set_decrypt_key
.type aes_hw_set_decrypt_key,%function
.align 5
aes_hw_set_decrypt_key:
stmdb sp!,{r4,lr}
bl .Lenc_key
cmp r0,#0
bne .Ldec_key_abort
sub r2,r2,#240 @ restore original r2
mov r4,#-16
add r0,r2,r12,lsl#4 @ end of key schedule
vld1.32 {q0},[r2]
vld1.32 {q1},[r0]
vst1.32 {q0},[r0],r4
vst1.32 {q1},[r2]!
.Loop_imc:
vld1.32 {q0},[r2]
vld1.32 {q1},[r0]
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
vst1.32 {q0},[r0],r4
vst1.32 {q1},[r2]!
cmp r0,r2
bhi .Loop_imc
vld1.32 {q0},[r2]
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
vst1.32 {q0},[r0]
eor r0,r0,r0 @ return value
.Ldec_key_abort:
ldmia sp!,{r4,pc}
.size aes_hw_set_decrypt_key,.-aes_hw_set_decrypt_key
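@ Descriptive note on aes_hw_set_decrypt_key above (same assumed prototype as
@ aes_hw_set_encrypt_key): it first builds the encryption schedule via .Lenc_key,
@ then walks that schedule from both ends, swapping round keys and applying
@ AESIMC (InvMixColumns) to every round key except the first and last, the
@ standard way of deriving an equivalent-inverse-cipher schedule in place.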
.globl aes_hw_encrypt
.hidden aes_hw_encrypt
.type aes_hw_encrypt,%function
.align 5
aes_hw_encrypt:
AARCH64_VALID_CALL_TARGET
ldr r3,[r2,#240]
vld1.32 {q0},[r2]!
vld1.8 {q2},[r0]
sub r3,r3,#2
vld1.32 {q1},[r2]!
.Loop_enc:
.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
vld1.32 {q0},[r2]!
subs r3,r3,#2
.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
vld1.32 {q1},[r2]!
bgt .Loop_enc
.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
vld1.32 {q0},[r2]
.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
veor q2,q2,q0
vst1.8 {q2},[r1]
bx lr
.size aes_hw_encrypt,.-aes_hw_encrypt
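@ Hedged usage sketch for the single-block entry points. Based on the register
@ use above (r0 = 16-byte input, r1 = 16-byte output, r2 = AES_KEY with the
@ round count at offset 240), the assumed prototypes are
@     void aes_hw_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
@     void aes_hw_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
@ aes_hw_decrypt below mirrors this routine with aesd/aesimc in place of
@ aese/aesmc. Treat the prototypes as assumptions and confirm them against the
@ C headers.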
.globl aes_hw_decrypt
.hidden aes_hw_decrypt
.type aes_hw_decrypt,%function
.align 5
aes_hw_decrypt:
AARCH64_VALID_CALL_TARGET
ldr r3,[r2,#240]
vld1.32 {q0},[r2]!
vld1.8 {q2},[r0]
sub r3,r3,#2
vld1.32 {q1},[r2]!
.Loop_dec:
.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
vld1.32 {q0},[r2]!
subs r3,r3,#2
.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
vld1.32 {q1},[r2]!
bgt .Loop_dec
.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
vld1.32 {q0},[r2]
.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
veor q2,q2,q0
vst1.8 {q2},[r1]
bx lr
.size aes_hw_decrypt,.-aes_hw_decrypt
.globl aes_hw_cbc_encrypt
.hidden aes_hw_cbc_encrypt
.type aes_hw_cbc_encrypt,%function
.align 5
aes_hw_cbc_encrypt:
mov ip,sp
stmdb sp!,{r4,r5,r6,r7,r8,lr}
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
ldmia ip,{r4,r5} @ load remaining args
subs r2,r2,#16
mov r8,#16
blo .Lcbc_abort
moveq r8,#0
cmp r5,#0 @ en- or decrypting?
ldr r5,[r3,#240]
and r2,r2,#-16
vld1.8 {q6},[r4]
vld1.8 {q0},[r0],r8
vld1.32 {q8,q9},[r3] @ load key schedule...
sub r5,r5,#6
add r7,r3,r5,lsl#4 @ pointer to last 7 round keys
sub r5,r5,#2
vld1.32 {q10,q11},[r7]!
vld1.32 {q12,q13},[r7]!
vld1.32 {q14,q15},[r7]!
vld1.32 {q7},[r7]
add r7,r3,#32
mov r6,r5
beq .Lcbc_dec
cmp r5,#2
veor q0,q0,q6
veor q5,q8,q7
beq .Lcbc_enc128
vld1.32 {q2,q3},[r7]
add r7,r3,#16
add r6,r3,#16*4
add r12,r3,#16*5
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
add r14,r3,#16*6
add r3,r3,#16*7
b .Lenter_cbc_enc
.align 4
.Loop_cbc_enc:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vst1.8 {q6},[r1]!
.Lenter_cbc_enc:
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q8},[r6]
cmp r5,#4
.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q9},[r12]
beq .Lcbc_enc192
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q8},[r14]
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q9},[r3]
nop
.Lcbc_enc192:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
subs r2,r2,#16
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
moveq r8,#0
.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.8 {q8},[r0],r8
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
veor q8,q8,q5
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q9},[r7] @ re-pre-load rndkey[1]
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
veor q6,q0,q7
bhs .Loop_cbc_enc
vst1.8 {q6},[r1]!
b .Lcbc_done
.align 5
.Lcbc_enc128:
vld1.32 {q2,q3},[r7]
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
b .Lenter_cbc_enc128
.Loop_cbc_enc128:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vst1.8 {q6},[r1]!
.Lenter_cbc_enc128:
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
subs r2,r2,#16
.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
moveq r8,#0
.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.8 {q8},[r0],r8
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
veor q8,q8,q5
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
veor q6,q0,q7
bhs .Loop_cbc_enc128
vst1.8 {q6},[r1]!
b .Lcbc_done
.align 5
.Lcbc_dec:
vld1.8 {q10},[r0]!
subs r2,r2,#32 @ bias
add r6,r5,#2
vorr q3,q0,q0
vorr q1,q0,q0
vorr q11,q10,q10
blo .Lcbc_dec_tail
vorr q1,q10,q10
vld1.8 {q10},[r0]!
vorr q2,q0,q0
vorr q3,q1,q1
vorr q11,q10,q10
.Loop3x_cbc_dec:
.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q9},[r7]!
bgt .Loop3x_cbc_dec
.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q4,q6,q7
subs r2,r2,#0x30
veor q5,q2,q7
	movlo	r6,r2			@ r6 is zero at this point
.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q9,q3,q7
add r0,r0,r6 @ r0 is adjusted in such way that
@ at exit from the loop q1-q10
@ are loaded with last "words"
vorr q6,q11,q11
mov r7,r3
.byte 0x68,0x03,0xb0,0xf3 @ aesd q0,q12
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.8 {q2},[r0]!
.byte 0x6a,0x03,0xb0,0xf3 @ aesd q0,q13
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.8 {q3},[r0]!
.byte 0x6c,0x03,0xb0,0xf3 @ aesd q0,q14
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.8 {q11},[r0]!
.byte 0x6e,0x03,0xb0,0xf3 @ aesd q0,q15
.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
vld1.32 {q8},[r7]! @ re-pre-load rndkey[0]
add r6,r5,#2
veor q4,q4,q0
veor q5,q5,q1
veor q10,q10,q9
vld1.32 {q9},[r7]! @ re-pre-load rndkey[1]
vst1.8 {q4},[r1]!
vorr q0,q2,q2
vst1.8 {q5},[r1]!
vorr q1,q3,q3
vst1.8 {q10},[r1]!
vorr q10,q11,q11
bhs .Loop3x_cbc_dec
cmn r2,#0x30
beq .Lcbc_done
nop
.Lcbc_dec_tail:
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q9},[r7]!
bgt .Lcbc_dec_tail
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
cmn r2,#0x20
.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q5,q6,q7
.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q9,q3,q7
.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
beq .Lcbc_dec_one
veor q5,q5,q1
veor q9,q9,q10
vorr q6,q11,q11
vst1.8 {q5},[r1]!
vst1.8 {q9},[r1]!
b .Lcbc_done
.Lcbc_dec_one:
veor q5,q5,q10
vorr q6,q11,q11
vst1.8 {q5},[r1]!
.Lcbc_done:
vst1.8 {q6},[r4]
.Lcbc_abort:
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!,{r4,r5,r6,r7,r8,pc}
.size aes_hw_cbc_encrypt,.-aes_hw_cbc_encrypt
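@ Hedged usage sketch for aes_hw_cbc_encrypt above. The register and stack use
@ (r0 = in, r1 = out, r2 = byte length, r3 = AES_KEY, then the IV pointer and
@ the enc/dec flag on the stack) matches the conventional prototype below; this
@ is an assumption, so check the crate's headers:
@     void aes_hw_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
@                             const AES_KEY *key, uint8_t *ivec, int enc);
@ Data is processed in whole 16-byte blocks and the updated IV is written back
@ through the IV pointer at .Lcbc_done for chaining across calls.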
.globl aes_hw_ctr32_encrypt_blocks
.hidden aes_hw_ctr32_encrypt_blocks
.type aes_hw_ctr32_encrypt_blocks,%function
.align 5
aes_hw_ctr32_encrypt_blocks:
mov ip,sp
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,lr}
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
ldr r4, [ip] @ load remaining arg
ldr r5,[r3,#240]
ldr r8, [r4, #12]
vld1.32 {q0},[r4]
vld1.32 {q8,q9},[r3] @ load key schedule...
sub r5,r5,#4
mov r12,#16
cmp r2,#2
add r7,r3,r5,lsl#4 @ pointer to last 5 round keys
sub r5,r5,#2
vld1.32 {q12,q13},[r7]!
vld1.32 {q14,q15},[r7]!
vld1.32 {q7},[r7]
add r7,r3,#32
mov r6,r5
@ ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
@ affected by silicon errata #1742098 [0] and #1655431 [1],
@ respectively, where the second instruction of an aese/aesmc
@ instruction pair may execute twice if an interrupt is taken right
@ after the first instruction consumes an input register of which a
@ single 32-bit lane has been updated the last time it was modified.
@
@ This function uses a counter in one 32-bit lane. The code
@ could write to q1 and q10 directly, but that trips these bugs.
@ We write to q6 and copy to the final register as a workaround.
@
@ [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
@ [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
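@ A short restatement of the workaround pattern used below (rN and qX are
@ placeholders, not actual registers in this file): q6 is the staging register,
@ so only one counter lane is updated there and then the whole register is
@ copied, e.g.
@     vmov.32 d13[1], rN   @ touch only one 32-bit lane of q6
@     vorr    qX, q6, q6   @ copy all of q6 into the register aese will consume
@ which keeps the aese/aesmc pair from ever reading a register whose most
@ recent write was a single-lane update.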
#ifndef __ARMEB__
rev r8, r8
#endif
add r10, r8, #1
vorr q6,q0,q0
rev r10, r10
vmov.32 d13[1],r10
add r8, r8, #2
vorr q1,q6,q6
bls .Lctr32_tail
rev r12, r8
vmov.32 d13[1],r12
sub r2,r2,#3 @ bias
vorr q10,q6,q6
b .Loop3x_ctr32
.align 4
.Loop3x_ctr32:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
vld1.32 {q9},[r7]!
bgt .Loop3x_ctr32
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x83,0xb0,0xf3 @ aesmc q4,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1
vld1.8 {q2},[r0]!
add r9,r8,#1
.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
vld1.8 {q3},[r0]!
rev r9,r9
.byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
vld1.8 {q11},[r0]!
mov r7,r3
.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
.byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10
.byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
veor q2,q2,q7
add r10,r8,#2
.byte 0x28,0x23,0xf0,0xf3 @ aese q9,q12
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
veor q3,q3,q7
add r8,r8,#3
.byte 0x2a,0x83,0xb0,0xf3 @ aese q4,q13
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
	@ Note the logic to update q0, q1, and q10 is written to work
@ around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
@ 32-bit mode. See the comment above.
veor q11,q11,q7
vmov.32 d13[1], r9
.byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
vorr q0,q6,q6
rev r10,r10
.byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
vmov.32 d13[1], r10
rev r12,r8
.byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
vorr q1,q6,q6
vmov.32 d13[1], r12
.byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
vorr q10,q6,q6
subs r2,r2,#3
.byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15
.byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15
.byte 0x2e,0x23,0xf0,0xf3 @ aese q9,q15
veor q2,q2,q4
vld1.32 {q8},[r7]! @ re-pre-load rndkey[0]
vst1.8 {q2},[r1]!
veor q3,q3,q5
mov r6,r5
vst1.8 {q3},[r1]!
veor q11,q11,q9
vld1.32 {q9},[r7]! @ re-pre-load rndkey[1]
vst1.8 {q11},[r1]!
bhs .Loop3x_ctr32
adds r2,r2,#3
beq .Lctr32_done
.Lctr32_tail:
cmp r2,#1
blt .Lctr32_done @ if len = 0, go to done
mov r12,#16
moveq r12,#0
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.32 {q9},[r7]!
bgt .Lctr32_tail
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.8 {q2},[r0],r12
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x28,0x23,0xb0,0xf3 @ aese q1,q12
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.8 {q3},[r0]
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2a,0x23,0xb0,0xf3 @ aese q1,q13
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
veor q2,q2,q7
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2c,0x23,0xb0,0xf3 @ aese q1,q14
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
veor q3,q3,q7
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
.byte 0x2e,0x23,0xb0,0xf3 @ aese q1,q15
veor q2,q2,q0
veor q3,q3,q1
vst1.8 {q2},[r1]!
cmp r12, #0
beq .Lctr32_done
vst1.8 {q3},[r1]
.Lctr32_done:
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,pc}
.size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks
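@ Hedged usage sketch for aes_hw_ctr32_encrypt_blocks above. From the register
@ use (r0 = in, r1 = out, r2 = number of 16-byte blocks, r3 = AES_KEY, counter
@ block pointer on the stack), the assumed prototype is
@     void aes_hw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
@                                      size_t blocks, const AES_KEY *key,
@                                      const uint8_t ivec[16]);
@ Only the low 32 bits of the counter (the big-endian last word of ivec) are
@ incremented per block, as the rev/add sequence above shows. Confirm the
@ prototype against the C headers before relying on it.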
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
marvin-hansen/iggy-streaming-system | 32,442 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-arm/crypto/fipsmodule/bsaes-armv7.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
@ Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the OpenSSL license (the "License"). You may not use
@ this file except in compliance with the License. You can obtain a copy
@ in the file LICENSE in the source distribution or at
@ https://www.openssl.org/source/license.html
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@
@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
@ of Linaro. Permission to use under GPL terms is granted.
@ ====================================================================
@ Bit-sliced AES for ARM NEON
@
@ February 2012.
@
@ This implementation is a direct adaptation of the bsaes-x86_64 module for
@ ARM NEON, except that this module is endian-neutral [in the sense that
@ it can be compiled for either endianness] by courtesy of vld1.8's
@ neutrality. Initial version doesn't implement interface to OpenSSL,
@ only low-level primitives and unsupported entry points, just enough
@ to collect performance results, which for Cortex-A8 core are:
@
@ encrypt 19.5 cycles per byte processed with 128-bit key
@ decrypt 22.1 cycles per byte processed with 128-bit key
@ key conv. 440 cycles per 128-bit key/0.18 of 8x block
@
@ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7,
@ which is [much] worse than anticipated (for further details see
@ http://www.openssl.org/~appro/Snapdragon-S4.html).
@
@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
@ manages in 20.0 cycles].
@
@ When comparing to x86_64 results keep in mind that NEON unit is
@ [mostly] single-issue and thus can't [fully] benefit from
@ instruction-level parallelism. And when comparing to aes-armv4
@ results keep in mind key schedule conversion overhead (see
@ bsaes-x86_64.pl for further details)...
@
@ <appro@openssl.org>
@ April-August 2013
@ Add CBC, CTR and XTS subroutines and adapt for kernel use; courtesy of Ard.
#ifndef __KERNEL__
# include <openssl/arm_arch.h>
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15}
# define VFP_ABI_FRAME 0x40
#else
# define VFP_ABI_PUSH
# define VFP_ABI_POP
# define VFP_ABI_FRAME 0
# define BSAES_ASM_EXTENDED_KEY
# define XTS_CHAIN_TWEAK
# define __ARM_MAX_ARCH__ 7
#endif
#ifdef __thumb__
# define adrl adr
#endif
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.text
.syntax unified @ ARMv7-capable assembler is expected to handle this
#if defined(__thumb2__) && !defined(__APPLE__)
.thumb
#else
.code 32
# undef __thumb2__
#endif
.type _bsaes_decrypt8,%function
.align 4
_bsaes_decrypt8:
adr r6,.
vldmia r4!, {q9} @ round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,.LM0ISR
#else
add r6,r6,#.LM0ISR-_bsaes_decrypt8
#endif
vldmia r6!, {q8} @ .LM0ISR
veor q10, q0, q9 @ xor with round0 key
veor q11, q1, q9
vtbl.8 d0, {q10}, d16
vtbl.8 d1, {q10}, d17
veor q12, q2, q9
vtbl.8 d2, {q11}, d16
vtbl.8 d3, {q11}, d17
veor q13, q3, q9
vtbl.8 d4, {q12}, d16
vtbl.8 d5, {q12}, d17
veor q14, q4, q9
vtbl.8 d6, {q13}, d16
vtbl.8 d7, {q13}, d17
veor q15, q5, q9
vtbl.8 d8, {q14}, d16
vtbl.8 d9, {q14}, d17
veor q10, q6, q9
vtbl.8 d10, {q15}, d16
vtbl.8 d11, {q15}, d17
veor q11, q7, q9
vtbl.8 d12, {q10}, d16
vtbl.8 d13, {q10}, d17
vtbl.8 d14, {q11}, d16
vtbl.8 d15, {q11}, d17
vmov.i8 q8,#0x55 @ compose .LBS0
vmov.i8 q9,#0x33 @ compose .LBS1
vshr.u64 q10, q6, #1
vshr.u64 q11, q4, #1
veor q10, q10, q7
veor q11, q11, q5
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #1
veor q5, q5, q11
vshl.u64 q11, q11, #1
veor q6, q6, q10
veor q4, q4, q11
vshr.u64 q10, q2, #1
vshr.u64 q11, q0, #1
veor q10, q10, q3
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q3, q3, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q2, q2, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose .LBS2
vshr.u64 q10, q5, #2
vshr.u64 q11, q4, #2
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q9
vand q11, q11, q9
veor q7, q7, q10
vshl.u64 q10, q10, #2
veor q6, q6, q11
vshl.u64 q11, q11, #2
veor q5, q5, q10
veor q4, q4, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q3
veor q11, q11, q2
vand q10, q10, q9
vand q11, q11, q9
veor q3, q3, q10
vshl.u64 q10, q10, #2
veor q2, q2, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q3, #4
vshr.u64 q11, q2, #4
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q6, q6, q11
vshl.u64 q11, q11, #4
veor q3, q3, q10
veor q2, q2, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q5
veor q11, q11, q4
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q4, q4, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
sub r5,r5,#1
b .Ldec_sbox
.align 4
.Ldec_loop:
vldmia r4!, {q8,q9,q10,q11}
veor q8, q8, q0
veor q9, q9, q1
vtbl.8 d0, {q8}, d24
vtbl.8 d1, {q8}, d25
vldmia r4!, {q8}
veor q10, q10, q2
vtbl.8 d2, {q9}, d24
vtbl.8 d3, {q9}, d25
vldmia r4!, {q9}
veor q11, q11, q3
vtbl.8 d4, {q10}, d24
vtbl.8 d5, {q10}, d25
vldmia r4!, {q10}
vtbl.8 d6, {q11}, d24
vtbl.8 d7, {q11}, d25
vldmia r4!, {q11}
veor q8, q8, q4
veor q9, q9, q5
vtbl.8 d8, {q8}, d24
vtbl.8 d9, {q8}, d25
veor q10, q10, q6
vtbl.8 d10, {q9}, d24
vtbl.8 d11, {q9}, d25
veor q11, q11, q7
vtbl.8 d12, {q10}, d24
vtbl.8 d13, {q10}, d25
vtbl.8 d14, {q11}, d24
vtbl.8 d15, {q11}, d25
.Ldec_sbox:
veor q1, q1, q4
veor q3, q3, q4
veor q4, q4, q7
veor q1, q1, q6
veor q2, q2, q7
veor q6, q6, q4
veor q0, q0, q1
veor q2, q2, q5
veor q7, q7, q6
veor q3, q3, q0
veor q5, q5, q0
veor q1, q1, q3
veor q11, q3, q0
veor q10, q7, q4
veor q9, q1, q6
veor q13, q4, q0
vmov q8, q10
veor q12, q5, q2
vorr q10, q10, q9
veor q15, q11, q8
vand q14, q11, q12
vorr q11, q11, q12
veor q12, q12, q9
vand q8, q8, q9
veor q9, q6, q2
vand q15, q15, q12
vand q13, q13, q9
veor q9, q3, q7
veor q12, q1, q5
veor q11, q11, q13
veor q10, q10, q13
vand q13, q9, q12
vorr q9, q9, q12
veor q11, q11, q15
veor q8, q8, q13
veor q10, q10, q14
veor q9, q9, q15
veor q8, q8, q14
vand q12, q4, q6
veor q9, q9, q14
vand q13, q0, q2
vand q14, q7, q1
vorr q15, q3, q5
veor q11, q11, q12
veor q9, q9, q14
veor q8, q8, q15
veor q10, q10, q13
@ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
@ new smaller inversion
vand q14, q11, q9
vmov q12, q8
veor q13, q10, q14
veor q15, q8, q14
veor q14, q8, q14 @ q14=q15
vbsl q13, q9, q8
vbsl q15, q11, q10
veor q11, q11, q10
vbsl q12, q13, q14
vbsl q8, q14, q13
vand q14, q12, q15
veor q9, q9, q8
veor q14, q14, q11
veor q12, q5, q2
veor q8, q1, q6
veor q10, q15, q14
vand q10, q10, q5
veor q5, q5, q1
vand q11, q1, q15
vand q5, q5, q14
veor q1, q11, q10
veor q5, q5, q11
veor q15, q15, q13
veor q14, q14, q9
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q2
veor q12, q12, q8
veor q2, q2, q6
vand q8, q8, q15
vand q6, q6, q13
vand q12, q12, q14
vand q2, q2, q9
veor q8, q8, q12
veor q2, q2, q6
veor q12, q12, q11
veor q6, q6, q10
veor q5, q5, q12
veor q2, q2, q12
veor q1, q1, q8
veor q6, q6, q8
veor q12, q3, q0
veor q8, q7, q4
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q0
veor q12, q12, q8
veor q0, q0, q4
vand q8, q8, q15
vand q4, q4, q13
vand q12, q12, q14
vand q0, q0, q9
veor q8, q8, q12
veor q0, q0, q4
veor q12, q12, q11
veor q4, q4, q10
veor q15, q15, q13
veor q14, q14, q9
veor q10, q15, q14
vand q10, q10, q3
veor q3, q3, q7
vand q11, q7, q15
vand q3, q3, q14
veor q7, q11, q10
veor q3, q3, q11
veor q3, q3, q12
veor q0, q0, q12
veor q7, q7, q8
veor q4, q4, q8
veor q1, q1, q7
veor q6, q6, q5
veor q4, q4, q1
veor q2, q2, q7
veor q5, q5, q7
veor q4, q4, q2
veor q7, q7, q0
veor q4, q4, q5
veor q3, q3, q6
veor q6, q6, q1
veor q3, q3, q4
veor q4, q4, q0
veor q7, q7, q3
subs r5,r5,#1
bcc .Ldec_done
@ multiplication by 0x05-0x00-0x04-0x00
vext.8 q8, q0, q0, #8
vext.8 q14, q3, q3, #8
vext.8 q15, q5, q5, #8
veor q8, q8, q0
vext.8 q9, q1, q1, #8
veor q14, q14, q3
vext.8 q10, q6, q6, #8
veor q15, q15, q5
vext.8 q11, q4, q4, #8
veor q9, q9, q1
vext.8 q12, q2, q2, #8
veor q10, q10, q6
vext.8 q13, q7, q7, #8
veor q11, q11, q4
veor q12, q12, q2
veor q13, q13, q7
veor q0, q0, q14
veor q1, q1, q14
veor q6, q6, q8
veor q2, q2, q10
veor q4, q4, q9
veor q1, q1, q15
veor q6, q6, q15
veor q2, q2, q14
veor q7, q7, q11
veor q4, q4, q14
veor q3, q3, q12
veor q2, q2, q15
veor q7, q7, q15
veor q5, q5, q13
vext.8 q8, q0, q0, #12 @ x0 <<< 32
vext.8 q9, q1, q1, #12
veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
vext.8 q10, q6, q6, #12
veor q1, q1, q9
vext.8 q11, q4, q4, #12
veor q6, q6, q10
vext.8 q12, q2, q2, #12
veor q4, q4, q11
vext.8 q13, q7, q7, #12
veor q2, q2, q12
vext.8 q14, q3, q3, #12
veor q7, q7, q13
vext.8 q15, q5, q5, #12
veor q3, q3, q14
veor q9, q9, q0
veor q5, q5, q15
	vext.8	q0, q0, q0, #8		@ (x0 ^ (x0 <<< 32)) <<< 64
veor q10, q10, q1
veor q8, q8, q5
veor q9, q9, q5
vext.8 q1, q1, q1, #8
veor q13, q13, q2
veor q0, q0, q8
veor q14, q14, q7
veor q1, q1, q9
vext.8 q8, q2, q2, #8
veor q12, q12, q4
vext.8 q9, q7, q7, #8
veor q15, q15, q3
vext.8 q2, q4, q4, #8
veor q11, q11, q6
vext.8 q7, q5, q5, #8
veor q12, q12, q5
vext.8 q4, q3, q3, #8
veor q11, q11, q5
vext.8 q3, q6, q6, #8
veor q5, q9, q13
veor q11, q11, q2
veor q7, q7, q15
veor q6, q4, q14
veor q4, q8, q12
veor q2, q3, q10
vmov q3, q11
@ vmov q5, q9
vldmia r6, {q12} @ .LISR
ite eq @ Thumb2 thing, sanity check in ARM
addeq r6,r6,#0x10
bne .Ldec_loop
vldmia r6, {q12} @ .LISRM0
b .Ldec_loop
.align 4
.Ldec_done:
vmov.i8 q8,#0x55 @ compose .LBS0
vmov.i8 q9,#0x33 @ compose .LBS1
vshr.u64 q10, q3, #1
vshr.u64 q11, q2, #1
veor q10, q10, q5
veor q11, q11, q7
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #1
veor q7, q7, q11
vshl.u64 q11, q11, #1
veor q3, q3, q10
veor q2, q2, q11
vshr.u64 q10, q6, #1
vshr.u64 q11, q0, #1
veor q10, q10, q4
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q4, q4, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q6, q6, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose .LBS2
vshr.u64 q10, q7, #2
vshr.u64 q11, q2, #2
veor q10, q10, q5
veor q11, q11, q3
vand q10, q10, q9
vand q11, q11, q9
veor q5, q5, q10
vshl.u64 q10, q10, #2
veor q3, q3, q11
vshl.u64 q11, q11, #2
veor q7, q7, q10
veor q2, q2, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q4
veor q11, q11, q6
vand q10, q10, q9
vand q11, q11, q9
veor q4, q4, q10
vshl.u64 q10, q10, #2
veor q6, q6, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q4, #4
vshr.u64 q11, q6, #4
veor q10, q10, q5
veor q11, q11, q3
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q3, q3, q11
vshl.u64 q11, q11, #4
veor q4, q4, q10
veor q6, q6, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q7
veor q11, q11, q2
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q2, q2, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
vldmia r4, {q8} @ last round key
veor q6, q6, q8
veor q4, q4, q8
veor q2, q2, q8
veor q7, q7, q8
veor q3, q3, q8
veor q5, q5, q8
veor q0, q0, q8
veor q1, q1, q8
bx lr
.size _bsaes_decrypt8,.-_bsaes_decrypt8
.type _bsaes_const,%object
.align 6
_bsaes_const:
.LM0ISR:@ InvShiftRows constants
.quad 0x0a0e0206070b0f03, 0x0004080c0d010509
.LISR:
.quad 0x0504070602010003, 0x0f0e0d0c080b0a09
.LISRM0:
.quad 0x01040b0e0205080f, 0x0306090c00070a0d
.LM0SR:@ ShiftRows constants
.quad 0x0a0e02060f03070b, 0x0004080c05090d01
.LSR:
.quad 0x0504070600030201, 0x0f0e0d0c0a09080b
.LSRM0:
.quad 0x0304090e00050a0f, 0x01060b0c0207080d
.LM0:
.quad 0x02060a0e03070b0f, 0x0004080c0105090d
.LREVM0SR:
.quad 0x090d01050c000408, 0x03070b0f060a0e02
.byte 66,105,116,45,115,108,105,99,101,100,32,65,69,83,32,102,111,114,32,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 6
.size _bsaes_const,.-_bsaes_const
.type _bsaes_encrypt8,%function
.align 4
_bsaes_encrypt8:
adr r6,.
vldmia r4!, {q9} @ round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,.LM0SR
#else
sub r6,r6,#_bsaes_encrypt8-.LM0SR
#endif
vldmia r6!, {q8} @ .LM0SR
_bsaes_encrypt8_alt:
veor q10, q0, q9 @ xor with round0 key
veor q11, q1, q9
vtbl.8 d0, {q10}, d16
vtbl.8 d1, {q10}, d17
veor q12, q2, q9
vtbl.8 d2, {q11}, d16
vtbl.8 d3, {q11}, d17
veor q13, q3, q9
vtbl.8 d4, {q12}, d16
vtbl.8 d5, {q12}, d17
veor q14, q4, q9
vtbl.8 d6, {q13}, d16
vtbl.8 d7, {q13}, d17
veor q15, q5, q9
vtbl.8 d8, {q14}, d16
vtbl.8 d9, {q14}, d17
veor q10, q6, q9
vtbl.8 d10, {q15}, d16
vtbl.8 d11, {q15}, d17
veor q11, q7, q9
vtbl.8 d12, {q10}, d16
vtbl.8 d13, {q10}, d17
vtbl.8 d14, {q11}, d16
vtbl.8 d15, {q11}, d17
_bsaes_encrypt8_bitslice:
vmov.i8 q8,#0x55 @ compose .LBS0
vmov.i8 q9,#0x33 @ compose .LBS1
vshr.u64 q10, q6, #1
vshr.u64 q11, q4, #1
veor q10, q10, q7
veor q11, q11, q5
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #1
veor q5, q5, q11
vshl.u64 q11, q11, #1
veor q6, q6, q10
veor q4, q4, q11
vshr.u64 q10, q2, #1
vshr.u64 q11, q0, #1
veor q10, q10, q3
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q3, q3, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q2, q2, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose .LBS2
vshr.u64 q10, q5, #2
vshr.u64 q11, q4, #2
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q9
vand q11, q11, q9
veor q7, q7, q10
vshl.u64 q10, q10, #2
veor q6, q6, q11
vshl.u64 q11, q11, #2
veor q5, q5, q10
veor q4, q4, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q3
veor q11, q11, q2
vand q10, q10, q9
vand q11, q11, q9
veor q3, q3, q10
vshl.u64 q10, q10, #2
veor q2, q2, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q3, #4
vshr.u64 q11, q2, #4
veor q10, q10, q7
veor q11, q11, q6
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q6, q6, q11
vshl.u64 q11, q11, #4
veor q3, q3, q10
veor q2, q2, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q5
veor q11, q11, q4
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q4, q4, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
sub r5,r5,#1
b .Lenc_sbox
.align 4
.Lenc_loop:
vldmia r4!, {q8,q9,q10,q11}
veor q8, q8, q0
veor q9, q9, q1
vtbl.8 d0, {q8}, d24
vtbl.8 d1, {q8}, d25
vldmia r4!, {q8}
veor q10, q10, q2
vtbl.8 d2, {q9}, d24
vtbl.8 d3, {q9}, d25
vldmia r4!, {q9}
veor q11, q11, q3
vtbl.8 d4, {q10}, d24
vtbl.8 d5, {q10}, d25
vldmia r4!, {q10}
vtbl.8 d6, {q11}, d24
vtbl.8 d7, {q11}, d25
vldmia r4!, {q11}
veor q8, q8, q4
veor q9, q9, q5
vtbl.8 d8, {q8}, d24
vtbl.8 d9, {q8}, d25
veor q10, q10, q6
vtbl.8 d10, {q9}, d24
vtbl.8 d11, {q9}, d25
veor q11, q11, q7
vtbl.8 d12, {q10}, d24
vtbl.8 d13, {q10}, d25
vtbl.8 d14, {q11}, d24
vtbl.8 d15, {q11}, d25
.Lenc_sbox:
veor q2, q2, q1
veor q5, q5, q6
veor q3, q3, q0
veor q6, q6, q2
veor q5, q5, q0
veor q6, q6, q3
veor q3, q3, q7
veor q7, q7, q5
veor q3, q3, q4
veor q4, q4, q5
veor q2, q2, q7
veor q3, q3, q1
veor q1, q1, q5
veor q11, q7, q4
veor q10, q1, q2
veor q9, q5, q3
veor q13, q2, q4
vmov q8, q10
veor q12, q6, q0
vorr q10, q10, q9
veor q15, q11, q8
vand q14, q11, q12
vorr q11, q11, q12
veor q12, q12, q9
vand q8, q8, q9
veor q9, q3, q0
vand q15, q15, q12
vand q13, q13, q9
veor q9, q7, q1
veor q12, q5, q6
veor q11, q11, q13
veor q10, q10, q13
vand q13, q9, q12
vorr q9, q9, q12
veor q11, q11, q15
veor q8, q8, q13
veor q10, q10, q14
veor q9, q9, q15
veor q8, q8, q14
vand q12, q2, q3
veor q9, q9, q14
vand q13, q4, q0
vand q14, q1, q5
vorr q15, q7, q6
veor q11, q11, q12
veor q9, q9, q14
veor q8, q8, q15
veor q10, q10, q13
@ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
@ new smaller inversion
vand q14, q11, q9
vmov q12, q8
veor q13, q10, q14
veor q15, q8, q14
veor q14, q8, q14 @ q14=q15
vbsl q13, q9, q8
vbsl q15, q11, q10
veor q11, q11, q10
vbsl q12, q13, q14
vbsl q8, q14, q13
vand q14, q12, q15
veor q9, q9, q8
veor q14, q14, q11
veor q12, q6, q0
veor q8, q5, q3
veor q10, q15, q14
vand q10, q10, q6
veor q6, q6, q5
vand q11, q5, q15
vand q6, q6, q14
veor q5, q11, q10
veor q6, q6, q11
veor q15, q15, q13
veor q14, q14, q9
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q0
veor q12, q12, q8
veor q0, q0, q3
vand q8, q8, q15
vand q3, q3, q13
vand q12, q12, q14
vand q0, q0, q9
veor q8, q8, q12
veor q0, q0, q3
veor q12, q12, q11
veor q3, q3, q10
veor q6, q6, q12
veor q0, q0, q12
veor q5, q5, q8
veor q3, q3, q8
veor q12, q7, q4
veor q8, q1, q2
veor q11, q15, q14
veor q10, q13, q9
vand q11, q11, q12
vand q10, q10, q4
veor q12, q12, q8
veor q4, q4, q2
vand q8, q8, q15
vand q2, q2, q13
vand q12, q12, q14
vand q4, q4, q9
veor q8, q8, q12
veor q4, q4, q2
veor q12, q12, q11
veor q2, q2, q10
veor q15, q15, q13
veor q14, q14, q9
veor q10, q15, q14
vand q10, q10, q7
veor q7, q7, q1
vand q11, q1, q15
vand q7, q7, q14
veor q1, q11, q10
veor q7, q7, q11
veor q7, q7, q12
veor q4, q4, q12
veor q1, q1, q8
veor q2, q2, q8
veor q7, q7, q0
veor q1, q1, q6
veor q6, q6, q0
veor q4, q4, q7
veor q0, q0, q1
veor q1, q1, q5
veor q5, q5, q2
veor q2, q2, q3
veor q3, q3, q5
veor q4, q4, q5
veor q6, q6, q3
subs r5,r5,#1
bcc .Lenc_done
vext.8 q8, q0, q0, #12 @ x0 <<< 32
vext.8 q9, q1, q1, #12
veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
vext.8 q10, q4, q4, #12
veor q1, q1, q9
vext.8 q11, q6, q6, #12
veor q4, q4, q10
vext.8 q12, q3, q3, #12
veor q6, q6, q11
vext.8 q13, q7, q7, #12
veor q3, q3, q12
vext.8 q14, q2, q2, #12
veor q7, q7, q13
vext.8 q15, q5, q5, #12
veor q2, q2, q14
veor q9, q9, q0
veor q5, q5, q15
	vext.8	q0, q0, q0, #8		@ (x0 ^ (x0 <<< 32)) <<< 64
veor q10, q10, q1
veor q8, q8, q5
veor q9, q9, q5
vext.8 q1, q1, q1, #8
veor q13, q13, q3
veor q0, q0, q8
veor q14, q14, q7
veor q1, q1, q9
vext.8 q8, q3, q3, #8
veor q12, q12, q6
vext.8 q9, q7, q7, #8
veor q15, q15, q2
vext.8 q3, q6, q6, #8
veor q11, q11, q4
vext.8 q7, q5, q5, #8
veor q12, q12, q5
vext.8 q6, q2, q2, #8
veor q11, q11, q5
vext.8 q2, q4, q4, #8
veor q5, q9, q13
veor q4, q8, q12
veor q3, q3, q11
veor q7, q7, q15
veor q6, q6, q14
@ vmov q4, q8
veor q2, q2, q10
@ vmov q5, q9
vldmia r6, {q12} @ .LSR
	ite	eq				@ Thumb2 thing, sanity check in ARM
addeq r6,r6,#0x10
bne .Lenc_loop
vldmia r6, {q12} @ .LSRM0
b .Lenc_loop
.align 4
.Lenc_done:
vmov.i8 q8,#0x55 @ compose .LBS0
vmov.i8 q9,#0x33 @ compose .LBS1
vshr.u64 q10, q2, #1
vshr.u64 q11, q3, #1
veor q10, q10, q5
veor q11, q11, q7
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #1
veor q7, q7, q11
vshl.u64 q11, q11, #1
veor q2, q2, q10
veor q3, q3, q11
vshr.u64 q10, q4, #1
vshr.u64 q11, q0, #1
veor q10, q10, q6
veor q11, q11, q1
vand q10, q10, q8
vand q11, q11, q8
veor q6, q6, q10
vshl.u64 q10, q10, #1
veor q1, q1, q11
vshl.u64 q11, q11, #1
veor q4, q4, q10
veor q0, q0, q11
vmov.i8 q8,#0x0f @ compose .LBS2
vshr.u64 q10, q7, #2
vshr.u64 q11, q3, #2
veor q10, q10, q5
veor q11, q11, q2
vand q10, q10, q9
vand q11, q11, q9
veor q5, q5, q10
vshl.u64 q10, q10, #2
veor q2, q2, q11
vshl.u64 q11, q11, #2
veor q7, q7, q10
veor q3, q3, q11
vshr.u64 q10, q1, #2
vshr.u64 q11, q0, #2
veor q10, q10, q6
veor q11, q11, q4
vand q10, q10, q9
vand q11, q11, q9
veor q6, q6, q10
vshl.u64 q10, q10, #2
veor q4, q4, q11
vshl.u64 q11, q11, #2
veor q1, q1, q10
veor q0, q0, q11
vshr.u64 q10, q6, #4
vshr.u64 q11, q4, #4
veor q10, q10, q5
veor q11, q11, q2
vand q10, q10, q8
vand q11, q11, q8
veor q5, q5, q10
vshl.u64 q10, q10, #4
veor q2, q2, q11
vshl.u64 q11, q11, #4
veor q6, q6, q10
veor q4, q4, q11
vshr.u64 q10, q1, #4
vshr.u64 q11, q0, #4
veor q10, q10, q7
veor q11, q11, q3
vand q10, q10, q8
vand q11, q11, q8
veor q7, q7, q10
vshl.u64 q10, q10, #4
veor q3, q3, q11
vshl.u64 q11, q11, #4
veor q1, q1, q10
veor q0, q0, q11
vldmia r4, {q8} @ last round key
veor q4, q4, q8
veor q6, q6, q8
veor q3, q3, q8
veor q7, q7, q8
veor q2, q2, q8
veor q5, q5, q8
veor q0, q0, q8
veor q1, q1, q8
bx lr
.size _bsaes_encrypt8,.-_bsaes_encrypt8
.type _bsaes_key_convert,%function
.align 4
_bsaes_key_convert:
adr r6,.
vld1.8 {q7}, [r4]! @ load round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,.LM0
#else
sub r6,r6,#_bsaes_key_convert-.LM0
#endif
vld1.8 {q15}, [r4]! @ load round 1 key
vmov.i8 q8, #0x01 @ bit masks
vmov.i8 q9, #0x02
vmov.i8 q10, #0x04
vmov.i8 q11, #0x08
vmov.i8 q12, #0x10
vmov.i8 q13, #0x20
vldmia r6, {q14} @ .LM0
#ifdef __ARMEL__
vrev32.8 q7, q7
vrev32.8 q15, q15
#endif
sub r5,r5,#1
vstmia r12!, {q7} @ save round 0 key
b .Lkey_loop
.align 4
.Lkey_loop:
vtbl.8 d14,{q15},d28
vtbl.8 d15,{q15},d29
vmov.i8 q6, #0x40
vmov.i8 q15, #0x80
vtst.8 q0, q7, q8
vtst.8 q1, q7, q9
vtst.8 q2, q7, q10
vtst.8 q3, q7, q11
vtst.8 q4, q7, q12
vtst.8 q5, q7, q13
vtst.8 q6, q7, q6
vtst.8 q7, q7, q15
vld1.8 {q15}, [r4]! @ load next round key
vmvn q0, q0 @ "pnot"
vmvn q1, q1
vmvn q5, q5
vmvn q6, q6
#ifdef __ARMEL__
vrev32.8 q15, q15
#endif
subs r5,r5,#1
vstmia r12!,{q0,q1,q2,q3,q4,q5,q6,q7} @ write bit-sliced round key
bne .Lkey_loop
vmov.i8 q7,#0x63 @ compose .L63
@ don't save last round key
bx lr
.size _bsaes_key_convert,.-_bsaes_key_convert
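@ Descriptive note on _bsaes_key_convert above: the round-0 key is stored
@ unsliced in front, and each subsequent round key is expanded into eight
@ bit-sliced mask vectors (q0-q7, i.e. 128 bytes per round) written out with
@ vstmia; the last round key is handed back in q15 for the caller to fix up.
@ This is why the callers below reserve roughly (rounds << 7) + 96 bytes of
@ stack for the converted schedule. The layout is an inference from this code,
@ not a documented ABI.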
.globl bsaes_cbc_encrypt
.hidden bsaes_cbc_encrypt
.type bsaes_cbc_encrypt,%function
.align 5
bsaes_cbc_encrypt:
@ In OpenSSL, this function had a fallback to aes_nohw_cbc_encrypt for
@ short inputs. We patch this out, using bsaes for all input sizes.
@ it is up to the caller to make sure we are called with enc == 0
mov ip, sp
stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr}
VFP_ABI_PUSH
ldr r8, [ip] @ IV is 1st arg on the stack
mov r2, r2, lsr#4 @ len in 16 byte blocks
sub sp, #0x10 @ scratch space to carry over the IV
mov r9, sp @ save sp
ldr r10, [r3, #240] @ get # of rounds
#ifndef BSAES_ASM_EXTENDED_KEY
@ allocate the key schedule on the stack
sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
	add	r12, #96			@ size of bit-sliced key schedule
@ populate the key schedule
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
mov sp, r12 @ sp is sp
bl _bsaes_key_convert
vldmia sp, {q6}
vstmia r12, {q15} @ save last round key
veor q7, q7, q6 @ fix up round 0 key
vstmia sp, {q7}
#else
ldr r12, [r3, #244]
eors r12, #1
beq 0f
@ populate the key schedule
str r12, [r3, #244]
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
add r12, r3, #248 @ pass key schedule
bl _bsaes_key_convert
add r4, r3, #248
vldmia r4, {q6}
vstmia r12, {q15} @ save last round key
veor q7, q7, q6 @ fix up round 0 key
vstmia r4, {q7}
.align 2
#endif
vld1.8 {q15}, [r8] @ load IV
b .Lcbc_dec_loop
.align 4
.Lcbc_dec_loop:
subs r2, r2, #0x8
bmi .Lcbc_dec_loop_finish
vld1.8 {q0,q1}, [r0]! @ load input
vld1.8 {q2,q3}, [r0]!
#ifndef BSAES_ASM_EXTENDED_KEY
mov r4, sp @ pass the key
#else
add r4, r3, #248
#endif
vld1.8 {q4,q5}, [r0]!
mov r5, r10
vld1.8 {q6,q7}, [r0]
sub r0, r0, #0x60
vstmia r9, {q15} @ put aside IV
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q10,q11}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vld1.8 {q12,q13}, [r0]!
veor q4, q4, q10
veor q2, q2, q11
vld1.8 {q14,q15}, [r0]!
veor q7, q7, q12
vst1.8 {q0,q1}, [r1]! @ write output
veor q3, q3, q13
vst1.8 {q6}, [r1]!
veor q5, q5, q14
vst1.8 {q4}, [r1]!
vst1.8 {q2}, [r1]!
vst1.8 {q7}, [r1]!
vst1.8 {q3}, [r1]!
vst1.8 {q5}, [r1]!
b .Lcbc_dec_loop
.Lcbc_dec_loop_finish:
adds r2, r2, #8
beq .Lcbc_dec_done
@ Set up most parameters for the _bsaes_decrypt8 call.
#ifndef BSAES_ASM_EXTENDED_KEY
mov r4, sp @ pass the key
#else
add r4, r3, #248
#endif
mov r5, r10
vstmia r9, {q15} @ put aside IV
vld1.8 {q0}, [r0]! @ load input
cmp r2, #2
blo .Lcbc_dec_one
vld1.8 {q1}, [r0]!
beq .Lcbc_dec_two
vld1.8 {q2}, [r0]!
cmp r2, #4
blo .Lcbc_dec_three
vld1.8 {q3}, [r0]!
beq .Lcbc_dec_four
vld1.8 {q4}, [r0]!
cmp r2, #6
blo .Lcbc_dec_five
vld1.8 {q5}, [r0]!
beq .Lcbc_dec_six
vld1.8 {q6}, [r0]!
sub r0, r0, #0x70
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q10,q11}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vld1.8 {q12,q13}, [r0]!
veor q4, q4, q10
veor q2, q2, q11
vld1.8 {q15}, [r0]!
veor q7, q7, q12
vst1.8 {q0,q1}, [r1]! @ write output
veor q3, q3, q13
vst1.8 {q6}, [r1]!
vst1.8 {q4}, [r1]!
vst1.8 {q2}, [r1]!
vst1.8 {q7}, [r1]!
vst1.8 {q3}, [r1]!
b .Lcbc_dec_done
.align 4
.Lcbc_dec_six:
sub r0, r0, #0x60
bl _bsaes_decrypt8
vldmia r9,{q14} @ reload IV
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q10,q11}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vld1.8 {q12}, [r0]!
veor q4, q4, q10
veor q2, q2, q11
vld1.8 {q15}, [r0]!
veor q7, q7, q12
vst1.8 {q0,q1}, [r1]! @ write output
vst1.8 {q6}, [r1]!
vst1.8 {q4}, [r1]!
vst1.8 {q2}, [r1]!
vst1.8 {q7}, [r1]!
b .Lcbc_dec_done
.align 4
.Lcbc_dec_five:
sub r0, r0, #0x50
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q10,q11}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vld1.8 {q15}, [r0]!
veor q4, q4, q10
vst1.8 {q0,q1}, [r1]! @ write output
veor q2, q2, q11
vst1.8 {q6}, [r1]!
vst1.8 {q4}, [r1]!
vst1.8 {q2}, [r1]!
b .Lcbc_dec_done
.align 4
.Lcbc_dec_four:
sub r0, r0, #0x40
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q10}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vld1.8 {q15}, [r0]!
veor q4, q4, q10
vst1.8 {q0,q1}, [r1]! @ write output
vst1.8 {q6}, [r1]!
vst1.8 {q4}, [r1]!
b .Lcbc_dec_done
.align 4
.Lcbc_dec_three:
sub r0, r0, #0x30
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q15}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vst1.8 {q0,q1}, [r1]! @ write output
vst1.8 {q6}, [r1]!
b .Lcbc_dec_done
.align 4
.Lcbc_dec_two:
sub r0, r0, #0x20
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q15}, [r0]! @ reload input
veor q1, q1, q8
vst1.8 {q0,q1}, [r1]! @ write output
b .Lcbc_dec_done
.align 4
.Lcbc_dec_one:
sub r0, r0, #0x10
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q15}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vst1.8 {q0}, [r1]! @ write output
.Lcbc_dec_done:
#ifndef BSAES_ASM_EXTENDED_KEY
vmov.i32 q0, #0
vmov.i32 q1, #0
.Lcbc_dec_bzero:@ wipe key schedule [if any]
vstmia sp!, {q0,q1}
cmp sp, r9
bne .Lcbc_dec_bzero
#endif
mov sp, r9
add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
vst1.8 {q15}, [r8] @ return IV
VFP_ABI_POP
ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc}
.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
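@ Hedged usage sketch for bsaes_cbc_encrypt above. The assumed prototype,
@ matching the register/stack use (r0 = in, r1 = out, r2 = byte length,
@ r3 = AES_KEY, IV pointer and enc flag on the stack), is
@     void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
@                            const AES_KEY *key, uint8_t ivec[16], int enc);
@ The enc argument is never examined here; per the comment at the top of the
@ function, callers must ensure enc == 0 (CBC decryption only). Up to eight
@ blocks are bit-sliced and decrypted per iteration. Verify the prototype
@ against the C headers.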
.globl bsaes_ctr32_encrypt_blocks
.hidden bsaes_ctr32_encrypt_blocks
.type bsaes_ctr32_encrypt_blocks,%function
.align 5
bsaes_ctr32_encrypt_blocks:
@ In OpenSSL, short inputs fall back to aes_nohw_* here. We patch this
@ out to retain a constant-time implementation.
mov ip, sp
stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr}
VFP_ABI_PUSH
ldr r8, [ip] @ ctr is 1st arg on the stack
sub sp, sp, #0x10 @ scratch space to carry over the ctr
mov r9, sp @ save sp
ldr r10, [r3, #240] @ get # of rounds
#ifndef BSAES_ASM_EXTENDED_KEY
@ allocate the key schedule on the stack
sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
add r12, #96 @ size of bit-sliced key schedule
@ populate the key schedule
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
mov sp, r12 @ sp is sp
bl _bsaes_key_convert
veor q7,q7,q15 @ fix up last round key
vstmia r12, {q7} @ save last round key
vld1.8 {q0}, [r8] @ load counter
#ifdef __APPLE__
mov r8, #:lower16:(.LREVM0SR-.LM0)
add r8, r6, r8
#else
add r8, r6, #.LREVM0SR-.LM0 @ borrow r8
#endif
vldmia sp, {q4} @ load round0 key
#else
ldr r12, [r3, #244]
eors r12, #1
beq 0f
@ populate the key schedule
str r12, [r3, #244]
mov r4, r3 @ pass key
mov r5, r10 @ pass # of rounds
add r12, r3, #248 @ pass key schedule
bl _bsaes_key_convert
veor q7,q7,q15 @ fix up last round key
vstmia r12, {q7} @ save last round key
.align 2
add r12, r3, #248
vld1.8 {q0}, [r8] @ load counter
adrl r8, .LREVM0SR @ borrow r8
vldmia r12, {q4} @ load round0 key
sub sp, #0x10 @ place for adjusted round0 key
#endif
vmov.i32 q8,#1 @ compose 1<<96
veor q9,q9,q9
vrev32.8 q0,q0
vext.8 q8,q9,q8,#4
vrev32.8 q4,q4
vadd.u32 q9,q8,q8 @ compose 2<<96
vstmia sp, {q4} @ save adjusted round0 key
b .Lctr_enc_loop
.align 4
.Lctr_enc_loop:
vadd.u32 q10, q8, q9 @ compose 3<<96
vadd.u32 q1, q0, q8 @ +1
vadd.u32 q2, q0, q9 @ +2
vadd.u32 q3, q0, q10 @ +3
vadd.u32 q4, q1, q10
vadd.u32 q5, q2, q10
vadd.u32 q6, q3, q10
vadd.u32 q7, q4, q10
vadd.u32 q10, q5, q10 @ next counter
@ Borrow prologue from _bsaes_encrypt8 to use the opportunity
@ to flip byte order in 32-bit counter
vldmia sp, {q9} @ load round0 key
#ifndef BSAES_ASM_EXTENDED_KEY
add r4, sp, #0x10 @ pass next round key
#else
add r4, r3, #264
#endif
vldmia r8, {q8} @ .LREVM0SR
mov r5, r10 @ pass rounds
vstmia r9, {q10} @ save next counter
#ifdef __APPLE__
mov r6, #:lower16:(.LREVM0SR-.LSR)
sub r6, r8, r6
#else
sub r6, r8, #.LREVM0SR-.LSR @ pass constants
#endif
bl _bsaes_encrypt8_alt
subs r2, r2, #8
blo .Lctr_enc_loop_done
vld1.8 {q8,q9}, [r0]! @ load input
vld1.8 {q10,q11}, [r0]!
veor q0, q8
veor q1, q9
vld1.8 {q12,q13}, [r0]!
veor q4, q10
veor q6, q11
vld1.8 {q14,q15}, [r0]!
veor q3, q12
vst1.8 {q0,q1}, [r1]! @ write output
veor q7, q13
veor q2, q14
vst1.8 {q4}, [r1]!
veor q5, q15
vst1.8 {q6}, [r1]!
vmov.i32 q8, #1 @ compose 1<<96
vst1.8 {q3}, [r1]!
veor q9, q9, q9
vst1.8 {q7}, [r1]!
vext.8 q8, q9, q8, #4
vst1.8 {q2}, [r1]!
vadd.u32 q9,q8,q8 @ compose 2<<96
vst1.8 {q5}, [r1]!
vldmia r9, {q0} @ load counter
bne .Lctr_enc_loop
b .Lctr_enc_done
.align 4
.Lctr_enc_loop_done:
add r2, r2, #8
vld1.8 {q8}, [r0]! @ load input
veor q0, q8
vst1.8 {q0}, [r1]! @ write output
cmp r2, #2
blo .Lctr_enc_done
vld1.8 {q9}, [r0]!
veor q1, q9
vst1.8 {q1}, [r1]!
beq .Lctr_enc_done
vld1.8 {q10}, [r0]!
veor q4, q10
vst1.8 {q4}, [r1]!
cmp r2, #4
blo .Lctr_enc_done
vld1.8 {q11}, [r0]!
veor q6, q11
vst1.8 {q6}, [r1]!
beq .Lctr_enc_done
vld1.8 {q12}, [r0]!
veor q3, q12
vst1.8 {q3}, [r1]!
cmp r2, #6
blo .Lctr_enc_done
vld1.8 {q13}, [r0]!
veor q7, q13
vst1.8 {q7}, [r1]!
beq .Lctr_enc_done
vld1.8 {q14}, [r0]
veor q2, q14
vst1.8 {q2}, [r1]!
.Lctr_enc_done:
vmov.i32 q0, #0
vmov.i32 q1, #0
#ifndef BSAES_ASM_EXTENDED_KEY
.Lctr_enc_bzero:@ wipe key schedule [if any]
vstmia sp!, {q0,q1}
cmp sp, r9
bne .Lctr_enc_bzero
#else
vstmia sp, {q0,q1}
#endif
mov sp, r9
add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
VFP_ABI_POP
ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} @ return
@ OpenSSL contains aes_nohw_* fallback code here. We patch this
@ out to retain a constant-time implementation.
.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
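@ Hedged usage sketch for bsaes_ctr32_encrypt_blocks above. From the register
@ use (r0 = in, r1 = out, r2 = number of 16-byte blocks, r3 = AES_KEY, counter
@ block pointer on the stack), the assumed prototype is
@     void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
@                                     size_t blocks, const AES_KEY *key,
@                                     const uint8_t ivec[16]);
@ Eight counter blocks are generated and encrypted per main-loop iteration,
@ with a block-at-a-time tail. Treat the prototype as an assumption to be
@ checked against the C headers.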
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
marvin-hansen/iggy-streaming-system | 40,888 | thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-arm/crypto/fipsmodule/vpaes-armv7.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
.syntax unified
.arch armv7-a
.fpu neon
#if defined(__thumb2__)
.thumb
#else
.code 32
#endif
.text
.type _vpaes_consts,%object
.align 7 @ totally strategic alignment
_vpaes_consts:
.Lk_mc_forward:@ mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward:@ mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr:@ sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
@
@ "Hot" constants
@
.Lk_inv:@ inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_ipt:@ input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sbo:@ sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_sb1:@ sb1u, sb1t
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.Lk_sb2:@ sb2u, sb2t
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,55,32,78,69,79,78,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 2
.size _vpaes_consts,.-_vpaes_consts
.align 6
@@
@@ _aes_preheat
@@
@@ Fills q9-q15 as specified below.
@@
.type _vpaes_preheat,%function
.align 4
_vpaes_preheat:
adr r10, .Lk_inv
vmov.i8 q9, #0x0f @ .Lk_s0F
vld1.64 {q10,q11}, [r10]! @ .Lk_inv
add r10, r10, #64 @ Skip .Lk_ipt, .Lk_sbo
vld1.64 {q12,q13}, [r10]! @ .Lk_sb1
vld1.64 {q14,q15}, [r10] @ .Lk_sb2
bx lr
@@
@@ _vpaes_encrypt_core
@@
@@ AES-encrypt q0.
@@
@@ Inputs:
@@ q0 = input
@@ q9-q15 as in _vpaes_preheat
@@ [r2] = scheduled keys
@@
@@ Output in q0
@@ Clobbers q1-q5, r8-r11
@@ Preserves q6-q8 so you get some local vectors
@@
@@
.type _vpaes_encrypt_core,%function
.align 4
_vpaes_encrypt_core:
mov r9, r2
ldr r8, [r2,#240] @ pull rounds
adr r11, .Lk_ipt
@ vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
@ vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
vld1.64 {q2, q3}, [r11]
adr r11, .Lk_mc_forward+16
vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 # round0 key
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0
vtbl.8 d2, {q2}, d2 @ vpshufb %xmm1, %xmm2, %xmm1
vtbl.8 d3, {q2}, d3
vtbl.8 d4, {q3}, d0 @ vpshufb %xmm0, %xmm3, %xmm2
vtbl.8 d5, {q3}, d1
veor q0, q1, q5 @ vpxor %xmm5, %xmm1, %xmm0
veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0
	@ .Lenc_entry ends with a bne instruction which is normally paired with
@ subs in .Lenc_loop.
tst r8, r8
b .Lenc_entry
.align 4
.Lenc_loop:
@ middle of middle round
add r10, r11, #0x40
vtbl.8 d8, {q13}, d4 @ vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
vtbl.8 d9, {q13}, d5
vld1.64 {q1}, [r11]! @ vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
vtbl.8 d0, {q12}, d6 @ vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
vtbl.8 d1, {q12}, d7
veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
vtbl.8 d10, {q15}, d4 @ vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
vtbl.8 d11, {q15}, d5
veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A
vtbl.8 d4, {q14}, d6 @ vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
vtbl.8 d5, {q14}, d7
vld1.64 {q4}, [r10] @ vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
vtbl.8 d6, {q0}, d2 @ vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
vtbl.8 d7, {q0}, d3
veor q2, q2, q5 @ vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
@ Write to q5 instead of q0, so the table and destination registers do
@ not overlap.
vtbl.8 d10, {q0}, d8 @ vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
vtbl.8 d11, {q0}, d9
veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
vtbl.8 d8, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
vtbl.8 d9, {q3}, d3
@ Here we restore the original q0/q5 usage.
veor q0, q5, q3 @ vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
and r11, r11, #~(1<<6) @ and $0x30, %r11 # ... mod 4
veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
subs r8, r8, #1 @ nr--
.Lenc_entry:
@ top of round
vand q1, q0, q9 @ vpand %xmm0, %xmm9, %xmm1 # 0 = k
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i
vtbl.8 d10, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
vtbl.8 d11, {q11}, d3
veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j
vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
vtbl.8 d7, {q10}, d1
vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
vtbl.8 d9, {q10}, d3
veor q3, q3, q5 @ vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
vtbl.8 d5, {q10}, d7
vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
vtbl.8 d7, {q10}, d9
veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io
veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5
bne .Lenc_loop
@ middle of last round
add r10, r11, #0x80
adr r11, .Lk_sbo
@ Read to q1 instead of q4, so the vtbl.8 instruction below does not
@ overlap table and destination registers.
vld1.64 {q1}, [r11]! @ vmovdqa -0x60(%r10), %xmm4 # 3 : sbou
vld1.64 {q0}, [r11] @ vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
vtbl.8 d9, {q1}, d5
vld1.64 {q1}, [r10] @ vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
@ Write to q2 instead of q0 below, to avoid overlapping table and
@ destination registers.
vtbl.8 d4, {q0}, d6 @ vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
vtbl.8 d5, {q0}, d7
veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
veor q2, q2, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A
@ Here we restore the original q0/q2 usage.
vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0
vtbl.8 d1, {q2}, d3
bx lr
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
.globl vpaes_encrypt
.hidden vpaes_encrypt
.type vpaes_encrypt,%function
.align 4
vpaes_encrypt:
@ _vpaes_encrypt_core uses r8-r11. Round up to r7-r11 to maintain stack
@ alignment.
stmdb sp!, {r7,r8,r9,r10,r11,lr}
@ _vpaes_encrypt_core uses q4-q5 (d8-d11), which are callee-saved.
vstmdb sp!, {d8,d9,d10,d11}
vld1.64 {q0}, [r0]
bl _vpaes_preheat
bl _vpaes_encrypt_core
vst1.64 {q0}, [r1]
vldmia sp!, {d8,d9,d10,d11}
ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return
.size vpaes_encrypt,.-vpaes_encrypt
@
@ Decryption stuff
@
.type _vpaes_decrypt_consts,%object
.align 4
_vpaes_decrypt_consts:
.Lk_dipt:@ decryption input transform
.quad 0x0F505B040B545F00, 0x154A411E114E451A
.quad 0x86E383E660056500, 0x12771772F491F194
.Lk_dsbo:@ decryption sbox final output
.quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
.quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
.Lk_dsb9:@ decryption sbox output *9*u, *9*t
.quad 0x851C03539A86D600, 0xCAD51F504F994CC9
.quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
.Lk_dsbd:@ decryption sbox output *D*u, *D*t
.quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
.quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
.Lk_dsbb:@ decryption sbox output *B*u, *B*t
.quad 0xD022649296B44200, 0x602646F6B0F2D404
.quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
.Lk_dsbe:@ decryption sbox output *E*u, *E*t
.quad 0x46F2929626D4D000, 0x2242600464B4F6B0
.quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32
.size _vpaes_decrypt_consts,.-_vpaes_decrypt_consts
@@
@@ Decryption core
@@
@@ Same API as encryption core, except it clobbers q12-q15 rather than using
@@ the values from _vpaes_preheat. q9-q11 must still be set from
@@ _vpaes_preheat.
@@
.type _vpaes_decrypt_core,%function
.align 4
_vpaes_decrypt_core:
mov r9, r2
ldr r8, [r2,#240] @ pull rounds
@ This function performs shuffles with various constants. The x86_64
@ version loads them on-demand into %xmm0-%xmm5. This does not work well
@ for ARMv7 because those registers are shuffle destinations. The ARMv8
@ version preloads those constants into registers, but ARMv7 has half
@ the registers to work with. Instead, we load them on-demand into
@ q12-q15, registers normally used for preloaded constants. This is fine
@ because decryption doesn't use those constants. The values are
@ constant, so this does not interfere with potential 2x optimizations.
adr r7, .Lk_dipt
vld1.64 {q12,q13}, [r7] @ vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
lsl r11, r8, #4 @ mov %rax, %r11; shl $4, %r11
eor r11, r11, #0x30 @ xor $0x30, %r11
adr r10, .Lk_sr
and r11, r11, #0x30 @ and $0x30, %r11
add r11, r11, r10
adr r10, .Lk_mc_forward+48
vld1.64 {q4}, [r9]! @ vmovdqu (%r9), %xmm4 # round0 key
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0
vtbl.8 d4, {q12}, d2 @ vpshufb %xmm1, %xmm2, %xmm2
vtbl.8 d5, {q12}, d3
vld1.64 {q5}, [r10] @ vmovdqa .Lk_mc_forward+48(%rip), %xmm5
@ vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
vtbl.8 d0, {q13}, d0 @ vpshufb %xmm0, %xmm1, %xmm0
vtbl.8 d1, {q13}, d1
veor q2, q2, q4 @ vpxor %xmm4, %xmm2, %xmm2
veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0
@ .Ldec_entry ends with a bne instruction which is normally paired with
@ subs in .Ldec_loop.
tst r8, r8
b .Ldec_entry
.align 4
.Ldec_loop:
@
@ Inverse mix columns
@
@ We load .Lk_dsb* into q12-q15 on-demand. See the comment at the top of
@ the function.
adr r10, .Lk_dsb9
vld1.64 {q12,q13}, [r10]! @ vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
@ vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
@ Load sbd* ahead of time.
vld1.64 {q14,q15}, [r10]! @ vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
@ vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt
vtbl.8 d8, {q12}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
vtbl.8 d9, {q12}, d5
vtbl.8 d2, {q13}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
vtbl.8 d3, {q13}, d7
veor q0, q4, q0 @ vpxor %xmm4, %xmm0, %xmm0
veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
@ Load sbb* ahead of time.
vld1.64 {q12,q13}, [r10]! @ vmovdqa 0x20(%r10),%xmm4 # 4 : sbbu
@ vmovdqa 0x30(%r10),%xmm1 # 0 : sbbt
vtbl.8 d8, {q14}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
vtbl.8 d9, {q14}, d5
@ Write to q1 instead of q0, so the table and destination registers do
@ not overlap.
vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch
vtbl.8 d3, {q0}, d11
@ Here we restore the original q0/q1 usage. This instruction is
@ reordered from the ARMv8 version so we do not clobber the vtbl.8
@ below.
veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
vtbl.8 d2, {q15}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
vtbl.8 d3, {q15}, d7
@ vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
@ vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt
@ Load sbe* ahead of time.
vld1.64 {q14,q15}, [r10]! @ vmovdqa 0x40(%r10),%xmm4 # 4 : sbeu
@ vmovdqa 0x50(%r10),%xmm1 # 0 : sbet
vtbl.8 d8, {q12}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
vtbl.8 d9, {q12}, d5
@ Write to q1 instead of q0, so the table and destination registers do
@ not overlap.
vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch
vtbl.8 d3, {q0}, d11
@ Here we restore the original q0/q1 usage. This instruction is
@ reordered from the ARMv8 version so we do not clobber the vtbl.8
@ below.
veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
vtbl.8 d2, {q13}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
vtbl.8 d3, {q13}, d7
veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
vtbl.8 d8, {q14}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
vtbl.8 d9, {q14}, d5
@ Write to q1 instead of q0, so the table and destination registers do
@ not overlap.
vtbl.8 d2, {q0}, d10 @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch
vtbl.8 d3, {q0}, d11
@ Here we restore the original q0/q1 usage. This instruction is
@ reordered from the ARMv8 version so we do not clobber the vtbl.8
@ below.
veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
vtbl.8 d2, {q15}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
vtbl.8 d3, {q15}, d7
vext.8 q5, q5, q5, #12 @ vpalignr $12, %xmm5, %xmm5, %xmm5
veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
subs r8, r8, #1 @ sub $1,%rax # nr--
.Ldec_entry:
@ top of round
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i
vtbl.8 d4, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
vtbl.8 d5, {q11}, d3
veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j
vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
vtbl.8 d7, {q10}, d1
vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
vtbl.8 d9, {q10}, d3
veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
vtbl.8 d5, {q10}, d7
vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
vtbl.8 d7, {q10}, d9
veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io
veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
vld1.64 {q0}, [r9]! @ vmovdqu (%r9), %xmm0
bne .Ldec_loop
@ middle of last round
adr r10, .Lk_dsbo
@ Write to q1 rather than q4 to avoid overlapping table and destination.
vld1.64 {q1}, [r10]! @ vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
vtbl.8 d9, {q1}, d5
@ Write to q2 rather than q1 to avoid overlapping table and destination.
vld1.64 {q2}, [r10] @ vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
vtbl.8 d2, {q2}, d6 @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
vtbl.8 d3, {q2}, d7
vld1.64 {q2}, [r11] @ vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
veor q4, q4, q0 @ vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
@ Write to q1 rather than q0 so the table and destination registers
@ below do not overlap.
veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm0 # 0 = A
vtbl.8 d0, {q1}, d4 @ vpshufb %xmm2, %xmm0, %xmm0
vtbl.8 d1, {q1}, d5
bx lr
.size _vpaes_decrypt_core,.-_vpaes_decrypt_core
.globl vpaes_decrypt
.hidden vpaes_decrypt
.type vpaes_decrypt,%function
.align 4
vpaes_decrypt:
@ _vpaes_decrypt_core uses r7-r11.
stmdb sp!, {r7,r8,r9,r10,r11,lr}
@ _vpaes_decrypt_core uses q4-q5 (d8-d11), which are callee-saved.
vstmdb sp!, {d8,d9,d10,d11}
vld1.64 {q0}, [r0]
bl _vpaes_preheat
bl _vpaes_decrypt_core
vst1.64 {q0}, [r1]
vldmia sp!, {d8,d9,d10,d11}
ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return
.size vpaes_decrypt,.-vpaes_decrypt
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@ @@
@@ AES key schedule @@
@@ @@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ This function diverges from both x86_64 and aarch64 in which constants are
@ pinned. x86_64 has a common preheat function for all operations. aarch64
@ separates them because it has enough registers to pin nearly all constants.
@ armv7 does not have enough registers, but needing explicit loads and stores
@ also complicates using x86_64's register allocation directly.
@
@ We pin some constants for convenience and leave q14 and q15 free to load
@ others on demand.
@
@ Key schedule constants
@
.type _vpaes_key_consts,%object
.align 4
_vpaes_key_consts:
.Lk_dksd:@ decryption key schedule: invskew x*D
.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
.Lk_dksb:@ decryption key schedule: invskew x*B
.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse:@ decryption key schedule: invskew x*E + 0x63
.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9:@ decryption key schedule: invskew x*9
.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
.Lk_rcon:@ rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
.Lk_opt:@ output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew:@ deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
.size _vpaes_key_consts,.-_vpaes_key_consts
.type _vpaes_key_preheat,%function
.align 4
_vpaes_key_preheat:
adr r11, .Lk_rcon
vmov.i8 q12, #0x5b @ .Lk_s63
adr r10, .Lk_inv @ Must be aligned to 8 mod 16.
vmov.i8 q9, #0x0f @ .Lk_s0F
vld1.64 {q10,q11}, [r10] @ .Lk_inv
vld1.64 {q8}, [r11] @ .Lk_rcon
bx lr
.size _vpaes_key_preheat,.-_vpaes_key_preheat
.type _vpaes_schedule_core,%function
.align 4
_vpaes_schedule_core:
@ We only need to save lr, but ARM requires an 8-byte stack alignment,
@ so save an extra register.
stmdb sp!, {r3,lr}
bl _vpaes_key_preheat @ load the tables
adr r11, .Lk_ipt @ Must be aligned to 8 mod 16.
vld1.64 {q0}, [r0]! @ vmovdqu (%rdi), %xmm0 # load key (unaligned)
@ input transform
@ Use q4 here rather than q3 so .Lschedule_am_decrypting does not
@ overlap table and destination.
vmov q4, q0 @ vmovdqa %xmm0, %xmm3
bl _vpaes_schedule_transform
adr r10, .Lk_sr @ Must be aligned to 8 mod 16.
vmov q7, q0 @ vmovdqa %xmm0, %xmm7
add r8, r8, r10
tst r3, r3
bne .Lschedule_am_decrypting
@ encrypting, output zeroth round key after transform
vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx)
b .Lschedule_go
.Lschedule_am_decrypting:
@ decrypting, output zeroth round key after shiftrows
vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1
vtbl.8 d6, {q4}, d2 @ vpshufb %xmm1, %xmm3, %xmm3
vtbl.8 d7, {q4}, d3
vst1.64 {q3}, [r2] @ vmovdqu %xmm3, (%rdx)
eor r8, r8, #0x30 @ xor $0x30, %r8
.Lschedule_go:
cmp r1, #192 @ cmp $192, %esi
bhi .Lschedule_256
beq .Lschedule_192
@ 128: fall through
@@
@@ .schedule_128
@@
@@ 128-bit specific part of key schedule.
@@
@@ This schedule is really simple, because all its parts
@@ are accomplished by the subroutines.
@@
.Lschedule_128:
mov r0, #10 @ mov $10, %esi
.Loop_schedule_128:
bl _vpaes_schedule_round
subs r0, r0, #1 @ dec %esi
beq .Lschedule_mangle_last
bl _vpaes_schedule_mangle @ write output
b .Loop_schedule_128
@@
@@ .aes_schedule_192
@@
@@ 192-bit specific part of key schedule.
@@
@@ The main body of this schedule is the same as the 128-bit
@@ schedule, but with more smearing. The long, high side is
@@ stored in q7 as before, and the short, low side is in
@@ the high bits of q6.
@@
@@ This schedule is somewhat nastier, however, because each
@@ round produces 192 bits of key material, or 1.5 round keys.
@@ Therefore, on each cycle we do 2 rounds and produce 3 round
@@ keys.
@@
.align 4
.Lschedule_192:
sub r0, r0, #8
vld1.64 {q0}, [r0] @ vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
bl _vpaes_schedule_transform @ input transform
vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save short part
vmov.i8 d12, #0 @ vpxor %xmm4, %xmm4, %xmm4 # clear 4
@ vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
mov r0, #4 @ mov $4, %esi
.Loop_schedule_192:
bl _vpaes_schedule_round
vext.8 q0, q6, q0, #8 @ vpalignr $8,%xmm6,%xmm0,%xmm0
bl _vpaes_schedule_mangle @ save key n
bl _vpaes_schedule_192_smear
bl _vpaes_schedule_mangle @ save key n+1
bl _vpaes_schedule_round
subs r0, r0, #1 @ dec %esi
beq .Lschedule_mangle_last
bl _vpaes_schedule_mangle @ save key n+2
bl _vpaes_schedule_192_smear
b .Loop_schedule_192
@@
@@ .aes_schedule_256
@@
@@ 256-bit specific part of key schedule.
@@
@@ The structure here is very similar to the 128-bit
@@ schedule, but with an additional "low side" in
@@ q6. The low side's rounds are the same as the
@@ high side's, except no rcon and no rotation.
@@
.align 4
.Lschedule_256:
vld1.64 {q0}, [r0] @ vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
bl _vpaes_schedule_transform @ input transform
mov r0, #7 @ mov $7, %esi
.Loop_schedule_256:
bl _vpaes_schedule_mangle @ output low result
vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
@ high round
bl _vpaes_schedule_round
subs r0, r0, #1 @ dec %esi
beq .Lschedule_mangle_last
bl _vpaes_schedule_mangle
@ low round. swap xmm7 and xmm6
vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0
vmov.i8 q4, #0
vmov q5, q7 @ vmovdqa %xmm7, %xmm5
vmov q7, q6 @ vmovdqa %xmm6, %xmm7
bl _vpaes_schedule_low_round
vmov q7, q5 @ vmovdqa %xmm5, %xmm7
b .Loop_schedule_256
@@
@@ .aes_schedule_mangle_last
@@
@@ Mangler for last round of key schedule
@@ Mangles q0
@@ when encrypting, outputs out(q0) ^ 63
@@ when decrypting, outputs unskew(q0)
@@
@@ Always called right before return... jumps to cleanup and exits
@@
.align 4
.Lschedule_mangle_last:
@ schedule last round key from xmm0
adr r11, .Lk_deskew @ lea .Lk_deskew(%rip),%r11 # prepare to deskew
tst r3, r3
bne .Lschedule_mangle_last_dec
@ encrypting
vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10),%xmm1
adr r11, .Lk_opt @ lea .Lk_opt(%rip), %r11 # prepare to output transform
add r2, r2, #32 @ add $32, %rdx
vmov q2, q0
vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0 # output permute
vtbl.8 d1, {q2}, d3
.Lschedule_mangle_last_dec:
sub r2, r2, #16 @ add $-16, %rdx
veor q0, q0, q12 @ vpxor .Lk_s63(%rip), %xmm0, %xmm0
bl _vpaes_schedule_transform @ output transform
vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx) # save last key
@ cleanup
veor q0, q0, q0 @ vpxor %xmm0, %xmm0, %xmm0
veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1
veor q2, q2, q2 @ vpxor %xmm2, %xmm2, %xmm2
veor q3, q3, q3 @ vpxor %xmm3, %xmm3, %xmm3
veor q4, q4, q4 @ vpxor %xmm4, %xmm4, %xmm4
veor q5, q5, q5 @ vpxor %xmm5, %xmm5, %xmm5
veor q6, q6, q6 @ vpxor %xmm6, %xmm6, %xmm6
veor q7, q7, q7 @ vpxor %xmm7, %xmm7, %xmm7
ldmia sp!, {r3,pc} @ return
.size _vpaes_schedule_core,.-_vpaes_schedule_core
@@
@@ .aes_schedule_192_smear
@@
@@ Smear the short, low side in the 192-bit key schedule.
@@
@@ Inputs:
@@ q7: high side, b a x y
@@ q6: low side, d c 0 0
@@
@@ Outputs:
@@ q6: b+c+d b+c 0 0
@@ q0: b+c+d b+c b a
@@
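@ A Python restatement of the relation above (illustrative only; words are
@ listed low to high and "+" above denotes XOR):
@
@   def smear_192(q7, q6):            # q7 = [y, x, a, b], q6 = [0, 0, c, d]
@       y, x, a, b = q7
@       _, _, c, d = q6
@       q0 = [a, b, b ^ c, b ^ c ^ d]     # b+c+d b+c b a
@       q6 = [0, 0, b ^ c, b ^ c ^ d]     # b+c+d b+c 0 0
@       return q0, q6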
.type _vpaes_schedule_192_smear,%function
.align 4
_vpaes_schedule_192_smear:
vmov.i8 q1, #0
vdup.32 q0, d15[1]
vshl.i64 q1, q6, #32 @ vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
vmov d0, d15 @ vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
veor q6, q6, q1 @ vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1
veor q6, q6, q0 @ vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
vmov q0, q6 @ vmovdqa %xmm6, %xmm0
vmov d12, d2 @ vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
bx lr
.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
@@
@@ .aes_schedule_round
@@
@@ Runs one main round of the key schedule on q0, q7
@@
@@ Specifically, runs subbytes on the high dword of q0
@@ then rotates it by one byte and xors into the low dword of
@@ q7.
@@
@@ Adds rcon from low byte of q8, then rotates q8 for
@@ next rcon.
@@
@@ Smears the dwords of q7 by xoring the low into the
@@ second low, result into third, result into highest.
@@
@@ Returns results in q7 = q0.
@@ Clobbers q1-q4, r11.
@@
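@ For reference, a Python sketch of the textbook AES-128 key-schedule round
@ this corresponds to (illustrative only; sub_word, rot_word and rcon are the
@ usual FIPS-197 helpers and are assumed here, and the vpaes code additionally
@ keeps everything in its permuted basis):
@
@   def schedule_round(prev, rcon):       # prev = previous round key, 4 words
@       t = sub_word(rot_word(prev[3])) ^ rcon
@       out = [prev[0] ^ t]
@       for k in range(1, 4):
@           out.append(out[k - 1] ^ prev[k])
@       return out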
.type _vpaes_schedule_round,%function
.align 4
_vpaes_schedule_round:
@ extract rcon from xmm8
vmov.i8 q4, #0 @ vpxor %xmm4, %xmm4, %xmm4
vext.8 q1, q8, q4, #15 @ vpalignr $15, %xmm8, %xmm4, %xmm1
vext.8 q8, q8, q8, #15 @ vpalignr $15, %xmm8, %xmm8, %xmm8
veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7
@ rotate
vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0
vext.8 q0, q0, q0, #1 @ vpalignr $1, %xmm0, %xmm0, %xmm0
@ fall through...
@ low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
@ The x86_64 version pins .Lk_sb1 in %xmm13 and .Lk_sb1+16 in %xmm12.
@ We pin other values in _vpaes_key_preheat, so load them now.
adr r11, .Lk_sb1
vld1.64 {q14,q15}, [r11]
@ smear xmm7
vext.8 q1, q4, q7, #12 @ vpslldq $4, %xmm7, %xmm1
veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7
vext.8 q4, q4, q7, #8 @ vpslldq $8, %xmm7, %xmm4
@ subbytes
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i
veor q7, q7, q4 @ vpxor %xmm4, %xmm7, %xmm7
vtbl.8 d4, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
vtbl.8 d5, {q11}, d3
veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j
vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
vtbl.8 d7, {q10}, d1
veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
vtbl.8 d9, {q10}, d3
veor q7, q7, q12 @ vpxor .Lk_s63(%rip), %xmm7, %xmm7
vtbl.8 d6, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
vtbl.8 d7, {q10}, d7
veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
vtbl.8 d4, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
vtbl.8 d5, {q10}, d9
veor q3, q3, q1 @ vpxor %xmm1, %xmm3, %xmm3 # 2 = io
veor q2, q2, q0 @ vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
vtbl.8 d8, {q15}, d6 @ vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
vtbl.8 d9, {q15}, d7
vtbl.8 d2, {q14}, d4 @ vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
vtbl.8 d3, {q14}, d5
veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
@ add in smeared stuff
veor q0, q1, q7 @ vpxor %xmm7, %xmm1, %xmm0
veor q7, q1, q7 @ vmovdqa %xmm0, %xmm7
bx lr
.size _vpaes_schedule_round,.-_vpaes_schedule_round
@@
@@ .aes_schedule_transform
@@
@@ Linear-transform q0 according to tables at [r11]
@@
@@ Requires that q9 = 0x0F0F... as in preheat
@@ Output in q0
@@ Clobbers q1, q2, q14, q15
@@
.type _vpaes_schedule_transform,%function
.align 4
_vpaes_schedule_transform:
vld1.64 {q14,q15}, [r11] @ vmovdqa (%r11), %xmm2 # lo
@ vmovdqa 16(%r11), %xmm1 # hi
vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1
vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0
vtbl.8 d4, {q14}, d2 @ vpshufb %xmm1, %xmm2, %xmm2
vtbl.8 d5, {q14}, d3
vtbl.8 d0, {q15}, d0 @ vpshufb %xmm0, %xmm1, %xmm0
vtbl.8 d1, {q15}, d1
veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0
bx lr
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
@@
@@ .aes_schedule_mangle
@@
@@ Mangles q0 from (basis-transformed) standard version
@@ to our version.
@@
@@ On encrypt,
@@ xor with 0x63
@@ multiply by circulant 0,1,1,1
@@ apply shiftrows transform
@@
@@ On decrypt,
@@ xor with 0x63
@@ multiply by "inverse mixcolumns" circulant E,B,D,9
@@ deskew
@@ apply shiftrows transform
@@
@@
@@ Writes out to [r2], and increments or decrements it
@@ Keeps track of round number mod 4 in r8
@@ Preserves q0
@@ Clobbers q1-q5
@@
.type _vpaes_schedule_mangle,%function
.align 4
_vpaes_schedule_mangle:
tst r3, r3
vmov q4, q0 @ vmovdqa %xmm0, %xmm4 # save xmm0 for later
adr r11, .Lk_mc_forward @ Must be aligned to 8 mod 16.
vld1.64 {q5}, [r11] @ vmovdqa .Lk_mc_forward(%rip),%xmm5
bne .Lschedule_mangle_dec
@ encrypting
@ Write to q2 so we do not overlap table and destination below.
veor q2, q0, q12 @ vpxor .Lk_s63(%rip), %xmm0, %xmm4
add r2, r2, #16 @ add $16, %rdx
vtbl.8 d8, {q2}, d10 @ vpshufb %xmm5, %xmm4, %xmm4
vtbl.8 d9, {q2}, d11
vtbl.8 d2, {q4}, d10 @ vpshufb %xmm5, %xmm4, %xmm1
vtbl.8 d3, {q4}, d11
vtbl.8 d6, {q1}, d10 @ vpshufb %xmm5, %xmm1, %xmm3
vtbl.8 d7, {q1}, d11
veor q4, q4, q1 @ vpxor %xmm1, %xmm4, %xmm4
vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1
veor q3, q3, q4 @ vpxor %xmm4, %xmm3, %xmm3
b .Lschedule_mangle_both
.align 4
.Lschedule_mangle_dec:
@ inverse mix columns
adr r11, .Lk_dksd @ lea .Lk_dksd(%rip),%r11
vshr.u8 q1, q4, #4 @ vpsrlb $4, %xmm4, %xmm1 # 1 = hi
vand q4, q4, q9 @ vpand %xmm9, %xmm4, %xmm4 # 4 = lo
vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x00(%r11), %xmm2
@ vmovdqa 0x10(%r11), %xmm3
vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2
vtbl.8 d5, {q14}, d9
vtbl.8 d6, {q15}, d2 @ vpshufb %xmm1, %xmm3, %xmm3
vtbl.8 d7, {q15}, d3
@ Load .Lk_dksb ahead of time.
vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x20(%r11), %xmm2
@ vmovdqa 0x30(%r11), %xmm3
@ Write to q13 so we do not overlap table and destination.
veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3
vtbl.8 d6, {q13}, d10 @ vpshufb %xmm5, %xmm3, %xmm3
vtbl.8 d7, {q13}, d11
vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2
vtbl.8 d5, {q14}, d9
veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2
vtbl.8 d6, {q15}, d2 @ vpshufb %xmm1, %xmm3, %xmm3
vtbl.8 d7, {q15}, d3
@ Load .Lk_dkse ahead of time.
vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x40(%r11), %xmm2
@ vmovdqa 0x50(%r11), %xmm3
@ Write to q13 so we do not overlap table and destination.
veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3
vtbl.8 d6, {q13}, d10 @ vpshufb %xmm5, %xmm3, %xmm3
vtbl.8 d7, {q13}, d11
vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2
vtbl.8 d5, {q14}, d9
veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2
vtbl.8 d6, {q15}, d2 @ vpshufb %xmm1, %xmm3, %xmm3
vtbl.8 d7, {q15}, d3
@ Load .Lk_dks9 ahead of time.
vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x60(%r11), %xmm2
@ vmovdqa 0x70(%r11), %xmm4
@ Write to q13 so we do not overlap table and destination.
veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3
vtbl.8 d4, {q14}, d8 @ vpshufb %xmm4, %xmm2, %xmm2
vtbl.8 d5, {q14}, d9
vtbl.8 d6, {q13}, d10 @ vpshufb %xmm5, %xmm3, %xmm3
vtbl.8 d7, {q13}, d11
vtbl.8 d8, {q15}, d2 @ vpshufb %xmm1, %xmm4, %xmm4
vtbl.8 d9, {q15}, d3
vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1
veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2
veor q3, q4, q2 @ vpxor %xmm2, %xmm4, %xmm3
sub r2, r2, #16 @ add $-16, %rdx
.Lschedule_mangle_both:
@ Write to q2 so table and destination do not overlap.
vtbl.8 d4, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm3
vtbl.8 d5, {q3}, d3
add r8, r8, #64-16 @ add $-16, %r8
and r8, r8, #~(1<<6) @ and $0x30, %r8
vst1.64 {q2}, [r2] @ vmovdqu %xmm3, (%rdx)
bx lr
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
.globl vpaes_set_encrypt_key
.hidden vpaes_set_encrypt_key
.type vpaes_set_encrypt_key,%function
.align 4
vpaes_set_encrypt_key:
stmdb sp!, {r7,r8,r9,r10,r11, lr}
vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
lsr r9, r1, #5 @ shr $5,%eax
add r9, r9, #5 @ $5,%eax
str r9, [r2,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
mov r3, #0 @ mov $0,%ecx
mov r8, #0x30 @ mov $0x30,%r8d
bl _vpaes_schedule_core
eor r0, r0, r0
vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return
.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key
.globl vpaes_set_decrypt_key
.hidden vpaes_set_decrypt_key
.type vpaes_set_decrypt_key,%function
.align 4
vpaes_set_decrypt_key:
stmdb sp!, {r7,r8,r9,r10,r11, lr}
vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
lsr r9, r1, #5 @ shr $5,%eax
add r9, r9, #5 @ $5,%eax
str r9, [r2,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
lsl r9, r9, #4 @ shl $4,%eax
add r2, r2, #16 @ lea 16(%rdx,%rax),%rdx
add r2, r2, r9
mov r3, #1 @ mov $1,%ecx
lsr r8, r1, #1 @ shr $1,%r8d
and r8, r8, #32 @ and $32,%r8d
eor r8, r8, #32 @ xor $32,%r8d # nbits==192?0:32
bl _vpaes_schedule_core
vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return
.size vpaes_set_decrypt_key,.-vpaes_set_decrypt_key
@ Additional constants for converting to bsaes.
.type _vpaes_convert_consts,%object
.align 4
_vpaes_convert_consts:
@ .Lk_opt_then_skew applies skew(opt(x)) XOR 0x63, where skew is the linear
@ transform in the AES S-box. 0x63 is incorporated into the low half of the
@ table. This was computed with the following script:
@
@ def u64s_to_u128(x, y):
@ return x | (y << 64)
@ def u128_to_u64s(w):
@ return w & ((1<<64)-1), w >> 64
@ def get_byte(w, i):
@ return (w >> (i*8)) & 0xff
@ def apply_table(table, b):
@ lo = b & 0xf
@ hi = b >> 4
@ return get_byte(table[0], lo) ^ get_byte(table[1], hi)
@ def opt(b):
@ table = [
@ u64s_to_u128(0xFF9F4929D6B66000, 0xF7974121DEBE6808),
@ u64s_to_u128(0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0),
@ ]
@ return apply_table(table, b)
@ def rot_byte(b, n):
@ return 0xff & ((b << n) | (b >> (8-n)))
@ def skew(x):
@ return (x ^ rot_byte(x, 1) ^ rot_byte(x, 2) ^ rot_byte(x, 3) ^
@ rot_byte(x, 4))
@ table = [0, 0]
@ for i in range(16):
@ table[0] |= (skew(opt(i)) ^ 0x63) << (i*8)
@ table[1] |= skew(opt(i<<4)) << (i*8)
@ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[0]))
@ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[1]))
.Lk_opt_then_skew:
.quad 0x9cb8436798bc4763, 0x6440bb9f6044bf9b
.quad 0x1f30062936192f00, 0xb49bad829db284ab
@ .Lk_decrypt_transform is a permutation which performs an 8-bit left-rotation
@ followed by a byte-swap on each 32-bit word of a vector. E.g., 0x11223344
@ becomes 0x22334411 and then 0x11443322.
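@ A small Python check of that example (illustrative only):
@
@   def rotl32(x, n): return ((x << n) | (x >> (32 - n))) & 0xffffffff
@   def bswap32(x): return int.from_bytes(x.to_bytes(4, "little"), "big")
@   assert bswap32(rotl32(0x11223344, 8)) == 0x11443322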
.Lk_decrypt_transform:
.quad 0x0704050603000102, 0x0f0c0d0e0b08090a
.size _vpaes_convert_consts,.-_vpaes_convert_consts
@ void vpaes_encrypt_key_to_bsaes(AES_KEY *bsaes, const AES_KEY *vpaes);
.globl vpaes_encrypt_key_to_bsaes
.hidden vpaes_encrypt_key_to_bsaes
.type vpaes_encrypt_key_to_bsaes,%function
.align 4
vpaes_encrypt_key_to_bsaes:
stmdb sp!, {r11, lr}
@ See _vpaes_schedule_core for the key schedule logic. In particular,
@ _vpaes_schedule_transform(.Lk_ipt) (section 2.2 of the paper),
@ _vpaes_schedule_mangle (section 4.3), and .Lschedule_mangle_last
@ contain the transformations not in the bsaes representation. This
@ function inverts those transforms.
@
@ Note also that bsaes-armv7.pl expects aes-armv4.pl's key
@ representation, which does not match the other aes_nohw_*
@ implementations. The ARM aes_nohw_* stores each 32-bit word
@ byteswapped, as a convenience for (unsupported) big-endian ARM, at the
@ cost of extra REV and VREV32 operations in little-endian ARM.
vmov.i8 q9, #0x0f @ Required by _vpaes_schedule_transform
adr r2, .Lk_mc_forward @ Must be aligned to 8 mod 16.
add r3, r2, 0x90 @ .Lk_sr+0x10-.Lk_mc_forward = 0x90 (Apple's toolchain doesn't support the expression)
vld1.64 {q12}, [r2]
vmov.i8 q10, #0x5b @ .Lk_s63 from vpaes-x86_64
adr r11, .Lk_opt @ Must be aligned to 8 mod 16.
vmov.i8 q11, #0x63 @ .Lk_s63 without .Lk_ipt applied
@ vpaes stores one fewer round count than bsaes, but the number of keys
@ is the same.
ldr r2, [r1,#240]
add r2, r2, #1
str r2, [r0,#240]
@ The first key is transformed with _vpaes_schedule_transform(.Lk_ipt).
@ Invert this with .Lk_opt.
vld1.64 {q0}, [r1]!
bl _vpaes_schedule_transform
vrev32.8 q0, q0
vst1.64 {q0}, [r0]!
@ The middle keys have _vpaes_schedule_transform(.Lk_ipt) applied,
@ followed by _vpaes_schedule_mangle. _vpaes_schedule_mangle XORs 0x63,
@ multiplies by the circulant 0,1,1,1, then applies ShiftRows.
.Loop_enc_key_to_bsaes:
vld1.64 {q0}, [r1]!
@ Invert the ShiftRows step (see .Lschedule_mangle_both). Note we cycle
@ r3 in the opposite direction and start at .Lk_sr+0x10 instead of 0x30.
@ We use r3 rather than r8 to avoid a callee-saved register.
vld1.64 {q1}, [r3]
vtbl.8 d4, {q0}, d2
vtbl.8 d5, {q0}, d3
add r3, r3, #16
and r3, r3, #~(1<<6)
vmov q0, q2
@ Handle the last key differently.
subs r2, r2, #1
beq .Loop_enc_key_to_bsaes_last
@ Multiply by the circulant. This is its own inverse.
vtbl.8 d2, {q0}, d24
vtbl.8 d3, {q0}, d25
vmov q0, q1
vtbl.8 d4, {q1}, d24
vtbl.8 d5, {q1}, d25
veor q0, q0, q2
vtbl.8 d2, {q2}, d24
vtbl.8 d3, {q2}, d25
veor q0, q0, q1
@ XOR and finish.
veor q0, q0, q10
bl _vpaes_schedule_transform
vrev32.8 q0, q0
vst1.64 {q0}, [r0]!
b .Loop_enc_key_to_bsaes
.Loop_enc_key_to_bsaes_last:
@ The final key does not have a basis transform (note
@ .Lschedule_mangle_last inverts the original transform). It only XORs
@ 0x63 and applies ShiftRows. The latter was already inverted in the
@ loop. Note that, because we act on the original representation, we use
@ q11, not q10.
veor q0, q0, q11
vrev32.8 q0, q0
vst1.64 {q0}, [r0]
@ Wipe registers which contained key material.
veor q0, q0, q0
veor q1, q1, q1
veor q2, q2, q2
ldmia sp!, {r11, pc} @ return
.size vpaes_encrypt_key_to_bsaes,.-vpaes_encrypt_key_to_bsaes
@ void vpaes_decrypt_key_to_bsaes(AES_KEY *vpaes, const AES_KEY *bsaes);
.globl vpaes_decrypt_key_to_bsaes
.hidden vpaes_decrypt_key_to_bsaes
.type vpaes_decrypt_key_to_bsaes,%function
.align 4
vpaes_decrypt_key_to_bsaes:
stmdb sp!, {r11, lr}
@ See _vpaes_schedule_core for the key schedule logic. Note vpaes
@ computes the decryption key schedule in reverse. Additionally,
@ aes-x86_64.pl shares some transformations, so we must only partially
@ invert vpaes's transformations. In general, vpaes computes in a
@ different basis (.Lk_ipt and .Lk_opt) and applies the inverses of
@ MixColumns, ShiftRows, and the affine part of the AES S-box (which is
@ split into a linear skew and XOR of 0x63). We undo all but MixColumns.
@
@ Note also that bsaes-armv7.pl expects aes-armv4.pl's key
@ representation, which does not match the other aes_nohw_*
@ implementations. The ARM aes_nohw_* stores each 32-bit word
@ byteswapped, as a convenience for (unsupported) big-endian ARM, at the
@ cost of extra REV and VREV32 operations in little-endian ARM.
adr r2, .Lk_decrypt_transform
adr r3, .Lk_sr+0x30
adr r11, .Lk_opt_then_skew @ Input to _vpaes_schedule_transform.
vld1.64 {q12}, [r2] @ Reuse q12 from encryption.
vmov.i8 q9, #0x0f @ Required by _vpaes_schedule_transform
@ vpaes stores one fewer round count than bsaes, but the number of keys
@ is the same.
ldr r2, [r1,#240]
add r2, r2, #1
str r2, [r0,#240]
@ Undo the basis change and reapply the S-box affine transform. See
@ .Lschedule_mangle_last.
vld1.64 {q0}, [r1]!
bl _vpaes_schedule_transform
vrev32.8 q0, q0
vst1.64 {q0}, [r0]!
@ See _vpaes_schedule_mangle for the transform on the middle keys. Note
@ it simultaneously inverts MixColumns and the S-box affine transform.
@ See .Lk_dksd through .Lk_dks9.
.Loop_dec_key_to_bsaes:
vld1.64 {q0}, [r1]!
@ Invert the ShiftRows step (see .Lschedule_mangle_both). Because the
@ decryption schedule was written in reverse order, reading the keys
@ forwards here means r3 cycles in the same direction as in the schedule,
@ so no inversion of the cycling direction is needed. We use r3 rather
@ than r8 to avoid a callee-saved register.
vld1.64 {q1}, [r3]
vtbl.8 d4, {q0}, d2
vtbl.8 d5, {q0}, d3
add r3, r3, #64-16
and r3, r3, #~(1<<6)
vmov q0, q2
@ Handle the last key differently.
subs r2, r2, #1
beq .Loop_dec_key_to_bsaes_last
@ Undo the basis change and reapply the S-box affine transform.
bl _vpaes_schedule_transform
@ Rotate each word left by 8 bits (cycle the rows) and then byte-swap. We
@ combine the two operations in .Lk_decrypt_transform.
@
@ TODO(davidben): Where does the rotation come from?
vtbl.8 d2, {q0}, d24
vtbl.8 d3, {q0}, d25
vst1.64 {q1}, [r0]!
b .Loop_dec_key_to_bsaes
.Loop_dec_key_to_bsaes_last:
@ The final key only inverts ShiftRows (already done in the loop). See
@ .Lschedule_am_decrypting. Its basis is not transformed.
vrev32.8 q0, q0
vst1.64 {q0}, [r0]!
@ Wipe registers which contained key material.
veor q0, q0, q0
veor q1, q1, q1
veor q2, q2, q2
ldmia sp!, {r11, pc} @ return
.size vpaes_decrypt_key_to_bsaes,.-vpaes_decrypt_key_to_bsaes
.globl vpaes_ctr32_encrypt_blocks
.hidden vpaes_ctr32_encrypt_blocks
.type vpaes_ctr32_encrypt_blocks,%function
.align 4
vpaes_ctr32_encrypt_blocks:
mov ip, sp
stmdb sp!, {r7,r8,r9,r10,r11, lr}
@ This function uses q4-q7 (d8-d15), which are callee-saved.
vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
cmp r2, #0
@ r8 is passed on the stack.
ldr r8, [ip]
beq .Lctr32_done
@ _vpaes_encrypt_core expects the key in r2, so swap r2 and r3.
mov r9, r3
mov r3, r2
mov r2, r9
@ Load the IV and counter portion.
ldr r7, [r8, #12]
vld1.8 {q7}, [r8]
bl _vpaes_preheat
rev r7, r7 @ The counter is big-endian.
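@ Illustrative sketch of the loop below (assumptions: a 16-byte IV with a
@ 32-bit big-endian block counter in its last word, and aes_encrypt/xor16
@ as stand-in helpers):
@
@   def ctr32_encrypt(key, iv, blocks):
@       ctr = int.from_bytes(iv[12:16], "big")
@       out = b""
@       for block in blocks:                  # each block is 16 bytes
@           out += xor16(block, aes_encrypt(key, iv))
@           ctr = (ctr + 1) & 0xffffffff      # wraps within 32 bits
@           iv = iv[:12] + ctr.to_bytes(4, "big")
@       return out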
.Lctr32_loop:
vmov q0, q7
vld1.8 {q6}, [r0]! @ Load input ahead of time
bl _vpaes_encrypt_core
veor q0, q0, q6 @ XOR input and result
vst1.8 {q0}, [r1]!
subs r3, r3, #1
@ Update the counter.
add r7, r7, #1
rev r9, r7
vmov.32 d15[1], r9
bne .Lctr32_loop
.Lctr32_done:
vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return
.size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
// thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/generated-src/linux-arm/crypto/fipsmodule/armv4-mont.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
#include <openssl/arm_arch.h>
@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
.arch armv7-a
.text
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif
.globl bn_mul_mont_nohw
.hidden bn_mul_mont_nohw
.type bn_mul_mont_nohw,%function
.align 5
bn_mul_mont_nohw:
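@ Illustrative Python sketch (not part of the generated code) of the
@ word-level Montgomery multiplication implemented below. Names mirror the
@ inline comments: ap, bp, np are little-endian 32-bit word arrays, tp is the
@ on-stack accumulator, and n0 is assumed to be -np[0]^-1 mod 2^32. The
@ result is ap*bp*R^(-num) mod np with R = 2^32.
@
@   def mont_mul(ap, bp, np, n0, num):
@       R = 1 << 32
@       tp = [0] * (num + 1)
@       for i in range(num):
@           m = ((tp[0] + ap[0] * bp[i]) * n0) % R
@           c = 0
@           for j in range(num):
@               t = tp[j] + ap[j] * bp[i] + m * np[j] + c
@               if j:
@                   tp[j - 1] = t % R
@               c = t // R
@           t = tp[num] + c
@           tp[num - 1], tp[num] = t % R, t // R
@       # conditional subtraction (.Lsub/.Lcopy): reduce the result below np
@       t = sum(w << (32 * k) for k, w in enumerate(tp))
@       n = sum(w << (32 * k) for k, w in enumerate(np))
@       if t >= n:
@           t -= n
@       return [(t >> (32 * k)) & 0xffffffff for k in range(num)]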
ldr ip,[sp,#4] @ load num
stmdb sp!,{r0,r2} @ sp points at argument block
cmp ip,#2
mov r0,ip @ load num
#ifdef __thumb2__
ittt lt
#endif
movlt r0,#0
addlt sp,sp,#2*4
blt .Labrt
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ save 10 registers
mov r0,r0,lsl#2 @ rescale r0 for byte count
sub sp,sp,r0 @ alloca(4*num)
sub sp,sp,#4 @ +extra dword
sub r0,r0,#4 @ "num=num-1"
add r4,r2,r0 @ &bp[num-1]
add r0,sp,r0 @ r0 to point at &tp[num-1]
ldr r8,[r0,#14*4] @ &n0
ldr r2,[r2] @ bp[0]
ldr r5,[r1],#4 @ ap[0],ap++
ldr r6,[r3],#4 @ np[0],np++
ldr r8,[r8] @ *n0
str r4,[r0,#15*4] @ save &bp[num]
umull r10,r11,r5,r2 @ ap[0]*bp[0]
str r8,[r0,#14*4] @ save n0 value
mul r8,r10,r8 @ "tp[0]"*n0
mov r12,#0
umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]"
mov r4,sp
.L1st:
ldr r5,[r1],#4 @ ap[j],ap++
mov r10,r11
ldr r6,[r3],#4 @ np[j],np++
mov r11,#0
umlal r10,r11,r5,r2 @ ap[j]*bp[0]
mov r14,#0
umlal r12,r14,r6,r8 @ np[j]*n0
adds r12,r12,r10
str r12,[r4],#4 @ tp[j-1]=,tp++
adc r12,r14,#0
cmp r4,r0
bne .L1st
adds r12,r12,r11
ldr r4,[r0,#13*4] @ restore bp
mov r14,#0
ldr r8,[r0,#14*4] @ restore n0
adc r14,r14,#0
str r12,[r0] @ tp[num-1]=
mov r7,sp
str r14,[r0,#4] @ tp[num]=
.Louter:
sub r7,r0,r7 @ "original" r0-1 value
sub r1,r1,r7 @ "rewind" ap to &ap[1]
ldr r2,[r4,#4]! @ *(++bp)
sub r3,r3,r7 @ "rewind" np to &np[1]
ldr r5,[r1,#-4] @ ap[0]
ldr r10,[sp] @ tp[0]
ldr r6,[r3,#-4] @ np[0]
ldr r7,[sp,#4] @ tp[1]
mov r11,#0
umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0]
str r4,[r0,#13*4] @ save bp
mul r8,r10,r8
mov r12,#0
umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]"
mov r4,sp
.Linner:
ldr r5,[r1],#4 @ ap[j],ap++
adds r10,r11,r7 @ +=tp[j]
ldr r6,[r3],#4 @ np[j],np++
mov r11,#0
umlal r10,r11,r5,r2 @ ap[j]*bp[i]
mov r14,#0
umlal r12,r14,r6,r8 @ np[j]*n0
adc r11,r11,#0
ldr r7,[r4,#8] @ tp[j+1]
adds r12,r12,r10
str r12,[r4],#4 @ tp[j-1]=,tp++
adc r12,r14,#0
cmp r4,r0
bne .Linner
adds r12,r12,r11
mov r14,#0
ldr r4,[r0,#13*4] @ restore bp
adc r14,r14,#0
ldr r8,[r0,#14*4] @ restore n0
adds r12,r12,r7
ldr r7,[r0,#15*4] @ restore &bp[num]
adc r14,r14,#0
str r12,[r0] @ tp[num-1]=
str r14,[r0,#4] @ tp[num]=
cmp r4,r7
#ifdef __thumb2__
itt ne
#endif
movne r7,sp
bne .Louter
ldr r2,[r0,#12*4] @ pull rp
mov r5,sp
add r0,r0,#4 @ r0 to point at &tp[num]
sub r5,r0,r5 @ "original" num value
mov r4,sp @ "rewind" r4
mov r1,r4 @ "borrow" r1
sub r3,r3,r5 @ "rewind" r3 to &np[0]
subs r7,r7,r7 @ "clear" carry flag
.Lsub: ldr r7,[r4],#4
ldr r6,[r3],#4
sbcs r7,r7,r6 @ tp[j]-np[j]
str r7,[r2],#4 @ rp[j]=
teq r4,r0 @ preserve carry
bne .Lsub
sbcs r14,r14,#0 @ upmost carry
mov r4,sp @ "rewind" r4
sub r2,r2,r5 @ "rewind" r2
.Lcopy: ldr r7,[r4] @ conditional copy
ldr r5,[r2]
str sp,[r4],#4 @ zap tp
#ifdef __thumb2__
it cc
#endif
movcc r5,r7
str r5,[r2],#4
teq r4,r0 @ preserve carry
bne .Lcopy
mov sp,r0
add sp,sp,#4 @ skip over tp[num+1]
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ restore registers
add sp,sp,#2*4 @ skip over {r0,r2}
mov r0,#1
.Labrt:
#if __ARM_ARCH>=5
bx lr @ bx lr
#else
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size bn_mul_mont_nohw,.-bn_mul_mont_nohw
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.globl bn_mul8x_mont_neon
.hidden bn_mul8x_mont_neon
.type bn_mul8x_mont_neon,%function
.align 5
bn_mul8x_mont_neon:
mov ip,sp
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11}
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
ldmia ip,{r4,r5} @ load rest of parameter block
mov ip,sp
cmp r5,#8
bhi .LNEON_8n
@ special case for r5==8, everything is in register bank...
vld1.32 {d28[0]}, [r2,:32]!
veor d8,d8,d8
sub r7,sp,r5,lsl#4
vld1.32 {d0,d1,d2,d3}, [r1]! @ can't specify :32 :-(
and r7,r7,#-64
vld1.32 {d30[0]}, [r4,:32]
mov sp,r7 @ alloca
vzip.16 d28,d8
vmull.u32 q6,d28,d0[0]
vmull.u32 q7,d28,d0[1]
vmull.u32 q8,d28,d1[0]
vshl.i64 d29,d13,#16
vmull.u32 q9,d28,d1[1]
vadd.u64 d29,d29,d12
veor d8,d8,d8
vmul.u32 d29,d29,d30
vmull.u32 q10,d28,d2[0]
vld1.32 {d4,d5,d6,d7}, [r3]!
vmull.u32 q11,d28,d2[1]
vmull.u32 q12,d28,d3[0]
vzip.16 d29,d8
vmull.u32 q13,d28,d3[1]
vmlal.u32 q6,d29,d4[0]
sub r9,r5,#1
vmlal.u32 q7,d29,d4[1]
vmlal.u32 q8,d29,d5[0]
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vmov q5,q6
vmlal.u32 q11,d29,d6[1]
vmov q6,q7
vmlal.u32 q12,d29,d7[0]
vmov q7,q8
vmlal.u32 q13,d29,d7[1]
vmov q8,q9
vmov q9,q10
vshr.u64 d10,d10,#16
vmov q10,q11
vmov q11,q12
vadd.u64 d10,d10,d11
vmov q12,q13
veor q13,q13
vshr.u64 d10,d10,#16
b .LNEON_outer8
.align 4
.LNEON_outer8:
vld1.32 {d28[0]}, [r2,:32]!
veor d8,d8,d8
vzip.16 d28,d8
vadd.u64 d12,d12,d10
vmlal.u32 q6,d28,d0[0]
vmlal.u32 q7,d28,d0[1]
vmlal.u32 q8,d28,d1[0]
vshl.i64 d29,d13,#16
vmlal.u32 q9,d28,d1[1]
vadd.u64 d29,d29,d12
veor d8,d8,d8
subs r9,r9,#1
vmul.u32 d29,d29,d30
vmlal.u32 q10,d28,d2[0]
vmlal.u32 q11,d28,d2[1]
vmlal.u32 q12,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q13,d28,d3[1]
vmlal.u32 q6,d29,d4[0]
vmlal.u32 q7,d29,d4[1]
vmlal.u32 q8,d29,d5[0]
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vmov q5,q6
vmlal.u32 q11,d29,d6[1]
vmov q6,q7
vmlal.u32 q12,d29,d7[0]
vmov q7,q8
vmlal.u32 q13,d29,d7[1]
vmov q8,q9
vmov q9,q10
vshr.u64 d10,d10,#16
vmov q10,q11
vmov q11,q12
vadd.u64 d10,d10,d11
vmov q12,q13
veor q13,q13
vshr.u64 d10,d10,#16
bne .LNEON_outer8
vadd.u64 d12,d12,d10
mov r7,sp
vshr.u64 d10,d12,#16
mov r8,r5
vadd.u64 d13,d13,d10
add r6,sp,#96
vshr.u64 d10,d13,#16
vzip.16 d12,d13
b .LNEON_tail_entry
.align 4
.LNEON_8n:
veor q6,q6,q6
sub r7,sp,#128
veor q7,q7,q7
sub r7,r7,r5,lsl#4
veor q8,q8,q8
and r7,r7,#-64
veor q9,q9,q9
mov sp,r7 @ alloca
veor q10,q10,q10
add r7,r7,#256
veor q11,q11,q11
sub r8,r5,#8
veor q12,q12,q12
veor q13,q13,q13
.LNEON_8n_init:
vst1.64 {q6,q7},[r7,:256]!
subs r8,r8,#8
vst1.64 {q8,q9},[r7,:256]!
vst1.64 {q10,q11},[r7,:256]!
vst1.64 {q12,q13},[r7,:256]!
bne .LNEON_8n_init
add r6,sp,#256
vld1.32 {d0,d1,d2,d3},[r1]!
add r10,sp,#8
vld1.32 {d30[0]},[r4,:32]
mov r9,r5
b .LNEON_8n_outer
.align 4
.LNEON_8n_outer:
vld1.32 {d28[0]},[r2,:32]! @ *b++
veor d8,d8,d8
vzip.16 d28,d8
add r7,sp,#128
vld1.32 {d4,d5,d6,d7},[r3]!
vmlal.u32 q6,d28,d0[0]
vmlal.u32 q7,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q8,d28,d1[0]
vshl.i64 d29,d13,#16
vmlal.u32 q9,d28,d1[1]
vadd.u64 d29,d29,d12
vmlal.u32 q10,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q11,d28,d2[1]
vst1.32 {d28},[sp,:64] @ put aside smashed b[8*i+0]
vmlal.u32 q12,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q13,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q6,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q7,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q8,d29,d5[0]
vshr.u64 d12,d12,#16
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vadd.u64 d12,d12,d13
vmlal.u32 q11,d29,d6[1]
vshr.u64 d12,d12,#16
vmlal.u32 q12,d29,d7[0]
vmlal.u32 q13,d29,d7[1]
vadd.u64 d14,d14,d12
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+0]
vmlal.u32 q7,d28,d0[0]
vld1.64 {q6},[r6,:128]!
vmlal.u32 q8,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q9,d28,d1[0]
vshl.i64 d29,d15,#16
vmlal.u32 q10,d28,d1[1]
vadd.u64 d29,d29,d14
vmlal.u32 q11,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q12,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+1]
vmlal.u32 q13,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q6,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q7,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q8,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q9,d29,d5[0]
vshr.u64 d14,d14,#16
vmlal.u32 q10,d29,d5[1]
vmlal.u32 q11,d29,d6[0]
vadd.u64 d14,d14,d15
vmlal.u32 q12,d29,d6[1]
vshr.u64 d14,d14,#16
vmlal.u32 q13,d29,d7[0]
vmlal.u32 q6,d29,d7[1]
vadd.u64 d16,d16,d14
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+1]
vmlal.u32 q8,d28,d0[0]
vld1.64 {q7},[r6,:128]!
vmlal.u32 q9,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q10,d28,d1[0]
vshl.i64 d29,d17,#16
vmlal.u32 q11,d28,d1[1]
vadd.u64 d29,d29,d16
vmlal.u32 q12,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q13,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+2]
vmlal.u32 q6,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q7,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q8,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q9,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q10,d29,d5[0]
vshr.u64 d16,d16,#16
vmlal.u32 q11,d29,d5[1]
vmlal.u32 q12,d29,d6[0]
vadd.u64 d16,d16,d17
vmlal.u32 q13,d29,d6[1]
vshr.u64 d16,d16,#16
vmlal.u32 q6,d29,d7[0]
vmlal.u32 q7,d29,d7[1]
vadd.u64 d18,d18,d16
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+2]
vmlal.u32 q9,d28,d0[0]
vld1.64 {q8},[r6,:128]!
vmlal.u32 q10,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q11,d28,d1[0]
vshl.i64 d29,d19,#16
vmlal.u32 q12,d28,d1[1]
vadd.u64 d29,d29,d18
vmlal.u32 q13,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q6,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+3]
vmlal.u32 q7,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q8,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q9,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q10,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q11,d29,d5[0]
vshr.u64 d18,d18,#16
vmlal.u32 q12,d29,d5[1]
vmlal.u32 q13,d29,d6[0]
vadd.u64 d18,d18,d19
vmlal.u32 q6,d29,d6[1]
vshr.u64 d18,d18,#16
vmlal.u32 q7,d29,d7[0]
vmlal.u32 q8,d29,d7[1]
vadd.u64 d20,d20,d18
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+3]
vmlal.u32 q10,d28,d0[0]
vld1.64 {q9},[r6,:128]!
vmlal.u32 q11,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q12,d28,d1[0]
vshl.i64 d29,d21,#16
vmlal.u32 q13,d28,d1[1]
vadd.u64 d29,d29,d20
vmlal.u32 q6,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q7,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+4]
vmlal.u32 q8,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q9,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q10,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q11,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q12,d29,d5[0]
vshr.u64 d20,d20,#16
vmlal.u32 q13,d29,d5[1]
vmlal.u32 q6,d29,d6[0]
vadd.u64 d20,d20,d21
vmlal.u32 q7,d29,d6[1]
vshr.u64 d20,d20,#16
vmlal.u32 q8,d29,d7[0]
vmlal.u32 q9,d29,d7[1]
vadd.u64 d22,d22,d20
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+4]
vmlal.u32 q11,d28,d0[0]
vld1.64 {q10},[r6,:128]!
vmlal.u32 q12,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q13,d28,d1[0]
vshl.i64 d29,d23,#16
vmlal.u32 q6,d28,d1[1]
vadd.u64 d29,d29,d22
vmlal.u32 q7,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q8,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+5]
vmlal.u32 q9,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q10,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q11,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q12,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q13,d29,d5[0]
vshr.u64 d22,d22,#16
vmlal.u32 q6,d29,d5[1]
vmlal.u32 q7,d29,d6[0]
vadd.u64 d22,d22,d23
vmlal.u32 q8,d29,d6[1]
vshr.u64 d22,d22,#16
vmlal.u32 q9,d29,d7[0]
vmlal.u32 q10,d29,d7[1]
vadd.u64 d24,d24,d22
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+5]
vmlal.u32 q12,d28,d0[0]
vld1.64 {q11},[r6,:128]!
vmlal.u32 q13,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q6,d28,d1[0]
vshl.i64 d29,d25,#16
vmlal.u32 q7,d28,d1[1]
vadd.u64 d29,d29,d24
vmlal.u32 q8,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q9,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+6]
vmlal.u32 q10,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q11,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q12,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q13,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q6,d29,d5[0]
vshr.u64 d24,d24,#16
vmlal.u32 q7,d29,d5[1]
vmlal.u32 q8,d29,d6[0]
vadd.u64 d24,d24,d25
vmlal.u32 q9,d29,d6[1]
vshr.u64 d24,d24,#16
vmlal.u32 q10,d29,d7[0]
vmlal.u32 q11,d29,d7[1]
vadd.u64 d26,d26,d24
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+6]
vmlal.u32 q13,d28,d0[0]
vld1.64 {q12},[r6,:128]!
vmlal.u32 q6,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q7,d28,d1[0]
vshl.i64 d29,d27,#16
vmlal.u32 q8,d28,d1[1]
vadd.u64 d29,d29,d26
vmlal.u32 q9,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q10,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+7]
vmlal.u32 q11,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q12,d28,d3[1]
vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0]
vmlal.u32 q13,d29,d4[0]
vld1.32 {d0,d1,d2,d3},[r1]!
vmlal.u32 q6,d29,d4[1]
vmlal.u32 q7,d29,d5[0]
vshr.u64 d26,d26,#16
vmlal.u32 q8,d29,d5[1]
vmlal.u32 q9,d29,d6[0]
vadd.u64 d26,d26,d27
vmlal.u32 q10,d29,d6[1]
vshr.u64 d26,d26,#16
vmlal.u32 q11,d29,d7[0]
vmlal.u32 q12,d29,d7[1]
vadd.u64 d12,d12,d26
vst1.32 {d29},[r10,:64] @ put aside smashed m[8*i+7]
add r10,sp,#8 @ rewind
sub r8,r5,#8
b .LNEON_8n_inner
.align 4
.LNEON_8n_inner:
subs r8,r8,#8
vmlal.u32 q6,d28,d0[0]
vld1.64 {q13},[r6,:128]
vmlal.u32 q7,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+0]
vmlal.u32 q8,d28,d1[0]
vld1.32 {d4,d5,d6,d7},[r3]!
vmlal.u32 q9,d28,d1[1]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q10,d28,d2[0]
vmlal.u32 q11,d28,d2[1]
vmlal.u32 q12,d28,d3[0]
vmlal.u32 q13,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+1]
vmlal.u32 q6,d29,d4[0]
vmlal.u32 q7,d29,d4[1]
vmlal.u32 q8,d29,d5[0]
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vmlal.u32 q11,d29,d6[1]
vmlal.u32 q12,d29,d7[0]
vmlal.u32 q13,d29,d7[1]
vst1.64 {q6},[r7,:128]!
vmlal.u32 q7,d28,d0[0]
vld1.64 {q6},[r6,:128]
vmlal.u32 q8,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+1]
vmlal.u32 q9,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q10,d28,d1[1]
vmlal.u32 q11,d28,d2[0]
vmlal.u32 q12,d28,d2[1]
vmlal.u32 q13,d28,d3[0]
vmlal.u32 q6,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+2]
vmlal.u32 q7,d29,d4[0]
vmlal.u32 q8,d29,d4[1]
vmlal.u32 q9,d29,d5[0]
vmlal.u32 q10,d29,d5[1]
vmlal.u32 q11,d29,d6[0]
vmlal.u32 q12,d29,d6[1]
vmlal.u32 q13,d29,d7[0]
vmlal.u32 q6,d29,d7[1]
vst1.64 {q7},[r7,:128]!
vmlal.u32 q8,d28,d0[0]
vld1.64 {q7},[r6,:128]
vmlal.u32 q9,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+2]
vmlal.u32 q10,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q11,d28,d1[1]
vmlal.u32 q12,d28,d2[0]
vmlal.u32 q13,d28,d2[1]
vmlal.u32 q6,d28,d3[0]
vmlal.u32 q7,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+3]
vmlal.u32 q8,d29,d4[0]
vmlal.u32 q9,d29,d4[1]
vmlal.u32 q10,d29,d5[0]
vmlal.u32 q11,d29,d5[1]
vmlal.u32 q12,d29,d6[0]
vmlal.u32 q13,d29,d6[1]
vmlal.u32 q6,d29,d7[0]
vmlal.u32 q7,d29,d7[1]
vst1.64 {q8},[r7,:128]!
vmlal.u32 q9,d28,d0[0]
vld1.64 {q8},[r6,:128]
vmlal.u32 q10,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+3]
vmlal.u32 q11,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q12,d28,d1[1]
vmlal.u32 q13,d28,d2[0]
vmlal.u32 q6,d28,d2[1]
vmlal.u32 q7,d28,d3[0]
vmlal.u32 q8,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+4]
vmlal.u32 q9,d29,d4[0]
vmlal.u32 q10,d29,d4[1]
vmlal.u32 q11,d29,d5[0]
vmlal.u32 q12,d29,d5[1]
vmlal.u32 q13,d29,d6[0]
vmlal.u32 q6,d29,d6[1]
vmlal.u32 q7,d29,d7[0]
vmlal.u32 q8,d29,d7[1]
vst1.64 {q9},[r7,:128]!
vmlal.u32 q10,d28,d0[0]
vld1.64 {q9},[r6,:128]
vmlal.u32 q11,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+4]
vmlal.u32 q12,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q13,d28,d1[1]
vmlal.u32 q6,d28,d2[0]
vmlal.u32 q7,d28,d2[1]
vmlal.u32 q8,d28,d3[0]
vmlal.u32 q9,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+5]
vmlal.u32 q10,d29,d4[0]
vmlal.u32 q11,d29,d4[1]
vmlal.u32 q12,d29,d5[0]
vmlal.u32 q13,d29,d5[1]
vmlal.u32 q6,d29,d6[0]
vmlal.u32 q7,d29,d6[1]
vmlal.u32 q8,d29,d7[0]
vmlal.u32 q9,d29,d7[1]
vst1.64 {q10},[r7,:128]!
vmlal.u32 q11,d28,d0[0]
vld1.64 {q10},[r6,:128]
vmlal.u32 q12,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+5]
vmlal.u32 q13,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q6,d28,d1[1]
vmlal.u32 q7,d28,d2[0]
vmlal.u32 q8,d28,d2[1]
vmlal.u32 q9,d28,d3[0]
vmlal.u32 q10,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+6]
vmlal.u32 q11,d29,d4[0]
vmlal.u32 q12,d29,d4[1]
vmlal.u32 q13,d29,d5[0]
vmlal.u32 q6,d29,d5[1]
vmlal.u32 q7,d29,d6[0]
vmlal.u32 q8,d29,d6[1]
vmlal.u32 q9,d29,d7[0]
vmlal.u32 q10,d29,d7[1]
vst1.64 {q11},[r7,:128]!
vmlal.u32 q12,d28,d0[0]
vld1.64 {q11},[r6,:128]
vmlal.u32 q13,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+6]
vmlal.u32 q6,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q7,d28,d1[1]
vmlal.u32 q8,d28,d2[0]
vmlal.u32 q9,d28,d2[1]
vmlal.u32 q10,d28,d3[0]
vmlal.u32 q11,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+7]
vmlal.u32 q12,d29,d4[0]
vmlal.u32 q13,d29,d4[1]
vmlal.u32 q6,d29,d5[0]
vmlal.u32 q7,d29,d5[1]
vmlal.u32 q8,d29,d6[0]
vmlal.u32 q9,d29,d6[1]
vmlal.u32 q10,d29,d7[0]
vmlal.u32 q11,d29,d7[1]
vst1.64 {q12},[r7,:128]!
vmlal.u32 q13,d28,d0[0]
vld1.64 {q12},[r6,:128]
vmlal.u32 q6,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+7]
vmlal.u32 q7,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q8,d28,d1[1]
vmlal.u32 q9,d28,d2[0]
vmlal.u32 q10,d28,d2[1]
vmlal.u32 q11,d28,d3[0]
vmlal.u32 q12,d28,d3[1]
it eq
subeq r1,r1,r5,lsl#2 @ rewind
vmlal.u32 q13,d29,d4[0]
vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0]
vmlal.u32 q6,d29,d4[1]
vld1.32 {d0,d1,d2,d3},[r1]!
vmlal.u32 q7,d29,d5[0]
add r10,sp,#8 @ rewind
vmlal.u32 q8,d29,d5[1]
vmlal.u32 q9,d29,d6[0]
vmlal.u32 q10,d29,d6[1]
vmlal.u32 q11,d29,d7[0]
vst1.64 {q13},[r7,:128]!
vmlal.u32 q12,d29,d7[1]
bne .LNEON_8n_inner
add r6,sp,#128
vst1.64 {q6,q7},[r7,:256]!
veor q2,q2,q2 @ d4-d5
vst1.64 {q8,q9},[r7,:256]!
veor q3,q3,q3 @ d6-d7
vst1.64 {q10,q11},[r7,:256]!
vst1.64 {q12},[r7,:128]
subs r9,r9,#8
vld1.64 {q6,q7},[r6,:256]!
vld1.64 {q8,q9},[r6,:256]!
vld1.64 {q10,q11},[r6,:256]!
vld1.64 {q12,q13},[r6,:256]!
itt ne
subne r3,r3,r5,lsl#2 @ rewind
bne .LNEON_8n_outer
add r7,sp,#128
vst1.64 {q2,q3}, [sp,:256]! @ start wiping stack frame
vshr.u64 d10,d12,#16
vst1.64 {q2,q3},[sp,:256]!
vadd.u64 d13,d13,d10
vst1.64 {q2,q3}, [sp,:256]!
vshr.u64 d10,d13,#16
vst1.64 {q2,q3}, [sp,:256]!
vzip.16 d12,d13
mov r8,r5
b .LNEON_tail_entry
.align 4
.LNEON_tail:
vadd.u64 d12,d12,d10
vshr.u64 d10,d12,#16
vld1.64 {q8,q9}, [r6, :256]!
vadd.u64 d13,d13,d10
vld1.64 {q10,q11}, [r6, :256]!
vshr.u64 d10,d13,#16
vld1.64 {q12,q13}, [r6, :256]!
vzip.16 d12,d13
.LNEON_tail_entry:
vadd.u64 d14,d14,d10
vst1.32 {d12[0]}, [r7, :32]!
vshr.u64 d10,d14,#16
vadd.u64 d15,d15,d10
vshr.u64 d10,d15,#16
vzip.16 d14,d15
vadd.u64 d16,d16,d10
vst1.32 {d14[0]}, [r7, :32]!
vshr.u64 d10,d16,#16
vadd.u64 d17,d17,d10
vshr.u64 d10,d17,#16
vzip.16 d16,d17
vadd.u64 d18,d18,d10
vst1.32 {d16[0]}, [r7, :32]!
vshr.u64 d10,d18,#16
vadd.u64 d19,d19,d10
vshr.u64 d10,d19,#16
vzip.16 d18,d19
vadd.u64 d20,d20,d10
vst1.32 {d18[0]}, [r7, :32]!
vshr.u64 d10,d20,#16
vadd.u64 d21,d21,d10
vshr.u64 d10,d21,#16
vzip.16 d20,d21
vadd.u64 d22,d22,d10
vst1.32 {d20[0]}, [r7, :32]!
vshr.u64 d10,d22,#16
vadd.u64 d23,d23,d10
vshr.u64 d10,d23,#16
vzip.16 d22,d23
vadd.u64 d24,d24,d10
vst1.32 {d22[0]}, [r7, :32]!
vshr.u64 d10,d24,#16
vadd.u64 d25,d25,d10
vshr.u64 d10,d25,#16
vzip.16 d24,d25
vadd.u64 d26,d26,d10
vst1.32 {d24[0]}, [r7, :32]!
vshr.u64 d10,d26,#16
vadd.u64 d27,d27,d10
vshr.u64 d10,d27,#16
vzip.16 d26,d27
vld1.64 {q6,q7}, [r6, :256]!
subs r8,r8,#8
vst1.32 {d26[0]}, [r7, :32]!
bne .LNEON_tail
vst1.32 {d10[0]}, [r7, :32] @ top-most bit
sub r3,r3,r5,lsl#2 @ rewind r3
subs r1,sp,#0 @ clear carry flag
add r2,sp,r5,lsl#2
.LNEON_sub:
ldmia r1!, {r4,r5,r6,r7}
ldmia r3!, {r8,r9,r10,r11}
sbcs r8, r4,r8
sbcs r9, r5,r9
sbcs r10,r6,r10
sbcs r11,r7,r11
teq r1,r2 @ preserves carry
stmia r0!, {r8,r9,r10,r11}
bne .LNEON_sub
ldr r10, [r1] @ load top-most bit
mov r11,sp
veor q0,q0,q0
sub r11,r2,r11 @ this is num*4
veor q1,q1,q1
mov r1,sp
sub r0,r0,r11 @ rewind r0
mov r3,r2 @ second 3/4th of frame
sbcs r10,r10,#0 @ result is carry flag
.LNEON_copy_n_zap:
ldmia r1!, {r4,r5,r6,r7}
ldmia r0, {r8,r9,r10,r11}
it cc
movcc r8, r4
vst1.64 {q0,q1}, [r3,:256]! @ wipe
itt cc
movcc r9, r5
movcc r10,r6
vst1.64 {q0,q1}, [r3,:256]! @ wipe
it cc
movcc r11,r7
ldmia r1, {r4,r5,r6,r7}
stmia r0!, {r8,r9,r10,r11}
sub r1,r1,#16
ldmia r0, {r8,r9,r10,r11}
it cc
movcc r8, r4
vst1.64 {q0,q1}, [r1,:256]! @ wipe
itt cc
movcc r9, r5
movcc r10,r6
vst1.64 {q0,q1}, [r3,:256]! @ wipe
it cc
movcc r11,r7
teq r1,r2 @ preserves carry
stmia r0!, {r8,r9,r10,r11}
bne .LNEON_copy_n_zap
mov sp,ip
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11}
bx lr @ bx lr
.size bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
#endif
.byte 77,111,110,116,103,111,109,101,114,121,32,109,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)