Columns: repo_id (string), size (int64, bytes), file_path (string), content (string)
wlsfx/bnbb
2,190
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_sub_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Subtract modulo p_384, z := (x - y) mod p_384
// Inputs x[6], y[6]; output z[6]
//
//    extern void bignum_sub_p384(uint64_t z[static 6], const uint64_t x[static 6],
//                                const uint64_t y[static 6]);
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sub_p384)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sub_p384)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sub_p384)
        .text
        .balign 4

#define z x0
#define x x1
#define y x2
#define c x3
#define l x4
#define d0 x5
#define d1 x6
#define d2 x7
#define d3 x8
#define d4 x9
#define d5 x10

S2N_BN_SYMBOL(bignum_sub_p384):

CFI_START

// First just subtract the numbers as [d5; d4; d3; d2; d1; d0]
// Set a mask based on (inverted) carry indicating x < y = correction is needed

        ldp     d0, d1, [x]
        ldp     l, c, [y]
        subs    d0, d0, l
        sbcs    d1, d1, c
        ldp     d2, d3, [x, #16]
        ldp     l, c, [y, #16]
        sbcs    d2, d2, l
        sbcs    d3, d3, c
        ldp     d4, d5, [x, #32]
        ldp     l, c, [y, #32]
        sbcs    d4, d4, l
        sbcs    d5, d5, c

// Create a mask for the condition x < y, when we need to correct

        csetm   c, cc

// Now correct by adding masked p_384

        mov     l, #0x00000000ffffffff
        and     l, l, c
        adds    d0, d0, l
        eor     l, l, c
        adcs    d1, d1, l
        mov     l, #0xfffffffffffffffe
        and     l, l, c
        adcs    d2, d2, l
        adcs    d3, d3, c
        adcs    d4, d4, c
        adc     d5, d5, c

// Store the result

        stp     d0, d1, [z]
        stp     d2, d3, [z, #16]
        stp     d4, d5, [z, #32]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_sub_p384)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
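The correction step in this file is branch-free: the borrow out of the 6-word subtraction becomes an all-ones/all-zeros mask (csetm), and p_384 is added back under that mask. A minimal C reference sketch of the same idea, assuming a GCC/Clang-style __uint128_t on a 64-bit target (the name ref_sub_p384 is mine, not part of s2n-bignum):

#include <stdint.h>
#include <stdio.h>

/* p_384 = 2^384 - 2^128 - 2^96 + 2^32 - 1, little-endian 64-bit limbs */
static const uint64_t P384[6] = {
    0x00000000ffffffffULL, 0xffffffff00000000ULL, 0xfffffffffffffffeULL,
    0xffffffffffffffffULL, 0xffffffffffffffffULL, 0xffffffffffffffffULL
};

static void ref_sub_p384(uint64_t z[6], const uint64_t x[6], const uint64_t y[6])
{
    /* Multiword subtract, tracking the borrow out of the top limb */
    uint64_t borrow = 0;
    for (int i = 0; i < 6; i++) {
        __uint128_t d = (__uint128_t)x[i] - y[i] - borrow;
        z[i] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;   /* 1 iff the subtraction wrapped */
    }
    /* mask is all-ones iff x < y; add back p_384 under the mask */
    uint64_t mask = 0 - borrow, carry = 0;
    for (int i = 0; i < 6; i++) {
        __uint128_t s = (__uint128_t)z[i] + (P384[i] & mask) + carry;
        z[i] = (uint64_t)s;
        carry = (uint64_t)(s >> 64);
    }
}

int main(void)
{
    uint64_t x[6] = {1}, y[6] = {2}, z[6];
    ref_sub_p384(z, x, y);              /* (1 - 2) mod p_384 = p_384 - 1 */
    for (int i = 5; i >= 0; i--) printf("%016llx", (unsigned long long)z[i]);
    putchar('\n');
    return 0;
}

Note how the assembly avoids a table of p_384 limbs: it materializes 0x00000000ffffffff, derives the second limb from it with an eor against the mask, uses a masked 0xfffffffffffffffe for the third, and adds the mask itself for the three all-ones top limbs.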
wlsfx/bnbb
5,482
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_mod_n384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Reduce modulo group order, z := x mod n_384
// Input x[k]; output z[6]
//
//    extern void bignum_mod_n384(uint64_t z[static 6], uint64_t k,
//                                const uint64_t *x);
//
// Reduction is modulo the group order of the NIST curve P-384.
//
// Standard ARM ABI: X0 = z, X1 = k, X2 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_n384)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_n384)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_n384)
        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_n384_alt)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_n384_alt)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_n384_alt)
        .text
        .balign 4

#define z x0
#define k x1
#define x x2

#define m0 x3
#define m1 x4
#define m2 x5
#define m3 x6
#define m4 x7
#define m5 x8

#define t0 x9
#define t1 x10
#define t2 x11
#define t3 x12
#define t4 x13
#define t5 x14

#define n0 x15
#define n1 x16
#define n2 x17

// Aliased to t4

#define q x13

// Aliased to t5

#define d x14

// This is aliased to t5; we get one extra (free-ish?) reg-reg move in the
// main loop by not using an additional register, which seems an OK decision.

#define t x14

// Loading large constants

#define movbig(nn,n3,n2,n1,n0)          \
        movz    nn, n0 __LF             \
        movk    nn, n1, lsl #16 __LF    \
        movk    nn, n2, lsl #32 __LF    \
        movk    nn, n3, lsl #48

S2N_BN_SYMBOL(bignum_mod_n384):

S2N_BN_SYMBOL(bignum_mod_n384_alt):

CFI_START

// If the input is already <= 5 words long, go to a trivial "copy" path

        cmp     k, #6
        bcc     Lbignum_mod_n384_short

// Otherwise load the top 6 digits (top-down) and reduce k by 6

        sub     k, k, #6
        lsl     t0, k, #3
        add     t0, t0, x
        ldp     m4, m5, [t0, #32]
        ldp     m2, m3, [t0, #16]
        ldp     m0, m1, [t0]

// Load the complicated three words of 2^384 - n_384 = [0; 0; 0; n2; n1; n0]

        movbig( n0, #0x1313, #0xe695, #0x333a, #0xd68d)
        movbig( n1, #0xa7e5, #0xf24d, #0xb74f, #0x5885)
        movbig( n2, #0x389c, #0xb27e, #0x0bc8, #0xd220)

// Reduce the top 6 digits mod n_384 (a conditional subtraction of n_384)

        adds    t0, m0, n0
        adcs    t1, m1, n1
        adcs    t2, m2, n2
        adcs    t3, m3, xzr
        adcs    t4, m4, xzr
        adcs    t5, m5, xzr
        csel    m0, m0, t0, cc
        csel    m1, m1, t1, cc
        csel    m2, m2, t2, cc
        csel    m3, m3, t3, cc
        csel    m4, m4, t4, cc
        csel    m5, m5, t5, cc

// Now do (k-6) iterations of 7->6 word modular reduction

        cbz     k, Lbignum_mod_n384_writeback

Lbignum_mod_n384_loop:

// Compute q = min (m5 + 1) (2^64 - 1)

        adds    q, m5, #1
        csetm   t0, cs
        orr     q, q, t0

// [t3;t2;t1;t0] = q * (2^384 - n_384)

        mul     t0, n0, q
        mul     t1, n1, q
        mul     t2, n2, q
        umulh   t3, n0, q
        adds    t1, t1, t3
        umulh   t3, n1, q
        adcs    t2, t2, t3
        umulh   t3, n2, q
        adc     t3, xzr, t3

// Decrement k and load the next digit

        sub     k, k, #1
        ldr     d, [x, k, lsl #3]

// Compensate for 2^384 * q

        sub     m5, m5, q

// [m5;m4;t4;t3;t2;t1;t0] = [m5;m4;m3;m2;m1;m0;d] - q * n_384

        adds    t0, d, t0
        adcs    t1, m0, t1
        adcs    t2, m1, t2
        adcs    t3, m2, t3
        adcs    t4, m3, xzr
        adcs    m4, m4, xzr
        adc     m5, m5, xzr

// Now our top word m5 is either zero or all 1s. Use it for a masked
// addition of n_384, which we can do by a *subtraction* of
// 2^384 - n_384 from our portion, re-using the constants

        and     t, m5, n0
        subs    m0, t0, t
        and     t, m5, n1
        sbcs    m1, t1, t
        and     t, m5, n2
        sbcs    m2, t2, t
        sbcs    m3, t3, xzr
        sbcs    t, t4, xzr
        sbc     m5, m4, xzr
        mov     m4, t

        cbnz    k, Lbignum_mod_n384_loop

// Finally write back [m5;m4;m3;m2;m1;m0] and return

Lbignum_mod_n384_writeback:
        stp     m0, m1, [z]
        stp     m2, m3, [z, #16]
        stp     m4, m5, [z, #32]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_mod_n384)

// Short case: just copy the input with zero-padding

Lbignum_mod_n384_short:
        mov     m0, xzr
        mov     m1, xzr
        mov     m2, xzr
        mov     m3, xzr
        mov     m4, xzr
        mov     m5, xzr
        cbz     k, Lbignum_mod_n384_writeback
        ldr     m0, [x]
        subs    k, k, #1
        beq     Lbignum_mod_n384_writeback
        ldr     m1, [x, #8]
        subs    k, k, #1
        beq     Lbignum_mod_n384_writeback
        ldr     m2, [x, #16]
        subs    k, k, #1
        beq     Lbignum_mod_n384_writeback
        ldr     m3, [x, #24]
        subs    k, k, #1
        beq     Lbignum_mod_n384_writeback
        ldr     m4, [x, #32]
        b       Lbignum_mod_n384_writeback

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
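One detail worth isolating: each 7-to-6-word step uses the saturating quotient estimate q = min(m5 + 1, 2^64 - 1), computed without a branch by the adds/csetm/orr triple at the top of the loop. A small C sketch of just that trick (C99; the helper name is mine, not s2n-bignum's):

#include <stdint.h>
#include <assert.h>

static uint64_t quotient_estimate(uint64_t m5)
{
    uint64_t s = m5 + 1;                     /* adds  q, m5, #1              */
    uint64_t mask = (uint64_t)0 - (s == 0);  /* csetm: all-ones iff carry    */
    return s | mask;                         /* orr: saturate to 2^64 - 1    */
}

int main(void)
{
    assert(quotient_estimate(0) == 1);
    assert(quotient_estimate(41) == 42);
    assert(quotient_estimate(UINT64_MAX - 1) == UINT64_MAX);
    assert(quotient_estimate(UINT64_MAX) == UINT64_MAX);  /* saturated case */
    return 0;
}

Because the top word of n_384 is all ones, this estimate overshoots by at most one; the sign of the result (m5 ending up zero or all ones, as the file's comment notes) then drives the masked add-back of n_384 that closes the loop.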
wlsfx/bnbb
9,663
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_montmul_p384_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery multiply, z := (x * y / 2^384) mod p_384 // Inputs x[6], y[6]; output z[6] // // extern void bignum_montmul_p384_alt(uint64_t z[static 6], // const uint64_t x[static 6], // const uint64_t y[static 6]); // // Does z := (2^{-384} * x * y) mod p_384, assuming that the inputs x and y // satisfy x * y <= 2^384 * p_384 (in particular this is true if we are in // the "usual" case x < p_384 and y < p_384). // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p384_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montmul_p384_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p384_alt) .text .balign 4 // --------------------------------------------------------------------------- // Core one-step "short" Montgomery reduction macro. Takes input in // [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1], // adding to the existing contents of [d5;d4;d3;d2;d1]. It is fine // for d6 to be the same register as d0. // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 // --------------------------------------------------------------------------- #define montreds(d6,d5,d4,d3,d2,d1,d0, t3,t2,t1) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ /* Store it in d6 to make the 2^384 * w contribution already */ \ lsl t1, d0, #32 __LF \ add d6, t1, d0 __LF \ /* Now let [t3;t2;t1;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel d0 so we don't need it */ \ mov t1, #0xffffffff00000001 __LF \ umulh t1, t1, d6 __LF \ mov t2, #0x00000000ffffffff __LF \ mul t3, t2, d6 __LF \ umulh t2, t2, d6 __LF \ adds t1, t1, t3 __LF \ adcs t2, t2, d6 __LF \ adc t3, xzr, xzr __LF \ /* Now add it, by subtracting from 2^384 * w + x */ \ subs d1, d1, t1 __LF \ sbcs d2, d2, t2 __LF \ sbcs d3, d3, t3 __LF \ sbcs d4, d4, xzr __LF \ sbcs d5, d5, xzr __LF \ sbc d6, d6, xzr #define z x0 #define x x1 #define y x2 // These are repeated mod 2 as we load pairs of inputs #define a0 x3 #define a1 x4 #define a2 x3 #define a3 x4 #define a4 x3 #define a5 x4 #define b0 x5 #define b1 x6 #define b2 x7 #define b3 x8 #define b4 x9 #define b5 x10 #define l x11 #define u0 x12 #define u1 x13 #define u2 x14 #define u3 x15 #define u4 x16 #define u5 x17 #define u6 x19 #define u7 x20 #define u8 x21 #define u9 x22 #define u10 x2 // same as y #define u11 x1 // same as x #define h b5 // same as b5 S2N_BN_SYMBOL(bignum_montmul_p384_alt): CFI_START // Save more registers CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) // Load operands and set up row 0 = [u6;...;u0] = a0 * [b5;...;b0] ldp a0, a1, [x] ldp b0, b1, [y] mul u0, a0, b0 umulh u1, a0, b0 mul l, a0, b1 umulh u2, a0, b1 adds u1, u1, l ldp b2, b3, [y, #16] mul l, a0, b2 umulh u3, a0, b2 adcs u2, u2, l mul l, a0, b3 umulh u4, a0, b3 adcs u3, u3, l ldp b4, b5, [y, #32] mul l, a0, b4 umulh u5, a0, b4 adcs u4, u4, l mul l, a0, b5 umulh u6, a0, b5 adcs u5, u5, l adc u6, u6, xzr // Row 1 = [u7;...;u0] = [a1;a0] * [b5;...;b0] mul l, a1, b0 adds u1, u1, l mul l, a1, b1 adcs u2, u2, l mul l, a1, b2 adcs u3, u3, l mul l, a1, b3 adcs u4, u4, l mul l, a1, b4 adcs u5, u5, l mul l, a1, b5 adcs u6, u6, l cset u7, cs umulh l, a1, b0 adds u2, u2, l umulh l, a1, b1 adcs u3, u3, l umulh l, 
a1, b2 adcs u4, u4, l umulh l, a1, b3 adcs u5, u5, l umulh l, a1, b4 adcs u6, u6, l umulh l, a1, b5 adc u7, u7, l // Row 2 = [u8;...;u0] = [a2;a1;a0] * [b5;...;b0] ldp a2, a3, [x, #16] mul l, a2, b0 adds u2, u2, l mul l, a2, b1 adcs u3, u3, l mul l, a2, b2 adcs u4, u4, l mul l, a2, b3 adcs u5, u5, l mul l, a2, b4 adcs u6, u6, l mul l, a2, b5 adcs u7, u7, l cset u8, cs umulh l, a2, b0 adds u3, u3, l umulh l, a2, b1 adcs u4, u4, l umulh l, a2, b2 adcs u5, u5, l umulh l, a2, b3 adcs u6, u6, l umulh l, a2, b4 adcs u7, u7, l umulh l, a2, b5 adc u8, u8, l // Row 3 = [u9;...;u0] = [a3;a2;a1;a0] * [b5;...;b0] mul l, a3, b0 adds u3, u3, l mul l, a3, b1 adcs u4, u4, l mul l, a3, b2 adcs u5, u5, l mul l, a3, b3 adcs u6, u6, l mul l, a3, b4 adcs u7, u7, l mul l, a3, b5 adcs u8, u8, l cset u9, cs umulh l, a3, b0 adds u4, u4, l umulh l, a3, b1 adcs u5, u5, l umulh l, a3, b2 adcs u6, u6, l umulh l, a3, b3 adcs u7, u7, l umulh l, a3, b4 adcs u8, u8, l umulh l, a3, b5 adc u9, u9, l // Row 4 = [u10;...;u0] = [a4;a3;a2;a1;a0] * [b5;...;b0] ldp a4, a5, [x, #32] mul l, a4, b0 adds u4, u4, l mul l, a4, b1 adcs u5, u5, l mul l, a4, b2 adcs u6, u6, l mul l, a4, b3 adcs u7, u7, l mul l, a4, b4 adcs u8, u8, l mul l, a4, b5 adcs u9, u9, l cset u10, cs umulh l, a4, b0 adds u5, u5, l umulh l, a4, b1 adcs u6, u6, l umulh l, a4, b2 adcs u7, u7, l umulh l, a4, b3 adcs u8, u8, l umulh l, a4, b4 adcs u9, u9, l umulh l, a4, b5 adc u10, u10, l // Row 5 = [u11;...;u0] = [a5;a4;a3;a2;a1;a0] * [b5;...;b0] mul l, a5, b0 adds u5, u5, l mul l, a5, b1 adcs u6, u6, l mul l, a5, b2 adcs u7, u7, l mul l, a5, b3 adcs u8, u8, l mul l, a5, b4 adcs u9, u9, l mul l, a5, b5 adcs u10, u10, l cset u11, cs umulh l, a5, b0 adds u6, u6, l umulh l, a5, b1 adcs u7, u7, l umulh l, a5, b2 adcs u8, u8, l umulh l, a5, b3 adcs u9, u9, l umulh l, a5, b4 adcs u10, u10, l umulh l, a5, b5 adc u11, u11, l // Montgomery rotate the low half montreds(u0,u5,u4,u3,u2,u1,u0, b0,b1,b2) montreds(u1,u0,u5,u4,u3,u2,u1, b0,b1,b2) montreds(u2,u1,u0,u5,u4,u3,u2, b0,b1,b2) montreds(u3,u2,u1,u0,u5,u4,u3, b0,b1,b2) montreds(u4,u3,u2,u1,u0,u5,u4, b0,b1,b2) montreds(u5,u4,u3,u2,u1,u0,u5, b0,b1,b2) // Add up the high and low parts as [h; u5;u4;u3;u2;u1;u0] = z adds u0, u0, u6 adcs u1, u1, u7 adcs u2, u2, u8 adcs u3, u3, u9 adcs u4, u4, u10 adcs u5, u5, u11 adc h, xzr, xzr // Now add [h; u11;u10;u9;u8;u7;u6] = z + (2^384 - p_384) mov l, #0xffffffff00000001 adds u6, u0, l mov l, #0x00000000ffffffff adcs u7, u1, l mov l, #0x0000000000000001 adcs u8, u2, l adcs u9, u3, xzr adcs u10, u4, xzr adcs u11, u5, xzr adcs h, h, xzr // Now z >= p_384 iff h is nonzero, so select accordingly csel u0, u0, u6, eq csel u1, u1, u7, eq csel u2, u2, u8, eq csel u3, u3, u9, eq csel u4, u4, u10, eq csel u5, u5, u11, eq // Store back final result stp u0, u1, [z] stp u2, u3, [z, #16] stp u4, u5, [z, #32] // Restore registers CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montmul_p384_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
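The one line of magic in the montreds macro is the correction multiplier w = d0 + (d0 << 32), i.e. w = d0 * (2^32 + 1) mod 2^64. The low limb of p_384 is p0 = 2^32 - 1, and (2^32 + 1)(2^32 - 1) = 2^64 - 1, so 2^32 + 1 is exactly -p_384^{-1} mod 2^64; adding w * p_384 therefore clears the bottom limb, which is what a word-level Montgomery step needs. A short, illustrative C check of that identity (not part of the library):

#include <stdint.h>
#include <assert.h>

int main(void)
{
    const uint64_t p0 = 0x00000000ffffffffULL;   /* low limb of p_384 */
    uint64_t d0 = 0x0123456789abcdefULL;
    for (long i = 0; i < 1000000; i++) {
        uint64_t w = d0 + (d0 << 32);            /* w = d0 * (2^32 + 1) mod 2^64 */
        assert(d0 + w * p0 == 0);                /* bottom limb cancels mod 2^64 */
        d0 = d0 * 6364136223846793005ULL + 1;    /* vary the test value (LCG)    */
    }
    return 0;
}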
wlsfx/bnbb
73,060
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_montinv_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery inverse modulo p_384 = 2^384 - 2^128 - 2^96 + 2^32 - 1 // Input x[6]; output z[6] // // extern void bignum_montinv_p384(uint64_t z[static 6], // const uint64_t x[static 6]); // // If the 6-digit input x is coprime to p_384, i.e. is not divisible // by it, returns z < p_384 such that x * z == 2^768 (mod p_384). This // is effectively "Montgomery inverse" because if we consider x and z as // Montgomery forms of X and Z, i.e. x == 2^384 * X and z == 2^384 * Z // (both mod p_384) then X * Z == 1 (mod p_384). That is, this function // gives the analog of the modular inverse bignum_inv_p384 but with both // input and output in the Montgomery domain. Note that x does not need // to be reduced modulo p_384, but the output always is. If the input // is divisible (i.e. is 0 or p_384), then there can be no solution to // the congruence x * z == 2^768 (mod p_384), and z = 0 is returned. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montinv_p384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montinv_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montinv_p384) .text .balign 4 // Size in bytes of a 64-bit word #define N 8 // Used for the return pointer #define res x20 // Loop counter and d = 2 * delta value for divstep #define i x21 #define d x22 // Registers used for matrix element magnitudes and signs #define m00 x10 #define m01 x11 #define m10 x12 #define m11 x13 #define s00 x14 #define s01 x15 #define s10 x16 #define s11 x17 // Initial carries for combinations #define car0 x9 #define car1 x19 // Input and output, plain registers treated according to pattern #define reg0 x0, #0 #define reg1 x1, #0 #define reg2 x2, #0 #define reg3 x3, #0 #define reg4 x4, #0 #define x x1, #0 #define z x0, #0 // Pointer-offset pairs for temporaries on stack // The u and v variables are 6 words each as expected, but the f and g // variables are 8 words each -- they need to have at least one extra // word for a sign word, and to preserve alignment we "round up" to 8. // In fact, we currently keep an extra word in u and v as well. #define f sp, #0 #define g sp, #(8*N) #define u sp, #(16*N) #define v sp, #(24*N) // Total size to reserve on the stack #define NSPACE 32*N // --------------------------------------------------------------------------- // Core signed almost-Montgomery reduction macro. Takes input in // [d6;d5;d4;d3;d2;d1;d0] and returns result in [d6;d5d4;d3;d2;d1], adding // to the existing [d6;d5;d4;d3;d2;d1], and re-using d0 as a temporary // internally as well as t0, t1, t2. This is almost-Montgomery, i.e. the // result fits in 6 digits but is not necessarily strictly reduced mod p_384. // --------------------------------------------------------------------------- #define amontred(d6,d5,d4,d3,d2,d1,d0, t3,t2,t1) \ /* We only know the input is -2^444 < x < 2^444. To do traditional */ \ /* unsigned Montgomery reduction, start by adding 2^61 * p_384. 
*/ \ mov t1, #0xe000000000000000 __LF \ adds d0, d0, t1 __LF \ mov t2, #0x000000001fffffff __LF \ adcs d1, d1, t2 __LF \ mov t3, #0xffffffffe0000000 __LF \ bic t3, t3, #0x2000000000000000 __LF \ adcs d2, d2, t3 __LF \ sbcs d3, d3, xzr __LF \ sbcs d4, d4, xzr __LF \ sbcs d5, d5, xzr __LF \ mov t1, #0x1fffffffffffffff __LF \ adc d6, d6, t1 __LF \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ /* Store it back into d0 since we no longer need that digit. */ \ add d0, d0, d0, lsl #32 __LF \ /* Now let [t3;t2;t1;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel d0 so we don't need it */ \ mov t1, #0xffffffff00000001 __LF \ umulh t1, t1, d0 __LF \ mov t2, #0x00000000ffffffff __LF \ mul t3, t2, d0 __LF \ umulh t2, t2, d0 __LF \ adds t1, t1, t3 __LF \ adcs t2, t2, d0 __LF \ cset t3, cs __LF \ /* Now x + p_384 * w = (x + 2^384 * w) - (2^384 - p_384) * w */ \ /* We catch the net top carry from add-subtract in the digit d0 */ \ adds d6, d6, d0 __LF \ cset d0, cs __LF \ subs d1, d1, t1 __LF \ sbcs d2, d2, t2 __LF \ sbcs d3, d3, t3 __LF \ sbcs d4, d4, xzr __LF \ sbcs d5, d5, xzr __LF \ sbcs d6, d6, xzr __LF \ sbcs d0, d0, xzr __LF \ /* Now if d0 is nonzero we subtract p_384 (almost-Montgomery) */ \ neg d0, d0 __LF \ and t1, d0, #0x00000000ffffffff __LF \ and t2, d0, #0xffffffff00000000 __LF \ and t3, d0, #0xfffffffffffffffe __LF \ subs d1, d1, t1 __LF \ sbcs d2, d2, t2 __LF \ sbcs d3, d3, t3 __LF \ sbcs d4, d4, d0 __LF \ sbcs d5, d5, d0 __LF \ sbc d6, d6, d0 // Very similar to a subroutine call to the s2n-bignum word_divstep59. // But different in register usage and returning the final matrix in // registers as follows // // [ m00 m01] // [ m10 m11] #define divstep59() \ and x4, x2, #0xfffff __LF \ orr x4, x4, #0xfffffe0000000000 __LF \ and x5, x3, #0xfffff __LF \ orr x5, x5, #0xc000000000000000 __LF \ tst x5, #0x1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, 
x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ asr x5, x5, #1 __LF \ add x8, x4, #0x100, lsl #12 __LF \ sbfx x8, x8, #21, #21 __LF \ mov x11, #0x100000 __LF \ add x11, x11, x11, lsl #21 __LF \ add x9, x4, x11 __LF \ asr x9, x9, #42 __LF \ add x10, x5, #0x100, lsl #12 __LF \ sbfx x10, x10, #21, #21 __LF \ add x11, x5, x11 __LF \ asr x11, x11, #42 __LF \ mul x6, x8, x2 __LF \ mul x7, x9, x3 __LF \ mul x2, x10, x2 __LF \ mul x3, x11, x3 __LF \ add x4, x6, x7 __LF \ add x5, x2, x3 __LF \ asr x2, x4, #20 __LF \ asr x3, x5, #20 __LF \ and x4, x2, #0xfffff __LF \ orr x4, x4, #0xfffffe0000000000 __LF \ and x5, x3, #0xfffff __LF \ orr x5, x5, #0xc000000000000000 __LF \ tst x5, #0x1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ 
ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF 
\ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ asr x5, x5, #1 __LF \ add x12, x4, #0x100, lsl #12 __LF \ sbfx x12, x12, #21, #21 __LF \ mov x15, #0x100000 __LF \ add x15, x15, x15, lsl #21 __LF \ add x13, x4, x15 __LF \ asr x13, x13, #42 __LF \ add x14, x5, #0x100, lsl #12 __LF \ sbfx x14, x14, #21, #21 __LF \ add x15, x5, x15 __LF \ asr x15, x15, #42 __LF \ mul x6, x12, x2 __LF \ mul x7, x13, x3 __LF \ mul x2, x14, x2 __LF \ mul x3, x15, x3 __LF \ add x4, x6, x7 __LF \ add x5, x2, x3 __LF \ asr x2, x4, #20 __LF \ asr x3, x5, #20 __LF \ and x4, x2, #0xfffff __LF \ orr x4, x4, #0xfffffe0000000000 __LF \ and x5, x3, #0xfffff __LF \ orr x5, x5, #0xc000000000000000 __LF \ tst x5, #0x1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ mul x2, x12, x8 __LF \ mul x3, x12, x9 __LF \ mul x6, x14, x8 __LF \ mul x7, x14, x9 __LF \ madd x8, x13, x10, x2 __LF \ madd x9, x13, x11, x3 __LF \ madd x16, x15, x10, x6 __LF 
\ madd x17, x15, x11, x7 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ asr x5, x5, #1 __LF \ add x12, x4, #0x100, lsl #12 __LF \ sbfx x12, x12, #22, #21 __LF \ mov x15, #0x100000 __LF \ add x15, x15, x15, lsl #21 __LF \ add x13, x4, x15 __LF \ asr x13, x13, #43 __LF \ add x14, x5, #0x100, lsl #12 __LF \ sbfx x14, x14, #22, #21 __LF \ add x15, x5, x15 __LF \ asr x15, x15, #43 __LF \ mneg x2, x12, x8 __LF \ mneg x3, x12, x9 __LF \ mneg x4, x14, x8 __LF \ mneg x5, x14, x9 __LF \ msub m00, x13, x16, x2 __LF \ msub m01, x13, x17, x3 __LF \ msub m10, x15, x16, x4 __LF \ msub m11, x15, x17, x5 S2N_BN_SYMBOL(bignum_montinv_p384): CFI_START // Save registers and make room for temporaries CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_DEC_SP(NSPACE) // Save the return pointer for the end so we can overwrite x0 later mov res, x0 // Copy the prime and input into the main f and g variables respectively. // Make sure x is reduced so that g <= f as assumed in the bound proof. 
mov x10, #0x00000000ffffffff mov x11, #0xffffffff00000000 mov x12, #0xfffffffffffffffe mov x15, #0xffffffffffffffff stp x10, x11, [f] stp x12, x15, [f+2*N] stp x15, x15, [f+4*N] str xzr, [f+6*N] ldp x2, x3, [x1] subs x10, x2, x10 sbcs x11, x3, x11 ldp x4, x5, [x1, #(2*N)] sbcs x12, x4, x12 sbcs x13, x5, x15 ldp x6, x7, [x1, #(4*N)] sbcs x14, x6, x15 sbcs x15, x7, x15 csel x2, x2, x10, cc csel x3, x3, x11, cc csel x4, x4, x12, cc csel x5, x5, x13, cc csel x6, x6, x14, cc csel x7, x7, x15, cc stp x2, x3, [g] stp x4, x5, [g+2*N] stp x6, x7, [g+4*N] str xzr, [g+6*N] // Also maintain reduced < 2^384 vector [u,v] such that // [f,g] == x * 2^{5*i-843} * [u,v] (mod p_384) // starting with [p_384,x] == x * 2^{5*0-843} * [0,2^843] (mod p_384) // The weird-looking 5*i modifications come in because we are doing // 64-bit word-sized Montgomery reductions at each stage, which is // 5 bits more than the 59-bit requirement to keep things stable. // After the 15th and last iteration and sign adjustment, when // f == 1 for in-scope cases, we have x * 2^{75-843} * u == 1, i.e. // x * u == 2^768 as required. stp xzr, xzr, [u] stp xzr, xzr, [u+2*N] stp xzr, xzr, [u+4*N] // The starting constant 2^843 mod p_384 is // 0x0000000000000800:00001000000007ff:fffff00000000000 // :00001000000007ff:fffff00000000800:0000000000000000 // where colons separate 64-bit subwords, least significant at the right. // Not all of these are single loads on ARM so this is a bit dynamic mov x12, #0xfffff00000000000 orr x10, x12, #0x0000000000000800 stp xzr, x10, [v] mov x11, #0x00000000000007ff orr x11, x11, #0x0000100000000000 stp x11, x12, [v+2*N] mov x12, #0x0000000000000800 stp x11, x12, [v+4*N] // Start of main loop. We jump into the middle so that the divstep // portion is common to the special fifteenth iteration after a uniform // first 14. mov i, #15 mov d, #1 b Lbignum_montinv_p384_midloop Lbignum_montinv_p384_loop: // Separate the matrix elements into sign-magnitude pairs cmp m00, xzr csetm s00, mi cneg m00, m00, mi cmp m01, xzr csetm s01, mi cneg m01, m01, mi cmp m10, xzr csetm s10, mi cneg m10, m10, mi cmp m11, xzr csetm s11, mi cneg m11, m11, mi // Adjust the initial values to allow for complement instead of negation // This initial offset is the same for [f,g] and [u,v] compositions. // Save it in stable registers for the [u,v] part and do [f,g] first. and x0, m00, s00 and x1, m01, s01 add car0, x0, x1 and x0, m10, s10 and x1, m11, s11 add car1, x0, x1 // Now the computation of the updated f and g values. This maintains a // 2-word carry between stages so we can conveniently insert the shift // right by 59 before storing back, and not overwrite digits we need // again of the old f and g values. 
// // Digit 0 of [f,g] ldr x7, [f] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, car0, x0 adc x2, xzr, x1 ldr x8, [g] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 adc x2, x2, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x5, car1, x0 adc x3, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 adc x3, x3, x1 // Digit 1 of [f,g] ldr x7, [f+N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [g+N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 adc x6, x6, x1 extr x4, x2, x4, #59 str x4, [f] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x3, x3, x0 adc x4, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x3, x3, x0 adc x4, x4, x1 extr x5, x3, x5, #59 str x5, [g] // Digit 2 of [f,g] ldr x7, [f+2*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [g+2*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 adc x5, x5, x1 extr x2, x6, x2, #59 str x2, [f+N] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x4, x4, x0 adc x2, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x4, x4, x0 adc x2, x2, x1 extr x3, x4, x3, #59 str x3, [g+N] // Digit 3 of [f,g] ldr x7, [f+3*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, xzr, x1 ldr x8, [g+3*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 adc x3, x3, x1 extr x6, x5, x6, #59 str x6, [f+2*N] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x2, x2, x0 adc x6, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x2, x2, x0 adc x6, x6, x1 extr x4, x2, x4, #59 str x4, [g+2*N] // Digit 4 of [f,g] ldr x7, [f+4*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x3, x3, x0 adc x4, xzr, x1 ldr x8, [g+4*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x3, x3, x0 adc x4, x4, x1 extr x5, x3, x5, #59 str x5, [f+3*N] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x6, x6, x0 adc x5, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x6, x6, x0 adc x5, x5, x1 extr x2, x6, x2, #59 str x2, [g+3*N] // Digits 5 and 6 of [f,g] ldr x7, [f+5*N] eor x1, x7, s00 ldr x23, [f+6*N] eor x2, x23, s00 and x2, x2, m00 neg x2, x2 mul x0, x1, m00 umulh x1, x1, m00 adds x4, x4, x0 adc x2, x2, x1 ldr x8, [g+5*N] eor x1, x8, s01 ldr x24, [g+6*N] eor x0, x24, s01 and x0, x0, m01 sub x2, x2, x0 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 adc x2, x2, x1 extr x3, x4, x3, #59 str x3, [f+4*N] extr x4, x2, x4, #59 str x4, [f+5*N] asr x2, x2, #59 str x2, [f+6*N] eor x1, x7, s10 eor x4, x23, s10 and x4, x4, m10 neg x4, x4 mul x0, x1, m10 umulh x1, x1, m10 adds x5, x5, x0 adc x4, x4, x1 eor x1, x8, s11 eor x0, x24, s11 and x0, x0, m11 sub x4, x4, x0 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 adc x4, x4, x1 extr x6, x5, x6, #59 str x6, [g+4*N] extr x5, x4, x5, #59 str x5, [g+5*N] asr x4, x4, #59 str x4, [g+6*N] // Now the computation of the updated u and v values and their // Montgomery reductions. A very similar accumulation except that // the top words of u and v are unsigned and we don't shift. 
// // Digit 0 of [u,v] ldr x7, [u] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, car0, x0 adc x2, xzr, x1 ldr x8, [v] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u] adc x2, x2, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x5, car1, x0 adc x3, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 str x5, [v] adc x3, x3, x1 // Digit 1 of [u,v] ldr x7, [u+N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [v+N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 str x2, [u+N] adc x6, x6, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x3, x3, x0 adc x4, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x3, x3, x0 str x3, [v+N] adc x4, x4, x1 // Digit 2 of [u,v] ldr x7, [u+2*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [v+2*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 str x6, [u+2*N] adc x5, x5, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x4, x4, x0 adc x2, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x4, x4, x0 str x4, [v+2*N] adc x2, x2, x1 // Digit 3 of [u,v] ldr x7, [u+3*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, xzr, x1 ldr x8, [v+3*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 str x5, [u+3*N] adc x3, x3, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x2, x2, x0 adc x6, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x2, x2, x0 str x2, [v+3*N] adc x6, x6, x1 // Digit 4 of [u,v] ldr x7, [u+4*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x3, x3, x0 adc x4, xzr, x1 ldr x8, [v+4*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x3, x3, x0 str x3, [u+4*N] adc x4, x4, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x6, x6, x0 adc x5, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x6, x6, x0 str x6, [v+4*N] adc x5, x5, x1 // Digits 5 and 6 of [u,v] (top is unsigned) ldr x7, [u+5*N] eor x1, x7, s00 and x2, s00, m00 neg x2, x2 mul x0, x1, m00 umulh x1, x1, m00 adds x4, x4, x0 adc x2, x2, x1 ldr x8, [v+5*N] eor x1, x8, s01 and x0, s01, m01 sub x2, x2, x0 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u+5*N] adc x2, x2, x1 str x2, [u+6*N] eor x1, x7, s10 and x4, s10, m10 neg x4, x4 mul x0, x1, m10 umulh x1, x1, m10 adds x5, x5, x0 adc x4, x4, x1 eor x1, x8, s11 and x0, s11, m11 sub x4, x4, x0 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 str x5, [v+5*N] adc x4, x4, x1 str x4, [v+6*N] // Montgomery reduction of u ldp x0, x1, [u] ldp x2, x3, [u+16] ldp x4, x5, [u+32] ldr x6, [u+48] amontred(x6,x5,x4,x3,x2,x1,x0, x9,x8,x7) stp x1, x2, [u] stp x3, x4, [u+16] stp x5, x6, [u+32] // Montgomery reduction of v ldp x0, x1, [v] ldp x2, x3, [v+16] ldp x4, x5, [v+32] ldr x6, [v+48] amontred(x6,x5,x4,x3,x2,x1,x0, x9,x8,x7) stp x1, x2, [v] stp x3, x4, [v+16] stp x5, x6, [v+32] Lbignum_montinv_p384_midloop: mov x1, d ldr x2, [f] ldr x3, [g] divstep59() mov d, x1 // Next iteration subs i, i, #1 bne Lbignum_montinv_p384_loop // The 15th and last iteration does not need anything except the // u value and the sign of f; the latter can be obtained from the // lowest word of f. So it's done differently from the main loop. // Find the sign of the new f. For this we just need one digit // since we know (for in-scope cases) that f is either +1 or -1. 
// We don't explicitly shift right by 59 either, but looking at // bit 63 (or any bit >= 60) of the unshifted result is enough // to distinguish -1 from +1; this is then made into a mask. ldr x0, [f] ldr x1, [g] mul x0, x0, m00 madd x1, x1, m01, x0 asr x0, x1, #63 // Now separate out the matrix into sign-magnitude pairs // and adjust each one based on the sign of f. // // Note that at this point we expect |f|=1 and we got its // sign above, so then since [f,0] == x * 2^{-768} [u,v] (mod p_384) // we want to flip the sign of u according to that of f. cmp m00, xzr csetm s00, mi cneg m00, m00, mi eor s00, s00, x0 cmp m01, xzr csetm s01, mi cneg m01, m01, mi eor s01, s01, x0 cmp m10, xzr csetm s10, mi cneg m10, m10, mi eor s10, s10, x0 cmp m11, xzr csetm s11, mi cneg m11, m11, mi eor s11, s11, x0 // Adjust the initial value to allow for complement instead of negation and x0, m00, s00 and x1, m01, s01 add car0, x0, x1 // Digit 0 of [u] ldr x7, [u] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, car0, x0 adc x2, xzr, x1 ldr x8, [v] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u] adc x2, x2, x1 // Digit 1 of [u] ldr x7, [u+N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [v+N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 str x2, [u+N] adc x6, x6, x1 // Digit 2 of [u] ldr x7, [u+2*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [v+2*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 str x6, [u+2*N] adc x5, x5, x1 // Digit 3 of [u] ldr x7, [u+3*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, xzr, x1 ldr x8, [v+3*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 str x5, [u+3*N] adc x3, x3, x1 // Digit 4 of [u] ldr x7, [u+4*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x3, x3, x0 adc x4, xzr, x1 ldr x8, [v+4*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x3, x3, x0 str x3, [u+4*N] adc x4, x4, x1 // Digits 5 and 6 of [u] (top is unsigned) ldr x7, [u+5*N] eor x1, x7, s00 and x2, s00, m00 neg x2, x2 mul x0, x1, m00 umulh x1, x1, m00 adds x4, x4, x0 adc x2, x2, x1 ldr x8, [v+5*N] eor x1, x8, s01 and x0, s01, m01 sub x2, x2, x0 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u+5*N] adc x2, x2, x1 str x2, [u+6*N] // Montgomery reduction of u. This needs to be strict not "almost" // so it is followed by an optional subtraction of p_384 ldp x10, x0, [u] ldp x1, x2, [u+16] ldp x3, x4, [u+32] ldr x5, [u+48] amontred(x5,x4,x3,x2,x1,x0,x10, x9,x8,x7) mov x10, #0x00000000ffffffff subs x10, x0, x10 mov x11, #0xffffffff00000000 sbcs x11, x1, x11 mov x12, #0xfffffffffffffffe sbcs x12, x2, x12 mov x15, #0xffffffffffffffff sbcs x13, x3, x15 sbcs x14, x4, x15 sbcs x15, x5, x15 csel x0, x0, x10, cc csel x1, x1, x11, cc csel x2, x2, x12, cc csel x3, x3, x13, cc csel x4, x4, x14, cc csel x5, x5, x15, cc // Store it back to the final output stp x0, x1, [res] stp x2, x3, [res, #16] stp x4, x5, [res, #32] // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montinv_p384) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
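The divstep59() macro above is a fully unrolled, constant-time run of 59 divstep transitions carried out on 20-bit windows of f and g, with the net effect accumulated into the 2x2 matrix [m00 m01; m10 m11] (the register d holds 2*delta, per the register comments, and 15 such batches are performed in total). For orientation, here is the textbook transition it iterates, from Bernstein and Yang's "Fast constant-time gcd computation and modular inversion", written branchily on small integers; this is a readable reference for the math, not the packed, branch-free encoding used in the assembly:

#include <stdint.h>
#include <stdio.h>

/* One textbook divstep on (delta, f, g) with f odd. Right shifts of
   negative values assume arithmetic shift (true on mainstream compilers). */
static void divstep(int64_t *delta, int64_t *f, int64_t *g)
{
    if (*delta > 0 && (*g & 1)) {
        /* delta, f, g := 1 - delta, g, (g - f)/2 */
        int64_t t = *f;
        *delta = 1 - *delta;
        *f = *g;
        *g = (*g - t) >> 1;
    } else {
        /* delta, f, g := 1 + delta, f, (g + (g mod 2)*f)/2 */
        *delta = 1 + *delta;
        *g = (*g + (*g & 1) * (*f)) >> 1;
    }
}

int main(void)
{
    /* Tiny demo: iterating drives g to 0, leaving f = +/-gcd(f0, g0). */
    int64_t delta = 1, f = 35, g = 24;
    for (int i = 0; i < 40 && g != 0; i++)
        divstep(&delta, &f, &g);
    printf("f = %lld, expected +/-gcd(35,24) = +/-1\n", (long long)f);
    return 0;
}

Applied to f = p_384 and g = x with the [u,v] bookkeeping described in the comments above, enough such steps force f to +/-1, at which point u is the desired inverse up to sign and Montgomery scaling.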
wlsfx/bnbb
41,142
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/p384_montjmixadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point mixed addition on NIST curve P-384 in Montgomery-Jacobian coordinates // // extern void p384_montjmixadd(uint64_t p3[static 18], // const uint64_t p1[static 18], // const uint64_t p2[static 12]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // The "mixed" part means that p2 only has x and y coordinates, with the // implicit z coordinate assumed to be the identity. // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjmixadd) S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjmixadd) S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjmixadd) .text .balign 4 // Size of individual field elements #define NUMSIZE 48 // Stable homes for input arguments during main code sequence #define input_z x24 #define input_x x25 #define input_y x26 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // #NSPACE is the total stack needed for these temporaries #define zp2 sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define NSPACE NUMSIZE*6 // Corresponds to bignum_montmul_p384 except x24 -> x0 #define montmul_p384(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x5, x6, [P1+16] __LF \ ldp x7, x8, [P1+32] __LF \ ldp x9, x10, [P2] __LF \ ldp x11, x12, [P2+16] __LF \ ldp x13, x14, [P2+32] __LF \ mul x15, x3, x9 __LF \ mul x21, x4, x10 __LF \ mul x22, x5, x11 __LF \ umulh x23, x3, x9 __LF \ umulh x0, x4, x10 __LF \ umulh x1, x5, x11 __LF \ adds x23, x23, x21 __LF \ adcs x0, x0, x22 __LF \ adc x1, x1, xzr __LF \ adds x16, x23, x15 __LF \ adcs x17, x0, x23 __LF \ adcs x19, x1, x0 __LF \ adc x20, x1, xzr __LF \ adds x17, x17, x15 __LF \ adcs x19, x19, x23 __LF \ adcs x20, x20, x0 __LF \ adc x1, x1, xzr __LF \ subs x0, x3, x4 __LF \ cneg x0, x0, lo __LF \ csetm x23, lo __LF \ subs x22, x10, x9 __LF \ cneg x22, x22, lo __LF \ mul x21, x0, x22 __LF \ umulh x22, x0, x22 __LF \ cinv x23, x23, lo __LF \ eor x21, x21, x23 __LF \ eor x22, x22, x23 __LF \ cmn x23, #1 __LF \ adcs x16, x16, x21 __LF \ adcs x17, x17, x22 __LF \ adcs x19, x19, x23 __LF \ adcs x20, x20, x23 __LF \ adc x1, x1, x23 __LF \ subs x0, x3, x5 __LF \ cneg x0, x0, lo __LF \ csetm x23, lo __LF \ subs x22, x11, x9 __LF \ cneg x22, x22, lo __LF \ mul x21, x0, x22 __LF \ umulh x22, x0, x22 __LF \ cinv x23, x23, lo __LF \ eor x21, x21, x23 __LF \ eor x22, x22, x23 __LF \ cmn x23, #1 __LF \ adcs x17, x17, x21 __LF \ adcs x19, x19, x22 __LF \ adcs x20, x20, x23 __LF \ adc x1, x1, x23 __LF \ subs x0, x4, x5 __LF \ cneg x0, x0, lo 
__LF \ csetm x23, lo __LF \ subs x22, x11, x10 __LF \ cneg x22, x22, lo __LF \ mul x21, x0, x22 __LF \ umulh x22, x0, x22 __LF \ cinv x23, x23, lo __LF \ eor x21, x21, x23 __LF \ eor x22, x22, x23 __LF \ cmn x23, #1 __LF \ adcs x19, x19, x21 __LF \ adcs x20, x20, x22 __LF \ adc x1, x1, x23 __LF \ lsl x23, x15, #32 __LF \ add x15, x23, x15 __LF \ lsr x23, x15, #32 __LF \ subs x23, x23, x15 __LF \ sbc x22, x15, xzr __LF \ extr x23, x22, x23, #32 __LF \ lsr x22, x22, #32 __LF \ adds x22, x22, x15 __LF \ adc x21, xzr, xzr __LF \ subs x16, x16, x23 __LF \ sbcs x17, x17, x22 __LF \ sbcs x19, x19, x21 __LF \ sbcs x20, x20, xzr __LF \ sbcs x1, x1, xzr __LF \ sbc x15, x15, xzr __LF \ lsl x23, x16, #32 __LF \ add x16, x23, x16 __LF \ lsr x23, x16, #32 __LF \ subs x23, x23, x16 __LF \ sbc x22, x16, xzr __LF \ extr x23, x22, x23, #32 __LF \ lsr x22, x22, #32 __LF \ adds x22, x22, x16 __LF \ adc x21, xzr, xzr __LF \ subs x17, x17, x23 __LF \ sbcs x19, x19, x22 __LF \ sbcs x20, x20, x21 __LF \ sbcs x1, x1, xzr __LF \ sbcs x15, x15, xzr __LF \ sbc x16, x16, xzr __LF \ lsl x23, x17, #32 __LF \ add x17, x23, x17 __LF \ lsr x23, x17, #32 __LF \ subs x23, x23, x17 __LF \ sbc x22, x17, xzr __LF \ extr x23, x22, x23, #32 __LF \ lsr x22, x22, #32 __LF \ adds x22, x22, x17 __LF \ adc x21, xzr, xzr __LF \ subs x19, x19, x23 __LF \ sbcs x20, x20, x22 __LF \ sbcs x1, x1, x21 __LF \ sbcs x15, x15, xzr __LF \ sbcs x16, x16, xzr __LF \ sbc x17, x17, xzr __LF \ stp x19, x20, [P0] __LF \ stp x1, x15, [P0+16] __LF \ stp x16, x17, [P0+32] __LF \ mul x15, x6, x12 __LF \ mul x21, x7, x13 __LF \ mul x22, x8, x14 __LF \ umulh x23, x6, x12 __LF \ umulh x0, x7, x13 __LF \ umulh x1, x8, x14 __LF \ adds x23, x23, x21 __LF \ adcs x0, x0, x22 __LF \ adc x1, x1, xzr __LF \ adds x16, x23, x15 __LF \ adcs x17, x0, x23 __LF \ adcs x19, x1, x0 __LF \ adc x20, x1, xzr __LF \ adds x17, x17, x15 __LF \ adcs x19, x19, x23 __LF \ adcs x20, x20, x0 __LF \ adc x1, x1, xzr __LF \ subs x0, x6, x7 __LF \ cneg x0, x0, lo __LF \ csetm x23, lo __LF \ subs x22, x13, x12 __LF \ cneg x22, x22, lo __LF \ mul x21, x0, x22 __LF \ umulh x22, x0, x22 __LF \ cinv x23, x23, lo __LF \ eor x21, x21, x23 __LF \ eor x22, x22, x23 __LF \ cmn x23, #1 __LF \ adcs x16, x16, x21 __LF \ adcs x17, x17, x22 __LF \ adcs x19, x19, x23 __LF \ adcs x20, x20, x23 __LF \ adc x1, x1, x23 __LF \ subs x0, x6, x8 __LF \ cneg x0, x0, lo __LF \ csetm x23, lo __LF \ subs x22, x14, x12 __LF \ cneg x22, x22, lo __LF \ mul x21, x0, x22 __LF \ umulh x22, x0, x22 __LF \ cinv x23, x23, lo __LF \ eor x21, x21, x23 __LF \ eor x22, x22, x23 __LF \ cmn x23, #1 __LF \ adcs x17, x17, x21 __LF \ adcs x19, x19, x22 __LF \ adcs x20, x20, x23 __LF \ adc x1, x1, x23 __LF \ subs x0, x7, x8 __LF \ cneg x0, x0, lo __LF \ csetm x23, lo __LF \ subs x22, x14, x13 __LF \ cneg x22, x22, lo __LF \ mul x21, x0, x22 __LF \ umulh x22, x0, x22 __LF \ cinv x23, x23, lo __LF \ eor x21, x21, x23 __LF \ eor x22, x22, x23 __LF \ cmn x23, #1 __LF \ adcs x19, x19, x21 __LF \ adcs x20, x20, x22 __LF \ adc x1, x1, x23 __LF \ subs x6, x6, x3 __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x5 __LF \ ngc x3, xzr __LF \ cmn x3, #1 __LF \ eor x6, x6, x3 __LF \ adcs x6, x6, xzr __LF \ eor x7, x7, x3 __LF \ adcs x7, x7, xzr __LF \ eor x8, x8, x3 __LF \ adc x8, x8, xzr __LF \ subs x9, x9, x12 __LF \ sbcs x10, x10, x13 __LF \ sbcs x11, x11, x14 __LF \ ngc x14, xzr __LF \ cmn x14, #1 __LF \ eor x9, x9, x14 __LF \ adcs x9, x9, xzr __LF \ eor x10, x10, x14 __LF \ adcs x10, x10, xzr __LF \ eor x11, x11, x14 __LF \ adc x11, x11, xzr __LF \ 
eor x14, x3, x14 __LF \ ldp x21, x22, [P0] __LF \ adds x15, x15, x21 __LF \ adcs x16, x16, x22 __LF \ ldp x21, x22, [P0+16] __LF \ adcs x17, x17, x21 __LF \ adcs x19, x19, x22 __LF \ ldp x21, x22, [P0+32] __LF \ adcs x20, x20, x21 __LF \ adcs x1, x1, x22 __LF \ adc x2, xzr, xzr __LF \ stp x15, x16, [P0] __LF \ stp x17, x19, [P0+16] __LF \ stp x20, x1, [P0+32] __LF \ mul x15, x6, x9 __LF \ mul x21, x7, x10 __LF \ mul x22, x8, x11 __LF \ umulh x23, x6, x9 __LF \ umulh x0, x7, x10 __LF \ umulh x1, x8, x11 __LF \ adds x23, x23, x21 __LF \ adcs x0, x0, x22 __LF \ adc x1, x1, xzr __LF \ adds x16, x23, x15 __LF \ adcs x17, x0, x23 __LF \ adcs x19, x1, x0 __LF \ adc x20, x1, xzr __LF \ adds x17, x17, x15 __LF \ adcs x19, x19, x23 __LF \ adcs x20, x20, x0 __LF \ adc x1, x1, xzr __LF \ subs x0, x6, x7 __LF \ cneg x0, x0, lo __LF \ csetm x23, lo __LF \ subs x22, x10, x9 __LF \ cneg x22, x22, lo __LF \ mul x21, x0, x22 __LF \ umulh x22, x0, x22 __LF \ cinv x23, x23, lo __LF \ eor x21, x21, x23 __LF \ eor x22, x22, x23 __LF \ cmn x23, #1 __LF \ adcs x16, x16, x21 __LF \ adcs x17, x17, x22 __LF \ adcs x19, x19, x23 __LF \ adcs x20, x20, x23 __LF \ adc x1, x1, x23 __LF \ subs x0, x6, x8 __LF \ cneg x0, x0, lo __LF \ csetm x23, lo __LF \ subs x22, x11, x9 __LF \ cneg x22, x22, lo __LF \ mul x21, x0, x22 __LF \ umulh x22, x0, x22 __LF \ cinv x23, x23, lo __LF \ eor x21, x21, x23 __LF \ eor x22, x22, x23 __LF \ cmn x23, #1 __LF \ adcs x17, x17, x21 __LF \ adcs x19, x19, x22 __LF \ adcs x20, x20, x23 __LF \ adc x1, x1, x23 __LF \ subs x0, x7, x8 __LF \ cneg x0, x0, lo __LF \ csetm x23, lo __LF \ subs x22, x11, x10 __LF \ cneg x22, x22, lo __LF \ mul x21, x0, x22 __LF \ umulh x22, x0, x22 __LF \ cinv x23, x23, lo __LF \ eor x21, x21, x23 __LF \ eor x22, x22, x23 __LF \ cmn x23, #1 __LF \ adcs x19, x19, x21 __LF \ adcs x20, x20, x22 __LF \ adc x1, x1, x23 __LF \ ldp x3, x4, [P0] __LF \ ldp x5, x6, [P0+16] __LF \ ldp x7, x8, [P0+32] __LF \ cmn x14, #1 __LF \ eor x15, x15, x14 __LF \ adcs x15, x15, x3 __LF \ eor x16, x16, x14 __LF \ adcs x16, x16, x4 __LF \ eor x17, x17, x14 __LF \ adcs x17, x17, x5 __LF \ eor x19, x19, x14 __LF \ adcs x19, x19, x6 __LF \ eor x20, x20, x14 __LF \ adcs x20, x20, x7 __LF \ eor x1, x1, x14 __LF \ adcs x1, x1, x8 __LF \ adcs x9, x14, x2 __LF \ adcs x10, x14, xzr __LF \ adcs x11, x14, xzr __LF \ adc x12, x14, xzr __LF \ adds x19, x19, x3 __LF \ adcs x20, x20, x4 __LF \ adcs x1, x1, x5 __LF \ adcs x9, x9, x6 __LF \ adcs x10, x10, x7 __LF \ adcs x11, x11, x8 __LF \ adc x12, x12, x2 __LF \ lsl x23, x15, #32 __LF \ add x15, x23, x15 __LF \ lsr x23, x15, #32 __LF \ subs x23, x23, x15 __LF \ sbc x22, x15, xzr __LF \ extr x23, x22, x23, #32 __LF \ lsr x22, x22, #32 __LF \ adds x22, x22, x15 __LF \ adc x21, xzr, xzr __LF \ subs x16, x16, x23 __LF \ sbcs x17, x17, x22 __LF \ sbcs x19, x19, x21 __LF \ sbcs x20, x20, xzr __LF \ sbcs x1, x1, xzr __LF \ sbc x15, x15, xzr __LF \ lsl x23, x16, #32 __LF \ add x16, x23, x16 __LF \ lsr x23, x16, #32 __LF \ subs x23, x23, x16 __LF \ sbc x22, x16, xzr __LF \ extr x23, x22, x23, #32 __LF \ lsr x22, x22, #32 __LF \ adds x22, x22, x16 __LF \ adc x21, xzr, xzr __LF \ subs x17, x17, x23 __LF \ sbcs x19, x19, x22 __LF \ sbcs x20, x20, x21 __LF \ sbcs x1, x1, xzr __LF \ sbcs x15, x15, xzr __LF \ sbc x16, x16, xzr __LF \ lsl x23, x17, #32 __LF \ add x17, x23, x17 __LF \ lsr x23, x17, #32 __LF \ subs x23, x23, x17 __LF \ sbc x22, x17, xzr __LF \ extr x23, x22, x23, #32 __LF \ lsr x22, x22, #32 __LF \ adds x22, x22, x17 __LF \ adc x21, xzr, xzr __LF \ subs x19, 
x19, x23 __LF \ sbcs x20, x20, x22 __LF \ sbcs x1, x1, x21 __LF \ sbcs x15, x15, xzr __LF \ sbcs x16, x16, xzr __LF \ sbc x17, x17, xzr __LF \ adds x9, x9, x15 __LF \ adcs x10, x10, x16 __LF \ adcs x11, x11, x17 __LF \ adc x12, x12, xzr __LF \ add x22, x12, #1 __LF \ lsl x21, x22, #32 __LF \ subs x0, x22, x21 __LF \ sbc x21, x21, xzr __LF \ adds x19, x19, x0 __LF \ adcs x20, x20, x21 __LF \ adcs x1, x1, x22 __LF \ adcs x9, x9, xzr __LF \ adcs x10, x10, xzr __LF \ adcs x11, x11, xzr __LF \ csetm x22, lo __LF \ mov x23, #4294967295 __LF \ and x23, x23, x22 __LF \ adds x19, x19, x23 __LF \ eor x23, x23, x22 __LF \ adcs x20, x20, x23 __LF \ mov x23, #-2 __LF \ and x23, x23, x22 __LF \ adcs x1, x1, x23 __LF \ adcs x9, x9, x22 __LF \ adcs x10, x10, x22 __LF \ adc x11, x11, x22 __LF \ stp x19, x20, [P0] __LF \ stp x1, x9, [P0+16] __LF \ stp x10, x11, [P0+32] // Corresponds exactly to bignum_montsqr_p384 #define montsqr_p384(P0,P1) \ ldp x2, x3, [P1] __LF \ ldp x4, x5, [P1+16] __LF \ ldp x6, x7, [P1+32] __LF \ mul x14, x2, x3 __LF \ mul x15, x2, x4 __LF \ mul x16, x3, x4 __LF \ mul x8, x2, x2 __LF \ mul x10, x3, x3 __LF \ mul x12, x4, x4 __LF \ umulh x17, x2, x3 __LF \ adds x15, x15, x17 __LF \ umulh x17, x2, x4 __LF \ adcs x16, x16, x17 __LF \ umulh x17, x3, x4 __LF \ adcs x17, x17, xzr __LF \ umulh x9, x2, x2 __LF \ umulh x11, x3, x3 __LF \ umulh x13, x4, x4 __LF \ adds x14, x14, x14 __LF \ adcs x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adcs x17, x17, x17 __LF \ adc x13, x13, xzr __LF \ adds x9, x9, x14 __LF \ adcs x10, x10, x15 __LF \ adcs x11, x11, x16 __LF \ adcs x12, x12, x17 __LF \ adc x13, x13, xzr __LF \ lsl x16, x8, #32 __LF \ add x8, x16, x8 __LF \ lsr x16, x8, #32 __LF \ subs x16, x16, x8 __LF \ sbc x15, x8, xzr __LF \ extr x16, x15, x16, #32 __LF \ lsr x15, x15, #32 __LF \ adds x15, x15, x8 __LF \ adc x14, xzr, xzr __LF \ subs x9, x9, x16 __LF \ sbcs x10, x10, x15 __LF \ sbcs x11, x11, x14 __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ sbc x8, x8, xzr __LF \ lsl x16, x9, #32 __LF \ add x9, x16, x9 __LF \ lsr x16, x9, #32 __LF \ subs x16, x16, x9 __LF \ sbc x15, x9, xzr __LF \ extr x16, x15, x16, #32 __LF \ lsr x15, x15, #32 __LF \ adds x15, x15, x9 __LF \ adc x14, xzr, xzr __LF \ subs x10, x10, x16 __LF \ sbcs x11, x11, x15 __LF \ sbcs x12, x12, x14 __LF \ sbcs x13, x13, xzr __LF \ sbcs x8, x8, xzr __LF \ sbc x9, x9, xzr __LF \ lsl x16, x10, #32 __LF \ add x10, x16, x10 __LF \ lsr x16, x10, #32 __LF \ subs x16, x16, x10 __LF \ sbc x15, x10, xzr __LF \ extr x16, x15, x16, #32 __LF \ lsr x15, x15, #32 __LF \ adds x15, x15, x10 __LF \ adc x14, xzr, xzr __LF \ subs x11, x11, x16 __LF \ sbcs x12, x12, x15 __LF \ sbcs x13, x13, x14 __LF \ sbcs x8, x8, xzr __LF \ sbcs x9, x9, xzr __LF \ sbc x10, x10, xzr __LF \ stp x11, x12, [P0] __LF \ stp x13, x8, [P0+16] __LF \ stp x9, x10, [P0+32] __LF \ mul x8, x2, x5 __LF \ mul x14, x3, x6 __LF \ mul x15, x4, x7 __LF \ umulh x16, x2, x5 __LF \ umulh x17, x3, x6 __LF \ umulh x1, x4, x7 __LF \ adds x16, x16, x14 __LF \ adcs x17, x17, x15 __LF \ adc x1, x1, xzr __LF \ adds x9, x16, x8 __LF \ adcs x10, x17, x16 __LF \ adcs x11, x1, x17 __LF \ adc x12, x1, xzr __LF \ adds x10, x10, x8 __LF \ adcs x11, x11, x16 __LF \ adcs x12, x12, x17 __LF \ adc x13, x1, xzr __LF \ subs x17, x2, x3 __LF \ cneg x17, x17, lo __LF \ csetm x14, lo __LF \ subs x15, x6, x5 __LF \ cneg x15, x15, lo __LF \ mul x16, x17, x15 __LF \ umulh x15, x17, x15 __LF \ cinv x14, x14, lo __LF \ eor x16, x16, x14 __LF \ eor x15, x15, x14 __LF \ cmn x14, #1 __LF \ adcs x9, x9, 
x16 __LF \ adcs x10, x10, x15 __LF \ adcs x11, x11, x14 __LF \ adcs x12, x12, x14 __LF \ adc x13, x13, x14 __LF \ subs x17, x2, x4 __LF \ cneg x17, x17, lo __LF \ csetm x14, lo __LF \ subs x15, x7, x5 __LF \ cneg x15, x15, lo __LF \ mul x16, x17, x15 __LF \ umulh x15, x17, x15 __LF \ cinv x14, x14, lo __LF \ eor x16, x16, x14 __LF \ eor x15, x15, x14 __LF \ cmn x14, #1 __LF \ adcs x10, x10, x16 __LF \ adcs x11, x11, x15 __LF \ adcs x12, x12, x14 __LF \ adc x13, x13, x14 __LF \ subs x17, x3, x4 __LF \ cneg x17, x17, lo __LF \ csetm x14, lo __LF \ subs x15, x7, x6 __LF \ cneg x15, x15, lo __LF \ mul x16, x17, x15 __LF \ umulh x15, x17, x15 __LF \ cinv x14, x14, lo __LF \ eor x16, x16, x14 __LF \ eor x15, x15, x14 __LF \ cmn x14, #1 __LF \ adcs x11, x11, x16 __LF \ adcs x12, x12, x15 __LF \ adc x13, x13, x14 __LF \ adds x8, x8, x8 __LF \ adcs x9, x9, x9 __LF \ adcs x10, x10, x10 __LF \ adcs x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adc x17, xzr, xzr __LF \ ldp x2, x3, [P0] __LF \ adds x8, x8, x2 __LF \ adcs x9, x9, x3 __LF \ ldp x2, x3, [P0+16] __LF \ adcs x10, x10, x2 __LF \ adcs x11, x11, x3 __LF \ ldp x2, x3, [P0+32] __LF \ adcs x12, x12, x2 __LF \ adcs x13, x13, x3 __LF \ adc x17, x17, xzr __LF \ lsl x4, x8, #32 __LF \ add x8, x4, x8 __LF \ lsr x4, x8, #32 __LF \ subs x4, x4, x8 __LF \ sbc x3, x8, xzr __LF \ extr x4, x3, x4, #32 __LF \ lsr x3, x3, #32 __LF \ adds x3, x3, x8 __LF \ adc x2, xzr, xzr __LF \ subs x9, x9, x4 __LF \ sbcs x10, x10, x3 __LF \ sbcs x11, x11, x2 __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ sbc x8, x8, xzr __LF \ lsl x4, x9, #32 __LF \ add x9, x4, x9 __LF \ lsr x4, x9, #32 __LF \ subs x4, x4, x9 __LF \ sbc x3, x9, xzr __LF \ extr x4, x3, x4, #32 __LF \ lsr x3, x3, #32 __LF \ adds x3, x3, x9 __LF \ adc x2, xzr, xzr __LF \ subs x10, x10, x4 __LF \ sbcs x11, x11, x3 __LF \ sbcs x12, x12, x2 __LF \ sbcs x13, x13, xzr __LF \ sbcs x8, x8, xzr __LF \ sbc x9, x9, xzr __LF \ lsl x4, x10, #32 __LF \ add x10, x4, x10 __LF \ lsr x4, x10, #32 __LF \ subs x4, x4, x10 __LF \ sbc x3, x10, xzr __LF \ extr x4, x3, x4, #32 __LF \ lsr x3, x3, #32 __LF \ adds x3, x3, x10 __LF \ adc x2, xzr, xzr __LF \ subs x11, x11, x4 __LF \ sbcs x12, x12, x3 __LF \ sbcs x13, x13, x2 __LF \ sbcs x8, x8, xzr __LF \ sbcs x9, x9, xzr __LF \ sbc x10, x10, xzr __LF \ adds x17, x17, x8 __LF \ adcs x8, x9, xzr __LF \ adcs x9, x10, xzr __LF \ adcs x10, xzr, xzr __LF \ mul x1, x5, x5 __LF \ adds x11, x11, x1 __LF \ mul x14, x6, x6 __LF \ mul x15, x7, x7 __LF \ umulh x1, x5, x5 __LF \ adcs x12, x12, x1 __LF \ umulh x1, x6, x6 __LF \ adcs x13, x13, x14 __LF \ adcs x17, x17, x1 __LF \ umulh x1, x7, x7 __LF \ adcs x8, x8, x15 __LF \ adcs x9, x9, x1 __LF \ adc x10, x10, xzr __LF \ mul x1, x5, x6 __LF \ mul x14, x5, x7 __LF \ mul x15, x6, x7 __LF \ umulh x16, x5, x6 __LF \ adds x14, x14, x16 __LF \ umulh x16, x5, x7 __LF \ adcs x15, x15, x16 __LF \ umulh x16, x6, x7 __LF \ adc x16, x16, xzr __LF \ adds x1, x1, x1 __LF \ adcs x14, x14, x14 __LF \ adcs x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adc x5, xzr, xzr __LF \ adds x12, x12, x1 __LF \ adcs x13, x13, x14 __LF \ adcs x17, x17, x15 __LF \ adcs x8, x8, x16 __LF \ adcs x9, x9, x5 __LF \ adc x10, x10, xzr __LF \ mov x1, #-4294967295 __LF \ mov x14, #4294967295 __LF \ mov x15, #1 __LF \ cmn x11, x1 __LF \ adcs xzr, x12, x14 __LF \ adcs xzr, x13, x15 __LF \ adcs xzr, x17, xzr __LF \ adcs xzr, x8, xzr __LF \ adcs xzr, x9, xzr __LF \ adc x10, x10, xzr __LF \ neg x10, x10 __LF \ and x1, x1, x10 __LF \ adds x11, x11, x1 __LF 
\ and x14, x14, x10 __LF \ adcs x12, x12, x14 __LF \ and x15, x15, x10 __LF \ adcs x13, x13, x15 __LF \ adcs x17, x17, xzr __LF \ adcs x8, x8, xzr __LF \ adc x9, x9, xzr __LF \ stp x11, x12, [P0] __LF \ stp x13, x17, [P0+16] __LF \ stp x8, x9, [P0+32] // Corresponds exactly to bignum_sub_p384 #define sub_p384(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ ldp x9, x10, [P1+32] __LF \ ldp x4, x3, [P2+32] __LF \ sbcs x9, x9, x4 __LF \ sbcs x10, x10, x3 __LF \ csetm x3, lo __LF \ mov x4, #4294967295 __LF \ and x4, x4, x3 __LF \ adds x5, x5, x4 __LF \ eor x4, x4, x3 __LF \ adcs x6, x6, x4 __LF \ mov x4, #-2 __LF \ and x4, x4, x3 __LF \ adcs x7, x7, x4 __LF \ adcs x8, x8, x3 __LF \ adcs x9, x9, x3 __LF \ adc x10, x10, x3 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] S2N_BN_SYMBOL(p384_montjmixadd): CFI_START // Save regs and make room on stack for temporary variables CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_DEC_SP(NSPACE) // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 mov input_y, x2 // Main code, just a sequence of basic field operations // 8 * multiply + 3 * square + 7 * subtract montsqr_p384(zp2,z_1) montmul_p384(y2a,z_1,y_2) montmul_p384(x2a,zp2,x_2) montmul_p384(y2a,zp2,y2a) sub_p384(xd,x2a,x_1) sub_p384(yd,y2a,y_1) montsqr_p384(zz,xd) montsqr_p384(ww,yd) montmul_p384(zzx1,zz,x_1) montmul_p384(zzx2,zz,x2a) sub_p384(resx,ww,zzx1) sub_p384(t1,zzx2,zzx1) montmul_p384(resz,xd,z_1) sub_p384(resx,resx,zzx2) sub_p384(t2,zzx1,resx) montmul_p384(t1,t1,y_1) montmul_p384(t2,yd,t2) sub_p384(resy,t2,t1) // Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence) ldp x0, x1, [z_1] ldp x2, x3, [z_1+16] ldp x4, x5, [z_1+32] orr x6, x0, x1 orr x7, x2, x3 orr x8, x4, x5 orr x6, x6, x7 orr x6, x6, x8 cmp x6, xzr // Multiplex: if p1 <> 0 just copy the computed result from the staging area. // If p1 = 0 then return the point p2 augmented with a z = 1 coordinate (in // Montgomery form so not the simple constant 1 but rather 2^384 - p_384), // hence giving 0 + p2 = p2 for the final result. ldp x0, x1, [resx] ldp x19, x20, [x_2] csel x0, x0, x19, ne csel x1, x1, x20, ne ldp x2, x3, [resx+16] ldp x19, x20, [x_2+16] csel x2, x2, x19, ne csel x3, x3, x20, ne ldp x4, x5, [resx+32] ldp x19, x20, [x_2+32] csel x4, x4, x19, ne csel x5, x5, x20, ne ldp x6, x7, [resy] ldp x19, x20, [y_2] csel x6, x6, x19, ne csel x7, x7, x20, ne ldp x8, x9, [resy+16] ldp x19, x20, [y_2+16] csel x8, x8, x19, ne csel x9, x9, x20, ne ldp x10, x11, [resy+32] ldp x19, x20, [y_2+32] csel x10, x10, x19, ne csel x11, x11, x20, ne ldp x12, x13, [resz] mov x19, #0xffffffff00000001 mov x20, #0x00000000ffffffff csel x12, x12, x19, ne csel x13, x13, x20, ne ldp x14, x15, [resz+16] mov x19, #1 csel x14, x14, x19, ne csel x15, x15, xzr, ne ldp x16, x17, [resz+32] csel x16, x16, xzr, ne csel x17, x17, xzr, ne stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [x_3+32] stp x6, x7, [y_3] stp x8, x9, [y_3+16] stp x10, x11, [y_3+32] stp x12, x13, [z_3] stp x14, x15, [z_3+16] stp x16, x17, [z_3+32] // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(p384_montjmixadd) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
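The multiplexer above substitutes the Montgomery-domain representation of z = 1 when p1 = 0. As a sanity check of the constant loaded into x19/x20 for that z coordinate, here is a minimal plain-Python sketch (not part of the source tree; the names are invented here): it confirms that 2^384 mod p_384 = 2^384 - p_384 has exactly the six little-endian words used by the code.

# Montgomery form of 1 for P-384, checked against the mov/csel constants above.
p384 = 2**384 - 2**128 - 2**96 + 2**32 - 1      # NIST P-384 field prime
one_mont = (2**384) % p384                      # = 2^384 - p_384 here
words = [(one_mont >> (64 * i)) & (2**64 - 1) for i in range(6)]
assert words == [0xFFFFFFFF00000001, 0x00000000FFFFFFFF, 0x1, 0x0, 0x0, 0x0]
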
wlsfx/bnbb
3,658
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_triple_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Triple modulo p_384, z := (3 * x) mod p_384 // Input x[6]; output z[6] // // extern void bignum_triple_p384(uint64_t z[static 6], // const uint64_t x[static 6]); // // The input x can be any 6-digit bignum, not necessarily reduced modulo p_384, // and the result is always fully reduced, i.e. z = (3 * x) mod p_384. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_p384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_triple_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_p384) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_p384_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_triple_p384_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_p384_alt) .text .balign 4 #define z x0 #define x x1 #define d0 x2 #define d1 x3 #define d2 x4 #define d3 x5 #define d4 x6 #define d5 x7 #define h x8 // Slightly offset aliases for the d_i for readability. #define a0 x3 #define a1 x4 #define a2 x5 #define a3 x6 #define a4 x7 #define a5 x8 // More aliases for the same thing at different stages #define q x8 #define c x8 // Other temporary variables #define t0 x9 #define t1 x10 S2N_BN_SYMBOL(bignum_triple_p384): S2N_BN_SYMBOL(bignum_triple_p384_alt): CFI_START // Load the inputs ldp a0, a1, [x] ldp a2, a3, [x, #16] ldp a4, a5, [x, #32] // First do the multiplication by 3, getting z = [h; d5; ...; d0] lsl d0, a0, #1 adds d0, d0, a0 extr d1, a1, a0, #63 adcs d1, d1, a1 extr d2, a2, a1, #63 adcs d2, d2, a2 extr d3, a3, a2, #63 adcs d3, d3, a3 extr d4, a4, a3, #63 adcs d4, d4, a4 extr d5, a5, a4, #63 adcs d5, d5, a5 lsr h, a5, #63 adc h, h, xzr // For this limited range a simple quotient estimate of q = h + 1 works, where // h = floor(z / 2^384). Then -p_384 <= z - q * p_384 < p_384, so we just need // to subtract q * p_384 and then if that's negative, add back p_384. add q, h, #1 // Initial subtraction of z - q * p_384, with bitmask c for the carry // Actually done as an addition of (z - 2^384 * h) + q * (2^384 - p_384) // which, because q = h + 1, is exactly 2^384 + (z - q * p_384), and // therefore CF <=> 2^384 + (z - q * p_384) >= 2^384 <=> z >= q * p_384. lsl t1, q, #32 subs t0, q, t1 sbc t1, t1, xzr adds d0, d0, t0 adcs d1, d1, t1 adcs d2, d2, q adcs d3, d3, xzr adcs d4, d4, xzr adcs d5, d5, xzr csetm c, cc // Use the bitmask c for final masked addition of p_384. mov t0, #0x00000000ffffffff and t0, t0, c adds d0, d0, t0 eor t0, t0, c adcs d1, d1, t0 mov t0, #0xfffffffffffffffe and t0, t0, c adcs d2, d2, t0 adcs d3, d3, c adcs d4, d4, c adc d5, d5, c // Store the result stp d0, d1, [z] stp d2, d3, [z, #16] stp d4, d5, [z, #32] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_triple_p384) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
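The quotient-estimate argument in the comments above (q = h + 1 where h = floor(z / 2^384), giving -p_384 <= z - q * p_384 < p_384) can be spot-checked with a short plain-Python model; this is an illustrative sketch under the stated range assumptions, not part of the source tree.

import random

p384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

for _ in range(10000):
    x = random.randrange(2**384)   # any 6-word input, not necessarily reduced
    z = 3 * x
    h = z >> 384
    assert h <= 2                  # 3 * x < 3 * 2^384, so the top word is tiny
    q = h + 1                      # the simple quotient estimate from the code
    r = z - q * p384
    assert -p384 <= r < p384       # one masked re-addition of p_384 finishes
    assert (r + p384 if r < 0 else r) == z % p384
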
wlsfx/bnbb
19,807
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_montsqr_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery square, z := (x^2 / 2^384) mod p_384 // Input x[6]; output z[6] // // extern void bignum_montsqr_p384(uint64_t z[static 6], // const uint64_t x[static 6]); // // Does z := (x^2 / 2^384) mod p_384, assuming x^2 <= 2^384 * p_384, which is // guaranteed in particular if x < p_384 initially (the "intended" case). // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- // bignum_montsqr_p384 is functionally equivalent to // unopt/bignum_montsqr_p384_base. // It is written in a way that // 1. A subset of scalar multiplications in bignum_montsqr_p384 are carefully // chosen and vectorized // 2. The vectorized assembly is rescheduled using the SLOTHY superoptimizer. // https://github.com/slothy-optimizer/slothy // // The output program of step 1. is as follows: // // ldp x9, x2, [x1] // ldr q18, [x1] // ldr q19, [x1] // ldp x4, x6, [x1, #16] // ldp x5, x10, [x1, #32] // ldr q21, [x1, #32] // ldr q28, [x1, #32] // mul x12, x9, x2 // mul x1, x9, x4 // mul x13, x2, x4 // movi v0.2D, #0x00000000ffffffff // uzp2 v5.4S, v19.4S, v19.4S // xtn v25.2S, v18.2D // xtn v4.2S, v19.2D // rev64 v23.4S, v19.4S // umull v20.2D, v25.2S, v4.2S // umull v30.2D, v25.2S, v5.2S // uzp2 v19.4S, v18.4S, v18.4S // mul v22.4S, v23.4S, v18.4S // usra v30.2D, v20.2D, #32 // umull v18.2D, v19.2S, v5.2S // uaddlp v22.2D, v22.4S // and v20.16B, v30.16B, v0.16B // umlal v20.2D, v19.2S, v4.2S // shl v19.2D, v22.2D, #32 // usra v18.2D, v30.2D, #32 // umlal v19.2D, v25.2S, v4.2S // usra v18.2D, v20.2D, #32 // mov x7, v19.d[0] // mov x17, v19.d[1] // mul x16, x4, x4 // umulh x3, x9, x2 // adds x15, x1, x3 // umulh x1, x9, x4 // adcs x13, x13, x1 // umulh x1, x2, x4 // adcs x8, x1, xzr // mov x11, v18.d[0] // mov x14, v18.d[1] // umulh x1, x4, x4 // adds x3, x12, x12 // adcs x15, x15, x15 // adcs x13, x13, x13 // adcs x12, x8, x8 // adc x1, x1, xzr // adds x11, x11, x3 // adcs x3, x17, x15 // adcs x17, x14, x13 // adcs x15, x16, x12 // adc x13, x1, xzr // lsl x1, x7, #32 // add x16, x1, x7 // lsr x1, x16, #32 // subs x12, x1, x16 // sbc x1, x16, xzr // extr x12, x1, x12, #32 // lsr x1, x1, #32 // adds x7, x1, x16 // adc x1, xzr, xzr // subs x12, x11, x12 // sbcs x11, x3, x7 // sbcs x17, x17, x1 // sbcs x15, x15, xzr // sbcs x13, x13, xzr // sbc x3, x16, xzr // lsl x1, x12, #32 // add x16, x1, x12 // lsr x1, x16, #32 // subs x12, x1, x16 // sbc x1, x16, xzr // extr x12, x1, x12, #32 // lsr x1, x1, #32 // adds x7, x1, x16 // adc x1, xzr, xzr // subs x12, x11, x12 // sbcs x17, x17, x7 // sbcs x15, x15, x1 // sbcs x13, x13, xzr // sbcs x11, x3, xzr // sbc x3, x16, xzr // lsl x1, x12, #32 // add x16, x1, x12 // lsr x1, x16, #32 // subs x12, x1, x16 // sbc x1, x16, xzr // extr x7, x1, x12, #32 // lsr x1, x1, #32 // adds x12, x1, x16 // adc x1, xzr, xzr // subs x17, x17, x7 // sbcs x15, x15, x12 // sbcs x13, x13, x1 // sbcs x7, x11, xzr // sbcs x12, x3, xzr // sbc x1, x16, xzr // stp x17, x15, [x0] // @slothy:writes=buffer0 // stp x13, x7, [x0, #16] // @slothy:writes=buffer16 // stp x12, x1, [x0, #32] // @slothy:writes=buffer32 // mul x14, x9, x6 // mul x15, x2, x5 // mul x13, x4, x10 // umulh x7, x9, x6 // umulh x12, x2, x5 // umulh x1, x4, x10 // adds x15, x7, x15 // adcs x16, x12, x13 // adc x13, x1, xzr // adds x11, x15, x14 // adcs x7, x16, x15 // adcs x12, 
x13, x16 // adc x1, x13, xzr // adds x17, x7, x14 // adcs x15, x12, x15 // adcs x3, x1, x16 // adc x16, x13, xzr // subs x1, x9, x2 // cneg x13, x1, cc // csetm x7, cc // subs x1, x5, x6 // cneg x1, x1, cc // mul x12, x13, x1 // umulh x1, x13, x1 // cinv x7, x7, cc // eor x12, x12, x7 // eor x1, x1, x7 // cmn x7, #0x1 // adcs x11, x11, x12 // adcs x17, x17, x1 // adcs x15, x15, x7 // adcs x3, x3, x7 // adc x16, x16, x7 // subs x9, x9, x4 // cneg x13, x9, cc // csetm x7, cc // subs x1, x10, x6 // cneg x1, x1, cc // mul x12, x13, x1 // umulh x1, x13, x1 // cinv x7, x7, cc // eor x12, x12, x7 // eor x1, x1, x7 // cmn x7, #0x1 // adcs x17, x17, x12 // adcs x15, x15, x1 // adcs x13, x3, x7 // adc x7, x16, x7 // subs x2, x2, x4 // cneg x12, x2, cc // csetm x1, cc // subs x2, x10, x5 // cneg x2, x2, cc // mul x4, x12, x2 // umulh x2, x12, x2 // cinv x1, x1, cc // eor x4, x4, x1 // eor x2, x2, x1 // cmn x1, #0x1 // adcs x12, x15, x4 // adcs x4, x13, x2 // adc x2, x7, x1 // adds x1, x14, x14 // adcs x16, x11, x11 // adcs x17, x17, x17 // adcs x15, x12, x12 // adcs x13, x4, x4 // adcs x7, x2, x2 // adc x12, xzr, xzr // ldp x4, x2, [x0] // @slothy:reads=buffer0 // adds x1, x1, x4 // adcs x16, x16, x2 // ldp x4, x2, [x0, #16] // @slothy:reads=buffer16 // adcs x17, x17, x4 // adcs x15, x15, x2 // ldp x4, x2, [x0, #32] // @slothy:reads=buffer32 // adcs x13, x13, x4 // adcs x7, x7, x2 // adc x11, x12, xzr // lsl x2, x1, #32 // add x12, x2, x1 // lsr x2, x12, #32 // subs x4, x2, x12 // sbc x2, x12, xzr // extr x4, x2, x4, #32 // lsr x2, x2, #32 // adds x1, x2, x12 // adc x2, xzr, xzr // subs x4, x16, x4 // sbcs x16, x17, x1 // sbcs x17, x15, x2 // sbcs x15, x13, xzr // sbcs x13, x7, xzr // sbc x7, x12, xzr // lsl x2, x4, #32 // add x12, x2, x4 // lsr x2, x12, #32 // subs x4, x2, x12 // sbc x2, x12, xzr // extr x4, x2, x4, #32 // lsr x2, x2, #32 // adds x1, x2, x12 // adc x2, xzr, xzr // subs x4, x16, x4 // sbcs x16, x17, x1 // sbcs x17, x15, x2 // sbcs x15, x13, xzr // sbcs x13, x7, xzr // sbc x7, x12, xzr // lsl x2, x4, #32 // add x12, x2, x4 // lsr x2, x12, #32 // subs x4, x2, x12 // sbc x2, x12, xzr // extr x1, x2, x4, #32 // lsr x2, x2, #32 // adds x4, x2, x12 // adc x2, xzr, xzr // subs x3, x16, x1 // sbcs x17, x17, x4 // sbcs x15, x15, x2 // sbcs x1, x13, xzr // sbcs x4, x7, xzr // sbc x2, x12, xzr // adds x13, x11, x1 // adcs x7, x4, xzr // adcs x12, x2, xzr // adcs x16, xzr, xzr // mul x2, x6, x6 // adds x3, x3, x2 // xtn v30.2S, v28.2D // shrn v26.2S, v28.2D, #32 // umull v26.2D, v30.2S, v26.2S // shl v19.2D, v26.2D, #33 // umlal v19.2D, v30.2S, v30.2S // mov x1, v19.d[0] // mov x4, v19.d[1] // umulh x2, x6, x6 // adcs x17, x17, x2 // umulh x2, x5, x5 // adcs x15, x15, x1 // adcs x13, x13, x2 // umulh x2, x10, x10 // adcs x7, x7, x4 // adcs x12, x12, x2 // adc x16, x16, xzr // dup v28.2D, x6 // movi v0.2D, #0x00000000ffffffff // uzp2 v5.4S, v21.4S, v21.4S // xtn v25.2S, v28.2D // xtn v4.2S, v21.2D // rev64 v19.4S, v21.4S // umull v30.2D, v25.2S, v4.2S // umull v23.2D, v25.2S, v5.2S // uzp2 v20.4S, v28.4S, v28.4S // mul v19.4S, v19.4S, v28.4S // usra v23.2D, v30.2D, #32 // umull v18.2D, v20.2S, v5.2S // uaddlp v19.2D, v19.4S // and v30.16B, v23.16B, v0.16B // umlal v30.2D, v20.2S, v4.2S // shl v19.2D, v19.2D, #32 // usra v18.2D, v23.2D, #32 // umlal v19.2D, v25.2S, v4.2S // usra v18.2D, v30.2D, #32 // mov x6, v19.d[0] // mov x1, v19.d[1] // mul x4, x5, x10 // mov x2, v18.d[0] // adds x1, x1, x2 // mov x2, v18.d[1] // adcs x4, x4, x2 // umulh x5, x5, x10 // adc x2, x5, xzr // adds x5, x6, x6 // 
adcs x6, x1, x1 // adcs x1, x4, x4 // adcs x4, x2, x2 // adc x2, xzr, xzr // adds x17, x17, x5 // adcs x15, x15, x6 // adcs x13, x13, x1 // adcs x7, x7, x4 // adcs x12, x12, x2 // adc x2, x16, xzr // mov x5, #0xffffffff00000001 // mov x6, #0xffffffff // mov x1, #0x1 // cmn x3, x5 // adcs xzr, x17, x6 // adcs xzr, x15, x1 // adcs xzr, x13, xzr // adcs xzr, x7, xzr // adcs xzr, x12, xzr // adc x2, x2, xzr // neg x4, x2 // and x2, x5, x4 // adds x10, x3, x2 // and x2, x6, x4 // adcs x5, x17, x2 // and x2, x1, x4 // adcs x6, x15, x2 // adcs x1, x13, xzr // adcs x4, x7, xzr // adc x2, x12, xzr // stp x10, x5, [x0] // @slothy:writes=buffer0 // stp x6, x1, [x0, #16] // @slothy:writes=buffer16 // stp x4, x2, [x0, #32] // @slothy:writes=buffer32 // ret // // The bash script used for step 2 is as follows: // // # Store the assembly instructions except the last 'ret' as, say, 'input.S'. // export OUTPUTS="[hint_buffer0,hint_buffer16,hint_buffer32]" // export RESERVED_REGS="[x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30,sp,q8,q9,q10,q11,q12,q13,q14,q15,v8,v9,v10,v11,v12,v13,v14,v15]" // <s2n-bignum>/tools/external/slothy.sh input.S my_out_dir // # my_out_dir/3.opt.s is the optimized assembly. Its output may differ // # from this file since the sequence is non-deterministically chosen. // # Please add 'ret' at the end of the output assembly. #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p384) .text .balign 4 S2N_BN_SYMBOL(bignum_montsqr_p384): CFI_START ldr q1, [x1] ldp x9, x2, [x1] ldr q0, [x1] ldp x4, x6, [x1, #16] rev64 v21.4S, v1.4S uzp2 v28.4S, v1.4S, v1.4S umulh x7, x9, x2 xtn v17.2S, v1.2D mul v27.4S, v21.4S, v0.4S ldr q20, [x1, #32] xtn v30.2S, v0.2D ldr q1, [x1, #32] uzp2 v31.4S, v0.4S, v0.4S ldp x5, x10, [x1, #32] umulh x8, x9, x4 uaddlp v3.2D, v27.4S umull v16.2D, v30.2S, v17.2S mul x16, x9, x4 umull v27.2D, v30.2S, v28.2S shrn v0.2S, v20.2D, #32 xtn v7.2S, v20.2D shl v20.2D, v3.2D, #32 umull v3.2D, v31.2S, v28.2S mul x3, x2, x4 umlal v20.2D, v30.2S, v17.2S umull v22.2D, v7.2S, v0.2S usra v27.2D, v16.2D, #32 umulh x11, x2, x4 movi v21.2D, #0x00000000ffffffff uzp2 v28.4S, v1.4S, v1.4S adds x15, x16, x7 and v5.16B, v27.16B, v21.16B adcs x3, x3, x8 usra v3.2D, v27.2D, #32 dup v29.2D, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2D, v31.2S, v17.2S mul x8, x9, x2 mov x7, v20.d[1] shl v19.2D, v22.2D, #33 xtn v25.2S, v29.2D rev64 v31.4S, v1.4S lsl x13, x14, #32 uzp2 v6.4S, v29.4S, v29.4S umlal v19.2D, v7.2S, v7.2S usra v3.2D, v5.2D, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4S, v31.4S, v29.4S xtn v4.2S, v1.2D adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2D, v25.2S, v28.2S adcs x11, x16, x16 umull v21.2D, v25.2S, v4.2S mov x17, v3.d[0] umull v18.2D, v6.2S, v28.2S adc x16, x8, xzr uaddlp v16.2D, v17.4S movi v1.2D, #0x00000000ffffffff subs x13, x13, x12 usra v31.2D, v21.2D, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2D, v16.2D, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16B, v31.16B, v1.16B adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2D, v6.2S, v4.2S usra v18.2D, v31.2D, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2D, v25.2S, v4.2S adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2D, v3.2D, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh 
x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x15, x3, x17 sbcs x3, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [x0] // @slothy:writes=buffer0 sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 stp x15, x3, [x0, #16] // @slothy:writes=buffer16 csetm x15, cc cneg x1, x1, cc stp x11, x14, [x0, #32] // @slothy:writes=buffer32 mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc cinv x16, x15, cc mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc cneg x9, x9, cc subs x4, x2, x4 cneg x4, x4, cc csetm x7, cc subs x2, x10, x6 cinv x8, x8, cc cneg x2, x2, cc cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc cneg x1, x1, cc eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 ldp x9, x17, [x0, #16] // @slothy:reads=buffer16 umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [x0] // @slothy:reads=buffer0 eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [x0, #32] // @slothy:reads=buffer32 adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x9 adcs x1, x1, x17 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 adcs x14, x14, x2 mov x2, #0x1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr 
adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [x0] // @slothy:writes=buffer0 adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [x0, #16] // @slothy:writes=buffer16 adc x17, x14, xzr stp x2, x17, [x0, #32] // depth 72 // @slothy:writes=buffer32 CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montsqr_p384) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
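Underneath the SLOTHY-rescheduled mix of NEON and scalar instructions, the functional contract stated in the header is simple. A plain-Python specification (names invented here, not part of the sources) with a small consistency check:

p384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
R_INV = pow(2**384, -1, p384)      # 2^(-384) mod p_384 (needs Python 3.8+)

def montsqr_p384_spec(x: int) -> int:
    # Specification only: z := (x^2 / 2^384) mod p_384, valid whenever
    # x^2 <= 2^384 * p_384, in particular for any x < p_384.
    assert x * x <= 2**384 * p384
    return (x * x * R_INV) % p384

# Squaring in the Montgomery domain squares the represented value:
a = 0x123456789ABCDEF
a_mont = (a * 2**384) % p384       # to Montgomery form
assert montsqr_p384_spec(a_mont) == (a * a * 2**384) % p384
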
wlsfx/bnbb
2,475
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_double_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Double modulo p_384, z := (2 * x) mod p_384, assuming x reduced // Input x[6]; output z[6] // // extern void bignum_double_p384(uint64_t z[static 6], // const uint64_t x[static 6]); // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_double_p384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_double_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_double_p384) .text .balign 4 #define z x0 #define x x1 #define d0 x2 #define d1 x3 #define d2 x4 #define d3 x5 #define d4 x6 #define d5 x7 #define c x8 #define n0 x9 #define n1 x10 #define n2 x11 #define n3 x12 #define n4 x13 #define n5 x14 S2N_BN_SYMBOL(bignum_double_p384): CFI_START // Double the input number as 2 * x = c + [d5; d4; d3; d2; d1; d0] // It's worth considering doing this with extr...63 instead ldp d0, d1, [x] ldp d2, d3, [x, #16] ldp d4, d5, [x, #32] adds d0, d0, d0 adcs d1, d1, d1 adcs d2, d2, d2 adcs d3, d3, d3 adcs d4, d4, d4 adcs d5, d5, d5 adc c, xzr, xzr // Subtract p_384 to give 2 * x - p_384 = c + [n5; n4; n3; n2; n1; n0] mov n0, #0x00000000ffffffff subs n0, d0, n0 mov n1, #0xffffffff00000000 sbcs n1, d1, n1 mov n2, #0xfffffffffffffffe sbcs n2, d2, n2 adcs n3, d3, xzr adcs n4, d4, xzr adcs n5, d5, xzr sbcs c, c, xzr // Now CF is set (because of inversion) if 2 * x >= p_384, in which case the // correct result is [n5; n4; n3; n2; n1; n0], otherwise // [d5; d4; d3; d2; d1; d0] csel d0, d0, n0, cc csel d1, d1, n1, cc csel d2, d2, n2, cc csel d3, d3, n3, cc csel d4, d4, n4, cc csel d5, d5, n5, cc // Store the result stp d0, d1, [z] stp d2, d3, [z, #16] stp d4, d5, [z, #32] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_double_p384) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
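The whole routine reduces to "double, tentatively subtract p_384, select on the (inverted) carry". A plain-Python restatement of that structure (a sketch, not part of the sources):

p384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def double_p384(x: int) -> int:
    assert 0 <= x < p384                 # input assumed reduced, as documented
    t = 2 * x                            # 6 words plus the carry bit c
    return t - p384 if t >= p384 else t  # csel keeps t when the subtract borrows
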
wlsfx/bnbb
44,553
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/p384_montjmixadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point mixed addition on NIST curve P-384 in Montgomery-Jacobian coordinates // // extern void p384_montjmixadd_alt(uint64_t p3[static 18], // const uint64_t p1[static 18], // const uint64_t p2[static 12]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // The "mixed" part means that p2 only has x and y coordinates, with the // implicit z coordinate assumed to be the identity. // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjmixadd_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjmixadd_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjmixadd_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 48 // Stable homes for input arguments during main code sequence #define input_z x24 #define input_x x25 #define input_y x26 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // #NSPACE is the total stack needed for these temporaries #define zp2 sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define NSPACE NUMSIZE*6 // Corresponds exactly to bignum_montmul_p384_alt #define montmul_p384(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x5, x6, [P2] __LF \ mul x12, x3, x5 __LF \ umulh x13, x3, x5 __LF \ mul x11, x3, x6 __LF \ umulh x14, x3, x6 __LF \ adds x13, x13, x11 __LF \ ldp x7, x8, [P2+16] __LF \ mul x11, x3, x7 __LF \ umulh x15, x3, x7 __LF \ adcs x14, x14, x11 __LF \ mul x11, x3, x8 __LF \ umulh x16, x3, x8 __LF \ adcs x15, x15, x11 __LF \ ldp x9, x10, [P2+32] __LF \ mul x11, x3, x9 __LF \ umulh x17, x3, x9 __LF \ adcs x16, x16, x11 __LF \ mul x11, x3, x10 __LF \ umulh x19, x3, x10 __LF \ adcs x17, x17, x11 __LF \ adc x19, x19, xzr __LF \ mul x11, x4, x5 __LF \ adds x13, x13, x11 __LF \ mul x11, x4, x6 __LF \ adcs x14, x14, x11 __LF \ mul x11, x4, x7 __LF \ adcs x15, x15, x11 __LF \ mul x11, x4, x8 __LF \ adcs x16, x16, x11 __LF \ mul x11, x4, x9 __LF \ adcs x17, x17, x11 __LF \ mul x11, x4, x10 __LF \ adcs x19, x19, x11 __LF \ cset x20, cs __LF \ umulh x11, x4, x5 __LF \ adds x14, x14, x11 __LF \ umulh x11, x4, x6 __LF \ adcs x15, x15, x11 __LF \ umulh x11, x4, x7 __LF \ adcs x16, x16, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x17, x17, x11 __LF \ umulh x11, x4, x9 __LF \ adcs x19, x19, x11 __LF \ umulh x11, x4, x10 __LF \ adc x20, x20, x11 __LF \ ldp x3, x4, [P1+16] __LF \ mul x11, x3, x5 __LF \ adds x14, x14, x11 __LF \ mul x11, x3, x6 __LF \ adcs x15, x15, x11 __LF \ mul x11, x3, x7 __LF \ adcs x16, x16, x11 __LF \ mul x11, x3, 
x8 __LF \ adcs x17, x17, x11 __LF \ mul x11, x3, x9 __LF \ adcs x19, x19, x11 __LF \ mul x11, x3, x10 __LF \ adcs x20, x20, x11 __LF \ cset x21, cs __LF \ umulh x11, x3, x5 __LF \ adds x15, x15, x11 __LF \ umulh x11, x3, x6 __LF \ adcs x16, x16, x11 __LF \ umulh x11, x3, x7 __LF \ adcs x17, x17, x11 __LF \ umulh x11, x3, x8 __LF \ adcs x19, x19, x11 __LF \ umulh x11, x3, x9 __LF \ adcs x20, x20, x11 __LF \ umulh x11, x3, x10 __LF \ adc x21, x21, x11 __LF \ mul x11, x4, x5 __LF \ adds x15, x15, x11 __LF \ mul x11, x4, x6 __LF \ adcs x16, x16, x11 __LF \ mul x11, x4, x7 __LF \ adcs x17, x17, x11 __LF \ mul x11, x4, x8 __LF \ adcs x19, x19, x11 __LF \ mul x11, x4, x9 __LF \ adcs x20, x20, x11 __LF \ mul x11, x4, x10 __LF \ adcs x21, x21, x11 __LF \ cset x22, cs __LF \ umulh x11, x4, x5 __LF \ adds x16, x16, x11 __LF \ umulh x11, x4, x6 __LF \ adcs x17, x17, x11 __LF \ umulh x11, x4, x7 __LF \ adcs x19, x19, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x20, x20, x11 __LF \ umulh x11, x4, x9 __LF \ adcs x21, x21, x11 __LF \ umulh x11, x4, x10 __LF \ adc x22, x22, x11 __LF \ ldp x3, x4, [P1+32] __LF \ mul x11, x3, x5 __LF \ adds x16, x16, x11 __LF \ mul x11, x3, x6 __LF \ adcs x17, x17, x11 __LF \ mul x11, x3, x7 __LF \ adcs x19, x19, x11 __LF \ mul x11, x3, x8 __LF \ adcs x20, x20, x11 __LF \ mul x11, x3, x9 __LF \ adcs x21, x21, x11 __LF \ mul x11, x3, x10 __LF \ adcs x22, x22, x11 __LF \ cset x2, cs __LF \ umulh x11, x3, x5 __LF \ adds x17, x17, x11 __LF \ umulh x11, x3, x6 __LF \ adcs x19, x19, x11 __LF \ umulh x11, x3, x7 __LF \ adcs x20, x20, x11 __LF \ umulh x11, x3, x8 __LF \ adcs x21, x21, x11 __LF \ umulh x11, x3, x9 __LF \ adcs x22, x22, x11 __LF \ umulh x11, x3, x10 __LF \ adc x2, x2, x11 __LF \ mul x11, x4, x5 __LF \ adds x17, x17, x11 __LF \ mul x11, x4, x6 __LF \ adcs x19, x19, x11 __LF \ mul x11, x4, x7 __LF \ adcs x20, x20, x11 __LF \ mul x11, x4, x8 __LF \ adcs x21, x21, x11 __LF \ mul x11, x4, x9 __LF \ adcs x22, x22, x11 __LF \ mul x11, x4, x10 __LF \ adcs x2, x2, x11 __LF \ cset x1, cs __LF \ umulh x11, x4, x5 __LF \ adds x19, x19, x11 __LF \ umulh x11, x4, x6 __LF \ adcs x20, x20, x11 __LF \ umulh x11, x4, x7 __LF \ adcs x21, x21, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x22, x22, x11 __LF \ umulh x11, x4, x9 __LF \ adcs x2, x2, x11 __LF \ umulh x11, x4, x10 __LF \ adc x1, x1, x11 __LF \ lsl x7, x12, #32 __LF \ add x12, x7, x12 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x12 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x12 __LF \ umulh x6, x6, x12 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x12 __LF \ adc x5, xzr, xzr __LF \ subs x13, x13, x7 __LF \ sbcs x14, x14, x6 __LF \ sbcs x15, x15, x5 __LF \ sbcs x16, x16, xzr __LF \ sbcs x17, x17, xzr __LF \ sbc x12, x12, xzr __LF \ lsl x7, x13, #32 __LF \ add x13, x7, x13 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x13 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x13 __LF \ umulh x6, x6, x13 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x13 __LF \ adc x5, xzr, xzr __LF \ subs x14, x14, x7 __LF \ sbcs x15, x15, x6 __LF \ sbcs x16, x16, x5 __LF \ sbcs x17, x17, xzr __LF \ sbcs x12, x12, xzr __LF \ sbc x13, x13, xzr __LF \ lsl x7, x14, #32 __LF \ add x14, x7, x14 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x14 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x14 __LF \ umulh x6, x6, x14 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x14 __LF \ adc x5, xzr, xzr __LF \ subs x15, x15, x7 __LF \ sbcs x16, x16, x6 __LF \ sbcs x17, x17, x5 __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ sbc x14, x14, xzr 
__LF \ lsl x7, x15, #32 __LF \ add x15, x7, x15 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x15 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x15 __LF \ umulh x6, x6, x15 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x15 __LF \ adc x5, xzr, xzr __LF \ subs x16, x16, x7 __LF \ sbcs x17, x17, x6 __LF \ sbcs x12, x12, x5 __LF \ sbcs x13, x13, xzr __LF \ sbcs x14, x14, xzr __LF \ sbc x15, x15, xzr __LF \ lsl x7, x16, #32 __LF \ add x16, x7, x16 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x16 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x16 __LF \ umulh x6, x6, x16 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x16 __LF \ adc x5, xzr, xzr __LF \ subs x17, x17, x7 __LF \ sbcs x12, x12, x6 __LF \ sbcs x13, x13, x5 __LF \ sbcs x14, x14, xzr __LF \ sbcs x15, x15, xzr __LF \ sbc x16, x16, xzr __LF \ lsl x7, x17, #32 __LF \ add x17, x7, x17 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x17 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x17 __LF \ umulh x6, x6, x17 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x17 __LF \ adc x5, xzr, xzr __LF \ subs x12, x12, x7 __LF \ sbcs x13, x13, x6 __LF \ sbcs x14, x14, x5 __LF \ sbcs x15, x15, xzr __LF \ sbcs x16, x16, xzr __LF \ sbc x17, x17, xzr __LF \ adds x12, x12, x19 __LF \ adcs x13, x13, x20 __LF \ adcs x14, x14, x21 __LF \ adcs x15, x15, x22 __LF \ adcs x16, x16, x2 __LF \ adcs x17, x17, x1 __LF \ adc x10, xzr, xzr __LF \ mov x11, #0xffffffff00000001 __LF \ adds x19, x12, x11 __LF \ mov x11, #0xffffffff __LF \ adcs x20, x13, x11 __LF \ mov x11, #0x1 __LF \ adcs x21, x14, x11 __LF \ adcs x22, x15, xzr __LF \ adcs x2, x16, xzr __LF \ adcs x1, x17, xzr __LF \ adcs x10, x10, xzr __LF \ csel x12, x12, x19, eq __LF \ csel x13, x13, x20, eq __LF \ csel x14, x14, x21, eq __LF \ csel x15, x15, x22, eq __LF \ csel x16, x16, x2, eq __LF \ csel x17, x17, x1, eq __LF \ stp x12, x13, [P0] __LF \ stp x14, x15, [P0+16] __LF \ stp x16, x17, [P0+32] // Corresponds exactly to bignum_montsqr_p384_alt #define montsqr_p384(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x9, x2, x3 __LF \ umulh x10, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x8, x2, x4 __LF \ adds x10, x10, x8 __LF \ mul x11, x2, x5 __LF \ mul x8, x3, x4 __LF \ adcs x11, x11, x8 __LF \ umulh x12, x2, x5 __LF \ mul x8, x3, x5 __LF \ adcs x12, x12, x8 __LF \ ldp x6, x7, [P1+32] __LF \ mul x13, x2, x7 __LF \ mul x8, x3, x6 __LF \ adcs x13, x13, x8 __LF \ umulh x14, x2, x7 __LF \ mul x8, x3, x7 __LF \ adcs x14, x14, x8 __LF \ mul x15, x5, x6 __LF \ adcs x15, x15, xzr __LF \ umulh x16, x5, x6 __LF \ adc x16, x16, xzr __LF \ umulh x8, x2, x4 __LF \ adds x11, x11, x8 __LF \ umulh x8, x3, x4 __LF \ adcs x12, x12, x8 __LF \ umulh x8, x3, x5 __LF \ adcs x13, x13, x8 __LF \ umulh x8, x3, x6 __LF \ adcs x14, x14, x8 __LF \ umulh x8, x3, x7 __LF \ adcs x15, x15, x8 __LF \ adc x16, x16, xzr __LF \ mul x8, x2, x6 __LF \ adds x12, x12, x8 __LF \ mul x8, x4, x5 __LF \ adcs x13, x13, x8 __LF \ mul x8, x4, x6 __LF \ adcs x14, x14, x8 __LF \ mul x8, x4, x7 __LF \ adcs x15, x15, x8 __LF \ mul x8, x5, x7 __LF \ adcs x16, x16, x8 __LF \ mul x17, x6, x7 __LF \ adcs x17, x17, xzr __LF \ umulh x19, x6, x7 __LF \ adc x19, x19, xzr __LF \ umulh x8, x2, x6 __LF \ adds x13, x13, x8 __LF \ umulh x8, x4, x5 __LF \ adcs x14, x14, x8 __LF \ umulh x8, x4, x6 __LF \ adcs x15, x15, x8 __LF \ umulh x8, x4, x7 __LF \ adcs x16, x16, x8 __LF \ umulh x8, x5, x7 __LF \ adcs x17, x17, x8 __LF \ adc x19, x19, xzr __LF \ adds x9, x9, x9 __LF \ adcs x10, x10, x10 __LF \ adcs x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 
__LF \ adcs x14, x14, x14 __LF \ adcs x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adcs x17, x17, x17 __LF \ adcs x19, x19, x19 __LF \ cset x20, hs __LF \ umulh x8, x2, x2 __LF \ mul x2, x2, x2 __LF \ adds x9, x9, x8 __LF \ mul x8, x3, x3 __LF \ adcs x10, x10, x8 __LF \ umulh x8, x3, x3 __LF \ adcs x11, x11, x8 __LF \ mul x8, x4, x4 __LF \ adcs x12, x12, x8 __LF \ umulh x8, x4, x4 __LF \ adcs x13, x13, x8 __LF \ mul x8, x5, x5 __LF \ adcs x14, x14, x8 __LF \ umulh x8, x5, x5 __LF \ adcs x15, x15, x8 __LF \ mul x8, x6, x6 __LF \ adcs x16, x16, x8 __LF \ umulh x8, x6, x6 __LF \ adcs x17, x17, x8 __LF \ mul x8, x7, x7 __LF \ adcs x19, x19, x8 __LF \ umulh x8, x7, x7 __LF \ adc x20, x20, x8 __LF \ lsl x5, x2, #32 __LF \ add x2, x5, x2 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x2 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x2 __LF \ umulh x4, x4, x2 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x2 __LF \ adc x3, xzr, xzr __LF \ subs x9, x9, x5 __LF \ sbcs x10, x10, x4 __LF \ sbcs x11, x11, x3 __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ sbc x2, x2, xzr __LF \ lsl x5, x9, #32 __LF \ add x9, x5, x9 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x9 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x9 __LF \ umulh x4, x4, x9 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x9 __LF \ adc x3, xzr, xzr __LF \ subs x10, x10, x5 __LF \ sbcs x11, x11, x4 __LF \ sbcs x12, x12, x3 __LF \ sbcs x13, x13, xzr __LF \ sbcs x2, x2, xzr __LF \ sbc x9, x9, xzr __LF \ lsl x5, x10, #32 __LF \ add x10, x5, x10 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x10 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x10 __LF \ umulh x4, x4, x10 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x10 __LF \ adc x3, xzr, xzr __LF \ subs x11, x11, x5 __LF \ sbcs x12, x12, x4 __LF \ sbcs x13, x13, x3 __LF \ sbcs x2, x2, xzr __LF \ sbcs x9, x9, xzr __LF \ sbc x10, x10, xzr __LF \ lsl x5, x11, #32 __LF \ add x11, x5, x11 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x11 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x11 __LF \ umulh x4, x4, x11 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x11 __LF \ adc x3, xzr, xzr __LF \ subs x12, x12, x5 __LF \ sbcs x13, x13, x4 __LF \ sbcs x2, x2, x3 __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbc x11, x11, xzr __LF \ lsl x5, x12, #32 __LF \ add x12, x5, x12 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x12 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x12 __LF \ umulh x4, x4, x12 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x12 __LF \ adc x3, xzr, xzr __LF \ subs x13, x13, x5 __LF \ sbcs x2, x2, x4 __LF \ sbcs x9, x9, x3 __LF \ sbcs x10, x10, xzr __LF \ sbcs x11, x11, xzr __LF \ sbc x12, x12, xzr __LF \ lsl x5, x13, #32 __LF \ add x13, x5, x13 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x13 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x13 __LF \ umulh x4, x4, x13 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x13 __LF \ adc x3, xzr, xzr __LF \ subs x2, x2, x5 __LF \ sbcs x9, x9, x4 __LF \ sbcs x10, x10, x3 __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbc x13, x13, xzr __LF \ adds x2, x2, x14 __LF \ adcs x9, x9, x15 __LF \ adcs x10, x10, x16 __LF \ adcs x11, x11, x17 __LF \ adcs x12, x12, x19 __LF \ adcs x13, x13, x20 __LF \ adc x6, xzr, xzr __LF \ mov x8, #-4294967295 __LF \ adds x14, x2, x8 __LF \ mov x8, #4294967295 __LF \ adcs x15, x9, x8 __LF \ mov x8, #1 __LF \ adcs x16, x10, x8 __LF \ adcs x17, x11, xzr __LF \ adcs x19, x12, xzr __LF \ adcs x20, x13, xzr __LF \ adcs x6, x6, xzr __LF \ csel x2, x2, x14, eq __LF \ csel x9, x9, x15, eq __LF \ csel x10, 
x10, x16, eq __LF \ csel x11, x11, x17, eq __LF \ csel x12, x12, x19, eq __LF \ csel x13, x13, x20, eq __LF \ stp x2, x9, [P0] __LF \ stp x10, x11, [P0+16] __LF \ stp x12, x13, [P0+32] // Almost-Montgomery variant which we use when an input to other muls // with the other argument fully reduced (which is always safe). In // fact, with the Karatsuba-based Montgomery mul here, we don't even // *need* the restriction that the other argument is reduced. #define amontsqr_p384(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x9, x2, x3 __LF \ umulh x10, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x8, x2, x4 __LF \ adds x10, x10, x8 __LF \ mul x11, x2, x5 __LF \ mul x8, x3, x4 __LF \ adcs x11, x11, x8 __LF \ umulh x12, x2, x5 __LF \ mul x8, x3, x5 __LF \ adcs x12, x12, x8 __LF \ ldp x6, x7, [P1+32] __LF \ mul x13, x2, x7 __LF \ mul x8, x3, x6 __LF \ adcs x13, x13, x8 __LF \ umulh x14, x2, x7 __LF \ mul x8, x3, x7 __LF \ adcs x14, x14, x8 __LF \ mul x15, x5, x6 __LF \ adcs x15, x15, xzr __LF \ umulh x16, x5, x6 __LF \ adc x16, x16, xzr __LF \ umulh x8, x2, x4 __LF \ adds x11, x11, x8 __LF \ umulh x8, x3, x4 __LF \ adcs x12, x12, x8 __LF \ umulh x8, x3, x5 __LF \ adcs x13, x13, x8 __LF \ umulh x8, x3, x6 __LF \ adcs x14, x14, x8 __LF \ umulh x8, x3, x7 __LF \ adcs x15, x15, x8 __LF \ adc x16, x16, xzr __LF \ mul x8, x2, x6 __LF \ adds x12, x12, x8 __LF \ mul x8, x4, x5 __LF \ adcs x13, x13, x8 __LF \ mul x8, x4, x6 __LF \ adcs x14, x14, x8 __LF \ mul x8, x4, x7 __LF \ adcs x15, x15, x8 __LF \ mul x8, x5, x7 __LF \ adcs x16, x16, x8 __LF \ mul x17, x6, x7 __LF \ adcs x17, x17, xzr __LF \ umulh x19, x6, x7 __LF \ adc x19, x19, xzr __LF \ umulh x8, x2, x6 __LF \ adds x13, x13, x8 __LF \ umulh x8, x4, x5 __LF \ adcs x14, x14, x8 __LF \ umulh x8, x4, x6 __LF \ adcs x15, x15, x8 __LF \ umulh x8, x4, x7 __LF \ adcs x16, x16, x8 __LF \ umulh x8, x5, x7 __LF \ adcs x17, x17, x8 __LF \ adc x19, x19, xzr __LF \ adds x9, x9, x9 __LF \ adcs x10, x10, x10 __LF \ adcs x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, x14 __LF \ adcs x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adcs x17, x17, x17 __LF \ adcs x19, x19, x19 __LF \ cset x20, hs __LF \ umulh x8, x2, x2 __LF \ mul x2, x2, x2 __LF \ adds x9, x9, x8 __LF \ mul x8, x3, x3 __LF \ adcs x10, x10, x8 __LF \ umulh x8, x3, x3 __LF \ adcs x11, x11, x8 __LF \ mul x8, x4, x4 __LF \ adcs x12, x12, x8 __LF \ umulh x8, x4, x4 __LF \ adcs x13, x13, x8 __LF \ mul x8, x5, x5 __LF \ adcs x14, x14, x8 __LF \ umulh x8, x5, x5 __LF \ adcs x15, x15, x8 __LF \ mul x8, x6, x6 __LF \ adcs x16, x16, x8 __LF \ umulh x8, x6, x6 __LF \ adcs x17, x17, x8 __LF \ mul x8, x7, x7 __LF \ adcs x19, x19, x8 __LF \ umulh x8, x7, x7 __LF \ adc x20, x20, x8 __LF \ lsl x5, x2, #32 __LF \ add x2, x5, x2 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x2 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x2 __LF \ umulh x4, x4, x2 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x2 __LF \ adc x3, xzr, xzr __LF \ subs x9, x9, x5 __LF \ sbcs x10, x10, x4 __LF \ sbcs x11, x11, x3 __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ sbc x2, x2, xzr __LF \ lsl x5, x9, #32 __LF \ add x9, x5, x9 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x9 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x9 __LF \ umulh x4, x4, x9 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x9 __LF \ adc x3, xzr, xzr __LF \ subs x10, x10, x5 __LF \ sbcs x11, x11, x4 __LF \ sbcs x12, x12, x3 __LF \ sbcs x13, x13, xzr __LF \ sbcs x2, x2, xzr __LF \ sbc x9, x9, xzr __LF \ lsl x5, x10, #32 __LF \ add x10, x5, 
x10 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x10 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x10 __LF \ umulh x4, x4, x10 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x10 __LF \ adc x3, xzr, xzr __LF \ subs x11, x11, x5 __LF \ sbcs x12, x12, x4 __LF \ sbcs x13, x13, x3 __LF \ sbcs x2, x2, xzr __LF \ sbcs x9, x9, xzr __LF \ sbc x10, x10, xzr __LF \ lsl x5, x11, #32 __LF \ add x11, x5, x11 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x11 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x11 __LF \ umulh x4, x4, x11 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x11 __LF \ adc x3, xzr, xzr __LF \ subs x12, x12, x5 __LF \ sbcs x13, x13, x4 __LF \ sbcs x2, x2, x3 __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbc x11, x11, xzr __LF \ lsl x5, x12, #32 __LF \ add x12, x5, x12 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x12 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x12 __LF \ umulh x4, x4, x12 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x12 __LF \ adc x3, xzr, xzr __LF \ subs x13, x13, x5 __LF \ sbcs x2, x2, x4 __LF \ sbcs x9, x9, x3 __LF \ sbcs x10, x10, xzr __LF \ sbcs x11, x11, xzr __LF \ sbc x12, x12, xzr __LF \ lsl x5, x13, #32 __LF \ add x13, x5, x13 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x13 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x13 __LF \ umulh x4, x4, x13 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x13 __LF \ adc x3, xzr, xzr __LF \ subs x2, x2, x5 __LF \ sbcs x9, x9, x4 __LF \ sbcs x10, x10, x3 __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbc x13, x13, xzr __LF \ adds x2, x2, x14 __LF \ adcs x9, x9, x15 __LF \ adcs x10, x10, x16 __LF \ adcs x11, x11, x17 __LF \ adcs x12, x12, x19 __LF \ adcs x13, x13, x20 __LF \ mov x14, #-4294967295 __LF \ mov x15, #4294967295 __LF \ csel x14, x14, xzr, cs __LF \ csel x15, x15, xzr, cs __LF \ cset x16, cs __LF \ adds x2, x2, x14 __LF \ adcs x9, x9, x15 __LF \ adcs x10, x10, x16 __LF \ adcs x11, x11, xzr __LF \ adcs x12, x12, xzr __LF \ adc x13, x13, xzr __LF \ stp x2, x9, [P0] __LF \ stp x10, x11, [P0+16] __LF \ stp x12, x13, [P0+32] // Corresponds exactly to bignum_sub_p384 #define sub_p384(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ ldp x9, x10, [P1+32] __LF \ ldp x4, x3, [P2+32] __LF \ sbcs x9, x9, x4 __LF \ sbcs x10, x10, x3 __LF \ csetm x3, lo __LF \ mov x4, #4294967295 __LF \ and x4, x4, x3 __LF \ adds x5, x5, x4 __LF \ eor x4, x4, x3 __LF \ adcs x6, x6, x4 __LF \ mov x4, #-2 __LF \ and x4, x4, x3 __LF \ adcs x7, x7, x4 __LF \ adcs x8, x8, x3 __LF \ adcs x9, x9, x3 __LF \ adc x10, x10, x3 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] S2N_BN_SYMBOL(p384_montjmixadd_alt): CFI_START // Save regs and make room on stack for temporary variables CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_DEC_SP(NSPACE) // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 mov input_y, x2 // Main code, just a sequence of basic field operations // 8 * multiply + 3 * square + 7 * subtract amontsqr_p384(zp2,z_1) montmul_p384(y2a,z_1,y_2) montmul_p384(x2a,zp2,x_2) montmul_p384(y2a,zp2,y2a) sub_p384(xd,x2a,x_1) sub_p384(yd,y2a,y_1) amontsqr_p384(zz,xd) montsqr_p384(ww,yd) montmul_p384(zzx1,zz,x_1) montmul_p384(zzx2,zz,x2a) sub_p384(resx,ww,zzx1) sub_p384(t1,zzx2,zzx1) montmul_p384(resz,xd,z_1) sub_p384(resx,resx,zzx2) sub_p384(t2,zzx1,resx) montmul_p384(t1,t1,y_1) 
montmul_p384(t2,yd,t2) sub_p384(resy,t2,t1) // Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence) ldp x0, x1, [z_1] ldp x2, x3, [z_1+16] ldp x4, x5, [z_1+32] orr x6, x0, x1 orr x7, x2, x3 orr x8, x4, x5 orr x6, x6, x7 orr x6, x6, x8 cmp x6, xzr // Multiplex: if p1 <> 0 just copy the computed result from the staging area. // If p1 = 0 then return the point p2 augmented with a z = 1 coordinate (in // Montgomery form so not the simple constant 1 but rather 2^384 - p_384), // hence giving 0 + p2 = p2 for the final result. ldp x0, x1, [resx] ldp x19, x20, [x_2] csel x0, x0, x19, ne csel x1, x1, x20, ne ldp x2, x3, [resx+16] ldp x19, x20, [x_2+16] csel x2, x2, x19, ne csel x3, x3, x20, ne ldp x4, x5, [resx+32] ldp x19, x20, [x_2+32] csel x4, x4, x19, ne csel x5, x5, x20, ne ldp x6, x7, [resy] ldp x19, x20, [y_2] csel x6, x6, x19, ne csel x7, x7, x20, ne ldp x8, x9, [resy+16] ldp x19, x20, [y_2+16] csel x8, x8, x19, ne csel x9, x9, x20, ne ldp x10, x11, [resy+32] ldp x19, x20, [y_2+32] csel x10, x10, x19, ne csel x11, x11, x20, ne ldp x12, x13, [resz] mov x19, #0xffffffff00000001 mov x20, #0x00000000ffffffff csel x12, x12, x19, ne csel x13, x13, x20, ne ldp x14, x15, [resz+16] mov x19, #1 csel x14, x14, x19, ne csel x15, x15, xzr, ne ldp x16, x17, [resz+32] csel x16, x16, xzr, ne csel x17, x17, xzr, ne stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [x_3+32] stp x6, x7, [y_3] stp x8, x9, [y_3+16] stp x10, x11, [y_3+32] stp x12, x13, [z_3] stp x14, x15, [z_3+16] stp x16, x17, [z_3+32] // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(p384_montjmixadd_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
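Both p384_montjmixadd variants share the "8 * multiply + 3 * square + 7 * subtract" schedule, which is the standard mixed Jacobian addition. The following plain-Python sketch mirrors that schedule with the same temporary names (illustrative only: it assumes Montgomery-domain inputs and omits the z_1 = 0 multiplexing that the assembly performs at the end).

p384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
R_INV = pow(2**384, -1, p384)

def mul(a, b):                      # Montgomery multiplication mod p_384
    return (a * b * R_INV) % p384

def sqr(a):                         # Montgomery squaring mod p_384
    return mul(a, a)

def sub(a, b):                      # subtraction mod p_384
    return (a - b) % p384

def montjmixadd(x1, y1, z1, x2, y2):
    zp2 = sqr(z1)                   # z1^2
    y2a = mul(z1, y2)
    x2a = mul(zp2, x2)              # U2 = x2 * z1^2
    y2a = mul(zp2, y2a)             # S2 = y2 * z1^3
    xd = sub(x2a, x1)               # H = U2 - X1
    yd = sub(y2a, y1)               # r = S2 - Y1
    zz = sqr(xd)                    # H^2
    ww = sqr(yd)                    # r^2
    zzx1 = mul(zz, x1)              # X1 * H^2
    zzx2 = mul(zz, x2a)             # U2 * H^2 = X1 * H^2 + H^3
    resx = sub(ww, zzx1)
    t1 = sub(zzx2, zzx1)            # H^3
    resz = mul(xd, z1)              # Z3 = H * z1
    resx = sub(resx, zzx2)          # X3 = r^2 - 2 * X1 * H^2 - H^3
    t2 = sub(zzx1, resx)            # X1 * H^2 - X3
    t1 = mul(t1, y1)                # Y1 * H^3
    t2 = mul(yd, t2)                # r * (X1 * H^2 - X3)
    resy = sub(t2, t1)              # Y3
    return resx, resy, resz
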
wlsfx/bnbb
5,627
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_tomont_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Convert to Montgomery form z := (2^384 * x) mod p_384
// Input x[6]; output z[6]
//
//    extern void bignum_tomont_p384(uint64_t z[static 6],
//                                   const uint64_t x[static 6]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_p384)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tomont_p384)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_p384)
        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_p384_alt)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tomont_p384_alt)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_p384_alt)
        .text
        .balign 4

// ----------------------------------------------------------------------------
// Core "x |-> (2^64 * x) mod p_384" macro, with x assumed to be < p_384.
// Input is in [d6;d5;d4;d3;d2;d1] and output in [d5;d4;d3;d2;d1;d0]
// using d6 as well as t1, t2, t3 as temporaries.
// ----------------------------------------------------------------------------

#define modstep_p384(d6,d5,d4,d3,d2,d1,d0, t1,t2,t3) \
/* Initial quotient approximation q = min (h + 1) (2^64 - 1) */ \
        adds    d6, d6, #1 __LF \
        csetm   t3, cs __LF \
        add     d6, d6, t3 __LF \
        orn     t3, xzr, t3 __LF \
        sub     t2, d6, #1 __LF \
        sub     t1, xzr, d6 __LF \
/* Correction term [d6;t2;t1;d0] = q * (2^384 - p_384) */ \
        lsl     d0, t1, #32 __LF \
        extr    t1, t2, t1, #32 __LF \
        lsr     t2, t2, #32 __LF \
        adds    d0, d0, d6 __LF \
        adcs    t1, t1, xzr __LF \
        adcs    t2, t2, d6 __LF \
        adc     d6, xzr, xzr __LF \
/* Addition to the initial value */ \
        adds    d1, d1, t1 __LF \
        adcs    d2, d2, t2 __LF \
        adcs    d3, d3, d6 __LF \
        adcs    d4, d4, xzr __LF \
        adcs    d5, d5, xzr __LF \
        adc     t3, t3, xzr __LF \
/* Use net top of the 7-word answer in t3 for masked correction */ \
        mov     t1, #0x00000000ffffffff __LF \
        and     t1, t1, t3 __LF \
        adds    d0, d0, t1 __LF \
        eor     t1, t1, t3 __LF \
        adcs    d1, d1, t1 __LF \
        mov     t1, #0xfffffffffffffffe __LF \
        and     t1, t1, t3 __LF \
        adcs    d2, d2, t1 __LF \
        adcs    d3, d3, t3 __LF \
        adcs    d4, d4, t3 __LF \
        adc     d5, d5, t3

S2N_BN_SYMBOL(bignum_tomont_p384):
S2N_BN_SYMBOL(bignum_tomont_p384_alt):
        CFI_START

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x6
#define d5 x7
#define d6 x8

#define t1 x9
#define t2 x10
#define t3 x11

#define n0 x8
#define n1 x9
#define n2 x10
#define n3 x11
#define n4 x12
#define n5 x1

// Load the inputs

        ldp     d0, d1, [x1]
        ldp     d2, d3, [x1, #16]
        ldp     d4, d5, [x1, #32]

// Do an initial reduction to make sure this is < p_384, using just
// a copy of the bignum_mod_p384_6 code. This is needed to set up the
// invariant "input < p_384" for the main modular reduction steps.

        mov     n0, #0x00000000ffffffff
        mov     n1, #0xffffffff00000000
        mov     n2, #0xfffffffffffffffe
        subs    n0, d0, n0
        sbcs    n1, d1, n1
        sbcs    n2, d2, n2
        adcs    n3, d3, xzr
        adcs    n4, d4, xzr
        adcs    n5, d5, xzr
        csel    d0, d0, n0, cc
        csel    d1, d1, n1, cc
        csel    d2, d2, n2, cc
        csel    d3, d3, n3, cc
        csel    d4, d4, n4, cc
        csel    d5, d5, n5, cc

// Successively multiply by 2^64 and reduce

        modstep_p384(d5,d4,d3,d2,d1,d0,d6, t1,t2,t3)
        modstep_p384(d4,d3,d2,d1,d0,d6,d5, t1,t2,t3)
        modstep_p384(d3,d2,d1,d0,d6,d5,d4, t1,t2,t3)
        modstep_p384(d2,d1,d0,d6,d5,d4,d3, t1,t2,t3)
        modstep_p384(d1,d0,d6,d5,d4,d3,d2, t1,t2,t3)
        modstep_p384(d0,d6,d5,d4,d3,d2,d1, t1,t2,t3)

// Store the result and return

        stp     d1, d2, [x0]
        stp     d3, d4, [x0, #16]
        stp     d5, d6, [x0, #32]

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_tomont_p384)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
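The six modstep_p384 invocations implement the multiplication by 2^384 one 64-bit shift at a time. A small Python model of one step (quotient estimate q = min(h+1, 2^64-1), subtraction of q * p_384, masked add-back), iterated six times, reproduces (2^384 * x) mod p_384; this sketch is illustrative only and not part of the source:

    p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

    def modstep(x):
        # one x |-> (2^64 * x) mod p_384 round, mirroring modstep_p384
        x <<= 64
        q = min((x >> 384) + 1, 2**64 - 1)   # quotient approximation
        x -= q * p_384
        return x + p_384 if x < 0 else x     # masked correction

    x = 0x123456789abcdef % p_384
    r = x
    for _ in range(6):
        r = modstep(r)
    assert r == (2**384 * x) % p_384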
wlsfx/bnbb
2,219
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_mod_p384_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Reduce modulo field characteristic, z := x mod p_384
// Input x[6]; output z[6]
//
//    extern void bignum_mod_p384_6(uint64_t z[static 6], const uint64_t x[static 6]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_p384_6)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_p384_6)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_p384_6)
        .text
        .balign 4

#define z x0
#define x x1

#define n0 x2
#define n1 x3
#define n2 x4
#define n3 x5
#define n4 x6
#define n5 x7

#define d0 x8
#define d1 x9
#define d2 x10
#define d3 x11
#define d4 x12
#define d5 x13

S2N_BN_SYMBOL(bignum_mod_p384_6):
        CFI_START

// Load the complicated lower three words of p_384 = [-1;-1;-1;n2;n1;n0]

        mov     n0, #0x00000000ffffffff
        mov     n1, #0xffffffff00000000
        mov     n2, #0xfffffffffffffffe

// Load the input number

        ldp     d0, d1, [x]
        ldp     d2, d3, [x, #16]
        ldp     d4, d5, [x, #32]

// Do the subtraction. Since the top three words of p_384 are all 1s
// we can devolve the top to adding 0, thanks to the inverted carry.

        subs    n0, d0, n0
        sbcs    n1, d1, n1
        sbcs    n2, d2, n2
        adcs    n3, d3, xzr
        adcs    n4, d4, xzr
        adcs    n5, d5, xzr

// Now if the carry is *clear* (inversion at work) the subtraction carried
// and hence we should have done nothing, so we reset each n_i = d_i

        csel    n0, d0, n0, cc
        csel    n1, d1, n1, cc
        csel    n2, d2, n2, cc
        csel    n3, d3, n3, cc
        csel    n4, d4, n4, cc
        csel    n5, d5, n5, cc

// Store the end result

        stp     n0, n1, [z]
        stp     n2, n3, [z, #16]
        stp     n4, n5, [z, #32]

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_mod_p384_6)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
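A single trial subtraction suffices here because any 6-word input is below 2 * p_384. A Python sketch of the same compare-and-correct (illustrative, not from the source tree):

    p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

    def mod_p384_6(x):
        assert 0 <= x < 2**384       # any 6-word value is < 2 * p_384
        d = x - p_384
        return x if d < 0 else d     # borrow (inverted carry) means keep x

    assert mod_p384_6(p_384 + 5) == 5
    assert mod_p384_6(p_384 - 1) == p_384 - 1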
wlsfx/bnbb
5,369
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_littleendian_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert 6-digit (384-bit) bignum to/from little-endian form // Input x[6]; output z[6] // // extern void bignum_littleendian_6(uint64_t z[static 6], // const uint64_t x[static 6]); // // The same function is given two other prototypes whose names reflect the // treatment of one or other argument as a byte array rather than word array: // // extern void bignum_fromlebytes_6(uint64_t z[static 6], // const uint8_t x[static 48]); // // extern void bignum_tolebytes_6(uint8_t z[static 48], // const uint64_t x[static 6]); // // The implementation works by loading in bytes and storing in words (i.e. // stylistically it is "fromlebytes"); in the more common little-endian // usage of ARM, this is just copying. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_littleendian_6) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_littleendian_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_littleendian_6) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_fromlebytes_6) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_fromlebytes_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_fromlebytes_6) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tolebytes_6) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tolebytes_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tolebytes_6) .text .balign 4 #define z x0 #define x x1 #define d x2 #define dshort w2 #define a x3 S2N_BN_SYMBOL(bignum_littleendian_6): S2N_BN_SYMBOL(bignum_fromlebytes_6): S2N_BN_SYMBOL(bignum_tolebytes_6): CFI_START // word 0 ldrb dshort, [x] extr a, d, xzr, #8 ldrb dshort, [x, #1] extr a, d, a, #8 ldrb dshort, [x, #2] extr a, d, a, #8 ldrb dshort, [x, #3] extr a, d, a, #8 ldrb dshort, [x, #4] extr a, d, a, #8 ldrb dshort, [x, #5] extr a, d, a, #8 ldrb dshort, [x, #6] extr a, d, a, #8 ldrb dshort, [x, #7] extr a, d, a, #8 str a, [z] // word 1 ldrb dshort, [x, #8] extr a, d, xzr, #8 ldrb dshort, [x, #9] extr a, d, a, #8 ldrb dshort, [x, #10] extr a, d, a, #8 ldrb dshort, [x, #11] extr a, d, a, #8 ldrb dshort, [x, #12] extr a, d, a, #8 ldrb dshort, [x, #13] extr a, d, a, #8 ldrb dshort, [x, #14] extr a, d, a, #8 ldrb dshort, [x, #15] extr a, d, a, #8 str a, [z, #8] // word 2 ldrb dshort, [x, #16] extr a, d, xzr, #8 ldrb dshort, [x, #17] extr a, d, a, #8 ldrb dshort, [x, #18] extr a, d, a, #8 ldrb dshort, [x, #19] extr a, d, a, #8 ldrb dshort, [x, #20] extr a, d, a, #8 ldrb dshort, [x, #21] extr a, d, a, #8 ldrb dshort, [x, #22] extr a, d, a, #8 ldrb dshort, [x, #23] extr a, d, a, #8 str a, [z, #16] // word 3 ldrb dshort, [x, #24] extr a, d, xzr, #8 ldrb dshort, [x, #25] extr a, d, a, #8 ldrb dshort, [x, #26] extr a, d, a, #8 ldrb dshort, [x, #27] extr a, d, a, #8 ldrb dshort, [x, #28] extr a, d, a, #8 ldrb dshort, [x, #29] extr a, d, a, #8 ldrb dshort, [x, #30] extr a, d, a, #8 ldrb dshort, [x, #31] extr a, d, a, #8 str a, [z, #24] // word 4 ldrb dshort, [x, #32] extr a, d, xzr, #8 ldrb dshort, [x, #33] extr a, d, a, #8 ldrb dshort, [x, #34] extr a, d, a, #8 ldrb dshort, [x, #35] extr a, d, a, #8 ldrb dshort, [x, #36] extr a, d, a, #8 ldrb dshort, [x, #37] extr a, d, a, #8 ldrb dshort, [x, #38] extr a, d, a, #8 ldrb dshort, [x, #39] extr a, d, a, #8 str a, [z, #32] // word 5 ldrb dshort, [x, #40] extr a, d, xzr, #8 ldrb dshort, [x, #41] extr a, d, a, #8 ldrb dshort, [x, #42] extr a, d, 
a, #8 ldrb dshort, [x, #43] extr a, d, a, #8 ldrb dshort, [x, #44] extr a, d, a, #8 ldrb dshort, [x, #45] extr a, d, a, #8 ldrb dshort, [x, #46] extr a, d, a, #8 ldrb dshort, [x, #47] extr a, d, a, #8 str a, [z, #40] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_littleendian_6) S2N_BN_SIZE_DIRECTIVE(bignum_fromlebytes_6) S2N_BN_SIZE_DIRECTIVE(bignum_tolebytes_6) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
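Since each 64-bit word is just reassembled from its 8 little-endian bytes, the whole function amounts to a plain 48-byte copy on a little-endian machine, as the header comment says. A Python sketch of the word-level behaviour (struct usage here is an illustration, not from the source):

    import struct

    blob = bytes(range(48))
    words = list(struct.unpack("<6Q", blob))        # 6 little-endian words
    assert words[0] == int.from_bytes(blob[:8], "little")
    assert struct.pack("<6Q", *words) == blob       # round trip: a plain copy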
wlsfx/bnbb
5,538
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_deamont_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Convert from almost-Montgomery form, z := (x / 2^384) mod p_384
// Input x[6]; output z[6]
//
//    extern void bignum_deamont_p384(uint64_t z[static 6],
//                                    const uint64_t x[static 6]);
//
// Convert a 6-digit bignum x out of its (optionally almost) Montgomery form,
// "almost" meaning any 6-digit input will work, with no range restriction.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_deamont_p384)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_deamont_p384)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_deamont_p384)
        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_deamont_p384_alt)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_deamont_p384_alt)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_deamont_p384_alt)
        .text
        .balign 4

// ----------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1],
// adding to the existing contents of [d5;d4;d3;d2;d1]. It is fine
// for d6 to be the same register as d0.
//
// We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w
// where w = [d0 + (d0<<32)] mod 2^64
// ----------------------------------------------------------------------------

#define montreds(d6,d5,d4,d3,d2,d1,d0, t3,t2,t1) \
/* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \
/* Recycle d0 (which we know gets implicitly cancelled) to store it */ \
        lsl     t1, d0, #32 __LF \
        add     d0, t1, d0 __LF \
/* Now let [t2;t1] = 2^64 * w - w + w_hi where w_hi = floor(w/2^32) */ \
/* We need to subtract 2^32 * this, and we can ignore its lower 32 */ \
/* bits since by design it will cancel anyway; we only need the w_hi */ \
/* part to get the carry propagation going. */ \
        lsr     t1, d0, #32 __LF \
        subs    t1, t1, d0 __LF \
        sbc     t2, d0, xzr __LF \
/* Now select in t1 the field to subtract from d1 */ \
        extr    t1, t2, t1, #32 __LF \
/* And now get the terms to subtract from d2 and d3 */ \
        lsr     t2, t2, #32 __LF \
        adds    t2, t2, d0 __LF \
        adc     t3, xzr, xzr __LF \
/* Do the subtraction of that portion */ \
        subs    d1, d1, t1 __LF \
        sbcs    d2, d2, t2 __LF \
        sbcs    d3, d3, t3 __LF \
        sbcs    d4, d4, xzr __LF \
        sbcs    d5, d5, xzr __LF \
/* Now effectively add 2^384 * w by taking d0 as the input for last sbc */ \
        sbc     d6, d0, xzr

// Input parameters

#define z x0
#define x x1

// Rotating registers for the intermediate windows

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x6
#define d5 x7

// Other temporaries

#define u x8
#define v x9
#define w x10

S2N_BN_SYMBOL(bignum_deamont_p384):
S2N_BN_SYMBOL(bignum_deamont_p384_alt):
        CFI_START

// Set up an initial window with the input x and an extra leading zero

        ldp     d0, d1, [x]
        ldp     d2, d3, [x, #16]
        ldp     d4, d5, [x, #32]

// Systematically scroll left doing 1-step reductions

        montreds(d0,d5,d4,d3,d2,d1,d0, u,v,w)
        montreds(d1,d0,d5,d4,d3,d2,d1, u,v,w)
        montreds(d2,d1,d0,d5,d4,d3,d2, u,v,w)
        montreds(d3,d2,d1,d0,d5,d4,d3, u,v,w)
        montreds(d4,d3,d2,d1,d0,d5,d4, u,v,w)
        montreds(d5,d4,d3,d2,d1,d0,d5, u,v,w)

// Now compare end result in [d5;d4;d3;d2;d1;d0] = dd with p_384 by *adding*
// 2^384 - p_384 = [0;0;0;w;v;u]. This will set CF if
// dd + (2^384 - p_384) >= 2^384, hence iff dd >= p_384

        mov     u, #0xffffffff00000001
        mov     v, #0x00000000ffffffff
        mov     w, #0x0000000000000001
        adds    xzr, d0, u
        adcs    xzr, d1, v
        adcs    xzr, d2, w
        adcs    xzr, d3, xzr
        adcs    xzr, d4, xzr
        adcs    xzr, d5, xzr

// Convert the condition dd >= p_384 into a bitmask in w and do a masked
// subtraction of p_384, via a masked addition of 2^384 - p_384:

        csetm   w, cs
        and     u, u, w
        adds    d0, d0, u
        and     v, v, w
        adcs    d1, d1, v
        and     w, w, #1
        adcs    d2, d2, w
        adcs    d3, d3, xzr
        adcs    d4, d4, xzr
        adc     d5, d5, xzr

// Store it back

        stp     d0, d1, [z]
        stp     d2, d3, [z, #16]
        stp     d4, d5, [z, #32]

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_deamont_p384)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
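The correctness of the montreds multiplier hinges on p_384 mod 2^64 = 2^32 - 1, so w = (d0 + (d0 << 32)) mod 2^64 makes d0 + w * p_384 divisible by 2^64, which is exactly what lets each step clear one bottom word. A quick Python check (illustrative only):

    import random
    p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

    for _ in range(1000):
        d0 = random.randrange(2**64)
        w = (d0 + (d0 << 32)) % 2**64
        # p_384 = 2^32 - 1 (mod 2^64), so w * p_384 = -d0 (mod 2^64)
        assert (d0 + w * p_384) % 2**64 == 0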
wlsfx/bnbb
8,017
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_montsqr_p384_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery square, z := (x^2 / 2^384) mod p_384 // Input x[6]; output z[6] // // extern void bignum_montsqr_p384_alt(uint64_t z[static 6], // const uint64_t x[static 6]); // // Does z := (x^2 / 2^384) mod p_384, assuming x^2 <= 2^384 * p_384, which is // guaranteed in particular if x < p_384 initially (the "intended" case). // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p384_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr_p384_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p384_alt) .text .balign 4 // --------------------------------------------------------------------------- // Core one-step "short" Montgomery reduction macro. Takes input in // [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1], // adding to the existing contents of [d5;d4;d3;d2;d1]. It is fine // for d6 to be the same register as d0. // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 // --------------------------------------------------------------------------- #define montreds(d6,d5,d4,d3,d2,d1,d0, t3,t2,t1) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ /* Store it in d6 to make the 2^384 * w contribution already */ \ lsl t1, d0, #32 __LF \ add d6, t1, d0 __LF \ /* Now let [t3;t2;t1;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel d0 so we don't need it */ \ mov t1, #0xffffffff00000001 __LF \ umulh t1, t1, d6 __LF \ mov t2, #0x00000000ffffffff __LF \ mul t3, t2, d6 __LF \ umulh t2, t2, d6 __LF \ adds t1, t1, t3 __LF \ adcs t2, t2, d6 __LF \ adc t3, xzr, xzr __LF \ /* Now add it, by subtracting from 2^384 * w + x */ \ subs d1, d1, t1 __LF \ sbcs d2, d2, t2 __LF \ sbcs d3, d3, t3 __LF \ sbcs d4, d4, xzr __LF \ sbcs d5, d5, xzr __LF \ sbc d6, d6, xzr #define z x0 #define x x1 #define a0 x2 #define a1 x3 #define a2 x4 #define a3 x5 #define a4 x6 #define a5 x7 #define l x8 #define u0 x2 // The same as a0, which is safe #define u1 x9 #define u2 x10 #define u3 x11 #define u4 x12 #define u5 x13 #define u6 x14 #define u7 x15 #define u8 x16 #define u9 x17 #define u10 x19 #define u11 x20 #define h x6 // same as a4 S2N_BN_SYMBOL(bignum_montsqr_p384_alt): CFI_START // It's convenient to have two more registers to play with CFI_PUSH2(x19,x20) // Load all the elements as [a5;a4;a3;a2;a1;a0], set up an initial // window [u8;u7; u6;u5; u4;u3; u2;u1] = [34;05;03;01], and then // chain in the addition of 02 + 12 + 13 + 14 + 15 to that window // (no carry-out possible since we add it to the top of a product). 
ldp a0, a1, [x] mul u1, a0, a1 umulh u2, a0, a1 ldp a2, a3, [x, #16] mul l, a0, a2 adds u2, u2, l mul u3, a0, a3 mul l, a1, a2 adcs u3, u3, l umulh u4, a0, a3 mul l, a1, a3 adcs u4, u4, l ldp a4, a5, [x, #32] mul u5, a0, a5 mul l, a1, a4 adcs u5, u5, l umulh u6, a0, a5 mul l, a1, a5 adcs u6, u6, l mul u7, a3, a4 adcs u7, u7, xzr umulh u8, a3, a4 adc u8, u8, xzr umulh l, a0, a2 adds u3, u3, l umulh l, a1, a2 adcs u4, u4, l umulh l, a1, a3 adcs u5, u5, l umulh l, a1, a4 adcs u6, u6, l umulh l, a1, a5 adcs u7, u7, l adc u8, u8, xzr // Now chain in the 04 + 23 + 24 + 25 + 35 + 45 terms mul l, a0, a4 adds u4, u4, l mul l, a2, a3 adcs u5, u5, l mul l, a2, a4 adcs u6, u6, l mul l, a2, a5 adcs u7, u7, l mul l, a3, a5 adcs u8, u8, l mul u9, a4, a5 adcs u9, u9, xzr umulh u10, a4, a5 adc u10, u10, xzr umulh l, a0, a4 adds u5, u5, l umulh l, a2, a3 adcs u6, u6, l umulh l, a2, a4 adcs u7, u7, l umulh l, a2, a5 adcs u8, u8, l umulh l, a3, a5 adcs u9, u9, l adc u10, u10, xzr // Double that, with u11 holding the top carry adds u1, u1, u1 adcs u2, u2, u2 adcs u3, u3, u3 adcs u4, u4, u4 adcs u5, u5, u5 adcs u6, u6, u6 adcs u7, u7, u7 adcs u8, u8, u8 adcs u9, u9, u9 adcs u10, u10, u10 cset u11, cs // Add the homogeneous terms 00 + 11 + 22 + 33 + 44 + 55 umulh l, a0, a0 mul u0, a0, a0 adds u1, u1, l mul l, a1, a1 adcs u2, u2, l umulh l, a1, a1 adcs u3, u3, l mul l, a2, a2 adcs u4, u4, l umulh l, a2, a2 adcs u5, u5, l mul l, a3, a3 adcs u6, u6, l umulh l, a3, a3 adcs u7, u7, l mul l, a4, a4 adcs u8, u8, l umulh l, a4, a4 adcs u9, u9, l mul l, a5, a5 adcs u10, u10, l umulh l, a5, a5 adc u11, u11, l // Montgomery rotate the low half montreds(u0,u5,u4,u3,u2,u1,u0, a1,a2,a3) montreds(u1,u0,u5,u4,u3,u2,u1, a1,a2,a3) montreds(u2,u1,u0,u5,u4,u3,u2, a1,a2,a3) montreds(u3,u2,u1,u0,u5,u4,u3, a1,a2,a3) montreds(u4,u3,u2,u1,u0,u5,u4, a1,a2,a3) montreds(u5,u4,u3,u2,u1,u0,u5, a1,a2,a3) // Add up the high and low parts as [h; u5;u4;u3;u2;u1;u0] = z adds u0, u0, u6 adcs u1, u1, u7 adcs u2, u2, u8 adcs u3, u3, u9 adcs u4, u4, u10 adcs u5, u5, u11 adc h, xzr, xzr // Now add [h; u11;u10;u9;u8;u7;u6] = z + (2^384 - p_384) mov l, #0xffffffff00000001 adds u6, u0, l mov l, #0x00000000ffffffff adcs u7, u1, l mov l, #0x0000000000000001 adcs u8, u2, l adcs u9, u3, xzr adcs u10, u4, xzr adcs u11, u5, xzr adcs h, h, xzr // Now z >= p_384 iff h is nonzero, so select accordingly csel u0, u0, u6, eq csel u1, u1, u7, eq csel u2, u2, u8, eq csel u3, u3, u9, eq csel u4, u4, u10, eq csel u5, u5, u11, eq // Store back final result stp u0, u1, [z] stp u2, u3, [z, #16] stp u4, u5, [z, #32] // Restore registers CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montsqr_p384_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
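The schedule above computes each off-diagonal product a_i * a_j once, doubles the whole window, then adds the six squares; that roughly halves the multiplication count relative to a general 6x6 product. A Python sketch of the decomposition (illustrative, not part of the source):

    import random

    def square(a):
        d = [(a >> (64 * i)) & (2**64 - 1) for i in range(6)]
        cross = sum((d[i] * d[j]) << (64 * (i + j))
                    for i in range(6) for j in range(i + 1, 6))
        diag = sum((d[i] * d[i]) << (128 * i) for i in range(6))
        return 2 * cross + diag   # double the off-diagonal terms, add squares

    a = random.getrandbits(384)
    assert square(a) == a * a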
wlsfx/bnbb
5,526
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_bigendian_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert 6-digit (384-bit) bignum to/from big-endian form // Input x[6]; output z[6] // // extern void bignum_bigendian_6(uint64_t z[static 6], // const uint64_t x[static 6]); // // The same function is given two other prototypes whose names reflect the // treatment of one or other argument as a byte array rather than word array: // // extern void bignum_frombebytes_6(uint64_t z[static 6], // const uint8_t x[static 48]); // // extern void bignum_tobebytes_6(uint8_t z[static 48], // const uint64_t x[static 6]); // // The implementation works by loading in bytes and storing in words (i.e. // stylistically it is "frombebytes"); in the more common little-endian // usage of ARM, this is just byte reversal. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_bigendian_6) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_bigendian_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_bigendian_6) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_frombebytes_6) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_frombebytes_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_frombebytes_6) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tobebytes_6) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tobebytes_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tobebytes_6) .text .balign 4 #define z x0 #define x x1 #define d x2 #define dshort w2 #define a x3 #define c x4 // The reads and writes are organized in mirror-image pairs (0-5, 1-4, 2-3) // to allow x and z to point to the same buffer without using more // intermediate registers. 
S2N_BN_SYMBOL(bignum_bigendian_6): S2N_BN_SYMBOL(bignum_frombebytes_6): S2N_BN_SYMBOL(bignum_tobebytes_6): CFI_START // 0 and 5 words ldrb dshort, [x, #7] extr a, d, xzr, #8 ldrb dshort, [x, #6] extr a, d, a, #8 ldrb dshort, [x, #5] extr a, d, a, #8 ldrb dshort, [x, #4] extr a, d, a, #8 ldrb dshort, [x, #3] extr a, d, a, #8 ldrb dshort, [x, #2] extr a, d, a, #8 ldrb dshort, [x, #1] extr a, d, a, #8 ldrb dshort, [x] extr a, d, a, #8 ldrb dshort, [x, #47] extr c, d, xzr, #8 ldrb dshort, [x, #46] extr c, d, c, #8 ldrb dshort, [x, #45] extr c, d, c, #8 ldrb dshort, [x, #44] extr c, d, c, #8 ldrb dshort, [x, #43] extr c, d, c, #8 ldrb dshort, [x, #42] extr c, d, c, #8 ldrb dshort, [x, #41] extr c, d, c, #8 ldrb dshort, [x, #40] extr c, d, c, #8 str a, [z, #40] str c, [z] // 1 and 4 words ldrb dshort, [x, #15] extr a, d, xzr, #8 ldrb dshort, [x, #14] extr a, d, a, #8 ldrb dshort, [x, #13] extr a, d, a, #8 ldrb dshort, [x, #12] extr a, d, a, #8 ldrb dshort, [x, #11] extr a, d, a, #8 ldrb dshort, [x, #10] extr a, d, a, #8 ldrb dshort, [x, #9] extr a, d, a, #8 ldrb dshort, [x, #8] extr a, d, a, #8 ldrb dshort, [x, #39] extr c, d, xzr, #8 ldrb dshort, [x, #38] extr c, d, c, #8 ldrb dshort, [x, #37] extr c, d, c, #8 ldrb dshort, [x, #36] extr c, d, c, #8 ldrb dshort, [x, #35] extr c, d, c, #8 ldrb dshort, [x, #34] extr c, d, c, #8 ldrb dshort, [x, #33] extr c, d, c, #8 ldrb dshort, [x, #32] extr c, d, c, #8 str a, [z, #32] str c, [z, #8] // 2 and 3 words ldrb dshort, [x, #23] extr a, d, xzr, #8 ldrb dshort, [x, #22] extr a, d, a, #8 ldrb dshort, [x, #21] extr a, d, a, #8 ldrb dshort, [x, #20] extr a, d, a, #8 ldrb dshort, [x, #19] extr a, d, a, #8 ldrb dshort, [x, #18] extr a, d, a, #8 ldrb dshort, [x, #17] extr a, d, a, #8 ldrb dshort, [x, #16] extr a, d, a, #8 ldrb dshort, [x, #31] extr c, d, xzr, #8 ldrb dshort, [x, #30] extr c, d, c, #8 ldrb dshort, [x, #29] extr c, d, c, #8 ldrb dshort, [x, #28] extr c, d, c, #8 ldrb dshort, [x, #27] extr c, d, c, #8 ldrb dshort, [x, #26] extr c, d, c, #8 ldrb dshort, [x, #25] extr c, d, c, #8 ldrb dshort, [x, #24] extr c, d, c, #8 str a, [z, #24] str c, [z, #16] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_bigendian_6) S2N_BN_SIZE_DIRECTIVE(bignum_frombebytes_6) S2N_BN_SIZE_DIRECTIVE(bignum_tobebytes_6) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
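Because each word is byte-reversed and stored at the mirrored word position, the whole transformation is a 48-byte reversal and hence an involution, which is what makes the mirror-image pairing safe for in-place use. A Python sketch (illustrative only):

    def bigendian_6(b):
        out = bytearray(48)
        for i in range(6):                       # mirror-image word pairs
            out[8 * (5 - i):8 * (6 - i)] = b[8 * i:8 * i + 8][::-1]
        return bytes(out)

    b = bytes(range(48))
    assert bigendian_6(b) == b[::-1]             # whole-buffer byte reversal
    assert bigendian_6(bigendian_6(b)) == b      # involution: in-place safe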
wlsfx/bnbb
4,681
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_demont_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Convert from Montgomery form z := (x / 2^384) mod p_384, assuming x reduced
// Input x[6]; output z[6]
//
//    extern void bignum_demont_p384(uint64_t z[static 6],
//                                   const uint64_t x[static 6]);
//
// This assumes the input is < p_384 for correctness. If this is not the case,
// use the variant "bignum_deamont_p384" instead.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_demont_p384)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_demont_p384)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_demont_p384)
        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_demont_p384_alt)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_demont_p384_alt)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_demont_p384_alt)
        .text
        .balign 4

// ----------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1],
// adding to the existing contents of [d5;d4;d3;d2;d1]. It is fine
// for d6 to be the same register as d0.
//
// We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w
// where w = [d0 + (d0<<32)] mod 2^64
// ----------------------------------------------------------------------------

#define montreds(d6,d5,d4,d3,d2,d1,d0, t3,t2,t1) \
/* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \
/* Recycle d0 (which we know gets implicitly cancelled) to store it */ \
        lsl     t1, d0, #32 __LF \
        add     d0, t1, d0 __LF \
/* Now let [t2;t1] = 2^64 * w - w + w_hi where w_hi = floor(w/2^32) */ \
/* We need to subtract 2^32 * this, and we can ignore its lower 32 */ \
/* bits since by design it will cancel anyway; we only need the w_hi */ \
/* part to get the carry propagation going. */ \
        lsr     t1, d0, #32 __LF \
        subs    t1, t1, d0 __LF \
        sbc     t2, d0, xzr __LF \
/* Now select in t1 the field to subtract from d1 */ \
        extr    t1, t2, t1, #32 __LF \
/* And now get the terms to subtract from d2 and d3 */ \
        lsr     t2, t2, #32 __LF \
        adds    t2, t2, d0 __LF \
        adc     t3, xzr, xzr __LF \
/* Do the subtraction of that portion */ \
        subs    d1, d1, t1 __LF \
        sbcs    d2, d2, t2 __LF \
        sbcs    d3, d3, t3 __LF \
        sbcs    d4, d4, xzr __LF \
        sbcs    d5, d5, xzr __LF \
/* Now effectively add 2^384 * w by taking d0 as the input for last sbc */ \
        sbc     d6, d0, xzr

// Input parameters

#define z x0
#define x x1

// Rotating registers for the intermediate windows

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x6
#define d5 x7

// Other temporaries

#define u x8
#define v x9
#define w x10

S2N_BN_SYMBOL(bignum_demont_p384):
S2N_BN_SYMBOL(bignum_demont_p384_alt):
        CFI_START

// Set up an initial window with the input x and an extra leading zero

        ldp     d0, d1, [x]
        ldp     d2, d3, [x, #16]
        ldp     d4, d5, [x, #32]

// Systematically scroll left doing 1-step reductions

        montreds(d0,d5,d4,d3,d2,d1,d0, u,v,w)
        montreds(d1,d0,d5,d4,d3,d2,d1, u,v,w)
        montreds(d2,d1,d0,d5,d4,d3,d2, u,v,w)
        montreds(d3,d2,d1,d0,d5,d4,d3, u,v,w)
        montreds(d4,d3,d2,d1,d0,d5,d4, u,v,w)
        montreds(d5,d4,d3,d2,d1,d0,d5, u,v,w)

// This is already our answer with no correction needed

        stp     d0, d1, [z]
        stp     d2, d3, [z, #16]
        stp     d4, d5, [z, #32]

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_demont_p384)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
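Functionally, the six montreds steps divide by 2^384 modulo p_384; the reduced-input assumption is what removes the final correction that bignum_deamont_p384 needs. A reference model in Python (pow with a negative exponent needs Python 3.8+; illustrative, not part of the source):

    p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
    R = 2**384
    Rinv = pow(R, -1, p_384)

    def demont(x):                   # the effect of the six montreds steps
        assert 0 <= x < p_384        # reduced input: no correction needed
        return (x * Rinv) % p_384

    x = 0xabcdef0123456789 % p_384
    assert (demont(x) * R) % p_384 == x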
wlsfx/bnbb
2,043
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_neg_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Negate modulo p_384, z := (-x) mod p_384, assuming x reduced
// Input x[6]; output z[6]
//
//    extern void bignum_neg_p384(uint64_t z[static 6], const uint64_t x[static 6]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_neg_p384)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_neg_p384)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_neg_p384)
        .text
        .balign 4

#define z x0
#define x x1

#define p x2
#define t x3

#define d0 x4
#define d1 x5
#define d2 x6
#define d3 x7
#define d4 x8
#define d5 x9

S2N_BN_SYMBOL(bignum_neg_p384):
        CFI_START

// Load the 6 digits of x

        ldp     d0, d1, [x]
        ldp     d2, d3, [x, #16]
        ldp     d4, d5, [x, #32]

// Set a bitmask p for the input being nonzero, so that we avoid doing
// -0 = p_384 and hence maintain strict modular reduction

        orr     p, d0, d1
        orr     t, d2, d3
        orr     p, p, t
        orr     t, d4, d5
        orr     p, p, t
        cmp     p, #0
        csetm   p, ne

// Mask the complicated lower three words of p_384 = [-1;-1;-1;n2;n1;n0]
// and subtract, using mask itself for upper digits

        and     t, p, #0x00000000ffffffff
        subs    d0, t, d0
        and     t, p, #0xffffffff00000000
        sbcs    d1, t, d1
        and     t, p, #0xfffffffffffffffe
        sbcs    d2, t, d2
        sbcs    d3, p, d3
        sbcs    d4, p, d4
        sbc     d5, p, d5

// Write back the result

        stp     d0, d1, [z]
        stp     d2, d3, [z, #16]
        stp     d4, d5, [z, #32]

// Return

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_neg_p384)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
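The zero mask keeps the result strictly reduced: without it, negating 0 would produce p_384. In Python terms (an illustrative sketch, not from the source):

    p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

    def neg_p384(x):                     # x assumed reduced: 0 <= x < p_384
        return (p_384 if x else 0) - x   # mask keeps -0 = 0, not p_384

    assert neg_p384(0) == 0
    assert (neg_p384(5) + 5) % p_384 == 0 and neg_p384(5) < p_384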
wlsfx/bnbb
4,024
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_cmul_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Multiply by a single word modulo p_384, z := (c * x) mod p_384, assuming
// x reduced
// Inputs c, x[6]; output z[6]
//
//    extern void bignum_cmul_p384(uint64_t z[static 6], uint64_t c,
//                                 const uint64_t x[static 6]);
//
// Standard ARM ABI: X0 = z, X1 = c, X2 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p384)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_p384)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p384)
        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p384_alt)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_p384_alt)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p384_alt)
        .text
        .balign 4

#define z x0
#define c x1
#define x x2

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x6
#define d5 x7

#define a0 x8
#define a1 x9
#define a2 x10
#define a3 x11
#define a4 x12
#define a5 x13

// Some shared here

#define h x1
#define h1 x12
#define hn x13
#define m x8
#define l x9

S2N_BN_SYMBOL(bignum_cmul_p384):
S2N_BN_SYMBOL(bignum_cmul_p384_alt):
        CFI_START

// First do the multiply, straightforwardly, getting [h; d5; ...; d0]

        ldp     a0, a1, [x]
        ldp     a2, a3, [x, #16]
        ldp     a4, a5, [x, #32]
        mul     d0, c, a0
        mul     d1, c, a1
        mul     d2, c, a2
        mul     d3, c, a3
        mul     d4, c, a4
        mul     d5, c, a5
        umulh   a0, c, a0
        umulh   a1, c, a1
        umulh   a2, c, a2
        umulh   a3, c, a3
        umulh   a4, c, a4
        umulh   h, c, a5
        adds    d1, d1, a0
        adcs    d2, d2, a1
        adcs    d3, d3, a2
        adcs    d4, d4, a3
        adcs    d5, d5, a4
        adc     h, h, xzr

// Let h be the top word of this intermediate product and l the low 6 words.
// By the range hypothesis on the input, we know h1 = h + 1 does not wrap.
// And then -p_384 <= z - h1 * p_384 < p_384, so we just need to subtract
// h1 * p_384 and then correct if that is negative by adding p_384.
//
// Write p_384 = 2^384 - r where r = 2^128 + 2^96 - 2^32 + 1
//
// We want z - (h + 1) * (2^384 - r)
//       = (2^384 * h + l) - (h + 1) * (2^384 - r)
//       = (l + (h + 1) * r) - 2^384.
//
// Thus we can do the computation in 6 words of l + (h + 1) * r, and if it
// does *not* carry we need to add p_384. We can rewrite this as the following,
// using ~h = 2^64 - (h + 1) and absorbing the 2^64 in the higher term
// using h instead of h + 1.
//
// l + (h + 1) * r
// = l + 2^128 * (h + 1) + 2^96 * (h + 1) - 2^32 * (h + 1) + (h + 1)
// = l + 2^128 * (h + 1) + 2^96 * h + 2^32 * ~h + (h + 1)

        add     h1, h, #1
        orn     hn, xzr, h
        lsl     a0, hn, #32
        extr    a1, h, hn, #32
        lsr     a2, h, #32
        adds    a0, a0, h1
        adcs    a1, a1, xzr
        adcs    a2, a2, h1
        adc     a3, xzr, xzr
        adds    d0, d0, a0
        adcs    d1, d1, a1
        adcs    d2, d2, a2
        adcs    d3, d3, a3
        adcs    d4, d4, xzr
        adcs    d5, d5, xzr

// Catch the carry and do a masked addition of p_384

        csetm   m, cc
        mov     l, #0x00000000ffffffff
        and     l, l, m
        adds    d0, d0, l
        eor     l, l, m
        adcs    d1, d1, l
        mov     l, #0xfffffffffffffffe
        and     l, l, m
        adcs    d2, d2, l
        adcs    d3, d3, m
        adcs    d4, d4, m
        adc     d5, d5, m

// Store the result

        stp     d0, d1, [z]
        stp     d2, d3, [z, #16]
        stp     d4, d5, [z, #32]

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_cmul_p384)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
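The identity in the comment above is easy to machine-check: z - (h+1) * p_384 equals (l + (h+1) * r) - 2^384, so the carry-out of the 6-word sum decides whether p_384 must be added back. A Python sketch (illustrative, not from the source):

    import random
    p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
    r = 2**384 - p_384               # r = 2^128 + 2^96 - 2^32 + 1

    c = random.getrandbits(64)
    x = random.randrange(p_384)
    z = c * x
    h, l = z >> 384, z % 2**384
    s = l + (h + 1) * r              # the 6-word computation in the code
    assert s - 2**384 == z - (h + 1) * p_384
    res = s - 2**384                 # carry-out of the 6-word sum <=> s >= 2^384
    if res < 0:                      # no carry: add p_384 back
        res += p_384
    assert res == z % p_384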
wlsfx/bnbb
25,286
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_montmul_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery multiply, z := (x * y / 2^384) mod p_384 // Inputs x[6], y[6]; output z[6] // // extern void bignum_montmul_p384(uint64_t z[static 6], // const uint64_t x[static 6], // const uint64_t y[static 6]); // // Does z := (2^{-384} * x * y) mod p_384, assuming that the inputs x and y // satisfy x * y <= 2^384 * p_384 (in particular this is true if we are in // the "usual" case x < p_384 and y < p_384). // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- // bignum_montmul_p384 is functionally equivalent to // unopt/bignum_montmul_p384_base. // It is written in a way that // 1. A subset of scalar multiplications in bignum_montmul_p384 are carefully // chosen and vectorized // 2. The vectorized assembly is rescheduled using the SLOTHY superoptimizer. // https://github.com/slothy-optimizer/slothy // // The output program of step 1. is as follows: // // stp x19, x20, [sp, #-16]! // stp x21, x22, [sp, #-16]! // stp x23, x24, [sp, #-16]! // ldp x3, x21, [x1] // ldr q30, [x1] // ldp x8, x24, [x1, #16] // ldp x5, x10, [x1, #32] // ldp x13, x23, [x2] // ldr q19, [x2] // ldp x6, x14, [x2, #16] // ldp x15, x17, [x2, #32] // ldr q1, [x1, #32] // ldr q28, [x2, #32] // uzp1 v5.4S, v19.4S, v30.4S // rev64 v19.4S, v19.4S // uzp1 v0.4S, v30.4S, v30.4S // mul v21.4S, v19.4S, v30.4S // uaddlp v19.2D, v21.4S // shl v19.2D, v19.2D, #32 // umlal v19.2D, v0.2S, v5.2S // mov x12, v19.d[0] // mov x16, v19.d[1] // mul x20, x8, x6 // umulh x4, x3, x13 // umulh x1, x21, x23 // umulh x2, x8, x6 // adds x4, x4, x16 // adcs x19, x1, x20 // adc x20, x2, xzr // adds x11, x4, x12 // adcs x16, x19, x4 // adcs x1, x20, x19 // adc x2, x20, xzr // adds x7, x16, x12 // adcs x4, x1, x4 // adcs x9, x2, x19 // adc x19, x20, xzr // subs x2, x3, x21 // cneg x20, x2, cc // csetm x16, cc // subs x2, x23, x13 // cneg x2, x2, cc // mul x1, x20, x2 // umulh x2, x20, x2 // cinv x16, x16, cc // eor x1, x1, x16 // eor x2, x2, x16 // cmn x16, #0x1 // adcs x11, x11, x1 // adcs x7, x7, x2 // adcs x4, x4, x16 // adcs x9, x9, x16 // adc x19, x19, x16 // subs x2, x3, x8 // cneg x20, x2, cc // csetm x16, cc // subs x2, x6, x13 // cneg x2, x2, cc // mul x1, x20, x2 // umulh x2, x20, x2 // cinv x16, x16, cc // eor x1, x1, x16 // eor x2, x2, x16 // cmn x16, #0x1 // adcs x7, x7, x1 // adcs x4, x4, x2 // adcs x9, x9, x16 // adc x19, x19, x16 // subs x2, x21, x8 // cneg x20, x2, cc // csetm x16, cc // subs x2, x6, x23 // cneg x2, x2, cc // mul x1, x20, x2 // umulh x2, x20, x2 // cinv x16, x16, cc // eor x1, x1, x16 // eor x2, x2, x16 // cmn x16, #0x1 // adcs x4, x4, x1 // adcs x20, x9, x2 // adc x16, x19, x16 // lsl x2, x12, #32 // add x19, x2, x12 // lsr x2, x19, #32 // subs x1, x2, x19 // sbc x2, x19, xzr // extr x1, x2, x1, #32 // lsr x2, x2, #32 // adds x12, x2, x19 // adc x2, xzr, xzr // subs x1, x11, x1 // sbcs x7, x7, x12 // sbcs x4, x4, x2 // sbcs x20, x20, xzr // sbcs x16, x16, xzr // sbc x9, x19, xzr // lsl x2, x1, #32 // add x19, x2, x1 // lsr x2, x19, #32 // subs x1, x2, x19 // sbc x2, x19, xzr // extr x1, x2, x1, #32 // lsr x2, x2, #32 // adds x12, x2, x19 // adc x2, xzr, xzr // subs x1, x7, x1 // sbcs x4, x4, x12 // sbcs x20, x20, x2 // sbcs x16, x16, xzr // sbcs x7, x9, xzr // sbc x9, x19, xzr // lsl x2, x1, #32 // add x19, x2, x1 // lsr x2, x19, #32 // subs 
x1, x2, x19 // sbc x2, x19, xzr // extr x12, x2, x1, #32 // lsr x2, x2, #32 // adds x1, x2, x19 // adc x2, xzr, xzr // subs x4, x4, x12 // sbcs x20, x20, x1 // sbcs x16, x16, x2 // sbcs x12, x7, xzr // sbcs x1, x9, xzr // sbc x2, x19, xzr // stp x4, x20, [x0] // @slothy:writes=buffer0 // stp x16, x12, [x0, #16] // @slothy:writes=buffer16 // stp x1, x2, [x0, #32] // @slothy:writes=buffer32 // mul x22, x24, x14 // movi v31.2D, #0x00000000ffffffff // uzp2 v16.4S, v28.4S, v28.4S // xtn v6.2S, v1.2D // xtn v30.2S, v28.2D // rev64 v28.4S, v28.4S // umull v5.2D, v6.2S, v30.2S // umull v0.2D, v6.2S, v16.2S // uzp2 v19.4S, v1.4S, v1.4S // mul v20.4S, v28.4S, v1.4S // usra v0.2D, v5.2D, #32 // umull v1.2D, v19.2S, v16.2S // uaddlp v24.2D, v20.4S // and v5.16B, v0.16B, v31.16B // umlal v5.2D, v19.2S, v30.2S // shl v19.2D, v24.2D, #32 // usra v1.2D, v0.2D, #32 // umlal v19.2D, v6.2S, v30.2S // usra v1.2D, v5.2D, #32 // mov x20, v19.d[0] // mov x16, v19.d[1] // umulh x12, x24, x14 // mov x1, v1.d[0] // mov x2, v1.d[1] // adds x4, x12, x20 // adcs x20, x1, x16 // adc x16, x2, xzr // adds x7, x4, x22 // adcs x12, x20, x4 // adcs x1, x16, x20 // adc x2, x16, xzr // adds x9, x12, x22 // adcs x19, x1, x4 // adcs x4, x2, x20 // adc x20, x16, xzr // subs x2, x24, x5 // cneg x16, x2, cc // csetm x12, cc // subs x2, x15, x14 // cneg x2, x2, cc // mul x1, x16, x2 // umulh x2, x16, x2 // cinv x12, x12, cc // eor x1, x1, x12 // eor x2, x2, x12 // cmn x12, #0x1 // adcs x11, x7, x1 // adcs x9, x9, x2 // adcs x19, x19, x12 // adcs x4, x4, x12 // adc x20, x20, x12 // subs x2, x24, x10 // cneg x16, x2, cc // csetm x12, cc // subs x2, x17, x14 // cneg x2, x2, cc // mul x1, x16, x2 // umulh x2, x16, x2 // cinv x12, x12, cc // eor x1, x1, x12 // eor x2, x2, x12 // cmn x12, #0x1 // adcs x7, x9, x1 // adcs x19, x19, x2 // adcs x4, x4, x12 // adc x20, x20, x12 // subs x2, x5, x10 // cneg x16, x2, cc // csetm x12, cc // subs x2, x17, x15 // cneg x2, x2, cc // mul x1, x16, x2 // umulh x2, x16, x2 // cinv x16, x12, cc // eor x1, x1, x16 // eor x2, x2, x16 // cmn x16, #0x1 // adcs x19, x19, x1 // adcs x12, x4, x2 // adc x1, x20, x16 // subs x2, x24, x3 // sbcs x24, x5, x21 // sbcs x21, x10, x8 // ngc x5, xzr // cmn x5, #0x1 // eor x2, x2, x5 // adcs x4, x2, xzr // eor x2, x24, x5 // adcs x20, x2, xzr // eor x2, x21, x5 // adc x16, x2, xzr // subs x2, x13, x14 // sbcs x24, x23, x15 // sbcs x8, x6, x17 // ngc x21, xzr // cmn x21, #0x1 // eor x2, x2, x21 // adcs x15, x2, xzr // eor x2, x24, x21 // adcs x14, x2, xzr // eor x2, x8, x21 // adc x6, x2, xzr // eor x9, x5, x21 // ldp x21, x2, [x0] // @slothy:reads=buffer0 // adds x10, x22, x21 // adcs x5, x11, x2 // ldp x21, x2, [x0, #16] // @slothy:reads=buffer16 // adcs x24, x7, x21 // adcs x8, x19, x2 // ldp x21, x2, [x0, #32] // @slothy:reads=buffer32 // adcs x21, x12, x21 // adcs x2, x1, x2 // adc x19, xzr, xzr // stp x10, x5, [x0] // @slothy:writes=buffer0 // stp x24, x8, [x0, #16] // @slothy:writes=buffer16 // stp x21, x2, [x0, #32] // @slothy:writes=buffer32 // mul x12, x4, x15 // mul x5, x20, x14 // mul x24, x16, x6 // umulh x8, x4, x15 // umulh x21, x20, x14 // umulh x2, x16, x6 // adds x10, x8, x5 // adcs x5, x21, x24 // adc x24, x2, xzr // adds x23, x10, x12 // adcs x8, x5, x10 // adcs x21, x24, x5 // adc x2, x24, xzr // adds x13, x8, x12 // adcs x1, x21, x10 // adcs x10, x2, x5 // adc x5, x24, xzr // subs x2, x4, x20 // cneg x24, x2, cc // csetm x8, cc // subs x2, x14, x15 // cneg x2, x2, cc // mul x21, x24, x2 // umulh x2, x24, x2 // cinv x8, x8, cc // eor x21, x21, x8 // 
eor x2, x2, x8 // cmn x8, #0x1 // adcs x23, x23, x21 // adcs x13, x13, x2 // adcs x1, x1, x8 // adcs x10, x10, x8 // adc x5, x5, x8 // subs x2, x4, x16 // cneg x24, x2, cc // csetm x8, cc // subs x2, x6, x15 // cneg x2, x2, cc // mul x21, x24, x2 // umulh x2, x24, x2 // cinv x8, x8, cc // eor x21, x21, x8 // eor x2, x2, x8 // cmn x8, #0x1 // adcs x4, x13, x21 // adcs x13, x1, x2 // adcs x1, x10, x8 // adc x10, x5, x8 // subs x2, x20, x16 // cneg x24, x2, cc // csetm x8, cc // subs x2, x6, x14 // cneg x2, x2, cc // mul x21, x24, x2 // umulh x2, x24, x2 // cinv x5, x8, cc // eor x21, x21, x5 // eor x2, x2, x5 // cmn x5, #0x1 // adcs x24, x13, x21 // adcs x8, x1, x2 // adc x21, x10, x5 // ldp x20, x16, [x0] // @slothy:reads=buffer0 // ldp x17, x15, [x0, #16] // @slothy:reads=buffer16 // ldp x14, x6, [x0, #32] // @slothy:reads=buffer32 // cmn x9, #0x1 // eor x2, x12, x9 // adcs x12, x2, x20 // eor x2, x23, x9 // adcs x23, x2, x16 // eor x2, x4, x9 // adcs x13, x2, x17 // eor x2, x24, x9 // adcs x10, x2, x15 // eor x2, x8, x9 // adcs x5, x2, x14 // eor x2, x21, x9 // adcs x24, x2, x6 // adcs x1, x9, x19 // adcs x8, x9, xzr // adcs x21, x9, xzr // adc x2, x9, xzr // adds x10, x10, x20 // adcs x5, x5, x16 // adcs x24, x24, x17 // adcs x17, x1, x15 // adcs x15, x8, x14 // adcs x14, x21, x6 // adc x6, x2, x19 // lsl x2, x12, #32 // add x1, x2, x12 // lsr x2, x1, #32 // subs x21, x2, x1 // sbc x2, x1, xzr // extr x21, x2, x21, #32 // lsr x2, x2, #32 // adds x8, x2, x1 // adc x2, xzr, xzr // subs x21, x23, x21 // sbcs x23, x13, x8 // sbcs x10, x10, x2 // sbcs x5, x5, xzr // sbcs x24, x24, xzr // sbc x13, x1, xzr // lsl x2, x21, #32 // add x1, x2, x21 // lsr x2, x1, #32 // subs x21, x2, x1 // sbc x2, x1, xzr // extr x21, x2, x21, #32 // lsr x2, x2, #32 // adds x8, x2, x1 // adc x2, xzr, xzr // subs x21, x23, x21 // sbcs x10, x10, x8 // sbcs x5, x5, x2 // sbcs x24, x24, xzr // sbcs x23, x13, xzr // sbc x13, x1, xzr // lsl x2, x21, #32 // add x1, x2, x21 // lsr x2, x1, #32 // subs x21, x2, x1 // sbc x2, x1, xzr // extr x8, x2, x21, #32 // lsr x2, x2, #32 // adds x21, x2, x1 // adc x2, xzr, xzr // subs x10, x10, x8 // sbcs x5, x5, x21 // sbcs x24, x24, x2 // sbcs x8, x23, xzr // sbcs x21, x13, xzr // sbc x2, x1, xzr // adds x23, x17, x8 // adcs x13, x15, x21 // adcs x1, x14, x2 // adc x2, x6, xzr // add x8, x2, #0x1 // lsl x2, x8, #32 // subs x21, x8, x2 // sbc x2, x2, xzr // adds x10, x10, x21 // adcs x5, x5, x2 // adcs x24, x24, x8 // adcs x8, x23, xzr // adcs x21, x13, xzr // adcs x13, x1, xzr // csetm x1, cc // mov x2, #0xffffffff // and x2, x2, x1 // adds x10, x10, x2 // eor x2, x2, x1 // adcs x5, x5, x2 // mov x2, #0xfffffffffffffffe // and x2, x2, x1 // adcs x24, x24, x2 // adcs x8, x8, x1 // adcs x21, x21, x1 // adc x2, x13, x1 // stp x10, x5, [x0] // @slothy:writes=buffer0 // stp x24, x8, [x0, #16] // @slothy:writes=buffer16 // stp x21, x2, [x0, #32] // @slothy:writes=buffer32 // ldp x23, x24, [sp], #16 // ldp x21, x22, [sp], #16 // ldp x19, x20, [sp], #16 // ret // // The bash script used for step 2 is as follows: // // # Store the assembly instructions except the last 'ret' and // # callee-register store/loads as, say, 'input.S'. // export OUTPUTS="[hint_buffer0,hint_buffer16,hint_buffer32]" // export RESERVED_REGS="[x18,x25,x26,x27,x28,x29,x30,sp,q8,q9,q10,q11,q12,q13,q14,q15,v8,v9,v10,v11,v12,v13,v14,v15]" // <s2n-bignum>/tools/external/slothy.sh input.S my_out_dir // # my_out_dir/3.opt.s is the optimized assembly. 
Its output may differ // # from this file since the sequence is non-deterministically chosen. // # Please add 'ret' at the end of the output assembly. #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montmul_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p384) .text .balign 4 S2N_BN_SYMBOL(bignum_montmul_p384): CFI_START // Save some registers CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) ldr q3, [x1] ldr q25, [x2] ldp x13, x23, [x2] ldp x3, x21, [x1] rev64 v23.4S, v25.4S uzp1 v17.4S, v25.4S, v3.4S umulh x15, x3, x13 mul v6.4S, v23.4S, v3.4S uzp1 v3.4S, v3.4S, v3.4S ldr q27, [x2, #32] ldp x8, x24, [x1, #16] subs x6, x3, x21 ldr q0, [x1, #32] movi v23.2D, #0x00000000ffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4S, v27.4S uzp2 v25.4S, v27.4S, v27.4S cneg x4, x6, cc subs x7, x23, x13 xtn v22.2S, v0.2D xtn v24.2S, v27.2D cneg x20, x7, cc ldp x6, x14, [x2, #16] mul v27.4S, v4.4S, v0.4S uaddlp v20.2D, v6.4S cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4S, v0.4S, v0.4S umull v21.2D, v22.2S, v25.2S shl v0.2D, v20.2D, #32 umlal v0.2D, v3.2S, v17.2S mul x22, x8, x6 umull v1.2D, v6.2S, v25.2S subs x12, x3, x8 umull v20.2D, v22.2S, v24.2S cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2D, v20.2D, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2D, v21.2D, #32 adds x22, x15, x7 and v26.16B, v21.16B, v23.16B adcs x16, x12, x15 uaddlp v25.2D, v27.4S adcs x9, x19, x12 umlal v26.2D, v6.2S, v24.2S adc x4, x19, xzr adds x16, x16, x7 shl v27.2D, v25.2D, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2D, v22.2S, v24.2S mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2D, v26.2D, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [x2, #32] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [x1, #32] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [x0] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [x0, #16] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [x0, #32] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 
adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [x0] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [x0, #16] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [x0, #32] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [x0] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [x0, #16] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [x0, #32] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc subs x21, x6, x15 cneg x22, x21, cc cinv x21, x24, cc subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc csetm x24, cc subs x20, x14, x15 cinv x24, x24, cc mul x22, x3, x22 cneg x3, x20, cc subs x13, x6, x14 cneg x20, x13, cc cinv x15, x16, cc adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [x0] ldp x21, x12, [x0, #16] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [x0, #32] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, 
xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [x0] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [x0, #16] adc x12, x15, x23 stp x21, x12, [x0, #32] // Restore registers and return CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montmul_p384) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
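Whatever instruction schedule SLOTHY picks, the specification stays fixed: z = (x * y / 2^384) mod p_384 under the stated precondition x * y <= 2^384 * p_384. A Python reference model (pow with a negative exponent needs Python 3.8+; illustrative only, not part of the source):

    p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
    R = 2**384

    def montmul_ref(x, y):           # specification of the routine
        assert x * y <= R * p_384    # stated precondition
        return (x * y * pow(R, -1, p_384)) % p_384

    x, y = 0x1234 % p_384, 0x5678 % p_384
    z = montmul_ref(x, y)
    assert (z * R) % p_384 == (x * y) % p_384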
wlsfx/bnbb
71,936
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_inv_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Modular inverse modulo p_384 = 2^384 - 2^128 - 2^96 + 2^32 - 1 // Input x[6]; output z[6] // // extern void bignum_inv_p384(uint64_t z[static 6],const uint64_t x[static 6]); // // If the 6-digit input x is coprime to p_384, i.e. is not divisible // by it, returns z < p_384 such that x * z == 1 (mod p_384). Note that // x does not need to be reduced modulo p_384, but the output always is. // If the input is divisible (i.e. is 0 or p_384), then there can be no // modular inverse and z = 0 is returned. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_inv_p384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_inv_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_inv_p384) .text .balign 4 // Size in bytes of a 64-bit word #define N 8 // Used for the return pointer #define res x20 // Loop counter and d = 2 * delta value for divstep #define i x21 #define d x22 // Registers used for matrix element magnitudes and signs #define m00 x10 #define m01 x11 #define m10 x12 #define m11 x13 #define s00 x14 #define s01 x15 #define s10 x16 #define s11 x17 // Initial carries for combinations #define car0 x9 #define car1 x19 // Input and output, plain registers treated according to pattern #define reg0 x0, #0 #define reg1 x1, #0 #define reg2 x2, #0 #define reg3 x3, #0 #define reg4 x4, #0 #define x x1, #0 #define z x0, #0 // Pointer-offset pairs for temporaries on stack // The u and v variables are 6 words each as expected, but the f and g // variables are 8 words each -- they need to have at least one extra // word for a sign word, and to preserve alignment we "round up" to 8. // In fact, we currently keep an extra word in u and v as well. #define f sp, #0 #define g sp, #(8*N) #define u sp, #(16*N) #define v sp, #(24*N) // Total size to reserve on the stack #define NSPACE 32*N // --------------------------------------------------------------------------- // Core signed almost-Montgomery reduction macro. Takes input in // [d6;d5;d4;d3;d2;d1;d0] and returns result in [d6;d5d4;d3;d2;d1], adding // to the existing [d6;d5;d4;d3;d2;d1], and re-using d0 as a temporary // internally as well as t0, t1, t2. This is almost-Montgomery, i.e. the // result fits in 6 digits but is not necessarily strictly reduced mod p_384. // --------------------------------------------------------------------------- #define amontred(d6,d5,d4,d3,d2,d1,d0, t3,t2,t1) \ /* We only know the input is -2^444 < x < 2^444. To do traditional */ \ /* unsigned Montgomery reduction, start by adding 2^61 * p_384. */ \ mov t1, #0xe000000000000000 __LF \ adds d0, d0, t1 __LF \ mov t2, #0x000000001fffffff __LF \ adcs d1, d1, t2 __LF \ mov t3, #0xffffffffe0000000 __LF \ bic t3, t3, #0x2000000000000000 __LF \ adcs d2, d2, t3 __LF \ sbcs d3, d3, xzr __LF \ sbcs d4, d4, xzr __LF \ sbcs d5, d5, xzr __LF \ mov t1, #0x1fffffffffffffff __LF \ adc d6, d6, t1 __LF \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ /* Store it back into d0 since we no longer need that digit. 
*/ \ add d0, d0, d0, lsl #32 __LF \ /* Now let [t3;t2;t1;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel d0 so we don't need it */ \ mov t1, #0xffffffff00000001 __LF \ umulh t1, t1, d0 __LF \ mov t2, #0x00000000ffffffff __LF \ mul t3, t2, d0 __LF \ umulh t2, t2, d0 __LF \ adds t1, t1, t3 __LF \ adcs t2, t2, d0 __LF \ cset t3, cs __LF \ /* Now x + p_384 * w = (x + 2^384 * w) - (2^384 - p_384) * w */ \ /* We catch the net top carry from add-subtract in the digit d0 */ \ adds d6, d6, d0 __LF \ cset d0, cs __LF \ subs d1, d1, t1 __LF \ sbcs d2, d2, t2 __LF \ sbcs d3, d3, t3 __LF \ sbcs d4, d4, xzr __LF \ sbcs d5, d5, xzr __LF \ sbcs d6, d6, xzr __LF \ sbcs d0, d0, xzr __LF \ /* Now if d0 is nonzero we subtract p_384 (almost-Montgomery) */ \ neg d0, d0 __LF \ and t1, d0, #0x00000000ffffffff __LF \ and t2, d0, #0xffffffff00000000 __LF \ and t3, d0, #0xfffffffffffffffe __LF \ subs d1, d1, t1 __LF \ sbcs d2, d2, t2 __LF \ sbcs d3, d3, t3 __LF \ sbcs d4, d4, d0 __LF \ sbcs d5, d5, d0 __LF \ sbc d6, d6, d0 // Very similar to a subroutine call to the s2n-bignum word_divstep59. // But different in register usage and returning the final matrix in // registers as follows // // [ m00 m01] // [ m10 m11] #define divstep59() \ and x4, x2, #0xfffff __LF \ orr x4, x4, #0xfffffe0000000000 __LF \ and x5, x3, #0xfffff __LF \ orr x5, x5, #0xc000000000000000 __LF \ tst x5, #0x1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ 
tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ asr x5, x5, #1 __LF \ add x8, x4, #0x100, lsl #12 __LF \ sbfx x8, x8, #21, #21 __LF \ mov x11, #0x100000 __LF \ add x11, x11, x11, lsl #21 __LF \ add x9, x4, x11 __LF \ asr x9, x9, #42 __LF \ add x10, x5, #0x100, lsl #12 __LF \ sbfx x10, x10, #21, #21 __LF \ add x11, x5, x11 __LF \ asr x11, x11, #42 __LF \ mul x6, x8, x2 __LF \ mul x7, x9, x3 __LF \ mul x2, x10, x2 __LF \ mul x3, x11, x3 __LF \ add x4, x6, x7 __LF \ add x5, x2, x3 __LF \ asr x2, x4, #20 __LF \ asr x3, x5, #20 __LF \ and x4, x2, #0xfffff __LF \ orr x4, x4, #0xfffffe0000000000 __LF \ and x5, x3, #0xfffff __LF \ orr x5, x5, #0xc000000000000000 __LF \ tst x5, #0x1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg 
x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, 
x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ asr x5, x5, #1 __LF \ add x12, x4, #0x100, lsl #12 __LF \ sbfx x12, x12, #21, #21 __LF \ mov x15, #0x100000 __LF \ add x15, x15, x15, lsl #21 __LF \ add x13, x4, x15 __LF \ asr x13, x13, #42 __LF \ add x14, x5, #0x100, lsl #12 __LF \ sbfx x14, x14, #21, #21 __LF \ add x15, x5, x15 __LF \ asr x15, x15, #42 __LF \ mul x6, x12, x2 __LF \ mul x7, x13, x3 __LF \ mul x2, x14, x2 __LF \ mul x3, x15, x3 __LF \ add x4, x6, x7 __LF \ add x5, x2, x3 __LF \ asr x2, x4, #20 __LF \ asr x3, x5, #20 __LF \ and x4, x2, #0xfffff __LF \ orr x4, x4, #0xfffffe0000000000 __LF \ and x5, x3, #0xfffff __LF \ orr x5, x5, #0xc000000000000000 __LF \ tst x5, #0x1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ mul x2, x12, x8 __LF \ mul x3, x12, x9 __LF \ mul x6, x14, x8 __LF \ mul x7, x14, x9 __LF \ madd x8, x13, x10, x2 __LF \ madd x9, x13, x11, x3 __LF \ madd x16, x15, x10, x6 __LF \ madd x17, x15, x11, x7 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ 
csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ asr x5, x5, #1 __LF \ add x12, x4, #0x100, lsl #12 __LF \ sbfx x12, x12, #22, #21 __LF \ mov x15, #0x100000 __LF \ add x15, x15, x15, lsl #21 __LF \ add x13, x4, x15 __LF \ asr x13, x13, #43 __LF \ add x14, x5, #0x100, lsl #12 __LF \ sbfx x14, x14, #22, #21 __LF \ add x15, x5, x15 __LF \ asr x15, x15, #43 __LF \ mneg x2, x12, x8 __LF \ mneg x3, x12, x9 __LF \ mneg x4, x14, x8 __LF \ mneg x5, x14, x9 __LF \ msub m00, x13, x16, x2 __LF \ msub m01, x13, x17, x3 __LF \ msub m10, x15, x16, x4 __LF \ msub m11, x15, x17, x5 S2N_BN_SYMBOL(bignum_inv_p384): CFI_START // Save registers and make room for temporaries CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_DEC_SP(NSPACE) // Save the return pointer for the end so we can overwrite x0 later mov res, x0 // Copy the prime and input into the main f and g variables respectively. // Make sure x is reduced so that g <= f as assumed in the bound proof. mov x10, #0x00000000ffffffff mov x11, #0xffffffff00000000 mov x12, #0xfffffffffffffffe mov x15, #0xffffffffffffffff stp x10, x11, [f] stp x12, x15, [f+2*N] stp x15, x15, [f+4*N] str xzr, [f+6*N] ldp x2, x3, [x1] subs x10, x2, x10 sbcs x11, x3, x11 ldp x4, x5, [x1, #(2*N)] sbcs x12, x4, x12 sbcs x13, x5, x15 ldp x6, x7, [x1, #(4*N)] sbcs x14, x6, x15 sbcs x15, x7, x15 csel x2, x2, x10, cc csel x3, x3, x11, cc csel x4, x4, x12, cc csel x5, x5, x13, cc csel x6, x6, x14, cc csel x7, x7, x15, cc stp x2, x3, [g] stp x4, x5, [g+2*N] stp x6, x7, [g+4*N] str xzr, [g+6*N] // Also maintain reduced < 2^384 vector [u,v] such that // [f,g] == x * 2^{5*i-75} * [u,v] (mod p_384) // starting with [p_384,x] == x * 2^{5*0-75} * [0,2^75] (mod p_384) // The weird-looking 5*i modifications come in because we are doing // 64-bit word-sized Montgomery reductions at each stage, which is // 5 bits more than the 59-bit requirement to keep things stable. stp xzr, xzr, [u] stp xzr, xzr, [u+2*N] stp xzr, xzr, [u+4*N] mov x10, #2048 stp xzr, x10, [v] stp xzr, xzr, [v+2*N] stp xzr, xzr, [v+4*N] // Start of main loop. 
We jump into the middle so that the divstep // portion is common to the special fifteenth iteration after a uniform // first 14. mov i, #15 mov d, #1 b Lbignum_inv_p384_midloop Lbignum_inv_p384_loop: // Separate the matrix elements into sign-magnitude pairs cmp m00, xzr csetm s00, mi cneg m00, m00, mi cmp m01, xzr csetm s01, mi cneg m01, m01, mi cmp m10, xzr csetm s10, mi cneg m10, m10, mi cmp m11, xzr csetm s11, mi cneg m11, m11, mi // Adjust the initial values to allow for complement instead of negation // This initial offset is the same for [f,g] and [u,v] compositions. // Save it in stable registers for the [u,v] part and do [f,g] first. and x0, m00, s00 and x1, m01, s01 add car0, x0, x1 and x0, m10, s10 and x1, m11, s11 add car1, x0, x1 // Now the computation of the updated f and g values. This maintains a // 2-word carry between stages so we can conveniently insert the shift // right by 59 before storing back, and not overwrite digits we need // again of the old f and g values. // // Digit 0 of [f,g] ldr x7, [f] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, car0, x0 adc x2, xzr, x1 ldr x8, [g] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 adc x2, x2, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x5, car1, x0 adc x3, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 adc x3, x3, x1 // Digit 1 of [f,g] ldr x7, [f+N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [g+N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 adc x6, x6, x1 extr x4, x2, x4, #59 str x4, [f] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x3, x3, x0 adc x4, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x3, x3, x0 adc x4, x4, x1 extr x5, x3, x5, #59 str x5, [g] // Digit 2 of [f,g] ldr x7, [f+2*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [g+2*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 adc x5, x5, x1 extr x2, x6, x2, #59 str x2, [f+N] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x4, x4, x0 adc x2, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x4, x4, x0 adc x2, x2, x1 extr x3, x4, x3, #59 str x3, [g+N] // Digit 3 of [f,g] ldr x7, [f+3*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, xzr, x1 ldr x8, [g+3*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 adc x3, x3, x1 extr x6, x5, x6, #59 str x6, [f+2*N] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x2, x2, x0 adc x6, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x2, x2, x0 adc x6, x6, x1 extr x4, x2, x4, #59 str x4, [g+2*N] // Digit 4 of [f,g] ldr x7, [f+4*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x3, x3, x0 adc x4, xzr, x1 ldr x8, [g+4*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x3, x3, x0 adc x4, x4, x1 extr x5, x3, x5, #59 str x5, [f+3*N] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x6, x6, x0 adc x5, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x6, x6, x0 adc x5, x5, x1 extr x2, x6, x2, #59 str x2, [g+3*N] // Digits 5 and 6 of [f,g] ldr x7, [f+5*N] eor x1, x7, s00 ldr x23, [f+6*N] eor x2, x23, s00 and x2, x2, m00 neg x2, x2 mul x0, x1, m00 umulh x1, x1, m00 adds x4, x4, x0 adc x2, x2, x1 ldr x8, [g+5*N] eor x1, x8, s01 ldr x24, [g+6*N] eor x0, x24, s01 and x0, x0, m01 sub x2, x2, x0 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 adc x2, x2, x1 extr x3, x4, x3, #59 str x3, [f+4*N] 
extr x4, x2, x4, #59 str x4, [f+5*N] asr x2, x2, #59 str x2, [f+6*N] eor x1, x7, s10 eor x4, x23, s10 and x4, x4, m10 neg x4, x4 mul x0, x1, m10 umulh x1, x1, m10 adds x5, x5, x0 adc x4, x4, x1 eor x1, x8, s11 eor x0, x24, s11 and x0, x0, m11 sub x4, x4, x0 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 adc x4, x4, x1 extr x6, x5, x6, #59 str x6, [g+4*N] extr x5, x4, x5, #59 str x5, [g+5*N] asr x4, x4, #59 str x4, [g+6*N] // Now the computation of the updated u and v values and their // Montgomery reductions. A very similar accumulation except that // the top words of u and v are unsigned and we don't shift. // // Digit 0 of [u,v] ldr x7, [u] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, car0, x0 adc x2, xzr, x1 ldr x8, [v] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u] adc x2, x2, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x5, car1, x0 adc x3, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 str x5, [v] adc x3, x3, x1 // Digit 1 of [u,v] ldr x7, [u+N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [v+N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 str x2, [u+N] adc x6, x6, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x3, x3, x0 adc x4, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x3, x3, x0 str x3, [v+N] adc x4, x4, x1 // Digit 2 of [u,v] ldr x7, [u+2*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [v+2*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 str x6, [u+2*N] adc x5, x5, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x4, x4, x0 adc x2, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x4, x4, x0 str x4, [v+2*N] adc x2, x2, x1 // Digit 3 of [u,v] ldr x7, [u+3*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, xzr, x1 ldr x8, [v+3*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 str x5, [u+3*N] adc x3, x3, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x2, x2, x0 adc x6, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x2, x2, x0 str x2, [v+3*N] adc x6, x6, x1 // Digit 4 of [u,v] ldr x7, [u+4*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x3, x3, x0 adc x4, xzr, x1 ldr x8, [v+4*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x3, x3, x0 str x3, [u+4*N] adc x4, x4, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x6, x6, x0 adc x5, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x6, x6, x0 str x6, [v+4*N] adc x5, x5, x1 // Digits 5 and 6 of [u,v] (top is unsigned) ldr x7, [u+5*N] eor x1, x7, s00 and x2, s00, m00 neg x2, x2 mul x0, x1, m00 umulh x1, x1, m00 adds x4, x4, x0 adc x2, x2, x1 ldr x8, [v+5*N] eor x1, x8, s01 and x0, s01, m01 sub x2, x2, x0 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u+5*N] adc x2, x2, x1 str x2, [u+6*N] eor x1, x7, s10 and x4, s10, m10 neg x4, x4 mul x0, x1, m10 umulh x1, x1, m10 adds x5, x5, x0 adc x4, x4, x1 eor x1, x8, s11 and x0, s11, m11 sub x4, x4, x0 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 str x5, [v+5*N] adc x4, x4, x1 str x4, [v+6*N] // Montgomery reduction of u ldp x0, x1, [u] ldp x2, x3, [u+16] ldp x4, x5, [u+32] ldr x6, [u+48] amontred(x6,x5,x4,x3,x2,x1,x0, x9,x8,x7) stp x1, x2, [u] stp x3, x4, [u+16] stp x5, x6, [u+32] // Montgomery reduction of v ldp x0, x1, [v] ldp x2, x3, [v+16] ldp x4, x5, [v+32] ldr x6, [v+48] 
amontred(x6,x5,x4,x3,x2,x1,x0, x9,x8,x7) stp x1, x2, [v] stp x3, x4, [v+16] stp x5, x6, [v+32] Lbignum_inv_p384_midloop: mov x1, d ldr x2, [f] ldr x3, [g] divstep59() mov d, x1 // Next iteration subs i, i, #1 bne Lbignum_inv_p384_loop // The 15th and last iteration does not need anything except the // u value and the sign of f; the latter can be obtained from the // lowest word of f. So it's done differently from the main loop. // Find the sign of the new f. For this we just need one digit // since we know (for in-scope cases) that f is either +1 or -1. // We don't explicitly shift right by 59 either, but looking at // bit 63 (or any bit >= 60) of the unshifted result is enough // to distinguish -1 from +1; this is then made into a mask. ldr x0, [f] ldr x1, [g] mul x0, x0, m00 madd x1, x1, m01, x0 asr x0, x1, #63 // Now separate out the matrix into sign-magnitude pairs // and adjust each one based on the sign of f. // // Note that at this point we expect |f|=1 and we got its // sign above, so then since [f,0] == x * [u,v] (mod p_384) // we want to flip the sign of u according to that of f. cmp m00, xzr csetm s00, mi cneg m00, m00, mi eor s00, s00, x0 cmp m01, xzr csetm s01, mi cneg m01, m01, mi eor s01, s01, x0 cmp m10, xzr csetm s10, mi cneg m10, m10, mi eor s10, s10, x0 cmp m11, xzr csetm s11, mi cneg m11, m11, mi eor s11, s11, x0 // Adjust the initial value to allow for complement instead of negation and x0, m00, s00 and x1, m01, s01 add car0, x0, x1 // Digit 0 of [u] ldr x7, [u] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, car0, x0 adc x2, xzr, x1 ldr x8, [v] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u] adc x2, x2, x1 // Digit 1 of [u] ldr x7, [u+N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [v+N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 str x2, [u+N] adc x6, x6, x1 // Digit 2 of [u] ldr x7, [u+2*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [v+2*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 str x6, [u+2*N] adc x5, x5, x1 // Digit 3 of [u] ldr x7, [u+3*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, xzr, x1 ldr x8, [v+3*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 str x5, [u+3*N] adc x3, x3, x1 // Digit 4 of [u] ldr x7, [u+4*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x3, x3, x0 adc x4, xzr, x1 ldr x8, [v+4*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x3, x3, x0 str x3, [u+4*N] adc x4, x4, x1 // Digits 5 and 6 of [u] (top is unsigned) ldr x7, [u+5*N] eor x1, x7, s00 and x2, s00, m00 neg x2, x2 mul x0, x1, m00 umulh x1, x1, m00 adds x4, x4, x0 adc x2, x2, x1 ldr x8, [v+5*N] eor x1, x8, s01 and x0, s01, m01 sub x2, x2, x0 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u+5*N] adc x2, x2, x1 str x2, [u+6*N] // Montgomery reduction of u. 
This needs to be strict not "almost" // so it is followed by an optional subtraction of p_384 ldp x10, x0, [u] ldp x1, x2, [u+16] ldp x3, x4, [u+32] ldr x5, [u+48] amontred(x5,x4,x3,x2,x1,x0,x10, x9,x8,x7) mov x10, #0x00000000ffffffff subs x10, x0, x10 mov x11, #0xffffffff00000000 sbcs x11, x1, x11 mov x12, #0xfffffffffffffffe sbcs x12, x2, x12 mov x15, #0xffffffffffffffff sbcs x13, x3, x15 sbcs x14, x4, x15 sbcs x15, x5, x15 csel x0, x0, x10, cc csel x1, x1, x11, cc csel x2, x2, x12, cc csel x3, x3, x13, cc csel x4, x4, x14, cc csel x5, x5, x15, cc // Store it back to the final output stp x0, x1, [res] stp x2, x3, [res, #16] stp x4, x5, [res, #32] // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_inv_p384) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
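// ----------------------------------------------------------------------------
// Reference note (kept entirely in comments so the file stays valid
// assembly): divstep59() above packs 59 iterations of a Bernstein-Yang
// style "divstep" and also accumulates the 2x2 transition matrix
// [m00 m01; m10 m11] that the main loop applies to [f,g] and [u,v].
// A minimal, non-constant-time C sketch of one textbook divstep follows;
// it is illustrative only (the assembly instead uses branch-free
// ccmp/csel/cneg chains and keeps the delta counter d in a scaled form):
//
//   #include <stdint.h>
//
//   /* One divstep on truncated values f (odd) and g; delta starts at 1. */
//   static void divstep(int64_t *delta, int64_t *f, int64_t *g)
//   {
//       if (*delta > 0 && (*g & 1)) {     /* swap-and-subtract case */
//           int64_t t = *g;
//           *g = (*g - *f) >> 1;          /* exact halving: g - f is even */
//           *f = t;
//           *delta = 1 - *delta;
//       } else {
//           if (*g & 1)                   /* make g even, then halve */
//               *g += *f;
//           *g >>= 1;
//           *delta = 1 + *delta;
//       }
//   }
// ----------------------------------------------------------------------------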
wlsfx/bnbb
210,203
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/p384_montjscalarmul_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery-Jacobian form scalar multiplication for P-384 // Input scalar[6], point[18]; output res[18] // // extern void p384_montjscalarmul_alt // (uint64_t res[static 18], // const uint64_t scalar[static 6], // const uint64_t point[static 18]); // // This function is a variant of its affine point version p384_scalarmul_alt. // Here, input and output points are assumed to be in Jacobian form with // their coordinates in the Montgomery domain. Thus, if priming indicates // Montgomery form, x' = (2^384 * x) mod p_384 etc., each point argument // is a triple (x',y',z') representing the affine point (x/z^2,y/z^3) when // z' is nonzero or the point at infinity (group identity) if z' = 0. // // Given scalar = n and point = P, assumed to be on the NIST elliptic // curve P-384, returns a representation of n * P. If the result is the // point at infinity (either because the input point was or because the // scalar was a multiple of the group order n_384) then the output is // guaranteed to represent the point at infinity, i.e. to have its z // coordinate zero. // // Standard ARM ABI: X0 = res, X1 = scalar, X2 = point // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjscalarmul_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjscalarmul_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjscalarmul_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 48 #define JACSIZE (3*NUMSIZE) // Safe copies of input res and additional values in variables. #define bf x22 #define sgn x23 #define j x24 #define res x25 // Intermediate variables on the stack. // The table is 16 entries, each of size JACSIZE = 3 * NUMSIZE #define scalarb sp, #(0*NUMSIZE) #define acc sp, #(1*NUMSIZE) #define tabent sp, #(4*NUMSIZE) #define tab sp, #(7*NUMSIZE) #define NSPACE 55*NUMSIZE // Avoid using .rep for the sake of the BoringSSL/AWS-LC delocator, // which doesn't accept repetitions, assembler macros etc. #define selectblock(I) \ cmp bf, #(1*I) __LF \ ldp x20, x21, [x19] __LF \ csel x0, x20, x0, eq __LF \ csel x1, x21, x1, eq __LF \ ldp x20, x21, [x19, #16] __LF \ csel x2, x20, x2, eq __LF \ csel x3, x21, x3, eq __LF \ ldp x20, x21, [x19, #32] __LF \ csel x4, x20, x4, eq __LF \ csel x5, x21, x5, eq __LF \ ldp x20, x21, [x19, #48] __LF \ csel x6, x20, x6, eq __LF \ csel x7, x21, x7, eq __LF \ ldp x20, x21, [x19, #64] __LF \ csel x8, x20, x8, eq __LF \ csel x9, x21, x9, eq __LF \ ldp x20, x21, [x19, #80] __LF \ csel x10, x20, x10, eq __LF \ csel x11, x21, x11, eq __LF \ ldp x20, x21, [x19, #96] __LF \ csel x12, x20, x12, eq __LF \ csel x13, x21, x13, eq __LF \ ldp x20, x21, [x19, #112] __LF \ csel x14, x20, x14, eq __LF \ csel x15, x21, x15, eq __LF \ ldp x20, x21, [x19, #128] __LF \ csel x16, x20, x16, eq __LF \ csel x17, x21, x17, eq __LF \ add x19, x19, #JACSIZE // Loading large constants #define movbig(nn,n3,n2,n1,n0) \ movz nn, n0 __LF \ movk nn, n1, lsl #16 __LF \ movk nn, n2, lsl #32 __LF \ movk nn, n3, lsl #48 S2N_BN_SYMBOL(p384_montjscalarmul_alt): CFI_START CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x30) CFI_DEC_SP(NSPACE) // Preserve the "res" input argument; others get processed early. mov res, x0 // Reduce the input scalar mod n_384, i.e. conditionally subtract n_384.
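// The code below uses the standard branch-free pattern for this: always
// compute scalar - n_384 through a borrow chain, then csel on the final
// carry flag between the original and the difference. A hedged C sketch
// of the same idea, illustrative only (the helper name cond_sub_n is
// invented for this note, not part of any s2n-bignum API):
//
//   #include <stdint.h>
//
//   /* z = x mod n for x < 2*n, over 6 little-endian 64-bit words.
//      Both candidate results are formed; a mask picks one, so the
//      arithmetic trace is independent of the comparison outcome. */
//   static void cond_sub_n(uint64_t z[6], const uint64_t x[6],
//                          const uint64_t n[6])
//   {
//       uint64_t t[6], borrow = 0;
//       for (int i = 0; i < 6; i++) {
//           uint64_t d1 = x[i] - n[i], b1 = x[i] < n[i];
//           t[i] = d1 - borrow;           /* propagate incoming borrow */
//           borrow = b1 | (d1 < borrow);  /* borrow out of this limb */
//       }
//       uint64_t mask = 0 - borrow;       /* all ones iff x < n */
//       for (int i = 0; i < 6; i++)
//           z[i] = (x[i] & mask) | (t[i] & ~mask);
//   }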
// Store it to "scalarb". ldp x3, x4, [x1] movbig(x15, #0xecec, #0x196a, #0xccc5, #0x2973) ldp x5, x6, [x1, #16] movbig(x16, #0x581a, #0x0db2, #0x48b0, #0xa77a) ldp x7, x8, [x1, #32] movbig(x17, #0xc763, #0x4d81, #0xf437, #0x2ddf) subs x9, x3, x15 sbcs x10, x4, x16 sbcs x11, x5, x17 adcs x12, x6, xzr adcs x13, x7, xzr adcs x14, x8, xzr csel x3, x3, x9, cc csel x4, x4, x10, cc csel x5, x5, x11, cc csel x6, x6, x12, cc csel x7, x7, x13, cc csel x8, x8, x14, cc stp x3, x4, [scalarb] stp x5, x6, [scalarb+16] stp x7, x8, [scalarb+32] // Set the tab[0] table entry to the input point = 1 * P ldp x10, x11, [x2] stp x10, x11, [tab] ldp x12, x13, [x2, #16] stp x12, x13, [tab+16] ldp x14, x15, [x2, #32] stp x14, x15, [tab+32] ldp x10, x11, [x2, #48] stp x10, x11, [tab+48] ldp x12, x13, [x2, #64] stp x12, x13, [tab+64] ldp x14, x15, [x2, #80] stp x14, x15, [tab+80] ldp x10, x11, [x2, #96] stp x10, x11, [tab+96] ldp x12, x13, [x2, #112] stp x12, x13, [tab+112] ldp x14, x15, [x2, #128] stp x14, x15, [tab+128] // Compute and record tab[1] = 2 * P, ..., tab[15] = 16 * P add x0, tab+JACSIZE*1 add x1, tab CFI_BL(Lp384_montjscalarmul_alt_p384_montjdouble) add x0, tab+JACSIZE*2 add x1, tab+JACSIZE*1 add x2, tab CFI_BL(Lp384_montjscalarmul_alt_p384_montjadd) add x0, tab+JACSIZE*3 add x1, tab+JACSIZE*1 CFI_BL(Lp384_montjscalarmul_alt_p384_montjdouble) add x0, tab+JACSIZE*4 add x1, tab+JACSIZE*3 add x2, tab CFI_BL(Lp384_montjscalarmul_alt_p384_montjadd) add x0, tab+JACSIZE*5 add x1, tab+JACSIZE*2 CFI_BL(Lp384_montjscalarmul_alt_p384_montjdouble) add x0, tab+JACSIZE*6 add x1, tab+JACSIZE*5 add x2, tab CFI_BL(Lp384_montjscalarmul_alt_p384_montjadd) add x0, tab+JACSIZE*7 add x1, tab+JACSIZE*3 CFI_BL(Lp384_montjscalarmul_alt_p384_montjdouble) add x0, tab+JACSIZE*8 add x1, tab+JACSIZE*7 add x2, tab CFI_BL(Lp384_montjscalarmul_alt_p384_montjadd) add x0, tab+JACSIZE*9 add x1, tab+JACSIZE*4 CFI_BL(Lp384_montjscalarmul_alt_p384_montjdouble) add x0, tab+JACSIZE*10 add x1, tab+JACSIZE*9 add x2, tab CFI_BL(Lp384_montjscalarmul_alt_p384_montjadd) add x0, tab+JACSIZE*11 add x1, tab+JACSIZE*5 CFI_BL(Lp384_montjscalarmul_alt_p384_montjdouble) add x0, tab+JACSIZE*12 add x1, tab+JACSIZE*11 add x2, tab CFI_BL(Lp384_montjscalarmul_alt_p384_montjadd) add x0, tab+JACSIZE*13 add x1, tab+JACSIZE*6 CFI_BL(Lp384_montjscalarmul_alt_p384_montjdouble) add x0, tab+JACSIZE*14 add x1, tab+JACSIZE*13 add x2, tab CFI_BL(Lp384_montjscalarmul_alt_p384_montjadd) add x0, tab+JACSIZE*15 add x1, tab+JACSIZE*7 CFI_BL(Lp384_montjscalarmul_alt_p384_montjdouble) // Add the recoding constant sum_i(16 * 32^i) to the scalar to allow signed // digits. The digits of the constant, in lowest-to-highest order, are as // follows; they are generated dynamically since none is a simple ARM load. // // 0x0842108421084210 // 0x1084210842108421 // 0x2108421084210842 // 0x4210842108421084 // 0x8421084210842108 // 0x0842108421084210 ldp x0, x1, [scalarb] ldp x2, x3, [scalarb+16] ldp x4, x5, [scalarb+32] movbig(x8, #0x1084, #0x2108, #0x4210, #0x8421) adds x0, x0, x8, lsr #1 adcs x1, x1, x8 lsl x8, x8, #1 adcs x2, x2, x8 lsl x8, x8, #1 adcs x3, x3, x8 lsl x8, x8, #1 adcs x4, x4, x8 lsr x8, x8, #4 adcs x5, x5, x8 cset x6, cs // Record the top bitfield then shift the whole scalar left 4 bits // to align the top of the next bitfield with the MSB (bits 379..383).
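// As an aside, the effect of the recoding constant can be sketched in C
// (illustrative only; window5 is an invented helper, not part of any
// s2n-bignum API): if m = n + sum_{i<76} 16*32^i, then for i < 76 the
// signed digit of n is (int)window5(m, i) - 16, lying in [-16,15], while
// window 76 (bits 380..384, including the carry word) is used unsigned.
//
//   #include <stdint.h>
//
//   /* Extract the i-th 5-bit window of a little-endian multiword value. */
//   static unsigned window5(const uint64_t *s, int i)
//   {
//       int b = 5 * i, w = b / 64, r = b % 64;
//       uint64_t v = s[w] >> r;
//       if (r > 59)                        /* window straddles two words */
//           v |= s[w + 1] << (64 - r);
//       return (unsigned)(v & 31);
//   }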
extr bf, x6, x5, #60 extr x5, x5, x4, #60 extr x4, x4, x3, #60 extr x3, x3, x2, #60 extr x2, x2, x1, #60 extr x1, x1, x0, #60 lsl x0, x0, #4 stp x0, x1, [scalarb] stp x2, x3, [scalarb+16] stp x4, x5, [scalarb+32] // Initialize the accumulator to the corresponding entry using constant-time // lookup in the table. This top digit, uniquely, is not recoded so there is // no sign adjustment to make. mov x0, xzr mov x1, xzr mov x2, xzr mov x3, xzr mov x4, xzr mov x5, xzr mov x6, xzr mov x7, xzr mov x8, xzr mov x9, xzr mov x10, xzr mov x11, xzr mov x12, xzr mov x13, xzr mov x14, xzr mov x15, xzr mov x16, xzr mov x17, xzr add x19, tab selectblock(1) selectblock(2) selectblock(3) selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) selectblock(9) selectblock(10) selectblock(11) selectblock(12) selectblock(13) selectblock(14) selectblock(15) selectblock(16) stp x0, x1, [acc] stp x2, x3, [acc+16] stp x4, x5, [acc+32] stp x6, x7, [acc+48] stp x8, x9, [acc+64] stp x10, x11, [acc+80] stp x12, x13, [acc+96] stp x14, x15, [acc+112] stp x16, x17, [acc+128] mov j, #380 // Main loop over size-5 bitfields: double 5 times then add signed digit // At each stage we shift the scalar left by 5 bits so we can simply pick // the top 5 bits as the bitfield, saving some fiddle over indexing. Lp384_montjscalarmul_alt_mainloop: sub j, j, #5 add x0, acc add x1, acc CFI_BL(Lp384_montjscalarmul_alt_p384_montjdouble) add x0, acc add x1, acc CFI_BL(Lp384_montjscalarmul_alt_p384_montjdouble) add x0, acc add x1, acc CFI_BL(Lp384_montjscalarmul_alt_p384_montjdouble) add x0, acc add x1, acc CFI_BL(Lp384_montjscalarmul_alt_p384_montjdouble) add x0, acc add x1, acc CFI_BL(Lp384_montjscalarmul_alt_p384_montjdouble) // Choose the bitfield and adjust it to sign and magnitude ldp x0, x1, [scalarb] ldp x2, x3, [scalarb+16] ldp x4, x5, [scalarb+32] lsr bf, x5, #59 extr x5, x5, x4, #59 extr x4, x4, x3, #59 extr x3, x3, x2, #59 extr x2, x2, x1, #59 extr x1, x1, x0, #59 lsl x0, x0, #5 stp x0, x1, [scalarb] stp x2, x3, [scalarb+16] stp x4, x5, [scalarb+32] subs bf, bf, #16 cset sgn, lo // sgn = sign of digit (1 = negative) cneg bf, bf, lo // bf = absolute value of digit // Conditionally select the table entry tab[i-1] = i * P in constant time mov x0, xzr mov x1, xzr mov x2, xzr mov x3, xzr mov x4, xzr mov x5, xzr mov x6, xzr mov x7, xzr mov x8, xzr mov x9, xzr mov x10, xzr mov x11, xzr mov x12, xzr mov x13, xzr mov x14, xzr mov x15, xzr mov x16, xzr mov x17, xzr add x19, tab selectblock(1) selectblock(2) selectblock(3) selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) selectblock(9) selectblock(10) selectblock(11) selectblock(12) selectblock(13) selectblock(14) selectblock(15) selectblock(16) // Store it to "tabent" with the y coordinate optionally negated. // This is done carefully to give coordinates < p_384 even in // the degenerate case y = 0 (when z = 0 for points on the curve). 
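// The selection logic below ("cmp sgn; ccmp ...; csel") implements, in
// branch-free form, roughly the following C sketch (illustrative only;
// cond_negate_mod_p is an invented name): replace y by p_384 - y exactly
// when the digit was negative and y is nonzero, so the stored coordinate
// stays in [0, p_384) even when y = 0.
//
//   #include <stdint.h>
//
//   static void cond_negate_mod_p(uint64_t y[6], const uint64_t p[6],
//                                 uint64_t negate /* 0 or 1 */)
//   {
//       uint64_t t[6], borrow = 0, nz = 0;
//       for (int i = 0; i < 6; i++)
//           nz |= y[i];                    /* nz == 0 iff y == 0 */
//       for (int i = 0; i < 6; i++) {      /* t = p - y; never borrows
//                                             overall since 0 <= y < p */
//           uint64_t d1 = p[i] - y[i], b1 = p[i] < y[i];
//           t[i] = d1 - borrow;
//           borrow = b1 | (d1 < borrow);
//       }
//       uint64_t mask = 0 - (negate & (nz != 0));
//       for (int i = 0; i < 6; i++)
//           y[i] = (y[i] & ~mask) | (t[i] & mask);
//   }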
stp x0, x1, [tabent] stp x2, x3, [tabent+16] stp x4, x5, [tabent+32] stp x12, x13, [tabent+96] stp x14, x15, [tabent+112] stp x16, x17, [tabent+128] mov x0, #0x00000000ffffffff subs x0, x0, x6 orr x12, x6, x7 mov x1, #0xffffffff00000000 sbcs x1, x1, x7 orr x13, x8, x9 mov x2, #0xfffffffffffffffe sbcs x2, x2, x8 orr x14, x10, x11 mov x5, #0xffffffffffffffff sbcs x3, x5, x9 orr x12, x12, x13 sbcs x4, x5, x10 orr x12, x12, x14 sbcs x5, x5, x11 cmp sgn, xzr ccmp x12, xzr, #4, ne csel x6, x0, x6, ne csel x7, x1, x7, ne csel x8, x2, x8, ne csel x9, x3, x9, ne csel x10, x4, x10, ne csel x11, x5, x11, ne stp x6, x7, [tabent+48] stp x8, x9, [tabent+64] stp x10, x11, [tabent+80] // Add to the accumulator add x0, acc add x1, acc add x2, tabent CFI_BL(Lp384_montjscalarmul_alt_p384_montjadd) cbnz j, Lp384_montjscalarmul_alt_mainloop // That's the end of the main loop, and we just need to copy the // result in "acc" to the output. ldp x0, x1, [acc] stp x0, x1, [res] ldp x0, x1, [acc+16] stp x0, x1, [res, #16] ldp x0, x1, [acc+32] stp x0, x1, [res, #32] ldp x0, x1, [acc+48] stp x0, x1, [res, #48] ldp x0, x1, [acc+64] stp x0, x1, [res, #64] ldp x0, x1, [acc+80] stp x0, x1, [res, #80] ldp x0, x1, [acc+96] stp x0, x1, [res, #96] ldp x0, x1, [acc+112] stp x0, x1, [res, #112] ldp x0, x1, [acc+128] stp x0, x1, [res, #128] // Restore stack and registers and return CFI_INC_SP(NSPACE) CFI_POP2(x25,x30) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(p384_montjscalarmul_alt) // Local copies of subroutines, complete clones at the moment S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp384_montjscalarmul_alt_p384_montjadd) Lp384_montjscalarmul_alt_p384_montjadd: CFI_START CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_DEC_SP(336) mov x24, x0 mov x25, x1 mov x26, x2 ldp x2, x3, [x25, #96] mul x9, x2, x3 umulh x10, x2, x3 ldp x4, x5, [x25, #112] mul x8, x2, x4 adds x10, x10, x8 mul x11, x2, x5 mul x8, x3, x4 adcs x11, x11, x8 umulh x12, x2, x5 mul x8, x3, x5 adcs x12, x12, x8 ldp x6, x7, [x25, #128] mul x13, x2, x7 mul x8, x3, x6 adcs x13, x13, x8 umulh x14, x2, x7 mul x8, x3, x7 adcs x14, x14, x8 mul x15, x5, x6 adcs x15, x15, xzr umulh x16, x5, x6 adc x16, x16, xzr umulh x8, x2, x4 adds x11, x11, x8 umulh x8, x3, x4 adcs x12, x12, x8 umulh x8, x3, x5 adcs x13, x13, x8 umulh x8, x3, x6 adcs x14, x14, x8 umulh x8, x3, x7 adcs x15, x15, x8 adc x16, x16, xzr mul x8, x2, x6 adds x12, x12, x8 mul x8, x4, x5 adcs x13, x13, x8 mul x8, x4, x6 adcs x14, x14, x8 mul x8, x4, x7 adcs x15, x15, x8 mul x8, x5, x7 adcs x16, x16, x8 mul x17, x6, x7 adcs x17, x17, xzr umulh x19, x6, x7 adc x19, x19, xzr umulh x8, x2, x6 adds x13, x13, x8 umulh x8, x4, x5 adcs x14, x14, x8 umulh x8, x4, x6 adcs x15, x15, x8 umulh x8, x4, x7 adcs x16, x16, x8 umulh x8, x5, x7 adcs x17, x17, x8 adc x19, x19, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 adcs x15, x15, x15 adcs x16, x16, x16 adcs x17, x17, x17 adcs x19, x19, x19 cset x20, hs umulh x8, x2, x2 mul x2, x2, x2 adds x9, x9, x8 mul x8, x3, x3 adcs x10, x10, x8 umulh x8, x3, x3 adcs x11, x11, x8 mul x8, x4, x4 adcs x12, x12, x8 umulh x8, x4, x4 adcs x13, x13, x8 mul x8, x5, x5 adcs x14, x14, x8 umulh x8, x5, x5 adcs x15, x15, x8 mul x8, x6, x6 adcs x16, x16, x8 umulh x8, x6, x6 adcs x17, x17, x8 mul x8, x7, x7 adcs x19, x19, x8 umulh x8, x7, x7 adc x20, x20, x8 lsl x5, x2, #32 add x2, x5, x2 mov x5, #-4294967295 umulh x5, x5, x2 mov x4, #4294967295 mul x3, x4, x2 umulh x4, 
x4, x2 adds x5, x5, x3 adcs x4, x4, x2 adc x3, xzr, xzr subs x9, x9, x5 sbcs x10, x10, x4 sbcs x11, x11, x3 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x2, x2, xzr lsl x5, x9, #32 add x9, x5, x9 mov x5, #-4294967295 umulh x5, x5, x9 mov x4, #4294967295 mul x3, x4, x9 umulh x4, x4, x9 adds x5, x5, x3 adcs x4, x4, x9 adc x3, xzr, xzr subs x10, x10, x5 sbcs x11, x11, x4 sbcs x12, x12, x3 sbcs x13, x13, xzr sbcs x2, x2, xzr sbc x9, x9, xzr lsl x5, x10, #32 add x10, x5, x10 mov x5, #-4294967295 umulh x5, x5, x10 mov x4, #4294967295 mul x3, x4, x10 umulh x4, x4, x10 adds x5, x5, x3 adcs x4, x4, x10 adc x3, xzr, xzr subs x11, x11, x5 sbcs x12, x12, x4 sbcs x13, x13, x3 sbcs x2, x2, xzr sbcs x9, x9, xzr sbc x10, x10, xzr lsl x5, x11, #32 add x11, x5, x11 mov x5, #-4294967295 umulh x5, x5, x11 mov x4, #4294967295 mul x3, x4, x11 umulh x4, x4, x11 adds x5, x5, x3 adcs x4, x4, x11 adc x3, xzr, xzr subs x12, x12, x5 sbcs x13, x13, x4 sbcs x2, x2, x3 sbcs x9, x9, xzr sbcs x10, x10, xzr sbc x11, x11, xzr lsl x5, x12, #32 add x12, x5, x12 mov x5, #-4294967295 umulh x5, x5, x12 mov x4, #4294967295 mul x3, x4, x12 umulh x4, x4, x12 adds x5, x5, x3 adcs x4, x4, x12 adc x3, xzr, xzr subs x13, x13, x5 sbcs x2, x2, x4 sbcs x9, x9, x3 sbcs x10, x10, xzr sbcs x11, x11, xzr sbc x12, x12, xzr lsl x5, x13, #32 add x13, x5, x13 mov x5, #-4294967295 umulh x5, x5, x13 mov x4, #4294967295 mul x3, x4, x13 umulh x4, x4, x13 adds x5, x5, x3 adcs x4, x4, x13 adc x3, xzr, xzr subs x2, x2, x5 sbcs x9, x9, x4 sbcs x10, x10, x3 sbcs x11, x11, xzr sbcs x12, x12, xzr sbc x13, x13, xzr adds x2, x2, x14 adcs x9, x9, x15 adcs x10, x10, x16 adcs x11, x11, x17 adcs x12, x12, x19 adcs x13, x13, x20 mov x14, #-4294967295 mov x15, #4294967295 csel x14, x14, xzr, hs csel x15, x15, xzr, hs cset x16, hs adds x2, x2, x14 adcs x9, x9, x15 adcs x10, x10, x16 adcs x11, x11, xzr adcs x12, x12, xzr adc x13, x13, xzr stp x2, x9, [sp] stp x10, x11, [sp, #16] stp x12, x13, [sp, #32] ldp x2, x3, [x26, #96] mul x9, x2, x3 umulh x10, x2, x3 ldp x4, x5, [x26, #112] mul x8, x2, x4 adds x10, x10, x8 mul x11, x2, x5 mul x8, x3, x4 adcs x11, x11, x8 umulh x12, x2, x5 mul x8, x3, x5 adcs x12, x12, x8 ldp x6, x7, [x26, #128] mul x13, x2, x7 mul x8, x3, x6 adcs x13, x13, x8 umulh x14, x2, x7 mul x8, x3, x7 adcs x14, x14, x8 mul x15, x5, x6 adcs x15, x15, xzr umulh x16, x5, x6 adc x16, x16, xzr umulh x8, x2, x4 adds x11, x11, x8 umulh x8, x3, x4 adcs x12, x12, x8 umulh x8, x3, x5 adcs x13, x13, x8 umulh x8, x3, x6 adcs x14, x14, x8 umulh x8, x3, x7 adcs x15, x15, x8 adc x16, x16, xzr mul x8, x2, x6 adds x12, x12, x8 mul x8, x4, x5 adcs x13, x13, x8 mul x8, x4, x6 adcs x14, x14, x8 mul x8, x4, x7 adcs x15, x15, x8 mul x8, x5, x7 adcs x16, x16, x8 mul x17, x6, x7 adcs x17, x17, xzr umulh x19, x6, x7 adc x19, x19, xzr umulh x8, x2, x6 adds x13, x13, x8 umulh x8, x4, x5 adcs x14, x14, x8 umulh x8, x4, x6 adcs x15, x15, x8 umulh x8, x4, x7 adcs x16, x16, x8 umulh x8, x5, x7 adcs x17, x17, x8 adc x19, x19, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 adcs x15, x15, x15 adcs x16, x16, x16 adcs x17, x17, x17 adcs x19, x19, x19 cset x20, hs umulh x8, x2, x2 mul x2, x2, x2 adds x9, x9, x8 mul x8, x3, x3 adcs x10, x10, x8 umulh x8, x3, x3 adcs x11, x11, x8 mul x8, x4, x4 adcs x12, x12, x8 umulh x8, x4, x4 adcs x13, x13, x8 mul x8, x5, x5 adcs x14, x14, x8 umulh x8, x5, x5 adcs x15, x15, x8 mul x8, x6, x6 adcs x16, x16, x8 umulh x8, x6, x6 adcs x17, x17, x8 mul x8, x7, x7 adcs x19, x19, x8 umulh x8, x7, 
x7 adc x20, x20, x8 lsl x5, x2, #32 add x2, x5, x2 mov x5, #-4294967295 umulh x5, x5, x2 mov x4, #4294967295 mul x3, x4, x2 umulh x4, x4, x2 adds x5, x5, x3 adcs x4, x4, x2 adc x3, xzr, xzr subs x9, x9, x5 sbcs x10, x10, x4 sbcs x11, x11, x3 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x2, x2, xzr lsl x5, x9, #32 add x9, x5, x9 mov x5, #-4294967295 umulh x5, x5, x9 mov x4, #4294967295 mul x3, x4, x9 umulh x4, x4, x9 adds x5, x5, x3 adcs x4, x4, x9 adc x3, xzr, xzr subs x10, x10, x5 sbcs x11, x11, x4 sbcs x12, x12, x3 sbcs x13, x13, xzr sbcs x2, x2, xzr sbc x9, x9, xzr lsl x5, x10, #32 add x10, x5, x10 mov x5, #-4294967295 umulh x5, x5, x10 mov x4, #4294967295 mul x3, x4, x10 umulh x4, x4, x10 adds x5, x5, x3 adcs x4, x4, x10 adc x3, xzr, xzr subs x11, x11, x5 sbcs x12, x12, x4 sbcs x13, x13, x3 sbcs x2, x2, xzr sbcs x9, x9, xzr sbc x10, x10, xzr lsl x5, x11, #32 add x11, x5, x11 mov x5, #-4294967295 umulh x5, x5, x11 mov x4, #4294967295 mul x3, x4, x11 umulh x4, x4, x11 adds x5, x5, x3 adcs x4, x4, x11 adc x3, xzr, xzr subs x12, x12, x5 sbcs x13, x13, x4 sbcs x2, x2, x3 sbcs x9, x9, xzr sbcs x10, x10, xzr sbc x11, x11, xzr lsl x5, x12, #32 add x12, x5, x12 mov x5, #-4294967295 umulh x5, x5, x12 mov x4, #4294967295 mul x3, x4, x12 umulh x4, x4, x12 adds x5, x5, x3 adcs x4, x4, x12 adc x3, xzr, xzr subs x13, x13, x5 sbcs x2, x2, x4 sbcs x9, x9, x3 sbcs x10, x10, xzr sbcs x11, x11, xzr sbc x12, x12, xzr lsl x5, x13, #32 add x13, x5, x13 mov x5, #-4294967295 umulh x5, x5, x13 mov x4, #4294967295 mul x3, x4, x13 umulh x4, x4, x13 adds x5, x5, x3 adcs x4, x4, x13 adc x3, xzr, xzr subs x2, x2, x5 sbcs x9, x9, x4 sbcs x10, x10, x3 sbcs x11, x11, xzr sbcs x12, x12, xzr sbc x13, x13, xzr adds x2, x2, x14 adcs x9, x9, x15 adcs x10, x10, x16 adcs x11, x11, x17 adcs x12, x12, x19 adcs x13, x13, x20 mov x14, #-4294967295 mov x15, #4294967295 csel x14, x14, xzr, hs csel x15, x15, xzr, hs cset x16, hs adds x2, x2, x14 adcs x9, x9, x15 adcs x10, x10, x16 adcs x11, x11, xzr adcs x12, x12, xzr adc x13, x13, xzr stp x2, x9, [sp, #240] stp x10, x11, [sp, #256] stp x12, x13, [sp, #272] ldp x3, x4, [x26, #96] ldp x5, x6, [x25, #48] mul x12, x3, x5 umulh x13, x3, x5 mul x11, x3, x6 umulh x14, x3, x6 adds x13, x13, x11 ldp x7, x8, [x25, #64] mul x11, x3, x7 umulh x15, x3, x7 adcs x14, x14, x11 mul x11, x3, x8 umulh x16, x3, x8 adcs x15, x15, x11 ldp x9, x10, [x25, #80] mul x11, x3, x9 umulh x17, x3, x9 adcs x16, x16, x11 mul x11, x3, x10 umulh x19, x3, x10 adcs x17, x17, x11 adc x19, x19, xzr mul x11, x4, x5 adds x13, x13, x11 mul x11, x4, x6 adcs x14, x14, x11 mul x11, x4, x7 adcs x15, x15, x11 mul x11, x4, x8 adcs x16, x16, x11 mul x11, x4, x9 adcs x17, x17, x11 mul x11, x4, x10 adcs x19, x19, x11 cset x20, hs umulh x11, x4, x5 adds x14, x14, x11 umulh x11, x4, x6 adcs x15, x15, x11 umulh x11, x4, x7 adcs x16, x16, x11 umulh x11, x4, x8 adcs x17, x17, x11 umulh x11, x4, x9 adcs x19, x19, x11 umulh x11, x4, x10 adc x20, x20, x11 ldp x3, x4, [x26, #112] mul x11, x3, x5 adds x14, x14, x11 mul x11, x3, x6 adcs x15, x15, x11 mul x11, x3, x7 adcs x16, x16, x11 mul x11, x3, x8 adcs x17, x17, x11 mul x11, x3, x9 adcs x19, x19, x11 mul x11, x3, x10 adcs x20, x20, x11 cset x21, hs umulh x11, x3, x5 adds x15, x15, x11 umulh x11, x3, x6 adcs x16, x16, x11 umulh x11, x3, x7 adcs x17, x17, x11 umulh x11, x3, x8 adcs x19, x19, x11 umulh x11, x3, x9 adcs x20, x20, x11 umulh x11, x3, x10 adc x21, x21, x11 mul x11, x4, x5 adds x15, x15, x11 mul x11, x4, x6 adcs x16, x16, x11 mul x11, x4, x7 adcs x17, x17, x11 mul x11, x4, x8 adcs 
x19, x19, x11 mul x11, x4, x9 adcs x20, x20, x11 mul x11, x4, x10 adcs x21, x21, x11 cset x22, hs umulh x11, x4, x5 adds x16, x16, x11 umulh x11, x4, x6 adcs x17, x17, x11 umulh x11, x4, x7 adcs x19, x19, x11 umulh x11, x4, x8 adcs x20, x20, x11 umulh x11, x4, x9 adcs x21, x21, x11 umulh x11, x4, x10 adc x22, x22, x11 ldp x3, x4, [x26, #128] mul x11, x3, x5 adds x16, x16, x11 mul x11, x3, x6 adcs x17, x17, x11 mul x11, x3, x7 adcs x19, x19, x11 mul x11, x3, x8 adcs x20, x20, x11 mul x11, x3, x9 adcs x21, x21, x11 mul x11, x3, x10 adcs x22, x22, x11 cset x2, hs umulh x11, x3, x5 adds x17, x17, x11 umulh x11, x3, x6 adcs x19, x19, x11 umulh x11, x3, x7 adcs x20, x20, x11 umulh x11, x3, x8 adcs x21, x21, x11 umulh x11, x3, x9 adcs x22, x22, x11 umulh x11, x3, x10 adc x2, x2, x11 mul x11, x4, x5 adds x17, x17, x11 mul x11, x4, x6 adcs x19, x19, x11 mul x11, x4, x7 adcs x20, x20, x11 mul x11, x4, x8 adcs x21, x21, x11 mul x11, x4, x9 adcs x22, x22, x11 mul x11, x4, x10 adcs x2, x2, x11 cset x1, hs umulh x11, x4, x5 adds x19, x19, x11 umulh x11, x4, x6 adcs x20, x20, x11 umulh x11, x4, x7 adcs x21, x21, x11 umulh x11, x4, x8 adcs x22, x22, x11 umulh x11, x4, x9 adcs x2, x2, x11 umulh x11, x4, x10 adc x1, x1, x11 lsl x7, x12, #32 add x12, x7, x12 mov x7, #-4294967295 umulh x7, x7, x12 mov x6, #4294967295 mul x5, x6, x12 umulh x6, x6, x12 adds x7, x7, x5 adcs x6, x6, x12 adc x5, xzr, xzr subs x13, x13, x7 sbcs x14, x14, x6 sbcs x15, x15, x5 sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x12, x12, xzr lsl x7, x13, #32 add x13, x7, x13 mov x7, #-4294967295 umulh x7, x7, x13 mov x6, #4294967295 mul x5, x6, x13 umulh x6, x6, x13 adds x7, x7, x5 adcs x6, x6, x13 adc x5, xzr, xzr subs x14, x14, x7 sbcs x15, x15, x6 sbcs x16, x16, x5 sbcs x17, x17, xzr sbcs x12, x12, xzr sbc x13, x13, xzr lsl x7, x14, #32 add x14, x7, x14 mov x7, #-4294967295 umulh x7, x7, x14 mov x6, #4294967295 mul x5, x6, x14 umulh x6, x6, x14 adds x7, x7, x5 adcs x6, x6, x14 adc x5, xzr, xzr subs x15, x15, x7 sbcs x16, x16, x6 sbcs x17, x17, x5 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr lsl x7, x15, #32 add x15, x7, x15 mov x7, #-4294967295 umulh x7, x7, x15 mov x6, #4294967295 mul x5, x6, x15 umulh x6, x6, x15 adds x7, x7, x5 adcs x6, x6, x15 adc x5, xzr, xzr subs x16, x16, x7 sbcs x17, x17, x6 sbcs x12, x12, x5 sbcs x13, x13, xzr sbcs x14, x14, xzr sbc x15, x15, xzr lsl x7, x16, #32 add x16, x7, x16 mov x7, #-4294967295 umulh x7, x7, x16 mov x6, #4294967295 mul x5, x6, x16 umulh x6, x6, x16 adds x7, x7, x5 adcs x6, x6, x16 adc x5, xzr, xzr subs x17, x17, x7 sbcs x12, x12, x6 sbcs x13, x13, x5 sbcs x14, x14, xzr sbcs x15, x15, xzr sbc x16, x16, xzr lsl x7, x17, #32 add x17, x7, x17 mov x7, #-4294967295 umulh x7, x7, x17 mov x6, #4294967295 mul x5, x6, x17 umulh x6, x6, x17 adds x7, x7, x5 adcs x6, x6, x17 adc x5, xzr, xzr subs x12, x12, x7 sbcs x13, x13, x6 sbcs x14, x14, x5 sbcs x15, x15, xzr sbcs x16, x16, xzr sbc x17, x17, xzr adds x12, x12, x19 adcs x13, x13, x20 adcs x14, x14, x21 adcs x15, x15, x22 adcs x16, x16, x2 adcs x17, x17, x1 adc x10, xzr, xzr mov x11, #-4294967295 adds x19, x12, x11 mov x11, #4294967295 adcs x20, x13, x11 mov x11, #1 adcs x21, x14, x11 adcs x22, x15, xzr adcs x2, x16, xzr adcs x1, x17, xzr adcs x10, x10, xzr csel x12, x12, x19, eq csel x13, x13, x20, eq csel x14, x14, x21, eq csel x15, x15, x22, eq csel x16, x16, x2, eq csel x17, x17, x1, eq stp x12, x13, [sp, #288] stp x14, x15, [sp, #304] stp x16, x17, [sp, #320] ldp x3, x4, [x25, #96] ldp x5, x6, [x26, #48] mul x12, x3, x5 umulh x13, x3, x5 
mul x11, x3, x6 umulh x14, x3, x6 adds x13, x13, x11 ldp x7, x8, [x26, #64] mul x11, x3, x7 umulh x15, x3, x7 adcs x14, x14, x11 mul x11, x3, x8 umulh x16, x3, x8 adcs x15, x15, x11 ldp x9, x10, [x26, #80] mul x11, x3, x9 umulh x17, x3, x9 adcs x16, x16, x11 mul x11, x3, x10 umulh x19, x3, x10 adcs x17, x17, x11 adc x19, x19, xzr mul x11, x4, x5 adds x13, x13, x11 mul x11, x4, x6 adcs x14, x14, x11 mul x11, x4, x7 adcs x15, x15, x11 mul x11, x4, x8 adcs x16, x16, x11 mul x11, x4, x9 adcs x17, x17, x11 mul x11, x4, x10 adcs x19, x19, x11 cset x20, hs umulh x11, x4, x5 adds x14, x14, x11 umulh x11, x4, x6 adcs x15, x15, x11 umulh x11, x4, x7 adcs x16, x16, x11 umulh x11, x4, x8 adcs x17, x17, x11 umulh x11, x4, x9 adcs x19, x19, x11 umulh x11, x4, x10 adc x20, x20, x11 ldp x3, x4, [x25, #112] mul x11, x3, x5 adds x14, x14, x11 mul x11, x3, x6 adcs x15, x15, x11 mul x11, x3, x7 adcs x16, x16, x11 mul x11, x3, x8 adcs x17, x17, x11 mul x11, x3, x9 adcs x19, x19, x11 mul x11, x3, x10 adcs x20, x20, x11 cset x21, hs umulh x11, x3, x5 adds x15, x15, x11 umulh x11, x3, x6 adcs x16, x16, x11 umulh x11, x3, x7 adcs x17, x17, x11 umulh x11, x3, x8 adcs x19, x19, x11 umulh x11, x3, x9 adcs x20, x20, x11 umulh x11, x3, x10 adc x21, x21, x11 mul x11, x4, x5 adds x15, x15, x11 mul x11, x4, x6 adcs x16, x16, x11 mul x11, x4, x7 adcs x17, x17, x11 mul x11, x4, x8 adcs x19, x19, x11 mul x11, x4, x9 adcs x20, x20, x11 mul x11, x4, x10 adcs x21, x21, x11 cset x22, hs umulh x11, x4, x5 adds x16, x16, x11 umulh x11, x4, x6 adcs x17, x17, x11 umulh x11, x4, x7 adcs x19, x19, x11 umulh x11, x4, x8 adcs x20, x20, x11 umulh x11, x4, x9 adcs x21, x21, x11 umulh x11, x4, x10 adc x22, x22, x11 ldp x3, x4, [x25, #128] mul x11, x3, x5 adds x16, x16, x11 mul x11, x3, x6 adcs x17, x17, x11 mul x11, x3, x7 adcs x19, x19, x11 mul x11, x3, x8 adcs x20, x20, x11 mul x11, x3, x9 adcs x21, x21, x11 mul x11, x3, x10 adcs x22, x22, x11 cset x2, hs umulh x11, x3, x5 adds x17, x17, x11 umulh x11, x3, x6 adcs x19, x19, x11 umulh x11, x3, x7 adcs x20, x20, x11 umulh x11, x3, x8 adcs x21, x21, x11 umulh x11, x3, x9 adcs x22, x22, x11 umulh x11, x3, x10 adc x2, x2, x11 mul x11, x4, x5 adds x17, x17, x11 mul x11, x4, x6 adcs x19, x19, x11 mul x11, x4, x7 adcs x20, x20, x11 mul x11, x4, x8 adcs x21, x21, x11 mul x11, x4, x9 adcs x22, x22, x11 mul x11, x4, x10 adcs x2, x2, x11 cset x1, hs umulh x11, x4, x5 adds x19, x19, x11 umulh x11, x4, x6 adcs x20, x20, x11 umulh x11, x4, x7 adcs x21, x21, x11 umulh x11, x4, x8 adcs x22, x22, x11 umulh x11, x4, x9 adcs x2, x2, x11 umulh x11, x4, x10 adc x1, x1, x11 lsl x7, x12, #32 add x12, x7, x12 mov x7, #-4294967295 umulh x7, x7, x12 mov x6, #4294967295 mul x5, x6, x12 umulh x6, x6, x12 adds x7, x7, x5 adcs x6, x6, x12 adc x5, xzr, xzr subs x13, x13, x7 sbcs x14, x14, x6 sbcs x15, x15, x5 sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x12, x12, xzr lsl x7, x13, #32 add x13, x7, x13 mov x7, #-4294967295 umulh x7, x7, x13 mov x6, #4294967295 mul x5, x6, x13 umulh x6, x6, x13 adds x7, x7, x5 adcs x6, x6, x13 adc x5, xzr, xzr subs x14, x14, x7 sbcs x15, x15, x6 sbcs x16, x16, x5 sbcs x17, x17, xzr sbcs x12, x12, xzr sbc x13, x13, xzr lsl x7, x14, #32 add x14, x7, x14 mov x7, #-4294967295 umulh x7, x7, x14 mov x6, #4294967295 mul x5, x6, x14 umulh x6, x6, x14 adds x7, x7, x5 adcs x6, x6, x14 adc x5, xzr, xzr subs x15, x15, x7 sbcs x16, x16, x6 sbcs x17, x17, x5 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr lsl x7, x15, #32 add x15, x7, x15 mov x7, #-4294967295 umulh x7, x7, x15 mov x6, #4294967295 
mul x5, x6, x15 umulh x6, x6, x15 adds x7, x7, x5 adcs x6, x6, x15 adc x5, xzr, xzr subs x16, x16, x7 sbcs x17, x17, x6 sbcs x12, x12, x5 sbcs x13, x13, xzr sbcs x14, x14, xzr sbc x15, x15, xzr lsl x7, x16, #32 add x16, x7, x16 mov x7, #-4294967295 umulh x7, x7, x16 mov x6, #4294967295 mul x5, x6, x16 umulh x6, x6, x16 adds x7, x7, x5 adcs x6, x6, x16 adc x5, xzr, xzr subs x17, x17, x7 sbcs x12, x12, x6 sbcs x13, x13, x5 sbcs x14, x14, xzr sbcs x15, x15, xzr sbc x16, x16, xzr lsl x7, x17, #32 add x17, x7, x17 mov x7, #-4294967295 umulh x7, x7, x17 mov x6, #4294967295 mul x5, x6, x17 umulh x6, x6, x17 adds x7, x7, x5 adcs x6, x6, x17 adc x5, xzr, xzr subs x12, x12, x7 sbcs x13, x13, x6 sbcs x14, x14, x5 sbcs x15, x15, xzr sbcs x16, x16, xzr sbc x17, x17, xzr adds x12, x12, x19 adcs x13, x13, x20 adcs x14, x14, x21 adcs x15, x15, x22 adcs x16, x16, x2 adcs x17, x17, x1 adc x10, xzr, xzr mov x11, #-4294967295 adds x19, x12, x11 mov x11, #4294967295 adcs x20, x13, x11 mov x11, #1 adcs x21, x14, x11 adcs x22, x15, xzr adcs x2, x16, xzr adcs x1, x17, xzr adcs x10, x10, xzr csel x12, x12, x19, eq csel x13, x13, x20, eq csel x14, x14, x21, eq csel x15, x15, x22, eq csel x16, x16, x2, eq csel x17, x17, x1, eq stp x12, x13, [sp, #48] stp x14, x15, [sp, #64] stp x16, x17, [sp, #80] ldp x3, x4, [sp] ldp x5, x6, [x26] mul x12, x3, x5 umulh x13, x3, x5 mul x11, x3, x6 umulh x14, x3, x6 adds x13, x13, x11 ldp x7, x8, [x26, #16] mul x11, x3, x7 umulh x15, x3, x7 adcs x14, x14, x11 mul x11, x3, x8 umulh x16, x3, x8 adcs x15, x15, x11 ldp x9, x10, [x26, #32] mul x11, x3, x9 umulh x17, x3, x9 adcs x16, x16, x11 mul x11, x3, x10 umulh x19, x3, x10 adcs x17, x17, x11 adc x19, x19, xzr mul x11, x4, x5 adds x13, x13, x11 mul x11, x4, x6 adcs x14, x14, x11 mul x11, x4, x7 adcs x15, x15, x11 mul x11, x4, x8 adcs x16, x16, x11 mul x11, x4, x9 adcs x17, x17, x11 mul x11, x4, x10 adcs x19, x19, x11 cset x20, hs umulh x11, x4, x5 adds x14, x14, x11 umulh x11, x4, x6 adcs x15, x15, x11 umulh x11, x4, x7 adcs x16, x16, x11 umulh x11, x4, x8 adcs x17, x17, x11 umulh x11, x4, x9 adcs x19, x19, x11 umulh x11, x4, x10 adc x20, x20, x11 ldp x3, x4, [sp, #16] mul x11, x3, x5 adds x14, x14, x11 mul x11, x3, x6 adcs x15, x15, x11 mul x11, x3, x7 adcs x16, x16, x11 mul x11, x3, x8 adcs x17, x17, x11 mul x11, x3, x9 adcs x19, x19, x11 mul x11, x3, x10 adcs x20, x20, x11 cset x21, hs umulh x11, x3, x5 adds x15, x15, x11 umulh x11, x3, x6 adcs x16, x16, x11 umulh x11, x3, x7 adcs x17, x17, x11 umulh x11, x3, x8 adcs x19, x19, x11 umulh x11, x3, x9 adcs x20, x20, x11 umulh x11, x3, x10 adc x21, x21, x11 mul x11, x4, x5 adds x15, x15, x11 mul x11, x4, x6 adcs x16, x16, x11 mul x11, x4, x7 adcs x17, x17, x11 mul x11, x4, x8 adcs x19, x19, x11 mul x11, x4, x9 adcs x20, x20, x11 mul x11, x4, x10 adcs x21, x21, x11 cset x22, hs umulh x11, x4, x5 adds x16, x16, x11 umulh x11, x4, x6 adcs x17, x17, x11 umulh x11, x4, x7 adcs x19, x19, x11 umulh x11, x4, x8 adcs x20, x20, x11 umulh x11, x4, x9 adcs x21, x21, x11 umulh x11, x4, x10 adc x22, x22, x11 ldp x3, x4, [sp, #32] mul x11, x3, x5 adds x16, x16, x11 mul x11, x3, x6 adcs x17, x17, x11 mul x11, x3, x7 adcs x19, x19, x11 mul x11, x3, x8 adcs x20, x20, x11 mul x11, x3, x9 adcs x21, x21, x11 mul x11, x3, x10 adcs x22, x22, x11 cset x2, hs umulh x11, x3, x5 adds x17, x17, x11 umulh x11, x3, x6 adcs x19, x19, x11 umulh x11, x3, x7 adcs x20, x20, x11 umulh x11, x3, x8 adcs x21, x21, x11 umulh x11, x3, x9 adcs x22, x22, x11 umulh x11, x3, x10 adc x2, x2, x11 mul x11, x4, x5 adds x17, x17, x11 
mul x11, x4, x6 adcs x19, x19, x11 mul x11, x4, x7 adcs x20, x20, x11 mul x11, x4, x8 adcs x21, x21, x11 mul x11, x4, x9 adcs x22, x22, x11 mul x11, x4, x10 adcs x2, x2, x11 cset x1, hs umulh x11, x4, x5 adds x19, x19, x11 umulh x11, x4, x6 adcs x20, x20, x11 umulh x11, x4, x7 adcs x21, x21, x11 umulh x11, x4, x8 adcs x22, x22, x11 umulh x11, x4, x9 adcs x2, x2, x11 umulh x11, x4, x10 adc x1, x1, x11 lsl x7, x12, #32 add x12, x7, x12 mov x7, #-4294967295 umulh x7, x7, x12 mov x6, #4294967295 mul x5, x6, x12 umulh x6, x6, x12 adds x7, x7, x5 adcs x6, x6, x12 adc x5, xzr, xzr subs x13, x13, x7 sbcs x14, x14, x6 sbcs x15, x15, x5 sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x12, x12, xzr lsl x7, x13, #32 add x13, x7, x13 mov x7, #-4294967295 umulh x7, x7, x13 mov x6, #4294967295 mul x5, x6, x13 umulh x6, x6, x13 adds x7, x7, x5 adcs x6, x6, x13 adc x5, xzr, xzr subs x14, x14, x7 sbcs x15, x15, x6 sbcs x16, x16, x5 sbcs x17, x17, xzr sbcs x12, x12, xzr sbc x13, x13, xzr lsl x7, x14, #32 add x14, x7, x14 mov x7, #-4294967295 umulh x7, x7, x14 mov x6, #4294967295 mul x5, x6, x14 umulh x6, x6, x14 adds x7, x7, x5 adcs x6, x6, x14 adc x5, xzr, xzr subs x15, x15, x7 sbcs x16, x16, x6 sbcs x17, x17, x5 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr lsl x7, x15, #32 add x15, x7, x15 mov x7, #-4294967295 umulh x7, x7, x15 mov x6, #4294967295 mul x5, x6, x15 umulh x6, x6, x15 adds x7, x7, x5 adcs x6, x6, x15 adc x5, xzr, xzr subs x16, x16, x7 sbcs x17, x17, x6 sbcs x12, x12, x5 sbcs x13, x13, xzr sbcs x14, x14, xzr sbc x15, x15, xzr lsl x7, x16, #32 add x16, x7, x16 mov x7, #-4294967295 umulh x7, x7, x16 mov x6, #4294967295 mul x5, x6, x16 umulh x6, x6, x16 adds x7, x7, x5 adcs x6, x6, x16 adc x5, xzr, xzr subs x17, x17, x7 sbcs x12, x12, x6 sbcs x13, x13, x5 sbcs x14, x14, xzr sbcs x15, x15, xzr sbc x16, x16, xzr lsl x7, x17, #32 add x17, x7, x17 mov x7, #-4294967295 umulh x7, x7, x17 mov x6, #4294967295 mul x5, x6, x17 umulh x6, x6, x17 adds x7, x7, x5 adcs x6, x6, x17 adc x5, xzr, xzr subs x12, x12, x7 sbcs x13, x13, x6 sbcs x14, x14, x5 sbcs x15, x15, xzr sbcs x16, x16, xzr sbc x17, x17, xzr adds x12, x12, x19 adcs x13, x13, x20 adcs x14, x14, x21 adcs x15, x15, x22 adcs x16, x16, x2 adcs x17, x17, x1 adc x10, xzr, xzr mov x11, #-4294967295 adds x19, x12, x11 mov x11, #4294967295 adcs x20, x13, x11 mov x11, #1 adcs x21, x14, x11 adcs x22, x15, xzr adcs x2, x16, xzr adcs x1, x17, xzr adcs x10, x10, xzr csel x12, x12, x19, eq csel x13, x13, x20, eq csel x14, x14, x21, eq csel x15, x15, x22, eq csel x16, x16, x2, eq csel x17, x17, x1, eq stp x12, x13, [sp, #96] stp x14, x15, [sp, #112] stp x16, x17, [sp, #128] ldp x3, x4, [sp, #240] ldp x5, x6, [x25] mul x12, x3, x5 umulh x13, x3, x5 mul x11, x3, x6 umulh x14, x3, x6 adds x13, x13, x11 ldp x7, x8, [x25, #16] mul x11, x3, x7 umulh x15, x3, x7 adcs x14, x14, x11 mul x11, x3, x8 umulh x16, x3, x8 adcs x15, x15, x11 ldp x9, x10, [x25, #32] mul x11, x3, x9 umulh x17, x3, x9 adcs x16, x16, x11 mul x11, x3, x10 umulh x19, x3, x10 adcs x17, x17, x11 adc x19, x19, xzr mul x11, x4, x5 adds x13, x13, x11 mul x11, x4, x6 adcs x14, x14, x11 mul x11, x4, x7 adcs x15, x15, x11 mul x11, x4, x8 adcs x16, x16, x11 mul x11, x4, x9 adcs x17, x17, x11 mul x11, x4, x10 adcs x19, x19, x11 cset x20, hs umulh x11, x4, x5 adds x14, x14, x11 umulh x11, x4, x6 adcs x15, x15, x11 umulh x11, x4, x7 adcs x16, x16, x11 umulh x11, x4, x8 adcs x17, x17, x11 umulh x11, x4, x9 adcs x19, x19, x11 umulh x11, x4, x10 adc x20, x20, x11 ldp x3, x4, [sp, #256] mul x11, x3, x5 adds x14, 
x14, x11 mul x11, x3, x6 adcs x15, x15, x11 mul x11, x3, x7 adcs x16, x16, x11 mul x11, x3, x8 adcs x17, x17, x11 mul x11, x3, x9 adcs x19, x19, x11 mul x11, x3, x10 adcs x20, x20, x11 cset x21, hs umulh x11, x3, x5 adds x15, x15, x11 umulh x11, x3, x6 adcs x16, x16, x11 umulh x11, x3, x7 adcs x17, x17, x11 umulh x11, x3, x8 adcs x19, x19, x11 umulh x11, x3, x9 adcs x20, x20, x11 umulh x11, x3, x10 adc x21, x21, x11 mul x11, x4, x5 adds x15, x15, x11 mul x11, x4, x6 adcs x16, x16, x11 mul x11, x4, x7 adcs x17, x17, x11 mul x11, x4, x8 adcs x19, x19, x11 mul x11, x4, x9 adcs x20, x20, x11 mul x11, x4, x10 adcs x21, x21, x11 cset x22, hs umulh x11, x4, x5 adds x16, x16, x11 umulh x11, x4, x6 adcs x17, x17, x11 umulh x11, x4, x7 adcs x19, x19, x11 umulh x11, x4, x8 adcs x20, x20, x11 umulh x11, x4, x9 adcs x21, x21, x11 umulh x11, x4, x10 adc x22, x22, x11 ldp x3, x4, [sp, #272] mul x11, x3, x5 adds x16, x16, x11 mul x11, x3, x6 adcs x17, x17, x11 mul x11, x3, x7 adcs x19, x19, x11 mul x11, x3, x8 adcs x20, x20, x11 mul x11, x3, x9 adcs x21, x21, x11 mul x11, x3, x10 adcs x22, x22, x11 cset x2, hs umulh x11, x3, x5 adds x17, x17, x11 umulh x11, x3, x6 adcs x19, x19, x11 umulh x11, x3, x7 adcs x20, x20, x11 umulh x11, x3, x8 adcs x21, x21, x11 umulh x11, x3, x9 adcs x22, x22, x11 umulh x11, x3, x10 adc x2, x2, x11 mul x11, x4, x5 adds x17, x17, x11 mul x11, x4, x6 adcs x19, x19, x11 mul x11, x4, x7 adcs x20, x20, x11 mul x11, x4, x8 adcs x21, x21, x11 mul x11, x4, x9 adcs x22, x22, x11 mul x11, x4, x10 adcs x2, x2, x11 cset x1, hs umulh x11, x4, x5 adds x19, x19, x11 umulh x11, x4, x6 adcs x20, x20, x11 umulh x11, x4, x7 adcs x21, x21, x11 umulh x11, x4, x8 adcs x22, x22, x11 umulh x11, x4, x9 adcs x2, x2, x11 umulh x11, x4, x10 adc x1, x1, x11 lsl x7, x12, #32 add x12, x7, x12 mov x7, #-4294967295 umulh x7, x7, x12 mov x6, #4294967295 mul x5, x6, x12 umulh x6, x6, x12 adds x7, x7, x5 adcs x6, x6, x12 adc x5, xzr, xzr subs x13, x13, x7 sbcs x14, x14, x6 sbcs x15, x15, x5 sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x12, x12, xzr lsl x7, x13, #32 add x13, x7, x13 mov x7, #-4294967295 umulh x7, x7, x13 mov x6, #4294967295 mul x5, x6, x13 umulh x6, x6, x13 adds x7, x7, x5 adcs x6, x6, x13 adc x5, xzr, xzr subs x14, x14, x7 sbcs x15, x15, x6 sbcs x16, x16, x5 sbcs x17, x17, xzr sbcs x12, x12, xzr sbc x13, x13, xzr lsl x7, x14, #32 add x14, x7, x14 mov x7, #-4294967295 umulh x7, x7, x14 mov x6, #4294967295 mul x5, x6, x14 umulh x6, x6, x14 adds x7, x7, x5 adcs x6, x6, x14 adc x5, xzr, xzr subs x15, x15, x7 sbcs x16, x16, x6 sbcs x17, x17, x5 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr lsl x7, x15, #32 add x15, x7, x15 mov x7, #-4294967295 umulh x7, x7, x15 mov x6, #4294967295 mul x5, x6, x15 umulh x6, x6, x15 adds x7, x7, x5 adcs x6, x6, x15 adc x5, xzr, xzr subs x16, x16, x7 sbcs x17, x17, x6 sbcs x12, x12, x5 sbcs x13, x13, xzr sbcs x14, x14, xzr sbc x15, x15, xzr lsl x7, x16, #32 add x16, x7, x16 mov x7, #-4294967295 umulh x7, x7, x16 mov x6, #4294967295 mul x5, x6, x16 umulh x6, x6, x16 adds x7, x7, x5 adcs x6, x6, x16 adc x5, xzr, xzr subs x17, x17, x7 sbcs x12, x12, x6 sbcs x13, x13, x5 sbcs x14, x14, xzr sbcs x15, x15, xzr sbc x16, x16, xzr lsl x7, x17, #32 add x17, x7, x17 mov x7, #-4294967295 umulh x7, x7, x17 mov x6, #4294967295 mul x5, x6, x17 umulh x6, x6, x17 adds x7, x7, x5 adcs x6, x6, x17 adc x5, xzr, xzr subs x12, x12, x7 sbcs x13, x13, x6 sbcs x14, x14, x5 sbcs x15, x15, xzr sbcs x16, x16, xzr sbc x17, x17, xzr adds x12, x12, x19 adcs x13, x13, x20 adcs x14, x14, x21 adcs 
x15, x15, x22 adcs x16, x16, x2 adcs x17, x17, x1 adc x10, xzr, xzr mov x11, #-4294967295 adds x19, x12, x11 mov x11, #4294967295 adcs x20, x13, x11 mov x11, #1 adcs x21, x14, x11 adcs x22, x15, xzr adcs x2, x16, xzr adcs x1, x17, xzr adcs x10, x10, xzr csel x12, x12, x19, eq csel x13, x13, x20, eq csel x14, x14, x21, eq csel x15, x15, x22, eq csel x16, x16, x2, eq csel x17, x17, x1, eq stp x12, x13, [sp, #192] stp x14, x15, [sp, #208] stp x16, x17, [sp, #224] ldp x3, x4, [sp] ldp x5, x6, [sp, #48] mul x12, x3, x5 umulh x13, x3, x5 mul x11, x3, x6 umulh x14, x3, x6 adds x13, x13, x11 ldp x7, x8, [sp, #64] mul x11, x3, x7 umulh x15, x3, x7 adcs x14, x14, x11 mul x11, x3, x8 umulh x16, x3, x8 adcs x15, x15, x11 ldp x9, x10, [sp, #80] mul x11, x3, x9 umulh x17, x3, x9 adcs x16, x16, x11 mul x11, x3, x10 umulh x19, x3, x10 adcs x17, x17, x11 adc x19, x19, xzr mul x11, x4, x5 adds x13, x13, x11 mul x11, x4, x6 adcs x14, x14, x11 mul x11, x4, x7 adcs x15, x15, x11 mul x11, x4, x8 adcs x16, x16, x11 mul x11, x4, x9 adcs x17, x17, x11 mul x11, x4, x10 adcs x19, x19, x11 cset x20, hs umulh x11, x4, x5 adds x14, x14, x11 umulh x11, x4, x6 adcs x15, x15, x11 umulh x11, x4, x7 adcs x16, x16, x11 umulh x11, x4, x8 adcs x17, x17, x11 umulh x11, x4, x9 adcs x19, x19, x11 umulh x11, x4, x10 adc x20, x20, x11 ldp x3, x4, [sp, #16] mul x11, x3, x5 adds x14, x14, x11 mul x11, x3, x6 adcs x15, x15, x11 mul x11, x3, x7 adcs x16, x16, x11 mul x11, x3, x8 adcs x17, x17, x11 mul x11, x3, x9 adcs x19, x19, x11 mul x11, x3, x10 adcs x20, x20, x11 cset x21, hs umulh x11, x3, x5 adds x15, x15, x11 umulh x11, x3, x6 adcs x16, x16, x11 umulh x11, x3, x7 adcs x17, x17, x11 umulh x11, x3, x8 adcs x19, x19, x11 umulh x11, x3, x9 adcs x20, x20, x11 umulh x11, x3, x10 adc x21, x21, x11 mul x11, x4, x5 adds x15, x15, x11 mul x11, x4, x6 adcs x16, x16, x11 mul x11, x4, x7 adcs x17, x17, x11 mul x11, x4, x8 adcs x19, x19, x11 mul x11, x4, x9 adcs x20, x20, x11 mul x11, x4, x10 adcs x21, x21, x11 cset x22, hs umulh x11, x4, x5 adds x16, x16, x11 umulh x11, x4, x6 adcs x17, x17, x11 umulh x11, x4, x7 adcs x19, x19, x11 umulh x11, x4, x8 adcs x20, x20, x11 umulh x11, x4, x9 adcs x21, x21, x11 umulh x11, x4, x10 adc x22, x22, x11 ldp x3, x4, [sp, #32] mul x11, x3, x5 adds x16, x16, x11 mul x11, x3, x6 adcs x17, x17, x11 mul x11, x3, x7 adcs x19, x19, x11 mul x11, x3, x8 adcs x20, x20, x11 mul x11, x3, x9 adcs x21, x21, x11 mul x11, x3, x10 adcs x22, x22, x11 cset x2, hs umulh x11, x3, x5 adds x17, x17, x11 umulh x11, x3, x6 adcs x19, x19, x11 umulh x11, x3, x7 adcs x20, x20, x11 umulh x11, x3, x8 adcs x21, x21, x11 umulh x11, x3, x9 adcs x22, x22, x11 umulh x11, x3, x10 adc x2, x2, x11 mul x11, x4, x5 adds x17, x17, x11 mul x11, x4, x6 adcs x19, x19, x11 mul x11, x4, x7 adcs x20, x20, x11 mul x11, x4, x8 adcs x21, x21, x11 mul x11, x4, x9 adcs x22, x22, x11 mul x11, x4, x10 adcs x2, x2, x11 cset x1, hs umulh x11, x4, x5 adds x19, x19, x11 umulh x11, x4, x6 adcs x20, x20, x11 umulh x11, x4, x7 adcs x21, x21, x11 umulh x11, x4, x8 adcs x22, x22, x11 umulh x11, x4, x9 adcs x2, x2, x11 umulh x11, x4, x10 adc x1, x1, x11 lsl x7, x12, #32 add x12, x7, x12 mov x7, #-4294967295 umulh x7, x7, x12 mov x6, #4294967295 mul x5, x6, x12 umulh x6, x6, x12 adds x7, x7, x5 adcs x6, x6, x12 adc x5, xzr, xzr subs x13, x13, x7 sbcs x14, x14, x6 sbcs x15, x15, x5 sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x12, x12, xzr lsl x7, x13, #32 add x13, x7, x13 mov x7, #-4294967295 umulh x7, x7, x13 mov x6, #4294967295 mul x5, x6, x13 umulh x6, x6, x13 adds x7, 
x7, x5 adcs x6, x6, x13 adc x5, xzr, xzr subs x14, x14, x7 sbcs x15, x15, x6 sbcs x16, x16, x5 sbcs x17, x17, xzr sbcs x12, x12, xzr sbc x13, x13, xzr lsl x7, x14, #32 add x14, x7, x14 mov x7, #-4294967295 umulh x7, x7, x14 mov x6, #4294967295 mul x5, x6, x14 umulh x6, x6, x14 adds x7, x7, x5 adcs x6, x6, x14 adc x5, xzr, xzr subs x15, x15, x7 sbcs x16, x16, x6 sbcs x17, x17, x5 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr lsl x7, x15, #32 add x15, x7, x15 mov x7, #-4294967295 umulh x7, x7, x15 mov x6, #4294967295 mul x5, x6, x15 umulh x6, x6, x15 adds x7, x7, x5 adcs x6, x6, x15 adc x5, xzr, xzr subs x16, x16, x7 sbcs x17, x17, x6 sbcs x12, x12, x5 sbcs x13, x13, xzr sbcs x14, x14, xzr sbc x15, x15, xzr lsl x7, x16, #32 add x16, x7, x16 mov x7, #-4294967295 umulh x7, x7, x16 mov x6, #4294967295 mul x5, x6, x16 umulh x6, x6, x16 adds x7, x7, x5 adcs x6, x6, x16 adc x5, xzr, xzr subs x17, x17, x7 sbcs x12, x12, x6 sbcs x13, x13, x5 sbcs x14, x14, xzr sbcs x15, x15, xzr sbc x16, x16, xzr lsl x7, x17, #32 add x17, x7, x17 mov x7, #-4294967295 umulh x7, x7, x17 mov x6, #4294967295 mul x5, x6, x17 umulh x6, x6, x17 adds x7, x7, x5 adcs x6, x6, x17 adc x5, xzr, xzr subs x12, x12, x7 sbcs x13, x13, x6 sbcs x14, x14, x5 sbcs x15, x15, xzr sbcs x16, x16, xzr sbc x17, x17, xzr adds x12, x12, x19 adcs x13, x13, x20 adcs x14, x14, x21 adcs x15, x15, x22 adcs x16, x16, x2 adcs x17, x17, x1 adc x10, xzr, xzr mov x11, #-4294967295 adds x19, x12, x11 mov x11, #4294967295 adcs x20, x13, x11 mov x11, #1 adcs x21, x14, x11 adcs x22, x15, xzr adcs x2, x16, xzr adcs x1, x17, xzr adcs x10, x10, xzr csel x12, x12, x19, eq csel x13, x13, x20, eq csel x14, x14, x21, eq csel x15, x15, x22, eq csel x16, x16, x2, eq csel x17, x17, x1, eq stp x12, x13, [sp, #48] stp x14, x15, [sp, #64] stp x16, x17, [sp, #80] ldp x3, x4, [sp, #240] ldp x5, x6, [sp, #288] mul x12, x3, x5 umulh x13, x3, x5 mul x11, x3, x6 umulh x14, x3, x6 adds x13, x13, x11 ldp x7, x8, [sp, #304] mul x11, x3, x7 umulh x15, x3, x7 adcs x14, x14, x11 mul x11, x3, x8 umulh x16, x3, x8 adcs x15, x15, x11 ldp x9, x10, [sp, #320] mul x11, x3, x9 umulh x17, x3, x9 adcs x16, x16, x11 mul x11, x3, x10 umulh x19, x3, x10 adcs x17, x17, x11 adc x19, x19, xzr mul x11, x4, x5 adds x13, x13, x11 mul x11, x4, x6 adcs x14, x14, x11 mul x11, x4, x7 adcs x15, x15, x11 mul x11, x4, x8 adcs x16, x16, x11 mul x11, x4, x9 adcs x17, x17, x11 mul x11, x4, x10 adcs x19, x19, x11 cset x20, hs umulh x11, x4, x5 adds x14, x14, x11 umulh x11, x4, x6 adcs x15, x15, x11 umulh x11, x4, x7 adcs x16, x16, x11 umulh x11, x4, x8 adcs x17, x17, x11 umulh x11, x4, x9 adcs x19, x19, x11 umulh x11, x4, x10 adc x20, x20, x11 ldp x3, x4, [sp, #256] mul x11, x3, x5 adds x14, x14, x11 mul x11, x3, x6 adcs x15, x15, x11 mul x11, x3, x7 adcs x16, x16, x11 mul x11, x3, x8 adcs x17, x17, x11 mul x11, x3, x9 adcs x19, x19, x11 mul x11, x3, x10 adcs x20, x20, x11 cset x21, hs umulh x11, x3, x5 adds x15, x15, x11 umulh x11, x3, x6 adcs x16, x16, x11 umulh x11, x3, x7 adcs x17, x17, x11 umulh x11, x3, x8 adcs x19, x19, x11 umulh x11, x3, x9 adcs x20, x20, x11 umulh x11, x3, x10 adc x21, x21, x11 mul x11, x4, x5 adds x15, x15, x11 mul x11, x4, x6 adcs x16, x16, x11 mul x11, x4, x7 adcs x17, x17, x11 mul x11, x4, x8 adcs x19, x19, x11 mul x11, x4, x9 adcs x20, x20, x11 mul x11, x4, x10 adcs x21, x21, x11 cset x22, hs umulh x11, x4, x5 adds x16, x16, x11 umulh x11, x4, x6 adcs x17, x17, x11 umulh x11, x4, x7 adcs x19, x19, x11 umulh x11, x4, x8 adcs x20, x20, x11 umulh x11, x4, x9 adcs x21, x21, 
x11 umulh x11, x4, x10 adc x22, x22, x11 ldp x3, x4, [sp, #272] mul x11, x3, x5 adds x16, x16, x11 mul x11, x3, x6 adcs x17, x17, x11 mul x11, x3, x7 adcs x19, x19, x11 mul x11, x3, x8 adcs x20, x20, x11 mul x11, x3, x9 adcs x21, x21, x11 mul x11, x3, x10 adcs x22, x22, x11 cset x2, hs umulh x11, x3, x5 adds x17, x17, x11 umulh x11, x3, x6 adcs x19, x19, x11 umulh x11, x3, x7 adcs x20, x20, x11 umulh x11, x3, x8 adcs x21, x21, x11 umulh x11, x3, x9 adcs x22, x22, x11 umulh x11, x3, x10 adc x2, x2, x11 mul x11, x4, x5 adds x17, x17, x11 mul x11, x4, x6 adcs x19, x19, x11 mul x11, x4, x7 adcs x20, x20, x11 mul x11, x4, x8 adcs x21, x21, x11 mul x11, x4, x9 adcs x22, x22, x11 mul x11, x4, x10 adcs x2, x2, x11 cset x1, hs umulh x11, x4, x5 adds x19, x19, x11 umulh x11, x4, x6 adcs x20, x20, x11 umulh x11, x4, x7 adcs x21, x21, x11 umulh x11, x4, x8 adcs x22, x22, x11 umulh x11, x4, x9 adcs x2, x2, x11 umulh x11, x4, x10 adc x1, x1, x11 lsl x7, x12, #32 add x12, x7, x12 mov x7, #-4294967295 umulh x7, x7, x12 mov x6, #4294967295 mul x5, x6, x12 umulh x6, x6, x12 adds x7, x7, x5 adcs x6, x6, x12 adc x5, xzr, xzr subs x13, x13, x7 sbcs x14, x14, x6 sbcs x15, x15, x5 sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x12, x12, xzr lsl x7, x13, #32 add x13, x7, x13 mov x7, #-4294967295 umulh x7, x7, x13 mov x6, #4294967295 mul x5, x6, x13 umulh x6, x6, x13 adds x7, x7, x5 adcs x6, x6, x13 adc x5, xzr, xzr subs x14, x14, x7 sbcs x15, x15, x6 sbcs x16, x16, x5 sbcs x17, x17, xzr sbcs x12, x12, xzr sbc x13, x13, xzr lsl x7, x14, #32 add x14, x7, x14 mov x7, #-4294967295 umulh x7, x7, x14 mov x6, #4294967295 mul x5, x6, x14 umulh x6, x6, x14 adds x7, x7, x5 adcs x6, x6, x14 adc x5, xzr, xzr subs x15, x15, x7 sbcs x16, x16, x6 sbcs x17, x17, x5 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr lsl x7, x15, #32 add x15, x7, x15 mov x7, #-4294967295 umulh x7, x7, x15 mov x6, #4294967295 mul x5, x6, x15 umulh x6, x6, x15 adds x7, x7, x5 adcs x6, x6, x15 adc x5, xzr, xzr subs x16, x16, x7 sbcs x17, x17, x6 sbcs x12, x12, x5 sbcs x13, x13, xzr sbcs x14, x14, xzr sbc x15, x15, xzr lsl x7, x16, #32 add x16, x7, x16 mov x7, #-4294967295 umulh x7, x7, x16 mov x6, #4294967295 mul x5, x6, x16 umulh x6, x6, x16 adds x7, x7, x5 adcs x6, x6, x16 adc x5, xzr, xzr subs x17, x17, x7 sbcs x12, x12, x6 sbcs x13, x13, x5 sbcs x14, x14, xzr sbcs x15, x15, xzr sbc x16, x16, xzr lsl x7, x17, #32 add x17, x7, x17 mov x7, #-4294967295 umulh x7, x7, x17 mov x6, #4294967295 mul x5, x6, x17 umulh x6, x6, x17 adds x7, x7, x5 adcs x6, x6, x17 adc x5, xzr, xzr subs x12, x12, x7 sbcs x13, x13, x6 sbcs x14, x14, x5 sbcs x15, x15, xzr sbcs x16, x16, xzr sbc x17, x17, xzr adds x12, x12, x19 adcs x13, x13, x20 adcs x14, x14, x21 adcs x15, x15, x22 adcs x16, x16, x2 adcs x17, x17, x1 adc x10, xzr, xzr mov x11, #-4294967295 adds x19, x12, x11 mov x11, #4294967295 adcs x20, x13, x11 mov x11, #1 adcs x21, x14, x11 adcs x22, x15, xzr adcs x2, x16, xzr adcs x1, x17, xzr adcs x10, x10, xzr csel x12, x12, x19, eq csel x13, x13, x20, eq csel x14, x14, x21, eq csel x15, x15, x22, eq csel x16, x16, x2, eq csel x17, x17, x1, eq stp x12, x13, [sp, #288] stp x14, x15, [sp, #304] stp x16, x17, [sp, #320] ldp x5, x6, [sp, #96] ldp x4, x3, [sp, #192] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #112] ldp x4, x3, [sp, #208] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #128] ldp x4, x3, [sp, #224] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, lo mov x4, #4294967295 and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #-2 and x4, x4, x3 
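// (modular correction in progress: x3 is an all-ones mask exactly when
// the 6-word subtraction borrowed, and the masked constants rebuild the
// words of p_384 = [0xffffffff; 0xffffffff00000000; 0xfffffffffffffffe;
// 2^64-1; 2^64-1; 2^64-1], so p_384 is added back only on underflow)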
adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [sp, #240] stp x7, x8, [sp, #256] stp x9, x10, [sp, #272] ldp x5, x6, [sp, #48] ldp x4, x3, [sp, #288] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #64] ldp x4, x3, [sp, #304] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #80] ldp x4, x3, [sp, #320] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, lo mov x4, #4294967295 and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #-2 and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [sp, #48] stp x7, x8, [sp, #64] stp x9, x10, [sp, #80] ldp x2, x3, [sp, #240] mul x9, x2, x3 umulh x10, x2, x3 ldp x4, x5, [sp, #256] mul x8, x2, x4 adds x10, x10, x8 mul x11, x2, x5 mul x8, x3, x4 adcs x11, x11, x8 umulh x12, x2, x5 mul x8, x3, x5 adcs x12, x12, x8 ldp x6, x7, [sp, #272] mul x13, x2, x7 mul x8, x3, x6 adcs x13, x13, x8 umulh x14, x2, x7 mul x8, x3, x7 adcs x14, x14, x8 mul x15, x5, x6 adcs x15, x15, xzr umulh x16, x5, x6 adc x16, x16, xzr umulh x8, x2, x4 adds x11, x11, x8 umulh x8, x3, x4 adcs x12, x12, x8 umulh x8, x3, x5 adcs x13, x13, x8 umulh x8, x3, x6 adcs x14, x14, x8 umulh x8, x3, x7 adcs x15, x15, x8 adc x16, x16, xzr mul x8, x2, x6 adds x12, x12, x8 mul x8, x4, x5 adcs x13, x13, x8 mul x8, x4, x6 adcs x14, x14, x8 mul x8, x4, x7 adcs x15, x15, x8 mul x8, x5, x7 adcs x16, x16, x8 mul x17, x6, x7 adcs x17, x17, xzr umulh x19, x6, x7 adc x19, x19, xzr umulh x8, x2, x6 adds x13, x13, x8 umulh x8, x4, x5 adcs x14, x14, x8 umulh x8, x4, x6 adcs x15, x15, x8 umulh x8, x4, x7 adcs x16, x16, x8 umulh x8, x5, x7 adcs x17, x17, x8 adc x19, x19, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 adcs x15, x15, x15 adcs x16, x16, x16 adcs x17, x17, x17 adcs x19, x19, x19 cset x20, hs umulh x8, x2, x2 mul x2, x2, x2 adds x9, x9, x8 mul x8, x3, x3 adcs x10, x10, x8 umulh x8, x3, x3 adcs x11, x11, x8 mul x8, x4, x4 adcs x12, x12, x8 umulh x8, x4, x4 adcs x13, x13, x8 mul x8, x5, x5 adcs x14, x14, x8 umulh x8, x5, x5 adcs x15, x15, x8 mul x8, x6, x6 adcs x16, x16, x8 umulh x8, x6, x6 adcs x17, x17, x8 mul x8, x7, x7 adcs x19, x19, x8 umulh x8, x7, x7 adc x20, x20, x8 lsl x5, x2, #32 add x2, x5, x2 mov x5, #-4294967295 umulh x5, x5, x2 mov x4, #4294967295 mul x3, x4, x2 umulh x4, x4, x2 adds x5, x5, x3 adcs x4, x4, x2 adc x3, xzr, xzr subs x9, x9, x5 sbcs x10, x10, x4 sbcs x11, x11, x3 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x2, x2, xzr lsl x5, x9, #32 add x9, x5, x9 mov x5, #-4294967295 umulh x5, x5, x9 mov x4, #4294967295 mul x3, x4, x9 umulh x4, x4, x9 adds x5, x5, x3 adcs x4, x4, x9 adc x3, xzr, xzr subs x10, x10, x5 sbcs x11, x11, x4 sbcs x12, x12, x3 sbcs x13, x13, xzr sbcs x2, x2, xzr sbc x9, x9, xzr lsl x5, x10, #32 add x10, x5, x10 mov x5, #-4294967295 umulh x5, x5, x10 mov x4, #4294967295 mul x3, x4, x10 umulh x4, x4, x10 adds x5, x5, x3 adcs x4, x4, x10 adc x3, xzr, xzr subs x11, x11, x5 sbcs x12, x12, x4 sbcs x13, x13, x3 sbcs x2, x2, xzr sbcs x9, x9, xzr sbc x10, x10, xzr lsl x5, x11, #32 add x11, x5, x11 mov x5, #-4294967295 umulh x5, x5, x11 mov x4, #4294967295 mul x3, x4, x11 umulh x4, x4, x11 adds x5, x5, x3 adcs x4, x4, x11 adc x3, xzr, xzr subs x12, x12, x5 sbcs x13, x13, x4 sbcs x2, x2, x3 sbcs x9, x9, xzr sbcs x10, x10, xzr sbc x11, x11, xzr lsl x5, x12, #32 add x12, x5, x12 mov x5, #-4294967295 umulh x5, x5, x12 mov x4, #4294967295 mul x3, x4, x12 umulh x4, x4, x12 adds x5, x5, x3 adcs x4, x4, x12 adc x3, xzr, xzr subs x13, x13, x5 
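// (one word of Montgomery reduction: the bottom word w is replaced
// in-place by q = w + (w << 32) = w * (2^32 + 1), where 2^32 + 1 is the
// negation of p_384^-1 mod 2^64; adding q * p_384 then zeroes the
// bottom word so the running value can drop by 64 bits)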
sbcs x2, x2, x4 sbcs x9, x9, x3 sbcs x10, x10, xzr sbcs x11, x11, xzr sbc x12, x12, xzr lsl x5, x13, #32 add x13, x5, x13 mov x5, #-4294967295 umulh x5, x5, x13 mov x4, #4294967295 mul x3, x4, x13 umulh x4, x4, x13 adds x5, x5, x3 adcs x4, x4, x13 adc x3, xzr, xzr subs x2, x2, x5 sbcs x9, x9, x4 sbcs x10, x10, x3 sbcs x11, x11, xzr sbcs x12, x12, xzr sbc x13, x13, xzr adds x2, x2, x14 adcs x9, x9, x15 adcs x10, x10, x16 adcs x11, x11, x17 adcs x12, x12, x19 adcs x13, x13, x20 mov x14, #-4294967295 mov x15, #4294967295 csel x14, x14, xzr, hs csel x15, x15, xzr, hs cset x16, hs adds x2, x2, x14 adcs x9, x9, x15 adcs x10, x10, x16 adcs x11, x11, xzr adcs x12, x12, xzr adc x13, x13, xzr stp x2, x9, [sp, #144] stp x10, x11, [sp, #160] stp x12, x13, [sp, #176] ldp x2, x3, [sp, #48] mul x9, x2, x3 umulh x10, x2, x3 ldp x4, x5, [sp, #64] mul x8, x2, x4 adds x10, x10, x8 mul x11, x2, x5 mul x8, x3, x4 adcs x11, x11, x8 umulh x12, x2, x5 mul x8, x3, x5 adcs x12, x12, x8 ldp x6, x7, [sp, #80] mul x13, x2, x7 mul x8, x3, x6 adcs x13, x13, x8 umulh x14, x2, x7 mul x8, x3, x7 adcs x14, x14, x8 mul x15, x5, x6 adcs x15, x15, xzr umulh x16, x5, x6 adc x16, x16, xzr umulh x8, x2, x4 adds x11, x11, x8 umulh x8, x3, x4 adcs x12, x12, x8 umulh x8, x3, x5 adcs x13, x13, x8 umulh x8, x3, x6 adcs x14, x14, x8 umulh x8, x3, x7 adcs x15, x15, x8 adc x16, x16, xzr mul x8, x2, x6 adds x12, x12, x8 mul x8, x4, x5 adcs x13, x13, x8 mul x8, x4, x6 adcs x14, x14, x8 mul x8, x4, x7 adcs x15, x15, x8 mul x8, x5, x7 adcs x16, x16, x8 mul x17, x6, x7 adcs x17, x17, xzr umulh x19, x6, x7 adc x19, x19, xzr umulh x8, x2, x6 adds x13, x13, x8 umulh x8, x4, x5 adcs x14, x14, x8 umulh x8, x4, x6 adcs x15, x15, x8 umulh x8, x4, x7 adcs x16, x16, x8 umulh x8, x5, x7 adcs x17, x17, x8 adc x19, x19, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 adcs x15, x15, x15 adcs x16, x16, x16 adcs x17, x17, x17 adcs x19, x19, x19 cset x20, hs umulh x8, x2, x2 mul x2, x2, x2 adds x9, x9, x8 mul x8, x3, x3 adcs x10, x10, x8 umulh x8, x3, x3 adcs x11, x11, x8 mul x8, x4, x4 adcs x12, x12, x8 umulh x8, x4, x4 adcs x13, x13, x8 mul x8, x5, x5 adcs x14, x14, x8 umulh x8, x5, x5 adcs x15, x15, x8 mul x8, x6, x6 adcs x16, x16, x8 umulh x8, x6, x6 adcs x17, x17, x8 mul x8, x7, x7 adcs x19, x19, x8 umulh x8, x7, x7 adc x20, x20, x8 lsl x5, x2, #32 add x2, x5, x2 mov x5, #-4294967295 umulh x5, x5, x2 mov x4, #4294967295 mul x3, x4, x2 umulh x4, x4, x2 adds x5, x5, x3 adcs x4, x4, x2 adc x3, xzr, xzr subs x9, x9, x5 sbcs x10, x10, x4 sbcs x11, x11, x3 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x2, x2, xzr lsl x5, x9, #32 add x9, x5, x9 mov x5, #-4294967295 umulh x5, x5, x9 mov x4, #4294967295 mul x3, x4, x9 umulh x4, x4, x9 adds x5, x5, x3 adcs x4, x4, x9 adc x3, xzr, xzr subs x10, x10, x5 sbcs x11, x11, x4 sbcs x12, x12, x3 sbcs x13, x13, xzr sbcs x2, x2, xzr sbc x9, x9, xzr lsl x5, x10, #32 add x10, x5, x10 mov x5, #-4294967295 umulh x5, x5, x10 mov x4, #4294967295 mul x3, x4, x10 umulh x4, x4, x10 adds x5, x5, x3 adcs x4, x4, x10 adc x3, xzr, xzr subs x11, x11, x5 sbcs x12, x12, x4 sbcs x13, x13, x3 sbcs x2, x2, xzr sbcs x9, x9, xzr sbc x10, x10, xzr lsl x5, x11, #32 add x11, x5, x11 mov x5, #-4294967295 umulh x5, x5, x11 mov x4, #4294967295 mul x3, x4, x11 umulh x4, x4, x11 adds x5, x5, x3 adcs x4, x4, x11 adc x3, xzr, xzr subs x12, x12, x5 sbcs x13, x13, x4 sbcs x2, x2, x3 sbcs x9, x9, xzr sbcs x10, x10, xzr sbc x11, x11, xzr lsl x5, x12, #32 add x12, x5, x12 mov x5, #-4294967295 
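// (the constants #-4294967295 = 0xffffffff00000001 and #4294967295 =
// 0xffffffff, together with an implicit 1, are the only nonzero words
// of 2^384 - p_384, so each reduction round needs just these multiplies)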
umulh x5, x5, x12 mov x4, #4294967295 mul x3, x4, x12 umulh x4, x4, x12 adds x5, x5, x3 adcs x4, x4, x12 adc x3, xzr, xzr subs x13, x13, x5 sbcs x2, x2, x4 sbcs x9, x9, x3 sbcs x10, x10, xzr sbcs x11, x11, xzr sbc x12, x12, xzr lsl x5, x13, #32 add x13, x5, x13 mov x5, #-4294967295 umulh x5, x5, x13 mov x4, #4294967295 mul x3, x4, x13 umulh x4, x4, x13 adds x5, x5, x3 adcs x4, x4, x13 adc x3, xzr, xzr subs x2, x2, x5 sbcs x9, x9, x4 sbcs x10, x10, x3 sbcs x11, x11, xzr sbcs x12, x12, xzr sbc x13, x13, xzr adds x2, x2, x14 adcs x9, x9, x15 adcs x10, x10, x16 adcs x11, x11, x17 adcs x12, x12, x19 adcs x13, x13, x20 adc x6, xzr, xzr mov x8, #-4294967295 adds x14, x2, x8 mov x8, #4294967295 adcs x15, x9, x8 mov x8, #1 adcs x16, x10, x8 adcs x17, x11, xzr adcs x19, x12, xzr adcs x20, x13, xzr adcs x6, x6, xzr csel x2, x2, x14, eq csel x9, x9, x15, eq csel x10, x10, x16, eq csel x11, x11, x17, eq csel x12, x12, x19, eq csel x13, x13, x20, eq stp x2, x9, [sp] stp x10, x11, [sp, #16] stp x12, x13, [sp, #32] ldp x3, x4, [sp, #144] ldp x5, x6, [sp, #192] mul x12, x3, x5 umulh x13, x3, x5 mul x11, x3, x6 umulh x14, x3, x6 adds x13, x13, x11 ldp x7, x8, [sp, #208] mul x11, x3, x7 umulh x15, x3, x7 adcs x14, x14, x11 mul x11, x3, x8 umulh x16, x3, x8 adcs x15, x15, x11 ldp x9, x10, [sp, #224] mul x11, x3, x9 umulh x17, x3, x9 adcs x16, x16, x11 mul x11, x3, x10 umulh x19, x3, x10 adcs x17, x17, x11 adc x19, x19, xzr mul x11, x4, x5 adds x13, x13, x11 mul x11, x4, x6 adcs x14, x14, x11 mul x11, x4, x7 adcs x15, x15, x11 mul x11, x4, x8 adcs x16, x16, x11 mul x11, x4, x9 adcs x17, x17, x11 mul x11, x4, x10 adcs x19, x19, x11 cset x20, hs umulh x11, x4, x5 adds x14, x14, x11 umulh x11, x4, x6 adcs x15, x15, x11 umulh x11, x4, x7 adcs x16, x16, x11 umulh x11, x4, x8 adcs x17, x17, x11 umulh x11, x4, x9 adcs x19, x19, x11 umulh x11, x4, x10 adc x20, x20, x11 ldp x3, x4, [sp, #160] mul x11, x3, x5 adds x14, x14, x11 mul x11, x3, x6 adcs x15, x15, x11 mul x11, x3, x7 adcs x16, x16, x11 mul x11, x3, x8 adcs x17, x17, x11 mul x11, x3, x9 adcs x19, x19, x11 mul x11, x3, x10 adcs x20, x20, x11 cset x21, hs umulh x11, x3, x5 adds x15, x15, x11 umulh x11, x3, x6 adcs x16, x16, x11 umulh x11, x3, x7 adcs x17, x17, x11 umulh x11, x3, x8 adcs x19, x19, x11 umulh x11, x3, x9 adcs x20, x20, x11 umulh x11, x3, x10 adc x21, x21, x11 mul x11, x4, x5 adds x15, x15, x11 mul x11, x4, x6 adcs x16, x16, x11 mul x11, x4, x7 adcs x17, x17, x11 mul x11, x4, x8 adcs x19, x19, x11 mul x11, x4, x9 adcs x20, x20, x11 mul x11, x4, x10 adcs x21, x21, x11 cset x22, hs umulh x11, x4, x5 adds x16, x16, x11 umulh x11, x4, x6 adcs x17, x17, x11 umulh x11, x4, x7 adcs x19, x19, x11 umulh x11, x4, x8 adcs x20, x20, x11 umulh x11, x4, x9 adcs x21, x21, x11 umulh x11, x4, x10 adc x22, x22, x11 ldp x3, x4, [sp, #176] mul x11, x3, x5 adds x16, x16, x11 mul x11, x3, x6 adcs x17, x17, x11 mul x11, x3, x7 adcs x19, x19, x11 mul x11, x3, x8 adcs x20, x20, x11 mul x11, x3, x9 adcs x21, x21, x11 mul x11, x3, x10 adcs x22, x22, x11 cset x2, hs umulh x11, x3, x5 adds x17, x17, x11 umulh x11, x3, x6 adcs x19, x19, x11 umulh x11, x3, x7 adcs x20, x20, x11 umulh x11, x3, x8 adcs x21, x21, x11 umulh x11, x3, x9 adcs x22, x22, x11 umulh x11, x3, x10 adc x2, x2, x11 mul x11, x4, x5 adds x17, x17, x11 mul x11, x4, x6 adcs x19, x19, x11 mul x11, x4, x7 adcs x20, x20, x11 mul x11, x4, x8 adcs x21, x21, x11 mul x11, x4, x9 adcs x22, x22, x11 mul x11, x4, x10 adcs x2, x2, x11 cset x1, hs umulh x11, x4, x5 adds x19, x19, x11 umulh x11, x4, x6 adcs x20, x20, x11 umulh 
x11, x4, x7 adcs x21, x21, x11 umulh x11, x4, x8 adcs x22, x22, x11 umulh x11, x4, x9 adcs x2, x2, x11 umulh x11, x4, x10 adc x1, x1, x11 lsl x7, x12, #32 add x12, x7, x12 mov x7, #-4294967295 umulh x7, x7, x12 mov x6, #4294967295 mul x5, x6, x12 umulh x6, x6, x12 adds x7, x7, x5 adcs x6, x6, x12 adc x5, xzr, xzr subs x13, x13, x7 sbcs x14, x14, x6 sbcs x15, x15, x5 sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x12, x12, xzr lsl x7, x13, #32 add x13, x7, x13 mov x7, #-4294967295 umulh x7, x7, x13 mov x6, #4294967295 mul x5, x6, x13 umulh x6, x6, x13 adds x7, x7, x5 adcs x6, x6, x13 adc x5, xzr, xzr subs x14, x14, x7 sbcs x15, x15, x6 sbcs x16, x16, x5 sbcs x17, x17, xzr sbcs x12, x12, xzr sbc x13, x13, xzr lsl x7, x14, #32 add x14, x7, x14 mov x7, #-4294967295 umulh x7, x7, x14 mov x6, #4294967295 mul x5, x6, x14 umulh x6, x6, x14 adds x7, x7, x5 adcs x6, x6, x14 adc x5, xzr, xzr subs x15, x15, x7 sbcs x16, x16, x6 sbcs x17, x17, x5 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr lsl x7, x15, #32 add x15, x7, x15 mov x7, #-4294967295 umulh x7, x7, x15 mov x6, #4294967295 mul x5, x6, x15 umulh x6, x6, x15 adds x7, x7, x5 adcs x6, x6, x15 adc x5, xzr, xzr subs x16, x16, x7 sbcs x17, x17, x6 sbcs x12, x12, x5 sbcs x13, x13, xzr sbcs x14, x14, xzr sbc x15, x15, xzr lsl x7, x16, #32 add x16, x7, x16 mov x7, #-4294967295 umulh x7, x7, x16 mov x6, #4294967295 mul x5, x6, x16 umulh x6, x6, x16 adds x7, x7, x5 adcs x6, x6, x16 adc x5, xzr, xzr subs x17, x17, x7 sbcs x12, x12, x6 sbcs x13, x13, x5 sbcs x14, x14, xzr sbcs x15, x15, xzr sbc x16, x16, xzr lsl x7, x17, #32 add x17, x7, x17 mov x7, #-4294967295 umulh x7, x7, x17 mov x6, #4294967295 mul x5, x6, x17 umulh x6, x6, x17 adds x7, x7, x5 adcs x6, x6, x17 adc x5, xzr, xzr subs x12, x12, x7 sbcs x13, x13, x6 sbcs x14, x14, x5 sbcs x15, x15, xzr sbcs x16, x16, xzr sbc x17, x17, xzr adds x12, x12, x19 adcs x13, x13, x20 adcs x14, x14, x21 adcs x15, x15, x22 adcs x16, x16, x2 adcs x17, x17, x1 adc x10, xzr, xzr mov x11, #-4294967295 adds x19, x12, x11 mov x11, #4294967295 adcs x20, x13, x11 mov x11, #1 adcs x21, x14, x11 adcs x22, x15, xzr adcs x2, x16, xzr adcs x1, x17, xzr adcs x10, x10, xzr csel x12, x12, x19, eq csel x13, x13, x20, eq csel x14, x14, x21, eq csel x15, x15, x22, eq csel x16, x16, x2, eq csel x17, x17, x1, eq stp x12, x13, [sp, #192] stp x14, x15, [sp, #208] stp x16, x17, [sp, #224] ldp x3, x4, [sp, #144] ldp x5, x6, [sp, #96] mul x12, x3, x5 umulh x13, x3, x5 mul x11, x3, x6 umulh x14, x3, x6 adds x13, x13, x11 ldp x7, x8, [sp, #112] mul x11, x3, x7 umulh x15, x3, x7 adcs x14, x14, x11 mul x11, x3, x8 umulh x16, x3, x8 adcs x15, x15, x11 ldp x9, x10, [sp, #128] mul x11, x3, x9 umulh x17, x3, x9 adcs x16, x16, x11 mul x11, x3, x10 umulh x19, x3, x10 adcs x17, x17, x11 adc x19, x19, xzr mul x11, x4, x5 adds x13, x13, x11 mul x11, x4, x6 adcs x14, x14, x11 mul x11, x4, x7 adcs x15, x15, x11 mul x11, x4, x8 adcs x16, x16, x11 mul x11, x4, x9 adcs x17, x17, x11 mul x11, x4, x10 adcs x19, x19, x11 cset x20, hs umulh x11, x4, x5 adds x14, x14, x11 umulh x11, x4, x6 adcs x15, x15, x11 umulh x11, x4, x7 adcs x16, x16, x11 umulh x11, x4, x8 adcs x17, x17, x11 umulh x11, x4, x9 adcs x19, x19, x11 umulh x11, x4, x10 adc x20, x20, x11 ldp x3, x4, [sp, #160] mul x11, x3, x5 adds x14, x14, x11 mul x11, x3, x6 adcs x15, x15, x11 mul x11, x3, x7 adcs x16, x16, x11 mul x11, x3, x8 adcs x17, x17, x11 mul x11, x3, x9 adcs x19, x19, x11 mul x11, x3, x10 adcs x20, x20, x11 cset x21, hs umulh x11, x3, x5 adds x15, x15, x11 umulh x11, x3, x6 adcs 
x16, x16, x11 umulh x11, x3, x7 adcs x17, x17, x11 umulh x11, x3, x8 adcs x19, x19, x11 umulh x11, x3, x9 adcs x20, x20, x11 umulh x11, x3, x10 adc x21, x21, x11 mul x11, x4, x5 adds x15, x15, x11 mul x11, x4, x6 adcs x16, x16, x11 mul x11, x4, x7 adcs x17, x17, x11 mul x11, x4, x8 adcs x19, x19, x11 mul x11, x4, x9 adcs x20, x20, x11 mul x11, x4, x10 adcs x21, x21, x11 cset x22, hs umulh x11, x4, x5 adds x16, x16, x11 umulh x11, x4, x6 adcs x17, x17, x11 umulh x11, x4, x7 adcs x19, x19, x11 umulh x11, x4, x8 adcs x20, x20, x11 umulh x11, x4, x9 adcs x21, x21, x11 umulh x11, x4, x10 adc x22, x22, x11 ldp x3, x4, [sp, #176] mul x11, x3, x5 adds x16, x16, x11 mul x11, x3, x6 adcs x17, x17, x11 mul x11, x3, x7 adcs x19, x19, x11 mul x11, x3, x8 adcs x20, x20, x11 mul x11, x3, x9 adcs x21, x21, x11 mul x11, x3, x10 adcs x22, x22, x11 cset x2, hs umulh x11, x3, x5 adds x17, x17, x11 umulh x11, x3, x6 adcs x19, x19, x11 umulh x11, x3, x7 adcs x20, x20, x11 umulh x11, x3, x8 adcs x21, x21, x11 umulh x11, x3, x9 adcs x22, x22, x11 umulh x11, x3, x10 adc x2, x2, x11 mul x11, x4, x5 adds x17, x17, x11 mul x11, x4, x6 adcs x19, x19, x11 mul x11, x4, x7 adcs x20, x20, x11 mul x11, x4, x8 adcs x21, x21, x11 mul x11, x4, x9 adcs x22, x22, x11 mul x11, x4, x10 adcs x2, x2, x11 cset x1, hs umulh x11, x4, x5 adds x19, x19, x11 umulh x11, x4, x6 adcs x20, x20, x11 umulh x11, x4, x7 adcs x21, x21, x11 umulh x11, x4, x8 adcs x22, x22, x11 umulh x11, x4, x9 adcs x2, x2, x11 umulh x11, x4, x10 adc x1, x1, x11 lsl x7, x12, #32 add x12, x7, x12 mov x7, #-4294967295 umulh x7, x7, x12 mov x6, #4294967295 mul x5, x6, x12 umulh x6, x6, x12 adds x7, x7, x5 adcs x6, x6, x12 adc x5, xzr, xzr subs x13, x13, x7 sbcs x14, x14, x6 sbcs x15, x15, x5 sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x12, x12, xzr lsl x7, x13, #32 add x13, x7, x13 mov x7, #-4294967295 umulh x7, x7, x13 mov x6, #4294967295 mul x5, x6, x13 umulh x6, x6, x13 adds x7, x7, x5 adcs x6, x6, x13 adc x5, xzr, xzr subs x14, x14, x7 sbcs x15, x15, x6 sbcs x16, x16, x5 sbcs x17, x17, xzr sbcs x12, x12, xzr sbc x13, x13, xzr lsl x7, x14, #32 add x14, x7, x14 mov x7, #-4294967295 umulh x7, x7, x14 mov x6, #4294967295 mul x5, x6, x14 umulh x6, x6, x14 adds x7, x7, x5 adcs x6, x6, x14 adc x5, xzr, xzr subs x15, x15, x7 sbcs x16, x16, x6 sbcs x17, x17, x5 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr lsl x7, x15, #32 add x15, x7, x15 mov x7, #-4294967295 umulh x7, x7, x15 mov x6, #4294967295 mul x5, x6, x15 umulh x6, x6, x15 adds x7, x7, x5 adcs x6, x6, x15 adc x5, xzr, xzr subs x16, x16, x7 sbcs x17, x17, x6 sbcs x12, x12, x5 sbcs x13, x13, xzr sbcs x14, x14, xzr sbc x15, x15, xzr lsl x7, x16, #32 add x16, x7, x16 mov x7, #-4294967295 umulh x7, x7, x16 mov x6, #4294967295 mul x5, x6, x16 umulh x6, x6, x16 adds x7, x7, x5 adcs x6, x6, x16 adc x5, xzr, xzr subs x17, x17, x7 sbcs x12, x12, x6 sbcs x13, x13, x5 sbcs x14, x14, xzr sbcs x15, x15, xzr sbc x16, x16, xzr lsl x7, x17, #32 add x17, x7, x17 mov x7, #-4294967295 umulh x7, x7, x17 mov x6, #4294967295 mul x5, x6, x17 umulh x6, x6, x17 adds x7, x7, x5 adcs x6, x6, x17 adc x5, xzr, xzr subs x12, x12, x7 sbcs x13, x13, x6 sbcs x14, x14, x5 sbcs x15, x15, xzr sbcs x16, x16, xzr sbc x17, x17, xzr adds x12, x12, x19 adcs x13, x13, x20 adcs x14, x14, x21 adcs x15, x15, x22 adcs x16, x16, x2 adcs x17, x17, x1 adc x10, xzr, xzr mov x11, #-4294967295 adds x19, x12, x11 mov x11, #4294967295 adcs x20, x13, x11 mov x11, #1 adcs x21, x14, x11 adcs x22, x15, xzr adcs x2, x16, xzr adcs x1, x17, xzr adcs x10, x10, xzr 
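// (final correction of the Montgomery product: the adds/adcs chain
// provisionally added 2^384 - p_384, which carries out of the top word,
// clearing the zero flag, exactly when the unreduced value was >= p_384;
// the csel/eq sequence below keeps the corrected value in that case)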
csel x12, x12, x19, eq csel x13, x13, x20, eq csel x14, x14, x21, eq csel x15, x15, x22, eq csel x16, x16, x2, eq csel x17, x17, x1, eq stp x12, x13, [sp, #96] stp x14, x15, [sp, #112] stp x16, x17, [sp, #128] ldp x5, x6, [sp] ldp x4, x3, [sp, #192] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #16] ldp x4, x3, [sp, #208] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #32] ldp x4, x3, [sp, #224] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, lo mov x4, #4294967295 and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #-2 and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [sp] stp x7, x8, [sp, #16] stp x9, x10, [sp, #32] ldp x5, x6, [sp, #96] ldp x4, x3, [sp, #192] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #112] ldp x4, x3, [sp, #208] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #128] ldp x4, x3, [sp, #224] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, lo mov x4, #4294967295 and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #-2 and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [sp, #144] stp x7, x8, [sp, #160] stp x9, x10, [sp, #176] ldp x3, x4, [sp, #240] ldp x5, x6, [x25, #96] mul x12, x3, x5 umulh x13, x3, x5 mul x11, x3, x6 umulh x14, x3, x6 adds x13, x13, x11 ldp x7, x8, [x25, #112] mul x11, x3, x7 umulh x15, x3, x7 adcs x14, x14, x11 mul x11, x3, x8 umulh x16, x3, x8 adcs x15, x15, x11 ldp x9, x10, [x25, #128] mul x11, x3, x9 umulh x17, x3, x9 adcs x16, x16, x11 mul x11, x3, x10 umulh x19, x3, x10 adcs x17, x17, x11 adc x19, x19, xzr mul x11, x4, x5 adds x13, x13, x11 mul x11, x4, x6 adcs x14, x14, x11 mul x11, x4, x7 adcs x15, x15, x11 mul x11, x4, x8 adcs x16, x16, x11 mul x11, x4, x9 adcs x17, x17, x11 mul x11, x4, x10 adcs x19, x19, x11 cset x20, hs umulh x11, x4, x5 adds x14, x14, x11 umulh x11, x4, x6 adcs x15, x15, x11 umulh x11, x4, x7 adcs x16, x16, x11 umulh x11, x4, x8 adcs x17, x17, x11 umulh x11, x4, x9 adcs x19, x19, x11 umulh x11, x4, x10 adc x20, x20, x11 ldp x3, x4, [sp, #256] mul x11, x3, x5 adds x14, x14, x11 mul x11, x3, x6 adcs x15, x15, x11 mul x11, x3, x7 adcs x16, x16, x11 mul x11, x3, x8 adcs x17, x17, x11 mul x11, x3, x9 adcs x19, x19, x11 mul x11, x3, x10 adcs x20, x20, x11 cset x21, hs umulh x11, x3, x5 adds x15, x15, x11 umulh x11, x3, x6 adcs x16, x16, x11 umulh x11, x3, x7 adcs x17, x17, x11 umulh x11, x3, x8 adcs x19, x19, x11 umulh x11, x3, x9 adcs x20, x20, x11 umulh x11, x3, x10 adc x21, x21, x11 mul x11, x4, x5 adds x15, x15, x11 mul x11, x4, x6 adcs x16, x16, x11 mul x11, x4, x7 adcs x17, x17, x11 mul x11, x4, x8 adcs x19, x19, x11 mul x11, x4, x9 adcs x20, x20, x11 mul x11, x4, x10 adcs x21, x21, x11 cset x22, hs umulh x11, x4, x5 adds x16, x16, x11 umulh x11, x4, x6 adcs x17, x17, x11 umulh x11, x4, x7 adcs x19, x19, x11 umulh x11, x4, x8 adcs x20, x20, x11 umulh x11, x4, x9 adcs x21, x21, x11 umulh x11, x4, x10 adc x22, x22, x11 ldp x3, x4, [sp, #272] mul x11, x3, x5 adds x16, x16, x11 mul x11, x3, x6 adcs x17, x17, x11 mul x11, x3, x7 adcs x19, x19, x11 mul x11, x3, x8 adcs x20, x20, x11 mul x11, x3, x9 adcs x21, x21, x11 mul x11, x3, x10 adcs x22, x22, x11 cset x2, hs umulh x11, x3, x5 adds x17, x17, x11 umulh x11, x3, x6 adcs x19, x19, x11 umulh x11, x3, x7 adcs x20, x20, x11 umulh x11, x3, x8 adcs x21, x21, x11 umulh x11, x3, x9 adcs x22, x22, x11 umulh x11, x3, x10 adc x2, x2, x11 mul x11, x4, x5 adds x17, x17, x11 mul x11, x4, x6 adcs x19, x19, x11 mul x11, x4, x7 adcs x20, x20, x11 mul x11, x4, x8 adcs x21, 
x21, x11 mul x11, x4, x9 adcs x22, x22, x11 mul x11, x4, x10 adcs x2, x2, x11 cset x1, hs umulh x11, x4, x5 adds x19, x19, x11 umulh x11, x4, x6 adcs x20, x20, x11 umulh x11, x4, x7 adcs x21, x21, x11 umulh x11, x4, x8 adcs x22, x22, x11 umulh x11, x4, x9 adcs x2, x2, x11 umulh x11, x4, x10 adc x1, x1, x11 lsl x7, x12, #32 add x12, x7, x12 mov x7, #-4294967295 umulh x7, x7, x12 mov x6, #4294967295 mul x5, x6, x12 umulh x6, x6, x12 adds x7, x7, x5 adcs x6, x6, x12 adc x5, xzr, xzr subs x13, x13, x7 sbcs x14, x14, x6 sbcs x15, x15, x5 sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x12, x12, xzr lsl x7, x13, #32 add x13, x7, x13 mov x7, #-4294967295 umulh x7, x7, x13 mov x6, #4294967295 mul x5, x6, x13 umulh x6, x6, x13 adds x7, x7, x5 adcs x6, x6, x13 adc x5, xzr, xzr subs x14, x14, x7 sbcs x15, x15, x6 sbcs x16, x16, x5 sbcs x17, x17, xzr sbcs x12, x12, xzr sbc x13, x13, xzr lsl x7, x14, #32 add x14, x7, x14 mov x7, #-4294967295 umulh x7, x7, x14 mov x6, #4294967295 mul x5, x6, x14 umulh x6, x6, x14 adds x7, x7, x5 adcs x6, x6, x14 adc x5, xzr, xzr subs x15, x15, x7 sbcs x16, x16, x6 sbcs x17, x17, x5 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr lsl x7, x15, #32 add x15, x7, x15 mov x7, #-4294967295 umulh x7, x7, x15 mov x6, #4294967295 mul x5, x6, x15 umulh x6, x6, x15 adds x7, x7, x5 adcs x6, x6, x15 adc x5, xzr, xzr subs x16, x16, x7 sbcs x17, x17, x6 sbcs x12, x12, x5 sbcs x13, x13, xzr sbcs x14, x14, xzr sbc x15, x15, xzr lsl x7, x16, #32 add x16, x7, x16 mov x7, #-4294967295 umulh x7, x7, x16 mov x6, #4294967295 mul x5, x6, x16 umulh x6, x6, x16 adds x7, x7, x5 adcs x6, x6, x16 adc x5, xzr, xzr subs x17, x17, x7 sbcs x12, x12, x6 sbcs x13, x13, x5 sbcs x14, x14, xzr sbcs x15, x15, xzr sbc x16, x16, xzr lsl x7, x17, #32 add x17, x7, x17 mov x7, #-4294967295 umulh x7, x7, x17 mov x6, #4294967295 mul x5, x6, x17 umulh x6, x6, x17 adds x7, x7, x5 adcs x6, x6, x17 adc x5, xzr, xzr subs x12, x12, x7 sbcs x13, x13, x6 sbcs x14, x14, x5 sbcs x15, x15, xzr sbcs x16, x16, xzr sbc x17, x17, xzr adds x12, x12, x19 adcs x13, x13, x20 adcs x14, x14, x21 adcs x15, x15, x22 adcs x16, x16, x2 adcs x17, x17, x1 adc x10, xzr, xzr mov x11, #-4294967295 adds x19, x12, x11 mov x11, #4294967295 adcs x20, x13, x11 mov x11, #1 adcs x21, x14, x11 adcs x22, x15, xzr adcs x2, x16, xzr adcs x1, x17, xzr adcs x10, x10, xzr csel x12, x12, x19, eq csel x13, x13, x20, eq csel x14, x14, x21, eq csel x15, x15, x22, eq csel x16, x16, x2, eq csel x17, x17, x1, eq stp x12, x13, [sp, #240] stp x14, x15, [sp, #256] stp x16, x17, [sp, #272] ldp x5, x6, [sp] ldp x4, x3, [sp, #96] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #16] ldp x4, x3, [sp, #112] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #32] ldp x4, x3, [sp, #128] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, lo mov x4, #4294967295 and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #-2 and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [sp] stp x7, x8, [sp, #16] stp x9, x10, [sp, #32] ldp x5, x6, [sp, #192] ldp x4, x3, [sp] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #208] ldp x4, x3, [sp, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #224] ldp x4, x3, [sp, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, lo mov x4, #4294967295 and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #-2 and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [sp, #192] stp x7, x8, [sp, #208] stp x9, x10, [sp, #224] ldp x3, x4, [sp, #144] ldp x5, x6, 
[sp, #288] mul x12, x3, x5 umulh x13, x3, x5 mul x11, x3, x6 umulh x14, x3, x6 adds x13, x13, x11 ldp x7, x8, [sp, #304] mul x11, x3, x7 umulh x15, x3, x7 adcs x14, x14, x11 mul x11, x3, x8 umulh x16, x3, x8 adcs x15, x15, x11 ldp x9, x10, [sp, #320] mul x11, x3, x9 umulh x17, x3, x9 adcs x16, x16, x11 mul x11, x3, x10 umulh x19, x3, x10 adcs x17, x17, x11 adc x19, x19, xzr mul x11, x4, x5 adds x13, x13, x11 mul x11, x4, x6 adcs x14, x14, x11 mul x11, x4, x7 adcs x15, x15, x11 mul x11, x4, x8 adcs x16, x16, x11 mul x11, x4, x9 adcs x17, x17, x11 mul x11, x4, x10 adcs x19, x19, x11 cset x20, hs umulh x11, x4, x5 adds x14, x14, x11 umulh x11, x4, x6 adcs x15, x15, x11 umulh x11, x4, x7 adcs x16, x16, x11 umulh x11, x4, x8 adcs x17, x17, x11 umulh x11, x4, x9 adcs x19, x19, x11 umulh x11, x4, x10 adc x20, x20, x11 ldp x3, x4, [sp, #160] mul x11, x3, x5 adds x14, x14, x11 mul x11, x3, x6 adcs x15, x15, x11 mul x11, x3, x7 adcs x16, x16, x11 mul x11, x3, x8 adcs x17, x17, x11 mul x11, x3, x9 adcs x19, x19, x11 mul x11, x3, x10 adcs x20, x20, x11 cset x21, hs umulh x11, x3, x5 adds x15, x15, x11 umulh x11, x3, x6 adcs x16, x16, x11 umulh x11, x3, x7 adcs x17, x17, x11 umulh x11, x3, x8 adcs x19, x19, x11 umulh x11, x3, x9 adcs x20, x20, x11 umulh x11, x3, x10 adc x21, x21, x11 mul x11, x4, x5 adds x15, x15, x11 mul x11, x4, x6 adcs x16, x16, x11 mul x11, x4, x7 adcs x17, x17, x11 mul x11, x4, x8 adcs x19, x19, x11 mul x11, x4, x9 adcs x20, x20, x11 mul x11, x4, x10 adcs x21, x21, x11 cset x22, hs umulh x11, x4, x5 adds x16, x16, x11 umulh x11, x4, x6 adcs x17, x17, x11 umulh x11, x4, x7 adcs x19, x19, x11 umulh x11, x4, x8 adcs x20, x20, x11 umulh x11, x4, x9 adcs x21, x21, x11 umulh x11, x4, x10 adc x22, x22, x11 ldp x3, x4, [sp, #176] mul x11, x3, x5 adds x16, x16, x11 mul x11, x3, x6 adcs x17, x17, x11 mul x11, x3, x7 adcs x19, x19, x11 mul x11, x3, x8 adcs x20, x20, x11 mul x11, x3, x9 adcs x21, x21, x11 mul x11, x3, x10 adcs x22, x22, x11 cset x2, hs umulh x11, x3, x5 adds x17, x17, x11 umulh x11, x3, x6 adcs x19, x19, x11 umulh x11, x3, x7 adcs x20, x20, x11 umulh x11, x3, x8 adcs x21, x21, x11 umulh x11, x3, x9 adcs x22, x22, x11 umulh x11, x3, x10 adc x2, x2, x11 mul x11, x4, x5 adds x17, x17, x11 mul x11, x4, x6 adcs x19, x19, x11 mul x11, x4, x7 adcs x20, x20, x11 mul x11, x4, x8 adcs x21, x21, x11 mul x11, x4, x9 adcs x22, x22, x11 mul x11, x4, x10 adcs x2, x2, x11 cset x1, hs umulh x11, x4, x5 adds x19, x19, x11 umulh x11, x4, x6 adcs x20, x20, x11 umulh x11, x4, x7 adcs x21, x21, x11 umulh x11, x4, x8 adcs x22, x22, x11 umulh x11, x4, x9 adcs x2, x2, x11 umulh x11, x4, x10 adc x1, x1, x11 lsl x7, x12, #32 add x12, x7, x12 mov x7, #-4294967295 umulh x7, x7, x12 mov x6, #4294967295 mul x5, x6, x12 umulh x6, x6, x12 adds x7, x7, x5 adcs x6, x6, x12 adc x5, xzr, xzr subs x13, x13, x7 sbcs x14, x14, x6 sbcs x15, x15, x5 sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x12, x12, xzr lsl x7, x13, #32 add x13, x7, x13 mov x7, #-4294967295 umulh x7, x7, x13 mov x6, #4294967295 mul x5, x6, x13 umulh x6, x6, x13 adds x7, x7, x5 adcs x6, x6, x13 adc x5, xzr, xzr subs x14, x14, x7 sbcs x15, x15, x6 sbcs x16, x16, x5 sbcs x17, x17, xzr sbcs x12, x12, xzr sbc x13, x13, xzr lsl x7, x14, #32 add x14, x7, x14 mov x7, #-4294967295 umulh x7, x7, x14 mov x6, #4294967295 mul x5, x6, x14 umulh x6, x6, x14 adds x7, x7, x5 adcs x6, x6, x14 adc x5, xzr, xzr subs x15, x15, x7 sbcs x16, x16, x6 sbcs x17, x17, x5 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr lsl x7, x15, #32 add x15, x7, x15 mov x7, 
#-4294967295 umulh x7, x7, x15 mov x6, #4294967295 mul x5, x6, x15 umulh x6, x6, x15 adds x7, x7, x5 adcs x6, x6, x15 adc x5, xzr, xzr subs x16, x16, x7 sbcs x17, x17, x6 sbcs x12, x12, x5 sbcs x13, x13, xzr sbcs x14, x14, xzr sbc x15, x15, xzr lsl x7, x16, #32 add x16, x7, x16 mov x7, #-4294967295 umulh x7, x7, x16 mov x6, #4294967295 mul x5, x6, x16 umulh x6, x6, x16 adds x7, x7, x5 adcs x6, x6, x16 adc x5, xzr, xzr subs x17, x17, x7 sbcs x12, x12, x6 sbcs x13, x13, x5 sbcs x14, x14, xzr sbcs x15, x15, xzr sbc x16, x16, xzr lsl x7, x17, #32 add x17, x7, x17 mov x7, #-4294967295 umulh x7, x7, x17 mov x6, #4294967295 mul x5, x6, x17 umulh x6, x6, x17 adds x7, x7, x5 adcs x6, x6, x17 adc x5, xzr, xzr subs x12, x12, x7 sbcs x13, x13, x6 sbcs x14, x14, x5 sbcs x15, x15, xzr sbcs x16, x16, xzr sbc x17, x17, xzr adds x12, x12, x19 adcs x13, x13, x20 adcs x14, x14, x21 adcs x15, x15, x22 adcs x16, x16, x2 adcs x17, x17, x1 adc x10, xzr, xzr mov x11, #-4294967295 adds x19, x12, x11 mov x11, #4294967295 adcs x20, x13, x11 mov x11, #1 adcs x21, x14, x11 adcs x22, x15, xzr adcs x2, x16, xzr adcs x1, x17, xzr adcs x10, x10, xzr csel x12, x12, x19, eq csel x13, x13, x20, eq csel x14, x14, x21, eq csel x15, x15, x22, eq csel x16, x16, x2, eq csel x17, x17, x1, eq stp x12, x13, [sp, #144] stp x14, x15, [sp, #160] stp x16, x17, [sp, #176] ldp x3, x4, [sp, #240] ldp x5, x6, [x26, #96] mul x12, x3, x5 umulh x13, x3, x5 mul x11, x3, x6 umulh x14, x3, x6 adds x13, x13, x11 ldp x7, x8, [x26, #112] mul x11, x3, x7 umulh x15, x3, x7 adcs x14, x14, x11 mul x11, x3, x8 umulh x16, x3, x8 adcs x15, x15, x11 ldp x9, x10, [x26, #128] mul x11, x3, x9 umulh x17, x3, x9 adcs x16, x16, x11 mul x11, x3, x10 umulh x19, x3, x10 adcs x17, x17, x11 adc x19, x19, xzr mul x11, x4, x5 adds x13, x13, x11 mul x11, x4, x6 adcs x14, x14, x11 mul x11, x4, x7 adcs x15, x15, x11 mul x11, x4, x8 adcs x16, x16, x11 mul x11, x4, x9 adcs x17, x17, x11 mul x11, x4, x10 adcs x19, x19, x11 cset x20, hs umulh x11, x4, x5 adds x14, x14, x11 umulh x11, x4, x6 adcs x15, x15, x11 umulh x11, x4, x7 adcs x16, x16, x11 umulh x11, x4, x8 adcs x17, x17, x11 umulh x11, x4, x9 adcs x19, x19, x11 umulh x11, x4, x10 adc x20, x20, x11 ldp x3, x4, [sp, #256] mul x11, x3, x5 adds x14, x14, x11 mul x11, x3, x6 adcs x15, x15, x11 mul x11, x3, x7 adcs x16, x16, x11 mul x11, x3, x8 adcs x17, x17, x11 mul x11, x3, x9 adcs x19, x19, x11 mul x11, x3, x10 adcs x20, x20, x11 cset x21, hs umulh x11, x3, x5 adds x15, x15, x11 umulh x11, x3, x6 adcs x16, x16, x11 umulh x11, x3, x7 adcs x17, x17, x11 umulh x11, x3, x8 adcs x19, x19, x11 umulh x11, x3, x9 adcs x20, x20, x11 umulh x11, x3, x10 adc x21, x21, x11 mul x11, x4, x5 adds x15, x15, x11 mul x11, x4, x6 adcs x16, x16, x11 mul x11, x4, x7 adcs x17, x17, x11 mul x11, x4, x8 adcs x19, x19, x11 mul x11, x4, x9 adcs x20, x20, x11 mul x11, x4, x10 adcs x21, x21, x11 cset x22, hs umulh x11, x4, x5 adds x16, x16, x11 umulh x11, x4, x6 adcs x17, x17, x11 umulh x11, x4, x7 adcs x19, x19, x11 umulh x11, x4, x8 adcs x20, x20, x11 umulh x11, x4, x9 adcs x21, x21, x11 umulh x11, x4, x10 adc x22, x22, x11 ldp x3, x4, [sp, #272] mul x11, x3, x5 adds x16, x16, x11 mul x11, x3, x6 adcs x17, x17, x11 mul x11, x3, x7 adcs x19, x19, x11 mul x11, x3, x8 adcs x20, x20, x11 mul x11, x3, x9 adcs x21, x21, x11 mul x11, x3, x10 adcs x22, x22, x11 cset x2, hs umulh x11, x3, x5 adds x17, x17, x11 umulh x11, x3, x6 adcs x19, x19, x11 umulh x11, x3, x7 adcs x20, x20, x11 umulh x11, x3, x8 adcs x21, x21, x11 umulh x11, x3, x9 adcs x22, x22, x11 
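// (the 6x6 schoolbook product proceeds two source words at a time: each
// ldp pair contributes a row of six mul partial products followed by
// the matching row of six umulh high parts before the next pair loads)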
umulh x11, x3, x10 adc x2, x2, x11 mul x11, x4, x5 adds x17, x17, x11 mul x11, x4, x6 adcs x19, x19, x11 mul x11, x4, x7 adcs x20, x20, x11 mul x11, x4, x8 adcs x21, x21, x11 mul x11, x4, x9 adcs x22, x22, x11 mul x11, x4, x10 adcs x2, x2, x11 cset x1, hs umulh x11, x4, x5 adds x19, x19, x11 umulh x11, x4, x6 adcs x20, x20, x11 umulh x11, x4, x7 adcs x21, x21, x11 umulh x11, x4, x8 adcs x22, x22, x11 umulh x11, x4, x9 adcs x2, x2, x11 umulh x11, x4, x10 adc x1, x1, x11 lsl x7, x12, #32 add x12, x7, x12 mov x7, #-4294967295 umulh x7, x7, x12 mov x6, #4294967295 mul x5, x6, x12 umulh x6, x6, x12 adds x7, x7, x5 adcs x6, x6, x12 adc x5, xzr, xzr subs x13, x13, x7 sbcs x14, x14, x6 sbcs x15, x15, x5 sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x12, x12, xzr lsl x7, x13, #32 add x13, x7, x13 mov x7, #-4294967295 umulh x7, x7, x13 mov x6, #4294967295 mul x5, x6, x13 umulh x6, x6, x13 adds x7, x7, x5 adcs x6, x6, x13 adc x5, xzr, xzr subs x14, x14, x7 sbcs x15, x15, x6 sbcs x16, x16, x5 sbcs x17, x17, xzr sbcs x12, x12, xzr sbc x13, x13, xzr lsl x7, x14, #32 add x14, x7, x14 mov x7, #-4294967295 umulh x7, x7, x14 mov x6, #4294967295 mul x5, x6, x14 umulh x6, x6, x14 adds x7, x7, x5 adcs x6, x6, x14 adc x5, xzr, xzr subs x15, x15, x7 sbcs x16, x16, x6 sbcs x17, x17, x5 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr lsl x7, x15, #32 add x15, x7, x15 mov x7, #-4294967295 umulh x7, x7, x15 mov x6, #4294967295 mul x5, x6, x15 umulh x6, x6, x15 adds x7, x7, x5 adcs x6, x6, x15 adc x5, xzr, xzr subs x16, x16, x7 sbcs x17, x17, x6 sbcs x12, x12, x5 sbcs x13, x13, xzr sbcs x14, x14, xzr sbc x15, x15, xzr lsl x7, x16, #32 add x16, x7, x16 mov x7, #-4294967295 umulh x7, x7, x16 mov x6, #4294967295 mul x5, x6, x16 umulh x6, x6, x16 adds x7, x7, x5 adcs x6, x6, x16 adc x5, xzr, xzr subs x17, x17, x7 sbcs x12, x12, x6 sbcs x13, x13, x5 sbcs x14, x14, xzr sbcs x15, x15, xzr sbc x16, x16, xzr lsl x7, x17, #32 add x17, x7, x17 mov x7, #-4294967295 umulh x7, x7, x17 mov x6, #4294967295 mul x5, x6, x17 umulh x6, x6, x17 adds x7, x7, x5 adcs x6, x6, x17 adc x5, xzr, xzr subs x12, x12, x7 sbcs x13, x13, x6 sbcs x14, x14, x5 sbcs x15, x15, xzr sbcs x16, x16, xzr sbc x17, x17, xzr adds x12, x12, x19 adcs x13, x13, x20 adcs x14, x14, x21 adcs x15, x15, x22 adcs x16, x16, x2 adcs x17, x17, x1 adc x10, xzr, xzr mov x11, #-4294967295 adds x19, x12, x11 mov x11, #4294967295 adcs x20, x13, x11 mov x11, #1 adcs x21, x14, x11 adcs x22, x15, xzr adcs x2, x16, xzr adcs x1, x17, xzr adcs x10, x10, xzr csel x12, x12, x19, eq csel x13, x13, x20, eq csel x14, x14, x21, eq csel x15, x15, x22, eq csel x16, x16, x2, eq csel x17, x17, x1, eq stp x12, x13, [sp, #240] stp x14, x15, [sp, #256] stp x16, x17, [sp, #272] ldp x3, x4, [sp, #48] ldp x5, x6, [sp, #192] mul x12, x3, x5 umulh x13, x3, x5 mul x11, x3, x6 umulh x14, x3, x6 adds x13, x13, x11 ldp x7, x8, [sp, #208] mul x11, x3, x7 umulh x15, x3, x7 adcs x14, x14, x11 mul x11, x3, x8 umulh x16, x3, x8 adcs x15, x15, x11 ldp x9, x10, [sp, #224] mul x11, x3, x9 umulh x17, x3, x9 adcs x16, x16, x11 mul x11, x3, x10 umulh x19, x3, x10 adcs x17, x17, x11 adc x19, x19, xzr mul x11, x4, x5 adds x13, x13, x11 mul x11, x4, x6 adcs x14, x14, x11 mul x11, x4, x7 adcs x15, x15, x11 mul x11, x4, x8 adcs x16, x16, x11 mul x11, x4, x9 adcs x17, x17, x11 mul x11, x4, x10 adcs x19, x19, x11 cset x20, hs umulh x11, x4, x5 adds x14, x14, x11 umulh x11, x4, x6 adcs x15, x15, x11 umulh x11, x4, x7 adcs x16, x16, x11 umulh x11, x4, x8 adcs x17, x17, x11 umulh x11, x4, x9 adcs x19, x19, x11 umulh x11, 
x4, x10 adc x20, x20, x11 ldp x3, x4, [sp, #64] mul x11, x3, x5 adds x14, x14, x11 mul x11, x3, x6 adcs x15, x15, x11 mul x11, x3, x7 adcs x16, x16, x11 mul x11, x3, x8 adcs x17, x17, x11 mul x11, x3, x9 adcs x19, x19, x11 mul x11, x3, x10 adcs x20, x20, x11 cset x21, hs umulh x11, x3, x5 adds x15, x15, x11 umulh x11, x3, x6 adcs x16, x16, x11 umulh x11, x3, x7 adcs x17, x17, x11 umulh x11, x3, x8 adcs x19, x19, x11 umulh x11, x3, x9 adcs x20, x20, x11 umulh x11, x3, x10 adc x21, x21, x11 mul x11, x4, x5 adds x15, x15, x11 mul x11, x4, x6 adcs x16, x16, x11 mul x11, x4, x7 adcs x17, x17, x11 mul x11, x4, x8 adcs x19, x19, x11 mul x11, x4, x9 adcs x20, x20, x11 mul x11, x4, x10 adcs x21, x21, x11 cset x22, hs umulh x11, x4, x5 adds x16, x16, x11 umulh x11, x4, x6 adcs x17, x17, x11 umulh x11, x4, x7 adcs x19, x19, x11 umulh x11, x4, x8 adcs x20, x20, x11 umulh x11, x4, x9 adcs x21, x21, x11 umulh x11, x4, x10 adc x22, x22, x11 ldp x3, x4, [sp, #80] mul x11, x3, x5 adds x16, x16, x11 mul x11, x3, x6 adcs x17, x17, x11 mul x11, x3, x7 adcs x19, x19, x11 mul x11, x3, x8 adcs x20, x20, x11 mul x11, x3, x9 adcs x21, x21, x11 mul x11, x3, x10 adcs x22, x22, x11 cset x2, hs umulh x11, x3, x5 adds x17, x17, x11 umulh x11, x3, x6 adcs x19, x19, x11 umulh x11, x3, x7 adcs x20, x20, x11 umulh x11, x3, x8 adcs x21, x21, x11 umulh x11, x3, x9 adcs x22, x22, x11 umulh x11, x3, x10 adc x2, x2, x11 mul x11, x4, x5 adds x17, x17, x11 mul x11, x4, x6 adcs x19, x19, x11 mul x11, x4, x7 adcs x20, x20, x11 mul x11, x4, x8 adcs x21, x21, x11 mul x11, x4, x9 adcs x22, x22, x11 mul x11, x4, x10 adcs x2, x2, x11 cset x1, hs umulh x11, x4, x5 adds x19, x19, x11 umulh x11, x4, x6 adcs x20, x20, x11 umulh x11, x4, x7 adcs x21, x21, x11 umulh x11, x4, x8 adcs x22, x22, x11 umulh x11, x4, x9 adcs x2, x2, x11 umulh x11, x4, x10 adc x1, x1, x11 lsl x7, x12, #32 add x12, x7, x12 mov x7, #-4294967295 umulh x7, x7, x12 mov x6, #4294967295 mul x5, x6, x12 umulh x6, x6, x12 adds x7, x7, x5 adcs x6, x6, x12 adc x5, xzr, xzr subs x13, x13, x7 sbcs x14, x14, x6 sbcs x15, x15, x5 sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x12, x12, xzr lsl x7, x13, #32 add x13, x7, x13 mov x7, #-4294967295 umulh x7, x7, x13 mov x6, #4294967295 mul x5, x6, x13 umulh x6, x6, x13 adds x7, x7, x5 adcs x6, x6, x13 adc x5, xzr, xzr subs x14, x14, x7 sbcs x15, x15, x6 sbcs x16, x16, x5 sbcs x17, x17, xzr sbcs x12, x12, xzr sbc x13, x13, xzr lsl x7, x14, #32 add x14, x7, x14 mov x7, #-4294967295 umulh x7, x7, x14 mov x6, #4294967295 mul x5, x6, x14 umulh x6, x6, x14 adds x7, x7, x5 adcs x6, x6, x14 adc x5, xzr, xzr subs x15, x15, x7 sbcs x16, x16, x6 sbcs x17, x17, x5 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr lsl x7, x15, #32 add x15, x7, x15 mov x7, #-4294967295 umulh x7, x7, x15 mov x6, #4294967295 mul x5, x6, x15 umulh x6, x6, x15 adds x7, x7, x5 adcs x6, x6, x15 adc x5, xzr, xzr subs x16, x16, x7 sbcs x17, x17, x6 sbcs x12, x12, x5 sbcs x13, x13, xzr sbcs x14, x14, xzr sbc x15, x15, xzr lsl x7, x16, #32 add x16, x7, x16 mov x7, #-4294967295 umulh x7, x7, x16 mov x6, #4294967295 mul x5, x6, x16 umulh x6, x6, x16 adds x7, x7, x5 adcs x6, x6, x16 adc x5, xzr, xzr subs x17, x17, x7 sbcs x12, x12, x6 sbcs x13, x13, x5 sbcs x14, x14, xzr sbcs x15, x15, xzr sbc x16, x16, xzr lsl x7, x17, #32 add x17, x7, x17 mov x7, #-4294967295 umulh x7, x7, x17 mov x6, #4294967295 mul x5, x6, x17 umulh x6, x6, x17 adds x7, x7, x5 adcs x6, x6, x17 adc x5, xzr, xzr subs x12, x12, x7 sbcs x13, x13, x6 sbcs x14, x14, x5 sbcs x15, x15, xzr sbcs x16, x16, xzr sbc 
        x17, x17, xzr   // (operands completing the sbc split across the preceding line)

// Add the high half of the product back onto the reduced low half

        adds x12, x12, x19
        adcs x13, x13, x20
        adcs x14, x14, x21
        adcs x15, x15, x22
        adcs x16, x16, x2
        adcs x17, x17, x1
        adc x10, xzr, xzr

// Provisionally add 2^384 - p_384; keep the corrected value only if
// that carries out of the top word (i.e. the value was >= p_384)

        mov x11, #-4294967295
        adds x19, x12, x11
        mov x11, #4294967295
        adcs x20, x13, x11
        mov x11, #1
        adcs x21, x14, x11
        adcs x22, x15, xzr
        adcs x2, x16, xzr
        adcs x1, x17, xzr
        adcs x10, x10, xzr
        csel x12, x12, x19, eq
        csel x13, x13, x20, eq
        csel x14, x14, x21, eq
        csel x15, x15, x22, eq
        csel x16, x16, x2, eq
        csel x17, x17, x1, eq
        stp x12, x13, [sp, #192]
        stp x14, x15, [sp, #208]
        stp x16, x17, [sp, #224]

// Modular subtraction: [sp, #192..] := ([sp, #192..] - [sp, #144..]) mod p_384

        ldp x5, x6, [sp, #192]
        ldp x4, x3, [sp, #144]
        subs x5, x5, x4
        sbcs x6, x6, x3
        ldp x7, x8, [sp, #208]
        ldp x4, x3, [sp, #160]
        sbcs x7, x7, x4
        sbcs x8, x8, x3
        ldp x9, x10, [sp, #224]
        ldp x4, x3, [sp, #176]
        sbcs x9, x9, x4
        sbcs x10, x10, x3
        csetm x3, lo
        mov x4, #4294967295
        and x4, x4, x3
        adds x5, x5, x4
        eor x4, x4, x3
        adcs x6, x6, x4
        mov x4, #-2
        and x4, x4, x3
        adcs x7, x7, x4
        adcs x8, x8, x3
        adcs x9, x9, x3
        adc x10, x10, x3
        stp x5, x6, [sp, #192]
        stp x7, x8, [sp, #208]
        stp x9, x10, [sp, #224]

// Test whether either input point is the identity (z-coordinate zero)

        ldp x0, x1, [x25, #96]
        ldp x2, x3, [x25, #112]
        ldp x4, x5, [x25, #128]
        orr x20, x0, x1
        orr x21, x2, x3
        orr x22, x4, x5
        orr x20, x20, x21
        orr x20, x20, x22
        cmp x20, xzr
        cset x20, ne
        ldp x6, x7, [x26, #96]
        ldp x8, x9, [x26, #112]
        ldp x10, x11, [x26, #128]
        orr x21, x6, x7
        orr x22, x8, x9
        orr x23, x10, x11
        orr x21, x21, x22
        orr x21, x21, x23
        cmp x21, xzr
        cset x21, ne
        cmp x21, x20

// Multiplex the result: lo means the point at x26 was the identity, so
// copy the point at x25; hi means the point at x25 was the identity, so
// copy the point at x26; otherwise keep the computed sum

        ldp x12, x13, [sp, #240]
        csel x12, x0, x12, lo
        csel x13, x1, x13, lo
        csel x12, x6, x12, hi
        csel x13, x7, x13, hi
        ldp x14, x15, [sp, #256]
        csel x14, x2, x14, lo
        csel x15, x3, x15, lo
        csel x14, x8, x14, hi
        csel x15, x9, x15, hi
        ldp x16, x17, [sp, #272]
        csel x16, x4, x16, lo
        csel x17, x5, x17, lo
        csel x16, x10, x16, hi
        csel x17, x11, x17, hi
        ldp x20, x21, [x25]
        ldp x0, x1, [sp]
        csel x0, x20, x0, lo
        csel x1, x21, x1, lo
        ldp x20, x21, [x26]
        csel x0, x20, x0, hi
        csel x1, x21, x1, hi
        ldp x20, x21, [x25, #16]
        ldp x2, x3, [sp, #16]
        csel x2, x20, x2, lo
        csel x3, x21, x3, lo
        ldp x20, x21, [x26, #16]
        csel x2, x20, x2, hi
        csel x3, x21, x3, hi
        ldp x20, x21, [x25, #32]
        ldp x4, x5, [sp, #32]
        csel x4, x20, x4, lo
        csel x5, x21, x5, lo
        ldp x20, x21, [x26, #32]
        csel x4, x20, x4, hi
        csel x5, x21, x5, hi
        ldp x20, x21, [x25, #48]
        ldp x6, x7, [sp, #192]
        csel x6, x20, x6, lo
        csel x7, x21, x7, lo
        ldp x20, x21, [x26, #48]
        csel x6, x20, x6, hi
        csel x7, x21, x7, hi
        ldp x20, x21, [x25, #64]
        ldp x8, x9, [sp, #208]
        csel x8, x20, x8, lo
        csel x9, x21, x9, lo
        ldp x20, x21, [x26, #64]
        csel x8, x20, x8, hi
        csel x9, x21, x9, hi
        ldp x20, x21, [x25, #80]
        ldp x10, x11, [sp, #224]
        csel x10, x20, x10, lo
        csel x11, x21, x11, lo
        ldp x20, x21, [x26, #80]
        csel x10, x20, x10, hi
        csel x11, x21, x11, hi

// Write the selected coordinates to the output point

        stp x0, x1, [x24]
        stp x2, x3, [x24, #16]
        stp x4, x5, [x24, #32]
        stp x6, x7, [x24, #48]
        stp x8, x9, [x24, #64]
        stp x10, x11, [x24, #80]
        stp x12, x13, [x24, #96]
        stp x14, x15, [x24, #112]
        stp x16, x17, [x24, #128]

// Restore registers and stack frame and return

        CFI_INC_SP(336)
        CFI_POP2(x25,x26)
        CFI_POP2(x23,x24)
        CFI_POP2(x21,x22)
        CFI_POP2(x19,x20)
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(Lp384_montjscalarmul_alt_p384_montjadd)

// Local Jacobian point doubling; x23 and x24 hold copies of the output
// and input pointers respectively, over the same 336-byte stack frame

S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp384_montjscalarmul_alt_p384_montjdouble)
Lp384_montjscalarmul_alt_p384_montjdouble:
        CFI_START
        CFI_PUSH2(x19,x20)
        CFI_PUSH2(x21,x22)
        CFI_PUSH2(x23,x24)
        CFI_DEC_SP(336)
        mov x23, x0
        mov x24, x1

// Montgomery-square the input z-coordinate, read from [x24, #96..#128]

        ldp x2, x3, [x24, #96]
        mul x9, x2, x3
        umulh x10, x2, x3
        ldp x4, x5, [x24, #112]
        mul x8, x2, x4
        adds x10, x10, x8
        mul x11, x2, x5
        mul x8, x3, x4
        adcs x11, x11, x8
        umulh x12, x2, x5
        mul x8, x3, x5
        adcs x12, x12, x8
        ldp x6, x7, [x24, #128]
        mul x13, x2, x7
        mul x8, x3, x6
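// (the z^2 squaring continues: the remaining off-diagonal products are
// accumulated, the whole sum is doubled, and the diagonal squares
// x_i * x_i are added before the six rounds of Montgomery reduction)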
adcs x13, x13, x8 umulh x14, x2, x7 mul x8, x3, x7 adcs x14, x14, x8 mul x15, x5, x6 adcs x15, x15, xzr umulh x16, x5, x6 adc x16, x16, xzr umulh x8, x2, x4 adds x11, x11, x8 umulh x8, x3, x4 adcs x12, x12, x8 umulh x8, x3, x5 adcs x13, x13, x8 umulh x8, x3, x6 adcs x14, x14, x8 umulh x8, x3, x7 adcs x15, x15, x8 adc x16, x16, xzr mul x8, x2, x6 adds x12, x12, x8 mul x8, x4, x5 adcs x13, x13, x8 mul x8, x4, x6 adcs x14, x14, x8 mul x8, x4, x7 adcs x15, x15, x8 mul x8, x5, x7 adcs x16, x16, x8 mul x17, x6, x7 adcs x17, x17, xzr umulh x19, x6, x7 adc x19, x19, xzr umulh x8, x2, x6 adds x13, x13, x8 umulh x8, x4, x5 adcs x14, x14, x8 umulh x8, x4, x6 adcs x15, x15, x8 umulh x8, x4, x7 adcs x16, x16, x8 umulh x8, x5, x7 adcs x17, x17, x8 adc x19, x19, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 adcs x15, x15, x15 adcs x16, x16, x16 adcs x17, x17, x17 adcs x19, x19, x19 cset x20, hs umulh x8, x2, x2 mul x2, x2, x2 adds x9, x9, x8 mul x8, x3, x3 adcs x10, x10, x8 umulh x8, x3, x3 adcs x11, x11, x8 mul x8, x4, x4 adcs x12, x12, x8 umulh x8, x4, x4 adcs x13, x13, x8 mul x8, x5, x5 adcs x14, x14, x8 umulh x8, x5, x5 adcs x15, x15, x8 mul x8, x6, x6 adcs x16, x16, x8 umulh x8, x6, x6 adcs x17, x17, x8 mul x8, x7, x7 adcs x19, x19, x8 umulh x8, x7, x7 adc x20, x20, x8 lsl x5, x2, #32 add x2, x5, x2 mov x5, #-4294967295 umulh x5, x5, x2 mov x4, #4294967295 mul x3, x4, x2 umulh x4, x4, x2 adds x5, x5, x3 adcs x4, x4, x2 adc x3, xzr, xzr subs x9, x9, x5 sbcs x10, x10, x4 sbcs x11, x11, x3 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x2, x2, xzr lsl x5, x9, #32 add x9, x5, x9 mov x5, #-4294967295 umulh x5, x5, x9 mov x4, #4294967295 mul x3, x4, x9 umulh x4, x4, x9 adds x5, x5, x3 adcs x4, x4, x9 adc x3, xzr, xzr subs x10, x10, x5 sbcs x11, x11, x4 sbcs x12, x12, x3 sbcs x13, x13, xzr sbcs x2, x2, xzr sbc x9, x9, xzr lsl x5, x10, #32 add x10, x5, x10 mov x5, #-4294967295 umulh x5, x5, x10 mov x4, #4294967295 mul x3, x4, x10 umulh x4, x4, x10 adds x5, x5, x3 adcs x4, x4, x10 adc x3, xzr, xzr subs x11, x11, x5 sbcs x12, x12, x4 sbcs x13, x13, x3 sbcs x2, x2, xzr sbcs x9, x9, xzr sbc x10, x10, xzr lsl x5, x11, #32 add x11, x5, x11 mov x5, #-4294967295 umulh x5, x5, x11 mov x4, #4294967295 mul x3, x4, x11 umulh x4, x4, x11 adds x5, x5, x3 adcs x4, x4, x11 adc x3, xzr, xzr subs x12, x12, x5 sbcs x13, x13, x4 sbcs x2, x2, x3 sbcs x9, x9, xzr sbcs x10, x10, xzr sbc x11, x11, xzr lsl x5, x12, #32 add x12, x5, x12 mov x5, #-4294967295 umulh x5, x5, x12 mov x4, #4294967295 mul x3, x4, x12 umulh x4, x4, x12 adds x5, x5, x3 adcs x4, x4, x12 adc x3, xzr, xzr subs x13, x13, x5 sbcs x2, x2, x4 sbcs x9, x9, x3 sbcs x10, x10, xzr sbcs x11, x11, xzr sbc x12, x12, xzr lsl x5, x13, #32 add x13, x5, x13 mov x5, #-4294967295 umulh x5, x5, x13 mov x4, #4294967295 mul x3, x4, x13 umulh x4, x4, x13 adds x5, x5, x3 adcs x4, x4, x13 adc x3, xzr, xzr subs x2, x2, x5 sbcs x9, x9, x4 sbcs x10, x10, x3 sbcs x11, x11, xzr sbcs x12, x12, xzr sbc x13, x13, xzr adds x2, x2, x14 adcs x9, x9, x15 adcs x10, x10, x16 adcs x11, x11, x17 adcs x12, x12, x19 adcs x13, x13, x20 adc x6, xzr, xzr mov x8, #-4294967295 adds x14, x2, x8 mov x8, #4294967295 adcs x15, x9, x8 mov x8, #1 adcs x16, x10, x8 adcs x17, x11, xzr adcs x19, x12, xzr adcs x20, x13, xzr adcs x6, x6, xzr csel x2, x2, x14, eq csel x9, x9, x15, eq csel x10, x10, x16, eq csel x11, x11, x17, eq csel x12, x12, x19, eq csel x13, x13, x20, eq stp x2, x9, [sp] stp x10, x11, [sp, #16] stp x12, x13, [sp, #32] ldp x2, x3, [x24, 
#48] mul x9, x2, x3 umulh x10, x2, x3 ldp x4, x5, [x24, #64] mul x8, x2, x4 adds x10, x10, x8 mul x11, x2, x5 mul x8, x3, x4 adcs x11, x11, x8 umulh x12, x2, x5 mul x8, x3, x5 adcs x12, x12, x8 ldp x6, x7, [x24, #80] mul x13, x2, x7 mul x8, x3, x6 adcs x13, x13, x8 umulh x14, x2, x7 mul x8, x3, x7 adcs x14, x14, x8 mul x15, x5, x6 adcs x15, x15, xzr umulh x16, x5, x6 adc x16, x16, xzr umulh x8, x2, x4 adds x11, x11, x8 umulh x8, x3, x4 adcs x12, x12, x8 umulh x8, x3, x5 adcs x13, x13, x8 umulh x8, x3, x6 adcs x14, x14, x8 umulh x8, x3, x7 adcs x15, x15, x8 adc x16, x16, xzr mul x8, x2, x6 adds x12, x12, x8 mul x8, x4, x5 adcs x13, x13, x8 mul x8, x4, x6 adcs x14, x14, x8 mul x8, x4, x7 adcs x15, x15, x8 mul x8, x5, x7 adcs x16, x16, x8 mul x17, x6, x7 adcs x17, x17, xzr umulh x19, x6, x7 adc x19, x19, xzr umulh x8, x2, x6 adds x13, x13, x8 umulh x8, x4, x5 adcs x14, x14, x8 umulh x8, x4, x6 adcs x15, x15, x8 umulh x8, x4, x7 adcs x16, x16, x8 umulh x8, x5, x7 adcs x17, x17, x8 adc x19, x19, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 adcs x15, x15, x15 adcs x16, x16, x16 adcs x17, x17, x17 adcs x19, x19, x19 cset x20, hs umulh x8, x2, x2 mul x2, x2, x2 adds x9, x9, x8 mul x8, x3, x3 adcs x10, x10, x8 umulh x8, x3, x3 adcs x11, x11, x8 mul x8, x4, x4 adcs x12, x12, x8 umulh x8, x4, x4 adcs x13, x13, x8 mul x8, x5, x5 adcs x14, x14, x8 umulh x8, x5, x5 adcs x15, x15, x8 mul x8, x6, x6 adcs x16, x16, x8 umulh x8, x6, x6 adcs x17, x17, x8 mul x8, x7, x7 adcs x19, x19, x8 umulh x8, x7, x7 adc x20, x20, x8 lsl x5, x2, #32 add x2, x5, x2 mov x5, #-4294967295 umulh x5, x5, x2 mov x4, #4294967295 mul x3, x4, x2 umulh x4, x4, x2 adds x5, x5, x3 adcs x4, x4, x2 adc x3, xzr, xzr subs x9, x9, x5 sbcs x10, x10, x4 sbcs x11, x11, x3 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x2, x2, xzr lsl x5, x9, #32 add x9, x5, x9 mov x5, #-4294967295 umulh x5, x5, x9 mov x4, #4294967295 mul x3, x4, x9 umulh x4, x4, x9 adds x5, x5, x3 adcs x4, x4, x9 adc x3, xzr, xzr subs x10, x10, x5 sbcs x11, x11, x4 sbcs x12, x12, x3 sbcs x13, x13, xzr sbcs x2, x2, xzr sbc x9, x9, xzr lsl x5, x10, #32 add x10, x5, x10 mov x5, #-4294967295 umulh x5, x5, x10 mov x4, #4294967295 mul x3, x4, x10 umulh x4, x4, x10 adds x5, x5, x3 adcs x4, x4, x10 adc x3, xzr, xzr subs x11, x11, x5 sbcs x12, x12, x4 sbcs x13, x13, x3 sbcs x2, x2, xzr sbcs x9, x9, xzr sbc x10, x10, xzr lsl x5, x11, #32 add x11, x5, x11 mov x5, #-4294967295 umulh x5, x5, x11 mov x4, #4294967295 mul x3, x4, x11 umulh x4, x4, x11 adds x5, x5, x3 adcs x4, x4, x11 adc x3, xzr, xzr subs x12, x12, x5 sbcs x13, x13, x4 sbcs x2, x2, x3 sbcs x9, x9, xzr sbcs x10, x10, xzr sbc x11, x11, xzr lsl x5, x12, #32 add x12, x5, x12 mov x5, #-4294967295 umulh x5, x5, x12 mov x4, #4294967295 mul x3, x4, x12 umulh x4, x4, x12 adds x5, x5, x3 adcs x4, x4, x12 adc x3, xzr, xzr subs x13, x13, x5 sbcs x2, x2, x4 sbcs x9, x9, x3 sbcs x10, x10, xzr sbcs x11, x11, xzr sbc x12, x12, xzr lsl x5, x13, #32 add x13, x5, x13 mov x5, #-4294967295 umulh x5, x5, x13 mov x4, #4294967295 mul x3, x4, x13 umulh x4, x4, x13 adds x5, x5, x3 adcs x4, x4, x13 adc x3, xzr, xzr subs x2, x2, x5 sbcs x9, x9, x4 sbcs x10, x10, x3 sbcs x11, x11, xzr sbcs x12, x12, xzr sbc x13, x13, xzr adds x2, x2, x14 adcs x9, x9, x15 adcs x10, x10, x16 adcs x11, x11, x17 adcs x12, x12, x19 adcs x13, x13, x20 adc x6, xzr, xzr mov x8, #-4294967295 adds x14, x2, x8 mov x8, #4294967295 adcs x15, x9, x8 mov x8, #1 adcs x16, x10, x8 adcs x17, x11, xzr adcs x19, x12, xzr adcs 
x20, x13, xzr adcs x6, x6, xzr csel x2, x2, x14, eq csel x9, x9, x15, eq csel x10, x10, x16, eq csel x11, x11, x17, eq csel x12, x12, x19, eq csel x13, x13, x20, eq stp x2, x9, [sp, #48] stp x10, x11, [sp, #64] stp x12, x13, [sp, #80] ldp x5, x6, [x24] ldp x4, x3, [sp] adds x5, x5, x4 adcs x6, x6, x3 ldp x7, x8, [x24, #16] ldp x4, x3, [sp, #16] adcs x7, x7, x4 adcs x8, x8, x3 ldp x9, x10, [x24, #32] ldp x4, x3, [sp, #32] adcs x9, x9, x4 adcs x10, x10, x3 csetm x3, hs mov x4, #4294967295 and x4, x4, x3 subs x5, x5, x4 eor x4, x4, x3 sbcs x6, x6, x4 mov x4, #-2 and x4, x4, x3 sbcs x7, x7, x4 sbcs x8, x8, x3 sbcs x9, x9, x3 sbc x10, x10, x3 stp x5, x6, [sp, #240] stp x7, x8, [sp, #256] stp x9, x10, [sp, #272] ldp x5, x6, [x24] ldp x4, x3, [sp] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x24, #16] ldp x4, x3, [sp, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [x24, #32] ldp x4, x3, [sp, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, lo mov x4, #4294967295 and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #-2 and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [sp, #192] stp x7, x8, [sp, #208] stp x9, x10, [sp, #224] ldp x3, x4, [sp, #240] ldp x5, x6, [sp, #192] mul x12, x3, x5 umulh x13, x3, x5 mul x11, x3, x6 umulh x14, x3, x6 adds x13, x13, x11 ldp x7, x8, [sp, #208] mul x11, x3, x7 umulh x15, x3, x7 adcs x14, x14, x11 mul x11, x3, x8 umulh x16, x3, x8 adcs x15, x15, x11 ldp x9, x10, [sp, #224] mul x11, x3, x9 umulh x17, x3, x9 adcs x16, x16, x11 mul x11, x3, x10 umulh x19, x3, x10 adcs x17, x17, x11 adc x19, x19, xzr mul x11, x4, x5 adds x13, x13, x11 mul x11, x4, x6 adcs x14, x14, x11 mul x11, x4, x7 adcs x15, x15, x11 mul x11, x4, x8 adcs x16, x16, x11 mul x11, x4, x9 adcs x17, x17, x11 mul x11, x4, x10 adcs x19, x19, x11 cset x20, hs umulh x11, x4, x5 adds x14, x14, x11 umulh x11, x4, x6 adcs x15, x15, x11 umulh x11, x4, x7 adcs x16, x16, x11 umulh x11, x4, x8 adcs x17, x17, x11 umulh x11, x4, x9 adcs x19, x19, x11 umulh x11, x4, x10 adc x20, x20, x11 ldp x3, x4, [sp, #256] mul x11, x3, x5 adds x14, x14, x11 mul x11, x3, x6 adcs x15, x15, x11 mul x11, x3, x7 adcs x16, x16, x11 mul x11, x3, x8 adcs x17, x17, x11 mul x11, x3, x9 adcs x19, x19, x11 mul x11, x3, x10 adcs x20, x20, x11 cset x21, hs umulh x11, x3, x5 adds x15, x15, x11 umulh x11, x3, x6 adcs x16, x16, x11 umulh x11, x3, x7 adcs x17, x17, x11 umulh x11, x3, x8 adcs x19, x19, x11 umulh x11, x3, x9 adcs x20, x20, x11 umulh x11, x3, x10 adc x21, x21, x11 mul x11, x4, x5 adds x15, x15, x11 mul x11, x4, x6 adcs x16, x16, x11 mul x11, x4, x7 adcs x17, x17, x11 mul x11, x4, x8 adcs x19, x19, x11 mul x11, x4, x9 adcs x20, x20, x11 mul x11, x4, x10 adcs x21, x21, x11 cset x22, hs umulh x11, x4, x5 adds x16, x16, x11 umulh x11, x4, x6 adcs x17, x17, x11 umulh x11, x4, x7 adcs x19, x19, x11 umulh x11, x4, x8 adcs x20, x20, x11 umulh x11, x4, x9 adcs x21, x21, x11 umulh x11, x4, x10 adc x22, x22, x11 ldp x3, x4, [sp, #272] mul x11, x3, x5 adds x16, x16, x11 mul x11, x3, x6 adcs x17, x17, x11 mul x11, x3, x7 adcs x19, x19, x11 mul x11, x3, x8 adcs x20, x20, x11 mul x11, x3, x9 adcs x21, x21, x11 mul x11, x3, x10 adcs x22, x22, x11 cset x2, hs umulh x11, x3, x5 adds x17, x17, x11 umulh x11, x3, x6 adcs x19, x19, x11 umulh x11, x3, x7 adcs x20, x20, x11 umulh x11, x3, x8 adcs x21, x21, x11 umulh x11, x3, x9 adcs x22, x22, x11 umulh x11, x3, x10 adc x2, x2, x11 mul x11, x4, x5 adds x17, x17, x11 mul x11, x4, x6 adcs x19, x19, x11 mul x11, x4, x7 adcs x20, x20, x11 mul x11, x4, 
x8 adcs x21, x21, x11 mul x11, x4, x9 adcs x22, x22, x11 mul x11, x4, x10 adcs x2, x2, x11 cset x1, hs umulh x11, x4, x5 adds x19, x19, x11 umulh x11, x4, x6 adcs x20, x20, x11 umulh x11, x4, x7 adcs x21, x21, x11 umulh x11, x4, x8 adcs x22, x22, x11 umulh x11, x4, x9 adcs x2, x2, x11 umulh x11, x4, x10 adc x1, x1, x11 lsl x7, x12, #32 add x12, x7, x12 mov x7, #-4294967295 umulh x7, x7, x12 mov x6, #4294967295 mul x5, x6, x12 umulh x6, x6, x12 adds x7, x7, x5 adcs x6, x6, x12 adc x5, xzr, xzr subs x13, x13, x7 sbcs x14, x14, x6 sbcs x15, x15, x5 sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x12, x12, xzr lsl x7, x13, #32 add x13, x7, x13 mov x7, #-4294967295 umulh x7, x7, x13 mov x6, #4294967295 mul x5, x6, x13 umulh x6, x6, x13 adds x7, x7, x5 adcs x6, x6, x13 adc x5, xzr, xzr subs x14, x14, x7 sbcs x15, x15, x6 sbcs x16, x16, x5 sbcs x17, x17, xzr sbcs x12, x12, xzr sbc x13, x13, xzr lsl x7, x14, #32 add x14, x7, x14 mov x7, #-4294967295 umulh x7, x7, x14 mov x6, #4294967295 mul x5, x6, x14 umulh x6, x6, x14 adds x7, x7, x5 adcs x6, x6, x14 adc x5, xzr, xzr subs x15, x15, x7 sbcs x16, x16, x6 sbcs x17, x17, x5 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr lsl x7, x15, #32 add x15, x7, x15 mov x7, #-4294967295 umulh x7, x7, x15 mov x6, #4294967295 mul x5, x6, x15 umulh x6, x6, x15 adds x7, x7, x5 adcs x6, x6, x15 adc x5, xzr, xzr subs x16, x16, x7 sbcs x17, x17, x6 sbcs x12, x12, x5 sbcs x13, x13, xzr sbcs x14, x14, xzr sbc x15, x15, xzr lsl x7, x16, #32 add x16, x7, x16 mov x7, #-4294967295 umulh x7, x7, x16 mov x6, #4294967295 mul x5, x6, x16 umulh x6, x6, x16 adds x7, x7, x5 adcs x6, x6, x16 adc x5, xzr, xzr subs x17, x17, x7 sbcs x12, x12, x6 sbcs x13, x13, x5 sbcs x14, x14, xzr sbcs x15, x15, xzr sbc x16, x16, xzr lsl x7, x17, #32 add x17, x7, x17 mov x7, #-4294967295 umulh x7, x7, x17 mov x6, #4294967295 mul x5, x6, x17 umulh x6, x6, x17 adds x7, x7, x5 adcs x6, x6, x17 adc x5, xzr, xzr subs x12, x12, x7 sbcs x13, x13, x6 sbcs x14, x14, x5 sbcs x15, x15, xzr sbcs x16, x16, xzr sbc x17, x17, xzr adds x12, x12, x19 adcs x13, x13, x20 adcs x14, x14, x21 adcs x15, x15, x22 adcs x16, x16, x2 adcs x17, x17, x1 adc x10, xzr, xzr mov x11, #-4294967295 adds x19, x12, x11 mov x11, #4294967295 adcs x20, x13, x11 mov x11, #1 adcs x21, x14, x11 adcs x22, x15, xzr adcs x2, x16, xzr adcs x1, x17, xzr adcs x10, x10, xzr csel x12, x12, x19, eq csel x13, x13, x20, eq csel x14, x14, x21, eq csel x15, x15, x22, eq csel x16, x16, x2, eq csel x17, x17, x1, eq stp x12, x13, [sp, #96] stp x14, x15, [sp, #112] stp x16, x17, [sp, #128] ldp x5, x6, [x24, #48] ldp x4, x3, [x24, #96] adds x5, x5, x4 adcs x6, x6, x3 ldp x7, x8, [x24, #64] ldp x4, x3, [x24, #112] adcs x7, x7, x4 adcs x8, x8, x3 ldp x9, x10, [x24, #80] ldp x4, x3, [x24, #128] adcs x9, x9, x4 adcs x10, x10, x3 adc x3, xzr, xzr mov x4, #4294967295 cmp x5, x4 mov x4, #-4294967296 sbcs xzr, x6, x4 mov x4, #-2 sbcs xzr, x7, x4 adcs xzr, x8, xzr adcs xzr, x9, xzr adcs xzr, x10, xzr adcs x3, x3, xzr csetm x3, ne mov x4, #4294967295 and x4, x4, x3 subs x5, x5, x4 eor x4, x4, x3 sbcs x6, x6, x4 mov x4, #-2 and x4, x4, x3 sbcs x7, x7, x4 sbcs x8, x8, x3 sbcs x9, x9, x3 sbc x10, x10, x3 stp x5, x6, [sp, #240] stp x7, x8, [sp, #256] stp x9, x10, [sp, #272] ldp x2, x3, [sp, #96] mul x9, x2, x3 umulh x10, x2, x3 ldp x4, x5, [sp, #112] mul x8, x2, x4 adds x10, x10, x8 mul x11, x2, x5 mul x8, x3, x4 adcs x11, x11, x8 umulh x12, x2, x5 mul x8, x3, x5 adcs x12, x12, x8 ldp x6, x7, [sp, #128] mul x13, x2, x7 mul x8, x3, x6 adcs x13, x13, x8 umulh x14, x2, x7 
mul x8, x3, x7 adcs x14, x14, x8 mul x15, x5, x6 adcs x15, x15, xzr umulh x16, x5, x6 adc x16, x16, xzr umulh x8, x2, x4 adds x11, x11, x8 umulh x8, x3, x4 adcs x12, x12, x8 umulh x8, x3, x5 adcs x13, x13, x8 umulh x8, x3, x6 adcs x14, x14, x8 umulh x8, x3, x7 adcs x15, x15, x8 adc x16, x16, xzr mul x8, x2, x6 adds x12, x12, x8 mul x8, x4, x5 adcs x13, x13, x8 mul x8, x4, x6 adcs x14, x14, x8 mul x8, x4, x7 adcs x15, x15, x8 mul x8, x5, x7 adcs x16, x16, x8 mul x17, x6, x7 adcs x17, x17, xzr umulh x19, x6, x7 adc x19, x19, xzr umulh x8, x2, x6 adds x13, x13, x8 umulh x8, x4, x5 adcs x14, x14, x8 umulh x8, x4, x6 adcs x15, x15, x8 umulh x8, x4, x7 adcs x16, x16, x8 umulh x8, x5, x7 adcs x17, x17, x8 adc x19, x19, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 adcs x15, x15, x15 adcs x16, x16, x16 adcs x17, x17, x17 adcs x19, x19, x19 cset x20, hs umulh x8, x2, x2 mul x2, x2, x2 adds x9, x9, x8 mul x8, x3, x3 adcs x10, x10, x8 umulh x8, x3, x3 adcs x11, x11, x8 mul x8, x4, x4 adcs x12, x12, x8 umulh x8, x4, x4 adcs x13, x13, x8 mul x8, x5, x5 adcs x14, x14, x8 umulh x8, x5, x5 adcs x15, x15, x8 mul x8, x6, x6 adcs x16, x16, x8 umulh x8, x6, x6 adcs x17, x17, x8 mul x8, x7, x7 adcs x19, x19, x8 umulh x8, x7, x7 adc x20, x20, x8 lsl x5, x2, #32 add x2, x5, x2 mov x5, #-4294967295 umulh x5, x5, x2 mov x4, #4294967295 mul x3, x4, x2 umulh x4, x4, x2 adds x5, x5, x3 adcs x4, x4, x2 adc x3, xzr, xzr subs x9, x9, x5 sbcs x10, x10, x4 sbcs x11, x11, x3 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x2, x2, xzr lsl x5, x9, #32 add x9, x5, x9 mov x5, #-4294967295 umulh x5, x5, x9 mov x4, #4294967295 mul x3, x4, x9 umulh x4, x4, x9 adds x5, x5, x3 adcs x4, x4, x9 adc x3, xzr, xzr subs x10, x10, x5 sbcs x11, x11, x4 sbcs x12, x12, x3 sbcs x13, x13, xzr sbcs x2, x2, xzr sbc x9, x9, xzr lsl x5, x10, #32 add x10, x5, x10 mov x5, #-4294967295 umulh x5, x5, x10 mov x4, #4294967295 mul x3, x4, x10 umulh x4, x4, x10 adds x5, x5, x3 adcs x4, x4, x10 adc x3, xzr, xzr subs x11, x11, x5 sbcs x12, x12, x4 sbcs x13, x13, x3 sbcs x2, x2, xzr sbcs x9, x9, xzr sbc x10, x10, xzr lsl x5, x11, #32 add x11, x5, x11 mov x5, #-4294967295 umulh x5, x5, x11 mov x4, #4294967295 mul x3, x4, x11 umulh x4, x4, x11 adds x5, x5, x3 adcs x4, x4, x11 adc x3, xzr, xzr subs x12, x12, x5 sbcs x13, x13, x4 sbcs x2, x2, x3 sbcs x9, x9, xzr sbcs x10, x10, xzr sbc x11, x11, xzr lsl x5, x12, #32 add x12, x5, x12 mov x5, #-4294967295 umulh x5, x5, x12 mov x4, #4294967295 mul x3, x4, x12 umulh x4, x4, x12 adds x5, x5, x3 adcs x4, x4, x12 adc x3, xzr, xzr subs x13, x13, x5 sbcs x2, x2, x4 sbcs x9, x9, x3 sbcs x10, x10, xzr sbcs x11, x11, xzr sbc x12, x12, xzr lsl x5, x13, #32 add x13, x5, x13 mov x5, #-4294967295 umulh x5, x5, x13 mov x4, #4294967295 mul x3, x4, x13 umulh x4, x4, x13 adds x5, x5, x3 adcs x4, x4, x13 adc x3, xzr, xzr subs x2, x2, x5 sbcs x9, x9, x4 sbcs x10, x10, x3 sbcs x11, x11, xzr sbcs x12, x12, xzr sbc x13, x13, xzr adds x2, x2, x14 adcs x9, x9, x15 adcs x10, x10, x16 adcs x11, x11, x17 adcs x12, x12, x19 adcs x13, x13, x20 adc x6, xzr, xzr mov x8, #-4294967295 adds x14, x2, x8 mov x8, #4294967295 adcs x15, x9, x8 mov x8, #1 adcs x16, x10, x8 adcs x17, x11, xzr adcs x19, x12, xzr adcs x20, x13, xzr adcs x6, x6, xzr csel x2, x2, x14, eq csel x9, x9, x15, eq csel x10, x10, x16, eq csel x11, x11, x17, eq csel x12, x12, x19, eq csel x13, x13, x20, eq stp x2, x9, [sp, #288] stp x10, x11, [sp, #304] stp x12, x13, [sp, #320] ldp x3, x4, [x24] ldp x5, x6, [sp, #48] mul 
x12, x3, x5 umulh x13, x3, x5 mul x11, x3, x6 umulh x14, x3, x6 adds x13, x13, x11 ldp x7, x8, [sp, #64] mul x11, x3, x7 umulh x15, x3, x7 adcs x14, x14, x11 mul x11, x3, x8 umulh x16, x3, x8 adcs x15, x15, x11 ldp x9, x10, [sp, #80] mul x11, x3, x9 umulh x17, x3, x9 adcs x16, x16, x11 mul x11, x3, x10 umulh x19, x3, x10 adcs x17, x17, x11 adc x19, x19, xzr mul x11, x4, x5 adds x13, x13, x11 mul x11, x4, x6 adcs x14, x14, x11 mul x11, x4, x7 adcs x15, x15, x11 mul x11, x4, x8 adcs x16, x16, x11 mul x11, x4, x9 adcs x17, x17, x11 mul x11, x4, x10 adcs x19, x19, x11 cset x20, hs umulh x11, x4, x5 adds x14, x14, x11 umulh x11, x4, x6 adcs x15, x15, x11 umulh x11, x4, x7 adcs x16, x16, x11 umulh x11, x4, x8 adcs x17, x17, x11 umulh x11, x4, x9 adcs x19, x19, x11 umulh x11, x4, x10 adc x20, x20, x11 ldp x3, x4, [x24, #16] mul x11, x3, x5 adds x14, x14, x11 mul x11, x3, x6 adcs x15, x15, x11 mul x11, x3, x7 adcs x16, x16, x11 mul x11, x3, x8 adcs x17, x17, x11 mul x11, x3, x9 adcs x19, x19, x11 mul x11, x3, x10 adcs x20, x20, x11 cset x21, hs umulh x11, x3, x5 adds x15, x15, x11 umulh x11, x3, x6 adcs x16, x16, x11 umulh x11, x3, x7 adcs x17, x17, x11 umulh x11, x3, x8 adcs x19, x19, x11 umulh x11, x3, x9 adcs x20, x20, x11 umulh x11, x3, x10 adc x21, x21, x11 mul x11, x4, x5 adds x15, x15, x11 mul x11, x4, x6 adcs x16, x16, x11 mul x11, x4, x7 adcs x17, x17, x11 mul x11, x4, x8 adcs x19, x19, x11 mul x11, x4, x9 adcs x20, x20, x11 mul x11, x4, x10 adcs x21, x21, x11 cset x22, hs umulh x11, x4, x5 adds x16, x16, x11 umulh x11, x4, x6 adcs x17, x17, x11 umulh x11, x4, x7 adcs x19, x19, x11 umulh x11, x4, x8 adcs x20, x20, x11 umulh x11, x4, x9 adcs x21, x21, x11 umulh x11, x4, x10 adc x22, x22, x11 ldp x3, x4, [x24, #32] mul x11, x3, x5 adds x16, x16, x11 mul x11, x3, x6 adcs x17, x17, x11 mul x11, x3, x7 adcs x19, x19, x11 mul x11, x3, x8 adcs x20, x20, x11 mul x11, x3, x9 adcs x21, x21, x11 mul x11, x3, x10 adcs x22, x22, x11 cset x2, hs umulh x11, x3, x5 adds x17, x17, x11 umulh x11, x3, x6 adcs x19, x19, x11 umulh x11, x3, x7 adcs x20, x20, x11 umulh x11, x3, x8 adcs x21, x21, x11 umulh x11, x3, x9 adcs x22, x22, x11 umulh x11, x3, x10 adc x2, x2, x11 mul x11, x4, x5 adds x17, x17, x11 mul x11, x4, x6 adcs x19, x19, x11 mul x11, x4, x7 adcs x20, x20, x11 mul x11, x4, x8 adcs x21, x21, x11 mul x11, x4, x9 adcs x22, x22, x11 mul x11, x4, x10 adcs x2, x2, x11 cset x1, hs umulh x11, x4, x5 adds x19, x19, x11 umulh x11, x4, x6 adcs x20, x20, x11 umulh x11, x4, x7 adcs x21, x21, x11 umulh x11, x4, x8 adcs x22, x22, x11 umulh x11, x4, x9 adcs x2, x2, x11 umulh x11, x4, x10 adc x1, x1, x11 lsl x7, x12, #32 add x12, x7, x12 mov x7, #-4294967295 umulh x7, x7, x12 mov x6, #4294967295 mul x5, x6, x12 umulh x6, x6, x12 adds x7, x7, x5 adcs x6, x6, x12 adc x5, xzr, xzr subs x13, x13, x7 sbcs x14, x14, x6 sbcs x15, x15, x5 sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x12, x12, xzr lsl x7, x13, #32 add x13, x7, x13 mov x7, #-4294967295 umulh x7, x7, x13 mov x6, #4294967295 mul x5, x6, x13 umulh x6, x6, x13 adds x7, x7, x5 adcs x6, x6, x13 adc x5, xzr, xzr subs x14, x14, x7 sbcs x15, x15, x6 sbcs x16, x16, x5 sbcs x17, x17, xzr sbcs x12, x12, xzr sbc x13, x13, xzr lsl x7, x14, #32 add x14, x7, x14 mov x7, #-4294967295 umulh x7, x7, x14 mov x6, #4294967295 mul x5, x6, x14 umulh x6, x6, x14 adds x7, x7, x5 adcs x6, x6, x14 adc x5, xzr, xzr subs x15, x15, x7 sbcs x16, x16, x6 sbcs x17, x17, x5 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr lsl x7, x15, #32 add x15, x7, x15 mov x7, #-4294967295 umulh x7, 
x7, x15 mov x6, #4294967295 mul x5, x6, x15 umulh x6, x6, x15 adds x7, x7, x5 adcs x6, x6, x15 adc x5, xzr, xzr subs x16, x16, x7 sbcs x17, x17, x6 sbcs x12, x12, x5 sbcs x13, x13, xzr sbcs x14, x14, xzr sbc x15, x15, xzr lsl x7, x16, #32 add x16, x7, x16 mov x7, #-4294967295 umulh x7, x7, x16 mov x6, #4294967295 mul x5, x6, x16 umulh x6, x6, x16 adds x7, x7, x5 adcs x6, x6, x16 adc x5, xzr, xzr subs x17, x17, x7 sbcs x12, x12, x6 sbcs x13, x13, x5 sbcs x14, x14, xzr sbcs x15, x15, xzr sbc x16, x16, xzr lsl x7, x17, #32 add x17, x7, x17 mov x7, #-4294967295 umulh x7, x7, x17 mov x6, #4294967295 mul x5, x6, x17 umulh x6, x6, x17 adds x7, x7, x5 adcs x6, x6, x17 adc x5, xzr, xzr subs x12, x12, x7 sbcs x13, x13, x6 sbcs x14, x14, x5 sbcs x15, x15, xzr sbcs x16, x16, xzr sbc x17, x17, xzr adds x12, x12, x19 adcs x13, x13, x20 adcs x14, x14, x21 adcs x15, x15, x22 adcs x16, x16, x2 adcs x17, x17, x1 adc x10, xzr, xzr mov x11, #-4294967295 adds x19, x12, x11 mov x11, #4294967295 adcs x20, x13, x11 mov x11, #1 adcs x21, x14, x11 adcs x22, x15, xzr adcs x2, x16, xzr adcs x1, x17, xzr adcs x10, x10, xzr csel x12, x12, x19, eq csel x13, x13, x20, eq csel x14, x14, x21, eq csel x15, x15, x22, eq csel x16, x16, x2, eq csel x17, x17, x1, eq stp x12, x13, [sp, #144] stp x14, x15, [sp, #160] stp x16, x17, [sp, #176] ldp x2, x3, [sp, #240] mul x9, x2, x3 umulh x10, x2, x3 ldp x4, x5, [sp, #256] mul x8, x2, x4 adds x10, x10, x8 mul x11, x2, x5 mul x8, x3, x4 adcs x11, x11, x8 umulh x12, x2, x5 mul x8, x3, x5 adcs x12, x12, x8 ldp x6, x7, [sp, #272] mul x13, x2, x7 mul x8, x3, x6 adcs x13, x13, x8 umulh x14, x2, x7 mul x8, x3, x7 adcs x14, x14, x8 mul x15, x5, x6 adcs x15, x15, xzr umulh x16, x5, x6 adc x16, x16, xzr umulh x8, x2, x4 adds x11, x11, x8 umulh x8, x3, x4 adcs x12, x12, x8 umulh x8, x3, x5 adcs x13, x13, x8 umulh x8, x3, x6 adcs x14, x14, x8 umulh x8, x3, x7 adcs x15, x15, x8 adc x16, x16, xzr mul x8, x2, x6 adds x12, x12, x8 mul x8, x4, x5 adcs x13, x13, x8 mul x8, x4, x6 adcs x14, x14, x8 mul x8, x4, x7 adcs x15, x15, x8 mul x8, x5, x7 adcs x16, x16, x8 mul x17, x6, x7 adcs x17, x17, xzr umulh x19, x6, x7 adc x19, x19, xzr umulh x8, x2, x6 adds x13, x13, x8 umulh x8, x4, x5 adcs x14, x14, x8 umulh x8, x4, x6 adcs x15, x15, x8 umulh x8, x4, x7 adcs x16, x16, x8 umulh x8, x5, x7 adcs x17, x17, x8 adc x19, x19, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 adcs x15, x15, x15 adcs x16, x16, x16 adcs x17, x17, x17 adcs x19, x19, x19 cset x20, hs umulh x8, x2, x2 mul x2, x2, x2 adds x9, x9, x8 mul x8, x3, x3 adcs x10, x10, x8 umulh x8, x3, x3 adcs x11, x11, x8 mul x8, x4, x4 adcs x12, x12, x8 umulh x8, x4, x4 adcs x13, x13, x8 mul x8, x5, x5 adcs x14, x14, x8 umulh x8, x5, x5 adcs x15, x15, x8 mul x8, x6, x6 adcs x16, x16, x8 umulh x8, x6, x6 adcs x17, x17, x8 mul x8, x7, x7 adcs x19, x19, x8 umulh x8, x7, x7 adc x20, x20, x8 lsl x5, x2, #32 add x2, x5, x2 mov x5, #-4294967295 umulh x5, x5, x2 mov x4, #4294967295 mul x3, x4, x2 umulh x4, x4, x2 adds x5, x5, x3 adcs x4, x4, x2 adc x3, xzr, xzr subs x9, x9, x5 sbcs x10, x10, x4 sbcs x11, x11, x3 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x2, x2, xzr lsl x5, x9, #32 add x9, x5, x9 mov x5, #-4294967295 umulh x5, x5, x9 mov x4, #4294967295 mul x3, x4, x9 umulh x4, x4, x9 adds x5, x5, x3 adcs x4, x4, x9 adc x3, xzr, xzr subs x10, x10, x5 sbcs x11, x11, x4 sbcs x12, x12, x3 sbcs x13, x13, xzr sbcs x2, x2, xzr sbc x9, x9, xzr lsl x5, x10, #32 add x10, x5, x10 mov x5, #-4294967295 umulh x5, 
x5, x10 mov x4, #4294967295 mul x3, x4, x10 umulh x4, x4, x10 adds x5, x5, x3 adcs x4, x4, x10 adc x3, xzr, xzr subs x11, x11, x5 sbcs x12, x12, x4 sbcs x13, x13, x3 sbcs x2, x2, xzr sbcs x9, x9, xzr sbc x10, x10, xzr lsl x5, x11, #32 add x11, x5, x11 mov x5, #-4294967295 umulh x5, x5, x11 mov x4, #4294967295 mul x3, x4, x11 umulh x4, x4, x11 adds x5, x5, x3 adcs x4, x4, x11 adc x3, xzr, xzr subs x12, x12, x5 sbcs x13, x13, x4 sbcs x2, x2, x3 sbcs x9, x9, xzr sbcs x10, x10, xzr sbc x11, x11, xzr lsl x5, x12, #32 add x12, x5, x12 mov x5, #-4294967295 umulh x5, x5, x12 mov x4, #4294967295 mul x3, x4, x12 umulh x4, x4, x12 adds x5, x5, x3 adcs x4, x4, x12 adc x3, xzr, xzr subs x13, x13, x5 sbcs x2, x2, x4 sbcs x9, x9, x3 sbcs x10, x10, xzr sbcs x11, x11, xzr sbc x12, x12, xzr lsl x5, x13, #32 add x13, x5, x13 mov x5, #-4294967295 umulh x5, x5, x13 mov x4, #4294967295 mul x3, x4, x13 umulh x4, x4, x13 adds x5, x5, x3 adcs x4, x4, x13 adc x3, xzr, xzr subs x2, x2, x5 sbcs x9, x9, x4 sbcs x10, x10, x3 sbcs x11, x11, xzr sbcs x12, x12, xzr sbc x13, x13, xzr adds x2, x2, x14 adcs x9, x9, x15 adcs x10, x10, x16 adcs x11, x11, x17 adcs x12, x12, x19 adcs x13, x13, x20 adc x6, xzr, xzr mov x8, #-4294967295 adds x14, x2, x8 mov x8, #4294967295 adcs x15, x9, x8 mov x8, #1 adcs x16, x10, x8 adcs x17, x11, xzr adcs x19, x12, xzr adcs x20, x13, xzr adcs x6, x6, xzr csel x2, x2, x14, eq csel x9, x9, x15, eq csel x10, x10, x16, eq csel x11, x11, x17, eq csel x12, x12, x19, eq csel x13, x13, x20, eq stp x2, x9, [sp, #192] stp x10, x11, [sp, #208] stp x12, x13, [sp, #224] ldp x0, x1, [sp, #288] mov x6, #4294967295 subs x6, x6, x0 mov x7, #-4294967296 sbcs x7, x7, x1 ldp x0, x1, [sp, #304] mov x8, #-2 sbcs x8, x8, x0 mov x13, #-1 sbcs x9, x13, x1 ldp x0, x1, [sp, #320] sbcs x10, x13, x0 sbc x11, x13, x1 mov x12, #9 mul x0, x12, x6 mul x1, x12, x7 mul x2, x12, x8 mul x3, x12, x9 mul x4, x12, x10 mul x5, x12, x11 umulh x6, x12, x6 umulh x7, x12, x7 umulh x8, x12, x8 umulh x9, x12, x9 umulh x10, x12, x10 umulh x12, x12, x11 adds x1, x1, x6 adcs x2, x2, x7 adcs x3, x3, x8 adcs x4, x4, x9 adcs x5, x5, x10 mov x6, #1 adc x6, x12, x6 ldp x8, x9, [sp, #144] ldp x10, x11, [sp, #160] ldp x12, x13, [sp, #176] mov x14, #12 mul x15, x14, x8 umulh x8, x14, x8 adds x0, x0, x15 mul x15, x14, x9 umulh x9, x14, x9 adcs x1, x1, x15 mul x15, x14, x10 umulh x10, x14, x10 adcs x2, x2, x15 mul x15, x14, x11 umulh x11, x14, x11 adcs x3, x3, x15 mul x15, x14, x12 umulh x12, x14, x12 adcs x4, x4, x15 mul x15, x14, x13 umulh x13, x14, x13 adcs x5, x5, x15 adc x6, x6, xzr adds x1, x1, x8 adcs x2, x2, x9 adcs x3, x3, x10 adcs x4, x4, x11 adcs x5, x5, x12 adcs x6, x6, x13 lsl x7, x6, #32 subs x8, x6, x7 sbc x7, x7, xzr adds x0, x0, x8 adcs x1, x1, x7 adcs x2, x2, x6 adcs x3, x3, xzr adcs x4, x4, xzr adcs x5, x5, xzr csetm x6, lo mov x7, #4294967295 and x7, x7, x6 adds x0, x0, x7 eor x7, x7, x6 adcs x1, x1, x7 mov x7, #-2 and x7, x7, x6 adcs x2, x2, x7 adcs x3, x3, x6 adcs x4, x4, x6 adc x5, x5, x6 stp x0, x1, [sp, #288] stp x2, x3, [sp, #304] stp x4, x5, [sp, #320] ldp x5, x6, [sp, #192] ldp x4, x3, [sp] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #208] ldp x4, x3, [sp, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #224] ldp x4, x3, [sp, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, lo mov x4, #4294967295 and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #-2 and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [sp, #240] stp x7, x8, [sp, #256] stp x9, x10, [sp, 
#272] ldp x2, x3, [sp, #48] mul x9, x2, x3 umulh x10, x2, x3 ldp x4, x5, [sp, #64] mul x8, x2, x4 adds x10, x10, x8 mul x11, x2, x5 mul x8, x3, x4 adcs x11, x11, x8 umulh x12, x2, x5 mul x8, x3, x5 adcs x12, x12, x8 ldp x6, x7, [sp, #80] mul x13, x2, x7 mul x8, x3, x6 adcs x13, x13, x8 umulh x14, x2, x7 mul x8, x3, x7 adcs x14, x14, x8 mul x15, x5, x6 adcs x15, x15, xzr umulh x16, x5, x6 adc x16, x16, xzr umulh x8, x2, x4 adds x11, x11, x8 umulh x8, x3, x4 adcs x12, x12, x8 umulh x8, x3, x5 adcs x13, x13, x8 umulh x8, x3, x6 adcs x14, x14, x8 umulh x8, x3, x7 adcs x15, x15, x8 adc x16, x16, xzr mul x8, x2, x6 adds x12, x12, x8 mul x8, x4, x5 adcs x13, x13, x8 mul x8, x4, x6 adcs x14, x14, x8 mul x8, x4, x7 adcs x15, x15, x8 mul x8, x5, x7 adcs x16, x16, x8 mul x17, x6, x7 adcs x17, x17, xzr umulh x19, x6, x7 adc x19, x19, xzr umulh x8, x2, x6 adds x13, x13, x8 umulh x8, x4, x5 adcs x14, x14, x8 umulh x8, x4, x6 adcs x15, x15, x8 umulh x8, x4, x7 adcs x16, x16, x8 umulh x8, x5, x7 adcs x17, x17, x8 adc x19, x19, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 adcs x15, x15, x15 adcs x16, x16, x16 adcs x17, x17, x17 adcs x19, x19, x19 cset x20, hs umulh x8, x2, x2 mul x2, x2, x2 adds x9, x9, x8 mul x8, x3, x3 adcs x10, x10, x8 umulh x8, x3, x3 adcs x11, x11, x8 mul x8, x4, x4 adcs x12, x12, x8 umulh x8, x4, x4 adcs x13, x13, x8 mul x8, x5, x5 adcs x14, x14, x8 umulh x8, x5, x5 adcs x15, x15, x8 mul x8, x6, x6 adcs x16, x16, x8 umulh x8, x6, x6 adcs x17, x17, x8 mul x8, x7, x7 adcs x19, x19, x8 umulh x8, x7, x7 adc x20, x20, x8 lsl x5, x2, #32 add x2, x5, x2 mov x5, #-4294967295 umulh x5, x5, x2 mov x4, #4294967295 mul x3, x4, x2 umulh x4, x4, x2 adds x5, x5, x3 adcs x4, x4, x2 adc x3, xzr, xzr subs x9, x9, x5 sbcs x10, x10, x4 sbcs x11, x11, x3 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x2, x2, xzr lsl x5, x9, #32 add x9, x5, x9 mov x5, #-4294967295 umulh x5, x5, x9 mov x4, #4294967295 mul x3, x4, x9 umulh x4, x4, x9 adds x5, x5, x3 adcs x4, x4, x9 adc x3, xzr, xzr subs x10, x10, x5 sbcs x11, x11, x4 sbcs x12, x12, x3 sbcs x13, x13, xzr sbcs x2, x2, xzr sbc x9, x9, xzr lsl x5, x10, #32 add x10, x5, x10 mov x5, #-4294967295 umulh x5, x5, x10 mov x4, #4294967295 mul x3, x4, x10 umulh x4, x4, x10 adds x5, x5, x3 adcs x4, x4, x10 adc x3, xzr, xzr subs x11, x11, x5 sbcs x12, x12, x4 sbcs x13, x13, x3 sbcs x2, x2, xzr sbcs x9, x9, xzr sbc x10, x10, xzr lsl x5, x11, #32 add x11, x5, x11 mov x5, #-4294967295 umulh x5, x5, x11 mov x4, #4294967295 mul x3, x4, x11 umulh x4, x4, x11 adds x5, x5, x3 adcs x4, x4, x11 adc x3, xzr, xzr subs x12, x12, x5 sbcs x13, x13, x4 sbcs x2, x2, x3 sbcs x9, x9, xzr sbcs x10, x10, xzr sbc x11, x11, xzr lsl x5, x12, #32 add x12, x5, x12 mov x5, #-4294967295 umulh x5, x5, x12 mov x4, #4294967295 mul x3, x4, x12 umulh x4, x4, x12 adds x5, x5, x3 adcs x4, x4, x12 adc x3, xzr, xzr subs x13, x13, x5 sbcs x2, x2, x4 sbcs x9, x9, x3 sbcs x10, x10, xzr sbcs x11, x11, xzr sbc x12, x12, xzr lsl x5, x13, #32 add x13, x5, x13 mov x5, #-4294967295 umulh x5, x5, x13 mov x4, #4294967295 mul x3, x4, x13 umulh x4, x4, x13 adds x5, x5, x3 adcs x4, x4, x13 adc x3, xzr, xzr subs x2, x2, x5 sbcs x9, x9, x4 sbcs x10, x10, x3 sbcs x11, x11, xzr sbcs x12, x12, xzr sbc x13, x13, xzr adds x2, x2, x14 adcs x9, x9, x15 adcs x10, x10, x16 adcs x11, x11, x17 adcs x12, x12, x19 adcs x13, x13, x20 adc x6, xzr, xzr mov x8, #-4294967295 adds x14, x2, x8 mov x8, #4294967295 adcs x15, x9, x8 mov x8, #1 adcs x16, x10, x8 adcs x17, x11, xzr 
adcs x19, x12, xzr adcs x20, x13, xzr adcs x6, x6, xzr csel x2, x2, x14, eq csel x9, x9, x15, eq csel x10, x10, x16, eq csel x11, x11, x17, eq csel x12, x12, x19, eq csel x13, x13, x20, eq stp x2, x9, [sp, #192] stp x10, x11, [sp, #208] stp x12, x13, [sp, #224] ldp x5, x6, [sp, #240] ldp x4, x3, [sp, #48] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #256] ldp x4, x3, [sp, #64] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #272] ldp x4, x3, [sp, #80] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, lo mov x4, #4294967295 and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #-2 and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [x23, #96] stp x7, x8, [x23, #112] stp x9, x10, [x23, #128] ldp x3, x4, [sp, #288] ldp x5, x6, [sp, #96] mul x12, x3, x5 umulh x13, x3, x5 mul x11, x3, x6 umulh x14, x3, x6 adds x13, x13, x11 ldp x7, x8, [sp, #112] mul x11, x3, x7 umulh x15, x3, x7 adcs x14, x14, x11 mul x11, x3, x8 umulh x16, x3, x8 adcs x15, x15, x11 ldp x9, x10, [sp, #128] mul x11, x3, x9 umulh x17, x3, x9 adcs x16, x16, x11 mul x11, x3, x10 umulh x19, x3, x10 adcs x17, x17, x11 adc x19, x19, xzr mul x11, x4, x5 adds x13, x13, x11 mul x11, x4, x6 adcs x14, x14, x11 mul x11, x4, x7 adcs x15, x15, x11 mul x11, x4, x8 adcs x16, x16, x11 mul x11, x4, x9 adcs x17, x17, x11 mul x11, x4, x10 adcs x19, x19, x11 cset x20, hs umulh x11, x4, x5 adds x14, x14, x11 umulh x11, x4, x6 adcs x15, x15, x11 umulh x11, x4, x7 adcs x16, x16, x11 umulh x11, x4, x8 adcs x17, x17, x11 umulh x11, x4, x9 adcs x19, x19, x11 umulh x11, x4, x10 adc x20, x20, x11 ldp x3, x4, [sp, #304] mul x11, x3, x5 adds x14, x14, x11 mul x11, x3, x6 adcs x15, x15, x11 mul x11, x3, x7 adcs x16, x16, x11 mul x11, x3, x8 adcs x17, x17, x11 mul x11, x3, x9 adcs x19, x19, x11 mul x11, x3, x10 adcs x20, x20, x11 cset x21, hs umulh x11, x3, x5 adds x15, x15, x11 umulh x11, x3, x6 adcs x16, x16, x11 umulh x11, x3, x7 adcs x17, x17, x11 umulh x11, x3, x8 adcs x19, x19, x11 umulh x11, x3, x9 adcs x20, x20, x11 umulh x11, x3, x10 adc x21, x21, x11 mul x11, x4, x5 adds x15, x15, x11 mul x11, x4, x6 adcs x16, x16, x11 mul x11, x4, x7 adcs x17, x17, x11 mul x11, x4, x8 adcs x19, x19, x11 mul x11, x4, x9 adcs x20, x20, x11 mul x11, x4, x10 adcs x21, x21, x11 cset x22, hs umulh x11, x4, x5 adds x16, x16, x11 umulh x11, x4, x6 adcs x17, x17, x11 umulh x11, x4, x7 adcs x19, x19, x11 umulh x11, x4, x8 adcs x20, x20, x11 umulh x11, x4, x9 adcs x21, x21, x11 umulh x11, x4, x10 adc x22, x22, x11 ldp x3, x4, [sp, #320] mul x11, x3, x5 adds x16, x16, x11 mul x11, x3, x6 adcs x17, x17, x11 mul x11, x3, x7 adcs x19, x19, x11 mul x11, x3, x8 adcs x20, x20, x11 mul x11, x3, x9 adcs x21, x21, x11 mul x11, x3, x10 adcs x22, x22, x11 cset x2, hs umulh x11, x3, x5 adds x17, x17, x11 umulh x11, x3, x6 adcs x19, x19, x11 umulh x11, x3, x7 adcs x20, x20, x11 umulh x11, x3, x8 adcs x21, x21, x11 umulh x11, x3, x9 adcs x22, x22, x11 umulh x11, x3, x10 adc x2, x2, x11 mul x11, x4, x5 adds x17, x17, x11 mul x11, x4, x6 adcs x19, x19, x11 mul x11, x4, x7 adcs x20, x20, x11 mul x11, x4, x8 adcs x21, x21, x11 mul x11, x4, x9 adcs x22, x22, x11 mul x11, x4, x10 adcs x2, x2, x11 cset x1, hs umulh x11, x4, x5 adds x19, x19, x11 umulh x11, x4, x6 adcs x20, x20, x11 umulh x11, x4, x7 adcs x21, x21, x11 umulh x11, x4, x8 adcs x22, x22, x11 umulh x11, x4, x9 adcs x2, x2, x11 umulh x11, x4, x10 adc x1, x1, x11 lsl x7, x12, #32 add x12, x7, x12 mov x7, #-4294967295 umulh x7, x7, x12 mov x6, #4294967295 mul x5, x6, x12 umulh x6, x6, 
x12 adds x7, x7, x5 adcs x6, x6, x12 adc x5, xzr, xzr subs x13, x13, x7 sbcs x14, x14, x6 sbcs x15, x15, x5 sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x12, x12, xzr lsl x7, x13, #32 add x13, x7, x13 mov x7, #-4294967295 umulh x7, x7, x13 mov x6, #4294967295 mul x5, x6, x13 umulh x6, x6, x13 adds x7, x7, x5 adcs x6, x6, x13 adc x5, xzr, xzr subs x14, x14, x7 sbcs x15, x15, x6 sbcs x16, x16, x5 sbcs x17, x17, xzr sbcs x12, x12, xzr sbc x13, x13, xzr lsl x7, x14, #32 add x14, x7, x14 mov x7, #-4294967295 umulh x7, x7, x14 mov x6, #4294967295 mul x5, x6, x14 umulh x6, x6, x14 adds x7, x7, x5 adcs x6, x6, x14 adc x5, xzr, xzr subs x15, x15, x7 sbcs x16, x16, x6 sbcs x17, x17, x5 sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr lsl x7, x15, #32 add x15, x7, x15 mov x7, #-4294967295 umulh x7, x7, x15 mov x6, #4294967295 mul x5, x6, x15 umulh x6, x6, x15 adds x7, x7, x5 adcs x6, x6, x15 adc x5, xzr, xzr subs x16, x16, x7 sbcs x17, x17, x6 sbcs x12, x12, x5 sbcs x13, x13, xzr sbcs x14, x14, xzr sbc x15, x15, xzr lsl x7, x16, #32 add x16, x7, x16 mov x7, #-4294967295 umulh x7, x7, x16 mov x6, #4294967295 mul x5, x6, x16 umulh x6, x6, x16 adds x7, x7, x5 adcs x6, x6, x16 adc x5, xzr, xzr subs x17, x17, x7 sbcs x12, x12, x6 sbcs x13, x13, x5 sbcs x14, x14, xzr sbcs x15, x15, xzr sbc x16, x16, xzr lsl x7, x17, #32 add x17, x7, x17 mov x7, #-4294967295 umulh x7, x7, x17 mov x6, #4294967295 mul x5, x6, x17 umulh x6, x6, x17 adds x7, x7, x5 adcs x6, x6, x17 adc x5, xzr, xzr subs x12, x12, x7 sbcs x13, x13, x6 sbcs x14, x14, x5 sbcs x15, x15, xzr sbcs x16, x16, xzr sbc x17, x17, xzr adds x12, x12, x19 adcs x13, x13, x20 adcs x14, x14, x21 adcs x15, x15, x22 adcs x16, x16, x2 adcs x17, x17, x1 adc x10, xzr, xzr mov x11, #-4294967295 adds x19, x12, x11 mov x11, #4294967295 adcs x20, x13, x11 mov x11, #1 adcs x21, x14, x11 adcs x22, x15, xzr adcs x2, x16, xzr adcs x1, x17, xzr adcs x10, x10, xzr csel x12, x12, x19, eq csel x13, x13, x20, eq csel x14, x14, x21, eq csel x15, x15, x22, eq csel x16, x16, x2, eq csel x17, x17, x1, eq stp x12, x13, [sp, #240] stp x14, x15, [sp, #256] stp x16, x17, [sp, #272] ldp x1, x2, [sp, #144] ldp x3, x4, [sp, #160] ldp x5, x6, [sp, #176] lsl x0, x1, #2 ldp x7, x8, [sp, #288] subs x0, x0, x7 extr x1, x2, x1, #62 sbcs x1, x1, x8 ldp x7, x8, [sp, #304] extr x2, x3, x2, #62 sbcs x2, x2, x7 extr x3, x4, x3, #62 sbcs x3, x3, x8 extr x4, x5, x4, #62 ldp x7, x8, [sp, #320] sbcs x4, x4, x7 extr x5, x6, x5, #62 sbcs x5, x5, x8 lsr x6, x6, #62 adc x6, x6, xzr lsl x7, x6, #32 subs x8, x6, x7 sbc x7, x7, xzr adds x0, x0, x8 adcs x1, x1, x7 adcs x2, x2, x6 adcs x3, x3, xzr adcs x4, x4, xzr adcs x5, x5, xzr csetm x8, lo mov x9, #4294967295 and x9, x9, x8 adds x0, x0, x9 eor x9, x9, x8 adcs x1, x1, x9 mov x9, #-2 and x9, x9, x8 adcs x2, x2, x9 adcs x3, x3, x8 adcs x4, x4, x8 adc x5, x5, x8 stp x0, x1, [x23] stp x2, x3, [x23, #16] stp x4, x5, [x23, #32] ldp x0, x1, [sp, #192] mov x6, #4294967295 subs x6, x6, x0 mov x7, #-4294967296 sbcs x7, x7, x1 ldp x0, x1, [sp, #208] mov x8, #-2 sbcs x8, x8, x0 mov x13, #-1 sbcs x9, x13, x1 ldp x0, x1, [sp, #224] sbcs x10, x13, x0 sbc x11, x13, x1 lsl x0, x6, #3 extr x1, x7, x6, #61 extr x2, x8, x7, #61 extr x3, x9, x8, #61 extr x4, x10, x9, #61 extr x5, x11, x10, #61 lsr x6, x11, #61 add x6, x6, #1 ldp x8, x9, [sp, #240] ldp x10, x11, [sp, #256] ldp x12, x13, [sp, #272] mov x14, #3 mul x15, x14, x8 umulh x8, x14, x8 adds x0, x0, x15 mul x15, x14, x9 umulh x9, x14, x9 adcs x1, x1, x15 mul x15, x14, x10 umulh x10, x14, x10 adcs x2, x2, x15 mul x15, 
x14, x11
        umulh x11, x14, x11
        adcs x3, x3, x15
        mul x15, x14, x12
        umulh x12, x14, x12
        adcs x4, x4, x15
        mul x15, x14, x13
        umulh x13, x14, x13
        adcs x5, x5, x15
        adc x6, x6, xzr
        adds x1, x1, x8
        adcs x2, x2, x9
        adcs x3, x3, x10
        adcs x4, x4, x11
        adcs x5, x5, x12
        adcs x6, x6, x13
        lsl x7, x6, #32
        subs x8, x6, x7
        sbc x7, x7, xzr
        adds x0, x0, x8
        adcs x1, x1, x7
        adcs x2, x2, x6
        adcs x3, x3, xzr
        adcs x4, x4, xzr
        adcs x5, x5, xzr
        csetm x6, lo
        mov x7, #4294967295
        and x7, x7, x6
        adds x0, x0, x7
        eor x7, x7, x6
        adcs x1, x1, x7
        mov x7, #-2
        and x7, x7, x6
        adcs x2, x2, x7
        adcs x3, x3, x6
        adcs x4, x4, x6
        adc x5, x5, x6
        stp x0, x1, [x23, #48]
        stp x2, x3, [x23, #64]
        stp x4, x5, [x23, #80]
        CFI_INC_SP(336)
        CFI_POP2(x23,x24)
        CFI_POP2(x21,x22)
        CFI_POP2(x19,x20)
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(Lp384_montjscalarmul_alt_p384_montjdouble)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
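// The doubling code above leans on one repeated motif: a single word-step of
// Montgomery reduction modulo p_384. Because p_384 = 2^384 - 2^128 - 2^96 +
// 2^32 - 1, we have p_384 == 2^32 - 1 (mod 2^64), and since
// (2^32 - 1) * (2^32 + 1) = 2^64 - 1, the multiplier w = d0 * (2^32 + 1) makes
// the bottom word of d + w * p_384 vanish; that is what each
// "lsl #32 / add / umulh / mul" block computes before the chain of
// subtractions. A minimal C sketch of one such step, assuming a compiler with
// unsigned __int128 and shown on a standalone 7-word number for clarity (the
// helper name is illustrative, not part of s2n-bignum):
//
//      #include <stdint.h>
//
//      static const uint64_t P384[6] = {
//          0x00000000ffffffffULL, 0xffffffff00000000ULL, 0xfffffffffffffffeULL,
//          0xffffffffffffffffULL, 0xffffffffffffffffULL, 0xffffffffffffffffULL};
//
//      // One Montgomery word-step: a := (a + w * p_384) / 2^64, where
//      // w = a[0] * (2^32 + 1) mod 2^64 makes the division exact.
//      static void montgomery_step_p384(uint64_t a[7])
//      {
//          uint64_t w = a[0] * (((uint64_t)1 << 32) + 1);
//          unsigned __int128 c = (unsigned __int128)w * P384[0] + a[0];
//          for (int i = 1; i < 6; i++) {
//              c = (c >> 64) + (unsigned __int128)w * P384[i] + a[i];
//              a[i - 1] = (uint64_t)c;
//          }
//          c = (c >> 64) + a[6];
//          a[5] = (uint64_t)c;
//          a[6] = (uint64_t)(c >> 64);
//      }
//
// Six such steps replace a 12-word square or product by a 6-word value
// congruent to x * 2^-384, after which the code above performs one final
// conditional correction against p_384.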
wlsfx/bnbb
169601
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/p384_montjadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Point addition on NIST curve P-384 in Montgomery-Jacobian coordinates
//
// extern void p384_montjadd(uint64_t p3[static 18], const uint64_t p1[static 18],
//                           const uint64_t p2[static 18]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

// This is functionally equivalent to p384_montjadd in unopt/p384_montjadd.S.
// This is the result of doing the following sequence of optimizations:
//   1. Function inlining
//   2. Eliminating redundant load/store instructions
//   3. Folding (add addr, const) + load/store
// Function inlining is done manually. The second and third optimizations are
// done by a script.

S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjadd)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjadd)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjadd)
        .text
        .balign 4

// Size of individual field elements

#define NUMSIZE 48

// 7 NUMSIZEs for the point operation, one extra NUMSIZE for field operations

#define NSPACE NUMSIZE*8

S2N_BN_SYMBOL(p384_montjadd):

        CFI_START

// Save regs and make room on stack for temporary variables

        CFI_PUSH2(x19,x20)
        CFI_PUSH2(x21,x22)
        CFI_PUSH2(x23,x24)
        CFI_PUSH2(x25,x26)
        CFI_PUSH1Z(x27)
        CFI_DEC_SP(NSPACE)
        mov x24, x0
        mov x25, x1
        mov x26, x2
        mov x0, sp
        ldr q1, [x25, #96]
        ldp x9, x2, [x25, #96]
        ldr q0, [x25, #96]
        ldp x4, x6, [x25, #112]
        rev64 v21.4s, v1.4s
        uzp2 v28.4s, v1.4s, v1.4s
        umulh x7, x9, x2
        xtn v17.2s, v1.2d
        mul v27.4s, v21.4s, v0.4s
        ldr q20, [x25, #128]
        xtn v30.2s, v0.2d
        ldr q1, [x25, #128]
        uzp2 v31.4s, v0.4s, v0.4s
        ldp x5, x10, [x25, #128]
        umulh x8, x9, x4
        uaddlp v3.2d, v27.4s
        umull v16.2d, v30.2s, v17.2s
        mul x16, x9, x4
        umull v27.2d, v30.2s, v28.2s
        shrn v0.2s, v20.2d, #32
        xtn v7.2s, v20.2d
        shl v20.2d, v3.2d, #32
        umull v3.2d, v31.2s, v28.2s
        mul x3, x2, x4
        umlal v20.2d, v30.2s, v17.2s
        umull v22.2d, v7.2s, v0.2s
        usra v27.2d, v16.2d, #32
        umulh x11, x2, x4
        movi v21.2d, #0xffffffff
        uzp2 v28.4s, v1.4s, v1.4s
        adds x15, x16, x7
        and v5.16b, v27.16b, v21.16b
        adcs x3, x3, x8
        usra v3.2d, v27.2d, #32
        dup v29.2d, x6
        adcs x16, x11, xzr
        mov x14, v20.d[0]
        umlal v5.2d, v31.2s, v17.2s
        mul x8, x9, x2
        mov x7, v20.d[1]
        shl v19.2d, v22.2d, #33
        xtn v25.2s, v29.2d
        rev64 v31.4s, v1.4s
        lsl x13, x14, #32
        uzp2 v6.4s, v29.4s, v29.4s
        umlal v19.2d, v7.2s, v7.2s
        usra v3.2d, v5.2d, #32
        adds x1, x8, x8
        umulh x8, x4, x4
        add x12, x13, x14
        mul v17.4s, v31.4s, v29.4s
        xtn v4.2s, v1.2d
        adcs x14, x15, x15
        lsr x13, x12, #32
        adcs x15, x3, x3
        umull v31.2d, v25.2s, v28.2s
        adcs x11, x16, x16
        umull v21.2d, v25.2s, v4.2s
        mov x17, v3.d[0]
        umull v18.2d, v6.2s, v28.2s
        adc x16, x8, xzr
        uaddlp v16.2d, v17.4s
        movi v1.2d, #0xffffffff
        subs x13, x13, x12
        usra v31.2d, v21.2d, #32
        sbc x8, x12, xzr
        adds x17, x17, x1
        mul x1, x4, x4
        shl v28.2d, v16.2d, #32
        mov x3, v3.d[1]
        adcs x14, x7, x14
        extr x7, x8, x13, #32
        adcs x13, x3, x15
        and v3.16b, v31.16b, v1.16b
        adcs x11, x1, x11
        lsr x1, x8, #32
        umlal v3.2d, v6.2s, v4.2s
        usra v18.2d, v31.2d, #32
        adc x3, x16, xzr
        adds x1, x1, x12
        umlal v28.2d, v25.2s, v4.2s
        adc x16, xzr, xzr
        subs x15, x17, x7
        sbcs x7, x14, x1
        lsl x1, x15, #32
        sbcs x16, x13, x16
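// The loads from [x25, #96], [x25, #112] and [x25, #128] above pick up words
// 12..17 of p1, i.e. its z coordinate, to start the z1^2 computation: a
// Jacobian triple stores x' in words 0..5, y' in words 6..11 and z' in words
// 12..17, each coordinate in the Montgomery domain. A caller-side sketch of
// the contract stated in the header (the wrapper name is hypothetical, not
// part of s2n-bignum):
//
//      #include <stdint.h>
//
//      extern void p384_montjadd(uint64_t p3[static 18],
//                                const uint64_t p1[static 18],
//                                const uint64_t p2[static 18]);
//
//      // r := a + b on the curve; each argument is x'||y'||z' with
//      // x' = (2^384 * x) mod p_384, and a triple (x,y,z) reads as the
//      // affine point (x/z^2, y/z^3).
//      void jacobian_add_p384(uint64_t r[18], const uint64_t a[18],
//                             const uint64_t b[18])
//      {
//          p384_montjadd(r, a, b);
//      }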
add x8, x1, x15 usra v18.2d, v3.2d, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x15, x3, x17 sbcs x3, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [x0] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 stp x15, x3, [x0, #16] csetm x15, cc cneg x1, x1, cc stp x11, x14, [x0, #32] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc cinv x16, x15, cc mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc cneg x9, x9, cc subs x4, x2, x4 cneg x4, x4, cc csetm x7, cc subs x2, x10, x6 cinv x8, x8, cc cneg x2, x2, cc cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc cneg x1, x1, cc eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 ldp x9, x17, [x0, #16] umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [x0] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [x0, #32] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x9 adcs x1, x1, x17 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 adcs x14, x14, x2 mov x2, #0x1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs 
xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [x0] adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [x0, #16] adc x17, x14, xzr stp x2, x17, [x0, #32] ldr q1, [x26, #96] ldp x9, x2, [x26, #96] ldr q0, [x26, #96] ldp x4, x6, [x26, #112] rev64 v21.4s, v1.4s uzp2 v28.4s, v1.4s, v1.4s umulh x7, x9, x2 xtn v17.2s, v1.2d mul v27.4s, v21.4s, v0.4s ldr q20, [x26, #128] xtn v30.2s, v0.2d ldr q1, [x26, #128] uzp2 v31.4s, v0.4s, v0.4s ldp x5, x10, [x26, #128] umulh x8, x9, x4 uaddlp v3.2d, v27.4s umull v16.2d, v30.2s, v17.2s mul x16, x9, x4 umull v27.2d, v30.2s, v28.2s shrn v0.2s, v20.2d, #32 xtn v7.2s, v20.2d shl v20.2d, v3.2d, #32 umull v3.2d, v31.2s, v28.2s mul x3, x2, x4 umlal v20.2d, v30.2s, v17.2s umull v22.2d, v7.2s, v0.2s usra v27.2d, v16.2d, #32 umulh x11, x2, x4 movi v21.2d, #0xffffffff uzp2 v28.4s, v1.4s, v1.4s adds x15, x16, x7 and v5.16b, v27.16b, v21.16b adcs x3, x3, x8 usra v3.2d, v27.2d, #32 dup v29.2d, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2d, v31.2s, v17.2s mul x8, x9, x2 mov x7, v20.d[1] shl v19.2d, v22.2d, #33 xtn v25.2s, v29.2d rev64 v31.4s, v1.4s lsl x13, x14, #32 uzp2 v6.4s, v29.4s, v29.4s umlal v19.2d, v7.2s, v7.2s usra v3.2d, v5.2d, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4s, v31.4s, v29.4s xtn v4.2s, v1.2d adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2d, v25.2s, v28.2s adcs x11, x16, x16 umull v21.2d, v25.2s, v4.2s mov x17, v3.d[0] umull v18.2d, v6.2s, v28.2s adc x16, x8, xzr uaddlp v16.2d, v17.4s movi v1.2d, #0xffffffff subs x13, x13, x12 usra v31.2d, v21.2d, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2d, v16.2d, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16b, v31.16b, v1.16b adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2d, v6.2s, v4.2s usra v18.2d, v31.2d, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2d, v25.2s, v4.2s adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2d, v3.2d, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x15, x3, x17 sbcs x3, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [sp, #240] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 stp x15, x3, [sp, #256] csetm x15, cc cneg x1, x1, cc stp x11, x14, [sp, #272] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc cinv x16, x15, cc mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc cneg x9, x9, cc subs x4, x2, x4 cneg x4, x4, cc csetm x7, cc subs x2, x10, x6 cinv x8, x8, cc cneg x2, x2, cc cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc cneg x1, x1, cc eor x9, x9, x8 cmn x8, #0x1 eor x7, 
x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 ldp x9, x17, [sp, #256] umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [sp, #240] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [sp, #272] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x9 adcs x1, x1, x17 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 adcs x14, x14, x2 mov x2, #0x1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [sp, #240] adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [sp, #256] adc x17, x14, xzr stp x2, x17, [sp, #272] stp x23, x24, [sp, #0x150] // It is #-48 after inlining, but access to sp+negative in the middle of fn is bad ldr q3, [x26, #96] ldr q25, [x25, #48] ldp x13, x23, [x25, #48] ldp x3, x21, [x26, #96] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [x25, #80] ldp x8, x24, [x26, #112] subs x6, x3, x21 ldr q0, [x26, #128] movi v23.2d, #0xffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc ldp x6, x14, [x25, #64] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, 
v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [x25, #80] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [x26, #128] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #288] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #304] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [sp, #320] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #288] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #304] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #320] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #288] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #304] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #320] cneg x3, x21, cc csetm x24, cc umulh 
x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc subs x21, x6, x15 cneg x22, x21, cc cinv x21, x24, cc subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc csetm x24, cc subs x20, x14, x15 cinv x24, x24, cc mul x22, x3, x22 cneg x3, x20, cc subs x13, x6, x14 cneg x20, x13, cc cinv x15, x16, cc adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #288] ldp x21, x12, [sp, #304] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #320] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #288] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #304] adc x12, x15, x23 stp x21, x12, [sp, #320] ldr q3, [x25, #96] ldr q25, [x26, #48] ldp x13, x23, [x26, #48] ldp x3, x21, [x25, #96] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [x26, #80] ldp x8, x24, [x25, #112] subs x6, x3, x21 ldr q0, [x25, #128] movi v23.2d, #0xffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc ldp x6, x14, [x26, #64] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc umulh 
x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [x26, #80] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [x25, #128] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #48] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #64] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [sp, #80] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #48] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #64] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #80] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, 
x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #48] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #64] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #80] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc subs x21, x6, x15 cneg x22, x21, cc cinv x21, x24, cc subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc csetm x24, cc subs x20, x14, x15 cinv x24, x24, cc mul x22, x3, x22 cneg x3, x20, cc subs x13, x6, x14 cneg x20, x13, cc cinv x15, x16, cc adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #48] ldp x21, x12, [sp, #64] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #80] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #48] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #64] adc x12, x15, x23 stp x21, x12, [sp, #80] mov x1, sp ldr q3, [x1] ldr q25, [x26, #0] ldp x13, x23, [x26, #0] ldp x3, x21, [x1] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [x26, #32] ldp x8, x24, [x1, #16] subs x6, x3, x21 ldr q0, [x1, #32] movi v23.2d, #0xffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc ldp x6, x14, [x26, #16] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, 
v6.4s cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [x26, #32] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [x1, #32] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #96] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #112] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [sp, #128] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, 
x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #96] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #112] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #128] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #96] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #112] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #128] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc subs x21, x6, x15 cneg x22, x21, cc cinv x21, x24, cc subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc csetm x24, cc subs x20, x14, x15 cinv x24, x24, cc mul x22, x3, x22 cneg x3, x20, cc subs x13, x6, x14 cneg x20, x13, cc cinv x15, x16, cc adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #96] ldp x21, x12, [sp, #112] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #128] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #96] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #112] adc x12, x15, x23 stp x21, x12, [sp, #128] ldr q3, [sp, #240] ldr q25, [x25, #0] ldp x13, x23, [x25, #0] ldp x3, x21, [sp, #240] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [x25, #32] ldp x8, x24, [sp, #256] subs x6, x3, x21 ldr q0, 
[sp, #272] movi v23.2d, #0xffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc ldp x6, x14, [x25, #16] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [x25, #32] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [sp, #272] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #192] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #208] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [sp, #224] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, 
xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #192] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #208] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #224] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #192] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #208] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #224] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc subs x21, x6, x15 cneg x22, x21, cc cinv x21, x24, cc subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc csetm x24, cc subs x20, x14, x15 cinv x24, x24, cc mul x22, x3, x22 cneg x3, x20, cc subs x13, x6, x14 cneg x20, x13, cc cinv x15, x16, cc adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #192] ldp x21, x12, [sp, #208] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #224] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #192] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #208] adc x12, x15, x23 stp x21, x12, [sp, 
#224] mov x1, sp ldr q3, [x1] ldr q25, [sp, #48] ldp x13, x23, [sp, #48] ldp x3, x21, [x1] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [sp, #80] ldp x8, x24, [x1, #16] subs x6, x3, x21 ldr q0, [x1, #32] movi v23.2d, #0xffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc ldp x6, x14, [sp, #64] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [sp, #80] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [x1, #32] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #48] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #64] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [sp, #80] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, 
x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #48] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #64] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #80] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #48] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #64] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #80] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc subs x21, x6, x15 cneg x22, x21, cc cinv x21, x24, cc subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc csetm x24, cc subs x20, x14, x15 cinv x24, x24, cc mul x22, x3, x22 cneg x3, x20, cc subs x13, x6, x14 cneg x20, x13, cc cinv x15, x16, cc adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #48] ldp x21, x12, [sp, #64] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #80] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs 
x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #48] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #64] adc x12, x15, x23 stp x21, x12, [sp, #80] ldr q3, [sp, #240] ldr q25, [sp, #288] ldp x13, x23, [sp, #288] ldp x3, x21, [sp, #240] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [sp, #320] ldp x8, x24, [sp, #256] subs x6, x3, x21 ldr q0, [sp, #272] movi v23.2d, #0xffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc ldp x6, x14, [sp, #304] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [sp, #320] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [sp, #272] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #288] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #304] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [sp, #320] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 
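// (Hedged annotation, inferred from the loads and stores alone: the
// interleaved scalar/NEON block in progress here appears to be a
// Montgomery-style multiply-reduce of the 6-word values at [sp, #240]
// and [sp, #288], with the result written back to [sp, #288], [sp, #304]
// and [sp, #320].)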
adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #288] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #304] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #320] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #288] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #304] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #320] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc subs x21, x6, x15 cneg x22, x21, cc cinv x21, x24, cc subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc csetm x24, cc subs x20, x14, x15 cinv x24, x24, cc mul x22, x3, x22 cneg x3, x20, cc subs x13, x6, x14 cneg x20, x13, cc cinv x15, x16, cc adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #288] ldp x21, x12, [sp, #304] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #320] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, 
xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x2, x24, x11 stp x22, x5, [sp, #288] adcs x11, x13, x23 adcs x12, x8, x23 stp x2, x11, [sp, #304] adc x13, x15, x23 stp x12, x13, [sp, #320] ldp x5, x6, [sp, #96] ldp x4, x3, [sp, #192] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #112] ldp x4, x3, [sp, #208] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #128] ldp x4, x3, [sp, #224] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, cc mov x4, #0xffffffff and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #0xfffffffffffffffe and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [sp, #240] stp x7, x8, [sp, #256] stp x9, x10, [sp, #272] ldp x5, x6, [sp, #48] ldp x4, x3, [sp, #288] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #64] sbcs x7, x7, x2 sbcs x8, x8, x11 ldp x9, x10, [sp, #80] sbcs x9, x9, x12 sbcs x10, x10, x13 csetm x3, cc mov x4, #0xffffffff and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #0xfffffffffffffffe and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [sp, #48] stp x7, x8, [sp, #64] stp x9, x10, [sp, #80] ldr q1, [sp, #240] ldp x9, x2, [sp, #240] ldr q0, [sp, #240] ldp x4, x6, [sp, #256] rev64 v21.4s, v1.4s uzp2 v28.4s, v1.4s, v1.4s umulh x7, x9, x2 xtn v17.2s, v1.2d mul v27.4s, v21.4s, v0.4s ldr q20, [sp, #272] xtn v30.2s, v0.2d ldr q1, [sp, #272] uzp2 v31.4s, v0.4s, v0.4s ldp x5, x10, [sp, #272] umulh x8, x9, x4 uaddlp v3.2d, v27.4s umull v16.2d, v30.2s, v17.2s mul x16, x9, x4 umull v27.2d, v30.2s, v28.2s shrn v0.2s, v20.2d, #32 xtn v7.2s, v20.2d shl v20.2d, v3.2d, #32 umull v3.2d, v31.2s, v28.2s mul x3, x2, x4 umlal v20.2d, v30.2s, v17.2s umull v22.2d, v7.2s, v0.2s usra v27.2d, v16.2d, #32 umulh x11, x2, x4 movi v21.2d, #0xffffffff uzp2 v28.4s, v1.4s, v1.4s adds x15, x16, x7 and v5.16b, v27.16b, v21.16b adcs x3, x3, x8 usra v3.2d, v27.2d, #32 dup v29.2d, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2d, v31.2s, v17.2s mul x8, x9, x2 mov x7, v20.d[1] shl v19.2d, v22.2d, #33 xtn v25.2s, v29.2d rev64 v31.4s, v1.4s lsl x13, x14, #32 uzp2 v6.4s, v29.4s, v29.4s umlal v19.2d, v7.2s, v7.2s usra v3.2d, v5.2d, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4s, v31.4s, v29.4s xtn v4.2s, v1.2d adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2d, v25.2s, v28.2s adcs x11, x16, x16 umull v21.2d, v25.2s, v4.2s mov x17, v3.d[0] umull v18.2d, v6.2s, v28.2s adc x16, x8, xzr uaddlp v16.2d, v17.4s movi v1.2d, #0xffffffff subs x13, x13, x12 usra v31.2d, v21.2d, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2d, v16.2d, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16b, v31.16b, v1.16b adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2d, v6.2s, v4.2s usra v18.2d, v31.2d, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2d, v25.2s, v4.2s adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2d, v3.2d, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, 
xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x15, x3, x17 sbcs x3, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [sp, #144] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 stp x15, x3, [sp, #160] csetm x15, cc cneg x1, x1, cc stp x11, x14, [sp, #176] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc cinv x16, x15, cc mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc cneg x9, x9, cc subs x4, x2, x4 cneg x4, x4, cc csetm x7, cc subs x2, x10, x6 cinv x8, x8, cc cneg x2, x2, cc cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc cneg x1, x1, cc eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 ldp x9, x17, [sp, #160] umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [sp, #144] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [sp, #176] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x9 adcs x1, x1, x17 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 adcs x14, x14, x2 mov x2, #0x1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [sp, #144] adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [sp, 
#160] adc x17, x14, xzr stp x2, x17, [sp, #176] mov x0, sp ldr q1, [sp, #48] ldp x9, x2, [sp, #48] ldr q0, [sp, #48] ldp x4, x6, [sp, #64] rev64 v21.4s, v1.4s uzp2 v28.4s, v1.4s, v1.4s umulh x7, x9, x2 xtn v17.2s, v1.2d mul v27.4s, v21.4s, v0.4s ldr q20, [sp, #80] xtn v30.2s, v0.2d ldr q1, [sp, #80] uzp2 v31.4s, v0.4s, v0.4s ldp x5, x10, [sp, #80] umulh x8, x9, x4 uaddlp v3.2d, v27.4s umull v16.2d, v30.2s, v17.2s mul x16, x9, x4 umull v27.2d, v30.2s, v28.2s shrn v0.2s, v20.2d, #32 xtn v7.2s, v20.2d shl v20.2d, v3.2d, #32 umull v3.2d, v31.2s, v28.2s mul x3, x2, x4 umlal v20.2d, v30.2s, v17.2s umull v22.2d, v7.2s, v0.2s usra v27.2d, v16.2d, #32 umulh x11, x2, x4 movi v21.2d, #0xffffffff uzp2 v28.4s, v1.4s, v1.4s adds x15, x16, x7 and v5.16b, v27.16b, v21.16b adcs x3, x3, x8 usra v3.2d, v27.2d, #32 dup v29.2d, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2d, v31.2s, v17.2s mul x8, x9, x2 mov x7, v20.d[1] shl v19.2d, v22.2d, #33 xtn v25.2s, v29.2d rev64 v31.4s, v1.4s lsl x13, x14, #32 uzp2 v6.4s, v29.4s, v29.4s umlal v19.2d, v7.2s, v7.2s usra v3.2d, v5.2d, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4s, v31.4s, v29.4s xtn v4.2s, v1.2d adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2d, v25.2s, v28.2s adcs x11, x16, x16 umull v21.2d, v25.2s, v4.2s mov x17, v3.d[0] umull v18.2d, v6.2s, v28.2s adc x16, x8, xzr uaddlp v16.2d, v17.4s movi v1.2d, #0xffffffff subs x13, x13, x12 usra v31.2d, v21.2d, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2d, v16.2d, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16b, v31.16b, v1.16b adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2d, v6.2s, v4.2s usra v18.2d, v31.2d, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2d, v25.2s, v4.2s adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2d, v3.2d, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x15, x3, x17 sbcs x3, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [x0] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 stp x15, x3, [x0, #16] csetm x15, cc cneg x1, x1, cc stp x11, x14, [x0, #32] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc cinv x16, x15, cc mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc cneg x9, x9, cc subs x4, x2, x4 cneg x4, x4, cc csetm x7, cc subs x2, x10, x6 cinv x8, x8, cc cneg x2, x2, cc cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc cneg x1, x1, cc eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 ldp x9, x17, [x0, #16] umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [x0] eor x12, x4, x2 adcs x4, 
x15, x12 ldp x15, x12, [x0, #32] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x9 adcs x1, x1, x17 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 adcs x14, x14, x2 mov x2, #0x1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [x0] adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [x0, #16] adc x17, x14, xzr stp x2, x17, [x0, #32] ldr q3, [sp, #144] ldr q25, [sp, #192] ldp x13, x23, [sp, #192] ldp x3, x21, [sp, #144] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [sp, #224] ldp x8, x24, [sp, #160] subs x6, x3, x21 ldr q0, [sp, #176] movi v23.2d, #0xffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc ldp x6, x14, [sp, #208] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, 
x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [sp, #224] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [sp, #176] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #192] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #208] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [sp, #224] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #192] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #208] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #224] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #192] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #208] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #224] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc subs x21, x6, x15 cneg x22, x21, cc cinv x21, x24, cc subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc csetm x24, cc subs x20, x14, x15 cinv x24, x24, cc mul x22, x3, x22 cneg x3, x20, cc subs x13, x6, x14 cneg x20, x13, cc cinv x15, x16, cc adds x13, x5, x10 mul 
x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #192] ldp x21, x12, [sp, #208] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #224] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #192] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #208] adc x12, x15, x23 stp x21, x12, [sp, #224] ldr q3, [sp, #144] ldr q25, [sp, #96] ldp x13, x23, [sp, #96] ldp x3, x21, [sp, #144] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [sp, #128] ldp x8, x24, [sp, #160] subs x6, x3, x21 ldr q0, [sp, #176] movi v23.2d, #0xffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc ldp x6, x14, [sp, #112] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds 
x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [sp, #128] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [sp, #176] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #96] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #112] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [sp, #128] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #96] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #112] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #128] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #96] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #112] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #128] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc 
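// (Hedged annotation, inferred from the loads and stores alone: as above,
// this appears to be a Montgomery-style multiply-reduce, here of the
// 6-word values at [sp, #144] and [sp, #96], with the result written back
// to [sp, #96], [sp, #112] and [sp, #128]; the tail of that block follows.)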
        subs x21, x6, x15
        cneg x22, x21, cc
        cinv x21, x24, cc
        subs x20, x23, x13
        umulh x12, x3, x22
        cneg x23, x20, cc
        csetm x24, cc
        subs x20, x14, x15
        cinv x24, x24, cc
        mul x22, x3, x22
        cneg x3, x20, cc
        subs x13, x6, x14
        cneg x20, x13, cc
        cinv x15, x16, cc
        adds x13, x5, x10
        mul x4, x23, x3
        adcs x11, x11, x1
        adc x14, x2, xzr
        adds x5, x13, x8
        adcs x16, x11, x13
        umulh x23, x23, x3
        adcs x3, x14, x11
        adc x1, x14, xzr
        adds x10, x16, x8
        adcs x6, x3, x13
        adcs x8, x1, x11
        umulh x13, x17, x20
        eor x1, x4, x24
        adc x4, x14, xzr
        cmn x24, #0x1
        adcs x1, x5, x1
        eor x16, x23, x24
        eor x11, x1, x9
        adcs x23, x10, x16
        eor x2, x22, x21
        adcs x3, x6, x24
        mul x14, x17, x20
        eor x17, x13, x15
        adcs x13, x8, x24
        adc x8, x4, x24
        cmn x21, #0x1
        adcs x6, x23, x2
        mov x16, #0xfffffffffffffffe
        eor x20, x12, x21
        adcs x20, x3, x20
        eor x23, x14, x15
        adcs x2, x13, x21
        adc x8, x8, x21
        cmn x15, #0x1
        ldp x5, x4, [sp, #96]
        ldp x21, x12, [sp, #112]
        adcs x22, x20, x23
        eor x23, x22, x9
        adcs x17, x2, x17
        adc x22, x8, x15
        cmn x9, #0x1
        adcs x15, x7, x5
        ldp x10, x14, [sp, #128]
        eor x1, x6, x9
        lsl x2, x15, #32
        adcs x8, x11, x4
        adcs x13, x1, x21
        eor x1, x22, x9
        adcs x24, x23, x12
        eor x11, x17, x9
        adcs x23, x11, x10
        adcs x7, x1, x14
        adcs x17, x9, x19
        adcs x20, x9, xzr
        add x1, x2, x15
        lsr x3, x1, #32
        adcs x11, x9, xzr
        adc x9, x9, xzr
        subs x3, x3, x1
        sbc x6, x1, xzr
        adds x24, x24, x5
        adcs x4, x23, x4
        extr x3, x6, x3, #32
        lsr x6, x6, #32
        adcs x21, x7, x21
        adcs x15, x17, x12
        adcs x7, x20, x10
        adcs x20, x11, x14
        mov x14, #0xffffffff
        adc x22, x9, x19
        adds x12, x6, x1
        adc x10, xzr, xzr
        subs x3, x8, x3
        sbcs x12, x13, x12
        lsl x9, x3, #32
        add x3, x9, x3
        sbcs x10, x24, x10
        sbcs x24, x4, xzr
        lsr x9, x3, #32
        sbcs x21, x21, xzr
        sbc x1, x1, xzr
        subs x9, x9, x3
        sbc x13, x3, xzr
        extr x9, x13, x9, #32
        lsr x13, x13, #32
        adds x13, x13, x3
        adc x6, xzr, xzr
        subs x12, x12, x9
        sbcs x17, x10, x13
        lsl x2, x12, #32
        sbcs x10, x24, x6
        add x9, x2, x12
        sbcs x6, x21, xzr
        lsr x5, x9, #32
        sbcs x21, x1, xzr
        sbc x13, x3, xzr
        subs x8, x5, x9
        sbc x19, x9, xzr
        lsr x12, x19, #32
        extr x3, x19, x8, #32
        adds x8, x12, x9
        adc x1, xzr, xzr
        subs x2, x17, x3
        sbcs x12, x10, x8
        sbcs x5, x6, x1
        sbcs x3, x21, xzr
        sbcs x19, x13, xzr
        sbc x24, x9, xzr
        adds x23, x15, x3
        adcs x8, x7, x19
        adcs x11, x20, x24
        adc x9, x22, xzr
        add x24, x9, #0x1
        lsl x7, x24, #32
        subs x21, x24, x7
        sbc x10, x7, xzr
        adds x6, x2, x21
        adcs x7, x12, x10
        adcs x24, x5, x24
        adcs x13, x23, xzr
        adcs x8, x8, xzr
        adcs x15, x11, xzr
        csetm x23, cc
        and x11, x16, x23
        and x20, x14, x23
        adds x22, x6, x20
        eor x3, x20, x23
        adcs x5, x7, x3
        adcs x2, x24, x11
        stp x22, x5, [sp, #96]
        adcs x11, x13, x23
        adcs x12, x8, x23
        stp x2, x11, [sp, #112]
        adc x13, x15, x23
        stp x12, x13, [sp, #128]

// Two 6-word subtractions, each followed by what appears to be the usual
// masked correction modulo p_384

        mov x0, sp
        mov x1, sp
        ldp x5, x6, [x1]
        ldp x4, x3, [sp, #192]
        subs x5, x5, x4
        sbcs x6, x6, x3
        ldp x7, x8, [x1, #16]
        ldp x4, x3, [sp, #208]
        sbcs x7, x7, x4
        sbcs x8, x8, x3
        ldp x9, x10, [x1, #32]
        ldp x4, x3, [sp, #224]
        sbcs x9, x9, x4
        sbcs x10, x10, x3
        csetm x3, cc
        mov x4, #0xffffffff
        and x4, x4, x3
        adds x5, x5, x4
        eor x4, x4, x3
        adcs x6, x6, x4
        mov x4, #0xfffffffffffffffe
        and x4, x4, x3
        adcs x7, x7, x4
        adcs x8, x8, x3
        adcs x9, x9, x3
        adc x10, x10, x3
        stp x5, x6, [x0]
        stp x7, x8, [x0, #16]
        stp x9, x10, [x0, #32]
        ldp x5, x6, [sp, #96]
        ldp x4, x3, [sp, #192]
        subs x5, x5, x4
        sbcs x6, x6, x3
        ldp x4, x3, [sp, #208]
        sbcs x7, x2, x4
        sbcs x8, x11, x3
        ldp x4, x3, [sp, #224]
        sbcs x9, x12, x4
        sbcs x10, x13, x3
        csetm x3, cc
        mov x4, #0xffffffff
        and x4, x4, x3
        adds x5, x5, x4
        eor x4, x4, x3
        adcs x6, x6, x4
        mov x4, #0xfffffffffffffffe
        and x4, x4, x3
        adcs x7, x7, x4
        adcs x8, x8, x3
        adcs x9, x9, x3
        adc x10, x10, x3
        stp x5, x6, [sp, #144]
        stp x7, x8, [sp, #160]
        stp x9, x10, [sp, #176]

// Next interleaved multiply block: 6-word operands at [sp, #240] and
// [x25, #96], with the result written back to [sp, #240], [sp, #256]
// and [sp, #272]

        ldr q3, [sp, #240]
        ldr q25, [x25, #96]
        ldp x13, x23, [x25, #96]
        ldp x3, x21, [sp, #240]
        rev64 v23.4s, v25.4s
        uzp1 v17.4s, v25.4s, v3.4s
        umulh x15, x3, x13
        mul v6.4s, v23.4s, v3.4s
        uzp1 v3.4s, v3.4s, v3.4s
        ldr q27, [x25, #128]
        ldp x8, x24, [sp, #256]
        subs x6, x3, x21
        ldr q0, [sp, #272]
        movi v23.2d, #0xffffffff
        csetm x10, cc
        umulh x19, x21, x23
        rev64 v4.4s, v27.4s
        uzp2 v25.4s, v27.4s, v27.4s
        cneg x4, x6, cc
        subs x7, x23, x13
        xtn v22.2s, v0.2d
        xtn v24.2s, v27.2d
        cneg x20, x7, cc
        ldp x6, x14, [x25, #112]
        mul v27.4s, v4.4s, v0.4s
        uaddlp v20.2d, v6.4s
        cinv x5, x10, cc
        mul x16, x4, x20
        uzp2 v6.4s, v0.4s, v0.4s
        umull v21.2d, v22.2s, v25.2s
        shl v0.2d, v20.2d, #32
        umlal v0.2d, v3.2s, v17.2s
        mul x22, x8, x6
        umull v1.2d, v6.2s, v25.2s
        subs x12, x3, x8
        umull v20.2d, v22.2s, v24.2s
        cneg x17, x12, cc
        umulh x9, x8, x6
        mov x12, v0.d[1]
        eor x11, x16, x5
        mov x7, v0.d[0]
        csetm x10, cc
        usra v21.2d, v20.2d, #32
        adds x15, x15, x12
        adcs x12, x19, x22
        umulh x20, x4, x20
        adc x19, x9, xzr
        usra v1.2d, v21.2d, #32
        adds x22, x15, x7
        and v26.16b, v21.16b, v23.16b
        adcs x16, x12, x15
        uaddlp v25.2d, v27.4s
        adcs x9, x19, x12
        umlal v26.2d, v6.2s, v24.2s
        adc x4, x19, xzr
        adds x16, x16, x7
        shl v27.2d, v25.2d, #32
        adcs x9, x9, x15
        adcs x4, x4, x12
        eor x12, x20, x5
        adc x15, x19, xzr
        subs x20, x6, x13
        cneg x20, x20, cc
        cinv x10, x10, cc
        cmn x5, #0x1
        mul x19, x17, x20
        adcs x11, x22, x11
        adcs x12, x16, x12
        adcs x9, x9, x5
        umulh x17, x17, x20
        adcs x22, x4, x5
        adc x5, x15, x5
        subs x16, x21, x8
        cneg x20, x16, cc
        eor x19, x19, x10
        csetm x4, cc
        subs x16, x6, x23
        cneg x16, x16, cc
        umlal v27.2d, v22.2s, v24.2s
        mul x15, x20, x16
        cinv x4, x4, cc
        cmn x10, #0x1
        usra v1.2d, v26.2d, #32
        adcs x19, x12, x19
        eor x17, x17, x10
        adcs x9, x9, x17
        adcs x22, x22, x10
        lsl x12, x7, #32
        umulh x20, x20, x16
        eor x16, x15, x4
        ldp x15, x17, [x25, #128]
        add x2, x12, x7
        adc x7, x5, x10
        ldp x5, x10, [sp, #272]
        lsr x1, x2, #32
        eor x12, x20, x4
        subs x1, x1, x2
        sbc x20, x2, xzr
        cmn x4, #0x1
        adcs x9, x9, x16
        extr x1, x20, x1, #32
        lsr x20, x20, #32
        adcs x22, x22, x12
        adc x16, x7, x4
        adds x12, x20, x2
        umulh x7, x24, x14
        adc x4, xzr, xzr
        subs x1, x11, x1
        sbcs x20, x19, x12
        sbcs x12, x9, x4
        lsl x9, x1, #32
        add x1, x9, x1
        sbcs x9, x22, xzr
        mul x22, x24, x14
        sbcs x16, x16, xzr
        lsr x4, x1, #32
        sbc x19, x2, xzr
        subs x4, x4, x1
        sbc x11, x1, xzr
        extr x2, x11, x4, #32
        lsr x4, x11, #32
        adds x4, x4, x1
        adc x11, xzr, xzr
        subs x2, x20, x2
        sbcs x4, x12, x4
        sbcs x20, x9, x11
        lsl x12, x2, #32
        add x2, x12, x2
        sbcs x9, x16, xzr
        lsr x11, x2, #32
        sbcs x19, x19, xzr
        sbc x1, x1, xzr
        subs x16, x11, x2
        sbc x12, x2, xzr
        extr x16, x12, x16, #32
        lsr x12, x12, #32
        adds x11, x12, x2
        adc x12, xzr, xzr
        subs x16, x4, x16
        mov x4, v27.d[0]
        sbcs x11, x20, x11
        sbcs x20, x9, x12
        stp x16, x11, [sp, #240]
        sbcs x11, x19, xzr
        sbcs x9, x1, xzr
        stp x20, x11, [sp, #256]
        mov x1, v1.d[0]
        sbc x20, x2, xzr
        subs x12, x24, x5
        mov x11, v27.d[1]
        cneg x16, x12, cc
        csetm x2, cc
        subs x19, x15, x14
        mov x12, v1.d[1]
        cinv x2, x2, cc
        cneg x19, x19, cc
        stp x9, x20, [sp, #272]
        mul x9, x16, x19
        adds x4, x7, x4
        adcs x11, x1, x11
        adc x1, x12, xzr
        adds x20, x4, x22
        umulh x19, x16, x19
        adcs x7, x11, x4
        eor x16, x9, x2
        adcs x9, x1, x11
        adc x12, x1, xzr
        adds x7, x7, x22
        adcs x4, x9, x4
        adcs x9, x12, x11
        adc x12, x1, xzr
        cmn x2, #0x1
        eor x1, x19, x2
        adcs x11, x20, x16
        adcs x19, x7, x1
        adcs x1, x4, x2
        adcs x20, x9, x2
        adc x2, x12, x2
        subs x12, x24, x10
        cneg x16, x12, cc
        csetm x12, cc
        subs x9, x17, x14
        cinv x12, x12, cc
        cneg x9, x9, cc
        subs x3, x24, x3
        sbcs x21, x5, x21
        mul x24, x16, x9
        sbcs x4, x10, x8
        ngc x8, xzr
        subs x10, x5, x10
        eor x5, x24, x12
        csetm x7, cc
        cneg x24, x10, cc
        subs x10, x17, x15
        cinv x7, x7, cc
        cneg x10, x10, cc
        subs x14, x13, x14
        sbcs x15, x23, x15
        eor x13, x21, x8
        mul x23, x24, x10
        sbcs x17, x6, x17
        eor x6, x3, x8
        ngc x21, xzr
        umulh x9, x16, x9
        cmn x8, #0x1
        eor x3, x23, x7
        adcs x23, x6, xzr
        adcs x13, x13, xzr
        eor x16, x4, x8
        adc x16, x16, xzr
        eor x4, x17, x21
        umulh x17, x24, x10
        cmn x21, #0x1
        eor x24, x14, x21
        eor x6, x15, x21
        adcs x15, x24, xzr
        adcs x14, x6, xzr
        adc x6, x4, xzr
        cmn x12, #0x1
        eor x4, x9, x12
        adcs x19, x19, x5
        umulh x5, x23, x15
        adcs x1, x1, x4
        adcs x10, x20, x12
        eor x4, x17, x7
        ldp x20, x9, [sp, #240]
        adc x2, x2, x12
        cmn x7, #0x1
        adcs x12, x1, x3
        ldp x17, x24, [sp, #256]
        mul x1, x16, x6
        adcs x3, x10, x4
        adc x2, x2, x7
        ldp x7, x4, [sp, #272]
        adds x20, x22, x20
        mul x10, x13, x14
        adcs x11, x11, x9
        eor x9, x8, x21
        adcs x21, x19, x17
        stp x20, x11, [sp, #240]
        adcs x12, x12, x24
        mul x8, x23, x15
        adcs x3, x3, x7
        stp x21, x12, [sp, #256]
        adcs x12, x2, x4
        adc x19, xzr, xzr
        subs x21, x23, x16
        umulh x2, x16, x6
        stp x3, x12, [sp, #272]
        cneg x3, x21, cc
        csetm x24, cc
        umulh x11, x13, x14
        subs x21, x13, x16
        eor x7, x8, x9
        cneg x17, x21, cc
        csetm x16, cc
        subs x21, x6, x15
        cneg x22, x21, cc
        cinv x21, x24, cc
        subs x20, x23, x13
        umulh x12, x3, x22
        cneg x23, x20, cc
        csetm x24, cc
        subs x20, x14, x15
        cinv x24, x24, cc
        mul x22, x3, x22
        cneg x3, x20, cc
        subs x13, x6, x14
        cneg x20, x13, cc
        cinv x15, x16, cc
        adds x13, x5, x10
        mul x4, x23, x3
        adcs x11, x11, x1
        adc x14, x2, xzr
        adds x5, x13, x8
        adcs x16, x11, x13
        umulh x23, x23, x3
        adcs x3, x14, x11
        adc x1, x14, xzr
        adds x10, x16, x8
        adcs x6, x3, x13
        adcs x8, x1, x11
        umulh x13, x17, x20
        eor x1, x4, x24
        adc x4, x14, xzr
        cmn x24, #0x1
        adcs x1, x5, x1
        eor x16, x23, x24
        eor x11, x1, x9
        adcs x23, x10, x16
        eor x2, x22, x21
        adcs x3, x6, x24
        mul x14, x17, x20
        eor x17, x13, x15
        adcs x13, x8, x24
        adc x8, x4, x24
        cmn x21, #0x1
        adcs x6, x23, x2
        mov x16, #0xfffffffffffffffe
        eor x20, x12, x21
        adcs x20, x3, x20
        eor x23, x14, x15
        adcs x2, x13, x21
        adc x8, x8, x21
        cmn x15, #0x1
        ldp x5, x4, [sp, #240]
        ldp x21, x12, [sp, #256]
        adcs x22, x20, x23
        eor x23, x22, x9
        adcs x17, x2, x17
        adc x22, x8, x15
        cmn x9, #0x1
        adcs x15, x7, x5
        ldp x10, x14, [sp, #272]
        eor x1, x6, x9
        lsl x2, x15, #32
        adcs x8, x11, x4
        adcs x13, x1, x21
        eor x1, x22, x9
        adcs x24, x23, x12
        eor x11, x17, x9
        adcs x23, x11, x10
        adcs x7, x1, x14
        adcs x17, x9, x19
        adcs x20, x9, xzr
        add x1, x2, x15
        lsr x3, x1, #32
        adcs x11, x9, xzr
        adc x9, x9, xzr
        subs x3, x3, x1
        sbc x6, x1, xzr
        adds x24, x24, x5
        adcs x4, x23, x4
        extr x3, x6, x3, #32
        lsr x6, x6, #32
        adcs x21, x7, x21
        adcs x15, x17, x12
        adcs x7, x20, x10
        adcs x20, x11, x14
        mov x14, #0xffffffff
        adc x22, x9, x19
        adds x12, x6, x1
        adc x10, xzr, xzr
        subs x3, x8, x3
        sbcs x12, x13, x12
        lsl x9, x3, #32
        add x3, x9, x3
        sbcs x10, x24, x10
        sbcs x24, x4, xzr
        lsr x9, x3, #32
        sbcs x21, x21, xzr
        sbc x1, x1, xzr
        subs x9, x9, x3
        sbc x13, x3, xzr
        extr x9, x13, x9, #32
        lsr x13, x13, #32
        adds x13, x13, x3
        adc x6, xzr, xzr
        subs x12, x12, x9
        sbcs x17, x10, x13
        lsl x2, x12, #32
        sbcs x10, x24, x6
        add x9, x2, x12
        sbcs x6, x21, xzr
        lsr x5, x9, #32
        sbcs x21, x1, xzr
        sbc x13, x3, xzr
        subs x8, x5, x9
        sbc x19, x9, xzr
        lsr x12, x19, #32
        extr x3, x19, x8, #32
        adds x8, x12, x9
        adc x1, xzr, xzr
        subs x2, x17, x3
        sbcs x12, x10, x8
        sbcs x5, x6, x1
        sbcs x3, x21, xzr
        sbcs x19, x13, xzr
        sbc x24, x9, xzr
        adds x23, x15, x3
        adcs x8, x7, x19
        adcs x11, x20, x24
        adc x9, x22, xzr
        add x24, x9, #0x1
        lsl x7, x24, #32
        subs x21, x24, x7
sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #240] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #256] adc x12, x15, x23 stp x21, x12, [sp, #272] mov x0, sp mov x1, sp ldp x5, x6, [x1] ldp x4, x3, [sp, #96] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x1, #16] ldp x4, x3, [sp, #112] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [x1, #32] ldp x4, x3, [sp, #128] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, cc mov x4, #0xffffffff and x4, x4, x3 adds x2, x5, x4 eor x4, x4, x3 adcs x11, x6, x4 mov x4, #0xfffffffffffffffe and x4, x4, x3 adcs x4, x7, x4 adcs x12, x8, x3 adcs x13, x9, x3 adc x3, x10, x3 stp x2, x11, [x0] stp x4, x12, [x0, #16] stp x13, x3, [x0, #32] ldp x5, x6, [sp, #192] subs x5, x5, x2 sbcs x6, x6, x11 ldp x7, x8, [sp, #208] sbcs x7, x7, x4 sbcs x8, x8, x12 ldp x9, x10, [sp, #224] sbcs x9, x9, x13 sbcs x10, x10, x3 csetm x3, cc mov x4, #0xffffffff and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #0xfffffffffffffffe and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [sp, #192] stp x7, x8, [sp, #208] stp x9, x10, [sp, #224] ldr q3, [sp, #144] ldr q25, [sp, #288] ldp x13, x23, [sp, #288] ldp x3, x21, [sp, #144] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [sp, #320] ldp x8, x24, [sp, #160] subs x6, x3, x21 ldr q0, [sp, #176] movi v23.2d, #0xffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc ldp x6, x14, [sp, #304] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [sp, #320] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [sp, #176] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs 
x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #144] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #160] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [sp, #176] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #144] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #160] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #176] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #144] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #160] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #176] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc subs x21, x6, x15 cneg x22, x21, cc cinv x21, x24, cc subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc csetm x24, cc subs x20, x14, x15 cinv x24, x24, cc mul x22, x3, x22 cneg x3, x20, cc subs x13, x6, x14 cneg x20, x13, cc cinv x15, x16, cc adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #144] ldp x21, x12, [sp, #160] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #176] eor x1, x6, x9 lsl x2, 
x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #144] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #160] adc x12, x15, x23 stp x21, x12, [sp, #176] ldr q3, [sp, #240] ldr q25, [x26, #96] ldp x13, x23, [x26, #96] ldp x3, x21, [sp, #240] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [x26, #128] ldp x8, x24, [sp, #256] subs x6, x3, x21 ldr q0, [sp, #272] movi v23.2d, #0xffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc ldp x6, x14, [x26, #112] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [x26, #128] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [sp, #272] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, 
#0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #240] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #256] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [sp, #272] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #240] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #256] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #272] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #240] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #256] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #272] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc subs x21, x6, x15 cneg x22, x21, cc cinv x21, x24, cc subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc csetm x24, cc subs x20, x14, x15 cinv x24, x24, cc mul x22, x3, x22 cneg x3, x20, cc subs x13, x6, x14 cneg x20, x13, cc cinv x15, x16, cc adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe eor x20, x12, x21 adcs 
x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #240] ldp x21, x12, [sp, #256] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #272] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #240] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #256] adc x12, x15, x23 stp x21, x12, [sp, #272] ldp x2, x27, [sp, #0x150] // It is #-48 after inlining, but access to sp+negative in the middle of fn is bad ldr q3, [sp, #48] ldr q25, [sp, #192] ldp x13, x23, [sp, #192] ldp x3, x21, [sp, #48] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [sp, #224] ldp x8, x24, [sp, #64] subs x6, x3, x21 ldr q0, [sp, #80] movi v23.2d, #0xffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc ldp x6, x14, [sp, #208] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc 
umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [sp, #224] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [sp, #80] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #192] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #208] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [sp, #224] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #192] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #208] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #224] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #192] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #208] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #224] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc subs x21, x6, x15 cneg x22, x21, cc cinv x21, x24, cc subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc csetm x24, cc subs x20, x14, x15 cinv x24, x24, cc mul x22, x3, x22 cneg x3, x20, cc subs x13, x6, x14 cneg x20, x13, cc cinv x15, x16, cc adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, 
x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #192] ldp x21, x12, [sp, #208] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #224] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x2, x6, x20 eor x3, x20, x23 adcs x6, x7, x3 adcs x7, x24, x11 adcs x9, x13, x23 adcs x10, x8, x23 adc x11, x15, x23 ldp x4, x3, [sp, #144] subs x5, x2, x4 sbcs x6, x6, x3 ldp x4, x3, [sp, #160] sbcs x7, x7, x4 sbcs x8, x9, x3 ldp x4, x3, [sp, #176] sbcs x9, x10, x4 sbcs x10, x11, x3 csetm x3, cc mov x4, #0xffffffff and x4, x4, x3 adds x19, x5, x4 eor x4, x4, x3 adcs x24, x6, x4 mov x4, #0xfffffffffffffffe and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x7, x8, [sp, #208] stp x9, x10, [sp, #224] ldp x0, x1, [x25, #96] ldp x2, x3, [x25, #112] ldp x4, x5, [x25, #128] orr x20, x0, x1 orr x21, x2, x3 orr x22, x4, x5 orr x20, x20, x21 orr x20, x20, x22 cmp x20, xzr cset x20, ne ldp x6, x7, [x26, #96] ldp x8, x9, [x26, #112] ldp x10, x11, [x26, #128] orr x21, x6, x7 orr x22, x8, x9 orr x23, x10, x11 orr x21, x21, x22 orr x21, x21, x23 cmp x21, xzr cset x21, ne cmp x21, x20 ldp x12, x13, [sp, #240] csel x12, x0, x12, cc csel x13, x1, x13, cc csel x12, x6, x12, hi csel x13, x7, x13, hi ldp x14, x15, [sp, #256] csel x14, x2, x14, cc csel x15, x3, x15, cc csel x14, x8, x14, hi csel x15, x9, x15, hi ldp x16, x17, [sp, #272] csel x16, x4, x16, cc csel x17, x5, x17, cc csel x16, x10, x16, hi csel x17, x11, x17, hi ldp x20, x21, [x25] ldp x0, x1, [sp, #0] csel x0, x20, x0, cc csel x1, x21, x1, cc ldp x20, x21, [x26] csel x0, x20, x0, hi csel x1, x21, x1, hi ldp x20, x21, [x25, #16] ldp x2, x3, [sp, #16] csel x2, x20, x2, cc csel 
x3, x21, x3, cc ldp x20, x21, [x26, #16] csel x2, x20, x2, hi csel x3, x21, x3, hi ldp x20, x21, [x25, #32] ldp x4, x5, [sp, #32] csel x4, x20, x4, cc csel x5, x21, x5, cc ldp x20, x21, [x26, #32] csel x4, x20, x4, hi csel x5, x21, x5, hi ldp x20, x21, [x25, #48] csel x6, x20, x19, cc csel x7, x21, x24, cc ldp x20, x21, [x26, #48] csel x6, x20, x6, hi csel x7, x21, x7, hi ldp x20, x21, [x25, #64] ldp x8, x9, [sp, #208] csel x8, x20, x8, cc csel x9, x21, x9, cc ldp x20, x21, [x26, #64] csel x8, x20, x8, hi csel x9, x21, x9, hi ldp x20, x21, [x25, #80] ldp x10, x11, [sp, #224] csel x10, x20, x10, cc csel x11, x21, x11, cc ldp x20, x21, [x26, #80] csel x10, x20, x10, hi csel x11, x21, x11, hi stp x0, x1, [x27] stp x2, x3, [x27, #16] stp x4, x5, [x27, #32] stp x6, x7, [x27, #48] stp x8, x9, [x27, #64] stp x10, x11, [x27, #80] stp x12, x13, [x27, #96] stp x14, x15, [x27, #112] stp x16, x17, [x27, #128] // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP1Z(x27) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(p384_montjadd) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
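The tail of p384_montjadd above tests whether either input's z coordinate is zero (i.e. the point at infinity) and multiplexes the stored result accordingly, since the addition formulas are only valid for two finite points. A minimal Python sketch of that selection logic, assuming points are (x, y, z) triples; the helper name is hypothetical, not taken from the source:

# Mirror of the final cset/csel chains: the cset instructions produce the
# p1z/p2z flags, and the comparison (P2 != 0) - (P1 != 0) drives which
# operand the "hi"/"cc" conditional selects pick.
def select_output(p1, p2, computed):
    p1z = 1 if p1[2] != 0 else 0   # z_1 nonzero?
    p2z = 1 if p2[2] != 0 else 0   # z_2 nonzero?
    if p2z > p1z:                  # "hi": P1 = 0 and P2 != 0
        return p2
    if p2z < p1z:                  # "cc": P2 = 0 and P1 != 0
        return p1
    return computed                # generic case: use the computed sum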
wlsfx/bnbb
45,741
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/p384_montjadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point addition on NIST curve P-384 in Montgomery-Jacobian coordinates // // extern void p384_montjadd_alt(uint64_t p3[static 18], // const uint64_t p1[static 18], // const uint64_t p2[static 18]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjadd_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjadd_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjadd_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 48 // Stable homes for input arguments during main code sequence #define input_z x24 #define input_x x25 #define input_y x26 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define z_2 input_y, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // #NSPACE is the total stack needed for these temporaries #define z1sq sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define x1a sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define z2sq sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define y1a sp, #(NUMSIZE*6) #define NSPACE NUMSIZE*7 // Corresponds exactly to bignum_montmul_p384_alt #define montmul_p384(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x5, x6, [P2] __LF \ mul x12, x3, x5 __LF \ umulh x13, x3, x5 __LF \ mul x11, x3, x6 __LF \ umulh x14, x3, x6 __LF \ adds x13, x13, x11 __LF \ ldp x7, x8, [P2+16] __LF \ mul x11, x3, x7 __LF \ umulh x15, x3, x7 __LF \ adcs x14, x14, x11 __LF \ mul x11, x3, x8 __LF \ umulh x16, x3, x8 __LF \ adcs x15, x15, x11 __LF \ ldp x9, x10, [P2+32] __LF \ mul x11, x3, x9 __LF \ umulh x17, x3, x9 __LF \ adcs x16, x16, x11 __LF \ mul x11, x3, x10 __LF \ umulh x19, x3, x10 __LF \ adcs x17, x17, x11 __LF \ adc x19, x19, xzr __LF \ mul x11, x4, x5 __LF \ adds x13, x13, x11 __LF \ mul x11, x4, x6 __LF \ adcs x14, x14, x11 __LF \ mul x11, x4, x7 __LF \ adcs x15, x15, x11 __LF \ mul x11, x4, x8 __LF \ adcs x16, x16, x11 __LF \ mul x11, x4, x9 __LF \ adcs x17, x17, x11 __LF \ mul x11, x4, x10 __LF \ adcs x19, x19, x11 __LF \ cset x20, cs __LF \ umulh x11, x4, x5 __LF \ adds x14, x14, x11 __LF \ umulh x11, x4, x6 __LF \ adcs x15, x15, x11 __LF \ umulh x11, x4, x7 __LF \ adcs x16, x16, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x17, x17, x11 __LF \ umulh x11, x4, x9 __LF \ adcs x19, x19, x11 __LF \ umulh x11, x4, x10 __LF \ adc x20, x20, x11 __LF \ ldp x3, x4, [P1+16] __LF \ mul x11, x3, x5 __LF \ adds x14, x14, x11 __LF \ mul x11, x3, x6 __LF \ adcs x15, x15, x11 __LF \ mul x11, x3, x7 __LF \ adcs x16, x16, x11 __LF \ mul x11, x3, x8 __LF \ adcs x17, 
x17, x11 __LF \ mul x11, x3, x9 __LF \ adcs x19, x19, x11 __LF \ mul x11, x3, x10 __LF \ adcs x20, x20, x11 __LF \ cset x21, cs __LF \ umulh x11, x3, x5 __LF \ adds x15, x15, x11 __LF \ umulh x11, x3, x6 __LF \ adcs x16, x16, x11 __LF \ umulh x11, x3, x7 __LF \ adcs x17, x17, x11 __LF \ umulh x11, x3, x8 __LF \ adcs x19, x19, x11 __LF \ umulh x11, x3, x9 __LF \ adcs x20, x20, x11 __LF \ umulh x11, x3, x10 __LF \ adc x21, x21, x11 __LF \ mul x11, x4, x5 __LF \ adds x15, x15, x11 __LF \ mul x11, x4, x6 __LF \ adcs x16, x16, x11 __LF \ mul x11, x4, x7 __LF \ adcs x17, x17, x11 __LF \ mul x11, x4, x8 __LF \ adcs x19, x19, x11 __LF \ mul x11, x4, x9 __LF \ adcs x20, x20, x11 __LF \ mul x11, x4, x10 __LF \ adcs x21, x21, x11 __LF \ cset x22, cs __LF \ umulh x11, x4, x5 __LF \ adds x16, x16, x11 __LF \ umulh x11, x4, x6 __LF \ adcs x17, x17, x11 __LF \ umulh x11, x4, x7 __LF \ adcs x19, x19, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x20, x20, x11 __LF \ umulh x11, x4, x9 __LF \ adcs x21, x21, x11 __LF \ umulh x11, x4, x10 __LF \ adc x22, x22, x11 __LF \ ldp x3, x4, [P1+32] __LF \ mul x11, x3, x5 __LF \ adds x16, x16, x11 __LF \ mul x11, x3, x6 __LF \ adcs x17, x17, x11 __LF \ mul x11, x3, x7 __LF \ adcs x19, x19, x11 __LF \ mul x11, x3, x8 __LF \ adcs x20, x20, x11 __LF \ mul x11, x3, x9 __LF \ adcs x21, x21, x11 __LF \ mul x11, x3, x10 __LF \ adcs x22, x22, x11 __LF \ cset x2, cs __LF \ umulh x11, x3, x5 __LF \ adds x17, x17, x11 __LF \ umulh x11, x3, x6 __LF \ adcs x19, x19, x11 __LF \ umulh x11, x3, x7 __LF \ adcs x20, x20, x11 __LF \ umulh x11, x3, x8 __LF \ adcs x21, x21, x11 __LF \ umulh x11, x3, x9 __LF \ adcs x22, x22, x11 __LF \ umulh x11, x3, x10 __LF \ adc x2, x2, x11 __LF \ mul x11, x4, x5 __LF \ adds x17, x17, x11 __LF \ mul x11, x4, x6 __LF \ adcs x19, x19, x11 __LF \ mul x11, x4, x7 __LF \ adcs x20, x20, x11 __LF \ mul x11, x4, x8 __LF \ adcs x21, x21, x11 __LF \ mul x11, x4, x9 __LF \ adcs x22, x22, x11 __LF \ mul x11, x4, x10 __LF \ adcs x2, x2, x11 __LF \ cset x1, cs __LF \ umulh x11, x4, x5 __LF \ adds x19, x19, x11 __LF \ umulh x11, x4, x6 __LF \ adcs x20, x20, x11 __LF \ umulh x11, x4, x7 __LF \ adcs x21, x21, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x22, x22, x11 __LF \ umulh x11, x4, x9 __LF \ adcs x2, x2, x11 __LF \ umulh x11, x4, x10 __LF \ adc x1, x1, x11 __LF \ lsl x7, x12, #32 __LF \ add x12, x7, x12 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x12 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x12 __LF \ umulh x6, x6, x12 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x12 __LF \ adc x5, xzr, xzr __LF \ subs x13, x13, x7 __LF \ sbcs x14, x14, x6 __LF \ sbcs x15, x15, x5 __LF \ sbcs x16, x16, xzr __LF \ sbcs x17, x17, xzr __LF \ sbc x12, x12, xzr __LF \ lsl x7, x13, #32 __LF \ add x13, x7, x13 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x13 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x13 __LF \ umulh x6, x6, x13 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x13 __LF \ adc x5, xzr, xzr __LF \ subs x14, x14, x7 __LF \ sbcs x15, x15, x6 __LF \ sbcs x16, x16, x5 __LF \ sbcs x17, x17, xzr __LF \ sbcs x12, x12, xzr __LF \ sbc x13, x13, xzr __LF \ lsl x7, x14, #32 __LF \ add x14, x7, x14 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x14 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x14 __LF \ umulh x6, x6, x14 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x14 __LF \ adc x5, xzr, xzr __LF \ subs x15, x15, x7 __LF \ sbcs x16, x16, x6 __LF \ sbcs x17, x17, x5 __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ sbc x14, x14, xzr __LF \ lsl x7, x15, 
#32 __LF \ add x15, x7, x15 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x15 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x15 __LF \ umulh x6, x6, x15 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x15 __LF \ adc x5, xzr, xzr __LF \ subs x16, x16, x7 __LF \ sbcs x17, x17, x6 __LF \ sbcs x12, x12, x5 __LF \ sbcs x13, x13, xzr __LF \ sbcs x14, x14, xzr __LF \ sbc x15, x15, xzr __LF \ lsl x7, x16, #32 __LF \ add x16, x7, x16 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x16 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x16 __LF \ umulh x6, x6, x16 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x16 __LF \ adc x5, xzr, xzr __LF \ subs x17, x17, x7 __LF \ sbcs x12, x12, x6 __LF \ sbcs x13, x13, x5 __LF \ sbcs x14, x14, xzr __LF \ sbcs x15, x15, xzr __LF \ sbc x16, x16, xzr __LF \ lsl x7, x17, #32 __LF \ add x17, x7, x17 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x17 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x17 __LF \ umulh x6, x6, x17 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x17 __LF \ adc x5, xzr, xzr __LF \ subs x12, x12, x7 __LF \ sbcs x13, x13, x6 __LF \ sbcs x14, x14, x5 __LF \ sbcs x15, x15, xzr __LF \ sbcs x16, x16, xzr __LF \ sbc x17, x17, xzr __LF \ adds x12, x12, x19 __LF \ adcs x13, x13, x20 __LF \ adcs x14, x14, x21 __LF \ adcs x15, x15, x22 __LF \ adcs x16, x16, x2 __LF \ adcs x17, x17, x1 __LF \ adc x10, xzr, xzr __LF \ mov x11, #0xffffffff00000001 __LF \ adds x19, x12, x11 __LF \ mov x11, #0xffffffff __LF \ adcs x20, x13, x11 __LF \ mov x11, #0x1 __LF \ adcs x21, x14, x11 __LF \ adcs x22, x15, xzr __LF \ adcs x2, x16, xzr __LF \ adcs x1, x17, xzr __LF \ adcs x10, x10, xzr __LF \ csel x12, x12, x19, eq __LF \ csel x13, x13, x20, eq __LF \ csel x14, x14, x21, eq __LF \ csel x15, x15, x22, eq __LF \ csel x16, x16, x2, eq __LF \ csel x17, x17, x1, eq __LF \ stp x12, x13, [P0] __LF \ stp x14, x15, [P0+16] __LF \ stp x16, x17, [P0+32] // Corresponds exactly to bignum_montsqr_p384_alt #define montsqr_p384(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x9, x2, x3 __LF \ umulh x10, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x8, x2, x4 __LF \ adds x10, x10, x8 __LF \ mul x11, x2, x5 __LF \ mul x8, x3, x4 __LF \ adcs x11, x11, x8 __LF \ umulh x12, x2, x5 __LF \ mul x8, x3, x5 __LF \ adcs x12, x12, x8 __LF \ ldp x6, x7, [P1+32] __LF \ mul x13, x2, x7 __LF \ mul x8, x3, x6 __LF \ adcs x13, x13, x8 __LF \ umulh x14, x2, x7 __LF \ mul x8, x3, x7 __LF \ adcs x14, x14, x8 __LF \ mul x15, x5, x6 __LF \ adcs x15, x15, xzr __LF \ umulh x16, x5, x6 __LF \ adc x16, x16, xzr __LF \ umulh x8, x2, x4 __LF \ adds x11, x11, x8 __LF \ umulh x8, x3, x4 __LF \ adcs x12, x12, x8 __LF \ umulh x8, x3, x5 __LF \ adcs x13, x13, x8 __LF \ umulh x8, x3, x6 __LF \ adcs x14, x14, x8 __LF \ umulh x8, x3, x7 __LF \ adcs x15, x15, x8 __LF \ adc x16, x16, xzr __LF \ mul x8, x2, x6 __LF \ adds x12, x12, x8 __LF \ mul x8, x4, x5 __LF \ adcs x13, x13, x8 __LF \ mul x8, x4, x6 __LF \ adcs x14, x14, x8 __LF \ mul x8, x4, x7 __LF \ adcs x15, x15, x8 __LF \ mul x8, x5, x7 __LF \ adcs x16, x16, x8 __LF \ mul x17, x6, x7 __LF \ adcs x17, x17, xzr __LF \ umulh x19, x6, x7 __LF \ adc x19, x19, xzr __LF \ umulh x8, x2, x6 __LF \ adds x13, x13, x8 __LF \ umulh x8, x4, x5 __LF \ adcs x14, x14, x8 __LF \ umulh x8, x4, x6 __LF \ adcs x15, x15, x8 __LF \ umulh x8, x4, x7 __LF \ adcs x16, x16, x8 __LF \ umulh x8, x5, x7 __LF \ adcs x17, x17, x8 __LF \ adc x19, x19, xzr __LF \ adds x9, x9, x9 __LF \ adcs x10, x10, x10 __LF \ adcs x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, 
x14 __LF \ adcs x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adcs x17, x17, x17 __LF \ adcs x19, x19, x19 __LF \ cset x20, hs __LF \ umulh x8, x2, x2 __LF \ mul x2, x2, x2 __LF \ adds x9, x9, x8 __LF \ mul x8, x3, x3 __LF \ adcs x10, x10, x8 __LF \ umulh x8, x3, x3 __LF \ adcs x11, x11, x8 __LF \ mul x8, x4, x4 __LF \ adcs x12, x12, x8 __LF \ umulh x8, x4, x4 __LF \ adcs x13, x13, x8 __LF \ mul x8, x5, x5 __LF \ adcs x14, x14, x8 __LF \ umulh x8, x5, x5 __LF \ adcs x15, x15, x8 __LF \ mul x8, x6, x6 __LF \ adcs x16, x16, x8 __LF \ umulh x8, x6, x6 __LF \ adcs x17, x17, x8 __LF \ mul x8, x7, x7 __LF \ adcs x19, x19, x8 __LF \ umulh x8, x7, x7 __LF \ adc x20, x20, x8 __LF \ lsl x5, x2, #32 __LF \ add x2, x5, x2 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x2 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x2 __LF \ umulh x4, x4, x2 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x2 __LF \ adc x3, xzr, xzr __LF \ subs x9, x9, x5 __LF \ sbcs x10, x10, x4 __LF \ sbcs x11, x11, x3 __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ sbc x2, x2, xzr __LF \ lsl x5, x9, #32 __LF \ add x9, x5, x9 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x9 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x9 __LF \ umulh x4, x4, x9 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x9 __LF \ adc x3, xzr, xzr __LF \ subs x10, x10, x5 __LF \ sbcs x11, x11, x4 __LF \ sbcs x12, x12, x3 __LF \ sbcs x13, x13, xzr __LF \ sbcs x2, x2, xzr __LF \ sbc x9, x9, xzr __LF \ lsl x5, x10, #32 __LF \ add x10, x5, x10 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x10 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x10 __LF \ umulh x4, x4, x10 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x10 __LF \ adc x3, xzr, xzr __LF \ subs x11, x11, x5 __LF \ sbcs x12, x12, x4 __LF \ sbcs x13, x13, x3 __LF \ sbcs x2, x2, xzr __LF \ sbcs x9, x9, xzr __LF \ sbc x10, x10, xzr __LF \ lsl x5, x11, #32 __LF \ add x11, x5, x11 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x11 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x11 __LF \ umulh x4, x4, x11 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x11 __LF \ adc x3, xzr, xzr __LF \ subs x12, x12, x5 __LF \ sbcs x13, x13, x4 __LF \ sbcs x2, x2, x3 __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbc x11, x11, xzr __LF \ lsl x5, x12, #32 __LF \ add x12, x5, x12 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x12 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x12 __LF \ umulh x4, x4, x12 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x12 __LF \ adc x3, xzr, xzr __LF \ subs x13, x13, x5 __LF \ sbcs x2, x2, x4 __LF \ sbcs x9, x9, x3 __LF \ sbcs x10, x10, xzr __LF \ sbcs x11, x11, xzr __LF \ sbc x12, x12, xzr __LF \ lsl x5, x13, #32 __LF \ add x13, x5, x13 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x13 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x13 __LF \ umulh x4, x4, x13 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x13 __LF \ adc x3, xzr, xzr __LF \ subs x2, x2, x5 __LF \ sbcs x9, x9, x4 __LF \ sbcs x10, x10, x3 __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbc x13, x13, xzr __LF \ adds x2, x2, x14 __LF \ adcs x9, x9, x15 __LF \ adcs x10, x10, x16 __LF \ adcs x11, x11, x17 __LF \ adcs x12, x12, x19 __LF \ adcs x13, x13, x20 __LF \ adc x6, xzr, xzr __LF \ mov x8, #-4294967295 __LF \ adds x14, x2, x8 __LF \ mov x8, #4294967295 __LF \ adcs x15, x9, x8 __LF \ mov x8, #1 __LF \ adcs x16, x10, x8 __LF \ adcs x17, x11, xzr __LF \ adcs x19, x12, xzr __LF \ adcs x20, x13, xzr __LF \ adcs x6, x6, xzr __LF \ csel x2, x2, x14, eq __LF \ csel x9, x9, x15, eq __LF \ csel x10, x10, x16, eq __LF \ csel 
x11, x11, x17, eq __LF \ csel x12, x12, x19, eq __LF \ csel x13, x13, x20, eq __LF \ stp x2, x9, [P0] __LF \ stp x10, x11, [P0+16] __LF \ stp x12, x13, [P0+32] // Almost-Montgomery variant which we use when an input to other muls // with the other argument fully reduced (which is always safe). In // fact, with the Karatsuba-based Montgomery mul here, we don't even // *need* the restriction that the other argument is reduced. #define amontsqr_p384(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x9, x2, x3 __LF \ umulh x10, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x8, x2, x4 __LF \ adds x10, x10, x8 __LF \ mul x11, x2, x5 __LF \ mul x8, x3, x4 __LF \ adcs x11, x11, x8 __LF \ umulh x12, x2, x5 __LF \ mul x8, x3, x5 __LF \ adcs x12, x12, x8 __LF \ ldp x6, x7, [P1+32] __LF \ mul x13, x2, x7 __LF \ mul x8, x3, x6 __LF \ adcs x13, x13, x8 __LF \ umulh x14, x2, x7 __LF \ mul x8, x3, x7 __LF \ adcs x14, x14, x8 __LF \ mul x15, x5, x6 __LF \ adcs x15, x15, xzr __LF \ umulh x16, x5, x6 __LF \ adc x16, x16, xzr __LF \ umulh x8, x2, x4 __LF \ adds x11, x11, x8 __LF \ umulh x8, x3, x4 __LF \ adcs x12, x12, x8 __LF \ umulh x8, x3, x5 __LF \ adcs x13, x13, x8 __LF \ umulh x8, x3, x6 __LF \ adcs x14, x14, x8 __LF \ umulh x8, x3, x7 __LF \ adcs x15, x15, x8 __LF \ adc x16, x16, xzr __LF \ mul x8, x2, x6 __LF \ adds x12, x12, x8 __LF \ mul x8, x4, x5 __LF \ adcs x13, x13, x8 __LF \ mul x8, x4, x6 __LF \ adcs x14, x14, x8 __LF \ mul x8, x4, x7 __LF \ adcs x15, x15, x8 __LF \ mul x8, x5, x7 __LF \ adcs x16, x16, x8 __LF \ mul x17, x6, x7 __LF \ adcs x17, x17, xzr __LF \ umulh x19, x6, x7 __LF \ adc x19, x19, xzr __LF \ umulh x8, x2, x6 __LF \ adds x13, x13, x8 __LF \ umulh x8, x4, x5 __LF \ adcs x14, x14, x8 __LF \ umulh x8, x4, x6 __LF \ adcs x15, x15, x8 __LF \ umulh x8, x4, x7 __LF \ adcs x16, x16, x8 __LF \ umulh x8, x5, x7 __LF \ adcs x17, x17, x8 __LF \ adc x19, x19, xzr __LF \ adds x9, x9, x9 __LF \ adcs x10, x10, x10 __LF \ adcs x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, x14 __LF \ adcs x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adcs x17, x17, x17 __LF \ adcs x19, x19, x19 __LF \ cset x20, hs __LF \ umulh x8, x2, x2 __LF \ mul x2, x2, x2 __LF \ adds x9, x9, x8 __LF \ mul x8, x3, x3 __LF \ adcs x10, x10, x8 __LF \ umulh x8, x3, x3 __LF \ adcs x11, x11, x8 __LF \ mul x8, x4, x4 __LF \ adcs x12, x12, x8 __LF \ umulh x8, x4, x4 __LF \ adcs x13, x13, x8 __LF \ mul x8, x5, x5 __LF \ adcs x14, x14, x8 __LF \ umulh x8, x5, x5 __LF \ adcs x15, x15, x8 __LF \ mul x8, x6, x6 __LF \ adcs x16, x16, x8 __LF \ umulh x8, x6, x6 __LF \ adcs x17, x17, x8 __LF \ mul x8, x7, x7 __LF \ adcs x19, x19, x8 __LF \ umulh x8, x7, x7 __LF \ adc x20, x20, x8 __LF \ lsl x5, x2, #32 __LF \ add x2, x5, x2 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x2 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x2 __LF \ umulh x4, x4, x2 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x2 __LF \ adc x3, xzr, xzr __LF \ subs x9, x9, x5 __LF \ sbcs x10, x10, x4 __LF \ sbcs x11, x11, x3 __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ sbc x2, x2, xzr __LF \ lsl x5, x9, #32 __LF \ add x9, x5, x9 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x9 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x9 __LF \ umulh x4, x4, x9 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x9 __LF \ adc x3, xzr, xzr __LF \ subs x10, x10, x5 __LF \ sbcs x11, x11, x4 __LF \ sbcs x12, x12, x3 __LF \ sbcs x13, x13, xzr __LF \ sbcs x2, x2, xzr __LF \ sbc x9, x9, xzr __LF \ lsl x5, x10, #32 __LF \ add x10, x5, x10 __LF \ mov x5, 
#-4294967295 __LF \ umulh x5, x5, x10 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x10 __LF \ umulh x4, x4, x10 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x10 __LF \ adc x3, xzr, xzr __LF \ subs x11, x11, x5 __LF \ sbcs x12, x12, x4 __LF \ sbcs x13, x13, x3 __LF \ sbcs x2, x2, xzr __LF \ sbcs x9, x9, xzr __LF \ sbc x10, x10, xzr __LF \ lsl x5, x11, #32 __LF \ add x11, x5, x11 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x11 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x11 __LF \ umulh x4, x4, x11 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x11 __LF \ adc x3, xzr, xzr __LF \ subs x12, x12, x5 __LF \ sbcs x13, x13, x4 __LF \ sbcs x2, x2, x3 __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbc x11, x11, xzr __LF \ lsl x5, x12, #32 __LF \ add x12, x5, x12 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x12 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x12 __LF \ umulh x4, x4, x12 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x12 __LF \ adc x3, xzr, xzr __LF \ subs x13, x13, x5 __LF \ sbcs x2, x2, x4 __LF \ sbcs x9, x9, x3 __LF \ sbcs x10, x10, xzr __LF \ sbcs x11, x11, xzr __LF \ sbc x12, x12, xzr __LF \ lsl x5, x13, #32 __LF \ add x13, x5, x13 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x13 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x13 __LF \ umulh x4, x4, x13 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x13 __LF \ adc x3, xzr, xzr __LF \ subs x2, x2, x5 __LF \ sbcs x9, x9, x4 __LF \ sbcs x10, x10, x3 __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbc x13, x13, xzr __LF \ adds x2, x2, x14 __LF \ adcs x9, x9, x15 __LF \ adcs x10, x10, x16 __LF \ adcs x11, x11, x17 __LF \ adcs x12, x12, x19 __LF \ adcs x13, x13, x20 __LF \ mov x14, #-4294967295 __LF \ mov x15, #4294967295 __LF \ csel x14, x14, xzr, cs __LF \ csel x15, x15, xzr, cs __LF \ cset x16, cs __LF \ adds x2, x2, x14 __LF \ adcs x9, x9, x15 __LF \ adcs x10, x10, x16 __LF \ adcs x11, x11, xzr __LF \ adcs x12, x12, xzr __LF \ adc x13, x13, xzr __LF \ stp x2, x9, [P0] __LF \ stp x10, x11, [P0+16] __LF \ stp x12, x13, [P0+32] // Corresponds exactly to bignum_sub_p384 #define sub_p384(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ ldp x9, x10, [P1+32] __LF \ ldp x4, x3, [P2+32] __LF \ sbcs x9, x9, x4 __LF \ sbcs x10, x10, x3 __LF \ csetm x3, lo __LF \ mov x4, #4294967295 __LF \ and x4, x4, x3 __LF \ adds x5, x5, x4 __LF \ eor x4, x4, x3 __LF \ adcs x6, x6, x4 __LF \ mov x4, #-2 __LF \ and x4, x4, x3 __LF \ adcs x7, x7, x4 __LF \ adcs x8, x8, x3 __LF \ adcs x9, x9, x3 __LF \ adc x10, x10, x3 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] S2N_BN_SYMBOL(p384_montjadd_alt): CFI_START // Save regs and make room on stack for temporary variables CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_DEC_SP(NSPACE) // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 mov input_y, x2 // Main code, just a sequence of basic field operations // 8 * multiply + 3 * square + 7 * subtract amontsqr_p384(z1sq,z_1) amontsqr_p384(z2sq,z_2) montmul_p384(y1a,z_2,y_1) montmul_p384(y2a,z_1,y_2) montmul_p384(x2a,z1sq,x_2) montmul_p384(x1a,z2sq,x_1) montmul_p384(y2a,z1sq,y2a) montmul_p384(y1a,z2sq,y1a) sub_p384(xd,x2a,x1a) sub_p384(yd,y2a,y1a) amontsqr_p384(zz,xd) montsqr_p384(ww,yd) montmul_p384(zzx1,zz,x1a) montmul_p384(zzx2,zz,x2a) sub_p384(resx,ww,zzx1) sub_p384(t1,zzx2,zzx1) montmul_p384(xd,xd,z_1) 
sub_p384(resx,resx,zzx2) sub_p384(t2,zzx1,resx) montmul_p384(t1,t1,y1a) montmul_p384(resz,xd,z_2) montmul_p384(t2,yd,t2) sub_p384(resy,t2,t1) // Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0 // The condition codes get set by a comparison (P2 != 0) - (P1 != 0) // So "HI" <=> CF /\ ~ZF <=> P1 = 0 /\ ~(P2 = 0) // and "LO" <=> ~CF <=> ~(P1 = 0) /\ P2 = 0 ldp x0, x1, [z_1] ldp x2, x3, [z_1+16] ldp x4, x5, [z_1+32] orr x20, x0, x1 orr x21, x2, x3 orr x22, x4, x5 orr x20, x20, x21 orr x20, x20, x22 cmp x20, xzr cset x20, ne ldp x6, x7, [z_2] ldp x8, x9, [z_2+16] ldp x10, x11, [z_2+32] orr x21, x6, x7 orr x22, x8, x9 orr x23, x10, x11 orr x21, x21, x22 orr x21, x21, x23 cmp x21, xzr cset x21, ne cmp x21, x20 // Multiplex the outputs accordingly, re-using the z's in registers ldp x12, x13, [resz] csel x12, x0, x12, lo csel x13, x1, x13, lo csel x12, x6, x12, hi csel x13, x7, x13, hi ldp x14, x15, [resz+16] csel x14, x2, x14, lo csel x15, x3, x15, lo csel x14, x8, x14, hi csel x15, x9, x15, hi ldp x16, x17, [resz+32] csel x16, x4, x16, lo csel x17, x5, x17, lo csel x16, x10, x16, hi csel x17, x11, x17, hi ldp x20, x21, [x_1] ldp x0, x1, [resx] csel x0, x20, x0, lo csel x1, x21, x1, lo ldp x20, x21, [x_2] csel x0, x20, x0, hi csel x1, x21, x1, hi ldp x20, x21, [x_1+16] ldp x2, x3, [resx+16] csel x2, x20, x2, lo csel x3, x21, x3, lo ldp x20, x21, [x_2+16] csel x2, x20, x2, hi csel x3, x21, x3, hi ldp x20, x21, [x_1+32] ldp x4, x5, [resx+32] csel x4, x20, x4, lo csel x5, x21, x5, lo ldp x20, x21, [x_2+32] csel x4, x20, x4, hi csel x5, x21, x5, hi ldp x20, x21, [y_1] ldp x6, x7, [resy] csel x6, x20, x6, lo csel x7, x21, x7, lo ldp x20, x21, [y_2] csel x6, x20, x6, hi csel x7, x21, x7, hi ldp x20, x21, [y_1+16] ldp x8, x9, [resy+16] csel x8, x20, x8, lo csel x9, x21, x9, lo ldp x20, x21, [y_2+16] csel x8, x20, x8, hi csel x9, x21, x9, hi ldp x20, x21, [y_1+32] ldp x10, x11, [resy+32] csel x10, x20, x10, lo csel x11, x21, x11, lo ldp x20, x21, [y_2+32] csel x10, x20, x10, hi csel x11, x21, x11, hi // Finally store back the multiplexed values stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [x_3+32] stp x6, x7, [y_3] stp x8, x9, [y_3+16] stp x10, x11, [y_3+32] stp x12, x13, [z_3] stp x14, x15, [z_3+16] stp x16, x17, [z_3+32] // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(p384_montjadd_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
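The main body of p384_montjadd_alt is the straight-line sequence of field operations above (as written, 12 multiplications, 4 squarings and 7 subtractions). As a cross-check, here is a minimal Python model of the same sequence over plain residues mod p_384. The assembly runs it on Montgomery-encoded values, where montmul(a, b) computes a*b*2^-384 mod p_384, so every intermediate stays in the Montgomery domain and the plain model below yields the same affine point; the variable names track the temporaries in the source.

P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1   # the P-384 field prime

def jacobian_add(p1, p2):
    # Same operation order as the amontsqr/montmul/montsqr/sub_p384 calls.
    (x1, y1, z1), (x2, y2, z2) = p1, p2
    z1sq = z1 * z1 % P384
    z2sq = z2 * z2 % P384
    y1a = z2 * y1 % P384
    y2a = z1 * y2 % P384
    x2a = z1sq * x2 % P384
    x1a = z2sq * x1 % P384
    y2a = z1sq * y2a % P384
    y1a = z2sq * y1a % P384
    xd = (x2a - x1a) % P384
    yd = (y2a - y1a) % P384
    zz = xd * xd % P384
    ww = yd * yd % P384
    zzx1 = zz * x1a % P384
    zzx2 = zz * x2a % P384
    resx = (ww - zzx1) % P384
    t1 = (zzx2 - zzx1) % P384
    xd = xd * z1 % P384
    resx = (resx - zzx2) % P384
    t2 = (zzx1 - resx) % P384
    t1 = t1 * y1a % P384
    resz = xd * z2 % P384
    t2 = yd * t2 % P384
    resy = (t2 - t1) % P384
    # Degenerate inputs: if one z is zero, return the other point,
    # matching the multiplexing at the end of the assembly.
    if z1 == 0:
        return (x2, y2, z2)
    if z2 == 0:
        return (x1, y1, z1)
    return (resx, resy, resz)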
wlsfx/bnbb
2,605
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_add_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Add modulo p_384, z := (x + y) mod p_384, assuming x and y reduced
// Inputs x[6], y[6]; output z[6]
//
// extern void bignum_add_p384(uint64_t z[static 6], const uint64_t x[static 6],
//                             const uint64_t y[static 6]);
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_add_p384)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_add_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_add_p384)
.text
.balign 4

#define z x0
#define x x1
#define y x2
#define c x3
#define l x4
#define d0 x5
#define d1 x6
#define d2 x7
#define d3 x8
#define d4 x9
#define d5 x10

S2N_BN_SYMBOL(bignum_add_p384):

CFI_START

// First just add the numbers as c + [d5; d4; d3; d2; d1; d0]

ldp d0, d1, [x]
ldp l, c, [y]
adds d0, d0, l
adcs d1, d1, c
ldp d2, d3, [x, #16]
ldp l, c, [y, #16]
adcs d2, d2, l
adcs d3, d3, c
ldp d4, d5, [x, #32]
ldp l, c, [y, #32]
adcs d4, d4, l
adcs d5, d5, c
adc c, xzr, xzr

// Now compare [d5; d4; d3; d2; d1; d0] with p_384

mov l, #0x00000000ffffffff
subs xzr, d0, l
mov l, #0xffffffff00000000
sbcs xzr, d1, l
mov l, #0xfffffffffffffffe
sbcs xzr, d2, l
adcs xzr, d3, xzr
adcs xzr, d4, xzr
adcs xzr, d5, xzr

// Now CF is set (because of inversion) if (x + y) % 2^384 >= p_384
// Thus we want to correct if either this is set or the original carry c was

adcs c, c, xzr
csetm c, ne

// Now correct by subtracting masked p_384

mov l, #0x00000000ffffffff
and l, l, c
subs d0, d0, l
eor l, l, c
sbcs d1, d1, l
mov l, #0xfffffffffffffffe
and l, l, c
sbcs d2, d2, l
sbcs d3, d3, c
sbcs d4, d4, c
sbc d5, d5, c

// Store the result

stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_add_p384)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
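As a cross-check of the contract stated in the header comment, here is a portable C model of the same computation: add, compare against p_384, and conditionally subtract. This is an illustrative sketch only, not part of the library, and it branches on the data, unlike the masked constant-time assembly.

#include <stdint.h>

static const uint64_t P384[6] = {
    0x00000000ffffffffULL, 0xffffffff00000000ULL, 0xfffffffffffffffeULL,
    0xffffffffffffffffULL, 0xffffffffffffffffULL, 0xffffffffffffffffULL
};

void add_p384_model(uint64_t z[6], const uint64_t x[6], const uint64_t y[6])
{
    uint64_t s[6], d[6], c = 0, b = 0;
    for (int i = 0; i < 6; i++) {         /* s = x + y, carry out in c */
        uint64_t t = x[i] + c;
        c = (t < c);
        s[i] = t + y[i];
        c += (s[i] < t);
    }
    for (int i = 0; i < 6; i++) {         /* d = s - p_384, borrow out in b */
        uint64_t t = s[i] - b;
        uint64_t nb = (s[i] < b);
        d[i] = t - P384[i];
        b = nb | (t < P384[i]);
    }
    /* reduce exactly when the raw sum c*2^384 + s is >= p_384 */
    for (int i = 0; i < 6; i++)
        z[i] = (c || !b) ? d[i] : s[i];
}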
wlsfx/bnbb
1,377
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_nonzero_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// 384-bit nonzeroness test, returning 1 if x is nonzero, 0 if x is zero
// Input x[6]; output function return
//
// extern uint64_t bignum_nonzero_6(const uint64_t x[static 6]);
//
// Standard ARM ABI: X0 = x, returns X0
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_nonzero_6)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_nonzero_6)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_nonzero_6)
.text
.balign 4

#define x x0
#define a x1
#define d x2
#define c x3

S2N_BN_SYMBOL(bignum_nonzero_6):

CFI_START

// Generate a = an OR of all the words in the bignum

ldp a, d, [x]
orr a, a, d
ldp c, d, [x, #16]
orr c, c, d
orr a, a, c
ldp c, d, [x, #32]
orr c, c, d
orr a, a, c

// Set a standard C condition based on whether a is nonzero

cmp a, xzr
cset x0, ne

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_nonzero_6)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
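The OR-reduction above has an obvious one-line C counterpart, shown here only to document the return-value convention (1 for nonzero, 0 for zero); the model name is invented for illustration.

#include <stdint.h>

uint64_t nonzero_6_model(const uint64_t x[6])
{
    /* OR all six words together, as the ldp/orr chain does */
    uint64_t a = x[0] | x[1] | x[2] | x[3] | x[4] | x[5];
    return a != 0;
}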
wlsfx/bnbb
45,310
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/p384_montjdouble_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point doubling on NIST curve P-384 in Montgomery-Jacobian coordinates // // extern void p384_montjdouble_alt(uint64_t p3[static 18], // const uint64_t p1[static 18]); // // Does p3 := 2 * p1 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // // Standard ARM ABI: X0 = p3, X1 = p1 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjdouble_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjdouble_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjdouble_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 48 // Stable homes for input arguments during main code sequence #define input_z x23 #define input_x x24 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // #NSPACE is the total stack needed for these temporaries #define z2 sp, #(NUMSIZE*0) #define y2 sp, #(NUMSIZE*1) #define x2p sp, #(NUMSIZE*2) #define xy2 sp, #(NUMSIZE*3) #define y4 sp, #(NUMSIZE*4) #define t2 sp, #(NUMSIZE*4) #define dx2 sp, #(NUMSIZE*5) #define t1 sp, #(NUMSIZE*5) #define d sp, #(NUMSIZE*6) #define x4p sp, #(NUMSIZE*6) #define NSPACE NUMSIZE*7 // Corresponds exactly to bignum_montmul_p384_alt #define montmul_p384(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x5, x6, [P2] __LF \ mul x12, x3, x5 __LF \ umulh x13, x3, x5 __LF \ mul x11, x3, x6 __LF \ umulh x14, x3, x6 __LF \ adds x13, x13, x11 __LF \ ldp x7, x8, [P2+16] __LF \ mul x11, x3, x7 __LF \ umulh x15, x3, x7 __LF \ adcs x14, x14, x11 __LF \ mul x11, x3, x8 __LF \ umulh x16, x3, x8 __LF \ adcs x15, x15, x11 __LF \ ldp x9, x10, [P2+32] __LF \ mul x11, x3, x9 __LF \ umulh x17, x3, x9 __LF \ adcs x16, x16, x11 __LF \ mul x11, x3, x10 __LF \ umulh x19, x3, x10 __LF \ adcs x17, x17, x11 __LF \ adc x19, x19, xzr __LF \ mul x11, x4, x5 __LF \ adds x13, x13, x11 __LF \ mul x11, x4, x6 __LF \ adcs x14, x14, x11 __LF \ mul x11, x4, x7 __LF \ adcs x15, x15, x11 __LF \ mul x11, x4, x8 __LF \ adcs x16, x16, x11 __LF \ mul x11, x4, x9 __LF \ adcs x17, x17, x11 __LF \ mul x11, x4, x10 __LF \ adcs x19, x19, x11 __LF \ cset x20, cs __LF \ umulh x11, x4, x5 __LF \ adds x14, x14, x11 __LF \ umulh x11, x4, x6 __LF \ adcs x15, x15, x11 __LF \ umulh x11, x4, x7 __LF \ adcs x16, x16, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x17, x17, x11 __LF \ umulh x11, x4, x9 __LF \ adcs x19, x19, x11 __LF \ umulh x11, x4, x10 __LF \ adc x20, x20, x11 __LF \ ldp x3, x4, [P1+16] __LF \ mul x11, x3, x5 __LF \ adds x14, x14, x11 __LF \ mul x11, x3, x6 __LF \ adcs x15, x15, x11 __LF \ mul x11, x3, x7 __LF \ adcs x16, x16, x11 __LF \ mul x11, x3, x8 __LF \ adcs x17, x17, x11 __LF \ mul x11, x3, x9 __LF \ adcs x19, x19, x11 __LF \ mul x11, x3, x10 __LF \ adcs x20, x20, x11 __LF \ cset x21, cs __LF \ umulh x11, x3, x5 __LF \ adds x15, x15, x11 __LF \ umulh x11, x3, x6 __LF \ adcs x16, x16, x11 __LF \ umulh x11, x3, x7 __LF \ adcs x17, x17, x11 __LF \ umulh x11, x3, x8 __LF \ adcs x19, x19, x11 __LF \ umulh x11, 
x3, x9 __LF \ adcs x20, x20, x11 __LF \ umulh x11, x3, x10 __LF \ adc x21, x21, x11 __LF \ mul x11, x4, x5 __LF \ adds x15, x15, x11 __LF \ mul x11, x4, x6 __LF \ adcs x16, x16, x11 __LF \ mul x11, x4, x7 __LF \ adcs x17, x17, x11 __LF \ mul x11, x4, x8 __LF \ adcs x19, x19, x11 __LF \ mul x11, x4, x9 __LF \ adcs x20, x20, x11 __LF \ mul x11, x4, x10 __LF \ adcs x21, x21, x11 __LF \ cset x22, cs __LF \ umulh x11, x4, x5 __LF \ adds x16, x16, x11 __LF \ umulh x11, x4, x6 __LF \ adcs x17, x17, x11 __LF \ umulh x11, x4, x7 __LF \ adcs x19, x19, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x20, x20, x11 __LF \ umulh x11, x4, x9 __LF \ adcs x21, x21, x11 __LF \ umulh x11, x4, x10 __LF \ adc x22, x22, x11 __LF \ ldp x3, x4, [P1+32] __LF \ mul x11, x3, x5 __LF \ adds x16, x16, x11 __LF \ mul x11, x3, x6 __LF \ adcs x17, x17, x11 __LF \ mul x11, x3, x7 __LF \ adcs x19, x19, x11 __LF \ mul x11, x3, x8 __LF \ adcs x20, x20, x11 __LF \ mul x11, x3, x9 __LF \ adcs x21, x21, x11 __LF \ mul x11, x3, x10 __LF \ adcs x22, x22, x11 __LF \ cset x2, cs __LF \ umulh x11, x3, x5 __LF \ adds x17, x17, x11 __LF \ umulh x11, x3, x6 __LF \ adcs x19, x19, x11 __LF \ umulh x11, x3, x7 __LF \ adcs x20, x20, x11 __LF \ umulh x11, x3, x8 __LF \ adcs x21, x21, x11 __LF \ umulh x11, x3, x9 __LF \ adcs x22, x22, x11 __LF \ umulh x11, x3, x10 __LF \ adc x2, x2, x11 __LF \ mul x11, x4, x5 __LF \ adds x17, x17, x11 __LF \ mul x11, x4, x6 __LF \ adcs x19, x19, x11 __LF \ mul x11, x4, x7 __LF \ adcs x20, x20, x11 __LF \ mul x11, x4, x8 __LF \ adcs x21, x21, x11 __LF \ mul x11, x4, x9 __LF \ adcs x22, x22, x11 __LF \ mul x11, x4, x10 __LF \ adcs x2, x2, x11 __LF \ cset x1, cs __LF \ umulh x11, x4, x5 __LF \ adds x19, x19, x11 __LF \ umulh x11, x4, x6 __LF \ adcs x20, x20, x11 __LF \ umulh x11, x4, x7 __LF \ adcs x21, x21, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x22, x22, x11 __LF \ umulh x11, x4, x9 __LF \ adcs x2, x2, x11 __LF \ umulh x11, x4, x10 __LF \ adc x1, x1, x11 __LF \ lsl x7, x12, #32 __LF \ add x12, x7, x12 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x12 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x12 __LF \ umulh x6, x6, x12 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x12 __LF \ adc x5, xzr, xzr __LF \ subs x13, x13, x7 __LF \ sbcs x14, x14, x6 __LF \ sbcs x15, x15, x5 __LF \ sbcs x16, x16, xzr __LF \ sbcs x17, x17, xzr __LF \ sbc x12, x12, xzr __LF \ lsl x7, x13, #32 __LF \ add x13, x7, x13 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x13 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x13 __LF \ umulh x6, x6, x13 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x13 __LF \ adc x5, xzr, xzr __LF \ subs x14, x14, x7 __LF \ sbcs x15, x15, x6 __LF \ sbcs x16, x16, x5 __LF \ sbcs x17, x17, xzr __LF \ sbcs x12, x12, xzr __LF \ sbc x13, x13, xzr __LF \ lsl x7, x14, #32 __LF \ add x14, x7, x14 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x14 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x14 __LF \ umulh x6, x6, x14 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x14 __LF \ adc x5, xzr, xzr __LF \ subs x15, x15, x7 __LF \ sbcs x16, x16, x6 __LF \ sbcs x17, x17, x5 __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ sbc x14, x14, xzr __LF \ lsl x7, x15, #32 __LF \ add x15, x7, x15 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x15 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x15 __LF \ umulh x6, x6, x15 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x15 __LF \ adc x5, xzr, xzr __LF \ subs x16, x16, x7 __LF \ sbcs x17, x17, x6 __LF \ sbcs x12, x12, x5 __LF \ sbcs x13, x13, xzr __LF \ sbcs x14, 
x14, xzr __LF \ sbc x15, x15, xzr __LF \ lsl x7, x16, #32 __LF \ add x16, x7, x16 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x16 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x16 __LF \ umulh x6, x6, x16 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x16 __LF \ adc x5, xzr, xzr __LF \ subs x17, x17, x7 __LF \ sbcs x12, x12, x6 __LF \ sbcs x13, x13, x5 __LF \ sbcs x14, x14, xzr __LF \ sbcs x15, x15, xzr __LF \ sbc x16, x16, xzr __LF \ lsl x7, x17, #32 __LF \ add x17, x7, x17 __LF \ mov x7, #0xffffffff00000001 __LF \ umulh x7, x7, x17 __LF \ mov x6, #0xffffffff __LF \ mul x5, x6, x17 __LF \ umulh x6, x6, x17 __LF \ adds x7, x7, x5 __LF \ adcs x6, x6, x17 __LF \ adc x5, xzr, xzr __LF \ subs x12, x12, x7 __LF \ sbcs x13, x13, x6 __LF \ sbcs x14, x14, x5 __LF \ sbcs x15, x15, xzr __LF \ sbcs x16, x16, xzr __LF \ sbc x17, x17, xzr __LF \ adds x12, x12, x19 __LF \ adcs x13, x13, x20 __LF \ adcs x14, x14, x21 __LF \ adcs x15, x15, x22 __LF \ adcs x16, x16, x2 __LF \ adcs x17, x17, x1 __LF \ adc x10, xzr, xzr __LF \ mov x11, #0xffffffff00000001 __LF \ adds x19, x12, x11 __LF \ mov x11, #0xffffffff __LF \ adcs x20, x13, x11 __LF \ mov x11, #0x1 __LF \ adcs x21, x14, x11 __LF \ adcs x22, x15, xzr __LF \ adcs x2, x16, xzr __LF \ adcs x1, x17, xzr __LF \ adcs x10, x10, xzr __LF \ csel x12, x12, x19, eq __LF \ csel x13, x13, x20, eq __LF \ csel x14, x14, x21, eq __LF \ csel x15, x15, x22, eq __LF \ csel x16, x16, x2, eq __LF \ csel x17, x17, x1, eq __LF \ stp x12, x13, [P0] __LF \ stp x14, x15, [P0+16] __LF \ stp x16, x17, [P0+32] // Corresponds exactly to bignum_montsqr_p384_alt #define montsqr_p384(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x9, x2, x3 __LF \ umulh x10, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x8, x2, x4 __LF \ adds x10, x10, x8 __LF \ mul x11, x2, x5 __LF \ mul x8, x3, x4 __LF \ adcs x11, x11, x8 __LF \ umulh x12, x2, x5 __LF \ mul x8, x3, x5 __LF \ adcs x12, x12, x8 __LF \ ldp x6, x7, [P1+32] __LF \ mul x13, x2, x7 __LF \ mul x8, x3, x6 __LF \ adcs x13, x13, x8 __LF \ umulh x14, x2, x7 __LF \ mul x8, x3, x7 __LF \ adcs x14, x14, x8 __LF \ mul x15, x5, x6 __LF \ adcs x15, x15, xzr __LF \ umulh x16, x5, x6 __LF \ adc x16, x16, xzr __LF \ umulh x8, x2, x4 __LF \ adds x11, x11, x8 __LF \ umulh x8, x3, x4 __LF \ adcs x12, x12, x8 __LF \ umulh x8, x3, x5 __LF \ adcs x13, x13, x8 __LF \ umulh x8, x3, x6 __LF \ adcs x14, x14, x8 __LF \ umulh x8, x3, x7 __LF \ adcs x15, x15, x8 __LF \ adc x16, x16, xzr __LF \ mul x8, x2, x6 __LF \ adds x12, x12, x8 __LF \ mul x8, x4, x5 __LF \ adcs x13, x13, x8 __LF \ mul x8, x4, x6 __LF \ adcs x14, x14, x8 __LF \ mul x8, x4, x7 __LF \ adcs x15, x15, x8 __LF \ mul x8, x5, x7 __LF \ adcs x16, x16, x8 __LF \ mul x17, x6, x7 __LF \ adcs x17, x17, xzr __LF \ umulh x19, x6, x7 __LF \ adc x19, x19, xzr __LF \ umulh x8, x2, x6 __LF \ adds x13, x13, x8 __LF \ umulh x8, x4, x5 __LF \ adcs x14, x14, x8 __LF \ umulh x8, x4, x6 __LF \ adcs x15, x15, x8 __LF \ umulh x8, x4, x7 __LF \ adcs x16, x16, x8 __LF \ umulh x8, x5, x7 __LF \ adcs x17, x17, x8 __LF \ adc x19, x19, xzr __LF \ adds x9, x9, x9 __LF \ adcs x10, x10, x10 __LF \ adcs x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, x14 __LF \ adcs x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adcs x17, x17, x17 __LF \ adcs x19, x19, x19 __LF \ cset x20, hs __LF \ umulh x8, x2, x2 __LF \ mul x2, x2, x2 __LF \ adds x9, x9, x8 __LF \ mul x8, x3, x3 __LF \ adcs x10, x10, x8 __LF \ umulh x8, x3, x3 __LF \ adcs x11, x11, x8 __LF \ mul x8, x4, x4 __LF \ adcs x12, x12, x8 __LF \ 
umulh x8, x4, x4 __LF \ adcs x13, x13, x8 __LF \ mul x8, x5, x5 __LF \ adcs x14, x14, x8 __LF \ umulh x8, x5, x5 __LF \ adcs x15, x15, x8 __LF \ mul x8, x6, x6 __LF \ adcs x16, x16, x8 __LF \ umulh x8, x6, x6 __LF \ adcs x17, x17, x8 __LF \ mul x8, x7, x7 __LF \ adcs x19, x19, x8 __LF \ umulh x8, x7, x7 __LF \ adc x20, x20, x8 __LF \ lsl x5, x2, #32 __LF \ add x2, x5, x2 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x2 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x2 __LF \ umulh x4, x4, x2 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x2 __LF \ adc x3, xzr, xzr __LF \ subs x9, x9, x5 __LF \ sbcs x10, x10, x4 __LF \ sbcs x11, x11, x3 __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ sbc x2, x2, xzr __LF \ lsl x5, x9, #32 __LF \ add x9, x5, x9 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x9 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x9 __LF \ umulh x4, x4, x9 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x9 __LF \ adc x3, xzr, xzr __LF \ subs x10, x10, x5 __LF \ sbcs x11, x11, x4 __LF \ sbcs x12, x12, x3 __LF \ sbcs x13, x13, xzr __LF \ sbcs x2, x2, xzr __LF \ sbc x9, x9, xzr __LF \ lsl x5, x10, #32 __LF \ add x10, x5, x10 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x10 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x10 __LF \ umulh x4, x4, x10 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x10 __LF \ adc x3, xzr, xzr __LF \ subs x11, x11, x5 __LF \ sbcs x12, x12, x4 __LF \ sbcs x13, x13, x3 __LF \ sbcs x2, x2, xzr __LF \ sbcs x9, x9, xzr __LF \ sbc x10, x10, xzr __LF \ lsl x5, x11, #32 __LF \ add x11, x5, x11 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x11 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x11 __LF \ umulh x4, x4, x11 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x11 __LF \ adc x3, xzr, xzr __LF \ subs x12, x12, x5 __LF \ sbcs x13, x13, x4 __LF \ sbcs x2, x2, x3 __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbc x11, x11, xzr __LF \ lsl x5, x12, #32 __LF \ add x12, x5, x12 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x12 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x12 __LF \ umulh x4, x4, x12 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x12 __LF \ adc x3, xzr, xzr __LF \ subs x13, x13, x5 __LF \ sbcs x2, x2, x4 __LF \ sbcs x9, x9, x3 __LF \ sbcs x10, x10, xzr __LF \ sbcs x11, x11, xzr __LF \ sbc x12, x12, xzr __LF \ lsl x5, x13, #32 __LF \ add x13, x5, x13 __LF \ mov x5, #-4294967295 __LF \ umulh x5, x5, x13 __LF \ mov x4, #4294967295 __LF \ mul x3, x4, x13 __LF \ umulh x4, x4, x13 __LF \ adds x5, x5, x3 __LF \ adcs x4, x4, x13 __LF \ adc x3, xzr, xzr __LF \ subs x2, x2, x5 __LF \ sbcs x9, x9, x4 __LF \ sbcs x10, x10, x3 __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbc x13, x13, xzr __LF \ adds x2, x2, x14 __LF \ adcs x9, x9, x15 __LF \ adcs x10, x10, x16 __LF \ adcs x11, x11, x17 __LF \ adcs x12, x12, x19 __LF \ adcs x13, x13, x20 __LF \ adc x6, xzr, xzr __LF \ mov x8, #-4294967295 __LF \ adds x14, x2, x8 __LF \ mov x8, #4294967295 __LF \ adcs x15, x9, x8 __LF \ mov x8, #1 __LF \ adcs x16, x10, x8 __LF \ adcs x17, x11, xzr __LF \ adcs x19, x12, xzr __LF \ adcs x20, x13, xzr __LF \ adcs x6, x6, xzr __LF \ csel x2, x2, x14, eq __LF \ csel x9, x9, x15, eq __LF \ csel x10, x10, x16, eq __LF \ csel x11, x11, x17, eq __LF \ csel x12, x12, x19, eq __LF \ csel x13, x13, x20, eq __LF \ stp x2, x9, [P0] __LF \ stp x10, x11, [P0+16] __LF \ stp x12, x13, [P0+32] // Corresponds exactly to bignum_sub_p384 #define sub_p384(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] 
__LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ ldp x9, x10, [P1+32] __LF \ ldp x4, x3, [P2+32] __LF \ sbcs x9, x9, x4 __LF \ sbcs x10, x10, x3 __LF \ csetm x3, lo __LF \ mov x4, #4294967295 __LF \ and x4, x4, x3 __LF \ adds x5, x5, x4 __LF \ eor x4, x4, x3 __LF \ adcs x6, x6, x4 __LF \ mov x4, #-2 __LF \ and x4, x4, x3 __LF \ adcs x7, x7, x4 __LF \ adcs x8, x8, x3 __LF \ adcs x9, x9, x3 __LF \ adc x10, x10, x3 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] // Corresponds exactly to bignum_add_p384 #define add_p384(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ adds x5, x5, x4 __LF \ adcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ adcs x7, x7, x4 __LF \ adcs x8, x8, x3 __LF \ ldp x9, x10, [P1+32] __LF \ ldp x4, x3, [P2+32] __LF \ adcs x9, x9, x4 __LF \ adcs x10, x10, x3 __LF \ adc x3, xzr, xzr __LF \ mov x4, #0xffffffff __LF \ cmp x5, x4 __LF \ mov x4, #0xffffffff00000000 __LF \ sbcs xzr, x6, x4 __LF \ mov x4, #0xfffffffffffffffe __LF \ sbcs xzr, x7, x4 __LF \ adcs xzr, x8, xzr __LF \ adcs xzr, x9, xzr __LF \ adcs xzr, x10, xzr __LF \ adcs x3, x3, xzr __LF \ csetm x3, ne __LF \ mov x4, #0xffffffff __LF \ and x4, x4, x3 __LF \ subs x5, x5, x4 __LF \ eor x4, x4, x3 __LF \ sbcs x6, x6, x4 __LF \ mov x4, #0xfffffffffffffffe __LF \ and x4, x4, x3 __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ sbcs x9, x9, x3 __LF \ sbc x10, x10, x3 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] // P0 = 4 * P1 - P2 #define cmsub41_p384(P0,P1,P2) \ ldp x1, x2, [P1] __LF \ ldp x3, x4, [P1+16] __LF \ ldp x5, x6, [P1+32] __LF \ lsl x0, x1, #2 __LF \ ldp x7, x8, [P2] __LF \ subs x0, x0, x7 __LF \ extr x1, x2, x1, #62 __LF \ sbcs x1, x1, x8 __LF \ ldp x7, x8, [P2+16] __LF \ extr x2, x3, x2, #62 __LF \ sbcs x2, x2, x7 __LF \ extr x3, x4, x3, #62 __LF \ sbcs x3, x3, x8 __LF \ extr x4, x5, x4, #62 __LF \ ldp x7, x8, [P2+32] __LF \ sbcs x4, x4, x7 __LF \ extr x5, x6, x5, #62 __LF \ sbcs x5, x5, x8 __LF \ lsr x6, x6, #62 __LF \ adc x6, x6, xzr __LF \ lsl x7, x6, #32 __LF \ subs x8, x6, x7 __LF \ sbc x7, x7, xzr __LF \ adds x0, x0, x8 __LF \ adcs x1, x1, x7 __LF \ adcs x2, x2, x6 __LF \ adcs x3, x3, xzr __LF \ adcs x4, x4, xzr __LF \ adcs x5, x5, xzr __LF \ csetm x8, cc __LF \ mov x9, #0xffffffff __LF \ and x9, x9, x8 __LF \ adds x0, x0, x9 __LF \ eor x9, x9, x8 __LF \ adcs x1, x1, x9 __LF \ mov x9, #0xfffffffffffffffe __LF \ and x9, x9, x8 __LF \ adcs x2, x2, x9 __LF \ adcs x3, x3, x8 __LF \ adcs x4, x4, x8 __LF \ adc x5, x5, x8 __LF \ stp x0, x1, [P0] __LF \ stp x2, x3, [P0+16] __LF \ stp x4, x5, [P0+32] // P0 = C * P1 - D * P2 #define cmsub_p384(P0,C,P1,D,P2) \ ldp x0, x1, [P2] __LF \ mov x6, #0x00000000ffffffff __LF \ subs x6, x6, x0 __LF \ mov x7, #0xffffffff00000000 __LF \ sbcs x7, x7, x1 __LF \ ldp x0, x1, [P2+16] __LF \ mov x8, #0xfffffffffffffffe __LF \ sbcs x8, x8, x0 __LF \ mov x13, #0xffffffffffffffff __LF \ sbcs x9, x13, x1 __LF \ ldp x0, x1, [P2+32] __LF \ sbcs x10, x13, x0 __LF \ sbc x11, x13, x1 __LF \ mov x12, D __LF \ mul x0, x12, x6 __LF \ mul x1, x12, x7 __LF \ mul x2, x12, x8 __LF \ mul x3, x12, x9 __LF \ mul x4, x12, x10 __LF \ mul x5, x12, x11 __LF \ umulh x6, x12, x6 __LF \ umulh x7, x12, x7 __LF \ umulh x8, x12, x8 __LF \ umulh x9, x12, x9 __LF \ umulh x10, x12, x10 __LF \ umulh x12, x12, x11 __LF \ adds x1, x1, x6 __LF \ adcs x2, x2, x7 __LF \ adcs x3, x3, x8 __LF \ adcs x4, x4, x9 __LF \ adcs x5, x5, x10 __LF \ mov x6, #1 __LF \ adc x6, x12, x6 __LF \ ldp 
x8, x9, [P1] __LF \ ldp x10, x11, [P1+16] __LF \ ldp x12, x13, [P1+32] __LF \ mov x14, C __LF \ mul x15, x14, x8 __LF \ umulh x8, x14, x8 __LF \ adds x0, x0, x15 __LF \ mul x15, x14, x9 __LF \ umulh x9, x14, x9 __LF \ adcs x1, x1, x15 __LF \ mul x15, x14, x10 __LF \ umulh x10, x14, x10 __LF \ adcs x2, x2, x15 __LF \ mul x15, x14, x11 __LF \ umulh x11, x14, x11 __LF \ adcs x3, x3, x15 __LF \ mul x15, x14, x12 __LF \ umulh x12, x14, x12 __LF \ adcs x4, x4, x15 __LF \ mul x15, x14, x13 __LF \ umulh x13, x14, x13 __LF \ adcs x5, x5, x15 __LF \ adc x6, x6, xzr __LF \ adds x1, x1, x8 __LF \ adcs x2, x2, x9 __LF \ adcs x3, x3, x10 __LF \ adcs x4, x4, x11 __LF \ adcs x5, x5, x12 __LF \ adcs x6, x6, x13 __LF \ lsl x7, x6, #32 __LF \ subs x8, x6, x7 __LF \ sbc x7, x7, xzr __LF \ adds x0, x0, x8 __LF \ adcs x1, x1, x7 __LF \ adcs x2, x2, x6 __LF \ adcs x3, x3, xzr __LF \ adcs x4, x4, xzr __LF \ adcs x5, x5, xzr __LF \ csetm x6, cc __LF \ mov x7, #0xffffffff __LF \ and x7, x7, x6 __LF \ adds x0, x0, x7 __LF \ eor x7, x7, x6 __LF \ adcs x1, x1, x7 __LF \ mov x7, #0xfffffffffffffffe __LF \ and x7, x7, x6 __LF \ adcs x2, x2, x7 __LF \ adcs x3, x3, x6 __LF \ adcs x4, x4, x6 __LF \ adc x5, x5, x6 __LF \ stp x0, x1, [P0] __LF \ stp x2, x3, [P0+16] __LF \ stp x4, x5, [P0+32] // A weak version of add that only guarantees sum in 6 digits #define weakadd_p384(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ adds x5, x5, x4 __LF \ adcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ adcs x7, x7, x4 __LF \ adcs x8, x8, x3 __LF \ ldp x9, x10, [P1+32] __LF \ ldp x4, x3, [P2+32] __LF \ adcs x9, x9, x4 __LF \ adcs x10, x10, x3 __LF \ csetm x3, cs __LF \ mov x4, #0xffffffff __LF \ and x4, x4, x3 __LF \ subs x5, x5, x4 __LF \ eor x4, x4, x3 __LF \ sbcs x6, x6, x4 __LF \ mov x4, #0xfffffffffffffffe __LF \ and x4, x4, x3 __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ sbcs x9, x9, x3 __LF \ sbc x10, x10, x3 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] // P0 = 3 * P1 - 8 * P2 #define cmsub38_p384(P0,P1,P2) \ ldp x0, x1, [P2] __LF \ mov x6, #0x00000000ffffffff __LF \ subs x6, x6, x0 __LF \ mov x7, #0xffffffff00000000 __LF \ sbcs x7, x7, x1 __LF \ ldp x0, x1, [P2+16] __LF \ mov x8, #0xfffffffffffffffe __LF \ sbcs x8, x8, x0 __LF \ mov x13, #0xffffffffffffffff __LF \ sbcs x9, x13, x1 __LF \ ldp x0, x1, [P2+32] __LF \ sbcs x10, x13, x0 __LF \ sbc x11, x13, x1 __LF \ lsl x0, x6, #3 __LF \ extr x1, x7, x6, #61 __LF \ extr x2, x8, x7, #61 __LF \ extr x3, x9, x8, #61 __LF \ extr x4, x10, x9, #61 __LF \ extr x5, x11, x10, #61 __LF \ lsr x6, x11, #61 __LF \ add x6, x6, #1 __LF \ ldp x8, x9, [P1] __LF \ ldp x10, x11, [P1+16] __LF \ ldp x12, x13, [P1+32] __LF \ mov x14, 3 __LF \ mul x15, x14, x8 __LF \ umulh x8, x14, x8 __LF \ adds x0, x0, x15 __LF \ mul x15, x14, x9 __LF \ umulh x9, x14, x9 __LF \ adcs x1, x1, x15 __LF \ mul x15, x14, x10 __LF \ umulh x10, x14, x10 __LF \ adcs x2, x2, x15 __LF \ mul x15, x14, x11 __LF \ umulh x11, x14, x11 __LF \ adcs x3, x3, x15 __LF \ mul x15, x14, x12 __LF \ umulh x12, x14, x12 __LF \ adcs x4, x4, x15 __LF \ mul x15, x14, x13 __LF \ umulh x13, x14, x13 __LF \ adcs x5, x5, x15 __LF \ adc x6, x6, xzr __LF \ adds x1, x1, x8 __LF \ adcs x2, x2, x9 __LF \ adcs x3, x3, x10 __LF \ adcs x4, x4, x11 __LF \ adcs x5, x5, x12 __LF \ adcs x6, x6, x13 __LF \ lsl x7, x6, #32 __LF \ subs x8, x6, x7 __LF \ sbc x7, x7, xzr __LF \ adds x0, x0, x8 __LF \ adcs x1, x1, x7 __LF \ adcs x2, x2, x6 __LF \ adcs x3, x3, xzr __LF \ adcs x4, x4, xzr 
__LF \ adcs x5, x5, xzr __LF \ csetm x6, cc __LF \ mov x7, #0xffffffff __LF \ and x7, x7, x6 __LF \ adds x0, x0, x7 __LF \ eor x7, x7, x6 __LF \ adcs x1, x1, x7 __LF \ mov x7, #0xfffffffffffffffe __LF \ and x7, x7, x6 __LF \ adcs x2, x2, x7 __LF \ adcs x3, x3, x6 __LF \ adcs x4, x4, x6 __LF \ adc x5, x5, x6 __LF \ stp x0, x1, [P0] __LF \ stp x2, x3, [P0+16] __LF \ stp x4, x5, [P0+32] S2N_BN_SYMBOL(p384_montjdouble_alt): CFI_START // Save regs and make room on stack for temporary variables CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_DEC_SP(NSPACE) // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 // Main code, just a sequence of basic field operations // z2 = z^2 // y2 = y^2 montsqr_p384(z2,z_1) montsqr_p384(y2,y_1) // x2p = x^2 - z^4 = (x + z^2) * (x - z^2) weakadd_p384(t1,x_1,z2) sub_p384(t2,x_1,z2) montmul_p384(x2p,t1,t2) // t1 = y + z // x4p = x2p^2 // xy2 = x * y^2 add_p384(t1,y_1,z_1) montsqr_p384(x4p,x2p) montmul_p384(xy2,x_1,y2) // t2 = (y + z)^2 montsqr_p384(t2,t1) // d = 12 * xy2 - 9 * x4p // t1 = y^2 + 2 * y * z cmsub_p384(d,12,xy2,9,x4p) sub_p384(t1,t2,z2) // y4 = y^4 montsqr_p384(y4,y2) // z_3' = 2 * y * z // dx2 = d * x2p sub_p384(z_3,t1,y2) montmul_p384(dx2,d,x2p) // x' = 4 * xy2 - d cmsub41_p384(x_3,xy2,d) // y' = 3 * dx2 - 8 * y4 cmsub38_p384(y_3,dx2,y4) // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(p384_montjdouble_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
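The weakadd_p384 macro in the file above differs from the full add_p384 in that it corrects only when the raw sum overflows 2^384, so its output is a 6-digit value that need not be fully reduced below p_384; that is sufficient at its use site, since the following Montgomery multiplication tolerates such inputs. A hedged C sketch of just this weak variant (illustrative naming, branchy rather than masked, and assuming, as at the use site, that the inputs are reduced so one subtraction suffices):

#include <stdint.h>

static const uint64_t P384[6] = {
    0x00000000ffffffffULL, 0xffffffff00000000ULL, 0xfffffffffffffffeULL,
    0xffffffffffffffffULL, 0xffffffffffffffffULL, 0xffffffffffffffffULL
};

void weakadd_p384_model(uint64_t z[6], const uint64_t x[6], const uint64_t y[6])
{
    uint64_t c = 0;
    for (int i = 0; i < 6; i++) {         /* z = x + y, carry out in c */
        uint64_t t = x[i] + c;
        c = (t < c);
        z[i] = t + y[i];
        c += (z[i] < t);
    }
    if (c) {                              /* overflowed 2^384: subtract p_384 once */
        uint64_t b = 0;
        for (int i = 0; i < 6; i++) {
            uint64_t t = z[i] - b;
            uint64_t nb = (z[i] < b);
            z[i] = t - P384[i];
            b = nb | (t < P384[i]);
        }
        /* the final borrow cancels against the carry c, so it is dropped */
    }
}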
wlsfx/bnbb
4,817
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_mod_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Reduce modulo field characteristic, z := x mod p_384 // Input x[k]; output z[6] // // extern void bignum_mod_p384(uint64_t z[static 6], uint64_t k, // const uint64_t *x); // // Standard ARM ABI: X0 = z, X1 = k, X2 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_p384) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_p384) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_p384_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_p384_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_p384_alt) .text .balign 4 #define z x0 #define k x1 #define x x2 #define m0 x3 #define m1 x4 #define m2 x5 #define m3 x6 #define m4 x7 #define m5 x8 #define t0 x9 #define t1 x10 #define t2 x11 #define t3 x12 #define t4 x13 #define t5 x14 #define n0 x15 #define n1 x16 #define n2 x17 S2N_BN_SYMBOL(bignum_mod_p384): S2N_BN_SYMBOL(bignum_mod_p384_alt): CFI_START // If the input is already <= 5 words long, go to a trivial "copy" path cmp k, #6 bcc Lbignum_mod_p384_short // Otherwise load the top 6 digits (top-down) and reduce k by 6 sub k, k, #6 lsl t0, k, #3 add t0, t0, x ldp m4, m5, [t0, #32] ldp m2, m3, [t0, #16] ldp m0, m1, [t0] // Load the complicated lower three words of p_384 = [-1;-1;-1;n2;n1;n0] mov n0, #0x00000000ffffffff mov n1, #0xffffffff00000000 mov n2, #0xfffffffffffffffe // Reduce the top 6 digits mod p_384 (a conditional subtraction of p_384) subs t0, m0, n0 sbcs t1, m1, n1 sbcs t2, m2, n2 adcs t3, m3, xzr adcs t4, m4, xzr adcs t5, m5, xzr csel m0, m0, t0, cc csel m1, m1, t1, cc csel m2, m2, t2, cc csel m3, m3, t3, cc csel m4, m4, t4, cc csel m5, m5, t5, cc // Now do (k-6) iterations of 7->6 word modular reduction cbz k, Lbignum_mod_p384_writeback Lbignum_mod_p384_loop: // Decrement k and load the next digit as t5. 
We now want to reduce // [m5;m4;m3;m2;m1;m0;t5] |-> [m5;m4;m3;m2;m1;m0]; the shuffling downwards is // absorbed into the various ALU operations sub k, k, #1 ldr t5, [x, k, lsl #3] // Initial quotient approximation q = min (h + 1) (2^64 - 1) adds m5, m5, #1 csetm t3, cs add m5, m5, t3 orn n1, xzr, t3 sub t2, m5, #1 sub t1, xzr, m5 // Correction term [m5;t2;t1;t0] = q * (2^384 - p_384), using m5 as a temp lsl t0, t1, #32 extr t1, t2, t1, #32 lsr t2, t2, #32 adds t0, t0, m5 adcs t1, t1, xzr adcs t2, t2, m5 adc m5, xzr, xzr // Addition to the initial value adds t0, t5, t0 adcs t1, m0, t1 adcs t2, m1, t2 adcs t3, m2, m5 adcs t4, m3, xzr adcs t5, m4, xzr adc n1, n1, xzr // Use net top of the 7-word answer (now in n1) for masked correction and m5, n0, n1 adds m0, t0, m5 eor m5, m5, n1 adcs m1, t1, m5 and m5, n2, n1 adcs m2, t2, m5 adcs m3, t3, n1 adcs m4, t4, n1 adc m5, t5, n1 cbnz k, Lbignum_mod_p384_loop // Finally write back [m5;m4;m3;m2;m1;m0] and return Lbignum_mod_p384_writeback: stp m0, m1, [z] stp m2, m3, [z, #16] stp m4, m5, [z, #32] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mod_p384) // Short case: just copy the input with zero-padding Lbignum_mod_p384_short: mov m0, xzr mov m1, xzr mov m2, xzr mov m3, xzr mov m4, xzr mov m5, xzr cbz k, Lbignum_mod_p384_writeback ldr m0, [x] subs k, k, #1 beq Lbignum_mod_p384_writeback ldr m1, [x, #8] subs k, k, #1 beq Lbignum_mod_p384_writeback ldr m2, [x, #16] subs k, k, #1 beq Lbignum_mod_p384_writeback ldr m3, [x, #24] subs k, k, #1 beq Lbignum_mod_p384_writeback ldr m4, [x, #32] b Lbignum_mod_p384_writeback #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
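The three mov constants loaded for the "complicated" lower words of p_384 can be sanity-checked from the algebraic form p_384 = 2^384 - 2^128 - 2^96 + 2^32 - 1: write s = 2^128 + 2^96 - 2^32 + 1, so the low 192 bits of p_384 are the two's complement of s. This throwaway C program (purely illustrative, not part of the library) prints exactly the three constants used above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* s = 2^128 + 2^96 - 2^32 + 1 as three little-endian 64-bit words */
    uint64_t s[3] = { 0xffffffff00000001ULL, 0x00000000ffffffffULL, 1ULL };
    uint64_t n[3], carry = 1;
    for (int i = 0; i < 3; i++) {         /* n = ~s + 1 = low 192 bits of p_384 */
        n[i] = ~s[i] + carry;
        carry = carry && (n[i] == 0);
    }
    printf("%016llx %016llx %016llx\n",
           (unsigned long long)n[0], (unsigned long long)n[1],
           (unsigned long long)n[2]);
    /* expected: 00000000ffffffff ffffffff00000000 fffffffffffffffe */
    return 0;
}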
wlsfx/bnbb
2,622
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_optneg_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Optionally negate modulo p_384, z := (-x) mod p_384 (if p nonzero) or
// z := x (if p zero), assuming x reduced
// Inputs p, x[6]; output z[6]
//
// extern void bignum_optneg_p384(uint64_t z[static 6], uint64_t p,
//                                const uint64_t x[static 6]);
//
// Standard ARM ABI: X0 = z, X1 = p, X2 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_optneg_p384)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_optneg_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_optneg_p384)
.text
.balign 4

#define z x0
#define p x1
#define x x2

#define d0 x3
#define d1 x4
#define d2 x5
#define d3 x6
#define d4 x7
#define d5 x8
#define n0 x9
#define n1 x10
#define n2 x11
#define n3 x12
#define n4 x13
#define n5 x14

S2N_BN_SYMBOL(bignum_optneg_p384):

CFI_START

// Load the 6 digits of x

ldp d0, d1, [x]
ldp d2, d3, [x, #16]
ldp d4, d5, [x, #32]

// Adjust p by zeroing it if the input is zero (to avoid giving -0 = p, which
// is not strictly reduced even though it's correct modulo p)

orr n0, d0, d1
orr n1, d2, d3
orr n2, d4, d5
orr n3, n0, n1
orr n4, n2, n3
cmp n4, #0
csel p, xzr, p, eq

// Load the complicated lower three words of p_384 = [-1;-1;-1;n2;n1;n0] and -1

mov n0, #0x00000000ffffffff
mov n1, #0xffffffff00000000
mov n2, #0xfffffffffffffffe
mov n5, #0xffffffffffffffff

// Do the subtraction, which by hypothesis does not underflow

subs n0, n0, d0
sbcs n1, n1, d1
sbcs n2, n2, d2
sbcs n3, n5, d3
sbcs n4, n5, d4
sbcs n5, n5, d5

// Set condition code if original x is nonzero and p was nonzero

cmp p, #0

// Hence multiplex and write back

csel n0, n0, d0, ne
csel n1, n1, d1, ne
csel n2, n2, d2, ne
csel n3, n3, d3, ne
csel n4, n4, d4, ne
csel n5, n5, d5, ne

stp n0, n1, [z]
stp n2, n3, [z, #16]
stp n4, n5, [z, #32]

// Return

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_optneg_p384)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
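For reference, a branchy C model of the optional-negation contract above, including the special-casing of zero that keeps the output strictly reduced. This is illustrative only; the assembly achieves the same effect with masks and csel in constant time.

#include <stdint.h>

static const uint64_t P384[6] = {
    0x00000000ffffffffULL, 0xffffffff00000000ULL, 0xfffffffffffffffeULL,
    0xffffffffffffffffULL, 0xffffffffffffffffULL, 0xffffffffffffffffULL
};

void optneg_p384_model(uint64_t z[6], uint64_t p, const uint64_t x[6])
{
    uint64_t a = x[0] | x[1] | x[2] | x[3] | x[4] | x[5];
    if (p == 0 || a == 0) {               /* no negation requested, or x = 0 */
        for (int i = 0; i < 6; i++) z[i] = x[i];
        return;
    }
    uint64_t b = 0;                       /* z = p_384 - x; no borrow since x < p_384 */
    for (int i = 0; i < 6; i++) {
        uint64_t t = P384[i] - b;
        uint64_t nb = (P384[i] < b);
        z[i] = t - x[i];
        b = nb | (t < x[i]);
    }
}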
wlsfx/bnbb
2,073
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_half_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Halve modulo p_384, z := (x / 2) mod p_384, assuming x reduced
// Input x[6]; output z[6]
//
// extern void bignum_half_p384(uint64_t z[static 6], const uint64_t x[static 6]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_half_p384)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_half_p384)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_half_p384)
.text
.balign 4

#define z x0
#define x x1

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x6
#define d5 x7
#define d6 x8
#define d7 x9
#define m x10
#define n x11

S2N_BN_SYMBOL(bignum_half_p384):

CFI_START

// Load the 6 digits of x

ldp d0, d1, [x]
ldp d2, d3, [x, #16]
ldp d4, d5, [x, #32]

// Get a bitmask corresponding to the lowest bit of the input

and m, d0, #1
neg m, m

// Do a masked addition of p_384, catching carry in a 7th word

and n, m, #0x00000000ffffffff
adds d0, d0, n
and n, m, #0xffffffff00000000
adcs d1, d1, n
and n, m, #0xfffffffffffffffe
adcs d2, d2, n
adcs d3, d3, m
adcs d4, d4, m
adcs d5, d5, m
adc d6, xzr, xzr

// Now shift that sum right one place

extr d0, d1, d0, #1
extr d1, d2, d1, #1
extr d2, d3, d2, #1
extr d3, d4, d3, #1
extr d4, d5, d4, #1
extr d5, d6, d5, #1

// Store back

stp d0, d1, [z]
stp d2, d3, [z, #16]
stp d4, d5, [z, #32]

// Return

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_half_p384)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
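The masked-add-then-shift idea above also reads naturally in C: add p_384 exactly when x is odd (x + p_384 is then even), catch the carry in a seventh word, and do an exact one-bit right shift across all seven words. A small illustrative model follows (branch-free like the assembly, though not otherwise vetted for side channels; the function name is invented here):

#include <stdint.h>

static const uint64_t P384[6] = {
    0x00000000ffffffffULL, 0xffffffff00000000ULL, 0xfffffffffffffffeULL,
    0xffffffffffffffffULL, 0xffffffffffffffffULL, 0xffffffffffffffffULL
};

void half_p384_model(uint64_t z[6], const uint64_t x[6])
{
    uint64_t m = -(x[0] & 1);             /* all-ones mask iff x is odd */
    uint64_t d[7], c = 0;
    for (int i = 0; i < 6; i++) {         /* d = x + (p_384 & mask), carry into d[6] */
        uint64_t t = x[i] + c;
        c = (t < c);
        d[i] = t + (P384[i] & m);
        c += (d[i] < t);
    }
    d[6] = c;
    for (int i = 0; i < 6; i++)           /* exact right shift by one bit */
        z[i] = (d[i] >> 1) | (d[i + 1] << 63);
}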
wlsfx/bnbb
314,701
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/p384_montjscalarmul.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery-Jacobian form scalar multiplication for P-384 // Input scalar[6], point[18]; output res[18] // // extern void p384_montjscalarmul // (uint64_t res[static 18], // const uint64_t scalar[static 6], // const uint64_t point[static 18]); // // This function is a variant of its affine point version p384_scalarmul. // Here, input and output points are assumed to be in Jacobian form with // their coordinates in the Montgomery domain. Thus, if priming indicates // Montgomery form, x' = (2^384 * x) mod p_384 etc., each point argument // is a triple (x',y',z') representing the affine point (x/z^2,y/z^3) when // z' is nonzero or the point at infinity (group identity) if z' = 0. // // Given scalar = n and point = P, assumed to be on the NIST elliptic // curve P-384, returns a representation of n * P. If the result is the // point at infinity (either because the input point was or because the // scalar was a multiple of p_384) then the output is guaranteed to // represent the point at infinity, i.e. to have its z coordinate zero. // // Standard ARM ABI: X0 = res, X1 = scalar, X2 = point // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjscalarmul) S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjscalarmul) S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjscalarmul) .text .balign 4 // Size of individual field elements #define NUMSIZE 48 #define JACSIZE (3*NUMSIZE) // Safe copies of input res and additional values in variables. #define bf x22 #define sgn x23 #define j x24 #define res x25 // Intermediate variables on the stack. // The table is 16 entries, each of size JACSIZE = 3 * NUMSIZE #define scalarb sp, #(0*NUMSIZE) #define acc sp, #(1*NUMSIZE) #define tabent sp, #(4*NUMSIZE) #define tab sp, #(7*NUMSIZE) #define NSPACE 55*NUMSIZE // Avoid using .rep for the sake of the BoringSSL/AWS-LC delocator, // which doesn't accept repetitions, assembler macros etc. #define selectblock(I) \ cmp bf, #(1*I) __LF \ ldp x20, x21, [x19] __LF \ csel x0, x20, x0, eq __LF \ csel x1, x21, x1, eq __LF \ ldp x20, x21, [x19, #16] __LF \ csel x2, x20, x2, eq __LF \ csel x3, x21, x3, eq __LF \ ldp x20, x21, [x19, #32] __LF \ csel x4, x20, x4, eq __LF \ csel x5, x21, x5, eq __LF \ ldp x20, x21, [x19, #48] __LF \ csel x6, x20, x6, eq __LF \ csel x7, x21, x7, eq __LF \ ldp x20, x21, [x19, #64] __LF \ csel x8, x20, x8, eq __LF \ csel x9, x21, x9, eq __LF \ ldp x20, x21, [x19, #80] __LF \ csel x10, x20, x10, eq __LF \ csel x11, x21, x11, eq __LF \ ldp x20, x21, [x19, #96] __LF \ csel x12, x20, x12, eq __LF \ csel x13, x21, x13, eq __LF \ ldp x20, x21, [x19, #112] __LF \ csel x14, x20, x14, eq __LF \ csel x15, x21, x15, eq __LF \ ldp x20, x21, [x19, #128] __LF \ csel x16, x20, x16, eq __LF \ csel x17, x21, x17, eq __LF \ add x19, x19, #JACSIZE // Loading large constants #define movbig(nn,n3,n2,n1,n0) \ movz nn, n0 __LF \ movk nn, n1, lsl #16 __LF \ movk nn, n2, lsl #32 __LF \ movk nn, n3, lsl #48 S2N_BN_SYMBOL(p384_montjscalarmul): CFI_START CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x30) CFI_DEC_SP(NSPACE) // Preserve the "res" input argument; others get processed early. mov res, x0 // Reduce the input scalar mod n_384, i.e. conditionally subtract n_384. // Store it to "scalarb". 
ldp x3, x4, [x1] movbig(x15, #0xecec, #0x196a, #0xccc5, #0x2973) ldp x5, x6, [x1, #16] movbig(x16, #0x581a, #0x0db2, #0x48b0, #0xa77a) ldp x7, x8, [x1, #32] movbig(x17, #0xc763, #0x4d81, #0xf437, #0x2ddf) subs x9, x3, x15 sbcs x10, x4, x16 sbcs x11, x5, x17 adcs x12, x6, xzr adcs x13, x7, xzr adcs x14, x8, xzr csel x3, x3, x9, cc csel x4, x4, x10, cc csel x5, x5, x11, cc csel x6, x6, x12, cc csel x7, x7, x13, cc csel x8, x8, x14, cc stp x3, x4, [scalarb] stp x5, x6, [scalarb+16] stp x7, x8, [scalarb+32] // Set the tab[0] table entry to the input point = 1 * P ldp x10, x11, [x2] stp x10, x11, [tab] ldp x12, x13, [x2, #16] stp x12, x13, [tab+16] ldp x14, x15, [x2, #32] stp x14, x15, [tab+32] ldp x10, x11, [x2, #48] stp x10, x11, [tab+48] ldp x12, x13, [x2, #64] stp x12, x13, [tab+64] ldp x14, x15, [x2, #80] stp x14, x15, [tab+80] ldp x10, x11, [x2, #96] stp x10, x11, [tab+96] ldp x12, x13, [x2, #112] stp x12, x13, [tab+112] ldp x14, x15, [x2, #128] stp x14, x15, [tab+128] // Compute and record tab[1] = 2 * p, ..., tab[15] = 16 * P add x0, tab+JACSIZE*1 add x1, tab CFI_BL(Lp384_montjscalarmul_p384_montjdouble) add x0, tab+JACSIZE*2 add x1, tab+JACSIZE*1 add x2, tab CFI_BL(Lp384_montjscalarmul_p384_montjadd) add x0, tab+JACSIZE*3 add x1, tab+JACSIZE*1 CFI_BL(Lp384_montjscalarmul_p384_montjdouble) add x0, tab+JACSIZE*4 add x1, tab+JACSIZE*3 add x2, tab CFI_BL(Lp384_montjscalarmul_p384_montjadd) add x0, tab+JACSIZE*5 add x1, tab+JACSIZE*2 CFI_BL(Lp384_montjscalarmul_p384_montjdouble) add x0, tab+JACSIZE*6 add x1, tab+JACSIZE*5 add x2, tab CFI_BL(Lp384_montjscalarmul_p384_montjadd) add x0, tab+JACSIZE*7 add x1, tab+JACSIZE*3 CFI_BL(Lp384_montjscalarmul_p384_montjdouble) add x0, tab+JACSIZE*8 add x1, tab+JACSIZE*7 add x2, tab CFI_BL(Lp384_montjscalarmul_p384_montjadd) add x0, tab+JACSIZE*9 add x1, tab+JACSIZE*4 CFI_BL(Lp384_montjscalarmul_p384_montjdouble) add x0, tab+JACSIZE*10 add x1, tab+JACSIZE*9 add x2, tab CFI_BL(Lp384_montjscalarmul_p384_montjadd) add x0, tab+JACSIZE*11 add x1, tab+JACSIZE*5 CFI_BL(Lp384_montjscalarmul_p384_montjdouble) add x0, tab+JACSIZE*12 add x1, tab+JACSIZE*11 add x2, tab CFI_BL(Lp384_montjscalarmul_p384_montjadd) add x0, tab+JACSIZE*13 add x1, tab+JACSIZE*6 CFI_BL(Lp384_montjscalarmul_p384_montjdouble) add x0, tab+JACSIZE*14 add x1, tab+JACSIZE*13 add x2, tab CFI_BL(Lp384_montjscalarmul_p384_montjadd) add x0, tab+JACSIZE*15 add x1, tab+JACSIZE*7 CFI_BL(Lp384_montjscalarmul_p384_montjdouble) // Add the recoding constant sum_i(16 * 32^i) to the scalar to allow signed // digits. The digits of the constant, in lowest-to-highest order, are as // follows; they are generated dynamically since none is a simple ARM load. // // 0x0842108421084210 // 0x1084210842108421 // 0x2108421084210842 // 0x4210842108421084 // 0x8421084210842108 // 0x0842108421084210 ldp x0, x1, [scalarb] ldp x2, x3, [scalarb+16] ldp x4, x5, [scalarb+32] movbig(x8, #0x1084, #0x2108, #0x4210, #0x8421) adds x0, x0, x8, lsr #1 adcs x1, x1, x8 lsl x8, x8, #1 adcs x2, x2, x8 lsl x8, x8, #1 adcs x3, x3, x8 lsl x8, x8, #1 adcs x4, x4, x8 lsr x8, x8, #4 adcs x5, x5, x8 cset x6, cs // Record the top bitfield then shift the whole scalar left 4 bits // to align the top of the next bitfield with the MSB (bits 379..383). 
extr bf, x6, x5, #60 extr x5, x5, x4, #60 extr x4, x4, x3, #60 extr x3, x3, x2, #60 extr x2, x2, x1, #60 extr x1, x1, x0, #60 lsl x0, x0, #4 stp x0, x1, [scalarb] stp x2, x3, [scalarb+16] stp x4, x5, [scalarb+32] // Initialize the accumulator to the corresponding entry using constant-time // lookup in the table. This top digit, uniquely, is not recoded so there is // no sign adjustment to make. mov x0, xzr mov x1, xzr mov x2, xzr mov x3, xzr mov x4, xzr mov x5, xzr mov x6, xzr mov x7, xzr mov x8, xzr mov x9, xzr mov x10, xzr mov x11, xzr mov x12, xzr mov x13, xzr mov x14, xzr mov x15, xzr mov x16, xzr mov x17, xzr add x19, tab selectblock(1) selectblock(2) selectblock(3) selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) selectblock(9) selectblock(10) selectblock(11) selectblock(12) selectblock(13) selectblock(14) selectblock(15) selectblock(16) stp x0, x1, [acc] stp x2, x3, [acc+16] stp x4, x5, [acc+32] stp x6, x7, [acc+48] stp x8, x9, [acc+64] stp x10, x11, [acc+80] stp x12, x13, [acc+96] stp x14, x15, [acc+112] stp x16, x17, [acc+128] mov j, #380 // Main loop over size-5 bitfields: double 5 times then add signed digit // At each stage we shift the scalar left by 5 bits so we can simply pick // the top 5 bits as the bitfield, saving some fiddle over indexing. Lp384_montjscalarmul_mainloop: sub j, j, #5 add x0, acc add x1, acc CFI_BL(Lp384_montjscalarmul_p384_montjdouble) add x0, acc add x1, acc CFI_BL(Lp384_montjscalarmul_p384_montjdouble) add x0, acc add x1, acc CFI_BL(Lp384_montjscalarmul_p384_montjdouble) add x0, acc add x1, acc CFI_BL(Lp384_montjscalarmul_p384_montjdouble) add x0, acc add x1, acc CFI_BL(Lp384_montjscalarmul_p384_montjdouble) // Choose the bitfield and adjust it to sign and magnitude ldp x0, x1, [scalarb] ldp x2, x3, [scalarb+16] ldp x4, x5, [scalarb+32] lsr bf, x5, #59 extr x5, x5, x4, #59 extr x4, x4, x3, #59 extr x3, x3, x2, #59 extr x2, x2, x1, #59 extr x1, x1, x0, #59 lsl x0, x0, #5 stp x0, x1, [scalarb] stp x2, x3, [scalarb+16] stp x4, x5, [scalarb+32] subs bf, bf, #16 cset sgn, lo // sgn = sign of digit (1 = negative) cneg bf, bf, lo // bf = absolute value of digit // Conditionally select the table entry tab[i-1] = i * P in constant time mov x0, xzr mov x1, xzr mov x2, xzr mov x3, xzr mov x4, xzr mov x5, xzr mov x6, xzr mov x7, xzr mov x8, xzr mov x9, xzr mov x10, xzr mov x11, xzr mov x12, xzr mov x13, xzr mov x14, xzr mov x15, xzr mov x16, xzr mov x17, xzr add x19, tab selectblock(1) selectblock(2) selectblock(3) selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) selectblock(9) selectblock(10) selectblock(11) selectblock(12) selectblock(13) selectblock(14) selectblock(15) selectblock(16) // Store it to "tabent" with the y coordinate optionally negated. // This is done carefully to give coordinates < p_384 even in // the degenerate case y = 0 (when z = 0 for points on the curve). 
stp x0, x1, [tabent] stp x2, x3, [tabent+16] stp x4, x5, [tabent+32] stp x12, x13, [tabent+96] stp x14, x15, [tabent+112] stp x16, x17, [tabent+128] mov x0, #0x00000000ffffffff subs x0, x0, x6 orr x12, x6, x7 mov x1, #0xffffffff00000000 sbcs x1, x1, x7 orr x13, x8, x9 mov x2, #0xfffffffffffffffe sbcs x2, x2, x8 orr x14, x10, x11 mov x5, #0xffffffffffffffff sbcs x3, x5, x9 orr x12, x12, x13 sbcs x4, x5, x10 orr x12, x12, x14 sbcs x5, x5, x11 cmp sgn, xzr ccmp x12, xzr, #4, ne csel x6, x0, x6, ne csel x7, x1, x7, ne csel x8, x2, x8, ne csel x9, x3, x9, ne csel x10, x4, x10, ne csel x11, x5, x11, ne stp x6, x7, [tabent+48] stp x8, x9, [tabent+64] stp x10, x11, [tabent+80] // Add to the accumulator add x0, acc add x1, acc add x2, tabent CFI_BL(Lp384_montjscalarmul_p384_montjadd) cbnz j, Lp384_montjscalarmul_mainloop // That's the end of the main loop, and we just need to copy the // result in "acc" to the output. ldp x0, x1, [acc] stp x0, x1, [res] ldp x0, x1, [acc+16] stp x0, x1, [res, #16] ldp x0, x1, [acc+32] stp x0, x1, [res, #32] ldp x0, x1, [acc+48] stp x0, x1, [res, #48] ldp x0, x1, [acc+64] stp x0, x1, [res, #64] ldp x0, x1, [acc+80] stp x0, x1, [res, #80] ldp x0, x1, [acc+96] stp x0, x1, [res, #96] ldp x0, x1, [acc+112] stp x0, x1, [res, #112] ldp x0, x1, [acc+128] stp x0, x1, [res, #128] // Restore stack and registers and return CFI_INC_SP(NSPACE) CFI_POP2(x25,x30) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(p384_montjscalarmul) // Local copies of subroutines, complete clones at the moment S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp384_montjscalarmul_p384_montjadd) Lp384_montjscalarmul_p384_montjadd: CFI_START CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_PUSH1Z(x27) CFI_DEC_SP(384) mov x24, x0 mov x25, x1 mov x26, x2 mov x0, sp ldr q1, [x25, #96] ldp x9, x2, [x25, #96] ldr q0, [x25, #96] ldp x4, x6, [x25, #112] rev64 v21.4s, v1.4s uzp2 v28.4s, v1.4s, v1.4s umulh x7, x9, x2 xtn v17.2s, v1.2d mul v27.4s, v21.4s, v0.4s ldr q20, [x25, #128] xtn v30.2s, v0.2d ldr q1, [x25, #128] uzp2 v31.4s, v0.4s, v0.4s ldp x5, x10, [x25, #128] umulh x8, x9, x4 uaddlp v3.2d, v27.4s umull v16.2d, v30.2s, v17.2s mul x16, x9, x4 umull v27.2d, v30.2s, v28.2s shrn v0.2s, v20.2d, #32 xtn v7.2s, v20.2d shl v20.2d, v3.2d, #32 umull v3.2d, v31.2s, v28.2s mul x3, x2, x4 umlal v20.2d, v30.2s, v17.2s umull v22.2d, v7.2s, v0.2s usra v27.2d, v16.2d, #32 umulh x11, x2, x4 movi v21.2d, #0xffffffff uzp2 v28.4s, v1.4s, v1.4s adds x15, x16, x7 and v5.16b, v27.16b, v21.16b adcs x3, x3, x8 usra v3.2d, v27.2d, #32 dup v29.2d, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2d, v31.2s, v17.2s mul x8, x9, x2 mov x7, v20.d[1] shl v19.2d, v22.2d, #33 xtn v25.2s, v29.2d rev64 v31.4s, v1.4s lsl x13, x14, #32 uzp2 v6.4s, v29.4s, v29.4s umlal v19.2d, v7.2s, v7.2s usra v3.2d, v5.2d, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4s, v31.4s, v29.4s xtn v4.2s, v1.2d adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2d, v25.2s, v28.2s adcs x11, x16, x16 umull v21.2d, v25.2s, v4.2s mov x17, v3.d[0] umull v18.2d, v6.2s, v28.2s adc x16, x8, xzr uaddlp v16.2d, v17.4s movi v1.2d, #0xffffffff subs x13, x13, x12 usra v31.2d, v21.2d, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2d, v16.2d, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16b, v31.16b, v1.16b adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2d, v6.2s, v4.2s usra v18.2d, v31.2d, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2d, v25.2s, 
v4.2s adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2d, v3.2d, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x15, x3, x17 sbcs x3, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [x0] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 stp x15, x3, [x0, #16] csetm x15, cc // cc = lo, ul, last cneg x1, x1, cc // cc = lo, ul, last stp x11, x14, [x0, #32] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc // cc = lo, ul, last cinv x16, x15, cc // cc = lo, ul, last mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc // cc = lo, ul, last cneg x9, x9, cc // cc = lo, ul, last subs x4, x2, x4 cneg x4, x4, cc // cc = lo, ul, last csetm x7, cc // cc = lo, ul, last subs x2, x10, x6 cinv x8, x8, cc // cc = lo, ul, last cneg x2, x2, cc // cc = lo, ul, last cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc // cc = lo, ul, last cneg x1, x1, cc // cc = lo, ul, last eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 ldp x9, x17, [x0, #16] umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [x0] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [x0, #32] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x9 adcs x1, x1, x17 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, 
x6 mov x4, #0xffffffff // #4294967295 adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 // #-4294967295 adcs x14, x14, x2 mov x2, #0x1 // #1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [x0] adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [x0, #16] adc x17, x14, xzr stp x2, x17, [x0, #32] ldr q1, [x26, #96] ldp x9, x2, [x26, #96] ldr q0, [x26, #96] ldp x4, x6, [x26, #112] rev64 v21.4s, v1.4s uzp2 v28.4s, v1.4s, v1.4s umulh x7, x9, x2 xtn v17.2s, v1.2d mul v27.4s, v21.4s, v0.4s ldr q20, [x26, #128] xtn v30.2s, v0.2d ldr q1, [x26, #128] uzp2 v31.4s, v0.4s, v0.4s ldp x5, x10, [x26, #128] umulh x8, x9, x4 uaddlp v3.2d, v27.4s umull v16.2d, v30.2s, v17.2s mul x16, x9, x4 umull v27.2d, v30.2s, v28.2s shrn v0.2s, v20.2d, #32 xtn v7.2s, v20.2d shl v20.2d, v3.2d, #32 umull v3.2d, v31.2s, v28.2s mul x3, x2, x4 umlal v20.2d, v30.2s, v17.2s umull v22.2d, v7.2s, v0.2s usra v27.2d, v16.2d, #32 umulh x11, x2, x4 movi v21.2d, #0xffffffff uzp2 v28.4s, v1.4s, v1.4s adds x15, x16, x7 and v5.16b, v27.16b, v21.16b adcs x3, x3, x8 usra v3.2d, v27.2d, #32 dup v29.2d, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2d, v31.2s, v17.2s mul x8, x9, x2 mov x7, v20.d[1] shl v19.2d, v22.2d, #33 xtn v25.2s, v29.2d rev64 v31.4s, v1.4s lsl x13, x14, #32 uzp2 v6.4s, v29.4s, v29.4s umlal v19.2d, v7.2s, v7.2s usra v3.2d, v5.2d, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4s, v31.4s, v29.4s xtn v4.2s, v1.2d adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2d, v25.2s, v28.2s adcs x11, x16, x16 umull v21.2d, v25.2s, v4.2s mov x17, v3.d[0] umull v18.2d, v6.2s, v28.2s adc x16, x8, xzr uaddlp v16.2d, v17.4s movi v1.2d, #0xffffffff subs x13, x13, x12 usra v31.2d, v21.2d, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2d, v16.2d, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16b, v31.16b, v1.16b adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2d, v6.2s, v4.2s usra v18.2d, v31.2d, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2d, v25.2s, v4.2s adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2d, v3.2d, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x15, x3, x17 sbcs x3, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [sp, #240] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 stp x15, x3, [sp, #256] csetm x15, cc // cc = lo, ul, last cneg x1, x1, cc // cc = lo, ul, last stp x11, x14, [sp, #272] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc // cc = lo, ul, last cinv x16, x15, cc // cc = lo, ul, last mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 
        adc x8, x13, xzr
        adds x3, x3, x14
        adcs x15, x15, x17
        adcs x17, x8, x7
        eor x1, x1, x16
        adc x13, x13, xzr
        subs x9, x9, x4
        csetm x8, cc
        cneg x9, x9, cc
        subs x4, x2, x4
        cneg x4, x4, cc
        csetm x7, cc
        subs x2, x10, x6
        cinv x8, x8, cc
        cneg x2, x2, cc
        cmn x16, #0x1
        adcs x11, x11, x12
        mul x12, x9, x2
        adcs x3, x3, x1
        adcs x15, x15, x16
        umulh x9, x9, x2
        adcs x17, x17, x16
        adc x13, x13, x16
        subs x1, x10, x5
        cinv x2, x7, cc
        cneg x1, x1, cc
        eor x9, x9, x8
        cmn x8, #0x1
        eor x7, x12, x8
        mul x12, x4, x1
        adcs x3, x3, x7
        adcs x7, x15, x9
        adcs x15, x17, x8
        ldp x9, x17, [sp, #256]
        umulh x4, x4, x1
        adc x8, x13, x8
        cmn x2, #0x1
        eor x1, x12, x2
        adcs x1, x7, x1
        ldp x7, x16, [sp, #240]
        eor x12, x4, x2
        adcs x4, x15, x12
        ldp x15, x12, [sp, #272]
        adc x8, x8, x2
        adds x13, x14, x14
        umulh x14, x5, x10
        adcs x2, x11, x11
        adcs x3, x3, x3
        adcs x1, x1, x1
        adcs x4, x4, x4
        adcs x11, x8, x8
        adc x8, xzr, xzr
        adds x13, x13, x7
        adcs x2, x2, x16
        mul x16, x5, x10
        adcs x3, x3, x9
        adcs x1, x1, x17
        umulh x5, x5, x5
        lsl x9, x13, #32
        add x9, x9, x13
        adcs x4, x4, x15
        mov x13, v28.d[1]
        adcs x15, x11, x12
        lsr x7, x9, #32
        adc x11, x8, xzr
        subs x7, x7, x9
        umulh x10, x10, x10
        sbc x17, x9, xzr
        extr x7, x17, x7, #32
        lsr x17, x17, #32
        adds x17, x17, x9
        adc x12, xzr, xzr
        subs x8, x2, x7
        sbcs x17, x3, x17
        lsl x7, x8, #32
        sbcs x2, x1, x12
        add x3, x7, x8
        sbcs x12, x4, xzr
        lsr x1, x3, #32
        sbcs x7, x15, xzr
        sbc x15, x9, xzr
        subs x1, x1, x3
        sbc x4, x3, xzr
        lsr x9, x4, #32
        extr x8, x4, x1, #32
        adds x9, x9, x3
        adc x4, xzr, xzr
        subs x1, x17, x8
        lsl x17, x1, #32
        sbcs x8, x2, x9
        sbcs x9, x12, x4
        add x17, x17, x1
        mov x1, v18.d[1]
        lsr x2, x17, #32
        sbcs x7, x7, xzr
        mov x12, v18.d[0]
        sbcs x15, x15, xzr
        sbc x3, x3, xzr
        subs x4, x2, x17
        sbc x2, x17, xzr
        adds x12, x13, x12
        adcs x16, x16, x1
        lsr x13, x2, #32
        extr x1, x2, x4, #32
        adc x2, x14, xzr
        adds x4, x13, x17
        mul x13, x6, x6
        adc x14, xzr, xzr
        subs x1, x8, x1
        sbcs x4, x9, x4
        mov x9, v28.d[0]
        sbcs x7, x7, x14
        sbcs x8, x15, xzr
        sbcs x3, x3, xzr
        sbc x14, x17, xzr
        adds x17, x9, x9
        adcs x12, x12, x12
        mov x15, v19.d[0]
        adcs x9, x16, x16
        umulh x6, x6, x6
        adcs x16, x2, x2
        adc x2, xzr, xzr
        adds x11, x11, x8
        adcs x3, x3, xzr
        adcs x14, x14, xzr
        adcs x8, xzr, xzr
        adds x13, x1, x13
        mov x1, v19.d[1]
        adcs x6, x4, x6
        mov x4, #0xffffffff // #4294967295
        adcs x15, x7, x15
        adcs x7, x11, x5
        adcs x1, x3, x1
        adcs x14, x14, x10
        adc x11, x8, xzr
        adds x6, x6, x17
        adcs x8, x15, x12
        adcs x3, x7, x9
        adcs x15, x1, x16
        mov x16, #0xffffffff00000001 // #-4294967295
        adcs x14, x14, x2
        mov x2, #0x1 // #1
        adc x17, x11, xzr
        cmn x13, x16
        adcs xzr, x6, x4
        adcs xzr, x8, x2
        adcs xzr, x3, xzr
        adcs xzr, x15, xzr
        adcs xzr, x14, xzr
        adc x1, x17, xzr
        neg x9, x1
        and x1, x16, x9
        adds x11, x13, x1
        and x13, x4, x9
        adcs x5, x6, x13
        and x1, x2, x9
        adcs x7, x8, x1
        stp x11, x5, [sp, #240]
        adcs x11, x3, xzr
        adcs x2, x15, xzr
        stp x7, x11, [sp, #256]
        adc x17, x14, xzr
        stp x2, x17, [sp, #272]

// End of the preceding Montgomery squaring; its result sits at [sp, #240..#287].

        stp x23, x24, [sp, #0x150]

// Shorthands for the stack temporaries and for the 6-word fields of the two
// input points at x25 and x26. The names are local, readability-only labels;
// the semantic roles of the individual slots are not asserted here.

#define T1 sp, #48
#define T2 sp, #96
#define T3 sp, #144
#define T4 sp, #192
#define T5 sp, #240
#define T6 sp, #288
#define A0 x25, #0
#define A1 x25, #48
#define A2 x25, #96
#define B0 x26, #0
#define B1 x26, #48
#define B2 x26, #96
#define T0X x1, #0
#define T0R x0, #0

// The six multiply/reduce blocks below are textually identical up to their
// operand addresses and the four registers of the final carry chain, so the
// common body is factored into a preprocessor macro in the style used
// elsewhere in this codebase. montmul_p384(P0,P1,P2,E0,E1,E2,E3) computes a
// Montgomery-style product mod p_384 of the 6-word values at P1 and P2 and
// stores it at P0; E0-E3 name the registers holding the last result words
// (one instance keeps them live for the subtraction that follows it).

#define montmul_p384(P0,P1,P2,E0,E1,E2,E3) \
        ldr q3, [P2] __LF \
        ldr q25, [P1] __LF \
        ldp x13, x23, [P1] __LF \
        ldp x3, x21, [P2] __LF \
        rev64 v23.4s, v25.4s __LF \
        uzp1 v17.4s, v25.4s, v3.4s __LF \
        umulh x15, x3, x13 __LF \
        mul v6.4s, v23.4s, v3.4s __LF \
        uzp1 v3.4s, v3.4s, v3.4s __LF \
        ldr q27, [P1+32] __LF \
        ldp x8, x24, [P2+16] __LF \
        subs x6, x3, x21 __LF \
        ldr q0, [P2+32] __LF \
        movi v23.2d, #0xffffffff __LF \
        csetm x10, cc __LF \
        umulh x19, x21, x23 __LF \
        rev64 v4.4s, v27.4s __LF \
        uzp2 v25.4s, v27.4s, v27.4s __LF \
        cneg x4, x6, cc __LF \
        subs x7, x23, x13 __LF \
        xtn v22.2s, v0.2d __LF \
        xtn v24.2s, v27.2d __LF \
        cneg x20, x7, cc __LF \
        ldp x6, x14, [P1+16] __LF \
        mul v27.4s, v4.4s, v0.4s __LF \
        uaddlp v20.2d, v6.4s __LF \
        cinv x5, x10, cc __LF \
        mul x16, x4, x20 __LF \
        uzp2 v6.4s, v0.4s, v0.4s __LF \
        umull v21.2d, v22.2s, v25.2s __LF \
        shl v0.2d, v20.2d, #32 __LF \
        umlal v0.2d, v3.2s, v17.2s __LF \
        mul x22, x8, x6 __LF \
        umull v1.2d, v6.2s, v25.2s __LF \
        subs x12, x3, x8 __LF \
        umull v20.2d, v22.2s, v24.2s __LF \
        cneg x17, x12, cc __LF \
        umulh x9, x8, x6 __LF \
        mov x12, v0.d[1] __LF \
        eor x11, x16, x5 __LF \
        mov x7, v0.d[0] __LF \
        csetm x10, cc __LF \
        usra v21.2d, v20.2d, #32 __LF \
        adds x15, x15, x12 __LF \
        adcs x12, x19, x22 __LF \
        umulh x20, x4, x20 __LF \
        adc x19, x9, xzr __LF \
        usra v1.2d, v21.2d, #32 __LF \
        adds x22, x15, x7 __LF \
        and v26.16b, v21.16b, v23.16b __LF \
        adcs x16, x12, x15 __LF \
        uaddlp v25.2d, v27.4s __LF \
        adcs x9, x19, x12 __LF \
        umlal v26.2d, v6.2s, v24.2s __LF \
        adc x4, x19, xzr __LF \
        adds x16, x16, x7 __LF \
        shl v27.2d, v25.2d, #32 __LF \
        adcs x9, x9, x15 __LF \
        adcs x4, x4, x12 __LF \
        eor x12, x20, x5 __LF \
        adc x15, x19, xzr __LF \
        subs x20, x6, x13 __LF \
        cneg x20, x20, cc __LF \
        cinv x10, x10, cc __LF \
        cmn x5, #0x1 __LF \
        mul x19, x17, x20 __LF \
        adcs x11, x22, x11 __LF \
        adcs x12, x16, x12 __LF \
        adcs x9, x9, x5 __LF \
        umulh x17, x17, x20 __LF \
        adcs x22, x4, x5 __LF \
        adc x5, x15, x5 __LF \
        subs x16, x21, x8 __LF \
        cneg x20, x16, cc __LF \
        eor x19, x19, x10 __LF \
        csetm x4, cc __LF \
        subs x16, x6, x23 __LF \
        cneg x16, x16, cc __LF \
        umlal v27.2d, v22.2s, v24.2s __LF \
        mul x15, x20, x16 __LF \
        cinv x4, x4, cc __LF \
        cmn x10, #0x1 __LF \
        usra v1.2d, v26.2d, #32 __LF \
        adcs x19, x12, x19 __LF \
        eor x17, x17, x10 __LF \
        adcs x9, x9, x17 __LF \
        adcs x22, x22, x10 __LF \
        lsl x12, x7, #32 __LF \
        umulh x20, x20, x16 __LF \
        eor x16, x15, x4 __LF \
        ldp x15, x17, [P1+32] __LF \
        add x2, x12, x7 __LF \
        adc x7, x5, x10 __LF \
        ldp x5, x10, [P2+32] __LF \
        lsr x1, x2, #32 __LF \
        eor x12, x20, x4 __LF \
        subs x1, x1, x2 __LF \
        sbc x20, x2, xzr __LF \
        cmn x4, #0x1 __LF \
        adcs x9, x9, x16 __LF \
        extr x1, x20, x1, #32 __LF \
        lsr x20, x20, #32 __LF \
        adcs x22, x22, x12 __LF \
        adc x16, x7, x4 __LF \
        adds x12, x20, x2 __LF \
        umulh x7, x24, x14 __LF \
        adc x4, xzr, xzr __LF \
        subs x1, x11, x1 __LF \
        sbcs x20, x19, x12 __LF \
        sbcs x12, x9, x4 __LF \
        lsl x9, x1, #32 __LF \
        add x1, x9, x1 __LF \
        sbcs x9, x22, xzr __LF \
        mul x22, x24, x14 __LF \
        sbcs x16, x16, xzr __LF \
        lsr x4, x1, #32 __LF \
        sbc x19, x2, xzr __LF \
        subs x4, x4, x1 __LF \
        sbc x11, x1, xzr __LF \
        extr x2, x11, x4, #32 __LF \
        lsr x4, x11, #32 __LF \
        adds x4, x4, x1 __LF \
        adc x11, xzr, xzr __LF \
        subs x2, x20, x2 __LF \
        sbcs x4, x12, x4 __LF \
        sbcs x20, x9, x11 __LF \
        lsl x12, x2, #32 __LF \
        add x2, x12, x2 __LF \
        sbcs x9, x16, xzr __LF \
        lsr x11, x2, #32 __LF \
        sbcs x19, x19, xzr __LF \
        sbc x1, x1, xzr __LF \
        subs x16, x11, x2 __LF \
        sbc x12, x2, xzr __LF \
        extr x16, x12, x16, #32 __LF \
        lsr x12, x12, #32 __LF \
        adds x11, x12, x2 __LF \
        adc x12, xzr, xzr __LF \
        subs x16, x4, x16 __LF \
        mov x4, v27.d[0] __LF \
        sbcs x11, x20, x11 __LF \
        sbcs x20, x9, x12 __LF \
        stp x16, x11, [P0] __LF \
        sbcs x11, x19, xzr __LF \
        sbcs x9, x1, xzr __LF \
        stp x20, x11, [P0+16] __LF \
        mov x1, v1.d[0] __LF \
        sbc x20, x2, xzr __LF \
        subs x12, x24, x5 __LF \
        mov x11, v27.d[1] __LF \
        cneg x16, x12, cc __LF \
        csetm x2, cc __LF \
        subs x19, x15, x14 __LF \
        mov x12, v1.d[1] __LF \
        cinv x2, x2, cc __LF \
        cneg x19, x19, cc __LF \
        stp x9, x20, [P0+32] __LF \
        mul x9, x16, x19 __LF \
        adds x4, x7, x4 __LF \
        adcs x11, x1, x11 __LF \
        adc x1, x12, xzr __LF \
        adds x20, x4, x22 __LF \
        umulh x19, x16, x19 __LF \
        adcs x7, x11, x4 __LF \
        eor x16, x9, x2 __LF \
        adcs x9, x1, x11 __LF \
        adc x12, x1, xzr __LF \
        adds x7, x7, x22 __LF \
        adcs x4, x9, x4 __LF \
        adcs x9, x12, x11 __LF \
        adc x12, x1, xzr __LF \
        cmn x2, #0x1 __LF \
        eor x1, x19, x2 __LF \
        adcs x11, x20, x16 __LF \
        adcs x19, x7, x1 __LF \
        adcs x1, x4, x2 __LF \
        adcs x20, x9, x2 __LF \
        adc x2, x12, x2 __LF \
        subs x12, x24, x10 __LF \
        cneg x16, x12, cc __LF \
        csetm x12, cc __LF \
        subs x9, x17, x14 __LF \
        cinv x12, x12, cc __LF \
        cneg x9, x9, cc __LF \
        subs x3, x24, x3 __LF \
        sbcs x21, x5, x21 __LF \
        mul x24, x16, x9 __LF \
        sbcs x4, x10, x8 __LF \
        ngc x8, xzr __LF \
        subs x10, x5, x10 __LF \
        eor x5, x24, x12 __LF \
        csetm x7, cc __LF \
        cneg x24, x10, cc __LF \
        subs x10, x17, x15 __LF \
        cinv x7, x7, cc __LF \
        cneg x10, x10, cc __LF \
        subs x14, x13, x14 __LF \
        sbcs x15, x23, x15 __LF \
        eor x13, x21, x8 __LF \
        mul x23, x24, x10 __LF \
        sbcs x17, x6, x17 __LF \
        eor x6, x3, x8 __LF \
        ngc x21, xzr __LF \
        umulh x9, x16, x9 __LF \
        cmn x8, #0x1 __LF \
        eor x3, x23, x7 __LF \
        adcs x23, x6, xzr __LF \
        adcs x13, x13, xzr __LF \
        eor x16, x4, x8 __LF \
        adc x16, x16, xzr __LF \
        eor x4, x17, x21 __LF \
        umulh x17, x24, x10 __LF \
        cmn x21, #0x1 __LF \
        eor x24, x14, x21 __LF \
        eor x6, x15, x21 __LF \
        adcs x15, x24, xzr __LF \
        adcs x14, x6, xzr __LF \
        adc x6, x4, xzr __LF \
        cmn x12, #0x1 __LF \
        eor x4, x9, x12 __LF \
        adcs x19, x19, x5 __LF \
        umulh x5, x23, x15 __LF \
        adcs x1, x1, x4 __LF \
        adcs x10, x20, x12 __LF \
        eor x4, x17, x7 __LF \
        ldp x20, x9, [P0] __LF \
        adc x2, x2, x12 __LF \
        cmn x7, #0x1 __LF \
        adcs x12, x1, x3 __LF \
        ldp x17, x24, [P0+16] __LF \
        mul x1, x16, x6 __LF \
        adcs x3, x10, x4 __LF \
        adc x2, x2, x7 __LF \
        ldp x7, x4, [P0+32] __LF \
        adds x20, x22, x20 __LF \
        mul x10, x13, x14 __LF \
        adcs x11, x11, x9 __LF \
        eor x9, x8, x21 __LF \
        adcs x21, x19, x17 __LF \
        stp x20, x11, [P0] __LF \
        adcs x12, x12, x24 __LF \
        mul x8, x23, x15 __LF \
        adcs x3, x3, x7 __LF \
        stp x21, x12, [P0+16] __LF \
        adcs x12, x2, x4 __LF \
        adc x19, xzr, xzr __LF \
        subs x21, x23, x16 __LF \
        umulh x2, x16, x6 __LF \
        stp x3, x12, [P0+32] __LF \
        cneg x3, x21, cc __LF \
        csetm x24, cc __LF \
        umulh x11, x13, x14 __LF \
        subs x21, x13, x16 __LF \
        eor x7, x8, x9 __LF \
        cneg x17, x21, cc __LF \
        csetm x16, cc __LF \
        subs x21, x6, x15 __LF \
        cneg x22, x21, cc __LF \
        cinv x21, x24, cc __LF \
        subs x20, x23, x13 __LF \
        umulh x12, x3, x22 __LF \
        cneg x23, x20, cc __LF \
        csetm x24, cc __LF \
        subs x20, x14, x15 __LF \
        cinv x24, x24, cc __LF \
        mul x22, x3, x22 __LF \
        cneg x3, x20, cc __LF \
        subs x13, x6, x14 __LF \
        cneg x20, x13, cc __LF \
        cinv x15, x16, cc __LF \
        adds x13, x5, x10 __LF \
        mul x4, x23, x3 __LF \
        adcs x11, x11, x1 __LF \
        adc x14, x2, xzr __LF \
        adds x5, x13, x8 __LF \
        adcs x16, x11, x13 __LF \
        umulh x23, x23, x3 __LF \
        adcs x3, x14, x11 __LF \
        adc x1, x14, xzr __LF \
        adds x10, x16, x8 __LF \
        adcs x6, x3, x13 __LF \
        adcs x8, x1, x11 __LF \
        umulh x13, x17, x20 __LF \
        eor x1, x4, x24 __LF \
        adc x4, x14, xzr __LF \
        cmn x24, #0x1 __LF \
        adcs x1, x5, x1 __LF \
        eor x16, x23, x24 __LF \
        eor x11, x1, x9 __LF \
        adcs x23, x10, x16 __LF \
        eor x2, x22, x21 __LF \
        adcs x3, x6, x24 __LF \
        mul x14, x17, x20 __LF \
        eor x17, x13, x15 __LF \
        adcs x13, x8, x24 __LF \
        adc x8, x4, x24 __LF \
        cmn x21, #0x1 __LF \
        adcs x6, x23, x2 __LF \
        mov x16, #0xfffffffffffffffe __LF \
        eor x20, x12, x21 __LF \
        adcs x20, x3, x20 __LF \
        eor x23, x14, x15 __LF \
        adcs x2, x13, x21 __LF \
        adc x8, x8, x21 __LF \
        cmn x15, #0x1 __LF \
        ldp x5, x4, [P0] __LF \
        ldp x21, x12, [P0+16] __LF \
        adcs x22, x20, x23 __LF \
        eor x23, x22, x9 __LF \
        adcs x17, x2, x17 __LF \
        adc x22, x8, x15 __LF \
        cmn x9, #0x1 __LF \
        adcs x15, x7, x5 __LF \
        ldp x10, x14, [P0+32] __LF \
        eor x1, x6, x9 __LF \
        lsl x2, x15, #32 __LF \
        adcs x8, x11, x4 __LF \
        adcs x13, x1, x21 __LF \
        eor x1, x22, x9 __LF \
        adcs x24, x23, x12 __LF \
        eor x11, x17, x9 __LF \
        adcs x23, x11, x10 __LF \
        adcs x7, x1, x14 __LF \
        adcs x17, x9, x19 __LF \
        adcs x20, x9, xzr __LF \
        add x1, x2, x15 __LF \
        lsr x3, x1, #32 __LF \
        adcs x11, x9, xzr __LF \
        adc x9, x9, xzr __LF \
        subs x3, x3, x1 __LF \
        sbc x6, x1, xzr __LF \
        adds x24, x24, x5 __LF \
        adcs x4, x23, x4 __LF \
        extr x3, x6, x3, #32 __LF \
        lsr x6, x6, #32 __LF \
        adcs x21, x7, x21 __LF \
        adcs x15, x17, x12 __LF \
        adcs x7, x20, x10 __LF \
        adcs x20, x11, x14 __LF \
        mov x14, #0xffffffff __LF \
        adc x22, x9, x19 __LF \
        adds x12, x6, x1 __LF \
        adc x10, xzr, xzr __LF \
        subs x3, x8, x3 __LF \
        sbcs x12, x13, x12 __LF \
        lsl x9, x3, #32 __LF \
        add x3, x9, x3 __LF \
        sbcs x10, x24, x10 __LF \
        sbcs x24, x4, xzr __LF \
        lsr x9, x3, #32 __LF \
        sbcs x21, x21, xzr __LF \
        sbc x1, x1, xzr __LF \
        subs x9, x9, x3 __LF \
        sbc x13, x3, xzr __LF \
        extr x9, x13, x9, #32 __LF \
        lsr x13, x13, #32 __LF \
        adds x13, x13, x3 __LF \
        adc x6, xzr, xzr __LF \
        subs x12, x12, x9 __LF \
        sbcs x17, x10, x13 __LF \
        lsl x2, x12, #32 __LF \
        sbcs x10, x24, x6 __LF \
        add x9, x2, x12 __LF \
        sbcs x6, x21, xzr __LF \
        lsr x5, x9, #32 __LF \
        sbcs x21, x1, xzr __LF \
        sbc x13, x3, xzr __LF \
        subs x8, x5, x9 __LF \
        sbc x19, x9, xzr __LF \
        lsr x12, x19, #32 __LF \
        extr x3, x19, x8, #32 __LF \
        adds x8, x12, x9 __LF \
        adc x1, xzr, xzr __LF \
        subs x2, x17, x3 __LF \
        sbcs x12, x10, x8 __LF \
        sbcs x5, x6, x1 __LF \
        sbcs x3, x21, xzr __LF \
        sbcs x19, x13, xzr __LF \
        sbc x24, x9, xzr __LF \
        adds x23, x15, x3 __LF \
        adcs x8, x7, x19 __LF \
        adcs x11, x20, x24 __LF \
        adc x9, x22, xzr __LF \
        add x24, x9, #0x1 __LF \
        lsl x7, x24, #32 __LF \
        subs x21, x24, x7 __LF \
        sbc x10, x7, xzr __LF \
        adds x6, x2, x21 __LF \
        adcs x7, x12, x10 __LF \
        adcs x24, x5, x24 __LF \
        adcs x13, x23, xzr __LF \
        adcs x8, x8, xzr __LF \
        adcs x15, x11, xzr __LF \
        csetm x23, cc __LF \
        and x11, x16, x23 __LF \
        and x20, x14, x23 __LF \
        adds x22, x6, x20 __LF \
        eor x3, x20, x23 __LF \
        adcs x5, x7, x3 __LF \
        adcs E0, x24, x11 __LF \
        stp x22, x5, [P0] __LF \
        adcs E1, x13, x23 __LF \
        adcs E2, x8, x23 __LF \
        stp E0, E1, [P0+16] __LF \
        adc E3, x15, x23 __LF \
        stp E2, E3, [P0+32]

// montsqr_p384(P0,P1): the analogous Montgomery squaring of the 6-word value
// at P1, with the result stored at P0.

#define montsqr_p384(P0,P1) \
        ldr q1, [P1] __LF \
        ldp x9, x2, [P1] __LF \
        ldr q0, [P1] __LF \
        ldp x4, x6, [P1+16] __LF \
        rev64 v21.4s, v1.4s __LF \
        uzp2 v28.4s, v1.4s, v1.4s __LF \
        umulh x7, x9, x2 __LF \
        xtn v17.2s, v1.2d __LF \
        mul v27.4s, v21.4s, v0.4s __LF \
        ldr q20, [P1+32] __LF \
        xtn v30.2s, v0.2d __LF \
        ldr q1, [P1+32] __LF \
        uzp2 v31.4s, v0.4s, v0.4s __LF \
        ldp x5, x10, [P1+32] __LF \
        umulh x8, x9, x4 __LF \
        uaddlp v3.2d, v27.4s __LF \
        umull v16.2d, v30.2s, v17.2s __LF \
        mul x16, x9, x4 __LF \
        umull v27.2d, v30.2s, v28.2s __LF \
        shrn v0.2s, v20.2d, #32 __LF \
        xtn v7.2s, v20.2d __LF \
        shl v20.2d, v3.2d, #32 __LF \
        umull v3.2d, v31.2s, v28.2s __LF \
        mul x3, x2, x4 __LF \
        umlal v20.2d, v30.2s, v17.2s __LF \
        umull v22.2d, v7.2s, v0.2s __LF \
        usra v27.2d, v16.2d, #32 __LF \
        umulh x11, x2, x4 __LF \
        movi v21.2d, #0xffffffff __LF \
        uzp2 v28.4s, v1.4s, v1.4s __LF \
        adds x15, x16, x7 __LF \
        and v5.16b, v27.16b, v21.16b __LF \
        adcs x3, x3, x8 __LF \
        usra v3.2d, v27.2d, #32 __LF \
        dup v29.2d, x6 __LF \
        adcs x16, x11, xzr __LF \
        mov x14, v20.d[0] __LF \
        umlal v5.2d, v31.2s, v17.2s __LF \
        mul x8, x9, x2 __LF \
        mov x7, v20.d[1] __LF \
        shl v19.2d, v22.2d, #33 __LF \
        xtn v25.2s, v29.2d __LF \
        rev64 v31.4s, v1.4s __LF \
        lsl x13, x14, #32 __LF \
        uzp2 v6.4s, v29.4s, v29.4s __LF \
        umlal v19.2d, v7.2s, v7.2s __LF \
        usra v3.2d, v5.2d, #32 __LF \
        adds x1, x8, x8 __LF \
        umulh x8, x4, x4 __LF \
        add x12, x13, x14 __LF \
        mul v17.4s, v31.4s, v29.4s __LF \
        xtn v4.2s, v1.2d __LF \
        adcs x14, x15, x15 __LF \
        lsr x13, x12, #32 __LF \
        adcs x15, x3, x3 __LF \
        umull v31.2d, v25.2s, v28.2s __LF \
        adcs x11, x16, x16 __LF \
        umull v21.2d, v25.2s, v4.2s __LF \
        mov x17, v3.d[0] __LF \
        umull v18.2d, v6.2s, v28.2s __LF \
        adc x16, x8, xzr __LF \
        uaddlp v16.2d, v17.4s __LF \
        movi v1.2d, #0xffffffff __LF \
        subs x13, x13, x12 __LF \
        usra v31.2d, v21.2d, #32 __LF \
        sbc x8, x12, xzr __LF \
        adds x17, x17, x1 __LF \
        mul x1, x4, x4 __LF \
        shl v28.2d, v16.2d, #32 __LF \
        mov x3, v3.d[1] __LF \
        adcs x14, x7, x14 __LF \
        extr x7, x8, x13, #32 __LF \
        adcs x13, x3, x15 __LF \
        and v3.16b, v31.16b, v1.16b __LF \
        adcs x11, x1, x11 __LF \
        lsr x1, x8, #32 __LF \
        umlal v3.2d, v6.2s, v4.2s __LF \
        usra v18.2d, v31.2d, #32 __LF \
        adc x3, x16, xzr __LF \
        adds x1, x1, x12 __LF \
        umlal v28.2d, v25.2s, v4.2s __LF \
        adc x16, xzr, xzr __LF \
        subs x15, x17, x7 __LF \
        sbcs x7, x14, x1 __LF \
        lsl x1, x15, #32 __LF \
        sbcs x16, x13, x16 __LF \
        add x8, x1, x15 __LF \
        usra v18.2d, v3.2d, #32 __LF \
        sbcs x14, x11, xzr __LF \
        lsr x1, x8, #32 __LF \
        sbcs x17, x3, xzr __LF \
        sbc x11, x12, xzr __LF \
        subs x13, x1, x8 __LF \
        umulh x12, x4, x10 __LF \
        sbc x1, x8, xzr __LF \
        extr x13, x1, x13, #32 __LF \
        lsr x1, x1, #32 __LF \
        adds x15, x1, x8 __LF \
        adc x1, xzr, xzr __LF \
        subs x7, x7, x13 __LF \
        sbcs x13, x16, x15 __LF \
        lsl x3, x7, #32 __LF \
        umulh x16, x2, x5 __LF \
        sbcs x15, x14, x1 __LF \
        add x7, x3, x7 __LF \
        sbcs x3, x17, xzr __LF \
        lsr x1, x7, #32 __LF \
        sbcs x14, x11, xzr __LF \
        sbc x11, x8, xzr __LF \
        subs x8, x1, x7 __LF \
        sbc x1, x7, xzr __LF \
        extr x8, x1, x8, #32 __LF \
        lsr x1, x1, #32 __LF \
        adds x1, x1, x7 __LF \
        adc x17, xzr, xzr __LF \
        subs x13, x13, x8 __LF \
        umulh x8, x9, x6 __LF \
        sbcs x1, x15, x1 __LF \
        sbcs x15, x3, x17 __LF \
        sbcs x3, x14, xzr __LF \
        mul x17, x2, x5 __LF \
        sbcs x11, x11, xzr __LF \
        stp x13, x1, [P0] __LF \
        sbc x14, x7, xzr __LF \
        mul x7, x4, x10 __LF \
        subs x1, x9, x2 __LF \
        stp x15, x3, [P0+16] __LF \
        csetm x15, cc __LF \
        cneg x1, x1, cc __LF \
        stp x11, x14, [P0+32] __LF \
        mul x14, x9, x6 __LF \
        adds x17, x8, x17 __LF \
        adcs x7, x16, x7 __LF \
        adc x13, x12, xzr __LF \
        subs x12, x5, x6 __LF \
        cneg x3, x12, cc __LF \
        cinv x16, x15, cc __LF \
        mul x8, x1, x3 __LF \
        umulh x1, x1, x3 __LF \
        eor x12, x8, x16 __LF \
        adds x11, x17, x14 __LF \
        adcs x3, x7, x17 __LF \
        adcs x15, x13, x7 __LF \
        adc x8, x13, xzr __LF \
        adds x3, x3, x14 __LF \
        adcs x15, x15, x17 __LF \
        adcs x17, x8, x7 __LF \
        eor x1, x1, x16 __LF \
        adc x13, x13, xzr __LF \
        subs x9, x9, x4 __LF \
        csetm x8, cc __LF \
        cneg x9, x9, cc __LF \
        subs x4, x2, x4 __LF \
        cneg x4, x4, cc __LF \
        csetm x7, cc __LF \
        subs x2, x10, x6 __LF \
        cinv x8, x8, cc __LF \
        cneg x2, x2, cc __LF \
        cmn x16, #0x1 __LF \
        adcs x11, x11, x12 __LF \
        mul x12, x9, x2 __LF \
        adcs x3, x3, x1 __LF \
        adcs x15, x15, x16 __LF \
        umulh x9, x9, x2 __LF \
        adcs x17, x17, x16 __LF \
        adc x13, x13, x16 __LF \
        subs x1, x10, x5 __LF \
        cinv x2, x7, cc __LF \
        cneg x1, x1, cc __LF \
        eor x9, x9, x8 __LF \
        cmn x8, #0x1 __LF \
        eor x7, x12, x8 __LF \
        mul x12, x4, x1 __LF \
        adcs x3, x3, x7 __LF \
        adcs x7, x15, x9 __LF \
        adcs x15, x17, x8 __LF \
        ldp x9, x17, [P0+16] __LF \
        umulh x4, x4, x1 __LF \
        adc x8, x13, x8 __LF \
        cmn x2, #0x1 __LF \
        eor x1, x12, x2 __LF \
        adcs x1, x7, x1 __LF \
        ldp x7, x16, [P0] __LF \
        eor x12, x4, x2 __LF \
        adcs x4, x15, x12 __LF \
        ldp x15, x12, [P0+32] __LF \
        adc x8, x8, x2 __LF \
        adds x13, x14, x14 __LF \
        umulh x14, x5, x10 __LF \
        adcs x2, x11, x11 __LF \
        adcs x3, x3, x3 __LF \
        adcs x1, x1, x1 __LF \
        adcs x4, x4, x4 __LF \
        adcs x11, x8, x8 __LF \
        adc x8, xzr, xzr __LF \
        adds x13, x13, x7 __LF \
        adcs x2, x2, x16 __LF \
        mul x16, x5, x10 __LF \
        adcs x3, x3, x9 __LF \
        adcs x1, x1, x17 __LF \
        umulh x5, x5, x5 __LF \
        lsl x9, x13, #32 __LF \
        add x9, x9, x13 __LF \
        adcs x4, x4, x15 __LF \
        mov x13, v28.d[1] __LF \
        adcs x15, x11, x12 __LF \
        lsr x7, x9, #32 __LF \
        adc x11, x8, xzr __LF \
        subs x7, x7, x9 __LF \
        umulh x10, x10, x10 __LF \
        sbc x17, x9, xzr __LF \
        extr x7, x17, x7, #32 __LF \
        lsr x17, x17, #32 __LF \
        adds x17, x17, x9 __LF \
        adc x12, xzr, xzr __LF \
        subs x8, x2, x7 __LF \
        sbcs x17, x3, x17 __LF \
        lsl x7, x8, #32 __LF \
        sbcs x2, x1, x12 __LF \
        add x3, x7, x8 __LF \
        sbcs x12, x4, xzr __LF \
        lsr x1, x3, #32 __LF \
        sbcs x7, x15, xzr __LF \
        sbc x15, x9, xzr __LF \
        subs x1, x1, x3 __LF \
        sbc x4, x3, xzr __LF \
        lsr x9, x4, #32 __LF \
        extr x8, x4, x1, #32 __LF \
        adds x9, x9, x3 __LF \
        adc x4, xzr, xzr __LF \
        subs x1, x17, x8 __LF \
        lsl x17, x1, #32 __LF \
        sbcs x8, x2, x9 __LF \
        sbcs x9, x12, x4 __LF \
        add x17, x17, x1 __LF \
        mov x1, v18.d[1] __LF \
        lsr x2, x17, #32 __LF \
        sbcs x7, x7, xzr __LF \
        mov x12, v18.d[0] __LF \
        sbcs x15, x15, xzr __LF \
        sbc x3, x3, xzr __LF \
        subs x4, x2, x17 __LF \
        sbc x2, x17, xzr __LF \
        adds x12, x13, x12 __LF \
        adcs x16, x16, x1 __LF \
        lsr x13, x2, #32 __LF \
        extr x1, x2, x4, #32 __LF \
        adc x2, x14, xzr __LF \
        adds x4, x13, x17 __LF \
        mul x13, x6, x6 __LF \
        adc x14, xzr, xzr __LF \
        subs x1, x8, x1 __LF \
        sbcs x4, x9, x4 __LF \
        mov x9, v28.d[0] __LF \
        sbcs x7, x7, x14 __LF \
        sbcs x8, x15, xzr __LF \
        sbcs x3, x3, xzr __LF \
        sbc x14, x17, xzr __LF \
        adds x17, x9, x9 __LF \
        adcs x12, x12, x12 __LF \
        mov x15, v19.d[0] __LF \
        adcs x9, x16, x16 __LF \
        umulh x6, x6, x6 __LF \
        adcs x16, x2, x2 __LF \
        adc x2, xzr, xzr __LF \
        adds x11, x11, x8 __LF \
        adcs x3, x3, xzr __LF \
        adcs x14, x14, xzr __LF \
        adcs x8, xzr, xzr __LF \
        adds x13, x1, x13 __LF \
        mov x1, v19.d[1] __LF \
        adcs x6, x4, x6 __LF \
        mov x4, #0xffffffff __LF \
        adcs x15, x7, x15 __LF \
        adcs x7, x11, x5 __LF \
        adcs x1, x3, x1 __LF \
        adcs x14, x14, x10 __LF \
        adc x11, x8, xzr __LF \
        adds x6, x6, x17 __LF \
        adcs x8, x15, x12 __LF \
        adcs x3, x7, x9 __LF \
        adcs x15, x1, x16 __LF \
        mov x16, #0xffffffff00000001 __LF \
        adcs x14, x14, x2 __LF \
        mov x2, #0x1 __LF \
        adc x17, x11, xzr __LF \
        cmn x13, x16 __LF \
        adcs xzr, x6, x4 __LF \
        adcs xzr, x8, x2 __LF \
        adcs xzr, x3, xzr __LF \
        adcs xzr, x15, xzr __LF \
        adcs xzr, x14, xzr __LF \
        adc x1, x17, xzr __LF \
        neg x9, x1 __LF \
        and x1, x16, x9 __LF \
        adds x11, x13, x1 __LF \
        and x13, x4, x9 __LF \
        adcs x5, x6, x13 __LF \
        and x1, x2, x9 __LF \
        adcs x7, x8, x1 __LF \
        stp x11, x5, [P0] __LF \
        adcs x11, x3, xzr __LF \
        adcs x2, x15, xzr __LF \
        stp x7, x11, [P0+16] __LF \
        adc x17, x14, xzr __LF \
        stp x2, x17, [P0+32]

// The comments on the invocations record only the data flow visible in the
// loads and stores; x1 and x0 below are set to sp so that the sp+#0 slot can
// be addressed through a general register (T0X/T0R above).

        montmul_p384(T6, A1, B2, x14, x5, x21, x12)     // [sp, #288] := [x25, #48] * [x26, #96]
        montmul_p384(T1, B1, A2, x14, x5, x21, x12)     // [sp, #48]  := [x26, #48] * [x25, #96]
        mov x1, sp
        montmul_p384(T2, B0, T0X, x14, x5, x21, x12)    // [sp, #96]  := [x26] * [sp]
        montmul_p384(T4, A0, T5, x14, x5, x21, x12)     // [sp, #192] := [x25] * [sp, #240]
        mov x1, sp
        montmul_p384(T1, T1, T0X, x14, x5, x21, x12)    // [sp, #48]  := [sp, #48] * [sp]

// This instance keeps its top four result words live in x2, x11, x12 and x13
// for the modular subtraction that follows.

        montmul_p384(T6, T6, T5, x2, x11, x12, x13)     // [sp, #288] := [sp, #288] * [sp, #240]

// Modular subtraction mod p_384: [sp, #240] := [sp, #96] - [sp, #192].

        ldp x5, x6, [sp, #96]
        ldp x4, x3, [sp, #192]
        subs x5, x5, x4
        sbcs x6, x6, x3
        ldp x7, x8, [sp, #112]
        ldp x4, x3, [sp, #208]
        sbcs x7, x7, x4
        sbcs x8, x8, x3
        ldp x9, x10, [sp, #128]
        ldp x4, x3, [sp, #224]
        sbcs x9, x9, x4
        sbcs x10, x10, x3
        csetm x3, cc
        mov x4, #0xffffffff // #4294967295
        and x4, x4, x3
        adds x5, x5, x4
        eor x4, x4, x3
        adcs x6, x6, x4
        mov x4, #0xfffffffffffffffe // #-2
        and x4, x4, x3
        adcs x7, x7, x4
        adcs x8, x8, x3
        adcs x9, x9, x3
        adc x10, x10, x3
        stp x5, x6, [sp, #240]
        stp x7, x8, [sp, #256]
        stp x9, x10, [sp, #272]

// Second modular subtraction: [sp, #48] := [sp, #48] - [sp, #288], with the
// top four words of the subtrahend taken directly from x2, x11, x12, x13.

        ldp x5, x6, [sp, #48]
        ldp x4, x3, [sp, #288]
        subs x5, x5, x4
        sbcs x6, x6, x3
        ldp x7, x8, [sp, #64]
        sbcs x7, x7, x2
        sbcs x8, x8, x11
        ldp x9, x10, [sp, #80]
        sbcs x9, x9, x12
        sbcs x10, x10, x13
        csetm x3, cc
        mov x4, #0xffffffff // #4294967295
        and x4, x4, x3
        adds x5, x5, x4
        eor x4, x4, x3
        adcs x6, x6, x4
        mov x4, #0xfffffffffffffffe // #-2
        and x4, x4, x3
        adcs x7, x7, x4
        adcs x8, x8, x3
        adcs x9, x9, x3
        adc x10, x10, x3
        stp x5, x6, [sp, #48]
        stp x7, x8, [sp, #64]
        stp x9, x10, [sp, #80]

        montsqr_p384(T3, T5)                            // [sp, #144] := [sp, #240]^2
        mov x0, sp
        montsqr_p384(T0R, T1)                           // [sp] := [sp, #48]^2

// Final multiply/reduce block of the same shape, left in expanded form:
// inputs [sp, #192] and [sp, #144], result to [sp, #192].

        ldr q3, [sp, #144]
        ldr q25, [sp, #192]
        ldp x13, x23, [sp, #192]
        ldp x3, x21, [sp, #144]
        rev64 v23.4s, v25.4s
        uzp1 v17.4s, v25.4s, v3.4s
        umulh x15, x3, x13
        mul v6.4s, v23.4s, v3.4s
        uzp1 v3.4s, v3.4s, v3.4s
        ldr q27, [sp, #224]
        ldp x8, x24, [sp, #160]
        subs x6, x3, x21
        ldr q0, [sp, #176]
        movi v23.2d, #0xffffffff
        csetm x10, cc
        umulh x19, x21, x23
        rev64 v4.4s, v27.4s
        uzp2 v25.4s, v27.4s, v27.4s
        cneg x4, x6, cc
        subs x7, x23, x13
        xtn v22.2s, v0.2d
        xtn v24.2s, v27.2d
        cneg x20, x7, cc
        ldp x6, x14, [sp, #208]
        mul v27.4s, v4.4s, v0.4s
        uaddlp v20.2d, v6.4s
        cinv x5, x10, cc
        mul x16, x4, x20
        uzp2 v6.4s, v0.4s, v0.4s
        umull v21.2d, v22.2s, v25.2s
        shl v0.2d, v20.2d, #32
        umlal v0.2d, v3.2s, v17.2s
        mul x22, x8, x6
        umull v1.2d, v6.2s, v25.2s
        subs x12, x3, x8
        umull v20.2d, v22.2s, v24.2s
        cneg x17, x12, cc
        umulh x9, x8, x6
        mov x12, v0.d[1]
        eor x11, x16, x5
        mov x7, v0.d[0]
        csetm x10, cc
        usra v21.2d, v20.2d, #32
        adds x15, x15, x12
        adcs x12, x19, x22
        umulh x20, x4, x20
        adc x19, x9, xzr
        usra v1.2d, v21.2d, #32
        adds x22, x15, x7
        and v26.16b, v21.16b, v23.16b
        adcs x16, x12, x15
        uaddlp v25.2d, v27.4s
        adcs x9, x19, x12
        umlal v26.2d, v6.2s, v24.2s
        adc x4, x19, xzr
        adds x16, x16, x7
        shl v27.2d, v25.2d, #32
        adcs x9, x9, x15
        adcs x4, x4, x12
        eor x12, x20, x5
        adc x15, x19, xzr
        subs x20, x6, x13
        cneg x20, x20, cc
        cinv x10, x10, cc
        cmn x5, #0x1
        mul x19, x17, x20
        adcs x11, x22, x11
        adcs x12, x16, x12
        adcs x9, x9, x5
        umulh x17, x17, x20
        adcs x22, x4, x5
        adc x5, x15, x5
        subs x16, x21, x8
        cneg x20, x16, cc
        eor x19, x19, x10
        csetm x4, cc
        subs x16, x6, x23
        cneg x16, x16, cc
        umlal v27.2d, v22.2s, v24.2s
        mul x15, x20, x16
        cinv x4, x4, cc
        cmn x10, #0x1
        usra v1.2d, v26.2d, #32
        adcs x19, x12, x19
        eor x17, x17, x10
        adcs x9, x9, x17
        adcs x22, x22, x10
        lsl x12, x7, #32
        umulh x20, x20, x16
        eor x16, x15, x4
        ldp x15, x17, [sp, #224]
        add x2, x12, x7
        adc x7, x5, x10
        ldp x5, x10, [sp, #176]
        lsr x1, x2, #32
        eor x12, x20, x4
        subs x1, x1, x2
        sbc x20, x2, xzr
        cmn x4, #0x1
        adcs x9, x9, x16
        extr x1, x20, x1, #32
        lsr x20, x20, #32
        adcs x22, x22, x12
        adc x16, x7, x4
        adds x12, x20, x2
        umulh x7, x24, x14
        adc x4, xzr, xzr
        subs x1, x11, x1
        sbcs x20, x19, x12
        sbcs x12, x9, x4
        lsl x9, x1, #32
        add x1, x9, x1
        sbcs x9, x22, xzr
        mul x22, x24, x14
        sbcs x16, x16, xzr
        lsr x4, x1, #32
        sbc x19, x2, xzr
        subs x4, x4, x1
        sbc x11, x1, xzr
        extr x2, x11, x4, #32
        lsr x4, x11, #32
        adds x4, x4, x1
        adc x11, xzr, xzr
        subs x2, x20, x2
        sbcs x4, x12, x4
        sbcs x20, x9, x11
        lsl x12, x2, #32
        add x2, x12, x2
        sbcs x9, x16, xzr
        lsr x11, x2, #32
        sbcs x19, x19, xzr
        sbc x1, x1, xzr
        subs x16, x11, x2
        sbc x12, x2, xzr
        extr x16, x12, x16, #32
        lsr x12, x12, #32
        adds x11, x12, x2
        adc x12, xzr, xzr
        subs x16, x4, x16
        mov x4, v27.d[0]
        sbcs x11, x20, x11
        sbcs x20, x9, x12
        stp x16, x11, [sp, #192]
        sbcs x11, x19, xzr
        sbcs x9, x1, xzr
        stp x20, x11, [sp, #208]
        mov x1, v1.d[0]
        sbc x20, x2, xzr
        subs x12, x24, x5
        mov x11, v27.d[1]
        cneg x16, x12, cc
        csetm x2, cc
        subs x19, x15, x14
        mov x12, v1.d[1]
        cinv x2, x2, cc
        cneg x19, x19, cc
        stp x9, x20, [sp, #224]
        mul x9, x16, x19
        adds x4, x7, x4
        adcs x11, x1, x11
        adc x1, x12, xzr
        adds x20, x4, x22
        umulh x19, x16, x19
        adcs x7, x11, x4
        eor x16, x9, x2
        adcs x9, x1, x11
        adc x12, x1, xzr
        adds x7, x7, x22
        adcs x4, x9, x4
        adcs x9, x12, x11
        adc x12, x1, xzr
        cmn x2, #0x1
        eor x1, x19, x2
        adcs x11, x20, x16
        adcs x19, x7, x1
        adcs x1, x4, x2
        adcs x20, x9, x2
        adc x2, x12, x2
        subs x12, x24, x10
        cneg x16, x12, cc
        csetm x12, cc
        subs x9, x17, x14
        cinv x12, x12, cc
        cneg x9, x9, cc
        subs x3, x24, x3
        sbcs x21, x5, x21
        mul x24, x16, x9
        sbcs x4, x10, x8
        ngc x8, xzr
        subs x10, x5, x10
        eor x5, x24, x12
        csetm x7, cc
        cneg x24, x10, cc
        subs x10, x17, x15
        cinv x7, x7, cc
        cneg x10, x10, cc
        subs x14, x13, x14
        sbcs x15, x23, x15
        eor x13, x21, x8
        mul x23, x24, x10
        sbcs x17, x6, x17
        eor x6, x3, x8
        ngc x21, xzr
        umulh x9, x16, x9
        cmn x8, #0x1
        eor x3, x23, x7
        adcs x23, x6, xzr
        adcs x13, x13, xzr
        eor x16, x4, x8
        adc x16, x16, xzr
        eor x4, x17, x21
        umulh x17, x24, x10
        cmn x21, #0x1
        eor x24, x14, x21
        eor x6, x15, x21
        adcs x15, x24, xzr
        adcs x14, x6, xzr
        adc x6, x4, xzr
        cmn x12, #0x1
        eor x4, x9, x12
        adcs x19, x19, x5
        umulh x5, x23, x15
        adcs x1, x1, x4
        adcs x10, x20, x12
        eor x4, x17, x7
        ldp x20, x9, [sp, #192]
        adc x2, x2, x12
        cmn x7, #0x1
        adcs x12, x1, x3
        ldp x17, x24, [sp, #208]
        mul x1, x16, x6
        adcs x3, x10, x4
        adc x2, x2, x7
        ldp x7, x4, [sp, #224]
        adds x20, x22, x20
        mul x10, x13, x14
        adcs x11, x11, x9
        eor x9, x8, x21
        adcs x21, x19, x17
        stp x20, x11, [sp, #192]
        adcs x12, x12, x24
        mul x8, x23, x15
        adcs x3, x3, x7
        stp x21, x12, [sp, #208]
        adcs x12, x2, x4
        adc x19, xzr, xzr
        subs x21, x23, x16
        umulh x2, x16, x6
        stp x3, x12, [sp, #224]
        cneg x3, x21, cc
        csetm x24, cc
        umulh x11, x13, x14
        subs x21, x13, x16
        eor x7, x8, x9
        cneg x17, x21, cc
        csetm x16, cc
        subs x21, x6, x15
        cneg x22, x21, cc
        cinv x21, x24, cc
        subs x20, x23, x13
        umulh x12, x3, x22
        cneg x23, x20, cc
        csetm x24, cc
        subs x20, x14, x15
        cinv x24, x24, cc
        mul x22, x3, x22
        cneg x3, x20, cc
        subs x13, x6, x14
        cneg x20, x13, cc
        cinv x15, x16, cc
        adds x13, x5, x10
        mul x4, x23, x3
        adcs x11, x11, x1
        adc x14, x2, xzr
        adds x5, x13, x8
        adcs x16, x11, x13
        umulh x23, x23, x3
        adcs x3, x14, x11
        adc x1, x14, xzr
        adds x10, x16, x8
        adcs x6, x3, x13
        adcs x8, x1, x11
        umulh x13, x17, x20
        eor x1, x4, x24
        adc x4, x14, xzr
        cmn x24, #0x1
        adcs x1, x5, x1
        eor x16, x23, x24
        eor x11, x1, x9
        adcs x23, x10, x16
        eor x2,
x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe // #-2 eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #192] ldp x21, x12, [sp, #208] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #224] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff // #4294967295 adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc // cc = lo, ul, last and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #192] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #208] adc x12, x15, x23 stp x21, x12, [sp, #224] ldr q3, [sp, #144] ldr q25, [sp, #96] ldp x13, x23, [sp, #96] ldp x3, x21, [sp, #144] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [sp, #128] ldp x8, x24, [sp, #160] subs x6, x3, x21 ldr q0, [sp, #176] movi v23.2d, #0xffffffff csetm x10, cc // cc = lo, ul, last umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc // cc = lo, ul, last subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc // cc = lo, ul, last ldp x6, x14, [sp, #112] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc // cc = lo, ul, last mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc // cc = lo, ul, last umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc // cc = lo, ul, last usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc // cc = lo, ul, last 
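
// NOTE: the block continuing below appears to be one more instance of the
// inlined 6x6-word Montgomery multiplication modulo p_384 used throughout
// this routine, combining the operands at [sp, #144] and [sp, #96] and
// writing the reduced product back over [sp, #96]...[sp, #136]. This is
// an inference from the load/store pattern, not an original annotation.
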
        cinv x10, x10, cc // cc = lo, ul, last
        cmn x5, #0x1
        mul x19, x17, x20
        adcs x11, x22, x11
        adcs x12, x16, x12
        adcs x9, x9, x5
        umulh x17, x17, x20
        adcs x22, x4, x5
        adc x5, x15, x5
        subs x16, x21, x8
        cneg x20, x16, cc // cc = lo, ul, last
        eor x19, x19, x10
        csetm x4, cc // cc = lo, ul, last
        subs x16, x6, x23
        cneg x16, x16, cc // cc = lo, ul, last
        umlal v27.2d, v22.2s, v24.2s
        mul x15, x20, x16
        cinv x4, x4, cc // cc = lo, ul, last
        cmn x10, #0x1
        usra v1.2d, v26.2d, #32
        adcs x19, x12, x19
        eor x17, x17, x10
        adcs x9, x9, x17
        adcs x22, x22, x10
        lsl x12, x7, #32
        umulh x20, x20, x16
        eor x16, x15, x4
        ldp x15, x17, [sp, #128]
        add x2, x12, x7
        adc x7, x5, x10
        ldp x5, x10, [sp, #176]
        lsr x1, x2, #32
        eor x12, x20, x4
        subs x1, x1, x2
        sbc x20, x2, xzr
        cmn x4, #0x1
        adcs x9, x9, x16
        extr x1, x20, x1, #32
        lsr x20, x20, #32
        adcs x22, x22, x12
        adc x16, x7, x4
        adds x12, x20, x2
        umulh x7, x24, x14
        adc x4, xzr, xzr
        subs x1, x11, x1
        sbcs x20, x19, x12
        sbcs x12, x9, x4
        lsl x9, x1, #32
        add x1, x9, x1
        sbcs x9, x22, xzr
        mul x22, x24, x14
        sbcs x16, x16, xzr
        lsr x4, x1, #32
        sbc x19, x2, xzr
        subs x4, x4, x1
        sbc x11, x1, xzr
        extr x2, x11, x4, #32
        lsr x4, x11, #32
        adds x4, x4, x1
        adc x11, xzr, xzr
        subs x2, x20, x2
        sbcs x4, x12, x4
        sbcs x20, x9, x11
        lsl x12, x2, #32
        add x2, x12, x2
        sbcs x9, x16, xzr
        lsr x11, x2, #32
        sbcs x19, x19, xzr
        sbc x1, x1, xzr
        subs x16, x11, x2
        sbc x12, x2, xzr
        extr x16, x12, x16, #32
        lsr x12, x12, #32
        adds x11, x12, x2
        adc x12, xzr, xzr
        subs x16, x4, x16
        mov x4, v27.d[0]
        sbcs x11, x20, x11
        sbcs x20, x9, x12
        stp x16, x11, [sp, #96]
        sbcs x11, x19, xzr
        sbcs x9, x1, xzr
        stp x20, x11, [sp, #112]
        mov x1, v1.d[0]
        sbc x20, x2, xzr
        subs x12, x24, x5
        mov x11, v27.d[1]
        cneg x16, x12, cc // cc = lo, ul, last
        csetm x2, cc // cc = lo, ul, last
        subs x19, x15, x14
        mov x12, v1.d[1]
        cinv x2, x2, cc // cc = lo, ul, last
        cneg x19, x19, cc // cc = lo, ul, last
        stp x9, x20, [sp, #128]
        mul x9, x16, x19
        adds x4, x7, x4
        adcs x11, x1, x11
        adc x1, x12, xzr
        adds x20, x4, x22
        umulh x19, x16, x19
        adcs x7, x11, x4
        eor x16, x9, x2
        adcs x9, x1, x11
        adc x12, x1, xzr
        adds x7, x7, x22
        adcs x4, x9, x4
        adcs x9, x12, x11
        adc x12, x1, xzr
        cmn x2, #0x1
        eor x1, x19, x2
        adcs x11, x20, x16
        adcs x19, x7, x1
        adcs x1, x4, x2
        adcs x20, x9, x2
        adc x2, x12, x2
        subs x12, x24, x10
        cneg x16, x12, cc // cc = lo, ul, last
        csetm x12, cc // cc = lo, ul, last
        subs x9, x17, x14
        cinv x12, x12, cc // cc = lo, ul, last
        cneg x9, x9, cc // cc = lo, ul, last
        subs x3, x24, x3
        sbcs x21, x5, x21
        mul x24, x16, x9
        sbcs x4, x10, x8
        ngc x8, xzr
        subs x10, x5, x10
        eor x5, x24, x12
        csetm x7, cc // cc = lo, ul, last
        cneg x24, x10, cc // cc = lo, ul, last
        subs x10, x17, x15
        cinv x7, x7, cc // cc = lo, ul, last
        cneg x10, x10, cc // cc = lo, ul, last
        subs x14, x13, x14
        sbcs x15, x23, x15
        eor x13, x21, x8
        mul x23, x24, x10
        sbcs x17, x6, x17
        eor x6, x3, x8
        ngc x21, xzr
        umulh x9, x16, x9
        cmn x8, #0x1
        eor x3, x23, x7
        adcs x23, x6, xzr
        adcs x13, x13, xzr
        eor x16, x4, x8
        adc x16, x16, xzr
        eor x4, x17, x21
        umulh x17, x24, x10
        cmn x21, #0x1
        eor x24, x14, x21
        eor x6, x15, x21
        adcs x15, x24, xzr
        adcs x14, x6, xzr
        adc x6, x4, xzr
        cmn x12, #0x1
        eor x4, x9, x12
        adcs x19, x19, x5
        umulh x5, x23, x15
        adcs x1, x1, x4
        adcs x10, x20, x12
        eor x4, x17, x7
        ldp x20, x9, [sp, #96]
        adc x2, x2, x12
        cmn x7, #0x1
        adcs x12, x1, x3
        ldp x17, x24, [sp, #112]
        mul x1, x16, x6
        adcs x3, x10, x4
        adc x2, x2, x7
        ldp x7, x4, [sp, #128]
        adds x20, x22, x20
        mul x10, x13, x14
        adcs x11, x11, x9
        eor x9, x8, x21
        adcs x21, x19, x17
        stp x20, x11, [sp, #96]
        adcs x12, x12, x24
        mul x8, x23, x15
        adcs x3, x3, x7
        stp x21, x12, [sp, #112]
adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #128] cneg x3, x21, cc // cc = lo, ul, last csetm x24, cc // cc = lo, ul, last umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc // cc = lo, ul, last csetm x16, cc // cc = lo, ul, last subs x21, x6, x15 cneg x22, x21, cc // cc = lo, ul, last cinv x21, x24, cc // cc = lo, ul, last subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc // cc = lo, ul, last csetm x24, cc // cc = lo, ul, last subs x20, x14, x15 cinv x24, x24, cc // cc = lo, ul, last mul x22, x3, x22 cneg x3, x20, cc // cc = lo, ul, last subs x13, x6, x14 cneg x20, x13, cc // cc = lo, ul, last cinv x15, x16, cc // cc = lo, ul, last adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe // #-2 eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #96] ldp x21, x12, [sp, #112] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #128] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff // #4294967295 adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc // cc = lo, ul, last and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x2, x24, x11 stp x22, x5, [sp, #96] adcs x11, x13, x23 adcs x12, x8, x23 stp x2, x11, [sp, #112] adc x13, x15, x23 stp x12, x13, [sp, #128] mov x0, sp mov x1, sp ldp x5, x6, [x1] ldp x4, x3, [sp, #192] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x1, #16] ldp x4, x3, [sp, #208] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [x1, #32] ldp x4, x3, [sp, #224] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, cc // cc = lo, ul, last mov x4, #0xffffffff // #4294967295 and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, 
x6, x4 mov x4, #0xfffffffffffffffe // #-2 and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [x0] stp x7, x8, [x0, #16] stp x9, x10, [x0, #32] ldp x5, x6, [sp, #96] ldp x4, x3, [sp, #192] subs x5, x5, x4 sbcs x6, x6, x3 ldp x4, x3, [sp, #208] sbcs x7, x2, x4 sbcs x8, x11, x3 ldp x4, x3, [sp, #224] sbcs x9, x12, x4 sbcs x10, x13, x3 csetm x3, cc // cc = lo, ul, last mov x4, #0xffffffff // #4294967295 and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #0xfffffffffffffffe // #-2 and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [sp, #144] stp x7, x8, [sp, #160] stp x9, x10, [sp, #176] ldr q3, [sp, #240] ldr q25, [x25, #96] ldp x13, x23, [x25, #96] ldp x3, x21, [sp, #240] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [x25, #128] ldp x8, x24, [sp, #256] subs x6, x3, x21 ldr q0, [sp, #272] movi v23.2d, #0xffffffff csetm x10, cc // cc = lo, ul, last umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc // cc = lo, ul, last subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc // cc = lo, ul, last ldp x6, x14, [x25, #112] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc // cc = lo, ul, last mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc // cc = lo, ul, last umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc // cc = lo, ul, last usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc // cc = lo, ul, last cinv x10, x10, cc // cc = lo, ul, last cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc // cc = lo, ul, last eor x19, x19, x10 csetm x4, cc // cc = lo, ul, last subs x16, x6, x23 cneg x16, x16, cc // cc = lo, ul, last umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc // cc = lo, ul, last cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [x25, #128] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [sp, #272] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds 
x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #240] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #256] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc // cc = lo, ul, last csetm x2, cc // cc = lo, ul, last subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc // cc = lo, ul, last cneg x19, x19, cc // cc = lo, ul, last stp x9, x20, [sp, #272] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc // cc = lo, ul, last csetm x12, cc // cc = lo, ul, last subs x9, x17, x14 cinv x12, x12, cc // cc = lo, ul, last cneg x9, x9, cc // cc = lo, ul, last subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc // cc = lo, ul, last cneg x24, x10, cc // cc = lo, ul, last subs x10, x17, x15 cinv x7, x7, cc // cc = lo, ul, last cneg x10, x10, cc // cc = lo, ul, last subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #240] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #256] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #272] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #240] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #256] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #272] cneg x3, x21, cc // cc = lo, ul, last csetm x24, cc // cc = lo, ul, last umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc // cc = lo, ul, last csetm x16, cc // cc = lo, ul, last subs x21, x6, x15 cneg x22, x21, cc // cc = lo, ul, last cinv x21, x24, cc // cc = lo, ul, last subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc // cc = lo, ul, last csetm x24, cc // cc = lo, ul, last subs x20, x14, x15 cinv x24, x24, cc // cc = lo, ul, last mul x22, x3, x22 cneg x3, x20, cc // cc = lo, ul, last subs x13, x6, x14 cneg x20, x13, cc // cc = lo, ul, last cinv x15, x16, cc // cc = lo, ul, last adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe // #-2 eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #240] ldp x21, x12, [sp, #256] adcs x22, x20, x23 eor x23, 
x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #272] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff // #4294967295 adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc // cc = lo, ul, last and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #240] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #256] adc x12, x15, x23 stp x21, x12, [sp, #272] mov x0, sp mov x1, sp ldp x5, x6, [x1] ldp x4, x3, [sp, #96] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x1, #16] ldp x4, x3, [sp, #112] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [x1, #32] ldp x4, x3, [sp, #128] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, cc // cc = lo, ul, last mov x4, #0xffffffff // #4294967295 and x4, x4, x3 adds x2, x5, x4 eor x4, x4, x3 adcs x11, x6, x4 mov x4, #0xfffffffffffffffe // #-2 and x4, x4, x3 adcs x4, x7, x4 adcs x12, x8, x3 adcs x13, x9, x3 adc x3, x10, x3 stp x2, x11, [x0] stp x4, x12, [x0, #16] stp x13, x3, [x0, #32] ldp x5, x6, [sp, #192] subs x5, x5, x2 sbcs x6, x6, x11 ldp x7, x8, [sp, #208] sbcs x7, x7, x4 sbcs x8, x8, x12 ldp x9, x10, [sp, #224] sbcs x9, x9, x13 sbcs x10, x10, x3 csetm x3, cc // cc = lo, ul, last mov x4, #0xffffffff // #4294967295 and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #0xfffffffffffffffe // #-2 and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [sp, #192] stp x7, x8, [sp, #208] stp x9, x10, [sp, #224] ldr q3, [sp, #144] ldr q25, [sp, #288] ldp x13, x23, [sp, #288] ldp x3, x21, [sp, #144] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [sp, #320] ldp x8, x24, [sp, #160] subs x6, x3, x21 ldr q0, [sp, #176] movi v23.2d, #0xffffffff csetm x10, cc // cc = lo, ul, last umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc // cc = lo, ul, last subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc // cc = lo, ul, last ldp x6, x14, [sp, #304] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc // cc = lo, ul, last mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, 
v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc // cc = lo, ul, last umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc // cc = lo, ul, last usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc // cc = lo, ul, last cinv x10, x10, cc // cc = lo, ul, last cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc // cc = lo, ul, last eor x19, x19, x10 csetm x4, cc // cc = lo, ul, last subs x16, x6, x23 cneg x16, x16, cc // cc = lo, ul, last umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc // cc = lo, ul, last cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [sp, #320] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [sp, #176] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #144] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #160] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc // cc = lo, ul, last csetm x2, cc // cc = lo, ul, last subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc // cc = lo, ul, last cneg x19, x19, cc // cc = lo, ul, last stp x9, x20, [sp, #176] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc // cc = lo, ul, last csetm x12, cc // cc = lo, ul, last subs x9, x17, x14 cinv x12, x12, cc // cc = lo, ul, last cneg x9, x9, cc // cc = lo, ul, last subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc // cc = lo, ul, last cneg x24, x10, cc // cc = lo, ul, last subs x10, x17, x15 cinv x7, x7, cc // cc = lo, ul, last cneg x10, x10, cc // cc = lo, ul, last subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 
cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #144] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #160] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #176] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #144] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #160] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #176] cneg x3, x21, cc // cc = lo, ul, last csetm x24, cc // cc = lo, ul, last umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc // cc = lo, ul, last csetm x16, cc // cc = lo, ul, last subs x21, x6, x15 cneg x22, x21, cc // cc = lo, ul, last cinv x21, x24, cc // cc = lo, ul, last subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc // cc = lo, ul, last csetm x24, cc // cc = lo, ul, last subs x20, x14, x15 cinv x24, x24, cc // cc = lo, ul, last mul x22, x3, x22 cneg x3, x20, cc // cc = lo, ul, last subs x13, x6, x14 cneg x20, x13, cc // cc = lo, ul, last cinv x15, x16, cc // cc = lo, ul, last adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe // #-2 eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #144] ldp x21, x12, [sp, #160] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #176] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff // #4294967295 adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 
        adcs x24, x5, x24
        adcs x13, x23, xzr
        adcs x8, x8, xzr
        adcs x15, x11, xzr
        csetm x23, cc // cc = lo, ul, last
        and x11, x16, x23
        and x20, x14, x23
        adds x22, x6, x20
        eor x3, x20, x23
        adcs x5, x7, x3
        adcs x14, x24, x11
        stp x22, x5, [sp, #144]
        adcs x5, x13, x23
        adcs x21, x8, x23
        stp x14, x5, [sp, #160]
        adc x12, x15, x23
        stp x21, x12, [sp, #176]
        ldr q3, [sp, #240]
        ldr q25, [x26, #96]
        ldp x13, x23, [x26, #96]
        ldp x3, x21, [sp, #240]
        rev64 v23.4s, v25.4s
        uzp1 v17.4s, v25.4s, v3.4s
        umulh x15, x3, x13
        mul v6.4s, v23.4s, v3.4s
        uzp1 v3.4s, v3.4s, v3.4s
        ldr q27, [x26, #128]
        ldp x8, x24, [sp, #256]
        subs x6, x3, x21
        ldr q0, [sp, #272]
        movi v23.2d, #0xffffffff
        csetm x10, cc // cc = lo, ul, last
        umulh x19, x21, x23
        rev64 v4.4s, v27.4s
        uzp2 v25.4s, v27.4s, v27.4s
        cneg x4, x6, cc // cc = lo, ul, last
        subs x7, x23, x13
        xtn v22.2s, v0.2d
        xtn v24.2s, v27.2d
        cneg x20, x7, cc // cc = lo, ul, last
        ldp x6, x14, [x26, #112]
        mul v27.4s, v4.4s, v0.4s
        uaddlp v20.2d, v6.4s
        cinv x5, x10, cc // cc = lo, ul, last
        mul x16, x4, x20
        uzp2 v6.4s, v0.4s, v0.4s
        umull v21.2d, v22.2s, v25.2s
        shl v0.2d, v20.2d, #32
        umlal v0.2d, v3.2s, v17.2s
        mul x22, x8, x6
        umull v1.2d, v6.2s, v25.2s
        subs x12, x3, x8
        umull v20.2d, v22.2s, v24.2s
        cneg x17, x12, cc // cc = lo, ul, last
        umulh x9, x8, x6
        mov x12, v0.d[1]
        eor x11, x16, x5
        mov x7, v0.d[0]
        csetm x10, cc // cc = lo, ul, last
        usra v21.2d, v20.2d, #32
        adds x15, x15, x12
        adcs x12, x19, x22
        umulh x20, x4, x20
        adc x19, x9, xzr
        usra v1.2d, v21.2d, #32
        adds x22, x15, x7
        and v26.16b, v21.16b, v23.16b
        adcs x16, x12, x15
        uaddlp v25.2d, v27.4s
        adcs x9, x19, x12
        umlal v26.2d, v6.2s, v24.2s
        adc x4, x19, xzr
        adds x16, x16, x7
        shl v27.2d, v25.2d, #32
        adcs x9, x9, x15
        adcs x4, x4, x12
        eor x12, x20, x5
        adc x15, x19, xzr
        subs x20, x6, x13
        cneg x20, x20, cc // cc = lo, ul, last
        cinv x10, x10, cc // cc = lo, ul, last
        cmn x5, #0x1
        mul x19, x17, x20
        adcs x11, x22, x11
        adcs x12, x16, x12
        adcs x9, x9, x5
        umulh x17, x17, x20
        adcs x22, x4, x5
        adc x5, x15, x5
        subs x16, x21, x8
        cneg x20, x16, cc // cc = lo, ul, last
        eor x19, x19, x10
        csetm x4, cc // cc = lo, ul, last
        subs x16, x6, x23
        cneg x16, x16, cc // cc = lo, ul, last
        umlal v27.2d, v22.2s, v24.2s
        mul x15, x20, x16
        cinv x4, x4, cc // cc = lo, ul, last
        cmn x10, #0x1
        usra v1.2d, v26.2d, #32
        adcs x19, x12, x19
        eor x17, x17, x10
        adcs x9, x9, x17
        adcs x22, x22, x10
        lsl x12, x7, #32
        umulh x20, x20, x16
        eor x16, x15, x4
        ldp x15, x17, [x26, #128]
        add x2, x12, x7
        adc x7, x5, x10
        ldp x5, x10, [sp, #272]
        lsr x1, x2, #32
        eor x12, x20, x4
        subs x1, x1, x2
        sbc x20, x2, xzr
        cmn x4, #0x1
        adcs x9, x9, x16
        extr x1, x20, x1, #32
        lsr x20, x20, #32
        adcs x22, x22, x12
        adc x16, x7, x4
        adds x12, x20, x2
        umulh x7, x24, x14
        adc x4, xzr, xzr
        subs x1, x11, x1
        sbcs x20, x19, x12
        sbcs x12, x9, x4
        lsl x9, x1, #32
        add x1, x9, x1
        sbcs x9, x22, xzr
        mul x22, x24, x14
        sbcs x16, x16, xzr
        lsr x4, x1, #32
        sbc x19, x2, xzr
        subs x4, x4, x1
        sbc x11, x1, xzr
        extr x2, x11, x4, #32
        lsr x4, x11, #32
        adds x4, x4, x1
        adc x11, xzr, xzr
        subs x2, x20, x2
        sbcs x4, x12, x4
        sbcs x20, x9, x11
        lsl x12, x2, #32
        add x2, x12, x2
        sbcs x9, x16, xzr
        lsr x11, x2, #32
        sbcs x19, x19, xzr
        sbc x1, x1, xzr
        subs x16, x11, x2
        sbc x12, x2, xzr
        extr x16, x12, x16, #32
        lsr x12, x12, #32
        adds x11, x12, x2
        adc x12, xzr, xzr
        subs x16, x4, x16
        mov x4, v27.d[0]
        sbcs x11, x20, x11
        sbcs x20, x9, x12
        stp x16, x11, [sp, #240]
        sbcs x11, x19, xzr
        sbcs x9, x1, xzr
        stp x20, x11, [sp, #256]
        mov x1, v1.d[0]
        sbc x20, x2, xzr
        subs x12, x24, x5
        mov x11, v27.d[1]
        cneg x16, x12, cc // cc = lo, ul, last
        csetm x2, cc // cc = lo, ul, last
        subs x19, x15, x14
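
// The product being formed here appears to fold in the second input point's
// z-coordinate, loaded from [x26, #96] above, into the running value at
// [sp, #240]; x25 and x26 seemingly hold the two input points and x27 the
// result pointer. Again inferred from the loads and the final stores
// through x27, not from any original comment.
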
mov x12, v1.d[1] cinv x2, x2, cc // cc = lo, ul, last cneg x19, x19, cc // cc = lo, ul, last stp x9, x20, [sp, #272] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc // cc = lo, ul, last csetm x12, cc // cc = lo, ul, last subs x9, x17, x14 cinv x12, x12, cc // cc = lo, ul, last cneg x9, x9, cc // cc = lo, ul, last subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc // cc = lo, ul, last cneg x24, x10, cc // cc = lo, ul, last subs x10, x17, x15 cinv x7, x7, cc // cc = lo, ul, last cneg x10, x10, cc // cc = lo, ul, last subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #240] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #256] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #272] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #240] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #256] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #272] cneg x3, x21, cc // cc = lo, ul, last csetm x24, cc // cc = lo, ul, last umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc // cc = lo, ul, last csetm x16, cc // cc = lo, ul, last subs x21, x6, x15 cneg x22, x21, cc // cc = lo, ul, last cinv x21, x24, cc // cc = lo, ul, last subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc // cc = lo, ul, last csetm x24, cc // cc = lo, ul, last subs x20, x14, x15 cinv x24, x24, cc // cc = lo, ul, last mul x22, x3, x22 cneg x3, x20, cc // cc = lo, ul, last subs x13, x6, x14 cneg x20, x13, cc // cc = lo, ul, last cinv x15, x16, cc // cc = lo, ul, last adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe // #-2 eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #240] ldp x21, x12, [sp, #256] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #272] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, 
x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff // #4294967295 adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc // cc = lo, ul, last and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #240] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #256] adc x12, x15, x23 stp x21, x12, [sp, #272] ldp x2, x27, [sp, #0x150] ldr q3, [sp, #48] ldr q25, [sp, #192] ldp x13, x23, [sp, #192] ldp x3, x21, [sp, #48] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [sp, #224] ldp x8, x24, [sp, #64] subs x6, x3, x21 ldr q0, [sp, #80] movi v23.2d, #0xffffffff csetm x10, cc // cc = lo, ul, last umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc // cc = lo, ul, last subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc // cc = lo, ul, last ldp x6, x14, [sp, #208] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc // cc = lo, ul, last mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc // cc = lo, ul, last umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc // cc = lo, ul, last usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc // cc = lo, ul, last cinv x10, x10, cc // cc = lo, ul, last cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc // cc = lo, ul, last eor x19, x19, x10 csetm x4, cc // cc = lo, ul, last subs x16, x6, x23 cneg x16, x16, cc // cc = lo, ul, last umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc // cc = lo, ul, last cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [sp, #224] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, 
[sp, #80] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #192] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #208] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc // cc = lo, ul, last csetm x2, cc // cc = lo, ul, last subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc // cc = lo, ul, last cneg x19, x19, cc // cc = lo, ul, last stp x9, x20, [sp, #224] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc // cc = lo, ul, last csetm x12, cc // cc = lo, ul, last subs x9, x17, x14 cinv x12, x12, cc // cc = lo, ul, last cneg x9, x9, cc // cc = lo, ul, last subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc // cc = lo, ul, last cneg x24, x10, cc // cc = lo, ul, last subs x10, x17, x15 cinv x7, x7, cc // cc = lo, ul, last cneg x10, x10, cc // cc = lo, ul, last subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #192] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #208] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #224] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #192] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #208] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #224] cneg x3, x21, cc // cc = lo, ul, last csetm x24, cc // cc = lo, ul, last umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc // cc = lo, ul, last csetm x16, cc // cc = lo, ul, last subs x21, x6, x15 cneg x22, x21, cc // cc = lo, ul, last cinv x21, x24, cc // cc = lo, ul, last subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc // cc = lo, ul, last csetm x24, cc // cc = lo, ul, last subs x20, x14, x15 cinv x24, x24, cc // cc = lo, ul, last mul x22, x3, x22 cneg x3, x20, cc // cc = lo, ul, last subs x13, x6, x14 cneg x20, x13, cc // cc 
= lo, ul, last cinv x15, x16, cc // cc = lo, ul, last adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe // #-2 eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #192] ldp x21, x12, [sp, #208] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #224] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff // #4294967295 adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc // cc = lo, ul, last and x11, x16, x23 and x20, x14, x23 adds x2, x6, x20 eor x3, x20, x23 adcs x6, x7, x3 adcs x7, x24, x11 adcs x9, x13, x23 adcs x10, x8, x23 adc x11, x15, x23 ldp x4, x3, [sp, #144] subs x5, x2, x4 sbcs x6, x6, x3 ldp x4, x3, [sp, #160] sbcs x7, x7, x4 sbcs x8, x9, x3 ldp x4, x3, [sp, #176] sbcs x9, x10, x4 sbcs x10, x11, x3 csetm x3, cc // cc = lo, ul, last mov x4, #0xffffffff // #4294967295 and x4, x4, x3 adds x19, x5, x4 eor x4, x4, x3 adcs x24, x6, x4 mov x4, #0xfffffffffffffffe // #-2 and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x7, x8, [sp, #208] stp x9, x10, [sp, #224] ldp x0, x1, [x25, #96] ldp x2, x3, [x25, #112] ldp x4, x5, [x25, #128] orr x20, x0, x1 orr x21, x2, x3 orr x22, x4, x5 orr x20, x20, x21 orr x20, x20, x22 cmp x20, xzr cset x20, ne // ne = any ldp x6, x7, [x26, #96] ldp x8, x9, [x26, #112] ldp x10, x11, [x26, #128] orr x21, x6, x7 orr x22, x8, x9 orr x23, x10, x11 orr x21, x21, x22 orr x21, x21, x23 cmp x21, xzr cset x21, ne // ne = any cmp x21, x20 ldp x12, x13, [sp, #240] csel x12, x0, x12, cc // cc = lo, ul, last csel x13, x1, x13, cc // cc = lo, ul, last csel x12, x6, x12, hi // hi = pmore csel x13, x7, x13, hi // hi = pmore ldp x14, x15, [sp, #256] csel x14, x2, x14, cc // cc = lo, 
        csel x15, x3, x15, cc // cc = lo, ul, last
        csel x14, x8, x14, hi // hi = pmore
        csel x15, x9, x15, hi // hi = pmore
        ldp x16, x17, [sp, #272]
        csel x16, x4, x16, cc // cc = lo, ul, last
        csel x17, x5, x17, cc // cc = lo, ul, last
        csel x16, x10, x16, hi // hi = pmore
        csel x17, x11, x17, hi // hi = pmore
        ldp x20, x21, [x25]
        ldp x0, x1, [sp]
        csel x0, x20, x0, cc // cc = lo, ul, last
        csel x1, x21, x1, cc // cc = lo, ul, last
        ldp x20, x21, [x26]
        csel x0, x20, x0, hi // hi = pmore
        csel x1, x21, x1, hi // hi = pmore
        ldp x20, x21, [x25, #16]
        ldp x2, x3, [sp, #16]
        csel x2, x20, x2, cc // cc = lo, ul, last
        csel x3, x21, x3, cc // cc = lo, ul, last
        ldp x20, x21, [x26, #16]
        csel x2, x20, x2, hi // hi = pmore
        csel x3, x21, x3, hi // hi = pmore
        ldp x20, x21, [x25, #32]
        ldp x4, x5, [sp, #32]
        csel x4, x20, x4, cc // cc = lo, ul, last
        csel x5, x21, x5, cc // cc = lo, ul, last
        ldp x20, x21, [x26, #32]
        csel x4, x20, x4, hi // hi = pmore
        csel x5, x21, x5, hi // hi = pmore
        ldp x20, x21, [x25, #48]
        csel x6, x20, x19, cc // cc = lo, ul, last
        csel x7, x21, x24, cc // cc = lo, ul, last
        ldp x20, x21, [x26, #48]
        csel x6, x20, x6, hi // hi = pmore
        csel x7, x21, x7, hi // hi = pmore
        ldp x20, x21, [x25, #64]
        ldp x8, x9, [sp, #208]
        csel x8, x20, x8, cc // cc = lo, ul, last
        csel x9, x21, x9, cc // cc = lo, ul, last
        ldp x20, x21, [x26, #64]
        csel x8, x20, x8, hi // hi = pmore
        csel x9, x21, x9, hi // hi = pmore
        ldp x20, x21, [x25, #80]
        ldp x10, x11, [sp, #224]
        csel x10, x20, x10, cc // cc = lo, ul, last
        csel x11, x21, x11, cc // cc = lo, ul, last
        ldp x20, x21, [x26, #80]
        csel x10, x20, x10, hi // hi = pmore
        csel x11, x21, x11, hi // hi = pmore
        stp x0, x1, [x27]
        stp x2, x3, [x27, #16]
        stp x4, x5, [x27, #32]
        stp x6, x7, [x27, #48]
        stp x8, x9, [x27, #64]
        stp x10, x11, [x27, #80]
        stp x12, x13, [x27, #96]
        stp x14, x15, [x27, #112]
        stp x16, x17, [x27, #128]
        CFI_INC_SP(384)
        CFI_POP1Z(x27)
        CFI_POP2(x25,x26)
        CFI_POP2(x23,x24)
        CFI_POP2(x21,x22)
        CFI_POP2(x19,x20)
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(Lp384_montjscalarmul_p384_montjadd)

S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp384_montjscalarmul_p384_montjdouble)
Lp384_montjscalarmul_p384_montjdouble:
        CFI_START
        CFI_DEC_SP(416)
        stp x19, x20, [sp, #336]
        stp x21, x22, [sp, #352]
        stp x23, x24, [sp, #368]
        stp x25, x26, [sp, #384]
        stp x27, xzr, [sp, #400]
        mov x25, x0
        mov x26, x1
        mov x0, sp
        ldr q1, [x26, #96]
        ldp x9, x2, [x26, #96]
        ldr q0, [x26, #96]
        ldp x4, x6, [x26, #112]
        rev64 v21.4s, v1.4s
        uzp2 v28.4s, v1.4s, v1.4s
        umulh x7, x9, x2
        xtn v17.2s, v1.2d
        mul v27.4s, v21.4s, v0.4s
        ldr q20, [x26, #128]
        xtn v30.2s, v0.2d
        ldr q1, [x26, #128]
        uzp2 v31.4s, v0.4s, v0.4s
        ldp x5, x10, [x26, #128]
        umulh x8, x9, x4
        uaddlp v3.2d, v27.4s
        umull v16.2d, v30.2s, v17.2s
        mul x16, x9, x4
        umull v27.2d, v30.2s, v28.2s
        shrn v0.2s, v20.2d, #32
        xtn v7.2s, v20.2d
        shl v20.2d, v3.2d, #32
        umull v3.2d, v31.2s, v28.2s
        mul x3, x2, x4
        umlal v20.2d, v30.2s, v17.2s
        umull v22.2d, v7.2s, v0.2s
        usra v27.2d, v16.2d, #32
        umulh x11, x2, x4
        movi v21.2d, #0xffffffff
        uzp2 v28.4s, v1.4s, v1.4s
        adds x15, x16, x7
        and v5.16b, v27.16b, v21.16b
        adcs x3, x3, x8
        usra v3.2d, v27.2d, #32
        dup v29.2d, x6
        adcs x16, x11, xzr
        mov x14, v20.d[0]
        umlal v5.2d, v31.2s, v17.2s
        mul x8, x9, x2
        mov x7, v20.d[1]
        shl v19.2d, v22.2d, #33
        xtn v25.2s, v29.2d
        rev64 v31.4s, v1.4s
        lsl x13, x14, #32
        uzp2 v6.4s, v29.4s, v29.4s
        umlal v19.2d, v7.2s, v7.2s
        usra v3.2d, v5.2d, #32
        adds x1, x8, x8
        umulh x8, x4, x4
        add x12, x13, x14
        mul v17.4s, v31.4s, v29.4s
        xtn v4.2s, v1.2d
        adcs x14, x15, x15
        lsr x13, x12, #32
        adcs x15, x3, x3
        umull v31.2d, v25.2s, v28.2s
        adcs x11, x16, x16
        umull
v21.2d, v25.2s, v4.2s mov x17, v3.d[0] umull v18.2d, v6.2s, v28.2s adc x16, x8, xzr uaddlp v16.2d, v17.4s movi v1.2d, #0xffffffff subs x13, x13, x12 usra v31.2d, v21.2d, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2d, v16.2d, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16b, v31.16b, v1.16b adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2d, v6.2s, v4.2s usra v18.2d, v31.2d, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2d, v25.2s, v4.2s adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2d, v3.2d, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x15, x3, x17 sbcs x3, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [x0] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 stp x15, x3, [x0, #16] csetm x15, cc // cc = lo, ul, last cneg x1, x1, cc // cc = lo, ul, last stp x11, x14, [x0, #32] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc // cc = lo, ul, last cinv x16, x15, cc // cc = lo, ul, last mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc // cc = lo, ul, last cneg x9, x9, cc // cc = lo, ul, last subs x4, x2, x4 cneg x4, x4, cc // cc = lo, ul, last csetm x7, cc // cc = lo, ul, last subs x2, x10, x6 cinv x8, x8, cc // cc = lo, ul, last cneg x2, x2, cc // cc = lo, ul, last cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc // cc = lo, ul, last cneg x1, x1, cc // cc = lo, ul, last eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 ldp x9, x17, [x0, #16] umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [x0] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [x0, #32] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x9 adcs x1, x1, x17 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, 
x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff // #4294967295 adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 // #-4294967295 adcs x14, x14, x2 mov x2, #0x1 // #1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [x0] adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [x0, #16] adc x17, x14, xzr stp x2, x17, [x0, #32] ldr q1, [x26, #48] ldp x9, x2, [x26, #48] ldr q0, [x26, #48] ldp x4, x6, [x26, #64] rev64 v21.4s, v1.4s uzp2 v28.4s, v1.4s, v1.4s umulh x7, x9, x2 xtn v17.2s, v1.2d mul v27.4s, v21.4s, v0.4s ldr q20, [x26, #80] xtn v30.2s, v0.2d ldr q1, [x26, #80] uzp2 v31.4s, v0.4s, v0.4s ldp x5, x10, [x26, #80] umulh x8, x9, x4 uaddlp v3.2d, v27.4s umull v16.2d, v30.2s, v17.2s mul x16, x9, x4 umull v27.2d, v30.2s, v28.2s shrn v0.2s, v20.2d, #32 xtn v7.2s, v20.2d shl v20.2d, v3.2d, #32 umull v3.2d, v31.2s, v28.2s mul x3, x2, x4 umlal v20.2d, v30.2s, v17.2s umull v22.2d, v7.2s, v0.2s usra v27.2d, v16.2d, #32 umulh x11, x2, x4 movi v21.2d, #0xffffffff uzp2 v28.4s, v1.4s, v1.4s adds x15, x16, x7 and v5.16b, v27.16b, v21.16b adcs x3, x3, x8 usra v3.2d, v27.2d, #32 dup v29.2d, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2d, v31.2s, v17.2s mul x8, x9, x2 mov x7, v20.d[1] shl v19.2d, v22.2d, #33 xtn v25.2s, v29.2d rev64 v31.4s, v1.4s lsl x13, x14, #32 uzp2 v6.4s, v29.4s, v29.4s umlal v19.2d, v7.2s, v7.2s usra v3.2d, v5.2d, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4s, v31.4s, v29.4s xtn v4.2s, v1.2d adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2d, v25.2s, v28.2s adcs x11, x16, x16 umull v21.2d, v25.2s, v4.2s mov x17, v3.d[0] umull v18.2d, v6.2s, v28.2s adc x16, x8, xzr uaddlp v16.2d, v17.4s movi v1.2d, #0xffffffff subs x13, x13, x12 usra v31.2d, v21.2d, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2d, v16.2d, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16b, v31.16b, v1.16b adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2d, v6.2s, v4.2s usra v18.2d, v31.2d, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2d, v25.2s, v4.2s adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2d, v3.2d, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x15, x3, x17 sbcs x3, x14, xzr mul x17, 
x2, x5 sbcs x11, x11, xzr stp x13, x1, [sp, #48] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 stp x15, x3, [sp, #64] csetm x15, cc // cc = lo, ul, last cneg x1, x1, cc // cc = lo, ul, last stp x11, x14, [sp, #80] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc // cc = lo, ul, last cinv x16, x15, cc // cc = lo, ul, last mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc // cc = lo, ul, last cneg x9, x9, cc // cc = lo, ul, last subs x4, x2, x4 cneg x4, x4, cc // cc = lo, ul, last csetm x7, cc // cc = lo, ul, last subs x2, x10, x6 cinv x8, x8, cc // cc = lo, ul, last cneg x2, x2, cc // cc = lo, ul, last cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc // cc = lo, ul, last cneg x1, x1, cc // cc = lo, ul, last eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 ldp x9, x17, [sp, #64] umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [sp, #48] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [sp, #80] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x9 adcs x1, x1, x17 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff // #4294967295 adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 // #-4294967295 adcs x14, x14, x2 mov x2, #0x1 // #1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [sp, #48] adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [sp, #64] adc x17, x14, xzr stp x2, x17, [sp, #80] ldp x5, x6, [x26] ldp x4, x3, [sp] adds x5, x5, 
x4 adcs x6, x6, x3 ldp x7, x8, [x26, #16] ldp x4, x3, [sp, #16] adcs x7, x7, x4 adcs x8, x8, x3 ldp x9, x10, [x26, #32] ldp x4, x3, [sp, #32] adcs x9, x9, x4 adcs x10, x10, x3 csetm x3, cs // cs = hs, nlast mov x4, #0xffffffff // #4294967295 and x4, x4, x3 subs x5, x5, x4 eor x4, x4, x3 sbcs x6, x6, x4 mov x4, #0xfffffffffffffffe // #-2 and x4, x4, x3 sbcs x7, x7, x4 sbcs x8, x8, x3 sbcs x9, x9, x3 sbc x10, x10, x3 stp x5, x6, [sp, #240] stp x7, x8, [sp, #256] stp x9, x10, [sp, #272] mov x2, sp ldp x5, x6, [x26] ldp x4, x3, [x2] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x26, #16] ldp x4, x3, [x2, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [x26, #32] ldp x4, x3, [x2, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, cc // cc = lo, ul, last mov x4, #0xffffffff // #4294967295 and x4, x4, x3 adds x13, x5, x4 eor x4, x4, x3 adcs x23, x6, x4 mov x4, #0xfffffffffffffffe // #-2 and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x13, x23, [sp, #192] stp x7, x8, [sp, #208] stp x9, x10, [sp, #224] ldr q3, [sp, #240] ldr q25, [sp, #192] ldp x3, x21, [sp, #240] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [sp, #224] ldp x8, x24, [sp, #256] subs x6, x3, x21 ldr q0, [sp, #272] movi v23.2d, #0xffffffff csetm x10, cc // cc = lo, ul, last umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc // cc = lo, ul, last subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc // cc = lo, ul, last ldp x6, x14, [sp, #208] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc // cc = lo, ul, last mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc // cc = lo, ul, last umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc // cc = lo, ul, last usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc // cc = lo, ul, last cinv x10, x10, cc // cc = lo, ul, last cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc // cc = lo, ul, last eor x19, x19, x10 csetm x4, cc // cc = lo, ul, last subs x16, x6, x23 cneg x16, x16, cc // cc = lo, ul, last umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc // cc = lo, ul, last cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [sp, #224] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [sp, #272] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc 
x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #96] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #112] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc // cc = lo, ul, last csetm x2, cc // cc = lo, ul, last subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc // cc = lo, ul, last cneg x19, x19, cc // cc = lo, ul, last stp x9, x20, [sp, #128] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc // cc = lo, ul, last csetm x12, cc // cc = lo, ul, last subs x9, x17, x14 cinv x12, x12, cc // cc = lo, ul, last cneg x9, x9, cc // cc = lo, ul, last subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc // cc = lo, ul, last cneg x24, x10, cc // cc = lo, ul, last subs x10, x17, x15 cinv x7, x7, cc // cc = lo, ul, last cneg x10, x10, cc // cc = lo, ul, last subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #96] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #112] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #128] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #96] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #112] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #128] cneg x3, x21, cc // cc = lo, ul, last csetm x24, cc // cc = lo, ul, last umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc // cc = lo, ul, last csetm x16, cc // cc = lo, ul, last subs x21, x6, x15 cneg x22, x21, cc // cc = lo, ul, last cinv x21, x24, cc // cc = lo, ul, last subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc // cc = lo, ul, last csetm x24, cc // cc = lo, ul, last subs x20, x14, x15 cinv x24, x24, cc // cc = lo, ul, last mul x22, x3, x22 cneg x3, x20, cc // cc = lo, ul, last subs x13, x6, x14 cneg x20, x13, cc // cc = lo, ul, last cinv x15, x16, cc // cc = lo, ul, last adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, 
x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe // #-2 eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #96] ldp x21, x12, [sp, #112] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #128] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff // #4294967295 adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc // cc = lo, ul, last and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #96] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #112] adc x12, x15, x23 stp x21, x12, [sp, #128] ldp x5, x6, [x26, #48] ldp x4, x3, [x26, #96] adds x5, x5, x4 adcs x6, x6, x3 ldp x7, x8, [x26, #64] ldp x4, x3, [x26, #112] adcs x7, x7, x4 adcs x8, x8, x3 ldp x9, x10, [x26, #80] ldp x4, x3, [x26, #128] adcs x9, x9, x4 adcs x10, x10, x3 adc x3, xzr, xzr mov x4, #0xffffffff // #4294967295 cmp x5, x4 mov x4, #0xffffffff00000000 // #-4294967296 sbcs xzr, x6, x4 mov x4, #0xfffffffffffffffe // #-2 sbcs xzr, x7, x4 adcs xzr, x8, xzr adcs xzr, x9, xzr adcs xzr, x10, xzr adcs x3, x3, xzr csetm x3, ne // ne = any mov x4, #0xffffffff // #4294967295 and x4, x4, x3 subs x5, x5, x4 eor x4, x4, x3 sbcs x6, x6, x4 mov x4, #0xfffffffffffffffe // #-2 and x4, x4, x3 sbcs x7, x7, x4 sbcs x8, x8, x3 sbcs x9, x9, x3 sbc x10, x10, x3 stp x5, x6, [sp, #240] stp x7, x8, [sp, #256] stp x9, x10, [sp, #272] ldr q1, [sp, #96] ldp x9, x2, [sp, #96] ldr q0, [sp, #96] ldp x4, x6, [sp, #112] rev64 v21.4s, v1.4s uzp2 v28.4s, v1.4s, v1.4s umulh x7, x9, x2 xtn v17.2s, v1.2d mul v27.4s, v21.4s, v0.4s ldr q20, [sp, #128] xtn v30.2s, v0.2d ldr q1, [sp, #128] uzp2 v31.4s, v0.4s, v0.4s ldp x5, x10, [sp, #128] umulh x8, x9, x4 uaddlp v3.2d, v27.4s umull v16.2d, v30.2s, v17.2s mul x16, x9, x4 umull v27.2d, v30.2s, v28.2s shrn v0.2s, v20.2d, #32 xtn v7.2s, v20.2d shl v20.2d, v3.2d, #32 umull v3.2d, v31.2s, v28.2s mul x3, x2, x4 umlal v20.2d, v30.2s, v17.2s umull v22.2d, v7.2s, v0.2s usra v27.2d, v16.2d, #32 umulh x11, x2, x4 movi v21.2d, #0xffffffff uzp2 v28.4s, v1.4s, v1.4s adds 
x15, x16, x7 and v5.16b, v27.16b, v21.16b adcs x3, x3, x8 usra v3.2d, v27.2d, #32 dup v29.2d, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2d, v31.2s, v17.2s mul x8, x9, x2 mov x7, v20.d[1] shl v19.2d, v22.2d, #33 xtn v25.2s, v29.2d rev64 v31.4s, v1.4s lsl x13, x14, #32 uzp2 v6.4s, v29.4s, v29.4s umlal v19.2d, v7.2s, v7.2s usra v3.2d, v5.2d, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4s, v31.4s, v29.4s xtn v4.2s, v1.2d adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2d, v25.2s, v28.2s adcs x11, x16, x16 umull v21.2d, v25.2s, v4.2s mov x17, v3.d[0] umull v18.2d, v6.2s, v28.2s adc x16, x8, xzr uaddlp v16.2d, v17.4s movi v1.2d, #0xffffffff subs x13, x13, x12 usra v31.2d, v21.2d, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2d, v16.2d, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16b, v31.16b, v1.16b adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2d, v6.2s, v4.2s usra v18.2d, v31.2d, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2d, v25.2s, v4.2s adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2d, v3.2d, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x15, x3, x17 sbcs x3, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [sp, #288] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 stp x15, x3, [sp, #304] csetm x15, cc // cc = lo, ul, last cneg x1, x1, cc // cc = lo, ul, last stp x11, x14, [sp, #320] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc // cc = lo, ul, last cinv x16, x15, cc // cc = lo, ul, last mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc // cc = lo, ul, last cneg x9, x9, cc // cc = lo, ul, last subs x4, x2, x4 cneg x4, x4, cc // cc = lo, ul, last csetm x7, cc // cc = lo, ul, last subs x2, x10, x6 cinv x8, x8, cc // cc = lo, ul, last cneg x2, x2, cc // cc = lo, ul, last cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc // cc = lo, ul, last cneg x1, x1, cc // cc = lo, ul, last eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 ldp x9, x17, [sp, #304] umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [sp, #288] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [sp, #320] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x9 adcs x1, x1, x17 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, 
xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff // #4294967295 adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 // #-4294967295 adcs x14, x14, x2 mov x2, #0x1 // #1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [sp, #288] adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [sp, #304] adc x17, x14, xzr stp x2, x17, [sp, #320] ldr q3, [x26] ldr q25, [sp, #48] ldp x13, x23, [sp, #48] ldp x3, x21, [x26] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [sp, #80] ldp x8, x24, [x26, #16] subs x6, x3, x21 ldr q0, [x26, #32] movi v23.2d, #0xffffffff csetm x10, cc // cc = lo, ul, last umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc // cc = lo, ul, last subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc // cc = lo, ul, last ldp x6, x14, [sp, #64] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc // cc = lo, ul, last mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc // cc = lo, ul, last umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc // cc = lo, ul, last usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc // cc = lo, ul, last cinv x10, x10, cc // cc = lo, ul, last cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc // cc = lo, ul, last eor x19, x19, x10 csetm x4, cc // cc = lo, ul, last subs x16, x6, x23 cneg x16, x16, cc // cc = lo, ul, last umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc // cc = 
lo, ul, last cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [sp, #80] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [x26, #32] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x26, x4, x16 mov x4, v27.d[0] sbcs x27, x20, x11 sbcs x20, x9, x12 sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #160] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc // cc = lo, ul, last csetm x2, cc // cc = lo, ul, last subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc // cc = lo, ul, last cneg x19, x19, cc // cc = lo, ul, last stp x9, x20, [sp, #176] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc // cc = lo, ul, last csetm x12, cc // cc = lo, ul, last subs x9, x17, x14 cinv x12, x12, cc // cc = lo, ul, last cneg x9, x9, cc // cc = lo, ul, last subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc // cc = lo, ul, last cneg x24, x10, cc // cc = lo, ul, last subs x10, x17, x15 cinv x7, x7, cc // cc = lo, ul, last cneg x10, x10, cc // cc = lo, ul, last subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #160] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #176] adds x20, x22, x26 mul x10, x13, x14 adcs x11, x11, x27 eor x9, x8, x21 adcs x26, x19, x17 stp x20, x11, [sp, #144] adcs x27, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #176] cneg x3, x21, cc // cc = lo, ul, last csetm x24, cc // cc = lo, ul, last umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc // cc = lo, ul, last csetm x16, cc // cc = lo, ul, last subs x21, x6, x15 cneg x22, x21, cc // cc = lo, ul, last cinv x21, x24, cc // cc = lo, ul, last subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc // cc = lo, ul, last csetm x24, cc 
// cc = lo, ul, last subs x20, x14, x15 cinv x24, x24, cc // cc = lo, ul, last mul x22, x3, x22 cneg x3, x20, cc // cc = lo, ul, last subs x13, x6, x14 cneg x20, x13, cc // cc = lo, ul, last cinv x15, x16, cc // cc = lo, ul, last adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe // #-2 eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #144] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #176] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x26 eor x1, x22, x9 adcs x24, x23, x27 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x26 adcs x15, x17, x27 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff // #4294967295 adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc // cc = lo, ul, last and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #144] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #160] adc x12, x15, x23 stp x21, x12, [sp, #176] ldr q1, [sp, #240] ldp x9, x2, [sp, #240] ldr q0, [sp, #240] ldp x4, x6, [sp, #256] rev64 v21.4s, v1.4s uzp2 v28.4s, v1.4s, v1.4s umulh x7, x9, x2 xtn v17.2s, v1.2d mul v27.4s, v21.4s, v0.4s ldr q20, [sp, #272] xtn v30.2s, v0.2d ldr q1, [sp, #272] uzp2 v31.4s, v0.4s, v0.4s ldp x5, x10, [sp, #272] umulh x8, x9, x4 uaddlp v3.2d, v27.4s umull v16.2d, v30.2s, v17.2s mul x16, x9, x4 umull v27.2d, v30.2s, v28.2s shrn v0.2s, v20.2d, #32 xtn v7.2s, v20.2d shl v20.2d, v3.2d, #32 umull v3.2d, v31.2s, v28.2s mul x3, x2, x4 umlal v20.2d, v30.2s, v17.2s umull v22.2d, v7.2s, v0.2s usra v27.2d, v16.2d, #32 umulh x11, x2, x4 movi v21.2d, #0xffffffff uzp2 v28.4s, v1.4s, v1.4s adds x15, x16, x7 and v5.16b, v27.16b, v21.16b adcs x3, x3, x8 usra v3.2d, v27.2d, #32 dup v29.2d, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2d, v31.2s, v17.2s mul x8, x9, x2 mov x7, v20.d[1] shl v19.2d, 
v22.2d, #33 xtn v25.2s, v29.2d rev64 v31.4s, v1.4s lsl x13, x14, #32 uzp2 v6.4s, v29.4s, v29.4s umlal v19.2d, v7.2s, v7.2s usra v3.2d, v5.2d, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4s, v31.4s, v29.4s xtn v4.2s, v1.2d adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2d, v25.2s, v28.2s adcs x11, x16, x16 umull v21.2d, v25.2s, v4.2s mov x17, v3.d[0] umull v18.2d, v6.2s, v28.2s adc x16, x8, xzr uaddlp v16.2d, v17.4s movi v1.2d, #0xffffffff subs x13, x13, x12 usra v31.2d, v21.2d, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2d, v16.2d, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16b, v31.16b, v1.16b adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2d, v6.2s, v4.2s usra v18.2d, v31.2d, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2d, v25.2s, v4.2s adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2d, v3.2d, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x19, x3, x17 sbcs x20, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [sp, #192] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 csetm x15, cc // cc = lo, ul, last cneg x1, x1, cc // cc = lo, ul, last stp x11, x14, [sp, #224] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc // cc = lo, ul, last cinv x16, x15, cc // cc = lo, ul, last mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc // cc = lo, ul, last cneg x9, x9, cc // cc = lo, ul, last subs x4, x2, x4 cneg x4, x4, cc // cc = lo, ul, last csetm x7, cc // cc = lo, ul, last subs x2, x10, x6 cinv x8, x8, cc // cc = lo, ul, last cneg x2, x2, cc // cc = lo, ul, last cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc // cc = lo, ul, last cneg x1, x1, cc // cc = lo, ul, last eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [sp, #192] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [sp, #224] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x19 adcs x1, x1, x20 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc 
x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff // #4294967295 adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 // #-4294967295 adcs x14, x14, x2 mov x2, #0x1 // #1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x19, x13, x1 and x13, x4, x9 adcs x20, x6, x13 and x1, x2, x9 adcs x7, x8, x1 adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [sp, #208] adc x17, x14, xzr stp x2, x17, [sp, #224] ldp x0, x1, [sp, #288] mov x6, #0xffffffff // #4294967295 subs x6, x6, x0 mov x7, #0xffffffff00000000 // #-4294967296 sbcs x7, x7, x1 ldp x0, x1, [sp, #304] mov x8, #0xfffffffffffffffe // #-2 sbcs x8, x8, x0 mov x13, #0xffffffffffffffff // #-1 sbcs x9, x13, x1 ldp x0, x1, [sp, #320] sbcs x10, x13, x0 sbc x11, x13, x1 mov x12, #0x9 // #9 mul x0, x12, x6 mul x1, x12, x7 mul x2, x12, x8 mul x3, x12, x9 mul x4, x12, x10 mul x5, x12, x11 umulh x6, x12, x6 umulh x7, x12, x7 umulh x8, x12, x8 umulh x9, x12, x9 umulh x10, x12, x10 umulh x12, x12, x11 adds x1, x1, x6 adcs x2, x2, x7 adcs x3, x3, x8 adcs x4, x4, x9 adcs x5, x5, x10 mov x6, #0x1 // #1 adc x6, x12, x6 ldp x8, x9, [sp, #144] ldp x10, x11, [sp, #160] ldp x12, x13, [sp, #176] mov x14, #0xc // #12 mul x15, x14, x8 umulh x8, x14, x8 adds x0, x0, x15 mul x15, x14, x9 umulh x9, x14, x9 adcs x1, x1, x15 mul x15, x14, x10 umulh x10, x14, x10 adcs x2, x2, x15 mul x15, x14, x11 umulh x11, x14, x11 adcs x3, x3, x15 mul x15, x14, x12 umulh x12, x14, x12 adcs x4, x4, x15 mul x15, x14, x13 umulh x13, x14, x13 adcs x5, x5, x15 adc x6, x6, xzr adds x1, x1, x8 adcs x2, x2, x9 adcs x3, x3, x10 adcs x4, x4, x11 adcs x5, x5, x12 adcs x6, x6, x13 lsl x7, x6, #32 subs x8, x6, x7 sbc x7, x7, xzr adds x0, x0, x8 adcs x1, x1, x7 adcs x2, x2, x6 adcs x3, x3, xzr adcs x4, x4, xzr adcs x5, x5, xzr csetm x6, cc // cc = lo, ul, last mov x7, #0xffffffff // #4294967295 and x7, x7, x6 adds x0, x0, x7 eor x7, x7, x6 adcs x1, x1, x7 mov x7, #0xfffffffffffffffe // #-2 and x7, x7, x6 adcs x2, x2, x7 adcs x3, x3, x6 adcs x4, x4, x6 adc x5, x5, x6 stp x0, x1, [sp, #288] stp x2, x3, [sp, #304] stp x4, x5, [sp, #320] mov x2, sp ldp x4, x3, [x2] subs x5, x19, x4 sbcs x6, x20, x3 ldp x7, x8, [sp, #208] ldp x4, x3, [x2, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #224] ldp x4, x3, [x2, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, cc // cc = lo, ul, last mov x4, #0xffffffff // #4294967295 and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #0xfffffffffffffffe // #-2 and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, 
x10, x3 stp x5, x6, [sp, #240] stp x7, x8, [sp, #256] stp x9, x10, [sp, #272] ldr q1, [sp, #48] ldp x9, x2, [sp, #48] ldr q0, [sp, #48] ldp x4, x6, [sp, #64] rev64 v21.4s, v1.4s uzp2 v28.4s, v1.4s, v1.4s umulh x7, x9, x2 xtn v17.2s, v1.2d mul v27.4s, v21.4s, v0.4s ldr q20, [sp, #80] xtn v30.2s, v0.2d ldr q1, [sp, #80] uzp2 v31.4s, v0.4s, v0.4s ldp x5, x10, [sp, #80] umulh x8, x9, x4 uaddlp v3.2d, v27.4s umull v16.2d, v30.2s, v17.2s mul x16, x9, x4 umull v27.2d, v30.2s, v28.2s shrn v0.2s, v20.2d, #32 xtn v7.2s, v20.2d shl v20.2d, v3.2d, #32 umull v3.2d, v31.2s, v28.2s mul x3, x2, x4 umlal v20.2d, v30.2s, v17.2s umull v22.2d, v7.2s, v0.2s usra v27.2d, v16.2d, #32 umulh x11, x2, x4 movi v21.2d, #0xffffffff uzp2 v28.4s, v1.4s, v1.4s adds x15, x16, x7 and v5.16b, v27.16b, v21.16b adcs x3, x3, x8 usra v3.2d, v27.2d, #32 dup v29.2d, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2d, v31.2s, v17.2s mul x8, x9, x2 mov x7, v20.d[1] shl v19.2d, v22.2d, #33 xtn v25.2s, v29.2d rev64 v31.4s, v1.4s lsl x13, x14, #32 uzp2 v6.4s, v29.4s, v29.4s umlal v19.2d, v7.2s, v7.2s usra v3.2d, v5.2d, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4s, v31.4s, v29.4s xtn v4.2s, v1.2d adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2d, v25.2s, v28.2s adcs x11, x16, x16 umull v21.2d, v25.2s, v4.2s mov x17, v3.d[0] umull v18.2d, v6.2s, v28.2s adc x16, x8, xzr uaddlp v16.2d, v17.4s movi v1.2d, #0xffffffff subs x13, x13, x12 usra v31.2d, v21.2d, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2d, v16.2d, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16b, v31.16b, v1.16b adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2d, v6.2s, v4.2s usra v18.2d, v31.2d, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2d, v25.2s, v4.2s adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2d, v3.2d, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x19, x3, x17 sbcs x20, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [sp, #192] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 csetm x15, cc // cc = lo, ul, last cneg x1, x1, cc // cc = lo, ul, last stp x11, x14, [sp, #224] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc // cc = lo, ul, last cinv x16, x15, cc // cc = lo, ul, last mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc // cc = lo, ul, last cneg x9, x9, cc // cc = lo, ul, last subs x4, x2, x4 cneg x4, x4, cc // cc = lo, ul, last csetm x7, cc // cc = lo, ul, last subs x2, x10, x6 cinv x8, x8, cc // cc = lo, ul, last cneg x2, x2, cc // cc = lo, ul, last cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc // cc = lo, ul, last cneg x1, x1, cc // cc = lo, ul, last 
eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [sp, #192] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [sp, #224] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x19 adcs x1, x1, x20 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff // #4294967295 adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 // #-4294967295 adcs x14, x14, x2 mov x2, #0x1 // #1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [sp, #192] adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [sp, #208] adc x17, x14, xzr stp x2, x17, [sp, #224] ldp x5, x6, [sp, #240] ldp x4, x3, [sp, #48] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #256] ldp x4, x3, [sp, #64] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #272] ldp x4, x3, [sp, #80] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, cc // cc = lo, ul, last mov x4, #0xffffffff // #4294967295 and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #0xfffffffffffffffe // #-2 and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [x25, #96] stp x7, x8, [x25, #112] stp x9, x10, [x25, #128] ldr q3, [sp, #288] ldr q25, [sp, #96] ldp x13, x23, [sp, #96] ldp x3, x21, [sp, #288] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [sp, #128] ldp x8, x24, [sp, #304] subs x6, x3, x21 ldr q0, [sp, #320] movi v23.2d, #0xffffffff csetm x10, cc // cc = lo, ul, last umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc // cc = lo, ul, last subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc // cc = lo, ul, last ldp x6, x14, [sp, #112] mul v27.4s, v4.4s, v0.4s 
uaddlp v20.2d, v6.4s cinv x5, x10, cc // cc = lo, ul, last mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc // cc = lo, ul, last umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc // cc = lo, ul, last usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc // cc = lo, ul, last cinv x10, x10, cc // cc = lo, ul, last cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc // cc = lo, ul, last eor x19, x19, x10 csetm x4, cc // cc = lo, ul, last subs x16, x6, x23 cneg x16, x16, cc // cc = lo, ul, last umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc // cc = lo, ul, last cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [sp, #128] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [sp, #320] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x26, x4, x16 mov x4, v27.d[0] sbcs x27, x20, x11 sbcs x20, x9, x12 sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #256] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc // cc = lo, ul, last csetm x2, cc // cc = lo, ul, last subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc // cc = lo, ul, last cneg x19, x19, cc // cc = lo, ul, last stp x9, x20, [sp, #272] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc // cc = lo, ul, last csetm x12, cc // cc = lo, ul, last subs x9, x17, x14 cinv x12, x12, cc // cc = lo, ul, last cneg x9, x9, cc // cc = lo, ul, last subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc // cc = lo, ul, last cneg x24, x10, cc // cc = lo, ul, last subs x10, x17, x15 cinv x7, x7, cc // cc = lo, ul, last cneg x10, x10, cc // cc = lo, ul, last subs x14, x13, x14 
sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #256] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #272] adds x20, x22, x26 mul x10, x13, x14 adcs x11, x11, x27 eor x9, x8, x21 adcs x26, x19, x17 stp x20, x11, [sp, #240] adcs x27, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #272] cneg x3, x21, cc // cc = lo, ul, last csetm x24, cc // cc = lo, ul, last umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc // cc = lo, ul, last csetm x16, cc // cc = lo, ul, last subs x21, x6, x15 cneg x22, x21, cc // cc = lo, ul, last cinv x21, x24, cc // cc = lo, ul, last subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc // cc = lo, ul, last csetm x24, cc // cc = lo, ul, last subs x20, x14, x15 cinv x24, x24, cc // cc = lo, ul, last mul x22, x3, x22 cneg x3, x20, cc // cc = lo, ul, last subs x13, x6, x14 cneg x20, x13, cc // cc = lo, ul, last cinv x15, x16, cc // cc = lo, ul, last adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe // #-2 eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #240] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #272] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x26 eor x1, x22, x9 adcs x24, x23, x27 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x26 adcs x15, x17, x27 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff // #4294967295 adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, 
x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc // cc = lo, ul, last and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #240] adcs x5, x13, x23 adcs x12, x8, x23 stp x14, x5, [sp, #256] adc x19, x15, x23 ldp x1, x2, [sp, #144] ldp x3, x4, [sp, #160] ldp x5, x6, [sp, #176] lsl x0, x1, #2 ldp x7, x8, [sp, #288] subs x0, x0, x7 extr x1, x2, x1, #62 sbcs x1, x1, x8 ldp x7, x8, [sp, #304] extr x2, x3, x2, #62 sbcs x2, x2, x7 extr x3, x4, x3, #62 sbcs x3, x3, x8 extr x4, x5, x4, #62 ldp x7, x8, [sp, #320] sbcs x4, x4, x7 extr x5, x6, x5, #62 sbcs x5, x5, x8 lsr x6, x6, #62 adc x6, x6, xzr lsl x7, x6, #32 subs x8, x6, x7 sbc x7, x7, xzr adds x0, x0, x8 adcs x1, x1, x7 adcs x2, x2, x6 adcs x3, x3, xzr adcs x4, x4, xzr adcs x5, x5, xzr csetm x8, cc // cc = lo, ul, last mov x9, #0xffffffff // #4294967295 and x9, x9, x8 adds x0, x0, x9 eor x9, x9, x8 adcs x1, x1, x9 mov x9, #0xfffffffffffffffe // #-2 and x9, x9, x8 adcs x2, x2, x9 adcs x3, x3, x8 adcs x4, x4, x8 adc x5, x5, x8 stp x0, x1, [x25] stp x2, x3, [x25, #16] stp x4, x5, [x25, #32] ldp x0, x1, [sp, #192] mov x6, #0xffffffff // #4294967295 subs x6, x6, x0 mov x7, #0xffffffff00000000 // #-4294967296 sbcs x7, x7, x1 ldp x0, x1, [sp, #208] mov x8, #0xfffffffffffffffe // #-2 sbcs x8, x8, x0 mov x13, #0xffffffffffffffff // #-1 sbcs x9, x13, x1 ldp x0, x1, [sp, #224] sbcs x10, x13, x0 sbc x11, x13, x1 lsl x0, x6, #3 extr x1, x7, x6, #61 extr x2, x8, x7, #61 extr x3, x9, x8, #61 extr x4, x10, x9, #61 extr x5, x11, x10, #61 lsr x6, x11, #61 add x6, x6, #0x1 ldp x8, x9, [sp, #240] ldp x10, x11, [sp, #256] mov x14, #0x3 // #3 mul x15, x14, x8 umulh x8, x14, x8 adds x0, x0, x15 mul x15, x14, x9 umulh x9, x14, x9 adcs x1, x1, x15 mul x15, x14, x10 umulh x10, x14, x10 adcs x2, x2, x15 mul x15, x14, x11 umulh x11, x14, x11 adcs x3, x3, x15 mul x15, x14, x12 umulh x12, x14, x12 adcs x4, x4, x15 mul x15, x14, x19 umulh x13, x14, x19 adcs x5, x5, x15 adc x6, x6, xzr adds x1, x1, x8 adcs x2, x2, x9 adcs x3, x3, x10 adcs x4, x4, x11 adcs x5, x5, x12 adcs x6, x6, x13 lsl x7, x6, #32 subs x8, x6, x7 sbc x7, x7, xzr adds x0, x0, x8 adcs x1, x1, x7 adcs x2, x2, x6 adcs x3, x3, xzr adcs x4, x4, xzr adcs x5, x5, xzr csetm x6, cc // cc = lo, ul, last mov x7, #0xffffffff // #4294967295 and x7, x7, x6 adds x0, x0, x7 eor x7, x7, x6 adcs x1, x1, x7 mov x7, #0xfffffffffffffffe // #-2 and x7, x7, x6 adcs x2, x2, x7 adcs x3, x3, x6 adcs x4, x4, x6 adc x5, x5, x6 stp x0, x1, [x25, #48] stp x2, x3, [x25, #64] stp x4, x5, [x25, #80] ldp x19, x20, [sp, #336] ldp x21, x22, [sp, #352] ldp x23, x24, [sp, #368] ldp x25, x26, [sp, #384] ldp x27, xzr, [sp, #400] CFI_INC_SP(416) CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp384_montjscalarmul_p384_montjdouble) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
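// ---------------------------------------------------------------------------
// Note: the montjadd/montjdouble code above leans heavily on one flag idiom:
// a subs sets the borrow, csetm materializes it as an all-ones mask, cneg
// takes the absolute difference, and cinv/eor/cmn/adcs later reapply the
// combined sign to a product (the usual sign handling for Karatsuba-style
// cross terms). A minimal C model of that idiom follows; the helper names
// are illustrative only and are not part of the s2n-bignum API.
// ---------------------------------------------------------------------------

#include <stdint.h>

/* Model of "subs; csetm m, cc; cneg d, d, cc": return |a - b| and an
   all-ones mask in *m recording whether a < b (i.e. whether the
   subtraction borrowed). */
static uint64_t abs_diff_masked(uint64_t a, uint64_t b, uint64_t *m)
{
    uint64_t d = a - b;
    uint64_t mask = (a < b) ? ~UINT64_C(0) : UINT64_C(0);
    *m = mask;
    return (d ^ mask) - mask;   /* conditional two's-complement negation */
}

/* Model of the later "eor p, p, m; cmn m, #1; adcs ..." sequence: negate
   the product word p exactly when the combined sign mask m is all ones. */
static uint64_t apply_sign(uint64_t p, uint64_t m)
{
    return (p ^ m) + (m & 1);
}

// On AArch64 the mask and the negation come essentially for free from the
// flags, which is why the assembly interleaves these with the multiplies;
// the C version is only meant to make the data flow legible.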
wlsfx/bnbb
1,925
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/bignum_mux_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// 384-bit multiplex/select z := x (if p nonzero) or z := y (if p zero)
// Inputs p, x[6], y[6]; output z[6]
//
// extern void bignum_mux_6(uint64_t p, uint64_t z[static 6],
//                          const uint64_t x[static 6],
//                          const uint64_t y[static 6]);
//
// It is assumed that all numbers x, y and z have the same size 6 digits.
//
// Standard ARM ABI: X0 = p, X1 = z, X2 = x, X3 = y
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mux_6)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mux_6)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mux_6)
        .text
        .balign 4

#define p x0
#define z x1
#define x x2
#define y x3
#define a x4

S2N_BN_SYMBOL(bignum_mux_6):
        CFI_START
        cmp x0, #0 // Set condition codes p = 0

        ldr a, [x]
        ldr p, [y]
        csel a, a, p, ne
        str a, [z]

        ldr a, [x, #8]
        ldr p, [y, #8]
        csel a, a, p, ne
        str a, [z, #8]

        ldr a, [x, #16]
        ldr p, [y, #16]
        csel a, a, p, ne
        str a, [z, #16]

        ldr a, [x, #24]
        ldr p, [y, #24]
        csel a, a, p, ne
        str a, [z, #24]

        ldr a, [x, #32]
        ldr p, [y, #32]
        csel a, a, p, ne
        str a, [z, #32]

        ldr a, [x, #40]
        ldr p, [y, #40]
        csel a, a, p, ne
        str a, [z, #40]

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_mux_6)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
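// ---------------------------------------------------------------------------
// Note: read as C, bignum_mux_6 is a digit-wise constant-time select. Below
// is a branch-free sketch of the same contract -- an illustrative model, not
// the library's implementation; the assembly gets its constant-time guarantee
// from a per-digit csel rather than from a software mask.
// ---------------------------------------------------------------------------

#include <stdint.h>

/* z := x if p is nonzero, else z := y, one 64-bit digit at a time. */
static void bignum_mux_6_model(uint64_t p, uint64_t z[6],
                               const uint64_t x[6], const uint64_t y[6])
{
    uint64_t m = (p != 0) ? ~UINT64_C(0) : UINT64_C(0); /* "cmp p, #0" */
    for (int i = 0; i < 6; i++)
        z[i] = (x[i] & m) | (y[i] & ~m);                /* csel per digit */
}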
wlsfx/bnbb
83,317
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/p384_montjdouble.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Point doubling on NIST curve P-384 in Montgomery-Jacobian coordinates
//
// extern void p384_montjdouble(uint64_t p3[static 18],
//                              const uint64_t p1[static 18]);
//
// Does p3 := 2 * p1 where all points are regarded as Jacobian triples with
// each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384.
// A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3).
//
// Standard ARM ABI: X0 = p3, X1 = p1
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

// This is functionally equivalent to p384_montjdouble in unopt/p384_montjdouble.S.
// This is the result of doing the following sequence of optimizations:
//   1. Function inlining
//   2. Eliminating redundant load/store instructions
//   3. Folding (add addr, const) + load/store
// Function inlining is done manually. The second and third optimizations are
// done by a script.

S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjdouble)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(p384_montjdouble)
S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjdouble)

.text
.balign 4

// Size of individual field elements

#define NUMSIZE 48
#define NSPACE NUMSIZE*7

S2N_BN_SYMBOL(p384_montjdouble):

CFI_START

// Save regs and make room on stack for temporary variables

CFI_DEC_SP(416)
CFI_STACKSAVE2X(x19,x20,336,344)
CFI_STACKSAVE2X(x21,x22,352,360)
CFI_STACKSAVE2X(x23,x24,368,376)
CFI_STACKSAVE2X(x25,x26,384,392)
CFI_STACKSAVE1Z(x27,400)
mov x25, x0
mov x26, x1
mov x0, sp
ldr q1, [x26, #96]
ldp x9, x2, [x26, #96]
ldr q0, [x26, #96]
ldp x4, x6, [x26, #112]
rev64 v21.4s, v1.4s
uzp2 v28.4s, v1.4s, v1.4s
umulh x7, x9, x2
xtn v17.2s, v1.2d
mul v27.4s, v21.4s, v0.4s
ldr q20, [x26, #128]
xtn v30.2s, v0.2d
ldr q1, [x26, #128]
uzp2 v31.4s, v0.4s, v0.4s
ldp x5, x10, [x26, #128]
umulh x8, x9, x4
uaddlp v3.2d, v27.4s
umull v16.2d, v30.2s, v17.2s
mul x16, x9, x4
umull v27.2d, v30.2s, v28.2s
shrn v0.2s, v20.2d, #32
xtn v7.2s, v20.2d
shl v20.2d, v3.2d, #32
umull v3.2d, v31.2s, v28.2s
mul x3, x2, x4
umlal v20.2d, v30.2s, v17.2s
umull v22.2d, v7.2s, v0.2s
usra v27.2d, v16.2d, #32
umulh x11, x2, x4
movi v21.2d, #0xffffffff
uzp2 v28.4s, v1.4s, v1.4s
adds x15, x16, x7
and v5.16b, v27.16b, v21.16b
adcs x3, x3, x8
usra v3.2d, v27.2d, #32
dup v29.2d, x6
adcs x16, x11, xzr
mov x14, v20.d[0]
umlal v5.2d, v31.2s, v17.2s
mul x8, x9, x2
mov x7, v20.d[1]
shl v19.2d, v22.2d, #33
xtn v25.2s, v29.2d
rev64 v31.4s, v1.4s
lsl x13, x14, #32
uzp2 v6.4s, v29.4s, v29.4s
umlal v19.2d, v7.2s, v7.2s
usra v3.2d, v5.2d, #32
adds x1, x8, x8
umulh x8, x4, x4
add x12, x13, x14
mul v17.4s, v31.4s, v29.4s
xtn v4.2s, v1.2d
adcs x14, x15, x15
lsr x13, x12, #32
adcs x15, x3, x3
umull v31.2d, v25.2s, v28.2s
adcs x11, x16, x16
umull v21.2d, v25.2s, v4.2s
mov x17, v3.d[0]
umull v18.2d, v6.2s, v28.2s
adc x16, x8, xzr
uaddlp v16.2d, v17.4s
movi v1.2d, #0xffffffff
subs x13, x13, x12
usra v31.2d, v21.2d, #32
sbc x8, x12, xzr
adds x17, x17, x1
mul x1, x4, x4
shl v28.2d, v16.2d, #32
mov x3, v3.d[1]
adcs x14, x7, x14
extr x7, x8, x13, #32
adcs x13, x3, x15
and v3.16b, v31.16b, v1.16b
adcs x11, x1, x11
lsr x1, x8, #32
umlal v3.2d, v6.2s, v4.2s
usra v18.2d, v31.2d, #32
adc x3, x16, xzr
adds x1, x1, x12
umlal v28.2d, v25.2s, v4.2s
adc x16, xzr, xzr
subs x15, x17, x7
sbcs x7, x14, x1
lsl x1, x15, #32
sbcs x16, x13, x16
add x8, x1, x15
usra v18.2d, v3.2d, #32
sbcs
x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x15, x3, x17 sbcs x3, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [x0] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 stp x15, x3, [x0, #16] csetm x15, cc cneg x1, x1, cc stp x11, x14, [x0, #32] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc cinv x16, x15, cc mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc cneg x9, x9, cc subs x4, x2, x4 cneg x4, x4, cc csetm x7, cc subs x2, x10, x6 cinv x8, x8, cc cneg x2, x2, cc cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc cneg x1, x1, cc eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 ldp x9, x17, [x0, #16] umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [x0] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [x0, #32] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x9 adcs x1, x1, x17 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 adcs x14, x14, x2 mov x2, #0x1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and 
x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [x0] adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [x0, #16] adc x17, x14, xzr stp x2, x17, [x0, #32] ldr q1, [x26, #48] ldp x9, x2, [x26, #48] ldr q0, [x26, #48] ldp x4, x6, [x26, #64] rev64 v21.4s, v1.4s uzp2 v28.4s, v1.4s, v1.4s umulh x7, x9, x2 xtn v17.2s, v1.2d mul v27.4s, v21.4s, v0.4s ldr q20, [x26, #80] xtn v30.2s, v0.2d ldr q1, [x26, #80] uzp2 v31.4s, v0.4s, v0.4s ldp x5, x10, [x26, #80] umulh x8, x9, x4 uaddlp v3.2d, v27.4s umull v16.2d, v30.2s, v17.2s mul x16, x9, x4 umull v27.2d, v30.2s, v28.2s shrn v0.2s, v20.2d, #32 xtn v7.2s, v20.2d shl v20.2d, v3.2d, #32 umull v3.2d, v31.2s, v28.2s mul x3, x2, x4 umlal v20.2d, v30.2s, v17.2s umull v22.2d, v7.2s, v0.2s usra v27.2d, v16.2d, #32 umulh x11, x2, x4 movi v21.2d, #0xffffffff uzp2 v28.4s, v1.4s, v1.4s adds x15, x16, x7 and v5.16b, v27.16b, v21.16b adcs x3, x3, x8 usra v3.2d, v27.2d, #32 dup v29.2d, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2d, v31.2s, v17.2s mul x8, x9, x2 mov x7, v20.d[1] shl v19.2d, v22.2d, #33 xtn v25.2s, v29.2d rev64 v31.4s, v1.4s lsl x13, x14, #32 uzp2 v6.4s, v29.4s, v29.4s umlal v19.2d, v7.2s, v7.2s usra v3.2d, v5.2d, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4s, v31.4s, v29.4s xtn v4.2s, v1.2d adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2d, v25.2s, v28.2s adcs x11, x16, x16 umull v21.2d, v25.2s, v4.2s mov x17, v3.d[0] umull v18.2d, v6.2s, v28.2s adc x16, x8, xzr uaddlp v16.2d, v17.4s movi v1.2d, #0xffffffff subs x13, x13, x12 usra v31.2d, v21.2d, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2d, v16.2d, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16b, v31.16b, v1.16b adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2d, v6.2s, v4.2s usra v18.2d, v31.2d, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2d, v25.2s, v4.2s adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2d, v3.2d, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x15, x3, x17 sbcs x3, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [sp, #48] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 stp x15, x3, [sp, #64] csetm x15, cc cneg x1, x1, cc stp x11, x14, [sp, #80] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc cinv x16, x15, cc mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc cneg x9, x9, cc subs x4, x2, x4 cneg x4, x4, cc csetm x7, cc subs x2, x10, x6 cinv x8, x8, cc cneg x2, x2, cc cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc cneg x1, x1, cc eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, 
x9 adcs x15, x17, x8 ldp x9, x17, [sp, #64] umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [sp, #48] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [sp, #80] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x9 adcs x1, x1, x17 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 adcs x14, x14, x2 mov x2, #0x1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [sp, #48] adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [sp, #64] adc x17, x14, xzr stp x2, x17, [sp, #80] ldp x5, x6, [x26] ldp x4, x3, [sp] adds x5, x5, x4 adcs x6, x6, x3 ldp x7, x8, [x26, #16] ldp x4, x3, [sp, #16] adcs x7, x7, x4 adcs x8, x8, x3 ldp x9, x10, [x26, #32] ldp x4, x3, [sp, #32] adcs x9, x9, x4 adcs x10, x10, x3 csetm x3, cs mov x4, #0xffffffff and x4, x4, x3 subs x5, x5, x4 eor x4, x4, x3 sbcs x6, x6, x4 mov x4, #0xfffffffffffffffe and x4, x4, x3 sbcs x7, x7, x4 sbcs x8, x8, x3 sbcs x9, x9, x3 sbc x10, x10, x3 stp x5, x6, [sp, #240] stp x7, x8, [sp, #256] stp x9, x10, [sp, #272] mov x2, sp ldp x5, x6, [x26, #0] ldp x4, x3, [x2] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x26, #16] ldp x4, x3, [x2, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [x26, #32] ldp x4, x3, [x2, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, cc mov x4, #0xffffffff and x4, x4, x3 adds x13, x5, x4 eor x4, x4, x3 adcs x23, x6, x4 mov x4, #0xfffffffffffffffe and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x13, x23, [sp, #192] stp x7, x8, [sp, #208] stp x9, x10, [sp, #224] ldr q3, [sp, #240] ldr q25, [sp, #192] ldp x3, x21, [sp, #240] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [sp, #224] ldp x8, x24, [sp, #256] subs x6, x3, x21 ldr 
q0, [sp, #272] movi v23.2d, #0xffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc ldp x6, x14, [sp, #208] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [sp, #224] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [sp, #272] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [sp, #96] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #112] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [sp, #128] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, 
x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [sp, #96] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #112] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #128] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [sp, #96] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [sp, #112] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #128] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc subs x21, x6, x15 cneg x22, x21, cc cinv x21, x24, cc subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc csetm x24, cc subs x20, x14, x15 cinv x24, x24, cc mul x22, x3, x22 cneg x3, x20, cc subs x13, x6, x14 cneg x20, x13, cc cinv x15, x16, cc adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #96] ldp x21, x12, [sp, #112] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #128] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #96] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #112] adc x12, x15, x23 stp x21, x12, [sp, 
#128] ldp x5, x6, [x26, #48] ldp x4, x3, [x26, #96] adds x5, x5, x4 adcs x6, x6, x3 ldp x7, x8, [x26, #64] ldp x4, x3, [x26, #112] adcs x7, x7, x4 adcs x8, x8, x3 ldp x9, x10, [x26, #80] ldp x4, x3, [x26, #128] adcs x9, x9, x4 adcs x10, x10, x3 adc x3, xzr, xzr mov x4, #0xffffffff cmp x5, x4 mov x4, #0xffffffff00000000 sbcs xzr, x6, x4 mov x4, #0xfffffffffffffffe sbcs xzr, x7, x4 adcs xzr, x8, xzr adcs xzr, x9, xzr adcs xzr, x10, xzr adcs x3, x3, xzr csetm x3, ne mov x4, #0xffffffff and x4, x4, x3 subs x5, x5, x4 eor x4, x4, x3 sbcs x6, x6, x4 mov x4, #0xfffffffffffffffe and x4, x4, x3 sbcs x7, x7, x4 sbcs x8, x8, x3 sbcs x9, x9, x3 sbc x10, x10, x3 stp x5, x6, [sp, #240] stp x7, x8, [sp, #256] stp x9, x10, [sp, #272] ldr q1, [sp, #96] ldp x9, x2, [sp, #96] ldr q0, [sp, #96] ldp x4, x6, [sp, #112] rev64 v21.4s, v1.4s uzp2 v28.4s, v1.4s, v1.4s umulh x7, x9, x2 xtn v17.2s, v1.2d mul v27.4s, v21.4s, v0.4s ldr q20, [sp, #128] xtn v30.2s, v0.2d ldr q1, [sp, #128] uzp2 v31.4s, v0.4s, v0.4s ldp x5, x10, [sp, #128] umulh x8, x9, x4 uaddlp v3.2d, v27.4s umull v16.2d, v30.2s, v17.2s mul x16, x9, x4 umull v27.2d, v30.2s, v28.2s shrn v0.2s, v20.2d, #32 xtn v7.2s, v20.2d shl v20.2d, v3.2d, #32 umull v3.2d, v31.2s, v28.2s mul x3, x2, x4 umlal v20.2d, v30.2s, v17.2s umull v22.2d, v7.2s, v0.2s usra v27.2d, v16.2d, #32 umulh x11, x2, x4 movi v21.2d, #0xffffffff uzp2 v28.4s, v1.4s, v1.4s adds x15, x16, x7 and v5.16b, v27.16b, v21.16b adcs x3, x3, x8 usra v3.2d, v27.2d, #32 dup v29.2d, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2d, v31.2s, v17.2s mul x8, x9, x2 mov x7, v20.d[1] shl v19.2d, v22.2d, #33 xtn v25.2s, v29.2d rev64 v31.4s, v1.4s lsl x13, x14, #32 uzp2 v6.4s, v29.4s, v29.4s umlal v19.2d, v7.2s, v7.2s usra v3.2d, v5.2d, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4s, v31.4s, v29.4s xtn v4.2s, v1.2d adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2d, v25.2s, v28.2s adcs x11, x16, x16 umull v21.2d, v25.2s, v4.2s mov x17, v3.d[0] umull v18.2d, v6.2s, v28.2s adc x16, x8, xzr uaddlp v16.2d, v17.4s movi v1.2d, #0xffffffff subs x13, x13, x12 usra v31.2d, v21.2d, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2d, v16.2d, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16b, v31.16b, v1.16b adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2d, v6.2s, v4.2s usra v18.2d, v31.2d, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2d, v25.2s, v4.2s adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2d, v3.2d, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x15, x3, x17 sbcs x3, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [sp, #288] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 stp x15, x3, [sp, #304] csetm x15, cc cneg x1, x1, cc stp x11, x14, [sp, #320] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc cinv x16, x15, cc mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc 
x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc cneg x9, x9, cc subs x4, x2, x4 cneg x4, x4, cc csetm x7, cc subs x2, x10, x6 cinv x8, x8, cc cneg x2, x2, cc cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc cneg x1, x1, cc eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 ldp x9, x17, [sp, #304] umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [sp, #288] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [sp, #320] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x9 adcs x1, x1, x17 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 adcs x14, x14, x2 mov x2, #0x1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [sp, #288] adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [sp, #304] adc x17, x14, xzr stp x2, x17, [sp, #320] ldr q3, [x26, #0] ldr q25, [sp, #48] ldp x13, x23, [sp, #48] ldp x3, x21, [x26, #0] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [sp, #80] ldp x8, x24, [x26, #16] subs x6, x3, x21 ldr q0, [x26, #32] movi v23.2d, #0xffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc ldp x6, x14, [sp, #64] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, 
x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [sp, #80] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [x26, #32] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x26, x4, x16 mov x4, v27.d[0] sbcs x27, x20, x11 sbcs x20, x9, x12 sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #160] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [sp, #176] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #160] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #176] adds x20, x22, x26 mul x10, x13, x14 
adcs x11, x11, x27 eor x9, x8, x21 adcs x26, x19, x17 stp x20, x11, [sp, #144] adcs x27, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #176] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc subs x21, x6, x15 cneg x22, x21, cc cinv x21, x24, cc subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc csetm x24, cc subs x20, x14, x15 cinv x24, x24, cc mul x22, x3, x22 cneg x3, x20, cc subs x13, x6, x14 cneg x20, x13, cc cinv x15, x16, cc adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [sp, #144] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [sp, #176] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x26 eor x1, x22, x9 adcs x24, x23, x27 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x26 adcs x15, x17, x27 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [sp, #144] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [sp, #160] adc x12, x15, x23 stp x21, x12, [sp, #176] ldr q1, [sp, #240] ldp x9, x2, [sp, #240] ldr q0, [sp, #240] ldp x4, x6, [sp, #256] rev64 v21.4s, v1.4s uzp2 v28.4s, v1.4s, v1.4s umulh x7, x9, x2 xtn v17.2s, v1.2d mul v27.4s, v21.4s, v0.4s ldr q20, [sp, #272] xtn v30.2s, v0.2d ldr q1, [sp, #272] uzp2 v31.4s, v0.4s, v0.4s ldp x5, x10, [sp, #272] umulh x8, x9, x4 uaddlp v3.2d, v27.4s umull v16.2d, v30.2s, v17.2s mul x16, x9, x4 umull v27.2d, v30.2s, v28.2s shrn v0.2s, v20.2d, #32 xtn v7.2s, v20.2d shl v20.2d, v3.2d, #32 umull v3.2d, v31.2s, v28.2s mul x3, x2, x4 umlal v20.2d, v30.2s, v17.2s umull v22.2d, 
v7.2s, v0.2s usra v27.2d, v16.2d, #32 umulh x11, x2, x4 movi v21.2d, #0xffffffff uzp2 v28.4s, v1.4s, v1.4s adds x15, x16, x7 and v5.16b, v27.16b, v21.16b adcs x3, x3, x8 usra v3.2d, v27.2d, #32 dup v29.2d, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2d, v31.2s, v17.2s mul x8, x9, x2 mov x7, v20.d[1] shl v19.2d, v22.2d, #33 xtn v25.2s, v29.2d rev64 v31.4s, v1.4s lsl x13, x14, #32 uzp2 v6.4s, v29.4s, v29.4s umlal v19.2d, v7.2s, v7.2s usra v3.2d, v5.2d, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4s, v31.4s, v29.4s xtn v4.2s, v1.2d adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2d, v25.2s, v28.2s adcs x11, x16, x16 umull v21.2d, v25.2s, v4.2s mov x17, v3.d[0] umull v18.2d, v6.2s, v28.2s adc x16, x8, xzr uaddlp v16.2d, v17.4s movi v1.2d, #0xffffffff subs x13, x13, x12 usra v31.2d, v21.2d, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2d, v16.2d, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16b, v31.16b, v1.16b adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2d, v6.2s, v4.2s usra v18.2d, v31.2d, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2d, v25.2s, v4.2s adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2d, v3.2d, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x19, x3, x17 sbcs x20, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [sp, #192] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 csetm x15, cc cneg x1, x1, cc stp x11, x14, [sp, #224] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc cinv x16, x15, cc mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc cneg x9, x9, cc subs x4, x2, x4 cneg x4, x4, cc csetm x7, cc subs x2, x10, x6 cinv x8, x8, cc cneg x2, x2, cc cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc cneg x1, x1, cc eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [sp, #192] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [sp, #224] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x19 adcs x1, x1, x20 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr 
x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 adcs x14, x14, x2 mov x2, #0x1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x19, x13, x1 and x13, x4, x9 adcs x20, x6, x13 and x1, x2, x9 adcs x7, x8, x1 adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [sp, #208] adc x17, x14, xzr stp x2, x17, [sp, #224] ldp x0, x1, [sp, #288] mov x6, #0xffffffff subs x6, x6, x0 mov x7, #0xffffffff00000000 sbcs x7, x7, x1 ldp x0, x1, [sp, #304] mov x8, #0xfffffffffffffffe sbcs x8, x8, x0 mov x13, #0xffffffffffffffff sbcs x9, x13, x1 ldp x0, x1, [sp, #320] sbcs x10, x13, x0 sbc x11, x13, x1 mov x12, #0x9 mul x0, x12, x6 mul x1, x12, x7 mul x2, x12, x8 mul x3, x12, x9 mul x4, x12, x10 mul x5, x12, x11 umulh x6, x12, x6 umulh x7, x12, x7 umulh x8, x12, x8 umulh x9, x12, x9 umulh x10, x12, x10 umulh x12, x12, x11 adds x1, x1, x6 adcs x2, x2, x7 adcs x3, x3, x8 adcs x4, x4, x9 adcs x5, x5, x10 mov x6, #0x1 adc x6, x12, x6 ldp x8, x9, [sp, #144] ldp x10, x11, [sp, #160] ldp x12, x13, [sp, #176] mov x14, #0xc mul x15, x14, x8 umulh x8, x14, x8 adds x0, x0, x15 mul x15, x14, x9 umulh x9, x14, x9 adcs x1, x1, x15 mul x15, x14, x10 umulh x10, x14, x10 adcs x2, x2, x15 mul x15, x14, x11 umulh x11, x14, x11 adcs x3, x3, x15 mul x15, x14, x12 umulh x12, x14, x12 adcs x4, x4, x15 mul x15, x14, x13 umulh x13, x14, x13 adcs x5, x5, x15 adc x6, x6, xzr adds x1, x1, x8 adcs x2, x2, x9 adcs x3, x3, x10 adcs x4, x4, x11 adcs x5, x5, x12 adcs x6, x6, x13 lsl x7, x6, #32 subs x8, x6, x7 sbc x7, x7, xzr adds x0, x0, x8 adcs x1, x1, x7 adcs x2, x2, x6 adcs x3, x3, xzr adcs x4, x4, xzr adcs x5, x5, xzr csetm x6, cc mov x7, #0xffffffff and x7, x7, x6 adds x0, x0, x7 eor x7, x7, x6 adcs x1, x1, x7 mov x7, #0xfffffffffffffffe and x7, x7, x6 adcs x2, x2, x7 adcs x3, x3, x6 adcs x4, x4, x6 adc x5, x5, x6 stp x0, x1, [sp, #288] stp x2, x3, [sp, #304] stp x4, x5, [sp, #320] mov x2, sp ldp x4, x3, [x2] subs x5, x19, x4 sbcs x6, x20, x3 ldp x7, x8, [sp, #208] ldp x4, x3, [x2, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #224] ldp x4, x3, [x2, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, cc mov x4, #0xffffffff and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #0xfffffffffffffffe and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [sp, #240] stp x7, x8, [sp, #256] stp x9, x10, [sp, #272] ldr q1, [sp, #48] ldp x9, x2, [sp, #48] ldr 
q0, [sp, #48] ldp x4, x6, [sp, #64] rev64 v21.4s, v1.4s uzp2 v28.4s, v1.4s, v1.4s umulh x7, x9, x2 xtn v17.2s, v1.2d mul v27.4s, v21.4s, v0.4s ldr q20, [sp, #80] xtn v30.2s, v0.2d ldr q1, [sp, #80] uzp2 v31.4s, v0.4s, v0.4s ldp x5, x10, [sp, #80] umulh x8, x9, x4 uaddlp v3.2d, v27.4s umull v16.2d, v30.2s, v17.2s mul x16, x9, x4 umull v27.2d, v30.2s, v28.2s shrn v0.2s, v20.2d, #32 xtn v7.2s, v20.2d shl v20.2d, v3.2d, #32 umull v3.2d, v31.2s, v28.2s mul x3, x2, x4 umlal v20.2d, v30.2s, v17.2s umull v22.2d, v7.2s, v0.2s usra v27.2d, v16.2d, #32 umulh x11, x2, x4 movi v21.2d, #0xffffffff uzp2 v28.4s, v1.4s, v1.4s adds x15, x16, x7 and v5.16b, v27.16b, v21.16b adcs x3, x3, x8 usra v3.2d, v27.2d, #32 dup v29.2d, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2d, v31.2s, v17.2s mul x8, x9, x2 mov x7, v20.d[1] shl v19.2d, v22.2d, #33 xtn v25.2s, v29.2d rev64 v31.4s, v1.4s lsl x13, x14, #32 uzp2 v6.4s, v29.4s, v29.4s umlal v19.2d, v7.2s, v7.2s usra v3.2d, v5.2d, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4s, v31.4s, v29.4s xtn v4.2s, v1.2d adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2d, v25.2s, v28.2s adcs x11, x16, x16 umull v21.2d, v25.2s, v4.2s mov x17, v3.d[0] umull v18.2d, v6.2s, v28.2s adc x16, x8, xzr uaddlp v16.2d, v17.4s movi v1.2d, #0xffffffff subs x13, x13, x12 usra v31.2d, v21.2d, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2d, v16.2d, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16b, v31.16b, v1.16b adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2d, v6.2s, v4.2s usra v18.2d, v31.2d, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2d, v25.2s, v4.2s adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2d, v3.2d, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x19, x3, x17 sbcs x20, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [sp, #192] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 csetm x15, cc cneg x1, x1, cc stp x11, x14, [sp, #224] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc cinv x16, x15, cc mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc cneg x9, x9, cc subs x4, x2, x4 cneg x4, x4, cc csetm x7, cc subs x2, x10, x6 cinv x8, x8, cc cneg x2, x2, cc cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc cneg x1, x1, cc eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [sp, #192] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [sp, #224] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, 
x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x19 adcs x1, x1, x20 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 adcs x14, x14, x2 mov x2, #0x1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [sp, #192] adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [sp, #208] adc x17, x14, xzr stp x2, x17, [sp, #224] ldp x5, x6, [sp, #240] ldp x4, x3, [sp, #48] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #256] ldp x4, x3, [sp, #64] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #272] ldp x4, x3, [sp, #80] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, cc mov x4, #0xffffffff and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #0xfffffffffffffffe and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [x25, #96] stp x7, x8, [x25, #112] stp x9, x10, [x25, #128] ldr q3, [sp, #288] ldr q25, [sp, #96] ldp x13, x23, [sp, #96] ldp x3, x21, [sp, #288] rev64 v23.4s, v25.4s uzp1 v17.4s, v25.4s, v3.4s umulh x15, x3, x13 mul v6.4s, v23.4s, v3.4s uzp1 v3.4s, v3.4s, v3.4s ldr q27, [sp, #128] ldp x8, x24, [sp, #304] subs x6, x3, x21 ldr q0, [sp, #320] movi v23.2d, #0xffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4s, v27.4s uzp2 v25.4s, v27.4s, v27.4s cneg x4, x6, cc subs x7, x23, x13 xtn v22.2s, v0.2d xtn v24.2s, v27.2d cneg x20, x7, cc ldp x6, x14, [sp, #112] mul v27.4s, v4.4s, v0.4s uaddlp v20.2d, v6.4s cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4s, v0.4s, v0.4s umull v21.2d, v22.2s, v25.2s shl v0.2d, v20.2d, #32 umlal v0.2d, v3.2s, v17.2s mul x22, x8, x6 umull v1.2d, v6.2s, v25.2s subs x12, x3, x8 umull v20.2d, v22.2s, v24.2s cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2d, v20.2d, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2d, v21.2d, #32 adds x22, x15, x7 and v26.16b, v21.16b, v23.16b 
adcs x16, x12, x15 uaddlp v25.2d, v27.4s adcs x9, x19, x12 umlal v26.2d, v6.2s, v24.2s adc x4, x19, xzr adds x16, x16, x7 shl v27.2d, v25.2d, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2d, v22.2s, v24.2s mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2d, v26.2d, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [sp, #128] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [sp, #320] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x26, x4, x16 mov x4, v27.d[0] sbcs x27, x20, x11 sbcs x20, x9, x12 sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [sp, #256] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [sp, #272] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [sp, #256] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [sp, #272] adds x20, x22, x26 mul x10, x13, x14 adcs x11, x11, x27 eor x9, x8, x21 adcs x26, x19, x17 stp x20, x11, [sp, #240] adcs x27, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [sp, #272] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, 
x8, x9
        cneg x17, x21, cc
        csetm x16, cc
        subs x21, x6, x15
        cneg x22, x21, cc
        cinv x21, x24, cc
        subs x20, x23, x13
        umulh x12, x3, x22
        cneg x23, x20, cc
        csetm x24, cc
        subs x20, x14, x15
        cinv x24, x24, cc
        mul x22, x3, x22
        cneg x3, x20, cc
        subs x13, x6, x14
        cneg x20, x13, cc
        cinv x15, x16, cc
        adds x13, x5, x10
        mul x4, x23, x3
        adcs x11, x11, x1
        adc x14, x2, xzr
        adds x5, x13, x8
        adcs x16, x11, x13
        umulh x23, x23, x3
        adcs x3, x14, x11
        adc x1, x14, xzr
        adds x10, x16, x8
        adcs x6, x3, x13
        adcs x8, x1, x11
        umulh x13, x17, x20
        eor x1, x4, x24
        adc x4, x14, xzr
        cmn x24, #0x1
        adcs x1, x5, x1
        eor x16, x23, x24
        eor x11, x1, x9
        adcs x23, x10, x16
        eor x2, x22, x21
        adcs x3, x6, x24
        mul x14, x17, x20
        eor x17, x13, x15
        adcs x13, x8, x24
        adc x8, x4, x24
        cmn x21, #0x1
        adcs x6, x23, x2
        mov x16, #0xfffffffffffffffe
        eor x20, x12, x21
        adcs x20, x3, x20
        eor x23, x14, x15
        adcs x2, x13, x21
        adc x8, x8, x21
        cmn x15, #0x1
        ldp x5, x4, [sp, #240]
        adcs x22, x20, x23
        eor x23, x22, x9
        adcs x17, x2, x17
        adc x22, x8, x15
        cmn x9, #0x1
        adcs x15, x7, x5
        ldp x10, x14, [sp, #272]
        eor x1, x6, x9
        lsl x2, x15, #32
        adcs x8, x11, x4
        adcs x13, x1, x26
        eor x1, x22, x9
        adcs x24, x23, x27
        eor x11, x17, x9
        adcs x23, x11, x10
        adcs x7, x1, x14
        adcs x17, x9, x19
        adcs x20, x9, xzr
        add x1, x2, x15
        lsr x3, x1, #32
        adcs x11, x9, xzr
        adc x9, x9, xzr
        subs x3, x3, x1
        sbc x6, x1, xzr
        adds x24, x24, x5
        adcs x4, x23, x4
        extr x3, x6, x3, #32
        lsr x6, x6, #32
        adcs x21, x7, x26
        adcs x15, x17, x27
        adcs x7, x20, x10
        adcs x20, x11, x14
        mov x14, #0xffffffff
        adc x22, x9, x19
        adds x12, x6, x1
        adc x10, xzr, xzr
        subs x3, x8, x3
        sbcs x12, x13, x12
        lsl x9, x3, #32
        add x3, x9, x3
        sbcs x10, x24, x10
        sbcs x24, x4, xzr
        lsr x9, x3, #32
        sbcs x21, x21, xzr
        sbc x1, x1, xzr
        subs x9, x9, x3
        sbc x13, x3, xzr
        extr x9, x13, x9, #32
        lsr x13, x13, #32
        adds x13, x13, x3
        adc x6, xzr, xzr
        subs x12, x12, x9
        sbcs x17, x10, x13
        lsl x2, x12, #32
        sbcs x10, x24, x6
        add x9, x2, x12
        sbcs x6, x21, xzr
        lsr x5, x9, #32
        sbcs x21, x1, xzr
        sbc x13, x3, xzr
        subs x8, x5, x9
        sbc x19, x9, xzr
        lsr x12, x19, #32
        extr x3, x19, x8, #32
        adds x8, x12, x9
        adc x1, xzr, xzr
        subs x2, x17, x3
        sbcs x12, x10, x8
        sbcs x5, x6, x1
        sbcs x3, x21, xzr
        sbcs x19, x13, xzr
        sbc x24, x9, xzr
        adds x23, x15, x3
        adcs x8, x7, x19
        adcs x11, x20, x24
        adc x9, x22, xzr
        add x24, x9, #0x1
        lsl x7, x24, #32
        subs x21, x24, x7
        sbc x10, x7, xzr
        adds x6, x2, x21
        adcs x7, x12, x10
        adcs x24, x5, x24
        adcs x13, x23, xzr
        adcs x8, x8, xzr
        adcs x15, x11, xzr
        csetm x23, cc
        and x11, x16, x23
        and x20, x14, x23
        adds x22, x6, x20
        eor x3, x20, x23
        adcs x5, x7, x3
        adcs x14, x24, x11
        stp x22, x5, [sp, #240]
        adcs x5, x13, x23
        adcs x12, x8, x23
        stp x14, x5, [sp, #256]
        adc x19, x15, x23
        ldp x1, x2, [sp, #144]
        ldp x3, x4, [sp, #160]
        ldp x5, x6, [sp, #176]
        lsl x0, x1, #2
        ldp x7, x8, [sp, #288]
        subs x0, x0, x7
        extr x1, x2, x1, #62
        sbcs x1, x1, x8
        ldp x7, x8, [sp, #304]
        extr x2, x3, x2, #62
        sbcs x2, x2, x7
        extr x3, x4, x3, #62
        sbcs x3, x3, x8
        extr x4, x5, x4, #62
        ldp x7, x8, [sp, #320]
        sbcs x4, x4, x7
        extr x5, x6, x5, #62
        sbcs x5, x5, x8
        lsr x6, x6, #62
        adc x6, x6, xzr
        lsl x7, x6, #32
        subs x8, x6, x7
        sbc x7, x7, xzr
        adds x0, x0, x8
        adcs x1, x1, x7
        adcs x2, x2, x6
        adcs x3, x3, xzr
        adcs x4, x4, xzr
        adcs x5, x5, xzr
        csetm x8, cc
        mov x9, #0xffffffff
        and x9, x9, x8
        adds x0, x0, x9
        eor x9, x9, x8
        adcs x1, x1, x9
        mov x9, #0xfffffffffffffffe
        and x9, x9, x8
        adcs x2, x2, x9
        adcs x3, x3, x8
        adcs x4, x4, x8
        adc x5, x5, x8
        stp x0, x1, [x25]
        stp x2, x3, [x25, #16]
        stp x4, x5, [x25, #32]
        ldp x0, x1, [sp, #192]
        mov x6, #0xffffffff
        subs x6, x6, x0
        mov x7, #0xffffffff00000000
        sbcs x7, x7, x1
        ldp x0, x1, [sp, #208]
        mov x8, #0xfffffffffffffffe
        sbcs x8, x8, x0
        mov x13, #0xffffffffffffffff
        sbcs x9, x13, x1
        ldp x0, x1, [sp, #224]
        sbcs x10, x13, x0
        sbc x11, x13, x1
        lsl x0, x6, #3
        extr x1, x7, x6, #61
        extr x2, x8, x7, #61
        extr x3, x9, x8, #61
        extr x4, x10, x9, #61
        extr x5, x11, x10, #61
        lsr x6, x11, #61
        add x6, x6, #0x1
        ldp x8, x9, [sp, #240]
        ldp x10, x11, [sp, #256]
        mov x14, #0x3
        mul x15, x14, x8
        umulh x8, x14, x8
        adds x0, x0, x15
        mul x15, x14, x9
        umulh x9, x14, x9
        adcs x1, x1, x15
        mul x15, x14, x10
        umulh x10, x14, x10
        adcs x2, x2, x15
        mul x15, x14, x11
        umulh x11, x14, x11
        adcs x3, x3, x15
        mul x15, x14, x12
        umulh x12, x14, x12
        adcs x4, x4, x15
        mul x15, x14, x19
        umulh x13, x14, x19
        adcs x5, x5, x15
        adc x6, x6, xzr
        adds x1, x1, x8
        adcs x2, x2, x9
        adcs x3, x3, x10
        adcs x4, x4, x11
        adcs x5, x5, x12
        adcs x6, x6, x13
        lsl x7, x6, #32
        subs x8, x6, x7
        sbc x7, x7, xzr
        adds x0, x0, x8
        adcs x1, x1, x7
        adcs x2, x2, x6
        adcs x3, x3, xzr
        adcs x4, x4, xzr
        adcs x5, x5, xzr
        csetm x6, cc
        mov x7, #0xffffffff
        and x7, x7, x6
        adds x0, x0, x7
        eor x7, x7, x6
        adcs x1, x1, x7
        mov x7, #0xfffffffffffffffe
        and x7, x7, x6
        adcs x2, x2, x7
        adcs x3, x3, x6
        adcs x4, x4, x6
        adc x5, x5, x6
        stp x0, x1, [x25, #48]
        stp x2, x3, [x25, #64]
        stp x4, x5, [x25, #80]

// Restore stack and registers

        CFI_STACKLOAD2(x19,x20,336)
        CFI_STACKLOAD2(x21,x22,352)
        CFI_STACKLOAD2(x23,x24,368)
        CFI_STACKLOAD2(x25,x26,384)
        CFI_STACKLOAD1Z(x27,400)
        CFI_INC_SP(416)
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(p384_montjdouble)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
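A note on the pattern above: the repeated lsl #32 / add / lsr #32 clusters are word-level Montgomery reduction steps specialized to p_384, which satisfies p_384 == 2^32 - 1 (mod 2^64), so the per-word quotient digit is just the low word times 2^32 + 1. A minimal Python sketch of one such step (helper names are mine, not the library's):

P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
W0 = 2**32 + 1               # -1/p_384 mod 2^64, as (2^32+1)*(2^32-1) = 2^64-1

def montgomery_step_p384(z):
    # Choose q so that z + q * p_384 has a zero low word, then shift it out.
    q = (z * W0) % 2**64
    z += q * P384
    assert z % 2**64 == 0
    return z >> 64

z = 0x123456789abcdef0 ** 5
r = z
for _ in range(6):           # six word steps divide by 2^384 overall
    r = montgomery_step_p384(r)
assert r % P384 == z * pow(2**384, -1, P384) % P384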
wlsfx/bnbb
4,225
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_sqr_p256k1_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Square modulo p_256k1, z := (x^2) mod p_256k1
// Input x[4]; output z[4]
//
//    extern void bignum_sqr_p256k1_alt(uint64_t z[static 4],
//                                      const uint64_t x[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_p256k1_alt)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sqr_p256k1_alt)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_p256k1_alt)
        .text
        .balign 4

#define z x0
#define x x1

#define a0 x2
#define a1 x3
#define a2 x4
#define a3 x5

#define h x6
#define l x7

#define u0 x8
#define u1 x9
#define u2 x10
#define u3 x11
#define u4 x12
#define u5 x13
#define u6 x14

// Just aliases

#define q a0
#define c a1
#define t a2
#define u7 h

S2N_BN_SYMBOL(bignum_sqr_p256k1_alt):

        CFI_START

// Load all the elements, set up an initial window [u6;...u1] = [23;03;01]
// and chain in the addition of 02 + 12 + 13 (no carry-out is possible).
// This gives all the "heterogeneous" terms of the squaring ready to double

        ldp a0, a1, [x]
        mul u1, a0, a1
        umulh u2, a0, a1
        ldp a2, a3, [x, #16]
        mul u3, a0, a3
        umulh u4, a0, a3
        mul l, a0, a2
        umulh h, a0, a2
        adds u2, u2, l
        adcs u3, u3, h
        mul l, a1, a2
        umulh h, a1, a2
        adc h, h, xzr
        adds u3, u3, l
        mul u5, a2, a3
        umulh u6, a2, a3
        adcs u4, u4, h
        mul l, a1, a3
        umulh h, a1, a3
        adc h, h, xzr
        adds u4, u4, l
        adcs u5, u5, h
        adc u6, u6, xzr

// Now just double it; this simple approach seems to work better than extr

        adds u1, u1, u1
        adcs u2, u2, u2
        adcs u3, u3, u3
        adcs u4, u4, u4
        adcs u5, u5, u5
        adcs u6, u6, u6
        cset u7, cs

// Add the homogeneous terms 00 + 11 + 22 + 33

        umulh l, a0, a0
        mul u0, a0, a0
        adds u1, u1, l
        mul l, a1, a1
        adcs u2, u2, l
        umulh l, a1, a1
        adcs u3, u3, l
        mul l, a2, a2
        adcs u4, u4, l
        umulh l, a2, a2
        adcs u5, u5, l
        mul l, a3, a3
        adcs u6, u6, l
        umulh l, a3, a3
        adc u7, u7, l

// Now we have the full 8-digit product 2^256 * h + l where
// h = [u7,u6,u5,u4] and l = [u3,u2,u1,u0]
// and this is == 4294968273 * h + l (mod p_256k1)

        mov c, #977
        orr c, c, #0x100000000

        mul l, c, u4
        umulh t, c, u4
        adds u0, u0, l
        mul l, c, u5
        umulh u5, c, u5
        adcs u1, u1, l
        mul l, c, u6
        umulh u6, c, u6
        adcs u2, u2, l
        mul l, c, u7
        umulh u7, c, u7
        adcs u3, u3, l
        cset u4, cs
        adds u1, u1, t
        adcs u2, u2, u5
        adcs u3, u3, u6
        adc u4, u4, u7

// Now we have reduced to 5 digits, 2^256 * h + l = [u4,u3,u2,u1,u0]
// Use q = h + 1 as the initial quotient estimate, either right or 1 too big.

        add q, u4, #1
        mul l, c, q
        umulh h, c, q
        adds u0, u0, l
        adcs u1, u1, h
        adcs u2, u2, xzr
        adcs u3, u3, xzr

// Now the effective answer is 2^256 * (CF - 1) + [u3,u2,u1,u0]
// So we correct if CF = 0 by subtracting 4294968273, i.e. by
// adding p_256k1 to the "full" answer

        csel c, c, xzr, cc
        subs u0, u0, c
        sbcs u1, u1, xzr
        sbcs u2, u2, xzr
        sbc u3, u3, xzr

// Write back and return

        stp u0, u1, [x0]
        stp u2, u3, [x0, #16]

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_sqr_p256k1_alt)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
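The reduction scheme in this file (fold the high half through 2^256 == 4294968273 mod p_256k1, then take q = h + 1 as a quotient estimate that is exact or one too big) is easy to model with big integers. A rough sketch in Python, with my own function name rather than the library's API:

C = 4294968273                       # 2^32 + 977
P = 2**256 - C                       # p_256k1

def reduce_square(z):                # z < 2^512, e.g. a full 8-word square
    h, l = z >> 256, z % 2**256
    z = C * h + l                    # fold: 2^256 * h == C * h (mod p)
    q = (z >> 256) + 1               # quotient estimate from the fifth word
    r = z - q * P                    # the "effective answer", implicit -2^256
    return r + P if r < 0 else r     # CF clear in the code <=> r < 0 here

import random
for _ in range(100):
    x = random.randrange(P)
    assert reduce_square(x * x) == x * x % P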
wlsfx/bnbb
30,526
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/secp256k1_jdouble_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point doubling on SECG curve secp256k1 in Jacobian coordinates // // extern void secp256k1_jdouble_alt(uint64_t p3[static 12], // const uint64_t p1[static 12]); // // Does p3 := 2 * p1 where all points are regarded as Jacobian triples. // A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3). // It is assumed that all coordinates of the input point are fully // reduced mod p_256k1 and that the z coordinate is not zero. // // Standard ARM ABI: X0 = p3, X1 = p1 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(secp256k1_jdouble_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(secp256k1_jdouble_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(secp256k1_jdouble_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 32 // Stable homes for input arguments during main code sequence #define input_z x15 #define input_x x16 // The magic constant 2^256 - p_256k1 #define pconst x17 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries #define x_2 sp, #(NUMSIZE*0) #define y_2 sp, #(NUMSIZE*1) #define d sp, #(NUMSIZE*2) #define tmp sp, #(NUMSIZE*3) #define x_4 sp, #(NUMSIZE*4) #define y_4 sp, #(NUMSIZE*6) #define dx2 sp, #(NUMSIZE*8) #define xy2 sp, #(NUMSIZE*10) #define NSPACE NUMSIZE*12 // Corresponds exactly to bignum_mul_p256k1_alt except for // re-use of the pconst register for the constant 4294968273 #define mul_p256k1(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x7, x8, [P2] __LF \ mul x12, x3, x7 __LF \ umulh x13, x3, x7 __LF \ mul x11, x3, x8 __LF \ umulh x14, x3, x8 __LF \ adds x13, x13, x11 __LF \ ldp x9, x10, [P2+16] __LF \ mul x11, x3, x9 __LF \ umulh x0, x3, x9 __LF \ adcs x14, x14, x11 __LF \ mul x11, x3, x10 __LF \ umulh x1, x3, x10 __LF \ adcs x0, x0, x11 __LF \ adc x1, x1, xzr __LF \ ldp x5, x6, [P1+16] __LF \ mul x11, x4, x7 __LF \ adds x13, x13, x11 __LF \ mul x11, x4, x8 __LF \ adcs x14, x14, x11 __LF \ mul x11, x4, x9 __LF \ adcs x0, x0, x11 __LF \ mul x11, x4, x10 __LF \ adcs x1, x1, x11 __LF \ umulh x3, x4, x10 __LF \ adc x3, x3, xzr __LF \ umulh x11, x4, x7 __LF \ adds x14, x14, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x0, x0, x11 __LF \ umulh x11, x4, x9 __LF \ adcs x1, x1, x11 __LF \ adc x3, x3, xzr __LF \ mul x11, x5, x7 __LF \ adds x14, x14, x11 __LF \ mul x11, x5, x8 __LF \ adcs x0, x0, x11 __LF \ mul x11, x5, x9 __LF \ adcs x1, x1, x11 __LF \ mul x11, x5, x10 __LF \ adcs x3, x3, x11 __LF \ umulh x4, x5, x10 __LF \ adc x4, x4, xzr __LF \ umulh x11, x5, x7 __LF \ adds x0, x0, x11 __LF \ umulh x11, x5, x8 __LF \ adcs x1, x1, x11 __LF \ umulh x11, x5, x9 __LF \ adcs x3, x3, x11 __LF \ adc x4, x4, xzr __LF \ mul x11, x6, x7 __LF \ adds x0, x0, x11 __LF \ mul x11, x6, x8 __LF \ adcs x1, x1, x11 __LF \ mul x11, x6, x9 __LF \ adcs x3, x3, x11 __LF \ mul x11, x6, x10 __LF \ adcs x4, x4, x11 __LF \ umulh x5, x6, x10 __LF \ adc x5, x5, xzr __LF \ umulh x11, x6, x7 __LF \ adds x1, x1, x11 __LF \ umulh x11, x6, x8 __LF \ adcs x3, x3, x11 __LF \ umulh x11, x6, x9 __LF \ adcs x4, x4, x11 __LF \ adc x5, x5, xzr __LF \ mul x11, pconst, x1 __LF \ umulh x9, pconst, x1 __LF \ adds x12, 
x12, x11 __LF \ mul x11, pconst, x3 __LF \ umulh x3, pconst, x3 __LF \ adcs x13, x13, x11 __LF \ mul x11, pconst, x4 __LF \ umulh x4, pconst, x4 __LF \ adcs x14, x14, x11 __LF \ mul x11, pconst, x5 __LF \ umulh x5, pconst, x5 __LF \ adcs x0, x0, x11 __LF \ cset x1, cs __LF \ adds x13, x13, x9 __LF \ adcs x14, x14, x3 __LF \ adcs x0, x0, x4 __LF \ adc x1, x1, x5 __LF \ add x8, x1, #0x1 __LF \ mul x11, pconst, x8 __LF \ umulh x9, pconst, x8 __LF \ adds x12, x12, x11 __LF \ adcs x13, x13, x9 __LF \ adcs x14, x14, xzr __LF \ adcs x0, x0, xzr __LF \ csel x7, pconst, xzr, cc __LF \ subs x12, x12, x7 __LF \ sbcs x13, x13, xzr __LF \ sbcs x14, x14, xzr __LF \ sbc x0, x0, xzr __LF \ stp x12, x13, [P0] __LF \ stp x14, x0, [P0+16] // Corresponds exactly to bignum_sqr_p256k1_alt except for // re-use of the pconst register for the constant 4294968273 #define sqr_p256k1(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x9, x2, x3 __LF \ umulh x10, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x11, x2, x5 __LF \ umulh x12, x2, x5 __LF \ mul x7, x2, x4 __LF \ umulh x6, x2, x4 __LF \ adds x10, x10, x7 __LF \ adcs x11, x11, x6 __LF \ mul x7, x3, x4 __LF \ umulh x6, x3, x4 __LF \ adc x6, x6, xzr __LF \ adds x11, x11, x7 __LF \ mul x13, x4, x5 __LF \ umulh x14, x4, x5 __LF \ adcs x12, x12, x6 __LF \ mul x7, x3, x5 __LF \ umulh x6, x3, x5 __LF \ adc x6, x6, xzr __LF \ adds x12, x12, x7 __LF \ adcs x13, x13, x6 __LF \ adc x14, x14, xzr __LF \ adds x9, x9, x9 __LF \ adcs x10, x10, x10 __LF \ adcs x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, x14 __LF \ cset x6, cs __LF \ umulh x7, x2, x2 __LF \ mul x8, x2, x2 __LF \ adds x9, x9, x7 __LF \ mul x7, x3, x3 __LF \ adcs x10, x10, x7 __LF \ umulh x7, x3, x3 __LF \ adcs x11, x11, x7 __LF \ mul x7, x4, x4 __LF \ adcs x12, x12, x7 __LF \ umulh x7, x4, x4 __LF \ adcs x13, x13, x7 __LF \ mul x7, x5, x5 __LF \ adcs x14, x14, x7 __LF \ umulh x7, x5, x5 __LF \ adc x6, x6, x7 __LF \ mul x7, pconst, x12 __LF \ umulh x4, pconst, x12 __LF \ adds x8, x8, x7 __LF \ mul x7, pconst, x13 __LF \ umulh x13, pconst, x13 __LF \ adcs x9, x9, x7 __LF \ mul x7, pconst, x14 __LF \ umulh x14, pconst, x14 __LF \ adcs x10, x10, x7 __LF \ mul x7, pconst, x6 __LF \ umulh x6, pconst, x6 __LF \ adcs x11, x11, x7 __LF \ cset x12, cs __LF \ adds x9, x9, x4 __LF \ adcs x10, x10, x13 __LF \ adcs x11, x11, x14 __LF \ adc x12, x12, x6 __LF \ add x2, x12, #0x1 __LF \ mul x7, pconst, x2 __LF \ umulh x6, pconst, x2 __LF \ adds x8, x8, x7 __LF \ adcs x9, x9, x6 __LF \ adcs x10, x10, xzr __LF \ adcs x11, x11, xzr __LF \ csel x3, pconst, xzr, cc __LF \ subs x8, x8, x3 __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbc x11, x11, xzr __LF \ stp x8, x9, [P0] __LF \ stp x10, x11, [P0+16] // Rough versions producing 5-word results #define roughmul_p256k1(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x7, x8, [P2] __LF \ mul x12, x3, x7 __LF \ umulh x13, x3, x7 __LF \ mul x11, x3, x8 __LF \ umulh x14, x3, x8 __LF \ adds x13, x13, x11 __LF \ ldp x9, x10, [P2+16] __LF \ mul x11, x3, x9 __LF \ umulh x0, x3, x9 __LF \ adcs x14, x14, x11 __LF \ mul x11, x3, x10 __LF \ umulh x1, x3, x10 __LF \ adcs x0, x0, x11 __LF \ adc x1, x1, xzr __LF \ ldp x5, x6, [P1+16] __LF \ mul x11, x4, x7 __LF \ adds x13, x13, x11 __LF \ mul x11, x4, x8 __LF \ adcs x14, x14, x11 __LF \ mul x11, x4, x9 __LF \ adcs x0, x0, x11 __LF \ mul x11, x4, x10 __LF \ adcs x1, x1, x11 __LF \ umulh x3, x4, x10 __LF \ adc x3, x3, xzr __LF \ umulh x11, x4, x7 __LF \ adds x14, x14, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x0, x0, 
x11 __LF \ umulh x11, x4, x9 __LF \ adcs x1, x1, x11 __LF \ adc x3, x3, xzr __LF \ mul x11, x5, x7 __LF \ adds x14, x14, x11 __LF \ mul x11, x5, x8 __LF \ adcs x0, x0, x11 __LF \ mul x11, x5, x9 __LF \ adcs x1, x1, x11 __LF \ mul x11, x5, x10 __LF \ adcs x3, x3, x11 __LF \ umulh x4, x5, x10 __LF \ adc x4, x4, xzr __LF \ umulh x11, x5, x7 __LF \ adds x0, x0, x11 __LF \ umulh x11, x5, x8 __LF \ adcs x1, x1, x11 __LF \ umulh x11, x5, x9 __LF \ adcs x3, x3, x11 __LF \ adc x4, x4, xzr __LF \ mul x11, x6, x7 __LF \ adds x0, x0, x11 __LF \ mul x11, x6, x8 __LF \ adcs x1, x1, x11 __LF \ mul x11, x6, x9 __LF \ adcs x3, x3, x11 __LF \ mul x11, x6, x10 __LF \ adcs x4, x4, x11 __LF \ umulh x5, x6, x10 __LF \ adc x5, x5, xzr __LF \ umulh x11, x6, x7 __LF \ adds x1, x1, x11 __LF \ umulh x11, x6, x8 __LF \ adcs x3, x3, x11 __LF \ umulh x11, x6, x9 __LF \ adcs x4, x4, x11 __LF \ adc x5, x5, xzr __LF \ mul x11, pconst, x1 __LF \ umulh x9, pconst, x1 __LF \ adds x12, x12, x11 __LF \ mul x11, pconst, x3 __LF \ umulh x3, pconst, x3 __LF \ adcs x13, x13, x11 __LF \ mul x11, pconst, x4 __LF \ umulh x4, pconst, x4 __LF \ adcs x14, x14, x11 __LF \ mul x11, pconst, x5 __LF \ umulh x5, pconst, x5 __LF \ adcs x0, x0, x11 __LF \ cset x1, cs __LF \ adds x13, x13, x9 __LF \ adcs x14, x14, x3 __LF \ adcs x0, x0, x4 __LF \ adc x1, x1, x5 __LF \ stp x12, x13, [P0] __LF \ stp x14, x0, [P0+16] __LF \ str x1, [P0+32] #define roughsqr_p256k1(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x9, x2, x3 __LF \ umulh x10, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x11, x2, x5 __LF \ umulh x12, x2, x5 __LF \ mul x7, x2, x4 __LF \ umulh x6, x2, x4 __LF \ adds x10, x10, x7 __LF \ adcs x11, x11, x6 __LF \ mul x7, x3, x4 __LF \ umulh x6, x3, x4 __LF \ adc x6, x6, xzr __LF \ adds x11, x11, x7 __LF \ mul x13, x4, x5 __LF \ umulh x14, x4, x5 __LF \ adcs x12, x12, x6 __LF \ mul x7, x3, x5 __LF \ umulh x6, x3, x5 __LF \ adc x6, x6, xzr __LF \ adds x12, x12, x7 __LF \ adcs x13, x13, x6 __LF \ adc x14, x14, xzr __LF \ adds x9, x9, x9 __LF \ adcs x10, x10, x10 __LF \ adcs x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, x14 __LF \ cset x6, cs __LF \ umulh x7, x2, x2 __LF \ mul x8, x2, x2 __LF \ adds x9, x9, x7 __LF \ mul x7, x3, x3 __LF \ adcs x10, x10, x7 __LF \ umulh x7, x3, x3 __LF \ adcs x11, x11, x7 __LF \ mul x7, x4, x4 __LF \ adcs x12, x12, x7 __LF \ umulh x7, x4, x4 __LF \ adcs x13, x13, x7 __LF \ mul x7, x5, x5 __LF \ adcs x14, x14, x7 __LF \ umulh x7, x5, x5 __LF \ adc x6, x6, x7 __LF \ mul x7, pconst, x12 __LF \ umulh x4, pconst, x12 __LF \ adds x8, x8, x7 __LF \ mul x7, pconst, x13 __LF \ umulh x13, pconst, x13 __LF \ adcs x9, x9, x7 __LF \ mul x7, pconst, x14 __LF \ umulh x14, pconst, x14 __LF \ adcs x10, x10, x7 __LF \ mul x7, pconst, x6 __LF \ umulh x6, pconst, x6 __LF \ adcs x11, x11, x7 __LF \ cset x12, cs __LF \ adds x9, x9, x4 __LF \ adcs x10, x10, x13 __LF \ adcs x11, x11, x14 __LF \ adc x12, x12, x6 __LF \ stp x8, x9, [P0] __LF \ stp x10, x11, [P0+16] __LF \ str x12, [P0+32] // Weak doubling operation, staying in 4 digits but not in general // fully normalizing modulo p_256k1 #define weakdouble_p256k1(P0,P1) \ ldp x1, x2, [P1] __LF \ lsl x0, x1, #1 __LF \ ldp x3, x4, [P1+16] __LF \ ands xzr, x4, #0x8000000000000000 __LF \ csel x5, pconst, xzr, ne __LF \ extr x1, x2, x1, #63 __LF \ adds x0, x0, x5 __LF \ extr x2, x3, x2, #63 __LF \ adcs x1, x1, xzr __LF \ extr x3, x4, x3, #63 __LF \ adcs x2, x2, xzr __LF \ stp x0, x1, [P0] __LF \ adc x3, x3, xzr __LF \ stp x2, x3, [P0+16] // P0 = C * P1 - D * 
P2 with 5-word inputs P1 and P2 // Only used here with C = 12, D = 9, but could be used more generally. // We start with (2^40 * 2^256 + C * P1) - (D * P2 + 2^40 * k) // where p_256k1 = 2^256 - k (so k = 4294968273) #define cmsub_p256k1(P0,C,P1,D,P2) \ mov x10, C __LF \ ldp x4, x5, [P1] __LF \ mul x0, x4, x10 __LF \ mul x1, x5, x10 __LF \ ldp x6, x7, [P1+16] __LF \ mul x2, x6, x10 __LF \ mul x3, x7, x10 __LF \ ldr x13, [P1+32] __LF \ umulh x4, x4, x10 __LF \ adds x1, x1, x4 __LF \ umulh x5, x5, x10 __LF \ adcs x2, x2, x5 __LF \ umulh x6, x6, x10 __LF \ adcs x3, x3, x6 __LF \ umulh x4, x7, x10 __LF \ mul x13, x13, x10 __LF \ adc x9, x4, x13 __LF \ orr x9, x9, #0x10000000000 __LF \ /* [x9; x3;x2;x1;x0] = 2^40 * 2^256 + C * P1 */ \ mov x10, D __LF \ ldp x13, x14, [P2] __LF \ mul x5, x14, x10 __LF \ umulh x6, x14, x10 __LF \ adds x5, x5, pconst, lsr #24 __LF \ adc x6, x6, xzr __LF \ mul x4, x13, x10 __LF \ adds x4, x4, pconst, lsl #40 __LF \ umulh x13, x13, x10 __LF \ adcs x5, x5, x13 __LF \ ldp x13, x14, [P2+16] __LF \ mul x12, x13, x10 __LF \ umulh x7, x13, x10 __LF \ ldr x13, [P2+32] __LF \ adcs x6, x6, x12 __LF \ mul x12, x14, x10 __LF \ umulh x8, x14, x10 __LF \ mul x13, x13, x10 __LF \ adcs x7, x7, x12 __LF \ adc x8, x8, x13 __LF \ /* [x8; x7;x6;x5;x4] = D * P2 + 2^40 * k */ \ subs x0, x0, x4 __LF \ sbcs x1, x1, x5 __LF \ sbcs x2, x2, x6 __LF \ sbcs x3, x3, x7 __LF \ sbc x4, x9, x8 __LF \ /* [x4; x3;x2;x1;x0] = 2^40*p_256k1+result */ \ add x10, x4, #1 __LF \ /* (h + 1) is quotient estimate */ \ mul x4, pconst, x10 __LF \ umulh x5, pconst, x10 __LF \ adds x0, x0, x4 __LF \ adcs x1, x1, x5 __LF \ adcs x2, x2, xzr __LF \ adcs x3, x3, xzr __LF \ csel x11, pconst, xzr, cc __LF \ /* If un-correction needed */ \ subs x0, x0, x11 __LF \ sbcs x1, x1, xzr __LF \ stp x0, x1, [P0] __LF \ sbcs x2, x2, xzr __LF \ sbc x3, x3, xzr __LF \ stp x2, x3, [P0+16] // P0 = 3 * P1 - 8 * P2 with 5-digit P1 and P2 // We start with (2^40 * 2^256 + 3 * P1) - (8 * P2 + 2^40 * k) // where p_256k1 = 2^256 - k (so k = 4294968273) #define cmsub38_p256k1(P0,P1,P2) \ mov x10, #3 __LF \ ldp x4, x5, [P1] __LF \ mul x0, x4, x10 __LF \ mul x1, x5, x10 __LF \ ldp x6, x7, [P1+16] __LF \ mul x2, x6, x10 __LF \ mul x3, x7, x10 __LF \ ldr x13, [P1+32] __LF \ umulh x4, x4, x10 __LF \ adds x1, x1, x4 __LF \ umulh x5, x5, x10 __LF \ adcs x2, x2, x5 __LF \ umulh x6, x6, x10 __LF \ adcs x3, x3, x6 __LF \ umulh x4, x7, x10 __LF \ mul x13, x13, x10 __LF \ adc x9, x4, x13 __LF \ orr x9, x9, #0x10000000000 __LF \ /* [x9; x3;x2;x1;x0] = 2^40 * 2^256 + 3 * P1 */ \ lsl x12, pconst, #40 __LF \ ldp x13, x14, [P2] __LF \ lsl x4, x13, #3 __LF \ adds x4, x4, x12 __LF \ extr x5, x14, x13, #61 __LF \ lsr x12, pconst, #24 __LF \ adcs x5, x5, x12 __LF \ ldp x11, x12, [P2+16] __LF \ extr x6, x11, x14, #61 __LF \ adcs x6, x6, xzr __LF \ ldr x13, [P2+32] __LF \ extr x7, x12, x11, #61 __LF \ adcs x7, x7, xzr __LF \ extr x8, x13, x12, #61 __LF \ adc x8, x8, xzr __LF \ /* [x8; x7;x6;x5;x4] = 8 * P2 + 2^40 * k */ \ subs x0, x0, x4 __LF \ sbcs x1, x1, x5 __LF \ sbcs x2, x2, x6 __LF \ sbcs x3, x3, x7 __LF \ sbc x4, x9, x8 __LF \ /* [x4; x3;x2;x1;x0] = 2^40*p_256k1+result */ \ add x10, x4, #1 __LF \ /* (h + 1) is quotient estimate */ \ mul x4, pconst, x10 __LF \ umulh x5, pconst, x10 __LF \ adds x0, x0, x4 __LF \ adcs x1, x1, x5 __LF \ adcs x2, x2, xzr __LF \ adcs x3, x3, xzr __LF \ csel x11, pconst, xzr, cc __LF \ /* If un-correction needed */ \ subs x0, x0, x11 __LF \ sbcs x1, x1, xzr __LF \ stp x0, x1, [P0] __LF \ sbcs x2, x2, xzr __LF \ sbc x3, x3, xzr __LF 
\ stp x2, x3, [P0+16] // P0 = 4 * P1 - P2 with 5-digit P1, 4-digit P2 and result. // This is done by direct subtraction of P2 since the method // in bignum_cmul_p256k1 etc. for quotient estimation still // works when the value to be reduced is negative, as // long as it is > -p_256k1, which is the case here. #define cmsub41_p256k1(P0,P1,P2) \ ldp x1, x2, [P1] __LF \ lsl x0, x1, #2 __LF \ ldp x6, x7, [P2] __LF \ subs x0, x0, x6 __LF \ extr x1, x2, x1, #62 __LF \ sbcs x1, x1, x7 __LF \ ldp x3, x4, [P1+16] __LF \ extr x2, x3, x2, #62 __LF \ ldp x6, x7, [P2+16] __LF \ sbcs x2, x2, x6 __LF \ extr x3, x4, x3, #62 __LF \ sbcs x3, x3, x7 __LF \ ldr x5, [P1+32] __LF \ extr x4, x5, x4, #62 __LF \ sbc x4, x4, xzr __LF \ add x5, x4, #1 __LF \ /* (h + 1) is quotient estimate */ \ mul x4, pconst, x5 __LF \ adds x0, x0, x4 __LF \ umulh x5, pconst, x5 __LF \ adcs x1, x1, x5 __LF \ adcs x2, x2, xzr __LF \ adcs x3, x3, xzr __LF \ csel x4, pconst, xzr, cc __LF \ /* If un-correction needed */ \ subs x0, x0, x4 __LF \ sbcs x1, x1, xzr __LF \ stp x0, x1, [P0] __LF \ sbcs x2, x2, xzr __LF \ sbc x3, x3, xzr __LF \ stp x2, x3, [P0+16] S2N_BN_SYMBOL(secp256k1_jdouble_alt): CFI_START // Make room on stack for temp registers CFI_DEC_SP(NSPACE) // Move the input arguments to stable place mov input_z, x0 mov input_x, x1 // Set up pconst = 4294968273, so p_256k1 = 2^256 - pconst mov pconst, #977 orr pconst, pconst, #0x100000000 // Main sequence of operations // y_2 = y^2 sqr_p256k1(y_2,y_1) // x_2 = x^2 sqr_p256k1(x_2,x_1) // tmp = 2 * y_1 (in 4 words but not fully normalized) weakdouble_p256k1(tmp,y_1) // xy2 = x * y^2 (5-digit partially reduced) // x_4 = x^4 (5-digit partially reduced) roughmul_p256k1(xy2,x_1,y_2) roughsqr_p256k1(x_4,x_2) // z_3 = 2 * y_1 * z_1 mul_p256k1(z_3,z_1,tmp) // d = 12 * xy2 - 9 * x_4 cmsub_p256k1(d,12,xy2,9,x_4) // y4 = y2^2 (5-digit partially reduced) roughsqr_p256k1(y_4,y_2) // dx2 = d * x_2 (5-digit partially reduced) roughmul_p256k1(dx2,x_2,d) // x_3 = 4 * xy2 - d cmsub41_p256k1(x_3,xy2,d) // y_3 = 3 * dx2 - 8 * y_4 cmsub38_p256k1(y_3,dx2,y_4) // Restore stack and return CFI_INC_SP(NSPACE) CFI_RET S2N_BN_SIZE_DIRECTIVE(secp256k1_jdouble_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
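For reference, the operation schedule in secp256k1_jdouble_alt computes d = 12*x*y^2 - 9*x^4, then x' = 4*x*y^2 - d, y' = 3*d*x^2 - 8*y^4, z' = 2*y*z. A sketch checking those formulas against the affine tangent law on y^2 = x^3 + 7 (function and variable names are mine, not from the source):

P = 2**256 - 4294968273

def jdouble(X, Y, Z):
    y2, x2 = Y * Y % P, X * X % P
    xy2, x4 = X * y2 % P, x2 * x2 % P
    d = (12 * xy2 - 9 * x4) % P
    return ((4 * xy2 - d) % P,                  # x_3
            (3 * d * x2 - 8 * y2 * y2) % P,     # y_3
            2 * Y * Z % P)                      # z_3

# Check on the secp256k1 base point
gx = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
gy = 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8
X3, Y3, Z3 = jdouble(gx, gy, 1)
lam = 3 * gx * gx * pow(2 * gy, -1, P) % P      # affine tangent slope
ax = (lam * lam - 2 * gx) % P
ay = (lam * (gx - ax) - gy) % P
zi = pow(Z3, -1, P)
assert (X3 * zi * zi % P, Y3 * zi ** 3 % P) == (ax, ay)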
wlsfx/bnbb
5,084
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_montsqr_p256k1_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Montgomery square, z := (x^2 / 2^256) mod p_256k1
// Input x[4]; output z[4]
//
//    extern void bignum_montsqr_p256k1_alt(uint64_t z[static 4],
//                                          const uint64_t x[static 4]);
//
// Does z := (x^2 / 2^256) mod p_256k1, assuming x^2 <= 2^256 * p_256k1, which
// is guaranteed in particular if x < p_256k1 initially (the "intended" case).
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p256k1_alt)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr_p256k1_alt)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p256k1_alt)
        .text
        .balign 4

#define z x0
#define x x1

#define a0 x2
#define a1 x3
#define a2 x4
#define a3 x5

#define h x6
#define l x7

#define u0 x8
#define u1 x9
#define u2 x10
#define u3 x11
#define u4 x12
#define u5 x13
#define u6 x14
#define u7 x15

// Just aliases (we only use w after loading the inputs)

#define w x
#define t h
#define c a0
#define uu a1

S2N_BN_SYMBOL(bignum_montsqr_p256k1_alt):

        CFI_START

// Load all the elements, set up an initial window [u6;...u1] = [23;03;01]
// and chain in the addition of 02 + 12 + 13 (no carry-out is possible).
// This gives all the "heterogeneous" terms of the squaring ready to double

        ldp a0, a1, [x]
        mul u1, a0, a1
        umulh u2, a0, a1
        ldp a2, a3, [x, #16]
        mul u3, a0, a3
        umulh u4, a0, a3
        mul l, a0, a2
        umulh h, a0, a2
        adds u2, u2, l
        adcs u3, u3, h
        mul l, a1, a2
        umulh h, a1, a2
        adc h, h, xzr
        adds u3, u3, l
        mul u5, a2, a3
        umulh u6, a2, a3
        adcs u4, u4, h
        mul l, a1, a3
        umulh h, a1, a3
        adc h, h, xzr
        adds u4, u4, l
        adcs u5, u5, h
        adc u6, u6, xzr

// Now just double it; this simple approach seems to work better than extr

        adds u1, u1, u1
        adcs u2, u2, u2
        adcs u3, u3, u3
        adcs u4, u4, u4
        adcs u5, u5, u5
        adcs u6, u6, u6
        cset u7, cs

// Add the homogeneous terms 00 + 11 + 22 + 33

        umulh l, a0, a0
        mul u0, a0, a0
        adds u1, u1, l

// Start the Montgomery reductions now to interleave better, though
// conceptually they all happen after the multiplication, only modifying
// any u_i when the multiplication process no longer uses it. Set up
// constants c = 4294968273 so that p_256k1 = 2^256 - c, and w the negated
// multiplicative inverse so that p_256k1 * w == -1 (mod 2^64).
// Precompute a little ahead of the main Montgomery stage.

        movz w, #0x3531
        movk w, #0xd225, lsl #16
        movk w, #0x091d, lsl #32
        movk w, #0xd838, lsl #48
        mov c, #977
        orr c, c, #0x100000000
        mul u0, w, u0

        mul l, a1, a1
        adcs u2, u2, l
        umulh l, a1, a1
        adcs u3, u3, l
        mul l, a2, a2
        adcs u4, u4, l
        umulh l, a2, a2
        adcs u5, u5, l
        mul l, a3, a3
        adcs u6, u6, l
        umulh l, a3, a3
        adc u7, u7, l

// Now we have the full 8-digit product 2^256 * h + l where
// h = [u7,u6,u5,u4] and l = [u3,u2,u1,u0']. We actually precomputed
// the Montgomery multiplier in u0, but otherwise continue with
// 4 iterations of Montgomery reduction, rotating [u3;u2;u1;u0]

        umulh l, u0, c
        subs u1, u1, l
        mul u1, w, u1
        umulh l, u1, c
        sbcs u2, u2, l
        mul u2, w, u2
        umulh l, u2, c
        sbcs u3, u3, l
        mul u3, w, u3
        umulh l, u3, c
        sbcs u0, u0, l
        sbcs u1, u1, xzr
        sbcs u2, u2, xzr
        sbc u3, u3, xzr

// Add the high part and the Montgomery reduced low part

        adds u0, u0, u4
        adcs u1, u1, u5
        adcs u2, u2, u6
        and uu, u1, u2
        adcs u3, u3, u7
        and uu, uu, u3
        cset t, cs

// Decide whether z >= p_256k1 <=> z + 4294968273 >= 2^256

        adds xzr, u0, c
        adcs xzr, uu, xzr
        adcs t, t, xzr

// Now t <> 0 <=> z >= p_256k1, so mask the constant c accordingly

        csel c, c, xzr, ne

// If z >= p_256k1 do z := z - p_256k1, i.e. add c in 4 digits

        adds u0, u0, c
        adcs u1, u1, xzr
        adcs u2, u2, xzr
        adc u3, u3, xzr

// Write back

        stp u0, u1, [z]
        stp u2, u3, [z, #16]

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_montsqr_p256k1_alt)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
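The w constant built by the movz/movk chain is the negated inverse of p_256k1 modulo 2^64, and each mul/umulh/sbcs round divides by one word. A small Python model (my notation) that recomputes w rather than trusting the literal:

P = 2**256 - 4294968273
W = pow(-P, -1, 2**64)
assert W == 0xd838091dd2253531     # matches the movz/movk sequence above

def montredc(z):                   # requires z <= 2^256 * p_256k1
    for _ in range(4):             # one iteration per 64-bit word
        u = (z * W) % 2**64        # multiplier that zeroes the low word
        z = (z + u * P) >> 64
    return z - P if z >= P else z  # single final conditional subtraction

import random
x = random.randrange(P)
assert montredc(x * x) == x * x * pow(2**256, -1, P) % P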
wlsfx/bnbb
2,766
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_cmul_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Multiply by a single word modulo p_256k1, z := (c * x) mod p_256k1, assuming
// x reduced
// Inputs c, x[4]; output z[4]
//
//    extern void bignum_cmul_p256k1(uint64_t z[static 4], uint64_t c,
//                                   const uint64_t x[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = c, X2 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p256k1)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_p256k1)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p256k1)
        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p256k1_alt)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_p256k1_alt)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p256k1_alt)
        .text
        .balign 4

#define z x0
#define m x1
#define x x2

#define d0 x3
#define d1 x4
#define d2 x5
#define d3 x6

#define a0 x7
#define a1 x8
#define a2 x9
#define c x9
#define a3 x10
#define h x10
#define q x10

S2N_BN_SYMBOL(bignum_cmul_p256k1):

S2N_BN_SYMBOL(bignum_cmul_p256k1_alt):

        CFI_START

// First do the multiply, straightforwardly to get [h;d3;d2;d1;d0]

        ldp a0, a1, [x]
        ldp a2, a3, [x, #16]
        mul d0, m, a0
        mul d1, m, a1
        mul d2, m, a2
        mul d3, m, a3
        umulh a0, m, a0
        umulh a1, m, a1
        umulh a2, m, a2
        umulh h, m, a3
        adds d1, d1, a0
        adcs d2, d2, a1
        adcs d3, d3, a2
        adcs h, h, xzr

// Now the quotient estimate is q = h + 1, and then we do the reduction,
// writing z = [d3;d2;d1;d0], as z' = (2^256 * h + z) - q * p_256k1 =
// (2^256 * h + z) - q * (2^256 - 4294968273) = -2^256 + (z + 4294968273 * q)

        add q, h, #1
        mov c, #977
        orr c, c, #0x100000000
        mul a0, q, c
        umulh a1, q, c
        adds d0, d0, a0
        adcs d1, d1, a1
        adcs d2, d2, xzr
        adcs d3, d3, xzr

// Because of the implicit -2^256, CF means >= 0 so z' is the answer; ~CF
// means z' < 0 so we add p_256k1, which in 4 digits means subtracting c.

        csel c, c, xzr, cc
        subs d0, d0, c
        sbcs d1, d1, xzr
        sbcs d2, d2, xzr
        sbc d3, d3, xzr

// Finally store the result

        stp d0, d1, [z]
        stp d2, d3, [z, #16]

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_cmul_p256k1)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
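The same h + 1 quotient estimate drives this single-word multiply; the comment's "implicit -2^256" shows up below as a possibly negative remainder that one addition of p_256k1 repairs. A hedged sketch (names are mine):

P = 2**256 - 4294968273

def cmul_p256k1(c, x):             # c is one word, x fully reduced
    z = c * x                      # 5-word product [h; d3..d0]
    q = (z >> 256) + 1             # estimate: exact or one too big
    r = z - q * P                  # negative exactly when the estimate overshot
    return r + P if r < 0 else r

assert cmul_p256k1(977, P - 1) == 977 * (P - 1) % P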
wlsfx/bnbb
1,987
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_half_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Halve modulo p_256k1, z := (x / 2) mod p_256k1, assuming x reduced
// Input x[4]; output z[4]
//
//    extern void bignum_half_p256k1(uint64_t z[static 4],
//                                   const uint64_t x[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_half_p256k1)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_half_p256k1)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_half_p256k1)
        .text
        .balign 4

#define z x0
#define x x1

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define c x6

S2N_BN_SYMBOL(bignum_half_p256k1):

        CFI_START

// Load the 4 digits of x

        ldp d0, d1, [x]
        ldp d2, d3, [x, #16]

// Let b be the LSB of the input (i.e. whether it is odd).
// Create c = 4294968273 * b

        mov c, #977
        orr c, c, #0x100000000
        tst d0, #1
        csel c, c, xzr, ne

// We want (x + b * p_256k1) / 2 where b is that LSB, in {0,1}.
// That amounts to (2^256 * b + x - 4294968273 * b) / 2, and
// modulo 4 words that's the same as ([2^256 * c + x] - c) / 2.
// So do that subtraction and shift a place right as we go.

        subs d0, d0, c
        sbcs d1, d1, xzr
        extr d0, d1, d0, #1
        sbcs d2, d2, xzr
        extr d1, d2, d1, #1
        sbcs d3, d3, xzr
        extr d2, d3, d2, #1
        sbc c, c, xzr
        extr d3, c, d3, #1

// Store back and return

        stp d0, d1, [z]
        stp d2, d3, [z, #16]

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_half_p256k1)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
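Halving works because x + (x & 1) * p_256k1 is always even and its half is still below p_256k1; the code realizes the masked addition as a subtraction of c with the shift folded in. A one-line sketch (my naming):

P = 2**256 - 4294968273

def half_p256k1(x):                # x assumed fully reduced
    return (x + (x & 1) * P) >> 1

import random
x = random.randrange(P)
assert half_p256k1(x) * 2 % P == x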
wlsfx/bnbb
21,905
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/secp256k1_jmixadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point mixed addition on SECG curve secp256k1 in Jacobian coordinates // // extern void secp256k1_jmixadd(uint64_t p3[static 12], // const uint64_t p1[static 12], // const uint64_t p2[static 8]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples. // A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3). // The "mixed" part means that p2 only has x and y coordinates, with the // implicit z coordinate assumed to be the identity. It is assumed that // all the coordinates of the input points p1 and p2 are fully reduced // mod p_256k1, that the z coordinate of p1 is nonzero and that neither // p1 =~= p2 or p1 =~= -p2, where "=~=" means "represents the same affine // point as". // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(secp256k1_jmixadd) S2N_BN_FUNCTION_TYPE_DIRECTIVE(secp256k1_jmixadd) S2N_BN_SYM_PRIVACY_DIRECTIVE(secp256k1_jmixadd) .text .balign 4 // Size of individual field elements #define NUMSIZE 32 // Stable homes for input arguments during main code sequence #define input_z x19 #define input_x x20 #define input_y x21 // The magic constant 2^256 - p_256k1 #define pconst x17 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // #NSPACE is the total stack needed for these temporaries #define zp2 sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define NSPACE NUMSIZE*6 // Corresponds exactly to bignum_mul_p256k1 except for registers and // re-use of the pconst register for the constant 4294968273 #define mul_p256k1(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x5, x6, [P2] __LF \ mul x7, x3, x5 __LF \ umulh x8, x3, x5 __LF \ mul x9, x4, x6 __LF \ umulh x10, x4, x6 __LF \ subs x4, x4, x3 __LF \ cneg x4, x4, lo __LF \ csetm x16, lo __LF \ adds x9, x9, x8 __LF \ adc x10, x10, xzr __LF \ subs x3, x5, x6 __LF \ cneg x3, x3, lo __LF \ cinv x16, x16, lo __LF \ mul x15, x4, x3 __LF \ umulh x3, x4, x3 __LF \ adds x8, x7, x9 __LF \ adcs x9, x9, x10 __LF \ adc x10, x10, xzr __LF \ cmn x16, #1 __LF \ eor x15, x15, x16 __LF \ adcs x8, x15, x8 __LF \ eor x3, x3, x16 __LF \ adcs x9, x3, x9 __LF \ adc x10, x10, x16 __LF \ ldp x3, x4, [P1+16] __LF \ ldp x5, x6, [P2+16] __LF \ mul x11, x3, x5 __LF \ umulh x12, x3, x5 __LF \ mul x13, x4, x6 __LF \ umulh x14, x4, x6 __LF \ subs x4, x4, x3 __LF \ cneg x4, x4, lo __LF \ csetm x16, lo __LF \ adds x13, x13, x12 __LF \ adc x14, x14, xzr __LF \ subs x3, x5, x6 __LF \ cneg x3, x3, lo __LF \ cinv x16, x16, lo __LF \ mul x15, x4, x3 __LF \ umulh x3, x4, x3 __LF \ adds x12, x11, x13 __LF \ adcs x13, x13, x14 __LF \ adc x14, x14, xzr __LF \ cmn x16, #1 __LF \ eor x15, 
x15, x16 __LF \ adcs x12, x15, x12 __LF \ eor x3, x3, x16 __LF \ adcs x13, x3, x13 __LF \ adc x14, x14, x16 __LF \ ldp x3, x4, [P1+16] __LF \ ldp x15, x16, [P1] __LF \ subs x3, x3, x15 __LF \ sbcs x4, x4, x16 __LF \ csetm x16, lo __LF \ ldp x15, x0, [P2] __LF \ subs x5, x15, x5 __LF \ sbcs x6, x0, x6 __LF \ csetm x0, lo __LF \ eor x3, x3, x16 __LF \ subs x3, x3, x16 __LF \ eor x4, x4, x16 __LF \ sbc x4, x4, x16 __LF \ eor x5, x5, x0 __LF \ subs x5, x5, x0 __LF \ eor x6, x6, x0 __LF \ sbc x6, x6, x0 __LF \ eor x16, x0, x16 __LF \ adds x11, x11, x9 __LF \ adcs x12, x12, x10 __LF \ adcs x13, x13, xzr __LF \ adc x14, x14, xzr __LF \ mul x2, x3, x5 __LF \ umulh x0, x3, x5 __LF \ mul x15, x4, x6 __LF \ umulh x1, x4, x6 __LF \ subs x4, x4, x3 __LF \ cneg x4, x4, lo __LF \ csetm x9, lo __LF \ adds x15, x15, x0 __LF \ adc x1, x1, xzr __LF \ subs x6, x5, x6 __LF \ cneg x6, x6, lo __LF \ cinv x9, x9, lo __LF \ mul x5, x4, x6 __LF \ umulh x6, x4, x6 __LF \ adds x0, x2, x15 __LF \ adcs x15, x15, x1 __LF \ adc x1, x1, xzr __LF \ cmn x9, #1 __LF \ eor x5, x5, x9 __LF \ adcs x0, x5, x0 __LF \ eor x6, x6, x9 __LF \ adcs x15, x6, x15 __LF \ adc x1, x1, x9 __LF \ adds x9, x11, x7 __LF \ adcs x10, x12, x8 __LF \ adcs x11, x13, x11 __LF \ adcs x12, x14, x12 __LF \ adcs x13, x13, xzr __LF \ adc x14, x14, xzr __LF \ cmn x16, #1 __LF \ eor x2, x2, x16 __LF \ adcs x9, x2, x9 __LF \ eor x0, x0, x16 __LF \ adcs x10, x0, x10 __LF \ eor x15, x15, x16 __LF \ adcs x11, x15, x11 __LF \ eor x1, x1, x16 __LF \ adcs x12, x1, x12 __LF \ adcs x13, x13, x16 __LF \ adc x14, x14, x16 __LF \ mov x16, #977 __LF \ mul x3, pconst, x11 __LF \ umulh x5, pconst, x11 __LF \ and x15, x12, #0xffffffff __LF \ lsr x2, x12, #32 __LF \ mul x4, x16, x15 __LF \ madd x15, x16, x2, x15 __LF \ adds x4, x4, x15, lsl #32 __LF \ lsr x15, x15, #32 __LF \ adc x6, x2, x15 __LF \ mul x11, pconst, x13 __LF \ umulh x13, pconst, x13 __LF \ and x15, x14, #0xffffffff __LF \ lsr x2, x14, #32 __LF \ mul x12, x16, x15 __LF \ madd x15, x16, x2, x15 __LF \ adds x12, x12, x15, lsl #32 __LF \ lsr x15, x15, #32 __LF \ adc x14, x2, x15 __LF \ adds x7, x7, x3 __LF \ adcs x8, x8, x4 __LF \ adcs x9, x9, x11 __LF \ adcs x10, x10, x12 __LF \ cset x11, hs __LF \ adds x8, x8, x5 __LF \ adcs x9, x9, x6 __LF \ adcs x10, x10, x13 __LF \ adc x11, x11, x14 __LF \ add x0, x11, #1 __LF \ mul x3, x16, x0 __LF \ lsr x4, x0, #32 __LF \ adds x3, x3, x0, lsl #32 __LF \ adc x4, xzr, x4 __LF \ adds x7, x7, x3 __LF \ adcs x8, x8, x4 __LF \ adcs x9, x9, xzr __LF \ adcs x10, x10, xzr __LF \ csel x1, pconst, xzr, lo __LF \ subs x7, x7, x1 __LF \ sbcs x8, x8, xzr __LF \ sbcs x9, x9, xzr __LF \ sbc x10, x10, xzr __LF \ stp x7, x8, [P0] __LF \ stp x9, x10, [P0+16] // Corresponds exactly to bignum_sqr_p256k1 except for // re-use of the pconst register for the constant 4294968273 #define sqr_p256k1(P0,P1) \ ldp x10, x11, [P1] __LF \ ldp x12, x13, [P1+16] __LF \ umull x2, w10, w10 __LF \ lsr x14, x10, #32 __LF \ umull x3, w14, w14 __LF \ umull x14, w10, w14 __LF \ adds x2, x2, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x3, x3, x14 __LF \ umull x4, w11, w11 __LF \ lsr x14, x11, #32 __LF \ umull x5, w14, w14 __LF \ umull x14, w11, w14 __LF \ mul x15, x10, x11 __LF \ umulh x16, x10, x11 __LF \ adds x4, x4, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x5, x5, x14 __LF \ adds x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adc x5, x5, xzr __LF \ adds x3, x3, x15 __LF \ adcs x4, x4, x16 __LF \ adc x5, x5, xzr __LF \ umull x6, w12, w12 __LF \ lsr x14, x12, #32 __LF \ umull x7, w14, w14 __LF \ 
umull x14, w12, w14 __LF \ adds x6, x6, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x7, x7, x14 __LF \ umull x8, w13, w13 __LF \ lsr x14, x13, #32 __LF \ umull x9, w14, w14 __LF \ umull x14, w13, w14 __LF \ mul x15, x12, x13 __LF \ umulh x16, x12, x13 __LF \ adds x8, x8, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x9, x9, x14 __LF \ adds x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adc x9, x9, xzr __LF \ adds x7, x7, x15 __LF \ adcs x8, x8, x16 __LF \ adc x9, x9, xzr __LF \ subs x10, x10, x12 __LF \ sbcs x11, x11, x13 __LF \ csetm x16, lo __LF \ eor x10, x10, x16 __LF \ subs x10, x10, x16 __LF \ eor x11, x11, x16 __LF \ sbc x11, x11, x16 __LF \ adds x6, x6, x4 __LF \ adcs x7, x7, x5 __LF \ adcs x8, x8, xzr __LF \ adc x9, x9, xzr __LF \ umull x12, w10, w10 __LF \ lsr x5, x10, #32 __LF \ umull x13, w5, w5 __LF \ umull x5, w10, w5 __LF \ adds x12, x12, x5, lsl #33 __LF \ lsr x5, x5, #31 __LF \ adc x13, x13, x5 __LF \ umull x15, w11, w11 __LF \ lsr x5, x11, #32 __LF \ umull x14, w5, w5 __LF \ umull x5, w11, w5 __LF \ mul x4, x10, x11 __LF \ umulh x16, x10, x11 __LF \ adds x15, x15, x5, lsl #33 __LF \ lsr x5, x5, #31 __LF \ adc x14, x14, x5 __LF \ adds x4, x4, x4 __LF \ adcs x16, x16, x16 __LF \ adc x14, x14, xzr __LF \ adds x13, x13, x4 __LF \ adcs x15, x15, x16 __LF \ adc x14, x14, xzr __LF \ adds x4, x2, x6 __LF \ adcs x5, x3, x7 __LF \ adcs x6, x6, x8 __LF \ adcs x7, x7, x9 __LF \ csetm x16, lo __LF \ subs x4, x4, x12 __LF \ sbcs x5, x5, x13 __LF \ sbcs x6, x6, x15 __LF \ sbcs x7, x7, x14 __LF \ adcs x8, x8, x16 __LF \ adc x9, x9, x16 __LF \ mov x16, #977 __LF \ mul x10, pconst, x6 __LF \ umulh x13, pconst, x6 __LF \ and x6, x7, #0xffffffff __LF \ lsr x7, x7, #32 __LF \ mul x11, x16, x6 __LF \ madd x6, x16, x7, x6 __LF \ adds x11, x11, x6, lsl #32 __LF \ lsr x6, x6, #32 __LF \ adc x14, x7, x6 __LF \ mul x12, pconst, x8 __LF \ umulh x8, pconst, x8 __LF \ and x6, x9, #0xffffffff __LF \ lsr x7, x9, #32 __LF \ mul x9, x16, x6 __LF \ madd x6, x16, x7, x6 __LF \ adds x9, x9, x6, lsl #32 __LF \ lsr x6, x6, #32 __LF \ adc x15, x7, x6 __LF \ adds x2, x2, x10 __LF \ adcs x3, x3, x11 __LF \ adcs x4, x4, x12 __LF \ adcs x5, x5, x9 __LF \ cset x6, hs __LF \ adds x3, x3, x13 __LF \ adcs x4, x4, x14 __LF \ adcs x5, x5, x8 __LF \ adc x6, x6, x15 __LF \ add x6, x6, #1 __LF \ mul x10, x16, x6 __LF \ lsr x11, x6, #32 __LF \ adds x10, x10, x6, lsl #32 __LF \ adc x11, xzr, x11 __LF \ adds x2, x2, x10 __LF \ adcs x3, x3, x11 __LF \ adcs x4, x4, xzr __LF \ adcs x5, x5, xzr __LF \ csel x16, pconst, xzr, lo __LF \ subs x2, x2, x16 __LF \ sbcs x3, x3, xzr __LF \ sbcs x4, x4, xzr __LF \ sbc x5, x5, xzr __LF \ stp x2, x3, [P0] __LF \ stp x4, x5, [P0+16] // Corresponds exactly to bignum_sub_p256k1 #define sub_p256k1(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ mov x4, #0x3d1 __LF \ orr x3, x4, #0x100000000 __LF \ csel x3, x3, xzr, cc __LF \ subs x5, x5, x3 __LF \ sbcs x6, x6, xzr __LF \ sbcs x7, x7, xzr __LF \ sbc x8, x8, xzr __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] S2N_BN_SYMBOL(secp256k1_jmixadd): CFI_START // Save registers and make room on stack for temporary variables CFI_DEC_SP(NSPACE+32) CFI_STACKSAVE2(x19,x20,NSPACE) CFI_STACKSAVE2(x21,x22,NSPACE+16) // Move the input arguments to stable place mov input_z, x0 mov input_x, x1 mov input_y, x2 // Set up pconst = 4294968273, so p_256k1 = 2^256 - pconst mov pconst, #977 orr pconst, 
pconst, #0x100000000 // Main code, just a sequence of basic field operations sqr_p256k1(zp2,z_1) mul_p256k1(y2a,z_1,y_2) mul_p256k1(x2a,zp2,x_2) mul_p256k1(y2a,zp2,y2a) sub_p256k1(xd,x2a,x_1) sub_p256k1(yd,y2a,y_1) sqr_p256k1(zz,xd) sqr_p256k1(ww,yd) mul_p256k1(zzx1,zz,x_1) mul_p256k1(zzx2,zz,x2a) sub_p256k1(resx,ww,zzx1) sub_p256k1(t1,zzx2,zzx1) mul_p256k1(resz,xd,z_1) sub_p256k1(resx,resx,zzx2) sub_p256k1(t2,zzx1,resx) mul_p256k1(t1,t1,y_1) mul_p256k1(t2,yd,t2) sub_p256k1(resy,t2,t1) // Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence) ldp x0, x1, [z_1] ldp x2, x3, [z_1+16] orr x4, x0, x1 orr x5, x2, x3 orr x4, x4, x5 cmp x4, xzr // Multiplex: if p1 <> 0 just copy the computed result from the staging area. // If p1 = 0 then return the point p2 augmented with an extra z = 1 // coordinate, hence giving 0 + p2 = p2 for the final result. ldp x0, x1, [resx] ldp x12, x13, [x_2] csel x0, x0, x12, ne csel x1, x1, x13, ne ldp x2, x3, [resx+16] ldp x12, x13, [x_2+16] csel x2, x2, x12, ne csel x3, x3, x13, ne ldp x4, x5, [resy] ldp x12, x13, [y_2] csel x4, x4, x12, ne csel x5, x5, x13, ne ldp x6, x7, [resy+16] ldp x12, x13, [y_2+16] csel x6, x6, x12, ne csel x7, x7, x13, ne ldp x8, x9, [resz] mov x12, #1 csel x8, x8, x12, ne csel x9, x9, xzr, ne ldp x10, x11, [resz+16] csel x10, x10, xzr, ne csel x11, x11, xzr, ne stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [y_3] stp x6, x7, [y_3+16] stp x8, x9, [z_3] stp x10, x11, [z_3+16] // Restore stack and return CFI_STACKLOAD2(x19,x20,NSPACE) CFI_STACKLOAD2(x21,x22,NSPACE+16) CFI_INC_SP((NSPACE+32)) CFI_RET S2N_BN_SIZE_DIRECTIVE(secp256k1_jmixadd) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
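The field-op schedule in secp256k1_jmixadd is the familiar mixed-addition ladder H = x2*Z1^2 - X1, R = y2*Z1^3 - Y1, X3 = R^2 - H^3 - 2*X1*H^2. A sketch mirroring the temporaries above and checking one case against the affine chord law (all names are mine, and this omits the z_1 = 0 multiplexing):

P = 2**256 - 4294968273

def jmixadd(X1, Y1, Z1, x2, y2):
    zp2 = Z1 * Z1 % P
    y2a = zp2 * (Z1 * y2) % P        # y2 * Z1^3
    x2a = zp2 * x2 % P               # x2 * Z1^2
    xd, yd = (x2a - X1) % P, (y2a - Y1) % P
    zz, ww = xd * xd % P, yd * yd % P
    zzx1, zzx2 = zz * X1 % P, zz * x2a % P
    resx = (ww - zzx1 - zzx2) % P
    t1 = (zzx2 - zzx1) * Y1 % P      # Y1 * H^3
    resy = (yd * ((zzx1 - resx) % P) - t1) % P
    return resx, resy, xd * Z1 % P

gx = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
gy = 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8
lam = 3 * gx * gx * pow(2 * gy, -1, P) % P           # 2G by the tangent law
x2 = (lam * lam - 2 * gx) % P
y2 = (lam * (gx - x2) - gy) % P
X3, Y3, Z3 = jmixadd(gx, gy, 1, x2, y2)              # G + 2G
lam = (y2 - gy) * pow(x2 - gx, -1, P) % P            # 3G by the chord law
ax = (lam * lam - gx - x2) % P
ay = (lam * (gx - ax) - gy) % P
zi = pow(Z3, -1, P)
assert (X3 * zi * zi % P, Y3 * zi ** 3 % P) == (ax, ay)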
wlsfx/bnbb
15,469
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/secp256k1_jmixadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point mixed addition on SECG curve secp256k1 in Jacobian coordinates // // extern void secp256k1_jmixadd_alt(uint64_t p3[static 12], // const uint64_t p1[static 12], // const uint64_t p2[static 8]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples. // A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3). // The "mixed" part means that p2 only has x and y coordinates, with the // implicit z coordinate assumed to be the identity. It is assumed that // all the coordinates of the input points p1 and p2 are fully reduced // mod p_256k1, that the z coordinate of p1 is nonzero and that neither // p1 =~= p2 or p1 =~= -p2, where "=~=" means "represents the same affine // point as". // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(secp256k1_jmixadd_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(secp256k1_jmixadd_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(secp256k1_jmixadd_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 32 // Stable homes for input arguments during main code sequence #define input_z x15 #define input_x x16 #define input_y x17 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // #NSPACE is the total stack needed for these temporaries #define zp2 sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define NSPACE NUMSIZE*6 // Corresponds exactly to bignum_mul_p256k1_alt #define mul_p256k1(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x7, x8, [P2] __LF \ mul x12, x3, x7 __LF \ umulh x13, x3, x7 __LF \ mul x11, x3, x8 __LF \ umulh x14, x3, x8 __LF \ adds x13, x13, x11 __LF \ ldp x9, x10, [P2+16] __LF \ mul x11, x3, x9 __LF \ umulh x0, x3, x9 __LF \ adcs x14, x14, x11 __LF \ mul x11, x3, x10 __LF \ umulh x1, x3, x10 __LF \ adcs x0, x0, x11 __LF \ adc x1, x1, xzr __LF \ ldp x5, x6, [P1+16] __LF \ mul x11, x4, x7 __LF \ adds x13, x13, x11 __LF \ mul x11, x4, x8 __LF \ adcs x14, x14, x11 __LF \ mul x11, x4, x9 __LF \ adcs x0, x0, x11 __LF \ mul x11, x4, x10 __LF \ adcs x1, x1, x11 __LF \ umulh x3, x4, x10 __LF \ adc x3, x3, xzr __LF \ umulh x11, x4, x7 __LF \ adds x14, x14, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x0, x0, x11 __LF \ umulh x11, x4, x9 __LF \ adcs x1, x1, x11 __LF \ adc x3, x3, xzr __LF \ mul x11, x5, x7 __LF \ adds x14, x14, x11 __LF \ mul x11, x5, x8 __LF \ adcs x0, x0, x11 __LF \ mul x11, x5, x9 __LF \ adcs x1, x1, x11 __LF \ mul x11, x5, x10 __LF \ adcs x3, x3, x11 __LF \ umulh x4, x5, x10 __LF \ adc x4, x4, xzr __LF \ umulh x11, x5, x7 __LF \ adds x0, x0, x11 __LF \ umulh x11, x5, x8 __LF \ adcs x1, x1, x11 __LF \ umulh x11, x5, x9 __LF \ adcs x3, x3, x11 __LF \ adc 
x4, x4, xzr __LF \ mul x11, x6, x7 __LF \ adds x0, x0, x11 __LF \ mul x11, x6, x8 __LF \ adcs x1, x1, x11 __LF \ mul x11, x6, x9 __LF \ adcs x3, x3, x11 __LF \ mul x11, x6, x10 __LF \ adcs x4, x4, x11 __LF \ umulh x5, x6, x10 __LF \ adc x5, x5, xzr __LF \ umulh x11, x6, x7 __LF \ adds x1, x1, x11 __LF \ umulh x11, x6, x8 __LF \ adcs x3, x3, x11 __LF \ umulh x11, x6, x9 __LF \ adcs x4, x4, x11 __LF \ adc x5, x5, xzr __LF \ mov x7, #0x3d1 __LF \ orr x7, x7, #0x100000000 __LF \ mul x11, x7, x1 __LF \ umulh x9, x7, x1 __LF \ adds x12, x12, x11 __LF \ mul x11, x7, x3 __LF \ umulh x3, x7, x3 __LF \ adcs x13, x13, x11 __LF \ mul x11, x7, x4 __LF \ umulh x4, x7, x4 __LF \ adcs x14, x14, x11 __LF \ mul x11, x7, x5 __LF \ umulh x5, x7, x5 __LF \ adcs x0, x0, x11 __LF \ cset x1, cs __LF \ adds x13, x13, x9 __LF \ adcs x14, x14, x3 __LF \ adcs x0, x0, x4 __LF \ adc x1, x1, x5 __LF \ add x8, x1, #0x1 __LF \ mul x11, x7, x8 __LF \ umulh x9, x7, x8 __LF \ adds x12, x12, x11 __LF \ adcs x13, x13, x9 __LF \ adcs x14, x14, xzr __LF \ adcs x0, x0, xzr __LF \ csel x7, x7, xzr, cc __LF \ subs x12, x12, x7 __LF \ sbcs x13, x13, xzr __LF \ sbcs x14, x14, xzr __LF \ sbc x0, x0, xzr __LF \ stp x12, x13, [P0] __LF \ stp x14, x0, [P0+16] // Corresponds exactly to bignum_sqr_p256k1_alt #define sqr_p256k1(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x9, x2, x3 __LF \ umulh x10, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x11, x2, x5 __LF \ umulh x12, x2, x5 __LF \ mul x7, x2, x4 __LF \ umulh x6, x2, x4 __LF \ adds x10, x10, x7 __LF \ adcs x11, x11, x6 __LF \ mul x7, x3, x4 __LF \ umulh x6, x3, x4 __LF \ adc x6, x6, xzr __LF \ adds x11, x11, x7 __LF \ mul x13, x4, x5 __LF \ umulh x14, x4, x5 __LF \ adcs x12, x12, x6 __LF \ mul x7, x3, x5 __LF \ umulh x6, x3, x5 __LF \ adc x6, x6, xzr __LF \ adds x12, x12, x7 __LF \ adcs x13, x13, x6 __LF \ adc x14, x14, xzr __LF \ adds x9, x9, x9 __LF \ adcs x10, x10, x10 __LF \ adcs x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, x14 __LF \ cset x6, cs __LF \ umulh x7, x2, x2 __LF \ mul x8, x2, x2 __LF \ adds x9, x9, x7 __LF \ mul x7, x3, x3 __LF \ adcs x10, x10, x7 __LF \ umulh x7, x3, x3 __LF \ adcs x11, x11, x7 __LF \ mul x7, x4, x4 __LF \ adcs x12, x12, x7 __LF \ umulh x7, x4, x4 __LF \ adcs x13, x13, x7 __LF \ mul x7, x5, x5 __LF \ adcs x14, x14, x7 __LF \ umulh x7, x5, x5 __LF \ adc x6, x6, x7 __LF \ mov x3, #0x3d1 __LF \ orr x3, x3, #0x100000000 __LF \ mul x7, x3, x12 __LF \ umulh x4, x3, x12 __LF \ adds x8, x8, x7 __LF \ mul x7, x3, x13 __LF \ umulh x13, x3, x13 __LF \ adcs x9, x9, x7 __LF \ mul x7, x3, x14 __LF \ umulh x14, x3, x14 __LF \ adcs x10, x10, x7 __LF \ mul x7, x3, x6 __LF \ umulh x6, x3, x6 __LF \ adcs x11, x11, x7 __LF \ cset x12, cs __LF \ adds x9, x9, x4 __LF \ adcs x10, x10, x13 __LF \ adcs x11, x11, x14 __LF \ adc x12, x12, x6 __LF \ add x2, x12, #0x1 __LF \ mul x7, x3, x2 __LF \ umulh x6, x3, x2 __LF \ adds x8, x8, x7 __LF \ adcs x9, x9, x6 __LF \ adcs x10, x10, xzr __LF \ adcs x11, x11, xzr __LF \ csel x3, x3, xzr, cc __LF \ subs x8, x8, x3 __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbc x11, x11, xzr __LF \ stp x8, x9, [P0] __LF \ stp x10, x11, [P0+16] // Corresponds exactly to bignum_sub_p256k1 #define sub_p256k1(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ mov x4, #0x3d1 __LF \ orr x3, x4, #0x100000000 __LF \ csel x3, x3, xzr, cc __LF \ subs x5, x5, x3 
__LF \ sbcs x6, x6, xzr __LF \ sbcs x7, x7, xzr __LF \ sbc x8, x8, xzr __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] S2N_BN_SYMBOL(secp256k1_jmixadd_alt): CFI_START // Make room on stack for temporary variables // Move the input arguments to stable places CFI_DEC_SP(NSPACE) mov input_z, x0 mov input_x, x1 mov input_y, x2 // Main code, just a sequence of basic field operations sqr_p256k1(zp2,z_1) mul_p256k1(y2a,z_1,y_2) mul_p256k1(x2a,zp2,x_2) mul_p256k1(y2a,zp2,y2a) sub_p256k1(xd,x2a,x_1) sub_p256k1(yd,y2a,y_1) sqr_p256k1(zz,xd) sqr_p256k1(ww,yd) mul_p256k1(zzx1,zz,x_1) mul_p256k1(zzx2,zz,x2a) sub_p256k1(resx,ww,zzx1) sub_p256k1(t1,zzx2,zzx1) mul_p256k1(resz,xd,z_1) sub_p256k1(resx,resx,zzx2) sub_p256k1(t2,zzx1,resx) mul_p256k1(t1,t1,y_1) mul_p256k1(t2,yd,t2) sub_p256k1(resy,t2,t1) // Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence) ldp x0, x1, [z_1] ldp x2, x3, [z_1+16] orr x4, x0, x1 orr x5, x2, x3 orr x4, x4, x5 cmp x4, xzr // Multiplex: if p1 <> 0 just copy the computed result from the staging area. // If p1 = 0 then return the point p2 augmented with an extra z = 1 // coordinate, hence giving 0 + p2 = p2 for the final result. ldp x0, x1, [resx] ldp x12, x13, [x_2] csel x0, x0, x12, ne csel x1, x1, x13, ne ldp x2, x3, [resx+16] ldp x12, x13, [x_2+16] csel x2, x2, x12, ne csel x3, x3, x13, ne ldp x4, x5, [resy] ldp x12, x13, [y_2] csel x4, x4, x12, ne csel x5, x5, x13, ne ldp x6, x7, [resy+16] ldp x12, x13, [y_2+16] csel x6, x6, x12, ne csel x7, x7, x13, ne ldp x8, x9, [resz] mov x12, #1 csel x8, x8, x12, ne csel x9, x9, xzr, ne ldp x10, x11, [resz+16] csel x10, x10, xzr, ne csel x11, x11, xzr, ne stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [y_3] stp x6, x7, [y_3+16] stp x8, x9, [z_3] stp x10, x11, [z_3+16] // Restore stack and return CFI_INC_SP(NSPACE) CFI_RET S2N_BN_SIZE_DIRECTIVE(secp256k1_jmixadd_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
wlsfx/bnbb
5,006
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_montsqr_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Montgomery square, z := (x^2 / 2^256) mod p_256k1
// Input x[4]; output z[4]
//
//    extern void bignum_montsqr_p256k1(uint64_t z[static 4],
//                                      const uint64_t x[static 4]);
//
// Does z := (x^2 / 2^256) mod p_256k1, assuming x^2 <= 2^256 * p_256k1, which
// is guaranteed in particular if x < p_256k1 initially (the "intended" case).
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p256k1)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr_p256k1)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p256k1)
        .text
        .balign 4

#define z x0
#define x x1

// Variables

#define u0 x2
#define u1 x3
#define u2 x4
#define u3 x5
#define u4 x6
#define u5 x7
#define u6 x8
#define u7 x9
#define w x10
#define c x11
#define t x12
#define uu x13

S2N_BN_SYMBOL(bignum_montsqr_p256k1):

        CFI_START

// First just a near-clone of bignum_sqr_4_8 to get the square, using
// different registers to collect full product without writeback.

        ldp u4, u5, [x]
        ldp x10, x11, [x, #16]
        mul u2, u4, x10
        mul u7, u5, x11
        umulh x12, u4, x10
        subs x13, u4, u5
        cneg x13, x13, cc
        csetm u1, cc
        subs u0, x11, x10
        cneg u0, u0, cc
        mul u6, x13, u0
        umulh u0, x13, u0
        cinv u1, u1, cc
        eor u6, u6, u1
        eor u0, u0, u1
        adds u3, u2, x12
        adc x12, x12, xzr
        umulh x13, u5, x11
        adds u3, u3, u7
        adcs x12, x12, x13
        adc x13, x13, xzr
        adds x12, x12, u7
        adc x13, x13, xzr
        cmn u1, #0x1
        adcs u3, u3, u6
        adcs x12, x12, u0
        adc x13, x13, u1
        adds u2, u2, u2
        adcs u3, u3, u3
        adcs x12, x12, x12
        adcs x13, x13, x13
        adc x14, xzr, xzr
        mul u0, u4, u4
        mul u6, u5, u5
        mul x15, u4, u5
        umulh u1, u4, u4
        umulh u7, u5, u5
        umulh x16, u4, u5
        adds u1, u1, x15
        adcs u6, u6, x16
        adc u7, u7, xzr
        adds u1, u1, x15
        adcs u6, u6, x16
        adc u7, u7, xzr
        adds u2, u2, u6
        adcs u3, u3, u7
        adcs x12, x12, xzr
        adcs x13, x13, xzr
        adc x14, x14, xzr
        mul u4, x10, x10
        mul u6, x11, x11
        mul x15, x10, x11
        umulh u5, x10, x10
        umulh u7, x11, x11
        umulh x16, x10, x11
        adds u5, u5, x15
        adcs u6, u6, x16
        adc u7, u7, xzr
        adds u5, u5, x15
        adcs u6, u6, x16
        adc u7, u7, xzr
        adds u4, u4, x12
        adcs u5, u5, x13
        adcs u6, u6, x14
        adc u7, u7, xzr

// Now we have the full 8-digit product 2^256 * h + l where
// h = [u7,u6,u5,u4] and l = [u3,u2,u1,u0]. Set up constants
// c = 4294968273 so that p_256k1 = 2^256 - c, and w the negated
// multiplicative inverse so that p_256k1 * w == -1 (mod 2^64).

        movz w, #0x3531
        movk w, #0xd225, lsl #16
        movk w, #0x091d, lsl #32
        movk w, #0xd838, lsl #48
        mov c, #977
        orr c, c, #0x100000000

// Do 4 iterations of Montgomery reduction, rotating [u3;u2;u1;u0]

        mul u0, w, u0
        umulh t, u0, c
        subs u1, u1, t
        mul u1, w, u1
        umulh t, u1, c
        sbcs u2, u2, t
        mul u2, w, u2
        umulh t, u2, c
        sbcs u3, u3, t
        mul u3, w, u3
        umulh t, u3, c
        sbcs u0, u0, t
        sbcs u1, u1, xzr
        sbcs u2, u2, xzr
        sbc u3, u3, xzr

// Add the high part and the Montgomery reduced low part

        adds u0, u0, u4
        adcs u1, u1, u5
        adcs u2, u2, u6
        and uu, u1, u2
        adcs u3, u3, u7
        and uu, uu, u3
        cset t, cs

// Decide whether z >= p_256k1 <=> z + 4294968273 >= 2^256

        adds xzr, u0, c
        adcs xzr, uu, xzr
        adcs t, t, xzr

// Now t <> 0 <=> z >= p_256k1, so mask the constant c accordingly

        csel c, c, xzr, ne

// If z >= p_256k1 do z := z - p_256k1, i.e. add c in 4 digits

        adds u0, u0, c
        adcs u1, u1, xzr
        adcs u2, u2, xzr
        adc u3, u3, xzr

// Write back

        stp u0, u1, [x0]
        stp u2, u3, [x0, #16]

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_montsqr_p256k1)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
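The and-chain through uu = u1 & u2 & u3 is a cheap way to ask whether adding the single word c to [u3;u2;u1;u0] would carry out of 2^256: the carry from the low word reaches the top only if every intermediate word is all-ones. A quick randomized check of that equivalence (my notation):

import random
M = 2**64 - 1
C = 4294968273
for _ in range(10000):
    u = [random.choice((random.randrange(2**64), M)) for _ in range(4)]
    z = sum(w << (64 * i) for i, w in enumerate(u))
    carry0 = (u[0] + C) >> 64
    uu = u[1] & u[2] & u[3]
    assert (uu + carry0) >> 64 == (z + C) >> 256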
wlsfx/bnbb
2,467
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_mod_n256k1_4.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Reduce modulo group order, z := x mod n_256k1
// Input x[4]; output z[4]
//
//    extern void bignum_mod_n256k1_4(uint64_t z[static 4],
//                                    const uint64_t x[static 4]);
//
// Reduction is modulo the group order of the secp256k1 curve.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_n256k1_4)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_n256k1_4)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_n256k1_4)
        .text
        .balign 4

#define z x0
#define x x1

#define n0 x2
#define n1 x3
#define n2 x4
#define n3 x5

#define d0 x6
#define d1 x7
#define d2 x8
#define d3 x9

// Loading large constants

#define movbig(nn,n3,n2,n1,n0) \
        movz nn, n0 __LF \
        movk nn, n1, lsl #16 __LF \
        movk nn, n2, lsl #32 __LF \
        movk nn, n3, lsl #48

S2N_BN_SYMBOL(bignum_mod_n256k1_4):

        CFI_START

// Load the complicated three words of n_256k1, the other being all 1s

        movbig( n0, #0xbfd2, #0x5e8c, #0xd036, #0x4141)
        movbig( n1, #0xbaae, #0xdce6, #0xaf48, #0xa03b)
        mov n2, 0xFFFFFFFFFFFFFFFE

// Load the input number

        ldp d0, d1, [x]
        ldp d2, d3, [x, #16]

// Do the subtraction. Since word 3 of n_256k1 is all 1s, that can be
// done by adding zero with carry, thanks to the inverted carry.

        subs n0, d0, n0
        sbcs n1, d1, n1
        sbcs n2, d2, n2
        adcs n3, d3, xzr

// Now if the carry is *clear* (inversion at work) the subtraction carried
// and hence we should have done nothing, so we reset each n_i = d_i

        csel n0, d0, n0, cc
        csel n1, d1, n1, cc
        csel n2, d2, n2, cc
        csel n3, d3, n3, cc

// Store the end result

        stp n0, n1, [z]
        stp n2, n3, [z, #16]

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_mod_n256k1_4)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
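Since n_256k1 is just below 2^256, a single compare-and-subtract fully reduces any 4-word input; the inverted ARM borrow is what the csel chain keys on. A tiny model (my naming):

N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141

def mod_n256k1_4(x):                 # any x < 2^256
    return x - N if x >= N else x    # "borrow clear" <=> keep the difference

assert mod_n256k1_4(N - 1) == N - 1
assert mod_n256k1_4(N + 5) == 5
assert mod_n256k1_4(2**256 - 1) == 2**256 - 1 - N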
wlsfx/bnbb
3,083
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_tomont_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert to Montgomery form z := (2^256 * x) mod p_256k1 // Input x[4]; output z[4] // // extern void bignum_tomont_p256k1(uint64_t z[static 4], // const uint64_t x[static 4]); // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_p256k1) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tomont_p256k1) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_p256k1) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_p256k1_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tomont_p256k1_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_p256k1_alt) .text .balign 4 #define z x0 #define x x1 #define m x2 #define d0 x3 #define d1 x4 #define d2 x5 #define d3 x6 #define a0 x7 #define a1 x8 #define a2 x9 #define c x9 #define a3 x10 #define h x10 #define q x10 S2N_BN_SYMBOL(bignum_tomont_p256k1): S2N_BN_SYMBOL(bignum_tomont_p256k1_alt): CFI_START // Since 2^256 == 4294968273 (mod p_256k1) we more or less just set // m = 4294968273 then devolve to a near-clone of bignum_cmul_p256k1; // the logic that q = h + 1 < 2^64 and hence doesn't wrap still holds // since the multiplier 4294968273 is known to be much less than 2^64. // We can also re-use the initial constant m instead of re-creating it. mov m, #977 orr m, m, #0x100000000 // First do the multiply, straightforwardly to get [h;d3;d2;d1;d0] ldp a0, a1, [x] ldp a2, a3, [x, #16] mul d0, m, a0 mul d1, m, a1 mul d2, m, a2 mul d3, m, a3 umulh a0, m, a0 umulh a1, m, a1 umulh a2, m, a2 umulh h, m, a3 adds d1, d1, a0 adcs d2, d2, a1 adcs d3, d3, a2 adcs h, h, xzr // Now the quotient estimate is q = h + 1, and then we do the reduction, // writing z = [d3;d2;d1;d0], as z' = (2^256 * h + z) - q * p_256k1 = // (2^256 * h + z) - q * (2^256 - 4294968273) = -2^256 + (z + 4294968273 * q) add q, h, #1 mul a0, q, m umulh a1, q, m adds d0, d0, a0 adcs d1, d1, a1 adcs d2, d2, xzr adcs d3, d3, xzr // Because of the implicit -2^256, CF means >= 0 so z' is the answer; ~CF // means z' < 0 so we add p_256k1, which in 4 digits means subtracting m. csel m, m, xzr, cc subs d0, d0, m sbcs d1, d1, xzr sbcs d2, d2, xzr sbcs d3, d3, xzr // Finally store the result stp d0, d1, [z] stp d2, d3, [z, #16] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_tomont_p256k1) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
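The congruence 2^256 == 4294968273 (mod p_256k1) that the comment above invokes, together with the q = h + 1 quotient estimate, can be modelled directly in whole numbers. A Python sketch (illustrative; tomont_model is an invented name):

C = (1 << 32) + 977
P = (1 << 256) - C
assert (1 << 256) % P == C             # so "* 2^256" is "* 4294968273" mod P

def tomont_model(x):
    z = C * x                          # h = z >> 256 stays below 2^34
    q = (z >> 256) + 1                 # estimate, either exact or 1 too big
    r = z - q * P                      # lands in the range (-P, P)
    return r + P if r < 0 else r

x = 0xdeadbeef
assert tomont_model(x) == (x << 256) % P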
wlsfx/bnbb
6,025
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_montmul_p256k1_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery multiply, z := (x * y / 2^256) mod p_256k1 // Inputs x[4], y[4]; output z[4] // // extern void bignum_montmul_p256k1_alt(uint64_t z[static 4], // const uint64_t x[static 4], // const uint64_t y[static 4]); // // Does z := (2^{-256} * x * y) mod p_256k1, assuming that the inputs x and y // satisfy x * y <= 2^256 * p_256k1 (in particular this is true if we are in // the "usual" case x < p_256k1 and y < p_256k1). // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p256k1_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montmul_p256k1_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p256k1_alt) .text .balign 4 #define z x0 #define x x1 #define y x2 #define a0 x3 #define a1 x4 #define a2 x5 #define a3 x6 #define b0 x7 #define b1 x8 #define b2 x9 #define b3 x10 #define l x11 #define u0 x12 #define u1 x13 #define u2 x14 #define u3 x15 #define u4 x16 #define c x17 // These alias to the input arguments when no longer needed #define u5 a0 #define u6 a1 #define u7 a2 #define w x1 #define t x2 #define uu b3 S2N_BN_SYMBOL(bignum_montmul_p256k1_alt): CFI_START // Load operands and set up row 0 = [u4;...;u0] = a0 * [b3;...;b0] ldp a0, a1, [x] ldp b0, b1, [y] mul u0, a0, b0 umulh u1, a0, b0 mul l, a0, b1 umulh u2, a0, b1 adds u1, u1, l ldp b2, b3, [y, #16] mul l, a0, b2 umulh u3, a0, b2 adcs u2, u2, l mul l, a0, b3 umulh u4, a0, b3 adcs u3, u3, l adc u4, u4, xzr ldp a2, a3, [x, #16] // Start the Montgomery reductions now to interleave better, though // conceptually they all happen after the multiplication, only modifying // any u_i when the multiplication process no longer uses it. Set up // constants c = 4294968273 so that p_256k1 = 2^256 - c, and w the negated // multiplicative inverse so that p_256k1 * w == -1 (mod 2^64). movz w, #0x3531 movk w, #0xd225, lsl #16 movk w, #0x091d, lsl #32 movk w, #0xd838, lsl #48 mov c, #977 orr c, c, #0x100000000 // Precompute this part ahead of the main Montgomery stage. This // is a repeated pattern below, since it seems to slightly improve // dependent latencies. 
mul u0, w, u0 // Row 1 = [u5;...;u0] = [a1;a0] * [b3;...;b0] mul l, a1, b0 adds u1, u1, l mul l, a1, b1 adcs u2, u2, l mul l, a1, b2 adcs u3, u3, l mul l, a1, b3 adcs u4, u4, l umulh u5, a1, b3 adc u5, u5, xzr umulh l, a1, b0 adds u2, u2, l umulh l, a1, b1 adcs u3, u3, l umulh l, a1, b2 adcs u4, u4, l adc u5, u5, xzr // Montgomery stage 0; use t to record the suspended carry umulh l, u0, c subs u1, u1, l cset t, cc // Row 2 = [u6;...;u0] = [a2;a1;a0] * [b3;...;b0] mul l, a2, b0 adds u2, u2, l mul l, a2, b1 adcs u3, u3, l mul l, a2, b2 adcs u4, u4, l mul l, a2, b3 adcs u5, u5, l umulh u6, a2, b3 adc u6, u6, xzr mul u1, w, u1 umulh l, a2, b0 adds u3, u3, l umulh l, a2, b1 adcs u4, u4, l umulh l, a2, b2 adcs u5, u5, l adc u6, u6, xzr // Montgomery stage 1 umulh l, u1, c add l, l, t subs u2, u2, l cset t, cc // Row 3 = [u7;...;u0] = [a3;...a0] * [b3;...;b0] mul l, a3, b0 adds u3, u3, l mul l, a3, b1 adcs u4, u4, l mul l, a3, b2 adcs u5, u5, l mul l, a3, b3 adcs u6, u6, l umulh u7, a3, b3 adc u7, u7, xzr mul u2, w, u2 umulh l, a3, b0 adds u4, u4, l umulh l, a3, b1 adcs u5, u5, l umulh l, a3, b2 adcs u6, u6, l adc u7, u7, xzr // Montgomery stages 2 and 3 (no longer using t to link the carries). umulh l, u2, c add l, l, t subs u3, u3, l mul u3, w, u3 umulh l, u3, c sbcs u0, u0, l sbcs u1, u1, xzr sbcs u2, u2, xzr sbc u3, u3, xzr // Now if a * b = 2^256 * h + l is the full product, we now have // [u7;u6;u5;u4] = h and 2^256 * [u3;u2;u1;u0] == l (mod p_256k1) because // of the Montgomery reductions on the low half. Now add the high part // and the Montgomery-reduced low part. adds u0, u0, u4 adcs u1, u1, u5 adcs u2, u2, u6 and uu, u1, u2 adcs u3, u3, u7 and uu, uu, u3 cset t, cs // Decide whether z >= p_256k1 <=> z + 4294968273 >= 2^256 adds xzr, u0, c adcs xzr, uu, xzr adcs t, t, xzr // Now t <> 0 <=> z >= p_256k1, so mask the constant c accordingly csel c, c, xzr, ne // If z >= p_256k1 do z := z - p_256k1, i.e. add c in 4 digits adds u0, u0, c adcs u1, u1, xzr adcs u2, u2, xzr adc u3, u3, xzr // Write back stp u0, u1, [z] stp u2, u3, [z, #16] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montmul_p256k1_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
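The final comparison in the code above relies on z >= p_256k1 being equivalent to z + 4294968273 producing a carry out of 2^256; the carry can only propagate through digits that are all 1s, which is why AND-ing the middle digits into uu condenses the whole chain. A Python check of that equivalence (illustrative; ge_p_model is an invented name):

M64 = (1 << 64) - 1
C = (1 << 32) + 977
P = (1 << 256) - C

def ge_p_model(z):
    d = [(z >> (64*i)) & M64 for i in range(4)]
    uu = d[1] & d[2] & d[3]            # carry propagates only through all-1s
    carry = (d[0] + C) >> 64
    carry = (uu + carry) >> 64
    return (z >> 256) + carry != 0     # the top bit plays the role of "t"

import random
for _ in range(1000):
    z = random.randrange(P + (1 << 80))
    assert ge_p_model(z) == (z >= P)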
wlsfx/bnbb
7,324
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_mul_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply modulo p_256k1, z := (x * y) mod p_256k1 // Inputs x[4], y[4]; output z[4] // // extern void bignum_mul_p256k1(uint64_t z[static 4], const uint64_t x[static 4], // const uint64_t y[static 4]); // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_p256k1) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mul_p256k1) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_p256k1) .text .balign 4 #define z x0 #define x x1 #define y x2 #define a0 x3 #define a1 x4 #define b0 x5 #define b1 x6 #define u0 x7 #define u1 x8 #define u2 x9 #define u3 x10 #define u4 x11 #define u5 x12 #define u6 x13 #define u7 x14 #define t x15 #define sgn x16 #define ysgn x17 // These are aliases to registers used elsewhere including input pointers. // By the time they are used this does not conflict with other uses. #define m0 y #define m1 ysgn #define m2 t #define m3 x #define u u2 // For the reduction stages, again aliasing other things #define c x1 #define h x2 #define l x15 #define d x16 #define q x17 #define a2 x11 #define a3 x12 #define b2 x13 #define b3 x14 S2N_BN_SYMBOL(bignum_mul_p256k1): CFI_START // Multiply the low halves using Karatsuba 2x2->4 to get [u3,u2,u1,u0] ldp a0, a1, [x] ldp b0, b1, [y] mul u0, a0, b0 umulh u1, a0, b0 mul u2, a1, b1 umulh u3, a1, b1 subs a1, a1, a0 cneg a1, a1, cc csetm sgn, cc adds u2, u2, u1 adc u3, u3, xzr subs a0, b0, b1 cneg a0, a0, cc cinv sgn, sgn, cc mul t, a1, a0 umulh a0, a1, a0 adds u1, u0, u2 adcs u2, u2, u3 adc u3, u3, xzr adds xzr, sgn, #1 eor t, t, sgn adcs u1, t, u1 eor a0, a0, sgn adcs u2, a0, u2 adc u3, u3, sgn // Multiply the high halves using Karatsuba 2x2->4 to get [u7,u6,u5,u4] ldp a0, a1, [x, #16] ldp b0, b1, [y, #16] mul u4, a0, b0 umulh u5, a0, b0 mul u6, a1, b1 umulh u7, a1, b1 subs a1, a1, a0 cneg a1, a1, cc csetm sgn, cc adds u6, u6, u5 adc u7, u7, xzr subs a0, b0, b1 cneg a0, a0, cc cinv sgn, sgn, cc mul t, a1, a0 umulh a0, a1, a0 adds u5, u4, u6 adcs u6, u6, u7 adc u7, u7, xzr adds xzr, sgn, #1 eor t, t, sgn adcs u5, t, u5 eor a0, a0, sgn adcs u6, a0, u6 adc u7, u7, sgn // Compute sgn,[a1,a0] = x_hi - x_lo // and ysgn,[b1,b0] = y_lo - y_hi // sign-magnitude differences ldp a0, a1, [x, #16] ldp t, sgn, [x] subs a0, a0, t sbcs a1, a1, sgn csetm sgn, cc ldp t, ysgn, [y] subs b0, t, b0 sbcs b1, ysgn, b1 csetm ysgn, cc eor a0, a0, sgn subs a0, a0, sgn eor a1, a1, sgn sbc a1, a1, sgn eor b0, b0, ysgn subs b0, b0, ysgn eor b1, b1, ysgn sbc b1, b1, ysgn // Save the correct sign for the sub-product eor sgn, ysgn, sgn // Add H' = H + L_top, still in [u7,u6,u5,u4] adds u4, u4, u2 adcs u5, u5, u3 adcs u6, u6, xzr adc u7, u7, xzr // Now compute the mid-product as [m3,m2,m1,m0] mul m0, a0, b0 umulh m1, a0, b0 mul m2, a1, b1 umulh m3, a1, b1 subs a1, a1, a0 cneg a1, a1, cc csetm u, cc adds m2, m2, m1 adc m3, m3, xzr subs b1, b0, b1 cneg b1, b1, cc cinv u, u, cc mul b0, a1, b1 umulh b1, a1, b1 adds m1, m0, m2 adcs m2, m2, m3 adc m3, m3, xzr adds xzr, u, #1 eor b0, b0, u adcs m1, b0, m1 eor b1, b1, u adcs m2, b1, m2 adc m3, m3, u // Accumulate the positive mid-terms as [u7,u6,u5,u4,u3,u2] adds u2, u4, u0 adcs u3, u5, u1 adcs u4, u6, u4 adcs u5, u7, u5 adcs u6, u6, xzr adc u7, u7, xzr // Add in the sign-adjusted complex term adds 
xzr, sgn, #1 eor m0, m0, sgn adcs u2, m0, u2 eor m1, m1, sgn adcs u3, m1, u3 eor m2, m2, sgn adcs u4, m2, u4 eor m3, m3, sgn adcs u5, m3, u5 adcs u6, u6, sgn adc u7, u7, sgn // Now we have the full 8-digit product 2^256 * h + l where // h = [u7,u6,u5,u4] and l = [u3,u2,u1,u0] // and this is == 4294968273 * h + l (mod p_256k1) // Some of the word products are done straightforwardly using mul + umulh // while others are broken down in a more complicated way as // (2^32 + 977) * (2^32 * h + l) = 2^64 * h + 2^32 * (d * h + l) + d * l mov d, #977 orr c, d, #0x100000000 mul a0, c, u4 umulh b0, c, u4 and l, u5, #0xFFFFFFFF lsr h, u5, #32 mul a1, d, l madd l, d, h, l adds a1, a1, l, lsl #32 lsr l, l, #32 adc b1, h, l mul a2, c, u6 umulh b2, c, u6 and l, u7, #0xFFFFFFFF lsr h, u7, #32 mul a3, d, l madd l, d, h, l adds a3, a3, l, lsl #32 lsr l, l, #32 adc b3, h, l adds u0, u0, a0 adcs u1, u1, a1 adcs u2, u2, a2 adcs u3, u3, a3 cset u4, cs adds u1, u1, b0 adcs u2, u2, b1 adcs u3, u3, b2 adc u4, u4, b3 // Now we have reduced to 5 digits, 2^256 * h + l = [u4,u3,u2,u1,u0] // Use q = h + 1 as the initial quotient estimate, either right or 1 too big. // Since q <= 2^33 we do 4294968273 * q = (q<<32) + 977 * q to avoid umulh add q, u4, #1 mul a0, d, q lsr a1, q, #32 adds a0, a0, q, lsl #32 adc a1, xzr, a1 adds u0, u0, a0 adcs u1, u1, a1 adcs u2, u2, xzr adcs u3, u3, xzr // Now the effective answer is 2^256 * (CF - 1) + [u3,u2,u1,u0] // So we correct if CF = 0 by subtracting 4294968273, i.e. by // adding p_256k1 to the "full" answer csel c, c, xzr, cc subs u0, u0, c sbcs u1, u1, xzr sbcs u2, u2, xzr sbc u3, u3, xzr // Write back and return stp u0, u1, [x0] stp u2, u3, [x0, #16] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mul_p256k1) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
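The decomposition quoted in the comment above, (2^32 + 977) * (2^32*h + l) = 2^64*h + 2^32*(977*h + l) + 977*l, is a plain algebraic identity that lets the mul/madd/lsl #32 sequence avoid a umulh. A quick Python check (illustrative):

import random
D = 977
C = (1 << 32) + D
for _ in range(1000):
    x = random.randrange(1 << 64)
    h, l = x >> 32, x & 0xFFFFFFFF
    # exactly the split the assembly computes word by word
    assert C * x == (h << 64) + ((D * h + l) << 32) + D * l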
wlsfx/bnbb
3,047
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_triple_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Triple modulo p_256k1, z := (3 * x) mod p_256k1 // Input x[4]; output z[4] // // extern void bignum_triple_p256k1(uint64_t z[static 4], // const uint64_t x[static 4]); // // The input x can be any 4-digit bignum, not necessarily reduced modulo // p_256k1, and the result is always fully reduced, z = (3 * x) mod p_256k1. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_p256k1) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_triple_p256k1) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_p256k1) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_p256k1_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_triple_p256k1_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_p256k1_alt) .text .balign 4 #define z x0 #define x x1 #define d0 x2 #define d1 x3 #define d2 x4 #define d3 x5 #define h x6 // Slightly offset aliases for the d_i for readability. #define a0 x3 #define a1 x4 #define a2 x5 #define a3 x6 // More aliases for the same thing at different stages #define m x6 // Other temporary variables #define c x7 S2N_BN_SYMBOL(bignum_triple_p256k1): S2N_BN_SYMBOL(bignum_triple_p256k1_alt): CFI_START // Load the inputs ldp a0, a1, [x] ldp a2, a3, [x, #16] // First do the multiplication by 3, getting z = [h; d3; ...; d0] adds d0, a0, a0, lsl #1 extr d1, a1, a0, #63 adcs d1, d1, a1 extr d2, a2, a1, #63 adcs d2, d2, a2 extr d3, a3, a2, #63 adcs d3, d3, a3 lsr h, a3, #63 adc h, h, xzr // For this limited range a simple quotient estimate of q = h + 1 works, where // h = floor(z / 2^256). Then -p_256k1 <= z - q * p_256k1 < p_256k1. mov c, #977 orr c, c, #0x100000000 madd m, h, c, c // Initial subtraction of z - q * p_256k1, actually by adding q * 4294968273. adds d0, d0, m adcs d1, d1, xzr adcs d2, d2, xzr adcs d3, d3, xzr // With z = 2^256 * h + l, the underlying result z' is actually // (2^256 * h + l) - q * (2^256 - 4294968273) = (l + q * 4294968273) - 2^256 // so carry-clear <=> z' is negative. Correct by subtracting in that case. csel c, c, xzr, cc subs d0, d0, c sbcs d1, d1, xzr sbcs d2, d2, xzr sbc d3, d3, xzr // Finally store the result stp d0, d1, [z] stp d2, d3, [z, #16] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_triple_p256k1) S2N_BN_SIZE_DIRECTIVE(bignum_triple_p256k1_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
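With z = 3*x < 3 * 2^256 the top word h is at most 2, so the q = h + 1 estimate described above lands within one signed correction of the reduced result. A Python model (illustrative; triple_model is an invented name):

C = (1 << 32) + 977
P = (1 << 256) - C

def triple_model(x):
    z = 3 * x                          # x need not be reduced mod P
    q = (z >> 256) + 1                 # h + 1, so -P <= z - q*P < P
    r = z - q * P
    return r + P if r < 0 else r

import random
for _ in range(1000):
    x = random.randrange(1 << 256)
    assert triple_model(x) == (3 * x) % P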
wlsfx/bnbb
2,238
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_add_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Add modulo p_256k1, z := (x + y) mod p_256k1, assuming x and y reduced
// Inputs x[4], y[4]; output z[4]
//
// extern void bignum_add_p256k1(uint64_t z[static 4], const uint64_t x[static 4],
//                               const uint64_t y[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_add_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_add_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_add_p256k1)
.text
.balign 4

#define z x0
#define x x1
#define y x2
#define c x3
#define l x4
#define d0 x5
#define d1 x6
#define d2 x7
#define d3 x8
#define dd x9

S2N_BN_SYMBOL(bignum_add_p256k1):

CFI_START

// First just add the numbers as z = x + y = 2^256 * c + [d3; d2; d1; d0]
// Also create dd = d3 AND d2 AND d1 to condense the later comparison

ldp d0, d1, [x]
ldp l, c, [y]
adds d0, d0, l
adcs d1, d1, c
ldp d2, d3, [x, #16]
ldp l, c, [y, #16]
adcs d2, d2, l
and dd, d1, d2
adcs d3, d3, c
and dd, dd, d3
adc c, xzr, xzr

// Let l = 4294968273 so that p_256k1 = 2^256 - l

mov l, #977
orr l, l, #0x100000000

// Decide whether z >= p_256k1 <=> z + 4294968273 >= 2^256

adds xzr, d0, l
adcs xzr, dd, xzr
adcs c, c, xzr

// Now c <> 0 <=> z >= p_256k1, so mask the constant l accordingly

csel l, l, xzr, ne

// If z >= p_256k1 do z := z - p_256k1, i.e. add l in 4 digits

adds d0, d0, l
adcs d1, d1, xzr
adcs d2, d2, xzr
adc d3, d3, xzr

// Store the result

stp d0, d1, [z]
stp d2, d3, [z, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_add_p256k1)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
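Because p_256k1 = 2^256 - 4294968273, the correction step above subtracts p_256k1 by adding 4294968273 and letting the 4-digit window drop the carry. A Python model (illustrative; add_model is an invented name):

C = (1 << 32) + 977
P = (1 << 256) - C

def add_model(x, y):
    z = x + y                          # both inputs assumed reduced
    if z >= P:
        z = (z + C) & ((1 << 256) - 1) # z - P, computed inside the window
    return z

import random
for _ in range(1000):
    x, y = random.randrange(P), random.randrange(P)
    assert add_model(x, y) == (x + y) % P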
wlsfx/bnbb
2,340
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_demont_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert from Montgomery form z := (x / 2^256) mod p_256k1, // assuming x reduced // Input x[4]; output z[4] // // extern void bignum_demont_p256k1(uint64_t z[static 4], // const uint64_t x[static 4]); // // This assumes the input is < p_256k1 for correctness. If this is not the // case, use the variant "bignum_deamont_p256k1" instead. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_demont_p256k1) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_demont_p256k1) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_demont_p256k1) .text .balign 4 // Input parameters #define z x0 #define x x1 // Rotating registers for the intermediate windows #define d0 x2 #define d1 x3 #define d2 x4 #define d3 x5 // Other temporaries #define t x6 #define w x7 #define c x8 S2N_BN_SYMBOL(bignum_demont_p256k1): CFI_START // Load input and set up constants c = 4294968273 so p_256k1 = 2^256 - c, // and w the negated multiplicative inverse p_256k1 * w == -1 (mod 2^64). ldp d0, d1, [x] movz w, #0x3531 movk w, #0xd225, lsl #16 ldp d2, d3, [x, #16] movk w, #0x091d, lsl #32 movk w, #0xd838, lsl #48 mov c, #977 orr c, c, #0x100000000 // Four stages of Montgomery reduction, rotating the register window mul d0, w, d0 umulh t, d0, c subs d1, d1, t mul d1, w, d1 umulh t, d1, c sbcs d2, d2, t mul d2, w, d2 umulh t, d2, c sbcs d3, d3, t mul d3, w, d3 umulh t, d3, c sbcs d0, d0, t sbcs d1, d1, xzr sbcs d2, d2, xzr sbc d3, d3, xzr // Write back result stp d0, d1, [z] stp d2, d3, [z, #16] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_demont_p256k1) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
wlsfx/bnbb
6,129
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_sqr_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Square modulo p_256k1, z := (x^2) mod p_256k1 // Input x[4]; output z[4] // // extern void bignum_sqr_p256k1(uint64_t z[static 4], const uint64_t x[static 4]); // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_p256k1) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sqr_p256k1) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_p256k1) .text .balign 4 #define z x0 #define x x1 // Variables #define u0 x2 #define u1 x3 #define u2 x4 #define u3 x5 #define u4 x6 #define u5 x7 #define u6 x8 #define u7 x9 #define a0 x10 #define a1 x11 #define a2 x12 #define b0 x13 #define b1 x14 #define b3 x15 #define c x16 #define d x17 // Some additional aliases #define l u4 #define h u5 #define b2 u6 #define q u4 #define a3 u7 S2N_BN_SYMBOL(bignum_sqr_p256k1): CFI_START // First just a near-clone of bignum_sqr_4_8 to get the square, using // different registers to collect full product without writeback. ldp x10, x11, [x1] ldp x12, x13, [x1, #16] umull x2, w10, w10 lsr x14, x10, #32 umull x3, w14, w14 umull x14, w10, w14 adds x2, x2, x14, lsl #33 lsr x14, x14, #31 adc x3, x3, x14 umull x4, w11, w11 lsr x14, x11, #32 umull x5, w14, w14 umull x14, w11, w14 mul x15, x10, x11 umulh x16, x10, x11 adds x4, x4, x14, lsl #33 lsr x14, x14, #31 adc x5, x5, x14 adds x15, x15, x15 adcs x16, x16, x16 adc x5, x5, xzr adds x3, x3, x15 adcs x4, x4, x16 adc x5, x5, xzr umull x6, w12, w12 lsr x14, x12, #32 umull x7, w14, w14 umull x14, w12, w14 adds x6, x6, x14, lsl #33 lsr x14, x14, #31 adc x7, x7, x14 umull x8, w13, w13 lsr x14, x13, #32 umull x9, w14, w14 umull x14, w13, w14 mul x15, x12, x13 umulh x16, x12, x13 adds x8, x8, x14, lsl #33 lsr x14, x14, #31 adc x9, x9, x14 adds x15, x15, x15 adcs x16, x16, x16 adc x9, x9, xzr adds x7, x7, x15 adcs x8, x8, x16 adc x9, x9, xzr subs x10, x10, x12 sbcs x11, x11, x13 csetm x16, cc eor x10, x10, x16 subs x10, x10, x16 eor x11, x11, x16 sbc x11, x11, x16 adds x6, x6, x4 adcs x7, x7, x5 adcs x8, x8, xzr adc x9, x9, xzr umull x12, w10, w10 lsr x5, x10, #32 umull x13, w5, w5 umull x5, w10, w5 adds x12, x12, x5, lsl #33 lsr x5, x5, #31 adc x13, x13, x5 umull x15, w11, w11 lsr x5, x11, #32 umull x14, w5, w5 umull x5, w11, w5 mul x4, x10, x11 umulh x16, x10, x11 adds x15, x15, x5, lsl #33 lsr x5, x5, #31 adc x14, x14, x5 adds x4, x4, x4 adcs x16, x16, x16 adc x14, x14, xzr adds x13, x13, x4 adcs x15, x15, x16 adc x14, x14, xzr adds x4, x2, x6 adcs x5, x3, x7 adcs x6, x6, x8 adcs x7, x7, x9 csetm x16, cc subs x4, x4, x12 sbcs x5, x5, x13 sbcs x6, x6, x15 sbcs x7, x7, x14 adcs x8, x8, x16 adc x9, x9, x16 // Now we have the full 8-digit product 2^256 * h + l where // h = [u7,u6,u5,u4] and l = [u3,u2,u1,u0] // and this is == 4294968273 * h + l (mod p_256k1) // Some of the word products are done straightforwardly using mul + umulh // while others are broken down in a more complicated way as // (2^32 + 977) * (2^32 * h + l) = 2^64 * h + 2^32 * (d * h + l) + d * l mov d, #977 orr c, d, #0x100000000 mul a0, c, u4 umulh b0, c, u4 and l, u5, #0xFFFFFFFF lsr h, u5, #32 mul a1, d, l madd l, d, h, l adds a1, a1, l, lsl #32 lsr l, l, #32 adc b1, h, l mul a2, c, u6 umulh b2, c, u6 and l, u7, #0xFFFFFFFF lsr h, u7, #32 mul a3, d, l madd l, d, h, l adds a3, a3, l, lsl #32 lsr 
l, l, #32 adc b3, h, l adds u0, u0, a0 adcs u1, u1, a1 adcs u2, u2, a2 adcs u3, u3, a3 cset u4, cs adds u1, u1, b0 adcs u2, u2, b1 adcs u3, u3, b2 adc u4, u4, b3 // Now we have reduced to 5 digits, 2^256 * h + l = [u4,u3,u2,u1,u0] // Use q = h + 1 as the initial quotient estimate, either right or 1 too big. // Since q <= 2^33 we do 4294968273 * q = (q<<32) + 977 * q to avoid umulh add q, u4, #1 mul a0, d, q lsr a1, q, #32 adds a0, a0, q, lsl #32 adc a1, xzr, a1 adds u0, u0, a0 adcs u1, u1, a1 adcs u2, u2, xzr adcs u3, u3, xzr // Now the effective answer is 2^256 * (CF - 1) + [u3,u2,u1,u0] // So we correct if CF = 0 by subtracting 4294968273, i.e. by // adding p_256k1 to the "full" answer csel c, c, xzr, cc subs u0, u0, c sbcs u1, u1, xzr sbcs u2, u2, xzr sbc u3, u3, xzr // Write back stp u0, u1, [x0] stp u2, u3, [x0, #16] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_sqr_p256k1) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
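The umull-based squaring in the code above rests on the identity (2^32*h + l)^2 = 2^64*h^2 + 2^33*h*l + l^2, with the cross term folded in by the lsl #33 / lsr #31 pair. A quick Python check (illustrative):

import random
for _ in range(1000):
    x = random.randrange(1 << 64)
    h, l = x >> 32, x & 0xFFFFFFFF
    assert x * x == (h * h << 64) + (h * l << 33) + l * l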
wlsfx/bnbb
1,915
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_mod_p256k1_4.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Reduce modulo field characteristic, z := x mod p_256k1
// Input x[4]; output z[4]
//
// extern void bignum_mod_p256k1_4(uint64_t z[static 4],
//                                 const uint64_t x[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_p256k1_4)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_p256k1_4)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_p256k1_4)
.text
.balign 4

#define z x0
#define x x1

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d x6
#define c x7

S2N_BN_SYMBOL(bignum_mod_p256k1_4):

CFI_START

// Load the inputs as [d3;d2;d1;d0] and let d be an AND of [d3;d2;d1] to
// condense the comparison below.

ldp d0, d1, [x]
ldp d2, d3, [x, #16]
and d, d1, d2
and d, d, d3

// Compare x >= p_256k1 = 2^256 - 4294968273 using condensed carry:
// we get a carry from the lowest digit and all other digits are 1.
// We end up with c and d as adjusted digits for x - p_256k1 if so.

mov c, #977
orr c, c, #0x100000000
adds c, c, d0
adcs d, d, xzr

// If indeed x >= p_256k1 then x := x - p_256k1, using c and d

csel d0, d0, c, cc
csel d1, d1, d, cc
csel d2, d2, d, cc
csel d3, d3, d, cc

// Store the end result

stp d0, d1, [z]
stp d2, d3, [z, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_mod_p256k1_4)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
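When x >= p_256k1 the top three digits of x are necessarily all 1s, so the two adjusted words computed above (c and d) are exactly the digits of x - p_256k1. A Python model of the condensed comparison and selection (illustrative; mod_p256k1_4_model is an invented name):

M64 = (1 << 64) - 1
C = (1 << 32) + 977
P = (1 << 256) - C

def mod_p256k1_4_model(x):
    d = [(x >> (64*i)) & M64 for i in range(4)]
    c = d[0] + C                       # low digit plus 4294968273
    dd = d[1] & d[2] & d[3]            # condensed middle/top digits
    t = dd + (c >> 64)
    if t >> 64:                        # carry out of 2^256: x >= p_256k1
        hi = t & M64                   # wraps to 0 in exactly this case
        return (c & M64) | (hi << 64) | (hi << 128) | (hi << 192)
    return x

import random
for x in [0, P - 1, P, P + 977, (1 << 256) - 1] + [random.randrange(1 << 256) for _ in range(1000)]:
    assert mod_p256k1_4_model(x) == x % P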
wlsfx/bnbb
1,795
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_neg_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Negate modulo p_256k1, z := (-x) mod p_256k1, assuming x reduced
// Input x[4]; output z[4]
//
// extern void bignum_neg_p256k1(uint64_t z[static 4], const uint64_t x[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_neg_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_neg_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_neg_p256k1)
.text
.balign 4

#define z x0
#define x x1

#define p x2
#define d0 x3
#define d1 x4
#define d2 x5
#define d3 x6
#define c x7

S2N_BN_SYMBOL(bignum_neg_p256k1):

CFI_START

// Load the 4 digits of x and let c be an OR of all the digits

ldp d0, d1, [x]
orr c, d0, d1
ldp d2, d3, [x, #16]
orr c, c, d2
orr c, c, d3

// Turn p into a strict bitmask, and c a masked constant -4294968273,
// computing it in effect as ~4294968272 = ~(2^32 + 976)

cmp c, xzr
csetm p, ne
mov c, #976
orr c, c, #0x100000000
bic c, p, c

// Now just do [2^256 - 4294968273] - x where the constant is masked

subs d0, c, d0
sbcs d1, p, d1
sbcs d2, p, d2
sbc d3, p, d3

// Write back result and return

stp d0, d1, [z]
stp d2, d3, [z, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_neg_p256k1)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
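A Python model of the negation, including the zero special case that the bitmask handles (illustrative; neg_model is an invented name):

C = (1 << 32) + 977
P = (1 << 256) - C

def neg_model(x):
    assert 0 <= x < P                  # input assumed reduced
    return 0 if x == 0 else P - x      # the mask kills the constant at x == 0

import random
for x in [0, 1, P - 1] + [random.randrange(P) for _ in range(100)]:
    assert neg_model(x) == (-x) % P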
wlsfx/bnbb
22,823
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/secp256k1_jadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point addition on SECG curve secp256k1 in Jacobian coordinates // // extern void secp256k1_jadd(uint64_t p3[static 12], const uint64_t p1[static 12], // const uint64_t p2[static 12]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples. // A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3). // It is assumed that all coordinates of the input points p1 and p2 are // fully reduced mod p_256k1, that both z coordinates are nonzero and // that neither p1 =~= p2 or p1 =~= -p2, where "=~=" means "represents // the same affine point as". // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(secp256k1_jadd) S2N_BN_FUNCTION_TYPE_DIRECTIVE(secp256k1_jadd) S2N_BN_SYM_PRIVACY_DIRECTIVE(secp256k1_jadd) .text .balign 4 // Size of individual field elements #define NUMSIZE 32 // Stable homes for input arguments during main code sequence #define input_z x19 #define input_x x20 #define input_y x21 // The magic constant 2^256 - p_256k1 #define pconst x17 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define z_2 input_y, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // #NSPACE is the total stack needed for these temporaries #define z1sq sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define x1a sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define z2sq sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define y1a sp, #(NUMSIZE*6) #define NSPACE NUMSIZE*7 // Corresponds exactly to bignum_mul_p256k1 except for registers and // re-use of the pconst register for the constant 4294968273 #define mul_p256k1(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x5, x6, [P2] __LF \ mul x7, x3, x5 __LF \ umulh x8, x3, x5 __LF \ mul x9, x4, x6 __LF \ umulh x10, x4, x6 __LF \ subs x4, x4, x3 __LF \ cneg x4, x4, lo __LF \ csetm x16, lo __LF \ adds x9, x9, x8 __LF \ adc x10, x10, xzr __LF \ subs x3, x5, x6 __LF \ cneg x3, x3, lo __LF \ cinv x16, x16, lo __LF \ mul x15, x4, x3 __LF \ umulh x3, x4, x3 __LF \ adds x8, x7, x9 __LF \ adcs x9, x9, x10 __LF \ adc x10, x10, xzr __LF \ cmn x16, #1 __LF \ eor x15, x15, x16 __LF \ adcs x8, x15, x8 __LF \ eor x3, x3, x16 __LF \ adcs x9, x3, x9 __LF \ adc x10, x10, x16 __LF \ ldp x3, x4, [P1+16] __LF \ ldp x5, x6, [P2+16] __LF \ mul x11, x3, x5 __LF \ umulh x12, x3, x5 __LF \ mul x13, x4, x6 __LF \ umulh x14, x4, x6 __LF \ subs x4, x4, x3 __LF \ cneg x4, x4, lo __LF \ csetm x16, lo __LF \ adds x13, x13, x12 __LF \ adc x14, x14, xzr __LF \ subs x3, x5, x6 __LF \ cneg x3, x3, lo __LF \ cinv x16, x16, lo __LF \ mul x15, x4, x3 __LF \ umulh x3, x4, x3 __LF \ adds x12, x11, x13 __LF \ adcs x13, x13, x14 __LF \ adc x14, x14, xzr __LF \ cmn x16, #1 __LF \ eor x15, x15, x16 __LF \ adcs x12, x15, 
x12 __LF \ eor x3, x3, x16 __LF \ adcs x13, x3, x13 __LF \ adc x14, x14, x16 __LF \ ldp x3, x4, [P1+16] __LF \ ldp x15, x16, [P1] __LF \ subs x3, x3, x15 __LF \ sbcs x4, x4, x16 __LF \ csetm x16, lo __LF \ ldp x15, x0, [P2] __LF \ subs x5, x15, x5 __LF \ sbcs x6, x0, x6 __LF \ csetm x0, lo __LF \ eor x3, x3, x16 __LF \ subs x3, x3, x16 __LF \ eor x4, x4, x16 __LF \ sbc x4, x4, x16 __LF \ eor x5, x5, x0 __LF \ subs x5, x5, x0 __LF \ eor x6, x6, x0 __LF \ sbc x6, x6, x0 __LF \ eor x16, x0, x16 __LF \ adds x11, x11, x9 __LF \ adcs x12, x12, x10 __LF \ adcs x13, x13, xzr __LF \ adc x14, x14, xzr __LF \ mul x2, x3, x5 __LF \ umulh x0, x3, x5 __LF \ mul x15, x4, x6 __LF \ umulh x1, x4, x6 __LF \ subs x4, x4, x3 __LF \ cneg x4, x4, lo __LF \ csetm x9, lo __LF \ adds x15, x15, x0 __LF \ adc x1, x1, xzr __LF \ subs x6, x5, x6 __LF \ cneg x6, x6, lo __LF \ cinv x9, x9, lo __LF \ mul x5, x4, x6 __LF \ umulh x6, x4, x6 __LF \ adds x0, x2, x15 __LF \ adcs x15, x15, x1 __LF \ adc x1, x1, xzr __LF \ cmn x9, #1 __LF \ eor x5, x5, x9 __LF \ adcs x0, x5, x0 __LF \ eor x6, x6, x9 __LF \ adcs x15, x6, x15 __LF \ adc x1, x1, x9 __LF \ adds x9, x11, x7 __LF \ adcs x10, x12, x8 __LF \ adcs x11, x13, x11 __LF \ adcs x12, x14, x12 __LF \ adcs x13, x13, xzr __LF \ adc x14, x14, xzr __LF \ cmn x16, #1 __LF \ eor x2, x2, x16 __LF \ adcs x9, x2, x9 __LF \ eor x0, x0, x16 __LF \ adcs x10, x0, x10 __LF \ eor x15, x15, x16 __LF \ adcs x11, x15, x11 __LF \ eor x1, x1, x16 __LF \ adcs x12, x1, x12 __LF \ adcs x13, x13, x16 __LF \ adc x14, x14, x16 __LF \ mov x16, #977 __LF \ mul x3, pconst, x11 __LF \ umulh x5, pconst, x11 __LF \ and x15, x12, #0xffffffff __LF \ lsr x2, x12, #32 __LF \ mul x4, x16, x15 __LF \ madd x15, x16, x2, x15 __LF \ adds x4, x4, x15, lsl #32 __LF \ lsr x15, x15, #32 __LF \ adc x6, x2, x15 __LF \ mul x11, pconst, x13 __LF \ umulh x13, pconst, x13 __LF \ and x15, x14, #0xffffffff __LF \ lsr x2, x14, #32 __LF \ mul x12, x16, x15 __LF \ madd x15, x16, x2, x15 __LF \ adds x12, x12, x15, lsl #32 __LF \ lsr x15, x15, #32 __LF \ adc x14, x2, x15 __LF \ adds x7, x7, x3 __LF \ adcs x8, x8, x4 __LF \ adcs x9, x9, x11 __LF \ adcs x10, x10, x12 __LF \ cset x11, hs __LF \ adds x8, x8, x5 __LF \ adcs x9, x9, x6 __LF \ adcs x10, x10, x13 __LF \ adc x11, x11, x14 __LF \ add x0, x11, #1 __LF \ mul x3, x16, x0 __LF \ lsr x4, x0, #32 __LF \ adds x3, x3, x0, lsl #32 __LF \ adc x4, xzr, x4 __LF \ adds x7, x7, x3 __LF \ adcs x8, x8, x4 __LF \ adcs x9, x9, xzr __LF \ adcs x10, x10, xzr __LF \ csel x1, pconst, xzr, lo __LF \ subs x7, x7, x1 __LF \ sbcs x8, x8, xzr __LF \ sbcs x9, x9, xzr __LF \ sbc x10, x10, xzr __LF \ stp x7, x8, [P0] __LF \ stp x9, x10, [P0+16] // Corresponds exactly to bignum_sqr_p256k1 except for // re-use of the pconst register for the constant 4294968273 #define sqr_p256k1(P0,P1) \ ldp x10, x11, [P1] __LF \ ldp x12, x13, [P1+16] __LF \ umull x2, w10, w10 __LF \ lsr x14, x10, #32 __LF \ umull x3, w14, w14 __LF \ umull x14, w10, w14 __LF \ adds x2, x2, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x3, x3, x14 __LF \ umull x4, w11, w11 __LF \ lsr x14, x11, #32 __LF \ umull x5, w14, w14 __LF \ umull x14, w11, w14 __LF \ mul x15, x10, x11 __LF \ umulh x16, x10, x11 __LF \ adds x4, x4, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x5, x5, x14 __LF \ adds x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adc x5, x5, xzr __LF \ adds x3, x3, x15 __LF \ adcs x4, x4, x16 __LF \ adc x5, x5, xzr __LF \ umull x6, w12, w12 __LF \ lsr x14, x12, #32 __LF \ umull x7, w14, w14 __LF \ umull x14, w12, w14 __LF \ adds 
x6, x6, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x7, x7, x14 __LF \ umull x8, w13, w13 __LF \ lsr x14, x13, #32 __LF \ umull x9, w14, w14 __LF \ umull x14, w13, w14 __LF \ mul x15, x12, x13 __LF \ umulh x16, x12, x13 __LF \ adds x8, x8, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x9, x9, x14 __LF \ adds x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adc x9, x9, xzr __LF \ adds x7, x7, x15 __LF \ adcs x8, x8, x16 __LF \ adc x9, x9, xzr __LF \ subs x10, x10, x12 __LF \ sbcs x11, x11, x13 __LF \ csetm x16, lo __LF \ eor x10, x10, x16 __LF \ subs x10, x10, x16 __LF \ eor x11, x11, x16 __LF \ sbc x11, x11, x16 __LF \ adds x6, x6, x4 __LF \ adcs x7, x7, x5 __LF \ adcs x8, x8, xzr __LF \ adc x9, x9, xzr __LF \ umull x12, w10, w10 __LF \ lsr x5, x10, #32 __LF \ umull x13, w5, w5 __LF \ umull x5, w10, w5 __LF \ adds x12, x12, x5, lsl #33 __LF \ lsr x5, x5, #31 __LF \ adc x13, x13, x5 __LF \ umull x15, w11, w11 __LF \ lsr x5, x11, #32 __LF \ umull x14, w5, w5 __LF \ umull x5, w11, w5 __LF \ mul x4, x10, x11 __LF \ umulh x16, x10, x11 __LF \ adds x15, x15, x5, lsl #33 __LF \ lsr x5, x5, #31 __LF \ adc x14, x14, x5 __LF \ adds x4, x4, x4 __LF \ adcs x16, x16, x16 __LF \ adc x14, x14, xzr __LF \ adds x13, x13, x4 __LF \ adcs x15, x15, x16 __LF \ adc x14, x14, xzr __LF \ adds x4, x2, x6 __LF \ adcs x5, x3, x7 __LF \ adcs x6, x6, x8 __LF \ adcs x7, x7, x9 __LF \ csetm x16, lo __LF \ subs x4, x4, x12 __LF \ sbcs x5, x5, x13 __LF \ sbcs x6, x6, x15 __LF \ sbcs x7, x7, x14 __LF \ adcs x8, x8, x16 __LF \ adc x9, x9, x16 __LF \ mov x16, #977 __LF \ mul x10, pconst, x6 __LF \ umulh x13, pconst, x6 __LF \ and x6, x7, #0xffffffff __LF \ lsr x7, x7, #32 __LF \ mul x11, x16, x6 __LF \ madd x6, x16, x7, x6 __LF \ adds x11, x11, x6, lsl #32 __LF \ lsr x6, x6, #32 __LF \ adc x14, x7, x6 __LF \ mul x12, pconst, x8 __LF \ umulh x8, pconst, x8 __LF \ and x6, x9, #0xffffffff __LF \ lsr x7, x9, #32 __LF \ mul x9, x16, x6 __LF \ madd x6, x16, x7, x6 __LF \ adds x9, x9, x6, lsl #32 __LF \ lsr x6, x6, #32 __LF \ adc x15, x7, x6 __LF \ adds x2, x2, x10 __LF \ adcs x3, x3, x11 __LF \ adcs x4, x4, x12 __LF \ adcs x5, x5, x9 __LF \ cset x6, hs __LF \ adds x3, x3, x13 __LF \ adcs x4, x4, x14 __LF \ adcs x5, x5, x8 __LF \ adc x6, x6, x15 __LF \ add x6, x6, #1 __LF \ mul x10, x16, x6 __LF \ lsr x11, x6, #32 __LF \ adds x10, x10, x6, lsl #32 __LF \ adc x11, xzr, x11 __LF \ adds x2, x2, x10 __LF \ adcs x3, x3, x11 __LF \ adcs x4, x4, xzr __LF \ adcs x5, x5, xzr __LF \ csel x16, pconst, xzr, lo __LF \ subs x2, x2, x16 __LF \ sbcs x3, x3, xzr __LF \ sbcs x4, x4, xzr __LF \ sbc x5, x5, xzr __LF \ stp x2, x3, [P0] __LF \ stp x4, x5, [P0+16] // Corresponds exactly to bignum_sub_p256k1 #define sub_p256k1(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ mov x4, #0x3d1 __LF \ orr x3, x4, #0x100000000 __LF \ csel x3, x3, xzr, cc __LF \ subs x5, x5, x3 __LF \ sbcs x6, x6, xzr __LF \ sbcs x7, x7, xzr __LF \ sbc x8, x8, xzr __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] S2N_BN_SYMBOL(secp256k1_jadd): CFI_START // Save registers and make room on stack for temporary variables CFI_DEC_SP(NSPACE+32) CFI_STACKSAVE2(x19,x20,NSPACE) CFI_STACKSAVE2(x21,x22,NSPACE+16) // Move the input arguments to stable place mov input_z, x0 mov input_x, x1 mov input_y, x2 // Set up pconst = 4294968273, so p_256k1 = 2^256 - pconst mov pconst, #977 orr pconst, pconst, #0x100000000 // Main code, just 
a sequence of basic field operations sqr_p256k1(z1sq,z_1) sqr_p256k1(z2sq,z_2) mul_p256k1(y1a,z_2,y_1) mul_p256k1(y2a,z_1,y_2) mul_p256k1(x2a,z1sq,x_2) mul_p256k1(x1a,z2sq,x_1) mul_p256k1(y2a,z1sq,y2a) mul_p256k1(y1a,z2sq,y1a) sub_p256k1(xd,x2a,x1a) sub_p256k1(yd,y2a,y1a) sqr_p256k1(zz,xd) sqr_p256k1(ww,yd) mul_p256k1(zzx1,zz,x1a) mul_p256k1(zzx2,zz,x2a) sub_p256k1(resx,ww,zzx1) sub_p256k1(t1,zzx2,zzx1) mul_p256k1(xd,xd,z_1) sub_p256k1(resx,resx,zzx2) sub_p256k1(t2,zzx1,resx) mul_p256k1(t1,t1,y1a) mul_p256k1(resz,xd,z_2) mul_p256k1(t2,yd,t2) sub_p256k1(resy,t2,t1) // Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0 // The condition codes get set by a comparison (P2 != 0) - (P1 != 0) // So "HI" <=> CF /\ ~ZF <=> P1 = 0 /\ ~(P2 = 0) // and "LO" <=> ~CF <=> ~(P1 = 0) /\ P2 = 0 ldp x0, x1, [z_1] ldp x2, x3, [z_1+16] orr x12, x0, x1 orr x13, x2, x3 orr x12, x12, x13 cmp x12, xzr cset x12, ne ldp x4, x5, [z_2] ldp x6, x7, [z_2+16] orr x13, x4, x5 orr x14, x6, x7 orr x13, x13, x14 cmp x13, xzr cset x13, ne cmp x13, x12 // Multiplex the outputs accordingly, re-using the z's in registers ldp x8, x9, [resz] csel x8, x0, x8, lo csel x9, x1, x9, lo csel x8, x4, x8, hi csel x9, x5, x9, hi ldp x10, x11, [resz+16] csel x10, x2, x10, lo csel x11, x3, x11, lo csel x10, x6, x10, hi csel x11, x7, x11, hi ldp x12, x13, [x_1] ldp x0, x1, [resx] csel x0, x12, x0, lo csel x1, x13, x1, lo ldp x12, x13, [x_2] csel x0, x12, x0, hi csel x1, x13, x1, hi ldp x12, x13, [x_1+16] ldp x2, x3, [resx+16] csel x2, x12, x2, lo csel x3, x13, x3, lo ldp x12, x13, [x_2+16] csel x2, x12, x2, hi csel x3, x13, x3, hi ldp x12, x13, [y_1] ldp x4, x5, [resy] csel x4, x12, x4, lo csel x5, x13, x5, lo ldp x12, x13, [y_2] csel x4, x12, x4, hi csel x5, x13, x5, hi ldp x12, x13, [y_1+16] ldp x6, x7, [resy+16] csel x6, x12, x6, lo csel x7, x13, x7, lo ldp x12, x13, [y_2+16] csel x6, x12, x6, hi csel x7, x13, x7, hi // Finally store back the multiplexed values stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [y_3] stp x6, x7, [y_3+16] stp x8, x9, [z_3] stp x10, x11, [z_3+16] // Restore stack and return CFI_STACKLOAD2(x19,x20,NSPACE) CFI_STACKLOAD2(x21,x22,NSPACE+16) CFI_INC_SP((NSPACE+32)) CFI_RET S2N_BN_SIZE_DIRECTIVE(secp256k1_jadd) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
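The field-operation schedule above is the textbook Jacobian addition (U1 = x1*z2^2, U2 = x2*z1^2, S1 = y1*z2^3, S2 = y2*z1^3, H = U2 - U1, R = S2 - S1). A Python transcription with a self-contained affine cross-check (illustrative only; jadd_model, point_from_x and the chosen x-coordinates are invented, and the P1 = 0 / P2 = 0 multiplexing done by the assembly is omitted):

C = (1 << 32) + 977
P = (1 << 256) - C

def jadd_model(p1, p2):                # z1, z2 nonzero; p1 != +-p2
    (x1, y1, z1), (x2, y2, z2) = p1, p2
    z1sq, z2sq = z1 * z1 % P, z2 * z2 % P
    y1a = z2sq * (z2 * y1) % P         # S1 = y1 * z2^3
    y2a = z1sq * (z1 * y2) % P         # S2 = y2 * z1^3
    x1a, x2a = z2sq * x1 % P, z1sq * x2 % P
    xd, yd = (x2a - x1a) % P, (y2a - y1a) % P   # H and R
    zz, ww = xd * xd % P, yd * yd % P
    zzx1, zzx2 = zz * x1a % P, zz * x2a % P
    x3 = (ww - zzx1 - zzx2) % P        # R^2 - 2*U1*H^2 - H^3
    t1 = (zzx2 - zzx1) % P             # H^3
    z3 = xd * z1 % P * z2 % P          # H * z1 * z2
    y3 = (yd * ((zzx1 - x3) % P) - t1 * y1a) % P
    return (x3, y3, z3)

def to_affine(pt):
    x, y, z = pt
    zi = pow(z, -1, P)
    return x * zi * zi % P, y * zi * zi % P * zi % P

def affine_add(a, b):                  # distinct, non-opposite points
    (ax, ay), (bx, by) = a, b
    lam = (by - ay) * pow(bx - ax, -1, P) % P
    cx = (lam * lam - ax - bx) % P
    return cx, (lam * (ax - cx) - ay) % P

def point_from_x(x0):                  # P == 3 (mod 4) makes sqrt easy
    while True:
        rhs = (x0**3 + 7) % P
        y = pow(rhs, (P + 1) // 4, P)
        if y * y % P == rhs:
            return (x0, y)
        x0 += 1

A, B = point_from_x(2), point_from_x(50)
assert to_affine(jadd_model(A + (1,), B + (1,))) == affine_add(A, B)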
wlsfx/bnbb
2,169
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_optneg_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Optionally negate modulo p_256k1, z := (-x) mod p_256k1 (if p nonzero) or
// z := x (if p zero), assuming x reduced
// Inputs p, x[4]; output z[4]
//
// extern void bignum_optneg_p256k1(uint64_t z[static 4], uint64_t p,
//                                  const uint64_t x[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = p, X2 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_optneg_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_optneg_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_optneg_p256k1)
.text
.balign 4

#define z x0
#define p x1
#define x x2

#define d0 x3
#define d1 x4
#define d2 x5
#define d3 x6
#define c x7

S2N_BN_SYMBOL(bignum_optneg_p256k1):

CFI_START

// Load the 4 digits of x and let c be an OR of all the digits

ldp d0, d1, [x]
orr c, d0, d1
ldp d2, d3, [x, #16]
orr c, c, d2
orr c, c, d3

// Turn p into a strict bitmask. Force it to zero if the input is zero,
// to avoid giving -0 = p_256k1, which is not reduced though correct modulo.

cmp p, xzr
csetm p, ne
cmp c, xzr
csel p, xzr, p, eq

// We want z := if p then (2^256 - 4294968273) - x else x
// which is: [if p then ~x else x] - [if p then 4294968272 else 0]

mov c, #976
orr c, c, #0x100000000
and c, c, p
eor d0, d0, p
subs d0, d0, c
eor d1, d1, p
sbcs d1, d1, xzr
eor d2, d2, p
sbcs d2, d2, xzr
eor d3, d3, p
sbc d3, d3, xzr

// Write back result and return

stp d0, d1, [z]
stp d2, d3, [z, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_optneg_p256k1)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb
2,164
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_double_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Double modulo p_256k1, z := (2 * x) mod p_256k1, assuming x reduced
// Input x[4]; output z[4]
//
// extern void bignum_double_p256k1(uint64_t z[static 4],
//                                  const uint64_t x[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_double_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_double_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_double_p256k1)
.text
.balign 4

#define z x0
#define x x1

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define c x6
#define dd x7
#define l x8

S2N_BN_SYMBOL(bignum_double_p256k1):

CFI_START

// Load the inputs and double top-down as z = 2^256 * c + [d3;d2;d1;d0]
// While doing this, create an AND dd of [d3;d2;d1] to condense comparison

ldp d2, d3, [x, #16]
lsr c, d3, #63
extr d3, d3, d2, #63
ldp d0, d1, [x]
extr d2, d2, d1, #63
and dd, d2, d3
extr d1, d1, d0, #63
and dd, dd, d1
lsl d0, d0, #1

// Let l = 4294968273 so that p_256k1 = 2^256 - l

mov l, #977
orr l, l, #0x100000000

// Decide whether z >= p_256k1 <=> z + 4294968273 >= 2^256

adds xzr, d0, l
adcs xzr, dd, xzr
adcs c, c, xzr

// Now c <> 0 <=> z >= p_256k1, so mask the constant l accordingly

csel l, l, xzr, ne

// If z >= p_256k1 do z := z - p_256k1, i.e. add l in 4 digits

adds d0, d0, l
adcs d1, d1, xzr
adcs d2, d2, xzr
adc d3, d3, xzr

// Store the result

stp d0, d1, [z]
stp d2, d3, [z, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_double_p256k1)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
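The top-down doubling above assembles a 1-bit left shift of a 4-digit number out of extr (extract) instructions. A Python check of the digit manipulation (illustrative):

M64 = (1 << 64) - 1
import random
for _ in range(1000):
    d = [random.randrange(1 << 64) for _ in range(4)]
    x = sum(v << (64*i) for i, v in enumerate(d))
    c = d[3] >> 63                               # lsr c, d3, #63
    e3 = ((d[3] << 1) | (d[2] >> 63)) & M64      # extr d3, d3, d2, #63
    e2 = ((d[2] << 1) | (d[1] >> 63)) & M64      # extr d2, d2, d1, #63
    e1 = ((d[1] << 1) | (d[0] >> 63)) & M64      # extr d1, d1, d0, #63
    e0 = (d[0] << 1) & M64                       # lsl d0, d0, #1
    assert (c << 256) | (e3 << 192) | (e2 << 128) | (e1 << 64) | e0 == 2 * x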
wlsfx/bnbb
16,420
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/secp256k1_jadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point addition on SECG curve secp256k1 in Jacobian coordinates // // extern void secp256k1_jadd_alt(uint64_t p3[static 12], // const uint64_t p1[static 12], // const uint64_t p2[static 12]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples. // A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3). // It is assumed that all coordinates of the input points p1 and p2 are // fully reduced mod p_256k1, that both z coordinates are nonzero and // that neither p1 =~= p2 or p1 =~= -p2, where "=~=" means "represents // the same affine point as". // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(secp256k1_jadd_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(secp256k1_jadd_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(secp256k1_jadd_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 32 // Stable homes for input arguments during main code sequence #define input_z x15 #define input_x x16 #define input_y x17 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define z_2 input_y, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // #NSPACE is the total stack needed for these temporaries #define z1sq sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define x1a sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define z2sq sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define y1a sp, #(NUMSIZE*6) #define NSPACE NUMSIZE*7 // Corresponds exactly to bignum_mul_p256k1_alt #define mul_p256k1(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x7, x8, [P2] __LF \ mul x12, x3, x7 __LF \ umulh x13, x3, x7 __LF \ mul x11, x3, x8 __LF \ umulh x14, x3, x8 __LF \ adds x13, x13, x11 __LF \ ldp x9, x10, [P2+16] __LF \ mul x11, x3, x9 __LF \ umulh x0, x3, x9 __LF \ adcs x14, x14, x11 __LF \ mul x11, x3, x10 __LF \ umulh x1, x3, x10 __LF \ adcs x0, x0, x11 __LF \ adc x1, x1, xzr __LF \ ldp x5, x6, [P1+16] __LF \ mul x11, x4, x7 __LF \ adds x13, x13, x11 __LF \ mul x11, x4, x8 __LF \ adcs x14, x14, x11 __LF \ mul x11, x4, x9 __LF \ adcs x0, x0, x11 __LF \ mul x11, x4, x10 __LF \ adcs x1, x1, x11 __LF \ umulh x3, x4, x10 __LF \ adc x3, x3, xzr __LF \ umulh x11, x4, x7 __LF \ adds x14, x14, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x0, x0, x11 __LF \ umulh x11, x4, x9 __LF \ adcs x1, x1, x11 __LF \ adc x3, x3, xzr __LF \ mul x11, x5, x7 __LF \ adds x14, x14, x11 __LF \ mul x11, x5, x8 __LF \ adcs x0, x0, x11 __LF \ mul x11, x5, x9 __LF \ adcs x1, x1, x11 __LF \ mul x11, x5, x10 __LF \ adcs x3, x3, x11 __LF \ umulh x4, x5, x10 __LF \ adc x4, x4, xzr __LF \ umulh x11, x5, x7 __LF \ adds x0, x0, x11 __LF \ umulh x11, x5, x8 __LF \ adcs x1, x1, x11 __LF \ umulh x11, x5, x9 __LF \ adcs x3, x3, x11 __LF \ adc x4, x4, xzr __LF \ mul x11, 
x6, x7 __LF \ adds x0, x0, x11 __LF \ mul x11, x6, x8 __LF \ adcs x1, x1, x11 __LF \ mul x11, x6, x9 __LF \ adcs x3, x3, x11 __LF \ mul x11, x6, x10 __LF \ adcs x4, x4, x11 __LF \ umulh x5, x6, x10 __LF \ adc x5, x5, xzr __LF \ umulh x11, x6, x7 __LF \ adds x1, x1, x11 __LF \ umulh x11, x6, x8 __LF \ adcs x3, x3, x11 __LF \ umulh x11, x6, x9 __LF \ adcs x4, x4, x11 __LF \ adc x5, x5, xzr __LF \ mov x7, #0x3d1 __LF \ orr x7, x7, #0x100000000 __LF \ mul x11, x7, x1 __LF \ umulh x9, x7, x1 __LF \ adds x12, x12, x11 __LF \ mul x11, x7, x3 __LF \ umulh x3, x7, x3 __LF \ adcs x13, x13, x11 __LF \ mul x11, x7, x4 __LF \ umulh x4, x7, x4 __LF \ adcs x14, x14, x11 __LF \ mul x11, x7, x5 __LF \ umulh x5, x7, x5 __LF \ adcs x0, x0, x11 __LF \ cset x1, cs __LF \ adds x13, x13, x9 __LF \ adcs x14, x14, x3 __LF \ adcs x0, x0, x4 __LF \ adc x1, x1, x5 __LF \ add x8, x1, #0x1 __LF \ mul x11, x7, x8 __LF \ umulh x9, x7, x8 __LF \ adds x12, x12, x11 __LF \ adcs x13, x13, x9 __LF \ adcs x14, x14, xzr __LF \ adcs x0, x0, xzr __LF \ csel x7, x7, xzr, cc __LF \ subs x12, x12, x7 __LF \ sbcs x13, x13, xzr __LF \ sbcs x14, x14, xzr __LF \ sbc x0, x0, xzr __LF \ stp x12, x13, [P0] __LF \ stp x14, x0, [P0+16] // Corresponds exactly to bignum_sqr_p256k1_alt #define sqr_p256k1(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x9, x2, x3 __LF \ umulh x10, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x11, x2, x5 __LF \ umulh x12, x2, x5 __LF \ mul x7, x2, x4 __LF \ umulh x6, x2, x4 __LF \ adds x10, x10, x7 __LF \ adcs x11, x11, x6 __LF \ mul x7, x3, x4 __LF \ umulh x6, x3, x4 __LF \ adc x6, x6, xzr __LF \ adds x11, x11, x7 __LF \ mul x13, x4, x5 __LF \ umulh x14, x4, x5 __LF \ adcs x12, x12, x6 __LF \ mul x7, x3, x5 __LF \ umulh x6, x3, x5 __LF \ adc x6, x6, xzr __LF \ adds x12, x12, x7 __LF \ adcs x13, x13, x6 __LF \ adc x14, x14, xzr __LF \ adds x9, x9, x9 __LF \ adcs x10, x10, x10 __LF \ adcs x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, x14 __LF \ cset x6, cs __LF \ umulh x7, x2, x2 __LF \ mul x8, x2, x2 __LF \ adds x9, x9, x7 __LF \ mul x7, x3, x3 __LF \ adcs x10, x10, x7 __LF \ umulh x7, x3, x3 __LF \ adcs x11, x11, x7 __LF \ mul x7, x4, x4 __LF \ adcs x12, x12, x7 __LF \ umulh x7, x4, x4 __LF \ adcs x13, x13, x7 __LF \ mul x7, x5, x5 __LF \ adcs x14, x14, x7 __LF \ umulh x7, x5, x5 __LF \ adc x6, x6, x7 __LF \ mov x3, #0x3d1 __LF \ orr x3, x3, #0x100000000 __LF \ mul x7, x3, x12 __LF \ umulh x4, x3, x12 __LF \ adds x8, x8, x7 __LF \ mul x7, x3, x13 __LF \ umulh x13, x3, x13 __LF \ adcs x9, x9, x7 __LF \ mul x7, x3, x14 __LF \ umulh x14, x3, x14 __LF \ adcs x10, x10, x7 __LF \ mul x7, x3, x6 __LF \ umulh x6, x3, x6 __LF \ adcs x11, x11, x7 __LF \ cset x12, cs __LF \ adds x9, x9, x4 __LF \ adcs x10, x10, x13 __LF \ adcs x11, x11, x14 __LF \ adc x12, x12, x6 __LF \ add x2, x12, #0x1 __LF \ mul x7, x3, x2 __LF \ umulh x6, x3, x2 __LF \ adds x8, x8, x7 __LF \ adcs x9, x9, x6 __LF \ adcs x10, x10, xzr __LF \ adcs x11, x11, xzr __LF \ csel x3, x3, xzr, cc __LF \ subs x8, x8, x3 __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbc x11, x11, xzr __LF \ stp x8, x9, [P0] __LF \ stp x10, x11, [P0+16] // Corresponds exactly to bignum_sub_p256k1 #define sub_p256k1(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ mov x4, #0x3d1 __LF \ orr x3, x4, #0x100000000 __LF \ csel x3, x3, xzr, cc __LF \ subs x5, x5, x3 __LF \ sbcs x6, x6, xzr 
__LF \ sbcs x7, x7, xzr __LF \ sbc x8, x8, xzr __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] S2N_BN_SYMBOL(secp256k1_jadd_alt): CFI_START // Make room on stack for temporary variables // Move the input arguments to stable places CFI_DEC_SP(NSPACE) mov input_z, x0 mov input_x, x1 mov input_y, x2 // Main code, just a sequence of basic field operations sqr_p256k1(z1sq,z_1) sqr_p256k1(z2sq,z_2) mul_p256k1(y1a,z_2,y_1) mul_p256k1(y2a,z_1,y_2) mul_p256k1(x2a,z1sq,x_2) mul_p256k1(x1a,z2sq,x_1) mul_p256k1(y2a,z1sq,y2a) mul_p256k1(y1a,z2sq,y1a) sub_p256k1(xd,x2a,x1a) sub_p256k1(yd,y2a,y1a) sqr_p256k1(zz,xd) sqr_p256k1(ww,yd) mul_p256k1(zzx1,zz,x1a) mul_p256k1(zzx2,zz,x2a) sub_p256k1(resx,ww,zzx1) sub_p256k1(t1,zzx2,zzx1) mul_p256k1(xd,xd,z_1) sub_p256k1(resx,resx,zzx2) sub_p256k1(t2,zzx1,resx) mul_p256k1(t1,t1,y1a) mul_p256k1(resz,xd,z_2) mul_p256k1(t2,yd,t2) sub_p256k1(resy,t2,t1) // Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0 // The condition codes get set by a comparison (P2 != 0) - (P1 != 0) // So "HI" <=> CF /\ ~ZF <=> P1 = 0 /\ ~(P2 = 0) // and "LO" <=> ~CF <=> ~(P1 = 0) /\ P2 = 0 ldp x0, x1, [z_1] ldp x2, x3, [z_1+16] orr x12, x0, x1 orr x13, x2, x3 orr x12, x12, x13 cmp x12, xzr cset x12, ne ldp x4, x5, [z_2] ldp x6, x7, [z_2+16] orr x13, x4, x5 orr x14, x6, x7 orr x13, x13, x14 cmp x13, xzr cset x13, ne cmp x13, x12 // Multiplex the outputs accordingly, re-using the z's in registers ldp x8, x9, [resz] csel x8, x0, x8, lo csel x9, x1, x9, lo csel x8, x4, x8, hi csel x9, x5, x9, hi ldp x10, x11, [resz+16] csel x10, x2, x10, lo csel x11, x3, x11, lo csel x10, x6, x10, hi csel x11, x7, x11, hi ldp x12, x13, [x_1] ldp x0, x1, [resx] csel x0, x12, x0, lo csel x1, x13, x1, lo ldp x12, x13, [x_2] csel x0, x12, x0, hi csel x1, x13, x1, hi ldp x12, x13, [x_1+16] ldp x2, x3, [resx+16] csel x2, x12, x2, lo csel x3, x13, x3, lo ldp x12, x13, [x_2+16] csel x2, x12, x2, hi csel x3, x13, x3, hi ldp x12, x13, [y_1] ldp x4, x5, [resy] csel x4, x12, x4, lo csel x5, x13, x5, lo ldp x12, x13, [y_2] csel x4, x12, x4, hi csel x5, x13, x5, hi ldp x12, x13, [y_1+16] ldp x6, x7, [resy+16] csel x6, x12, x6, lo csel x7, x13, x7, lo ldp x12, x13, [y_2+16] csel x6, x12, x6, hi csel x7, x13, x7, hi // Finally store back the multiplexed values stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [y_3] stp x6, x7, [y_3+16] stp x8, x9, [z_3] stp x10, x11, [z_3+16] // Restore stack and return CFI_INC_SP(NSPACE) CFI_RET S2N_BN_SIZE_DIRECTIVE(secp256k1_jadd_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
wlsfx/bnbb
3,369
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_deamont_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert from Montgomery form z := (x / 2^256) mod p_256k1, // Input x[4]; output z[4] // // extern void bignum_deamont_p256k1(uint64_t z[static 4], // const uint64_t x[static 4]); // // Convert a 4-digit bignum x out of its (optionally almost) Montgomery form, // "almost" meaning any 4-digit input will work, with no range restriction. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_deamont_p256k1) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_deamont_p256k1) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_deamont_p256k1) .text .balign 4 // Input parameters #define z x0 #define x x1 // Rotating registers for the intermediate windows #define d0 x2 #define d1 x3 #define d2 x4 #define d3 x5 // Other temporaries #define t x6 #define w x7 #define c x8 #define dd x9 S2N_BN_SYMBOL(bignum_deamont_p256k1): CFI_START // Load input and set up constants c = 4294968273 so p_256k1 = 2^256 - c, // and w the negated multiplicative inverse p_256k1 * w == -1 (mod 2^64). ldp d0, d1, [x] movz w, #0x3531 movk w, #0xd225, lsl #16 ldp d2, d3, [x, #16] movk w, #0x091d, lsl #32 movk w, #0xd838, lsl #48 mov c, #977 orr c, c, #0x100000000 // Four stages of Montgomery reduction, rotating the register window // Let dd be the AND of all 4 words of the cofactor q as it is computed mul d0, w, d0 umulh t, d0, c subs d1, d1, t mul d1, w, d1 umulh t, d1, c and dd, d0, d1 sbcs d2, d2, t mul d2, w, d2 umulh t, d2, c and dd, dd, d2 sbcs d3, d3, t mul d3, w, d3 umulh t, d3, c and dd, dd, d3 sbcs d0, d0, t sbcs d1, d1, xzr sbcs d2, d2, xzr sbc d3, d3, xzr // The result thus far is z = (x + q * p_256k1) / 2^256. Note that // z < p_256k1 <=> x < (2^256 - q) * p_256k1, and since // x < 2^256 < 2 * p_256k1, we have that *if* q < 2^256 - 1 then // z < p_256k1. Conversely if q = 2^256 - 1 then since // x + q * p_256k1 == 0 (mod 2^256) we have x == p_256k1 (mod 2^256) // and thus x = p_256k1, and z >= p_256k1 (in fact z = p_256k1). // So in summary z < p_256k1 <=> ~(q = 2^256 - 1) <=> ~(x = p_256k1). // and hence iff q is all 1s, or equivalently dd is all 1s, we // correct by subtracting p_256k1 to get 0. Since this is only one // case we compute the result more explicitly rather than doing // arithmetic with carry propagation. add c, c, d0 cmp dd, #-1 csel d0, c, d0, eq csel d1, xzr, d1, eq csel d2, xzr, d2, eq csel d3, xzr, d3, eq // Write back result stp d0, d1, [z] stp d2, d3, [z, #16] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_deamont_p256k1) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
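The q = 2^256 - 1 corner case reasoned about in the comments above (which occurs exactly for x = p_256k1) can be exercised with a whole-number model. A Python sketch (illustrative; deamont_model is an invented name):

M64 = (1 << 64) - 1
C = (1 << 32) + 977
P = (1 << 256) - C
W = 0xd838091dd2253531

def deamont_model(x):
    t, qs = x, []
    for _ in range(4):
        q = ((t & M64) * W) & M64
        qs.append(q)
        t = (t + q * P) >> 64          # overall t = (x + q*P) / 2^256
    if all(q == M64 for q in qs):      # q all 1s <=> x == P, giving t == P
        t -= P
    return t

import random
for x in [0, 1, P - 1, P, (1 << 256) - 1] + [random.randrange(1 << 256) for _ in range(1000)]:
    assert deamont_model(x) == (x * pow(2, -256, P)) % P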
wlsfx/bnbb
4,874
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_mul_p256k1_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply modulo p_256k1, z := (x * y) mod p_256k1 // Inputs x[4], y[4]; output z[4] // // extern void bignum_mul_p256k1_alt(uint64_t z[static 4], // const uint64_t x[static 4], // const uint64_t y[static 4]); // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_p256k1_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mul_p256k1_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_p256k1_alt) .text .balign 4 #define z x0 #define x x1 #define y x2 #define a0 x3 #define a1 x4 #define a2 x5 #define a3 x6 #define b0 x7 #define b1 x8 #define b2 x9 #define b3 x10 #define l x11 #define u0 x12 #define u1 x13 #define u2 x14 #define u3 x15 #define u4 x16 // These alias to the input arguments when no longer needed #define u5 a0 #define u6 a1 #define u7 a2 #define c b0 #define q b1 #define h b2 S2N_BN_SYMBOL(bignum_mul_p256k1_alt): CFI_START // Load operands and set up row 0 = [u4;...;u0] = a0 * [b3;...;b0] ldp a0, a1, [x] ldp b0, b1, [y] mul u0, a0, b0 umulh u1, a0, b0 mul l, a0, b1 umulh u2, a0, b1 adds u1, u1, l ldp b2, b3, [y, #16] mul l, a0, b2 umulh u3, a0, b2 adcs u2, u2, l mul l, a0, b3 umulh u4, a0, b3 adcs u3, u3, l adc u4, u4, xzr ldp a2, a3, [x, #16] // Row 1 = [u5;...;u0] = [a1;a0] * [b3;...;b0] mul l, a1, b0 adds u1, u1, l mul l, a1, b1 adcs u2, u2, l mul l, a1, b2 adcs u3, u3, l mul l, a1, b3 adcs u4, u4, l umulh u5, a1, b3 adc u5, u5, xzr umulh l, a1, b0 adds u2, u2, l umulh l, a1, b1 adcs u3, u3, l umulh l, a1, b2 adcs u4, u4, l adc u5, u5, xzr // Row 2 = [u6;...;u0] = [a2;a1;a0] * [b3;...;b0] mul l, a2, b0 adds u2, u2, l mul l, a2, b1 adcs u3, u3, l mul l, a2, b2 adcs u4, u4, l mul l, a2, b3 adcs u5, u5, l umulh u6, a2, b3 adc u6, u6, xzr umulh l, a2, b0 adds u3, u3, l umulh l, a2, b1 adcs u4, u4, l umulh l, a2, b2 adcs u5, u5, l adc u6, u6, xzr // Row 3 = [u7;...;u0] = [a3;...a0] * [b3;...;b0] mul l, a3, b0 adds u3, u3, l mul l, a3, b1 adcs u4, u4, l mul l, a3, b2 adcs u5, u5, l mul l, a3, b3 adcs u6, u6, l umulh u7, a3, b3 adc u7, u7, xzr umulh l, a3, b0 adds u4, u4, l umulh l, a3, b1 adcs u5, u5, l umulh l, a3, b2 adcs u6, u6, l adc u7, u7, xzr // Now we have the full 8-digit product 2^256 * h + l where // h = [u7,u6,u5,u4] and l = [u3,u2,u1,u0] // and this is == 4294968273 * h + l (mod p_256k1) mov c, #977 orr c, c, #0x100000000 mul l, c, u4 umulh h, c, u4 adds u0, u0, l mul l, c, u5 umulh u5, c, u5 adcs u1, u1, l mul l, c, u6 umulh u6, c, u6 adcs u2, u2, l mul l, c, u7 umulh u7, c, u7 adcs u3, u3, l cset u4, cs adds u1, u1, h adcs u2, u2, u5 adcs u3, u3, u6 adc u4, u4, u7 // Now we have reduced to 5 digits, 2^256 * h + l = [u4,u3,u2,u1,u0] // Use q = h + 1 as the initial quotient estimate, either right or 1 too big. add q, u4, #1 mul l, c, q umulh h, c, q adds u0, u0, l adcs u1, u1, h adcs u2, u2, xzr adcs u3, u3, xzr // Now the effective answer is 2^256 * (CF - 1) + [u3,u2,u1,u0] // So we correct if CF = 0 by subtracting 4294968273, i.e. 
by // adding p_256k1 to the "full" answer csel c, c, xzr, cc subs u0, u0, c sbcs u1, u1, xzr sbcs u2, u2, xzr sbc u3, u3, xzr // Write back and return stp u0, u1, [x0] stp u2, u3, [x0, #16] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mul_p256k1_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
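The tail of the multiplication above is the generic reduction pattern for p_256k1: fold the high half through 2^256 == 4294968273 (mod p_256k1), then estimate the quotient as h + 1, which is either exact or one too large. A minimal Python sketch of the same strategy (an illustration only, not part of this repository):

import random

# Reduce an 8-word product mod p_256k1 = 2^256 - C: one fold brings it down
# to about 5 words, then the quotient estimate q = h + 1 needs at most one
# correction (the CF = 0 case in the code, fixed by adding back p_256k1).
C = 4294968273
P = 2**256 - C

def mul_p256k1(x, y):
    h, l = divmod(x * y, 2**256)         # full product, split at 2^256
    v = h * C + l                        # == x*y (mod P), now ~5 words
    h, l = divmod(v, 2**256)
    r = v - (h + 1) * P                  # quotient estimate q = h + 1
    if r < 0:                            # estimate was one too big
        r += P
    return r

x, y = random.randrange(P), random.randrange(P)
assert mul_p256k1(x, y) == (x * y) % P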
wlsfx/bnbb
2,040
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_sub_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Subtract modulo p_256k1, z := (x - y) mod p_256k1
// Inputs x[4], y[4]; output z[4]
//
// extern void bignum_sub_p256k1(uint64_t z[static 4], const uint64_t x[static 4],
//                               const uint64_t y[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sub_p256k1)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sub_p256k1)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sub_p256k1)
        .text
        .balign 4

#define z x0
#define x x1
#define y x2

#define c x3
#define l x4

#define d0 x5
#define d1 x6
#define d2 x7
#define d3 x8

S2N_BN_SYMBOL(bignum_sub_p256k1):

        CFI_START

// First just subtract the numbers as [d3; d2; d1; d0] = x - y,
// with the inverted carry flag meaning CF <=> x >= y.

        ldp     d0, d1, [x]
        ldp     l, c, [y]
        subs    d0, d0, l
        sbcs    d1, d1, c
        ldp     d2, d3, [x, #16]
        ldp     l, c, [y, #16]
        sbcs    d2, d2, l
        sbcs    d3, d3, c

// Now if x < y we want to add back p_256k1, which staying within 4 digits
// means subtracting 4294968273, since p_256k1 = 2^256 - 4294968273.
// Let c be that constant 4294968273 when x < y, zero otherwise.

        mov     l, #977
        orr     c, l, #0x100000000
        csel    c, c, xzr, cc

// Now correct by adding masked p_256k1, i.e. subtracting c

        subs    d0, d0, c
        sbcs    d1, d1, xzr
        sbcs    d2, d2, xzr
        sbc     d3, d3, xzr

// Store the result

        stp     d0, d1, [z]
        stp     d2, d3, [z, #16]

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_sub_p256k1)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
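A minimal Python sketch of the same subtraction pattern (an illustration only, not part of this repository): do the raw 4-word subtract, then on borrow subtract the constant c, which modulo 2^256 is the same as adding p_256k1.

import random

C = 4294968273
P = 2**256 - C                           # p_256k1

def sub_p256k1(x, y):
    d = (x - y) % 2**256                 # raw subtraction as 4 words
    c = C if x < y else 0                # csel c, c, xzr, cc
    return (d - c) % 2**256              # never wraps; result is < P

x, y = random.randrange(P), random.randrange(P)
assert sub_p256k1(x, y) == (x - y) % P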
wlsfx/bnbb
42,696
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/secp256k1_jdouble.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point doubling on SECG curve secp256k1 in Jacobian coordinates // // extern void secp256k1_jdouble(uint64_t p3[static 12], // const uint64_t p1[static 12]); // // Does p3 := 2 * p1 where all points are regarded as Jacobian triples. // A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3). // It is assumed that all coordinates of the input point are fully // reduced mod p_256k1 and that the z coordinate is not zero. // // Standard ARM ABI: X0 = p3, X1 = p1 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(secp256k1_jdouble) S2N_BN_FUNCTION_TYPE_DIRECTIVE(secp256k1_jdouble) S2N_BN_SYM_PRIVACY_DIRECTIVE(secp256k1_jdouble) .text .balign 4 // Size of individual field elements #define NUMSIZE 32 // Stable homes for input arguments during main code sequence #define input_z x19 #define input_x x20 // The magic constant 2^256 - p_256k1 #define pconst x17 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries #define x_2 sp, #(NUMSIZE*0) #define y_2 sp, #(NUMSIZE*1) #define d sp, #(NUMSIZE*2) #define tmp sp, #(NUMSIZE*3) #define x_4 sp, #(NUMSIZE*4) #define y_4 sp, #(NUMSIZE*6) #define dx2 sp, #(NUMSIZE*8) #define xy2 sp, #(NUMSIZE*10) #define NSPACE NUMSIZE*12 // Corresponds exactly to bignum_mul_p256k1 except for registers and // re-use of the pconst register for the constant 4294968273 #define mul_p256k1(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x5, x6, [P2] __LF \ mul x7, x3, x5 __LF \ umulh x8, x3, x5 __LF \ mul x9, x4, x6 __LF \ umulh x10, x4, x6 __LF \ subs x4, x4, x3 __LF \ cneg x4, x4, lo __LF \ csetm x16, lo __LF \ adds x9, x9, x8 __LF \ adc x10, x10, xzr __LF \ subs x3, x5, x6 __LF \ cneg x3, x3, lo __LF \ cinv x16, x16, lo __LF \ mul x15, x4, x3 __LF \ umulh x3, x4, x3 __LF \ adds x8, x7, x9 __LF \ adcs x9, x9, x10 __LF \ adc x10, x10, xzr __LF \ cmn x16, #1 __LF \ eor x15, x15, x16 __LF \ adcs x8, x15, x8 __LF \ eor x3, x3, x16 __LF \ adcs x9, x3, x9 __LF \ adc x10, x10, x16 __LF \ ldp x3, x4, [P1+16] __LF \ ldp x5, x6, [P2+16] __LF \ mul x11, x3, x5 __LF \ umulh x12, x3, x5 __LF \ mul x13, x4, x6 __LF \ umulh x14, x4, x6 __LF \ subs x4, x4, x3 __LF \ cneg x4, x4, lo __LF \ csetm x16, lo __LF \ adds x13, x13, x12 __LF \ adc x14, x14, xzr __LF \ subs x3, x5, x6 __LF \ cneg x3, x3, lo __LF \ cinv x16, x16, lo __LF \ mul x15, x4, x3 __LF \ umulh x3, x4, x3 __LF \ adds x12, x11, x13 __LF \ adcs x13, x13, x14 __LF \ adc x14, x14, xzr __LF \ cmn x16, #1 __LF \ eor x15, x15, x16 __LF \ adcs x12, x15, x12 __LF \ eor x3, x3, x16 __LF \ adcs x13, x3, x13 __LF \ adc x14, x14, x16 __LF \ ldp x3, x4, [P1+16] __LF \ ldp x15, x16, [P1] __LF \ subs x3, x3, x15 __LF \ sbcs x4, x4, x16 __LF \ csetm x16, lo __LF \ ldp x15, x0, [P2] __LF \ subs x5, x15, x5 __LF \ sbcs x6, x0, x6 __LF \ csetm x0, lo __LF \ eor x3, x3, x16 __LF \ subs x3, x3, x16 __LF \ eor x4, x4, x16 __LF \ sbc x4, x4, x16 __LF \ eor x5, x5, x0 __LF \ subs x5, x5, x0 __LF \ eor x6, x6, x0 __LF \ sbc x6, x6, x0 __LF \ eor x16, x0, x16 __LF \ adds x11, x11, x9 __LF \ adcs x12, x12, x10 __LF \ adcs x13, x13, xzr __LF \ adc x14, 
x14, xzr __LF \ mul x2, x3, x5 __LF \ umulh x0, x3, x5 __LF \ mul x15, x4, x6 __LF \ umulh x1, x4, x6 __LF \ subs x4, x4, x3 __LF \ cneg x4, x4, lo __LF \ csetm x9, lo __LF \ adds x15, x15, x0 __LF \ adc x1, x1, xzr __LF \ subs x6, x5, x6 __LF \ cneg x6, x6, lo __LF \ cinv x9, x9, lo __LF \ mul x5, x4, x6 __LF \ umulh x6, x4, x6 __LF \ adds x0, x2, x15 __LF \ adcs x15, x15, x1 __LF \ adc x1, x1, xzr __LF \ cmn x9, #1 __LF \ eor x5, x5, x9 __LF \ adcs x0, x5, x0 __LF \ eor x6, x6, x9 __LF \ adcs x15, x6, x15 __LF \ adc x1, x1, x9 __LF \ adds x9, x11, x7 __LF \ adcs x10, x12, x8 __LF \ adcs x11, x13, x11 __LF \ adcs x12, x14, x12 __LF \ adcs x13, x13, xzr __LF \ adc x14, x14, xzr __LF \ cmn x16, #1 __LF \ eor x2, x2, x16 __LF \ adcs x9, x2, x9 __LF \ eor x0, x0, x16 __LF \ adcs x10, x0, x10 __LF \ eor x15, x15, x16 __LF \ adcs x11, x15, x11 __LF \ eor x1, x1, x16 __LF \ adcs x12, x1, x12 __LF \ adcs x13, x13, x16 __LF \ adc x14, x14, x16 __LF \ mov x16, #977 __LF \ mul x3, pconst, x11 __LF \ umulh x5, pconst, x11 __LF \ and x15, x12, #0xffffffff __LF \ lsr x2, x12, #32 __LF \ mul x4, x16, x15 __LF \ madd x15, x16, x2, x15 __LF \ adds x4, x4, x15, lsl #32 __LF \ lsr x15, x15, #32 __LF \ adc x6, x2, x15 __LF \ mul x11, pconst, x13 __LF \ umulh x13, pconst, x13 __LF \ and x15, x14, #0xffffffff __LF \ lsr x2, x14, #32 __LF \ mul x12, x16, x15 __LF \ madd x15, x16, x2, x15 __LF \ adds x12, x12, x15, lsl #32 __LF \ lsr x15, x15, #32 __LF \ adc x14, x2, x15 __LF \ adds x7, x7, x3 __LF \ adcs x8, x8, x4 __LF \ adcs x9, x9, x11 __LF \ adcs x10, x10, x12 __LF \ cset x11, hs __LF \ adds x8, x8, x5 __LF \ adcs x9, x9, x6 __LF \ adcs x10, x10, x13 __LF \ adc x11, x11, x14 __LF \ add x0, x11, #1 __LF \ mul x3, x16, x0 __LF \ lsr x4, x0, #32 __LF \ adds x3, x3, x0, lsl #32 __LF \ adc x4, xzr, x4 __LF \ adds x7, x7, x3 __LF \ adcs x8, x8, x4 __LF \ adcs x9, x9, xzr __LF \ adcs x10, x10, xzr __LF \ csel x1, pconst, xzr, lo __LF \ subs x7, x7, x1 __LF \ sbcs x8, x8, xzr __LF \ sbcs x9, x9, xzr __LF \ sbc x10, x10, xzr __LF \ stp x7, x8, [P0] __LF \ stp x9, x10, [P0+16] // Corresponds exactly to bignum_sqr_p256k1 except for // re-use of the pconst register for the constant 4294968273 #define sqr_p256k1(P0,P1) \ ldp x10, x11, [P1] __LF \ ldp x12, x13, [P1+16] __LF \ umull x2, w10, w10 __LF \ lsr x14, x10, #32 __LF \ umull x3, w14, w14 __LF \ umull x14, w10, w14 __LF \ adds x2, x2, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x3, x3, x14 __LF \ umull x4, w11, w11 __LF \ lsr x14, x11, #32 __LF \ umull x5, w14, w14 __LF \ umull x14, w11, w14 __LF \ mul x15, x10, x11 __LF \ umulh x16, x10, x11 __LF \ adds x4, x4, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x5, x5, x14 __LF \ adds x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adc x5, x5, xzr __LF \ adds x3, x3, x15 __LF \ adcs x4, x4, x16 __LF \ adc x5, x5, xzr __LF \ umull x6, w12, w12 __LF \ lsr x14, x12, #32 __LF \ umull x7, w14, w14 __LF \ umull x14, w12, w14 __LF \ adds x6, x6, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x7, x7, x14 __LF \ umull x8, w13, w13 __LF \ lsr x14, x13, #32 __LF \ umull x9, w14, w14 __LF \ umull x14, w13, w14 __LF \ mul x15, x12, x13 __LF \ umulh x16, x12, x13 __LF \ adds x8, x8, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x9, x9, x14 __LF \ adds x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adc x9, x9, xzr __LF \ adds x7, x7, x15 __LF \ adcs x8, x8, x16 __LF \ adc x9, x9, xzr __LF \ subs x10, x10, x12 __LF \ sbcs x11, x11, x13 __LF \ csetm x16, lo __LF \ eor x10, x10, x16 __LF \ subs x10, x10, x16 __LF \ eor x11, 
x11, x16 __LF \ sbc x11, x11, x16 __LF \ adds x6, x6, x4 __LF \ adcs x7, x7, x5 __LF \ adcs x8, x8, xzr __LF \ adc x9, x9, xzr __LF \ umull x12, w10, w10 __LF \ lsr x5, x10, #32 __LF \ umull x13, w5, w5 __LF \ umull x5, w10, w5 __LF \ adds x12, x12, x5, lsl #33 __LF \ lsr x5, x5, #31 __LF \ adc x13, x13, x5 __LF \ umull x15, w11, w11 __LF \ lsr x5, x11, #32 __LF \ umull x14, w5, w5 __LF \ umull x5, w11, w5 __LF \ mul x4, x10, x11 __LF \ umulh x16, x10, x11 __LF \ adds x15, x15, x5, lsl #33 __LF \ lsr x5, x5, #31 __LF \ adc x14, x14, x5 __LF \ adds x4, x4, x4 __LF \ adcs x16, x16, x16 __LF \ adc x14, x14, xzr __LF \ adds x13, x13, x4 __LF \ adcs x15, x15, x16 __LF \ adc x14, x14, xzr __LF \ adds x4, x2, x6 __LF \ adcs x5, x3, x7 __LF \ adcs x6, x6, x8 __LF \ adcs x7, x7, x9 __LF \ csetm x16, lo __LF \ subs x4, x4, x12 __LF \ sbcs x5, x5, x13 __LF \ sbcs x6, x6, x15 __LF \ sbcs x7, x7, x14 __LF \ adcs x8, x8, x16 __LF \ adc x9, x9, x16 __LF \ mov x16, #977 __LF \ mul x10, pconst, x6 __LF \ umulh x13, pconst, x6 __LF \ and x6, x7, #0xffffffff __LF \ lsr x7, x7, #32 __LF \ mul x11, x16, x6 __LF \ madd x6, x16, x7, x6 __LF \ adds x11, x11, x6, lsl #32 __LF \ lsr x6, x6, #32 __LF \ adc x14, x7, x6 __LF \ mul x12, pconst, x8 __LF \ umulh x8, pconst, x8 __LF \ and x6, x9, #0xffffffff __LF \ lsr x7, x9, #32 __LF \ mul x9, x16, x6 __LF \ madd x6, x16, x7, x6 __LF \ adds x9, x9, x6, lsl #32 __LF \ lsr x6, x6, #32 __LF \ adc x15, x7, x6 __LF \ adds x2, x2, x10 __LF \ adcs x3, x3, x11 __LF \ adcs x4, x4, x12 __LF \ adcs x5, x5, x9 __LF \ cset x6, hs __LF \ adds x3, x3, x13 __LF \ adcs x4, x4, x14 __LF \ adcs x5, x5, x8 __LF \ adc x6, x6, x15 __LF \ add x6, x6, #1 __LF \ mul x10, x16, x6 __LF \ lsr x11, x6, #32 __LF \ adds x10, x10, x6, lsl #32 __LF \ adc x11, xzr, x11 __LF \ adds x2, x2, x10 __LF \ adcs x3, x3, x11 __LF \ adcs x4, x4, xzr __LF \ adcs x5, x5, xzr __LF \ csel x16, pconst, xzr, lo __LF \ subs x2, x2, x16 __LF \ sbcs x3, x3, xzr __LF \ sbcs x4, x4, xzr __LF \ sbc x5, x5, xzr __LF \ stp x2, x3, [P0] __LF \ stp x4, x5, [P0+16] // Rough versions producing 5-word results #define roughmul_p256k1(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x5, x6, [P2] __LF \ mul x7, x3, x5 __LF \ umulh x8, x3, x5 __LF \ mul x9, x4, x6 __LF \ umulh x10, x4, x6 __LF \ subs x4, x4, x3 __LF \ cneg x4, x4, lo __LF \ csetm x16, lo __LF \ adds x9, x9, x8 __LF \ adc x10, x10, xzr __LF \ subs x3, x5, x6 __LF \ cneg x3, x3, lo __LF \ cinv x16, x16, lo __LF \ mul x15, x4, x3 __LF \ umulh x3, x4, x3 __LF \ adds x8, x7, x9 __LF \ adcs x9, x9, x10 __LF \ adc x10, x10, xzr __LF \ cmn x16, #1 __LF \ eor x15, x15, x16 __LF \ adcs x8, x15, x8 __LF \ eor x3, x3, x16 __LF \ adcs x9, x3, x9 __LF \ adc x10, x10, x16 __LF \ ldp x3, x4, [P1+16] __LF \ ldp x5, x6, [P2+16] __LF \ mul x11, x3, x5 __LF \ umulh x12, x3, x5 __LF \ mul x13, x4, x6 __LF \ umulh x14, x4, x6 __LF \ subs x4, x4, x3 __LF \ cneg x4, x4, lo __LF \ csetm x16, lo __LF \ adds x13, x13, x12 __LF \ adc x14, x14, xzr __LF \ subs x3, x5, x6 __LF \ cneg x3, x3, lo __LF \ cinv x16, x16, lo __LF \ mul x15, x4, x3 __LF \ umulh x3, x4, x3 __LF \ adds x12, x11, x13 __LF \ adcs x13, x13, x14 __LF \ adc x14, x14, xzr __LF \ cmn x16, #1 __LF \ eor x15, x15, x16 __LF \ adcs x12, x15, x12 __LF \ eor x3, x3, x16 __LF \ adcs x13, x3, x13 __LF \ adc x14, x14, x16 __LF \ ldp x3, x4, [P1+16] __LF \ ldp x15, x16, [P1] __LF \ subs x3, x3, x15 __LF \ sbcs x4, x4, x16 __LF \ csetm x16, lo __LF \ ldp x15, x0, [P2] __LF \ subs x5, x15, x5 __LF \ sbcs x6, x0, x6 __LF \ csetm x0, lo __LF \ eor x3, 
x3, x16 __LF \ subs x3, x3, x16 __LF \ eor x4, x4, x16 __LF \ sbc x4, x4, x16 __LF \ eor x5, x5, x0 __LF \ subs x5, x5, x0 __LF \ eor x6, x6, x0 __LF \ sbc x6, x6, x0 __LF \ eor x16, x0, x16 __LF \ adds x11, x11, x9 __LF \ adcs x12, x12, x10 __LF \ adcs x13, x13, xzr __LF \ adc x14, x14, xzr __LF \ mul x2, x3, x5 __LF \ umulh x0, x3, x5 __LF \ mul x15, x4, x6 __LF \ umulh x1, x4, x6 __LF \ subs x4, x4, x3 __LF \ cneg x4, x4, lo __LF \ csetm x9, lo __LF \ adds x15, x15, x0 __LF \ adc x1, x1, xzr __LF \ subs x6, x5, x6 __LF \ cneg x6, x6, lo __LF \ cinv x9, x9, lo __LF \ mul x5, x4, x6 __LF \ umulh x6, x4, x6 __LF \ adds x0, x2, x15 __LF \ adcs x15, x15, x1 __LF \ adc x1, x1, xzr __LF \ cmn x9, #1 __LF \ eor x5, x5, x9 __LF \ adcs x0, x5, x0 __LF \ eor x6, x6, x9 __LF \ adcs x15, x6, x15 __LF \ adc x1, x1, x9 __LF \ adds x9, x11, x7 __LF \ adcs x10, x12, x8 __LF \ adcs x11, x13, x11 __LF \ adcs x12, x14, x12 __LF \ adcs x13, x13, xzr __LF \ adc x14, x14, xzr __LF \ cmn x16, #1 __LF \ eor x2, x2, x16 __LF \ adcs x9, x2, x9 __LF \ eor x0, x0, x16 __LF \ adcs x10, x0, x10 __LF \ eor x15, x15, x16 __LF \ adcs x11, x15, x11 __LF \ eor x1, x1, x16 __LF \ adcs x12, x1, x12 __LF \ adcs x13, x13, x16 __LF \ adc x14, x14, x16 __LF \ mov x16, #977 __LF \ mul x3, pconst, x11 __LF \ umulh x5, pconst, x11 __LF \ and x15, x12, #0xffffffff __LF \ lsr x2, x12, #32 __LF \ mul x4, x16, x15 __LF \ madd x15, x16, x2, x15 __LF \ adds x4, x4, x15, lsl #32 __LF \ lsr x15, x15, #32 __LF \ adc x6, x2, x15 __LF \ mul x11, pconst, x13 __LF \ umulh x13, pconst, x13 __LF \ and x15, x14, #0xffffffff __LF \ lsr x2, x14, #32 __LF \ mul x12, x16, x15 __LF \ madd x15, x16, x2, x15 __LF \ adds x12, x12, x15, lsl #32 __LF \ lsr x15, x15, #32 __LF \ adc x14, x2, x15 __LF \ adds x7, x7, x3 __LF \ adcs x8, x8, x4 __LF \ adcs x9, x9, x11 __LF \ adcs x10, x10, x12 __LF \ cset x11, hs __LF \ adds x8, x8, x5 __LF \ adcs x9, x9, x6 __LF \ adcs x10, x10, x13 __LF \ adc x11, x11, x14 __LF \ stp x7, x8, [P0] __LF \ stp x9, x10, [P0+16] __LF \ str x11, [P0+32] #define roughsqr_p256k1(P0,P1) \ ldp x10, x11, [P1] __LF \ ldp x12, x13, [P1+16] __LF \ umull x2, w10, w10 __LF \ lsr x14, x10, #32 __LF \ umull x3, w14, w14 __LF \ umull x14, w10, w14 __LF \ adds x2, x2, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x3, x3, x14 __LF \ umull x4, w11, w11 __LF \ lsr x14, x11, #32 __LF \ umull x5, w14, w14 __LF \ umull x14, w11, w14 __LF \ mul x15, x10, x11 __LF \ umulh x16, x10, x11 __LF \ adds x4, x4, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x5, x5, x14 __LF \ adds x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adc x5, x5, xzr __LF \ adds x3, x3, x15 __LF \ adcs x4, x4, x16 __LF \ adc x5, x5, xzr __LF \ umull x6, w12, w12 __LF \ lsr x14, x12, #32 __LF \ umull x7, w14, w14 __LF \ umull x14, w12, w14 __LF \ adds x6, x6, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x7, x7, x14 __LF \ umull x8, w13, w13 __LF \ lsr x14, x13, #32 __LF \ umull x9, w14, w14 __LF \ umull x14, w13, w14 __LF \ mul x15, x12, x13 __LF \ umulh x16, x12, x13 __LF \ adds x8, x8, x14, lsl #33 __LF \ lsr x14, x14, #31 __LF \ adc x9, x9, x14 __LF \ adds x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adc x9, x9, xzr __LF \ adds x7, x7, x15 __LF \ adcs x8, x8, x16 __LF \ adc x9, x9, xzr __LF \ subs x10, x10, x12 __LF \ sbcs x11, x11, x13 __LF \ csetm x16, lo __LF \ eor x10, x10, x16 __LF \ subs x10, x10, x16 __LF \ eor x11, x11, x16 __LF \ sbc x11, x11, x16 __LF \ adds x6, x6, x4 __LF \ adcs x7, x7, x5 __LF \ adcs x8, x8, xzr __LF \ adc x9, x9, xzr __LF \ umull x12, w10, 
w10 __LF \ lsr x5, x10, #32 __LF \ umull x13, w5, w5 __LF \ umull x5, w10, w5 __LF \ adds x12, x12, x5, lsl #33 __LF \ lsr x5, x5, #31 __LF \ adc x13, x13, x5 __LF \ umull x15, w11, w11 __LF \ lsr x5, x11, #32 __LF \ umull x14, w5, w5 __LF \ umull x5, w11, w5 __LF \ mul x4, x10, x11 __LF \ umulh x16, x10, x11 __LF \ adds x15, x15, x5, lsl #33 __LF \ lsr x5, x5, #31 __LF \ adc x14, x14, x5 __LF \ adds x4, x4, x4 __LF \ adcs x16, x16, x16 __LF \ adc x14, x14, xzr __LF \ adds x13, x13, x4 __LF \ adcs x15, x15, x16 __LF \ adc x14, x14, xzr __LF \ adds x4, x2, x6 __LF \ adcs x5, x3, x7 __LF \ adcs x6, x6, x8 __LF \ adcs x7, x7, x9 __LF \ csetm x16, lo __LF \ subs x4, x4, x12 __LF \ sbcs x5, x5, x13 __LF \ sbcs x6, x6, x15 __LF \ sbcs x7, x7, x14 __LF \ adcs x8, x8, x16 __LF \ adc x9, x9, x16 __LF \ mov x16, #977 __LF \ mul x10, pconst, x6 __LF \ umulh x13, pconst, x6 __LF \ and x6, x7, #0xffffffff __LF \ lsr x7, x7, #32 __LF \ mul x11, x16, x6 __LF \ madd x6, x16, x7, x6 __LF \ adds x11, x11, x6, lsl #32 __LF \ lsr x6, x6, #32 __LF \ adc x14, x7, x6 __LF \ mul x12, pconst, x8 __LF \ umulh x8, pconst, x8 __LF \ and x6, x9, #0xffffffff __LF \ lsr x7, x9, #32 __LF \ mul x9, x16, x6 __LF \ madd x6, x16, x7, x6 __LF \ adds x9, x9, x6, lsl #32 __LF \ lsr x6, x6, #32 __LF \ adc x15, x7, x6 __LF \ adds x2, x2, x10 __LF \ adcs x3, x3, x11 __LF \ adcs x4, x4, x12 __LF \ adcs x5, x5, x9 __LF \ cset x6, hs __LF \ adds x3, x3, x13 __LF \ adcs x4, x4, x14 __LF \ adcs x5, x5, x8 __LF \ adc x6, x6, x15 __LF \ stp x2, x3, [P0] __LF \ stp x4, x5, [P0+16] __LF \ str x6, [P0+32] // Weak doubling operation, staying in 4 digits but not in general // fully normalizing modulo p_256k1 #define weakdouble_p256k1(P0,P1) \ ldp x1, x2, [P1] __LF \ lsl x0, x1, #1 __LF \ ldp x3, x4, [P1+16] __LF \ ands xzr, x4, #0x8000000000000000 __LF \ csel x5, pconst, xzr, ne __LF \ extr x1, x2, x1, #63 __LF \ adds x0, x0, x5 __LF \ extr x2, x3, x2, #63 __LF \ adcs x1, x1, xzr __LF \ extr x3, x4, x3, #63 __LF \ adcs x2, x2, xzr __LF \ stp x0, x1, [P0] __LF \ adc x3, x3, xzr __LF \ stp x2, x3, [P0+16] // P0 = C * P1 - D * P2 with 5-word inputs P1 and P2 // Only used here with C = 12, D = 9, but could be used more generally. 
// We start with (2^40 * 2^256 + C * P1) - (D * P2 + 2^40 * k) // where p_256k1 = 2^256 - k (so k = 4294968273) #define cmsub_p256k1(P0,C,P1,D,P2) \ mov x10, C __LF \ ldp x4, x5, [P1] __LF \ mul x0, x4, x10 __LF \ mul x1, x5, x10 __LF \ ldp x6, x7, [P1+16] __LF \ mul x2, x6, x10 __LF \ mul x3, x7, x10 __LF \ ldr x13, [P1+32] __LF \ umulh x4, x4, x10 __LF \ adds x1, x1, x4 __LF \ umulh x5, x5, x10 __LF \ adcs x2, x2, x5 __LF \ umulh x6, x6, x10 __LF \ adcs x3, x3, x6 __LF \ umulh x4, x7, x10 __LF \ mul x13, x13, x10 __LF \ adc x9, x4, x13 __LF \ orr x9, x9, #0x10000000000 __LF \ /* [x9; x3;x2;x1;x0] = 2^40 * 2^256 + C * P1 */ \ mov x10, D __LF \ ldp x13, x14, [P2] __LF \ mul x5, x14, x10 __LF \ umulh x6, x14, x10 __LF \ adds x5, x5, pconst, lsr #24 __LF \ adc x6, x6, xzr __LF \ mul x4, x13, x10 __LF \ adds x4, x4, pconst, lsl #40 __LF \ umulh x13, x13, x10 __LF \ adcs x5, x5, x13 __LF \ ldp x13, x14, [P2+16] __LF \ mul x12, x13, x10 __LF \ umulh x7, x13, x10 __LF \ ldr x13, [P2+32] __LF \ adcs x6, x6, x12 __LF \ mul x12, x14, x10 __LF \ umulh x8, x14, x10 __LF \ mul x13, x13, x10 __LF \ adcs x7, x7, x12 __LF \ adc x8, x8, x13 __LF \ /* [x8; x7;x6;x5;x4] = D * P2 + 2^40 * k */ \ subs x0, x0, x4 __LF \ sbcs x1, x1, x5 __LF \ sbcs x2, x2, x6 __LF \ sbcs x3, x3, x7 __LF \ sbc x4, x9, x8 __LF \ /* [x4; x3;x2;x1;x0] = 2^40*p_256k1+result */ \ add x10, x4, #1 __LF \ /* (h + 1) is quotient estimate */ \ mul x4, pconst, x10 __LF \ umulh x5, pconst, x10 __LF \ adds x0, x0, x4 __LF \ adcs x1, x1, x5 __LF \ adcs x2, x2, xzr __LF \ adcs x3, x3, xzr __LF \ csel x11, pconst, xzr, cc __LF \ /* If un-correction needed */ \ subs x0, x0, x11 __LF \ sbcs x1, x1, xzr __LF \ stp x0, x1, [P0] __LF \ sbcs x2, x2, xzr __LF \ sbc x3, x3, xzr __LF \ stp x2, x3, [P0+16] // P0 = 3 * P1 - 8 * P2 with 5-digit P1 and P2 // We start with (2^40 * 2^256 + 3 * P1) - (8 * P2 + 2^40 * k) // where p_256k1 = 2^256 - k (so k = 4294968273) #define cmsub38_p256k1(P0,P1,P2) \ mov x10, #3 __LF \ ldp x4, x5, [P1] __LF \ mul x0, x4, x10 __LF \ mul x1, x5, x10 __LF \ ldp x6, x7, [P1+16] __LF \ mul x2, x6, x10 __LF \ mul x3, x7, x10 __LF \ ldr x13, [P1+32] __LF \ umulh x4, x4, x10 __LF \ adds x1, x1, x4 __LF \ umulh x5, x5, x10 __LF \ adcs x2, x2, x5 __LF \ umulh x6, x6, x10 __LF \ adcs x3, x3, x6 __LF \ umulh x4, x7, x10 __LF \ mul x13, x13, x10 __LF \ adc x9, x4, x13 __LF \ orr x9, x9, #0x10000000000 __LF \ /* [x9; x3;x2;x1;x0] = 2^40 * 2^256 + 3 * P1 */ \ lsl x12, pconst, #40 __LF \ ldp x13, x14, [P2] __LF \ lsl x4, x13, #3 __LF \ adds x4, x4, x12 __LF \ extr x5, x14, x13, #61 __LF \ lsr x12, pconst, #24 __LF \ adcs x5, x5, x12 __LF \ ldp x11, x12, [P2+16] __LF \ extr x6, x11, x14, #61 __LF \ adcs x6, x6, xzr __LF \ ldr x13, [P2+32] __LF \ extr x7, x12, x11, #61 __LF \ adcs x7, x7, xzr __LF \ extr x8, x13, x12, #61 __LF \ adc x8, x8, xzr __LF \ /* [x8; x7;x6;x5;x4] = 8 * P2 + 2^40 * k */ \ subs x0, x0, x4 __LF \ sbcs x1, x1, x5 __LF \ sbcs x2, x2, x6 __LF \ sbcs x3, x3, x7 __LF \ sbc x4, x9, x8 __LF \ /* [x4; x3;x2;x1;x0] = 2^40*p_256k1+result */ \ add x10, x4, #1 __LF \ /* (h + 1) is quotient estimate */ \ mul x4, pconst, x10 __LF \ umulh x5, pconst, x10 __LF \ adds x0, x0, x4 __LF \ adcs x1, x1, x5 __LF \ adcs x2, x2, xzr __LF \ adcs x3, x3, xzr __LF \ csel x11, pconst, xzr, cc __LF \ /* If un-correction needed */ \ subs x0, x0, x11 __LF \ sbcs x1, x1, xzr __LF \ stp x0, x1, [P0] __LF \ sbcs x2, x2, xzr __LF \ sbc x3, x3, xzr __LF \ stp x2, x3, [P0+16] // P0 = 4 * P1 - P2 with 5-digit P1, 4-digit P2 and result. 
// This is done by direct subtraction of P2 since the method // in bignum_cmul_p256k1 etc. for quotient estimation still // works when the value to be reduced is negative, as // long as it is > -p_256k1, which is the case here. #define cmsub41_p256k1(P0,P1,P2) \ ldp x1, x2, [P1] __LF \ lsl x0, x1, #2 __LF \ ldp x6, x7, [P2] __LF \ subs x0, x0, x6 __LF \ extr x1, x2, x1, #62 __LF \ sbcs x1, x1, x7 __LF \ ldp x3, x4, [P1+16] __LF \ extr x2, x3, x2, #62 __LF \ ldp x6, x7, [P2+16] __LF \ sbcs x2, x2, x6 __LF \ extr x3, x4, x3, #62 __LF \ sbcs x3, x3, x7 __LF \ ldr x5, [P1+32] __LF \ extr x4, x5, x4, #62 __LF \ sbc x4, x4, xzr __LF \ add x5, x4, #1 __LF \ /* (h + 1) is quotient estimate */ \ mul x4, pconst, x5 __LF \ adds x0, x0, x4 __LF \ umulh x5, pconst, x5 __LF \ adcs x1, x1, x5 __LF \ adcs x2, x2, xzr __LF \ adcs x3, x3, xzr __LF \ csel x4, pconst, xzr, cc __LF \ /* If un-correction needed */ \ subs x0, x0, x4 __LF \ sbcs x1, x1, xzr __LF \ stp x0, x1, [P0] __LF \ sbcs x2, x2, xzr __LF \ sbc x3, x3, xzr __LF \ stp x2, x3, [P0+16] S2N_BN_SYMBOL(secp256k1_jdouble): CFI_START // Save registers and make room on stack for temporary variables CFI_DEC_SP(NSPACE+16) CFI_STACKSAVE2(x19,x20,NSPACE) // Move the input arguments to stable place mov input_z, x0 mov input_x, x1 // Set up pconst = 4294968273, so p_256k1 = 2^256 - pconst mov pconst, #977 orr pconst, pconst, #0x100000000 // Main sequence of operations // y_2 = y^2 sqr_p256k1(y_2,y_1) // x_2 = x^2 sqr_p256k1(x_2,x_1) // tmp = 2 * y_1 (in 4 words but not fully normalized) weakdouble_p256k1(tmp,y_1) // xy2 = x * y^2 (5-digit partially reduced) // x_4 = x^4 (5-digit partially reduced) roughmul_p256k1(xy2,x_1,y_2) roughsqr_p256k1(x_4,x_2) // z_3 = 2 * y_1 * z_1 mul_p256k1(z_3,z_1,tmp) // d = 12 * xy2 - 9 * x_4 cmsub_p256k1(d,12,xy2,9,x_4) // y4 = y2^2 (5-digit partially reduced) roughsqr_p256k1(y_4,y_2) // dx2 = d * x_2 (5-digit partially reduced) roughmul_p256k1(dx2,x_2,d) // x_3 = 4 * xy2 - d cmsub41_p256k1(x_3,xy2,d) // y_3 = 3 * dx2 - 8 * y_4 cmsub38_p256k1(y_3,dx2,y_4) // Restore stack and return CFI_STACKLOAD2(x19,x20,NSPACE) CFI_INC_SP((NSPACE+16)) CFI_RET S2N_BN_SIZE_DIRECTIVE(secp256k1_jdouble) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
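The operation sequence in secp256k1_jdouble above is the textbook Jacobian doubling for a curve with coefficient a = 0, rearranged to share intermediates: with M = 3*x^2 and S = 4*x*y^2, the quantity d = 12*xy2 - 9*x_4 is exactly S - x'. A minimal Python check of that equivalence (an illustration only, not part of this repository):

import random

P = 2**256 - 4294968273                  # p_256k1

def jdouble(x, y, z):                    # mirrors the field-op sequence above
    y2  = y * y % P                      # y_2 = y^2
    x2  = x * x % P                      # x_2 = x^2
    xy2 = x * y2 % P                     # xy2 = x * y^2
    x4  = x2 * x2 % P                    # x_4 = x^4
    z3  = 2 * y * z % P                  # z_3 = 2 * y_1 * z_1
    d   = (12 * xy2 - 9 * x4) % P        # d = 12 * xy2 - 9 * x_4
    y4  = y2 * y2 % P                    # y_4 = y_2^2
    dx2 = d * x2 % P                     # dx2 = d * x_2
    x3  = (4 * xy2 - d) % P              # x_3 = 4 * xy2 - d
    y3  = (3 * dx2 - 8 * y4) % P         # y_3 = 3 * dx2 - 8 * y_4
    return x3, y3, z3

def jdouble_textbook(x, y, z):           # standard formulas with a = 0
    m = 3 * x * x % P
    s = 4 * x * y * y % P
    x3 = (m * m - 2 * s) % P
    y3 = (m * (s - x3) - 8 * pow(y, 4, P)) % P
    return x3, y3, 2 * y * z % P

x, y, z = (random.randrange(P) for _ in range(3))
assert jdouble(x, y, z) == jdouble_textbook(x, y, z)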
wlsfx/bnbb
8,848
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/secp256k1/bignum_montmul_p256k1.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery multiply, z := (x * y / 2^256) mod p_256k1 // Inputs x[4], y[4]; output z[4] // // extern void bignum_montmul_p256k1(uint64_t z[static 4], // const uint64_t x[static 4], // const uint64_t y[static 4]); // // Does z := (2^{-256} * x * y) mod p_256k1, assuming that the inputs x and y // satisfy x * y <= 2^256 * p_256k1 (in particular this is true if we are in // the "usual" case x < p_256k1 and y < p_256k1). // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p256k1) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montmul_p256k1) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p256k1) .text .balign 4 // Loading large constants #define movbig(nn,n3,n2,n1,n0) \ movz nn, n0 __LF \ movk nn, n1, lsl #16 __LF \ movk nn, n2, lsl #32 __LF \ movk nn, n3, lsl #48 // --------------------------------------------------------------------------- // Macro returning (c,h,l) = 3-word 1s complement (x - y) * (w - z) // c,h,l,t should all be different // t,h should not overlap w,z // --------------------------------------------------------------------------- #define muldiffn(c,h,l, t, x,y, w,z) \ subs t, x, y __LF \ cneg t, t, cc __LF \ csetm c, cc __LF \ subs h, w, z __LF \ cneg h, h, cc __LF \ mul l, t, h __LF \ umulh h, t, h __LF \ cinv c, c, cc __LF \ eor l, l, c __LF \ eor h, h, c // --------------------------------------------------------------------------- // Core two-step "short" Montgomery reduction macro. Takes input in // [d3;d2;d1;d0] and returns result in [d5;d4;d3;d2], modifying the // existing contents of [d3;d2;d1] and generating d4 and d5, while // using t1, t2, and t3 as temporaries. It's OK if d4 == d0 and d5 == d1. // --------------------------------------------------------------------------- #define montreds2(d5,d4,d3,d2,d1,d0) \ movbig(t2, 0xd838, #0x091d, #0xd225, #0x3531) __LF \ mul d4, t2, d0 __LF \ mov t3, #977 __LF \ orr t3, t3, #0x100000000 __LF \ umulh t1, d4, t3 __LF \ subs d1, d1, t1 __LF \ mul d5, t2, d1 __LF \ umulh t1, d5, t3 __LF \ sbcs d2, d2, t1 __LF \ sbcs d3, d3, xzr __LF \ sbcs d4, d4, xzr __LF \ sbc d5, d5, xzr #define a0 x3 #define a1 x4 #define a2 x5 #define a3 x6 #define b0 x7 #define b1 x8 #define b2 x9 #define b3 x10 #define s0 x11 #define s1 x12 #define s2 x13 #define s3 x14 #define t0 x15 #define t1 x16 #define t2 x17 #define t3 x1 #define s4 x2 S2N_BN_SYMBOL(bignum_montmul_p256k1): CFI_START // Load in all words of both inputs ldp a0, a1, [x1] ldp a2, a3, [x1, #16] ldp b0, b1, [x2] ldp b2, b3, [x2, #16] // Multiply low halves with a 2x2->4 ADK multiplier as L = [s3;s2;s1;s0] mul s0, a0, b0 mul s2, a1, b1 umulh s1, a0, b0 adds t1, s0, s2 umulh s3, a1, b1 adcs t2, s1, s3 adcs s3, s3, xzr adds s1, s1, t1 adcs s2, s2, t2 adcs s3, s3, xzr muldiffn(t3,t2,t1, t0, a0,a1, b1,b0) adds xzr, t3, #1 adcs s1, s1, t1 adcs s2, s2, t2 adc s3, s3, t3 // Perform two "short" Montgomery steps on the low product to // get a modified low result L' = [s1;s0;s3;s2] // This shifts it to an offset compatible with middle terms // Stash the result L' temporarily in the output buffer to avoid // using additional registers. 
montreds2(s1,s0,s3,s2,s1,s0) stp s2, s3, [x0] stp s0, s1, [x0, #16] // Multiply high halves with a 2x2->4 ADK multiplier as H = [s3;s2;s1;s0] mul s0, a2, b2 mul s2, a3, b3 umulh s1, a2, b2 adds t1, s0, s2 umulh s3, a3, b3 adcs t2, s1, s3 adcs s3, s3, xzr adds s1, s1, t1 adcs s2, s2, t2 adcs s3, s3, xzr muldiffn(t3,t2,t1, t0, a2,a3, b3,b2) adds xzr, t3, #1 adcs s1, s1, t1 adcs s2, s2, t2 adc s3, s3, t3 // Compute sign-magnitude a2,[a1,a0] = x_hi - x_lo subs a0, a2, a0 sbcs a1, a3, a1 sbc a2, xzr, xzr adds xzr, a2, #1 eor a0, a0, a2 adcs a0, a0, xzr eor a1, a1, a2 adcs a1, a1, xzr // Compute sign-magnitude b2,[b1,b0] = y_lo - y_hi subs b0, b0, b2 sbcs b1, b1, b3 sbc b2, xzr, xzr adds xzr, b2, #1 eor b0, b0, b2 adcs b0, b0, xzr eor b1, b1, b2 adcs b1, b1, xzr // Save the correct sign for the sub-product in b3 eor b3, a2, b2 // Add the high H to the modified low term L' as H + L' = [s4;b2;a2;t3;t0] ldp t0, t3, [x0] adds t0, s0, t0 adcs t3, s1, t3 ldp a2, b2, [x0, #16] adcs a2, s2, a2 adcs b2, s3, b2 adc s4, xzr, xzr // Multiply with yet a third 2x2->4 ADK multiplier for complex mid-term M mul s0, a0, b0 mul s2, a1, b1 umulh s1, a0, b0 adds t1, s0, s2 umulh s3, a1, b1 adcs t2, s1, s3 adcs s3, s3, xzr adds s1, s1, t1 adcs s2, s2, t2 adcs s3, s3, xzr muldiffn(a1,t2,t1, a0, a0,a1, b1,b0) adds xzr, a1, #1 adcs s1, s1, t1 adcs s2, s2, t2 adc s3, s3, a1 // Set up a sign-modified version of the mid-product in a long accumulator // as [b3;a1;a0;s3;s2;s1;s0], adding in the H + L' term once with // zero offset as this signed value is created adds xzr, b3, #1 eor s0, s0, b3 adcs s0, s0, t0 eor s1, s1, b3 adcs s1, s1, t3 eor s2, s2, b3 adcs s2, s2, a2 eor s3, s3, b3 adcs s3, s3, b2 adcs a0, s4, b3 adcs a1, b3, xzr adc b3, b3, xzr // Add in the stashed H + L' term an offset of 2 words as well adds s2, s2, t0 adcs s3, s3, t3 adcs a0, a0, a2 adcs a1, a1, b2 adc b3, b3, s4 // Do two more Montgomery steps on the composed term // Net pre-reduct is in [b3;a1;a0;s3;s2] montreds2(s1,s0,s3,s2,s1,s0) // Finish addition and form condensed upper digits as "dd" #define dd b1 adds a0, a0, s0 and dd, s3, a0 adcs a1, a1, s1 and dd, dd, a1 adc b3, b3, xzr // Because of the way we added L' in two places, we can overspill by // more than usual in Montgomery, with the result being only known to // be < 3 * p_256k1, not the usual < 2 * p_256k1. So now we do a more // elaborate final correction, making use of the condensed carry dd // to see if the initial estimate q = 4294968273 * (h + 1) results // in a negative true result, and if so use q = 4294968273 * h. #define d0 s2 #define d1 s3 #define d2 a0 #define d3 a1 #define h b3 #define q s4 #define c b0 madd q, h, t3, t3 adds xzr, d0, q sub h, q, t3 adcs xzr, dd, xzr csel q, q, h, cs adds d0, d0, q adcs d1, d1, xzr adcs d2, d2, xzr adc d3, d3, xzr // Finally store the result stp d0, d1, [x0] stp d2, d3, [x0, #16] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montmul_p256k1) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
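The montreds2 macro above is the distinctive step here: per its header comment, its net effect is multiplication by 2^-128 mod p_256k1, so two applications around the product supply the full 2^-256 Montgomery factor. A minimal Python sketch of that net effect (an illustration only, not part of this repository):

# Each short Montgomery step cancels the bottom word exactly (w is the
# negated inverse of p_256k1 mod 2^64), dividing by 2^64 modulo P; two
# steps give 2^-128. Requires Python 3.8+ for pow(..., -1, ...).
P = 2**256 - 4294968273
W = 0xd838091dd2253531

def montreds2(v):
    for _ in range(2):
        q = (W * v) & (2**64 - 1)
        v = (v + q * P) >> 64            # exact shift: low word is zero
    return v

v = 12345
assert montreds2(v) % P == v * pow(2**128, -1, P) % P
# Two applications around a product realize the full Montgomery factor:
x, y = 3, 5
assert montreds2(montreds2(x * y)) % P == x * y * pow(2**256, -1, P) % P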
wlsfx/bnbb
1,801
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_half_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Halve modulo p_sm2, z := (x / 2) mod p_sm2, assuming x reduced
// Input x[4]; output z[4]
//
// extern void bignum_half_sm2(uint64_t z[static 4], const uint64_t x[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_half_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_half_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_half_sm2)
        .text
        .balign 4

#define z x0
#define x x1

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x6

#define m x7
#define n x8

S2N_BN_SYMBOL(bignum_half_sm2):

        CFI_START

// Load the 4 digits of x

        ldp     d0, d1, [x]
        ldp     d2, d3, [x, #16]

// Get a bitmask corresponding to the lowest bit of the input

        and     m, d0, #1
        neg     m, m

// Do a masked addition of p_sm2, catching carry in a 5th word

        adds    d0, d0, m
        and     n, m, #0xffffffff00000000
        adcs    d1, d1, n
        adcs    d2, d2, m
        and     n, m, #0xfffffffeffffffff
        adcs    d3, d3, n
        adc     d4, xzr, xzr

// Now shift that sum right one place

        extr    d0, d1, d0, #1
        extr    d1, d2, d1, #1
        extr    d2, d3, d2, #1
        extr    d3, d4, d3, #1

// Store back

        stp     d0, d1, [z]
        stp     d2, d3, [z, #16]

// Return

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_half_sm2)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
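The halving trick above works for any odd modulus: if x is odd then x + p is even, so the right shift (taking the carry bit along as a 5th word) is an exact division by 2. A minimal Python sketch (an illustration only, not part of this repository; the constant is p_sm2 as assembled from the masks in the code):

P_SM2 = 0xfffffffeffffffff_ffffffffffffffff_ffffffff00000000_ffffffffffffffff

def half_sm2(x):
    assert 0 <= x < P_SM2                # input assumed fully reduced
    t = x + (P_SM2 if x & 1 else 0)      # masked addition; carry kept in bit 256
    return t >> 1                        # even by construction, so exact

for x in (0, 1, 7, P_SM2 - 1):
    assert (2 * half_sm2(x)) % P_SM2 == x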
wlsfx/bnbb
133,004
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/sm2_montjscalarmul.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery-Jacobian form scalar multiplication for GM/T 0003-2012 curve SM2 // Input scalar[4], point[12]; output res[12] // // extern void sm2_montjscalarmul // (uint64_t res[static 12], // const uint64_t scalar[static 4], // const uint64_t point[static 12]); // // This function is a variant of its affine point version sm2_scalarmul. // Here, input and output points are assumed to be in Jacobian form with // their coordinates in the Montgomery domain. Thus, if priming indicates // Montgomery form, x' = (2^256 * x) mod p_sm2 etc., each point argument // is a triple (x',y',z') representing the affine point (x/z^2,y/z^3) when // z' is nonzero or the point at infinity (group identity) if z' = 0. // // Given scalar = n and point = P, assumed to be on the NIST elliptic // curve SM2, returns a representation of n * P. If the result is the // point at infinity (either because the input point was or because the // scalar was a multiple of p_sm2) then the output is guaranteed to // represent the point at infinity, i.e. to have its z coordinate zero. // // Standard ARM ABI: X0 = res, X1 = scalar, X2 = point // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjscalarmul) S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjscalarmul) S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjscalarmul) .text .balign 4 // Size of individual field elements #define NUMSIZE 32 // Safe copies of inputs (res lasts the whole code, point not so long) // and additional values in variables, with some aliasing #define res x19 #define sgn x20 #define j x20 #define point x21 // Intermediate variables on the stack. #define scalarb sp, #(0*NUMSIZE) #define acc sp, #(1*NUMSIZE) #define tabent sp, #(4*NUMSIZE) #define tab sp, #(7*NUMSIZE) #define NSPACE 31*NUMSIZE // Avoid using .rep for the sake of the BoringSSL/AWS-LC delocator, // which doesn't accept repetitions, assembler macros etc. #define selectblock(I) \ cmp x14, #(1*I) __LF \ ldp x12, x13, [x15] __LF \ csel x0, x12, x0, eq __LF \ csel x1, x13, x1, eq __LF \ ldp x12, x13, [x15, #16] __LF \ csel x2, x12, x2, eq __LF \ csel x3, x13, x3, eq __LF \ ldp x12, x13, [x15, #32] __LF \ csel x4, x12, x4, eq __LF \ csel x5, x13, x5, eq __LF \ ldp x12, x13, [x15, #48] __LF \ csel x6, x12, x6, eq __LF \ csel x7, x13, x7, eq __LF \ ldp x12, x13, [x15, #64] __LF \ csel x8, x12, x8, eq __LF \ csel x9, x13, x9, eq __LF \ ldp x12, x13, [x15, #80] __LF \ csel x10, x12, x10, eq __LF \ csel x11, x13, x11, eq __LF \ add x15, x15, #96 // Loading large constants #define movbig(nn,n3,n2,n1,n0) \ movz nn, n0 __LF \ movk nn, n1, lsl #16 __LF \ movk nn, n2, lsl #32 __LF \ movk nn, n3, lsl #48 S2N_BN_SYMBOL(sm2_montjscalarmul): CFI_START CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x30) CFI_DEC_SP(NSPACE) // Preserve the "res" and "point" input arguments. We load and process the // scalar immediately so we don't bother preserving that input argument. // Also, "point" is only needed early on and so its register gets re-used. mov res, x0 mov point, x2 // Load the digits of group order n_sm2 = [x12;x13;x14;x15] movbig(x12, #0x53bb, #0xf409, #0x39d5, #0x4123) movbig(x13, #0x7203, #0xdf6b, #0x21c6, #0x052b) mov x14, #0xffffffffffffffff mov x15, #0xfffffffeffffffff // First, reduce the input scalar mod n_sm2, i.e. 
conditionally subtract n_sm2 ldp x2, x3, [x1] ldp x4, x5, [x1, #16] subs x6, x2, x12 sbcs x7, x3, x13 sbcs x8, x4, x14 sbcs x9, x5, x15 csel x2, x2, x6, cc csel x3, x3, x7, cc csel x4, x4, x8, cc csel x5, x5, x9, cc // Now if the top bit of the reduced scalar is set, negate it mod n_sm2, // i.e. do n |-> n_sm2 - n. Remember the sign as "sgn" so we can // correspondingly negate the point below. subs x6, x12, x2 sbcs x7, x13, x3 sbcs x8, x14, x4 sbc x9, x15, x5 tst x5, #0x8000000000000000 csel x2, x2, x6, eq csel x3, x3, x7, eq csel x4, x4, x8, eq csel x5, x5, x9, eq cset sgn, ne // In either case then add the recoding constant 0x08888...888 to allow // signed digits. mov x6, 0x8888888888888888 adds x2, x2, x6 adcs x3, x3, x6 bic x7, x6, #0xF000000000000000 adcs x4, x4, x6 adc x5, x5, x7 stp x2, x3, [scalarb] stp x4, x5, [scalarb+16] // Set the tab[0] table entry to the input point = 1 * P, except // that we negate it if the top bit of the scalar was set. This // negation takes care over the y = 0 case to maintain all the // coordinates < p_sm2 throughout, even though triples (x,y,z) // with y = 0 can only represent a point on the curve when z = 0 // and it represents the point at infinity regardless of x and y. ldp x0, x1, [point] stp x0, x1, [tab] ldp x2, x3, [point, #16] stp x2, x3, [tab+16] ldp x4, x5, [point, #32] ldp x6, x7, [point, #48] mov x0, #0xffffffffffffffff subs x0, x0, x4 mov x1, #0xffffffff00000000 sbcs x1, x1, x5 mov x2, #0xffffffffffffffff sbcs x2, x2, x6 mov x3, #0xfffffffeffffffff sbc x3, x3, x7 orr x8, x4, x5 orr x9, x6, x7 orr x8, x8, x9 cmp x8, xzr ccmp sgn, xzr, #4, ne csel x4, x0, x4, ne csel x5, x1, x5, ne csel x6, x2, x6, ne csel x7, x3, x7, ne stp x4, x5, [tab+32] stp x6, x7, [tab+48] ldp x0, x1, [point, #64] stp x0, x1, [tab+64] ldp x2, x3, [point, #80] stp x2, x3, [tab+80] // Compute and record tab[1] = 2 * p, ..., tab[7] = 8 * P add x0, tab+96*1 add x1, tab CFI_BL(Lsm2_montjscalarmul_sm2_montjdouble) add x0, tab+96*2 add x1, tab+96*1 add x2, tab CFI_BL(Lsm2_montjscalarmul_sm2_montjadd) add x0, tab+96*3 add x1, tab+96*1 CFI_BL(Lsm2_montjscalarmul_sm2_montjdouble) add x0, tab+96*4 add x1, tab+96*3 add x2, tab CFI_BL(Lsm2_montjscalarmul_sm2_montjadd) add x0, tab+96*5 add x1, tab+96*2 CFI_BL(Lsm2_montjscalarmul_sm2_montjdouble) add x0, tab+96*6 add x1, tab+96*5 add x2, tab CFI_BL(Lsm2_montjscalarmul_sm2_montjadd) add x0, tab+96*7 add x1, tab+96*3 CFI_BL(Lsm2_montjscalarmul_sm2_montjdouble) // Initialize the accumulator as a table entry for top 4 bits (unrecoded) ldr x14, [scalarb+24] lsr x14, x14, #60 mov x0, xzr mov x1, xzr mov x2, xzr mov x3, xzr mov x4, xzr mov x5, xzr mov x6, xzr mov x7, xzr mov x8, xzr mov x9, xzr mov x10, xzr mov x11, xzr add x15, tab selectblock(1) selectblock(2) selectblock(3) selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) stp x0, x1, [acc] stp x2, x3, [acc+16] stp x4, x5, [acc+32] stp x6, x7, [acc+48] stp x8, x9, [acc+64] stp x10, x11, [acc+80] mov j, #252 // Main loop over size-4 bitfields: double 4 times then add signed digit Lsm2_montjscalarmul_mainloop: sub j, j, #4 add x0, acc add x1, acc CFI_BL(Lsm2_montjscalarmul_sm2_montjdouble) add x0, acc add x1, acc CFI_BL(Lsm2_montjscalarmul_sm2_montjdouble) add x0, acc add x1, acc CFI_BL(Lsm2_montjscalarmul_sm2_montjdouble) add x0, acc add x1, acc CFI_BL(Lsm2_montjscalarmul_sm2_montjdouble) lsr x2, j, #6 ldr x14, [sp, x2, lsl #3] // Exploits scalarb = sp exactly lsr x14, x14, j and x14, x14, #15 subs x14, x14, #8 cset x16, lo // x16 = sign of digit (1 = negative) 
cneg x14, x14, lo // x14 = absolute value of digit // Conditionally select the table entry tab[i-1] = i * P in constant time mov x0, xzr mov x1, xzr mov x2, xzr mov x3, xzr mov x4, xzr mov x5, xzr mov x6, xzr mov x7, xzr mov x8, xzr mov x9, xzr mov x10, xzr mov x11, xzr add x15, tab selectblock(1) selectblock(2) selectblock(3) selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) // Store it to "tabent" with the y coordinate optionally negated // Again, do it carefully to give coordinates < p_sm2 even in // the degenerate case y = 0 (when z = 0 for points on the curve). stp x0, x1, [tabent] stp x2, x3, [tabent+16] mov x0, #0xffffffffffffffff subs x0, x0, x4 mov x1, #0xffffffff00000000 sbcs x1, x1, x5 mov x2, #0xffffffffffffffff sbcs x2, x2, x6 mov x3, #0xfffffffeffffffff sbc x3, x3, x7 orr x12, x4, x5 orr x13, x6, x7 orr x12, x12, x13 cmp x12, xzr ccmp x16, xzr, #4, ne csel x4, x0, x4, ne csel x5, x1, x5, ne csel x6, x2, x6, ne csel x7, x3, x7, ne stp x4, x5, [tabent+32] stp x6, x7, [tabent+48] stp x8, x9, [tabent+64] stp x10, x11, [tabent+80] add x0, acc add x1, acc add x2, tabent CFI_BL(Lsm2_montjscalarmul_sm2_montjadd) cbnz j, Lsm2_montjscalarmul_mainloop // That's the end of the main loop, and we just need to copy the // result in "acc" to the output. ldp x0, x1, [acc] stp x0, x1, [res] ldp x0, x1, [acc+16] stp x0, x1, [res, #16] ldp x0, x1, [acc+32] stp x0, x1, [res, #32] ldp x0, x1, [acc+48] stp x0, x1, [res, #48] ldp x0, x1, [acc+64] stp x0, x1, [res, #64] ldp x0, x1, [acc+80] stp x0, x1, [res, #80] // Restore stack and registers and return CFI_INC_SP(NSPACE) CFI_POP2(x21,x30) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(sm2_montjscalarmul) // Local copies of subroutines, complete clones at the moment S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lsm2_montjscalarmul_sm2_montjadd) Lsm2_montjscalarmul_sm2_montjadd: CFI_START CFI_PUSH2(x19,x20) CFI_DEC_SP(224) mov x17, x0 mov x19, x1 mov x20, x2 ldp x2, x3, [x19, #0x40] ldp x4, x5, [x19, #0x50] umull x15, w2, w2 lsr x11, x2, #32 umull x16, w11, w11 umull x11, w2, w11 adds x15, x15, x11, lsl #33 lsr x11, x11, #31 adc x16, x16, x11 umull x0, w3, w3 lsr x11, x3, #32 umull x1, w11, w11 umull x11, w3, w11 mul x12, x2, x3 umulh x13, x2, x3 adds x0, x0, x11, lsl #33 lsr x11, x11, #31 adc x1, x1, x11 adds x12, x12, x12 adcs x13, x13, x13 adc x1, x1, xzr adds x16, x16, x12 adcs x0, x0, x13 adc x1, x1, xzr lsl x12, x15, #32 lsr x11, x15, #32 subs x14, x12, x15 sbc x13, x11, xzr subs x16, x16, x14 sbcs x0, x0, x13 sbcs x1, x1, x12 sbc x15, x15, x11 lsl x12, x16, #32 lsr x11, x16, #32 subs x14, x12, x16 sbc x13, x11, xzr subs x0, x0, x14 sbcs x1, x1, x13 sbcs x15, x15, x12 sbc x16, x16, x11 mul x6, x2, x4 mul x14, x3, x5 umulh x8, x2, x4 subs x10, x2, x3 cneg x10, x10, lo csetm x13, lo subs x12, x5, x4 cneg x12, x12, lo mul x11, x10, x12 umulh x12, x10, x12 cinv x13, x13, lo eor x11, x11, x13 eor x12, x12, x13 adds x7, x6, x8 adc x8, x8, xzr umulh x9, x3, x5 adds x7, x7, x14 adcs x8, x8, x9 adc x9, x9, xzr adds x8, x8, x14 adc x9, x9, xzr cmn x13, #0x1 adcs x7, x7, x11 adcs x8, x8, x12 adc x9, x9, x13 adds x6, x6, x6 adcs x7, x7, x7 adcs x8, x8, x8 adcs x9, x9, x9 adc x10, xzr, xzr adds x6, x6, x0 adcs x7, x7, x1 adcs x8, x8, x15 adcs x9, x9, x16 adc x10, x10, xzr lsl x12, x6, #32 lsr x11, x6, #32 subs x14, x12, x6 sbc x13, x11, xzr subs x7, x7, x14 sbcs x8, x8, x13 sbcs x9, x9, x12 sbc x14, x6, x11 adds x10, x10, x14 adc x6, xzr, xzr lsl x12, x7, #32 lsr x11, x7, #32 subs x14, x12, x7 sbc x13, x11, xzr subs x8, x8, x14 sbcs x9, x9, x13 
sbcs x10, x10, x12 sbc x14, x7, x11 adds x6, x6, x14 adc x7, xzr, xzr mul x11, x4, x4 adds x8, x8, x11 mul x12, x5, x5 umulh x11, x4, x4 adcs x9, x9, x11 adcs x10, x10, x12 umulh x12, x5, x5 adcs x6, x6, x12 adc x7, x7, xzr mul x11, x4, x5 umulh x12, x4, x5 adds x11, x11, x11 adcs x12, x12, x12 adc x13, xzr, xzr adds x9, x9, x11 adcs x10, x10, x12 adcs x6, x6, x13 adcs x7, x7, xzr mov x11, #-0x100000000 adds x5, x8, #0x1 sbcs x11, x9, x11 mov x13, #-0x100000001 adcs x12, x10, xzr sbcs x13, x6, x13 sbcs xzr, x7, xzr csel x8, x5, x8, hs csel x9, x11, x9, hs csel x10, x12, x10, hs csel x6, x13, x6, hs stp x8, x9, [sp] stp x10, x6, [sp, #0x10] ldp x2, x3, [x20, #0x40] ldp x4, x5, [x20, #0x50] umull x15, w2, w2 lsr x11, x2, #32 umull x16, w11, w11 umull x11, w2, w11 adds x15, x15, x11, lsl #33 lsr x11, x11, #31 adc x16, x16, x11 umull x0, w3, w3 lsr x11, x3, #32 umull x1, w11, w11 umull x11, w3, w11 mul x12, x2, x3 umulh x13, x2, x3 adds x0, x0, x11, lsl #33 lsr x11, x11, #31 adc x1, x1, x11 adds x12, x12, x12 adcs x13, x13, x13 adc x1, x1, xzr adds x16, x16, x12 adcs x0, x0, x13 adc x1, x1, xzr lsl x12, x15, #32 lsr x11, x15, #32 subs x14, x12, x15 sbc x13, x11, xzr subs x16, x16, x14 sbcs x0, x0, x13 sbcs x1, x1, x12 sbc x15, x15, x11 lsl x12, x16, #32 lsr x11, x16, #32 subs x14, x12, x16 sbc x13, x11, xzr subs x0, x0, x14 sbcs x1, x1, x13 sbcs x15, x15, x12 sbc x16, x16, x11 mul x6, x2, x4 mul x14, x3, x5 umulh x8, x2, x4 subs x10, x2, x3 cneg x10, x10, lo csetm x13, lo subs x12, x5, x4 cneg x12, x12, lo mul x11, x10, x12 umulh x12, x10, x12 cinv x13, x13, lo eor x11, x11, x13 eor x12, x12, x13 adds x7, x6, x8 adc x8, x8, xzr umulh x9, x3, x5 adds x7, x7, x14 adcs x8, x8, x9 adc x9, x9, xzr adds x8, x8, x14 adc x9, x9, xzr cmn x13, #0x1 adcs x7, x7, x11 adcs x8, x8, x12 adc x9, x9, x13 adds x6, x6, x6 adcs x7, x7, x7 adcs x8, x8, x8 adcs x9, x9, x9 adc x10, xzr, xzr adds x6, x6, x0 adcs x7, x7, x1 adcs x8, x8, x15 adcs x9, x9, x16 adc x10, x10, xzr lsl x12, x6, #32 lsr x11, x6, #32 subs x14, x12, x6 sbc x13, x11, xzr subs x7, x7, x14 sbcs x8, x8, x13 sbcs x9, x9, x12 sbc x14, x6, x11 adds x10, x10, x14 adc x6, xzr, xzr lsl x12, x7, #32 lsr x11, x7, #32 subs x14, x12, x7 sbc x13, x11, xzr subs x8, x8, x14 sbcs x9, x9, x13 sbcs x10, x10, x12 sbc x14, x7, x11 adds x6, x6, x14 adc x7, xzr, xzr mul x11, x4, x4 adds x8, x8, x11 mul x12, x5, x5 umulh x11, x4, x4 adcs x9, x9, x11 adcs x10, x10, x12 umulh x12, x5, x5 adcs x6, x6, x12 adc x7, x7, xzr mul x11, x4, x5 umulh x12, x4, x5 adds x11, x11, x11 adcs x12, x12, x12 adc x13, xzr, xzr adds x9, x9, x11 adcs x10, x10, x12 adcs x6, x6, x13 adcs x7, x7, xzr mov x11, #-0x100000000 adds x5, x8, #0x1 sbcs x11, x9, x11 mov x13, #-0x100000001 adcs x12, x10, xzr sbcs x13, x6, x13 sbcs xzr, x7, xzr csel x8, x5, x8, hs csel x9, x11, x9, hs csel x10, x12, x10, hs csel x6, x13, x6, hs stp x8, x9, [sp, #0xa0] stp x10, x6, [sp, #0xb0] ldp x3, x4, [x20, #0x40] ldp x5, x6, [x20, #0x50] ldp x7, x8, [x19, #0x20] ldp x9, x10, [x19, #0x30] mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x3, x4 cneg x15, x15, lo csetm x1, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc 
x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 stp x13, x14, [sp, #0xc0] stp x11, x12, [sp, #0xd0] mul x11, x5, x9 mul x13, x6, x10 umulh x12, x5, x9 adds x16, x11, x13 umulh x14, x6, x10 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x5, x6 cneg x15, x15, lo csetm x1, lo subs x0, x10, x9 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 subs x3, x5, x3 sbcs x4, x6, x4 ngc x5, xzr cmn x5, #0x1 eor x3, x3, x5 adcs x3, x3, xzr eor x4, x4, x5 adcs x4, x4, xzr subs x7, x7, x9 sbcs x8, x8, x10 ngc x9, xzr cmn x9, #0x1 eor x7, x7, x9 adcs x7, x7, xzr eor x8, x8, x9 adcs x8, x8, xzr eor x10, x5, x9 ldp x15, x1, [sp, #0xc0] adds x15, x11, x15 adcs x1, x12, x1 ldp x5, x9, [sp, #0xd0] adcs x5, x13, x5 adcs x9, x14, x9 adc x2, xzr, xzr mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x3, x3, x4 cneg x3, x3, lo csetm x4, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x3, x0 umulh x0, x3, x0 cinv x4, x4, lo eor x16, x16, x4 eor x0, x0, x4 cmn x4, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x4 cmn x10, #0x1 eor x11, x11, x10 adcs x11, x11, x15 eor x12, x12, x10 adcs x12, x12, x1 eor x13, x13, x10 adcs x13, x13, x5 eor x14, x14, x10 adcs x14, x14, x9 adcs x3, x2, x10 adcs x4, x10, xzr adc x10, x10, xzr adds x13, x13, x15 adcs x14, x14, x1 adcs x3, x3, x5 adcs x4, x4, x9 adc x10, x10, x2 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 adds x3, x3, x11 adcs x4, x4, x12 adc x10, x10, xzr add x2, x10, #0x1 lsl x15, x2, #32 sub x16, x15, x2 adds x13, x13, x2 adcs x14, x14, x16 adcs x3, x3, xzr adcs x4, x4, x15 csetm x7, lo adds x13, x13, x7 and x16, x7, #0xffffffff00000000 adcs x14, x14, x16 adcs x3, x3, x7 and x15, x7, #0xfffffffeffffffff adc x4, x4, x15 stp x13, x14, [sp, #0xc0] stp x3, x4, [sp, #0xd0] ldp x3, x4, [x19, #0x40] ldp x5, x6, [x19, #0x50] ldp x7, x8, [x20, #0x20] ldp x9, x10, [x20, #0x30] mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x3, x4 cneg x15, x15, lo csetm x1, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 stp x13, x14, [sp, #0x20] stp x11, x12, [sp, #0x30] mul x11, x5, x9 mul x13, x6, x10 umulh x12, x5, x9 adds x16, x11, x13 umulh x14, x6, x10 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x5, x6 cneg x15, x15, lo csetm x1, lo subs x0, x10, x9 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor 
x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 subs x3, x5, x3 sbcs x4, x6, x4 ngc x5, xzr cmn x5, #0x1 eor x3, x3, x5 adcs x3, x3, xzr eor x4, x4, x5 adcs x4, x4, xzr subs x7, x7, x9 sbcs x8, x8, x10 ngc x9, xzr cmn x9, #0x1 eor x7, x7, x9 adcs x7, x7, xzr eor x8, x8, x9 adcs x8, x8, xzr eor x10, x5, x9 ldp x15, x1, [sp, #0x20] adds x15, x11, x15 adcs x1, x12, x1 ldp x5, x9, [sp, #0x30] adcs x5, x13, x5 adcs x9, x14, x9 adc x2, xzr, xzr mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x3, x3, x4 cneg x3, x3, lo csetm x4, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x3, x0 umulh x0, x3, x0 cinv x4, x4, lo eor x16, x16, x4 eor x0, x0, x4 cmn x4, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x4 cmn x10, #0x1 eor x11, x11, x10 adcs x11, x11, x15 eor x12, x12, x10 adcs x12, x12, x1 eor x13, x13, x10 adcs x13, x13, x5 eor x14, x14, x10 adcs x14, x14, x9 adcs x3, x2, x10 adcs x4, x10, xzr adc x10, x10, xzr adds x13, x13, x15 adcs x14, x14, x1 adcs x3, x3, x5 adcs x4, x4, x9 adc x10, x10, x2 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 adds x3, x3, x11 adcs x4, x4, x12 adc x10, x10, xzr add x2, x10, #0x1 lsl x15, x2, #32 sub x16, x15, x2 adds x13, x13, x2 adcs x14, x14, x16 adcs x3, x3, xzr adcs x4, x4, x15 csetm x7, lo adds x13, x13, x7 and x16, x7, #0xffffffff00000000 adcs x14, x14, x16 adcs x3, x3, x7 and x15, x7, #0xfffffffeffffffff adc x4, x4, x15 stp x13, x14, [sp, #0x20] stp x3, x4, [sp, #0x30] ldp x3, x4, [sp] ldp x5, x6, [sp, #0x10] ldp x7, x8, [x20] ldp x9, x10, [x20, #0x10] mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x3, x4 cneg x15, x15, lo csetm x1, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 stp x13, x14, [sp, #0x40] stp x11, x12, [sp, #0x50] mul x11, x5, x9 mul x13, x6, x10 umulh x12, x5, x9 adds x16, x11, x13 umulh x14, x6, x10 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x5, x6 cneg x15, x15, lo csetm x1, lo subs x0, x10, x9 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 subs x3, x5, x3 sbcs x4, x6, x4 ngc x5, xzr cmn x5, #0x1 eor x3, x3, x5 adcs x3, x3, xzr eor x4, x4, x5 adcs x4, x4, xzr subs x7, x7, x9 sbcs x8, x8, x10 ngc x9, xzr cmn x9, #0x1 eor x7, x7, x9 adcs x7, x7, xzr eor x8, x8, x9 adcs x8, x8, xzr eor x10, x5, x9 ldp x15, x1, [sp, #0x40] adds x15, x11, x15 adcs x1, x12, x1 ldp x5, x9, [sp, #0x50] adcs x5, x13, x5 adcs x9, x14, x9 adc x2, xzr, xzr mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds 
x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x3, x3, x4 cneg x3, x3, lo csetm x4, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x3, x0 umulh x0, x3, x0 cinv x4, x4, lo eor x16, x16, x4 eor x0, x0, x4 cmn x4, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x4 cmn x10, #0x1 eor x11, x11, x10 adcs x11, x11, x15 eor x12, x12, x10 adcs x12, x12, x1 eor x13, x13, x10 adcs x13, x13, x5 eor x14, x14, x10 adcs x14, x14, x9 adcs x3, x2, x10 adcs x4, x10, xzr adc x10, x10, xzr adds x13, x13, x15 adcs x14, x14, x1 adcs x3, x3, x5 adcs x4, x4, x9 adc x10, x10, x2 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 adds x3, x3, x11 adcs x4, x4, x12 adc x10, x10, xzr add x2, x10, #0x1 lsl x15, x2, #32 sub x16, x15, x2 adds x13, x13, x2 adcs x14, x14, x16 adcs x3, x3, xzr adcs x4, x4, x15 csetm x7, lo adds x13, x13, x7 and x16, x7, #0xffffffff00000000 adcs x14, x14, x16 adcs x3, x3, x7 and x15, x7, #0xfffffffeffffffff adc x4, x4, x15 stp x13, x14, [sp, #0x40] stp x3, x4, [sp, #0x50] ldp x3, x4, [sp, #0xa0] ldp x5, x6, [sp, #0xb0] ldp x7, x8, [x19] ldp x9, x10, [x19, #0x10] mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x3, x4 cneg x15, x15, lo csetm x1, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 stp x13, x14, [sp, #0x80] stp x11, x12, [sp, #0x90] mul x11, x5, x9 mul x13, x6, x10 umulh x12, x5, x9 adds x16, x11, x13 umulh x14, x6, x10 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x5, x6 cneg x15, x15, lo csetm x1, lo subs x0, x10, x9 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 subs x3, x5, x3 sbcs x4, x6, x4 ngc x5, xzr cmn x5, #0x1 eor x3, x3, x5 adcs x3, x3, xzr eor x4, x4, x5 adcs x4, x4, xzr subs x7, x7, x9 sbcs x8, x8, x10 ngc x9, xzr cmn x9, #0x1 eor x7, x7, x9 adcs x7, x7, xzr eor x8, x8, x9 adcs x8, x8, xzr eor x10, x5, x9 ldp x15, x1, [sp, #0x80] adds x15, x11, x15 adcs x1, x12, x1 ldp x5, x9, [sp, #0x90] adcs x5, x13, x5 adcs x9, x14, x9 adc x2, xzr, xzr mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x3, x3, x4 cneg x3, x3, lo csetm x4, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x3, x0 umulh x0, x3, x0 cinv x4, x4, lo eor x16, x16, x4 eor x0, x0, x4 cmn x4, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x4 cmn x10, #0x1 eor x11, x11, x10 adcs x11, x11, x15 eor x12, x12, x10 adcs x12, x12, x1 eor x13, x13, x10 adcs x13, x13, x5 eor x14, x14, x10 adcs x14, x14, x9 adcs x3, x2, x10 adcs x4, x10, 
xzr adc x10, x10, xzr adds x13, x13, x15 adcs x14, x14, x1 adcs x3, x3, x5 adcs x4, x4, x9 adc x10, x10, x2 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 adds x3, x3, x11 adcs x4, x4, x12 adc x10, x10, xzr add x2, x10, #0x1 lsl x15, x2, #32 sub x16, x15, x2 adds x13, x13, x2 adcs x14, x14, x16 adcs x3, x3, xzr adcs x4, x4, x15 csetm x7, lo adds x13, x13, x7 and x16, x7, #0xffffffff00000000 adcs x14, x14, x16 adcs x3, x3, x7 and x15, x7, #0xfffffffeffffffff adc x4, x4, x15 stp x13, x14, [sp, #0x80] stp x3, x4, [sp, #0x90] ldp x3, x4, [sp] ldp x5, x6, [sp, #0x10] ldp x7, x8, [sp, #0x20] ldp x9, x10, [sp, #0x30] mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x3, x4 cneg x15, x15, lo csetm x1, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 stp x13, x14, [sp, #0x20] stp x11, x12, [sp, #0x30] mul x11, x5, x9 mul x13, x6, x10 umulh x12, x5, x9 adds x16, x11, x13 umulh x14, x6, x10 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x5, x6 cneg x15, x15, lo csetm x1, lo subs x0, x10, x9 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 subs x3, x5, x3 sbcs x4, x6, x4 ngc x5, xzr cmn x5, #0x1 eor x3, x3, x5 adcs x3, x3, xzr eor x4, x4, x5 adcs x4, x4, xzr subs x7, x7, x9 sbcs x8, x8, x10 ngc x9, xzr cmn x9, #0x1 eor x7, x7, x9 adcs x7, x7, xzr eor x8, x8, x9 adcs x8, x8, xzr eor x10, x5, x9 ldp x15, x1, [sp, #0x20] adds x15, x11, x15 adcs x1, x12, x1 ldp x5, x9, [sp, #0x30] adcs x5, x13, x5 adcs x9, x14, x9 adc x2, xzr, xzr mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x3, x3, x4 cneg x3, x3, lo csetm x4, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x3, x0 umulh x0, x3, x0 cinv x4, x4, lo eor x16, x16, x4 eor x0, x0, x4 cmn x4, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x4 cmn x10, #0x1 eor x11, x11, x10 adcs x11, x11, x15 eor x12, x12, x10 adcs x12, x12, x1 eor x13, x13, x10 adcs x13, x13, x5 eor x14, x14, x10 adcs x14, x14, x9 adcs x3, x2, x10 adcs x4, x10, xzr adc x10, x10, xzr adds x13, x13, x15 adcs x14, x14, x1 adcs x3, x3, x5 adcs x4, x4, x9 adc x10, x10, x2 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 adds x3, x3, x11 adcs x4, x4, x12 adc x10, x10, xzr add x2, x10, #0x1 lsl x15, x2, #32 sub x16, x15, x2 adds x13, x13, x2 adcs x14, x14, x16 adcs 
x3, x3, xzr adcs x4, x4, x15 csetm x7, lo adds x13, x13, x7 and x16, x7, #0xffffffff00000000 adcs x14, x14, x16 adcs x3, x3, x7 and x15, x7, #0xfffffffeffffffff adc x4, x4, x15 stp x13, x14, [sp, #0x20] stp x3, x4, [sp, #0x30] ldp x3, x4, [sp, #0xa0] ldp x5, x6, [sp, #0xb0] ldp x7, x8, [sp, #0xc0] ldp x9, x10, [sp, #0xd0] mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x3, x4 cneg x15, x15, lo csetm x1, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 stp x13, x14, [sp, #0xc0] stp x11, x12, [sp, #0xd0] mul x11, x5, x9 mul x13, x6, x10 umulh x12, x5, x9 adds x16, x11, x13 umulh x14, x6, x10 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x5, x6 cneg x15, x15, lo csetm x1, lo subs x0, x10, x9 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 subs x3, x5, x3 sbcs x4, x6, x4 ngc x5, xzr cmn x5, #0x1 eor x3, x3, x5 adcs x3, x3, xzr eor x4, x4, x5 adcs x4, x4, xzr subs x7, x7, x9 sbcs x8, x8, x10 ngc x9, xzr cmn x9, #0x1 eor x7, x7, x9 adcs x7, x7, xzr eor x8, x8, x9 adcs x8, x8, xzr eor x10, x5, x9 ldp x15, x1, [sp, #0xc0] adds x15, x11, x15 adcs x1, x12, x1 ldp x5, x9, [sp, #0xd0] adcs x5, x13, x5 adcs x9, x14, x9 adc x2, xzr, xzr mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x3, x3, x4 cneg x3, x3, lo csetm x4, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x3, x0 umulh x0, x3, x0 cinv x4, x4, lo eor x16, x16, x4 eor x0, x0, x4 cmn x4, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x4 cmn x10, #0x1 eor x11, x11, x10 adcs x11, x11, x15 eor x12, x12, x10 adcs x12, x12, x1 eor x13, x13, x10 adcs x13, x13, x5 eor x14, x14, x10 adcs x14, x14, x9 adcs x3, x2, x10 adcs x4, x10, xzr adc x10, x10, xzr adds x13, x13, x15 adcs x14, x14, x1 adcs x3, x3, x5 adcs x4, x4, x9 adc x10, x10, x2 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 adds x3, x3, x11 adcs x4, x4, x12 adc x10, x10, xzr add x2, x10, #0x1 lsl x15, x2, #32 sub x16, x15, x2 adds x13, x13, x2 adcs x14, x14, x16 adcs x3, x3, xzr adcs x4, x4, x15 csetm x7, lo adds x13, x13, x7 and x16, x7, #0xffffffff00000000 adcs x14, x14, x16 adcs x3, x3, x7 and x15, x7, #0xfffffffeffffffff adc x4, x4, x15 stp x13, x14, [sp, #0xc0] stp x3, x4, [sp, #0xd0] ldp x5, x6, [sp, #0x40] ldp x4, x3, [sp, #0x80] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x50] ldp x4, x3, [sp, #0x90] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, 
x6, [sp, #0xa0] stp x7, x8, [sp, #0xb0] ldp x5, x6, [sp, #0x20] ldp x4, x3, [sp, #0xc0] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x30] ldp x4, x3, [sp, #0xd0] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp, #0x20] stp x7, x8, [sp, #0x30] ldp x2, x3, [sp, #0xa0] ldp x4, x5, [sp, #0xb0] umull x15, w2, w2 lsr x11, x2, #32 umull x16, w11, w11 umull x11, w2, w11 adds x15, x15, x11, lsl #33 lsr x11, x11, #31 adc x16, x16, x11 umull x0, w3, w3 lsr x11, x3, #32 umull x1, w11, w11 umull x11, w3, w11 mul x12, x2, x3 umulh x13, x2, x3 adds x0, x0, x11, lsl #33 lsr x11, x11, #31 adc x1, x1, x11 adds x12, x12, x12 adcs x13, x13, x13 adc x1, x1, xzr adds x16, x16, x12 adcs x0, x0, x13 adc x1, x1, xzr lsl x12, x15, #32 lsr x11, x15, #32 subs x14, x12, x15 sbc x13, x11, xzr subs x16, x16, x14 sbcs x0, x0, x13 sbcs x1, x1, x12 sbc x15, x15, x11 lsl x12, x16, #32 lsr x11, x16, #32 subs x14, x12, x16 sbc x13, x11, xzr subs x0, x0, x14 sbcs x1, x1, x13 sbcs x15, x15, x12 sbc x16, x16, x11 mul x6, x2, x4 mul x14, x3, x5 umulh x8, x2, x4 subs x10, x2, x3 cneg x10, x10, lo csetm x13, lo subs x12, x5, x4 cneg x12, x12, lo mul x11, x10, x12 umulh x12, x10, x12 cinv x13, x13, lo eor x11, x11, x13 eor x12, x12, x13 adds x7, x6, x8 adc x8, x8, xzr umulh x9, x3, x5 adds x7, x7, x14 adcs x8, x8, x9 adc x9, x9, xzr adds x8, x8, x14 adc x9, x9, xzr cmn x13, #0x1 adcs x7, x7, x11 adcs x8, x8, x12 adc x9, x9, x13 adds x6, x6, x6 adcs x7, x7, x7 adcs x8, x8, x8 adcs x9, x9, x9 adc x10, xzr, xzr adds x6, x6, x0 adcs x7, x7, x1 adcs x8, x8, x15 adcs x9, x9, x16 adc x10, x10, xzr lsl x12, x6, #32 lsr x11, x6, #32 subs x14, x12, x6 sbc x13, x11, xzr subs x7, x7, x14 sbcs x8, x8, x13 sbcs x9, x9, x12 sbc x14, x6, x11 adds x10, x10, x14 adc x6, xzr, xzr lsl x12, x7, #32 lsr x11, x7, #32 subs x14, x12, x7 sbc x13, x11, xzr subs x8, x8, x14 sbcs x9, x9, x13 sbcs x10, x10, x12 sbc x14, x7, x11 adds x6, x6, x14 adc x7, xzr, xzr mul x11, x4, x4 adds x8, x8, x11 mul x12, x5, x5 umulh x11, x4, x4 adcs x9, x9, x11 adcs x10, x10, x12 umulh x12, x5, x5 adcs x6, x6, x12 adc x7, x7, xzr mul x11, x4, x5 umulh x12, x4, x5 adds x11, x11, x11 adcs x12, x12, x12 adc x13, xzr, xzr adds x9, x9, x11 adcs x10, x10, x12 adcs x6, x6, x13 adcs x7, x7, xzr mov x11, #-0x100000000 adds x5, x8, #0x1 sbcs x11, x9, x11 mov x13, #-0x100000001 adcs x12, x10, xzr sbcs x13, x6, x13 sbcs xzr, x7, xzr csel x8, x5, x8, hs csel x9, x11, x9, hs csel x10, x12, x10, hs csel x6, x13, x6, hs stp x8, x9, [sp, #0x60] stp x10, x6, [sp, #0x70] ldp x2, x3, [sp, #0x20] ldp x4, x5, [sp, #0x30] umull x15, w2, w2 lsr x11, x2, #32 umull x16, w11, w11 umull x11, w2, w11 adds x15, x15, x11, lsl #33 lsr x11, x11, #31 adc x16, x16, x11 umull x0, w3, w3 lsr x11, x3, #32 umull x1, w11, w11 umull x11, w3, w11 mul x12, x2, x3 umulh x13, x2, x3 adds x0, x0, x11, lsl #33 lsr x11, x11, #31 adc x1, x1, x11 adds x12, x12, x12 adcs x13, x13, x13 adc x1, x1, xzr adds x16, x16, x12 adcs x0, x0, x13 adc x1, x1, xzr lsl x12, x15, #32 lsr x11, x15, #32 subs x14, x12, x15 sbc x13, x11, xzr subs x16, x16, x14 sbcs x0, x0, x13 sbcs x1, x1, x12 sbc x15, x15, x11 lsl x12, x16, #32 lsr x11, x16, #32 subs x14, x12, x16 sbc x13, x11, xzr subs x0, x0, x14 sbcs x1, x1, x13 sbcs x15, x15, x12 sbc x16, x16, x11 mul x6, x2, x4 mul x14, x3, x5 umulh x8, x2, x4 subs x10, x2, x3 cneg x10, x10, lo csetm x13, lo subs x12, x5, x4 cneg x12, x12, lo mul x11, x10, x12 umulh 
x12, x10, x12 cinv x13, x13, lo eor x11, x11, x13 eor x12, x12, x13 adds x7, x6, x8 adc x8, x8, xzr umulh x9, x3, x5 adds x7, x7, x14 adcs x8, x8, x9 adc x9, x9, xzr adds x8, x8, x14 adc x9, x9, xzr cmn x13, #0x1 adcs x7, x7, x11 adcs x8, x8, x12 adc x9, x9, x13 adds x6, x6, x6 adcs x7, x7, x7 adcs x8, x8, x8 adcs x9, x9, x9 adc x10, xzr, xzr adds x6, x6, x0 adcs x7, x7, x1 adcs x8, x8, x15 adcs x9, x9, x16 adc x10, x10, xzr lsl x12, x6, #32 lsr x11, x6, #32 subs x14, x12, x6 sbc x13, x11, xzr subs x7, x7, x14 sbcs x8, x8, x13 sbcs x9, x9, x12 sbc x14, x6, x11 adds x10, x10, x14 adc x6, xzr, xzr lsl x12, x7, #32 lsr x11, x7, #32 subs x14, x12, x7 sbc x13, x11, xzr subs x8, x8, x14 sbcs x9, x9, x13 sbcs x10, x10, x12 sbc x14, x7, x11 adds x6, x6, x14 adc x7, xzr, xzr mul x11, x4, x4 adds x8, x8, x11 mul x12, x5, x5 umulh x11, x4, x4 adcs x9, x9, x11 adcs x10, x10, x12 umulh x12, x5, x5 adcs x6, x6, x12 adc x7, x7, xzr mul x11, x4, x5 umulh x12, x4, x5 adds x11, x11, x11 adcs x12, x12, x12 adc x13, xzr, xzr adds x9, x9, x11 adcs x10, x10, x12 adcs x6, x6, x13 adcs x7, x7, xzr mov x11, #-0x100000000 adds x5, x8, #0x1 sbcs x11, x9, x11 mov x13, #-0x100000001 adcs x12, x10, xzr sbcs x13, x6, x13 sbcs xzr, x7, xzr csel x8, x5, x8, hs csel x9, x11, x9, hs csel x10, x12, x10, hs csel x6, x13, x6, hs stp x8, x9, [sp] stp x10, x6, [sp, #0x10] ldp x3, x4, [sp, #0x60] ldp x5, x6, [sp, #0x70] ldp x7, x8, [sp, #0x80] ldp x9, x10, [sp, #0x90] mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x3, x4 cneg x15, x15, lo csetm x1, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 stp x13, x14, [sp, #0x80] stp x11, x12, [sp, #0x90] mul x11, x5, x9 mul x13, x6, x10 umulh x12, x5, x9 adds x16, x11, x13 umulh x14, x6, x10 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x5, x6 cneg x15, x15, lo csetm x1, lo subs x0, x10, x9 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 subs x3, x5, x3 sbcs x4, x6, x4 ngc x5, xzr cmn x5, #0x1 eor x3, x3, x5 adcs x3, x3, xzr eor x4, x4, x5 adcs x4, x4, xzr subs x7, x7, x9 sbcs x8, x8, x10 ngc x9, xzr cmn x9, #0x1 eor x7, x7, x9 adcs x7, x7, xzr eor x8, x8, x9 adcs x8, x8, xzr eor x10, x5, x9 ldp x15, x1, [sp, #0x80] adds x15, x11, x15 adcs x1, x12, x1 ldp x5, x9, [sp, #0x90] adcs x5, x13, x5 adcs x9, x14, x9 adc x2, xzr, xzr mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x3, x3, x4 cneg x3, x3, lo csetm x4, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x3, x0 umulh x0, x3, x0 cinv x4, x4, lo eor x16, x16, x4 eor x0, x0, x4 cmn x4, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x4 cmn x10, #0x1 eor x11, x11, x10 adcs x11, x11, x15 eor x12, x12, x10 adcs x12, x12, x1 eor x13, x13, x10 adcs x13, x13, x5 eor x14, x14, 
x10 adcs x14, x14, x9 adcs x3, x2, x10 adcs x4, x10, xzr adc x10, x10, xzr adds x13, x13, x15 adcs x14, x14, x1 adcs x3, x3, x5 adcs x4, x4, x9 adc x10, x10, x2 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 adds x3, x3, x11 adcs x4, x4, x12 adc x10, x10, xzr add x2, x10, #0x1 lsl x15, x2, #32 sub x16, x15, x2 adds x13, x13, x2 adcs x14, x14, x16 adcs x3, x3, xzr adcs x4, x4, x15 csetm x7, lo adds x13, x13, x7 and x16, x7, #0xffffffff00000000 adcs x14, x14, x16 adcs x3, x3, x7 and x15, x7, #0xfffffffeffffffff adc x4, x4, x15 stp x13, x14, [sp, #0x80] stp x3, x4, [sp, #0x90] ldp x3, x4, [sp, #0x60] ldp x5, x6, [sp, #0x70] ldp x7, x8, [sp, #0x40] ldp x9, x10, [sp, #0x50] mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x3, x4 cneg x15, x15, lo csetm x1, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 stp x13, x14, [sp, #0x40] stp x11, x12, [sp, #0x50] mul x11, x5, x9 mul x13, x6, x10 umulh x12, x5, x9 adds x16, x11, x13 umulh x14, x6, x10 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x5, x6 cneg x15, x15, lo csetm x1, lo subs x0, x10, x9 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 subs x3, x5, x3 sbcs x4, x6, x4 ngc x5, xzr cmn x5, #0x1 eor x3, x3, x5 adcs x3, x3, xzr eor x4, x4, x5 adcs x4, x4, xzr subs x7, x7, x9 sbcs x8, x8, x10 ngc x9, xzr cmn x9, #0x1 eor x7, x7, x9 adcs x7, x7, xzr eor x8, x8, x9 adcs x8, x8, xzr eor x10, x5, x9 ldp x15, x1, [sp, #0x40] adds x15, x11, x15 adcs x1, x12, x1 ldp x5, x9, [sp, #0x50] adcs x5, x13, x5 adcs x9, x14, x9 adc x2, xzr, xzr mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x3, x3, x4 cneg x3, x3, lo csetm x4, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x3, x0 umulh x0, x3, x0 cinv x4, x4, lo eor x16, x16, x4 eor x0, x0, x4 cmn x4, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x4 cmn x10, #0x1 eor x11, x11, x10 adcs x11, x11, x15 eor x12, x12, x10 adcs x12, x12, x1 eor x13, x13, x10 adcs x13, x13, x5 eor x14, x14, x10 adcs x14, x14, x9 adcs x3, x2, x10 adcs x4, x10, xzr adc x10, x10, xzr adds x13, x13, x15 adcs x14, x14, x1 adcs x3, x3, x5 adcs x4, x4, x9 adc x10, x10, x2 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 adds x3, x3, x11 adcs x4, x4, x12 adc x10, x10, xzr add x2, x10, #0x1 lsl x15, x2, #32 
sub x16, x15, x2 adds x13, x13, x2 adcs x14, x14, x16 adcs x3, x3, xzr adcs x4, x4, x15 csetm x7, lo adds x13, x13, x7 and x16, x7, #0xffffffff00000000 adcs x14, x14, x16 adcs x3, x3, x7 and x15, x7, #0xfffffffeffffffff adc x4, x4, x15 stp x13, x14, [sp, #0x40] stp x3, x4, [sp, #0x50] ldp x5, x6, [sp] ldp x4, x3, [sp, #0x80] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x10] ldp x4, x3, [sp, #0x90] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp] stp x7, x8, [sp, #0x10] ldp x5, x6, [sp, #0x40] ldp x4, x3, [sp, #0x80] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x50] ldp x4, x3, [sp, #0x90] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp, #0x60] stp x7, x8, [sp, #0x70] ldp x3, x4, [sp, #0xa0] ldp x5, x6, [sp, #0xb0] ldp x7, x8, [x19, #0x40] ldp x9, x10, [x19, #0x50] mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x3, x4 cneg x15, x15, lo csetm x1, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 stp x13, x14, [sp, #0xa0] stp x11, x12, [sp, #0xb0] mul x11, x5, x9 mul x13, x6, x10 umulh x12, x5, x9 adds x16, x11, x13 umulh x14, x6, x10 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x5, x6 cneg x15, x15, lo csetm x1, lo subs x0, x10, x9 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 subs x3, x5, x3 sbcs x4, x6, x4 ngc x5, xzr cmn x5, #0x1 eor x3, x3, x5 adcs x3, x3, xzr eor x4, x4, x5 adcs x4, x4, xzr subs x7, x7, x9 sbcs x8, x8, x10 ngc x9, xzr cmn x9, #0x1 eor x7, x7, x9 adcs x7, x7, xzr eor x8, x8, x9 adcs x8, x8, xzr eor x10, x5, x9 ldp x15, x1, [sp, #0xa0] adds x15, x11, x15 adcs x1, x12, x1 ldp x5, x9, [sp, #0xb0] adcs x5, x13, x5 adcs x9, x14, x9 adc x2, xzr, xzr mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x3, x3, x4 cneg x3, x3, lo csetm x4, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x3, x0 umulh x0, x3, x0 cinv x4, x4, lo eor x16, x16, x4 eor x0, x0, x4 cmn x4, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x4 cmn x10, #0x1 eor x11, x11, x10 adcs x11, x11, x15 eor x12, x12, x10 adcs x12, x12, x1 eor x13, x13, x10 adcs x13, x13, x5 eor x14, x14, x10 adcs x14, x14, x9 adcs x3, x2, x10 adcs x4, x10, xzr adc x10, x10, xzr adds x13, x13, x15 adcs x14, x14, x1 adcs x3, x3, x5 adcs x4, x4, x9 adc x10, x10, x2 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, 
x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 adds x3, x3, x11 adcs x4, x4, x12 adc x10, x10, xzr add x2, x10, #0x1 lsl x15, x2, #32 sub x16, x15, x2 adds x13, x13, x2 adcs x14, x14, x16 adcs x3, x3, xzr adcs x4, x4, x15 csetm x7, lo adds x13, x13, x7 and x16, x7, #0xffffffff00000000 adcs x14, x14, x16 adcs x3, x3, x7 and x15, x7, #0xfffffffeffffffff adc x4, x4, x15 stp x13, x14, [sp, #0xa0] stp x3, x4, [sp, #0xb0] ldp x5, x6, [sp] ldp x4, x3, [sp, #0x40] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x10] ldp x4, x3, [sp, #0x50] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp] stp x7, x8, [sp, #0x10] ldp x5, x6, [sp, #0x80] ldp x4, x3, [sp] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x90] ldp x4, x3, [sp, #0x10] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp, #0x80] stp x7, x8, [sp, #0x90] ldp x3, x4, [sp, #0x60] ldp x5, x6, [sp, #0x70] ldp x7, x8, [sp, #0xc0] ldp x9, x10, [sp, #0xd0] mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x3, x4 cneg x15, x15, lo csetm x1, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 stp x13, x14, [sp, #0x60] stp x11, x12, [sp, #0x70] mul x11, x5, x9 mul x13, x6, x10 umulh x12, x5, x9 adds x16, x11, x13 umulh x14, x6, x10 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x5, x6 cneg x15, x15, lo csetm x1, lo subs x0, x10, x9 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 subs x3, x5, x3 sbcs x4, x6, x4 ngc x5, xzr cmn x5, #0x1 eor x3, x3, x5 adcs x3, x3, xzr eor x4, x4, x5 adcs x4, x4, xzr subs x7, x7, x9 sbcs x8, x8, x10 ngc x9, xzr cmn x9, #0x1 eor x7, x7, x9 adcs x7, x7, xzr eor x8, x8, x9 adcs x8, x8, xzr eor x10, x5, x9 ldp x15, x1, [sp, #0x60] adds x15, x11, x15 adcs x1, x12, x1 ldp x5, x9, [sp, #0x70] adcs x5, x13, x5 adcs x9, x14, x9 adc x2, xzr, xzr mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x3, x3, x4 cneg x3, x3, lo csetm x4, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x3, x0 umulh x0, x3, x0 cinv x4, x4, lo eor x16, x16, x4 eor x0, x0, x4 cmn x4, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x4 cmn x10, #0x1 eor x11, x11, x10 adcs x11, x11, x15 eor x12, x12, x10 adcs x12, x12, x1 eor x13, x13, x10 adcs x13, x13, x5 eor x14, x14, x10 adcs x14, x14, x9 adcs x3, x2, x10 adcs x4, x10, xzr adc x10, x10, xzr adds x13, x13, x15 adcs x14, x14, x1 adcs x3, x3, x5 adcs x4, x4, x9 adc x10, x10, x2 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, 
x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 adds x3, x3, x11 adcs x4, x4, x12 adc x10, x10, xzr add x2, x10, #0x1 lsl x15, x2, #32 sub x16, x15, x2 adds x13, x13, x2 adcs x14, x14, x16 adcs x3, x3, xzr adcs x4, x4, x15 csetm x7, lo adds x13, x13, x7 and x16, x7, #0xffffffff00000000 adcs x14, x14, x16 adcs x3, x3, x7 and x15, x7, #0xfffffffeffffffff adc x4, x4, x15 stp x13, x14, [sp, #0x60] stp x3, x4, [sp, #0x70] ldp x3, x4, [sp, #0xa0] ldp x5, x6, [sp, #0xb0] ldp x7, x8, [x20, #0x40] ldp x9, x10, [x20, #0x50] mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x3, x4 cneg x15, x15, lo csetm x1, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 stp x13, x14, [sp, #0xa0] stp x11, x12, [sp, #0xb0] mul x11, x5, x9 mul x13, x6, x10 umulh x12, x5, x9 adds x16, x11, x13 umulh x14, x6, x10 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x5, x6 cneg x15, x15, lo csetm x1, lo subs x0, x10, x9 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 subs x3, x5, x3 sbcs x4, x6, x4 ngc x5, xzr cmn x5, #0x1 eor x3, x3, x5 adcs x3, x3, xzr eor x4, x4, x5 adcs x4, x4, xzr subs x7, x7, x9 sbcs x8, x8, x10 ngc x9, xzr cmn x9, #0x1 eor x7, x7, x9 adcs x7, x7, xzr eor x8, x8, x9 adcs x8, x8, xzr eor x10, x5, x9 ldp x15, x1, [sp, #0xa0] adds x15, x11, x15 adcs x1, x12, x1 ldp x5, x9, [sp, #0xb0] adcs x5, x13, x5 adcs x9, x14, x9 adc x2, xzr, xzr mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x3, x3, x4 cneg x3, x3, lo csetm x4, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x3, x0 umulh x0, x3, x0 cinv x4, x4, lo eor x16, x16, x4 eor x0, x0, x4 cmn x4, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x4 cmn x10, #0x1 eor x11, x11, x10 adcs x11, x11, x15 eor x12, x12, x10 adcs x12, x12, x1 eor x13, x13, x10 adcs x13, x13, x5 eor x14, x14, x10 adcs x14, x14, x9 adcs x3, x2, x10 adcs x4, x10, xzr adc x10, x10, xzr adds x13, x13, x15 adcs x14, x14, x1 adcs x3, x3, x5 adcs x4, x4, x9 adc x10, x10, x2 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 adds x3, x3, x11 adcs x4, x4, x12 adc x10, x10, xzr add x2, x10, #0x1 lsl x15, x2, #32 sub x16, x15, x2 adds x13, x13, x2 adcs x14, x14, x16 adcs x3, x3, xzr adcs x4, x4, x15 csetm x7, lo adds x13, x13, x7 and x16, x7, #0xffffffff00000000 adcs x14, x14, x16 adcs x3, x3, x7 and x15, x7, #0xfffffffeffffffff adc x4, x4, x15 stp x13, 
x14, [sp, #0xa0] stp x3, x4, [sp, #0xb0] ldp x3, x4, [sp, #0x20] ldp x5, x6, [sp, #0x30] ldp x7, x8, [sp, #0x80] ldp x9, x10, [sp, #0x90] mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x3, x4 cneg x15, x15, lo csetm x1, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 stp x13, x14, [sp, #0x80] stp x11, x12, [sp, #0x90] mul x11, x5, x9 mul x13, x6, x10 umulh x12, x5, x9 adds x16, x11, x13 umulh x14, x6, x10 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x15, x5, x6 cneg x15, x15, lo csetm x1, lo subs x0, x10, x9 cneg x0, x0, lo mul x16, x15, x0 umulh x0, x15, x0 cinv x1, x1, lo eor x16, x16, x1 eor x0, x0, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x1 subs x3, x5, x3 sbcs x4, x6, x4 ngc x5, xzr cmn x5, #0x1 eor x3, x3, x5 adcs x3, x3, xzr eor x4, x4, x5 adcs x4, x4, xzr subs x7, x7, x9 sbcs x8, x8, x10 ngc x9, xzr cmn x9, #0x1 eor x7, x7, x9 adcs x7, x7, xzr eor x8, x8, x9 adcs x8, x8, xzr eor x10, x5, x9 ldp x15, x1, [sp, #0x80] adds x15, x11, x15 adcs x1, x12, x1 ldp x5, x9, [sp, #0x90] adcs x5, x13, x5 adcs x9, x14, x9 adc x2, xzr, xzr mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x0, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x0 adcs x14, x14, xzr subs x3, x3, x4 cneg x3, x3, lo csetm x4, lo subs x0, x8, x7 cneg x0, x0, lo mul x16, x3, x0 umulh x0, x3, x0 cinv x4, x4, lo eor x16, x16, x4 eor x0, x0, x4 cmn x4, #0x1 adcs x12, x12, x16 adcs x13, x13, x0 adc x14, x14, x4 cmn x10, #0x1 eor x11, x11, x10 adcs x11, x11, x15 eor x12, x12, x10 adcs x12, x12, x1 eor x13, x13, x10 adcs x13, x13, x5 eor x14, x14, x10 adcs x14, x14, x9 adcs x3, x2, x10 adcs x4, x10, xzr adc x10, x10, xzr adds x13, x13, x15 adcs x14, x14, x1 adcs x3, x3, x5 adcs x4, x4, x9 adc x10, x10, x2 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x0, x15, xzr subs x12, x12, x1 sbcs x13, x13, x0 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x0, x15, xzr subs x13, x13, x1 sbcs x14, x14, x0 sbcs x11, x11, x16 sbc x12, x12, x15 adds x3, x3, x11 adcs x4, x4, x12 adc x10, x10, xzr add x2, x10, #0x1 lsl x15, x2, #32 sub x16, x15, x2 adds x13, x13, x2 adcs x14, x14, x16 adcs x3, x3, xzr adcs x4, x4, x15 csetm x7, lo adds x13, x13, x7 and x16, x7, #0xffffffff00000000 adcs x14, x14, x16 adcs x3, x3, x7 and x15, x7, #0xfffffffeffffffff adc x4, x4, x15 stp x13, x14, [sp, #0x80] stp x3, x4, [sp, #0x90] ldp x5, x6, [sp, #0x80] ldp x4, x3, [sp, #0x60] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x90] ldp x4, x3, [sp, #0x70] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp, #0x80] stp x7, x8, [sp, #0x90] ldp x0, x1, [x19, #0x40] ldp x2, x3, [x19, #0x50] orr x12, x0, x1 orr x13, x2, x3 orr x12, x12, x13 cmp x12, xzr cset x12, ne ldp x4, x5, [x20, 
#0x40] ldp x6, x7, [x20, #0x50] orr x13, x4, x5 orr x14, x6, x7 orr x13, x13, x14 cmp x13, xzr cset x13, ne cmp x13, x12 ldp x8, x9, [sp, #0xa0] csel x8, x0, x8, lo csel x9, x1, x9, lo csel x8, x4, x8, hi csel x9, x5, x9, hi ldp x10, x11, [sp, #0xb0] csel x10, x2, x10, lo csel x11, x3, x11, lo csel x10, x6, x10, hi csel x11, x7, x11, hi ldp x12, x13, [x19] ldp x0, x1, [sp] csel x0, x12, x0, lo csel x1, x13, x1, lo ldp x12, x13, [x20] csel x0, x12, x0, hi csel x1, x13, x1, hi ldp x12, x13, [x19, #0x10] ldp x2, x3, [sp, #0x10] csel x2, x12, x2, lo csel x3, x13, x3, lo ldp x12, x13, [x20, #0x10] csel x2, x12, x2, hi csel x3, x13, x3, hi ldp x12, x13, [x19, #0x20] ldp x4, x5, [sp, #0x80] csel x4, x12, x4, lo csel x5, x13, x5, lo ldp x12, x13, [x20, #0x20] csel x4, x12, x4, hi csel x5, x13, x5, hi ldp x12, x13, [x19, #0x30] ldp x6, x7, [sp, #0x90] csel x6, x12, x6, lo csel x7, x13, x7, lo ldp x12, x13, [x20, #0x30] csel x6, x12, x6, hi csel x7, x13, x7, hi stp x0, x1, [x17] stp x2, x3, [x17, #0x10] stp x4, x5, [x17, #0x20] stp x6, x7, [x17, #0x30] stp x8, x9, [x17, #0x40] stp x10, x11, [x17, #0x50] CFI_INC_SP(224) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(Lsm2_montjscalarmul_sm2_montjadd) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lsm2_montjscalarmul_sm2_montjdouble) Lsm2_montjscalarmul_sm2_montjdouble: CFI_START CFI_DEC_SP(208) stp x19, x20, [sp, #0xc0] mov x19, x0 mov x20, x1 ldp x2, x3, [x20, #0x40] ldp x4, x5, [x20, #0x50] umull x15, w2, w2 lsr x11, x2, #32 umull x16, w11, w11 umull x11, w2, w11 adds x15, x15, x11, lsl #33 lsr x11, x11, #31 adc x16, x16, x11 umull x17, w3, w3 lsr x11, x3, #32 umull x1, w11, w11 umull x11, w3, w11 mul x12, x2, x3 umulh x13, x2, x3 adds x17, x17, x11, lsl #33 lsr x11, x11, #31 adc x1, x1, x11 adds x12, x12, x12 adcs x13, x13, x13 adc x1, x1, xzr adds x16, x16, x12 adcs x17, x17, x13 adc x1, x1, xzr lsl x12, x15, #32 lsr x11, x15, #32 subs x14, x12, x15 sbc x13, x11, xzr subs x16, x16, x14 sbcs x17, x17, x13 sbcs x1, x1, x12 sbc x15, x15, x11 lsl x12, x16, #32 lsr x11, x16, #32 subs x14, x12, x16 sbc x13, x11, xzr subs x17, x17, x14 sbcs x1, x1, x13 sbcs x15, x15, x12 sbc x16, x16, x11 mul x6, x2, x4 mul x14, x3, x5 umulh x8, x2, x4 subs x10, x2, x3 cneg x10, x10, lo csetm x13, lo subs x12, x5, x4 cneg x12, x12, lo mul x11, x10, x12 umulh x12, x10, x12 cinv x13, x13, lo eor x11, x11, x13 eor x12, x12, x13 adds x7, x6, x8 adc x8, x8, xzr umulh x9, x3, x5 adds x7, x7, x14 adcs x8, x8, x9 adc x9, x9, xzr adds x8, x8, x14 adc x9, x9, xzr cmn x13, #0x1 adcs x7, x7, x11 adcs x8, x8, x12 adc x9, x9, x13 adds x6, x6, x6 adcs x7, x7, x7 adcs x8, x8, x8 adcs x9, x9, x9 adc x10, xzr, xzr adds x6, x6, x17 adcs x7, x7, x1 adcs x8, x8, x15 adcs x9, x9, x16 adc x10, x10, xzr lsl x12, x6, #32 lsr x11, x6, #32 subs x14, x12, x6 sbc x13, x11, xzr subs x7, x7, x14 sbcs x8, x8, x13 sbcs x9, x9, x12 sbc x14, x6, x11 adds x10, x10, x14 adc x6, xzr, xzr lsl x12, x7, #32 lsr x11, x7, #32 subs x14, x12, x7 sbc x13, x11, xzr subs x8, x8, x14 sbcs x9, x9, x13 sbcs x10, x10, x12 sbc x14, x7, x11 adds x6, x6, x14 adc x7, xzr, xzr mul x11, x4, x4 adds x8, x8, x11 mul x12, x5, x5 umulh x11, x4, x4 adcs x9, x9, x11 adcs x10, x10, x12 umulh x12, x5, x5 adcs x6, x6, x12 adc x7, x7, xzr mul x11, x4, x5 umulh x12, x4, x5 adds x11, x11, x11 adcs x12, x12, x12 adc x13, xzr, xzr adds x9, x9, x11 adcs x10, x10, x12 adcs x6, x6, x13 adcs x7, x7, xzr mov x11, #-0x100000000 adds x5, x8, #0x1 sbcs x11, x9, x11 mov x13, #-0x100000001 adcs x12, x10, xzr sbcs x13, x6, x13 sbcs xzr, x7, xzr csel x8, x5, x8, hs 
csel x9, x11, x9, hs csel x10, x12, x10, hs csel x6, x13, x6, hs stp x8, x9, [sp] stp x10, x6, [sp, #0x10] ldp x2, x3, [x20, #0x20] ldp x4, x5, [x20, #0x30] umull x15, w2, w2 lsr x11, x2, #32 umull x16, w11, w11 umull x11, w2, w11 adds x15, x15, x11, lsl #33 lsr x11, x11, #31 adc x16, x16, x11 umull x17, w3, w3 lsr x11, x3, #32 umull x1, w11, w11 umull x11, w3, w11 mul x12, x2, x3 umulh x13, x2, x3 adds x17, x17, x11, lsl #33 lsr x11, x11, #31 adc x1, x1, x11 adds x12, x12, x12 adcs x13, x13, x13 adc x1, x1, xzr adds x16, x16, x12 adcs x17, x17, x13 adc x1, x1, xzr lsl x12, x15, #32 lsr x11, x15, #32 subs x14, x12, x15 sbc x13, x11, xzr subs x16, x16, x14 sbcs x17, x17, x13 sbcs x1, x1, x12 sbc x15, x15, x11 lsl x12, x16, #32 lsr x11, x16, #32 subs x14, x12, x16 sbc x13, x11, xzr subs x17, x17, x14 sbcs x1, x1, x13 sbcs x15, x15, x12 sbc x16, x16, x11 mul x6, x2, x4 mul x14, x3, x5 umulh x8, x2, x4 subs x10, x2, x3 cneg x10, x10, lo csetm x13, lo subs x12, x5, x4 cneg x12, x12, lo mul x11, x10, x12 umulh x12, x10, x12 cinv x13, x13, lo eor x11, x11, x13 eor x12, x12, x13 adds x7, x6, x8 adc x8, x8, xzr umulh x9, x3, x5 adds x7, x7, x14 adcs x8, x8, x9 adc x9, x9, xzr adds x8, x8, x14 adc x9, x9, xzr cmn x13, #0x1 adcs x7, x7, x11 adcs x8, x8, x12 adc x9, x9, x13 adds x6, x6, x6 adcs x7, x7, x7 adcs x8, x8, x8 adcs x9, x9, x9 adc x10, xzr, xzr adds x6, x6, x17 adcs x7, x7, x1 adcs x8, x8, x15 adcs x9, x9, x16 adc x10, x10, xzr lsl x12, x6, #32 lsr x11, x6, #32 subs x14, x12, x6 sbc x13, x11, xzr subs x7, x7, x14 sbcs x8, x8, x13 sbcs x9, x9, x12 sbc x14, x6, x11 adds x10, x10, x14 adc x6, xzr, xzr lsl x12, x7, #32 lsr x11, x7, #32 subs x14, x12, x7 sbc x13, x11, xzr subs x8, x8, x14 sbcs x9, x9, x13 sbcs x10, x10, x12 sbc x14, x7, x11 adds x6, x6, x14 adc x7, xzr, xzr mul x11, x4, x4 adds x8, x8, x11 mul x12, x5, x5 umulh x11, x4, x4 adcs x9, x9, x11 adcs x10, x10, x12 umulh x12, x5, x5 adcs x6, x6, x12 adc x7, x7, xzr mul x11, x4, x5 umulh x12, x4, x5 adds x11, x11, x11 adcs x12, x12, x12 adc x13, xzr, xzr adds x9, x9, x11 adcs x10, x10, x12 adcs x6, x6, x13 adcs x7, x7, xzr mov x11, #-0x100000000 adds x5, x8, #0x1 sbcs x11, x9, x11 mov x13, #-0x100000001 adcs x12, x10, xzr sbcs x13, x6, x13 sbcs xzr, x7, xzr csel x8, x5, x8, hs csel x9, x11, x9, hs csel x10, x12, x10, hs csel x6, x13, x6, hs stp x8, x9, [sp, #0x20] stp x10, x6, [sp, #0x30] ldp x5, x6, [x20] ldp x4, x3, [sp] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x20, #0x10] ldp x4, x3, [sp, #0x10] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp, #0x60] stp x7, x8, [sp, #0x70] ldp x4, x5, [x20] ldp x8, x9, [sp] adds x4, x4, x8 adcs x5, x5, x9 ldp x6, x7, [x20, #0x10] ldp x10, x11, [sp, #0x10] adcs x6, x6, x10 adcs x7, x7, x11 csetm x2, hs subs x4, x4, x2 and x3, x2, #0xffffffff00000000 sbcs x5, x5, x3 and x1, x2, #0xfffffffeffffffff sbcs x6, x6, x2 sbc x7, x7, x1 stp x4, x5, [sp, #0x40] stp x6, x7, [sp, #0x50] ldp x3, x4, [sp, #0x40] ldp x5, x6, [sp, #0x50] ldp x7, x8, [sp, #0x60] ldp x9, x10, [sp, #0x70] mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x17, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x17 adcs x14, x14, xzr subs x15, x3, x4 cneg x15, x15, lo csetm x1, lo subs x17, x8, x7 cneg x17, x17, lo mul x16, x15, x17 umulh x17, x15, x17 cinv x1, x1, lo eor x16, x16, x1 eor x17, x17, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, 
x13, x17 adc x14, x14, x1 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x17, x15, xzr subs x12, x12, x1 sbcs x13, x13, x17 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x17, x15, xzr subs x13, x13, x1 sbcs x14, x14, x17 sbcs x11, x11, x16 sbc x12, x12, x15 stp x13, x14, [sp, #0x60] stp x11, x12, [sp, #0x70] mul x11, x5, x9 mul x13, x6, x10 umulh x12, x5, x9 adds x16, x11, x13 umulh x14, x6, x10 adcs x17, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x17 adcs x14, x14, xzr subs x15, x5, x6 cneg x15, x15, lo csetm x1, lo subs x17, x10, x9 cneg x17, x17, lo mul x16, x15, x17 umulh x17, x15, x17 cinv x1, x1, lo eor x16, x16, x1 eor x17, x17, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x17 adc x14, x14, x1 subs x3, x5, x3 sbcs x4, x6, x4 ngc x5, xzr cmn x5, #0x1 eor x3, x3, x5 adcs x3, x3, xzr eor x4, x4, x5 adcs x4, x4, xzr subs x7, x7, x9 sbcs x8, x8, x10 ngc x9, xzr cmn x9, #0x1 eor x7, x7, x9 adcs x7, x7, xzr eor x8, x8, x9 adcs x8, x8, xzr eor x10, x5, x9 ldp x15, x1, [sp, #0x60] adds x15, x11, x15 adcs x1, x12, x1 ldp x5, x9, [sp, #0x70] adcs x5, x13, x5 adcs x9, x14, x9 adc x2, xzr, xzr mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x17, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x17 adcs x14, x14, xzr subs x3, x3, x4 cneg x3, x3, lo csetm x4, lo subs x17, x8, x7 cneg x17, x17, lo mul x16, x3, x17 umulh x17, x3, x17 cinv x4, x4, lo eor x16, x16, x4 eor x17, x17, x4 cmn x4, #0x1 adcs x12, x12, x16 adcs x13, x13, x17 adc x14, x14, x4 cmn x10, #0x1 eor x11, x11, x10 adcs x11, x11, x15 eor x12, x12, x10 adcs x12, x12, x1 eor x13, x13, x10 adcs x13, x13, x5 eor x14, x14, x10 adcs x14, x14, x9 adcs x3, x2, x10 adcs x4, x10, xzr adc x10, x10, xzr adds x13, x13, x15 adcs x14, x14, x1 adcs x3, x3, x5 adcs x4, x4, x9 adc x10, x10, x2 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x17, x15, xzr subs x12, x12, x1 sbcs x13, x13, x17 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x17, x15, xzr subs x13, x13, x1 sbcs x14, x14, x17 sbcs x11, x11, x16 sbc x12, x12, x15 adds x3, x3, x11 adcs x4, x4, x12 adc x10, x10, xzr add x2, x10, #0x1 lsl x15, x2, #32 sub x16, x15, x2 adds x13, x13, x2 adcs x14, x14, x16 adcs x3, x3, xzr adcs x4, x4, x15 csetm x7, lo adds x13, x13, x7 and x16, x7, #0xffffffff00000000 adcs x14, x14, x16 adcs x3, x3, x7 and x15, x7, #0xfffffffeffffffff adc x4, x4, x15 stp x13, x14, [sp, #0x60] stp x3, x4, [sp, #0x70] ldp x4, x5, [x20, #0x20] ldp x8, x9, [x20, #0x40] adds x4, x4, x8 adcs x5, x5, x9 ldp x6, x7, [x20, #0x30] ldp x10, x11, [x20, #0x50] adcs x6, x6, x10 adcs x7, x7, x11 adc x3, xzr, xzr adds x8, x4, #0x1 mov x9, #-0x100000000 sbcs x9, x5, x9 adcs x10, x6, xzr mov x11, #-0x100000001 sbcs x11, x7, x11 sbcs x3, x3, xzr csel x4, x4, x8, lo csel x5, x5, x9, lo csel x6, x6, x10, lo csel x7, x7, x11, lo stp x4, x5, [sp, #0x40] stp x6, x7, [sp, #0x50] ldp x3, x4, [x20] ldp x5, x6, [x20, #0x10] ldp x7, x8, [sp, #0x20] ldp x9, x10, [sp, #0x30] mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x17, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x17 adcs x14, x14, xzr subs x15, x3, x4 cneg x15, x15, lo csetm x1, lo subs x17, x8, x7 cneg x17, x17, lo mul x16, x15, x17 umulh x17, x15, x17 cinv x1, x1, lo eor x16, x16, x1 eor x17, x17, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x17 adc x14, x14, x1 lsl x16, x11, #32 lsr x15, 
x11, #32 subs x1, x16, x11 sbc x17, x15, xzr subs x12, x12, x1 sbcs x13, x13, x17 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x17, x15, xzr subs x13, x13, x1 sbcs x14, x14, x17 sbcs x11, x11, x16 sbc x12, x12, x15 stp x13, x14, [sp, #0x80] stp x11, x12, [sp, #0x90] mul x11, x5, x9 mul x13, x6, x10 umulh x12, x5, x9 adds x16, x11, x13 umulh x14, x6, x10 adcs x17, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x17 adcs x14, x14, xzr subs x15, x5, x6 cneg x15, x15, lo csetm x1, lo subs x17, x10, x9 cneg x17, x17, lo mul x16, x15, x17 umulh x17, x15, x17 cinv x1, x1, lo eor x16, x16, x1 eor x17, x17, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x17 adc x14, x14, x1 subs x3, x5, x3 sbcs x4, x6, x4 ngc x5, xzr cmn x5, #0x1 eor x3, x3, x5 adcs x3, x3, xzr eor x4, x4, x5 adcs x4, x4, xzr subs x7, x7, x9 sbcs x8, x8, x10 ngc x9, xzr cmn x9, #0x1 eor x7, x7, x9 adcs x7, x7, xzr eor x8, x8, x9 adcs x8, x8, xzr eor x10, x5, x9 ldp x15, x1, [sp, #0x80] adds x15, x11, x15 adcs x1, x12, x1 ldp x5, x9, [sp, #0x90] adcs x5, x13, x5 adcs x9, x14, x9 adc x2, xzr, xzr mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x17, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x17 adcs x14, x14, xzr subs x3, x3, x4 cneg x3, x3, lo csetm x4, lo subs x17, x8, x7 cneg x17, x17, lo mul x16, x3, x17 umulh x17, x3, x17 cinv x4, x4, lo eor x16, x16, x4 eor x17, x17, x4 cmn x4, #0x1 adcs x12, x12, x16 adcs x13, x13, x17 adc x14, x14, x4 cmn x10, #0x1 eor x11, x11, x10 adcs x11, x11, x15 eor x12, x12, x10 adcs x12, x12, x1 eor x13, x13, x10 adcs x13, x13, x5 eor x14, x14, x10 adcs x14, x14, x9 adcs x3, x2, x10 adcs x4, x10, xzr adc x10, x10, xzr adds x13, x13, x15 adcs x14, x14, x1 adcs x3, x3, x5 adcs x4, x4, x9 adc x10, x10, x2 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x17, x15, xzr subs x12, x12, x1 sbcs x13, x13, x17 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x17, x15, xzr subs x13, x13, x1 sbcs x14, x14, x17 sbcs x11, x11, x16 sbc x12, x12, x15 adds x3, x3, x11 adcs x4, x4, x12 adc x10, x10, xzr add x2, x10, #0x1 lsl x15, x2, #32 sub x16, x15, x2 adds x13, x13, x2 adcs x14, x14, x16 adcs x3, x3, xzr adcs x4, x4, x15 csetm x7, lo adds x13, x13, x7 and x16, x7, #0xffffffff00000000 adcs x14, x14, x16 adcs x3, x3, x7 and x15, x7, #0xfffffffeffffffff adc x4, x4, x15 stp x13, x14, [sp, #0x80] stp x3, x4, [sp, #0x90] ldp x2, x3, [sp, #0x60] ldp x4, x5, [sp, #0x70] umull x15, w2, w2 lsr x11, x2, #32 umull x16, w11, w11 umull x11, w2, w11 adds x15, x15, x11, lsl #33 lsr x11, x11, #31 adc x16, x16, x11 umull x17, w3, w3 lsr x11, x3, #32 umull x1, w11, w11 umull x11, w3, w11 mul x12, x2, x3 umulh x13, x2, x3 adds x17, x17, x11, lsl #33 lsr x11, x11, #31 adc x1, x1, x11 adds x12, x12, x12 adcs x13, x13, x13 adc x1, x1, xzr adds x16, x16, x12 adcs x17, x17, x13 adc x1, x1, xzr lsl x12, x15, #32 lsr x11, x15, #32 subs x14, x12, x15 sbc x13, x11, xzr subs x16, x16, x14 sbcs x17, x17, x13 sbcs x1, x1, x12 sbc x15, x15, x11 lsl x12, x16, #32 lsr x11, x16, #32 subs x14, x12, x16 sbc x13, x11, xzr subs x17, x17, x14 sbcs x1, x1, x13 sbcs x15, x15, x12 sbc x16, x16, x11 mul x6, x2, x4 mul x14, x3, x5 umulh x8, x2, x4 subs x10, x2, x3 cneg x10, x10, lo csetm x13, lo subs x12, x5, x4 cneg x12, x12, lo mul x11, x10, x12 umulh x12, x10, x12 cinv x13, x13, lo eor x11, x11, x13 eor x12, x12, x13 adds x7, x6, x8 adc x8, x8, xzr umulh x9, x3, x5 adds x7, 
x7, x14 adcs x8, x8, x9 adc x9, x9, xzr adds x8, x8, x14 adc x9, x9, xzr cmn x13, #0x1 adcs x7, x7, x11 adcs x8, x8, x12 adc x9, x9, x13 adds x6, x6, x6 adcs x7, x7, x7 adcs x8, x8, x8 adcs x9, x9, x9 adc x10, xzr, xzr adds x6, x6, x17 adcs x7, x7, x1 adcs x8, x8, x15 adcs x9, x9, x16 adc x10, x10, xzr lsl x12, x6, #32 lsr x11, x6, #32 subs x14, x12, x6 sbc x13, x11, xzr subs x7, x7, x14 sbcs x8, x8, x13 sbcs x9, x9, x12 sbc x14, x6, x11 adds x10, x10, x14 adc x6, xzr, xzr lsl x12, x7, #32 lsr x11, x7, #32 subs x14, x12, x7 sbc x13, x11, xzr subs x8, x8, x14 sbcs x9, x9, x13 sbcs x10, x10, x12 sbc x14, x7, x11 adds x6, x6, x14 adc x7, xzr, xzr mul x11, x4, x4 adds x8, x8, x11 mul x12, x5, x5 umulh x11, x4, x4 adcs x9, x9, x11 adcs x10, x10, x12 umulh x12, x5, x5 adcs x6, x6, x12 adc x7, x7, xzr mul x11, x4, x5 umulh x12, x4, x5 adds x11, x11, x11 adcs x12, x12, x12 adc x13, xzr, xzr adds x9, x9, x11 adcs x10, x10, x12 adcs x6, x6, x13 adcs x7, x7, xzr mov x11, #-0x100000000 adds x5, x8, #0x1 sbcs x11, x9, x11 mov x13, #-0x100000001 adcs x12, x10, xzr sbcs x13, x6, x13 sbcs xzr, x7, xzr csel x8, x5, x8, hs csel x9, x11, x9, hs csel x10, x12, x10, hs csel x6, x13, x6, hs stp x8, x9, [sp, #0xa0] stp x10, x6, [sp, #0xb0] ldp x2, x3, [sp, #0x40] ldp x4, x5, [sp, #0x50] umull x15, w2, w2 lsr x11, x2, #32 umull x16, w11, w11 umull x11, w2, w11 adds x15, x15, x11, lsl #33 lsr x11, x11, #31 adc x16, x16, x11 umull x17, w3, w3 lsr x11, x3, #32 umull x1, w11, w11 umull x11, w3, w11 mul x12, x2, x3 umulh x13, x2, x3 adds x17, x17, x11, lsl #33 lsr x11, x11, #31 adc x1, x1, x11 adds x12, x12, x12 adcs x13, x13, x13 adc x1, x1, xzr adds x16, x16, x12 adcs x17, x17, x13 adc x1, x1, xzr lsl x12, x15, #32 lsr x11, x15, #32 subs x14, x12, x15 sbc x13, x11, xzr subs x16, x16, x14 sbcs x17, x17, x13 sbcs x1, x1, x12 sbc x15, x15, x11 lsl x12, x16, #32 lsr x11, x16, #32 subs x14, x12, x16 sbc x13, x11, xzr subs x17, x17, x14 sbcs x1, x1, x13 sbcs x15, x15, x12 sbc x16, x16, x11 mul x6, x2, x4 mul x14, x3, x5 umulh x8, x2, x4 subs x10, x2, x3 cneg x10, x10, lo csetm x13, lo subs x12, x5, x4 cneg x12, x12, lo mul x11, x10, x12 umulh x12, x10, x12 cinv x13, x13, lo eor x11, x11, x13 eor x12, x12, x13 adds x7, x6, x8 adc x8, x8, xzr umulh x9, x3, x5 adds x7, x7, x14 adcs x8, x8, x9 adc x9, x9, xzr adds x8, x8, x14 adc x9, x9, xzr cmn x13, #0x1 adcs x7, x7, x11 adcs x8, x8, x12 adc x9, x9, x13 adds x6, x6, x6 adcs x7, x7, x7 adcs x8, x8, x8 adcs x9, x9, x9 adc x10, xzr, xzr adds x6, x6, x17 adcs x7, x7, x1 adcs x8, x8, x15 adcs x9, x9, x16 adc x10, x10, xzr lsl x12, x6, #32 lsr x11, x6, #32 subs x14, x12, x6 sbc x13, x11, xzr subs x7, x7, x14 sbcs x8, x8, x13 sbcs x9, x9, x12 sbc x14, x6, x11 adds x10, x10, x14 adc x6, xzr, xzr lsl x12, x7, #32 lsr x11, x7, #32 subs x14, x12, x7 sbc x13, x11, xzr subs x8, x8, x14 sbcs x9, x9, x13 sbcs x10, x10, x12 sbc x14, x7, x11 adds x6, x6, x14 adc x7, xzr, xzr mul x11, x4, x4 adds x8, x8, x11 mul x12, x5, x5 umulh x11, x4, x4 adcs x9, x9, x11 adcs x10, x10, x12 umulh x12, x5, x5 adcs x6, x6, x12 adc x7, x7, xzr mul x11, x4, x5 umulh x12, x4, x5 adds x11, x11, x11 adcs x12, x12, x12 adc x13, xzr, xzr adds x9, x9, x11 adcs x10, x10, x12 adcs x6, x6, x13 adcs x7, x7, xzr mov x11, #-0x100000000 adds x5, x8, #0x1 sbcs x11, x9, x11 mov x13, #-0x100000001 adcs x12, x10, xzr sbcs x13, x6, x13 sbcs xzr, x7, xzr csel x8, x5, x8, hs csel x9, x11, x9, hs csel x10, x12, x10, hs csel x6, x13, x6, hs stp x8, x9, [sp, #0x40] stp x10, x6, [sp, #0x50] mov x1, #0x9 mov x2, #-0x1 ldp x9, x10, 
[sp, #0xa0] subs x9, x2, x9 mov x3, #-0x100000000 sbcs x10, x3, x10 ldp x11, x12, [sp, #0xb0] sbcs x11, x2, x11 mov x4, #-0x100000001 sbc x12, x4, x12 mul x3, x1, x9 mul x4, x1, x10 mul x5, x1, x11 mul x6, x1, x12 umulh x9, x1, x9 umulh x10, x1, x10 umulh x11, x1, x11 umulh x7, x1, x12 adds x4, x4, x9 adcs x5, x5, x10 adcs x6, x6, x11 adc x7, x7, xzr mov x1, #0xc ldp x9, x10, [sp, #0x80] mul x8, x9, x1 umulh x9, x9, x1 adds x3, x3, x8 mul x8, x10, x1 umulh x10, x10, x1 adcs x4, x4, x8 ldp x11, x12, [sp, #0x90] mul x8, x11, x1 umulh x11, x11, x1 adcs x5, x5, x8 mul x8, x12, x1 umulh x12, x12, x1 adcs x6, x6, x8 adc x7, x7, xzr adds x4, x4, x9 adcs x5, x5, x10 adcs x6, x6, x11 adc x7, x7, x12 add x7, x7, #0x1 lsl x8, x7, #32 sub x9, x8, x7 adds x3, x3, x7 adcs x4, x4, x9 adcs x5, x5, xzr adcs x6, x6, x8 csetm x7, lo adds x3, x3, x7 and x9, x7, #0xffffffff00000000 adcs x4, x4, x9 adcs x5, x5, x7 and x8, x7, #0xfffffffeffffffff adc x6, x6, x8 stp x3, x4, [sp, #0xa0] stp x5, x6, [sp, #0xb0] ldp x5, x6, [sp, #0x40] ldp x4, x3, [sp] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x50] ldp x4, x3, [sp, #0x10] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp, #0x40] stp x7, x8, [sp, #0x50] ldp x2, x3, [sp, #0x20] ldp x4, x5, [sp, #0x30] umull x15, w2, w2 lsr x11, x2, #32 umull x16, w11, w11 umull x11, w2, w11 adds x15, x15, x11, lsl #33 lsr x11, x11, #31 adc x16, x16, x11 umull x17, w3, w3 lsr x11, x3, #32 umull x1, w11, w11 umull x11, w3, w11 mul x12, x2, x3 umulh x13, x2, x3 adds x17, x17, x11, lsl #33 lsr x11, x11, #31 adc x1, x1, x11 adds x12, x12, x12 adcs x13, x13, x13 adc x1, x1, xzr adds x16, x16, x12 adcs x17, x17, x13 adc x1, x1, xzr lsl x12, x15, #32 lsr x11, x15, #32 subs x14, x12, x15 sbc x13, x11, xzr subs x16, x16, x14 sbcs x17, x17, x13 sbcs x1, x1, x12 sbc x15, x15, x11 lsl x12, x16, #32 lsr x11, x16, #32 subs x14, x12, x16 sbc x13, x11, xzr subs x17, x17, x14 sbcs x1, x1, x13 sbcs x15, x15, x12 sbc x16, x16, x11 mul x6, x2, x4 mul x14, x3, x5 umulh x8, x2, x4 subs x10, x2, x3 cneg x10, x10, lo csetm x13, lo subs x12, x5, x4 cneg x12, x12, lo mul x11, x10, x12 umulh x12, x10, x12 cinv x13, x13, lo eor x11, x11, x13 eor x12, x12, x13 adds x7, x6, x8 adc x8, x8, xzr umulh x9, x3, x5 adds x7, x7, x14 adcs x8, x8, x9 adc x9, x9, xzr adds x8, x8, x14 adc x9, x9, xzr cmn x13, #0x1 adcs x7, x7, x11 adcs x8, x8, x12 adc x9, x9, x13 adds x6, x6, x6 adcs x7, x7, x7 adcs x8, x8, x8 adcs x9, x9, x9 adc x10, xzr, xzr adds x6, x6, x17 adcs x7, x7, x1 adcs x8, x8, x15 adcs x9, x9, x16 adc x10, x10, xzr lsl x12, x6, #32 lsr x11, x6, #32 subs x14, x12, x6 sbc x13, x11, xzr subs x7, x7, x14 sbcs x8, x8, x13 sbcs x9, x9, x12 sbc x14, x6, x11 adds x10, x10, x14 adc x6, xzr, xzr lsl x12, x7, #32 lsr x11, x7, #32 subs x14, x12, x7 sbc x13, x11, xzr subs x8, x8, x14 sbcs x9, x9, x13 sbcs x10, x10, x12 sbc x14, x7, x11 adds x6, x6, x14 adc x7, xzr, xzr mul x11, x4, x4 adds x8, x8, x11 mul x12, x5, x5 umulh x11, x4, x4 adcs x9, x9, x11 adcs x10, x10, x12 umulh x12, x5, x5 adcs x6, x6, x12 adc x7, x7, xzr mul x11, x4, x5 umulh x12, x4, x5 adds x11, x11, x11 adcs x12, x12, x12 adc x13, xzr, xzr adds x9, x9, x11 adcs x10, x10, x12 adcs x6, x6, x13 adcs x7, x7, xzr mov x11, #-0x100000000 adds x5, x8, #0x1 sbcs x11, x9, x11 mov x13, #-0x100000001 adcs x12, x10, xzr sbcs x13, x6, x13 sbcs xzr, x7, xzr csel x8, x5, x8, hs csel x9, x11, x9, hs csel x10, x12, x10, hs csel x6, 
x13, x6, hs stp x8, x9, [sp] stp x10, x6, [sp, #0x10] ldp x3, x4, [sp, #0xa0] ldp x5, x6, [sp, #0xb0] ldp x7, x8, [sp, #0x60] ldp x9, x10, [sp, #0x70] mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x17, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x17 adcs x14, x14, xzr subs x15, x3, x4 cneg x15, x15, lo csetm x1, lo subs x17, x8, x7 cneg x17, x17, lo mul x16, x15, x17 umulh x17, x15, x17 cinv x1, x1, lo eor x16, x16, x1 eor x17, x17, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x17 adc x14, x14, x1 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x17, x15, xzr subs x12, x12, x1 sbcs x13, x13, x17 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x17, x15, xzr subs x13, x13, x1 sbcs x14, x14, x17 sbcs x11, x11, x16 sbc x12, x12, x15 stp x13, x14, [sp, #0x60] stp x11, x12, [sp, #0x70] mul x11, x5, x9 mul x13, x6, x10 umulh x12, x5, x9 adds x16, x11, x13 umulh x14, x6, x10 adcs x17, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x17 adcs x14, x14, xzr subs x15, x5, x6 cneg x15, x15, lo csetm x1, lo subs x17, x10, x9 cneg x17, x17, lo mul x16, x15, x17 umulh x17, x15, x17 cinv x1, x1, lo eor x16, x16, x1 eor x17, x17, x1 cmn x1, #0x1 adcs x12, x12, x16 adcs x13, x13, x17 adc x14, x14, x1 subs x3, x5, x3 sbcs x4, x6, x4 ngc x5, xzr cmn x5, #0x1 eor x3, x3, x5 adcs x3, x3, xzr eor x4, x4, x5 adcs x4, x4, xzr subs x7, x7, x9 sbcs x8, x8, x10 ngc x9, xzr cmn x9, #0x1 eor x7, x7, x9 adcs x7, x7, xzr eor x8, x8, x9 adcs x8, x8, xzr eor x10, x5, x9 ldp x15, x1, [sp, #0x60] adds x15, x11, x15 adcs x1, x12, x1 ldp x5, x9, [sp, #0x70] adcs x5, x13, x5 adcs x9, x14, x9 adc x2, xzr, xzr mul x11, x3, x7 mul x13, x4, x8 umulh x12, x3, x7 adds x16, x11, x13 umulh x14, x4, x8 adcs x17, x12, x14 adcs x14, x14, xzr adds x12, x12, x16 adcs x13, x13, x17 adcs x14, x14, xzr subs x3, x3, x4 cneg x3, x3, lo csetm x4, lo subs x17, x8, x7 cneg x17, x17, lo mul x16, x3, x17 umulh x17, x3, x17 cinv x4, x4, lo eor x16, x16, x4 eor x17, x17, x4 cmn x4, #0x1 adcs x12, x12, x16 adcs x13, x13, x17 adc x14, x14, x4 cmn x10, #0x1 eor x11, x11, x10 adcs x11, x11, x15 eor x12, x12, x10 adcs x12, x12, x1 eor x13, x13, x10 adcs x13, x13, x5 eor x14, x14, x10 adcs x14, x14, x9 adcs x3, x2, x10 adcs x4, x10, xzr adc x10, x10, xzr adds x13, x13, x15 adcs x14, x14, x1 adcs x3, x3, x5 adcs x4, x4, x9 adc x10, x10, x2 lsl x16, x11, #32 lsr x15, x11, #32 subs x1, x16, x11 sbc x17, x15, xzr subs x12, x12, x1 sbcs x13, x13, x17 sbcs x14, x14, x16 sbc x11, x11, x15 lsl x16, x12, #32 lsr x15, x12, #32 subs x1, x16, x12 sbc x17, x15, xzr subs x13, x13, x1 sbcs x14, x14, x17 sbcs x11, x11, x16 sbc x12, x12, x15 adds x3, x3, x11 adcs x4, x4, x12 adc x10, x10, xzr add x2, x10, #0x1 lsl x15, x2, #32 sub x16, x15, x2 adds x13, x13, x2 adcs x14, x14, x16 adcs x3, x3, xzr adcs x4, x4, x15 csetm x7, lo adds x13, x13, x7 and x16, x7, #0xffffffff00000000 adcs x14, x14, x16 adcs x3, x3, x7 and x15, x7, #0xfffffffeffffffff adc x4, x4, x15 stp x13, x14, [sp, #0x60] stp x3, x4, [sp, #0x70] ldp x5, x6, [sp, #0x40] ldp x4, x3, [sp, #0x20] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x50] ldp x4, x3, [sp, #0x30] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [x19, #0x40] stp x7, x8, [x19, #0x50] ldp x1, x2, [sp, #0x80] lsl x0, x1, #2 ldp x6, x7, [sp, #0xa0] subs x0, x0, x6 extr x1, 
x2, x1, #0x3e sbcs x1, x1, x7 ldp x3, x4, [sp, #0x90] extr x2, x3, x2, #0x3e ldp x6, x7, [sp, #0xb0] sbcs x2, x2, x6 extr x3, x4, x3, #0x3e sbcs x3, x3, x7 lsr x4, x4, #62 sbc x4, x4, xzr add x4, x4, #0x1 lsl x5, x4, #32 sub x6, x5, x4 adds x0, x0, x4 adcs x1, x1, x6 adcs x2, x2, xzr adcs x3, x3, x5 csetm x4, lo adds x0, x0, x4 and x6, x4, #0xffffffff00000000 adcs x1, x1, x6 adcs x2, x2, x4 and x5, x4, #0xfffffffeffffffff adc x3, x3, x5 stp x0, x1, [x19] stp x2, x3, [x19, #0x10] mov x1, #0x8 mov x2, #-0x1 ldp x9, x10, [sp] subs x9, x2, x9 mov x3, #-0x100000000 sbcs x10, x3, x10 ldp x11, x12, [sp, #0x10] sbcs x11, x2, x11 mov x4, #-0x100000001 sbc x12, x4, x12 lsl x3, x9, #3 extr x4, x10, x9, #0x3d extr x5, x11, x10, #0x3d extr x6, x12, x11, #0x3d lsr x7, x12, #61 mov x1, #0x3 ldp x9, x10, [sp, #0x60] mul x8, x9, x1 umulh x9, x9, x1 adds x3, x3, x8 mul x8, x10, x1 umulh x10, x10, x1 adcs x4, x4, x8 ldp x11, x12, [sp, #0x70] mul x8, x11, x1 umulh x11, x11, x1 adcs x5, x5, x8 mul x8, x12, x1 umulh x12, x12, x1 adcs x6, x6, x8 adc x7, x7, xzr adds x4, x4, x9 adcs x5, x5, x10 adcs x6, x6, x11 adc x7, x7, x12 add x7, x7, #0x1 lsl x8, x7, #32 sub x9, x8, x7 adds x3, x3, x7 adcs x4, x4, x9 adcs x5, x5, xzr adcs x6, x6, x8 csetm x7, lo adds x3, x3, x7 and x9, x7, #0xffffffff00000000 adcs x4, x4, x9 adcs x5, x5, x7 and x8, x7, #0xfffffffeffffffff adc x6, x6, x8 stp x3, x4, [x19, #0x20] stp x5, x6, [x19, #0x30] ldp x19, x20, [sp, #0xc0] CFI_INC_SP(208) CFI_RET S2N_BN_SIZE_DIRECTIVE(Lsm2_montjscalarmul_sm2_montjdouble) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
wlsfx/bnbb
100,569
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/sm2_montjscalarmul_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Montgomery-Jacobian form scalar multiplication for GM/T 0003-2012 curve SM2
// Input scalar[4], point[12]; output res[12]
//
// extern void sm2_montjscalarmul_alt
// (uint64_t res[static 12],
// const uint64_t scalar[static 4],
// const uint64_t point[static 12]);
//
// This function is a variant of its affine point version sm2_scalarmul_alt.
// Here, input and output points are assumed to be in Jacobian form with
// their coordinates in the Montgomery domain. Thus, if priming indicates
// Montgomery form, x' = (2^256 * x) mod p_sm2 etc., each point argument
// is a triple (x',y',z') representing the affine point (x/z^2,y/z^3) when
// z' is nonzero or the point at infinity (group identity) if z' = 0.
//
// Given scalar = n and point = P, assumed to be on the GM/T 0003-2012
// curve SM2, returns a representation of n * P. If the result is the
// point at infinity (either because the input point was or because the
// scalar was a multiple of the group order n_sm2) then the output is
// guaranteed to represent the point at infinity, i.e. to have its z
// coordinate zero.
//
// Standard ARM ABI: X0 = res, X1 = scalar, X2 = point
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjscalarmul_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjscalarmul_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjscalarmul_alt)
.text
.balign 4

// Size of individual field elements

#define NUMSIZE 32

// Safe copies of inputs (res lasts the whole code, point not so long)
// and additional values in variables, with some aliasing

#define res x19
#define sgn x20
#define j x20
#define point x21

// Intermediate variables on the stack.

#define scalarb sp, #(0*NUMSIZE)
#define acc sp, #(1*NUMSIZE)
#define tabent sp, #(4*NUMSIZE)
#define tab sp, #(7*NUMSIZE)

#define NSPACE 31*NUMSIZE

// Avoid using .rep for the sake of the BoringSSL/AWS-LC delocator,
// which doesn't accept repetitions, assembler macros etc.

#define selectblock(I) \
cmp x14, #(1*I) __LF \
ldp x12, x13, [x15] __LF \
csel x0, x12, x0, eq __LF \
csel x1, x13, x1, eq __LF \
ldp x12, x13, [x15, #16] __LF \
csel x2, x12, x2, eq __LF \
csel x3, x13, x3, eq __LF \
ldp x12, x13, [x15, #32] __LF \
csel x4, x12, x4, eq __LF \
csel x5, x13, x5, eq __LF \
ldp x12, x13, [x15, #48] __LF \
csel x6, x12, x6, eq __LF \
csel x7, x13, x7, eq __LF \
ldp x12, x13, [x15, #64] __LF \
csel x8, x12, x8, eq __LF \
csel x9, x13, x9, eq __LF \
ldp x12, x13, [x15, #80] __LF \
csel x10, x12, x10, eq __LF \
csel x11, x13, x11, eq __LF \
add x15, x15, #96

// Loading large constants

#define movbig(nn,n3,n2,n1,n0) \
movz nn, n0 __LF \
movk nn, n1, lsl #16 __LF \
movk nn, n2, lsl #32 __LF \
movk nn, n3, lsl #48

S2N_BN_SYMBOL(sm2_montjscalarmul_alt):

CFI_START
CFI_PUSH2(x19,x20)
CFI_PUSH2(x21,x30)
CFI_DEC_SP(NSPACE)

// Preserve the "res" and "point" input arguments. We load and process the
// scalar immediately so we don't bother preserving that input argument.
// Also, "point" is only needed early on and so its register gets re-used.
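// As a cross-check of the representation described above, here is a minimal
// Python sketch (an illustration only, not part of this source; the helper
// name is made up) of mapping a Montgomery-domain Jacobian triple back to an
// affine point, where R = 2^256 is the Montgomery radix:
//
//     p_sm2 = 0xfffffffeffffffffffffffffffffffffffffffff00000000ffffffffffffffff
//     R = 1 << 256
//
//     def montjacobian_to_affine(xm, ym, zm):
//         rinv = pow(R, -1, p_sm2)            # undo the Montgomery encoding
//         x, y, z = (xm*rinv) % p_sm2, (ym*rinv) % p_sm2, (zm*rinv) % p_sm2
//         if z == 0:
//             return None                     # point at infinity
//         zi = pow(z, -1, p_sm2)
//         return (x*zi*zi) % p_sm2, (y*zi*zi*zi) % p_sm2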
mov res, x0
mov point, x2

// Load the digits of group order n_sm2 = [x12;x13;x14;x15]

movbig(x12, #0x53bb, #0xf409, #0x39d5, #0x4123)
movbig(x13, #0x7203, #0xdf6b, #0x21c6, #0x052b)
mov x14, #0xffffffffffffffff
mov x15, #0xfffffffeffffffff

// First, reduce the input scalar mod n_sm2, i.e. conditionally subtract n_sm2

ldp x2, x3, [x1]
ldp x4, x5, [x1, #16]
subs x6, x2, x12
sbcs x7, x3, x13
sbcs x8, x4, x14
sbcs x9, x5, x15
csel x2, x2, x6, cc
csel x3, x3, x7, cc
csel x4, x4, x8, cc
csel x5, x5, x9, cc

// Now if the top bit of the reduced scalar is set, negate it mod n_sm2,
// i.e. do n |-> n_sm2 - n. Remember the sign as "sgn" so we can
// correspondingly negate the point below.

subs x6, x12, x2
sbcs x7, x13, x3
sbcs x8, x14, x4
sbc x9, x15, x5
tst x5, #0x8000000000000000
csel x2, x2, x6, eq
csel x3, x3, x7, eq
csel x4, x4, x8, eq
csel x5, x5, x9, eq
cset sgn, ne

// In either case then add the recoding constant 0x08888...888 to allow
// signed digits.

mov x6, 0x8888888888888888
adds x2, x2, x6
adcs x3, x3, x6
bic x7, x6, #0xF000000000000000
adcs x4, x4, x6
adc x5, x5, x7
stp x2, x3, [scalarb]
stp x4, x5, [scalarb+16]

// Set the tab[0] table entry to the input point = 1 * P, except
// that we negate it if the top bit of the scalar was set. This
// negation takes care over the y = 0 case to maintain all the
// coordinates < p_sm2 throughout, even though triples (x,y,z)
// with y = 0 can only represent a point on the curve when z = 0
// and it represents the point at infinity regardless of x and y.

ldp x0, x1, [point]
stp x0, x1, [tab]
ldp x2, x3, [point, #16]
stp x2, x3, [tab+16]

ldp x4, x5, [point, #32]
ldp x6, x7, [point, #48]

mov x0, #0xffffffffffffffff
subs x0, x0, x4
mov x1, #0xffffffff00000000
sbcs x1, x1, x5
mov x2, #0xffffffffffffffff
sbcs x2, x2, x6
mov x3, #0xfffffffeffffffff
sbc x3, x3, x7

orr x8, x4, x5
orr x9, x6, x7
orr x8, x8, x9
cmp x8, xzr
ccmp sgn, xzr, #4, ne
csel x4, x0, x4, ne
csel x5, x1, x5, ne
csel x6, x2, x6, ne
csel x7, x3, x7, ne

stp x4, x5, [tab+32]
stp x6, x7, [tab+48]

ldp x0, x1, [point, #64]
stp x0, x1, [tab+64]
ldp x2, x3, [point, #80]
stp x2, x3, [tab+80]

// Compute and record tab[1] = 2 * P, ..., tab[7] = 8 * P

add x0, tab+96*1
add x1, tab
CFI_BL(Lsm2_montjscalarmul_alt_sm2_montjdouble)

add x0, tab+96*2
add x1, tab+96*1
add x2, tab
CFI_BL(Lsm2_montjscalarmul_alt_sm2_montjadd)

add x0, tab+96*3
add x1, tab+96*1
CFI_BL(Lsm2_montjscalarmul_alt_sm2_montjdouble)

add x0, tab+96*4
add x1, tab+96*3
add x2, tab
CFI_BL(Lsm2_montjscalarmul_alt_sm2_montjadd)

add x0, tab+96*5
add x1, tab+96*2
CFI_BL(Lsm2_montjscalarmul_alt_sm2_montjdouble)

add x0, tab+96*6
add x1, tab+96*5
add x2, tab
CFI_BL(Lsm2_montjscalarmul_alt_sm2_montjadd)

add x0, tab+96*7
add x1, tab+96*3
CFI_BL(Lsm2_montjscalarmul_alt_sm2_montjdouble)

// Initialize the accumulator as a table entry for top 4 bits (unrecoded)

ldr x14, [scalarb+24]
lsr x14, x14, #60

mov x0, xzr
mov x1, xzr
mov x2, xzr
mov x3, xzr
mov x4, xzr
mov x5, xzr
mov x6, xzr
mov x7, xzr
mov x8, xzr
mov x9, xzr
mov x10, xzr
mov x11, xzr
add x15, tab

selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)

stp x0, x1, [acc]
stp x2, x3, [acc+16]
stp x4, x5, [acc+32]
stp x6, x7, [acc+48]
stp x8, x9, [acc+64]
stp x10, x11, [acc+80]

mov j, #252

// Main loop over size-4 bitfields: double 4 times then add signed digit

Lsm2_montjscalarmul_alt_mainloop:

sub j, j, #4

add x0, acc
add x1, acc
CFI_BL(Lsm2_montjscalarmul_alt_sm2_montjdouble)

add x0, acc
add x1, acc
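// (Aside, illustration only.) The recoded scalar consumed by this loop can
// be described in a few lines of Python: adding the constant 0x0888...888
// above makes every 4-bit window, less 8, a signed digit in [-8,7], while
// the top 4 bits are used unrecoded, so n is recovered exactly:
//
//     C = int("8" * 63, 16)             # 0x0888...888 over 256 bits
//     def recode(n):                    # n < 2^255 after the negation above
//         m = n + C
//         top = m >> 252                # leading window, used unrecoded
//         ds = [((m >> (4*i)) & 15) - 8 for i in range(63)]
//         assert (top << 252) + sum(d << (4*i) for i, d in enumerate(ds)) == n
//         return top, ds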
CFI_BL(Lsm2_montjscalarmul_alt_sm2_montjdouble)

add x0, acc
add x1, acc
CFI_BL(Lsm2_montjscalarmul_alt_sm2_montjdouble)

add x0, acc
add x1, acc
CFI_BL(Lsm2_montjscalarmul_alt_sm2_montjdouble)

lsr x2, j, #6
ldr x14, [sp, x2, lsl #3] // Exploits scalarb = sp exactly
lsr x14, x14, j
and x14, x14, #15
subs x14, x14, #8
cset x16, lo // x16 = sign of digit (1 = negative)
cneg x14, x14, lo // x14 = absolute value of digit

// Conditionally select the table entry tab[i-1] = i * P in constant time

mov x0, xzr
mov x1, xzr
mov x2, xzr
mov x3, xzr
mov x4, xzr
mov x5, xzr
mov x6, xzr
mov x7, xzr
mov x8, xzr
mov x9, xzr
mov x10, xzr
mov x11, xzr
add x15, tab

selectblock(1)
selectblock(2)
selectblock(3)
selectblock(4)
selectblock(5)
selectblock(6)
selectblock(7)
selectblock(8)

// Store it to "tabent" with the y coordinate optionally negated
// Again, do it carefully to give coordinates < p_sm2 even in
// the degenerate case y = 0 (when z = 0 for points on the curve).

stp x0, x1, [tabent]
stp x2, x3, [tabent+16]

mov x0, #0xffffffffffffffff
subs x0, x0, x4
mov x1, #0xffffffff00000000
sbcs x1, x1, x5
mov x2, #0xffffffffffffffff
sbcs x2, x2, x6
mov x3, #0xfffffffeffffffff
sbc x3, x3, x7

orr x12, x4, x5
orr x13, x6, x7
orr x12, x12, x13
cmp x12, xzr
ccmp x16, xzr, #4, ne
csel x4, x0, x4, ne
csel x5, x1, x5, ne
csel x6, x2, x6, ne
csel x7, x3, x7, ne

stp x4, x5, [tabent+32]
stp x6, x7, [tabent+48]
stp x8, x9, [tabent+64]
stp x10, x11, [tabent+80]

add x0, acc
add x1, acc
add x2, tabent
CFI_BL(Lsm2_montjscalarmul_alt_sm2_montjadd)

cbnz j, Lsm2_montjscalarmul_alt_mainloop

// That's the end of the main loop, and we just need to copy the
// result in "acc" to the output.

ldp x0, x1, [acc]
stp x0, x1, [res]
ldp x0, x1, [acc+16]
stp x0, x1, [res, #16]
ldp x0, x1, [acc+32]
stp x0, x1, [res, #32]
ldp x0, x1, [acc+48]
stp x0, x1, [res, #48]
ldp x0, x1, [acc+64]
stp x0, x1, [res, #64]
ldp x0, x1, [acc+80]
stp x0, x1, [res, #80]

// Restore stack and registers and return

CFI_INC_SP(NSPACE)
CFI_POP2(x21,x30)
CFI_POP2(x19,x20)
CFI_RET

S2N_BN_SIZE_DIRECTIVE(sm2_montjscalarmul_alt)

// Local copies of subroutines, complete clones at the moment

S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lsm2_montjscalarmul_alt_sm2_montjadd)
Lsm2_montjscalarmul_alt_sm2_montjadd:

CFI_START
CFI_DEC_SP(224)
mov x15, x0
mov x16, x1
mov x17, x2
ldp x2, x3, [x16, #0x40]
mul x9, x2, x3
umulh x10, x2, x3
ldp x4, x5, [x16, #0x50]
mul x11, x2, x5
umulh x12, x2, x5
mul x6, x2, x4
umulh x7, x2, x4
adds x10, x10, x6
adcs x11, x11, x7
mul x6, x3, x4
umulh x7, x3, x4
adc x7, x7, xzr
adds x11, x11, x6
mul x13, x4, x5
umulh x14, x4, x5
adcs x12, x12, x7
mul x6, x3, x5
umulh x7, x3, x5
adc x7, x7, xzr
adds x12, x12, x6
adcs x13, x13, x7
adc x14, x14, xzr
adds x9, x9, x9
adcs x10, x10, x10
adcs x11, x11, x11
adcs x12, x12, x12
adcs x13, x13, x13
adcs x14, x14, x14
cset x7, hs
umulh x6, x2, x2
mul x8, x2, x2
adds x9, x9, x6
mul x6, x3, x3
adcs x10, x10, x6
umulh x6, x3, x3
adcs x11, x11, x6
mul x6, x4, x4
adcs x12, x12, x6
umulh x6, x4, x4
adcs x13, x13, x6
mul x6, x5, x5
adcs x14, x14, x6
umulh x6, x5, x5
adc x7, x7, x6
lsl x4, x8, #32
lsr x5, x8, #32
subs x2, x4, x8
sbc x3, x5, xzr
subs x9, x9, x2
sbcs x10, x10, x3
sbcs x11, x11, x4
sbc x8, x8, x5
lsl x4, x9, #32
lsr x5, x9, #32
subs x2, x4, x9
sbc x3, x5, xzr
subs x10, x10, x2
sbcs x11, x11, x3
sbcs x8, x8, x4
sbc x9, x9, x5
lsl x4, x10, #32
lsr x5, x10, #32
subs x2, x4, x10
sbc x3, x5, xzr
subs x11, x11, x2
sbcs x8, x8, x3
sbcs x9, x9, x4
sbc x10, x10, x5
lsl x4, x11, #32
lsr x5, x11, #32
subs x2, x4, x11
sbc x3, x5, xzr
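// (Aside, illustration only.) The lsl/lsr/subs/sbc blocks above and below
// are word-retiring Montgomery reduction steps. Since
// p_sm2 = 2^256 - 2^224 - 2^96 + 2^64 - 1 satisfies p_sm2 = -1 (mod 2^64),
// the Montgomery quotient digit is just the low word w itself, so each step
// replaces the running value X by (X + w * p_sm2) / 2^64, whose low 64 bits
// vanish by construction. A Python sketch of one step:
//
//     def montred_step(X, p_sm2):
//         w = X & ((1 << 64) - 1)
//         return (X + w * p_sm2) >> 64   # exact shift: low word is zeroed
//
// Four such steps divide by 2^256 modulo p_sm2, with a final conditional
// subtraction bringing the result below p_sm2.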
subs x8, x8, x2 sbcs x9, x9, x3 sbcs x10, x10, x4 sbc x11, x11, x5 adds x8, x8, x12 adcs x9, x9, x13 adcs x10, x10, x14 adcs x11, x11, x7 csetm x2, hs subs x8, x8, x2 and x3, x2, #0xffffffff00000000 sbcs x9, x9, x3 and x5, x2, #0xfffffffeffffffff sbcs x10, x10, x2 sbc x11, x11, x5 stp x8, x9, [sp] stp x10, x11, [sp, #0x10] ldp x2, x3, [x17, #0x40] mul x9, x2, x3 umulh x10, x2, x3 ldp x4, x5, [x17, #0x50] mul x11, x2, x5 umulh x12, x2, x5 mul x6, x2, x4 umulh x7, x2, x4 adds x10, x10, x6 adcs x11, x11, x7 mul x6, x3, x4 umulh x7, x3, x4 adc x7, x7, xzr adds x11, x11, x6 mul x13, x4, x5 umulh x14, x4, x5 adcs x12, x12, x7 mul x6, x3, x5 umulh x7, x3, x5 adc x7, x7, xzr adds x12, x12, x6 adcs x13, x13, x7 adc x14, x14, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 cset x7, hs umulh x6, x2, x2 mul x8, x2, x2 adds x9, x9, x6 mul x6, x3, x3 adcs x10, x10, x6 umulh x6, x3, x3 adcs x11, x11, x6 mul x6, x4, x4 adcs x12, x12, x6 umulh x6, x4, x4 adcs x13, x13, x6 mul x6, x5, x5 adcs x14, x14, x6 umulh x6, x5, x5 adc x7, x7, x6 lsl x4, x8, #32 lsr x5, x8, #32 subs x2, x4, x8 sbc x3, x5, xzr subs x9, x9, x2 sbcs x10, x10, x3 sbcs x11, x11, x4 sbc x8, x8, x5 lsl x4, x9, #32 lsr x5, x9, #32 subs x2, x4, x9 sbc x3, x5, xzr subs x10, x10, x2 sbcs x11, x11, x3 sbcs x8, x8, x4 sbc x9, x9, x5 lsl x4, x10, #32 lsr x5, x10, #32 subs x2, x4, x10 sbc x3, x5, xzr subs x11, x11, x2 sbcs x8, x8, x3 sbcs x9, x9, x4 sbc x10, x10, x5 lsl x4, x11, #32 lsr x5, x11, #32 subs x2, x4, x11 sbc x3, x5, xzr subs x8, x8, x2 sbcs x9, x9, x3 sbcs x10, x10, x4 sbc x11, x11, x5 adds x8, x8, x12 adcs x9, x9, x13 adcs x10, x10, x14 adcs x11, x11, x7 csetm x2, hs subs x8, x8, x2 and x3, x2, #0xffffffff00000000 sbcs x9, x9, x3 and x5, x2, #0xfffffffeffffffff sbcs x10, x10, x2 sbc x11, x11, x5 stp x8, x9, [sp, #0xa0] stp x10, x11, [sp, #0xb0] ldp x3, x4, [x17, #0x40] ldp x7, x8, [x16, #0x20] mul x12, x3, x7 umulh x13, x3, x7 mul x11, x3, x8 umulh x14, x3, x8 adds x13, x13, x11 ldp x9, x10, [x16, #0x30] mul x11, x3, x9 umulh x0, x3, x9 adcs x14, x14, x11 mul x11, x3, x10 umulh x1, x3, x10 adcs x0, x0, x11 adc x1, x1, xzr ldp x5, x6, [x17, #0x50] mul x11, x4, x7 adds x13, x13, x11 mul x11, x4, x8 adcs x14, x14, x11 mul x11, x4, x9 adcs x0, x0, x11 mul x11, x4, x10 adcs x1, x1, x11 umulh x3, x4, x10 adc x3, x3, xzr umulh x11, x4, x7 adds x14, x14, x11 umulh x11, x4, x8 adcs x0, x0, x11 umulh x11, x4, x9 adcs x1, x1, x11 adc x3, x3, xzr mul x11, x5, x7 adds x14, x14, x11 mul x11, x5, x8 adcs x0, x0, x11 mul x11, x5, x9 adcs x1, x1, x11 mul x11, x5, x10 adcs x3, x3, x11 umulh x4, x5, x10 adc x4, x4, xzr umulh x11, x5, x7 adds x0, x0, x11 umulh x11, x5, x8 adcs x1, x1, x11 umulh x11, x5, x9 adcs x3, x3, x11 adc x4, x4, xzr mul x11, x6, x7 adds x0, x0, x11 mul x11, x6, x8 adcs x1, x1, x11 mul x11, x6, x9 adcs x3, x3, x11 mul x11, x6, x10 adcs x4, x4, x11 umulh x5, x6, x10 adc x5, x5, xzr umulh x11, x6, x7 adds x1, x1, x11 umulh x11, x6, x8 adcs x3, x3, x11 umulh x11, x6, x9 adcs x4, x4, x11 adc x5, x5, xzr lsl x11, x12, #32 lsr x6, x12, #32 subs x8, x11, x12 sbc x7, x6, xzr subs x13, x13, x8 sbcs x14, x14, x7 sbcs x0, x0, x11 sbc x12, x12, x6 lsl x11, x13, #32 lsr x6, x13, #32 subs x8, x11, x13 sbc x7, x6, xzr subs x14, x14, x8 sbcs x0, x0, x7 sbcs x12, x12, x11 sbc x13, x13, x6 lsl x11, x14, #32 lsr x6, x14, #32 subs x8, x11, x14 sbc x7, x6, xzr subs x0, x0, x8 sbcs x12, x12, x7 sbcs x13, x13, x11 sbc x14, x14, x6 lsl x11, x0, #32 lsr x6, x0, #32 subs x8, x11, x0 sbc x7, 
x6, xzr subs x12, x12, x8 sbcs x13, x13, x7 sbcs x14, x14, x11 sbc x0, x0, x6 adds x12, x12, x1 adcs x13, x13, x3 adcs x14, x14, x4 adcs x0, x0, x5 cset x8, hs mov x11, #-0x100000000 mov x6, #-0x100000001 adds x1, x12, #0x1 sbcs x3, x13, x11 adcs x4, x14, xzr sbcs x5, x0, x6 sbcs xzr, x8, xzr csel x12, x12, x1, lo csel x13, x13, x3, lo csel x14, x14, x4, lo csel x0, x0, x5, lo stp x12, x13, [sp, #0xc0] stp x14, x0, [sp, #0xd0] ldp x3, x4, [x16, #0x40] ldp x7, x8, [x17, #0x20] mul x12, x3, x7 umulh x13, x3, x7 mul x11, x3, x8 umulh x14, x3, x8 adds x13, x13, x11 ldp x9, x10, [x17, #0x30] mul x11, x3, x9 umulh x0, x3, x9 adcs x14, x14, x11 mul x11, x3, x10 umulh x1, x3, x10 adcs x0, x0, x11 adc x1, x1, xzr ldp x5, x6, [x16, #0x50] mul x11, x4, x7 adds x13, x13, x11 mul x11, x4, x8 adcs x14, x14, x11 mul x11, x4, x9 adcs x0, x0, x11 mul x11, x4, x10 adcs x1, x1, x11 umulh x3, x4, x10 adc x3, x3, xzr umulh x11, x4, x7 adds x14, x14, x11 umulh x11, x4, x8 adcs x0, x0, x11 umulh x11, x4, x9 adcs x1, x1, x11 adc x3, x3, xzr mul x11, x5, x7 adds x14, x14, x11 mul x11, x5, x8 adcs x0, x0, x11 mul x11, x5, x9 adcs x1, x1, x11 mul x11, x5, x10 adcs x3, x3, x11 umulh x4, x5, x10 adc x4, x4, xzr umulh x11, x5, x7 adds x0, x0, x11 umulh x11, x5, x8 adcs x1, x1, x11 umulh x11, x5, x9 adcs x3, x3, x11 adc x4, x4, xzr mul x11, x6, x7 adds x0, x0, x11 mul x11, x6, x8 adcs x1, x1, x11 mul x11, x6, x9 adcs x3, x3, x11 mul x11, x6, x10 adcs x4, x4, x11 umulh x5, x6, x10 adc x5, x5, xzr umulh x11, x6, x7 adds x1, x1, x11 umulh x11, x6, x8 adcs x3, x3, x11 umulh x11, x6, x9 adcs x4, x4, x11 adc x5, x5, xzr lsl x11, x12, #32 lsr x6, x12, #32 subs x8, x11, x12 sbc x7, x6, xzr subs x13, x13, x8 sbcs x14, x14, x7 sbcs x0, x0, x11 sbc x12, x12, x6 lsl x11, x13, #32 lsr x6, x13, #32 subs x8, x11, x13 sbc x7, x6, xzr subs x14, x14, x8 sbcs x0, x0, x7 sbcs x12, x12, x11 sbc x13, x13, x6 lsl x11, x14, #32 lsr x6, x14, #32 subs x8, x11, x14 sbc x7, x6, xzr subs x0, x0, x8 sbcs x12, x12, x7 sbcs x13, x13, x11 sbc x14, x14, x6 lsl x11, x0, #32 lsr x6, x0, #32 subs x8, x11, x0 sbc x7, x6, xzr subs x12, x12, x8 sbcs x13, x13, x7 sbcs x14, x14, x11 sbc x0, x0, x6 adds x12, x12, x1 adcs x13, x13, x3 adcs x14, x14, x4 adcs x0, x0, x5 cset x8, hs mov x11, #-0x100000000 mov x6, #-0x100000001 adds x1, x12, #0x1 sbcs x3, x13, x11 adcs x4, x14, xzr sbcs x5, x0, x6 sbcs xzr, x8, xzr csel x12, x12, x1, lo csel x13, x13, x3, lo csel x14, x14, x4, lo csel x0, x0, x5, lo stp x12, x13, [sp, #0x20] stp x14, x0, [sp, #0x30] ldp x3, x4, [sp] ldp x7, x8, [x17] mul x12, x3, x7 umulh x13, x3, x7 mul x11, x3, x8 umulh x14, x3, x8 adds x13, x13, x11 ldp x9, x10, [x17, #0x10] mul x11, x3, x9 umulh x0, x3, x9 adcs x14, x14, x11 mul x11, x3, x10 umulh x1, x3, x10 adcs x0, x0, x11 adc x1, x1, xzr ldp x5, x6, [sp, #0x10] mul x11, x4, x7 adds x13, x13, x11 mul x11, x4, x8 adcs x14, x14, x11 mul x11, x4, x9 adcs x0, x0, x11 mul x11, x4, x10 adcs x1, x1, x11 umulh x3, x4, x10 adc x3, x3, xzr umulh x11, x4, x7 adds x14, x14, x11 umulh x11, x4, x8 adcs x0, x0, x11 umulh x11, x4, x9 adcs x1, x1, x11 adc x3, x3, xzr mul x11, x5, x7 adds x14, x14, x11 mul x11, x5, x8 adcs x0, x0, x11 mul x11, x5, x9 adcs x1, x1, x11 mul x11, x5, x10 adcs x3, x3, x11 umulh x4, x5, x10 adc x4, x4, xzr umulh x11, x5, x7 adds x0, x0, x11 umulh x11, x5, x8 adcs x1, x1, x11 umulh x11, x5, x9 adcs x3, x3, x11 adc x4, x4, xzr mul x11, x6, x7 adds x0, x0, x11 mul x11, x6, x8 adcs x1, x1, x11 mul x11, x6, x9 adcs x3, x3, x11 mul x11, x6, x10 adcs x4, x4, x11 umulh x5, x6, x10 adc x5, 
x5, xzr umulh x11, x6, x7 adds x1, x1, x11 umulh x11, x6, x8 adcs x3, x3, x11 umulh x11, x6, x9 adcs x4, x4, x11 adc x5, x5, xzr lsl x11, x12, #32 lsr x6, x12, #32 subs x8, x11, x12 sbc x7, x6, xzr subs x13, x13, x8 sbcs x14, x14, x7 sbcs x0, x0, x11 sbc x12, x12, x6 lsl x11, x13, #32 lsr x6, x13, #32 subs x8, x11, x13 sbc x7, x6, xzr subs x14, x14, x8 sbcs x0, x0, x7 sbcs x12, x12, x11 sbc x13, x13, x6 lsl x11, x14, #32 lsr x6, x14, #32 subs x8, x11, x14 sbc x7, x6, xzr subs x0, x0, x8 sbcs x12, x12, x7 sbcs x13, x13, x11 sbc x14, x14, x6 lsl x11, x0, #32 lsr x6, x0, #32 subs x8, x11, x0 sbc x7, x6, xzr subs x12, x12, x8 sbcs x13, x13, x7 sbcs x14, x14, x11 sbc x0, x0, x6 adds x12, x12, x1 adcs x13, x13, x3 adcs x14, x14, x4 adcs x0, x0, x5 cset x8, hs mov x11, #-0x100000000 mov x6, #-0x100000001 adds x1, x12, #0x1 sbcs x3, x13, x11 adcs x4, x14, xzr sbcs x5, x0, x6 sbcs xzr, x8, xzr csel x12, x12, x1, lo csel x13, x13, x3, lo csel x14, x14, x4, lo csel x0, x0, x5, lo stp x12, x13, [sp, #0x40] stp x14, x0, [sp, #0x50] ldp x3, x4, [sp, #0xa0] ldp x7, x8, [x16] mul x12, x3, x7 umulh x13, x3, x7 mul x11, x3, x8 umulh x14, x3, x8 adds x13, x13, x11 ldp x9, x10, [x16, #0x10] mul x11, x3, x9 umulh x0, x3, x9 adcs x14, x14, x11 mul x11, x3, x10 umulh x1, x3, x10 adcs x0, x0, x11 adc x1, x1, xzr ldp x5, x6, [sp, #0xb0] mul x11, x4, x7 adds x13, x13, x11 mul x11, x4, x8 adcs x14, x14, x11 mul x11, x4, x9 adcs x0, x0, x11 mul x11, x4, x10 adcs x1, x1, x11 umulh x3, x4, x10 adc x3, x3, xzr umulh x11, x4, x7 adds x14, x14, x11 umulh x11, x4, x8 adcs x0, x0, x11 umulh x11, x4, x9 adcs x1, x1, x11 adc x3, x3, xzr mul x11, x5, x7 adds x14, x14, x11 mul x11, x5, x8 adcs x0, x0, x11 mul x11, x5, x9 adcs x1, x1, x11 mul x11, x5, x10 adcs x3, x3, x11 umulh x4, x5, x10 adc x4, x4, xzr umulh x11, x5, x7 adds x0, x0, x11 umulh x11, x5, x8 adcs x1, x1, x11 umulh x11, x5, x9 adcs x3, x3, x11 adc x4, x4, xzr mul x11, x6, x7 adds x0, x0, x11 mul x11, x6, x8 adcs x1, x1, x11 mul x11, x6, x9 adcs x3, x3, x11 mul x11, x6, x10 adcs x4, x4, x11 umulh x5, x6, x10 adc x5, x5, xzr umulh x11, x6, x7 adds x1, x1, x11 umulh x11, x6, x8 adcs x3, x3, x11 umulh x11, x6, x9 adcs x4, x4, x11 adc x5, x5, xzr lsl x11, x12, #32 lsr x6, x12, #32 subs x8, x11, x12 sbc x7, x6, xzr subs x13, x13, x8 sbcs x14, x14, x7 sbcs x0, x0, x11 sbc x12, x12, x6 lsl x11, x13, #32 lsr x6, x13, #32 subs x8, x11, x13 sbc x7, x6, xzr subs x14, x14, x8 sbcs x0, x0, x7 sbcs x12, x12, x11 sbc x13, x13, x6 lsl x11, x14, #32 lsr x6, x14, #32 subs x8, x11, x14 sbc x7, x6, xzr subs x0, x0, x8 sbcs x12, x12, x7 sbcs x13, x13, x11 sbc x14, x14, x6 lsl x11, x0, #32 lsr x6, x0, #32 subs x8, x11, x0 sbc x7, x6, xzr subs x12, x12, x8 sbcs x13, x13, x7 sbcs x14, x14, x11 sbc x0, x0, x6 adds x12, x12, x1 adcs x13, x13, x3 adcs x14, x14, x4 adcs x0, x0, x5 cset x8, hs mov x11, #-0x100000000 mov x6, #-0x100000001 adds x1, x12, #0x1 sbcs x3, x13, x11 adcs x4, x14, xzr sbcs x5, x0, x6 sbcs xzr, x8, xzr csel x12, x12, x1, lo csel x13, x13, x3, lo csel x14, x14, x4, lo csel x0, x0, x5, lo stp x12, x13, [sp, #0x80] stp x14, x0, [sp, #0x90] ldp x3, x4, [sp] ldp x7, x8, [sp, #0x20] mul x12, x3, x7 umulh x13, x3, x7 mul x11, x3, x8 umulh x14, x3, x8 adds x13, x13, x11 ldp x9, x10, [sp, #0x30] mul x11, x3, x9 umulh x0, x3, x9 adcs x14, x14, x11 mul x11, x3, x10 umulh x1, x3, x10 adcs x0, x0, x11 adc x1, x1, xzr ldp x5, x6, [sp, #0x10] mul x11, x4, x7 adds x13, x13, x11 mul x11, x4, x8 adcs x14, x14, x11 mul x11, x4, x9 adcs x0, x0, x11 mul x11, x4, x10 adcs x1, x1, x11 umulh 
x3, x4, x10 adc x3, x3, xzr umulh x11, x4, x7 adds x14, x14, x11 umulh x11, x4, x8 adcs x0, x0, x11 umulh x11, x4, x9 adcs x1, x1, x11 adc x3, x3, xzr mul x11, x5, x7 adds x14, x14, x11 mul x11, x5, x8 adcs x0, x0, x11 mul x11, x5, x9 adcs x1, x1, x11 mul x11, x5, x10 adcs x3, x3, x11 umulh x4, x5, x10 adc x4, x4, xzr umulh x11, x5, x7 adds x0, x0, x11 umulh x11, x5, x8 adcs x1, x1, x11 umulh x11, x5, x9 adcs x3, x3, x11 adc x4, x4, xzr mul x11, x6, x7 adds x0, x0, x11 mul x11, x6, x8 adcs x1, x1, x11 mul x11, x6, x9 adcs x3, x3, x11 mul x11, x6, x10 adcs x4, x4, x11 umulh x5, x6, x10 adc x5, x5, xzr umulh x11, x6, x7 adds x1, x1, x11 umulh x11, x6, x8 adcs x3, x3, x11 umulh x11, x6, x9 adcs x4, x4, x11 adc x5, x5, xzr lsl x11, x12, #32 lsr x6, x12, #32 subs x8, x11, x12 sbc x7, x6, xzr subs x13, x13, x8 sbcs x14, x14, x7 sbcs x0, x0, x11 sbc x12, x12, x6 lsl x11, x13, #32 lsr x6, x13, #32 subs x8, x11, x13 sbc x7, x6, xzr subs x14, x14, x8 sbcs x0, x0, x7 sbcs x12, x12, x11 sbc x13, x13, x6 lsl x11, x14, #32 lsr x6, x14, #32 subs x8, x11, x14 sbc x7, x6, xzr subs x0, x0, x8 sbcs x12, x12, x7 sbcs x13, x13, x11 sbc x14, x14, x6 lsl x11, x0, #32 lsr x6, x0, #32 subs x8, x11, x0 sbc x7, x6, xzr subs x12, x12, x8 sbcs x13, x13, x7 sbcs x14, x14, x11 sbc x0, x0, x6 adds x12, x12, x1 adcs x13, x13, x3 adcs x14, x14, x4 adcs x0, x0, x5 cset x8, hs mov x11, #-0x100000000 mov x6, #-0x100000001 adds x1, x12, #0x1 sbcs x3, x13, x11 adcs x4, x14, xzr sbcs x5, x0, x6 sbcs xzr, x8, xzr csel x12, x12, x1, lo csel x13, x13, x3, lo csel x14, x14, x4, lo csel x0, x0, x5, lo stp x12, x13, [sp, #0x20] stp x14, x0, [sp, #0x30] ldp x3, x4, [sp, #0xa0] ldp x7, x8, [sp, #0xc0] mul x12, x3, x7 umulh x13, x3, x7 mul x11, x3, x8 umulh x14, x3, x8 adds x13, x13, x11 ldp x9, x10, [sp, #0xd0] mul x11, x3, x9 umulh x0, x3, x9 adcs x14, x14, x11 mul x11, x3, x10 umulh x1, x3, x10 adcs x0, x0, x11 adc x1, x1, xzr ldp x5, x6, [sp, #0xb0] mul x11, x4, x7 adds x13, x13, x11 mul x11, x4, x8 adcs x14, x14, x11 mul x11, x4, x9 adcs x0, x0, x11 mul x11, x4, x10 adcs x1, x1, x11 umulh x3, x4, x10 adc x3, x3, xzr umulh x11, x4, x7 adds x14, x14, x11 umulh x11, x4, x8 adcs x0, x0, x11 umulh x11, x4, x9 adcs x1, x1, x11 adc x3, x3, xzr mul x11, x5, x7 adds x14, x14, x11 mul x11, x5, x8 adcs x0, x0, x11 mul x11, x5, x9 adcs x1, x1, x11 mul x11, x5, x10 adcs x3, x3, x11 umulh x4, x5, x10 adc x4, x4, xzr umulh x11, x5, x7 adds x0, x0, x11 umulh x11, x5, x8 adcs x1, x1, x11 umulh x11, x5, x9 adcs x3, x3, x11 adc x4, x4, xzr mul x11, x6, x7 adds x0, x0, x11 mul x11, x6, x8 adcs x1, x1, x11 mul x11, x6, x9 adcs x3, x3, x11 mul x11, x6, x10 adcs x4, x4, x11 umulh x5, x6, x10 adc x5, x5, xzr umulh x11, x6, x7 adds x1, x1, x11 umulh x11, x6, x8 adcs x3, x3, x11 umulh x11, x6, x9 adcs x4, x4, x11 adc x5, x5, xzr lsl x11, x12, #32 lsr x6, x12, #32 subs x8, x11, x12 sbc x7, x6, xzr subs x13, x13, x8 sbcs x14, x14, x7 sbcs x0, x0, x11 sbc x12, x12, x6 lsl x11, x13, #32 lsr x6, x13, #32 subs x8, x11, x13 sbc x7, x6, xzr subs x14, x14, x8 sbcs x0, x0, x7 sbcs x12, x12, x11 sbc x13, x13, x6 lsl x11, x14, #32 lsr x6, x14, #32 subs x8, x11, x14 sbc x7, x6, xzr subs x0, x0, x8 sbcs x12, x12, x7 sbcs x13, x13, x11 sbc x14, x14, x6 lsl x11, x0, #32 lsr x6, x0, #32 subs x8, x11, x0 sbc x7, x6, xzr subs x12, x12, x8 sbcs x13, x13, x7 sbcs x14, x14, x11 sbc x0, x0, x6 adds x12, x12, x1 adcs x13, x13, x3 adcs x14, x14, x4 adcs x0, x0, x5 cset x8, hs mov x11, #-0x100000000 mov x6, #-0x100000001 adds x1, x12, #0x1 sbcs x3, x13, x11 adcs x4, x14, xzr sbcs x5, 
x0, x6 sbcs xzr, x8, xzr csel x12, x12, x1, lo csel x13, x13, x3, lo csel x14, x14, x4, lo csel x0, x0, x5, lo stp x12, x13, [sp, #0xc0] stp x14, x0, [sp, #0xd0] ldp x5, x6, [sp, #0x40] ldp x4, x3, [sp, #0x80] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x50] ldp x4, x3, [sp, #0x90] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp, #0xa0] stp x7, x8, [sp, #0xb0] ldp x5, x6, [sp, #0x20] ldp x4, x3, [sp, #0xc0] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x30] ldp x4, x3, [sp, #0xd0] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp, #0x20] stp x7, x8, [sp, #0x30] ldp x2, x3, [sp, #0xa0] mul x9, x2, x3 umulh x10, x2, x3 ldp x4, x5, [sp, #0xb0] mul x11, x2, x5 umulh x12, x2, x5 mul x6, x2, x4 umulh x7, x2, x4 adds x10, x10, x6 adcs x11, x11, x7 mul x6, x3, x4 umulh x7, x3, x4 adc x7, x7, xzr adds x11, x11, x6 mul x13, x4, x5 umulh x14, x4, x5 adcs x12, x12, x7 mul x6, x3, x5 umulh x7, x3, x5 adc x7, x7, xzr adds x12, x12, x6 adcs x13, x13, x7 adc x14, x14, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 cset x7, hs umulh x6, x2, x2 mul x8, x2, x2 adds x9, x9, x6 mul x6, x3, x3 adcs x10, x10, x6 umulh x6, x3, x3 adcs x11, x11, x6 mul x6, x4, x4 adcs x12, x12, x6 umulh x6, x4, x4 adcs x13, x13, x6 mul x6, x5, x5 adcs x14, x14, x6 umulh x6, x5, x5 adc x7, x7, x6 lsl x4, x8, #32 lsr x5, x8, #32 subs x2, x4, x8 sbc x3, x5, xzr subs x9, x9, x2 sbcs x10, x10, x3 sbcs x11, x11, x4 sbc x8, x8, x5 lsl x4, x9, #32 lsr x5, x9, #32 subs x2, x4, x9 sbc x3, x5, xzr subs x10, x10, x2 sbcs x11, x11, x3 sbcs x8, x8, x4 sbc x9, x9, x5 lsl x4, x10, #32 lsr x5, x10, #32 subs x2, x4, x10 sbc x3, x5, xzr subs x11, x11, x2 sbcs x8, x8, x3 sbcs x9, x9, x4 sbc x10, x10, x5 lsl x4, x11, #32 lsr x5, x11, #32 subs x2, x4, x11 sbc x3, x5, xzr subs x8, x8, x2 sbcs x9, x9, x3 sbcs x10, x10, x4 sbc x11, x11, x5 adds x8, x8, x12 adcs x9, x9, x13 adcs x10, x10, x14 adcs x11, x11, x7 csetm x2, hs subs x8, x8, x2 and x3, x2, #0xffffffff00000000 sbcs x9, x9, x3 and x5, x2, #0xfffffffeffffffff sbcs x10, x10, x2 sbc x11, x11, x5 stp x8, x9, [sp, #0x60] stp x10, x11, [sp, #0x70] ldp x2, x3, [sp, #0x20] mul x9, x2, x3 umulh x10, x2, x3 ldp x4, x5, [sp, #0x30] mul x11, x2, x5 umulh x12, x2, x5 mul x6, x2, x4 umulh x7, x2, x4 adds x10, x10, x6 adcs x11, x11, x7 mul x6, x3, x4 umulh x7, x3, x4 adc x7, x7, xzr adds x11, x11, x6 mul x13, x4, x5 umulh x14, x4, x5 adcs x12, x12, x7 mul x6, x3, x5 umulh x7, x3, x5 adc x7, x7, xzr adds x12, x12, x6 adcs x13, x13, x7 adc x14, x14, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 cset x7, hs umulh x6, x2, x2 mul x8, x2, x2 adds x9, x9, x6 mul x6, x3, x3 adcs x10, x10, x6 umulh x6, x3, x3 adcs x11, x11, x6 mul x6, x4, x4 adcs x12, x12, x6 umulh x6, x4, x4 adcs x13, x13, x6 mul x6, x5, x5 adcs x14, x14, x6 umulh x6, x5, x5 adc x7, x7, x6 lsl x4, x8, #32 lsr x5, x8, #32 subs x2, x4, x8 sbc x3, x5, xzr subs x9, x9, x2 sbcs x10, x10, x3 sbcs x11, x11, x4 sbc x8, x8, x5 lsl x4, x9, #32 lsr x5, x9, #32 subs x2, x4, x9 sbc x3, x5, xzr subs x10, x10, x2 sbcs x11, x11, x3 sbcs x8, x8, x4 sbc x9, x9, x5 lsl x4, x10, #32 lsr x5, x10, #32 subs x2, x4, x10 sbc x3, x5, xzr subs x11, x11, x2 sbcs 
x8, x8, x3 sbcs x9, x9, x4 sbc x10, x10, x5 lsl x4, x11, #32 lsr x5, x11, #32 subs x2, x4, x11 sbc x3, x5, xzr subs x8, x8, x2 sbcs x9, x9, x3 sbcs x10, x10, x4 sbc x11, x11, x5 adds x8, x8, x12 adcs x9, x9, x13 adcs x10, x10, x14 adcs x11, x11, x7 cset x2, hs mov x3, #-0x100000000 mov x5, #-0x100000001 adds x12, x8, #0x1 sbcs x13, x9, x3 adcs x14, x10, xzr sbcs x7, x11, x5 sbcs xzr, x2, xzr csel x8, x8, x12, lo csel x9, x9, x13, lo csel x10, x10, x14, lo csel x11, x11, x7, lo stp x8, x9, [sp] stp x10, x11, [sp, #0x10] ldp x3, x4, [sp, #0x60] ldp x7, x8, [sp, #0x80] mul x12, x3, x7 umulh x13, x3, x7 mul x11, x3, x8 umulh x14, x3, x8 adds x13, x13, x11 ldp x9, x10, [sp, #0x90] mul x11, x3, x9 umulh x0, x3, x9 adcs x14, x14, x11 mul x11, x3, x10 umulh x1, x3, x10 adcs x0, x0, x11 adc x1, x1, xzr ldp x5, x6, [sp, #0x70] mul x11, x4, x7 adds x13, x13, x11 mul x11, x4, x8 adcs x14, x14, x11 mul x11, x4, x9 adcs x0, x0, x11 mul x11, x4, x10 adcs x1, x1, x11 umulh x3, x4, x10 adc x3, x3, xzr umulh x11, x4, x7 adds x14, x14, x11 umulh x11, x4, x8 adcs x0, x0, x11 umulh x11, x4, x9 adcs x1, x1, x11 adc x3, x3, xzr mul x11, x5, x7 adds x14, x14, x11 mul x11, x5, x8 adcs x0, x0, x11 mul x11, x5, x9 adcs x1, x1, x11 mul x11, x5, x10 adcs x3, x3, x11 umulh x4, x5, x10 adc x4, x4, xzr umulh x11, x5, x7 adds x0, x0, x11 umulh x11, x5, x8 adcs x1, x1, x11 umulh x11, x5, x9 adcs x3, x3, x11 adc x4, x4, xzr mul x11, x6, x7 adds x0, x0, x11 mul x11, x6, x8 adcs x1, x1, x11 mul x11, x6, x9 adcs x3, x3, x11 mul x11, x6, x10 adcs x4, x4, x11 umulh x5, x6, x10 adc x5, x5, xzr umulh x11, x6, x7 adds x1, x1, x11 umulh x11, x6, x8 adcs x3, x3, x11 umulh x11, x6, x9 adcs x4, x4, x11 adc x5, x5, xzr lsl x11, x12, #32 lsr x6, x12, #32 subs x8, x11, x12 sbc x7, x6, xzr subs x13, x13, x8 sbcs x14, x14, x7 sbcs x0, x0, x11 sbc x12, x12, x6 lsl x11, x13, #32 lsr x6, x13, #32 subs x8, x11, x13 sbc x7, x6, xzr subs x14, x14, x8 sbcs x0, x0, x7 sbcs x12, x12, x11 sbc x13, x13, x6 lsl x11, x14, #32 lsr x6, x14, #32 subs x8, x11, x14 sbc x7, x6, xzr subs x0, x0, x8 sbcs x12, x12, x7 sbcs x13, x13, x11 sbc x14, x14, x6 lsl x11, x0, #32 lsr x6, x0, #32 subs x8, x11, x0 sbc x7, x6, xzr subs x12, x12, x8 sbcs x13, x13, x7 sbcs x14, x14, x11 sbc x0, x0, x6 adds x12, x12, x1 adcs x13, x13, x3 adcs x14, x14, x4 adcs x0, x0, x5 cset x8, hs mov x11, #-0x100000000 mov x6, #-0x100000001 adds x1, x12, #0x1 sbcs x3, x13, x11 adcs x4, x14, xzr sbcs x5, x0, x6 sbcs xzr, x8, xzr csel x12, x12, x1, lo csel x13, x13, x3, lo csel x14, x14, x4, lo csel x0, x0, x5, lo stp x12, x13, [sp, #0x80] stp x14, x0, [sp, #0x90] ldp x3, x4, [sp, #0x60] ldp x7, x8, [sp, #0x40] mul x12, x3, x7 umulh x13, x3, x7 mul x11, x3, x8 umulh x14, x3, x8 adds x13, x13, x11 ldp x9, x10, [sp, #0x50] mul x11, x3, x9 umulh x0, x3, x9 adcs x14, x14, x11 mul x11, x3, x10 umulh x1, x3, x10 adcs x0, x0, x11 adc x1, x1, xzr ldp x5, x6, [sp, #0x70] mul x11, x4, x7 adds x13, x13, x11 mul x11, x4, x8 adcs x14, x14, x11 mul x11, x4, x9 adcs x0, x0, x11 mul x11, x4, x10 adcs x1, x1, x11 umulh x3, x4, x10 adc x3, x3, xzr umulh x11, x4, x7 adds x14, x14, x11 umulh x11, x4, x8 adcs x0, x0, x11 umulh x11, x4, x9 adcs x1, x1, x11 adc x3, x3, xzr mul x11, x5, x7 adds x14, x14, x11 mul x11, x5, x8 adcs x0, x0, x11 mul x11, x5, x9 adcs x1, x1, x11 mul x11, x5, x10 adcs x3, x3, x11 umulh x4, x5, x10 adc x4, x4, xzr umulh x11, x5, x7 adds x0, x0, x11 umulh x11, x5, x8 adcs x1, x1, x11 umulh x11, x5, x9 adcs x3, x3, x11 adc x4, x4, xzr mul x11, x6, x7 adds x0, x0, x11 mul x11, x6, x8 adcs x1, 
x1, x11 mul x11, x6, x9 adcs x3, x3, x11 mul x11, x6, x10 adcs x4, x4, x11 umulh x5, x6, x10 adc x5, x5, xzr umulh x11, x6, x7 adds x1, x1, x11 umulh x11, x6, x8 adcs x3, x3, x11 umulh x11, x6, x9 adcs x4, x4, x11 adc x5, x5, xzr lsl x11, x12, #32 lsr x6, x12, #32 subs x8, x11, x12 sbc x7, x6, xzr subs x13, x13, x8 sbcs x14, x14, x7 sbcs x0, x0, x11 sbc x12, x12, x6 lsl x11, x13, #32 lsr x6, x13, #32 subs x8, x11, x13 sbc x7, x6, xzr subs x14, x14, x8 sbcs x0, x0, x7 sbcs x12, x12, x11 sbc x13, x13, x6 lsl x11, x14, #32 lsr x6, x14, #32 subs x8, x11, x14 sbc x7, x6, xzr subs x0, x0, x8 sbcs x12, x12, x7 sbcs x13, x13, x11 sbc x14, x14, x6 lsl x11, x0, #32 lsr x6, x0, #32 subs x8, x11, x0 sbc x7, x6, xzr subs x12, x12, x8 sbcs x13, x13, x7 sbcs x14, x14, x11 sbc x0, x0, x6 adds x12, x12, x1 adcs x13, x13, x3 adcs x14, x14, x4 adcs x0, x0, x5 cset x8, hs mov x11, #-0x100000000 mov x6, #-0x100000001 adds x1, x12, #0x1 sbcs x3, x13, x11 adcs x4, x14, xzr sbcs x5, x0, x6 sbcs xzr, x8, xzr csel x12, x12, x1, lo csel x13, x13, x3, lo csel x14, x14, x4, lo csel x0, x0, x5, lo stp x12, x13, [sp, #0x40] stp x14, x0, [sp, #0x50] ldp x5, x6, [sp] ldp x4, x3, [sp, #0x80] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x10] ldp x4, x3, [sp, #0x90] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp] stp x7, x8, [sp, #0x10] ldp x5, x6, [sp, #0x40] ldp x4, x3, [sp, #0x80] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x50] ldp x4, x3, [sp, #0x90] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp, #0x60] stp x7, x8, [sp, #0x70] ldp x3, x4, [sp, #0xa0] ldp x7, x8, [x16, #0x40] mul x12, x3, x7 umulh x13, x3, x7 mul x11, x3, x8 umulh x14, x3, x8 adds x13, x13, x11 ldp x9, x10, [x16, #0x50] mul x11, x3, x9 umulh x0, x3, x9 adcs x14, x14, x11 mul x11, x3, x10 umulh x1, x3, x10 adcs x0, x0, x11 adc x1, x1, xzr ldp x5, x6, [sp, #0xb0] mul x11, x4, x7 adds x13, x13, x11 mul x11, x4, x8 adcs x14, x14, x11 mul x11, x4, x9 adcs x0, x0, x11 mul x11, x4, x10 adcs x1, x1, x11 umulh x3, x4, x10 adc x3, x3, xzr umulh x11, x4, x7 adds x14, x14, x11 umulh x11, x4, x8 adcs x0, x0, x11 umulh x11, x4, x9 adcs x1, x1, x11 adc x3, x3, xzr mul x11, x5, x7 adds x14, x14, x11 mul x11, x5, x8 adcs x0, x0, x11 mul x11, x5, x9 adcs x1, x1, x11 mul x11, x5, x10 adcs x3, x3, x11 umulh x4, x5, x10 adc x4, x4, xzr umulh x11, x5, x7 adds x0, x0, x11 umulh x11, x5, x8 adcs x1, x1, x11 umulh x11, x5, x9 adcs x3, x3, x11 adc x4, x4, xzr mul x11, x6, x7 adds x0, x0, x11 mul x11, x6, x8 adcs x1, x1, x11 mul x11, x6, x9 adcs x3, x3, x11 mul x11, x6, x10 adcs x4, x4, x11 umulh x5, x6, x10 adc x5, x5, xzr umulh x11, x6, x7 adds x1, x1, x11 umulh x11, x6, x8 adcs x3, x3, x11 umulh x11, x6, x9 adcs x4, x4, x11 adc x5, x5, xzr lsl x11, x12, #32 lsr x6, x12, #32 subs x8, x11, x12 sbc x7, x6, xzr subs x13, x13, x8 sbcs x14, x14, x7 sbcs x0, x0, x11 sbc x12, x12, x6 lsl x11, x13, #32 lsr x6, x13, #32 subs x8, x11, x13 sbc x7, x6, xzr subs x14, x14, x8 sbcs x0, x0, x7 sbcs x12, x12, x11 sbc x13, x13, x6 lsl x11, x14, #32 lsr x6, x14, #32 subs x8, x11, x14 sbc x7, x6, xzr subs x0, x0, x8 sbcs x12, x12, x7 sbcs x13, x13, x11 sbc x14, x14, x6 lsl x11, x0, #32 lsr x6, x0, #32 subs x8, x11, x0 sbc x7, x6, xzr subs x12, x12, x8 sbcs x13, x13, x7 sbcs x14, x14, x11 sbc x0, x0, x6 adds 
x12, x12, x1 adcs x13, x13, x3 adcs x14, x14, x4 adcs x0, x0, x5 cset x8, hs mov x11, #-0x100000000 mov x6, #-0x100000001 adds x1, x12, #0x1 sbcs x3, x13, x11 adcs x4, x14, xzr sbcs x5, x0, x6 sbcs xzr, x8, xzr csel x12, x12, x1, lo csel x13, x13, x3, lo csel x14, x14, x4, lo csel x0, x0, x5, lo stp x12, x13, [sp, #0xa0] stp x14, x0, [sp, #0xb0] ldp x5, x6, [sp] ldp x4, x3, [sp, #0x40] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x10] ldp x4, x3, [sp, #0x50] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp] stp x7, x8, [sp, #0x10] ldp x5, x6, [sp, #0x80] ldp x4, x3, [sp] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x90] ldp x4, x3, [sp, #0x10] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp, #0x80] stp x7, x8, [sp, #0x90] ldp x3, x4, [sp, #0x60] ldp x7, x8, [sp, #0xc0] mul x12, x3, x7 umulh x13, x3, x7 mul x11, x3, x8 umulh x14, x3, x8 adds x13, x13, x11 ldp x9, x10, [sp, #0xd0] mul x11, x3, x9 umulh x0, x3, x9 adcs x14, x14, x11 mul x11, x3, x10 umulh x1, x3, x10 adcs x0, x0, x11 adc x1, x1, xzr ldp x5, x6, [sp, #0x70] mul x11, x4, x7 adds x13, x13, x11 mul x11, x4, x8 adcs x14, x14, x11 mul x11, x4, x9 adcs x0, x0, x11 mul x11, x4, x10 adcs x1, x1, x11 umulh x3, x4, x10 adc x3, x3, xzr umulh x11, x4, x7 adds x14, x14, x11 umulh x11, x4, x8 adcs x0, x0, x11 umulh x11, x4, x9 adcs x1, x1, x11 adc x3, x3, xzr mul x11, x5, x7 adds x14, x14, x11 mul x11, x5, x8 adcs x0, x0, x11 mul x11, x5, x9 adcs x1, x1, x11 mul x11, x5, x10 adcs x3, x3, x11 umulh x4, x5, x10 adc x4, x4, xzr umulh x11, x5, x7 adds x0, x0, x11 umulh x11, x5, x8 adcs x1, x1, x11 umulh x11, x5, x9 adcs x3, x3, x11 adc x4, x4, xzr mul x11, x6, x7 adds x0, x0, x11 mul x11, x6, x8 adcs x1, x1, x11 mul x11, x6, x9 adcs x3, x3, x11 mul x11, x6, x10 adcs x4, x4, x11 umulh x5, x6, x10 adc x5, x5, xzr umulh x11, x6, x7 adds x1, x1, x11 umulh x11, x6, x8 adcs x3, x3, x11 umulh x11, x6, x9 adcs x4, x4, x11 adc x5, x5, xzr lsl x11, x12, #32 lsr x6, x12, #32 subs x8, x11, x12 sbc x7, x6, xzr subs x13, x13, x8 sbcs x14, x14, x7 sbcs x0, x0, x11 sbc x12, x12, x6 lsl x11, x13, #32 lsr x6, x13, #32 subs x8, x11, x13 sbc x7, x6, xzr subs x14, x14, x8 sbcs x0, x0, x7 sbcs x12, x12, x11 sbc x13, x13, x6 lsl x11, x14, #32 lsr x6, x14, #32 subs x8, x11, x14 sbc x7, x6, xzr subs x0, x0, x8 sbcs x12, x12, x7 sbcs x13, x13, x11 sbc x14, x14, x6 lsl x11, x0, #32 lsr x6, x0, #32 subs x8, x11, x0 sbc x7, x6, xzr subs x12, x12, x8 sbcs x13, x13, x7 sbcs x14, x14, x11 sbc x0, x0, x6 adds x12, x12, x1 adcs x13, x13, x3 adcs x14, x14, x4 adcs x0, x0, x5 cset x8, hs mov x11, #-0x100000000 mov x6, #-0x100000001 adds x1, x12, #0x1 sbcs x3, x13, x11 adcs x4, x14, xzr sbcs x5, x0, x6 sbcs xzr, x8, xzr csel x12, x12, x1, lo csel x13, x13, x3, lo csel x14, x14, x4, lo csel x0, x0, x5, lo stp x12, x13, [sp, #0x60] stp x14, x0, [sp, #0x70] ldp x3, x4, [sp, #0xa0] ldp x7, x8, [x17, #0x40] mul x12, x3, x7 umulh x13, x3, x7 mul x11, x3, x8 umulh x14, x3, x8 adds x13, x13, x11 ldp x9, x10, [x17, #0x50] mul x11, x3, x9 umulh x0, x3, x9 adcs x14, x14, x11 mul x11, x3, x10 umulh x1, x3, x10 adcs x0, x0, x11 adc x1, x1, xzr ldp x5, x6, [sp, #0xb0] mul x11, x4, x7 adds x13, x13, x11 mul x11, x4, x8 adcs x14, x14, x11 mul x11, x4, x9 adcs x0, x0, x11 mul x11, x4, x10 adcs x1, x1, x11 umulh 
x3, x4, x10 adc x3, x3, xzr umulh x11, x4, x7 adds x14, x14, x11 umulh x11, x4, x8 adcs x0, x0, x11 umulh x11, x4, x9 adcs x1, x1, x11 adc x3, x3, xzr mul x11, x5, x7 adds x14, x14, x11 mul x11, x5, x8 adcs x0, x0, x11 mul x11, x5, x9 adcs x1, x1, x11 mul x11, x5, x10 adcs x3, x3, x11 umulh x4, x5, x10 adc x4, x4, xzr umulh x11, x5, x7 adds x0, x0, x11 umulh x11, x5, x8 adcs x1, x1, x11 umulh x11, x5, x9 adcs x3, x3, x11 adc x4, x4, xzr mul x11, x6, x7 adds x0, x0, x11 mul x11, x6, x8 adcs x1, x1, x11 mul x11, x6, x9 adcs x3, x3, x11 mul x11, x6, x10 adcs x4, x4, x11 umulh x5, x6, x10 adc x5, x5, xzr umulh x11, x6, x7 adds x1, x1, x11 umulh x11, x6, x8 adcs x3, x3, x11 umulh x11, x6, x9 adcs x4, x4, x11 adc x5, x5, xzr lsl x11, x12, #32 lsr x6, x12, #32 subs x8, x11, x12 sbc x7, x6, xzr subs x13, x13, x8 sbcs x14, x14, x7 sbcs x0, x0, x11 sbc x12, x12, x6 lsl x11, x13, #32 lsr x6, x13, #32 subs x8, x11, x13 sbc x7, x6, xzr subs x14, x14, x8 sbcs x0, x0, x7 sbcs x12, x12, x11 sbc x13, x13, x6 lsl x11, x14, #32 lsr x6, x14, #32 subs x8, x11, x14 sbc x7, x6, xzr subs x0, x0, x8 sbcs x12, x12, x7 sbcs x13, x13, x11 sbc x14, x14, x6 lsl x11, x0, #32 lsr x6, x0, #32 subs x8, x11, x0 sbc x7, x6, xzr subs x12, x12, x8 sbcs x13, x13, x7 sbcs x14, x14, x11 sbc x0, x0, x6 adds x12, x12, x1 adcs x13, x13, x3 adcs x14, x14, x4 adcs x0, x0, x5 cset x8, hs mov x11, #-0x100000000 mov x6, #-0x100000001 adds x1, x12, #0x1 sbcs x3, x13, x11 adcs x4, x14, xzr sbcs x5, x0, x6 sbcs xzr, x8, xzr csel x12, x12, x1, lo csel x13, x13, x3, lo csel x14, x14, x4, lo csel x0, x0, x5, lo stp x12, x13, [sp, #0xa0] stp x14, x0, [sp, #0xb0] ldp x3, x4, [sp, #0x20] ldp x7, x8, [sp, #0x80] mul x12, x3, x7 umulh x13, x3, x7 mul x11, x3, x8 umulh x14, x3, x8 adds x13, x13, x11 ldp x9, x10, [sp, #0x90] mul x11, x3, x9 umulh x0, x3, x9 adcs x14, x14, x11 mul x11, x3, x10 umulh x1, x3, x10 adcs x0, x0, x11 adc x1, x1, xzr ldp x5, x6, [sp, #0x30] mul x11, x4, x7 adds x13, x13, x11 mul x11, x4, x8 adcs x14, x14, x11 mul x11, x4, x9 adcs x0, x0, x11 mul x11, x4, x10 adcs x1, x1, x11 umulh x3, x4, x10 adc x3, x3, xzr umulh x11, x4, x7 adds x14, x14, x11 umulh x11, x4, x8 adcs x0, x0, x11 umulh x11, x4, x9 adcs x1, x1, x11 adc x3, x3, xzr mul x11, x5, x7 adds x14, x14, x11 mul x11, x5, x8 adcs x0, x0, x11 mul x11, x5, x9 adcs x1, x1, x11 mul x11, x5, x10 adcs x3, x3, x11 umulh x4, x5, x10 adc x4, x4, xzr umulh x11, x5, x7 adds x0, x0, x11 umulh x11, x5, x8 adcs x1, x1, x11 umulh x11, x5, x9 adcs x3, x3, x11 adc x4, x4, xzr mul x11, x6, x7 adds x0, x0, x11 mul x11, x6, x8 adcs x1, x1, x11 mul x11, x6, x9 adcs x3, x3, x11 mul x11, x6, x10 adcs x4, x4, x11 umulh x5, x6, x10 adc x5, x5, xzr umulh x11, x6, x7 adds x1, x1, x11 umulh x11, x6, x8 adcs x3, x3, x11 umulh x11, x6, x9 adcs x4, x4, x11 adc x5, x5, xzr lsl x11, x12, #32 lsr x6, x12, #32 subs x8, x11, x12 sbc x7, x6, xzr subs x13, x13, x8 sbcs x14, x14, x7 sbcs x0, x0, x11 sbc x12, x12, x6 lsl x11, x13, #32 lsr x6, x13, #32 subs x8, x11, x13 sbc x7, x6, xzr subs x14, x14, x8 sbcs x0, x0, x7 sbcs x12, x12, x11 sbc x13, x13, x6 lsl x11, x14, #32 lsr x6, x14, #32 subs x8, x11, x14 sbc x7, x6, xzr subs x0, x0, x8 sbcs x12, x12, x7 sbcs x13, x13, x11 sbc x14, x14, x6 lsl x11, x0, #32 lsr x6, x0, #32 subs x8, x11, x0 sbc x7, x6, xzr subs x12, x12, x8 sbcs x13, x13, x7 sbcs x14, x14, x11 sbc x0, x0, x6 adds x12, x12, x1 adcs x13, x13, x3 adcs x14, x14, x4 adcs x0, x0, x5 cset x8, hs mov x11, #-0x100000000 mov x6, #-0x100000001 adds x1, x12, #0x1 sbcs x3, x13, x11 adcs x4, x14, xzr sbcs x5, 
x0, x6 sbcs xzr, x8, xzr csel x12, x12, x1, lo csel x13, x13, x3, lo csel x14, x14, x4, lo csel x0, x0, x5, lo stp x12, x13, [sp, #0x80] stp x14, x0, [sp, #0x90] ldp x5, x6, [sp, #0x80] ldp x4, x3, [sp, #0x60] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x90] ldp x4, x3, [sp, #0x70] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp, #0x80] stp x7, x8, [sp, #0x90] ldp x0, x1, [x16, #0x40] ldp x2, x3, [x16, #0x50] orr x12, x0, x1 orr x13, x2, x3 orr x12, x12, x13 cmp x12, xzr cset x12, ne ldp x4, x5, [x17, #0x40] ldp x6, x7, [x17, #0x50] orr x13, x4, x5 orr x14, x6, x7 orr x13, x13, x14 cmp x13, xzr cset x13, ne cmp x13, x12 ldp x8, x9, [sp, #0xa0] csel x8, x0, x8, lo csel x9, x1, x9, lo csel x8, x4, x8, hi csel x9, x5, x9, hi ldp x10, x11, [sp, #0xb0] csel x10, x2, x10, lo csel x11, x3, x11, lo csel x10, x6, x10, hi csel x11, x7, x11, hi ldp x12, x13, [x16] ldp x0, x1, [sp] csel x0, x12, x0, lo csel x1, x13, x1, lo ldp x12, x13, [x17] csel x0, x12, x0, hi csel x1, x13, x1, hi ldp x12, x13, [x16, #0x10] ldp x2, x3, [sp, #0x10] csel x2, x12, x2, lo csel x3, x13, x3, lo ldp x12, x13, [x17, #0x10] csel x2, x12, x2, hi csel x3, x13, x3, hi ldp x12, x13, [x16, #0x20] ldp x4, x5, [sp, #0x80] csel x4, x12, x4, lo csel x5, x13, x5, lo ldp x12, x13, [x17, #0x20] csel x4, x12, x4, hi csel x5, x13, x5, hi ldp x12, x13, [x16, #0x30] ldp x6, x7, [sp, #0x90] csel x6, x12, x6, lo csel x7, x13, x7, lo ldp x12, x13, [x17, #0x30] csel x6, x12, x6, hi csel x7, x13, x7, hi stp x0, x1, [x15] stp x2, x3, [x15, #0x10] stp x4, x5, [x15, #0x20] stp x6, x7, [x15, #0x30] stp x8, x9, [x15, #0x40] stp x10, x11, [x15, #0x50] CFI_INC_SP(224) CFI_RET S2N_BN_SIZE_DIRECTIVE(Lsm2_montjscalarmul_alt_sm2_montjadd) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lsm2_montjscalarmul_alt_sm2_montjdouble) Lsm2_montjscalarmul_alt_sm2_montjdouble: CFI_START CFI_DEC_SP(192) mov x15, x0 mov x16, x1 ldp x2, x3, [x16, #0x40] mul x9, x2, x3 umulh x10, x2, x3 ldp x4, x5, [x16, #0x50] mul x11, x2, x5 umulh x12, x2, x5 mul x6, x2, x4 umulh x7, x2, x4 adds x10, x10, x6 adcs x11, x11, x7 mul x6, x3, x4 umulh x7, x3, x4 adc x7, x7, xzr adds x11, x11, x6 mul x13, x4, x5 umulh x14, x4, x5 adcs x12, x12, x7 mul x6, x3, x5 umulh x7, x3, x5 adc x7, x7, xzr adds x12, x12, x6 adcs x13, x13, x7 adc x14, x14, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 cset x7, hs umulh x6, x2, x2 mul x8, x2, x2 adds x9, x9, x6 mul x6, x3, x3 adcs x10, x10, x6 umulh x6, x3, x3 adcs x11, x11, x6 mul x6, x4, x4 adcs x12, x12, x6 umulh x6, x4, x4 adcs x13, x13, x6 mul x6, x5, x5 adcs x14, x14, x6 umulh x6, x5, x5 adc x7, x7, x6 lsl x4, x8, #32 lsr x5, x8, #32 subs x2, x4, x8 sbc x3, x5, xzr subs x9, x9, x2 sbcs x10, x10, x3 sbcs x11, x11, x4 sbc x8, x8, x5 lsl x4, x9, #32 lsr x5, x9, #32 subs x2, x4, x9 sbc x3, x5, xzr subs x10, x10, x2 sbcs x11, x11, x3 sbcs x8, x8, x4 sbc x9, x9, x5 lsl x4, x10, #32 lsr x5, x10, #32 subs x2, x4, x10 sbc x3, x5, xzr subs x11, x11, x2 sbcs x8, x8, x3 sbcs x9, x9, x4 sbc x10, x10, x5 lsl x4, x11, #32 lsr x5, x11, #32 subs x2, x4, x11 sbc x3, x5, xzr subs x8, x8, x2 sbcs x9, x9, x3 sbcs x10, x10, x4 sbc x11, x11, x5 adds x8, x8, x12 adcs x9, x9, x13 adcs x10, x10, x14 adcs x11, x11, x7 cset x2, hs mov x3, #-0x100000000 mov x5, #-0x100000001 adds x12, x8, #0x1 sbcs x13, x9, x3 adcs x14, x10, xzr sbcs x7, x11, x5 sbcs xzr, x2, xzr csel x8, x8, x12, 
lo csel x9, x9, x13, lo csel x10, x10, x14, lo csel x11, x11, x7, lo stp x8, x9, [sp] stp x10, x11, [sp, #0x10] ldp x2, x3, [x16, #0x20] mul x9, x2, x3 umulh x10, x2, x3 ldp x4, x5, [x16, #0x30] mul x11, x2, x5 umulh x12, x2, x5 mul x6, x2, x4 umulh x7, x2, x4 adds x10, x10, x6 adcs x11, x11, x7 mul x6, x3, x4 umulh x7, x3, x4 adc x7, x7, xzr adds x11, x11, x6 mul x13, x4, x5 umulh x14, x4, x5 adcs x12, x12, x7 mul x6, x3, x5 umulh x7, x3, x5 adc x7, x7, xzr adds x12, x12, x6 adcs x13, x13, x7 adc x14, x14, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 cset x7, hs umulh x6, x2, x2 mul x8, x2, x2 adds x9, x9, x6 mul x6, x3, x3 adcs x10, x10, x6 umulh x6, x3, x3 adcs x11, x11, x6 mul x6, x4, x4 adcs x12, x12, x6 umulh x6, x4, x4 adcs x13, x13, x6 mul x6, x5, x5 adcs x14, x14, x6 umulh x6, x5, x5 adc x7, x7, x6 lsl x4, x8, #32 lsr x5, x8, #32 subs x2, x4, x8 sbc x3, x5, xzr subs x9, x9, x2 sbcs x10, x10, x3 sbcs x11, x11, x4 sbc x8, x8, x5 lsl x4, x9, #32 lsr x5, x9, #32 subs x2, x4, x9 sbc x3, x5, xzr subs x10, x10, x2 sbcs x11, x11, x3 sbcs x8, x8, x4 sbc x9, x9, x5 lsl x4, x10, #32 lsr x5, x10, #32 subs x2, x4, x10 sbc x3, x5, xzr subs x11, x11, x2 sbcs x8, x8, x3 sbcs x9, x9, x4 sbc x10, x10, x5 lsl x4, x11, #32 lsr x5, x11, #32 subs x2, x4, x11 sbc x3, x5, xzr subs x8, x8, x2 sbcs x9, x9, x3 sbcs x10, x10, x4 sbc x11, x11, x5 adds x8, x8, x12 adcs x9, x9, x13 adcs x10, x10, x14 adcs x11, x11, x7 cset x2, hs mov x3, #-0x100000000 mov x5, #-0x100000001 adds x12, x8, #0x1 sbcs x13, x9, x3 adcs x14, x10, xzr sbcs x7, x11, x5 sbcs xzr, x2, xzr csel x8, x8, x12, lo csel x9, x9, x13, lo csel x10, x10, x14, lo csel x11, x11, x7, lo stp x8, x9, [sp, #0x20] stp x10, x11, [sp, #0x30] ldp x5, x6, [x16] ldp x4, x3, [sp] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x16, #0x10] ldp x4, x3, [sp, #0x10] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp, #0x60] stp x7, x8, [sp, #0x70] ldp x4, x5, [x16] ldp x8, x9, [sp] adds x4, x4, x8 adcs x5, x5, x9 ldp x6, x7, [x16, #0x10] ldp x10, x11, [sp, #0x10] adcs x6, x6, x10 adcs x7, x7, x11 csetm x2, hs subs x4, x4, x2 and x3, x2, #0xffffffff00000000 sbcs x5, x5, x3 and x1, x2, #0xfffffffeffffffff sbcs x6, x6, x2 sbc x7, x7, x1 stp x4, x5, [sp, #0x40] stp x6, x7, [sp, #0x50] ldp x3, x4, [sp, #0x40] ldp x7, x8, [sp, #0x60] mul x12, x3, x7 umulh x13, x3, x7 mul x11, x3, x8 umulh x14, x3, x8 adds x13, x13, x11 ldp x9, x10, [sp, #0x70] mul x11, x3, x9 umulh x0, x3, x9 adcs x14, x14, x11 mul x11, x3, x10 umulh x1, x3, x10 adcs x0, x0, x11 adc x1, x1, xzr ldp x5, x6, [sp, #0x50] mul x11, x4, x7 adds x13, x13, x11 mul x11, x4, x8 adcs x14, x14, x11 mul x11, x4, x9 adcs x0, x0, x11 mul x11, x4, x10 adcs x1, x1, x11 umulh x3, x4, x10 adc x3, x3, xzr umulh x11, x4, x7 adds x14, x14, x11 umulh x11, x4, x8 adcs x0, x0, x11 umulh x11, x4, x9 adcs x1, x1, x11 adc x3, x3, xzr mul x11, x5, x7 adds x14, x14, x11 mul x11, x5, x8 adcs x0, x0, x11 mul x11, x5, x9 adcs x1, x1, x11 mul x11, x5, x10 adcs x3, x3, x11 umulh x4, x5, x10 adc x4, x4, xzr umulh x11, x5, x7 adds x0, x0, x11 umulh x11, x5, x8 adcs x1, x1, x11 umulh x11, x5, x9 adcs x3, x3, x11 adc x4, x4, xzr mul x11, x6, x7 adds x0, x0, x11 mul x11, x6, x8 adcs x1, x1, x11 mul x11, x6, x9 adcs x3, x3, x11 mul x11, x6, x10 adcs x4, x4, x11 umulh x5, x6, x10 adc x5, x5, xzr umulh x11, x6, x7 adds x1, x1, x11 umulh x11, x6, 
x8 adcs x3, x3, x11 umulh x11, x6, x9 adcs x4, x4, x11 adc x5, x5, xzr lsl x11, x12, #32 lsr x6, x12, #32 subs x8, x11, x12 sbc x7, x6, xzr subs x13, x13, x8 sbcs x14, x14, x7 sbcs x0, x0, x11 sbc x12, x12, x6 lsl x11, x13, #32 lsr x6, x13, #32 subs x8, x11, x13 sbc x7, x6, xzr subs x14, x14, x8 sbcs x0, x0, x7 sbcs x12, x12, x11 sbc x13, x13, x6 lsl x11, x14, #32 lsr x6, x14, #32 subs x8, x11, x14 sbc x7, x6, xzr subs x0, x0, x8 sbcs x12, x12, x7 sbcs x13, x13, x11 sbc x14, x14, x6 lsl x11, x0, #32 lsr x6, x0, #32 subs x8, x11, x0 sbc x7, x6, xzr subs x12, x12, x8 sbcs x13, x13, x7 sbcs x14, x14, x11 sbc x0, x0, x6 adds x12, x12, x1 adcs x13, x13, x3 adcs x14, x14, x4 adcs x0, x0, x5 cset x8, hs mov x11, #-0x100000000 mov x6, #-0x100000001 adds x1, x12, #0x1 sbcs x3, x13, x11 adcs x4, x14, xzr sbcs x5, x0, x6 sbcs xzr, x8, xzr csel x12, x12, x1, lo csel x13, x13, x3, lo csel x14, x14, x4, lo csel x0, x0, x5, lo stp x12, x13, [sp, #0x60] stp x14, x0, [sp, #0x70] ldp x4, x5, [x16, #0x20] ldp x8, x9, [x16, #0x40] adds x4, x4, x8 adcs x5, x5, x9 ldp x6, x7, [x16, #0x30] ldp x10, x11, [x16, #0x50] adcs x6, x6, x10 adcs x7, x7, x11 adc x3, xzr, xzr adds x8, x4, #0x1 mov x9, #-0x100000000 sbcs x9, x5, x9 adcs x10, x6, xzr mov x11, #-0x100000001 sbcs x11, x7, x11 sbcs x3, x3, xzr csel x4, x4, x8, lo csel x5, x5, x9, lo csel x6, x6, x10, lo csel x7, x7, x11, lo stp x4, x5, [sp, #0x40] stp x6, x7, [sp, #0x50] ldp x3, x4, [x16] ldp x7, x8, [sp, #0x20] mul x12, x3, x7 umulh x13, x3, x7 mul x11, x3, x8 umulh x14, x3, x8 adds x13, x13, x11 ldp x9, x10, [sp, #0x30] mul x11, x3, x9 umulh x0, x3, x9 adcs x14, x14, x11 mul x11, x3, x10 umulh x1, x3, x10 adcs x0, x0, x11 adc x1, x1, xzr ldp x5, x6, [x16, #0x10] mul x11, x4, x7 adds x13, x13, x11 mul x11, x4, x8 adcs x14, x14, x11 mul x11, x4, x9 adcs x0, x0, x11 mul x11, x4, x10 adcs x1, x1, x11 umulh x3, x4, x10 adc x3, x3, xzr umulh x11, x4, x7 adds x14, x14, x11 umulh x11, x4, x8 adcs x0, x0, x11 umulh x11, x4, x9 adcs x1, x1, x11 adc x3, x3, xzr mul x11, x5, x7 adds x14, x14, x11 mul x11, x5, x8 adcs x0, x0, x11 mul x11, x5, x9 adcs x1, x1, x11 mul x11, x5, x10 adcs x3, x3, x11 umulh x4, x5, x10 adc x4, x4, xzr umulh x11, x5, x7 adds x0, x0, x11 umulh x11, x5, x8 adcs x1, x1, x11 umulh x11, x5, x9 adcs x3, x3, x11 adc x4, x4, xzr mul x11, x6, x7 adds x0, x0, x11 mul x11, x6, x8 adcs x1, x1, x11 mul x11, x6, x9 adcs x3, x3, x11 mul x11, x6, x10 adcs x4, x4, x11 umulh x5, x6, x10 adc x5, x5, xzr umulh x11, x6, x7 adds x1, x1, x11 umulh x11, x6, x8 adcs x3, x3, x11 umulh x11, x6, x9 adcs x4, x4, x11 adc x5, x5, xzr lsl x11, x12, #32 lsr x6, x12, #32 subs x8, x11, x12 sbc x7, x6, xzr subs x13, x13, x8 sbcs x14, x14, x7 sbcs x0, x0, x11 sbc x12, x12, x6 lsl x11, x13, #32 lsr x6, x13, #32 subs x8, x11, x13 sbc x7, x6, xzr subs x14, x14, x8 sbcs x0, x0, x7 sbcs x12, x12, x11 sbc x13, x13, x6 lsl x11, x14, #32 lsr x6, x14, #32 subs x8, x11, x14 sbc x7, x6, xzr subs x0, x0, x8 sbcs x12, x12, x7 sbcs x13, x13, x11 sbc x14, x14, x6 lsl x11, x0, #32 lsr x6, x0, #32 subs x8, x11, x0 sbc x7, x6, xzr subs x12, x12, x8 sbcs x13, x13, x7 sbcs x14, x14, x11 sbc x0, x0, x6 adds x12, x12, x1 adcs x13, x13, x3 adcs x14, x14, x4 adcs x0, x0, x5 cset x8, hs mov x11, #-0x100000000 mov x6, #-0x100000001 adds x1, x12, #0x1 sbcs x3, x13, x11 adcs x4, x14, xzr sbcs x5, x0, x6 sbcs xzr, x8, xzr csel x12, x12, x1, lo csel x13, x13, x3, lo csel x14, x14, x4, lo csel x0, x0, x5, lo stp x12, x13, [sp, #0x80] stp x14, x0, [sp, #0x90] ldp x2, x3, [sp, #0x60] mul x9, x2, x3 umulh x10, x2, 
x3 ldp x4, x5, [sp, #0x70] mul x11, x2, x5 umulh x12, x2, x5 mul x6, x2, x4 umulh x7, x2, x4 adds x10, x10, x6 adcs x11, x11, x7 mul x6, x3, x4 umulh x7, x3, x4 adc x7, x7, xzr adds x11, x11, x6 mul x13, x4, x5 umulh x14, x4, x5 adcs x12, x12, x7 mul x6, x3, x5 umulh x7, x3, x5 adc x7, x7, xzr adds x12, x12, x6 adcs x13, x13, x7 adc x14, x14, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 cset x7, hs umulh x6, x2, x2 mul x8, x2, x2 adds x9, x9, x6 mul x6, x3, x3 adcs x10, x10, x6 umulh x6, x3, x3 adcs x11, x11, x6 mul x6, x4, x4 adcs x12, x12, x6 umulh x6, x4, x4 adcs x13, x13, x6 mul x6, x5, x5 adcs x14, x14, x6 umulh x6, x5, x5 adc x7, x7, x6 lsl x4, x8, #32 lsr x5, x8, #32 subs x2, x4, x8 sbc x3, x5, xzr subs x9, x9, x2 sbcs x10, x10, x3 sbcs x11, x11, x4 sbc x8, x8, x5 lsl x4, x9, #32 lsr x5, x9, #32 subs x2, x4, x9 sbc x3, x5, xzr subs x10, x10, x2 sbcs x11, x11, x3 sbcs x8, x8, x4 sbc x9, x9, x5 lsl x4, x10, #32 lsr x5, x10, #32 subs x2, x4, x10 sbc x3, x5, xzr subs x11, x11, x2 sbcs x8, x8, x3 sbcs x9, x9, x4 sbc x10, x10, x5 lsl x4, x11, #32 lsr x5, x11, #32 subs x2, x4, x11 sbc x3, x5, xzr subs x8, x8, x2 sbcs x9, x9, x3 sbcs x10, x10, x4 sbc x11, x11, x5 adds x8, x8, x12 adcs x9, x9, x13 adcs x10, x10, x14 adcs x11, x11, x7 cset x2, hs mov x3, #-0x100000000 mov x5, #-0x100000001 adds x12, x8, #0x1 sbcs x13, x9, x3 adcs x14, x10, xzr sbcs x7, x11, x5 sbcs xzr, x2, xzr csel x8, x8, x12, lo csel x9, x9, x13, lo csel x10, x10, x14, lo csel x11, x11, x7, lo stp x8, x9, [sp, #0xa0] stp x10, x11, [sp, #0xb0] ldp x2, x3, [sp, #0x40] mul x9, x2, x3 umulh x10, x2, x3 ldp x4, x5, [sp, #0x50] mul x11, x2, x5 umulh x12, x2, x5 mul x6, x2, x4 umulh x7, x2, x4 adds x10, x10, x6 adcs x11, x11, x7 mul x6, x3, x4 umulh x7, x3, x4 adc x7, x7, xzr adds x11, x11, x6 mul x13, x4, x5 umulh x14, x4, x5 adcs x12, x12, x7 mul x6, x3, x5 umulh x7, x3, x5 adc x7, x7, xzr adds x12, x12, x6 adcs x13, x13, x7 adc x14, x14, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 cset x7, hs umulh x6, x2, x2 mul x8, x2, x2 adds x9, x9, x6 mul x6, x3, x3 adcs x10, x10, x6 umulh x6, x3, x3 adcs x11, x11, x6 mul x6, x4, x4 adcs x12, x12, x6 umulh x6, x4, x4 adcs x13, x13, x6 mul x6, x5, x5 adcs x14, x14, x6 umulh x6, x5, x5 adc x7, x7, x6 lsl x4, x8, #32 lsr x5, x8, #32 subs x2, x4, x8 sbc x3, x5, xzr subs x9, x9, x2 sbcs x10, x10, x3 sbcs x11, x11, x4 sbc x8, x8, x5 lsl x4, x9, #32 lsr x5, x9, #32 subs x2, x4, x9 sbc x3, x5, xzr subs x10, x10, x2 sbcs x11, x11, x3 sbcs x8, x8, x4 sbc x9, x9, x5 lsl x4, x10, #32 lsr x5, x10, #32 subs x2, x4, x10 sbc x3, x5, xzr subs x11, x11, x2 sbcs x8, x8, x3 sbcs x9, x9, x4 sbc x10, x10, x5 lsl x4, x11, #32 lsr x5, x11, #32 subs x2, x4, x11 sbc x3, x5, xzr subs x8, x8, x2 sbcs x9, x9, x3 sbcs x10, x10, x4 sbc x11, x11, x5 adds x8, x8, x12 adcs x9, x9, x13 adcs x10, x10, x14 adcs x11, x11, x7 cset x2, hs mov x3, #-0x100000000 mov x5, #-0x100000001 adds x12, x8, #0x1 sbcs x13, x9, x3 adcs x14, x10, xzr sbcs x7, x11, x5 sbcs xzr, x2, xzr csel x8, x8, x12, lo csel x9, x9, x13, lo csel x10, x10, x14, lo csel x11, x11, x7, lo stp x8, x9, [sp, #0x40] stp x10, x11, [sp, #0x50] mov x1, #0x9 mov x2, #-0x1 ldp x9, x10, [sp, #0xa0] subs x9, x2, x9 mov x3, #-0x100000000 sbcs x10, x3, x10 ldp x11, x12, [sp, #0xb0] sbcs x11, x2, x11 mov x4, #-0x100000001 sbc x12, x4, x12 mul x3, x1, x9 mul x4, x1, x10 mul x5, x1, x11 mul x6, x1, x12 umulh x9, x1, x9 umulh x10, x1, x10 
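// (Aside, illustration only.) The constants -0x1, -0x100000000 and
// -0x100000001 loaded a few instructions above are exactly the four 64-bit
// words of p_sm2 in two's-complement form, so that subs/sbcs/sbc chain
// computes p_sm2 - t, i.e. -t mod p_sm2 for t < p_sm2, before the
// small-constant multiply-accumulate that follows. A quick Python check:
//
//     p_sm2 = 0xfffffffeffffffffffffffffffffffffffffffff00000000ffffffffffffffff
//     ws = [-0x1, -0x100000000, -0x1, -0x100000001]  # least significant first
//     assert sum((w % (1 << 64)) << (64*i) for i, w in enumerate(ws)) == p_sm2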
umulh x11, x1, x11 umulh x7, x1, x12 adds x4, x4, x9 adcs x5, x5, x10 adcs x6, x6, x11 adc x7, x7, xzr mov x1, #0xc ldp x9, x10, [sp, #0x80] mul x8, x9, x1 umulh x9, x9, x1 adds x3, x3, x8 mul x8, x10, x1 umulh x10, x10, x1 adcs x4, x4, x8 ldp x11, x12, [sp, #0x90] mul x8, x11, x1 umulh x11, x11, x1 adcs x5, x5, x8 mul x8, x12, x1 umulh x12, x12, x1 adcs x6, x6, x8 adc x7, x7, xzr adds x4, x4, x9 adcs x5, x5, x10 adcs x6, x6, x11 adc x7, x7, x12 add x7, x7, #0x1 lsl x8, x7, #32 sub x9, x8, x7 adds x3, x3, x7 adcs x4, x4, x9 adcs x5, x5, xzr adcs x6, x6, x8 csetm x7, lo adds x3, x3, x7 and x9, x7, #0xffffffff00000000 adcs x4, x4, x9 adcs x5, x5, x7 and x8, x7, #0xfffffffeffffffff adc x6, x6, x8 stp x3, x4, [sp, #0xa0] stp x5, x6, [sp, #0xb0] ldp x5, x6, [sp, #0x40] ldp x4, x3, [sp] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x50] ldp x4, x3, [sp, #0x10] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [sp, #0x40] stp x7, x8, [sp, #0x50] ldp x2, x3, [sp, #0x20] mul x9, x2, x3 umulh x10, x2, x3 ldp x4, x5, [sp, #0x30] mul x11, x2, x5 umulh x12, x2, x5 mul x6, x2, x4 umulh x7, x2, x4 adds x10, x10, x6 adcs x11, x11, x7 mul x6, x3, x4 umulh x7, x3, x4 adc x7, x7, xzr adds x11, x11, x6 mul x13, x4, x5 umulh x14, x4, x5 adcs x12, x12, x7 mul x6, x3, x5 umulh x7, x3, x5 adc x7, x7, xzr adds x12, x12, x6 adcs x13, x13, x7 adc x14, x14, xzr adds x9, x9, x9 adcs x10, x10, x10 adcs x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 cset x7, hs umulh x6, x2, x2 mul x8, x2, x2 adds x9, x9, x6 mul x6, x3, x3 adcs x10, x10, x6 umulh x6, x3, x3 adcs x11, x11, x6 mul x6, x4, x4 adcs x12, x12, x6 umulh x6, x4, x4 adcs x13, x13, x6 mul x6, x5, x5 adcs x14, x14, x6 umulh x6, x5, x5 adc x7, x7, x6 lsl x4, x8, #32 lsr x5, x8, #32 subs x2, x4, x8 sbc x3, x5, xzr subs x9, x9, x2 sbcs x10, x10, x3 sbcs x11, x11, x4 sbc x8, x8, x5 lsl x4, x9, #32 lsr x5, x9, #32 subs x2, x4, x9 sbc x3, x5, xzr subs x10, x10, x2 sbcs x11, x11, x3 sbcs x8, x8, x4 sbc x9, x9, x5 lsl x4, x10, #32 lsr x5, x10, #32 subs x2, x4, x10 sbc x3, x5, xzr subs x11, x11, x2 sbcs x8, x8, x3 sbcs x9, x9, x4 sbc x10, x10, x5 lsl x4, x11, #32 lsr x5, x11, #32 subs x2, x4, x11 sbc x3, x5, xzr subs x8, x8, x2 sbcs x9, x9, x3 sbcs x10, x10, x4 sbc x11, x11, x5 adds x8, x8, x12 adcs x9, x9, x13 adcs x10, x10, x14 adcs x11, x11, x7 cset x2, hs mov x3, #-0x100000000 mov x5, #-0x100000001 adds x12, x8, #0x1 sbcs x13, x9, x3 adcs x14, x10, xzr sbcs x7, x11, x5 sbcs xzr, x2, xzr csel x8, x8, x12, lo csel x9, x9, x13, lo csel x10, x10, x14, lo csel x11, x11, x7, lo stp x8, x9, [sp] stp x10, x11, [sp, #0x10] ldp x3, x4, [sp, #0xa0] ldp x7, x8, [sp, #0x60] mul x12, x3, x7 umulh x13, x3, x7 mul x11, x3, x8 umulh x14, x3, x8 adds x13, x13, x11 ldp x9, x10, [sp, #0x70] mul x11, x3, x9 umulh x0, x3, x9 adcs x14, x14, x11 mul x11, x3, x10 umulh x1, x3, x10 adcs x0, x0, x11 adc x1, x1, xzr ldp x5, x6, [sp, #0xb0] mul x11, x4, x7 adds x13, x13, x11 mul x11, x4, x8 adcs x14, x14, x11 mul x11, x4, x9 adcs x0, x0, x11 mul x11, x4, x10 adcs x1, x1, x11 umulh x3, x4, x10 adc x3, x3, xzr umulh x11, x4, x7 adds x14, x14, x11 umulh x11, x4, x8 adcs x0, x0, x11 umulh x11, x4, x9 adcs x1, x1, x11 adc x3, x3, xzr mul x11, x5, x7 adds x14, x14, x11 mul x11, x5, x8 adcs x0, x0, x11 mul x11, x5, x9 adcs x1, x1, x11 mul x11, x5, x10 adcs x3, x3, x11 umulh x4, x5, x10 adc x4, x4, xzr umulh x11, x5, x7 adds x0, x0, x11 umulh 
x11, x5, x8 adcs x1, x1, x11 umulh x11, x5, x9 adcs x3, x3, x11 adc x4, x4, xzr mul x11, x6, x7 adds x0, x0, x11 mul x11, x6, x8 adcs x1, x1, x11 mul x11, x6, x9 adcs x3, x3, x11 mul x11, x6, x10 adcs x4, x4, x11 umulh x5, x6, x10 adc x5, x5, xzr umulh x11, x6, x7 adds x1, x1, x11 umulh x11, x6, x8 adcs x3, x3, x11 umulh x11, x6, x9 adcs x4, x4, x11 adc x5, x5, xzr lsl x11, x12, #32 lsr x6, x12, #32 subs x8, x11, x12 sbc x7, x6, xzr subs x13, x13, x8 sbcs x14, x14, x7 sbcs x0, x0, x11 sbc x12, x12, x6 lsl x11, x13, #32 lsr x6, x13, #32 subs x8, x11, x13 sbc x7, x6, xzr subs x14, x14, x8 sbcs x0, x0, x7 sbcs x12, x12, x11 sbc x13, x13, x6 lsl x11, x14, #32 lsr x6, x14, #32 subs x8, x11, x14 sbc x7, x6, xzr subs x0, x0, x8 sbcs x12, x12, x7 sbcs x13, x13, x11 sbc x14, x14, x6 lsl x11, x0, #32 lsr x6, x0, #32 subs x8, x11, x0 sbc x7, x6, xzr subs x12, x12, x8 sbcs x13, x13, x7 sbcs x14, x14, x11 sbc x0, x0, x6 adds x12, x12, x1 adcs x13, x13, x3 adcs x14, x14, x4 adcs x0, x0, x5 cset x8, hs mov x11, #-0x100000000 mov x6, #-0x100000001 adds x1, x12, #0x1 sbcs x3, x13, x11 adcs x4, x14, xzr sbcs x5, x0, x6 sbcs xzr, x8, xzr csel x12, x12, x1, lo csel x13, x13, x3, lo csel x14, x14, x4, lo csel x0, x0, x5, lo stp x12, x13, [sp, #0x60] stp x14, x0, [sp, #0x70] ldp x5, x6, [sp, #0x40] ldp x4, x3, [sp, #0x20] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #0x50] ldp x4, x3, [sp, #0x30] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, lo adds x5, x5, x3 and x4, x3, #0xffffffff00000000 adcs x6, x6, x4 adcs x7, x7, x3 and x4, x3, #0xfffffffeffffffff adc x8, x8, x4 stp x5, x6, [x15, #0x40] stp x7, x8, [x15, #0x50] ldp x1, x2, [sp, #0x80] lsl x0, x1, #2 ldp x6, x7, [sp, #0xa0] subs x0, x0, x6 extr x1, x2, x1, #0x3e sbcs x1, x1, x7 ldp x3, x4, [sp, #0x90] extr x2, x3, x2, #0x3e ldp x6, x7, [sp, #0xb0] sbcs x2, x2, x6 extr x3, x4, x3, #0x3e sbcs x3, x3, x7 lsr x4, x4, #62 sbc x4, x4, xzr add x4, x4, #0x1 lsl x5, x4, #32 sub x6, x5, x4 adds x0, x0, x4 adcs x1, x1, x6 adcs x2, x2, xzr adcs x3, x3, x5 csetm x4, lo adds x0, x0, x4 and x6, x4, #0xffffffff00000000 adcs x1, x1, x6 adcs x2, x2, x4 and x5, x4, #0xfffffffeffffffff adc x3, x3, x5 stp x0, x1, [x15] stp x2, x3, [x15, #0x10] mov x1, #0x8 mov x2, #-0x1 ldp x9, x10, [sp] subs x9, x2, x9 mov x3, #-0x100000000 sbcs x10, x3, x10 ldp x11, x12, [sp, #0x10] sbcs x11, x2, x11 mov x4, #-0x100000001 sbc x12, x4, x12 lsl x3, x9, #3 extr x4, x10, x9, #0x3d extr x5, x11, x10, #0x3d extr x6, x12, x11, #0x3d lsr x7, x12, #61 mov x1, #0x3 ldp x9, x10, [sp, #0x60] mul x8, x9, x1 umulh x9, x9, x1 adds x3, x3, x8 mul x8, x10, x1 umulh x10, x10, x1 adcs x4, x4, x8 ldp x11, x12, [sp, #0x70] mul x8, x11, x1 umulh x11, x11, x1 adcs x5, x5, x8 mul x8, x12, x1 umulh x12, x12, x1 adcs x6, x6, x8 adc x7, x7, xzr adds x4, x4, x9 adcs x5, x5, x10 adcs x6, x6, x11 adc x7, x7, x12 add x7, x7, #0x1 lsl x8, x7, #32 sub x9, x8, x7 adds x3, x3, x7 adcs x4, x4, x9 adcs x5, x5, xzr adcs x6, x6, x8 csetm x7, lo adds x3, x3, x7 and x9, x7, #0xffffffff00000000 adcs x4, x4, x9 adcs x5, x5, x7 and x8, x7, #0xfffffffeffffffff adc x6, x6, x8 stp x3, x4, [x15, #0x20] stp x5, x6, [x15, #0x30] CFI_INC_SP(192) CFI_RET S2N_BN_SIZE_DIRECTIVE(Lsm2_montjscalarmul_alt_sm2_montjdouble) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
wlsfx/bnbb
2,411
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_mod_nsm2_4.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Reduce modulo group order, z := x mod n_sm2
// Input x[4]; output z[4]
//
// extern void bignum_mod_nsm2_4(uint64_t z[static 4], const uint64_t x[static 4]);
//
// Reduction is modulo the group order of the GM/T 0003-2012 curve SM2.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_nsm2_4)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_nsm2_4)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_nsm2_4)
.text
.balign 4

#define z x0
#define x x1

#define n0 x2
#define n1 x3
#define n2 x4
#define n3 x5

#define d0 x6
#define d1 x7
#define d2 x8
#define d3 x9

// Loading large constants

#define movbig(nn,n3,n2,n1,n0) \
movz nn, n0 __LF \
movk nn, n1, lsl #16 __LF \
movk nn, n2, lsl #32 __LF \
movk nn, n3, lsl #48

S2N_BN_SYMBOL(bignum_mod_nsm2_4):

CFI_START

// Load the complicated three words of n_sm2, the other being all 1s

movbig( n0, #0x53BB, #0xF409, #0x39D5, #0x4123)
movbig( n1, #0x7203, #0xDF6B, #0x21C6, #0x052B)
mov n3, #0xFFFFFFFEFFFFFFFF

// Load the input number

ldp d0, d1, [x]
ldp d2, d3, [x, #16]

// Do the subtraction. Since word 2 of n_sm2 is all 1s, that can be
// done by adding zero with carry, thanks to the inverted carry.

subs n0, d0, n0
sbcs n1, d1, n1
adcs n2, d2, xzr
sbcs n3, d3, n3

// Now if the carry is *clear* (inversion at work) the subtraction carried
// and hence we should have done nothing, so we reset each n_i = d_i

csel n0, d0, n0, cc
csel n1, d1, n1, cc
csel n2, d2, n2, cc
csel n3, d3, n3, cc

// Store the end result

stp n0, n1, [z]
stp n2, n3, [z, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_mod_nsm2_4)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
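The conditional-subtraction pattern above is compact enough to restate in portable C. The following is a minimal sketch of ours (not part of s2n-bignum), assuming a compiler with unsigned __int128; unlike the assembly, it makes no attempt at the branch-free csel-style selection.

#include <stdint.h>

/* n_sm2 limbs, least significant first, matching the movbig/mov constants
   in the assembly above (word 2 is all 1s). */
static const uint64_t N_SM2[4] = {
    0x53BBF40939D54123ULL, 0x7203DF6B21C6052BULL,
    0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFEFFFFFFFFULL
};

void mod_nsm2_4_sketch(uint64_t z[4], const uint64_t x[4])
{
    uint64_t t[4];
    uint64_t borrow = 0;
    for (int i = 0; i < 4; i++) {
        /* 128-bit subtract with borrow chain: t = x - n_sm2 */
        unsigned __int128 s = (unsigned __int128)x[i] - N_SM2[i] - borrow;
        t[i] = (uint64_t)s;
        borrow = (uint64_t)(s >> 64) & 1;   /* 1 iff this limb borrowed */
    }
    /* borrow != 0 means x < n_sm2, so the subtraction must be undone */
    for (int i = 0; i < 4; i++)
        z[i] = borrow ? x[i] : t[i];
}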
wlsfx/bnbb
22,261
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/sm2_montjmixadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point mixed addition on GM/T 0003-2012 curve SM2 in Montgomery-Jacobian coordinates // // extern void sm2_montjmixadd_alt(uint64_t p3[static 12], // const uint64_t p1[static 12], // const uint64_t p2[static 8]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^256 * x) mod p_sm2. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // The "mixed" part means that p2 only has x and y coordinates, with the // implicit z coordinate assumed to be the identity. // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjmixadd_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjmixadd_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjmixadd_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 32 // Stable homes for input arguments during main code sequence #define input_z x15 #define input_x x16 #define input_y x17 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // #NSPACE is the total stack needed for these temporaries #define zp2 sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define NSPACE NUMSIZE*6 // Corresponds to bignum_montmul_sm2_alt except for registers #define montmul_sm2(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x7, x8, [P2] __LF \ mul x12, x3, x7 __LF \ umulh x13, x3, x7 __LF \ mul x11, x3, x8 __LF \ umulh x14, x3, x8 __LF \ adds x13, x13, x11 __LF \ ldp x9, x10, [P2+16] __LF \ mul x11, x3, x9 __LF \ umulh x0, x3, x9 __LF \ adcs x14, x14, x11 __LF \ mul x11, x3, x10 __LF \ umulh x1, x3, x10 __LF \ adcs x0, x0, x11 __LF \ adc x1, x1, xzr __LF \ ldp x5, x6, [P1+16] __LF \ mul x11, x4, x7 __LF \ adds x13, x13, x11 __LF \ mul x11, x4, x8 __LF \ adcs x14, x14, x11 __LF \ mul x11, x4, x9 __LF \ adcs x0, x0, x11 __LF \ mul x11, x4, x10 __LF \ adcs x1, x1, x11 __LF \ umulh x3, x4, x10 __LF \ adc x3, x3, xzr __LF \ umulh x11, x4, x7 __LF \ adds x14, x14, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x0, x0, x11 __LF \ umulh x11, x4, x9 __LF \ adcs x1, x1, x11 __LF \ adc x3, x3, xzr __LF \ mul x11, x5, x7 __LF \ adds x14, x14, x11 __LF \ mul x11, x5, x8 __LF \ adcs x0, x0, x11 __LF \ mul x11, x5, x9 __LF \ adcs x1, x1, x11 __LF \ mul x11, x5, x10 __LF \ adcs x3, x3, x11 __LF \ umulh x4, x5, x10 __LF \ adc x4, x4, xzr __LF \ umulh x11, x5, x7 __LF \ adds x0, x0, x11 __LF \ umulh x11, x5, x8 __LF \ adcs x1, x1, x11 __LF \ umulh x11, x5, x9 __LF \ adcs x3, x3, x11 __LF \ adc x4, x4, xzr __LF \ mul x11, x6, x7 __LF \ adds x0, x0, x11 __LF \ mul x11, x6, x8 __LF \ adcs x1, x1, x11 __LF \ mul x11, x6, x9 __LF \ adcs x3, 
x3, x11 __LF \ mul x11, x6, x10 __LF \ adcs x4, x4, x11 __LF \ umulh x5, x6, x10 __LF \ adc x5, x5, xzr __LF \ umulh x11, x6, x7 __LF \ adds x1, x1, x11 __LF \ umulh x11, x6, x8 __LF \ adcs x3, x3, x11 __LF \ umulh x11, x6, x9 __LF \ adcs x4, x4, x11 __LF \ adc x5, x5, xzr __LF \ lsl x11, x12, #32 __LF \ lsr x6, x12, #32 __LF \ subs x8, x11, x12 __LF \ sbc x7, x6, xzr __LF \ subs x13, x13, x8 __LF \ sbcs x14, x14, x7 __LF \ sbcs x0, x0, x11 __LF \ sbc x12, x12, x6 __LF \ lsl x11, x13, #32 __LF \ lsr x6, x13, #32 __LF \ subs x8, x11, x13 __LF \ sbc x7, x6, xzr __LF \ subs x14, x14, x8 __LF \ sbcs x0, x0, x7 __LF \ sbcs x12, x12, x11 __LF \ sbc x13, x13, x6 __LF \ lsl x11, x14, #32 __LF \ lsr x6, x14, #32 __LF \ subs x8, x11, x14 __LF \ sbc x7, x6, xzr __LF \ subs x0, x0, x8 __LF \ sbcs x12, x12, x7 __LF \ sbcs x13, x13, x11 __LF \ sbc x14, x14, x6 __LF \ lsl x11, x0, #32 __LF \ lsr x6, x0, #32 __LF \ subs x8, x11, x0 __LF \ sbc x7, x6, xzr __LF \ subs x12, x12, x8 __LF \ sbcs x13, x13, x7 __LF \ sbcs x14, x14, x11 __LF \ sbc x0, x0, x6 __LF \ adds x12, x12, x1 __LF \ adcs x13, x13, x3 __LF \ adcs x14, x14, x4 __LF \ adcs x0, x0, x5 __LF \ cset x8, cs __LF \ mov x11, #0xffffffff00000000 __LF \ mov x6, #0xfffffffeffffffff __LF \ adds x1, x12, #0x1 __LF \ sbcs x3, x13, x11 __LF \ adcs x4, x14, xzr __LF \ sbcs x5, x0, x6 __LF \ sbcs xzr, x8, xzr __LF \ csel x12, x12, x1, cc __LF \ csel x13, x13, x3, cc __LF \ csel x14, x14, x4, cc __LF \ csel x0, x0, x5, cc __LF \ stp x12, x13, [P0] __LF \ stp x14, x0, [P0+16] // Corresponds to bignum_montsqr_sm2_alt exactly #define montsqr_sm2(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x9, x2, x3 __LF \ umulh x10, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x11, x2, x5 __LF \ umulh x12, x2, x5 __LF \ mul x6, x2, x4 __LF \ umulh x7, x2, x4 __LF \ adds x10, x10, x6 __LF \ adcs x11, x11, x7 __LF \ mul x6, x3, x4 __LF \ umulh x7, x3, x4 __LF \ adc x7, x7, xzr __LF \ adds x11, x11, x6 __LF \ mul x13, x4, x5 __LF \ umulh x14, x4, x5 __LF \ adcs x12, x12, x7 __LF \ mul x6, x3, x5 __LF \ umulh x7, x3, x5 __LF \ adc x7, x7, xzr __LF \ adds x12, x12, x6 __LF \ adcs x13, x13, x7 __LF \ adc x14, x14, xzr __LF \ adds x9, x9, x9 __LF \ adcs x10, x10, x10 __LF \ adcs x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, x14 __LF \ cset x7, cs __LF \ umulh x6, x2, x2 __LF \ mul x8, x2, x2 __LF \ adds x9, x9, x6 __LF \ mul x6, x3, x3 __LF \ adcs x10, x10, x6 __LF \ umulh x6, x3, x3 __LF \ adcs x11, x11, x6 __LF \ mul x6, x4, x4 __LF \ adcs x12, x12, x6 __LF \ umulh x6, x4, x4 __LF \ adcs x13, x13, x6 __LF \ mul x6, x5, x5 __LF \ adcs x14, x14, x6 __LF \ umulh x6, x5, x5 __LF \ adc x7, x7, x6 __LF \ lsl x4, x8, #32 __LF \ lsr x5, x8, #32 __LF \ subs x2, x4, x8 __LF \ sbc x3, x5, xzr __LF \ subs x9, x9, x2 __LF \ sbcs x10, x10, x3 __LF \ sbcs x11, x11, x4 __LF \ sbc x8, x8, x5 __LF \ lsl x4, x9, #32 __LF \ lsr x5, x9, #32 __LF \ subs x2, x4, x9 __LF \ sbc x3, x5, xzr __LF \ subs x10, x10, x2 __LF \ sbcs x11, x11, x3 __LF \ sbcs x8, x8, x4 __LF \ sbc x9, x9, x5 __LF \ lsl x4, x10, #32 __LF \ lsr x5, x10, #32 __LF \ subs x2, x4, x10 __LF \ sbc x3, x5, xzr __LF \ subs x11, x11, x2 __LF \ sbcs x8, x8, x3 __LF \ sbcs x9, x9, x4 __LF \ sbc x10, x10, x5 __LF \ lsl x4, x11, #32 __LF \ lsr x5, x11, #32 __LF \ subs x2, x4, x11 __LF \ sbc x3, x5, xzr __LF \ subs x8, x8, x2 __LF \ sbcs x9, x9, x3 __LF \ sbcs x10, x10, x4 __LF \ sbc x11, x11, x5 __LF \ adds x8, x8, x12 __LF \ adcs x9, x9, x13 __LF \ adcs x10, x10, x14 __LF \ adcs x11, x11, x7 __LF \ cset x2, cs 
__LF \ mov x3, #0xffffffff00000000 __LF \ mov x5, #0xfffffffeffffffff __LF \ adds x12, x8, #0x1 __LF \ sbcs x13, x9, x3 __LF \ adcs x14, x10, xzr __LF \ sbcs x7, x11, x5 __LF \ sbcs xzr, x2, xzr __LF \ csel x8, x8, x12, cc __LF \ csel x9, x9, x13, cc __LF \ csel x10, x10, x14, cc __LF \ csel x11, x11, x7, cc __LF \ stp x8, x9, [P0] __LF \ stp x10, x11, [P0+16] // Almost-Montgomery variant which we use when an input to other muls // with the other argument fully reduced (which is always safe). #define amontsqr_sm2(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x9, x2, x3 __LF \ umulh x10, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x11, x2, x5 __LF \ umulh x12, x2, x5 __LF \ mul x6, x2, x4 __LF \ umulh x7, x2, x4 __LF \ adds x10, x10, x6 __LF \ adcs x11, x11, x7 __LF \ mul x6, x3, x4 __LF \ umulh x7, x3, x4 __LF \ adc x7, x7, xzr __LF \ adds x11, x11, x6 __LF \ mul x13, x4, x5 __LF \ umulh x14, x4, x5 __LF \ adcs x12, x12, x7 __LF \ mul x6, x3, x5 __LF \ umulh x7, x3, x5 __LF \ adc x7, x7, xzr __LF \ adds x12, x12, x6 __LF \ adcs x13, x13, x7 __LF \ adc x14, x14, xzr __LF \ adds x9, x9, x9 __LF \ adcs x10, x10, x10 __LF \ adcs x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, x14 __LF \ cset x7, cs __LF \ umulh x6, x2, x2 __LF \ mul x8, x2, x2 __LF \ adds x9, x9, x6 __LF \ mul x6, x3, x3 __LF \ adcs x10, x10, x6 __LF \ umulh x6, x3, x3 __LF \ adcs x11, x11, x6 __LF \ mul x6, x4, x4 __LF \ adcs x12, x12, x6 __LF \ umulh x6, x4, x4 __LF \ adcs x13, x13, x6 __LF \ mul x6, x5, x5 __LF \ adcs x14, x14, x6 __LF \ umulh x6, x5, x5 __LF \ adc x7, x7, x6 __LF \ lsl x4, x8, #32 __LF \ lsr x5, x8, #32 __LF \ subs x2, x4, x8 __LF \ sbc x3, x5, xzr __LF \ subs x9, x9, x2 __LF \ sbcs x10, x10, x3 __LF \ sbcs x11, x11, x4 __LF \ sbc x8, x8, x5 __LF \ lsl x4, x9, #32 __LF \ lsr x5, x9, #32 __LF \ subs x2, x4, x9 __LF \ sbc x3, x5, xzr __LF \ subs x10, x10, x2 __LF \ sbcs x11, x11, x3 __LF \ sbcs x8, x8, x4 __LF \ sbc x9, x9, x5 __LF \ lsl x4, x10, #32 __LF \ lsr x5, x10, #32 __LF \ subs x2, x4, x10 __LF \ sbc x3, x5, xzr __LF \ subs x11, x11, x2 __LF \ sbcs x8, x8, x3 __LF \ sbcs x9, x9, x4 __LF \ sbc x10, x10, x5 __LF \ lsl x4, x11, #32 __LF \ lsr x5, x11, #32 __LF \ subs x2, x4, x11 __LF \ sbc x3, x5, xzr __LF \ subs x8, x8, x2 __LF \ sbcs x9, x9, x3 __LF \ sbcs x10, x10, x4 __LF \ sbc x11, x11, x5 __LF \ adds x8, x8, x12 __LF \ adcs x9, x9, x13 __LF \ adcs x10, x10, x14 __LF \ adcs x11, x11, x7 __LF \ csetm x2, cs __LF \ subs x8, x8, x2 __LF \ and x3, x2, #0xffffffff00000000 __LF \ sbcs x9, x9, x3 __LF \ and x5, x2, #0xfffffffeffffffff __LF \ sbcs x10, x10, x2 __LF \ sbc x11, x11, x5 __LF \ stp x8, x9, [P0] __LF \ stp x10, x11, [P0+16] // Corresponds exactly to bignum_sub_sm2 #define sub_sm2(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ csetm x3, cc __LF \ adds x5, x5, x3 __LF \ and x4, x3, #0xffffffff00000000 __LF \ adcs x6, x6, x4 __LF \ adcs x7, x7, x3 __LF \ and x4, x3, #0xfffffffeffffffff __LF \ adc x8, x8, x4 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] S2N_BN_SYMBOL(sm2_montjmixadd_alt): CFI_START // Make room on stack for temporary variables // Move the input arguments to stable places CFI_DEC_SP(NSPACE) mov input_z, x0 mov input_x, x1 mov input_y, x2 // Main code, just a sequence of basic field operations // 8 * multiply + 3 * square + 7 * subtract amontsqr_sm2(zp2,z_1) montmul_sm2(y2a,z_1,y_2) 
montmul_sm2(x2a,zp2,x_2) montmul_sm2(y2a,zp2,y2a) sub_sm2(xd,x2a,x_1) sub_sm2(yd,y2a,y_1) amontsqr_sm2(zz,xd) montsqr_sm2(ww,yd) montmul_sm2(zzx1,zz,x_1) montmul_sm2(zzx2,zz,x2a) sub_sm2(resx,ww,zzx1) sub_sm2(t1,zzx2,zzx1) montmul_sm2(resz,xd,z_1) sub_sm2(resx,resx,zzx2) sub_sm2(t2,zzx1,resx) montmul_sm2(t1,t1,y_1) montmul_sm2(t2,yd,t2) sub_sm2(resy,t2,t1) // Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence) ldp x0, x1, [z_1] ldp x2, x3, [z_1+16] orr x4, x0, x1 orr x5, x2, x3 orr x4, x4, x5 cmp x4, xzr // Multiplex: if p1 <> 0 just copy the computed result from the staging area. // If p1 = 0 then return the point p2 augmented with a z = 1 coordinate (in // Montgomery form so not the simple constant 1 but rather 2^256 - p_sm2), // hence giving 0 + p2 = p2 for the final result. ldp x0, x1, [resx] ldp x12, x13, [x_2] csel x0, x0, x12, ne csel x1, x1, x13, ne ldp x2, x3, [resx+16] ldp x12, x13, [x_2+16] csel x2, x2, x12, ne csel x3, x3, x13, ne ldp x4, x5, [resy] ldp x12, x13, [y_2] csel x4, x4, x12, ne csel x5, x5, x13, ne ldp x6, x7, [resy+16] ldp x12, x13, [y_2+16] csel x6, x6, x12, ne csel x7, x7, x13, ne ldp x8, x9, [resz] mov x12, #0x0000000000000001 mov x13, #0x00000000ffffffff csel x8, x8, x12, ne csel x9, x9, x13, ne ldp x10, x11, [resz+16] mov x13, #0x0000000100000000 csel x10, x10, xzr, ne csel x11, x11, x13, ne stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [y_3] stp x6, x7, [y_3+16] stp x8, x9, [z_3] stp x10, x11, [z_3+16] // Restore stack and return CFI_INC_SP(NSPACE) CFI_RET S2N_BN_SIZE_DIRECTIVE(sm2_montjmixadd_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
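The header of sm2_montjmixadd_alt.S summarizes the schedule as 8 multiplies + 3 squarings + 7 subtractions. As a cross-check of the formula only (not of the Montgomery encoding or the constant-time multiplexing), here is a sketch of ours in C with GMP, mirroring the macro sequence over ordinary residues mod p_sm2; the mulm/subm helper names are ours, not s2n-bignum API.

#include <gmp.h>

static void mulm(mpz_t r, const mpz_t a, const mpz_t b, const mpz_t p)
{ mpz_mul(r, a, b); mpz_mod(r, r, p); }
static void subm(mpz_t r, const mpz_t a, const mpz_t b, const mpz_t p)
{ mpz_sub(r, a, b); mpz_mod(r, r, p); }

/* Outputs are assumed to be initialized mpz_t, distinct from the inputs. */
void jmixadd_sketch(mpz_t x3, mpz_t y3, mpz_t z3,
                    const mpz_t x1, const mpz_t y1, const mpz_t z1,
                    const mpz_t x2, const mpz_t y2, const mpz_t p)
{
    mpz_t zp2, x2a, y2a, xd, yd, zz, ww, zzx1, zzx2, t1, t2;
    mpz_inits(zp2, x2a, y2a, xd, yd, zz, ww, zzx1, zzx2, t1, t2, NULL);
    mulm(zp2, z1, z1, p);       /* zp2 = z1^2 */
    mulm(y2a, z1, y2, p);
    mulm(x2a, zp2, x2, p);      /* x2a = x2 * z1^2 */
    mulm(y2a, zp2, y2a, p);     /* y2a = y2 * z1^3 */
    subm(xd, x2a, x1, p);
    subm(yd, y2a, y1, p);
    mulm(zz, xd, xd, p);
    mulm(ww, yd, yd, p);
    mulm(zzx1, zz, x1, p);
    mulm(zzx2, zz, x2a, p);
    subm(x3, ww, zzx1, p);
    subm(t1, zzx2, zzx1, p);
    mulm(z3, xd, z1, p);
    subm(x3, x3, zzx2, p);
    subm(t2, zzx1, x3, p);
    mulm(t1, t1, y1, p);
    mulm(t2, yd, t2, p);
    subm(y3, t2, t1, p);
    mpz_clears(zp2, x2a, y2a, xd, yd, zz, ww, zzx1, zzx2, t1, t2, NULL);
    /* The z1 = 0 (p1 at infinity) multiplexing at the end of the assembly
       is deliberately omitted here. */
}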
wlsfx/bnbb
25,660
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/sm2_montjdouble_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point doubling on GM/T 0003-2012 curve SM2 in Montgomery-Jacobian coordinates // // extern void sm2_montjdouble_alt(uint64_t p3[static 12], // const uint64_t p1[static 12]); // // Does p3 := 2 * p1 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^256 * x) mod p_sm2. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // // Standard ARM ABI: X0 = p3, X1 = p1 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjdouble_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjdouble_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjdouble_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 32 // Stable homes for input arguments during main code sequence #define input_z x15 #define input_x x16 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // #NSPACE is the total stack needed for these temporaries #define z2 sp, #(NUMSIZE*0) #define y4 sp, #(NUMSIZE*0) #define y2 sp, #(NUMSIZE*1) #define t1 sp, #(NUMSIZE*2) #define t2 sp, #(NUMSIZE*3) #define x2p sp, #(NUMSIZE*3) #define dx2 sp, #(NUMSIZE*3) #define xy2 sp, #(NUMSIZE*4) #define x4p sp, #(NUMSIZE*5) #define d sp, #(NUMSIZE*5) #define NSPACE NUMSIZE*6 // Corresponds to bignum_montmul_sm2_alt except for registers #define montmul_sm2(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x7, x8, [P2] __LF \ mul x12, x3, x7 __LF \ umulh x13, x3, x7 __LF \ mul x11, x3, x8 __LF \ umulh x14, x3, x8 __LF \ adds x13, x13, x11 __LF \ ldp x9, x10, [P2+16] __LF \ mul x11, x3, x9 __LF \ umulh x0, x3, x9 __LF \ adcs x14, x14, x11 __LF \ mul x11, x3, x10 __LF \ umulh x1, x3, x10 __LF \ adcs x0, x0, x11 __LF \ adc x1, x1, xzr __LF \ ldp x5, x6, [P1+16] __LF \ mul x11, x4, x7 __LF \ adds x13, x13, x11 __LF \ mul x11, x4, x8 __LF \ adcs x14, x14, x11 __LF \ mul x11, x4, x9 __LF \ adcs x0, x0, x11 __LF \ mul x11, x4, x10 __LF \ adcs x1, x1, x11 __LF \ umulh x3, x4, x10 __LF \ adc x3, x3, xzr __LF \ umulh x11, x4, x7 __LF \ adds x14, x14, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x0, x0, x11 __LF \ umulh x11, x4, x9 __LF \ adcs x1, x1, x11 __LF \ adc x3, x3, xzr __LF \ mul x11, x5, x7 __LF \ adds x14, x14, x11 __LF \ mul x11, x5, x8 __LF \ adcs x0, x0, x11 __LF \ mul x11, x5, x9 __LF \ adcs x1, x1, x11 __LF \ mul x11, x5, x10 __LF \ adcs x3, x3, x11 __LF \ umulh x4, x5, x10 __LF \ adc x4, x4, xzr __LF \ umulh x11, x5, x7 __LF \ adds x0, x0, x11 __LF \ umulh x11, x5, x8 __LF \ adcs x1, x1, x11 __LF \ umulh x11, x5, x9 __LF \ adcs x3, x3, x11 __LF \ adc x4, x4, xzr __LF \ mul x11, x6, x7 __LF \ adds x0, x0, x11 __LF \ mul x11, x6, x8 __LF \ adcs x1, x1, x11 __LF \ mul x11, x6, x9 __LF \ adcs x3, x3, x11 __LF \ mul x11, x6, x10 __LF \ adcs x4, x4, x11 __LF \ umulh x5, x6, x10 __LF \ adc x5, x5, xzr __LF \ umulh x11, x6, x7 __LF \ adds x1, x1, x11 __LF \ umulh x11, x6, x8 __LF \ adcs x3, x3, x11 __LF \ umulh x11, x6, x9 __LF \ adcs x4, x4, x11 __LF \ adc x5, x5, xzr __LF \ lsl x11, x12, #32 __LF \ lsr x6, x12, #32 __LF \ subs x8, x11, x12 __LF \ sbc x7, x6, xzr 
__LF \ subs x13, x13, x8 __LF \ sbcs x14, x14, x7 __LF \ sbcs x0, x0, x11 __LF \ sbc x12, x12, x6 __LF \ lsl x11, x13, #32 __LF \ lsr x6, x13, #32 __LF \ subs x8, x11, x13 __LF \ sbc x7, x6, xzr __LF \ subs x14, x14, x8 __LF \ sbcs x0, x0, x7 __LF \ sbcs x12, x12, x11 __LF \ sbc x13, x13, x6 __LF \ lsl x11, x14, #32 __LF \ lsr x6, x14, #32 __LF \ subs x8, x11, x14 __LF \ sbc x7, x6, xzr __LF \ subs x0, x0, x8 __LF \ sbcs x12, x12, x7 __LF \ sbcs x13, x13, x11 __LF \ sbc x14, x14, x6 __LF \ lsl x11, x0, #32 __LF \ lsr x6, x0, #32 __LF \ subs x8, x11, x0 __LF \ sbc x7, x6, xzr __LF \ subs x12, x12, x8 __LF \ sbcs x13, x13, x7 __LF \ sbcs x14, x14, x11 __LF \ sbc x0, x0, x6 __LF \ adds x12, x12, x1 __LF \ adcs x13, x13, x3 __LF \ adcs x14, x14, x4 __LF \ adcs x0, x0, x5 __LF \ cset x8, cs __LF \ mov x11, #0xffffffff00000000 __LF \ mov x6, #0xfffffffeffffffff __LF \ adds x1, x12, #0x1 __LF \ sbcs x3, x13, x11 __LF \ adcs x4, x14, xzr __LF \ sbcs x5, x0, x6 __LF \ sbcs xzr, x8, xzr __LF \ csel x12, x12, x1, cc __LF \ csel x13, x13, x3, cc __LF \ csel x14, x14, x4, cc __LF \ csel x0, x0, x5, cc __LF \ stp x12, x13, [P0] __LF \ stp x14, x0, [P0+16] // Corresponds to bignum_montsqr_sm2_alt exactly #define montsqr_sm2(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x9, x2, x3 __LF \ umulh x10, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x11, x2, x5 __LF \ umulh x12, x2, x5 __LF \ mul x6, x2, x4 __LF \ umulh x7, x2, x4 __LF \ adds x10, x10, x6 __LF \ adcs x11, x11, x7 __LF \ mul x6, x3, x4 __LF \ umulh x7, x3, x4 __LF \ adc x7, x7, xzr __LF \ adds x11, x11, x6 __LF \ mul x13, x4, x5 __LF \ umulh x14, x4, x5 __LF \ adcs x12, x12, x7 __LF \ mul x6, x3, x5 __LF \ umulh x7, x3, x5 __LF \ adc x7, x7, xzr __LF \ adds x12, x12, x6 __LF \ adcs x13, x13, x7 __LF \ adc x14, x14, xzr __LF \ adds x9, x9, x9 __LF \ adcs x10, x10, x10 __LF \ adcs x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, x14 __LF \ cset x7, cs __LF \ umulh x6, x2, x2 __LF \ mul x8, x2, x2 __LF \ adds x9, x9, x6 __LF \ mul x6, x3, x3 __LF \ adcs x10, x10, x6 __LF \ umulh x6, x3, x3 __LF \ adcs x11, x11, x6 __LF \ mul x6, x4, x4 __LF \ adcs x12, x12, x6 __LF \ umulh x6, x4, x4 __LF \ adcs x13, x13, x6 __LF \ mul x6, x5, x5 __LF \ adcs x14, x14, x6 __LF \ umulh x6, x5, x5 __LF \ adc x7, x7, x6 __LF \ lsl x4, x8, #32 __LF \ lsr x5, x8, #32 __LF \ subs x2, x4, x8 __LF \ sbc x3, x5, xzr __LF \ subs x9, x9, x2 __LF \ sbcs x10, x10, x3 __LF \ sbcs x11, x11, x4 __LF \ sbc x8, x8, x5 __LF \ lsl x4, x9, #32 __LF \ lsr x5, x9, #32 __LF \ subs x2, x4, x9 __LF \ sbc x3, x5, xzr __LF \ subs x10, x10, x2 __LF \ sbcs x11, x11, x3 __LF \ sbcs x8, x8, x4 __LF \ sbc x9, x9, x5 __LF \ lsl x4, x10, #32 __LF \ lsr x5, x10, #32 __LF \ subs x2, x4, x10 __LF \ sbc x3, x5, xzr __LF \ subs x11, x11, x2 __LF \ sbcs x8, x8, x3 __LF \ sbcs x9, x9, x4 __LF \ sbc x10, x10, x5 __LF \ lsl x4, x11, #32 __LF \ lsr x5, x11, #32 __LF \ subs x2, x4, x11 __LF \ sbc x3, x5, xzr __LF \ subs x8, x8, x2 __LF \ sbcs x9, x9, x3 __LF \ sbcs x10, x10, x4 __LF \ sbc x11, x11, x5 __LF \ adds x8, x8, x12 __LF \ adcs x9, x9, x13 __LF \ adcs x10, x10, x14 __LF \ adcs x11, x11, x7 __LF \ cset x2, cs __LF \ mov x3, #0xffffffff00000000 __LF \ mov x5, #0xfffffffeffffffff __LF \ adds x12, x8, #0x1 __LF \ sbcs x13, x9, x3 __LF \ adcs x14, x10, xzr __LF \ sbcs x7, x11, x5 __LF \ sbcs xzr, x2, xzr __LF \ csel x8, x8, x12, cc __LF \ csel x9, x9, x13, cc __LF \ csel x10, x10, x14, cc __LF \ csel x11, x11, x7, cc __LF \ stp x8, x9, [P0] __LF \ stp x10, x11, [P0+16] // 
Corresponds exactly to bignum_sub_sm2 #define sub_sm2(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ csetm x3, cc __LF \ adds x5, x5, x3 __LF \ and x4, x3, #0xffffffff00000000 __LF \ adcs x6, x6, x4 __LF \ adcs x7, x7, x3 __LF \ and x4, x3, #0xfffffffeffffffff __LF \ adc x8, x8, x4 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] // Corresponds exactly to bignum_add_sm2 #define add_sm2(P0,P1,P2) \ ldp x4, x5, [P1] __LF \ ldp x8, x9, [P2] __LF \ adds x4, x4, x8 __LF \ adcs x5, x5, x9 __LF \ ldp x6, x7, [P1+16] __LF \ ldp x10, x11, [P2+16] __LF \ adcs x6, x6, x10 __LF \ adcs x7, x7, x11 __LF \ adc x3, xzr, xzr __LF \ adds x8, x4, #0x1 __LF \ mov x9, #0xffffffff00000000 __LF \ sbcs x9, x5, x9 __LF \ adcs x10, x6, xzr __LF \ mov x11, #0xfffffffeffffffff __LF \ sbcs x11, x7, x11 __LF \ sbcs x3, x3, xzr __LF \ csel x4, x4, x8, cc __LF \ csel x5, x5, x9, cc __LF \ csel x6, x6, x10, cc __LF \ csel x7, x7, x11, cc __LF \ stp x4, x5, [P0] __LF \ stp x6, x7, [P0+16] // A weak version of add that only guarantees sum in 4 digits #define weakadd_sm2(P0,P1,P2) \ ldp x4, x5, [P1] __LF \ ldp x8, x9, [P2] __LF \ adds x4, x4, x8 __LF \ adcs x5, x5, x9 __LF \ ldp x6, x7, [P1+16] __LF \ ldp x10, x11, [P2+16] __LF \ adcs x6, x6, x10 __LF \ adcs x7, x7, x11 __LF \ csetm x2, cs __LF \ subs x4, x4, x2 __LF \ and x3, x2, #0xffffffff00000000 __LF \ sbcs x5, x5, x3 __LF \ and x1, x2, #0xfffffffeffffffff __LF \ sbcs x6, x6, x2 __LF \ sbc x7, x7, x1 __LF \ stp x4, x5, [P0] __LF \ stp x6, x7, [P0+16] // P0 = C * P1 - D * P2 computed as D * (p_sm2 - P2) + C * P1 // Quotient estimation is done just as q = h + 1 as in bignum_triple_sm2 // This also applies to the other functions following. #define cmsub_sm2(P0,C,P1,D,P2) \ mov x1, D __LF \ mov x2, #-1 __LF \ ldp x9, x10, [P2] __LF \ subs x9, x2, x9 __LF \ mov x3, #0xffffffff00000000 __LF \ sbcs x10, x3, x10 __LF \ ldp x11, x12, [P2+16] __LF \ sbcs x11, x2, x11 __LF \ mov x4, #0xfffffffeffffffff __LF \ sbc x12, x4, x12 __LF \ mul x3, x1, x9 __LF \ mul x4, x1, x10 __LF \ mul x5, x1, x11 __LF \ mul x6, x1, x12 __LF \ umulh x9, x1, x9 __LF \ umulh x10, x1, x10 __LF \ umulh x11, x1, x11 __LF \ umulh x7, x1, x12 __LF \ adds x4, x4, x9 __LF \ adcs x5, x5, x10 __LF \ adcs x6, x6, x11 __LF \ adc x7, x7, xzr __LF \ mov x1, C __LF \ ldp x9, x10, [P1] __LF \ mul x8, x9, x1 __LF \ umulh x9, x9, x1 __LF \ adds x3, x3, x8 __LF \ mul x8, x10, x1 __LF \ umulh x10, x10, x1 __LF \ adcs x4, x4, x8 __LF \ ldp x11, x12, [P1+16] __LF \ mul x8, x11, x1 __LF \ umulh x11, x11, x1 __LF \ adcs x5, x5, x8 __LF \ mul x8, x12, x1 __LF \ umulh x12, x12, x1 __LF \ adcs x6, x6, x8 __LF \ adc x7, x7, xzr __LF \ adds x4, x4, x9 __LF \ adcs x5, x5, x10 __LF \ adcs x6, x6, x11 __LF \ adc x7, x7, x12 __LF \ add x7, x7, #0x1 __LF \ lsl x8, x7, #32 __LF \ sub x9, x8, x7 __LF \ adds x3, x3, x7 __LF \ adcs x4, x4, x9 __LF \ adcs x5, x5, xzr __LF \ adcs x6, x6, x8 __LF \ csetm x7, cc __LF \ adds x3, x3, x7 __LF \ and x9, x7, #0xffffffff00000000 __LF \ adcs x4, x4, x9 __LF \ adcs x5, x5, x7 __LF \ and x8, x7, #0xfffffffeffffffff __LF \ adc x6, x6, x8 __LF \ stp x3, x4, [P0] __LF \ stp x5, x6, [P0+16] // P0 = 4 * P1 - P2, by direct subtraction of P2; the method // in bignum_cmul_sm2 etc. for quotient estimation still // works when the value to be reduced is negative, as // long as it is > -p_sm2, which is the case here. 
#define cmsub41_sm2(P0,P1,P2) \ ldp x1, x2, [P1] __LF \ lsl x0, x1, #2 __LF \ ldp x6, x7, [P2] __LF \ subs x0, x0, x6 __LF \ extr x1, x2, x1, #62 __LF \ sbcs x1, x1, x7 __LF \ ldp x3, x4, [P1+16] __LF \ extr x2, x3, x2, #62 __LF \ ldp x6, x7, [P2+16] __LF \ sbcs x2, x2, x6 __LF \ extr x3, x4, x3, #62 __LF \ sbcs x3, x3, x7 __LF \ lsr x4, x4, #62 __LF \ sbc x4, x4, xzr __LF \ add x4, x4, #0x1 __LF \ lsl x5, x4, #32 __LF \ sub x6, x5, x4 __LF \ adds x0, x0, x4 __LF \ adcs x1, x1, x6 __LF \ adcs x2, x2, xzr __LF \ adcs x3, x3, x5 __LF \ csetm x4, cc __LF \ adds x0, x0, x4 __LF \ and x6, x4, #0xffffffff00000000 __LF \ adcs x1, x1, x6 __LF \ adcs x2, x2, x4 __LF \ and x5, x4, #0xfffffffeffffffff __LF \ adc x3, x3, x5 __LF \ stp x0, x1, [P0] __LF \ stp x2, x3, [P0+16] // P0 = 3 * P1 - 8 * P2, computed as (p_sm2 - P2) << 3 + 3 * P1 #define cmsub38_sm2(P0,P1,P2) \ mov x1, 8 __LF \ mov x2, #-1 __LF \ ldp x9, x10, [P2] __LF \ subs x9, x2, x9 __LF \ mov x3, #0xffffffff00000000 __LF \ sbcs x10, x3, x10 __LF \ ldp x11, x12, [P2+16] __LF \ sbcs x11, x2, x11 __LF \ mov x4, #0xfffffffeffffffff __LF \ sbc x12, x4, x12 __LF \ lsl x3, x9, #3 __LF \ extr x4, x10, x9, #61 __LF \ extr x5, x11, x10, #61 __LF \ extr x6, x12, x11, #61 __LF \ lsr x7, x12, #61 __LF \ mov x1, 3 __LF \ ldp x9, x10, [P1] __LF \ mul x8, x9, x1 __LF \ umulh x9, x9, x1 __LF \ adds x3, x3, x8 __LF \ mul x8, x10, x1 __LF \ umulh x10, x10, x1 __LF \ adcs x4, x4, x8 __LF \ ldp x11, x12, [P1+16] __LF \ mul x8, x11, x1 __LF \ umulh x11, x11, x1 __LF \ adcs x5, x5, x8 __LF \ mul x8, x12, x1 __LF \ umulh x12, x12, x1 __LF \ adcs x6, x6, x8 __LF \ adc x7, x7, xzr __LF \ adds x4, x4, x9 __LF \ adcs x5, x5, x10 __LF \ adcs x6, x6, x11 __LF \ adc x7, x7, x12 __LF \ add x7, x7, #0x1 __LF \ lsl x8, x7, #32 __LF \ sub x9, x8, x7 __LF \ adds x3, x3, x7 __LF \ adcs x4, x4, x9 __LF \ adcs x5, x5, xzr __LF \ adcs x6, x6, x8 __LF \ csetm x7, cc __LF \ adds x3, x3, x7 __LF \ and x9, x7, #0xffffffff00000000 __LF \ adcs x4, x4, x9 __LF \ adcs x5, x5, x7 __LF \ and x8, x7, #0xfffffffeffffffff __LF \ adc x6, x6, x8 __LF \ stp x3, x4, [P0] __LF \ stp x5, x6, [P0+16] S2N_BN_SYMBOL(sm2_montjdouble_alt): CFI_START // Make room on stack for temporary variables CFI_DEC_SP(NSPACE) // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 // Main code, just a sequence of basic field operations // z2 = z^2 // y2 = y^2 montsqr_sm2(z2,z_1) montsqr_sm2(y2,y_1) // x2p = x^2 - z^4 = (x + z^2) * (x - z^2) sub_sm2(t2,x_1,z2) weakadd_sm2(t1,x_1,z2) montmul_sm2(x2p,t1,t2) // t1 = y + z // xy2 = x * y^2 // x4p = x2p^2 add_sm2(t1,y_1,z_1) montmul_sm2(xy2,x_1,y2) montsqr_sm2(x4p,x2p) // t1 = (y + z)^2 montsqr_sm2(t1,t1) // d = 12 * xy2 - 9 * x4p // t1 = y^2 + 2 * y * z cmsub_sm2(d,12,xy2,9,x4p) sub_sm2(t1,t1,z2) // y4 = y^4 montsqr_sm2(y4,y2) // dx2 = d * x2p montmul_sm2(dx2,d,x2p) // z_3' = 2 * y * z sub_sm2(z_3,t1,y2) // x' = 4 * xy2 - d cmsub41_sm2(x_3,xy2,d) // y' = 3 * dx2 - 8 * y4 cmsub38_sm2(y_3,dx2,y4) // Restore stack and return CFI_INC_SP(NSPACE) CFI_RET S2N_BN_SIZE_DIRECTIVE(sm2_montjdouble_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
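Similarly, the doubling schedule above (shared by sm2_montjdouble.S below) can be cross-checked over ordinary residues mod p_sm2. This is a GMP sketch of ours, with the same mulm/subm helpers as in the previous sketch plus an addm in the same style; the 12/9, 4/1 and 3/8 constants come straight from the cmsub macros.

#include <gmp.h>

static void mulm(mpz_t r, const mpz_t a, const mpz_t b, const mpz_t p)
{ mpz_mul(r, a, b); mpz_mod(r, r, p); }
static void subm(mpz_t r, const mpz_t a, const mpz_t b, const mpz_t p)
{ mpz_sub(r, a, b); mpz_mod(r, r, p); }
static void addm(mpz_t r, const mpz_t a, const mpz_t b, const mpz_t p)
{ mpz_add(r, a, b); mpz_mod(r, r, p); }

/* Outputs are assumed to be initialized mpz_t, distinct from the inputs. */
void jdouble_sketch(mpz_t x3, mpz_t y3, mpz_t z3,
                    const mpz_t x1, const mpz_t y1, const mpz_t z1,
                    const mpz_t p)
{
    mpz_t z2, y2, t1, t2, x2p, xy2, x4p, d, dx2, y4;
    mpz_inits(z2, y2, t1, t2, x2p, xy2, x4p, d, dx2, y4, NULL);
    mulm(z2, z1, z1, p);        /* z2 = z^2 */
    mulm(y2, y1, y1, p);        /* y2 = y^2 */
    subm(t2, x1, z2, p);
    addm(t1, x1, z2, p);
    mulm(x2p, t1, t2, p);       /* x2p = x^2 - z^4 */
    addm(t1, y1, z1, p);        /* t1 = y + z */
    mulm(xy2, x1, y2, p);       /* xy2 = x * y^2 */
    mulm(x4p, x2p, x2p, p);     /* x4p = x2p^2 */
    mulm(t1, t1, t1, p);        /* t1 = (y + z)^2 */
    mpz_mul_ui(d, xy2, 12);     /* d = 12 * xy2 - 9 * x4p */
    mpz_submul_ui(d, x4p, 9);
    mpz_mod(d, d, p);
    subm(t1, t1, z2, p);        /* t1 = y^2 + 2*y*z */
    mulm(y4, y2, y2, p);        /* y4 = y^4 */
    mulm(dx2, d, x2p, p);       /* dx2 = d * x2p */
    subm(z3, t1, y2, p);        /* z' = 2*y*z */
    mpz_mul_ui(x3, xy2, 4);     /* x' = 4 * xy2 - d */
    mpz_sub(x3, x3, d);
    mpz_mod(x3, x3, p);
    mpz_mul_ui(y3, dx2, 3);     /* y' = 3 * dx2 - 8 * y4 */
    mpz_submul_ui(y3, y4, 8);
    mpz_mod(y3, y3, p);
    mpz_clears(z2, y2, t1, t2, x2p, xy2, x4p, d, dx2, y4, NULL);
}

With m = 3*(x^2 - z^4) this is the usual a = -3 Jacobian doubling: x' = 4*xy2 - d = m^2 - 8*x*y^2, and y' = 3*dx2 - 8*y4 = m*(4*x*y^2 - x') - 8*y^4.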
wlsfx/bnbb
30,184
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/sm2_montjdouble.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point doubling on GM/T 0003-2012 curve SM2 in Montgomery-Jacobian coordinates // // extern void sm2_montjdouble(uint64_t p3[static 12], // const uint64_t p1[static 12]); // // Does p3 := 2 * p1 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^256 * x) mod p_sm2. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // // Standard ARM ABI: X0 = p3, X1 = p1 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjdouble) S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjdouble) S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjdouble) .text .balign 4 // Size of individual field elements #define NUMSIZE 32 // Stable homes for input arguments during main code sequence #define input_z x19 #define input_x x20 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // #NSPACE is the total stack needed for these temporaries #define z2 sp, #(NUMSIZE*0) #define y4 sp, #(NUMSIZE*0) #define y2 sp, #(NUMSIZE*1) #define t1 sp, #(NUMSIZE*2) #define t2 sp, #(NUMSIZE*3) #define x2p sp, #(NUMSIZE*3) #define dx2 sp, #(NUMSIZE*3) #define xy2 sp, #(NUMSIZE*4) #define x4p sp, #(NUMSIZE*5) #define d_ sp, #(NUMSIZE*5) #define NSPACE NUMSIZE*6 // Corresponds to bignum_montmul_sm2 exactly #define montmul_sm2(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x5, x6, [P1+16] __LF \ ldp x7, x8, [P2] __LF \ ldp x9, x10, [P2+16] __LF \ mul x11, x3, x7 __LF \ mul x13, x4, x8 __LF \ umulh x12, x3, x7 __LF \ adds x16, x11, x13 __LF \ umulh x14, x4, x8 __LF \ adcs x17, x12, x14 __LF \ adcs x14, x14, xzr __LF \ adds x12, x12, x16 __LF \ adcs x13, x13, x17 __LF \ adcs x14, x14, xzr __LF \ subs x15, x3, x4 __LF \ cneg x15, x15, lo __LF \ csetm x1, lo __LF \ subs x17, x8, x7 __LF \ cneg x17, x17, lo __LF \ mul x16, x15, x17 __LF \ umulh x17, x15, x17 __LF \ cinv x1, x1, lo __LF \ eor x16, x16, x1 __LF \ eor x17, x17, x1 __LF \ cmn x1, #1 __LF \ adcs x12, x12, x16 __LF \ adcs x13, x13, x17 __LF \ adc x14, x14, x1 __LF \ lsl x16, x11, #32 __LF \ lsr x15, x11, #32 __LF \ subs x1, x16, x11 __LF \ sbc x17, x15, xzr __LF \ subs x12, x12, x1 __LF \ sbcs x13, x13, x17 __LF \ sbcs x14, x14, x16 __LF \ sbc x11, x11, x15 __LF \ lsl x16, x12, #32 __LF \ lsr x15, x12, #32 __LF \ subs x1, x16, x12 __LF \ sbc x17, x15, xzr __LF \ subs x13, x13, x1 __LF \ sbcs x14, x14, x17 __LF \ sbcs x11, x11, x16 __LF \ sbc x12, x12, x15 __LF \ stp x13, x14, [P0] __LF \ stp x11, x12, [P0+16] __LF \ mul x11, x5, x9 __LF \ mul x13, x6, x10 __LF \ umulh x12, x5, x9 __LF \ adds x16, x11, x13 __LF \ umulh x14, x6, x10 __LF \ adcs x17, x12, x14 __LF \ adcs x14, x14, xzr __LF \ adds x12, x12, x16 __LF \ adcs x13, x13, x17 __LF \ adcs x14, x14, xzr __LF \ subs x15, x5, x6 __LF \ cneg x15, x15, lo __LF \ csetm x1, lo __LF \ subs x17, x10, x9 __LF \ cneg x17, x17, lo __LF \ mul x16, x15, x17 __LF \ umulh x17, x15, x17 __LF \ cinv x1, x1, lo __LF \ eor x16, x16, x1 __LF \ eor x17, x17, x1 __LF \ cmn x1, #1 __LF \ adcs x12, x12, x16 __LF \ adcs x13, x13, x17 __LF \ adc x14, x14, x1 __LF \ subs 
x3, x5, x3 __LF \ sbcs x4, x6, x4 __LF \ ngc x5, xzr __LF \ cmn x5, #1 __LF \ eor x3, x3, x5 __LF \ adcs x3, x3, xzr __LF \ eor x4, x4, x5 __LF \ adcs x4, x4, xzr __LF \ subs x7, x7, x9 __LF \ sbcs x8, x8, x10 __LF \ ngc x9, xzr __LF \ cmn x9, #1 __LF \ eor x7, x7, x9 __LF \ adcs x7, x7, xzr __LF \ eor x8, x8, x9 __LF \ adcs x8, x8, xzr __LF \ eor x10, x5, x9 __LF \ ldp x15, x1, [P0] __LF \ adds x15, x11, x15 __LF \ adcs x1, x12, x1 __LF \ ldp x5, x9, [P0+16] __LF \ adcs x5, x13, x5 __LF \ adcs x9, x14, x9 __LF \ adc x2, xzr, xzr __LF \ mul x11, x3, x7 __LF \ mul x13, x4, x8 __LF \ umulh x12, x3, x7 __LF \ adds x16, x11, x13 __LF \ umulh x14, x4, x8 __LF \ adcs x17, x12, x14 __LF \ adcs x14, x14, xzr __LF \ adds x12, x12, x16 __LF \ adcs x13, x13, x17 __LF \ adcs x14, x14, xzr __LF \ subs x3, x3, x4 __LF \ cneg x3, x3, lo __LF \ csetm x4, lo __LF \ subs x17, x8, x7 __LF \ cneg x17, x17, lo __LF \ mul x16, x3, x17 __LF \ umulh x17, x3, x17 __LF \ cinv x4, x4, lo __LF \ eor x16, x16, x4 __LF \ eor x17, x17, x4 __LF \ cmn x4, #1 __LF \ adcs x12, x12, x16 __LF \ adcs x13, x13, x17 __LF \ adc x14, x14, x4 __LF \ cmn x10, #1 __LF \ eor x11, x11, x10 __LF \ adcs x11, x11, x15 __LF \ eor x12, x12, x10 __LF \ adcs x12, x12, x1 __LF \ eor x13, x13, x10 __LF \ adcs x13, x13, x5 __LF \ eor x14, x14, x10 __LF \ adcs x14, x14, x9 __LF \ adcs x3, x2, x10 __LF \ adcs x4, x10, xzr __LF \ adc x10, x10, xzr __LF \ adds x13, x13, x15 __LF \ adcs x14, x14, x1 __LF \ adcs x3, x3, x5 __LF \ adcs x4, x4, x9 __LF \ adc x10, x10, x2 __LF \ lsl x16, x11, #32 __LF \ lsr x15, x11, #32 __LF \ subs x1, x16, x11 __LF \ sbc x17, x15, xzr __LF \ subs x12, x12, x1 __LF \ sbcs x13, x13, x17 __LF \ sbcs x14, x14, x16 __LF \ sbc x11, x11, x15 __LF \ lsl x16, x12, #32 __LF \ lsr x15, x12, #32 __LF \ subs x1, x16, x12 __LF \ sbc x17, x15, xzr __LF \ subs x13, x13, x1 __LF \ sbcs x14, x14, x17 __LF \ sbcs x11, x11, x16 __LF \ sbc x12, x12, x15 __LF \ adds x3, x3, x11 __LF \ adcs x4, x4, x12 __LF \ adc x10, x10, xzr __LF \ add x2, x10, #1 __LF \ lsl x15, x2, #32 __LF \ sub x16, x15, x2 __LF \ adds x13, x13, x2 __LF \ adcs x14, x14, x16 __LF \ adcs x3, x3, xzr __LF \ adcs x4, x4, x15 __LF \ csetm x7, lo __LF \ adds x13, x13, x7 __LF \ and x16, x7, #0xffffffff00000000 __LF \ adcs x14, x14, x16 __LF \ adcs x3, x3, x7 __LF \ and x15, x7, #0xfffffffeffffffff __LF \ adc x4, x4, x15 __LF \ stp x13, x14, [P0] __LF \ stp x3, x4, [P0+16] // Corresponds to bignum_montsqr_sm2 exactly #define montsqr_sm2(P0,P1) \ ldp x2, x3, [P1] __LF \ ldp x4, x5, [P1+16] __LF \ umull x15, w2, w2 __LF \ lsr x11, x2, #32 __LF \ umull x16, w11, w11 __LF \ umull x11, w2, w11 __LF \ adds x15, x15, x11, lsl #33 __LF \ lsr x11, x11, #31 __LF \ adc x16, x16, x11 __LF \ umull x17, w3, w3 __LF \ lsr x11, x3, #32 __LF \ umull x1, w11, w11 __LF \ umull x11, w3, w11 __LF \ mul x12, x2, x3 __LF \ umulh x13, x2, x3 __LF \ adds x17, x17, x11, lsl #33 __LF \ lsr x11, x11, #31 __LF \ adc x1, x1, x11 __LF \ adds x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adc x1, x1, xzr __LF \ adds x16, x16, x12 __LF \ adcs x17, x17, x13 __LF \ adc x1, x1, xzr __LF \ lsl x12, x15, #32 __LF \ lsr x11, x15, #32 __LF \ subs x14, x12, x15 __LF \ sbc x13, x11, xzr __LF \ subs x16, x16, x14 __LF \ sbcs x17, x17, x13 __LF \ sbcs x1, x1, x12 __LF \ sbc x15, x15, x11 __LF \ lsl x12, x16, #32 __LF \ lsr x11, x16, #32 __LF \ subs x14, x12, x16 __LF \ sbc x13, x11, xzr __LF \ subs x17, x17, x14 __LF \ sbcs x1, x1, x13 __LF \ sbcs x15, x15, x12 __LF \ sbc x16, x16, x11 __LF \ mul x6, x2, x4 __LF \ mul 
x14, x3, x5 __LF \ umulh x8, x2, x4 __LF \ subs x10, x2, x3 __LF \ cneg x10, x10, lo __LF \ csetm x13, lo __LF \ subs x12, x5, x4 __LF \ cneg x12, x12, lo __LF \ mul x11, x10, x12 __LF \ umulh x12, x10, x12 __LF \ cinv x13, x13, lo __LF \ eor x11, x11, x13 __LF \ eor x12, x12, x13 __LF \ adds x7, x6, x8 __LF \ adc x8, x8, xzr __LF \ umulh x9, x3, x5 __LF \ adds x7, x7, x14 __LF \ adcs x8, x8, x9 __LF \ adc x9, x9, xzr __LF \ adds x8, x8, x14 __LF \ adc x9, x9, xzr __LF \ cmn x13, #1 __LF \ adcs x7, x7, x11 __LF \ adcs x8, x8, x12 __LF \ adc x9, x9, x13 __LF \ adds x6, x6, x6 __LF \ adcs x7, x7, x7 __LF \ adcs x8, x8, x8 __LF \ adcs x9, x9, x9 __LF \ adc x10, xzr, xzr __LF \ adds x6, x6, x17 __LF \ adcs x7, x7, x1 __LF \ adcs x8, x8, x15 __LF \ adcs x9, x9, x16 __LF \ adc x10, x10, xzr __LF \ lsl x12, x6, #32 __LF \ lsr x11, x6, #32 __LF \ subs x14, x12, x6 __LF \ sbc x13, x11, xzr __LF \ subs x7, x7, x14 __LF \ sbcs x8, x8, x13 __LF \ sbcs x9, x9, x12 __LF \ sbc x14, x6, x11 __LF \ adds x10, x10, x14 __LF \ adc x6, xzr, xzr __LF \ lsl x12, x7, #32 __LF \ lsr x11, x7, #32 __LF \ subs x14, x12, x7 __LF \ sbc x13, x11, xzr __LF \ subs x8, x8, x14 __LF \ sbcs x9, x9, x13 __LF \ sbcs x10, x10, x12 __LF \ sbc x14, x7, x11 __LF \ adds x6, x6, x14 __LF \ adc x7, xzr, xzr __LF \ mul x11, x4, x4 __LF \ adds x8, x8, x11 __LF \ mul x12, x5, x5 __LF \ umulh x11, x4, x4 __LF \ adcs x9, x9, x11 __LF \ adcs x10, x10, x12 __LF \ umulh x12, x5, x5 __LF \ adcs x6, x6, x12 __LF \ adc x7, x7, xzr __LF \ mul x11, x4, x5 __LF \ umulh x12, x4, x5 __LF \ adds x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adc x13, xzr, xzr __LF \ adds x9, x9, x11 __LF \ adcs x10, x10, x12 __LF \ adcs x6, x6, x13 __LF \ adcs x7, x7, xzr __LF \ mov x11, #-4294967296 __LF \ adds x5, x8, #1 __LF \ sbcs x11, x9, x11 __LF \ mov x13, #-4294967297 __LF \ adcs x12, x10, xzr __LF \ sbcs x13, x6, x13 __LF \ sbcs xzr, x7, xzr __LF \ csel x8, x5, x8, hs __LF \ csel x9, x11, x9, hs __LF \ csel x10, x12, x10, hs __LF \ csel x6, x13, x6, hs __LF \ stp x8, x9, [P0] __LF \ stp x10, x6, [P0+16] // Corresponds exactly to bignum_sub_sm2 #define sub_sm2(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ csetm x3, cc __LF \ adds x5, x5, x3 __LF \ and x4, x3, #0xffffffff00000000 __LF \ adcs x6, x6, x4 __LF \ adcs x7, x7, x3 __LF \ and x4, x3, #0xfffffffeffffffff __LF \ adc x8, x8, x4 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] // Corresponds exactly to bignum_add_sm2 #define add_sm2(P0,P1,P2) \ ldp x4, x5, [P1] __LF \ ldp x8, x9, [P2] __LF \ adds x4, x4, x8 __LF \ adcs x5, x5, x9 __LF \ ldp x6, x7, [P1+16] __LF \ ldp x10, x11, [P2+16] __LF \ adcs x6, x6, x10 __LF \ adcs x7, x7, x11 __LF \ adc x3, xzr, xzr __LF \ adds x8, x4, #0x1 __LF \ mov x9, #0xffffffff00000000 __LF \ sbcs x9, x5, x9 __LF \ adcs x10, x6, xzr __LF \ mov x11, #0xfffffffeffffffff __LF \ sbcs x11, x7, x11 __LF \ sbcs x3, x3, xzr __LF \ csel x4, x4, x8, cc __LF \ csel x5, x5, x9, cc __LF \ csel x6, x6, x10, cc __LF \ csel x7, x7, x11, cc __LF \ stp x4, x5, [P0] __LF \ stp x6, x7, [P0+16] // A weak version of add that only guarantees sum in 4 digits #define weakadd_sm2(P0,P1,P2) \ ldp x4, x5, [P1] __LF \ ldp x8, x9, [P2] __LF \ adds x4, x4, x8 __LF \ adcs x5, x5, x9 __LF \ ldp x6, x7, [P1+16] __LF \ ldp x10, x11, [P2+16] __LF \ adcs x6, x6, x10 __LF \ adcs x7, x7, x11 __LF \ csetm x2, cs __LF \ subs x4, x4, x2 __LF \ and x3, x2, 
#0xffffffff00000000 __LF \ sbcs x5, x5, x3 __LF \ and x1, x2, #0xfffffffeffffffff __LF \ sbcs x6, x6, x2 __LF \ sbc x7, x7, x1 __LF \ stp x4, x5, [P0] __LF \ stp x6, x7, [P0+16] // P0 = C * P1 - D * P2 computed as D * (p_sm2 - P2) + C * P1 // Quotient estimation is done just as q = h + 1 as in bignum_triple_sm2 // This also applies to the other functions following. #define cmsub_sm2(P0,C,P1,D,P2) \ mov x1, D __LF \ mov x2, #-1 __LF \ ldp x9, x10, [P2] __LF \ subs x9, x2, x9 __LF \ mov x3, #0xffffffff00000000 __LF \ sbcs x10, x3, x10 __LF \ ldp x11, x12, [P2+16] __LF \ sbcs x11, x2, x11 __LF \ mov x4, #0xfffffffeffffffff __LF \ sbc x12, x4, x12 __LF \ mul x3, x1, x9 __LF \ mul x4, x1, x10 __LF \ mul x5, x1, x11 __LF \ mul x6, x1, x12 __LF \ umulh x9, x1, x9 __LF \ umulh x10, x1, x10 __LF \ umulh x11, x1, x11 __LF \ umulh x7, x1, x12 __LF \ adds x4, x4, x9 __LF \ adcs x5, x5, x10 __LF \ adcs x6, x6, x11 __LF \ adc x7, x7, xzr __LF \ mov x1, C __LF \ ldp x9, x10, [P1] __LF \ mul x8, x9, x1 __LF \ umulh x9, x9, x1 __LF \ adds x3, x3, x8 __LF \ mul x8, x10, x1 __LF \ umulh x10, x10, x1 __LF \ adcs x4, x4, x8 __LF \ ldp x11, x12, [P1+16] __LF \ mul x8, x11, x1 __LF \ umulh x11, x11, x1 __LF \ adcs x5, x5, x8 __LF \ mul x8, x12, x1 __LF \ umulh x12, x12, x1 __LF \ adcs x6, x6, x8 __LF \ adc x7, x7, xzr __LF \ adds x4, x4, x9 __LF \ adcs x5, x5, x10 __LF \ adcs x6, x6, x11 __LF \ adc x7, x7, x12 __LF \ add x7, x7, #0x1 __LF \ lsl x8, x7, #32 __LF \ sub x9, x8, x7 __LF \ adds x3, x3, x7 __LF \ adcs x4, x4, x9 __LF \ adcs x5, x5, xzr __LF \ adcs x6, x6, x8 __LF \ csetm x7, cc __LF \ adds x3, x3, x7 __LF \ and x9, x7, #0xffffffff00000000 __LF \ adcs x4, x4, x9 __LF \ adcs x5, x5, x7 __LF \ and x8, x7, #0xfffffffeffffffff __LF \ adc x6, x6, x8 __LF \ stp x3, x4, [P0] __LF \ stp x5, x6, [P0+16] // P0 = 4 * P1 - P2, by direct subtraction of P2; the method // in bignum_cmul_sm2 etc. for quotient estimation still // works when the value to be reduced is negative, as // long as it is > -p_sm2, which is the case here. 
#define cmsub41_sm2(P0,P1,P2) \ ldp x1, x2, [P1] __LF \ lsl x0, x1, #2 __LF \ ldp x6, x7, [P2] __LF \ subs x0, x0, x6 __LF \ extr x1, x2, x1, #62 __LF \ sbcs x1, x1, x7 __LF \ ldp x3, x4, [P1+16] __LF \ extr x2, x3, x2, #62 __LF \ ldp x6, x7, [P2+16] __LF \ sbcs x2, x2, x6 __LF \ extr x3, x4, x3, #62 __LF \ sbcs x3, x3, x7 __LF \ lsr x4, x4, #62 __LF \ sbc x4, x4, xzr __LF \ add x4, x4, #0x1 __LF \ lsl x5, x4, #32 __LF \ sub x6, x5, x4 __LF \ adds x0, x0, x4 __LF \ adcs x1, x1, x6 __LF \ adcs x2, x2, xzr __LF \ adcs x3, x3, x5 __LF \ csetm x4, cc __LF \ adds x0, x0, x4 __LF \ and x6, x4, #0xffffffff00000000 __LF \ adcs x1, x1, x6 __LF \ adcs x2, x2, x4 __LF \ and x5, x4, #0xfffffffeffffffff __LF \ adc x3, x3, x5 __LF \ stp x0, x1, [P0] __LF \ stp x2, x3, [P0+16] // P0 = 3 * P1 - 8 * P2, computed as (p_sm2 - P2) << 3 + 3 * P1 #define cmsub38_sm2(P0,P1,P2) \ mov x1, 8 __LF \ mov x2, #-1 __LF \ ldp x9, x10, [P2] __LF \ subs x9, x2, x9 __LF \ mov x3, #0xffffffff00000000 __LF \ sbcs x10, x3, x10 __LF \ ldp x11, x12, [P2+16] __LF \ sbcs x11, x2, x11 __LF \ mov x4, #0xfffffffeffffffff __LF \ sbc x12, x4, x12 __LF \ lsl x3, x9, #3 __LF \ extr x4, x10, x9, #61 __LF \ extr x5, x11, x10, #61 __LF \ extr x6, x12, x11, #61 __LF \ lsr x7, x12, #61 __LF \ mov x1, 3 __LF \ ldp x9, x10, [P1] __LF \ mul x8, x9, x1 __LF \ umulh x9, x9, x1 __LF \ adds x3, x3, x8 __LF \ mul x8, x10, x1 __LF \ umulh x10, x10, x1 __LF \ adcs x4, x4, x8 __LF \ ldp x11, x12, [P1+16] __LF \ mul x8, x11, x1 __LF \ umulh x11, x11, x1 __LF \ adcs x5, x5, x8 __LF \ mul x8, x12, x1 __LF \ umulh x12, x12, x1 __LF \ adcs x6, x6, x8 __LF \ adc x7, x7, xzr __LF \ adds x4, x4, x9 __LF \ adcs x5, x5, x10 __LF \ adcs x6, x6, x11 __LF \ adc x7, x7, x12 __LF \ add x7, x7, #0x1 __LF \ lsl x8, x7, #32 __LF \ sub x9, x8, x7 __LF \ adds x3, x3, x7 __LF \ adcs x4, x4, x9 __LF \ adcs x5, x5, xzr __LF \ adcs x6, x6, x8 __LF \ csetm x7, cc __LF \ adds x3, x3, x7 __LF \ and x9, x7, #0xffffffff00000000 __LF \ adcs x4, x4, x9 __LF \ adcs x5, x5, x7 __LF \ and x8, x7, #0xfffffffeffffffff __LF \ adc x6, x6, x8 __LF \ stp x3, x4, [P0] __LF \ stp x5, x6, [P0+16] S2N_BN_SYMBOL(sm2_montjdouble): CFI_START // Save registers and make room on stack for temporary variables CFI_DEC_SP(NSPACE+16) CFI_STACKSAVE2(x19,x20,NSPACE) // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 // Main code, just a sequence of basic field operations // z2 = z^2 // y2 = y^2 montsqr_sm2(z2,z_1) montsqr_sm2(y2,y_1) // x2p = x^2 - z^4 = (x + z^2) * (x - z^2) sub_sm2(t2,x_1,z2) weakadd_sm2(t1,x_1,z2) montmul_sm2(x2p,t1,t2) // t1 = y + z // xy2 = x * y^2 // x4p = x2p^2 add_sm2(t1,y_1,z_1) montmul_sm2(xy2,x_1,y2) montsqr_sm2(x4p,x2p) // t1 = (y + z)^2 montsqr_sm2(t1,t1) // d = 12 * xy2 - 9 * x4p // t1 = y^2 + 2 * y * z cmsub_sm2(d_,12,xy2,9,x4p) sub_sm2(t1,t1,z2) // y4 = y^4 montsqr_sm2(y4,y2) // dx2 = d * x2p montmul_sm2(dx2,d_,x2p) // z_3' = 2 * y * z sub_sm2(z_3,t1,y2) // x' = 4 * xy2 - d cmsub41_sm2(x_3,xy2,d_) // y' = 3 * dx2 - 8 * y4 cmsub38_sm2(y_3,dx2,y4) // Restore registers and stack and return CFI_STACKLOAD2(x19,x20,NSPACE) CFI_INC_SP((NSPACE+16)) CFI_RET S2N_BN_SIZE_DIRECTIVE(sm2_montjdouble) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
wlsfx/bnbb
4,998
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_mod_nsm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Reduce modulo group order, z := x mod n_sm2
// Input x[k]; output z[4]
//
// extern void bignum_mod_nsm2(uint64_t z[static 4], uint64_t k,
//                             const uint64_t *x);
//
// Reduction is modulo the group order of the GM/T 0003-2012 curve SM2.
//
// Standard ARM ABI: X0 = z, X1 = k, X2 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_nsm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_nsm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_nsm2)
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_nsm2_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_nsm2_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_nsm2_alt)
.text
.balign 4

#define z x0
#define k x1
#define x x2

#define m0 x3
#define m1 x4
#define m2 x5
#define m3 x6

#define t0 x7
#define t1 x8
#define t2 x9
#define t3 x10
#define t4 x11

#define n0 x12
#define n1 x13
#define n3 x14

// These two are aliased: we only load d when finished with q

#define q x15
#define d x15

// Loading large constants

#define movbig(nn,n3,n2,n1,n0) \
movz nn, n0 __LF \
movk nn, n1, lsl #16 __LF \
movk nn, n2, lsl #32 __LF \
movk nn, n3, lsl #48

S2N_BN_SYMBOL(bignum_mod_nsm2):

S2N_BN_SYMBOL(bignum_mod_nsm2_alt):

CFI_START

// If the input is already <= 3 words long, go to a trivial "copy" path

cmp k, #4
bcc Lbignum_mod_nsm2_short

// Otherwise load the top 4 digits (top-down) and reduce k by 4

sub k, k, #4
lsl t0, k, #3
add t0, t0, x
ldp m2, m3, [t0, #16]
ldp m0, m1, [t0]

// Load the complicated three words of 2^256 - n_sm2 = [n3; 0; n1; n0]

movbig(n0, #0xac44, #0x0bf6, #0xc62a, #0xbedd)
movbig(n1, #0x8dfc, #0x2094, #0xde39, #0xfad4)
mov n3, 0x0000000100000000

// Reduce the top 4 digits mod n_sm2 (a conditional subtraction of n_sm2)

adds t0, m0, n0
adcs t1, m1, n1
adcs t2, m2, xzr
adcs t3, m3, n3
csel m0, m0, t0, cc
csel m1, m1, t1, cc
csel m2, m2, t2, cc
csel m3, m3, t3, cc

// Now do (k-4) iterations of 5->4 word modular reduction

cbz k, Lbignum_mod_nsm2_writeback

Lbignum_mod_nsm2_loop:

// Writing the input, with the new zeroth digit implicitly appended, as
// z = 2^256 * m3 + 2^192 * m2 + t, our intended quotient approximation is
// MIN ((m3 * (1 + 2^32 + 2^64) + m2 + 2^64) >> 64) (2^64 - 1)

adds t0, m2, m3
mov t2, #1
adc t1, m3, t2
add t2, m3, t0, lsr #32
adds q, t1, t2, lsr #32
cinv q, q, cs

// [t4;t3;t2;t1;t0] = q * (2^256 - n_sm2)

mul t0, n0, q
mul t1, n1, q
mul t3, n3, q
umulh t2, n0, q
adds t1, t1, t2
umulh t2, n1, q
adc t2, t2, xzr // No carry: high of mul + {0,1}
umulh t4, n3, q

// Compensate for 2^256 * q

sub m3, m3, q

// Decrement k and load the next digit (note that d aliases to q)

sub k, k, #1
ldr d, [x, k, lsl #3]

// [t4;t3;t2;t1;t0] = [m3;m2;m1;m0;d] - q * n_sm2

adds t0, d, t0
adcs t1, m0, t1
adcs t2, m1, t2
adcs t3, m2, t3
adc t4, m3, t4

// Now our top word t4 is either zero or all 1s. Use it for a masked
// addition of n_sm2, which we can do by a *subtraction* of
// 2^256 - n_sm2 from our portion, re-using the constants

and d, t4, n0
subs m0, t0, d
and d, t4, n1
sbcs m1, t1, d
sbcs m2, t2, xzr
and d, t4, n3
sbc m3, t3, d

cbnz k, Lbignum_mod_nsm2_loop

// Finally write back [m3;m2;m1;m0] and return

Lbignum_mod_nsm2_writeback:

stp m0, m1, [z]
stp m2, m3, [z, #16]
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_mod_nsm2)

// Short case: just copy the input with zero-padding

Lbignum_mod_nsm2_short:

mov m0, xzr
mov m1, xzr
mov m2, xzr
mov m3, xzr
cbz k, Lbignum_mod_nsm2_writeback
ldr m0, [x]
subs k, k, #1
beq Lbignum_mod_nsm2_writeback
ldr m1, [x, #8]
subs k, k, #1
beq Lbignum_mod_nsm2_writeback
ldr m2, [x, #16]
b Lbignum_mod_nsm2_writeback

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
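The MIN expression in the loop comment above can be restated almost verbatim in C. This is a small sketch of ours (not library code), assuming unsigned __int128; peeling off the m3 * 2^64 + 2^64 part keeps everything within 128 bits, where the assembly instead handles it with the adds/adc/cinv carry tricks.

#include <stdint.h>

/* q = MIN((m3 * (1 + 2^32 + 2^64) + m2 + 2^64) >> 64, 2^64 - 1).
   Since floor((A*2^64 + B) / 2^64) = A + floor(B / 2^64), the m3*2^64 and
   2^64 terms contribute m3 + 1 directly to the high half. */
uint64_t nsm2_quotient_estimate(uint64_t m3, uint64_t m2)
{
    unsigned __int128 low =
        (unsigned __int128)m3 * ((((unsigned __int128)1) << 32) + 1) + m2;
    unsigned __int128 q = (unsigned __int128)m3 + 1 + (low >> 64);
    return (q >> 64) ? 0xFFFFFFFFFFFFFFFFULL : (uint64_t)q; /* saturate */
}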
wlsfx/bnbb
8,490
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_montmul_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery multiply, z := (x * y / 2^256) mod p_sm2 // Inputs x[4], y[4]; output z[4] // // extern void bignum_montmul_sm2(uint64_t z[static 4], const uint64_t x[static 4], // const uint64_t y[static 4]); // // Does z := (2^{-256} * x * y) mod p_sm2, assuming that the inputs x and y // satisfy x * y <= 2^256 * p_sm2 (in particular this is true if we are in // the "usual" case x < p_sm2 and y < p_sm2). // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_sm2) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montmul_sm2) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_sm2) .text .balign 4 // --------------------------------------------------------------------------- // Macro returning (c,h,l) = 3-word 1s complement (x - y) * (w - z) // c,h,l,t should all be different // t,h should not overlap w,z // --------------------------------------------------------------------------- #define muldiffn(c,h,l, t, x,y, w,z) \ subs t, x, y __LF \ cneg t, t, cc __LF \ csetm c, cc __LF \ subs h, w, z __LF \ cneg h, h, cc __LF \ mul l, t, h __LF \ umulh h, t, h __LF \ cinv c, c, cc __LF \ eor l, l, c __LF \ eor h, h, c // --------------------------------------------------------------------------- // Core one-step "short" Montgomery reduction macro. Takes input in // [d3;d2;d1;d0] and returns result in [d4;d3;d2;d1], adding to the // existing contents of [d3;d2;d1], and using t0, t1, t2 and t3 as // temporaries. It is fine for d4 to be the same register as d0, // and it often is. // --------------------------------------------------------------------------- #define montreds(d4,d3,d2,d1,d0, t3,t2,t1,t0) \ /* First let [t3;t2] = 2^32 * d0 and [t1;t0] = (2^32-1) * d0 */ \ lsl t2, d0, #32 __LF \ lsr t3, d0, #32 __LF \ subs t0, t2, d0 __LF \ sbc t1, t3, xzr __LF \ /* Now [d4;d3;d2;d1] := [d0;d3;d2;d1] - [t3;t2;t1;t0] */ \ subs d1, d1, t0 __LF \ sbcs d2, d2, t1 __LF \ sbcs d3, d3, t2 __LF \ sbc d4, d0, t3 #define a0 x3 #define a1 x4 #define a2 x5 #define a3 x6 #define b0 x7 #define b1 x8 #define b2 x9 #define b3 x10 #define s0 x11 #define s1 x12 #define s2 x13 #define s3 x14 #define t0 x15 #define t1 x16 #define t2 x17 #define t3 x1 #define s4 x2 S2N_BN_SYMBOL(bignum_montmul_sm2): CFI_START // Load in all words of both inputs ldp a0, a1, [x1] ldp a2, a3, [x1, #16] ldp b0, b1, [x2] ldp b2, b3, [x2, #16] // Multiply low halves with a 2x2->4 ADK multiplier as L = [s3;s2;s1;s0] mul s0, a0, b0 mul s2, a1, b1 umulh s1, a0, b0 adds t1, s0, s2 umulh s3, a1, b1 adcs t2, s1, s3 adcs s3, s3, xzr adds s1, s1, t1 adcs s2, s2, t2 adcs s3, s3, xzr muldiffn(t3,t2,t1, t0, a0,a1, b1,b0) adds xzr, t3, #1 adcs s1, s1, t1 adcs s2, s2, t2 adc s3, s3, t3 // Perform two "short" Montgomery steps on the low product to // get a modified low result L' = [s1;s0;s3;s2] // This shifts it to an offset compatible with middle terms // Stash the result L' temporarily in the output buffer to avoid // using additional registers. 
montreds(s0,s3,s2,s1,s0, t0,t1,t2,t3) montreds(s1,s0,s3,s2,s1, t0,t1,t2,t3) stp s2, s3, [x0] stp s0, s1, [x0, #16] // Multiply high halves with a 2x2->4 ADK multiplier as H = [s3;s2;s1;s0] mul s0, a2, b2 mul s2, a3, b3 umulh s1, a2, b2 adds t1, s0, s2 umulh s3, a3, b3 adcs t2, s1, s3 adcs s3, s3, xzr adds s1, s1, t1 adcs s2, s2, t2 adcs s3, s3, xzr muldiffn(t3,t2,t1, t0, a2,a3, b3,b2) adds xzr, t3, #1 adcs s1, s1, t1 adcs s2, s2, t2 adc s3, s3, t3 // Compute sign-magnitude a2,[a1,a0] = x_hi - x_lo subs a0, a2, a0 sbcs a1, a3, a1 sbc a2, xzr, xzr adds xzr, a2, #1 eor a0, a0, a2 adcs a0, a0, xzr eor a1, a1, a2 adcs a1, a1, xzr // Compute sign-magnitude b2,[b1,b0] = y_lo - y_hi subs b0, b0, b2 sbcs b1, b1, b3 sbc b2, xzr, xzr adds xzr, b2, #1 eor b0, b0, b2 adcs b0, b0, xzr eor b1, b1, b2 adcs b1, b1, xzr // Save the correct sign for the sub-product in b3 eor b3, a2, b2 // Add the high H to the modified low term L' as H + L' = [s4;b2;a2;t3;t0] ldp t0, t3, [x0] adds t0, s0, t0 adcs t3, s1, t3 ldp a2, b2, [x0, #16] adcs a2, s2, a2 adcs b2, s3, b2 adc s4, xzr, xzr // Multiply with yet a third 2x2->4 ADK multiplier for complex mid-term M mul s0, a0, b0 mul s2, a1, b1 umulh s1, a0, b0 adds t1, s0, s2 umulh s3, a1, b1 adcs t2, s1, s3 adcs s3, s3, xzr adds s1, s1, t1 adcs s2, s2, t2 adcs s3, s3, xzr muldiffn(a1,t2,t1, a0, a0,a1, b1,b0) adds xzr, a1, #1 adcs s1, s1, t1 adcs s2, s2, t2 adc s3, s3, a1 // Set up a sign-modified version of the mid-product in a long accumulator // as [b3;a1;a0;s3;s2;s1;s0], adding in the H + L' term once with // zero offset as this signed value is created adds xzr, b3, #1 eor s0, s0, b3 adcs s0, s0, t0 eor s1, s1, b3 adcs s1, s1, t3 eor s2, s2, b3 adcs s2, s2, a2 eor s3, s3, b3 adcs s3, s3, b2 adcs a0, s4, b3 adcs a1, b3, xzr adc b3, b3, xzr // Add in the stashed H + L' term an offset of 2 words as well adds s2, s2, t0 adcs s3, s3, t3 adcs a0, a0, a2 adcs a1, a1, b2 adc b3, b3, s4 // Do two more Montgomery steps on the composed term // Net pre-reduct is in [b3;a1;a0;s3;s2] montreds(s0,s3,s2,s1,s0, t0,t1,t2,t3) montreds(s1,s0,s3,s2,s1, t0,t1,t2,t3) adds a0, a0, s0 adcs a1, a1, s1 adc b3, b3, xzr // Because of the way we added L' in two places, we can overspill by // more than usual in Montgomery, with the result being only known to // be < 3 * p_sm2, not the usual < 2 * p_sm2. So now we do a more // elaborate final correction in the style of bignum_cmul_sm2, though // we can use much simpler quotient estimation logic (q = h + 1) and // slightly more direct accumulation of p_sm2 * q. #define d0 s2 #define d1 s3 #define d2 a0 #define d3 a1 #define h b3 #define q s4 #define c b0 add q, h, #1 lsl t0, q, #32 sub t1, t0, q adds d0, d0, q adcs d1, d1, t1 adcs d2, d2, xzr adcs d3, d3, t0 csetm c, cc adds d0, d0, c and t1, c, #0xffffffff00000000 adcs d1, d1, t1 adcs d2, d2, c and t0, c, #0xfffffffeffffffff adc d3, d3, t0 // Finally store the result stp d0, d1, [x0] stp d2, d3, [x0, #16] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montmul_sm2) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
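// A note on the final correction above: the pre-reduced value
// [h; d3; d2; d1; d0] is only known to be < 3 * p_sm2, so the quotient
// estimate is q = h + 1, and since 2^256 - p_sm2 = 2^224 + 2^96 - 2^64 + 1
// the additions of q, t1 = (q << 32) - q and t0 = q << 32 at digits 0, 1
// and 3 amount to subtracting q * p_sm2 modulo 2^256. The carry-out then
// reports whether the true difference was nonnegative; if not, a single
// masked addition of p_sm2 repairs the overshoot.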
wlsfx/bnbb
65,941
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_inv_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Modular inverse modulo p_sm2 = 2^256 - 2^224 - 2^96 + 2^64 - 1 // Input x[4]; output z[4] // // extern void bignum_inv_sm2(uint64_t z[static 4],const uint64_t x[static 4]); // // If the 4-digit input x is coprime to p_sm2, i.e. is not divisible // by it, returns z < p_sm2 such that x * z == 1 (mod p_sm2). Note that // x does not need to be reduced modulo p_sm2, but the output always is. // If the input is divisible (i.e. is 0 or p_sm2), then there can be no // modular inverse and z = 0 is returned. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_inv_sm2) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_inv_sm2) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_inv_sm2) .text .balign 4 // Size in bytes of a 64-bit word #define N 8 // Used for the return pointer #define res x20 // Loop counter and d = 2 * delta value for divstep #define i x21 #define d x22 // Registers used for matrix element magnitudes and signs #define m00 x10 #define m01 x11 #define m10 x12 #define m11 x13 #define s00 x14 #define s01 x15 #define s10 x16 #define s11 x17 // Initial carries for combinations #define car0 x9 #define car1 x19 // Input and output, plain registers treated according to pattern #define reg0 x0, #0 #define reg1 x1, #0 #define reg2 x2, #0 #define reg3 x3, #0 #define reg4 x4, #0 #define x x1, #0 #define z x0, #0 // Pointer-offset pairs for temporaries on stack #define f sp, #0 #define g sp, #(6*N) #define u sp, #(12*N) #define v sp, #(16*N) // Total size to reserve on the stack #define NSPACE 20*N // --------------------------------------------------------------------------- // Core signed almost-Montgomery reduction macro. Takes input in // [d4;d3;d2;d1;d0] and returns result in [d4;d3;d2;d1], adding to // the existing [d4;d3;d2;d1], and re-using d0 as a temporary internally // as well as t0, t1, t2, t3. This is almost-Montgomery, i.e. the result // fits in 4 digits but is not necessarily strictly reduced mod p_sm2. // --------------------------------------------------------------------------- #define amontred(d4,d3,d2,d1,d0, t3,t2,t1,t0) \ /* We only know the input is -2^316 < x < 2^316. To do traditional */ \ /* unsigned Montgomery reduction, start by adding 2^61 * p_sm2. */ \ mov t0, #0xe000000000000000 __LF \ adds d0, d0, t0 __LF \ mov t1, #0x1fffffffffffffff __LF \ adcs d1, d1, t1 __LF \ mov t2, #0xffffffffe0000000 __LF \ adcs d2, d2, t2 __LF \ sbcs d3, d3, xzr __LF \ and t0, t1, #0xffffffffdfffffff __LF \ adc d4, d4, t0 __LF \ /* First let [t3;t2] = 2^32 * d0 and [t1;t0] = (2^32-1) * d0 */ \ lsl t2, d0, #32 __LF \ lsr t3, d0, #32 __LF \ subs t0, t2, d0 __LF \ sbc t1, t3, xzr __LF \ /* Now [d4;d3;d2;d1] := [d0;d3;d2;d1] - [t3;t2;t1;t0] */ \ subs d1, d1, t0 __LF \ sbcs d2, d2, t1 __LF \ sbcs d3, d3, t2 __LF \ sbc t0, d0, t3 __LF \ adds d4, d4, t0 __LF \ /* Now capture top carry and subtract p_sm2 if set (almost-Montgomery) */ \ csetm t0, cs __LF \ subs d1, d1, t0 __LF \ and t1, t0, #0xffffffff00000000 __LF \ sbcs d2, d2, t1 __LF \ and t2, t0, #0xfffffffeffffffff __LF \ sbcs d3, d3, t0 __LF \ sbc d4, d4, t2 // Very similar to a subroutine call to the s2n-bignum word_divstep59. 
// But different in register usage and returning the final matrix in // registers as follows // // [ m00 m01] // [ m10 m11] #define divstep59() \ and x4, x2, #0xfffff __LF \ orr x4, x4, #0xfffffe0000000000 __LF \ and x5, x3, #0xfffff __LF \ orr x5, x5, #0xc000000000000000 __LF \ tst x5, #0x1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, 
#0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ asr x5, x5, #1 __LF \ add x8, x4, #0x100, lsl #12 __LF \ sbfx x8, x8, #21, #21 __LF \ mov x11, #0x100000 __LF \ add x11, x11, x11, lsl #21 __LF \ add x9, x4, x11 __LF \ asr x9, x9, #42 __LF \ add x10, x5, #0x100, lsl #12 __LF \ sbfx x10, x10, #21, #21 __LF \ add x11, x5, x11 __LF \ asr x11, x11, #42 __LF \ mul x6, x8, x2 __LF \ mul x7, x9, x3 __LF \ mul x2, x10, x2 __LF \ mul x3, x11, x3 __LF \ add x4, x6, x7 __LF \ add x5, x2, x3 __LF \ asr x2, x4, #20 __LF \ asr x3, x5, #20 __LF \ and x4, x2, #0xfffff __LF \ orr x4, x4, #0xfffffe0000000000 __LF \ and x5, x3, #0xfffff __LF \ orr x5, x5, #0xc000000000000000 __LF \ tst x5, #0x1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge 
__LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ asr x5, x5, #1 __LF \ add x12, x4, #0x100, lsl #12 __LF \ sbfx x12, x12, #21, #21 __LF \ mov x15, #0x100000 __LF \ add x15, x15, x15, lsl #21 __LF \ add x13, x4, x15 __LF \ asr x13, x13, #42 __LF \ add x14, x5, #0x100, lsl #12 __LF \ sbfx x14, x14, #21, #21 __LF \ add x15, x5, x15 __LF \ asr x15, x15, #42 __LF \ mul x6, x12, x2 __LF \ mul x7, x13, x3 __LF \ mul x2, x14, x2 __LF \ mul x3, x15, x3 __LF \ add x4, x6, x7 __LF \ add x5, x2, x3 __LF \ asr x2, x4, #20 __LF \ asr x3, x5, #20 __LF \ and x4, x2, #0xfffff __LF \ orr x4, x4, #0xfffffe0000000000 __LF \ and x5, x3, #0xfffff __LF \ orr x5, x5, #0xc000000000000000 __LF \ tst x5, #0x1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 
__LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ mul x2, x12, x8 __LF \ mul x3, x12, x9 __LF \ mul x6, x14, x8 __LF \ mul x7, x14, x9 __LF \ madd x8, x13, x10, x2 __LF \ madd x9, x13, x11, x3 __LF \ madd x16, x15, x10, x6 __LF \ madd x17, x15, x11, x7 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, 
x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ asr x5, x5, #1 __LF \ add x12, x4, #0x100, lsl #12 __LF \ sbfx x12, x12, #22, #21 __LF \ mov x15, #0x100000 __LF \ add x15, x15, x15, lsl #21 __LF \ add x13, x4, x15 __LF \ asr x13, x13, #43 __LF \ add x14, x5, #0x100, lsl #12 __LF \ sbfx x14, x14, #22, #21 __LF \ add x15, x5, x15 __LF \ asr x15, x15, #43 __LF \ mneg x2, x12, x8 __LF \ mneg x3, x12, x9 __LF \ mneg x4, x14, x8 __LF \ mneg x5, x14, x9 __LF \ msub m00, x13, x16, x2 __LF \ msub m01, x13, x17, x3 __LF \ msub m10, x15, x16, x4 __LF \ msub m11, x15, x17, x5 S2N_BN_SYMBOL(bignum_inv_sm2): CFI_START // Save registers and make room for temporaries CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_DEC_SP(NSPACE) // Save the return pointer for the end so we can overwrite x0 later mov res, x0 // Copy the prime and input into the main f and g variables respectively. // Make sure x is reduced so that g <= f as assumed in the bound proof. mov x10, #0xffffffffffffffff mov x11, #0xffffffff00000000 mov x13, #0xfffffffeffffffff stp x10, x11, [f] stp x10, x13, [f+2*N] str xzr, [f+4*N] ldp x2, x3, [x1] subs x10, x2, #-1 sbcs x11, x3, x11 ldp x4, x5, [x1, #(2*N)] adcs x12, x4, xzr sbcs x13, x5, x13 csel x2, x2, x10, cc csel x3, x3, x11, cc csel x4, x4, x12, cc csel x5, x5, x13, cc stp x2, x3, [g] stp x4, x5, [g+2*N] str xzr, [g+4*N] // Also maintain reduced < 2^256 vector [u,v] such that // [f,g] == x * 2^{5*i-50} * [u,v] (mod p_sm2) // starting with [p_sm2,x] == x * 2^{5*0-50} * [0,2^50] (mod p_sm2) // The weird-looking 5*i modifications come in because we are doing // 64-bit word-sized Montgomery reductions at each stage, which is // 5 bits more than the 59-bit requirement to keep things stable. stp xzr, xzr, [u] stp xzr, xzr, [u+2*N] mov x10, #0x0004000000000000 stp x10, xzr, [v] stp xzr, xzr, [v+2*N] // Start of main loop. We jump into the middle so that the divstep // portion is common to the special tenth iteration after a uniform // first 9. mov i, #10 mov d, #1 b Lbignum_inv_sm2_midloop Lbignum_inv_sm2_loop: // Separate the matrix elements into sign-magnitude pairs cmp m00, xzr csetm s00, mi cneg m00, m00, mi cmp m01, xzr csetm s01, mi cneg m01, m01, mi cmp m10, xzr csetm s10, mi cneg m10, m10, mi cmp m11, xzr csetm s11, mi cneg m11, m11, mi // Adjust the initial values to allow for complement instead of negation // This initial offset is the same for [f,g] and [u,v] compositions. // Save it in stable registers for the [u,v] part and do [f,g] first. and x0, m00, s00 and x1, m01, s01 add car0, x0, x1 and x0, m10, s10 and x1, m11, s11 add car1, x0, x1 // Now the computation of the updated f and g values. This maintains a // 2-word carry between stages so we can conveniently insert the shift // right by 59 before storing back, and not overwrite digits we need // again of the old f and g values. 
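// Concretely, with the matrix [m00, m01; m10, m11] returned by the
// divstep59 block, the update computed below is (in signed arithmetic)
//
//   f' = (m00 * f + m01 * g) / 2^59
//   g' = (m10 * f + m11 * g) / 2^59
//
// where the divisions by 2^59 are exact by construction and are realized
// by the extr-based shifts as each digit is produced. Note also that the
// initial v above is 0x0004000000000000 = 2^50, matching the starting
// relation [p_sm2, x] == x * 2^(5*0-50) * [0, 2^50].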
// // Digit 0 of [f,g] ldr x7, [f] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, car0, x0 adc x2, xzr, x1 ldr x8, [g] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 adc x2, x2, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x5, car1, x0 adc x3, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 adc x3, x3, x1 // Digit 1 of [f,g] ldr x7, [f+N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [g+N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 adc x6, x6, x1 extr x4, x2, x4, #59 str x4, [f] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x3, x3, x0 adc x4, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x3, x3, x0 adc x4, x4, x1 extr x5, x3, x5, #59 str x5, [g] // Digit 2 of [f,g] ldr x7, [f+2*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [g+2*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 adc x5, x5, x1 extr x2, x6, x2, #59 str x2, [f+N] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x4, x4, x0 adc x2, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x4, x4, x0 adc x2, x2, x1 extr x3, x4, x3, #59 str x3, [g+N] // Digits 3 and 4 of [f,g] ldr x7, [f+3*N] eor x1, x7, s00 ldr x23, [f+4*N] eor x3, x23, s00 and x3, x3, m00 neg x3, x3 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, x3, x1 ldr x8, [g+3*N] eor x1, x8, s01 ldr x24, [g+4*N] eor x0, x24, s01 and x0, x0, m01 sub x3, x3, x0 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 adc x3, x3, x1 extr x6, x5, x6, #59 str x6, [f+2*N] extr x5, x3, x5, #59 str x5, [f+3*N] asr x3, x3, #59 str x3, [f+4*N] eor x1, x7, s10 eor x5, x23, s10 and x5, x5, m10 neg x5, x5 mul x0, x1, m10 umulh x1, x1, m10 adds x2, x2, x0 adc x5, x5, x1 eor x1, x8, s11 eor x0, x24, s11 and x0, x0, m11 sub x5, x5, x0 mul x0, x1, m11 umulh x1, x1, m11 adds x2, x2, x0 adc x5, x5, x1 extr x4, x2, x4, #59 str x4, [g+2*N] extr x2, x5, x2, #59 str x2, [g+3*N] asr x5, x5, #59 str x5, [g+4*N] // Now the computation of the updated u and v values and their // Montgomery reductions. A very similar accumulation except that // the top words of u and v are unsigned and we don't shift. 
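// A word on the bookkeeping: unlike f and g, the u and v accumulations
// below keep all 64 low bits and then divide by 2^64 via one word-level
// (almost-)Montgomery reduction, i.e. an exact division by 2^64 modulo
// p_sm2. Each iteration therefore contributes a factor 2^(64-59) = 2^5
// relative to the f,g shift, which is exactly where the 2^(5*i-50) term
// in the invariant stated near the top of the function comes from.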
// // Digit 0 of [u,v] ldr x7, [u] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, car0, x0 adc x2, xzr, x1 ldr x8, [v] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u] adc x2, x2, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x5, car1, x0 adc x3, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 str x5, [v] adc x3, x3, x1 // Digit 1 of [u,v] ldr x7, [u+N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [v+N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 str x2, [u+N] adc x6, x6, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x3, x3, x0 adc x4, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x3, x3, x0 str x3, [v+N] adc x4, x4, x1 // Digit 2 of [u,v] ldr x7, [u+2*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [v+2*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 str x6, [u+2*N] adc x5, x5, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x4, x4, x0 adc x2, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x4, x4, x0 str x4, [v+2*N] adc x2, x2, x1 // Digits 3 and 4 of u (top is unsigned) ldr x7, [u+3*N] eor x1, x7, s00 and x3, s00, m00 neg x3, x3 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, x3, x1 ldr x8, [v+3*N] eor x1, x8, s01 and x0, s01, m01 sub x3, x3, x0 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 adc x3, x3, x1 // Montgomery reduction of u ldp x0, x1, [u] ldr x6, [u+2*N] amontred(x3,x5,x6,x1,x0, x24,x10,x11,x14) stp x1, x6, [u] stp x5, x3, [u+16] // Digits 3 and 4 of v (top is unsigned) eor x1, x7, s10 and x5, s10, m10 neg x5, x5 mul x0, x1, m10 umulh x1, x1, m10 adds x2, x2, x0 adc x5, x5, x1 eor x1, x8, s11 and x0, s11, m11 sub x5, x5, x0 mul x0, x1, m11 umulh x1, x1, m11 adds x2, x2, x0 adc x5, x5, x1 // Montgomery reduction of v ldp x0, x1, [v] ldr x3, [v+2*N] amontred(x5,x2,x3,x1,x0, x24,x10,x11,x14) stp x1, x3, [v] stp x2, x5, [v+16] Lbignum_inv_sm2_midloop: mov x1, d ldr x2, [f] ldr x3, [g] divstep59() mov d, x1 // Next iteration subs i, i, #1 bne Lbignum_inv_sm2_loop // The 10th and last iteration does not need anything except the // u value and the sign of f; the latter can be obtained from the // lowest word of f. So it's done differently from the main loop. // Find the sign of the new f. For this we just need one digit // since we know (for in-scope cases) that f is either +1 or -1. // We don't explicitly shift right by 59 either, but looking at // bit 63 (or any bit >= 60) of the unshifted result is enough // to distinguish -1 from +1; this is then made into a mask. ldr x0, [f] ldr x1, [g] mul x0, x0, m00 madd x1, x1, m01, x0 asr x0, x1, #63 // Now separate out the matrix into sign-magnitude pairs // and adjust each one based on the sign of f. // // Note that at this point we expect |f|=1 and we got its // sign above, so then since [f,0] == x * [u,v] (mod p_sm2) // we want to flip the sign of u according to that of f. 
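// Note why no power-of-2 correction appears at this point: the invariant
// [f,g] == x * 2^(5*i-50) * [u,v] (mod p_sm2) is maintained per iteration,
// and after the full ten iterations the exponent is 5 * 10 - 50 = 0, so
// the Montgomery factors have cancelled exactly, leaving the plain
// relation [f,0] == x * [u,v] used in the comment above.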
cmp m00, xzr csetm s00, mi cneg m00, m00, mi eor s00, s00, x0 cmp m01, xzr csetm s01, mi cneg m01, m01, mi eor s01, s01, x0 cmp m10, xzr csetm s10, mi cneg m10, m10, mi eor s10, s10, x0 cmp m11, xzr csetm s11, mi cneg m11, m11, mi eor s11, s11, x0 // Adjust the initial value to allow for complement instead of negation and x0, m00, s00 and x1, m01, s01 add car0, x0, x1 // Digit 0 of [u] ldr x7, [u] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, car0, x0 adc x2, xzr, x1 ldr x8, [v] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u] adc x2, x2, x1 // Digit 1 of [u] ldr x7, [u+N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [v+N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 str x2, [u+N] adc x6, x6, x1 // Digit 2 of [u] ldr x7, [u+2*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [v+2*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 str x6, [u+2*N] adc x5, x5, x1 // Digits 3 and 4 of u (top is unsigned) ldr x7, [u+3*N] eor x1, x7, s00 and x3, s00, m00 neg x3, x3 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, x3, x1 ldr x8, [v+3*N] eor x1, x8, s01 and x0, s01, m01 sub x3, x3, x0 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 adc x3, x3, x1 // Montgomery reduction of u. This needs to be strict not "almost" // so it is followed by an optional subtraction of p_sm2 ldp x0, x1, [u] ldr x2, [u+2*N] amontred(x3,x5,x2,x1,x0, x24,x10,x11,x14) mov x10, #0xffffffffffffffff subs x10, x1, #-1 mov x11, #0xffffffff00000000 sbcs x11, x2, x11 mov x13, #0xfffffffeffffffff adcs x12, x5, xzr sbcs x13, x3, x13 csel x10, x1, x10, cc csel x11, x2, x11, cc csel x12, x5, x12, cc csel x13, x3, x13, cc // Store it back to the final output stp x10, x11, [res] stp x12, x13, [res, #16] // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_inv_sm2) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
wlsfx/bnbb
22,585
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/sm2_montjadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point addition on GM/T 0003-2012 curve SM2 in Montgomery-Jacobian coordinates // // extern void sm2_montjadd(uint64_t p3[static 12], const uint64_t p1[static 12], // const uint64_t p2[static 12]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^256 * x) mod p_sm2. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjadd) S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjadd) S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjadd) .text .balign 4 // Size of individual field elements #define NUMSIZE 32 // Stable homes for input arguments during main code sequence #define input_z x17 #define input_x x19 #define input_y x20 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define z_2 input_y, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // #NSPACE is the total stack needed for these temporaries #define z1sq sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define x1a sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define z2sq sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define y1a sp, #(NUMSIZE*6) #define NSPACE NUMSIZE*7 // Corresponds to bignum_montmul_sm2 with x0 in place of x17 #define montmul_sm2(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x5, x6, [P1+16] __LF \ ldp x7, x8, [P2] __LF \ ldp x9, x10, [P2+16] __LF \ mul x11, x3, x7 __LF \ mul x13, x4, x8 __LF \ umulh x12, x3, x7 __LF \ adds x16, x11, x13 __LF \ umulh x14, x4, x8 __LF \ adcs x0, x12, x14 __LF \ adcs x14, x14, xzr __LF \ adds x12, x12, x16 __LF \ adcs x13, x13, x0 __LF \ adcs x14, x14, xzr __LF \ subs x15, x3, x4 __LF \ cneg x15, x15, lo __LF \ csetm x1, lo __LF \ subs x0, x8, x7 __LF \ cneg x0, x0, lo __LF \ mul x16, x15, x0 __LF \ umulh x0, x15, x0 __LF \ cinv x1, x1, lo __LF \ eor x16, x16, x1 __LF \ eor x0, x0, x1 __LF \ cmn x1, #1 __LF \ adcs x12, x12, x16 __LF \ adcs x13, x13, x0 __LF \ adc x14, x14, x1 __LF \ lsl x16, x11, #32 __LF \ lsr x15, x11, #32 __LF \ subs x1, x16, x11 __LF \ sbc x0, x15, xzr __LF \ subs x12, x12, x1 __LF \ sbcs x13, x13, x0 __LF \ sbcs x14, x14, x16 __LF \ sbc x11, x11, x15 __LF \ lsl x16, x12, #32 __LF \ lsr x15, x12, #32 __LF \ subs x1, x16, x12 __LF \ sbc x0, x15, xzr __LF \ subs x13, x13, x1 __LF \ sbcs x14, x14, x0 __LF \ sbcs x11, x11, x16 __LF \ sbc x12, x12, x15 __LF \ stp x13, x14, [P0] __LF \ stp x11, x12, [P0+16] __LF \ mul x11, x5, x9 __LF \ mul x13, x6, x10 __LF \ umulh x12, x5, x9 __LF \ adds x16, x11, x13 __LF \ umulh x14, x6, x10 __LF \ adcs x0, x12, x14 __LF \ adcs x14, x14, xzr __LF \ adds x12, x12, x16 __LF \ adcs x13, x13, x0 __LF \ adcs x14, x14, xzr 
__LF \ subs x15, x5, x6 __LF \ cneg x15, x15, lo __LF \ csetm x1, lo __LF \ subs x0, x10, x9 __LF \ cneg x0, x0, lo __LF \ mul x16, x15, x0 __LF \ umulh x0, x15, x0 __LF \ cinv x1, x1, lo __LF \ eor x16, x16, x1 __LF \ eor x0, x0, x1 __LF \ cmn x1, #1 __LF \ adcs x12, x12, x16 __LF \ adcs x13, x13, x0 __LF \ adc x14, x14, x1 __LF \ subs x3, x5, x3 __LF \ sbcs x4, x6, x4 __LF \ ngc x5, xzr __LF \ cmn x5, #1 __LF \ eor x3, x3, x5 __LF \ adcs x3, x3, xzr __LF \ eor x4, x4, x5 __LF \ adcs x4, x4, xzr __LF \ subs x7, x7, x9 __LF \ sbcs x8, x8, x10 __LF \ ngc x9, xzr __LF \ cmn x9, #1 __LF \ eor x7, x7, x9 __LF \ adcs x7, x7, xzr __LF \ eor x8, x8, x9 __LF \ adcs x8, x8, xzr __LF \ eor x10, x5, x9 __LF \ ldp x15, x1, [P0] __LF \ adds x15, x11, x15 __LF \ adcs x1, x12, x1 __LF \ ldp x5, x9, [P0+16] __LF \ adcs x5, x13, x5 __LF \ adcs x9, x14, x9 __LF \ adc x2, xzr, xzr __LF \ mul x11, x3, x7 __LF \ mul x13, x4, x8 __LF \ umulh x12, x3, x7 __LF \ adds x16, x11, x13 __LF \ umulh x14, x4, x8 __LF \ adcs x0, x12, x14 __LF \ adcs x14, x14, xzr __LF \ adds x12, x12, x16 __LF \ adcs x13, x13, x0 __LF \ adcs x14, x14, xzr __LF \ subs x3, x3, x4 __LF \ cneg x3, x3, lo __LF \ csetm x4, lo __LF \ subs x0, x8, x7 __LF \ cneg x0, x0, lo __LF \ mul x16, x3, x0 __LF \ umulh x0, x3, x0 __LF \ cinv x4, x4, lo __LF \ eor x16, x16, x4 __LF \ eor x0, x0, x4 __LF \ cmn x4, #1 __LF \ adcs x12, x12, x16 __LF \ adcs x13, x13, x0 __LF \ adc x14, x14, x4 __LF \ cmn x10, #1 __LF \ eor x11, x11, x10 __LF \ adcs x11, x11, x15 __LF \ eor x12, x12, x10 __LF \ adcs x12, x12, x1 __LF \ eor x13, x13, x10 __LF \ adcs x13, x13, x5 __LF \ eor x14, x14, x10 __LF \ adcs x14, x14, x9 __LF \ adcs x3, x2, x10 __LF \ adcs x4, x10, xzr __LF \ adc x10, x10, xzr __LF \ adds x13, x13, x15 __LF \ adcs x14, x14, x1 __LF \ adcs x3, x3, x5 __LF \ adcs x4, x4, x9 __LF \ adc x10, x10, x2 __LF \ lsl x16, x11, #32 __LF \ lsr x15, x11, #32 __LF \ subs x1, x16, x11 __LF \ sbc x0, x15, xzr __LF \ subs x12, x12, x1 __LF \ sbcs x13, x13, x0 __LF \ sbcs x14, x14, x16 __LF \ sbc x11, x11, x15 __LF \ lsl x16, x12, #32 __LF \ lsr x15, x12, #32 __LF \ subs x1, x16, x12 __LF \ sbc x0, x15, xzr __LF \ subs x13, x13, x1 __LF \ sbcs x14, x14, x0 __LF \ sbcs x11, x11, x16 __LF \ sbc x12, x12, x15 __LF \ adds x3, x3, x11 __LF \ adcs x4, x4, x12 __LF \ adc x10, x10, xzr __LF \ add x2, x10, #1 __LF \ lsl x15, x2, #32 __LF \ sub x16, x15, x2 __LF \ adds x13, x13, x2 __LF \ adcs x14, x14, x16 __LF \ adcs x3, x3, xzr __LF \ adcs x4, x4, x15 __LF \ csetm x7, lo __LF \ adds x13, x13, x7 __LF \ and x16, x7, #0xffffffff00000000 __LF \ adcs x14, x14, x16 __LF \ adcs x3, x3, x7 __LF \ and x15, x7, #0xfffffffeffffffff __LF \ adc x4, x4, x15 __LF \ stp x13, x14, [P0] __LF \ stp x3, x4, [P0+16] // Corresponds to bignum_montsqr_sm2 with x0 in place of x17 #define montsqr_sm2(P0,P1) \ ldp x2, x3, [P1] __LF \ ldp x4, x5, [P1+16] __LF \ umull x15, w2, w2 __LF \ lsr x11, x2, #32 __LF \ umull x16, w11, w11 __LF \ umull x11, w2, w11 __LF \ adds x15, x15, x11, lsl #33 __LF \ lsr x11, x11, #31 __LF \ adc x16, x16, x11 __LF \ umull x0, w3, w3 __LF \ lsr x11, x3, #32 __LF \ umull x1, w11, w11 __LF \ umull x11, w3, w11 __LF \ mul x12, x2, x3 __LF \ umulh x13, x2, x3 __LF \ adds x0, x0, x11, lsl #33 __LF \ lsr x11, x11, #31 __LF \ adc x1, x1, x11 __LF \ adds x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adc x1, x1, xzr __LF \ adds x16, x16, x12 __LF \ adcs x0, x0, x13 __LF \ adc x1, x1, xzr __LF \ lsl x12, x15, #32 __LF \ lsr x11, x15, #32 __LF \ subs x14, x12, x15 __LF \ sbc x13, x11, xzr 
__LF \ subs x16, x16, x14 __LF \ sbcs x0, x0, x13 __LF \ sbcs x1, x1, x12 __LF \ sbc x15, x15, x11 __LF \ lsl x12, x16, #32 __LF \ lsr x11, x16, #32 __LF \ subs x14, x12, x16 __LF \ sbc x13, x11, xzr __LF \ subs x0, x0, x14 __LF \ sbcs x1, x1, x13 __LF \ sbcs x15, x15, x12 __LF \ sbc x16, x16, x11 __LF \ mul x6, x2, x4 __LF \ mul x14, x3, x5 __LF \ umulh x8, x2, x4 __LF \ subs x10, x2, x3 __LF \ cneg x10, x10, lo __LF \ csetm x13, lo __LF \ subs x12, x5, x4 __LF \ cneg x12, x12, lo __LF \ mul x11, x10, x12 __LF \ umulh x12, x10, x12 __LF \ cinv x13, x13, lo __LF \ eor x11, x11, x13 __LF \ eor x12, x12, x13 __LF \ adds x7, x6, x8 __LF \ adc x8, x8, xzr __LF \ umulh x9, x3, x5 __LF \ adds x7, x7, x14 __LF \ adcs x8, x8, x9 __LF \ adc x9, x9, xzr __LF \ adds x8, x8, x14 __LF \ adc x9, x9, xzr __LF \ cmn x13, #1 __LF \ adcs x7, x7, x11 __LF \ adcs x8, x8, x12 __LF \ adc x9, x9, x13 __LF \ adds x6, x6, x6 __LF \ adcs x7, x7, x7 __LF \ adcs x8, x8, x8 __LF \ adcs x9, x9, x9 __LF \ adc x10, xzr, xzr __LF \ adds x6, x6, x0 __LF \ adcs x7, x7, x1 __LF \ adcs x8, x8, x15 __LF \ adcs x9, x9, x16 __LF \ adc x10, x10, xzr __LF \ lsl x12, x6, #32 __LF \ lsr x11, x6, #32 __LF \ subs x14, x12, x6 __LF \ sbc x13, x11, xzr __LF \ subs x7, x7, x14 __LF \ sbcs x8, x8, x13 __LF \ sbcs x9, x9, x12 __LF \ sbc x14, x6, x11 __LF \ adds x10, x10, x14 __LF \ adc x6, xzr, xzr __LF \ lsl x12, x7, #32 __LF \ lsr x11, x7, #32 __LF \ subs x14, x12, x7 __LF \ sbc x13, x11, xzr __LF \ subs x8, x8, x14 __LF \ sbcs x9, x9, x13 __LF \ sbcs x10, x10, x12 __LF \ sbc x14, x7, x11 __LF \ adds x6, x6, x14 __LF \ adc x7, xzr, xzr __LF \ mul x11, x4, x4 __LF \ adds x8, x8, x11 __LF \ mul x12, x5, x5 __LF \ umulh x11, x4, x4 __LF \ adcs x9, x9, x11 __LF \ adcs x10, x10, x12 __LF \ umulh x12, x5, x5 __LF \ adcs x6, x6, x12 __LF \ adc x7, x7, xzr __LF \ mul x11, x4, x5 __LF \ umulh x12, x4, x5 __LF \ adds x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adc x13, xzr, xzr __LF \ adds x9, x9, x11 __LF \ adcs x10, x10, x12 __LF \ adcs x6, x6, x13 __LF \ adcs x7, x7, xzr __LF \ mov x11, #-4294967296 __LF \ adds x5, x8, #1 __LF \ sbcs x11, x9, x11 __LF \ mov x13, #-4294967297 __LF \ adcs x12, x10, xzr __LF \ sbcs x13, x6, x13 __LF \ sbcs xzr, x7, xzr __LF \ csel x8, x5, x8, hs __LF \ csel x9, x11, x9, hs __LF \ csel x10, x12, x10, hs __LF \ csel x6, x13, x6, hs __LF \ stp x8, x9, [P0] __LF \ stp x10, x6, [P0+16] // Corresponds exactly to bignum_sub_sm2 #define sub_sm2(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ csetm x3, cc __LF \ adds x5, x5, x3 __LF \ and x4, x3, #0xffffffff00000000 __LF \ adcs x6, x6, x4 __LF \ adcs x7, x7, x3 __LF \ and x4, x3, #0xfffffffeffffffff __LF \ adc x8, x8, x4 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] S2N_BN_SYMBOL(sm2_montjadd): CFI_START // Save regs and make room on stack for temporary variables CFI_PUSH2(x19,x20) CFI_DEC_SP(NSPACE) // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 mov input_y, x2 // Main code, just a sequence of basic field operations // 12 * multiply + 4 * square + 7 * subtract montsqr_sm2(z1sq,z_1) montsqr_sm2(z2sq,z_2) montmul_sm2(y1a,z_2,y_1) montmul_sm2(y2a,z_1,y_2) montmul_sm2(x2a,z1sq,x_2) montmul_sm2(x1a,z2sq,x_1) montmul_sm2(y2a,z1sq,y2a) montmul_sm2(y1a,z2sq,y1a) sub_sm2(xd,x2a,x1a) sub_sm2(yd,y2a,y1a) montsqr_sm2(zz,xd) montsqr_sm2(ww,yd) montmul_sm2(zzx1,zz,x1a) montmul_sm2(zzx2,zz,x2a) 
sub_sm2(resx,ww,zzx1) sub_sm2(t1,zzx2,zzx1) montmul_sm2(xd,xd,z_1) sub_sm2(resx,resx,zzx2) sub_sm2(t2,zzx1,resx) montmul_sm2(t1,t1,y1a) montmul_sm2(resz,xd,z_2) montmul_sm2(t2,yd,t2) sub_sm2(resy,t2,t1) // Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0 // The condition codes get set by a comparison (P2 != 0) - (P1 != 0) // So "HI" <=> CF /\ ~ZF <=> P1 = 0 /\ ~(P2 = 0) // and "LO" <=> ~CF <=> ~(P1 = 0) /\ P2 = 0 ldp x0, x1, [z_1] ldp x2, x3, [z_1+16] orr x12, x0, x1 orr x13, x2, x3 orr x12, x12, x13 cmp x12, xzr cset x12, ne ldp x4, x5, [z_2] ldp x6, x7, [z_2+16] orr x13, x4, x5 orr x14, x6, x7 orr x13, x13, x14 cmp x13, xzr cset x13, ne cmp x13, x12 // Multiplex the outputs accordingly, re-using the z's in registers ldp x8, x9, [resz] csel x8, x0, x8, lo csel x9, x1, x9, lo csel x8, x4, x8, hi csel x9, x5, x9, hi ldp x10, x11, [resz+16] csel x10, x2, x10, lo csel x11, x3, x11, lo csel x10, x6, x10, hi csel x11, x7, x11, hi ldp x12, x13, [x_1] ldp x0, x1, [resx] csel x0, x12, x0, lo csel x1, x13, x1, lo ldp x12, x13, [x_2] csel x0, x12, x0, hi csel x1, x13, x1, hi ldp x12, x13, [x_1+16] ldp x2, x3, [resx+16] csel x2, x12, x2, lo csel x3, x13, x3, lo ldp x12, x13, [x_2+16] csel x2, x12, x2, hi csel x3, x13, x3, hi ldp x12, x13, [y_1] ldp x4, x5, [resy] csel x4, x12, x4, lo csel x5, x13, x5, lo ldp x12, x13, [y_2] csel x4, x12, x4, hi csel x5, x13, x5, hi ldp x12, x13, [y_1+16] ldp x6, x7, [resy+16] csel x6, x12, x6, lo csel x7, x13, x7, lo ldp x12, x13, [y_2+16] csel x6, x12, x6, hi csel x7, x13, x7, hi // Finally store back the multiplexed values stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [y_3] stp x6, x7, [y_3+16] stp x8, x9, [z_3] stp x10, x11, [z_3+16] // Restore registers and return CFI_INC_SP(NSPACE) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(sm2_montjadd) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
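// For reference, the sequence of field operations above computes the
// textbook Jacobian addition formulas: writing
//
//   A = x1 * z2^2 (x1a),   B = x2 * z1^2 (x2a),
//   C = y1 * z2^3 (y1a),   D = y2 * z1^3 (y2a),
//   E = B - A (xd),        F = D - C (yd),
//
// the outputs are
//
//   x3 = F^2 - E^2 * (A + B)             (resx)
//   y3 = F * (A * E^2 - x3) - C * E^3    (resy)
//   z3 = E * z1 * z2                     (resz)
//
// using E^3 + 2 * A * E^2 = E^2 * (A + B) to account for the two
// successive subtractions of zzx1 = A * E^2 and zzx2 = B * E^2 from
// ww = F^2.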
wlsfx/bnbb
2,252
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_optneg_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Optionally negate modulo p_sm2, z := (-x) mod p_sm2 (if p nonzero) or // z := x (if p zero), assuming x reduced // Inputs p, x[4]; output z[4] // // extern void bignum_optneg_sm2(uint64_t z[static 4], uint64_t p, // const uint64_t x[static 4]); // // Standard ARM ABI: X0 = z, X1 = p, X2 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_optneg_sm2) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_optneg_sm2) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_optneg_sm2) .text .balign 4 #define z x0 #define p x1 #define x x2 #define d0 x3 #define d1 x4 #define d2 x5 #define d3 x6 #define n0 x7 #define n1 x8 #define n2 x9 #define n3 x10 S2N_BN_SYMBOL(bignum_optneg_sm2): CFI_START // Load the 4 digits of x ldp d0, d1, [x] ldp d2, d3, [x, #16] // Adjust p by zeroing it if the input is zero (to avoid giving -0 = p, which // is not strictly reduced even though it's correct modulo p) orr n0, d0, d1 orr n1, d2, d3 orr n2, n0, n1 cmp n2, #0 csel p, xzr, p, eq // Load the nontrivial words of p_sm2 = [n3;-1;n1;-1] mov n2, #0xffffffffffffffff mov n1, #0xffffffff00000000 mov n3, #0xfffffffeffffffff // Do the subtraction, which by hypothesis does not underflow subs n0, n2, d0 sbcs n1, n1, d1 sbcs n2, n2, d2 sbc n3, n3, d3 // Set condition code if original x is nonzero and p was nonzero cmp p, #0 // Hence multiplex and write back csel n0, n0, d0, ne csel n1, n1, d1, ne csel n2, n2, d2, ne csel n3, n3, d3, ne stp n0, n1, [z] stp n2, n3, [z, #16] // Return CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_optneg_sm2) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
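// As a cross-check on the constants above: expanding
// p_sm2 = 2^256 - 2^224 - 2^96 + 2^64 - 1 into 64-bit digits gives
//
//   [0xfffffffeffffffff; 0xffffffffffffffff;
//    0xffffffff00000000; 0xffffffffffffffff]
//
// i.e. the [n3; -1; n1; -1] pattern noted in the comment, with the
// all-1s register n2 serving for both digits 0 and 2 of the subtraction.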
wlsfx/bnbb
2,046
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_add_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Add modulo p_sm2, z := (x + y) mod p_sm2, assuming x and y reduced // Inputs x[4], y[4]; output z[4] // // extern void bignum_add_sm2(uint64_t z[static 4], const uint64_t x[static 4], // const uint64_t y[static 4]); // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_add_sm2) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_add_sm2) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_add_sm2) .text .balign 4 #define z x0 #define x x1 #define y x2 #define c x3 #define d0 x4 #define d1 x5 #define d2 x6 #define d3 x7 #define n0 x8 #define n1 x9 #define n2 x10 #define n3 x11 S2N_BN_SYMBOL(bignum_add_sm2): CFI_START // First just add the numbers as [c;d3;d2;d1;d0] ldp d0, d1, [x] ldp n0, n1, [y] adds d0, d0, n0 adcs d1, d1, n1 ldp d2, d3, [x, #16] ldp n2, n3, [y, #16] adcs d2, d2, n2 adcs d3, d3, n3 adc c, xzr, xzr // Now let [c;n3;n2;n1;n0] = [c;d3;d2;d1;d0] - p_sm2 subs n0, d0, #0xffffffffffffffff mov n1, #0xffffffff00000000 sbcs n1, d1, n1 adcs n2, d2, xzr mov n3, #0xfffffffeffffffff sbcs n3, d3, n3 sbcs c, c, xzr // Select result according to whether (x + y) - p_sm2 < 0 csel d0, d0, n0, cc csel d1, d1, n1, cc csel d2, d2, n2, cc csel d3, d3, n3, cc // Store the result stp d0, d1, [z] stp d2, d3, [z, #16] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_add_sm2) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
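// The one superficially odd instruction above is "adcs n2, d2, xzr" in
// the middle of a subtraction chain. Digit 2 of p_sm2 is all 1s, and
// subtracting an all-1s word with a borrow-in satisfies
//
//   d2 - 0xffffffffffffffff - (1 - CF) == d2 + 0 + CF (mod 2^64)
//
// with the same carry-out, so it can be encoded as an add-with-carry of
// zero and the all-1s constant never needs to be materialized.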
wlsfx/bnbb
21,822
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/sm2_montjmixadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point mixed addition on GM/T 0003-2012 curve SM2 in Montgomery-Jacobian coordinates // // extern void sm2_montjmixadd(uint64_t p3[static 12], // const uint64_t p1[static 12], // const uint64_t p2[static 8]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^256 * x) mod p_sm2. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // The "mixed" part means that p2 only has x and y coordinates, with the // implicit z coordinate assumed to be the identity. // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjmixadd) S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjmixadd) S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjmixadd) .text .balign 4 // Size of individual field elements #define NUMSIZE 32 // Stable homes for input arguments during main code sequence #define input_z x17 #define input_x x19 #define input_y x20 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // #NSPACE is the total stack needed for these temporaries #define zp2 sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define NSPACE NUMSIZE*6 // Corresponds to bignum_montmul_sm2 with x0 in place of x17 #define montmul_sm2(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x5, x6, [P1+16] __LF \ ldp x7, x8, [P2] __LF \ ldp x9, x10, [P2+16] __LF \ mul x11, x3, x7 __LF \ mul x13, x4, x8 __LF \ umulh x12, x3, x7 __LF \ adds x16, x11, x13 __LF \ umulh x14, x4, x8 __LF \ adcs x0, x12, x14 __LF \ adcs x14, x14, xzr __LF \ adds x12, x12, x16 __LF \ adcs x13, x13, x0 __LF \ adcs x14, x14, xzr __LF \ subs x15, x3, x4 __LF \ cneg x15, x15, lo __LF \ csetm x1, lo __LF \ subs x0, x8, x7 __LF \ cneg x0, x0, lo __LF \ mul x16, x15, x0 __LF \ umulh x0, x15, x0 __LF \ cinv x1, x1, lo __LF \ eor x16, x16, x1 __LF \ eor x0, x0, x1 __LF \ cmn x1, #1 __LF \ adcs x12, x12, x16 __LF \ adcs x13, x13, x0 __LF \ adc x14, x14, x1 __LF \ lsl x16, x11, #32 __LF \ lsr x15, x11, #32 __LF \ subs x1, x16, x11 __LF \ sbc x0, x15, xzr __LF \ subs x12, x12, x1 __LF \ sbcs x13, x13, x0 __LF \ sbcs x14, x14, x16 __LF \ sbc x11, x11, x15 __LF \ lsl x16, x12, #32 __LF \ lsr x15, x12, #32 __LF \ subs x1, x16, x12 __LF \ sbc x0, x15, xzr __LF \ subs x13, x13, x1 __LF \ sbcs x14, x14, x0 __LF \ sbcs x11, x11, x16 __LF \ sbc x12, x12, x15 __LF \ stp x13, x14, [P0] __LF \ stp x11, x12, [P0+16] __LF \ mul x11, x5, x9 __LF \ mul x13, x6, x10 __LF \ umulh x12, x5, x9 __LF \ adds x16, x11, x13 __LF \ umulh x14, x6, x10 __LF \ adcs x0, x12, x14 __LF \ adcs x14, x14, xzr __LF \ adds x12, x12, x16 __LF \ adcs x13, x13, x0 __LF \ 
adcs x14, x14, xzr __LF \ subs x15, x5, x6 __LF \ cneg x15, x15, lo __LF \ csetm x1, lo __LF \ subs x0, x10, x9 __LF \ cneg x0, x0, lo __LF \ mul x16, x15, x0 __LF \ umulh x0, x15, x0 __LF \ cinv x1, x1, lo __LF \ eor x16, x16, x1 __LF \ eor x0, x0, x1 __LF \ cmn x1, #1 __LF \ adcs x12, x12, x16 __LF \ adcs x13, x13, x0 __LF \ adc x14, x14, x1 __LF \ subs x3, x5, x3 __LF \ sbcs x4, x6, x4 __LF \ ngc x5, xzr __LF \ cmn x5, #1 __LF \ eor x3, x3, x5 __LF \ adcs x3, x3, xzr __LF \ eor x4, x4, x5 __LF \ adcs x4, x4, xzr __LF \ subs x7, x7, x9 __LF \ sbcs x8, x8, x10 __LF \ ngc x9, xzr __LF \ cmn x9, #1 __LF \ eor x7, x7, x9 __LF \ adcs x7, x7, xzr __LF \ eor x8, x8, x9 __LF \ adcs x8, x8, xzr __LF \ eor x10, x5, x9 __LF \ ldp x15, x1, [P0] __LF \ adds x15, x11, x15 __LF \ adcs x1, x12, x1 __LF \ ldp x5, x9, [P0+16] __LF \ adcs x5, x13, x5 __LF \ adcs x9, x14, x9 __LF \ adc x2, xzr, xzr __LF \ mul x11, x3, x7 __LF \ mul x13, x4, x8 __LF \ umulh x12, x3, x7 __LF \ adds x16, x11, x13 __LF \ umulh x14, x4, x8 __LF \ adcs x0, x12, x14 __LF \ adcs x14, x14, xzr __LF \ adds x12, x12, x16 __LF \ adcs x13, x13, x0 __LF \ adcs x14, x14, xzr __LF \ subs x3, x3, x4 __LF \ cneg x3, x3, lo __LF \ csetm x4, lo __LF \ subs x0, x8, x7 __LF \ cneg x0, x0, lo __LF \ mul x16, x3, x0 __LF \ umulh x0, x3, x0 __LF \ cinv x4, x4, lo __LF \ eor x16, x16, x4 __LF \ eor x0, x0, x4 __LF \ cmn x4, #1 __LF \ adcs x12, x12, x16 __LF \ adcs x13, x13, x0 __LF \ adc x14, x14, x4 __LF \ cmn x10, #1 __LF \ eor x11, x11, x10 __LF \ adcs x11, x11, x15 __LF \ eor x12, x12, x10 __LF \ adcs x12, x12, x1 __LF \ eor x13, x13, x10 __LF \ adcs x13, x13, x5 __LF \ eor x14, x14, x10 __LF \ adcs x14, x14, x9 __LF \ adcs x3, x2, x10 __LF \ adcs x4, x10, xzr __LF \ adc x10, x10, xzr __LF \ adds x13, x13, x15 __LF \ adcs x14, x14, x1 __LF \ adcs x3, x3, x5 __LF \ adcs x4, x4, x9 __LF \ adc x10, x10, x2 __LF \ lsl x16, x11, #32 __LF \ lsr x15, x11, #32 __LF \ subs x1, x16, x11 __LF \ sbc x0, x15, xzr __LF \ subs x12, x12, x1 __LF \ sbcs x13, x13, x0 __LF \ sbcs x14, x14, x16 __LF \ sbc x11, x11, x15 __LF \ lsl x16, x12, #32 __LF \ lsr x15, x12, #32 __LF \ subs x1, x16, x12 __LF \ sbc x0, x15, xzr __LF \ subs x13, x13, x1 __LF \ sbcs x14, x14, x0 __LF \ sbcs x11, x11, x16 __LF \ sbc x12, x12, x15 __LF \ adds x3, x3, x11 __LF \ adcs x4, x4, x12 __LF \ adc x10, x10, xzr __LF \ add x2, x10, #1 __LF \ lsl x15, x2, #32 __LF \ sub x16, x15, x2 __LF \ adds x13, x13, x2 __LF \ adcs x14, x14, x16 __LF \ adcs x3, x3, xzr __LF \ adcs x4, x4, x15 __LF \ csetm x7, lo __LF \ adds x13, x13, x7 __LF \ and x16, x7, #0xffffffff00000000 __LF \ adcs x14, x14, x16 __LF \ adcs x3, x3, x7 __LF \ and x15, x7, #0xfffffffeffffffff __LF \ adc x4, x4, x15 __LF \ stp x13, x14, [P0] __LF \ stp x3, x4, [P0+16] // Corresponds to bignum_montsqr_sm2 with x0 in place of x17 #define montsqr_sm2(P0,P1) \ ldp x2, x3, [P1] __LF \ ldp x4, x5, [P1+16] __LF \ umull x15, w2, w2 __LF \ lsr x11, x2, #32 __LF \ umull x16, w11, w11 __LF \ umull x11, w2, w11 __LF \ adds x15, x15, x11, lsl #33 __LF \ lsr x11, x11, #31 __LF \ adc x16, x16, x11 __LF \ umull x0, w3, w3 __LF \ lsr x11, x3, #32 __LF \ umull x1, w11, w11 __LF \ umull x11, w3, w11 __LF \ mul x12, x2, x3 __LF \ umulh x13, x2, x3 __LF \ adds x0, x0, x11, lsl #33 __LF \ lsr x11, x11, #31 __LF \ adc x1, x1, x11 __LF \ adds x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adc x1, x1, xzr __LF \ adds x16, x16, x12 __LF \ adcs x0, x0, x13 __LF \ adc x1, x1, xzr __LF \ lsl x12, x15, #32 __LF \ lsr x11, x15, #32 __LF \ subs x14, x12, x15 __LF \ 
sbc x13, x11, xzr __LF \ subs x16, x16, x14 __LF \ sbcs x0, x0, x13 __LF \ sbcs x1, x1, x12 __LF \ sbc x15, x15, x11 __LF \ lsl x12, x16, #32 __LF \ lsr x11, x16, #32 __LF \ subs x14, x12, x16 __LF \ sbc x13, x11, xzr __LF \ subs x0, x0, x14 __LF \ sbcs x1, x1, x13 __LF \ sbcs x15, x15, x12 __LF \ sbc x16, x16, x11 __LF \ mul x6, x2, x4 __LF \ mul x14, x3, x5 __LF \ umulh x8, x2, x4 __LF \ subs x10, x2, x3 __LF \ cneg x10, x10, lo __LF \ csetm x13, lo __LF \ subs x12, x5, x4 __LF \ cneg x12, x12, lo __LF \ mul x11, x10, x12 __LF \ umulh x12, x10, x12 __LF \ cinv x13, x13, lo __LF \ eor x11, x11, x13 __LF \ eor x12, x12, x13 __LF \ adds x7, x6, x8 __LF \ adc x8, x8, xzr __LF \ umulh x9, x3, x5 __LF \ adds x7, x7, x14 __LF \ adcs x8, x8, x9 __LF \ adc x9, x9, xzr __LF \ adds x8, x8, x14 __LF \ adc x9, x9, xzr __LF \ cmn x13, #1 __LF \ adcs x7, x7, x11 __LF \ adcs x8, x8, x12 __LF \ adc x9, x9, x13 __LF \ adds x6, x6, x6 __LF \ adcs x7, x7, x7 __LF \ adcs x8, x8, x8 __LF \ adcs x9, x9, x9 __LF \ adc x10, xzr, xzr __LF \ adds x6, x6, x0 __LF \ adcs x7, x7, x1 __LF \ adcs x8, x8, x15 __LF \ adcs x9, x9, x16 __LF \ adc x10, x10, xzr __LF \ lsl x12, x6, #32 __LF \ lsr x11, x6, #32 __LF \ subs x14, x12, x6 __LF \ sbc x13, x11, xzr __LF \ subs x7, x7, x14 __LF \ sbcs x8, x8, x13 __LF \ sbcs x9, x9, x12 __LF \ sbc x14, x6, x11 __LF \ adds x10, x10, x14 __LF \ adc x6, xzr, xzr __LF \ lsl x12, x7, #32 __LF \ lsr x11, x7, #32 __LF \ subs x14, x12, x7 __LF \ sbc x13, x11, xzr __LF \ subs x8, x8, x14 __LF \ sbcs x9, x9, x13 __LF \ sbcs x10, x10, x12 __LF \ sbc x14, x7, x11 __LF \ adds x6, x6, x14 __LF \ adc x7, xzr, xzr __LF \ mul x11, x4, x4 __LF \ adds x8, x8, x11 __LF \ mul x12, x5, x5 __LF \ umulh x11, x4, x4 __LF \ adcs x9, x9, x11 __LF \ adcs x10, x10, x12 __LF \ umulh x12, x5, x5 __LF \ adcs x6, x6, x12 __LF \ adc x7, x7, xzr __LF \ mul x11, x4, x5 __LF \ umulh x12, x4, x5 __LF \ adds x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adc x13, xzr, xzr __LF \ adds x9, x9, x11 __LF \ adcs x10, x10, x12 __LF \ adcs x6, x6, x13 __LF \ adcs x7, x7, xzr __LF \ mov x11, #-4294967296 __LF \ adds x5, x8, #1 __LF \ sbcs x11, x9, x11 __LF \ mov x13, #-4294967297 __LF \ adcs x12, x10, xzr __LF \ sbcs x13, x6, x13 __LF \ sbcs xzr, x7, xzr __LF \ csel x8, x5, x8, hs __LF \ csel x9, x11, x9, hs __LF \ csel x10, x12, x10, hs __LF \ csel x6, x13, x6, hs __LF \ stp x8, x9, [P0] __LF \ stp x10, x6, [P0+16] // Corresponds exactly to bignum_sub_sm2 #define sub_sm2(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ csetm x3, cc __LF \ adds x5, x5, x3 __LF \ and x4, x3, #0xffffffff00000000 __LF \ adcs x6, x6, x4 __LF \ adcs x7, x7, x3 __LF \ and x4, x3, #0xfffffffeffffffff __LF \ adc x8, x8, x4 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] S2N_BN_SYMBOL(sm2_montjmixadd): CFI_START // Save regs and make room on stack for temporary variables CFI_PUSH2(x19,x20) CFI_DEC_SP(NSPACE) // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 mov input_y, x2 // Main code, just a sequence of basic field operations // 8 * multiply + 3 * square + 7 * subtract montsqr_sm2(zp2,z_1) montmul_sm2(y2a,z_1,y_2) montmul_sm2(x2a,zp2,x_2) montmul_sm2(y2a,zp2,y2a) sub_sm2(xd,x2a,x_1) sub_sm2(yd,y2a,y_1) montsqr_sm2(zz,xd) montsqr_sm2(ww,yd) montmul_sm2(zzx1,zz,x_1) montmul_sm2(zzx2,zz,x2a) sub_sm2(resx,ww,zzx1) sub_sm2(t1,zzx2,zzx1) montmul_sm2(resz,xd,z_1) 
sub_sm2(resx,resx,zzx2) sub_sm2(t2,zzx1,resx) montmul_sm2(t1,t1,y_1) montmul_sm2(t2,yd,t2) sub_sm2(resy,t2,t1) // Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence) ldp x0, x1, [z_1] ldp x2, x3, [z_1+16] orr x4, x0, x1 orr x5, x2, x3 orr x4, x4, x5 cmp x4, xzr // Multiplex: if p1 <> 0 just copy the computed result from the staging area. // If p1 = 0 then return the point p2 augmented with a z = 1 coordinate (in // Montgomery form so not the simple constant 1 but rather 2^256 - p_sm2), // hence giving 0 + p2 = p2 for the final result. ldp x0, x1, [resx] ldp x12, x13, [x_2] csel x0, x0, x12, ne csel x1, x1, x13, ne ldp x2, x3, [resx+16] ldp x12, x13, [x_2+16] csel x2, x2, x12, ne csel x3, x3, x13, ne ldp x4, x5, [resy] ldp x12, x13, [y_2] csel x4, x4, x12, ne csel x5, x5, x13, ne ldp x6, x7, [resy+16] ldp x12, x13, [y_2+16] csel x6, x6, x12, ne csel x7, x7, x13, ne ldp x8, x9, [resz] mov x12, #0x0000000000000001 mov x13, #0x00000000ffffffff csel x8, x8, x12, ne csel x9, x9, x13, ne ldp x10, x11, [resz+16] mov x13, #0x0000000100000000 csel x10, x10, xzr, ne csel x11, x11, x13, ne stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [y_3] stp x6, x7, [y_3+16] stp x8, x9, [z_3] stp x10, x11, [z_3+16] // Restore registers and return CFI_INC_SP(NSPACE) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(sm2_montjmixadd) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
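The multiplex branch above substitutes z = 1 in Montgomery form, quoted as 2^256 - p_sm2. A minimal Python sketch (not part of the upstream sources) checking that the four constant words loaded for z_3 are exactly that value, assuming p_sm2 = 2^256 - 2^224 - 2^96 + 2^64 - 1 as stated in the SM2 file headers:

P_SM2 = 2**256 - 2**224 - 2**96 + 2**64 - 1
words = [0x0000000000000001, 0x00000000ffffffff,      # z_3 words, least
         0x0000000000000000, 0x0000000100000000]      # significant first
val = sum(w << (64 * i) for i, w in enumerate(words))
assert val == 2**256 % P_SM2 == 2**256 - P_SM2        # Montgomery form of 1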
wlsfx/bnbb
2,075
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_double_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Double modulo p_sm2, z := (2 * x) mod p_sm2, assuming x reduced
// Input x[4]; output z[4]
//
// extern void bignum_double_sm2(uint64_t z[static 4], const uint64_t x[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_double_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_double_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_double_sm2)
.text
.balign 4

#define z x0
#define x x1

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define c x6

#define n0 x7
#define n1 x8
#define n2 x9
#define n3 x10

S2N_BN_SYMBOL(bignum_double_sm2):

CFI_START

// Double the input number as 2 * x = c + [d3; d2; d1; d0]
// It's worth considering doing this with extr...63 instead

ldp d0, d1, [x]
ldp d2, d3, [x, #16]
adds d0, d0, d0
adcs d1, d1, d1
adcs d2, d2, d2
adcs d3, d3, d3
adc c, xzr, xzr

// Subtract p_sm2 to give 2 * x - p_sm2 = c + [n3; n2; n1; n0]

subs n0, d0, #0xffffffffffffffff
mov n1, #0xffffffff00000000
sbcs n1, d1, n1
adcs n2, d2, xzr
mov n3, #0xfffffffeffffffff
sbcs n3, d3, n3
sbcs c, c, xzr

// Now CF is set (because of inversion) if 2 * x >= p_sm2, in which case the
// correct result is [n3; n2; n1; n0], otherwise [d3; d2; d1; d0]

csel d0, d0, n0, cc
csel d1, d1, n1, cc
csel d2, d2, n2, cc
csel d3, d3, n3, cc

// Store the result

stp d0, d1, [z]
stp d2, d3, [z, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_double_sm2)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
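Since the input is assumed reduced, 2 * x < 2 * p_sm2 and a single conditional subtraction completes the reduction. A minimal Python model of this double-then-correct flow (illustrative sketch, not upstream code):

import random

P_SM2 = 2**256 - 2**224 - 2**96 + 2**64 - 1

def double_mod_sm2(x):
    assert 0 <= x < P_SM2          # "assuming x reduced"
    z = 2 * x                      # c + [d3; d2; d1; d0], c = the 2^256 bit
    t = z - P_SM2                  # borrows exactly when 2 * x < p_sm2
    return z if t < 0 else t       # the csel choice on the inverted carry

for _ in range(1000):
    x = random.randrange(P_SM2)
    assert double_mod_sm2(x) == 2 * x % P_SM2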
wlsfx/bnbb
3,951
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_mod_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Reduce modulo field characteristic, z := x mod p_sm2
// Input x[k]; output z[4]
//
// extern void bignum_mod_sm2(uint64_t z[static 4], uint64_t k, const uint64_t *x);
//
// Standard ARM ABI: X0 = z, X1 = k, X2 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_sm2)
.text
.balign 4

#define z x0
#define k x1
#define x x2

#define m0 x3
#define m1 x4
#define m2 x5
#define m3 x6

#define t0 x7
#define t1 x8
#define t2 x9
#define t3 x10
#define t4 x11

#define n1 x12
#define n3 x13

#define q x14

S2N_BN_SYMBOL(bignum_mod_sm2):

CFI_START

// If the input is already <= 3 words long, go to a trivial "copy" path

cmp k, #4
bcc Lbignum_mod_sm2_short

// Otherwise load the top 4 digits (top-down) and reduce k by 4

sub k, k, #4
lsl t0, k, #3
add t0, t0, x
ldp m2, m3, [t0, #16]
ldp m0, m1, [t0]

// Load the complicated words of p_sm2 = [n3;-1;n1;-1]

mov n1, #0xffffffff00000000
mov n3, #0xfffffffeffffffff

// Reduce the top 4 digits mod p_sm2 (a conditional subtraction of p_sm2)

subs t0, m0, #-1
sbcs t1, m1, n1
adcs t2, m2, xzr
sbcs t3, m3, n3
csel m0, m0, t0, cc
csel m1, m1, t1, cc
csel m2, m2, t2, cc
csel m3, m3, t3, cc

// Now do (k-4) iterations of 5->4 word modular reduction

cbz k, Lbignum_mod_sm2_writeback

Lbignum_mod_sm2_loop:

// Decrement k and load the next digit as t0. We then want to reduce
// [m3;m2;m1;m0;t0] |-> [m3;m2;m1;m0]; the shuffling downwards is absorbed
// into the various ALU operations

sub k, k, #1
ldr t0, [x, k, lsl #3]

// Writing the input, with the new zeroth digit t0 appended, as
// z = 2^256 * m3 + 2^192 * m2 + t, our intended quotient approximation is
// MIN ((m3 * (1 + 2^32 + 2^64) + m2 + 2^64) >> 64) (2^64 - 1)

adds t3, m2, m3
mov t2, #1
adc t1, m3, t2
add t2, m3, t3, lsr #32
adds q, t1, t2, lsr #32
cinv q, q, cs

// Let t3 = q<<32 and t4 = q>>32 then [t2;t1] = 2^32 * q - q

lsl t3, q, #32
subs t1, t3, q
lsr t4, q, #32
sbc t2, t4, xzr

// Do the basic correction to get [t4;t3;t2;t1;t0] = [m3;m2;m1;m0;t0] - q * p

adds t0, t0, q
adcs t1, t1, m0
sub m3, m3, q
adcs t2, t2, m1
adcs t3, t3, m2
adc t4, t4, m3

// Use top word as mask to correct

adds m0, t0, t4
and t0, n1, t4
adcs m1, t1, t0
adcs m2, t2, t4
and t0, n3, t4
adc m3, t3, t0

cbnz k, Lbignum_mod_sm2_loop

// Finally write back [m3;m2;m1;m0] and return

Lbignum_mod_sm2_writeback:

stp m0, m1, [z]
stp m2, m3, [z, #16]
CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_mod_sm2)

// Short case: just copy the input with zero-padding

Lbignum_mod_sm2_short:

mov m0, xzr
mov m1, xzr
mov m2, xzr
mov m3, xzr
cbz k, Lbignum_mod_sm2_writeback
ldr m0, [x]
subs k, k, #1
beq Lbignum_mod_sm2_writeback
ldr m1, [x, #8]
subs k, k, #1
beq Lbignum_mod_sm2_writeback
ldr m2, [x, #16]
b Lbignum_mod_sm2_writeback

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
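A small Python model of one 5->4 iteration of the loop above, using the same MIN quotient approximation; the asserts state the invariant the code is claimed to maintain (a sketch under the p_sm2 value from the file headers, not upstream code):

import random

P_SM2 = 2**256 - 2**224 - 2**96 + 2**64 - 1
M64 = 2**64 - 1

def step(m, d):                    # one 5->4 iteration: [m;d] -> reduced m
    z = (m << 64) | d
    m3, m2 = m >> 192, (m >> 128) & M64
    q = min((m3 * (1 + 2**32 + 2**64) + m2 + 2**64) >> 64, M64)
    r = z - q * P_SM2              # basic correction, possibly negative
    if r < 0:                      # top word then acts as an all-ones mask
        r += P_SM2
    assert 0 <= r < P_SM2 and r == z % P_SM2
    return r

m = random.randrange(P_SM2)
for _ in range(100):
    m = step(m, random.randrange(2**64))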
wlsfx/bnbb
3,796
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_deamont_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Convert from almost-Montgomery form, z := (x / 2^256) mod p_sm2
// Input x[4]; output z[4]
//
// extern void bignum_deamont_sm2(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// Convert a 4-digit bignum x out of its (optionally almost) Montgomery form,
// "almost" meaning any 4-digit input will work, with no range restriction.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_deamont_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_deamont_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_deamont_sm2)
.text
.balign 4

// ---------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d3;d2;d1;d0] and returns result in [d4;d3;d2;d1], adding to the
// existing contents of [d3;d2;d1], and using t0, t1, t2 and t3 as
// temporaries. It is fine for d4 to be the same register as d0,
// and it often is.
// ---------------------------------------------------------------------------

#define montreds(d4,d3,d2,d1,d0, t3,t2,t1,t0) \
/* First let [t3;t2] = 2^32 * d0 and [t1;t0] = (2^32-1) * d0 */ \
lsl t2, d0, #32 __LF \
lsr t3, d0, #32 __LF \
subs t0, t2, d0 __LF \
sbc t1, t3, xzr __LF \
/* Now [d4;d3;d2;d1] := [d0;d3;d2;d1] - [t3;t2;t1;t0] */ \
subs d1, d1, t0 __LF \
sbcs d2, d2, t1 __LF \
sbcs d3, d3, t2 __LF \
sbc d4, d0, t3

// Input parameters

#define z x0
#define x x1

// Rotating registers for the intermediate windows (with repetitions)

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5

// Other temporaries

#define t x6
#define u x7
#define v x8
#define w x9

S2N_BN_SYMBOL(bignum_deamont_sm2):

CFI_START

// Set up an initial window with the input x

ldp d0, d1, [x]
ldp d2, d3, [x, #16]

// Systematically scroll left doing 1-step reductions. This process
// keeps things inside 4 digits (i.e. < 2^256) at each stage, since
// we have w * p_sm2 + x <= (2^64 - 1) * p_sm2 + (2^256 - 1)
// <= (2^64 - 1) * (2^256 - 1) + (2^256 - 1) <= 2^64 * (2^256 - 1)

montreds(d0,d3,d2,d1,d0, t,u,v,w)
montreds(d1,d0,d3,d2,d1, t,u,v,w)
montreds(d2,d1,d0,d3,d2, t,u,v,w)
montreds(d3,d2,d1,d0,d3, t,u,v,w)

// Let [w;v;u;t] = [d3;d2;d1;d0] - p_sm2

subs t, d0, #-1
mov u, #0xffffffff00000000
sbcs u, d1, u
adcs v, d2, xzr
mov w, #0xfffffffeffffffff
sbcs w, d3, w

// If [d3;d2;d1;d0] < p_sm2 then [d3;d2;d1;d0] is the final answer,
// being reduced mod p_sm2, otherwise [d3;d2;d1;d0] - p_sm2.

csel d0, d0, t, cc
csel d1, d1, u, cc
csel d2, d2, v, cc
csel d3, d3, w, cc

// Write back result

stp d0, d1, [z]
stp d2, d3, [z, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_deamont_sm2)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
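The scrolling reduction works because p_sm2 == -1 (mod 2^64), so adding d0 * p_sm2 clears the bottom word and each montreds step is an exact division by 2^64 preserving the residue class. A minimal Python rendering of that algebra (needs Python 3.8+ for three-argument pow; illustrative sketch, not upstream code):

import random

P_SM2 = 2**256 - 2**224 - 2**96 + 2**64 - 1

def montreds_step(x):
    d0 = x & (2**64 - 1)
    assert (x + d0 * P_SM2) % 2**64 == 0     # since p_sm2 == -1 (mod 2^64)
    return (x + d0 * P_SM2) >> 64            # stays below 2^192 + p_sm2

x = random.randrange(2**256)
r = x
for _ in range(4):                           # four steps divide by 2^256
    r = montreds_step(r)
assert r % P_SM2 == x * pow(2**256, -1, P_SM2) % P_SM2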
wlsfx/bnbb
23,056
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/sm2_montjadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point addition on GM/T 0003-2012 curve SM2 in Montgomery-Jacobian coordinates // // extern void sm2_montjadd_alt(uint64_t p3[static 12], // const uint64_t p1[static 12], // const uint64_t p2[static 12]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^256 * x) mod p_sm2. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(sm2_montjadd_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(sm2_montjadd_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(sm2_montjadd_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 32 // Stable homes for input arguments during main code sequence #define input_z x15 #define input_x x16 #define input_y x17 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define z_2 input_y, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // #NSPACE is the total stack needed for these temporaries #define z1sq sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define x1a sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define z2sq sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define y1a sp, #(NUMSIZE*6) #define NSPACE NUMSIZE*7 // Corresponds to bignum_montmul_sm2_alt except for registers #define montmul_sm2(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x7, x8, [P2] __LF \ mul x12, x3, x7 __LF \ umulh x13, x3, x7 __LF \ mul x11, x3, x8 __LF \ umulh x14, x3, x8 __LF \ adds x13, x13, x11 __LF \ ldp x9, x10, [P2+16] __LF \ mul x11, x3, x9 __LF \ umulh x0, x3, x9 __LF \ adcs x14, x14, x11 __LF \ mul x11, x3, x10 __LF \ umulh x1, x3, x10 __LF \ adcs x0, x0, x11 __LF \ adc x1, x1, xzr __LF \ ldp x5, x6, [P1+16] __LF \ mul x11, x4, x7 __LF \ adds x13, x13, x11 __LF \ mul x11, x4, x8 __LF \ adcs x14, x14, x11 __LF \ mul x11, x4, x9 __LF \ adcs x0, x0, x11 __LF \ mul x11, x4, x10 __LF \ adcs x1, x1, x11 __LF \ umulh x3, x4, x10 __LF \ adc x3, x3, xzr __LF \ umulh x11, x4, x7 __LF \ adds x14, x14, x11 __LF \ umulh x11, x4, x8 __LF \ adcs x0, x0, x11 __LF \ umulh x11, x4, x9 __LF \ adcs x1, x1, x11 __LF \ adc x3, x3, xzr __LF \ mul x11, x5, x7 __LF \ adds x14, x14, x11 __LF \ mul x11, x5, x8 __LF \ adcs x0, x0, x11 __LF \ mul x11, x5, x9 __LF \ adcs x1, x1, x11 __LF \ mul x11, x5, x10 __LF \ adcs x3, x3, x11 __LF \ umulh x4, x5, x10 __LF \ adc x4, x4, xzr __LF \ umulh x11, x5, x7 __LF \ adds x0, x0, x11 __LF \ umulh x11, x5, x8 __LF \ adcs x1, x1, x11 __LF \ umulh x11, x5, x9 __LF \ adcs x3, x3, x11 __LF \ adc x4, x4, xzr __LF \ mul x11, x6, x7 __LF \ adds x0, x0, x11 __LF \ mul x11, x6, x8 __LF \ adcs x1, x1, x11 __LF \ mul x11, x6, x9 __LF \ adcs x3, x3, x11 __LF \ mul 
x11, x6, x10 __LF \ adcs x4, x4, x11 __LF \ umulh x5, x6, x10 __LF \ adc x5, x5, xzr __LF \ umulh x11, x6, x7 __LF \ adds x1, x1, x11 __LF \ umulh x11, x6, x8 __LF \ adcs x3, x3, x11 __LF \ umulh x11, x6, x9 __LF \ adcs x4, x4, x11 __LF \ adc x5, x5, xzr __LF \ lsl x11, x12, #32 __LF \ lsr x6, x12, #32 __LF \ subs x8, x11, x12 __LF \ sbc x7, x6, xzr __LF \ subs x13, x13, x8 __LF \ sbcs x14, x14, x7 __LF \ sbcs x0, x0, x11 __LF \ sbc x12, x12, x6 __LF \ lsl x11, x13, #32 __LF \ lsr x6, x13, #32 __LF \ subs x8, x11, x13 __LF \ sbc x7, x6, xzr __LF \ subs x14, x14, x8 __LF \ sbcs x0, x0, x7 __LF \ sbcs x12, x12, x11 __LF \ sbc x13, x13, x6 __LF \ lsl x11, x14, #32 __LF \ lsr x6, x14, #32 __LF \ subs x8, x11, x14 __LF \ sbc x7, x6, xzr __LF \ subs x0, x0, x8 __LF \ sbcs x12, x12, x7 __LF \ sbcs x13, x13, x11 __LF \ sbc x14, x14, x6 __LF \ lsl x11, x0, #32 __LF \ lsr x6, x0, #32 __LF \ subs x8, x11, x0 __LF \ sbc x7, x6, xzr __LF \ subs x12, x12, x8 __LF \ sbcs x13, x13, x7 __LF \ sbcs x14, x14, x11 __LF \ sbc x0, x0, x6 __LF \ adds x12, x12, x1 __LF \ adcs x13, x13, x3 __LF \ adcs x14, x14, x4 __LF \ adcs x0, x0, x5 __LF \ cset x8, cs __LF \ mov x11, #0xffffffff00000000 __LF \ mov x6, #0xfffffffeffffffff __LF \ adds x1, x12, #0x1 __LF \ sbcs x3, x13, x11 __LF \ adcs x4, x14, xzr __LF \ sbcs x5, x0, x6 __LF \ sbcs xzr, x8, xzr __LF \ csel x12, x12, x1, cc __LF \ csel x13, x13, x3, cc __LF \ csel x14, x14, x4, cc __LF \ csel x0, x0, x5, cc __LF \ stp x12, x13, [P0] __LF \ stp x14, x0, [P0+16] // Corresponds to bignum_montsqr_sm2_alt exactly #define montsqr_sm2(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x9, x2, x3 __LF \ umulh x10, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x11, x2, x5 __LF \ umulh x12, x2, x5 __LF \ mul x6, x2, x4 __LF \ umulh x7, x2, x4 __LF \ adds x10, x10, x6 __LF \ adcs x11, x11, x7 __LF \ mul x6, x3, x4 __LF \ umulh x7, x3, x4 __LF \ adc x7, x7, xzr __LF \ adds x11, x11, x6 __LF \ mul x13, x4, x5 __LF \ umulh x14, x4, x5 __LF \ adcs x12, x12, x7 __LF \ mul x6, x3, x5 __LF \ umulh x7, x3, x5 __LF \ adc x7, x7, xzr __LF \ adds x12, x12, x6 __LF \ adcs x13, x13, x7 __LF \ adc x14, x14, xzr __LF \ adds x9, x9, x9 __LF \ adcs x10, x10, x10 __LF \ adcs x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, x14 __LF \ cset x7, cs __LF \ umulh x6, x2, x2 __LF \ mul x8, x2, x2 __LF \ adds x9, x9, x6 __LF \ mul x6, x3, x3 __LF \ adcs x10, x10, x6 __LF \ umulh x6, x3, x3 __LF \ adcs x11, x11, x6 __LF \ mul x6, x4, x4 __LF \ adcs x12, x12, x6 __LF \ umulh x6, x4, x4 __LF \ adcs x13, x13, x6 __LF \ mul x6, x5, x5 __LF \ adcs x14, x14, x6 __LF \ umulh x6, x5, x5 __LF \ adc x7, x7, x6 __LF \ lsl x4, x8, #32 __LF \ lsr x5, x8, #32 __LF \ subs x2, x4, x8 __LF \ sbc x3, x5, xzr __LF \ subs x9, x9, x2 __LF \ sbcs x10, x10, x3 __LF \ sbcs x11, x11, x4 __LF \ sbc x8, x8, x5 __LF \ lsl x4, x9, #32 __LF \ lsr x5, x9, #32 __LF \ subs x2, x4, x9 __LF \ sbc x3, x5, xzr __LF \ subs x10, x10, x2 __LF \ sbcs x11, x11, x3 __LF \ sbcs x8, x8, x4 __LF \ sbc x9, x9, x5 __LF \ lsl x4, x10, #32 __LF \ lsr x5, x10, #32 __LF \ subs x2, x4, x10 __LF \ sbc x3, x5, xzr __LF \ subs x11, x11, x2 __LF \ sbcs x8, x8, x3 __LF \ sbcs x9, x9, x4 __LF \ sbc x10, x10, x5 __LF \ lsl x4, x11, #32 __LF \ lsr x5, x11, #32 __LF \ subs x2, x4, x11 __LF \ sbc x3, x5, xzr __LF \ subs x8, x8, x2 __LF \ sbcs x9, x9, x3 __LF \ sbcs x10, x10, x4 __LF \ sbc x11, x11, x5 __LF \ adds x8, x8, x12 __LF \ adcs x9, x9, x13 __LF \ adcs x10, x10, x14 __LF \ adcs x11, x11, x7 __LF \ cset x2, cs __LF \ mov x3, 
#0xffffffff00000000 __LF \ mov x5, #0xfffffffeffffffff __LF \ adds x12, x8, #0x1 __LF \ sbcs x13, x9, x3 __LF \ adcs x14, x10, xzr __LF \ sbcs x7, x11, x5 __LF \ sbcs xzr, x2, xzr __LF \ csel x8, x8, x12, cc __LF \ csel x9, x9, x13, cc __LF \ csel x10, x10, x14, cc __LF \ csel x11, x11, x7, cc __LF \ stp x8, x9, [P0] __LF \ stp x10, x11, [P0+16] // Almost-Montgomery variant which we use when an input to other muls // with the other argument fully reduced (which is always safe). #define amontsqr_sm2(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x9, x2, x3 __LF \ umulh x10, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x11, x2, x5 __LF \ umulh x12, x2, x5 __LF \ mul x6, x2, x4 __LF \ umulh x7, x2, x4 __LF \ adds x10, x10, x6 __LF \ adcs x11, x11, x7 __LF \ mul x6, x3, x4 __LF \ umulh x7, x3, x4 __LF \ adc x7, x7, xzr __LF \ adds x11, x11, x6 __LF \ mul x13, x4, x5 __LF \ umulh x14, x4, x5 __LF \ adcs x12, x12, x7 __LF \ mul x6, x3, x5 __LF \ umulh x7, x3, x5 __LF \ adc x7, x7, xzr __LF \ adds x12, x12, x6 __LF \ adcs x13, x13, x7 __LF \ adc x14, x14, xzr __LF \ adds x9, x9, x9 __LF \ adcs x10, x10, x10 __LF \ adcs x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, x14 __LF \ cset x7, cs __LF \ umulh x6, x2, x2 __LF \ mul x8, x2, x2 __LF \ adds x9, x9, x6 __LF \ mul x6, x3, x3 __LF \ adcs x10, x10, x6 __LF \ umulh x6, x3, x3 __LF \ adcs x11, x11, x6 __LF \ mul x6, x4, x4 __LF \ adcs x12, x12, x6 __LF \ umulh x6, x4, x4 __LF \ adcs x13, x13, x6 __LF \ mul x6, x5, x5 __LF \ adcs x14, x14, x6 __LF \ umulh x6, x5, x5 __LF \ adc x7, x7, x6 __LF \ lsl x4, x8, #32 __LF \ lsr x5, x8, #32 __LF \ subs x2, x4, x8 __LF \ sbc x3, x5, xzr __LF \ subs x9, x9, x2 __LF \ sbcs x10, x10, x3 __LF \ sbcs x11, x11, x4 __LF \ sbc x8, x8, x5 __LF \ lsl x4, x9, #32 __LF \ lsr x5, x9, #32 __LF \ subs x2, x4, x9 __LF \ sbc x3, x5, xzr __LF \ subs x10, x10, x2 __LF \ sbcs x11, x11, x3 __LF \ sbcs x8, x8, x4 __LF \ sbc x9, x9, x5 __LF \ lsl x4, x10, #32 __LF \ lsr x5, x10, #32 __LF \ subs x2, x4, x10 __LF \ sbc x3, x5, xzr __LF \ subs x11, x11, x2 __LF \ sbcs x8, x8, x3 __LF \ sbcs x9, x9, x4 __LF \ sbc x10, x10, x5 __LF \ lsl x4, x11, #32 __LF \ lsr x5, x11, #32 __LF \ subs x2, x4, x11 __LF \ sbc x3, x5, xzr __LF \ subs x8, x8, x2 __LF \ sbcs x9, x9, x3 __LF \ sbcs x10, x10, x4 __LF \ sbc x11, x11, x5 __LF \ adds x8, x8, x12 __LF \ adcs x9, x9, x13 __LF \ adcs x10, x10, x14 __LF \ adcs x11, x11, x7 __LF \ csetm x2, cs __LF \ subs x8, x8, x2 __LF \ and x3, x2, #0xffffffff00000000 __LF \ sbcs x9, x9, x3 __LF \ and x5, x2, #0xfffffffeffffffff __LF \ sbcs x10, x10, x2 __LF \ sbc x11, x11, x5 __LF \ stp x8, x9, [P0] __LF \ stp x10, x11, [P0+16] // Corresponds exactly to bignum_sub_sm2 #define sub_sm2(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ csetm x3, cc __LF \ adds x5, x5, x3 __LF \ and x4, x3, #0xffffffff00000000 __LF \ adcs x6, x6, x4 __LF \ adcs x7, x7, x3 __LF \ and x4, x3, #0xfffffffeffffffff __LF \ adc x8, x8, x4 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] S2N_BN_SYMBOL(sm2_montjadd_alt): CFI_START // Make room on stack for temporary variables // Move the input arguments to stable places CFI_DEC_SP(NSPACE) mov input_z, x0 mov input_x, x1 mov input_y, x2 // Main code, just a sequence of basic field operations // 12 * multiply + 4 * square + 7 * subtract amontsqr_sm2(z1sq,z_1) amontsqr_sm2(z2sq,z_2) montmul_sm2(y1a,z_2,y_1) 
montmul_sm2(y2a,z_1,y_2) montmul_sm2(x2a,z1sq,x_2) montmul_sm2(x1a,z2sq,x_1) montmul_sm2(y2a,z1sq,y2a) montmul_sm2(y1a,z2sq,y1a) sub_sm2(xd,x2a,x1a) sub_sm2(yd,y2a,y1a) amontsqr_sm2(zz,xd) montsqr_sm2(ww,yd) montmul_sm2(zzx1,zz,x1a) montmul_sm2(zzx2,zz,x2a) sub_sm2(resx,ww,zzx1) sub_sm2(t1,zzx2,zzx1) montmul_sm2(xd,xd,z_1) sub_sm2(resx,resx,zzx2) sub_sm2(t2,zzx1,resx) montmul_sm2(t1,t1,y1a) montmul_sm2(resz,xd,z_2) montmul_sm2(t2,yd,t2) sub_sm2(resy,t2,t1) // Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0 // The condition codes get set by a comparison (P2 != 0) - (P1 != 0) // So "HI" <=> CF /\ ~ZF <=> P1 = 0 /\ ~(P2 = 0) // and "LO" <=> ~CF <=> ~(P1 = 0) /\ P2 = 0 ldp x0, x1, [z_1] ldp x2, x3, [z_1+16] orr x12, x0, x1 orr x13, x2, x3 orr x12, x12, x13 cmp x12, xzr cset x12, ne ldp x4, x5, [z_2] ldp x6, x7, [z_2+16] orr x13, x4, x5 orr x14, x6, x7 orr x13, x13, x14 cmp x13, xzr cset x13, ne cmp x13, x12 // Multiplex the outputs accordingly, re-using the z's in registers ldp x8, x9, [resz] csel x8, x0, x8, lo csel x9, x1, x9, lo csel x8, x4, x8, hi csel x9, x5, x9, hi ldp x10, x11, [resz+16] csel x10, x2, x10, lo csel x11, x3, x11, lo csel x10, x6, x10, hi csel x11, x7, x11, hi ldp x12, x13, [x_1] ldp x0, x1, [resx] csel x0, x12, x0, lo csel x1, x13, x1, lo ldp x12, x13, [x_2] csel x0, x12, x0, hi csel x1, x13, x1, hi ldp x12, x13, [x_1+16] ldp x2, x3, [resx+16] csel x2, x12, x2, lo csel x3, x13, x3, lo ldp x12, x13, [x_2+16] csel x2, x12, x2, hi csel x3, x13, x3, hi ldp x12, x13, [y_1] ldp x4, x5, [resy] csel x4, x12, x4, lo csel x5, x13, x5, lo ldp x12, x13, [y_2] csel x4, x12, x4, hi csel x5, x13, x5, hi ldp x12, x13, [y_1+16] ldp x6, x7, [resy+16] csel x6, x12, x6, lo csel x7, x13, x7, lo ldp x12, x13, [y_2+16] csel x6, x12, x6, hi csel x7, x13, x7, hi // Finally store back the multiplexed values stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [y_3] stp x6, x7, [y_3+16] stp x8, x9, [z_3] stp x10, x11, [z_3+16] // Restore stack and return CFI_INC_SP(NSPACE) CFI_RET S2N_BN_SIZE_DIRECTIVE(sm2_montjadd_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
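The HI/LO flag reasoning in the comment above reduces to a four-case truth table over the two nonzero-ness bits; a tiny Python check (illustrative only):

for p1_nonzero in (0, 1):
    for p2_nonzero in (0, 1):
        diff = p2_nonzero - p1_nonzero    # cmp x13, x12
        hi = diff > 0                     # HI <=> CF /\ ~ZF
        lo = diff < 0                     # LO <=> ~CF
        assert hi == (p1_nonzero == 0 and p2_nonzero == 1)
        assert lo == (p1_nonzero == 1 and p2_nonzero == 0)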
wlsfx/bnbb
3,187
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_demont_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Convert from Montgomery form z := (x / 2^256) mod p_sm2, assuming x reduced
// Input x[4]; output z[4]
//
// extern void bignum_demont_sm2(uint64_t z[static 4], const uint64_t x[static 4]);
//
// This assumes the input is < p_sm2 for correctness. If this is not the case,
// use the variant "bignum_deamont_sm2" instead.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_demont_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_demont_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_demont_sm2)
.text
.balign 4

// ---------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d3;d2;d1;d0] and returns result in [d4;d3;d2;d1], adding to the
// existing contents of [d3;d2;d1], and using t0, t1, t2 and t3 as
// temporaries. It is fine for d4 to be the same register as d0,
// and it often is.
// ---------------------------------------------------------------------------

#define montreds(d4,d3,d2,d1,d0, t3,t2,t1,t0) \
/* First let [t3;t2] = 2^32 * d0 and [t1;t0] = (2^32-1) * d0 */ \
lsl t2, d0, #32 __LF \
lsr t3, d0, #32 __LF \
subs t0, t2, d0 __LF \
sbc t1, t3, xzr __LF \
/* Now [d4;d3;d2;d1] := [d0;d3;d2;d1] - [t3;t2;t1;t0] */ \
subs d1, d1, t0 __LF \
sbcs d2, d2, t1 __LF \
sbcs d3, d3, t2 __LF \
sbc d4, d0, t3

// Input parameters

#define z x0
#define x x1

// Rotating registers for the intermediate windows (with repetitions)

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5

// Other temporaries

#define t x6
#define u x7
#define v x8
#define w x9

S2N_BN_SYMBOL(bignum_demont_sm2):

CFI_START

// Set up an initial window with the input x

ldp d0, d1, [x]
ldp d2, d3, [x, #16]

// Systematically scroll left doing 1-step reductions. This process
// keeps things reduced < p_sm2 at each stage, since we have
// w * p_sm2 + x <= (2^64 - 1) * p_sm2 + (p_sm2 - 1) < 2^64 * p_sm2

montreds(d0,d3,d2,d1,d0, t,u,v,w)
montreds(d1,d0,d3,d2,d1, t,u,v,w)
montreds(d2,d1,d0,d3,d2, t,u,v,w)
montreds(d3,d2,d1,d0,d3, t,u,v,w)

// Write back result

stp d0, d1, [z]
stp d2, d3, [z, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_demont_sm2)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb
2,952
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_triple_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Triple modulo p_sm2, z := (3 * x) mod p_sm2
// Input x[4]; output z[4]
//
// extern void bignum_triple_sm2(uint64_t z[static 4], const uint64_t x[static 4]);
//
// The input x can be any 4-digit bignum, not necessarily reduced modulo p_sm2,
// and the result is always fully reduced, i.e. z = (3 * x) mod p_sm2.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_triple_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_sm2)
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_sm2_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_triple_sm2_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_sm2_alt)
.text
.balign 4

#define z x0
#define x x1

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define h x6

// Slightly offset aliases for the d_i for readability.

#define a0 x3
#define a1 x4
#define a2 x5
#define a3 x6

// More aliases for the same thing at different stages

#define q x6
#define c x6

// Other temporary variables

#define t0 x7
#define t1 x8

S2N_BN_SYMBOL(bignum_triple_sm2):

S2N_BN_SYMBOL(bignum_triple_sm2_alt):

CFI_START

// Load the inputs

ldp a0, a1, [x]
ldp a2, a3, [x, #16]

// First do the multiplication by 3, getting z = [h; d3; ...; d0]

lsl d0, a0, #1
adds d0, d0, a0
extr d1, a1, a0, #63
adcs d1, d1, a1
extr d2, a2, a1, #63
adcs d2, d2, a2
extr d3, a3, a2, #63
adcs d3, d3, a3
lsr h, a3, #63
adc h, h, xzr

// For this limited range a simple quotient estimate of q = h + 1 works, where
// h = floor(z / 2^256). Then -p_sm2 <= z - q * p_sm2 < p_sm2, so we just need
// to subtract q * p_sm2 and then if that's negative, add back p_sm2.

add q, h, #1

// Initial subtraction of z - q * p_sm2, with bitmask c for the carry

lsl t0, q, #32
sub t1, t0, q
adds d0, d0, q
adcs d1, d1, t1
adcs d2, d2, xzr
adcs d3, d3, t0
csetm c, cc

// Use the bitmask c for final masked addition of p_sm2.

adds d0, d0, c
and t1, c, #0xffffffff00000000
adcs d1, d1, t1
adcs d2, d2, c
and t0, c, #0xfffffffeffffffff
adc d3, d3, t0

// Finally store the result

stp d0, d1, [z]
stp d2, d3, [z, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_triple_sm2)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
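The quotient-estimate claim above (q = h + 1 gives -p_sm2 <= z - q * p_sm2 < p_sm2) rests on h <= 2 and 2^256 - p_sm2 < 2^225. A randomized Python check of exactly that bound (sketch, not upstream code):

import random

P_SM2 = 2**256 - 2**224 - 2**96 + 2**64 - 1

for _ in range(1000):
    x = random.randrange(2**256)      # input need not be reduced
    z = 3 * x
    q = (z >> 256) + 1                # q = h + 1
    r = z - q * P_SM2
    assert -P_SM2 <= r < P_SM2        # the claimed range
    if r < 0:                         # masked re-addition of p_sm2
        r += P_SM2
    assert r == z % P_SM2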
wlsfx/bnbb
8,879
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_montsqr_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Montgomery square, z := (x^2 / 2^256) mod p_sm2
// Input x[4]; output z[4]
//
// extern void bignum_montsqr_sm2(uint64_t z[static 4],
// const uint64_t x[static 4]);
//
// Does z := (x^2 / 2^256) mod p_sm2, assuming x^2 <= 2^256 * p_sm2, which is
// guaranteed in particular if x < p_sm2 initially (the "intended" case).
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_sm2)
.text
.balign 4

// ---------------------------------------------------------------------------
// Macro returning (c,h,l) = 3-word 1s complement (x - y) * (w - z)
// c,h,l,t should all be different
// t,h should not overlap w,z
// ---------------------------------------------------------------------------

#define muldiffn(c,h,l, t, x,y, w,z) \
subs t, x, y __LF \
cneg t, t, cc __LF \
csetm c, cc __LF \
subs h, w, z __LF \
cneg h, h, cc __LF \
mul l, t, h __LF \
umulh h, t, h __LF \
cinv c, c, cc __LF \
eor l, l, c __LF \
eor h, h, c

// ---------------------------------------------------------------------------
// Core one-step "end" Montgomery reduction macro. Takes input in
// [d5;d4;d3;d2;d1;d0] and returns result in [d5;d4;d3;d2;d1], adding to
// the existing [d4;d3;d2;d1], re-using d0 as a temporary internally as well
// as t1, t2, t3, and initializing d5 from zero (hence "end").
// ---------------------------------------------------------------------------

#define montrede(d5, d4,d3,d2,d1,d0, t3,t2,t1,t0) \
/* First let [t3;t2] = 2^32 * d0 and [t1;t0] = (2^32-1) * d0 */ \
lsl t2, d0, #32 __LF \
lsr t3, d0, #32 __LF \
subs t0, t2, d0 __LF \
sbc t1, t3, xzr __LF \
/* Now [d4;d3;d2;d1] := [d0;d3;d2;d1] - [t3;t2;t1;t0] */ \
subs d1, d1, t0 __LF \
sbcs d2, d2, t1 __LF \
sbcs d3, d3, t2 __LF \
sbc t0, d0, t3 __LF \
adds d4, d4, t0 __LF \
adc d5, xzr, xzr

// ---------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d3;d2;d1;d0] and returns result in [d4;d3;d2;d1], adding to the
// existing contents of [d3;d2;d1], and using t0, t1, t2 and t3 as
// temporaries. It is fine for d4 to be the same register as d0,
// and it often is.
// ---------------------------------------------------------------------------

#define montreds(d4,d3,d2,d1,d0, t3,t2,t1,t0) \
/* First let [t3;t2] = 2^32 * d0 and [t1;t0] = (2^32-1) * d0 */ \
lsl t2, d0, #32 __LF \
lsr t3, d0, #32 __LF \
subs t0, t2, d0 __LF \
sbc t1, t3, xzr __LF \
/* Now [d4;d3;d2;d1] := [d0;d3;d2;d1] - [t3;t2;t1;t0] */ \
subs d1, d1, t0 __LF \
sbcs d2, d2, t1 __LF \
sbcs d3, d3, t2 __LF \
sbc d4, d0, t3

#define a0 x2
#define a1 x3
#define a2 x4
#define a3 x5

#define c0 x6
#define c1 x7
#define c2 x8
#define c3 x9
#define c4 x10

#define d1 x11
#define d2 x12
#define d3 x13
#define d4 x14

#define s0 x15
#define s1 x16
#define s2 x17
#define s3 x1

#define a0short w2
#define a1short w3
#define d1short w11

S2N_BN_SYMBOL(bignum_montsqr_sm2):

CFI_START

// Load in all words of the input

ldp a0, a1, [x1]
ldp a2, a3, [x1, #16]

// Square the low half, getting a result in [s3;s2;s1;s0]
// This uses 32x32->64 multiplications to reduce the number of UMULHs

umull s0, a0short, a0short
lsr d1, a0, #32
umull s1, d1short, d1short
umull d1, a0short, d1short
adds s0, s0, d1, lsl #33
lsr d1, d1, #31
adc s1, s1, d1
umull s2, a1short, a1short
lsr d1, a1, #32
umull s3, d1short, d1short
umull d1, a1short, d1short
mul d2, a0, a1
umulh d3, a0, a1
adds s2, s2, d1, lsl #33
lsr d1, d1, #31
adc s3, s3, d1
adds d2, d2, d2
adcs d3, d3, d3
adc s3, s3, xzr
adds s1, s1, d2
adcs s2, s2, d3
adc s3, s3, xzr

// Perform two "short" Montgomery steps on the low square
// This shifts it to an offset compatible with middle product

montreds(s0,s3,s2,s1,s0, d1,d2,d3,d4)
montreds(s1,s0,s3,s2,s1, d1,d2,d3,d4)

// Compute cross-product with ADK 2x2->4 multiplier as [c3;c2;c1;c0]

mul c0, a0, a2
mul d4, a1, a3
umulh c2, a0, a2
muldiffn(d3,d2,d1, c4, a0,a1, a3,a2)
adds c1, c0, c2
adc c2, c2, xzr
umulh c3, a1, a3
adds c1, c1, d4
adcs c2, c2, c3
adc c3, c3, xzr
adds c2, c2, d4
adc c3, c3, xzr
adds xzr, d3, #1
adcs c1, c1, d1
adcs c2, c2, d2
adc c3, c3, d3

// Double it and add the Montgomerified low square

adds c0, c0, c0
adcs c1, c1, c1
adcs c2, c2, c2
adcs c3, c3, c3
adc c4, xzr, xzr
adds c0, c0, s2
adcs c1, c1, s3
adcs c2, c2, s0
adcs c3, c3, s1
adc c4, c4, xzr

// Montgomery-reduce the combined low and middle term another twice

montrede(c0,c4,c3,c2,c1,c0, d1,d2,d3,d4)
montrede(c1,c0,c4,c3,c2,c1, d1,d2,d3,d4)

// Our sum so far is in [c1,c0,c4,c3,c2]; choose more intuitive names

#define r0 x8
#define r1 x9
#define r2 x10
#define r3 x6
#define c x7

// Remind ourselves what else we can't destroy

#define a2 x4
#define a3 x5

// So we can have these as temps

#define t1 x11
#define t2 x12
#define t3 x13

// Add in the pure squares 22 + 33

mul t1, a2, a2
adds r0, r0, t1
mul t2, a3, a3
umulh t1, a2, a2
adcs r1, r1, t1
adcs r2, r2, t2
umulh t2, a3, a3
adcs r3, r3, t2
adc c, c, xzr

// Construct the 23 term, double and add it in

mul t1, a2, a3
umulh t2, a2, a3
adds t1, t1, t1
adcs t2, t2, t2
adc t3, xzr, xzr
adds r1, r1, t1
adcs r2, r2, t2
adcs r3, r3, t3
adcs c, c, xzr

// We know, writing B = 2^{4*64} that the full implicit result is
// B^2 c <= z + (B - 1) * p < B * p + (B - 1) * p < 2 * B * p,
// so the top half is certainly < 2 * p. If c = 1 already, we know
// subtracting p will give the reduced modulus. But now we do a
// subtraction-comparison to catch cases where the residue is >= p.
// The constants are such that [t3;-1;t1;-1] = p_sm2.
#define t0 x5

// Set CF (because of inversion) iff (0,p_sm2) <= (c,r3,r2,r1,r0)

mov t1, #0xffffffff00000000
subs t0, r0, #-1
sbcs t1, r1, t1
mov t3, #0xfffffffeffffffff
adcs t2, r2, xzr
sbcs t3, r3, t3
sbcs xzr, c, xzr

// Select final output accordingly

csel r0, t0, r0, cs
csel r1, t1, r1, cs
csel r2, t2, r2, cs
csel r3, t3, r3, cs

// Store things back in place

stp r0, r1, [x0]
stp r2, r3, [x0, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_montsqr_sm2)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb
3,075
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_cmul_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Multiply by a single word modulo p_sm2, z := (c * x) mod p_sm2, assuming
// x reduced
// Inputs c, x[4]; output z[4]
//
// extern void bignum_cmul_sm2(uint64_t z[static 4], uint64_t c,
// const uint64_t x[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = c, X2 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_sm2)
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_sm2_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_sm2_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_sm2_alt)
.text
.balign 4

#define z x0
#define m x1
#define x x2

#define d0 x3
#define d1 x4
#define d2 x5
#define d3 x6
#define h x7
#define q x8
#define a1 x9
#define a2 x10
#define a3 x11
#define a4 x12

S2N_BN_SYMBOL(bignum_cmul_sm2):

S2N_BN_SYMBOL(bignum_cmul_sm2_alt):

CFI_START

// First do the multiply, straightforwardly to get [h;d3;d2;d1;d0]

ldp a1, a2, [x]
ldp a3, a4, [x, #16]
mul d0, m, a1
mul d1, m, a2
mul d2, m, a3
mul d3, m, a4
umulh a1, m, a1
umulh a2, m, a2
umulh a3, m, a3
umulh h, m, a4
adds d1, d1, a1
adcs d2, d2, a2
adcs d3, d3, a3
adc h, h, xzr

// Quotient approximation is (h * (1 + 2^32 + 2^64) + d3 + 2^64) >> 64.
// Note that by hypothesis our product is <= (2^64 - 1) * (p_sm2 - 1),
// so there is no need to max this out to avoid wrapping, unlike in the
// more general case of bignum_mod_sm2.

adds a3, d3, h
mov a2, #1
adc a1, h, a2
add a2, h, a3, lsr #32
add q, a1, a2, lsr #32

// Let a3 = q<<32 and a4 = q>>32 then [a2;a1] = 2^32 * q - q

lsl a3, q, #32
subs a1, a3, q
lsr a4, q, #32
sbc a2, a4, xzr

// Do the basic correction as [h;d3;d2;d1;d0] := [h;d3;d2;d1;d0] - q * p_sm2

sub h, h, q
adds d0, d0, q
adcs d1, d1, a1
adcs d2, d2, a2
adcs d3, d3, a3
adc h, h, a4

// Use top word (which will be all zeros or all ones) as a mask to correct

adds d0, d0, h
and a1, h, #0xffffffff00000000
adcs d1, d1, a1
adcs d2, d2, h
and a3, h, #0xfffffffeffffffff
adc d3, d3, a3

// Finally store the result

stp d0, d1, [z]
stp d2, d3, [z, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_cmul_sm2)
S2N_BN_SIZE_DIRECTIVE(bignum_cmul_sm2_alt)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
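A Python mirror of the quotient formula quoted above, including the comment's claim that with x reduced the estimate never wraps a 64-bit word, so no MIN cap is needed (illustrative sketch; the no-wrap bound is the upstream claim, not re-derived here):

import random

P_SM2 = 2**256 - 2**224 - 2**96 + 2**64 - 1
M64 = 2**64 - 1

for _ in range(1000):
    c = random.randrange(2**64)
    x = random.randrange(P_SM2)                   # "assuming x reduced"
    z = c * x                                     # [h; d3; d2; d1; d0]
    h, d3 = z >> 256, (z >> 192) & M64
    q = (h * (1 + 2**32 + 2**64) + d3 + 2**64) >> 64
    assert q <= M64                               # the no-wrap claim
    r = z - q * P_SM2
    if r < 0:                                     # masked correction
        r += P_SM2
    assert r == z % P_SM2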
wlsfx/bnbb
1,878
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_sub_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Subtract modulo p_sm2, z := (x - y) mod p_sm2
// Inputs x[4], y[4]; output z[4]
//
// extern void bignum_sub_sm2(uint64_t z[static 4], const uint64_t x[static 4],
// const uint64_t y[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sub_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sub_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sub_sm2)
.text
.balign 4

#define z x0
#define x x1
#define y x2
#define c x3
#define l x4
#define d0 x5
#define d1 x6
#define d2 x7
#define d3 x8

S2N_BN_SYMBOL(bignum_sub_sm2):

CFI_START

// First just subtract the numbers as [d3; d2; d1; d0]
// Set a mask based on (inverted) carry indicating x < y = correction is needed

ldp d0, d1, [x]
ldp l, c, [y]
subs d0, d0, l
sbcs d1, d1, c
ldp d2, d3, [x, #16]
ldp l, c, [y, #16]
sbcs d2, d2, l
sbcs d3, d3, c

// Create a mask for the condition x < y, when we need to correct

csetm c, cc

// Now correct by adding masked p_sm2

adds d0, d0, c
and l, c, #0xffffffff00000000
adcs d1, d1, l
adcs d2, d2, c
and l, c, #0xfffffffeffffffff
adc d3, d3, l

// Store the result

stp d0, d1, [z]
stp d2, d3, [z, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_sub_sm2)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
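When the subtraction borrows, the two AND-masked words together with the two raw uses of the all-ones mask add back exactly the digits of p_sm2. A short Python check of those constants (assuming the p_sm2 value from the SM2 headers):

mask = 2**64 - 1                        # csetm all-ones mask when x < y
words = [mask,                          # d0 += c
         mask & 0xffffffff00000000,     # d1 += c & 0xffffffff00000000
         mask,                          # d2 += c
         mask & 0xfffffffeffffffff]     # d3 += c & 0xfffffffeffffffff
val = sum(w << (64 * i) for i, w in enumerate(words))
assert val == 2**256 - 2**224 - 2**96 + 2**64 - 1    # exactly p_sm2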
wlsfx/bnbb
5,378
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_montsqr_sm2_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery square, z := (x^2 / 2^256) mod p_sm2 // Input x[4]; output z[4] // // extern void bignum_montsqr_sm2_alt(uint64_t z[static 4], // const uint64_t x[static 4]); // // Does z := (x^2 / 2^256) mod p_sm2, assuming x^2 <= 2^256 * p_sm2, which is // guaranteed in particular if x < p_sm2 initially (the "intended" case). // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_sm2_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr_sm2_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_sm2_alt) .text .balign 4 // --------------------------------------------------------------------------- // Core one-step "short" Montgomery reduction macro. Takes input in // [d3;d2;d1;d0] and returns result in [d4;d3;d2;d1], adding to the // existing contents of [d3;d2;d1], and using t0, t1, t2 and t3 as // temporaries. It is fine for d4 to be the same register as d0, // and it often is. // --------------------------------------------------------------------------- #define montreds(d4,d3,d2,d1,d0, t3,t2,t1,t0) \ /* First let [t3;t2] = 2^32 * d0 and [t1;t0] = (2^32-1) * d0 */ \ lsl t2, d0, #32 __LF \ lsr t3, d0, #32 __LF \ subs t0, t2, d0 __LF \ sbc t1, t3, xzr __LF \ /* Now [d4;d3;d2;d1] := [d0;d3;d2;d1] - [t3;t2;t1;t0] */ \ subs d1, d1, t0 __LF \ sbcs d2, d2, t1 __LF \ sbcs d3, d3, t2 __LF \ sbc d4, d0, t3 #define z x0 #define x x1 #define a0 x2 #define a1 x3 #define a2 x4 #define a3 x5 #define l x6 #define h x7 #define u0 x8 #define u1 x9 #define u2 x10 #define u3 x11 #define u4 x12 #define u5 x13 #define u6 x14 // This one is the same as h, which is safe with this computation sequence #define u7 h S2N_BN_SYMBOL(bignum_montsqr_sm2_alt): CFI_START // Load all the elements, set up an initial window [u6;...u1] = [23;03;01] // and chain in the addition of 02 + 12 + 13 (no carry-out is possible). // This gives all the "heterogeneous" terms of the squaring ready to double ldp a0, a1, [x] mul u1, a0, a1 umulh u2, a0, a1 ldp a2, a3, [x, #16] mul u3, a0, a3 umulh u4, a0, a3 mul l, a0, a2 umulh h, a0, a2 adds u2, u2, l adcs u3, u3, h mul l, a1, a2 umulh h, a1, a2 adc h, h, xzr adds u3, u3, l mul u5, a2, a3 umulh u6, a2, a3 adcs u4, u4, h mul l, a1, a3 umulh h, a1, a3 adc h, h, xzr adds u4, u4, l adcs u5, u5, h adc u6, u6, xzr // Now just double it; this simple approach seems to work better than extr adds u1, u1, u1 adcs u2, u2, u2 adcs u3, u3, u3 adcs u4, u4, u4 adcs u5, u5, u5 adcs u6, u6, u6 cset u7, cs // Add the homogeneous terms 00 + 11 + 22 + 33 umulh l, a0, a0 mul u0, a0, a0 adds u1, u1, l mul l, a1, a1 adcs u2, u2, l umulh l, a1, a1 adcs u3, u3, l mul l, a2, a2 adcs u4, u4, l umulh l, a2, a2 adcs u5, u5, l mul l, a3, a3 adcs u6, u6, l umulh l, a3, a3 adc u7, u7, l // Squaring complete. 
// Perform 4 Montgomery steps to rotate the lower half

montreds(u0,u3,u2,u1,u0, a3,a2,a1,a0)
montreds(u1,u0,u3,u2,u1, a3,a2,a1,a0)
montreds(u2,u1,u0,u3,u2, a3,a2,a1,a0)
montreds(u3,u2,u1,u0,u3, a3,a2,a1,a0)

// Add high and low parts, catching carry in a0

adds u0, u0, u4
adcs u1, u1, u5
adcs u2, u2, u6
adcs u3, u3, u7
cset a0, cs

// Set [a3;-1;a1;-1] = p_sm2 and form [u7,u6,u5,u4] = [a0;u3;u2;u1;u0] - p_sm2

mov a1, #0xffffffff00000000
mov a3, #0xfffffffeffffffff
subs u4, u0, #-1
sbcs u5, u1, a1
adcs u6, u2, xzr
sbcs u7, u3, a3
sbcs xzr, a0, xzr

// Now CF is clear if the comparison carried so the original was fine
// Otherwise take the form with p_sm2 subtracted.

csel u0, u0, u4, cc
csel u1, u1, u5, cc
csel u2, u2, u6, cc
csel u3, u3, u7, cc

// Store back final result

stp u0, u1, [z]
stp u2, u3, [z, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_montsqr_sm2_alt)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
wlsfx/bnbb
1,797
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_mod_sm2_4.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Reduce modulo field characteristic, z := x mod p_sm2
// Input x[4]; output z[4]
//
// extern void bignum_mod_sm2_4(uint64_t z[static 4], const uint64_t x[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_sm2_4)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_sm2_4)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_sm2_4)
.text
.balign 4

#define z x0
#define x x1

#define n0 x2
#define n1 x3
#define n2 x4
#define n3 x5

#define d0 x6
#define d1 x7
#define d2 x8
#define d3 x9

S2N_BN_SYMBOL(bignum_mod_sm2_4):

CFI_START

// Load the non-trivial words of p_sm2 = [n3;-1;n1;-1]

mov n1, #0xffffffff00000000
mov n3, #0xfffffffeffffffff

// Load the input number

ldp d0, d1, [x]
ldp d2, d3, [x, #16]

// Do the subtraction.

subs n0, d0, #-1
sbcs n1, d1, n1
adcs n2, d2, xzr
sbcs n3, d3, n3

// Now if the carry is *clear* (inversion at work) the subtraction carried
// and hence we should have done nothing, so we reset each n_i = d_i

csel n0, d0, n0, cc
csel n1, d1, n1, cc
csel n2, d2, n2, cc
csel n3, d3, n3, cc

// Store the end result

stp n0, n1, [z]
stp n2, n3, [z, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_mod_sm2_4)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
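The single conditional subtraction here is sufficient precisely because any 4-word value is below 2 * p_sm2; stated as a minimal Python sketch (not upstream code):

P_SM2 = 2**256 - 2**224 - 2**96 + 2**64 - 1
assert 2**256 < 2 * P_SM2          # so one subtraction always suffices

def mod_sm2_4(x):
    assert 0 <= x < 2**256
    return x - P_SM2 if x >= P_SM2 else x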
wlsfx/bnbb
1,737
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_neg_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Negate modulo p_sm2, z := (-x) mod p_sm2, assuming x reduced
// Input x[4]; output z[4]
//
// extern void bignum_neg_sm2(uint64_t z[static 4], const uint64_t x[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_neg_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_neg_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_neg_sm2)
.text
.balign 4

#define z x0
#define x x1

#define p x2
#define t x3

#define d0 x4
#define d1 x5
#define d2 x6
#define d3 x7

S2N_BN_SYMBOL(bignum_neg_sm2):

CFI_START

// Load the 4 digits of x

ldp d0, d1, [x]
ldp d2, d3, [x, #16]

// Set a bitmask p for the input being nonzero, so that we avoid doing
// -0 = p_sm2 and hence maintain strict modular reduction

orr t, d0, d1
orr p, d2, d3
orr p, p, t
cmp p, #0
csetm p, ne

// Mask nontrivial words of p_sm2 = [n3;-1;n1;-1] and subtract

subs d0, p, d0
and t, p, #0xffffffff00000000
sbcs d1, t, d1
sbcs d2, p, d2
and t, p, #0xfffffffeffffffff
sbc d3, t, d3

// Write back the result

stp d0, d1, [z]
stp d2, d3, [z, #16]

// Return

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_neg_sm2)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
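A small Python model of the masked negation above, showing why the nonzero mask is needed to keep -0 from becoming p_sm2 (illustrative sketch, not upstream code):

import random

P_SM2 = 2**256 - 2**224 - 2**96 + 2**64 - 1

def neg_mod_sm2(x):
    assert 0 <= x < P_SM2              # "assuming x reduced"
    mask = P_SM2 if x != 0 else 0      # masked p_sm2 words, zero when x == 0
    return mask - x                    # keeps -0 at 0, result always < p_sm2

for _ in range(1000):
    x = random.randrange(P_SM2)
    assert neg_mod_sm2(x) == -x % P_SM2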
wlsfx/bnbb
67,129
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_montinv_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery inverse modulo p_sm2 = 2^256 - 2^224 - 2^96 + 2^64 - 1 // Input x[4]; output z[4] // // extern void bignum_montinv_sm2(uint64_t z[static 4], // const uint64_t x[static 4]); // // If the 4-digit input x is coprime to p_sm2, i.e. is not divisible // by it, returns z < p_sm2 such that x * z == 2^512 (mod p_sm2). This // is effectively "Montgomery inverse" because if we consider x and z as // Montgomery forms of X and Z, i.e. x == 2^256 * X and z == 2^256 * Z // (both mod p_sm2) then X * Z == 1 (mod p_sm2). That is, this function // gives the analog of the modular inverse bignum_inv_sm2 but with both // input and output in the Montgomery domain. Note that x does not need // to be reduced modulo p_sm2, but the output always is. If the input // is divisible (i.e. is 0 or p_sm2), then there can be no solution to // the congruence x * z == 2^512 (mod p_sm2), and z = 0 is returned. // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montinv_sm2) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montinv_sm2) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montinv_sm2) .text .balign 4 // Size in bytes of a 64-bit word #define N 8 // Used for the return pointer #define res x20 // Loop counter and d = 2 * delta value for divstep #define i x21 #define d x22 // Registers used for matrix element magnitudes and signs #define m00 x10 #define m01 x11 #define m10 x12 #define m11 x13 #define s00 x14 #define s01 x15 #define s10 x16 #define s11 x17 // Initial carries for combinations #define car0 x9 #define car1 x19 // Input and output, plain registers treated according to pattern #define reg0 x0, #0 #define reg1 x1, #0 #define reg2 x2, #0 #define reg3 x3, #0 #define reg4 x4, #0 #define x x1, #0 #define z x0, #0 // Pointer-offset pairs for temporaries on stack #define f sp, #0 #define g sp, #(6*N) #define u sp, #(12*N) #define v sp, #(16*N) // Total size to reserve on the stack #define NSPACE 20*N // --------------------------------------------------------------------------- // Core signed almost-Montgomery reduction macro. Takes input in // [d4;d3;d2;d1;d0] and returns result in [d4;d3;d2;d1], adding to // the existing [d4;d3;d2;d1], and re-using d0 as a temporary internally // as well as t0, t1, t2, t3. This is almost-Montgomery, i.e. the result // fits in 4 digits but is not necessarily strictly reduced mod p_sm2. // --------------------------------------------------------------------------- #define amontred(d4,d3,d2,d1,d0, t3,t2,t1,t0) \ /* We only know the input is -2^316 < x < 2^316. To do traditional */ \ /* unsigned Montgomery reduction, start by adding 2^61 * p_sm2. 
*/ \ mov t0, #0xe000000000000000 __LF \ adds d0, d0, t0 __LF \ mov t1, #0x1fffffffffffffff __LF \ adcs d1, d1, t1 __LF \ mov t2, #0xffffffffe0000000 __LF \ adcs d2, d2, t2 __LF \ sbcs d3, d3, xzr __LF \ and t0, t1, #0xffffffffdfffffff __LF \ adc d4, d4, t0 __LF \ /* First let [t3;t2] = 2^32 * d0 and [t1;t0] = (2^32-1) * d0 */ \ lsl t2, d0, #32 __LF \ lsr t3, d0, #32 __LF \ subs t0, t2, d0 __LF \ sbc t1, t3, xzr __LF \ /* Now [d4;d3;d2;d1] := [d0;d3;d2;d1] - [t3;t2;t1;t0] */ \ subs d1, d1, t0 __LF \ sbcs d2, d2, t1 __LF \ sbcs d3, d3, t2 __LF \ sbc t0, d0, t3 __LF \ adds d4, d4, t0 __LF \ /* Now capture top carry and subtract p_sm2 if set (almost-Montgomery) */ \ csetm t0, cs __LF \ subs d1, d1, t0 __LF \ and t1, t0, #0xffffffff00000000 __LF \ sbcs d2, d2, t1 __LF \ and t2, t0, #0xfffffffeffffffff __LF \ sbcs d3, d3, t0 __LF \ sbc d4, d4, t2 // Very similar to a subroutine call to the s2n-bignum word_divstep59. // But different in register usage and returning the final matrix in // registers as follows // // [ m00 m01] // [ m10 m11] #define divstep59() \ and x4, x2, #0xfffff __LF \ orr x4, x4, #0xfffffe0000000000 __LF \ and x5, x3, #0xfffff __LF \ orr x5, x5, #0xc000000000000000 __LF \ tst x5, #0x1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, 
x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ asr x5, x5, #1 __LF \ add x8, x4, #0x100, lsl #12 __LF \ sbfx x8, x8, #21, #21 __LF \ mov x11, #0x100000 __LF \ add x11, x11, x11, lsl #21 __LF \ add x9, x4, x11 __LF \ asr x9, x9, #42 __LF \ add x10, x5, #0x100, lsl #12 __LF \ sbfx x10, x10, #21, #21 __LF \ add x11, x5, x11 __LF \ asr x11, x11, #42 __LF \ mul x6, x8, x2 __LF \ mul x7, x9, x3 __LF \ mul x2, x10, x2 __LF \ mul x3, x11, x3 __LF \ add x4, x6, x7 __LF \ add x5, x2, x3 __LF \ asr x2, x4, #20 __LF \ asr x3, x5, #20 __LF \ and x4, x2, #0xfffff __LF \ orr x4, x4, #0xfffffe0000000000 __LF \ and x5, x3, #0xfffff __LF \ orr x5, x5, #0xc000000000000000 __LF \ tst x5, #0x1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ 
ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ asr x5, x5, #1 __LF \ add x12, x4, #0x100, lsl #12 __LF \ sbfx x12, x12, #21, #21 __LF \ mov x15, #0x100000 __LF \ add 
x15, x15, x15, lsl #21 __LF \ add x13, x4, x15 __LF \ asr x13, x13, #42 __LF \ add x14, x5, #0x100, lsl #12 __LF \ sbfx x14, x14, #21, #21 __LF \ add x15, x5, x15 __LF \ asr x15, x15, #42 __LF \ mul x6, x12, x2 __LF \ mul x7, x13, x3 __LF \ mul x2, x14, x2 __LF \ mul x3, x15, x3 __LF \ add x4, x6, x7 __LF \ add x5, x2, x3 __LF \ asr x2, x4, #20 __LF \ asr x3, x5, #20 __LF \ and x4, x2, #0xfffff __LF \ orr x4, x4, #0xfffffe0000000000 __LF \ and x5, x3, #0xfffff __LF \ orr x5, x5, #0xc000000000000000 __LF \ tst x5, #0x1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ mul x2, x12, x8 __LF \ mul x3, x12, x9 __LF \ mul x6, x14, x8 __LF \ mul x7, x14, x9 __LF \ madd x8, x13, x10, x2 __LF \ madd x9, x13, x11, x3 __LF \ madd x16, x15, x10, x6 __LF \ madd x17, x15, x11, x7 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, 
#0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ asr x5, x5, #1 __LF \ add x12, x4, #0x100, lsl #12 __LF \ sbfx x12, x12, #22, #21 __LF \ mov x15, #0x100000 __LF \ add x15, x15, x15, lsl #21 __LF \ add x13, x4, x15 __LF \ asr x13, x13, #43 __LF \ add x14, x5, #0x100, lsl #12 __LF \ sbfx x14, x14, #22, #21 __LF \ add x15, x5, x15 __LF \ asr x15, x15, #43 __LF \ mneg x2, x12, x8 __LF \ mneg x3, x12, x9 __LF \ mneg x4, x14, x8 __LF \ mneg x5, x14, x9 __LF \ msub m00, x13, x16, x2 __LF \ msub m01, x13, x17, x3 __LF \ msub m10, x15, x16, x4 __LF \ msub m11, x15, x17, x5 S2N_BN_SYMBOL(bignum_montinv_sm2): CFI_START // Save registers and make room for temporaries CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_DEC_SP(NSPACE) // Save the return pointer for the end so we can overwrite x0 later mov res, x0 // Copy the prime and input into the main f and g variables respectively. // Make sure x is reduced so that g <= f as assumed in the bound proof. mov x10, #0xffffffffffffffff mov x11, #0xffffffff00000000 mov x13, #0xfffffffeffffffff stp x10, x11, [f] stp x10, x13, [f+2*N] str xzr, [f+4*N] ldp x2, x3, [x1] subs x10, x2, #-1 sbcs x11, x3, x11 ldp x4, x5, [x1, #(2*N)] adcs x12, x4, xzr sbcs x13, x5, x13 csel x2, x2, x10, cc csel x3, x3, x11, cc csel x4, x4, x12, cc csel x5, x5, x13, cc stp x2, x3, [g] stp x4, x5, [g+2*N] str xzr, [g+4*N] // Also maintain reduced < 2^256 vector [u,v] such that // [f,g] == x * 2^{5*i-50} * [u,v] (mod p_sm2) // starting with [p_sm2,x] == x * 2^{5*0-562} * [0,2^562] (mod p_sm2) // The weird-looking 5*i modifications come in because we are doing // 64-bit word-sized Montgomery reductions at each stage, which is // 5 bits more than the 59-bit requirement to keep things stable. // After the 10th and last iteration and sign adjustment, when // f == 1 for in-scope cases, we have x * 2^{50-562} * u == 1, i.e. // x * u == 2^512 as required. stp xzr, xzr, [u] stp xzr, xzr, [u+2*N] // The starting constant 2^562 mod p_sm2 is // 0x0018000000040000:0x00040000000c0000:0x000bfffffff80000:0x000c000000100000 // where colons separate 64-bit subwords, least significant at the right. 
// These each need a couple of instructions to create on ARM mov x10, #0x0000000000100000 orr x10, x10, #0x000c000000000000 mov x11, #0x000c000000000000 sub x11, x11, #0x80000 stp x10, x11, [v] mov x12, #0x0004000000000000 orr x12, x12, #0x00000000000c0000 mov x13, #0x0018000000000000 orr x13, x13, #0x0000000000040000 stp x12, x13, [v+2*N] // Start of main loop. We jump into the middle so that the divstep // portion is common to the special tenth iteration after a uniform // first 9. mov i, #10 mov d, #1 b Lbignum_montinv_sm2_midloop Lbignum_montinv_sm2_loop: // Separate the matrix elements into sign-magnitude pairs cmp m00, xzr csetm s00, mi cneg m00, m00, mi cmp m01, xzr csetm s01, mi cneg m01, m01, mi cmp m10, xzr csetm s10, mi cneg m10, m10, mi cmp m11, xzr csetm s11, mi cneg m11, m11, mi // Adjust the initial values to allow for complement instead of negation // This initial offset is the same for [f,g] and [u,v] compositions. // Save it in stable registers for the [u,v] part and do [f,g] first. and x0, m00, s00 and x1, m01, s01 add car0, x0, x1 and x0, m10, s10 and x1, m11, s11 add car1, x0, x1 // Now the computation of the updated f and g values. This maintains a // 2-word carry between stages so we can conveniently insert the shift // right by 59 before storing back, and not overwrite digits we need // again of the old f and g values. // // Digit 0 of [f,g] ldr x7, [f] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, car0, x0 adc x2, xzr, x1 ldr x8, [g] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 adc x2, x2, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x5, car1, x0 adc x3, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 adc x3, x3, x1 // Digit 1 of [f,g] ldr x7, [f+N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [g+N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 adc x6, x6, x1 extr x4, x2, x4, #59 str x4, [f] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x3, x3, x0 adc x4, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x3, x3, x0 adc x4, x4, x1 extr x5, x3, x5, #59 str x5, [g] // Digit 2 of [f,g] ldr x7, [f+2*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [g+2*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 adc x5, x5, x1 extr x2, x6, x2, #59 str x2, [f+N] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x4, x4, x0 adc x2, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x4, x4, x0 adc x2, x2, x1 extr x3, x4, x3, #59 str x3, [g+N] // Digits 3 and 4 of [f,g] ldr x7, [f+3*N] eor x1, x7, s00 ldr x23, [f+4*N] eor x3, x23, s00 and x3, x3, m00 neg x3, x3 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, x3, x1 ldr x8, [g+3*N] eor x1, x8, s01 ldr x24, [g+4*N] eor x0, x24, s01 and x0, x0, m01 sub x3, x3, x0 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 adc x3, x3, x1 extr x6, x5, x6, #59 str x6, [f+2*N] extr x5, x3, x5, #59 str x5, [f+3*N] asr x3, x3, #59 str x3, [f+4*N] eor x1, x7, s10 eor x5, x23, s10 and x5, x5, m10 neg x5, x5 mul x0, x1, m10 umulh x1, x1, m10 adds x2, x2, x0 adc x5, x5, x1 eor x1, x8, s11 eor x0, x24, s11 and x0, x0, m11 sub x5, x5, x0 mul x0, x1, m11 umulh x1, x1, m11 adds x2, x2, x0 adc x5, x5, x1 extr x4, x2, x4, #59 str x4, [g+2*N] extr x2, x5, x2, #59 str x2, [g+3*N] asr x5, x5, #59 str x5, [g+4*N] // Now the computation of the updated u and v values and their // Montgomery reductions. 
A very similar accumulation except that // the top words of u and v are unsigned and we don't shift. // // Digit 0 of [u,v] ldr x7, [u] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, car0, x0 adc x2, xzr, x1 ldr x8, [v] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u] adc x2, x2, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x5, car1, x0 adc x3, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 str x5, [v] adc x3, x3, x1 // Digit 1 of [u,v] ldr x7, [u+N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [v+N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 str x2, [u+N] adc x6, x6, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x3, x3, x0 adc x4, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x3, x3, x0 str x3, [v+N] adc x4, x4, x1 // Digit 2 of [u,v] ldr x7, [u+2*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [v+2*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 str x6, [u+2*N] adc x5, x5, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x4, x4, x0 adc x2, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x4, x4, x0 str x4, [v+2*N] adc x2, x2, x1 // Digits 3 and 4 of u (top is unsigned) ldr x7, [u+3*N] eor x1, x7, s00 and x3, s00, m00 neg x3, x3 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, x3, x1 ldr x8, [v+3*N] eor x1, x8, s01 and x0, s01, m01 sub x3, x3, x0 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 adc x3, x3, x1 // Montgomery reduction of u ldp x0, x1, [u] ldr x6, [u+2*N] amontred(x3,x5,x6,x1,x0, x24,x10,x11,x14) stp x1, x6, [u] stp x5, x3, [u+16] // Digits 3 and 4 of v (top is unsigned) eor x1, x7, s10 and x5, s10, m10 neg x5, x5 mul x0, x1, m10 umulh x1, x1, m10 adds x2, x2, x0 adc x5, x5, x1 eor x1, x8, s11 and x0, s11, m11 sub x5, x5, x0 mul x0, x1, m11 umulh x1, x1, m11 adds x2, x2, x0 adc x5, x5, x1 // Montgomery reduction of v ldp x0, x1, [v] ldr x3, [v+2*N] amontred(x5,x2,x3,x1,x0, x24,x10,x11,x14) stp x1, x3, [v] stp x2, x5, [v+16] Lbignum_montinv_sm2_midloop: mov x1, d ldr x2, [f] ldr x3, [g] divstep59() mov d, x1 // Next iteration subs i, i, #1 bne Lbignum_montinv_sm2_loop // The 10th and last iteration does not need anything except the // u value and the sign of f; the latter can be obtained from the // lowest word of f. So it's done differently from the main loop. // Find the sign of the new f. For this we just need one digit // since we know (for in-scope cases) that f is either +1 or -1. // We don't explicitly shift right by 59 either, but looking at // bit 63 (or any bit >= 60) of the unshifted result is enough // to distinguish -1 from +1; this is then made into a mask. ldr x0, [f] ldr x1, [g] mul x0, x0, m00 madd x1, x1, m01, x0 asr x0, x1, #63 // Now separate out the matrix into sign-magnitude pairs // and adjust each one based on the sign of f. // // Note that at this point we expect |f|=1 and we got its // sign above, so then since [f,0] == x * 2^{-512} [u,v] (mod p_sm2) // we want to flip the sign of u according to that of f. 
cmp m00, xzr csetm s00, mi cneg m00, m00, mi eor s00, s00, x0 cmp m01, xzr csetm s01, mi cneg m01, m01, mi eor s01, s01, x0 cmp m10, xzr csetm s10, mi cneg m10, m10, mi eor s10, s10, x0 cmp m11, xzr csetm s11, mi cneg m11, m11, mi eor s11, s11, x0 // Adjust the initial value to allow for complement instead of negation and x0, m00, s00 and x1, m01, s01 add car0, x0, x1 // Digit 0 of [u] ldr x7, [u] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, car0, x0 adc x2, xzr, x1 ldr x8, [v] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u] adc x2, x2, x1 // Digit 1 of [u] ldr x7, [u+N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [v+N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 str x2, [u+N] adc x6, x6, x1 // Digit 2 of [u] ldr x7, [u+2*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [v+2*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 str x6, [u+2*N] adc x5, x5, x1 // Digits 3 and 4 of u (top is unsigned) ldr x7, [u+3*N] eor x1, x7, s00 and x3, s00, m00 neg x3, x3 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, x3, x1 ldr x8, [v+3*N] eor x1, x8, s01 and x0, s01, m01 sub x3, x3, x0 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 adc x3, x3, x1 // Montgomery reduction of u. This needs to be strict not "almost" // so it is followed by an optional subtraction of p_sm2 ldp x0, x1, [u] ldr x2, [u+2*N] amontred(x3,x5,x2,x1,x0, x24,x10,x11,x14) mov x10, #0xffffffffffffffff subs x10, x1, #-1 mov x11, #0xffffffff00000000 sbcs x11, x2, x11 mov x13, #0xfffffffeffffffff adcs x12, x5, xzr sbcs x13, x3, x13 csel x10, x1, x10, cc csel x11, x2, x11, cc csel x12, x5, x12, cc csel x13, x3, x13, cc // Store it back to the final output stp x10, x11, [res] stp x12, x13, [res, #16] // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montinv_sm2) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
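The invariant stated in the comments above, x * u == 2^512 (mod p_sm2) after the final iteration, can be exercised from C against the other SM2 routines in this directory: Montgomery-multiplying u by x must cancel one 2^256 factor and leave 2^256 mod p_sm2, i.e. the Montgomery form of 1. The following is a minimal property-check sketch, not part of the source; it assumes bignum_montinv_sm2 has the standard s2n-bignum signature (its prototype comment falls outside this excerpt) and that the three assembled objects are linked together.

/* Minimal sketch: verify bignum_montinv_sm2 via the Montgomery identity. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

extern void bignum_montinv_sm2(uint64_t z[4], const uint64_t x[4]);  /* assumed prototype */
extern void bignum_tomont_sm2(uint64_t z[4], const uint64_t x[4]);
extern void bignum_montmul_sm2_alt(uint64_t z[4], const uint64_t x[4],
                                   const uint64_t y[4]);

int main(void) {
    /* Any sample x with 0 < x < p_sm2 */
    uint64_t x[4] = {0x123456789abcdef0ULL, 0xfedcba9876543210ULL, 2, 3};
    uint64_t u[4], chk[4], r[4], one[4] = {1, 0, 0, 0};

    bignum_montinv_sm2(u, x);          /* x * u == 2^512 (mod p_sm2)            */
    bignum_montmul_sm2_alt(chk, u, x); /* chk == u*x / 2^256 == 2^256 mod p_sm2 */
    bignum_tomont_sm2(r, one);         /* r == 2^256 mod p_sm2                  */

    printf("montinv_sm2 check: %s\n", memcmp(chk, r, 32) ? "FAIL" : "ok");
    return 0;
}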
wlsfx/bnbb
4,466
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_tomont_sm2.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Convert to Montgomery form z := (2^256 * x) mod p_sm2
// Input x[4]; output z[4]
//
// extern void bignum_tomont_sm2(uint64_t z[static 4], const uint64_t x[static 4]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_sm2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tomont_sm2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_sm2)
.text
.balign 4

// ----------------------------------------------------------------------------
// Core "x |-> (2^64 * x) mod p_sm2" macro, with x assumed to be < p_sm2.
// We write it as a macro to be repeated instead of using .rep in assembler.
// The code here is very similar to the core of bignum_mod_sm2, just
// implicitly inserting zeros instead of fresh digits.
// ----------------------------------------------------------------------------

#define modstep_sm2() \
/* Writing the input, with a lowest zero digit appended, as */ \
/* z = 2^256 * d3 + 2^192 * d2 + t, quotient approximation is */ \
/* MIN ((d3 * (1 + 2^32 + 2^64) + d2 + 2^64) >> 64) (2^64 - 1) */ \
adds t3, d2, d3 __LF \
mov t2, #1 __LF \
adc t1, d3, t2 __LF \
add t2, d3, t3, lsr #32 __LF \
adds q, t1, t2, lsr #32 __LF \
cinv q, q, cs __LF \
/* Let t3 = q<<32 and t4 = q>>32 then [t2;t1] = 2^32 * q - q */ \
lsl t3, q, #32 __LF \
subs t1, t3, q __LF \
lsr t4, q, #32 __LF \
sbc t2, t4, xzr __LF \
/* Do the basic correction [t4;t3;t2;t1;q] = 2^256 * x - q * p */ \
adds t1, t1, d0 __LF \
sub d3, d3, q __LF \
adcs t2, t2, d1 __LF \
adcs t3, t3, d2 __LF \
adc t4, t4, d3 __LF \
/* Use top word as mask to correct */ \
adds d0, q, t4 __LF \
and t0, t4, #0xffffffff00000000 __LF \
adcs d1, t1, t0 __LF \
adcs d2, t2, t4 __LF \
and t0, t4, #0xfffffffeffffffff __LF \
adc d3, t3, t0

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5

#define t1 x6
#define t2 x7
#define t3 x8
#define t4 x9

#define q x1
#define t0 x1

S2N_BN_SYMBOL(bignum_tomont_sm2):

CFI_START

// Load the input

ldp d0, d1, [x1]
ldp d2, d3, [x1, #16]

// Do an initial reduction to make sure this is < p_sm2, using just
// a copy of the bignum_mod_sm2_4 code. This is needed to set up the
// invariant "input < p_sm2" for the main modular reduction steps.

subs t0, d0, #-1
mov t1, #0xffffffff00000000
sbcs t1, d1, t1
adcs t2, d2, xzr
mov t3, #0xfffffffeffffffff
sbcs t3, d3, t3
csel d0, d0, t0, cc
csel d1, d1, t1, cc
csel d2, d2, t2, cc
csel d3, d3, t3, cc

// Now do 4 iterations of a basic x |-> (2^64 * x) mod p_sm2 step.

modstep_sm2()
modstep_sm2()
modstep_sm2()
modstep_sm2()

// Store the result and return

stp d0, d1, [x0]
stp d2, d3, [x0, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_tomont_sm2)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
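The conversion z := (2^256 * x) mod p_sm2 can be cross-checked against a naive reference that performs 256 modular doublings. The following is a minimal sketch, not part of the source; it assumes a C99 toolchain and linking against the assembled object, and it takes the limbs of p_sm2 from the masks used in the code above.

/* Minimal sketch: cross-check bignum_tomont_sm2 against repeated doubling. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

extern void bignum_tomont_sm2(uint64_t z[4], const uint64_t x[4]);

/* p_sm2 in little-endian 64-bit limbs, matching the masks in the code above. */
static const uint64_t P[4] = {0xffffffffffffffffULL, 0xffffffff00000000ULL,
                              0xffffffffffffffffULL, 0xfffffffeffffffffULL};

/* r := 2*r mod p_sm2, assuming r < p_sm2 on entry. */
static void double_mod_p(uint64_t r[4]) {
    uint64_t t[4], s[4], top = r[3] >> 63, borrow = 0;
    for (int i = 3; i > 0; i--) t[i] = (r[i] << 1) | (r[i - 1] >> 63);
    t[0] = r[0] << 1;
    for (int i = 0; i < 4; i++) {          /* s = t - p, tracking the borrow */
        s[i] = t[i] - P[i] - borrow;
        borrow = (t[i] < P[i]) || (t[i] == P[i] && borrow);
    }
    /* Keep t - p when 2*r overflowed 256 bits or t >= p, otherwise keep t. */
    memcpy(r, (top || !borrow) ? s : t, 32);
}

int main(void) {
    uint64_t x[4] = {1, 0, 0, 0}, z[4], ref[4];
    bignum_tomont_sm2(z, x);               /* z = 2^256 * 1 mod p_sm2   */
    memcpy(ref, x, 32);
    for (int i = 0; i < 256; i++)          /* ref = 2^256 * x mod p_sm2 */
        double_mod_p(ref);
    printf("tomont_sm2 check: %s\n", memcmp(z, ref, 32) ? "FAIL" : "ok");
    return 0;
}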
wlsfx/bnbb
5,993
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sm2/bignum_montmul_sm2_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Montgomery multiply, z := (x * y / 2^256) mod p_sm2
// Inputs x[4], y[4]; output z[4]
//
// extern void bignum_montmul_sm2_alt(uint64_t z[static 4],
//                                    const uint64_t x[static 4],
//                                    const uint64_t y[static 4]);
//
// Does z := (2^{-256} * x * y) mod p_sm2, assuming that the inputs x and y
// satisfy x * y <= 2^256 * p_sm2 (in particular this is true if we are in
// the "usual" case x < p_sm2 and y < p_sm2).
//
// Standard ARM ABI: X0 = z, X1 = x, X2 = y
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_sm2_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montmul_sm2_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_sm2_alt)
.text
.balign 4

// ---------------------------------------------------------------------------
// Core one-step "short" Montgomery reduction macro. Takes input in
// [d3;d2;d1;d0] and returns result in [d4;d3;d2;d1], adding to the
// existing contents of [d3;d2;d1], and using t0, t1, t2 and t3 as
// temporaries. It is fine for d4 to be the same register as d0,
// and it often is.
// ---------------------------------------------------------------------------

#define montreds(d4,d3,d2,d1,d0, t3,t2,t1,t0) \
/* First let [t3;t2] = 2^32 * d0 and [t1;t0] = (2^32-1) * d0 */ \
lsl t2, d0, #32 __LF \
lsr t3, d0, #32 __LF \
subs t0, t2, d0 __LF \
sbc t1, t3, xzr __LF \
/* Now [d4;d3;d2;d1] := [d0;d3;d2;d1] - [t3;t2;t1;t0] */ \
subs d1, d1, t0 __LF \
sbcs d2, d2, t1 __LF \
sbcs d3, d3, t2 __LF \
sbc d4, d0, t3

#define z x0
#define x x1
#define y x2

#define a0 x3
#define a1 x4
#define a2 x5
#define a3 x6
#define b0 x7
#define b1 x8
#define b2 x9
#define b3 x10

#define l x11

#define u0 x12
#define u1 x13
#define u2 x14
#define u3 x15
#define u4 x16

// These alias to the input arguments when no longer needed

#define u5 a0
#define u6 a1
#define u7 a2
#define h a3

S2N_BN_SYMBOL(bignum_montmul_sm2_alt):

CFI_START

// Load operands and set up row 0 = [u4;...;u0] = a0 * [b3;...;b0]

ldp a0, a1, [x]
ldp b0, b1, [y]
mul u0, a0, b0
umulh u1, a0, b0
mul l, a0, b1
umulh u2, a0, b1
adds u1, u1, l
ldp b2, b3, [y, #16]
mul l, a0, b2
umulh u3, a0, b2
adcs u2, u2, l
mul l, a0, b3
umulh u4, a0, b3
adcs u3, u3, l
adc u4, u4, xzr
ldp a2, a3, [x, #16]

// Row 1 = [u5;...;u0] = [a1;a0] * [b3;...;b0]

mul l, a1, b0
adds u1, u1, l
mul l, a1, b1
adcs u2, u2, l
mul l, a1, b2
adcs u3, u3, l
mul l, a1, b3
adcs u4, u4, l
umulh u5, a1, b3
adc u5, u5, xzr
umulh l, a1, b0
adds u2, u2, l
umulh l, a1, b1
adcs u3, u3, l
umulh l, a1, b2
adcs u4, u4, l
adc u5, u5, xzr

// Row 2 = [u6;...;u0] = [a2;a1;a0] * [b3;...;b0]

mul l, a2, b0
adds u2, u2, l
mul l, a2, b1
adcs u3, u3, l
mul l, a2, b2
adcs u4, u4, l
mul l, a2, b3
adcs u5, u5, l
umulh u6, a2, b3
adc u6, u6, xzr
umulh l, a2, b0
adds u3, u3, l
umulh l, a2, b1
adcs u4, u4, l
umulh l, a2, b2
adcs u5, u5, l
adc u6, u6, xzr

// Row 3 = [u7;...;u0] = [a3;...a0] * [b3;...;b0]

mul l, a3, b0
adds u3, u3, l
mul l, a3, b1
adcs u4, u4, l
mul l, a3, b2
adcs u5, u5, l
mul l, a3, b3
adcs u6, u6, l
umulh u7, a3, b3
adc u7, u7, xzr
umulh l, a3, b0
adds u4, u4, l
umulh l, a3, b1
adcs u5, u5, l
umulh l, a3, b2
adcs u6, u6, l
adc u7, u7, xzr

// Perform 4 Montgomery steps to rotate the lower half

montreds(u0,u3,u2,u1,u0, h,l,b0,b1)
montreds(u1,u0,u3,u2,u1, h,l,b0,b1)
montreds(u2,u1,u0,u3,u2, h,l,b0,b1)
montreds(u3,u2,u1,u0,u3, h,l,b0,b1)

// Add high and low parts, catching carry in b1

adds u0, u0, u4
adcs u1, u1, u5
adcs u2, u2, u6
adcs u3, u3, u7
cset b1, cs

// Set [h;-1;l;-1] = p_sm2 and form [u7,u6,u5,u4] = [b1;u3;u2;u1;u0] - p_sm2

mov l, #0xffffffff00000000
mov h, #0xfffffffeffffffff
subs u4, u0, #-1
sbcs u5, u1, l
adcs u6, u2, xzr
sbcs u7, u3, h
sbcs xzr, b1, xzr

// Now CF is clear if the comparison carried so the original was fine
// Otherwise take the form with p_sm2 subtracted.

csel u0, u0, u4, cc
csel u1, u1, u5, cc
csel u2, u2, u6, cc
csel u3, u3, u7, cc

// Store back final result

stp u0, u1, [z]
stp u2, u3, [z, #16]

CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_montmul_sm2_alt)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
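A typical way to use this function is the classic Montgomery pattern: convert one operand to Montgomery form, multiply, and the 2^256 factors cancel to leave a plain modular product. A minimal sketch, assuming the tomont and montmul objects above are assembled and linked together:

/* Minimal sketch: 3 * 5 mod p_sm2 via the Montgomery routines above. */
#include <stdint.h>
#include <stdio.h>

extern void bignum_tomont_sm2(uint64_t z[4], const uint64_t x[4]);
extern void bignum_montmul_sm2_alt(uint64_t z[4], const uint64_t x[4],
                                   const uint64_t y[4]);

int main(void) {
    uint64_t a[4] = {3, 0, 0, 0}, b[4] = {5, 0, 0, 0};
    uint64_t am[4], z[4];
    bignum_tomont_sm2(am, a);           /* am = 3 * 2^256 mod p_sm2          */
    bignum_montmul_sm2_alt(z, am, b);   /* z = am * b / 2^256 = 15 mod p_sm2 */
    printf("3 * 5 mod p_sm2 = %llu\n", (unsigned long long)z[0]);  /* 15 */
    return 0;
}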
wlsfx/bnbb
1,564
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/tutorial/rodata.S
/* This assembly file is a cleaned (and less ABI-compliant) version of
   GCC output of the following C program:

   const int x[10] = {2, 4, 6, 8, 10, 12, 14, 16, 18, 20};
   const int y[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   const int z = 1;

   int f(uint64_t i) {
     return x[i] + y[i];
   }

   int g(int64_t i) {
     return f(i + z);
   }
*/

#if defined(__ELF__)
.section .rodata
.global x
.type x, %object
.size x, 40
#elif defined(__APPLE__)
.const_data
#endif
.align 3
x:
.word 2
.word 4
.word 6
.word 8
.word 10
.word 12
.word 14
.word 16
.word 18
.word 20

#if defined(__ELF__)
.global y
.type y, %object
.size y, 40
#endif
.align 3
y:
.word 1
.word 2
.word 3
.word 4
.word 5
.word 6
.word 7
.word 8
.word 9
.word 10

#if defined(__ELF__)
.global z
.type z, %object
.size z, 4
#endif
.align 3
z:
.word 1

.text
.align 2

#if defined(__ELF__)
.type f, %function
#endif
f:
mov x3, x0
#if defined(__ELF__)
adrp x10, x
add x10, x10, :lo12:x
#else
adrp x10, x@PAGE
add x10, x10, x@PAGEOFF
#endif
mov x1, x3
ldr w1, [x10, x1, lsl 2]
#if defined(__ELF__)
adrp x11, y
add x11, x11, :lo12:y
#else
adrp x11, y@PAGE
add x11, x11, y@PAGEOFF
#endif
mov x2, x3
ldr w0, [x11, x2, lsl 2]
add w0, w1, w0
ret

#if defined(__ELF__)
.type g, %function
#endif
g:
#if defined(__ELF__)
adrp x10, z
add x10, x10, :lo12:z
#else
adrp x10, z@PAGE
add x10, x10, z@PAGEOFF
#endif
ldr w1, [x10]
add x0, x1, x0
b f
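Since x, y and z are declared .global on the ELF path, a separately compiled C file can reference them directly; f and g, by contrast, carry only .type directives here, so calling them across objects would additionally require exporting them with .global. A small hypothetical harness for the exported data:

/* Hypothetical harness (not part of the tutorial), ELF path assumed. */
#include <stdio.h>

extern const int x[10], y[10];
extern const int z;

int main(void) {
    printf("x[2]=%d y[2]=%d z=%d\n", x[2], y[2], z);  /* 6 3 1 */
    printf("x[2]+y[2]=%d\n", x[2] + y[2]);            /* 9, what f(2) computes */
    return 0;
}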
wlsfx/bnbb
1,567
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/tutorial/rodata_local.S
/* This assembly file is a cleaned (and less ABI-compliant) version of
   GCC output of the following C program:

   static const int x[10] = {2, 4, 6, 8, 10, 12, 14, 16, 18, 20};
   static const int y[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   static const int z = 1;

   int f(uint64_t i) {
     return x[i] + y[i];
   }

   int g(int64_t i) {
     return f(i + z);
   }
*/

#if defined(__ELF__)
.section .rodata
.type Lx, %object
.size Lx, 40
#elif defined(__APPLE__)
.const_data
#endif
.align 3
Lx:
.word 2
.word 4
.word 6
.word 8
.word 10
.word 12
.word 14
.word 16
.word 18
.word 20

#if defined(__ELF__)
.type Ly, %object
.size Ly, 40
#endif
.align 3
Ly:
.word 1
.word 2
.word 3
.word 4
.word 5
.word 6
.word 7
.word 8
.word 9
.word 10

#if defined(__ELF__)
.type Lz, %object
.size Lz, 4
#endif
.align 3
Lz:
.word 1

.text
.align 2

#if defined(__ELF__)
.type f, %function
#endif
f:
mov x3, x0
#if defined(__ELF__)
adrp x10, Lx
add x10, x10, :lo12:Lx
#else
adrp x10, Lx@PAGE
add x10, x10, Lx@PAGEOFF
#endif
mov x1, x3
ldr w1, [x10, x1, lsl 2]
#if defined(__ELF__)
adrp x11, Ly
add x11, x11, :lo12:Ly
#else
adrp x11, Ly@PAGE
add x11, x11, Ly@PAGEOFF
#endif
mov x2, x3
ldr w0, [x11, x2, lsl 2]
add w0, w1, w0
ret

#if defined(__ELF__)
.type g, %function
#endif
g:
#if defined(__ELF__)
adrp x10, Lz
add x10, x10, :lo12:Lz
#else
adrp x10, Lz@PAGE
add x10, x10, Lz@PAGEOFF
#endif
ldr w1, [x10]
add x0, x1, x0
b f
wlsfx/bnbb
38,493
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sha3/sha3_keccak4_f1600_alt.S
// Copyright (c) 2024 The mlkem-native project authors
// Copyright (c) 2021-2022 Arm Limited
// Copyright (c) 2022 Matthias Kannwischer
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT

// ----------------------------------------------------------------------------
// Keccak-f1600 permutation for SHA3, batch of four independent operations
// Input a[100], rc[24]; output a[100]
//
// The input/output argument is in effect four 25-element Keccak arrays
// a[0...24], a[25..49], a[50..74] and a[75..99], which could be considered
// as type a[4][25].
//
// Thinking of each such input/output array as a row-major flattening of a
// 5x5 matrix of 64-bit words, this performs the Keccak-f1600 permutation,
// all 24 rounds with the distinct round constants rc[i] for each one. For
// correct operation, the input pointer rc should point at the standard
// round constants as in the specification:
//
// https://keccak.team/keccak_specs_summary.html#roundConstants
//
// This operation is at the core of SHA3 and is fully specified here:
//
// https://keccak.team/files/Keccak-reference-3.0.pdf
//
// extern void sha3_keccak4_f1600_alt(uint64_t a[static 100],
//                                    const uint64_t rc[static 24]);
//
// Standard ARM ABI: X0 = a, X1 = rc
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

S2N_BN_SYM_VISIBILITY_DIRECTIVE(sha3_keccak4_f1600_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sha3_keccak4_f1600_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sha3_keccak4_f1600_alt)
.text
.balign 4

S2N_BN_SYMBOL(sha3_keccak4_f1600_alt):

CFI_START

// This is similar to the code in the mlkem-native repository here:
//
// mlkem/fips202/native/aarch64/src/keccak_f1600_x4_v8a_scalar_hybrid_asm.S
//
// The main difference is the avoidance of ld2/st2 in favour of explicit
// transposition operations and conventional loads and stores.
CFI_DEC_SP(224) CFI_STACKSAVE2X(x19,x20,48,56) CFI_STACKSAVE2X(x21,x22,64,72) CFI_STACKSAVE2X(x23,x24,80,88) CFI_STACKSAVE2X(x25,x26,96,104) CFI_STACKSAVE2X(x27,x28,112,120) CFI_STACKSAVE2X(x29,x30,128,136) CFI_STACKSAVE2X(d8,d9,144,152) CFI_STACKSAVE2X(d10,d11,160,168) CFI_STACKSAVE2X(d12,d13,176,184) CFI_STACKSAVE2X(d14,d15,192,200) mov x29, x1 mov x30, #0x0 str x30, [sp, #0x20] str x29, [sp, #0x8] str x29, [sp, #0x10] str x0, [sp] add x2, x0, #0xc8 ldp q24, q25, [x0] ldp q26, q27, [x2] trn1 v0.2d, v24.2d, v26.2d trn2 v1.2d, v24.2d, v26.2d trn1 v2.2d, v25.2d, v27.2d trn2 v3.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x20] ldp q26, q27, [x2, #0x20] trn1 v4.2d, v24.2d, v26.2d trn2 v5.2d, v24.2d, v26.2d trn1 v6.2d, v25.2d, v27.2d trn2 v7.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x40] ldp q26, q27, [x2, #0x40] trn1 v8.2d, v24.2d, v26.2d trn2 v9.2d, v24.2d, v26.2d trn1 v10.2d, v25.2d, v27.2d trn2 v11.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x60] ldp q26, q27, [x2, #0x60] trn1 v12.2d, v24.2d, v26.2d trn2 v13.2d, v24.2d, v26.2d trn1 v14.2d, v25.2d, v27.2d trn2 v15.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x80] ldp q26, q27, [x2, #0x80] trn1 v16.2d, v24.2d, v26.2d trn2 v17.2d, v24.2d, v26.2d trn1 v18.2d, v25.2d, v27.2d trn2 v19.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0xa0] ldp q26, q27, [x2, #0xa0] trn1 v20.2d, v24.2d, v26.2d trn2 v21.2d, v24.2d, v26.2d trn1 v22.2d, v25.2d, v27.2d trn2 v23.2d, v25.2d, v27.2d ldr d24, [x0, #0xc0] ldr d25, [x2, #0xc0] trn1 v24.2d, v24.2d, v25.2d add x0, x0, #0x190 ldp x1, x6, [x0] ldp x11, x16, [x0, #0x10] ldp x21, x2, [x0, #0x20] ldp x7, x12, [x0, #0x30] ldp x17, x22, [x0, #0x40] ldp x3, x8, [x0, #0x50] ldp x13, x28, [x0, #0x60] ldp x23, x4, [x0, #0x70] ldp x9, x14, [x0, #0x80] ldp x19, x24, [x0, #0x90] ldp x5, x10, [x0, #0xa0] ldp x15, x20, [x0, #0xb0] ldr x25, [x0, #0xc0] sub x0, x0, #0x190 Lsha3_keccak4_f1600_alt_initial: eor x30, x24, x25 eor x27, x9, x10 eor x0, x30, x21 eor x26, x27, x6 eor x27, x26, x7 eor x29, x0, x22 eor x26, x29, x23 eor x29, x4, x5 eor x30, x29, x1 eor x0, x27, x8 eor x29, x30, x2 eor x30, x19, x20 eor x30, x30, x16 eor x27, x26, x0, ror #63 eor x4, x4, x27 eor x30, x30, x17 eor x30, x30, x28 eor x29, x29, x3 eor x0, x0, x30, ror #63 eor x30, x30, x29, ror #63 eor x22, x22, x30 eor x23, x23, x30 str x23, [sp, #0xd0] eor x23, x14, x15 eor x14, x14, x0 eor x23, x23, x11 eor x15, x15, x0 eor x1, x1, x27 eor x23, x23, x12 eor x23, x23, x13 eor x11, x11, x0 eor x29, x29, x23, ror #63 eor x23, x23, x26, ror #63 eor x26, x13, x0 eor x13, x28, x23 eor x28, x24, x30 eor x24, x16, x23 eor x16, x21, x30 eor x21, x25, x30 eor x30, x19, x23 eor x19, x20, x23 eor x20, x17, x23 eor x17, x12, x0 eor x0, x2, x27 eor x2, x6, x29 eor x6, x8, x29 bic x8, x28, x13, ror #47 eor x12, x3, x27 bic x3, x13, x17, ror #19 eor x5, x5, x27 ldr x27, [sp, #0xd0] bic x25, x17, x2, ror #5 eor x9, x9, x29 eor x23, x25, x5, ror #52 eor x3, x3, x2, ror #24 eor x8, x8, x17, ror #2 eor x17, x10, x29 bic x25, x12, x22, ror #47 eor x29, x7, x29 bic x10, x4, x27, ror #2 bic x7, x5, x28, ror #10 eor x10, x10, x20, ror #50 eor x13, x7, x13, ror #57 bic x7, x2, x5, ror #47 eor x2, x25, x24, ror #39 bic x25, x20, x11, ror #57 bic x5, x17, x4, ror #25 eor x25, x25, x17, ror #53 bic x17, x11, x17, ror #60 eor x28, x7, x28, ror #57 bic x7, x9, x12, ror #42 eor x7, x7, x22, ror #25 bic x22, x22, x24, ror #56 bic x24, x24, x15, ror #31 eor x22, x22, x15, ror #23 bic x20, x27, x20, ror #48 bic x15, x15, x9, ror #16 eor x12, x15, x12, ror #58 eor x15, x5, x27, ror #27 eor x5, x20, x11, ror #41 ldr 
x11, [sp, #0x8] eor x20, x17, x4, ror #21 eor x17, x24, x9, ror #47 mov x24, #0x1 bic x9, x0, x16, ror #9 str x24, [sp, #0x18] bic x24, x29, x1, ror #44 bic x27, x1, x21, ror #50 bic x4, x26, x29, ror #63 eor x1, x1, x4, ror #21 ldr x11, [x11] bic x4, x21, x30, ror #57 eor x21, x24, x21, ror #30 eor x24, x9, x19, ror #44 bic x9, x14, x6, ror #5 eor x9, x9, x0, ror #43 bic x0, x6, x0, ror #38 eor x1, x1, x11 eor x11, x4, x26, ror #35 eor x4, x0, x16, ror #47 bic x0, x16, x19, ror #35 eor x16, x27, x30, ror #43 bic x27, x30, x26, ror #42 bic x26, x19, x14, ror #41 eor x19, x0, x14, ror #12 eor x14, x26, x6, ror #46 eor x6, x27, x29, ror #41 eor x0, x15, x11, ror #52 eor x0, x0, x13, ror #48 eor x26, x8, x9, ror #57 eor x27, x0, x14, ror #10 eor x29, x16, x28, ror #63 eor x26, x26, x6, ror #51 eor x30, x23, x22, ror #50 eor x0, x26, x10, ror #31 eor x29, x29, x19, ror #37 eor x27, x27, x12, ror #5 eor x30, x30, x24, ror #34 eor x0, x0, x7, ror #27 eor x26, x30, x21, ror #26 eor x26, x26, x25, ror #15 ror x30, x27, #0x3e eor x30, x30, x26, ror #57 ror x26, x26, #0x3a eor x16, x30, x16 eor x28, x30, x28, ror #63 str x28, [sp, #0xd0] eor x29, x29, x17, ror #36 eor x28, x1, x2, ror #61 eor x19, x30, x19, ror #37 eor x29, x29, x20, ror #2 eor x28, x28, x4, ror #54 eor x26, x26, x0, ror #55 eor x28, x28, x3, ror #39 eor x28, x28, x5, ror #25 ror x0, x0, #0x38 eor x0, x0, x29, ror #63 eor x27, x28, x27, ror #61 eor x13, x0, x13, ror #46 eor x28, x29, x28, ror #63 eor x29, x30, x20, ror #2 eor x20, x26, x3, ror #39 eor x11, x0, x11, ror #50 eor x25, x28, x25, ror #9 eor x3, x28, x21, ror #20 eor x21, x26, x1 eor x9, x27, x9, ror #49 eor x24, x28, x24, ror #28 eor x1, x30, x17, ror #36 eor x14, x0, x14, ror #8 eor x22, x28, x22, ror #44 eor x8, x27, x8, ror #56 eor x17, x27, x7, ror #19 eor x15, x0, x15, ror #62 bic x7, x20, x22, ror #47 eor x4, x26, x4, ror #54 eor x0, x0, x12, ror #3 eor x28, x28, x23, ror #58 eor x23, x26, x2, ror #61 eor x26, x26, x5, ror #25 eor x2, x7, x16, ror #39 bic x7, x9, x20, ror #42 bic x30, x15, x9, ror #16 eor x7, x7, x22, ror #25 eor x12, x30, x20, ror #58 bic x20, x22, x16, ror #56 eor x30, x27, x6, ror #43 eor x22, x20, x15, ror #23 bic x6, x19, x13, ror #42 eor x6, x6, x17, ror #41 bic x5, x13, x17, ror #63 eor x5, x21, x5, ror #21 bic x17, x17, x21, ror #44 eor x27, x27, x10, ror #23 bic x21, x21, x25, ror #50 bic x20, x27, x4, ror #25 bic x10, x16, x15, ror #31 eor x16, x21, x19, ror #43 eor x21, x17, x25, ror #30 bic x19, x25, x19, ror #57 ldr x25, [sp, #0x18] eor x17, x10, x9, ror #47 ldr x9, [sp, #0x8] eor x15, x20, x28, ror #27 bic x20, x4, x28, ror #2 eor x10, x20, x1, ror #50 bic x20, x11, x27, ror #60 eor x20, x20, x4, ror #21 bic x4, x28, x1, ror #48 bic x1, x1, x11, ror #57 ldr x28, [x9, x25, lsl #3] ldr x9, [sp, #0xd0] add x25, x25, #0x1 str x25, [sp, #0x18] cmp x25, #0x17 eor x25, x1, x27, ror #53 bic x27, x30, x26, ror #47 eor x1, x5, x28 eor x5, x4, x11, ror #41 eor x11, x19, x13, ror #35 bic x13, x26, x24, ror #10 eor x28, x27, x24, ror #57 bic x27, x24, x9, ror #47 bic x19, x23, x3, ror #9 bic x4, x29, x14, ror #41 eor x24, x19, x29, ror #44 bic x29, x3, x29, ror #35 eor x13, x13, x9, ror #57 eor x19, x29, x14, ror #12 bic x29, x9, x0, ror #19 bic x14, x14, x8, ror #5 eor x9, x14, x23, ror #43 eor x14, x4, x8, ror #46 bic x23, x8, x23, ror #38 eor x8, x27, x0, ror #2 eor x4, x23, x3, ror #47 bic x3, x0, x30, ror #5 eor x23, x3, x26, ror #52 eor x3, x29, x30, ror #24 eor v30.16b, v0.16b, v5.16b eor v30.16b, v30.16b, v10.16b eor v30.16b, v30.16b, 
v15.16b eor v30.16b, v30.16b, v20.16b eor v29.16b, v1.16b, v6.16b eor v29.16b, v29.16b, v11.16b eor v29.16b, v29.16b, v16.16b eor v29.16b, v29.16b, v21.16b eor v28.16b, v2.16b, v7.16b eor v28.16b, v28.16b, v12.16b eor v28.16b, v28.16b, v17.16b eor v28.16b, v28.16b, v22.16b eor v27.16b, v3.16b, v8.16b eor v27.16b, v27.16b, v13.16b eor v27.16b, v27.16b, v18.16b eor v27.16b, v27.16b, v23.16b eor v26.16b, v4.16b, v9.16b eor v26.16b, v26.16b, v14.16b eor v26.16b, v26.16b, v19.16b eor v26.16b, v26.16b, v24.16b add v31.2d, v28.2d, v28.2d sri v31.2d, v28.2d, #0x3f eor v25.16b, v31.16b, v30.16b add v31.2d, v26.2d, v26.2d sri v31.2d, v26.2d, #0x3f eor v28.16b, v31.16b, v28.16b add v31.2d, v29.2d, v29.2d sri v31.2d, v29.2d, #0x3f eor v26.16b, v31.16b, v26.16b add v31.2d, v27.2d, v27.2d sri v31.2d, v27.2d, #0x3f eor v29.16b, v31.16b, v29.16b add v31.2d, v30.2d, v30.2d sri v31.2d, v30.2d, #0x3f eor v27.16b, v31.16b, v27.16b eor v30.16b, v0.16b, v26.16b eor v31.16b, v2.16b, v29.16b shl v0.2d, v31.2d, #0x3e sri v0.2d, v31.2d, #0x2 eor v31.16b, v12.16b, v29.16b shl v2.2d, v31.2d, #0x2b sri v2.2d, v31.2d, #0x15 eor v31.16b, v13.16b, v28.16b shl v12.2d, v31.2d, #0x19 sri v12.2d, v31.2d, #0x27 eor v31.16b, v19.16b, v27.16b shl v13.2d, v31.2d, #0x8 sri v13.2d, v31.2d, #0x38 eor v31.16b, v23.16b, v28.16b shl v19.2d, v31.2d, #0x38 sri v19.2d, v31.2d, #0x8 eor v31.16b, v15.16b, v26.16b shl v23.2d, v31.2d, #0x29 sri v23.2d, v31.2d, #0x17 eor v31.16b, v1.16b, v25.16b shl v15.2d, v31.2d, #0x1 sri v15.2d, v31.2d, #0x3f eor v31.16b, v8.16b, v28.16b shl v1.2d, v31.2d, #0x37 sri v1.2d, v31.2d, #0x9 eor v31.16b, v16.16b, v25.16b shl v8.2d, v31.2d, #0x2d sri v8.2d, v31.2d, #0x13 eor v31.16b, v7.16b, v29.16b shl v16.2d, v31.2d, #0x6 sri v16.2d, v31.2d, #0x3a eor v31.16b, v10.16b, v26.16b shl v7.2d, v31.2d, #0x3 sri v7.2d, v31.2d, #0x3d eor v31.16b, v3.16b, v28.16b shl v10.2d, v31.2d, #0x1c sri v10.2d, v31.2d, #0x24 eor v31.16b, v18.16b, v28.16b shl v3.2d, v31.2d, #0x15 sri v3.2d, v31.2d, #0x2b eor v31.16b, v17.16b, v29.16b shl v18.2d, v31.2d, #0xf sri v18.2d, v31.2d, #0x31 eor v31.16b, v11.16b, v25.16b shl v17.2d, v31.2d, #0xa sri v17.2d, v31.2d, #0x36 eor v31.16b, v9.16b, v27.16b shl v11.2d, v31.2d, #0x14 sri v11.2d, v31.2d, #0x2c eor v31.16b, v22.16b, v29.16b shl v9.2d, v31.2d, #0x3d sri v9.2d, v31.2d, #0x3 eor v31.16b, v14.16b, v27.16b shl v22.2d, v31.2d, #0x27 sri v22.2d, v31.2d, #0x19 eor v31.16b, v20.16b, v26.16b shl v14.2d, v31.2d, #0x12 sri v14.2d, v31.2d, #0x2e eor v31.16b, v4.16b, v27.16b shl v20.2d, v31.2d, #0x1b sri v20.2d, v31.2d, #0x25 eor v31.16b, v24.16b, v27.16b shl v4.2d, v31.2d, #0xe sri v4.2d, v31.2d, #0x32 eor v31.16b, v21.16b, v25.16b shl v24.2d, v31.2d, #0x2 sri v24.2d, v31.2d, #0x3e eor v31.16b, v5.16b, v26.16b shl v21.2d, v31.2d, #0x24 sri v21.2d, v31.2d, #0x1c eor v31.16b, v6.16b, v25.16b shl v27.2d, v31.2d, #0x2c sri v27.2d, v31.2d, #0x14 ldr x30, [sp, #0x10] ld1r { v28.2d }, [x30], #8 str x30, [sp, #0x10] bic v31.16b, v7.16b, v11.16b eor v5.16b, v31.16b, v10.16b bic v31.16b, v8.16b, v7.16b eor v6.16b, v31.16b, v11.16b bic v31.16b, v9.16b, v8.16b eor v7.16b, v31.16b, v7.16b bic v31.16b, v10.16b, v9.16b eor v8.16b, v31.16b, v8.16b bic v31.16b, v11.16b, v10.16b eor v9.16b, v31.16b, v9.16b bic v31.16b, v12.16b, v16.16b eor v10.16b, v31.16b, v15.16b bic v31.16b, v13.16b, v12.16b eor v11.16b, v31.16b, v16.16b bic v31.16b, v14.16b, v13.16b eor v12.16b, v31.16b, v12.16b bic v31.16b, v15.16b, v14.16b eor v13.16b, v31.16b, v13.16b bic v31.16b, v16.16b, v15.16b eor v14.16b, v31.16b, v14.16b bic v31.16b, 
v17.16b, v21.16b eor v15.16b, v31.16b, v20.16b bic v31.16b, v18.16b, v17.16b eor v16.16b, v31.16b, v21.16b bic v31.16b, v19.16b, v18.16b eor v17.16b, v31.16b, v17.16b bic v31.16b, v20.16b, v19.16b eor v18.16b, v31.16b, v18.16b bic v31.16b, v21.16b, v20.16b eor v19.16b, v31.16b, v19.16b bic v31.16b, v22.16b, v1.16b eor v20.16b, v31.16b, v0.16b bic v31.16b, v23.16b, v22.16b eor v21.16b, v31.16b, v1.16b bic v31.16b, v24.16b, v23.16b eor v22.16b, v31.16b, v22.16b bic v31.16b, v0.16b, v24.16b eor v23.16b, v31.16b, v23.16b bic v31.16b, v1.16b, v0.16b eor v24.16b, v31.16b, v24.16b bic v31.16b, v2.16b, v27.16b eor v0.16b, v31.16b, v30.16b bic v31.16b, v3.16b, v2.16b eor v1.16b, v31.16b, v27.16b bic v31.16b, v4.16b, v3.16b eor v2.16b, v31.16b, v2.16b bic v31.16b, v30.16b, v4.16b eor v3.16b, v31.16b, v3.16b bic v31.16b, v27.16b, v30.16b eor v4.16b, v31.16b, v4.16b eor v0.16b, v0.16b, v28.16b Lsha3_keccak4_f1600_alt_loop: eor x0, x15, x11, ror #52 eor x0, x0, x13, ror #48 eor v30.16b, v0.16b, v5.16b eor v30.16b, v30.16b, v10.16b eor x26, x8, x9, ror #57 eor v30.16b, v30.16b, v15.16b eor x27, x0, x14, ror #10 eor x29, x16, x28, ror #63 eor v30.16b, v30.16b, v20.16b eor x26, x26, x6, ror #51 eor v29.16b, v1.16b, v6.16b eor x30, x23, x22, ror #50 eor v29.16b, v29.16b, v11.16b eor x0, x26, x10, ror #31 eor x29, x29, x19, ror #37 eor v29.16b, v29.16b, v16.16b eor x27, x27, x12, ror #5 eor v29.16b, v29.16b, v21.16b eor x30, x30, x24, ror #34 eor x0, x0, x7, ror #27 eor v28.16b, v2.16b, v7.16b eor x26, x30, x21, ror #26 eor v28.16b, v28.16b, v12.16b eor x26, x26, x25, ror #15 eor v28.16b, v28.16b, v17.16b ror x30, x27, #0x3e eor x30, x30, x26, ror #57 eor v28.16b, v28.16b, v22.16b ror x26, x26, #0x3a eor v27.16b, v3.16b, v8.16b eor x16, x30, x16 eor v27.16b, v27.16b, v13.16b eor x28, x30, x28, ror #63 str x28, [sp, #0xd0] eor v27.16b, v27.16b, v18.16b eor x29, x29, x17, ror #36 eor v27.16b, v27.16b, v23.16b eor x28, x1, x2, ror #61 eor x19, x30, x19, ror #37 eor v26.16b, v4.16b, v9.16b eor x29, x29, x20, ror #2 eor v26.16b, v26.16b, v14.16b eor x28, x28, x4, ror #54 eor v26.16b, v26.16b, v19.16b eor x26, x26, x0, ror #55 eor x28, x28, x3, ror #39 eor v26.16b, v26.16b, v24.16b eor x28, x28, x5, ror #25 add v31.2d, v28.2d, v28.2d ror x0, x0, #0x38 eor x0, x0, x29, ror #63 sri v31.2d, v28.2d, #0x3f eor x27, x28, x27, ror #61 eor v25.16b, v31.16b, v30.16b eor x13, x0, x13, ror #46 add v31.2d, v26.2d, v26.2d eor x28, x29, x28, ror #63 eor x29, x30, x20, ror #2 sri v31.2d, v26.2d, #0x3f eor x20, x26, x3, ror #39 eor v28.16b, v31.16b, v28.16b eor x11, x0, x11, ror #50 add v31.2d, v29.2d, v29.2d eor x25, x28, x25, ror #9 eor x3, x28, x21, ror #20 sri v31.2d, v29.2d, #0x3f eor x21, x26, x1 eor v26.16b, v31.16b, v26.16b eor x9, x27, x9, ror #49 eor x24, x28, x24, ror #28 add v31.2d, v27.2d, v27.2d eor x1, x30, x17, ror #36 sri v31.2d, v27.2d, #0x3f eor x14, x0, x14, ror #8 eor v29.16b, v31.16b, v29.16b eor x22, x28, x22, ror #44 eor x8, x27, x8, ror #56 add v31.2d, v30.2d, v30.2d eor x17, x27, x7, ror #19 sri v31.2d, v30.2d, #0x3f eor x15, x0, x15, ror #62 bic x7, x20, x22, ror #47 eor v27.16b, v31.16b, v27.16b eor x4, x26, x4, ror #54 eor v30.16b, v0.16b, v26.16b eor x0, x0, x12, ror #3 eor v31.16b, v2.16b, v29.16b eor x28, x28, x23, ror #58 eor x23, x26, x2, ror #61 shl v0.2d, v31.2d, #0x3e eor x26, x26, x5, ror #25 sri v0.2d, v31.2d, #0x2 eor x2, x7, x16, ror #39 eor v31.16b, v12.16b, v29.16b bic x7, x9, x20, ror #42 bic x30, x15, x9, ror #16 shl v2.2d, v31.2d, #0x2b eor x7, x7, x22, ror #25 sri v2.2d, v31.2d, 
#0x15 eor x12, x30, x20, ror #58 bic x20, x22, x16, ror #56 eor v31.16b, v13.16b, v28.16b eor x30, x27, x6, ror #43 shl v12.2d, v31.2d, #0x19 eor x22, x20, x15, ror #23 sri v12.2d, v31.2d, #0x27 bic x6, x19, x13, ror #42 eor x6, x6, x17, ror #41 eor v31.16b, v19.16b, v27.16b bic x5, x13, x17, ror #63 shl v13.2d, v31.2d, #0x8 eor x5, x21, x5, ror #21 sri v13.2d, v31.2d, #0x38 bic x17, x17, x21, ror #44 eor x27, x27, x10, ror #23 eor v31.16b, v23.16b, v28.16b bic x21, x21, x25, ror #50 shl v19.2d, v31.2d, #0x38 bic x20, x27, x4, ror #25 bic x10, x16, x15, ror #31 sri v19.2d, v31.2d, #0x8 eor x16, x21, x19, ror #43 eor v31.16b, v15.16b, v26.16b eor x21, x17, x25, ror #30 shl v23.2d, v31.2d, #0x29 bic x19, x25, x19, ror #57 ldr x25, [sp, #0x18] sri v23.2d, v31.2d, #0x17 eor x17, x10, x9, ror #47 eor v31.16b, v1.16b, v25.16b ldr x9, [sp, #0x8] eor x15, x20, x28, ror #27 shl v15.2d, v31.2d, #0x1 bic x20, x4, x28, ror #2 sri v15.2d, v31.2d, #0x3f eor x10, x20, x1, ror #50 eor v31.16b, v8.16b, v28.16b bic x20, x11, x27, ror #60 eor x20, x20, x4, ror #21 shl v1.2d, v31.2d, #0x37 bic x4, x28, x1, ror #48 sri v1.2d, v31.2d, #0x9 bic x1, x1, x11, ror #57 eor v31.16b, v16.16b, v25.16b ldr x28, [x9, x25, lsl #3] ldr x9, [sp, #0xd0] shl v8.2d, v31.2d, #0x2d add x25, x25, #0x1 sri v8.2d, v31.2d, #0x13 str x25, [sp, #0x18] cmp x25, #0x17 eor v31.16b, v7.16b, v29.16b eor x25, x1, x27, ror #53 shl v16.2d, v31.2d, #0x6 bic x27, x30, x26, ror #47 sri v16.2d, v31.2d, #0x3a eor x1, x5, x28 eor x5, x4, x11, ror #41 eor v31.16b, v10.16b, v26.16b eor x11, x19, x13, ror #35 shl v7.2d, v31.2d, #0x3 bic x13, x26, x24, ror #10 eor x28, x27, x24, ror #57 sri v7.2d, v31.2d, #0x3d bic x27, x24, x9, ror #47 eor v31.16b, v3.16b, v28.16b bic x19, x23, x3, ror #9 shl v10.2d, v31.2d, #0x1c bic x4, x29, x14, ror #41 eor x24, x19, x29, ror #44 sri v10.2d, v31.2d, #0x24 bic x29, x3, x29, ror #35 eor v31.16b, v18.16b, v28.16b eor x13, x13, x9, ror #57 shl v3.2d, v31.2d, #0x15 eor x19, x29, x14, ror #12 bic x29, x9, x0, ror #19 sri v3.2d, v31.2d, #0x2b bic x14, x14, x8, ror #5 eor v31.16b, v17.16b, v29.16b eor x9, x14, x23, ror #43 eor x14, x4, x8, ror #46 shl v18.2d, v31.2d, #0xf bic x23, x8, x23, ror #38 sri v18.2d, v31.2d, #0x31 eor x8, x27, x0, ror #2 eor v31.16b, v11.16b, v25.16b eor x4, x23, x3, ror #47 bic x3, x0, x30, ror #5 shl v17.2d, v31.2d, #0xa eor x23, x3, x26, ror #52 sri v17.2d, v31.2d, #0x36 eor x3, x29, x30, ror #24 eor x0, x15, x11, ror #52 eor v31.16b, v9.16b, v27.16b eor x0, x0, x13, ror #48 shl v11.2d, v31.2d, #0x14 eor x26, x8, x9, ror #57 sri v11.2d, v31.2d, #0x2c eor x27, x0, x14, ror #10 eor x29, x16, x28, ror #63 eor v31.16b, v22.16b, v29.16b eor x26, x26, x6, ror #51 shl v9.2d, v31.2d, #0x3d eor x30, x23, x22, ror #50 sri v9.2d, v31.2d, #0x3 eor x0, x26, x10, ror #31 eor x29, x29, x19, ror #37 eor v31.16b, v14.16b, v27.16b eor x27, x27, x12, ror #5 shl v22.2d, v31.2d, #0x27 eor x30, x30, x24, ror #34 eor x0, x0, x7, ror #27 sri v22.2d, v31.2d, #0x19 eor x26, x30, x21, ror #26 eor v31.16b, v20.16b, v26.16b eor x26, x26, x25, ror #15 shl v14.2d, v31.2d, #0x12 ror x30, x27, #0x3e eor x30, x30, x26, ror #57 sri v14.2d, v31.2d, #0x2e ror x26, x26, #0x3a eor v31.16b, v4.16b, v27.16b eor x16, x30, x16 shl v20.2d, v31.2d, #0x1b eor x28, x30, x28, ror #63 str x28, [sp, #0xd0] sri v20.2d, v31.2d, #0x25 eor x29, x29, x17, ror #36 eor v31.16b, v24.16b, v27.16b eor x28, x1, x2, ror #61 eor x19, x30, x19, ror #37 shl v4.2d, v31.2d, #0xe eor x29, x29, x20, ror #2 sri v4.2d, v31.2d, #0x32 eor x28, x28, x4, ror #54 eor 
v31.16b, v21.16b, v25.16b eor x26, x26, x0, ror #55 eor x28, x28, x3, ror #39 shl v24.2d, v31.2d, #0x2 eor x28, x28, x5, ror #25 sri v24.2d, v31.2d, #0x3e ror x0, x0, #0x38 eor x0, x0, x29, ror #63 eor v31.16b, v5.16b, v26.16b eor x27, x28, x27, ror #61 shl v21.2d, v31.2d, #0x24 eor x13, x0, x13, ror #46 sri v21.2d, v31.2d, #0x1c eor x28, x29, x28, ror #63 eor x29, x30, x20, ror #2 eor v31.16b, v6.16b, v25.16b eor x20, x26, x3, ror #39 shl v27.2d, v31.2d, #0x2c eor x11, x0, x11, ror #50 sri v27.2d, v31.2d, #0x14 eor x25, x28, x25, ror #9 eor x3, x28, x21, ror #20 bic v31.16b, v7.16b, v11.16b eor x21, x26, x1 eor v5.16b, v31.16b, v10.16b eor x9, x27, x9, ror #49 eor x24, x28, x24, ror #28 bic v31.16b, v8.16b, v7.16b eor x1, x30, x17, ror #36 eor v6.16b, v31.16b, v11.16b eor x14, x0, x14, ror #8 bic v31.16b, v9.16b, v8.16b eor x22, x28, x22, ror #44 eor x8, x27, x8, ror #56 eor v7.16b, v31.16b, v7.16b eor x17, x27, x7, ror #19 bic v31.16b, v10.16b, v9.16b eor x15, x0, x15, ror #62 bic x7, x20, x22, ror #47 eor v8.16b, v31.16b, v8.16b eor x4, x26, x4, ror #54 bic v31.16b, v11.16b, v10.16b eor x0, x0, x12, ror #3 eor v9.16b, v31.16b, v9.16b eor x28, x28, x23, ror #58 eor x23, x26, x2, ror #61 bic v31.16b, v12.16b, v16.16b eor x26, x26, x5, ror #25 eor v10.16b, v31.16b, v15.16b eor x2, x7, x16, ror #39 bic v31.16b, v13.16b, v12.16b bic x7, x9, x20, ror #42 bic x30, x15, x9, ror #16 eor v11.16b, v31.16b, v16.16b eor x7, x7, x22, ror #25 bic v31.16b, v14.16b, v13.16b eor x12, x30, x20, ror #58 bic x20, x22, x16, ror #56 eor v12.16b, v31.16b, v12.16b eor x30, x27, x6, ror #43 bic v31.16b, v15.16b, v14.16b eor x22, x20, x15, ror #23 eor v13.16b, v31.16b, v13.16b bic x6, x19, x13, ror #42 eor x6, x6, x17, ror #41 bic v31.16b, v16.16b, v15.16b bic x5, x13, x17, ror #63 eor v14.16b, v31.16b, v14.16b eor x5, x21, x5, ror #21 bic v31.16b, v17.16b, v21.16b bic x17, x17, x21, ror #44 eor x27, x27, x10, ror #23 eor v15.16b, v31.16b, v20.16b bic x21, x21, x25, ror #50 bic v31.16b, v18.16b, v17.16b bic x20, x27, x4, ror #25 bic x10, x16, x15, ror #31 eor v16.16b, v31.16b, v21.16b eor x16, x21, x19, ror #43 bic v31.16b, v19.16b, v18.16b eor x21, x17, x25, ror #30 eor v17.16b, v31.16b, v17.16b bic x19, x25, x19, ror #57 ldr x25, [sp, #0x18] bic v31.16b, v20.16b, v19.16b eor x17, x10, x9, ror #47 eor v18.16b, v31.16b, v18.16b ldr x9, [sp, #0x8] eor x15, x20, x28, ror #27 bic v31.16b, v21.16b, v20.16b bic x20, x4, x28, ror #2 eor v19.16b, v31.16b, v19.16b eor x10, x20, x1, ror #50 bic v31.16b, v22.16b, v1.16b bic x20, x11, x27, ror #60 eor x20, x20, x4, ror #21 eor v20.16b, v31.16b, v0.16b bic x4, x28, x1, ror #48 bic v31.16b, v23.16b, v22.16b bic x1, x1, x11, ror #57 eor v21.16b, v31.16b, v1.16b ldr x28, [x9, x25, lsl #3] ldr x9, [sp, #0xd0] bic v31.16b, v24.16b, v23.16b add x25, x25, #0x1 eor v22.16b, v31.16b, v22.16b str x25, [sp, #0x18] cmp x25, #0x17 bic v31.16b, v0.16b, v24.16b eor x25, x1, x27, ror #53 eor v23.16b, v31.16b, v23.16b bic x27, x30, x26, ror #47 bic v31.16b, v1.16b, v0.16b eor x1, x5, x28 eor x5, x4, x11, ror #41 eor v24.16b, v31.16b, v24.16b eor x11, x19, x13, ror #35 bic v31.16b, v2.16b, v27.16b bic x13, x26, x24, ror #10 eor x28, x27, x24, ror #57 eor v0.16b, v31.16b, v30.16b bic x27, x24, x9, ror #47 bic v31.16b, v3.16b, v2.16b bic x19, x23, x3, ror #9 eor v1.16b, v31.16b, v27.16b bic x4, x29, x14, ror #41 eor x24, x19, x29, ror #44 bic v31.16b, v4.16b, v3.16b bic x29, x3, x29, ror #35 eor v2.16b, v31.16b, v2.16b eor x13, x13, x9, ror #57 bic v31.16b, v30.16b, v4.16b eor x19, x29, x14, 
ror #12 bic x29, x9, x0, ror #19 eor v3.16b, v31.16b, v3.16b bic x14, x14, x8, ror #5 bic v31.16b, v27.16b, v30.16b eor x9, x14, x23, ror #43 eor x14, x4, x8, ror #46 eor v4.16b, v31.16b, v4.16b bic x23, x8, x23, ror #38 eor x8, x27, x0, ror #2 eor x4, x23, x3, ror #47 bic x3, x0, x30, ror #5 eor x23, x3, x26, ror #52 eor x3, x29, x30, ror #24 ldr x30, [sp, #0x10] ld1r { v28.2d }, [x30], #8 str x30, [sp, #0x10] eor v0.16b, v0.16b, v28.16b b.le Lsha3_keccak4_f1600_alt_loop ror x2, x2, #0x3d ror x3, x3, #0x27 ror x4, x4, #0x36 ror x5, x5, #0x19 ror x6, x6, #0x2b ror x7, x7, #0x13 ror x8, x8, #0x38 ror x9, x9, #0x31 ror x10, x10, #0x17 ror x11, x11, #0x32 ror x12, x12, #0x3 ror x13, x13, #0x2e ror x14, x14, #0x8 ror x15, x15, #0x3e ror x17, x17, #0x24 ror x28, x28, #0x3f ror x19, x19, #0x25 ror x20, x20, #0x2 ror x21, x21, #0x14 ror x22, x22, #0x2c ror x23, x23, #0x3a ror x24, x24, #0x1c ror x25, x25, #0x9 ldr x30, [sp, #0x20] cmp x30, #0x1 b.eq Lsha3_keccak4_f1600_alt_done mov x30, #0x1 str x30, [sp, #0x20] ldr x0, [sp] add x0, x0, #0x190 stp x1, x6, [x0] stp x11, x16, [x0, #0x10] stp x21, x2, [x0, #0x20] stp x7, x12, [x0, #0x30] stp x17, x22, [x0, #0x40] stp x3, x8, [x0, #0x50] stp x13, x28, [x0, #0x60] stp x23, x4, [x0, #0x70] stp x9, x14, [x0, #0x80] stp x19, x24, [x0, #0x90] stp x5, x10, [x0, #0xa0] stp x15, x20, [x0, #0xb0] str x25, [x0, #0xc0] sub x0, x0, #0x190 add x0, x0, #0x258 ldp x1, x6, [x0] ldp x11, x16, [x0, #0x10] ldp x21, x2, [x0, #0x20] ldp x7, x12, [x0, #0x30] ldp x17, x22, [x0, #0x40] ldp x3, x8, [x0, #0x50] ldp x13, x28, [x0, #0x60] ldp x23, x4, [x0, #0x70] ldp x9, x14, [x0, #0x80] ldp x19, x24, [x0, #0x90] ldp x5, x10, [x0, #0xa0] ldp x15, x20, [x0, #0xb0] ldr x25, [x0, #0xc0] sub x0, x0, #0x258 b Lsha3_keccak4_f1600_alt_initial Lsha3_keccak4_f1600_alt_done: ldr x0, [sp] add x0, x0, #0x258 stp x1, x6, [x0] stp x11, x16, [x0, #0x10] stp x21, x2, [x0, #0x20] stp x7, x12, [x0, #0x30] stp x17, x22, [x0, #0x40] stp x3, x8, [x0, #0x50] stp x13, x28, [x0, #0x60] stp x23, x4, [x0, #0x70] stp x9, x14, [x0, #0x80] stp x19, x24, [x0, #0x90] stp x5, x10, [x0, #0xa0] stp x15, x20, [x0, #0xb0] str x25, [x0, #0xc0] sub x0, x0, #0x258 add x2, x0, #0xc8 trn1 v25.2d, v0.2d, v1.2d trn1 v26.2d, v2.2d, v3.2d stp q25, q26, [x0] trn2 v25.2d, v0.2d, v1.2d trn2 v26.2d, v2.2d, v3.2d stp q25, q26, [x2] trn1 v25.2d, v4.2d, v5.2d trn1 v26.2d, v6.2d, v7.2d stp q25, q26, [x0, #0x20] trn2 v25.2d, v4.2d, v5.2d trn2 v26.2d, v6.2d, v7.2d stp q25, q26, [x2, #0x20] trn1 v25.2d, v8.2d, v9.2d trn1 v26.2d, v10.2d, v11.2d stp q25, q26, [x0, #0x40] trn2 v25.2d, v8.2d, v9.2d trn2 v26.2d, v10.2d, v11.2d stp q25, q26, [x2, #0x40] trn1 v25.2d, v12.2d, v13.2d trn1 v26.2d, v14.2d, v15.2d stp q25, q26, [x0, #0x60] trn2 v25.2d, v12.2d, v13.2d trn2 v26.2d, v14.2d, v15.2d stp q25, q26, [x2, #0x60] trn1 v25.2d, v16.2d, v17.2d trn1 v26.2d, v18.2d, v19.2d stp q25, q26, [x0, #0x80] trn2 v25.2d, v16.2d, v17.2d trn2 v26.2d, v18.2d, v19.2d stp q25, q26, [x2, #0x80] trn1 v25.2d, v20.2d, v21.2d trn1 v26.2d, v22.2d, v23.2d stp q25, q26, [x0, #0xa0] trn2 v25.2d, v20.2d, v21.2d trn2 v26.2d, v22.2d, v23.2d stp q25, q26, [x2, #0xa0] str d24, [x0, #0xc0] trn2 v24.2d, v24.2d, v24.2d str d24, [x2, #0xc0] CFI_STACKLOAD2(d14,d15,192) CFI_STACKLOAD2(d12,d13,176) CFI_STACKLOAD2(d10,d11,160) CFI_STACKLOAD2(d8,d9,144) CFI_STACKLOAD2(x19,x20,48) CFI_STACKLOAD2(x21,x22,64) CFI_STACKLOAD2(x23,x24,80) CFI_STACKLOAD2(x25,x26,96) CFI_STACKLOAD2(x27,x28,112) CFI_STACKLOAD2(x29,x30,128) CFI_INC_SP(224) CFI_RET 
S2N_BN_SIZE_DIRECTIVE(sha3_keccak4_f1600_alt)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
wlsfx/bnbb
31,917
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sha3/sha3_keccak4_f1600.S
// Copyright (c) 2024 The mlkem-native project authors
// Copyright (c) 2021-2022 Arm Limited
// Copyright (c) 2022 Matthias Kannwischer
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT

// ----------------------------------------------------------------------------
// Keccak-f1600 permutation for SHA3, batch of four independent operations
// Input a[100], rc[24]; output a[100]
//
// The input/output argument is in effect four 25-element Keccak arrays
// a[0...24], a[25..49], a[50..74] and a[75..99], which could be considered
// as type a[25][4].
//
// Thinking of each such input/output array as a row-major flattening of a
// 5x5 matrix of 64-bit words, this performs the Keccak-f1600 permutation,
// all 24 rounds with the distinct round constants rc[i] for each one. For
// correct operation, the input pointer rc should point at the standard
// round constants as in the specification:
//
// https://keccak.team/keccak_specs_summary.html#roundConstants
//
// This operation is at the core of SHA3 and is fully specified here:
//
// https://keccak.team/files/Keccak-reference-3.0.pdf
//
// extern void sha3_keccak4_f1600(uint64_t a[static 100],
//                                const uint64_t rc[static 24]);
//
// Standard ARM ABI: X0 = a, X1 = rc
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

.arch armv8.4-a+sha3

S2N_BN_SYM_VISIBILITY_DIRECTIVE(sha3_keccak4_f1600)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sha3_keccak4_f1600)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sha3_keccak4_f1600)
.text
.balign 4

S2N_BN_SYMBOL(sha3_keccak4_f1600):

CFI_START

// This is similar to the code in the mlkem-native repository here:
//
// mlkem/fips202/native/aarch64/src/keccak_f1600_x4_scalar_v84a_asm_hybrid.S
//
// The main difference is the avoidance of ld2/st2 in favour of explicit
// transposition operations and conventional loads and stores.
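// For illustration, a minimal C-level usage sketch; "keccak_rc" is an
// assumed name for a caller-supplied copy of the standard round-constant
// table, not something defined by this file:
//
//   #include <stdint.h>
//   extern void sha3_keccak4_f1600(uint64_t a[static 100],
//                                  const uint64_t rc[static 24]);
//   extern const uint64_t keccak_rc[24];   /* standard FIPS-202 constants */
//
//   void permute4(uint64_t states[100])
//   {   /* states[25*i .. 25*i+24] is the i'th independent Keccak state */
//       sha3_keccak4_f1600(states, keccak_rc);
//   }
//
// Since the four operations are independent, the effect on each 25-word
// block is the same as one single-state Keccak-f1600 permutation of it.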
CFI_DEC_SP(224) CFI_STACKSAVE2(x19,x20,0x30) CFI_STACKSAVE2(x21,x22,0x40) CFI_STACKSAVE2(x23,x24,0x50) CFI_STACKSAVE2(x25,x26,0x60) CFI_STACKSAVE2(x27,x28,0x70) CFI_STACKSAVE2(x29,x30,0x80) CFI_STACKSAVE2(d8,d9,0x90) CFI_STACKSAVE2(d10,d11,0xa0) CFI_STACKSAVE2(d12,d13,0xb0) CFI_STACKSAVE2(d14,d15,0xc0) mov x29, x1 mov x30, #0x0 str x30, [sp, #0x20] str x29, [sp, #0x8] str x29, [sp, #0x10] str x0, [sp] add x2, x0, #0xc8 ldp q24, q25, [x0] ldp q26, q27, [x2] trn1 v0.2d, v24.2d, v26.2d trn2 v1.2d, v24.2d, v26.2d trn1 v2.2d, v25.2d, v27.2d trn2 v3.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x20] ldp q26, q27, [x2, #0x20] trn1 v4.2d, v24.2d, v26.2d trn2 v5.2d, v24.2d, v26.2d trn1 v6.2d, v25.2d, v27.2d trn2 v7.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x40] ldp q26, q27, [x2, #0x40] trn1 v8.2d, v24.2d, v26.2d trn2 v9.2d, v24.2d, v26.2d trn1 v10.2d, v25.2d, v27.2d trn2 v11.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x60] ldp q26, q27, [x2, #0x60] trn1 v12.2d, v24.2d, v26.2d trn2 v13.2d, v24.2d, v26.2d trn1 v14.2d, v25.2d, v27.2d trn2 v15.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x80] ldp q26, q27, [x2, #0x80] trn1 v16.2d, v24.2d, v26.2d trn2 v17.2d, v24.2d, v26.2d trn1 v18.2d, v25.2d, v27.2d trn2 v19.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0xa0] ldp q26, q27, [x2, #0xa0] trn1 v20.2d, v24.2d, v26.2d trn2 v21.2d, v24.2d, v26.2d trn1 v22.2d, v25.2d, v27.2d trn2 v23.2d, v25.2d, v27.2d ldr d24, [x0, #0xc0] ldr d25, [x2, #0xc0] trn1 v24.2d, v24.2d, v25.2d add x0, x0, #0x190 ldp x1, x6, [x0] ldp x11, x16, [x0, #0x10] ldp x21, x2, [x0, #0x20] ldp x7, x12, [x0, #0x30] ldp x17, x22, [x0, #0x40] ldp x3, x8, [x0, #0x50] ldp x13, x28, [x0, #0x60] ldp x23, x4, [x0, #0x70] ldp x9, x14, [x0, #0x80] ldp x19, x24, [x0, #0x90] ldp x5, x10, [x0, #0xa0] ldp x15, x20, [x0, #0xb0] ldr x25, [x0, #0xc0] sub x0, x0, #0x190 Lsha3_keccak4_f1600_initial: eor x30, x24, x25 eor x27, x9, x10 eor x0, x30, x21 eor x26, x27, x6 eor x27, x26, x7 eor x29, x0, x22 eor x26, x29, x23 eor x29, x4, x5 eor x30, x29, x1 eor x0, x27, x8 eor x29, x30, x2 eor x30, x19, x20 eor x30, x30, x16 eor x27, x26, x0, ror #63 eor x4, x4, x27 eor x30, x30, x17 eor x30, x30, x28 eor x29, x29, x3 eor x0, x0, x30, ror #63 eor x30, x30, x29, ror #63 eor x22, x22, x30 eor x23, x23, x30 str x23, [sp, #0xd0] eor x23, x14, x15 eor x14, x14, x0 eor x23, x23, x11 eor x15, x15, x0 eor x1, x1, x27 eor x23, x23, x12 eor x23, x23, x13 eor x11, x11, x0 eor x29, x29, x23, ror #63 eor x23, x23, x26, ror #63 eor x26, x13, x0 eor x13, x28, x23 eor x28, x24, x30 eor x24, x16, x23 eor x16, x21, x30 eor x21, x25, x30 eor x30, x19, x23 eor x19, x20, x23 eor x20, x17, x23 eor x17, x12, x0 eor x0, x2, x27 eor x2, x6, x29 eor x6, x8, x29 bic x8, x28, x13, ror #47 eor x12, x3, x27 bic x3, x13, x17, ror #19 eor x5, x5, x27 ldr x27, [sp, #0xd0] bic x25, x17, x2, ror #5 eor x9, x9, x29 eor x23, x25, x5, ror #52 eor x3, x3, x2, ror #24 eor x8, x8, x17, ror #2 eor x17, x10, x29 bic x25, x12, x22, ror #47 eor x29, x7, x29 bic x10, x4, x27, ror #2 bic x7, x5, x28, ror #10 eor x10, x10, x20, ror #50 eor x13, x7, x13, ror #57 bic x7, x2, x5, ror #47 eor x2, x25, x24, ror #39 bic x25, x20, x11, ror #57 bic x5, x17, x4, ror #25 eor x25, x25, x17, ror #53 bic x17, x11, x17, ror #60 eor x28, x7, x28, ror #57 bic x7, x9, x12, ror #42 eor x7, x7, x22, ror #25 bic x22, x22, x24, ror #56 bic x24, x24, x15, ror #31 eor x22, x22, x15, ror #23 bic x20, x27, x20, ror #48 bic x15, x15, x9, ror #16 eor x12, x15, x12, ror #58 eor x15, x5, x27, ror #27 eor x5, x20, x11, ror #41 ldr x11, [sp, #0x8] eor x20, x17, x4, 
ror #21 eor x17, x24, x9, ror #47 mov x24, #0x1 bic x9, x0, x16, ror #9 str x24, [sp, #0x18] bic x24, x29, x1, ror #44 bic x27, x1, x21, ror #50 bic x4, x26, x29, ror #63 eor x1, x1, x4, ror #21 ldr x11, [x11] bic x4, x21, x30, ror #57 eor x21, x24, x21, ror #30 eor x24, x9, x19, ror #44 bic x9, x14, x6, ror #5 eor x9, x9, x0, ror #43 bic x0, x6, x0, ror #38 eor x1, x1, x11 eor x11, x4, x26, ror #35 eor x4, x0, x16, ror #47 bic x0, x16, x19, ror #35 eor x16, x27, x30, ror #43 bic x27, x30, x26, ror #42 bic x26, x19, x14, ror #41 eor x19, x0, x14, ror #12 eor x14, x26, x6, ror #46 eor x6, x27, x29, ror #41 eor x0, x15, x11, ror #52 eor x0, x0, x13, ror #48 eor x26, x8, x9, ror #57 eor x27, x0, x14, ror #10 eor x29, x16, x28, ror #63 eor x26, x26, x6, ror #51 eor x30, x23, x22, ror #50 eor x0, x26, x10, ror #31 eor x29, x29, x19, ror #37 eor x27, x27, x12, ror #5 eor x30, x30, x24, ror #34 eor x0, x0, x7, ror #27 eor x26, x30, x21, ror #26 eor x26, x26, x25, ror #15 ror x30, x27, #0x3e eor x30, x30, x26, ror #57 ror x26, x26, #0x3a eor x16, x30, x16 eor x28, x30, x28, ror #63 str x28, [sp, #0xd0] eor x29, x29, x17, ror #36 eor x28, x1, x2, ror #61 eor x19, x30, x19, ror #37 eor x29, x29, x20, ror #2 eor x28, x28, x4, ror #54 eor x26, x26, x0, ror #55 eor x28, x28, x3, ror #39 eor x28, x28, x5, ror #25 ror x0, x0, #0x38 eor x0, x0, x29, ror #63 eor x27, x28, x27, ror #61 eor x13, x0, x13, ror #46 eor x28, x29, x28, ror #63 eor x29, x30, x20, ror #2 eor x20, x26, x3, ror #39 eor x11, x0, x11, ror #50 eor x25, x28, x25, ror #9 eor x3, x28, x21, ror #20 eor x21, x26, x1 eor x9, x27, x9, ror #49 eor x24, x28, x24, ror #28 eor x1, x30, x17, ror #36 eor x14, x0, x14, ror #8 eor x22, x28, x22, ror #44 eor x8, x27, x8, ror #56 eor x17, x27, x7, ror #19 eor x15, x0, x15, ror #62 bic x7, x20, x22, ror #47 eor x4, x26, x4, ror #54 eor x0, x0, x12, ror #3 eor x28, x28, x23, ror #58 eor x23, x26, x2, ror #61 eor x26, x26, x5, ror #25 eor x2, x7, x16, ror #39 bic x7, x9, x20, ror #42 bic x30, x15, x9, ror #16 eor x7, x7, x22, ror #25 eor x12, x30, x20, ror #58 bic x20, x22, x16, ror #56 eor x30, x27, x6, ror #43 eor x22, x20, x15, ror #23 bic x6, x19, x13, ror #42 eor x6, x6, x17, ror #41 bic x5, x13, x17, ror #63 eor x5, x21, x5, ror #21 bic x17, x17, x21, ror #44 eor x27, x27, x10, ror #23 bic x21, x21, x25, ror #50 bic x20, x27, x4, ror #25 bic x10, x16, x15, ror #31 eor x16, x21, x19, ror #43 eor x21, x17, x25, ror #30 bic x19, x25, x19, ror #57 ldr x25, [sp, #0x18] eor x17, x10, x9, ror #47 ldr x9, [sp, #0x8] eor x15, x20, x28, ror #27 bic x20, x4, x28, ror #2 eor x10, x20, x1, ror #50 bic x20, x11, x27, ror #60 eor x20, x20, x4, ror #21 bic x4, x28, x1, ror #48 bic x1, x1, x11, ror #57 ldr x28, [x9, x25, lsl #3] ldr x9, [sp, #0xd0] add x25, x25, #0x1 str x25, [sp, #0x18] cmp x25, #0x17 eor x25, x1, x27, ror #53 bic x27, x30, x26, ror #47 eor x1, x5, x28 eor x5, x4, x11, ror #41 eor x11, x19, x13, ror #35 bic x13, x26, x24, ror #10 eor x28, x27, x24, ror #57 bic x27, x24, x9, ror #47 bic x19, x23, x3, ror #9 bic x4, x29, x14, ror #41 eor x24, x19, x29, ror #44 bic x29, x3, x29, ror #35 eor x13, x13, x9, ror #57 eor x19, x29, x14, ror #12 bic x29, x9, x0, ror #19 bic x14, x14, x8, ror #5 eor x9, x14, x23, ror #43 eor x14, x4, x8, ror #46 bic x23, x8, x23, ror #38 eor x8, x27, x0, ror #2 eor x4, x23, x3, ror #47 bic x3, x0, x30, ror #5 eor x23, x3, x26, ror #52 eor x3, x29, x30, ror #24 eor3 v30.16b, v0.16b, v5.16b, v10.16b eor3 v30.16b, v30.16b, v15.16b, v20.16b eor3 v29.16b, v1.16b, v6.16b, v11.16b 
eor3 v29.16b, v29.16b, v16.16b, v21.16b eor3 v28.16b, v2.16b, v7.16b, v12.16b eor3 v28.16b, v28.16b, v17.16b, v22.16b eor3 v27.16b, v3.16b, v8.16b, v13.16b eor3 v27.16b, v27.16b, v18.16b, v23.16b eor3 v26.16b, v4.16b, v9.16b, v14.16b eor3 v26.16b, v26.16b, v19.16b, v24.16b rax1 v25.2d, v30.2d, v28.2d rax1 v28.2d, v28.2d, v26.2d rax1 v26.2d, v26.2d, v29.2d rax1 v29.2d, v29.2d, v27.2d rax1 v27.2d, v27.2d, v30.2d eor v30.16b, v0.16b, v26.16b xar v0.2d, v2.2d, v29.2d, #0x2 xar v2.2d, v12.2d, v29.2d, #0x15 xar v12.2d, v13.2d, v28.2d, #0x27 xar v13.2d, v19.2d, v27.2d, #0x38 xar v19.2d, v23.2d, v28.2d, #0x8 xar v23.2d, v15.2d, v26.2d, #0x17 xar v15.2d, v1.2d, v25.2d, #0x3f xar v1.2d, v8.2d, v28.2d, #0x9 xar v8.2d, v16.2d, v25.2d, #0x13 xar v16.2d, v7.2d, v29.2d, #0x3a xar v7.2d, v10.2d, v26.2d, #0x3d xar v10.2d, v3.2d, v28.2d, #0x24 xar v3.2d, v18.2d, v28.2d, #0x2b xar v18.2d, v17.2d, v29.2d, #0x31 xar v17.2d, v11.2d, v25.2d, #0x36 xar v11.2d, v9.2d, v27.2d, #0x2c xar v9.2d, v22.2d, v29.2d, #0x3 xar v22.2d, v14.2d, v27.2d, #0x19 xar v14.2d, v20.2d, v26.2d, #0x2e xar v20.2d, v4.2d, v27.2d, #0x25 xar v4.2d, v24.2d, v27.2d, #0x32 xar v24.2d, v21.2d, v25.2d, #0x3e xar v21.2d, v5.2d, v26.2d, #0x1c xar v27.2d, v6.2d, v25.2d, #0x14 ldr x30, [sp, #0x10] ld1r { v28.2d }, [x30], #8 str x30, [sp, #0x10] bcax v5.16b, v10.16b, v7.16b, v11.16b bcax v6.16b, v11.16b, v8.16b, v7.16b bcax v7.16b, v7.16b, v9.16b, v8.16b bcax v8.16b, v8.16b, v10.16b, v9.16b bcax v9.16b, v9.16b, v11.16b, v10.16b bcax v10.16b, v15.16b, v12.16b, v16.16b bcax v11.16b, v16.16b, v13.16b, v12.16b bcax v12.16b, v12.16b, v14.16b, v13.16b bcax v13.16b, v13.16b, v15.16b, v14.16b bcax v14.16b, v14.16b, v16.16b, v15.16b bcax v15.16b, v20.16b, v17.16b, v21.16b bcax v16.16b, v21.16b, v18.16b, v17.16b bcax v17.16b, v17.16b, v19.16b, v18.16b bcax v18.16b, v18.16b, v20.16b, v19.16b bcax v19.16b, v19.16b, v21.16b, v20.16b bcax v20.16b, v0.16b, v22.16b, v1.16b bcax v21.16b, v1.16b, v23.16b, v22.16b bcax v22.16b, v22.16b, v24.16b, v23.16b bcax v23.16b, v23.16b, v0.16b, v24.16b bcax v24.16b, v24.16b, v1.16b, v0.16b bcax v0.16b, v30.16b, v2.16b, v27.16b bcax v1.16b, v27.16b, v3.16b, v2.16b bcax v2.16b, v2.16b, v4.16b, v3.16b bcax v3.16b, v3.16b, v30.16b, v4.16b bcax v4.16b, v4.16b, v27.16b, v30.16b eor v0.16b, v0.16b, v28.16b Lsha3_keccak4_f1600_loop: eor x0, x15, x11, ror #52 eor x0, x0, x13, ror #48 eor x26, x8, x9, ror #57 eor x27, x0, x14, ror #10 eor3 v30.16b, v0.16b, v5.16b, v10.16b eor3 v30.16b, v30.16b, v15.16b, v20.16b eor x29, x16, x28, ror #63 eor x26, x26, x6, ror #51 eor x30, x23, x22, ror #50 eor3 v29.16b, v1.16b, v6.16b, v11.16b eor x0, x26, x10, ror #31 eor x29, x29, x19, ror #37 eor x27, x27, x12, ror #5 eor3 v29.16b, v29.16b, v16.16b, v21.16b eor x30, x30, x24, ror #34 eor x0, x0, x7, ror #27 eor x26, x30, x21, ror #26 eor3 v28.16b, v2.16b, v7.16b, v12.16b eor x26, x26, x25, ror #15 ror x30, x27, #0x3e eor x30, x30, x26, ror #57 ror x26, x26, #0x3a eor3 v28.16b, v28.16b, v17.16b, v22.16b eor x16, x30, x16 eor x28, x30, x28, ror #63 str x28, [sp, #0xd0] eor3 v27.16b, v3.16b, v8.16b, v13.16b eor x29, x29, x17, ror #36 eor x28, x1, x2, ror #61 eor x19, x30, x19, ror #37 eor3 v27.16b, v27.16b, v18.16b, v23.16b eor x29, x29, x20, ror #2 eor x28, x28, x4, ror #54 eor x26, x26, x0, ror #55 eor3 v26.16b, v4.16b, v9.16b, v14.16b eor x28, x28, x3, ror #39 eor x28, x28, x5, ror #25 ror x0, x0, #0x38 eor x0, x0, x29, ror #63 eor3 v26.16b, v26.16b, v19.16b, v24.16b eor x27, x28, x27, ror #61 eor x13, x0, x13, ror #46 eor x28, x29, x28, ror #63 rax1 
v25.2d, v30.2d, v28.2d eor x29, x30, x20, ror #2 eor x20, x26, x3, ror #39 eor x11, x0, x11, ror #50 rax1 v28.2d, v28.2d, v26.2d eor x25, x28, x25, ror #9 eor x3, x28, x21, ror #20 eor x21, x26, x1 rax1 v26.2d, v26.2d, v29.2d eor x9, x27, x9, ror #49 eor x24, x28, x24, ror #28 eor x1, x30, x17, ror #36 eor x14, x0, x14, ror #8 rax1 v29.2d, v29.2d, v27.2d eor x22, x28, x22, ror #44 eor x8, x27, x8, ror #56 eor x17, x27, x7, ror #19 rax1 v27.2d, v27.2d, v30.2d eor x15, x0, x15, ror #62 bic x7, x20, x22, ror #47 eor x4, x26, x4, ror #54 eor v30.16b, v0.16b, v26.16b eor x0, x0, x12, ror #3 eor x28, x28, x23, ror #58 eor x23, x26, x2, ror #61 xar v0.2d, v2.2d, v29.2d, #0x2 eor x26, x26, x5, ror #25 eor x2, x7, x16, ror #39 bic x7, x9, x20, ror #42 bic x30, x15, x9, ror #16 xar v2.2d, v12.2d, v29.2d, #0x15 eor x7, x7, x22, ror #25 eor x12, x30, x20, ror #58 bic x20, x22, x16, ror #56 xar v12.2d, v13.2d, v28.2d, #0x27 eor x30, x27, x6, ror #43 eor x22, x20, x15, ror #23 bic x6, x19, x13, ror #42 xar v13.2d, v19.2d, v27.2d, #0x38 eor x6, x6, x17, ror #41 bic x5, x13, x17, ror #63 eor x5, x21, x5, ror #21 xar v19.2d, v23.2d, v28.2d, #0x8 bic x17, x17, x21, ror #44 eor x27, x27, x10, ror #23 bic x21, x21, x25, ror #50 bic x20, x27, x4, ror #25 xar v23.2d, v15.2d, v26.2d, #0x17 bic x10, x16, x15, ror #31 eor x16, x21, x19, ror #43 eor x21, x17, x25, ror #30 xar v15.2d, v1.2d, v25.2d, #0x3f bic x19, x25, x19, ror #57 ldr x25, [sp, #0x18] eor x17, x10, x9, ror #47 xar v1.2d, v8.2d, v28.2d, #0x9 ldr x9, [sp, #0x8] eor x15, x20, x28, ror #27 bic x20, x4, x28, ror #2 xar v8.2d, v16.2d, v25.2d, #0x13 eor x10, x20, x1, ror #50 bic x20, x11, x27, ror #60 eor x20, x20, x4, ror #21 bic x4, x28, x1, ror #48 xar v16.2d, v7.2d, v29.2d, #0x3a bic x1, x1, x11, ror #57 ldr x28, [x9, x25, lsl #3] ldr x9, [sp, #0xd0] xar v7.2d, v10.2d, v26.2d, #0x3d add x25, x25, #0x1 str x25, [sp, #0x18] cmp x25, #0x17 xar v10.2d, v3.2d, v28.2d, #0x24 eor x25, x1, x27, ror #53 bic x27, x30, x26, ror #47 eor x1, x5, x28 xar v3.2d, v18.2d, v28.2d, #0x2b eor x5, x4, x11, ror #41 eor x11, x19, x13, ror #35 bic x13, x26, x24, ror #10 eor x28, x27, x24, ror #57 xar v18.2d, v17.2d, v29.2d, #0x31 bic x27, x24, x9, ror #47 bic x19, x23, x3, ror #9 bic x4, x29, x14, ror #41 xar v17.2d, v11.2d, v25.2d, #0x36 eor x24, x19, x29, ror #44 bic x29, x3, x29, ror #35 eor x13, x13, x9, ror #57 xar v11.2d, v9.2d, v27.2d, #0x2c eor x19, x29, x14, ror #12 bic x29, x9, x0, ror #19 bic x14, x14, x8, ror #5 xar v9.2d, v22.2d, v29.2d, #0x3 eor x9, x14, x23, ror #43 eor x14, x4, x8, ror #46 bic x23, x8, x23, ror #38 eor x8, x27, x0, ror #2 xar v22.2d, v14.2d, v27.2d, #0x19 eor x4, x23, x3, ror #47 bic x3, x0, x30, ror #5 eor x23, x3, x26, ror #52 xar v14.2d, v20.2d, v26.2d, #0x2e eor x3, x29, x30, ror #24 eor x0, x15, x11, ror #52 eor x0, x0, x13, ror #48 xar v20.2d, v4.2d, v27.2d, #0x25 eor x26, x8, x9, ror #57 eor x27, x0, x14, ror #10 eor x29, x16, x28, ror #63 xar v4.2d, v24.2d, v27.2d, #0x32 eor x26, x26, x6, ror #51 eor x30, x23, x22, ror #50 eor x0, x26, x10, ror #31 eor x29, x29, x19, ror #37 xar v24.2d, v21.2d, v25.2d, #0x3e eor x27, x27, x12, ror #5 eor x30, x30, x24, ror #34 eor x0, x0, x7, ror #27 xar v21.2d, v5.2d, v26.2d, #0x1c eor x26, x30, x21, ror #26 eor x26, x26, x25, ror #15 ror x30, x27, #0x3e xar v27.2d, v6.2d, v25.2d, #0x14 eor x30, x30, x26, ror #57 ror x26, x26, #0x3a eor x16, x30, x16 bcax v5.16b, v10.16b, v7.16b, v11.16b eor x28, x30, x28, ror #63 str x28, [sp, #0xd0] eor x29, x29, x17, ror #36 eor x28, x1, x2, ror #61 bcax v6.16b, 
v11.16b, v8.16b, v7.16b eor x19, x30, x19, ror #37 eor x29, x29, x20, ror #2 eor x28, x28, x4, ror #54 bcax v7.16b, v7.16b, v9.16b, v8.16b eor x26, x26, x0, ror #55 eor x28, x28, x3, ror #39 eor x28, x28, x5, ror #25 bcax v8.16b, v8.16b, v10.16b, v9.16b ror x0, x0, #0x38 eor x0, x0, x29, ror #63 eor x27, x28, x27, ror #61 bcax v9.16b, v9.16b, v11.16b, v10.16b eor x13, x0, x13, ror #46 eor x28, x29, x28, ror #63 eor x29, x30, x20, ror #2 eor x20, x26, x3, ror #39 bcax v10.16b, v15.16b, v12.16b, v16.16b eor x11, x0, x11, ror #50 eor x25, x28, x25, ror #9 eor x3, x28, x21, ror #20 bcax v11.16b, v16.16b, v13.16b, v12.16b eor x21, x26, x1 eor x9, x27, x9, ror #49 eor x24, x28, x24, ror #28 bcax v12.16b, v12.16b, v14.16b, v13.16b eor x1, x30, x17, ror #36 eor x14, x0, x14, ror #8 eor x22, x28, x22, ror #44 bcax v13.16b, v13.16b, v15.16b, v14.16b eor x8, x27, x8, ror #56 eor x17, x27, x7, ror #19 eor x15, x0, x15, ror #62 bic x7, x20, x22, ror #47 bcax v14.16b, v14.16b, v16.16b, v15.16b eor x4, x26, x4, ror #54 eor x0, x0, x12, ror #3 eor x28, x28, x23, ror #58 bcax v15.16b, v20.16b, v17.16b, v21.16b eor x23, x26, x2, ror #61 eor x26, x26, x5, ror #25 eor x2, x7, x16, ror #39 bcax v16.16b, v21.16b, v18.16b, v17.16b bic x7, x9, x20, ror #42 bic x30, x15, x9, ror #16 eor x7, x7, x22, ror #25 bcax v17.16b, v17.16b, v19.16b, v18.16b eor x12, x30, x20, ror #58 bic x20, x22, x16, ror #56 eor x30, x27, x6, ror #43 eor x22, x20, x15, ror #23 bcax v18.16b, v18.16b, v20.16b, v19.16b bic x6, x19, x13, ror #42 eor x6, x6, x17, ror #41 bic x5, x13, x17, ror #63 bcax v19.16b, v19.16b, v21.16b, v20.16b eor x5, x21, x5, ror #21 bic x17, x17, x21, ror #44 eor x27, x27, x10, ror #23 bcax v20.16b, v0.16b, v22.16b, v1.16b bic x21, x21, x25, ror #50 bic x20, x27, x4, ror #25 bic x10, x16, x15, ror #31 bcax v21.16b, v1.16b, v23.16b, v22.16b eor x16, x21, x19, ror #43 eor x21, x17, x25, ror #30 bic x19, x25, x19, ror #57 ldr x25, [sp, #0x18] bcax v22.16b, v22.16b, v24.16b, v23.16b eor x17, x10, x9, ror #47 ldr x9, [sp, #0x8] eor x15, x20, x28, ror #27 bcax v23.16b, v23.16b, v0.16b, v24.16b bic x20, x4, x28, ror #2 eor x10, x20, x1, ror #50 bic x20, x11, x27, ror #60 bcax v24.16b, v24.16b, v1.16b, v0.16b eor x20, x20, x4, ror #21 bic x4, x28, x1, ror #48 bic x1, x1, x11, ror #57 bcax v0.16b, v30.16b, v2.16b, v27.16b ldr x28, [x9, x25, lsl #3] ldr x9, [sp, #0xd0] add x25, x25, #0x1 str x25, [sp, #0x18] bcax v1.16b, v27.16b, v3.16b, v2.16b cmp x25, #0x17 eor x25, x1, x27, ror #53 bic x27, x30, x26, ror #47 bcax v2.16b, v2.16b, v4.16b, v3.16b eor x1, x5, x28 eor x5, x4, x11, ror #41 eor x11, x19, x13, ror #35 bcax v3.16b, v3.16b, v30.16b, v4.16b bic x13, x26, x24, ror #10 eor x28, x27, x24, ror #57 bic x27, x24, x9, ror #47 bcax v4.16b, v4.16b, v27.16b, v30.16b bic x19, x23, x3, ror #9 bic x4, x29, x14, ror #41 eor x24, x19, x29, ror #44 bic x29, x3, x29, ror #35 eor x13, x13, x9, ror #57 eor x19, x29, x14, ror #12 bic x29, x9, x0, ror #19 bic x14, x14, x8, ror #5 eor x9, x14, x23, ror #43 eor x14, x4, x8, ror #46 bic x23, x8, x23, ror #38 eor x8, x27, x0, ror #2 eor x4, x23, x3, ror #47 bic x3, x0, x30, ror #5 eor x23, x3, x26, ror #52 eor x3, x29, x30, ror #24 ldr x30, [sp, #0x10] ld1r { v28.2d }, [x30], #8 str x30, [sp, #0x10] eor v0.16b, v0.16b, v28.16b ble Lsha3_keccak4_f1600_loop ror x2, x2, #0x3d ror x3, x3, #0x27 ror x4, x4, #0x36 ror x5, x5, #0x19 ror x6, x6, #0x2b ror x7, x7, #0x13 ror x8, x8, #0x38 ror x9, x9, #0x31 ror x10, x10, #0x17 ror x11, x11, #0x32 ror x12, x12, #0x3 ror x13, x13, #0x2e ror x14, x14, #0x8 
ror x15, x15, #0x3e ror x17, x17, #0x24 ror x28, x28, #0x3f ror x19, x19, #0x25 ror x20, x20, #0x2 ror x21, x21, #0x14 ror x22, x22, #0x2c ror x23, x23, #0x3a ror x24, x24, #0x1c ror x25, x25, #0x9 ldr x30, [sp, #0x20] cmp x30, #0x1 beq Lsha3_keccak4_f1600_done mov x30, #0x1 str x30, [sp, #0x20] ldr x0, [sp] add x0, x0, #0x190 stp x1, x6, [x0] stp x11, x16, [x0, #0x10] stp x21, x2, [x0, #0x20] stp x7, x12, [x0, #0x30] stp x17, x22, [x0, #0x40] stp x3, x8, [x0, #0x50] stp x13, x28, [x0, #0x60] stp x23, x4, [x0, #0x70] stp x9, x14, [x0, #0x80] stp x19, x24, [x0, #0x90] stp x5, x10, [x0, #0xa0] stp x15, x20, [x0, #0xb0] str x25, [x0, #0xc0] sub x0, x0, #0x190 add x0, x0, #0x258 ldp x1, x6, [x0] ldp x11, x16, [x0, #0x10] ldp x21, x2, [x0, #0x20] ldp x7, x12, [x0, #0x30] ldp x17, x22, [x0, #0x40] ldp x3, x8, [x0, #0x50] ldp x13, x28, [x0, #0x60] ldp x23, x4, [x0, #0x70] ldp x9, x14, [x0, #0x80] ldp x19, x24, [x0, #0x90] ldp x5, x10, [x0, #0xa0] ldp x15, x20, [x0, #0xb0] ldr x25, [x0, #0xc0] sub x0, x0, #0x258 b Lsha3_keccak4_f1600_initial Lsha3_keccak4_f1600_done: ldr x0, [sp] add x0, x0, #0x258 stp x1, x6, [x0] stp x11, x16, [x0, #0x10] stp x21, x2, [x0, #0x20] stp x7, x12, [x0, #0x30] stp x17, x22, [x0, #0x40] stp x3, x8, [x0, #0x50] stp x13, x28, [x0, #0x60] stp x23, x4, [x0, #0x70] stp x9, x14, [x0, #0x80] stp x19, x24, [x0, #0x90] stp x5, x10, [x0, #0xa0] stp x15, x20, [x0, #0xb0] str x25, [x0, #0xc0] sub x0, x0, #0x258 add x2, x0, #0xc8 trn1 v25.2d, v0.2d, v1.2d trn1 v26.2d, v2.2d, v3.2d stp q25, q26, [x0] trn2 v25.2d, v0.2d, v1.2d trn2 v26.2d, v2.2d, v3.2d stp q25, q26, [x2] trn1 v25.2d, v4.2d, v5.2d trn1 v26.2d, v6.2d, v7.2d stp q25, q26, [x0, #0x20] trn2 v25.2d, v4.2d, v5.2d trn2 v26.2d, v6.2d, v7.2d stp q25, q26, [x2, #0x20] trn1 v25.2d, v8.2d, v9.2d trn1 v26.2d, v10.2d, v11.2d stp q25, q26, [x0, #0x40] trn2 v25.2d, v8.2d, v9.2d trn2 v26.2d, v10.2d, v11.2d stp q25, q26, [x2, #0x40] trn1 v25.2d, v12.2d, v13.2d trn1 v26.2d, v14.2d, v15.2d stp q25, q26, [x0, #0x60] trn2 v25.2d, v12.2d, v13.2d trn2 v26.2d, v14.2d, v15.2d stp q25, q26, [x2, #0x60] trn1 v25.2d, v16.2d, v17.2d trn1 v26.2d, v18.2d, v19.2d stp q25, q26, [x0, #0x80] trn2 v25.2d, v16.2d, v17.2d trn2 v26.2d, v18.2d, v19.2d stp q25, q26, [x2, #0x80] trn1 v25.2d, v20.2d, v21.2d trn1 v26.2d, v22.2d, v23.2d stp q25, q26, [x0, #0xa0] trn2 v25.2d, v20.2d, v21.2d trn2 v26.2d, v22.2d, v23.2d stp q25, q26, [x2, #0xa0] str d24, [x0, #0xc0] trn2 v24.2d, v24.2d, v24.2d str d24, [x2, #0xc0] CFI_STACKLOAD2(d14,d15,0xc0) CFI_STACKLOAD2(d12,d13,0xb0) CFI_STACKLOAD2(d10,d11,0xa0) CFI_STACKLOAD2(d8,d9,0x90) CFI_STACKLOAD2(x19,x20,0x30) CFI_STACKLOAD2(x21,x22,0x40) CFI_STACKLOAD2(x23,x24,0x50) CFI_STACKLOAD2(x25,x26,0x60) CFI_STACKLOAD2(x27,x28,0x70) CFI_STACKLOAD2(x29,x30,0x80) CFI_INC_SP(224) CFI_RET S2N_BN_SIZE_DIRECTIVE(sha3_keccak4_f1600) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
wlsfx/bnbb
6,766
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sha3/sha3_keccak_f1600_alt.S
// Copyright (c) 2024 The mlkem-native project authors
// Copyright (c) 2021-2022 Arm Limited
// Copyright (c) 2022 Matthias Kannwischer
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT

// ----------------------------------------------------------------------------
// Keccak-f1600 permutation for SHA3
// Input a[25], rc[24]; output a[25]
//
// Thinking of the input/output array a as a row-major flattening of a
// 5x5 matrix of 64-bit words, this performs the Keccak-f1600 permutation,
// all 24 rounds with the distinct round constants rc[i] for each one. For
// correct operation, the input pointer rc should point at the standard
// round constants as in the specification:
//
// https://keccak.team/keccak_specs_summary.html#roundConstants
//
// This operation is at the core of SHA3 and is fully specified here:
//
// https://keccak.team/files/Keccak-reference-3.0.pdf
//
// extern void sha3_keccak_f1600_alt(uint64_t a[static 25],
//                                   const uint64_t rc[static 24]);
//
// Standard ARM ABI: X0 = a, X1 = rc
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

.arch armv8.4-a+sha3

S2N_BN_SYM_VISIBILITY_DIRECTIVE(sha3_keccak_f1600_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sha3_keccak_f1600_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sha3_keccak_f1600_alt)
.text
.balign 4

S2N_BN_SYMBOL(sha3_keccak_f1600_alt):

CFI_START

// This is very similar to the vector code in the mlkem-native
// repository here:
//
// https://github.com/pq-code-package/mlkem-native/blob/main/mlkem/fips202/native/aarch64/src/keccak_f1600_x1_v84a_asm.S
//
// The main difference is the use of ldp/stp dx,dy in place of
// ld2, the variant used being currently unsupported by the
// s2n-bignum formal model.
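// The ARMv8.4-A SHA3 vector instructions used below compute, per 64-bit
// lane:
//
//   eor3 d, n, m, a      d = n ^ m ^ a
//   rax1 d, n, m         d = n ^ ROL64(m, 1)
//   xar  d, n, m, #i     d = ROR64(n ^ m, i)
//   bcax d, n, m, a      d = n ^ (m & ~a)
//
// For reference, a C-level sketch of the standard round-constant table
// that the rc argument must point at (values as in the Keccak
// specification linked above):
//
//   static const uint64_t keccak_rc[24] = {
//       0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
//       0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
//       0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
//       0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
//       0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
//       0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
//       0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
//       0x8000000000008080, 0x0000000080000001, 0x8000000080008008
//   };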
CFI_DEC_SP(64)
CFI_STACKSAVE2X(d8,d9,0,8)
CFI_STACKSAVE2X(d10,d11,16,24)
CFI_STACKSAVE2X(d12,d13,32,40)
CFI_STACKSAVE2X(d14,d15,48,56)

// Load the Keccak initial state into registers Q0..Q24

ldp d0, d1, [x0]
ldp d2, d3, [x0, #0x10]
ldp d4, d5, [x0, #0x20]
ldp d6, d7, [x0, #0x30]
ldp d8, d9, [x0, #0x40]
ldp d10, d11, [x0, #0x50]
ldp d12, d13, [x0, #0x60]
ldp d14, d15, [x0, #0x70]
ldp d16, d17, [x0, #0x80]
ldp d18, d19, [x0, #0x90]
ldp d20, d21, [x0, #0xa0]
ldp d22, d23, [x0, #0xb0]
ldr d24, [x0, #0xc0]

// Now 24 rounds of the iteration

mov x2, #24
Lsha3_keccak_f1600_alt_loop:
eor3 v30.16b, v0.16b, v5.16b, v10.16b
eor3 v29.16b, v1.16b, v6.16b, v11.16b
eor3 v28.16b, v2.16b, v7.16b, v12.16b
eor3 v27.16b, v3.16b, v8.16b, v13.16b
eor3 v26.16b, v4.16b, v9.16b, v14.16b
eor3 v30.16b, v30.16b, v15.16b, v20.16b
eor3 v29.16b, v29.16b, v16.16b, v21.16b
eor3 v28.16b, v28.16b, v17.16b, v22.16b
eor3 v27.16b, v27.16b, v18.16b, v23.16b
eor3 v26.16b, v26.16b, v19.16b, v24.16b
rax1 v25.2d, v30.2d, v28.2d
rax1 v28.2d, v28.2d, v26.2d
rax1 v26.2d, v26.2d, v29.2d
rax1 v29.2d, v29.2d, v27.2d
rax1 v27.2d, v27.2d, v30.2d
eor v30.16b, v0.16b, v26.16b
xar v0.2d, v2.2d, v29.2d, #0x2
xar v2.2d, v12.2d, v29.2d, #0x15
xar v12.2d, v13.2d, v28.2d, #0x27
xar v13.2d, v19.2d, v27.2d, #0x38
xar v19.2d, v23.2d, v28.2d, #0x8
xar v23.2d, v15.2d, v26.2d, #0x17
xar v15.2d, v1.2d, v25.2d, #0x3f
xar v1.2d, v8.2d, v28.2d, #0x9
xar v8.2d, v16.2d, v25.2d, #0x13
xar v16.2d, v7.2d, v29.2d, #0x3a
xar v7.2d, v10.2d, v26.2d, #0x3d
xar v10.2d, v3.2d, v28.2d, #0x24
xar v3.2d, v18.2d, v28.2d, #0x2b
xar v18.2d, v17.2d, v29.2d, #0x31
xar v17.2d, v11.2d, v25.2d, #0x36
xar v11.2d, v9.2d, v27.2d, #0x2c
xar v9.2d, v22.2d, v29.2d, #0x3
xar v22.2d, v14.2d, v27.2d, #0x19
xar v14.2d, v20.2d, v26.2d, #0x2e
xar v20.2d, v4.2d, v27.2d, #0x25
xar v4.2d, v24.2d, v27.2d, #0x32
xar v24.2d, v21.2d, v25.2d, #0x3e
xar v21.2d, v5.2d, v26.2d, #0x1c
xar v27.2d, v6.2d, v25.2d, #0x14
ld1r { v31.2d }, [x1], #8
bcax v5.16b, v10.16b, v7.16b, v11.16b
bcax v6.16b, v11.16b, v8.16b, v7.16b
bcax v7.16b, v7.16b, v9.16b, v8.16b
bcax v8.16b, v8.16b, v10.16b, v9.16b
bcax v9.16b, v9.16b, v11.16b, v10.16b
bcax v10.16b, v15.16b, v12.16b, v16.16b
bcax v11.16b, v16.16b, v13.16b, v12.16b
bcax v12.16b, v12.16b, v14.16b, v13.16b
bcax v13.16b, v13.16b, v15.16b, v14.16b
bcax v14.16b, v14.16b, v16.16b, v15.16b
bcax v15.16b, v20.16b, v17.16b, v21.16b
bcax v16.16b, v21.16b, v18.16b, v17.16b
bcax v17.16b, v17.16b, v19.16b, v18.16b
bcax v18.16b, v18.16b, v20.16b, v19.16b
bcax v19.16b, v19.16b, v21.16b, v20.16b
bcax v20.16b, v0.16b, v22.16b, v1.16b
bcax v21.16b, v1.16b, v23.16b, v22.16b
bcax v22.16b, v22.16b, v24.16b, v23.16b
bcax v23.16b, v23.16b, v0.16b, v24.16b
bcax v24.16b, v24.16b, v1.16b, v0.16b
bcax v0.16b, v30.16b, v2.16b, v27.16b
bcax v1.16b, v27.16b, v3.16b, v2.16b
bcax v2.16b, v2.16b, v4.16b, v3.16b
bcax v3.16b, v3.16b, v30.16b, v4.16b
bcax v4.16b, v4.16b, v27.16b, v30.16b
eor v0.16b, v0.16b, v31.16b
sub x2, x2, #0x1
cbnz x2, Lsha3_keccak_f1600_alt_loop

// Store back the state

stp d0, d1, [x0]
stp d2, d3, [x0, #0x10]
stp d4, d5, [x0, #0x20]
stp d6, d7, [x0, #0x30]
stp d8, d9, [x0, #0x40]
stp d10, d11, [x0, #0x50]
stp d12, d13, [x0, #0x60]
stp d14, d15, [x0, #0x70]
stp d16, d17, [x0, #0x80]
stp d18, d19, [x0, #0x90]
stp d20, d21, [x0, #0xa0]
stp d22, d23, [x0, #0xb0]
str d24, [x0, #0xc0]

// Restore registers and return

CFI_STACKLOAD2(d8,d9,0)
CFI_STACKLOAD2(d10,d11,16)
CFI_STACKLOAD2(d12,d13,32)
CFI_STACKLOAD2(d14,d15,48)
CFI_INC_SP(64)
CFI_RET

S2N_BN_SIZE_DIRECTIVE(sha3_keccak_f1600_alt)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
wlsfx/bnbb
12,322
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sha3/sha3_keccak_f1600.S
// Copyright (c) 2024 The mlkem-native project authors // Copyright (c) 2021-2022 Arm Limited // Copyright (c) 2022 Matthias Kannwischer // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT // ---------------------------------------------------------------------------- // Keccak-f1600 permutation for SHA3 // Input a[25], rc[24]; output a[25] // // Thinking of the input/output array a as a row-major flattening of a // 5x5 matrix of 64-bit words, this performs the Keccak-f1600 permutation, // all 24 rounds with the distinct round constants rc[i] for each one. For // correct operation, the input pointer rc should point at the standard // round constants as in the specification: // // https://keccak.team/keccak_specs_summary.html#roundConstants // // This operation is at the core of SHA3 and is fully specified here: // // https://keccak.team/files/Keccak-reference-3.0.pdf // // extern void sha3_keccak_f1600(uint64_t a[static 25], // const uint64_t rc[static 24]); // // Standard ARM ABI: X0 = a, X1 = rc // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(sha3_keccak_f1600) S2N_BN_FUNCTION_TYPE_DIRECTIVE(sha3_keccak_f1600) S2N_BN_SYM_PRIVACY_DIRECTIVE(sha3_keccak_f1600) .text .balign 4 S2N_BN_SYMBOL(sha3_keccak_f1600): CFI_START // This is almost exactly the same as the scalar code in the mlkem-native // repository here: // // https://github.com/pq-code-package/mlkem-native/blob/main/mlkem/fips202/native/aarch64/src/keccak_f1600_x1_scalar_asm.S // // The only change is the use of simple 64-bit addressing in place of // the unnecessary down-and-upcasting arising from [x9, w25, uxtw #3] CFI_DEC_SP(128) CFI_STACKSAVE2X(x19,x20,32,40) CFI_STACKSAVE2X(x21,x22,48,56) CFI_STACKSAVE2X(x23,x24,64,72) CFI_STACKSAVE2X(x25,x26,80,88) CFI_STACKSAVE2X(x27,x28,96,104) CFI_STACKSAVE2X(x29,x30,112,120) Lsha3_keccak_f1600_initial: mov x26, x1 str x26, [sp, #8] ldp x1, x6, [x0] ldp x11, x16, [x0, #16] ldp x21, x2, [x0, #32] ldp x7, x12, [x0, #48] ldp x17, x22, [x0, #64] ldp x3, x8, [x0, #80] ldp x13, x28, [x0, #96] ldp x23, x4, [x0, #112] ldp x9, x14, [x0, #128] ldp x19, x24, [x0, #144] ldp x5, x10, [x0, #160] ldp x15, x20, [x0, #176] ldr x25, [x0, #192] str x0, [sp] eor x30, x24, x25 eor x27, x9, x10 eor x0, x30, x21 eor x26, x27, x6 eor x27, x26, x7 eor x29, x0, x22 eor x26, x29, x23 eor x29, x4, x5 eor x30, x29, x1 eor x0, x27, x8 eor x29, x30, x2 eor x30, x19, x20 eor x30, x30, x16 eor x27, x26, x0, ror #63 eor x4, x4, x27 eor x30, x30, x17 eor x30, x30, x28 eor x29, x29, x3 eor x0, x0, x30, ror #63 eor x30, x30, x29, ror #63 eor x22, x22, x30 eor x23, x23, x30 str x23, [sp, #24] eor x23, x14, x15 eor x14, x14, x0 eor x23, x23, x11 eor x15, x15, x0 eor x1, x1, x27 eor x23, x23, x12 eor x23, x23, x13 eor x11, x11, x0 eor x29, x29, x23, ror #63 eor x23, x23, x26, ror #63 eor x26, x13, x0 eor x13, x28, x23 eor x28, x24, x30 eor x24, x16, x23 eor x16, x21, x30 eor x21, x25, x30 eor x30, x19, x23 eor x19, x20, x23 eor x20, x17, x23 eor x17, x12, x0 eor x0, x2, x27 eor x2, x6, x29 eor x6, x8, x29 bic x8, x28, x13, ror #47 eor x12, x3, x27 bic x3, x13, x17, ror #19 eor x5, x5, x27 ldr x27, [sp, #24] bic x25, x17, x2, ror #5 eor x9, x9, x29 eor x23, x25, x5, ror #52 eor x3, x3, x2, ror #24 eor x8, x8, x17, ror #2 eor x17, x10, x29 bic x25, x12, x22, ror #47 eor x29, x7, x29 bic x10, x4, x27, ror #2 bic x7, x5, x28, ror #10 eor x10, x10, x20, ror #50 eor 
x13, x7, x13, ror #57 bic x7, x2, x5, ror #47 eor x2, x25, x24, ror #39 bic x25, x20, x11, ror #57 bic x5, x17, x4, ror #25 eor x25, x25, x17, ror #53 bic x17, x11, x17, ror #60 eor x28, x7, x28, ror #57 bic x7, x9, x12, ror #42 eor x7, x7, x22, ror #25 bic x22, x22, x24, ror #56 bic x24, x24, x15, ror #31 eor x22, x22, x15, ror #23 bic x20, x27, x20, ror #48 bic x15, x15, x9, ror #16 eor x12, x15, x12, ror #58 eor x15, x5, x27, ror #27 eor x5, x20, x11, ror #41 ldr x11, [sp, #8] eor x20, x17, x4, ror #21 eor x17, x24, x9, ror #47 mov x24, #0x1 bic x9, x0, x16, ror #9 str x24, [sp, #16] bic x24, x29, x1, ror #44 bic x27, x1, x21, ror #50 bic x4, x26, x29, ror #63 eor x1, x1, x4, ror #21 ldr x11, [x11] bic x4, x21, x30, ror #57 eor x21, x24, x21, ror #30 eor x24, x9, x19, ror #44 bic x9, x14, x6, ror #5 eor x9, x9, x0, ror #43 bic x0, x6, x0, ror #38 eor x1, x1, x11 eor x11, x4, x26, ror #35 eor x4, x0, x16, ror #47 bic x0, x16, x19, ror #35 eor x16, x27, x30, ror #43 bic x27, x30, x26, ror #42 bic x26, x19, x14, ror #41 eor x19, x0, x14, ror #12 eor x14, x26, x6, ror #46 eor x6, x27, x29, ror #41 // Main loop maintains the invariant that there are deferred rotations to // reach the full effect of i Keccak rounds. By absorbing rotations into // shifted operands, the total number of rotation instructions is reduced // relative to a naive implementation. Lsha3_keccak_f1600_loop: eor x0, x15, x11, ror #52 eor x0, x0, x13, ror #48 eor x26, x8, x9, ror #57 eor x27, x0, x14, ror #10 eor x29, x16, x28, ror #63 eor x26, x26, x6, ror #51 eor x30, x23, x22, ror #50 eor x0, x26, x10, ror #31 eor x29, x29, x19, ror #37 eor x27, x27, x12, ror #5 eor x30, x30, x24, ror #34 eor x0, x0, x7, ror #27 eor x26, x30, x21, ror #26 eor x26, x26, x25, ror #15 ror x30, x27, #62 eor x30, x30, x26, ror #57 ror x26, x26, #58 eor x16, x30, x16 eor x28, x30, x28, ror #63 str x28, [sp, #24] eor x29, x29, x17, ror #36 eor x28, x1, x2, ror #61 eor x19, x30, x19, ror #37 eor x29, x29, x20, ror #2 eor x28, x28, x4, ror #54 eor x26, x26, x0, ror #55 eor x28, x28, x3, ror #39 eor x28, x28, x5, ror #25 ror x0, x0, #56 eor x0, x0, x29, ror #63 eor x27, x28, x27, ror #61 eor x13, x0, x13, ror #46 eor x28, x29, x28, ror #63 eor x29, x30, x20, ror #2 eor x20, x26, x3, ror #39 eor x11, x0, x11, ror #50 eor x25, x28, x25, ror #9 eor x3, x28, x21, ror #20 eor x21, x26, x1 eor x9, x27, x9, ror #49 eor x24, x28, x24, ror #28 eor x1, x30, x17, ror #36 eor x14, x0, x14, ror #8 eor x22, x28, x22, ror #44 eor x8, x27, x8, ror #56 eor x17, x27, x7, ror #19 eor x15, x0, x15, ror #62 bic x7, x20, x22, ror #47 eor x4, x26, x4, ror #54 eor x0, x0, x12, ror #3 eor x28, x28, x23, ror #58 eor x23, x26, x2, ror #61 eor x26, x26, x5, ror #25 eor x2, x7, x16, ror #39 bic x7, x9, x20, ror #42 bic x30, x15, x9, ror #16 eor x7, x7, x22, ror #25 eor x12, x30, x20, ror #58 bic x20, x22, x16, ror #56 eor x30, x27, x6, ror #43 eor x22, x20, x15, ror #23 bic x6, x19, x13, ror #42 eor x6, x6, x17, ror #41 bic x5, x13, x17, ror #63 eor x5, x21, x5, ror #21 bic x17, x17, x21, ror #44 eor x27, x27, x10, ror #23 bic x21, x21, x25, ror #50 bic x20, x27, x4, ror #25 bic x10, x16, x15, ror #31 eor x16, x21, x19, ror #43 eor x21, x17, x25, ror #30 bic x19, x25, x19, ror #57 ldr x25, [sp, #16] eor x17, x10, x9, ror #47 ldr x9, [sp, #8] eor x15, x20, x28, ror #27 bic x20, x4, x28, ror #2 eor x10, x20, x1, ror #50 bic x20, x11, x27, ror #60 eor x20, x20, x4, ror #21 bic x4, x28, x1, ror #48 bic x1, x1, x11, ror #57 ldr x28, [x9, x25, lsl #3] ldr x9, [sp, #24] add x25, 
x25, #0x1 str x25, [sp, #16] cmp x25, #0x17 eor x25, x1, x27, ror #53 bic x27, x30, x26, ror #47 eor x1, x5, x28 eor x5, x4, x11, ror #41 eor x11, x19, x13, ror #35 bic x13, x26, x24, ror #10 eor x28, x27, x24, ror #57 bic x27, x24, x9, ror #47 bic x19, x23, x3, ror #9 bic x4, x29, x14, ror #41 eor x24, x19, x29, ror #44 bic x29, x3, x29, ror #35 eor x13, x13, x9, ror #57 eor x19, x29, x14, ror #12 bic x29, x9, x0, ror #19 bic x14, x14, x8, ror #5 eor x9, x14, x23, ror #43 eor x14, x4, x8, ror #46 bic x23, x8, x23, ror #38 eor x8, x27, x0, ror #2 eor x4, x23, x3, ror #47 bic x3, x0, x30, ror #5 eor x23, x3, x26, ror #52 eor x3, x29, x30, ror #24 ble Lsha3_keccak_f1600_loop // Final rotations ror x6, x6, #43 ror x11, x11, #50 ror x21, x21, #20 ror x2, x2, #61 ror x7, x7, #19 ror x12, x12, #3 ror x17, x17, #36 ror x22, x22, #44 ror x3, x3, #39 ror x8, x8, #56 ror x13, x13, #46 ror x28, x28, #63 ror x23, x23, #58 ror x4, x4, #54 ror x9, x9, #49 ror x14, x14, #8 ror x19, x19, #37 ror x24, x24, #28 ror x5, x5, #25 ror x10, x10, #23 ror x15, x15, #62 ror x20, x20, #2 ror x25, x25, #9 ldr x0, [sp] stp x1, x6, [x0] stp x11, x16, [x0, #16] stp x21, x2, [x0, #32] stp x7, x12, [x0, #48] stp x17, x22, [x0, #64] stp x3, x8, [x0, #80] stp x13, x28, [x0, #96] stp x23, x4, [x0, #112] stp x9, x14, [x0, #128] stp x19, x24, [x0, #144] stp x5, x10, [x0, #160] stp x15, x20, [x0, #176] str x25, [x0, #192] CFI_STACKLOAD2(x19,x20,32) CFI_STACKLOAD2(x21,x22,48) CFI_STACKLOAD2(x23,x24,64) CFI_STACKLOAD2(x25,x26,80) CFI_STACKLOAD2(x27,x28,96) CFI_STACKLOAD2(x29,x30,112) CFI_INC_SP(128) CFI_RET S2N_BN_SIZE_DIRECTIVE(sha3_keccak_f1600) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
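// A C-level sketch of the deferred-rotation trick noted in the main loop
// above: rather than materializing b = ROL64(a, r) and then using b, the
// code keeps b unrotated and folds the rotation into each later use via
// ARM's shifted-operand form,
//
//   eor x, y, b, ror #(64 - r)      /* x = y ^ ROL64(b, r) */
//
// since ROR64(b, 64 - r) == ROL64(b, r). Explicit ror instructions are
// then only needed once, in the block of final rotations just before the
// state is stored back.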
wlsfx/bnbb
8,921
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sha3/sha3_keccak2_f1600.S
// Copyright (c) 2024 The mlkem-native project authors
// Copyright (c) 2021-2022 Arm Limited
// Copyright (c) 2022 Matthias Kannwischer
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT

// ----------------------------------------------------------------------------
// Keccak-f1600 permutation for SHA3, batch of two independent operations
// Input a[50], rc[24]; output a[50]
//
// The input/output argument is in effect two 25-element Keccak arrays
// a[0...24] and a[25..49], which could be considered as type a[25][2].
//
// Thinking of each such input/output array as a row-major flattening of a
// 5x5 matrix of 64-bit words, this performs the Keccak-f1600 permutation,
// all 24 rounds with the distinct round constants rc[i] for each one. For
// correct operation, the input pointer rc should point at the standard
// round constants as in the specification:
//
// https://keccak.team/keccak_specs_summary.html#roundConstants
//
// This operation is at the core of SHA3 and is fully specified here:
//
// https://keccak.team/files/Keccak-reference-3.0.pdf
//
// extern void sha3_keccak2_f1600(uint64_t a[static 50],
//                                const uint64_t rc[static 24]);
//
// Standard ARM ABI: X0 = a, X1 = rc
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

.arch armv8.4-a+sha3

S2N_BN_SYM_VISIBILITY_DIRECTIVE(sha3_keccak2_f1600)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sha3_keccak2_f1600)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sha3_keccak2_f1600)
.text
.balign 4

S2N_BN_SYMBOL(sha3_keccak2_f1600):

CFI_START

// This is similar to the code in the mlkem-native repository here:
//
// mlkem/fips202/native/aarch64/src/keccak_f1600_x2_v84a_asm.S
//
// The main difference is the avoidance of ld2/st2 in favour of explicit
// transposition operations and conventional loads and stores.
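// After the trn1/trn2 transpositions below, lane 0 of each vector register
// Vi holds word i of the first state a[0..24] and lane 1 holds word i of
// the second state a[25..49], so each SIMD operation advances both states
// at once. A minimal C-level usage sketch ("keccak_rc" is an assumed name
// for the standard round-constant table):
//
//   uint64_t two_states[50];            /* two consecutive 25-word states */
//   sha3_keccak2_f1600(two_states, keccak_rc);
//
// which is equivalent to permuting each 25-word half independently.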
CFI_DEC_SP(64)
CFI_STACKSAVE2X(d8,d9,0,8)
CFI_STACKSAVE2X(d10,d11,16,24)
CFI_STACKSAVE2X(d12,d13,32,40)
CFI_STACKSAVE2X(d14,d15,48,56)

// Load the Keccak initial state into upper and lower parts of Q0..Q24

add x2, x0, #0xc8
ldp q24, q25, [x0]
ldp q26, q27, [x2]
trn1 v0.2d, v24.2d, v26.2d
trn2 v1.2d, v24.2d, v26.2d
trn1 v2.2d, v25.2d, v27.2d
trn2 v3.2d, v25.2d, v27.2d
ldp q24, q25, [x0, #0x20]
ldp q26, q27, [x2, #0x20]
trn1 v4.2d, v24.2d, v26.2d
trn2 v5.2d, v24.2d, v26.2d
trn1 v6.2d, v25.2d, v27.2d
trn2 v7.2d, v25.2d, v27.2d
ldp q24, q25, [x0, #0x40]
ldp q26, q27, [x2, #0x40]
trn1 v8.2d, v24.2d, v26.2d
trn2 v9.2d, v24.2d, v26.2d
trn1 v10.2d, v25.2d, v27.2d
trn2 v11.2d, v25.2d, v27.2d
ldp q24, q25, [x0, #0x60]
ldp q26, q27, [x2, #0x60]
trn1 v12.2d, v24.2d, v26.2d
trn2 v13.2d, v24.2d, v26.2d
trn1 v14.2d, v25.2d, v27.2d
trn2 v15.2d, v25.2d, v27.2d
ldp q24, q25, [x0, #0x80]
ldp q26, q27, [x2, #0x80]
trn1 v16.2d, v24.2d, v26.2d
trn2 v17.2d, v24.2d, v26.2d
trn1 v18.2d, v25.2d, v27.2d
trn2 v19.2d, v25.2d, v27.2d
ldp q24, q25, [x0, #0xa0]
ldp q26, q27, [x2, #0xa0]
trn1 v20.2d, v24.2d, v26.2d
trn2 v21.2d, v24.2d, v26.2d
trn1 v22.2d, v25.2d, v27.2d
trn2 v23.2d, v25.2d, v27.2d
ldr d24, [x0, #0xc0]
ldr d25, [x2, #0xc0]
trn1 v24.2d, v24.2d, v25.2d

// Now 24 rounds of the iteration

mov x2, #24
Lsha3_keccak2_f1600_loop:
eor3 v30.16b, v0.16b, v5.16b, v10.16b
eor3 v29.16b, v1.16b, v6.16b, v11.16b
eor3 v28.16b, v2.16b, v7.16b, v12.16b
eor3 v27.16b, v3.16b, v8.16b, v13.16b
eor3 v26.16b, v4.16b, v9.16b, v14.16b
eor3 v30.16b, v30.16b, v15.16b, v20.16b
eor3 v29.16b, v29.16b, v16.16b, v21.16b
eor3 v28.16b, v28.16b, v17.16b, v22.16b
eor3 v27.16b, v27.16b, v18.16b, v23.16b
eor3 v26.16b, v26.16b, v19.16b, v24.16b
rax1 v25.2d, v30.2d, v28.2d
rax1 v28.2d, v28.2d, v26.2d
rax1 v26.2d, v26.2d, v29.2d
rax1 v29.2d, v29.2d, v27.2d
rax1 v27.2d, v27.2d, v30.2d
eor v30.16b, v0.16b, v26.16b
xar v0.2d, v2.2d, v29.2d, #0x2
xar v2.2d, v12.2d, v29.2d, #0x15
xar v12.2d, v13.2d, v28.2d, #0x27
xar v13.2d, v19.2d, v27.2d, #0x38
xar v19.2d, v23.2d, v28.2d, #0x8
xar v23.2d, v15.2d, v26.2d, #0x17
xar v15.2d, v1.2d, v25.2d, #0x3f
xar v1.2d, v8.2d, v28.2d, #0x9
xar v8.2d, v16.2d, v25.2d, #0x13
xar v16.2d, v7.2d, v29.2d, #0x3a
xar v7.2d, v10.2d, v26.2d, #0x3d
xar v10.2d, v3.2d, v28.2d, #0x24
xar v3.2d, v18.2d, v28.2d, #0x2b
xar v18.2d, v17.2d, v29.2d, #0x31
xar v17.2d, v11.2d, v25.2d, #0x36
xar v11.2d, v9.2d, v27.2d, #0x2c
xar v9.2d, v22.2d, v29.2d, #0x3
xar v22.2d, v14.2d, v27.2d, #0x19
xar v14.2d, v20.2d, v26.2d, #0x2e
xar v20.2d, v4.2d, v27.2d, #0x25
xar v4.2d, v24.2d, v27.2d, #0x32
xar v24.2d, v21.2d, v25.2d, #0x3e
xar v21.2d, v5.2d, v26.2d, #0x1c
xar v27.2d, v6.2d, v25.2d, #0x14
ld1r { v31.2d }, [x1], #8
bcax v5.16b, v10.16b, v7.16b, v11.16b
bcax v6.16b, v11.16b, v8.16b, v7.16b
bcax v7.16b, v7.16b, v9.16b, v8.16b
bcax v8.16b, v8.16b, v10.16b, v9.16b
bcax v9.16b, v9.16b, v11.16b, v10.16b
bcax v10.16b, v15.16b, v12.16b, v16.16b
bcax v11.16b, v16.16b, v13.16b, v12.16b
bcax v12.16b, v12.16b, v14.16b, v13.16b
bcax v13.16b, v13.16b, v15.16b, v14.16b
bcax v14.16b, v14.16b, v16.16b, v15.16b
bcax v15.16b, v20.16b, v17.16b, v21.16b
bcax v16.16b, v21.16b, v18.16b, v17.16b
bcax v17.16b, v17.16b, v19.16b, v18.16b
bcax v18.16b, v18.16b, v20.16b, v19.16b
bcax v19.16b, v19.16b, v21.16b, v20.16b
bcax v20.16b, v0.16b, v22.16b, v1.16b
bcax v21.16b, v1.16b, v23.16b, v22.16b
bcax v22.16b, v22.16b, v24.16b, v23.16b
bcax v23.16b, v23.16b, v0.16b, v24.16b
bcax v24.16b, v24.16b, v1.16b, v0.16b
bcax v0.16b, v30.16b, v2.16b, v27.16b
bcax v1.16b, v27.16b, v3.16b, v2.16b
bcax v2.16b, v2.16b, v4.16b, v3.16b
bcax v3.16b, v3.16b, v30.16b, v4.16b
bcax v4.16b, v4.16b, v27.16b, v30.16b
eor v0.16b, v0.16b, v31.16b
sub x2, x2, #0x1
cbnz x2, Lsha3_keccak2_f1600_loop

// Store back the state

add x2, x0, #0xc8
trn1 v25.2d, v0.2d, v1.2d
trn1 v26.2d, v2.2d, v3.2d
stp q25, q26, [x0]
trn2 v25.2d, v0.2d, v1.2d
trn2 v26.2d, v2.2d, v3.2d
stp q25, q26, [x2]
trn1 v25.2d, v4.2d, v5.2d
trn1 v26.2d, v6.2d, v7.2d
stp q25, q26, [x0, #0x20]
trn2 v25.2d, v4.2d, v5.2d
trn2 v26.2d, v6.2d, v7.2d
stp q25, q26, [x2, #0x20]
trn1 v25.2d, v8.2d, v9.2d
trn1 v26.2d, v10.2d, v11.2d
stp q25, q26, [x0, #0x40]
trn2 v25.2d, v8.2d, v9.2d
trn2 v26.2d, v10.2d, v11.2d
stp q25, q26, [x2, #0x40]
trn1 v25.2d, v12.2d, v13.2d
trn1 v26.2d, v14.2d, v15.2d
stp q25, q26, [x0, #0x60]
trn2 v25.2d, v12.2d, v13.2d
trn2 v26.2d, v14.2d, v15.2d
stp q25, q26, [x2, #0x60]
trn1 v25.2d, v16.2d, v17.2d
trn1 v26.2d, v18.2d, v19.2d
stp q25, q26, [x0, #0x80]
trn2 v25.2d, v16.2d, v17.2d
trn2 v26.2d, v18.2d, v19.2d
stp q25, q26, [x2, #0x80]
trn1 v25.2d, v20.2d, v21.2d
trn1 v26.2d, v22.2d, v23.2d
stp q25, q26, [x0, #0xa0]
trn2 v25.2d, v20.2d, v21.2d
trn2 v26.2d, v22.2d, v23.2d
stp q25, q26, [x2, #0xa0]
str d24, [x0, #0xc0]
trn2 v24.2d, v24.2d, v24.2d
str d24, [x2, #0xc0]

// Restore registers and return

CFI_STACKLOAD2(d8,d9,0)
CFI_STACKLOAD2(d10,d11,16)
CFI_STACKLOAD2(d12,d13,32)
CFI_STACKLOAD2(d14,d15,48)
CFI_INC_SP(64)
CFI_RET

S2N_BN_SIZE_DIRECTIVE(sha3_keccak2_f1600)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits
#endif
wlsfx/bnbb
10,693
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sha3/sha3_keccak2_f1600_alt.S
// Copyright (c) 2024 The mlkem-native project authors
// Copyright (c) 2021-2022 Arm Limited
// Copyright (c) 2022 Matthias Kannwischer
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT

// ----------------------------------------------------------------------------
// Keccak-f1600 permutation for SHA3, batch of two independent operations
// Input a[50], rc[24]; output a[50]
//
// The input/output argument is in effect two 25-element Keccak arrays
// a[0...24] and a[25..49], which could be considered as type a[25][2].
//
// Thinking of each such input/output array as a row-major flattening of a
// 5x5 matrix of 64-bit words, this performs the Keccak-f1600 permutation,
// all 24 rounds with the distinct round constants rc[i] for each one. For
// correct operation, the input pointer rc should point at the standard
// round constants as in the specification:
//
// https://keccak.team/keccak_specs_summary.html#roundConstants
//
// This operation is at the core of SHA3 and is fully specified here:
//
// https://keccak.team/files/Keccak-reference-3.0.pdf
//
// extern void sha3_keccak2_f1600_alt(uint64_t a[static 50],
//                                    const uint64_t rc[static 24]);
//
// Standard ARM ABI: X0 = a, X1 = rc
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

.arch armv8.4-a+sha3

S2N_BN_SYM_VISIBILITY_DIRECTIVE(sha3_keccak2_f1600_alt)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sha3_keccak2_f1600_alt)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sha3_keccak2_f1600_alt)
.text
.balign 4

S2N_BN_SYMBOL(sha3_keccak2_f1600_alt):

CFI_START

// This is similar to the code in the mlkem-native repository here:
//
// mlkem/fips202/native/aarch64/src/keccak_f1600_x2_v8a_v84a_asm_hybrid.S
//
// The main difference is the avoidance of ld2/st2 in favour of explicit
// transposition operations and conventional loads and stores. We also
// avoid saving and restoring some unused integer registers.
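// The loop below replaces some of the ARMv8.4-A SHA3 instructions with
// plain SIMD operations; per 64-bit lane the identities are (a sketch of
// the equivalences, stated here for reference):
//
//   ROL64(x, n) = (x << n) | (x >> (64 - n)), realized as
//       shl  vd.2d, vs.2d, #n
//       sri  vd.2d, vs.2d, #(64 - n)    /* same result as xar with #(64-n)
//                                          when vs = a ^ b */
//
//   and the chi step d = n ^ (m & ~a), realized as
//       bic  v31.16b, vm.16b, va.16b
//       eor  vd.16b, v31.16b, vn.16b    /* same result as bcax vd, vn, vm, va */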
CFI_DEC_SP(64) CFI_STACKSAVE2(d8,d9,0) CFI_STACKSAVE2(d10,d11,0x10) CFI_STACKSAVE2(d12,d13,0x20) CFI_STACKSAVE2(d14,d15,0x30) // Load the Keccak initial state into upper and lower parts of Q0..Q24 add x2, x0, #0xc8 ldp q24, q25, [x0] ldp q26, q27, [x2] trn1 v0.2d, v24.2d, v26.2d trn2 v1.2d, v24.2d, v26.2d trn1 v2.2d, v25.2d, v27.2d trn2 v3.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x20] ldp q26, q27, [x2, #0x20] trn1 v4.2d, v24.2d, v26.2d trn2 v5.2d, v24.2d, v26.2d trn1 v6.2d, v25.2d, v27.2d trn2 v7.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x40] ldp q26, q27, [x2, #0x40] trn1 v8.2d, v24.2d, v26.2d trn2 v9.2d, v24.2d, v26.2d trn1 v10.2d, v25.2d, v27.2d trn2 v11.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x60] ldp q26, q27, [x2, #0x60] trn1 v12.2d, v24.2d, v26.2d trn2 v13.2d, v24.2d, v26.2d trn1 v14.2d, v25.2d, v27.2d trn2 v15.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x80] ldp q26, q27, [x2, #0x80] trn1 v16.2d, v24.2d, v26.2d trn2 v17.2d, v24.2d, v26.2d trn1 v18.2d, v25.2d, v27.2d trn2 v19.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0xa0] ldp q26, q27, [x2, #0xa0] trn1 v20.2d, v24.2d, v26.2d trn2 v21.2d, v24.2d, v26.2d trn1 v22.2d, v25.2d, v27.2d trn2 v23.2d, v25.2d, v27.2d ldr d24, [x0, #0xc0] ldr d25, [x2, #0xc0] trn1 v24.2d, v24.2d, v25.2d // Now 24 rounds of the iteration mov x2, #24 Lsha3_keccak2_f1600_alt_loop: eor v30.16b, v0.16b, v5.16b eor v30.16b, v30.16b, v10.16b eor3 v30.16b, v30.16b, v15.16b, v20.16b eor v29.16b, v1.16b, v6.16b eor v29.16b, v29.16b, v11.16b eor3 v29.16b, v29.16b, v16.16b, v21.16b eor v28.16b, v2.16b, v7.16b eor v28.16b, v28.16b, v12.16b eor3 v28.16b, v28.16b, v17.16b, v22.16b eor v27.16b, v3.16b, v8.16b eor v27.16b, v27.16b, v13.16b eor3 v27.16b, v27.16b, v18.16b, v23.16b eor v26.16b, v4.16b, v9.16b eor v26.16b, v26.16b, v14.16b eor3 v26.16b, v26.16b, v19.16b, v24.16b add v31.2d, v28.2d, v28.2d sri v31.2d, v28.2d, #0x3f eor v25.16b, v31.16b, v30.16b rax1 v28.2d, v28.2d, v26.2d add v31.2d, v29.2d, v29.2d sri v31.2d, v29.2d, #0x3f eor v26.16b, v31.16b, v26.16b rax1 v29.2d, v29.2d, v27.2d add v31.2d, v30.2d, v30.2d sri v31.2d, v30.2d, #0x3f eor v27.16b, v31.16b, v27.16b eor v30.16b, v0.16b, v26.16b xar v0.2d, v2.2d, v29.2d, #0x2 eor v31.16b, v12.16b, v29.16b shl v2.2d, v31.2d, #0x2b sri v2.2d, v31.2d, #0x15 xar v12.2d, v13.2d, v28.2d, #0x27 eor v31.16b, v19.16b, v27.16b shl v13.2d, v31.2d, #0x8 sri v13.2d, v31.2d, #0x38 xar v19.2d, v23.2d, v28.2d, #0x8 eor v31.16b, v15.16b, v26.16b shl v23.2d, v31.2d, #0x29 sri v23.2d, v31.2d, #0x17 xar v15.2d, v1.2d, v25.2d, #0x3f eor v31.16b, v8.16b, v28.16b shl v1.2d, v31.2d, #0x37 sri v1.2d, v31.2d, #0x9 xar v8.2d, v16.2d, v25.2d, #0x13 eor v31.16b, v7.16b, v29.16b shl v16.2d, v31.2d, #0x6 sri v16.2d, v31.2d, #0x3a xar v7.2d, v10.2d, v26.2d, #0x3d eor v31.16b, v3.16b, v28.16b shl v10.2d, v31.2d, #0x1c sri v10.2d, v31.2d, #0x24 xar v3.2d, v18.2d, v28.2d, #0x2b eor v31.16b, v17.16b, v29.16b shl v18.2d, v31.2d, #0xf sri v18.2d, v31.2d, #0x31 xar v17.2d, v11.2d, v25.2d, #0x36 eor v31.16b, v9.16b, v27.16b shl v11.2d, v31.2d, #0x14 sri v11.2d, v31.2d, #0x2c xar v9.2d, v22.2d, v29.2d, #0x3 eor v31.16b, v14.16b, v27.16b shl v22.2d, v31.2d, #0x27 sri v22.2d, v31.2d, #0x19 xar v14.2d, v20.2d, v26.2d, #0x2e eor v31.16b, v4.16b, v27.16b shl v20.2d, v31.2d, #0x1b sri v20.2d, v31.2d, #0x25 xar v4.2d, v24.2d, v27.2d, #0x32 eor v31.16b, v21.16b, v25.16b shl v24.2d, v31.2d, #0x2 sri v24.2d, v31.2d, #0x3e xar v21.2d, v5.2d, v26.2d, #0x1c eor v31.16b, v6.16b, v25.16b shl v27.2d, v31.2d, #0x2c sri v27.2d, v31.2d, #0x14 ld1r { v28.2d }, [x1], #8 bcax v5.16b, 
v10.16b, v7.16b, v11.16b bic v31.16b, v8.16b, v7.16b eor v6.16b, v31.16b, v11.16b bcax v7.16b, v7.16b, v9.16b, v8.16b bic v31.16b, v10.16b, v9.16b eor v8.16b, v31.16b, v8.16b bcax v9.16b, v9.16b, v11.16b, v10.16b bic v31.16b, v12.16b, v16.16b eor v10.16b, v31.16b, v15.16b bcax v11.16b, v16.16b, v13.16b, v12.16b bic v31.16b, v14.16b, v13.16b eor v12.16b, v31.16b, v12.16b bcax v13.16b, v13.16b, v15.16b, v14.16b bic v31.16b, v16.16b, v15.16b eor v14.16b, v31.16b, v14.16b bcax v15.16b, v20.16b, v17.16b, v21.16b bic v31.16b, v18.16b, v17.16b eor v16.16b, v31.16b, v21.16b bcax v17.16b, v17.16b, v19.16b, v18.16b bic v31.16b, v20.16b, v19.16b eor v18.16b, v31.16b, v18.16b bcax v19.16b, v19.16b, v21.16b, v20.16b bic v31.16b, v22.16b, v1.16b eor v20.16b, v31.16b, v0.16b bcax v21.16b, v1.16b, v23.16b, v22.16b bic v31.16b, v24.16b, v23.16b eor v22.16b, v31.16b, v22.16b bcax v23.16b, v23.16b, v0.16b, v24.16b bic v31.16b, v1.16b, v0.16b eor v24.16b, v31.16b, v24.16b bcax v0.16b, v30.16b, v2.16b, v27.16b bic v31.16b, v3.16b, v2.16b eor v1.16b, v31.16b, v27.16b bcax v2.16b, v2.16b, v4.16b, v3.16b bic v31.16b, v30.16b, v4.16b eor v3.16b, v31.16b, v3.16b bcax v4.16b, v4.16b, v27.16b, v30.16b eor v0.16b, v0.16b, v28.16b sub x2, x2, #0x1 cbnz x2, Lsha3_keccak2_f1600_alt_loop // Store back the state add x2, x0, #0xc8 trn1 v25.2d, v0.2d, v1.2d trn1 v26.2d, v2.2d, v3.2d stp q25, q26, [x0] trn2 v25.2d, v0.2d, v1.2d trn2 v26.2d, v2.2d, v3.2d stp q25, q26, [x2] trn1 v25.2d, v4.2d, v5.2d trn1 v26.2d, v6.2d, v7.2d stp q25, q26, [x0, #0x20] trn2 v25.2d, v4.2d, v5.2d trn2 v26.2d, v6.2d, v7.2d stp q25, q26, [x2, #0x20] trn1 v25.2d, v8.2d, v9.2d trn1 v26.2d, v10.2d, v11.2d stp q25, q26, [x0, #0x40] trn2 v25.2d, v8.2d, v9.2d trn2 v26.2d, v10.2d, v11.2d stp q25, q26, [x2, #0x40] trn1 v25.2d, v12.2d, v13.2d trn1 v26.2d, v14.2d, v15.2d stp q25, q26, [x0, #0x60] trn2 v25.2d, v12.2d, v13.2d trn2 v26.2d, v14.2d, v15.2d stp q25, q26, [x2, #0x60] trn1 v25.2d, v16.2d, v17.2d trn1 v26.2d, v18.2d, v19.2d stp q25, q26, [x0, #0x80] trn2 v25.2d, v16.2d, v17.2d trn2 v26.2d, v18.2d, v19.2d stp q25, q26, [x2, #0x80] trn1 v25.2d, v20.2d, v21.2d trn1 v26.2d, v22.2d, v23.2d stp q25, q26, [x0, #0xa0] trn2 v25.2d, v20.2d, v21.2d trn2 v26.2d, v22.2d, v23.2d stp q25, q26, [x2, #0xa0] str d24, [x0, #0xc0] trn2 v24.2d, v24.2d, v24.2d str d24, [x2, #0xc0] // Restore registers and return CFI_STACKLOAD2(d8,d9,0) CFI_STACKLOAD2(d10,d11,0x10) CFI_STACKLOAD2(d12,d13,0x20) CFI_STACKLOAD2(d14,d15,0x30) CFI_INC_SP(64) CFI_RET S2N_BN_SIZE_DIRECTIVE(sha3_keccak2_f1600_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
wlsfx/bnbb
35,222
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/sha3/sha3_keccak4_f1600_alt2.S
// Copyright (c) 2024 The mlkem-native project authors
// Copyright (c) 2021-2022 Arm Limited
// Copyright (c) 2022 Matthias Kannwischer
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT

// ----------------------------------------------------------------------------
// Keccak-f1600 permutation for SHA3, batch of four independent operations
// Input a[100], rc[24]; output a[100]
//
// The input/output argument is in effect four 25-element Keccak arrays
// a[0...24], a[25..49], a[50..74] and a[75..99], which could be considered
// as type a[25][4].
//
// Thinking of each such input/output array as a row-major flattening of a
// 5x5 matrix of 64-bit words, this performs the Keccak-f1600 permutation,
// all 24 rounds with the distinct round constants rc[i] for each one. For
// correct operation, the input pointer rc should point at the standard
// round constants as in the specification:
//
// https://keccak.team/keccak_specs_summary.html#roundConstants
//
// This operation is at the core of SHA3 and is fully specified here:
//
// https://keccak.team/files/Keccak-reference-3.0.pdf
//
// extern void sha3_keccak4_f1600_alt2(uint64_t a[static 100],
//                                     const uint64_t rc[static 24]);
//
// Standard ARM ABI: X0 = a, X1 = rc
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

.arch armv8.4-a+sha3

S2N_BN_SYM_VISIBILITY_DIRECTIVE(sha3_keccak4_f1600_alt2)
S2N_BN_FUNCTION_TYPE_DIRECTIVE(sha3_keccak4_f1600_alt2)
S2N_BN_SYM_PRIVACY_DIRECTIVE(sha3_keccak4_f1600_alt2)
.text
.balign 4

S2N_BN_SYMBOL(sha3_keccak4_f1600_alt2):

CFI_START

// This is similar to the code in the mlkem-native repository here:
//
// mlkem/fips202/native/aarch64/src/keccak_f1600_x4_v8a_v84a_scalar_hybrid_asm.S
//
// The main difference is the avoidance of ld2/st2 in favour of explicit
// transposition operations and conventional loads and stores.
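// In this hybrid variant, two of the four states are carried in the SIMD
// registers (transposed into vector lanes as in the two-way code) while the
// remaining two are processed through the general-purpose register file.
// The observable result is the same as for sha3_keccak4_f1600; a caller
// could pick either variant depending on the target core's scalar/vector
// throughput balance. A minimal C-level usage sketch ("keccak_rc" is an
// assumed name for the standard round-constant table):
//
//   uint64_t a[100];   /* four consecutive 25-word states */
//   sha3_keccak4_f1600_alt2(a, keccak_rc);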
CFI_DEC_SP(224) CFI_STACKSAVE2X(x19,x20,48,56) CFI_STACKSAVE2X(x21,x22,64,72) CFI_STACKSAVE2X(x23,x24,80,88) CFI_STACKSAVE2X(x25,x26,96,104) CFI_STACKSAVE2X(x27,x28,112,120) CFI_STACKSAVE2X(x29,x30,128,136) CFI_STACKSAVE2X(d8,d9,144,152) CFI_STACKSAVE2X(d10,d11,160,168) CFI_STACKSAVE2X(d12,d13,176,184) CFI_STACKSAVE2X(d14,d15,192,200) mov x29, x1 mov x30, #0x0 str x30, [sp, #0x20] str x29, [sp, #0x8] str x29, [sp, #0x10] str x0, [sp] add x2, x0, #0xc8 ldp q24, q25, [x0] ldp q26, q27, [x2] trn1 v0.2d, v24.2d, v26.2d trn2 v1.2d, v24.2d, v26.2d trn1 v2.2d, v25.2d, v27.2d trn2 v3.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x20] ldp q26, q27, [x2, #0x20] trn1 v4.2d, v24.2d, v26.2d trn2 v5.2d, v24.2d, v26.2d trn1 v6.2d, v25.2d, v27.2d trn2 v7.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x40] ldp q26, q27, [x2, #0x40] trn1 v8.2d, v24.2d, v26.2d trn2 v9.2d, v24.2d, v26.2d trn1 v10.2d, v25.2d, v27.2d trn2 v11.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x60] ldp q26, q27, [x2, #0x60] trn1 v12.2d, v24.2d, v26.2d trn2 v13.2d, v24.2d, v26.2d trn1 v14.2d, v25.2d, v27.2d trn2 v15.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0x80] ldp q26, q27, [x2, #0x80] trn1 v16.2d, v24.2d, v26.2d trn2 v17.2d, v24.2d, v26.2d trn1 v18.2d, v25.2d, v27.2d trn2 v19.2d, v25.2d, v27.2d ldp q24, q25, [x0, #0xa0] ldp q26, q27, [x2, #0xa0] trn1 v20.2d, v24.2d, v26.2d trn2 v21.2d, v24.2d, v26.2d trn1 v22.2d, v25.2d, v27.2d trn2 v23.2d, v25.2d, v27.2d ldr d24, [x0, #0xc0] ldr d25, [x2, #0xc0] trn1 v24.2d, v24.2d, v25.2d add x0, x0, #0x190 ldp x1, x6, [x0] ldp x11, x16, [x0, #0x10] ldp x21, x2, [x0, #0x20] ldp x7, x12, [x0, #0x30] ldp x17, x22, [x0, #0x40] ldp x3, x8, [x0, #0x50] ldp x13, x28, [x0, #0x60] ldp x23, x4, [x0, #0x70] ldp x9, x14, [x0, #0x80] ldp x19, x24, [x0, #0x90] ldp x5, x10, [x0, #0xa0] ldp x15, x20, [x0, #0xb0] ldr x25, [x0, #0xc0] sub x0, x0, #0x190 Lsha3_keccak4_f1600_alt2_initial: eor x30, x24, x25 eor x27, x9, x10 eor3 v30.16b, v0.16b, v5.16b, v10.16b eor v30.16b, v30.16b, v15.16b eor x0, x30, x21 eor x26, x27, x6 eor v30.16b, v30.16b, v20.16b eor x27, x26, x7 eor x29, x0, x22 eor3 v29.16b, v1.16b, v6.16b, v11.16b eor x26, x29, x23 eor x29, x4, x5 eor v29.16b, v29.16b, v16.16b eor x30, x29, x1 eor x0, x27, x8 eor v29.16b, v29.16b, v21.16b eor x29, x30, x2 eor x30, x19, x20 eor3 v28.16b, v2.16b, v7.16b, v12.16b eor x30, x30, x16 eor x27, x26, x0, ror #63 eor v28.16b, v28.16b, v17.16b eor x4, x4, x27 eor x30, x30, x17 eor v28.16b, v28.16b, v22.16b eor x30, x30, x28 eor x29, x29, x3 eor3 v27.16b, v3.16b, v8.16b, v13.16b eor x0, x0, x30, ror #63 eor x30, x30, x29, ror #63 eor v27.16b, v27.16b, v18.16b eor x22, x22, x30 eor v27.16b, v27.16b, v23.16b eor x23, x23, x30 str x23, [sp, #0xd0] eor3 v26.16b, v4.16b, v9.16b, v14.16b eor x23, x14, x15 eor x14, x14, x0 eor v26.16b, v26.16b, v19.16b eor x23, x23, x11 eor x15, x15, x0 eor v26.16b, v26.16b, v24.16b eor x1, x1, x27 eor x23, x23, x12 rax1 v25.2d, v30.2d, v28.2d eor x23, x23, x13 eor x11, x11, x0 add v31.2d, v26.2d, v26.2d eor x29, x29, x23, ror #63 eor x23, x23, x26, ror #63 sri v31.2d, v26.2d, #0x3f eor x26, x13, x0 eor x13, x28, x23 eor v28.16b, v31.16b, v28.16b eor x28, x24, x30 eor x24, x16, x23 rax1 v26.2d, v26.2d, v29.2d eor x16, x21, x30 eor x21, x25, x30 add v31.2d, v27.2d, v27.2d eor x30, x19, x23 sri v31.2d, v27.2d, #0x3f eor x19, x20, x23 eor x20, x17, x23 eor v29.16b, v31.16b, v29.16b eor x17, x12, x0 eor x0, x2, x27 rax1 v27.2d, v27.2d, v30.2d eor x2, x6, x29 eor x6, x8, x29 eor v30.16b, v0.16b, v26.16b bic x8, x28, x13, ror #47 eor x12, x3, x27 eor v31.16b, v2.16b, 
v29.16b bic x3, x13, x17, ror #19 eor x5, x5, x27 shl v0.2d, v31.2d, #0x3e ldr x27, [sp, #0xd0] bic x25, x17, x2, ror #5 sri v0.2d, v31.2d, #0x2 eor x9, x9, x29 eor x23, x25, x5, ror #52 xar v2.2d, v12.2d, v29.2d, #0x15 eor x3, x3, x2, ror #24 eor x8, x8, x17, ror #2 eor v31.16b, v13.16b, v28.16b eor x17, x10, x29 bic x25, x12, x22, ror #47 shl v12.2d, v31.2d, #0x19 eor x29, x7, x29 bic x10, x4, x27, ror #2 sri v12.2d, v31.2d, #0x27 bic x7, x5, x28, ror #10 xar v13.2d, v19.2d, v27.2d, #0x38 eor x10, x10, x20, ror #50 eor x13, x7, x13, ror #57 eor v31.16b, v23.16b, v28.16b bic x7, x2, x5, ror #47 eor x2, x25, x24, ror #39 shl v19.2d, v31.2d, #0x38 bic x25, x20, x11, ror #57 bic x5, x17, x4, ror #25 sri v19.2d, v31.2d, #0x8 eor x25, x25, x17, ror #53 bic x17, x11, x17, ror #60 xar v23.2d, v15.2d, v26.2d, #0x17 eor x28, x7, x28, ror #57 bic x7, x9, x12, ror #42 eor v31.16b, v1.16b, v25.16b eor x7, x7, x22, ror #25 bic x22, x22, x24, ror #56 shl v15.2d, v31.2d, #0x1 bic x24, x24, x15, ror #31 eor x22, x22, x15, ror #23 sri v15.2d, v31.2d, #0x3f bic x20, x27, x20, ror #48 bic x15, x15, x9, ror #16 xar v1.2d, v8.2d, v28.2d, #0x9 eor x12, x15, x12, ror #58 eor x15, x5, x27, ror #27 eor v31.16b, v16.16b, v25.16b eor x5, x20, x11, ror #41 shl v8.2d, v31.2d, #0x2d ldr x11, [sp, #0x8] eor x20, x17, x4, ror #21 sri v8.2d, v31.2d, #0x13 eor x17, x24, x9, ror #47 mov x24, #0x1 xar v16.2d, v7.2d, v29.2d, #0x3a bic x9, x0, x16, ror #9 str x24, [sp, #0x18] eor v31.16b, v10.16b, v26.16b bic x24, x29, x1, ror #44 bic x27, x1, x21, ror #50 shl v7.2d, v31.2d, #0x3 bic x4, x26, x29, ror #63 eor x1, x1, x4, ror #21 sri v7.2d, v31.2d, #0x3d ldr x11, [x11] bic x4, x21, x30, ror #57 xar v10.2d, v3.2d, v28.2d, #0x24 eor x21, x24, x21, ror #30 eor x24, x9, x19, ror #44 eor v31.16b, v18.16b, v28.16b bic x9, x14, x6, ror #5 eor x9, x9, x0, ror #43 shl v3.2d, v31.2d, #0x15 bic x0, x6, x0, ror #38 eor x1, x1, x11 sri v3.2d, v31.2d, #0x2b eor x11, x4, x26, ror #35 eor x4, x0, x16, ror #47 xar v18.2d, v17.2d, v29.2d, #0x31 bic x0, x16, x19, ror #35 eor v31.16b, v11.16b, v25.16b eor x16, x27, x30, ror #43 bic x27, x30, x26, ror #42 shl v17.2d, v31.2d, #0xa bic x26, x19, x14, ror #41 eor x19, x0, x14, ror #12 sri v17.2d, v31.2d, #0x36 eor x14, x26, x6, ror #46 eor x6, x27, x29, ror #41 xar v11.2d, v9.2d, v27.2d, #0x2c eor x0, x15, x11, ror #52 eor x0, x0, x13, ror #48 eor v31.16b, v22.16b, v29.16b eor x26, x8, x9, ror #57 eor x27, x0, x14, ror #10 shl v9.2d, v31.2d, #0x3d eor x29, x16, x28, ror #63 eor x26, x26, x6, ror #51 sri v9.2d, v31.2d, #0x3 eor x30, x23, x22, ror #50 eor x0, x26, x10, ror #31 xar v22.2d, v14.2d, v27.2d, #0x19 eor x29, x29, x19, ror #37 eor x27, x27, x12, ror #5 eor v31.16b, v20.16b, v26.16b eor x30, x30, x24, ror #34 eor x0, x0, x7, ror #27 shl v14.2d, v31.2d, #0x12 eor x26, x30, x21, ror #26 sri v14.2d, v31.2d, #0x2e eor x26, x26, x25, ror #15 ror x30, x27, #0x3e xar v20.2d, v4.2d, v27.2d, #0x25 eor x30, x30, x26, ror #57 ror x26, x26, #0x3a eor v31.16b, v24.16b, v27.16b eor x16, x30, x16 eor x28, x30, x28, ror #63 shl v4.2d, v31.2d, #0xe str x28, [sp, #0xd0] eor x29, x29, x17, ror #36 sri v4.2d, v31.2d, #0x32 eor x28, x1, x2, ror #61 eor x19, x30, x19, ror #37 xar v24.2d, v21.2d, v25.2d, #0x3e eor x29, x29, x20, ror #2 eor x28, x28, x4, ror #54 eor v31.16b, v5.16b, v26.16b eor x26, x26, x0, ror #55 eor x28, x28, x3, ror #39 shl v21.2d, v31.2d, #0x24 eor x28, x28, x5, ror #25 ror x0, x0, #0x38 sri v21.2d, v31.2d, #0x1c eor x0, x0, x29, ror #63 eor x27, x28, x27, ror #61 xar v27.2d, v6.2d, v25.2d, 
#0x14 eor x13, x0, x13, ror #46 eor x28, x29, x28, ror #63 bic v31.16b, v7.16b, v11.16b eor x29, x30, x20, ror #2 eor v5.16b, v31.16b, v10.16b eor x20, x26, x3, ror #39 eor x11, x0, x11, ror #50 bcax v6.16b, v11.16b, v8.16b, v7.16b eor x25, x28, x25, ror #9 eor x3, x28, x21, ror #20 bic v31.16b, v9.16b, v8.16b eor x21, x26, x1 eor x9, x27, x9, ror #49 eor v7.16b, v31.16b, v7.16b eor x24, x28, x24, ror #28 eor x1, x30, x17, ror #36 bcax v8.16b, v8.16b, v10.16b, v9.16b eor x14, x0, x14, ror #8 eor x22, x28, x22, ror #44 bic v31.16b, v11.16b, v10.16b eor x8, x27, x8, ror #56 eor x17, x27, x7, ror #19 eor v9.16b, v31.16b, v9.16b eor x15, x0, x15, ror #62 bic x7, x20, x22, ror #47 bcax v10.16b, v15.16b, v12.16b, v16.16b eor x4, x26, x4, ror #54 eor x0, x0, x12, ror #3 bic v31.16b, v13.16b, v12.16b eor x28, x28, x23, ror #58 eor x23, x26, x2, ror #61 eor v11.16b, v31.16b, v16.16b eor x26, x26, x5, ror #25 bcax v12.16b, v12.16b, v14.16b, v13.16b eor x2, x7, x16, ror #39 bic x7, x9, x20, ror #42 bic v31.16b, v15.16b, v14.16b bic x30, x15, x9, ror #16 eor x7, x7, x22, ror #25 eor v13.16b, v31.16b, v13.16b eor x12, x30, x20, ror #58 bic x20, x22, x16, ror #56 bic v31.16b, v16.16b, v15.16b eor x30, x27, x6, ror #43 eor x22, x20, x15, ror #23 eor v14.16b, v31.16b, v14.16b bic x6, x19, x13, ror #42 eor x6, x6, x17, ror #41 bcax v15.16b, v20.16b, v17.16b, v21.16b bic x5, x13, x17, ror #63 eor x5, x21, x5, ror #21 bic v31.16b, v18.16b, v17.16b bic x17, x17, x21, ror #44 eor x27, x27, x10, ror #23 eor v16.16b, v31.16b, v21.16b bic x21, x21, x25, ror #50 bic x20, x27, x4, ror #25 bcax v17.16b, v17.16b, v19.16b, v18.16b bic x10, x16, x15, ror #31 eor x16, x21, x19, ror #43 bic v31.16b, v20.16b, v19.16b eor x21, x17, x25, ror #30 bic x19, x25, x19, ror #57 eor v18.16b, v31.16b, v18.16b ldr x25, [sp, #0x18] bcax v19.16b, v19.16b, v21.16b, v20.16b eor x17, x10, x9, ror #47 ldr x9, [sp, #0x8] bic v31.16b, v22.16b, v1.16b eor x15, x20, x28, ror #27 bic x20, x4, x28, ror #2 eor v20.16b, v31.16b, v0.16b eor x10, x20, x1, ror #50 bic x20, x11, x27, ror #60 bcax v21.16b, v1.16b, v23.16b, v22.16b eor x20, x20, x4, ror #21 bic x4, x28, x1, ror #48 bic v31.16b, v24.16b, v23.16b bic x1, x1, x11, ror #57 ldr x28, [x9, x25, lsl #3] eor v22.16b, v31.16b, v22.16b ldr x9, [sp, #0xd0] add x25, x25, #0x1 bcax v23.16b, v23.16b, v0.16b, v24.16b str x25, [sp, #0x18] cmp x25, #0x17 bic v31.16b, v1.16b, v0.16b eor x25, x1, x27, ror #53 bic x27, x30, x26, ror #47 eor v24.16b, v31.16b, v24.16b eor x1, x5, x28 eor x5, x4, x11, ror #41 bcax v0.16b, v30.16b, v2.16b, v27.16b eor x11, x19, x13, ror #35 bic v31.16b, v3.16b, v2.16b bic x13, x26, x24, ror #10 eor x28, x27, x24, ror #57 eor v1.16b, v31.16b, v27.16b bic x27, x24, x9, ror #47 bic x19, x23, x3, ror #9 bcax v2.16b, v2.16b, v4.16b, v3.16b bic x4, x29, x14, ror #41 eor x24, x19, x29, ror #44 bic v31.16b, v30.16b, v4.16b bic x29, x3, x29, ror #35 eor x13, x13, x9, ror #57 eor v3.16b, v31.16b, v3.16b eor x19, x29, x14, ror #12 bic x29, x9, x0, ror #19 bcax v4.16b, v4.16b, v27.16b, v30.16b bic x14, x14, x8, ror #5 eor x9, x14, x23, ror #43 eor x14, x4, x8, ror #46 bic x23, x8, x23, ror #38 eor x8, x27, x0, ror #2 eor x4, x23, x3, ror #47 bic x3, x0, x30, ror #5 eor x23, x3, x26, ror #52 eor x3, x29, x30, ror #24 ldr x30, [sp, #0x10] ld1r { v28.2d }, [x30], #8 str x30, [sp, #0x10] eor v0.16b, v0.16b, v28.16b Lsha3_keccak4_f1600_alt2_loop: eor x0, x15, x11, ror #52 eor x0, x0, x13, ror #48 eor3 v30.16b, v0.16b, v5.16b, v10.16b eor v30.16b, v30.16b, v15.16b eor x26, x8, x9, ror #57 eor 
x27, x0, x14, ror #10 eor v30.16b, v30.16b, v20.16b eor x29, x16, x28, ror #63 eor x26, x26, x6, ror #51 eor3 v29.16b, v1.16b, v6.16b, v11.16b eor x30, x23, x22, ror #50 eor x0, x26, x10, ror #31 eor v29.16b, v29.16b, v16.16b eor x29, x29, x19, ror #37 eor x27, x27, x12, ror #5 eor v29.16b, v29.16b, v21.16b eor x30, x30, x24, ror #34 eor x0, x0, x7, ror #27 eor3 v28.16b, v2.16b, v7.16b, v12.16b eor x26, x30, x21, ror #26 eor x26, x26, x25, ror #15 eor v28.16b, v28.16b, v17.16b ror x30, x27, #0x3e eor x30, x30, x26, ror #57 eor v28.16b, v28.16b, v22.16b ror x26, x26, #0x3a eor x16, x30, x16 eor3 v27.16b, v3.16b, v8.16b, v13.16b eor x28, x30, x28, ror #63 str x28, [sp, #0xd0] eor v27.16b, v27.16b, v18.16b eor x29, x29, x17, ror #36 eor x28, x1, x2, ror #61 eor v27.16b, v27.16b, v23.16b eor x19, x30, x19, ror #37 eor x29, x29, x20, ror #2 eor3 v26.16b, v4.16b, v9.16b, v14.16b eor x28, x28, x4, ror #54 eor x26, x26, x0, ror #55 eor v26.16b, v26.16b, v19.16b eor x28, x28, x3, ror #39 eor x28, x28, x5, ror #25 eor v26.16b, v26.16b, v24.16b ror x0, x0, #0x38 eor x0, x0, x29, ror #63 rax1 v25.2d, v30.2d, v28.2d eor x27, x28, x27, ror #61 eor x13, x0, x13, ror #46 add v31.2d, v26.2d, v26.2d eor x28, x29, x28, ror #63 eor x29, x30, x20, ror #2 sri v31.2d, v26.2d, #0x3f eor x20, x26, x3, ror #39 eor x11, x0, x11, ror #50 eor v28.16b, v31.16b, v28.16b eor x25, x28, x25, ror #9 eor x3, x28, x21, ror #20 rax1 v26.2d, v26.2d, v29.2d eor x21, x26, x1 add v31.2d, v27.2d, v27.2d eor x9, x27, x9, ror #49 eor x24, x28, x24, ror #28 sri v31.2d, v27.2d, #0x3f eor x1, x30, x17, ror #36 eor x14, x0, x14, ror #8 eor v29.16b, v31.16b, v29.16b eor x22, x28, x22, ror #44 eor x8, x27, x8, ror #56 rax1 v27.2d, v27.2d, v30.2d eor x17, x27, x7, ror #19 eor x15, x0, x15, ror #62 eor v30.16b, v0.16b, v26.16b bic x7, x20, x22, ror #47 eor x4, x26, x4, ror #54 eor v31.16b, v2.16b, v29.16b eor x0, x0, x12, ror #3 eor x28, x28, x23, ror #58 shl v0.2d, v31.2d, #0x3e eor x23, x26, x2, ror #61 eor x26, x26, x5, ror #25 sri v0.2d, v31.2d, #0x2 eor x2, x7, x16, ror #39 bic x7, x9, x20, ror #42 xar v2.2d, v12.2d, v29.2d, #0x15 bic x30, x15, x9, ror #16 eor x7, x7, x22, ror #25 eor v31.16b, v13.16b, v28.16b eor x12, x30, x20, ror #58 bic x20, x22, x16, ror #56 shl v12.2d, v31.2d, #0x19 eor x30, x27, x6, ror #43 eor x22, x20, x15, ror #23 sri v12.2d, v31.2d, #0x27 bic x6, x19, x13, ror #42 eor x6, x6, x17, ror #41 xar v13.2d, v19.2d, v27.2d, #0x38 bic x5, x13, x17, ror #63 eor x5, x21, x5, ror #21 eor v31.16b, v23.16b, v28.16b bic x17, x17, x21, ror #44 eor x27, x27, x10, ror #23 shl v19.2d, v31.2d, #0x38 bic x21, x21, x25, ror #50 bic x20, x27, x4, ror #25 sri v19.2d, v31.2d, #0x8 bic x10, x16, x15, ror #31 eor x16, x21, x19, ror #43 xar v23.2d, v15.2d, v26.2d, #0x17 eor x21, x17, x25, ror #30 bic x19, x25, x19, ror #57 eor v31.16b, v1.16b, v25.16b ldr x25, [sp, #0x18] eor x17, x10, x9, ror #47 shl v15.2d, v31.2d, #0x1 ldr x9, [sp, #0x8] sri v15.2d, v31.2d, #0x3f eor x15, x20, x28, ror #27 bic x20, x4, x28, ror #2 xar v1.2d, v8.2d, v28.2d, #0x9 eor x10, x20, x1, ror #50 bic x20, x11, x27, ror #60 eor v31.16b, v16.16b, v25.16b eor x20, x20, x4, ror #21 bic x4, x28, x1, ror #48 shl v8.2d, v31.2d, #0x2d bic x1, x1, x11, ror #57 ldr x28, [x9, x25, lsl #3] sri v8.2d, v31.2d, #0x13 ldr x9, [sp, #0xd0] add x25, x25, #0x1 xar v16.2d, v7.2d, v29.2d, #0x3a str x25, [sp, #0x18] cmp x25, #0x17 eor v31.16b, v10.16b, v26.16b eor x25, x1, x27, ror #53 bic x27, x30, x26, ror #47 shl v7.2d, v31.2d, #0x3 eor x1, x5, x28 eor x5, x4, x11, ror #41 sri 
v7.2d, v31.2d, #0x3d eor x11, x19, x13, ror #35 bic x13, x26, x24, ror #10 xar v10.2d, v3.2d, v28.2d, #0x24 eor x28, x27, x24, ror #57 bic x27, x24, x9, ror #47 eor v31.16b, v18.16b, v28.16b bic x19, x23, x3, ror #9 bic x4, x29, x14, ror #41 shl v3.2d, v31.2d, #0x15 eor x24, x19, x29, ror #44 bic x29, x3, x29, ror #35 sri v3.2d, v31.2d, #0x2b eor x13, x13, x9, ror #57 eor x19, x29, x14, ror #12 xar v18.2d, v17.2d, v29.2d, #0x31 bic x29, x9, x0, ror #19 bic x14, x14, x8, ror #5 eor v31.16b, v11.16b, v25.16b eor x9, x14, x23, ror #43 eor x14, x4, x8, ror #46 shl v17.2d, v31.2d, #0xa bic x23, x8, x23, ror #38 eor x8, x27, x0, ror #2 sri v17.2d, v31.2d, #0x36 eor x4, x23, x3, ror #47 bic x3, x0, x30, ror #5 xar v11.2d, v9.2d, v27.2d, #0x2c eor x23, x3, x26, ror #52 eor x3, x29, x30, ror #24 eor v31.16b, v22.16b, v29.16b eor x0, x15, x11, ror #52 shl v9.2d, v31.2d, #0x3d eor x0, x0, x13, ror #48 eor x26, x8, x9, ror #57 sri v9.2d, v31.2d, #0x3 eor x27, x0, x14, ror #10 eor x29, x16, x28, ror #63 xar v22.2d, v14.2d, v27.2d, #0x19 eor x26, x26, x6, ror #51 eor x30, x23, x22, ror #50 eor v31.16b, v20.16b, v26.16b eor x0, x26, x10, ror #31 eor x29, x29, x19, ror #37 shl v14.2d, v31.2d, #0x12 eor x27, x27, x12, ror #5 eor x30, x30, x24, ror #34 sri v14.2d, v31.2d, #0x2e eor x0, x0, x7, ror #27 eor x26, x30, x21, ror #26 xar v20.2d, v4.2d, v27.2d, #0x25 eor x26, x26, x25, ror #15 ror x30, x27, #0x3e eor v31.16b, v24.16b, v27.16b eor x30, x30, x26, ror #57 ror x26, x26, #0x3a shl v4.2d, v31.2d, #0xe eor x16, x30, x16 eor x28, x30, x28, ror #63 sri v4.2d, v31.2d, #0x32 str x28, [sp, #0xd0] eor x29, x29, x17, ror #36 xar v24.2d, v21.2d, v25.2d, #0x3e eor x28, x1, x2, ror #61 eor x19, x30, x19, ror #37 eor v31.16b, v5.16b, v26.16b eor x29, x29, x20, ror #2 eor x28, x28, x4, ror #54 shl v21.2d, v31.2d, #0x24 eor x26, x26, x0, ror #55 eor x28, x28, x3, ror #39 sri v21.2d, v31.2d, #0x1c eor x28, x28, x5, ror #25 ror x0, x0, #0x38 xar v27.2d, v6.2d, v25.2d, #0x14 eor x0, x0, x29, ror #63 eor x27, x28, x27, ror #61 bic v31.16b, v7.16b, v11.16b eor x13, x0, x13, ror #46 eor x28, x29, x28, ror #63 eor v5.16b, v31.16b, v10.16b eor x29, x30, x20, ror #2 eor x20, x26, x3, ror #39 bcax v6.16b, v11.16b, v8.16b, v7.16b eor x11, x0, x11, ror #50 eor x25, x28, x25, ror #9 bic v31.16b, v9.16b, v8.16b eor x3, x28, x21, ror #20 eor v7.16b, v31.16b, v7.16b eor x21, x26, x1 eor x9, x27, x9, ror #49 bcax v8.16b, v8.16b, v10.16b, v9.16b eor x24, x28, x24, ror #28 eor x1, x30, x17, ror #36 bic v31.16b, v11.16b, v10.16b eor x14, x0, x14, ror #8 eor x22, x28, x22, ror #44 eor v9.16b, v31.16b, v9.16b eor x8, x27, x8, ror #56 eor x17, x27, x7, ror #19 bcax v10.16b, v15.16b, v12.16b, v16.16b eor x15, x0, x15, ror #62 bic x7, x20, x22, ror #47 bic v31.16b, v13.16b, v12.16b eor x4, x26, x4, ror #54 eor x0, x0, x12, ror #3 eor v11.16b, v31.16b, v16.16b eor x28, x28, x23, ror #58 eor x23, x26, x2, ror #61 bcax v12.16b, v12.16b, v14.16b, v13.16b eor x26, x26, x5, ror #25 eor x2, x7, x16, ror #39 bic v31.16b, v15.16b, v14.16b bic x7, x9, x20, ror #42 bic x30, x15, x9, ror #16 eor v13.16b, v31.16b, v13.16b eor x7, x7, x22, ror #25 eor x12, x30, x20, ror #58 bic v31.16b, v16.16b, v15.16b bic x20, x22, x16, ror #56 eor x30, x27, x6, ror #43 eor v14.16b, v31.16b, v14.16b eor x22, x20, x15, ror #23 bic x6, x19, x13, ror #42 bcax v15.16b, v20.16b, v17.16b, v21.16b eor x6, x6, x17, ror #41 bic x5, x13, x17, ror #63 bic v31.16b, v18.16b, v17.16b eor x5, x21, x5, ror #21 bic x17, x17, x21, ror #44 eor v16.16b, v31.16b, v21.16b eor x27, x27, x10, 
ror #23 bic x21, x21, x25, ror #50 bcax v17.16b, v17.16b, v19.16b, v18.16b bic x20, x27, x4, ror #25 bic x10, x16, x15, ror #31 bic v31.16b, v20.16b, v19.16b eor x16, x21, x19, ror #43 eor x21, x17, x25, ror #30 eor v18.16b, v31.16b, v18.16b bic x19, x25, x19, ror #57 ldr x25, [sp, #0x18] bcax v19.16b, v19.16b, v21.16b, v20.16b eor x17, x10, x9, ror #47 bic v31.16b, v22.16b, v1.16b ldr x9, [sp, #0x8] eor x15, x20, x28, ror #27 eor v20.16b, v31.16b, v0.16b bic x20, x4, x28, ror #2 eor x10, x20, x1, ror #50 bcax v21.16b, v1.16b, v23.16b, v22.16b bic x20, x11, x27, ror #60 eor x20, x20, x4, ror #21 bic v31.16b, v24.16b, v23.16b bic x4, x28, x1, ror #48 bic x1, x1, x11, ror #57 eor v22.16b, v31.16b, v22.16b ldr x28, [x9, x25, lsl #3] ldr x9, [sp, #0xd0] bcax v23.16b, v23.16b, v0.16b, v24.16b add x25, x25, #0x1 str x25, [sp, #0x18] bic v31.16b, v1.16b, v0.16b cmp x25, #0x17 eor x25, x1, x27, ror #53 eor v24.16b, v31.16b, v24.16b bic x27, x30, x26, ror #47 eor x1, x5, x28 bcax v0.16b, v30.16b, v2.16b, v27.16b eor x5, x4, x11, ror #41 eor x11, x19, x13, ror #35 bic v31.16b, v3.16b, v2.16b bic x13, x26, x24, ror #10 eor x28, x27, x24, ror #57 eor v1.16b, v31.16b, v27.16b bic x27, x24, x9, ror #47 bic x19, x23, x3, ror #9 bcax v2.16b, v2.16b, v4.16b, v3.16b bic x4, x29, x14, ror #41 eor x24, x19, x29, ror #44 bic v31.16b, v30.16b, v4.16b bic x29, x3, x29, ror #35 eor x13, x13, x9, ror #57 eor v3.16b, v31.16b, v3.16b eor x19, x29, x14, ror #12 bic x29, x9, x0, ror #19 bcax v4.16b, v4.16b, v27.16b, v30.16b bic x14, x14, x8, ror #5 eor x9, x14, x23, ror #43 eor x14, x4, x8, ror #46 bic x23, x8, x23, ror #38 eor x8, x27, x0, ror #2 eor x4, x23, x3, ror #47 bic x3, x0, x30, ror #5 eor x23, x3, x26, ror #52 eor x3, x29, x30, ror #24 ldr x30, [sp, #0x10] ld1r { v28.2d }, [x30], #8 str x30, [sp, #0x10] eor v0.16b, v0.16b, v28.16b b.le Lsha3_keccak4_f1600_alt2_loop ror x2, x2, #0x3d ror x3, x3, #0x27 ror x4, x4, #0x36 ror x5, x5, #0x19 ror x6, x6, #0x2b ror x7, x7, #0x13 ror x8, x8, #0x38 ror x9, x9, #0x31 ror x10, x10, #0x17 ror x11, x11, #0x32 ror x12, x12, #0x3 ror x13, x13, #0x2e ror x14, x14, #0x8 ror x15, x15, #0x3e ror x17, x17, #0x24 ror x28, x28, #0x3f ror x19, x19, #0x25 ror x20, x20, #0x2 ror x21, x21, #0x14 ror x22, x22, #0x2c ror x23, x23, #0x3a ror x24, x24, #0x1c ror x25, x25, #0x9 ldr x30, [sp, #0x20] cmp x30, #0x1 b.eq Lsha3_keccak4_f1600_alt2_done mov x30, #0x1 str x30, [sp, #0x20] ldr x0, [sp] add x0, x0, #0x190 stp x1, x6, [x0] stp x11, x16, [x0, #0x10] stp x21, x2, [x0, #0x20] stp x7, x12, [x0, #0x30] stp x17, x22, [x0, #0x40] stp x3, x8, [x0, #0x50] stp x13, x28, [x0, #0x60] stp x23, x4, [x0, #0x70] stp x9, x14, [x0, #0x80] stp x19, x24, [x0, #0x90] stp x5, x10, [x0, #0xa0] stp x15, x20, [x0, #0xb0] str x25, [x0, #0xc0] sub x0, x0, #0x190 add x0, x0, #0x258 ldp x1, x6, [x0] ldp x11, x16, [x0, #0x10] ldp x21, x2, [x0, #0x20] ldp x7, x12, [x0, #0x30] ldp x17, x22, [x0, #0x40] ldp x3, x8, [x0, #0x50] ldp x13, x28, [x0, #0x60] ldp x23, x4, [x0, #0x70] ldp x9, x14, [x0, #0x80] ldp x19, x24, [x0, #0x90] ldp x5, x10, [x0, #0xa0] ldp x15, x20, [x0, #0xb0] ldr x25, [x0, #0xc0] sub x0, x0, #0x258 b Lsha3_keccak4_f1600_alt2_initial Lsha3_keccak4_f1600_alt2_done: ldr x0, [sp] add x0, x0, #0x258 stp x1, x6, [x0] stp x11, x16, [x0, #0x10] stp x21, x2, [x0, #0x20] stp x7, x12, [x0, #0x30] stp x17, x22, [x0, #0x40] stp x3, x8, [x0, #0x50] stp x13, x28, [x0, #0x60] stp x23, x4, [x0, #0x70] stp x9, x14, [x0, #0x80] stp x19, x24, [x0, #0x90] stp x5, x10, [x0, #0xa0] stp x15, x20, [x0, #0xb0] str x25, 
[x0, #0xc0] sub x0, x0, #0x258 add x2, x0, #0xc8 trn1 v25.2d, v0.2d, v1.2d trn1 v26.2d, v2.2d, v3.2d stp q25, q26, [x0] trn2 v25.2d, v0.2d, v1.2d trn2 v26.2d, v2.2d, v3.2d stp q25, q26, [x2] trn1 v25.2d, v4.2d, v5.2d trn1 v26.2d, v6.2d, v7.2d stp q25, q26, [x0, #0x20] trn2 v25.2d, v4.2d, v5.2d trn2 v26.2d, v6.2d, v7.2d stp q25, q26, [x2, #0x20] trn1 v25.2d, v8.2d, v9.2d trn1 v26.2d, v10.2d, v11.2d stp q25, q26, [x0, #0x40] trn2 v25.2d, v8.2d, v9.2d trn2 v26.2d, v10.2d, v11.2d stp q25, q26, [x2, #0x40] trn1 v25.2d, v12.2d, v13.2d trn1 v26.2d, v14.2d, v15.2d stp q25, q26, [x0, #0x60] trn2 v25.2d, v12.2d, v13.2d trn2 v26.2d, v14.2d, v15.2d stp q25, q26, [x2, #0x60] trn1 v25.2d, v16.2d, v17.2d trn1 v26.2d, v18.2d, v19.2d stp q25, q26, [x0, #0x80] trn2 v25.2d, v16.2d, v17.2d trn2 v26.2d, v18.2d, v19.2d stp q25, q26, [x2, #0x80] trn1 v25.2d, v20.2d, v21.2d trn1 v26.2d, v22.2d, v23.2d stp q25, q26, [x0, #0xa0] trn2 v25.2d, v20.2d, v21.2d trn2 v26.2d, v22.2d, v23.2d stp q25, q26, [x2, #0xa0] str d24, [x0, #0xc0] trn2 v24.2d, v24.2d, v24.2d str d24, [x2, #0xc0] CFI_STACKLOAD2(d14,d15,192) CFI_STACKLOAD2(d12,d13,176) CFI_STACKLOAD2(d10,d11,160) CFI_STACKLOAD2(d8,d9,144) CFI_STACKLOAD2(x19,x20,48) CFI_STACKLOAD2(x21,x22,64) CFI_STACKLOAD2(x23,x24,80) CFI_STACKLOAD2(x25,x26,96) CFI_STACKLOAD2(x27,x28,112) CFI_STACKLOAD2(x29,x30,128) CFI_INC_SP(224) CFI_RET S2N_BN_SIZE_DIRECTIVE(sha3_keccak4_f1600_alt2) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
wlsfx/bnbb
4,078
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_cmul_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Multiply by a single word modulo p_521, z := (c * x) mod p_521, assuming
// x reduced
// Inputs c, x[9]; output z[9]
//
//    extern void bignum_cmul_p521(uint64_t z[static 9], uint64_t c,
//                                 const uint64_t x[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = c, X2 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p521)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_p521)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p521)
        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p521_alt)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_cmul_p521_alt)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p521_alt)
        .text
        .balign 4

#define z x0
#define c x1
#define x x2

#define d0 x3
#define d1 x4
#define d2 x5
#define d3 x6
#define d4 x7
#define d5 x8
#define d6 x9
#define d7 x10
#define d8 x11
#define d9 x12

// Heavily aliased subject to ordering

#define a0 d3
#define a1 d4
#define a2 d5
#define a3 d6
#define a4 d7
#define a5 d8
#define a6 d9
#define h d9

// Other variables

#define a7 x13
#define a8 x14
#define dd x15

S2N_BN_SYMBOL(bignum_cmul_p521):

S2N_BN_SYMBOL(bignum_cmul_p521_alt):

        CFI_START

// First do the multiply, getting [d9; ...; d0], and as this is done
// accumulate an AND "dd" of digits d7,...,d1 for later use

        ldp     a0, a1, [x]
        mul     d0, c, a0
        mul     d1, c, a1
        umulh   a0, c, a0
        adds    d1, d1, a0
        umulh   a1, c, a1
        ldp     a2, a3, [x, #16]
        mul     d2, c, a2
        mul     d3, c, a3
        umulh   a2, c, a2
        adcs    d2, d2, a1
        and     dd, d1, d2
        umulh   a3, c, a3
        adcs    d3, d3, a2
        and     dd, dd, d3
        ldp     a4, a5, [x, #32]
        mul     d4, c, a4
        mul     d5, c, a5
        umulh   a4, c, a4
        adcs    d4, d4, a3
        and     dd, dd, d4
        umulh   a5, c, a5
        adcs    d5, d5, a4
        and     dd, dd, d5
        ldp     a6, a7, [x, #48]
        mul     d6, c, a6
        mul     d7, c, a7
        umulh   a6, c, a6
        adcs    d6, d6, a5
        and     dd, dd, d6
        umulh   a7, c, a7
        adcs    d7, d7, a6
        and     dd, dd, d7
        ldr     a8, [x, #64]
        mul     d8, c, a8
        adcs    d8, d8, a7
        umulh   a8, c, a8
        adc     d9, xzr, a8

// Extract the high part h and mask off the low part l = [d8;d7;...;d0]
// but stuff d8 with 1 bits at the left to ease a comparison below

        extr    h, d9, d8, #9
        orr     d8, d8, #~0x1FF

// Decide whether h + l >= p_521 <=> h + l + 1 >= 2^521. Since this can only
// happen if digits d7,...d1 are all 1s, we use the AND of them "dd" to
// condense the carry chain, and since we stuffed 1 bits into d8 we get
// the result in CF without an additional comparison.

        subs    xzr, xzr, xzr
        adcs    xzr, d0, h
        adcs    xzr, dd, xzr
        adcs    xzr, d8, xzr

// Now if CF is set we want (h + l) - p_521 = (h + l + 1) - 2^521
// while otherwise we want just h + l. So mask h + l + CF to 521 bits.
// This masking also gets rid of the stuffing with 1s we did above.

        adcs    d0, d0, h
        adcs    d1, d1, xzr
        adcs    d2, d2, xzr
        adcs    d3, d3, xzr
        adcs    d4, d4, xzr
        adcs    d5, d5, xzr
        adcs    d6, d6, xzr
        adcs    d7, d7, xzr
        adc     d8, d8, xzr
        and     d8, d8, #0x1FF

// Store the result

        stp     d0, d1, [z]
        stp     d2, d3, [z, #16]
        stp     d4, d5, [z, #32]
        stp     d6, d7, [z, #48]
        str     d8, [z, #64]

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_cmul_p521)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
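The reduction trick documented in the comments above can be mirrored in portable C for reference. The sketch below is illustrative only and not part of s2n-bignum: the helper name cmul_p521_ref, the use of unsigned __int128, and the data-dependent branch are assumptions made for clarity (the assembly above is branch-free and constant-time). It relies only on the identity 2^521 = 1 (mod p_521) for p_521 = 2^521 - 1.

#include <stdint.h>

// Illustrative reference model, not the library implementation and not
// constant-time. The 585-bit product c*x splits as l + 2^521*h, and since
// 2^521 == 1 (mod p_521) it reduces to l + h plus one final subtraction.
static void cmul_p521_ref(uint64_t z[9], uint64_t c, const uint64_t x[9])
{
    uint64_t d[10], w[9];
    unsigned __int128 t = 0;

    // Schoolbook 9x1 multiply: [d9;...;d0] = c * x
    for (int i = 0; i < 9; i++) {
        t += (unsigned __int128)c * x[i];
        d[i] = (uint64_t)t;
        t >>= 64;
    }
    d[9] = (uint64_t)t;

    // h = bits 521 and up, l = low 521 bits (top limb masked to 9 bits)
    uint64_t h = (d[8] >> 9) | (d[9] << 55);
    d[8] &= 0x1FF;

    // s = l + h < 2^521 + 2^64, so at most one subtraction of p_521 remains
    t = h;
    for (int i = 0; i < 9; i++) {
        t += d[i];
        z[i] = (uint64_t)t;
        t >>= 64;
    }

    // s >= p_521  <=>  s + 1 >= 2^521, and then s - p_521 = (s + 1) - 2^521
    t = 1;
    for (int i = 0; i < 9; i++) {
        t += z[i];
        w[i] = (uint64_t)t;
        t >>= 64;
    }
    if (w[8] >> 9) {                    // carry reached bit 521
        for (int i = 0; i < 9; i++)
            z[i] = w[i];
        z[8] &= 0x1FF;                  // drop the 2^521 bit
    }
}

The assembly reaches the same verdict without materializing s + 1: stuffing 1 bits into d8 and condensing d7,...,d1 through the AND word dd lets a four-instruction adcs chain leave the "s + 1 >= 2^521" result in the carry flag, which the final masked adc chain then consumes.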
wlsfx/bnbb
40,599
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/p521_jmixadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Point mixed addition on NIST curve P-521 in Jacobian coordinates
//
//    extern void p521_jmixadd_alt(uint64_t p3[static 27],
//                                 const uint64_t p1[static 27],
//                                 const uint64_t p2[static 18]);
//
// Does p3 := p1 + p2 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// The "mixed" part means that p2 only has x and y coordinates, with the
// implicit z coordinate assumed to be the identity. It is assumed that
// all the coordinates of the input points p1 and p2 are fully reduced
// mod p_521, that the z coordinate of p1 is nonzero and that neither
// p1 =~= p2 nor p1 =~= -p2, where "=~=" means "represents the same affine
// point as".
//
// Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jmixadd_alt)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(p521_jmixadd_alt)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jmixadd_alt)
        .text
        .balign 4

// Size of individual field elements

#define NUMSIZE 72

// Stable homes for input arguments during main code sequence

#define input_z x26
#define input_x x27
#define input_y x28

// Pointer-offset pairs for inputs and outputs

#define x_1 input_x, #0
#define y_1 input_x, #NUMSIZE
#define z_1 input_x, #(2*NUMSIZE)

#define x_2 input_y, #0
#define y_2 input_y, #NUMSIZE

#define x_3 input_z, #0
#define y_3 input_z, #NUMSIZE
#define z_3 input_z, #(2*NUMSIZE)

// Pointer-offset pairs for temporaries, with some aliasing
// NSPACE is the total stack needed for these temporaries

#define zp2 sp, #(NUMSIZE*0)
#define ww sp, #(NUMSIZE*0)
#define resx sp, #(NUMSIZE*0)

#define yd sp, #(NUMSIZE*1)
#define y2a sp, #(NUMSIZE*1)

#define x2a sp, #(NUMSIZE*2)
#define zzx2 sp, #(NUMSIZE*2)

#define zz sp, #(NUMSIZE*3)
#define t1 sp, #(NUMSIZE*3)

#define t2 sp, #(NUMSIZE*4)
#define zzx1 sp, #(NUMSIZE*4)
#define resy sp, #(NUMSIZE*4)

#define xd sp, #(NUMSIZE*5)
#define resz sp, #(NUMSIZE*5)

#define NSPACE NUMSIZE*6

// Corresponds exactly to bignum_mul_p521_alt

#define mul_p521(P0,P1,P2) \
        ldp     x3, x4, [P1] __LF \
        ldp     x5, x6, [P2] __LF \
        mul     x15, x3, x5 __LF \
        umulh   x16, x3, x5 __LF \
        mul     x14, x3, x6 __LF \
        umulh   x17, x3, x6 __LF \
        adds    x16, x16, x14 __LF \
        ldp     x7, x8, [P2+16] __LF \
        mul     x14, x3, x7 __LF \
        umulh   x19, x3, x7 __LF \
        adcs    x17, x17, x14 __LF \
        mul     x14, x3, x8 __LF \
        umulh   x20, x3, x8 __LF \
        adcs    x19, x19, x14 __LF \
        ldp     x9, x10, [P2+32] __LF \
        mul     x14, x3, x9 __LF \
        umulh   x21, x3, x9 __LF \
        adcs    x20, x20, x14 __LF \
        mul     x14, x3, x10 __LF \
        umulh   x22, x3, x10 __LF \
        adcs    x21, x21, x14 __LF \
        ldp     x11, x12, [P2+48] __LF \
        mul     x14, x3, x11 __LF \
        umulh   x23, x3, x11 __LF \
        adcs    x22, x22, x14 __LF \
        ldr     x13, [P2+64] __LF \
        mul     x14, x3, x12 __LF \
        umulh   x24, x3, x12 __LF \
        adcs    x23, x23, x14 __LF \
        mul     x14, x3, x13 __LF \
        umulh   x1, x3, x13 __LF \
        adcs    x24, x24, x14 __LF \
        adc     x1, x1, xzr __LF \
        mul     x14, x4, x5 __LF \
        adds    x16, x16, x14 __LF \
        mul     x14, x4, x6 __LF \
        adcs    x17, x17, x14 __LF \
        mul     x14, x4, x7 __LF \
        adcs    x19, x19, x14 __LF \
        mul     x14, x4, x8 __LF \
        adcs    x20, x20, x14 __LF \
        mul     x14, x4, x9 __LF \
        adcs    x21, x21, x14 __LF \
        mul     x14, x4, x10 __LF \
        adcs    x22, x22, x14 __LF \
        mul     x14, x4, x11 __LF \
        adcs    x23, x23, x14 __LF \
        mul     x14, x4, x12 __LF \
        adcs    x24, x24, x14 __LF \
        mul
x14, x4, x13 __LF \ adcs x1, x1, x14 __LF \ cset x0, hs __LF \ umulh x14, x4, x5 __LF \ adds x17, x17, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x20, x20, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x21, x21, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x13 __LF \ adc x0, x0, x14 __LF \ stp x15, x16, [P0] __LF \ ldp x3, x4, [P1+16] __LF \ mul x14, x3, x5 __LF \ adds x17, x17, x14 __LF \ mul x14, x3, x6 __LF \ adcs x19, x19, x14 __LF \ mul x14, x3, x7 __LF \ adcs x20, x20, x14 __LF \ mul x14, x3, x8 __LF \ adcs x21, x21, x14 __LF \ mul x14, x3, x9 __LF \ adcs x22, x22, x14 __LF \ mul x14, x3, x10 __LF \ adcs x23, x23, x14 __LF \ mul x14, x3, x11 __LF \ adcs x24, x24, x14 __LF \ mul x14, x3, x12 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x13 __LF \ adcs x0, x0, x14 __LF \ cset x15, hs __LF \ umulh x14, x3, x5 __LF \ adds x19, x19, x14 __LF \ umulh x14, x3, x6 __LF \ adcs x20, x20, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x21, x21, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x3, x12 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x13 __LF \ adc x15, x15, x14 __LF \ mul x14, x4, x5 __LF \ adds x19, x19, x14 __LF \ mul x14, x4, x6 __LF \ adcs x20, x20, x14 __LF \ mul x14, x4, x7 __LF \ adcs x21, x21, x14 __LF \ mul x14, x4, x8 __LF \ adcs x22, x22, x14 __LF \ mul x14, x4, x9 __LF \ adcs x23, x23, x14 __LF \ mul x14, x4, x10 __LF \ adcs x24, x24, x14 __LF \ mul x14, x4, x11 __LF \ adcs x1, x1, x14 __LF \ mul x14, x4, x12 __LF \ adcs x0, x0, x14 __LF \ mul x14, x4, x13 __LF \ adcs x15, x15, x14 __LF \ cset x16, hs __LF \ umulh x14, x4, x5 __LF \ adds x20, x20, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x21, x21, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x4, x13 __LF \ adc x16, x16, x14 __LF \ stp x17, x19, [P0+16] __LF \ ldp x3, x4, [P1+32] __LF \ mul x14, x3, x5 __LF \ adds x20, x20, x14 __LF \ mul x14, x3, x6 __LF \ adcs x21, x21, x14 __LF \ mul x14, x3, x7 __LF \ adcs x22, x22, x14 __LF \ mul x14, x3, x8 __LF \ adcs x23, x23, x14 __LF \ mul x14, x3, x9 __LF \ adcs x24, x24, x14 __LF \ mul x14, x3, x10 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x11 __LF \ adcs x0, x0, x14 __LF \ mul x14, x3, x12 __LF \ adcs x15, x15, x14 __LF \ mul x14, x3, x13 __LF \ adcs x16, x16, x14 __LF \ cset x17, hs __LF \ umulh x14, x3, x5 __LF \ adds x21, x21, x14 __LF \ umulh x14, x3, x6 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x3, x12 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x3, x13 __LF \ adc x17, x17, x14 __LF \ mul x14, x4, x5 __LF \ adds x21, x21, x14 __LF \ mul x14, x4, x6 __LF \ adcs x22, x22, x14 __LF \ mul x14, x4, x7 __LF \ adcs x23, x23, x14 __LF \ mul x14, x4, x8 __LF 
\ adcs x24, x24, x14 __LF \ mul x14, x4, x9 __LF \ adcs x1, x1, x14 __LF \ mul x14, x4, x10 __LF \ adcs x0, x0, x14 __LF \ mul x14, x4, x11 __LF \ adcs x15, x15, x14 __LF \ mul x14, x4, x12 __LF \ adcs x16, x16, x14 __LF \ mul x14, x4, x13 __LF \ adcs x17, x17, x14 __LF \ cset x19, hs __LF \ umulh x14, x4, x5 __LF \ adds x22, x22, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x4, x13 __LF \ adc x19, x19, x14 __LF \ stp x20, x21, [P0+32] __LF \ ldp x3, x4, [P1+48] __LF \ mul x14, x3, x5 __LF \ adds x22, x22, x14 __LF \ mul x14, x3, x6 __LF \ adcs x23, x23, x14 __LF \ mul x14, x3, x7 __LF \ adcs x24, x24, x14 __LF \ mul x14, x3, x8 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x9 __LF \ adcs x0, x0, x14 __LF \ mul x14, x3, x10 __LF \ adcs x15, x15, x14 __LF \ mul x14, x3, x11 __LF \ adcs x16, x16, x14 __LF \ mul x14, x3, x12 __LF \ adcs x17, x17, x14 __LF \ mul x14, x3, x13 __LF \ adcs x19, x19, x14 __LF \ cset x20, hs __LF \ umulh x14, x3, x5 __LF \ adds x23, x23, x14 __LF \ umulh x14, x3, x6 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x3, x12 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x3, x13 __LF \ adc x20, x20, x14 __LF \ mul x14, x4, x5 __LF \ adds x23, x23, x14 __LF \ mul x14, x4, x6 __LF \ adcs x24, x24, x14 __LF \ mul x14, x4, x7 __LF \ adcs x1, x1, x14 __LF \ mul x14, x4, x8 __LF \ adcs x0, x0, x14 __LF \ mul x14, x4, x9 __LF \ adcs x15, x15, x14 __LF \ mul x14, x4, x10 __LF \ adcs x16, x16, x14 __LF \ mul x14, x4, x11 __LF \ adcs x17, x17, x14 __LF \ mul x14, x4, x12 __LF \ adcs x19, x19, x14 __LF \ mul x14, x4, x13 __LF \ adcs x20, x20, x14 __LF \ cset x21, hs __LF \ umulh x14, x4, x5 __LF \ adds x24, x24, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x20, x20, x14 __LF \ umulh x14, x4, x13 __LF \ adc x21, x21, x14 __LF \ stp x22, x23, [P0+48] __LF \ ldr x3, [P1+64] __LF \ mul x14, x3, x5 __LF \ adds x24, x24, x14 __LF \ mul x14, x3, x6 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x7 __LF \ adcs x0, x0, x14 __LF \ mul x14, x3, x8 __LF \ adcs x15, x15, x14 __LF \ mul x14, x3, x9 __LF \ adcs x16, x16, x14 __LF \ mul x14, x3, x10 __LF \ adcs x17, x17, x14 __LF \ mul x14, x3, x11 __LF \ adcs x19, x19, x14 __LF \ mul x14, x3, x12 __LF \ adcs x20, x20, x14 __LF \ mul x14, x3, x13 __LF \ adc x21, x21, x14 __LF \ umulh x14, x3, x5 __LF \ adds x1, x1, x14 __LF \ umulh x14, x3, x6 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x20, x20, x14 __LF \ umulh x14, x3, x12 __LF \ adc x21, x21, x14 __LF \ cmp xzr, 
xzr __LF \ ldp x5, x6, [P0] __LF \ extr x14, x1, x24, #9 __LF \ adcs x5, x5, x14 __LF \ extr x14, x0, x1, #9 __LF \ adcs x6, x6, x14 __LF \ ldp x7, x8, [P0+16] __LF \ extr x14, x15, x0, #9 __LF \ adcs x7, x7, x14 __LF \ extr x14, x16, x15, #9 __LF \ adcs x8, x8, x14 __LF \ ldp x9, x10, [P0+32] __LF \ extr x14, x17, x16, #9 __LF \ adcs x9, x9, x14 __LF \ extr x14, x19, x17, #9 __LF \ adcs x10, x10, x14 __LF \ ldp x11, x12, [P0+48] __LF \ extr x14, x20, x19, #9 __LF \ adcs x11, x11, x14 __LF \ extr x14, x21, x20, #9 __LF \ adcs x12, x12, x14 __LF \ orr x13, x24, #0xfffffffffffffe00 __LF \ lsr x14, x21, #9 __LF \ adcs x13, x13, x14 __LF \ sbcs x5, x5, xzr __LF \ sbcs x6, x6, xzr __LF \ sbcs x7, x7, xzr __LF \ sbcs x8, x8, xzr __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbc x13, x13, xzr __LF \ and x13, x13, #0x1ff __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] __LF \ stp x11, x12, [P0+48] __LF \ str x13, [P0+64] // Corresponds exactly to bignum_sqr_p521_alt #define sqr_p521(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x11, x2, x3 __LF \ umulh x12, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x10, x2, x4 __LF \ umulh x13, x2, x4 __LF \ adds x12, x12, x10 __LF \ ldp x6, x7, [P1+32] __LF \ mul x10, x2, x5 __LF \ umulh x14, x2, x5 __LF \ adcs x13, x13, x10 __LF \ ldp x8, x9, [P1+48] __LF \ mul x10, x2, x6 __LF \ umulh x15, x2, x6 __LF \ adcs x14, x14, x10 __LF \ mul x10, x2, x7 __LF \ umulh x16, x2, x7 __LF \ adcs x15, x15, x10 __LF \ mul x10, x2, x8 __LF \ umulh x17, x2, x8 __LF \ adcs x16, x16, x10 __LF \ mul x10, x2, x9 __LF \ umulh x19, x2, x9 __LF \ adcs x17, x17, x10 __LF \ adc x19, x19, xzr __LF \ mul x10, x3, x4 __LF \ adds x13, x13, x10 __LF \ mul x10, x3, x5 __LF \ adcs x14, x14, x10 __LF \ mul x10, x3, x6 __LF \ adcs x15, x15, x10 __LF \ mul x10, x3, x7 __LF \ adcs x16, x16, x10 __LF \ mul x10, x3, x8 __LF \ adcs x17, x17, x10 __LF \ mul x10, x3, x9 __LF \ adcs x19, x19, x10 __LF \ cset x20, hs __LF \ umulh x10, x3, x4 __LF \ adds x14, x14, x10 __LF \ umulh x10, x3, x5 __LF \ adcs x15, x15, x10 __LF \ umulh x10, x3, x6 __LF \ adcs x16, x16, x10 __LF \ umulh x10, x3, x7 __LF \ adcs x17, x17, x10 __LF \ umulh x10, x3, x8 __LF \ adcs x19, x19, x10 __LF \ umulh x10, x3, x9 __LF \ adc x20, x20, x10 __LF \ mul x10, x6, x7 __LF \ umulh x21, x6, x7 __LF \ adds x20, x20, x10 __LF \ adc x21, x21, xzr __LF \ mul x10, x4, x5 __LF \ adds x15, x15, x10 __LF \ mul x10, x4, x6 __LF \ adcs x16, x16, x10 __LF \ mul x10, x4, x7 __LF \ adcs x17, x17, x10 __LF \ mul x10, x4, x8 __LF \ adcs x19, x19, x10 __LF \ mul x10, x4, x9 __LF \ adcs x20, x20, x10 __LF \ mul x10, x6, x8 __LF \ adcs x21, x21, x10 __LF \ cset x22, hs __LF \ umulh x10, x4, x5 __LF \ adds x16, x16, x10 __LF \ umulh x10, x4, x6 __LF \ adcs x17, x17, x10 __LF \ umulh x10, x4, x7 __LF \ adcs x19, x19, x10 __LF \ umulh x10, x4, x8 __LF \ adcs x20, x20, x10 __LF \ umulh x10, x4, x9 __LF \ adcs x21, x21, x10 __LF \ umulh x10, x6, x8 __LF \ adc x22, x22, x10 __LF \ mul x10, x7, x8 __LF \ umulh x23, x7, x8 __LF \ adds x22, x22, x10 __LF \ adc x23, x23, xzr __LF \ mul x10, x5, x6 __LF \ adds x17, x17, x10 __LF \ mul x10, x5, x7 __LF \ adcs x19, x19, x10 __LF \ mul x10, x5, x8 __LF \ adcs x20, x20, x10 __LF \ mul x10, x5, x9 __LF \ adcs x21, x21, x10 __LF \ mul x10, x6, x9 __LF \ adcs x22, x22, x10 __LF \ mul x10, x7, x9 __LF \ adcs x23, x23, x10 __LF \ cset x24, hs __LF \ umulh x10, x5, x6 __LF \ adds x19, x19, x10 __LF \ umulh x10, x5, x7 __LF \ adcs x20, x20, 
x10 __LF \ umulh x10, x5, x8 __LF \ adcs x21, x21, x10 __LF \ umulh x10, x5, x9 __LF \ adcs x22, x22, x10 __LF \ umulh x10, x6, x9 __LF \ adcs x23, x23, x10 __LF \ umulh x10, x7, x9 __LF \ adc x24, x24, x10 __LF \ mul x10, x8, x9 __LF \ umulh x25, x8, x9 __LF \ adds x24, x24, x10 __LF \ adc x25, x25, xzr __LF \ adds x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, x14 __LF \ adcs x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adcs x17, x17, x17 __LF \ adcs x19, x19, x19 __LF \ adcs x20, x20, x20 __LF \ adcs x21, x21, x21 __LF \ adcs x22, x22, x22 __LF \ adcs x23, x23, x23 __LF \ adcs x24, x24, x24 __LF \ adcs x25, x25, x25 __LF \ cset x0, hs __LF \ umulh x10, x2, x2 __LF \ adds x11, x11, x10 __LF \ mul x10, x3, x3 __LF \ adcs x12, x12, x10 __LF \ umulh x10, x3, x3 __LF \ adcs x13, x13, x10 __LF \ mul x10, x4, x4 __LF \ adcs x14, x14, x10 __LF \ umulh x10, x4, x4 __LF \ adcs x15, x15, x10 __LF \ mul x10, x5, x5 __LF \ adcs x16, x16, x10 __LF \ umulh x10, x5, x5 __LF \ adcs x17, x17, x10 __LF \ mul x10, x6, x6 __LF \ adcs x19, x19, x10 __LF \ umulh x10, x6, x6 __LF \ adcs x20, x20, x10 __LF \ mul x10, x7, x7 __LF \ adcs x21, x21, x10 __LF \ umulh x10, x7, x7 __LF \ adcs x22, x22, x10 __LF \ mul x10, x8, x8 __LF \ adcs x23, x23, x10 __LF \ umulh x10, x8, x8 __LF \ adcs x24, x24, x10 __LF \ mul x10, x9, x9 __LF \ adcs x25, x25, x10 __LF \ umulh x10, x9, x9 __LF \ adc x0, x0, x10 __LF \ ldr x1, [P1+64] __LF \ add x1, x1, x1 __LF \ mul x10, x1, x2 __LF \ adds x19, x19, x10 __LF \ umulh x10, x1, x2 __LF \ adcs x20, x20, x10 __LF \ mul x10, x1, x4 __LF \ adcs x21, x21, x10 __LF \ umulh x10, x1, x4 __LF \ adcs x22, x22, x10 __LF \ mul x10, x1, x6 __LF \ adcs x23, x23, x10 __LF \ umulh x10, x1, x6 __LF \ adcs x24, x24, x10 __LF \ mul x10, x1, x8 __LF \ adcs x25, x25, x10 __LF \ umulh x10, x1, x8 __LF \ adcs x0, x0, x10 __LF \ lsr x4, x1, #1 __LF \ mul x4, x4, x4 __LF \ adc x4, x4, xzr __LF \ mul x10, x1, x3 __LF \ adds x20, x20, x10 __LF \ umulh x10, x1, x3 __LF \ adcs x21, x21, x10 __LF \ mul x10, x1, x5 __LF \ adcs x22, x22, x10 __LF \ umulh x10, x1, x5 __LF \ adcs x23, x23, x10 __LF \ mul x10, x1, x7 __LF \ adcs x24, x24, x10 __LF \ umulh x10, x1, x7 __LF \ adcs x25, x25, x10 __LF \ mul x10, x1, x9 __LF \ adcs x0, x0, x10 __LF \ umulh x10, x1, x9 __LF \ adc x4, x4, x10 __LF \ mul x2, x2, x2 __LF \ cmp xzr, xzr __LF \ extr x10, x20, x19, #9 __LF \ adcs x2, x2, x10 __LF \ extr x10, x21, x20, #9 __LF \ adcs x11, x11, x10 __LF \ extr x10, x22, x21, #9 __LF \ adcs x12, x12, x10 __LF \ extr x10, x23, x22, #9 __LF \ adcs x13, x13, x10 __LF \ extr x10, x24, x23, #9 __LF \ adcs x14, x14, x10 __LF \ extr x10, x25, x24, #9 __LF \ adcs x15, x15, x10 __LF \ extr x10, x0, x25, #9 __LF \ adcs x16, x16, x10 __LF \ extr x10, x4, x0, #9 __LF \ adcs x17, x17, x10 __LF \ orr x19, x19, #0xfffffffffffffe00 __LF \ lsr x10, x4, #9 __LF \ adcs x19, x19, x10 __LF \ sbcs x2, x2, xzr __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ sbcs x14, x14, xzr __LF \ sbcs x15, x15, xzr __LF \ sbcs x16, x16, xzr __LF \ sbcs x17, x17, xzr __LF \ sbc x19, x19, xzr __LF \ and x19, x19, #0x1ff __LF \ stp x2, x11, [P0] __LF \ stp x12, x13, [P0+16] __LF \ stp x14, x15, [P0+32] __LF \ stp x16, x17, [P0+48] __LF \ str x19, [P0+64] // Corresponds exactly to bignum_sub_p521 #define sub_p521(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ 
sbcs x8, x8, x3 __LF \ ldp x9, x10, [P1+32] __LF \ ldp x4, x3, [P2+32] __LF \ sbcs x9, x9, x4 __LF \ sbcs x10, x10, x3 __LF \ ldp x11, x12, [P1+48] __LF \ ldp x4, x3, [P2+48] __LF \ sbcs x11, x11, x4 __LF \ sbcs x12, x12, x3 __LF \ ldr x13, [P1+64] __LF \ ldr x4, [P2+64] __LF \ sbcs x13, x13, x4 __LF \ sbcs x5, x5, xzr __LF \ sbcs x6, x6, xzr __LF \ sbcs x7, x7, xzr __LF \ sbcs x8, x8, xzr __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ and x13, x13, #0x1ff __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] __LF \ stp x11, x12, [P0+48] __LF \ str x13, [P0+64] S2N_BN_SYMBOL(p521_jmixadd_alt): CFI_START // Save regs and make room on stack for temporary variables CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_PUSH2(x27,x28) CFI_DEC_SP(NSPACE) // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 mov input_y, x2 // Main code, just a sequence of basic field operations sqr_p521(zp2,z_1) mul_p521(y2a,z_1,y_2) mul_p521(x2a,zp2,x_2) mul_p521(y2a,zp2,y2a) sub_p521(xd,x2a,x_1) sub_p521(yd,y2a,y_1) sqr_p521(zz,xd) sqr_p521(ww,yd) mul_p521(zzx1,zz,x_1) mul_p521(zzx2,zz,x2a) sub_p521(resx,ww,zzx1) sub_p521(t1,zzx2,zzx1) mul_p521(resz,xd,z_1) sub_p521(resx,resx,zzx2) sub_p521(t2,zzx1,resx) mul_p521(t1,t1,y_1) mul_p521(t2,yd,t2) sub_p521(resy,t2,t1) // Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence) ldp x0, x1, [z_1] orr x0, x0, x1 ldp x2, x3, [z_1+16] orr x2, x2, x3 ldp x4, x5, [z_1+32] orr x4, x4, x5 ldp x6, x7, [z_1+48] orr x6, x6, x7 ldr x8, [z_1+64] orr x0, x0, x2 orr x4, x4, x6 orr x0, x0, x4 orr x0, x0, x8 cmp x0, xzr // Multiplex: if p1 <> 0 just copy the computed result from the staging area. // If p1 = 0 then return the point p2 augmented with an extra z = 1 // coordinate, hence giving 0 + p2 = p2 for the final result. 
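// The OR-tree and "cmp x0, xzr" above set ZF exactly when all nine limbs
// of z_1 are zero, i.e. when p1 is the point at infinity. Each csel below
// keeps the freshly computed limb when "ne" holds and otherwise substitutes
// the corresponding limb of p2, with the z coordinate forced to 1, so the
// whole selection is branch-free.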
ldp x0, x1, [resx] ldp x20, x21, [x_2] csel x0, x0, x20, ne csel x1, x1, x21, ne ldp x2, x3, [resx+16] ldp x20, x21, [x_2+16] csel x2, x2, x20, ne csel x3, x3, x21, ne ldp x4, x5, [resx+32] ldp x20, x21, [x_2+32] csel x4, x4, x20, ne csel x5, x5, x21, ne ldp x6, x7, [resx+48] ldp x20, x21, [x_2+48] csel x6, x6, x20, ne csel x7, x7, x21, ne ldr x8, [resx+64] ldr x20, [x_2+64] csel x8, x8, x20, ne ldp x10, x11, [resy] ldp x20, x21, [y_2] csel x10, x10, x20, ne csel x11, x11, x21, ne ldp x12, x13, [resy+16] ldp x20, x21, [y_2+16] csel x12, x12, x20, ne csel x13, x13, x21, ne ldp x14, x15, [resy+32] ldp x20, x21, [y_2+32] csel x14, x14, x20, ne csel x15, x15, x21, ne ldp x16, x17, [resy+48] ldp x20, x21, [y_2+48] csel x16, x16, x20, ne csel x17, x17, x21, ne ldr x19, [resy+64] ldr x20, [y_2+64] csel x19, x19, x20, ne stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [x_3+32] stp x6, x7, [x_3+48] str x8, [x_3+64] stp x10, x11, [y_3] stp x12, x13, [y_3+16] stp x14, x15, [y_3+32] stp x16, x17, [y_3+48] str x19, [y_3+64] ldp x0, x1, [resz] mov x20, #1 csel x0, x0, x20, ne csel x1, x1, xzr, ne ldp x2, x3, [resz+16] csel x2, x2, xzr, ne csel x3, x3, xzr, ne ldp x4, x5, [resz+32] csel x4, x4, xzr, ne csel x5, x5, xzr, ne ldp x6, x7, [resz+48] csel x6, x6, xzr, ne csel x7, x7, xzr, ne ldr x8, [resz+64] csel x8, x8, xzr, ne stp x0, x1, [z_3] stp x2, x3, [z_3+16] stp x4, x5, [z_3+32] stp x6, x7, [z_3+48] str x8, [z_3+64] // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP2(x27,x28) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(p521_jmixadd_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
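As a usage sketch, a caller might wrap the documented prototype as below. The struct and wrapper names are hypothetical; only the buffer layout (nine 64-bit limbs per coordinate, x then y then z, already reduced mod p_521) comes from the header comment of this file.

#include <stdint.h>

// Hypothetical layout types for the documented 27- and 18-word buffers:
// all-uint64_t structs, so the limbs are contiguous with no padding.
typedef struct { uint64_t x[9], y[9], z[9]; } p521_jacobian;  // 27 limbs
typedef struct { uint64_t x[9], y[9]; } p521_affine;          // 18 limbs

extern void p521_jmixadd_alt(uint64_t p3[static 27],
                             const uint64_t p1[static 27],
                             const uint64_t p2[static 18]);

// p3 := p1 + p2 with p2 affine (implicit z = 1); the inputs must satisfy
// the preconditions listed in the header comment of this file.
static inline void jacobian_mixadd(p521_jacobian *p3,
                                   const p521_jacobian *p1,
                                   const p521_affine *p2)
{
    p521_jmixadd_alt((uint64_t *)p3, (const uint64_t *)p1,
                     (const uint64_t *)p2);
}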
wlsfx/bnbb
54,392
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/p521_jdouble.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Point doubling on NIST curve P-521 in Jacobian coordinates
//
//    extern void p521_jdouble(uint64_t p3[static 27], const uint64_t p1[static 27]);
//
// Does p3 := 2 * p1 where all points are regarded as Jacobian triples.
// A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3).
// It is assumed that all coordinates of the input point are fully
// reduced mod p_521 and that the z coordinate is not zero.
//
// Standard ARM ABI: X0 = p3, X1 = p1
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jdouble)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(p521_jdouble)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jdouble)
        .text
        .balign 4

// Size of individual field elements

#define NUMSIZE 72

// Stable homes for input arguments during main code sequence

#define input_z x27
#define input_x x28

// Pointer-offset pairs for inputs and outputs

#define x_1 input_x, #0
#define y_1 input_x, #NUMSIZE
#define z_1 input_x, #(2*NUMSIZE)

#define x_3 input_z, #0
#define y_3 input_z, #NUMSIZE
#define z_3 input_z, #(2*NUMSIZE)

// Pointer-offset pairs for temporaries

#define z2 sp, #(NUMSIZE*0)
#define y2 sp, #(NUMSIZE*1)
#define x2p sp, #(NUMSIZE*2)
#define xy2 sp, #(NUMSIZE*3)
#define y4 sp, #(NUMSIZE*4)
#define t2 sp, #(NUMSIZE*4)
#define dx2 sp, #(NUMSIZE*5)
#define t1 sp, #(NUMSIZE*5)
#define d_ sp, #(NUMSIZE*6)
#define x4p sp, #(NUMSIZE*6)

// NUMSIZE*7 is not 16-aligned so we round it up

#define NSPACE 512

// For the two "big" field operations we use subroutines rather than
// inlining them, calling local code very close to bignum_mul_p521 and
// bignum_sqr_p521.
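// (A code-size tradeoff, presumably: the doubling sequence below invokes
// the big multiply and square several times each, and a branch-and-link
// to one shared local copy avoids duplicating a multi-hundred-instruction
// body at every use, while the cheaper add/sub/cmsub operations below
// remain inline macros.)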
#define mul_p521(P0,P1,P2) \ add x0, P0 __LF \ add x1, P1 __LF \ add x2, P2 __LF \ CFI_BL(Lp521_jdouble_local_mul_p521) // Call local code equivalent to bignum_sqr_p521 #define sqr_p521(P0,P1) \ add x0, P0 __LF \ add x1, P1 __LF \ CFI_BL(Lp521_jdouble_local_sqr_p521) // Corresponds exactly to bignum_add_p521 #define add_p521(P0,P1,P2) \ cmp xzr, xzr __LF \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ adcs x5, x5, x4 __LF \ adcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ adcs x7, x7, x4 __LF \ adcs x8, x8, x3 __LF \ ldp x9, x10, [P1+32] __LF \ ldp x4, x3, [P2+32] __LF \ adcs x9, x9, x4 __LF \ adcs x10, x10, x3 __LF \ ldp x11, x12, [P1+48] __LF \ ldp x4, x3, [P2+48] __LF \ adcs x11, x11, x4 __LF \ adcs x12, x12, x3 __LF \ ldr x13, [P1+64] __LF \ ldr x4, [P2+64] __LF \ adc x13, x13, x4 __LF \ subs x4, x13, #512 __LF \ csetm x4, hs __LF \ sbcs x5, x5, xzr __LF \ and x4, x4, #0x200 __LF \ sbcs x6, x6, xzr __LF \ sbcs x7, x7, xzr __LF \ sbcs x8, x8, xzr __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbc x13, x13, x4 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] __LF \ stp x11, x12, [P0+48] __LF \ str x13, [P0+64] // Corresponds exactly to bignum_sub_p521 #define sub_p521(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ ldp x9, x10, [P1+32] __LF \ ldp x4, x3, [P2+32] __LF \ sbcs x9, x9, x4 __LF \ sbcs x10, x10, x3 __LF \ ldp x11, x12, [P1+48] __LF \ ldp x4, x3, [P2+48] __LF \ sbcs x11, x11, x4 __LF \ sbcs x12, x12, x3 __LF \ ldr x13, [P1+64] __LF \ ldr x4, [P2+64] __LF \ sbcs x13, x13, x4 __LF \ sbcs x5, x5, xzr __LF \ sbcs x6, x6, xzr __LF \ sbcs x7, x7, xzr __LF \ sbcs x8, x8, xzr __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ and x13, x13, #0x1ff __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] __LF \ stp x11, x12, [P0+48] __LF \ str x13, [P0+64] // P0 = C * P1 - D * P2 == C * P1 + D * (p_521 - P2) #define cmsub_p521(P0,C,P1,D,P2) \ ldp x6, x7, [P1] __LF \ mov x1, #(C) __LF \ mul x3, x1, x6 __LF \ mul x4, x1, x7 __LF \ umulh x6, x1, x6 __LF \ adds x4, x4, x6 __LF \ umulh x7, x1, x7 __LF \ ldp x8, x9, [P1+16] __LF \ mul x5, x1, x8 __LF \ mul x6, x1, x9 __LF \ umulh x8, x1, x8 __LF \ adcs x5, x5, x7 __LF \ umulh x9, x1, x9 __LF \ adcs x6, x6, x8 __LF \ ldp x10, x11, [P1+32] __LF \ mul x7, x1, x10 __LF \ mul x8, x1, x11 __LF \ umulh x10, x1, x10 __LF \ adcs x7, x7, x9 __LF \ umulh x11, x1, x11 __LF \ adcs x8, x8, x10 __LF \ ldp x12, x13, [P1+48] __LF \ mul x9, x1, x12 __LF \ mul x10, x1, x13 __LF \ umulh x12, x1, x12 __LF \ adcs x9, x9, x11 __LF \ umulh x13, x1, x13 __LF \ adcs x10, x10, x12 __LF \ ldr x14, [P1+64] __LF \ mul x11, x1, x14 __LF \ adc x11, x11, x13 __LF \ mov x1, #(D) __LF \ ldp x20, x21, [P2] __LF \ mvn x20, x20 __LF \ mul x0, x1, x20 __LF \ umulh x20, x1, x20 __LF \ adds x3, x3, x0 __LF \ mvn x21, x21 __LF \ mul x0, x1, x21 __LF \ umulh x21, x1, x21 __LF \ adcs x4, x4, x0 __LF \ ldp x22, x23, [P2+16] __LF \ mvn x22, x22 __LF \ mul x0, x1, x22 __LF \ umulh x22, x1, x22 __LF \ adcs x5, x5, x0 __LF \ mvn x23, x23 __LF \ mul x0, x1, x23 __LF \ umulh x23, x1, x23 __LF \ adcs x6, x6, x0 __LF \ ldp x17, x19, [P2+32] __LF \ mvn x17, x17 __LF \ mul x0, x1, x17 __LF \ umulh x17, x1, x17 __LF \ 
adcs x7, x7, x0 __LF \ mvn x19, x19 __LF \ mul x0, x1, x19 __LF \ umulh x19, x1, x19 __LF \ adcs x8, x8, x0 __LF \ ldp x2, x16, [P2+48] __LF \ mvn x2, x2 __LF \ mul x0, x1, x2 __LF \ umulh x2, x1, x2 __LF \ adcs x9, x9, x0 __LF \ mvn x16, x16 __LF \ mul x0, x1, x16 __LF \ umulh x16, x1, x16 __LF \ adcs x10, x10, x0 __LF \ ldr x0, [P2+64] __LF \ eor x0, x0, #0x1ff __LF \ mul x0, x1, x0 __LF \ adc x11, x11, x0 __LF \ adds x4, x4, x20 __LF \ adcs x5, x5, x21 __LF \ and x15, x4, x5 __LF \ adcs x6, x6, x22 __LF \ and x15, x15, x6 __LF \ adcs x7, x7, x23 __LF \ and x15, x15, x7 __LF \ adcs x8, x8, x17 __LF \ and x15, x15, x8 __LF \ adcs x9, x9, x19 __LF \ and x15, x15, x9 __LF \ adcs x10, x10, x2 __LF \ and x15, x15, x10 __LF \ adc x11, x11, x16 __LF \ lsr x12, x11, #9 __LF \ orr x11, x11, #0xfffffffffffffe00 __LF \ cmp xzr, xzr __LF \ adcs xzr, x3, x12 __LF \ adcs xzr, x15, xzr __LF \ adcs xzr, x11, xzr __LF \ adcs x3, x3, x12 __LF \ adcs x4, x4, xzr __LF \ adcs x5, x5, xzr __LF \ adcs x6, x6, xzr __LF \ adcs x7, x7, xzr __LF \ adcs x8, x8, xzr __LF \ adcs x9, x9, xzr __LF \ adcs x10, x10, xzr __LF \ adc x11, x11, xzr __LF \ and x11, x11, #0x1ff __LF \ stp x3, x4, [P0] __LF \ stp x5, x6, [P0+16] __LF \ stp x7, x8, [P0+32] __LF \ stp x9, x10, [P0+48] __LF \ str x11, [P0+64] // P0 = 3 * P1 - 8 * P2 == 3 * P1 + 8 * (p_521 - P2) #define cmsub38_p521(P0,P1,P2) \ ldp x6, x7, [P1] __LF \ lsl x3, x6, #1 __LF \ adds x3, x3, x6 __LF \ extr x4, x7, x6, #63 __LF \ adcs x4, x4, x7 __LF \ ldp x8, x9, [P1+16] __LF \ extr x5, x8, x7, #63 __LF \ adcs x5, x5, x8 __LF \ extr x6, x9, x8, #63 __LF \ adcs x6, x6, x9 __LF \ ldp x10, x11, [P1+32] __LF \ extr x7, x10, x9, #63 __LF \ adcs x7, x7, x10 __LF \ extr x8, x11, x10, #63 __LF \ adcs x8, x8, x11 __LF \ ldp x12, x13, [P1+48] __LF \ extr x9, x12, x11, #63 __LF \ adcs x9, x9, x12 __LF \ extr x10, x13, x12, #63 __LF \ adcs x10, x10, x13 __LF \ ldr x14, [P1+64] __LF \ extr x11, x14, x13, #63 __LF \ adc x11, x11, x14 __LF \ ldp x20, x21, [P2] __LF \ mvn x20, x20 __LF \ lsl x0, x20, #3 __LF \ adds x3, x3, x0 __LF \ mvn x21, x21 __LF \ extr x0, x21, x20, #61 __LF \ adcs x4, x4, x0 __LF \ ldp x22, x23, [P2+16] __LF \ mvn x22, x22 __LF \ extr x0, x22, x21, #61 __LF \ adcs x5, x5, x0 __LF \ and x15, x4, x5 __LF \ mvn x23, x23 __LF \ extr x0, x23, x22, #61 __LF \ adcs x6, x6, x0 __LF \ and x15, x15, x6 __LF \ ldp x20, x21, [P2+32] __LF \ mvn x20, x20 __LF \ extr x0, x20, x23, #61 __LF \ adcs x7, x7, x0 __LF \ and x15, x15, x7 __LF \ mvn x21, x21 __LF \ extr x0, x21, x20, #61 __LF \ adcs x8, x8, x0 __LF \ and x15, x15, x8 __LF \ ldp x22, x23, [P2+48] __LF \ mvn x22, x22 __LF \ extr x0, x22, x21, #61 __LF \ adcs x9, x9, x0 __LF \ and x15, x15, x9 __LF \ mvn x23, x23 __LF \ extr x0, x23, x22, #61 __LF \ adcs x10, x10, x0 __LF \ and x15, x15, x10 __LF \ ldr x0, [P2+64] __LF \ eor x0, x0, #0x1ff __LF \ extr x0, x0, x23, #61 __LF \ adc x11, x11, x0 __LF \ lsr x12, x11, #9 __LF \ orr x11, x11, #0xfffffffffffffe00 __LF \ cmp xzr, xzr __LF \ adcs xzr, x3, x12 __LF \ adcs xzr, x15, xzr __LF \ adcs xzr, x11, xzr __LF \ adcs x3, x3, x12 __LF \ adcs x4, x4, xzr __LF \ adcs x5, x5, xzr __LF \ adcs x6, x6, xzr __LF \ adcs x7, x7, xzr __LF \ adcs x8, x8, xzr __LF \ adcs x9, x9, xzr __LF \ adcs x10, x10, xzr __LF \ adc x11, x11, xzr __LF \ and x11, x11, #0x1ff __LF \ stp x3, x4, [P0] __LF \ stp x5, x6, [P0+16] __LF \ stp x7, x8, [P0+32] __LF \ stp x9, x10, [P0+48] __LF \ str x11, [P0+64] // P0 = 4 * P1 - P2 = 4 * P1 + (p_521 - P2) #define cmsub41_p521(P0,P1,P2) \ ldp x6, x7, [P1] __LF \ lsl 
x3, x6, #2 __LF \ extr x4, x7, x6, #62 __LF \ ldp x8, x9, [P1+16] __LF \ extr x5, x8, x7, #62 __LF \ extr x6, x9, x8, #62 __LF \ ldp x10, x11, [P1+32] __LF \ extr x7, x10, x9, #62 __LF \ extr x8, x11, x10, #62 __LF \ ldp x12, x13, [P1+48] __LF \ extr x9, x12, x11, #62 __LF \ extr x10, x13, x12, #62 __LF \ ldr x14, [P1+64] __LF \ extr x11, x14, x13, #62 __LF \ ldp x0, x1, [P2] __LF \ mvn x0, x0 __LF \ adds x3, x3, x0 __LF \ sbcs x4, x4, x1 __LF \ ldp x0, x1, [P2+16] __LF \ sbcs x5, x5, x0 __LF \ and x15, x4, x5 __LF \ sbcs x6, x6, x1 __LF \ and x15, x15, x6 __LF \ ldp x0, x1, [P2+32] __LF \ sbcs x7, x7, x0 __LF \ and x15, x15, x7 __LF \ sbcs x8, x8, x1 __LF \ and x15, x15, x8 __LF \ ldp x0, x1, [P2+48] __LF \ sbcs x9, x9, x0 __LF \ and x15, x15, x9 __LF \ sbcs x10, x10, x1 __LF \ and x15, x15, x10 __LF \ ldr x0, [P2+64] __LF \ eor x0, x0, #0x1ff __LF \ adc x11, x11, x0 __LF \ lsr x12, x11, #9 __LF \ orr x11, x11, #0xfffffffffffffe00 __LF \ cmp xzr, xzr __LF \ adcs xzr, x3, x12 __LF \ adcs xzr, x15, xzr __LF \ adcs xzr, x11, xzr __LF \ adcs x3, x3, x12 __LF \ adcs x4, x4, xzr __LF \ adcs x5, x5, xzr __LF \ adcs x6, x6, xzr __LF \ adcs x7, x7, xzr __LF \ adcs x8, x8, xzr __LF \ adcs x9, x9, xzr __LF \ adcs x10, x10, xzr __LF \ adc x11, x11, xzr __LF \ and x11, x11, #0x1ff __LF \ stp x3, x4, [P0] __LF \ stp x5, x6, [P0+16] __LF \ stp x7, x8, [P0+32] __LF \ stp x9, x10, [P0+48] __LF \ str x11, [P0+64] S2N_BN_SYMBOL(p521_jdouble): CFI_START // Save regs and make room on stack for temporary variables CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_PUSH2(x27,x28) CFI_PUSH2(x29,x30) CFI_DEC_SP(NSPACE) // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 // Main code, just a sequence of basic field operations // z2 = z^2 // y2 = y^2 sqr_p521(z2,z_1) sqr_p521(y2,y_1) // x2p = x^2 - z^4 = (x + z^2) * (x - z^2) add_p521(t1,x_1,z2) sub_p521(t2,x_1,z2) mul_p521(x2p,t1,t2) // t1 = y + z // x4p = x2p^2 // xy2 = x * y^2 add_p521(t1,y_1,z_1) sqr_p521(x4p,x2p) mul_p521(xy2,x_1,y2) // t2 = (y + z)^2 sqr_p521(t2,t1) // d = 12 * xy2 - 9 * x4p // t1 = y^2 + 2 * y * z cmsub_p521(d_,12,xy2,9,x4p) sub_p521(t1,t2,z2) // y4 = y^4 sqr_p521(y4,y2) // z_3' = 2 * y * z // dx2 = d * x2p sub_p521(z_3,t1,y2) mul_p521(dx2,d_,x2p) // x' = 4 * xy2 - d cmsub41_p521(x_3,xy2,d_) // y' = 3 * dx2 - 8 * y4 cmsub38_p521(y_3,dx2,y4) // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP2(x29,x30) CFI_POP2(x27,x28) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(p521_jdouble) // Local versions of the two "big" field operations, identical to // bignum_mul_p521 and bignum_sqr_p521. 
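// On entry to these local subroutines, x0 points at the destination and
// x1 (plus x2 for multiplication) at the source field elements, exactly
// as set up by the mul_p521/sqr_p521 macros above; the bodies save and
// restore the callee-saved registers they use.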
S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jdouble_local_mul_p521)
Lp521_jdouble_local_mul_p521:
        CFI_START
        CFI_PUSH2(x19,x20)
        CFI_PUSH2(x21,x22)
        CFI_PUSH2(x23,x24)
        CFI_PUSH2(x25,x26)
        CFI_DEC_SP(80)
        ldr q6, [x2]
        ldp x10, x17, [x1, #16]
        ldr q4, [x1]
        ldr q16, [x2, #32]
        ldp x5, x20, [x2, #16]
        ldr q2, [x1, #32]
        movi v31.2D, #0x00000000ffffffff
        uzp2 v17.4S, v6.4S, v6.4S
        rev64 v7.4S, v6.4S
        ldp x15, x21, [x1]
        xtn v25.2S, v6.2D
        xtn v22.2S, v4.2D
        subs x14, x10, x17
        mul v7.4S, v7.4S, v4.4S
        csetm x8, cc
        rev64 v3.4S, v16.4S
        xtn v1.2S, v16.2D
        ldp x13, x16, [x2]
        mul x26, x10, x5
        uzp2 v16.4S, v16.4S, v16.4S
        uaddlp v26.2D, v7.4S
        cneg x4, x14, cc
        subs x24, x15, x21
        xtn v5.2S, v2.2D
        mul v28.4S, v3.4S, v2.4S
        shl v26.2D, v26.2D, #32
        mul x22, x17, x20
        umull v20.2D, v22.2S, v25.2S
        uzp2 v6.4S, v4.4S, v4.4S
        umull v18.2D, v22.2S, v17.2S
        uzp2 v4.4S, v2.4S, v2.4S
        cneg x14, x24, cc
        csetm x7, cc
        umulh x11, x17, x20
        usra v18.2D, v20.2D, #32
        uaddlp v7.2D, v28.4S
        subs x19, x16, x13
        umlal v26.2D, v22.2S, v25.2S
        cneg x19, x19, cc
        shl v28.2D, v7.2D, #32
        umull v7.2D, v5.2S, v1.2S
        umull v30.2D, v5.2S, v16.2S
        cinv x6, x7, cc
        mul x25, x14, x19
        umlal v28.2D, v5.2S, v1.2S
        umull v21.2D, v6.2S, v17.2S
        umulh x14, x14, x19
        usra v30.2D, v7.2D, #32
        subs x9, x20, x5
        and v29.16B, v18.16B, v31.16B
        cinv x23, x8, cc
        mov x8, v26.d[1]
        cneg x12, x9, cc
        usra v21.2D, v18.2D, #32
        umlal v29.2D, v6.2S, v25.2S
        mul x24, x4, x12
        umull v18.2D, v4.2S, v16.2S
        movi v25.2D, #0x00000000ffffffff
        eor x9, x14, x6
        and v7.16B, v30.16B, v25.16B
        usra v21.2D, v29.2D, #32
        umulh x7, x10, x5
        usra v18.2D, v30.2D, #32
        umlal v7.2D, v4.2S, v1.2S
        mov x19, v21.d[0]
        umulh x3, x4, x12
        mov x14, v21.d[1]
        usra v18.2D, v7.2D, #32
        adds x4, x8, x19
        mov x8, v26.d[0]
        adcs x19, x26, x14
        adcs x14, x22, x7
        adc x12, x11, xzr
        adds x11, x4, x8
        adcs x26, x19, x4
        adcs x22, x14, x19
        eor x4, x24, x23
        adcs x14, x12, x14
        eor x7, x25, x6
        adc x25, xzr, x12
        eor x19, x3, x23
        adds x3, x26, x8
        adcs x24, x22, x11
        adcs x12, x14, x26
        adcs x22, x25, x22
        adcs x26, xzr, x14
        adc x14, xzr, x25
        cmn x23, #0x1
        adcs x22, x22, x4
        adcs x19, x26, x19
        adc x25, x14, x23
        subs x14, x21, x17
        cneg x23, x14, cc
        csetm x26, cc
        subs x4, x20, x16
        cneg x14, x4, cc
        cinv x4, x26, cc
        cmn x6, #0x1
        adcs x11, x11, x7
        mul x7, x23, x14
        adcs x9, x3, x9
        adcs x26, x24, x6
        umulh x3, x23, x14
        adcs x14, x12, x6
        adcs x22, x22, x6
        adcs x12, x19, x6
        extr x24, x11, x8, #55
        adc x6, x25, x6
        subs x19, x15, x17
        csetm x17, cc
        cneg x23, x19, cc
        subs x19, x20, x13
        lsl x25, x8, #9
        eor x8, x7, x4
        cneg x20, x19, cc
        umulh x7, x23, x20
        cinv x19, x17, cc
        subs x17, x15, x10
        csetm x15, cc
        stp x25, x24, [sp, #32]
        cneg x24, x17, cc
        mul x20, x23, x20
        subs x25, x5, x13
        cneg x13, x25, cc
        cinv x15, x15, cc
        mul x25, x24, x13
        subs x21, x21, x10
        csetm x23, cc
        cneg x17, x21, cc
        subs x21, x5, x16
        umulh x13, x24, x13
        cinv x10, x23, cc
        cneg x23, x21, cc
        cmn x4, #0x1
        adcs x14, x14, x8
        eor x21, x3, x4
        adcs x21, x22, x21
        eor x5, x20, x19
        adcs x24, x12, x4
        mul x12, x17, x23
        eor x8, x25, x15
        adc x25, x6, x4
        cmn x15, #0x1
        adcs x6, x9, x8
        ldp x20, x8, [x2, #48]
        eor x9, x13, x15
        adcs x4, x26, x9
        umulh x26, x17, x23
        ldp x17, x13, [x1, #48]
        adcs x9, x14, x15
        adcs x16, x21, x15
        adcs x14, x24, x15
        eor x21, x7, x19
        mul x23, x17, x20
        adc x24, x25, x15
        cmn x19, #0x1
        adcs x7, x4, x5
        adcs x9, x9, x21
        umulh x3, x13, x8
        adcs x16, x16, x19
        adcs x22, x14, x19
        eor x5, x12, x10
        adc x12, x24, x19
        cmn x10, #0x1
        adcs x19, x7, x5
        eor x14, x26, x10
        mov x7, v28.d[1]
        adcs x24, x9, x14
        extr x4, x19, x6, #55
        umulh x15, x17, x20
        mov x14, v18.d[1]
        lsr x9, x19, #55
        adcs x5, x16, x10
        mov x16, v18.d[0]
        adcs x19, x22, x10
        str x9, [sp, #64]
        extr x25, x6, x11, #55
        adc x21, x12, x10
        subs x26, x17, x13
        stp x25, x4, [sp, #48]
        stp x19, x21, [sp, #16]
        csetm x6, cc
        cneg x4, x26, cc
        mul x19, x13, x8
        subs x11, x8, x20
        stp x24, x5, [sp]
        ldp x21, x10, [x1, #32]
        cinv x12, x6, cc
        cneg x6, x11, cc
        mov x9, v28.d[0]
        umulh x25, x4, x6
        adds x22, x7, x16
        ldp x16, x5, [x2, #32]
        adcs x14, x23, x14
        adcs x11, x19, x15
        adc x24, x3, xzr
        adds x3, x22, x9
        adcs x15, x14, x22
        mul x22, x4, x6
        adcs x6, x11, x14
        adcs x4, x24, x11
        eor x14, x25, x12
        adc x26, xzr, x24
        subs x7, x21, x10
        csetm x23, cc
        cneg x19, x7, cc
        subs x24, x5, x16
        cneg x11, x24, cc
        cinv x7, x23, cc
        adds x25, x15, x9
        eor x23, x22, x12
        adcs x22, x6, x3
        mul x24, x19, x11
        adcs x15, x4, x15
        adcs x6, x26, x6
        umulh x19, x19, x11
        adcs x11, xzr, x4
        adc x26, xzr, x26
        cmn x12, #0x1
        adcs x4, x6, x23
        eor x6, x24, x7
        adcs x14, x11, x14
        adc x26, x26, x12
        subs x11, x10, x13
        cneg x12, x11, cc
        csetm x11, cc
        eor x19, x19, x7
        subs x24, x8, x5
        cinv x11, x11, cc
        cneg x24, x24, cc
        cmn x7, #0x1
        adcs x3, x3, x6
        mul x23, x12, x24
        adcs x25, x25, x19
        adcs x6, x22, x7
        umulh x19, x12, x24
        adcs x22, x15, x7
        adcs x12, x4, x7
        eor x24, x23, x11
        adcs x4, x14, x7
        adc x26, x26, x7
        eor x19, x19, x11
        subs x14, x21, x17
        cneg x7, x14, cc
        csetm x14, cc
        subs x23, x20, x16
        cinv x14, x14, cc
        cneg x23, x23, cc
        cmn x11, #0x1
        adcs x22, x22, x24
        mul x24, x7, x23
        adcs x15, x12, x19
        adcs x4, x4, x11
        adc x19, x26, x11
        umulh x26, x7, x23
        subs x7, x21, x13
        eor x11, x24, x14
        cneg x23, x7, cc
        csetm x12, cc
        subs x7, x8, x16
        cneg x7, x7, cc
        cinv x12, x12, cc
        cmn x14, #0x1
        eor x26, x26, x14
        adcs x11, x25, x11
        mul x25, x23, x7
        adcs x26, x6, x26
        adcs x6, x22, x14
        adcs x24, x15, x14
        umulh x23, x23, x7
        adcs x4, x4, x14
        adc x22, x19, x14
        eor x14, x25, x12
        eor x7, x23, x12
        cmn x12, #0x1
        adcs x14, x26, x14
        ldp x19, x25, [x2]
        ldp x15, x23, [x2, #16]
        adcs x26, x6, x7
        adcs x24, x24, x12
        adcs x7, x4, x12
        adc x4, x22, x12
        subs x19, x19, x16
        ldp x16, x22, [x1]
        sbcs x6, x25, x5
        ldp x12, x25, [x1, #16]
        sbcs x15, x15, x20
        sbcs x8, x23, x8
        csetm x23, cc
        subs x21, x21, x16
        eor x16, x19, x23
        sbcs x19, x10, x22
        eor x22, x6, x23
        eor x8, x8, x23
        sbcs x6, x17, x12
        sbcs x13, x13, x25
        csetm x12, cc
        subs x10, x10, x17
        cneg x17, x10, cc
        csetm x25, cc
        subs x5, x20, x5
        eor x10, x19, x12
        cneg x19, x5, cc
        eor x20, x15, x23
        eor x21, x21, x12
        cinv x15, x25, cc
        mul x25, x17, x19
        subs x16, x16, x23
        sbcs x5, x22, x23
        eor x6, x6, x12
        sbcs x20, x20, x23
        eor x22, x13, x12
        sbc x8, x8, x23
        subs x21, x21, x12
        umulh x19, x17, x19
        sbcs x10, x10, x12
        sbcs x17, x6, x12
        eor x6, x19, x15
        eor x19, x25, x15
        umulh x25, x17, x20
        sbc x13, x22, x12
        cmn x15, #0x1
        adcs x22, x14, x19
        adcs x19, x26, x6
        ldp x6, x26, [sp]
        adcs x14, x24, x15
        umulh x24, x21, x16
        adcs x7, x7, x15
        adc x15, x4, x15
        adds x4, x9, x6
        eor x9, x23, x12
        adcs x12, x3, x26
        stp x4, x12, [sp]
        ldp x4, x26, [sp, #16]
        umulh x12, x10, x5
        ldp x6, x23, [sp, #32]
        adcs x3, x11, x4
        mul x4, x13, x8
        adcs x26, x22, x26
        ldp x22, x11, [sp, #48]
        adcs x6, x19, x6
        stp x3, x26, [sp, #16]
        mul x26, x10, x5
        adcs x14, x14, x23
        stp x6, x14, [sp, #32]
        ldr x6, [sp, #64]
        adcs x22, x7, x22
        adcs x14, x15, x11
        mul x11, x17, x20
        adc x19, x6, xzr
        stp x22, x14, [sp, #48]
        adds x14, x26, x24
        str x19, [sp, #64]
        umulh x19, x13, x8
        adcs x7, x11, x12
        adcs x22, x4, x25
        mul x6, x21, x16
        adc x19, x19, xzr
        subs x11, x17, x13
        cneg x12, x11, cc
        csetm x11, cc
        subs x24, x8, x20
        cinv x11, x11, cc
        cneg x24, x24, cc
        adds x4, x14, x6
        adcs x14, x7, x14
        mul x3, x12, x24
        adcs x7, x22, x7
        adcs x22, x19, x22
        umulh x12, x12, x24
        adc x24, xzr, x19
        adds x19, x14, x6
        eor x3, x3, x11
        adcs x26, x7, x4
        adcs x14, x22, x14
        adcs x25, x24, x7
        adcs x23, xzr, x22
        eor x7, x12, x11
        adc x12, xzr, x24
        subs x22, x21, x10
        cneg x24, x22, cc
        csetm x22, cc
        subs x15, x5, x16
        cinv x22, x22, cc
        cneg x15, x15, cc
        cmn x11, #0x1
        adcs x3, x25, x3
        mul x25, x24, x15
        adcs x23, x23, x7
        adc x11, x12, x11
        subs x7, x10, x13
        umulh x15, x24, x15
        cneg x12, x7, cc
        csetm x7, cc
        eor x24, x25, x22
        eor x25, x15, x22
        cmn x22, #0x1
        adcs x24, x4, x24
        adcs x19, x19, x25
        adcs x15, x26, x22
        adcs x4, x14, x22
        adcs x26, x3, x22
        adcs x25, x23, x22
        adc x23, x11, x22
        subs x14, x21, x17
        cneg x3, x14, cc
        csetm x11, cc
        subs x14, x8, x5
        cneg x14, x14, cc
        cinv x7, x7, cc
        subs x13, x21, x13
        cneg x21, x13, cc
        csetm x13, cc
        mul x22, x12, x14
        subs x8, x8, x16
        cinv x13, x13, cc
        umulh x14, x12, x14
        cneg x12, x8, cc
        subs x8, x20, x16
        cneg x8, x8, cc
        cinv x16, x11, cc
        eor x22, x22, x7
        cmn x7, #0x1
        eor x14, x14, x7
        adcs x4, x4, x22
        mul x11, x3, x8
        adcs x22, x26, x14
        adcs x14, x25, x7
        eor x25, x24, x9
        adc x26, x23, x7
        umulh x7, x3, x8
        subs x17, x10, x17
        cneg x24, x17, cc
        eor x3, x11, x16
        csetm x11, cc
        subs x20, x20, x5
        cneg x5, x20, cc
        cinv x11, x11, cc
        cmn x16, #0x1
        mul x17, x21, x12
        eor x8, x7, x16
        adcs x10, x19, x3
        and x19, x9, #0x1ff
        adcs x20, x15, x8
        umulh x15, x21, x12
        eor x12, x10, x9
        eor x8, x6, x9
        adcs x6, x4, x16
        adcs x4, x22, x16
        adcs x21, x14, x16
        adc x7, x26, x16
        mul x10, x24, x5
        cmn x13, #0x1
        ldp x3, x14, [x1]
        eor x17, x17, x13
        umulh x5, x24, x5
        adcs x20, x20, x17
        eor x17, x15, x13
        adcs x16, x6, x17
        eor x22, x10, x11
        adcs x23, x4, x13
        extr x10, x14, x3, #52
        and x26, x3, #0xfffffffffffff
        adcs x24, x21, x13
        and x15, x10, #0xfffffffffffff
        adc x6, x7, x13
        cmn x11, #0x1
        adcs x17, x20, x22
        eor x4, x5, x11
        ldp x21, x10, [sp]
        adcs x7, x16, x4
        eor x16, x17, x9
        eor x13, x7, x9
        ldp x3, x17, [sp, #16]
        adcs x7, x23, x11
        eor x23, x7, x9
        ldp x5, x22, [sp, #32]
        adcs x7, x24, x11
        adc x24, x6, x11
        ldr x6, [x2, #64]
        adds x20, x8, x21
        lsl x11, x20, #9
        eor x4, x7, x9
        orr x7, x11, x19
        eor x8, x24, x9
        adcs x11, x25, x10
        mul x26, x6, x26
        ldp x19, x24, [sp, #48]
        adcs x12, x12, x3
        adcs x16, x16, x17
        adcs x9, x13, x5
        ldr x25, [sp, #64]
        extr x20, x11, x20, #55
        adcs x13, x23, x22
        adcs x4, x4, x19
        extr x23, x12, x11, #55
        adcs x8, x8, x24
        adc x11, x25, xzr
        adds x21, x9, x21
        extr x9, x16, x12, #55
        lsr x12, x16, #55
        adcs x10, x13, x10
        mul x15, x6, x15
        adcs x13, x4, x3
        ldp x16, x4, [x2]
        ldr x3, [x1, #64]
        adcs x17, x8, x17
        adcs x5, x5, x7
        adcs x20, x22, x20
        adcs x8, x19, x23
        and x22, x16, #0xfffffffffffff
        ldp x19, x7, [x1, #16]
        adcs x9, x24, x9
        extr x24, x4, x16, #52
        adc x16, x12, x25
        mul x22, x3, x22
        and x25, x24, #0xfffffffffffff
        extr x14, x19, x14, #40
        and x12, x14, #0xfffffffffffff
        extr x23, x7, x19, #28
        ldp x19, x24, [x2, #16]
        mul x14, x3, x25
        and x23, x23, #0xfffffffffffff
        add x22, x26, x22
        lsl x11, x11, #48
        lsr x26, x22, #52
        lsl x25, x22, #12
        mul x22, x6, x12
        extr x12, x19, x4, #40
        add x4, x15, x14
        mul x15, x6, x23
        add x4, x4, x26
        extr x23, x24, x19, #28
        ldp x14, x19, [x1, #32]
        and x26, x12, #0xfffffffffffff
        extr x12, x4, x25, #12
        and x25, x23, #0xfffffffffffff
        adds x21, x21, x12
        mul x12, x3, x26
        extr x23, x14, x7, #16
        and x23, x23, #0xfffffffffffff
        mul x7, x3, x25
        ldp x25, x26, [x2, #32]
        add x12, x22, x12
        extr x22, x19, x14, #56
        mul x23, x6, x23
        lsr x14, x14, #4
        extr x24, x25, x24, #16
        add x7, x15, x7
        and x15, x24, #0xfffffffffffff
        and x22, x22, #0xfffffffffffff
        lsr x24, x4, #52
        mul x15, x3, x15
        and x14, x14, #0xfffffffffffff
        add x12, x12, x24
        lsl x24, x4, #12
        lsr x4, x12, #52
        extr x24, x12, x24, #24
        adcs x10, x10, x24
        lsl x24, x12, #12
        add x12, x7, x4
        mul x22, x6, x22
        add x4, x23, x15
        extr x7, x12, x24, #36
        adcs x13, x13, x7
        lsl x15, x12, #12
        add x7, x4, x11
        lsr x24, x12, #52
        ldp x23, x11, [x2, #48]
        add x4, x7, x24
        mul x12, x6, x14
        extr x7, x26, x25, #56
        extr x14, x4, x15, #48
        and x2, x7, #0xfffffffffffff
        extr x24, x11, x23, #32
        ldp x15, x7, [x1, #48]
        and x1, x24, #0xfffffffffffff
        lsr x24, x4, #52
        mul x2, x3, x2
        extr x26, x23, x26, #44
        lsr x23, x25, #4
        and x23, x23, #0xfffffffffffff
        and x25, x26, #0xfffffffffffff
        extr x26, x7, x15, #32
        extr x19, x15, x19, #44
        mul x23, x3, x23
        and x15, x26, #0xfffffffffffff
        lsl x26, x4, #12
        and x4, x19, #0xfffffffffffff
        lsr x11, x11, #20
        mul x19, x6, x4
        adcs x17, x17, x14
        add x14, x22, x2
        add x22, x12, x23
        lsr x7, x7, #20
        add x22, x22, x24
        extr x2, x22, x26, #60
        mul x24, x3, x25
        lsr x22, x22, #52
        add x14, x14, x22
        lsl x22, x2, #8
        extr x22, x14, x22, #8
        lsl x2, x14, #12
        mul x1, x3, x1
        adcs x12, x5, x22
        mul x5, x6, x15
        and x26, x10, x13
        and x4, x26, x17
        add x23, x19, x24
        lsr x14, x14, #52
        mul x22, x3, x11
        add x11, x23, x14
        extr x25, x11, x2, #20
        lsl x19, x11, #12
        adcs x25, x20, x25
        and x14, x4, x12
        add x1, x5, x1
        and x14, x14, x25
        mul x15, x6, x7
        add x26, x15, x22
        mul x6, x6, x3
        lsr x22, x11, #52
        add x4, x1, x22
        lsr x1, x4, #52
        extr x3, x4, x19, #32
        lsl x15, x4, #12
        add x7, x26, x1
        adcs x23, x8, x3
        extr x20, x7, x15, #44
        and x3, x14, x23
        lsr x19, x7, #44
        adcs x7, x9, x20
        add x11, x6, x19
        adc x4, x16, x11
        lsr x14, x4, #9
        cmp xzr, xzr
        and x15, x3, x7
        orr x3, x4, #0xfffffffffffffe00
        adcs xzr, x21, x14
        adcs xzr, x15, xzr
        adcs xzr, x3, xzr
        adcs x11, x21, x14
        and x14, x11, #0x1ff
        adcs x1, x10, xzr
        extr x10, x1, x11, #9
        str x14, [x0, #64]
        adcs x14, x13, xzr
        extr x11, x14, x1, #9
        adcs x1, x17, xzr
        extr x4, x1, x14, #9
        stp x10, x11, [x0]
        adcs x11, x12, xzr
        extr x14, x11, x1, #9
        adcs x10, x25, xzr
        extr x11, x10, x11, #9
        stp x4, x14, [x0, #16]
        adcs x14, x23, xzr
        extr x10, x14, x10, #9
        adcs x1, x7, xzr
        stp x11, x10, [x0, #32]
        extr x14, x1, x14, #9
        adc x10, x3, xzr
        extr x26, x10, x1, #9
        stp x14, x26, [x0, #48]
        CFI_INC_SP(80)
        CFI_POP2(x25,x26)
        CFI_POP2(x23,x24)
        CFI_POP2(x21,x22)
        CFI_POP2(x19,x20)
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(Lp521_jdouble_local_mul_p521)

S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jdouble_local_sqr_p521)
Lp521_jdouble_local_sqr_p521:
        CFI_START
        CFI_PUSH2(x19,x20)
        CFI_PUSH2(x21,x22)
        CFI_PUSH2(x23,x24)
        ldr q23, [x1, #32]
        ldp x9, x2, [x1, #32]
        ldr q16, [x1, #32]
        ldr q20, [x1, #48]
        ldp x6, x13, [x1, #48]
        rev64 v2.4S, v23.4S
        mul x14, x9, x2
        ldr q31, [x1, #48]
        subs x22, x9, x2
        uzp2 v26.4S, v23.4S, v23.4S
        mul v30.4S, v2.4S, v16.4S
        xtn v0.2S, v20.2D
        csetm x12, cc
        xtn v21.2S, v16.2D
        xtn v23.2S, v23.2D
        umulh x10, x9, x6
        rev64 v27.4S, v31.4S
        umull v2.2D, v21.2S, v26.2S
        cneg x23, x22, cc
        uaddlp v25.2D, v30.4S
        umull v18.2D, v21.2S, v23.2S
        mul x22, x9, x6
        mul v6.4S, v27.4S, v20.4S
        uzp2 v17.4S, v20.4S, v20.4S
        shl v20.2D, v25.2D, #32
        uzp2 v27.4S, v31.4S, v31.4S
        mul x16, x2, x13
        umlal v20.2D, v21.2S, v23.2S
        usra v2.2D, v18.2D, #32
        adds x8, x22, x10
        umull v25.2D, v17.2S, v27.2S
        xtn v31.2S, v31.2D
        movi v1.2D, #0xffffffff
        adc x3, x10, xzr
        umulh x21, x2, x13
        uzp2 v21.4S, v16.4S, v16.4S
        umull v18.2D, v0.2S, v27.2S
        subs x19, x13, x6
        and v7.16B, v2.16B, v1.16B
        umull v27.2D, v0.2S, v31.2S
        cneg x20, x19, cc
        movi v30.2D, #0xffffffff
        umull v16.2D, v21.2S, v26.2S
        umlal v7.2D, v21.2S, v23.2S
        mul x19, x23, x20
        cinv x7, x12, cc
        uaddlp v6.2D, v6.4S
        eor x12, x19, x7
        adds x11, x8, x16
        umulh x10, x23, x20
        ldr q1, [x1]
        usra v16.2D, v2.2D, #32
        adcs x19, x3, x21
        shl v2.2D, v6.2D, #32
        adc x20, x21, xzr
        adds x17, x19, x16
        usra v18.2D, v27.2D, #32
        adc x19, x20, xzr
        cmn x7, #0x1
        umlal v2.2D, v0.2S, v31.2S
        umulh x16, x9, x2
        adcs x8, x11, x12
        usra v16.2D, v7.2D, #32
        ldr x12, [x1, #64]
        eor x20, x10, x7
        umulh x10, x6, x13
        mov x23, v2.d[0]
        mov x3, v2.d[1]
        adcs x21, x17, x20
        usra v25.2D, v18.2D, #32
        and v23.16B, v18.16B, v30.16B
        adc x7, x19, x7
        adds x22, x22, x22
        ldr q7, [x1, #16]
        adcs x17, x8, x8
        umlal v23.2D, v17.2S, v31.2S
        mov x19, v16.d[0]
        mul x11, x12, x12
        ldr q4, [x1]
        usra v25.2D, v23.2D, #32
        add x5, x12, x12
        adcs x15, x21, x21
        ldr q28, [x1]
        mov x12, v20.d[1]
        adcs x24, x7, x7
        mov x21, v16.d[1]
        adc x4, xzr, xzr
        adds x19, x19, x14
        ldr q18, [x1, #16]
        xtn v26.2S, v1.2D
        adcs x8, x12, x16
        adc x21, x21, xzr
        adds x7, x19, x14
        xtn v23.2S, v7.2D
        rev64 v21.4S, v28.4S
        adcs x12, x8, x16
        ldp x20, x19, [x1]
        mov x16, v25.d[1]
        xtn v22.2S, v28.2D
        adc x14, x21, xzr
        adds x8, x22, x12
        uzp2 v24.4S, v28.4S, v28.4S
        rev64 v28.4S, v18.4S
        mul x12, x6, x13
        mul v16.4S, v21.4S, v1.4S
        shrn v31.2S, v7.2D, #32
        adcs x22, x17, x14
        mov x14, v25.d[0]
        and x21, x20, #0xfffffffffffff
        umull v17.2D, v26.2S, v24.2S
        ldr q2, [x1, #32]
        adcs x17, x15, xzr
        ldr q30, [x1, #48]
        umull v7.2D, v26.2S, v22.2S
        adcs x15, x24, xzr
        ldr q0, [x1, #16]
        movi v6.2D, #0xffffffff
        adc x4, x4, xzr
        adds x14, x14, x12
        uzp1 v27.4S, v18.4S, v4.4S
        uzp2 v19.4S, v1.4S, v1.4S
        adcs x24, x3, x10
        mul x3, x5, x21
        umull v29.2D, v23.2S, v31.2S
        ldr q5, [x1]
        adc x21, x16, xzr
        adds x16, x14, x12
        extr x12, x19, x20, #52
        umull v18.2D, v19.2S, v24.2S
        adcs x24, x24, x10
        and x10, x12, #0xfffffffffffff
        ldp x14, x12, [x1, #16]
        usra v17.2D, v7.2D, #32
        adc x21, x21, xzr
        adds x23, x23, x17
        mul x17, x5, x10
        shl v21.2D, v29.2D, #33
        lsl x10, x3, #12
        lsr x1, x3, #52
        rev64 v29.4S, v2.4S
        uaddlp v25.2D, v16.4S
        add x17, x17, x1
        adcs x16, x16, x15
        extr x3, x14, x19, #40
        mov x15, v20.d[0]
        extr x10, x17, x10, #12
        and x3, x3, #0xfffffffffffff
        shl v3.2D, v25.2D, #32
        and v6.16B, v17.16B, v6.16B
        mul x1, x5, x3
        usra v18.2D, v17.2D, #32
        adcs x3, x24, x4
        extr x4, x12, x14, #28
        umlal v6.2D, v19.2S, v22.2S
        xtn v20.2S, v2.2D
        umlal v3.2D, v26.2S, v22.2S
        movi v26.2D, #0xffffffff
        lsr x24, x17, #52
        and x4, x4, #0xfffffffffffff
        uzp2 v19.4S, v2.4S, v2.4S
        add x1, x1, x24
        mul x24, x5, x4
        lsl x4, x17, #12
        xtn v24.2S, v5.2D
        extr x17, x1, x4, #24
        adc x21, x21, xzr
        umlal v21.2D, v23.2S, v23.2S
        adds x4, x15, x10
        lsl x10, x1, #12
        adcs x15, x7, x17
        mul v23.4S, v28.4S, v4.4S
        and x7, x4, #0x1ff
        lsr x17, x1, #52
        umulh x1, x19, x12
        uzp2 v17.4S, v5.4S, v5.4S
        extr x4, x15, x4, #9
        add x24, x24, x17
        mul v29.4S, v29.4S, v5.4S
        extr x17, x24, x10, #36
        extr x10, x9, x12, #16
        uzp1 v28.4S, v4.4S, v4.4S
        adcs x17, x8, x17
        and x8, x10, #0xfffffffffffff
        umull v16.2D, v24.2S, v20.2S
        extr x10, x17, x15, #9
        mul x15, x5, x8
        stp x4, x10, [x0]
        lsl x4, x24, #12
        lsr x8, x9, #4
        uaddlp v4.2D, v23.4S
        and x8, x8, #0xfffffffffffff
        umull v23.2D, v24.2S, v19.2S
        mul x8, x5, x8
        extr x10, x2, x9, #56
        lsr x24, x24, #52
        and x10, x10, #0xfffffffffffff
        add x15, x15, x24
        extr x4, x15, x4, #48
        mul x24, x5, x10
        lsr x10, x15, #52
        usra v23.2D, v16.2D, #32
        add x10, x8, x10
        shl v4.2D, v4.2D, #32
        adcs x22, x22, x4
        extr x4, x6, x2, #44
        lsl x15, x15, #12
        lsr x8, x10, #52
        extr x15, x10, x15, #60
        and x10, x4, #0xfffffffffffff
        umlal v4.2D, v28.2S, v27.2S
        add x8, x24, x8
        extr x4, x13, x6, #32
        mul x24, x5, x10
        uzp2 v16.4S, v30.4S, v30.4S
        lsl x10, x15, #8
        rev64 v28.4S, v30.4S
        and x15, x4, #0xfffffffffffff
        extr x4, x8, x10, #8
        mul x10, x5, x15
        lsl x15, x8, #12
        adcs x23, x23, x4
        lsr x4, x8, #52
        lsr x8, x13, #20
        add x4, x24, x4
        mul x8, x5, x8
        lsr x24, x4, #52
        extr x15, x4, x15, #20
        lsl x4, x4, #12
        add x10, x10, x24
        adcs x15, x16, x15
        extr x4, x10, x4, #32
        umulh x5, x20, x14
        adcs x3, x3, x4
        usra v18.2D, v6.2D, #32
        lsl x16, x10, #12
        extr x24, x15, x23, #9
        lsr x10, x10, #52
        uzp2 v27.4S, v0.4S, v0.4S
        add x8, x8, x10
        extr x10, x3, x15, #9
        extr x4, x22, x17, #9
        and v25.16B, v23.16B, v26.16B
        lsr x17, x8, #44
        extr x15, x8, x16, #44
        extr x16, x23, x22, #9
        xtn v7.2S, v30.2D
        mov x8, v4.d[0]
        stp x24, x10, [x0, #32]
        uaddlp v30.2D, v29.4S
        stp x4, x16, [x0, #16]
        umulh x24, x20, x19
        adcs x15, x21, x15
        adc x16, x11, x17
        subs x11, x20, x19
        xtn v5.2S, v0.2D
        csetm x17, cc
        extr x3, x15, x3, #9
        mov x22, v4.d[1]
        cneg x21, x11, cc
        subs x10, x12, x14
        mul v31.4S, v28.4S, v0.4S
        cneg x10, x10, cc
        cinv x11, x17, cc
        shl v4.2D, v30.2D, #32
        umull v28.2D, v5.2S, v16.2S
        extr x23, x16, x15, #9
        adds x4, x8, x5
        mul x17, x21, x10
        umull v22.2D, v5.2S, v7.2S
        adc x15, x5, xzr
        adds x4, x4, x22
        uaddlp v2.2D, v31.4S
        lsr x5, x16, #9
        adcs x16, x15, x1
        mov x15, v18.d[0]
        adc x1, x1, xzr
        umulh x10, x21, x10
        adds x22, x16, x22
        umlal v4.2D, v24.2S, v20.2S
        umull v30.2D, v27.2S, v16.2S
        stp x3, x23, [x0, #48]
        add x3, x7, x5
        adc x16, x1, xzr
        usra v28.2D, v22.2D, #32
        mul x23, x20, x19
        eor x1, x17, x11
        cmn x11, #0x1
        mov x17, v18.d[1]
        umull v18.2D, v17.2S, v19.2S
        adcs x7, x4, x1
        eor x1, x10, x11
        umlal v25.2D, v17.2S, v20.2S
        movi v16.2D, #0xffffffff
        adcs x22, x22, x1
        usra v18.2D, v23.2D, #32
        umulh x4, x14, x14
        adc x1, x16, x11
        adds x10, x8, x8
        shl v23.2D, v2.2D, #32
        str x3, [x0, #64]
        adcs x5, x7, x7
        and v16.16B, v28.16B, v16.16B
        usra v30.2D, v28.2D, #32
        adcs x7, x22, x22
        mov x21, v3.d[1]
        adcs x11, x1, x1
        umlal v16.2D, v27.2S, v7.2S
        adc x22, xzr, xzr
        adds x16, x15, x23
        mul x8, x14, x12
        umlal v23.2D, v5.2S, v7.2S
        usra v18.2D, v25.2D, #32
        umulh x15, x14, x12
        adcs x21, x21, x24
        usra v30.2D, v16.2D, #32
        adc x1, x17, xzr
        adds x3, x16, x23
        adcs x21, x21, x24
        adc x1, x1, xzr
        adds x24, x10, x21
        umulh x21, x12, x12
        adcs x16, x5, x1
        adcs x10, x7, xzr
        mov x17, v21.d[1]
        adcs x23, x11, xzr
        adc x5, x22, xzr
        adds x1, x4, x8
        adcs x22, x17, x15
        ldp x17, x4, [x0]
        mov x11, v21.d[0]
        adc x21, x21, xzr
        adds x1, x1, x8
        adcs x15, x22, x15
        adc x8, x21, xzr
        adds x22, x11, x10
        mov x21, v3.d[0]
        adcs x11, x1, x23
        ldp x1, x10, [x0, #16]
        adcs x15, x15, x5
        adc x7, x8, xzr
        adds x8, x17, x21
        mov x23, v4.d[1]
        ldp x5, x21, [x0, #32]
        adcs x17, x4, x3
        ldr x4, [x0, #64]
        mov x3, v18.d[0]
        adcs x24, x1, x24
        stp x8, x17, [x0]
        adcs x17, x10, x16
        ldp x1, x16, [x0, #48]
        adcs x5, x5, x22
        adcs x8, x21, x11
        stp x5, x8, [x0, #32]
        adcs x1, x1, x15
        mov x15, v23.d[1]
        adcs x21, x16, x7
        stp x1, x21, [x0, #48]
        adc x10, x4, xzr
        subs x7, x14, x12
        mov x16, v18.d[1]
        cneg x5, x7, cc
        csetm x4, cc
        subs x11, x13, x6
        mov x8, v23.d[0]
        cneg x7, x11, cc
        cinv x21, x4, cc
        mov x11, v30.d[0]
        adds x4, x23, x3
        mul x22, x5, x7
        mov x23, v30.d[1]
        adcs x8, x8, x16
        adcs x16, x15, x11
        adc x11, x23, xzr
        umulh x3, x5, x7
        stp x24, x17, [x0, #16]
        mov x5, v4.d[0]
        subs x15, x20, x19
        cneg x7, x15, cc
        str x10, [x0, #64]
        csetm x1, cc
        subs x24, x2, x9
        cneg x17, x24, cc
        cinv x15, x1, cc
        adds x23, x4, x5
        umulh x1, x7, x17
        adcs x24, x8, x4
        adcs x10, x16, x8
        eor x8, x22, x21
        adcs x16, x11, x16
        mul x22, x7, x17
        eor x17, x1, x15
        adc x1, xzr, x11
        adds x11, x24, x5
        eor x7, x3, x21
        adcs x3, x10, x23
        adcs x24, x16, x24
        adcs x4, x1, x10
        eor x10, x22, x15
        adcs x16, xzr, x16
        adc x1, xzr, x1
        cmn x21, #0x1
        adcs x8, x4, x8
        adcs x22, x16, x7
        adc x7, x1, x21
        subs x21, x19, x12
        csetm x4, cc
        cneg x1, x21, cc
        subs x21, x13, x2
        cinv x16, x4, cc
        cneg x4, x21, cc
        cmn x15, #0x1
        adcs x21, x23, x10
        mul x23, x1, x4
        adcs x11, x11, x17
        adcs x3, x3, x15
        umulh x1, x1, x4
        adcs x24, x24, x15
        adcs x8, x8, x15
        adcs x22, x22, x15
        eor x17, x23, x16
        adc x15, x7, x15
        subs x7, x20, x14
        cneg x7, x7, cc
        csetm x4, cc
        subs x10, x20, x12
        cneg x23, x10, cc
        csetm x10, cc
        subs x12, x6, x9
        cinv x20, x4, cc
        cneg x12, x12, cc
        cmn x16, #0x1
        eor x1, x1, x16
        adcs x17, x24, x17
        mul x4, x7, x12
        adcs x8, x8, x1
        umulh x1, x7, x12
        adcs x24, x22, x16
        adc x7, x15, x16
        subs x12, x13, x9
        cneg x12, x12, cc
        cinv x13, x10, cc
        subs x19, x19, x14
        mul x9, x23, x12
        cneg x19, x19, cc
        csetm x10, cc
        eor x16, x1, x20
        subs x22, x6, x2
        umulh x12, x23, x12
        eor x1, x4, x20
        cinv x4, x10, cc
        cneg x22, x22, cc
        cmn x20, #0x1
        adcs x15, x11, x1
        eor x6, x12, x13
        adcs x10, x3, x16
        adcs x17, x17, x20
        eor x23, x9, x13
        adcs x2, x8, x20
        mul x11, x19, x22
        adcs x24, x24, x20
        adc x7, x7, x20
        cmn x13, #0x1
        adcs x3, x10, x23
        umulh x22, x19, x22
        adcs x17, x17, x6
        eor x12, x22, x4
        extr x22, x15, x21, #63
        adcs x8, x2, x13
        extr x21, x21, x5, #63
        ldp x16, x23, [x0]
        adcs x20, x24, x13
        eor x1, x11, x4
        adc x6, x7, x13
        cmn x4, #0x1
        ldp x2, x7, [x0, #16]
        adcs x1, x3, x1
        extr x19, x1, x15, #63
        adcs x14, x17, x12
        extr x1, x14, x1, #63
        lsl x17, x5, #1
        adcs x8, x8, x4
        extr x12, x8, x14, #8
        ldp x15, x11, [x0, #32]
        adcs x9, x20, x4
        adc x3, x6, x4
        adds x16, x12, x16
        extr x6, x9, x8, #8
        ldp x14, x12, [x0, #48]
        extr x8, x3, x9, #8
        adcs x20, x6, x23
        ldr x24, [x0, #64]
        lsr x6, x3, #8
        adcs x8, x8, x2
        and x2, x1, #0x1ff
        and x1, x20, x8
        adcs x4, x6, x7
        adcs x3, x17, x15
        and x1, x1, x4
        adcs x9, x21, x11
        and x1, x1, x3
        adcs x6, x22, x14
        and x1, x1, x9
        and x21, x1, x6
        adcs x14, x19, x12
        adc x1, x24, x2
        cmp xzr, xzr
        orr x12, x1, #0xfffffffffffffe00
        lsr x1, x1, #9
        adcs xzr, x16, x1
        and x21, x21, x14
        adcs xzr, x21, xzr
        adcs xzr, x12, xzr
        adcs x21, x16, x1
        adcs x1, x20, xzr
        adcs x19, x8, xzr
        stp x21, x1, [x0]
        adcs x1, x4, xzr
        adcs x21, x3, xzr
        stp x19, x1, [x0, #16]
        adcs x1, x9, xzr
        stp x21, x1, [x0, #32]
        adcs x21, x6, xzr
        adcs x1, x14, xzr
        stp x21, x1, [x0, #48]
        adc x1, x12, xzr
        and x1, x1, #0x1ff
        str x1, [x0, #64]
        CFI_POP2(x23,x24)
        CFI_POP2(x21,x22)
        CFI_POP2(x19,x20)
        CFI_RET

S2N_BN_SIZE_DIRECTIVE(Lp521_jdouble_local_sqr_p521)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif