Dataset schema:
repo_id: string (length 5-115)
size: int64 (590 to 5.01M)
file_path: string (length 4-212)
content: string (length 590 to 5.01M)
repo_id: wlsfx/bnbb
size: 40,883
file_path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_mul_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply modulo p_521, z := (x * y) mod p_521, assuming x and y reduced // Inputs x[9], y[9]; output z[9] // // extern void bignum_mul_p521(uint64_t z[static 9], const uint64_t x[static 9], // const uint64_t y[static 9]); // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" // bignum_mul_p521 is functionally equivalent to unopt/bignum_mul_p521_base. // It is written in a way that // 1. A subset of scalar multiplications in bignum_montmul_p384 are carefully // chosen and vectorized // 2. The vectorized assembly is rescheduled using the SLOTHY superoptimizer. // https://github.com/slothy-optimizer/slothy // // The output program of step 1. is as follows: // // stp x19, x20, [sp, #-16]! // stp x21, x22, [sp, #-16]! // stp x23, x24, [sp, #-16]! // stp x25, x26, [sp, #-16]! // sub sp, sp, #80 // ldp x15, x21, [x1] // ldp x10, x17, [x1, #16] // ldp x13, x16, [x2] // ldr q18, [x1] // ldr q28, [x2] // ldp x5, x20, [x2, #16] // movi v16.2D, #0x00000000ffffffff // uzp2 v7.4S, v28.4S, v28.4S // xtn v4.2S, v18.2D // xtn v1.2S, v28.2D // rev64 v27.4S, v28.4S // umull v21.2D, v4.2S, v1.2S // umull v28.2D, v4.2S, v7.2S // uzp2 v5.4S, v18.4S, v18.4S // mul v18.4S, v27.4S, v18.4S // usra v28.2D, v21.2D, #32 // umull v29.2D, v5.2S, v7.2S // uaddlp v18.2D, v18.4S // and v16.16B, v28.16B, v16.16B // umlal v16.2D, v5.2S, v1.2S // shl v18.2D, v18.2D, #32 // usra v29.2D, v28.2D, #32 // umlal v18.2D, v4.2S, v1.2S // usra v29.2D, v16.2D, #32 // mov x8, v18.d[0] // mov x9, v18.d[1] // mul x6, x10, x5 // mul x19, x17, x20 // mov x14, v29.d[0] // adds x9, x9, x14 // mov x14, v29.d[1] // adcs x6, x6, x14 // umulh x14, x10, x5 // adcs x19, x19, x14 // umulh x14, x17, x20 // adc x14, x14, xzr // adds x11, x9, x8 // adcs x9, x6, x9 // adcs x6, x19, x6 // adcs x19, x14, x19 // adc x14, xzr, x14 // adds x3, x9, x8 // adcs x24, x6, x11 // adcs x9, x19, x9 // adcs x6, x14, x6 // adcs x19, xzr, x19 // adc x14, xzr, x14 // subs x4, x10, x17 // cneg x4, x4, cc // csetm x7, cc // subs x23, x20, x5 // cneg x23, x23, cc // mul x22, x4, x23 // umulh x4, x4, x23 // cinv x7, x7, cc // cmn x7, #0x1 // eor x23, x22, x7 // adcs x6, x6, x23 // eor x4, x4, x7 // adcs x19, x19, x4 // adc x14, x14, x7 // subs x4, x15, x21 // cneg x4, x4, cc // csetm x7, cc // subs x23, x16, x13 // cneg x23, x23, cc // mul x22, x4, x23 // umulh x4, x4, x23 // cinv x7, x7, cc // cmn x7, #0x1 // eor x23, x22, x7 // adcs x11, x11, x23 // eor x4, x4, x7 // adcs x3, x3, x4 // adcs x24, x24, x7 // adcs x9, x9, x7 // adcs x6, x6, x7 // adcs x19, x19, x7 // adc x14, x14, x7 // subs x4, x21, x17 // cneg x4, x4, cc // csetm x7, cc // subs x23, x20, x16 // cneg x23, x23, cc // mul x22, x4, x23 // umulh x4, x4, x23 // cinv x7, x7, cc // cmn x7, #0x1 // eor x23, x22, x7 // adcs x9, x9, x23 // eor x4, x4, x7 // adcs x6, x6, x4 // adcs x19, x19, x7 // adc x14, x14, x7 // subs x4, x15, x10 // cneg x4, x4, cc // csetm x7, cc // subs x23, x5, x13 // cneg x23, x23, cc // mul x22, x4, x23 // umulh x4, x4, x23 // cinv x7, x7, cc // cmn x7, #0x1 // eor x23, x22, x7 // adcs x3, x3, x23 // eor x4, x4, x7 // adcs x24, x24, x4 // adcs x9, x9, x7 // adcs x6, x6, x7 // adcs x19, x19, x7 // adc x14, x14, x7 // subs x17, x15, x17 // cneg x17, x17, cc // csetm x4, cc // subs 
x13, x20, x13 // cneg x13, x13, cc // mul x20, x17, x13 // umulh x17, x17, x13 // cinv x13, x4, cc // cmn x13, #0x1 // eor x20, x20, x13 // adcs x20, x24, x20 // eor x17, x17, x13 // adcs x17, x9, x17 // adcs x9, x6, x13 // adcs x6, x19, x13 // adc x13, x14, x13 // subs x21, x21, x10 // cneg x21, x21, cc // csetm x10, cc // subs x16, x5, x16 // cneg x16, x16, cc // mul x5, x21, x16 // umulh x21, x21, x16 // cinv x10, x10, cc // cmn x10, #0x1 // eor x16, x5, x10 // adcs x16, x20, x16 // eor x21, x21, x10 // adcs x21, x17, x21 // adcs x17, x9, x10 // adcs x5, x6, x10 // adc x10, x13, x10 // lsl x13, x8, #9 // extr x20, x11, x8, #55 // extr x8, x3, x11, #55 // extr x9, x16, x3, #55 // lsr x16, x16, #55 // stp x21, x17, [sp] // @slothy:writes=stack0 // stp x5, x10, [sp, #16] // @slothy:writes=stack16 // stp x13, x20, [sp, #32] // @slothy:writes=stack32 // stp x8, x9, [sp, #48] // @slothy:writes=stack48 // str x16, [sp, #64] // @slothy:writes=stack64 // ldp x21, x10, [x1, #32] // ldp x17, x13, [x1, #48] // ldp x16, x5, [x2, #32] // ldr q18, [x1, #32] // ldr q28, [x2, #32] // ldp x20, x8, [x2, #48] // movi v16.2D, #0x00000000ffffffff // uzp2 v7.4S, v28.4S, v28.4S // xtn v4.2S, v18.2D // xtn v1.2S, v28.2D // rev64 v28.4S, v28.4S // umull v27.2D, v4.2S, v1.2S // umull v29.2D, v4.2S, v7.2S // uzp2 v21.4S, v18.4S, v18.4S // mul v28.4S, v28.4S, v18.4S // usra v29.2D, v27.2D, #32 // umull v18.2D, v21.2S, v7.2S // uaddlp v28.2D, v28.4S // and v16.16B, v29.16B, v16.16B // umlal v16.2D, v21.2S, v1.2S // shl v28.2D, v28.2D, #32 // usra v18.2D, v29.2D, #32 // umlal v28.2D, v4.2S, v1.2S // usra v18.2D, v16.2D, #32 // mov x9, v28.d[0] // mov x6, v28.d[1] // mul x19, x17, x20 // mul x14, x13, x8 // mov x11, v18.d[0] // adds x6, x6, x11 // mov x11, v18.d[1] // adcs x19, x19, x11 // umulh x11, x17, x20 // adcs x14, x14, x11 // umulh x11, x13, x8 // adc x11, x11, xzr // adds x3, x6, x9 // adcs x6, x19, x6 // adcs x19, x14, x19 // adcs x14, x11, x14 // adc x11, xzr, x11 // adds x24, x6, x9 // adcs x4, x19, x3 // adcs x6, x14, x6 // adcs x19, x11, x19 // adcs x14, xzr, x14 // adc x11, xzr, x11 // subs x7, x17, x13 // cneg x7, x7, cc // csetm x23, cc // subs x22, x8, x20 // cneg x22, x22, cc // mul x12, x7, x22 // umulh x7, x7, x22 // cinv x23, x23, cc // cmn x23, #0x1 // eor x22, x12, x23 // adcs x19, x19, x22 // eor x7, x7, x23 // adcs x14, x14, x7 // adc x11, x11, x23 // subs x7, x21, x10 // cneg x7, x7, cc // csetm x23, cc // subs x22, x5, x16 // cneg x22, x22, cc // mul x12, x7, x22 // umulh x7, x7, x22 // cinv x23, x23, cc // cmn x23, #0x1 // eor x22, x12, x23 // adcs x3, x3, x22 // eor x7, x7, x23 // adcs x24, x24, x7 // adcs x4, x4, x23 // adcs x6, x6, x23 // adcs x19, x19, x23 // adcs x14, x14, x23 // adc x11, x11, x23 // subs x7, x10, x13 // cneg x7, x7, cc // csetm x23, cc // subs x22, x8, x5 // cneg x22, x22, cc // mul x12, x7, x22 // umulh x7, x7, x22 // cinv x23, x23, cc // cmn x23, #0x1 // eor x22, x12, x23 // adcs x6, x6, x22 // eor x7, x7, x23 // adcs x19, x19, x7 // adcs x14, x14, x23 // adc x11, x11, x23 // subs x7, x21, x17 // cneg x7, x7, cc // csetm x23, cc // subs x22, x20, x16 // cneg x22, x22, cc // mul x12, x7, x22 // umulh x7, x7, x22 // cinv x23, x23, cc // cmn x23, #0x1 // eor x22, x12, x23 // adcs x24, x24, x22 // eor x7, x7, x23 // adcs x4, x4, x7 // adcs x6, x6, x23 // adcs x19, x19, x23 // adcs x14, x14, x23 // adc x11, x11, x23 // subs x7, x21, x13 // cneg x7, x7, cc // csetm x23, cc // subs x22, x8, x16 // cneg x22, x22, cc // mul x12, x7, x22 // umulh x7, x7, x22 // cinv x23, 
x23, cc // cmn x23, #0x1 // eor x22, x12, x23 // adcs x4, x4, x22 // eor x7, x7, x23 // adcs x6, x6, x7 // adcs x19, x19, x23 // adcs x14, x14, x23 // adc x11, x11, x23 // subs x7, x10, x17 // cneg x7, x7, cc // csetm x23, cc // subs x22, x20, x5 // cneg x22, x22, cc // mul x12, x7, x22 // umulh x7, x7, x22 // cinv x23, x23, cc // cmn x23, #0x1 // eor x22, x12, x23 // adcs x4, x4, x22 // eor x7, x7, x23 // adcs x6, x6, x7 // adcs x19, x19, x23 // adcs x14, x14, x23 // adc x11, x11, x23 // ldp x7, x23, [sp] // @slothy:reads=stack0 // adds x9, x9, x7 // adcs x3, x3, x23 // stp x9, x3, [sp] // @slothy:writes=stack0 // ldp x9, x3, [sp, #16] // @slothy:reads=stack16 // adcs x9, x24, x9 // adcs x3, x4, x3 // stp x9, x3, [sp, #16] // @slothy:writes=stack16 // ldp x9, x3, [sp, #32] // @slothy:reads=stack32 // adcs x9, x6, x9 // adcs x6, x19, x3 // stp x9, x6, [sp, #32] // @slothy:writes=stack32 // ldp x9, x6, [sp, #48] // @slothy:reads=stack48 // adcs x9, x14, x9 // adcs x6, x11, x6 // stp x9, x6, [sp, #48] // @slothy:writes=stack48 // ldr x9, [sp, #64] // @slothy:reads=stack64 // adc x9, x9, xzr // str x9, [sp, #64] // @slothy:writes=stack64 // ldp x9, x6, [x1] // subs x21, x21, x9 // sbcs x10, x10, x6 // ldp x9, x6, [x1, #16] // sbcs x17, x17, x9 // sbcs x13, x13, x6 // csetm x9, cc // ldp x6, x19, [x2] // subs x16, x6, x16 // sbcs x5, x19, x5 // ldp x6, x19, [x2, #16] // sbcs x20, x6, x20 // sbcs x8, x19, x8 // csetm x6, cc // eor x21, x21, x9 // subs x21, x21, x9 // eor x10, x10, x9 // sbcs x10, x10, x9 // eor x17, x17, x9 // sbcs x17, x17, x9 // eor x13, x13, x9 // sbc x13, x13, x9 // eor x16, x16, x6 // subs x16, x16, x6 // eor x5, x5, x6 // sbcs x5, x5, x6 // eor x20, x20, x6 // sbcs x20, x20, x6 // eor x8, x8, x6 // sbc x8, x8, x6 // eor x9, x6, x9 // mul x6, x21, x16 // mul x19, x10, x5 // mul x14, x17, x20 // mul x11, x13, x8 // umulh x3, x21, x16 // adds x19, x19, x3 // umulh x3, x10, x5 // adcs x14, x14, x3 // umulh x3, x17, x20 // adcs x11, x11, x3 // umulh x3, x13, x8 // adc x3, x3, xzr // adds x24, x19, x6 // adcs x19, x14, x19 // adcs x14, x11, x14 // adcs x11, x3, x11 // adc x3, xzr, x3 // adds x4, x19, x6 // adcs x7, x14, x24 // adcs x19, x11, x19 // adcs x14, x3, x14 // adcs x11, xzr, x11 // adc x3, xzr, x3 // subs x23, x17, x13 // cneg x23, x23, cc // csetm x22, cc // subs x12, x8, x20 // cneg x12, x12, cc // mul x15, x23, x12 // umulh x23, x23, x12 // cinv x22, x22, cc // cmn x22, #0x1 // eor x12, x15, x22 // adcs x14, x14, x12 // eor x23, x23, x22 // adcs x11, x11, x23 // adc x3, x3, x22 // subs x23, x21, x10 // cneg x23, x23, cc // csetm x22, cc // subs x12, x5, x16 // cneg x12, x12, cc // mul x15, x23, x12 // umulh x23, x23, x12 // cinv x22, x22, cc // cmn x22, #0x1 // eor x12, x15, x22 // adcs x24, x24, x12 // eor x23, x23, x22 // adcs x4, x4, x23 // adcs x7, x7, x22 // adcs x19, x19, x22 // adcs x14, x14, x22 // adcs x11, x11, x22 // adc x3, x3, x22 // subs x23, x10, x13 // cneg x23, x23, cc // csetm x22, cc // subs x12, x8, x5 // cneg x12, x12, cc // mul x15, x23, x12 // umulh x23, x23, x12 // cinv x22, x22, cc // cmn x22, #0x1 // eor x12, x15, x22 // adcs x19, x19, x12 // eor x23, x23, x22 // adcs x14, x14, x23 // adcs x11, x11, x22 // adc x3, x3, x22 // subs x23, x21, x17 // cneg x23, x23, cc // csetm x22, cc // subs x12, x20, x16 // cneg x12, x12, cc // mul x15, x23, x12 // umulh x23, x23, x12 // cinv x22, x22, cc // cmn x22, #0x1 // eor x12, x15, x22 // adcs x4, x4, x12 // eor x23, x23, x22 // adcs x7, x7, x23 // adcs x19, x19, x22 // adcs x14, x14, x22 // adcs x11, 
x11, x22 // adc x3, x3, x22 // subs x21, x21, x13 // cneg x21, x21, cc // csetm x13, cc // subs x16, x8, x16 // cneg x16, x16, cc // mul x8, x21, x16 // umulh x21, x21, x16 // cinv x13, x13, cc // cmn x13, #0x1 // eor x16, x8, x13 // adcs x16, x7, x16 // eor x21, x21, x13 // adcs x21, x19, x21 // adcs x8, x14, x13 // adcs x19, x11, x13 // adc x13, x3, x13 // subs x10, x10, x17 // cneg x10, x10, cc // csetm x17, cc // subs x5, x20, x5 // cneg x5, x5, cc // mul x20, x10, x5 // umulh x10, x10, x5 // cinv x17, x17, cc // cmn x17, #0x1 // eor x5, x20, x17 // adcs x16, x16, x5 // eor x10, x10, x17 // adcs x21, x21, x10 // adcs x10, x8, x17 // adcs x5, x19, x17 // adc x17, x13, x17 // ldp x13, x20, [sp] // @slothy:reads=stack0 // ldp x8, x19, [sp, #16] // @slothy:reads=stack16 // eor x6, x6, x9 // adds x6, x6, x13 // eor x14, x24, x9 // adcs x14, x14, x20 // eor x11, x4, x9 // adcs x11, x11, x8 // eor x16, x16, x9 // adcs x16, x16, x19 // eor x21, x21, x9 // ldp x3, x24, [sp, #32] // @slothy:reads=stack32 // ldp x4, x7, [sp, #48] // @slothy:reads=stack48 // ldr x23, [sp, #64] // @slothy:reads=stack64 // adcs x21, x21, x3 // eor x10, x10, x9 // adcs x10, x10, x24 // eor x5, x5, x9 // adcs x5, x5, x4 // eor x17, x17, x9 // adcs x17, x17, x7 // adc x22, x23, xzr // adds x21, x21, x13 // adcs x10, x10, x20 // adcs x13, x5, x8 // adcs x17, x17, x19 // and x5, x9, #0x1ff // lsl x20, x6, #9 // orr x5, x20, x5 // adcs x5, x3, x5 // extr x20, x14, x6, #55 // adcs x20, x24, x20 // extr x8, x11, x14, #55 // adcs x8, x4, x8 // extr x9, x16, x11, #55 // adcs x9, x7, x9 // lsr x16, x16, #55 // adc x16, x16, x23 // ldr x6, [x2, #64] // ldp x19, x14, [x1] // and x11, x19, #0xfffffffffffff // mul x11, x6, x11 // ldr x3, [x1, #64] // ldp x24, x4, [x2] // and x7, x24, #0xfffffffffffff // mul x7, x3, x7 // add x11, x11, x7 // extr x19, x14, x19, #52 // and x19, x19, #0xfffffffffffff // mul x19, x6, x19 // extr x24, x4, x24, #52 // and x24, x24, #0xfffffffffffff // mul x24, x3, x24 // add x19, x19, x24 // lsr x24, x11, #52 // add x19, x19, x24 // lsl x11, x11, #12 // extr x11, x19, x11, #12 // adds x21, x21, x11 // ldp x11, x24, [x1, #16] // ldp x7, x23, [x2, #16] // extr x14, x11, x14, #40 // and x14, x14, #0xfffffffffffff // mul x14, x6, x14 // extr x4, x7, x4, #40 // and x4, x4, #0xfffffffffffff // mul x4, x3, x4 // add x14, x14, x4 // lsr x4, x19, #52 // add x14, x14, x4 // lsl x19, x19, #12 // extr x19, x14, x19, #24 // adcs x10, x10, x19 // extr x19, x24, x11, #28 // and x19, x19, #0xfffffffffffff // mul x19, x6, x19 // extr x11, x23, x7, #28 // and x11, x11, #0xfffffffffffff // mul x11, x3, x11 // add x19, x19, x11 // lsr x11, x14, #52 // add x19, x19, x11 // lsl x14, x14, #12 // extr x14, x19, x14, #36 // adcs x13, x13, x14 // and x14, x10, x13 // ldp x11, x4, [x1, #32] // ldp x7, x12, [x2, #32] // extr x24, x11, x24, #16 // and x24, x24, #0xfffffffffffff // mul x24, x6, x24 // extr x23, x7, x23, #16 // and x23, x23, #0xfffffffffffff // mul x23, x3, x23 // add x24, x24, x23 // lsl x23, x22, #48 // add x24, x24, x23 // lsr x23, x19, #52 // add x24, x24, x23 // lsl x19, x19, #12 // extr x19, x24, x19, #48 // adcs x17, x17, x19 // and x19, x14, x17 // lsr x14, x11, #4 // and x14, x14, #0xfffffffffffff // mul x14, x6, x14 // lsr x23, x7, #4 // and x23, x23, #0xfffffffffffff // mul x23, x3, x23 // add x14, x14, x23 // lsr x23, x24, #52 // add x14, x14, x23 // lsl x24, x24, #12 // extr x24, x14, x24, #60 // extr x11, x4, x11, #56 // and x11, x11, #0xfffffffffffff // mul x11, x6, x11 // extr x7, x12, x7, #56 // and 
x7, x7, #0xfffffffffffff // mul x7, x3, x7 // add x11, x11, x7 // lsr x14, x14, #52 // add x14, x11, x14 // lsl x11, x24, #8 // extr x11, x14, x11, #8 // adcs x5, x5, x11 // and x19, x19, x5 // ldp x11, x24, [x1, #48] // ldp x2, x7, [x2, #48] // extr x4, x11, x4, #44 // and x4, x4, #0xfffffffffffff // mul x4, x6, x4 // extr x23, x2, x12, #44 // and x23, x23, #0xfffffffffffff // mul x23, x3, x23 // add x4, x4, x23 // lsr x23, x14, #52 // add x4, x4, x23 // lsl x14, x14, #12 // extr x14, x4, x14, #20 // adcs x20, x20, x14 // and x19, x19, x20 // extr x14, x24, x11, #32 // and x14, x14, #0xfffffffffffff // mul x14, x6, x14 // extr x2, x7, x2, #32 // and x2, x2, #0xfffffffffffff // mul x2, x3, x2 // add x2, x14, x2 // lsr x14, x4, #52 // add x2, x2, x14 // lsl x14, x4, #12 // extr x14, x2, x14, #32 // adcs x8, x8, x14 // and x19, x19, x8 // lsr x14, x24, #20 // mul x14, x6, x14 // lsr x11, x7, #20 // mul x11, x3, x11 // add x14, x14, x11 // lsr x11, x2, #52 // add x14, x14, x11 // lsl x2, x2, #12 // extr x2, x14, x2, #44 // adcs x9, x9, x2 // and x2, x19, x9 // mul x6, x6, x3 // lsr x19, x14, #44 // add x6, x6, x19 // adc x16, x16, x6 // lsr x6, x16, #9 // orr x16, x16, #0xfffffffffffffe00 // cmp xzr, xzr // adcs xzr, x21, x6 // adcs xzr, x2, xzr // adcs xzr, x16, xzr // adcs x21, x21, x6 // adcs x10, x10, xzr // adcs x13, x13, xzr // adcs x17, x17, xzr // adcs x5, x5, xzr // adcs x20, x20, xzr // adcs x8, x8, xzr // adcs x9, x9, xzr // adc x16, x16, xzr // and x2, x21, #0x1ff // extr x21, x10, x21, #9 // extr x10, x13, x10, #9 // stp x21, x10, [x0] // @slothy:writes=buffer0 // extr x21, x17, x13, #9 // extr x10, x5, x17, #9 // stp x21, x10, [x0, #16] // @slothy:writes=buffer16 // extr x21, x20, x5, #9 // extr x10, x8, x20, #9 // stp x21, x10, [x0, #32] // @slothy:writes=buffer32 // extr x21, x9, x8, #9 // extr x10, x16, x9, #9 // stp x21, x10, [x0, #48] // @slothy:writes=buffer48 // str x2, [x0, #64] // @slothy:writes=buffer64 // add sp, sp, #80 // ldp x25, x26, [sp], #16 // ldp x23, x24, [sp], #16 // ldp x21, x22, [sp], #16 // ldp x19, x20, [sp], #16 // ret // // The bash script used for step 2 is as follows: // // # Store the assembly instructions except the last 'ret', // # callee-register store/loads and add/sub sp #80 as, say, 'input.S'. // export OUTPUTS="[hint_buffer0,hint_buffer16,hint_buffer32,hint_buffer48,hint_buffer64]" // export RESERVED_REGS="[x18,x27,x28,x29,x30,sp,q8,q9,q10,q11,q12,q13,q14,q15,v8,v9,v10,v11,v12,v13,v14,v15]" // <s2n-bignum>/tools/external/slothy.sh input.S my_out_dir // # my_out_dir/3.opt.s is the optimized assembly. Its output may differ // # from this file since the sequence is non-deterministically chosen. // # Please add 'ret' at the end of the output assembly. 
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mul_p521) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_p521) .text .balign 4 S2N_BN_SYMBOL(bignum_mul_p521): CFI_START // Save registers and make space for the temporary buffer CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_DEC_SP(80) ldr q6, [x2] ldp x10, x17, [x1, #16] ldr q4, [x1] ldr q16, [x2, #32] ldp x5, x20, [x2, #16] ldr q2, [x1, #32] movi v31.2D, #0x00000000ffffffff uzp2 v17.4S, v6.4S, v6.4S rev64 v7.4S, v6.4S ldp x15, x21, [x1] xtn v25.2S, v6.2D xtn v22.2S, v4.2D subs x14, x10, x17 mul v7.4S, v7.4S, v4.4S csetm x8, cc rev64 v3.4S, v16.4S xtn v1.2S, v16.2D ldp x13, x16, [x2] mul x26, x10, x5 uzp2 v16.4S, v16.4S, v16.4S uaddlp v26.2D, v7.4S cneg x4, x14, cc subs x24, x15, x21 xtn v5.2S, v2.2D mul v28.4S, v3.4S, v2.4S shl v26.2D, v26.2D, #32 mul x22, x17, x20 umull v20.2D, v22.2S, v25.2S uzp2 v6.4S, v4.4S, v4.4S umull v18.2D, v22.2S, v17.2S uzp2 v4.4S, v2.4S, v2.4S cneg x14, x24, cc csetm x7, cc umulh x11, x17, x20 usra v18.2D, v20.2D, #32 uaddlp v7.2D, v28.4S subs x19, x16, x13 umlal v26.2D, v22.2S, v25.2S cneg x19, x19, cc shl v28.2D, v7.2D, #32 umull v7.2D, v5.2S, v1.2S umull v30.2D, v5.2S, v16.2S cinv x6, x7, cc mul x25, x14, x19 umlal v28.2D, v5.2S, v1.2S umull v21.2D, v6.2S, v17.2S umulh x14, x14, x19 usra v30.2D, v7.2D, #32 subs x9, x20, x5 and v29.16B, v18.16B, v31.16B cinv x23, x8, cc mov x8, v26.d[1] cneg x12, x9, cc usra v21.2D, v18.2D, #32 umlal v29.2D, v6.2S, v25.2S mul x24, x4, x12 umull v18.2D, v4.2S, v16.2S movi v25.2D, #0x00000000ffffffff eor x9, x14, x6 and v7.16B, v30.16B, v25.16B usra v21.2D, v29.2D, #32 umulh x7, x10, x5 usra v18.2D, v30.2D, #32 umlal v7.2D, v4.2S, v1.2S mov x19, v21.d[0] umulh x3, x4, x12 mov x14, v21.d[1] usra v18.2D, v7.2D, #32 adds x4, x8, x19 mov x8, v26.d[0] adcs x19, x26, x14 adcs x14, x22, x7 adc x12, x11, xzr adds x11, x4, x8 adcs x26, x19, x4 adcs x22, x14, x19 eor x4, x24, x23 adcs x14, x12, x14 eor x7, x25, x6 adc x25, xzr, x12 eor x19, x3, x23 adds x3, x26, x8 adcs x24, x22, x11 adcs x12, x14, x26 adcs x22, x25, x22 adcs x26, xzr, x14 adc x14, xzr, x25 cmn x23, #0x1 adcs x22, x22, x4 adcs x19, x26, x19 adc x25, x14, x23 subs x14, x21, x17 cneg x23, x14, cc csetm x26, cc subs x4, x20, x16 cneg x14, x4, cc cinv x4, x26, cc cmn x6, #0x1 adcs x11, x11, x7 mul x7, x23, x14 adcs x9, x3, x9 adcs x26, x24, x6 umulh x3, x23, x14 adcs x14, x12, x6 adcs x22, x22, x6 adcs x12, x19, x6 extr x24, x11, x8, #55 adc x6, x25, x6 subs x19, x15, x17 csetm x17, cc cneg x23, x19, cc subs x19, x20, x13 lsl x25, x8, #9 eor x8, x7, x4 cneg x20, x19, cc umulh x7, x23, x20 cinv x19, x17, cc subs x17, x15, x10 csetm x15, cc stp x25, x24, [sp, #32] cneg x24, x17, cc mul x20, x23, x20 subs x25, x5, x13 cneg x13, x25, cc cinv x15, x15, cc mul x25, x24, x13 subs x21, x21, x10 csetm x23, cc cneg x17, x21, cc subs x21, x5, x16 umulh x13, x24, x13 cinv x10, x23, cc cneg x23, x21, cc cmn x4, #0x1 adcs x14, x14, x8 eor x21, x3, x4 adcs x21, x22, x21 eor x5, x20, x19 adcs x24, x12, x4 mul x12, x17, x23 eor x8, x25, x15 adc x25, x6, x4 cmn x15, #0x1 adcs x6, x9, x8 ldp x20, x8, [x2, #48] eor x9, x13, x15 adcs x4, x26, x9 umulh x26, x17, x23 ldp x17, x13, [x1, #48] adcs x9, x14, x15 adcs x16, x21, x15 adcs x14, x24, x15 eor x21, x7, x19 mul x23, x17, x20 adc x24, x25, x15 cmn x19, #0x1 adcs x7, x4, x5 adcs x9, x9, x21 umulh x3, x13, x8 adcs x16, x16, x19 adcs x22, x14, x19 eor x5, x12, x10 adc x12, x24, x19 cmn x10, #0x1 adcs x19, x7, x5 eor x14, x26, x10 mov 
x7, v28.d[1] adcs x24, x9, x14 extr x4, x19, x6, #55 umulh x15, x17, x20 mov x14, v18.d[1] lsr x9, x19, #55 adcs x5, x16, x10 mov x16, v18.d[0] adcs x19, x22, x10 str x9, [sp, #64] extr x25, x6, x11, #55 adc x21, x12, x10 subs x26, x17, x13 stp x25, x4, [sp, #48] stp x19, x21, [sp, #16] csetm x6, cc cneg x4, x26, cc mul x19, x13, x8 subs x11, x8, x20 stp x24, x5, [sp] ldp x21, x10, [x1, #32] cinv x12, x6, cc cneg x6, x11, cc mov x9, v28.d[0] umulh x25, x4, x6 adds x22, x7, x16 ldp x16, x5, [x2, #32] adcs x14, x23, x14 adcs x11, x19, x15 adc x24, x3, xzr adds x3, x22, x9 adcs x15, x14, x22 mul x22, x4, x6 adcs x6, x11, x14 adcs x4, x24, x11 eor x14, x25, x12 adc x26, xzr, x24 subs x7, x21, x10 csetm x23, cc cneg x19, x7, cc subs x24, x5, x16 cneg x11, x24, cc cinv x7, x23, cc adds x25, x15, x9 eor x23, x22, x12 adcs x22, x6, x3 mul x24, x19, x11 adcs x15, x4, x15 adcs x6, x26, x6 umulh x19, x19, x11 adcs x11, xzr, x4 adc x26, xzr, x26 cmn x12, #0x1 adcs x4, x6, x23 eor x6, x24, x7 adcs x14, x11, x14 adc x26, x26, x12 subs x11, x10, x13 cneg x12, x11, cc csetm x11, cc eor x19, x19, x7 subs x24, x8, x5 cinv x11, x11, cc cneg x24, x24, cc cmn x7, #0x1 adcs x3, x3, x6 mul x23, x12, x24 adcs x25, x25, x19 adcs x6, x22, x7 umulh x19, x12, x24 adcs x22, x15, x7 adcs x12, x4, x7 eor x24, x23, x11 adcs x4, x14, x7 adc x26, x26, x7 eor x19, x19, x11 subs x14, x21, x17 cneg x7, x14, cc csetm x14, cc subs x23, x20, x16 cinv x14, x14, cc cneg x23, x23, cc cmn x11, #0x1 adcs x22, x22, x24 mul x24, x7, x23 adcs x15, x12, x19 adcs x4, x4, x11 adc x19, x26, x11 umulh x26, x7, x23 subs x7, x21, x13 eor x11, x24, x14 cneg x23, x7, cc csetm x12, cc subs x7, x8, x16 cneg x7, x7, cc cinv x12, x12, cc cmn x14, #0x1 eor x26, x26, x14 adcs x11, x25, x11 mul x25, x23, x7 adcs x26, x6, x26 adcs x6, x22, x14 adcs x24, x15, x14 umulh x23, x23, x7 adcs x4, x4, x14 adc x22, x19, x14 eor x14, x25, x12 eor x7, x23, x12 cmn x12, #0x1 adcs x14, x26, x14 ldp x19, x25, [x2] ldp x15, x23, [x2, #16] adcs x26, x6, x7 adcs x24, x24, x12 adcs x7, x4, x12 adc x4, x22, x12 subs x19, x19, x16 ldp x16, x22, [x1] sbcs x6, x25, x5 ldp x12, x25, [x1, #16] sbcs x15, x15, x20 sbcs x8, x23, x8 csetm x23, cc subs x21, x21, x16 eor x16, x19, x23 sbcs x19, x10, x22 eor x22, x6, x23 eor x8, x8, x23 sbcs x6, x17, x12 sbcs x13, x13, x25 csetm x12, cc subs x10, x10, x17 cneg x17, x10, cc csetm x25, cc subs x5, x20, x5 eor x10, x19, x12 cneg x19, x5, cc eor x20, x15, x23 eor x21, x21, x12 cinv x15, x25, cc mul x25, x17, x19 subs x16, x16, x23 sbcs x5, x22, x23 eor x6, x6, x12 sbcs x20, x20, x23 eor x22, x13, x12 sbc x8, x8, x23 subs x21, x21, x12 umulh x19, x17, x19 sbcs x10, x10, x12 sbcs x17, x6, x12 eor x6, x19, x15 eor x19, x25, x15 umulh x25, x17, x20 sbc x13, x22, x12 cmn x15, #0x1 adcs x22, x14, x19 adcs x19, x26, x6 ldp x6, x26, [sp] adcs x14, x24, x15 umulh x24, x21, x16 adcs x7, x7, x15 adc x15, x4, x15 adds x4, x9, x6 eor x9, x23, x12 adcs x12, x3, x26 stp x4, x12, [sp] ldp x4, x26, [sp, #16] umulh x12, x10, x5 ldp x6, x23, [sp, #32] adcs x3, x11, x4 mul x4, x13, x8 adcs x26, x22, x26 ldp x22, x11, [sp, #48] adcs x6, x19, x6 stp x3, x26, [sp, #16] mul x26, x10, x5 adcs x14, x14, x23 stp x6, x14, [sp, #32] ldr x6, [sp, #64] adcs x22, x7, x22 adcs x14, x15, x11 mul x11, x17, x20 adc x19, x6, xzr stp x22, x14, [sp, #48] adds x14, x26, x24 str x19, [sp, #64] umulh x19, x13, x8 adcs x7, x11, x12 adcs x22, x4, x25 mul x6, x21, x16 adc x19, x19, xzr subs x11, x17, x13 cneg x12, x11, cc csetm x11, cc subs x24, x8, x20 cinv x11, x11, cc cneg x24, 
x24, cc adds x4, x14, x6 adcs x14, x7, x14 mul x3, x12, x24 adcs x7, x22, x7 adcs x22, x19, x22 umulh x12, x12, x24 adc x24, xzr, x19 adds x19, x14, x6 eor x3, x3, x11 adcs x26, x7, x4 adcs x14, x22, x14 adcs x25, x24, x7 adcs x23, xzr, x22 eor x7, x12, x11 adc x12, xzr, x24 subs x22, x21, x10 cneg x24, x22, cc csetm x22, cc subs x15, x5, x16 cinv x22, x22, cc cneg x15, x15, cc cmn x11, #0x1 adcs x3, x25, x3 mul x25, x24, x15 adcs x23, x23, x7 adc x11, x12, x11 subs x7, x10, x13 umulh x15, x24, x15 cneg x12, x7, cc csetm x7, cc eor x24, x25, x22 eor x25, x15, x22 cmn x22, #0x1 adcs x24, x4, x24 adcs x19, x19, x25 adcs x15, x26, x22 adcs x4, x14, x22 adcs x26, x3, x22 adcs x25, x23, x22 adc x23, x11, x22 subs x14, x21, x17 cneg x3, x14, cc csetm x11, cc subs x14, x8, x5 cneg x14, x14, cc cinv x7, x7, cc subs x13, x21, x13 cneg x21, x13, cc csetm x13, cc mul x22, x12, x14 subs x8, x8, x16 cinv x13, x13, cc umulh x14, x12, x14 cneg x12, x8, cc subs x8, x20, x16 cneg x8, x8, cc cinv x16, x11, cc eor x22, x22, x7 cmn x7, #0x1 eor x14, x14, x7 adcs x4, x4, x22 mul x11, x3, x8 adcs x22, x26, x14 adcs x14, x25, x7 eor x25, x24, x9 adc x26, x23, x7 umulh x7, x3, x8 subs x17, x10, x17 cneg x24, x17, cc eor x3, x11, x16 csetm x11, cc subs x20, x20, x5 cneg x5, x20, cc cinv x11, x11, cc cmn x16, #0x1 mul x17, x21, x12 eor x8, x7, x16 adcs x10, x19, x3 and x19, x9, #0x1ff adcs x20, x15, x8 umulh x15, x21, x12 eor x12, x10, x9 eor x8, x6, x9 adcs x6, x4, x16 adcs x4, x22, x16 adcs x21, x14, x16 adc x7, x26, x16 mul x10, x24, x5 cmn x13, #0x1 ldp x3, x14, [x1] eor x17, x17, x13 umulh x5, x24, x5 adcs x20, x20, x17 eor x17, x15, x13 adcs x16, x6, x17 eor x22, x10, x11 adcs x23, x4, x13 extr x10, x14, x3, #52 and x26, x3, #0xfffffffffffff adcs x24, x21, x13 and x15, x10, #0xfffffffffffff adc x6, x7, x13 cmn x11, #0x1 adcs x17, x20, x22 eor x4, x5, x11 ldp x21, x10, [sp] adcs x7, x16, x4 eor x16, x17, x9 eor x13, x7, x9 ldp x3, x17, [sp, #16] adcs x7, x23, x11 eor x23, x7, x9 ldp x5, x22, [sp, #32] adcs x7, x24, x11 adc x24, x6, x11 ldr x6, [x2, #64] adds x20, x8, x21 lsl x11, x20, #9 eor x4, x7, x9 orr x7, x11, x19 eor x8, x24, x9 adcs x11, x25, x10 mul x26, x6, x26 ldp x19, x24, [sp, #48] adcs x12, x12, x3 adcs x16, x16, x17 adcs x9, x13, x5 ldr x25, [sp, #64] extr x20, x11, x20, #55 adcs x13, x23, x22 adcs x4, x4, x19 extr x23, x12, x11, #55 adcs x8, x8, x24 adc x11, x25, xzr adds x21, x9, x21 extr x9, x16, x12, #55 lsr x12, x16, #55 adcs x10, x13, x10 mul x15, x6, x15 adcs x13, x4, x3 ldp x16, x4, [x2] ldr x3, [x1, #64] adcs x17, x8, x17 adcs x5, x5, x7 adcs x20, x22, x20 adcs x8, x19, x23 and x22, x16, #0xfffffffffffff ldp x19, x7, [x1, #16] adcs x9, x24, x9 extr x24, x4, x16, #52 adc x16, x12, x25 mul x22, x3, x22 and x25, x24, #0xfffffffffffff extr x14, x19, x14, #40 and x12, x14, #0xfffffffffffff extr x23, x7, x19, #28 ldp x19, x24, [x2, #16] mul x14, x3, x25 and x23, x23, #0xfffffffffffff add x22, x26, x22 lsl x11, x11, #48 lsr x26, x22, #52 lsl x25, x22, #12 mul x22, x6, x12 extr x12, x19, x4, #40 add x4, x15, x14 mul x15, x6, x23 add x4, x4, x26 extr x23, x24, x19, #28 ldp x14, x19, [x1, #32] and x26, x12, #0xfffffffffffff extr x12, x4, x25, #12 and x25, x23, #0xfffffffffffff adds x21, x21, x12 mul x12, x3, x26 extr x23, x14, x7, #16 and x23, x23, #0xfffffffffffff mul x7, x3, x25 ldp x25, x26, [x2, #32] add x12, x22, x12 extr x22, x19, x14, #56 mul x23, x6, x23 lsr x14, x14, #4 extr x24, x25, x24, #16 add x7, x15, x7 and x15, x24, #0xfffffffffffff and x22, x22, #0xfffffffffffff lsr x24, x4, #52 
mul x15, x3, x15 and x14, x14, #0xfffffffffffff add x12, x12, x24 lsl x24, x4, #12 lsr x4, x12, #52 extr x24, x12, x24, #24 adcs x10, x10, x24 lsl x24, x12, #12 add x12, x7, x4 mul x22, x6, x22 add x4, x23, x15 extr x7, x12, x24, #36 adcs x13, x13, x7 lsl x15, x12, #12 add x7, x4, x11 lsr x24, x12, #52 ldp x23, x11, [x2, #48] add x4, x7, x24 mul x12, x6, x14 extr x7, x26, x25, #56 extr x14, x4, x15, #48 and x2, x7, #0xfffffffffffff extr x24, x11, x23, #32 ldp x15, x7, [x1, #48] and x1, x24, #0xfffffffffffff lsr x24, x4, #52 mul x2, x3, x2 extr x26, x23, x26, #44 lsr x23, x25, #4 and x23, x23, #0xfffffffffffff and x25, x26, #0xfffffffffffff extr x26, x7, x15, #32 extr x19, x15, x19, #44 mul x23, x3, x23 and x15, x26, #0xfffffffffffff lsl x26, x4, #12 and x4, x19, #0xfffffffffffff lsr x11, x11, #20 mul x19, x6, x4 adcs x17, x17, x14 add x14, x22, x2 add x22, x12, x23 lsr x7, x7, #20 add x22, x22, x24 extr x2, x22, x26, #60 mul x24, x3, x25 lsr x22, x22, #52 add x14, x14, x22 lsl x22, x2, #8 extr x22, x14, x22, #8 lsl x2, x14, #12 mul x1, x3, x1 adcs x12, x5, x22 mul x5, x6, x15 and x26, x10, x13 and x4, x26, x17 add x23, x19, x24 lsr x14, x14, #52 mul x22, x3, x11 add x11, x23, x14 extr x25, x11, x2, #20 lsl x19, x11, #12 adcs x25, x20, x25 and x14, x4, x12 add x1, x5, x1 and x14, x14, x25 mul x15, x6, x7 add x26, x15, x22 mul x6, x6, x3 lsr x22, x11, #52 add x4, x1, x22 lsr x1, x4, #52 extr x3, x4, x19, #32 lsl x15, x4, #12 add x7, x26, x1 adcs x23, x8, x3 extr x20, x7, x15, #44 and x3, x14, x23 lsr x19, x7, #44 adcs x7, x9, x20 add x11, x6, x19 adc x4, x16, x11 lsr x14, x4, #9 cmp xzr, xzr and x15, x3, x7 orr x3, x4, #0xfffffffffffffe00 adcs xzr, x21, x14 adcs xzr, x15, xzr adcs xzr, x3, xzr adcs x11, x21, x14 and x14, x11, #0x1ff adcs x1, x10, xzr extr x10, x1, x11, #9 str x14, [x0, #64] adcs x14, x13, xzr extr x11, x14, x1, #9 adcs x1, x17, xzr extr x4, x1, x14, #9 stp x10, x11, [x0] adcs x11, x12, xzr extr x14, x11, x1, #9 adcs x10, x25, xzr extr x11, x10, x11, #9 stp x4, x14, [x0, #16] adcs x14, x23, xzr extr x10, x14, x10, #9 adcs x1, x7, xzr stp x11, x10, [x0, #32] extr x14, x1, x14, #9 adc x10, x3, xzr extr x26, x10, x1, #9 stp x14, x26, [x0, #48] // Restore regs and return CFI_INC_SP(80) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mul_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
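A minimal C caller sketch for bignum_mul_p521, using only the extern prototype quoted in the file's own header comment; the operand values and the main() harness are illustrative assumptions, not part of the source:

#include <stdint.h>
#include <stdio.h>

/* Prototype as documented in the header comment of bignum_mul_p521.S. */
extern void bignum_mul_p521(uint64_t z[static 9], const uint64_t x[static 9],
                            const uint64_t y[static 9]);

int main(void) {
    /* Operands are 9 little-endian 64-bit limbs, each input already
       reduced mod p_521 = 2^521 - 1.  Here x = 3 and y = 5. */
    uint64_t x[9] = {3}, y[9] = {5}, z[9];
    bignum_mul_p521(z, x, y);
    printf("z[0] = %llu\n", (unsigned long long)z[0]); /* expect 15 */
    return 0;
}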
repo_id: wlsfx/bnbb
size: 13,920
file_path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_montmul_p521_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery multiply, z := (x * y / 2^576) mod p_521 // Inputs x[9], y[9]; output z[9] // // extern void bignum_montmul_p521_alt(uint64_t z[static 9], // const uint64_t x[static 9], // const uint64_t y[static 9]); // // Does z := (x * y / 2^576) mod p_521, assuming x < p_521, y < p_521. This // means the Montgomery base is the "native size" 2^{9*64} = 2^576; since // p_521 is a Mersenne prime the basic modular multiplication bignum_mul_p521 // can be considered a Montgomery operation to base 2^521. // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p521_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montmul_p521_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p521_alt) .text .balign 4 #define z x0 #define x x1 #define y x2 // These are repeated mod 2 as we load paris of inputs #define a0 x3 #define a1 x4 #define a2 x3 #define a3 x4 #define a4 x3 #define a5 x4 #define a6 x3 #define a7 x4 #define a8 x3 #define b0 x5 #define b1 x6 #define b2 x7 #define b3 x8 #define b4 x9 #define b5 x10 #define b6 x11 #define b7 x12 #define b8 x13 #define t x14 // These repeat mod 11 as we stash some intermediate results in the // output buffer. #define u0 x15 #define u1 x16 #define u2 x17 #define u3 x19 #define u4 x20 #define u5 x21 #define u6 x22 #define u7 x23 #define u8 x24 #define u9 x25 #define u10 x26 #define u11 x15 #define u12 x16 #define u13 x17 #define u14 x19 #define u15 x20 #define u16 x21 S2N_BN_SYMBOL(bignum_montmul_p521_alt): CFI_START // Save more registers and make space for the temporary buffer CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_DEC_SP(64) // Load operands and set up row 0 = [u9;...;u0] = a0 * [b8;...;b0] ldp a0, a1, [x] ldp b0, b1, [y] mul u0, a0, b0 umulh u1, a0, b0 mul t, a0, b1 umulh u2, a0, b1 adds u1, u1, t ldp b2, b3, [y, #16] mul t, a0, b2 umulh u3, a0, b2 adcs u2, u2, t mul t, a0, b3 umulh u4, a0, b3 adcs u3, u3, t ldp b4, b5, [y, #32] mul t, a0, b4 umulh u5, a0, b4 adcs u4, u4, t mul t, a0, b5 umulh u6, a0, b5 adcs u5, u5, t ldp b6, b7, [y, #48] mul t, a0, b6 umulh u7, a0, b6 adcs u6, u6, t ldr b8, [y, #64] mul t, a0, b7 umulh u8, a0, b7 adcs u7, u7, t mul t, a0, b8 umulh u9, a0, b8 adcs u8, u8, t adc u9, u9, xzr // Row 1 = [u10;...;u0] = [a1;a0] * [b8;...;b0] mul t, a1, b0 adds u1, u1, t mul t, a1, b1 adcs u2, u2, t mul t, a1, b2 adcs u3, u3, t mul t, a1, b3 adcs u4, u4, t mul t, a1, b4 adcs u5, u5, t mul t, a1, b5 adcs u6, u6, t mul t, a1, b6 adcs u7, u7, t mul t, a1, b7 adcs u8, u8, t mul t, a1, b8 adcs u9, u9, t cset u10, cs umulh t, a1, b0 adds u2, u2, t umulh t, a1, b1 adcs u3, u3, t umulh t, a1, b2 adcs u4, u4, t umulh t, a1, b3 adcs u5, u5, t umulh t, a1, b4 adcs u6, u6, t umulh t, a1, b5 adcs u7, u7, t umulh t, a1, b6 adcs u8, u8, t umulh t, a1, b7 adcs u9, u9, t umulh t, a1, b8 adc u10, u10, t stp u0, u1, [sp] // Row 2 = [u11;...;u0] = [a2;a1;a0] * [b8;...;b0] ldp a2, a3, [x, #16] mul t, a2, b0 adds u2, u2, t mul t, a2, b1 adcs u3, u3, t mul t, a2, b2 adcs u4, u4, t mul t, a2, b3 adcs u5, u5, t mul t, a2, b4 adcs u6, u6, t mul t, a2, b5 adcs u7, u7, t mul t, a2, b6 adcs u8, u8, t mul t, a2, b7 adcs u9, u9, t mul t, a2, b8 adcs u10, u10, t cset u11, cs umulh t, a2, b0 
adds u3, u3, t umulh t, a2, b1 adcs u4, u4, t umulh t, a2, b2 adcs u5, u5, t umulh t, a2, b3 adcs u6, u6, t umulh t, a2, b4 adcs u7, u7, t umulh t, a2, b5 adcs u8, u8, t umulh t, a2, b6 adcs u9, u9, t umulh t, a2, b7 adcs u10, u10, t umulh t, a2, b8 adc u11, u11, t // Row 3 = [u12;...;u0] = [a3;a2;a1;a0] * [b8;...;b0] mul t, a3, b0 adds u3, u3, t mul t, a3, b1 adcs u4, u4, t mul t, a3, b2 adcs u5, u5, t mul t, a3, b3 adcs u6, u6, t mul t, a3, b4 adcs u7, u7, t mul t, a3, b5 adcs u8, u8, t mul t, a3, b6 adcs u9, u9, t mul t, a3, b7 adcs u10, u10, t mul t, a3, b8 adcs u11, u11, t cset u12, cs umulh t, a3, b0 adds u4, u4, t umulh t, a3, b1 adcs u5, u5, t umulh t, a3, b2 adcs u6, u6, t umulh t, a3, b3 adcs u7, u7, t umulh t, a3, b4 adcs u8, u8, t umulh t, a3, b5 adcs u9, u9, t umulh t, a3, b6 adcs u10, u10, t umulh t, a3, b7 adcs u11, u11, t umulh t, a3, b8 adc u12, u12, t stp u2, u3, [sp, #16] // Row 4 = [u13;...;u0] = [a4;a3;a2;a1;a0] * [b8;...;b0] ldp a4, a5, [x, #32] mul t, a4, b0 adds u4, u4, t mul t, a4, b1 adcs u5, u5, t mul t, a4, b2 adcs u6, u6, t mul t, a4, b3 adcs u7, u7, t mul t, a4, b4 adcs u8, u8, t mul t, a4, b5 adcs u9, u9, t mul t, a4, b6 adcs u10, u10, t mul t, a4, b7 adcs u11, u11, t mul t, a4, b8 adcs u12, u12, t cset u13, cs umulh t, a4, b0 adds u5, u5, t umulh t, a4, b1 adcs u6, u6, t umulh t, a4, b2 adcs u7, u7, t umulh t, a4, b3 adcs u8, u8, t umulh t, a4, b4 adcs u9, u9, t umulh t, a4, b5 adcs u10, u10, t umulh t, a4, b6 adcs u11, u11, t umulh t, a4, b7 adcs u12, u12, t umulh t, a4, b8 adc u13, u13, t // Row 5 = [u14;...;u0] = [a5;a4;a3;a2;a1;a0] * [b8;...;b0] mul t, a5, b0 adds u5, u5, t mul t, a5, b1 adcs u6, u6, t mul t, a5, b2 adcs u7, u7, t mul t, a5, b3 adcs u8, u8, t mul t, a5, b4 adcs u9, u9, t mul t, a5, b5 adcs u10, u10, t mul t, a5, b6 adcs u11, u11, t mul t, a5, b7 adcs u12, u12, t mul t, a5, b8 adcs u13, u13, t cset u14, cs umulh t, a5, b0 adds u6, u6, t umulh t, a5, b1 adcs u7, u7, t umulh t, a5, b2 adcs u8, u8, t umulh t, a5, b3 adcs u9, u9, t umulh t, a5, b4 adcs u10, u10, t umulh t, a5, b5 adcs u11, u11, t umulh t, a5, b6 adcs u12, u12, t umulh t, a5, b7 adcs u13, u13, t umulh t, a5, b8 adc u14, u14, t stp u4, u5, [sp, #32] // Row 6 = [u15;...;u0] = [a6;a5;a4;a3;a2;a1;a0] * [b8;...;b0] ldp a6, a7, [x, #48] mul t, a6, b0 adds u6, u6, t mul t, a6, b1 adcs u7, u7, t mul t, a6, b2 adcs u8, u8, t mul t, a6, b3 adcs u9, u9, t mul t, a6, b4 adcs u10, u10, t mul t, a6, b5 adcs u11, u11, t mul t, a6, b6 adcs u12, u12, t mul t, a6, b7 adcs u13, u13, t mul t, a6, b8 adcs u14, u14, t cset u15, cs umulh t, a6, b0 adds u7, u7, t umulh t, a6, b1 adcs u8, u8, t umulh t, a6, b2 adcs u9, u9, t umulh t, a6, b3 adcs u10, u10, t umulh t, a6, b4 adcs u11, u11, t umulh t, a6, b5 adcs u12, u12, t umulh t, a6, b6 adcs u13, u13, t umulh t, a6, b7 adcs u14, u14, t umulh t, a6, b8 adc u15, u15, t // Row 7 = [u16;...;u0] = [a7;a6;a5;a4;a3;a2;a1;a0] * [b8;...;b0] mul t, a7, b0 adds u7, u7, t mul t, a7, b1 adcs u8, u8, t mul t, a7, b2 adcs u9, u9, t mul t, a7, b3 adcs u10, u10, t mul t, a7, b4 adcs u11, u11, t mul t, a7, b5 adcs u12, u12, t mul t, a7, b6 adcs u13, u13, t mul t, a7, b7 adcs u14, u14, t mul t, a7, b8 adcs u15, u15, t cset u16, cs umulh t, a7, b0 adds u8, u8, t umulh t, a7, b1 adcs u9, u9, t umulh t, a7, b2 adcs u10, u10, t umulh t, a7, b3 adcs u11, u11, t umulh t, a7, b4 adcs u12, u12, t umulh t, a7, b5 adcs u13, u13, t umulh t, a7, b6 adcs u14, u14, t umulh t, a7, b7 adcs u15, u15, t umulh t, a7, b8 adc u16, u16, t stp u6, u7, [sp, #48] // Row 8 = [u16;...;u0] = 
[a8;a7;a6;a5;a4;a3;a2;a1;a0] * [b8;...;b0] ldr a8, [x, #64] mul t, a8, b0 adds u8, u8, t mul t, a8, b1 adcs u9, u9, t mul t, a8, b2 adcs u10, u10, t mul t, a8, b3 adcs u11, u11, t mul t, a8, b4 adcs u12, u12, t mul t, a8, b5 adcs u13, u13, t mul t, a8, b6 adcs u14, u14, t mul t, a8, b7 adcs u15, u15, t mul t, a8, b8 adc u16, u16, t umulh t, a8, b0 adds u9, u9, t umulh t, a8, b1 adcs u10, u10, t umulh t, a8, b2 adcs u11, u11, t umulh t, a8, b3 adcs u12, u12, t umulh t, a8, b4 adcs u13, u13, t umulh t, a8, b5 adcs u14, u14, t umulh t, a8, b6 adcs u15, u15, t umulh t, a8, b7 adc u16, u16, t // Now we have the full product, which we consider as // 2^521 * h + l. Form h + l + 1 subs xzr, xzr, xzr ldp b0, b1, [sp] extr t, u9, u8, #9 adcs b0, b0, t extr t, u10, u9, #9 adcs b1, b1, t ldp b2, b3, [sp, #16] extr t, u11, u10, #9 adcs b2, b2, t extr t, u12, u11, #9 adcs b3, b3, t ldp b4, b5, [sp, #32] extr t, u13, u12, #9 adcs b4, b4, t extr t, u14, u13, #9 adcs b5, b5, t ldp b6, b7, [sp, #48] extr t, u15, u14, #9 adcs b6, b6, t extr t, u16, u15, #9 adcs b7, b7, t orr b8, u8, #~0x1FF lsr t, u16, #9 adcs b8, b8, t // Now CF is set if h + l + 1 >= 2^521, which means it's already // the answer, while if ~CF the answer is h + l so we should subtract // 1 (all considered in 521 bits). Hence subtract ~CF and mask. sbcs b0, b0, xzr sbcs b1, b1, xzr sbcs b2, b2, xzr sbcs b3, b3, xzr sbcs b4, b4, xzr sbcs b5, b5, xzr sbcs b6, b6, xzr sbcs b7, b7, xzr sbc b8, b8, xzr and b8, b8, #0x1FF // So far, this has been the same as a pure modular multiplication. // Now finally the Montgomery ingredient, which is just a 521-bit // rotation by 9*64 - 521 = 55 bits right. lsl t, b0, #9 extr b0, b1, b0, #55 extr b1, b2, b1, #55 extr b2, b3, b2, #55 extr b3, b4, b3, #55 orr b8, b8, t extr b4, b5, b4, #55 extr b5, b6, b5, #55 extr b6, b7, b6, #55 extr b7, b8, b7, #55 lsr b8, b8, #55 // Store back digits of final result stp b0, b1, [z] stp b2, b3, [z, #16] stp b4, b5, [z, #32] stp b6, b7, [z, #48] str b8, [z, #64] // Restore registers CFI_INC_SP(64) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montmul_p521_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
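The reduction described in the comments above splits the full product as 2^521 * h + l and folds, since p_521 = 2^521 - 1 is Mersenne and 2^521 ≡ 1 (mod p_521); the Montgomery division by 2^576 then amounts to the 55-bit right rotation noted in the source (9*64 - 521 = 55). A toy C sketch of the same Mersenne folding on the small prime 2^13 - 1 follows; the modulus choice and the helper name mulmod_mersenne are illustrative assumptions, not part of the source:

#include <stdint.h>
#include <stdio.h>

#define K 13u
#define P ((1u << K) - 1u)   /* 2^13 - 1 = 8191, a Mersenne prime like p_521 */

/* Multiply mod a Mersenne prime by splitting the product at bit K and folding,
   mirroring the "2^521 * h + l" split performed by the assembly above. */
static uint32_t mulmod_mersenne(uint32_t x, uint32_t y) {
    uint32_t prod = x * y;                 /* < 2^26 for reduced inputs, no overflow */
    uint32_t h = prod >> K, l = prod & P;  /* prod = 2^K * h + l */
    uint32_t s = h + l;                    /* 2^K ≡ 1 (mod P), so prod ≡ h + l */
    return (s >= P) ? s - P : s;           /* s <= 2P - 1, one subtraction suffices */
}

int main(void) {
    printf("%u\n", mulmod_mersenne(1234, 5678));  /* equals (1234u * 5678u) % P */
    return 0;
}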
repo_id: wlsfx/bnbb
size: 40,612
file_path: .local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/p521_jmixadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point mixed addition on NIST curve P-521 in Jacobian coordinates // // extern void p521_jmixadd(uint64_t p3[static 27], const uint64_t p1[static 27], // const uint64_t p2[static 18]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples. // A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3). // The "mixed" part means that p2 only has x and y coordinates, with the // implicit z coordinate assumed to be the identity. It is assumed that // all the coordinates of the input points p1 and p2 are fully reduced // mod p_521, that the z coordinate of p1 is nonzero and that neither // p1 =~= p2 or p1 =~= -p2, where "=~=" means "represents the same affine // point as". // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jmixadd) S2N_BN_FUNCTION_TYPE_DIRECTIVE(p521_jmixadd) S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jmixadd) .text .balign 4 // Size of individual field elements #define NUMSIZE 72 // Stable homes for input arguments during main code sequence #define input_z x26 #define input_x x27 #define input_y x28 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // NSPACE is the total stack needed for these temporaries #define zp2 sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define tmp sp, #(NUMSIZE*6) #define NSPACE 512 // For the three field operations, we use subroutines not inlining. 
// Call local code very close to bignum_mul_p521 and bignum_sqr_p521 // and bignum_sub_p521 #define mul_p521(P0,P1,P2) \ add x0, P0 __LF \ add x1, P1 __LF \ add x2, P2 __LF \ CFI_BL(Lp521_jmixadd_local_mul_p521) #define sqr_p521(P0,P1) \ add x0, P0 __LF \ add x1, P1 __LF \ CFI_BL(Lp521_jmixadd_local_sqr_p521) #define sub_p521(P0,P1,P2) \ add x0, P0 __LF \ add x1, P1 __LF \ add x2, P2 __LF \ CFI_BL(Lp521_jmixadd_local_sub_p521) S2N_BN_SYMBOL(p521_jmixadd): CFI_START // Save regs and make room on stack for temporary variables CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_PUSH2(x27,x28) CFI_PUSH2(x29,x30) CFI_DEC_SP(NSPACE) // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 mov input_y, x2 // Main code, just a sequence of basic field operations sqr_p521(zp2,z_1) mul_p521(y2a,z_1,y_2) mul_p521(x2a,zp2,x_2) mul_p521(y2a,zp2,y2a) sub_p521(xd,x2a,x_1) sub_p521(yd,y2a,y_1) sqr_p521(zz,xd) sqr_p521(ww,yd) mul_p521(zzx1,zz,x_1) mul_p521(zzx2,zz,x2a) sub_p521(resx,ww,zzx1) sub_p521(t1,zzx2,zzx1) mul_p521(resz,xd,z_1) sub_p521(resx,resx,zzx2) sub_p521(t2,zzx1,resx) mul_p521(t1,t1,y_1) mul_p521(t2,yd,t2) sub_p521(resy,t2,t1) // Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence) ldp x0, x1, [z_1] orr x0, x0, x1 ldp x2, x3, [z_1+16] orr x2, x2, x3 ldp x4, x5, [z_1+32] orr x4, x4, x5 ldp x6, x7, [z_1+48] orr x6, x6, x7 ldr x8, [z_1+64] orr x0, x0, x2 orr x4, x4, x6 orr x0, x0, x4 orr x0, x0, x8 cmp x0, xzr // Multiplex: if p1 <> 0 just copy the computed result from the staging area. // If p1 = 0 then return the point p2 augmented with an extra z = 1 // coordinate, hence giving 0 + p2 = p2 for the final result. ldp x0, x1, [resx] ldp x20, x21, [x_2] csel x0, x0, x20, ne csel x1, x1, x21, ne ldp x2, x3, [resx+16] ldp x20, x21, [x_2+16] csel x2, x2, x20, ne csel x3, x3, x21, ne ldp x4, x5, [resx+32] ldp x20, x21, [x_2+32] csel x4, x4, x20, ne csel x5, x5, x21, ne ldp x6, x7, [resx+48] ldp x20, x21, [x_2+48] csel x6, x6, x20, ne csel x7, x7, x21, ne ldr x8, [resx+64] ldr x20, [x_2+64] csel x8, x8, x20, ne ldp x10, x11, [resy] ldp x20, x21, [y_2] csel x10, x10, x20, ne csel x11, x11, x21, ne ldp x12, x13, [resy+16] ldp x20, x21, [y_2+16] csel x12, x12, x20, ne csel x13, x13, x21, ne ldp x14, x15, [resy+32] ldp x20, x21, [y_2+32] csel x14, x14, x20, ne csel x15, x15, x21, ne ldp x16, x17, [resy+48] ldp x20, x21, [y_2+48] csel x16, x16, x20, ne csel x17, x17, x21, ne ldr x19, [resy+64] ldr x20, [y_2+64] csel x19, x19, x20, ne stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [x_3+32] stp x6, x7, [x_3+48] str x8, [x_3+64] stp x10, x11, [y_3] stp x12, x13, [y_3+16] stp x14, x15, [y_3+32] stp x16, x17, [y_3+48] str x19, [y_3+64] ldp x0, x1, [resz] mov x20, #1 csel x0, x0, x20, ne csel x1, x1, xzr, ne ldp x2, x3, [resz+16] csel x2, x2, xzr, ne csel x3, x3, xzr, ne ldp x4, x5, [resz+32] csel x4, x4, xzr, ne csel x5, x5, xzr, ne ldp x6, x7, [resz+48] csel x6, x6, xzr, ne csel x7, x7, xzr, ne ldr x8, [resz+64] csel x8, x8, xzr, ne stp x0, x1, [z_3] stp x2, x3, [z_3+16] stp x4, x5, [z_3+32] stp x6, x7, [z_3+48] str x8, [z_3+64] // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP2(x29,x30) CFI_POP2(x27,x28) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(p521_jmixadd) // Local versions of the three field operations, almost identical to // bignum_mul_p521, bignum_sqr_p521 and bignum_sub_p521 except for // avoiding all intial register save-restore, and in the case of // local_mul_p521, 
using the tmp buffer as temporary storage and // avoiding x26. S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jmixadd_local_mul_p521) Lp521_jmixadd_local_mul_p521: CFI_START ldp x3, x4, [x1] ldp x5, x6, [x1, #16] ldp x7, x8, [x2] ldp x9, x10, [x2, #16] mul x11, x3, x7 mul x15, x4, x8 mul x16, x5, x9 mul x17, x6, x10 umulh x19, x3, x7 adds x15, x15, x19 umulh x19, x4, x8 adcs x16, x16, x19 umulh x19, x5, x9 adcs x17, x17, x19 umulh x19, x6, x10 adc x19, x19, xzr adds x12, x15, x11 adcs x15, x16, x15 adcs x16, x17, x16 adcs x17, x19, x17 adc x19, xzr, x19 adds x13, x15, x11 adcs x14, x16, x12 adcs x15, x17, x15 adcs x16, x19, x16 adcs x17, xzr, x17 adc x19, xzr, x19 subs x24, x5, x6 cneg x24, x24, lo csetm x21, lo subs x22, x10, x9 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x16, x16, x23 eor x22, x22, x21 adcs x17, x17, x22 adc x19, x19, x21 subs x24, x3, x4 cneg x24, x24, lo csetm x21, lo subs x22, x8, x7 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x12, x12, x23 eor x22, x22, x21 adcs x13, x13, x22 adcs x14, x14, x21 adcs x15, x15, x21 adcs x16, x16, x21 adcs x17, x17, x21 adc x19, x19, x21 subs x24, x4, x6 cneg x24, x24, lo csetm x21, lo subs x22, x10, x8 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x15, x15, x23 eor x22, x22, x21 adcs x16, x16, x22 adcs x17, x17, x21 adc x19, x19, x21 subs x24, x3, x5 cneg x24, x24, lo csetm x21, lo subs x22, x9, x7 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x13, x13, x23 eor x22, x22, x21 adcs x14, x14, x22 adcs x15, x15, x21 adcs x16, x16, x21 adcs x17, x17, x21 adc x19, x19, x21 subs x24, x3, x6 cneg x24, x24, lo csetm x21, lo subs x22, x10, x7 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x14, x14, x23 eor x22, x22, x21 adcs x15, x15, x22 adcs x16, x16, x21 adcs x17, x17, x21 adc x19, x19, x21 subs x24, x4, x5 cneg x24, x24, lo csetm x21, lo subs x22, x9, x8 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x14, x14, x23 eor x22, x22, x21 adcs x15, x15, x22 adcs x16, x16, x21 adcs x17, x17, x21 adc x19, x19, x21 lsl x21, x11, #9 extr x11, x12, x11, #55 extr x12, x13, x12, #55 extr x13, x14, x13, #55 lsr x14, x14, #55 ldp x3, x4, [x1, #32] ldp x5, x6, [x1, #48] ldp x7, x8, [x2, #32] ldp x9, x10, [x2, #48] stp x15, x16, [tmp] stp x17, x19, [tmp+16] stp x21, x11, [tmp+32] stp x12, x13, [tmp+48] str x14, [tmp+64] mul x11, x3, x7 mul x15, x4, x8 mul x16, x5, x9 mul x17, x6, x10 umulh x19, x3, x7 adds x15, x15, x19 umulh x19, x4, x8 adcs x16, x16, x19 umulh x19, x5, x9 adcs x17, x17, x19 umulh x19, x6, x10 adc x19, x19, xzr adds x12, x15, x11 adcs x15, x16, x15 adcs x16, x17, x16 adcs x17, x19, x17 adc x19, xzr, x19 adds x13, x15, x11 adcs x14, x16, x12 adcs x15, x17, x15 adcs x16, x19, x16 adcs x17, xzr, x17 adc x19, xzr, x19 subs x24, x5, x6 cneg x24, x24, lo csetm x21, lo subs x22, x10, x9 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x16, x16, x23 eor x22, x22, x21 adcs x17, x17, x22 adc x19, x19, x21 subs x24, x3, x4 cneg x24, x24, lo csetm x21, lo subs x22, x8, x7 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x12, x12, x23 eor x22, x22, x21 adcs x13, x13, x22 
adcs x14, x14, x21 adcs x15, x15, x21 adcs x16, x16, x21 adcs x17, x17, x21 adc x19, x19, x21 subs x24, x4, x6 cneg x24, x24, lo csetm x21, lo subs x22, x10, x8 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x15, x15, x23 eor x22, x22, x21 adcs x16, x16, x22 adcs x17, x17, x21 adc x19, x19, x21 subs x24, x3, x5 cneg x24, x24, lo csetm x21, lo subs x22, x9, x7 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x13, x13, x23 eor x22, x22, x21 adcs x14, x14, x22 adcs x15, x15, x21 adcs x16, x16, x21 adcs x17, x17, x21 adc x19, x19, x21 subs x24, x3, x6 cneg x24, x24, lo csetm x21, lo subs x22, x10, x7 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x14, x14, x23 eor x22, x22, x21 adcs x15, x15, x22 adcs x16, x16, x21 adcs x17, x17, x21 adc x19, x19, x21 subs x24, x4, x5 cneg x24, x24, lo csetm x21, lo subs x22, x9, x8 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x14, x14, x23 eor x22, x22, x21 adcs x15, x15, x22 adcs x16, x16, x21 adcs x17, x17, x21 adc x19, x19, x21 ldp x23, x22, [tmp] adds x11, x11, x23 adcs x12, x12, x22 stp x11, x12, [tmp] ldp x23, x22, [tmp+16] adcs x13, x13, x23 adcs x14, x14, x22 stp x13, x14, [tmp+16] ldp x23, x22, [tmp+32] adcs x15, x15, x23 adcs x16, x16, x22 stp x15, x16, [tmp+32] ldp x23, x22, [tmp+48] adcs x17, x17, x23 adcs x19, x19, x22 stp x17, x19, [tmp+48] ldr x21, [tmp+64] adc x21, x21, xzr str x21, [tmp+64] ldp x23, x22, [x1] subs x3, x3, x23 sbcs x4, x4, x22 ldp x23, x22, [x1, #16] sbcs x5, x5, x23 sbcs x6, x6, x22 csetm x24, lo ldp x23, x22, [x2] subs x7, x23, x7 sbcs x8, x22, x8 ldp x23, x22, [x2, #16] sbcs x9, x23, x9 sbcs x10, x22, x10 csetm x25, lo eor x3, x3, x24 subs x3, x3, x24 eor x4, x4, x24 sbcs x4, x4, x24 eor x5, x5, x24 sbcs x5, x5, x24 eor x6, x6, x24 sbc x6, x6, x24 eor x7, x7, x25 subs x7, x7, x25 eor x8, x8, x25 sbcs x8, x8, x25 eor x9, x9, x25 sbcs x9, x9, x25 eor x10, x10, x25 sbc x10, x10, x25 eor x25, x25, x24 mul x11, x3, x7 mul x15, x4, x8 mul x16, x5, x9 mul x17, x6, x10 umulh x19, x3, x7 adds x15, x15, x19 umulh x19, x4, x8 adcs x16, x16, x19 umulh x19, x5, x9 adcs x17, x17, x19 umulh x19, x6, x10 adc x19, x19, xzr adds x12, x15, x11 adcs x15, x16, x15 adcs x16, x17, x16 adcs x17, x19, x17 adc x19, xzr, x19 adds x13, x15, x11 adcs x14, x16, x12 adcs x15, x17, x15 adcs x16, x19, x16 adcs x17, xzr, x17 adc x19, xzr, x19 subs x24, x5, x6 cneg x24, x24, lo csetm x21, lo subs x22, x10, x9 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x16, x16, x23 eor x22, x22, x21 adcs x17, x17, x22 adc x19, x19, x21 subs x24, x3, x4 cneg x24, x24, lo csetm x21, lo subs x22, x8, x7 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x12, x12, x23 eor x22, x22, x21 adcs x13, x13, x22 adcs x14, x14, x21 adcs x15, x15, x21 adcs x16, x16, x21 adcs x17, x17, x21 adc x19, x19, x21 subs x24, x4, x6 cneg x24, x24, lo csetm x21, lo subs x22, x10, x8 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x15, x15, x23 eor x22, x22, x21 adcs x16, x16, x22 adcs x17, x17, x21 adc x19, x19, x21 subs x24, x3, x5 cneg x24, x24, lo csetm x21, lo subs x22, x9, x7 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, 
x23, x21 adcs x13, x13, x23 eor x22, x22, x21 adcs x14, x14, x22 adcs x15, x15, x21 adcs x16, x16, x21 adcs x17, x17, x21 adc x19, x19, x21 subs x24, x3, x6 cneg x24, x24, lo csetm x21, lo subs x22, x10, x7 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x14, x14, x23 eor x22, x22, x21 adcs x15, x15, x22 adcs x16, x16, x21 adcs x17, x17, x21 adc x19, x19, x21 subs x24, x4, x5 cneg x24, x24, lo csetm x21, lo subs x22, x9, x8 cneg x22, x22, lo mul x23, x24, x22 umulh x22, x24, x22 cinv x21, x21, lo cmn x21, #1 eor x23, x23, x21 adcs x14, x14, x23 eor x22, x22, x21 adcs x15, x15, x22 adcs x16, x16, x21 adcs x17, x17, x21 adc x19, x19, x21 ldp x3, x4, [tmp] ldp x5, x6, [tmp+16] eor x11, x11, x25 adds x11, x11, x3 eor x12, x12, x25 adcs x12, x12, x4 eor x13, x13, x25 adcs x13, x13, x5 eor x14, x14, x25 adcs x14, x14, x6 eor x15, x15, x25 ldp x7, x8, [tmp+32] ldp x9, x10, [tmp+48] ldr x20, [tmp+64] adcs x15, x15, x7 eor x16, x16, x25 adcs x16, x16, x8 eor x17, x17, x25 adcs x17, x17, x9 eor x19, x19, x25 adcs x19, x19, x10 adc x21, x20, xzr adds x15, x15, x3 adcs x16, x16, x4 adcs x17, x17, x5 adcs x19, x19, x6 and x25, x25, #0x1ff lsl x24, x11, #9 orr x24, x24, x25 adcs x7, x7, x24 extr x24, x12, x11, #55 adcs x8, x8, x24 extr x24, x13, x12, #55 adcs x9, x9, x24 extr x24, x14, x13, #55 adcs x10, x10, x24 lsr x24, x14, #55 adc x20, x24, x20 ldr x6, [x2, #64] ldp x3, x4, [x1] and x23, x3, #0xfffffffffffff mul x23, x6, x23 ldr x14, [x1, #64] ldp x11, x12, [x2] and x24, x11, #0xfffffffffffff mul x24, x14, x24 add x23, x23, x24 extr x24, x4, x3, #52 and x24, x24, #0xfffffffffffff mul x22, x6, x24 extr x24, x12, x11, #52 and x24, x24, #0xfffffffffffff mul x24, x14, x24 add x22, x22, x24 lsr x24, x23, #52 add x22, x22, x24 lsl x23, x23, #12 extr x24, x22, x23, #12 adds x15, x15, x24 ldp x5, x3, [x1, #16] ldp x13, x11, [x2, #16] extr x24, x5, x4, #40 and x24, x24, #0xfffffffffffff mul x23, x6, x24 extr x24, x13, x12, #40 and x24, x24, #0xfffffffffffff mul x24, x14, x24 add x23, x23, x24 lsr x24, x22, #52 add x23, x23, x24 lsl x22, x22, #12 extr x24, x23, x22, #24 adcs x16, x16, x24 extr x24, x3, x5, #28 and x24, x24, #0xfffffffffffff mul x22, x6, x24 extr x24, x11, x13, #28 and x24, x24, #0xfffffffffffff mul x24, x14, x24 add x22, x22, x24 lsr x24, x23, #52 add x22, x22, x24 lsl x23, x23, #12 extr x24, x22, x23, #36 adcs x17, x17, x24 and x25, x16, x17 ldp x4, x5, [x1, #32] ldp x12, x13, [x2, #32] extr x24, x4, x3, #16 and x24, x24, #0xfffffffffffff mul x23, x6, x24 extr x24, x12, x11, #16 and x24, x24, #0xfffffffffffff mul x24, x14, x24 add x23, x23, x24 lsl x21, x21, #48 add x23, x23, x21 lsr x24, x22, #52 add x23, x23, x24 lsl x22, x22, #12 extr x24, x23, x22, #48 adcs x19, x19, x24 and x25, x25, x19 lsr x24, x4, #4 and x24, x24, #0xfffffffffffff mul x22, x6, x24 lsr x24, x12, #4 and x24, x24, #0xfffffffffffff mul x24, x14, x24 add x22, x22, x24 lsr x24, x23, #52 add x22, x22, x24 lsl x23, x23, #12 extr x21, x22, x23, #60 extr x24, x5, x4, #56 and x24, x24, #0xfffffffffffff mul x23, x6, x24 extr x24, x13, x12, #56 and x24, x24, #0xfffffffffffff mul x24, x14, x24 add x23, x23, x24 lsr x24, x22, #52 add x23, x23, x24 lsl x21, x21, #8 extr x24, x23, x21, #8 adcs x7, x7, x24 and x25, x25, x7 ldp x3, x4, [x1, #48] ldp x11, x12, [x2, #48] extr x24, x3, x5, #44 and x24, x24, #0xfffffffffffff mul x22, x6, x24 extr x24, x11, x13, #44 and x24, x24, #0xfffffffffffff mul x24, x14, x24 add x22, x22, x24 lsr x24, x23, #52 add x22, x22, x24 lsl x23, x23, 
#12 extr x24, x22, x23, #20 adcs x8, x8, x24 and x25, x25, x8 extr x24, x4, x3, #32 and x24, x24, #0xfffffffffffff mul x23, x6, x24 extr x24, x12, x11, #32 and x24, x24, #0xfffffffffffff mul x24, x14, x24 add x23, x23, x24 lsr x24, x22, #52 add x23, x23, x24 lsl x22, x22, #12 extr x24, x23, x22, #32 adcs x9, x9, x24 and x25, x25, x9 lsr x24, x4, #20 mul x22, x6, x24 lsr x24, x12, #20 mul x24, x14, x24 add x22, x22, x24 lsr x24, x23, #52 add x22, x22, x24 lsl x23, x23, #12 extr x24, x22, x23, #44 adcs x10, x10, x24 and x25, x25, x10 mul x24, x6, x14 lsr x22, x22, #44 add x24, x24, x22 adc x20, x20, x24 lsr x22, x20, #9 orr x20, x20, #0xfffffffffffffe00 cmp xzr, xzr adcs xzr, x15, x22 adcs xzr, x25, xzr adcs xzr, x20, xzr adcs x15, x15, x22 adcs x16, x16, xzr adcs x17, x17, xzr adcs x19, x19, xzr adcs x7, x7, xzr adcs x8, x8, xzr adcs x9, x9, xzr adcs x10, x10, xzr adc x20, x20, xzr and x22, x15, #0x1ff extr x15, x16, x15, #9 extr x16, x17, x16, #9 stp x15, x16, [x0] extr x17, x19, x17, #9 extr x19, x7, x19, #9 stp x17, x19, [x0, #16] extr x7, x8, x7, #9 extr x8, x9, x8, #9 stp x7, x8, [x0, #32] extr x9, x10, x9, #9 extr x10, x20, x10, #9 stp x9, x10, [x0, #48] str x22, [x0, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jmixadd_local_mul_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jmixadd_local_sqr_p521) Lp521_jmixadd_local_sqr_p521: CFI_START ldp x2, x3, [x1] ldp x4, x5, [x1, #16] ldp x6, x7, [x1, #32] ldp x8, x9, [x1, #48] mul x12, x6, x8 mul x17, x7, x9 umulh x22, x6, x8 subs x23, x6, x7 cneg x23, x23, cc csetm x11, cc subs x10, x9, x8 cneg x10, x10, cc mul x16, x23, x10 umulh x10, x23, x10 cinv x11, x11, cc eor x16, x16, x11 eor x10, x10, x11 adds x13, x12, x22 adc x22, x22, xzr umulh x23, x7, x9 adds x13, x13, x17 adcs x22, x22, x23 adc x23, x23, xzr adds x22, x22, x17 adc x23, x23, xzr cmn x11, #0x1 adcs x13, x13, x16 adcs x22, x22, x10 adc x23, x23, x11 adds x12, x12, x12 adcs x13, x13, x13 adcs x22, x22, x22 adcs x23, x23, x23 adc x19, xzr, xzr mul x10, x6, x6 mul x16, x7, x7 mul x21, x6, x7 umulh x11, x6, x6 umulh x17, x7, x7 umulh x20, x6, x7 adds x11, x11, x21 adcs x16, x16, x20 adc x17, x17, xzr adds x11, x11, x21 adcs x16, x16, x20 adc x17, x17, xzr adds x12, x12, x16 adcs x13, x13, x17 adcs x22, x22, xzr adcs x23, x23, xzr adc x19, x19, xzr mul x14, x8, x8 mul x16, x9, x9 mul x21, x8, x9 umulh x15, x8, x8 umulh x17, x9, x9 umulh x20, x8, x9 adds x15, x15, x21 adcs x16, x16, x20 adc x17, x17, xzr adds x15, x15, x21 adcs x16, x16, x20 adc x17, x17, xzr adds x14, x14, x22 adcs x15, x15, x23 adcs x16, x16, x19 adc x17, x17, xzr ldr x19, [x1, #64] add x23, x19, x19 mul x19, x19, x19 and x21, x2, #0xfffffffffffff mul x21, x23, x21 extr x20, x3, x2, #52 and x20, x20, #0xfffffffffffff mul x20, x23, x20 lsr x22, x21, #52 add x20, x20, x22 lsl x21, x21, #12 extr x22, x20, x21, #12 adds x10, x10, x22 extr x21, x4, x3, #40 and x21, x21, #0xfffffffffffff mul x21, x23, x21 lsr x22, x20, #52 add x21, x21, x22 lsl x20, x20, #12 extr x22, x21, x20, #24 adcs x11, x11, x22 extr x20, x5, x4, #28 and x20, x20, #0xfffffffffffff mul x20, x23, x20 lsr x22, x21, #52 add x20, x20, x22 lsl x21, x21, #12 extr x22, x20, x21, #36 adcs x12, x12, x22 extr x21, x6, x5, #16 and x21, x21, #0xfffffffffffff mul x21, x23, x21 lsr x22, x20, #52 add x21, x21, x22 lsl x20, x20, #12 extr x22, x21, x20, #48 adcs x13, x13, x22 lsr x20, x6, #4 and x20, x20, #0xfffffffffffff mul x20, x23, x20 lsr x22, x21, #52 add x20, x20, x22 lsl x21, x21, #12 extr x24, x20, x21, #60 extr x21, x7, x6, #56 and x21, x21, #0xfffffffffffff mul 
x21, x23, x21 lsr x22, x20, #52 add x21, x21, x22 lsl x24, x24, #8 extr x22, x21, x24, #8 adcs x14, x14, x22 extr x20, x8, x7, #44 and x20, x20, #0xfffffffffffff mul x20, x23, x20 lsr x22, x21, #52 add x20, x20, x22 lsl x21, x21, #12 extr x22, x20, x21, #20 adcs x15, x15, x22 extr x21, x9, x8, #32 and x21, x21, #0xfffffffffffff mul x21, x23, x21 lsr x22, x20, #52 add x21, x21, x22 lsl x20, x20, #12 extr x22, x21, x20, #32 adcs x16, x16, x22 lsr x20, x9, #20 mul x20, x23, x20 lsr x22, x21, #52 add x20, x20, x22 lsl x21, x21, #12 extr x22, x20, x21, #44 adcs x17, x17, x22 lsr x20, x20, #44 adc x19, x19, x20 extr x21, x11, x10, #9 extr x20, x12, x11, #9 stp x21, x20, [x0] extr x21, x13, x12, #9 extr x20, x14, x13, #9 stp x21, x20, [x0, #16] extr x21, x15, x14, #9 extr x20, x16, x15, #9 stp x21, x20, [x0, #32] extr x21, x17, x16, #9 extr x20, x19, x17, #9 stp x21, x20, [x0, #48] and x22, x10, #0x1ff lsr x19, x19, #9 add x22, x22, x19 str x22, [x0, #64] mul x12, x2, x4 mul x17, x3, x5 umulh x22, x2, x4 subs x23, x2, x3 cneg x23, x23, cc csetm x11, cc subs x10, x5, x4 cneg x10, x10, cc mul x16, x23, x10 umulh x10, x23, x10 cinv x11, x11, cc eor x16, x16, x11 eor x10, x10, x11 adds x13, x12, x22 adc x22, x22, xzr umulh x23, x3, x5 adds x13, x13, x17 adcs x22, x22, x23 adc x23, x23, xzr adds x22, x22, x17 adc x23, x23, xzr cmn x11, #0x1 adcs x13, x13, x16 adcs x22, x22, x10 adc x23, x23, x11 adds x12, x12, x12 adcs x13, x13, x13 adcs x22, x22, x22 adcs x23, x23, x23 adc x19, xzr, xzr mul x10, x2, x2 mul x16, x3, x3 mul x21, x2, x3 umulh x11, x2, x2 umulh x17, x3, x3 umulh x20, x2, x3 adds x11, x11, x21 adcs x16, x16, x20 adc x17, x17, xzr adds x11, x11, x21 adcs x16, x16, x20 adc x17, x17, xzr adds x12, x12, x16 adcs x13, x13, x17 adcs x22, x22, xzr adcs x23, x23, xzr adc x19, x19, xzr mul x14, x4, x4 mul x16, x5, x5 mul x21, x4, x5 umulh x15, x4, x4 umulh x17, x5, x5 umulh x20, x4, x5 adds x15, x15, x21 adcs x16, x16, x20 adc x17, x17, xzr adds x15, x15, x21 adcs x16, x16, x20 adc x17, x17, xzr adds x14, x14, x22 adcs x15, x15, x23 adcs x16, x16, x19 adc x17, x17, xzr ldp x21, x20, [x0] adds x21, x21, x10 adcs x20, x20, x11 stp x21, x20, [x0] ldp x21, x20, [x0, #16] adcs x21, x21, x12 adcs x20, x20, x13 stp x21, x20, [x0, #16] ldp x21, x20, [x0, #32] adcs x21, x21, x14 adcs x20, x20, x15 stp x21, x20, [x0, #32] ldp x21, x20, [x0, #48] adcs x21, x21, x16 adcs x20, x20, x17 stp x21, x20, [x0, #48] ldr x22, [x0, #64] adc x22, x22, xzr str x22, [x0, #64] mul x10, x2, x6 mul x14, x3, x7 mul x15, x4, x8 mul x16, x5, x9 umulh x17, x2, x6 adds x14, x14, x17 umulh x17, x3, x7 adcs x15, x15, x17 umulh x17, x4, x8 adcs x16, x16, x17 umulh x17, x5, x9 adc x17, x17, xzr adds x11, x14, x10 adcs x14, x15, x14 adcs x15, x16, x15 adcs x16, x17, x16 adc x17, xzr, x17 adds x12, x14, x10 adcs x13, x15, x11 adcs x14, x16, x14 adcs x15, x17, x15 adcs x16, xzr, x16 adc x17, xzr, x17 subs x22, x4, x5 cneg x22, x22, cc csetm x19, cc subs x20, x9, x8 cneg x20, x20, cc mul x21, x22, x20 umulh x20, x22, x20 cinv x19, x19, cc cmn x19, #0x1 eor x21, x21, x19 adcs x15, x15, x21 eor x20, x20, x19 adcs x16, x16, x20 adc x17, x17, x19 subs x22, x2, x3 cneg x22, x22, cc csetm x19, cc subs x20, x7, x6 cneg x20, x20, cc mul x21, x22, x20 umulh x20, x22, x20 cinv x19, x19, cc cmn x19, #0x1 eor x21, x21, x19 adcs x11, x11, x21 eor x20, x20, x19 adcs x12, x12, x20 adcs x13, x13, x19 adcs x14, x14, x19 adcs x15, x15, x19 adcs x16, x16, x19 adc x17, x17, x19 subs x22, x3, x5 cneg x22, x22, cc csetm x19, cc subs x20, x9, x7 cneg x20, x20, 
cc mul x21, x22, x20 umulh x20, x22, x20 cinv x19, x19, cc cmn x19, #0x1 eor x21, x21, x19 adcs x14, x14, x21 eor x20, x20, x19 adcs x15, x15, x20 adcs x16, x16, x19 adc x17, x17, x19 subs x22, x2, x4 cneg x22, x22, cc csetm x19, cc subs x20, x8, x6 cneg x20, x20, cc mul x21, x22, x20 umulh x20, x22, x20 cinv x19, x19, cc cmn x19, #0x1 eor x21, x21, x19 adcs x12, x12, x21 eor x20, x20, x19 adcs x13, x13, x20 adcs x14, x14, x19 adcs x15, x15, x19 adcs x16, x16, x19 adc x17, x17, x19 subs x22, x2, x5 cneg x22, x22, cc csetm x19, cc subs x20, x9, x6 cneg x20, x20, cc mul x21, x22, x20 umulh x20, x22, x20 cinv x19, x19, cc cmn x19, #0x1 eor x21, x21, x19 adcs x13, x13, x21 eor x20, x20, x19 adcs x14, x14, x20 adcs x15, x15, x19 adcs x16, x16, x19 adc x17, x17, x19 subs x22, x3, x4 cneg x22, x22, cc csetm x19, cc subs x20, x8, x7 cneg x20, x20, cc mul x21, x22, x20 umulh x20, x22, x20 cinv x19, x19, cc cmn x19, #0x1 eor x21, x21, x19 adcs x13, x13, x21 eor x20, x20, x19 adcs x14, x14, x20 adcs x15, x15, x19 adcs x16, x16, x19 adc x17, x17, x19 ldp x21, x20, [x0] extr x2, x15, x14, #8 adds x2, x2, x21 extr x3, x16, x15, #8 adcs x3, x3, x20 ldp x21, x20, [x0, #16] extr x4, x17, x16, #8 adcs x4, x4, x21 and x22, x3, x4 lsr x5, x17, #8 adcs x5, x5, x20 and x22, x22, x5 ldp x21, x20, [x0, #32] lsl x6, x10, #1 adcs x6, x6, x21 and x22, x22, x6 extr x7, x11, x10, #63 adcs x7, x7, x20 and x22, x22, x7 ldp x21, x20, [x0, #48] extr x8, x12, x11, #63 adcs x8, x8, x21 and x22, x22, x8 extr x9, x13, x12, #63 adcs x9, x9, x20 and x22, x22, x9 ldr x21, [x0, #64] extr x10, x14, x13, #63 and x10, x10, #0x1ff adc x10, x21, x10 lsr x20, x10, #9 orr x10, x10, #0xfffffffffffffe00 cmp xzr, xzr adcs xzr, x2, x20 adcs xzr, x22, xzr adcs xzr, x10, xzr adcs x2, x2, x20 adcs x3, x3, xzr adcs x4, x4, xzr adcs x5, x5, xzr adcs x6, x6, xzr adcs x7, x7, xzr adcs x8, x8, xzr adcs x9, x9, xzr adc x10, x10, xzr and x10, x10, #0x1ff stp x2, x3, [x0] stp x4, x5, [x0, #16] stp x6, x7, [x0, #32] stp x8, x9, [x0, #48] str x10, [x0, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jmixadd_local_sqr_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jmixadd_local_sub_p521) Lp521_jmixadd_local_sub_p521: CFI_START ldp x5, x6, [x1] ldp x4, x3, [x2] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x1, #16] ldp x4, x3, [x2, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [x1, #32] ldp x4, x3, [x2, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 ldp x11, x12, [x1, #48] ldp x4, x3, [x2, #48] sbcs x11, x11, x4 sbcs x12, x12, x3 ldr x13, [x1, #64] ldr x4, [x2, #64] sbcs x13, x13, x4 sbcs x5, x5, xzr sbcs x6, x6, xzr sbcs x7, x7, xzr sbcs x8, x8, xzr sbcs x9, x9, xzr sbcs x10, x10, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbcs x13, x13, xzr and x13, x13, #0x1ff stp x5, x6, [x0] stp x7, x8, [x0, #16] stp x9, x10, [x0, #32] stp x11, x12, [x0, #48] str x13, [x0, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jmixadd_local_sub_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
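
The local subtraction routine above (Lp521_jmixadd_local_sub_p521) relies on the Mersenne shape of p_521 = 2^521 - 1: after the limbwise subtraction it pushes the final borrow back through the limbs and masks the top word to 9 bits, which amounts to adding p_521 back whenever x < y. Below is a minimal C sketch of that same idea; it is not taken from the routine (the name sub_p521_sketch is illustrative), it assumes both inputs are fully reduced, and unlike the assembly it is not constant-time.

#include <stdint.h>

/* Sketch: z := (x - y) mod p_521 with p_521 = 2^521 - 1, assuming x, y < p_521.
   Subtract limbwise, then subtract the final borrow once more through the limbs
   and mask to 521 bits; since 2^576 == 0 (mod 2^521), this equals adding back
   p_521 = 2^521 - 1 exactly when a borrow occurred.                            */
static void sub_p521_sketch(uint64_t z[9], const uint64_t x[9], const uint64_t y[9])
{
    uint64_t borrow = 0;
    for (int i = 0; i < 9; i++) {
        unsigned __int128 acc = (unsigned __int128)x[i] - y[i] - borrow;
        z[i] = (uint64_t)acc;
        borrow = (uint64_t)(acc >> 64) & 1;   /* 1 if this limb subtraction wrapped */
    }
    /* Propagate the borrow (subtract 1 if x < y), then keep exactly 521 bits. */
    for (int i = 0; i < 9 && borrow; i++) {
        uint64_t t = z[i];
        z[i] = t - borrow;
        borrow = (t == 0);
    }
    z[8] &= 0x1FF;
}
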
wlsfx/bnbb
3,314
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_triple_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Triple modulo p_521, z := (3 * x) mod p_521, assuming x reduced // Input x[9]; output z[9] // // extern void bignum_triple_p521(uint64_t z[static 9], // const uint64_t x[static 9]); // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_triple_p521) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_p521) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_p521_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_triple_p521_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_p521_alt) .text .balign 4 #define z x0 #define x x1 #define h x2 #define l x3 #define d0 x4 #define d1 x5 #define d2 x6 #define d3 x7 #define d4 x8 #define d5 x9 #define d6 x10 #define d7 x11 #define d8 x12 S2N_BN_SYMBOL(bignum_triple_p521): S2N_BN_SYMBOL(bignum_triple_p521_alt): CFI_START // Pick out top bit to wrap to the zero position in the doubling step ldr d8, [x, #64] lsl l, d8, #55 // Rotate left to get x' == 2 * x (mod p_521) and add to x + 1 (carryin) to get // s = [d8;d7;d6;d5;d4;d3;d2;d1;d0] = x + x' + 1 == 3 * x + 1 (mod p_521) subs xzr, xzr, xzr ldp d0, d1, [x] extr l, d0, l, #63 extr h, d1, d0, #63 adcs d0, d0, l ldp d2, d3, [x, #16] extr l, d2, d1, #63 adcs d1, d1, h extr h, d3, d2, #63 adcs d2, d2, l ldp d4, d5, [x, #32] extr l, d4, d3, #63 adcs d3, d3, h extr h, d5, d4, #63 adcs d4, d4, l ldp d6, d7, [x, #48] extr l, d6, d5, #63 adcs d5, d5, h extr h, d7, d6, #63 adcs d6, d6, l extr l, d8, d7, #63 adcs d7, d7, h and l, l, #0x1FF adcs d8, d8, l // We know x, x' < p_521 (they are the same bits except for the positions) // so x + x' + 1 <= 2 * (p_521 - 1) + 1 < 2 * p_521. // Note that x + x' >= p_521 <=> s = x + x' + 1 >= 2^521 // Set CF <=> s = x + x' + 1 >= 2^521 and make it a mask in l as well subs l, d8, #512 csetm l, cs // Now if CF is set (and l is all 1s), we want (x + x') - p_521 = s - 2^521 // while otherwise we want x + x' = s - 1 (from existing CF, which is nice) sbcs d0, d0, xzr and l, l, #512 sbcs d1, d1, xzr sbcs d2, d2, xzr sbcs d3, d3, xzr sbcs d4, d4, xzr sbcs d5, d5, xzr sbcs d6, d6, xzr sbcs d7, d7, xzr sbc d8, d8, l // Store the result stp d0, d1, [z] stp d2, d3, [z, #16] stp d4, d5, [z, #32] stp d6, d7, [z, #48] str d8, [z, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_triple_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
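
The comments in bignum_triple_p521.S above lean on 2^521 == 1 (mod p_521): doubling a reduced value is just a one-bit left rotation inside a 521-bit window, and the triple is then x plus that rotation with a final conditional subtraction of p_521. A hedged C sketch of the rotation step (illustrative name double_p521_sketch, input assumed fully reduced, not code from the routine) could look like this:

#include <stdint.h>

/* Sketch: z := 2*x mod p_521 for p_521 = 2^521 - 1, assuming x < p_521.
   Because 2^521 == 1 (mod p_521), doubling is a left rotation by one bit
   within 521 bits: bit 520 wraps around to bit 0.                         */
static void double_p521_sketch(uint64_t z[9], const uint64_t x[9])
{
    uint64_t carry = x[8] >> 8;              /* bit 520 wraps around to bit 0 */
    for (int i = 0; i < 9; i++) {
        uint64_t next = x[i] >> 63;
        z[i] = (x[i] << 1) | carry;
        carry = next;
    }
    z[8] &= 0x1FF;                           /* keep exactly 521 bits */
}
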
wlsfx/bnbb
33,602
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_montsqr_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery square, z := (x^2 / 2^576) mod p_521 // Input x[9]; output z[9] // // extern void bignum_montsqr_p521(uint64_t z[static 9], // const uint64_t x[static 9]); // // Does z := (x^2 / 2^576) mod p_521, assuming x < p_521. This means the // Montgomery base is the "native size" 2^{9*64} = 2^576; since p_521 is // a Mersenne prime the basic modular squaring bignum_sqr_p521 can be // considered a Montgomery operation to base 2^521. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" // bignum_montsqr_p521 is functionally equivalent to // unopt/bignum_montsqr_p521. // It is written in a way that // 1. A subset of scalar multiplications in bignum_montmul_p384 are carefully // chosen and vectorized // 2. The vectorized assembly is rescheduled using the SLOTHY superoptimizer. // https://github.com/slothy-optimizer/slothy // // The output program of step 1. is as follows: // // stp x19, x20, [sp, #-16]! // stp x21, x22, [sp, #-16]! // stp x23, x24, [sp, #-16]! // ldp x16, x8, [x1] // ldr q18, [x1] // ldr q5, [x1] // ldr q20, [x1] // ldp x17, x13, [x1, #16] // ldr q17, [x1, #16] // ldr q1, [x1, #16] // ldr q28, [x1, #16] // ldp x9, x15, [x1, #32] // ldr q27, [x1] // ldr q29, [x1, #32] // ldp x23, x2, [x1, #48] // ldr q6, [x1, #48] // ldr q4, [x1, #48] // mul x24, x9, x23 // mul x11, x15, x2 // umulh x20, x9, x23 // subs x4, x9, x15 // cneg x22, x4, cc // csetm x12, cc // subs x4, x2, x23 // cneg x4, x4, cc // mul x19, x22, x4 // umulh x4, x22, x4 // cinv x7, x12, cc // eor x14, x19, x7 // eor x22, x4, x7 // adds x12, x24, x20 // adc x19, x20, xzr // umulh x4, x15, x2 // adds x12, x12, x11 // adcs x19, x19, x4 // adc x4, x4, xzr // adds x19, x19, x11 // adc x4, x4, xzr // cmn x7, #0x1 // adcs x12, x12, x14 // adcs x19, x19, x22 // adc x4, x4, x7 // adds x11, x24, x24 // adcs x20, x12, x12 // adcs x10, x19, x19 // adcs x3, x4, x4 // adc x5, xzr, xzr // ldr q30, [x1, #32] // umull v0.2D, v30.2S, v30.2S // umull2 v2.2D, v30.4S, v30.4S // xtn v24.2S, v30.2D // uzp2 v30.4S, v30.4S, v30.4S // umull v30.2D, v30.2S, v24.2S // mov x7, v0.d[0] // mov x14, v0.d[1] // mov x19, v2.d[0] // mov x22, v2.d[1] // mov x4, v30.d[0] // mov x12, v30.d[1] // adds x21, x7, x4, lsl #33 // lsr x4, x4, #31 // adc x14, x14, x4 // adds x19, x19, x12, lsl #33 // lsr x4, x12, #31 // adc x22, x22, x4 // mul x4, x9, x15 // umulh x12, x9, x15 // adds x24, x14, x4, lsl #1 // extr x4, x12, x4, #63 // adcs x19, x19, x4 // lsr x4, x12, #63 // adc x4, x22, x4 // adds x11, x11, x19 // adcs x20, x20, x4 // adcs x10, x10, xzr // adcs x3, x3, xzr // adc x6, x5, xzr // movi v3.2D, #0x00000000ffffffff // uzp2 v16.4S, v4.4S, v4.4S // xtn v25.2S, v6.2D // xtn v23.2S, v4.2D // rev64 v30.4S, v4.4S // umull v24.2D, v25.2S, v23.2S // umull v0.2D, v25.2S, v16.2S // uzp2 v2.4S, v6.4S, v6.4S // mul v30.4S, v30.4S, v6.4S // usra v0.2D, v24.2D, #32 // umull v19.2D, v2.2S, v16.2S // uaddlp v30.2D, v30.4S // and v24.16B, v0.16B, v3.16B // umlal v24.2D, v2.2S, v23.2S // shl v30.2D, v30.2D, #32 // usra v19.2D, v0.2D, #32 // umlal v30.2D, v25.2S, v23.2S // usra v19.2D, v24.2D, #32 // mov x5, v30.d[0] // mov x7, v30.d[1] // mul x14, x23, x2 // mov x19, v19.d[0] // mov x4, v19.d[1] // umulh x22, x23, x2 // adds x12, x19, x14 // adcs x19, x7, x22 // 
adc x4, x4, xzr // adds x12, x12, x14 // adcs x19, x19, x22 // adc x4, x4, xzr // adds x7, x5, x10 // adcs x3, x12, x3 // adcs x14, x19, x6 // adc x10, x4, xzr // ldr x4, [x1, #64] // add x6, x4, x4 // mul x5, x4, x4 // and x4, x16, #0xfffffffffffff // mul x22, x6, x4 // extr x4, x8, x16, #52 // and x4, x4, #0xfffffffffffff // mul x19, x6, x4 // lsr x4, x22, #52 // add x12, x19, x4 // lsl x4, x22, #12 // extr x4, x12, x4, #12 // adds x21, x21, x4 // extr x4, x17, x8, #40 // and x4, x4, #0xfffffffffffff // mul x19, x6, x4 // lsr x4, x12, #52 // add x22, x19, x4 // lsl x4, x12, #12 // extr x4, x22, x4, #24 // adcs x24, x24, x4 // extr x4, x13, x17, #28 // and x4, x4, #0xfffffffffffff // mul x19, x6, x4 // lsr x4, x22, #52 // add x12, x19, x4 // lsl x4, x22, #12 // extr x4, x12, x4, #36 // adcs x11, x11, x4 // extr x4, x9, x13, #16 // and x4, x4, #0xfffffffffffff // mul x19, x6, x4 // lsr x4, x12, #52 // add x22, x19, x4 // lsl x4, x12, #12 // extr x4, x22, x4, #48 // adcs x20, x20, x4 // lsr x4, x9, #4 // and x4, x4, #0xfffffffffffff // mul x19, x6, x4 // lsr x4, x22, #52 // add x12, x19, x4 // lsl x4, x22, #12 // extr x22, x12, x4, #60 // extr x4, x15, x9, #56 // and x4, x4, #0xfffffffffffff // mul x19, x6, x4 // lsr x4, x12, #52 // add x12, x19, x4 // lsl x4, x22, #8 // extr x4, x12, x4, #8 // adcs x7, x7, x4 // extr x4, x23, x15, #44 // and x4, x4, #0xfffffffffffff // mul x19, x6, x4 // lsr x4, x12, #52 // add x22, x19, x4 // lsl x4, x12, #12 // extr x4, x22, x4, #20 // adcs x1, x3, x4 // extr x4, x2, x23, #32 // and x4, x4, #0xfffffffffffff // mul x19, x6, x4 // lsr x4, x22, #52 // add x12, x19, x4 // lsl x4, x22, #12 // extr x4, x12, x4, #32 // adcs x14, x14, x4 // lsr x4, x2, #20 // mul x19, x6, x4 // lsr x4, x12, #52 // add x19, x19, x4 // lsl x4, x12, #12 // extr x4, x19, x4, #44 // adcs x22, x10, x4 // lsr x4, x19, #44 // adc x12, x5, x4 // extr x19, x24, x21, #9 // extr x4, x11, x24, #9 // stp x19, x4, [x0] // @slothy:writes=buffer0 // extr x19, x20, x11, #9 // extr x4, x7, x20, #9 // stp x19, x4, [x0, #16] // @slothy:writes=buffer16 // extr x19, x1, x7, #9 // extr x4, x14, x1, #9 // stp x19, x4, [x0, #32] // @slothy:writes=buffer32 // extr x19, x22, x14, #9 // extr x4, x12, x22, #9 // stp x19, x4, [x0, #48] // @slothy:writes=buffer48 // and x19, x21, #0x1ff // lsr x4, x12, #9 // add x4, x19, x4 // str x4, [x0, #64] // uzp1 v2.4S, v28.4S, v18.4S // rev64 v30.4S, v28.4S // uzp1 v24.4S, v18.4S, v18.4S // mul v30.4S, v30.4S, v18.4S // uaddlp v30.2D, v30.4S // shl v30.2D, v30.2D, #32 // umlal v30.2D, v24.2S, v2.2S // mov x11, v30.d[0] // mov x20, v30.d[1] // umulh x7, x16, x17 // subs x4, x16, x8 // cneg x22, x4, cc // csetm x12, cc // subs x4, x13, x17 // cneg x4, x4, cc // mul x19, x22, x4 // umulh x4, x22, x4 // cinv x1, x12, cc // eor x14, x19, x1 // eor x22, x4, x1 // adds x12, x11, x7 // adc x19, x7, xzr // umulh x4, x8, x13 // adds x12, x12, x20 // adcs x19, x19, x4 // adc x4, x4, xzr // adds x19, x19, x20 // adc x4, x4, xzr // cmn x1, #0x1 // adcs x12, x12, x14 // adcs x19, x19, x22 // adc x4, x4, x1 // adds x21, x11, x11 // adcs x24, x12, x12 // adcs x11, x19, x19 // adcs x20, x4, x4 // adc x7, xzr, xzr // movi v3.2D, #0x00000000ffffffff // uzp2 v16.4S, v20.4S, v20.4S // xtn v25.2S, v5.2D // xtn v23.2S, v20.2D // rev64 v30.4S, v20.4S // umull v24.2D, v25.2S, v23.2S // umull v0.2D, v25.2S, v16.2S // uzp2 v2.4S, v5.4S, v5.4S // mul v30.4S, v30.4S, v5.4S // usra v0.2D, v24.2D, #32 // umull v19.2D, v2.2S, v16.2S // uaddlp v30.2D, v30.4S // and v24.16B, v0.16B, v3.16B // umlal 
v24.2D, v2.2S, v23.2S // shl v30.2D, v30.2D, #32 // usra v19.2D, v0.2D, #32 // umlal v30.2D, v25.2S, v23.2S // usra v19.2D, v24.2D, #32 // mov x10, v30.d[0] // mov x1, v30.d[1] // mul x14, x16, x8 // mov x19, v19.d[0] // mov x4, v19.d[1] // umulh x22, x16, x8 // adds x12, x19, x14 // adcs x19, x1, x22 // adc x4, x4, xzr // adds x3, x12, x14 // adcs x19, x19, x22 // adc x4, x4, xzr // adds x5, x21, x19 // adcs x21, x24, x4 // adcs x24, x11, xzr // adcs x11, x20, xzr // adc x20, x7, xzr // movi v3.2D, #0x00000000ffffffff // uzp2 v16.4S, v1.4S, v1.4S // xtn v25.2S, v17.2D // xtn v23.2S, v1.2D // rev64 v30.4S, v1.4S // umull v24.2D, v25.2S, v23.2S // umull v0.2D, v25.2S, v16.2S // uzp2 v2.4S, v17.4S, v17.4S // mul v30.4S, v30.4S, v17.4S // usra v0.2D, v24.2D, #32 // umull v19.2D, v2.2S, v16.2S // uaddlp v30.2D, v30.4S // and v24.16B, v0.16B, v3.16B // umlal v24.2D, v2.2S, v23.2S // shl v30.2D, v30.2D, #32 // usra v19.2D, v0.2D, #32 // umlal v30.2D, v25.2S, v23.2S // usra v19.2D, v24.2D, #32 // mov x7, v30.d[0] // mov x1, v30.d[1] // mul x14, x17, x13 // mov x19, v19.d[0] // mov x4, v19.d[1] // umulh x22, x17, x13 // adds x12, x19, x14 // adcs x19, x1, x22 // adc x4, x4, xzr // adds x12, x12, x14 // adcs x19, x19, x22 // adc x4, x4, xzr // adds x1, x7, x24 // adcs x14, x12, x11 // adcs x22, x19, x20 // adc x12, x4, xzr // ldp x19, x4, [x0] // @slothy:reads=buffer0 // adds x19, x19, x10 // adcs x4, x4, x3 // stp x19, x4, [x0] // @slothy:writes=buffer0 // ldp x19, x4, [x0, #16] // @slothy:reads=buffer16 // adcs x19, x19, x5 // adcs x4, x4, x21 // stp x19, x4, [x0, #16] // @slothy:writes=buffer16 // ldp x19, x4, [x0, #32] // @slothy:reads=buffer32 // adcs x19, x19, x1 // adcs x4, x4, x14 // stp x19, x4, [x0, #32] // @slothy:writes=buffer32 // ldp x19, x4, [x0, #48] // @slothy:reads=buffer48 // adcs x19, x19, x22 // adcs x4, x4, x12 // stp x19, x4, [x0, #48] // @slothy:writes=buffer48 // ldr x4, [x0, #64] // adc x4, x4, xzr // str x4, [x0, #64] // movi v3.2D, #0x00000000ffffffff // uzp2 v2.4S, v29.4S, v29.4S // xtn v16.2S, v27.2D // xtn v25.2S, v29.2D // rev64 v30.4S, v29.4S // umull v24.2D, v16.2S, v25.2S // umull v23.2D, v16.2S, v2.2S // uzp2 v0.4S, v27.4S, v27.4S // mul v30.4S, v30.4S, v27.4S // usra v23.2D, v24.2D, #32 // umull v2.2D, v0.2S, v2.2S // uaddlp v30.2D, v30.4S // and v24.16B, v23.16B, v3.16B // umlal v24.2D, v0.2S, v25.2S // shl v30.2D, v30.2D, #32 // usra v2.2D, v23.2D, #32 // umlal v30.2D, v16.2S, v25.2S // usra v2.2D, v24.2D, #32 // mov x6, v30.d[0] // mov x22, v30.d[1] // mul x12, x17, x23 // mul x19, x13, x2 // mov x4, v2.d[0] // adds x22, x22, x4 // mov x4, v2.d[1] // adcs x12, x12, x4 // umulh x4, x17, x23 // adcs x19, x19, x4 // umulh x4, x13, x2 // adc x4, x4, xzr // adds x21, x22, x6 // adcs x22, x12, x22 // adcs x12, x19, x12 // adcs x19, x4, x19 // adc x4, xzr, x4 // adds x24, x22, x6 // adcs x11, x12, x21 // adcs x20, x19, x22 // adcs x1, x4, x12 // adcs x14, xzr, x19 // adc x7, xzr, x4 // subs x4, x17, x13 // cneg x12, x4, cc // csetm x22, cc // subs x4, x2, x23 // cneg x19, x4, cc // mul x4, x12, x19 // umulh x12, x12, x19 // cinv x19, x22, cc // cmn x19, #0x1 // eor x4, x4, x19 // adcs x1, x1, x4 // eor x4, x12, x19 // adcs x14, x14, x4 // adc x7, x7, x19 // subs x4, x16, x8 // cneg x12, x4, cc // csetm x22, cc // subs x4, x15, x9 // cneg x19, x4, cc // mul x4, x12, x19 // umulh x12, x12, x19 // cinv x19, x22, cc // cmn x19, #0x1 // eor x4, x4, x19 // adcs x10, x21, x4 // eor x4, x12, x19 // adcs x24, x24, x4 // adcs x11, x11, x19 // adcs x20, x20, x19 // adcs x1, 
x1, x19 // adcs x14, x14, x19 // adc x7, x7, x19 // subs x4, x8, x13 // cneg x12, x4, cc // csetm x22, cc // subs x4, x2, x15 // cneg x19, x4, cc // mul x4, x12, x19 // umulh x12, x12, x19 // cinv x19, x22, cc // cmn x19, #0x1 // eor x4, x4, x19 // adcs x20, x20, x4 // eor x4, x12, x19 // adcs x1, x1, x4 // adcs x14, x14, x19 // adc x7, x7, x19 // subs x4, x16, x17 // cneg x12, x4, cc // csetm x22, cc // subs x4, x23, x9 // cneg x19, x4, cc // mul x4, x12, x19 // umulh x12, x12, x19 // cinv x19, x22, cc // cmn x19, #0x1 // eor x4, x4, x19 // adcs x24, x24, x4 // eor x4, x12, x19 // adcs x11, x11, x4 // adcs x20, x20, x19 // adcs x1, x1, x19 // adcs x14, x14, x19 // adc x7, x7, x19 // subs x4, x16, x13 // cneg x12, x4, cc // csetm x22, cc // subs x4, x2, x9 // cneg x19, x4, cc // mul x4, x12, x19 // umulh x12, x12, x19 // cinv x19, x22, cc // cmn x19, #0x1 // eor x4, x4, x19 // adcs x11, x11, x4 // eor x4, x12, x19 // adcs x20, x20, x4 // adcs x1, x1, x19 // adcs x14, x14, x19 // adc x7, x7, x19 // subs x4, x8, x17 // cneg x12, x4, cc // csetm x22, cc // subs x4, x23, x15 // cneg x19, x4, cc // mul x4, x12, x19 // umulh x12, x12, x19 // cinv x19, x22, cc // cmn x19, #0x1 // eor x4, x4, x19 // adcs x3, x11, x4 // eor x4, x12, x19 // adcs x5, x20, x4 // adcs x1, x1, x19 // adcs x14, x14, x19 // adc x22, x7, x19 // ldp x12, x19, [x0] // @slothy:reads=buffer0 // extr x4, x1, x5, #8 // adds x11, x4, x12 // extr x4, x14, x1, #8 // adcs x20, x4, x19 // ldp x19, x12, [x0, #16] // @slothy:reads=buffer16 // extr x4, x22, x14, #8 // adcs x7, x4, x19 // and x19, x20, x7 // lsr x4, x22, #8 // adcs x1, x4, x12 // and x22, x19, x1 // ldp x19, x12, [x0, #32] // @slothy:reads=buffer32 // lsl x4, x6, #1 // adcs x14, x4, x19 // and x19, x22, x14 // extr x4, x10, x6, #63 // adcs x21, x4, x12 // and x22, x19, x21 // ldp x19, x12, [x0, #48] // @slothy:reads=buffer48 // extr x4, x24, x10, #63 // adcs x2, x4, x19 // and x19, x22, x2 // extr x4, x3, x24, #63 // adcs x24, x4, x12 // and x12, x19, x24 // ldr x19, [x0, #64] // extr x4, x5, x3, #63 // and x4, x4, #0x1ff // adc x4, x19, x4 // lsr x19, x4, #9 // orr x4, x4, #0xfffffffffffffe00 // cmp xzr, xzr // adcs xzr, x11, x19 // adcs xzr, x12, xzr // adcs xzr, x4, xzr // adcs x11, x11, x19 // adcs x20, x20, xzr // adcs x7, x7, xzr // adcs x1, x1, xzr // adcs x14, x14, xzr // adcs x22, x21, xzr // adcs x12, x2, xzr // adcs x24, x24, xzr // adc x4, x4, xzr // and x19, x4, #0x1ff // lsl x4, x11, #9 // extr x11, x20, x11, #55 // extr x20, x7, x20, #55 // extr x7, x1, x7, #55 // extr x1, x14, x1, #55 // orr x4, x19, x4 // extr x14, x22, x14, #55 // extr x22, x12, x22, #55 // extr x12, x24, x12, #55 // extr x19, x4, x24, #55 // lsr x4, x4, #55 // stp x11, x20, [x0] // @slothy:writes=buffer0 // stp x7, x1, [x0, #16] // @slothy:writes=buffer16 // stp x14, x22, [x0, #32] // @slothy:writes=buffer32 // stp x12, x19, [x0, #48] // @slothy:writes=buffer48 // str x4, [x0, #64] // ldp x23, x24, [sp], #16 // ldp x21, x22, [sp], #16 // ldp x19, x20, [sp], #16 // ret // // The bash script used for step 2 is as follows: // // # Store the assembly instructions except the last 'ret', // # callee-register store/loads as, say, 'input.S'. // export OUTPUTS="[hint_buffer0,hint_buffer16,hint_buffer32,hint_buffer48,hint_buffer64]" // export RESERVED_REGS="[x18,x25,x26,x27,x28,x29,x30,sp,q8,q9,q10,q11,q12,q13,q14,q15,v8,v9,v10,v11,v12,v13,v14,v15]" // <s2n-bignum>/tools/external/slothy.sh input.S my_out_dir // # my_out_dir/3.opt.s is the optimized assembly. 
Its output may differ // # from this file since the sequence is non-deterministically chosen. // # Please add 'ret' at the end of the output assembly. S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr_p521) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p521) .text .balign 4 S2N_BN_SYMBOL(bignum_montsqr_p521): CFI_START // Save registers CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) // The optimized body ldr q31, [x1, #48] ldp x9, x15, [x1, #32] ldp x23, x2, [x1, #48] ldr q0, [x1, #48] ldr q29, [x1, #32] rev64 v21.4S, v31.4S umulh x13, x9, x23 mul v23.4S, v21.4S, v0.4S xtn v21.2S, v0.2D uzp2 v19.4S, v31.4S, v31.4S xtn v2.2S, v29.2D xtn v30.2S, v31.2D uzp2 v3.4S, v29.4S, v29.4S umull v6.2D, v21.2S, v19.2S mul x10, x9, x23 uaddlp v23.2D, v23.4S umull v22.2D, v21.2S, v30.2S adds x22, x10, x13 mul x17, x9, x15 movi v25.2D, #0x00000000ffffffff uzp2 v1.4S, v0.4S, v0.4S adc x8, x13, xzr subs x19, x9, x15 umull v28.2D, v3.2S, v2.2S shl v31.2D, v23.2D, #32 csetm x5, cc cneg x3, x19, cc umull v19.2D, v1.2S, v19.2S ldr q4, [x1, #16] subs x24, x2, x23 mul x6, x15, x2 usra v6.2D, v22.2D, #32 ldr q23, [x1] cneg x13, x24, cc umulh x24, x15, x2 umull v5.2D, v29.2S, v29.2S rev64 v3.4S, v4.4S cinv x19, x5, cc adds x16, x22, x6 mov x14, v28.d[1] umlal v31.2D, v21.2S, v30.2S umull2 v17.2D, v29.4S, v29.4S mov x20, v28.d[0] mul v29.4S, v3.4S, v23.4S and v22.16B, v6.16B, v25.16B mul x5, x3, x13 mov x4, v5.d[1] mov x7, v5.d[0] adcs x11, x8, x24 ldr q5, [x1] ldr q0, [x1] adc x22, x24, xzr adds x8, x11, x6 usra v19.2D, v6.2D, #32 umlal v22.2D, v1.2S, v30.2S adc x11, x22, xzr adds x21, x7, x20, lsl #33 mov x24, v17.d[1] mov x22, v17.d[0] lsr x12, x20, #31 uzp1 v2.4S, v4.4S, v23.4S uzp1 v20.4S, v23.4S, v23.4S usra v19.2D, v22.2D, #32 adc x4, x4, x12 lsr x6, x14, #31 adds x20, x22, x14, lsl #33 ldr q17, [x1, #16] uzp2 v22.4S, v0.4S, v0.4S eor x12, x5, x19 umulh x7, x3, x13 xtn v23.2S, v0.2D adc x5, x24, x6 cmn x19, #0x1 xtn v25.2S, v5.2D ldr q27, [x1] adcs x16, x16, x12 uaddlp v1.2D, v29.4S umulh x3, x9, x15 eor x13, x7, x19 adcs x24, x8, x13 adc x11, x11, x19 adds x12, x10, x10 adcs x13, x16, x16 mul x19, x23, x2 umull v21.2D, v25.2S, v23.2S adcs x7, x24, x24 ldp x16, x8, [x1] umull v3.2D, v25.2S, v22.2S uzp2 v6.4S, v5.4S, v5.4S adcs x10, x11, x11 ldr q29, [x1, #32] adc x14, xzr, xzr adds x24, x4, x17, lsl #1 mov x4, v31.d[1] shl v30.2D, v1.2D, #32 lsr x6, x3, #63 extr x11, x3, x17, #63 ldr q1, [x1, #16] mov x22, v19.d[1] adcs x20, x20, x11 umulh x3, x23, x2 movi v4.2D, #0x00000000ffffffff usra v3.2D, v21.2D, #32 adc x5, x5, x6 adds x11, x12, x20 mov x6, v19.d[0] umull v19.2D, v6.2S, v22.2S adcs x20, x13, x5 rev64 v22.4S, v0.4S ldr x5, [x1, #64] ldp x17, x13, [x1, #16] adcs x7, x7, xzr umlal v30.2D, v20.2S, v2.2S adcs x12, x10, xzr and x1, x16, #0xfffffffffffff mul v22.4S, v22.4S, v5.4S adc x14, x14, xzr adds x6, x6, x19 xtn v5.2S, v1.2D adcs x10, x4, x3 mov x4, v31.d[0] adc x22, x22, xzr adds x19, x6, x19 add x6, x5, x5 and v21.16B, v3.16B, v4.16B adcs x10, x10, x3 extr x3, x8, x16, #52 mul x1, x6, x1 usra v19.2D, v3.2D, #32 adc x22, x22, xzr adds x7, x4, x7 umlal v21.2D, v6.2S, v23.2S and x4, x3, #0xfffffffffffff adcs x3, x19, x12 uzp2 v28.4S, v1.4S, v1.4S extr x19, x17, x8, #40 mul x12, x6, x4 adcs x14, x10, x14 rev64 v4.4S, v1.4S mul x5, x5, x5 lsr x4, x9, #4 adc x10, x22, xzr lsl x22, x1, #12 lsr x1, x1, #52 add x12, x12, x1 and x1, x19, #0xfffffffffffff extr x19, x12, x22, #12 mul x1, x6, x1 extr x22, x13, x17, #28 adds x21, x21, x19 mul v31.4S, v4.4S, 
v17.4S and x19, x22, #0xfffffffffffff lsr x22, x12, #52 lsl x12, x12, #12 mul x19, x6, x19 add x22, x1, x22 extr x1, x22, x12, #24 and x4, x4, #0xfffffffffffff adcs x12, x24, x1 extr x1, x9, x13, #16 mul x24, x6, x4 and x1, x1, #0xfffffffffffff lsr x4, x22, #52 add x4, x19, x4 lsl x22, x22, #12 mul x1, x6, x1 extr x22, x4, x22, #36 adcs x11, x11, x22 extr x22, x11, x12, #9 extr x19, x12, x21, #9 uaddlp v3.2D, v22.4S lsl x12, x4, #12 stp x19, x22, [x0] umulh x19, x16, x17 uaddlp v31.2D, v31.4S lsr x22, x4, #52 extr x4, x15, x9, #56 usra v19.2D, v21.2D, #32 add x22, x1, x22 extr x1, x23, x15, #44 shl v4.2D, v31.2D, #32 extr x12, x22, x12, #48 and x4, x4, #0xfffffffffffff uzp2 v7.4S, v17.4S, v17.4S adcs x20, x20, x12 xtn v17.2S, v17.2D lsl x12, x22, #12 lsr x22, x22, #52 mul x4, x6, x4 add x22, x24, x22 and x24, x1, #0xfffffffffffff extr x1, x2, x23, #32 extr x12, x22, x12, #60 lsl x12, x12, #8 lsr x22, x22, #52 mul x24, x6, x24 add x4, x4, x22 and x22, x1, #0xfffffffffffff extr x12, x4, x12, #8 lsl x1, x4, #12 lsr x4, x4, #52 adcs x7, x7, x12 mul x12, x6, x22 add x24, x24, x4 extr x1, x24, x1, #20 extr x22, x20, x11, #9 extr x20, x7, x20, #9 lsr x11, x2, #20 mul x6, x6, x11 lsr x4, x24, #52 add x4, x12, x4 lsl x12, x24, #12 adcs x3, x3, x1 extr x24, x4, x12, #32 lsr x11, x4, #52 adcs x12, x14, x24 umull v31.2D, v17.2S, v28.2S add x24, x6, x11 lsl x1, x4, #12 extr x7, x3, x7, #9 rev64 v6.4S, v29.4S umull v22.2D, v17.2S, v5.2S extr x11, x12, x3, #9 extr x14, x24, x1, #44 umlal v4.2D, v17.2S, v5.2S adcs x3, x10, x14 umulh x10, x8, x13 lsr x14, x24, #44 adc x24, x5, x14 subs x5, x16, x8 stp x22, x20, [x0, #16] csetm x1, cc shl v21.2D, v3.2D, #32 movi v17.2D, #0x00000000ffffffff cneg x20, x5, cc subs x5, x13, x17 usra v31.2D, v22.2D, #32 cneg x14, x5, cc lsr x6, x24, #9 and x22, x21, #0x1ff mov x4, v30.d[0] add x6, x22, x6 stp x7, x11, [x0, #32] umulh x22, x20, x14 mov x5, v30.d[1] str x6, [x0, #64] extr x12, x3, x12, #9 umull v28.2D, v7.2S, v28.2S mul x11, x20, x14 mul v6.4S, v6.4S, v27.4S and v1.16B, v31.16B, v17.16B cinv x21, x1, cc adds x6, x4, x19 uzp2 v22.4S, v27.4S, v27.4S adc x20, x19, xzr adds x6, x6, x5 umlal v1.2D, v7.2S, v5.2S xtn v20.2S, v29.2D eor x22, x22, x21 adcs x7, x20, x10 usra v28.2D, v31.2D, #32 eor x20, x11, x21 usra v28.2D, v1.2D, #32 xtn v0.2S, v27.2D adc x10, x10, xzr adds x1, x7, x5 umlal v21.2D, v25.2S, v23.2S uzp2 v29.4S, v29.4S, v29.4S adc x19, x10, xzr cmn x21, #0x1 umull v3.2D, v0.2S, v20.2S adcs x5, x6, x20 extr x10, x24, x3, #9 umull v31.2D, v0.2S, v29.2S adcs x1, x1, x22 stp x12, x10, [x0, #48] mul x24, x16, x8 mov x3, v28.d[1] usra v31.2D, v3.2D, #32 adc x10, x19, x21 adds x7, x4, x4 umulh x14, x16, x8 uaddlp v3.2D, v6.4S mov x4, v28.d[0] adcs x12, x5, x5 mov x5, v19.d[0] movi v23.2D, #0x00000000ffffffff adcs x20, x1, x1 mov x19, v21.d[1] mov x1, v19.d[1] adcs x22, x10, x10 and v17.16B, v31.16B, v23.16B adc x6, xzr, xzr umlal v17.2D, v22.2S, v20.2S adds x10, x5, x24 mul x11, x17, x13 mov x5, v21.d[0] umull v28.2D, v22.2S, v29.2S adcs x19, x19, x14 shl v5.2D, v3.2D, #32 adc x21, x1, xzr adds x10, x10, x24 adcs x1, x19, x14 umulh x14, x17, x13 adc x19, x21, xzr adds x7, x7, x1 adcs x1, x12, x19 adcs x24, x20, xzr mov x20, v4.d[1] usra v28.2D, v31.2D, #32 mov x21, v4.d[0] adcs x19, x22, xzr adc x6, x6, xzr adds x4, x4, x11 adcs x20, x20, x14 adc x22, x3, xzr adds x12, x4, x11 umulh x11, x13, x2 adcs x3, x20, x14 adc x20, x22, xzr adds x21, x21, x24 ldp x22, x24, [x0] adcs x4, x12, x19 ldp x19, x14, [x0, #16] usra v28.2D, v17.2D, #32 adcs x3, x3, x6 umlal v5.2D, 
v0.2S, v20.2S adc x6, x20, xzr umulh x20, x17, x23 adds x12, x22, x5 ldp x22, x5, [x0, #32] adcs x10, x24, x10 adcs x19, x19, x7 stp x12, x10, [x0] ldp x12, x7, [x0, #48] adcs x10, x14, x1 mul x14, x13, x2 ldr x24, [x0, #64] adcs x22, x22, x21 adcs x5, x5, x4 mov x21, v28.d[1] stp x22, x5, [x0, #32] mul x1, x17, x23 adcs x3, x12, x3 mov x4, v28.d[0] mov x12, v5.d[1] stp x19, x10, [x0, #16] adcs x19, x7, x6 mov x6, v5.d[0] adc x10, x24, xzr subs x7, x16, x8 cneg x5, x7, cc csetm x24, cc subs x7, x15, x9 cneg x22, x7, cc cinv x7, x24, cc adds x12, x12, x4 umulh x4, x5, x22 adcs x1, x1, x21 stp x3, x19, [x0, #48] str x10, [x0, #64] adcs x20, x14, x20 adc x21, x11, xzr subs x14, x17, x13 cneg x10, x14, cc csetm x3, cc subs x19, x2, x23 cneg x19, x19, cc cinv x11, x3, cc adds x14, x12, x6 mul x24, x5, x22 adcs x22, x1, x12 eor x3, x4, x7 mul x4, x10, x19 adcs x1, x20, x1 adcs x12, x21, x20 adc x5, xzr, x21 umulh x19, x10, x19 adds x20, x22, x6 eor x10, x24, x7 adcs x21, x1, x14 eor x24, x4, x11 adcs x4, x12, x22 adcs x1, x5, x1 adcs x12, xzr, x12 adc x22, xzr, x5 eor x5, x19, x11 cmn x11, #0x1 adcs x19, x1, x24 adcs x5, x12, x5 adc x24, x22, x11 subs x1, x8, x13 cneg x22, x1, cc csetm x1, cc subs x11, x2, x15 cinv x1, x1, cc cneg x12, x11, cc cmn x7, #0x1 adcs x10, x14, x10 mul x14, x22, x12 adcs x20, x20, x3 eor x11, x14, x1 adcs x3, x21, x7 umulh x21, x22, x12 adcs x22, x4, x7 adcs x4, x19, x7 adcs x12, x5, x7 adc x7, x24, x7 subs x14, x16, x17 csetm x5, cc cneg x19, x14, cc subs x24, x23, x9 cneg x14, x24, cc cinv x5, x5, cc cmn x1, #0x1 mul x24, x19, x14 adcs x22, x22, x11 eor x11, x21, x1 eor x24, x24, x5 umulh x19, x19, x14 adcs x4, x4, x11 adcs x14, x12, x1 adc x1, x7, x1 subs x17, x8, x17 cneg x12, x17, cc csetm x17, cc subs x16, x16, x13 cneg x11, x16, cc csetm x16, cc subs x23, x23, x15 cinv x7, x17, cc cneg x13, x23, cc mul x15, x12, x13 subs x23, x2, x9 cinv x8, x16, cc cneg x17, x23, cc eor x16, x19, x5 mul x23, x11, x17 cmn x5, #0x1 adcs x20, x20, x24 eor x15, x15, x7 adcs x3, x3, x16 adcs x2, x22, x5 umulh x16, x11, x17 adcs x19, x4, x5 ldp x4, x22, [x0, #48] extr x21, x10, x6, #63 adcs x24, x14, x5 eor x23, x23, x8 adc x1, x1, x5 cmn x8, #0x1 umulh x9, x12, x13 eor x14, x16, x8 adcs x3, x3, x23 ldp x11, x5, [x0, #16] ldp x13, x16, [x0] adcs x23, x2, x14 adcs x14, x19, x8 extr x19, x20, x10, #63 lsl x12, x6, #1 adcs x17, x24, x8 adc x1, x1, x8 cmn x7, #0x1 adcs x24, x3, x15 eor x9, x9, x7 ldp x15, x3, [x0, #32] adcs x9, x23, x9 ldr x8, [x0, #64] extr x20, x24, x20, #63 adcs x23, x14, x7 extr x2, x9, x24, #63 adcs x14, x17, x7 and x24, x2, #0x1ff extr x9, x23, x9, #8 extr x6, x14, x23, #8 adc x23, x1, x7 adds x10, x9, x13 adcs x13, x6, x16 extr x1, x23, x14, #8 lsr x23, x23, #8 adcs x7, x1, x11 adcs x2, x23, x5 and x23, x13, x7 adcs x16, x12, x15 and x23, x23, x2 adcs x14, x21, x3 and x23, x23, x16 adcs x5, x19, x4 and x23, x23, x14 adcs x22, x20, x22 and x23, x23, x5 and x1, x23, x22 adc x9, x8, x24 lsr x23, x9, #9 cmp xzr, xzr orr x17, x9, #0xfffffffffffffe00 adcs xzr, x10, x23 adcs xzr, x1, xzr adcs xzr, x17, xzr adcs x23, x10, x23 adcs x9, x13, xzr lsl x4, x23, #9 adcs x1, x7, xzr extr x23, x9, x23, #55 extr x9, x1, x9, #55 adcs x10, x2, xzr extr x1, x10, x1, #55 stp x23, x9, [x0] adcs x19, x16, xzr adcs x9, x14, xzr extr x23, x19, x10, #55 adcs x10, x5, xzr stp x1, x23, [x0, #16] extr x5, x9, x19, #55 adcs x1, x22, xzr extr x23, x10, x9, #55 adc x9, x17, xzr stp x5, x23, [x0, #32] extr x10, x1, x10, #55 and x23, x9, #0x1ff orr x23, x23, x4 extr x9, x23, x1, #55 lsr x23, x23, 
#55 stp x10, x9, [x0, #48] str x23, [x0, #64] // Restore regs and return CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montsqr_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
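
The header of bignum_montsqr_p521.S above notes that for the Mersenne prime p_521 the Montgomery base costs essentially nothing. One way to see this: dividing by 2 mod p_521 is a one-bit right rotation within 521 bits (for odd x, (x + p_521)/2 just moves the dropped bit up to position 520), so the Montgomery factor 1/2^576 is only a fixed rotation. The C fragment below is an illustration of that identity, not code from the routine; half_p521_sketch is a made-up name and the input is assumed fully reduced.

#include <stdint.h>

/* Sketch: z := x/2 mod p_521 for p_521 = 2^521 - 1, assuming x < p_521.
   This is a right rotation by one bit within 521 bits: bit 0 wraps to bit 520. */
static void half_p521_sketch(uint64_t z[9], const uint64_t x[9])
{
    uint64_t low = x[0] & 1;                     /* bit 0 wraps up to bit 520 */
    for (int i = 0; i < 8; i++)
        z[i] = (x[i] >> 1) | (x[i + 1] << 63);
    z[8] = (x[8] >> 1) | (low << 8);             /* bit 520 sits at bit 8 of limb 8 */
}
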
wlsfx/bnbb
71,880
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/p521_jdouble_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point doubling on NIST curve P-521 in Jacobian coordinates // // extern void p521_jdouble_alt(uint64_t p3[static 27], // const uint64_t p1[static 27]); // // Does p3 := 2 * p1 where all points are regarded as Jacobian triples. // A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3). // It is assumed that all coordinates of the input point are fully // reduced mod p_521 and that the z coordinate is not zero. // // Standard ARM ABI: X0 = p3, X1 = p1 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jdouble_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(p521_jdouble_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jdouble_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 72 // Stable homes for input arguments during main code sequence #define input_z x26 #define input_x x27 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries #define z2 sp, #(NUMSIZE*0) #define y2 sp, #(NUMSIZE*1) #define x2p sp, #(NUMSIZE*2) #define xy2 sp, #(NUMSIZE*3) #define y4 sp, #(NUMSIZE*4) #define t2 sp, #(NUMSIZE*4) #define dx2 sp, #(NUMSIZE*5) #define t1 sp, #(NUMSIZE*5) #define d sp, #(NUMSIZE*6) #define x4p sp, #(NUMSIZE*6) // NUMSIZE*7 is not 16-aligned so we round it up #define NSPACE 512 // Corresponds exactly to bignum_mul_p521_alt #define mul_p521(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x5, x6, [P2] __LF \ mul x15, x3, x5 __LF \ umulh x16, x3, x5 __LF \ mul x14, x3, x6 __LF \ umulh x17, x3, x6 __LF \ adds x16, x16, x14 __LF \ ldp x7, x8, [P2+16] __LF \ mul x14, x3, x7 __LF \ umulh x19, x3, x7 __LF \ adcs x17, x17, x14 __LF \ mul x14, x3, x8 __LF \ umulh x20, x3, x8 __LF \ adcs x19, x19, x14 __LF \ ldp x9, x10, [P2+32] __LF \ mul x14, x3, x9 __LF \ umulh x21, x3, x9 __LF \ adcs x20, x20, x14 __LF \ mul x14, x3, x10 __LF \ umulh x22, x3, x10 __LF \ adcs x21, x21, x14 __LF \ ldp x11, x12, [P2+48] __LF \ mul x14, x3, x11 __LF \ umulh x23, x3, x11 __LF \ adcs x22, x22, x14 __LF \ ldr x13, [P2+64] __LF \ mul x14, x3, x12 __LF \ umulh x24, x3, x12 __LF \ adcs x23, x23, x14 __LF \ mul x14, x3, x13 __LF \ umulh x1, x3, x13 __LF \ adcs x24, x24, x14 __LF \ adc x1, x1, xzr __LF \ mul x14, x4, x5 __LF \ adds x16, x16, x14 __LF \ mul x14, x4, x6 __LF \ adcs x17, x17, x14 __LF \ mul x14, x4, x7 __LF \ adcs x19, x19, x14 __LF \ mul x14, x4, x8 __LF \ adcs x20, x20, x14 __LF \ mul x14, x4, x9 __LF \ adcs x21, x21, x14 __LF \ mul x14, x4, x10 __LF \ adcs x22, x22, x14 __LF \ mul x14, x4, x11 __LF \ adcs x23, x23, x14 __LF \ mul x14, x4, x12 __LF \ adcs x24, x24, x14 __LF \ mul x14, x4, x13 __LF \ adcs x1, x1, x14 __LF \ cset x0, hs __LF \ umulh x14, x4, x5 __LF \ adds x17, x17, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x20, x20, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x21, x21, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x13 __LF \ adc x0, x0, x14 __LF \ stp x15, 
x16, [P0] __LF \ ldp x3, x4, [P1+16] __LF \ mul x14, x3, x5 __LF \ adds x17, x17, x14 __LF \ mul x14, x3, x6 __LF \ adcs x19, x19, x14 __LF \ mul x14, x3, x7 __LF \ adcs x20, x20, x14 __LF \ mul x14, x3, x8 __LF \ adcs x21, x21, x14 __LF \ mul x14, x3, x9 __LF \ adcs x22, x22, x14 __LF \ mul x14, x3, x10 __LF \ adcs x23, x23, x14 __LF \ mul x14, x3, x11 __LF \ adcs x24, x24, x14 __LF \ mul x14, x3, x12 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x13 __LF \ adcs x0, x0, x14 __LF \ cset x15, hs __LF \ umulh x14, x3, x5 __LF \ adds x19, x19, x14 __LF \ umulh x14, x3, x6 __LF \ adcs x20, x20, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x21, x21, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x3, x12 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x13 __LF \ adc x15, x15, x14 __LF \ mul x14, x4, x5 __LF \ adds x19, x19, x14 __LF \ mul x14, x4, x6 __LF \ adcs x20, x20, x14 __LF \ mul x14, x4, x7 __LF \ adcs x21, x21, x14 __LF \ mul x14, x4, x8 __LF \ adcs x22, x22, x14 __LF \ mul x14, x4, x9 __LF \ adcs x23, x23, x14 __LF \ mul x14, x4, x10 __LF \ adcs x24, x24, x14 __LF \ mul x14, x4, x11 __LF \ adcs x1, x1, x14 __LF \ mul x14, x4, x12 __LF \ adcs x0, x0, x14 __LF \ mul x14, x4, x13 __LF \ adcs x15, x15, x14 __LF \ cset x16, hs __LF \ umulh x14, x4, x5 __LF \ adds x20, x20, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x21, x21, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x4, x13 __LF \ adc x16, x16, x14 __LF \ stp x17, x19, [P0+16] __LF \ ldp x3, x4, [P1+32] __LF \ mul x14, x3, x5 __LF \ adds x20, x20, x14 __LF \ mul x14, x3, x6 __LF \ adcs x21, x21, x14 __LF \ mul x14, x3, x7 __LF \ adcs x22, x22, x14 __LF \ mul x14, x3, x8 __LF \ adcs x23, x23, x14 __LF \ mul x14, x3, x9 __LF \ adcs x24, x24, x14 __LF \ mul x14, x3, x10 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x11 __LF \ adcs x0, x0, x14 __LF \ mul x14, x3, x12 __LF \ adcs x15, x15, x14 __LF \ mul x14, x3, x13 __LF \ adcs x16, x16, x14 __LF \ cset x17, hs __LF \ umulh x14, x3, x5 __LF \ adds x21, x21, x14 __LF \ umulh x14, x3, x6 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x3, x12 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x3, x13 __LF \ adc x17, x17, x14 __LF \ mul x14, x4, x5 __LF \ adds x21, x21, x14 __LF \ mul x14, x4, x6 __LF \ adcs x22, x22, x14 __LF \ mul x14, x4, x7 __LF \ adcs x23, x23, x14 __LF \ mul x14, x4, x8 __LF \ adcs x24, x24, x14 __LF \ mul x14, x4, x9 __LF \ adcs x1, x1, x14 __LF \ mul x14, x4, x10 __LF \ adcs x0, x0, x14 __LF \ mul x14, x4, x11 __LF \ adcs x15, x15, x14 __LF \ mul x14, x4, x12 __LF \ adcs x16, x16, x14 __LF \ mul x14, x4, x13 __LF \ adcs x17, x17, x14 __LF \ cset x19, hs __LF \ umulh x14, x4, x5 __LF \ adds x22, x22, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x0, 
x0, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x4, x13 __LF \ adc x19, x19, x14 __LF \ stp x20, x21, [P0+32] __LF \ ldp x3, x4, [P1+48] __LF \ mul x14, x3, x5 __LF \ adds x22, x22, x14 __LF \ mul x14, x3, x6 __LF \ adcs x23, x23, x14 __LF \ mul x14, x3, x7 __LF \ adcs x24, x24, x14 __LF \ mul x14, x3, x8 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x9 __LF \ adcs x0, x0, x14 __LF \ mul x14, x3, x10 __LF \ adcs x15, x15, x14 __LF \ mul x14, x3, x11 __LF \ adcs x16, x16, x14 __LF \ mul x14, x3, x12 __LF \ adcs x17, x17, x14 __LF \ mul x14, x3, x13 __LF \ adcs x19, x19, x14 __LF \ cset x20, hs __LF \ umulh x14, x3, x5 __LF \ adds x23, x23, x14 __LF \ umulh x14, x3, x6 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x3, x12 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x3, x13 __LF \ adc x20, x20, x14 __LF \ mul x14, x4, x5 __LF \ adds x23, x23, x14 __LF \ mul x14, x4, x6 __LF \ adcs x24, x24, x14 __LF \ mul x14, x4, x7 __LF \ adcs x1, x1, x14 __LF \ mul x14, x4, x8 __LF \ adcs x0, x0, x14 __LF \ mul x14, x4, x9 __LF \ adcs x15, x15, x14 __LF \ mul x14, x4, x10 __LF \ adcs x16, x16, x14 __LF \ mul x14, x4, x11 __LF \ adcs x17, x17, x14 __LF \ mul x14, x4, x12 __LF \ adcs x19, x19, x14 __LF \ mul x14, x4, x13 __LF \ adcs x20, x20, x14 __LF \ cset x21, hs __LF \ umulh x14, x4, x5 __LF \ adds x24, x24, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x20, x20, x14 __LF \ umulh x14, x4, x13 __LF \ adc x21, x21, x14 __LF \ stp x22, x23, [P0+48] __LF \ ldr x3, [P1+64] __LF \ mul x14, x3, x5 __LF \ adds x24, x24, x14 __LF \ mul x14, x3, x6 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x7 __LF \ adcs x0, x0, x14 __LF \ mul x14, x3, x8 __LF \ adcs x15, x15, x14 __LF \ mul x14, x3, x9 __LF \ adcs x16, x16, x14 __LF \ mul x14, x3, x10 __LF \ adcs x17, x17, x14 __LF \ mul x14, x3, x11 __LF \ adcs x19, x19, x14 __LF \ mul x14, x3, x12 __LF \ adcs x20, x20, x14 __LF \ mul x14, x3, x13 __LF \ adc x21, x21, x14 __LF \ umulh x14, x3, x5 __LF \ adds x1, x1, x14 __LF \ umulh x14, x3, x6 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x20, x20, x14 __LF \ umulh x14, x3, x12 __LF \ adc x21, x21, x14 __LF \ cmp xzr, xzr __LF \ ldp x5, x6, [P0] __LF \ extr x14, x1, x24, #9 __LF \ adcs x5, x5, x14 __LF \ extr x14, x0, x1, #9 __LF \ adcs x6, x6, x14 __LF \ ldp x7, x8, [P0+16] __LF \ extr x14, x15, x0, #9 __LF \ adcs x7, x7, x14 __LF \ extr x14, x16, x15, #9 __LF \ adcs x8, x8, x14 __LF \ ldp x9, x10, [P0+32] __LF \ extr x14, x17, x16, #9 __LF \ adcs x9, x9, x14 __LF \ extr x14, x19, x17, #9 __LF \ adcs x10, x10, x14 __LF \ ldp x11, x12, [P0+48] __LF \ extr x14, x20, x19, #9 __LF \ adcs x11, x11, x14 __LF \ extr x14, x21, x20, #9 __LF \ adcs 
x12, x12, x14 __LF \ orr x13, x24, #0xfffffffffffffe00 __LF \ lsr x14, x21, #9 __LF \ adcs x13, x13, x14 __LF \ sbcs x5, x5, xzr __LF \ sbcs x6, x6, xzr __LF \ sbcs x7, x7, xzr __LF \ sbcs x8, x8, xzr __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbc x13, x13, xzr __LF \ and x13, x13, #0x1ff __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] __LF \ stp x11, x12, [P0+48] __LF \ str x13, [P0+64] // Corresponds exactly to bignum_sqr_p521_alt #define sqr_p521(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x11, x2, x3 __LF \ umulh x12, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x10, x2, x4 __LF \ umulh x13, x2, x4 __LF \ adds x12, x12, x10 __LF \ ldp x6, x7, [P1+32] __LF \ mul x10, x2, x5 __LF \ umulh x14, x2, x5 __LF \ adcs x13, x13, x10 __LF \ ldp x8, x9, [P1+48] __LF \ mul x10, x2, x6 __LF \ umulh x15, x2, x6 __LF \ adcs x14, x14, x10 __LF \ mul x10, x2, x7 __LF \ umulh x16, x2, x7 __LF \ adcs x15, x15, x10 __LF \ mul x10, x2, x8 __LF \ umulh x17, x2, x8 __LF \ adcs x16, x16, x10 __LF \ mul x10, x2, x9 __LF \ umulh x19, x2, x9 __LF \ adcs x17, x17, x10 __LF \ adc x19, x19, xzr __LF \ mul x10, x3, x4 __LF \ adds x13, x13, x10 __LF \ mul x10, x3, x5 __LF \ adcs x14, x14, x10 __LF \ mul x10, x3, x6 __LF \ adcs x15, x15, x10 __LF \ mul x10, x3, x7 __LF \ adcs x16, x16, x10 __LF \ mul x10, x3, x8 __LF \ adcs x17, x17, x10 __LF \ mul x10, x3, x9 __LF \ adcs x19, x19, x10 __LF \ cset x20, hs __LF \ umulh x10, x3, x4 __LF \ adds x14, x14, x10 __LF \ umulh x10, x3, x5 __LF \ adcs x15, x15, x10 __LF \ umulh x10, x3, x6 __LF \ adcs x16, x16, x10 __LF \ umulh x10, x3, x7 __LF \ adcs x17, x17, x10 __LF \ umulh x10, x3, x8 __LF \ adcs x19, x19, x10 __LF \ umulh x10, x3, x9 __LF \ adc x20, x20, x10 __LF \ mul x10, x6, x7 __LF \ umulh x21, x6, x7 __LF \ adds x20, x20, x10 __LF \ adc x21, x21, xzr __LF \ mul x10, x4, x5 __LF \ adds x15, x15, x10 __LF \ mul x10, x4, x6 __LF \ adcs x16, x16, x10 __LF \ mul x10, x4, x7 __LF \ adcs x17, x17, x10 __LF \ mul x10, x4, x8 __LF \ adcs x19, x19, x10 __LF \ mul x10, x4, x9 __LF \ adcs x20, x20, x10 __LF \ mul x10, x6, x8 __LF \ adcs x21, x21, x10 __LF \ cset x22, hs __LF \ umulh x10, x4, x5 __LF \ adds x16, x16, x10 __LF \ umulh x10, x4, x6 __LF \ adcs x17, x17, x10 __LF \ umulh x10, x4, x7 __LF \ adcs x19, x19, x10 __LF \ umulh x10, x4, x8 __LF \ adcs x20, x20, x10 __LF \ umulh x10, x4, x9 __LF \ adcs x21, x21, x10 __LF \ umulh x10, x6, x8 __LF \ adc x22, x22, x10 __LF \ mul x10, x7, x8 __LF \ umulh x23, x7, x8 __LF \ adds x22, x22, x10 __LF \ adc x23, x23, xzr __LF \ mul x10, x5, x6 __LF \ adds x17, x17, x10 __LF \ mul x10, x5, x7 __LF \ adcs x19, x19, x10 __LF \ mul x10, x5, x8 __LF \ adcs x20, x20, x10 __LF \ mul x10, x5, x9 __LF \ adcs x21, x21, x10 __LF \ mul x10, x6, x9 __LF \ adcs x22, x22, x10 __LF \ mul x10, x7, x9 __LF \ adcs x23, x23, x10 __LF \ cset x24, hs __LF \ umulh x10, x5, x6 __LF \ adds x19, x19, x10 __LF \ umulh x10, x5, x7 __LF \ adcs x20, x20, x10 __LF \ umulh x10, x5, x8 __LF \ adcs x21, x21, x10 __LF \ umulh x10, x5, x9 __LF \ adcs x22, x22, x10 __LF \ umulh x10, x6, x9 __LF \ adcs x23, x23, x10 __LF \ umulh x10, x7, x9 __LF \ adc x24, x24, x10 __LF \ mul x10, x8, x9 __LF \ umulh x25, x8, x9 __LF \ adds x24, x24, x10 __LF \ adc x25, x25, xzr __LF \ adds x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, x14 __LF \ adcs x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adcs x17, x17, x17 __LF \ adcs x19, x19, x19 __LF \ adcs x20, 
x20, x20 __LF \ adcs x21, x21, x21 __LF \ adcs x22, x22, x22 __LF \ adcs x23, x23, x23 __LF \ adcs x24, x24, x24 __LF \ adcs x25, x25, x25 __LF \ cset x0, hs __LF \ umulh x10, x2, x2 __LF \ adds x11, x11, x10 __LF \ mul x10, x3, x3 __LF \ adcs x12, x12, x10 __LF \ umulh x10, x3, x3 __LF \ adcs x13, x13, x10 __LF \ mul x10, x4, x4 __LF \ adcs x14, x14, x10 __LF \ umulh x10, x4, x4 __LF \ adcs x15, x15, x10 __LF \ mul x10, x5, x5 __LF \ adcs x16, x16, x10 __LF \ umulh x10, x5, x5 __LF \ adcs x17, x17, x10 __LF \ mul x10, x6, x6 __LF \ adcs x19, x19, x10 __LF \ umulh x10, x6, x6 __LF \ adcs x20, x20, x10 __LF \ mul x10, x7, x7 __LF \ adcs x21, x21, x10 __LF \ umulh x10, x7, x7 __LF \ adcs x22, x22, x10 __LF \ mul x10, x8, x8 __LF \ adcs x23, x23, x10 __LF \ umulh x10, x8, x8 __LF \ adcs x24, x24, x10 __LF \ mul x10, x9, x9 __LF \ adcs x25, x25, x10 __LF \ umulh x10, x9, x9 __LF \ adc x0, x0, x10 __LF \ ldr x1, [P1+64] __LF \ add x1, x1, x1 __LF \ mul x10, x1, x2 __LF \ adds x19, x19, x10 __LF \ umulh x10, x1, x2 __LF \ adcs x20, x20, x10 __LF \ mul x10, x1, x4 __LF \ adcs x21, x21, x10 __LF \ umulh x10, x1, x4 __LF \ adcs x22, x22, x10 __LF \ mul x10, x1, x6 __LF \ adcs x23, x23, x10 __LF \ umulh x10, x1, x6 __LF \ adcs x24, x24, x10 __LF \ mul x10, x1, x8 __LF \ adcs x25, x25, x10 __LF \ umulh x10, x1, x8 __LF \ adcs x0, x0, x10 __LF \ lsr x4, x1, #1 __LF \ mul x4, x4, x4 __LF \ adc x4, x4, xzr __LF \ mul x10, x1, x3 __LF \ adds x20, x20, x10 __LF \ umulh x10, x1, x3 __LF \ adcs x21, x21, x10 __LF \ mul x10, x1, x5 __LF \ adcs x22, x22, x10 __LF \ umulh x10, x1, x5 __LF \ adcs x23, x23, x10 __LF \ mul x10, x1, x7 __LF \ adcs x24, x24, x10 __LF \ umulh x10, x1, x7 __LF \ adcs x25, x25, x10 __LF \ mul x10, x1, x9 __LF \ adcs x0, x0, x10 __LF \ umulh x10, x1, x9 __LF \ adc x4, x4, x10 __LF \ mul x2, x2, x2 __LF \ cmp xzr, xzr __LF \ extr x10, x20, x19, #9 __LF \ adcs x2, x2, x10 __LF \ extr x10, x21, x20, #9 __LF \ adcs x11, x11, x10 __LF \ extr x10, x22, x21, #9 __LF \ adcs x12, x12, x10 __LF \ extr x10, x23, x22, #9 __LF \ adcs x13, x13, x10 __LF \ extr x10, x24, x23, #9 __LF \ adcs x14, x14, x10 __LF \ extr x10, x25, x24, #9 __LF \ adcs x15, x15, x10 __LF \ extr x10, x0, x25, #9 __LF \ adcs x16, x16, x10 __LF \ extr x10, x4, x0, #9 __LF \ adcs x17, x17, x10 __LF \ orr x19, x19, #0xfffffffffffffe00 __LF \ lsr x10, x4, #9 __LF \ adcs x19, x19, x10 __LF \ sbcs x2, x2, xzr __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ sbcs x14, x14, xzr __LF \ sbcs x15, x15, xzr __LF \ sbcs x16, x16, xzr __LF \ sbcs x17, x17, xzr __LF \ sbc x19, x19, xzr __LF \ and x19, x19, #0x1ff __LF \ stp x2, x11, [P0] __LF \ stp x12, x13, [P0+16] __LF \ stp x14, x15, [P0+32] __LF \ stp x16, x17, [P0+48] __LF \ str x19, [P0+64] // Corresponds exactly to bignum_add_p521 #define add_p521(P0,P1,P2) \ cmp xzr, xzr __LF \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ adcs x5, x5, x4 __LF \ adcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ adcs x7, x7, x4 __LF \ adcs x8, x8, x3 __LF \ ldp x9, x10, [P1+32] __LF \ ldp x4, x3, [P2+32] __LF \ adcs x9, x9, x4 __LF \ adcs x10, x10, x3 __LF \ ldp x11, x12, [P1+48] __LF \ ldp x4, x3, [P2+48] __LF \ adcs x11, x11, x4 __LF \ adcs x12, x12, x3 __LF \ ldr x13, [P1+64] __LF \ ldr x4, [P2+64] __LF \ adc x13, x13, x4 __LF \ subs x4, x13, #512 __LF \ csetm x4, hs __LF \ sbcs x5, x5, xzr __LF \ and x4, x4, #0x200 __LF \ sbcs x6, x6, xzr __LF \ sbcs x7, x7, xzr __LF \ sbcs x8, x8, xzr __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, 
xzr __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbc x13, x13, x4 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] __LF \ stp x11, x12, [P0+48] __LF \ str x13, [P0+64] // Corresponds exactly to bignum_sub_p521 #define sub_p521(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ ldp x9, x10, [P1+32] __LF \ ldp x4, x3, [P2+32] __LF \ sbcs x9, x9, x4 __LF \ sbcs x10, x10, x3 __LF \ ldp x11, x12, [P1+48] __LF \ ldp x4, x3, [P2+48] __LF \ sbcs x11, x11, x4 __LF \ sbcs x12, x12, x3 __LF \ ldr x13, [P1+64] __LF \ ldr x4, [P2+64] __LF \ sbcs x13, x13, x4 __LF \ sbcs x5, x5, xzr __LF \ sbcs x6, x6, xzr __LF \ sbcs x7, x7, xzr __LF \ sbcs x8, x8, xzr __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ and x13, x13, #0x1ff __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] __LF \ stp x11, x12, [P0+48] __LF \ str x13, [P0+64] // Weak multiplication not fully reducing #define weakmul_p521(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x5, x6, [P2] __LF \ mul x15, x3, x5 __LF \ umulh x16, x3, x5 __LF \ mul x14, x3, x6 __LF \ umulh x17, x3, x6 __LF \ adds x16, x16, x14 __LF \ ldp x7, x8, [P2+16] __LF \ mul x14, x3, x7 __LF \ umulh x19, x3, x7 __LF \ adcs x17, x17, x14 __LF \ mul x14, x3, x8 __LF \ umulh x20, x3, x8 __LF \ adcs x19, x19, x14 __LF \ ldp x9, x10, [P2+32] __LF \ mul x14, x3, x9 __LF \ umulh x21, x3, x9 __LF \ adcs x20, x20, x14 __LF \ mul x14, x3, x10 __LF \ umulh x22, x3, x10 __LF \ adcs x21, x21, x14 __LF \ ldp x11, x12, [P2+48] __LF \ mul x14, x3, x11 __LF \ umulh x23, x3, x11 __LF \ adcs x22, x22, x14 __LF \ ldr x13, [P2+64] __LF \ mul x14, x3, x12 __LF \ umulh x24, x3, x12 __LF \ adcs x23, x23, x14 __LF \ mul x14, x3, x13 __LF \ umulh x1, x3, x13 __LF \ adcs x24, x24, x14 __LF \ adc x1, x1, xzr __LF \ mul x14, x4, x5 __LF \ adds x16, x16, x14 __LF \ mul x14, x4, x6 __LF \ adcs x17, x17, x14 __LF \ mul x14, x4, x7 __LF \ adcs x19, x19, x14 __LF \ mul x14, x4, x8 __LF \ adcs x20, x20, x14 __LF \ mul x14, x4, x9 __LF \ adcs x21, x21, x14 __LF \ mul x14, x4, x10 __LF \ adcs x22, x22, x14 __LF \ mul x14, x4, x11 __LF \ adcs x23, x23, x14 __LF \ mul x14, x4, x12 __LF \ adcs x24, x24, x14 __LF \ mul x14, x4, x13 __LF \ adcs x1, x1, x14 __LF \ cset x0, hs __LF \ umulh x14, x4, x5 __LF \ adds x17, x17, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x20, x20, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x21, x21, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x13 __LF \ adc x0, x0, x14 __LF \ stp x15, x16, [P0] __LF \ ldp x3, x4, [P1+16] __LF \ mul x14, x3, x5 __LF \ adds x17, x17, x14 __LF \ mul x14, x3, x6 __LF \ adcs x19, x19, x14 __LF \ mul x14, x3, x7 __LF \ adcs x20, x20, x14 __LF \ mul x14, x3, x8 __LF \ adcs x21, x21, x14 __LF \ mul x14, x3, x9 __LF \ adcs x22, x22, x14 __LF \ mul x14, x3, x10 __LF \ adcs x23, x23, x14 __LF \ mul x14, x3, x11 __LF \ adcs x24, x24, x14 __LF \ mul x14, x3, x12 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x13 __LF \ adcs x0, x0, x14 __LF \ cset x15, hs __LF \ umulh x14, x3, x5 __LF \ adds x19, x19, x14 __LF \ umulh x14, x3, x6 __LF \ adcs 
x20, x20, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x21, x21, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x3, x12 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x13 __LF \ adc x15, x15, x14 __LF \ mul x14, x4, x5 __LF \ adds x19, x19, x14 __LF \ mul x14, x4, x6 __LF \ adcs x20, x20, x14 __LF \ mul x14, x4, x7 __LF \ adcs x21, x21, x14 __LF \ mul x14, x4, x8 __LF \ adcs x22, x22, x14 __LF \ mul x14, x4, x9 __LF \ adcs x23, x23, x14 __LF \ mul x14, x4, x10 __LF \ adcs x24, x24, x14 __LF \ mul x14, x4, x11 __LF \ adcs x1, x1, x14 __LF \ mul x14, x4, x12 __LF \ adcs x0, x0, x14 __LF \ mul x14, x4, x13 __LF \ adcs x15, x15, x14 __LF \ cset x16, hs __LF \ umulh x14, x4, x5 __LF \ adds x20, x20, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x21, x21, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x4, x13 __LF \ adc x16, x16, x14 __LF \ stp x17, x19, [P0+16] __LF \ ldp x3, x4, [P1+32] __LF \ mul x14, x3, x5 __LF \ adds x20, x20, x14 __LF \ mul x14, x3, x6 __LF \ adcs x21, x21, x14 __LF \ mul x14, x3, x7 __LF \ adcs x22, x22, x14 __LF \ mul x14, x3, x8 __LF \ adcs x23, x23, x14 __LF \ mul x14, x3, x9 __LF \ adcs x24, x24, x14 __LF \ mul x14, x3, x10 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x11 __LF \ adcs x0, x0, x14 __LF \ mul x14, x3, x12 __LF \ adcs x15, x15, x14 __LF \ mul x14, x3, x13 __LF \ adcs x16, x16, x14 __LF \ cset x17, hs __LF \ umulh x14, x3, x5 __LF \ adds x21, x21, x14 __LF \ umulh x14, x3, x6 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x3, x12 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x3, x13 __LF \ adc x17, x17, x14 __LF \ mul x14, x4, x5 __LF \ adds x21, x21, x14 __LF \ mul x14, x4, x6 __LF \ adcs x22, x22, x14 __LF \ mul x14, x4, x7 __LF \ adcs x23, x23, x14 __LF \ mul x14, x4, x8 __LF \ adcs x24, x24, x14 __LF \ mul x14, x4, x9 __LF \ adcs x1, x1, x14 __LF \ mul x14, x4, x10 __LF \ adcs x0, x0, x14 __LF \ mul x14, x4, x11 __LF \ adcs x15, x15, x14 __LF \ mul x14, x4, x12 __LF \ adcs x16, x16, x14 __LF \ mul x14, x4, x13 __LF \ adcs x17, x17, x14 __LF \ cset x19, hs __LF \ umulh x14, x4, x5 __LF \ adds x22, x22, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x4, x13 __LF \ adc x19, x19, x14 __LF \ stp x20, x21, [P0+32] __LF \ ldp x3, x4, [P1+48] __LF \ mul x14, x3, x5 __LF \ adds x22, x22, x14 __LF \ mul x14, x3, x6 __LF \ adcs x23, x23, x14 __LF \ mul x14, x3, x7 __LF \ adcs x24, x24, x14 __LF \ mul x14, x3, x8 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x9 __LF \ adcs x0, x0, x14 __LF \ mul x14, x3, x10 __LF \ adcs x15, x15, x14 __LF \ mul x14, x3, x11 
__LF \ adcs x16, x16, x14 __LF \ mul x14, x3, x12 __LF \ adcs x17, x17, x14 __LF \ mul x14, x3, x13 __LF \ adcs x19, x19, x14 __LF \ cset x20, hs __LF \ umulh x14, x3, x5 __LF \ adds x23, x23, x14 __LF \ umulh x14, x3, x6 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x3, x12 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x3, x13 __LF \ adc x20, x20, x14 __LF \ mul x14, x4, x5 __LF \ adds x23, x23, x14 __LF \ mul x14, x4, x6 __LF \ adcs x24, x24, x14 __LF \ mul x14, x4, x7 __LF \ adcs x1, x1, x14 __LF \ mul x14, x4, x8 __LF \ adcs x0, x0, x14 __LF \ mul x14, x4, x9 __LF \ adcs x15, x15, x14 __LF \ mul x14, x4, x10 __LF \ adcs x16, x16, x14 __LF \ mul x14, x4, x11 __LF \ adcs x17, x17, x14 __LF \ mul x14, x4, x12 __LF \ adcs x19, x19, x14 __LF \ mul x14, x4, x13 __LF \ adcs x20, x20, x14 __LF \ cset x21, hs __LF \ umulh x14, x4, x5 __LF \ adds x24, x24, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x20, x20, x14 __LF \ umulh x14, x4, x13 __LF \ adc x21, x21, x14 __LF \ stp x22, x23, [P0+48] __LF \ ldr x3, [P1+64] __LF \ mul x14, x3, x5 __LF \ adds x24, x24, x14 __LF \ mul x14, x3, x6 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x7 __LF \ adcs x0, x0, x14 __LF \ mul x14, x3, x8 __LF \ adcs x15, x15, x14 __LF \ mul x14, x3, x9 __LF \ adcs x16, x16, x14 __LF \ mul x14, x3, x10 __LF \ adcs x17, x17, x14 __LF \ mul x14, x3, x11 __LF \ adcs x19, x19, x14 __LF \ mul x14, x3, x12 __LF \ adcs x20, x20, x14 __LF \ mul x14, x3, x13 __LF \ adc x21, x21, x14 __LF \ umulh x14, x3, x5 __LF \ adds x1, x1, x14 __LF \ umulh x14, x3, x6 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x20, x20, x14 __LF \ umulh x14, x3, x12 __LF \ adc x21, x21, x14 __LF \ ldp x5, x6, [P0] __LF \ extr x14, x1, x24, #9 __LF \ adds x5, x5, x14 __LF \ extr x14, x0, x1, #9 __LF \ adcs x6, x6, x14 __LF \ ldp x7, x8, [P0+16] __LF \ extr x14, x15, x0, #9 __LF \ adcs x7, x7, x14 __LF \ extr x14, x16, x15, #9 __LF \ adcs x8, x8, x14 __LF \ ldp x9, x10, [P0+32] __LF \ extr x14, x17, x16, #9 __LF \ adcs x9, x9, x14 __LF \ extr x14, x19, x17, #9 __LF \ adcs x10, x10, x14 __LF \ ldp x11, x12, [P0+48] __LF \ extr x14, x20, x19, #9 __LF \ adcs x11, x11, x14 __LF \ extr x14, x21, x20, #9 __LF \ adcs x12, x12, x14 __LF \ and x13, x24, #0x1ff __LF \ lsr x14, x21, #9 __LF \ adc x13, x13, x14 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] __LF \ stp x11, x12, [P0+48] __LF \ str x13, [P0+64] // P0 = C * P1 - D * P2 == C * P1 + D * (p_521 - P2) #define cmsub_p521(P0,C,P1,D,P2) \ ldp x6, x7, [P1] __LF \ mov x1, #(C) __LF \ mul x3, x1, x6 __LF \ mul x4, x1, x7 __LF \ umulh x6, x1, x6 __LF \ adds x4, x4, x6 __LF \ umulh x7, x1, x7 __LF \ ldp x8, x9, [P1+16] __LF \ mul x5, x1, x8 __LF \ mul x6, x1, x9 __LF \ umulh x8, x1, x8 __LF \ adcs x5, x5, x7 __LF \ umulh x9, x1, x9 
__LF \ adcs x6, x6, x8 __LF \ ldp x10, x11, [P1+32] __LF \ mul x7, x1, x10 __LF \ mul x8, x1, x11 __LF \ umulh x10, x1, x10 __LF \ adcs x7, x7, x9 __LF \ umulh x11, x1, x11 __LF \ adcs x8, x8, x10 __LF \ ldp x12, x13, [P1+48] __LF \ mul x9, x1, x12 __LF \ mul x10, x1, x13 __LF \ umulh x12, x1, x12 __LF \ adcs x9, x9, x11 __LF \ umulh x13, x1, x13 __LF \ adcs x10, x10, x12 __LF \ ldr x14, [P1+64] __LF \ mul x11, x1, x14 __LF \ adc x11, x11, x13 __LF \ mov x1, #(D) __LF \ ldp x20, x21, [P2] __LF \ mvn x20, x20 __LF \ mul x0, x1, x20 __LF \ umulh x20, x1, x20 __LF \ adds x3, x3, x0 __LF \ mvn x21, x21 __LF \ mul x0, x1, x21 __LF \ umulh x21, x1, x21 __LF \ adcs x4, x4, x0 __LF \ ldp x22, x23, [P2+16] __LF \ mvn x22, x22 __LF \ mul x0, x1, x22 __LF \ umulh x22, x1, x22 __LF \ adcs x5, x5, x0 __LF \ mvn x23, x23 __LF \ mul x0, x1, x23 __LF \ umulh x23, x1, x23 __LF \ adcs x6, x6, x0 __LF \ ldp x17, x19, [P2+32] __LF \ mvn x17, x17 __LF \ mul x0, x1, x17 __LF \ umulh x17, x1, x17 __LF \ adcs x7, x7, x0 __LF \ mvn x19, x19 __LF \ mul x0, x1, x19 __LF \ umulh x19, x1, x19 __LF \ adcs x8, x8, x0 __LF \ ldp x2, x16, [P2+48] __LF \ mvn x2, x2 __LF \ mul x0, x1, x2 __LF \ umulh x2, x1, x2 __LF \ adcs x9, x9, x0 __LF \ mvn x16, x16 __LF \ mul x0, x1, x16 __LF \ umulh x16, x1, x16 __LF \ adcs x10, x10, x0 __LF \ ldr x0, [P2+64] __LF \ eor x0, x0, #0x1ff __LF \ mul x0, x1, x0 __LF \ adc x11, x11, x0 __LF \ adds x4, x4, x20 __LF \ adcs x5, x5, x21 __LF \ and x15, x4, x5 __LF \ adcs x6, x6, x22 __LF \ and x15, x15, x6 __LF \ adcs x7, x7, x23 __LF \ and x15, x15, x7 __LF \ adcs x8, x8, x17 __LF \ and x15, x15, x8 __LF \ adcs x9, x9, x19 __LF \ and x15, x15, x9 __LF \ adcs x10, x10, x2 __LF \ and x15, x15, x10 __LF \ adc x11, x11, x16 __LF \ lsr x12, x11, #9 __LF \ orr x11, x11, #0xfffffffffffffe00 __LF \ cmp xzr, xzr __LF \ adcs xzr, x3, x12 __LF \ adcs xzr, x15, xzr __LF \ adcs xzr, x11, xzr __LF \ adcs x3, x3, x12 __LF \ adcs x4, x4, xzr __LF \ adcs x5, x5, xzr __LF \ adcs x6, x6, xzr __LF \ adcs x7, x7, xzr __LF \ adcs x8, x8, xzr __LF \ adcs x9, x9, xzr __LF \ adcs x10, x10, xzr __LF \ adc x11, x11, xzr __LF \ and x11, x11, #0x1ff __LF \ stp x3, x4, [P0] __LF \ stp x5, x6, [P0+16] __LF \ stp x7, x8, [P0+32] __LF \ stp x9, x10, [P0+48] __LF \ str x11, [P0+64] // P0 = 3 * P1 - 8 * P2 == 3 * P1 + 8 * (p_521 - P2) #define cmsub38_p521(P0,P1,P2) \ ldp x6, x7, [P1] __LF \ lsl x3, x6, #1 __LF \ adds x3, x3, x6 __LF \ extr x4, x7, x6, #63 __LF \ adcs x4, x4, x7 __LF \ ldp x8, x9, [P1+16] __LF \ extr x5, x8, x7, #63 __LF \ adcs x5, x5, x8 __LF \ extr x6, x9, x8, #63 __LF \ adcs x6, x6, x9 __LF \ ldp x10, x11, [P1+32] __LF \ extr x7, x10, x9, #63 __LF \ adcs x7, x7, x10 __LF \ extr x8, x11, x10, #63 __LF \ adcs x8, x8, x11 __LF \ ldp x12, x13, [P1+48] __LF \ extr x9, x12, x11, #63 __LF \ adcs x9, x9, x12 __LF \ extr x10, x13, x12, #63 __LF \ adcs x10, x10, x13 __LF \ ldr x14, [P1+64] __LF \ extr x11, x14, x13, #63 __LF \ adc x11, x11, x14 __LF \ ldp x20, x21, [P2] __LF \ mvn x20, x20 __LF \ lsl x0, x20, #3 __LF \ adds x3, x3, x0 __LF \ mvn x21, x21 __LF \ extr x0, x21, x20, #61 __LF \ adcs x4, x4, x0 __LF \ ldp x22, x23, [P2+16] __LF \ mvn x22, x22 __LF \ extr x0, x22, x21, #61 __LF \ adcs x5, x5, x0 __LF \ and x15, x4, x5 __LF \ mvn x23, x23 __LF \ extr x0, x23, x22, #61 __LF \ adcs x6, x6, x0 __LF \ and x15, x15, x6 __LF \ ldp x20, x21, [P2+32] __LF \ mvn x20, x20 __LF \ extr x0, x20, x23, #61 __LF \ adcs x7, x7, x0 __LF \ and x15, x15, x7 __LF \ mvn x21, x21 __LF \ extr x0, x21, x20, #61 __LF \ adcs x8, x8, x0 
__LF \ and x15, x15, x8 __LF \ ldp x22, x23, [P2+48] __LF \ mvn x22, x22 __LF \ extr x0, x22, x21, #61 __LF \ adcs x9, x9, x0 __LF \ and x15, x15, x9 __LF \ mvn x23, x23 __LF \ extr x0, x23, x22, #61 __LF \ adcs x10, x10, x0 __LF \ and x15, x15, x10 __LF \ ldr x0, [P2+64] __LF \ eor x0, x0, #0x1ff __LF \ extr x0, x0, x23, #61 __LF \ adc x11, x11, x0 __LF \ lsr x12, x11, #9 __LF \ orr x11, x11, #0xfffffffffffffe00 __LF \ cmp xzr, xzr __LF \ adcs xzr, x3, x12 __LF \ adcs xzr, x15, xzr __LF \ adcs xzr, x11, xzr __LF \ adcs x3, x3, x12 __LF \ adcs x4, x4, xzr __LF \ adcs x5, x5, xzr __LF \ adcs x6, x6, xzr __LF \ adcs x7, x7, xzr __LF \ adcs x8, x8, xzr __LF \ adcs x9, x9, xzr __LF \ adcs x10, x10, xzr __LF \ adc x11, x11, xzr __LF \ and x11, x11, #0x1ff __LF \ stp x3, x4, [P0] __LF \ stp x5, x6, [P0+16] __LF \ stp x7, x8, [P0+32] __LF \ stp x9, x10, [P0+48] __LF \ str x11, [P0+64] // P0 = 4 * P1 - P2 = 4 * P1 + (p_521 - P2) #define cmsub41_p521(P0,P1,P2) \ ldp x6, x7, [P1] __LF \ lsl x3, x6, #2 __LF \ extr x4, x7, x6, #62 __LF \ ldp x8, x9, [P1+16] __LF \ extr x5, x8, x7, #62 __LF \ extr x6, x9, x8, #62 __LF \ ldp x10, x11, [P1+32] __LF \ extr x7, x10, x9, #62 __LF \ extr x8, x11, x10, #62 __LF \ ldp x12, x13, [P1+48] __LF \ extr x9, x12, x11, #62 __LF \ extr x10, x13, x12, #62 __LF \ ldr x14, [P1+64] __LF \ extr x11, x14, x13, #62 __LF \ ldp x0, x1, [P2] __LF \ mvn x0, x0 __LF \ adds x3, x3, x0 __LF \ sbcs x4, x4, x1 __LF \ ldp x0, x1, [P2+16] __LF \ sbcs x5, x5, x0 __LF \ and x15, x4, x5 __LF \ sbcs x6, x6, x1 __LF \ and x15, x15, x6 __LF \ ldp x0, x1, [P2+32] __LF \ sbcs x7, x7, x0 __LF \ and x15, x15, x7 __LF \ sbcs x8, x8, x1 __LF \ and x15, x15, x8 __LF \ ldp x0, x1, [P2+48] __LF \ sbcs x9, x9, x0 __LF \ and x15, x15, x9 __LF \ sbcs x10, x10, x1 __LF \ and x15, x15, x10 __LF \ ldr x0, [P2+64] __LF \ eor x0, x0, #0x1ff __LF \ adc x11, x11, x0 __LF \ lsr x12, x11, #9 __LF \ orr x11, x11, #0xfffffffffffffe00 __LF \ cmp xzr, xzr __LF \ adcs xzr, x3, x12 __LF \ adcs xzr, x15, xzr __LF \ adcs xzr, x11, xzr __LF \ adcs x3, x3, x12 __LF \ adcs x4, x4, xzr __LF \ adcs x5, x5, xzr __LF \ adcs x6, x6, xzr __LF \ adcs x7, x7, xzr __LF \ adcs x8, x8, xzr __LF \ adcs x9, x9, xzr __LF \ adcs x10, x10, xzr __LF \ adc x11, x11, xzr __LF \ and x11, x11, #0x1ff __LF \ stp x3, x4, [P0] __LF \ stp x5, x6, [P0+16] __LF \ stp x7, x8, [P0+32] __LF \ stp x9, x10, [P0+48] __LF \ str x11, [P0+64] S2N_BN_SYMBOL(p521_jdouble_alt): CFI_START // Save regs and make room on stack for temporary variables CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_PUSH2(x27,x28) CFI_DEC_SP(NSPACE) // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 // Main code, just a sequence of basic field operations // z2 = z^2 // y2 = y^2 sqr_p521(z2,z_1) sqr_p521(y2,y_1) // x2p = x^2 - z^4 = (x + z^2) * (x - z^2) add_p521(t1,x_1,z2) sub_p521(t2,x_1,z2) mul_p521(x2p,t1,t2) // t1 = y + z // x4p = x2p^2 // xy2 = x * y^2 add_p521(t1,y_1,z_1) sqr_p521(x4p,x2p) weakmul_p521(xy2,x_1,y2) // t2 = (y + z)^2 sqr_p521(t2,t1) // d = 12 * xy2 - 9 * x4p // t1 = y^2 + 2 * y * z cmsub_p521(d,12,xy2,9,x4p) sub_p521(t1,t2,z2) // y4 = y^4 sqr_p521(y4,y2) // z_3' = 2 * y * z // dx2 = d * x2p sub_p521(z_3,t1,y2) weakmul_p521(dx2,d,x2p) // x' = 4 * xy2 - d cmsub41_p521(x_3,xy2,d) // y' = 3 * dx2 - 8 * y4 cmsub38_p521(y_3,dx2,y4) // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP2(x27,x28) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET 
S2N_BN_SIZE_DIRECTIVE(p521_jdouble_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
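Editor's note: the field-operation sequence in p521_jdouble_alt above follows the comments "z2 = z^2", "x2p = x^2 - z^4", ..., "y' = 3 * dx2 - 8 * y4", which appear to be the standard Jacobian-coordinate doubling formulas for a curve with a = -3 such as P-521. Below is a minimal Python sketch of that same sequence, assuming strictly reduced inputs and using Python's native integers instead of the 9-limb representation; the name jdouble_model and the tuple interface are illustrative, not part of the source.

P_521 = 2**521 - 1

def jdouble_model(x, y, z, p=P_521):
    # Mirrors the comment sequence in p521_jdouble_alt; the assembly's
    # weak/strict reduction distinctions are ignored here.
    z2 = z * z % p
    y2 = y * y % p
    x2p = (x + z2) * (x - z2) % p      # x^2 - z^4
    t1 = (y + z) % p
    x4p = x2p * x2p % p
    xy2 = x * y2 % p
    t2 = t1 * t1 % p                   # (y + z)^2
    d = (12 * xy2 - 9 * x4p) % p
    t1 = (t2 - z2) % p                 # y^2 + 2*y*z
    y4 = y2 * y2 % p
    z3 = (t1 - y2) % p                 # 2*y*z
    dx2 = d * x2p % p
    x3 = (4 * xy2 - d) % p
    y3 = (3 * dx2 - 8 * y4) % p
    return x3, y3, z3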
wlsfx/bnbb
13,097
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_mul_p521_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply modulo p_521, z := (x * y) mod p_521, assuming x and y reduced // Inputs x[9], y[9]; output z[9] // // extern void bignum_mul_p521_alt(uint64_t z[static 9], // const uint64_t x[static 9], // const uint64_t y[static 9]); // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_p521_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mul_p521_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_p521_alt) .text .balign 4 #define z x0 #define x x1 #define y x2 // These are repeated mod 2 as we load paris of inputs #define a0 x3 #define a1 x4 #define a2 x3 #define a3 x4 #define a4 x3 #define a5 x4 #define a6 x3 #define a7 x4 #define a8 x3 #define b0 x5 #define b1 x6 #define b2 x7 #define b3 x8 #define b4 x9 #define b5 x10 #define b6 x11 #define b7 x12 #define b8 x13 #define t x14 // These repeat mod 11 as we stash some intermediate results in the // output buffer. #define u0 x15 #define u1 x16 #define u2 x17 #define u3 x19 #define u4 x20 #define u5 x21 #define u6 x22 #define u7 x23 #define u8 x24 #define u9 x25 #define u10 x26 #define u11 x15 #define u12 x16 #define u13 x17 #define u14 x19 #define u15 x20 #define u16 x21 S2N_BN_SYMBOL(bignum_mul_p521_alt): CFI_START // Save more registers and make temporary space on stack CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_DEC_SP(64) // Load operands and set up row 0 = [u9;...;u0] = a0 * [b8;...;b0] ldp a0, a1, [x] ldp b0, b1, [y] mul u0, a0, b0 umulh u1, a0, b0 mul t, a0, b1 umulh u2, a0, b1 adds u1, u1, t ldp b2, b3, [y, #16] mul t, a0, b2 umulh u3, a0, b2 adcs u2, u2, t mul t, a0, b3 umulh u4, a0, b3 adcs u3, u3, t ldp b4, b5, [y, #32] mul t, a0, b4 umulh u5, a0, b4 adcs u4, u4, t mul t, a0, b5 umulh u6, a0, b5 adcs u5, u5, t ldp b6, b7, [y, #48] mul t, a0, b6 umulh u7, a0, b6 adcs u6, u6, t ldr b8, [y, #64] mul t, a0, b7 umulh u8, a0, b7 adcs u7, u7, t mul t, a0, b8 umulh u9, a0, b8 adcs u8, u8, t adc u9, u9, xzr // Row 1 = [u10;...;u0] = [a1;a0] * [b8;...;b0] mul t, a1, b0 adds u1, u1, t mul t, a1, b1 adcs u2, u2, t mul t, a1, b2 adcs u3, u3, t mul t, a1, b3 adcs u4, u4, t mul t, a1, b4 adcs u5, u5, t mul t, a1, b5 adcs u6, u6, t mul t, a1, b6 adcs u7, u7, t mul t, a1, b7 adcs u8, u8, t mul t, a1, b8 adcs u9, u9, t cset u10, cs umulh t, a1, b0 adds u2, u2, t umulh t, a1, b1 adcs u3, u3, t umulh t, a1, b2 adcs u4, u4, t umulh t, a1, b3 adcs u5, u5, t umulh t, a1, b4 adcs u6, u6, t umulh t, a1, b5 adcs u7, u7, t umulh t, a1, b6 adcs u8, u8, t umulh t, a1, b7 adcs u9, u9, t umulh t, a1, b8 adc u10, u10, t stp u0, u1, [sp] // Row 2 = [u11;...;u0] = [a2;a1;a0] * [b8;...;b0] ldp a2, a3, [x, #16] mul t, a2, b0 adds u2, u2, t mul t, a2, b1 adcs u3, u3, t mul t, a2, b2 adcs u4, u4, t mul t, a2, b3 adcs u5, u5, t mul t, a2, b4 adcs u6, u6, t mul t, a2, b5 adcs u7, u7, t mul t, a2, b6 adcs u8, u8, t mul t, a2, b7 adcs u9, u9, t mul t, a2, b8 adcs u10, u10, t cset u11, cs umulh t, a2, b0 adds u3, u3, t umulh t, a2, b1 adcs u4, u4, t umulh t, a2, b2 adcs u5, u5, t umulh t, a2, b3 adcs u6, u6, t umulh t, a2, b4 adcs u7, u7, t umulh t, a2, b5 adcs u8, u8, t umulh t, a2, b6 adcs u9, u9, t umulh t, a2, b7 adcs u10, u10, t umulh t, a2, b8 adc u11, u11, t // Row 3 = [u12;...;u0] = 
[a3;a2;a1;a0] * [b8;...;b0] mul t, a3, b0 adds u3, u3, t mul t, a3, b1 adcs u4, u4, t mul t, a3, b2 adcs u5, u5, t mul t, a3, b3 adcs u6, u6, t mul t, a3, b4 adcs u7, u7, t mul t, a3, b5 adcs u8, u8, t mul t, a3, b6 adcs u9, u9, t mul t, a3, b7 adcs u10, u10, t mul t, a3, b8 adcs u11, u11, t cset u12, cs umulh t, a3, b0 adds u4, u4, t umulh t, a3, b1 adcs u5, u5, t umulh t, a3, b2 adcs u6, u6, t umulh t, a3, b3 adcs u7, u7, t umulh t, a3, b4 adcs u8, u8, t umulh t, a3, b5 adcs u9, u9, t umulh t, a3, b6 adcs u10, u10, t umulh t, a3, b7 adcs u11, u11, t umulh t, a3, b8 adc u12, u12, t stp u2, u3, [sp, #16] // Row 4 = [u13;...;u0] = [a4;a3;a2;a1;a0] * [b8;...;b0] ldp a4, a5, [x, #32] mul t, a4, b0 adds u4, u4, t mul t, a4, b1 adcs u5, u5, t mul t, a4, b2 adcs u6, u6, t mul t, a4, b3 adcs u7, u7, t mul t, a4, b4 adcs u8, u8, t mul t, a4, b5 adcs u9, u9, t mul t, a4, b6 adcs u10, u10, t mul t, a4, b7 adcs u11, u11, t mul t, a4, b8 adcs u12, u12, t cset u13, cs umulh t, a4, b0 adds u5, u5, t umulh t, a4, b1 adcs u6, u6, t umulh t, a4, b2 adcs u7, u7, t umulh t, a4, b3 adcs u8, u8, t umulh t, a4, b4 adcs u9, u9, t umulh t, a4, b5 adcs u10, u10, t umulh t, a4, b6 adcs u11, u11, t umulh t, a4, b7 adcs u12, u12, t umulh t, a4, b8 adc u13, u13, t // Row 5 = [u14;...;u0] = [a5;a4;a3;a2;a1;a0] * [b8;...;b0] mul t, a5, b0 adds u5, u5, t mul t, a5, b1 adcs u6, u6, t mul t, a5, b2 adcs u7, u7, t mul t, a5, b3 adcs u8, u8, t mul t, a5, b4 adcs u9, u9, t mul t, a5, b5 adcs u10, u10, t mul t, a5, b6 adcs u11, u11, t mul t, a5, b7 adcs u12, u12, t mul t, a5, b8 adcs u13, u13, t cset u14, cs umulh t, a5, b0 adds u6, u6, t umulh t, a5, b1 adcs u7, u7, t umulh t, a5, b2 adcs u8, u8, t umulh t, a5, b3 adcs u9, u9, t umulh t, a5, b4 adcs u10, u10, t umulh t, a5, b5 adcs u11, u11, t umulh t, a5, b6 adcs u12, u12, t umulh t, a5, b7 adcs u13, u13, t umulh t, a5, b8 adc u14, u14, t stp u4, u5, [sp, #32] // Row 6 = [u15;...;u0] = [a6;a5;a4;a3;a2;a1;a0] * [b8;...;b0] ldp a6, a7, [x, #48] mul t, a6, b0 adds u6, u6, t mul t, a6, b1 adcs u7, u7, t mul t, a6, b2 adcs u8, u8, t mul t, a6, b3 adcs u9, u9, t mul t, a6, b4 adcs u10, u10, t mul t, a6, b5 adcs u11, u11, t mul t, a6, b6 adcs u12, u12, t mul t, a6, b7 adcs u13, u13, t mul t, a6, b8 adcs u14, u14, t cset u15, cs umulh t, a6, b0 adds u7, u7, t umulh t, a6, b1 adcs u8, u8, t umulh t, a6, b2 adcs u9, u9, t umulh t, a6, b3 adcs u10, u10, t umulh t, a6, b4 adcs u11, u11, t umulh t, a6, b5 adcs u12, u12, t umulh t, a6, b6 adcs u13, u13, t umulh t, a6, b7 adcs u14, u14, t umulh t, a6, b8 adc u15, u15, t // Row 7 = [u16;...;u0] = [a7;a6;a5;a4;a3;a2;a1;a0] * [b8;...;b0] mul t, a7, b0 adds u7, u7, t mul t, a7, b1 adcs u8, u8, t mul t, a7, b2 adcs u9, u9, t mul t, a7, b3 adcs u10, u10, t mul t, a7, b4 adcs u11, u11, t mul t, a7, b5 adcs u12, u12, t mul t, a7, b6 adcs u13, u13, t mul t, a7, b7 adcs u14, u14, t mul t, a7, b8 adcs u15, u15, t cset u16, cs umulh t, a7, b0 adds u8, u8, t umulh t, a7, b1 adcs u9, u9, t umulh t, a7, b2 adcs u10, u10, t umulh t, a7, b3 adcs u11, u11, t umulh t, a7, b4 adcs u12, u12, t umulh t, a7, b5 adcs u13, u13, t umulh t, a7, b6 adcs u14, u14, t umulh t, a7, b7 adcs u15, u15, t umulh t, a7, b8 adc u16, u16, t stp u6, u7, [sp, #48] // Row 8 = [u16;...;u0] = [a8;a7;a6;a5;a4;a3;a2;a1;a0] * [b8;...;b0] ldr a8, [x, #64] mul t, a8, b0 adds u8, u8, t mul t, a8, b1 adcs u9, u9, t mul t, a8, b2 adcs u10, u10, t mul t, a8, b3 adcs u11, u11, t mul t, a8, b4 adcs u12, u12, t mul t, a8, b5 adcs u13, u13, t mul t, a8, b6 adcs u14, u14, t mul t, a8, b7 adcs u15, 
u15, t mul t, a8, b8 adc u16, u16, t umulh t, a8, b0 adds u9, u9, t umulh t, a8, b1 adcs u10, u10, t umulh t, a8, b2 adcs u11, u11, t umulh t, a8, b3 adcs u12, u12, t umulh t, a8, b4 adcs u13, u13, t umulh t, a8, b5 adcs u14, u14, t umulh t, a8, b6 adcs u15, u15, t umulh t, a8, b7 adc u16, u16, t // Now we have the full product, which we consider as // 2^521 * h + l. Form h + l + 1 subs xzr, xzr, xzr ldp b0, b1, [sp] extr t, u9, u8, #9 adcs b0, b0, t extr t, u10, u9, #9 adcs b1, b1, t ldp b2, b3, [sp, #16] extr t, u11, u10, #9 adcs b2, b2, t extr t, u12, u11, #9 adcs b3, b3, t ldp b4, b5, [sp, #32] extr t, u13, u12, #9 adcs b4, b4, t extr t, u14, u13, #9 adcs b5, b5, t ldp b6, b7, [sp, #48] extr t, u15, u14, #9 adcs b6, b6, t extr t, u16, u15, #9 adcs b7, b7, t orr b8, u8, #~0x1FF lsr t, u16, #9 adcs b8, b8, t // Now CF is set if h + l + 1 >= 2^521, which means it's already // the answer, while if ~CF the answer is h + l so we should subtract // 1 (all considered in 521 bits). Hence subtract ~CF and mask. sbcs b0, b0, xzr sbcs b1, b1, xzr sbcs b2, b2, xzr sbcs b3, b3, xzr sbcs b4, b4, xzr sbcs b5, b5, xzr sbcs b6, b6, xzr sbcs b7, b7, xzr sbc b8, b8, xzr and b8, b8, #0x1FF // Store back digits of final result stp b0, b1, [z] stp b2, b3, [z, #16] stp b4, b5, [z, #32] stp b6, b7, [z, #48] str b8, [z, #64] // Restore registers CFI_INC_SP(64) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mul_p521_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
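Editor's note: the final reduction in bignum_mul_p521_alt splits the double-length product as 2^521 * h + l and exploits 2^521 == 1 (mod p_521): it forms h + l + 1, and the carry out of bit 521 decides whether the extra 1 (equivalently, the subtraction of p_521) should be kept. A small Python model of that trick, assuming both inputs are reduced below p_521 so one conditional subtraction suffices; the function name is illustrative, and unlike the assembly this version branches rather than using a carry chain.

P_521 = 2**521 - 1

def mul_p521_model(x, y):
    # x, y assumed reduced, so h + l < 2*p_521.
    prod = x * y
    h, l = prod >> 521, prod & P_521
    s = h + l + 1                 # the "h + l + 1" trial sum from the code
    if s >> 521:                  # carry out of 521 bits: answer is h + l - p_521
        return s & P_521          # == h + l + 1 - 2^521
    return s - 1                  # no carry: answer is just h + l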
wlsfx/bnbb
5,117
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_tolebytes_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert 9-digit 528-bit bignum to little-endian bytes // // extern void bignum_tolebytes_p521(uint8_t z[static 66], // const uint64_t x[static 9]); // // This is assuming the input x is < 2^528 so that it fits in 66 bytes. // In particular this holds if x < p_521 < 2^521 < 2^528. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tolebytes_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tolebytes_p521) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tolebytes_p521) .text .balign 4 #define z x0 #define x x1 #define d x2 #define dshort w2 S2N_BN_SYMBOL(bignum_tolebytes_p521): CFI_START // word 0 ldr d, [x] strb dshort, [z] lsr d, d, #8 strb dshort, [z, #1] lsr d, d, #8 strb dshort, [z, #2] lsr d, d, #8 strb dshort, [z, #3] lsr d, d, #8 strb dshort, [z, #4] lsr d, d, #8 strb dshort, [z, #5] lsr d, d, #8 strb dshort, [z, #6] lsr d, d, #8 strb dshort, [z, #7] // word 1 ldr d, [x, #8] strb dshort, [z, #8] lsr d, d, #8 strb dshort, [z, #9] lsr d, d, #8 strb dshort, [z, #10] lsr d, d, #8 strb dshort, [z, #11] lsr d, d, #8 strb dshort, [z, #12] lsr d, d, #8 strb dshort, [z, #13] lsr d, d, #8 strb dshort, [z, #14] lsr d, d, #8 strb dshort, [z, #15] // word 2 ldr d, [x, #16] strb dshort, [z, #16] lsr d, d, #8 strb dshort, [z, #17] lsr d, d, #8 strb dshort, [z, #18] lsr d, d, #8 strb dshort, [z, #19] lsr d, d, #8 strb dshort, [z, #20] lsr d, d, #8 strb dshort, [z, #21] lsr d, d, #8 strb dshort, [z, #22] lsr d, d, #8 strb dshort, [z, #23] // word 3 ldr d, [x, #24] strb dshort, [z, #24] lsr d, d, #8 strb dshort, [z, #25] lsr d, d, #8 strb dshort, [z, #26] lsr d, d, #8 strb dshort, [z, #27] lsr d, d, #8 strb dshort, [z, #28] lsr d, d, #8 strb dshort, [z, #29] lsr d, d, #8 strb dshort, [z, #30] lsr d, d, #8 strb dshort, [z, #31] // word 4 ldr d, [x, #32] strb dshort, [z, #32] lsr d, d, #8 strb dshort, [z, #33] lsr d, d, #8 strb dshort, [z, #34] lsr d, d, #8 strb dshort, [z, #35] lsr d, d, #8 strb dshort, [z, #36] lsr d, d, #8 strb dshort, [z, #37] lsr d, d, #8 strb dshort, [z, #38] lsr d, d, #8 strb dshort, [z, #39] // word 5 ldr d, [x, #40] strb dshort, [z, #40] lsr d, d, #8 strb dshort, [z, #41] lsr d, d, #8 strb dshort, [z, #42] lsr d, d, #8 strb dshort, [z, #43] lsr d, d, #8 strb dshort, [z, #44] lsr d, d, #8 strb dshort, [z, #45] lsr d, d, #8 strb dshort, [z, #46] lsr d, d, #8 strb dshort, [z, #47] // word 6 ldr d, [x, #48] strb dshort, [z, #48] lsr d, d, #8 strb dshort, [z, #49] lsr d, d, #8 strb dshort, [z, #50] lsr d, d, #8 strb dshort, [z, #51] lsr d, d, #8 strb dshort, [z, #52] lsr d, d, #8 strb dshort, [z, #53] lsr d, d, #8 strb dshort, [z, #54] lsr d, d, #8 strb dshort, [z, #55] // word 7 ldr d, [x, #56] strb dshort, [z, #56] lsr d, d, #8 strb dshort, [z, #57] lsr d, d, #8 strb dshort, [z, #58] lsr d, d, #8 strb dshort, [z, #59] lsr d, d, #8 strb dshort, [z, #60] lsr d, d, #8 strb dshort, [z, #61] lsr d, d, #8 strb dshort, [z, #62] lsr d, d, #8 strb dshort, [z, #63] // word 8 ldr d, [x, #64] strb dshort, [z, #64] lsr d, d, #8 strb dshort, [z, #65] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_tolebytes_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
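Editor's note: bignum_tolebytes_p521 above simply serializes the nine 64-bit limbs as 66 little-endian bytes, relying on the stated assumption that the value is below 2^528. A short Python equivalent for reference (helper name illustrative):

def tolebytes_p521_model(words):
    # words: 9 little-endian 64-bit limbs whose combined value is < 2^528
    assert len(words) == 9 and all(0 <= w < 2**64 for w in words)
    value = sum(w << (64 * i) for i, w in enumerate(words))
    return value.to_bytes(66, "little")   # raises OverflowError if >= 2^528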
wlsfx/bnbb
3,818
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_tomont_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert to Montgomery form z := (2^576 * x) mod p_521 // Input x[9]; output z[9] // // extern void bignum_tomont_p521(uint64_t z[static 9], // const uint64_t x[static 9]); // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_tomont_p521) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_p521) .text .balign 4 #define z x0 #define x x1 #define h x2 #define t x3 #define d0 x4 #define d1 x5 #define d2 x6 #define d3 x7 #define d4 x8 #define d5 x9 #define d6 x10 #define d7 x11 #define d8 x12 S2N_BN_SYMBOL(bignum_tomont_p521): CFI_START // Load top digit first and get its upper bits in h so that we // separate out x = 2^521 * H + L with h = H. Now x mod p_521 = // (H + L) mod p_521 = if H + L >= p_521 then H + L - p_521 else H + L. ldr d8, [x, #64] lsr h, d8, #9 // Load in the other digits and decide whether H + L >= p_521. This is // equivalent to H + L + 1 >= 2^521, and since this can only happen if // digits d7,...,d1 consist entirely of 1 bits, we can condense the // carry chain by ANDing digits together, perhaps reducing its latency. // This condenses only three pairs; the payoff beyond that seems limited. // By stuffing in 1 bits from 521 position upwards, get CF directly subs xzr, xzr, xzr ldp d0, d1, [x] adcs xzr, d0, h adcs xzr, d1, xzr ldp d2, d3, [x, #16] and t, d2, d3 adcs xzr, t, xzr ldp d4, d5, [x, #32] and t, d4, d5 adcs xzr, t, xzr ldp d6, d7, [x, #48] and t, d6, d7 adcs xzr, t, xzr orr t, d8, #~0x1FF adcs t, t, xzr // Now H + L >= p_521 <=> H + L + 1 >= 2^521 <=> CF from this comparison. // So if CF is set we want (H + L) - p_521 = (H + L + 1) - 2^521 // while otherwise we want just H + L. So mask H + L + CF to 521 bits. adcs d0, d0, h adcs d1, d1, xzr adcs d2, d2, xzr adcs d3, d3, xzr adcs d4, d4, xzr adcs d5, d5, xzr adcs d6, d6, xzr adcs d7, d7, xzr adc d8, d8, xzr // So far, this is just a modular reduction as in bignum_mod_p521_9, // except that the final masking of d8 is skipped since that comes out // in the wash anyway from the next block, which is the Montgomery map, // multiplying by 2^576 modulo p_521. Because 2^521 == 1 (mod p_521) // this is just rotation left by 576 - 521 = 55 bits. To rotate in a // right-to-left fashion, which might blend better with the carry // chain above, the digit register indices themselves get shuffled up. lsl t, d0, #55 extr d0, d1, d0, #9 extr d1, d2, d1, #9 extr d2, d3, d2, #9 extr d3, d4, d3, #9 extr d4, d5, d4, #9 extr d5, d6, d5, #9 extr d6, d7, d6, #9 extr d7, d8, d7, #9 lsr d8, d7, #9 orr t, t, d8 and d7, d7, #0x1FF // Store the result from the shuffled registers [d7;d6;...;d1;d0;t] stp t, d0, [z] stp d1, d2, [z, #16] stp d3, d4, [z, #32] stp d5, d6, [z, #48] str d7, [z, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_tomont_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
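Editor's note: the Montgomery map in bignum_tomont_p521 first reduces x mod p_521 and then multiplies by 2^576; since 2^521 == 1 (mod p_521), that multiplication collapses to a left rotation by 576 - 521 = 55 bits within 521 bits, exactly as the comments above describe. A Python sketch of the arithmetic (illustrative name, not constant-time):

P_521 = 2**521 - 1

def tomont_p521_model(x):
    # (2^576 * x) mod p_521: reduce, then rotate the 521-bit value left by 55.
    x %= P_521
    # Only the all-ones pattern rotates to p_521 itself, and the reduction
    # above rules that out, so the result stays strictly below p_521.
    return ((x << 55) | (x >> (521 - 55))) & P_521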
wlsfx/bnbb
2,320
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_neg_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Negate modulo p_521, z := (-x) mod p_521, assuming x reduced // Input x[9]; output z[9] // // extern void bignum_neg_p521(uint64_t z[static 9], const uint64_t x[static 9]); // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_neg_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_neg_p521) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_neg_p521) .text .balign 4 #define z x0 #define x x1 #define p x2 #define d0 x3 #define d1 x4 #define d2 x5 #define d3 x6 #define d4 x7 #define d5 x8 #define d6 x9 #define d7 x10 #define d8 x11 S2N_BN_SYMBOL(bignum_neg_p521): CFI_START // Load the 9 digits of x and generate p = the OR of them all ldp d0, d1, [x] orr d6, d0, d1 ldp d2, d3, [x, #16] orr d7, d2, d3 orr p, d6, d7 ldp d4, d5, [x, #32] orr d8, d4, d5 orr p, p, d8 ldp d6, d7, [x, #48] orr d8, d6, d7 orr p, p, d8 ldr d8, [x, #64] orr p, p, d8 // Turn p into a bitmask for "input is nonzero", so that we avoid doing // -0 = p_521 and hence maintain strict modular reduction cmp p, #0 csetm p, ne // Since p_521 is all 1s, the subtraction is just an exclusive-or with p // to give an optional inversion, with a slight fiddle for the top digit. eor d0, d0, p eor d1, d1, p eor d2, d2, p eor d3, d3, p eor d4, d4, p eor d5, d5, p eor d6, d6, p eor d7, d7, p and p, p, #0x1FF eor d8, d8, p // Write back the result and return stp d0, d1, [z] stp d2, d3, [z, #16] stp d4, d5, [z, #32] stp d6, d7, [z, #48] str d8, [z, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_neg_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
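Editor's note: because p_521 is all 1 bits, the subtraction p_521 - x in bignum_neg_p521 is just a bitwise complement over 521 bits, with x == 0 handled specially so the output stays strictly reduced. A two-line Python model (illustrative name; the assembly computes the same thing with a mask instead of a branch):

P_521 = 2**521 - 1

def neg_p521_model(x):
    # (-x) mod p_521 for 0 <= x < p_521
    return 0 if x == 0 else x ^ P_521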
wlsfx/bnbb
75,275
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_inv_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Modular inverse modulo p_521 = 2^521 - 1 // Input x[9]; output z[9] // // extern void bignum_inv_p521(uint64_t z[static 9],const uint64_t x[static 9]); // // Assuming the 9-digit input x is coprime to p_521, i.e. is not divisible // by it, returns z < p_521 such that x * z == 1 (mod p_521). Note that // x does not need to be reduced modulo p_521, but the output always is. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_inv_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_inv_p521) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_inv_p521) .text .balign 4 // Size in bytes of a 64-bit word #define N 8 // Used for the return pointer #define res x20 // Loop counter and d = 2 * delta value for divstep #define i x21 #define d x22 // Registers used for matrix element magnitudes and signs #define m00 x10 #define m01 x11 #define m10 x12 #define m11 x13 #define s00 x14 #define s01 x15 #define s10 x16 #define s11 x17 // Initial carries for combinations #define car0 x9 #define car1 x19 // Input and output, plain registers treated according to pattern #define reg0 x0, #0 #define reg1 x1, #0 #define reg2 x2, #0 #define reg3 x3, #0 #define reg4 x4, #0 #define x x1, #0 #define z x0, #0 // Pointer-offset pairs for temporaries on stack #define f sp, #0 #define g sp, #(9*N) #define u sp, #(18*N) #define v sp, #(27*N) // Total size to reserve on the stack #define NSPACE 36*N // Very similar to a subroutine call to the s2n-bignum word_divstep59. 
// But different in register usage and returning the final matrix in // registers as follows // // [ m00 m01] // [ m10 m11] #define divstep59() \ and x4, x2, #0xfffff __LF \ orr x4, x4, #0xfffffe0000000000 __LF \ and x5, x3, #0xfffff __LF \ orr x5, x5, #0xc000000000000000 __LF \ tst x5, #0x1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, 
#0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ asr x5, x5, #1 __LF \ add x8, x4, #0x100, lsl #12 __LF \ sbfx x8, x8, #21, #21 __LF \ mov x11, #0x100000 __LF \ add x11, x11, x11, lsl #21 __LF \ add x9, x4, x11 __LF \ asr x9, x9, #42 __LF \ add x10, x5, #0x100, lsl #12 __LF \ sbfx x10, x10, #21, #21 __LF \ add x11, x5, x11 __LF \ asr x11, x11, #42 __LF \ mul x6, x8, x2 __LF \ mul x7, x9, x3 __LF \ mul x2, x10, x2 __LF \ mul x3, x11, x3 __LF \ add x4, x6, x7 __LF \ add x5, x2, x3 __LF \ asr x2, x4, #20 __LF \ asr x3, x5, #20 __LF \ and x4, x2, #0xfffff __LF \ orr x4, x4, #0xfffffe0000000000 __LF \ and x5, x3, #0xfffff __LF \ orr x5, x5, #0xc000000000000000 __LF \ tst x5, #0x1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge 
__LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ asr x5, x5, #1 __LF \ add x12, x4, #0x100, lsl #12 __LF \ sbfx x12, x12, #21, #21 __LF \ mov x15, #0x100000 __LF \ add x15, x15, x15, lsl #21 __LF \ add x13, x4, x15 __LF \ asr x13, x13, #42 __LF \ add x14, x5, #0x100, lsl #12 __LF \ sbfx x14, x14, #21, #21 __LF \ add x15, x5, x15 __LF \ asr x15, x15, #42 __LF \ mul x6, x12, x2 __LF \ mul x7, x13, x3 __LF \ mul x2, x14, x2 __LF \ mul x3, x15, x3 __LF \ add x4, x6, x7 __LF \ add x5, x2, x3 __LF \ asr x2, x4, #20 __LF \ asr x3, x5, #20 __LF \ and x4, x2, #0xfffff __LF \ orr x4, x4, #0xfffffe0000000000 __LF \ and x5, x3, #0xfffff __LF \ orr x5, x5, #0xc000000000000000 __LF \ tst x5, #0x1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 
__LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ mul x2, x12, x8 __LF \ mul x3, x12, x9 __LF \ mul x6, x14, x8 __LF \ mul x7, x14, x9 __LF \ madd x8, x13, x10, x2 __LF \ madd x9, x13, x11, x3 __LF \ madd x16, x15, x10, x6 __LF \ madd x17, x15, x11, x7 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, 
x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ tst x5, #0x2 __LF \ asr x5, x5, #1 __LF \ csel x6, x4, xzr, ne __LF \ ccmp x1, xzr, #0x8, ne __LF \ cneg x1, x1, ge __LF \ cneg x6, x6, ge __LF \ csel x4, x5, x4, ge __LF \ add x5, x5, x6 __LF \ add x1, x1, #0x2 __LF \ asr x5, x5, #1 __LF \ add x12, x4, #0x100, lsl #12 __LF \ sbfx x12, x12, #22, #21 __LF \ mov x15, #0x100000 __LF \ add x15, x15, x15, lsl #21 __LF \ add x13, x4, x15 __LF \ asr x13, x13, #43 __LF \ add x14, x5, #0x100, lsl #12 __LF \ sbfx x14, x14, #22, #21 __LF \ add x15, x5, x15 __LF \ asr x15, x15, #43 __LF \ mneg x2, x12, x8 __LF \ mneg x3, x12, x9 __LF \ mneg x4, x14, x8 __LF \ mneg x5, x14, x9 __LF \ msub m00, x13, x16, x2 __LF \ msub m01, x13, x17, x3 __LF \ msub m10, x15, x16, x4 __LF \ msub m11, x15, x17, x5 // Loading large constants #define movbig(nn,n3,n2,n1,n0) \ movz nn, n0 __LF \ movk nn, n1, lsl #16 __LF \ movk nn, n2, lsl #32 __LF \ movk nn, n3, lsl #48 S2N_BN_SYMBOL(bignum_inv_p521): CFI_START // Save registers and make room for temporaries CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_DEC_SP(NSPACE) // Save the return pointer for the end so we can overwrite x0 later mov res, x0 // Copy the prime p_521 = 2^521 - 1 into the f variable mov x10, #0xFFFFFFFFFFFFFFFF stp x10, x10, [f] stp x10, x10, [f+16] stp x10, x10, [f+32] stp x10, x10, [f+48] mov x11, #0x1FF str x11, [f+64] // Copy the input into the g variable, but reduce it strictly mod p_521 // so that g <= f as assumed in the bound proof. This code fragment is // very similar to bignum_mod_p521_9 complete with carry condensation. ldr x8, [x1, #64] lsr x9, x8, #9 subs xzr, xzr, xzr ldp x10, x11, [x1] adcs xzr, x10, x9 adcs xzr, x11, xzr ldp x12, x13, [x1, #16] and x7, x12, x13 adcs xzr, x7, xzr ldp x14, x15, [x1, #32] and x7, x14, x15 adcs xzr, x7, xzr ldp x16, x17, [x1, #48] and x7, x16, x17 adcs xzr, x7, xzr orr x7, x8, #~0x1FF adcs x7, x7, xzr adcs x10, x10, x9 adcs x11, x11, xzr adcs x12, x12, xzr adcs x13, x13, xzr adcs x14, x14, xzr adcs x15, x15, xzr adcs x16, x16, xzr adcs x17, x17, xzr adc x8, x8, xzr and x8, x8, #0x1FF stp x10, x11, [g] stp x12, x13, [g+16] stp x14, x15, [g+32] stp x16, x17, [g+48] str x8, [g+64] // Also maintain weakly reduced < 2*p_521 vector [u,v] such that // [f,g] == x * 2^{1239-59*i} * [u,v] (mod p_521) // starting with [p_521,x] == x * 2^{1239-59*0} * [0,2^-1239] (mod p_521) // Note that because (2^{a+521} == 2^a) (mod p_521) we simply have // (2^-1239 == 2^324) (mod p_521) so the constant initializer is simple. // // Based on the standard divstep bound, for inputs <= 2^b we need at least // n >= (9437 * b + 1) / 4096. Since b is 521, that means 1201 iterations. // Since we package divstep in multiples of 59 bits, we do 21 blocks of 59 // making *1239* total. (With a bit more effort we could avoid the full 59 // divsteps and use a shorter tail computation, but we keep it simple.) // Hence, after the 21st iteration we have [f,g] == x * [u,v] and since // |f| = 1 we get the modular inverse from u by flipping its sign with f. stp xzr, xzr, [u] stp xzr, xzr, [u+16] stp xzr, xzr, [u+32] stp xzr, xzr, [u+48] str xzr, [u+64] mov x10, #16 stp xzr, xzr, [v] stp xzr, xzr, [v+16] stp xzr, x10, [v+32] stp xzr, xzr, [v+48] str xzr, [v+64] // Start of main loop. We jump into the middle so that the divstep // portion is common to the special 21st iteration after a uniform // first 20. 
mov i, #21 mov d, #1 b Lbignum_inv_p521_midloop Lbignum_inv_p521_loop: // Separate the matrix elements into sign-magnitude pairs cmp m00, xzr csetm s00, mi cneg m00, m00, mi cmp m01, xzr csetm s01, mi cneg m01, m01, mi cmp m10, xzr csetm s10, mi cneg m10, m10, mi cmp m11, xzr csetm s11, mi cneg m11, m11, mi // Adjust the initial values to allow for complement instead of negation // This initial offset is the same for [f,g] and [u,v] compositions. // Save it in stable registers for the [u,v] part and do [f,g] first. and x0, m00, s00 and x1, m01, s01 add car0, x0, x1 and x0, m10, s10 and x1, m11, s11 add car1, x0, x1 // Now the computation of the updated f and g values. This maintains a // 2-word carry between stages so we can conveniently insert the shift // right by 59 before storing back, and not overwrite digits we need // again of the old f and g values. // // Digit 0 of [f,g] ldr x7, [f] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, car0, x0 adc x2, xzr, x1 ldr x8, [g] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 adc x2, x2, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x5, car1, x0 adc x3, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 adc x3, x3, x1 // Digit 1 of [f,g] ldr x7, [f+N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [g+N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 adc x6, x6, x1 extr x4, x2, x4, #59 str x4, [f] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x3, x3, x0 adc x4, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x3, x3, x0 adc x4, x4, x1 extr x5, x3, x5, #59 str x5, [g] // Digit 2 of [f,g] ldr x7, [f+2*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [g+2*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 adc x5, x5, x1 extr x2, x6, x2, #59 str x2, [f+N] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x4, x4, x0 adc x2, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x4, x4, x0 adc x2, x2, x1 extr x3, x4, x3, #59 str x3, [g+N] // Digit 3 of [f,g] ldr x7, [f+3*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, xzr, x1 ldr x8, [g+3*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 adc x3, x3, x1 extr x6, x5, x6, #59 str x6, [f+2*N] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x2, x2, x0 adc x6, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x2, x2, x0 adc x6, x6, x1 extr x4, x2, x4, #59 str x4, [g+2*N] // Digit 4 of [f,g] ldr x7, [f+4*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x3, x3, x0 adc x4, xzr, x1 ldr x8, [g+4*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x3, x3, x0 adc x4, x4, x1 extr x5, x3, x5, #59 str x5, [f+3*N] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x6, x6, x0 adc x5, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x6, x6, x0 adc x5, x5, x1 extr x2, x6, x2, #59 str x2, [g+3*N] // Digit 5 of [f,g] ldr x7, [f+5*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, x4, x0 adc x2, xzr, x1 ldr x8, [g+5*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 adc x2, x2, x1 extr x3, x4, x3, #59 str x3, [f+4*N] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x5, x5, x0 adc x3, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 adc x3, x3, x1 extr x6, x5, x6, #59 str x6, [g+4*N] // Digit 6 of [f,g] ldr x7, [f+6*N] eor x1, x7, s00 mul x0, x1, 
m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [g+6*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 adc x6, x6, x1 extr x4, x2, x4, #59 str x4, [f+5*N] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x3, x3, x0 adc x4, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x3, x3, x0 adc x4, x4, x1 extr x5, x3, x5, #59 str x5, [g+5*N] // Digit 7 of [f,g] ldr x7, [f+7*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [g+7*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 adc x5, x5, x1 extr x2, x6, x2, #59 str x2, [f+6*N] eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x4, x4, x0 adc x2, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x4, x4, x0 adc x2, x2, x1 extr x3, x4, x3, #59 str x3, [g+6*N] // Digits 8 and 9 of [f,g] ldr x7, [f+8*N] eor x1, x7, s00 asr x3, x1, #63 and x3, x3, m00 neg x3, x3 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, x3, x1 ldr x8, [g+8*N] eor x1, x8, s01 asr x0, x1, #63 and x0, x0, m01 sub x3, x3, x0 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 adc x3, x3, x1 extr x6, x5, x6, #59 str x6, [f+7*N] extr x5, x3, x5, #59 str x5, [f+8*N] eor x1, x7, s10 asr x5, x1, #63 and x5, x5, m10 neg x5, x5 mul x0, x1, m10 umulh x1, x1, m10 adds x2, x2, x0 adc x5, x5, x1 eor x1, x8, s11 asr x0, x1, #63 and x0, x0, m11 sub x5, x5, x0 mul x0, x1, m11 umulh x1, x1, m11 adds x2, x2, x0 adc x5, x5, x1 extr x4, x2, x4, #59 str x4, [g+7*N] extr x2, x5, x2, #59 str x2, [g+8*N] // Now the computation of the updated u and v values and their // modular reductions. A very similar accumulation except that // the top words of u and v are unsigned and we don't shift. // // Digit 0 of [u,v] ldr x7, [u] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, car0, x0 adc x2, xzr, x1 ldr x8, [v] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u] adc x2, x2, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x5, car1, x0 adc x3, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 str x5, [v] adc x3, x3, x1 // Digit 1 of [u,v] ldr x7, [u+N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [v+N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 str x2, [u+N] adc x6, x6, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x3, x3, x0 adc x4, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x3, x3, x0 str x3, [v+N] adc x4, x4, x1 // Digit 2 of [u,v] ldr x7, [u+2*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [v+2*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 str x6, [u+2*N] adc x5, x5, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x4, x4, x0 adc x2, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x4, x4, x0 str x4, [v+2*N] adc x2, x2, x1 // Digit 3 of [u,v] ldr x7, [u+3*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, xzr, x1 ldr x8, [v+3*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 str x5, [u+3*N] adc x3, x3, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x2, x2, x0 adc x6, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x2, x2, x0 str x2, [v+3*N] adc x6, x6, x1 // Digit 4 of [u,v] ldr x7, [u+4*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x3, x3, x0 adc x4, xzr, x1 ldr x8, [v+4*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x3, 
x3, x0 str x3, [u+4*N] adc x4, x4, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x6, x6, x0 adc x5, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x6, x6, x0 str x6, [v+4*N] adc x5, x5, x1 // Digit 5 of [u,v] ldr x7, [u+5*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, x4, x0 adc x2, xzr, x1 ldr x8, [v+5*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u+5*N] adc x2, x2, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x5, x5, x0 adc x3, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x5, x5, x0 str x5, [v+5*N] adc x3, x3, x1 // Digit 6 of [u,v] ldr x7, [u+6*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [v+6*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 str x2, [u+6*N] adc x6, x6, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x3, x3, x0 adc x4, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x3, x3, x0 str x3, [v+6*N] adc x4, x4, x1 // Digit 7 of [u,v] ldr x7, [u+7*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [v+7*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 str x6, [u+7*N] adc x5, x5, x1 eor x1, x7, s10 mul x0, x1, m10 umulh x1, x1, m10 adds x4, x4, x0 adc x2, xzr, x1 eor x1, x8, s11 mul x0, x1, m11 umulh x1, x1, m11 adds x4, x4, x0 str x4, [v+7*N] adc x2, x2, x1 // Digits 8 and 9 of u (top is unsigned) ldr x7, [u+8*N] eor x1, x7, s00 and x3, s00, m00 neg x3, x3 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, x3, x1 ldr x8, [v+8*N] eor x1, x8, s01 and x0, s01, m01 sub x3, x3, x0 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 adc x3, x3, x1 // Modular reduction of u, reloading as needed from u[0],...,u[7],x5,x3 extr x6, x3, x5, #9 ldp x0, x1, [u] add x6, x6, x3, asr #63 sub x5, x5, x6, lsl #9 adds x0, x0, x6 asr x6, x6, #63 adcs x1, x1, x6 stp x0, x1, [u] ldp x0, x1, [u+16] adcs x0, x0, x6 adcs x1, x1, x6 stp x0, x1, [u+16] ldp x0, x1, [u+32] adcs x0, x0, x6 adcs x1, x1, x6 stp x0, x1, [u+32] ldp x0, x1, [u+48] adcs x0, x0, x6 adcs x1, x1, x6 stp x0, x1, [u+48] adc x5, x5, x6 str x5, [u+64] // Digits 8 and 9 of v (top is unsigned) eor x1, x7, s10 and x5, s10, m10 neg x5, x5 mul x0, x1, m10 umulh x1, x1, m10 adds x2, x2, x0 adc x5, x5, x1 eor x1, x8, s11 and x0, s11, m11 sub x5, x5, x0 mul x0, x1, m11 umulh x1, x1, m11 adds x2, x2, x0 adc x5, x5, x1 // Modular reduction of v, reloading as needed from v[0],...,v[7],x2,x5 extr x6, x5, x2, #9 ldp x0, x1, [v] add x6, x6, x5, asr #63 sub x2, x2, x6, lsl #9 adds x0, x0, x6 asr x6, x6, #63 adcs x1, x1, x6 stp x0, x1, [v] ldp x0, x1, [v+16] adcs x0, x0, x6 adcs x1, x1, x6 stp x0, x1, [v+16] ldp x0, x1, [v+32] adcs x0, x0, x6 adcs x1, x1, x6 stp x0, x1, [v+32] ldp x0, x1, [v+48] adcs x0, x0, x6 adcs x1, x1, x6 stp x0, x1, [v+48] adc x2, x2, x6 str x2, [v+64] Lbignum_inv_p521_midloop: mov x1, d ldr x2, [f] ldr x3, [g] divstep59() mov d, x1 // Next iteration subs i, i, #1 bne Lbignum_inv_p521_loop // The 21st and last iteration does not need anything except the // u value and the sign of f; the latter can be obtained from the // lowest word of f. So it's done differently from the main loop. // Find the sign of the new f. For this we just need one digit // since we know (for in-scope cases) that f is either +1 or -1. 
// We don't explicitly shift right by 59 either, but looking at // bit 63 (or any bit >= 60) of the unshifted result is enough // to distinguish -1 from +1; this is then made into a mask. ldr x0, [f] ldr x1, [g] mul x0, x0, m00 madd x1, x1, m01, x0 asr x0, x1, #63 // Now separate out the matrix into sign-magnitude pairs // and adjust each one based on the sign of f. // // Note that at this point we expect |f|=1 and we got its // sign above, so then since [f,0] == x * [u,v] (mod p_521) // we want to flip the sign of u according to that of f. cmp m00, xzr csetm s00, mi cneg m00, m00, mi eor s00, s00, x0 cmp m01, xzr csetm s01, mi cneg m01, m01, mi eor s01, s01, x0 cmp m10, xzr csetm s10, mi cneg m10, m10, mi eor s10, s10, x0 cmp m11, xzr csetm s11, mi cneg m11, m11, mi eor s11, s11, x0 // Adjust the initial value to allow for complement instead of negation and x0, m00, s00 and x1, m01, s01 add car0, x0, x1 // Digit 0 of [u] ldr x7, [u] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, car0, x0 adc x2, xzr, x1 ldr x8, [v] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u] adc x2, x2, x1 // Digit 1 of [u] ldr x7, [u+N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [v+N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 str x2, [u+N] adc x6, x6, x1 // Digit 2 of [u] ldr x7, [u+2*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [v+2*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 str x6, [u+2*N] adc x5, x5, x1 // Digit 3 of [u] ldr x7, [u+3*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, xzr, x1 ldr x8, [v+3*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 str x5, [u+3*N] adc x3, x3, x1 // Digit 4 of [u] ldr x7, [u+4*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x3, x3, x0 adc x4, xzr, x1 ldr x8, [v+4*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x3, x3, x0 str x3, [u+4*N] adc x4, x4, x1 // Digit 5 of [u] ldr x7, [u+5*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x4, x4, x0 adc x2, xzr, x1 ldr x8, [v+5*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x4, x4, x0 str x4, [u+5*N] adc x2, x2, x1 // Digit 6 of [u] ldr x7, [u+6*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x2, x2, x0 adc x6, xzr, x1 ldr x8, [v+6*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x2, x2, x0 str x2, [u+6*N] adc x6, x6, x1 // Digit 7 of [u] ldr x7, [u+7*N] eor x1, x7, s00 mul x0, x1, m00 umulh x1, x1, m00 adds x6, x6, x0 adc x5, xzr, x1 ldr x8, [v+7*N] eor x1, x8, s01 mul x0, x1, m01 umulh x1, x1, m01 adds x6, x6, x0 str x6, [u+7*N] adc x5, x5, x1 // Digits 8 and 9 of u (top is unsigned) ldr x7, [u+8*N] eor x1, x7, s00 and x3, s00, m00 neg x3, x3 mul x0, x1, m00 umulh x1, x1, m00 adds x5, x5, x0 adc x3, x3, x1 ldr x8, [v+8*N] eor x1, x8, s01 and x0, s01, m01 sub x3, x3, x0 mul x0, x1, m01 umulh x1, x1, m01 adds x5, x5, x0 adc x3, x3, x1 // Modular reduction of u, reloading as needed from u[0],...,u[7],x5,x3 extr x6, x3, x5, #9 ldp x10, x11, [u] add x6, x6, x3, asr #63 sub x5, x5, x6, lsl #9 adds x10, x10, x6 asr x6, x6, #63 adcs x11, x11, x6 ldp x12, x13, [u+16] adcs x12, x12, x6 adcs x13, x13, x6 ldp x14, x15, [u+32] adcs x14, x14, x6 adcs x15, x15, x6 ldp x16, x17, [u+48] adcs x16, x16, x6 adcs x17, x17, x6 adc x19, x5, x6 // Further strict reduction ready for the output, which just means // a conditional subtraction of p_521 subs x0, x10, #-1 adcs 
x1, x11, xzr adcs x2, x12, xzr adcs x3, x13, xzr adcs x4, x14, xzr adcs x5, x15, xzr adcs x6, x16, xzr adcs x7, x17, xzr mov x8, #0x1FF sbcs x8, x19, x8 csel x0, x0, x10, cs csel x1, x1, x11, cs csel x2, x2, x12, cs csel x3, x3, x13, cs csel x4, x4, x14, cs csel x5, x5, x15, cs csel x6, x6, x16, cs csel x7, x7, x17, cs csel x8, x8, x19, cs // Store it back to the final output stp x0, x1, [res] stp x2, x3, [res, #16] stp x4, x5, [res, #32] stp x6, x7, [res, #48] str x8, [res, #64] // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_inv_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
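The inverse routine above leans on the identity 2^521 ≡ 1 (mod p_521) for its interleaved reductions of u and v, and it finishes with a single conditional subtraction of p_521. Below is a minimal Python sketch of just that reduction pattern, for the non-negative case only; it is a reference model with illustrative names, not part of the imported sources.

# --- reference sketch in Python (illustrative, not part of the imported sources) ---
# p_521 = 2^521 - 1, so any value split as 2^521*q + r satisfies
# 2^521*q + r == q + r (mod p_521).
P521 = (1 << 521) - 1

def fold_p521(x):
    # One folding step, mirroring the extr/asr/adcs chains in the loop body
    # (the assembly additionally handles signed intermediates, omitted here).
    q, r = divmod(x, 1 << 521)
    return q + r

def strict_reduce_p521(x):
    # Final conditional subtraction of p_521 (the subs/sbcs/csel block at the end).
    x = fold_p521(x)
    return x - P521 if x >= P521 else x

# Sanity check of the identity: 2^521 mod p_521 == 1.
assert strict_reduce_p521(1 << 521) == 1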
wlsfx/bnbb
5,587
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_fromlebytes_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert little-endian bytes to 9-digit 528-bit bignum // // extern void bignum_fromlebytes_p521(uint64_t z[static 9], // const uint8_t x[static 66]); // // The result will be < 2^528 since it is translated from 66 bytes. // It is mainly intended for inputs x < p_521 < 2^521 < 2^528. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_fromlebytes_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_fromlebytes_p521) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_fromlebytes_p521) .text .balign 4 #define z x0 #define x x1 #define d x2 #define dshort w2 #define a x3 S2N_BN_SYMBOL(bignum_fromlebytes_p521): CFI_START // word 0 ldrb dshort, [x] extr a, d, xzr, #8 ldrb dshort, [x, #1] extr a, d, a, #8 ldrb dshort, [x, #2] extr a, d, a, #8 ldrb dshort, [x, #3] extr a, d, a, #8 ldrb dshort, [x, #4] extr a, d, a, #8 ldrb dshort, [x, #5] extr a, d, a, #8 ldrb dshort, [x, #6] extr a, d, a, #8 ldrb dshort, [x, #7] extr a, d, a, #8 str a, [z] // word 1 ldrb dshort, [x, #8] extr a, d, xzr, #8 ldrb dshort, [x, #9] extr a, d, a, #8 ldrb dshort, [x, #10] extr a, d, a, #8 ldrb dshort, [x, #11] extr a, d, a, #8 ldrb dshort, [x, #12] extr a, d, a, #8 ldrb dshort, [x, #13] extr a, d, a, #8 ldrb dshort, [x, #14] extr a, d, a, #8 ldrb dshort, [x, #15] extr a, d, a, #8 str a, [z, #8] // word 2 ldrb dshort, [x, #16] extr a, d, xzr, #8 ldrb dshort, [x, #17] extr a, d, a, #8 ldrb dshort, [x, #18] extr a, d, a, #8 ldrb dshort, [x, #19] extr a, d, a, #8 ldrb dshort, [x, #20] extr a, d, a, #8 ldrb dshort, [x, #21] extr a, d, a, #8 ldrb dshort, [x, #22] extr a, d, a, #8 ldrb dshort, [x, #23] extr a, d, a, #8 str a, [z, #16] // word 3 ldrb dshort, [x, #24] extr a, d, xzr, #8 ldrb dshort, [x, #25] extr a, d, a, #8 ldrb dshort, [x, #26] extr a, d, a, #8 ldrb dshort, [x, #27] extr a, d, a, #8 ldrb dshort, [x, #28] extr a, d, a, #8 ldrb dshort, [x, #29] extr a, d, a, #8 ldrb dshort, [x, #30] extr a, d, a, #8 ldrb dshort, [x, #31] extr a, d, a, #8 str a, [z, #24] // word 4 ldrb dshort, [x, #32] extr a, d, xzr, #8 ldrb dshort, [x, #33] extr a, d, a, #8 ldrb dshort, [x, #34] extr a, d, a, #8 ldrb dshort, [x, #35] extr a, d, a, #8 ldrb dshort, [x, #36] extr a, d, a, #8 ldrb dshort, [x, #37] extr a, d, a, #8 ldrb dshort, [x, #38] extr a, d, a, #8 ldrb dshort, [x, #39] extr a, d, a, #8 str a, [z, #32] // word 5 ldrb dshort, [x, #40] extr a, d, xzr, #8 ldrb dshort, [x, #41] extr a, d, a, #8 ldrb dshort, [x, #42] extr a, d, a, #8 ldrb dshort, [x, #43] extr a, d, a, #8 ldrb dshort, [x, #44] extr a, d, a, #8 ldrb dshort, [x, #45] extr a, d, a, #8 ldrb dshort, [x, #46] extr a, d, a, #8 ldrb dshort, [x, #47] extr a, d, a, #8 str a, [z, #40] // word 6 ldrb dshort, [x, #48] extr a, d, xzr, #8 ldrb dshort, [x, #49] extr a, d, a, #8 ldrb dshort, [x, #50] extr a, d, a, #8 ldrb dshort, [x, #51] extr a, d, a, #8 ldrb dshort, [x, #52] extr a, d, a, #8 ldrb dshort, [x, #53] extr a, d, a, #8 ldrb dshort, [x, #54] extr a, d, a, #8 ldrb dshort, [x, #55] extr a, d, a, #8 str a, [z, #48] // word 7 ldrb dshort, [x, #56] extr a, d, xzr, #8 ldrb dshort, [x, #57] extr a, d, a, #8 ldrb dshort, [x, #58] extr a, d, a, #8 ldrb dshort, [x, #59] extr a, d, a, #8 ldrb dshort, [x, #60] extr a, d, a, #8 ldrb dshort, [x, #61] extr a, d, a, 
#8 ldrb dshort, [x, #62] extr a, d, a, #8 ldrb dshort, [x, #63] extr a, d, a, #8 str a, [z, #56] // word 8 ldrb dshort, [x, #64] extr a, d, xzr, #8 ldrb dshort, [x, #65] extr a, d, a, #56 str a, [z, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_fromlebytes_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
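bignum_fromlebytes_p521 assembles 66 little-endian bytes into nine 64-bit digits, so the result is below 2^528 as the header comment notes. A Python reference of the same conversion (illustrative only):

# --- reference sketch in Python (illustrative, not part of the imported sources) ---
def fromlebytes_p521_ref(b):
    # b: 66 little-endian bytes; returns 9 little-endian 64-bit digits.
    assert len(b) == 66
    x = int.from_bytes(bytes(b), "little")
    return [(x >> (64 * i)) & ((1 << 64) - 1) for i in range(9)]

# Example: a buffer whose first byte is 1 and the rest are 0 yields digit 0 == 1.
assert fromlebytes_p521_ref(b"\x01" + b"\x00" * 65) == [1, 0, 0, 0, 0, 0, 0, 0, 0]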
wlsfx/bnbb
9,303
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_sqr_p521_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Square modulo p_521, z := (x^2) mod p_521, assuming x reduced // Input x[9]; output z[9] // // extern void bignum_sqr_p521_alt(uint64_t z[static 9], // const uint64_t x[static 9]); // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_p521_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sqr_p521_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_p521_alt) .text .balign 4 #define z x0 #define x x1 #define a0 x2 #define a1 x3 #define a2 x4 #define a3 x5 #define a4 x6 #define a5 x7 #define a6 x8 #define a7 x9 #define a8 x1 // Overwrites input argument at last load #define l x10 #define u0 x2 // The same as a0 #define u1 x11 #define u2 x12 #define u3 x13 #define u4 x14 #define u5 x15 #define u6 x16 #define u7 x17 #define u8 x19 #define u9 x20 #define u10 x21 #define u11 x22 #define u12 x23 #define u13 x24 #define u14 x25 #define u15 x26 #define u16 x4 // The same as a2 S2N_BN_SYMBOL(bignum_sqr_p521_alt): CFI_START // It's convenient to have more registers to play with CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) // Load low 8 elements as [a7;a6;a5;a4;a3;a2;a1;a0], set up an initial // window [u8;u7;u6;u5;u4;u3;u2;u1] = 10 + 20 + 30 + 40 + 50 + 60 + 70 ldp a0, a1, [x] mul u1, a0, a1 umulh u2, a0, a1 ldp a2, a3, [x, #16] mul l, a0, a2 umulh u3, a0, a2 adds u2, u2, l ldp a4, a5, [x, #32] mul l, a0, a3 umulh u4, a0, a3 adcs u3, u3, l ldp a6, a7, [x, #48] mul l, a0, a4 umulh u5, a0, a4 adcs u4, u4, l mul l, a0, a5 umulh u6, a0, a5 adcs u5, u5, l mul l, a0, a6 umulh u7, a0, a6 adcs u6, u6, l mul l, a0, a7 umulh u8, a0, a7 adcs u7, u7, l adc u8, u8, xzr // Add in the next diagonal = 21 + 31 + 41 + 51 + 61 + 71 + 54 mul l, a1, a2 adds u3, u3, l mul l, a1, a3 adcs u4, u4, l mul l, a1, a4 adcs u5, u5, l mul l, a1, a5 adcs u6, u6, l mul l, a1, a6 adcs u7, u7, l mul l, a1, a7 adcs u8, u8, l cset u9, cs umulh l, a1, a2 adds u4, u4, l umulh l, a1, a3 adcs u5, u5, l umulh l, a1, a4 adcs u6, u6, l umulh l, a1, a5 adcs u7, u7, l umulh l, a1, a6 adcs u8, u8, l umulh l, a1, a7 adc u9, u9, l mul l, a4, a5 umulh u10, a4, a5 adds u9, u9, l adc u10, u10, xzr // And the next one = 32 + 42 + 52 + 62 + 72 + 64 + 65 mul l, a2, a3 adds u5, u5, l mul l, a2, a4 adcs u6, u6, l mul l, a2, a5 adcs u7, u7, l mul l, a2, a6 adcs u8, u8, l mul l, a2, a7 adcs u9, u9, l mul l, a4, a6 adcs u10, u10, l cset u11, cs umulh l, a2, a3 adds u6, u6, l umulh l, a2, a4 adcs u7, u7, l umulh l, a2, a5 adcs u8, u8, l umulh l, a2, a6 adcs u9, u9, l umulh l, a2, a7 adcs u10, u10, l umulh l, a4, a6 adc u11, u11, l mul l, a5, a6 umulh u12, a5, a6 adds u11, u11, l adc u12, u12, xzr // And the final one = 43 + 53 + 63 + 73 + 74 + 75 + 76 mul l, a3, a4 adds u7, u7, l mul l, a3, a5 adcs u8, u8, l mul l, a3, a6 adcs u9, u9, l mul l, a3, a7 adcs u10, u10, l mul l, a4, a7 adcs u11, u11, l mul l, a5, a7 adcs u12, u12, l cset u13, cs umulh l, a3, a4 adds u8, u8, l umulh l, a3, a5 adcs u9, u9, l umulh l, a3, a6 adcs u10, u10, l umulh l, a3, a7 adcs u11, u11, l umulh l, a4, a7 adcs u12, u12, l umulh l, a5, a7 adc u13, u13, l mul l, a6, a7 umulh u14, a6, a7 adds u13, u13, l adc u14, u14, xzr // Double that, with u15 holding the top carry adds u1, u1, u1 adcs u2, u2, u2 adcs u3, u3, u3 adcs u4, 
u4, u4 adcs u5, u5, u5 adcs u6, u6, u6 adcs u7, u7, u7 adcs u8, u8, u8 adcs u9, u9, u9 adcs u10, u10, u10 adcs u11, u11, u11 adcs u12, u12, u12 adcs u13, u13, u13 adcs u14, u14, u14 cset u15, cs // Add the homogeneous terms 00 + 11 + 22 + 33 + 44 + 55 + 66 + 77 umulh l, a0, a0 adds u1, u1, l mul l, a1, a1 adcs u2, u2, l umulh l, a1, a1 adcs u3, u3, l mul l, a2, a2 adcs u4, u4, l umulh l, a2, a2 adcs u5, u5, l mul l, a3, a3 adcs u6, u6, l umulh l, a3, a3 adcs u7, u7, l mul l, a4, a4 adcs u8, u8, l umulh l, a4, a4 adcs u9, u9, l mul l, a5, a5 adcs u10, u10, l umulh l, a5, a5 adcs u11, u11, l mul l, a6, a6 adcs u12, u12, l umulh l, a6, a6 adcs u13, u13, l mul l, a7, a7 adcs u14, u14, l umulh l, a7, a7 adc u15, u15, l // Now load in the top digit a8, and immediately double the register ldr a8, [x, #64] add a8, a8, a8 // Add (2 * a8) * [a7;...;a0] into the top of the buffer // At the end of the first chain we form u16 = a8 ^ 2. // This needs us to shift right the modified a8 again but it saves a // register, and the overall performance impact seems slightly positive. mul l, a8, a0 adds u8, u8, l umulh l, a8, a0 adcs u9, u9, l mul l, a8, a2 adcs u10, u10, l umulh l, a8, a2 adcs u11, u11, l mul l, a8, a4 adcs u12, u12, l umulh l, a8, a4 adcs u13, u13, l mul l, a8, a6 adcs u14, u14, l umulh l, a8, a6 adcs u15, u15, l lsr u16, a8, #1 mul u16, u16, u16 adc u16, u16, xzr mul l, a8, a1 adds u9, u9, l umulh l, a8, a1 adcs u10, u10, l mul l, a8, a3 adcs u11, u11, l umulh l, a8, a3 adcs u12, u12, l mul l, a8, a5 adcs u13, u13, l umulh l, a8, a5 adcs u14, u14, l mul l, a8, a7 adcs u15, u15, l umulh l, a8, a7 adc u16, u16, l // Finally squeeze in the lowest mul. This didn't need to be involved // in the addition chains and moreover lets us re-use u0 == a0 mul u0, a0, a0 // Now we have the full product, which we consider as // 2^521 * h + l. Form h + l + 1 subs xzr, xzr, xzr extr l, u9, u8, #9 adcs u0, u0, l extr l, u10, u9, #9 adcs u1, u1, l extr l, u11, u10, #9 adcs u2, u2, l extr l, u12, u11, #9 adcs u3, u3, l extr l, u13, u12, #9 adcs u4, u4, l extr l, u14, u13, #9 adcs u5, u5, l extr l, u15, u14, #9 adcs u6, u6, l extr l, u16, u15, #9 adcs u7, u7, l orr u8, u8, #~0x1FF lsr l, u16, #9 adcs u8, u8, l // Now CF is set if h + l + 1 >= 2^521, which means it's already // the answer, while if ~CF the answer is h + l so we should subtract // 1 (all considered in 521 bits). Hence subtract ~CF and mask. sbcs u0, u0, xzr sbcs u1, u1, xzr sbcs u2, u2, xzr sbcs u3, u3, xzr sbcs u4, u4, xzr sbcs u5, u5, xzr sbcs u6, u6, xzr sbcs u7, u7, xzr sbc u8, u8, xzr and u8, u8, #0x1FF // Store back digits of final result stp u0, u1, [z] stp u2, u3, [z, #16] stp u4, u5, [z, #32] stp u6, u7, [z, #48] str u8, [z, #64] // Restore registers and return CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_sqr_p521_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
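The final reduction in bignum_sqr_p521_alt views the 1042-bit square as 2^521*h + l, forms h + l + 1, and uses the carry out of bit 521 to decide whether p_521 must be subtracted. The same logic expressed in Python (a reference sketch under the stated precondition x < p_521, not the implementation itself):

# --- reference sketch in Python (illustrative, not part of the imported sources) ---
P521 = (1 << 521) - 1

def sqr_p521_ref(x):
    assert 0 <= x < P521
    s = x * x
    h, l = s >> 521, s & P521      # s = 2^521*h + l, and h + l < 2*p_521 here
    t = h + l + 1                  # carry out of bit 521 <=> h + l >= p_521
    if t >> 521:                   # "CF set": answer is (h + l) - p_521 = t mod 2^521
        return t & P521
    return t - 1                   # otherwise undo the +1

assert sqr_p521_ref(P521 - 1) == ((P521 - 1) ** 2) % P521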
wlsfx/bnbb
3,362
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_deamont_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert from Montgomery form z := (x / 2^576) mod p_521 // Input x[9]; output z[9] // // extern void bignum_deamont_p521(uint64_t z[static 9], // const uint64_t x[static 9]); // // Convert a 9-digit bignum x out of its (optionally almost) Montgomery form, // "almost" meaning any 9-digit input will work, with no range restriction. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_deamont_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_deamont_p521) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_deamont_p521) .text .balign 4 // Input parameters #define z x0 #define x x1 // Rotating registers for the intermediate windows #define d0 x2 #define d1 x3 #define d2 x4 #define d3 x5 #define d4 x6 #define d5 x7 #define d6 x8 #define d7 x9 #define d8 x10 // Some other variables, not all distinct #define c x11 #define h x11 #define l x12 #define u x12 S2N_BN_SYMBOL(bignum_deamont_p521): CFI_START // Load all the inputs ldp d0, d1, [x] ldp d2, d3, [x, #16] ldp d4, d5, [x, #32] ldp d6, d7, [x, #48] ldr d8, [x, #64] // Stash the lowest 55 bits at the top of c, then shift the whole 576-bit // input right by 9*64 - 521 = 576 - 521 = 55 bits. As this is done, // accumulate an AND of words d0..d6. lsl c, d0, #9 extr d0, d1, d0, #55 extr d1, d2, d1, #55 and u, d0, d1 extr d2, d3, d2, #55 and u, u, d2 extr d3, d4, d3, #55 and u, u, d3 extr d4, d5, d4, #55 and u, u, d4 extr d5, d6, d5, #55 and u, u, d5 extr d6, d7, d6, #55 and u, u, d6 extr d7, d8, d7, #55 lsr d8, d8, #55 // Now writing x = 2^55 * h + l (so here [d8;..d0] = h and c = 2^9 * l) // we want (h + 2^{521-55} * l) mod p_521 = s mod p_521. Since s < 2 * p_521 // this is just "if s >= p_521 then s - p_521 else s". First get // CF <=> s >= p_521, creating the digits [h,l] to add for the l part. adds xzr, u, #1 lsl l, c, #9 adcs xzr, d7, l orr d8, d8, #~0x1FF lsr h, c, #55 adcs xzr, d8, h // Now the result = s mod p_521 = (if s >= p_521 then s - p_521 else s) = // (s + CF) mod 2^521. So do the addition inheriting the carry-in. adcs d0, d0, xzr adcs d1, d1, xzr adcs d2, d2, xzr adcs d3, d3, xzr adcs d4, d4, xzr adcs d5, d5, xzr adcs d6, d6, xzr adcs d7, d7, l adc d8, d8, h and d8, d8, #0x1FF // Store back the result stp d0, d1, [z] stp d2, d3, [z, #16] stp d4, d5, [z, #32] stp d6, d7, [z, #48] str d8, [z, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_deamont_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
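bignum_deamont_p521 divides by 2^576 modulo p_521 by rotating the low 55 bits to the top, exactly as the in-file comment describes: since 2^521 ≡ 1 (mod p_521), 2^576 ≡ 2^55, so x * 2^-576 ≡ h + 2^466*l where x = 2^55*h + l. A Python reference model of that rotation (illustrative names; not the implementation):

# --- reference sketch in Python (illustrative, not part of the imported sources) ---
P521 = (1 << 521) - 1

def deamont_p521_ref(x):
    # x: any 9-digit (576-bit) value; returns (x / 2^576) mod p_521.
    assert 0 <= x < 1 << 576
    h, l = x >> 55, x & ((1 << 55) - 1)
    s = h + (l << 466)             # s < 2*p_521, so one conditional subtraction suffices
    return s - P521 if s >= P521 else s

# Cross-check against the direct formula (Python 3.8+ modular inverse):
# deamont_p521_ref(x) == x * pow(2, -576, P521) % P521 for any 0 <= x < 2^576.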
wlsfx/bnbb
42,931
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/p521_jadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point addition on NIST curve P-521 in Jacobian coordinates // // extern void p521_jadd_alt(uint64_t p3[static 27], const uint64_t p1[static 27], // const uint64_t p2[static 27]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples. // A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3). // It is assumed that all coordinates of the input points p1 and p2 are // fully reduced mod p_521, that both z coordinates are nonzero and // that neither p1 =~= p2 or p1 =~= -p2, where "=~=" means "represents // the same affine point as". // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jadd_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(p521_jadd_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jadd_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 72 // Stable homes for input arguments during main code sequence #define input_z x26 #define input_x x27 #define input_y x28 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define z_2 input_y, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // NSPACE is the total stack needed for these temporaries #define z1sq sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define x1a sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define z2sq sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define y1a sp, #(NUMSIZE*6) // NUMSIZE*7 is not 16-aligned so we round it up #define NSPACE 512 // Corresponds exactly to bignum_mul_p521_alt #define mul_p521(P0,P1,P2) \ ldp x3, x4, [P1] __LF \ ldp x5, x6, [P2] __LF \ mul x15, x3, x5 __LF \ umulh x16, x3, x5 __LF \ mul x14, x3, x6 __LF \ umulh x17, x3, x6 __LF \ adds x16, x16, x14 __LF \ ldp x7, x8, [P2+16] __LF \ mul x14, x3, x7 __LF \ umulh x19, x3, x7 __LF \ adcs x17, x17, x14 __LF \ mul x14, x3, x8 __LF \ umulh x20, x3, x8 __LF \ adcs x19, x19, x14 __LF \ ldp x9, x10, [P2+32] __LF \ mul x14, x3, x9 __LF \ umulh x21, x3, x9 __LF \ adcs x20, x20, x14 __LF \ mul x14, x3, x10 __LF \ umulh x22, x3, x10 __LF \ adcs x21, x21, x14 __LF \ ldp x11, x12, [P2+48] __LF \ mul x14, x3, x11 __LF \ umulh x23, x3, x11 __LF \ adcs x22, x22, x14 __LF \ ldr x13, [P2+64] __LF \ mul x14, x3, x12 __LF \ umulh x24, x3, x12 __LF \ adcs x23, x23, x14 __LF \ mul x14, x3, x13 __LF \ umulh x1, x3, x13 __LF \ adcs x24, x24, x14 __LF \ adc x1, x1, xzr __LF \ mul x14, x4, x5 __LF \ adds x16, x16, x14 __LF \ mul x14, x4, x6 __LF \ adcs x17, x17, x14 __LF \ mul x14, x4, x7 __LF \ adcs x19, x19, x14 __LF \ mul x14, x4, x8 __LF \ adcs x20, x20, x14 __LF \ mul x14, x4, x9 __LF \ adcs x21, x21, x14 __LF \ mul x14, x4, x10 __LF \ adcs x22, x22, x14 __LF \ mul x14, x4, x11 __LF \ adcs x23, x23, x14 __LF \ mul x14, x4, x12 __LF \ adcs x24, x24, x14 
__LF \ mul x14, x4, x13 __LF \ adcs x1, x1, x14 __LF \ cset x0, hs __LF \ umulh x14, x4, x5 __LF \ adds x17, x17, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x20, x20, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x21, x21, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x13 __LF \ adc x0, x0, x14 __LF \ stp x15, x16, [P0] __LF \ ldp x3, x4, [P1+16] __LF \ mul x14, x3, x5 __LF \ adds x17, x17, x14 __LF \ mul x14, x3, x6 __LF \ adcs x19, x19, x14 __LF \ mul x14, x3, x7 __LF \ adcs x20, x20, x14 __LF \ mul x14, x3, x8 __LF \ adcs x21, x21, x14 __LF \ mul x14, x3, x9 __LF \ adcs x22, x22, x14 __LF \ mul x14, x3, x10 __LF \ adcs x23, x23, x14 __LF \ mul x14, x3, x11 __LF \ adcs x24, x24, x14 __LF \ mul x14, x3, x12 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x13 __LF \ adcs x0, x0, x14 __LF \ cset x15, hs __LF \ umulh x14, x3, x5 __LF \ adds x19, x19, x14 __LF \ umulh x14, x3, x6 __LF \ adcs x20, x20, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x21, x21, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x3, x12 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x13 __LF \ adc x15, x15, x14 __LF \ mul x14, x4, x5 __LF \ adds x19, x19, x14 __LF \ mul x14, x4, x6 __LF \ adcs x20, x20, x14 __LF \ mul x14, x4, x7 __LF \ adcs x21, x21, x14 __LF \ mul x14, x4, x8 __LF \ adcs x22, x22, x14 __LF \ mul x14, x4, x9 __LF \ adcs x23, x23, x14 __LF \ mul x14, x4, x10 __LF \ adcs x24, x24, x14 __LF \ mul x14, x4, x11 __LF \ adcs x1, x1, x14 __LF \ mul x14, x4, x12 __LF \ adcs x0, x0, x14 __LF \ mul x14, x4, x13 __LF \ adcs x15, x15, x14 __LF \ cset x16, hs __LF \ umulh x14, x4, x5 __LF \ adds x20, x20, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x21, x21, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x4, x13 __LF \ adc x16, x16, x14 __LF \ stp x17, x19, [P0+16] __LF \ ldp x3, x4, [P1+32] __LF \ mul x14, x3, x5 __LF \ adds x20, x20, x14 __LF \ mul x14, x3, x6 __LF \ adcs x21, x21, x14 __LF \ mul x14, x3, x7 __LF \ adcs x22, x22, x14 __LF \ mul x14, x3, x8 __LF \ adcs x23, x23, x14 __LF \ mul x14, x3, x9 __LF \ adcs x24, x24, x14 __LF \ mul x14, x3, x10 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x11 __LF \ adcs x0, x0, x14 __LF \ mul x14, x3, x12 __LF \ adcs x15, x15, x14 __LF \ mul x14, x3, x13 __LF \ adcs x16, x16, x14 __LF \ cset x17, hs __LF \ umulh x14, x3, x5 __LF \ adds x21, x21, x14 __LF \ umulh x14, x3, x6 __LF \ adcs x22, x22, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x3, x12 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x3, x13 __LF \ adc x17, x17, x14 __LF \ mul x14, x4, x5 __LF \ adds x21, x21, x14 __LF \ mul x14, x4, x6 __LF \ adcs x22, x22, x14 __LF \ mul x14, x4, x7 __LF \ adcs x23, x23, x14 __LF \ mul x14, 
x4, x8 __LF \ adcs x24, x24, x14 __LF \ mul x14, x4, x9 __LF \ adcs x1, x1, x14 __LF \ mul x14, x4, x10 __LF \ adcs x0, x0, x14 __LF \ mul x14, x4, x11 __LF \ adcs x15, x15, x14 __LF \ mul x14, x4, x12 __LF \ adcs x16, x16, x14 __LF \ mul x14, x4, x13 __LF \ adcs x17, x17, x14 __LF \ cset x19, hs __LF \ umulh x14, x4, x5 __LF \ adds x22, x22, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x23, x23, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x4, x13 __LF \ adc x19, x19, x14 __LF \ stp x20, x21, [P0+32] __LF \ ldp x3, x4, [P1+48] __LF \ mul x14, x3, x5 __LF \ adds x22, x22, x14 __LF \ mul x14, x3, x6 __LF \ adcs x23, x23, x14 __LF \ mul x14, x3, x7 __LF \ adcs x24, x24, x14 __LF \ mul x14, x3, x8 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x9 __LF \ adcs x0, x0, x14 __LF \ mul x14, x3, x10 __LF \ adcs x15, x15, x14 __LF \ mul x14, x3, x11 __LF \ adcs x16, x16, x14 __LF \ mul x14, x3, x12 __LF \ adcs x17, x17, x14 __LF \ mul x14, x3, x13 __LF \ adcs x19, x19, x14 __LF \ cset x20, hs __LF \ umulh x14, x3, x5 __LF \ adds x23, x23, x14 __LF \ umulh x14, x3, x6 __LF \ adcs x24, x24, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x3, x12 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x3, x13 __LF \ adc x20, x20, x14 __LF \ mul x14, x4, x5 __LF \ adds x23, x23, x14 __LF \ mul x14, x4, x6 __LF \ adcs x24, x24, x14 __LF \ mul x14, x4, x7 __LF \ adcs x1, x1, x14 __LF \ mul x14, x4, x8 __LF \ adcs x0, x0, x14 __LF \ mul x14, x4, x9 __LF \ adcs x15, x15, x14 __LF \ mul x14, x4, x10 __LF \ adcs x16, x16, x14 __LF \ mul x14, x4, x11 __LF \ adcs x17, x17, x14 __LF \ mul x14, x4, x12 __LF \ adcs x19, x19, x14 __LF \ mul x14, x4, x13 __LF \ adcs x20, x20, x14 __LF \ cset x21, hs __LF \ umulh x14, x4, x5 __LF \ adds x24, x24, x14 __LF \ umulh x14, x4, x6 __LF \ adcs x1, x1, x14 __LF \ umulh x14, x4, x7 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x4, x8 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x4, x9 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x4, x10 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x4, x11 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x4, x12 __LF \ adcs x20, x20, x14 __LF \ umulh x14, x4, x13 __LF \ adc x21, x21, x14 __LF \ stp x22, x23, [P0+48] __LF \ ldr x3, [P1+64] __LF \ mul x14, x3, x5 __LF \ adds x24, x24, x14 __LF \ mul x14, x3, x6 __LF \ adcs x1, x1, x14 __LF \ mul x14, x3, x7 __LF \ adcs x0, x0, x14 __LF \ mul x14, x3, x8 __LF \ adcs x15, x15, x14 __LF \ mul x14, x3, x9 __LF \ adcs x16, x16, x14 __LF \ mul x14, x3, x10 __LF \ adcs x17, x17, x14 __LF \ mul x14, x3, x11 __LF \ adcs x19, x19, x14 __LF \ mul x14, x3, x12 __LF \ adcs x20, x20, x14 __LF \ mul x14, x3, x13 __LF \ adc x21, x21, x14 __LF \ umulh x14, x3, x5 __LF \ adds x1, x1, x14 __LF \ umulh x14, x3, x6 __LF \ adcs x0, x0, x14 __LF \ umulh x14, x3, x7 __LF \ adcs x15, x15, x14 __LF \ umulh x14, x3, x8 __LF \ adcs x16, x16, x14 __LF \ umulh x14, x3, x9 __LF \ adcs x17, x17, x14 __LF \ umulh x14, x3, x10 __LF \ adcs x19, x19, x14 __LF \ umulh x14, x3, x11 __LF \ adcs x20, x20, x14 __LF \ umulh x14, x3, x12 __LF \ adc x21, x21, x14 __LF \ 
cmp xzr, xzr __LF \ ldp x5, x6, [P0] __LF \ extr x14, x1, x24, #9 __LF \ adcs x5, x5, x14 __LF \ extr x14, x0, x1, #9 __LF \ adcs x6, x6, x14 __LF \ ldp x7, x8, [P0+16] __LF \ extr x14, x15, x0, #9 __LF \ adcs x7, x7, x14 __LF \ extr x14, x16, x15, #9 __LF \ adcs x8, x8, x14 __LF \ ldp x9, x10, [P0+32] __LF \ extr x14, x17, x16, #9 __LF \ adcs x9, x9, x14 __LF \ extr x14, x19, x17, #9 __LF \ adcs x10, x10, x14 __LF \ ldp x11, x12, [P0+48] __LF \ extr x14, x20, x19, #9 __LF \ adcs x11, x11, x14 __LF \ extr x14, x21, x20, #9 __LF \ adcs x12, x12, x14 __LF \ orr x13, x24, #0xfffffffffffffe00 __LF \ lsr x14, x21, #9 __LF \ adcs x13, x13, x14 __LF \ sbcs x5, x5, xzr __LF \ sbcs x6, x6, xzr __LF \ sbcs x7, x7, xzr __LF \ sbcs x8, x8, xzr __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbc x13, x13, xzr __LF \ and x13, x13, #0x1ff __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] __LF \ stp x11, x12, [P0+48] __LF \ str x13, [P0+64] // Corresponds exactly to bignum_sqr_p521_alt #define sqr_p521(P0,P1) \ ldp x2, x3, [P1] __LF \ mul x11, x2, x3 __LF \ umulh x12, x2, x3 __LF \ ldp x4, x5, [P1+16] __LF \ mul x10, x2, x4 __LF \ umulh x13, x2, x4 __LF \ adds x12, x12, x10 __LF \ ldp x6, x7, [P1+32] __LF \ mul x10, x2, x5 __LF \ umulh x14, x2, x5 __LF \ adcs x13, x13, x10 __LF \ ldp x8, x9, [P1+48] __LF \ mul x10, x2, x6 __LF \ umulh x15, x2, x6 __LF \ adcs x14, x14, x10 __LF \ mul x10, x2, x7 __LF \ umulh x16, x2, x7 __LF \ adcs x15, x15, x10 __LF \ mul x10, x2, x8 __LF \ umulh x17, x2, x8 __LF \ adcs x16, x16, x10 __LF \ mul x10, x2, x9 __LF \ umulh x19, x2, x9 __LF \ adcs x17, x17, x10 __LF \ adc x19, x19, xzr __LF \ mul x10, x3, x4 __LF \ adds x13, x13, x10 __LF \ mul x10, x3, x5 __LF \ adcs x14, x14, x10 __LF \ mul x10, x3, x6 __LF \ adcs x15, x15, x10 __LF \ mul x10, x3, x7 __LF \ adcs x16, x16, x10 __LF \ mul x10, x3, x8 __LF \ adcs x17, x17, x10 __LF \ mul x10, x3, x9 __LF \ adcs x19, x19, x10 __LF \ cset x20, hs __LF \ umulh x10, x3, x4 __LF \ adds x14, x14, x10 __LF \ umulh x10, x3, x5 __LF \ adcs x15, x15, x10 __LF \ umulh x10, x3, x6 __LF \ adcs x16, x16, x10 __LF \ umulh x10, x3, x7 __LF \ adcs x17, x17, x10 __LF \ umulh x10, x3, x8 __LF \ adcs x19, x19, x10 __LF \ umulh x10, x3, x9 __LF \ adc x20, x20, x10 __LF \ mul x10, x6, x7 __LF \ umulh x21, x6, x7 __LF \ adds x20, x20, x10 __LF \ adc x21, x21, xzr __LF \ mul x10, x4, x5 __LF \ adds x15, x15, x10 __LF \ mul x10, x4, x6 __LF \ adcs x16, x16, x10 __LF \ mul x10, x4, x7 __LF \ adcs x17, x17, x10 __LF \ mul x10, x4, x8 __LF \ adcs x19, x19, x10 __LF \ mul x10, x4, x9 __LF \ adcs x20, x20, x10 __LF \ mul x10, x6, x8 __LF \ adcs x21, x21, x10 __LF \ cset x22, hs __LF \ umulh x10, x4, x5 __LF \ adds x16, x16, x10 __LF \ umulh x10, x4, x6 __LF \ adcs x17, x17, x10 __LF \ umulh x10, x4, x7 __LF \ adcs x19, x19, x10 __LF \ umulh x10, x4, x8 __LF \ adcs x20, x20, x10 __LF \ umulh x10, x4, x9 __LF \ adcs x21, x21, x10 __LF \ umulh x10, x6, x8 __LF \ adc x22, x22, x10 __LF \ mul x10, x7, x8 __LF \ umulh x23, x7, x8 __LF \ adds x22, x22, x10 __LF \ adc x23, x23, xzr __LF \ mul x10, x5, x6 __LF \ adds x17, x17, x10 __LF \ mul x10, x5, x7 __LF \ adcs x19, x19, x10 __LF \ mul x10, x5, x8 __LF \ adcs x20, x20, x10 __LF \ mul x10, x5, x9 __LF \ adcs x21, x21, x10 __LF \ mul x10, x6, x9 __LF \ adcs x22, x22, x10 __LF \ mul x10, x7, x9 __LF \ adcs x23, x23, x10 __LF \ cset x24, hs __LF \ umulh x10, x5, x6 __LF \ adds x19, x19, x10 __LF \ umulh x10, x5, x7 __LF \ adcs 
x20, x20, x10 __LF \ umulh x10, x5, x8 __LF \ adcs x21, x21, x10 __LF \ umulh x10, x5, x9 __LF \ adcs x22, x22, x10 __LF \ umulh x10, x6, x9 __LF \ adcs x23, x23, x10 __LF \ umulh x10, x7, x9 __LF \ adc x24, x24, x10 __LF \ mul x10, x8, x9 __LF \ umulh x25, x8, x9 __LF \ adds x24, x24, x10 __LF \ adc x25, x25, xzr __LF \ adds x11, x11, x11 __LF \ adcs x12, x12, x12 __LF \ adcs x13, x13, x13 __LF \ adcs x14, x14, x14 __LF \ adcs x15, x15, x15 __LF \ adcs x16, x16, x16 __LF \ adcs x17, x17, x17 __LF \ adcs x19, x19, x19 __LF \ adcs x20, x20, x20 __LF \ adcs x21, x21, x21 __LF \ adcs x22, x22, x22 __LF \ adcs x23, x23, x23 __LF \ adcs x24, x24, x24 __LF \ adcs x25, x25, x25 __LF \ cset x0, hs __LF \ umulh x10, x2, x2 __LF \ adds x11, x11, x10 __LF \ mul x10, x3, x3 __LF \ adcs x12, x12, x10 __LF \ umulh x10, x3, x3 __LF \ adcs x13, x13, x10 __LF \ mul x10, x4, x4 __LF \ adcs x14, x14, x10 __LF \ umulh x10, x4, x4 __LF \ adcs x15, x15, x10 __LF \ mul x10, x5, x5 __LF \ adcs x16, x16, x10 __LF \ umulh x10, x5, x5 __LF \ adcs x17, x17, x10 __LF \ mul x10, x6, x6 __LF \ adcs x19, x19, x10 __LF \ umulh x10, x6, x6 __LF \ adcs x20, x20, x10 __LF \ mul x10, x7, x7 __LF \ adcs x21, x21, x10 __LF \ umulh x10, x7, x7 __LF \ adcs x22, x22, x10 __LF \ mul x10, x8, x8 __LF \ adcs x23, x23, x10 __LF \ umulh x10, x8, x8 __LF \ adcs x24, x24, x10 __LF \ mul x10, x9, x9 __LF \ adcs x25, x25, x10 __LF \ umulh x10, x9, x9 __LF \ adc x0, x0, x10 __LF \ ldr x1, [P1+64] __LF \ add x1, x1, x1 __LF \ mul x10, x1, x2 __LF \ adds x19, x19, x10 __LF \ umulh x10, x1, x2 __LF \ adcs x20, x20, x10 __LF \ mul x10, x1, x4 __LF \ adcs x21, x21, x10 __LF \ umulh x10, x1, x4 __LF \ adcs x22, x22, x10 __LF \ mul x10, x1, x6 __LF \ adcs x23, x23, x10 __LF \ umulh x10, x1, x6 __LF \ adcs x24, x24, x10 __LF \ mul x10, x1, x8 __LF \ adcs x25, x25, x10 __LF \ umulh x10, x1, x8 __LF \ adcs x0, x0, x10 __LF \ lsr x4, x1, #1 __LF \ mul x4, x4, x4 __LF \ adc x4, x4, xzr __LF \ mul x10, x1, x3 __LF \ adds x20, x20, x10 __LF \ umulh x10, x1, x3 __LF \ adcs x21, x21, x10 __LF \ mul x10, x1, x5 __LF \ adcs x22, x22, x10 __LF \ umulh x10, x1, x5 __LF \ adcs x23, x23, x10 __LF \ mul x10, x1, x7 __LF \ adcs x24, x24, x10 __LF \ umulh x10, x1, x7 __LF \ adcs x25, x25, x10 __LF \ mul x10, x1, x9 __LF \ adcs x0, x0, x10 __LF \ umulh x10, x1, x9 __LF \ adc x4, x4, x10 __LF \ mul x2, x2, x2 __LF \ cmp xzr, xzr __LF \ extr x10, x20, x19, #9 __LF \ adcs x2, x2, x10 __LF \ extr x10, x21, x20, #9 __LF \ adcs x11, x11, x10 __LF \ extr x10, x22, x21, #9 __LF \ adcs x12, x12, x10 __LF \ extr x10, x23, x22, #9 __LF \ adcs x13, x13, x10 __LF \ extr x10, x24, x23, #9 __LF \ adcs x14, x14, x10 __LF \ extr x10, x25, x24, #9 __LF \ adcs x15, x15, x10 __LF \ extr x10, x0, x25, #9 __LF \ adcs x16, x16, x10 __LF \ extr x10, x4, x0, #9 __LF \ adcs x17, x17, x10 __LF \ orr x19, x19, #0xfffffffffffffe00 __LF \ lsr x10, x4, #9 __LF \ adcs x19, x19, x10 __LF \ sbcs x2, x2, xzr __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ sbcs x14, x14, xzr __LF \ sbcs x15, x15, xzr __LF \ sbcs x16, x16, xzr __LF \ sbcs x17, x17, xzr __LF \ sbc x19, x19, xzr __LF \ and x19, x19, #0x1ff __LF \ stp x2, x11, [P0] __LF \ stp x12, x13, [P0+16] __LF \ stp x14, x15, [P0+32] __LF \ stp x16, x17, [P0+48] __LF \ str x19, [P0+64] // Corresponds exactly to bignum_sub_p521 #define sub_p521(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ subs x5, x5, x4 __LF \ sbcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ sbcs x7, x7, 
x4 __LF \ sbcs x8, x8, x3 __LF \ ldp x9, x10, [P1+32] __LF \ ldp x4, x3, [P2+32] __LF \ sbcs x9, x9, x4 __LF \ sbcs x10, x10, x3 __LF \ ldp x11, x12, [P1+48] __LF \ ldp x4, x3, [P2+48] __LF \ sbcs x11, x11, x4 __LF \ sbcs x12, x12, x3 __LF \ ldr x13, [P1+64] __LF \ ldr x4, [P2+64] __LF \ sbcs x13, x13, x4 __LF \ sbcs x5, x5, xzr __LF \ sbcs x6, x6, xzr __LF \ sbcs x7, x7, xzr __LF \ sbcs x8, x8, xzr __LF \ sbcs x9, x9, xzr __LF \ sbcs x10, x10, xzr __LF \ sbcs x11, x11, xzr __LF \ sbcs x12, x12, xzr __LF \ sbcs x13, x13, xzr __LF \ and x13, x13, #0x1ff __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] __LF \ stp x11, x12, [P0+48] __LF \ str x13, [P0+64] S2N_BN_SYMBOL(p521_jadd_alt): CFI_START // Save regs and make room on stack for temporary variables CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_PUSH2(x27,x28) CFI_DEC_SP(NSPACE) // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 mov input_y, x2 // Main code, just a sequence of basic field operations sqr_p521(z1sq,z_1) sqr_p521(z2sq,z_2) mul_p521(y1a,z_2,y_1) mul_p521(y2a,z_1,y_2) mul_p521(x2a,z1sq,x_2) mul_p521(x1a,z2sq,x_1) mul_p521(y2a,z1sq,y2a) mul_p521(y1a,z2sq,y1a) sub_p521(xd,x2a,x1a) sub_p521(yd,y2a,y1a) sqr_p521(zz,xd) sqr_p521(ww,yd) mul_p521(zzx1,zz,x1a) mul_p521(zzx2,zz,x2a) sub_p521(resx,ww,zzx1) sub_p521(t1,zzx2,zzx1) mul_p521(xd,xd,z_1) sub_p521(resx,resx,zzx2) sub_p521(t2,zzx1,resx) mul_p521(t1,t1,y1a) mul_p521(resz,xd,z_2) mul_p521(t2,yd,t2) sub_p521(resy,t2,t1) // Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0 // The condition codes get set by a comparison (P2 != 0) - (P1 != 0) // So "HI" <=> CF /\ ~ZF <=> P1 = 0 /\ ~(P2 = 0) // and "LO" <=> ~CF <=> ~(P1 = 0) /\ P2 = 0 // Multiplex the z outputs accordingly and re-store in resz ldp x0, x1, [z_1] ldp x2, x3, [z_1+16] ldp x4, x5, [z_1+32] ldp x6, x7, [z_1+48] ldr x8, [z_1+64] orr x20, x0, x1 orr x21, x2, x3 orr x22, x4, x5 orr x23, x6, x7 orr x20, x20, x21 orr x22, x22, x23 orr x20, x20, x8 orr x20, x20, x22 cmp x20, xzr cset x20, ne ldp x10, x11, [z_2] ldp x12, x13, [z_2+16] ldp x14, x15, [z_2+32] ldp x16, x17, [z_2+48] ldr x19, [z_2+64] orr x21, x10, x11 orr x22, x12, x13 orr x23, x14, x15 orr x24, x16, x17 orr x21, x21, x22 orr x23, x23, x24 orr x21, x21, x19 orr x21, x21, x23 csel x0, x0, x10, ne csel x1, x1, x11, ne csel x2, x2, x12, ne csel x3, x3, x13, ne csel x4, x4, x14, ne csel x5, x5, x15, ne csel x6, x6, x16, ne csel x7, x7, x17, ne csel x8, x8, x19, ne cmp x21, xzr cset x21, ne cmp x21, x20 ldp x10, x11, [resz] ldp x12, x13, [resz+16] ldp x14, x15, [resz+32] ldp x16, x17, [resz+48] ldr x19, [resz+64] csel x0, x0, x10, ne csel x1, x1, x11, ne csel x2, x2, x12, ne csel x3, x3, x13, ne csel x4, x4, x14, ne csel x5, x5, x15, ne csel x6, x6, x16, ne csel x7, x7, x17, ne csel x8, x8, x19, ne stp x0, x1, [resz] stp x2, x3, [resz+16] stp x4, x5, [resz+32] stp x6, x7, [resz+48] str x8, [resz+64] // Multiplex the x and y outputs too, keeping the results in registers ldp x20, x21, [x_1] ldp x0, x1, [resx] csel x0, x20, x0, lo csel x1, x21, x1, lo ldp x20, x21, [x_2] csel x0, x20, x0, hi csel x1, x21, x1, hi ldp x20, x21, [x_1+16] ldp x2, x3, [resx+16] csel x2, x20, x2, lo csel x3, x21, x3, lo ldp x20, x21, [x_2+16] csel x2, x20, x2, hi csel x3, x21, x3, hi ldp x20, x21, [x_1+32] ldp x4, x5, [resx+32] csel x4, x20, x4, lo csel x5, x21, x5, lo ldp x20, x21, [x_2+32] csel x4, x20, x4, hi csel x5, x21, x5, hi ldp x20, x21, [x_1+48] ldp x6, x7, [resx+48] csel x6, 
x20, x6, lo csel x7, x21, x7, lo ldp x20, x21, [x_2+48] csel x6, x20, x6, hi csel x7, x21, x7, hi ldr x20, [x_1+64] ldr x8, [resx+64] csel x8, x20, x8, lo ldr x21, [x_2+64] csel x8, x21, x8, hi ldp x20, x21, [y_1] ldp x10, x11, [resy] csel x10, x20, x10, lo csel x11, x21, x11, lo ldp x20, x21, [y_2] csel x10, x20, x10, hi csel x11, x21, x11, hi ldp x20, x21, [y_1+16] ldp x12, x13, [resy+16] csel x12, x20, x12, lo csel x13, x21, x13, lo ldp x20, x21, [y_2+16] csel x12, x20, x12, hi csel x13, x21, x13, hi ldp x20, x21, [y_1+32] ldp x14, x15, [resy+32] csel x14, x20, x14, lo csel x15, x21, x15, lo ldp x20, x21, [y_2+32] csel x14, x20, x14, hi csel x15, x21, x15, hi ldp x20, x21, [y_1+48] ldp x16, x17, [resy+48] csel x16, x20, x16, lo csel x17, x21, x17, lo ldp x20, x21, [y_2+48] csel x16, x20, x16, hi csel x17, x21, x17, hi ldr x20, [y_1+64] ldr x19, [resy+64] csel x19, x20, x19, lo ldr x21, [y_2+64] csel x19, x21, x19, hi // Finally store back the multiplexed values stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [x_3+32] stp x6, x7, [x_3+48] str x8, [x_3+64] ldp x0, x1, [resz] ldp x2, x3, [resz+16] ldp x4, x5, [resz+32] ldp x6, x7, [resz+48] ldr x8, [resz+64] stp x10, x11, [y_3] stp x12, x13, [y_3+16] stp x14, x15, [y_3+32] stp x16, x17, [y_3+48] str x19, [y_3+64] stp x0, x1, [z_3] stp x2, x3, [z_3+16] stp x4, x5, [z_3+32] stp x6, x7, [z_3+48] str x8, [z_3+64] // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP2(x27,x28) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(p521_jadd_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
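The sequence of sqr_p521/mul_p521/sub_p521 calls in p521_jadd_alt realises the usual Jacobian addition formulas. Below is a compact Python model of that same sequence for the generic case; it deliberately omits the P1 = 0 / P2 = 0 multiplexing done at the end of the routine, and all names are illustrative rather than taken from the sources.

# --- reference sketch in Python (illustrative, not part of the imported sources) ---
P521 = (1 << 521) - 1

def jadd_p521_ref(p1, p2):
    # p1, p2: Jacobian triples (x, y, z), z != 0, representing (x/z^2, y/z^3),
    # with neither p1 == p2 nor p1 == -p2 as affine points.
    x1, y1, z1 = p1
    x2, y2, z2 = p2
    z1sq = z1 * z1 % P521
    z2sq = z2 * z2 % P521
    x1a = x1 * z2sq % P521                     # x_1 * z_2^2
    x2a = x2 * z1sq % P521                     # x_2 * z_1^2
    y1a = y1 * z2 % P521 * z2sq % P521         # y_1 * z_2^3
    y2a = y2 * z1 % P521 * z1sq % P521         # y_2 * z_1^3
    xd = (x2a - x1a) % P521
    yd = (y2a - y1a) % P521
    zz = xd * xd % P521
    zzx1 = zz * x1a % P521
    zzx2 = zz * x2a % P521
    x3 = (yd * yd - zzx1 - zzx2) % P521
    y3 = (yd * (zzx1 - x3) - xd * zz % P521 * y1a) % P521
    z3 = xd * z1 % P521 * z2 % P521
    return (x3, y3, z3)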
wlsfx/bnbb
44,264
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/p521_jadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point addition on NIST curve P-521 in Jacobian coordinates // // extern void p521_jadd(uint64_t p3[static 27], const uint64_t p1[static 27], // const uint64_t p2[static 27]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples. // A Jacobian triple (x,y,z) represents affine point (x/z^2,y/z^3). // It is assumed that all coordinates of the input points p1 and p2 are // fully reduced mod p_521, that both z coordinates are nonzero and // that neither p1 =~= p2 or p1 =~= -p2, where "=~=" means "represents // the same affine point as". // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jadd) S2N_BN_FUNCTION_TYPE_DIRECTIVE(p521_jadd) S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jadd) .text .balign 4 // Size of individual field elements #define NUMSIZE 72 // Stable homes for input arguments during main code sequence #define input_z x26 #define input_x x27 #define input_y x28 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define z_2 input_y, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // #NSPACE is the total stack needed for these temporaries #define z1sq sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define x1a sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define z2sq sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define tmp sp, #(NUMSIZE*6) #define y1a sp, #(NUMSIZE*7) #define NSPACE NUMSIZE*8 // For the three field operations, we use subroutines not inlining. 
// Call local code very close to bignum_mul_p521 and bignum_sqr_p521 // and bignum_sub_p521 #define mul_p521(P0,P1,P2) \ add x0, P0 __LF \ add x1, P1 __LF \ add x2, P2 __LF \ CFI_BL(Lp521_jadd_local_mul_p521) #define sqr_p521(P0,P1) \ add x0, P0 __LF \ add x1, P1 __LF \ CFI_BL(Lp521_jadd_local_sqr_p521) #define sub_p521(P0,P1,P2) \ add x0, P0 __LF \ add x1, P1 __LF \ add x2, P2 __LF \ CFI_BL(Lp521_jadd_local_sub_p521) S2N_BN_SYMBOL(p521_jadd): CFI_START // Save regs and make room on stack for temporary variables CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_PUSH2(x27,x28) CFI_PUSH2(x29,x30) CFI_DEC_SP(NSPACE) // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 mov input_y, x2 // Main code, just a sequence of basic field operations sqr_p521(z1sq,z_1) sqr_p521(z2sq,z_2) mul_p521(y1a,z_2,y_1) mul_p521(y2a,z_1,y_2) mul_p521(x2a,z1sq,x_2) mul_p521(x1a,z2sq,x_1) mul_p521(y2a,z1sq,y2a) mul_p521(y1a,z2sq,y1a) sub_p521(xd,x2a,x1a) sub_p521(yd,y2a,y1a) sqr_p521(zz,xd) sqr_p521(ww,yd) mul_p521(zzx1,zz,x1a) mul_p521(zzx2,zz,x2a) sub_p521(resx,ww,zzx1) sub_p521(t1,zzx2,zzx1) mul_p521(xd,xd,z_1) sub_p521(resx,resx,zzx2) sub_p521(t2,zzx1,resx) mul_p521(t1,t1,y1a) mul_p521(resz,xd,z_2) mul_p521(t2,yd,t2) sub_p521(resy,t2,t1) // Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0 // The condition codes get set by a comparison (P2 != 0) - (P1 != 0) // So "HI" <=> CF /\ ~ZF <=> P1 = 0 /\ ~(P2 = 0) // and "LO" <=> ~CF <=> ~(P1 = 0) /\ P2 = 0 // Multiplex the z outputs accordingly and re-store in resz ldp x0, x1, [z_1] ldp x2, x3, [z_1+16] ldp x4, x5, [z_1+32] ldp x6, x7, [z_1+48] ldr x8, [z_1+64] orr x20, x0, x1 orr x21, x2, x3 orr x22, x4, x5 orr x23, x6, x7 orr x20, x20, x21 orr x22, x22, x23 orr x20, x20, x8 orr x20, x20, x22 cmp x20, xzr cset x20, ne ldp x10, x11, [z_2] ldp x12, x13, [z_2+16] ldp x14, x15, [z_2+32] ldp x16, x17, [z_2+48] ldr x19, [z_2+64] orr x21, x10, x11 orr x22, x12, x13 orr x23, x14, x15 orr x24, x16, x17 orr x21, x21, x22 orr x23, x23, x24 orr x21, x21, x19 orr x21, x21, x23 csel x0, x0, x10, ne csel x1, x1, x11, ne csel x2, x2, x12, ne csel x3, x3, x13, ne csel x4, x4, x14, ne csel x5, x5, x15, ne csel x6, x6, x16, ne csel x7, x7, x17, ne csel x8, x8, x19, ne cmp x21, xzr cset x21, ne cmp x21, x20 ldp x10, x11, [resz] ldp x12, x13, [resz+16] ldp x14, x15, [resz+32] ldp x16, x17, [resz+48] ldr x19, [resz+64] csel x0, x0, x10, ne csel x1, x1, x11, ne csel x2, x2, x12, ne csel x3, x3, x13, ne csel x4, x4, x14, ne csel x5, x5, x15, ne csel x6, x6, x16, ne csel x7, x7, x17, ne csel x8, x8, x19, ne stp x0, x1, [resz] stp x2, x3, [resz+16] stp x4, x5, [resz+32] stp x6, x7, [resz+48] str x8, [resz+64] // Multiplex the x and y outputs too, keeping the results in registers ldp x20, x21, [x_1] ldp x0, x1, [resx] csel x0, x20, x0, lo csel x1, x21, x1, lo ldp x20, x21, [x_2] csel x0, x20, x0, hi csel x1, x21, x1, hi ldp x20, x21, [x_1+16] ldp x2, x3, [resx+16] csel x2, x20, x2, lo csel x3, x21, x3, lo ldp x20, x21, [x_2+16] csel x2, x20, x2, hi csel x3, x21, x3, hi ldp x20, x21, [x_1+32] ldp x4, x5, [resx+32] csel x4, x20, x4, lo csel x5, x21, x5, lo ldp x20, x21, [x_2+32] csel x4, x20, x4, hi csel x5, x21, x5, hi ldp x20, x21, [x_1+48] ldp x6, x7, [resx+48] csel x6, x20, x6, lo csel x7, x21, x7, lo ldp x20, x21, [x_2+48] csel x6, x20, x6, hi csel x7, x21, x7, hi ldr x20, [x_1+64] ldr x8, [resx+64] csel x8, x20, x8, lo ldr x21, [x_2+64] csel x8, x21, x8, hi ldp x20, x21, [y_1] ldp x10, x11, [resy] csel x10, x20, x10, 
lo csel x11, x21, x11, lo ldp x20, x21, [y_2] csel x10, x20, x10, hi csel x11, x21, x11, hi ldp x20, x21, [y_1+16] ldp x12, x13, [resy+16] csel x12, x20, x12, lo csel x13, x21, x13, lo ldp x20, x21, [y_2+16] csel x12, x20, x12, hi csel x13, x21, x13, hi ldp x20, x21, [y_1+32] ldp x14, x15, [resy+32] csel x14, x20, x14, lo csel x15, x21, x15, lo ldp x20, x21, [y_2+32] csel x14, x20, x14, hi csel x15, x21, x15, hi ldp x20, x21, [y_1+48] ldp x16, x17, [resy+48] csel x16, x20, x16, lo csel x17, x21, x17, lo ldp x20, x21, [y_2+48] csel x16, x20, x16, hi csel x17, x21, x17, hi ldr x20, [y_1+64] ldr x19, [resy+64] csel x19, x20, x19, lo ldr x21, [y_2+64] csel x19, x21, x19, hi // Finally store back the multiplexed values stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [x_3+32] stp x6, x7, [x_3+48] str x8, [x_3+64] ldp x0, x1, [resz] ldp x2, x3, [resz+16] ldp x4, x5, [resz+32] ldp x6, x7, [resz+48] ldr x8, [resz+64] stp x10, x11, [y_3] stp x12, x13, [y_3+16] stp x14, x15, [y_3+32] stp x16, x17, [y_3+48] str x19, [y_3+64] stp x0, x1, [z_3] stp x2, x3, [z_3+16] stp x4, x5, [z_3+32] stp x6, x7, [z_3+48] str x8, [z_3+64] // Restore stack and registers CFI_INC_SP(NSPACE) CFI_POP2(x29,x30) CFI_POP2(x27,x28) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(p521_jadd) // Local versions of the three field operations, identical to // bignum_mul_p521, bignum_sqr_p521 and bignum_sub_p521. S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jadd_local_mul_p521) Lp521_jadd_local_mul_p521: CFI_START CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_DEC_SP(80) ldr q6, [x2] ldp x10, x17, [x1, #16] ldr q4, [x1] ldr q16, [x2, #32] ldp x5, x20, [x2, #16] ldr q2, [x1, #32] movi v31.2D, #0x00000000ffffffff uzp2 v17.4S, v6.4S, v6.4S rev64 v7.4S, v6.4S ldp x15, x21, [x1] xtn v25.2S, v6.2D xtn v22.2S, v4.2D subs x14, x10, x17 mul v7.4S, v7.4S, v4.4S csetm x8, cc rev64 v3.4S, v16.4S xtn v1.2S, v16.2D ldp x13, x16, [x2] mul x26, x10, x5 uzp2 v16.4S, v16.4S, v16.4S uaddlp v26.2D, v7.4S cneg x4, x14, cc subs x24, x15, x21 xtn v5.2S, v2.2D mul v28.4S, v3.4S, v2.4S shl v26.2D, v26.2D, #32 mul x22, x17, x20 umull v20.2D, v22.2S, v25.2S uzp2 v6.4S, v4.4S, v4.4S umull v18.2D, v22.2S, v17.2S uzp2 v4.4S, v2.4S, v2.4S cneg x14, x24, cc csetm x7, cc umulh x11, x17, x20 usra v18.2D, v20.2D, #32 uaddlp v7.2D, v28.4S subs x19, x16, x13 umlal v26.2D, v22.2S, v25.2S cneg x19, x19, cc shl v28.2D, v7.2D, #32 umull v7.2D, v5.2S, v1.2S umull v30.2D, v5.2S, v16.2S cinv x6, x7, cc mul x25, x14, x19 umlal v28.2D, v5.2S, v1.2S umull v21.2D, v6.2S, v17.2S umulh x14, x14, x19 usra v30.2D, v7.2D, #32 subs x9, x20, x5 and v29.16B, v18.16B, v31.16B cinv x23, x8, cc mov x8, v26.d[1] cneg x12, x9, cc usra v21.2D, v18.2D, #32 umlal v29.2D, v6.2S, v25.2S mul x24, x4, x12 umull v18.2D, v4.2S, v16.2S movi v25.2D, #0x00000000ffffffff eor x9, x14, x6 and v7.16B, v30.16B, v25.16B usra v21.2D, v29.2D, #32 umulh x7, x10, x5 usra v18.2D, v30.2D, #32 umlal v7.2D, v4.2S, v1.2S mov x19, v21.d[0] umulh x3, x4, x12 mov x14, v21.d[1] usra v18.2D, v7.2D, #32 adds x4, x8, x19 mov x8, v26.d[0] adcs x19, x26, x14 adcs x14, x22, x7 adc x12, x11, xzr adds x11, x4, x8 adcs x26, x19, x4 adcs x22, x14, x19 eor x4, x24, x23 adcs x14, x12, x14 eor x7, x25, x6 adc x25, xzr, x12 eor x19, x3, x23 adds x3, x26, x8 adcs x24, x22, x11 adcs x12, x14, x26 adcs x22, x25, x22 adcs x26, xzr, x14 adc x14, xzr, x25 cmn x23, #0x1 adcs x22, x22, x4 adcs x19, x26, x19 adc x25, x14, x23 subs x14, x21, x17 cneg x23, x14, cc 
csetm x26, cc subs x4, x20, x16 cneg x14, x4, cc cinv x4, x26, cc cmn x6, #0x1 adcs x11, x11, x7 mul x7, x23, x14 adcs x9, x3, x9 adcs x26, x24, x6 umulh x3, x23, x14 adcs x14, x12, x6 adcs x22, x22, x6 adcs x12, x19, x6 extr x24, x11, x8, #55 adc x6, x25, x6 subs x19, x15, x17 csetm x17, cc cneg x23, x19, cc subs x19, x20, x13 lsl x25, x8, #9 eor x8, x7, x4 cneg x20, x19, cc umulh x7, x23, x20 cinv x19, x17, cc subs x17, x15, x10 csetm x15, cc stp x25, x24, [sp, #32] cneg x24, x17, cc mul x20, x23, x20 subs x25, x5, x13 cneg x13, x25, cc cinv x15, x15, cc mul x25, x24, x13 subs x21, x21, x10 csetm x23, cc cneg x17, x21, cc subs x21, x5, x16 umulh x13, x24, x13 cinv x10, x23, cc cneg x23, x21, cc cmn x4, #0x1 adcs x14, x14, x8 eor x21, x3, x4 adcs x21, x22, x21 eor x5, x20, x19 adcs x24, x12, x4 mul x12, x17, x23 eor x8, x25, x15 adc x25, x6, x4 cmn x15, #0x1 adcs x6, x9, x8 ldp x20, x8, [x2, #48] eor x9, x13, x15 adcs x4, x26, x9 umulh x26, x17, x23 ldp x17, x13, [x1, #48] adcs x9, x14, x15 adcs x16, x21, x15 adcs x14, x24, x15 eor x21, x7, x19 mul x23, x17, x20 adc x24, x25, x15 cmn x19, #0x1 adcs x7, x4, x5 adcs x9, x9, x21 umulh x3, x13, x8 adcs x16, x16, x19 adcs x22, x14, x19 eor x5, x12, x10 adc x12, x24, x19 cmn x10, #0x1 adcs x19, x7, x5 eor x14, x26, x10 mov x7, v28.d[1] adcs x24, x9, x14 extr x4, x19, x6, #55 umulh x15, x17, x20 mov x14, v18.d[1] lsr x9, x19, #55 adcs x5, x16, x10 mov x16, v18.d[0] adcs x19, x22, x10 str x9, [sp, #64] extr x25, x6, x11, #55 adc x21, x12, x10 subs x26, x17, x13 stp x25, x4, [sp, #48] stp x19, x21, [sp, #16] csetm x6, cc cneg x4, x26, cc mul x19, x13, x8 subs x11, x8, x20 stp x24, x5, [sp] ldp x21, x10, [x1, #32] cinv x12, x6, cc cneg x6, x11, cc mov x9, v28.d[0] umulh x25, x4, x6 adds x22, x7, x16 ldp x16, x5, [x2, #32] adcs x14, x23, x14 adcs x11, x19, x15 adc x24, x3, xzr adds x3, x22, x9 adcs x15, x14, x22 mul x22, x4, x6 adcs x6, x11, x14 adcs x4, x24, x11 eor x14, x25, x12 adc x26, xzr, x24 subs x7, x21, x10 csetm x23, cc cneg x19, x7, cc subs x24, x5, x16 cneg x11, x24, cc cinv x7, x23, cc adds x25, x15, x9 eor x23, x22, x12 adcs x22, x6, x3 mul x24, x19, x11 adcs x15, x4, x15 adcs x6, x26, x6 umulh x19, x19, x11 adcs x11, xzr, x4 adc x26, xzr, x26 cmn x12, #0x1 adcs x4, x6, x23 eor x6, x24, x7 adcs x14, x11, x14 adc x26, x26, x12 subs x11, x10, x13 cneg x12, x11, cc csetm x11, cc eor x19, x19, x7 subs x24, x8, x5 cinv x11, x11, cc cneg x24, x24, cc cmn x7, #0x1 adcs x3, x3, x6 mul x23, x12, x24 adcs x25, x25, x19 adcs x6, x22, x7 umulh x19, x12, x24 adcs x22, x15, x7 adcs x12, x4, x7 eor x24, x23, x11 adcs x4, x14, x7 adc x26, x26, x7 eor x19, x19, x11 subs x14, x21, x17 cneg x7, x14, cc csetm x14, cc subs x23, x20, x16 cinv x14, x14, cc cneg x23, x23, cc cmn x11, #0x1 adcs x22, x22, x24 mul x24, x7, x23 adcs x15, x12, x19 adcs x4, x4, x11 adc x19, x26, x11 umulh x26, x7, x23 subs x7, x21, x13 eor x11, x24, x14 cneg x23, x7, cc csetm x12, cc subs x7, x8, x16 cneg x7, x7, cc cinv x12, x12, cc cmn x14, #0x1 eor x26, x26, x14 adcs x11, x25, x11 mul x25, x23, x7 adcs x26, x6, x26 adcs x6, x22, x14 adcs x24, x15, x14 umulh x23, x23, x7 adcs x4, x4, x14 adc x22, x19, x14 eor x14, x25, x12 eor x7, x23, x12 cmn x12, #0x1 adcs x14, x26, x14 ldp x19, x25, [x2] ldp x15, x23, [x2, #16] adcs x26, x6, x7 adcs x24, x24, x12 adcs x7, x4, x12 adc x4, x22, x12 subs x19, x19, x16 ldp x16, x22, [x1] sbcs x6, x25, x5 ldp x12, x25, [x1, #16] sbcs x15, x15, x20 sbcs x8, x23, x8 csetm x23, cc subs x21, x21, x16 eor x16, x19, x23 sbcs x19, x10, x22 eor x22, x6, 
x23 eor x8, x8, x23 sbcs x6, x17, x12 sbcs x13, x13, x25 csetm x12, cc subs x10, x10, x17 cneg x17, x10, cc csetm x25, cc subs x5, x20, x5 eor x10, x19, x12 cneg x19, x5, cc eor x20, x15, x23 eor x21, x21, x12 cinv x15, x25, cc mul x25, x17, x19 subs x16, x16, x23 sbcs x5, x22, x23 eor x6, x6, x12 sbcs x20, x20, x23 eor x22, x13, x12 sbc x8, x8, x23 subs x21, x21, x12 umulh x19, x17, x19 sbcs x10, x10, x12 sbcs x17, x6, x12 eor x6, x19, x15 eor x19, x25, x15 umulh x25, x17, x20 sbc x13, x22, x12 cmn x15, #0x1 adcs x22, x14, x19 adcs x19, x26, x6 ldp x6, x26, [sp] adcs x14, x24, x15 umulh x24, x21, x16 adcs x7, x7, x15 adc x15, x4, x15 adds x4, x9, x6 eor x9, x23, x12 adcs x12, x3, x26 stp x4, x12, [sp] ldp x4, x26, [sp, #16] umulh x12, x10, x5 ldp x6, x23, [sp, #32] adcs x3, x11, x4 mul x4, x13, x8 adcs x26, x22, x26 ldp x22, x11, [sp, #48] adcs x6, x19, x6 stp x3, x26, [sp, #16] mul x26, x10, x5 adcs x14, x14, x23 stp x6, x14, [sp, #32] ldr x6, [sp, #64] adcs x22, x7, x22 adcs x14, x15, x11 mul x11, x17, x20 adc x19, x6, xzr stp x22, x14, [sp, #48] adds x14, x26, x24 str x19, [sp, #64] umulh x19, x13, x8 adcs x7, x11, x12 adcs x22, x4, x25 mul x6, x21, x16 adc x19, x19, xzr subs x11, x17, x13 cneg x12, x11, cc csetm x11, cc subs x24, x8, x20 cinv x11, x11, cc cneg x24, x24, cc adds x4, x14, x6 adcs x14, x7, x14 mul x3, x12, x24 adcs x7, x22, x7 adcs x22, x19, x22 umulh x12, x12, x24 adc x24, xzr, x19 adds x19, x14, x6 eor x3, x3, x11 adcs x26, x7, x4 adcs x14, x22, x14 adcs x25, x24, x7 adcs x23, xzr, x22 eor x7, x12, x11 adc x12, xzr, x24 subs x22, x21, x10 cneg x24, x22, cc csetm x22, cc subs x15, x5, x16 cinv x22, x22, cc cneg x15, x15, cc cmn x11, #0x1 adcs x3, x25, x3 mul x25, x24, x15 adcs x23, x23, x7 adc x11, x12, x11 subs x7, x10, x13 umulh x15, x24, x15 cneg x12, x7, cc csetm x7, cc eor x24, x25, x22 eor x25, x15, x22 cmn x22, #0x1 adcs x24, x4, x24 adcs x19, x19, x25 adcs x15, x26, x22 adcs x4, x14, x22 adcs x26, x3, x22 adcs x25, x23, x22 adc x23, x11, x22 subs x14, x21, x17 cneg x3, x14, cc csetm x11, cc subs x14, x8, x5 cneg x14, x14, cc cinv x7, x7, cc subs x13, x21, x13 cneg x21, x13, cc csetm x13, cc mul x22, x12, x14 subs x8, x8, x16 cinv x13, x13, cc umulh x14, x12, x14 cneg x12, x8, cc subs x8, x20, x16 cneg x8, x8, cc cinv x16, x11, cc eor x22, x22, x7 cmn x7, #0x1 eor x14, x14, x7 adcs x4, x4, x22 mul x11, x3, x8 adcs x22, x26, x14 adcs x14, x25, x7 eor x25, x24, x9 adc x26, x23, x7 umulh x7, x3, x8 subs x17, x10, x17 cneg x24, x17, cc eor x3, x11, x16 csetm x11, cc subs x20, x20, x5 cneg x5, x20, cc cinv x11, x11, cc cmn x16, #0x1 mul x17, x21, x12 eor x8, x7, x16 adcs x10, x19, x3 and x19, x9, #0x1ff adcs x20, x15, x8 umulh x15, x21, x12 eor x12, x10, x9 eor x8, x6, x9 adcs x6, x4, x16 adcs x4, x22, x16 adcs x21, x14, x16 adc x7, x26, x16 mul x10, x24, x5 cmn x13, #0x1 ldp x3, x14, [x1] eor x17, x17, x13 umulh x5, x24, x5 adcs x20, x20, x17 eor x17, x15, x13 adcs x16, x6, x17 eor x22, x10, x11 adcs x23, x4, x13 extr x10, x14, x3, #52 and x26, x3, #0xfffffffffffff adcs x24, x21, x13 and x15, x10, #0xfffffffffffff adc x6, x7, x13 cmn x11, #0x1 adcs x17, x20, x22 eor x4, x5, x11 ldp x21, x10, [sp] adcs x7, x16, x4 eor x16, x17, x9 eor x13, x7, x9 ldp x3, x17, [sp, #16] adcs x7, x23, x11 eor x23, x7, x9 ldp x5, x22, [sp, #32] adcs x7, x24, x11 adc x24, x6, x11 ldr x6, [x2, #64] adds x20, x8, x21 lsl x11, x20, #9 eor x4, x7, x9 orr x7, x11, x19 eor x8, x24, x9 adcs x11, x25, x10 mul x26, x6, x26 ldp x19, x24, [sp, #48] adcs x12, x12, x3 adcs x16, x16, x17 adcs x9, x13, 
x5 ldr x25, [sp, #64] extr x20, x11, x20, #55 adcs x13, x23, x22 adcs x4, x4, x19 extr x23, x12, x11, #55 adcs x8, x8, x24 adc x11, x25, xzr adds x21, x9, x21 extr x9, x16, x12, #55 lsr x12, x16, #55 adcs x10, x13, x10 mul x15, x6, x15 adcs x13, x4, x3 ldp x16, x4, [x2] ldr x3, [x1, #64] adcs x17, x8, x17 adcs x5, x5, x7 adcs x20, x22, x20 adcs x8, x19, x23 and x22, x16, #0xfffffffffffff ldp x19, x7, [x1, #16] adcs x9, x24, x9 extr x24, x4, x16, #52 adc x16, x12, x25 mul x22, x3, x22 and x25, x24, #0xfffffffffffff extr x14, x19, x14, #40 and x12, x14, #0xfffffffffffff extr x23, x7, x19, #28 ldp x19, x24, [x2, #16] mul x14, x3, x25 and x23, x23, #0xfffffffffffff add x22, x26, x22 lsl x11, x11, #48 lsr x26, x22, #52 lsl x25, x22, #12 mul x22, x6, x12 extr x12, x19, x4, #40 add x4, x15, x14 mul x15, x6, x23 add x4, x4, x26 extr x23, x24, x19, #28 ldp x14, x19, [x1, #32] and x26, x12, #0xfffffffffffff extr x12, x4, x25, #12 and x25, x23, #0xfffffffffffff adds x21, x21, x12 mul x12, x3, x26 extr x23, x14, x7, #16 and x23, x23, #0xfffffffffffff mul x7, x3, x25 ldp x25, x26, [x2, #32] add x12, x22, x12 extr x22, x19, x14, #56 mul x23, x6, x23 lsr x14, x14, #4 extr x24, x25, x24, #16 add x7, x15, x7 and x15, x24, #0xfffffffffffff and x22, x22, #0xfffffffffffff lsr x24, x4, #52 mul x15, x3, x15 and x14, x14, #0xfffffffffffff add x12, x12, x24 lsl x24, x4, #12 lsr x4, x12, #52 extr x24, x12, x24, #24 adcs x10, x10, x24 lsl x24, x12, #12 add x12, x7, x4 mul x22, x6, x22 add x4, x23, x15 extr x7, x12, x24, #36 adcs x13, x13, x7 lsl x15, x12, #12 add x7, x4, x11 lsr x24, x12, #52 ldp x23, x11, [x2, #48] add x4, x7, x24 mul x12, x6, x14 extr x7, x26, x25, #56 extr x14, x4, x15, #48 and x2, x7, #0xfffffffffffff extr x24, x11, x23, #32 ldp x15, x7, [x1, #48] and x1, x24, #0xfffffffffffff lsr x24, x4, #52 mul x2, x3, x2 extr x26, x23, x26, #44 lsr x23, x25, #4 and x23, x23, #0xfffffffffffff and x25, x26, #0xfffffffffffff extr x26, x7, x15, #32 extr x19, x15, x19, #44 mul x23, x3, x23 and x15, x26, #0xfffffffffffff lsl x26, x4, #12 and x4, x19, #0xfffffffffffff lsr x11, x11, #20 mul x19, x6, x4 adcs x17, x17, x14 add x14, x22, x2 add x22, x12, x23 lsr x7, x7, #20 add x22, x22, x24 extr x2, x22, x26, #60 mul x24, x3, x25 lsr x22, x22, #52 add x14, x14, x22 lsl x22, x2, #8 extr x22, x14, x22, #8 lsl x2, x14, #12 mul x1, x3, x1 adcs x12, x5, x22 mul x5, x6, x15 and x26, x10, x13 and x4, x26, x17 add x23, x19, x24 lsr x14, x14, #52 mul x22, x3, x11 add x11, x23, x14 extr x25, x11, x2, #20 lsl x19, x11, #12 adcs x25, x20, x25 and x14, x4, x12 add x1, x5, x1 and x14, x14, x25 mul x15, x6, x7 add x26, x15, x22 mul x6, x6, x3 lsr x22, x11, #52 add x4, x1, x22 lsr x1, x4, #52 extr x3, x4, x19, #32 lsl x15, x4, #12 add x7, x26, x1 adcs x23, x8, x3 extr x20, x7, x15, #44 and x3, x14, x23 lsr x19, x7, #44 adcs x7, x9, x20 add x11, x6, x19 adc x4, x16, x11 lsr x14, x4, #9 cmp xzr, xzr and x15, x3, x7 orr x3, x4, #0xfffffffffffffe00 adcs xzr, x21, x14 adcs xzr, x15, xzr adcs xzr, x3, xzr adcs x11, x21, x14 and x14, x11, #0x1ff adcs x1, x10, xzr extr x10, x1, x11, #9 str x14, [x0, #64] adcs x14, x13, xzr extr x11, x14, x1, #9 adcs x1, x17, xzr extr x4, x1, x14, #9 stp x10, x11, [x0] adcs x11, x12, xzr extr x14, x11, x1, #9 adcs x10, x25, xzr extr x11, x10, x11, #9 stp x4, x14, [x0, #16] adcs x14, x23, xzr extr x10, x14, x10, #9 adcs x1, x7, xzr stp x11, x10, [x0, #32] extr x14, x1, x14, #9 adc x10, x3, xzr extr x26, x10, x1, #9 stp x14, x26, [x0, #48] CFI_INC_SP(80) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) 
CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jadd_local_mul_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jadd_local_sqr_p521) Lp521_jadd_local_sqr_p521: CFI_START CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) ldr q23, [x1, #32] ldp x9, x2, [x1, #32] ldr q16, [x1, #32] ldr q20, [x1, #48] ldp x6, x13, [x1, #48] rev64 v2.4S, v23.4S mul x14, x9, x2 ldr q31, [x1, #48] subs x22, x9, x2 uzp2 v26.4S, v23.4S, v23.4S mul v30.4S, v2.4S, v16.4S xtn v0.2S, v20.2D csetm x12, cc xtn v21.2S, v16.2D xtn v23.2S, v23.2D umulh x10, x9, x6 rev64 v27.4S, v31.4S umull v2.2D, v21.2S, v26.2S cneg x23, x22, cc uaddlp v25.2D, v30.4S umull v18.2D, v21.2S, v23.2S mul x22, x9, x6 mul v6.4S, v27.4S, v20.4S uzp2 v17.4S, v20.4S, v20.4S shl v20.2D, v25.2D, #32 uzp2 v27.4S, v31.4S, v31.4S mul x16, x2, x13 umlal v20.2D, v21.2S, v23.2S usra v2.2D, v18.2D, #32 adds x8, x22, x10 umull v25.2D, v17.2S, v27.2S xtn v31.2S, v31.2D movi v1.2D, #0xffffffff adc x3, x10, xzr umulh x21, x2, x13 uzp2 v21.4S, v16.4S, v16.4S umull v18.2D, v0.2S, v27.2S subs x19, x13, x6 and v7.16B, v2.16B, v1.16B umull v27.2D, v0.2S, v31.2S cneg x20, x19, cc movi v30.2D, #0xffffffff umull v16.2D, v21.2S, v26.2S umlal v7.2D, v21.2S, v23.2S mul x19, x23, x20 cinv x7, x12, cc uaddlp v6.2D, v6.4S eor x12, x19, x7 adds x11, x8, x16 umulh x10, x23, x20 ldr q1, [x1] usra v16.2D, v2.2D, #32 adcs x19, x3, x21 shl v2.2D, v6.2D, #32 adc x20, x21, xzr adds x17, x19, x16 usra v18.2D, v27.2D, #32 adc x19, x20, xzr cmn x7, #0x1 umlal v2.2D, v0.2S, v31.2S umulh x16, x9, x2 adcs x8, x11, x12 usra v16.2D, v7.2D, #32 ldr x12, [x1, #64] eor x20, x10, x7 umulh x10, x6, x13 mov x23, v2.d[0] mov x3, v2.d[1] adcs x21, x17, x20 usra v25.2D, v18.2D, #32 and v23.16B, v18.16B, v30.16B adc x7, x19, x7 adds x22, x22, x22 ldr q7, [x1, #16] adcs x17, x8, x8 umlal v23.2D, v17.2S, v31.2S mov x19, v16.d[0] mul x11, x12, x12 ldr q4, [x1] usra v25.2D, v23.2D, #32 add x5, x12, x12 adcs x15, x21, x21 ldr q28, [x1] mov x12, v20.d[1] adcs x24, x7, x7 mov x21, v16.d[1] adc x4, xzr, xzr adds x19, x19, x14 ldr q18, [x1, #16] xtn v26.2S, v1.2D adcs x8, x12, x16 adc x21, x21, xzr adds x7, x19, x14 xtn v23.2S, v7.2D rev64 v21.4S, v28.4S adcs x12, x8, x16 ldp x20, x19, [x1] mov x16, v25.d[1] xtn v22.2S, v28.2D adc x14, x21, xzr adds x8, x22, x12 uzp2 v24.4S, v28.4S, v28.4S rev64 v28.4S, v18.4S mul x12, x6, x13 mul v16.4S, v21.4S, v1.4S shrn v31.2S, v7.2D, #32 adcs x22, x17, x14 mov x14, v25.d[0] and x21, x20, #0xfffffffffffff umull v17.2D, v26.2S, v24.2S ldr q2, [x1, #32] adcs x17, x15, xzr ldr q30, [x1, #48] umull v7.2D, v26.2S, v22.2S adcs x15, x24, xzr ldr q0, [x1, #16] movi v6.2D, #0xffffffff adc x4, x4, xzr adds x14, x14, x12 uzp1 v27.4S, v18.4S, v4.4S uzp2 v19.4S, v1.4S, v1.4S adcs x24, x3, x10 mul x3, x5, x21 umull v29.2D, v23.2S, v31.2S ldr q5, [x1] adc x21, x16, xzr adds x16, x14, x12 extr x12, x19, x20, #52 umull v18.2D, v19.2S, v24.2S adcs x24, x24, x10 and x10, x12, #0xfffffffffffff ldp x14, x12, [x1, #16] usra v17.2D, v7.2D, #32 adc x21, x21, xzr adds x23, x23, x17 mul x17, x5, x10 shl v21.2D, v29.2D, #33 lsl x10, x3, #12 lsr x1, x3, #52 rev64 v29.4S, v2.4S uaddlp v25.2D, v16.4S add x17, x17, x1 adcs x16, x16, x15 extr x3, x14, x19, #40 mov x15, v20.d[0] extr x10, x17, x10, #12 and x3, x3, #0xfffffffffffff shl v3.2D, v25.2D, #32 and v6.16B, v17.16B, v6.16B mul x1, x5, x3 usra v18.2D, v17.2D, #32 adcs x3, x24, x4 extr x4, x12, x14, #28 umlal v6.2D, v19.2S, v22.2S xtn v20.2S, v2.2D umlal v3.2D, v26.2S, v22.2S movi v26.2D, #0xffffffff lsr x24, x17, #52 and x4, x4, 
#0xfffffffffffff uzp2 v19.4S, v2.4S, v2.4S add x1, x1, x24 mul x24, x5, x4 lsl x4, x17, #12 xtn v24.2S, v5.2D extr x17, x1, x4, #24 adc x21, x21, xzr umlal v21.2D, v23.2S, v23.2S adds x4, x15, x10 lsl x10, x1, #12 adcs x15, x7, x17 mul v23.4S, v28.4S, v4.4S and x7, x4, #0x1ff lsr x17, x1, #52 umulh x1, x19, x12 uzp2 v17.4S, v5.4S, v5.4S extr x4, x15, x4, #9 add x24, x24, x17 mul v29.4S, v29.4S, v5.4S extr x17, x24, x10, #36 extr x10, x9, x12, #16 uzp1 v28.4S, v4.4S, v4.4S adcs x17, x8, x17 and x8, x10, #0xfffffffffffff umull v16.2D, v24.2S, v20.2S extr x10, x17, x15, #9 mul x15, x5, x8 stp x4, x10, [x0] lsl x4, x24, #12 lsr x8, x9, #4 uaddlp v4.2D, v23.4S and x8, x8, #0xfffffffffffff umull v23.2D, v24.2S, v19.2S mul x8, x5, x8 extr x10, x2, x9, #56 lsr x24, x24, #52 and x10, x10, #0xfffffffffffff add x15, x15, x24 extr x4, x15, x4, #48 mul x24, x5, x10 lsr x10, x15, #52 usra v23.2D, v16.2D, #32 add x10, x8, x10 shl v4.2D, v4.2D, #32 adcs x22, x22, x4 extr x4, x6, x2, #44 lsl x15, x15, #12 lsr x8, x10, #52 extr x15, x10, x15, #60 and x10, x4, #0xfffffffffffff umlal v4.2D, v28.2S, v27.2S add x8, x24, x8 extr x4, x13, x6, #32 mul x24, x5, x10 uzp2 v16.4S, v30.4S, v30.4S lsl x10, x15, #8 rev64 v28.4S, v30.4S and x15, x4, #0xfffffffffffff extr x4, x8, x10, #8 mul x10, x5, x15 lsl x15, x8, #12 adcs x23, x23, x4 lsr x4, x8, #52 lsr x8, x13, #20 add x4, x24, x4 mul x8, x5, x8 lsr x24, x4, #52 extr x15, x4, x15, #20 lsl x4, x4, #12 add x10, x10, x24 adcs x15, x16, x15 extr x4, x10, x4, #32 umulh x5, x20, x14 adcs x3, x3, x4 usra v18.2D, v6.2D, #32 lsl x16, x10, #12 extr x24, x15, x23, #9 lsr x10, x10, #52 uzp2 v27.4S, v0.4S, v0.4S add x8, x8, x10 extr x10, x3, x15, #9 extr x4, x22, x17, #9 and v25.16B, v23.16B, v26.16B lsr x17, x8, #44 extr x15, x8, x16, #44 extr x16, x23, x22, #9 xtn v7.2S, v30.2D mov x8, v4.d[0] stp x24, x10, [x0, #32] uaddlp v30.2D, v29.4S stp x4, x16, [x0, #16] umulh x24, x20, x19 adcs x15, x21, x15 adc x16, x11, x17 subs x11, x20, x19 xtn v5.2S, v0.2D csetm x17, cc extr x3, x15, x3, #9 mov x22, v4.d[1] cneg x21, x11, cc subs x10, x12, x14 mul v31.4S, v28.4S, v0.4S cneg x10, x10, cc cinv x11, x17, cc shl v4.2D, v30.2D, #32 umull v28.2D, v5.2S, v16.2S extr x23, x16, x15, #9 adds x4, x8, x5 mul x17, x21, x10 umull v22.2D, v5.2S, v7.2S adc x15, x5, xzr adds x4, x4, x22 uaddlp v2.2D, v31.4S lsr x5, x16, #9 adcs x16, x15, x1 mov x15, v18.d[0] adc x1, x1, xzr umulh x10, x21, x10 adds x22, x16, x22 umlal v4.2D, v24.2S, v20.2S umull v30.2D, v27.2S, v16.2S stp x3, x23, [x0, #48] add x3, x7, x5 adc x16, x1, xzr usra v28.2D, v22.2D, #32 mul x23, x20, x19 eor x1, x17, x11 cmn x11, #0x1 mov x17, v18.d[1] umull v18.2D, v17.2S, v19.2S adcs x7, x4, x1 eor x1, x10, x11 umlal v25.2D, v17.2S, v20.2S movi v16.2D, #0xffffffff adcs x22, x22, x1 usra v18.2D, v23.2D, #32 umulh x4, x14, x14 adc x1, x16, x11 adds x10, x8, x8 shl v23.2D, v2.2D, #32 str x3, [x0, #64] adcs x5, x7, x7 and v16.16B, v28.16B, v16.16B usra v30.2D, v28.2D, #32 adcs x7, x22, x22 mov x21, v3.d[1] adcs x11, x1, x1 umlal v16.2D, v27.2S, v7.2S adc x22, xzr, xzr adds x16, x15, x23 mul x8, x14, x12 umlal v23.2D, v5.2S, v7.2S usra v18.2D, v25.2D, #32 umulh x15, x14, x12 adcs x21, x21, x24 usra v30.2D, v16.2D, #32 adc x1, x17, xzr adds x3, x16, x23 adcs x21, x21, x24 adc x1, x1, xzr adds x24, x10, x21 umulh x21, x12, x12 adcs x16, x5, x1 adcs x10, x7, xzr mov x17, v21.d[1] adcs x23, x11, xzr adc x5, x22, xzr adds x1, x4, x8 adcs x22, x17, x15 ldp x17, x4, [x0] mov x11, v21.d[0] adc x21, x21, xzr adds x1, x1, x8 adcs x15, x22, x15 adc x8, 
x21, xzr adds x22, x11, x10 mov x21, v3.d[0] adcs x11, x1, x23 ldp x1, x10, [x0, #16] adcs x15, x15, x5 adc x7, x8, xzr adds x8, x17, x21 mov x23, v4.d[1] ldp x5, x21, [x0, #32] adcs x17, x4, x3 ldr x4, [x0, #64] mov x3, v18.d[0] adcs x24, x1, x24 stp x8, x17, [x0] adcs x17, x10, x16 ldp x1, x16, [x0, #48] adcs x5, x5, x22 adcs x8, x21, x11 stp x5, x8, [x0, #32] adcs x1, x1, x15 mov x15, v23.d[1] adcs x21, x16, x7 stp x1, x21, [x0, #48] adc x10, x4, xzr subs x7, x14, x12 mov x16, v18.d[1] cneg x5, x7, cc csetm x4, cc subs x11, x13, x6 mov x8, v23.d[0] cneg x7, x11, cc cinv x21, x4, cc mov x11, v30.d[0] adds x4, x23, x3 mul x22, x5, x7 mov x23, v30.d[1] adcs x8, x8, x16 adcs x16, x15, x11 adc x11, x23, xzr umulh x3, x5, x7 stp x24, x17, [x0, #16] mov x5, v4.d[0] subs x15, x20, x19 cneg x7, x15, cc str x10, [x0, #64] csetm x1, cc subs x24, x2, x9 cneg x17, x24, cc cinv x15, x1, cc adds x23, x4, x5 umulh x1, x7, x17 adcs x24, x8, x4 adcs x10, x16, x8 eor x8, x22, x21 adcs x16, x11, x16 mul x22, x7, x17 eor x17, x1, x15 adc x1, xzr, x11 adds x11, x24, x5 eor x7, x3, x21 adcs x3, x10, x23 adcs x24, x16, x24 adcs x4, x1, x10 eor x10, x22, x15 adcs x16, xzr, x16 adc x1, xzr, x1 cmn x21, #0x1 adcs x8, x4, x8 adcs x22, x16, x7 adc x7, x1, x21 subs x21, x19, x12 csetm x4, cc cneg x1, x21, cc subs x21, x13, x2 cinv x16, x4, cc cneg x4, x21, cc cmn x15, #0x1 adcs x21, x23, x10 mul x23, x1, x4 adcs x11, x11, x17 adcs x3, x3, x15 umulh x1, x1, x4 adcs x24, x24, x15 adcs x8, x8, x15 adcs x22, x22, x15 eor x17, x23, x16 adc x15, x7, x15 subs x7, x20, x14 cneg x7, x7, cc csetm x4, cc subs x10, x20, x12 cneg x23, x10, cc csetm x10, cc subs x12, x6, x9 cinv x20, x4, cc cneg x12, x12, cc cmn x16, #0x1 eor x1, x1, x16 adcs x17, x24, x17 mul x4, x7, x12 adcs x8, x8, x1 umulh x1, x7, x12 adcs x24, x22, x16 adc x7, x15, x16 subs x12, x13, x9 cneg x12, x12, cc cinv x13, x10, cc subs x19, x19, x14 mul x9, x23, x12 cneg x19, x19, cc csetm x10, cc eor x16, x1, x20 subs x22, x6, x2 umulh x12, x23, x12 eor x1, x4, x20 cinv x4, x10, cc cneg x22, x22, cc cmn x20, #0x1 adcs x15, x11, x1 eor x6, x12, x13 adcs x10, x3, x16 adcs x17, x17, x20 eor x23, x9, x13 adcs x2, x8, x20 mul x11, x19, x22 adcs x24, x24, x20 adc x7, x7, x20 cmn x13, #0x1 adcs x3, x10, x23 umulh x22, x19, x22 adcs x17, x17, x6 eor x12, x22, x4 extr x22, x15, x21, #63 adcs x8, x2, x13 extr x21, x21, x5, #63 ldp x16, x23, [x0] adcs x20, x24, x13 eor x1, x11, x4 adc x6, x7, x13 cmn x4, #0x1 ldp x2, x7, [x0, #16] adcs x1, x3, x1 extr x19, x1, x15, #63 adcs x14, x17, x12 extr x1, x14, x1, #63 lsl x17, x5, #1 adcs x8, x8, x4 extr x12, x8, x14, #8 ldp x15, x11, [x0, #32] adcs x9, x20, x4 adc x3, x6, x4 adds x16, x12, x16 extr x6, x9, x8, #8 ldp x14, x12, [x0, #48] extr x8, x3, x9, #8 adcs x20, x6, x23 ldr x24, [x0, #64] lsr x6, x3, #8 adcs x8, x8, x2 and x2, x1, #0x1ff and x1, x20, x8 adcs x4, x6, x7 adcs x3, x17, x15 and x1, x1, x4 adcs x9, x21, x11 and x1, x1, x3 adcs x6, x22, x14 and x1, x1, x9 and x21, x1, x6 adcs x14, x19, x12 adc x1, x24, x2 cmp xzr, xzr orr x12, x1, #0xfffffffffffffe00 lsr x1, x1, #9 adcs xzr, x16, x1 and x21, x21, x14 adcs xzr, x21, xzr adcs xzr, x12, xzr adcs x21, x16, x1 adcs x1, x20, xzr adcs x19, x8, xzr stp x21, x1, [x0] adcs x1, x4, xzr adcs x21, x3, xzr stp x19, x1, [x0, #16] adcs x1, x9, xzr stp x21, x1, [x0, #32] adcs x21, x6, xzr adcs x1, x14, xzr stp x21, x1, [x0, #48] adc x1, x12, xzr and x1, x1, #0x1ff str x1, [x0, #64] CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET 
S2N_BN_SIZE_DIRECTIVE(Lp521_jadd_local_sqr_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jadd_local_sub_p521) Lp521_jadd_local_sub_p521: CFI_START ldp x5, x6, [x1] ldp x4, x3, [x2] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x1, #16] ldp x4, x3, [x2, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [x1, #32] ldp x4, x3, [x2, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 ldp x11, x12, [x1, #48] ldp x4, x3, [x2, #48] sbcs x11, x11, x4 sbcs x12, x12, x3 ldr x13, [x1, #64] ldr x4, [x2, #64] sbcs x13, x13, x4 sbcs x5, x5, xzr sbcs x6, x6, xzr sbcs x7, x7, xzr sbcs x8, x8, xzr sbcs x9, x9, xzr sbcs x10, x10, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbcs x13, x13, xzr and x13, x13, #0x1ff stp x5, x6, [x0] stp x7, x8, [x0, #16] stp x9, x10, [x0, #32] stp x11, x12, [x0, #48] str x13, [x0, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jadd_local_sub_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
wlsfx/bnbb
2,911
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_mod_p521_9.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Reduce modulo field characteristic, z := x mod p_521 // Input x[9]; output z[9] // // extern void bignum_mod_p521_9(uint64_t z[static 9], const uint64_t x[static 9]); // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_p521_9) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_p521_9) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_p521_9) .text .balign 4 #define z x0 #define x x1 #define h x2 #define t x3 #define d0 x4 #define d1 x5 #define d2 x6 #define d3 x7 #define d4 x8 #define d5 x9 #define d6 x10 #define d7 x11 #define d8 x12 S2N_BN_SYMBOL(bignum_mod_p521_9): CFI_START // Load top digit first and get its upper bits in h so that we // separate out x = 2^521 * H + L with h = H. Now x mod p_521 = // (H + L) mod p_521 = if H + L >= p_521 then H + L - p_521 else H + L. ldr d8, [x, #64] lsr h, d8, #9 // Load in the other digits and decide whether H + L >= p_521. This is // equivalent to H + L + 1 >= 2^521, and since this can only happen if // digits d7,...,d1 consist entirely of 1 bits, we can condense the // carry chain by ANDing digits together, perhaps reducing its latency. // This condenses only three pairs; the payoff beyond that seems limited. // By stuffing in 1 bits from 521 position upwards, get CF directly subs xzr, xzr, xzr ldp d0, d1, [x] adcs xzr, d0, h adcs xzr, d1, xzr ldp d2, d3, [x, #16] and t, d2, d3 adcs xzr, t, xzr ldp d4, d5, [x, #32] and t, d4, d5 adcs xzr, t, xzr ldp d6, d7, [x, #48] and t, d6, d7 adcs xzr, t, xzr orr t, d8, #~0x1FF adcs t, t, xzr // Now H + L >= p_521 <=> H + L + 1 >= 2^521 <=> CF from this comparison. // So if CF is set we want (H + L) - p_521 = (H + L + 1) - 2^521 // while otherwise we want just H + L. So mask H + L + CF to 521 bits. adcs d0, d0, h adcs d1, d1, xzr adcs d2, d2, xzr adcs d3, d3, xzr adcs d4, d4, xzr adcs d5, d5, xzr adcs d6, d6, xzr adcs d7, d7, xzr adc d8, d8, xzr and d8, d8, #0x1FF // Store the result stp d0, d1, [z] stp d2, d3, [z, #16] stp d4, d5, [z, #32] stp d6, d7, [z, #48] str d8, [z, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mod_p521_9) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
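For reference, the H/L splitting argument in the comments above (write x = 2^521*H + L, then x mod p_521 = (H + L) mod p_521 because 2^521 is congruent to 1 mod p_521) can be restated in portable C. The sketch below is illustrative only: the name ref_mod_p521_9 is invented here, it is not constant-time, it is not part of the source, and it relies on the GCC/Clang unsigned __int128 extension for the carry chain.

#include <stdint.h>

/* Minimal reference sketch of x mod p_521 for a 9x64-bit input, mirroring
   the H/L split used above. Limbs are little-endian: x[0] is least
   significant, x[8] holds bits 512..575. Not constant-time. */
static void ref_mod_p521_9(uint64_t z[9], const uint64_t x[9])
{
    uint64_t s[9], t[9];
    uint64_t h = x[8] >> 9;                 /* H = bits 521 and above of the input */

    /* s = L + H, where L = x masked to 521 bits. */
    unsigned __int128 c = h;
    for (int i = 0; i < 9; i++) {
        c += (i == 8) ? (x[8] & 0x1FF) : x[i];
        s[i] = (uint64_t)c;
        c >>= 64;
    }

    /* t = s + 1; bit 521 of t is set exactly when s >= p_521 = 2^521 - 1. */
    c = 1;
    for (int i = 0; i < 9; i++) {
        c += s[i];
        t[i] = (uint64_t)c;
        c >>= 64;
    }

    /* If s >= p_521, the answer is s - p_521 = t mod 2^521; otherwise s. */
    int ge = (t[8] >> 9) != 0;
    for (int i = 0; i < 9; i++)
        z[i] = ge ? t[i] : s[i];
    z[8] &= 0x1FF;                          /* keep only 521 bits */
}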
wlsfx/bnbb
1,963
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_double_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Double modulo p_521, z := (2 * x) mod p_521, assuming x reduced // Input x[9]; output z[9] // // extern void bignum_double_p521(uint64_t z[static 9], // const uint64_t x[static 9]); // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_double_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_double_p521) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_double_p521) .text .balign 4 #define z x0 #define x x1 #define c x2 #define h x3 #define l x4 S2N_BN_SYMBOL(bignum_double_p521): CFI_START // We can decide whether 2 * x >= p_521 just by 2 * x >= 2^521, which // amounts to whether the top word is >= 256 ldr c, [x, #64] subs xzr, c, #256 // Now if 2 * x >= p_521 we want 2 * x - p_521 = (2 * x + 1) - 2^521 // and otherwise just 2 * x. Feed in the condition as the carry bit // to get 2 * x + [2 * x >= p_521] then just mask it off to 521 bits. ldp l, h, [x] adcs l, l, l adcs h, h, h stp l, h, [z] ldp l, h, [x, #16] adcs l, l, l adcs h, h, h stp l, h, [z, #16] ldp l, h, [x, #32] adcs l, l, l adcs h, h, h stp l, h, [z, #32] ldp l, h, [x, #48] adcs l, l, l adcs h, h, h stp l, h, [z, #48] adc c, c, c and c, c, #0x1FF str c, [z, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_double_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
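The comparison trick above (2*x >= p_521 exactly when the top word is at least 256, since x is reduced and 2*x is even) has a short C restatement. This is a hedged sketch under the same little-endian 9-limb convention; ref_double_p521 is an invented name and the code is not constant-time.

#include <stdint.h>

/* Reference sketch of (2*x) mod p_521 for reduced x (x < p_521). */
static void ref_double_p521(uint64_t z[9], const uint64_t x[9])
{
    /* 2*x >= p_521  <=>  2*x >= 2^521  <=>  bit 520 of x set  <=>  x[8] >= 256. */
    uint64_t cin = (x[8] >= 256) ? 1 : 0;

    /* z = (2*x + cin) masked to 521 bits: when the reduction is needed,
       2*x - p_521 = (2*x + 1) - 2^521, and the final mask drops the 2^521. */
    for (int i = 0; i < 9; i++) {
        uint64_t out = (x[i] << 1) | cin;   /* shift in the previous carry bit */
        cin = x[i] >> 63;
        z[i] = out;
    }
    z[8] &= 0x1FF;
}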
wlsfx/bnbb
2,532
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_optneg_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Optionally negate modulo p_521, z := (-x) mod p_521 (if p nonzero) or // z := x (if p zero), assuming x reduced // Inputs p, x[9]; output z[9] // // extern void bignum_optneg_p521(uint64_t z[static 9], uint64_t p, // const uint64_t x[static 9]); // // Standard ARM ABI: X0 = z, X1 = p, X2 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_optneg_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_optneg_p521) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_optneg_p521) .text .balign 4 #define z x0 #define p x1 #define x x2 #define q x3 #define d0 x4 #define d1 x5 #define d2 x6 #define d3 x7 #define d4 x8 #define d5 x9 #define d6 x10 #define d7 x11 #define d8 x12 S2N_BN_SYMBOL(bignum_optneg_p521): CFI_START // Load the 9 digits of x and generate q = the OR of them all ldp d0, d1, [x] orr d6, d0, d1 ldp d2, d3, [x, #16] orr d7, d2, d3 orr q, d6, d7 ldp d4, d5, [x, #32] orr d8, d4, d5 orr q, q, d8 ldp d6, d7, [x, #48] orr d8, d6, d7 orr q, q, d8 ldr d8, [x, #64] orr q, q, d8 // Turn q into a bitmask for "input is nonzero and p is nonzero", so that // we avoid doing -0 = p_521 and hence maintain strict modular reduction cmp q, #0 csetm q, ne cmp p, #0 csel q, xzr, q, eq // Since p_521 is all 1s, the subtraction is just an exclusive-or with q // to give an optional inversion, with a slight fiddle for the top digit. eor d0, d0, q eor d1, d1, q eor d2, d2, q eor d3, d3, q eor d4, d4, q eor d5, d5, q eor d6, d6, q eor d7, d7, q and q, q, #0x1FF eor d8, d8, q // Write back the result and return stp d0, d1, [z] stp d2, d3, [z, #16] stp d4, d5, [z, #32] stp d6, d7, [z, #48] str d8, [z, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_optneg_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
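Because p_521 is all 1 bits, the optional negation above amounts to XORing with a mask that is all-ones only when both p and x are nonzero, so that -0 stays 0 rather than becoming p_521. A non-constant-time C sketch of the same idea, with the invented name ref_optneg_p521:

#include <stdint.h>

/* Reference sketch: z = (-x) mod p_521 if p != 0, else z = x, for reduced x. */
static void ref_optneg_p521(uint64_t z[9], uint64_t p, const uint64_t x[9])
{
    uint64_t any = 0;
    for (int i = 0; i < 9; i++) any |= x[i];

    /* Mask is all-ones only for "negate a nonzero value". */
    uint64_t q = (p != 0 && any != 0) ? ~(uint64_t)0 : 0;

    /* p_521 - x is the bitwise complement of x within 521 bits. */
    for (int i = 0; i < 8; i++) z[i] = x[i] ^ q;
    z[8] = x[8] ^ (q & 0x1FF);              /* only 9 bits of the top limb are used */
}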
wlsfx/bnbb
2,473
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_sub_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Subtract modulo p_521, z := (x - y) mod p_521 // Inputs x[9], y[9]; output z[9] // // extern void bignum_sub_p521(uint64_t z[static 9], const uint64_t x[static 9], // const uint64_t y[static 9]); // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sub_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sub_p521) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sub_p521) .text .balign 4 #define z x0 #define x x1 #define y x2 #define h x3 #define l x4 #define d0 x5 #define d1 x6 #define d2 x7 #define d3 x8 #define d4 x9 #define d5 x10 #define d6 x11 #define d7 x12 #define d8 x13 S2N_BN_SYMBOL(bignum_sub_p521): CFI_START // First just subtract the numbers as [d8;d7;d6;d5;d4;d3;d2;d1;d0] = x - y ldp d0, d1, [x] ldp l, h, [y] subs d0, d0, l sbcs d1, d1, h ldp d2, d3, [x, #16] ldp l, h, [y, #16] sbcs d2, d2, l sbcs d3, d3, h ldp d4, d5, [x, #32] ldp l, h, [y, #32] sbcs d4, d4, l sbcs d5, d5, h ldp d6, d7, [x, #48] ldp l, h, [y, #48] sbcs d6, d6, l sbcs d7, d7, h ldr d8, [x, #64] ldr l, [y, #64] sbcs d8, d8, l // Now if x < y we want (x - y) + p_521 == (x - y) - 1 (mod 2^521) // Otherwise we just want the existing x - y result. So subtract // 1 iff the initial subtraction carried, then mask to 521 bits. sbcs d0, d0, xzr sbcs d1, d1, xzr sbcs d2, d2, xzr sbcs d3, d3, xzr sbcs d4, d4, xzr sbcs d5, d5, xzr sbcs d6, d6, xzr sbcs d7, d7, xzr sbcs d8, d8, xzr and d8, d8, #0x1FF // Store the result stp d0, d1, [z] stp d2, d3, [z, #16] stp d4, d5, [z, #32] stp d6, d7, [z, #48] str d8, [z, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_sub_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
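The correction step above relies on the identity (x - y) + p_521 = (x - y) - 1 modulo 2^521, so an initial borrow is handled by subtracting 1 again and masking to 521 bits. A minimal C sketch of that flow (invented name ref_sub_p521, not constant-time, not part of the source):

#include <stdint.h>

/* Reference sketch of (x - y) mod p_521 for reduced inputs. */
static void ref_sub_p521(uint64_t z[9], const uint64_t x[9], const uint64_t y[9])
{
    /* d = x - y over 9 limbs, keeping track of the final borrow. */
    uint64_t borrow = 0;
    for (int i = 0; i < 9; i++) {
        uint64_t xi = x[i], yi = y[i];
        z[i] = xi - yi - borrow;
        borrow = (xi < yi) || (xi == yi && borrow);
    }

    /* If x < y, the true result is (x - y) + p_521, i.e. (x - y) - 1 mod 2^521:
       subtract the borrow once more, then truncate to 521 bits. */
    uint64_t b = borrow;
    for (int i = 0; i < 9 && b; i++) {
        uint64_t old = z[i];
        z[i] = old - b;
        b = (old == 0);
    }
    z[8] &= 0x1FF;
}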
wlsfx/bnbb
33,860
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_sqr_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Square modulo p_521, z := (x^2) mod p_521, assuming x reduced // Input x[9]; output z[9] // // extern void bignum_sqr_p521(uint64_t z[static 9], const uint64_t x[static 9]); // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" // bignum_sqr_p521 is functionally equivalent to unopt/bignum_sqr_p521_base. // It is written in a way that // 1. A subset of scalar multiplications in bignum_montmul_p384 are carefully // chosen and vectorized // 2. The vectorized assembly is rescheduled using the SLOTHY superoptimizer. // https://github.com/slothy-optimizer/slothy // // The output program of step 1. is as follows: // // stp x19, x20, [sp, #-16]! // stp x21, x22, [sp, #-16]! // stp x23, x24, [sp, #-16]! // ldp x20, x19, [x1] // ldr q23, [x1] // ldr q1, [x1] // ldr q16, [x1] // ldp x14, x12, [x1, #16] // ldr q28, [x1, #16] // ldr q31, [x1, #16] // ldp x9, x2, [x1, #32] // ldr q29, [x1, #32] // ldr q4, [x1, #32] // ldr q5, [x1] // ldr q2, [x1, #32] // ldp x6, x13, [x1, #48] // ldr q24, [x1, #48] // ldr q27, [x1, #48] // ldr q0, [x1, #16] // ldr q30, [x1, #48] // mul x17, x9, x6 // mul x10, x2, x13 // umulh x24, x9, x6 // subs x4, x9, x2 // cneg x4, x4, cc // csetm x16, cc // subs x3, x13, x6 // cneg x23, x3, cc // mul x3, x4, x23 // umulh x4, x4, x23 // cinv x22, x16, cc // eor x23, x3, x22 // eor x16, x4, x22 // adds x3, x17, x24 // adc x24, x24, xzr // umulh x4, x2, x13 // adds x3, x3, x10 // adcs x24, x24, x4 // adc x4, x4, xzr // adds x24, x24, x10 // adc x10, x4, xzr // cmn x22, #0x1 // adcs x4, x3, x23 // adcs x24, x24, x16 // adc x10, x10, x22 // adds x8, x17, x17 // adcs x22, x4, x4 // adcs x5, x24, x24 // adcs x11, x10, x10 // adc x23, xzr, xzr // movi v25.2D, #0xffffffff // uzp2 v19.4S, v4.4S, v4.4S // xtn v26.2S, v29.2D // xtn v22.2S, v4.2D // rev64 v4.4S, v4.4S // umull v7.2D, v26.2S, v22.2S // umull v21.2D, v26.2S, v19.2S // uzp2 v17.4S, v29.4S, v29.4S // mul v4.4S, v4.4S, v29.4S // usra v21.2D, v7.2D, #32 // umull v18.2D, v17.2S, v19.2S // uaddlp v4.2D, v4.4S // and v7.16B, v21.16B, v25.16B // umlal v7.2D, v17.2S, v22.2S // shl v4.2D, v4.2D, #32 // usra v18.2D, v21.2D, #32 // umlal v4.2D, v26.2S, v22.2S // usra v18.2D, v7.2D, #32 // mov x15, v4.d[0] // mov x16, v4.d[1] // mul x3, x9, x2 // mov x10, v18.d[0] // mov x17, v18.d[1] // umulh x4, x9, x2 // adds x24, x10, x3 // adcs x10, x16, x4 // adc x17, x17, xzr // adds x7, x24, x3 // adcs x10, x10, x4 // adc x17, x17, xzr // adds x8, x8, x10 // adcs x22, x22, x17 // adcs x21, x5, xzr // adcs x5, x11, xzr // adc x11, x23, xzr // movi v25.2D, #0xffffffff // uzp2 v19.4S, v27.4S, v27.4S // xtn v26.2S, v24.2D // xtn v22.2S, v27.2D // rev64 v4.4S, v27.4S // umull v7.2D, v26.2S, v22.2S // umull v21.2D, v26.2S, v19.2S // uzp2 v17.4S, v24.4S, v24.4S // mul v4.4S, v4.4S, v24.4S // usra v21.2D, v7.2D, #32 // umull v18.2D, v17.2S, v19.2S // uaddlp v4.2D, v4.4S // and v7.16B, v21.16B, v25.16B // umlal v7.2D, v17.2S, v22.2S // shl v4.2D, v4.2D, #32 // usra v18.2D, v21.2D, #32 // umlal v4.2D, v26.2S, v22.2S // usra v18.2D, v7.2D, #32 // mov x23, v4.d[0] // mov x16, v4.d[1] // mul x3, x6, x13 // mov x10, v18.d[0] // mov x17, v18.d[1] // umulh x4, x6, x13 // adds x24, x10, x3 // adcs x10, x16, x4 // adc x17, x17, xzr // adds x24, x24, x3 // adcs x10, 
x10, x4 // adc x17, x17, xzr // adds x23, x23, x21 // adcs x16, x24, x5 // adcs x3, x10, x11 // adc x21, x17, xzr // ldr x17, [x1, #64] // add x5, x17, x17 // mul x11, x17, x17 // and x17, x20, #0xfffffffffffff // mul x4, x5, x17 // extr x17, x19, x20, #52 // and x17, x17, #0xfffffffffffff // mul x10, x5, x17 // lsr x17, x4, #52 // add x24, x10, x17 // lsl x17, x4, #12 // extr x17, x24, x17, #12 // adds x15, x15, x17 // extr x17, x14, x19, #40 // and x17, x17, #0xfffffffffffff // mul x10, x5, x17 // lsr x17, x24, #52 // add x4, x10, x17 // lsl x17, x24, #12 // extr x17, x4, x17, #24 // adcs x7, x7, x17 // extr x17, x12, x14, #28 // and x17, x17, #0xfffffffffffff // mul x10, x5, x17 // lsr x17, x4, #52 // add x24, x10, x17 // lsl x17, x4, #12 // extr x17, x24, x17, #36 // adcs x8, x8, x17 // extr x17, x9, x12, #16 // and x17, x17, #0xfffffffffffff // mul x10, x5, x17 // lsr x17, x24, #52 // add x4, x10, x17 // lsl x17, x24, #12 // extr x17, x4, x17, #48 // adcs x22, x22, x17 // lsr x17, x9, #4 // and x17, x17, #0xfffffffffffff // mul x10, x5, x17 // lsr x17, x4, #52 // add x24, x10, x17 // lsl x17, x4, #12 // extr x4, x24, x17, #60 // extr x17, x2, x9, #56 // and x17, x17, #0xfffffffffffff // mul x10, x5, x17 // lsr x17, x24, #52 // add x24, x10, x17 // lsl x17, x4, #8 // extr x17, x24, x17, #8 // adcs x23, x23, x17 // extr x17, x6, x2, #44 // and x17, x17, #0xfffffffffffff // mul x10, x5, x17 // lsr x17, x24, #52 // add x4, x10, x17 // lsl x17, x24, #12 // extr x17, x4, x17, #20 // adcs x16, x16, x17 // extr x17, x13, x6, #32 // and x17, x17, #0xfffffffffffff // mul x10, x5, x17 // lsr x17, x4, #52 // add x24, x10, x17 // lsl x17, x4, #12 // extr x17, x24, x17, #32 // adcs x3, x3, x17 // lsr x17, x13, #20 // mul x10, x5, x17 // lsr x17, x24, #52 // add x10, x10, x17 // lsl x17, x24, #12 // extr x17, x10, x17, #44 // adcs x4, x21, x17 // lsr x17, x10, #44 // adc x24, x11, x17 // extr x10, x7, x15, #9 // extr x17, x8, x7, #9 // stp x10, x17, [x0] // @slothy:writes=buffer0 // extr x10, x22, x8, #9 // extr x17, x23, x22, #9 // stp x10, x17, [x0, #16] // @slothy:writes=buffer16 // extr x10, x16, x23, #9 // extr x17, x3, x16, #9 // stp x10, x17, [x0, #32] // @slothy:writes=buffer32 // extr x10, x4, x3, #9 // extr x17, x24, x4, #9 // stp x10, x17, [x0, #48] // @slothy:writes=buffer48 // and x10, x15, #0x1ff // lsr x17, x24, #9 // add x17, x10, x17 // str x17, [x0, #64] // @slothy:writes=buffer64 // uzp1 v17.4S, v28.4S, v23.4S // rev64 v4.4S, v28.4S // uzp1 v7.4S, v23.4S, v23.4S // mul v4.4S, v4.4S, v23.4S // uaddlp v4.2D, v4.4S // shl v4.2D, v4.2D, #32 // umlal v4.2D, v7.2S, v17.2S // mov x8, v4.d[0] // mov x22, v4.d[1] // umulh x23, x20, x14 // subs x17, x20, x19 // cneg x4, x17, cc // csetm x24, cc // subs x17, x12, x14 // cneg x17, x17, cc // mul x10, x4, x17 // umulh x17, x4, x17 // cinv x16, x24, cc // eor x3, x10, x16 // eor x4, x17, x16 // adds x24, x8, x23 // adc x10, x23, xzr // umulh x17, x19, x12 // adds x24, x24, x22 // adcs x10, x10, x17 // adc x17, x17, xzr // adds x10, x10, x22 // adc x17, x17, xzr // cmn x16, #0x1 // adcs x24, x24, x3 // adcs x10, x10, x4 // adc x17, x17, x16 // adds x15, x8, x8 // adcs x7, x24, x24 // adcs x8, x10, x10 // adcs x22, x17, x17 // adc x23, xzr, xzr // movi v25.2D, #0xffffffff // uzp2 v19.4S, v16.4S, v16.4S // xtn v26.2S, v1.2D // xtn v22.2S, v16.2D // rev64 v4.4S, v16.4S // umull v7.2D, v26.2S, v22.2S // umull v21.2D, v26.2S, v19.2S // uzp2 v17.4S, v1.4S, v1.4S // mul v4.4S, v4.4S, v1.4S // usra v21.2D, v7.2D, #32 // umull v18.2D, v17.2S, v19.2S // 
uaddlp v4.2D, v4.4S // and v7.16B, v21.16B, v25.16B // umlal v7.2D, v17.2S, v22.2S // shl v4.2D, v4.2D, #32 // usra v18.2D, v21.2D, #32 // umlal v4.2D, v26.2S, v22.2S // usra v18.2D, v7.2D, #32 // mov x21, v4.d[0] // mov x16, v4.d[1] // mul x3, x20, x19 // mov x10, v18.d[0] // mov x17, v18.d[1] // umulh x4, x20, x19 // adds x24, x10, x3 // adcs x10, x16, x4 // adc x17, x17, xzr // adds x5, x24, x3 // adcs x10, x10, x4 // adc x17, x17, xzr // adds x11, x15, x10 // adcs x15, x7, x17 // adcs x7, x8, xzr // adcs x8, x22, xzr // adc x22, x23, xzr // xtn v7.2S, v31.2D // shrn v4.2S, v31.2D, #32 // umull v4.2D, v7.2S, v4.2S // shl v4.2D, v4.2D, #33 // umlal v4.2D, v7.2S, v7.2S // mov x23, v4.d[0] // mov x16, v4.d[1] // mul x3, x14, x12 // umulh x10, x14, x14 // umulh x17, x12, x12 // umulh x4, x14, x12 // adds x24, x10, x3 // adcs x10, x16, x4 // adc x17, x17, xzr // adds x24, x24, x3 // adcs x10, x10, x4 // adc x17, x17, xzr // adds x16, x23, x7 // adcs x3, x24, x8 // adcs x4, x10, x22 // adc x24, x17, xzr // ldp x10, x17, [x0] // @slothy:reads=buffer0 // adds x10, x10, x21 // adcs x17, x17, x5 // stp x10, x17, [x0] // @slothy:writes=buffer0 // ldp x10, x17, [x0, #16] // @slothy:reads=buffer16 // adcs x10, x10, x11 // adcs x17, x17, x15 // stp x10, x17, [x0, #16] // @slothy:writes=buffer16 // ldp x10, x17, [x0, #32] // @slothy:reads=buffer32 // adcs x10, x10, x16 // adcs x17, x17, x3 // stp x10, x17, [x0, #32] // @slothy:writes=buffer32 // ldp x10, x17, [x0, #48] // @slothy:reads=buffer48 // adcs x10, x10, x4 // adcs x17, x17, x24 // stp x10, x17, [x0, #48] // @slothy:writes=buffer48 // ldr x17, [x0, #64] // @slothy:reads=buffer64 // adc x17, x17, xzr // str x17, [x0, #64] // @slothy:writes=buffer64 // movi v25.2D, #0xffffffff // uzp2 v19.4S, v2.4S, v2.4S // xtn v26.2S, v5.2D // xtn v22.2S, v2.2D // rev64 v4.4S, v2.4S // umull v7.2D, v26.2S, v22.2S // umull v21.2D, v26.2S, v19.2S // uzp2 v17.4S, v5.4S, v5.4S // mul v4.4S, v4.4S, v5.4S // usra v21.2D, v7.2D, #32 // umull v18.2D, v17.2S, v19.2S // uaddlp v4.2D, v4.4S // and v7.16B, v21.16B, v25.16B // umlal v7.2D, v17.2S, v22.2S // shl v4.2D, v4.2D, #32 // usra v18.2D, v21.2D, #32 // umlal v4.2D, v26.2S, v22.2S // usra v18.2D, v7.2D, #32 // mov x5, v4.d[0] // mov x4, v4.d[1] // movi v25.2D, #0xffffffff // uzp2 v17.4S, v30.4S, v30.4S // xtn v19.2S, v0.2D // xtn v26.2S, v30.2D // rev64 v4.4S, v30.4S // umull v7.2D, v19.2S, v26.2S // umull v22.2D, v19.2S, v17.2S // uzp2 v21.4S, v0.4S, v0.4S // mul v4.4S, v4.4S, v0.4S // usra v22.2D, v7.2D, #32 // umull v17.2D, v21.2S, v17.2S // uaddlp v4.2D, v4.4S // and v7.16B, v22.16B, v25.16B // umlal v7.2D, v21.2S, v26.2S // shl v4.2D, v4.2D, #32 // usra v17.2D, v22.2D, #32 // umlal v4.2D, v19.2S, v26.2S // usra v17.2D, v7.2D, #32 // mov x24, v4.d[0] // mov x10, v4.d[1] // mov x17, v18.d[0] // adds x4, x4, x17 // mov x17, v18.d[1] // adcs x24, x24, x17 // mov x17, v17.d[0] // adcs x10, x10, x17 // mov x17, v17.d[1] // adc x17, x17, xzr // adds x15, x4, x5 // adcs x4, x24, x4 // adcs x24, x10, x24 // adcs x10, x17, x10 // adc x17, xzr, x17 // adds x7, x4, x5 // adcs x8, x24, x15 // adcs x22, x10, x4 // adcs x23, x17, x24 // adcs x16, xzr, x10 // adc x3, xzr, x17 // subs x17, x14, x12 // cneg x24, x17, cc // csetm x4, cc // subs x17, x13, x6 // cneg x10, x17, cc // mul x17, x24, x10 // umulh x24, x24, x10 // cinv x10, x4, cc // cmn x10, #0x1 // eor x17, x17, x10 // adcs x23, x23, x17 // eor x17, x24, x10 // adcs x16, x16, x17 // adc x3, x3, x10 // subs x17, x20, x19 // cneg x24, x17, cc // csetm x4, cc // subs x17, 
x2, x9 // cneg x10, x17, cc // mul x17, x24, x10 // umulh x24, x24, x10 // cinv x10, x4, cc // cmn x10, #0x1 // eor x17, x17, x10 // adcs x11, x15, x17 // eor x17, x24, x10 // adcs x15, x7, x17 // adcs x7, x8, x10 // adcs x22, x22, x10 // adcs x23, x23, x10 // adcs x16, x16, x10 // adc x3, x3, x10 // subs x17, x19, x12 // cneg x24, x17, cc // csetm x4, cc // subs x17, x13, x2 // cneg x10, x17, cc // mul x17, x24, x10 // umulh x24, x24, x10 // cinv x10, x4, cc // cmn x10, #0x1 // eor x17, x17, x10 // adcs x8, x22, x17 // eor x17, x24, x10 // adcs x23, x23, x17 // adcs x16, x16, x10 // adc x3, x3, x10 // subs x17, x20, x14 // cneg x24, x17, cc // csetm x4, cc // subs x17, x6, x9 // cneg x10, x17, cc // mul x17, x24, x10 // umulh x24, x24, x10 // cinv x10, x4, cc // cmn x10, #0x1 // eor x17, x17, x10 // adcs x22, x15, x17 // eor x17, x24, x10 // adcs x4, x7, x17 // adcs x24, x8, x10 // adcs x23, x23, x10 // adcs x16, x16, x10 // adc x3, x3, x10 // subs x12, x20, x12 // cneg x10, x12, cc // csetm x17, cc // subs x12, x13, x9 // cneg x9, x12, cc // mul x12, x10, x9 // umulh x13, x10, x9 // cinv x9, x17, cc // cmn x9, #0x1 // eor x12, x12, x9 // adcs x4, x4, x12 // eor x12, x13, x9 // adcs x24, x24, x12 // adcs x10, x23, x9 // adcs x17, x16, x9 // adc x13, x3, x9 // subs x19, x19, x14 // cneg x12, x19, cc // csetm x9, cc // subs x6, x6, x2 // cneg x14, x6, cc // mul x19, x12, x14 // umulh x12, x12, x14 // cinv x14, x9, cc // cmn x14, #0x1 // eor x19, x19, x14 // adcs x23, x4, x19 // eor x19, x12, x14 // adcs x16, x24, x19 // adcs x6, x10, x14 // adcs x2, x17, x14 // adc x9, x13, x14 // ldp x12, x14, [x0] // @slothy:reads=buffer0 // extr x19, x6, x16, #8 // adds x10, x19, x12 // extr x19, x2, x6, #8 // adcs x17, x19, x14 // ldp x14, x12, [x0, #16] // @slothy:reads=buffer16 // extr x19, x9, x2, #8 // adcs x13, x19, x14 // and x14, x17, x13 // lsr x19, x9, #8 // adcs x6, x19, x12 // and x9, x14, x6 // ldp x14, x12, [x0, #32] // @slothy:reads=buffer32 // lsl x19, x5, #1 // adcs x2, x19, x14 // and x14, x9, x2 // extr x19, x11, x5, #63 // adcs x3, x19, x12 // and x9, x14, x3 // ldp x14, x12, [x0, #48] // @slothy:reads=buffer48 // extr x19, x22, x11, #63 // adcs x4, x19, x14 // and x14, x9, x4 // extr x19, x23, x22, #63 // adcs x24, x19, x12 // and x12, x14, x24 // ldr x14, [x0, #64] // @slothy:reads=buffer64 // extr x19, x16, x23, #63 // and x19, x19, #0x1ff // adc x19, x14, x19 // lsr x14, x19, #9 // orr x19, x19, #0xfffffffffffffe00 // cmp xzr, xzr // adcs xzr, x10, x14 // adcs xzr, x12, xzr // adcs xzr, x19, xzr // adcs x10, x10, x14 // adcs x17, x17, xzr // adcs x13, x13, xzr // adcs x6, x6, xzr // adcs x2, x2, xzr // adcs x9, x3, xzr // adcs x12, x4, xzr // adcs x14, x24, xzr // adc x19, x19, xzr // and x19, x19, #0x1ff // stp x10, x17, [x0] // @slothy:writes=buffer0 // stp x13, x6, [x0, #16] // @slothy:writes=buffer16 // stp x2, x9, [x0, #32] // @slothy:writes=buffer32 // stp x12, x14, [x0, #48] // @slothy:writes=buffer48 // str x19, [x0, #64] // @slothy:writes=buffer64 // ldp x23, x24, [sp], #16 // ldp x21, x22, [sp], #16 // ldp x19, x20, [sp], #16 // ret // // The bash script used for step 2 is as follows: // // # Store the assembly instructions except the last 'ret', // # callee-register store/loads as, say, 'input.S'. 
// export OUTPUTS="[hint_buffer0,hint_buffer16,hint_buffer32,hint_buffer48,hint_buffer64]" // export RESERVED_REGS="[x18,x25,x26,x27,x28,x29,x30,sp,q8,q9,q10,q11,q12,q13,q14,q15,v8,v9,v10,v11,v12,v13,v14,v15]" // <s2n-bignum>/tools/external/slothy.sh input.S my_out_dir // # my_out_dir/3.opt.s is the optimized assembly. Its output may differ // # from this file since the sequence is non-deterministically chosen. // # Please add 'ret' at the end of the output assembly. S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_sqr_p521) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_p521) .text .balign 4 S2N_BN_SYMBOL(bignum_sqr_p521): CFI_START // Save registers CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) ldr q23, [x1, #32] ldp x9, x2, [x1, #32] ldr q16, [x1, #32] ldr q20, [x1, #48] ldp x6, x13, [x1, #48] rev64 v2.4S, v23.4S mul x14, x9, x2 ldr q31, [x1, #48] subs x22, x9, x2 uzp2 v26.4S, v23.4S, v23.4S mul v30.4S, v2.4S, v16.4S xtn v0.2S, v20.2D csetm x12, cc xtn v21.2S, v16.2D xtn v23.2S, v23.2D umulh x10, x9, x6 rev64 v27.4S, v31.4S umull v2.2D, v21.2S, v26.2S cneg x23, x22, cc uaddlp v25.2D, v30.4S umull v18.2D, v21.2S, v23.2S mul x22, x9, x6 mul v6.4S, v27.4S, v20.4S uzp2 v17.4S, v20.4S, v20.4S shl v20.2D, v25.2D, #32 uzp2 v27.4S, v31.4S, v31.4S mul x16, x2, x13 umlal v20.2D, v21.2S, v23.2S usra v2.2D, v18.2D, #32 adds x8, x22, x10 umull v25.2D, v17.2S, v27.2S xtn v31.2S, v31.2D movi v1.2D, #0xffffffff adc x3, x10, xzr umulh x21, x2, x13 uzp2 v21.4S, v16.4S, v16.4S umull v18.2D, v0.2S, v27.2S subs x19, x13, x6 and v7.16B, v2.16B, v1.16B umull v27.2D, v0.2S, v31.2S cneg x20, x19, cc movi v30.2D, #0xffffffff umull v16.2D, v21.2S, v26.2S umlal v7.2D, v21.2S, v23.2S mul x19, x23, x20 cinv x7, x12, cc uaddlp v6.2D, v6.4S eor x12, x19, x7 adds x11, x8, x16 umulh x10, x23, x20 ldr q1, [x1] usra v16.2D, v2.2D, #32 adcs x19, x3, x21 shl v2.2D, v6.2D, #32 adc x20, x21, xzr adds x17, x19, x16 usra v18.2D, v27.2D, #32 adc x19, x20, xzr cmn x7, #0x1 umlal v2.2D, v0.2S, v31.2S umulh x16, x9, x2 adcs x8, x11, x12 usra v16.2D, v7.2D, #32 ldr x12, [x1, #64] eor x20, x10, x7 umulh x10, x6, x13 mov x23, v2.d[0] mov x3, v2.d[1] adcs x21, x17, x20 usra v25.2D, v18.2D, #32 and v23.16B, v18.16B, v30.16B adc x7, x19, x7 adds x22, x22, x22 ldr q7, [x1, #16] adcs x17, x8, x8 umlal v23.2D, v17.2S, v31.2S mov x19, v16.d[0] mul x11, x12, x12 ldr q4, [x1] usra v25.2D, v23.2D, #32 add x5, x12, x12 adcs x15, x21, x21 ldr q28, [x1] mov x12, v20.d[1] adcs x24, x7, x7 mov x21, v16.d[1] adc x4, xzr, xzr adds x19, x19, x14 ldr q18, [x1, #16] xtn v26.2S, v1.2D adcs x8, x12, x16 adc x21, x21, xzr adds x7, x19, x14 xtn v23.2S, v7.2D rev64 v21.4S, v28.4S adcs x12, x8, x16 ldp x20, x19, [x1] mov x16, v25.d[1] xtn v22.2S, v28.2D adc x14, x21, xzr adds x8, x22, x12 uzp2 v24.4S, v28.4S, v28.4S rev64 v28.4S, v18.4S mul x12, x6, x13 mul v16.4S, v21.4S, v1.4S shrn v31.2S, v7.2D, #32 adcs x22, x17, x14 mov x14, v25.d[0] and x21, x20, #0xfffffffffffff umull v17.2D, v26.2S, v24.2S ldr q2, [x1, #32] adcs x17, x15, xzr ldr q30, [x1, #48] umull v7.2D, v26.2S, v22.2S adcs x15, x24, xzr ldr q0, [x1, #16] movi v6.2D, #0xffffffff adc x4, x4, xzr adds x14, x14, x12 uzp1 v27.4S, v18.4S, v4.4S uzp2 v19.4S, v1.4S, v1.4S adcs x24, x3, x10 mul x3, x5, x21 umull v29.2D, v23.2S, v31.2S ldr q5, [x1] adc x21, x16, xzr adds x16, x14, x12 extr x12, x19, x20, #52 umull v18.2D, v19.2S, v24.2S adcs x24, x24, x10 and x10, x12, #0xfffffffffffff ldp x14, x12, [x1, #16] usra v17.2D, v7.2D, #32 adc x21, x21, xzr adds x23, x23, 
x17 mul x17, x5, x10 shl v21.2D, v29.2D, #33 lsl x10, x3, #12 lsr x1, x3, #52 rev64 v29.4S, v2.4S uaddlp v25.2D, v16.4S add x17, x17, x1 adcs x16, x16, x15 extr x3, x14, x19, #40 mov x15, v20.d[0] extr x10, x17, x10, #12 and x3, x3, #0xfffffffffffff shl v3.2D, v25.2D, #32 and v6.16B, v17.16B, v6.16B mul x1, x5, x3 usra v18.2D, v17.2D, #32 adcs x3, x24, x4 extr x4, x12, x14, #28 umlal v6.2D, v19.2S, v22.2S xtn v20.2S, v2.2D umlal v3.2D, v26.2S, v22.2S movi v26.2D, #0xffffffff lsr x24, x17, #52 and x4, x4, #0xfffffffffffff uzp2 v19.4S, v2.4S, v2.4S add x1, x1, x24 mul x24, x5, x4 lsl x4, x17, #12 xtn v24.2S, v5.2D extr x17, x1, x4, #24 adc x21, x21, xzr umlal v21.2D, v23.2S, v23.2S adds x4, x15, x10 lsl x10, x1, #12 adcs x15, x7, x17 mul v23.4S, v28.4S, v4.4S and x7, x4, #0x1ff lsr x17, x1, #52 umulh x1, x19, x12 uzp2 v17.4S, v5.4S, v5.4S extr x4, x15, x4, #9 add x24, x24, x17 mul v29.4S, v29.4S, v5.4S extr x17, x24, x10, #36 extr x10, x9, x12, #16 uzp1 v28.4S, v4.4S, v4.4S adcs x17, x8, x17 and x8, x10, #0xfffffffffffff umull v16.2D, v24.2S, v20.2S extr x10, x17, x15, #9 mul x15, x5, x8 stp x4, x10, [x0] lsl x4, x24, #12 lsr x8, x9, #4 uaddlp v4.2D, v23.4S and x8, x8, #0xfffffffffffff umull v23.2D, v24.2S, v19.2S mul x8, x5, x8 extr x10, x2, x9, #56 lsr x24, x24, #52 and x10, x10, #0xfffffffffffff add x15, x15, x24 extr x4, x15, x4, #48 mul x24, x5, x10 lsr x10, x15, #52 usra v23.2D, v16.2D, #32 add x10, x8, x10 shl v4.2D, v4.2D, #32 adcs x22, x22, x4 extr x4, x6, x2, #44 lsl x15, x15, #12 lsr x8, x10, #52 extr x15, x10, x15, #60 and x10, x4, #0xfffffffffffff umlal v4.2D, v28.2S, v27.2S add x8, x24, x8 extr x4, x13, x6, #32 mul x24, x5, x10 uzp2 v16.4S, v30.4S, v30.4S lsl x10, x15, #8 rev64 v28.4S, v30.4S and x15, x4, #0xfffffffffffff extr x4, x8, x10, #8 mul x10, x5, x15 lsl x15, x8, #12 adcs x23, x23, x4 lsr x4, x8, #52 lsr x8, x13, #20 add x4, x24, x4 mul x8, x5, x8 lsr x24, x4, #52 extr x15, x4, x15, #20 lsl x4, x4, #12 add x10, x10, x24 adcs x15, x16, x15 extr x4, x10, x4, #32 umulh x5, x20, x14 adcs x3, x3, x4 usra v18.2D, v6.2D, #32 lsl x16, x10, #12 extr x24, x15, x23, #9 lsr x10, x10, #52 uzp2 v27.4S, v0.4S, v0.4S add x8, x8, x10 extr x10, x3, x15, #9 extr x4, x22, x17, #9 and v25.16B, v23.16B, v26.16B lsr x17, x8, #44 extr x15, x8, x16, #44 extr x16, x23, x22, #9 xtn v7.2S, v30.2D mov x8, v4.d[0] stp x24, x10, [x0, #32] uaddlp v30.2D, v29.4S stp x4, x16, [x0, #16] umulh x24, x20, x19 adcs x15, x21, x15 adc x16, x11, x17 subs x11, x20, x19 xtn v5.2S, v0.2D csetm x17, cc extr x3, x15, x3, #9 mov x22, v4.d[1] cneg x21, x11, cc subs x10, x12, x14 mul v31.4S, v28.4S, v0.4S cneg x10, x10, cc cinv x11, x17, cc shl v4.2D, v30.2D, #32 umull v28.2D, v5.2S, v16.2S extr x23, x16, x15, #9 adds x4, x8, x5 mul x17, x21, x10 umull v22.2D, v5.2S, v7.2S adc x15, x5, xzr adds x4, x4, x22 uaddlp v2.2D, v31.4S lsr x5, x16, #9 adcs x16, x15, x1 mov x15, v18.d[0] adc x1, x1, xzr umulh x10, x21, x10 adds x22, x16, x22 umlal v4.2D, v24.2S, v20.2S umull v30.2D, v27.2S, v16.2S stp x3, x23, [x0, #48] add x3, x7, x5 adc x16, x1, xzr usra v28.2D, v22.2D, #32 mul x23, x20, x19 eor x1, x17, x11 cmn x11, #0x1 mov x17, v18.d[1] umull v18.2D, v17.2S, v19.2S adcs x7, x4, x1 eor x1, x10, x11 umlal v25.2D, v17.2S, v20.2S movi v16.2D, #0xffffffff adcs x22, x22, x1 usra v18.2D, v23.2D, #32 umulh x4, x14, x14 adc x1, x16, x11 adds x10, x8, x8 shl v23.2D, v2.2D, #32 str x3, [x0, #64] adcs x5, x7, x7 and v16.16B, v28.16B, v16.16B usra v30.2D, v28.2D, #32 adcs x7, x22, x22 mov x21, v3.d[1] adcs x11, x1, x1 umlal v16.2D, 
v27.2S, v7.2S adc x22, xzr, xzr adds x16, x15, x23 mul x8, x14, x12 umlal v23.2D, v5.2S, v7.2S usra v18.2D, v25.2D, #32 umulh x15, x14, x12 adcs x21, x21, x24 usra v30.2D, v16.2D, #32 adc x1, x17, xzr adds x3, x16, x23 adcs x21, x21, x24 adc x1, x1, xzr adds x24, x10, x21 umulh x21, x12, x12 adcs x16, x5, x1 adcs x10, x7, xzr mov x17, v21.d[1] adcs x23, x11, xzr adc x5, x22, xzr adds x1, x4, x8 adcs x22, x17, x15 ldp x17, x4, [x0] mov x11, v21.d[0] adc x21, x21, xzr adds x1, x1, x8 adcs x15, x22, x15 adc x8, x21, xzr adds x22, x11, x10 mov x21, v3.d[0] adcs x11, x1, x23 ldp x1, x10, [x0, #16] adcs x15, x15, x5 adc x7, x8, xzr adds x8, x17, x21 mov x23, v4.d[1] ldp x5, x21, [x0, #32] adcs x17, x4, x3 ldr x4, [x0, #64] mov x3, v18.d[0] adcs x24, x1, x24 stp x8, x17, [x0] adcs x17, x10, x16 ldp x1, x16, [x0, #48] adcs x5, x5, x22 adcs x8, x21, x11 stp x5, x8, [x0, #32] adcs x1, x1, x15 mov x15, v23.d[1] adcs x21, x16, x7 stp x1, x21, [x0, #48] adc x10, x4, xzr subs x7, x14, x12 mov x16, v18.d[1] cneg x5, x7, cc csetm x4, cc subs x11, x13, x6 mov x8, v23.d[0] cneg x7, x11, cc cinv x21, x4, cc mov x11, v30.d[0] adds x4, x23, x3 mul x22, x5, x7 mov x23, v30.d[1] adcs x8, x8, x16 adcs x16, x15, x11 adc x11, x23, xzr umulh x3, x5, x7 stp x24, x17, [x0, #16] mov x5, v4.d[0] subs x15, x20, x19 cneg x7, x15, cc str x10, [x0, #64] csetm x1, cc subs x24, x2, x9 cneg x17, x24, cc cinv x15, x1, cc adds x23, x4, x5 umulh x1, x7, x17 adcs x24, x8, x4 adcs x10, x16, x8 eor x8, x22, x21 adcs x16, x11, x16 mul x22, x7, x17 eor x17, x1, x15 adc x1, xzr, x11 adds x11, x24, x5 eor x7, x3, x21 adcs x3, x10, x23 adcs x24, x16, x24 adcs x4, x1, x10 eor x10, x22, x15 adcs x16, xzr, x16 adc x1, xzr, x1 cmn x21, #0x1 adcs x8, x4, x8 adcs x22, x16, x7 adc x7, x1, x21 subs x21, x19, x12 csetm x4, cc cneg x1, x21, cc subs x21, x13, x2 cinv x16, x4, cc cneg x4, x21, cc cmn x15, #0x1 adcs x21, x23, x10 mul x23, x1, x4 adcs x11, x11, x17 adcs x3, x3, x15 umulh x1, x1, x4 adcs x24, x24, x15 adcs x8, x8, x15 adcs x22, x22, x15 eor x17, x23, x16 adc x15, x7, x15 subs x7, x20, x14 cneg x7, x7, cc csetm x4, cc subs x10, x20, x12 cneg x23, x10, cc csetm x10, cc subs x12, x6, x9 cinv x20, x4, cc cneg x12, x12, cc cmn x16, #0x1 eor x1, x1, x16 adcs x17, x24, x17 mul x4, x7, x12 adcs x8, x8, x1 umulh x1, x7, x12 adcs x24, x22, x16 adc x7, x15, x16 subs x12, x13, x9 cneg x12, x12, cc cinv x13, x10, cc subs x19, x19, x14 mul x9, x23, x12 cneg x19, x19, cc csetm x10, cc eor x16, x1, x20 subs x22, x6, x2 umulh x12, x23, x12 eor x1, x4, x20 cinv x4, x10, cc cneg x22, x22, cc cmn x20, #0x1 adcs x15, x11, x1 eor x6, x12, x13 adcs x10, x3, x16 adcs x17, x17, x20 eor x23, x9, x13 adcs x2, x8, x20 mul x11, x19, x22 adcs x24, x24, x20 adc x7, x7, x20 cmn x13, #0x1 adcs x3, x10, x23 umulh x22, x19, x22 adcs x17, x17, x6 eor x12, x22, x4 extr x22, x15, x21, #63 adcs x8, x2, x13 extr x21, x21, x5, #63 ldp x16, x23, [x0] adcs x20, x24, x13 eor x1, x11, x4 adc x6, x7, x13 cmn x4, #0x1 ldp x2, x7, [x0, #16] adcs x1, x3, x1 extr x19, x1, x15, #63 adcs x14, x17, x12 extr x1, x14, x1, #63 lsl x17, x5, #1 adcs x8, x8, x4 extr x12, x8, x14, #8 ldp x15, x11, [x0, #32] adcs x9, x20, x4 adc x3, x6, x4 adds x16, x12, x16 extr x6, x9, x8, #8 ldp x14, x12, [x0, #48] extr x8, x3, x9, #8 adcs x20, x6, x23 ldr x24, [x0, #64] lsr x6, x3, #8 adcs x8, x8, x2 and x2, x1, #0x1ff and x1, x20, x8 adcs x4, x6, x7 adcs x3, x17, x15 and x1, x1, x4 adcs x9, x21, x11 and x1, x1, x3 adcs x6, x22, x14 and x1, x1, x9 and x21, x1, x6 adcs x14, x19, x12 adc x1, x24, x2 cmp xzr, 
xzr orr x12, x1, #0xfffffffffffffe00 lsr x1, x1, #9 adcs xzr, x16, x1 and x21, x21, x14 adcs xzr, x21, xzr adcs xzr, x12, xzr adcs x21, x16, x1 adcs x1, x20, xzr adcs x19, x8, xzr stp x21, x1, [x0] adcs x1, x4, xzr adcs x21, x3, xzr stp x19, x1, [x0, #16] adcs x1, x9, xzr stp x21, x1, [x0, #32] adcs x21, x6, xzr adcs x1, x14, xzr stp x21, x1, [x0, #48] adc x1, x12, xzr and x1, x1, #0x1ff str x1, [x0, #64] // Restore regs and return CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_sqr_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
wlsfx/bnbb
2,711
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_add_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Add modulo p_521, z := (x + y) mod p_521, assuming x and y reduced // Inputs x[9], y[9]; output z[9] // // extern void bignum_add_p521(uint64_t z[static 9], const uint64_t x[static 9], // const uint64_t y[static 9]); // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_add_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_add_p521) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_add_p521) .text .balign 4 #define z x0 #define x x1 #define y x2 #define h x3 #define l x4 #define d0 x5 #define d1 x6 #define d2 x7 #define d3 x8 #define d4 x9 #define d5 x10 #define d6 x11 #define d7 x12 #define d8 x13 S2N_BN_SYMBOL(bignum_add_p521): CFI_START // Force carry-in to get s = [d8;d7;d6;d5;d4;d3;d2;d1;d0] = x + y + 1. // We ignore the carry-out, assuming inputs are reduced so there is none. subs xzr, xzr, xzr ldp d0, d1, [x] ldp l, h, [y] adcs d0, d0, l adcs d1, d1, h ldp d2, d3, [x, #16] ldp l, h, [y, #16] adcs d2, d2, l adcs d3, d3, h ldp d4, d5, [x, #32] ldp l, h, [y, #32] adcs d4, d4, l adcs d5, d5, h ldp d6, d7, [x, #48] ldp l, h, [y, #48] adcs d6, d6, l adcs d7, d7, h ldr d8, [x, #64] ldr l, [y, #64] adc d8, d8, l // Now x + y >= p_521 <=> s = x + y + 1 >= 2^521 // Set CF <=> s = x + y + 1 >= 2^521 and make it a mask in l as well subs l, d8, #512 csetm l, cs // Now if CF is set (and l is all 1s), we want (x + y) - p_521 = s - 2^521 // while otherwise we want x + y = s - 1 (from existing CF, which is nice) sbcs d0, d0, xzr and l, l, #512 sbcs d1, d1, xzr sbcs d2, d2, xzr sbcs d3, d3, xzr sbcs d4, d4, xzr sbcs d5, d5, xzr sbcs d6, d6, xzr sbcs d7, d7, xzr sbc d8, d8, l // Store the result stp d0, d1, [z] stp d2, d3, [z, #16] stp d4, d5, [z, #32] stp d6, d7, [z, #48] str d8, [z, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_add_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
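The forced-carry trick above (compute s = x + y + 1, then keep either s - 2^521 or s - 1) also has a direct C restatement. This sketch is illustrative only (invented name ref_add_p521, not constant-time) and assumes both inputs are already reduced below p_521:

#include <stdint.h>

/* Reference sketch of (x + y) mod p_521 for reduced inputs. */
static void ref_add_p521(uint64_t z[9], const uint64_t x[9], const uint64_t y[9])
{
    /* s = x + y + 1; with both inputs below p_521 this fits in 522 bits. */
    uint64_t carry = 1;
    for (int i = 0; i < 9; i++) {
        uint64_t a = x[i], b = y[i];
        uint64_t t = a + b;
        uint64_t c1 = t < a;
        uint64_t u = t + carry;
        carry = c1 | (u < t);
        z[i] = u;
    }

    /* x + y >= p_521  <=>  s >= 2^521  <=>  top limb of s >= 512. */
    if (z[8] >= 512) {
        z[8] -= 512;                        /* s - 2^521 = (x + y) - p_521 */
    } else {
        /* x + y < p_521: the answer is just x + y = s - 1. */
        uint64_t borrow = 1;
        for (int i = 0; i < 9 && borrow; i++) {
            borrow = (z[i] == 0);
            z[i] -= 1;
        }
    }
}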
wlsfx/bnbb
9,749
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_montsqr_p521_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery square, z := (x^2 / 2^576) mod p_521 // Input x[9]; output z[9] // // extern void bignum_montsqr_p521_alt(uint64_t z[static 9], // const uint64_t x[static 9]); // // Does z := (x^2 / 2^576) mod p_521, assuming x < p_521. This means the // Montgomery base is the "native size" 2^{9*64} = 2^576; since p_521 is // a Mersenne prime the basic modular squaring bignum_sqr_p521 can be // considered a Montgomery operation to base 2^521. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p521_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montsqr_p521_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p521_alt) .text .balign 4 #define z x0 #define x x1 #define a0 x2 #define a1 x3 #define a2 x4 #define a3 x5 #define a4 x6 #define a5 x7 #define a6 x8 #define a7 x9 #define a8 x1 // Overwrites input argument at last load #define l x10 #define u0 x11 #define u1 x12 #define u2 x13 #define u3 x14 #define u4 x15 #define u5 x16 #define u6 x17 #define u7 x19 #define u8 x20 #define u9 x21 #define u10 x22 #define u11 x23 #define u12 x24 #define u13 x25 #define u14 x26 #define u15 x27 #define u16 x29 S2N_BN_SYMBOL(bignum_montsqr_p521_alt): CFI_START // It's convenient to have more registers to play with CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_PUSH2(x27,x29) // Load low 8 elements as [a7;a6;a5;a4;a3;a2;a1;a0], set up an initial // window [u8;u7;u6;u5;u4;u3;u2;u1] = 10 + 20 + 30 + 40 + 50 + 60 + 70 ldp a0, a1, [x] mul u1, a0, a1 umulh u2, a0, a1 ldp a2, a3, [x, #16] mul l, a0, a2 umulh u3, a0, a2 adds u2, u2, l ldp a4, a5, [x, #32] mul l, a0, a3 umulh u4, a0, a3 adcs u3, u3, l ldp a6, a7, [x, #48] mul l, a0, a4 umulh u5, a0, a4 adcs u4, u4, l mul l, a0, a5 umulh u6, a0, a5 adcs u5, u5, l mul l, a0, a6 umulh u7, a0, a6 adcs u6, u6, l mul l, a0, a7 umulh u8, a0, a7 adcs u7, u7, l adc u8, u8, xzr // Add in the next diagonal = 21 + 31 + 41 + 51 + 61 + 71 + 54 mul l, a1, a2 adds u3, u3, l mul l, a1, a3 adcs u4, u4, l mul l, a1, a4 adcs u5, u5, l mul l, a1, a5 adcs u6, u6, l mul l, a1, a6 adcs u7, u7, l mul l, a1, a7 adcs u8, u8, l cset u9, cs umulh l, a1, a2 adds u4, u4, l umulh l, a1, a3 adcs u5, u5, l umulh l, a1, a4 adcs u6, u6, l umulh l, a1, a5 adcs u7, u7, l umulh l, a1, a6 adcs u8, u8, l umulh l, a1, a7 adc u9, u9, l mul l, a4, a5 umulh u10, a4, a5 adds u9, u9, l adc u10, u10, xzr // And the next one = 32 + 42 + 52 + 62 + 72 + 64 + 65 mul l, a2, a3 adds u5, u5, l mul l, a2, a4 adcs u6, u6, l mul l, a2, a5 adcs u7, u7, l mul l, a2, a6 adcs u8, u8, l mul l, a2, a7 adcs u9, u9, l mul l, a4, a6 adcs u10, u10, l cset u11, cs umulh l, a2, a3 adds u6, u6, l umulh l, a2, a4 adcs u7, u7, l umulh l, a2, a5 adcs u8, u8, l umulh l, a2, a6 adcs u9, u9, l umulh l, a2, a7 adcs u10, u10, l umulh l, a4, a6 adc u11, u11, l mul l, a5, a6 umulh u12, a5, a6 adds u11, u11, l adc u12, u12, xzr // And the final one = 43 + 53 + 63 + 73 + 74 + 75 + 76 mul l, a3, a4 adds u7, u7, l mul l, a3, a5 adcs u8, u8, l mul l, a3, a6 adcs u9, u9, l mul l, a3, a7 adcs u10, u10, l mul l, a4, a7 adcs u11, u11, l mul l, a5, a7 adcs u12, u12, l cset u13, cs umulh l, a3, a4 adds u8, u8, l umulh l, a3, a5 adcs u9, u9, l umulh l, a3, a6 adcs u10, u10, l umulh 
l, a3, a7 adcs u11, u11, l umulh l, a4, a7 adcs u12, u12, l umulh l, a5, a7 adc u13, u13, l mul l, a6, a7 umulh u14, a6, a7 adds u13, u13, l adc u14, u14, xzr // Double that, with u15 holding the top carry adds u1, u1, u1 adcs u2, u2, u2 adcs u3, u3, u3 adcs u4, u4, u4 adcs u5, u5, u5 adcs u6, u6, u6 adcs u7, u7, u7 adcs u8, u8, u8 adcs u9, u9, u9 adcs u10, u10, u10 adcs u11, u11, u11 adcs u12, u12, u12 adcs u13, u13, u13 adcs u14, u14, u14 cset u15, cs // Add the homogeneous terms 00 + 11 + 22 + 33 + 44 + 55 + 66 + 77 umulh l, a0, a0 mul u0, a0, a0 adds u1, u1, l mul l, a1, a1 adcs u2, u2, l umulh l, a1, a1 adcs u3, u3, l mul l, a2, a2 adcs u4, u4, l umulh l, a2, a2 adcs u5, u5, l mul l, a3, a3 adcs u6, u6, l umulh l, a3, a3 adcs u7, u7, l mul l, a4, a4 adcs u8, u8, l umulh l, a4, a4 adcs u9, u9, l mul l, a5, a5 adcs u10, u10, l umulh l, a5, a5 adcs u11, u11, l mul l, a6, a6 adcs u12, u12, l umulh l, a6, a6 adcs u13, u13, l mul l, a7, a7 adcs u14, u14, l umulh l, a7, a7 adc u15, u15, l // Now load in the top digit a8, and also set up its double and square ldr a8, [x, #64] mul u16, a8, a8 add a8, a8, a8 // Add a8 * [a7;...;a0] into the top of the buffer mul l, a8, a0 adds u8, u8, l mul l, a8, a1 adcs u9, u9, l mul l, a8, a2 adcs u10, u10, l mul l, a8, a3 adcs u11, u11, l mul l, a8, a4 adcs u12, u12, l mul l, a8, a5 adcs u13, u13, l mul l, a8, a6 adcs u14, u14, l mul l, a8, a7 adcs u15, u15, l adc u16, u16, xzr umulh l, a8, a0 adds u9, u9, l umulh l, a8, a1 adcs u10, u10, l umulh l, a8, a2 adcs u11, u11, l umulh l, a8, a3 adcs u12, u12, l umulh l, a8, a4 adcs u13, u13, l umulh l, a8, a5 adcs u14, u14, l umulh l, a8, a6 adcs u15, u15, l umulh l, a8, a7 adc u16, u16, l // Now we have the full product, which we consider as // 2^521 * h + l. Form h + l + 1 subs xzr, xzr, xzr extr l, u9, u8, #9 adcs u0, u0, l extr l, u10, u9, #9 adcs u1, u1, l extr l, u11, u10, #9 adcs u2, u2, l extr l, u12, u11, #9 adcs u3, u3, l extr l, u13, u12, #9 adcs u4, u4, l extr l, u14, u13, #9 adcs u5, u5, l extr l, u15, u14, #9 adcs u6, u6, l extr l, u16, u15, #9 adcs u7, u7, l orr u8, u8, #~0x1FF lsr l, u16, #9 adcs u8, u8, l // Now CF is set if h + l + 1 >= 2^521, which means it's already // the answer, while if ~CF the answer is h + l so we should subtract // 1 (all considered in 521 bits). Hence subtract ~CF and mask. sbcs u0, u0, xzr sbcs u1, u1, xzr sbcs u2, u2, xzr sbcs u3, u3, xzr sbcs u4, u4, xzr sbcs u5, u5, xzr sbcs u6, u6, xzr sbcs u7, u7, xzr sbc u8, u8, xzr and u8, u8, #0x1FF // So far, this has been the same as a pure modular squaring // Now finally the Montgomery ingredient, which is just a 521-bit // rotation by 9*64 - 521 = 55 bits right. lsl l, u0, #9 extr u0, u1, u0, #55 extr u1, u2, u1, #55 extr u2, u3, u2, #55 extr u3, u4, u3, #55 orr u8, u8, l extr u4, u5, u4, #55 extr u5, u6, u5, #55 extr u6, u7, u6, #55 extr u7, u8, u7, #55 lsr u8, u8, #55 // Store back digits of final result stp u0, u1, [z] stp u2, u3, [z, #16] stp u4, u5, [z, #32] stp u6, u7, [z, #48] str u8, [z, #64] // Restore registers and return CFI_POP2(x27,x29) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montsqr_p521_alt) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
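The "Montgomery ingredient" described above, division by 2^576 modulo p_521, is a pure 55-bit right rotation of the 521-bit result, since 2^521 is congruent to 1 mod p_521, making 2^-576 equivalent to 2^-55, i.e. 2^466. A hedged C sketch of just that rotation (invented name ref_rotr521_55; it assumes its 9-limb input is already reduced mod p_521 and is not constant-time):

#include <stdint.h>

/* Reference sketch: rotate a 521-bit value (9 little-endian limbs, top limb
   holding 9 bits) right by 55 bit positions within 521 bits. */
static void ref_rotr521_55(uint64_t z[9], const uint64_t u[9])
{
    uint64_t t[9];
    uint64_t low55 = u[0] & (((uint64_t)1 << 55) - 1);    /* bits that wrap around */

    /* Plain right shift by 55 of the 9-limb value. */
    for (int i = 0; i < 8; i++)
        t[i] = (u[i] >> 55) | (u[i + 1] << 9);
    t[8] = u[8] >> 55;                                    /* 0 for a reduced input */

    /* The wrapped bits land at bit position 521 - 55 = 466 = 7*64 + 18. */
    t[7] |= low55 << 18;
    t[8] |= low55 >> 46;

    for (int i = 0; i < 9; i++) z[i] = t[i];
}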
wlsfx/bnbb
65,489
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/p521_jscalarmul_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Jacobian form scalar multiplication for P-521 // Input scalar[9], point[27]; output res[27] // // extern void p521_jscalarmul_alt // (uint64_t res[static 27], // const uint64_t scalar[static 9], // const uint64_t point[static 27]); // // This function is a variant of its affine point version p521_scalarmul. // Here, input and output points are assumed to be in Jacobian form with // a triple (x,y,z) representing the affine point (x/z^2,y/z^3) when // z is nonzero or the point at infinity (group identity) if z = 0. // // Given scalar = n and point = P, assumed to be on the NIST elliptic // curve P-521, returns a representation of n * P. If the result is the // point at infinity (either because the input point was or because the // scalar was a multiple of p_521) then the output is guaranteed to // represent the point at infinity, i.e. to have its z coordinate zero. // // Standard ARM ABI: X0 = res, X1 = scalar, X2 = point // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jscalarmul_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(p521_jscalarmul_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jscalarmul_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 72 #define JACSIZE (3*NUMSIZE) // Safe copies of input res and additional values in variables. #define tabup x15 #define bf x16 #define sgn x17 #define j x19 #define res x20 // Intermediate variables on the stack. // The table is 16 entries, each of size JACSIZE = 3 * NUMSIZE #define scalarb sp, #(0*NUMSIZE) #define acc sp, #(1*NUMSIZE) #define tabent sp, #(4*NUMSIZE) #define tab sp, #(7*NUMSIZE) // Round up to maintain stack alignment #define NSPACE 3968 #define selectblock(I) \ cmp bf, #(1*I) __LF \ ldp x10, x11, [tabup] __LF \ csel x0, x10, x0, eq __LF \ csel x1, x11, x1, eq __LF \ ldp x10, x11, [tabup, #16] __LF \ csel x2, x10, x2, eq __LF \ csel x3, x11, x3, eq __LF \ ldp x10, x11, [tabup, #32] __LF \ csel x4, x10, x4, eq __LF \ csel x5, x11, x5, eq __LF \ ldp x10, x11, [tabup, #48] __LF \ csel x6, x10, x6, eq __LF \ csel x7, x11, x7, eq __LF \ ldr x10, [tabup, #64] __LF \ csel x8, x10, x8, eq __LF \ add tabup, tabup, #JACSIZE // Loading large constants #define movbig(nn,n3,n2,n1,n0) \ movz nn, n0 __LF \ movk nn, n1, lsl #16 __LF \ movk nn, n2, lsl #32 __LF \ movk nn, n3, lsl #48 S2N_BN_SYMBOL(p521_jscalarmul_alt): CFI_START CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x30) CFI_DEC_SP(NSPACE) // Preserve the "res" input argument; others get processed early. mov res, x0 // Reduce the input scalar mod n_521 and store it to "scalarb". mov x19, x2 add x0, scalarb CFI_BL(Lp521_jscalarmul_alt_bignum_mod_n521_9) mov x2, x19 // Set the tab[0] table entry to the input point = 1 * P, but also // reduce all coordinates modulo p. In principle we assume reduction // as a precondition, but this reduces the scope for surprise, e.g. // making sure that any input with z = 0 is treated as zero, even // if the other coordinates are not in fact reduced. 
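//
// For reference, a hedged C illustration (not the library's API) of the point
// representation the comment above depends on: a Jacobian triple (x, y, z) of
// 9-limb field elements stands for the affine point (x/z^2, y/z^3), and z = 0
// encodes the group identity, which is why z in particular must be reduced
// and preserved exactly even for otherwise unreduced inputs.
//
//     #include <stdint.h>
//     #include <stdbool.h>
//
//     typedef struct {
//         uint64_t x[9], y[9], z[9];   /* three 521-bit coordinates */
//     } p521_jacobian;                 /* hypothetical name, for illustration */
//
//     static bool is_identity(const p521_jacobian *p)
//     {
//         uint64_t acc = 0;
//         for (int i = 0; i < 9; i++) acc |= p->z[i];
//         return acc == 0;             /* point at infinity iff z == 0 */
//     }
//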
add x0, tab mov x1, x19 CFI_BL(Lp521_jscalarmul_alt_bignum_mod_p521_9) add x0, tab+NUMSIZE add x1, x19, #NUMSIZE CFI_BL(Lp521_jscalarmul_alt_bignum_mod_p521_9) add x0, tab+2*NUMSIZE add x1, x19, #(2*NUMSIZE) CFI_BL(Lp521_jscalarmul_alt_bignum_mod_p521_9) // If bit 520 of the scalar is set, then negate the scalar mod n_521, // i.e. do scalar |-> n_521 - scalar, and also the point to compensate // by negating its y coordinate. This further step is not needed by // the indexing scheme (the top window is only a couple of bits either // way), but is convenient to exclude a problem with the specific value // scalar = n_521 - 18, where the last Jacobian addition is of the form // (n_521 - 9) * P + -(9 * P) and hence is a degenerate doubling case. ldp x0, x1, [scalarb] movbig(x10, #0xbb6f, #0xb71e, #0x9138, #0x6409) subs x10, x10, x0 movbig(x11, #0x3bb5, #0xc9b8, #0x899c, #0x47ae) sbcs x11, x11, x1 ldp x2, x3, [scalarb+16] movbig(x12, #0x7fcc, #0x0148, #0xf709, #0xa5d0) sbcs x12, x12, x2 movbig(x13, #0x5186, #0x8783, #0xbf2f, #0x966b) sbcs x13, x13, x3 ldp x4, x5, [scalarb+32] mov x14, 0xfffffffffffffffa sbcs x14, x14, x4 mov x15, 0xffffffffffffffff sbcs x15, x15, x5 ldp x6, x7, [scalarb+48] mov x16, 0xffffffffffffffff sbcs x16, x16, x6 mov x17, 0xffffffffffffffff sbcs x17, x17, x7 ldr x8, [scalarb+64] mov x19, 0x00000000000001ff sbc x19, x19, x8 tst x8, 0x100 csetm x9, ne csel x0, x10, x0, ne csel x1, x11, x1, ne csel x2, x12, x2, ne csel x3, x13, x3, ne csel x4, x14, x4, ne csel x5, x15, x5, ne csel x6, x16, x6, ne csel x7, x17, x7, ne csel x8, x19, x8, ne stp x0, x1, [scalarb] stp x2, x3, [scalarb+16] stp x4, x5, [scalarb+32] stp x6, x7, [scalarb+48] str x8, [scalarb+64] add tabup, tab ldp x0, x1, [tabup, #NUMSIZE] ldp x2, x3, [tabup, #NUMSIZE+16] ldp x4, x5, [tabup, #NUMSIZE+32] ldp x6, x7, [tabup, #NUMSIZE+48] ldr x8, [tabup, #NUMSIZE+64] orr x10, x0, x1 orr x11, x2, x3 orr x12, x4, x5 orr x13, x6, x7 orr x10, x10, x11 orr x12, x12, x13 orr x12, x12, x8 orr x10, x10, x12 cmp x10, xzr csel x9, x9, xzr, ne eor x0, x0, x9 eor x1, x1, x9 eor x2, x2, x9 eor x3, x3, x9 eor x4, x4, x9 eor x5, x5, x9 eor x6, x6, x9 eor x7, x7, x9 and x9, x9, #0x1FF eor x8, x8, x9 stp x0, x1, [tabup, #NUMSIZE] stp x2, x3, [tabup, #NUMSIZE+16] stp x4, x5, [tabup, #NUMSIZE+32] stp x6, x7, [tabup, #NUMSIZE+48] str x8, [tabup, #NUMSIZE+64] // Compute and record tab[1] = 2 * p, ..., tab[15] = 16 * P add x0, tab+JACSIZE*1 add x1, tab CFI_BL(Lp521_jscalarmul_alt_jdouble) add x0, tab+JACSIZE*2 add x1, tab+JACSIZE*1 add x2, tab CFI_BL(Lp521_jscalarmul_alt_jadd) add x0, tab+JACSIZE*3 add x1, tab+JACSIZE*1 CFI_BL(Lp521_jscalarmul_alt_jdouble) add x0, tab+JACSIZE*4 add x1, tab+JACSIZE*3 add x2, tab CFI_BL(Lp521_jscalarmul_alt_jadd) add x0, tab+JACSIZE*5 add x1, tab+JACSIZE*2 CFI_BL(Lp521_jscalarmul_alt_jdouble) add x0, tab+JACSIZE*6 add x1, tab+JACSIZE*5 add x2, tab CFI_BL(Lp521_jscalarmul_alt_jadd) add x0, tab+JACSIZE*7 add x1, tab+JACSIZE*3 CFI_BL(Lp521_jscalarmul_alt_jdouble) add x0, tab+JACSIZE*8 add x1, tab+JACSIZE*7 add x2, tab CFI_BL(Lp521_jscalarmul_alt_jadd) add x0, tab+JACSIZE*9 add x1, tab+JACSIZE*4 CFI_BL(Lp521_jscalarmul_alt_jdouble) add x0, tab+JACSIZE*10 add x1, tab+JACSIZE*9 add x2, tab CFI_BL(Lp521_jscalarmul_alt_jadd) add x0, tab+JACSIZE*11 add x1, tab+JACSIZE*5 CFI_BL(Lp521_jscalarmul_alt_jdouble) add x0, tab+JACSIZE*12 add x1, tab+JACSIZE*11 add x2, tab CFI_BL(Lp521_jscalarmul_alt_jadd) add x0, tab+JACSIZE*13 add x1, tab+JACSIZE*6 CFI_BL(Lp521_jscalarmul_alt_jdouble) add x0, tab+JACSIZE*14 add x1, 
tab+JACSIZE*13 add x2, tab CFI_BL(Lp521_jscalarmul_alt_jadd) add x0, tab+JACSIZE*15 add x1, tab+JACSIZE*7 CFI_BL(Lp521_jscalarmul_alt_jdouble) // Add the recoding constant sum_i(16 * 32^i) to the scalar to allow signed // digits. The digits of the constant, in lowest-to-highest order, are as // follows; they are generated dynamically since none is a simple ARM load. // // 0x0842108421084210 // 0x1084210842108421 // 0x2108421084210842 // 0x4210842108421084 // 0x8421084210842108 // 0x0842108421084210 // 0x1084210842108421 // 0x2108421084210842 // 0x0000000000000084 ldp x0, x1, [scalarb] ldp x2, x3, [scalarb+16] ldp x4, x5, [scalarb+32] ldp x6, x7, [scalarb+48] ldr x8, [scalarb+64] movbig(x10, #0x1084, #0x2108, #0x4210, #0x8421) adds x0, x0, x10, lsr #1 adcs x1, x1, x10 lsl x10, x10, #1 adcs x2, x2, x10 lsl x10, x10, #1 adcs x3, x3, x10 lsl x10, x10, #1 adcs x4, x4, x10 lsr x11, x10, #4 adcs x5, x5, x11 lsr x10, x10, #3 adcs x6, x6, x10 lsl x10, x10, #1 adcs x7, x7, x10 lsl x10, x10, #1 and x10, x10, #0xFF adc x8, x8, x10 // Because of the initial reduction the top bitfield (>= bits 520) is <= 1, // i.e. just a single bit. Record that in "bf", then shift the whole // scalar left 56 bits to align the top of the next bitfield with the MSB // (bits 571..575). lsr bf, x8, #8 extr x8, x8, x7, #8 extr x7, x7, x6, #8 extr x6, x6, x5, #8 extr x5, x5, x4, #8 extr x4, x4, x3, #8 extr x3, x3, x2, #8 extr x2, x2, x1, #8 extr x1, x1, x0, #8 lsl x0, x0, #56 stp x0, x1, [scalarb] stp x2, x3, [scalarb+16] stp x4, x5, [scalarb+32] stp x6, x7, [scalarb+48] str x8, [scalarb+64] // According to the top bit, initialize the accumulator to P or 0. This top // digit, uniquely, is not recoded so there is no sign adjustment to make. // We only really need to adjust the z coordinate to zero, but do all three. add tabup, tab cmp bf, xzr ldp x0, x1, [tabup] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc] ldp x0, x1, [tabup, #16] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+16] ldp x0, x1, [tabup, #32] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+32] ldp x0, x1, [tabup, #48] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+48] ldp x0, x1, [tabup, #64] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+64] ldp x0, x1, [tabup, #80] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+80] ldp x0, x1, [tabup, #96] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+96] ldp x0, x1, [tabup, #112] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+112] ldp x0, x1, [tabup, #128] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+128] ldp x0, x1, [tabup, #144] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+144] ldp x0, x1, [tabup, #160] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+160] ldp x0, x1, [tabup, #176] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+176] ldp x0, x1, [tabup, #192] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+192] ldr x0, [tabup, #208] csel x0, x0, xzr, ne str x0, [acc+208] // Main loop over size-5 bitfields: double 5 times then add signed digit // At each stage we shift the scalar left by 5 bits so we can simply pick // the top 5 bits as the bitfield, saving some fiddle over indexing. 
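//
// For reference: adding the constant sum_i(16 * 32^i) turns each 5-bit window
// w of the adjusted scalar into a signed digit w - 16 in [-16,15], with the
// single remaining top bit handled separately, so only the sixteen multiples
// 1*P..16*P need to be tabulated. Below is a hedged, self-checking C sketch
// at toy scale (W = 6 windows instead of 104); names are illustrative only.
//
//     #include <stdint.h>
//     #include <assert.h>
//
//     #define W 6                             /* toy number of 5-bit windows */
//
//     static uint64_t recode_const(void)      /* sum_{i<W} 16 * 32^i */
//     {
//         uint64_t c = 0;
//         for (int i = 0; i < W; i++) c += (uint64_t)16 << (5 * i);
//         return c;
//     }
//
//     /* Split k < 2^(5*W) into a top bit plus W signed digits in [-16,15]. */
//     static void recode(uint64_t k, int d[W], int *top)
//     {
//         uint64_t t = k + recode_const();
//         for (int i = 0; i < W; i++)
//             d[i] = (int)((t >> (5 * i)) & 31) - 16;
//         *top = (int)(t >> (5 * W));         /* 0 or 1, as in the code below */
//     }
//
//     int main(void)
//     {
//         for (uint64_t k = 0; k < ((uint64_t)1 << (5 * W)); k += 9973) {
//             int d[W], top;
//             recode(k, d, &top);
//             int64_t acc = (int64_t)top << (5 * W);
//             for (int i = 0; i < W; i++)
//                 acc += (int64_t)d[i] * ((int64_t)1 << (5 * i));
//             assert((uint64_t)acc == k);     /* k = top*32^W + sum d[i]*32^i */
//         }
//         return 0;
//     }
//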
mov j, #520 Lp521_jscalarmul_alt_mainloop: sub j, j, #5 add x0, acc add x1, acc CFI_BL(Lp521_jscalarmul_alt_jdouble) add x0, acc add x1, acc CFI_BL(Lp521_jscalarmul_alt_jdouble) add x0, acc add x1, acc CFI_BL(Lp521_jscalarmul_alt_jdouble) add x0, acc add x1, acc CFI_BL(Lp521_jscalarmul_alt_jdouble) add x0, acc add x1, acc CFI_BL(Lp521_jscalarmul_alt_jdouble) // Choose the bitfield and adjust it to sign and magnitude ldp x0, x1, [scalarb] ldp x2, x3, [scalarb+16] ldp x4, x5, [scalarb+32] ldp x6, x7, [scalarb+48] ldr x8, [scalarb+64] lsr bf, x8, #59 extr x8, x8, x7, #59 extr x7, x7, x6, #59 extr x6, x6, x5, #59 extr x5, x5, x4, #59 extr x4, x4, x3, #59 extr x3, x3, x2, #59 extr x2, x2, x1, #59 extr x1, x1, x0, #59 lsl x0, x0, #5 stp x0, x1, [scalarb] stp x2, x3, [scalarb+16] stp x4, x5, [scalarb+32] stp x6, x7, [scalarb+48] str x8, [scalarb+64] subs bf, bf, #16 csetm sgn, lo // sgn = sign of digit (1 = negative) cneg bf, bf, lo // bf = absolute value of digit // Conditionally select the table entry tab[i-1] = i * P in constant time mov x0, xzr mov x1, xzr mov x2, xzr mov x3, xzr mov x4, xzr mov x5, xzr mov x6, xzr mov x7, xzr mov x8, xzr add tabup, tab selectblock(1) selectblock(2) selectblock(3) selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) selectblock(9) selectblock(10) selectblock(11) selectblock(12) selectblock(13) selectblock(14) selectblock(15) selectblock(16) stp x0, x1, [tabent] stp x2, x3, [tabent+16] stp x4, x5, [tabent+32] stp x6, x7, [tabent+48] str x8, [tabent+64] mov x0, xzr mov x1, xzr mov x2, xzr mov x3, xzr mov x4, xzr mov x5, xzr mov x6, xzr mov x7, xzr mov x8, xzr add tabup, tab+2*NUMSIZE selectblock(1) selectblock(2) selectblock(3) selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) selectblock(9) selectblock(10) selectblock(11) selectblock(12) selectblock(13) selectblock(14) selectblock(15) selectblock(16) stp x0, x1, [tabent+2*NUMSIZE] stp x2, x3, [tabent+2*NUMSIZE+16] stp x4, x5, [tabent+2*NUMSIZE+32] stp x6, x7, [tabent+2*NUMSIZE+48] str x8, [tabent+2*NUMSIZE+64] mov x0, xzr mov x1, xzr mov x2, xzr mov x3, xzr mov x4, xzr mov x5, xzr mov x6, xzr mov x7, xzr mov x8, xzr add tabup, tab+NUMSIZE selectblock(1) selectblock(2) selectblock(3) selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) selectblock(9) selectblock(10) selectblock(11) selectblock(12) selectblock(13) selectblock(14) selectblock(15) selectblock(16) // Store it to "tabent" with the y coordinate optionally negated. // This is done carefully to give coordinates < p_521 even in // the degenerate case y = 0 (when z = 0 for points on the curve). orr x10, x0, x1 orr x11, x2, x3 orr x12, x4, x5 orr x13, x6, x7 orr x10, x10, x11 orr x12, x12, x13 orr x12, x12, x8 orr x10, x10, x12 cmp x10, xzr csel sgn, sgn, xzr, ne eor x0, x0, sgn eor x1, x1, sgn eor x2, x2, sgn eor x3, x3, sgn eor x4, x4, sgn eor x5, x5, sgn eor x6, x6, sgn eor x7, x7, sgn and sgn, sgn, #0x1FF eor x8, x8, sgn stp x0, x1, [tabent+NUMSIZE] stp x2, x3, [tabent+NUMSIZE+16] stp x4, x5, [tabent+NUMSIZE+32] stp x6, x7, [tabent+NUMSIZE+48] str x8, [tabent+NUMSIZE+64] // Add to the accumulator add x0, acc add x1, acc add x2, tabent CFI_BL(Lp521_jscalarmul_alt_jadd) cbnz j, Lp521_jscalarmul_alt_mainloop // That's the end of the main loop, and we just need to copy the // result in "acc" to the output. 
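//
// For reference: selectblock() above reads every one of the 16 table entries
// and keeps the wanted one with csel, so the memory access pattern does not
// depend on the secret digit, and the optional negation of y uses the fact
// that for 0 < y < p_521 the value p_521 - y is the bitwise complement of y
// inside 521 bits. A hedged C sketch of both ideas follows; the names are
// illustrative only, not the library's API.
//
//     #include <stdint.h>
//     #include <string.h>
//
//     #define LIMBS 9
//     #define TABLE 16
//
//     static uint64_t eq_mask(uint64_t a, uint64_t b)   /* all-ones iff a == b */
//     {
//         uint64_t d = a ^ b;
//         return (uint64_t)0 - ((((d | (0 - d)) >> 63)) ^ 1);
//     }
//
//     /* Constant-time analogue of the selectblock() scan; digit == 0 leaves
//        the all-zero (infinity) entry in place. */
//     static void select_entry(uint64_t out[LIMBS],
//                              const uint64_t tab[TABLE][LIMBS], uint64_t digit)
//     {
//         memset(out, 0, LIMBS * sizeof out[0]);
//         for (uint64_t i = 1; i <= TABLE; i++) {
//             uint64_t m = eq_mask(digit, i);
//             for (int j = 0; j < LIMBS; j++) out[j] |= m & tab[i - 1][j];
//         }
//     }
//
//     /* Conditional negation of y mod p_521 = 2^521 - 1 by XOR masking; the
//        mask must be forced to zero when y == 0, which is what the OR-tree
//        and csel above arrange. */
//     static void cond_negate(uint64_t y[LIMBS], uint64_t mask)
//     {
//         for (int j = 0; j < 8; j++) y[j] ^= mask;
//         y[8] ^= (mask & 0x1FF);
//     }
//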
ldp x0, x1, [acc] stp x0, x1, [res] ldp x0, x1, [acc+16] stp x0, x1, [res, #16] ldp x0, x1, [acc+32] stp x0, x1, [res, #32] ldp x0, x1, [acc+48] stp x0, x1, [res, #48] ldp x0, x1, [acc+64] stp x0, x1, [res, #64] ldp x0, x1, [acc+80] stp x0, x1, [res, #80] ldp x0, x1, [acc+96] stp x0, x1, [res, #96] ldp x0, x1, [acc+112] stp x0, x1, [res, #112] ldp x0, x1, [acc+128] stp x0, x1, [res, #128] ldp x0, x1, [acc+144] stp x0, x1, [res, #144] ldp x0, x1, [acc+160] stp x0, x1, [res, #160] ldp x0, x1, [acc+176] stp x0, x1, [res, #176] ldp x0, x1, [acc+192] stp x0, x1, [res, #192] ldr x0, [acc+208] str x0, [res, #208] // Restore stack and registers and return CFI_INC_SP(NSPACE) CFI_POP2(x21,x30) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(p521_jscalarmul_alt) // Local copies of subroutines, complete clones at the moment except // that we share multiplication and squaring between the point operations. S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jscalarmul_alt_bignum_mod_p521_9) Lp521_jscalarmul_alt_bignum_mod_p521_9: CFI_START ldr x12, [x1, #64] lsr x2, x12, #9 cmp xzr, xzr ldp x4, x5, [x1] adcs xzr, x4, x2 adcs xzr, x5, xzr ldp x6, x7, [x1, #16] and x3, x6, x7 adcs xzr, x3, xzr ldp x8, x9, [x1, #32] and x3, x8, x9 adcs xzr, x3, xzr ldp x10, x11, [x1, #48] and x3, x10, x11 adcs xzr, x3, xzr orr x3, x12, #0xfffffffffffffe00 adcs x3, x3, xzr adcs x4, x4, x2 adcs x5, x5, xzr adcs x6, x6, xzr adcs x7, x7, xzr adcs x8, x8, xzr adcs x9, x9, xzr adcs x10, x10, xzr adcs x11, x11, xzr adc x12, x12, xzr and x12, x12, #0x1ff stp x4, x5, [x0] stp x6, x7, [x0, #16] stp x8, x9, [x0, #32] stp x10, x11, [x0, #48] str x12, [x0, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jscalarmul_alt_bignum_mod_p521_9) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jscalarmul_alt_bignum_mod_n521_9) Lp521_jscalarmul_alt_bignum_mod_n521_9: CFI_START ldr x14, [x1, #64] lsr x15, x14, #9 add x15, x15, #1 mov x2, #39927 movk x2, #28359, lsl #16 movk x2, #18657, lsl #32 movk x2, #17552, lsl #48 mul x6, x2, x15 mov x3, #47185 movk x3, #30307, lsl #16 movk x3, #13895, lsl #32 movk x3, #50250, lsl #48 mul x7, x3, x15 mov x4, #23087 movk x4, #2294, lsl #16 movk x4, #65207, lsl #32 movk x4, #32819, lsl #48 mul x8, x4, x15 mov x5, #27028 movk x5, #16592, lsl #16 movk x5, #30844, lsl #32 movk x5, #44665, lsl #48 mul x9, x5, x15 lsl x10, x15, #2 add x10, x10, x15 umulh x13, x2, x15 adds x7, x7, x13 umulh x13, x3, x15 adcs x8, x8, x13 umulh x13, x4, x15 adcs x9, x9, x13 umulh x13, x5, x15 adc x10, x10, x13 ldp x12, x13, [x1] adds x6, x6, x12 adcs x7, x7, x13 ldp x12, x13, [x1, #16] adcs x8, x8, x12 adcs x9, x9, x13 ldp x13, x11, [x1, #32] adcs x10, x10, x13 adcs x11, x11, xzr ldp x12, x13, [x1, #48] adcs x12, x12, xzr adcs x13, x13, xzr orr x14, x14, #0xfffffffffffffe00 adcs x14, x14, xzr csetm x15, lo and x2, x2, x15 subs x6, x6, x2 and x3, x3, x15 sbcs x7, x7, x3 and x4, x4, x15 sbcs x8, x8, x4 and x5, x5, x15 sbcs x9, x9, x5 mov x2, #5 and x2, x2, x15 sbcs x10, x10, x2 sbcs x11, x11, xzr sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr and x14, x14, #0x1ff stp x6, x7, [x0] stp x8, x9, [x0, #16] stp x10, x11, [x0, #32] stp x12, x13, [x0, #48] str x14, [x0, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jscalarmul_alt_bignum_mod_n521_9) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jscalarmul_alt_jadd) Lp521_jscalarmul_alt_jadd: CFI_START CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_PUSH2(x27,x28) CFI_PUSH2(x29,x30) CFI_DEC_SP(576) mov x27, x0 mov x28, x1 mov x29, x2 mov x0, sp add x1, x28, #0x90 CFI_BL(Lp521_jscalarmul_alt_sqr_p521) add 
x0, sp, #0x168 add x1, x29, #0x90 CFI_BL(Lp521_jscalarmul_alt_sqr_p521) add x0, sp, #0x1f8 add x1, x29, #0x90 add x2, x28, #0x48 CFI_BL(Lp521_jscalarmul_alt_mul_p521) add x0, sp, #0x48 add x1, x28, #0x90 add x2, x29, #0x48 CFI_BL(Lp521_jscalarmul_alt_mul_p521) add x0, sp, #0x90 mov x1, sp add x2, x29, #0x0 CFI_BL(Lp521_jscalarmul_alt_mul_p521) add x0, sp, #0x120 add x1, sp, #0x168 add x2, x28, #0x0 CFI_BL(Lp521_jscalarmul_alt_mul_p521) add x0, sp, #0x48 mov x1, sp add x2, sp, #0x48 CFI_BL(Lp521_jscalarmul_alt_mul_p521) add x0, sp, #0x1f8 add x1, sp, #0x168 add x2, sp, #0x1f8 CFI_BL(Lp521_jscalarmul_alt_mul_p521) add x0, sp, #0x168 add x1, sp, #0x90 add x2, sp, #0x120 CFI_BL(Lp521_jscalarmul_alt_sub_p521) add x0, sp, #0x48 add x1, sp, #0x48 add x2, sp, #0x1f8 CFI_BL(Lp521_jscalarmul_alt_sub_p521) add x0, sp, #0xd8 add x1, sp, #0x168 CFI_BL(Lp521_jscalarmul_alt_sqr_p521) mov x0, sp add x1, sp, #0x48 CFI_BL(Lp521_jscalarmul_alt_sqr_p521) add x0, sp, #0x120 add x1, sp, #0xd8 add x2, sp, #0x120 CFI_BL(Lp521_jscalarmul_alt_mul_p521) add x0, sp, #0x90 add x1, sp, #0xd8 add x2, sp, #0x90 CFI_BL(Lp521_jscalarmul_alt_mul_p521) mov x0, sp mov x1, sp add x2, sp, #0x120 CFI_BL(Lp521_jscalarmul_alt_sub_p521) add x0, sp, #0xd8 add x1, sp, #0x90 add x2, sp, #0x120 CFI_BL(Lp521_jscalarmul_alt_sub_p521) add x0, sp, #0x168 add x1, sp, #0x168 add x2, x28, #0x90 CFI_BL(Lp521_jscalarmul_alt_mul_p521) mov x0, sp mov x1, sp add x2, sp, #0x90 CFI_BL(Lp521_jscalarmul_alt_sub_p521) add x0, sp, #0x120 add x1, sp, #0x120 mov x2, sp CFI_BL(Lp521_jscalarmul_alt_sub_p521) add x0, sp, #0xd8 add x1, sp, #0xd8 add x2, sp, #0x1f8 CFI_BL(Lp521_jscalarmul_alt_mul_p521) add x0, sp, #0x168 add x1, sp, #0x168 add x2, x29, #0x90 CFI_BL(Lp521_jscalarmul_alt_mul_p521) add x0, sp, #0x120 add x1, sp, #0x48 add x2, sp, #0x120 CFI_BL(Lp521_jscalarmul_alt_mul_p521) add x0, sp, #0x120 add x1, sp, #0x120 add x2, sp, #0xd8 CFI_BL(Lp521_jscalarmul_alt_sub_p521) ldp x0, x1, [x28, #144] ldp x2, x3, [x28, #160] ldp x4, x5, [x28, #176] ldp x6, x7, [x28, #192] ldr x8, [x28, #208] orr x20, x0, x1 orr x21, x2, x3 orr x22, x4, x5 orr x23, x6, x7 orr x20, x20, x21 orr x22, x22, x23 orr x20, x20, x8 orr x20, x20, x22 cmp x20, xzr cset x20, ne ldp x10, x11, [x29, #144] ldp x12, x13, [x29, #160] ldp x14, x15, [x29, #176] ldp x16, x17, [x29, #192] ldr x19, [x29, #208] orr x21, x10, x11 orr x22, x12, x13 orr x23, x14, x15 orr x24, x16, x17 orr x21, x21, x22 orr x23, x23, x24 orr x21, x21, x19 orr x21, x21, x23 csel x0, x0, x10, ne csel x1, x1, x11, ne csel x2, x2, x12, ne csel x3, x3, x13, ne csel x4, x4, x14, ne csel x5, x5, x15, ne csel x6, x6, x16, ne csel x7, x7, x17, ne csel x8, x8, x19, ne cmp x21, xzr cset x21, ne cmp x21, x20 ldp x10, x11, [sp, #360] ldp x12, x13, [sp, #376] ldp x14, x15, [sp, #392] ldp x16, x17, [sp, #408] ldr x19, [sp, #424] csel x0, x0, x10, ne csel x1, x1, x11, ne csel x2, x2, x12, ne csel x3, x3, x13, ne csel x4, x4, x14, ne csel x5, x5, x15, ne csel x6, x6, x16, ne csel x7, x7, x17, ne csel x8, x8, x19, ne stp x0, x1, [sp, #360] stp x2, x3, [sp, #376] stp x4, x5, [sp, #392] stp x6, x7, [sp, #408] str x8, [sp, #424] ldp x20, x21, [x28] ldp x0, x1, [sp] csel x0, x20, x0, cc csel x1, x21, x1, cc ldp x20, x21, [x29] csel x0, x20, x0, hi csel x1, x21, x1, hi ldp x20, x21, [x28, #16] ldp x2, x3, [sp, #16] csel x2, x20, x2, cc csel x3, x21, x3, cc ldp x20, x21, [x29, #16] csel x2, x20, x2, hi csel x3, x21, x3, hi ldp x20, x21, [x28, #32] ldp x4, x5, [sp, #32] csel x4, x20, x4, cc csel x5, x21, x5, cc ldp x20, x21, [x29, #32] csel 
x4, x20, x4, hi csel x5, x21, x5, hi ldp x20, x21, [x28, #48] ldp x6, x7, [sp, #48] csel x6, x20, x6, cc csel x7, x21, x7, cc ldp x20, x21, [x29, #48] csel x6, x20, x6, hi csel x7, x21, x7, hi ldr x20, [x28, #64] ldr x8, [sp, #64] csel x8, x20, x8, cc ldr x21, [x29, #64] csel x8, x21, x8, hi ldp x20, x21, [x28, #72] ldp x10, x11, [sp, #288] csel x10, x20, x10, cc csel x11, x21, x11, cc ldp x20, x21, [x29, #72] csel x10, x20, x10, hi csel x11, x21, x11, hi ldp x20, x21, [x28, #88] ldp x12, x13, [sp, #304] csel x12, x20, x12, cc csel x13, x21, x13, cc ldp x20, x21, [x29, #88] csel x12, x20, x12, hi csel x13, x21, x13, hi ldp x20, x21, [x28, #104] ldp x14, x15, [sp, #320] csel x14, x20, x14, cc csel x15, x21, x15, cc ldp x20, x21, [x29, #104] csel x14, x20, x14, hi csel x15, x21, x15, hi ldp x20, x21, [x28, #120] ldp x16, x17, [sp, #336] csel x16, x20, x16, cc csel x17, x21, x17, cc ldp x20, x21, [x29, #120] csel x16, x20, x16, hi csel x17, x21, x17, hi ldr x20, [x28, #136] ldr x19, [sp, #352] csel x19, x20, x19, cc ldr x21, [x29, #136] csel x19, x21, x19, hi stp x0, x1, [x27] stp x2, x3, [x27, #16] stp x4, x5, [x27, #32] stp x6, x7, [x27, #48] str x8, [x27, #64] ldp x0, x1, [sp, #360] ldp x2, x3, [sp, #376] ldp x4, x5, [sp, #392] ldp x6, x7, [sp, #408] ldr x8, [sp, #424] stp x10, x11, [x27, #72] stp x12, x13, [x27, #88] stp x14, x15, [x27, #104] stp x16, x17, [x27, #120] str x19, [x27, #136] stp x0, x1, [x27, #144] stp x2, x3, [x27, #160] stp x4, x5, [x27, #176] stp x6, x7, [x27, #192] str x8, [x27, #208] CFI_INC_SP(576) CFI_POP2(x29,x30) CFI_POP2(x27,x28) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jscalarmul_alt_jadd) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jscalarmul_alt_jdouble) Lp521_jscalarmul_alt_jdouble: CFI_START CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_PUSH2(x27,x28) CFI_PUSH2(x29,x30) CFI_DEC_SP(512) mov x27, x0 mov x28, x1 mov x0, sp add x1, x28, #0x90 CFI_BL(Lp521_jscalarmul_alt_sqr_p521) add x0, sp, #0x48 add x1, x28, #0x48 CFI_BL(Lp521_jscalarmul_alt_sqr_p521) ldp x5, x6, [x28] ldp x4, x3, [sp] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x28, #16] ldp x4, x3, [sp, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [x28, #32] ldp x4, x3, [sp, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 ldp x11, x12, [x28, #48] ldp x4, x3, [sp, #48] sbcs x11, x11, x4 sbcs x12, x12, x3 ldr x13, [x28, #64] ldr x4, [sp, #64] sbcs x13, x13, x4 sbcs x5, x5, xzr sbcs x6, x6, xzr sbcs x7, x7, xzr sbcs x8, x8, xzr sbcs x9, x9, xzr sbcs x10, x10, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbcs x13, x13, xzr and x13, x13, #0x1ff stp x5, x6, [sp, #216] stp x7, x8, [sp, #232] stp x9, x10, [sp, #248] stp x11, x12, [sp, #264] str x13, [sp, #280] cmp xzr, xzr ldp x5, x6, [x28] ldp x4, x3, [sp] adcs x5, x5, x4 adcs x6, x6, x3 ldp x7, x8, [x28, #16] ldp x4, x3, [sp, #16] adcs x7, x7, x4 adcs x8, x8, x3 ldp x9, x10, [x28, #32] ldp x4, x3, [sp, #32] adcs x9, x9, x4 adcs x10, x10, x3 ldp x11, x12, [x28, #48] ldp x4, x3, [sp, #48] adcs x11, x11, x4 adcs x12, x12, x3 ldr x13, [x28, #64] ldr x4, [sp, #64] adc x13, x13, x4 subs x4, x13, #0x200 csetm x4, cs sbcs x5, x5, xzr and x4, x4, #0x200 sbcs x6, x6, xzr sbcs x7, x7, xzr sbcs x8, x8, xzr sbcs x9, x9, xzr sbcs x10, x10, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbc x13, x13, x4 stp x5, x6, [sp, #144] stp x7, x8, [sp, #160] stp x9, x10, [sp, #176] stp x11, x12, [sp, #192] str x13, [sp, #208] add x0, sp, #0xd8 add x1, sp, #0x90 add x2, sp, #0xd8 
CFI_BL(Lp521_jscalarmul_alt_mul_p521) cmp xzr, xzr ldp x5, x6, [x28, #72] ldp x4, x3, [x28, #144] adcs x5, x5, x4 adcs x6, x6, x3 ldp x7, x8, [x28, #88] ldp x4, x3, [x28, #160] adcs x7, x7, x4 adcs x8, x8, x3 ldp x9, x10, [x28, #104] ldp x4, x3, [x28, #176] adcs x9, x9, x4 adcs x10, x10, x3 ldp x11, x12, [x28, #120] ldp x4, x3, [x28, #192] adcs x11, x11, x4 adcs x12, x12, x3 ldr x13, [x28, #136] ldr x4, [x28, #208] adc x13, x13, x4 subs x4, x13, #0x200 csetm x4, cs sbcs x5, x5, xzr and x4, x4, #0x200 sbcs x6, x6, xzr sbcs x7, x7, xzr sbcs x8, x8, xzr sbcs x9, x9, xzr sbcs x10, x10, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbc x13, x13, x4 stp x5, x6, [sp, #144] stp x7, x8, [sp, #160] stp x9, x10, [sp, #176] stp x11, x12, [sp, #192] str x13, [sp, #208] add x0, sp, #0x120 add x1, x28, #0x0 add x2, sp, #0x48 CFI_BL(Lp521_jscalarmul_alt_mul_p521) add x0, sp, #0x168 add x1, sp, #0xd8 CFI_BL(Lp521_jscalarmul_alt_sqr_p521) add x0, sp, #0x90 add x1, sp, #0x90 CFI_BL(Lp521_jscalarmul_alt_sqr_p521) ldp x6, x7, [sp, #288] mov x1, #0xc mul x3, x1, x6 mul x4, x1, x7 umulh x6, x1, x6 adds x4, x4, x6 umulh x7, x1, x7 ldp x8, x9, [sp, #304] mul x5, x1, x8 mul x6, x1, x9 umulh x8, x1, x8 adcs x5, x5, x7 umulh x9, x1, x9 adcs x6, x6, x8 ldp x10, x11, [sp, #320] mul x7, x1, x10 mul x8, x1, x11 umulh x10, x1, x10 adcs x7, x7, x9 umulh x11, x1, x11 adcs x8, x8, x10 ldp x12, x13, [sp, #336] mul x9, x1, x12 mul x10, x1, x13 umulh x12, x1, x12 adcs x9, x9, x11 umulh x13, x1, x13 adcs x10, x10, x12 ldr x14, [sp, #352] mul x11, x1, x14 adc x11, x11, x13 mov x1, #0x9 ldp x20, x21, [sp, #360] mvn x20, x20 mul x0, x1, x20 umulh x20, x1, x20 adds x3, x3, x0 mvn x21, x21 mul x0, x1, x21 umulh x21, x1, x21 adcs x4, x4, x0 ldp x22, x23, [sp, #376] mvn x22, x22 mul x0, x1, x22 umulh x22, x1, x22 adcs x5, x5, x0 mvn x23, x23 mul x0, x1, x23 umulh x23, x1, x23 adcs x6, x6, x0 ldp x17, x19, [sp, #392] mvn x17, x17 mul x0, x1, x17 umulh x17, x1, x17 adcs x7, x7, x0 mvn x19, x19 mul x0, x1, x19 umulh x19, x1, x19 adcs x8, x8, x0 ldp x2, x16, [sp, #408] mvn x2, x2 mul x0, x1, x2 umulh x2, x1, x2 adcs x9, x9, x0 mvn x16, x16 mul x0, x1, x16 umulh x16, x1, x16 adcs x10, x10, x0 ldr x0, [sp, #424] eor x0, x0, #0x1ff mul x0, x1, x0 adc x11, x11, x0 adds x4, x4, x20 adcs x5, x5, x21 and x15, x4, x5 adcs x6, x6, x22 and x15, x15, x6 adcs x7, x7, x23 and x15, x15, x7 adcs x8, x8, x17 and x15, x15, x8 adcs x9, x9, x19 and x15, x15, x9 adcs x10, x10, x2 and x15, x15, x10 adc x11, x11, x16 lsr x12, x11, #9 orr x11, x11, #0xfffffffffffffe00 cmp xzr, xzr adcs xzr, x3, x12 adcs xzr, x15, xzr adcs xzr, x11, xzr adcs x3, x3, x12 adcs x4, x4, xzr adcs x5, x5, xzr adcs x6, x6, xzr adcs x7, x7, xzr adcs x8, x8, xzr adcs x9, x9, xzr adcs x10, x10, xzr adc x11, x11, xzr and x11, x11, #0x1ff stp x3, x4, [sp, #360] stp x5, x6, [sp, #376] stp x7, x8, [sp, #392] stp x9, x10, [sp, #408] str x11, [sp, #424] ldp x5, x6, [sp, #144] ldp x4, x3, [sp] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #160] ldp x4, x3, [sp, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #176] ldp x4, x3, [sp, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 ldp x11, x12, [sp, #192] ldp x4, x3, [sp, #48] sbcs x11, x11, x4 sbcs x12, x12, x3 ldr x13, [sp, #208] ldr x4, [sp, #64] sbcs x13, x13, x4 sbcs x5, x5, xzr sbcs x6, x6, xzr sbcs x7, x7, xzr sbcs x8, x8, xzr sbcs x9, x9, xzr sbcs x10, x10, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbcs x13, x13, xzr and x13, x13, #0x1ff stp x5, x6, [sp, #144] stp x7, x8, [sp, #160] stp x9, x10, [sp, #176] stp x11, x12, [sp, #192] str x13, [sp, 
#208] mov x0, sp add x1, sp, #0x48 CFI_BL(Lp521_jscalarmul_alt_sqr_p521) add x0, sp, #0xd8 add x1, sp, #0x168 add x2, sp, #0xd8 CFI_BL(Lp521_jscalarmul_alt_mul_p521) ldp x5, x6, [sp, #144] ldp x4, x3, [sp, #72] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #160] ldp x4, x3, [sp, #88] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #176] ldp x4, x3, [sp, #104] sbcs x9, x9, x4 sbcs x10, x10, x3 ldp x11, x12, [sp, #192] ldp x4, x3, [sp, #120] sbcs x11, x11, x4 sbcs x12, x12, x3 ldr x13, [sp, #208] ldr x4, [sp, #136] sbcs x13, x13, x4 sbcs x5, x5, xzr sbcs x6, x6, xzr sbcs x7, x7, xzr sbcs x8, x8, xzr sbcs x9, x9, xzr sbcs x10, x10, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbcs x13, x13, xzr and x13, x13, #0x1ff stp x5, x6, [x27, #144] stp x7, x8, [x27, #160] stp x9, x10, [x27, #176] stp x11, x12, [x27, #192] str x13, [x27, #208] ldp x6, x7, [sp, #288] lsl x3, x6, #2 extr x4, x7, x6, #62 ldp x8, x9, [sp, #304] extr x5, x8, x7, #62 extr x6, x9, x8, #62 ldp x10, x11, [sp, #320] extr x7, x10, x9, #62 extr x8, x11, x10, #62 ldp x12, x13, [sp, #336] extr x9, x12, x11, #62 extr x10, x13, x12, #62 ldr x14, [sp, #352] extr x11, x14, x13, #62 ldp x0, x1, [sp, #360] mvn x0, x0 adds x3, x3, x0 sbcs x4, x4, x1 ldp x0, x1, [sp, #376] sbcs x5, x5, x0 and x15, x4, x5 sbcs x6, x6, x1 and x15, x15, x6 ldp x0, x1, [sp, #392] sbcs x7, x7, x0 and x15, x15, x7 sbcs x8, x8, x1 and x15, x15, x8 ldp x0, x1, [sp, #408] sbcs x9, x9, x0 and x15, x15, x9 sbcs x10, x10, x1 and x15, x15, x10 ldr x0, [sp, #424] eor x0, x0, #0x1ff adc x11, x11, x0 lsr x12, x11, #9 orr x11, x11, #0xfffffffffffffe00 cmp xzr, xzr adcs xzr, x3, x12 adcs xzr, x15, xzr adcs xzr, x11, xzr adcs x3, x3, x12 adcs x4, x4, xzr adcs x5, x5, xzr adcs x6, x6, xzr adcs x7, x7, xzr adcs x8, x8, xzr adcs x9, x9, xzr adcs x10, x10, xzr adc x11, x11, xzr and x11, x11, #0x1ff stp x3, x4, [x27] stp x5, x6, [x27, #16] stp x7, x8, [x27, #32] stp x9, x10, [x27, #48] str x11, [x27, #64] ldp x6, x7, [sp, #216] lsl x3, x6, #1 adds x3, x3, x6 extr x4, x7, x6, #63 adcs x4, x4, x7 ldp x8, x9, [sp, #232] extr x5, x8, x7, #63 adcs x5, x5, x8 extr x6, x9, x8, #63 adcs x6, x6, x9 ldp x10, x11, [sp, #248] extr x7, x10, x9, #63 adcs x7, x7, x10 extr x8, x11, x10, #63 adcs x8, x8, x11 ldp x12, x13, [sp, #264] extr x9, x12, x11, #63 adcs x9, x9, x12 extr x10, x13, x12, #63 adcs x10, x10, x13 ldr x14, [sp, #280] extr x11, x14, x13, #63 adc x11, x11, x14 ldp x20, x21, [sp] mvn x20, x20 lsl x0, x20, #3 adds x3, x3, x0 mvn x21, x21 extr x0, x21, x20, #61 adcs x4, x4, x0 ldp x22, x23, [sp, #16] mvn x22, x22 extr x0, x22, x21, #61 adcs x5, x5, x0 and x15, x4, x5 mvn x23, x23 extr x0, x23, x22, #61 adcs x6, x6, x0 and x15, x15, x6 ldp x20, x21, [sp, #32] mvn x20, x20 extr x0, x20, x23, #61 adcs x7, x7, x0 and x15, x15, x7 mvn x21, x21 extr x0, x21, x20, #61 adcs x8, x8, x0 and x15, x15, x8 ldp x22, x23, [sp, #48] mvn x22, x22 extr x0, x22, x21, #61 adcs x9, x9, x0 and x15, x15, x9 mvn x23, x23 extr x0, x23, x22, #61 adcs x10, x10, x0 and x15, x15, x10 ldr x0, [sp, #64] eor x0, x0, #0x1ff extr x0, x0, x23, #61 adc x11, x11, x0 lsr x12, x11, #9 orr x11, x11, #0xfffffffffffffe00 cmp xzr, xzr adcs xzr, x3, x12 adcs xzr, x15, xzr adcs xzr, x11, xzr adcs x3, x3, x12 adcs x4, x4, xzr adcs x5, x5, xzr adcs x6, x6, xzr adcs x7, x7, xzr adcs x8, x8, xzr adcs x9, x9, xzr adcs x10, x10, xzr adc x11, x11, xzr and x11, x11, #0x1ff stp x3, x4, [x27, #72] stp x5, x6, [x27, #88] stp x7, x8, [x27, #104] stp x9, x10, [x27, #120] str x11, [x27, #136] CFI_INC_SP(512) CFI_POP2(x29,x30) 
CFI_POP2(x27,x28) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jscalarmul_alt_jdouble) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jscalarmul_alt_mul_p521) Lp521_jscalarmul_alt_mul_p521: CFI_START ldp x3, x4, [x1] ldp x5, x6, [x2] mul x15, x3, x5 umulh x16, x3, x5 mul x14, x3, x6 umulh x17, x3, x6 adds x16, x16, x14 ldp x7, x8, [x2, #16] mul x14, x3, x7 umulh x19, x3, x7 adcs x17, x17, x14 mul x14, x3, x8 umulh x20, x3, x8 adcs x19, x19, x14 ldp x9, x10, [x2, #32] mul x14, x3, x9 umulh x21, x3, x9 adcs x20, x20, x14 mul x14, x3, x10 umulh x22, x3, x10 adcs x21, x21, x14 ldp x11, x12, [x2, #48] mul x14, x3, x11 umulh x23, x3, x11 adcs x22, x22, x14 ldr x13, [x2, #64] mul x14, x3, x12 umulh x24, x3, x12 adcs x23, x23, x14 mul x14, x3, x13 umulh x25, x3, x13 adcs x24, x24, x14 adc x25, x25, xzr mul x14, x4, x5 adds x16, x16, x14 mul x14, x4, x6 adcs x17, x17, x14 mul x14, x4, x7 adcs x19, x19, x14 mul x14, x4, x8 adcs x20, x20, x14 mul x14, x4, x9 adcs x21, x21, x14 mul x14, x4, x10 adcs x22, x22, x14 mul x14, x4, x11 adcs x23, x23, x14 mul x14, x4, x12 adcs x24, x24, x14 mul x14, x4, x13 adcs x25, x25, x14 cset x26, cs umulh x14, x4, x5 adds x17, x17, x14 umulh x14, x4, x6 adcs x19, x19, x14 umulh x14, x4, x7 adcs x20, x20, x14 umulh x14, x4, x8 adcs x21, x21, x14 umulh x14, x4, x9 adcs x22, x22, x14 umulh x14, x4, x10 adcs x23, x23, x14 umulh x14, x4, x11 adcs x24, x24, x14 umulh x14, x4, x12 adcs x25, x25, x14 umulh x14, x4, x13 adc x26, x26, x14 stp x15, x16, [sp, #432] ldp x3, x4, [x1, #16] mul x14, x3, x5 adds x17, x17, x14 mul x14, x3, x6 adcs x19, x19, x14 mul x14, x3, x7 adcs x20, x20, x14 mul x14, x3, x8 adcs x21, x21, x14 mul x14, x3, x9 adcs x22, x22, x14 mul x14, x3, x10 adcs x23, x23, x14 mul x14, x3, x11 adcs x24, x24, x14 mul x14, x3, x12 adcs x25, x25, x14 mul x14, x3, x13 adcs x26, x26, x14 cset x15, cs umulh x14, x3, x5 adds x19, x19, x14 umulh x14, x3, x6 adcs x20, x20, x14 umulh x14, x3, x7 adcs x21, x21, x14 umulh x14, x3, x8 adcs x22, x22, x14 umulh x14, x3, x9 adcs x23, x23, x14 umulh x14, x3, x10 adcs x24, x24, x14 umulh x14, x3, x11 adcs x25, x25, x14 umulh x14, x3, x12 adcs x26, x26, x14 umulh x14, x3, x13 adc x15, x15, x14 mul x14, x4, x5 adds x19, x19, x14 mul x14, x4, x6 adcs x20, x20, x14 mul x14, x4, x7 adcs x21, x21, x14 mul x14, x4, x8 adcs x22, x22, x14 mul x14, x4, x9 adcs x23, x23, x14 mul x14, x4, x10 adcs x24, x24, x14 mul x14, x4, x11 adcs x25, x25, x14 mul x14, x4, x12 adcs x26, x26, x14 mul x14, x4, x13 adcs x15, x15, x14 cset x16, cs umulh x14, x4, x5 adds x20, x20, x14 umulh x14, x4, x6 adcs x21, x21, x14 umulh x14, x4, x7 adcs x22, x22, x14 umulh x14, x4, x8 adcs x23, x23, x14 umulh x14, x4, x9 adcs x24, x24, x14 umulh x14, x4, x10 adcs x25, x25, x14 umulh x14, x4, x11 adcs x26, x26, x14 umulh x14, x4, x12 adcs x15, x15, x14 umulh x14, x4, x13 adc x16, x16, x14 stp x17, x19, [sp, #448] ldp x3, x4, [x1, #32] mul x14, x3, x5 adds x20, x20, x14 mul x14, x3, x6 adcs x21, x21, x14 mul x14, x3, x7 adcs x22, x22, x14 mul x14, x3, x8 adcs x23, x23, x14 mul x14, x3, x9 adcs x24, x24, x14 mul x14, x3, x10 adcs x25, x25, x14 mul x14, x3, x11 adcs x26, x26, x14 mul x14, x3, x12 adcs x15, x15, x14 mul x14, x3, x13 adcs x16, x16, x14 cset x17, cs umulh x14, x3, x5 adds x21, x21, x14 umulh x14, x3, x6 adcs x22, x22, x14 umulh x14, x3, x7 adcs x23, x23, x14 umulh x14, x3, x8 adcs x24, x24, x14 umulh x14, x3, x9 adcs x25, x25, x14 umulh x14, x3, x10 adcs x26, x26, x14 umulh x14, x3, x11 adcs x15, x15, x14 
umulh x14, x3, x12 adcs x16, x16, x14 umulh x14, x3, x13 adc x17, x17, x14 mul x14, x4, x5 adds x21, x21, x14 mul x14, x4, x6 adcs x22, x22, x14 mul x14, x4, x7 adcs x23, x23, x14 mul x14, x4, x8 adcs x24, x24, x14 mul x14, x4, x9 adcs x25, x25, x14 mul x14, x4, x10 adcs x26, x26, x14 mul x14, x4, x11 adcs x15, x15, x14 mul x14, x4, x12 adcs x16, x16, x14 mul x14, x4, x13 adcs x17, x17, x14 cset x19, cs umulh x14, x4, x5 adds x22, x22, x14 umulh x14, x4, x6 adcs x23, x23, x14 umulh x14, x4, x7 adcs x24, x24, x14 umulh x14, x4, x8 adcs x25, x25, x14 umulh x14, x4, x9 adcs x26, x26, x14 umulh x14, x4, x10 adcs x15, x15, x14 umulh x14, x4, x11 adcs x16, x16, x14 umulh x14, x4, x12 adcs x17, x17, x14 umulh x14, x4, x13 adc x19, x19, x14 stp x20, x21, [sp, #464] ldp x3, x4, [x1, #48] mul x14, x3, x5 adds x22, x22, x14 mul x14, x3, x6 adcs x23, x23, x14 mul x14, x3, x7 adcs x24, x24, x14 mul x14, x3, x8 adcs x25, x25, x14 mul x14, x3, x9 adcs x26, x26, x14 mul x14, x3, x10 adcs x15, x15, x14 mul x14, x3, x11 adcs x16, x16, x14 mul x14, x3, x12 adcs x17, x17, x14 mul x14, x3, x13 adcs x19, x19, x14 cset x20, cs umulh x14, x3, x5 adds x23, x23, x14 umulh x14, x3, x6 adcs x24, x24, x14 umulh x14, x3, x7 adcs x25, x25, x14 umulh x14, x3, x8 adcs x26, x26, x14 umulh x14, x3, x9 adcs x15, x15, x14 umulh x14, x3, x10 adcs x16, x16, x14 umulh x14, x3, x11 adcs x17, x17, x14 umulh x14, x3, x12 adcs x19, x19, x14 umulh x14, x3, x13 adc x20, x20, x14 mul x14, x4, x5 adds x23, x23, x14 mul x14, x4, x6 adcs x24, x24, x14 mul x14, x4, x7 adcs x25, x25, x14 mul x14, x4, x8 adcs x26, x26, x14 mul x14, x4, x9 adcs x15, x15, x14 mul x14, x4, x10 adcs x16, x16, x14 mul x14, x4, x11 adcs x17, x17, x14 mul x14, x4, x12 adcs x19, x19, x14 mul x14, x4, x13 adcs x20, x20, x14 cset x21, cs umulh x14, x4, x5 adds x24, x24, x14 umulh x14, x4, x6 adcs x25, x25, x14 umulh x14, x4, x7 adcs x26, x26, x14 umulh x14, x4, x8 adcs x15, x15, x14 umulh x14, x4, x9 adcs x16, x16, x14 umulh x14, x4, x10 adcs x17, x17, x14 umulh x14, x4, x11 adcs x19, x19, x14 umulh x14, x4, x12 adcs x20, x20, x14 umulh x14, x4, x13 adc x21, x21, x14 stp x22, x23, [sp, #480] ldr x3, [x1, #64] mul x14, x3, x5 adds x24, x24, x14 mul x14, x3, x6 adcs x25, x25, x14 mul x14, x3, x7 adcs x26, x26, x14 mul x14, x3, x8 adcs x15, x15, x14 mul x14, x3, x9 adcs x16, x16, x14 mul x14, x3, x10 adcs x17, x17, x14 mul x14, x3, x11 adcs x19, x19, x14 mul x14, x3, x12 adcs x20, x20, x14 mul x14, x3, x13 adc x21, x21, x14 umulh x14, x3, x5 adds x25, x25, x14 umulh x14, x3, x6 adcs x26, x26, x14 umulh x14, x3, x7 adcs x15, x15, x14 umulh x14, x3, x8 adcs x16, x16, x14 umulh x14, x3, x9 adcs x17, x17, x14 umulh x14, x3, x10 adcs x19, x19, x14 umulh x14, x3, x11 adcs x20, x20, x14 umulh x14, x3, x12 adc x21, x21, x14 cmp xzr, xzr ldp x5, x6, [sp, #432] extr x14, x25, x24, #9 adcs x5, x5, x14 extr x14, x26, x25, #9 adcs x6, x6, x14 ldp x7, x8, [sp, #448] extr x14, x15, x26, #9 adcs x7, x7, x14 extr x14, x16, x15, #9 adcs x8, x8, x14 ldp x9, x10, [sp, #464] extr x14, x17, x16, #9 adcs x9, x9, x14 extr x14, x19, x17, #9 adcs x10, x10, x14 ldp x11, x12, [sp, #480] extr x14, x20, x19, #9 adcs x11, x11, x14 extr x14, x21, x20, #9 adcs x12, x12, x14 orr x13, x24, #0xfffffffffffffe00 lsr x14, x21, #9 adcs x13, x13, x14 sbcs x5, x5, xzr sbcs x6, x6, xzr sbcs x7, x7, xzr sbcs x8, x8, xzr sbcs x9, x9, xzr sbcs x10, x10, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbc x13, x13, xzr and x13, x13, #0x1ff stp x5, x6, [x0] stp x7, x8, [x0, #16] stp x9, x10, [x0, #32] stp x11, x12, [x0, 
#48] str x13, [x0, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jscalarmul_alt_mul_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jscalarmul_alt_sqr_p521) Lp521_jscalarmul_alt_sqr_p521: CFI_START ldp x2, x3, [x1] mul x11, x2, x3 umulh x12, x2, x3 ldp x4, x5, [x1, #16] mul x10, x2, x4 umulh x13, x2, x4 adds x12, x12, x10 ldp x6, x7, [x1, #32] mul x10, x2, x5 umulh x14, x2, x5 adcs x13, x13, x10 ldp x8, x9, [x1, #48] mul x10, x2, x6 umulh x15, x2, x6 adcs x14, x14, x10 mul x10, x2, x7 umulh x16, x2, x7 adcs x15, x15, x10 mul x10, x2, x8 umulh x17, x2, x8 adcs x16, x16, x10 mul x10, x2, x9 umulh x19, x2, x9 adcs x17, x17, x10 adc x19, x19, xzr mul x10, x3, x4 adds x13, x13, x10 mul x10, x3, x5 adcs x14, x14, x10 mul x10, x3, x6 adcs x15, x15, x10 mul x10, x3, x7 adcs x16, x16, x10 mul x10, x3, x8 adcs x17, x17, x10 mul x10, x3, x9 adcs x19, x19, x10 cset x20, cs umulh x10, x3, x4 adds x14, x14, x10 umulh x10, x3, x5 adcs x15, x15, x10 umulh x10, x3, x6 adcs x16, x16, x10 umulh x10, x3, x7 adcs x17, x17, x10 umulh x10, x3, x8 adcs x19, x19, x10 umulh x10, x3, x9 adc x20, x20, x10 mul x10, x6, x7 umulh x21, x6, x7 adds x20, x20, x10 adc x21, x21, xzr mul x10, x4, x5 adds x15, x15, x10 mul x10, x4, x6 adcs x16, x16, x10 mul x10, x4, x7 adcs x17, x17, x10 mul x10, x4, x8 adcs x19, x19, x10 mul x10, x4, x9 adcs x20, x20, x10 mul x10, x6, x8 adcs x21, x21, x10 cset x22, cs umulh x10, x4, x5 adds x16, x16, x10 umulh x10, x4, x6 adcs x17, x17, x10 umulh x10, x4, x7 adcs x19, x19, x10 umulh x10, x4, x8 adcs x20, x20, x10 umulh x10, x4, x9 adcs x21, x21, x10 umulh x10, x6, x8 adc x22, x22, x10 mul x10, x7, x8 umulh x23, x7, x8 adds x22, x22, x10 adc x23, x23, xzr mul x10, x5, x6 adds x17, x17, x10 mul x10, x5, x7 adcs x19, x19, x10 mul x10, x5, x8 adcs x20, x20, x10 mul x10, x5, x9 adcs x21, x21, x10 mul x10, x6, x9 adcs x22, x22, x10 mul x10, x7, x9 adcs x23, x23, x10 cset x24, cs umulh x10, x5, x6 adds x19, x19, x10 umulh x10, x5, x7 adcs x20, x20, x10 umulh x10, x5, x8 adcs x21, x21, x10 umulh x10, x5, x9 adcs x22, x22, x10 umulh x10, x6, x9 adcs x23, x23, x10 umulh x10, x7, x9 adc x24, x24, x10 mul x10, x8, x9 umulh x25, x8, x9 adds x24, x24, x10 adc x25, x25, xzr adds x11, x11, x11 adcs x12, x12, x12 adcs x13, x13, x13 adcs x14, x14, x14 adcs x15, x15, x15 adcs x16, x16, x16 adcs x17, x17, x17 adcs x19, x19, x19 adcs x20, x20, x20 adcs x21, x21, x21 adcs x22, x22, x22 adcs x23, x23, x23 adcs x24, x24, x24 adcs x25, x25, x25 cset x26, cs umulh x10, x2, x2 adds x11, x11, x10 mul x10, x3, x3 adcs x12, x12, x10 umulh x10, x3, x3 adcs x13, x13, x10 mul x10, x4, x4 adcs x14, x14, x10 umulh x10, x4, x4 adcs x15, x15, x10 mul x10, x5, x5 adcs x16, x16, x10 umulh x10, x5, x5 adcs x17, x17, x10 mul x10, x6, x6 adcs x19, x19, x10 umulh x10, x6, x6 adcs x20, x20, x10 mul x10, x7, x7 adcs x21, x21, x10 umulh x10, x7, x7 adcs x22, x22, x10 mul x10, x8, x8 adcs x23, x23, x10 umulh x10, x8, x8 adcs x24, x24, x10 mul x10, x9, x9 adcs x25, x25, x10 umulh x10, x9, x9 adc x26, x26, x10 ldr x1, [x1, #64] add x1, x1, x1 mul x10, x1, x2 adds x19, x19, x10 umulh x10, x1, x2 adcs x20, x20, x10 mul x10, x1, x4 adcs x21, x21, x10 umulh x10, x1, x4 adcs x22, x22, x10 mul x10, x1, x6 adcs x23, x23, x10 umulh x10, x1, x6 adcs x24, x24, x10 mul x10, x1, x8 adcs x25, x25, x10 umulh x10, x1, x8 adcs x26, x26, x10 lsr x4, x1, #1 mul x4, x4, x4 adc x4, x4, xzr mul x10, x1, x3 adds x20, x20, x10 umulh x10, x1, x3 adcs x21, x21, x10 mul x10, x1, x5 adcs x22, x22, x10 umulh x10, x1, x5 adcs x23, x23, x10 mul x10, x1, x7 adcs x24, x24, x10 
umulh x10, x1, x7 adcs x25, x25, x10 mul x10, x1, x9 adcs x26, x26, x10 umulh x10, x1, x9 adc x4, x4, x10 mul x2, x2, x2 cmp xzr, xzr extr x10, x20, x19, #9 adcs x2, x2, x10 extr x10, x21, x20, #9 adcs x11, x11, x10 extr x10, x22, x21, #9 adcs x12, x12, x10 extr x10, x23, x22, #9 adcs x13, x13, x10 extr x10, x24, x23, #9 adcs x14, x14, x10 extr x10, x25, x24, #9 adcs x15, x15, x10 extr x10, x26, x25, #9 adcs x16, x16, x10 extr x10, x4, x26, #9 adcs x17, x17, x10 orr x19, x19, #0xfffffffffffffe00 lsr x10, x4, #9 adcs x19, x19, x10 sbcs x2, x2, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbcs x13, x13, xzr sbcs x14, x14, xzr sbcs x15, x15, xzr sbcs x16, x16, xzr sbcs x17, x17, xzr sbc x19, x19, xzr and x19, x19, #0x1ff stp x2, x11, [x0] stp x12, x13, [x0, #16] stp x14, x15, [x0, #32] stp x16, x17, [x0, #48] str x19, [x0, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jscalarmul_alt_sqr_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jscalarmul_alt_sub_p521) Lp521_jscalarmul_alt_sub_p521: CFI_START ldp x5, x6, [x1] ldp x4, x3, [x2] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x1, #16] ldp x4, x3, [x2, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [x1, #32] ldp x4, x3, [x2, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 ldp x11, x12, [x1, #48] ldp x4, x3, [x2, #48] sbcs x11, x11, x4 sbcs x12, x12, x3 ldr x13, [x1, #64] ldr x4, [x2, #64] sbcs x13, x13, x4 sbcs x5, x5, xzr sbcs x6, x6, xzr sbcs x7, x7, xzr sbcs x8, x8, xzr sbcs x9, x9, xzr sbcs x10, x10, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbcs x13, x13, xzr and x13, x13, #0x1ff stp x5, x6, [x0] stp x7, x8, [x0, #16] stp x9, x10, [x0, #32] stp x11, x12, [x0, #48] str x13, [x0, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jscalarmul_alt_sub_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
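//
// For reference: Lp521_jscalarmul_alt_sub_p521 above computes (x - y) mod
// p_521 by doing the plain 9-limb subtraction and then folding the final
// borrow back in with a second sbcs pass before truncating to 521 bits;
// because the wraparound added 2^576 and the truncation discards multiples of
// 2^521, subtracting the borrow once more amounts to adding p_521 = 2^521 - 1.
// Below is a hedged, self-checking single-limb C sketch with the toy Mersenne
// prime 2^61 - 1 standing in for p_521 (illustrative only, not library API).
//
//     #include <stdint.h>
//     #include <assert.h>
//
//     #define K 61
//     #define P (((uint64_t)1 << K) - 1)       /* toy stand-in for p_521 */
//
//     static uint64_t sub_mod_p(uint64_t x, uint64_t y)   /* x, y < P */
//     {
//         uint64_t d = x - y;                  /* wraps mod 2^64 when x < y */
//         uint64_t borrow = (x < y);           /* final borrow of the chain */
//         return (d - borrow) & P;             /* == x - y + P when it wrapped */
//     }
//
//     int main(void)
//     {
//         for (uint64_t x = 0; x < P; x += P / 5 + 1)
//             for (uint64_t y = 0; y < P; y += P / 7 + 1)
//                 assert(sub_mod_p(x, y) == (x >= y ? x - y : x - y + P));
//         return 0;
//     }
//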
wlsfx/bnbb
41,338
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_montmul_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery multiply, z := (x * y / 2^576) mod p_521 // Inputs x[9], y[9]; output z[9] // // extern void bignum_montmul_p521(uint64_t z[static 9], // const uint64_t x[static 9], // const uint64_t y[static 9]); // // Does z := (x * y / 2^576) mod p_521, assuming x < p_521, y < p_521. This // means the Montgomery base is the "native size" 2^{9*64} = 2^576; since // p_521 is a Mersenne prime the basic modular multiplication bignum_mul_p521 // can be considered a Montgomery operation to base 2^521. // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" // bignum_montmul_p521 is functionally equivalent to // unopt/bignum_montmul_p521_base. // It is written in a way that // 1. A subset of scalar multiplications in bignum_montmul_p384 are carefully // chosen and vectorized // 2. The vectorized assembly is rescheduled using the SLOTHY superoptimizer. // https://github.com/slothy-optimizer/slothy // // The output program of step 1. is as follows: // // stp x19, x20, [sp, #-16]! // stp x21, x22, [sp, #-16]! // stp x23, x24, [sp, #-16]! // stp x25, x26, [sp, #-16]! // sub sp, sp, #80 // ldp x14, x7, [x1] // ldp x3, x25, [x1, #16] // ldp x10, x24, [x2] // ldr q0, [x1] // ldr q25, [x2] // ldp x12, x6, [x2, #16] // movi v18.2D, #0x00000000ffffffff // uzp2 v3.4S, v25.4S, v25.4S // xtn v26.2S, v0.2D // xtn v22.2S, v25.2D // rev64 v24.4S, v25.4S // umull v19.2D, v26.2S, v22.2S // umull v25.2D, v26.2S, v3.2S // uzp2 v20.4S, v0.4S, v0.4S // mul v0.4S, v24.4S, v0.4S // usra v25.2D, v19.2D, #32 // umull v6.2D, v20.2S, v3.2S // uaddlp v0.2D, v0.4S // and v18.16B, v25.16B, v18.16B // umlal v18.2D, v20.2S, v22.2S // shl v0.2D, v0.2D, #32 // usra v6.2D, v25.2D, #32 // umlal v0.2D, v26.2S, v22.2S // usra v6.2D, v18.2D, #32 // mov x23, v0.d[0] // mov x16, v0.d[1] // mul x5, x3, x12 // mul x21, x25, x6 // mov x19, v6.d[0] // adds x16, x16, x19 // mov x19, v6.d[1] // adcs x5, x5, x19 // umulh x19, x3, x12 // adcs x21, x21, x19 // umulh x19, x25, x6 // adc x19, x19, xzr // adds x8, x16, x23 // adcs x16, x5, x16 // adcs x5, x21, x5 // adcs x21, x19, x21 // adc x19, xzr, x19 // adds x11, x16, x23 // adcs x15, x5, x8 // adcs x16, x21, x16 // adcs x5, x19, x5 // adcs x21, xzr, x21 // adc x19, xzr, x19 // subs x20, x3, x25 // cneg x20, x20, cc // csetm x9, cc // subs x13, x6, x12 // cneg x13, x13, cc // mul x26, x20, x13 // umulh x20, x20, x13 // cinv x9, x9, cc // cmn x9, #0x1 // eor x13, x26, x9 // adcs x5, x5, x13 // eor x20, x20, x9 // adcs x21, x21, x20 // adc x19, x19, x9 // subs x20, x14, x7 // cneg x20, x20, cc // csetm x9, cc // subs x13, x24, x10 // cneg x13, x13, cc // mul x26, x20, x13 // umulh x20, x20, x13 // cinv x9, x9, cc // cmn x9, #0x1 // eor x13, x26, x9 // adcs x8, x8, x13 // eor x20, x20, x9 // adcs x11, x11, x20 // adcs x15, x15, x9 // adcs x16, x16, x9 // adcs x5, x5, x9 // adcs x21, x21, x9 // adc x19, x19, x9 // subs x20, x7, x25 // cneg x20, x20, cc // csetm x9, cc // subs x13, x6, x24 // cneg x13, x13, cc // mul x26, x20, x13 // umulh x20, x20, x13 // cinv x9, x9, cc // cmn x9, #0x1 // eor x13, x26, x9 // adcs x16, x16, x13 // eor x20, x20, x9 // adcs x5, x5, x20 // adcs x21, x21, x9 // adc x19, x19, x9 // subs x20, x14, x3 // cneg x20, x20, cc // csetm x9, cc // subs x13, x12, x10 // 
cneg x13, x13, cc // mul x26, x20, x13 // umulh x20, x20, x13 // cinv x9, x9, cc // cmn x9, #0x1 // eor x13, x26, x9 // adcs x11, x11, x13 // eor x20, x20, x9 // adcs x15, x15, x20 // adcs x16, x16, x9 // adcs x5, x5, x9 // adcs x21, x21, x9 // adc x19, x19, x9 // subs x25, x14, x25 // cneg x25, x25, cc // csetm x20, cc // subs x10, x6, x10 // cneg x10, x10, cc // mul x6, x25, x10 // umulh x25, x25, x10 // cinv x10, x20, cc // cmn x10, #0x1 // eor x6, x6, x10 // adcs x6, x15, x6 // eor x25, x25, x10 // adcs x25, x16, x25 // adcs x16, x5, x10 // adcs x5, x21, x10 // adc x10, x19, x10 // subs x7, x7, x3 // cneg x7, x7, cc // csetm x3, cc // subs x24, x12, x24 // cneg x24, x24, cc // mul x12, x7, x24 // umulh x7, x7, x24 // cinv x3, x3, cc // cmn x3, #0x1 // eor x24, x12, x3 // adcs x24, x6, x24 // eor x7, x7, x3 // adcs x7, x25, x7 // adcs x25, x16, x3 // adcs x12, x5, x3 // adc x3, x10, x3 // lsl x10, x23, #9 // extr x6, x8, x23, #55 // extr x23, x11, x8, #55 // extr x16, x24, x11, #55 // lsr x24, x24, #55 // stp x7, x25, [sp] // @slothy:writes=stack0 // stp x12, x3, [sp, #16] // @slothy:writes=stack16 // stp x10, x6, [sp, #32] // @slothy:writes=stack32 // stp x23, x16, [sp, #48] // @slothy:writes=stack48 // str x24, [sp, #64] // @slothy:writes=stack64 // ldp x7, x3, [x1, #32] // ldr q0, [x1, #32] // ldp x25, x10, [x1, #48] // ldp x24, x12, [x2, #32] // ldr q25, [x2, #32] // ldp x6, x23, [x2, #48] // ldr q18, [x1, #48] // ldr q3, [x2, #48] // uzp1 v26.4S, v25.4S, v0.4S // rev64 v25.4S, v25.4S // uzp1 v22.4S, v0.4S, v0.4S // mul v0.4S, v25.4S, v0.4S // uaddlp v0.2D, v0.4S // shl v0.2D, v0.2D, #32 // umlal v0.2D, v22.2S, v26.2S // mov x16, v0.d[0] // mov x5, v0.d[1] // movi v0.2D, #0x00000000ffffffff // uzp2 v25.4S, v3.4S, v3.4S // xtn v26.2S, v18.2D // xtn v22.2S, v3.2D // rev64 v24.4S, v3.4S // umull v19.2D, v26.2S, v22.2S // umull v3.2D, v26.2S, v25.2S // uzp2 v20.4S, v18.4S, v18.4S // mul v18.4S, v24.4S, v18.4S // usra v3.2D, v19.2D, #32 // umull v6.2D, v20.2S, v25.2S // uaddlp v25.2D, v18.4S // and v0.16B, v3.16B, v0.16B // umlal v0.2D, v20.2S, v22.2S // shl v25.2D, v25.2D, #32 // usra v6.2D, v3.2D, #32 // umlal v25.2D, v26.2S, v22.2S // usra v6.2D, v0.2D, #32 // mov x21, v25.d[0] // mov x19, v25.d[1] // umulh x8, x7, x24 // adds x5, x5, x8 // umulh x8, x3, x12 // adcs x21, x21, x8 // mov x8, v6.d[0] // adcs x19, x19, x8 // mov x8, v6.d[1] // adc x8, x8, xzr // adds x11, x5, x16 // adcs x5, x21, x5 // adcs x21, x19, x21 // adcs x19, x8, x19 // adc x8, xzr, x8 // adds x15, x5, x16 // adcs x20, x21, x11 // adcs x5, x19, x5 // adcs x21, x8, x21 // adcs x19, xzr, x19 // adc x8, xzr, x8 // subs x9, x25, x10 // cneg x9, x9, cc // csetm x13, cc // subs x26, x23, x6 // cneg x26, x26, cc // mul x22, x9, x26 // umulh x9, x9, x26 // cinv x13, x13, cc // cmn x13, #0x1 // eor x26, x22, x13 // adcs x21, x21, x26 // eor x9, x9, x13 // adcs x19, x19, x9 // adc x8, x8, x13 // subs x9, x7, x3 // cneg x9, x9, cc // csetm x13, cc // subs x26, x12, x24 // cneg x26, x26, cc // mul x22, x9, x26 // umulh x9, x9, x26 // cinv x13, x13, cc // cmn x13, #0x1 // eor x26, x22, x13 // adcs x11, x11, x26 // eor x9, x9, x13 // adcs x15, x15, x9 // adcs x20, x20, x13 // adcs x5, x5, x13 // adcs x21, x21, x13 // adcs x19, x19, x13 // adc x8, x8, x13 // subs x9, x3, x10 // cneg x9, x9, cc // csetm x13, cc // subs x26, x23, x12 // cneg x26, x26, cc // mul x22, x9, x26 // umulh x9, x9, x26 // cinv x13, x13, cc // cmn x13, #0x1 // eor x26, x22, x13 // adcs x5, x5, x26 // eor x9, x9, x13 // adcs x14, x21, x9 // adcs x21, x19, 
x13 // adc x19, x8, x13 // subs x9, x7, x25 // cneg x8, x9, cc // csetm x9, cc // subs x13, x6, x24 // cneg x13, x13, cc // mul x26, x8, x13 // umulh x8, x8, x13 // cinv x9, x9, cc // cmn x9, #0x1 // eor x13, x26, x9 // adcs x15, x15, x13 // eor x8, x8, x9 // adcs x8, x20, x8 // adcs x5, x5, x9 // adcs x20, x14, x9 // adcs x21, x21, x9 // adc x19, x19, x9 // subs x9, x7, x10 // cneg x9, x9, cc // csetm x13, cc // subs x26, x23, x24 // cneg x26, x26, cc // mul x22, x9, x26 // umulh x9, x9, x26 // cinv x13, x13, cc // cmn x13, #0x1 // eor x26, x22, x13 // adcs x8, x8, x26 // eor x9, x9, x13 // adcs x5, x5, x9 // adcs x20, x20, x13 // adcs x21, x21, x13 // adc x19, x19, x13 // subs x9, x3, x25 // cneg x9, x9, cc // csetm x13, cc // subs x26, x6, x12 // cneg x26, x26, cc // mul x22, x9, x26 // umulh x9, x9, x26 // cinv x13, x13, cc // cmn x13, #0x1 // eor x26, x22, x13 // adcs x8, x8, x26 // eor x9, x9, x13 // adcs x5, x5, x9 // adcs x20, x20, x13 // adcs x21, x21, x13 // adc x19, x19, x13 // ldp x9, x13, [sp] // @slothy:reads=stack0 // adds x16, x16, x9 // adcs x11, x11, x13 // stp x16, x11, [sp] // @slothy:writes=stack0 // ldp x16, x11, [sp, #16] // @slothy:reads=stack16 // adcs x16, x15, x16 // adcs x8, x8, x11 // stp x16, x8, [sp, #16] // @slothy:writes=stack16 // ldp x16, x8, [sp, #32] // @slothy:reads=stack32 // adcs x16, x5, x16 // adcs x5, x20, x8 // stp x16, x5, [sp, #32] // @slothy:writes=stack32 // ldp x16, x5, [sp, #48] // @slothy:reads=stack48 // adcs x16, x21, x16 // adcs x5, x19, x5 // stp x16, x5, [sp, #48] // @slothy:writes=stack48 // ldr x16, [sp, #64] // @slothy:reads=stack64 // adc x16, x16, xzr // str x16, [sp, #64] // @slothy:writes=stack64 // ldp x16, x5, [x1] // subs x7, x7, x16 // sbcs x3, x3, x5 // ldp x16, x5, [x1, #16] // sbcs x25, x25, x16 // sbcs x10, x10, x5 // csetm x16, cc // ldp x5, x21, [x2] // subs x24, x5, x24 // sbcs x12, x21, x12 // ldp x5, x19, [x2, #16] // sbcs x6, x5, x6 // sbcs x23, x19, x23 // csetm x5, cc // eor x7, x7, x16 // subs x7, x7, x16 // eor x3, x3, x16 // sbcs x3, x3, x16 // eor x25, x25, x16 // sbcs x25, x25, x16 // eor x10, x10, x16 // sbc x10, x10, x16 // eor x24, x24, x5 // subs x24, x24, x5 // eor x12, x12, x5 // sbcs x12, x12, x5 // eor x6, x6, x5 // sbcs x6, x6, x5 // eor x23, x23, x5 // sbc x23, x23, x5 // eor x16, x5, x16 // mul x21, x7, x24 // mul x5, x3, x12 // mul x19, x25, x6 // mul x8, x10, x23 // umulh x11, x7, x24 // adds x5, x5, x11 // umulh x11, x3, x12 // adcs x19, x19, x11 // umulh x11, x25, x6 // adcs x8, x8, x11 // umulh x11, x10, x23 // adc x11, x11, xzr // adds x15, x5, x21 // adcs x5, x19, x5 // adcs x19, x8, x19 // adcs x8, x11, x8 // adc x11, xzr, x11 // adds x20, x5, x21 // adcs x9, x19, x15 // adcs x5, x8, x5 // adcs x19, x11, x19 // adcs x8, xzr, x8 // adc x11, xzr, x11 // subs x13, x25, x10 // cneg x13, x13, cc // csetm x26, cc // subs x22, x23, x6 // cneg x22, x22, cc // mul x4, x13, x22 // umulh x13, x13, x22 // cinv x26, x26, cc // cmn x26, #0x1 // eor x22, x4, x26 // adcs x19, x19, x22 // eor x13, x13, x26 // adcs x8, x8, x13 // adc x11, x11, x26 // subs x13, x7, x3 // cneg x13, x13, cc // csetm x26, cc // subs x22, x12, x24 // cneg x22, x22, cc // mul x4, x13, x22 // umulh x13, x13, x22 // cinv x26, x26, cc // cmn x26, #0x1 // eor x22, x4, x26 // adcs x15, x15, x22 // eor x13, x13, x26 // adcs x20, x20, x13 // adcs x9, x9, x26 // adcs x5, x5, x26 // adcs x19, x19, x26 // adcs x8, x8, x26 // adc x11, x11, x26 // subs x13, x3, x10 // cneg x13, x13, cc // csetm x26, cc // subs x22, x23, x12 // cneg x22, 
x22, cc // mul x4, x13, x22 // umulh x13, x13, x22 // cinv x26, x26, cc // cmn x26, #0x1 // eor x22, x4, x26 // adcs x5, x5, x22 // eor x13, x13, x26 // adcs x19, x19, x13 // adcs x8, x8, x26 // adc x11, x11, x26 // subs x13, x7, x25 // cneg x13, x13, cc // csetm x26, cc // subs x22, x6, x24 // cneg x22, x22, cc // mul x4, x13, x22 // umulh x13, x13, x22 // cinv x26, x26, cc // cmn x26, #0x1 // eor x22, x4, x26 // adcs x20, x20, x22 // eor x13, x13, x26 // adcs x9, x9, x13 // adcs x5, x5, x26 // adcs x19, x19, x26 // adcs x8, x8, x26 // adc x11, x11, x26 // subs x7, x7, x10 // cneg x7, x7, cc // csetm x10, cc // subs x24, x23, x24 // cneg x24, x24, cc // mul x23, x7, x24 // umulh x7, x7, x24 // cinv x10, x10, cc // cmn x10, #0x1 // eor x24, x23, x10 // adcs x24, x9, x24 // eor x7, x7, x10 // adcs x7, x5, x7 // adcs x23, x19, x10 // adcs x5, x8, x10 // adc x10, x11, x10 // subs x3, x3, x25 // cneg x3, x3, cc // csetm x25, cc // subs x12, x6, x12 // cneg x12, x12, cc // mul x6, x3, x12 // umulh x3, x3, x12 // cinv x25, x25, cc // cmn x25, #0x1 // eor x12, x6, x25 // adcs x24, x24, x12 // eor x3, x3, x25 // adcs x7, x7, x3 // adcs x3, x23, x25 // adcs x12, x5, x25 // adc x25, x10, x25 // ldp x10, x6, [sp] // @slothy:reads=stack0 // ldp x23, x5, [sp, #16] // @slothy:reads=stack16 // eor x21, x21, x16 // adds x21, x21, x10 // eor x19, x15, x16 // adcs x19, x19, x6 // eor x8, x20, x16 // adcs x8, x8, x23 // eor x24, x24, x16 // adcs x24, x24, x5 // eor x7, x7, x16 // ldp x11, x15, [sp, #32] // @slothy:reads=stack32 // ldp x20, x9, [sp, #48] // @slothy:reads=stack48 // ldr x13, [sp, #64] // @slothy:reads=stack64 // adcs x7, x7, x11 // eor x3, x3, x16 // adcs x3, x3, x15 // eor x12, x12, x16 // adcs x12, x12, x20 // eor x25, x25, x16 // adcs x25, x25, x9 // adc x26, x13, xzr // adds x7, x7, x10 // adcs x3, x3, x6 // adcs x10, x12, x23 // adcs x25, x25, x5 // and x12, x16, #0x1ff // lsl x6, x21, #9 // orr x12, x6, x12 // adcs x12, x11, x12 // extr x6, x19, x21, #55 // adcs x6, x15, x6 // extr x23, x8, x19, #55 // adcs x23, x20, x23 // extr x16, x24, x8, #55 // adcs x16, x9, x16 // lsr x24, x24, #55 // adc x24, x24, x13 // ldr x5, [x2, #64] // ldp x21, x19, [x1] // and x8, x21, #0xfffffffffffff // mul x8, x5, x8 // ldr x11, [x1, #64] // ldp x15, x20, [x2] // and x9, x15, #0xfffffffffffff // mul x9, x11, x9 // add x8, x8, x9 // extr x21, x19, x21, #52 // and x21, x21, #0xfffffffffffff // mul x21, x5, x21 // extr x15, x20, x15, #52 // and x15, x15, #0xfffffffffffff // mul x15, x11, x15 // add x21, x21, x15 // lsr x15, x8, #52 // add x21, x21, x15 // lsl x8, x8, #12 // extr x8, x21, x8, #12 // adds x7, x7, x8 // ldp x8, x15, [x1, #16] // ldp x9, x13, [x2, #16] // extr x19, x8, x19, #40 // and x19, x19, #0xfffffffffffff // mul x19, x5, x19 // extr x20, x9, x20, #40 // and x20, x20, #0xfffffffffffff // mul x20, x11, x20 // add x19, x19, x20 // lsr x20, x21, #52 // add x19, x19, x20 // lsl x21, x21, #12 // extr x21, x19, x21, #24 // adcs x3, x3, x21 // extr x21, x15, x8, #28 // and x21, x21, #0xfffffffffffff // mul x21, x5, x21 // extr x8, x13, x9, #28 // and x8, x8, #0xfffffffffffff // mul x8, x11, x8 // add x21, x21, x8 // lsr x8, x19, #52 // add x21, x21, x8 // lsl x19, x19, #12 // extr x19, x21, x19, #36 // adcs x10, x10, x19 // and x19, x3, x10 // ldp x8, x20, [x1, #32] // ldp x9, x22, [x2, #32] // extr x15, x8, x15, #16 // and x15, x15, #0xfffffffffffff // mul x4, x5, x15 // extr x15, x9, x13, #16 // and x15, x15, #0xfffffffffffff // mul x15, x11, x15 // add x15, x4, x15 // lsl x13, x26, #48 // add 
x15, x15, x13 // lsr x13, x21, #52 // add x15, x15, x13 // lsl x21, x21, #12 // extr x21, x15, x21, #48 // adcs x25, x25, x21 // and x21, x19, x25 // lsr x19, x8, #4 // and x19, x19, #0xfffffffffffff // mul x19, x5, x19 // lsr x26, x9, #4 // and x13, x26, #0xfffffffffffff // mul x26, x11, x13 // add x19, x19, x26 // lsr x13, x15, #52 // add x19, x19, x13 // lsl x15, x15, #12 // extr x15, x19, x15, #60 // extr x8, x20, x8, #56 // and x8, x8, #0xfffffffffffff // mul x8, x5, x8 // extr x9, x22, x9, #56 // and x9, x9, #0xfffffffffffff // mul x9, x11, x9 // add x8, x8, x9 // lsr x19, x19, #52 // add x19, x8, x19 // lsl x8, x15, #8 // extr x8, x19, x8, #8 // adcs x12, x12, x8 // and x21, x21, x12 // ldp x1, x8, [x1, #48] // ldp x2, x15, [x2, #48] // extr x20, x1, x20, #44 // and x20, x20, #0xfffffffffffff // mul x20, x5, x20 // extr x9, x2, x22, #44 // and x9, x9, #0xfffffffffffff // mul x9, x11, x9 // add x20, x20, x9 // lsr x9, x19, #52 // add x22, x20, x9 // lsl x19, x19, #12 // extr x19, x22, x19, #20 // adcs x6, x6, x19 // and x21, x21, x6 // extr x1, x8, x1, #32 // and x1, x1, #0xfffffffffffff // mul x1, x5, x1 // extr x2, x15, x2, #32 // and x2, x2, #0xfffffffffffff // mul x2, x11, x2 // add x2, x1, x2 // lsr x1, x22, #52 // add x2, x2, x1 // lsl x1, x22, #12 // extr x1, x2, x1, #32 // adcs x23, x23, x1 // and x21, x21, x23 // lsr x1, x8, #20 // mul x1, x5, x1 // lsr x19, x15, #20 // mul x19, x11, x19 // add x1, x1, x19 // lsr x19, x2, #52 // add x19, x1, x19 // lsl x2, x2, #12 // extr x2, x19, x2, #44 // adcs x16, x16, x2 // and x2, x21, x16 // mul x5, x5, x11 // lsr x1, x19, #44 // add x5, x5, x1 // adc x24, x24, x5 // lsr x5, x24, #9 // orr x24, x24, #0xfffffffffffffe00 // cmp xzr, xzr // adcs xzr, x7, x5 // adcs xzr, x2, xzr // adcs xzr, x24, xzr // adcs x7, x7, x5 // adcs x2, x3, xzr // adcs x10, x10, xzr // adcs x25, x25, xzr // adcs x12, x12, xzr // adcs x6, x6, xzr // adcs x23, x23, xzr // adcs x16, x16, xzr // adc x3, x24, xzr // stp x2, x10, [x0] // @slothy:writes=buffer0 // stp x25, x12, [x0, #16] // @slothy:writes=buffer16 // stp x6, x23, [x0, #32] // @slothy:writes=buffer32 // lsl x25, x7, #9 // and x3, x3, #0x1ff // orr x3, x3, x25 // stp x16, x3, [x0, #48] // @slothy:writes=buffer48 // lsr x14, x7, #55 // str x14, [x0, #64] // @slothy:writes=buffer64 // add sp, sp, #80 // ldp x25, x26, [sp], #16 // ldp x23, x24, [sp], #16 // ldp x21, x22, [sp], #16 // ldp x19, x20, [sp], #16 // ret // // The bash script used for step 2 is as follows: // // # Store the assembly instructions except the last 'ret', // # callee-register store/loads and add/sub sp #80 as, say, 'input.S'. // export OUTPUTS="[hint_buffer0,hint_buffer16,hint_buffer32,hint_buffer48,hint_buffer64]" // export RESERVED_REGS="[x18,x27,x28,x29,x30,sp,q8,q9,q10,q11,q12,q13,q14,q15,v8,v9,v10,v11,v12,v13,v14,v15]" // <s2n-bignum>/tools/external/slothy.sh input.S my_out_dir // # my_out_dir/3.opt.s is the optimized assembly. Its output may differ // # from this file since the sequence is non-deterministically chosen. // # Please add 'ret' at the end of the output assembly. 
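// (Exposition only; not part of the SLOTHY-generated sequence.) The reduction
// at the end of this routine rests on 2^521 == 1 (mod p_521): the double-length
// product h*2^521 + l is congruent to h + l, and since both inputs are reduced,
// h + l < 2*p_521, so one conditional subtraction finishes the reduction. The
// lsl #9 / extr #55 shuffling in the code reflects the fact that the split point,
// bit 521 = 8*64 + 9, is not aligned to 64-bit digit boundaries. A runnable
// Python sketch of the identity, with arbitrary demo operands (all names here
// are illustrative only):
//
//   p_521 = 2**521 - 1
//   x = 0xfedcba9876543210 ** 8 % p_521   # arbitrary reduced operand
//   y = 0x0123456789abcdef ** 8 % p_521   # arbitrary reduced operand
//   prod = x * y                          # 1042-bit double-length product
//   h, l = prod >> 521, prod & (2**521 - 1)
//   z = h + l                             # congruent to prod mod p_521
//   if z >= p_521: z -= p_521             # single conditional subtraction suffices
//   assert z == prod % p_521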
S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_montmul_p521) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p521) .text .balign 4 S2N_BN_SYMBOL(bignum_montmul_p521): CFI_START // Save registers and make space for the temporary buffer CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_DEC_SP(80) ldr q24, [x2] ldr q21, [x1] ldr q1, [x2, #48] ldp x23, x20, [x1, #16] movi v18.2D, #0x00000000ffffffff ldp x19, x17, [x2, #16] uzp2 v3.4S, v24.4S, v24.4S xtn v6.2S, v21.2D ldp x11, x22, [x1] rev64 v5.4S, v24.4S xtn v24.2S, v24.2D subs x16, x23, x20 umull v29.2D, v6.2S, v3.2S rev64 v31.4S, v1.4S cneg x26, x16, cc umull v27.2D, v6.2S, v24.2S ldr q19, [x1, #48] csetm x12, cc mul x15, x20, x17 mul v26.4S, v5.4S, v21.4S uzp2 v28.4S, v21.4S, v21.4S subs x6, x17, x19 xtn v7.2S, v1.2D cinv x10, x12, cc cneg x3, x6, cc uzp2 v21.4S, v1.4S, v1.4S umull v1.2D, v28.2S, v3.2S mul x12, x26, x3 usra v29.2D, v27.2D, #32 mul v25.4S, v31.4S, v19.4S usra v1.2D, v29.2D, #32 uaddlp v31.2D, v26.4S umulh x14, x26, x3 eor x12, x12, x10 and v26.16B, v29.16B, v18.16B uaddlp v2.2D, v25.4S subs x16, x11, x22 shl v0.2D, v31.2D, #32 xtn v31.2S, v19.2D cneg x6, x16, cc shl v16.2D, v2.2D, #32 umlal v26.2D, v28.2S, v24.2S umlal v0.2D, v6.2S, v24.2S uzp2 v30.4S, v19.4S, v19.4S umulh x26, x20, x17 umull v22.2D, v31.2S, v21.2S umull v29.2D, v30.2S, v21.2S usra v1.2D, v26.2D, #32 mul x13, x23, x19 eor x9, x14, x10 ldr q5, [x2, #32] umull v26.2D, v31.2S, v7.2S ldp x21, x4, [x2] csetm x8, cc mov x16, v0.d[1] ldr q6, [x1, #32] umlal v16.2D, v31.2S, v7.2S mov x3, v0.d[0] umulh x14, x23, x19 mov x25, v1.d[1] mov x5, v1.d[0] usra v22.2D, v26.2D, #32 rev64 v3.4S, v5.4S adds x16, x16, x5 uzp1 v24.4S, v5.4S, v6.4S movi v26.2D, #0x00000000ffffffff adcs x7, x13, x25 uzp1 v0.4S, v6.4S, v6.4S mul v5.4S, v3.4S, v6.4S adcs x25, x15, x14 adc x13, x26, xzr adds x26, x16, x3 and v6.16B, v22.16B, v26.16B usra v29.2D, v22.2D, #32 adcs x16, x7, x16 adcs x14, x25, x7 umlal v6.2D, v30.2S, v7.2S adcs x7, x13, x25 uaddlp v7.2D, v5.4S adc x13, xzr, x13 adds x25, x16, x3 adcs x24, x14, x26 shl v1.2D, v7.2D, #32 adcs x5, x7, x16 usra v29.2D, v6.2D, #32 adcs x16, x13, x14 umlal v1.2D, v0.2S, v24.2S adcs x14, xzr, x7 adc x13, xzr, x13 subs x7, x4, x21 cneg x7, x7, cc mul x15, x6, x7 umulh x7, x6, x7 cinv x6, x8, cc cmn x10, #0x1 adcs x16, x16, x12 eor x8, x15, x6 adcs x14, x14, x9 adc x9, x13, x10 subs x13, x22, x20 cneg x13, x13, cc csetm x10, cc subs x12, x17, x4 cinv x15, x10, cc cneg x10, x12, cc cmn x6, #0x1 umulh x12, x13, x10 eor x7, x7, x6 adcs x26, x26, x8 adcs x7, x25, x7 adcs x8, x24, x6 adcs x24, x5, x6 adcs x25, x16, x6 mul x5, x13, x10 adcs x13, x14, x6 adc x14, x9, x6 subs x10, x11, x23 csetm x16, cc cneg x9, x10, cc subs x6, x19, x21 cinv x10, x16, cc cneg x16, x6, cc eor x5, x5, x15 subs x20, x11, x20 mul x6, x9, x16 csetm x11, cc cneg x20, x20, cc subs x17, x17, x21 cneg x17, x17, cc cinv x11, x11, cc umulh x9, x9, x16 eor x16, x12, x15 subs x21, x22, x23 cneg x22, x21, cc eor x12, x6, x10 csetm x6, cc cmn x15, #0x1 eor x9, x9, x10 adcs x5, x24, x5 umulh x23, x20, x17 lsl x24, x3, #9 adcs x25, x25, x16 adcs x21, x13, x15 adc x16, x14, x15 subs x13, x19, x4 cneg x14, x13, cc cinv x15, x6, cc cmn x10, #0x1 mul x13, x20, x17 extr x17, x26, x3, #55 adcs x12, x7, x12 adcs x8, x8, x9 eor x19, x23, x11 adcs x6, x5, x10 eor x13, x13, x11 mov x5, v29.d[0] adcs x25, x25, x10 extr x26, x12, x26, #55 mul x4, x22, x14 adcs x7, x21, x10 stp x24, x17, [sp, #32] ldp x20, x21, [x1, #48] adc x24, 
x16, x10 cmn x11, #0x1 mov x16, v16.d[0] umulh x17, x22, x14 adcs x13, x8, x13 eor x9, x4, x15 adcs x10, x6, x19 ldp x22, x23, [x1, #32] adcs x3, x25, x11 ldp x4, x19, [x2, #32] eor x17, x17, x15 adcs x7, x7, x11 adc x14, x24, x11 subs x6, x20, x21 csetm x11, cc cneg x8, x6, cc cmn x15, #0x1 umulh x25, x22, x4 adcs x24, x13, x9 adcs x10, x10, x17 extr x13, x24, x12, #55 adcs x9, x3, x15 ldp x17, x3, [x2, #48] umulh x6, x23, x19 adcs x7, x7, x15 adc x14, x14, x15 subs x12, x22, x23 stp x10, x9, [sp] mov x9, v1.d[1] csetm x10, cc stp x7, x14, [sp, #16] cneg x12, x12, cc subs x14, x3, x17 mov x7, v16.d[1] cneg x15, x14, cc mov x14, v29.d[1] cinv x11, x11, cc adds x9, x9, x25 mul x25, x8, x15 stp x26, x13, [sp, #48] lsr x24, x24, #55 adcs x26, x16, x6 mov x13, v1.d[0] str x24, [sp, #64] adcs x7, x7, x5 adc x5, x14, xzr umulh x6, x8, x15 eor x15, x25, x11 subs x25, x19, x4 cinv x16, x10, cc cneg x10, x25, cc eor x6, x6, x11 adds x8, x9, x13 adcs x14, x26, x9 mul x9, x12, x10 adcs x24, x7, x26 adcs x7, x5, x7 umulh x25, x12, x10 adc x12, xzr, x5 adds x26, x14, x13 eor x10, x9, x16 adcs x9, x24, x8 adcs x5, x7, x14 adcs x14, x12, x24 adcs x7, xzr, x7 adc x12, xzr, x12 eor x24, x25, x16 cmn x11, #0x1 adcs x25, x14, x15 adcs x14, x7, x6 adc x11, x12, x11 subs x12, x23, x21 csetm x15, cc cneg x7, x12, cc subs x12, x3, x19 cneg x12, x12, cc cinv x15, x15, cc cmn x16, #0x1 adcs x6, x8, x10 mul x10, x7, x12 adcs x26, x26, x24 adcs x9, x9, x16 umulh x24, x7, x12 eor x8, x10, x15 adcs x5, x5, x16 adcs x25, x25, x16 adcs x7, x14, x16 adc x16, x11, x16 subs x11, x22, x20 cneg x11, x11, cc csetm x14, cc subs x10, x17, x4 cinv x14, x14, cc cneg x10, x10, cc cmn x15, #0x1 eor x12, x24, x15 adcs x5, x5, x8 mul x24, x11, x10 adcs x8, x25, x12 adcs x25, x7, x15 adc x16, x16, x15 subs x12, x22, x21 umulh x10, x11, x10 cneg x15, x12, cc csetm x11, cc subs x12, x3, x4 cneg x12, x12, cc cinv x7, x11, cc mul x11, x15, x12 eor x24, x24, x14 cmn x14, #0x1 eor x10, x10, x14 adcs x24, x26, x24 eor x26, x11, x7 adcs x10, x9, x10 ldp x11, x9, [x1, #16] umulh x15, x15, x12 adcs x5, x5, x14 adcs x8, x8, x14 adcs x25, x25, x14 adc x12, x16, x14 cmn x7, #0x1 adcs x16, x10, x26 eor x14, x15, x7 adcs x26, x5, x14 ldp x5, x10, [x1] adcs x14, x8, x7 adcs x15, x25, x7 adc x7, x12, x7 subs x25, x23, x20 cneg x25, x25, cc csetm x8, cc subs x22, x22, x5 sbcs x10, x23, x10 ldp x23, x12, [x2] sbcs x20, x20, x11 sbcs x21, x21, x9 csetm x9, cc subs x11, x17, x19 cneg x5, x11, cc cinv x11, x8, cc subs x23, x23, x4 sbcs x19, x12, x19 eor x20, x20, x9 ldp x12, x4, [x2, #16] eor x21, x21, x9 umulh x8, x25, x5 eor x22, x22, x9 eor x10, x10, x9 sbcs x17, x12, x17 sbcs x3, x4, x3 mul x25, x25, x5 csetm x12, cc subs x22, x22, x9 eor x4, x23, x12 sbcs x23, x10, x9 eor x10, x3, x12 sbcs x20, x20, x9 eor x5, x8, x11 eor x3, x19, x12 sbc x21, x21, x9 subs x4, x4, x12 eor x25, x25, x11 sbcs x19, x3, x12 eor x3, x17, x12 sbcs x17, x3, x12 umulh x8, x23, x19 sbc x3, x10, x12 cmn x11, #0x1 adcs x25, x16, x25 adcs x26, x26, x5 ldp x10, x5, [sp] adcs x16, x14, x11 mul x14, x22, x4 adcs x15, x15, x11 adc x7, x7, x11 adds x11, x13, x10 umulh x10, x21, x3 adcs x13, x6, x5 ldp x6, x5, [sp, #16] stp x11, x13, [sp] eor x13, x12, x9 mul x9, x23, x19 adcs x6, x24, x6 ldp x11, x24, [sp, #32] mul x12, x20, x17 adcs x25, x25, x5 stp x6, x25, [sp, #16] ldp x6, x25, [sp, #48] umulh x5, x20, x17 adcs x11, x26, x11 ldr x26, [sp, #64] adcs x16, x16, x24 stp x11, x16, [sp, #32] adcs x11, x15, x6 umulh x24, x22, x4 adcs x25, x7, x25 adc x7, x26, xzr stp x11, x25, [sp, #48] 
subs x26, x20, x21 csetm x15, cc cneg x25, x26, cc str x7, [sp, #64] mul x11, x21, x3 subs x6, x22, x23 cneg x6, x6, cc csetm x16, cc subs x26, x3, x17 cneg x26, x26, cc cinv x7, x15, cc adds x24, x9, x24 adcs x8, x12, x8 umulh x12, x25, x26 adcs x5, x11, x5 adc x11, x10, xzr subs x15, x19, x4 cinv x9, x16, cc mul x26, x25, x26 eor x25, x12, x7 cneg x12, x15, cc adds x16, x24, x14 eor x15, x26, x7 umulh x26, x6, x12 adcs x10, x8, x24 adcs x8, x5, x8 adcs x24, x11, x5 adc x5, xzr, x11 adds x11, x10, x14 mul x12, x6, x12 adcs x6, x8, x16 eor x14, x14, x13 adcs x10, x24, x10 adcs x8, x5, x8 adcs x24, xzr, x24 adc x5, xzr, x5 cmn x7, #0x1 adcs x15, x8, x15 adcs x24, x24, x25 eor x25, x26, x9 adc x8, x5, x7 eor x5, x12, x9 subs x26, x23, x21 cneg x12, x26, cc csetm x26, cc subs x7, x3, x19 cneg x7, x7, cc cinv x26, x26, cc cmn x9, #0x1 adcs x5, x16, x5 mul x16, x12, x7 adcs x25, x11, x25 umulh x7, x12, x7 adcs x12, x6, x9 eor x11, x16, x26 adcs x6, x10, x9 adcs x10, x15, x9 adcs x24, x24, x9 adc x8, x8, x9 subs x15, x22, x20 cneg x15, x15, cc csetm x9, cc subs x16, x17, x4 cneg x16, x16, cc cinv x9, x9, cc subs x21, x22, x21 mul x22, x15, x16 eor x7, x7, x26 cneg x21, x21, cc umulh x16, x15, x16 csetm x15, cc subs x4, x3, x4 cneg x3, x4, cc eor x4, x22, x9 cinv x15, x15, cc cmn x26, #0x1 eor x22, x5, x13 adcs x5, x6, x11 adcs x6, x10, x7 adcs x10, x24, x26 eor x11, x16, x9 adc x8, x8, x26 subs x16, x23, x20 cneg x7, x16, cc csetm x23, cc cmn x9, #0x1 adcs x16, x25, x4 mul x4, x21, x3 adcs x24, x12, x11 eor x11, x16, x13 adcs x26, x5, x9 adcs x16, x6, x9 umulh x20, x21, x3 adcs x6, x10, x9 ldp x3, x10, [x1] adc x12, x8, x9 subs x21, x17, x19 cneg x8, x21, cc eor x25, x20, x15 eor x20, x4, x15 mul x19, x7, x8 cinv x17, x23, cc cmn x15, #0x1 adcs x4, x24, x20 extr x21, x10, x3, #52 umulh x9, x7, x8 and x24, x21, #0xfffffffffffff adcs x26, x26, x25 eor x7, x19, x17 adcs x5, x16, x15 and x23, x3, #0xfffffffffffff eor x9, x9, x17 adcs x21, x6, x15 adc x6, x12, x15 cmn x17, #0x1 adcs x25, x4, x7 and x4, x13, #0x1ff ldp x16, x8, [sp] adcs x20, x26, x9 adcs x12, x5, x17 ldp x3, x5, [sp, #16] eor x15, x12, x13 adcs x12, x21, x17 adc x9, x6, x17 adds x21, x14, x16 lsl x7, x21, #9 eor x26, x12, x13 ldp x19, x17, [sp, #32] orr x4, x7, x4 eor x14, x25, x13 adcs x7, x22, x8 adcs x12, x11, x3 eor x11, x20, x13 ldp x6, x25, [sp, #48] eor x20, x9, x13 adcs x22, x14, x5 ldr x14, [x2, #64] adcs x9, x11, x19 ldr x11, [sp, #64] adcs x13, x15, x17 adcs x26, x26, x6 adcs x20, x20, x25 adc x15, x11, xzr adds x16, x9, x16 mul x9, x14, x23 adcs x23, x13, x8 extr x13, x7, x21, #55 adcs x21, x26, x3 ldp x3, x26, [x1, #16] extr x8, x22, x12, #55 adcs x20, x20, x5 adcs x19, x19, x4 mul x4, x14, x24 ldp x5, x24, [x2] adcs x17, x17, x13 extr x13, x26, x3, #28 extr x10, x3, x10, #40 extr x7, x12, x7, #55 and x12, x13, #0xfffffffffffff adcs x3, x6, x7 ldr x6, [x1, #64] extr x7, x24, x5, #52 and x5, x5, #0xfffffffffffff mul x12, x14, x12 adcs x13, x25, x8 and x7, x7, #0xfffffffffffff ldp x8, x25, [x2, #16] mul x5, x6, x5 extr x24, x8, x24, #40 and x24, x24, #0xfffffffffffff add x9, x9, x5 lsr x5, x22, #55 mul x7, x6, x7 extr x22, x25, x8, #28 and x10, x10, #0xfffffffffffff mul x10, x14, x10 lsr x8, x9, #52 lsl x9, x9, #12 add x7, x4, x7 adc x4, x5, x11 ldp x11, x5, [x2, #32] add x8, x7, x8 and x7, x22, #0xfffffffffffff extr x22, x8, x9, #12 lsl x9, x15, #48 mul x15, x6, x24 add x10, x10, x15 lsr x15, x8, #52 extr x25, x11, x25, #16 and x25, x25, #0xfffffffffffff mul x24, x6, x7 add x7, x10, x15 lsr x10, x7, #52 lsl x8, x8, #12 
extr x8, x7, x8, #24 adds x22, x16, x22 ldp x16, x15, [x1, #32] adcs x23, x23, x8 extr x8, x5, x11, #56 mul x25, x6, x25 add x24, x12, x24 add x12, x24, x10 lsr x10, x16, #4 lsl x7, x7, #12 extr x24, x12, x7, #36 and x10, x10, #0xfffffffffffff extr x26, x16, x26, #16 mul x10, x14, x10 and x8, x8, #0xfffffffffffff adcs x21, x21, x24 and x7, x26, #0xfffffffffffff mul x7, x14, x7 lsr x24, x11, #4 and x24, x24, #0xfffffffffffff extr x11, x15, x16, #56 lsl x26, x12, #12 and x16, x11, #0xfffffffffffff mul x11, x6, x24 lsr x12, x12, #52 ldp x2, x24, [x2, #48] add x25, x7, x25 add x25, x25, x9 and x9, x23, x21 mul x8, x6, x8 add x12, x25, x12 add x25, x10, x11 extr x11, x12, x26, #48 ldp x7, x26, [x1, #48] extr x5, x2, x5, #44 lsr x1, x12, #52 mul x10, x14, x16 lsr x16, x24, #20 add x10, x10, x8 extr x8, x26, x7, #32 and x8, x8, #0xfffffffffffff extr x24, x24, x2, #32 mul x2, x6, x16 add x1, x25, x1 lsr x25, x26, #20 and x26, x24, #0xfffffffffffff and x24, x5, #0xfffffffffffff extr x16, x7, x15, #44 mul x7, x6, x24 adcs x11, x20, x11 and x20, x16, #0xfffffffffffff lsl x5, x12, #12 and x15, x9, x11 mul x24, x14, x20 lsr x9, x1, #52 add x20, x10, x9 extr x12, x1, x5, #60 lsl x9, x20, #12 lsl x5, x12, #8 mul x10, x14, x8 extr x12, x20, x5, #8 lsr x1, x20, #52 add x7, x24, x7 adcs x8, x19, x12 and x5, x15, x8 add x7, x7, x1 mul x20, x6, x26 extr x24, x7, x9, #20 lsr x19, x7, #52 mul x25, x14, x25 lsl x16, x7, #12 add x20, x10, x20 adcs x12, x17, x24 add x19, x20, x19 lsr x26, x19, #52 mul x24, x14, x6 and x5, x5, x12 add x6, x25, x2 lsl x17, x19, #12 add x14, x6, x26 extr x16, x19, x16, #32 lsr x6, x14, #44 extr x19, x14, x17, #44 add x9, x24, x6 adcs x17, x3, x16 adcs x2, x13, x19 and x7, x5, x17 adc x15, x4, x9 cmp xzr, xzr orr x1, x15, #0xfffffffffffffe00 lsr x3, x15, #9 adcs xzr, x22, x3 and x15, x7, x2 adcs xzr, x15, xzr adcs xzr, x1, xzr adcs x7, x22, x3 lsl x3, x7, #9 lsr x15, x7, #55 str x15, [x0, #64] adcs x13, x23, xzr adcs x16, x21, xzr stp x13, x16, [x0] adcs x13, x11, xzr adcs x16, x8, xzr stp x13, x16, [x0, #16] adcs x19, x12, xzr adcs x16, x17, xzr adcs x13, x2, xzr stp x19, x16, [x0, #32] adc x16, x1, xzr and x16, x16, #0x1ff orr x16, x16, x3 stp x13, x16, [x0, #48] // Restore regs and return CFI_INC_SP(80) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_montmul_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
wlsfx/bnbb
80,148
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/p521_jscalarmul.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Jacobian form scalar multiplication for P-521 // Input scalar[9], point[27]; output res[27] // // extern void p521_jscalarmul // (uint64_t res[static 27], // const uint64_t scalar[static 9], // const uint64_t point[static 27]); // // This function is a variant of its affine point version p521_scalarmul. // Here, input and output points are assumed to be in Jacobian form with // a triple (x,y,z) representing the affine point (x/z^2,y/z^3) when // z is nonzero or the point at infinity (group identity) if z = 0. // // Given scalar = n and point = P, assumed to be on the NIST elliptic // curve P-521, returns a representation of n * P. If the result is the // point at infinity (either because the input point was or because the // scalar was a multiple of p_521) then the output is guaranteed to // represent the point at infinity, i.e. to have its z coordinate zero. // // Standard ARM ABI: X0 = res, X1 = scalar, X2 = point // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p521_jscalarmul) S2N_BN_FUNCTION_TYPE_DIRECTIVE(p521_jscalarmul) S2N_BN_SYM_PRIVACY_DIRECTIVE(p521_jscalarmul) .text .balign 4 // Size of individual field elements #define NUMSIZE 72 #define JACSIZE (3*NUMSIZE) // Safe copies of input res and additional values in variables. #define tabup x15 #define bf x16 #define sgn x17 #define j x19 #define res x20 // Intermediate variables on the stack. // The table is 16 entries, each of size JACSIZE = 3 * NUMSIZE #define scalarb sp, #(0*NUMSIZE) #define acc sp, #(1*NUMSIZE) #define tabent sp, #(4*NUMSIZE) #define tab sp, #(7*NUMSIZE) // Round up to maintain stack alignment #define NSPACE 3968 #define selectblock(I) \ cmp bf, #(1*I) __LF \ ldp x10, x11, [tabup] __LF \ csel x0, x10, x0, eq __LF \ csel x1, x11, x1, eq __LF \ ldp x10, x11, [tabup, #16] __LF \ csel x2, x10, x2, eq __LF \ csel x3, x11, x3, eq __LF \ ldp x10, x11, [tabup, #32] __LF \ csel x4, x10, x4, eq __LF \ csel x5, x11, x5, eq __LF \ ldp x10, x11, [tabup, #48] __LF \ csel x6, x10, x6, eq __LF \ csel x7, x11, x7, eq __LF \ ldr x10, [tabup, #64] __LF \ csel x8, x10, x8, eq __LF \ add tabup, tabup, #JACSIZE // Loading large constants #define movbig(nn,n3,n2,n1,n0) \ movz nn, n0 __LF \ movk nn, n1, lsl #16 __LF \ movk nn, n2, lsl #32 __LF \ movk nn, n3, lsl #48 S2N_BN_SYMBOL(p521_jscalarmul): CFI_START CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x30) CFI_DEC_SP(NSPACE) // Preserve the "res" input argument; others get processed early. mov res, x0 // Reduce the input scalar mod n_521 and store it to "scalarb". mov x19, x2 add x0, scalarb CFI_BL(Lp521_jscalarmul_bignum_mod_n521_9) mov x2, x19 // Set the tab[0] table entry to the input point = 1 * P, but also // reduce all coordinates modulo p. In principle we assume reduction // as a precondition, but this reduces the scope for surprise, e.g. // making sure that any input with z = 0 is treated as zero, even // if the other coordinates are not in fact reduced. add x0, tab mov x1, x19 CFI_BL(Lp521_jscalarmul_bignum_mod_p521_9) add x0, tab+NUMSIZE add x1, x19, #NUMSIZE CFI_BL(Lp521_jscalarmul_bignum_mod_p521_9) add x0, tab+2*NUMSIZE add x1, x19, #(2*NUMSIZE) CFI_BL(Lp521_jscalarmul_bignum_mod_p521_9) // If bit 520 of the scalar is set, then negate the scalar mod n_521, // i.e. 
do scalar |-> n_521 - scalar, and also the point to compensate // by negating its y coordinate. This further step is not needed by // the indexing scheme (the top window is only a couple of bits either // way), but is convenient to exclude a problem with the specific value // scalar = n_521 - 18, where the last Jacobian addition is of the form // (n_521 - 9) * P + -(9 * P) and hence is a degenerate doubling case. ldp x0, x1, [scalarb] movbig(x10, #0xbb6f, #0xb71e, #0x9138, #0x6409) subs x10, x10, x0 movbig(x11, #0x3bb5, #0xc9b8, #0x899c, #0x47ae) sbcs x11, x11, x1 ldp x2, x3, [scalarb+16] movbig(x12, #0x7fcc, #0x0148, #0xf709, #0xa5d0) sbcs x12, x12, x2 movbig(x13, #0x5186, #0x8783, #0xbf2f, #0x966b) sbcs x13, x13, x3 ldp x4, x5, [scalarb+32] mov x14, 0xfffffffffffffffa sbcs x14, x14, x4 mov x15, 0xffffffffffffffff sbcs x15, x15, x5 ldp x6, x7, [scalarb+48] mov x16, 0xffffffffffffffff sbcs x16, x16, x6 mov x17, 0xffffffffffffffff sbcs x17, x17, x7 ldr x8, [scalarb+64] mov x19, 0x00000000000001ff sbc x19, x19, x8 tst x8, 0x100 csetm x9, ne csel x0, x10, x0, ne csel x1, x11, x1, ne csel x2, x12, x2, ne csel x3, x13, x3, ne csel x4, x14, x4, ne csel x5, x15, x5, ne csel x6, x16, x6, ne csel x7, x17, x7, ne csel x8, x19, x8, ne stp x0, x1, [scalarb] stp x2, x3, [scalarb+16] stp x4, x5, [scalarb+32] stp x6, x7, [scalarb+48] str x8, [scalarb+64] add tabup, tab ldp x0, x1, [tabup, #NUMSIZE] ldp x2, x3, [tabup, #NUMSIZE+16] ldp x4, x5, [tabup, #NUMSIZE+32] ldp x6, x7, [tabup, #NUMSIZE+48] ldr x8, [tabup, #NUMSIZE+64] orr x10, x0, x1 orr x11, x2, x3 orr x12, x4, x5 orr x13, x6, x7 orr x10, x10, x11 orr x12, x12, x13 orr x12, x12, x8 orr x10, x10, x12 cmp x10, xzr csel x9, x9, xzr, ne eor x0, x0, x9 eor x1, x1, x9 eor x2, x2, x9 eor x3, x3, x9 eor x4, x4, x9 eor x5, x5, x9 eor x6, x6, x9 eor x7, x7, x9 and x9, x9, #0x1FF eor x8, x8, x9 stp x0, x1, [tabup, #NUMSIZE] stp x2, x3, [tabup, #NUMSIZE+16] stp x4, x5, [tabup, #NUMSIZE+32] stp x6, x7, [tabup, #NUMSIZE+48] str x8, [tabup, #NUMSIZE+64] // Compute and record tab[1] = 2 * p, ..., tab[15] = 16 * P add x0, tab+JACSIZE*1 add x1, tab CFI_BL(Lp521_jscalarmul_jdouble) add x0, tab+JACSIZE*2 add x1, tab+JACSIZE*1 add x2, tab CFI_BL(Lp521_jscalarmul_jadd) add x0, tab+JACSIZE*3 add x1, tab+JACSIZE*1 CFI_BL(Lp521_jscalarmul_jdouble) add x0, tab+JACSIZE*4 add x1, tab+JACSIZE*3 add x2, tab CFI_BL(Lp521_jscalarmul_jadd) add x0, tab+JACSIZE*5 add x1, tab+JACSIZE*2 CFI_BL(Lp521_jscalarmul_jdouble) add x0, tab+JACSIZE*6 add x1, tab+JACSIZE*5 add x2, tab CFI_BL(Lp521_jscalarmul_jadd) add x0, tab+JACSIZE*7 add x1, tab+JACSIZE*3 CFI_BL(Lp521_jscalarmul_jdouble) add x0, tab+JACSIZE*8 add x1, tab+JACSIZE*7 add x2, tab CFI_BL(Lp521_jscalarmul_jadd) add x0, tab+JACSIZE*9 add x1, tab+JACSIZE*4 CFI_BL(Lp521_jscalarmul_jdouble) add x0, tab+JACSIZE*10 add x1, tab+JACSIZE*9 add x2, tab CFI_BL(Lp521_jscalarmul_jadd) add x0, tab+JACSIZE*11 add x1, tab+JACSIZE*5 CFI_BL(Lp521_jscalarmul_jdouble) add x0, tab+JACSIZE*12 add x1, tab+JACSIZE*11 add x2, tab CFI_BL(Lp521_jscalarmul_jadd) add x0, tab+JACSIZE*13 add x1, tab+JACSIZE*6 CFI_BL(Lp521_jscalarmul_jdouble) add x0, tab+JACSIZE*14 add x1, tab+JACSIZE*13 add x2, tab CFI_BL(Lp521_jscalarmul_jadd) add x0, tab+JACSIZE*15 add x1, tab+JACSIZE*7 CFI_BL(Lp521_jscalarmul_jdouble) // Add the recoding constant sum_i(16 * 32^i) to the scalar to allow signed // digits. The digits of the constant, in lowest-to-highest order, are as // follows; they are generated dynamically since none is a simple ARM load. 
// // 0x0842108421084210 // 0x1084210842108421 // 0x2108421084210842 // 0x4210842108421084 // 0x8421084210842108 // 0x0842108421084210 // 0x1084210842108421 // 0x2108421084210842 // 0x0000000000000084 ldp x0, x1, [scalarb] ldp x2, x3, [scalarb+16] ldp x4, x5, [scalarb+32] ldp x6, x7, [scalarb+48] ldr x8, [scalarb+64] movbig(x10, #0x1084, #0x2108, #0x4210, #0x8421) adds x0, x0, x10, lsr #1 adcs x1, x1, x10 lsl x10, x10, #1 adcs x2, x2, x10 lsl x10, x10, #1 adcs x3, x3, x10 lsl x10, x10, #1 adcs x4, x4, x10 lsr x11, x10, #4 adcs x5, x5, x11 lsr x10, x10, #3 adcs x6, x6, x10 lsl x10, x10, #1 adcs x7, x7, x10 lsl x10, x10, #1 and x10, x10, #0xFF adc x8, x8, x10 // Because of the initial reduction the top bitfield (>= bits 520) is <= 1, // i.e. just a single bit. Record that in "bf", then shift the whole // scalar left 56 bits to align the top of the next bitfield with the MSB // (bits 571..575). lsr bf, x8, #8 extr x8, x8, x7, #8 extr x7, x7, x6, #8 extr x6, x6, x5, #8 extr x5, x5, x4, #8 extr x4, x4, x3, #8 extr x3, x3, x2, #8 extr x2, x2, x1, #8 extr x1, x1, x0, #8 lsl x0, x0, #56 stp x0, x1, [scalarb] stp x2, x3, [scalarb+16] stp x4, x5, [scalarb+32] stp x6, x7, [scalarb+48] str x8, [scalarb+64] // According to the top bit, initialize the accumulator to P or 0. This top // digit, uniquely, is not recoded so there is no sign adjustment to make. // We only really need to adjust the z coordinate to zero, but do all three. add tabup, tab cmp bf, xzr ldp x0, x1, [tabup] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc] ldp x0, x1, [tabup, #16] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+16] ldp x0, x1, [tabup, #32] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+32] ldp x0, x1, [tabup, #48] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+48] ldp x0, x1, [tabup, #64] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+64] ldp x0, x1, [tabup, #80] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+80] ldp x0, x1, [tabup, #96] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+96] ldp x0, x1, [tabup, #112] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+112] ldp x0, x1, [tabup, #128] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+128] ldp x0, x1, [tabup, #144] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+144] ldp x0, x1, [tabup, #160] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+160] ldp x0, x1, [tabup, #176] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+176] ldp x0, x1, [tabup, #192] csel x0, x0, xzr, ne csel x1, x1, xzr, ne stp x0, x1, [acc+192] ldr x0, [tabup, #208] csel x0, x0, xzr, ne str x0, [acc+208] // Main loop over size-5 bitfields: double 5 times then add signed digit // At each stage we shift the scalar left by 5 bits so we can simply pick // the top 5 bits as the bitfield, saving some fiddle over indexing. 
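// Illustration only (hypothetical variable names; not part of the code): after
// adding the constant above, every 5-bit window w of the shifted scalar encodes
// the signed digit d = w - 16, and the loop below recovers the scalar as
// bf*2^520 + sum_i d_i*32^i, processing windows from the top with five
// doublings and one signed table addition per window. A runnable Python sketch
// of the recoding identity, using a demo scalar with bit 520 clear:
//
//   C = sum(16 << (5*i) for i in range(104))   # the constant listed above
//   k = 3**328 % 2**520                        # demo scalar
//   t = k + C
//   bf = t >> 520                              # top bitfield, here 0 or 1
//   d = [((t >> (5*i)) & 31) - 16 for i in range(104)]
//   assert all(-16 <= di <= 15 for di in d)
//   assert bf * 2**520 + sum(d[i] * 32**i for i in range(104)) == k
//
// Negative digits are realized by selecting |d| * P from the table and negating
// its y coordinate, so the table only needs the sixteen entries 1*P .. 16*P.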
mov j, #520 Lp521_jscalarmul_mainloop: sub j, j, #5 add x0, acc add x1, acc CFI_BL(Lp521_jscalarmul_jdouble) add x0, acc add x1, acc CFI_BL(Lp521_jscalarmul_jdouble) add x0, acc add x1, acc CFI_BL(Lp521_jscalarmul_jdouble) add x0, acc add x1, acc CFI_BL(Lp521_jscalarmul_jdouble) add x0, acc add x1, acc CFI_BL(Lp521_jscalarmul_jdouble) // Choose the bitfield and adjust it to sign and magnitude ldp x0, x1, [scalarb] ldp x2, x3, [scalarb+16] ldp x4, x5, [scalarb+32] ldp x6, x7, [scalarb+48] ldr x8, [scalarb+64] lsr bf, x8, #59 extr x8, x8, x7, #59 extr x7, x7, x6, #59 extr x6, x6, x5, #59 extr x5, x5, x4, #59 extr x4, x4, x3, #59 extr x3, x3, x2, #59 extr x2, x2, x1, #59 extr x1, x1, x0, #59 lsl x0, x0, #5 stp x0, x1, [scalarb] stp x2, x3, [scalarb+16] stp x4, x5, [scalarb+32] stp x6, x7, [scalarb+48] str x8, [scalarb+64] subs bf, bf, #16 csetm sgn, lo // sgn = sign of digit (1 = negative) cneg bf, bf, lo // bf = absolute value of digit // Conditionally select the table entry tab[i-1] = i * P in constant time mov x0, xzr mov x1, xzr mov x2, xzr mov x3, xzr mov x4, xzr mov x5, xzr mov x6, xzr mov x7, xzr mov x8, xzr add tabup, tab selectblock(1) selectblock(2) selectblock(3) selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) selectblock(9) selectblock(10) selectblock(11) selectblock(12) selectblock(13) selectblock(14) selectblock(15) selectblock(16) stp x0, x1, [tabent] stp x2, x3, [tabent+16] stp x4, x5, [tabent+32] stp x6, x7, [tabent+48] str x8, [tabent+64] mov x0, xzr mov x1, xzr mov x2, xzr mov x3, xzr mov x4, xzr mov x5, xzr mov x6, xzr mov x7, xzr mov x8, xzr add tabup, tab+2*NUMSIZE selectblock(1) selectblock(2) selectblock(3) selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) selectblock(9) selectblock(10) selectblock(11) selectblock(12) selectblock(13) selectblock(14) selectblock(15) selectblock(16) stp x0, x1, [tabent+2*NUMSIZE] stp x2, x3, [tabent+2*NUMSIZE+16] stp x4, x5, [tabent+2*NUMSIZE+32] stp x6, x7, [tabent+2*NUMSIZE+48] str x8, [tabent+2*NUMSIZE+64] mov x0, xzr mov x1, xzr mov x2, xzr mov x3, xzr mov x4, xzr mov x5, xzr mov x6, xzr mov x7, xzr mov x8, xzr add tabup, tab+NUMSIZE selectblock(1) selectblock(2) selectblock(3) selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) selectblock(9) selectblock(10) selectblock(11) selectblock(12) selectblock(13) selectblock(14) selectblock(15) selectblock(16) // Store it to "tabent" with the y coordinate optionally negated. // This is done carefully to give coordinates < p_521 even in // the degenerate case y = 0 (when z = 0 for points on the curve). orr x10, x0, x1 orr x11, x2, x3 orr x12, x4, x5 orr x13, x6, x7 orr x10, x10, x11 orr x12, x12, x13 orr x12, x12, x8 orr x10, x10, x12 cmp x10, xzr csel sgn, sgn, xzr, ne eor x0, x0, sgn eor x1, x1, sgn eor x2, x2, sgn eor x3, x3, sgn eor x4, x4, sgn eor x5, x5, sgn eor x6, x6, sgn eor x7, x7, sgn and sgn, sgn, #0x1FF eor x8, x8, sgn stp x0, x1, [tabent+NUMSIZE] stp x2, x3, [tabent+NUMSIZE+16] stp x4, x5, [tabent+NUMSIZE+32] stp x6, x7, [tabent+NUMSIZE+48] str x8, [tabent+NUMSIZE+64] // Add to the accumulator add x0, acc add x1, acc add x2, tabent CFI_BL(Lp521_jscalarmul_jadd) cbnz j, Lp521_jscalarmul_mainloop // That's the end of the main loop, and we just need to copy the // result in "acc" to the output. 
ldp x0, x1, [acc] stp x0, x1, [res] ldp x0, x1, [acc+16] stp x0, x1, [res, #16] ldp x0, x1, [acc+32] stp x0, x1, [res, #32] ldp x0, x1, [acc+48] stp x0, x1, [res, #48] ldp x0, x1, [acc+64] stp x0, x1, [res, #64] ldp x0, x1, [acc+80] stp x0, x1, [res, #80] ldp x0, x1, [acc+96] stp x0, x1, [res, #96] ldp x0, x1, [acc+112] stp x0, x1, [res, #112] ldp x0, x1, [acc+128] stp x0, x1, [res, #128] ldp x0, x1, [acc+144] stp x0, x1, [res, #144] ldp x0, x1, [acc+160] stp x0, x1, [res, #160] ldp x0, x1, [acc+176] stp x0, x1, [res, #176] ldp x0, x1, [acc+192] stp x0, x1, [res, #192] ldr x0, [acc+208] str x0, [res, #208] // Restore stack and registers and return CFI_INC_SP(NSPACE) CFI_POP2(x21,x30) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(p521_jscalarmul) // Local copies of subroutines, complete clones at the moment except // that we share multiplication and squaring between the point operations. S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jscalarmul_bignum_mod_p521_9) Lp521_jscalarmul_bignum_mod_p521_9: CFI_START ldr x12, [x1, #64] lsr x2, x12, #9 cmp xzr, xzr ldp x4, x5, [x1] adcs xzr, x4, x2 adcs xzr, x5, xzr ldp x6, x7, [x1, #16] and x3, x6, x7 adcs xzr, x3, xzr ldp x8, x9, [x1, #32] and x3, x8, x9 adcs xzr, x3, xzr ldp x10, x11, [x1, #48] and x3, x10, x11 adcs xzr, x3, xzr orr x3, x12, #0xfffffffffffffe00 adcs x3, x3, xzr adcs x4, x4, x2 adcs x5, x5, xzr adcs x6, x6, xzr adcs x7, x7, xzr adcs x8, x8, xzr adcs x9, x9, xzr adcs x10, x10, xzr adcs x11, x11, xzr adc x12, x12, xzr and x12, x12, #0x1ff stp x4, x5, [x0] stp x6, x7, [x0, #16] stp x8, x9, [x0, #32] stp x10, x11, [x0, #48] str x12, [x0, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jscalarmul_bignum_mod_p521_9) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jscalarmul_bignum_mod_n521_9) Lp521_jscalarmul_bignum_mod_n521_9: CFI_START ldr x14, [x1, #64] lsr x15, x14, #9 add x15, x15, #1 mov x2, #39927 movk x2, #28359, lsl #16 movk x2, #18657, lsl #32 movk x2, #17552, lsl #48 mul x6, x2, x15 mov x3, #47185 movk x3, #30307, lsl #16 movk x3, #13895, lsl #32 movk x3, #50250, lsl #48 mul x7, x3, x15 mov x4, #23087 movk x4, #2294, lsl #16 movk x4, #65207, lsl #32 movk x4, #32819, lsl #48 mul x8, x4, x15 mov x5, #27028 movk x5, #16592, lsl #16 movk x5, #30844, lsl #32 movk x5, #44665, lsl #48 mul x9, x5, x15 lsl x10, x15, #2 add x10, x10, x15 umulh x13, x2, x15 adds x7, x7, x13 umulh x13, x3, x15 adcs x8, x8, x13 umulh x13, x4, x15 adcs x9, x9, x13 umulh x13, x5, x15 adc x10, x10, x13 ldp x12, x13, [x1] adds x6, x6, x12 adcs x7, x7, x13 ldp x12, x13, [x1, #16] adcs x8, x8, x12 adcs x9, x9, x13 ldp x13, x11, [x1, #32] adcs x10, x10, x13 adcs x11, x11, xzr ldp x12, x13, [x1, #48] adcs x12, x12, xzr adcs x13, x13, xzr orr x14, x14, #0xfffffffffffffe00 adcs x14, x14, xzr csetm x15, lo and x2, x2, x15 subs x6, x6, x2 and x3, x3, x15 sbcs x7, x7, x3 and x4, x4, x15 sbcs x8, x8, x4 and x5, x5, x15 sbcs x9, x9, x5 mov x2, #5 and x2, x2, x15 sbcs x10, x10, x2 sbcs x11, x11, xzr sbcs x12, x12, xzr sbcs x13, x13, xzr sbc x14, x14, xzr and x14, x14, #0x1ff stp x6, x7, [x0] stp x8, x9, [x0, #16] stp x10, x11, [x0, #32] stp x12, x13, [x0, #48] str x14, [x0, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jscalarmul_bignum_mod_n521_9) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jscalarmul_jadd) Lp521_jscalarmul_jadd: CFI_START CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_PUSH2(x27,x28) CFI_PUSH2(x29,x30) CFI_DEC_SP(576) mov x26, x0 mov x27, x1 mov x28, x2 mov x0, sp add x1, x27, #0x90 CFI_BL(Lp521_jscalarmul_sqr_p521) add x0, sp, #0x168 add x1, x28, #0x90 
CFI_BL(Lp521_jscalarmul_sqr_p521) add x0, sp, #0x1f8 add x1, x28, #0x90 add x2, x27, #0x48 CFI_BL(Lp521_jscalarmul_mul_p521) add x0, sp, #0x48 add x1, x27, #0x90 add x2, x28, #0x48 CFI_BL(Lp521_jscalarmul_mul_p521) add x0, sp, #0x90 mov x1, sp add x2, x28, #0x0 CFI_BL(Lp521_jscalarmul_mul_p521) add x0, sp, #0x120 add x1, sp, #0x168 add x2, x27, #0x0 CFI_BL(Lp521_jscalarmul_mul_p521) add x0, sp, #0x48 mov x1, sp add x2, sp, #0x48 CFI_BL(Lp521_jscalarmul_mul_p521) add x0, sp, #0x1f8 add x1, sp, #0x168 add x2, sp, #0x1f8 CFI_BL(Lp521_jscalarmul_mul_p521) add x0, sp, #0x168 add x1, sp, #0x90 add x2, sp, #0x120 CFI_BL(Lp521_jscalarmul_sub_p521) add x0, sp, #0x48 add x1, sp, #0x48 add x2, sp, #0x1f8 CFI_BL(Lp521_jscalarmul_sub_p521) add x0, sp, #0xd8 add x1, sp, #0x168 CFI_BL(Lp521_jscalarmul_sqr_p521) mov x0, sp add x1, sp, #0x48 CFI_BL(Lp521_jscalarmul_sqr_p521) add x0, sp, #0x120 add x1, sp, #0xd8 add x2, sp, #0x120 CFI_BL(Lp521_jscalarmul_mul_p521) add x0, sp, #0x90 add x1, sp, #0xd8 add x2, sp, #0x90 CFI_BL(Lp521_jscalarmul_mul_p521) mov x0, sp mov x1, sp add x2, sp, #0x120 CFI_BL(Lp521_jscalarmul_sub_p521) add x0, sp, #0xd8 add x1, sp, #0x90 add x2, sp, #0x120 CFI_BL(Lp521_jscalarmul_sub_p521) add x0, sp, #0x168 add x1, sp, #0x168 add x2, x27, #0x90 CFI_BL(Lp521_jscalarmul_mul_p521) mov x0, sp mov x1, sp add x2, sp, #0x90 CFI_BL(Lp521_jscalarmul_sub_p521) add x0, sp, #0x120 add x1, sp, #0x120 mov x2, sp CFI_BL(Lp521_jscalarmul_sub_p521) add x0, sp, #0xd8 add x1, sp, #0xd8 add x2, sp, #0x1f8 CFI_BL(Lp521_jscalarmul_mul_p521) add x0, sp, #0x168 add x1, sp, #0x168 add x2, x28, #0x90 CFI_BL(Lp521_jscalarmul_mul_p521) add x0, sp, #0x120 add x1, sp, #0x48 add x2, sp, #0x120 CFI_BL(Lp521_jscalarmul_mul_p521) add x0, sp, #0x120 add x1, sp, #0x120 add x2, sp, #0xd8 CFI_BL(Lp521_jscalarmul_sub_p521) ldp x0, x1, [x27, #144] ldp x2, x3, [x27, #160] ldp x4, x5, [x27, #176] ldp x6, x7, [x27, #192] ldr x8, [x27, #208] orr x20, x0, x1 orr x21, x2, x3 orr x22, x4, x5 orr x23, x6, x7 orr x20, x20, x21 orr x22, x22, x23 orr x20, x20, x8 orr x20, x20, x22 cmp x20, xzr cset x20, ne ldp x10, x11, [x28, #144] ldp x12, x13, [x28, #160] ldp x14, x15, [x28, #176] ldp x16, x17, [x28, #192] ldr x19, [x28, #208] orr x21, x10, x11 orr x22, x12, x13 orr x23, x14, x15 orr x24, x16, x17 orr x21, x21, x22 orr x23, x23, x24 orr x21, x21, x19 orr x21, x21, x23 csel x0, x0, x10, ne csel x1, x1, x11, ne csel x2, x2, x12, ne csel x3, x3, x13, ne csel x4, x4, x14, ne csel x5, x5, x15, ne csel x6, x6, x16, ne csel x7, x7, x17, ne csel x8, x8, x19, ne cmp x21, xzr cset x21, ne cmp x21, x20 ldp x10, x11, [sp, #360] ldp x12, x13, [sp, #376] ldp x14, x15, [sp, #392] ldp x16, x17, [sp, #408] ldr x19, [sp, #424] csel x0, x0, x10, ne csel x1, x1, x11, ne csel x2, x2, x12, ne csel x3, x3, x13, ne csel x4, x4, x14, ne csel x5, x5, x15, ne csel x6, x6, x16, ne csel x7, x7, x17, ne csel x8, x8, x19, ne stp x0, x1, [sp, #360] stp x2, x3, [sp, #376] stp x4, x5, [sp, #392] stp x6, x7, [sp, #408] str x8, [sp, #424] ldp x20, x21, [x27] ldp x0, x1, [sp] csel x0, x20, x0, cc csel x1, x21, x1, cc ldp x20, x21, [x28] csel x0, x20, x0, hi csel x1, x21, x1, hi ldp x20, x21, [x27, #16] ldp x2, x3, [sp, #16] csel x2, x20, x2, cc csel x3, x21, x3, cc ldp x20, x21, [x28, #16] csel x2, x20, x2, hi csel x3, x21, x3, hi ldp x20, x21, [x27, #32] ldp x4, x5, [sp, #32] csel x4, x20, x4, cc csel x5, x21, x5, cc ldp x20, x21, [x28, #32] csel x4, x20, x4, hi csel x5, x21, x5, hi ldp x20, x21, [x27, #48] ldp x6, x7, [sp, #48] csel x6, x20, x6, cc csel x7, x21, x7, 
cc ldp x20, x21, [x28, #48] csel x6, x20, x6, hi csel x7, x21, x7, hi ldr x20, [x27, #64] ldr x8, [sp, #64] csel x8, x20, x8, cc ldr x21, [x28, #64] csel x8, x21, x8, hi ldp x20, x21, [x27, #72] ldp x10, x11, [sp, #288] csel x10, x20, x10, cc csel x11, x21, x11, cc ldp x20, x21, [x28, #72] csel x10, x20, x10, hi csel x11, x21, x11, hi ldp x20, x21, [x27, #88] ldp x12, x13, [sp, #304] csel x12, x20, x12, cc csel x13, x21, x13, cc ldp x20, x21, [x28, #88] csel x12, x20, x12, hi csel x13, x21, x13, hi ldp x20, x21, [x27, #104] ldp x14, x15, [sp, #320] csel x14, x20, x14, cc csel x15, x21, x15, cc ldp x20, x21, [x28, #104] csel x14, x20, x14, hi csel x15, x21, x15, hi ldp x20, x21, [x27, #120] ldp x16, x17, [sp, #336] csel x16, x20, x16, cc csel x17, x21, x17, cc ldp x20, x21, [x28, #120] csel x16, x20, x16, hi csel x17, x21, x17, hi ldr x20, [x27, #136] ldr x19, [sp, #352] csel x19, x20, x19, cc ldr x21, [x28, #136] csel x19, x21, x19, hi stp x0, x1, [x26] stp x2, x3, [x26, #16] stp x4, x5, [x26, #32] stp x6, x7, [x26, #48] str x8, [x26, #64] ldp x0, x1, [sp, #360] ldp x2, x3, [sp, #376] ldp x4, x5, [sp, #392] ldp x6, x7, [sp, #408] ldr x8, [sp, #424] stp x10, x11, [x26, #72] stp x12, x13, [x26, #88] stp x14, x15, [x26, #104] stp x16, x17, [x26, #120] str x19, [x26, #136] stp x0, x1, [x26, #144] stp x2, x3, [x26, #160] stp x4, x5, [x26, #176] stp x6, x7, [x26, #192] str x8, [x26, #208] CFI_INC_SP(576) CFI_POP2(x29,x30) CFI_POP2(x27,x28) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jscalarmul_jadd) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jscalarmul_jdouble) Lp521_jscalarmul_jdouble: CFI_START CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_PUSH2(x27,x28) CFI_PUSH2(x29,x30) CFI_DEC_SP(512) mov x26, x0 mov x27, x1 mov x0, sp add x1, x27, #0x90 CFI_BL(Lp521_jscalarmul_sqr_p521) add x0, sp, #0x48 add x1, x27, #0x48 CFI_BL(Lp521_jscalarmul_sqr_p521) ldp x5, x6, [x27] ldp x4, x3, [sp] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x27, #16] ldp x4, x3, [sp, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [x27, #32] ldp x4, x3, [sp, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 ldp x11, x12, [x27, #48] ldp x4, x3, [sp, #48] sbcs x11, x11, x4 sbcs x12, x12, x3 ldr x13, [x27, #64] ldr x4, [sp, #64] sbcs x13, x13, x4 sbcs x5, x5, xzr sbcs x6, x6, xzr sbcs x7, x7, xzr sbcs x8, x8, xzr sbcs x9, x9, xzr sbcs x10, x10, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbcs x13, x13, xzr and x13, x13, #0x1ff stp x5, x6, [sp, #216] stp x7, x8, [sp, #232] stp x9, x10, [sp, #248] stp x11, x12, [sp, #264] str x13, [sp, #280] cmp xzr, xzr ldp x5, x6, [x27] ldp x4, x3, [sp] adcs x5, x5, x4 adcs x6, x6, x3 ldp x7, x8, [x27, #16] ldp x4, x3, [sp, #16] adcs x7, x7, x4 adcs x8, x8, x3 ldp x9, x10, [x27, #32] ldp x4, x3, [sp, #32] adcs x9, x9, x4 adcs x10, x10, x3 ldp x11, x12, [x27, #48] ldp x4, x3, [sp, #48] adcs x11, x11, x4 adcs x12, x12, x3 ldr x13, [x27, #64] ldr x4, [sp, #64] adc x13, x13, x4 subs x4, x13, #0x200 csetm x4, cs sbcs x5, x5, xzr and x4, x4, #0x200 sbcs x6, x6, xzr sbcs x7, x7, xzr sbcs x8, x8, xzr sbcs x9, x9, xzr sbcs x10, x10, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbc x13, x13, x4 stp x5, x6, [sp, #144] stp x7, x8, [sp, #160] stp x9, x10, [sp, #176] stp x11, x12, [sp, #192] str x13, [sp, #208] add x0, sp, #0xd8 add x1, sp, #0x90 add x2, sp, #0xd8 CFI_BL(Lp521_jscalarmul_mul_p521) cmp xzr, xzr ldp x5, x6, [x27, #72] ldp x4, x3, [x27, #144] adcs x5, x5, x4 adcs x6, x6, x3 ldp x7, x8, [x27, #88] ldp x4, x3, [x27, #160] 
adcs x7, x7, x4 adcs x8, x8, x3 ldp x9, x10, [x27, #104] ldp x4, x3, [x27, #176] adcs x9, x9, x4 adcs x10, x10, x3 ldp x11, x12, [x27, #120] ldp x4, x3, [x27, #192] adcs x11, x11, x4 adcs x12, x12, x3 ldr x13, [x27, #136] ldr x4, [x27, #208] adc x13, x13, x4 subs x4, x13, #0x200 csetm x4, cs sbcs x5, x5, xzr and x4, x4, #0x200 sbcs x6, x6, xzr sbcs x7, x7, xzr sbcs x8, x8, xzr sbcs x9, x9, xzr sbcs x10, x10, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbc x13, x13, x4 stp x5, x6, [sp, #144] stp x7, x8, [sp, #160] stp x9, x10, [sp, #176] stp x11, x12, [sp, #192] str x13, [sp, #208] add x0, sp, #0x120 add x1, x27, #0x0 add x2, sp, #0x48 CFI_BL(Lp521_jscalarmul_mul_p521) add x0, sp, #0x168 add x1, sp, #0xd8 CFI_BL(Lp521_jscalarmul_sqr_p521) add x0, sp, #0x90 add x1, sp, #0x90 CFI_BL(Lp521_jscalarmul_sqr_p521) ldp x6, x7, [sp, #288] mov x1, #0xc mul x3, x1, x6 mul x4, x1, x7 umulh x6, x1, x6 adds x4, x4, x6 umulh x7, x1, x7 ldp x8, x9, [sp, #304] mul x5, x1, x8 mul x6, x1, x9 umulh x8, x1, x8 adcs x5, x5, x7 umulh x9, x1, x9 adcs x6, x6, x8 ldp x10, x11, [sp, #320] mul x7, x1, x10 mul x8, x1, x11 umulh x10, x1, x10 adcs x7, x7, x9 umulh x11, x1, x11 adcs x8, x8, x10 ldp x12, x13, [sp, #336] mul x9, x1, x12 mul x10, x1, x13 umulh x12, x1, x12 adcs x9, x9, x11 umulh x13, x1, x13 adcs x10, x10, x12 ldr x14, [sp, #352] mul x11, x1, x14 adc x11, x11, x13 mov x1, #0x9 ldp x20, x21, [sp, #360] mvn x20, x20 mul x0, x1, x20 umulh x20, x1, x20 adds x3, x3, x0 mvn x21, x21 mul x0, x1, x21 umulh x21, x1, x21 adcs x4, x4, x0 ldp x22, x23, [sp, #376] mvn x22, x22 mul x0, x1, x22 umulh x22, x1, x22 adcs x5, x5, x0 mvn x23, x23 mul x0, x1, x23 umulh x23, x1, x23 adcs x6, x6, x0 ldp x17, x19, [sp, #392] mvn x17, x17 mul x0, x1, x17 umulh x17, x1, x17 adcs x7, x7, x0 mvn x19, x19 mul x0, x1, x19 umulh x19, x1, x19 adcs x8, x8, x0 ldp x2, x16, [sp, #408] mvn x2, x2 mul x0, x1, x2 umulh x2, x1, x2 adcs x9, x9, x0 mvn x16, x16 mul x0, x1, x16 umulh x16, x1, x16 adcs x10, x10, x0 ldr x0, [sp, #424] eor x0, x0, #0x1ff mul x0, x1, x0 adc x11, x11, x0 adds x4, x4, x20 adcs x5, x5, x21 and x15, x4, x5 adcs x6, x6, x22 and x15, x15, x6 adcs x7, x7, x23 and x15, x15, x7 adcs x8, x8, x17 and x15, x15, x8 adcs x9, x9, x19 and x15, x15, x9 adcs x10, x10, x2 and x15, x15, x10 adc x11, x11, x16 lsr x12, x11, #9 orr x11, x11, #0xfffffffffffffe00 cmp xzr, xzr adcs xzr, x3, x12 adcs xzr, x15, xzr adcs xzr, x11, xzr adcs x3, x3, x12 adcs x4, x4, xzr adcs x5, x5, xzr adcs x6, x6, xzr adcs x7, x7, xzr adcs x8, x8, xzr adcs x9, x9, xzr adcs x10, x10, xzr adc x11, x11, xzr and x11, x11, #0x1ff stp x3, x4, [sp, #360] stp x5, x6, [sp, #376] stp x7, x8, [sp, #392] stp x9, x10, [sp, #408] str x11, [sp, #424] ldp x5, x6, [sp, #144] ldp x4, x3, [sp] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #160] ldp x4, x3, [sp, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #176] ldp x4, x3, [sp, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 ldp x11, x12, [sp, #192] ldp x4, x3, [sp, #48] sbcs x11, x11, x4 sbcs x12, x12, x3 ldr x13, [sp, #208] ldr x4, [sp, #64] sbcs x13, x13, x4 sbcs x5, x5, xzr sbcs x6, x6, xzr sbcs x7, x7, xzr sbcs x8, x8, xzr sbcs x9, x9, xzr sbcs x10, x10, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbcs x13, x13, xzr and x13, x13, #0x1ff stp x5, x6, [sp, #144] stp x7, x8, [sp, #160] stp x9, x10, [sp, #176] stp x11, x12, [sp, #192] str x13, [sp, #208] mov x0, sp add x1, sp, #0x48 CFI_BL(Lp521_jscalarmul_sqr_p521) add x0, sp, #0xd8 add x1, sp, #0x168 add x2, sp, #0xd8 CFI_BL(Lp521_jscalarmul_mul_p521) ldp x5, x6, [sp, #144] ldp x4, 
x3, [sp, #72] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [sp, #160] ldp x4, x3, [sp, #88] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [sp, #176] ldp x4, x3, [sp, #104] sbcs x9, x9, x4 sbcs x10, x10, x3 ldp x11, x12, [sp, #192] ldp x4, x3, [sp, #120] sbcs x11, x11, x4 sbcs x12, x12, x3 ldr x13, [sp, #208] ldr x4, [sp, #136] sbcs x13, x13, x4 sbcs x5, x5, xzr sbcs x6, x6, xzr sbcs x7, x7, xzr sbcs x8, x8, xzr sbcs x9, x9, xzr sbcs x10, x10, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbcs x13, x13, xzr and x13, x13, #0x1ff stp x5, x6, [x26, #144] stp x7, x8, [x26, #160] stp x9, x10, [x26, #176] stp x11, x12, [x26, #192] str x13, [x26, #208] ldp x6, x7, [sp, #288] lsl x3, x6, #2 extr x4, x7, x6, #62 ldp x8, x9, [sp, #304] extr x5, x8, x7, #62 extr x6, x9, x8, #62 ldp x10, x11, [sp, #320] extr x7, x10, x9, #62 extr x8, x11, x10, #62 ldp x12, x13, [sp, #336] extr x9, x12, x11, #62 extr x10, x13, x12, #62 ldr x14, [sp, #352] extr x11, x14, x13, #62 ldp x0, x1, [sp, #360] mvn x0, x0 adds x3, x3, x0 sbcs x4, x4, x1 ldp x0, x1, [sp, #376] sbcs x5, x5, x0 and x15, x4, x5 sbcs x6, x6, x1 and x15, x15, x6 ldp x0, x1, [sp, #392] sbcs x7, x7, x0 and x15, x15, x7 sbcs x8, x8, x1 and x15, x15, x8 ldp x0, x1, [sp, #408] sbcs x9, x9, x0 and x15, x15, x9 sbcs x10, x10, x1 and x15, x15, x10 ldr x0, [sp, #424] eor x0, x0, #0x1ff adc x11, x11, x0 lsr x12, x11, #9 orr x11, x11, #0xfffffffffffffe00 cmp xzr, xzr adcs xzr, x3, x12 adcs xzr, x15, xzr adcs xzr, x11, xzr adcs x3, x3, x12 adcs x4, x4, xzr adcs x5, x5, xzr adcs x6, x6, xzr adcs x7, x7, xzr adcs x8, x8, xzr adcs x9, x9, xzr adcs x10, x10, xzr adc x11, x11, xzr and x11, x11, #0x1ff stp x3, x4, [x26] stp x5, x6, [x26, #16] stp x7, x8, [x26, #32] stp x9, x10, [x26, #48] str x11, [x26, #64] ldp x6, x7, [sp, #216] lsl x3, x6, #1 adds x3, x3, x6 extr x4, x7, x6, #63 adcs x4, x4, x7 ldp x8, x9, [sp, #232] extr x5, x8, x7, #63 adcs x5, x5, x8 extr x6, x9, x8, #63 adcs x6, x6, x9 ldp x10, x11, [sp, #248] extr x7, x10, x9, #63 adcs x7, x7, x10 extr x8, x11, x10, #63 adcs x8, x8, x11 ldp x12, x13, [sp, #264] extr x9, x12, x11, #63 adcs x9, x9, x12 extr x10, x13, x12, #63 adcs x10, x10, x13 ldr x14, [sp, #280] extr x11, x14, x13, #63 adc x11, x11, x14 ldp x20, x21, [sp] mvn x20, x20 lsl x0, x20, #3 adds x3, x3, x0 mvn x21, x21 extr x0, x21, x20, #61 adcs x4, x4, x0 ldp x22, x23, [sp, #16] mvn x22, x22 extr x0, x22, x21, #61 adcs x5, x5, x0 and x15, x4, x5 mvn x23, x23 extr x0, x23, x22, #61 adcs x6, x6, x0 and x15, x15, x6 ldp x20, x21, [sp, #32] mvn x20, x20 extr x0, x20, x23, #61 adcs x7, x7, x0 and x15, x15, x7 mvn x21, x21 extr x0, x21, x20, #61 adcs x8, x8, x0 and x15, x15, x8 ldp x22, x23, [sp, #48] mvn x22, x22 extr x0, x22, x21, #61 adcs x9, x9, x0 and x15, x15, x9 mvn x23, x23 extr x0, x23, x22, #61 adcs x10, x10, x0 and x15, x15, x10 ldr x0, [sp, #64] eor x0, x0, #0x1ff extr x0, x0, x23, #61 adc x11, x11, x0 lsr x12, x11, #9 orr x11, x11, #0xfffffffffffffe00 cmp xzr, xzr adcs xzr, x3, x12 adcs xzr, x15, xzr adcs xzr, x11, xzr adcs x3, x3, x12 adcs x4, x4, xzr adcs x5, x5, xzr adcs x6, x6, xzr adcs x7, x7, xzr adcs x8, x8, xzr adcs x9, x9, xzr adcs x10, x10, xzr adc x11, x11, xzr and x11, x11, #0x1ff stp x3, x4, [x26, #72] stp x5, x6, [x26, #88] stp x7, x8, [x26, #104] stp x9, x10, [x26, #120] str x11, [x26, #136] CFI_INC_SP(512) CFI_POP2(x29,x30) CFI_POP2(x27,x28) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jscalarmul_jdouble) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jscalarmul_mul_p521) 
Lp521_jscalarmul_mul_p521: CFI_START CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) CFI_PUSH2(x25,x26) CFI_DEC_SP(80) ldr q6, [x2] ldp x10, x17, [x1, #16] ldr q4, [x1] ldr q16, [x2, #32] ldp x5, x20, [x2, #16] ldr q2, [x1, #32] movi v31.2D, #0x00000000ffffffff uzp2 v17.4S, v6.4S, v6.4S rev64 v7.4S, v6.4S ldp x15, x21, [x1] xtn v25.2S, v6.2D xtn v22.2S, v4.2D subs x14, x10, x17 mul v7.4S, v7.4S, v4.4S csetm x8, cc rev64 v3.4S, v16.4S xtn v1.2S, v16.2D ldp x13, x16, [x2] mul x26, x10, x5 uzp2 v16.4S, v16.4S, v16.4S uaddlp v26.2D, v7.4S cneg x4, x14, cc subs x24, x15, x21 xtn v5.2S, v2.2D mul v28.4S, v3.4S, v2.4S shl v26.2D, v26.2D, #32 mul x22, x17, x20 umull v20.2D, v22.2S, v25.2S uzp2 v6.4S, v4.4S, v4.4S umull v18.2D, v22.2S, v17.2S uzp2 v4.4S, v2.4S, v2.4S cneg x14, x24, cc csetm x7, cc umulh x11, x17, x20 usra v18.2D, v20.2D, #32 uaddlp v7.2D, v28.4S subs x19, x16, x13 umlal v26.2D, v22.2S, v25.2S cneg x19, x19, cc shl v28.2D, v7.2D, #32 umull v7.2D, v5.2S, v1.2S umull v30.2D, v5.2S, v16.2S cinv x6, x7, cc mul x25, x14, x19 umlal v28.2D, v5.2S, v1.2S umull v21.2D, v6.2S, v17.2S umulh x14, x14, x19 usra v30.2D, v7.2D, #32 subs x9, x20, x5 and v29.16B, v18.16B, v31.16B cinv x23, x8, cc mov x8, v26.d[1] cneg x12, x9, cc usra v21.2D, v18.2D, #32 umlal v29.2D, v6.2S, v25.2S mul x24, x4, x12 umull v18.2D, v4.2S, v16.2S movi v25.2D, #0x00000000ffffffff eor x9, x14, x6 and v7.16B, v30.16B, v25.16B usra v21.2D, v29.2D, #32 umulh x7, x10, x5 usra v18.2D, v30.2D, #32 umlal v7.2D, v4.2S, v1.2S mov x19, v21.d[0] umulh x3, x4, x12 mov x14, v21.d[1] usra v18.2D, v7.2D, #32 adds x4, x8, x19 mov x8, v26.d[0] adcs x19, x26, x14 adcs x14, x22, x7 adc x12, x11, xzr adds x11, x4, x8 adcs x26, x19, x4 adcs x22, x14, x19 eor x4, x24, x23 adcs x14, x12, x14 eor x7, x25, x6 adc x25, xzr, x12 eor x19, x3, x23 adds x3, x26, x8 adcs x24, x22, x11 adcs x12, x14, x26 adcs x22, x25, x22 adcs x26, xzr, x14 adc x14, xzr, x25 cmn x23, #0x1 adcs x22, x22, x4 adcs x19, x26, x19 adc x25, x14, x23 subs x14, x21, x17 cneg x23, x14, cc csetm x26, cc subs x4, x20, x16 cneg x14, x4, cc cinv x4, x26, cc cmn x6, #0x1 adcs x11, x11, x7 mul x7, x23, x14 adcs x9, x3, x9 adcs x26, x24, x6 umulh x3, x23, x14 adcs x14, x12, x6 adcs x22, x22, x6 adcs x12, x19, x6 extr x24, x11, x8, #55 adc x6, x25, x6 subs x19, x15, x17 csetm x17, cc cneg x23, x19, cc subs x19, x20, x13 lsl x25, x8, #9 eor x8, x7, x4 cneg x20, x19, cc umulh x7, x23, x20 cinv x19, x17, cc subs x17, x15, x10 csetm x15, cc stp x25, x24, [sp, #32] cneg x24, x17, cc mul x20, x23, x20 subs x25, x5, x13 cneg x13, x25, cc cinv x15, x15, cc mul x25, x24, x13 subs x21, x21, x10 csetm x23, cc cneg x17, x21, cc subs x21, x5, x16 umulh x13, x24, x13 cinv x10, x23, cc cneg x23, x21, cc cmn x4, #0x1 adcs x14, x14, x8 eor x21, x3, x4 adcs x21, x22, x21 eor x5, x20, x19 adcs x24, x12, x4 mul x12, x17, x23 eor x8, x25, x15 adc x25, x6, x4 cmn x15, #0x1 adcs x6, x9, x8 ldp x20, x8, [x2, #48] eor x9, x13, x15 adcs x4, x26, x9 umulh x26, x17, x23 ldp x17, x13, [x1, #48] adcs x9, x14, x15 adcs x16, x21, x15 adcs x14, x24, x15 eor x21, x7, x19 mul x23, x17, x20 adc x24, x25, x15 cmn x19, #0x1 adcs x7, x4, x5 adcs x9, x9, x21 umulh x3, x13, x8 adcs x16, x16, x19 adcs x22, x14, x19 eor x5, x12, x10 adc x12, x24, x19 cmn x10, #0x1 adcs x19, x7, x5 eor x14, x26, x10 mov x7, v28.d[1] adcs x24, x9, x14 extr x4, x19, x6, #55 umulh x15, x17, x20 mov x14, v18.d[1] lsr x9, x19, #55 adcs x5, x16, x10 mov x16, v18.d[0] adcs x19, x22, x10 str x9, [sp, #64] extr x25, x6, x11, #55 adc x21, x12, x10 
subs x26, x17, x13 stp x25, x4, [sp, #48] stp x19, x21, [sp, #16] csetm x6, cc cneg x4, x26, cc mul x19, x13, x8 subs x11, x8, x20 stp x24, x5, [sp] ldp x21, x10, [x1, #32] cinv x12, x6, cc cneg x6, x11, cc mov x9, v28.d[0] umulh x25, x4, x6 adds x22, x7, x16 ldp x16, x5, [x2, #32] adcs x14, x23, x14 adcs x11, x19, x15 adc x24, x3, xzr adds x3, x22, x9 adcs x15, x14, x22 mul x22, x4, x6 adcs x6, x11, x14 adcs x4, x24, x11 eor x14, x25, x12 adc x26, xzr, x24 subs x7, x21, x10 csetm x23, cc cneg x19, x7, cc subs x24, x5, x16 cneg x11, x24, cc cinv x7, x23, cc adds x25, x15, x9 eor x23, x22, x12 adcs x22, x6, x3 mul x24, x19, x11 adcs x15, x4, x15 adcs x6, x26, x6 umulh x19, x19, x11 adcs x11, xzr, x4 adc x26, xzr, x26 cmn x12, #0x1 adcs x4, x6, x23 eor x6, x24, x7 adcs x14, x11, x14 adc x26, x26, x12 subs x11, x10, x13 cneg x12, x11, cc csetm x11, cc eor x19, x19, x7 subs x24, x8, x5 cinv x11, x11, cc cneg x24, x24, cc cmn x7, #0x1 adcs x3, x3, x6 mul x23, x12, x24 adcs x25, x25, x19 adcs x6, x22, x7 umulh x19, x12, x24 adcs x22, x15, x7 adcs x12, x4, x7 eor x24, x23, x11 adcs x4, x14, x7 adc x26, x26, x7 eor x19, x19, x11 subs x14, x21, x17 cneg x7, x14, cc csetm x14, cc subs x23, x20, x16 cinv x14, x14, cc cneg x23, x23, cc cmn x11, #0x1 adcs x22, x22, x24 mul x24, x7, x23 adcs x15, x12, x19 adcs x4, x4, x11 adc x19, x26, x11 umulh x26, x7, x23 subs x7, x21, x13 eor x11, x24, x14 cneg x23, x7, cc csetm x12, cc subs x7, x8, x16 cneg x7, x7, cc cinv x12, x12, cc cmn x14, #0x1 eor x26, x26, x14 adcs x11, x25, x11 mul x25, x23, x7 adcs x26, x6, x26 adcs x6, x22, x14 adcs x24, x15, x14 umulh x23, x23, x7 adcs x4, x4, x14 adc x22, x19, x14 eor x14, x25, x12 eor x7, x23, x12 cmn x12, #0x1 adcs x14, x26, x14 ldp x19, x25, [x2] ldp x15, x23, [x2, #16] adcs x26, x6, x7 adcs x24, x24, x12 adcs x7, x4, x12 adc x4, x22, x12 subs x19, x19, x16 ldp x16, x22, [x1] sbcs x6, x25, x5 ldp x12, x25, [x1, #16] sbcs x15, x15, x20 sbcs x8, x23, x8 csetm x23, cc subs x21, x21, x16 eor x16, x19, x23 sbcs x19, x10, x22 eor x22, x6, x23 eor x8, x8, x23 sbcs x6, x17, x12 sbcs x13, x13, x25 csetm x12, cc subs x10, x10, x17 cneg x17, x10, cc csetm x25, cc subs x5, x20, x5 eor x10, x19, x12 cneg x19, x5, cc eor x20, x15, x23 eor x21, x21, x12 cinv x15, x25, cc mul x25, x17, x19 subs x16, x16, x23 sbcs x5, x22, x23 eor x6, x6, x12 sbcs x20, x20, x23 eor x22, x13, x12 sbc x8, x8, x23 subs x21, x21, x12 umulh x19, x17, x19 sbcs x10, x10, x12 sbcs x17, x6, x12 eor x6, x19, x15 eor x19, x25, x15 umulh x25, x17, x20 sbc x13, x22, x12 cmn x15, #0x1 adcs x22, x14, x19 adcs x19, x26, x6 ldp x6, x26, [sp] adcs x14, x24, x15 umulh x24, x21, x16 adcs x7, x7, x15 adc x15, x4, x15 adds x4, x9, x6 eor x9, x23, x12 adcs x12, x3, x26 stp x4, x12, [sp] ldp x4, x26, [sp, #16] umulh x12, x10, x5 ldp x6, x23, [sp, #32] adcs x3, x11, x4 mul x4, x13, x8 adcs x26, x22, x26 ldp x22, x11, [sp, #48] adcs x6, x19, x6 stp x3, x26, [sp, #16] mul x26, x10, x5 adcs x14, x14, x23 stp x6, x14, [sp, #32] ldr x6, [sp, #64] adcs x22, x7, x22 adcs x14, x15, x11 mul x11, x17, x20 adc x19, x6, xzr stp x22, x14, [sp, #48] adds x14, x26, x24 str x19, [sp, #64] umulh x19, x13, x8 adcs x7, x11, x12 adcs x22, x4, x25 mul x6, x21, x16 adc x19, x19, xzr subs x11, x17, x13 cneg x12, x11, cc csetm x11, cc subs x24, x8, x20 cinv x11, x11, cc cneg x24, x24, cc adds x4, x14, x6 adcs x14, x7, x14 mul x3, x12, x24 adcs x7, x22, x7 adcs x22, x19, x22 umulh x12, x12, x24 adc x24, xzr, x19 adds x19, x14, x6 eor x3, x3, x11 adcs x26, x7, x4 adcs x14, x22, x14 adcs x25, x24, x7 
adcs x23, xzr, x22 eor x7, x12, x11 adc x12, xzr, x24 subs x22, x21, x10 cneg x24, x22, cc csetm x22, cc subs x15, x5, x16 cinv x22, x22, cc cneg x15, x15, cc cmn x11, #0x1 adcs x3, x25, x3 mul x25, x24, x15 adcs x23, x23, x7 adc x11, x12, x11 subs x7, x10, x13 umulh x15, x24, x15 cneg x12, x7, cc csetm x7, cc eor x24, x25, x22 eor x25, x15, x22 cmn x22, #0x1 adcs x24, x4, x24 adcs x19, x19, x25 adcs x15, x26, x22 adcs x4, x14, x22 adcs x26, x3, x22 adcs x25, x23, x22 adc x23, x11, x22 subs x14, x21, x17 cneg x3, x14, cc csetm x11, cc subs x14, x8, x5 cneg x14, x14, cc cinv x7, x7, cc subs x13, x21, x13 cneg x21, x13, cc csetm x13, cc mul x22, x12, x14 subs x8, x8, x16 cinv x13, x13, cc umulh x14, x12, x14 cneg x12, x8, cc subs x8, x20, x16 cneg x8, x8, cc cinv x16, x11, cc eor x22, x22, x7 cmn x7, #0x1 eor x14, x14, x7 adcs x4, x4, x22 mul x11, x3, x8 adcs x22, x26, x14 adcs x14, x25, x7 eor x25, x24, x9 adc x26, x23, x7 umulh x7, x3, x8 subs x17, x10, x17 cneg x24, x17, cc eor x3, x11, x16 csetm x11, cc subs x20, x20, x5 cneg x5, x20, cc cinv x11, x11, cc cmn x16, #0x1 mul x17, x21, x12 eor x8, x7, x16 adcs x10, x19, x3 and x19, x9, #0x1ff adcs x20, x15, x8 umulh x15, x21, x12 eor x12, x10, x9 eor x8, x6, x9 adcs x6, x4, x16 adcs x4, x22, x16 adcs x21, x14, x16 adc x7, x26, x16 mul x10, x24, x5 cmn x13, #0x1 ldp x3, x14, [x1] eor x17, x17, x13 umulh x5, x24, x5 adcs x20, x20, x17 eor x17, x15, x13 adcs x16, x6, x17 eor x22, x10, x11 adcs x23, x4, x13 extr x10, x14, x3, #52 and x26, x3, #0xfffffffffffff adcs x24, x21, x13 and x15, x10, #0xfffffffffffff adc x6, x7, x13 cmn x11, #0x1 adcs x17, x20, x22 eor x4, x5, x11 ldp x21, x10, [sp] adcs x7, x16, x4 eor x16, x17, x9 eor x13, x7, x9 ldp x3, x17, [sp, #16] adcs x7, x23, x11 eor x23, x7, x9 ldp x5, x22, [sp, #32] adcs x7, x24, x11 adc x24, x6, x11 ldr x6, [x2, #64] adds x20, x8, x21 lsl x11, x20, #9 eor x4, x7, x9 orr x7, x11, x19 eor x8, x24, x9 adcs x11, x25, x10 mul x26, x6, x26 ldp x19, x24, [sp, #48] adcs x12, x12, x3 adcs x16, x16, x17 adcs x9, x13, x5 ldr x25, [sp, #64] extr x20, x11, x20, #55 adcs x13, x23, x22 adcs x4, x4, x19 extr x23, x12, x11, #55 adcs x8, x8, x24 adc x11, x25, xzr adds x21, x9, x21 extr x9, x16, x12, #55 lsr x12, x16, #55 adcs x10, x13, x10 mul x15, x6, x15 adcs x13, x4, x3 ldp x16, x4, [x2] ldr x3, [x1, #64] adcs x17, x8, x17 adcs x5, x5, x7 adcs x20, x22, x20 adcs x8, x19, x23 and x22, x16, #0xfffffffffffff ldp x19, x7, [x1, #16] adcs x9, x24, x9 extr x24, x4, x16, #52 adc x16, x12, x25 mul x22, x3, x22 and x25, x24, #0xfffffffffffff extr x14, x19, x14, #40 and x12, x14, #0xfffffffffffff extr x23, x7, x19, #28 ldp x19, x24, [x2, #16] mul x14, x3, x25 and x23, x23, #0xfffffffffffff add x22, x26, x22 lsl x11, x11, #48 lsr x26, x22, #52 lsl x25, x22, #12 mul x22, x6, x12 extr x12, x19, x4, #40 add x4, x15, x14 mul x15, x6, x23 add x4, x4, x26 extr x23, x24, x19, #28 ldp x14, x19, [x1, #32] and x26, x12, #0xfffffffffffff extr x12, x4, x25, #12 and x25, x23, #0xfffffffffffff adds x21, x21, x12 mul x12, x3, x26 extr x23, x14, x7, #16 and x23, x23, #0xfffffffffffff mul x7, x3, x25 ldp x25, x26, [x2, #32] add x12, x22, x12 extr x22, x19, x14, #56 mul x23, x6, x23 lsr x14, x14, #4 extr x24, x25, x24, #16 add x7, x15, x7 and x15, x24, #0xfffffffffffff and x22, x22, #0xfffffffffffff lsr x24, x4, #52 mul x15, x3, x15 and x14, x14, #0xfffffffffffff add x12, x12, x24 lsl x24, x4, #12 lsr x4, x12, #52 extr x24, x12, x24, #24 adcs x10, x10, x24 lsl x24, x12, #12 add x12, x7, x4 mul x22, x6, x22 add x4, x23, x15 extr x7, 
x12, x24, #36 adcs x13, x13, x7 lsl x15, x12, #12 add x7, x4, x11 lsr x24, x12, #52 ldp x23, x11, [x2, #48] add x4, x7, x24 mul x12, x6, x14 extr x7, x26, x25, #56 extr x14, x4, x15, #48 and x2, x7, #0xfffffffffffff extr x24, x11, x23, #32 ldp x15, x7, [x1, #48] and x1, x24, #0xfffffffffffff lsr x24, x4, #52 mul x2, x3, x2 extr x26, x23, x26, #44 lsr x23, x25, #4 and x23, x23, #0xfffffffffffff and x25, x26, #0xfffffffffffff extr x26, x7, x15, #32 extr x19, x15, x19, #44 mul x23, x3, x23 and x15, x26, #0xfffffffffffff lsl x26, x4, #12 and x4, x19, #0xfffffffffffff lsr x11, x11, #20 mul x19, x6, x4 adcs x17, x17, x14 add x14, x22, x2 add x22, x12, x23 lsr x7, x7, #20 add x22, x22, x24 extr x2, x22, x26, #60 mul x24, x3, x25 lsr x22, x22, #52 add x14, x14, x22 lsl x22, x2, #8 extr x22, x14, x22, #8 lsl x2, x14, #12 mul x1, x3, x1 adcs x12, x5, x22 mul x5, x6, x15 and x26, x10, x13 and x4, x26, x17 add x23, x19, x24 lsr x14, x14, #52 mul x22, x3, x11 add x11, x23, x14 extr x25, x11, x2, #20 lsl x19, x11, #12 adcs x25, x20, x25 and x14, x4, x12 add x1, x5, x1 and x14, x14, x25 mul x15, x6, x7 add x26, x15, x22 mul x6, x6, x3 lsr x22, x11, #52 add x4, x1, x22 lsr x1, x4, #52 extr x3, x4, x19, #32 lsl x15, x4, #12 add x7, x26, x1 adcs x23, x8, x3 extr x20, x7, x15, #44 and x3, x14, x23 lsr x19, x7, #44 adcs x7, x9, x20 add x11, x6, x19 adc x4, x16, x11 lsr x14, x4, #9 cmp xzr, xzr and x15, x3, x7 orr x3, x4, #0xfffffffffffffe00 adcs xzr, x21, x14 adcs xzr, x15, xzr adcs xzr, x3, xzr adcs x11, x21, x14 and x14, x11, #0x1ff adcs x1, x10, xzr extr x10, x1, x11, #9 str x14, [x0, #64] adcs x14, x13, xzr extr x11, x14, x1, #9 adcs x1, x17, xzr extr x4, x1, x14, #9 stp x10, x11, [x0] adcs x11, x12, xzr extr x14, x11, x1, #9 adcs x10, x25, xzr extr x11, x10, x11, #9 stp x4, x14, [x0, #16] adcs x14, x23, xzr extr x10, x14, x10, #9 adcs x1, x7, xzr stp x11, x10, [x0, #32] extr x14, x1, x14, #9 adc x10, x3, xzr extr x26, x10, x1, #9 stp x14, x26, [x0, #48] CFI_INC_SP(80) CFI_POP2(x25,x26) CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jscalarmul_mul_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jscalarmul_sqr_p521) Lp521_jscalarmul_sqr_p521: CFI_START CFI_PUSH2(x19,x20) CFI_PUSH2(x21,x22) CFI_PUSH2(x23,x24) ldr q23, [x1, #32] ldp x9, x2, [x1, #32] ldr q16, [x1, #32] ldr q20, [x1, #48] ldp x6, x13, [x1, #48] rev64 v2.4S, v23.4S mul x14, x9, x2 ldr q31, [x1, #48] subs x22, x9, x2 uzp2 v26.4S, v23.4S, v23.4S mul v30.4S, v2.4S, v16.4S xtn v0.2S, v20.2D csetm x12, cc xtn v21.2S, v16.2D xtn v23.2S, v23.2D umulh x10, x9, x6 rev64 v27.4S, v31.4S umull v2.2D, v21.2S, v26.2S cneg x23, x22, cc uaddlp v25.2D, v30.4S umull v18.2D, v21.2S, v23.2S mul x22, x9, x6 mul v6.4S, v27.4S, v20.4S uzp2 v17.4S, v20.4S, v20.4S shl v20.2D, v25.2D, #32 uzp2 v27.4S, v31.4S, v31.4S mul x16, x2, x13 umlal v20.2D, v21.2S, v23.2S usra v2.2D, v18.2D, #32 adds x8, x22, x10 umull v25.2D, v17.2S, v27.2S xtn v31.2S, v31.2D movi v1.2D, #0xffffffff adc x3, x10, xzr umulh x21, x2, x13 uzp2 v21.4S, v16.4S, v16.4S umull v18.2D, v0.2S, v27.2S subs x19, x13, x6 and v7.16B, v2.16B, v1.16B umull v27.2D, v0.2S, v31.2S cneg x20, x19, cc movi v30.2D, #0xffffffff umull v16.2D, v21.2S, v26.2S umlal v7.2D, v21.2S, v23.2S mul x19, x23, x20 cinv x7, x12, cc uaddlp v6.2D, v6.4S eor x12, x19, x7 adds x11, x8, x16 umulh x10, x23, x20 ldr q1, [x1] usra v16.2D, v2.2D, #32 adcs x19, x3, x21 shl v2.2D, v6.2D, #32 adc x20, x21, xzr adds x17, x19, x16 usra v18.2D, v27.2D, #32 adc x19, x20, xzr cmn x7, #0x1 umlal v2.2D, v0.2S, 
v31.2S umulh x16, x9, x2 adcs x8, x11, x12 usra v16.2D, v7.2D, #32 ldr x12, [x1, #64] eor x20, x10, x7 umulh x10, x6, x13 mov x23, v2.d[0] mov x3, v2.d[1] adcs x21, x17, x20 usra v25.2D, v18.2D, #32 and v23.16B, v18.16B, v30.16B adc x7, x19, x7 adds x22, x22, x22 ldr q7, [x1, #16] adcs x17, x8, x8 umlal v23.2D, v17.2S, v31.2S mov x19, v16.d[0] mul x11, x12, x12 ldr q4, [x1] usra v25.2D, v23.2D, #32 add x5, x12, x12 adcs x15, x21, x21 ldr q28, [x1] mov x12, v20.d[1] adcs x24, x7, x7 mov x21, v16.d[1] adc x4, xzr, xzr adds x19, x19, x14 ldr q18, [x1, #16] xtn v26.2S, v1.2D adcs x8, x12, x16 adc x21, x21, xzr adds x7, x19, x14 xtn v23.2S, v7.2D rev64 v21.4S, v28.4S adcs x12, x8, x16 ldp x20, x19, [x1] mov x16, v25.d[1] xtn v22.2S, v28.2D adc x14, x21, xzr adds x8, x22, x12 uzp2 v24.4S, v28.4S, v28.4S rev64 v28.4S, v18.4S mul x12, x6, x13 mul v16.4S, v21.4S, v1.4S shrn v31.2S, v7.2D, #32 adcs x22, x17, x14 mov x14, v25.d[0] and x21, x20, #0xfffffffffffff umull v17.2D, v26.2S, v24.2S ldr q2, [x1, #32] adcs x17, x15, xzr ldr q30, [x1, #48] umull v7.2D, v26.2S, v22.2S adcs x15, x24, xzr ldr q0, [x1, #16] movi v6.2D, #0xffffffff adc x4, x4, xzr adds x14, x14, x12 uzp1 v27.4S, v18.4S, v4.4S uzp2 v19.4S, v1.4S, v1.4S adcs x24, x3, x10 mul x3, x5, x21 umull v29.2D, v23.2S, v31.2S ldr q5, [x1] adc x21, x16, xzr adds x16, x14, x12 extr x12, x19, x20, #52 umull v18.2D, v19.2S, v24.2S adcs x24, x24, x10 and x10, x12, #0xfffffffffffff ldp x14, x12, [x1, #16] usra v17.2D, v7.2D, #32 adc x21, x21, xzr adds x23, x23, x17 mul x17, x5, x10 shl v21.2D, v29.2D, #33 lsl x10, x3, #12 lsr x1, x3, #52 rev64 v29.4S, v2.4S uaddlp v25.2D, v16.4S add x17, x17, x1 adcs x16, x16, x15 extr x3, x14, x19, #40 mov x15, v20.d[0] extr x10, x17, x10, #12 and x3, x3, #0xfffffffffffff shl v3.2D, v25.2D, #32 and v6.16B, v17.16B, v6.16B mul x1, x5, x3 usra v18.2D, v17.2D, #32 adcs x3, x24, x4 extr x4, x12, x14, #28 umlal v6.2D, v19.2S, v22.2S xtn v20.2S, v2.2D umlal v3.2D, v26.2S, v22.2S movi v26.2D, #0xffffffff lsr x24, x17, #52 and x4, x4, #0xfffffffffffff uzp2 v19.4S, v2.4S, v2.4S add x1, x1, x24 mul x24, x5, x4 lsl x4, x17, #12 xtn v24.2S, v5.2D extr x17, x1, x4, #24 adc x21, x21, xzr umlal v21.2D, v23.2S, v23.2S adds x4, x15, x10 lsl x10, x1, #12 adcs x15, x7, x17 mul v23.4S, v28.4S, v4.4S and x7, x4, #0x1ff lsr x17, x1, #52 umulh x1, x19, x12 uzp2 v17.4S, v5.4S, v5.4S extr x4, x15, x4, #9 add x24, x24, x17 mul v29.4S, v29.4S, v5.4S extr x17, x24, x10, #36 extr x10, x9, x12, #16 uzp1 v28.4S, v4.4S, v4.4S adcs x17, x8, x17 and x8, x10, #0xfffffffffffff umull v16.2D, v24.2S, v20.2S extr x10, x17, x15, #9 mul x15, x5, x8 stp x4, x10, [x0] lsl x4, x24, #12 lsr x8, x9, #4 uaddlp v4.2D, v23.4S and x8, x8, #0xfffffffffffff umull v23.2D, v24.2S, v19.2S mul x8, x5, x8 extr x10, x2, x9, #56 lsr x24, x24, #52 and x10, x10, #0xfffffffffffff add x15, x15, x24 extr x4, x15, x4, #48 mul x24, x5, x10 lsr x10, x15, #52 usra v23.2D, v16.2D, #32 add x10, x8, x10 shl v4.2D, v4.2D, #32 adcs x22, x22, x4 extr x4, x6, x2, #44 lsl x15, x15, #12 lsr x8, x10, #52 extr x15, x10, x15, #60 and x10, x4, #0xfffffffffffff umlal v4.2D, v28.2S, v27.2S add x8, x24, x8 extr x4, x13, x6, #32 mul x24, x5, x10 uzp2 v16.4S, v30.4S, v30.4S lsl x10, x15, #8 rev64 v28.4S, v30.4S and x15, x4, #0xfffffffffffff extr x4, x8, x10, #8 mul x10, x5, x15 lsl x15, x8, #12 adcs x23, x23, x4 lsr x4, x8, #52 lsr x8, x13, #20 add x4, x24, x4 mul x8, x5, x8 lsr x24, x4, #52 extr x15, x4, x15, #20 lsl x4, x4, #12 add x10, x10, x24 adcs x15, x16, x15 extr x4, x10, x4, #32 umulh x5, x20, 
x14 adcs x3, x3, x4 usra v18.2D, v6.2D, #32 lsl x16, x10, #12 extr x24, x15, x23, #9 lsr x10, x10, #52 uzp2 v27.4S, v0.4S, v0.4S add x8, x8, x10 extr x10, x3, x15, #9 extr x4, x22, x17, #9 and v25.16B, v23.16B, v26.16B lsr x17, x8, #44 extr x15, x8, x16, #44 extr x16, x23, x22, #9 xtn v7.2S, v30.2D mov x8, v4.d[0] stp x24, x10, [x0, #32] uaddlp v30.2D, v29.4S stp x4, x16, [x0, #16] umulh x24, x20, x19 adcs x15, x21, x15 adc x16, x11, x17 subs x11, x20, x19 xtn v5.2S, v0.2D csetm x17, cc extr x3, x15, x3, #9 mov x22, v4.d[1] cneg x21, x11, cc subs x10, x12, x14 mul v31.4S, v28.4S, v0.4S cneg x10, x10, cc cinv x11, x17, cc shl v4.2D, v30.2D, #32 umull v28.2D, v5.2S, v16.2S extr x23, x16, x15, #9 adds x4, x8, x5 mul x17, x21, x10 umull v22.2D, v5.2S, v7.2S adc x15, x5, xzr adds x4, x4, x22 uaddlp v2.2D, v31.4S lsr x5, x16, #9 adcs x16, x15, x1 mov x15, v18.d[0] adc x1, x1, xzr umulh x10, x21, x10 adds x22, x16, x22 umlal v4.2D, v24.2S, v20.2S umull v30.2D, v27.2S, v16.2S stp x3, x23, [x0, #48] add x3, x7, x5 adc x16, x1, xzr usra v28.2D, v22.2D, #32 mul x23, x20, x19 eor x1, x17, x11 cmn x11, #0x1 mov x17, v18.d[1] umull v18.2D, v17.2S, v19.2S adcs x7, x4, x1 eor x1, x10, x11 umlal v25.2D, v17.2S, v20.2S movi v16.2D, #0xffffffff adcs x22, x22, x1 usra v18.2D, v23.2D, #32 umulh x4, x14, x14 adc x1, x16, x11 adds x10, x8, x8 shl v23.2D, v2.2D, #32 str x3, [x0, #64] adcs x5, x7, x7 and v16.16B, v28.16B, v16.16B usra v30.2D, v28.2D, #32 adcs x7, x22, x22 mov x21, v3.d[1] adcs x11, x1, x1 umlal v16.2D, v27.2S, v7.2S adc x22, xzr, xzr adds x16, x15, x23 mul x8, x14, x12 umlal v23.2D, v5.2S, v7.2S usra v18.2D, v25.2D, #32 umulh x15, x14, x12 adcs x21, x21, x24 usra v30.2D, v16.2D, #32 adc x1, x17, xzr adds x3, x16, x23 adcs x21, x21, x24 adc x1, x1, xzr adds x24, x10, x21 umulh x21, x12, x12 adcs x16, x5, x1 adcs x10, x7, xzr mov x17, v21.d[1] adcs x23, x11, xzr adc x5, x22, xzr adds x1, x4, x8 adcs x22, x17, x15 ldp x17, x4, [x0] mov x11, v21.d[0] adc x21, x21, xzr adds x1, x1, x8 adcs x15, x22, x15 adc x8, x21, xzr adds x22, x11, x10 mov x21, v3.d[0] adcs x11, x1, x23 ldp x1, x10, [x0, #16] adcs x15, x15, x5 adc x7, x8, xzr adds x8, x17, x21 mov x23, v4.d[1] ldp x5, x21, [x0, #32] adcs x17, x4, x3 ldr x4, [x0, #64] mov x3, v18.d[0] adcs x24, x1, x24 stp x8, x17, [x0] adcs x17, x10, x16 ldp x1, x16, [x0, #48] adcs x5, x5, x22 adcs x8, x21, x11 stp x5, x8, [x0, #32] adcs x1, x1, x15 mov x15, v23.d[1] adcs x21, x16, x7 stp x1, x21, [x0, #48] adc x10, x4, xzr subs x7, x14, x12 mov x16, v18.d[1] cneg x5, x7, cc csetm x4, cc subs x11, x13, x6 mov x8, v23.d[0] cneg x7, x11, cc cinv x21, x4, cc mov x11, v30.d[0] adds x4, x23, x3 mul x22, x5, x7 mov x23, v30.d[1] adcs x8, x8, x16 adcs x16, x15, x11 adc x11, x23, xzr umulh x3, x5, x7 stp x24, x17, [x0, #16] mov x5, v4.d[0] subs x15, x20, x19 cneg x7, x15, cc str x10, [x0, #64] csetm x1, cc subs x24, x2, x9 cneg x17, x24, cc cinv x15, x1, cc adds x23, x4, x5 umulh x1, x7, x17 adcs x24, x8, x4 adcs x10, x16, x8 eor x8, x22, x21 adcs x16, x11, x16 mul x22, x7, x17 eor x17, x1, x15 adc x1, xzr, x11 adds x11, x24, x5 eor x7, x3, x21 adcs x3, x10, x23 adcs x24, x16, x24 adcs x4, x1, x10 eor x10, x22, x15 adcs x16, xzr, x16 adc x1, xzr, x1 cmn x21, #0x1 adcs x8, x4, x8 adcs x22, x16, x7 adc x7, x1, x21 subs x21, x19, x12 csetm x4, cc cneg x1, x21, cc subs x21, x13, x2 cinv x16, x4, cc cneg x4, x21, cc cmn x15, #0x1 adcs x21, x23, x10 mul x23, x1, x4 adcs x11, x11, x17 adcs x3, x3, x15 umulh x1, x1, x4 adcs x24, x24, x15 adcs x8, x8, x15 adcs x22, x22, x15 eor x17, 
x23, x16 adc x15, x7, x15 subs x7, x20, x14 cneg x7, x7, cc csetm x4, cc subs x10, x20, x12 cneg x23, x10, cc csetm x10, cc subs x12, x6, x9 cinv x20, x4, cc cneg x12, x12, cc cmn x16, #0x1 eor x1, x1, x16 adcs x17, x24, x17 mul x4, x7, x12 adcs x8, x8, x1 umulh x1, x7, x12 adcs x24, x22, x16 adc x7, x15, x16 subs x12, x13, x9 cneg x12, x12, cc cinv x13, x10, cc subs x19, x19, x14 mul x9, x23, x12 cneg x19, x19, cc csetm x10, cc eor x16, x1, x20 subs x22, x6, x2 umulh x12, x23, x12 eor x1, x4, x20 cinv x4, x10, cc cneg x22, x22, cc cmn x20, #0x1 adcs x15, x11, x1 eor x6, x12, x13 adcs x10, x3, x16 adcs x17, x17, x20 eor x23, x9, x13 adcs x2, x8, x20 mul x11, x19, x22 adcs x24, x24, x20 adc x7, x7, x20 cmn x13, #0x1 adcs x3, x10, x23 umulh x22, x19, x22 adcs x17, x17, x6 eor x12, x22, x4 extr x22, x15, x21, #63 adcs x8, x2, x13 extr x21, x21, x5, #63 ldp x16, x23, [x0] adcs x20, x24, x13 eor x1, x11, x4 adc x6, x7, x13 cmn x4, #0x1 ldp x2, x7, [x0, #16] adcs x1, x3, x1 extr x19, x1, x15, #63 adcs x14, x17, x12 extr x1, x14, x1, #63 lsl x17, x5, #1 adcs x8, x8, x4 extr x12, x8, x14, #8 ldp x15, x11, [x0, #32] adcs x9, x20, x4 adc x3, x6, x4 adds x16, x12, x16 extr x6, x9, x8, #8 ldp x14, x12, [x0, #48] extr x8, x3, x9, #8 adcs x20, x6, x23 ldr x24, [x0, #64] lsr x6, x3, #8 adcs x8, x8, x2 and x2, x1, #0x1ff and x1, x20, x8 adcs x4, x6, x7 adcs x3, x17, x15 and x1, x1, x4 adcs x9, x21, x11 and x1, x1, x3 adcs x6, x22, x14 and x1, x1, x9 and x21, x1, x6 adcs x14, x19, x12 adc x1, x24, x2 cmp xzr, xzr orr x12, x1, #0xfffffffffffffe00 lsr x1, x1, #9 adcs xzr, x16, x1 and x21, x21, x14 adcs xzr, x21, xzr adcs xzr, x12, xzr adcs x21, x16, x1 adcs x1, x20, xzr adcs x19, x8, xzr stp x21, x1, [x0] adcs x1, x4, xzr adcs x21, x3, xzr stp x19, x1, [x0, #16] adcs x1, x9, xzr stp x21, x1, [x0, #32] adcs x21, x6, xzr adcs x1, x14, xzr stp x21, x1, [x0, #48] adc x1, x12, xzr and x1, x1, #0x1ff str x1, [x0, #64] CFI_POP2(x23,x24) CFI_POP2(x21,x22) CFI_POP2(x19,x20) CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jscalarmul_sqr_p521) S2N_BN_FUNCTION_TYPE_DIRECTIVE(Lp521_jscalarmul_sub_p521) Lp521_jscalarmul_sub_p521: CFI_START ldp x5, x6, [x1] ldp x4, x3, [x2] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x1, #16] ldp x4, x3, [x2, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [x1, #32] ldp x4, x3, [x2, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 ldp x11, x12, [x1, #48] ldp x4, x3, [x2, #48] sbcs x11, x11, x4 sbcs x12, x12, x3 ldr x13, [x1, #64] ldr x4, [x2, #64] sbcs x13, x13, x4 sbcs x5, x5, xzr sbcs x6, x6, xzr sbcs x7, x7, xzr sbcs x8, x8, xzr sbcs x9, x9, xzr sbcs x10, x10, xzr sbcs x11, x11, xzr sbcs x12, x12, xzr sbcs x13, x13, xzr and x13, x13, #0x1ff stp x5, x6, [x0] stp x7, x8, [x0, #16] stp x9, x10, [x0, #32] stp x11, x12, [x0, #48] str x13, [x0, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(Lp521_jscalarmul_sub_p521) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
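The Lp521_jscalarmul_sub_p521 routine above reduces modulo p_521 = 2^521 - 1 by a plain 9-limb subtraction followed by folding the final borrow back in and masking to 521 bits. The following C fragment is a reference-only sketch of that arithmetic, not part of s2n-bignum; the function name and the use of the unsigned __int128 extension are ours.

#include <stdint.h>

/* Reference-only sketch: z := (x - y) mod p_521 with p_521 = 2^521 - 1,
 * inputs assumed already reduced below p_521. */
static void ref_sub_p521(uint64_t z[9], const uint64_t x[9], const uint64_t y[9])
{
    uint64_t b = 0;                                   /* running borrow */

    /* First pass: 9-limb subtraction x - y, keeping the final borrow. */
    for (int i = 0; i < 9; i++) {
        unsigned __int128 d = (unsigned __int128)x[i] - y[i] - b;
        z[i] = (uint64_t)d;
        b = (uint64_t)(d >> 64) & 1;                  /* 1 iff this limb borrowed */
    }

    /* Second pass: subtract the borrow once more and wrap at bit 521.
     * Going negative is corrected by subtracting 1 and reducing mod 2^521,
     * which is exactly what the trailing sbcs/and sequence above does. */
    for (int i = 0; i < 9; i++) {
        unsigned __int128 d = (unsigned __int128)z[i] - b;
        z[i] = (uint64_t)d;
        b = (uint64_t)(d >> 64) & 1;
    }
    z[8] &= 0x1FF;                                    /* keep only 521 bits */
}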
wlsfx/bnbb
2,096
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_demont_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Convert from Montgomery form z := (x / 2^576) mod p_521, assuming x reduced
// Input x[9]; output z[9]
//
//    extern void bignum_demont_p521(uint64_t z[static 9],
//                                   const uint64_t x[static 9]);
//
// This assumes the input is < p_521 for correctness. If this is not the case,
// use the variant "bignum_deamont_p521" instead.
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_demont_p521)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_demont_p521)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_demont_p521)
        .text
        .balign 4

// Input parameters

#define z x0
#define x x1

// Rotating registers for the intermediate windows

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x2
#define d5 x3
#define d6 x4
#define d7 x5
#define d8 x2

#define c x6

S2N_BN_SYMBOL(bignum_demont_p521):
        CFI_START

// Rotate, as a 521-bit quantity, by 9*64 - 521 = 55 bits right.

        ldp     d0, d1, [x]
        lsl     c, d0, #9
        extr    d0, d1, d0, #55
        ldp     d2, d3, [x, #16]
        extr    d1, d2, d1, #55
        stp     d0, d1, [z]
        extr    d2, d3, d2, #55
        ldp     d4, d5, [x, #32]
        extr    d3, d4, d3, #55
        stp     d2, d3, [z, #16]
        extr    d4, d5, d4, #55
        ldp     d6, d7, [x, #48]
        extr    d5, d6, d5, #55
        stp     d4, d5, [z, #32]
        extr    d6, d7, d6, #55
        ldr     d8, [x, #64]
        orr     d8, d8, c
        extr    d7, d8, d7, #55
        stp     d6, d7, [z, #48]
        lsr     d8, d8, #55
        str     d8, [z, #64]

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_demont_p521)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
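Since 2^521 == 1 (mod p_521), dividing by 2^576 = 2^55 * 2^521 is the same as dividing by 2^55, so the demontgomery conversion above is nothing but a 55-bit right rotation of a 521-bit quantity. The fragment below is a reference-only, bit-at-a-time C sketch of that rotation; the names are ours and it is not part of the library.

#include <stdint.h>

/* Reference-only sketch: rotate a 521-bit value right by 55 bits, which
 * equals (x / 2^576) mod p_521 for reduced x. */
static uint64_t bit_at(const uint64_t x[9], unsigned i)   /* i < 521 */
{
    return (x[i / 64] >> (i % 64)) & 1;
}

static void ref_demont_p521(uint64_t z[9], const uint64_t x[9])
{
    for (int i = 0; i < 9; i++)
        z[i] = 0;
    for (unsigned i = 0; i < 521; i++) {
        unsigned src = (i + 55) % 521;          /* output bit i comes from input bit src */
        z[i / 64] |= bit_at(x, src) << (i % 64);
    }
}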
wlsfx/bnbb
4,328
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_mod_n521_9.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Reduce modulo group order, z := x mod n_521 // Input x[9]; output z[9] // // extern void bignum_mod_n521_9(uint64_t z[static 9], const uint64_t x[static 9]); // // Reduction is modulo the group order of the NIST curve P-521. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_n521_9) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_n521_9) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_n521_9) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_n521_9_alt) S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_mod_n521_9_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_n521_9_alt) .text .balign 4 #define z x0 #define x x1 #define n0 x2 #define n1 x3 #define n2 x4 #define n3 x5 #define d0 x6 #define d1 x7 #define d2 x8 #define d3 x9 #define d4 x10 #define d5 x11 #define d6 x12 #define d7 x13 #define d8 x14 #define q x15 // Re-use d6 and d7 as temporaries before they are needed #define s d6 #define t d7 #define movbig(nn,n3,n2,n1,n0) \ movz nn, n0 __LF \ movk nn, n1, lsl #16 __LF \ movk nn, n2, lsl #32 __LF \ movk nn, n3, lsl #48 S2N_BN_SYMBOL(bignum_mod_n521_9): S2N_BN_SYMBOL(bignum_mod_n521_9_alt): CFI_START // Load the top digit first into d8. // The initial quotient estimate is q = h + 1 where x = 2^521 * h + t ldr d8, [x, #64] lsr q, d8, #9 add q, q, #1 // Let [5; n3; n2; n1; n0] = r_521 = 2^521 - n_521 // and form [d4;d3;d2;d1;d0] = q * r_521 movbig( n0, #0x4490, #0x48e1, #0x6ec7, #0x9bf7) mul d0, n0, q movbig( n1, #0xc44a, #0x3647, #0x7663, #0xb851) mul d1, n1, q movbig( n2, #0x8033, #0xfeb7, #0x08f6, #0x5a2f) mul d2, n2, q movbig( n3, #0xae79, #0x787c, #0x40d0, #0x6994) mul d3, n3, q lsl d4, q, #2 add d4, d4, q umulh t, n0, q adds d1, d1, t umulh t, n1, q adcs d2, d2, t umulh t, n2, q adcs d3, d3, t umulh t, n3, q adc d4, d4, t // Now load other digits and form r = x - q * n_521 = (q * r_521 + t) - 2^521. // But the computed result stuffs in 1s from bit 521 onwards and actually // gives r' = (q * r_521 + t) + (2^576 - 2^521) = r + 2^576, including the // top carry. Hence CF <=> r >= 0, while r' == r (mod 2^521). ldp s, t, [x] adds d0, d0, s adcs d1, d1, t ldp s, t, [x, #16] adcs d2, d2, s adcs d3, d3, t ldp t, d5, [x, #32] adcs d4, d4, t adcs d5, d5, xzr ldp d6, d7, [x, #48] adcs d6, d6, xzr adcs d7, d7, xzr orr d8, d8, #~0x1FF adcs d8, d8, xzr // We already know r < n_521, but if it actually went negative then // we need to add back n_521 again. Recycle q as a bitmask for r < n_521, // and just subtract r_521 and mask rather than literally adding 2^521. // This also gets rid of the bit-stuffing above. csetm q, cc and n0, n0, q subs d0, d0, n0 and n1, n1, q sbcs d1, d1, n1 and n2, n2, q sbcs d2, d2, n2 and n3, n3, q sbcs d3, d3, n3 mov n0, #5 and n0, n0, q sbcs d4, d4, n0 sbcs d5, d5, xzr sbcs d6, d6, xzr sbcs d7, d7, xzr sbc d8, d8, xzr and d8, d8, #0x1FF // Store the end result stp d0, d1, [z] stp d2, d3, [z, #16] stp d4, d5, [z, #32] stp d6, d7, [z, #48] str d8, [z, #64] CFI_RET S2N_BN_SIZE_DIRECTIVE(bignum_mod_n521_9) #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
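The reduction above rests on the identity x - q*n_521 = q*r_521 + t - 2^521 for x = 2^521*h + t and q = h + 1, with r_521 = 2^521 - n_521 small enough that the estimate is off by at most one. The C fragment below is a reference-only restatement of that strategy; the r_521 limbs are the same constants as in the movbig/lsl sequence above, while the function name and __int128 usage are ours.

#include <stdint.h>

/* r_521 = 2^521 - n_521 as five 64-bit limbs, least significant first. */
static const uint64_t r521[5] = {
    0x449048e16ec79bf7ULL, 0xc44a36477663b851ULL,
    0x8033feb708f65a2fULL, 0xae79787c40d06994ULL, 5ULL
};

/* Reference-only sketch: z := x mod n_521 using the estimate q = h + 1. */
static void ref_mod_n521_9(uint64_t z[9], const uint64_t x[9])
{
    uint64_t q = (x[8] >> 9) + 1;             /* x = 2^521*h + t, q = h + 1 */
    uint64_t d[9];
    unsigned __int128 acc = 0;

    /* d = q*r_521 + t, i.e. (x - q*n_521) + 2^521 */
    for (int i = 0; i < 9; i++) {
        acc += (i == 8) ? (x[8] & 0x1FF) : x[i];
        if (i < 5)
            acc += (unsigned __int128)q * r521[i];
        d[i] = (uint64_t)acc;
        acc >>= 64;
    }

    if (d[8] >> 9) {
        /* d >= 2^521: the estimate was exact, the result is d - 2^521 */
        for (int i = 0; i < 9; i++)
            z[i] = d[i];
    } else {
        /* Estimate overshot by one: add n_521 back, i.e. compute d - r_521 */
        uint64_t b = 0;
        for (int i = 0; i < 9; i++) {
            unsigned __int128 s = (unsigned __int128)d[i] - (i < 5 ? r521[i] : 0) - b;
            z[i] = (uint64_t)s;
            b = (uint64_t)(s >> 64) & 1;
        }
    }
    z[8] &= 0x1FF;                            /* drop the 2^521 bit */
}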
wlsfx/bnbb
1,923
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/bignum_half_p521.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Halve modulo p_521, z := (x / 2) mod p_521, assuming x reduced
// Input x[9]; output z[9]
//
//    extern void bignum_half_p521(uint64_t z[static 9], const uint64_t x[static 9]);
//
// Standard ARM ABI: X0 = z, X1 = x
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum_arm.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_half_p521)
        S2N_BN_FUNCTION_TYPE_DIRECTIVE(bignum_half_p521)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_half_p521)
        .text
        .balign 4

#define z x0
#define x x1

// We use distinct variables for clarity, but these are heavily aliased

#define d0 x2
#define d1 x3
#define d2 x4
#define d3 x5
#define d4 x2
#define d5 x3
#define d6 x4
#define d7 x5
#define d8 x2

#define a x6

S2N_BN_SYMBOL(bignum_half_p521):
        CFI_START

// We do a 521-bit rotation one bit right, since 2^521 == 1 (mod p_521)

        ldp     d0, d1, [x]
        and     a, d0, #1
        extr    d0, d1, d0, #1
        ldp     d2, d3, [x, #16]
        extr    d1, d2, d1, #1
        stp     d0, d1, [z]
        extr    d2, d3, d2, #1
        ldp     d4, d5, [x, #32]
        extr    d3, d4, d3, #1
        stp     d2, d3, [z, #16]
        extr    d4, d5, d4, #1
        ldp     d6, d7, [x, #48]
        extr    d5, d6, d5, #1
        stp     d4, d5, [z, #32]
        extr    d6, d7, d6, #1
        ldr     d8, [x, #64]
        extr    d7, d8, d7, #1
        stp     d6, d7, [z, #48]
        lsl     d8, d8, #55
        extr    d8, a, d8, #56
        str     d8, [z, #64]

// Return

        CFI_RET

S2N_BN_SIZE_DIRECTIVE(bignum_half_p521)

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
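Halving modulo p_521 = 2^521 - 1 is a one-bit right rotation of the 521-bit value, as the comment in the routine above says: an odd x picks up (p_521 + 1)/2 = 2^520, which is exactly its low bit moved up to bit 520. A reference-only C sketch follows (the name is ours, not library code).

#include <stdint.h>

/* Reference-only sketch: z := (x / 2) mod p_521 as a 1-bit right rotation
 * of a 521-bit quantity; input assumed reduced, so x[8] < 2^9. */
static void ref_half_p521(uint64_t z[9], const uint64_t x[9])
{
    uint64_t lsb = x[0] & 1;                  /* this bit wraps around to bit 520 */
    for (int i = 0; i < 8; i++)
        z[i] = (x[i] >> 1) | (x[i + 1] << 63);
    z[8] = (x[8] >> 1) | (lsb << 8);          /* bit 520 is bit 8 of the top limb */
}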
wlsfx/bnbb
8,817
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p256/unopt/bignum_montsqr_p256_base.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery square, z := (x^2 / 2^256) mod p_256 // Input x[4]; output z[4] // // extern void bignum_montsqr_p256_base // (uint64_t z[static 4], uint64_t x[static 4]); // // Does z := (x^2 / 2^256) mod p_256, assuming x^2 <= 2^256 * p_256, which is // guaranteed in particular if x < p_256 initially (the "intended" case). // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p256_base) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p256_base) .text .balign 4 // --------------------------------------------------------------------------- // Macro returning (c,h,l) = 3-word 1s complement (x - y) * (w - z) // c,h,l,t should all be different // t,h should not overlap w,z // --------------------------------------------------------------------------- #define muldiffn(c,h,l, t, x,y, w,z) \ subs t, x, y __LF \ cneg t, t, cc __LF \ csetm c, cc __LF \ subs h, w, z __LF \ cneg h, h, cc __LF \ mul l, t, h __LF \ umulh h, t, h __LF \ cinv c, c, cc __LF \ eor l, l, c __LF \ eor h, h, c // --------------------------------------------------------------------------- // Core one-step "end" Montgomery reduction macro. Takes input in // [d5;d4;d3;d2;d1;d0] and returns result in [d5;d4;d3;d2;d1], adding to // the existing [d4;d3;d2;d1], re-using d0 as a temporary internally as well // as t1, t2, t3, and initializing d5 from zero (hence "end"). // --------------------------------------------------------------------------- #define montrede(d5, d4,d3,d2,d1,d0, t2,t1,t0) \ /* Let w = d0, the original word we use as offset; d0 gets recycled */ \ /* First let [t2;t1] = 2^32 * w */ \ /* then let [d0;t0] = (2^64 - 2^32 + 1) * w (overwrite old d0) */ \ lsl t1, d0, #32 __LF \ subs t0, d0, t1 __LF \ lsr t2, d0, #32 __LF \ sbc d0, d0, t2 __LF \ /* Hence basic [d4;d3;d2;d1] += (2^256 - 2^224 + 2^192 + 2^96) * w */ \ adds d1, d1, t1 __LF \ adcs d2, d2, t2 __LF \ adcs d3, d3, t0 __LF \ adcs d4, d4, d0 __LF \ adc d5, xzr, xzr // --------------------------------------------------------------------------- // Core one-step "short" Montgomery reduction macro. Takes input in // [d3;d2;d1;d0] and returns result in [d4;d3;d2;d1], adding to the // existing contents of [d3;d2;d1] and generating d4 from zero, re-using // d0 as a temporary internally together with t0, t1 and t2. // It is fine for d4 to be the same register as d0, and it often is. 
// --------------------------------------------------------------------------- #define montreds(d4,d3,d2,d1,d0, t2,t1,t0) \ /* Let w = d0, the original word we use as offset; d0 gets recycled */ \ /* First let [t2;t1] = 2^32 * w */ \ /* then let [d0;t0] = (2^64 - 2^32 + 1) * w (overwrite old d0) */ \ lsl t1, d0, #32 __LF \ subs t0, d0, t1 __LF \ lsr t2, d0, #32 __LF \ sbc d0, d0, t2 __LF \ /* Hence [d4;..;d1] := [d3;d2;d1;0] + (2^256 - 2^224 + 2^192 + 2^96) * w */ \ adds d1, d1, t1 __LF \ adcs d2, d2, t2 __LF \ adcs d3, d3, t0 __LF \ adc d4, d0, xzr #define a0 x2 #define a1 x3 #define a2 x4 #define a3 x5 #define c0 x6 #define c1 x7 #define c2 x8 #define c3 x9 #define c4 x10 #define d1 x11 #define d2 x12 #define d3 x13 #define d4 x14 #define s0 x15 #define s1 x16 #define s2 x17 #define s3 x1 #define a0short w2 #define a1short w3 #define d1short w11 S2N_BN_SYMBOL(bignum_montsqr_p256_base): // Load in all words of the input ldp a0, a1, [x1] ldp a2, a3, [x1, #16] // Square the low half, getting a result in [s3;s2;s1;s0] // This uses 32x32->64 multiplications to reduce the number of UMULHs umull s0, a0short, a0short lsr d1, a0, #32 umull s1, d1short, d1short umull d1, a0short, d1short adds s0, s0, d1, lsl #33 lsr d1, d1, #31 adc s1, s1, d1 umull s2, a1short, a1short lsr d1, a1, #32 umull s3, d1short, d1short umull d1, a1short, d1short mul d2, a0, a1 umulh d3, a0, a1 adds s2, s2, d1, lsl #33 lsr d1, d1, #31 adc s3, s3, d1 adds d2, d2, d2 adcs d3, d3, d3 adc s3, s3, xzr adds s1, s1, d2 adcs s2, s2, d3 adc s3, s3, xzr // Perform two "short" Montgomery steps on the low square // This shifts it to an offset compatible with middle product montreds(s0,s3,s2,s1,s0, d1,d2,d3) montreds(s1,s0,s3,s2,s1, d1,d2,d3) // Compute cross-product with ADK 2x2->4 multiplier as [c3;c2;c1;c0] mul c0, a0, a2 mul d4, a1, a3 umulh c2, a0, a2 muldiffn(d3,d2,d1, c4, a0,a1, a3,a2) adds c1, c0, c2 adc c2, c2, xzr umulh c3, a1, a3 adds c1, c1, d4 adcs c2, c2, c3 adc c3, c3, xzr adds c2, c2, d4 adc c3, c3, xzr adds xzr, d3, #1 adcs c1, c1, d1 adcs c2, c2, d2 adc c3, c3, d3 // Double it and add the Montgomerified low square adds c0, c0, c0 adcs c1, c1, c1 adcs c2, c2, c2 adcs c3, c3, c3 adc c4, xzr, xzr adds c0, c0, s2 adcs c1, c1, s3 adcs c2, c2, s0 adcs c3, c3, s1 adc c4, c4, xzr // Montgomery-reduce the combined low and middle term another twice montrede(c0,c4,c3,c2,c1,c0, d1,d2,d3) montrede(c1,c0,c4,c3,c2,c1, d1,d2,d3) // Our sum so far is in [c1,c0,c4,c3,c2]; choose more intuitive names #define r0 x8 #define r1 x9 #define r2 x10 #define r3 x6 #define c x7 // So we can have these as temps #define t1 x11 #define t2 x12 #define t3 x13 // Add in the pure squares 22 + 33 mul t1, a2, a2 adds r0, r0, t1 mul t2, a3, a3 umulh t1, a2, a2 adcs r1, r1, t1 adcs r2, r2, t2 umulh t2, a3, a3 adcs r3, r3, t2 adc c, c, xzr // Construct the 23 term, double and add it in mul t1, a2, a3 umulh t2, a2, a3 adds t1, t1, t1 adcs t2, t2, t2 adc t3, xzr, xzr adds r1, r1, t1 adcs r2, r2, t2 adcs r3, r3, t3 adcs c, c, xzr // We know, writing B = 2^{4*64} that the full implicit result is // B^2 c <= z + (B - 1) * p < B * p + (B - 1) * p < 2 * B * p, // so the top half is certainly < 2 * p. If c = 1 already, we know // subtracting p will give the reduced modulus. But now we do a // subtraction-comparison to catch cases where the residue is >= p. // The constants are such that [t3;0;t1;-1] = p_256. 
#define t0 x5 // Set CF (because of inversion) iff (0,p_256) <= (c,r3,r2,r1,r0) mov t1, #0x00000000ffffffff subs t0, r0, #-1 sbcs t1, r1, t1 mov t3, #0xffffffff00000001 sbcs t2, r2, xzr sbcs t3, r3, t3 sbcs xzr, c, xzr // Select final output accordingly csel r0, t0, r0, cs csel r1, t1, r1, cs csel r2, t2, r2, cs csel r3, t3, r3, cs // Store things back in place stp r0, r1, [x0] stp r2, r3, [x0, #16] ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
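The montreds/montrede macros above rely on p_256 == -1 (mod 2^64): the multiple of p_256 that clears the bottom word of the accumulator is just that word itself, so one reduction step is d := (d + d0 * p_256) / 2^64. The C fragment below is a reference-only sketch of a single such step on a 4-word input, as in the "short" variant; the function name and __int128 usage are ours.

#include <stdint.h>

/* Reference-only sketch of one word-level Montgomery step for p_256:
 * out = ([d3;d2;d1;d0] + d0 * p_256) / 2^64, where the division is exact
 * because adding d0 * p_256 makes the bottom limb zero. */
static void ref_montreds_p256(uint64_t out[4], const uint64_t in[4])
{
    static const uint64_t p256[4] = {
        0xffffffffffffffffULL, 0x00000000ffffffffULL,
        0x0000000000000000ULL, 0xffffffff00000001ULL
    };
    uint64_t w = in[0];                       /* the "offset" word the macro recycles */
    unsigned __int128 acc = 0;
    uint64_t t[5];

    for (int i = 0; i < 4; i++) {
        acc += in[i];
        acc += (unsigned __int128)w * p256[i];
        t[i] = (uint64_t)acc;
        acc >>= 64;
    }
    t[4] = (uint64_t)acc;                     /* becomes the new top word d4 */

    for (int i = 0; i < 4; i++)               /* divide by 2^64: t[0] is now zero */
        out[i] = t[i + 1];
}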
wlsfx/bnbb
16,928
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p256/unopt/p256_montjadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point addition on NIST curve P-256 in Montgomery-Jacobian coordinates // // extern void p256_montjadd // (uint64_t p3[static 12],uint64_t p1[static 12],uint64_t p2[static 12]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^256 * x) mod p_256. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p256_montjadd) S2N_BN_SYM_PRIVACY_DIRECTIVE(p256_montjadd) .text .balign 4 // Size of individual field elements #define NUMSIZE 32 // Stable homes for input arguments during main code sequence #define input_z x21 #define input_x x22 #define input_y x23 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define z_2 input_y, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // NSPACE is the total stack needed for these temporaries #define z1sq sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define x1a sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define z2sq sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define y1a sp, #(NUMSIZE*6) #define NSPACE (NUMSIZE*7) #define montmul_p256(P0,P1,P2) \ add x0, P0;\ add x1, P1;\ add x2, P2;\ bl .montmul_p256 #define montsqr_p256(P0,P1) \ add x0, P0;\ add x1, P1;\ bl .montsqr_p256 #define sub_p256(P0,P1,P2) \ add x0, P0;\ add x1, P1;\ add x2, P2;\ bl .sub_p256 // Corresponds exactly to bignum_montmul_p256 .montmul_p256: ldr q20, [x2] ldp x7, x17, [x1] ldr q0, [x1] ldp x6, x10, [x2] ldp x11, x15, [x1, #16] rev64 v16.4s, v20.4s subs x4, x7, x17 csetm x3, cc cneg x13, x4, cc mul v16.4s, v16.4s, v0.4s umulh x12, x17, x10 uzp1 v28.4s, v20.4s, v0.4s subs x14, x11, x7 ldr q20, [x2, #16] sbcs x5, x15, x17 ngc x17, xzr subs x8, x11, x15 uaddlp v27.2d, v16.4s umulh x4, x7, x6 uzp1 v21.4s, v0.4s, v0.4s cneg x11, x8, cc shl v17.2d, v27.2d, #32 csetm x15, cc subs x9, x10, x6 eor x7, x14, x17 umlal v17.2d, v21.2s, v28.2s cneg x8, x9, cc cinv x9, x3, cc cmn x17, #0x1 ldr q28, [x1, #16] adcs x14, x7, xzr mul x7, x13, x8 eor x1, x5, x17 adcs x5, x1, xzr xtn v1.2s, v20.2d mov x1, v17.d[0] mov x3, v17.d[1] uzp2 v16.4s, v20.4s, v20.4s umulh x16, x13, x8 eor x13, x7, x9 adds x8, x1, x3 adcs x7, x4, x12 xtn v0.2s, v28.2d adcs x12, x12, xzr adds x8, x4, x8 adcs x3, x3, x7 ldp x7, x2, [x2, #16] adcs x12, x12, xzr cmn x9, #0x1 adcs x8, x8, x13 eor x13, x16, x9 adcs x16, x3, x13 lsl x3, x1, #32 adc x13, x12, x9 subs x12, x6, x7 sbcs x9, x10, x2 lsr x10, x1, #32 ngc x4, xzr subs x6, x2, x7 cinv x2, x15, cc cneg x6, x6, cc subs x7, x1, x3 eor x9, x9, x4 sbc x1, x1, x10 adds x15, x8, x3 adcs x3, x16, x10 mul x16, x11, x6 adcs x8, x13, x7 eor x13, x12, x4 adc 
x10, x1, xzr cmn x4, #0x1 umulh x6, x11, x6 adcs x11, x13, xzr adcs x1, x9, xzr lsl x13, x15, #32 subs x12, x15, x13 lsr x7, x15, #32 sbc x15, x15, x7 adds x9, x3, x13 adcs x3, x8, x7 umulh x8, x14, x11 umull v21.2d, v0.2s, v1.2s adcs x12, x10, x12 umull v3.2d, v0.2s, v16.2s adc x15, x15, xzr rev64 v24.4s, v20.4s stp x12, x15, [x0, #16] movi v2.2d, #0xffffffff mul x10, x14, x11 mul v4.4s, v24.4s, v28.4s subs x13, x14, x5 uzp2 v19.4s, v28.4s, v28.4s csetm x15, cc usra v3.2d, v21.2d, #32 mul x7, x5, x1 umull v21.2d, v19.2s, v16.2s cneg x13, x13, cc uaddlp v5.2d, v4.4s subs x11, x1, x11 and v16.16b, v3.16b, v2.16b umulh x5, x5, x1 shl v24.2d, v5.2d, #32 cneg x11, x11, cc umlal v16.2d, v19.2s, v1.2s cinv x12, x15, cc umlal v24.2d, v0.2s, v1.2s adds x15, x10, x7 mul x14, x13, x11 eor x1, x6, x2 adcs x6, x8, x5 stp x9, x3, [x0] usra v21.2d, v3.2d, #32 adcs x9, x5, xzr umulh x11, x13, x11 adds x15, x8, x15 adcs x7, x7, x6 eor x8, x14, x12 usra v21.2d, v16.2d, #32 adcs x13, x9, xzr cmn x12, #0x1 mov x9, v24.d[1] adcs x14, x15, x8 eor x6, x11, x12 adcs x6, x7, x6 mov x5, v24.d[0] mov x11, v21.d[1] mov x7, v21.d[0] adc x3, x13, x12 adds x12, x5, x9 adcs x13, x7, x11 ldp x15, x8, [x0] adcs x11, x11, xzr adds x12, x7, x12 eor x16, x16, x2 adcs x7, x9, x13 adcs x11, x11, xzr cmn x2, #0x1 ldp x9, x13, [x0, #16] adcs x16, x12, x16 adcs x1, x7, x1 adc x2, x11, x2 adds x7, x5, x15 adcs x15, x16, x8 eor x5, x17, x4 adcs x9, x1, x9 eor x1, x10, x5 adcs x16, x2, x13 adc x2, xzr, xzr cmn x5, #0x1 eor x13, x14, x5 adcs x14, x1, x7 eor x1, x6, x5 adcs x6, x13, x15 adcs x10, x1, x9 eor x4, x3, x5 mov x1, #0xffffffff adcs x8, x4, x16 lsr x13, x14, #32 adcs x17, x2, x5 adcs x11, x5, xzr adc x4, x5, xzr adds x12, x10, x7 adcs x7, x8, x15 adcs x5, x17, x9 adcs x9, x11, x16 lsl x11, x14, #32 adc x10, x4, x2 subs x17, x14, x11 sbc x4, x14, x13 adds x11, x6, x11 adcs x12, x12, x13 lsl x15, x11, #32 adcs x17, x7, x17 lsr x7, x11, #32 adc x13, x4, xzr subs x4, x11, x15 sbc x11, x11, x7 adds x8, x12, x15 adcs x15, x17, x7 adcs x4, x13, x4 adc x11, x11, xzr adds x7, x5, x4 adcs x17, x9, x11 adc x13, x10, xzr add x12, x13, #0x1 neg x11, x12 lsl x4, x12, #32 adds x17, x17, x4 sub x4, x4, #0x1 adc x13, x13, xzr subs x11, x8, x11 sbcs x4, x15, x4 sbcs x7, x7, xzr sbcs x17, x17, x12 sbcs x13, x13, x12 mov x12, #0xffffffff00000001 adds x11, x11, x13 and x1, x1, x13 adcs x4, x4, x1 and x1, x12, x13 stp x11, x4, [x0] adcs x4, x7, xzr adc x1, x17, x1 stp x4, x1, [x0, #16] ret // Corresponds exactly to bignum_montsqr_p256 .montsqr_p256: ldr q19, [x1] ldp x9, x13, [x1] ldr q23, [x1, #16] ldr q0, [x1] ldp x1, x10, [x1, #16] uzp2 v29.4s, v19.4s, v19.4s xtn v4.2s, v19.2d umulh x8, x9, x13 rev64 v20.4s, v23.4s umull v16.2d, v19.2s, v19.2s umull v1.2d, v29.2s, v4.2s mul v20.4s, v20.4s, v0.4s subs x14, x9, x13 umulh x15, x9, x1 mov x16, v16.d[1] umull2 v4.2d, v19.4s, v19.4s mov x4, v16.d[0] uzp1 v17.4s, v23.4s, v0.4s uaddlp v19.2d, v20.4s lsr x7, x8, #63 mul x11, x9, x13 mov x12, v1.d[0] csetm x5, cc cneg x6, x14, cc mov x3, v4.d[1] mov x14, v4.d[0] subs x2, x10, x1 mov x9, v1.d[1] cneg x17, x2, cc cinv x2, x5, cc adds x5, x4, x12, lsl #33 extr x4, x8, x11, #63 lsr x8, x12, #31 uzp1 v20.4s, v0.4s, v0.4s shl v19.2d, v19.2d, #32 adc x16, x16, x8 adds x8, x14, x9, lsl #33 lsr x14, x9, #31 lsl x9, x5, #32 umlal v19.2d, v20.2s, v17.2s adc x14, x3, x14 adds x16, x16, x11, lsl #1 lsr x3, x5, #32 umulh x12, x6, x17 adcs x4, x8, x4 adc x11, x14, x7 subs x8, x5, x9 sbc x5, x5, x3 adds x16, x16, x9 mov x14, v19.d[0] mul x17, x6, x17 adcs x3, x4, x3 
lsl x7, x16, #32 umulh x13, x13, x10 adcs x11, x11, x8 lsr x8, x16, #32 adc x5, x5, xzr subs x9, x16, x7 sbc x16, x16, x8 adds x7, x3, x7 mov x3, v19.d[1] adcs x6, x11, x8 umulh x11, x1, x10 adcs x5, x5, x9 eor x8, x12, x2 adc x9, x16, xzr adds x16, x14, x15 adc x15, x15, xzr adds x12, x16, x3 eor x16, x17, x2 mul x4, x1, x10 adcs x15, x15, x13 adc x17, x13, xzr adds x15, x15, x3 adc x3, x17, xzr cmn x2, #0x1 mul x17, x10, x10 adcs x12, x12, x16 adcs x16, x15, x8 umulh x10, x10, x10 adc x2, x3, x2 adds x14, x14, x14 adcs x12, x12, x12 adcs x16, x16, x16 adcs x2, x2, x2 adc x15, xzr, xzr adds x14, x14, x7 mul x3, x1, x1 adcs x12, x12, x6 lsr x7, x14, #32 adcs x16, x16, x5 lsl x5, x14, #32 umulh x13, x1, x1 adcs x2, x2, x9 mov x6, #0xffffffff adc x15, x15, xzr adds x8, x4, x4 adcs x1, x11, x11 mov x11, #0xffffffff00000001 adc x4, xzr, xzr subs x9, x14, x5 sbc x14, x14, x7 adds x12, x12, x5 adcs x16, x16, x7 lsl x5, x12, #32 lsr x7, x12, #32 adcs x2, x2, x9 adcs x14, x15, x14 adc x15, xzr, xzr subs x9, x12, x5 sbc x12, x12, x7 adds x16, x16, x5 adcs x2, x2, x7 adcs x14, x14, x9 adcs x12, x15, x12 adc x15, xzr, xzr adds x16, x16, x3 adcs x2, x2, x13 adcs x14, x14, x17 adcs x12, x12, x10 adc x15, x15, xzr adds x2, x2, x8 adcs x14, x14, x1 adcs x12, x12, x4 adcs x15, x15, xzr adds x3, x16, #0x1 sbcs x5, x2, x6 sbcs x8, x14, xzr sbcs x11, x12, x11 sbcs xzr, x15, xzr csel x16, x3, x16, cs csel x14, x8, x14, cs csel x12, x11, x12, cs csel x2, x5, x2, cs stp x14, x12, [x0, #16] stp x16, x2, [x0] ret // Corresponds exactly to bignum_sub_p256 .sub_p256: ldp x5, x6, [x1] ldp x4, x3, [x2] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x1, #16] ldp x4, x3, [x2, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, cc adds x5, x5, x3 and x4, x3, #0xffffffff adcs x6, x6, x4 adcs x7, x7, xzr and x4, x3, #0xffffffff00000001 adc x8, x8, x4 stp x5, x6, [x0] stp x7, x8, [x0, #16] ret S2N_BN_SYMBOL(p256_montjadd): // Save regs and make room on stack for temporary variables stp x19, x20, [sp, #-16]! stp x21, x22, [sp, #-16]! stp x23, x30, [sp, #-16]! 
sub sp, sp, NSPACE // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 mov input_y, x2 // Main code, just a sequence of basic field operations // 12 * multiply + 4 * square + 7 * subtract montsqr_p256(z1sq,z_1) montsqr_p256(z2sq,z_2) montmul_p256(y1a,z_2,y_1) montmul_p256(y2a,z_1,y_2) montmul_p256(x2a,z1sq,x_2) montmul_p256(x1a,z2sq,x_1) montmul_p256(y2a,z1sq,y2a) montmul_p256(y1a,z2sq,y1a) sub_p256(xd,x2a,x1a) sub_p256(yd,y2a,y1a) montsqr_p256(zz,xd) montsqr_p256(ww,yd) montmul_p256(zzx1,zz,x1a) montmul_p256(zzx2,zz,x2a) sub_p256(resx,ww,zzx1) sub_p256(t1,zzx2,zzx1) montmul_p256(xd,xd,z_1) sub_p256(resx,resx,zzx2) sub_p256(t2,zzx1,resx) montmul_p256(t1,t1,y1a) montmul_p256(resz,xd,z_2) montmul_p256(t2,yd,t2) sub_p256(resy,t2,t1) // Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0 // The condition codes get set by a comparison (P2 != 0) - (P1 != 0) // So "HI" <=> CF /\ ~ZF <=> P1 = 0 /\ ~(P2 = 0) // and "LO" <=> ~CF <=> ~(P1 = 0) /\ P2 = 0 ldp x0, x1, [z_1] ldp x2, x3, [z_1+16] orr x12, x0, x1 orr x13, x2, x3 orr x12, x12, x13 cmp x12, xzr cset x12, ne ldp x4, x5, [z_2] ldp x6, x7, [z_2+16] orr x13, x4, x5 orr x14, x6, x7 orr x13, x13, x14 cmp x13, xzr cset x13, ne cmp x13, x12 // Multiplex the outputs accordingly, re-using the z's in registers ldp x8, x9, [resz] csel x8, x0, x8, lo csel x9, x1, x9, lo csel x8, x4, x8, hi csel x9, x5, x9, hi ldp x10, x11, [resz+16] csel x10, x2, x10, lo csel x11, x3, x11, lo csel x10, x6, x10, hi csel x11, x7, x11, hi ldp x12, x13, [x_1] ldp x0, x1, [resx] csel x0, x12, x0, lo csel x1, x13, x1, lo ldp x12, x13, [x_2] csel x0, x12, x0, hi csel x1, x13, x1, hi ldp x12, x13, [x_1+16] ldp x2, x3, [resx+16] csel x2, x12, x2, lo csel x3, x13, x3, lo ldp x12, x13, [x_2+16] csel x2, x12, x2, hi csel x3, x13, x3, hi ldp x12, x13, [y_1] ldp x4, x5, [resy] csel x4, x12, x4, lo csel x5, x13, x5, lo ldp x12, x13, [y_2] csel x4, x12, x4, hi csel x5, x13, x5, hi ldp x12, x13, [y_1+16] ldp x6, x7, [resy+16] csel x6, x12, x6, lo csel x7, x13, x7, lo ldp x12, x13, [y_2+16] csel x6, x12, x6, hi csel x7, x13, x7, hi // Finally store back the multiplexed values stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [y_3] stp x6, x7, [y_3+16] stp x8, x9, [z_3] stp x10, x11, [z_3+16] // Restore registers and return add sp, sp, NSPACE ldp x23, x30, [sp], 16 ldp x21, x22, [sp], 16 ldp x19, x20, [sp], 16 ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
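At the end of p256_montjadd the code checks whether either input has z = 0 (the point at infinity) and, via the HI/LO condition codes described in its comment, multiplexes the corresponding input coordinates over the generically computed sum with csel. The fragment below is a reference-only C restatement of that selection; unlike the csel sequence it branches, so it is illustrative rather than constant-time, and the helper name is ours.

#include <stdint.h>

/* Reference-only sketch of the output multiplexing: return P2's coordinate
 * when P1 = 0, P1's when P2 = 0, otherwise the computed sum.  p1nz/p2nz are
 * "z coordinate is nonzero" flags for the two inputs. */
static void ref_select_coord(uint64_t out[4], const uint64_t sum[4],
                             const uint64_t p1[4], const uint64_t p2[4],
                             int p1nz, int p2nz)
{
    for (int i = 0; i < 4; i++) {
        uint64_t v = sum[i];
        if (!p1nz && p2nz)
            v = p2[i];                        /* the "HI" case above */
        if (p1nz && !p2nz)
            v = p1[i];                        /* the "LO" case above */
        out[i] = v;
    }
}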
wlsfx/bnbb
8,613
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p256/unopt/bignum_montmul_p256_base.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery multiply, z := (x * y / 2^256) mod p_256 // Inputs x[4], y[4]; output z[4] // // extern void bignum_montmul_p256_base // (uint64_t z[static 4], uint64_t x[static 4], uint64_t y[static 4]); // // Does z := (2^{-256} * x * y) mod p_256, assuming that the inputs x and y // satisfy x * y <= 2^256 * p_256 (in particular this is true if we are in // the "usual" case x < p_256 and y < p_256). // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p256_base) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p256_base) .text .balign 4 // --------------------------------------------------------------------------- // Macro returning (c,h,l) = 3-word 1s complement (x - y) * (w - z) // c,h,l,t should all be different // t,h should not overlap w,z // --------------------------------------------------------------------------- #define muldiffn(c,h,l, t, x,y, w,z) \ subs t, x, y __LF \ cneg t, t, cc __LF \ csetm c, cc __LF \ subs h, w, z __LF \ cneg h, h, cc __LF \ mul l, t, h __LF \ umulh h, t, h __LF \ cinv c, c, cc __LF \ eor l, l, c __LF \ eor h, h, c // --------------------------------------------------------------------------- // Core one-step "short" Montgomery reduction macro. Takes input in // [d3;d2;d1;d0] and returns result in [d4;d3;d2;d1], adding to the // existing contents of [d3;d2;d1] and generating d4 from zero, re-using // d0 as a temporary internally together with t0, t1 and t2. // It is fine for d4 to be the same register as d0, and it often is. // --------------------------------------------------------------------------- #define montreds(d4,d3,d2,d1,d0, t2,t1,t0) \ /* Let w = d0, the original word we use as offset; d0 gets recycled */ \ /* First let [t2;t1] = 2^32 * w */ \ /* then let [d0;t0] = (2^64 - 2^32 + 1) * w (overwrite old d0) */ \ lsl t1, d0, #32 __LF \ subs t0, d0, t1 __LF \ lsr t2, d0, #32 __LF \ sbc d0, d0, t2 __LF \ /* Hence [d4;..;d1] := [d3;d2;d1;0] + (2^256 - 2^224 + 2^192 + 2^96) * w */ \ adds d1, d1, t1 __LF \ adcs d2, d2, t2 __LF \ adcs d3, d3, t0 __LF \ adc d4, d0, xzr #define a0 x3 #define a1 x4 #define a2 x5 #define a3 x6 #define b0 x7 #define b1 x8 #define b2 x9 #define b3 x10 #define s0 x11 #define s1 x12 #define s2 x13 #define s3 x14 #define t0 x15 #define t1 x16 #define t2 x17 #define t3 x1 #define s4 x2 S2N_BN_SYMBOL(bignum_montmul_p256_base): // Load in all words of both inputs ldp a0, a1, [x1] ldp a2, a3, [x1, #16] ldp b0, b1, [x2] ldp b2, b3, [x2, #16] // Multiply low halves with a 2x2->4 ADK multiplier as L = [s3;s2;s1;s0] mul s0, a0, b0 mul s2, a1, b1 umulh s1, a0, b0 adds t1, s0, s2 umulh s3, a1, b1 adcs t2, s1, s3 adcs s3, s3, xzr adds s1, s1, t1 adcs s2, s2, t2 adcs s3, s3, xzr muldiffn(t3,t2,t1, t0, a0,a1, b1,b0) adds xzr, t3, #1 adcs s1, s1, t1 adcs s2, s2, t2 adc s3, s3, t3 // Perform two "short" Montgomery steps on the low product to // get a modified low result L' = [s1;s0;s3;s2] // This shifts it to an offset compatible with middle terms // Stash the result L' temporarily in the output buffer to avoid // using additional registers. 
montreds(s0,s3,s2,s1,s0, t1,t2,t3) montreds(s1,s0,s3,s2,s1, t1,t2,t3) stp s2, s3, [x0] stp s0, s1, [x0, #16] // Multiply high halves with a 2x2->4 ADK multiplier as H = [s3;s2;s1;s0] mul s0, a2, b2 mul s2, a3, b3 umulh s1, a2, b2 adds t1, s0, s2 umulh s3, a3, b3 adcs t2, s1, s3 adcs s3, s3, xzr adds s1, s1, t1 adcs s2, s2, t2 adcs s3, s3, xzr muldiffn(t3,t2,t1, t0, a2,a3, b3,b2) adds xzr, t3, #1 adcs s1, s1, t1 adcs s2, s2, t2 adc s3, s3, t3 // Compute sign-magnitude a2,[a1,a0] = x_hi - x_lo subs a0, a2, a0 sbcs a1, a3, a1 sbc a2, xzr, xzr adds xzr, a2, #1 eor a0, a0, a2 adcs a0, a0, xzr eor a1, a1, a2 adcs a1, a1, xzr // Compute sign-magnitude b2,[b1,b0] = y_lo - y_hi subs b0, b0, b2 sbcs b1, b1, b3 sbc b2, xzr, xzr adds xzr, b2, #1 eor b0, b0, b2 adcs b0, b0, xzr eor b1, b1, b2 adcs b1, b1, xzr // Save the correct sign for the sub-product in b3 eor b3, a2, b2 // Add the high H to the modified low term L' as H + L' = [s4;b2;a2;t3;t0] ldp t0, t3, [x0] adds t0, s0, t0 adcs t3, s1, t3 ldp a2, b2, [x0, #16] adcs a2, s2, a2 adcs b2, s3, b2 adc s4, xzr, xzr // Multiply with yet a third 2x2->4 ADK multiplier for complex mid-term M mul s0, a0, b0 mul s2, a1, b1 umulh s1, a0, b0 adds t1, s0, s2 umulh s3, a1, b1 adcs t2, s1, s3 adcs s3, s3, xzr adds s1, s1, t1 adcs s2, s2, t2 adcs s3, s3, xzr muldiffn(a1,t2,t1, a0, a0,a1, b1,b0) adds xzr, a1, #1 adcs s1, s1, t1 adcs s2, s2, t2 adc s3, s3, a1 // Set up a sign-modified version of the mid-product in a long accumulator // as [b3;a1;a0;s3;s2;s1;s0], adding in the H + L' term once with // zero offset as this signed value is created adds xzr, b3, #1 eor s0, s0, b3 adcs s0, s0, t0 eor s1, s1, b3 adcs s1, s1, t3 eor s2, s2, b3 adcs s2, s2, a2 eor s3, s3, b3 adcs s3, s3, b2 adcs a0, s4, b3 adcs a1, b3, xzr adc b3, b3, xzr // Add in the stashed H + L' term an offset of 2 words as well adds s2, s2, t0 adcs s3, s3, t3 adcs a0, a0, a2 adcs a1, a1, b2 adc b3, b3, s4 // Do two more Montgomery steps on the composed term // Net pre-reduct is in [b3;a1;a0;s3;s2] montreds(s0,s3,s2,s1,s0, t1,t2,t3) montreds(s1,s0,s3,s2,s1, t1,t2,t3) adds a0, a0, s0 adcs a1, a1, s1 adc b3, b3, xzr // Because of the way we added L' in two places, we can overspill by // more than usual in Montgomery, with the result being only known to // be < 3 * p_256, not the usual < 2 * p_256. So now we do a more // elaborate final correction in the style of bignum_cmul_p256, though // we can use much simpler quotient estimation logic (q = h + 1) and // slightly more direct accumulation of p_256 * q. #define d0 s2 #define d1 s3 #define d2 a0 #define d3 a1 #define h b3 #define q s4 #define c b0 add q, h, #1 lsl t1, q, #32 adds d3, d3, t1 adc h, h, xzr sub t0, xzr, q sub t1, t1, #1 subs d0, d0, t0 sbcs d1, d1, t1 sbcs d2, d2, xzr sbcs d3, d3, q sbcs c, h, q adds d0, d0, c mov h, #0x00000000ffffffff and h, h, c adcs d1, d1, h adcs d2, d2, xzr mov h, #0xffffffff00000001 and h, h, c adc d3, d3, h // Finally store the result stp d0, d1, [x0] stp d2, d3, [x0, #16] ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
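The closing comment of bignum_montmul_p256_base above notes that, because L' is added in twice, the pre-reduced value can approach 3 * p_256, so the final correction uses the quotient estimate q = h + 1 (h the top word) followed by one masked add-back of p_256. The fragment below is a reference-only sketch of that correction; the function name, the signed __int128 carry handling, and the assumption that right-shifting a negative __int128 is arithmetic (true for GCC/Clang) are ours.

#include <stdint.h>

/* Reference-only sketch: reduce v = 2^256*h + l, known to satisfy
 * v < 3 * p_256 (so h <= 2), to v mod p_256 using q = h + 1. */
static void ref_final_correct_p256(uint64_t z[4], const uint64_t l[4], uint64_t h)
{
    static const uint64_t p256[4] = {
        0xffffffffffffffffULL, 0x00000000ffffffffULL,
        0x0000000000000000ULL, 0xffffffff00000001ULL
    };
    uint64_t q = h + 1;
    uint64_t d[4];
    __int128 acc = 0;

    /* d = v - q * p_256 over the low 256 bits, with a signed running carry */
    for (int i = 0; i < 4; i++) {
        acc += l[i];
        acc -= (__int128)((unsigned __int128)q * p256[i]);
        d[i] = (uint64_t)acc;
        acc >>= 64;                  /* assumed arithmetic shift */
    }
    acc += h;                        /* fold in the top word of v */

    /* acc is now 0 (estimate exact) or -1 (overshot by exactly one p_256) */
    uint64_t mask = (uint64_t)acc;   /* 0 or all-ones */
    unsigned __int128 c = 0;
    for (int i = 0; i < 4; i++) {
        c += (unsigned __int128)d[i] + (p256[i] & mask);
        z[i] = (uint64_t)c;
        c >>= 64;
    }
}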
wlsfx/bnbb
24,607
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p256/unopt/p256_montjdouble.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point doubling on NIST curve P-256 in Montgomery-Jacobian coordinates // // extern void p256_montjdouble // (uint64_t p3[static 12],uint64_t p1[static 12]); // // Does p3 := 2 * p1 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^256 * x) mod p_256. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // // Standard ARM ABI: X0 = p3, X1 = p1 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p256_montjdouble) S2N_BN_SYM_PRIVACY_DIRECTIVE(p256_montjdouble) .text .balign 4 // Size of individual field elements #define NUMSIZE 32 // Stable homes for input arguments during main code sequence #define input_z x19 #define input_x x20 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // NSPACE is the total stack needed for these temporaries #define z2 sp, #(NUMSIZE*0) #define y4 sp, #(NUMSIZE*0) #define y2 sp, #(NUMSIZE*1) #define t1 sp, #(NUMSIZE*2) #define t2 sp, #(NUMSIZE*3) #define x2p sp, #(NUMSIZE*3) #define dx2 sp, #(NUMSIZE*3) #define xy2 sp, #(NUMSIZE*4) #define x4p sp, #(NUMSIZE*5) #define d_ sp, #(NUMSIZE*5) #define NSPACE #(NUMSIZE*6) // Corresponds exactly to bignum_montmul_p256 .montmul_p256: ldr q20, [x2] ldp x7, x17, [x1] ldr q0, [x1] ldp x6, x10, [x2] ldp x11, x15, [x1, #16] rev64 v16.4s, v20.4s subs x4, x7, x17 csetm x3, cc cneg x13, x4, cc mul v16.4s, v16.4s, v0.4s umulh x12, x17, x10 uzp1 v28.4s, v20.4s, v0.4s subs x14, x11, x7 ldr q20, [x2, #16] sbcs x5, x15, x17 ngc x17, xzr subs x8, x11, x15 uaddlp v27.2d, v16.4s umulh x4, x7, x6 uzp1 v21.4s, v0.4s, v0.4s cneg x11, x8, cc shl v17.2d, v27.2d, #32 csetm x15, cc subs x9, x10, x6 eor x7, x14, x17 umlal v17.2d, v21.2s, v28.2s cneg x8, x9, cc cinv x9, x3, cc cmn x17, #0x1 ldr q28, [x1, #16] adcs x14, x7, xzr mul x7, x13, x8 eor x1, x5, x17 adcs x5, x1, xzr xtn v1.2s, v20.2d mov x1, v17.d[0] mov x3, v17.d[1] uzp2 v16.4s, v20.4s, v20.4s umulh x16, x13, x8 eor x13, x7, x9 adds x8, x1, x3 adcs x7, x4, x12 xtn v0.2s, v28.2d adcs x12, x12, xzr adds x8, x4, x8 adcs x3, x3, x7 ldp x7, x2, [x2, #16] adcs x12, x12, xzr cmn x9, #0x1 adcs x8, x8, x13 eor x13, x16, x9 adcs x16, x3, x13 lsl x3, x1, #32 adc x13, x12, x9 subs x12, x6, x7 sbcs x9, x10, x2 lsr x10, x1, #32 ngc x4, xzr subs x6, x2, x7 cinv x2, x15, cc cneg x6, x6, cc subs x7, x1, x3 eor x9, x9, x4 sbc x1, x1, x10 adds x15, x8, x3 adcs x3, x16, x10 mul x16, x11, x6 adcs x8, x13, x7 eor x13, x12, x4 adc x10, x1, xzr cmn x4, #0x1 umulh x6, x11, x6 adcs x11, x13, xzr adcs x1, x9, xzr lsl x13, x15, #32 subs x12, x15, x13 lsr x7, x15, #32 sbc x15, x15, x7 adds x9, x3, x13 adcs x3, x8, x7 umulh x8, x14, x11 umull v21.2d, v0.2s, v1.2s adcs x12, x10, x12 umull v3.2d, v0.2s, v16.2s adc x15, x15, xzr rev64 v24.4s, v20.4s stp x12, x15, [x0, #16] movi v2.2d, #0xffffffff mul x10, x14, x11 mul v4.4s, v24.4s, v28.4s subs x13, x14, x5 uzp2 v19.4s, v28.4s, v28.4s csetm x15, cc usra v3.2d, v21.2d, #32 mul x7, x5, x1 umull v21.2d, v19.2s, v16.2s cneg x13, x13, cc uaddlp v5.2d, v4.4s subs x11, 
x1, x11 and v16.16b, v3.16b, v2.16b umulh x5, x5, x1 shl v24.2d, v5.2d, #32 cneg x11, x11, cc umlal v16.2d, v19.2s, v1.2s cinv x12, x15, cc umlal v24.2d, v0.2s, v1.2s adds x15, x10, x7 mul x14, x13, x11 eor x1, x6, x2 adcs x6, x8, x5 stp x9, x3, [x0] usra v21.2d, v3.2d, #32 adcs x9, x5, xzr umulh x11, x13, x11 adds x15, x8, x15 adcs x7, x7, x6 eor x8, x14, x12 usra v21.2d, v16.2d, #32 adcs x13, x9, xzr cmn x12, #0x1 mov x9, v24.d[1] adcs x14, x15, x8 eor x6, x11, x12 adcs x6, x7, x6 mov x5, v24.d[0] mov x11, v21.d[1] mov x7, v21.d[0] adc x3, x13, x12 adds x12, x5, x9 adcs x13, x7, x11 ldp x15, x8, [x0] adcs x11, x11, xzr adds x12, x7, x12 eor x16, x16, x2 adcs x7, x9, x13 adcs x11, x11, xzr cmn x2, #0x1 ldp x9, x13, [x0, #16] adcs x16, x12, x16 adcs x1, x7, x1 adc x2, x11, x2 adds x7, x5, x15 adcs x15, x16, x8 eor x5, x17, x4 adcs x9, x1, x9 eor x1, x10, x5 adcs x16, x2, x13 adc x2, xzr, xzr cmn x5, #0x1 eor x13, x14, x5 adcs x14, x1, x7 eor x1, x6, x5 adcs x6, x13, x15 adcs x10, x1, x9 eor x4, x3, x5 mov x1, #0xffffffff adcs x8, x4, x16 lsr x13, x14, #32 adcs x17, x2, x5 adcs x11, x5, xzr adc x4, x5, xzr adds x12, x10, x7 adcs x7, x8, x15 adcs x5, x17, x9 adcs x9, x11, x16 lsl x11, x14, #32 adc x10, x4, x2 subs x17, x14, x11 sbc x4, x14, x13 adds x11, x6, x11 adcs x12, x12, x13 lsl x15, x11, #32 adcs x17, x7, x17 lsr x7, x11, #32 adc x13, x4, xzr subs x4, x11, x15 sbc x11, x11, x7 adds x8, x12, x15 adcs x15, x17, x7 adcs x4, x13, x4 adc x11, x11, xzr adds x7, x5, x4 adcs x17, x9, x11 adc x13, x10, xzr add x12, x13, #0x1 neg x11, x12 lsl x4, x12, #32 adds x17, x17, x4 sub x4, x4, #0x1 adc x13, x13, xzr subs x11, x8, x11 sbcs x4, x15, x4 sbcs x7, x7, xzr sbcs x17, x17, x12 sbcs x13, x13, x12 mov x12, #0xffffffff00000001 adds x11, x11, x13 and x1, x1, x13 adcs x4, x4, x1 and x1, x12, x13 stp x11, x4, [x0] adcs x4, x7, xzr adc x1, x17, x1 stp x4, x1, [x0, #16] ret // Corresponds exactly to bignum_montsqr_p256 .montsqr_p256: ldr q19, [x1] ldp x9, x13, [x1] ldr q23, [x1, #16] ldr q0, [x1] ldp x1, x10, [x1, #16] uzp2 v29.4s, v19.4s, v19.4s xtn v4.2s, v19.2d umulh x8, x9, x13 rev64 v20.4s, v23.4s umull v16.2d, v19.2s, v19.2s umull v1.2d, v29.2s, v4.2s mul v20.4s, v20.4s, v0.4s subs x14, x9, x13 umulh x15, x9, x1 mov x16, v16.d[1] umull2 v4.2d, v19.4s, v19.4s mov x4, v16.d[0] uzp1 v17.4s, v23.4s, v0.4s uaddlp v19.2d, v20.4s lsr x7, x8, #63 mul x11, x9, x13 mov x12, v1.d[0] csetm x5, cc cneg x6, x14, cc mov x3, v4.d[1] mov x14, v4.d[0] subs x2, x10, x1 mov x9, v1.d[1] cneg x17, x2, cc cinv x2, x5, cc adds x5, x4, x12, lsl #33 extr x4, x8, x11, #63 lsr x8, x12, #31 uzp1 v20.4s, v0.4s, v0.4s shl v19.2d, v19.2d, #32 adc x16, x16, x8 adds x8, x14, x9, lsl #33 lsr x14, x9, #31 lsl x9, x5, #32 umlal v19.2d, v20.2s, v17.2s adc x14, x3, x14 adds x16, x16, x11, lsl #1 lsr x3, x5, #32 umulh x12, x6, x17 adcs x4, x8, x4 adc x11, x14, x7 subs x8, x5, x9 sbc x5, x5, x3 adds x16, x16, x9 mov x14, v19.d[0] mul x17, x6, x17 adcs x3, x4, x3 lsl x7, x16, #32 umulh x13, x13, x10 adcs x11, x11, x8 lsr x8, x16, #32 adc x5, x5, xzr subs x9, x16, x7 sbc x16, x16, x8 adds x7, x3, x7 mov x3, v19.d[1] adcs x6, x11, x8 umulh x11, x1, x10 adcs x5, x5, x9 eor x8, x12, x2 adc x9, x16, xzr adds x16, x14, x15 adc x15, x15, xzr adds x12, x16, x3 eor x16, x17, x2 mul x4, x1, x10 adcs x15, x15, x13 adc x17, x13, xzr adds x15, x15, x3 adc x3, x17, xzr cmn x2, #0x1 mul x17, x10, x10 adcs x12, x12, x16 adcs x16, x15, x8 umulh x10, x10, x10 adc x2, x3, x2 adds x14, x14, x14 adcs x12, x12, x12 adcs x16, x16, x16 adcs x2, x2, x2 adc x15, 
xzr, xzr adds x14, x14, x7 mul x3, x1, x1 adcs x12, x12, x6 lsr x7, x14, #32 adcs x16, x16, x5 lsl x5, x14, #32 umulh x13, x1, x1 adcs x2, x2, x9 mov x6, #0xffffffff adc x15, x15, xzr adds x8, x4, x4 adcs x1, x11, x11 mov x11, #0xffffffff00000001 adc x4, xzr, xzr subs x9, x14, x5 sbc x14, x14, x7 adds x12, x12, x5 adcs x16, x16, x7 lsl x5, x12, #32 lsr x7, x12, #32 adcs x2, x2, x9 adcs x14, x15, x14 adc x15, xzr, xzr subs x9, x12, x5 sbc x12, x12, x7 adds x16, x16, x5 adcs x2, x2, x7 adcs x14, x14, x9 adcs x12, x15, x12 adc x15, xzr, xzr adds x16, x16, x3 adcs x2, x2, x13 adcs x14, x14, x17 adcs x12, x12, x10 adc x15, x15, xzr adds x2, x2, x8 adcs x14, x14, x1 adcs x12, x12, x4 adcs x15, x15, xzr adds x3, x16, #0x1 sbcs x5, x2, x6 sbcs x8, x14, xzr sbcs x11, x12, x11 sbcs xzr, x15, xzr csel x16, x3, x16, cs csel x14, x8, x14, cs csel x12, x11, x12, cs csel x2, x5, x2, cs stp x14, x12, [x0, #16] stp x16, x2, [x0] ret // Corresponds exactly to bignum_sub_p256 .sub_p256: ldp x5, x6, [x1] ldp x4, x3, [x2] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x1, #16] ldp x4, x3, [x2, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 csetm x3, cc adds x5, x5, x3 and x4, x3, #0xffffffff adcs x6, x6, x4 adcs x7, x7, xzr and x4, x3, #0xffffffff00000001 adc x8, x8, x4 stp x5, x6, [x0] stp x7, x8, [x0, #16] ret // Corresponds exactly to bignum_add_p256 .add_p256: ldp x4, x5, [x1] ldp x8, x9, [x2] adds x4, x4, x8 adcs x5, x5, x9 ldp x6, x7, [x1, #16] ldp x10, x11, [x2, #16] adcs x6, x6, x10 adcs x7, x7, x11 adc x3, xzr, xzr adds x8, x4, #0x1 mov x9, #0xffffffff sbcs x9, x5, x9 sbcs x10, x6, xzr mov x11, #0xffffffff00000001 sbcs x11, x7, x11 sbcs x3, x3, xzr csel x4, x4, x8, cc csel x5, x5, x9, cc csel x6, x6, x10, cc csel x7, x7, x11, cc stp x4, x5, [x0] stp x6, x7, [x0, #16] ret #define montmul_p256(P0,P1,P2) \ add x0, P0;\ add x1, P1;\ add x2, P2;\ bl .montmul_p256 #define montsqr_p256(P0,P1) \ add x0, P0;\ add x1, P1;\ bl .montsqr_p256 #define sub_p256(P0,P1,P2) \ add x0, P0;\ add x1, P1;\ add x2, P2;\ bl .sub_p256 #define add_p256(P0,P1,P2) \ add x0, P0;\ add x1, P1;\ add x2, P2;\ bl .add_p256 // A weak version of add that only guarantees sum in 4 digits #define weakadd_p256(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ adds x5, x5, x4 __LF \ adcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ adcs x7, x7, x4 __LF \ adcs x8, x8, x3 __LF \ csetm x3, cs __LF \ subs x5, x5, x3 __LF \ and x1, x3, #4294967295 __LF \ sbcs x6, x6, x1 __LF \ sbcs x7, x7, xzr __LF \ and x2, x3, #-4294967295 __LF \ sbc x8, x8, x2 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] // P0 = C * P1 - D * P2 computed as D * (p_256 - P2) + C * P1 // Quotient estimation is done just as q = h + 1 as in bignum_triple_p256 // This also applies to the other functions following. 
#define cmsub_p256(P0,C,P1,D,P2) \ mov x1, D __LF \ mov x2, #-1 __LF \ ldp x9, x10, [P2] __LF \ subs x9, x2, x9 __LF \ mov x2, #4294967295 __LF \ sbcs x10, x2, x10 __LF \ ldp x11, x12, [P2+16] __LF \ sbcs x11, xzr, x11 __LF \ mov x2, #-4294967295 __LF \ sbc x12, x2, x12 __LF \ mul x3, x1, x9 __LF \ mul x4, x1, x10 __LF \ mul x5, x1, x11 __LF \ mul x6, x1, x12 __LF \ umulh x9, x1, x9 __LF \ umulh x10, x1, x10 __LF \ umulh x11, x1, x11 __LF \ umulh x7, x1, x12 __LF \ adds x4, x4, x9 __LF \ adcs x5, x5, x10 __LF \ adcs x6, x6, x11 __LF \ adc x7, x7, xzr __LF \ mov x1, C __LF \ ldp x9, x10, [P1] __LF \ mul x8, x9, x1 __LF \ umulh x9, x9, x1 __LF \ adds x3, x3, x8 __LF \ mul x8, x10, x1 __LF \ umulh x10, x10, x1 __LF \ adcs x4, x4, x8 __LF \ ldp x11, x12, [P1+16] __LF \ mul x8, x11, x1 __LF \ umulh x11, x11, x1 __LF \ adcs x5, x5, x8 __LF \ mul x8, x12, x1 __LF \ umulh x12, x12, x1 __LF \ adcs x6, x6, x8 __LF \ adc x7, x7, xzr __LF \ adds x4, x4, x9 __LF \ adcs x5, x5, x10 __LF \ adcs x6, x6, x11 __LF \ adc x7, x7, x12 __LF \ add x8, x7, #1 __LF \ lsl x10, x8, #32 __LF \ adds x6, x6, x10 __LF \ adc x7, x7, xzr __LF \ neg x9, x8 __LF \ sub x10, x10, #1 __LF \ subs x3, x3, x9 __LF \ sbcs x4, x4, x10 __LF \ sbcs x5, x5, xzr __LF \ sbcs x6, x6, x8 __LF \ sbc x8, x7, x8 __LF \ adds x3, x3, x8 __LF \ and x9, x8, #4294967295 __LF \ adcs x4, x4, x9 __LF \ adcs x5, x5, xzr __LF \ neg x10, x9 __LF \ adc x6, x6, x10 __LF \ stp x3, x4, [P0] __LF \ stp x5, x6, [P0+16] // P0 = 4 * P1 - P2, by direct subtraction of P2; the method // in bignum_cmul_p256 etc. for quotient estimation still // works when the value to be reduced is negative, as // long as it is > -p_256, which is the case here. The // actual accumulation of q * p_256 is done a bit differently // so it works for the q = 0 case. 
#define cmsub41_p256(P0,P1,P2) \ ldp x1, x2, [P1] __LF \ lsl x0, x1, #2 __LF \ ldp x6, x7, [P2] __LF \ subs x0, x0, x6 __LF \ extr x1, x2, x1, #62 __LF \ sbcs x1, x1, x7 __LF \ ldp x3, x4, [P1+16] __LF \ extr x2, x3, x2, #62 __LF \ ldp x6, x7, [P2+16] __LF \ sbcs x2, x2, x6 __LF \ extr x3, x4, x3, #62 __LF \ sbcs x3, x3, x7 __LF \ lsr x4, x4, #62 __LF \ sbc x4, x4, xzr __LF \ add x5, x4, #1 __LF \ lsl x8, x5, #32 __LF \ subs x6, xzr, x8 __LF \ sbcs x7, xzr, xzr __LF \ sbc x8, x8, x5 __LF \ adds x0, x0, x5 __LF \ adcs x1, x1, x6 __LF \ adcs x2, x2, x7 __LF \ adcs x3, x3, x8 __LF \ csetm x5, cc __LF \ adds x0, x0, x5 __LF \ and x6, x5, #4294967295 __LF \ adcs x1, x1, x6 __LF \ adcs x2, x2, xzr __LF \ neg x7, x6 __LF \ adc x3, x3, x7 __LF \ stp x0, x1, [P0] __LF \ stp x2, x3, [P0+16] // P0 = 3 * P1 - 8 * P2, computed as (p_256 - P2) << 3 + 3 * P1 #define cmsub38_p256(P0,P1,P2) \ mov x1, 8 __LF \ mov x2, #-1 __LF \ ldp x9, x10, [P2] __LF \ subs x9, x2, x9 __LF \ mov x2, #4294967295 __LF \ sbcs x10, x2, x10 __LF \ ldp x11, x12, [P2+16] __LF \ sbcs x11, xzr, x11 __LF \ mov x2, #-4294967295 __LF \ sbc x12, x2, x12 __LF \ lsl x3, x9, #3 __LF \ extr x4, x10, x9, #61 __LF \ extr x5, x11, x10, #61 __LF \ extr x6, x12, x11, #61 __LF \ lsr x7, x12, #61 __LF \ mov x1, 3 __LF \ ldp x9, x10, [P1] __LF \ mul x8, x9, x1 __LF \ umulh x9, x9, x1 __LF \ adds x3, x3, x8 __LF \ mul x8, x10, x1 __LF \ umulh x10, x10, x1 __LF \ adcs x4, x4, x8 __LF \ ldp x11, x12, [P1+16] __LF \ mul x8, x11, x1 __LF \ umulh x11, x11, x1 __LF \ adcs x5, x5, x8 __LF \ mul x8, x12, x1 __LF \ umulh x12, x12, x1 __LF \ adcs x6, x6, x8 __LF \ adc x7, x7, xzr __LF \ adds x4, x4, x9 __LF \ adcs x5, x5, x10 __LF \ adcs x6, x6, x11 __LF \ adc x7, x7, x12 __LF \ add x8, x7, #1 __LF \ lsl x10, x8, #32 __LF \ adds x6, x6, x10 __LF \ adc x7, x7, xzr __LF \ neg x9, x8 __LF \ sub x10, x10, #1 __LF \ subs x3, x3, x9 __LF \ sbcs x4, x4, x10 __LF \ sbcs x5, x5, xzr __LF \ sbcs x6, x6, x8 __LF \ sbc x8, x7, x8 __LF \ adds x3, x3, x8 __LF \ and x9, x8, #4294967295 __LF \ adcs x4, x4, x9 __LF \ adcs x5, x5, xzr __LF \ neg x10, x9 __LF \ adc x6, x6, x10 __LF \ stp x3, x4, [P0] __LF \ stp x5, x6, [P0+16] S2N_BN_SYMBOL(p256_montjdouble): // Save registers and make room on stack for temporary variables sub sp, sp, NSPACE+32 stp x30, xzr, [sp, NSPACE+16] stp x19, x20, [sp, NSPACE] // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 // Main code, just a sequence of basic field operations // z2 = z^2 // y2 = y^2 montsqr_p256(z2,z_1) montsqr_p256(y2,y_1) // x2p = x^2 - z^4 = (x + z^2) * (x - z^2) sub_p256(t2,x_1,z2) weakadd_p256(t1,x_1,z2) montmul_p256(x2p,t1,t2) // t1 = y + z // xy2 = x * y^2 // x4p = x2p^2 add_p256(t1,y_1,z_1) montmul_p256(xy2,x_1,y2) montsqr_p256(x4p,x2p) // t1 = (y + z)^2 montsqr_p256(t1,t1) // d = 12 * xy2 - 9 * x4p // t1 = y^2 + 2 * y * z cmsub_p256(d_,12,xy2,9,x4p) sub_p256(t1,t1,z2) // y4 = y^4 montsqr_p256(y4,y2) // dx2 = d * x2p montmul_p256(dx2,d_,x2p) // z_3' = 2 * y * z sub_p256(z_3,t1,y2) // x' = 4 * xy2 - d cmsub41_p256(x_3,xy2,d_) // y' = 3 * dx2 - 8 * y4 cmsub38_p256(y_3,dx2,y4) // Restore registers and stack and return ldp x19, x20, [sp, NSPACE] ldp x30, xzr, [sp, NSPACE+16] add sp, sp, NSPACE+32 ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
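The field-operation sequence in p256_montjdouble above is the standard a = -3 Jacobian doubling, just phrased in Montgomery form. Below is a minimal plain-arithmetic Python sketch (not part of this repo; function names and test values are purely illustrative) that replays the same sequence with ordinary modular arithmetic and checks it against the textbook formulas; the Montgomery factors cancel out of this comparison, so the identity is the same.

# Plain-arithmetic model of the operation sequence in p256_montjdouble,
# checked against the usual a = -3 Jacobian doubling formulas.
p = 2**256 - 2**224 + 2**192 + 2**96 - 1          # p_256

def montjdouble_model(x, y, z):
    z2  = z * z % p
    y2  = y * y % p
    x2p = (x + z2) * (x - z2) % p                 # x^2 - z^4
    t1  = (y + z) * (y + z) % p
    xy2 = x * y2 % p
    x4p = x2p * x2p % p
    d   = (12 * xy2 - 9 * x4p) % p
    t1  = (t1 - z2) % p
    y4  = y2 * y2 % p
    dx2 = d * x2p % p
    z3  = (t1 - y2) % p                           # 2*y*z
    x3  = (4 * xy2 - d) % p
    y3  = (3 * dx2 - 8 * y4) % p
    return x3, y3, z3

def dbl_textbook(x, y, z):                        # "dbl-2001-b" style, a = -3
    delta, gamma = z * z % p, y * y % p
    beta  = x * gamma % p
    alpha = 3 * (x - delta) * (x + delta) % p
    x3 = (alpha * alpha - 8 * beta) % p
    z3 = ((y + z) * (y + z) - gamma - delta) % p
    y3 = (alpha * (4 * beta - x3) - 8 * gamma * gamma) % p
    return x3, y3, z3

x, y, z = 0x1234, 0x56789, 0xabcdef               # arbitrary residues; the identity is polynomial
assert montjdouble_model(x, y, z) == dbl_textbook(x, y, z)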
wlsfx/bnbb
9,734
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/fastmul/unopt/bignum_sqr_8_16_base.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Square, z := x^2 // Input x[8]; output z[16] // // extern void bignum_sqr_8_16_base (uint64_t z[static 16], uint64_t x[static 8]); // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_8_16_base) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_8_16_base) .text .balign 4 // --------------------------------------------------------------------------- // Macro computing [c,b,a] := [b,a] + (x - y) * (w - z), adding with carry // to the [b,a] components but leaving CF aligned with the c term, which is // a sign bitmask for (x - y) * (w - z). Continued add-with-carry operations // with [c,...,c] will continue the carry chain correctly starting from // the c position if desired to add to a longer term of the form [...,b,a]. // // c,h,l,t should all be different and t,h should not overlap w,z. // --------------------------------------------------------------------------- .macro muldiffnadd b,a, c,h,l,t, x,y, w,z subs \t, \x, \y cneg \t, \t, cc csetm \c, cc subs \h, \w, \z cneg \h, \h, cc mul \l, \t, \h umulh \h, \t, \h cinv \c, \c, cc adds xzr, \c, #1 eor \l, \l, \c adcs \a, \a, \l eor \h, \h, \c adcs \b, \b, \h .endm #define z x0 #define x x1 #define a0 x2 #define a1 x3 #define a2 x4 #define a3 x5 #define b0 x6 #define b1 x7 #define b2 x8 #define b3 x9 #define s0 x10 #define s1 x11 #define s2 x12 #define s3 x13 #define s4 x14 #define s5 x15 #define s6 x16 #define s7 x17 #define c x19 #define h x20 #define l x21 #define t x22 // --------------------------------------------------------------------------- // Core 4x4->8 ADK multiplication macro // Does [s7,s6,s5,s4,s3,s2,s1,s0] = [a3,a2,a1,a0] * [b3,b2,b1,b0] // --------------------------------------------------------------------------- .macro mul4 // First accumulate all the "simple" products as [s7,s6,s5,s4,s0] mul s0, a0, b0 mul s4, a1, b1 mul s5, a2, b2 mul s6, a3, b3 umulh s7, a0, b0 adds s4, s4, s7 umulh s7, a1, b1 adcs s5, s5, s7 umulh s7, a2, b2 adcs s6, s6, s7 umulh s7, a3, b3 adc s7, s7, xzr // Multiply by B + 1 to get [s7;s6;s5;s4;s1;s0] adds s1, s4, s0 adcs s4, s5, s4 adcs s5, s6, s5 adcs s6, s7, s6 adc s7, xzr, s7 // Multiply by B^2 + 1 to get [s7;s6;s5;s4;s3;s2;s1;s0] adds s2, s4, s0 adcs s3, s5, s1 adcs s4, s6, s4 adcs s5, s7, s5 adcs s6, xzr, s6 adc s7, xzr, s7 // Now add in all the "complicated" terms. muldiffnadd s6,s5, c,h,l,t, a2,a3, b3,b2 adc s7, s7, c muldiffnadd s2,s1, c,h,l,t, a0,a1, b1,b0 adcs s3, s3, c adcs s4, s4, c adcs s5, s5, c adcs s6, s6, c adc s7, s7, c muldiffnadd s5,s4, c,h,l,t, a1,a3, b3,b1 adcs s6, s6, c adc s7, s7, c muldiffnadd s3,s2, c,h,l,t, a0,a2, b2,b0 adcs s4, s4, c adcs s5, s5, c adcs s6, s6, c adc s7, s7, c muldiffnadd s4,s3, c,h,l,t, a0,a3, b3,b0 adcs s5, s5, c adcs s6, s6, c adc s7, s7, c muldiffnadd s4,s3, c,h,l,t, a1,a2, b2,b1 adcs s5, s5, c adcs s6, s6, c adc s7, s7, c .endm // --------------------------------------------------------------------------- // The main code // --------------------------------------------------------------------------- S2N_BN_SYMBOL(bignum_sqr_8_16_base): // Save registers stp x19, x20, [sp, #-16]! stp x21, x22, [sp, #-16]! 
// Load all the inputs first ldp a0, a1, [x] ldp a2, a3, [x, #16] ldp b0, b1, [x, #32] ldp b2, b3, [x, #48] // Square the lower half with a near-clone of bignum_sqr_4_8 mul x17, x2, x4 mul x14, x3, x5 umulh x20, x2, x4 subs x21, x2, x3 cneg x21, x21, cc csetm x11, cc subs x12, x5, x4 cneg x12, x12, cc mul x13, x21, x12 umulh x12, x21, x12 cinv x11, x11, cc eor x13, x13, x11 eor x12, x12, x11 adds x19, x17, x20 adc x20, x20, xzr umulh x21, x3, x5 adds x19, x19, x14 adcs x20, x20, x21 adc x21, x21, xzr adds x20, x20, x14 adc x21, x21, xzr cmn x11, #0x1 adcs x19, x19, x13 adcs x20, x20, x12 adc x21, x21, x11 adds x17, x17, x17 adcs x19, x19, x19 adcs x20, x20, x20 adcs x21, x21, x21 adc x10, xzr, xzr mul x12, x2, x2 mul x13, x3, x3 mul x15, x2, x3 umulh x11, x2, x2 umulh x14, x3, x3 umulh x16, x2, x3 adds x11, x11, x15 adcs x13, x13, x16 adc x14, x14, xzr adds x11, x11, x15 adcs x13, x13, x16 adc x14, x14, xzr stp x12, x11, [z] adds x17, x17, x13 adcs x19, x19, x14 adcs x20, x20, xzr adcs x21, x21, xzr adc x10, x10, xzr stp x17, x19, [z, #16] mul x12, x4, x4 mul x13, x5, x5 mul x15, x4, x5 umulh x11, x4, x4 umulh x14, x5, x5 umulh x16, x4, x5 adds x11, x11, x15 adcs x13, x13, x16 adc x14, x14, xzr adds x11, x11, x15 adcs x13, x13, x16 adc x14, x14, xzr adds x12, x12, x20 adcs x11, x11, x21 stp x12, x11, [z, #32] adcs x13, x13, x10 adc x14, x14, xzr stp x13, x14, [z, #48] // Square the upper half with a slight variant of the previous block mul x17, x6, x8 mul x14, x7, x9 umulh x20, x6, x8 subs x21, x6, x7 cneg x21, x21, cc csetm x11, cc subs x12, x9, x8 cneg x12, x12, cc mul x13, x21, x12 umulh x12, x21, x12 cinv x11, x11, cc eor x13, x13, x11 eor x12, x12, x11 adds x19, x17, x20 adc x20, x20, xzr umulh x21, x7, x9 adds x19, x19, x14 adcs x20, x20, x21 adc x21, x21, xzr adds x20, x20, x14 adc x21, x21, xzr cmn x11, #0x1 adcs x19, x19, x13 adcs x20, x20, x12 adc x21, x21, x11 adds x17, x17, x17 adcs x19, x19, x19 adcs x20, x20, x20 adcs x21, x21, x21 adc x10, xzr, xzr mul x12, x6, x6 mul x13, x7, x7 mul x15, x6, x7 umulh x11, x6, x6 umulh x14, x7, x7 umulh x16, x6, x7 adds x11, x11, x15 adcs x13, x13, x16 adc x14, x14, xzr adds x11, x11, x15 adcs x13, x13, x16 adc x14, x14, xzr stp x12, x11, [z, #64] adds x17, x17, x13 adcs x19, x19, x14 adcs x20, x20, xzr adcs x21, x21, xzr adc x10, x10, xzr stp x17, x19, [z, #80] mul x12, x8, x8 mul x13, x9, x9 mul x15, x8, x9 umulh x11, x8, x8 umulh x14, x9, x9 umulh x16, x8, x9 adds x11, x11, x15 adcs x13, x13, x16 adc x14, x14, xzr adds x11, x11, x15 adcs x13, x13, x16 adc x14, x14, xzr adds x12, x12, x20 adcs x11, x11, x21 stp x12, x11, [z, #96] adcs x13, x13, x10 adc x14, x14, xzr stp x13, x14, [z, #112] // Now get the cross-product in [s7,...,s0] and double it as [c,s7,...,s0] mul4 adds s0, s0, s0 adcs s1, s1, s1 adcs s2, s2, s2 adcs s3, s3, s3 adcs s4, s4, s4 adcs s5, s5, s5 adcs s6, s6, s6 adcs s7, s7, s7 adc c, xzr, xzr // Add it back to the buffer ldp a0, a1, [z, #32] adds s0, s0, a0 adcs s1, s1, a1 stp s0, s1, [z, #32] ldp a0, a1, [z, #48] adcs s2, s2, a0 adcs s3, s3, a1 stp s2, s3, [z, #48] ldp a0, a1, [z, #64] adcs s4, s4, a0 adcs s5, s5, a1 stp s4, s5, [z, #64] ldp a0, a1, [z, #80] adcs s6, s6, a0 adcs s7, s7, a1 stp s6, s7, [z, #80] ldp a0, a1, [z, #96] adcs a0, a0, c adcs a1, a1, xzr stp a0, a1, [z, #96] ldp a0, a1, [z, #112] adcs a0, a0, xzr adc a1, a1, xzr stp a0, a1, [z, #112] // Restore regs and return ldp x21, x22, [sp], #16 ldp x19, x20, [sp], #16 ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
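As the comments above spell out, bignum_sqr_8_16_base squares the two 4-word halves in place and then adds the doubled 4x4 cross product (the mul4 ADK block) into the buffer starting four words up. A short Python identity check of that splitting (illustrative only, not taken from the repo):

# With B = 2^256 and x = a + B*b:  x^2 = a^2 + B^2*b^2 + 2*B*(a*b),
# i.e. two half squares written in place plus a doubled cross product
# accumulated at a 4-word offset.
import random
B = 2**256
x = random.getrandbits(512)
a, b = x % B, x // B                  # low and high 4-word halves
acc = a * a + B * B * (b * b)         # the two half squarings
acc += 2 * B * (a * b)                # doubled cross product, offset by 4 words
assert acc == x * x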
wlsfx/bnbb
10,189
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/fastmul/unopt/bignum_emontredc_8n_base.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Extended Montgomery reduce in 8-digit blocks, results in input-output buffer // Inputs z[2*k], m[k], w; outputs function return (extra result bit) and z[2*k] // // extern uint64_t bignum_emontredc_8n_base // (uint64_t k, uint64_t *z, uint64_t *m, uint64_t w); // // Functionally equivalent to bignum_emontredc (see that file for more detail). // But in general assumes that the input k is a multiple of 8. // // Standard ARM ABI: X0 = k, X1 = z, X2 = m, X3 = w, returns X0 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_emontredc_8n_base) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_emontredc_8n_base) .text .balign 4 // --------------------------------------------------------------------------- // Macro computing (c,h,l) = 3-word 1s complement (x - y) * (w - z) // and adding it with carry to (b,a) so that we have CF+c in the 2 position. // // c,h,l,t should all be different // t,h should not overlap w,z // --------------------------------------------------------------------------- #define muldiffnadd(b,a, c,h,l,t, x,y, w,z) \ subs t, x, y __LF\ cneg t, t, cc __LF\ csetm c, cc __LF\ subs h, w, z __LF\ cneg h, h, cc __LF\ mul l, t, h __LF\ umulh h, t, h __LF\ cinv c, c, cc __LF\ adds xzr, c, #1 __LF\ eor l, l, c __LF\ adcs a, a, l __LF\ eor h, h, c __LF\ adcs b, b, h // The inputs, though k gets processed so we use a different name #define k4m1 x0 #define z x1 #define m x2 #define w x3 // Additional variables #define a0 x4 #define a1 x5 #define a2 x6 #define a3 x7 #define b0 x8 #define b1 x9 #define b2 x10 #define b3 x11 #define c0 x12 #define c1 x13 #define c2 x14 #define c3 x15 #define c4 x16 #define u0 x17 #define u1 x19 #define u2 x20 #define u3 x21 #define u4 x22 #define u5 x23 #define u6 x24 #define u7 x25 // These temp registers are aliased to c0..c3, which is safe here #define c x12 #define h x13 #define l x14 #define t x15 // Loop counters, outer and inner #define i x26 #define j x27 // Top carry, eventually returned when aligned with top // It's maintained as a bitmask since this seems slightly easier(?) 
#define tc x28 // ----------------------------------------------------------------------- // The basic 4x4->8 multiply-add block, which does in ADK style (10 muls): // // [c3;c2;c1;c0;z_3;z_2;z_1;z_0] := // [a3;a2;a1;a0] * [b3;b2;b1;b0] + [c3;c2;c1;c0] + [z_3;z_2;z_1;z_0] // ----------------------------------------------------------------------- .macro madd4 mul u0, a0, b0 mul u4, a1, b1 mul u5, a2, b2 mul u6, a3, b3 // Accumulate the simple products as [u7,u6,u5,u4,u0] umulh c4, a0, b0 adds u4, u4, c4 umulh c4, a1, b1 adcs u5, u5, c4 umulh c4, a2, b2 adcs u6, u6, c4 umulh c4, a3, b3 adc u7, c4, xzr // Add up the carry-in and the existing z contents ldp u2, u3, [z] adds c0, c0, u2 adcs c1, c1, u3 ldp u2, u3, [z, #16] adcs c2, c2, u2 adcs c3, c3, u3 adc c4, xzr, xzr // Multiply by B + 1 to get [u7;u6;u5;u4;u1;u0] adds u1, u4, u0 adcs u4, u5, u4 adcs u5, u6, u5 adcs u6, u7, u6 adc u7, xzr, u7 // Multiply by B^2 + 1 to get [u6;u5;u4;u3;u2;u1;u0] adds u2, u4, u0 adcs u3, u5, u1 adcs u4, u6, u4 adcs u5, u7, u5 adcs u6, xzr, u6 adc u7, xzr, u7 // Add in the carry-in and original z contents adds u0, u0, c0 adcs u1, u1, c1 adcs u2, u2, c2 adcs u3, u3, c3 adcs u4, u4, c4 adcs u5, u5, xzr adcs u6, u6, xzr adc u7, u7, xzr // Now add in all the "complicated" terms. muldiffnadd (u6,u5, c,h,l,t, a2,a3, b3,b2) adc u7, u7, c muldiffnadd (u2,u1, c,h,l,t, a0,a1, b1,b0) adcs u3, u3, c adcs u4, u4, c adcs u5, u5, c adcs u6, u6, c adc u7, u7, c muldiffnadd (u5,u4, c,h,l,t, a1,a3, b3,b1) adcs u6, u6, c adc u7, u7, c muldiffnadd (u3,u2, c,h,l,t, a0,a2, b2,b0) adcs u4, u4, c adcs u5, u5, c adcs u6, u6, c adc u7, u7, c muldiffnadd (u4,u3, c,h,l,t, a0,a3, b3,b0) adcs u5, u5, c adcs u6, u6, c adc u7, u7, c muldiffnadd (u4,u3, c,h,l,t, a1,a2, b2,b1) adcs c1, u5, c adcs c2, u6, c adc c3, u7, c mov c0, u4 stp u0, u1, [z] stp u2, u3, [z, #16] .endm // ***************************************************** // Main code // ***************************************************** S2N_BN_SYMBOL(bignum_emontredc_8n_base): stp x19, x20, [sp, #-16]! stp x21, x22, [sp, #-16]! stp x23, x24, [sp, #-16]! stp x25, x26, [sp, #-16]! stp x27, x28, [sp, #-16]! // Set up (k/4 - 1)<<5 which is used as inner count and pointer fixup // ns i = k/4 as the outer loop count. // At this point skip everything if k/4 = 0, returning our x0 = 0 value lsr k4m1, x0, #2 mov i, k4m1 subs c, k4m1, #1 bcc bignum_emontredc_8n_base_end mov tc, xzr lsl k4m1, c, #5 // Outer loop, one digit of Montgomery reduction adding in word * m. // Rather than propagating the carry to the end each time, we // stop at the "natural" end and store top carry in tc as a bitmask. 
bignum_emontredc_8n_base_outerloop: // Load [u3;u2;u1;u0] = bottom 4 digits of the input at current window ldp u0, u1, [z] ldp u2, u3, [z, #16] // Load the bottom 4 digits of m ldp b0, b1, [m] ldp b2, b3, [m, #16] // Montgomery step 0 mul a0, u0, w mul c0, a0, b0 mul c1, a0, b1 mul c2, a0, b2 mul c3, a0, b3 adds u0, u0, c0 umulh c0, a0, b0 adcs u1, u1, c1 umulh c1, a0, b1 adcs u2, u2, c2 umulh c2, a0, b2 adcs u3, u3, c3 umulh c3, a0, b3 adc u4, xzr, xzr adds u1, u1, c0 adcs u2, u2, c1 adcs u3, u3, c2 adc u4, u4, c3 // Montgomery step 1 mul a1, u1, w mul c0, a1, b0 mul c1, a1, b1 mul c2, a1, b2 mul c3, a1, b3 adds u1, u1, c0 umulh c0, a1, b0 adcs u2, u2, c1 umulh c1, a1, b1 adcs u3, u3, c2 umulh c2, a1, b2 adcs u4, u4, c3 umulh c3, a1, b3 adc u5, xzr, xzr adds u2, u2, c0 adcs u3, u3, c1 adcs u4, u4, c2 adc u5, u5, c3 // Montgomery step 2 mul a2, u2, w mul c0, a2, b0 mul c1, a2, b1 mul c2, a2, b2 mul c3, a2, b3 adds u2, u2, c0 umulh c0, a2, b0 adcs u3, u3, c1 umulh c1, a2, b1 adcs u4, u4, c2 umulh c2, a2, b2 adcs u5, u5, c3 umulh c3, a2, b3 adc u6, xzr, xzr adds u3, u3, c0 adcs u4, u4, c1 adcs u5, u5, c2 adc u6, u6, c3 // Montgomery step 3. In the last four instructions we put the top in // the carry variables expected by the "madd" block next, which is why // the pattern is slightly different. mul a3, u3, w mul c0, a3, b0 mul c1, a3, b1 mul c2, a3, b2 mul c3, a3, b3 adds u3, u3, c0 umulh c0, a3, b0 adcs u4, u4, c1 umulh c1, a3, b1 adcs u5, u5, c2 umulh c2, a3, b2 adcs u6, u6, c3 umulh c3, a3, b3 adc u7, xzr, xzr adds c0, u4, c0 adcs c1, u5, c1 adcs c2, u6, c2 adc c3, u7, c3 // Stash the multipliers as expected by the bignum_emontredc interface // We don't use these ourselves again though; they stay in [a3;a2;a1;a0] stp a0, a1, [z] stp a2, a3, [z, #16] // Repeated multiply-add block to do the k/4-1 remaining 4-digit chunks mov j, k4m1 bignum_emontredc_8n_base_maddloop: add m, m, #32 add z, z, #32 ldp b0, b1, [m] ldp b2, b3, [m, #16] madd4 subs j, j, #32 bne bignum_emontredc_8n_base_maddloop bignum_emontredc_8n_base_madddone: // Add the carry out to the existing z contents, propagating the // top carry tc up by 32 places as we move "leftwards". ldp u0, u1, [z, #32] ldp u2, u3, [z, #48] adds xzr, tc, tc adcs u0, u0, c0 adcs u1, u1, c1 adcs u2, u2, c2 adcs u3, u3, c3 csetm tc, cs stp u0, u1, [z, #32] stp u2, u3, [z, #48] // Compensate for the repeated bumps in m and z in the inner loop sub z, z, k4m1 sub m, m, k4m1 // Bump up z only and keep going add z, z, #32 sub i, i, #1 cbnz i, bignum_emontredc_8n_base_outerloop // Return the top carry as 0 or 1 (it's currently a bitmask) neg x0, tc bignum_emontredc_8n_base_end: ldp x27, x28, [sp], #16 ldp x25, x26, [sp], #16 ldp x23, x24, [sp], #16 ldp x21, x22, [sp], #16 ldp x19, x20, [sp], #16 ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
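A word-level Python model of what this reduction achieves may be useful: each Montgomery step multiplies the current low word by the negated modular inverse w and adds that multiple of m, clearing one word per step. This is only a behavioural sketch of the arithmetic (assuming w * m == -1 mod 2^64); the real routine also writes the quotient digits back into the low half of z and returns the top carry as a separate bit.

W = 2**64

def emontredc_model(z, m, w, k):
    # Clear the bottom k words of z by adding word multiples of m,
    # then return (z + q*m) / 2^(64*k).
    for i in range(k):
        d = (z >> (64 * i)) & (W - 1)     # current low word of the window
        q = (d * w) & (W - 1)             # quotient digit: q*m = -d (mod 2^64)
        z += (q * m) << (64 * i)          # zeroes word i, carries move upward
    return z >> (64 * k)

import random
k = 8
m = random.getrandbits(64 * k) | 1        # any odd modulus of up to k words
w = (-pow(m, -1, W)) % W                  # negated inverse, the 'w' argument
z = random.getrandbits(128 * k)
r = emontredc_model(z, m, w, k)
assert (r << (64 * k)) % m == z % m       # r == z / 2^(64k)  (mod m)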
wlsfx/bnbb
22,193
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/fastmul/unopt/bignum_emontredc_8n_cdiff_base.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC // ---------------------------------------------------------------------------- // Extend Montgomery reduce in 8-digit blocks, uses an extra storage to // temporarily cache multiplied differences appearing in ADK. // Results are stored in input-output buffer (z). // Inputs z[2*k], m[k], w; // Outputs function return (extra result bit) and z[2*k] // Temporary buffer m_precalc[12*(k/4-1)] // // extern uint64_t bignum_emontredc_8n_cdiff // (uint64_t k, uint64_t *z, uint64_t *m, uint64_t w, uint64_t *m_precalc); // // Standard ARM ABI: X0 = k, X1 = z, X2 = m, X3 = w, X4 = m_precalc // returns X0 // // This is an unoptimized version of bignum_emontredc_8n_cdiff. // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_emontredc_8n_cdiff_base) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_emontredc_8n_cdiff_base) .text .balign 4 // Silly SLOTHY limitation: It needs the loop counter to have the name 'count' count .req x27 // inner loop counter // Semantically transparent instruction wrapper which is used by SLOTHY // for dependency tracking through memory. SLOTHY itself has no notion of // memory, only registers; to still track static dependencies through memory // (register spills), a 'hint' register type is introduced (syntax t{i}, t{i}{j}) // that's written to in store instructions and read from in the corresponding // load instruction. // // The 'slothy:no-unfold' annotation prevents SLOTHY from opening the macro, // and instead makes it treat `stph` as an instruction specified in the // Arch and uArch models provided to it. .macro stph a, b, addr, imm, hint // slothy:no-unfold stp \a\(), \b\(), [\addr, \imm] .endm .macro ldph a, b, addr, imm, hint // slothy:no-unfold ldp \a\(), \b\(), [\addr, \imm] .endm .macro ldrh a, addr, imm, hint // slothy:no-unfold ldr \a\(), [\addr, \imm] .endm // Helper macro for the pre-computations .macro cdiff t, c, x, y subs \t, \x, \y cneg \t, \t, cc csetm \c, cc .endm // Some immediate offsets for cached differences+carry used // in the inner ADK multiplications #define cache_a01 (32+0*16) #define cache_a02 (32+1*16) #define cache_a03 (32+2*16) #define cache_a12 (32+3*16) #define cache_a13 (32+4*16) #define cache_a23 (32+5*16) #define cache_m10 (0*16) #define cache_m20 (1*16) #define cache_m30 (2*16) #define cache_m21 (3*16) #define cache_m31 (4*16) #define cache_m32 (5*16) a0 .req x4 a1 .req x5 a2 .req x6 a3 .req x7 vpre00 .req v30 vpre01 .req v28 vpre02 .req v17 vpre10 .req v18 vpre11 .req v19 vpre12 .req v20 // Computes two 64x64->128-bit multiplication a*x and a*y // v_in0: 128-bit input vector viewed as pair (x,y) of 64-bit numbers // x_in: 64-bit common multiplicand a // v_out0: 128-bit output vector to hold a*x // v_out1: 128-bit output vector to hold a*y // // Uses temporaries as indicated in the following defines: #define v_in0_p v3 #define v_in0_pp v5 #define v_in0_ppp v28 #define v_in1 v0 #define vtmp0 v4 #define vtmp1 v6 #define vtmp2 v7 #define vtmp3 v16 #define vtmp4 v2 .macro vmul_2x_64_64_128 v_in0, x_in, v_out0, v_out1 // slothy:no-unfold dup v_in1.2d, \x_in uzp2 v_in0_p.4s, \v_in0\().4s, \v_in0\().4s xtn vtmp0.2s, v_in1.2d xtn v_in0_pp.2s, \v_in0\().2d rev64 v_in0_ppp.4s, \v_in0\().4s umull vtmp1.2d, vtmp0.2s, v_in0_pp.2s umull vtmp2.2d, vtmp0.2s, v_in0_p.2s uzp2 vtmp3.4s, v_in1.4s, v_in1.4s mul v_in1.4s, v_in0_ppp.4s, v_in1.4s usra 
vtmp2.2d, vtmp1.2d, #32 umull \v_out1\().2d, vtmp3.2s, v_in0_p.2s uaddlp v_in1.2d, v_in1.4s and vtmp4.16b, vtmp2.16b, v29.16b umlal vtmp4.2d, vtmp3.2s, v_in0_pp.2s shl \v_out0\().2d, v_in1.2d, #32 usra \v_out1\().2d, vtmp2.2d, #32 umlal \v_out0\().2d, vtmp0.2s, v_in0_pp.2s usra \v_out1\().2d, vtmp4.2d, #32 .endm // SLOTHY version of the above multiplication macro, using symbolic // registers instead of hardcoded registers. This is only used during // SLOTHY optimization (the above macro is ignored because of // 'slothy:no-unfold'). #if defined(SLOTHY) .macro vmul_2x_64_64_128 v_in0, x_in, v_out0, v_out1 dup V<in1>.2d, \x_in uzp2 V<in0_p>.4s, \v_in0\().4s, \v_in0\().4s xtn V<tmp0>.2s, V<in1>.2d xtn V<in0_pp>.2s, \v_in0\().2d rev64 V<in0_ppp>.4s, \v_in0\().4s umull V<tmp1>.2d, V<tmp0>.2s, V<in0_pp>.2s umull V<tmp2>.2d, V<tmp0>.2s, V<in0_p>.2s uzp2 V<tmp3>.4s, V<in1>.4s, V<in1>.4s mul V<in1>.4s, V<in0_ppp>.4s, V<in1>.4s usra V<tmp2>.2d, V<tmp1>.2d, #32 umull \v_out1\().2d, V<tmp3>.2s, V<in0_p>.2s uaddlp V<in1>.2d, V<in1>.4s and V<tmp4>.16b, V<tmp2>.16b, v29.16b umlal V<tmp4>.2d, V<tmp3>.2s, V<in0_pp>.2s shl \v_out0\().2d, V<in1>.2d, #32 usra \v_out1\().2d, V<tmp2>.2d, #32 umlal \v_out0\().2d, V<tmp0>.2s, V<in0_pp>.2s usra \v_out1\().2d, V<tmp4>.2d, #32 .endm #endif S2N_BN_SYMBOL(bignum_emontredc_8n_cdiff_base): sub sp, sp, #(6*16) stp x19, x20, [sp, #(5*16)] stp x21, x22, [sp, #(4*16)] stp x23, x24, [sp, #(3*16)] stp x25, x26, [sp, #(2*16)] stp x27, x28, [sp, #(1*16)] stp x29, x30, [sp, #(0*16)] // Leave space for cached differences in inner loop sub sp, sp, #(6*16) sub sp, sp, #32 lsr x0, x0, #2 mov x26, x0 subs x12, x0, #1 bcc bignum_emontredc_8n_cdiff_base_end // x30 = buffer holding precomputed ADK carry-differences for modulus mov w30, #(12*8) mul w30, w12, w30 sub x30, sp, x30 // // Start of precomputation // // Precompute and cache signed differences of modulus components // used in the ADK multiplication in the inner loop. // // THIS SHOULD BE HOISTED OUT // (and until then, comment out for benchmarking to get accurate estimates) // // Number of extra limbs required: // 6 * (number of limbs / 4 - 1) * 2 = 12 * (number_of_limbs/4 - 1) // // For now, just put them on the stack mov sp, x30 // Save modulus pointer mov x25, x2 mov count, x12 bignum_emontredc_8n_cdiff_base_precomp: ldp a0, a1, [x2, #32]! ldp a2, a3, [x2, #16] t .req x28 c .req x29 cdiff t, c, a1, a0 stp t, c, [sp, #cache_m10] cdiff t, c, a2, a0 stp t, c, [sp, #cache_m20] cdiff t, c, a3, a0 stp t, c, [sp, #cache_m30] cdiff t, c, a2, a1 stp t, c, [sp, #cache_m21] cdiff t, c, a3, a1 stp t, c, [sp, #cache_m31] cdiff t, c, a3, a2 stp t, c, [sp, #cache_m32] add sp, sp, #(6*16) subs count, count, #1 cbnz count, bignum_emontredc_8n_cdiff_base_precomp // Set modulus pointer back to its original value mov x2, x25 // // End of precomputation // stp x3, x30, [sp] //stp x3, xzr, [sp] stp x26, xzr, [sp, #16] mov x28, xzr lsl x0, x12, #5 movi v29.2d, #0x000000ffffffff bignum_emontredc_8n_cdiff_base_outerloop: ldr x3, [sp] ldph x17, x19, x1, #0, t0 ldph x20, x21, x1, #16, t1 ldp x8, x9, [x2, #0] ldp x10, x11, [x2, #16] ldr q21, [x2, #16] // Montgomery step 0 mul x4, x17, x3 // NEON: Calculate x4 * (x10, x11) that does two 64x64->128-bit multiplications. 
vmul_2x_64_64_128 v21, x4, v0, v1 mov x14, v0.d[0] mov x15, v0.d[1] mul x12, x4, x8 adds x17, x17, x12 umulh x12, x4, x8 mul x13, x4, x9 adcs x19, x19, x13 umulh x13, x4, x9 adcs x20, x20, x14 adcs x21, x21, x15 mov x14, v1.d[0] mov x15, v1.d[1] adc x22, xzr, xzr adds x19, x19, x12 adcs x20, x20, x13 adcs x21, x21, x14 adc x22, x22, x15 // Montgomery step 1 mul x5, x19, x3 // NEON: Calculate x5 * (x10, x11) that does two 64x64->128-bit multiplications. vmul_2x_64_64_128 v21, x5, v0, v1 mov x14, v0.d[0] mov x15, v0.d[1] mul x12, x5, x8 adds x19, x19, x12 umulh x12, x5, x8 mul x13, x5, x9 adcs x20, x20, x13 umulh x13, x5, x9 adcs x21, x21, x14 adcs x22, x22, x15 mov x14, v1.d[0] mov x15, v1.d[1] adc x23, xzr, xzr adds x20, x20, x12 adcs x21, x21, x13 adcs x22, x22, x14 adc x23, x23, x15 stph x4, x5, x1, #0, t0 // Montgomery step 2 mul x6, x20, x3 // NEON: Calculate x6 * (x10, x11) that does two 64x64->128-bit multiplications. vmul_2x_64_64_128 v21, x6, v21, v1 mov x14, v21.d[0] mov x15, v21.d[1] mul x12, x6, x8 adds x20, x20, x12 umulh x12, x6, x8 mul x13, x6, x9 adcs x21, x21, x13 umulh x13, x6, x9 adcs x22, x22, x14 adcs x23, x23, x15 mov x14, v1.d[0] mov x15, v1.d[1] adc x24, xzr, xzr adds x21, x21, x12 mul x7, x21, x3 adcs x22, x22, x13 adcs x23, x23, x14 adc x24, x24, x15 stph x6, x7, x1, #16, t1 // Montgomery step 3 mul x12, x7, x8 mul x13, x7, x9 mul x14, x7, x10 mul x15, x7, x11 adds x21, x21, x12 umulh x12, x7, x8 adcs x22, x22, x13 umulh x13, x7, x9 adcs x23, x23, x14 umulh x14, x7, x10 adcs x24, x24, x15 umulh x15, x7, x11 adc x25, xzr, xzr adds x12, x22, x12 adcs x13, x23, x13 adcs x14, x24, x14 adc x15, x25, x15 lsr count, x0, #5 ldrh q20, x1, #0, t0 ldrh q21, x1, #16, t1 // Precompute and cache differences required in the // ADK multiplication conducted by the innerl oop. // Save each difference (somewhat inefficiently) // as a pair (t,c) of 64-bit + carry. // // The same caching trick is applied to the modulus, // for which the various differences can even be hoisted // out of the entire multiplication routine. // a0 - a1 with carry cdiff x16,x26,a0,a1 stph x16, x26, sp, #cache_a01, t01 // a0 - a2 with carry cdiff x16,x26,a0,a2 stph x16, x26, sp, #cache_a02, t02 // a0 - a3 with carry cdiff x16,x26,a0,a3 stph x16, x26, sp, #cache_a03, t03 // a1 - a2 with carry cdiff x16,x26,a1,a2 stph x16, x26, sp, #cache_a12, t12 // a1 - a3 with carry cdiff x16,x26,a1,a3 stph x16, x26, sp, #cache_a13, t13 // a2 - a3 with carry cdiff x16,x26,a2,a3 stph x16, x26, sp, #cache_a23, t23 // Precompute and cache some precomputations for // the Neon multiplications in the inner loop uzp2 vpre00.4s, v20.4s, v20.4s xtn vpre01.2s, v20.2d rev64 vpre02.4s, v20.4s uzp2 vpre10.4s, v21.4s, v21.4s xtn vpre11.2s, v21.2d rev64 vpre12.4s, v21.4s bignum_emontredc_8n_cdiff_base_maddloop_neon: ldr q22, [x2, #32]! 
ldr q23, [x2, #16] xtn v4.2s, v22.2d umull v6.2d, v4.2s, vpre01.2s umull v7.2d, v4.2s, vpre00.2s uzp2 v16.4s, v22.4s, v22.4s mul v0.4s, vpre02.4s, v22.4s usra v7.2d, v6.2d, #32 umull v25.2d, v16.2s, vpre00.2s uaddlp v0.2d, v0.4s and v2.16b, v7.16b, v29.16b umlal v2.2d, v16.2s, vpre01.2s shl v24.2d, v0.2d, #32 usra v25.2d, v7.2d, #32 umlal v24.2d, v4.2s, vpre01.2s usra v25.2d, v2.2d, #32 // Original version without caching // uzp2 v3.4s, v22.4s, v22.4s // xtn v4.2s, v20.2d // xtn v5.2s, v22.2d // rev64 v1.4s, v22.4s // umull v6.2d, v4.2s, v5.2s // umull v7.2d, v4.2s, v3.2s // uzp2 v16.4s, v20.4s, v20.4s // mul v0.4s, v1.4s, v20.4s // usra v7.2d, v6.2d, #32 // umull v25.2d, v16.2s, v3.2s // uaddlp v0.2d, v0.4s // and v2.16b, v7.16b, v29.16b // umlal v2.2d, v16.2s, v5.2s // shl v24.2d, v0.2d, #32 // usra v25.2d, v7.2d, #32 // umlal v24.2d, v4.2s, v5.2s // usra v25.2d, v2.2d, #32 xtn v4.2s, v23.2d umull v6.2d, v4.2s, vpre11.2s umull v7.2d, v4.2s, vpre10.2s uzp2 v16.4s, v23.4s, v23.4s mul v0.4s, vpre12.4s, v23.4s usra v7.2d, v6.2d, #32 umull v27.2d, v16.2s, vpre10.2s uaddlp v0.2d, v0.4s and v2.16b, v7.16b, v29.16b umlal v2.2d, v16.2s, vpre11.2s shl v26.2d, v0.2d, #32 usra v27.2d, v7.2d, #32 umlal v26.2d, v4.2s, vpre11.2s usra v27.2d, v2.2d, #32 // Original version without caching // uzp2 v3.4s, v23.4s, v23.4s // xtn v4.2s, v21.2d // xtn v5.2s, v23.2d // rev64 v1.4s, v23.4s // umull v6.2d, v4.2s, v5.2s // umull v7.2d, v4.2s, v3.2s // uzp2 v16.4s, v21.4s, v21.4s // mul v0.4s, v1.4s, v21.4s // usra v7.2d, v6.2d, #32 // umull v27.2d, v16.2s, v3.2s // uaddlp v0.2d, v0.4s // and v2.16b, v7.16b, v29.16b // umlal v2.2d, v16.2s, v5.2s // shl v26.2d, v0.2d, #32 // usra v27.2d, v7.2d, #32 // umlal v26.2d, v4.2s, v5.2s // usra v27.2d, v2.2d, #32 mov x16, v25.d[0] // hi bits of (x4 * x8) mov x26, v27.d[0] // hi bits of (x6 * x10) mov x3, v25.d[1] // hi bits of (x5 * x9) mov x17, v27.d[1] // hi bits of (x6 * x10) mov x20, v24.d[1] // lo bits of (x5 * x9) mov x21, v26.d[0] // lo bits of (x6 * x10) mov x24, v26.d[1] // lo bits of (x7 * x11) // Not necessary if one uses cached differences for the modulus //ldp x8, x9, [x2, #0] //ldp x10, x11, [x2, #16] adds x22, x20, x16 adcs x23, x21, x3 adcs x24, x24, x26 adc x25, x17, xzr mov x17, v24.d[0] // lo bits of (x4 * x8) ldp x20, x21, [x1, #32]! 
adds x12, x12, x20 adcs x13, x13, x21 ldp x20, x21, [x1, #16] adcs x14, x14, x20 adcs x15, x15, x21 adc x16, xzr, xzr adds x19, x22, x17 adcs x22, x23, x22 adcs x23, x24, x23 adcs x24, x25, x24 adc x25, xzr, x25 adds x20, x22, x17 adcs x21, x23, x19 adcs x22, x24, x22 adcs x23, x25, x23 adcs x24, xzr, x24 adc x25, xzr, x25 adds x17, x17, x12 adcs x19, x19, x13 adcs x20, x20, x14 adcs x21, x21, x15 adcs x22, x22, x16 adcs x23, x23, xzr adcs x24, x24, xzr adc x25, x25, xzr ldph x15, x12, sp, #cache_a23, t23 // Original code without caching //subs x15, x6, x7 //cneg x15, x15, cc //csetm x12, cc ldp x13, x14, [x30, #cache_m32] eor x12, x12, x14 // Original code without caching //cdiff x13, x14, x11, x10 //subs x13, x11, x10 //cneg x13, x13, cc mul x14, x15, x13 umulh x13, x15, x13 adds xzr, x12, #1 eor x14, x14, x12 adcs x23, x23, x14 eor x13, x13, x12 adcs x24, x24, x13 adc x25, x25, x12 ldph x15, x12, sp, #cache_a01, t01 //subs x15, x4, x5 //cneg x15, x15, cc //csetm x12, cc ldp x13, x14, [x30, #cache_m10] eor x12, x12, x14 // Original code without caching //subs x13, x9, x8 //cneg x13, x13, cc //cinv x12, x12, cc mul x14, x15, x13 umulh x13, x15, x13 adds xzr, x12, #1 eor x14, x14, x12 adcs x19, x19, x14 eor x13, x13, x12 adcs x20, x20, x13 adcs x21, x21, x12 adcs x22, x22, x12 adcs x23, x23, x12 adcs x24, x24, x12 adc x25, x25, x12 stp x17, x19, [x1, #0] ldph x15, x12, sp, #cache_a13, t13 //subs x15, x5, x7 //cneg x15, x15, cc //csetm x12, cc ldp x13, x14, [x30, #cache_m31] eor x12, x12, x14 // Original code without caching //subs x13, x11, x9 //cneg x13, x13, cc //cinv x12, x12, cc mul x14, x15, x13 umulh x13, x15, x13 adds xzr, x12, #1 eor x14, x14, x12 adcs x22, x22, x14 eor x13, x13, x12 adcs x23, x23, x13 adcs x24, x24, x12 adc x25, x25, x12 ldph x15, x12, sp, #cache_a02, t02 //subs x15, x4, x6 //cneg x15, x15, cc //csetm x12, cc ldp x13, x14, [x30, #cache_m20] eor x12, x12, x14 // Original code without caching //subs x13, x10, x8 //cneg x13, x13, cc //cinv x12, x12, cc mul x14, x15, x13 umulh x13, x15, x13 adds xzr, x12, #1 eor x14, x14, x12 adcs x20, x20, x14 eor x13, x13, x12 adcs x21, x21, x13 adcs x22, x22, x12 adcs x23, x23, x12 adcs x24, x24, x12 adc x25, x25, x12 ldph x15, x12, sp, #cache_a03, t03 //subs x15, x4, x7 //cneg x15, x15, cc //csetm x12, cc ldp x13, x14, [x30, #cache_m30] eor x12, x12, x14 // Original code without caching //subs x13, x11, x8 //cneg x13, x13, cc //cinv x12, x12, cc mul x14, x15, x13 umulh x13, x15, x13 adds xzr, x12, #1 eor x14, x14, x12 adcs x21, x21, x14 eor x13, x13, x12 adcs x22, x22, x13 adcs x23, x23, x12 adcs x24, x24, x12 adc x25, x25, x12 ldph x15, x12, sp, #cache_a12, t12 //subs x15, x5, x6 //cneg x15, x15, cc //csetm x12, cc ldp x13, x14, [x30, #cache_m21] eor x12, x12, x14 // Original code without caching //subs x13, x10, x9 //cneg x13, x13, cc //cinv x12, x12, cc mul x14, x15, x13 umulh x13, x15, x13 adds xzr, x12, #1 eor x14, x14, x12 adcs x21, x21, x14 stp x20, x21, [x1, #16] eor x13, x13, x12 adcs x22, x22, x13 adcs x13, x23, x12 adcs x14, x24, x12 adc x15, x25, x12 mov x12, x22 add x30, x30, #96 sub count, count, #1 cbnz count, bignum_emontredc_8n_cdiff_base_maddloop_neon ldp x17, x19, [x1, #32] ldp x20, x21, [x1, #48] ldp x26, xzr, [sp, #16] adds xzr, x28, x28 adcs x17, x17, x12 adcs x19, x19, x13 adcs x20, x20, x14 adcs x21, x21, x15 csetm x28, cs stp x17, x19, [x1, #32] stp x20, x21, [x1, #48] sub x1, x1, x0 sub x2, x2, x0 add x1, x1, #32 subs x26, x26, #1 stp x26, xzr, [sp, #16] // Restore buffer base for cached modulus 
differences ldr x30, [sp, #8] bne bignum_emontredc_8n_cdiff_base_outerloop neg x0, x28 bignum_emontredc_8n_cdiff_base_end: add sp, sp, #32 add sp, sp, #(6*16) ldp x29, x30, [sp, #(0*16)] ldp x27, x28, [sp, #(1*16)] ldp x25, x26, [sp, #(2*16)] ldp x23, x24, [sp, #(3*16)] ldp x21, x22, [sp, #(4*16)] ldp x19, x20, [sp, #(5*16)] add sp, sp, #(6*16) ret
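The caching trick above stores each difference as a (magnitude, borrow-mask) pair, and the sign of an ADK cross term (x - y) * (w - z) is then recovered by XOR-ing the two masks; since the modulus is fixed, its six limb differences can be computed once, which is what the precomputation loop does. A tiny Python sketch of that sign/magnitude convention (names are illustrative, not from the repo):

M64 = 2**64 - 1                            # the csetm all-ones mask

def cdiff_model(x, y):
    t = x - y
    return (abs(t), M64 if t < 0 else 0)   # (magnitude, borrow mask)

def signed_mul(dx, dy):
    (tx, cx), (ty, cy) = dx, dy
    mag = tx * ty
    return -mag if (cx ^ cy) else mag      # XOR of masks gives the product sign

a2, a3, b2, b3 = 7, 12, 5, 9
assert signed_mul(cdiff_model(a2, a3), cdiff_model(b3, b2)) == (a2 - a3) * (b3 - b2)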
wlsfx/bnbb
9,100
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/fastmul/unopt/bignum_mul_8_16_base.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply z := x * y // Inputs x[8], y[8]; output z[16] // // extern void bignum_mul_8_16_base // (uint64_t z[static 16], uint64_t x[static 8], uint64_t y[static 8]); // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_8_16_base) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_8_16_base) .text .balign 4 // --------------------------------------------------------------------------- // Macro computing [c,b,a] := [b,a] + (x - y) * (w - z), adding with carry // to the [b,a] components but leaving CF aligned with the c term, which is // a sign bitmask for (x - y) * (w - z). Continued add-with-carry operations // with [c,...,c] will continue the carry chain correctly starting from // the c position if desired to add to a longer term of the form [...,b,a]. // // c,h,l,t should all be different and t,h should not overlap w,z. // --------------------------------------------------------------------------- .macro muldiffnadd b,a, c,h,l,t, x,y, w,z subs \t, \x, \y cneg \t, \t, cc csetm \c, cc subs \h, \w, \z cneg \h, \h, cc mul \l, \t, \h umulh \h, \t, \h cinv \c, \c, cc adds xzr, \c, #1 eor \l, \l, \c adcs \a, \a, \l eor \h, \h, \c adcs \b, \b, \h .endm #define z x0 #define x x1 #define y x2 #define a0 x3 #define a1 x4 #define a2 x5 #define a3 x6 #define b0 x7 #define b1 x8 #define b2 x9 #define b3 x10 #define s0 x11 #define s1 x12 #define s2 x13 #define s3 x14 #define s4 x15 #define s5 x16 #define s6 x17 #define s7 x19 #define c x20 #define h x21 #define l x22 #define m x23 #define t x24 // These alias the ax and bx values, and are only used when they are done with #define u0 x3 #define u1 x4 #define u2 x5 #define u3 x6 #define u4 x7 #define u5 x8 #define u6 x9 #define u7 x10 // These alias c,h,l,m but leave s, t and d safe, all we need #define u8 x20 #define u9 x21 #define u10 x22 #define u11 x23 // We recycle the input pointers near the end #define s x1 #define d x2 // --------------------------------------------------------------------------- // Core 4x4->8 ADK multiplication macro // Does [s7,s6,s5,s4,s3,s2,s1,s0] = [a3,a2,a1,a0] * [b3,b2,b1,b0] // // If the input parameter is 1, it also adds in [z+32,z+40,z+48,z+56] // existing contents; if the parameter is 0 it just does the pure multiply // --------------------------------------------------------------------------- .macro mul4 afl // First accumulate all the "simple" products as [s7,s6,s5,s4,s0] mul s0, a0, b0 mul s4, a1, b1 mul s5, a2, b2 mul s6, a3, b3 umulh s7, a0, b0 adds s4, s4, s7 umulh s7, a1, b1 adcs s5, s5, s7 umulh s7, a2, b2 adcs s6, s6, s7 umulh s7, a3, b3 adc s7, s7, xzr // Multiply by B + 1 to get [s7;s6;s5;s4;s1;s0] adds s1, s4, s0 adcs s4, s5, s4 adcs s5, s6, s5 adcs s6, s7, s6 adc s7, xzr, s7 // Multiply by B^2 + 1 to get [s7;s6;s5;s4;s3;s2;s1;s0] adds s2, s4, s0 adcs s3, s5, s1 adcs s4, s6, s4 adcs s5, s7, s5 adcs s6, xzr, s6 adc s7, xzr, s7 // Optionally add the existing z contents .rep \afl ldp l, h, [z,#32] adds s0, s0, l adcs s1, s1, h ldp l, h, [z,#48] adcs s2, s2, l adcs s3, s3, h adcs s4, s4, xzr adcs s5, s5, xzr adcs s6, s6, xzr adc s7, s7, xzr .endr // Now add in all the "complicated" terms. 
muldiffnadd s6,s5, c,h,l,t, a2,a3, b3,b2 adc s7, s7, c muldiffnadd s2,s1, c,h,l,t, a0,a1, b1,b0 adcs s3, s3, c adcs s4, s4, c adcs s5, s5, c adcs s6, s6, c adc s7, s7, c muldiffnadd s5,s4, c,h,l,t, a1,a3, b3,b1 adcs s6, s6, c adc s7, s7, c muldiffnadd s3,s2, c,h,l,t, a0,a2, b2,b0 adcs s4, s4, c adcs s5, s5, c adcs s6, s6, c adc s7, s7, c muldiffnadd s4,s3, c,h,l,t, a0,a3, b3,b0 adcs s5, s5, c adcs s6, s6, c adc s7, s7, c muldiffnadd s4,s3, c,h,l,t, a1,a2, b2,b1 adcs s5, s5, c adcs s6, s6, c adc s7, s7, c .endm // --------------------------------------------------------------------------- // The main code // --------------------------------------------------------------------------- S2N_BN_SYMBOL(bignum_mul_8_16_base): // Save registers stp x19, x20, [sp, #-16]! stp x21, x22, [sp, #-16]! stp x23, x24, [sp, #-16]! // Multiply the low halves and then the high halves using ADK 4x4->8. // For the second one add the top of the low part (Q1) already into // the bottom of the high part (Q2) so that is already dealt with. // // Write back the first one but defer the second till a bit later while // we get on with the absolute difference computations ldp a0, a1, [x] ldp b0, b1, [y] ldp a2, a3, [x, #16] ldp b2, b3, [y, #16] mul4 0 ldp a0, a1, [x, #32] stp s0, s1, [z] ldp b0, b1, [y, #32] stp s2, s3, [z, #16] ldp a2, a3, [x, #48] stp s4, s5, [z, #32] ldp b2, b3, [y, #48] stp s6, s7, [z, #48] mul4 1 // Compute t,[a3,a2,a1,a0] = x_hi - x_lo // and s,[b3,b2,b1,b0] = y_lo - y_hi // sign-magnitude differences, and scatter in belated high writeback ldp l, h, [x] subs a0, a0, l sbcs a1, a1, h ldp l, h, [x, #16] sbcs a2, a2, l sbcs a3, a3, h csetm t, cc stp s0, s1, [z, #64] ldp l, h, [y] subs b0, l, b0 sbcs b1, h, b1 ldp l, h, [y, #16] sbcs b2, l, b2 sbcs b3, h, b3 csetm s, cc stp s2, s3, [z, #80] eor a0, a0, t subs a0, a0, t eor a1, a1, t sbcs a1, a1, t eor a2, a2, t sbcs a2, a2, t eor a3, a3, t sbc a3, a3, t stp s4, s5, [z, #96] eor b0, b0, s subs b0, b0, s eor b1, b1, s sbcs b1, b1, s eor b2, b2, s sbcs b2, b2, s eor b3, b3, s sbc b3, b3, s stp s6, s7, [z, #112] // Save the correct sign for the sub-product eor s, s, t // Now yet another 4x4->8 ADK core, but not writing back, keeping s0..s7 mul4 0 // Now accumulate the positive mid-terms as [u7,u6,u5,u4,u3.u2,u1,u0] ldp u0, u1, [z] ldp u4, u5, [z,#64] adds u0, u0, u4 adcs u1, u1, u5 ldp u2, u3, [z,#16] ldp u6, u7, [z,#80] adcs u2, u2, u6 adcs u3, u3, u7 ldp u8, u9, [z,#96] adcs u4, u4, u8 adcs u5, u5, u9 ldp u10, u11, [z,#112] adcs u6, u6, u10 adcs u7, u7, u11 // Stop the carry here so we can reintroduce it, taking into account the // effective addition of s from sign-extension below. Note that we get // a duplicated word c+carry beyond the first one, so this upper part is // of the form [d,d,d,t]. 
adcs t, s, xzr adc d, s, xzr // Add in the sign-adjusted complex term adds xzr, s, #1 eor s0, s0, s adcs u0, s0, u0 eor s1, s1, s adcs u1, s1, u1 eor s2, s2, s adcs u2, s2, u2 eor s3, s3, s adcs u3, s3, u3 eor s4, s4, s adcs u4, s4, u4 eor s5, s5, s adcs u5, s5, u5 eor s6, s6, s adcs u6, s6, u6 eor s7, s7, s adcs u7, s7, u7 // From this point on replace the sign with the suspended carry indication adcs u8, u8, t adcs u9, u9, d adcs u10, u10, d adc u11, u11, d // Store it back stp u0, u1, [z,#32] stp u2, u3, [z,#48] stp u4, u5, [z,#64] stp u6, u7, [z,#80] stp u8, u9, [z,#96] stp u10, u11, [z,#112] // Restore regs and return ldp x23, x24, [sp], #16 ldp x21, x22, [sp], #16 ldp x19, x20, [sp], #16 ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
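The structure described in the comments is the classic Karatsuba/ADK split: with B = 2^256 and x = xl + B*xh, y = yl + B*yh, the middle term is rebuilt from the two half products plus one signed sub-product of the half differences, which is why the code forms x_hi - x_lo and y_lo - y_hi as sign-magnitude values. A short Python identity check (illustrative, not from the repo):

# xl*yh + xh*yl  ==  xl*yl + xh*yh + (xh - xl)*(yl - yh)
import random
B = 2**256
x, y = random.getrandbits(512), random.getrandbits(512)
xl, xh = x % B, x // B
yl, yh = y % B, y // B
L, H = xl * yl, xh * yh                    # the two 4x4 ADK products
mid = L + H + (xh - xl) * (yl - yh)        # third ADK product, carrying a sign
assert x * y == L + B * mid + B * B * H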
wlsfx/bnbb
12,644
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/unopt/bignum_montmul_p384_base.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery multiply, z := (x * y / 2^384) mod p_384 // Inputs x[6], y[6]; output z[6] // // extern void bignum_montmul_p384_base // (uint64_t z[static 6], uint64_t x[static 6], uint64_t y[static 6]); // // Does z := (2^{-384} * x * y) mod p_384, assuming that the inputs x and y // satisfy x * y <= 2^384 * p_384 (in particular this is true if we are in // the "usual" case x < p_384 and y < p_384). // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p384_base) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p384_base) .text .balign 4 // --------------------------------------------------------------------------- // Macro returning (c,h,l) = 3-word 1s complement (x - y) * (w - z) // c,h,l,t should all be different // t,h should not overlap w,z // --------------------------------------------------------------------------- #define muldiffn(c,h,l, t, x,y, w,z) \ subs t, x, y __LF \ cneg t, t, cc __LF \ csetm c, cc __LF \ subs h, w, z __LF \ cneg h, h, cc __LF \ mul l, t, h __LF \ umulh h, t, h __LF \ cinv c, c, cc __LF \ eor l, l, c __LF \ eor h, h, c // --------------------------------------------------------------------------- // Core one-step "short" Montgomery reduction macro. Takes input in // [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1], // adding to the existing contents of [d5;d4;d3;d2;d1]. It is fine // for d6 to be the same register as d0. // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 // --------------------------------------------------------------------------- #define montreds(d6,d5,d4,d3,d2,d1,d0, t3,t2,t1) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ /* Recycle d0 (which we know gets implicitly cancelled) to store it */ \ lsl t1, d0, #32 __LF \ add d0, t1, d0 __LF \ /* Now let [t2;t1] = 2^64 * w - w + w_hi where w_hi = floor(w/2^32) */ \ /* We need to subtract 2^32 * this, and we can ignore its lower 32 */ \ /* bits since by design it will cancel anyway; we only need the w_hi */ \ /* part to get the carry propagation going. */ \ lsr t1, d0, #32 __LF \ subs t1, t1, d0 __LF \ sbc t2, d0, xzr __LF \ /* Now select in t1 the field to subtract from d1 */ \ extr t1, t2, t1, #32 __LF \ /* And now get the terms to subtract from d2 and d3 */ \ lsr t2, t2, #32 __LF \ adds t2, t2, d0 __LF \ adc t3, xzr, xzr __LF \ /* Do the subtraction of that portion */ \ subs d1, d1, t1 __LF \ sbcs d2, d2, t2 __LF \ sbcs d3, d3, t3 __LF \ sbcs d4, d4, xzr __LF \ sbcs d5, d5, xzr __LF \ /* Now effectively add 2^384 * w by taking d0 as the input for last sbc */ \ sbc d6, d0, xzr #define a0 x3 #define a1 x4 #define a2 x5 #define a3 x6 #define a4 x7 #define a5 x8 #define b0 x9 #define b1 x10 #define b2 x11 #define b3 x12 #define b4 x13 #define b5 x14 #define s0 x15 #define s1 x16 #define s2 x17 #define s3 x19 #define s4 x20 #define s5 x1 #define s6 x2 #define t1 x21 #define t2 x22 #define t3 x23 #define t4 x24 S2N_BN_SYMBOL(bignum_montmul_p384_base): // Save some registers stp x19, x20, [sp, -16]! stp x21, x22, [sp, -16]! stp x23, x24, [sp, -16]! 
// Load in all words of both inputs ldp a0, a1, [x1] ldp a2, a3, [x1, #16] ldp a4, a5, [x1, #32] ldp b0, b1, [x2] ldp b2, b3, [x2, #16] ldp b4, b5, [x2, #32] // Multiply low halves with a 3x3->6 ADK multiplier as [s5;s4;s3;s2;s1;s0] mul s0, a0, b0 mul t1, a1, b1 mul t2, a2, b2 umulh t3, a0, b0 umulh t4, a1, b1 umulh s5, a2, b2 adds t3, t3, t1 adcs t4, t4, t2 adc s5, s5, xzr adds s1, t3, s0 adcs s2, t4, t3 adcs s3, s5, t4 adc s4, s5, xzr adds s2, s2, s0 adcs s3, s3, t3 adcs s4, s4, t4 adc s5, s5, xzr muldiffn(t3,t2,t1, t4, a0,a1, b1,b0) adds xzr, t3, #1 adcs s1, s1, t1 adcs s2, s2, t2 adcs s3, s3, t3 adcs s4, s4, t3 adc s5, s5, t3 muldiffn(t3,t2,t1, t4, a0,a2, b2,b0) adds xzr, t3, #1 adcs s2, s2, t1 adcs s3, s3, t2 adcs s4, s4, t3 adc s5, s5, t3 muldiffn(t3,t2,t1, t4, a1,a2, b2,b1) adds xzr, t3, #1 adcs s3, s3, t1 adcs s4, s4, t2 adc s5, s5, t3 // Perform three "short" Montgomery steps on the low product // This shifts it to an offset compatible with middle terms // Stash the result temporarily in the output buffer // We could keep this in registers by directly adding to it in the next // ADK block, but if anything that seems to be slightly slower montreds(s0,s5,s4,s3,s2,s1,s0, t1,t2,t3) montreds(s1,s0,s5,s4,s3,s2,s1, t1,t2,t3) montreds(s2,s1,s0,s5,s4,s3,s2, t1,t2,t3) stp s3, s4, [x0] stp s5, s0, [x0, #16] stp s1, s2, [x0, #32] // Multiply high halves with a 3x3->6 ADK multiplier as [s5;s4;s3;s2;s1;s0] mul s0, a3, b3 mul t1, a4, b4 mul t2, a5, b5 umulh t3, a3, b3 umulh t4, a4, b4 umulh s5, a5, b5 adds t3, t3, t1 adcs t4, t4, t2 adc s5, s5, xzr adds s1, t3, s0 adcs s2, t4, t3 adcs s3, s5, t4 adc s4, s5, xzr adds s2, s2, s0 adcs s3, s3, t3 adcs s4, s4, t4 adc s5, s5, xzr muldiffn(t3,t2,t1, t4, a3,a4, b4,b3) adds xzr, t3, #1 adcs s1, s1, t1 adcs s2, s2, t2 adcs s3, s3, t3 adcs s4, s4, t3 adc s5, s5, t3 muldiffn(t3,t2,t1, t4, a3,a5, b5,b3) adds xzr, t3, #1 adcs s2, s2, t1 adcs s3, s3, t2 adcs s4, s4, t3 adc s5, s5, t3 muldiffn(t3,t2,t1, t4, a4,a5, b5,b4) adds xzr, t3, #1 adcs s3, s3, t1 adcs s4, s4, t2 adc s5, s5, t3 // Compute sign-magnitude a0,[a5,a4,a3] = x_hi - x_lo subs a3, a3, a0 sbcs a4, a4, a1 sbcs a5, a5, a2 sbc a0, xzr, xzr adds xzr, a0, #1 eor a3, a3, a0 adcs a3, a3, xzr eor a4, a4, a0 adcs a4, a4, xzr eor a5, a5, a0 adc a5, a5, xzr // Compute sign-magnitude b5,[b2,b1,b0] = y_lo - y_hi subs b0, b0, b3 sbcs b1, b1, b4 sbcs b2, b2, b5 sbc b5, xzr, xzr adds xzr, b5, #1 eor b0, b0, b5 adcs b0, b0, xzr eor b1, b1, b5 adcs b1, b1, xzr eor b2, b2, b5 adc b2, b2, xzr // Save the correct sign for the sub-product in b5 eor b5, a0, b5 // Add the high H to the modified low term L' and re-stash 6 words, // keeping top word in s6 ldp t1, t2, [x0] adds s0, s0, t1 adcs s1, s1, t2 ldp t1, t2, [x0, #16] adcs s2, s2, t1 adcs s3, s3, t2 ldp t1, t2, [x0, #32] adcs s4, s4, t1 adcs s5, s5, t2 adc s6, xzr, xzr stp s0, s1, [x0] stp s2, s3, [x0, #16] stp s4, s5, [x0, #32] // Multiply with yet a third 3x3 ADK for the complex mid-term mul s0, a3, b0 mul t1, a4, b1 mul t2, a5, b2 umulh t3, a3, b0 umulh t4, a4, b1 umulh s5, a5, b2 adds t3, t3, t1 adcs t4, t4, t2 adc s5, s5, xzr adds s1, t3, s0 adcs s2, t4, t3 adcs s3, s5, t4 adc s4, s5, xzr adds s2, s2, s0 adcs s3, s3, t3 adcs s4, s4, t4 adc s5, s5, xzr muldiffn(t3,t2,t1, t4, a3,a4, b1,b0) adds xzr, t3, #1 adcs s1, s1, t1 adcs s2, s2, t2 adcs s3, s3, t3 adcs s4, s4, t3 adc s5, s5, t3 muldiffn(t3,t2,t1, t4, a3,a5, b2,b0) adds xzr, t3, #1 adcs s2, s2, t1 adcs s3, s3, t2 adcs s4, s4, t3 adc s5, s5, t3 muldiffn(t3,t2,t1, t4, a4,a5, b2,b1) adds xzr, t3, #1 adcs s3, 
s3, t1 adcs s4, s4, t2 adc s5, s5, t3 // Unstash the H + L' sum to add in twice ldp a0, a1, [x0] ldp a2, a3, [x0, #16] ldp a4, a5, [x0, #32] // Set up a sign-modified version of the mid-product in a long accumulator // as [b3;b2;b1;b0;s5;s4;s3;s2;s1;s0], adding in the H + L' term once with // zero offset as this signed value is created adds xzr, b5, #1 eor s0, s0, b5 adcs s0, s0, a0 eor s1, s1, b5 adcs s1, s1, a1 eor s2, s2, b5 adcs s2, s2, a2 eor s3, s3, b5 adcs s3, s3, a3 eor s4, s4, b5 adcs s4, s4, a4 eor s5, s5, b5 adcs s5, s5, a5 adcs b0, b5, s6 adcs b1, b5, xzr adcs b2, b5, xzr adc b3, b5, xzr // Add in the stashed H + L' term an offset of 3 words as well adds s3, s3, a0 adcs s4, s4, a1 adcs s5, s5, a2 adcs b0, b0, a3 adcs b1, b1, a4 adcs b2, b2, a5 adc b3, b3, s6 // Do three more Montgomery steps on the composed term montreds(s0,s5,s4,s3,s2,s1,s0, t1,t2,t3) montreds(s1,s0,s5,s4,s3,s2,s1, t1,t2,t3) montreds(s2,s1,s0,s5,s4,s3,s2, t1,t2,t3) adds b0, b0, s0 adcs b1, b1, s1 adcs b2, b2, s2 adc b3, b3, xzr // Because of the way we added L' in two places, we can overspill by // more than usual in Montgomery, with the result being only known to // be < 3 * p_384, not the usual < 2 * p_384. So now we do a more // elaborate final correction in the style of bignum_cmul_p384, just // a little bit simpler because we know q is small. add t2, b3, #1 lsl t1, t2, #32 subs t4, t2, t1 sbc t1, t1, xzr adds s3, s3, t4 adcs s4, s4, t1 adcs s5, s5, t2 adcs b0, b0, xzr adcs b1, b1, xzr adcs b2, b2, xzr csetm t2, cc mov t3, #0x00000000ffffffff and t3, t3, t2 adds s3, s3, t3 eor t3, t3, t2 adcs s4, s4, t3 mov t3, #0xfffffffffffffffe and t3, t3, t2 adcs s5, s5, t3 adcs b0, b0, t2 adcs b1, b1, t2 adc b2, b2, t2 // Write back the result stp s3, s4, [x0] stp s5, b0, [x0, #16] stp b1, b2, [x0, #32] // Restore registers and return ldp x23, x24, [sp], #16 ldp x21, x22, [sp], #16 ldp x19, x20, [sp], #16 ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
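The montreds macro above leans on the special shape of p_384 = 2^384 - 2^128 - 2^96 + 2^32 - 1: its negated inverse modulo 2^64 is 2^32 + 1, so the Montgomery quotient word is simply d0 + (d0 << 32) mod 2^64, and adding that multiple of p_384 cancels the bottom word. A small Python check of this identity (a sketch of the arithmetic only, not of the register-level carry handling):

p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
W = 2**64
assert (-pow(p_384, -1, W)) % W == 2**32 + 1   # why w = d0 + (d0 << 32) works

import random
d = random.getrandbits(64 * 7)                 # a value to reduce by one word
w = (d % W) * (2**32 + 1) % W                  # the montreds multiplier
d1 = d + w * p_384
assert d1 % W == 0                             # bottom word cancelled
assert (d1 >> 64) % p_384 == (d * pow(W, -1, p_384)) % p_384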
wlsfx/bnbb
27,172
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/unopt/p384_montjadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point addition on NIST curve P-384 in Montgomery-Jacobian coordinates // // extern void p384_montjadd // (uint64_t p3[static 18],uint64_t p1[static 18],uint64_t p2[static 18]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // // Standard ARM ABI: X0 = p3, X1 = p1, X2 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjadd) S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjadd) .text .balign 4 // Size of individual field elements #define NUMSIZE 48 // Stable homes for input arguments during main code sequence #define input_z x24 #define input_x x25 #define input_y x26 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_2 input_y, #0 #define y_2 input_y, #NUMSIZE #define z_2 input_y, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // NSPACE is the total stack needed for these temporaries #define z1sq sp, #(NUMSIZE*0) #define ww sp, #(NUMSIZE*0) #define resx sp, #(NUMSIZE*0) #define yd sp, #(NUMSIZE*1) #define y2a sp, #(NUMSIZE*1) #define x2a sp, #(NUMSIZE*2) #define zzx2 sp, #(NUMSIZE*2) #define zz sp, #(NUMSIZE*3) #define t1 sp, #(NUMSIZE*3) #define t2 sp, #(NUMSIZE*4) #define x1a sp, #(NUMSIZE*4) #define zzx1 sp, #(NUMSIZE*4) #define resy sp, #(NUMSIZE*4) #define xd sp, #(NUMSIZE*5) #define z2sq sp, #(NUMSIZE*5) #define resz sp, #(NUMSIZE*5) #define y1a sp, #(NUMSIZE*6) #define NSPACE (NUMSIZE*7) // Corresponds to bignum_montmul_p384, with callee-save register spills // rewritten to update sp in advance .montmul_p384: sub sp, sp, 48 stp x19, x20, [sp, 32] stp x21, x22, [sp, 16] stp x23, x24, [sp] ldr q3, [x1] ldr q25, [x2] ldp x13, x23, [x2] ldp x3, x21, [x1] rev64 v23.4S, v25.4S uzp1 v17.4S, v25.4S, v3.4S umulh x15, x3, x13 mul v6.4S, v23.4S, v3.4S uzp1 v3.4S, v3.4S, v3.4S ldr q27, [x2, #32] ldp x8, x24, [x1, #16] subs x6, x3, x21 ldr q0, [x1, #32] movi v23.2D, #0x00000000ffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4S, v27.4S uzp2 v25.4S, v27.4S, v27.4S cneg x4, x6, cc subs x7, x23, x13 xtn v22.2S, v0.2D xtn v24.2S, v27.2D cneg x20, x7, cc ldp x6, x14, [x2, #16] mul v27.4S, v4.4S, v0.4S uaddlp v20.2D, v6.4S cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4S, v0.4S, v0.4S umull v21.2D, v22.2S, v25.2S shl v0.2D, v20.2D, #32 umlal v0.2D, v3.2S, v17.2S mul x22, x8, x6 umull v1.2D, v6.2S, v25.2S subs x12, x3, x8 umull v20.2D, v22.2S, v24.2S cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2D, v20.2D, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2D, v21.2D, #32 adds x22, x15, x7 and v26.16B, v21.16B, v23.16B adcs x16, x12, x15 uaddlp v25.2D, v27.4S adcs x9, x19, x12 umlal v26.2D, v6.2S, v24.2S adc x4, x19, xzr adds x16, x16, x7 shl v27.2D, v25.2D, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs 
x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2D, v22.2S, v24.2S mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2D, v26.2D, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [x2, #32] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [x1, #32] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [x0] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [x0, #16] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [x0, #32] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [x0] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [x0, #16] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [x0, #32] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [x0] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [x0, #16] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [x0, #32] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc subs x21, x6, x15 cneg x22, x21, cc cinv x21, x24, cc subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc csetm x24, cc subs x20, x14, x15 cinv x24, x24, cc mul x22, x3, x22 cneg x3, x20, cc subs x13, x6, x14 cneg 
x20, x13, cc cinv x15, x16, cc adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [x0] ldp x21, x12, [x0, #16] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [x0, #32] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [x0] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [x0, #16] adc x12, x15, x23 stp x21, x12, [x0, #32] ldp x23, x24, [sp] ldp x21, x22, [sp, 16] ldp x19, x20, [sp, 32] add sp, sp, 48 ret // Corresponds exactly to bignum_montsqr_p384 .montsqr_p384: ldr q1, [x1] ldp x9, x2, [x1] ldr q0, [x1] ldp x4, x6, [x1, #16] rev64 v21.4S, v1.4S uzp2 v28.4S, v1.4S, v1.4S umulh x7, x9, x2 xtn v17.2S, v1.2D mul v27.4S, v21.4S, v0.4S ldr q20, [x1, #32] xtn v30.2S, v0.2D ldr q1, [x1, #32] uzp2 v31.4S, v0.4S, v0.4S ldp x5, x10, [x1, #32] umulh x8, x9, x4 uaddlp v3.2D, v27.4S umull v16.2D, v30.2S, v17.2S mul x16, x9, x4 umull v27.2D, v30.2S, v28.2S shrn v0.2S, v20.2D, #32 xtn v7.2S, v20.2D shl v20.2D, v3.2D, #32 umull v3.2D, v31.2S, v28.2S mul x3, x2, x4 umlal v20.2D, v30.2S, v17.2S umull v22.2D, v7.2S, v0.2S usra v27.2D, v16.2D, #32 umulh x11, x2, x4 movi v21.2D, #0x00000000ffffffff uzp2 v28.4S, v1.4S, v1.4S adds x15, x16, x7 and v5.16B, v27.16B, v21.16B adcs x3, x3, x8 usra v3.2D, v27.2D, #32 dup v29.2D, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2D, v31.2S, v17.2S mul x8, x9, x2 mov x7, v20.d[1] shl v19.2D, v22.2D, #33 xtn v25.2S, v29.2D rev64 v31.4S, v1.4S lsl x13, x14, #32 uzp2 v6.4S, v29.4S, v29.4S umlal 
v19.2D, v7.2S, v7.2S usra v3.2D, v5.2D, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4S, v31.4S, v29.4S xtn v4.2S, v1.2D adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2D, v25.2S, v28.2S adcs x11, x16, x16 umull v21.2D, v25.2S, v4.2S mov x17, v3.d[0] umull v18.2D, v6.2S, v28.2S adc x16, x8, xzr uaddlp v16.2D, v17.4S movi v1.2D, #0x00000000ffffffff subs x13, x13, x12 usra v31.2D, v21.2D, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2D, v16.2D, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16B, v31.16B, v1.16B adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2D, v6.2S, v4.2S usra v18.2D, v31.2D, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2D, v25.2S, v4.2S adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2D, v3.2D, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x15, x3, x17 sbcs x3, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [x0] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 stp x15, x3, [x0, #16] csetm x15, cc cneg x1, x1, cc stp x11, x14, [x0, #32] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc cinv x16, x15, cc mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc cneg x9, x9, cc subs x4, x2, x4 cneg x4, x4, cc csetm x7, cc subs x2, x10, x6 cinv x8, x8, cc cneg x2, x2, cc cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc cneg x1, x1, cc eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 ldp x9, x17, [x0, #16] umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [x0] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [x0, #32] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x9 adcs x1, x1, x17 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, 
x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 adcs x14, x14, x2 mov x2, #0x1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [x0] adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [x0, #16] adc x17, x14, xzr stp x2, x17, [x0, #32] ret // Corresponds exactly to bignum_sub_p384 .sub_p384: ldp x5, x6, [x1] ldp x4, x3, [x2] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x1, #16] ldp x4, x3, [x2, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [x1, #32] ldp x4, x3, [x2, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, cc mov x4, #0xffffffff and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #0xfffffffffffffffe and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [x0] stp x7, x8, [x0, #16] stp x9, x10, [x0, #32] ret #define montmul_p384(P0,P1,P2) \ add x0, P0;\ add x1, P1;\ add x2, P2;\ bl .montmul_p384 #define montsqr_p384(P0,P1) \ add x0, P0;\ add x1, P1;\ bl .montsqr_p384 #define sub_p384(P0,P1,P2) \ add x0, P0;\ add x1, P1;\ add x2, P2;\ bl .sub_p384 S2N_BN_SYMBOL(p384_montjadd): // Save regs and make room on stack for temporary variables stp x19, x20, [sp, #-16]! stp x21, x22, [sp, #-16]! stp x23, x24, [sp, #-16]! stp x25, x26, [sp, #-16]! stp x30, xzr, [sp, #-16]! 
sub sp, sp, NSPACE // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 mov input_y, x2 // Main code, just a sequence of basic field operations // 8 * multiply + 3 * square + 7 * subtract montsqr_p384(z1sq,z_1) montsqr_p384(z2sq,z_2) montmul_p384(y1a,z_2,y_1) montmul_p384(y2a,z_1,y_2) montmul_p384(x2a,z1sq,x_2) montmul_p384(x1a,z2sq,x_1) montmul_p384(y2a,z1sq,y2a) montmul_p384(y1a,z2sq,y1a) sub_p384(xd,x2a,x1a) sub_p384(yd,y2a,y1a) montsqr_p384(zz,xd) montsqr_p384(ww,yd) montmul_p384(zzx1,zz,x1a) montmul_p384(zzx2,zz,x2a) sub_p384(resx,ww,zzx1) sub_p384(t1,zzx2,zzx1) montmul_p384(xd,xd,z_1) sub_p384(resx,resx,zzx2) sub_p384(t2,zzx1,resx) montmul_p384(t1,t1,y1a) montmul_p384(resz,xd,z_2) montmul_p384(t2,yd,t2) sub_p384(resy,t2,t1) // Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0 // The condition codes get set by a comparison (P2 != 0) - (P1 != 0) // So "HI" <=> CF /\ ~ZF <=> P1 = 0 /\ ~(P2 = 0) // and "LO" <=> ~CF <=> ~(P1 = 0) /\ P2 = 0 ldp x0, x1, [z_1] ldp x2, x3, [z_1+16] ldp x4, x5, [z_1+32] orr x20, x0, x1 orr x21, x2, x3 orr x22, x4, x5 orr x20, x20, x21 orr x20, x20, x22 cmp x20, xzr cset x20, ne ldp x6, x7, [z_2] ldp x8, x9, [z_2+16] ldp x10, x11, [z_2+32] orr x21, x6, x7 orr x22, x8, x9 orr x23, x10, x11 orr x21, x21, x22 orr x21, x21, x23 cmp x21, xzr cset x21, ne cmp x21, x20 // Multiplex the outputs accordingly, re-using the z's in registers ldp x12, x13, [resz] csel x12, x0, x12, lo csel x13, x1, x13, lo csel x12, x6, x12, hi csel x13, x7, x13, hi ldp x14, x15, [resz+16] csel x14, x2, x14, lo csel x15, x3, x15, lo csel x14, x8, x14, hi csel x15, x9, x15, hi ldp x16, x17, [resz+32] csel x16, x4, x16, lo csel x17, x5, x17, lo csel x16, x10, x16, hi csel x17, x11, x17, hi ldp x20, x21, [x_1] ldp x0, x1, [resx] csel x0, x20, x0, lo csel x1, x21, x1, lo ldp x20, x21, [x_2] csel x0, x20, x0, hi csel x1, x21, x1, hi ldp x20, x21, [x_1+16] ldp x2, x3, [resx+16] csel x2, x20, x2, lo csel x3, x21, x3, lo ldp x20, x21, [x_2+16] csel x2, x20, x2, hi csel x3, x21, x3, hi ldp x20, x21, [x_1+32] ldp x4, x5, [resx+32] csel x4, x20, x4, lo csel x5, x21, x5, lo ldp x20, x21, [x_2+32] csel x4, x20, x4, hi csel x5, x21, x5, hi ldp x20, x21, [y_1] ldp x6, x7, [resy] csel x6, x20, x6, lo csel x7, x21, x7, lo ldp x20, x21, [y_2] csel x6, x20, x6, hi csel x7, x21, x7, hi ldp x20, x21, [y_1+16] ldp x8, x9, [resy+16] csel x8, x20, x8, lo csel x9, x21, x9, lo ldp x20, x21, [y_2+16] csel x8, x20, x8, hi csel x9, x21, x9, hi ldp x20, x21, [y_1+32] ldp x10, x11, [resy+32] csel x10, x20, x10, lo csel x11, x21, x11, lo ldp x20, x21, [y_2+32] csel x10, x20, x10, hi csel x11, x21, x11, hi // Finally store back the multiplexed values stp x0, x1, [x_3] stp x2, x3, [x_3+16] stp x4, x5, [x_3+32] stp x6, x7, [y_3] stp x8, x9, [y_3+16] stp x10, x11, [y_3+32] stp x12, x13, [z_3] stp x14, x15, [z_3+16] stp x16, x17, [z_3+32] // Restore stack and registers add sp, sp, NSPACE ldp x30, xzr, [sp], 16 ldp x25, x26, [sp], 16 ldp x23, x24, [sp], 16 ldp x21, x22, [sp], 16 ldp x19, x20, [sp], 16 ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
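The montsqr/montmul/sub schedule in p384_montjadd (8 multiplies, 3 squarings, 7 subtractions) is the usual Jacobian point addition. Below is a minimal Python sketch of the same schedule, assuming plain integers mod p_384 rather than Montgomery-encoded values (the encoding changes only the representation, not the sequence of operations); variable names mirror the temporaries above, the function name is illustrative, and the trailing P1 = 0 / P2 = 0 multiplexing is omitted.

```python
# Minimal sketch: the p384_montjadd field-operation schedule on plain ints mod p_384.
p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def jacobian_add_p384(P1, P2):
    x1, y1, z1 = P1
    x2, y2, z2 = P2
    p = p_384
    z1sq = z1 * z1 % p            # montsqr(z1sq, z_1)
    z2sq = z2 * z2 % p            # montsqr(z2sq, z_2)
    y1a = z2 * y1 % p
    y2a = z1 * y2 % p
    x2a = z1sq * x2 % p           # U2 = x2 * z1^2
    x1a = z2sq * x1 % p           # U1 = x1 * z2^2
    y2a = z1sq * y2a % p          # S2 = y2 * z1^3
    y1a = z2sq * y1a % p          # S1 = y1 * z2^3
    xd = (x2a - x1a) % p          # H = U2 - U1
    yd = (y2a - y1a) % p          # R = S2 - S1
    zz = xd * xd % p              # H^2
    ww = yd * yd % p              # R^2
    zzx1 = zz * x1a % p           # U1 * H^2
    zzx2 = zz * x2a % p           # U2 * H^2
    resx = (ww - zzx1) % p
    t1 = (zzx2 - zzx1) % p        # H^3
    xd = xd * z1 % p              # H * z1
    resx = (resx - zzx2) % p      # x3 = R^2 - H^3 - 2*U1*H^2
    t2 = (zzx1 - resx) % p        # U1*H^2 - x3
    t1 = t1 * y1a % p             # S1 * H^3
    resz = xd * z2 % p            # z3 = z1 * z2 * H
    t2 = yd * t2 % p              # R * (U1*H^2 - x3)
    resy = (t2 - t1) % p          # y3 = R*(U1*H^2 - x3) - S1*H^3
    return resx, resy, resz
```

As with the textbook formula itself, this degenerates when both inputs are the same point (H = R = 0); the multiplexing at the end of the assembly only handles the cases where one input is the point at infinity (z = 0).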
wlsfx/bnbb
10,897
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/unopt/bignum_montsqr_p384_base.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery square, z := (x^2 / 2^384) mod p_384 // Input x[6]; output z[6] // // extern void bignum_montsqr_p384_base // (uint64_t z[static 6], uint64_t x[static 6]); // // Does z := (x^2 / 2^384) mod p_384, assuming x^2 <= 2^384 * p_384, which is // guaranteed in particular if x < p_384 initially (the "intended" case). // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p384_base) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p384_base) .text .balign 4 // --------------------------------------------------------------------------- // Macro returning (c,h,l) = 3-word 1s complement (x - y) * (w - z) // c,h,l,t should all be different // t,h should not overlap w,z // --------------------------------------------------------------------------- #define muldiffn(c,h,l, t, x,y, w,z) \ subs t, x, y __LF \ cneg t, t, cc __LF \ csetm c, cc __LF \ subs h, w, z __LF \ cneg h, h, cc __LF \ mul l, t, h __LF \ umulh h, t, h __LF \ cinv c, c, cc __LF \ eor l, l, c __LF \ eor h, h, c // --------------------------------------------------------------------------- // Core one-step "short" Montgomery reduction macro. Takes input in // [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1], // adding to the existing contents of [d5;d4;d3;d2;d1]. It is fine // for d6 to be the same register as d0. // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 // --------------------------------------------------------------------------- #define montreds(d6,d5,d4,d3,d2,d1,d0, t3,t2,t1) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ /* Recycle d0 (which we know gets implicitly cancelled) to store it */ \ lsl t1, d0, #32 __LF \ add d0, t1, d0 __LF \ /* Now let [t2;t1] = 2^64 * w - w + w_hi where w_hi = floor(w/2^32) */ \ /* We need to subtract 2^32 * this, and we can ignore its lower 32 */ \ /* bits since by design it will cancel anyway; we only need the w_hi */ \ /* part to get the carry propagation going. 
*/ \ lsr t1, d0, #32 __LF \ subs t1, t1, d0 __LF \ sbc t2, d0, xzr __LF \ /* Now select in t1 the field to subtract from d1 */ \ extr t1, t2, t1, #32 __LF \ /* And now get the terms to subtract from d2 and d3 */ \ lsr t2, t2, #32 __LF \ adds t2, t2, d0 __LF \ adc t3, xzr, xzr __LF \ /* Do the subtraction of that portion */ \ subs d1, d1, t1 __LF \ sbcs d2, d2, t2 __LF \ sbcs d3, d3, t3 __LF \ sbcs d4, d4, xzr __LF \ sbcs d5, d5, xzr __LF \ /* Now effectively add 2^384 * w by taking d0 as the input for last sbc */ \ sbc d6, d0, xzr #define a0 x2 #define a1 x3 #define a2 x4 #define a3 x5 #define a4 x6 #define a5 x7 #define c0 x8 #define c1 x9 #define c2 x10 #define c3 x11 #define c4 x12 #define c5 x13 #define d1 x14 #define d2 x15 #define d3 x16 #define d4 x17 S2N_BN_SYMBOL(bignum_montsqr_p384_base): // Load in all words of the input ldp a0, a1, [x1] ldp a2, a3, [x1, #16] ldp a4, a5, [x1, #32] // Square the low half getting a result in [c5;c4;c3;c2;c1;c0] mul d1, a0, a1 mul d2, a0, a2 mul d3, a1, a2 mul c0, a0, a0 mul c2, a1, a1 mul c4, a2, a2 umulh d4, a0, a1 adds d2, d2, d4 umulh d4, a0, a2 adcs d3, d3, d4 umulh d4, a1, a2 adcs d4, d4, xzr umulh c1, a0, a0 umulh c3, a1, a1 umulh c5, a2, a2 adds d1, d1, d1 adcs d2, d2, d2 adcs d3, d3, d3 adcs d4, d4, d4 adc c5, c5, xzr adds c1, c1, d1 adcs c2, c2, d2 adcs c3, c3, d3 adcs c4, c4, d4 adc c5, c5, xzr // Perform three "short" Montgomery steps on the low square // This shifts it to an offset compatible with middle product // Stash the result temporarily in the output buffer (to avoid more registers) montreds(c0,c5,c4,c3,c2,c1,c0, d1,d2,d3) montreds(c1,c0,c5,c4,c3,c2,c1, d1,d2,d3) montreds(c2,c1,c0,c5,c4,c3,c2, d1,d2,d3) stp c3, c4, [x0] stp c5, c0, [x0, #16] stp c1, c2, [x0, #32] // Compute product of the cross-term with ADK 3x3->6 multiplier #define a0 x2 #define a1 x3 #define a2 x4 #define a3 x5 #define a4 x6 #define a5 x7 #define s0 x8 #define s1 x9 #define s2 x10 #define s3 x11 #define s4 x12 #define s5 x13 #define l1 x14 #define l2 x15 #define h0 x16 #define h1 x17 #define h2 x1 #define s6 h1 #define c l1 #define h l2 #define l h0 #define t h1 mul s0, a0, a3 mul l1, a1, a4 mul l2, a2, a5 umulh h0, a0, a3 umulh h1, a1, a4 umulh h2, a2, a5 adds h0, h0, l1 adcs h1, h1, l2 adc h2, h2, xzr adds s1, h0, s0 adcs s2, h1, h0 adcs s3, h2, h1 adc s4, h2, xzr adds s2, s2, s0 adcs s3, s3, h0 adcs s4, s4, h1 adc s5, h2, xzr muldiffn(c,h,l, t, a0,a1, a4,a3) adds xzr, c, #1 adcs s1, s1, l adcs s2, s2, h adcs s3, s3, c adcs s4, s4, c adc s5, s5, c muldiffn(c,h,l, t, a0,a2, a5,a3) adds xzr, c, #1 adcs s2, s2, l adcs s3, s3, h adcs s4, s4, c adc s5, s5, c muldiffn(c,h,l, t, a1,a2, a5,a4) adds xzr, c, #1 adcs s3, s3, l adcs s4, s4, h adc s5, s5, c // Double it and add the stashed Montgomerified low square adds s0, s0, s0 adcs s1, s1, s1 adcs s2, s2, s2 adcs s3, s3, s3 adcs s4, s4, s4 adcs s5, s5, s5 adc s6, xzr, xzr ldp a0, a1, [x0] adds s0, s0, a0 adcs s1, s1, a1 ldp a0, a1, [x0, #16] adcs s2, s2, a0 adcs s3, s3, a1 ldp a0, a1, [x0, #32] adcs s4, s4, a0 adcs s5, s5, a1 adc s6, s6, xzr // Montgomery-reduce the combined low and middle term another thrice montreds(s0,s5,s4,s3,s2,s1,s0, a0,a1,a2) montreds(s1,s0,s5,s4,s3,s2,s1, a0,a1,a2) montreds(s2,s1,s0,s5,s4,s3,s2, a0,a1,a2) adds s6, s6, s0 adcs s0, s1, xzr adcs s1, s2, xzr adcs s2, xzr, xzr // Our sum so far is in [s2;s1;s0;s6;s5;s4;s3] // Choose more intuitive names #define r0 x11 #define r1 x12 #define r2 x13 #define r3 x17 #define r4 x8 #define r5 x9 #define r6 x10 // Remind ourselves what else we can't 
destroy #define a3 x5 #define a4 x6 #define a5 x7 // So we can have these as temps #define t1 x1 #define t2 x14 #define t3 x15 #define t4 x16 // Add in all the pure squares 33 + 44 + 55 mul t1, a3, a3 adds r0, r0, t1 mul t2, a4, a4 mul t3, a5, a5 umulh t1, a3, a3 adcs r1, r1, t1 umulh t1, a4, a4 adcs r2, r2, t2 adcs r3, r3, t1 umulh t1, a5, a5 adcs r4, r4, t3 adcs r5, r5, t1 adc r6, r6, xzr // Now compose the 34 + 35 + 45 terms, which need doubling mul t1, a3, a4 mul t2, a3, a5 mul t3, a4, a5 umulh t4, a3, a4 adds t2, t2, t4 umulh t4, a3, a5 adcs t3, t3, t4 umulh t4, a4, a5 adc t4, t4, xzr // Double and add. Recycle one of the no-longer-needed inputs as a temp #define t5 x5 adds t1, t1, t1 adcs t2, t2, t2 adcs t3, t3, t3 adcs t4, t4, t4 adc t5, xzr, xzr adds r1, r1, t1 adcs r2, r2, t2 adcs r3, r3, t3 adcs r4, r4, t4 adcs r5, r5, t5 adc r6, r6, xzr // We know, writing B = 2^{6*64} that the full implicit result is // B^2 c <= z + (B - 1) * p < B * p + (B - 1) * p < 2 * B * p, // so the top half is certainly < 2 * p. If c = 1 already, we know // subtracting p will give the reduced modulus. But now we do a // comparison to catch cases where the residue is >= p. // First set [0;0;0;t3;t2;t1] = 2^384 - p_384 mov t1, #0xffffffff00000001 mov t2, #0x00000000ffffffff mov t3, #0x0000000000000001 // Let dd = [] be the 6-word intermediate result. // Set CF if the addition dd + (2^384 - p_384) >= 2^384, hence iff dd >= p_384. adds xzr, r0, t1 adcs xzr, r1, t2 adcs xzr, r2, t3 adcs xzr, r3, xzr adcs xzr, r4, xzr adcs xzr, r5, xzr // Now just add this new carry into the existing r6. It's easy to see they // can't both be 1 by our range assumptions, so this gives us a {0,1} flag adc r6, r6, xzr // Now convert it into a bitmask sub r6, xzr, r6 // Masked addition of 2^384 - p_384, hence subtraction of p_384 and t1, t1, r6 adds r0, r0, t1 and t2, t2, r6 adcs r1, r1, t2 and t3, t3, r6 adcs r2, r2, t3 adcs r3, r3, xzr adcs r4, r4, xzr adc r5, r5, xzr // Store it back stp r0, r1, [x0] stp r2, r3, [x0, #16] stp r4, r5, [x0, #32] ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
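The montreds correction multiplier works because p_384 ≡ 2^32 - 1 (mod 2^64) and (2^32 - 1)^(-1) ≡ -(2^32 + 1) (mod 2^64), so w = d0 * (2^32 + 1) mod 2^64 = (d0 + (d0 << 32)) mod 2^64 makes the bottom word of d + w * p_384 vanish. A minimal Python sketch of that word-level step and of the full (x^2 / 2^384) mod p_384 it builds up to, assuming x < p_384 and plain big integers rather than the register-level interleaving used above; function names are illustrative.

```python
p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
MASK64 = 2**64 - 1

def montreds(d):
    d0 = d & MASK64
    # w = d0 * (2^32 + 1) mod 2^64, i.e. d0 * (-p_384)^(-1) mod 2^64
    w = (d0 + (d0 << 32)) & MASK64
    d = d + w * p_384
    assert d & MASK64 == 0            # bottom word cancels by construction
    return d >> 64                    # divide by the word base

def montsqr_p384(x):
    d = x * x
    for _ in range(6):                # six word-level steps = division by 2^384
        d = montreds(d)
    return d if d < p_384 else d - p_384   # final conditional subtraction

x = 2**383 + 12345
assert montsqr_p384(x) == x * x * pow(2**384, -1, p_384) % p_384
```

The assembly applies three such steps to the low square and three more after folding in the doubled cross product, but the net effect is the same single division by 2^384.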
wlsfx/bnbb
37,431
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p384/unopt/p384_montjdouble.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point doubling on NIST curve P-384 in Montgomery-Jacobian coordinates // // extern void p384_montjdouble // (uint64_t p3[static 18],uint64_t p1[static 18]); // // Does p3 := 2 * p1 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // // Standard ARM ABI: X0 = p3, X1 = p1 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjdouble) S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjdouble) .text .balign 4 // Size of individual field elements #define NUMSIZE 48 // Stable homes for input arguments during main code sequence #define input_z x25 #define input_x x26 // Pointer-offset pairs for inputs and outputs #define x_1 input_x, #0 #define y_1 input_x, #NUMSIZE #define z_1 input_x, #(2*NUMSIZE) #define x_3 input_z, #0 #define y_3 input_z, #NUMSIZE #define z_3 input_z, #(2*NUMSIZE) // Pointer-offset pairs for temporaries, with some aliasing // NSPACE is the total stack needed for these temporaries #define z2 sp, #(NUMSIZE*0) #define y2 sp, #(NUMSIZE*1) #define x2p sp, #(NUMSIZE*2) #define xy2 sp, #(NUMSIZE*3) #define y4 sp, #(NUMSIZE*4) #define t2 sp, #(NUMSIZE*4) #define dx2 sp, #(NUMSIZE*5) #define t1 sp, #(NUMSIZE*5) #define d_ sp, #(NUMSIZE*6) #define x4p sp, #(NUMSIZE*6) #define NSPACE #(NUMSIZE*7) // Corresponds exactly to bignum_montmul_p384 .montmul_p384: sub sp, sp, 48 stp x19, x20, [sp, 32] stp x21, x22, [sp, 16] stp x23, x24, [sp] ldr q3, [x1] ldr q25, [x2] ldp x13, x23, [x2] ldp x3, x21, [x1] rev64 v23.4S, v25.4S uzp1 v17.4S, v25.4S, v3.4S umulh x15, x3, x13 mul v6.4S, v23.4S, v3.4S uzp1 v3.4S, v3.4S, v3.4S ldr q27, [x2, #32] ldp x8, x24, [x1, #16] subs x6, x3, x21 ldr q0, [x1, #32] movi v23.2D, #0x00000000ffffffff csetm x10, cc umulh x19, x21, x23 rev64 v4.4S, v27.4S uzp2 v25.4S, v27.4S, v27.4S cneg x4, x6, cc subs x7, x23, x13 xtn v22.2S, v0.2D xtn v24.2S, v27.2D cneg x20, x7, cc ldp x6, x14, [x2, #16] mul v27.4S, v4.4S, v0.4S uaddlp v20.2D, v6.4S cinv x5, x10, cc mul x16, x4, x20 uzp2 v6.4S, v0.4S, v0.4S umull v21.2D, v22.2S, v25.2S shl v0.2D, v20.2D, #32 umlal v0.2D, v3.2S, v17.2S mul x22, x8, x6 umull v1.2D, v6.2S, v25.2S subs x12, x3, x8 umull v20.2D, v22.2S, v24.2S cneg x17, x12, cc umulh x9, x8, x6 mov x12, v0.d[1] eor x11, x16, x5 mov x7, v0.d[0] csetm x10, cc usra v21.2D, v20.2D, #32 adds x15, x15, x12 adcs x12, x19, x22 umulh x20, x4, x20 adc x19, x9, xzr usra v1.2D, v21.2D, #32 adds x22, x15, x7 and v26.16B, v21.16B, v23.16B adcs x16, x12, x15 uaddlp v25.2D, v27.4S adcs x9, x19, x12 umlal v26.2D, v6.2S, v24.2S adc x4, x19, xzr adds x16, x16, x7 shl v27.2D, v25.2D, #32 adcs x9, x9, x15 adcs x4, x4, x12 eor x12, x20, x5 adc x15, x19, xzr subs x20, x6, x13 cneg x20, x20, cc cinv x10, x10, cc cmn x5, #0x1 mul x19, x17, x20 adcs x11, x22, x11 adcs x12, x16, x12 adcs x9, x9, x5 umulh x17, x17, x20 adcs x22, x4, x5 adc x5, x15, x5 subs x16, x21, x8 cneg x20, x16, cc eor x19, x19, x10 csetm x4, cc subs x16, x6, x23 cneg x16, x16, cc umlal v27.2D, v22.2S, v24.2S mul x15, x20, x16 cinv x4, x4, cc cmn x10, #0x1 usra v1.2D, v26.2D, #32 adcs x19, x12, x19 eor x17, x17, x10 adcs x9, x9, x17 adcs x22, x22, x10 lsl x12, x7, #32 umulh 
x20, x20, x16 eor x16, x15, x4 ldp x15, x17, [x2, #32] add x2, x12, x7 adc x7, x5, x10 ldp x5, x10, [x1, #32] lsr x1, x2, #32 eor x12, x20, x4 subs x1, x1, x2 sbc x20, x2, xzr cmn x4, #0x1 adcs x9, x9, x16 extr x1, x20, x1, #32 lsr x20, x20, #32 adcs x22, x22, x12 adc x16, x7, x4 adds x12, x20, x2 umulh x7, x24, x14 adc x4, xzr, xzr subs x1, x11, x1 sbcs x20, x19, x12 sbcs x12, x9, x4 lsl x9, x1, #32 add x1, x9, x1 sbcs x9, x22, xzr mul x22, x24, x14 sbcs x16, x16, xzr lsr x4, x1, #32 sbc x19, x2, xzr subs x4, x4, x1 sbc x11, x1, xzr extr x2, x11, x4, #32 lsr x4, x11, #32 adds x4, x4, x1 adc x11, xzr, xzr subs x2, x20, x2 sbcs x4, x12, x4 sbcs x20, x9, x11 lsl x12, x2, #32 add x2, x12, x2 sbcs x9, x16, xzr lsr x11, x2, #32 sbcs x19, x19, xzr sbc x1, x1, xzr subs x16, x11, x2 sbc x12, x2, xzr extr x16, x12, x16, #32 lsr x12, x12, #32 adds x11, x12, x2 adc x12, xzr, xzr subs x16, x4, x16 mov x4, v27.d[0] sbcs x11, x20, x11 sbcs x20, x9, x12 stp x16, x11, [x0] sbcs x11, x19, xzr sbcs x9, x1, xzr stp x20, x11, [x0, #16] mov x1, v1.d[0] sbc x20, x2, xzr subs x12, x24, x5 mov x11, v27.d[1] cneg x16, x12, cc csetm x2, cc subs x19, x15, x14 mov x12, v1.d[1] cinv x2, x2, cc cneg x19, x19, cc stp x9, x20, [x0, #32] mul x9, x16, x19 adds x4, x7, x4 adcs x11, x1, x11 adc x1, x12, xzr adds x20, x4, x22 umulh x19, x16, x19 adcs x7, x11, x4 eor x16, x9, x2 adcs x9, x1, x11 adc x12, x1, xzr adds x7, x7, x22 adcs x4, x9, x4 adcs x9, x12, x11 adc x12, x1, xzr cmn x2, #0x1 eor x1, x19, x2 adcs x11, x20, x16 adcs x19, x7, x1 adcs x1, x4, x2 adcs x20, x9, x2 adc x2, x12, x2 subs x12, x24, x10 cneg x16, x12, cc csetm x12, cc subs x9, x17, x14 cinv x12, x12, cc cneg x9, x9, cc subs x3, x24, x3 sbcs x21, x5, x21 mul x24, x16, x9 sbcs x4, x10, x8 ngc x8, xzr subs x10, x5, x10 eor x5, x24, x12 csetm x7, cc cneg x24, x10, cc subs x10, x17, x15 cinv x7, x7, cc cneg x10, x10, cc subs x14, x13, x14 sbcs x15, x23, x15 eor x13, x21, x8 mul x23, x24, x10 sbcs x17, x6, x17 eor x6, x3, x8 ngc x21, xzr umulh x9, x16, x9 cmn x8, #0x1 eor x3, x23, x7 adcs x23, x6, xzr adcs x13, x13, xzr eor x16, x4, x8 adc x16, x16, xzr eor x4, x17, x21 umulh x17, x24, x10 cmn x21, #0x1 eor x24, x14, x21 eor x6, x15, x21 adcs x15, x24, xzr adcs x14, x6, xzr adc x6, x4, xzr cmn x12, #0x1 eor x4, x9, x12 adcs x19, x19, x5 umulh x5, x23, x15 adcs x1, x1, x4 adcs x10, x20, x12 eor x4, x17, x7 ldp x20, x9, [x0] adc x2, x2, x12 cmn x7, #0x1 adcs x12, x1, x3 ldp x17, x24, [x0, #16] mul x1, x16, x6 adcs x3, x10, x4 adc x2, x2, x7 ldp x7, x4, [x0, #32] adds x20, x22, x20 mul x10, x13, x14 adcs x11, x11, x9 eor x9, x8, x21 adcs x21, x19, x17 stp x20, x11, [x0] adcs x12, x12, x24 mul x8, x23, x15 adcs x3, x3, x7 stp x21, x12, [x0, #16] adcs x12, x2, x4 adc x19, xzr, xzr subs x21, x23, x16 umulh x2, x16, x6 stp x3, x12, [x0, #32] cneg x3, x21, cc csetm x24, cc umulh x11, x13, x14 subs x21, x13, x16 eor x7, x8, x9 cneg x17, x21, cc csetm x16, cc subs x21, x6, x15 cneg x22, x21, cc cinv x21, x24, cc subs x20, x23, x13 umulh x12, x3, x22 cneg x23, x20, cc csetm x24, cc subs x20, x14, x15 cinv x24, x24, cc mul x22, x3, x22 cneg x3, x20, cc subs x13, x6, x14 cneg x20, x13, cc cinv x15, x16, cc adds x13, x5, x10 mul x4, x23, x3 adcs x11, x11, x1 adc x14, x2, xzr adds x5, x13, x8 adcs x16, x11, x13 umulh x23, x23, x3 adcs x3, x14, x11 adc x1, x14, xzr adds x10, x16, x8 adcs x6, x3, x13 adcs x8, x1, x11 umulh x13, x17, x20 eor x1, x4, x24 adc x4, x14, xzr cmn x24, #0x1 adcs x1, x5, x1 eor x16, x23, x24 eor x11, x1, x9 adcs x23, x10, x16 eor x2, x22, x21 adcs x3, 
x6, x24 mul x14, x17, x20 eor x17, x13, x15 adcs x13, x8, x24 adc x8, x4, x24 cmn x21, #0x1 adcs x6, x23, x2 mov x16, #0xfffffffffffffffe eor x20, x12, x21 adcs x20, x3, x20 eor x23, x14, x15 adcs x2, x13, x21 adc x8, x8, x21 cmn x15, #0x1 ldp x5, x4, [x0] ldp x21, x12, [x0, #16] adcs x22, x20, x23 eor x23, x22, x9 adcs x17, x2, x17 adc x22, x8, x15 cmn x9, #0x1 adcs x15, x7, x5 ldp x10, x14, [x0, #32] eor x1, x6, x9 lsl x2, x15, #32 adcs x8, x11, x4 adcs x13, x1, x21 eor x1, x22, x9 adcs x24, x23, x12 eor x11, x17, x9 adcs x23, x11, x10 adcs x7, x1, x14 adcs x17, x9, x19 adcs x20, x9, xzr add x1, x2, x15 lsr x3, x1, #32 adcs x11, x9, xzr adc x9, x9, xzr subs x3, x3, x1 sbc x6, x1, xzr adds x24, x24, x5 adcs x4, x23, x4 extr x3, x6, x3, #32 lsr x6, x6, #32 adcs x21, x7, x21 adcs x15, x17, x12 adcs x7, x20, x10 adcs x20, x11, x14 mov x14, #0xffffffff adc x22, x9, x19 adds x12, x6, x1 adc x10, xzr, xzr subs x3, x8, x3 sbcs x12, x13, x12 lsl x9, x3, #32 add x3, x9, x3 sbcs x10, x24, x10 sbcs x24, x4, xzr lsr x9, x3, #32 sbcs x21, x21, xzr sbc x1, x1, xzr subs x9, x9, x3 sbc x13, x3, xzr extr x9, x13, x9, #32 lsr x13, x13, #32 adds x13, x13, x3 adc x6, xzr, xzr subs x12, x12, x9 sbcs x17, x10, x13 lsl x2, x12, #32 sbcs x10, x24, x6 add x9, x2, x12 sbcs x6, x21, xzr lsr x5, x9, #32 sbcs x21, x1, xzr sbc x13, x3, xzr subs x8, x5, x9 sbc x19, x9, xzr lsr x12, x19, #32 extr x3, x19, x8, #32 adds x8, x12, x9 adc x1, xzr, xzr subs x2, x17, x3 sbcs x12, x10, x8 sbcs x5, x6, x1 sbcs x3, x21, xzr sbcs x19, x13, xzr sbc x24, x9, xzr adds x23, x15, x3 adcs x8, x7, x19 adcs x11, x20, x24 adc x9, x22, xzr add x24, x9, #0x1 lsl x7, x24, #32 subs x21, x24, x7 sbc x10, x7, xzr adds x6, x2, x21 adcs x7, x12, x10 adcs x24, x5, x24 adcs x13, x23, xzr adcs x8, x8, xzr adcs x15, x11, xzr csetm x23, cc and x11, x16, x23 and x20, x14, x23 adds x22, x6, x20 eor x3, x20, x23 adcs x5, x7, x3 adcs x14, x24, x11 stp x22, x5, [x0] adcs x5, x13, x23 adcs x21, x8, x23 stp x14, x5, [x0, #16] adc x12, x15, x23 stp x21, x12, [x0, #32] ldp x23, x24, [sp] ldp x21, x22, [sp, 16] ldp x19, x20, [sp, 32] add sp, sp, 48 ret // Corresponds exactly to bignum_montsqr_p384 .montsqr_p384: ldr q1, [x1] ldp x9, x2, [x1] ldr q0, [x1] ldp x4, x6, [x1, #16] rev64 v21.4S, v1.4S uzp2 v28.4S, v1.4S, v1.4S umulh x7, x9, x2 xtn v17.2S, v1.2D mul v27.4S, v21.4S, v0.4S ldr q20, [x1, #32] xtn v30.2S, v0.2D ldr q1, [x1, #32] uzp2 v31.4S, v0.4S, v0.4S ldp x5, x10, [x1, #32] umulh x8, x9, x4 uaddlp v3.2D, v27.4S umull v16.2D, v30.2S, v17.2S mul x16, x9, x4 umull v27.2D, v30.2S, v28.2S shrn v0.2S, v20.2D, #32 xtn v7.2S, v20.2D shl v20.2D, v3.2D, #32 umull v3.2D, v31.2S, v28.2S mul x3, x2, x4 umlal v20.2D, v30.2S, v17.2S umull v22.2D, v7.2S, v0.2S usra v27.2D, v16.2D, #32 umulh x11, x2, x4 movi v21.2D, #0x00000000ffffffff uzp2 v28.4S, v1.4S, v1.4S adds x15, x16, x7 and v5.16B, v27.16B, v21.16B adcs x3, x3, x8 usra v3.2D, v27.2D, #32 dup v29.2D, x6 adcs x16, x11, xzr mov x14, v20.d[0] umlal v5.2D, v31.2S, v17.2S mul x8, x9, x2 mov x7, v20.d[1] shl v19.2D, v22.2D, #33 xtn v25.2S, v29.2D rev64 v31.4S, v1.4S lsl x13, x14, #32 uzp2 v6.4S, v29.4S, v29.4S umlal v19.2D, v7.2S, v7.2S usra v3.2D, v5.2D, #32 adds x1, x8, x8 umulh x8, x4, x4 add x12, x13, x14 mul v17.4S, v31.4S, v29.4S xtn v4.2S, v1.2D adcs x14, x15, x15 lsr x13, x12, #32 adcs x15, x3, x3 umull v31.2D, v25.2S, v28.2S adcs x11, x16, x16 umull v21.2D, v25.2S, v4.2S mov x17, v3.d[0] umull v18.2D, v6.2S, v28.2S adc x16, x8, xzr uaddlp v16.2D, v17.4S movi v1.2D, #0x00000000ffffffff subs x13, x13, x12 usra 
v31.2D, v21.2D, #32 sbc x8, x12, xzr adds x17, x17, x1 mul x1, x4, x4 shl v28.2D, v16.2D, #32 mov x3, v3.d[1] adcs x14, x7, x14 extr x7, x8, x13, #32 adcs x13, x3, x15 and v3.16B, v31.16B, v1.16B adcs x11, x1, x11 lsr x1, x8, #32 umlal v3.2D, v6.2S, v4.2S usra v18.2D, v31.2D, #32 adc x3, x16, xzr adds x1, x1, x12 umlal v28.2D, v25.2S, v4.2S adc x16, xzr, xzr subs x15, x17, x7 sbcs x7, x14, x1 lsl x1, x15, #32 sbcs x16, x13, x16 add x8, x1, x15 usra v18.2D, v3.2D, #32 sbcs x14, x11, xzr lsr x1, x8, #32 sbcs x17, x3, xzr sbc x11, x12, xzr subs x13, x1, x8 umulh x12, x4, x10 sbc x1, x8, xzr extr x13, x1, x13, #32 lsr x1, x1, #32 adds x15, x1, x8 adc x1, xzr, xzr subs x7, x7, x13 sbcs x13, x16, x15 lsl x3, x7, #32 umulh x16, x2, x5 sbcs x15, x14, x1 add x7, x3, x7 sbcs x3, x17, xzr lsr x1, x7, #32 sbcs x14, x11, xzr sbc x11, x8, xzr subs x8, x1, x7 sbc x1, x7, xzr extr x8, x1, x8, #32 lsr x1, x1, #32 adds x1, x1, x7 adc x17, xzr, xzr subs x13, x13, x8 umulh x8, x9, x6 sbcs x1, x15, x1 sbcs x15, x3, x17 sbcs x3, x14, xzr mul x17, x2, x5 sbcs x11, x11, xzr stp x13, x1, [x0] sbc x14, x7, xzr mul x7, x4, x10 subs x1, x9, x2 stp x15, x3, [x0, #16] csetm x15, cc cneg x1, x1, cc stp x11, x14, [x0, #32] mul x14, x9, x6 adds x17, x8, x17 adcs x7, x16, x7 adc x13, x12, xzr subs x12, x5, x6 cneg x3, x12, cc cinv x16, x15, cc mul x8, x1, x3 umulh x1, x1, x3 eor x12, x8, x16 adds x11, x17, x14 adcs x3, x7, x17 adcs x15, x13, x7 adc x8, x13, xzr adds x3, x3, x14 adcs x15, x15, x17 adcs x17, x8, x7 eor x1, x1, x16 adc x13, x13, xzr subs x9, x9, x4 csetm x8, cc cneg x9, x9, cc subs x4, x2, x4 cneg x4, x4, cc csetm x7, cc subs x2, x10, x6 cinv x8, x8, cc cneg x2, x2, cc cmn x16, #0x1 adcs x11, x11, x12 mul x12, x9, x2 adcs x3, x3, x1 adcs x15, x15, x16 umulh x9, x9, x2 adcs x17, x17, x16 adc x13, x13, x16 subs x1, x10, x5 cinv x2, x7, cc cneg x1, x1, cc eor x9, x9, x8 cmn x8, #0x1 eor x7, x12, x8 mul x12, x4, x1 adcs x3, x3, x7 adcs x7, x15, x9 adcs x15, x17, x8 ldp x9, x17, [x0, #16] umulh x4, x4, x1 adc x8, x13, x8 cmn x2, #0x1 eor x1, x12, x2 adcs x1, x7, x1 ldp x7, x16, [x0] eor x12, x4, x2 adcs x4, x15, x12 ldp x15, x12, [x0, #32] adc x8, x8, x2 adds x13, x14, x14 umulh x14, x5, x10 adcs x2, x11, x11 adcs x3, x3, x3 adcs x1, x1, x1 adcs x4, x4, x4 adcs x11, x8, x8 adc x8, xzr, xzr adds x13, x13, x7 adcs x2, x2, x16 mul x16, x5, x10 adcs x3, x3, x9 adcs x1, x1, x17 umulh x5, x5, x5 lsl x9, x13, #32 add x9, x9, x13 adcs x4, x4, x15 mov x13, v28.d[1] adcs x15, x11, x12 lsr x7, x9, #32 adc x11, x8, xzr subs x7, x7, x9 umulh x10, x10, x10 sbc x17, x9, xzr extr x7, x17, x7, #32 lsr x17, x17, #32 adds x17, x17, x9 adc x12, xzr, xzr subs x8, x2, x7 sbcs x17, x3, x17 lsl x7, x8, #32 sbcs x2, x1, x12 add x3, x7, x8 sbcs x12, x4, xzr lsr x1, x3, #32 sbcs x7, x15, xzr sbc x15, x9, xzr subs x1, x1, x3 sbc x4, x3, xzr lsr x9, x4, #32 extr x8, x4, x1, #32 adds x9, x9, x3 adc x4, xzr, xzr subs x1, x17, x8 lsl x17, x1, #32 sbcs x8, x2, x9 sbcs x9, x12, x4 add x17, x17, x1 mov x1, v18.d[1] lsr x2, x17, #32 sbcs x7, x7, xzr mov x12, v18.d[0] sbcs x15, x15, xzr sbc x3, x3, xzr subs x4, x2, x17 sbc x2, x17, xzr adds x12, x13, x12 adcs x16, x16, x1 lsr x13, x2, #32 extr x1, x2, x4, #32 adc x2, x14, xzr adds x4, x13, x17 mul x13, x6, x6 adc x14, xzr, xzr subs x1, x8, x1 sbcs x4, x9, x4 mov x9, v28.d[0] sbcs x7, x7, x14 sbcs x8, x15, xzr sbcs x3, x3, xzr sbc x14, x17, xzr adds x17, x9, x9 adcs x12, x12, x12 mov x15, v19.d[0] adcs x9, x16, x16 umulh x6, x6, x6 adcs x16, x2, x2 adc x2, xzr, xzr adds x11, x11, x8 adcs x3, x3, xzr 
adcs x14, x14, xzr adcs x8, xzr, xzr adds x13, x1, x13 mov x1, v19.d[1] adcs x6, x4, x6 mov x4, #0xffffffff adcs x15, x7, x15 adcs x7, x11, x5 adcs x1, x3, x1 adcs x14, x14, x10 adc x11, x8, xzr adds x6, x6, x17 adcs x8, x15, x12 adcs x3, x7, x9 adcs x15, x1, x16 mov x16, #0xffffffff00000001 adcs x14, x14, x2 mov x2, #0x1 adc x17, x11, xzr cmn x13, x16 adcs xzr, x6, x4 adcs xzr, x8, x2 adcs xzr, x3, xzr adcs xzr, x15, xzr adcs xzr, x14, xzr adc x1, x17, xzr neg x9, x1 and x1, x16, x9 adds x11, x13, x1 and x13, x4, x9 adcs x5, x6, x13 and x1, x2, x9 adcs x7, x8, x1 stp x11, x5, [x0] adcs x11, x3, xzr adcs x2, x15, xzr stp x7, x11, [x0, #16] adc x17, x14, xzr stp x2, x17, [x0, #32] ret // Corresponds exactly to bignum_sub_p384 .sub_p384: ldp x5, x6, [x1] ldp x4, x3, [x2] subs x5, x5, x4 sbcs x6, x6, x3 ldp x7, x8, [x1, #16] ldp x4, x3, [x2, #16] sbcs x7, x7, x4 sbcs x8, x8, x3 ldp x9, x10, [x1, #32] ldp x4, x3, [x2, #32] sbcs x9, x9, x4 sbcs x10, x10, x3 csetm x3, cc mov x4, #0xffffffff and x4, x4, x3 adds x5, x5, x4 eor x4, x4, x3 adcs x6, x6, x4 mov x4, #0xfffffffffffffffe and x4, x4, x3 adcs x7, x7, x4 adcs x8, x8, x3 adcs x9, x9, x3 adc x10, x10, x3 stp x5, x6, [x0] stp x7, x8, [x0, #16] stp x9, x10, [x0, #32] ret // Corresponds exactly to bignum_add_p384 .add_p384: ldp x5, x6, [x1] ldp x4, x3, [x2] adds x5, x5, x4 adcs x6, x6, x3 ldp x7, x8, [x1, #16] ldp x4, x3, [x2, #16] adcs x7, x7, x4 adcs x8, x8, x3 ldp x9, x10, [x1, #32] ldp x4, x3, [x2, #32] adcs x9, x9, x4 adcs x10, x10, x3 adc x3, xzr, xzr mov x4, #0xffffffff cmp x5, x4 mov x4, #0xffffffff00000000 sbcs xzr, x6, x4 mov x4, #0xfffffffffffffffe sbcs xzr, x7, x4 adcs xzr, x8, xzr adcs xzr, x9, xzr adcs xzr, x10, xzr adcs x3, x3, xzr csetm x3, ne mov x4, #0xffffffff and x4, x4, x3 subs x5, x5, x4 eor x4, x4, x3 sbcs x6, x6, x4 mov x4, #0xfffffffffffffffe and x4, x4, x3 sbcs x7, x7, x4 sbcs x8, x8, x3 sbcs x9, x9, x3 sbc x10, x10, x3 stp x5, x6, [x0] stp x7, x8, [x0, #16] stp x9, x10, [x0, #32] ret #define montmul_p384(P0,P1,P2) \ add x0, P0;\ add x1, P1;\ add x2, P2;\ bl .montmul_p384 #define montsqr_p384(P0,P1) \ add x0, P0;\ add x1, P1;\ bl .montsqr_p384 #define sub_p384(P0,P1,P2) \ add x0, P0;\ add x1, P1;\ add x2, P2;\ bl .sub_p384 #define add_p384(P0,P1,P2) \ add x0, P0;\ add x1, P1;\ add x2, P2;\ bl .add_p384 // P0 = 4 * P1 - P2 #define cmsub41_p384(P0,P1,P2) \ ldp x1, x2, [P1] __LF \ ldp x3, x4, [P1+16] __LF \ ldp x5, x6, [P1+32] __LF \ lsl x0, x1, #2 __LF \ ldp x7, x8, [P2] __LF \ subs x0, x0, x7 __LF \ extr x1, x2, x1, #62 __LF \ sbcs x1, x1, x8 __LF \ ldp x7, x8, [P2+16] __LF \ extr x2, x3, x2, #62 __LF \ sbcs x2, x2, x7 __LF \ extr x3, x4, x3, #62 __LF \ sbcs x3, x3, x8 __LF \ extr x4, x5, x4, #62 __LF \ ldp x7, x8, [P2+32] __LF \ sbcs x4, x4, x7 __LF \ extr x5, x6, x5, #62 __LF \ sbcs x5, x5, x8 __LF \ lsr x6, x6, #62 __LF \ adc x6, x6, xzr __LF \ lsl x7, x6, #32 __LF \ subs x8, x6, x7 __LF \ sbc x7, x7, xzr __LF \ adds x0, x0, x8 __LF \ adcs x1, x1, x7 __LF \ adcs x2, x2, x6 __LF \ adcs x3, x3, xzr __LF \ adcs x4, x4, xzr __LF \ adcs x5, x5, xzr __LF \ csetm x8, cc __LF \ mov x9, #0xffffffff __LF \ and x9, x9, x8 __LF \ adds x0, x0, x9 __LF \ eor x9, x9, x8 __LF \ adcs x1, x1, x9 __LF \ mov x9, #0xfffffffffffffffe __LF \ and x9, x9, x8 __LF \ adcs x2, x2, x9 __LF \ adcs x3, x3, x8 __LF \ adcs x4, x4, x8 __LF \ adc x5, x5, x8 __LF \ stp x0, x1, [P0] __LF \ stp x2, x3, [P0+16] __LF \ stp x4, x5, [P0+32] // P0 = C * P1 - D * P2 #define cmsub_p384(P0,C,P1,D,P2) \ ldp x0, x1, [P2] __LF \ mov x6, #0x00000000ffffffff __LF \ 
subs x6, x6, x0 __LF \ mov x7, #0xffffffff00000000 __LF \ sbcs x7, x7, x1 __LF \ ldp x0, x1, [P2+16] __LF \ mov x8, #0xfffffffffffffffe __LF \ sbcs x8, x8, x0 __LF \ mov x13, #0xffffffffffffffff __LF \ sbcs x9, x13, x1 __LF \ ldp x0, x1, [P2+32] __LF \ sbcs x10, x13, x0 __LF \ sbc x11, x13, x1 __LF \ mov x12, D __LF \ mul x0, x12, x6 __LF \ mul x1, x12, x7 __LF \ mul x2, x12, x8 __LF \ mul x3, x12, x9 __LF \ mul x4, x12, x10 __LF \ mul x5, x12, x11 __LF \ umulh x6, x12, x6 __LF \ umulh x7, x12, x7 __LF \ umulh x8, x12, x8 __LF \ umulh x9, x12, x9 __LF \ umulh x10, x12, x10 __LF \ umulh x12, x12, x11 __LF \ adds x1, x1, x6 __LF \ adcs x2, x2, x7 __LF \ adcs x3, x3, x8 __LF \ adcs x4, x4, x9 __LF \ adcs x5, x5, x10 __LF \ mov x6, #1 __LF \ adc x6, x12, x6 __LF \ ldp x8, x9, [P1] __LF \ ldp x10, x11, [P1+16] __LF \ ldp x12, x13, [P1+32] __LF \ mov x14, C __LF \ mul x15, x14, x8 __LF \ umulh x8, x14, x8 __LF \ adds x0, x0, x15 __LF \ mul x15, x14, x9 __LF \ umulh x9, x14, x9 __LF \ adcs x1, x1, x15 __LF \ mul x15, x14, x10 __LF \ umulh x10, x14, x10 __LF \ adcs x2, x2, x15 __LF \ mul x15, x14, x11 __LF \ umulh x11, x14, x11 __LF \ adcs x3, x3, x15 __LF \ mul x15, x14, x12 __LF \ umulh x12, x14, x12 __LF \ adcs x4, x4, x15 __LF \ mul x15, x14, x13 __LF \ umulh x13, x14, x13 __LF \ adcs x5, x5, x15 __LF \ adc x6, x6, xzr __LF \ adds x1, x1, x8 __LF \ adcs x2, x2, x9 __LF \ adcs x3, x3, x10 __LF \ adcs x4, x4, x11 __LF \ adcs x5, x5, x12 __LF \ adcs x6, x6, x13 __LF \ lsl x7, x6, #32 __LF \ subs x8, x6, x7 __LF \ sbc x7, x7, xzr __LF \ adds x0, x0, x8 __LF \ adcs x1, x1, x7 __LF \ adcs x2, x2, x6 __LF \ adcs x3, x3, xzr __LF \ adcs x4, x4, xzr __LF \ adcs x5, x5, xzr __LF \ csetm x6, cc __LF \ mov x7, #0xffffffff __LF \ and x7, x7, x6 __LF \ adds x0, x0, x7 __LF \ eor x7, x7, x6 __LF \ adcs x1, x1, x7 __LF \ mov x7, #0xfffffffffffffffe __LF \ and x7, x7, x6 __LF \ adcs x2, x2, x7 __LF \ adcs x3, x3, x6 __LF \ adcs x4, x4, x6 __LF \ adc x5, x5, x6 __LF \ stp x0, x1, [P0] __LF \ stp x2, x3, [P0+16] __LF \ stp x4, x5, [P0+32] // A weak version of add that only guarantees sum in 6 digits #define weakadd_p384(P0,P1,P2) \ ldp x5, x6, [P1] __LF \ ldp x4, x3, [P2] __LF \ adds x5, x5, x4 __LF \ adcs x6, x6, x3 __LF \ ldp x7, x8, [P1+16] __LF \ ldp x4, x3, [P2+16] __LF \ adcs x7, x7, x4 __LF \ adcs x8, x8, x3 __LF \ ldp x9, x10, [P1+32] __LF \ ldp x4, x3, [P2+32] __LF \ adcs x9, x9, x4 __LF \ adcs x10, x10, x3 __LF \ csetm x3, cs __LF \ mov x4, #0xffffffff __LF \ and x4, x4, x3 __LF \ subs x5, x5, x4 __LF \ eor x4, x4, x3 __LF \ sbcs x6, x6, x4 __LF \ mov x4, #0xfffffffffffffffe __LF \ and x4, x4, x3 __LF \ sbcs x7, x7, x4 __LF \ sbcs x8, x8, x3 __LF \ sbcs x9, x9, x3 __LF \ sbc x10, x10, x3 __LF \ stp x5, x6, [P0] __LF \ stp x7, x8, [P0+16] __LF \ stp x9, x10, [P0+32] // P0 = 3 * P1 - 8 * P2 #define cmsub38_p384(P0,P1,P2) \ ldp x0, x1, [P2] __LF \ mov x6, #0x00000000ffffffff __LF \ subs x6, x6, x0 __LF \ mov x7, #0xffffffff00000000 __LF \ sbcs x7, x7, x1 __LF \ ldp x0, x1, [P2+16] __LF \ mov x8, #0xfffffffffffffffe __LF \ sbcs x8, x8, x0 __LF \ mov x13, #0xffffffffffffffff __LF \ sbcs x9, x13, x1 __LF \ ldp x0, x1, [P2+32] __LF \ sbcs x10, x13, x0 __LF \ sbc x11, x13, x1 __LF \ lsl x0, x6, #3 __LF \ extr x1, x7, x6, #61 __LF \ extr x2, x8, x7, #61 __LF \ extr x3, x9, x8, #61 __LF \ extr x4, x10, x9, #61 __LF \ extr x5, x11, x10, #61 __LF \ lsr x6, x11, #61 __LF \ add x6, x6, #1 __LF \ ldp x8, x9, [P1] __LF \ ldp x10, x11, [P1+16] __LF \ ldp x12, x13, [P1+32] __LF \ mov x14, 3 __LF \ mul x15, x14, x8 __LF 
\ umulh x8, x14, x8 __LF \ adds x0, x0, x15 __LF \ mul x15, x14, x9 __LF \ umulh x9, x14, x9 __LF \ adcs x1, x1, x15 __LF \ mul x15, x14, x10 __LF \ umulh x10, x14, x10 __LF \ adcs x2, x2, x15 __LF \ mul x15, x14, x11 __LF \ umulh x11, x14, x11 __LF \ adcs x3, x3, x15 __LF \ mul x15, x14, x12 __LF \ umulh x12, x14, x12 __LF \ adcs x4, x4, x15 __LF \ mul x15, x14, x13 __LF \ umulh x13, x14, x13 __LF \ adcs x5, x5, x15 __LF \ adc x6, x6, xzr __LF \ adds x1, x1, x8 __LF \ adcs x2, x2, x9 __LF \ adcs x3, x3, x10 __LF \ adcs x4, x4, x11 __LF \ adcs x5, x5, x12 __LF \ adcs x6, x6, x13 __LF \ lsl x7, x6, #32 __LF \ subs x8, x6, x7 __LF \ sbc x7, x7, xzr __LF \ adds x0, x0, x8 __LF \ adcs x1, x1, x7 __LF \ adcs x2, x2, x6 __LF \ adcs x3, x3, xzr __LF \ adcs x4, x4, xzr __LF \ adcs x5, x5, xzr __LF \ csetm x6, cc __LF \ mov x7, #0xffffffff __LF \ and x7, x7, x6 __LF \ adds x0, x0, x7 __LF \ eor x7, x7, x6 __LF \ adcs x1, x1, x7 __LF \ mov x7, #0xfffffffffffffffe __LF \ and x7, x7, x6 __LF \ adcs x2, x2, x7 __LF \ adcs x3, x3, x6 __LF \ adcs x4, x4, x6 __LF \ adc x5, x5, x6 __LF \ stp x0, x1, [P0] __LF \ stp x2, x3, [P0+16] __LF \ stp x4, x5, [P0+32] S2N_BN_SYMBOL(p384_montjdouble): // Save regs and make room on stack for temporary variables sub sp, sp, NSPACE+80 stp x19, x20, [sp, NSPACE] stp x21, x22, [sp, NSPACE+16] stp x23, x24, [sp, NSPACE+32] stp x25, x26, [sp, NSPACE+48] stp x30, xzr, [sp, NSPACE+64] // Move the input arguments to stable places mov input_z, x0 mov input_x, x1 // Main code, just a sequence of basic field operations // z2 = z^2 // y2 = y^2 montsqr_p384(z2,z_1) montsqr_p384(y2,y_1) // x2p = x^2 - z^4 = (x + z^2) * (x - z^2) weakadd_p384(t1,x_1,z2) sub_p384(t2,x_1,z2) montmul_p384(x2p,t1,t2) // t1 = y + z // x4p = x2p^2 // xy2 = x * y^2 add_p384(t1,y_1,z_1) montsqr_p384(x4p,x2p) montmul_p384(xy2,x_1,y2) // t2 = (y + z)^2 montsqr_p384(t2,t1) // d = 12 * xy2 - 9 * x4p // t1 = y^2 + 2 * y * z cmsub_p384(d_,12,xy2,9,x4p) sub_p384(t1,t2,z2) // y4 = y^4 montsqr_p384(y4,y2) // z_3' = 2 * y * z // dx2 = d * x2p sub_p384(z_3,t1,y2) montmul_p384(dx2,d_,x2p) // x' = 4 * xy2 - d cmsub41_p384(x_3,xy2,d_) // y' = 3 * dx2 - 8 * y4 cmsub38_p384(y_3,dx2,y4) // Restore stack and registers ldp x19, x20, [sp, NSPACE] ldp x21, x22, [sp, NSPACE+16] ldp x23, x24, [sp, NSPACE+32] ldp x25, x26, [sp, NSPACE+48] ldp x30, xzr, [sp, NSPACE+64] add sp, sp, NSPACE+80 ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
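The main sequence of p384_montjdouble is the standard Jacobian doubling for an a = -3 curve (the coefficient P-384 uses, which is what makes x^2 - z^4 = (x + z^2)(x - z^2) applicable), with the small-constant combinations folded into the cmsub macros. A minimal Python sketch of the same schedule, again assuming plain integers mod p_384 instead of Montgomery encoding; the function name is illustrative.

```python
p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def jacobian_double_p384(P):
    x, y, z = P
    p = p_384
    z2 = z * z % p                    # z^2
    y2 = y * y % p                    # y^2
    x2p = (x + z2) * (x - z2) % p     # x^2 - z^4
    x4p = x2p * x2p % p               # (x^2 - z^4)^2
    xy2 = x * y2 % p                  # x * y^2
    t2 = (y + z) * (y + z) % p        # (y + z)^2
    d = (12 * xy2 - 9 * x4p) % p      # cmsub_p384(d_, 12, xy2, 9, x4p)
    t1 = (t2 - z2) % p                # y^2 + 2*y*z
    y4 = y2 * y2 % p                  # y^4
    z3 = (t1 - y2) % p                # 2*y*z
    dx2 = d * x2p % p
    x3 = (4 * xy2 - d) % p            # cmsub41_p384
    y3 = (3 * dx2 - 8 * y4) % p       # cmsub38_p384
    return x3, y3, z3
```

The constants 12, 9, 4, 3, 8 come straight from the doubling formulas; each cmsub macro evaluates its combination in a single pass with one modular correction at the end rather than separate full reductions.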
wlsfx/bnbb
15,398
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/unopt/bignum_sqr_p521_base.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Square modulo p_521, z := (x^2) mod p_521, assuming x reduced // Input x[9]; output z[9] // // extern void bignum_sqr_p521_base (uint64_t z[static 9], uint64_t x[static 9]); // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sqr_p521_base) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sqr_p521_base) .text .balign 4 #define z x0 #define x x1 #define a0 x2 #define a1 x3 #define a2 x4 #define a3 x5 #define b0 x6 #define b1 x7 #define b2 x8 #define b3 x9 #define s0 x10 #define s1 x11 #define s2 x12 #define s3 x13 #define s4 x14 #define s5 x15 #define s6 x16 #define s7 x17 #define c x19 #define h x20 #define l x21 #define t x22 #define u x23 #define v x24 // Aliased to earlier ones we no longer need #define d0 x2 #define d1 x3 #define d2 x4 #define d3 x5 #define d4 x6 #define d5 x7 #define d6 x8 #define d7 x9 #define d8 x10 S2N_BN_SYMBOL(bignum_sqr_p521_base): // Save registers stp x19, x20, [sp, #-16]! stp x21, x22, [sp, #-16]! stp x23, x24, [sp, #-16]! // Load all the inputs first ldp a0, a1, [x] ldp a2, a3, [x, #16] ldp b0, b1, [x, #32] ldp b2, b3, [x, #48] // Square the upper half with a register-renamed variant of bignum_sqr_4_8 mul s2, b0, b2 mul s7, b1, b3 umulh t, b0, b2 subs u, b0, b1 cneg u, u, cc csetm s1, cc subs s0, b3, b2 cneg s0, s0, cc mul s6, u, s0 umulh s0, u, s0 cinv s1, s1, cc eor s6, s6, s1 eor s0, s0, s1 adds s3, s2, t adc t, t, xzr umulh u, b1, b3 adds s3, s3, s7 adcs t, t, u adc u, u, xzr adds t, t, s7 adc u, u, xzr cmn s1, #0x1 adcs s3, s3, s6 adcs t, t, s0 adc u, u, s1 adds s2, s2, s2 adcs s3, s3, s3 adcs t, t, t adcs u, u, u adc c, xzr, xzr mul s0, b0, b0 mul s6, b1, b1 mul l, b0, b1 umulh s1, b0, b0 umulh s7, b1, b1 umulh h, b0, b1 adds s1, s1, l adcs s6, s6, h adc s7, s7, xzr adds s1, s1, l adcs s6, s6, h adc s7, s7, xzr adds s2, s2, s6 adcs s3, s3, s7 adcs t, t, xzr adcs u, u, xzr adc c, c, xzr mul s4, b2, b2 mul s6, b3, b3 mul l, b2, b3 umulh s5, b2, b2 umulh s7, b3, b3 umulh h, b2, b3 adds s5, s5, l adcs s6, s6, h adc s7, s7, xzr adds s5, s5, l adcs s6, s6, h adc s7, s7, xzr adds s4, s4, t adcs s5, s5, u adcs s6, s6, c adc s7, s7, xzr // Augment the high part with the contribution from the top little word C. // If we write the input as 2^512 * C + x then we are otherwise just doing // x^2, so we need to add to the high part 2^512 * C^2 + (2 * C) * x. // Accumulate it as [c;s7;...;s0] = H'. Since 2 * C is only 10 bits long // we multiply 52-bit chunks of the x digits by 2 * C and solve the overlap // with non-overflowing addition to get 52-bit chunks of the result with // similar alignment. Then we stitch these back together and add them into // the running total. This is quite a bit of palaver, but it avoids using // the standard 2-part multiplications involving umulh, and on target // microarchitectures seems to improve performance by about 5%. We could // equally well use 53 or 54 since they are still <= 64 - 10, but below // 52 we would end up using more multiplications. 
ldr c, [x, #64] add u, c, c mul c, c, c // 0 * 52 = 64 * 0 + 0 and l, a0, #0x000fffffffffffff mul l, u, l // 1 * 52 = 64 * 0 + 52 extr h, a1, a0, #52 and h, h, #0x000fffffffffffff mul h, u, h lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #12 adds s0, s0, t // 2 * 52 = 64 * 1 + 40 extr l, a2, a1, #40 and l, l, #0x000fffffffffffff mul l, u, l lsr t, h, #52 add l, l, t lsl h, h, #12 extr t, l, h, #24 adcs s1, s1, t // 3 * 52 = 64 * 2 + 28 extr h, a3, a2, #28 and h, h, #0x000fffffffffffff mul h, u, h lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #36 adcs s2, s2, t // 4 * 52 = 64 * 3 + 16 extr l, b0, a3, #16 and l, l, #0x000fffffffffffff mul l, u, l lsr t, h, #52 add l, l, t lsl h, h, #12 extr t, l, h, #48 adcs s3, s3, t // 5 * 52 = 64 * 4 + 4 lsr h, b0, #4 and h, h, #0x000fffffffffffff mul h, u, h lsr t, l, #52 add h, h, t lsl l, l, #12 extr v, h, l, #60 // 6 * 52 = 64 * 4 + 56 extr l, b1, b0, #56 and l, l, #0x000fffffffffffff mul l, u, l lsr t, h, #52 add l, l, t lsl v, v, #8 extr t, l, v, #8 adcs s4, s4, t // 7 * 52 = 64 * 5 + 44 extr h, b2, b1, #44 and h, h, #0x000fffffffffffff mul h, u, h lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #20 adcs s5, s5, t // 8 * 52 = 64 * 6 + 32 extr l, b3, b2, #32 and l, l, #0x000fffffffffffff mul l, u, l lsr t, h, #52 add l, l, t lsl h, h, #12 extr t, l, h, #32 adcs s6, s6, t // 9 * 52 = 64 * 7 + 20 lsr h, b3, #20 mul h, u, h lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #44 adcs s7, s7, t // Top word lsr h, h, #44 adc c, c, h // Rotate [c;s7;...;s0] before storing in the buffer. // We want to add 2^512 * H', which splitting H' at bit 9 is // 2^521 * H_top + 2^512 * H_bot == 2^512 * H_bot + H_top (mod p_521) extr l, s1, s0, #9 extr h, s2, s1, #9 stp l, h, [z] extr l, s3, s2, #9 extr h, s4, s3, #9 stp l, h, [z, #16] extr l, s5, s4, #9 extr h, s6, s5, #9 stp l, h, [z, #32] extr l, s7, s6, #9 extr h, c, s7, #9 stp l, h, [z, #48] and t, s0, #0x1FF lsr c, c, #9 add t, t, c str t, [z, #64] // Square the lower half with an analogous variant of bignum_sqr_4_8 mul s2, a0, a2 mul s7, a1, a3 umulh t, a0, a2 subs u, a0, a1 cneg u, u, cc csetm s1, cc subs s0, a3, a2 cneg s0, s0, cc mul s6, u, s0 umulh s0, u, s0 cinv s1, s1, cc eor s6, s6, s1 eor s0, s0, s1 adds s3, s2, t adc t, t, xzr umulh u, a1, a3 adds s3, s3, s7 adcs t, t, u adc u, u, xzr adds t, t, s7 adc u, u, xzr cmn s1, #0x1 adcs s3, s3, s6 adcs t, t, s0 adc u, u, s1 adds s2, s2, s2 adcs s3, s3, s3 adcs t, t, t adcs u, u, u adc c, xzr, xzr mul s0, a0, a0 mul s6, a1, a1 mul l, a0, a1 umulh s1, a0, a0 umulh s7, a1, a1 umulh h, a0, a1 adds s1, s1, l adcs s6, s6, h adc s7, s7, xzr adds s1, s1, l adcs s6, s6, h adc s7, s7, xzr adds s2, s2, s6 adcs s3, s3, s7 adcs t, t, xzr adcs u, u, xzr adc c, c, xzr mul s4, a2, a2 mul s6, a3, a3 mul l, a2, a3 umulh s5, a2, a2 umulh s7, a3, a3 umulh h, a2, a3 adds s5, s5, l adcs s6, s6, h adc s7, s7, xzr adds s5, s5, l adcs s6, s6, h adc s7, s7, xzr adds s4, s4, t adcs s5, s5, u adcs s6, s6, c adc s7, s7, xzr // Add it directly to the existing buffer ldp l, h, [z] adds l, l, s0 adcs h, h, s1 stp l, h, [z] ldp l, h, [z, #16] adcs l, l, s2 adcs h, h, s3 stp l, h, [z, #16] ldp l, h, [z, #32] adcs l, l, s4 adcs h, h, s5 stp l, h, [z, #32] ldp l, h, [z, #48] adcs l, l, s6 adcs h, h, s7 stp l, h, [z, #48] ldr t, [z, #64] adc t, t, xzr str t, [z, #64] // Now get the cross-product in [s7,...,s0] with variant of bignum_mul_4_8 mul s0, a0, b0 mul s4, a1, b1 mul s5, a2, b2 mul s6, a3, b3 umulh s7, a0, b0 adds s4, s4, s7 umulh s7, a1, b1 adcs s5, s5, s7 umulh s7, 
a2, b2 adcs s6, s6, s7 umulh s7, a3, b3 adc s7, s7, xzr adds s1, s4, s0 adcs s4, s5, s4 adcs s5, s6, s5 adcs s6, s7, s6 adc s7, xzr, s7 adds s2, s4, s0 adcs s3, s5, s1 adcs s4, s6, s4 adcs s5, s7, s5 adcs s6, xzr, s6 adc s7, xzr, s7 subs t, a2, a3 cneg t, t, cc csetm c, cc subs h, b3, b2 cneg h, h, cc mul l, t, h umulh h, t, h cinv c, c, cc cmn c, #0x1 eor l, l, c adcs s5, s5, l eor h, h, c adcs s6, s6, h adc s7, s7, c subs t, a0, a1 cneg t, t, cc csetm c, cc subs h, b1, b0 cneg h, h, cc mul l, t, h umulh h, t, h cinv c, c, cc cmn c, #0x1 eor l, l, c adcs s1, s1, l eor h, h, c adcs s2, s2, h adcs s3, s3, c adcs s4, s4, c adcs s5, s5, c adcs s6, s6, c adc s7, s7, c subs t, a1, a3 cneg t, t, cc csetm c, cc subs h, b3, b1 cneg h, h, cc mul l, t, h umulh h, t, h cinv c, c, cc cmn c, #0x1 eor l, l, c adcs s4, s4, l eor h, h, c adcs s5, s5, h adcs s6, s6, c adc s7, s7, c subs t, a0, a2 cneg t, t, cc csetm c, cc subs h, b2, b0 cneg h, h, cc mul l, t, h umulh h, t, h cinv c, c, cc cmn c, #0x1 eor l, l, c adcs s2, s2, l eor h, h, c adcs s3, s3, h adcs s4, s4, c adcs s5, s5, c adcs s6, s6, c adc s7, s7, c subs t, a0, a3 cneg t, t, cc csetm c, cc subs h, b3, b0 cneg h, h, cc mul l, t, h umulh h, t, h cinv c, c, cc cmn c, #0x1 eor l, l, c adcs s3, s3, l eor h, h, c adcs s4, s4, h adcs s5, s5, c adcs s6, s6, c adc s7, s7, c subs t, a1, a2 cneg t, t, cc csetm c, cc subs h, b2, b1 cneg h, h, cc mul l, t, h umulh h, t, h cinv c, c, cc cmn c, #0x1 eor l, l, c adcs s3, s3, l eor h, h, c adcs s4, s4, h adcs s5, s5, c adcs s6, s6, c adc s7, s7, c // Let the cross product be M. We want to add 2^256 * 2 * M to the buffer // Split M into M_top (248 bits) and M_bot (264 bits), so we add // 2^521 * M_top + 2^257 * M_bot == 2^257 * M_bot + M_top (mod p_521) // Accumulate the (non-reduced in general) 9-word answer [d8;...;d0] // As this sum is built, accumulate t = AND of words d7...d1 to help // in condensing the carry chain in the comparison that comes next ldp l, h, [z] extr d0, s5, s4, #8 adds d0, d0, l extr d1, s6, s5, #8 adcs d1, d1, h ldp l, h, [z, #16] extr d2, s7, s6, #8 adcs d2, d2, l and t, d1, d2 lsr d3, s7, #8 adcs d3, d3, h and t, t, d3 ldp l, h, [z, #32] lsl d4, s0, #1 adcs d4, d4, l and t, t, d4 extr d5, s1, s0, #63 adcs d5, d5, h and t, t, d5 ldp l, h, [z, #48] extr d6, s2, s1, #63 adcs d6, d6, l and t, t, d6 extr d7, s3, s2, #63 adcs d7, d7, h and t, t, d7 ldr l, [z, #64] extr d8, s4, s3, #63 and d8, d8, #0x1FF adc d8, l, d8 // Extract the high part h and mask off the low part l = [d8;d7;...;d0] // but stuff d8 with 1 bits at the left to ease a comparison below lsr h, d8, #9 orr d8, d8, #~0x1FF // Decide whether h + l >= p_521 <=> h + l + 1 >= 2^521. Since this can only // happen if digits d7,...d1 are all 1s, we use the AND of them "t" to // condense the carry chain, and since we stuffed 1 bits into d8 we get // the result in CF without an additional comparison. subs xzr, xzr, xzr adcs xzr, d0, h adcs xzr, t, xzr adcs xzr, d8, xzr // Now if CF is set we want (h + l) - p_521 = (h + l + 1) - 2^521 // while otherwise we want just h + l. So mask h + l + CF to 521 bits. // This masking also gets rid of the stuffing with 1s we did above. 
adcs d0, d0, h adcs d1, d1, xzr adcs d2, d2, xzr adcs d3, d3, xzr adcs d4, d4, xzr adcs d5, d5, xzr adcs d6, d6, xzr adcs d7, d7, xzr adc d8, d8, xzr and d8, d8, #0x1FF // Store the final result stp d0, d1, [z] stp d2, d3, [z, #16] stp d4, d5, [z, #32] stp d6, d7, [z, #48] str d8, [z, #64] // Restore regs and return ldp x23, x24, [sp], #16 ldp x21, x22, [sp], #16 ldp x19, x20, [sp], #16 ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
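Everything in bignum_sqr_p521_base rests on the Mersenne identity 2^521 ≡ 1 (mod p_521): the 9-bit rotations before storing and the final split of the top word are the word-level form of folding the high half of the square back into the low half. A minimal Python reference of that fold, assuming x < p_521 and plain big integers; the 52-bit chunking of the top-word contribution and the carry condensation above are implementation details not reproduced here, and the function name is illustrative.

```python
p_521 = 2**521 - 1

def sqr_p521(x):
    d = x * x                             # up to 1042 bits
    d = (d & p_521) + (d >> 521)          # fold: hi*2^521 + lo == hi + lo (mod p_521)
    return d if d < p_521 else d - p_521  # the fold leaves a value < 2*p_521

x = 3**400 % p_521
assert sqr_p521(x) == x * x % p_521
```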
wlsfx/bnbb
18,599
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/unopt/bignum_montmul_p521_base.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery multiply, z := (x * y / 2^576) mod p_521 // Inputs x[9], y[9]; output z[9] // // extern void bignum_montmul_p521_base // (uint64_t z[static 9], uint64_t x[static 9], uint64_t y[static 9]); // // Does z := (x * y / 2^576) mod p_521, assuming x < p_521, y < p_521. This // means the Montgomery base is the "native size" 2^{9*64} = 2^576; since // p_521 is a Mersenne prime the basic modular multiplication bignum_mul_p521_base // can be considered a Montgomery operation to base 2^521. // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p521_base) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p521_base) .text .balign 4 // --------------------------------------------------------------------------- // Macro computing [c,b,a] := [b,a] + (x - y) * (w - z), adding with carry // to the [b,a] components but leaving CF aligned with the c term, which is // a sign bitmask for (x - y) * (w - z). Continued add-with-carry operations // with [c,...,c] will continue the carry chain correctly starting from // the c position if desired to add to a longer term of the form [...,b,a]. // // c,h,l,t should all be different and t,h should not overlap w,z. // --------------------------------------------------------------------------- #define muldiffnadd(b,a,x,y,w,z) \ subs t, x, y __LF \ cneg t, t, cc __LF \ csetm c, cc __LF \ subs h, w, z __LF \ cneg h, h, cc __LF \ mul l, t, h __LF \ umulh h, t, h __LF \ cinv c, c, cc __LF \ adds xzr, c, #1 __LF \ eor l, l, c __LF \ adcs a, a, l __LF \ eor h, h, c __LF \ adcs b, b, h #define z x0 #define x x1 #define y x2 #define a0 x3 #define a1 x4 #define a2 x5 #define a3 x6 #define b0 x7 #define b1 x8 #define b2 x9 #define b3 x10 #define s0 x11 #define s1 x12 #define s2 x13 #define s3 x14 #define s4 x15 #define s5 x16 #define s6 x17 #define s7 x19 #define s8 x20 #define c x21 #define h x22 #define l x23 #define t x24 #define s x25 #define u x26 // --------------------------------------------------------------------------- // Core 4x4->8 ADK multiplication macro // Does [s7,s6,s5,s4,s3,s2,s1,s0] = [a3,a2,a1,a0] * [b3,b2,b1,b0] // --------------------------------------------------------------------------- #define mul4 \ /* First accumulate all the "simple" products as [s7,s6,s5,s4,s0] */ \ \ mul s0, a0, b0 __LF \ mul s4, a1, b1 __LF \ mul s5, a2, b2 __LF \ mul s6, a3, b3 __LF \ \ umulh s7, a0, b0 __LF \ adds s4, s4, s7 __LF \ umulh s7, a1, b1 __LF \ adcs s5, s5, s7 __LF \ umulh s7, a2, b2 __LF \ adcs s6, s6, s7 __LF \ umulh s7, a3, b3 __LF \ adc s7, s7, xzr __LF \ \ /* Multiply by B + 1 to get [s7;s6;s5;s4;s1;s0] */ \ \ adds s1, s4, s0 __LF \ adcs s4, s5, s4 __LF \ adcs s5, s6, s5 __LF \ adcs s6, s7, s6 __LF \ adc s7, xzr, s7 __LF \ \ /* Multiply by B^2 + 1 to get [s7;s6;s5;s4;s3;s2;s1;s0] */ \ \ adds s2, s4, s0 __LF \ adcs s3, s5, s1 __LF \ adcs s4, s6, s4 __LF \ adcs s5, s7, s5 __LF \ adcs s6, xzr, s6 __LF \ adc s7, xzr, s7 __LF \ \ /* Now add in all the "complicated" terms. 
*/ \ \ muldiffnadd(s6,s5, a2,a3, b3,b2) __LF \ adc s7, s7, c __LF \ \ muldiffnadd(s2,s1, a0,a1, b1,b0) __LF \ adcs s3, s3, c __LF \ adcs s4, s4, c __LF \ adcs s5, s5, c __LF \ adcs s6, s6, c __LF \ adc s7, s7, c __LF \ \ muldiffnadd(s5,s4, a1,a3, b3,b1) __LF \ adcs s6, s6, c __LF \ adc s7, s7, c __LF \ \ muldiffnadd(s3,s2, a0,a2, b2,b0) __LF \ adcs s4, s4, c __LF \ adcs s5, s5, c __LF \ adcs s6, s6, c __LF \ adc s7, s7, c __LF \ \ muldiffnadd(s4,s3, a0,a3, b3,b0) __LF \ adcs s5, s5, c __LF \ adcs s6, s6, c __LF \ adc s7, s7, c __LF \ muldiffnadd(s4,s3, a1,a2, b2,b1) __LF \ adcs s5, s5, c __LF \ adcs s6, s6, c __LF \ adc s7, s7, c \ S2N_BN_SYMBOL(bignum_montmul_p521_base): // Save registers and make space for the temporary buffer stp x19, x20, [sp, #-16]! stp x21, x22, [sp, #-16]! stp x23, x24, [sp, #-16]! stp x25, x26, [sp, #-16]! sub sp, sp, #80 // Load 4-digit low parts and multiply them to get L ldp a0, a1, [x] ldp a2, a3, [x, #16] ldp b0, b1, [y] ldp b2, b3, [y, #16] mul4 // Shift right 256 bits modulo p_521 and stash in temp buffer lsl c, s0, #9 extr s0, s1, s0, #55 extr s1, s2, s1, #55 extr s2, s3, s2, #55 lsr s3, s3, #55 stp s4, s5, [sp] stp s6, s7, [sp, #16] stp c, s0, [sp, #32] stp s1, s2, [sp, #48] str s3, [sp, #64] // Load 4-digit low parts and multiply them to get H ldp a0, a1, [x, #32] ldp a2, a3, [x, #48] ldp b0, b1, [y, #32] ldp b2, b3, [y, #48] mul4 // Add to the existing temporary buffer and re-stash. // This gives a result HL congruent to (2^256 * H + L) / 2^256 modulo p_521 ldp l, h, [sp] adds s0, s0, l adcs s1, s1, h stp s0, s1, [sp] ldp l, h, [sp, #16] adcs s2, s2, l adcs s3, s3, h stp s2, s3, [sp, #16] ldp l, h, [sp, #32] adcs s4, s4, l adcs s5, s5, h stp s4, s5, [sp, #32] ldp l, h, [sp, #48] adcs s6, s6, l adcs s7, s7, h stp s6, s7, [sp, #48] ldr c, [sp, #64] adc c, c, xzr str c, [sp, #64] // Compute t,[a3,a2,a1,a0] = x_hi - x_lo // and s,[b3,b2,b1,b0] = y_lo - y_hi // sign-magnitude differences, then XOR overall sign bitmask into s ldp l, h, [x] subs a0, a0, l sbcs a1, a1, h ldp l, h, [x, #16] sbcs a2, a2, l sbcs a3, a3, h csetm t, cc ldp l, h, [y] subs b0, l, b0 sbcs b1, h, b1 ldp l, h, [y, #16] sbcs b2, l, b2 sbcs b3, h, b3 csetm s, cc eor a0, a0, t subs a0, a0, t eor a1, a1, t sbcs a1, a1, t eor a2, a2, t sbcs a2, a2, t eor a3, a3, t sbc a3, a3, t eor b0, b0, s subs b0, b0, s eor b1, b1, s sbcs b1, b1, s eor b2, b2, s sbcs b2, b2, s eor b3, b3, s sbc b3, b3, s eor s, s, t // Now do yet a third 4x4 multiply to get mid-term product M mul4 // We now want, at the 256 position, 2^256 * HL + HL + (-1)^s * M // To keep things positive we use M' = p_521 - M in place of -M, // and this notion of negation just amounts to complementation in 521 bits. // Fold in the re-addition of the appropriately scaled lowest 4 words // The initial result is [s8; b3;b2;b1;b0; s7;s6;s5;s4;s3;s2;s1;s0] // Rebase it as a 9-word value at the 512 bit position using // [s8; b3;b2;b1;b0; s7;s6;s5;s4;s3;s2;s1;s0] == // [s8; b3;b2;b1;b0; s7;s6;s5;s4] + 2^265 * [s3;s2;s1;s0] = // ([s8; b3;b2;b1;b0] + 2^9 * [s3;s2;s1;s0]); s7;s6;s5;s4] // // Accumulate as [s8; b3;b2;b1;b0; s7;s6;s5;s4] but leave out an additional // small c (s8 + suspended carry) to add at the 256 position here (512 // overall). This can be added in the next block (to b0 = sum4). 
ldp a0, a1, [sp] ldp a2, a3, [sp, #16] eor s0, s0, s adds s0, s0, a0 eor s1, s1, s adcs s1, s1, a1 eor s2, s2, s adcs s2, s2, a2 eor s3, s3, s adcs s3, s3, a3 eor s4, s4, s ldp b0, b1, [sp, #32] ldp b2, b3, [sp, #48] ldr s8, [sp, #64] adcs s4, s4, b0 eor s5, s5, s adcs s5, s5, b1 eor s6, s6, s adcs s6, s6, b2 eor s7, s7, s adcs s7, s7, b3 adc c, s8, xzr adds s4, s4, a0 adcs s5, s5, a1 adcs s6, s6, a2 adcs s7, s7, a3 and s, s, #0x1FF lsl t, s0, #9 orr t, t, s adcs b0, b0, t extr t, s1, s0, #55 adcs b1, b1, t extr t, s2, s1, #55 adcs b2, b2, t extr t, s3, s2, #55 adcs b3, b3, t lsr t, s3, #55 adc s8, t, s8 // Augment the total with the contribution from the top little words // w and v. If we write the inputs as 2^512 * w + x and 2^512 * v + y // then we are otherwise just doing x * y so we actually need to add // 2^512 * (2^512 * w * v + w * y + v * x). We do this is an involved // way chopping x and y into 52-bit chunks so we can do most of the core // arithmetic using only basic muls, no umulh (since w, v are only 9 bits). // This does however involve some intricate bit-splicing plus arithmetic. // To make things marginally less confusing we introduce some new names // at the human level: x = [c7;...;c0] and y = [d7;...d0], which are // not all distinct, and [sum8;sum7;...;sum0] for the running sum. // Also accumulate u = sum1 AND ... AND sum7 for the later comparison #define sum0 s4 #define sum1 s5 #define sum2 s6 #define sum3 s7 #define sum4 b0 #define sum5 b1 #define sum6 b2 #define sum7 b3 #define sum8 s8 #define c0 a0 #define c1 a1 #define c2 a2 #define c3 a0 #define c4 a1 #define c5 a2 #define c6 a0 #define c7 a1 #define d0 s0 #define d1 s1 #define d2 s2 #define d3 s0 #define d4 s1 #define d5 s2 #define d6 s0 #define d7 s1 #define v a3 #define w s3 // 0 * 52 = 64 * 0 + 0 ldr v, [y, #64] ldp c0, c1, [x] and l, c0, #0x000fffffffffffff mul l, v, l ldr w, [x, #64] ldp d0, d1, [y] and t, d0, #0x000fffffffffffff mul t, w, t add l, l, t // 1 * 52 = 64 * 0 + 52 extr t, c1, c0, #52 and t, t, #0x000fffffffffffff mul h, v, t extr t, d1, d0, #52 and t, t, #0x000fffffffffffff mul t, w, t add h, h, t lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #12 adds sum0, sum0, t // 2 * 52 = 64 * 1 + 40 ldp c2, c3, [x, #16] ldp d2, d3, [y, #16] extr t, c2, c1, #40 and t, t, #0x000fffffffffffff mul l, v, t extr t, d2, d1, #40 and t, t, #0x000fffffffffffff mul t, w, t add l, l, t lsr t, h, #52 add l, l, t lsl h, h, #12 extr t, l, h, #24 adcs sum1, sum1, t // 3 * 52 = 64 * 2 + 28 extr t, c3, c2, #28 and t, t, #0x000fffffffffffff mul h, v, t extr t, d3, d2, #28 and t, t, #0x000fffffffffffff mul t, w, t add h, h, t lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #36 adcs sum2, sum2, t and u, sum1, sum2 // 4 * 52 = 64 * 3 + 16 // At this point we also fold in the addition of c at the right place. // Note that 4 * 64 = 4 * 52 + 48 so we shift c left 48 places to align. 
ldp c4, c5, [x, #32] ldp d4, d5, [y, #32] extr t, c4, c3, #16 and t, t, #0x000fffffffffffff mul l, v, t extr t, d4, d3, #16 and t, t, #0x000fffffffffffff mul t, w, t add l, l, t lsl c, c, #48 add l, l, c lsr t, h, #52 add l, l, t lsl h, h, #12 extr t, l, h, #48 adcs sum3, sum3, t and u, u, sum3 // 5 * 52 = 64 * 4 + 4 lsr t, c4, #4 and t, t, #0x000fffffffffffff mul h, v, t lsr t, d4, #4 and t, t, #0x000fffffffffffff mul t, w, t add h, h, t lsr t, l, #52 add h, h, t lsl l, l, #12 extr s, h, l, #60 // 6 * 52 = 64 * 4 + 56 extr t, c5, c4, #56 and t, t, #0x000fffffffffffff mul l, v, t extr t, d5, d4, #56 and t, t, #0x000fffffffffffff mul t, w, t add l, l, t lsr t, h, #52 add l, l, t lsl s, s, #8 extr t, l, s, #8 adcs sum4, sum4, t and u, u, sum4 // 7 * 52 = 64 * 5 + 44 ldp c6, c7, [x, #48] ldp d6, d7, [y, #48] extr t, c6, c5, #44 and t, t, #0x000fffffffffffff mul h, v, t extr t, d6, d5, #44 and t, t, #0x000fffffffffffff mul t, w, t add h, h, t lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #20 adcs sum5, sum5, t and u, u, sum5 // 8 * 52 = 64 * 6 + 32 extr t, c7, c6, #32 and t, t, #0x000fffffffffffff mul l, v, t extr t, d7, d6, #32 and t, t, #0x000fffffffffffff mul t, w, t add l, l, t lsr t, h, #52 add l, l, t lsl h, h, #12 extr t, l, h, #32 adcs sum6, sum6, t and u, u, sum6 // 9 * 52 = 64 * 7 + 20 lsr t, c7, #20 mul h, v, t lsr t, d7, #20 mul t, w, t add h, h, t lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #44 adcs sum7, sum7, t and u, u, sum7 // Top word mul t, v, w lsr h, h, #44 add t, t, h adc sum8, sum8, t // Extract the high part h and mask off the low part l = [sum8;sum7;...;sum0] // but stuff sum8 with 1 bits at the left to ease a comparison below lsr h, sum8, #9 orr sum8, sum8, #~0x1FF // Decide whether h + l >= p_521 <=> h + l + 1 >= 2^521. Since this can only // happen if digits sum7,...sum1 are all 1s, we use the AND of them "u" to // condense the carry chain, and since we stuffed 1 bits into sum8 we get // the result in CF without an additional comparison. subs xzr, xzr, xzr adcs xzr, sum0, h adcs xzr, u, xzr adcs xzr, sum8, xzr // Now if CF is set we want (h + l) - p_521 = (h + l + 1) - 2^521 // while otherwise we want just h + l. So mask h + l + CF to 521 bits. // The masking is combined with the writeback in the next block. adcs sum0, sum0, h adcs sum1, sum1, xzr adcs sum2, sum2, xzr adcs sum3, sum3, xzr adcs sum4, sum4, xzr adcs sum5, sum5, xzr adcs sum6, sum6, xzr adcs sum7, sum7, xzr adc sum8, sum8, xzr // The result is actually [sum8;...;sum0] == product / 2^512, since we are // in the 512 position. For Montgomery we want product / 2^576, so write // back [sum8;...;sum0] rotated right by 64 bits, as a 521-bit quantity. stp sum1, sum2, [z] stp sum3, sum4, [z, #16] stp sum5, sum6, [z, #32] lsl h, sum0, #9 and sum8, sum8, #0x1FF orr sum8, sum8, h stp sum7, sum8, [z, #48] lsr sum0, sum0, #55 str sum0, [z, #64] // Restore regs and return add sp, sp, #80 ldp x25, x26, [sp], #16 ldp x23, x24, [sp], #16 ldp x21, x22, [sp], #16 ldp x19, x20, [sp], #16 ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
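The header comment of bignum_montmul_p521_base.S notes that, because p_521 is a Mersenne prime, the Montgomery factor 2^-576 collapses to 2^-55 once 2^521 == 1 is used, which the code realizes as a 55-bit right rotation of the 521-bit result. The C sketch below checks the same relationship at toy size, assuming the analogous parameters p = 2^13 - 1 and Montgomery base R = 2^16 (so R^-1 == 2^-3, a 3-bit right rotation within 13 bits); it is an illustration of the relationship, not part of the library.

/* Toy check: for the Mersenne prime p = 2^13 - 1 and Montgomery base R = 2^16,
   x*y*R^{-1} mod p equals (x*y mod p) rotated right by 16 - 13 = 3 bits within
   13 bits, mirroring the 55-bit rotation used above for p_521 and 2^576. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define P13 ((1u << 13) - 1)   /* 8191, a Mersenne prime */

static uint32_t modpow(uint32_t b, uint32_t e, uint32_t m) {
    uint64_t r = 1, x = b % m;
    while (e) { if (e & 1) r = r * x % m; x = x * x % m; e >>= 1; }
    return (uint32_t)r;
}

static uint32_t rotr13(uint32_t v, unsigned k) {        /* rotate within 13 bits */
    return ((v >> k) | (v << (13 - k))) & P13;
}

int main(void) {
    uint32_t Rinv = modpow(1u << 16, P13 - 2, P13);     /* R^{-1} mod p (Fermat) */
    for (uint32_t x = 1; x < P13; x += 97) {
        for (uint32_t y = 1; y < P13; y += 101) {
            uint32_t plain = (uint32_t)((uint64_t)x * y % P13);
            uint32_t mont  = (uint32_t)((uint64_t)plain * Rinv % P13);
            assert(mont == rotr13(plain, 3));           /* Montgomery == rotation */
        }
    }
    puts("montgomery-as-rotation holds for the toy parameters");
    return 0;
}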
wlsfx/bnbb
16,192
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/unopt/bignum_montsqr_p521_base.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery square, z := (x^2 / 2^576) mod p_521 // Input x[9]; output z[9] // // extern void bignum_montsqr_p521_base // (uint64_t z[static 9], uint64_t x[static 9]); // // Does z := (x^2 / 2^576) mod p_521, assuming x < p_521. This means the // Montgomery base is the "native size" 2^{9*64} = 2^576; since p_521 is // a Mersenne prime the basic modular squaring bignum_sqr_p521_base can be // considered a Montgomery operation to base 2^521. // // Standard ARM ABI: X0 = z, X1 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p521_base) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p521_base) .text .balign 4 #define z x0 #define x x1 #define a0 x2 #define a1 x3 #define a2 x4 #define a3 x5 #define b0 x6 #define b1 x7 #define b2 x8 #define b3 x9 #define s0 x10 #define s1 x11 #define s2 x12 #define s3 x13 #define s4 x14 #define s5 x15 #define s6 x16 #define s7 x17 #define c x19 #define h x20 #define l x21 #define t x22 #define u x23 #define v x24 // Aliased to earlier ones we no longer need #define d0 x2 #define d1 x3 #define d2 x4 #define d3 x5 #define d4 x6 #define d5 x7 #define d6 x8 #define d7 x9 #define d8 x10 S2N_BN_SYMBOL(bignum_montsqr_p521_base): // Save registers stp x19, x20, [sp, #-16]! stp x21, x22, [sp, #-16]! stp x23, x24, [sp, #-16]! // Load all the inputs first ldp a0, a1, [x] ldp a2, a3, [x, #16] ldp b0, b1, [x, #32] ldp b2, b3, [x, #48] // Square the upper half with a register-renamed variant of bignum_sqr_4_8 mul s2, b0, b2 mul s7, b1, b3 umulh t, b0, b2 subs u, b0, b1 cneg u, u, cc csetm s1, cc subs s0, b3, b2 cneg s0, s0, cc mul s6, u, s0 umulh s0, u, s0 cinv s1, s1, cc eor s6, s6, s1 eor s0, s0, s1 adds s3, s2, t adc t, t, xzr umulh u, b1, b3 adds s3, s3, s7 adcs t, t, u adc u, u, xzr adds t, t, s7 adc u, u, xzr cmn s1, #0x1 adcs s3, s3, s6 adcs t, t, s0 adc u, u, s1 adds s2, s2, s2 adcs s3, s3, s3 adcs t, t, t adcs u, u, u adc c, xzr, xzr mul s0, b0, b0 mul s6, b1, b1 mul l, b0, b1 umulh s1, b0, b0 umulh s7, b1, b1 umulh h, b0, b1 adds s1, s1, l adcs s6, s6, h adc s7, s7, xzr adds s1, s1, l adcs s6, s6, h adc s7, s7, xzr adds s2, s2, s6 adcs s3, s3, s7 adcs t, t, xzr adcs u, u, xzr adc c, c, xzr mul s4, b2, b2 mul s6, b3, b3 mul l, b2, b3 umulh s5, b2, b2 umulh s7, b3, b3 umulh h, b2, b3 adds s5, s5, l adcs s6, s6, h adc s7, s7, xzr adds s5, s5, l adcs s6, s6, h adc s7, s7, xzr adds s4, s4, t adcs s5, s5, u adcs s6, s6, c adc s7, s7, xzr // Augment the high part with the contribution from the top little word C. // If we write the input as 2^512 * C + x then we are otherwise just doing // x^2, so we need to add to the high part 2^512 * C^2 + (2 * C) * x. // Accumulate it as [c;s7;...;s0] = H'. Since 2 * C is only 10 bits long // we multiply 52-bit chunks of the x digits by 2 * C and solve the overlap // with non-overflowing addition to get 52-bit chunks of the result with // similar alignment. Then we stitch these back together and add them into // the running total. This is quite a bit of palaver, but it avoids using // the standard 2-part multiplications involving umulh, and on target // microarchitectures seems to improve performance by about 5%. 
We could // equally well use 53 or 54 since they are still <= 64 - 10, but below // 52 we would end up using more multiplications. ldr c, [x, #64] add u, c, c mul c, c, c // 0 * 52 = 64 * 0 + 0 and l, a0, #0x000fffffffffffff mul l, u, l // 1 * 52 = 64 * 0 + 52 extr h, a1, a0, #52 and h, h, #0x000fffffffffffff mul h, u, h lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #12 adds s0, s0, t // 2 * 52 = 64 * 1 + 40 extr l, a2, a1, #40 and l, l, #0x000fffffffffffff mul l, u, l lsr t, h, #52 add l, l, t lsl h, h, #12 extr t, l, h, #24 adcs s1, s1, t // 3 * 52 = 64 * 2 + 28 extr h, a3, a2, #28 and h, h, #0x000fffffffffffff mul h, u, h lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #36 adcs s2, s2, t // 4 * 52 = 64 * 3 + 16 extr l, b0, a3, #16 and l, l, #0x000fffffffffffff mul l, u, l lsr t, h, #52 add l, l, t lsl h, h, #12 extr t, l, h, #48 adcs s3, s3, t // 5 * 52 = 64 * 4 + 4 lsr h, b0, #4 and h, h, #0x000fffffffffffff mul h, u, h lsr t, l, #52 add h, h, t lsl l, l, #12 extr v, h, l, #60 // 6 * 52 = 64 * 4 + 56 extr l, b1, b0, #56 and l, l, #0x000fffffffffffff mul l, u, l lsr t, h, #52 add l, l, t lsl v, v, #8 extr t, l, v, #8 adcs s4, s4, t // 7 * 52 = 64 * 5 + 44 extr h, b2, b1, #44 and h, h, #0x000fffffffffffff mul h, u, h lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #20 adcs s5, s5, t // 8 * 52 = 64 * 6 + 32 extr l, b3, b2, #32 and l, l, #0x000fffffffffffff mul l, u, l lsr t, h, #52 add l, l, t lsl h, h, #12 extr t, l, h, #32 adcs s6, s6, t // 9 * 52 = 64 * 7 + 20 lsr h, b3, #20 mul h, u, h lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #44 adcs s7, s7, t // Top word lsr h, h, #44 adc c, c, h // Rotate [c;s7;...;s0] before storing in the buffer. // We want to add 2^512 * H', which splitting H' at bit 9 is // 2^521 * H_top + 2^512 * H_bot == 2^512 * H_bot + H_top (mod p_521) extr l, s1, s0, #9 extr h, s2, s1, #9 stp l, h, [z] extr l, s3, s2, #9 extr h, s4, s3, #9 stp l, h, [z, #16] extr l, s5, s4, #9 extr h, s6, s5, #9 stp l, h, [z, #32] extr l, s7, s6, #9 extr h, c, s7, #9 stp l, h, [z, #48] and t, s0, #0x1FF lsr c, c, #9 add t, t, c str t, [z, #64] // Square the lower half with an analogous variant of bignum_sqr_4_8 mul s2, a0, a2 mul s7, a1, a3 umulh t, a0, a2 subs u, a0, a1 cneg u, u, cc csetm s1, cc subs s0, a3, a2 cneg s0, s0, cc mul s6, u, s0 umulh s0, u, s0 cinv s1, s1, cc eor s6, s6, s1 eor s0, s0, s1 adds s3, s2, t adc t, t, xzr umulh u, a1, a3 adds s3, s3, s7 adcs t, t, u adc u, u, xzr adds t, t, s7 adc u, u, xzr cmn s1, #0x1 adcs s3, s3, s6 adcs t, t, s0 adc u, u, s1 adds s2, s2, s2 adcs s3, s3, s3 adcs t, t, t adcs u, u, u adc c, xzr, xzr mul s0, a0, a0 mul s6, a1, a1 mul l, a0, a1 umulh s1, a0, a0 umulh s7, a1, a1 umulh h, a0, a1 adds s1, s1, l adcs s6, s6, h adc s7, s7, xzr adds s1, s1, l adcs s6, s6, h adc s7, s7, xzr adds s2, s2, s6 adcs s3, s3, s7 adcs t, t, xzr adcs u, u, xzr adc c, c, xzr mul s4, a2, a2 mul s6, a3, a3 mul l, a2, a3 umulh s5, a2, a2 umulh s7, a3, a3 umulh h, a2, a3 adds s5, s5, l adcs s6, s6, h adc s7, s7, xzr adds s5, s5, l adcs s6, s6, h adc s7, s7, xzr adds s4, s4, t adcs s5, s5, u adcs s6, s6, c adc s7, s7, xzr // Add it directly to the existing buffer ldp l, h, [z] adds l, l, s0 adcs h, h, s1 stp l, h, [z] ldp l, h, [z, #16] adcs l, l, s2 adcs h, h, s3 stp l, h, [z, #16] ldp l, h, [z, #32] adcs l, l, s4 adcs h, h, s5 stp l, h, [z, #32] ldp l, h, [z, #48] adcs l, l, s6 adcs h, h, s7 stp l, h, [z, #48] ldr t, [z, #64] adc t, t, xzr str t, [z, #64] // Now get the cross-product in [s7,...,s0] with variant of bignum_mul_4_8 mul 
s0, a0, b0 mul s4, a1, b1 mul s5, a2, b2 mul s6, a3, b3 umulh s7, a0, b0 adds s4, s4, s7 umulh s7, a1, b1 adcs s5, s5, s7 umulh s7, a2, b2 adcs s6, s6, s7 umulh s7, a3, b3 adc s7, s7, xzr adds s1, s4, s0 adcs s4, s5, s4 adcs s5, s6, s5 adcs s6, s7, s6 adc s7, xzr, s7 adds s2, s4, s0 adcs s3, s5, s1 adcs s4, s6, s4 adcs s5, s7, s5 adcs s6, xzr, s6 adc s7, xzr, s7 subs t, a2, a3 cneg t, t, cc csetm c, cc subs h, b3, b2 cneg h, h, cc mul l, t, h umulh h, t, h cinv c, c, cc cmn c, #0x1 eor l, l, c adcs s5, s5, l eor h, h, c adcs s6, s6, h adc s7, s7, c subs t, a0, a1 cneg t, t, cc csetm c, cc subs h, b1, b0 cneg h, h, cc mul l, t, h umulh h, t, h cinv c, c, cc cmn c, #0x1 eor l, l, c adcs s1, s1, l eor h, h, c adcs s2, s2, h adcs s3, s3, c adcs s4, s4, c adcs s5, s5, c adcs s6, s6, c adc s7, s7, c subs t, a1, a3 cneg t, t, cc csetm c, cc subs h, b3, b1 cneg h, h, cc mul l, t, h umulh h, t, h cinv c, c, cc cmn c, #0x1 eor l, l, c adcs s4, s4, l eor h, h, c adcs s5, s5, h adcs s6, s6, c adc s7, s7, c subs t, a0, a2 cneg t, t, cc csetm c, cc subs h, b2, b0 cneg h, h, cc mul l, t, h umulh h, t, h cinv c, c, cc cmn c, #0x1 eor l, l, c adcs s2, s2, l eor h, h, c adcs s3, s3, h adcs s4, s4, c adcs s5, s5, c adcs s6, s6, c adc s7, s7, c subs t, a0, a3 cneg t, t, cc csetm c, cc subs h, b3, b0 cneg h, h, cc mul l, t, h umulh h, t, h cinv c, c, cc cmn c, #0x1 eor l, l, c adcs s3, s3, l eor h, h, c adcs s4, s4, h adcs s5, s5, c adcs s6, s6, c adc s7, s7, c subs t, a1, a2 cneg t, t, cc csetm c, cc subs h, b2, b1 cneg h, h, cc mul l, t, h umulh h, t, h cinv c, c, cc cmn c, #0x1 eor l, l, c adcs s3, s3, l eor h, h, c adcs s4, s4, h adcs s5, s5, c adcs s6, s6, c adc s7, s7, c // Let the cross product be M. We want to add 2^256 * 2 * M to the buffer // Split M into M_top (248 bits) and M_bot (264 bits), so we add // 2^521 * M_top + 2^257 * M_bot == 2^257 * M_bot + M_top (mod p_521) // Accumulate the (non-reduced in general) 9-word answer [d8;...;d0] // As this sum is built, accumulate t = AND of words d7...d1 to help // in condensing the carry chain in the comparison that comes next ldp l, h, [z] extr d0, s5, s4, #8 adds d0, d0, l extr d1, s6, s5, #8 adcs d1, d1, h ldp l, h, [z, #16] extr d2, s7, s6, #8 adcs d2, d2, l and t, d1, d2 lsr d3, s7, #8 adcs d3, d3, h and t, t, d3 ldp l, h, [z, #32] lsl d4, s0, #1 adcs d4, d4, l and t, t, d4 extr d5, s1, s0, #63 adcs d5, d5, h and t, t, d5 ldp l, h, [z, #48] extr d6, s2, s1, #63 adcs d6, d6, l and t, t, d6 extr d7, s3, s2, #63 adcs d7, d7, h and t, t, d7 ldr l, [z, #64] extr d8, s4, s3, #63 and d8, d8, #0x1FF adc d8, l, d8 // Extract the high part h and mask off the low part l = [d8;d7;...;d0] // but stuff d8 with 1 bits at the left to ease a comparison below lsr h, d8, #9 orr d8, d8, #~0x1FF // Decide whether h + l >= p_521 <=> h + l + 1 >= 2^521. Since this can only // happen if digits d7,...d1 are all 1s, we use the AND of them "t" to // condense the carry chain, and since we stuffed 1 bits into d8 we get // the result in CF without an additional comparison. subs xzr, xzr, xzr adcs xzr, d0, h adcs xzr, t, xzr adcs xzr, d8, xzr // Now if CF is set we want (h + l) - p_521 = (h + l + 1) - 2^521 // while otherwise we want just h + l. So mask h + l + CF to 521 bits. // This masking also gets rid of the stuffing with 1s we did above. adcs d0, d0, h adcs d1, d1, xzr adcs d2, d2, xzr adcs d3, d3, xzr adcs d4, d4, xzr adcs d5, d5, xzr adcs d6, d6, xzr adcs d7, d7, xzr adc d8, d8, xzr and d8, d8, #0x1FF // So far, this has been the same as a pure modular squaring. 
// Now finally the Montgomery ingredient, which is just a 521-bit // rotation by 9*64 - 521 = 55 bits right. lsl c, d0, #9 extr d0, d1, d0, #55 extr d1, d2, d1, #55 extr d2, d3, d2, #55 extr d3, d4, d3, #55 orr d8, d8, c extr d4, d5, d4, #55 extr d5, d6, d5, #55 extr d6, d7, d6, #55 extr d7, d8, d7, #55 lsr d8, d8, #55 // Store the final result stp d0, d1, [z] stp d2, d3, [z, #16] stp d4, d5, [z, #32] stp d6, d7, [z, #48] str d8, [z, #64] // Restore regs and return ldp x23, x24, [sp], #16 ldp x21, x22, [sp], #16 ldp x19, x20, [sp], #16 ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
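The long comment in bignum_montsqr_p521_base.S explains why the operand is rechunked into 52-bit pieces: the top word is at most 9 bits (10 after doubling), so a 52-bit chunk times that factor still fits in 64 bits and plain mul suffices, with no umulh. The C sketch below shows only that size argument, on a number assumed to be already held in radix-2^52 limbs; the bit-splicing between 64-bit and 52-bit limbs that the assembly performs is omitted, and the limb count and names are illustrative.

/* Sketch: multiplying 52-bit limbs by a factor of at most 10 bits never needs
   a high-half (umulh-style) multiply, because 52 + 10 <= 64.  The carries are
   themselves at most 10 bits, so the chain below cannot overflow either. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LIMB_BITS 52
#define LIMB_MASK ((1ULL << LIMB_BITS) - 1)

/* out[] and in[] are radix-2^52 limbs; small must fit in 10 bits. */
static uint64_t scale_by_small(uint64_t *out, const uint64_t *in,
                               int nlimbs, uint64_t small) {
    assert(small < (1ULL << 10));
    uint64_t carry = 0;
    for (int i = 0; i < nlimbs; i++) {
        uint64_t prod = in[i] * small + carry;  /* < 2^62 + 2^10: fits in 64 bits */
        out[i] = prod & LIMB_MASK;
        carry  = prod >> LIMB_BITS;             /* at most 10 bits */
    }
    return carry;                               /* top limb of the result */
}

int main(void) {
    uint64_t in[4]  = { LIMB_MASK, LIMB_MASK, 0x123456789ABCDULL, 1 };
    uint64_t out[4];
    uint64_t top = scale_by_small(out, in, 4, 0x3FF);  /* 2*C is at most 10 bits */
    printf("top limb: 0x%llx, limb0: 0x%llx\n",
           (unsigned long long)top, (unsigned long long)out[0]);
    return 0;
}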
wlsfx/bnbb
18,403
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/third_party/s2n-bignum/s2n-bignum-imported/arm/p521/unopt/bignum_mul_p521_base.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply modulo p_521, z := (x * y) mod p_521, assuming x and y reduced // Inputs x[9], y[9]; output z[9] // // extern void bignum_mul_p521_base // (uint64_t z[static 9], uint64_t x[static 9], uint64_t y[static 9]); // // Standard ARM ABI: X0 = z, X1 = x, X2 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum_arm.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mul_p521_base) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mul_p521_base) .text .balign 4 // --------------------------------------------------------------------------- // Macro computing [c,b,a] := [b,a] + (x - y) * (w - z), adding with carry // to the [b,a] components but leaving CF aligned with the c term, which is // a sign bitmask for (x - y) * (w - z). Continued add-with-carry operations // with [c,...,c] will continue the carry chain correctly starting from // the c position if desired to add to a longer term of the form [...,b,a]. // // c,h,l,t should all be different and t,h should not overlap w,z. // --------------------------------------------------------------------------- #define muldiffnadd(b,a,x,y,w,z) \ subs t, x, y __LF \ cneg t, t, cc __LF \ csetm c, cc __LF \ subs h, w, z __LF \ cneg h, h, cc __LF \ mul l, t, h __LF \ umulh h, t, h __LF \ cinv c, c, cc __LF \ adds xzr, c, #1 __LF \ eor l, l, c __LF \ adcs a, a, l __LF \ eor h, h, c __LF \ adcs b, b, h #define z x0 #define x x1 #define y x2 #define a0 x3 #define a1 x4 #define a2 x5 #define a3 x6 #define b0 x7 #define b1 x8 #define b2 x9 #define b3 x10 #define s0 x11 #define s1 x12 #define s2 x13 #define s3 x14 #define s4 x15 #define s5 x16 #define s6 x17 #define s7 x19 #define s8 x20 #define c x21 #define h x22 #define l x23 #define t x24 #define s x25 #define u x26 // --------------------------------------------------------------------------- // Core 4x4->8 ADK multiplication macro // Does [s7,s6,s5,s4,s3,s2,s1,s0] = [a3,a2,a1,a0] * [b3,b2,b1,b0] // --------------------------------------------------------------------------- #define mul4 \ /* First accumulate all the "simple" products as [s7,s6,s5,s4,s0] */ \ \ mul s0, a0, b0 __LF \ mul s4, a1, b1 __LF \ mul s5, a2, b2 __LF \ mul s6, a3, b3 __LF \ \ umulh s7, a0, b0 __LF \ adds s4, s4, s7 __LF \ umulh s7, a1, b1 __LF \ adcs s5, s5, s7 __LF \ umulh s7, a2, b2 __LF \ adcs s6, s6, s7 __LF \ umulh s7, a3, b3 __LF \ adc s7, s7, xzr __LF \ \ /* Multiply by B + 1 to get [s7;s6;s5;s4;s1;s0] */ \ \ adds s1, s4, s0 __LF \ adcs s4, s5, s4 __LF \ adcs s5, s6, s5 __LF \ adcs s6, s7, s6 __LF \ adc s7, xzr, s7 __LF \ \ /* Multiply by B^2 + 1 to get [s7;s6;s5;s4;s3;s2;s1;s0] */ \ \ adds s2, s4, s0 __LF \ adcs s3, s5, s1 __LF \ adcs s4, s6, s4 __LF \ adcs s5, s7, s5 __LF \ adcs s6, xzr, s6 __LF \ adc s7, xzr, s7 __LF \ \ /* Now add in all the "complicated" terms. 
*/ \ \ muldiffnadd(s6,s5, a2,a3, b3,b2) __LF \ adc s7, s7, c __LF \ \ muldiffnadd(s2,s1, a0,a1, b1,b0) __LF \ adcs s3, s3, c __LF \ adcs s4, s4, c __LF \ adcs s5, s5, c __LF \ adcs s6, s6, c __LF \ adc s7, s7, c __LF \ \ muldiffnadd(s5,s4, a1,a3, b3,b1) __LF \ adcs s6, s6, c __LF \ adc s7, s7, c __LF \ \ muldiffnadd(s3,s2, a0,a2, b2,b0) __LF \ adcs s4, s4, c __LF \ adcs s5, s5, c __LF \ adcs s6, s6, c __LF \ adc s7, s7, c __LF \ \ muldiffnadd(s4,s3, a0,a3, b3,b0) __LF \ adcs s5, s5, c __LF \ adcs s6, s6, c __LF \ adc s7, s7, c __LF \ muldiffnadd(s4,s3, a1,a2, b2,b1) __LF \ adcs s5, s5, c __LF \ adcs s6, s6, c __LF \ adc s7, s7, c \ S2N_BN_SYMBOL(bignum_mul_p521_base): // Save registers and make space for the temporary buffer stp x19, x20, [sp, #-16]! stp x21, x22, [sp, #-16]! stp x23, x24, [sp, #-16]! stp x25, x26, [sp, #-16]! sub sp, sp, #80 // Load 4-digit low parts and multiply them to get L ldp a0, a1, [x] ldp a2, a3, [x, #16] ldp b0, b1, [y] ldp b2, b3, [y, #16] mul4 // Shift right 256 bits modulo p_521 and stash in temp buffer lsl c, s0, #9 extr s0, s1, s0, #55 extr s1, s2, s1, #55 extr s2, s3, s2, #55 lsr s3, s3, #55 stp s4, s5, [sp] stp s6, s7, [sp, #16] stp c, s0, [sp, #32] stp s1, s2, [sp, #48] str s3, [sp, #64] // Load 4-digit low parts and multiply them to get H ldp a0, a1, [x, #32] ldp a2, a3, [x, #48] ldp b0, b1, [y, #32] ldp b2, b3, [y, #48] mul4 // Add to the existing temporary buffer and re-stash. // This gives a result HL congruent to (2^256 * H + L) / 2^256 modulo p_521 ldp l, h, [sp] adds s0, s0, l adcs s1, s1, h stp s0, s1, [sp] ldp l, h, [sp, #16] adcs s2, s2, l adcs s3, s3, h stp s2, s3, [sp, #16] ldp l, h, [sp, #32] adcs s4, s4, l adcs s5, s5, h stp s4, s5, [sp, #32] ldp l, h, [sp, #48] adcs s6, s6, l adcs s7, s7, h stp s6, s7, [sp, #48] ldr c, [sp, #64] adc c, c, xzr str c, [sp, #64] // Compute t,[a3,a2,a1,a0] = x_hi - x_lo // and s,[b3,b2,b1,b0] = y_lo - y_hi // sign-magnitude differences, then XOR overall sign bitmask into s ldp l, h, [x] subs a0, a0, l sbcs a1, a1, h ldp l, h, [x, #16] sbcs a2, a2, l sbcs a3, a3, h csetm t, cc ldp l, h, [y] subs b0, l, b0 sbcs b1, h, b1 ldp l, h, [y, #16] sbcs b2, l, b2 sbcs b3, h, b3 csetm s, cc eor a0, a0, t subs a0, a0, t eor a1, a1, t sbcs a1, a1, t eor a2, a2, t sbcs a2, a2, t eor a3, a3, t sbc a3, a3, t eor b0, b0, s subs b0, b0, s eor b1, b1, s sbcs b1, b1, s eor b2, b2, s sbcs b2, b2, s eor b3, b3, s sbc b3, b3, s eor s, s, t // Now do yet a third 4x4 multiply to get mid-term product M mul4 // We now want, at the 256 position, 2^256 * HL + HL + (-1)^s * M // To keep things positive we use M' = p_521 - M in place of -M, // and this notion of negation just amounts to complementation in 521 bits. // Fold in the re-addition of the appropriately scaled lowest 4 words // The initial result is [s8; b3;b2;b1;b0; s7;s6;s5;s4;s3;s2;s1;s0] // Rebase it as a 9-word value at the 512 bit position using // [s8; b3;b2;b1;b0; s7;s6;s5;s4;s3;s2;s1;s0] == // [s8; b3;b2;b1;b0; s7;s6;s5;s4] + 2^265 * [s3;s2;s1;s0] = // ([s8; b3;b2;b1;b0] + 2^9 * [s3;s2;s1;s0]); s7;s6;s5;s4] // // Accumulate as [s8; b3;b2;b1;b0; s7;s6;s5;s4] but leave out an additional // small c (s8 + suspended carry) to add at the 256 position here (512 // overall). This can be added in the next block (to b0 = sum4). 
ldp a0, a1, [sp] ldp a2, a3, [sp, #16] eor s0, s0, s adds s0, s0, a0 eor s1, s1, s adcs s1, s1, a1 eor s2, s2, s adcs s2, s2, a2 eor s3, s3, s adcs s3, s3, a3 eor s4, s4, s ldp b0, b1, [sp, #32] ldp b2, b3, [sp, #48] ldr s8, [sp, #64] adcs s4, s4, b0 eor s5, s5, s adcs s5, s5, b1 eor s6, s6, s adcs s6, s6, b2 eor s7, s7, s adcs s7, s7, b3 adc c, s8, xzr adds s4, s4, a0 adcs s5, s5, a1 adcs s6, s6, a2 adcs s7, s7, a3 and s, s, #0x1FF lsl t, s0, #9 orr t, t, s adcs b0, b0, t extr t, s1, s0, #55 adcs b1, b1, t extr t, s2, s1, #55 adcs b2, b2, t extr t, s3, s2, #55 adcs b3, b3, t lsr t, s3, #55 adc s8, t, s8 // Augment the total with the contribution from the top little words // w and v. If we write the inputs as 2^512 * w + x and 2^512 * v + y // then we are otherwise just doing x * y so we actually need to add // 2^512 * (2^512 * w * v + w * y + v * x). We do this is an involved // way chopping x and y into 52-bit chunks so we can do most of the core // arithmetic using only basic muls, no umulh (since w, v are only 9 bits). // This does however involve some intricate bit-splicing plus arithmetic. // To make things marginally less confusing we introduce some new names // at the human level: x = [c7;...;c0] and y = [d7;...d0], which are // not all distinct, and [sum8;sum7;...;sum0] for the running sum. // Also accumulate u = sum1 AND ... AND sum7 for the later comparison #define sum0 s4 #define sum1 s5 #define sum2 s6 #define sum3 s7 #define sum4 b0 #define sum5 b1 #define sum6 b2 #define sum7 b3 #define sum8 s8 #define c0 a0 #define c1 a1 #define c2 a2 #define c3 a0 #define c4 a1 #define c5 a2 #define c6 a0 #define c7 a1 #define d0 s0 #define d1 s1 #define d2 s2 #define d3 s0 #define d4 s1 #define d5 s2 #define d6 s0 #define d7 s1 #define v a3 #define w s3 // 0 * 52 = 64 * 0 + 0 ldr v, [y, #64] ldp c0, c1, [x] and l, c0, #0x000fffffffffffff mul l, v, l ldr w, [x, #64] ldp d0, d1, [y] and t, d0, #0x000fffffffffffff mul t, w, t add l, l, t // 1 * 52 = 64 * 0 + 52 extr t, c1, c0, #52 and t, t, #0x000fffffffffffff mul h, v, t extr t, d1, d0, #52 and t, t, #0x000fffffffffffff mul t, w, t add h, h, t lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #12 adds sum0, sum0, t // 2 * 52 = 64 * 1 + 40 ldp c2, c3, [x, #16] ldp d2, d3, [y, #16] extr t, c2, c1, #40 and t, t, #0x000fffffffffffff mul l, v, t extr t, d2, d1, #40 and t, t, #0x000fffffffffffff mul t, w, t add l, l, t lsr t, h, #52 add l, l, t lsl h, h, #12 extr t, l, h, #24 adcs sum1, sum1, t // 3 * 52 = 64 * 2 + 28 extr t, c3, c2, #28 and t, t, #0x000fffffffffffff mul h, v, t extr t, d3, d2, #28 and t, t, #0x000fffffffffffff mul t, w, t add h, h, t lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #36 adcs sum2, sum2, t and u, sum1, sum2 // 4 * 52 = 64 * 3 + 16 // At this point we also fold in the addition of c at the right place. // Note that 4 * 64 = 4 * 52 + 48 so we shift c left 48 places to align. 
ldp c4, c5, [x, #32] ldp d4, d5, [y, #32] extr t, c4, c3, #16 and t, t, #0x000fffffffffffff mul l, v, t extr t, d4, d3, #16 and t, t, #0x000fffffffffffff mul t, w, t add l, l, t lsl c, c, #48 add l, l, c lsr t, h, #52 add l, l, t lsl h, h, #12 extr t, l, h, #48 adcs sum3, sum3, t and u, u, sum3 // 5 * 52 = 64 * 4 + 4 lsr t, c4, #4 and t, t, #0x000fffffffffffff mul h, v, t lsr t, d4, #4 and t, t, #0x000fffffffffffff mul t, w, t add h, h, t lsr t, l, #52 add h, h, t lsl l, l, #12 extr s, h, l, #60 // 6 * 52 = 64 * 4 + 56 extr t, c5, c4, #56 and t, t, #0x000fffffffffffff mul l, v, t extr t, d5, d4, #56 and t, t, #0x000fffffffffffff mul t, w, t add l, l, t lsr t, h, #52 add l, l, t lsl s, s, #8 extr t, l, s, #8 adcs sum4, sum4, t and u, u, sum4 // 7 * 52 = 64 * 5 + 44 ldp c6, c7, [x, #48] ldp d6, d7, [y, #48] extr t, c6, c5, #44 and t, t, #0x000fffffffffffff mul h, v, t extr t, d6, d5, #44 and t, t, #0x000fffffffffffff mul t, w, t add h, h, t lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #20 adcs sum5, sum5, t and u, u, sum5 // 8 * 52 = 64 * 6 + 32 extr t, c7, c6, #32 and t, t, #0x000fffffffffffff mul l, v, t extr t, d7, d6, #32 and t, t, #0x000fffffffffffff mul t, w, t add l, l, t lsr t, h, #52 add l, l, t lsl h, h, #12 extr t, l, h, #32 adcs sum6, sum6, t and u, u, sum6 // 9 * 52 = 64 * 7 + 20 lsr t, c7, #20 mul h, v, t lsr t, d7, #20 mul t, w, t add h, h, t lsr t, l, #52 add h, h, t lsl l, l, #12 extr t, h, l, #44 adcs sum7, sum7, t and u, u, sum7 // Top word mul t, v, w lsr h, h, #44 add t, t, h adc sum8, sum8, t // Extract the high part h and mask off the low part l = [sum8;sum7;...;sum0] // but stuff sum8 with 1 bits at the left to ease a comparison below lsr h, sum8, #9 orr sum8, sum8, #~0x1FF // Decide whether h + l >= p_521 <=> h + l + 1 >= 2^521. Since this can only // happen if digits sum7,...sum1 are all 1s, we use the AND of them "u" to // condense the carry chain, and since we stuffed 1 bits into sum8 we get // the result in CF without an additional comparison. subs xzr, xzr, xzr adcs xzr, sum0, h adcs xzr, u, xzr adcs xzr, sum8, xzr // Now if CF is set we want (h + l) - p_521 = (h + l + 1) - 2^521 // while otherwise we want just h + l. So mask h + l + CF to 521 bits. // We don't need to mask away bits above 521 since they disappear below. adcs sum0, sum0, h adcs sum1, sum1, xzr adcs sum2, sum2, xzr adcs sum3, sum3, xzr adcs sum4, sum4, xzr adcs sum5, sum5, xzr adcs sum6, sum6, xzr adcs sum7, sum7, xzr adc sum8, sum8, xzr // The result is actually 2^512 * [sum8;...;sum0] == 2^-9 * [sum8;...;sum0] // so we rotate right by 9 bits and h, sum0, #0x1FF extr sum0, sum1, sum0, #9 extr sum1, sum2, sum1, #9 stp sum0, sum1, [z] extr sum2, sum3, sum2, #9 extr sum3, sum4, sum3, #9 stp sum2, sum3, [z, #16] extr sum4, sum5, sum4, #9 extr sum5, sum6, sum5, #9 stp sum4, sum5, [z, #32] extr sum6, sum7, sum6, #9 extr sum7, sum8, sum7, #9 stp sum6, sum7, [z, #48] str h, [z, #64] // Restore regs and return add sp, sp, #80 ldp x25, x26, [sp], #16 ldp x23, x24, [sp], #16 ldp x21, x22, [sp], #16 ldp x19, x20, [sp], #16 ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
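The mul4 macro used by both base multipliers first accumulates the diagonal products a_i*b_i and then folds in each cross pair with muldiffnadd, relying on the identity a0*b1 + a1*b0 = a0*b0 + a1*b1 + (a0 - a1)*(b1 - b0), so each cross pair costs one product of differences rather than two extra full products. The C sketch below merely verifies that identity; the sign-mask and carry bookkeeping done in the assembly is absorbed here by signed 128-bit arithmetic, inputs are kept below 2^63 so no intermediate overflows, and the function names are illustrative.

/* Check of the identity behind muldiffnadd / the ADK multiplication scheme:
   a0*b1 + a1*b0 == a0*b0 + a1*b1 + (a0 - a1)*(b1 - b0)
   The assembly tracks the sign of (a0 - a1)*(b1 - b0) with a bitmask and a
   carry-in; signed 128-bit arithmetic hides that bookkeeping here. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef __int128 i128;   /* GCC/Clang extension */

static void check(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1) {
    /* keep inputs below 2^63 so every signed 128-bit intermediate fits */
    assert((a0 >> 63) == 0 && (a1 >> 63) == 0 && (b0 >> 63) == 0 && (b1 >> 63) == 0);
    i128 cross  = (i128)a0 * b1 + (i128)a1 * b0;
    i128 folded = (i128)a0 * b0 + (i128)a1 * b1
                + ((i128)a0 - a1) * ((i128)b1 - b0);
    assert(cross == folded);
}

int main(void) {
    check(0x7FFFFFFFFFFFFFFFULL, 1, 2, 0x7FFFFFFFFFFFFFFFULL);
    check(0x123456789ABCDEF0ULL, 0x0FEDCBA987654321ULL,
          0x25A5A5A5A5A5A5A5ULL, 0x5A5A5A5A5A5A5A5AULL);
    check(0, 0, 0, 0);
    puts("difference-product identity verified");
    return 0;
}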
wlsfx/bnbb
62,471
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/poly1305/poly1305_arm_asm.S
#include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) # This implementation was taken from the public domain, neon2 version in # SUPERCOP by D. J. Bernstein and Peter Schwabe. # qhasm: int32 input_0 # qhasm: int32 input_1 # qhasm: int32 input_2 # qhasm: int32 input_3 # qhasm: stack32 input_4 # qhasm: stack32 input_5 # qhasm: stack32 input_6 # qhasm: stack32 input_7 # qhasm: int32 caller_r4 # qhasm: int32 caller_r5 # qhasm: int32 caller_r6 # qhasm: int32 caller_r7 # qhasm: int32 caller_r8 # qhasm: int32 caller_r9 # qhasm: int32 caller_r10 # qhasm: int32 caller_r11 # qhasm: int32 caller_r12 # qhasm: int32 caller_r14 # qhasm: reg128 caller_q4 # qhasm: reg128 caller_q5 # qhasm: reg128 caller_q6 # qhasm: reg128 caller_q7 # qhasm: startcode .fpu neon .text # qhasm: reg128 r0 # qhasm: reg128 r1 # qhasm: reg128 r2 # qhasm: reg128 r3 # qhasm: reg128 r4 # qhasm: reg128 x01 # qhasm: reg128 x23 # qhasm: reg128 x4 # qhasm: reg128 y0 # qhasm: reg128 y12 # qhasm: reg128 y34 # qhasm: reg128 5y12 # qhasm: reg128 5y34 # qhasm: stack128 y0_stack # qhasm: stack128 y12_stack # qhasm: stack128 y34_stack # qhasm: stack128 5y12_stack # qhasm: stack128 5y34_stack # qhasm: reg128 z0 # qhasm: reg128 z12 # qhasm: reg128 z34 # qhasm: reg128 5z12 # qhasm: reg128 5z34 # qhasm: stack128 z0_stack # qhasm: stack128 z12_stack # qhasm: stack128 z34_stack # qhasm: stack128 5z12_stack # qhasm: stack128 5z34_stack # qhasm: stack128 two24 # qhasm: int32 ptr # qhasm: reg128 c01 # qhasm: reg128 c23 # qhasm: reg128 d01 # qhasm: reg128 d23 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 t2 # qhasm: reg128 t3 # qhasm: reg128 t4 # qhasm: reg128 mask # qhasm: reg128 u0 # qhasm: reg128 u1 # qhasm: reg128 u2 # qhasm: reg128 u3 # qhasm: reg128 u4 # qhasm: reg128 v01 # qhasm: reg128 mid # qhasm: reg128 v23 # qhasm: reg128 v4 # qhasm: int32 len # qhasm: qpushenter crypto_onetimeauth_poly1305_neon2_blocks .align 4 .global openssl_poly1305_neon2_blocks .hidden openssl_poly1305_neon2_blocks .type openssl_poly1305_neon2_blocks STT_FUNC openssl_poly1305_neon2_blocks: vpush {q4,q5,q6,q7} mov r12,sp sub sp,sp,#192 bic sp,sp,#31 # qhasm: len = input_3 # asm 1: mov >len=int32#4,<input_3=int32#4 # asm 2: mov >len=r3,<input_3=r3 mov r3,r3 # qhasm: new y0 # qhasm: y0 = mem64[input_1]y0[1]; input_1 += 8 # asm 1: vld1.8 {<y0=reg128#1%bot},[<input_1=int32#2]! # asm 2: vld1.8 {<y0=d0},[<input_1=r1]! vld1.8 {d0},[r1]! # qhasm: y12 = mem128[input_1]; input_1 += 16 # asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<input_1=int32#2]! # asm 2: vld1.8 {>y12=d2->y12=d3},[<input_1=r1]! vld1.8 {d2-d3},[r1]! # qhasm: y34 = mem128[input_1]; input_1 += 16 # asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<input_1=int32#2]! # asm 2: vld1.8 {>y34=d4->y34=d5},[<input_1=r1]! vld1.8 {d4-d5},[r1]! # qhasm: input_1 += 8 # asm 1: add >input_1=int32#2,<input_1=int32#2,#8 # asm 2: add >input_1=r1,<input_1=r1,#8 add r1,r1,#8 # qhasm: new z0 # qhasm: z0 = mem64[input_1]z0[1]; input_1 += 8 # asm 1: vld1.8 {<z0=reg128#4%bot},[<input_1=int32#2]! # asm 2: vld1.8 {<z0=d6},[<input_1=r1]! vld1.8 {d6},[r1]! # qhasm: z12 = mem128[input_1]; input_1 += 16 # asm 1: vld1.8 {>z12=reg128#5%bot->z12=reg128#5%top},[<input_1=int32#2]! # asm 2: vld1.8 {>z12=d8->z12=d9},[<input_1=r1]! vld1.8 {d8-d9},[r1]! # qhasm: z34 = mem128[input_1]; input_1 += 16 # asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<input_1=int32#2]! # asm 2: vld1.8 {>z34=d10->z34=d11},[<input_1=r1]! vld1.8 {d10-d11},[r1]! 
# qhasm: 2x mask = 0xffffffff # asm 1: vmov.i64 >mask=reg128#7,#0xffffffff # asm 2: vmov.i64 >mask=q6,#0xffffffff vmov.i64 q6,#0xffffffff # qhasm: 2x u4 = 0xff # asm 1: vmov.i64 >u4=reg128#8,#0xff # asm 2: vmov.i64 >u4=q7,#0xff vmov.i64 q7,#0xff # qhasm: x01 aligned= mem128[input_0];input_0+=16 # asm 1: vld1.8 {>x01=reg128#9%bot->x01=reg128#9%top},[<input_0=int32#1,: 128]! # asm 2: vld1.8 {>x01=d16->x01=d17},[<input_0=r0,: 128]! vld1.8 {d16-d17},[r0,: 128]! # qhasm: x23 aligned= mem128[input_0];input_0+=16 # asm 1: vld1.8 {>x23=reg128#10%bot->x23=reg128#10%top},[<input_0=int32#1,: 128]! # asm 2: vld1.8 {>x23=d18->x23=d19},[<input_0=r0,: 128]! vld1.8 {d18-d19},[r0,: 128]! # qhasm: x4 aligned= mem64[input_0]x4[1] # asm 1: vld1.8 {<x4=reg128#11%bot},[<input_0=int32#1,: 64] # asm 2: vld1.8 {<x4=d20},[<input_0=r0,: 64] vld1.8 {d20},[r0,: 64] # qhasm: input_0 -= 32 # asm 1: sub >input_0=int32#1,<input_0=int32#1,#32 # asm 2: sub >input_0=r0,<input_0=r0,#32 sub r0,r0,#32 # qhasm: 2x mask unsigned>>=6 # asm 1: vshr.u64 >mask=reg128#7,<mask=reg128#7,#6 # asm 2: vshr.u64 >mask=q6,<mask=q6,#6 vshr.u64 q6,q6,#6 # qhasm: 2x u4 unsigned>>= 7 # asm 1: vshr.u64 >u4=reg128#8,<u4=reg128#8,#7 # asm 2: vshr.u64 >u4=q7,<u4=q7,#7 vshr.u64 q7,q7,#7 # qhasm: 4x 5y12 = y12 << 2 # asm 1: vshl.i32 >5y12=reg128#12,<y12=reg128#2,#2 # asm 2: vshl.i32 >5y12=q11,<y12=q1,#2 vshl.i32 q11,q1,#2 # qhasm: 4x 5y34 = y34 << 2 # asm 1: vshl.i32 >5y34=reg128#13,<y34=reg128#3,#2 # asm 2: vshl.i32 >5y34=q12,<y34=q2,#2 vshl.i32 q12,q2,#2 # qhasm: 4x 5y12 += y12 # asm 1: vadd.i32 >5y12=reg128#12,<5y12=reg128#12,<y12=reg128#2 # asm 2: vadd.i32 >5y12=q11,<5y12=q11,<y12=q1 vadd.i32 q11,q11,q1 # qhasm: 4x 5y34 += y34 # asm 1: vadd.i32 >5y34=reg128#13,<5y34=reg128#13,<y34=reg128#3 # asm 2: vadd.i32 >5y34=q12,<5y34=q12,<y34=q2 vadd.i32 q12,q12,q2 # qhasm: 2x u4 <<= 24 # asm 1: vshl.i64 >u4=reg128#8,<u4=reg128#8,#24 # asm 2: vshl.i64 >u4=q7,<u4=q7,#24 vshl.i64 q7,q7,#24 # qhasm: 4x 5z12 = z12 << 2 # asm 1: vshl.i32 >5z12=reg128#14,<z12=reg128#5,#2 # asm 2: vshl.i32 >5z12=q13,<z12=q4,#2 vshl.i32 q13,q4,#2 # qhasm: 4x 5z34 = z34 << 2 # asm 1: vshl.i32 >5z34=reg128#15,<z34=reg128#6,#2 # asm 2: vshl.i32 >5z34=q14,<z34=q5,#2 vshl.i32 q14,q5,#2 # qhasm: 4x 5z12 += z12 # asm 1: vadd.i32 >5z12=reg128#14,<5z12=reg128#14,<z12=reg128#5 # asm 2: vadd.i32 >5z12=q13,<5z12=q13,<z12=q4 vadd.i32 q13,q13,q4 # qhasm: 4x 5z34 += z34 # asm 1: vadd.i32 >5z34=reg128#15,<5z34=reg128#15,<z34=reg128#6 # asm 2: vadd.i32 >5z34=q14,<5z34=q14,<z34=q5 vadd.i32 q14,q14,q5 # qhasm: new two24 # qhasm: new y0_stack # qhasm: new y12_stack # qhasm: new y34_stack # qhasm: new 5y12_stack # qhasm: new 5y34_stack # qhasm: new z0_stack # qhasm: new z12_stack # qhasm: new z34_stack # qhasm: new 5z12_stack # qhasm: new 5z34_stack # qhasm: ptr = &two24 # asm 1: lea >ptr=int32#2,<two24=stack128#1 # asm 2: lea >ptr=r1,<two24=[sp,#0] add r1,sp,#0 # qhasm: mem128[ptr] aligned= u4 # asm 1: vst1.8 {<u4=reg128#8%bot-<u4=reg128#8%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<u4=d14-<u4=d15},[<ptr=r1,: 128] vst1.8 {d14-d15},[r1,: 128] # qhasm: r4 = u4 # asm 1: vmov >r4=reg128#16,<u4=reg128#8 # asm 2: vmov >r4=q15,<u4=q7 vmov q15,q7 # qhasm: r0 = u4 # asm 1: vmov >r0=reg128#8,<u4=reg128#8 # asm 2: vmov >r0=q7,<u4=q7 vmov q7,q7 # qhasm: ptr = &y0_stack # asm 1: lea >ptr=int32#2,<y0_stack=stack128#2 # asm 2: lea >ptr=r1,<y0_stack=[sp,#16] add r1,sp,#16 # qhasm: mem128[ptr] aligned= y0 # asm 1: vst1.8 {<y0=reg128#1%bot-<y0=reg128#1%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<y0=d0-<y0=d1},[<ptr=r1,: 
128] vst1.8 {d0-d1},[r1,: 128] # qhasm: ptr = &y12_stack # asm 1: lea >ptr=int32#2,<y12_stack=stack128#3 # asm 2: lea >ptr=r1,<y12_stack=[sp,#32] add r1,sp,#32 # qhasm: mem128[ptr] aligned= y12 # asm 1: vst1.8 {<y12=reg128#2%bot-<y12=reg128#2%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<y12=d2-<y12=d3},[<ptr=r1,: 128] vst1.8 {d2-d3},[r1,: 128] # qhasm: ptr = &y34_stack # asm 1: lea >ptr=int32#2,<y34_stack=stack128#4 # asm 2: lea >ptr=r1,<y34_stack=[sp,#48] add r1,sp,#48 # qhasm: mem128[ptr] aligned= y34 # asm 1: vst1.8 {<y34=reg128#3%bot-<y34=reg128#3%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<y34=d4-<y34=d5},[<ptr=r1,: 128] vst1.8 {d4-d5},[r1,: 128] # qhasm: ptr = &z0_stack # asm 1: lea >ptr=int32#2,<z0_stack=stack128#7 # asm 2: lea >ptr=r1,<z0_stack=[sp,#96] add r1,sp,#96 # qhasm: mem128[ptr] aligned= z0 # asm 1: vst1.8 {<z0=reg128#4%bot-<z0=reg128#4%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<z0=d6-<z0=d7},[<ptr=r1,: 128] vst1.8 {d6-d7},[r1,: 128] # qhasm: ptr = &z12_stack # asm 1: lea >ptr=int32#2,<z12_stack=stack128#8 # asm 2: lea >ptr=r1,<z12_stack=[sp,#112] add r1,sp,#112 # qhasm: mem128[ptr] aligned= z12 # asm 1: vst1.8 {<z12=reg128#5%bot-<z12=reg128#5%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<z12=d8-<z12=d9},[<ptr=r1,: 128] vst1.8 {d8-d9},[r1,: 128] # qhasm: ptr = &z34_stack # asm 1: lea >ptr=int32#2,<z34_stack=stack128#9 # asm 2: lea >ptr=r1,<z34_stack=[sp,#128] add r1,sp,#128 # qhasm: mem128[ptr] aligned= z34 # asm 1: vst1.8 {<z34=reg128#6%bot-<z34=reg128#6%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<z34=d10-<z34=d11},[<ptr=r1,: 128] vst1.8 {d10-d11},[r1,: 128] # qhasm: ptr = &5y12_stack # asm 1: lea >ptr=int32#2,<5y12_stack=stack128#5 # asm 2: lea >ptr=r1,<5y12_stack=[sp,#64] add r1,sp,#64 # qhasm: mem128[ptr] aligned= 5y12 # asm 1: vst1.8 {<5y12=reg128#12%bot-<5y12=reg128#12%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<5y12=d22-<5y12=d23},[<ptr=r1,: 128] vst1.8 {d22-d23},[r1,: 128] # qhasm: ptr = &5y34_stack # asm 1: lea >ptr=int32#2,<5y34_stack=stack128#6 # asm 2: lea >ptr=r1,<5y34_stack=[sp,#80] add r1,sp,#80 # qhasm: mem128[ptr] aligned= 5y34 # asm 1: vst1.8 {<5y34=reg128#13%bot-<5y34=reg128#13%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<5y34=d24-<5y34=d25},[<ptr=r1,: 128] vst1.8 {d24-d25},[r1,: 128] # qhasm: ptr = &5z12_stack # asm 1: lea >ptr=int32#2,<5z12_stack=stack128#10 # asm 2: lea >ptr=r1,<5z12_stack=[sp,#144] add r1,sp,#144 # qhasm: mem128[ptr] aligned= 5z12 # asm 1: vst1.8 {<5z12=reg128#14%bot-<5z12=reg128#14%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<5z12=d26-<5z12=d27},[<ptr=r1,: 128] vst1.8 {d26-d27},[r1,: 128] # qhasm: ptr = &5z34_stack # asm 1: lea >ptr=int32#2,<5z34_stack=stack128#11 # asm 2: lea >ptr=r1,<5z34_stack=[sp,#160] add r1,sp,#160 # qhasm: mem128[ptr] aligned= 5z34 # asm 1: vst1.8 {<5z34=reg128#15%bot-<5z34=reg128#15%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<5z34=d28-<5z34=d29},[<ptr=r1,: 128] vst1.8 {d28-d29},[r1,: 128] # qhasm: unsigned>? len - 64 # asm 1: cmp <len=int32#4,#64 # asm 2: cmp <len=r3,#64 cmp r3,#64 # qhasm: goto below64bytes if !unsigned> bls ._below64bytes # qhasm: input_2 += 32 # asm 1: add >input_2=int32#2,<input_2=int32#3,#32 # asm 2: add >input_2=r1,<input_2=r2,#32 add r1,r2,#32 # qhasm: mainloop2: ._mainloop2: # qhasm: c01 = mem128[input_2];input_2+=16 # asm 1: vld1.8 {>c01=reg128#1%bot->c01=reg128#1%top},[<input_2=int32#2]! # asm 2: vld1.8 {>c01=d0->c01=d1},[<input_2=r1]! vld1.8 {d0-d1},[r1]! # qhasm: c23 = mem128[input_2];input_2+=16 # asm 1: vld1.8 {>c23=reg128#2%bot->c23=reg128#2%top},[<input_2=int32#2]! 
# asm 2: vld1.8 {>c23=d2->c23=d3},[<input_2=r1]! vld1.8 {d2-d3},[r1]! # qhasm: r4[0,1] += x01[0] unsigned* z34[2]; r4[2,3] += x01[1] unsigned* z34[3] # asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%bot,<z34=reg128#6%top # asm 2: vmlal.u32 <r4=q15,<x01=d16,<z34=d11 vmlal.u32 q15,d16,d11 # qhasm: ptr = &z12_stack # asm 1: lea >ptr=int32#3,<z12_stack=stack128#8 # asm 2: lea >ptr=r2,<z12_stack=[sp,#112] add r2,sp,#112 # qhasm: z12 aligned= mem128[ptr] # asm 1: vld1.8 {>z12=reg128#3%bot->z12=reg128#3%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>z12=d4->z12=d5},[<ptr=r2,: 128] vld1.8 {d4-d5},[r2,: 128] # qhasm: r4[0,1] += x01[2] unsigned* z34[0]; r4[2,3] += x01[3] unsigned* z34[1] # asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%top,<z34=reg128#6%bot # asm 2: vmlal.u32 <r4=q15,<x01=d17,<z34=d10 vmlal.u32 q15,d17,d10 # qhasm: ptr = &z0_stack # asm 1: lea >ptr=int32#3,<z0_stack=stack128#7 # asm 2: lea >ptr=r2,<z0_stack=[sp,#96] add r2,sp,#96 # qhasm: z0 aligned= mem128[ptr] # asm 1: vld1.8 {>z0=reg128#4%bot->z0=reg128#4%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>z0=d6->z0=d7},[<ptr=r2,: 128] vld1.8 {d6-d7},[r2,: 128] # qhasm: r4[0,1] += x23[0] unsigned* z12[2]; r4[2,3] += x23[1] unsigned* z12[3] # asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%bot,<z12=reg128#3%top # asm 2: vmlal.u32 <r4=q15,<x23=d18,<z12=d5 vmlal.u32 q15,d18,d5 # qhasm: c01 c23 = c01[0]c01[1]c01[2]c23[2]c23[0]c23[1]c01[3]c23[3] # asm 1: vtrn.32 <c01=reg128#1%top,<c23=reg128#2%top # asm 2: vtrn.32 <c01=d1,<c23=d3 vtrn.32 d1,d3 # qhasm: r4[0,1] += x23[2] unsigned* z12[0]; r4[2,3] += x23[3] unsigned* z12[1] # asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%top,<z12=reg128#3%bot # asm 2: vmlal.u32 <r4=q15,<x23=d19,<z12=d4 vmlal.u32 q15,d19,d4 # qhasm: r4[0,1] += x4[0] unsigned* z0[0]; r4[2,3] += x4[1] unsigned* z0[1] # asm 1: vmlal.u32 <r4=reg128#16,<x4=reg128#11%bot,<z0=reg128#4%bot # asm 2: vmlal.u32 <r4=q15,<x4=d20,<z0=d6 vmlal.u32 q15,d20,d6 # qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18 # asm 1: vshll.u32 >r3=reg128#5,<c23=reg128#2%top,#18 # asm 2: vshll.u32 >r3=q4,<c23=d3,#18 vshll.u32 q4,d3,#18 # qhasm: c01 c23 = c01[0]c23[0]c01[2]c01[3]c01[1]c23[1]c23[2]c23[3] # asm 1: vtrn.32 <c01=reg128#1%bot,<c23=reg128#2%bot # asm 2: vtrn.32 <c01=d0,<c23=d2 vtrn.32 d0,d2 # qhasm: r3[0,1] += x01[0] unsigned* z34[0]; r3[2,3] += x01[1] unsigned* z34[1] # asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%bot,<z34=reg128#6%bot # asm 2: vmlal.u32 <r3=q4,<x01=d16,<z34=d10 vmlal.u32 q4,d16,d10 # qhasm: r3[0,1] += x01[2] unsigned* z12[2]; r3[2,3] += x01[3] unsigned* z12[3] # asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%top,<z12=reg128#3%top # asm 2: vmlal.u32 <r3=q4,<x01=d17,<z12=d5 vmlal.u32 q4,d17,d5 # qhasm: r0 = r0[1]c01[0]r0[2,3] # asm 1: vext.32 <r0=reg128#8%bot,<r0=reg128#8%bot,<c01=reg128#1%bot,#1 # asm 2: vext.32 <r0=d14,<r0=d14,<c01=d0,#1 vext.32 d14,d14,d0,#1 # qhasm: r3[0,1] += x23[0] unsigned* z12[0]; r3[2,3] += x23[1] unsigned* z12[1] # asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%bot,<z12=reg128#3%bot # asm 2: vmlal.u32 <r3=q4,<x23=d18,<z12=d4 vmlal.u32 q4,d18,d4 # qhasm: input_2 -= 64 # asm 1: sub >input_2=int32#2,<input_2=int32#2,#64 # asm 2: sub >input_2=r1,<input_2=r1,#64 sub r1,r1,#64 # qhasm: r3[0,1] += x23[2] unsigned* z0[0]; r3[2,3] += x23[3] unsigned* z0[1] # asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%top,<z0=reg128#4%bot # asm 2: vmlal.u32 <r3=q4,<x23=d19,<z0=d6 vmlal.u32 q4,d19,d6 # qhasm: ptr = &5z34_stack # asm 1: lea >ptr=int32#3,<5z34_stack=stack128#11 # asm 2: lea >ptr=r2,<5z34_stack=[sp,#160] add r2,sp,#160 # qhasm: 5z34 
aligned= mem128[ptr] # asm 1: vld1.8 {>5z34=reg128#6%bot->5z34=reg128#6%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>5z34=d10->5z34=d11},[<ptr=r2,: 128] vld1.8 {d10-d11},[r2,: 128] # qhasm: r3[0,1] += x4[0] unsigned* 5z34[2]; r3[2,3] += x4[1] unsigned* 5z34[3] # asm 1: vmlal.u32 <r3=reg128#5,<x4=reg128#11%bot,<5z34=reg128#6%top # asm 2: vmlal.u32 <r3=q4,<x4=d20,<5z34=d11 vmlal.u32 q4,d20,d11 # qhasm: r0 = r0[1]r0[0]r0[3]r0[2] # asm 1: vrev64.i32 >r0=reg128#8,<r0=reg128#8 # asm 2: vrev64.i32 >r0=q7,<r0=q7 vrev64.i32 q7,q7 # qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12 # asm 1: vshll.u32 >r2=reg128#14,<c01=reg128#1%top,#12 # asm 2: vshll.u32 >r2=q13,<c01=d1,#12 vshll.u32 q13,d1,#12 # qhasm: d01 = mem128[input_2];input_2+=16 # asm 1: vld1.8 {>d01=reg128#12%bot->d01=reg128#12%top},[<input_2=int32#2]! # asm 2: vld1.8 {>d01=d22->d01=d23},[<input_2=r1]! vld1.8 {d22-d23},[r1]! # qhasm: r2[0,1] += x01[0] unsigned* z12[2]; r2[2,3] += x01[1] unsigned* z12[3] # asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%bot,<z12=reg128#3%top # asm 2: vmlal.u32 <r2=q13,<x01=d16,<z12=d5 vmlal.u32 q13,d16,d5 # qhasm: r2[0,1] += x01[2] unsigned* z12[0]; r2[2,3] += x01[3] unsigned* z12[1] # asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%top,<z12=reg128#3%bot # asm 2: vmlal.u32 <r2=q13,<x01=d17,<z12=d4 vmlal.u32 q13,d17,d4 # qhasm: r2[0,1] += x23[0] unsigned* z0[0]; r2[2,3] += x23[1] unsigned* z0[1] # asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%bot,<z0=reg128#4%bot # asm 2: vmlal.u32 <r2=q13,<x23=d18,<z0=d6 vmlal.u32 q13,d18,d6 # qhasm: r2[0,1] += x23[2] unsigned* 5z34[2]; r2[2,3] += x23[3] unsigned* 5z34[3] # asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%top,<5z34=reg128#6%top # asm 2: vmlal.u32 <r2=q13,<x23=d19,<5z34=d11 vmlal.u32 q13,d19,d11 # qhasm: r2[0,1] += x4[0] unsigned* 5z34[0]; r2[2,3] += x4[1] unsigned* 5z34[1] # asm 1: vmlal.u32 <r2=reg128#14,<x4=reg128#11%bot,<5z34=reg128#6%bot # asm 2: vmlal.u32 <r2=q13,<x4=d20,<5z34=d10 vmlal.u32 q13,d20,d10 # qhasm: r0 = r0[0,1]c01[1]r0[2] # asm 1: vext.32 <r0=reg128#8%top,<c01=reg128#1%bot,<r0=reg128#8%top,#1 # asm 2: vext.32 <r0=d15,<c01=d0,<r0=d15,#1 vext.32 d15,d0,d15,#1 # qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6 # asm 1: vshll.u32 >r1=reg128#15,<c23=reg128#2%bot,#6 # asm 2: vshll.u32 >r1=q14,<c23=d2,#6 vshll.u32 q14,d2,#6 # qhasm: r1[0,1] += x01[0] unsigned* z12[0]; r1[2,3] += x01[1] unsigned* z12[1] # asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%bot,<z12=reg128#3%bot # asm 2: vmlal.u32 <r1=q14,<x01=d16,<z12=d4 vmlal.u32 q14,d16,d4 # qhasm: r1[0,1] += x01[2] unsigned* z0[0]; r1[2,3] += x01[3] unsigned* z0[1] # asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%top,<z0=reg128#4%bot # asm 2: vmlal.u32 <r1=q14,<x01=d17,<z0=d6 vmlal.u32 q14,d17,d6 # qhasm: r1[0,1] += x23[0] unsigned* 5z34[2]; r1[2,3] += x23[1] unsigned* 5z34[3] # asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%bot,<5z34=reg128#6%top # asm 2: vmlal.u32 <r1=q14,<x23=d18,<5z34=d11 vmlal.u32 q14,d18,d11 # qhasm: r1[0,1] += x23[2] unsigned* 5z34[0]; r1[2,3] += x23[3] unsigned* 5z34[1] # asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%top,<5z34=reg128#6%bot # asm 2: vmlal.u32 <r1=q14,<x23=d19,<5z34=d10 vmlal.u32 q14,d19,d10 # qhasm: ptr = &5z12_stack # asm 1: lea >ptr=int32#3,<5z12_stack=stack128#10 # asm 2: lea >ptr=r2,<5z12_stack=[sp,#144] add r2,sp,#144 # qhasm: 5z12 aligned= mem128[ptr] # asm 1: vld1.8 {>5z12=reg128#1%bot->5z12=reg128#1%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>5z12=d0->5z12=d1},[<ptr=r2,: 128] vld1.8 {d0-d1},[r2,: 128] # qhasm: r1[0,1] += x4[0] unsigned* 5z12[2]; r1[2,3] += x4[1] 
unsigned* 5z12[3] # asm 1: vmlal.u32 <r1=reg128#15,<x4=reg128#11%bot,<5z12=reg128#1%top # asm 2: vmlal.u32 <r1=q14,<x4=d20,<5z12=d1 vmlal.u32 q14,d20,d1 # qhasm: d23 = mem128[input_2];input_2+=16 # asm 1: vld1.8 {>d23=reg128#2%bot->d23=reg128#2%top},[<input_2=int32#2]! # asm 2: vld1.8 {>d23=d2->d23=d3},[<input_2=r1]! vld1.8 {d2-d3},[r1]! # qhasm: input_2 += 32 # asm 1: add >input_2=int32#2,<input_2=int32#2,#32 # asm 2: add >input_2=r1,<input_2=r1,#32 add r1,r1,#32 # qhasm: r0[0,1] += x4[0] unsigned* 5z12[0]; r0[2,3] += x4[1] unsigned* 5z12[1] # asm 1: vmlal.u32 <r0=reg128#8,<x4=reg128#11%bot,<5z12=reg128#1%bot # asm 2: vmlal.u32 <r0=q7,<x4=d20,<5z12=d0 vmlal.u32 q7,d20,d0 # qhasm: r0[0,1] += x23[0] unsigned* 5z34[0]; r0[2,3] += x23[1] unsigned* 5z34[1] # asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%bot,<5z34=reg128#6%bot # asm 2: vmlal.u32 <r0=q7,<x23=d18,<5z34=d10 vmlal.u32 q7,d18,d10 # qhasm: d01 d23 = d01[0] d23[0] d01[1] d23[1] # asm 1: vswp <d23=reg128#2%bot,<d01=reg128#12%top # asm 2: vswp <d23=d2,<d01=d23 vswp d2,d23 # qhasm: r0[0,1] += x23[2] unsigned* 5z12[2]; r0[2,3] += x23[3] unsigned* 5z12[3] # asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%top,<5z12=reg128#1%top # asm 2: vmlal.u32 <r0=q7,<x23=d19,<5z12=d1 vmlal.u32 q7,d19,d1 # qhasm: r0[0,1] += x01[0] unsigned* z0[0]; r0[2,3] += x01[1] unsigned* z0[1] # asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%bot,<z0=reg128#4%bot # asm 2: vmlal.u32 <r0=q7,<x01=d16,<z0=d6 vmlal.u32 q7,d16,d6 # qhasm: new mid # qhasm: 2x v4 = d23 unsigned>> 40 # asm 1: vshr.u64 >v4=reg128#4,<d23=reg128#2,#40 # asm 2: vshr.u64 >v4=q3,<d23=q1,#40 vshr.u64 q3,q1,#40 # qhasm: mid = d01[1]d23[0] mid[2,3] # asm 1: vext.32 <mid=reg128#1%bot,<d01=reg128#12%bot,<d23=reg128#2%bot,#1 # asm 2: vext.32 <mid=d0,<d01=d22,<d23=d2,#1 vext.32 d0,d22,d2,#1 # qhasm: new v23 # qhasm: v23[2] = d23[0,1] unsigned>> 14; v23[3] = d23[2,3] unsigned>> 14 # asm 1: vshrn.u64 <v23=reg128#10%top,<d23=reg128#2,#14 # asm 2: vshrn.u64 <v23=d19,<d23=q1,#14 vshrn.u64 d19,q1,#14 # qhasm: mid = mid[0,1] d01[3]d23[2] # asm 1: vext.32 <mid=reg128#1%top,<d01=reg128#12%top,<d23=reg128#2%top,#1 # asm 2: vext.32 <mid=d1,<d01=d23,<d23=d3,#1 vext.32 d1,d23,d3,#1 # qhasm: new v01 # qhasm: v01[2] = d01[0,1] unsigned>> 26; v01[3] = d01[2,3] unsigned>> 26 # asm 1: vshrn.u64 <v01=reg128#11%top,<d01=reg128#12,#26 # asm 2: vshrn.u64 <v01=d21,<d01=q11,#26 vshrn.u64 d21,q11,#26 # qhasm: v01 = d01[1]d01[0] v01[2,3] # asm 1: vext.32 <v01=reg128#11%bot,<d01=reg128#12%bot,<d01=reg128#12%bot,#1 # asm 2: vext.32 <v01=d20,<d01=d22,<d01=d22,#1 vext.32 d20,d22,d22,#1 # qhasm: r0[0,1] += x01[2] unsigned* 5z34[2]; r0[2,3] += x01[3] unsigned* 5z34[3] # asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%top,<5z34=reg128#6%top # asm 2: vmlal.u32 <r0=q7,<x01=d17,<5z34=d11 vmlal.u32 q7,d17,d11 # qhasm: v01 = v01[1]d01[2] v01[2,3] # asm 1: vext.32 <v01=reg128#11%bot,<v01=reg128#11%bot,<d01=reg128#12%top,#1 # asm 2: vext.32 <v01=d20,<v01=d20,<d01=d23,#1 vext.32 d20,d20,d23,#1 # qhasm: v23[0] = mid[0,1] unsigned>> 20; v23[1] = mid[2,3] unsigned>> 20 # asm 1: vshrn.u64 <v23=reg128#10%bot,<mid=reg128#1,#20 # asm 2: vshrn.u64 <v23=d18,<mid=q0,#20 vshrn.u64 d18,q0,#20 # qhasm: v4 = v4[0]v4[2]v4[1]v4[3] # asm 1: vtrn.32 <v4=reg128#4%bot,<v4=reg128#4%top # asm 2: vtrn.32 <v4=d6,<v4=d7 vtrn.32 d6,d7 # qhasm: 4x v01 &= 0x03ffffff # asm 1: vand.i32 <v01=reg128#11,#0x03ffffff # asm 2: vand.i32 <v01=q10,#0x03ffffff vand.i32 q10,#0x03ffffff # qhasm: ptr = &y34_stack # asm 1: lea >ptr=int32#3,<y34_stack=stack128#4 # asm 2: lea 
>ptr=r2,<y34_stack=[sp,#48] add r2,sp,#48 # qhasm: y34 aligned= mem128[ptr] # asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>y34=d4->y34=d5},[<ptr=r2,: 128] vld1.8 {d4-d5},[r2,: 128] # qhasm: 4x v23 &= 0x03ffffff # asm 1: vand.i32 <v23=reg128#10,#0x03ffffff # asm 2: vand.i32 <v23=q9,#0x03ffffff vand.i32 q9,#0x03ffffff # qhasm: ptr = &y12_stack # asm 1: lea >ptr=int32#3,<y12_stack=stack128#3 # asm 2: lea >ptr=r2,<y12_stack=[sp,#32] add r2,sp,#32 # qhasm: y12 aligned= mem128[ptr] # asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>y12=d2->y12=d3},[<ptr=r2,: 128] vld1.8 {d2-d3},[r2,: 128] # qhasm: 4x v4 |= 0x01000000 # asm 1: vorr.i32 <v4=reg128#4,#0x01000000 # asm 2: vorr.i32 <v4=q3,#0x01000000 vorr.i32 q3,#0x01000000 # qhasm: ptr = &y0_stack # asm 1: lea >ptr=int32#3,<y0_stack=stack128#2 # asm 2: lea >ptr=r2,<y0_stack=[sp,#16] add r2,sp,#16 # qhasm: y0 aligned= mem128[ptr] # asm 1: vld1.8 {>y0=reg128#1%bot->y0=reg128#1%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>y0=d0->y0=d1},[<ptr=r2,: 128] vld1.8 {d0-d1},[r2,: 128] # qhasm: r4[0,1] += v01[0] unsigned* y34[2]; r4[2,3] += v01[1] unsigned* y34[3] # asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%bot,<y34=reg128#3%top # asm 2: vmlal.u32 <r4=q15,<v01=d20,<y34=d5 vmlal.u32 q15,d20,d5 # qhasm: r4[0,1] += v01[2] unsigned* y34[0]; r4[2,3] += v01[3] unsigned* y34[1] # asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%top,<y34=reg128#3%bot # asm 2: vmlal.u32 <r4=q15,<v01=d21,<y34=d4 vmlal.u32 q15,d21,d4 # qhasm: r4[0,1] += v23[0] unsigned* y12[2]; r4[2,3] += v23[1] unsigned* y12[3] # asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%bot,<y12=reg128#2%top # asm 2: vmlal.u32 <r4=q15,<v23=d18,<y12=d3 vmlal.u32 q15,d18,d3 # qhasm: r4[0,1] += v23[2] unsigned* y12[0]; r4[2,3] += v23[3] unsigned* y12[1] # asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%top,<y12=reg128#2%bot # asm 2: vmlal.u32 <r4=q15,<v23=d19,<y12=d2 vmlal.u32 q15,d19,d2 # qhasm: r4[0,1] += v4[0] unsigned* y0[0]; r4[2,3] += v4[1] unsigned* y0[1] # asm 1: vmlal.u32 <r4=reg128#16,<v4=reg128#4%bot,<y0=reg128#1%bot # asm 2: vmlal.u32 <r4=q15,<v4=d6,<y0=d0 vmlal.u32 q15,d6,d0 # qhasm: ptr = &5y34_stack # asm 1: lea >ptr=int32#3,<5y34_stack=stack128#6 # asm 2: lea >ptr=r2,<5y34_stack=[sp,#80] add r2,sp,#80 # qhasm: 5y34 aligned= mem128[ptr] # asm 1: vld1.8 {>5y34=reg128#13%bot->5y34=reg128#13%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>5y34=d24->5y34=d25},[<ptr=r2,: 128] vld1.8 {d24-d25},[r2,: 128] # qhasm: r3[0,1] += v01[0] unsigned* y34[0]; r3[2,3] += v01[1] unsigned* y34[1] # asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%bot,<y34=reg128#3%bot # asm 2: vmlal.u32 <r3=q4,<v01=d20,<y34=d4 vmlal.u32 q4,d20,d4 # qhasm: r3[0,1] += v01[2] unsigned* y12[2]; r3[2,3] += v01[3] unsigned* y12[3] # asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%top,<y12=reg128#2%top # asm 2: vmlal.u32 <r3=q4,<v01=d21,<y12=d3 vmlal.u32 q4,d21,d3 # qhasm: r3[0,1] += v23[0] unsigned* y12[0]; r3[2,3] += v23[1] unsigned* y12[1] # asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%bot,<y12=reg128#2%bot # asm 2: vmlal.u32 <r3=q4,<v23=d18,<y12=d2 vmlal.u32 q4,d18,d2 # qhasm: r3[0,1] += v23[2] unsigned* y0[0]; r3[2,3] += v23[3] unsigned* y0[1] # asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%top,<y0=reg128#1%bot # asm 2: vmlal.u32 <r3=q4,<v23=d19,<y0=d0 vmlal.u32 q4,d19,d0 # qhasm: r3[0,1] += v4[0] unsigned* 5y34[2]; r3[2,3] += v4[1] unsigned* 5y34[3] # asm 1: vmlal.u32 <r3=reg128#5,<v4=reg128#4%bot,<5y34=reg128#13%top # asm 2: vmlal.u32 <r3=q4,<v4=d6,<5y34=d25 vmlal.u32 
q4,d6,d25 # qhasm: ptr = &5y12_stack # asm 1: lea >ptr=int32#3,<5y12_stack=stack128#5 # asm 2: lea >ptr=r2,<5y12_stack=[sp,#64] add r2,sp,#64 # qhasm: 5y12 aligned= mem128[ptr] # asm 1: vld1.8 {>5y12=reg128#12%bot->5y12=reg128#12%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>5y12=d22->5y12=d23},[<ptr=r2,: 128] vld1.8 {d22-d23},[r2,: 128] # qhasm: r0[0,1] += v4[0] unsigned* 5y12[0]; r0[2,3] += v4[1] unsigned* 5y12[1] # asm 1: vmlal.u32 <r0=reg128#8,<v4=reg128#4%bot,<5y12=reg128#12%bot # asm 2: vmlal.u32 <r0=q7,<v4=d6,<5y12=d22 vmlal.u32 q7,d6,d22 # qhasm: r0[0,1] += v23[0] unsigned* 5y34[0]; r0[2,3] += v23[1] unsigned* 5y34[1] # asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%bot,<5y34=reg128#13%bot # asm 2: vmlal.u32 <r0=q7,<v23=d18,<5y34=d24 vmlal.u32 q7,d18,d24 # qhasm: r0[0,1] += v23[2] unsigned* 5y12[2]; r0[2,3] += v23[3] unsigned* 5y12[3] # asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%top,<5y12=reg128#12%top # asm 2: vmlal.u32 <r0=q7,<v23=d19,<5y12=d23 vmlal.u32 q7,d19,d23 # qhasm: r0[0,1] += v01[0] unsigned* y0[0]; r0[2,3] += v01[1] unsigned* y0[1] # asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%bot,<y0=reg128#1%bot # asm 2: vmlal.u32 <r0=q7,<v01=d20,<y0=d0 vmlal.u32 q7,d20,d0 # qhasm: r0[0,1] += v01[2] unsigned* 5y34[2]; r0[2,3] += v01[3] unsigned* 5y34[3] # asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%top,<5y34=reg128#13%top # asm 2: vmlal.u32 <r0=q7,<v01=d21,<5y34=d25 vmlal.u32 q7,d21,d25 # qhasm: r1[0,1] += v01[0] unsigned* y12[0]; r1[2,3] += v01[1] unsigned* y12[1] # asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%bot,<y12=reg128#2%bot # asm 2: vmlal.u32 <r1=q14,<v01=d20,<y12=d2 vmlal.u32 q14,d20,d2 # qhasm: r1[0,1] += v01[2] unsigned* y0[0]; r1[2,3] += v01[3] unsigned* y0[1] # asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%top,<y0=reg128#1%bot # asm 2: vmlal.u32 <r1=q14,<v01=d21,<y0=d0 vmlal.u32 q14,d21,d0 # qhasm: r1[0,1] += v23[0] unsigned* 5y34[2]; r1[2,3] += v23[1] unsigned* 5y34[3] # asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%bot,<5y34=reg128#13%top # asm 2: vmlal.u32 <r1=q14,<v23=d18,<5y34=d25 vmlal.u32 q14,d18,d25 # qhasm: r1[0,1] += v23[2] unsigned* 5y34[0]; r1[2,3] += v23[3] unsigned* 5y34[1] # asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%top,<5y34=reg128#13%bot # asm 2: vmlal.u32 <r1=q14,<v23=d19,<5y34=d24 vmlal.u32 q14,d19,d24 # qhasm: r1[0,1] += v4[0] unsigned* 5y12[2]; r1[2,3] += v4[1] unsigned* 5y12[3] # asm 1: vmlal.u32 <r1=reg128#15,<v4=reg128#4%bot,<5y12=reg128#12%top # asm 2: vmlal.u32 <r1=q14,<v4=d6,<5y12=d23 vmlal.u32 q14,d6,d23 # qhasm: r2[0,1] += v01[0] unsigned* y12[2]; r2[2,3] += v01[1] unsigned* y12[3] # asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%bot,<y12=reg128#2%top # asm 2: vmlal.u32 <r2=q13,<v01=d20,<y12=d3 vmlal.u32 q13,d20,d3 # qhasm: r2[0,1] += v01[2] unsigned* y12[0]; r2[2,3] += v01[3] unsigned* y12[1] # asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%top,<y12=reg128#2%bot # asm 2: vmlal.u32 <r2=q13,<v01=d21,<y12=d2 vmlal.u32 q13,d21,d2 # qhasm: r2[0,1] += v23[0] unsigned* y0[0]; r2[2,3] += v23[1] unsigned* y0[1] # asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%bot,<y0=reg128#1%bot # asm 2: vmlal.u32 <r2=q13,<v23=d18,<y0=d0 vmlal.u32 q13,d18,d0 # qhasm: r2[0,1] += v23[2] unsigned* 5y34[2]; r2[2,3] += v23[3] unsigned* 5y34[3] # asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%top,<5y34=reg128#13%top # asm 2: vmlal.u32 <r2=q13,<v23=d19,<5y34=d25 vmlal.u32 q13,d19,d25 # qhasm: r2[0,1] += v4[0] unsigned* 5y34[0]; r2[2,3] += v4[1] unsigned* 5y34[1] # asm 1: vmlal.u32 <r2=reg128#14,<v4=reg128#4%bot,<5y34=reg128#13%bot # asm 2: vmlal.u32 
<r2=q13,<v4=d6,<5y34=d24 vmlal.u32 q13,d6,d24 # qhasm: ptr = &two24 # asm 1: lea >ptr=int32#3,<two24=stack128#1 # asm 2: lea >ptr=r2,<two24=[sp,#0] add r2,sp,#0 # qhasm: 2x t1 = r0 unsigned>> 26 # asm 1: vshr.u64 >t1=reg128#4,<r0=reg128#8,#26 # asm 2: vshr.u64 >t1=q3,<r0=q7,#26 vshr.u64 q3,q7,#26 # qhasm: len -= 64 # asm 1: sub >len=int32#4,<len=int32#4,#64 # asm 2: sub >len=r3,<len=r3,#64 sub r3,r3,#64 # qhasm: r0 &= mask # asm 1: vand >r0=reg128#6,<r0=reg128#8,<mask=reg128#7 # asm 2: vand >r0=q5,<r0=q7,<mask=q6 vand q5,q7,q6 # qhasm: 2x r1 += t1 # asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#15,<t1=reg128#4 # asm 2: vadd.i64 >r1=q3,<r1=q14,<t1=q3 vadd.i64 q3,q14,q3 # qhasm: 2x t4 = r3 unsigned>> 26 # asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#5,#26 # asm 2: vshr.u64 >t4=q7,<r3=q4,#26 vshr.u64 q7,q4,#26 # qhasm: r3 &= mask # asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7 # asm 2: vand >r3=q4,<r3=q4,<mask=q6 vand q4,q4,q6 # qhasm: 2x x4 = r4 + t4 # asm 1: vadd.i64 >x4=reg128#8,<r4=reg128#16,<t4=reg128#8 # asm 2: vadd.i64 >x4=q7,<r4=q15,<t4=q7 vadd.i64 q7,q15,q7 # qhasm: r4 aligned= mem128[ptr] # asm 1: vld1.8 {>r4=reg128#16%bot->r4=reg128#16%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>r4=d30->r4=d31},[<ptr=r2,: 128] vld1.8 {d30-d31},[r2,: 128] # qhasm: 2x t2 = r1 unsigned>> 26 # asm 1: vshr.u64 >t2=reg128#9,<r1=reg128#4,#26 # asm 2: vshr.u64 >t2=q8,<r1=q3,#26 vshr.u64 q8,q3,#26 # qhasm: r1 &= mask # asm 1: vand >r1=reg128#4,<r1=reg128#4,<mask=reg128#7 # asm 2: vand >r1=q3,<r1=q3,<mask=q6 vand q3,q3,q6 # qhasm: 2x t0 = x4 unsigned>> 26 # asm 1: vshr.u64 >t0=reg128#10,<x4=reg128#8,#26 # asm 2: vshr.u64 >t0=q9,<x4=q7,#26 vshr.u64 q9,q7,#26 # qhasm: 2x r2 += t2 # asm 1: vadd.i64 >r2=reg128#9,<r2=reg128#14,<t2=reg128#9 # asm 2: vadd.i64 >r2=q8,<r2=q13,<t2=q8 vadd.i64 q8,q13,q8 # qhasm: x4 &= mask # asm 1: vand >x4=reg128#11,<x4=reg128#8,<mask=reg128#7 # asm 2: vand >x4=q10,<x4=q7,<mask=q6 vand q10,q7,q6 # qhasm: 2x x01 = r0 + t0 # asm 1: vadd.i64 >x01=reg128#6,<r0=reg128#6,<t0=reg128#10 # asm 2: vadd.i64 >x01=q5,<r0=q5,<t0=q9 vadd.i64 q5,q5,q9 # qhasm: r0 aligned= mem128[ptr] # asm 1: vld1.8 {>r0=reg128#8%bot->r0=reg128#8%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>r0=d14->r0=d15},[<ptr=r2,: 128] vld1.8 {d14-d15},[r2,: 128] # qhasm: ptr = &z34_stack # asm 1: lea >ptr=int32#3,<z34_stack=stack128#9 # asm 2: lea >ptr=r2,<z34_stack=[sp,#128] add r2,sp,#128 # qhasm: 2x t0 <<= 2 # asm 1: vshl.i64 >t0=reg128#10,<t0=reg128#10,#2 # asm 2: vshl.i64 >t0=q9,<t0=q9,#2 vshl.i64 q9,q9,#2 # qhasm: 2x t3 = r2 unsigned>> 26 # asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#9,#26 # asm 2: vshr.u64 >t3=q13,<r2=q8,#26 vshr.u64 q13,q8,#26 # qhasm: 2x x01 += t0 # asm 1: vadd.i64 >x01=reg128#15,<x01=reg128#6,<t0=reg128#10 # asm 2: vadd.i64 >x01=q14,<x01=q5,<t0=q9 vadd.i64 q14,q5,q9 # qhasm: z34 aligned= mem128[ptr] # asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>z34=d10->z34=d11},[<ptr=r2,: 128] vld1.8 {d10-d11},[r2,: 128] # qhasm: x23 = r2 & mask # asm 1: vand >x23=reg128#10,<r2=reg128#9,<mask=reg128#7 # asm 2: vand >x23=q9,<r2=q8,<mask=q6 vand q9,q8,q6 # qhasm: 2x r3 += t3 # asm 1: vadd.i64 >r3=reg128#5,<r3=reg128#5,<t3=reg128#14 # asm 2: vadd.i64 >r3=q4,<r3=q4,<t3=q13 vadd.i64 q4,q4,q13 # qhasm: input_2 += 32 # asm 1: add >input_2=int32#2,<input_2=int32#2,#32 # asm 2: add >input_2=r1,<input_2=r1,#32 add r1,r1,#32 # qhasm: 2x t1 = x01 unsigned>> 26 # asm 1: vshr.u64 >t1=reg128#14,<x01=reg128#15,#26 # asm 2: vshr.u64 >t1=q13,<x01=q14,#26 vshr.u64 q13,q14,#26 # qhasm: x23 = 
x23[0,2,1,3] # asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top # asm 2: vtrn.32 <x23=d18,<x23=d19 vtrn.32 d18,d19 # qhasm: x01 = x01 & mask # asm 1: vand >x01=reg128#9,<x01=reg128#15,<mask=reg128#7 # asm 2: vand >x01=q8,<x01=q14,<mask=q6 vand q8,q14,q6 # qhasm: 2x r1 += t1 # asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#4,<t1=reg128#14 # asm 2: vadd.i64 >r1=q3,<r1=q3,<t1=q13 vadd.i64 q3,q3,q13 # qhasm: 2x t4 = r3 unsigned>> 26 # asm 1: vshr.u64 >t4=reg128#14,<r3=reg128#5,#26 # asm 2: vshr.u64 >t4=q13,<r3=q4,#26 vshr.u64 q13,q4,#26 # qhasm: x01 = x01[0,2,1,3] # asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top # asm 2: vtrn.32 <x01=d16,<x01=d17 vtrn.32 d16,d17 # qhasm: r3 &= mask # asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7 # asm 2: vand >r3=q4,<r3=q4,<mask=q6 vand q4,q4,q6 # qhasm: r1 = r1[0,2,1,3] # asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top # asm 2: vtrn.32 <r1=d6,<r1=d7 vtrn.32 d6,d7 # qhasm: 2x x4 += t4 # asm 1: vadd.i64 >x4=reg128#11,<x4=reg128#11,<t4=reg128#14 # asm 2: vadd.i64 >x4=q10,<x4=q10,<t4=q13 vadd.i64 q10,q10,q13 # qhasm: r3 = r3[0,2,1,3] # asm 1: vtrn.32 <r3=reg128#5%bot,<r3=reg128#5%top # asm 2: vtrn.32 <r3=d8,<r3=d9 vtrn.32 d8,d9 # qhasm: x01 = x01[0,1] r1[0,1] # asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0 # asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0 vext.32 d17,d6,d6,#0 # qhasm: x23 = x23[0,1] r3[0,1] # asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#5%bot,<r3=reg128#5%bot,#0 # asm 2: vext.32 <x23=d19,<r3=d8,<r3=d8,#0 vext.32 d19,d8,d8,#0 # qhasm: x4 = x4[0,2,1,3] # asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top # asm 2: vtrn.32 <x4=d20,<x4=d21 vtrn.32 d20,d21 # qhasm: unsigned>? len - 64 # asm 1: cmp <len=int32#4,#64 # asm 2: cmp <len=r3,#64 cmp r3,#64 # qhasm: goto mainloop2 if unsigned> bhi ._mainloop2 # qhasm: input_2 -= 32 # asm 1: sub >input_2=int32#3,<input_2=int32#2,#32 # asm 2: sub >input_2=r2,<input_2=r1,#32 sub r2,r1,#32 # qhasm: below64bytes: ._below64bytes: # qhasm: unsigned>? len - 32 # asm 1: cmp <len=int32#4,#32 # asm 2: cmp <len=r3,#32 cmp r3,#32 # qhasm: goto end if !unsigned> bls ._end # qhasm: mainloop: ._mainloop: # qhasm: new r0 # qhasm: ptr = &two24 # asm 1: lea >ptr=int32#2,<two24=stack128#1 # asm 2: lea >ptr=r1,<two24=[sp,#0] add r1,sp,#0 # qhasm: r4 aligned= mem128[ptr] # asm 1: vld1.8 {>r4=reg128#5%bot->r4=reg128#5%top},[<ptr=int32#2,: 128] # asm 2: vld1.8 {>r4=d8->r4=d9},[<ptr=r1,: 128] vld1.8 {d8-d9},[r1,: 128] # qhasm: u4 aligned= mem128[ptr] # asm 1: vld1.8 {>u4=reg128#6%bot->u4=reg128#6%top},[<ptr=int32#2,: 128] # asm 2: vld1.8 {>u4=d10->u4=d11},[<ptr=r1,: 128] vld1.8 {d10-d11},[r1,: 128] # qhasm: c01 = mem128[input_2];input_2+=16 # asm 1: vld1.8 {>c01=reg128#8%bot->c01=reg128#8%top},[<input_2=int32#3]! # asm 2: vld1.8 {>c01=d14->c01=d15},[<input_2=r2]! vld1.8 {d14-d15},[r2]! # qhasm: r4[0,1] += x01[0] unsigned* y34[2]; r4[2,3] += x01[1] unsigned* y34[3] # asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%bot,<y34=reg128#3%top # asm 2: vmlal.u32 <r4=q4,<x01=d16,<y34=d5 vmlal.u32 q4,d16,d5 # qhasm: c23 = mem128[input_2];input_2+=16 # asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_2=int32#3]! # asm 2: vld1.8 {>c23=d26->c23=d27},[<input_2=r2]! vld1.8 {d26-d27},[r2]! 
# qhasm: r4[0,1] += x01[2] unsigned* y34[0]; r4[2,3] += x01[3] unsigned* y34[1] # asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%top,<y34=reg128#3%bot # asm 2: vmlal.u32 <r4=q4,<x01=d17,<y34=d4 vmlal.u32 q4,d17,d4 # qhasm: r0 = u4[1]c01[0]r0[2,3] # asm 1: vext.32 <r0=reg128#4%bot,<u4=reg128#6%bot,<c01=reg128#8%bot,#1 # asm 2: vext.32 <r0=d6,<u4=d10,<c01=d14,#1 vext.32 d6,d10,d14,#1 # qhasm: r4[0,1] += x23[0] unsigned* y12[2]; r4[2,3] += x23[1] unsigned* y12[3] # asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%bot,<y12=reg128#2%top # asm 2: vmlal.u32 <r4=q4,<x23=d18,<y12=d3 vmlal.u32 q4,d18,d3 # qhasm: r0 = r0[0,1]u4[1]c23[0] # asm 1: vext.32 <r0=reg128#4%top,<u4=reg128#6%bot,<c23=reg128#14%bot,#1 # asm 2: vext.32 <r0=d7,<u4=d10,<c23=d26,#1 vext.32 d7,d10,d26,#1 # qhasm: r4[0,1] += x23[2] unsigned* y12[0]; r4[2,3] += x23[3] unsigned* y12[1] # asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%top,<y12=reg128#2%bot # asm 2: vmlal.u32 <r4=q4,<x23=d19,<y12=d2 vmlal.u32 q4,d19,d2 # qhasm: r0 = r0[1]r0[0]r0[3]r0[2] # asm 1: vrev64.i32 >r0=reg128#4,<r0=reg128#4 # asm 2: vrev64.i32 >r0=q3,<r0=q3 vrev64.i32 q3,q3 # qhasm: r4[0,1] += x4[0] unsigned* y0[0]; r4[2,3] += x4[1] unsigned* y0[1] # asm 1: vmlal.u32 <r4=reg128#5,<x4=reg128#11%bot,<y0=reg128#1%bot # asm 2: vmlal.u32 <r4=q4,<x4=d20,<y0=d0 vmlal.u32 q4,d20,d0 # qhasm: r0[0,1] += x4[0] unsigned* 5y12[0]; r0[2,3] += x4[1] unsigned* 5y12[1] # asm 1: vmlal.u32 <r0=reg128#4,<x4=reg128#11%bot,<5y12=reg128#12%bot # asm 2: vmlal.u32 <r0=q3,<x4=d20,<5y12=d22 vmlal.u32 q3,d20,d22 # qhasm: r0[0,1] += x23[0] unsigned* 5y34[0]; r0[2,3] += x23[1] unsigned* 5y34[1] # asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%bot,<5y34=reg128#13%bot # asm 2: vmlal.u32 <r0=q3,<x23=d18,<5y34=d24 vmlal.u32 q3,d18,d24 # qhasm: r0[0,1] += x23[2] unsigned* 5y12[2]; r0[2,3] += x23[3] unsigned* 5y12[3] # asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%top,<5y12=reg128#12%top # asm 2: vmlal.u32 <r0=q3,<x23=d19,<5y12=d23 vmlal.u32 q3,d19,d23 # qhasm: c01 c23 = c01[0]c23[0]c01[2]c23[2]c01[1]c23[1]c01[3]c23[3] # asm 1: vtrn.32 <c01=reg128#8,<c23=reg128#14 # asm 2: vtrn.32 <c01=q7,<c23=q13 vtrn.32 q7,q13 # qhasm: r0[0,1] += x01[0] unsigned* y0[0]; r0[2,3] += x01[1] unsigned* y0[1] # asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%bot,<y0=reg128#1%bot # asm 2: vmlal.u32 <r0=q3,<x01=d16,<y0=d0 vmlal.u32 q3,d16,d0 # qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18 # asm 1: vshll.u32 >r3=reg128#6,<c23=reg128#14%top,#18 # asm 2: vshll.u32 >r3=q5,<c23=d27,#18 vshll.u32 q5,d27,#18 # qhasm: r0[0,1] += x01[2] unsigned* 5y34[2]; r0[2,3] += x01[3] unsigned* 5y34[3] # asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%top,<5y34=reg128#13%top # asm 2: vmlal.u32 <r0=q3,<x01=d17,<5y34=d25 vmlal.u32 q3,d17,d25 # qhasm: r3[0,1] += x01[0] unsigned* y34[0]; r3[2,3] += x01[1] unsigned* y34[1] # asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%bot,<y34=reg128#3%bot # asm 2: vmlal.u32 <r3=q5,<x01=d16,<y34=d4 vmlal.u32 q5,d16,d4 # qhasm: r3[0,1] += x01[2] unsigned* y12[2]; r3[2,3] += x01[3] unsigned* y12[3] # asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%top,<y12=reg128#2%top # asm 2: vmlal.u32 <r3=q5,<x01=d17,<y12=d3 vmlal.u32 q5,d17,d3 # qhasm: r3[0,1] += x23[0] unsigned* y12[0]; r3[2,3] += x23[1] unsigned* y12[1] # asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%bot,<y12=reg128#2%bot # asm 2: vmlal.u32 <r3=q5,<x23=d18,<y12=d2 vmlal.u32 q5,d18,d2 # qhasm: r3[0,1] += x23[2] unsigned* y0[0]; r3[2,3] += x23[3] unsigned* y0[1] # asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%top,<y0=reg128#1%bot # asm 2: vmlal.u32 <r3=q5,<x23=d19,<y0=d0 vmlal.u32 
q5,d19,d0 # qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6 # asm 1: vshll.u32 >r1=reg128#14,<c23=reg128#14%bot,#6 # asm 2: vshll.u32 >r1=q13,<c23=d26,#6 vshll.u32 q13,d26,#6 # qhasm: r3[0,1] += x4[0] unsigned* 5y34[2]; r3[2,3] += x4[1] unsigned* 5y34[3] # asm 1: vmlal.u32 <r3=reg128#6,<x4=reg128#11%bot,<5y34=reg128#13%top # asm 2: vmlal.u32 <r3=q5,<x4=d20,<5y34=d25 vmlal.u32 q5,d20,d25 # qhasm: r1[0,1] += x01[0] unsigned* y12[0]; r1[2,3] += x01[1] unsigned* y12[1] # asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%bot,<y12=reg128#2%bot # asm 2: vmlal.u32 <r1=q13,<x01=d16,<y12=d2 vmlal.u32 q13,d16,d2 # qhasm: r1[0,1] += x01[2] unsigned* y0[0]; r1[2,3] += x01[3] unsigned* y0[1] # asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%top,<y0=reg128#1%bot # asm 2: vmlal.u32 <r1=q13,<x01=d17,<y0=d0 vmlal.u32 q13,d17,d0 # qhasm: r1[0,1] += x23[0] unsigned* 5y34[2]; r1[2,3] += x23[1] unsigned* 5y34[3] # asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%bot,<5y34=reg128#13%top # asm 2: vmlal.u32 <r1=q13,<x23=d18,<5y34=d25 vmlal.u32 q13,d18,d25 # qhasm: r1[0,1] += x23[2] unsigned* 5y34[0]; r1[2,3] += x23[3] unsigned* 5y34[1] # asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%top,<5y34=reg128#13%bot # asm 2: vmlal.u32 <r1=q13,<x23=d19,<5y34=d24 vmlal.u32 q13,d19,d24 # qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12 # asm 1: vshll.u32 >r2=reg128#8,<c01=reg128#8%top,#12 # asm 2: vshll.u32 >r2=q7,<c01=d15,#12 vshll.u32 q7,d15,#12 # qhasm: r1[0,1] += x4[0] unsigned* 5y12[2]; r1[2,3] += x4[1] unsigned* 5y12[3] # asm 1: vmlal.u32 <r1=reg128#14,<x4=reg128#11%bot,<5y12=reg128#12%top # asm 2: vmlal.u32 <r1=q13,<x4=d20,<5y12=d23 vmlal.u32 q13,d20,d23 # qhasm: r2[0,1] += x01[0] unsigned* y12[2]; r2[2,3] += x01[1] unsigned* y12[3] # asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%bot,<y12=reg128#2%top # asm 2: vmlal.u32 <r2=q7,<x01=d16,<y12=d3 vmlal.u32 q7,d16,d3 # qhasm: r2[0,1] += x01[2] unsigned* y12[0]; r2[2,3] += x01[3] unsigned* y12[1] # asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%top,<y12=reg128#2%bot # asm 2: vmlal.u32 <r2=q7,<x01=d17,<y12=d2 vmlal.u32 q7,d17,d2 # qhasm: r2[0,1] += x23[0] unsigned* y0[0]; r2[2,3] += x23[1] unsigned* y0[1] # asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%bot,<y0=reg128#1%bot # asm 2: vmlal.u32 <r2=q7,<x23=d18,<y0=d0 vmlal.u32 q7,d18,d0 # qhasm: r2[0,1] += x23[2] unsigned* 5y34[2]; r2[2,3] += x23[3] unsigned* 5y34[3] # asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%top,<5y34=reg128#13%top # asm 2: vmlal.u32 <r2=q7,<x23=d19,<5y34=d25 vmlal.u32 q7,d19,d25 # qhasm: r2[0,1] += x4[0] unsigned* 5y34[0]; r2[2,3] += x4[1] unsigned* 5y34[1] # asm 1: vmlal.u32 <r2=reg128#8,<x4=reg128#11%bot,<5y34=reg128#13%bot # asm 2: vmlal.u32 <r2=q7,<x4=d20,<5y34=d24 vmlal.u32 q7,d20,d24 # qhasm: 2x t1 = r0 unsigned>> 26 # asm 1: vshr.u64 >t1=reg128#9,<r0=reg128#4,#26 # asm 2: vshr.u64 >t1=q8,<r0=q3,#26 vshr.u64 q8,q3,#26 # qhasm: r0 &= mask # asm 1: vand >r0=reg128#4,<r0=reg128#4,<mask=reg128#7 # asm 2: vand >r0=q3,<r0=q3,<mask=q6 vand q3,q3,q6 # qhasm: 2x r1 += t1 # asm 1: vadd.i64 >r1=reg128#9,<r1=reg128#14,<t1=reg128#9 # asm 2: vadd.i64 >r1=q8,<r1=q13,<t1=q8 vadd.i64 q8,q13,q8 # qhasm: 2x t4 = r3 unsigned>> 26 # asm 1: vshr.u64 >t4=reg128#10,<r3=reg128#6,#26 # asm 2: vshr.u64 >t4=q9,<r3=q5,#26 vshr.u64 q9,q5,#26 # qhasm: r3 &= mask # asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7 # asm 2: vand >r3=q5,<r3=q5,<mask=q6 vand q5,q5,q6 # qhasm: 2x r4 += t4 # asm 1: vadd.i64 >r4=reg128#5,<r4=reg128#5,<t4=reg128#10 # asm 2: vadd.i64 >r4=q4,<r4=q4,<t4=q9 vadd.i64 q4,q4,q9 # qhasm: 2x t2 = r1 unsigned>> 26 # asm 1: 
vshr.u64 >t2=reg128#10,<r1=reg128#9,#26 # asm 2: vshr.u64 >t2=q9,<r1=q8,#26 vshr.u64 q9,q8,#26 # qhasm: r1 &= mask # asm 1: vand >r1=reg128#11,<r1=reg128#9,<mask=reg128#7 # asm 2: vand >r1=q10,<r1=q8,<mask=q6 vand q10,q8,q6 # qhasm: 2x t0 = r4 unsigned>> 26 # asm 1: vshr.u64 >t0=reg128#9,<r4=reg128#5,#26 # asm 2: vshr.u64 >t0=q8,<r4=q4,#26 vshr.u64 q8,q4,#26 # qhasm: 2x r2 += t2 # asm 1: vadd.i64 >r2=reg128#8,<r2=reg128#8,<t2=reg128#10 # asm 2: vadd.i64 >r2=q7,<r2=q7,<t2=q9 vadd.i64 q7,q7,q9 # qhasm: r4 &= mask # asm 1: vand >r4=reg128#5,<r4=reg128#5,<mask=reg128#7 # asm 2: vand >r4=q4,<r4=q4,<mask=q6 vand q4,q4,q6 # qhasm: 2x r0 += t0 # asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9 # asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8 vadd.i64 q3,q3,q8 # qhasm: 2x t0 <<= 2 # asm 1: vshl.i64 >t0=reg128#9,<t0=reg128#9,#2 # asm 2: vshl.i64 >t0=q8,<t0=q8,#2 vshl.i64 q8,q8,#2 # qhasm: 2x t3 = r2 unsigned>> 26 # asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#8,#26 # asm 2: vshr.u64 >t3=q13,<r2=q7,#26 vshr.u64 q13,q7,#26 # qhasm: 2x r0 += t0 # asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9 # asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8 vadd.i64 q3,q3,q8 # qhasm: x23 = r2 & mask # asm 1: vand >x23=reg128#10,<r2=reg128#8,<mask=reg128#7 # asm 2: vand >x23=q9,<r2=q7,<mask=q6 vand q9,q7,q6 # qhasm: 2x r3 += t3 # asm 1: vadd.i64 >r3=reg128#6,<r3=reg128#6,<t3=reg128#14 # asm 2: vadd.i64 >r3=q5,<r3=q5,<t3=q13 vadd.i64 q5,q5,q13 # qhasm: 2x t1 = r0 unsigned>> 26 # asm 1: vshr.u64 >t1=reg128#8,<r0=reg128#4,#26 # asm 2: vshr.u64 >t1=q7,<r0=q3,#26 vshr.u64 q7,q3,#26 # qhasm: x01 = r0 & mask # asm 1: vand >x01=reg128#9,<r0=reg128#4,<mask=reg128#7 # asm 2: vand >x01=q8,<r0=q3,<mask=q6 vand q8,q3,q6 # qhasm: 2x r1 += t1 # asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#11,<t1=reg128#8 # asm 2: vadd.i64 >r1=q3,<r1=q10,<t1=q7 vadd.i64 q3,q10,q7 # qhasm: 2x t4 = r3 unsigned>> 26 # asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#6,#26 # asm 2: vshr.u64 >t4=q7,<r3=q5,#26 vshr.u64 q7,q5,#26 # qhasm: r3 &= mask # asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7 # asm 2: vand >r3=q5,<r3=q5,<mask=q6 vand q5,q5,q6 # qhasm: 2x x4 = r4 + t4 # asm 1: vadd.i64 >x4=reg128#11,<r4=reg128#5,<t4=reg128#8 # asm 2: vadd.i64 >x4=q10,<r4=q4,<t4=q7 vadd.i64 q10,q4,q7 # qhasm: len -= 32 # asm 1: sub >len=int32#4,<len=int32#4,#32 # asm 2: sub >len=r3,<len=r3,#32 sub r3,r3,#32 # qhasm: x01 = x01[0,2,1,3] # asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top # asm 2: vtrn.32 <x01=d16,<x01=d17 vtrn.32 d16,d17 # qhasm: x23 = x23[0,2,1,3] # asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top # asm 2: vtrn.32 <x23=d18,<x23=d19 vtrn.32 d18,d19 # qhasm: r1 = r1[0,2,1,3] # asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top # asm 2: vtrn.32 <r1=d6,<r1=d7 vtrn.32 d6,d7 # qhasm: r3 = r3[0,2,1,3] # asm 1: vtrn.32 <r3=reg128#6%bot,<r3=reg128#6%top # asm 2: vtrn.32 <r3=d10,<r3=d11 vtrn.32 d10,d11 # qhasm: x4 = x4[0,2,1,3] # asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top # asm 2: vtrn.32 <x4=d20,<x4=d21 vtrn.32 d20,d21 # qhasm: x01 = x01[0,1] r1[0,1] # asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0 # asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0 vext.32 d17,d6,d6,#0 # qhasm: x23 = x23[0,1] r3[0,1] # asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#6%bot,<r3=reg128#6%bot,#0 # asm 2: vext.32 <x23=d19,<r3=d10,<r3=d10,#0 vext.32 d19,d10,d10,#0 # qhasm: unsigned>? 
len - 32 # asm 1: cmp <len=int32#4,#32 # asm 2: cmp <len=r3,#32 cmp r3,#32 # qhasm: goto mainloop if unsigned> bhi ._mainloop # qhasm: end: ._end: # qhasm: mem128[input_0] = x01;input_0+=16 # asm 1: vst1.8 {<x01=reg128#9%bot-<x01=reg128#9%top},[<input_0=int32#1]! # asm 2: vst1.8 {<x01=d16-<x01=d17},[<input_0=r0]! vst1.8 {d16-d17},[r0]! # qhasm: mem128[input_0] = x23;input_0+=16 # asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1]! # asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0]! vst1.8 {d18-d19},[r0]! # qhasm: mem64[input_0] = x4[0] # asm 1: vst1.8 <x4=reg128#11%bot,[<input_0=int32#1] # asm 2: vst1.8 <x4=d20,[<input_0=r0] vst1.8 d20,[r0] # qhasm: len = len # asm 1: mov >len=int32#1,<len=int32#4 # asm 2: mov >len=r0,<len=r3 mov r0,r3 # qhasm: qpopreturn len mov sp,r12 vpop {q4,q5,q6,q7} bx lr # qhasm: int32 input_0 # qhasm: int32 input_1 # qhasm: int32 input_2 # qhasm: int32 input_3 # qhasm: stack32 input_4 # qhasm: stack32 input_5 # qhasm: stack32 input_6 # qhasm: stack32 input_7 # qhasm: int32 caller_r4 # qhasm: int32 caller_r5 # qhasm: int32 caller_r6 # qhasm: int32 caller_r7 # qhasm: int32 caller_r8 # qhasm: int32 caller_r9 # qhasm: int32 caller_r10 # qhasm: int32 caller_r11 # qhasm: int32 caller_r12 # qhasm: int32 caller_r14 # qhasm: reg128 caller_q4 # qhasm: reg128 caller_q5 # qhasm: reg128 caller_q6 # qhasm: reg128 caller_q7 # qhasm: reg128 r0 # qhasm: reg128 r1 # qhasm: reg128 r2 # qhasm: reg128 r3 # qhasm: reg128 r4 # qhasm: reg128 x01 # qhasm: reg128 x23 # qhasm: reg128 x4 # qhasm: reg128 y01 # qhasm: reg128 y23 # qhasm: reg128 y4 # qhasm: reg128 _5y01 # qhasm: reg128 _5y23 # qhasm: reg128 _5y4 # qhasm: reg128 c01 # qhasm: reg128 c23 # qhasm: reg128 c4 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 t2 # qhasm: reg128 t3 # qhasm: reg128 t4 # qhasm: reg128 mask # qhasm: enter crypto_onetimeauth_poly1305_neon2_addmulmod .align 2 .global openssl_poly1305_neon2_addmulmod .hidden openssl_poly1305_neon2_addmulmod .type openssl_poly1305_neon2_addmulmod STT_FUNC openssl_poly1305_neon2_addmulmod: sub sp,sp,#0 # qhasm: 2x mask = 0xffffffff # asm 1: vmov.i64 >mask=reg128#1,#0xffffffff # asm 2: vmov.i64 >mask=q0,#0xffffffff vmov.i64 q0,#0xffffffff # qhasm: y01 aligned= mem128[input_2];input_2+=16 # asm 1: vld1.8 {>y01=reg128#2%bot->y01=reg128#2%top},[<input_2=int32#3,: 128]! # asm 2: vld1.8 {>y01=d2->y01=d3},[<input_2=r2,: 128]! vld1.8 {d2-d3},[r2,: 128]! # qhasm: 4x _5y01 = y01 << 2 # asm 1: vshl.i32 >_5y01=reg128#3,<y01=reg128#2,#2 # asm 2: vshl.i32 >_5y01=q2,<y01=q1,#2 vshl.i32 q2,q1,#2 # qhasm: y23 aligned= mem128[input_2];input_2+=16 # asm 1: vld1.8 {>y23=reg128#4%bot->y23=reg128#4%top},[<input_2=int32#3,: 128]! # asm 2: vld1.8 {>y23=d6->y23=d7},[<input_2=r2,: 128]! vld1.8 {d6-d7},[r2,: 128]! # qhasm: 4x _5y23 = y23 << 2 # asm 1: vshl.i32 >_5y23=reg128#9,<y23=reg128#4,#2 # asm 2: vshl.i32 >_5y23=q8,<y23=q3,#2 vshl.i32 q8,q3,#2 # qhasm: y4 aligned= mem64[input_2]y4[1] # asm 1: vld1.8 {<y4=reg128#10%bot},[<input_2=int32#3,: 64] # asm 2: vld1.8 {<y4=d18},[<input_2=r2,: 64] vld1.8 {d18},[r2,: 64] # qhasm: 4x _5y4 = y4 << 2 # asm 1: vshl.i32 >_5y4=reg128#11,<y4=reg128#10,#2 # asm 2: vshl.i32 >_5y4=q10,<y4=q9,#2 vshl.i32 q10,q9,#2 # qhasm: x01 aligned= mem128[input_1];input_1+=16 # asm 1: vld1.8 {>x01=reg128#12%bot->x01=reg128#12%top},[<input_1=int32#2,: 128]! # asm 2: vld1.8 {>x01=d22->x01=d23},[<input_1=r1,: 128]! vld1.8 {d22-d23},[r1,: 128]! 
# qhasm: 4x _5y01 += y01 # asm 1: vadd.i32 >_5y01=reg128#3,<_5y01=reg128#3,<y01=reg128#2 # asm 2: vadd.i32 >_5y01=q2,<_5y01=q2,<y01=q1 vadd.i32 q2,q2,q1 # qhasm: x23 aligned= mem128[input_1];input_1+=16 # asm 1: vld1.8 {>x23=reg128#13%bot->x23=reg128#13%top},[<input_1=int32#2,: 128]! # asm 2: vld1.8 {>x23=d24->x23=d25},[<input_1=r1,: 128]! vld1.8 {d24-d25},[r1,: 128]! # qhasm: 4x _5y23 += y23 # asm 1: vadd.i32 >_5y23=reg128#9,<_5y23=reg128#9,<y23=reg128#4 # asm 2: vadd.i32 >_5y23=q8,<_5y23=q8,<y23=q3 vadd.i32 q8,q8,q3 # qhasm: 4x _5y4 += y4 # asm 1: vadd.i32 >_5y4=reg128#11,<_5y4=reg128#11,<y4=reg128#10 # asm 2: vadd.i32 >_5y4=q10,<_5y4=q10,<y4=q9 vadd.i32 q10,q10,q9 # qhasm: c01 aligned= mem128[input_3];input_3+=16 # asm 1: vld1.8 {>c01=reg128#14%bot->c01=reg128#14%top},[<input_3=int32#4,: 128]! # asm 2: vld1.8 {>c01=d26->c01=d27},[<input_3=r3,: 128]! vld1.8 {d26-d27},[r3,: 128]! # qhasm: 4x x01 += c01 # asm 1: vadd.i32 >x01=reg128#12,<x01=reg128#12,<c01=reg128#14 # asm 2: vadd.i32 >x01=q11,<x01=q11,<c01=q13 vadd.i32 q11,q11,q13 # qhasm: c23 aligned= mem128[input_3];input_3+=16 # asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_3=int32#4,: 128]! # asm 2: vld1.8 {>c23=d26->c23=d27},[<input_3=r3,: 128]! vld1.8 {d26-d27},[r3,: 128]! # qhasm: 4x x23 += c23 # asm 1: vadd.i32 >x23=reg128#13,<x23=reg128#13,<c23=reg128#14 # asm 2: vadd.i32 >x23=q12,<x23=q12,<c23=q13 vadd.i32 q12,q12,q13 # qhasm: x4 aligned= mem64[input_1]x4[1] # asm 1: vld1.8 {<x4=reg128#14%bot},[<input_1=int32#2,: 64] # asm 2: vld1.8 {<x4=d26},[<input_1=r1,: 64] vld1.8 {d26},[r1,: 64] # qhasm: 2x mask unsigned>>=6 # asm 1: vshr.u64 >mask=reg128#1,<mask=reg128#1,#6 # asm 2: vshr.u64 >mask=q0,<mask=q0,#6 vshr.u64 q0,q0,#6 # qhasm: c4 aligned= mem64[input_3]c4[1] # asm 1: vld1.8 {<c4=reg128#15%bot},[<input_3=int32#4,: 64] # asm 2: vld1.8 {<c4=d28},[<input_3=r3,: 64] vld1.8 {d28},[r3,: 64] # qhasm: 4x x4 += c4 # asm 1: vadd.i32 >x4=reg128#14,<x4=reg128#14,<c4=reg128#15 # asm 2: vadd.i32 >x4=q13,<x4=q13,<c4=q14 vadd.i32 q13,q13,q14 # qhasm: r0[0,1] = x01[0] unsigned* y01[0]; r0[2,3] = x01[1] unsigned* y01[1] # asm 1: vmull.u32 >r0=reg128#15,<x01=reg128#12%bot,<y01=reg128#2%bot # asm 2: vmull.u32 >r0=q14,<x01=d22,<y01=d2 vmull.u32 q14,d22,d2 # qhasm: r0[0,1] += x01[2] unsigned* _5y4[0]; r0[2,3] += x01[3] unsigned* _5y4[1] # asm 1: vmlal.u32 <r0=reg128#15,<x01=reg128#12%top,<_5y4=reg128#11%bot # asm 2: vmlal.u32 <r0=q14,<x01=d23,<_5y4=d20 vmlal.u32 q14,d23,d20 # qhasm: r0[0,1] += x23[0] unsigned* _5y23[2]; r0[2,3] += x23[1] unsigned* _5y23[3] # asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%bot,<_5y23=reg128#9%top # asm 2: vmlal.u32 <r0=q14,<x23=d24,<_5y23=d17 vmlal.u32 q14,d24,d17 # qhasm: r0[0,1] += x23[2] unsigned* _5y23[0]; r0[2,3] += x23[3] unsigned* _5y23[1] # asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%top,<_5y23=reg128#9%bot # asm 2: vmlal.u32 <r0=q14,<x23=d25,<_5y23=d16 vmlal.u32 q14,d25,d16 # qhasm: r0[0,1] += x4[0] unsigned* _5y01[2]; r0[2,3] += x4[1] unsigned* _5y01[3] # asm 1: vmlal.u32 <r0=reg128#15,<x4=reg128#14%bot,<_5y01=reg128#3%top # asm 2: vmlal.u32 <r0=q14,<x4=d26,<_5y01=d5 vmlal.u32 q14,d26,d5 # qhasm: r1[0,1] = x01[0] unsigned* y01[2]; r1[2,3] = x01[1] unsigned* y01[3] # asm 1: vmull.u32 >r1=reg128#3,<x01=reg128#12%bot,<y01=reg128#2%top # asm 2: vmull.u32 >r1=q2,<x01=d22,<y01=d3 vmull.u32 q2,d22,d3 # qhasm: r1[0,1] += x01[2] unsigned* y01[0]; r1[2,3] += x01[3] unsigned* y01[1] # asm 1: vmlal.u32 <r1=reg128#3,<x01=reg128#12%top,<y01=reg128#2%bot # asm 2: vmlal.u32 <r1=q2,<x01=d23,<y01=d2 vmlal.u32 
q2,d23,d2 # qhasm: r1[0,1] += x23[0] unsigned* _5y4[0]; r1[2,3] += x23[1] unsigned* _5y4[1] # asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%bot,<_5y4=reg128#11%bot # asm 2: vmlal.u32 <r1=q2,<x23=d24,<_5y4=d20 vmlal.u32 q2,d24,d20 # qhasm: r1[0,1] += x23[2] unsigned* _5y23[2]; r1[2,3] += x23[3] unsigned* _5y23[3] # asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%top,<_5y23=reg128#9%top # asm 2: vmlal.u32 <r1=q2,<x23=d25,<_5y23=d17 vmlal.u32 q2,d25,d17 # qhasm: r1[0,1] += x4[0] unsigned* _5y23[0]; r1[2,3] += x4[1] unsigned* _5y23[1] # asm 1: vmlal.u32 <r1=reg128#3,<x4=reg128#14%bot,<_5y23=reg128#9%bot # asm 2: vmlal.u32 <r1=q2,<x4=d26,<_5y23=d16 vmlal.u32 q2,d26,d16 # qhasm: r2[0,1] = x01[0] unsigned* y23[0]; r2[2,3] = x01[1] unsigned* y23[1] # asm 1: vmull.u32 >r2=reg128#16,<x01=reg128#12%bot,<y23=reg128#4%bot # asm 2: vmull.u32 >r2=q15,<x01=d22,<y23=d6 vmull.u32 q15,d22,d6 # qhasm: r2[0,1] += x01[2] unsigned* y01[2]; r2[2,3] += x01[3] unsigned* y01[3] # asm 1: vmlal.u32 <r2=reg128#16,<x01=reg128#12%top,<y01=reg128#2%top # asm 2: vmlal.u32 <r2=q15,<x01=d23,<y01=d3 vmlal.u32 q15,d23,d3 # qhasm: r2[0,1] += x23[0] unsigned* y01[0]; r2[2,3] += x23[1] unsigned* y01[1] # asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%bot,<y01=reg128#2%bot # asm 2: vmlal.u32 <r2=q15,<x23=d24,<y01=d2 vmlal.u32 q15,d24,d2 # qhasm: r2[0,1] += x23[2] unsigned* _5y4[0]; r2[2,3] += x23[3] unsigned* _5y4[1] # asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%top,<_5y4=reg128#11%bot # asm 2: vmlal.u32 <r2=q15,<x23=d25,<_5y4=d20 vmlal.u32 q15,d25,d20 # qhasm: r2[0,1] += x4[0] unsigned* _5y23[2]; r2[2,3] += x4[1] unsigned* _5y23[3] # asm 1: vmlal.u32 <r2=reg128#16,<x4=reg128#14%bot,<_5y23=reg128#9%top # asm 2: vmlal.u32 <r2=q15,<x4=d26,<_5y23=d17 vmlal.u32 q15,d26,d17 # qhasm: r3[0,1] = x01[0] unsigned* y23[2]; r3[2,3] = x01[1] unsigned* y23[3] # asm 1: vmull.u32 >r3=reg128#9,<x01=reg128#12%bot,<y23=reg128#4%top # asm 2: vmull.u32 >r3=q8,<x01=d22,<y23=d7 vmull.u32 q8,d22,d7 # qhasm: r3[0,1] += x01[2] unsigned* y23[0]; r3[2,3] += x01[3] unsigned* y23[1] # asm 1: vmlal.u32 <r3=reg128#9,<x01=reg128#12%top,<y23=reg128#4%bot # asm 2: vmlal.u32 <r3=q8,<x01=d23,<y23=d6 vmlal.u32 q8,d23,d6 # qhasm: r3[0,1] += x23[0] unsigned* y01[2]; r3[2,3] += x23[1] unsigned* y01[3] # asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%bot,<y01=reg128#2%top # asm 2: vmlal.u32 <r3=q8,<x23=d24,<y01=d3 vmlal.u32 q8,d24,d3 # qhasm: r3[0,1] += x23[2] unsigned* y01[0]; r3[2,3] += x23[3] unsigned* y01[1] # asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%top,<y01=reg128#2%bot # asm 2: vmlal.u32 <r3=q8,<x23=d25,<y01=d2 vmlal.u32 q8,d25,d2 # qhasm: r3[0,1] += x4[0] unsigned* _5y4[0]; r3[2,3] += x4[1] unsigned* _5y4[1] # asm 1: vmlal.u32 <r3=reg128#9,<x4=reg128#14%bot,<_5y4=reg128#11%bot # asm 2: vmlal.u32 <r3=q8,<x4=d26,<_5y4=d20 vmlal.u32 q8,d26,d20 # qhasm: r4[0,1] = x01[0] unsigned* y4[0]; r4[2,3] = x01[1] unsigned* y4[1] # asm 1: vmull.u32 >r4=reg128#10,<x01=reg128#12%bot,<y4=reg128#10%bot # asm 2: vmull.u32 >r4=q9,<x01=d22,<y4=d18 vmull.u32 q9,d22,d18 # qhasm: r4[0,1] += x01[2] unsigned* y23[2]; r4[2,3] += x01[3] unsigned* y23[3] # asm 1: vmlal.u32 <r4=reg128#10,<x01=reg128#12%top,<y23=reg128#4%top # asm 2: vmlal.u32 <r4=q9,<x01=d23,<y23=d7 vmlal.u32 q9,d23,d7 # qhasm: r4[0,1] += x23[0] unsigned* y23[0]; r4[2,3] += x23[1] unsigned* y23[1] # asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%bot,<y23=reg128#4%bot # asm 2: vmlal.u32 <r4=q9,<x23=d24,<y23=d6 vmlal.u32 q9,d24,d6 # qhasm: r4[0,1] += x23[2] unsigned* y01[2]; r4[2,3] += x23[3] unsigned* y01[3] # asm 1: vmlal.u32 
<r4=reg128#10,<x23=reg128#13%top,<y01=reg128#2%top # asm 2: vmlal.u32 <r4=q9,<x23=d25,<y01=d3 vmlal.u32 q9,d25,d3 # qhasm: r4[0,1] += x4[0] unsigned* y01[0]; r4[2,3] += x4[1] unsigned* y01[1] # asm 1: vmlal.u32 <r4=reg128#10,<x4=reg128#14%bot,<y01=reg128#2%bot # asm 2: vmlal.u32 <r4=q9,<x4=d26,<y01=d2 vmlal.u32 q9,d26,d2 # qhasm: 2x t1 = r0 unsigned>> 26 # asm 1: vshr.u64 >t1=reg128#2,<r0=reg128#15,#26 # asm 2: vshr.u64 >t1=q1,<r0=q14,#26 vshr.u64 q1,q14,#26 # qhasm: r0 &= mask # asm 1: vand >r0=reg128#4,<r0=reg128#15,<mask=reg128#1 # asm 2: vand >r0=q3,<r0=q14,<mask=q0 vand q3,q14,q0 # qhasm: 2x r1 += t1 # asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#3,<t1=reg128#2 # asm 2: vadd.i64 >r1=q1,<r1=q2,<t1=q1 vadd.i64 q1,q2,q1 # qhasm: 2x t4 = r3 unsigned>> 26 # asm 1: vshr.u64 >t4=reg128#3,<r3=reg128#9,#26 # asm 2: vshr.u64 >t4=q2,<r3=q8,#26 vshr.u64 q2,q8,#26 # qhasm: r3 &= mask # asm 1: vand >r3=reg128#9,<r3=reg128#9,<mask=reg128#1 # asm 2: vand >r3=q8,<r3=q8,<mask=q0 vand q8,q8,q0 # qhasm: 2x r4 += t4 # asm 1: vadd.i64 >r4=reg128#3,<r4=reg128#10,<t4=reg128#3 # asm 2: vadd.i64 >r4=q2,<r4=q9,<t4=q2 vadd.i64 q2,q9,q2 # qhasm: 2x t2 = r1 unsigned>> 26 # asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#2,#26 # asm 2: vshr.u64 >t2=q9,<r1=q1,#26 vshr.u64 q9,q1,#26 # qhasm: r1 &= mask # asm 1: vand >r1=reg128#2,<r1=reg128#2,<mask=reg128#1 # asm 2: vand >r1=q1,<r1=q1,<mask=q0 vand q1,q1,q0 # qhasm: 2x t0 = r4 unsigned>> 26 # asm 1: vshr.u64 >t0=reg128#11,<r4=reg128#3,#26 # asm 2: vshr.u64 >t0=q10,<r4=q2,#26 vshr.u64 q10,q2,#26 # qhasm: 2x r2 += t2 # asm 1: vadd.i64 >r2=reg128#10,<r2=reg128#16,<t2=reg128#10 # asm 2: vadd.i64 >r2=q9,<r2=q15,<t2=q9 vadd.i64 q9,q15,q9 # qhasm: r4 &= mask # asm 1: vand >r4=reg128#3,<r4=reg128#3,<mask=reg128#1 # asm 2: vand >r4=q2,<r4=q2,<mask=q0 vand q2,q2,q0 # qhasm: 2x r0 += t0 # asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11 # asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10 vadd.i64 q3,q3,q10 # qhasm: 2x t0 <<= 2 # asm 1: vshl.i64 >t0=reg128#11,<t0=reg128#11,#2 # asm 2: vshl.i64 >t0=q10,<t0=q10,#2 vshl.i64 q10,q10,#2 # qhasm: 2x t3 = r2 unsigned>> 26 # asm 1: vshr.u64 >t3=reg128#12,<r2=reg128#10,#26 # asm 2: vshr.u64 >t3=q11,<r2=q9,#26 vshr.u64 q11,q9,#26 # qhasm: 2x r0 += t0 # asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11 # asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10 vadd.i64 q3,q3,q10 # qhasm: x23 = r2 & mask # asm 1: vand >x23=reg128#10,<r2=reg128#10,<mask=reg128#1 # asm 2: vand >x23=q9,<r2=q9,<mask=q0 vand q9,q9,q0 # qhasm: 2x r3 += t3 # asm 1: vadd.i64 >r3=reg128#9,<r3=reg128#9,<t3=reg128#12 # asm 2: vadd.i64 >r3=q8,<r3=q8,<t3=q11 vadd.i64 q8,q8,q11 # qhasm: 2x t1 = r0 unsigned>> 26 # asm 1: vshr.u64 >t1=reg128#11,<r0=reg128#4,#26 # asm 2: vshr.u64 >t1=q10,<r0=q3,#26 vshr.u64 q10,q3,#26 # qhasm: x23 = x23[0,2,1,3] # asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top # asm 2: vtrn.32 <x23=d18,<x23=d19 vtrn.32 d18,d19 # qhasm: x01 = r0 & mask # asm 1: vand >x01=reg128#4,<r0=reg128#4,<mask=reg128#1 # asm 2: vand >x01=q3,<r0=q3,<mask=q0 vand q3,q3,q0 # qhasm: 2x r1 += t1 # asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#2,<t1=reg128#11 # asm 2: vadd.i64 >r1=q1,<r1=q1,<t1=q10 vadd.i64 q1,q1,q10 # qhasm: 2x t4 = r3 unsigned>> 26 # asm 1: vshr.u64 >t4=reg128#11,<r3=reg128#9,#26 # asm 2: vshr.u64 >t4=q10,<r3=q8,#26 vshr.u64 q10,q8,#26 # qhasm: x01 = x01[0,2,1,3] # asm 1: vtrn.32 <x01=reg128#4%bot,<x01=reg128#4%top # asm 2: vtrn.32 <x01=d6,<x01=d7 vtrn.32 d6,d7 # qhasm: r3 &= mask # asm 1: vand >r3=reg128#1,<r3=reg128#9,<mask=reg128#1 # asm 2: vand >r3=q0,<r3=q8,<mask=q0 vand q0,q8,q0 # 
qhasm: r1 = r1[0,2,1,3] # asm 1: vtrn.32 <r1=reg128#2%bot,<r1=reg128#2%top # asm 2: vtrn.32 <r1=d2,<r1=d3 vtrn.32 d2,d3 # qhasm: 2x x4 = r4 + t4 # asm 1: vadd.i64 >x4=reg128#3,<r4=reg128#3,<t4=reg128#11 # asm 2: vadd.i64 >x4=q2,<r4=q2,<t4=q10 vadd.i64 q2,q2,q10 # qhasm: r3 = r3[0,2,1,3] # asm 1: vtrn.32 <r3=reg128#1%bot,<r3=reg128#1%top # asm 2: vtrn.32 <r3=d0,<r3=d1 vtrn.32 d0,d1 # qhasm: x01 = x01[0,1] r1[0,1] # asm 1: vext.32 <x01=reg128#4%top,<r1=reg128#2%bot,<r1=reg128#2%bot,#0 # asm 2: vext.32 <x01=d7,<r1=d2,<r1=d2,#0 vext.32 d7,d2,d2,#0 # qhasm: x23 = x23[0,1] r3[0,1] # asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#1%bot,<r3=reg128#1%bot,#0 # asm 2: vext.32 <x23=d19,<r3=d0,<r3=d0,#0 vext.32 d19,d0,d0,#0 # qhasm: x4 = x4[0,2,1,3] # asm 1: vtrn.32 <x4=reg128#3%bot,<x4=reg128#3%top # asm 2: vtrn.32 <x4=d4,<x4=d5 vtrn.32 d4,d5 # qhasm: mem128[input_0] aligned= x01;input_0+=16 # asm 1: vst1.8 {<x01=reg128#4%bot-<x01=reg128#4%top},[<input_0=int32#1,: 128]! # asm 2: vst1.8 {<x01=d6-<x01=d7},[<input_0=r0,: 128]! vst1.8 {d6-d7},[r0,: 128]! # qhasm: mem128[input_0] aligned= x23;input_0+=16 # asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1,: 128]! # asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0,: 128]! vst1.8 {d18-d19},[r0,: 128]! # qhasm: mem64[input_0] aligned= x4[0] # asm 1: vst1.8 <x4=reg128#3%bot,[<input_0=int32#1,: 64] # asm 2: vst1.8 <x4=d4,[<input_0=r0,: 64] vst1.8 d4,[r0,: 64] # qhasm: return add sp,sp,#0 bx lr #endif /* !OPENSSL_NO_ASM && OPENSSL_ARM && __ELF__ */
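The qhasm annotations above spell out a Poly1305 evaluation in radix 2^26: the accumulator lives in five 26-bit limbs, the vmlal.u32 chains form the limb products against the key limbs (with the 5y12/5y34 values holding five times the key limbs for terms that wrap past 2^130), and the vshr.u64 / vand / vadd.i64 groups propagate carries, folding the carry out of the top limb back into limb 0 as 5*c via the "2x t0 <<= 2" / "2x r0 += t0" pair. As a reading aid, here is a minimal scalar C sketch of that carry step only; the limb array h[5] and the function name are illustrative assumptions, not taken from the assembly.

#include <stdint.h>

/*
 * Scalar sketch (not the NEON code above) of the radix-2^26 carry
 * propagation: five 26-bit limbs represent the accumulator modulo
 * 2^130 - 5, so the carry out of the top limb re-enters limb 0
 * multiplied by 5, i.e. c + (c << 2).
 */
static void poly1305_carry_radix26(uint64_t h[5])
{
    const uint64_t mask = (1ULL << 26) - 1;
    uint64_t c;

    c = h[0] >> 26; h[0] &= mask; h[1] += c;
    c = h[1] >> 26; h[1] &= mask; h[2] += c;
    c = h[2] >> 26; h[2] &= mask; h[3] += c;
    c = h[3] >> 26; h[3] &= mask; h[4] += c;
    c = h[4] >> 26; h[4] &= mask; h[0] += c + (c << 2); /* 2^130 == 5 (mod p) */
}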
wlsfx/bnbb
6,052
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/x86_64/src/nttunpack.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /* References * ========== * * - [REF_AVX2] * CRYSTALS-Kyber optimized AVX2 implementation * Bos, Ducas, Kiltz, Lepoint, Lyubashevsky, Schanck, Schwabe, Seiler, Stehlé * https://github.com/pq-crystals/kyber/tree/main/avx2 */ /* * This file is derived from the public domain * AVX2 Kyber implementation @[REF_AVX2]. */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/x86_64/src/nttunpack.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_nttunpack_avx2) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_nttunpack_avx2) S2N_BN_SYMBOL(mlkem_nttunpack_avx2): .cfi_startproc callq Lnttunpack_avx2_core addq $0x100, %rdi # imm = 0x100 callq Lnttunpack_avx2_core retq .cfi_endproc Lnttunpack_avx2_core: .cfi_startproc vmovdqa (%rdi), %ymm4 vmovdqa 0x20(%rdi), %ymm5 vmovdqa 0x40(%rdi), %ymm6 vmovdqa 0x60(%rdi), %ymm7 vmovdqa 0x80(%rdi), %ymm8 vmovdqa 0xa0(%rdi), %ymm9 vmovdqa 0xc0(%rdi), %ymm10 vmovdqa 0xe0(%rdi), %ymm11 vperm2i128 $0x20, %ymm8, %ymm4, %ymm3 # ymm3 = ymm4[0,1],ymm8[0,1] vperm2i128 $0x31, %ymm8, %ymm4, %ymm8 # ymm8 = ymm4[2,3],ymm8[2,3] vperm2i128 $0x20, %ymm9, %ymm5, %ymm4 # ymm4 = ymm5[0,1],ymm9[0,1] vperm2i128 $0x31, %ymm9, %ymm5, %ymm9 # ymm9 = ymm5[2,3],ymm9[2,3] vperm2i128 $0x20, %ymm10, %ymm6, %ymm5 # ymm5 = ymm6[0,1],ymm10[0,1] vperm2i128 $0x31, %ymm10, %ymm6, %ymm10 # ymm10 = ymm6[2,3],ymm10[2,3] vperm2i128 $0x20, %ymm11, %ymm7, %ymm6 # ymm6 = ymm7[0,1],ymm11[0,1] vperm2i128 $0x31, %ymm11, %ymm7, %ymm11 # ymm11 = ymm7[2,3],ymm11[2,3] vpunpcklqdq %ymm5, %ymm3, %ymm7 # ymm7 = ymm3[0],ymm5[0],ymm3[2],ymm5[2] vpunpckhqdq %ymm5, %ymm3, %ymm5 # ymm5 = ymm3[1],ymm5[1],ymm3[3],ymm5[3] vpunpcklqdq %ymm10, %ymm8, %ymm3 # ymm3 = ymm8[0],ymm10[0],ymm8[2],ymm10[2] vpunpckhqdq %ymm10, %ymm8, %ymm10 # ymm10 = ymm8[1],ymm10[1],ymm8[3],ymm10[3] vpunpcklqdq %ymm6, %ymm4, %ymm8 # ymm8 = ymm4[0],ymm6[0],ymm4[2],ymm6[2] vpunpckhqdq %ymm6, %ymm4, %ymm6 # ymm6 = ymm4[1],ymm6[1],ymm4[3],ymm6[3] vpunpcklqdq %ymm11, %ymm9, %ymm4 # ymm4 = ymm9[0],ymm11[0],ymm9[2],ymm11[2] vpunpckhqdq %ymm11, %ymm9, %ymm11 # ymm11 = ymm9[1],ymm11[1],ymm9[3],ymm11[3] vmovsldup %ymm8, %ymm9 # ymm9 = ymm8[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm9, %ymm7, %ymm9 # ymm9 = ymm7[0],ymm9[1],ymm7[2],ymm9[3],ymm7[4],ymm9[5],ymm7[6],ymm9[7] vpsrlq $0x20, %ymm7, %ymm7 vpblendd $0xaa, %ymm8, %ymm7, %ymm8 # ymm8 = ymm7[0],ymm8[1],ymm7[2],ymm8[3],ymm7[4],ymm8[5],ymm7[6],ymm8[7] vmovsldup %ymm6, %ymm7 # ymm7 = ymm6[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm7, %ymm5, %ymm7 # ymm7 = ymm5[0],ymm7[1],ymm5[2],ymm7[3],ymm5[4],ymm7[5],ymm5[6],ymm7[7] vpsrlq $0x20, %ymm5, %ymm5 vpblendd $0xaa, %ymm6, %ymm5, %ymm6 # ymm6 = ymm5[0],ymm6[1],ymm5[2],ymm6[3],ymm5[4],ymm6[5],ymm5[6],ymm6[7] vmovsldup %ymm4, %ymm5 # ymm5 = ymm4[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm5, %ymm3, %ymm5 # ymm5 = ymm3[0],ymm5[1],ymm3[2],ymm5[3],ymm3[4],ymm5[5],ymm3[6],ymm5[7] vpsrlq $0x20, %ymm3, %ymm3 vpblendd $0xaa, %ymm4, %ymm3, %ymm4 # ymm4 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7] vmovsldup %ymm11, %ymm3 # ymm3 = ymm11[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm3, %ymm10, %ymm3 # ymm3 = ymm10[0],ymm3[1],ymm10[2],ymm3[3],ymm10[4],ymm3[5],ymm10[6],ymm3[7] vpsrlq $0x20, %ymm10, %ymm10 vpblendd $0xaa, %ymm11, %ymm10, %ymm11 # ymm11 = ymm10[0],ymm11[1],ymm10[2],ymm11[3],ymm10[4],ymm11[5],ymm10[6],ymm11[7] vpslld $0x10, %ymm5, %ymm10 vpblendw $0xaa, %ymm10, 
%ymm9, %ymm10 # ymm10 = ymm9[0],ymm10[1],ymm9[2],ymm10[3],ymm9[4],ymm10[5],ymm9[6],ymm10[7],ymm9[8],ymm10[9],ymm9[10],ymm10[11],ymm9[12],ymm10[13],ymm9[14],ymm10[15] vpsrld $0x10, %ymm9, %ymm9 vpblendw $0xaa, %ymm5, %ymm9, %ymm5 # ymm5 = ymm9[0],ymm5[1],ymm9[2],ymm5[3],ymm9[4],ymm5[5],ymm9[6],ymm5[7],ymm9[8],ymm5[9],ymm9[10],ymm5[11],ymm9[12],ymm5[13],ymm9[14],ymm5[15] vpslld $0x10, %ymm4, %ymm9 vpblendw $0xaa, %ymm9, %ymm8, %ymm9 # ymm9 = ymm8[0],ymm9[1],ymm8[2],ymm9[3],ymm8[4],ymm9[5],ymm8[6],ymm9[7],ymm8[8],ymm9[9],ymm8[10],ymm9[11],ymm8[12],ymm9[13],ymm8[14],ymm9[15] vpsrld $0x10, %ymm8, %ymm8 vpblendw $0xaa, %ymm4, %ymm8, %ymm4 # ymm4 = ymm8[0],ymm4[1],ymm8[2],ymm4[3],ymm8[4],ymm4[5],ymm8[6],ymm4[7],ymm8[8],ymm4[9],ymm8[10],ymm4[11],ymm8[12],ymm4[13],ymm8[14],ymm4[15] vpslld $0x10, %ymm3, %ymm8 vpblendw $0xaa, %ymm8, %ymm7, %ymm8 # ymm8 = ymm7[0],ymm8[1],ymm7[2],ymm8[3],ymm7[4],ymm8[5],ymm7[6],ymm8[7],ymm7[8],ymm8[9],ymm7[10],ymm8[11],ymm7[12],ymm8[13],ymm7[14],ymm8[15] vpsrld $0x10, %ymm7, %ymm7 vpblendw $0xaa, %ymm3, %ymm7, %ymm3 # ymm3 = ymm7[0],ymm3[1],ymm7[2],ymm3[3],ymm7[4],ymm3[5],ymm7[6],ymm3[7],ymm7[8],ymm3[9],ymm7[10],ymm3[11],ymm7[12],ymm3[13],ymm7[14],ymm3[15] vpslld $0x10, %ymm11, %ymm7 vpblendw $0xaa, %ymm7, %ymm6, %ymm7 # ymm7 = ymm6[0],ymm7[1],ymm6[2],ymm7[3],ymm6[4],ymm7[5],ymm6[6],ymm7[7],ymm6[8],ymm7[9],ymm6[10],ymm7[11],ymm6[12],ymm7[13],ymm6[14],ymm7[15] vpsrld $0x10, %ymm6, %ymm6 vpblendw $0xaa, %ymm11, %ymm6, %ymm11 # ymm11 = ymm6[0],ymm11[1],ymm6[2],ymm11[3],ymm6[4],ymm11[5],ymm6[6],ymm11[7],ymm6[8],ymm11[9],ymm6[10],ymm11[11],ymm6[12],ymm11[13],ymm6[14],ymm11[15] vmovdqa %ymm10, (%rdi) vmovdqa %ymm5, 0x20(%rdi) vmovdqa %ymm9, 0x40(%rdi) vmovdqa %ymm4, 0x60(%rdi) vmovdqa %ymm8, 0x80(%rdi) vmovdqa %ymm3, 0xa0(%rdi) vmovdqa %ymm7, 0xc0(%rdi) vmovdqa %ymm11, 0xe0(%rdi) retq .cfi_endproc
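mlkem_nttunpack_avx2 above is a thin driver: the Lnttunpack_avx2_core label permutes one 128-coefficient half of the polynomial in registers, and the wrapper calls it twice, advancing %rdi by 0x100 bytes (128 * sizeof(int16_t)) between the calls. A small C sketch of just that driver pattern follows, assuming a 256-coefficient int16_t polynomial; unpack_half() is a placeholder, the real permutation network is the vperm2i128 / vpunpck / vpblend code above.

#include <stdint.h>

/* Placeholder for Lnttunpack_avx2_core; the actual lane permutation is
 * performed by the AVX2 instructions in the assembly above. */
static void unpack_half(int16_t *half)
{
    (void)half;
}

/* Two-half driver, mirroring "call core; addq $0x100, %rdi; call core". */
static void nttunpack(int16_t coeffs[256])
{
    unpack_half(coeffs);        /* coefficients   0..127 */
    unpack_half(coeffs + 128);  /* coefficients 128..255 */
}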
wlsfx/bnbb
4,141
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/x86_64/src/reduce.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /* References * ========== * * - [REF_AVX2] * CRYSTALS-Kyber optimized AVX2 implementation * Bos, Ducas, Kiltz, Lepoint, Lyubashevsky, Schanck, Schwabe, Seiler, Stehlé * https://github.com/pq-crystals/kyber/tree/main/avx2 */ /* * This file is derived from the public domain * AVX2 Kyber implementation @[REF_AVX2]. * * Changes: * - Add call to csub in reduce128_avx to produce outputs * in [0,1,...,q-1] rather than [0,1,...,q], matching the * semantics of mlk_poly_reduce(). */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/x86_64/src/reduce.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_reduce_avx2) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_reduce_avx2) S2N_BN_SYMBOL(mlkem_reduce_avx2): .cfi_startproc vmovdqa (%rsi), %ymm0 vmovdqa 0x40(%rsi), %ymm1 callq Lreduce_avx2_core addq $0x100, %rdi # imm = 0x100 callq Lreduce_avx2_core retq .cfi_endproc Lreduce_avx2_core: .cfi_startproc vmovdqa (%rdi), %ymm2 vmovdqa 0x20(%rdi), %ymm3 vmovdqa 0x40(%rdi), %ymm4 vmovdqa 0x60(%rdi), %ymm5 vmovdqa 0x80(%rdi), %ymm6 vmovdqa 0xa0(%rdi), %ymm7 vmovdqa 0xc0(%rdi), %ymm8 vmovdqa 0xe0(%rdi), %ymm9 vpmulhw %ymm1, %ymm2, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm2, %ymm2 vpmulhw %ymm1, %ymm3, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm3, %ymm3 vpmulhw %ymm1, %ymm4, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm4, %ymm4 vpmulhw %ymm1, %ymm5, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm5, %ymm5 vpmulhw %ymm1, %ymm6, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm6, %ymm6 vpmulhw %ymm1, %ymm7, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm7, %ymm7 vpmulhw %ymm1, %ymm8, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm8, %ymm8 vpmulhw %ymm1, %ymm9, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm9, %ymm9 vpsubw %ymm0, %ymm2, %ymm2 vpsraw $0xf, %ymm2, %ymm12 vpand %ymm0, %ymm12, %ymm12 vpaddw %ymm12, %ymm2, %ymm2 vpsubw %ymm0, %ymm3, %ymm3 vpsraw $0xf, %ymm3, %ymm12 vpand %ymm0, %ymm12, %ymm12 vpaddw %ymm12, %ymm3, %ymm3 vpsubw %ymm0, %ymm4, %ymm4 vpsraw $0xf, %ymm4, %ymm12 vpand %ymm0, %ymm12, %ymm12 vpaddw %ymm12, %ymm4, %ymm4 vpsubw %ymm0, %ymm5, %ymm5 vpsraw $0xf, %ymm5, %ymm12 vpand %ymm0, %ymm12, %ymm12 vpaddw %ymm12, %ymm5, %ymm5 vpsubw %ymm0, %ymm6, %ymm6 vpsraw $0xf, %ymm6, %ymm12 vpand %ymm0, %ymm12, %ymm12 vpaddw %ymm12, %ymm6, %ymm6 vpsubw %ymm0, %ymm7, %ymm7 vpsraw $0xf, %ymm7, %ymm12 vpand %ymm0, %ymm12, %ymm12 vpaddw %ymm12, %ymm7, %ymm7 vpsubw %ymm0, %ymm8, %ymm8 vpsraw $0xf, %ymm8, %ymm12 vpand %ymm0, %ymm12, %ymm12 vpaddw %ymm12, %ymm8, %ymm8 vpsubw %ymm0, %ymm9, %ymm9 vpsraw $0xf, %ymm9, %ymm12 vpand %ymm0, %ymm12, %ymm12 vpaddw %ymm12, %ymm9, %ymm9 vmovdqa %ymm2, (%rdi) vmovdqa %ymm3, 0x20(%rdi) vmovdqa %ymm4, 0x40(%rdi) vmovdqa %ymm5, 0x60(%rdi) vmovdqa %ymm6, 0x80(%rdi) vmovdqa %ymm7, 0xa0(%rdi) vmovdqa %ymm8, 0xc0(%rdi) vmovdqa %ymm9, 0xe0(%rdi) retq .cfi_endproc
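The header above notes that a "csub" step was added so the outputs land in [0, q-1] instead of [0, q]. Here is a scalar C sketch of one 16-bit lane of Lreduce_avx2_core, assuming the two constants loaded from the table at (%rsi) are q = 3329 and the Barrett constant v = 20159 (roughly 2^26 / q); the actual table contents are an assumption, not visible in this file. The last two statements are the conditional subtraction the header refers to.

#include <stdint.h>

#define MLKEM_Q 3329

static int16_t reduce_lane(int16_t a, int16_t v)
{
    /* Barrett reduction: vpmulhw, vpsraw $0xa, vpmullw, vpsubw */
    int16_t t = (int16_t)(((int32_t)a * v) >> 16);  /* vpmulhw            */
    t = (int16_t)(t >> 10);                         /* vpsraw $0xa        */
    a = (int16_t)(a - t * MLKEM_Q);                 /* vpmullw + vpsubw   */

    /* csub: subtract q, add it back if the result went negative */
    a = (int16_t)(a - MLKEM_Q);                     /* vpsubw              */
    a = (int16_t)(a + ((a >> 15) & MLKEM_Q));       /* vpsraw/vpand/vpaddw */
    return a;
}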
wlsfx/bnbb
3,018
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/x86_64/src/tomont.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /* References * ========== * * - [REF_AVX2] * CRYSTALS-Kyber optimized AVX2 implementation * Bos, Ducas, Kiltz, Lepoint, Lyubashevsky, Schanck, Schwabe, Seiler, Stehlé * https://github.com/pq-crystals/kyber/tree/main/avx2 */ /* * Implementation from Kyber reference repository @[REF_AVX2] * * Changes: * - Add call to csub in reduce128_avx to produce outputs * in [0,1,...,q-1] rather than [0,1,...,q], matching the * semantics of mlk_poly_reduce(). */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/x86_64/src/tomont.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_tomont_avx2) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_tomont_avx2) S2N_BN_SYMBOL(mlkem_tomont_avx2): .cfi_startproc vmovdqa (%rsi), %ymm0 vmovdqa 0xa0(%rsi), %ymm1 vmovdqa 0xc0(%rsi), %ymm2 callq Ltomont_avx2_core addq $0x100, %rdi # imm = 0x100 callq Ltomont_avx2_core retq .cfi_endproc Ltomont_avx2_core: .cfi_startproc vmovdqa (%rdi), %ymm3 vmovdqa 0x20(%rdi), %ymm4 vmovdqa 0x40(%rdi), %ymm5 vmovdqa 0x60(%rdi), %ymm6 vmovdqa 0x80(%rdi), %ymm7 vmovdqa 0xa0(%rdi), %ymm8 vmovdqa 0xc0(%rdi), %ymm9 vmovdqa 0xe0(%rdi), %ymm10 vpmullw %ymm1, %ymm3, %ymm11 vpmulhw %ymm2, %ymm3, %ymm3 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm11, %ymm3, %ymm3 vpmullw %ymm1, %ymm4, %ymm12 vpmulhw %ymm2, %ymm4, %ymm4 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm4, %ymm4 vpmullw %ymm1, %ymm5, %ymm13 vpmulhw %ymm2, %ymm5, %ymm5 vpmulhw %ymm0, %ymm13, %ymm13 vpsubw %ymm13, %ymm5, %ymm5 vpmullw %ymm1, %ymm6, %ymm14 vpmulhw %ymm2, %ymm6, %ymm6 vpmulhw %ymm0, %ymm14, %ymm14 vpsubw %ymm14, %ymm6, %ymm6 vpmullw %ymm1, %ymm7, %ymm15 vpmulhw %ymm2, %ymm7, %ymm7 vpmulhw %ymm0, %ymm15, %ymm15 vpsubw %ymm15, %ymm7, %ymm7 vpmullw %ymm1, %ymm8, %ymm11 vpmulhw %ymm2, %ymm8, %ymm8 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm11, %ymm8, %ymm8 vpmullw %ymm1, %ymm9, %ymm12 vpmulhw %ymm2, %ymm9, %ymm9 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm9, %ymm9 vpmullw %ymm1, %ymm10, %ymm13 vpmulhw %ymm2, %ymm10, %ymm10 vpmulhw %ymm0, %ymm13, %ymm13 vpsubw %ymm13, %ymm10, %ymm10 vmovdqa %ymm3, (%rdi) vmovdqa %ymm4, 0x20(%rdi) vmovdqa %ymm5, 0x40(%rdi) vmovdqa %ymm6, 0x60(%rdi) vmovdqa %ymm7, 0x80(%rdi) vmovdqa %ymm8, 0xa0(%rdi) vmovdqa %ymm9, 0xc0(%rdi) vmovdqa %ymm10, 0xe0(%rdi) retq .cfi_endproc
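The body of Ltomont_avx2_core repeats one four-instruction group (vpmullw, vpmulhw, vpmulhw, vpsubw) eight times: a signed 16-bit Montgomery multiplication by a fixed conversion factor. A scalar C sketch of that group follows, in the Kyber-reference style; the broadcast constants are assumed to be the factor f and f * q^-1 mod 2^16, and the assembly forms the low half m with a single vpmullw against the precomputed f * q^-1, which yields the same low 16 bits as the two-step form below. The concrete qdata values are an assumption, not read from this file.

#include <stdint.h>

#define MLKEM_Q    3329
#define MLKEM_QINV (-3327) /* q^-1 mod 2^16, as a signed 16-bit value */

/* Montgomery multiplication: returns a * f * 2^-16 mod q, in (-q, q). */
static int16_t montgomery_mul(int16_t a, int16_t f)
{
    int32_t p = (int32_t)a * f;
    int16_t m = (int16_t)((int16_t)p * MLKEM_QINV);      /* low halves (vpmullw)  */
    return (int16_t)((p - (int32_t)m * MLKEM_Q) >> 16);  /* high halves (vpsubw)  */
}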
wlsfx/bnbb
2,591
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/x86_64/src/mulcache_compute.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/x86_64/src/mulcache_compute.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_poly_mulcache_compute_avx2) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_poly_mulcache_compute_avx2) S2N_BN_SYMBOL(mlkem_poly_mulcache_compute_avx2): .cfi_startproc vmovdqa (%rdx), %ymm0 vmovdqa 0x20(%rsi), %ymm2 vmovdqa 0x60(%rsi), %ymm3 vmovdqa 0x500(%rdx), %ymm4 vmovdqa 0x580(%rdx), %ymm1 vpmullw %ymm2, %ymm1, %ymm5 vpmullw %ymm3, %ymm1, %ymm6 vpmulhw %ymm2, %ymm4, %ymm7 vpmulhw %ymm3, %ymm4, %ymm8 vpmulhw %ymm5, %ymm0, %ymm9 vpmulhw %ymm6, %ymm0, %ymm10 vpsubw %ymm9, %ymm7, %ymm7 vpsubw %ymm10, %ymm8, %ymm8 vmovdqa %ymm7, (%rdi) vmovdqa %ymm8, 0x20(%rdi) vmovdqa 0xa0(%rsi), %ymm2 vmovdqa 0xe0(%rsi), %ymm3 vmovdqa 0x520(%rdx), %ymm4 vmovdqa 0x5a0(%rdx), %ymm1 vpmullw %ymm2, %ymm1, %ymm5 vpmullw %ymm3, %ymm1, %ymm6 vpmulhw %ymm2, %ymm4, %ymm7 vpmulhw %ymm3, %ymm4, %ymm8 vpmulhw %ymm5, %ymm0, %ymm9 vpmulhw %ymm6, %ymm0, %ymm10 vpsubw %ymm9, %ymm7, %ymm7 vpsubw %ymm10, %ymm8, %ymm8 vmovdqa %ymm7, 0x40(%rdi) vmovdqa %ymm8, 0x60(%rdi) vmovdqa 0x120(%rsi), %ymm2 vmovdqa 0x160(%rsi), %ymm3 vmovdqa 0x540(%rdx), %ymm4 vmovdqa 0x5c0(%rdx), %ymm1 vpmullw %ymm2, %ymm1, %ymm5 vpmullw %ymm3, %ymm1, %ymm6 vpmulhw %ymm2, %ymm4, %ymm7 vpmulhw %ymm3, %ymm4, %ymm8 vpmulhw %ymm5, %ymm0, %ymm9 vpmulhw %ymm6, %ymm0, %ymm10 vpsubw %ymm9, %ymm7, %ymm7 vpsubw %ymm10, %ymm8, %ymm8 vmovdqa %ymm7, 0x80(%rdi) vmovdqa %ymm8, 0xa0(%rdi) vmovdqa 0x1a0(%rsi), %ymm2 vmovdqa 0x1e0(%rsi), %ymm3 vmovdqa 0x560(%rdx), %ymm4 vmovdqa 0x5e0(%rdx), %ymm1 vpmullw %ymm2, %ymm1, %ymm5 vpmullw %ymm3, %ymm1, %ymm6 vpmulhw %ymm2, %ymm4, %ymm7 vpmulhw %ymm3, %ymm4, %ymm8 vpmulhw %ymm5, %ymm0, %ymm9 vpmulhw %ymm6, %ymm0, %ymm10 vpsubw %ymm9, %ymm7, %ymm7 vpsubw %ymm10, %ymm8, %ymm8 vmovdqa %ymm7, 0xc0(%rdi) vmovdqa %ymm8, 0xe0(%rdi) retq .cfi_endproc
wlsfx/bnbb
3,051
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/x86_64/src/rej_uniform_asm.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /************************************************* * Name: mlk_rej_uniform_asm * * Description: Run rejection sampling on uniform random bytes to generate * uniform random integers mod q * * Arguments: - int16_t *r: pointer to output buffer of MLKEM_N * 16-bit coefficients. * - const uint8_t *buf: pointer to input buffer * (assumed to be uniform random bytes) * - unsigned buflen: length of input buffer in bytes. * Must be a multiple of 12. * * Returns number of sampled 16-bit integers (at most MLKEM_N). **************************************************/ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/x86_64/src/rej_uniform_asm.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_rej_uniform_asm) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_rej_uniform_asm) S2N_BN_SYMBOL(mlkem_rej_uniform_asm): .cfi_startproc subq $0x210, %rsp # imm = 0x210 .cfi_adjust_cfa_offset 0x210 movabsq $0xd010d010d010d01, %rax # imm = 0xD010D010D010D01 movq %rax, %xmm0 pinsrq $0x1, %rax, %xmm0 movabsq $0xfff0fff0fff0fff, %rax # imm = 0xFFF0FFF0FFF0FFF movq %rax, %xmm5 pinsrq $0x1, %rax, %xmm5 movabsq $0x504040302010100, %rax # imm = 0x504040302010100 movq %rax, %xmm4 movabsq $0xb0a0a0908070706, %rax # imm = 0xB0A0A0908070706 pinsrq $0x1, %rax, %xmm4 movq $0x0, %rax movq $0x0, %r8 movq $0x5555, %r9 # imm = 0x5555 Lrej_uniform_asm_loop_start: movdqu (%rsi,%r8), %xmm2 pshufb %xmm4, %xmm2 movdqa %xmm2, %xmm6 psrlw $0x4, %xmm6 pblendw $0xaa, %xmm6, %xmm2 # xmm2 = xmm2[0],xmm6[1],xmm2[2],xmm6[3],xmm2[4],xmm6[5],xmm2[6],xmm6[7] pand %xmm5, %xmm2 movdqa %xmm0, %xmm1 pcmpgtw %xmm2, %xmm1 pmovmskb %xmm1, %r11d pextq %r9, %r11, %r11 movq %r11, %r10 shlq $0x4, %r10 movdqu (%rcx,%r10), %xmm3 pshufb %xmm3, %xmm2 movdqu %xmm2, (%rsp,%rax,2) popcntq %r11, %r11 addq %r11, %rax cmpq $0x100, %rax # imm = 0x100 jae Lrej_uniform_asm_final_copy addq $0xc, %r8 cmpq %r8, %rdx ja Lrej_uniform_asm_loop_start Lrej_uniform_asm_final_copy: movq $0x100, %rcx # imm = 0x100 cmpq $0x100, %rax # imm = 0x100 cmovaq %rcx, %rax movq %rsp, %rsi movq %rax, %rcx shlq %rcx rep movsb (%rsi), %es:(%rdi) addq $0x210, %rsp # imm = 0x210 .cfi_adjust_cfa_offset -0x210 retq .cfi_endproc
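The header above documents the contract of mlkem_rej_uniform_asm. For reference, here is a scalar C sketch of the same rejection sampling in the reference-code shape (the assembly batches 12 bytes per iteration with pcmpgtw/pext/pshufb, which this sketch does not reproduce): every 3 input bytes yield two 12-bit candidates, and a candidate is kept only if it is below q. Names and the 3-byte stride are illustrative.

#include <stddef.h>
#include <stdint.h>

#define MLKEM_Q 3329
#define MLKEM_N 256

/* Returns the number of accepted coefficients written to r (at most MLKEM_N). */
static unsigned rej_uniform(int16_t *r, const uint8_t *buf, size_t buflen)
{
    unsigned ctr = 0;
    size_t pos = 0;

    while (ctr < MLKEM_N && pos + 3 <= buflen) {
        uint16_t d1 = (uint16_t)((buf[pos] | ((uint16_t)buf[pos + 1] << 8)) & 0xFFF);
        uint16_t d2 = (uint16_t)(((buf[pos + 1] >> 4) | ((uint16_t)buf[pos + 2] << 4)) & 0xFFF);
        pos += 3;

        if (d1 < MLKEM_Q)
            r[ctr++] = (int16_t)d1;
        if (ctr < MLKEM_N && d2 < MLKEM_Q)
            r[ctr++] = (int16_t)d2;
    }
    return ctr;
}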
wlsfx/bnbb
5,692
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/x86_64/src/nttfrombytes.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /* References * ========== * * - [REF_AVX2] * CRYSTALS-Kyber optimized AVX2 implementation * Bos, Ducas, Kiltz, Lepoint, Lyubashevsky, Schanck, Schwabe, Seiler, Stehlé * https://github.com/pq-crystals/kyber/tree/main/avx2 */ /* * This file is derived from the public domain * AVX2 Kyber implementation @[REF_AVX2]. */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/x86_64/src/nttfrombytes.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_nttfrombytes_avx2) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_nttfrombytes_avx2) S2N_BN_SYMBOL(mlkem_nttfrombytes_avx2): .cfi_startproc vmovdqa 0xe0(%rdx), %ymm0 callq Lnttfrombytes_avx2_core addq $0x100, %rdi # imm = 0x100 addq $0xc0, %rsi callq Lnttfrombytes_avx2_core retq .cfi_endproc Lnttfrombytes_avx2_core: .cfi_startproc vmovdqu (%rsi), %ymm4 vmovdqu 0x20(%rsi), %ymm5 vmovdqu 0x40(%rsi), %ymm6 vmovdqu 0x60(%rsi), %ymm7 vmovdqu 0x80(%rsi), %ymm8 vmovdqu 0xa0(%rsi), %ymm9 vperm2i128 $0x20, %ymm7, %ymm4, %ymm3 # ymm3 = ymm4[0,1],ymm7[0,1] vperm2i128 $0x31, %ymm7, %ymm4, %ymm7 # ymm7 = ymm4[2,3],ymm7[2,3] vperm2i128 $0x20, %ymm8, %ymm5, %ymm4 # ymm4 = ymm5[0,1],ymm8[0,1] vperm2i128 $0x31, %ymm8, %ymm5, %ymm8 # ymm8 = ymm5[2,3],ymm8[2,3] vperm2i128 $0x20, %ymm9, %ymm6, %ymm5 # ymm5 = ymm6[0,1],ymm9[0,1] vperm2i128 $0x31, %ymm9, %ymm6, %ymm9 # ymm9 = ymm6[2,3],ymm9[2,3] vpunpcklqdq %ymm8, %ymm3, %ymm6 # ymm6 = ymm3[0],ymm8[0],ymm3[2],ymm8[2] vpunpckhqdq %ymm8, %ymm3, %ymm8 # ymm8 = ymm3[1],ymm8[1],ymm3[3],ymm8[3] vpunpcklqdq %ymm5, %ymm7, %ymm3 # ymm3 = ymm7[0],ymm5[0],ymm7[2],ymm5[2] vpunpckhqdq %ymm5, %ymm7, %ymm5 # ymm5 = ymm7[1],ymm5[1],ymm7[3],ymm5[3] vpunpcklqdq %ymm9, %ymm4, %ymm7 # ymm7 = ymm4[0],ymm9[0],ymm4[2],ymm9[2] vpunpckhqdq %ymm9, %ymm4, %ymm9 # ymm9 = ymm4[1],ymm9[1],ymm4[3],ymm9[3] vmovsldup %ymm5, %ymm4 # ymm4 = ymm5[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm4, %ymm6, %ymm4 # ymm4 = ymm6[0],ymm4[1],ymm6[2],ymm4[3],ymm6[4],ymm4[5],ymm6[6],ymm4[7] vpsrlq $0x20, %ymm6, %ymm6 vpblendd $0xaa, %ymm5, %ymm6, %ymm5 # ymm5 = ymm6[0],ymm5[1],ymm6[2],ymm5[3],ymm6[4],ymm5[5],ymm6[6],ymm5[7] vmovsldup %ymm7, %ymm6 # ymm6 = ymm7[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm6, %ymm8, %ymm6 # ymm6 = ymm8[0],ymm6[1],ymm8[2],ymm6[3],ymm8[4],ymm6[5],ymm8[6],ymm6[7] vpsrlq $0x20, %ymm8, %ymm8 vpblendd $0xaa, %ymm7, %ymm8, %ymm7 # ymm7 = ymm8[0],ymm7[1],ymm8[2],ymm7[3],ymm8[4],ymm7[5],ymm8[6],ymm7[7] vmovsldup %ymm9, %ymm8 # ymm8 = ymm9[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm8, %ymm3, %ymm8 # ymm8 = ymm3[0],ymm8[1],ymm3[2],ymm8[3],ymm3[4],ymm8[5],ymm3[6],ymm8[7] vpsrlq $0x20, %ymm3, %ymm3 vpblendd $0xaa, %ymm9, %ymm3, %ymm9 # ymm9 = ymm3[0],ymm9[1],ymm3[2],ymm9[3],ymm3[4],ymm9[5],ymm3[6],ymm9[7] vpslld $0x10, %ymm7, %ymm10 vpblendw $0xaa, %ymm10, %ymm4, %ymm10 # ymm10 = ymm4[0],ymm10[1],ymm4[2],ymm10[3],ymm4[4],ymm10[5],ymm4[6],ymm10[7],ymm4[8],ymm10[9],ymm4[10],ymm10[11],ymm4[12],ymm10[13],ymm4[14],ymm10[15] vpsrld $0x10, %ymm4, %ymm4 vpblendw $0xaa, %ymm7, %ymm4, %ymm7 # ymm7 = ymm4[0],ymm7[1],ymm4[2],ymm7[3],ymm4[4],ymm7[5],ymm4[6],ymm7[7],ymm4[8],ymm7[9],ymm4[10],ymm7[11],ymm4[12],ymm7[13],ymm4[14],ymm7[15] vpslld $0x10, %ymm8, %ymm4 vpblendw $0xaa, %ymm4, %ymm5, %ymm4 # ymm4 = ymm5[0],ymm4[1],ymm5[2],ymm4[3],ymm5[4],ymm4[5],ymm5[6],ymm4[7],ymm5[8],ymm4[9],ymm5[10],ymm4[11],ymm5[12],ymm4[13],ymm5[14],ymm4[15] vpsrld $0x10, %ymm5, %ymm5 vpblendw $0xaa, 
%ymm8, %ymm5, %ymm8 # ymm8 = ymm5[0],ymm8[1],ymm5[2],ymm8[3],ymm5[4],ymm8[5],ymm5[6],ymm8[7],ymm5[8],ymm8[9],ymm5[10],ymm8[11],ymm5[12],ymm8[13],ymm5[14],ymm8[15] vpslld $0x10, %ymm9, %ymm5 vpblendw $0xaa, %ymm5, %ymm6, %ymm5 # ymm5 = ymm6[0],ymm5[1],ymm6[2],ymm5[3],ymm6[4],ymm5[5],ymm6[6],ymm5[7],ymm6[8],ymm5[9],ymm6[10],ymm5[11],ymm6[12],ymm5[13],ymm6[14],ymm5[15] vpsrld $0x10, %ymm6, %ymm6 vpblendw $0xaa, %ymm9, %ymm6, %ymm9 # ymm9 = ymm6[0],ymm9[1],ymm6[2],ymm9[3],ymm6[4],ymm9[5],ymm6[6],ymm9[7],ymm6[8],ymm9[9],ymm6[10],ymm9[11],ymm6[12],ymm9[13],ymm6[14],ymm9[15] vpsrlw $0xc, %ymm10, %ymm11 vpsllw $0x4, %ymm7, %ymm12 vpor %ymm11, %ymm12, %ymm11 vpand %ymm0, %ymm10, %ymm10 vpand %ymm0, %ymm11, %ymm11 vpsrlw $0x8, %ymm7, %ymm12 vpsllw $0x8, %ymm4, %ymm13 vpor %ymm12, %ymm13, %ymm12 vpand %ymm0, %ymm12, %ymm12 vpsrlw $0x4, %ymm4, %ymm13 vpand %ymm0, %ymm13, %ymm13 vpsrlw $0xc, %ymm8, %ymm14 vpsllw $0x4, %ymm5, %ymm15 vpor %ymm14, %ymm15, %ymm14 vpand %ymm0, %ymm8, %ymm8 vpand %ymm0, %ymm14, %ymm14 vpsrlw $0x8, %ymm5, %ymm15 vpsllw $0x8, %ymm9, %ymm1 vpor %ymm15, %ymm1, %ymm15 vpand %ymm0, %ymm15, %ymm15 vpsrlw $0x4, %ymm9, %ymm1 vpand %ymm0, %ymm1, %ymm1 vmovdqa %ymm10, (%rdi) vmovdqa %ymm11, 0x20(%rdi) vmovdqa %ymm12, 0x40(%rdi) vmovdqa %ymm13, 0x60(%rdi) vmovdqa %ymm8, 0x80(%rdi) vmovdqa %ymm14, 0xa0(%rdi) vmovdqa %ymm15, 0xc0(%rdi) vmovdqa %ymm1, 0xe0(%rdi) retq .cfi_endproc
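The shift-and-mask tail of Lnttfrombytes_avx2_core (vpsrlw $0xc, vpsllw $0x4/$0x8, vpor, vpand) splits a 12-bit-packed byte stream into 16-bit coefficients; the mask register is assumed to hold 0xfff. A scalar C sketch of that unpacking follows: every 3 bytes of the serialized polynomial carry two 12-bit coefficients. The AVX2 routine additionally permutes lanes so the output lands in its internal NTT ordering, which this sketch does not reproduce.

#include <stdint.h>

/* Unpack one 3-byte group into two 12-bit coefficients. */
static void frombytes_pair(int16_t r[2], const uint8_t a[3])
{
    r[0] = (int16_t)((a[0] | ((uint16_t)a[1] << 8)) & 0xFFF);
    r[1] = (int16_t)(((a[1] >> 4) | ((uint16_t)a[2] << 4)) & 0xFFF);
}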
wlsfx/bnbb
17,457
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/x86_64/src/polyvec_basemul_acc_montgomery_cached_asm_k2.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/x86_64/src/polyvec_basemul_acc_montgomery_cached_asm_k2.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k2) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k2) S2N_BN_SYMBOL(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k2): .cfi_startproc vmovdqa (%r8), %ymm0 vmovdqa 0x20(%r8), %ymm1 vmovdqa (%rsi), %ymm2 vmovdqa 0x20(%rsi), %ymm3 vmovdqa (%rdx), %ymm4 vmovdqa 0x20(%rdx), %ymm5 vmovdqa (%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, (%rdi) vmovdqa %ymm9, 0x20(%rdi) vmovdqa 0x40(%rsi), %ymm2 vmovdqa 0x60(%rsi), %ymm3 vmovdqa 0x40(%rdx), %ymm4 vmovdqa 0x60(%rdx), %ymm5 vmovdqa 0x20(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x40(%rdi) vmovdqa %ymm9, 0x60(%rdi) vmovdqa 0x80(%rsi), %ymm2 vmovdqa 0xa0(%rsi), %ymm3 vmovdqa 0x80(%rdx), %ymm4 vmovdqa 0xa0(%rdx), %ymm5 vmovdqa 0x40(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x80(%rdi) vmovdqa %ymm9, 0xa0(%rdi) vmovdqa 0xc0(%rsi), %ymm2 vmovdqa 0xe0(%rsi), %ymm3 vmovdqa 0xc0(%rdx), %ymm4 vmovdqa 0xe0(%rdx), %ymm5 vmovdqa 0x60(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw 
%ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0xc0(%rdi) vmovdqa %ymm9, 0xe0(%rdi) vmovdqa 0x100(%rsi), %ymm2 vmovdqa 0x120(%rsi), %ymm3 vmovdqa 0x100(%rdx), %ymm4 vmovdqa 0x120(%rdx), %ymm5 vmovdqa 0x80(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x100(%rdi) vmovdqa %ymm9, 0x120(%rdi) vmovdqa 0x140(%rsi), %ymm2 vmovdqa 0x160(%rsi), %ymm3 vmovdqa 0x140(%rdx), %ymm4 vmovdqa 0x160(%rdx), %ymm5 vmovdqa 0xa0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x140(%rdi) vmovdqa %ymm9, 0x160(%rdi) vmovdqa 0x180(%rsi), %ymm2 vmovdqa 0x1a0(%rsi), %ymm3 vmovdqa 0x180(%rdx), %ymm4 vmovdqa 0x1a0(%rdx), %ymm5 vmovdqa 0xc0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x180(%rdi) vmovdqa %ymm9, 0x1a0(%rdi) vmovdqa 0x1c0(%rsi), %ymm2 vmovdqa 0x1e0(%rsi), %ymm3 vmovdqa 0x1c0(%rdx), %ymm4 vmovdqa 0x1e0(%rdx), %ymm5 vmovdqa 0xe0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x1c0(%rdi) vmovdqa %ymm9, 0x1e0(%rdi) vmovdqa 0x200(%rsi), %ymm2 vmovdqa 0x220(%rsi), %ymm3 vmovdqa 0x200(%rdx), %ymm4 vmovdqa 0x220(%rdx), %ymm5 vmovdqa 0x100(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw 
%ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa (%rdi), %ymm8 vmovdqa 0x20(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, (%rdi) vmovdqa %ymm9, 0x20(%rdi) vmovdqa 0x240(%rsi), %ymm2 vmovdqa 0x260(%rsi), %ymm3 vmovdqa 0x240(%rdx), %ymm4 vmovdqa 0x260(%rdx), %ymm5 vmovdqa 0x120(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x40(%rdi), %ymm8 vmovdqa 0x60(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x40(%rdi) vmovdqa %ymm9, 0x60(%rdi) vmovdqa 0x280(%rsi), %ymm2 vmovdqa 0x2a0(%rsi), %ymm3 vmovdqa 0x280(%rdx), %ymm4 vmovdqa 0x2a0(%rdx), %ymm5 vmovdqa 0x140(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x80(%rdi), %ymm8 vmovdqa 0xa0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x80(%rdi) vmovdqa %ymm9, 0xa0(%rdi) vmovdqa 0x2c0(%rsi), %ymm2 vmovdqa 0x2e0(%rsi), %ymm3 vmovdqa 0x2c0(%rdx), %ymm4 vmovdqa 0x2e0(%rdx), %ymm5 vmovdqa 0x160(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0xc0(%rdi), %ymm8 vmovdqa 0xe0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0xc0(%rdi) vmovdqa %ymm9, 0xe0(%rdi) vmovdqa 0x300(%rsi), %ymm2 vmovdqa 0x320(%rsi), %ymm3 vmovdqa 0x300(%rdx), %ymm4 vmovdqa 0x320(%rdx), %ymm5 vmovdqa 0x180(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 
vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x100(%rdi), %ymm8 vmovdqa 0x120(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x100(%rdi) vmovdqa %ymm9, 0x120(%rdi) vmovdqa 0x340(%rsi), %ymm2 vmovdqa 0x360(%rsi), %ymm3 vmovdqa 0x340(%rdx), %ymm4 vmovdqa 0x360(%rdx), %ymm5 vmovdqa 0x1a0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x140(%rdi), %ymm8 vmovdqa 0x160(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x140(%rdi) vmovdqa %ymm9, 0x160(%rdi) vmovdqa 0x380(%rsi), %ymm2 vmovdqa 0x3a0(%rsi), %ymm3 vmovdqa 0x380(%rdx), %ymm4 vmovdqa 0x3a0(%rdx), %ymm5 vmovdqa 0x1c0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x180(%rdi), %ymm8 vmovdqa 0x1a0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x180(%rdi) vmovdqa %ymm9, 0x1a0(%rdi) vmovdqa 0x3c0(%rsi), %ymm2 vmovdqa 0x3e0(%rsi), %ymm3 vmovdqa 0x3c0(%rdx), %ymm4 vmovdqa 0x3e0(%rdx), %ymm5 vmovdqa 0x1e0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x1c0(%rdi), %ymm8 vmovdqa 0x1e0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x1c0(%rdi) vmovdqa %ymm9, 0x1e0(%rdi) retq .cfi_endproc
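The arithmetic repeated in every block above is signed Montgomery multiplication: ymm0 holds q = 3329 broadcast and ymm1 holds q^-1 mod 2^16; for each operand pair the kernel forms mullo(a, qinv) once (ymm13/ymm14) and reuses it across several products, and the third input stream (%rcx) supplies the cached values b1*zeta, so one degree-2 base multiplication in Z_q[X]/(X^2 - zeta) costs four Montgomery multiplications and two additions. The k2 variant runs this over both polyvec components and accumulates the second pass onto the first via the vpaddw on values reloaded from (%rdi). A scalar sketch of the same arithmetic, following the reference C code (constant and function names here are illustrative, not taken from this repository):

#include <stdint.h>

#define MLKEM_Q    3329
#define MLKEM_QINV -3327   /* q^-1 mod 2^16 as a signed 16-bit value */

/* Signed Montgomery reduction: returns a * 2^-16 mod q in (-q, q). The
 * vpmullw(qinv)/vpmullw(b)/vpmulhw(q)/vpmulhw(a,b)/vpsubw pattern above
 * performs exactly this computation, 16 lanes at a time. */
static int16_t montgomery_reduce(int32_t a)
{
    int16_t m = (int16_t)((int16_t)a * MLKEM_QINV);
    return (int16_t)((a - (int32_t)m * MLKEM_Q) >> 16);
}

static int16_t fqmul(int16_t a, int16_t b)
{
    return montgomery_reduce((int32_t)a * b);
}

/* One base multiplication in Z_q[X]/(X^2 - zeta); b1_zeta is the cached
 * twiddle product of b1 streamed in through %rcx (its Montgomery scaling
 * follows whatever convention the caller's mulcache uses). */
static void basemul_cached(int16_t r[2], const int16_t a[2],
                           const int16_t b[2], int16_t b1_zeta)
{
    r[0] = (int16_t)(fqmul(a[0], b[0]) + fqmul(a[1], b1_zeta));
    r[1] = (int16_t)(fqmul(a[0], b[1]) + fqmul(a[1], b[0]));
}

Caching b1*zeta (the "montgomery_cached" part of the name) avoids recomputing the twiddle product for every accumulated component; each Montgomery product lies in (-q, q), so the accumulated sums stay below 4q in magnitude and the kernel can store them without a further reduction, which matches the absence of any reduction instructions before the vmovdqa stores above.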
wlsfx/bnbb
30,849
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/x86_64/src/intt.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /* References * ========== * * - [AVX2_NTT] * Faster AVX2 optimized NTT multiplication for Ring-LWE lattice cryptography. * Gregor Seiler * https://eprint.iacr.org/2018/039 * * - [REF_AVX2] * CRYSTALS-Kyber optimized AVX2 implementation * Bos, Ducas, Kiltz, Lepoint, Lyubashevsky, Schanck, Schwabe, Seiler, Stehlé * https://github.com/pq-crystals/kyber/tree/main/avx2 */ /* * This file is derived from the public domain * AVX2 Kyber implementation @[REF_AVX2]. * * The core ideas behind the implementation are described in @[AVX2_NTT]. * * Changes: * - Different placement of modular reductions to simplify * reasoning of non-overflow */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/x86_64/src/intt.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_invntt_avx2) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_invntt_avx2) S2N_BN_SYMBOL(mlkem_invntt_avx2): .cfi_startproc vmovdqa (%rsi), %ymm0 vmovdqa 0x60(%rsi), %ymm2 vmovdqa 0x80(%rsi), %ymm3 vmovdqa (%rdi), %ymm4 vmovdqa 0x40(%rdi), %ymm6 vmovdqa 0x20(%rdi), %ymm5 vmovdqa 0x60(%rdi), %ymm7 vpmullw %ymm2, %ymm4, %ymm12 vpmulhw %ymm3, %ymm4, %ymm4 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm4, %ymm4 vpmullw %ymm2, %ymm6, %ymm12 vpmulhw %ymm3, %ymm6, %ymm6 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm6, %ymm6 vpmullw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm5, %ymm5 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm5, %ymm5 vpmullw %ymm2, %ymm7, %ymm12 vpmulhw %ymm3, %ymm7, %ymm7 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm7, %ymm7 vmovdqa 0x80(%rdi), %ymm8 vmovdqa 0xc0(%rdi), %ymm10 vmovdqa 0xa0(%rdi), %ymm9 vmovdqa 0xe0(%rdi), %ymm11 vpmullw %ymm2, %ymm8, %ymm12 vpmulhw %ymm3, %ymm8, %ymm8 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm8, %ymm8 vpmullw %ymm2, %ymm10, %ymm12 vpmulhw %ymm3, %ymm10, %ymm10 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm10, %ymm10 vpmullw %ymm2, %ymm9, %ymm12 vpmulhw %ymm3, %ymm9, %ymm9 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm9, %ymm9 vpmullw %ymm2, %ymm11, %ymm12 vpmulhw %ymm3, %ymm11, %ymm11 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm11, %ymm11 vpermq $0x4e, 0x4a0(%rsi), %ymm15 # ymm15 = mem[2,3,0,1] vpermq $0x4e, 0x460(%rsi), %ymm1 # ymm1 = mem[2,3,0,1] vpermq $0x4e, 0x4c0(%rsi), %ymm2 # ymm2 = mem[2,3,0,1] vpermq $0x4e, 0x480(%rsi), %ymm3 # ymm3 = mem[2,3,0,1] vmovdqa 0x100(%rsi), %ymm12 vpshufb %ymm12, %ymm15, %ymm15 vpshufb %ymm12, %ymm1, %ymm1 vpshufb %ymm12, %ymm2, %ymm2 vpshufb %ymm12, %ymm3, %ymm3 vpsubw %ymm4, %ymm6, %ymm12 vpaddw %ymm6, %ymm4, %ymm4 vpsubw %ymm5, %ymm7, %ymm13 vpmullw %ymm15, %ymm12, %ymm6 vpaddw %ymm7, %ymm5, %ymm5 vpsubw %ymm8, %ymm10, %ymm14 vpmullw %ymm15, %ymm13, %ymm7 vpaddw %ymm10, %ymm8, %ymm8 vpsubw %ymm9, %ymm11, %ymm15 vpmullw %ymm1, %ymm14, %ymm10 vpaddw %ymm11, %ymm9, %ymm9 vpmullw %ymm1, %ymm15, %ymm11 vpmulhw %ymm2, %ymm12, %ymm12 vpmulhw %ymm2, %ymm13, %ymm13 vpmulhw %ymm3, %ymm14, %ymm14 vpmulhw %ymm3, %ymm15, %ymm15 vpmulhw %ymm0, %ymm6, %ymm6 vpmulhw %ymm0, %ymm7, %ymm7 vpmulhw %ymm0, %ymm10, %ymm10 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm6, %ymm12, %ymm6 vpsubw %ymm7, %ymm13, %ymm7 vpsubw %ymm10, %ymm14, %ymm10 vpsubw %ymm11, %ymm15, %ymm11 vpermq $0x4e, 0x420(%rsi), %ymm2 # ymm2 = mem[2,3,0,1] vpermq $0x4e, 0x440(%rsi), %ymm3 # ymm3 = mem[2,3,0,1] vmovdqa 0x100(%rsi), %ymm1 vpshufb %ymm1, %ymm2, %ymm2 vpshufb %ymm1, %ymm3, 
%ymm3 vpsubw %ymm4, %ymm8, %ymm12 vpaddw %ymm8, %ymm4, %ymm4 vpsubw %ymm5, %ymm9, %ymm13 vpmullw %ymm2, %ymm12, %ymm8 vpaddw %ymm9, %ymm5, %ymm5 vpsubw %ymm6, %ymm10, %ymm14 vpmullw %ymm2, %ymm13, %ymm9 vpaddw %ymm10, %ymm6, %ymm6 vpsubw %ymm7, %ymm11, %ymm15 vpmullw %ymm2, %ymm14, %ymm10 vpaddw %ymm11, %ymm7, %ymm7 vpmullw %ymm2, %ymm15, %ymm11 vpmulhw %ymm3, %ymm12, %ymm12 vpmulhw %ymm3, %ymm13, %ymm13 vpmulhw %ymm3, %ymm14, %ymm14 vpmulhw %ymm3, %ymm15, %ymm15 vpmulhw %ymm0, %ymm8, %ymm8 vpmulhw %ymm0, %ymm9, %ymm9 vpmulhw %ymm0, %ymm10, %ymm10 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm8, %ymm12, %ymm8 vpsubw %ymm9, %ymm13, %ymm9 vpsubw %ymm10, %ymm14, %ymm10 vpsubw %ymm11, %ymm15, %ymm11 vpslld $0x10, %ymm5, %ymm3 vpblendw $0xaa, %ymm3, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7],ymm4[8],ymm3[9],ymm4[10],ymm3[11],ymm4[12],ymm3[13],ymm4[14],ymm3[15] vpsrld $0x10, %ymm4, %ymm4 vpblendw $0xaa, %ymm5, %ymm4, %ymm5 # ymm5 = ymm4[0],ymm5[1],ymm4[2],ymm5[3],ymm4[4],ymm5[5],ymm4[6],ymm5[7],ymm4[8],ymm5[9],ymm4[10],ymm5[11],ymm4[12],ymm5[13],ymm4[14],ymm5[15] vpslld $0x10, %ymm7, %ymm4 vpblendw $0xaa, %ymm4, %ymm6, %ymm4 # ymm4 = ymm6[0],ymm4[1],ymm6[2],ymm4[3],ymm6[4],ymm4[5],ymm6[6],ymm4[7],ymm6[8],ymm4[9],ymm6[10],ymm4[11],ymm6[12],ymm4[13],ymm6[14],ymm4[15] vpsrld $0x10, %ymm6, %ymm6 vpblendw $0xaa, %ymm7, %ymm6, %ymm7 # ymm7 = ymm6[0],ymm7[1],ymm6[2],ymm7[3],ymm6[4],ymm7[5],ymm6[6],ymm7[7],ymm6[8],ymm7[9],ymm6[10],ymm7[11],ymm6[12],ymm7[13],ymm6[14],ymm7[15] vpslld $0x10, %ymm9, %ymm6 vpblendw $0xaa, %ymm6, %ymm8, %ymm6 # ymm6 = ymm8[0],ymm6[1],ymm8[2],ymm6[3],ymm8[4],ymm6[5],ymm8[6],ymm6[7],ymm8[8],ymm6[9],ymm8[10],ymm6[11],ymm8[12],ymm6[13],ymm8[14],ymm6[15] vpsrld $0x10, %ymm8, %ymm8 vpblendw $0xaa, %ymm9, %ymm8, %ymm9 # ymm9 = ymm8[0],ymm9[1],ymm8[2],ymm9[3],ymm8[4],ymm9[5],ymm8[6],ymm9[7],ymm8[8],ymm9[9],ymm8[10],ymm9[11],ymm8[12],ymm9[13],ymm8[14],ymm9[15] vpslld $0x10, %ymm11, %ymm8 vpblendw $0xaa, %ymm8, %ymm10, %ymm8 # ymm8 = ymm10[0],ymm8[1],ymm10[2],ymm8[3],ymm10[4],ymm8[5],ymm10[6],ymm8[7],ymm10[8],ymm8[9],ymm10[10],ymm8[11],ymm10[12],ymm8[13],ymm10[14],ymm8[15] vpsrld $0x10, %ymm10, %ymm10 vpblendw $0xaa, %ymm11, %ymm10, %ymm11 # ymm11 = ymm10[0],ymm11[1],ymm10[2],ymm11[3],ymm10[4],ymm11[5],ymm10[6],ymm11[7],ymm10[8],ymm11[9],ymm10[10],ymm11[11],ymm10[12],ymm11[13],ymm10[14],ymm11[15] vmovdqa 0x120(%rsi), %ymm12 vpermd 0x3e0(%rsi), %ymm12, %ymm2 vpermd 0x400(%rsi), %ymm12, %ymm10 vpsubw %ymm3, %ymm5, %ymm12 vpaddw %ymm5, %ymm3, %ymm3 vpsubw %ymm4, %ymm7, %ymm13 vpmullw %ymm2, %ymm12, %ymm5 vpaddw %ymm7, %ymm4, %ymm4 vpsubw %ymm6, %ymm9, %ymm14 vpmullw %ymm2, %ymm13, %ymm7 vpaddw %ymm9, %ymm6, %ymm6 vpsubw %ymm8, %ymm11, %ymm15 vpmullw %ymm2, %ymm14, %ymm9 vpaddw %ymm11, %ymm8, %ymm8 vpmullw %ymm2, %ymm15, %ymm11 vpmulhw %ymm10, %ymm12, %ymm12 vpmulhw %ymm10, %ymm13, %ymm13 vpmulhw %ymm10, %ymm14, %ymm14 vpmulhw %ymm10, %ymm15, %ymm15 vpmulhw %ymm0, %ymm5, %ymm5 vpmulhw %ymm0, %ymm7, %ymm7 vpmulhw %ymm0, %ymm9, %ymm9 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm5, %ymm12, %ymm5 vpsubw %ymm7, %ymm13, %ymm7 vpsubw %ymm9, %ymm14, %ymm9 vpsubw %ymm11, %ymm15, %ymm11 vmovdqa 0x40(%rsi), %ymm1 vpmulhw %ymm1, %ymm3, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm3, %ymm3 vmovsldup %ymm4, %ymm10 # ymm10 = ymm4[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm10, %ymm3, %ymm10 # ymm10 = ymm3[0],ymm10[1],ymm3[2],ymm10[3],ymm3[4],ymm10[5],ymm3[6],ymm10[7] vpsrlq $0x20, %ymm3, %ymm3 vpblendd $0xaa, %ymm4, %ymm3, %ymm4 # ymm4 = 
ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7] vmovsldup %ymm8, %ymm3 # ymm3 = ymm8[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm3, %ymm6, %ymm3 # ymm3 = ymm6[0],ymm3[1],ymm6[2],ymm3[3],ymm6[4],ymm3[5],ymm6[6],ymm3[7] vpsrlq $0x20, %ymm6, %ymm6 vpblendd $0xaa, %ymm8, %ymm6, %ymm8 # ymm8 = ymm6[0],ymm8[1],ymm6[2],ymm8[3],ymm6[4],ymm8[5],ymm6[6],ymm8[7] vmovsldup %ymm7, %ymm6 # ymm6 = ymm7[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm6, %ymm5, %ymm6 # ymm6 = ymm5[0],ymm6[1],ymm5[2],ymm6[3],ymm5[4],ymm6[5],ymm5[6],ymm6[7] vpsrlq $0x20, %ymm5, %ymm5 vpblendd $0xaa, %ymm7, %ymm5, %ymm7 # ymm7 = ymm5[0],ymm7[1],ymm5[2],ymm7[3],ymm5[4],ymm7[5],ymm5[6],ymm7[7] vmovsldup %ymm11, %ymm5 # ymm5 = ymm11[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm5, %ymm9, %ymm5 # ymm5 = ymm9[0],ymm5[1],ymm9[2],ymm5[3],ymm9[4],ymm5[5],ymm9[6],ymm5[7] vpsrlq $0x20, %ymm9, %ymm9 vpblendd $0xaa, %ymm11, %ymm9, %ymm11 # ymm11 = ymm9[0],ymm11[1],ymm9[2],ymm11[3],ymm9[4],ymm11[5],ymm9[6],ymm11[7] vpermq $0x1b, 0x3a0(%rsi), %ymm2 # ymm2 = mem[3,2,1,0] vpermq $0x1b, 0x3c0(%rsi), %ymm9 # ymm9 = mem[3,2,1,0] vpsubw %ymm10, %ymm4, %ymm12 vpaddw %ymm4, %ymm10, %ymm10 vpsubw %ymm3, %ymm8, %ymm13 vpmullw %ymm2, %ymm12, %ymm4 vpaddw %ymm8, %ymm3, %ymm3 vpsubw %ymm6, %ymm7, %ymm14 vpmullw %ymm2, %ymm13, %ymm8 vpaddw %ymm7, %ymm6, %ymm6 vpsubw %ymm5, %ymm11, %ymm15 vpmullw %ymm2, %ymm14, %ymm7 vpaddw %ymm11, %ymm5, %ymm5 vpmullw %ymm2, %ymm15, %ymm11 vpmulhw %ymm9, %ymm12, %ymm12 vpmulhw %ymm9, %ymm13, %ymm13 vpmulhw %ymm9, %ymm14, %ymm14 vpmulhw %ymm9, %ymm15, %ymm15 vpmulhw %ymm0, %ymm4, %ymm4 vpmulhw %ymm0, %ymm8, %ymm8 vpmulhw %ymm0, %ymm7, %ymm7 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm4, %ymm12, %ymm4 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm7, %ymm14, %ymm7 vpsubw %ymm11, %ymm15, %ymm11 vpmulhw %ymm1, %ymm10, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm10, %ymm10 vpunpcklqdq %ymm3, %ymm10, %ymm9 # ymm9 = ymm10[0],ymm3[0],ymm10[2],ymm3[2] vpunpckhqdq %ymm3, %ymm10, %ymm3 # ymm3 = ymm10[1],ymm3[1],ymm10[3],ymm3[3] vpunpcklqdq %ymm5, %ymm6, %ymm10 # ymm10 = ymm6[0],ymm5[0],ymm6[2],ymm5[2] vpunpckhqdq %ymm5, %ymm6, %ymm5 # ymm5 = ymm6[1],ymm5[1],ymm6[3],ymm5[3] vpunpcklqdq %ymm8, %ymm4, %ymm6 # ymm6 = ymm4[0],ymm8[0],ymm4[2],ymm8[2] vpunpckhqdq %ymm8, %ymm4, %ymm8 # ymm8 = ymm4[1],ymm8[1],ymm4[3],ymm8[3] vpunpcklqdq %ymm11, %ymm7, %ymm4 # ymm4 = ymm7[0],ymm11[0],ymm7[2],ymm11[2] vpunpckhqdq %ymm11, %ymm7, %ymm11 # ymm11 = ymm7[1],ymm11[1],ymm7[3],ymm11[3] vpermq $0x4e, 0x360(%rsi), %ymm2 # ymm2 = mem[2,3,0,1] vpermq $0x4e, 0x380(%rsi), %ymm7 # ymm7 = mem[2,3,0,1] vpsubw %ymm9, %ymm3, %ymm12 vpaddw %ymm3, %ymm9, %ymm9 vpsubw %ymm10, %ymm5, %ymm13 vpmullw %ymm2, %ymm12, %ymm3 vpaddw %ymm5, %ymm10, %ymm10 vpsubw %ymm6, %ymm8, %ymm14 vpmullw %ymm2, %ymm13, %ymm5 vpaddw %ymm8, %ymm6, %ymm6 vpsubw %ymm4, %ymm11, %ymm15 vpmullw %ymm2, %ymm14, %ymm8 vpaddw %ymm11, %ymm4, %ymm4 vpmullw %ymm2, %ymm15, %ymm11 vpmulhw %ymm7, %ymm12, %ymm12 vpmulhw %ymm7, %ymm13, %ymm13 vpmulhw %ymm7, %ymm14, %ymm14 vpmulhw %ymm7, %ymm15, %ymm15 vpmulhw %ymm0, %ymm3, %ymm3 vpmulhw %ymm0, %ymm5, %ymm5 vpmulhw %ymm0, %ymm8, %ymm8 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm3, %ymm12, %ymm3 vpsubw %ymm5, %ymm13, %ymm5 vpsubw %ymm8, %ymm14, %ymm8 vpsubw %ymm11, %ymm15, %ymm11 vpmulhw %ymm1, %ymm9, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm9, %ymm9 vperm2i128 $0x20, %ymm10, %ymm9, %ymm7 # ymm7 = ymm9[0,1],ymm10[0,1] vperm2i128 $0x31, %ymm10, %ymm9, %ymm10 # ymm10 = ymm9[2,3],ymm10[2,3] vperm2i128 
$0x20, %ymm4, %ymm6, %ymm9 # ymm9 = ymm6[0,1],ymm4[0,1] vperm2i128 $0x31, %ymm4, %ymm6, %ymm4 # ymm4 = ymm6[2,3],ymm4[2,3] vperm2i128 $0x20, %ymm5, %ymm3, %ymm6 # ymm6 = ymm3[0,1],ymm5[0,1] vperm2i128 $0x31, %ymm5, %ymm3, %ymm5 # ymm5 = ymm3[2,3],ymm5[2,3] vperm2i128 $0x20, %ymm11, %ymm8, %ymm3 # ymm3 = ymm8[0,1],ymm11[0,1] vperm2i128 $0x31, %ymm11, %ymm8, %ymm11 # ymm11 = ymm8[2,3],ymm11[2,3] vmovdqa 0x320(%rsi), %ymm2 vmovdqa 0x340(%rsi), %ymm8 vpsubw %ymm7, %ymm10, %ymm12 vpaddw %ymm10, %ymm7, %ymm7 vpsubw %ymm9, %ymm4, %ymm13 vpmullw %ymm2, %ymm12, %ymm10 vpaddw %ymm4, %ymm9, %ymm9 vpsubw %ymm6, %ymm5, %ymm14 vpmullw %ymm2, %ymm13, %ymm4 vpaddw %ymm5, %ymm6, %ymm6 vpsubw %ymm3, %ymm11, %ymm15 vpmullw %ymm2, %ymm14, %ymm5 vpaddw %ymm11, %ymm3, %ymm3 vpmullw %ymm2, %ymm15, %ymm11 vpmulhw %ymm8, %ymm12, %ymm12 vpmulhw %ymm8, %ymm13, %ymm13 vpmulhw %ymm8, %ymm14, %ymm14 vpmulhw %ymm8, %ymm15, %ymm15 vpmulhw %ymm0, %ymm10, %ymm10 vpmulhw %ymm0, %ymm4, %ymm4 vpmulhw %ymm0, %ymm5, %ymm5 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm10, %ymm12, %ymm10 vpsubw %ymm4, %ymm13, %ymm4 vpsubw %ymm5, %ymm14, %ymm5 vpsubw %ymm11, %ymm15, %ymm11 vpmulhw %ymm1, %ymm7, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm7, %ymm7 vmovdqa %ymm7, (%rdi) vmovdqa %ymm9, 0x20(%rdi) vmovdqa %ymm6, 0x40(%rdi) vmovdqa %ymm3, 0x60(%rdi) vmovdqa %ymm10, 0x80(%rdi) vmovdqa %ymm4, 0xa0(%rdi) vmovdqa %ymm5, 0xc0(%rdi) vmovdqa %ymm11, 0xe0(%rdi) vmovdqa 0x60(%rsi), %ymm2 vmovdqa 0x80(%rsi), %ymm3 vmovdqa 0x100(%rdi), %ymm4 vmovdqa 0x140(%rdi), %ymm6 vmovdqa 0x120(%rdi), %ymm5 vmovdqa 0x160(%rdi), %ymm7 vpmullw %ymm2, %ymm4, %ymm12 vpmulhw %ymm3, %ymm4, %ymm4 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm4, %ymm4 vpmullw %ymm2, %ymm6, %ymm12 vpmulhw %ymm3, %ymm6, %ymm6 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm6, %ymm6 vpmullw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm5, %ymm5 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm5, %ymm5 vpmullw %ymm2, %ymm7, %ymm12 vpmulhw %ymm3, %ymm7, %ymm7 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm7, %ymm7 vmovdqa 0x180(%rdi), %ymm8 vmovdqa 0x1c0(%rdi), %ymm10 vmovdqa 0x1a0(%rdi), %ymm9 vmovdqa 0x1e0(%rdi), %ymm11 vpmullw %ymm2, %ymm8, %ymm12 vpmulhw %ymm3, %ymm8, %ymm8 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm8, %ymm8 vpmullw %ymm2, %ymm10, %ymm12 vpmulhw %ymm3, %ymm10, %ymm10 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm10, %ymm10 vpmullw %ymm2, %ymm9, %ymm12 vpmulhw %ymm3, %ymm9, %ymm9 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm9, %ymm9 vpmullw %ymm2, %ymm11, %ymm12 vpmulhw %ymm3, %ymm11, %ymm11 vpmulhw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm11, %ymm11 vpermq $0x4e, 0x2e0(%rsi), %ymm15 # ymm15 = mem[2,3,0,1] vpermq $0x4e, 0x2a0(%rsi), %ymm1 # ymm1 = mem[2,3,0,1] vpermq $0x4e, 0x300(%rsi), %ymm2 # ymm2 = mem[2,3,0,1] vpermq $0x4e, 0x2c0(%rsi), %ymm3 # ymm3 = mem[2,3,0,1] vmovdqa 0x100(%rsi), %ymm12 vpshufb %ymm12, %ymm15, %ymm15 vpshufb %ymm12, %ymm1, %ymm1 vpshufb %ymm12, %ymm2, %ymm2 vpshufb %ymm12, %ymm3, %ymm3 vpsubw %ymm4, %ymm6, %ymm12 vpaddw %ymm6, %ymm4, %ymm4 vpsubw %ymm5, %ymm7, %ymm13 vpmullw %ymm15, %ymm12, %ymm6 vpaddw %ymm7, %ymm5, %ymm5 vpsubw %ymm8, %ymm10, %ymm14 vpmullw %ymm15, %ymm13, %ymm7 vpaddw %ymm10, %ymm8, %ymm8 vpsubw %ymm9, %ymm11, %ymm15 vpmullw %ymm1, %ymm14, %ymm10 vpaddw %ymm11, %ymm9, %ymm9 vpmullw %ymm1, %ymm15, %ymm11 vpmulhw %ymm2, %ymm12, %ymm12 vpmulhw %ymm2, %ymm13, %ymm13 vpmulhw %ymm3, %ymm14, %ymm14 vpmulhw %ymm3, %ymm15, %ymm15 vpmulhw %ymm0, %ymm6, %ymm6 vpmulhw %ymm0, %ymm7, %ymm7 
vpmulhw %ymm0, %ymm10, %ymm10 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm6, %ymm12, %ymm6 vpsubw %ymm7, %ymm13, %ymm7 vpsubw %ymm10, %ymm14, %ymm10 vpsubw %ymm11, %ymm15, %ymm11 vpermq $0x4e, 0x260(%rsi), %ymm2 # ymm2 = mem[2,3,0,1] vpermq $0x4e, 0x280(%rsi), %ymm3 # ymm3 = mem[2,3,0,1] vmovdqa 0x100(%rsi), %ymm1 vpshufb %ymm1, %ymm2, %ymm2 vpshufb %ymm1, %ymm3, %ymm3 vpsubw %ymm4, %ymm8, %ymm12 vpaddw %ymm8, %ymm4, %ymm4 vpsubw %ymm5, %ymm9, %ymm13 vpmullw %ymm2, %ymm12, %ymm8 vpaddw %ymm9, %ymm5, %ymm5 vpsubw %ymm6, %ymm10, %ymm14 vpmullw %ymm2, %ymm13, %ymm9 vpaddw %ymm10, %ymm6, %ymm6 vpsubw %ymm7, %ymm11, %ymm15 vpmullw %ymm2, %ymm14, %ymm10 vpaddw %ymm11, %ymm7, %ymm7 vpmullw %ymm2, %ymm15, %ymm11 vpmulhw %ymm3, %ymm12, %ymm12 vpmulhw %ymm3, %ymm13, %ymm13 vpmulhw %ymm3, %ymm14, %ymm14 vpmulhw %ymm3, %ymm15, %ymm15 vpmulhw %ymm0, %ymm8, %ymm8 vpmulhw %ymm0, %ymm9, %ymm9 vpmulhw %ymm0, %ymm10, %ymm10 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm8, %ymm12, %ymm8 vpsubw %ymm9, %ymm13, %ymm9 vpsubw %ymm10, %ymm14, %ymm10 vpsubw %ymm11, %ymm15, %ymm11 vpslld $0x10, %ymm5, %ymm3 vpblendw $0xaa, %ymm3, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7],ymm4[8],ymm3[9],ymm4[10],ymm3[11],ymm4[12],ymm3[13],ymm4[14],ymm3[15] vpsrld $0x10, %ymm4, %ymm4 vpblendw $0xaa, %ymm5, %ymm4, %ymm5 # ymm5 = ymm4[0],ymm5[1],ymm4[2],ymm5[3],ymm4[4],ymm5[5],ymm4[6],ymm5[7],ymm4[8],ymm5[9],ymm4[10],ymm5[11],ymm4[12],ymm5[13],ymm4[14],ymm5[15] vpslld $0x10, %ymm7, %ymm4 vpblendw $0xaa, %ymm4, %ymm6, %ymm4 # ymm4 = ymm6[0],ymm4[1],ymm6[2],ymm4[3],ymm6[4],ymm4[5],ymm6[6],ymm4[7],ymm6[8],ymm4[9],ymm6[10],ymm4[11],ymm6[12],ymm4[13],ymm6[14],ymm4[15] vpsrld $0x10, %ymm6, %ymm6 vpblendw $0xaa, %ymm7, %ymm6, %ymm7 # ymm7 = ymm6[0],ymm7[1],ymm6[2],ymm7[3],ymm6[4],ymm7[5],ymm6[6],ymm7[7],ymm6[8],ymm7[9],ymm6[10],ymm7[11],ymm6[12],ymm7[13],ymm6[14],ymm7[15] vpslld $0x10, %ymm9, %ymm6 vpblendw $0xaa, %ymm6, %ymm8, %ymm6 # ymm6 = ymm8[0],ymm6[1],ymm8[2],ymm6[3],ymm8[4],ymm6[5],ymm8[6],ymm6[7],ymm8[8],ymm6[9],ymm8[10],ymm6[11],ymm8[12],ymm6[13],ymm8[14],ymm6[15] vpsrld $0x10, %ymm8, %ymm8 vpblendw $0xaa, %ymm9, %ymm8, %ymm9 # ymm9 = ymm8[0],ymm9[1],ymm8[2],ymm9[3],ymm8[4],ymm9[5],ymm8[6],ymm9[7],ymm8[8],ymm9[9],ymm8[10],ymm9[11],ymm8[12],ymm9[13],ymm8[14],ymm9[15] vpslld $0x10, %ymm11, %ymm8 vpblendw $0xaa, %ymm8, %ymm10, %ymm8 # ymm8 = ymm10[0],ymm8[1],ymm10[2],ymm8[3],ymm10[4],ymm8[5],ymm10[6],ymm8[7],ymm10[8],ymm8[9],ymm10[10],ymm8[11],ymm10[12],ymm8[13],ymm10[14],ymm8[15] vpsrld $0x10, %ymm10, %ymm10 vpblendw $0xaa, %ymm11, %ymm10, %ymm11 # ymm11 = ymm10[0],ymm11[1],ymm10[2],ymm11[3],ymm10[4],ymm11[5],ymm10[6],ymm11[7],ymm10[8],ymm11[9],ymm10[10],ymm11[11],ymm10[12],ymm11[13],ymm10[14],ymm11[15] vmovdqa 0x120(%rsi), %ymm12 vpermd 0x220(%rsi), %ymm12, %ymm2 vpermd 0x240(%rsi), %ymm12, %ymm10 vpsubw %ymm3, %ymm5, %ymm12 vpaddw %ymm5, %ymm3, %ymm3 vpsubw %ymm4, %ymm7, %ymm13 vpmullw %ymm2, %ymm12, %ymm5 vpaddw %ymm7, %ymm4, %ymm4 vpsubw %ymm6, %ymm9, %ymm14 vpmullw %ymm2, %ymm13, %ymm7 vpaddw %ymm9, %ymm6, %ymm6 vpsubw %ymm8, %ymm11, %ymm15 vpmullw %ymm2, %ymm14, %ymm9 vpaddw %ymm11, %ymm8, %ymm8 vpmullw %ymm2, %ymm15, %ymm11 vpmulhw %ymm10, %ymm12, %ymm12 vpmulhw %ymm10, %ymm13, %ymm13 vpmulhw %ymm10, %ymm14, %ymm14 vpmulhw %ymm10, %ymm15, %ymm15 vpmulhw %ymm0, %ymm5, %ymm5 vpmulhw %ymm0, %ymm7, %ymm7 vpmulhw %ymm0, %ymm9, %ymm9 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm5, %ymm12, %ymm5 vpsubw %ymm7, %ymm13, %ymm7 vpsubw %ymm9, %ymm14, %ymm9 vpsubw %ymm11, %ymm15, %ymm11 vmovdqa 0x40(%rsi), 
%ymm1 vpmulhw %ymm1, %ymm3, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm3, %ymm3 vmovsldup %ymm4, %ymm10 # ymm10 = ymm4[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm10, %ymm3, %ymm10 # ymm10 = ymm3[0],ymm10[1],ymm3[2],ymm10[3],ymm3[4],ymm10[5],ymm3[6],ymm10[7] vpsrlq $0x20, %ymm3, %ymm3 vpblendd $0xaa, %ymm4, %ymm3, %ymm4 # ymm4 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7] vmovsldup %ymm8, %ymm3 # ymm3 = ymm8[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm3, %ymm6, %ymm3 # ymm3 = ymm6[0],ymm3[1],ymm6[2],ymm3[3],ymm6[4],ymm3[5],ymm6[6],ymm3[7] vpsrlq $0x20, %ymm6, %ymm6 vpblendd $0xaa, %ymm8, %ymm6, %ymm8 # ymm8 = ymm6[0],ymm8[1],ymm6[2],ymm8[3],ymm6[4],ymm8[5],ymm6[6],ymm8[7] vmovsldup %ymm7, %ymm6 # ymm6 = ymm7[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm6, %ymm5, %ymm6 # ymm6 = ymm5[0],ymm6[1],ymm5[2],ymm6[3],ymm5[4],ymm6[5],ymm5[6],ymm6[7] vpsrlq $0x20, %ymm5, %ymm5 vpblendd $0xaa, %ymm7, %ymm5, %ymm7 # ymm7 = ymm5[0],ymm7[1],ymm5[2],ymm7[3],ymm5[4],ymm7[5],ymm5[6],ymm7[7] vmovsldup %ymm11, %ymm5 # ymm5 = ymm11[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm5, %ymm9, %ymm5 # ymm5 = ymm9[0],ymm5[1],ymm9[2],ymm5[3],ymm9[4],ymm5[5],ymm9[6],ymm5[7] vpsrlq $0x20, %ymm9, %ymm9 vpblendd $0xaa, %ymm11, %ymm9, %ymm11 # ymm11 = ymm9[0],ymm11[1],ymm9[2],ymm11[3],ymm9[4],ymm11[5],ymm9[6],ymm11[7] vpermq $0x1b, 0x1e0(%rsi), %ymm2 # ymm2 = mem[3,2,1,0] vpermq $0x1b, 0x200(%rsi), %ymm9 # ymm9 = mem[3,2,1,0] vpsubw %ymm10, %ymm4, %ymm12 vpaddw %ymm4, %ymm10, %ymm10 vpsubw %ymm3, %ymm8, %ymm13 vpmullw %ymm2, %ymm12, %ymm4 vpaddw %ymm8, %ymm3, %ymm3 vpsubw %ymm6, %ymm7, %ymm14 vpmullw %ymm2, %ymm13, %ymm8 vpaddw %ymm7, %ymm6, %ymm6 vpsubw %ymm5, %ymm11, %ymm15 vpmullw %ymm2, %ymm14, %ymm7 vpaddw %ymm11, %ymm5, %ymm5 vpmullw %ymm2, %ymm15, %ymm11 vpmulhw %ymm9, %ymm12, %ymm12 vpmulhw %ymm9, %ymm13, %ymm13 vpmulhw %ymm9, %ymm14, %ymm14 vpmulhw %ymm9, %ymm15, %ymm15 vpmulhw %ymm0, %ymm4, %ymm4 vpmulhw %ymm0, %ymm8, %ymm8 vpmulhw %ymm0, %ymm7, %ymm7 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm4, %ymm12, %ymm4 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm7, %ymm14, %ymm7 vpsubw %ymm11, %ymm15, %ymm11 vpmulhw %ymm1, %ymm10, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm10, %ymm10 vpunpcklqdq %ymm3, %ymm10, %ymm9 # ymm9 = ymm10[0],ymm3[0],ymm10[2],ymm3[2] vpunpckhqdq %ymm3, %ymm10, %ymm3 # ymm3 = ymm10[1],ymm3[1],ymm10[3],ymm3[3] vpunpcklqdq %ymm5, %ymm6, %ymm10 # ymm10 = ymm6[0],ymm5[0],ymm6[2],ymm5[2] vpunpckhqdq %ymm5, %ymm6, %ymm5 # ymm5 = ymm6[1],ymm5[1],ymm6[3],ymm5[3] vpunpcklqdq %ymm8, %ymm4, %ymm6 # ymm6 = ymm4[0],ymm8[0],ymm4[2],ymm8[2] vpunpckhqdq %ymm8, %ymm4, %ymm8 # ymm8 = ymm4[1],ymm8[1],ymm4[3],ymm8[3] vpunpcklqdq %ymm11, %ymm7, %ymm4 # ymm4 = ymm7[0],ymm11[0],ymm7[2],ymm11[2] vpunpckhqdq %ymm11, %ymm7, %ymm11 # ymm11 = ymm7[1],ymm11[1],ymm7[3],ymm11[3] vpermq $0x4e, 0x1a0(%rsi), %ymm2 # ymm2 = mem[2,3,0,1] vpermq $0x4e, 0x1c0(%rsi), %ymm7 # ymm7 = mem[2,3,0,1] vpsubw %ymm9, %ymm3, %ymm12 vpaddw %ymm3, %ymm9, %ymm9 vpsubw %ymm10, %ymm5, %ymm13 vpmullw %ymm2, %ymm12, %ymm3 vpaddw %ymm5, %ymm10, %ymm10 vpsubw %ymm6, %ymm8, %ymm14 vpmullw %ymm2, %ymm13, %ymm5 vpaddw %ymm8, %ymm6, %ymm6 vpsubw %ymm4, %ymm11, %ymm15 vpmullw %ymm2, %ymm14, %ymm8 vpaddw %ymm11, %ymm4, %ymm4 vpmullw %ymm2, %ymm15, %ymm11 vpmulhw %ymm7, %ymm12, %ymm12 vpmulhw %ymm7, %ymm13, %ymm13 vpmulhw %ymm7, %ymm14, %ymm14 vpmulhw %ymm7, %ymm15, %ymm15 vpmulhw %ymm0, %ymm3, %ymm3 vpmulhw %ymm0, %ymm5, %ymm5 vpmulhw %ymm0, %ymm8, %ymm8 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm3, 
%ymm12, %ymm3 vpsubw %ymm5, %ymm13, %ymm5 vpsubw %ymm8, %ymm14, %ymm8 vpsubw %ymm11, %ymm15, %ymm11 vpmulhw %ymm1, %ymm9, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm9, %ymm9 vperm2i128 $0x20, %ymm10, %ymm9, %ymm7 # ymm7 = ymm9[0,1],ymm10[0,1] vperm2i128 $0x31, %ymm10, %ymm9, %ymm10 # ymm10 = ymm9[2,3],ymm10[2,3] vperm2i128 $0x20, %ymm4, %ymm6, %ymm9 # ymm9 = ymm6[0,1],ymm4[0,1] vperm2i128 $0x31, %ymm4, %ymm6, %ymm4 # ymm4 = ymm6[2,3],ymm4[2,3] vperm2i128 $0x20, %ymm5, %ymm3, %ymm6 # ymm6 = ymm3[0,1],ymm5[0,1] vperm2i128 $0x31, %ymm5, %ymm3, %ymm5 # ymm5 = ymm3[2,3],ymm5[2,3] vperm2i128 $0x20, %ymm11, %ymm8, %ymm3 # ymm3 = ymm8[0,1],ymm11[0,1] vperm2i128 $0x31, %ymm11, %ymm8, %ymm11 # ymm11 = ymm8[2,3],ymm11[2,3] vmovdqa 0x160(%rsi), %ymm2 vmovdqa 0x180(%rsi), %ymm8 vpsubw %ymm7, %ymm10, %ymm12 vpaddw %ymm10, %ymm7, %ymm7 vpsubw %ymm9, %ymm4, %ymm13 vpmullw %ymm2, %ymm12, %ymm10 vpaddw %ymm4, %ymm9, %ymm9 vpsubw %ymm6, %ymm5, %ymm14 vpmullw %ymm2, %ymm13, %ymm4 vpaddw %ymm5, %ymm6, %ymm6 vpsubw %ymm3, %ymm11, %ymm15 vpmullw %ymm2, %ymm14, %ymm5 vpaddw %ymm11, %ymm3, %ymm3 vpmullw %ymm2, %ymm15, %ymm11 vpmulhw %ymm8, %ymm12, %ymm12 vpmulhw %ymm8, %ymm13, %ymm13 vpmulhw %ymm8, %ymm14, %ymm14 vpmulhw %ymm8, %ymm15, %ymm15 vpmulhw %ymm0, %ymm10, %ymm10 vpmulhw %ymm0, %ymm4, %ymm4 vpmulhw %ymm0, %ymm5, %ymm5 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm10, %ymm12, %ymm10 vpsubw %ymm4, %ymm13, %ymm4 vpsubw %ymm5, %ymm14, %ymm5 vpsubw %ymm11, %ymm15, %ymm11 vpmulhw %ymm1, %ymm7, %ymm12 vpsraw $0xa, %ymm12, %ymm12 vpmullw %ymm0, %ymm12, %ymm12 vpsubw %ymm12, %ymm7, %ymm7 vmovdqa %ymm7, 0x100(%rdi) vmovdqa %ymm9, 0x120(%rdi) vmovdqa %ymm6, 0x140(%rdi) vmovdqa %ymm3, 0x160(%rdi) vmovdqa %ymm10, 0x180(%rdi) vmovdqa %ymm4, 0x1a0(%rdi) vmovdqa %ymm5, 0x1c0(%rdi) vmovdqa %ymm11, 0x1e0(%rdi) vmovdqa (%rdi), %ymm4 vmovdqa 0x100(%rdi), %ymm8 vmovdqa 0x20(%rdi), %ymm5 vmovdqa 0x120(%rdi), %ymm9 vpbroadcastq 0x140(%rsi), %ymm2 vmovdqa 0x40(%rdi), %ymm6 vmovdqa 0x140(%rdi), %ymm10 vmovdqa 0x60(%rdi), %ymm7 vmovdqa 0x160(%rdi), %ymm11 vpbroadcastq 0x148(%rsi), %ymm3 vpsubw %ymm4, %ymm8, %ymm12 vpaddw %ymm8, %ymm4, %ymm4 vpsubw %ymm5, %ymm9, %ymm13 vpmullw %ymm2, %ymm12, %ymm8 vpaddw %ymm9, %ymm5, %ymm5 vpsubw %ymm6, %ymm10, %ymm14 vpmullw %ymm2, %ymm13, %ymm9 vpaddw %ymm10, %ymm6, %ymm6 vpsubw %ymm7, %ymm11, %ymm15 vpmullw %ymm2, %ymm14, %ymm10 vpaddw %ymm11, %ymm7, %ymm7 vpmullw %ymm2, %ymm15, %ymm11 vpmulhw %ymm3, %ymm12, %ymm12 vpmulhw %ymm3, %ymm13, %ymm13 vpmulhw %ymm3, %ymm14, %ymm14 vpmulhw %ymm3, %ymm15, %ymm15 vpmulhw %ymm0, %ymm8, %ymm8 vpmulhw %ymm0, %ymm9, %ymm9 vpmulhw %ymm0, %ymm10, %ymm10 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm8, %ymm12, %ymm8 vpsubw %ymm9, %ymm13, %ymm9 vpsubw %ymm10, %ymm14, %ymm10 vpsubw %ymm11, %ymm15, %ymm11 vmovdqa %ymm4, (%rdi) vmovdqa %ymm5, 0x20(%rdi) vmovdqa %ymm6, 0x40(%rdi) vmovdqa %ymm7, 0x60(%rdi) vmovdqa %ymm8, 0x100(%rdi) vmovdqa %ymm9, 0x120(%rdi) vmovdqa %ymm10, 0x140(%rdi) vmovdqa %ymm11, 0x160(%rdi) vmovdqa 0x80(%rdi), %ymm4 vmovdqa 0x180(%rdi), %ymm8 vmovdqa 0xa0(%rdi), %ymm5 vmovdqa 0x1a0(%rdi), %ymm9 vpbroadcastq 0x140(%rsi), %ymm2 vmovdqa 0xc0(%rdi), %ymm6 vmovdqa 0x1c0(%rdi), %ymm10 vmovdqa 0xe0(%rdi), %ymm7 vmovdqa 0x1e0(%rdi), %ymm11 vpbroadcastq 0x148(%rsi), %ymm3 vpsubw %ymm4, %ymm8, %ymm12 vpaddw %ymm8, %ymm4, %ymm4 vpsubw %ymm5, %ymm9, %ymm13 vpmullw %ymm2, %ymm12, %ymm8 vpaddw %ymm9, %ymm5, %ymm5 vpsubw %ymm6, %ymm10, %ymm14 vpmullw %ymm2, %ymm13, %ymm9 vpaddw %ymm10, %ymm6, %ymm6 vpsubw %ymm7, %ymm11, %ymm15 
vpmullw %ymm2, %ymm14, %ymm10 vpaddw %ymm11, %ymm7, %ymm7 vpmullw %ymm2, %ymm15, %ymm11 vpmulhw %ymm3, %ymm12, %ymm12 vpmulhw %ymm3, %ymm13, %ymm13 vpmulhw %ymm3, %ymm14, %ymm14 vpmulhw %ymm3, %ymm15, %ymm15 vpmulhw %ymm0, %ymm8, %ymm8 vpmulhw %ymm0, %ymm9, %ymm9 vpmulhw %ymm0, %ymm10, %ymm10 vpmulhw %ymm0, %ymm11, %ymm11 vpsubw %ymm8, %ymm12, %ymm8 vpsubw %ymm9, %ymm13, %ymm9 vpsubw %ymm10, %ymm14, %ymm10 vpsubw %ymm11, %ymm15, %ymm11 vmovdqa %ymm4, 0x80(%rdi) vmovdqa %ymm5, 0xa0(%rdi) vmovdqa %ymm6, 0xc0(%rdi) vmovdqa %ymm7, 0xe0(%rdi) vmovdqa %ymm8, 0x180(%rdi) vmovdqa %ymm9, 0x1a0(%rdi) vmovdqa %ymm10, 0x1c0(%rdi) vmovdqa %ymm11, 0x1e0(%rdi) retq .cfi_endproc
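Each round in the kernel above is a Gentleman-Sande butterfly: ymm0 is q broadcast, the vpsubw/vpaddw pair forms the difference and sum of two coefficient blocks, the difference goes through the vpmullw/vpmulhw/vpmulhw(q)/vpsubw Montgomery sequence with the twiddle, and selected sums are Barrett-reduced with the vpmulhw/vpsraw $0xa/vpmullw(q)/vpsubw pattern (the "different placement of modular reductions" mentioned in the header). A scalar sketch, under the assumption that 0x40(%rsi) holds the Barrett constant 20159 = round(2^26 / q); names are illustrative, not from this repository:

#include <stdint.h>

#define MLKEM_Q 3329

/* Montgomery multiplication by a twiddle, as in the basemul sketch above. */
int16_t fqmul(int16_t a, int16_t b)
{
    int32_t p = (int32_t)a * b;
    int16_t m = (int16_t)((int16_t)p * -3327);            /* * q^-1 mod 2^16 */
    return (int16_t)((p - (int32_t)m * MLKEM_Q) >> 16);
}

/* Barrett reduction matching vpmulhw / vpsraw $0xa / vpmullw q / vpsubw. */
int16_t barrett_reduce(int16_t a)
{
    int16_t t = (int16_t)(((int32_t)a * 20159) >> 26);
    return (int16_t)(a - t * MLKEM_Q);
}

/* One inverse-NTT (Gentleman-Sande) butterfly. The sum is left unreduced
 * here; the kernel applies the Barrett step only on the layers and lanes
 * where the running sums could otherwise leave the 16-bit range. */
void gs_butterfly(int16_t *a, int16_t *b, int16_t zeta)
{
    int16_t t = *a;
    *a = (int16_t)(t + *b);
    *b = fqmul(zeta, (int16_t)(*b - t));
}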
wlsfx/bnbb
27,768
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/x86_64/src/ntt.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /* References * ========== * * - [AVX2_NTT] * Faster AVX2 optimized NTT multiplication for Ring-LWE lattice cryptography. * Gregor Seiler * https://eprint.iacr.org/2018/039 * * - [REF_AVX2] * CRYSTALS-Kyber optimized AVX2 implementation * Bos, Ducas, Kiltz, Lepoint, Lyubashevsky, Schanck, Schwabe, Seiler, Stehlé * https://github.com/pq-crystals/kyber/tree/main/avx2 */ /* * This file is derived from the public domain * AVX2 Kyber implementation @[REF_AVX2]. * * The core ideas behind the implementation are described in @[AVX2_NTT]. */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/x86_64/src/ntt.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_ntt_avx2) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_ntt_avx2) S2N_BN_SYMBOL(mlkem_ntt_avx2): .cfi_startproc vmovdqa (%rsi), %ymm0 vpbroadcastq 0x140(%rsi), %ymm15 vmovdqa 0x100(%rdi), %ymm8 vmovdqa 0x120(%rdi), %ymm9 vmovdqa 0x140(%rdi), %ymm10 vmovdqa 0x160(%rdi), %ymm11 vpbroadcastq 0x148(%rsi), %ymm2 vpmullw %ymm15, %ymm8, %ymm12 vpmullw %ymm15, %ymm9, %ymm13 vpmullw %ymm15, %ymm10, %ymm14 vpmullw %ymm15, %ymm11, %ymm15 vpmulhw %ymm2, %ymm8, %ymm8 vpmulhw %ymm2, %ymm9, %ymm9 vpmulhw %ymm2, %ymm10, %ymm10 vpmulhw %ymm2, %ymm11, %ymm11 vmovdqa (%rdi), %ymm4 vmovdqa 0x20(%rdi), %ymm5 vmovdqa 0x40(%rdi), %ymm6 vmovdqa 0x60(%rdi), %ymm7 vpmulhw %ymm0, %ymm12, %ymm12 vpmulhw %ymm0, %ymm13, %ymm13 vpmulhw %ymm0, %ymm14, %ymm14 vpmulhw %ymm0, %ymm15, %ymm15 vpaddw %ymm8, %ymm4, %ymm3 vpsubw %ymm8, %ymm4, %ymm8 vpaddw %ymm9, %ymm5, %ymm4 vpsubw %ymm9, %ymm5, %ymm9 vpaddw %ymm10, %ymm6, %ymm5 vpsubw %ymm10, %ymm6, %ymm10 vpaddw %ymm11, %ymm7, %ymm6 vpsubw %ymm11, %ymm7, %ymm11 vpsubw %ymm12, %ymm3, %ymm3 vpaddw %ymm12, %ymm8, %ymm8 vpsubw %ymm13, %ymm4, %ymm4 vpaddw %ymm13, %ymm9, %ymm9 vpsubw %ymm14, %ymm5, %ymm5 vpaddw %ymm14, %ymm10, %ymm10 vpsubw %ymm15, %ymm6, %ymm6 vpaddw %ymm15, %ymm11, %ymm11 vmovdqa %ymm3, (%rdi) vmovdqa %ymm4, 0x20(%rdi) vmovdqa %ymm5, 0x40(%rdi) vmovdqa %ymm6, 0x60(%rdi) vmovdqa %ymm8, 0x100(%rdi) vmovdqa %ymm9, 0x120(%rdi) vmovdqa %ymm10, 0x140(%rdi) vmovdqa %ymm11, 0x160(%rdi) vpbroadcastq 0x140(%rsi), %ymm15 vmovdqa 0x180(%rdi), %ymm8 vmovdqa 0x1a0(%rdi), %ymm9 vmovdqa 0x1c0(%rdi), %ymm10 vmovdqa 0x1e0(%rdi), %ymm11 vpbroadcastq 0x148(%rsi), %ymm2 vpmullw %ymm15, %ymm8, %ymm12 vpmullw %ymm15, %ymm9, %ymm13 vpmullw %ymm15, %ymm10, %ymm14 vpmullw %ymm15, %ymm11, %ymm15 vpmulhw %ymm2, %ymm8, %ymm8 vpmulhw %ymm2, %ymm9, %ymm9 vpmulhw %ymm2, %ymm10, %ymm10 vpmulhw %ymm2, %ymm11, %ymm11 vmovdqa 0x80(%rdi), %ymm4 vmovdqa 0xa0(%rdi), %ymm5 vmovdqa 0xc0(%rdi), %ymm6 vmovdqa 0xe0(%rdi), %ymm7 vpmulhw %ymm0, %ymm12, %ymm12 vpmulhw %ymm0, %ymm13, %ymm13 vpmulhw %ymm0, %ymm14, %ymm14 vpmulhw %ymm0, %ymm15, %ymm15 vpaddw %ymm8, %ymm4, %ymm3 vpsubw %ymm8, %ymm4, %ymm8 vpaddw %ymm9, %ymm5, %ymm4 vpsubw %ymm9, %ymm5, %ymm9 vpaddw %ymm10, %ymm6, %ymm5 vpsubw %ymm10, %ymm6, %ymm10 vpaddw %ymm11, %ymm7, %ymm6 vpsubw %ymm11, %ymm7, %ymm11 vpsubw %ymm12, %ymm3, %ymm3 vpaddw %ymm12, %ymm8, %ymm8 vpsubw %ymm13, %ymm4, %ymm4 vpaddw %ymm13, %ymm9, %ymm9 vpsubw %ymm14, %ymm5, %ymm5 vpaddw %ymm14, %ymm10, %ymm10 vpsubw %ymm15, %ymm6, %ymm6 vpaddw %ymm15, %ymm11, %ymm11 vmovdqa %ymm3, 0x80(%rdi) vmovdqa %ymm4, 0xa0(%rdi) vmovdqa %ymm5, 0xc0(%rdi) vmovdqa %ymm6, 0xe0(%rdi) vmovdqa %ymm8, 0x180(%rdi) vmovdqa %ymm9, 0x1a0(%rdi) vmovdqa 
%ymm10, 0x1c0(%rdi) vmovdqa %ymm11, 0x1e0(%rdi) vmovdqa 0x160(%rsi), %ymm15 vmovdqa 0x80(%rdi), %ymm8 vmovdqa 0xa0(%rdi), %ymm9 vmovdqa 0xc0(%rdi), %ymm10 vmovdqa 0xe0(%rdi), %ymm11 vmovdqa 0x180(%rsi), %ymm2 vpmullw %ymm15, %ymm8, %ymm12 vpmullw %ymm15, %ymm9, %ymm13 vpmullw %ymm15, %ymm10, %ymm14 vpmullw %ymm15, %ymm11, %ymm15 vpmulhw %ymm2, %ymm8, %ymm8 vpmulhw %ymm2, %ymm9, %ymm9 vpmulhw %ymm2, %ymm10, %ymm10 vpmulhw %ymm2, %ymm11, %ymm11 vmovdqa (%rdi), %ymm4 vmovdqa 0x20(%rdi), %ymm5 vmovdqa 0x40(%rdi), %ymm6 vmovdqa 0x60(%rdi), %ymm7 vpmulhw %ymm0, %ymm12, %ymm12 vpmulhw %ymm0, %ymm13, %ymm13 vpmulhw %ymm0, %ymm14, %ymm14 vpmulhw %ymm0, %ymm15, %ymm15 vpaddw %ymm8, %ymm4, %ymm3 vpsubw %ymm8, %ymm4, %ymm8 vpaddw %ymm9, %ymm5, %ymm4 vpsubw %ymm9, %ymm5, %ymm9 vpaddw %ymm10, %ymm6, %ymm5 vpsubw %ymm10, %ymm6, %ymm10 vpaddw %ymm11, %ymm7, %ymm6 vpsubw %ymm11, %ymm7, %ymm11 vpsubw %ymm12, %ymm3, %ymm3 vpaddw %ymm12, %ymm8, %ymm8 vpsubw %ymm13, %ymm4, %ymm4 vpaddw %ymm13, %ymm9, %ymm9 vpsubw %ymm14, %ymm5, %ymm5 vpaddw %ymm14, %ymm10, %ymm10 vpsubw %ymm15, %ymm6, %ymm6 vpaddw %ymm15, %ymm11, %ymm11 vperm2i128 $0x20, %ymm10, %ymm5, %ymm7 # ymm7 = ymm5[0,1],ymm10[0,1] vperm2i128 $0x31, %ymm10, %ymm5, %ymm10 # ymm10 = ymm5[2,3],ymm10[2,3] vperm2i128 $0x20, %ymm11, %ymm6, %ymm5 # ymm5 = ymm6[0,1],ymm11[0,1] vperm2i128 $0x31, %ymm11, %ymm6, %ymm11 # ymm11 = ymm6[2,3],ymm11[2,3] vmovdqa 0x1a0(%rsi), %ymm15 vmovdqa 0x1c0(%rsi), %ymm2 vpmullw %ymm15, %ymm7, %ymm12 vpmullw %ymm15, %ymm10, %ymm13 vpmullw %ymm15, %ymm5, %ymm14 vpmullw %ymm15, %ymm11, %ymm15 vpmulhw %ymm2, %ymm7, %ymm7 vpmulhw %ymm2, %ymm10, %ymm10 vpmulhw %ymm2, %ymm5, %ymm5 vpmulhw %ymm2, %ymm11, %ymm11 vperm2i128 $0x20, %ymm8, %ymm3, %ymm6 # ymm6 = ymm3[0,1],ymm8[0,1] vperm2i128 $0x31, %ymm8, %ymm3, %ymm8 # ymm8 = ymm3[2,3],ymm8[2,3] vperm2i128 $0x20, %ymm9, %ymm4, %ymm3 # ymm3 = ymm4[0,1],ymm9[0,1] vperm2i128 $0x31, %ymm9, %ymm4, %ymm9 # ymm9 = ymm4[2,3],ymm9[2,3] vpmulhw %ymm0, %ymm12, %ymm12 vpmulhw %ymm0, %ymm13, %ymm13 vpmulhw %ymm0, %ymm14, %ymm14 vpmulhw %ymm0, %ymm15, %ymm15 vpaddw %ymm7, %ymm6, %ymm4 vpsubw %ymm7, %ymm6, %ymm7 vpaddw %ymm10, %ymm8, %ymm6 vpsubw %ymm10, %ymm8, %ymm10 vpaddw %ymm5, %ymm3, %ymm8 vpsubw %ymm5, %ymm3, %ymm5 vpaddw %ymm11, %ymm9, %ymm3 vpsubw %ymm11, %ymm9, %ymm11 vpsubw %ymm12, %ymm4, %ymm4 vpaddw %ymm12, %ymm7, %ymm7 vpsubw %ymm13, %ymm6, %ymm6 vpaddw %ymm13, %ymm10, %ymm10 vpsubw %ymm14, %ymm8, %ymm8 vpaddw %ymm14, %ymm5, %ymm5 vpsubw %ymm15, %ymm3, %ymm3 vpaddw %ymm15, %ymm11, %ymm11 vpunpcklqdq %ymm5, %ymm8, %ymm9 # ymm9 = ymm8[0],ymm5[0],ymm8[2],ymm5[2] vpunpckhqdq %ymm5, %ymm8, %ymm5 # ymm5 = ymm8[1],ymm5[1],ymm8[3],ymm5[3] vpunpcklqdq %ymm11, %ymm3, %ymm8 # ymm8 = ymm3[0],ymm11[0],ymm3[2],ymm11[2] vpunpckhqdq %ymm11, %ymm3, %ymm11 # ymm11 = ymm3[1],ymm11[1],ymm3[3],ymm11[3] vmovdqa 0x1e0(%rsi), %ymm15 vmovdqa 0x200(%rsi), %ymm2 vpmullw %ymm15, %ymm9, %ymm12 vpmullw %ymm15, %ymm5, %ymm13 vpmullw %ymm15, %ymm8, %ymm14 vpmullw %ymm15, %ymm11, %ymm15 vpmulhw %ymm2, %ymm9, %ymm9 vpmulhw %ymm2, %ymm5, %ymm5 vpmulhw %ymm2, %ymm8, %ymm8 vpmulhw %ymm2, %ymm11, %ymm11 vpunpcklqdq %ymm7, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm7[0],ymm4[2],ymm7[2] vpunpckhqdq %ymm7, %ymm4, %ymm7 # ymm7 = ymm4[1],ymm7[1],ymm4[3],ymm7[3] vpunpcklqdq %ymm10, %ymm6, %ymm4 # ymm4 = ymm6[0],ymm10[0],ymm6[2],ymm10[2] vpunpckhqdq %ymm10, %ymm6, %ymm10 # ymm10 = ymm6[1],ymm10[1],ymm6[3],ymm10[3] vpmulhw %ymm0, %ymm12, %ymm12 vpmulhw %ymm0, %ymm13, %ymm13 vpmulhw %ymm0, %ymm14, %ymm14 vpmulhw %ymm0, %ymm15, %ymm15 vpaddw %ymm9, 
%ymm3, %ymm6 vpsubw %ymm9, %ymm3, %ymm9 vpaddw %ymm5, %ymm7, %ymm3 vpsubw %ymm5, %ymm7, %ymm5 vpaddw %ymm8, %ymm4, %ymm7 vpsubw %ymm8, %ymm4, %ymm8 vpaddw %ymm11, %ymm10, %ymm4 vpsubw %ymm11, %ymm10, %ymm11 vpsubw %ymm12, %ymm6, %ymm6 vpaddw %ymm12, %ymm9, %ymm9 vpsubw %ymm13, %ymm3, %ymm3 vpaddw %ymm13, %ymm5, %ymm5 vpsubw %ymm14, %ymm7, %ymm7 vpaddw %ymm14, %ymm8, %ymm8 vpsubw %ymm15, %ymm4, %ymm4 vpaddw %ymm15, %ymm11, %ymm11 vmovsldup %ymm8, %ymm10 # ymm10 = ymm8[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm10, %ymm7, %ymm10 # ymm10 = ymm7[0],ymm10[1],ymm7[2],ymm10[3],ymm7[4],ymm10[5],ymm7[6],ymm10[7] vpsrlq $0x20, %ymm7, %ymm7 vpblendd $0xaa, %ymm8, %ymm7, %ymm8 # ymm8 = ymm7[0],ymm8[1],ymm7[2],ymm8[3],ymm7[4],ymm8[5],ymm7[6],ymm8[7] vmovsldup %ymm11, %ymm7 # ymm7 = ymm11[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm7, %ymm4, %ymm7 # ymm7 = ymm4[0],ymm7[1],ymm4[2],ymm7[3],ymm4[4],ymm7[5],ymm4[6],ymm7[7] vpsrlq $0x20, %ymm4, %ymm4 vpblendd $0xaa, %ymm11, %ymm4, %ymm11 # ymm11 = ymm4[0],ymm11[1],ymm4[2],ymm11[3],ymm4[4],ymm11[5],ymm4[6],ymm11[7] vmovdqa 0x220(%rsi), %ymm15 vmovdqa 0x240(%rsi), %ymm2 vpmullw %ymm15, %ymm10, %ymm12 vpmullw %ymm15, %ymm8, %ymm13 vpmullw %ymm15, %ymm7, %ymm14 vpmullw %ymm15, %ymm11, %ymm15 vpmulhw %ymm2, %ymm10, %ymm10 vpmulhw %ymm2, %ymm8, %ymm8 vpmulhw %ymm2, %ymm7, %ymm7 vpmulhw %ymm2, %ymm11, %ymm11 vmovsldup %ymm9, %ymm4 # ymm4 = ymm9[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm4, %ymm6, %ymm4 # ymm4 = ymm6[0],ymm4[1],ymm6[2],ymm4[3],ymm6[4],ymm4[5],ymm6[6],ymm4[7] vpsrlq $0x20, %ymm6, %ymm6 vpblendd $0xaa, %ymm9, %ymm6, %ymm9 # ymm9 = ymm6[0],ymm9[1],ymm6[2],ymm9[3],ymm6[4],ymm9[5],ymm6[6],ymm9[7] vmovsldup %ymm5, %ymm6 # ymm6 = ymm5[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm6, %ymm3, %ymm6 # ymm6 = ymm3[0],ymm6[1],ymm3[2],ymm6[3],ymm3[4],ymm6[5],ymm3[6],ymm6[7] vpsrlq $0x20, %ymm3, %ymm3 vpblendd $0xaa, %ymm5, %ymm3, %ymm5 # ymm5 = ymm3[0],ymm5[1],ymm3[2],ymm5[3],ymm3[4],ymm5[5],ymm3[6],ymm5[7] vpmulhw %ymm0, %ymm12, %ymm12 vpmulhw %ymm0, %ymm13, %ymm13 vpmulhw %ymm0, %ymm14, %ymm14 vpmulhw %ymm0, %ymm15, %ymm15 vpaddw %ymm10, %ymm4, %ymm3 vpsubw %ymm10, %ymm4, %ymm10 vpaddw %ymm8, %ymm9, %ymm4 vpsubw %ymm8, %ymm9, %ymm8 vpaddw %ymm7, %ymm6, %ymm9 vpsubw %ymm7, %ymm6, %ymm7 vpaddw %ymm11, %ymm5, %ymm6 vpsubw %ymm11, %ymm5, %ymm11 vpsubw %ymm12, %ymm3, %ymm3 vpaddw %ymm12, %ymm10, %ymm10 vpsubw %ymm13, %ymm4, %ymm4 vpaddw %ymm13, %ymm8, %ymm8 vpsubw %ymm14, %ymm9, %ymm9 vpaddw %ymm14, %ymm7, %ymm7 vpsubw %ymm15, %ymm6, %ymm6 vpaddw %ymm15, %ymm11, %ymm11 vpslld $0x10, %ymm7, %ymm5 vpblendw $0xaa, %ymm5, %ymm9, %ymm5 # ymm5 = ymm9[0],ymm5[1],ymm9[2],ymm5[3],ymm9[4],ymm5[5],ymm9[6],ymm5[7],ymm9[8],ymm5[9],ymm9[10],ymm5[11],ymm9[12],ymm5[13],ymm9[14],ymm5[15] vpsrld $0x10, %ymm9, %ymm9 vpblendw $0xaa, %ymm7, %ymm9, %ymm7 # ymm7 = ymm9[0],ymm7[1],ymm9[2],ymm7[3],ymm9[4],ymm7[5],ymm9[6],ymm7[7],ymm9[8],ymm7[9],ymm9[10],ymm7[11],ymm9[12],ymm7[13],ymm9[14],ymm7[15] vpslld $0x10, %ymm11, %ymm9 vpblendw $0xaa, %ymm9, %ymm6, %ymm9 # ymm9 = ymm6[0],ymm9[1],ymm6[2],ymm9[3],ymm6[4],ymm9[5],ymm6[6],ymm9[7],ymm6[8],ymm9[9],ymm6[10],ymm9[11],ymm6[12],ymm9[13],ymm6[14],ymm9[15] vpsrld $0x10, %ymm6, %ymm6 vpblendw $0xaa, %ymm11, %ymm6, %ymm11 # ymm11 = ymm6[0],ymm11[1],ymm6[2],ymm11[3],ymm6[4],ymm11[5],ymm6[6],ymm11[7],ymm6[8],ymm11[9],ymm6[10],ymm11[11],ymm6[12],ymm11[13],ymm6[14],ymm11[15] vmovdqa 0x260(%rsi), %ymm15 vmovdqa 0x280(%rsi), %ymm2 vpmullw %ymm15, %ymm5, %ymm12 vpmullw %ymm15, %ymm7, %ymm13 vpmullw %ymm15, %ymm9, %ymm14 vpmullw %ymm15, %ymm11, %ymm15 vpmulhw %ymm2, %ymm5, %ymm5 
vpmulhw %ymm2, %ymm7, %ymm7 vpmulhw %ymm2, %ymm9, %ymm9 vpmulhw %ymm2, %ymm11, %ymm11 vpslld $0x10, %ymm10, %ymm6 vpblendw $0xaa, %ymm6, %ymm3, %ymm6 # ymm6 = ymm3[0],ymm6[1],ymm3[2],ymm6[3],ymm3[4],ymm6[5],ymm3[6],ymm6[7],ymm3[8],ymm6[9],ymm3[10],ymm6[11],ymm3[12],ymm6[13],ymm3[14],ymm6[15] vpsrld $0x10, %ymm3, %ymm3 vpblendw $0xaa, %ymm10, %ymm3, %ymm10 # ymm10 = ymm3[0],ymm10[1],ymm3[2],ymm10[3],ymm3[4],ymm10[5],ymm3[6],ymm10[7],ymm3[8],ymm10[9],ymm3[10],ymm10[11],ymm3[12],ymm10[13],ymm3[14],ymm10[15] vpslld $0x10, %ymm8, %ymm3 vpblendw $0xaa, %ymm3, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7],ymm4[8],ymm3[9],ymm4[10],ymm3[11],ymm4[12],ymm3[13],ymm4[14],ymm3[15] vpsrld $0x10, %ymm4, %ymm4 vpblendw $0xaa, %ymm8, %ymm4, %ymm8 # ymm8 = ymm4[0],ymm8[1],ymm4[2],ymm8[3],ymm4[4],ymm8[5],ymm4[6],ymm8[7],ymm4[8],ymm8[9],ymm4[10],ymm8[11],ymm4[12],ymm8[13],ymm4[14],ymm8[15] vpmulhw %ymm0, %ymm12, %ymm12 vpmulhw %ymm0, %ymm13, %ymm13 vpmulhw %ymm0, %ymm14, %ymm14 vpmulhw %ymm0, %ymm15, %ymm15 vpaddw %ymm5, %ymm6, %ymm4 vpsubw %ymm5, %ymm6, %ymm5 vpaddw %ymm7, %ymm10, %ymm6 vpsubw %ymm7, %ymm10, %ymm7 vpaddw %ymm9, %ymm3, %ymm10 vpsubw %ymm9, %ymm3, %ymm9 vpaddw %ymm11, %ymm8, %ymm3 vpsubw %ymm11, %ymm8, %ymm11 vpsubw %ymm12, %ymm4, %ymm4 vpaddw %ymm12, %ymm5, %ymm5 vpsubw %ymm13, %ymm6, %ymm6 vpaddw %ymm13, %ymm7, %ymm7 vpsubw %ymm14, %ymm10, %ymm10 vpaddw %ymm14, %ymm9, %ymm9 vpsubw %ymm15, %ymm3, %ymm3 vpaddw %ymm15, %ymm11, %ymm11 vmovdqa 0x2a0(%rsi), %ymm14 vmovdqa 0x2e0(%rsi), %ymm15 vmovdqa 0x2c0(%rsi), %ymm8 vmovdqa 0x300(%rsi), %ymm2 vpmullw %ymm14, %ymm10, %ymm12 vpmullw %ymm14, %ymm3, %ymm13 vpmullw %ymm15, %ymm9, %ymm14 vpmullw %ymm15, %ymm11, %ymm15 vpmulhw %ymm8, %ymm10, %ymm10 vpmulhw %ymm8, %ymm3, %ymm3 vpmulhw %ymm2, %ymm9, %ymm9 vpmulhw %ymm2, %ymm11, %ymm11 vpmulhw %ymm0, %ymm12, %ymm12 vpmulhw %ymm0, %ymm13, %ymm13 vpmulhw %ymm0, %ymm14, %ymm14 vpmulhw %ymm0, %ymm15, %ymm15 vpaddw %ymm10, %ymm4, %ymm8 vpsubw %ymm10, %ymm4, %ymm10 vpaddw %ymm3, %ymm6, %ymm4 vpsubw %ymm3, %ymm6, %ymm3 vpaddw %ymm9, %ymm5, %ymm6 vpsubw %ymm9, %ymm5, %ymm9 vpaddw %ymm11, %ymm7, %ymm5 vpsubw %ymm11, %ymm7, %ymm11 vpsubw %ymm12, %ymm8, %ymm8 vpaddw %ymm12, %ymm10, %ymm10 vpsubw %ymm13, %ymm4, %ymm4 vpaddw %ymm13, %ymm3, %ymm3 vpsubw %ymm14, %ymm6, %ymm6 vpaddw %ymm14, %ymm9, %ymm9 vpsubw %ymm15, %ymm5, %ymm5 vpaddw %ymm15, %ymm11, %ymm11 vmovdqa %ymm8, (%rdi) vmovdqa %ymm4, 0x20(%rdi) vmovdqa %ymm10, 0x40(%rdi) vmovdqa %ymm3, 0x60(%rdi) vmovdqa %ymm6, 0x80(%rdi) vmovdqa %ymm5, 0xa0(%rdi) vmovdqa %ymm9, 0xc0(%rdi) vmovdqa %ymm11, 0xe0(%rdi) vmovdqa 0x320(%rsi), %ymm15 vmovdqa 0x180(%rdi), %ymm8 vmovdqa 0x1a0(%rdi), %ymm9 vmovdqa 0x1c0(%rdi), %ymm10 vmovdqa 0x1e0(%rdi), %ymm11 vmovdqa 0x340(%rsi), %ymm2 vpmullw %ymm15, %ymm8, %ymm12 vpmullw %ymm15, %ymm9, %ymm13 vpmullw %ymm15, %ymm10, %ymm14 vpmullw %ymm15, %ymm11, %ymm15 vpmulhw %ymm2, %ymm8, %ymm8 vpmulhw %ymm2, %ymm9, %ymm9 vpmulhw %ymm2, %ymm10, %ymm10 vpmulhw %ymm2, %ymm11, %ymm11 vmovdqa 0x100(%rdi), %ymm4 vmovdqa 0x120(%rdi), %ymm5 vmovdqa 0x140(%rdi), %ymm6 vmovdqa 0x160(%rdi), %ymm7 vpmulhw %ymm0, %ymm12, %ymm12 vpmulhw %ymm0, %ymm13, %ymm13 vpmulhw %ymm0, %ymm14, %ymm14 vpmulhw %ymm0, %ymm15, %ymm15 vpaddw %ymm8, %ymm4, %ymm3 vpsubw %ymm8, %ymm4, %ymm8 vpaddw %ymm9, %ymm5, %ymm4 vpsubw %ymm9, %ymm5, %ymm9 vpaddw %ymm10, %ymm6, %ymm5 vpsubw %ymm10, %ymm6, %ymm10 vpaddw %ymm11, %ymm7, %ymm6 vpsubw %ymm11, %ymm7, %ymm11 vpsubw %ymm12, %ymm3, %ymm3 vpaddw %ymm12, %ymm8, %ymm8 vpsubw %ymm13, %ymm4, %ymm4 
vpaddw %ymm13, %ymm9, %ymm9 vpsubw %ymm14, %ymm5, %ymm5 vpaddw %ymm14, %ymm10, %ymm10 vpsubw %ymm15, %ymm6, %ymm6 vpaddw %ymm15, %ymm11, %ymm11 vperm2i128 $0x20, %ymm10, %ymm5, %ymm7 # ymm7 = ymm5[0,1],ymm10[0,1] vperm2i128 $0x31, %ymm10, %ymm5, %ymm10 # ymm10 = ymm5[2,3],ymm10[2,3] vperm2i128 $0x20, %ymm11, %ymm6, %ymm5 # ymm5 = ymm6[0,1],ymm11[0,1] vperm2i128 $0x31, %ymm11, %ymm6, %ymm11 # ymm11 = ymm6[2,3],ymm11[2,3] vmovdqa 0x360(%rsi), %ymm15 vmovdqa 0x380(%rsi), %ymm2 vpmullw %ymm15, %ymm7, %ymm12 vpmullw %ymm15, %ymm10, %ymm13 vpmullw %ymm15, %ymm5, %ymm14 vpmullw %ymm15, %ymm11, %ymm15 vpmulhw %ymm2, %ymm7, %ymm7 vpmulhw %ymm2, %ymm10, %ymm10 vpmulhw %ymm2, %ymm5, %ymm5 vpmulhw %ymm2, %ymm11, %ymm11 vperm2i128 $0x20, %ymm8, %ymm3, %ymm6 # ymm6 = ymm3[0,1],ymm8[0,1] vperm2i128 $0x31, %ymm8, %ymm3, %ymm8 # ymm8 = ymm3[2,3],ymm8[2,3] vperm2i128 $0x20, %ymm9, %ymm4, %ymm3 # ymm3 = ymm4[0,1],ymm9[0,1] vperm2i128 $0x31, %ymm9, %ymm4, %ymm9 # ymm9 = ymm4[2,3],ymm9[2,3] vpmulhw %ymm0, %ymm12, %ymm12 vpmulhw %ymm0, %ymm13, %ymm13 vpmulhw %ymm0, %ymm14, %ymm14 vpmulhw %ymm0, %ymm15, %ymm15 vpaddw %ymm7, %ymm6, %ymm4 vpsubw %ymm7, %ymm6, %ymm7 vpaddw %ymm10, %ymm8, %ymm6 vpsubw %ymm10, %ymm8, %ymm10 vpaddw %ymm5, %ymm3, %ymm8 vpsubw %ymm5, %ymm3, %ymm5 vpaddw %ymm11, %ymm9, %ymm3 vpsubw %ymm11, %ymm9, %ymm11 vpsubw %ymm12, %ymm4, %ymm4 vpaddw %ymm12, %ymm7, %ymm7 vpsubw %ymm13, %ymm6, %ymm6 vpaddw %ymm13, %ymm10, %ymm10 vpsubw %ymm14, %ymm8, %ymm8 vpaddw %ymm14, %ymm5, %ymm5 vpsubw %ymm15, %ymm3, %ymm3 vpaddw %ymm15, %ymm11, %ymm11 vpunpcklqdq %ymm5, %ymm8, %ymm9 # ymm9 = ymm8[0],ymm5[0],ymm8[2],ymm5[2] vpunpckhqdq %ymm5, %ymm8, %ymm5 # ymm5 = ymm8[1],ymm5[1],ymm8[3],ymm5[3] vpunpcklqdq %ymm11, %ymm3, %ymm8 # ymm8 = ymm3[0],ymm11[0],ymm3[2],ymm11[2] vpunpckhqdq %ymm11, %ymm3, %ymm11 # ymm11 = ymm3[1],ymm11[1],ymm3[3],ymm11[3] vmovdqa 0x3a0(%rsi), %ymm15 vmovdqa 0x3c0(%rsi), %ymm2 vpmullw %ymm15, %ymm9, %ymm12 vpmullw %ymm15, %ymm5, %ymm13 vpmullw %ymm15, %ymm8, %ymm14 vpmullw %ymm15, %ymm11, %ymm15 vpmulhw %ymm2, %ymm9, %ymm9 vpmulhw %ymm2, %ymm5, %ymm5 vpmulhw %ymm2, %ymm8, %ymm8 vpmulhw %ymm2, %ymm11, %ymm11 vpunpcklqdq %ymm7, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm7[0],ymm4[2],ymm7[2] vpunpckhqdq %ymm7, %ymm4, %ymm7 # ymm7 = ymm4[1],ymm7[1],ymm4[3],ymm7[3] vpunpcklqdq %ymm10, %ymm6, %ymm4 # ymm4 = ymm6[0],ymm10[0],ymm6[2],ymm10[2] vpunpckhqdq %ymm10, %ymm6, %ymm10 # ymm10 = ymm6[1],ymm10[1],ymm6[3],ymm10[3] vpmulhw %ymm0, %ymm12, %ymm12 vpmulhw %ymm0, %ymm13, %ymm13 vpmulhw %ymm0, %ymm14, %ymm14 vpmulhw %ymm0, %ymm15, %ymm15 vpaddw %ymm9, %ymm3, %ymm6 vpsubw %ymm9, %ymm3, %ymm9 vpaddw %ymm5, %ymm7, %ymm3 vpsubw %ymm5, %ymm7, %ymm5 vpaddw %ymm8, %ymm4, %ymm7 vpsubw %ymm8, %ymm4, %ymm8 vpaddw %ymm11, %ymm10, %ymm4 vpsubw %ymm11, %ymm10, %ymm11 vpsubw %ymm12, %ymm6, %ymm6 vpaddw %ymm12, %ymm9, %ymm9 vpsubw %ymm13, %ymm3, %ymm3 vpaddw %ymm13, %ymm5, %ymm5 vpsubw %ymm14, %ymm7, %ymm7 vpaddw %ymm14, %ymm8, %ymm8 vpsubw %ymm15, %ymm4, %ymm4 vpaddw %ymm15, %ymm11, %ymm11 vmovsldup %ymm8, %ymm10 # ymm10 = ymm8[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm10, %ymm7, %ymm10 # ymm10 = ymm7[0],ymm10[1],ymm7[2],ymm10[3],ymm7[4],ymm10[5],ymm7[6],ymm10[7] vpsrlq $0x20, %ymm7, %ymm7 vpblendd $0xaa, %ymm8, %ymm7, %ymm8 # ymm8 = ymm7[0],ymm8[1],ymm7[2],ymm8[3],ymm7[4],ymm8[5],ymm7[6],ymm8[7] vmovsldup %ymm11, %ymm7 # ymm7 = ymm11[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm7, %ymm4, %ymm7 # ymm7 = ymm4[0],ymm7[1],ymm4[2],ymm7[3],ymm4[4],ymm7[5],ymm4[6],ymm7[7] vpsrlq $0x20, %ymm4, %ymm4 vpblendd $0xaa, %ymm11, %ymm4, %ymm11 # 
ymm11 = ymm4[0],ymm11[1],ymm4[2],ymm11[3],ymm4[4],ymm11[5],ymm4[6],ymm11[7] vmovdqa 0x3e0(%rsi), %ymm15 vmovdqa 0x400(%rsi), %ymm2 vpmullw %ymm15, %ymm10, %ymm12 vpmullw %ymm15, %ymm8, %ymm13 vpmullw %ymm15, %ymm7, %ymm14 vpmullw %ymm15, %ymm11, %ymm15 vpmulhw %ymm2, %ymm10, %ymm10 vpmulhw %ymm2, %ymm8, %ymm8 vpmulhw %ymm2, %ymm7, %ymm7 vpmulhw %ymm2, %ymm11, %ymm11 vmovsldup %ymm9, %ymm4 # ymm4 = ymm9[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm4, %ymm6, %ymm4 # ymm4 = ymm6[0],ymm4[1],ymm6[2],ymm4[3],ymm6[4],ymm4[5],ymm6[6],ymm4[7] vpsrlq $0x20, %ymm6, %ymm6 vpblendd $0xaa, %ymm9, %ymm6, %ymm9 # ymm9 = ymm6[0],ymm9[1],ymm6[2],ymm9[3],ymm6[4],ymm9[5],ymm6[6],ymm9[7] vmovsldup %ymm5, %ymm6 # ymm6 = ymm5[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm6, %ymm3, %ymm6 # ymm6 = ymm3[0],ymm6[1],ymm3[2],ymm6[3],ymm3[4],ymm6[5],ymm3[6],ymm6[7] vpsrlq $0x20, %ymm3, %ymm3 vpblendd $0xaa, %ymm5, %ymm3, %ymm5 # ymm5 = ymm3[0],ymm5[1],ymm3[2],ymm5[3],ymm3[4],ymm5[5],ymm3[6],ymm5[7] vpmulhw %ymm0, %ymm12, %ymm12 vpmulhw %ymm0, %ymm13, %ymm13 vpmulhw %ymm0, %ymm14, %ymm14 vpmulhw %ymm0, %ymm15, %ymm15 vpaddw %ymm10, %ymm4, %ymm3 vpsubw %ymm10, %ymm4, %ymm10 vpaddw %ymm8, %ymm9, %ymm4 vpsubw %ymm8, %ymm9, %ymm8 vpaddw %ymm7, %ymm6, %ymm9 vpsubw %ymm7, %ymm6, %ymm7 vpaddw %ymm11, %ymm5, %ymm6 vpsubw %ymm11, %ymm5, %ymm11 vpsubw %ymm12, %ymm3, %ymm3 vpaddw %ymm12, %ymm10, %ymm10 vpsubw %ymm13, %ymm4, %ymm4 vpaddw %ymm13, %ymm8, %ymm8 vpsubw %ymm14, %ymm9, %ymm9 vpaddw %ymm14, %ymm7, %ymm7 vpsubw %ymm15, %ymm6, %ymm6 vpaddw %ymm15, %ymm11, %ymm11 vpslld $0x10, %ymm7, %ymm5 vpblendw $0xaa, %ymm5, %ymm9, %ymm5 # ymm5 = ymm9[0],ymm5[1],ymm9[2],ymm5[3],ymm9[4],ymm5[5],ymm9[6],ymm5[7],ymm9[8],ymm5[9],ymm9[10],ymm5[11],ymm9[12],ymm5[13],ymm9[14],ymm5[15] vpsrld $0x10, %ymm9, %ymm9 vpblendw $0xaa, %ymm7, %ymm9, %ymm7 # ymm7 = ymm9[0],ymm7[1],ymm9[2],ymm7[3],ymm9[4],ymm7[5],ymm9[6],ymm7[7],ymm9[8],ymm7[9],ymm9[10],ymm7[11],ymm9[12],ymm7[13],ymm9[14],ymm7[15] vpslld $0x10, %ymm11, %ymm9 vpblendw $0xaa, %ymm9, %ymm6, %ymm9 # ymm9 = ymm6[0],ymm9[1],ymm6[2],ymm9[3],ymm6[4],ymm9[5],ymm6[6],ymm9[7],ymm6[8],ymm9[9],ymm6[10],ymm9[11],ymm6[12],ymm9[13],ymm6[14],ymm9[15] vpsrld $0x10, %ymm6, %ymm6 vpblendw $0xaa, %ymm11, %ymm6, %ymm11 # ymm11 = ymm6[0],ymm11[1],ymm6[2],ymm11[3],ymm6[4],ymm11[5],ymm6[6],ymm11[7],ymm6[8],ymm11[9],ymm6[10],ymm11[11],ymm6[12],ymm11[13],ymm6[14],ymm11[15] vmovdqa 0x420(%rsi), %ymm15 vmovdqa 0x440(%rsi), %ymm2 vpmullw %ymm15, %ymm5, %ymm12 vpmullw %ymm15, %ymm7, %ymm13 vpmullw %ymm15, %ymm9, %ymm14 vpmullw %ymm15, %ymm11, %ymm15 vpmulhw %ymm2, %ymm5, %ymm5 vpmulhw %ymm2, %ymm7, %ymm7 vpmulhw %ymm2, %ymm9, %ymm9 vpmulhw %ymm2, %ymm11, %ymm11 vpslld $0x10, %ymm10, %ymm6 vpblendw $0xaa, %ymm6, %ymm3, %ymm6 # ymm6 = ymm3[0],ymm6[1],ymm3[2],ymm6[3],ymm3[4],ymm6[5],ymm3[6],ymm6[7],ymm3[8],ymm6[9],ymm3[10],ymm6[11],ymm3[12],ymm6[13],ymm3[14],ymm6[15] vpsrld $0x10, %ymm3, %ymm3 vpblendw $0xaa, %ymm10, %ymm3, %ymm10 # ymm10 = ymm3[0],ymm10[1],ymm3[2],ymm10[3],ymm3[4],ymm10[5],ymm3[6],ymm10[7],ymm3[8],ymm10[9],ymm3[10],ymm10[11],ymm3[12],ymm10[13],ymm3[14],ymm10[15] vpslld $0x10, %ymm8, %ymm3 vpblendw $0xaa, %ymm3, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7],ymm4[8],ymm3[9],ymm4[10],ymm3[11],ymm4[12],ymm3[13],ymm4[14],ymm3[15] vpsrld $0x10, %ymm4, %ymm4 vpblendw $0xaa, %ymm8, %ymm4, %ymm8 # ymm8 = ymm4[0],ymm8[1],ymm4[2],ymm8[3],ymm4[4],ymm8[5],ymm4[6],ymm8[7],ymm4[8],ymm8[9],ymm4[10],ymm8[11],ymm4[12],ymm8[13],ymm4[14],ymm8[15] vpmulhw %ymm0, %ymm12, %ymm12 vpmulhw %ymm0, %ymm13, 
%ymm13 vpmulhw %ymm0, %ymm14, %ymm14 vpmulhw %ymm0, %ymm15, %ymm15 vpaddw %ymm5, %ymm6, %ymm4 vpsubw %ymm5, %ymm6, %ymm5 vpaddw %ymm7, %ymm10, %ymm6 vpsubw %ymm7, %ymm10, %ymm7 vpaddw %ymm9, %ymm3, %ymm10 vpsubw %ymm9, %ymm3, %ymm9 vpaddw %ymm11, %ymm8, %ymm3 vpsubw %ymm11, %ymm8, %ymm11 vpsubw %ymm12, %ymm4, %ymm4 vpaddw %ymm12, %ymm5, %ymm5 vpsubw %ymm13, %ymm6, %ymm6 vpaddw %ymm13, %ymm7, %ymm7 vpsubw %ymm14, %ymm10, %ymm10 vpaddw %ymm14, %ymm9, %ymm9 vpsubw %ymm15, %ymm3, %ymm3 vpaddw %ymm15, %ymm11, %ymm11 vmovdqa 0x460(%rsi), %ymm14 vmovdqa 0x4a0(%rsi), %ymm15 vmovdqa 0x480(%rsi), %ymm8 vmovdqa 0x4c0(%rsi), %ymm2 vpmullw %ymm14, %ymm10, %ymm12 vpmullw %ymm14, %ymm3, %ymm13 vpmullw %ymm15, %ymm9, %ymm14 vpmullw %ymm15, %ymm11, %ymm15 vpmulhw %ymm8, %ymm10, %ymm10 vpmulhw %ymm8, %ymm3, %ymm3 vpmulhw %ymm2, %ymm9, %ymm9 vpmulhw %ymm2, %ymm11, %ymm11 vpmulhw %ymm0, %ymm12, %ymm12 vpmulhw %ymm0, %ymm13, %ymm13 vpmulhw %ymm0, %ymm14, %ymm14 vpmulhw %ymm0, %ymm15, %ymm15 vpaddw %ymm10, %ymm4, %ymm8 vpsubw %ymm10, %ymm4, %ymm10 vpaddw %ymm3, %ymm6, %ymm4 vpsubw %ymm3, %ymm6, %ymm3 vpaddw %ymm9, %ymm5, %ymm6 vpsubw %ymm9, %ymm5, %ymm9 vpaddw %ymm11, %ymm7, %ymm5 vpsubw %ymm11, %ymm7, %ymm11 vpsubw %ymm12, %ymm8, %ymm8 vpaddw %ymm12, %ymm10, %ymm10 vpsubw %ymm13, %ymm4, %ymm4 vpaddw %ymm13, %ymm3, %ymm3 vpsubw %ymm14, %ymm6, %ymm6 vpaddw %ymm14, %ymm9, %ymm9 vpsubw %ymm15, %ymm5, %ymm5 vpaddw %ymm15, %ymm11, %ymm11 vmovdqa %ymm8, 0x100(%rdi) vmovdqa %ymm4, 0x120(%rdi) vmovdqa %ymm10, 0x140(%rdi) vmovdqa %ymm3, 0x160(%rdi) vmovdqa %ymm6, 0x180(%rdi) vmovdqa %ymm5, 0x1a0(%rdi) vmovdqa %ymm9, 0x1c0(%rdi) vmovdqa %ymm11, 0x1e0(%rdi) retq .cfi_endproc
wlsfx/bnbb
26,401
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/x86_64/src/polyvec_basemul_acc_montgomery_cached_asm_k3.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/x86_64/src/polyvec_basemul_acc_montgomery_cached_asm_k3.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k3) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k3) S2N_BN_SYMBOL(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k3): .cfi_startproc vmovdqa (%r8), %ymm0 vmovdqa 0x20(%r8), %ymm1 vmovdqa (%rsi), %ymm2 vmovdqa 0x20(%rsi), %ymm3 vmovdqa (%rdx), %ymm4 vmovdqa 0x20(%rdx), %ymm5 vmovdqa (%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, (%rdi) vmovdqa %ymm9, 0x20(%rdi) vmovdqa 0x40(%rsi), %ymm2 vmovdqa 0x60(%rsi), %ymm3 vmovdqa 0x40(%rdx), %ymm4 vmovdqa 0x60(%rdx), %ymm5 vmovdqa 0x20(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x40(%rdi) vmovdqa %ymm9, 0x60(%rdi) vmovdqa 0x80(%rsi), %ymm2 vmovdqa 0xa0(%rsi), %ymm3 vmovdqa 0x80(%rdx), %ymm4 vmovdqa 0xa0(%rdx), %ymm5 vmovdqa 0x40(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x80(%rdi) vmovdqa %ymm9, 0xa0(%rdi) vmovdqa 0xc0(%rsi), %ymm2 vmovdqa 0xe0(%rsi), %ymm3 vmovdqa 0xc0(%rdx), %ymm4 vmovdqa 0xe0(%rdx), %ymm5 vmovdqa 0x60(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw 
%ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0xc0(%rdi) vmovdqa %ymm9, 0xe0(%rdi) vmovdqa 0x100(%rsi), %ymm2 vmovdqa 0x120(%rsi), %ymm3 vmovdqa 0x100(%rdx), %ymm4 vmovdqa 0x120(%rdx), %ymm5 vmovdqa 0x80(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x100(%rdi) vmovdqa %ymm9, 0x120(%rdi) vmovdqa 0x140(%rsi), %ymm2 vmovdqa 0x160(%rsi), %ymm3 vmovdqa 0x140(%rdx), %ymm4 vmovdqa 0x160(%rdx), %ymm5 vmovdqa 0xa0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x140(%rdi) vmovdqa %ymm9, 0x160(%rdi) vmovdqa 0x180(%rsi), %ymm2 vmovdqa 0x1a0(%rsi), %ymm3 vmovdqa 0x180(%rdx), %ymm4 vmovdqa 0x1a0(%rdx), %ymm5 vmovdqa 0xc0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x180(%rdi) vmovdqa %ymm9, 0x1a0(%rdi) vmovdqa 0x1c0(%rsi), %ymm2 vmovdqa 0x1e0(%rsi), %ymm3 vmovdqa 0x1c0(%rdx), %ymm4 vmovdqa 0x1e0(%rdx), %ymm5 vmovdqa 0xe0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x1c0(%rdi) vmovdqa %ymm9, 0x1e0(%rdi) vmovdqa 0x200(%rsi), %ymm2 vmovdqa 0x220(%rsi), %ymm3 vmovdqa 0x200(%rdx), %ymm4 vmovdqa 0x220(%rdx), %ymm5 vmovdqa 0x100(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw 
%ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa (%rdi), %ymm8 vmovdqa 0x20(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, (%rdi) vmovdqa %ymm9, 0x20(%rdi) vmovdqa 0x240(%rsi), %ymm2 vmovdqa 0x260(%rsi), %ymm3 vmovdqa 0x240(%rdx), %ymm4 vmovdqa 0x260(%rdx), %ymm5 vmovdqa 0x120(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x40(%rdi), %ymm8 vmovdqa 0x60(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x40(%rdi) vmovdqa %ymm9, 0x60(%rdi) vmovdqa 0x280(%rsi), %ymm2 vmovdqa 0x2a0(%rsi), %ymm3 vmovdqa 0x280(%rdx), %ymm4 vmovdqa 0x2a0(%rdx), %ymm5 vmovdqa 0x140(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x80(%rdi), %ymm8 vmovdqa 0xa0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x80(%rdi) vmovdqa %ymm9, 0xa0(%rdi) vmovdqa 0x2c0(%rsi), %ymm2 vmovdqa 0x2e0(%rsi), %ymm3 vmovdqa 0x2c0(%rdx), %ymm4 vmovdqa 0x2e0(%rdx), %ymm5 vmovdqa 0x160(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0xc0(%rdi), %ymm8 vmovdqa 0xe0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0xc0(%rdi) vmovdqa %ymm9, 0xe0(%rdi) vmovdqa 0x300(%rsi), %ymm2 vmovdqa 0x320(%rsi), %ymm3 vmovdqa 0x300(%rdx), %ymm4 vmovdqa 0x320(%rdx), %ymm5 vmovdqa 0x180(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 
vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x100(%rdi), %ymm8 vmovdqa 0x120(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x100(%rdi) vmovdqa %ymm9, 0x120(%rdi) vmovdqa 0x340(%rsi), %ymm2 vmovdqa 0x360(%rsi), %ymm3 vmovdqa 0x340(%rdx), %ymm4 vmovdqa 0x360(%rdx), %ymm5 vmovdqa 0x1a0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x140(%rdi), %ymm8 vmovdqa 0x160(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x140(%rdi) vmovdqa %ymm9, 0x160(%rdi) vmovdqa 0x380(%rsi), %ymm2 vmovdqa 0x3a0(%rsi), %ymm3 vmovdqa 0x380(%rdx), %ymm4 vmovdqa 0x3a0(%rdx), %ymm5 vmovdqa 0x1c0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x180(%rdi), %ymm8 vmovdqa 0x1a0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x180(%rdi) vmovdqa %ymm9, 0x1a0(%rdi) vmovdqa 0x3c0(%rsi), %ymm2 vmovdqa 0x3e0(%rsi), %ymm3 vmovdqa 0x3c0(%rdx), %ymm4 vmovdqa 0x3e0(%rdx), %ymm5 vmovdqa 0x1e0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x1c0(%rdi), %ymm8 vmovdqa 0x1e0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x1c0(%rdi) vmovdqa %ymm9, 0x1e0(%rdi) vmovdqa 0x400(%rsi), %ymm2 vmovdqa 0x420(%rsi), %ymm3 vmovdqa 0x400(%rdx), %ymm4 vmovdqa 0x420(%rdx), %ymm5 vmovdqa 0x200(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 
vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa (%rdi), %ymm8 vmovdqa 0x20(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, (%rdi) vmovdqa %ymm9, 0x20(%rdi) vmovdqa 0x440(%rsi), %ymm2 vmovdqa 0x460(%rsi), %ymm3 vmovdqa 0x440(%rdx), %ymm4 vmovdqa 0x460(%rdx), %ymm5 vmovdqa 0x220(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x40(%rdi), %ymm8 vmovdqa 0x60(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x40(%rdi) vmovdqa %ymm9, 0x60(%rdi) vmovdqa 0x480(%rsi), %ymm2 vmovdqa 0x4a0(%rsi), %ymm3 vmovdqa 0x480(%rdx), %ymm4 vmovdqa 0x4a0(%rdx), %ymm5 vmovdqa 0x240(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x80(%rdi), %ymm8 vmovdqa 0xa0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x80(%rdi) vmovdqa %ymm9, 0xa0(%rdi) vmovdqa 0x4c0(%rsi), %ymm2 vmovdqa 0x4e0(%rsi), %ymm3 vmovdqa 0x4c0(%rdx), %ymm4 vmovdqa 0x4e0(%rdx), %ymm5 vmovdqa 0x260(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0xc0(%rdi), %ymm8 vmovdqa 0xe0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0xc0(%rdi) vmovdqa %ymm9, 0xe0(%rdi) vmovdqa 0x500(%rsi), %ymm2 vmovdqa 0x520(%rsi), %ymm3 vmovdqa 0x500(%rdx), %ymm4 vmovdqa 0x520(%rdx), %ymm5 vmovdqa 0x280(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 
vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x100(%rdi), %ymm8 vmovdqa 0x120(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x100(%rdi) vmovdqa %ymm9, 0x120(%rdi) vmovdqa 0x540(%rsi), %ymm2 vmovdqa 0x560(%rsi), %ymm3 vmovdqa 0x540(%rdx), %ymm4 vmovdqa 0x560(%rdx), %ymm5 vmovdqa 0x2a0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x140(%rdi), %ymm8 vmovdqa 0x160(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x140(%rdi) vmovdqa %ymm9, 0x160(%rdi) vmovdqa 0x580(%rsi), %ymm2 vmovdqa 0x5a0(%rsi), %ymm3 vmovdqa 0x580(%rdx), %ymm4 vmovdqa 0x5a0(%rdx), %ymm5 vmovdqa 0x2c0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x180(%rdi), %ymm8 vmovdqa 0x1a0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x180(%rdi) vmovdqa %ymm9, 0x1a0(%rdi) vmovdqa 0x5c0(%rsi), %ymm2 vmovdqa 0x5e0(%rsi), %ymm3 vmovdqa 0x5c0(%rdx), %ymm4 vmovdqa 0x5e0(%rdx), %ymm5 vmovdqa 0x2e0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x1c0(%rdi), %ymm8 vmovdqa 0x1e0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x1c0(%rdi) vmovdqa %ymm9, 0x1e0(%rdi) retq .cfi_endproc
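The k3 routine above accumulates NTT-domain base multiplications over three polynomial-vector entries: for each coefficient pair it appears to compute r0 = a0*b0 + a1*(b1*zeta) and r1 = a0*b1 + a1*b0, with the b1*zeta products precomputed in the cache argument (%rcx) and each product reduced via signed Montgomery reduction (the vpmullw against 0x20(%r8) followed by vpmulhw against (%r8)). The following scalar C sketch is a minimal reference for that per-pair computation, assuming the usual ML-KEM modulus q = 3329; the names fqmul, MLKEM_Q, QINV and b_cache are taken from the reference implementation or invented here for illustration, and the real kernel operates on 16 packed lanes with a different reduction schedule rather than this per-term loop.

#include <stdint.h>

#define MLKEM_Q 3329
#define QINV    -3327  /* q^-1 mod 2^16 as a signed 16-bit value */

/* Signed Montgomery reduction of a 32-bit product: returns a*b*2^-16 mod q,
   with the result in (-q, q). */
static int16_t fqmul(int16_t a, int16_t b)
{
    int32_t p = (int32_t)a * b;
    int16_t t = (int16_t)p * QINV;
    return (int16_t)((p - (int32_t)t * MLKEM_Q) >> 16);
}

/* Scalar sketch of one accumulated coefficient pair for k = 3.
   a[j], b[j] are the j-th vector entries' coefficient pairs; b_cache[j]
   holds the precomputed fqmul(b[j][1], zeta) for this pair's twiddle. */
static void basemul_acc_k3(int16_t r[2],
                           const int16_t a[3][2],
                           const int16_t b[3][2],
                           const int16_t b_cache[3])
{
    r[0] = 0;
    r[1] = 0;
    for (int j = 0; j < 3; j++) {
        r[0] += fqmul(a[j][0], b[j][0]) + fqmul(a[j][1], b_cache[j]);
        r[1] += fqmul(a[j][0], b[j][1]) + fqmul(a[j][1], b[j][0]);
    }
    /* Six summands each of magnitude < q, so the int16_t accumulators
       cannot overflow for k = 3. */
}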
wlsfx/bnbb
35,345
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/x86_64/src/polyvec_basemul_acc_montgomery_cached_asm_k4.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/x86_64/src/polyvec_basemul_acc_montgomery_cached_asm_k4.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k4) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k4) S2N_BN_SYMBOL(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k4): .cfi_startproc vmovdqa (%r8), %ymm0 vmovdqa 0x20(%r8), %ymm1 vmovdqa (%rsi), %ymm2 vmovdqa 0x20(%rsi), %ymm3 vmovdqa (%rdx), %ymm4 vmovdqa 0x20(%rdx), %ymm5 vmovdqa (%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, (%rdi) vmovdqa %ymm9, 0x20(%rdi) vmovdqa 0x40(%rsi), %ymm2 vmovdqa 0x60(%rsi), %ymm3 vmovdqa 0x40(%rdx), %ymm4 vmovdqa 0x60(%rdx), %ymm5 vmovdqa 0x20(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x40(%rdi) vmovdqa %ymm9, 0x60(%rdi) vmovdqa 0x80(%rsi), %ymm2 vmovdqa 0xa0(%rsi), %ymm3 vmovdqa 0x80(%rdx), %ymm4 vmovdqa 0xa0(%rdx), %ymm5 vmovdqa 0x40(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x80(%rdi) vmovdqa %ymm9, 0xa0(%rdi) vmovdqa 0xc0(%rsi), %ymm2 vmovdqa 0xe0(%rsi), %ymm3 vmovdqa 0xc0(%rdx), %ymm4 vmovdqa 0xe0(%rdx), %ymm5 vmovdqa 0x60(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw 
%ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0xc0(%rdi) vmovdqa %ymm9, 0xe0(%rdi) vmovdqa 0x100(%rsi), %ymm2 vmovdqa 0x120(%rsi), %ymm3 vmovdqa 0x100(%rdx), %ymm4 vmovdqa 0x120(%rdx), %ymm5 vmovdqa 0x80(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x100(%rdi) vmovdqa %ymm9, 0x120(%rdi) vmovdqa 0x140(%rsi), %ymm2 vmovdqa 0x160(%rsi), %ymm3 vmovdqa 0x140(%rdx), %ymm4 vmovdqa 0x160(%rdx), %ymm5 vmovdqa 0xa0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x140(%rdi) vmovdqa %ymm9, 0x160(%rdi) vmovdqa 0x180(%rsi), %ymm2 vmovdqa 0x1a0(%rsi), %ymm3 vmovdqa 0x180(%rdx), %ymm4 vmovdqa 0x1a0(%rdx), %ymm5 vmovdqa 0xc0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x180(%rdi) vmovdqa %ymm9, 0x1a0(%rdi) vmovdqa 0x1c0(%rsi), %ymm2 vmovdqa 0x1e0(%rsi), %ymm3 vmovdqa 0x1c0(%rdx), %ymm4 vmovdqa 0x1e0(%rdx), %ymm5 vmovdqa 0xe0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x1c0(%rdi) vmovdqa %ymm9, 0x1e0(%rdi) vmovdqa 0x200(%rsi), %ymm2 vmovdqa 0x220(%rsi), %ymm3 vmovdqa 0x200(%rdx), %ymm4 vmovdqa 0x220(%rdx), %ymm5 vmovdqa 0x100(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw 
%ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa (%rdi), %ymm8 vmovdqa 0x20(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, (%rdi) vmovdqa %ymm9, 0x20(%rdi) vmovdqa 0x240(%rsi), %ymm2 vmovdqa 0x260(%rsi), %ymm3 vmovdqa 0x240(%rdx), %ymm4 vmovdqa 0x260(%rdx), %ymm5 vmovdqa 0x120(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x40(%rdi), %ymm8 vmovdqa 0x60(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x40(%rdi) vmovdqa %ymm9, 0x60(%rdi) vmovdqa 0x280(%rsi), %ymm2 vmovdqa 0x2a0(%rsi), %ymm3 vmovdqa 0x280(%rdx), %ymm4 vmovdqa 0x2a0(%rdx), %ymm5 vmovdqa 0x140(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x80(%rdi), %ymm8 vmovdqa 0xa0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x80(%rdi) vmovdqa %ymm9, 0xa0(%rdi) vmovdqa 0x2c0(%rsi), %ymm2 vmovdqa 0x2e0(%rsi), %ymm3 vmovdqa 0x2c0(%rdx), %ymm4 vmovdqa 0x2e0(%rdx), %ymm5 vmovdqa 0x160(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0xc0(%rdi), %ymm8 vmovdqa 0xe0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0xc0(%rdi) vmovdqa %ymm9, 0xe0(%rdi) vmovdqa 0x300(%rsi), %ymm2 vmovdqa 0x320(%rsi), %ymm3 vmovdqa 0x300(%rdx), %ymm4 vmovdqa 0x320(%rdx), %ymm5 vmovdqa 0x180(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 
vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x100(%rdi), %ymm8 vmovdqa 0x120(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x100(%rdi) vmovdqa %ymm9, 0x120(%rdi) vmovdqa 0x340(%rsi), %ymm2 vmovdqa 0x360(%rsi), %ymm3 vmovdqa 0x340(%rdx), %ymm4 vmovdqa 0x360(%rdx), %ymm5 vmovdqa 0x1a0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x140(%rdi), %ymm8 vmovdqa 0x160(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x140(%rdi) vmovdqa %ymm9, 0x160(%rdi) vmovdqa 0x380(%rsi), %ymm2 vmovdqa 0x3a0(%rsi), %ymm3 vmovdqa 0x380(%rdx), %ymm4 vmovdqa 0x3a0(%rdx), %ymm5 vmovdqa 0x1c0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x180(%rdi), %ymm8 vmovdqa 0x1a0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x180(%rdi) vmovdqa %ymm9, 0x1a0(%rdi) vmovdqa 0x3c0(%rsi), %ymm2 vmovdqa 0x3e0(%rsi), %ymm3 vmovdqa 0x3c0(%rdx), %ymm4 vmovdqa 0x3e0(%rdx), %ymm5 vmovdqa 0x1e0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x1c0(%rdi), %ymm8 vmovdqa 0x1e0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x1c0(%rdi) vmovdqa %ymm9, 0x1e0(%rdi) vmovdqa 0x400(%rsi), %ymm2 vmovdqa 0x420(%rsi), %ymm3 vmovdqa 0x400(%rdx), %ymm4 vmovdqa 0x420(%rdx), %ymm5 vmovdqa 0x200(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 
vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa (%rdi), %ymm8 vmovdqa 0x20(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, (%rdi) vmovdqa %ymm9, 0x20(%rdi) vmovdqa 0x440(%rsi), %ymm2 vmovdqa 0x460(%rsi), %ymm3 vmovdqa 0x440(%rdx), %ymm4 vmovdqa 0x460(%rdx), %ymm5 vmovdqa 0x220(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x40(%rdi), %ymm8 vmovdqa 0x60(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x40(%rdi) vmovdqa %ymm9, 0x60(%rdi) vmovdqa 0x480(%rsi), %ymm2 vmovdqa 0x4a0(%rsi), %ymm3 vmovdqa 0x480(%rdx), %ymm4 vmovdqa 0x4a0(%rdx), %ymm5 vmovdqa 0x240(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x80(%rdi), %ymm8 vmovdqa 0xa0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x80(%rdi) vmovdqa %ymm9, 0xa0(%rdi) vmovdqa 0x4c0(%rsi), %ymm2 vmovdqa 0x4e0(%rsi), %ymm3 vmovdqa 0x4c0(%rdx), %ymm4 vmovdqa 0x4e0(%rdx), %ymm5 vmovdqa 0x260(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0xc0(%rdi), %ymm8 vmovdqa 0xe0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0xc0(%rdi) vmovdqa %ymm9, 0xe0(%rdi) vmovdqa 0x500(%rsi), %ymm2 vmovdqa 0x520(%rsi), %ymm3 vmovdqa 0x500(%rdx), %ymm4 vmovdqa 0x520(%rdx), %ymm5 vmovdqa 0x280(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 
vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x100(%rdi), %ymm8 vmovdqa 0x120(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x100(%rdi) vmovdqa %ymm9, 0x120(%rdi) vmovdqa 0x540(%rsi), %ymm2 vmovdqa 0x560(%rsi), %ymm3 vmovdqa 0x540(%rdx), %ymm4 vmovdqa 0x560(%rdx), %ymm5 vmovdqa 0x2a0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x140(%rdi), %ymm8 vmovdqa 0x160(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x140(%rdi) vmovdqa %ymm9, 0x160(%rdi) vmovdqa 0x580(%rsi), %ymm2 vmovdqa 0x5a0(%rsi), %ymm3 vmovdqa 0x580(%rdx), %ymm4 vmovdqa 0x5a0(%rdx), %ymm5 vmovdqa 0x2c0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x180(%rdi), %ymm8 vmovdqa 0x1a0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x180(%rdi) vmovdqa %ymm9, 0x1a0(%rdi) vmovdqa 0x5c0(%rsi), %ymm2 vmovdqa 0x5e0(%rsi), %ymm3 vmovdqa 0x5c0(%rdx), %ymm4 vmovdqa 0x5e0(%rdx), %ymm5 vmovdqa 0x2e0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x1c0(%rdi), %ymm8 vmovdqa 0x1e0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x1c0(%rdi) vmovdqa %ymm9, 0x1e0(%rdi) vmovdqa 0x600(%rsi), %ymm2 vmovdqa 0x620(%rsi), %ymm3 vmovdqa 0x600(%rdx), %ymm4 vmovdqa 0x620(%rdx), %ymm5 vmovdqa 0x300(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 
vpaddw %ymm9, %ymm10, %ymm9 vmovdqa (%rdi), %ymm8 vmovdqa 0x20(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, (%rdi) vmovdqa %ymm9, 0x20(%rdi) vmovdqa 0x640(%rsi), %ymm2 vmovdqa 0x660(%rsi), %ymm3 vmovdqa 0x640(%rdx), %ymm4 vmovdqa 0x660(%rdx), %ymm5 vmovdqa 0x320(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x40(%rdi), %ymm8 vmovdqa 0x60(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x40(%rdi) vmovdqa %ymm9, 0x60(%rdi) vmovdqa 0x680(%rsi), %ymm2 vmovdqa 0x6a0(%rsi), %ymm3 vmovdqa 0x680(%rdx), %ymm4 vmovdqa 0x6a0(%rdx), %ymm5 vmovdqa 0x340(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x80(%rdi), %ymm8 vmovdqa 0xa0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x80(%rdi) vmovdqa %ymm9, 0xa0(%rdi) vmovdqa 0x6c0(%rsi), %ymm2 vmovdqa 0x6e0(%rsi), %ymm3 vmovdqa 0x6c0(%rdx), %ymm4 vmovdqa 0x6e0(%rdx), %ymm5 vmovdqa 0x360(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0xc0(%rdi), %ymm8 vmovdqa 0xe0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0xc0(%rdi) vmovdqa %ymm9, 0xe0(%rdi) vmovdqa 0x700(%rsi), %ymm2 vmovdqa 0x720(%rsi), %ymm3 vmovdqa 0x700(%rdx), %ymm4 vmovdqa 0x720(%rdx), %ymm5 vmovdqa 0x380(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x100(%rdi), %ymm8 vmovdqa 0x120(%rdi), %ymm10 
vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x100(%rdi) vmovdqa %ymm9, 0x120(%rdi) vmovdqa 0x740(%rsi), %ymm2 vmovdqa 0x760(%rsi), %ymm3 vmovdqa 0x740(%rdx), %ymm4 vmovdqa 0x760(%rdx), %ymm5 vmovdqa 0x3a0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x140(%rdi), %ymm8 vmovdqa 0x160(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x140(%rdi) vmovdqa %ymm9, 0x160(%rdi) vmovdqa 0x780(%rsi), %ymm2 vmovdqa 0x7a0(%rsi), %ymm3 vmovdqa 0x780(%rdx), %ymm4 vmovdqa 0x7a0(%rdx), %ymm5 vmovdqa 0x3c0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm8, %ymm13, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x180(%rdi), %ymm8 vmovdqa 0x1a0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x180(%rdi) vmovdqa %ymm9, 0x1a0(%rdi) vmovdqa 0x7c0(%rsi), %ymm2 vmovdqa 0x7e0(%rsi), %ymm3 vmovdqa 0x7c0(%rdx), %ymm4 vmovdqa 0x7e0(%rdx), %ymm5 vmovdqa 0x3e0(%rcx), %ymm6 vpmullw %ymm2, %ymm1, %ymm13 vpmullw %ymm3, %ymm1, %ymm14 vpmullw %ymm13, %ymm4, %ymm7 vpmullw %ymm13, %ymm5, %ymm9 vpmullw %ymm14, %ymm6, %ymm8 vpmullw %ymm14, %ymm4, %ymm10 vpmulhw %ymm7, %ymm0, %ymm7 vpmulhw %ymm9, %ymm0, %ymm9 vpmulhw %ymm8, %ymm0, %ymm8 vpmulhw %ymm10, %ymm0, %ymm10 vpmulhw %ymm2, %ymm4, %ymm11 vpmulhw %ymm2, %ymm5, %ymm12 vpmulhw %ymm3, %ymm6, %ymm13 vpmulhw %ymm3, %ymm4, %ymm14 vpsubw %ymm7, %ymm11, %ymm7 vpsubw %ymm9, %ymm12, %ymm9 vpsubw %ymm13, %ymm8, %ymm8 vpsubw %ymm10, %ymm14, %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa 0x1c0(%rdi), %ymm8 vmovdqa 0x1e0(%rdi), %ymm10 vpaddw %ymm7, %ymm8, %ymm7 vpaddw %ymm9, %ymm10, %ymm9 vmovdqa %ymm7, 0x1c0(%rdi) vmovdqa %ymm9, 0x1e0(%rdi) retq .cfi_endproc
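The k4 routine is structurally identical to the k3 one, simply running over four vector entries (the source loads reach 0x7e0(%rsi)/0x7e0(%rdx) and the cache reads reach 0x3e0(%rcx)). Based purely on the memory ranges visible in the assembly, a plausible C-level prototype is sketched below; the parameter names and element counts are inferred for illustration and are not declared anywhere in this repository.

/* Hypothetical prototype inferred from the ranges the routine touches:
   rdi is one 256-coefficient output polynomial, rsi/rdx each hold four
   stacked 256-coefficient input polynomials, rcx holds 4 x 128 cached
   b1*zeta values, and r8 points at the q/qinv constant table. */
void mlkem_polyvec_basemul_acc_montgomery_cached_asm_k4(
    int16_t r[256],
    const int16_t a[4 * 256],
    const int16_t b[4 * 256],
    const int16_t b_cache[4 * 128],
    const int16_t qdata[32]);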
wlsfx/bnbb
5,428
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/x86_64/src/ntttobytes.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /* References * ========== * * - [REF_AVX2] * CRYSTALS-Kyber optimized AVX2 implementation * Bos, Ducas, Kiltz, Lepoint, Lyubashevsky, Schanck, Schwabe, Seiler, Stehlé * https://github.com/pq-crystals/kyber/tree/main/avx2 */ /* * This file is derived from the public domain * AVX2 Kyber implementation @[REF_AVX2]. */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/x86_64/src/ntttobytes.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_ntttobytes_avx2) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_ntttobytes_avx2) S2N_BN_SYMBOL(mlkem_ntttobytes_avx2): .cfi_startproc vmovdqa (%rdx), %ymm0 callq Lntttobytes_avx2_core addq $0x100, %rsi # imm = 0x100 addq $0xc0, %rdi callq Lntttobytes_avx2_core retq .cfi_endproc Lntttobytes_avx2_core: .cfi_startproc vmovdqa (%rsi), %ymm5 vmovdqa 0x20(%rsi), %ymm6 vmovdqa 0x40(%rsi), %ymm7 vmovdqa 0x60(%rsi), %ymm8 vmovdqa 0x80(%rsi), %ymm9 vmovdqa 0xa0(%rsi), %ymm10 vmovdqa 0xc0(%rsi), %ymm11 vmovdqa 0xe0(%rsi), %ymm12 vpsllw $0xc, %ymm6, %ymm4 vpor %ymm4, %ymm5, %ymm4 vpsrlw $0x4, %ymm6, %ymm5 vpsllw $0x8, %ymm7, %ymm6 vpor %ymm5, %ymm6, %ymm5 vpsrlw $0x8, %ymm7, %ymm6 vpsllw $0x4, %ymm8, %ymm7 vpor %ymm6, %ymm7, %ymm6 vpsllw $0xc, %ymm10, %ymm7 vpor %ymm7, %ymm9, %ymm7 vpsrlw $0x4, %ymm10, %ymm8 vpsllw $0x8, %ymm11, %ymm9 vpor %ymm8, %ymm9, %ymm8 vpsrlw $0x8, %ymm11, %ymm9 vpsllw $0x4, %ymm12, %ymm10 vpor %ymm9, %ymm10, %ymm9 vpslld $0x10, %ymm5, %ymm3 vpblendw $0xaa, %ymm3, %ymm4, %ymm3 # ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7],ymm4[8],ymm3[9],ymm4[10],ymm3[11],ymm4[12],ymm3[13],ymm4[14],ymm3[15] vpsrld $0x10, %ymm4, %ymm4 vpblendw $0xaa, %ymm5, %ymm4, %ymm5 # ymm5 = ymm4[0],ymm5[1],ymm4[2],ymm5[3],ymm4[4],ymm5[5],ymm4[6],ymm5[7],ymm4[8],ymm5[9],ymm4[10],ymm5[11],ymm4[12],ymm5[13],ymm4[14],ymm5[15] vpslld $0x10, %ymm7, %ymm4 vpblendw $0xaa, %ymm4, %ymm6, %ymm4 # ymm4 = ymm6[0],ymm4[1],ymm6[2],ymm4[3],ymm6[4],ymm4[5],ymm6[6],ymm4[7],ymm6[8],ymm4[9],ymm6[10],ymm4[11],ymm6[12],ymm4[13],ymm6[14],ymm4[15] vpsrld $0x10, %ymm6, %ymm6 vpblendw $0xaa, %ymm7, %ymm6, %ymm7 # ymm7 = ymm6[0],ymm7[1],ymm6[2],ymm7[3],ymm6[4],ymm7[5],ymm6[6],ymm7[7],ymm6[8],ymm7[9],ymm6[10],ymm7[11],ymm6[12],ymm7[13],ymm6[14],ymm7[15] vpslld $0x10, %ymm9, %ymm6 vpblendw $0xaa, %ymm6, %ymm8, %ymm6 # ymm6 = ymm8[0],ymm6[1],ymm8[2],ymm6[3],ymm8[4],ymm6[5],ymm8[6],ymm6[7],ymm8[8],ymm6[9],ymm8[10],ymm6[11],ymm8[12],ymm6[13],ymm8[14],ymm6[15] vpsrld $0x10, %ymm8, %ymm8 vpblendw $0xaa, %ymm9, %ymm8, %ymm9 # ymm9 = ymm8[0],ymm9[1],ymm8[2],ymm9[3],ymm8[4],ymm9[5],ymm8[6],ymm9[7],ymm8[8],ymm9[9],ymm8[10],ymm9[11],ymm8[12],ymm9[13],ymm8[14],ymm9[15] vmovsldup %ymm4, %ymm8 # ymm8 = ymm4[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm8, %ymm3, %ymm8 # ymm8 = ymm3[0],ymm8[1],ymm3[2],ymm8[3],ymm3[4],ymm8[5],ymm3[6],ymm8[7] vpsrlq $0x20, %ymm3, %ymm3 vpblendd $0xaa, %ymm4, %ymm3, %ymm4 # ymm4 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7] vmovsldup %ymm5, %ymm3 # ymm3 = ymm5[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm3, %ymm6, %ymm3 # ymm3 = ymm6[0],ymm3[1],ymm6[2],ymm3[3],ymm6[4],ymm3[5],ymm6[6],ymm3[7] vpsrlq $0x20, %ymm6, %ymm6 vpblendd $0xaa, %ymm5, %ymm6, %ymm5 # ymm5 = ymm6[0],ymm5[1],ymm6[2],ymm5[3],ymm6[4],ymm5[5],ymm6[6],ymm5[7] vmovsldup %ymm9, %ymm6 # ymm6 = ymm9[0,0,2,2,4,4,6,6] vpblendd $0xaa, %ymm6, %ymm7, %ymm6 # ymm6 = 
ymm7[0],ymm6[1],ymm7[2],ymm6[3],ymm7[4],ymm6[5],ymm7[6],ymm6[7] vpsrlq $0x20, %ymm7, %ymm7 vpblendd $0xaa, %ymm9, %ymm7, %ymm9 # ymm9 = ymm7[0],ymm9[1],ymm7[2],ymm9[3],ymm7[4],ymm9[5],ymm7[6],ymm9[7] vpunpcklqdq %ymm3, %ymm8, %ymm7 # ymm7 = ymm8[0],ymm3[0],ymm8[2],ymm3[2] vpunpckhqdq %ymm3, %ymm8, %ymm3 # ymm3 = ymm8[1],ymm3[1],ymm8[3],ymm3[3] vpunpcklqdq %ymm4, %ymm6, %ymm8 # ymm8 = ymm6[0],ymm4[0],ymm6[2],ymm4[2] vpunpckhqdq %ymm4, %ymm6, %ymm4 # ymm4 = ymm6[1],ymm4[1],ymm6[3],ymm4[3] vpunpcklqdq %ymm9, %ymm5, %ymm6 # ymm6 = ymm5[0],ymm9[0],ymm5[2],ymm9[2] vpunpckhqdq %ymm9, %ymm5, %ymm9 # ymm9 = ymm5[1],ymm9[1],ymm5[3],ymm9[3] vperm2i128 $0x20, %ymm8, %ymm7, %ymm5 # ymm5 = ymm7[0,1],ymm8[0,1] vperm2i128 $0x31, %ymm8, %ymm7, %ymm8 # ymm8 = ymm7[2,3],ymm8[2,3] vperm2i128 $0x20, %ymm3, %ymm6, %ymm7 # ymm7 = ymm6[0,1],ymm3[0,1] vperm2i128 $0x31, %ymm3, %ymm6, %ymm3 # ymm3 = ymm6[2,3],ymm3[2,3] vperm2i128 $0x20, %ymm9, %ymm4, %ymm6 # ymm6 = ymm4[0,1],ymm9[0,1] vperm2i128 $0x31, %ymm9, %ymm4, %ymm9 # ymm9 = ymm4[2,3],ymm9[2,3] vmovdqu %ymm5, (%rdi) vmovdqu %ymm7, 0x20(%rdi) vmovdqu %ymm6, 0x40(%rdi) vmovdqu %ymm8, 0x60(%rdi) vmovdqu %ymm3, 0x80(%rdi) vmovdqu %ymm9, 0xa0(%rdi) retq .cfi_endproc
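The mlkem_ntttobytes_avx2 routine in the record above serializes 256 coefficients, each already reduced below 2^12, into 384 bytes, packing two 12-bit values into every three bytes. A minimal scalar C sketch of that byte layout is given below, assuming reduced non-negative inputs; the name ntttobytes_ref is illustrative, and the AVX2 code reaches the same layout through the shift/blend/permute schedule rather than a loop.

#include <stdint.h>

/* Scalar sketch of the 12-bit packing: t0, t1 < 4096 map to three bytes. */
static void ntttobytes_ref(uint8_t r[384], const int16_t a[256])
{
    for (int i = 0; i < 128; i++) {
        uint16_t t0 = (uint16_t)a[2 * i];
        uint16_t t1 = (uint16_t)a[2 * i + 1];
        r[3 * i + 0] = (uint8_t)t0;
        r[3 * i + 1] = (uint8_t)((t0 >> 8) | (t1 << 4));
        r[3 * i + 2] = (uint8_t)(t1 >> 4);
    }
}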
wlsfx/bnbb
2,636
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/aarch64/src/poly_tomont_asm.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /*yaml Name: poly_tomont_asm Description: Convert polynomial to Montgomery domain Signature: void mlk_poly_tomont_asm(int16_t p[256]) ABI: x0: type: buffer size_bytes: 512 permissions: read/write c_parameter: int16_t p[256] description: Input/output polynomial Stack: bytes: 0 */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/aarch64_opt/src/poly_tomont_asm.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_poly_tomont_asm) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_poly_tomont_asm) S2N_BN_SYMBOL(mlkem_poly_tomont_asm): .cfi_startproc mov w2, #0xd01 // =3329 dup v4.8h, w2 mov w2, #0x4ebf // =20159 dup v5.8h, w2 mov w2, #-0x414 // =-1044 dup v2.8h, w2 mov w2, #-0x2824 // =-10276 dup v3.8h, w2 mov x1, #0x8 // =8 ldr q26, [x0, #0x30] ldr q23, [x0, #0x10] mul v17.8h, v26.8h, v2.8h sqrdmulh v7.8h, v26.8h, v3.8h ldr q27, [x0, #0x20] sub x1, x1, #0x1 Lpoly_tomont_loop: mls v17.8h, v7.8h, v4.h[0] sqrdmulh v5.8h, v23.8h, v3.8h ldr q7, [x0], #0x40 stur q17, [x0, #-0x10] sqrdmulh v29.8h, v27.8h, v3.8h sqrdmulh v19.8h, v7.8h, v3.8h mul v25.8h, v23.8h, v2.8h mul v0.8h, v7.8h, v2.8h mul v26.8h, v27.8h, v2.8h ldr q7, [x0, #0x30] mls v25.8h, v5.8h, v4.h[0] ldr q23, [x0, #0x10] mls v26.8h, v29.8h, v4.h[0] mls v0.8h, v19.8h, v4.h[0] stur q25, [x0, #-0x30] mul v17.8h, v7.8h, v2.8h sqrdmulh v7.8h, v7.8h, v3.8h stur q0, [x0, #-0x40] ldr q27, [x0, #0x20] stur q26, [x0, #-0x20] sub x1, x1, #0x1 cbnz x1, Lpoly_tomont_loop mls v17.8h, v7.8h, v4.h[0] sqrdmulh v7.8h, v23.8h, v3.8h mul v26.8h, v23.8h, v2.8h sqrdmulh v25.8h, v27.8h, v3.8h ldr q23, [x0], #0x40 mul v27.8h, v27.8h, v2.8h mls v26.8h, v7.8h, v4.h[0] sqrdmulh v7.8h, v23.8h, v3.8h mul v23.8h, v23.8h, v2.8h stur q17, [x0, #-0x10] mls v27.8h, v25.8h, v4.h[0] stur q26, [x0, #-0x30] mls v23.8h, v7.8h, v4.h[0] stur q27, [x0, #-0x20] stur q23, [x0, #-0x40] ret .cfi_endproc
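mlk_poly_tomont_asm multiplies every coefficient by 2^32 mod q so that a later Montgomery reduction lands back in the standard domain; the assembly folds this into one mul/sqrdmulh/mls triple per vector of eight coefficients. A scalar sketch in the style of the public Kyber reference code, with q = 3329 and QINV = q^-1 mod 2^16; the helper names are illustrative and are not the mlkem-native C API.

#include <stdint.h>

#define Q 3329
#define QINV (-3327)              /* q^-1 mod 2^16, as a signed 16-bit value */

/* Signed Montgomery reduction: returns a * 2^-16 mod q (centered). */
static int16_t montgomery_reduce(int32_t a)
{
    int16_t t = (int16_t)((int16_t)a * (int16_t)QINV);
    return (int16_t)((a - (int32_t)t * Q) >> 16);
}

/* Conversion to the Montgomery domain: f = 2^32 mod q = 1353, so
 * montgomery_reduce(c * f) = c * 2^16 mod q. */
static void poly_tomont_ref(int16_t p[256])
{
    const int16_t f = 1353;
    for (int i = 0; i < 256; i++)
        p[i] = montgomery_reduce((int32_t)p[i] * f);
}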
wlsfx/bnbb
2,392
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/aarch64/src/poly_mulcache_compute_asm.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /*yaml Name: poly_mulcache_compute_asm Description: Compute multiplication cache for polynomial Signature: void mlk_poly_mulcache_compute_asm(int16_t cache[128], const int16_t mlk_poly[256], const int16_t zetas[128], const int16_t zetas_twisted[128]) ABI: x0: type: buffer size_bytes: 256 permissions: write-only c_parameter: int16_t cache[128] description: Output cache x1: type: buffer size_bytes: 512 permissions: read-only c_parameter: const int16_t mlk_poly[256] description: Input polynomial x2: type: buffer size_bytes: 256 permissions: read-only c_parameter: const int16_t zetas[128] description: Zeta values x3: type: buffer size_bytes: 256 permissions: read-only c_parameter: const int16_t zetas_twisted[128] description: Twisted zeta values Stack: bytes: 0 */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/aarch64_opt/src/poly_mulcache_compute_asm.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_poly_mulcache_compute_asm) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_poly_mulcache_compute_asm) S2N_BN_SYMBOL(mlkem_poly_mulcache_compute_asm): .cfi_startproc mov w5, #0xd01 // =3329 dup v6.8h, w5 mov w5, #0x4ebf // =20159 dup v7.8h, w5 mov x4, #0x10 // =16 ldr q1, [x1, #0x10] ldr q27, [x1], #0x20 ldr q23, [x2], #0x10 uzp2 v27.8h, v27.8h, v1.8h ldr q1, [x3], #0x10 mul v2.8h, v27.8h, v23.8h sqrdmulh v27.8h, v27.8h, v1.8h sub x4, x4, #0x1 Lpoly_mulcache_compute_loop: ldr q29, [x1, #0x10] ldr q21, [x2], #0x10 mls v2.8h, v27.8h, v6.h[0] ldr q27, [x1], #0x20 ldr q7, [x3], #0x10 uzp2 v28.8h, v27.8h, v29.8h str q2, [x0], #0x10 mul v2.8h, v28.8h, v21.8h sqrdmulh v27.8h, v28.8h, v7.8h sub x4, x4, #0x1 cbnz x4, Lpoly_mulcache_compute_loop mls v2.8h, v27.8h, v6.h[0] str q2, [x0], #0x10 ret .cfi_endproc
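The mulcache stores, for each degree-1 coefficient pair, the odd coefficient premultiplied by the zeta for that pair, which the basemul routines later reuse. A hedged scalar sketch, reusing the montgomery_reduce sketch from the previous record's example and assuming zetas[i] is the (Montgomery-form) zeta for pair i; the assembly additionally takes a twisted copy of the zetas because its sqrdmulh-based reduction needs a premultiplied constant, which a plain Montgomery multiplication does not.

/* cache[i] = p[2i+1] * zeta_i * 2^-16 mod q  (zetas in Montgomery form). */
static void poly_mulcache_compute_ref(int16_t cache[128],
                                      const int16_t p[256],
                                      const int16_t zetas[128])
{
    for (int i = 0; i < 128; i++)
        cache[i] = montgomery_reduce((int32_t)p[2 * i + 1] * zetas[i]);
}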
wlsfx/bnbb
6,299
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/aarch64/src/rej_uniform_asm.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /*yaml Name: rej_uniform_asm Description: Run rejection sampling on uniform random bytes to generate uniform random integers mod q Signature: uint64_t mlk_rej_uniform_asm(int16_t r[256], const uint8_t *buf, unsigned buflen, const uint8_t table[2048]) ABI: x0: type: buffer size_bytes: 512 permissions: write-only c_parameter: int16_t r[256] description: Output buffer x1: type: buffer size_bytes: x2 permissions: read-only c_parameter: const uint8_t *buf description: Input buffer x2: type: scalar c_parameter: unsigned buflen description: Length of input buffer (must be multiple of 24) test_with: 504 # MLKEM_GEN_MATRIX_NBLOCKS * MLK_XOF_RATE x3: type: buffer size_bytes: 2048 permissions: read-only c_parameter: const uint8_t table[2048] description: Lookup table Stack: bytes: 576 description: register preservation and temporary storage */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/aarch64_opt/src/rej_uniform_asm.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_rej_uniform_asm) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_rej_uniform_asm) S2N_BN_SYMBOL(mlkem_rej_uniform_asm): .cfi_startproc sub sp, sp, #0x240 .cfi_adjust_cfa_offset 0x240 mov x7, #0x1 // =1 movk x7, #0x2, lsl #16 movk x7, #0x4, lsl #32 movk x7, #0x8, lsl #48 mov v31.d[0], x7 mov x7, #0x10 // =16 movk x7, #0x20, lsl #16 movk x7, #0x40, lsl #32 movk x7, #0x80, lsl #48 mov v31.d[1], x7 mov w11, #0xd01 // =3329 dup v30.8h, w11 mov x8, sp mov x7, x8 mov x11, #0x0 // =0 eor v16.16b, v16.16b, v16.16b Lrej_uniform_initial_zero: str q16, [x7], #0x40 stur q16, [x7, #-0x30] stur q16, [x7, #-0x20] stur q16, [x7, #-0x10] add x11, x11, #0x20 cmp x11, #0x100 b.lt Lrej_uniform_initial_zero mov x7, x8 mov x9, #0x0 // =0 mov x4, #0x100 // =256 cmp x2, #0x30 b.lo Lrej_uniform_loop48_end Lrej_uniform_loop48: cmp x9, x4 b.hs Lrej_uniform_memory_copy sub x2, x2, #0x30 ld3 { v0.16b, v1.16b, v2.16b }, [x1], #48 zip1 v4.16b, v0.16b, v1.16b zip2 v5.16b, v0.16b, v1.16b zip1 v6.16b, v1.16b, v2.16b zip2 v7.16b, v1.16b, v2.16b bic v4.8h, #0xf0, lsl #8 bic v5.8h, #0xf0, lsl #8 ushr v6.8h, v6.8h, #0x4 ushr v7.8h, v7.8h, #0x4 zip1 v16.8h, v4.8h, v6.8h zip2 v17.8h, v4.8h, v6.8h zip1 v18.8h, v5.8h, v7.8h zip2 v19.8h, v5.8h, v7.8h cmhi v4.8h, v30.8h, v16.8h cmhi v5.8h, v30.8h, v17.8h cmhi v6.8h, v30.8h, v18.8h cmhi v7.8h, v30.8h, v19.8h and v4.16b, v4.16b, v31.16b and v5.16b, v5.16b, v31.16b and v6.16b, v6.16b, v31.16b and v7.16b, v7.16b, v31.16b uaddlv s20, v4.8h uaddlv s21, v5.8h uaddlv s22, v6.8h uaddlv s23, v7.8h fmov w12, s20 fmov w13, s21 fmov w14, s22 fmov w15, s23 ldr q24, [x3, x12, lsl #4] ldr q25, [x3, x13, lsl #4] ldr q26, [x3, x14, lsl #4] ldr q27, [x3, x15, lsl #4] cnt v4.16b, v4.16b cnt v5.16b, v5.16b cnt v6.16b, v6.16b cnt v7.16b, v7.16b uaddlv s20, v4.8h uaddlv s21, v5.8h uaddlv s22, v6.8h uaddlv s23, v7.8h fmov w12, s20 fmov w13, s21 fmov w14, s22 fmov w15, s23 tbl v16.16b, { v16.16b }, v24.16b tbl v17.16b, { v17.16b }, v25.16b tbl v18.16b, { v18.16b }, v26.16b tbl v19.16b, { v19.16b }, v27.16b str q16, [x7] add x7, x7, x12, lsl #1 str q17, [x7] add x7, x7, x13, lsl #1 str q18, [x7] add x7, x7, x14, lsl #1 str q19, [x7] add x7, x7, x15, lsl #1 add x12, x12, x13 add x14, x14, x15 add x9, x9, x12 add x9, x9, x14 cmp x2, #0x30 b.hs Lrej_uniform_loop48 Lrej_uniform_loop48_end: cmp x9, x4 b.hs Lrej_uniform_memory_copy cmp x2, 
#0x18 b.lo Lrej_uniform_memory_copy sub x2, x2, #0x18 ld3 { v0.8b, v1.8b, v2.8b }, [x1], #24 zip1 v4.16b, v0.16b, v1.16b zip1 v5.16b, v1.16b, v2.16b bic v4.8h, #0xf0, lsl #8 ushr v5.8h, v5.8h, #0x4 zip1 v16.8h, v4.8h, v5.8h zip2 v17.8h, v4.8h, v5.8h cmhi v4.8h, v30.8h, v16.8h cmhi v5.8h, v30.8h, v17.8h and v4.16b, v4.16b, v31.16b and v5.16b, v5.16b, v31.16b uaddlv s20, v4.8h uaddlv s21, v5.8h fmov w12, s20 fmov w13, s21 ldr q24, [x3, x12, lsl #4] ldr q25, [x3, x13, lsl #4] cnt v4.16b, v4.16b cnt v5.16b, v5.16b uaddlv s20, v4.8h uaddlv s21, v5.8h fmov w12, s20 fmov w13, s21 tbl v16.16b, { v16.16b }, v24.16b tbl v17.16b, { v17.16b }, v25.16b str q16, [x7] add x7, x7, x12, lsl #1 str q17, [x7] add x7, x7, x13, lsl #1 add x9, x9, x12 add x9, x9, x13 Lrej_uniform_memory_copy: cmp x9, x4 csel x9, x9, x4, lo mov x11, #0x0 // =0 mov x7, x8 Lrej_uniform_final_copy: ldr q16, [x7], #0x40 ldur q17, [x7, #-0x30] ldur q18, [x7, #-0x20] ldur q19, [x7, #-0x10] str q16, [x0], #0x40 stur q17, [x0, #-0x30] stur q18, [x0, #-0x20] stur q19, [x0, #-0x10] add x11, x11, #0x20 cmp x11, #0x100 b.lt Lrej_uniform_final_copy mov x0, x9 b Lrej_uniform_return Lrej_uniform_return: add sp, sp, #0x240 .cfi_adjust_cfa_offset -0x240 ret .cfi_endproc
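mlk_rej_uniform_asm consumes the input 48 (or 24) bytes at a time, splits each 3-byte group into two 12-bit candidates, and keeps those below q = 3329; the vector code does the comparison with cmhi, compacts survivors through the 2048-byte lookup table, and copies at most 256 results out of a zeroed stack buffer. The scalar sketch below illustrates only the sampling rule itself (function name illustrative).

#include <stdint.h>

/* Returns how many coefficients < 3329 were written to r (at most target). */
static unsigned rej_uniform_ref(int16_t *r, unsigned target,
                                const uint8_t *buf, unsigned buflen)
{
    unsigned ctr = 0, pos = 0;
    while (ctr < target && pos + 3 <= buflen) {
        uint16_t d1 = (uint16_t)((buf[pos] | ((uint16_t)buf[pos + 1] << 8)) & 0xFFF);
        uint16_t d2 = (uint16_t)((buf[pos + 1] >> 4) | ((uint16_t)buf[pos + 2] << 4));
        pos += 3;
        if (d1 < 3329)
            r[ctr++] = (int16_t)d1;
        if (ctr < target && d2 < 3329)
            r[ctr++] = (int16_t)d2;
    }
    return ctr;
}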
wlsfx/bnbb
3,351
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/aarch64/src/poly_reduce_asm.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /*yaml Name: poly_reduce_asm Description: Barrett reduction of polynomial coefficients Signature: void mlk_poly_reduce_asm(int16_t p[256]) ABI: x0: type: buffer size_bytes: 512 permissions: read/write c_parameter: int16_t p[256] description: Input/output polynomial Stack: bytes: 0 */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/aarch64_opt/src/poly_reduce_asm.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_poly_reduce_asm) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_poly_reduce_asm) S2N_BN_SYMBOL(mlkem_poly_reduce_asm): .cfi_startproc mov w2, #0xd01 // =3329 dup v3.8h, w2 mov w2, #0x4ebf // =20159 dup v4.8h, w2 mov x1, #0x8 // =8 ldr q21, [x0, #0x20] ldr q23, [x0, #0x30] sqdmulh v7.8h, v21.8h, v4.h[0] sqdmulh v30.8h, v23.8h, v4.h[0] srshr v7.8h, v7.8h, #0xb srshr v30.8h, v30.8h, #0xb mls v21.8h, v7.8h, v3.h[0] mls v23.8h, v30.8h, v3.h[0] ldr q5, [x0, #0x10] sshr v7.8h, v21.8h, #0xf sshr v30.8h, v23.8h, #0xf and v7.16b, v3.16b, v7.16b add v21.8h, v21.8h, v7.8h and v7.16b, v3.16b, v30.16b add v16.8h, v23.8h, v7.8h sub x1, x1, #0x1 Lpoly_reduce_loop: ldr q6, [x0], #0x40 ldr q30, [x0, #0x20] sqdmulh v31.8h, v6.8h, v4.h[0] sqdmulh v29.8h, v5.8h, v4.h[0] sqdmulh v22.8h, v30.8h, v4.h[0] stur q16, [x0, #-0x10] srshr v20.8h, v31.8h, #0xb srshr v28.8h, v29.8h, #0xb stur q21, [x0, #-0x20] mls v6.8h, v20.8h, v3.h[0] mls v5.8h, v28.8h, v3.h[0] ldr q2, [x0, #0x30] sshr v31.8h, v6.8h, #0xf srshr v19.8h, v22.8h, #0xb and v22.16b, v3.16b, v31.16b add v0.8h, v6.8h, v22.8h mls v30.8h, v19.8h, v3.h[0] sshr v26.8h, v5.8h, #0xf sqdmulh v25.8h, v2.8h, v4.h[0] and v17.16b, v3.16b, v26.16b add v1.8h, v5.8h, v17.8h sshr v31.8h, v30.8h, #0xf srshr v25.8h, v25.8h, #0xb stur q1, [x0, #-0x30] and v18.16b, v3.16b, v31.16b mls v2.8h, v25.8h, v3.h[0] add v21.8h, v30.8h, v18.8h ldr q5, [x0, #0x10] sshr v18.8h, v2.8h, #0xf stur q0, [x0, #-0x40] and v27.16b, v3.16b, v18.16b add v16.8h, v2.8h, v27.8h sub x1, x1, #0x1 cbnz x1, Lpoly_reduce_loop sqdmulh v20.8h, v5.8h, v4.h[0] ldr q24, [x0], #0x40 stur q21, [x0, #-0x20] srshr v20.8h, v20.8h, #0xb sqdmulh v25.8h, v24.8h, v4.h[0] stur q16, [x0, #-0x10] mls v5.8h, v20.8h, v3.h[0] srshr v20.8h, v25.8h, #0xb sshr v2.8h, v5.8h, #0xf mls v24.8h, v20.8h, v3.h[0] and v20.16b, v3.16b, v2.16b add v31.8h, v5.8h, v20.8h sshr v20.8h, v24.8h, #0xf stur q31, [x0, #-0x30] and v31.16b, v3.16b, v20.16b add v24.8h, v24.8h, v31.8h stur q24, [x0, #-0x40] ret .cfi_endproc
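mlk_poly_reduce_asm applies a Barrett reduction with the constant 20159 = round(2^26 / q) and then conditionally adds q so each result lands in [0, q): in the assembly this is the sqdmulh/srshr #11/mls sequence followed by the sshr/and/add. A scalar sketch of the same step (name illustrative):

#include <stdint.h>

/* Barrett reduction of one coefficient to [0, q), q = 3329. */
static int16_t barrett_reduce_ref(int16_t a)
{
    const int32_t v = 20159;                  /* round(2^26 / q) */
    int16_t t = (int16_t)((v * a + (1 << 25)) >> 26);
    t = (int16_t)(a - t * 3329);              /* centered representative */
    t += (int16_t)((t >> 15) & 3329);         /* add q if negative */
    return t;
}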
wlsfx/bnbb
8,141
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/aarch64/src/polyvec_basemul_acc_montgomery_cached_asm_k2.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /* References * ========== * * - [NeonNTT] * Neon NTT: Faster Dilithium, Kyber, and Saber on Cortex-A72 and Apple M1 * Becker, Hwang, Kannwischer, Yang, Yang * https://eprint.iacr.org/2021/986 */ /*yaml Name: polyvec_basemul_acc_montgomery_cached_asm_k2 Description: Re-implementation of asymmetric base multiplication following @[NeonNTT] for k=2 Signature: void mlk_polyvec_basemul_acc_montgomery_cached_asm_k2(int16_t r[256], const int16_t a[512], const int16_t b[512], const int16_t b_cache[256]) ABI: x0: type: buffer size_bytes: 512 permissions: write-only c_parameter: int16_t r[256] description: Output polynomial x1: type: buffer size_bytes: 1024 permissions: read-only c_parameter: const int16_t a[512] description: Input polynomial vector a x2: type: buffer size_bytes: 1024 permissions: read-only c_parameter: const int16_t b[512] description: Input polynomial vector b x3: type: buffer size_bytes: 512 permissions: read-only c_parameter: const int16_t b_cache[256] description: Cached values for b Stack: bytes: 64 description: saving callee-saved Neon registers */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/aarch64_opt/src/polyvec_basemul_acc_montgomery_cached_asm_k2.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k2) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k2) S2N_BN_SYMBOL(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k2): .cfi_startproc sub sp, sp, #0x40 .cfi_adjust_cfa_offset 0x40 stp d8, d9, [sp] .cfi_rel_offset d8, 0x0 .cfi_rel_offset d9, 0x8 stp d10, d11, [sp, #0x10] .cfi_rel_offset d10, 0x10 .cfi_rel_offset d11, 0x18 stp d12, d13, [sp, #0x20] .cfi_rel_offset d12, 0x20 .cfi_rel_offset d13, 0x28 stp d14, d15, [sp, #0x30] .cfi_rel_offset d14, 0x30 .cfi_rel_offset d15, 0x38 mov w14, #0xd01 // =3329 dup v0.8h, w14 mov w14, #0xcff // =3327 dup v2.8h, w14 add x4, x1, #0x200 add x5, x2, #0x200 add x6, x3, #0x100 mov x13, #0x10 // =16 ldr q9, [x4], #0x20 ldur q5, [x4, #-0x10] ldr q11, [x5], #0x20 uzp1 v23.8h, v9.8h, v5.8h uzp2 v9.8h, v9.8h, v5.8h ldr q5, [x2], #0x20 ldur q7, [x5, #-0x10] ldur q21, [x2, #-0x10] uzp2 v10.8h, v11.8h, v7.8h uzp1 v11.8h, v11.8h, v7.8h uzp1 v7.8h, v5.8h, v21.8h uzp2 v5.8h, v5.8h, v21.8h ldr q21, [x1], #0x20 ldur q25, [x1, #-0x10] ld1 { v6.8h }, [x3], #16 uzp1 v26.8h, v21.8h, v25.8h uzp2 v21.8h, v21.8h, v25.8h smull v25.4s, v26.4h, v5.4h smull2 v5.4s, v26.8h, v5.8h smull v19.4s, v26.4h, v7.4h smull2 v26.4s, v26.8h, v7.8h smlal v25.4s, v21.4h, v7.4h smlal2 v5.4s, v21.8h, v7.8h smlal v19.4s, v21.4h, v6.4h smlal2 v26.4s, v21.8h, v6.8h smlal v25.4s, v23.4h, v10.4h smlal2 v5.4s, v23.8h, v10.8h smlal v19.4s, v23.4h, v11.4h smlal2 v26.4s, v23.8h, v11.8h ld1 { v23.8h }, [x6], #16 smlal v25.4s, v9.4h, v11.4h smlal2 v5.4s, v9.8h, v11.8h smlal2 v26.4s, v9.8h, v23.8h smlal v19.4s, v9.4h, v23.4h ldr q9, [x4], #0x20 uzp1 v11.8h, v25.8h, v5.8h uzp1 v23.8h, v19.8h, v26.8h mul v11.8h, v11.8h, v2.8h mul v23.8h, v23.8h, v2.8h ldr q7, [x5], #0x20 smlal2 v5.4s, v11.8h, v0.8h smlal v25.4s, v11.4h, v0.4h ldr q11, [x2], #0x20 ldur q21, [x2, #-0x10] ldur q6, [x4, #-0x10] uzp1 v17.8h, v11.8h, v21.8h ldr q10, [x1], #0x20 ldur q29, [x1, #-0x10] uzp2 v11.8h, v11.8h, v21.8h uzp1 v13.8h, v9.8h, v6.8h uzp1 v3.8h, v10.8h, v29.8h uzp2 v10.8h, v10.8h, v29.8h smull v12.4s, v3.4h, v11.4h 
smull2 v11.4s, v3.8h, v11.8h ldur q21, [x5, #-0x10] smlal v12.4s, v10.4h, v17.4h smlal2 v11.4s, v10.8h, v17.8h uzp2 v29.8h, v7.8h, v21.8h uzp1 v15.8h, v7.8h, v21.8h smlal v12.4s, v13.4h, v29.4h smlal2 v11.4s, v13.8h, v29.8h uzp2 v28.8h, v9.8h, v6.8h smlal2 v26.4s, v23.8h, v0.8h smlal v12.4s, v28.4h, v15.4h smlal2 v11.4s, v28.8h, v15.8h smlal v19.4s, v23.4h, v0.4h uzp2 v27.8h, v25.8h, v5.8h smull v23.4s, v3.4h, v17.4h uzp1 v9.8h, v12.8h, v11.8h uzp2 v19.8h, v19.8h, v26.8h mul v14.8h, v9.8h, v2.8h ld1 { v22.8h }, [x6], #16 zip2 v9.8h, v19.8h, v27.8h smlal2 v11.4s, v14.8h, v0.8h ld1 { v4.8h }, [x3], #16 sub x13, x13, #0x2 Lpolyvec_basemul_acc_montgomery_cached_k2_loop: smull2 v20.4s, v3.8h, v17.8h ldr q18, [x4], #0x20 ldr q30, [x5], #0x20 smlal2 v20.4s, v10.8h, v4.8h smlal v12.4s, v14.4h, v0.4h smlal v23.4s, v10.4h, v4.4h str q9, [x0, #0x10] smlal2 v20.4s, v13.8h, v15.8h ldr q8, [x2], #0x20 smlal v23.4s, v13.4h, v15.4h smlal2 v20.4s, v28.8h, v22.8h zip1 v26.8h, v19.8h, v27.8h ldur q9, [x2, #-0x10] smlal v23.4s, v28.4h, v22.4h uzp2 v27.8h, v12.8h, v11.8h uzp1 v17.8h, v8.8h, v9.8h uzp2 v4.8h, v8.8h, v9.8h uzp1 v5.8h, v23.8h, v20.8h str q26, [x0], #0x20 mul v31.8h, v5.8h, v2.8h ldur q19, [x4, #-0x10] ldr q29, [x1], #0x20 ldur q12, [x1, #-0x10] smlal2 v20.4s, v31.8h, v0.8h uzp1 v13.8h, v18.8h, v19.8h uzp1 v3.8h, v29.8h, v12.8h uzp2 v10.8h, v29.8h, v12.8h smull v12.4s, v3.4h, v4.4h smull2 v11.4s, v3.8h, v4.8h ldur q5, [x5, #-0x10] smlal v12.4s, v10.4h, v17.4h smlal2 v11.4s, v10.8h, v17.8h uzp2 v14.8h, v30.8h, v5.8h uzp1 v15.8h, v30.8h, v5.8h smlal v12.4s, v13.4h, v14.4h smlal2 v11.4s, v13.8h, v14.8h uzp2 v28.8h, v18.8h, v19.8h smlal v23.4s, v31.4h, v0.4h smlal v12.4s, v28.4h, v15.4h smlal2 v11.4s, v28.8h, v15.8h ld1 { v22.8h }, [x6], #16 uzp2 v19.8h, v23.8h, v20.8h uzp1 v1.8h, v12.8h, v11.8h smull v23.4s, v3.4h, v17.4h mul v14.8h, v1.8h, v2.8h zip2 v9.8h, v19.8h, v27.8h ld1 { v4.8h }, [x3], #16 smlal2 v11.4s, v14.8h, v0.8h sub x13, x13, #0x1 cbnz x13, Lpolyvec_basemul_acc_montgomery_cached_k2_loop smull2 v5.4s, v3.8h, v17.8h smlal v12.4s, v14.4h, v0.4h smlal v23.4s, v10.4h, v4.4h str q9, [x0, #0x10] smlal2 v5.4s, v10.8h, v4.8h uzp2 v11.8h, v12.8h, v11.8h zip1 v9.8h, v19.8h, v27.8h smlal v23.4s, v13.4h, v15.4h smlal2 v5.4s, v13.8h, v15.8h str q9, [x0], #0x20 smlal v23.4s, v28.4h, v22.4h smlal2 v5.4s, v28.8h, v22.8h uzp1 v9.8h, v23.8h, v5.8h mul v9.8h, v9.8h, v2.8h smlal2 v5.4s, v9.8h, v0.8h smlal v23.4s, v9.4h, v0.4h uzp2 v9.8h, v23.8h, v5.8h zip2 v5.8h, v9.8h, v11.8h zip1 v9.8h, v9.8h, v11.8h str q5, [x0, #0x10] str q9, [x0], #0x20 ldp d8, d9, [sp] .cfi_restore d8 .cfi_restore d9 ldp d10, d11, [sp, #0x10] .cfi_restore d10 .cfi_restore d11 ldp d12, d13, [sp, #0x20] .cfi_restore d12 .cfi_restore d13 ldp d14, d15, [sp, #0x30] .cfi_restore d14 .cfi_restore d15 add sp, sp, #0x40 .cfi_adjust_cfa_offset -0x40 ret .cfi_endproc
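Each base multiplication works in Z_q[X]/(X^2 - zeta): pairs of coefficients are multiplied and the products accumulated across the k = 2 vector entries before a single Montgomery reduction (the mul by 3327 = -q^-1 mod 2^16 and the smlal by q in the code above). A hedged sketch of one pair for a single vector entry, reusing the montgomery_reduce sketch from the poly_tomont example; b1_zeta stands for the cached b[1] * zeta value and the names are illustrative.

/* (a0 + a1 X) * (b0 + b1 X) mod (X^2 - zeta), with b1*zeta precomputed. */
static void basemul_pair_ref(int16_t r[2], const int16_t a[2],
                             const int16_t b[2], int16_t b1_zeta)
{
    int32_t t0 = (int32_t)a[0] * b[0] + (int32_t)a[1] * b1_zeta;
    int32_t t1 = (int32_t)a[0] * b[1] + (int32_t)a[1] * b[0];
    r[0] = montgomery_reduce(t0);
    r[1] = montgomery_reduce(t1);
}

The k=2, k=3, and k=4 variants in this directory differ only in how many such products are summed into t0/t1 before the reduction.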
wlsfx/bnbb
3,888
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/aarch64/src/poly_tobytes_asm.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /*yaml Name: poly_tobytes_asm Description: Convert polynomial to byte representation Signature: void mlk_poly_tobytes_asm(uint8_t r[384], const int16_t a[256]) ABI: x0: type: buffer size_bytes: 384 permissions: write-only c_parameter: uint8_t r[384] description: Output byte array x1: type: buffer size_bytes: 512 permissions: read-only c_parameter: const int16_t a[256] description: Input polynomial Stack: bytes: 0 */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/aarch64_opt/src/poly_tobytes_asm.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_poly_tobytes_asm) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_poly_tobytes_asm) S2N_BN_SYMBOL(mlkem_poly_tobytes_asm): .cfi_startproc mov x2, #0x10 // =16 ldr q6, [x1], #0x20 ldur q24, [x1, #-0x10] ldr q30, [x1], #0x20 ldur q22, [x1, #-0x10] ldr q5, [x1], #0x20 ldur q17, [x1, #-0x10] ldr q19, [x1], #0x20 ldur q4, [x1, #-0x10] lsr x2, x2, #2 sub x2, x2, #0x1 Lpoly_tobytes_loop_start: uzp1 v25.8h, v6.8h, v24.8h uzp2 v6.8h, v6.8h, v24.8h xtn v24.8b, v25.8h shrn v25.8b, v25.8h, #0x8 xtn v18.8b, v6.8h shrn v26.8b, v6.8h, #0x4 sli v25.8b, v18.8b, #0x4 st3 { v24.8b, v25.8b, v26.8b }, [x0], #24 uzp1 v25.8h, v30.8h, v22.8h uzp2 v6.8h, v30.8h, v22.8h xtn v24.8b, v25.8h xtn v18.8b, v6.8h uzp1 v30.8h, v5.8h, v17.8h uzp2 v22.8h, v5.8h, v17.8h xtn v5.8b, v30.8h xtn v17.8b, v22.8h uzp1 v28.8h, v19.8h, v4.8h uzp2 v19.8h, v19.8h, v4.8h xtn v4.8b, v28.8h xtn v20.8b, v19.8h shrn v25.8b, v25.8h, #0x8 sli v25.8b, v18.8b, #0x4 shrn v26.8b, v6.8h, #0x4 st3 { v24.8b, v25.8b, v26.8b }, [x0], #24 shrn v6.8b, v30.8h, #0x8 sli v6.8b, v17.8b, #0x4 shrn v7.8b, v22.8h, #0x4 st3 { v5.8b, v6.8b, v7.8b }, [x0], #24 shrn v5.8b, v28.8h, #0x8 shrn v6.8b, v19.8h, #0x4 sli v5.8b, v20.8b, #0x4 st3 { v4.8b, v5.8b, v6.8b }, [x0], #24 ldr q6, [x1], #0x20 ldur q24, [x1, #-0x10] ldr q30, [x1], #0x20 ldur q22, [x1, #-0x10] ldr q5, [x1], #0x20 ldur q17, [x1, #-0x10] ldr q19, [x1], #0x20 ldur q4, [x1, #-0x10] sub x2, x2, #0x1 cbnz x2, Lpoly_tobytes_loop_start uzp1 v25.8h, v30.8h, v22.8h uzp2 v18.8h, v30.8h, v22.8h uzp1 v30.8h, v6.8h, v24.8h uzp2 v6.8h, v6.8h, v24.8h uzp1 v24.8h, v5.8h, v17.8h uzp2 v22.8h, v5.8h, v17.8h uzp1 v5.8h, v19.8h, v4.8h uzp2 v17.8h, v19.8h, v4.8h xtn v19.8b, v25.8h shrn v20.8b, v25.8h, #0x8 xtn v25.8b, v18.8h shrn v21.8b, v18.8h, #0x4 xtn v28.8b, v30.8h shrn v29.8b, v30.8h, #0x8 xtn v18.8b, v6.8h shrn v30.8b, v6.8h, #0x4 xtn v1.8b, v24.8h shrn v2.8b, v24.8h, #0x8 xtn v6.8b, v22.8h shrn v3.8b, v22.8h, #0x4 xtn v22.8b, v5.8h shrn v23.8b, v5.8h, #0x8 xtn v5.8b, v17.8h shrn v24.8b, v17.8h, #0x4 sli v20.8b, v25.8b, #0x4 sli v29.8b, v18.8b, #0x4 st3 { v28.8b, v29.8b, v30.8b }, [x0], #24 st3 { v19.8b, v20.8b, v21.8b }, [x0], #24 sli v2.8b, v6.8b, #0x4 st3 { v1.8b, v2.8b, v3.8b }, [x0], #24 sli v23.8b, v5.8b, #0x4 st3 { v22.8b, v23.8b, v24.8b }, [x0], #24 ret .cfi_endproc
wlsfx/bnbb
20,320
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/aarch64/src/intt.S
/* Copyright (c) 2022 Arm Limited * Copyright (c) 2022 Hanno Becker * Copyright (c) 2023 Amin Abdulrahman, Matthias Kannwischer * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /* References * ========== * * - [NeonNTT] * Neon NTT: Faster Dilithium, Kyber, and Saber on Cortex-A72 and Apple M1 * Becker, Hwang, Kannwischer, Yang, Yang * https://eprint.iacr.org/2021/986 * * - [SLOTHY_Paper] * Fast and Clean: Auditable high-performance assembly via constraint solving * Abdulrahman, Becker, Kannwischer, Klein * https://eprint.iacr.org/2022/1303 */ /*yaml Name: intt_asm Description: AArch64 ML-KEM inverse NTT following @[NeonNTT] and @[SLOTHY_Paper] Signature: void mlk_intt_asm(int16_t p[256], const int16_t twiddles12345[80], const int16_t twiddles56[384]) ABI: x0: type: buffer size_bytes: 512 permissions: read/write c_parameter: int16_t p[256] description: Input/output polynomial x1: type: buffer size_bytes: 160 permissions: read-only c_parameter: const int16_t twiddles12345[80] description: Twiddle factors for layers 1-5 x2: type: buffer size_bytes: 768 permissions: read-only c_parameter: const int16_t twiddles56[384] description: Twiddle factors for layers 6-7 Stack: bytes: 64 description: saving callee-saved Neon registers */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/aarch64_opt/src/intt.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_intt_asm) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_intt_asm) S2N_BN_SYMBOL(mlkem_intt_asm): .cfi_startproc sub sp, sp, #0x40 .cfi_adjust_cfa_offset 0x40 stp d8, d9, [sp] .cfi_rel_offset d8, 0x0 .cfi_rel_offset d9, 0x8 stp d10, d11, [sp, #0x10] .cfi_rel_offset d10, 0x10 .cfi_rel_offset d11, 0x18 stp d12, d13, [sp, #0x20] .cfi_rel_offset d12, 0x20 .cfi_rel_offset d13, 0x28 stp d14, d15, [sp, #0x30] .cfi_rel_offset d14, 0x30 .cfi_rel_offset d15, 0x38 mov w5, #0xd01 // =3329 mov v7.h[0], w5 mov w5, #0x4ebf // =20159 mov v7.h[1], w5 mov w5, #0x200 // =512 dup v29.8h, w5 mov w5, #0x13b0 // =5040 dup v30.8h, w5 mov x3, x0 mov x4, #0x8 // =8 Lintt_scale_start: ldr q8, [x3] ldr q9, [x3, #0x10] ldr q10, [x3, #0x20] ldr q11, [x3, #0x30] sqrdmulh v27.8h, v8.8h, v30.8h mul v8.8h, v8.8h, v29.8h mls v8.8h, v27.8h, v7.h[0] sqrdmulh v27.8h, v9.8h, v30.8h mul v9.8h, v9.8h, v29.8h mls v9.8h, v27.8h, v7.h[0] sqrdmulh v27.8h, v10.8h, v30.8h mul v10.8h, v10.8h, v29.8h mls v10.8h, v27.8h, v7.h[0] sqrdmulh v27.8h, v11.8h, v30.8h mul v11.8h, v11.8h, v29.8h mls v11.8h, v27.8h, v7.h[0] str q8, [x3], #0x40 stur q9, [x3, #-0x30] stur q10, [x3, #-0x20] stur q11, [x3, #-0x10] subs x4, x4, #0x1 cbnz x4, Lintt_scale_start mov x3, x0 mov x4, #0x8 // =8 ldr q3, [x3, #0x10] ldr q20, [x3] ldr q25, [x3, #0x20] ldr q24, [x3, #0x30] ldr q21, [x2, #0x50] trn1 v18.4s, v25.4s, v24.4s trn1 v6.4s, v20.4s, v3.4s trn2 v12.4s, v25.4s, v24.4s trn2 v31.4s, v20.4s, v3.4s trn2 v28.2d, v6.2d, v18.2d trn1 v25.2d, v6.2d, v18.2d trn2 v15.2d, v31.2d, v12.2d trn1 v20.2d, v31.2d, v12.2d add v4.8h, v28.8h, v15.8h add v1.8h, v25.8h, v20.8h sub v30.8h, v28.8h, v15.8h sub v3.8h, v25.8h, v20.8h add v6.8h, v1.8h, v4.8h sqrdmulh v9.8h, v30.8h, v21.8h ldr q21, [x2, #0x40] ldr q25, [x2, #0x30] mul v21.8h, v30.8h, v21.8h ldr q30, [x2, #0x20] sub v28.8h, v1.8h, v4.8h ldr q1, [x2, #0x10] mls v21.8h, v9.8h, v7.h[0] sqrdmulh v9.8h, v3.8h, v25.8h mul v20.8h, v3.8h, v30.8h ldr q29, [x2], #0x60 ldr q17, [x3, #0x60] mls v20.8h, v9.8h, v7.h[0] ldr q3, [x3, 
#0x70] mul v4.8h, v28.8h, v29.8h sub v25.8h, v20.8h, v21.8h trn1 v15.4s, v17.4s, v3.4s sqrdmulh v28.8h, v28.8h, v1.8h trn2 v31.4s, v17.4s, v3.4s mul v30.8h, v25.8h, v29.8h add v20.8h, v20.8h, v21.8h mls v4.8h, v28.8h, v7.h[0] sqrdmulh v3.8h, v25.8h, v1.8h ldr q28, [x3, #0x40] trn1 v25.4s, v6.4s, v20.4s mls v30.8h, v3.8h, v7.h[0] ldr q27, [x3, #0x50] trn2 v6.4s, v6.4s, v20.4s trn1 v3.4s, v4.4s, v30.4s trn2 v10.4s, v28.4s, v27.4s trn2 v20.4s, v4.4s, v30.4s trn2 v8.2d, v25.2d, v3.2d trn1 v9.2d, v25.2d, v3.2d trn1 v1.2d, v6.2d, v20.2d trn2 v30.2d, v6.2d, v20.2d add v4.8h, v9.8h, v1.8h add v11.8h, v8.8h, v30.8h trn2 v25.2d, v10.2d, v31.2d sqdmulh v6.8h, v4.8h, v7.h[1] sqdmulh v20.8h, v11.8h, v7.h[1] ldr q21, [x2, #0x50] srshr v0.8h, v6.8h, #0xb srshr v3.8h, v20.8h, #0xb trn1 v2.4s, v28.4s, v27.4s mls v4.8h, v0.8h, v7.h[0] mls v11.8h, v3.8h, v7.h[0] ldr q0, [x1], #0x10 trn2 v20.2d, v2.2d, v15.2d sub v6.8h, v4.8h, v11.8h sub v5.8h, v20.8h, v25.8h sub v22.8h, v9.8h, v1.8h sqrdmulh v3.8h, v6.8h, v0.h[1] mul v6.8h, v6.8h, v0.h[0] sqrdmulh v12.8h, v5.8h, v21.8h ldr q19, [x2, #0x40] mls v6.8h, v3.8h, v7.h[0] ldr q14, [x2], #0x60 sub x4, x4, #0x2 Lintt_layer4567_start: str q6, [x3, #0x20] ldur q18, [x2, #-0x50] mul v26.8h, v5.8h, v19.8h trn1 v16.2d, v10.2d, v31.2d mul v27.8h, v22.8h, v0.h[2] trn1 v10.2d, v2.2d, v15.2d add v5.8h, v4.8h, v11.8h mls v26.8h, v12.8h, v7.h[0] add v11.8h, v10.8h, v16.8h add v6.8h, v20.8h, v25.8h ldur q25, [x2, #-0x40] ldur q28, [x2, #-0x30] ldr q2, [x3, #0xa0] ldr q19, [x2, #0x40] sub v17.8h, v8.8h, v30.8h ldr q1, [x3, #0x90] sqrdmulh v9.8h, v17.8h, v0.h[5] str q5, [x3], #0x40 ldr q30, [x3, #0x70] sub v10.8h, v10.8h, v16.8h ldr q16, [x3, #0x40] sqrdmulh v24.8h, v10.8h, v28.8h mul v13.8h, v10.8h, v25.8h sub v21.8h, v11.8h, v6.8h trn1 v15.4s, v2.4s, v30.4s trn2 v31.4s, v2.4s, v30.4s mls v13.8h, v24.8h, v7.h[0] mul v29.8h, v21.8h, v14.8h ldr q12, [x2, #0x50] sub v28.8h, v13.8h, v26.8h trn2 v10.4s, v16.4s, v1.4s add v30.8h, v11.8h, v6.8h sqrdmulh v2.8h, v28.8h, v18.8h mul v8.8h, v28.8h, v14.8h sqrdmulh v18.8h, v21.8h, v18.8h ldr q14, [x2], #0x60 mls v8.8h, v2.8h, v7.h[0] add v11.8h, v13.8h, v26.8h mls v29.8h, v18.8h, v7.h[0] sqrdmulh v20.8h, v22.8h, v0.h[3] trn1 v23.4s, v30.4s, v11.4s trn2 v28.4s, v30.4s, v11.4s trn2 v13.4s, v29.4s, v8.4s trn1 v11.4s, v29.4s, v8.4s mls v27.8h, v20.8h, v7.h[0] trn1 v21.2d, v28.2d, v13.2d trn2 v8.2d, v23.2d, v11.2d trn1 v24.2d, v23.2d, v11.2d mul v26.8h, v17.8h, v0.h[4] trn2 v30.2d, v28.2d, v13.2d add v4.8h, v24.8h, v21.8h add v11.8h, v8.8h, v30.8h mls v26.8h, v9.8h, v7.h[0] sqdmulh v17.8h, v4.8h, v7.h[1] sqdmulh v29.8h, v11.8h, v7.h[1] trn2 v25.2d, v10.2d, v31.2d add v2.8h, v27.8h, v26.8h srshr v28.8h, v17.8h, #0xb srshr v13.8h, v29.8h, #0xb sqdmulh v20.8h, v2.8h, v7.h[1] sub v5.8h, v27.8h, v26.8h mls v4.8h, v28.8h, v7.h[0] mls v11.8h, v13.8h, v7.h[0] srshr v23.8h, v20.8h, #0xb sqrdmulh v17.8h, v5.8h, v0.h[1] mul v9.8h, v5.8h, v0.h[0] mls v2.8h, v23.8h, v7.h[0] sub v29.8h, v4.8h, v11.8h ldr q0, [x1], #0x10 stur q2, [x3, #-0x30] trn1 v2.4s, v16.4s, v1.4s sqrdmulh v3.8h, v29.8h, v0.h[1] mul v6.8h, v29.8h, v0.h[0] trn2 v20.2d, v2.2d, v15.2d mls v9.8h, v17.8h, v7.h[0] sub v5.8h, v20.8h, v25.8h mls v6.8h, v3.8h, v7.h[0] sub v22.8h, v24.8h, v21.8h stur q9, [x3, #-0x10] sqrdmulh v12.8h, v5.8h, v12.8h subs x4, x4, #0x1 cbnz x4, Lintt_layer4567_start mul v21.8h, v22.8h, v0.h[2] mul v28.8h, v5.8h, v19.8h trn1 v10.2d, v10.2d, v31.2d trn1 v2.2d, v2.2d, v15.2d add v11.8h, v4.8h, v11.8h sub v30.8h, v8.8h, v30.8h add v23.8h, v20.8h, v25.8h add v24.8h, v2.8h, 
v10.8h mul v8.8h, v30.8h, v0.h[4] sqrdmulh v5.8h, v30.8h, v0.h[5] sqrdmulh v22.8h, v22.8h, v0.h[3] add v30.8h, v24.8h, v23.8h ldur q26, [x2, #-0x30] mls v8.8h, v5.8h, v7.h[0] sub v5.8h, v2.8h, v10.8h ldur q13, [x2, #-0x40] mls v21.8h, v22.8h, v7.h[0] str q6, [x3, #0x20] mul v3.8h, v5.8h, v13.8h sqrdmulh v22.8h, v5.8h, v26.8h sub v18.8h, v21.8h, v8.8h mls v28.8h, v12.8h, v7.h[0] str q11, [x3], #0x40 mls v3.8h, v22.8h, v7.h[0] sqrdmulh v16.8h, v18.8h, v0.h[1] sub v10.8h, v24.8h, v23.8h mul v17.8h, v18.8h, v0.h[0] sub v11.8h, v3.8h, v28.8h mul v13.8h, v10.8h, v14.8h add v22.8h, v3.8h, v28.8h mul v14.8h, v11.8h, v14.8h ldur q26, [x2, #-0x50] trn2 v2.4s, v30.4s, v22.4s mls v17.8h, v16.8h, v7.h[0] sqrdmulh v10.8h, v10.8h, v26.8h sqrdmulh v11.8h, v11.8h, v26.8h ldr q9, [x1], #0x10 mls v13.8h, v10.8h, v7.h[0] mls v14.8h, v11.8h, v7.h[0] trn1 v6.4s, v30.4s, v22.4s add v8.8h, v21.8h, v8.8h stur q17, [x3, #-0x10] trn2 v0.4s, v13.4s, v14.4s trn1 v1.4s, v13.4s, v14.4s sqdmulh v13.8h, v8.8h, v7.h[1] trn1 v24.2d, v2.2d, v0.2d trn2 v2.2d, v2.2d, v0.2d trn2 v26.2d, v6.2d, v1.2d trn1 v11.2d, v6.2d, v1.2d add v22.8h, v26.8h, v2.8h sub v28.8h, v11.8h, v24.8h sub v27.8h, v26.8h, v2.8h add v10.8h, v11.8h, v24.8h sqrdmulh v11.8h, v28.8h, v9.h[3] mul v24.8h, v28.8h, v9.h[2] sqdmulh v1.8h, v22.8h, v7.h[1] sqrdmulh v0.8h, v27.8h, v9.h[5] srshr v12.8h, v13.8h, #0xb mls v24.8h, v11.8h, v7.h[0] sqdmulh v14.8h, v10.8h, v7.h[1] mul v27.8h, v27.8h, v9.h[4] mls v8.8h, v12.8h, v7.h[0] srshr v5.8h, v1.8h, #0xb srshr v14.8h, v14.8h, #0xb mls v27.8h, v0.8h, v7.h[0] mls v22.8h, v5.8h, v7.h[0] mls v10.8h, v14.8h, v7.h[0] stur q8, [x3, #-0x30] sub v2.8h, v24.8h, v27.8h add v14.8h, v24.8h, v27.8h sub v11.8h, v10.8h, v22.8h add v20.8h, v10.8h, v22.8h sqdmulh v22.8h, v14.8h, v7.h[1] sqrdmulh v8.8h, v11.8h, v9.h[1] mul v27.8h, v11.8h, v9.h[0] sqrdmulh v0.8h, v2.8h, v9.h[1] mul v11.8h, v2.8h, v9.h[0] srshr v10.8h, v22.8h, #0xb mls v27.8h, v8.8h, v7.h[0] str q20, [x3], #0x40 mls v11.8h, v0.8h, v7.h[0] mls v14.8h, v10.8h, v7.h[0] stur q27, [x3, #-0x20] stur q11, [x3, #-0x10] stur q14, [x3, #-0x30] mov x4, #0x4 // =4 ldr q0, [x1], #0x20 ldur q1, [x1, #-0x10] ldr q2, [x0] ldr q10, [x0, #0x40] ldr q11, [x0, #0x80] sub v14.8h, v2.8h, v10.8h add v2.8h, v2.8h, v10.8h ldr q10, [x0, #0xc0] sqrdmulh v8.8h, v14.8h, v0.h[7] mul v14.8h, v14.8h, v0.h[6] sub v22.8h, v11.8h, v10.8h add v10.8h, v11.8h, v10.8h ldr q11, [x0, #0x1c0] add v13.8h, v2.8h, v10.8h sub v2.8h, v2.8h, v10.8h sqrdmulh v10.8h, v22.8h, v1.h[1] mul v22.8h, v22.8h, v1.h[0] mls v14.8h, v8.8h, v7.h[0] sqrdmulh v8.8h, v2.8h, v0.h[3] mul v2.8h, v2.8h, v0.h[2] mls v22.8h, v10.8h, v7.h[0] ldr q10, [x0, #0x100] mls v2.8h, v8.8h, v7.h[0] sub v8.8h, v14.8h, v22.8h add v14.8h, v14.8h, v22.8h ldr q22, [x0, #0x180] sqrdmulh v24.8h, v8.8h, v0.h[3] mul v8.8h, v8.8h, v0.h[2] sub v26.8h, v22.8h, v11.8h add v11.8h, v22.8h, v11.8h ldr q22, [x0, #0x140] sqrdmulh v16.8h, v26.8h, v1.h[5] mul v26.8h, v26.8h, v1.h[4] add v23.8h, v10.8h, v22.8h sub v10.8h, v10.8h, v22.8h mls v8.8h, v24.8h, v7.h[0] add v22.8h, v23.8h, v11.8h mul v24.8h, v10.8h, v1.h[2] sqrdmulh v10.8h, v10.8h, v1.h[3] sub v19.8h, v13.8h, v22.8h add v18.8h, v13.8h, v22.8h sub v11.8h, v23.8h, v11.8h mls v24.8h, v10.8h, v7.h[0] mls v26.8h, v16.8h, v7.h[0] sqrdmulh v10.8h, v11.8h, v0.h[5] mul v11.8h, v11.8h, v0.h[4] sqrdmulh v22.8h, v19.8h, v0.h[1] sub v13.8h, v24.8h, v26.8h mul v16.8h, v19.8h, v0.h[0] mls v11.8h, v10.8h, v7.h[0] sqrdmulh v10.8h, v13.8h, v0.h[5] mul v13.8h, v13.8h, v0.h[4] add v24.8h, v24.8h, v26.8h sub v26.8h, v2.8h, v11.8h add 
v9.8h, v2.8h, v11.8h add v11.8h, v14.8h, v24.8h sub v14.8h, v14.8h, v24.8h sqrdmulh v2.8h, v26.8h, v0.h[1] mul v24.8h, v26.8h, v0.h[0] mls v13.8h, v10.8h, v7.h[0] mls v16.8h, v22.8h, v7.h[0] sqrdmulh v10.8h, v14.8h, v0.h[1] mls v24.8h, v2.8h, v7.h[0] add v22.8h, v8.8h, v13.8h str q16, [x0, #0x100] sub v2.8h, v8.8h, v13.8h str q24, [x0, #0x180] mul v13.8h, v14.8h, v0.h[0] str q22, [x0, #0xc0] sqrdmulh v21.8h, v2.8h, v0.h[1] ldr q6, [x0, #0x90] ldr q14, [x0, #0xd0] mls v13.8h, v10.8h, v7.h[0] str q11, [x0, #0x40] sub v10.8h, v6.8h, v14.8h ldr q11, [x0, #0x10] sqrdmulh v19.8h, v10.8h, v1.h[1] mul v20.8h, v10.8h, v1.h[0] ldr q28, [x0, #0x50] sub x4, x4, #0x2 Lintt_layer123_start: mls v20.8h, v19.8h, v7.h[0] ldr q31, [x0, #0x1d0] sub v22.8h, v11.8h, v28.8h ldr q30, [x0, #0x110] sqrdmulh v8.8h, v22.8h, v0.h[7] mul v3.8h, v22.8h, v0.h[6] mul v5.8h, v2.8h, v0.h[0] str q13, [x0, #0x140] add v10.8h, v11.8h, v28.8h ldr q22, [x0, #0x150] ldr q4, [x0, #0x190] sub v23.8h, v30.8h, v22.8h add v27.8h, v30.8h, v22.8h mls v3.8h, v8.8h, v7.h[0] mls v5.8h, v21.8h, v7.h[0] ldr q11, [x0, #0x20] sub v17.8h, v4.8h, v31.8h add v2.8h, v6.8h, v14.8h mul v19.8h, v23.8h, v1.h[2] sub v22.8h, v3.8h, v20.8h add v14.8h, v10.8h, v2.8h sub v24.8h, v10.8h, v2.8h sqrdmulh v2.8h, v23.8h, v1.h[3] sqrdmulh v30.8h, v22.8h, v0.h[3] mul v23.8h, v22.8h, v0.h[2] sqrdmulh v15.8h, v17.8h, v1.h[5] mls v19.8h, v2.8h, v7.h[0] add v2.8h, v4.8h, v31.8h mul v21.8h, v17.8h, v1.h[4] sqrdmulh v22.8h, v24.8h, v0.h[3] sub v26.8h, v27.8h, v2.8h add v8.8h, v27.8h, v2.8h mul v28.8h, v24.8h, v0.h[2] sqrdmulh v10.8h, v26.8h, v0.h[5] mul v31.8h, v26.8h, v0.h[4] mls v21.8h, v15.8h, v7.h[0] mls v28.8h, v22.8h, v7.h[0] sub v17.8h, v14.8h, v8.8h mls v31.8h, v10.8h, v7.h[0] sub v27.8h, v19.8h, v21.8h sqrdmulh v29.8h, v17.8h, v0.h[1] mul v10.8h, v17.8h, v0.h[0] sub v15.8h, v28.8h, v31.8h sqrdmulh v17.8h, v27.8h, v0.h[5] mul v25.8h, v27.8h, v0.h[4] sqrdmulh v6.8h, v15.8h, v0.h[1] mul v27.8h, v15.8h, v0.h[0] add v16.8h, v19.8h, v21.8h mls v25.8h, v17.8h, v7.h[0] mls v23.8h, v30.8h, v7.h[0] mls v27.8h, v6.8h, v7.h[0] ldr q6, [x0, #0xa0] add v22.8h, v23.8h, v25.8h str q27, [x0, #0x190] add v4.8h, v3.8h, v20.8h str q22, [x0, #0xd0] mls v10.8h, v29.8h, v7.h[0] str q5, [x0, #0x1c0] add v20.8h, v4.8h, v16.8h str q18, [x0], #0x10 sub v18.8h, v4.8h, v16.8h str q10, [x0, #0x100] sub v2.8h, v23.8h, v25.8h sqrdmulh v12.8h, v18.8h, v0.h[1] mul v13.8h, v18.8h, v0.h[0] add v18.8h, v14.8h, v8.8h ldr q14, [x0, #0xd0] mls v13.8h, v12.8h, v7.h[0] str q9, [x0, #0x70] sub v3.8h, v6.8h, v14.8h add v9.8h, v28.8h, v31.8h str q20, [x0, #0x40] sqrdmulh v19.8h, v3.8h, v1.h[1] mul v20.8h, v3.8h, v1.h[0] sqrdmulh v21.8h, v2.8h, v0.h[1] ldr q28, [x0, #0x50] subs x4, x4, #0x1 cbnz x4, Lintt_layer123_start mls v20.8h, v19.8h, v7.h[0] sub v10.8h, v11.8h, v28.8h add v11.8h, v11.8h, v28.8h mul v2.8h, v2.8h, v0.h[0] str q13, [x0, #0x140] add v25.8h, v6.8h, v14.8h str q18, [x0], #0x10 sqrdmulh v17.8h, v10.8h, v0.h[7] str q9, [x0, #0x70] ldr q8, [x0, #0x1c0] ldr q13, [x0, #0x100] ldr q26, [x0, #0x180] ldr q24, [x0, #0x140] add v15.8h, v26.8h, v8.8h sub v8.8h, v26.8h, v8.8h sub v12.8h, v13.8h, v24.8h add v24.8h, v13.8h, v24.8h sqrdmulh v18.8h, v8.8h, v1.h[5] mul v26.8h, v12.8h, v1.h[2] mul v8.8h, v8.8h, v1.h[4] sqrdmulh v16.8h, v12.8h, v1.h[3] mul v10.8h, v10.8h, v0.h[6] add v22.8h, v11.8h, v25.8h mls v8.8h, v18.8h, v7.h[0] mls v26.8h, v16.8h, v7.h[0] mls v10.8h, v17.8h, v7.h[0] add v23.8h, v24.8h, v15.8h sub v11.8h, v11.8h, v25.8h sub v3.8h, v26.8h, v8.8h sub v14.8h, v10.8h, v20.8h sub v19.8h, 
v22.8h, v23.8h mul v18.8h, v3.8h, v0.h[4] sqrdmulh v17.8h, v14.8h, v0.h[3] mul v14.8h, v14.8h, v0.h[2] sqrdmulh v3.8h, v3.8h, v0.h[5] sub v16.8h, v24.8h, v15.8h mls v2.8h, v21.8h, v7.h[0] mls v14.8h, v17.8h, v7.h[0] mls v18.8h, v3.8h, v7.h[0] sqrdmulh v31.8h, v16.8h, v0.h[5] str q2, [x0, #0x1b0] mul v13.8h, v16.8h, v0.h[4] add v24.8h, v14.8h, v18.8h sqrdmulh v2.8h, v11.8h, v0.h[3] mul v21.8h, v11.8h, v0.h[2] mls v13.8h, v31.8h, v7.h[0] add v16.8h, v26.8h, v8.8h add v28.8h, v10.8h, v20.8h mls v21.8h, v2.8h, v7.h[0] sub v14.8h, v14.8h, v18.8h add v2.8h, v28.8h, v16.8h sub v10.8h, v28.8h, v16.8h sub v16.8h, v21.8h, v13.8h sqrdmulh v27.8h, v19.8h, v0.h[1] mul v26.8h, v19.8h, v0.h[0] sqrdmulh v19.8h, v16.8h, v0.h[1] mul v28.8h, v16.8h, v0.h[0] sqrdmulh v8.8h, v14.8h, v0.h[1] mls v26.8h, v27.8h, v7.h[0] mul v14.8h, v14.8h, v0.h[0] mls v28.8h, v19.8h, v7.h[0] sqrdmulh v20.8h, v10.8h, v0.h[1] str q26, [x0, #0x100] mul v10.8h, v10.8h, v0.h[0] str q28, [x0, #0x180] add v22.8h, v22.8h, v23.8h str q24, [x0, #0xc0] mls v10.8h, v20.8h, v7.h[0] str q2, [x0, #0x40] mls v14.8h, v8.8h, v7.h[0] str q22, [x0], #0x10 add v11.8h, v21.8h, v13.8h str q10, [x0, #0x130] str q11, [x0, #0x70] str q14, [x0, #0x1b0] ldp d8, d9, [sp] .cfi_restore d8 .cfi_restore d9 ldp d10, d11, [sp, #0x10] .cfi_restore d10 .cfi_restore d11 ldp d12, d13, [sp, #0x20] .cfi_restore d12 .cfi_restore d13 ldp d14, d15, [sp, #0x30] .cfi_restore d14 .cfi_restore d15 add sp, sp, #0x40 .cfi_adjust_cfa_offset -0x40 ret .cfi_endproc
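The inverse NTT above first rescales every coefficient (the mul by 512 / sqrdmulh by 5040 pair in the Lintt_scale_start loop), which appears to fold the 1/128 normalization and a Montgomery factor into a single constant multiplication, and then runs Gentleman-Sande butterflies across the seven merged layers. A hedged sketch of one such butterfly, in the style of the Kyber reference code and reusing the montgomery_reduce and barrett_reduce_ref sketches from earlier examples; fqmul is illustrative and the register-level schedule above processes eight butterflies per instruction.

/* Montgomery multiplication helper built on the earlier sketch. */
static int16_t fqmul(int16_t a, int16_t b)
{
    return montgomery_reduce((int32_t)a * b);
}

/* One Gentleman-Sande butterfly: (a, b) -> (a + b, (b - a) * zeta). */
static void gs_butterfly(int16_t *a, int16_t *b, int16_t zeta)
{
    int16_t t = *a;
    *a = barrett_reduce_ref((int16_t)(t + *b));
    *b = fqmul((int16_t)(*b - t), zeta);
}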
wlsfx/bnbb
13,296
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/aarch64/src/ntt.S
/* Copyright (c) 2022 Arm Limited * Copyright (c) 2022 Hanno Becker * Copyright (c) 2023 Amin Abdulrahman, Matthias Kannwischer * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /* References * ========== * * - [NeonNTT] * Neon NTT: Faster Dilithium, Kyber, and Saber on Cortex-A72 and Apple M1 * Becker, Hwang, Kannwischer, Yang, Yang * https://eprint.iacr.org/2021/986 * * - [SLOTHY_Paper] * Fast and Clean: Auditable high-performance assembly via constraint solving * Abdulrahman, Becker, Kannwischer, Klein * https://eprint.iacr.org/2022/1303 */ /*yaml Name: ntt_asm Description: AArch64 ML-KEM forward NTT following @[NeonNTT] and @[SLOTHY_Paper] Signature: void mlk_ntt_asm(int16_t p[256], const int16_t twiddles12345[80], const int16_t twiddles56[384]) ABI: x0: type: buffer size_bytes: 512 permissions: read/write c_parameter: int16_t p[256] description: Input/output polynomial x1: type: buffer size_bytes: 160 permissions: read-only c_parameter: const int16_t twiddles12345[80] description: Twiddle factors for layers 1-5 x2: type: buffer size_bytes: 768 permissions: read-only c_parameter: const int16_t twiddles56[384] description: Twiddle factors for layers 6-7 Stack: bytes: 64 description: saving callee-saved Neon registers */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/aarch64_opt/src/ntt.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_ntt_asm) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_ntt_asm) S2N_BN_SYMBOL(mlkem_ntt_asm): .cfi_startproc sub sp, sp, #0x40 .cfi_adjust_cfa_offset 0x40 stp d8, d9, [sp] .cfi_rel_offset d8, 0x0 .cfi_rel_offset d9, 0x8 stp d10, d11, [sp, #0x10] .cfi_rel_offset d10, 0x10 .cfi_rel_offset d11, 0x18 stp d12, d13, [sp, #0x20] .cfi_rel_offset d12, 0x20 .cfi_rel_offset d13, 0x28 stp d14, d15, [sp, #0x30] .cfi_rel_offset d14, 0x30 .cfi_rel_offset d15, 0x38 mov w5, #0xd01 // =3329 mov v7.h[0], w5 mov w5, #0x4ebf // =20159 mov v7.h[1], w5 mov x3, x0 mov x4, #0x4 // =4 ldr q0, [x1], #0x20 ldur q1, [x1, #-0x10] ldr q5, [x0] ldr q13, [x0, #0x40] ldr q3, [x0, #0x80] ldr q22, [x0, #0xc0] ldr q24, [x0, #0x100] ldr q11, [x0, #0x1c0] mul v23.8h, v24.8h, v0.h[0] ldr q2, [x0, #0x140] mul v17.8h, v11.8h, v0.h[0] ldr q19, [x0, #0x180] sub x4, x4, #0x1 Lntt_layer123_start: sqrdmulh v8.8h, v24.8h, v0.h[1] sqrdmulh v24.8h, v2.8h, v0.h[1] mul v2.8h, v2.8h, v0.h[0] sqrdmulh v14.8h, v19.8h, v0.h[1] mls v23.8h, v8.8h, v7.h[0] mul v8.8h, v19.8h, v0.h[0] mls v2.8h, v24.8h, v7.h[0] sqrdmulh v24.8h, v11.8h, v0.h[1] sub v11.8h, v5.8h, v23.8h mls v8.8h, v14.8h, v7.h[0] sub v14.8h, v13.8h, v2.8h add v2.8h, v13.8h, v2.8h add v23.8h, v5.8h, v23.8h sub v19.8h, v3.8h, v8.8h add v8.8h, v3.8h, v8.8h mls v17.8h, v24.8h, v7.h[0] sqrdmulh v24.8h, v19.8h, v0.h[5] mul v19.8h, v19.8h, v0.h[4] sqrdmulh v5.8h, v8.8h, v0.h[3] sub v13.8h, v22.8h, v17.8h add v17.8h, v22.8h, v17.8h mls v19.8h, v24.8h, v7.h[0] sqrdmulh v24.8h, v13.8h, v0.h[5] mul v13.8h, v13.8h, v0.h[4] mul v8.8h, v8.8h, v0.h[2] sub v3.8h, v11.8h, v19.8h add v11.8h, v11.8h, v19.8h mls v13.8h, v24.8h, v7.h[0] sqrdmulh v24.8h, v17.8h, v0.h[3] mul v19.8h, v17.8h, v0.h[2] mls v8.8h, v5.8h, v7.h[0] sub v17.8h, v14.8h, v13.8h add v14.8h, v14.8h, v13.8h mls v19.8h, v24.8h, v7.h[0] sub v24.8h, v23.8h, v8.8h add v8.8h, v23.8h, v8.8h sqrdmulh v23.8h, v14.8h, v1.h[3] sub v5.8h, v2.8h, v19.8h add v2.8h, v2.8h, v19.8h mul v14.8h, v14.8h, v1.h[2] sqrdmulh v19.8h, v5.8h, v1.h[1] sqrdmulh 
v13.8h, v2.8h, v0.h[7] mul v2.8h, v2.8h, v0.h[6] mul v5.8h, v5.8h, v1.h[0] mls v14.8h, v23.8h, v7.h[0] sqrdmulh v23.8h, v17.8h, v1.h[5] mls v2.8h, v13.8h, v7.h[0] mls v5.8h, v19.8h, v7.h[0] sub v19.8h, v11.8h, v14.8h add v14.8h, v11.8h, v14.8h sub v11.8h, v8.8h, v2.8h mul v17.8h, v17.8h, v1.h[4] add v8.8h, v8.8h, v2.8h sub v2.8h, v24.8h, v5.8h add v24.8h, v24.8h, v5.8h mls v17.8h, v23.8h, v7.h[0] str q8, [x0], #0x10 ldr q5, [x0] sub v8.8h, v3.8h, v17.8h add v23.8h, v3.8h, v17.8h str q11, [x0, #0x30] ldr q13, [x0, #0x40] str q24, [x0, #0x70] ldr q3, [x0, #0x80] str q2, [x0, #0xb0] ldr q22, [x0, #0xc0] str q14, [x0, #0xf0] ldr q24, [x0, #0x100] str q19, [x0, #0x130] ldr q2, [x0, #0x140] str q23, [x0, #0x170] mul v23.8h, v24.8h, v0.h[0] str q8, [x0, #0x1b0] ldr q11, [x0, #0x1c0] ldr q19, [x0, #0x180] mul v17.8h, v11.8h, v0.h[0] subs x4, x4, #0x1 cbnz x4, Lntt_layer123_start sqrdmulh v6.8h, v11.8h, v0.h[1] mul v25.8h, v19.8h, v0.h[0] sqrdmulh v12.8h, v19.8h, v0.h[1] mul v11.8h, v2.8h, v0.h[0] mls v17.8h, v6.8h, v7.h[0] sqrdmulh v14.8h, v2.8h, v0.h[1] mls v25.8h, v12.8h, v7.h[0] sqrdmulh v27.8h, v24.8h, v0.h[1] add v9.8h, v22.8h, v17.8h mls v11.8h, v14.8h, v7.h[0] sub v26.8h, v3.8h, v25.8h sqrdmulh v2.8h, v9.8h, v0.h[3] mul v24.8h, v9.8h, v0.h[2] mul v19.8h, v26.8h, v0.h[4] sqrdmulh v14.8h, v26.8h, v0.h[5] mls v23.8h, v27.8h, v7.h[0] mls v24.8h, v2.8h, v7.h[0] add v6.8h, v13.8h, v11.8h mls v19.8h, v14.8h, v7.h[0] sub v4.8h, v5.8h, v23.8h add v10.8h, v3.8h, v25.8h sub v8.8h, v6.8h, v24.8h add v3.8h, v4.8h, v19.8h sub v31.8h, v4.8h, v19.8h mul v14.8h, v8.8h, v1.h[0] sqrdmulh v4.8h, v10.8h, v0.h[3] mul v12.8h, v10.8h, v0.h[2] sqrdmulh v2.8h, v8.8h, v1.h[1] sub v8.8h, v22.8h, v17.8h add v30.8h, v5.8h, v23.8h mls v12.8h, v4.8h, v7.h[0] sqrdmulh v4.8h, v8.8h, v0.h[5] mul v19.8h, v8.8h, v0.h[4] mls v14.8h, v2.8h, v7.h[0] sub v27.8h, v30.8h, v12.8h sub v23.8h, v13.8h, v11.8h mls v19.8h, v4.8h, v7.h[0] sub v2.8h, v27.8h, v14.8h add v8.8h, v27.8h, v14.8h add v14.8h, v6.8h, v24.8h str q2, [x0, #0xc0] add v2.8h, v23.8h, v19.8h str q8, [x0, #0x80] sub v19.8h, v23.8h, v19.8h sqrdmulh v13.8h, v2.8h, v1.h[3] mul v17.8h, v2.8h, v1.h[2] add v27.8h, v30.8h, v12.8h sqrdmulh v24.8h, v19.8h, v1.h[5] mul v19.8h, v19.8h, v1.h[4] mls v17.8h, v13.8h, v7.h[0] sqrdmulh v8.8h, v14.8h, v0.h[7] mul v2.8h, v14.8h, v0.h[6] mls v19.8h, v24.8h, v7.h[0] add v26.8h, v3.8h, v17.8h sub v14.8h, v3.8h, v17.8h mls v2.8h, v8.8h, v7.h[0] str q26, [x0, #0x100] add v8.8h, v31.8h, v19.8h str q14, [x0, #0x140] sub v24.8h, v31.8h, v19.8h str q8, [x0, #0x180] add v18.8h, v27.8h, v2.8h str q24, [x0, #0x1c0] sub v14.8h, v27.8h, v2.8h str q18, [x0], #0x10 str q14, [x0, #0x30] mov x0, x3 mov x4, #0x8 // =8 ldr q11, [x1], #0x10 ldr q24, [x0, #0x30] ldr q8, [x0, #0x20] sqrdmulh v14.8h, v24.8h, v11.h[1] mul v2.8h, v24.8h, v11.h[0] sqrdmulh v9.8h, v8.8h, v11.h[1] ldr q24, [x0, #0x10] mls v2.8h, v14.8h, v7.h[0] mul v14.8h, v8.8h, v11.h[0] ldr q6, [x2, #0x40] sub v8.8h, v24.8h, v2.8h mls v14.8h, v9.8h, v7.h[0] add v2.8h, v24.8h, v2.8h mul v27.8h, v8.8h, v11.h[4] sqrdmulh v8.8h, v8.8h, v11.h[5] mul v24.8h, v2.8h, v11.h[2] sqrdmulh v11.8h, v2.8h, v11.h[3] mls v27.8h, v8.8h, v7.h[0] ldr q5, [x2, #0x50] sub x4, x4, #0x1 Lntt_layer4567_start: ldr q8, [x0] ldr q17, [x2, #0x10] sub v1.8h, v8.8h, v14.8h mls v24.8h, v11.8h, v7.h[0] add v8.8h, v8.8h, v14.8h sub v0.8h, v1.8h, v27.8h add v12.8h, v1.8h, v27.8h sub v19.8h, v8.8h, v24.8h add v8.8h, v8.8h, v24.8h trn1 v24.4s, v12.4s, v0.4s trn2 v13.4s, v12.4s, v0.4s trn1 v23.4s, v8.4s, v19.4s ldr q2, [x2], #0x60 trn2 
v9.2d, v23.2d, v24.2d trn2 v8.4s, v8.4s, v19.4s sqrdmulh v26.8h, v9.8h, v17.8h trn1 v24.2d, v23.2d, v24.2d trn2 v11.2d, v8.2d, v13.2d trn1 v29.2d, v8.2d, v13.2d sqrdmulh v23.8h, v11.8h, v17.8h mul v10.8h, v11.8h, v2.8h mul v0.8h, v9.8h, v2.8h ldur q11, [x2, #-0x40] mls v10.8h, v23.8h, v7.h[0] mls v0.8h, v26.8h, v7.h[0] ldur q19, [x2, #-0x30] add v17.8h, v29.8h, v10.8h sub v23.8h, v24.8h, v0.8h sub v30.8h, v29.8h, v10.8h mul v2.8h, v17.8h, v11.8h sqrdmulh v11.8h, v17.8h, v19.8h mul v8.8h, v30.8h, v6.8h ldr q22, [x0, #0x70] mls v2.8h, v11.8h, v7.h[0] add v24.8h, v24.8h, v0.8h ldr q15, [x1], #0x10 sub v14.8h, v24.8h, v2.8h add v24.8h, v24.8h, v2.8h sqrdmulh v1.8h, v22.8h, v15.h[1] mul v2.8h, v22.8h, v15.h[0] trn1 v0.4s, v24.4s, v14.4s trn2 v24.4s, v24.4s, v14.4s sqrdmulh v19.8h, v30.8h, v5.8h mls v2.8h, v1.8h, v7.h[0] ldr q16, [x0, #0x60] mls v8.8h, v19.8h, v7.h[0] ldr q6, [x2, #0x40] mul v14.8h, v16.8h, v15.h[0] sub v3.8h, v23.8h, v8.8h add v8.8h, v23.8h, v8.8h ldr q5, [x2, #0x50] trn2 v23.4s, v8.4s, v3.4s trn1 v31.4s, v8.4s, v3.4s sqrdmulh v8.8h, v16.8h, v15.h[1] trn2 v25.2d, v24.2d, v23.2d trn1 v29.2d, v24.2d, v23.2d ldr q24, [x0, #0x50] trn1 v16.2d, v0.2d, v31.2d mls v14.8h, v8.8h, v7.h[0] sub v13.8h, v24.8h, v2.8h add v24.8h, v24.8h, v2.8h trn2 v2.2d, v0.2d, v31.2d sqrdmulh v19.8h, v13.8h, v15.h[5] str q2, [x0, #0x20] sqrdmulh v11.8h, v24.8h, v15.h[3] str q16, [x0], #0x40 mul v27.8h, v13.8h, v15.h[4] stur q29, [x0, #-0x30] mul v24.8h, v24.8h, v15.h[2] stur q25, [x0, #-0x10] mls v27.8h, v19.8h, v7.h[0] subs x4, x4, #0x1 cbnz x4, Lntt_layer4567_start ldr q23, [x0] ldr q17, [x2], #0x60 sub v19.8h, v23.8h, v14.8h mls v24.8h, v11.8h, v7.h[0] add v14.8h, v23.8h, v14.8h add v8.8h, v19.8h, v27.8h sub v13.8h, v19.8h, v27.8h add v12.8h, v14.8h, v24.8h sub v24.8h, v14.8h, v24.8h trn1 v0.4s, v8.4s, v13.4s trn2 v23.4s, v8.4s, v13.4s trn2 v19.4s, v12.4s, v24.4s ldur q27, [x2, #-0x50] trn2 v8.2d, v19.2d, v23.2d trn1 v22.4s, v12.4s, v24.4s mul v14.8h, v8.8h, v17.8h sqrdmulh v24.8h, v8.8h, v27.8h trn2 v2.2d, v22.2d, v0.2d trn1 v8.2d, v19.2d, v23.2d mul v11.8h, v2.8h, v17.8h mls v14.8h, v24.8h, v7.h[0] ldur q26, [x2, #-0x30] sqrdmulh v23.8h, v2.8h, v27.8h sub v24.8h, v8.8h, v14.8h ldur q2, [x2, #-0x40] sqrdmulh v19.8h, v24.8h, v5.8h add v14.8h, v8.8h, v14.8h mul v24.8h, v24.8h, v6.8h mls v11.8h, v23.8h, v7.h[0] sqrdmulh v8.8h, v14.8h, v26.8h mul v2.8h, v14.8h, v2.8h trn1 v14.2d, v22.2d, v0.2d mls v24.8h, v19.8h, v7.h[0] sub v23.8h, v14.8h, v11.8h mls v2.8h, v8.8h, v7.h[0] add v14.8h, v14.8h, v11.8h add v8.8h, v23.8h, v24.8h sub v24.8h, v23.8h, v24.8h sub v19.8h, v14.8h, v2.8h add v11.8h, v14.8h, v2.8h trn1 v2.4s, v8.4s, v24.4s trn2 v14.4s, v8.4s, v24.4s trn2 v23.4s, v11.4s, v19.4s trn1 v11.4s, v11.4s, v19.4s trn2 v8.2d, v23.2d, v14.2d trn1 v24.2d, v11.2d, v2.2d str q8, [x0, #0x30] trn2 v8.2d, v11.2d, v2.2d str q24, [x0], #0x40 trn1 v24.2d, v23.2d, v14.2d stur q8, [x0, #-0x20] stur q24, [x0, #-0x30] ldp d8, d9, [sp] .cfi_restore d8 .cfi_restore d9 ldp d10, d11, [sp, #0x10] .cfi_restore d10 .cfi_restore d11 ldp d12, d13, [sp, #0x20] .cfi_restore d12 .cfi_restore d13 ldp d14, d15, [sp, #0x30] .cfi_restore d14 .cfi_restore d15 add sp, sp, #0x40 .cfi_adjust_cfa_offset -0x40 ret .cfi_endproc
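The forward NTT uses Cooley-Tukey butterflies, with layers 1-3 merged in the first loop and layers 4-7 in the second, twiddles coming from the two tables passed in x1 and x2. A hedged sketch of a single butterfly, again following the Kyber reference code and reusing the fqmul sketch from the preceding record's example; the assembly keeps coefficients in a transposed register layout for the innermost layers rather than calling anything like this per pair.

/* One Cooley-Tukey butterfly: (a, b) -> (a + zeta*b, a - zeta*b). */
static void ct_butterfly(int16_t *a, int16_t *b, int16_t zeta)
{
    int16_t t = fqmul(*b, zeta);
    *b = (int16_t)(*a - t);
    *a = (int16_t)(*a + t);
}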
wlsfx/bnbb
9,973
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/aarch64/src/polyvec_basemul_acc_montgomery_cached_asm_k3.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /* References * ========== * * - [NeonNTT] * Neon NTT: Faster Dilithium, Kyber, and Saber on Cortex-A72 and Apple M1 * Becker, Hwang, Kannwischer, Yang, Yang * https://eprint.iacr.org/2021/986 */ /*yaml Name: polyvec_basemul_acc_montgomery_cached_asm_k3 Description: Re-implementation of asymmetric base multiplication following @[NeonNTT] for k=3 Signature: void mlk_polyvec_basemul_acc_montgomery_cached_asm_k3(int16_t r[256], const int16_t a[768], const int16_t b[768], const int16_t b_cache[384]) ABI: x0: type: buffer size_bytes: 512 permissions: write-only c_parameter: int16_t r[256] description: Output polynomial x1: type: buffer size_bytes: 1536 permissions: read-only c_parameter: const int16_t a[768] description: Input polynomial vector a x2: type: buffer size_bytes: 1536 permissions: read-only c_parameter: const int16_t b[768] description: Input polynomial vector b x3: type: buffer size_bytes: 768 permissions: read-only c_parameter: const int16_t b_cache[384] description: Cached values for b Stack: bytes: 64 description: saving callee-saved Neon registers */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/aarch64_opt/src/polyvec_basemul_acc_montgomery_cached_asm_k3.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k3) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k3) S2N_BN_SYMBOL(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k3): .cfi_startproc sub sp, sp, #0x40 .cfi_adjust_cfa_offset 0x40 stp d8, d9, [sp] .cfi_rel_offset d8, 0x0 .cfi_rel_offset d9, 0x8 stp d10, d11, [sp, #0x10] .cfi_rel_offset d10, 0x10 .cfi_rel_offset d11, 0x18 stp d12, d13, [sp, #0x20] .cfi_rel_offset d12, 0x20 .cfi_rel_offset d13, 0x28 stp d14, d15, [sp, #0x30] .cfi_rel_offset d14, 0x30 .cfi_rel_offset d15, 0x38 mov w14, #0xd01 // =3329 dup v0.8h, w14 mov w14, #0xcff // =3327 dup v2.8h, w14 add x4, x1, #0x200 add x5, x2, #0x200 add x6, x3, #0x100 add x7, x1, #0x400 add x8, x2, #0x400 add x9, x3, #0x200 mov x13, #0x10 // =16 ldr q7, [x2, #0x10] ldr q20, [x2], #0x20 ldr q15, [x1, #0x10] uzp1 v8.8h, v20.8h, v7.8h uzp2 v7.8h, v20.8h, v7.8h ld1 { v20.8h }, [x3], #16 ldr q30, [x1], #0x20 ldr q11, [x4], #0x20 uzp1 v16.8h, v30.8h, v15.8h uzp2 v15.8h, v30.8h, v15.8h smull v30.4s, v16.4h, v7.4h smull2 v7.4s, v16.8h, v7.8h smull v9.4s, v16.4h, v8.4h smull2 v16.4s, v16.8h, v8.8h smlal v30.4s, v15.4h, v8.4h smlal2 v7.4s, v15.8h, v8.8h smlal v9.4s, v15.4h, v20.4h smlal2 v16.4s, v15.8h, v20.8h ldur q20, [x4, #-0x10] ldr q15, [x5], #0x20 uzp1 v8.8h, v11.8h, v20.8h uzp2 v20.8h, v11.8h, v20.8h ldur q11, [x5, #-0x10] ld1 { v27.8h }, [x6], #16 uzp1 v10.8h, v15.8h, v11.8h uzp2 v15.8h, v15.8h, v11.8h smlal v9.4s, v8.4h, v10.4h smlal2 v16.4s, v8.8h, v10.8h smlal v30.4s, v8.4h, v15.4h smlal2 v7.4s, v8.8h, v15.8h smlal v9.4s, v20.4h, v27.4h smlal2 v16.4s, v20.8h, v27.8h smlal v30.4s, v20.4h, v10.4h smlal2 v7.4s, v20.8h, v10.8h ldr q20, [x7], #0x20 ldur q15, [x7, #-0x10] ldr q8, [x8], #0x20 uzp1 v11.8h, v20.8h, v15.8h uzp2 v20.8h, v20.8h, v15.8h ldur q15, [x8, #-0x10] ld1 { v27.8h }, [x9], #16 uzp1 v10.8h, v8.8h, v15.8h uzp2 v15.8h, v8.8h, v15.8h smlal v9.4s, v11.4h, v10.4h smlal2 v16.4s, v11.8h, v10.8h smlal v30.4s, v11.4h, v15.4h smlal2 v7.4s, v11.8h, v15.8h smlal v9.4s, v20.4h, v27.4h smlal2 v16.4s, v20.8h, v27.8h smlal v30.4s, v20.4h, 
v10.4h smlal2 v7.4s, v20.8h, v10.8h ldr q15, [x2], #0x20 uzp1 v20.8h, v9.8h, v16.8h uzp1 v8.8h, v30.8h, v7.8h mul v20.8h, v20.8h, v2.8h mul v8.8h, v8.8h, v2.8h ldr q21, [x4], #0x20 smlal v9.4s, v20.4h, v0.4h smlal2 v16.4s, v20.8h, v0.8h smlal v30.4s, v8.4h, v0.4h smlal2 v7.4s, v8.8h, v0.8h ldur q6, [x4, #-0x10] uzp2 v27.8h, v9.8h, v16.8h uzp2 v10.8h, v30.8h, v7.8h ldur q16, [x2, #-0x10] ldr q30, [x1, #0x10] ld1 { v9.8h }, [x3], #16 ldr q1, [x5], #0x20 ldur q12, [x5, #-0x10] ld1 { v24.8h }, [x6], #16 ldr q19, [x7], #0x20 ldur q31, [x7, #-0x10] ldr q17, [x8], #0x20 ldur q18, [x8, #-0x10] ld1 { v25.8h }, [x9], #16 sub x13, x13, #0x2 Lpolyvec_basemul_acc_montgomery_cached_k3_loop: ldr q20, [x1], #0x20 uzp1 v7.8h, v15.8h, v16.8h uzp2 v15.8h, v15.8h, v16.8h uzp1 v8.8h, v20.8h, v30.8h uzp2 v20.8h, v20.8h, v30.8h smull v30.4s, v8.4h, v15.4h smull2 v15.4s, v8.8h, v15.8h smull v11.4s, v8.4h, v7.4h smull2 v8.4s, v8.8h, v7.8h smlal v30.4s, v20.4h, v7.4h smlal2 v15.4s, v20.8h, v7.8h smlal v11.4s, v20.4h, v9.4h smlal2 v8.4s, v20.8h, v9.8h uzp1 v7.8h, v21.8h, v6.8h uzp2 v20.8h, v21.8h, v6.8h uzp1 v16.8h, v1.8h, v12.8h uzp2 v9.8h, v1.8h, v12.8h smlal v11.4s, v7.4h, v16.4h smlal2 v8.4s, v7.8h, v16.8h smlal v30.4s, v7.4h, v9.4h smlal2 v15.4s, v7.8h, v9.8h smlal v11.4s, v20.4h, v24.4h smlal2 v8.4s, v20.8h, v24.8h smlal v30.4s, v20.4h, v16.4h smlal2 v15.4s, v20.8h, v16.8h uzp1 v7.8h, v19.8h, v31.8h uzp2 v20.8h, v19.8h, v31.8h uzp1 v16.8h, v17.8h, v18.8h uzp2 v9.8h, v17.8h, v18.8h smlal v11.4s, v7.4h, v16.4h smlal2 v8.4s, v7.8h, v16.8h smlal v30.4s, v7.4h, v9.4h smlal2 v15.4s, v7.8h, v9.8h smlal v11.4s, v20.4h, v25.4h smlal2 v8.4s, v20.8h, v25.8h smlal v30.4s, v20.4h, v16.4h smlal2 v15.4s, v20.8h, v16.8h ldr q16, [x2, #0x10] uzp1 v7.8h, v11.8h, v8.8h uzp1 v20.8h, v30.8h, v15.8h mul v7.8h, v7.8h, v2.8h mul v20.8h, v20.8h, v2.8h zip2 v9.8h, v27.8h, v10.8h zip1 v27.8h, v27.8h, v10.8h smlal v11.4s, v7.4h, v0.4h smlal2 v8.4s, v7.8h, v0.8h smlal v30.4s, v20.4h, v0.4h smlal2 v15.4s, v20.8h, v0.8h str q27, [x0], #0x20 uzp2 v27.8h, v11.8h, v8.8h stur q9, [x0, #-0x10] uzp2 v10.8h, v30.8h, v15.8h ldr q30, [x1, #0x10] ldr q15, [x2], #0x20 ld1 { v9.8h }, [x3], #16 ldr q21, [x4], #0x20 ldur q6, [x4, #-0x10] ldr q1, [x5], #0x20 ldur q12, [x5, #-0x10] ld1 { v24.8h }, [x6], #16 ldr q19, [x7], #0x20 ldur q31, [x7, #-0x10] ldr q17, [x8], #0x20 ldur q18, [x8, #-0x10] ld1 { v25.8h }, [x9], #16 sub x13, x13, #0x1 cbnz x13, Lpolyvec_basemul_acc_montgomery_cached_k3_loop ldr q7, [x1], #0x20 uzp1 v20.8h, v15.8h, v16.8h uzp2 v15.8h, v15.8h, v16.8h uzp1 v23.8h, v7.8h, v30.8h uzp2 v11.8h, v7.8h, v30.8h smull2 v8.4s, v23.8h, v20.8h smull v5.4s, v23.4h, v20.4h smull2 v30.4s, v23.8h, v15.8h uzp1 v28.8h, v1.8h, v12.8h smlal2 v8.4s, v11.8h, v9.8h smlal v5.4s, v11.4h, v9.4h uzp1 v3.8h, v21.8h, v6.8h smull v16.4s, v23.4h, v15.4h smlal2 v8.4s, v3.8h, v28.8h smlal v5.4s, v3.4h, v28.4h uzp2 v29.8h, v21.8h, v6.8h uzp1 v7.8h, v17.8h, v18.8h smlal2 v8.4s, v29.8h, v24.8h uzp1 v14.8h, v19.8h, v31.8h smlal v16.4s, v11.4h, v20.4h smlal2 v30.4s, v11.8h, v20.8h smlal2 v8.4s, v14.8h, v7.8h uzp2 v20.8h, v1.8h, v12.8h uzp2 v21.8h, v19.8h, v31.8h smlal2 v30.4s, v3.8h, v20.8h smlal v16.4s, v3.4h, v20.4h smlal v5.4s, v29.4h, v24.4h uzp2 v9.8h, v17.8h, v18.8h smlal2 v30.4s, v29.8h, v28.8h smlal v16.4s, v29.4h, v28.4h smlal v5.4s, v14.4h, v7.4h smlal2 v8.4s, v21.8h, v25.8h smlal2 v30.4s, v14.8h, v9.8h smlal v16.4s, v14.4h, v9.4h smlal v5.4s, v21.4h, v25.4h zip1 v20.8h, v27.8h, v10.8h smlal2 v30.4s, v21.8h, v7.8h smlal v16.4s, v21.4h, v7.4h uzp1 v7.8h, v5.8h, v8.8h 
str q20, [x0], #0x20 mul v15.8h, v7.8h, v2.8h uzp1 v7.8h, v16.8h, v30.8h zip2 v31.8h, v27.8h, v10.8h mul v20.8h, v7.8h, v2.8h smlal v5.4s, v15.4h, v0.4h smlal2 v8.4s, v15.8h, v0.8h stur q31, [x0, #-0x10] smlal2 v30.4s, v20.8h, v0.8h smlal v16.4s, v20.4h, v0.4h uzp2 v15.8h, v5.8h, v8.8h uzp2 v20.8h, v16.8h, v30.8h zip1 v7.8h, v15.8h, v20.8h zip2 v20.8h, v15.8h, v20.8h str q7, [x0], #0x20 stur q20, [x0, #-0x10] ldp d8, d9, [sp] .cfi_restore d8 .cfi_restore d9 ldp d10, d11, [sp, #0x10] .cfi_restore d10 .cfi_restore d11 ldp d12, d13, [sp, #0x20] .cfi_restore d12 .cfi_restore d13 ldp d14, d15, [sp, #0x30] .cfi_restore d14 .cfi_restore d15 add sp, sp, #0x40 .cfi_adjust_cfa_offset -0x40 ret .cfi_endproc
wlsfx/bnbb
11,894
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/fipsmodule/ml_kem/mlkem/native/aarch64/src/polyvec_basemul_acc_montgomery_cached_asm_k4.S
/* * Copyright (c) The mlkem-native project authors * SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT */ /* References * ========== * * - [NeonNTT] * Neon NTT: Faster Dilithium, Kyber, and Saber on Cortex-A72 and Apple M1 * Becker, Hwang, Kannwischer, Yang, Yang * https://eprint.iacr.org/2021/986 */ /*yaml Name: polyvec_basemul_acc_montgomery_cached_asm_k4 Description: Re-implementation of asymmetric base multiplication following @[NeonNTT] for k=4 Signature: void mlk_polyvec_basemul_acc_montgomery_cached_asm_k4(int16_t r[256], const int16_t a[1024], const int16_t b[1024], const int16_t b_cache[512]) ABI: x0: type: buffer size_bytes: 512 permissions: write-only c_parameter: int16_t r[256] description: Output polynomial x1: type: buffer size_bytes: 2048 permissions: read-only c_parameter: const int16_t a[1024] description: Input polynomial vector a x2: type: buffer size_bytes: 2048 permissions: read-only c_parameter: const int16_t b[1024] description: Input polynomial vector b x3: type: buffer size_bytes: 1024 permissions: read-only c_parameter: const int16_t b_cache[512] description: Cached values for b Stack: bytes: 64 description: saving callee-saved Neon registers */ #include "_internal_s2n_bignum.h" /* * WARNING: This file is auto-derived from the mlkem-native source file * dev/aarch64_opt/src/polyvec_basemul_acc_montgomery_cached_asm_k4.S using scripts/simpasm. Do not modify it directly. */ .text .balign 4 S2N_BN_SYM_VISIBILITY_DIRECTIVE(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k4) S2N_BN_SYM_PRIVACY_DIRECTIVE(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k4) S2N_BN_SYMBOL(mlkem_polyvec_basemul_acc_montgomery_cached_asm_k4): .cfi_startproc sub sp, sp, #0x40 .cfi_adjust_cfa_offset 0x40 stp d8, d9, [sp] .cfi_rel_offset d8, 0x0 .cfi_rel_offset d9, 0x8 stp d10, d11, [sp, #0x10] .cfi_rel_offset d10, 0x10 .cfi_rel_offset d11, 0x18 stp d12, d13, [sp, #0x20] .cfi_rel_offset d12, 0x20 .cfi_rel_offset d13, 0x28 stp d14, d15, [sp, #0x30] .cfi_rel_offset d14, 0x30 .cfi_rel_offset d15, 0x38 mov w14, #0xd01 // =3329 dup v0.8h, w14 mov w14, #0xcff // =3327 dup v2.8h, w14 add x4, x1, #0x200 add x5, x2, #0x200 add x6, x3, #0x100 add x7, x1, #0x400 add x8, x2, #0x400 add x9, x3, #0x200 add x10, x1, #0x600 add x11, x2, #0x600 add x12, x3, #0x300 mov x13, #0x10 // =16 ldr q23, [x2, #0x10] ldr q19, [x2], #0x20 ldr q17, [x5], #0x20 uzp2 v13.8h, v19.8h, v23.8h uzp1 v19.8h, v19.8h, v23.8h ldur q23, [x5, #-0x10] ldr q30, [x1, #0x10] uzp2 v9.8h, v17.8h, v23.8h uzp1 v23.8h, v17.8h, v23.8h ldr q17, [x1], #0x20 ldr q10, [x7, #0x10] uzp1 v12.8h, v17.8h, v30.8h uzp2 v17.8h, v17.8h, v30.8h smull2 v30.4s, v12.8h, v13.8h smull v13.4s, v12.4h, v13.4h smull2 v22.4s, v12.8h, v19.8h smull v12.4s, v12.4h, v19.4h smlal2 v30.4s, v17.8h, v19.8h smlal v13.4s, v17.4h, v19.4h ldr q19, [x4], #0x20 ldur q16, [x4, #-0x10] ld1 { v8.8h }, [x3], #16 uzp1 v26.8h, v19.8h, v16.8h uzp2 v19.8h, v19.8h, v16.8h smlal2 v30.4s, v26.8h, v9.8h smlal v13.4s, v26.4h, v9.4h smlal2 v22.4s, v17.8h, v8.8h smlal v12.4s, v17.4h, v8.4h smlal2 v30.4s, v19.8h, v23.8h smlal v13.4s, v19.4h, v23.4h smlal2 v22.4s, v26.8h, v23.8h smlal v12.4s, v26.4h, v23.4h ldr q23, [x7], #0x20 ldr q17, [x8, #0x10] uzp1 v9.8h, v23.8h, v10.8h uzp2 v23.8h, v23.8h, v10.8h ldr q10, [x10], #0x20 ldur q16, [x10, #-0x10] ld1 { v8.8h }, [x12], #16 uzp1 v26.8h, v10.8h, v16.8h uzp2 v10.8h, v10.8h, v16.8h ld1 { v16.8h }, [x6], #16 ldr q3, [x11, #0x10] smlal2 v22.4s, v19.8h, v16.8h smlal v12.4s, v19.4h, v16.4h ldr q19, [x11], #0x20 ld1 { v16.8h }, [x9], #16 uzp1 v4.8h, v19.8h, 
v3.8h uzp2 v19.8h, v19.8h, v3.8h ldr q3, [x8], #0x20 ldr q31, [x2], #0x20 uzp1 v6.8h, v3.8h, v17.8h uzp2 v17.8h, v3.8h, v17.8h smlal2 v22.4s, v9.8h, v6.8h smlal2 v30.4s, v9.8h, v17.8h smlal v13.4s, v9.4h, v17.4h smlal v12.4s, v9.4h, v6.4h smlal2 v22.4s, v23.8h, v16.8h smlal2 v30.4s, v23.8h, v6.8h smlal v13.4s, v23.4h, v6.4h smlal v12.4s, v23.4h, v16.4h smlal2 v22.4s, v26.8h, v4.8h smlal2 v30.4s, v26.8h, v19.8h smlal v13.4s, v26.4h, v19.4h smlal v12.4s, v26.4h, v4.4h smlal2 v22.4s, v10.8h, v8.8h smlal2 v30.4s, v10.8h, v4.8h smlal v13.4s, v10.4h, v4.4h smlal v12.4s, v10.4h, v8.4h ldur q19, [x2, #-0x10] uzp1 v23.8h, v13.8h, v30.8h uzp1 v17.8h, v12.8h, v22.8h mul v23.8h, v23.8h, v2.8h uzp2 v21.8h, v31.8h, v19.8h uzp1 v19.8h, v31.8h, v19.8h mul v17.8h, v17.8h, v2.8h smlal v13.4s, v23.4h, v0.4h smlal2 v30.4s, v23.8h, v0.8h ldr q23, [x5], #0x20 smlal2 v22.4s, v17.8h, v0.8h uzp2 v15.8h, v13.8h, v30.8h smlal v12.4s, v17.4h, v0.4h ldur q17, [x5, #-0x10] ldr q13, [x1, #0x10] uzp2 v27.8h, v23.8h, v17.8h uzp1 v28.8h, v23.8h, v17.8h uzp2 v7.8h, v12.8h, v22.8h ldr q23, [x1], #0x20 zip1 v5.8h, v7.8h, v15.8h ldr q3, [x7, #0x10] uzp1 v31.8h, v23.8h, v13.8h uzp2 v16.8h, v23.8h, v13.8h smull2 v24.4s, v31.8h, v21.8h ldr q6, [x8, #0x10] ldr q23, [x10], #0x20 smlal2 v24.4s, v16.8h, v19.8h ldur q17, [x10, #-0x10] ld1 { v22.8h }, [x12], #16 uzp1 v30.8h, v23.8h, v17.8h uzp2 v11.8h, v23.8h, v17.8h ldr q23, [x4], #0x20 ldur q17, [x4, #-0x10] ldr q4, [x7], #0x20 uzp1 v20.8h, v23.8h, v17.8h uzp2 v26.8h, v23.8h, v17.8h uzp1 v9.8h, v4.8h, v3.8h smlal2 v24.4s, v20.8h, v27.8h ld1 { v8.8h }, [x6], #16 ldr q25, [x11, #0x10] ldr q29, [x11], #0x20 ld1 { v12.8h }, [x9], #16 uzp1 v10.8h, v29.8h, v25.8h ldr q14, [x8], #0x20 ld1 { v23.8h }, [x3], #16 sub x13, x13, #0x2 Lpolyvec_basemul_acc_montgomery_cached_k4_loop: smlal2 v24.4s, v26.8h, v28.8h uzp2 v4.8h, v4.8h, v3.8h smull2 v13.4s, v31.8h, v19.8h ldr q3, [x2], #0x20 uzp2 v1.8h, v29.8h, v25.8h smlal2 v13.4s, v16.8h, v23.8h ldur q17, [x2, #-0x10] smull v18.4s, v31.4h, v19.4h smlal2 v13.4s, v20.8h, v28.8h smull v29.4s, v31.4h, v21.4h ldr q21, [x5], #0x20 smlal2 v13.4s, v26.8h, v8.8h smlal v29.4s, v16.4h, v19.4h ldur q19, [x5, #-0x10] smlal v18.4s, v16.4h, v23.4h smlal v29.4s, v20.4h, v27.4h uzp1 v31.8h, v14.8h, v6.8h uzp2 v27.8h, v21.8h, v19.8h smlal v18.4s, v20.4h, v28.4h ldr q25, [x1, #0x10] smlal v29.4s, v26.4h, v28.4h smlal v18.4s, v26.4h, v8.4h uzp2 v26.8h, v14.8h, v6.8h smlal2 v13.4s, v9.8h, v31.8h smlal2 v24.4s, v9.8h, v26.8h smlal v29.4s, v9.4h, v26.4h smlal v18.4s, v9.4h, v31.4h smlal2 v13.4s, v4.8h, v12.8h smlal2 v24.4s, v4.8h, v31.8h smlal v29.4s, v4.4h, v31.4h smlal v18.4s, v4.4h, v12.4h smlal2 v13.4s, v30.8h, v10.8h smlal2 v24.4s, v30.8h, v1.8h smlal v29.4s, v30.4h, v1.4h smlal v18.4s, v30.4h, v10.4h smlal2 v13.4s, v11.8h, v22.8h smlal2 v24.4s, v11.8h, v10.8h smlal v29.4s, v11.4h, v10.4h smlal v18.4s, v11.4h, v22.4h ldr q22, [x1], #0x20 uzp1 v31.8h, v29.8h, v24.8h uzp1 v28.8h, v21.8h, v19.8h mul v19.8h, v31.8h, v2.8h uzp1 v31.8h, v22.8h, v25.8h uzp2 v16.8h, v22.8h, v25.8h uzp2 v21.8h, v3.8h, v17.8h smlal v29.4s, v19.4h, v0.4h smlal2 v24.4s, v19.8h, v0.8h uzp1 v19.8h, v3.8h, v17.8h uzp1 v26.8h, v18.8h, v13.8h zip2 v14.8h, v7.8h, v15.8h mul v23.8h, v26.8h, v2.8h uzp2 v15.8h, v29.8h, v24.8h smull2 v24.4s, v31.8h, v21.8h str q14, [x0, #0x10] ldr q3, [x7, #0x10] ldr q6, [x8, #0x10] ldr q8, [x10], #0x20 ldur q26, [x10, #-0x10] ld1 { v22.8h }, [x12], #16 uzp1 v30.8h, v8.8h, v26.8h uzp2 v11.8h, v8.8h, v26.8h ldr q8, [x4], #0x20 ldur q26, [x4, #-0x10] ldr q4, [x7], #0x20 uzp1 
v20.8h, v8.8h, v26.8h uzp2 v26.8h, v8.8h, v26.8h ld1 { v8.8h }, [x6], #16 uzp1 v9.8h, v4.8h, v3.8h ldr q25, [x11, #0x10] ldr q29, [x11], #0x20 ld1 { v12.8h }, [x9], #16 ldr q14, [x8], #0x20 smlal2 v24.4s, v16.8h, v19.8h smlal2 v13.4s, v23.8h, v0.8h smlal v18.4s, v23.4h, v0.4h ld1 { v23.8h }, [x3], #16 smlal2 v24.4s, v20.8h, v27.8h uzp2 v7.8h, v18.8h, v13.8h uzp1 v10.8h, v29.8h, v25.8h str q5, [x0], #0x20 zip1 v5.8h, v7.8h, v15.8h sub x13, x13, #0x1 cbnz x13, Lpolyvec_basemul_acc_montgomery_cached_k4_loop smull2 v17.4s, v31.8h, v19.8h uzp2 v1.8h, v14.8h, v6.8h smull v18.4s, v31.4h, v21.4h smlal2 v24.4s, v26.8h, v28.8h smlal2 v17.4s, v16.8h, v23.8h smull v21.4s, v31.4h, v19.4h smlal v18.4s, v16.4h, v19.4h uzp2 v31.8h, v4.8h, v3.8h uzp1 v3.8h, v14.8h, v6.8h smlal v21.4s, v16.4h, v23.4h smlal v18.4s, v20.4h, v27.4h uzp2 v14.8h, v29.8h, v25.8h smlal2 v17.4s, v20.8h, v28.8h smlal v21.4s, v20.4h, v28.4h smlal v18.4s, v26.4h, v28.4h smlal2 v24.4s, v9.8h, v1.8h smlal2 v17.4s, v26.8h, v8.8h smlal v21.4s, v26.4h, v8.4h smlal v18.4s, v9.4h, v1.4h smlal2 v24.4s, v31.8h, v3.8h smlal2 v17.4s, v9.8h, v3.8h smlal v21.4s, v9.4h, v3.4h smlal v18.4s, v31.4h, v3.4h smlal2 v24.4s, v30.8h, v14.8h smlal2 v17.4s, v31.8h, v12.8h smlal v21.4s, v31.4h, v12.4h smlal v18.4s, v30.4h, v14.4h smlal2 v24.4s, v11.8h, v10.8h smlal2 v17.4s, v30.8h, v10.8h smlal v21.4s, v30.4h, v10.4h smlal v18.4s, v11.4h, v10.4h zip2 v19.8h, v7.8h, v15.8h smlal2 v17.4s, v11.8h, v22.8h smlal v21.4s, v11.4h, v22.4h uzp1 v23.8h, v18.8h, v24.8h str q19, [x0, #0x10] mul v19.8h, v23.8h, v2.8h uzp1 v23.8h, v21.8h, v17.8h str q5, [x0], #0x20 mul v26.8h, v23.8h, v2.8h smlal v18.4s, v19.4h, v0.4h smlal2 v24.4s, v19.8h, v0.8h smlal v21.4s, v26.4h, v0.4h smlal2 v17.4s, v26.8h, v0.8h uzp2 v13.8h, v18.8h, v24.8h uzp2 v19.8h, v21.8h, v17.8h zip1 v23.8h, v19.8h, v13.8h zip2 v19.8h, v19.8h, v13.8h str q23, [x0], #0x20 stur q19, [x0, #-0x10] ldp d8, d9, [sp] .cfi_restore d8 .cfi_restore d9 ldp d10, d11, [sp, #0x10] .cfi_restore d10 .cfi_restore d11 ldp d12, d13, [sp, #0x20] .cfi_restore d12 .cfi_restore d13 ldp d14, d15, [sp, #0x30] .cfi_restore d14 .cfi_restore d15 add sp, sp, #0x40 .cfi_adjust_cfa_offset -0x40 ret .cfi_endproc
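The yaml block at the top of this file documents the C-level prototype (mlk_polyvec_basemul_acc_montgomery_cached_asm_k4), while the symbol emitted here is the mlkem_-prefixed name wrapped by S2N_BN_SYMBOL. A minimal caller, sketched against that documented signature; the wrapper function name is hypothetical and a direct call is shown only for illustration:

#include <stdint.h>

/* Declaration matching the ABI table above: r is write-only (512 bytes),
   a and b are read-only (2048 bytes each), b_cache is read-only (1024 bytes). */
void mlkem_polyvec_basemul_acc_montgomery_cached_asm_k4(
    int16_t r[256], const int16_t a[1024], const int16_t b[1024],
    const int16_t b_cache[512]);

/* r receives the NTT-domain inner product of two length-4 polynomial vectors
   (4 x 256 coefficients each); b_cache holds the precomputed b1*zeta terms,
   128 per polynomial pair. */
static void polyvec_basemul_k4(int16_t r[256], const int16_t a[1024],
                               const int16_t b[1024],
                               const int16_t b_cache[512]) {
  mlkem_polyvec_basemul_acc_montgomery_cached_asm_k4(r, a, b, b_cache);
}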
wlsfx/bnbb
242,854
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/crypto/hrss/asm/poly_rq_mul.S
// Copyright (c) 2017, the HRSS authors. // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY // SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION // OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN // CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_SMALL) && \ defined(__linux__) && !defined(MY_ASSEMBLER_IS_TOO_OLD_FOR_AVX) // This is the polynomial multiplication function from [HRSS], provided by kind // permission of the authors. // // HRSS: https://eprint.iacr.org/2017/1005 # This file was generated by poly_rq_mul.py .text .align 32 const3: .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 .word 3 const9: .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 .word 9 const0: .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 const729: .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 .word 729 const3_inv: .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 .word 43691 const5_inv: .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 .word 52429 shuf48_16: .byte 10 .byte 11 .byte 12 .byte 13 .byte 14 .byte 15 .byte 0 .byte 1 .byte 2 .byte 3 .byte 4 .byte 5 .byte 6 .byte 7 .byte 8 .byte 9 .byte 10 .byte 11 .byte 12 .byte 13 .byte 14 .byte 15 .byte 0 .byte 1 .byte 2 .byte 3 .byte 4 .byte 5 .byte 6 .byte 7 .byte 8 .byte 9 shufmin1_mask3: .byte 2 .byte 3 .byte 4 .byte 5 .byte 6 .byte 7 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 .byte 255 mask32_to_16: .word 0xffff .word 0x0 .word 0xffff .word 0x0 .word 0xffff .word 0x0 .word 0xffff .word 0x0 .word 0xffff .word 0x0 .word 0xffff .word 0x0 .word 0xffff .word 0x0 .word 0xffff .word 0x0 mask5_3_5_3: .word 0 .word 0 .word 0 .word 65535 .word 65535 .word 65535 .word 65535 .word 65535 .word 0 .word 0 .word 0 .word 65535 .word 65535 .word 65535 .word 65535 .word 65535 mask3_5_3_5: .word 65535 .word 65535 .word 65535 .word 0 .word 0 .word 0 .word 0 .word 0 .word 65535 .word 65535 .word 65535 .word 0 .word 0 .word 0 .word 0 .word 0 mask3_5_4_3_1: .word 65535 .word 65535 .word 65535 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 65535 .word 65535 .word 65535 .word 0 mask_keephigh: .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 0 .word 65535 .word 65535 .word 
65535 .word 65535 .word 65535 .word 65535 .word 65535 .word 65535 mask_mod8192: .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .word 8191 .text .global poly_Rq_mul .hidden poly_Rq_mul .type poly_Rq_mul, @function .att_syntax prefix poly_Rq_mul: .cfi_startproc _CET_ENDBR push %rbp .cfi_adjust_cfa_offset 8 .cfi_offset rbp, -16 movq %rsp, %rbp .cfi_def_cfa_register rbp push %r12 .cfi_offset r12, -24 # This function originally used a significant amount of stack space. As an # alternative, the needed scratch space is now passed in as the 4th argument. # The amount of scratch space used must thus be kept in sync with # POLY_MUL_RQ_SCRATCH_SPACE in internal.h. # # Setting RSP to point into the given scratch space upsets the ABI tests # therefore all references to RSP are switched to R8. mov %rcx, %r8 addq $6144+12288+512+9408+32, %r8 mov %r8, %rax subq $6144, %r8 mov %r8, %r11 subq $12288, %r8 mov %r8, %r12 subq $512, %r8 vmovdqa const3(%rip), %ymm3 vmovdqu 0(%rsi), %ymm0 vmovdqu 88(%rsi), %ymm1 vmovdqu 176(%rsi), %ymm2 vmovdqu 264(%rsi), %ymm12 vmovdqu 1056(%rsi), %ymm4 vmovdqu 1144(%rsi), %ymm5 vmovdqu 1232(%rsi), %ymm6 vmovdqu 1320(%rsi), %ymm7 vmovdqu 352(%rsi), %ymm8 vmovdqu 440(%rsi), %ymm9 vmovdqu 528(%rsi), %ymm10 vmovdqu 616(%rsi), %ymm11 vmovdqa %ymm0, 0(%rax) vmovdqa %ymm1, 96(%rax) vpaddw %ymm0, %ymm1, %ymm14 vmovdqa %ymm14, 192(%rax) vmovdqa %ymm2, 288(%rax) vmovdqa %ymm12, 384(%rax) vpaddw %ymm2, %ymm12, %ymm14 vmovdqa %ymm14, 480(%rax) vpaddw %ymm0, %ymm2, %ymm14 vmovdqa %ymm14, 576(%rax) vpaddw %ymm1, %ymm12, %ymm15 vmovdqa %ymm15, 672(%rax) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 768(%rax) vmovdqa %ymm4, 5184(%rax) vmovdqa %ymm5, 5280(%rax) vpaddw %ymm4, %ymm5, %ymm14 vmovdqa %ymm14, 5376(%rax) vmovdqa %ymm6, 5472(%rax) vmovdqa %ymm7, 5568(%rax) vpaddw %ymm6, %ymm7, %ymm14 vmovdqa %ymm14, 5664(%rax) vpaddw %ymm4, %ymm6, %ymm14 vmovdqa %ymm14, 5760(%rax) vpaddw %ymm5, %ymm7, %ymm15 vmovdqa %ymm15, 5856(%rax) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 5952(%rax) vmovdqa %ymm0, 0(%r8) vmovdqa %ymm1, 32(%r8) vmovdqa %ymm2, 64(%r8) vmovdqa %ymm12, 96(%r8) vmovdqa %ymm8, 128(%r8) vmovdqa %ymm9, 160(%r8) vmovdqa %ymm10, 192(%r8) vmovdqa %ymm11, 224(%r8) vmovdqu 704(%rsi), %ymm0 vpaddw 0(%r8), %ymm0, %ymm1 vpaddw 128(%r8), %ymm4, %ymm2 vpaddw %ymm2, %ymm1, %ymm8 vpsubw %ymm2, %ymm1, %ymm12 vmovdqa %ymm0, 256(%r8) vmovdqu 792(%rsi), %ymm0 vpaddw 32(%r8), %ymm0, %ymm1 vpaddw 160(%r8), %ymm5, %ymm2 vpaddw %ymm2, %ymm1, %ymm9 vpsubw %ymm2, %ymm1, %ymm13 vmovdqa %ymm0, 288(%r8) vmovdqu 880(%rsi), %ymm0 vpaddw 64(%r8), %ymm0, %ymm1 vpaddw 192(%r8), %ymm6, %ymm2 vpaddw %ymm2, %ymm1, %ymm10 vpsubw %ymm2, %ymm1, %ymm14 vmovdqa %ymm0, 320(%r8) vmovdqu 968(%rsi), %ymm0 vpaddw 96(%r8), %ymm0, %ymm1 vpaddw 224(%r8), %ymm7, %ymm2 vpaddw %ymm2, %ymm1, %ymm11 vpsubw %ymm2, %ymm1, %ymm15 vmovdqa %ymm0, 352(%r8) vmovdqa %ymm8, 864(%rax) vmovdqa %ymm9, 960(%rax) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 1056(%rax) vmovdqa %ymm10, 1152(%rax) vmovdqa %ymm11, 1248(%rax) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 1344(%rax) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 1440(%rax) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 1536(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 1632(%rax) vmovdqa %ymm12, 1728(%rax) vmovdqa %ymm13, 1824(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 1920(%rax) vmovdqa %ymm14, 2016(%rax) vmovdqa %ymm15, 2112(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 
2208(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 2304(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 2400(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 2496(%rax) vmovdqa 256(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm0 vpsllw $2, %ymm4, %ymm1 vpaddw 128(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm8 vpsubw %ymm1, %ymm0, %ymm12 vmovdqa 288(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm0 vpsllw $2, %ymm5, %ymm1 vpaddw 160(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm9 vpsubw %ymm1, %ymm0, %ymm13 vmovdqa 320(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm0 vpsllw $2, %ymm6, %ymm1 vpaddw 192(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm10 vpsubw %ymm1, %ymm0, %ymm14 vmovdqa 352(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm0 vpsllw $2, %ymm7, %ymm1 vpaddw 224(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm11 vpsubw %ymm1, %ymm0, %ymm15 vmovdqa %ymm8, 2592(%rax) vmovdqa %ymm9, 2688(%rax) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 2784(%rax) vmovdqa %ymm10, 2880(%rax) vmovdqa %ymm11, 2976(%rax) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 3072(%rax) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 3168(%rax) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 3264(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 3360(%rax) vmovdqa %ymm12, 3456(%rax) vmovdqa %ymm13, 3552(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 3648(%rax) vmovdqa %ymm14, 3744(%rax) vmovdqa %ymm15, 3840(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 3936(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4032(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4128(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 4224(%rax) vpmullw %ymm3, %ymm4, %ymm0 vpaddw 256(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 128(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm12 vpmullw %ymm3, %ymm5, %ymm0 vpaddw 288(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 160(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm13 vpmullw %ymm3, %ymm6, %ymm0 vpaddw 320(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 192(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm14 vpmullw %ymm3, %ymm7, %ymm0 vpaddw 352(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 224(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm15 vmovdqa %ymm12, 4320(%rax) vmovdqa %ymm13, 4416(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 4512(%rax) vmovdqa %ymm14, 4608(%rax) vmovdqa %ymm15, 4704(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4800(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4896(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4992(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 5088(%rax) vmovdqu 32(%rsi), %ymm0 vmovdqu 120(%rsi), %ymm1 vmovdqu 208(%rsi), %ymm2 vmovdqu 296(%rsi), %ymm12 vmovdqu 1088(%rsi), %ymm4 vmovdqu 1176(%rsi), %ymm5 vmovdqu 1264(%rsi), %ymm6 vmovdqu 1352(%rsi), %ymm7 vmovdqu 384(%rsi), %ymm8 vmovdqu 472(%rsi), %ymm9 vmovdqu 560(%rsi), %ymm10 vmovdqu 648(%rsi), %ymm11 vmovdqa %ymm0, 32(%rax) vmovdqa %ymm1, 128(%rax) vpaddw %ymm0, %ymm1, %ymm14 vmovdqa %ymm14, 224(%rax) vmovdqa %ymm2, 320(%rax) vmovdqa %ymm12, 416(%rax) vpaddw %ymm2, %ymm12, %ymm14 vmovdqa %ymm14, 512(%rax) vpaddw %ymm0, %ymm2, %ymm14 vmovdqa %ymm14, 608(%rax) vpaddw %ymm1, %ymm12, %ymm15 vmovdqa %ymm15, 704(%rax) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 800(%rax) vmovdqa %ymm4, 5216(%rax) 
vmovdqa %ymm5, 5312(%rax) vpaddw %ymm4, %ymm5, %ymm14 vmovdqa %ymm14, 5408(%rax) vmovdqa %ymm6, 5504(%rax) vmovdqa %ymm7, 5600(%rax) vpaddw %ymm6, %ymm7, %ymm14 vmovdqa %ymm14, 5696(%rax) vpaddw %ymm4, %ymm6, %ymm14 vmovdqa %ymm14, 5792(%rax) vpaddw %ymm5, %ymm7, %ymm15 vmovdqa %ymm15, 5888(%rax) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 5984(%rax) vmovdqa %ymm0, 0(%r8) vmovdqa %ymm1, 32(%r8) vmovdqa %ymm2, 64(%r8) vmovdqa %ymm12, 96(%r8) vmovdqa %ymm8, 128(%r8) vmovdqa %ymm9, 160(%r8) vmovdqa %ymm10, 192(%r8) vmovdqa %ymm11, 224(%r8) vmovdqu 736(%rsi), %ymm0 vpaddw 0(%r8), %ymm0, %ymm1 vpaddw 128(%r8), %ymm4, %ymm2 vpaddw %ymm2, %ymm1, %ymm8 vpsubw %ymm2, %ymm1, %ymm12 vmovdqa %ymm0, 256(%r8) vmovdqu 824(%rsi), %ymm0 vpaddw 32(%r8), %ymm0, %ymm1 vpaddw 160(%r8), %ymm5, %ymm2 vpaddw %ymm2, %ymm1, %ymm9 vpsubw %ymm2, %ymm1, %ymm13 vmovdqa %ymm0, 288(%r8) vmovdqu 912(%rsi), %ymm0 vpaddw 64(%r8), %ymm0, %ymm1 vpaddw 192(%r8), %ymm6, %ymm2 vpaddw %ymm2, %ymm1, %ymm10 vpsubw %ymm2, %ymm1, %ymm14 vmovdqa %ymm0, 320(%r8) vmovdqu 1000(%rsi), %ymm0 vpaddw 96(%r8), %ymm0, %ymm1 vpaddw 224(%r8), %ymm7, %ymm2 vpaddw %ymm2, %ymm1, %ymm11 vpsubw %ymm2, %ymm1, %ymm15 vmovdqa %ymm0, 352(%r8) vmovdqa %ymm8, 896(%rax) vmovdqa %ymm9, 992(%rax) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 1088(%rax) vmovdqa %ymm10, 1184(%rax) vmovdqa %ymm11, 1280(%rax) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 1376(%rax) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 1472(%rax) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 1568(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 1664(%rax) vmovdqa %ymm12, 1760(%rax) vmovdqa %ymm13, 1856(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 1952(%rax) vmovdqa %ymm14, 2048(%rax) vmovdqa %ymm15, 2144(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 2240(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 2336(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 2432(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 2528(%rax) vmovdqa 256(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm0 vpsllw $2, %ymm4, %ymm1 vpaddw 128(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm8 vpsubw %ymm1, %ymm0, %ymm12 vmovdqa 288(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm0 vpsllw $2, %ymm5, %ymm1 vpaddw 160(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm9 vpsubw %ymm1, %ymm0, %ymm13 vmovdqa 320(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm0 vpsllw $2, %ymm6, %ymm1 vpaddw 192(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm10 vpsubw %ymm1, %ymm0, %ymm14 vmovdqa 352(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm0 vpsllw $2, %ymm7, %ymm1 vpaddw 224(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm11 vpsubw %ymm1, %ymm0, %ymm15 vmovdqa %ymm8, 2624(%rax) vmovdqa %ymm9, 2720(%rax) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 2816(%rax) vmovdqa %ymm10, 2912(%rax) vmovdqa %ymm11, 3008(%rax) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 3104(%rax) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 3200(%rax) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 3296(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 3392(%rax) vmovdqa %ymm12, 3488(%rax) vmovdqa %ymm13, 3584(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 3680(%rax) vmovdqa %ymm14, 3776(%rax) vmovdqa %ymm15, 3872(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 3968(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4064(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4160(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 4256(%rax) 
vpmullw %ymm3, %ymm4, %ymm0 vpaddw 256(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 128(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm12 vpmullw %ymm3, %ymm5, %ymm0 vpaddw 288(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 160(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm13 vpmullw %ymm3, %ymm6, %ymm0 vpaddw 320(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 192(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm14 vpmullw %ymm3, %ymm7, %ymm0 vpaddw 352(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 224(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm15 vmovdqa %ymm12, 4352(%rax) vmovdqa %ymm13, 4448(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 4544(%rax) vmovdqa %ymm14, 4640(%rax) vmovdqa %ymm15, 4736(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4832(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4928(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 5024(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 5120(%rax) vmovdqu 64(%rsi), %ymm0 vmovdqu 152(%rsi), %ymm1 vmovdqu 240(%rsi), %ymm2 vmovdqu 328(%rsi), %ymm12 vmovdqu 1120(%rsi), %ymm4 vmovdqu 1208(%rsi), %ymm5 vmovdqu 1296(%rsi), %ymm6 # Only 18 bytes more can be read, but vmovdqu reads 32. # Copy 18 bytes to the red zone and zero pad to 32 bytes. xor %r9, %r9 movq %r9, -16(%rsp) movq %r9, -8(%rsp) movq 1384(%rsi), %r9 movq %r9, -32(%rsp) movq 1384+8(%rsi), %r9 movq %r9, -24(%rsp) movw 1384+16(%rsi), %r9w movw %r9w, -16(%rsp) vmovdqu -32(%rsp), %ymm7 vmovdqu 416(%rsi), %ymm8 vmovdqu 504(%rsi), %ymm9 vmovdqu 592(%rsi), %ymm10 vmovdqu 680(%rsi), %ymm11 vmovdqa %ymm0, 64(%rax) vmovdqa %ymm1, 160(%rax) vpaddw %ymm0, %ymm1, %ymm14 vmovdqa %ymm14, 256(%rax) vmovdqa %ymm2, 352(%rax) vmovdqa %ymm12, 448(%rax) vpaddw %ymm2, %ymm12, %ymm14 vmovdqa %ymm14, 544(%rax) vpaddw %ymm0, %ymm2, %ymm14 vmovdqa %ymm14, 640(%rax) vpaddw %ymm1, %ymm12, %ymm15 vmovdqa %ymm15, 736(%rax) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 832(%rax) vmovdqa %ymm4, 5248(%rax) vmovdqa %ymm5, 5344(%rax) vpaddw %ymm4, %ymm5, %ymm14 vmovdqa %ymm14, 5440(%rax) vmovdqa %ymm6, 5536(%rax) vmovdqa %ymm7, 5632(%rax) vpaddw %ymm6, %ymm7, %ymm14 vmovdqa %ymm14, 5728(%rax) vpaddw %ymm4, %ymm6, %ymm14 vmovdqa %ymm14, 5824(%rax) vpaddw %ymm5, %ymm7, %ymm15 vmovdqa %ymm15, 5920(%rax) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 6016(%rax) vmovdqa %ymm0, 0(%r8) vmovdqa %ymm1, 32(%r8) vmovdqa %ymm2, 64(%r8) vmovdqa %ymm12, 96(%r8) vmovdqa %ymm8, 128(%r8) vmovdqa %ymm9, 160(%r8) vmovdqa %ymm10, 192(%r8) vmovdqa %ymm11, 224(%r8) vmovdqu 768(%rsi), %ymm0 vpaddw 0(%r8), %ymm0, %ymm1 vpaddw 128(%r8), %ymm4, %ymm2 vpaddw %ymm2, %ymm1, %ymm8 vpsubw %ymm2, %ymm1, %ymm12 vmovdqa %ymm0, 256(%r8) vmovdqu 856(%rsi), %ymm0 vpaddw 32(%r8), %ymm0, %ymm1 vpaddw 160(%r8), %ymm5, %ymm2 vpaddw %ymm2, %ymm1, %ymm9 vpsubw %ymm2, %ymm1, %ymm13 vmovdqa %ymm0, 288(%r8) vmovdqu 944(%rsi), %ymm0 vpaddw 64(%r8), %ymm0, %ymm1 vpaddw 192(%r8), %ymm6, %ymm2 vpaddw %ymm2, %ymm1, %ymm10 vpsubw %ymm2, %ymm1, %ymm14 vmovdqa %ymm0, 320(%r8) vmovdqu 1032(%rsi), %ymm0 vpaddw 96(%r8), %ymm0, %ymm1 vpaddw 224(%r8), %ymm7, %ymm2 vpaddw %ymm2, %ymm1, %ymm11 vpsubw %ymm2, %ymm1, %ymm15 vmovdqa %ymm0, 352(%r8) vmovdqa %ymm8, 928(%rax) vmovdqa %ymm9, 1024(%rax) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 1120(%rax) vmovdqa %ymm10, 1216(%rax) vmovdqa %ymm11, 1312(%rax) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 1408(%rax) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 1504(%rax) vpaddw 
%ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 1600(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 1696(%rax) vmovdqa %ymm12, 1792(%rax) vmovdqa %ymm13, 1888(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 1984(%rax) vmovdqa %ymm14, 2080(%rax) vmovdqa %ymm15, 2176(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 2272(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 2368(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 2464(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 2560(%rax) vmovdqa 256(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm0 vpsllw $2, %ymm4, %ymm1 vpaddw 128(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm8 vpsubw %ymm1, %ymm0, %ymm12 vmovdqa 288(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm0 vpsllw $2, %ymm5, %ymm1 vpaddw 160(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm9 vpsubw %ymm1, %ymm0, %ymm13 vmovdqa 320(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm0 vpsllw $2, %ymm6, %ymm1 vpaddw 192(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm10 vpsubw %ymm1, %ymm0, %ymm14 vmovdqa 352(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm0 vpsllw $2, %ymm7, %ymm1 vpaddw 224(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm11 vpsubw %ymm1, %ymm0, %ymm15 vmovdqa %ymm8, 2656(%rax) vmovdqa %ymm9, 2752(%rax) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 2848(%rax) vmovdqa %ymm10, 2944(%rax) vmovdqa %ymm11, 3040(%rax) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 3136(%rax) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 3232(%rax) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 3328(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 3424(%rax) vmovdqa %ymm12, 3520(%rax) vmovdqa %ymm13, 3616(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 3712(%rax) vmovdqa %ymm14, 3808(%rax) vmovdqa %ymm15, 3904(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4000(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4096(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4192(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 4288(%rax) vpmullw %ymm3, %ymm4, %ymm0 vpaddw 256(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 128(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm12 vpmullw %ymm3, %ymm5, %ymm0 vpaddw 288(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 160(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm13 vpmullw %ymm3, %ymm6, %ymm0 vpaddw 320(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 192(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm14 vpmullw %ymm3, %ymm7, %ymm0 vpaddw 352(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 224(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm15 vmovdqa %ymm12, 4384(%rax) vmovdqa %ymm13, 4480(%rax) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 4576(%rax) vmovdqa %ymm14, 4672(%rax) vmovdqa %ymm15, 4768(%rax) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4864(%rax) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4960(%rax) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 5056(%rax) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 5152(%rax) vmovdqu 0(%rdx), %ymm0 vmovdqu 88(%rdx), %ymm1 vmovdqu 176(%rdx), %ymm2 vmovdqu 264(%rdx), %ymm12 vmovdqu 1056(%rdx), %ymm4 vmovdqu 1144(%rdx), %ymm5 vmovdqu 1232(%rdx), %ymm6 vmovdqu 1320(%rdx), %ymm7 vmovdqu 352(%rdx), %ymm8 vmovdqu 440(%rdx), %ymm9 vmovdqu 528(%rdx), %ymm10 vmovdqu 616(%rdx), %ymm11 vmovdqa %ymm0, 0(%r11) vmovdqa %ymm1, 96(%r11) vpaddw %ymm0, %ymm1, %ymm14 vmovdqa %ymm14, 
192(%r11) vmovdqa %ymm2, 288(%r11) vmovdqa %ymm12, 384(%r11) vpaddw %ymm2, %ymm12, %ymm14 vmovdqa %ymm14, 480(%r11) vpaddw %ymm0, %ymm2, %ymm14 vmovdqa %ymm14, 576(%r11) vpaddw %ymm1, %ymm12, %ymm15 vmovdqa %ymm15, 672(%r11) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 768(%r11) vmovdqa %ymm4, 5184(%r11) vmovdqa %ymm5, 5280(%r11) vpaddw %ymm4, %ymm5, %ymm14 vmovdqa %ymm14, 5376(%r11) vmovdqa %ymm6, 5472(%r11) vmovdqa %ymm7, 5568(%r11) vpaddw %ymm6, %ymm7, %ymm14 vmovdqa %ymm14, 5664(%r11) vpaddw %ymm4, %ymm6, %ymm14 vmovdqa %ymm14, 5760(%r11) vpaddw %ymm5, %ymm7, %ymm15 vmovdqa %ymm15, 5856(%r11) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 5952(%r11) vmovdqa %ymm0, 0(%r8) vmovdqa %ymm1, 32(%r8) vmovdqa %ymm2, 64(%r8) vmovdqa %ymm12, 96(%r8) vmovdqa %ymm8, 128(%r8) vmovdqa %ymm9, 160(%r8) vmovdqa %ymm10, 192(%r8) vmovdqa %ymm11, 224(%r8) vmovdqu 704(%rdx), %ymm0 vpaddw 0(%r8), %ymm0, %ymm1 vpaddw 128(%r8), %ymm4, %ymm2 vpaddw %ymm2, %ymm1, %ymm8 vpsubw %ymm2, %ymm1, %ymm12 vmovdqa %ymm0, 256(%r8) vmovdqu 792(%rdx), %ymm0 vpaddw 32(%r8), %ymm0, %ymm1 vpaddw 160(%r8), %ymm5, %ymm2 vpaddw %ymm2, %ymm1, %ymm9 vpsubw %ymm2, %ymm1, %ymm13 vmovdqa %ymm0, 288(%r8) vmovdqu 880(%rdx), %ymm0 vpaddw 64(%r8), %ymm0, %ymm1 vpaddw 192(%r8), %ymm6, %ymm2 vpaddw %ymm2, %ymm1, %ymm10 vpsubw %ymm2, %ymm1, %ymm14 vmovdqa %ymm0, 320(%r8) vmovdqu 968(%rdx), %ymm0 vpaddw 96(%r8), %ymm0, %ymm1 vpaddw 224(%r8), %ymm7, %ymm2 vpaddw %ymm2, %ymm1, %ymm11 vpsubw %ymm2, %ymm1, %ymm15 vmovdqa %ymm0, 352(%r8) vmovdqa %ymm8, 864(%r11) vmovdqa %ymm9, 960(%r11) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 1056(%r11) vmovdqa %ymm10, 1152(%r11) vmovdqa %ymm11, 1248(%r11) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 1344(%r11) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 1440(%r11) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 1536(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 1632(%r11) vmovdqa %ymm12, 1728(%r11) vmovdqa %ymm13, 1824(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 1920(%r11) vmovdqa %ymm14, 2016(%r11) vmovdqa %ymm15, 2112(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 2208(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 2304(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 2400(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 2496(%r11) vmovdqa 256(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm0 vpsllw $2, %ymm4, %ymm1 vpaddw 128(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm8 vpsubw %ymm1, %ymm0, %ymm12 vmovdqa 288(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm0 vpsllw $2, %ymm5, %ymm1 vpaddw 160(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm9 vpsubw %ymm1, %ymm0, %ymm13 vmovdqa 320(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm0 vpsllw $2, %ymm6, %ymm1 vpaddw 192(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm10 vpsubw %ymm1, %ymm0, %ymm14 vmovdqa 352(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm0 vpsllw $2, %ymm7, %ymm1 vpaddw 224(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm11 vpsubw %ymm1, %ymm0, %ymm15 vmovdqa %ymm8, 2592(%r11) vmovdqa %ymm9, 2688(%r11) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 2784(%r11) vmovdqa %ymm10, 2880(%r11) vmovdqa %ymm11, 2976(%r11) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 3072(%r11) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 3168(%r11) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 3264(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 3360(%r11) vmovdqa %ymm12, 3456(%r11) vmovdqa %ymm13, 3552(%r11) vpaddw %ymm12, %ymm13, 
%ymm0 vmovdqa %ymm0, 3648(%r11) vmovdqa %ymm14, 3744(%r11) vmovdqa %ymm15, 3840(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 3936(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4032(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4128(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 4224(%r11) vpmullw %ymm3, %ymm4, %ymm0 vpaddw 256(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 128(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm12 vpmullw %ymm3, %ymm5, %ymm0 vpaddw 288(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 160(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm13 vpmullw %ymm3, %ymm6, %ymm0 vpaddw 320(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 192(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm14 vpmullw %ymm3, %ymm7, %ymm0 vpaddw 352(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 224(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm15 vmovdqa %ymm12, 4320(%r11) vmovdqa %ymm13, 4416(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 4512(%r11) vmovdqa %ymm14, 4608(%r11) vmovdqa %ymm15, 4704(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4800(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4896(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4992(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 5088(%r11) vmovdqu 32(%rdx), %ymm0 vmovdqu 120(%rdx), %ymm1 vmovdqu 208(%rdx), %ymm2 vmovdqu 296(%rdx), %ymm12 vmovdqu 1088(%rdx), %ymm4 vmovdqu 1176(%rdx), %ymm5 vmovdqu 1264(%rdx), %ymm6 vmovdqu 1352(%rdx), %ymm7 vmovdqu 384(%rdx), %ymm8 vmovdqu 472(%rdx), %ymm9 vmovdqu 560(%rdx), %ymm10 vmovdqu 648(%rdx), %ymm11 vmovdqa %ymm0, 32(%r11) vmovdqa %ymm1, 128(%r11) vpaddw %ymm0, %ymm1, %ymm14 vmovdqa %ymm14, 224(%r11) vmovdqa %ymm2, 320(%r11) vmovdqa %ymm12, 416(%r11) vpaddw %ymm2, %ymm12, %ymm14 vmovdqa %ymm14, 512(%r11) vpaddw %ymm0, %ymm2, %ymm14 vmovdqa %ymm14, 608(%r11) vpaddw %ymm1, %ymm12, %ymm15 vmovdqa %ymm15, 704(%r11) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 800(%r11) vmovdqa %ymm4, 5216(%r11) vmovdqa %ymm5, 5312(%r11) vpaddw %ymm4, %ymm5, %ymm14 vmovdqa %ymm14, 5408(%r11) vmovdqa %ymm6, 5504(%r11) vmovdqa %ymm7, 5600(%r11) vpaddw %ymm6, %ymm7, %ymm14 vmovdqa %ymm14, 5696(%r11) vpaddw %ymm4, %ymm6, %ymm14 vmovdqa %ymm14, 5792(%r11) vpaddw %ymm5, %ymm7, %ymm15 vmovdqa %ymm15, 5888(%r11) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 5984(%r11) vmovdqa %ymm0, 0(%r8) vmovdqa %ymm1, 32(%r8) vmovdqa %ymm2, 64(%r8) vmovdqa %ymm12, 96(%r8) vmovdqa %ymm8, 128(%r8) vmovdqa %ymm9, 160(%r8) vmovdqa %ymm10, 192(%r8) vmovdqa %ymm11, 224(%r8) vmovdqu 736(%rdx), %ymm0 vpaddw 0(%r8), %ymm0, %ymm1 vpaddw 128(%r8), %ymm4, %ymm2 vpaddw %ymm2, %ymm1, %ymm8 vpsubw %ymm2, %ymm1, %ymm12 vmovdqa %ymm0, 256(%r8) vmovdqu 824(%rdx), %ymm0 vpaddw 32(%r8), %ymm0, %ymm1 vpaddw 160(%r8), %ymm5, %ymm2 vpaddw %ymm2, %ymm1, %ymm9 vpsubw %ymm2, %ymm1, %ymm13 vmovdqa %ymm0, 288(%r8) vmovdqu 912(%rdx), %ymm0 vpaddw 64(%r8), %ymm0, %ymm1 vpaddw 192(%r8), %ymm6, %ymm2 vpaddw %ymm2, %ymm1, %ymm10 vpsubw %ymm2, %ymm1, %ymm14 vmovdqa %ymm0, 320(%r8) vmovdqu 1000(%rdx), %ymm0 vpaddw 96(%r8), %ymm0, %ymm1 vpaddw 224(%r8), %ymm7, %ymm2 vpaddw %ymm2, %ymm1, %ymm11 vpsubw %ymm2, %ymm1, %ymm15 vmovdqa %ymm0, 352(%r8) vmovdqa %ymm8, 896(%r11) vmovdqa %ymm9, 992(%r11) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 1088(%r11) vmovdqa %ymm10, 1184(%r11) vmovdqa %ymm11, 1280(%r11) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 1376(%r11) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 1472(%r11) 
vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 1568(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 1664(%r11) vmovdqa %ymm12, 1760(%r11) vmovdqa %ymm13, 1856(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 1952(%r11) vmovdqa %ymm14, 2048(%r11) vmovdqa %ymm15, 2144(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 2240(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 2336(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 2432(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 2528(%r11) vmovdqa 256(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm0 vpsllw $2, %ymm4, %ymm1 vpaddw 128(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm8 vpsubw %ymm1, %ymm0, %ymm12 vmovdqa 288(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm0 vpsllw $2, %ymm5, %ymm1 vpaddw 160(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm9 vpsubw %ymm1, %ymm0, %ymm13 vmovdqa 320(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm0 vpsllw $2, %ymm6, %ymm1 vpaddw 192(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm10 vpsubw %ymm1, %ymm0, %ymm14 vmovdqa 352(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm0 vpsllw $2, %ymm7, %ymm1 vpaddw 224(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm11 vpsubw %ymm1, %ymm0, %ymm15 vmovdqa %ymm8, 2624(%r11) vmovdqa %ymm9, 2720(%r11) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 2816(%r11) vmovdqa %ymm10, 2912(%r11) vmovdqa %ymm11, 3008(%r11) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 3104(%r11) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 3200(%r11) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 3296(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 3392(%r11) vmovdqa %ymm12, 3488(%r11) vmovdqa %ymm13, 3584(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 3680(%r11) vmovdqa %ymm14, 3776(%r11) vmovdqa %ymm15, 3872(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 3968(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4064(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4160(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 4256(%r11) vpmullw %ymm3, %ymm4, %ymm0 vpaddw 256(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 128(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm12 vpmullw %ymm3, %ymm5, %ymm0 vpaddw 288(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 160(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm13 vpmullw %ymm3, %ymm6, %ymm0 vpaddw 320(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 192(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm14 vpmullw %ymm3, %ymm7, %ymm0 vpaddw 352(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 224(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm15 vmovdqa %ymm12, 4352(%r11) vmovdqa %ymm13, 4448(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 4544(%r11) vmovdqa %ymm14, 4640(%r11) vmovdqa %ymm15, 4736(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4832(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4928(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 5024(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 5120(%r11) vmovdqu 64(%rdx), %ymm0 vmovdqu 152(%rdx), %ymm1 vmovdqu 240(%rdx), %ymm2 vmovdqu 328(%rdx), %ymm12 vmovdqu 1120(%rdx), %ymm4 vmovdqu 1208(%rdx), %ymm5 vmovdqu 1296(%rdx), %ymm6 # Only 18 bytes more can be read, but vmovdqu reads 32. # Copy 18 bytes to the red zone and zero pad to 32 bytes. 
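# In C terms this is roughly: zero a 32-byte buffer in the red zone below the
# stack pointer, copy the last 18 valid input bytes into it, then load the whole
# buffer with vmovdqu. The 18-byte figure presumably comes from the input holding
# 701 16-bit coefficients (1402 bytes), of which 1402 - 1384 = 18 remain at this
# offset; the 8 + 8 + 2 byte moves below build exactly that zero-padded block at
# -32(%rsp).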
xor %r9, %r9 movq %r9, -16(%rsp) movq %r9, -8(%rsp) movq 1384(%rdx), %r9 movq %r9, -32(%rsp) movq 1384+8(%rdx), %r9 movq %r9, -24(%rsp) movw 1384+16(%rdx), %r9w movw %r9w, -16(%rsp) vmovdqu -32(%rsp), %ymm7 vmovdqu 416(%rdx), %ymm8 vmovdqu 504(%rdx), %ymm9 vmovdqu 592(%rdx), %ymm10 vmovdqu 680(%rdx), %ymm11 vmovdqa %ymm0, 64(%r11) vmovdqa %ymm1, 160(%r11) vpaddw %ymm0, %ymm1, %ymm14 vmovdqa %ymm14, 256(%r11) vmovdqa %ymm2, 352(%r11) vmovdqa %ymm12, 448(%r11) vpaddw %ymm2, %ymm12, %ymm14 vmovdqa %ymm14, 544(%r11) vpaddw %ymm0, %ymm2, %ymm14 vmovdqa %ymm14, 640(%r11) vpaddw %ymm1, %ymm12, %ymm15 vmovdqa %ymm15, 736(%r11) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 832(%r11) vmovdqa %ymm4, 5248(%r11) vmovdqa %ymm5, 5344(%r11) vpaddw %ymm4, %ymm5, %ymm14 vmovdqa %ymm14, 5440(%r11) vmovdqa %ymm6, 5536(%r11) vmovdqa %ymm7, 5632(%r11) vpaddw %ymm6, %ymm7, %ymm14 vmovdqa %ymm14, 5728(%r11) vpaddw %ymm4, %ymm6, %ymm14 vmovdqa %ymm14, 5824(%r11) vpaddw %ymm5, %ymm7, %ymm15 vmovdqa %ymm15, 5920(%r11) vpaddw %ymm14, %ymm15, %ymm14 vmovdqa %ymm14, 6016(%r11) vmovdqa %ymm0, 0(%r8) vmovdqa %ymm1, 32(%r8) vmovdqa %ymm2, 64(%r8) vmovdqa %ymm12, 96(%r8) vmovdqa %ymm8, 128(%r8) vmovdqa %ymm9, 160(%r8) vmovdqa %ymm10, 192(%r8) vmovdqa %ymm11, 224(%r8) vmovdqu 768(%rdx), %ymm0 vpaddw 0(%r8), %ymm0, %ymm1 vpaddw 128(%r8), %ymm4, %ymm2 vpaddw %ymm2, %ymm1, %ymm8 vpsubw %ymm2, %ymm1, %ymm12 vmovdqa %ymm0, 256(%r8) vmovdqu 856(%rdx), %ymm0 vpaddw 32(%r8), %ymm0, %ymm1 vpaddw 160(%r8), %ymm5, %ymm2 vpaddw %ymm2, %ymm1, %ymm9 vpsubw %ymm2, %ymm1, %ymm13 vmovdqa %ymm0, 288(%r8) vmovdqu 944(%rdx), %ymm0 vpaddw 64(%r8), %ymm0, %ymm1 vpaddw 192(%r8), %ymm6, %ymm2 vpaddw %ymm2, %ymm1, %ymm10 vpsubw %ymm2, %ymm1, %ymm14 vmovdqa %ymm0, 320(%r8) vmovdqu 1032(%rdx), %ymm0 vpaddw 96(%r8), %ymm0, %ymm1 vpaddw 224(%r8), %ymm7, %ymm2 vpaddw %ymm2, %ymm1, %ymm11 vpsubw %ymm2, %ymm1, %ymm15 vmovdqa %ymm0, 352(%r8) vmovdqa %ymm8, 928(%r11) vmovdqa %ymm9, 1024(%r11) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 1120(%r11) vmovdqa %ymm10, 1216(%r11) vmovdqa %ymm11, 1312(%r11) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 1408(%r11) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 1504(%r11) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 1600(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 1696(%r11) vmovdqa %ymm12, 1792(%r11) vmovdqa %ymm13, 1888(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 1984(%r11) vmovdqa %ymm14, 2080(%r11) vmovdqa %ymm15, 2176(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 2272(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 2368(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 2464(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 2560(%r11) vmovdqa 256(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm0 vpsllw $2, %ymm4, %ymm1 vpaddw 128(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm8 vpsubw %ymm1, %ymm0, %ymm12 vmovdqa 288(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm0 vpsllw $2, %ymm5, %ymm1 vpaddw 160(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm9 vpsubw %ymm1, %ymm0, %ymm13 vmovdqa 320(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm0 vpsllw $2, %ymm6, %ymm1 vpaddw 192(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm10 vpsubw %ymm1, %ymm0, %ymm14 vmovdqa 352(%r8), %ymm0 vpsllw $2, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm0 vpsllw $2, %ymm7, %ymm1 vpaddw 224(%r8), %ymm1, %ymm1 vpsllw $1, %ymm1, %ymm1 vpaddw %ymm1, %ymm0, %ymm11 vpsubw %ymm1, %ymm0, %ymm15 vmovdqa %ymm8, 2656(%r11) vmovdqa %ymm9, 
2752(%r11) vpaddw %ymm8, %ymm9, %ymm0 vmovdqa %ymm0, 2848(%r11) vmovdqa %ymm10, 2944(%r11) vmovdqa %ymm11, 3040(%r11) vpaddw %ymm10, %ymm11, %ymm0 vmovdqa %ymm0, 3136(%r11) vpaddw %ymm8, %ymm10, %ymm0 vmovdqa %ymm0, 3232(%r11) vpaddw %ymm9, %ymm11, %ymm1 vmovdqa %ymm1, 3328(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 3424(%r11) vmovdqa %ymm12, 3520(%r11) vmovdqa %ymm13, 3616(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 3712(%r11) vmovdqa %ymm14, 3808(%r11) vmovdqa %ymm15, 3904(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4000(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4096(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 4192(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 4288(%r11) vpmullw %ymm3, %ymm4, %ymm0 vpaddw 256(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 128(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 0(%r8), %ymm0, %ymm12 vpmullw %ymm3, %ymm5, %ymm0 vpaddw 288(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 160(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 32(%r8), %ymm0, %ymm13 vpmullw %ymm3, %ymm6, %ymm0 vpaddw 320(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 192(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 64(%r8), %ymm0, %ymm14 vpmullw %ymm3, %ymm7, %ymm0 vpaddw 352(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 224(%r8), %ymm0, %ymm0 vpmullw %ymm3, %ymm0, %ymm0 vpaddw 96(%r8), %ymm0, %ymm15 vmovdqa %ymm12, 4384(%r11) vmovdqa %ymm13, 4480(%r11) vpaddw %ymm12, %ymm13, %ymm0 vmovdqa %ymm0, 4576(%r11) vmovdqa %ymm14, 4672(%r11) vmovdqa %ymm15, 4768(%r11) vpaddw %ymm14, %ymm15, %ymm0 vmovdqa %ymm0, 4864(%r11) vpaddw %ymm12, %ymm14, %ymm0 vmovdqa %ymm0, 4960(%r11) vpaddw %ymm13, %ymm15, %ymm1 vmovdqa %ymm1, 5056(%r11) vpaddw %ymm0, %ymm1, %ymm0 vmovdqa %ymm0, 5152(%r11) subq $9408, %r8 mov $4, %ecx karatsuba_loop_4eced63f144beffcb0247f9c6f67d165: mov %r8, %r9 mov %r8, %r10 subq $32, %r8 vmovdqa 0(%rax), %ymm0 vmovdqa 192(%rax), %ymm1 vmovdqa 384(%rax), %ymm2 vmovdqa 576(%rax), %ymm3 vpunpcklwd 96(%rax), %ymm0, %ymm4 vpunpckhwd 96(%rax), %ymm0, %ymm5 vpunpcklwd 288(%rax), %ymm1, %ymm6 vpunpckhwd 288(%rax), %ymm1, %ymm7 vpunpcklwd 480(%rax), %ymm2, %ymm8 vpunpckhwd 480(%rax), %ymm2, %ymm9 vpunpcklwd 672(%rax), %ymm3, %ymm10 vpunpckhwd 672(%rax), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 768(%rax), %ymm0 vmovdqa 960(%rax), %ymm1 vmovdqa 1152(%rax), %ymm2 vmovdqa 1344(%rax), %ymm3 vpunpcklwd 864(%rax), %ymm0, %ymm12 vpunpckhwd 864(%rax), %ymm0, %ymm13 vpunpcklwd 1056(%rax), %ymm1, %ymm14 vpunpckhwd 1056(%rax), %ymm1, %ymm15 vpunpcklwd 1248(%rax), %ymm2, %ymm0 vpunpckhwd 1248(%rax), %ymm2, %ymm1 vpunpcklwd 1440(%rax), %ymm3, %ymm2 vpunpckhwd 1440(%rax), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 
vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 0(%r9) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 32(%r9) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 64(%r9) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 96(%r9) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 128(%r9) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 160(%r9) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 192(%r9) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 256(%r9) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 288(%r9) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 320(%r9) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 352(%r9) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 384(%r9) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 416(%r9) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 448(%r9) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 224(%r9) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 480(%r9) vmovdqa 32(%rax), %ymm0 vmovdqa 224(%rax), %ymm1 vmovdqa 416(%rax), %ymm2 vmovdqa 608(%rax), %ymm3 vpunpcklwd 128(%rax), %ymm0, %ymm4 vpunpckhwd 128(%rax), %ymm0, %ymm5 vpunpcklwd 320(%rax), %ymm1, %ymm6 vpunpckhwd 320(%rax), %ymm1, %ymm7 vpunpcklwd 512(%rax), %ymm2, %ymm8 vpunpckhwd 512(%rax), %ymm2, %ymm9 vpunpcklwd 704(%rax), %ymm3, %ymm10 vpunpckhwd 704(%rax), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 800(%rax), %ymm0 vmovdqa 992(%rax), %ymm1 vmovdqa 1184(%rax), %ymm2 vmovdqa 1376(%rax), %ymm3 vpunpcklwd 896(%rax), %ymm0, %ymm12 vpunpckhwd 896(%rax), %ymm0, %ymm13 vpunpcklwd 1088(%rax), %ymm1, %ymm14 vpunpckhwd 1088(%rax), %ymm1, %ymm15 vpunpcklwd 1280(%rax), %ymm2, %ymm0 vpunpckhwd 1280(%rax), %ymm2, %ymm1 vpunpcklwd 1472(%rax), %ymm3, %ymm2 vpunpckhwd 1472(%rax), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 512(%r9) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 544(%r9) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 576(%r9) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 608(%r9) vinserti128 $1, %xmm12, %ymm8, 
%ymm15 vmovdqa %ymm15, 640(%r9) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 672(%r9) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 704(%r9) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 768(%r9) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 800(%r9) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 832(%r9) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 864(%r9) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 896(%r9) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 928(%r9) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 960(%r9) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 736(%r9) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 992(%r9) vmovdqa 64(%rax), %ymm0 vmovdqa 256(%rax), %ymm1 vmovdqa 448(%rax), %ymm2 vmovdqa 640(%rax), %ymm3 vpunpcklwd 160(%rax), %ymm0, %ymm4 vpunpckhwd 160(%rax), %ymm0, %ymm5 vpunpcklwd 352(%rax), %ymm1, %ymm6 vpunpckhwd 352(%rax), %ymm1, %ymm7 vpunpcklwd 544(%rax), %ymm2, %ymm8 vpunpckhwd 544(%rax), %ymm2, %ymm9 vpunpcklwd 736(%rax), %ymm3, %ymm10 vpunpckhwd 736(%rax), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 832(%rax), %ymm0 vmovdqa 1024(%rax), %ymm1 vmovdqa 1216(%rax), %ymm2 vmovdqa 1408(%rax), %ymm3 vpunpcklwd 928(%rax), %ymm0, %ymm12 vpunpckhwd 928(%rax), %ymm0, %ymm13 vpunpcklwd 1120(%rax), %ymm1, %ymm14 vpunpckhwd 1120(%rax), %ymm1, %ymm15 vpunpcklwd 1312(%rax), %ymm2, %ymm0 vpunpckhwd 1312(%rax), %ymm2, %ymm1 vpunpcklwd 1504(%rax), %ymm3, %ymm2 vpunpckhwd 1504(%rax), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 1024(%r9) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 1056(%r9) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 1088(%r9) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 1120(%r9) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 1152(%r9) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 1184(%r9) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1216(%r9) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1280(%r9) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1312(%r9) 
vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 1344(%r9) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 1376(%r9) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 1248(%r9) vmovdqa 0(%r11), %ymm0 vmovdqa 192(%r11), %ymm1 vmovdqa 384(%r11), %ymm2 vmovdqa 576(%r11), %ymm3 vpunpcklwd 96(%r11), %ymm0, %ymm4 vpunpckhwd 96(%r11), %ymm0, %ymm5 vpunpcklwd 288(%r11), %ymm1, %ymm6 vpunpckhwd 288(%r11), %ymm1, %ymm7 vpunpcklwd 480(%r11), %ymm2, %ymm8 vpunpckhwd 480(%r11), %ymm2, %ymm9 vpunpcklwd 672(%r11), %ymm3, %ymm10 vpunpckhwd 672(%r11), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 768(%r11), %ymm0 vmovdqa 960(%r11), %ymm1 vmovdqa 1152(%r11), %ymm2 vmovdqa 1344(%r11), %ymm3 vpunpcklwd 864(%r11), %ymm0, %ymm12 vpunpckhwd 864(%r11), %ymm0, %ymm13 vpunpcklwd 1056(%r11), %ymm1, %ymm14 vpunpckhwd 1056(%r11), %ymm1, %ymm15 vpunpcklwd 1248(%r11), %ymm2, %ymm0 vpunpckhwd 1248(%r11), %ymm2, %ymm1 vpunpcklwd 1440(%r11), %ymm3, %ymm2 vpunpckhwd 1440(%r11), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 1408(%r9) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 1440(%r9) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 1472(%r9) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 1504(%r9) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 1536(%r9) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 1568(%r9) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1600(%r9) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1664(%r9) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1696(%r9) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 1728(%r9) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 1760(%r9) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 1792(%r9) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 1824(%r9) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 1856(%r9) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 1632(%r9) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 1888(%r9) vmovdqa 32(%r11), %ymm0 vmovdqa 224(%r11), %ymm1 vmovdqa 416(%r11), %ymm2 vmovdqa 608(%r11), %ymm3 vpunpcklwd 128(%r11), %ymm0, %ymm4 vpunpckhwd 128(%r11), %ymm0, %ymm5 vpunpcklwd 320(%r11), %ymm1, %ymm6 vpunpckhwd 320(%r11), %ymm1, %ymm7 vpunpcklwd 
512(%r11), %ymm2, %ymm8 vpunpckhwd 512(%r11), %ymm2, %ymm9 vpunpcklwd 704(%r11), %ymm3, %ymm10 vpunpckhwd 704(%r11), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 800(%r11), %ymm0 vmovdqa 992(%r11), %ymm1 vmovdqa 1184(%r11), %ymm2 vmovdqa 1376(%r11), %ymm3 vpunpcklwd 896(%r11), %ymm0, %ymm12 vpunpckhwd 896(%r11), %ymm0, %ymm13 vpunpcklwd 1088(%r11), %ymm1, %ymm14 vpunpckhwd 1088(%r11), %ymm1, %ymm15 vpunpcklwd 1280(%r11), %ymm2, %ymm0 vpunpckhwd 1280(%r11), %ymm2, %ymm1 vpunpcklwd 1472(%r11), %ymm3, %ymm2 vpunpckhwd 1472(%r11), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 1920(%r9) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 1952(%r9) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 1984(%r9) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 2016(%r9) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 2048(%r9) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 2080(%r9) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 2112(%r9) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 2176(%r9) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 2208(%r9) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 2240(%r9) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2272(%r9) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 2304(%r9) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 2336(%r9) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 2368(%r9) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 2144(%r9) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 2400(%r9) vmovdqa 64(%r11), %ymm0 vmovdqa 256(%r11), %ymm1 vmovdqa 448(%r11), %ymm2 vmovdqa 640(%r11), %ymm3 vpunpcklwd 160(%r11), %ymm0, %ymm4 vpunpckhwd 160(%r11), %ymm0, %ymm5 vpunpcklwd 352(%r11), %ymm1, %ymm6 vpunpckhwd 352(%r11), %ymm1, %ymm7 vpunpcklwd 544(%r11), %ymm2, %ymm8 vpunpckhwd 544(%r11), %ymm2, %ymm9 vpunpcklwd 736(%r11), %ymm3, %ymm10 vpunpckhwd 736(%r11), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 
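# NOTE (annotation, inferred from the instruction pattern): the vpunpck{l,h}wd ->
# {l,h}dq -> {l,h}qdq ladder, finished with vinserti128/vpermq to swap 128-bit
# halves, is effectively a 16x16 transpose of 16-bit words: coefficients spread
# across rows 96 bytes apart in (%r11) are regrouped so each ymm register holds
# one coefficient slot, ready for the vectorised multiply loop below.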
vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 832(%r11), %ymm0 vmovdqa 1024(%r11), %ymm1 vmovdqa 1216(%r11), %ymm2 vmovdqa 1408(%r11), %ymm3 vpunpcklwd 928(%r11), %ymm0, %ymm12 vpunpckhwd 928(%r11), %ymm0, %ymm13 vpunpcklwd 1120(%r11), %ymm1, %ymm14 vpunpckhwd 1120(%r11), %ymm1, %ymm15 vpunpcklwd 1312(%r11), %ymm2, %ymm0 vpunpckhwd 1312(%r11), %ymm2, %ymm1 vpunpcklwd 1504(%r11), %ymm3, %ymm2 vpunpckhwd 1504(%r11), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 2432(%r9) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 2464(%r9) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 2496(%r9) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 2528(%r9) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 2560(%r9) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 2592(%r9) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 2624(%r9) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 2688(%r9) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 2720(%r9) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 2752(%r9) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2784(%r9) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 2656(%r9) addq $32, %r8 innerloop_4eced63f144beffcb0247f9c6f67d165: vmovdqa 0(%r9), %ymm0 vmovdqa 1408(%r9), %ymm6 vmovdqa 32(%r9), %ymm1 vmovdqa 1440(%r9), %ymm7 vmovdqa 64(%r9), %ymm2 vmovdqa 1472(%r9), %ymm8 vmovdqa 96(%r9), %ymm3 vmovdqa 1504(%r9), %ymm9 vmovdqa 128(%r9), %ymm4 vmovdqa 1536(%r9), %ymm10 vmovdqa 160(%r9), %ymm5 vmovdqa 1568(%r9), %ymm11 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 2816(%r10) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 2848(%r10) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 2880(%r10) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 2912(%r10) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 2944(%r10) vpmullw %ymm0, %ymm11, %ymm13 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, 
%ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 2976(%r10) vpmullw %ymm1, %ymm11, %ymm12 vpmullw %ymm2, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3008(%r10) vpmullw %ymm2, %ymm11, %ymm13 vpmullw %ymm3, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3040(%r10) vpmullw %ymm3, %ymm11, %ymm12 vpmullw %ymm4, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3072(%r10) vpmullw %ymm4, %ymm11, %ymm13 vpmullw %ymm5, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3104(%r10) vpmullw %ymm5, %ymm11, %ymm12 vmovdqa %ymm12, 3136(%r10) vmovdqa 192(%r9), %ymm0 vmovdqa 1600(%r9), %ymm6 vmovdqa 224(%r9), %ymm1 vmovdqa 1632(%r9), %ymm7 vmovdqa 256(%r9), %ymm2 vmovdqa 1664(%r9), %ymm8 vmovdqa 288(%r9), %ymm3 vmovdqa 1696(%r9), %ymm9 vmovdqa 320(%r9), %ymm4 vmovdqa 1728(%r9), %ymm10 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 3200(%r10) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3232(%r10) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3264(%r10) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3296(%r10) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3328(%r10) vpmullw %ymm1, %ymm10, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3360(%r10) vpmullw %ymm2, %ymm10, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3392(%r10) vpmullw %ymm3, %ymm10, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3424(%r10) vpmullw %ymm4, %ymm10, %ymm12 vmovdqa %ymm12, 3456(%r10) vpaddw 0(%r9), %ymm0, %ymm0 vpaddw 1408(%r9), %ymm6, %ymm6 vpaddw 32(%r9), %ymm1, %ymm1 vpaddw 1440(%r9), %ymm7, %ymm7 vpaddw 64(%r9), %ymm2, %ymm2 vpaddw 1472(%r9), %ymm8, %ymm8 vpaddw 96(%r9), %ymm3, %ymm3 vpaddw 1504(%r9), %ymm9, %ymm9 vpaddw 128(%r9), %ymm4, %ymm4 vpaddw 1536(%r9), %ymm10, %ymm10 vpmullw %ymm0, %ymm11, %ymm12 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpsubw 2976(%r10), %ymm12, %ymm12 vpsubw 3360(%r10), %ymm12, %ymm12 vmovdqa %ymm12, 3168(%r10) vpmullw %ymm5, %ymm7, %ymm12 vpmullw %ymm5, %ymm8, %ymm13 vpmullw %ymm5, %ymm9, %ymm14 vpmullw %ymm5, %ymm10, %ymm15 
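# NOTE (annotation): at this point ymm0-4/ymm6-10 hold the half-sums a_i+a_{i+6}
# and b_i+b_{i+6} (ymm5/ymm11 are the unpaired sixth limbs), so the products being
# accumulated here appear to form the Karatsuba middle part of an 11x11-limb block;
# the low and high block products saved at 2816-3456(%r10) are subtracted back out
# in the recombination that follows.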
vpmullw %ymm1, %ymm11, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm10, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm3, %ymm9, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm4, %ymm8, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm11, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm10, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm4, %ymm9, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm11, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm10, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm11, %ymm5 vpaddw %ymm5, %ymm15, %ymm15 vpmullw %ymm0, %ymm10, %ymm11 vpmullw %ymm1, %ymm9, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm2, %ymm8, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm3, %ymm7, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm4, %ymm6, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm0, %ymm9, %ymm10 vpmullw %ymm1, %ymm8, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm2, %ymm7, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm3, %ymm6, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm0, %ymm8, %ymm9 vpmullw %ymm1, %ymm7, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm2, %ymm6, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm0, %ymm7, %ymm8 vpmullw %ymm1, %ymm6, %ymm5 vpaddw %ymm5, %ymm8, %ymm8 vpmullw %ymm0, %ymm6, %ymm7 vmovdqa 3008(%r10), %ymm0 vpsubw 3200(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm12, %ymm6 vpsubw 3392(%r10), %ymm6, %ymm6 vmovdqa %ymm6, 3200(%r10) vpaddw %ymm7, %ymm0, %ymm0 vpsubw 2816(%r10), %ymm0, %ymm0 vmovdqa %ymm0, 3008(%r10) vmovdqa 3040(%r10), %ymm1 vpsubw 3232(%r10), %ymm1, %ymm1 vpsubw %ymm1, %ymm13, %ymm7 vpsubw 3424(%r10), %ymm7, %ymm7 vmovdqa %ymm7, 3232(%r10) vpaddw %ymm8, %ymm1, %ymm1 vpsubw 2848(%r10), %ymm1, %ymm1 vmovdqa %ymm1, 3040(%r10) vmovdqa 3072(%r10), %ymm2 vpsubw 3264(%r10), %ymm2, %ymm2 vpsubw %ymm2, %ymm14, %ymm8 vpsubw 3456(%r10), %ymm8, %ymm8 vmovdqa %ymm8, 3264(%r10) vpaddw %ymm9, %ymm2, %ymm2 vpsubw 2880(%r10), %ymm2, %ymm2 vmovdqa %ymm2, 3072(%r10) vmovdqa 3104(%r10), %ymm3 vpsubw 3296(%r10), %ymm3, %ymm3 vpsubw %ymm3, %ymm15, %ymm9 vmovdqa %ymm9, 3296(%r10) vpaddw %ymm10, %ymm3, %ymm3 vpsubw 2912(%r10), %ymm3, %ymm3 vmovdqa %ymm3, 3104(%r10) vmovdqa 3136(%r10), %ymm4 vpsubw 3328(%r10), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vpsubw 2944(%r10), %ymm4, %ymm4 vmovdqa %ymm4, 3136(%r10) vmovdqa 352(%r9), %ymm0 vmovdqa 1760(%r9), %ymm6 vmovdqa 384(%r9), %ymm1 vmovdqa 1792(%r9), %ymm7 vmovdqa 416(%r9), %ymm2 vmovdqa 1824(%r9), %ymm8 vmovdqa 448(%r9), %ymm3 vmovdqa 1856(%r9), %ymm9 vmovdqa 480(%r9), %ymm4 vmovdqa 1888(%r9), %ymm10 vmovdqa 512(%r9), %ymm5 vmovdqa 1920(%r9), %ymm11 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 3520(%r10) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3552(%r10) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3584(%r10) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3616(%r10) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3648(%r10) vpmullw %ymm0, %ymm11, %ymm13 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm13, 
%ymm15, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3680(%r10) vpmullw %ymm1, %ymm11, %ymm12 vpmullw %ymm2, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3712(%r10) vpmullw %ymm2, %ymm11, %ymm13 vpmullw %ymm3, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3744(%r10) vpmullw %ymm3, %ymm11, %ymm12 vpmullw %ymm4, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3776(%r10) vpmullw %ymm4, %ymm11, %ymm13 vpmullw %ymm5, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3808(%r10) vpmullw %ymm5, %ymm11, %ymm12 vmovdqa %ymm12, 3840(%r10) vmovdqa 544(%r9), %ymm0 vmovdqa 1952(%r9), %ymm6 vmovdqa 576(%r9), %ymm1 vmovdqa 1984(%r9), %ymm7 vmovdqa 608(%r9), %ymm2 vmovdqa 2016(%r9), %ymm8 vmovdqa 640(%r9), %ymm3 vmovdqa 2048(%r9), %ymm9 vmovdqa 672(%r9), %ymm4 vmovdqa 2080(%r9), %ymm10 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 3904(%r10) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 3936(%r10) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 3968(%r10) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 4000(%r10) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 4032(%r10) vpmullw %ymm1, %ymm10, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 4064(%r10) vpmullw %ymm2, %ymm10, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 4096(%r10) vpmullw %ymm3, %ymm10, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 4128(%r10) vpmullw %ymm4, %ymm10, %ymm12 vmovdqa %ymm12, 4160(%r10) vpaddw 352(%r9), %ymm0, %ymm0 vpaddw 1760(%r9), %ymm6, %ymm6 vpaddw 384(%r9), %ymm1, %ymm1 vpaddw 1792(%r9), %ymm7, %ymm7 vpaddw 416(%r9), %ymm2, %ymm2 vpaddw 1824(%r9), %ymm8, %ymm8 vpaddw 448(%r9), %ymm3, %ymm3 vpaddw 1856(%r9), %ymm9, %ymm9 vpaddw 480(%r9), %ymm4, %ymm4 vpaddw 1888(%r9), %ymm10, %ymm10 vpmullw %ymm0, %ymm11, %ymm12 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpsubw 3680(%r10), %ymm12, %ymm12 vpsubw 4064(%r10), %ymm12, 
%ymm12 vmovdqa %ymm12, 3872(%r10) vpmullw %ymm5, %ymm7, %ymm12 vpmullw %ymm5, %ymm8, %ymm13 vpmullw %ymm5, %ymm9, %ymm14 vpmullw %ymm5, %ymm10, %ymm15 vpmullw %ymm1, %ymm11, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm10, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm3, %ymm9, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm4, %ymm8, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm11, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm10, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm4, %ymm9, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm11, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm10, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm11, %ymm5 vpaddw %ymm5, %ymm15, %ymm15 vpmullw %ymm0, %ymm10, %ymm11 vpmullw %ymm1, %ymm9, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm2, %ymm8, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm3, %ymm7, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm4, %ymm6, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm0, %ymm9, %ymm10 vpmullw %ymm1, %ymm8, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm2, %ymm7, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm3, %ymm6, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm0, %ymm8, %ymm9 vpmullw %ymm1, %ymm7, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm2, %ymm6, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm0, %ymm7, %ymm8 vpmullw %ymm1, %ymm6, %ymm5 vpaddw %ymm5, %ymm8, %ymm8 vpmullw %ymm0, %ymm6, %ymm7 vmovdqa 3712(%r10), %ymm0 vpsubw 3904(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm12, %ymm6 vpsubw 4096(%r10), %ymm6, %ymm6 vmovdqa %ymm6, 3904(%r10) vpaddw %ymm7, %ymm0, %ymm0 vpsubw 3520(%r10), %ymm0, %ymm0 vmovdqa %ymm0, 3712(%r10) vmovdqa 3744(%r10), %ymm1 vpsubw 3936(%r10), %ymm1, %ymm1 vpsubw %ymm1, %ymm13, %ymm7 vpsubw 4128(%r10), %ymm7, %ymm7 vmovdqa %ymm7, 3936(%r10) vpaddw %ymm8, %ymm1, %ymm1 vpsubw 3552(%r10), %ymm1, %ymm1 vmovdqa %ymm1, 3744(%r10) vmovdqa 3776(%r10), %ymm2 vpsubw 3968(%r10), %ymm2, %ymm2 vpsubw %ymm2, %ymm14, %ymm8 vpsubw 4160(%r10), %ymm8, %ymm8 vmovdqa %ymm8, 3968(%r10) vpaddw %ymm9, %ymm2, %ymm2 vpsubw 3584(%r10), %ymm2, %ymm2 vmovdqa %ymm2, 3776(%r10) vmovdqa 3808(%r10), %ymm3 vpsubw 4000(%r10), %ymm3, %ymm3 vpsubw %ymm3, %ymm15, %ymm9 vmovdqa %ymm9, 4000(%r10) vpaddw %ymm10, %ymm3, %ymm3 vpsubw 3616(%r10), %ymm3, %ymm3 vmovdqa %ymm3, 3808(%r10) vmovdqa 3840(%r10), %ymm4 vpsubw 4032(%r10), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vpsubw 3648(%r10), %ymm4, %ymm4 vmovdqa %ymm4, 3840(%r10) vmovdqa 0(%r9), %ymm0 vmovdqa 1408(%r9), %ymm6 vpaddw 352(%r9), %ymm0, %ymm0 vpaddw 1760(%r9), %ymm6, %ymm6 vmovdqa 32(%r9), %ymm1 vmovdqa 1440(%r9), %ymm7 vpaddw 384(%r9), %ymm1, %ymm1 vpaddw 1792(%r9), %ymm7, %ymm7 vmovdqa 64(%r9), %ymm2 vmovdqa 1472(%r9), %ymm8 vpaddw 416(%r9), %ymm2, %ymm2 vpaddw 1824(%r9), %ymm8, %ymm8 vmovdqa 96(%r9), %ymm3 vmovdqa 1504(%r9), %ymm9 vpaddw 448(%r9), %ymm3, %ymm3 vpaddw 1856(%r9), %ymm9, %ymm9 vmovdqa 128(%r9), %ymm4 vmovdqa 1536(%r9), %ymm10 vpaddw 480(%r9), %ymm4, %ymm4 vpaddw 1888(%r9), %ymm10, %ymm10 vmovdqa 160(%r9), %ymm5 vmovdqa 1568(%r9), %ymm11 vpaddw 512(%r9), %ymm5, %ymm5 vpaddw 1920(%r9), %ymm11, %ymm11 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 5888(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 5920(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 5952(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 
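# NOTE (annotation): this block appears to be the outer Karatsuba level for the
# current 22-limb chunk: ymm0-5/ymm6-11 were just loaded as sums of the two
# 11-limb halves (offsets 0 and 352 in (%r9) for one operand, 1408 and 1760 for
# the other), and their schoolbook product is accumulated into scratch from
# 5888(%r8) before being folded against the two half products.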
vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 5984(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6016(%r8) vpmullw %ymm0, %ymm11, %ymm13 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6048(%r8) vpmullw %ymm1, %ymm11, %ymm12 vpmullw %ymm2, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6080(%r8) vpmullw %ymm2, %ymm11, %ymm13 vpmullw %ymm3, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6112(%r8) vpmullw %ymm3, %ymm11, %ymm12 vpmullw %ymm4, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6144(%r8) vpmullw %ymm4, %ymm11, %ymm13 vpmullw %ymm5, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6176(%r8) vpmullw %ymm5, %ymm11, %ymm12 vmovdqa %ymm12, 6208(%r8) vmovdqa 192(%r9), %ymm0 vmovdqa 1600(%r9), %ymm6 vpaddw 544(%r9), %ymm0, %ymm0 vpaddw 1952(%r9), %ymm6, %ymm6 vmovdqa 224(%r9), %ymm1 vmovdqa 1632(%r9), %ymm7 vpaddw 576(%r9), %ymm1, %ymm1 vpaddw 1984(%r9), %ymm7, %ymm7 vmovdqa 256(%r9), %ymm2 vmovdqa 1664(%r9), %ymm8 vpaddw 608(%r9), %ymm2, %ymm2 vpaddw 2016(%r9), %ymm8, %ymm8 vmovdqa 288(%r9), %ymm3 vmovdqa 1696(%r9), %ymm9 vpaddw 640(%r9), %ymm3, %ymm3 vpaddw 2048(%r9), %ymm9, %ymm9 vmovdqa 320(%r9), %ymm4 vmovdqa 1728(%r9), %ymm10 vpaddw 672(%r9), %ymm4, %ymm4 vpaddw 2080(%r9), %ymm10, %ymm10 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 6272(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6304(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6336(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6368(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6400(%r8) vpmullw %ymm1, %ymm10, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6432(%r8) vpmullw %ymm2, %ymm10, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6464(%r8) vpmullw %ymm3, %ymm10, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw 
%ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6496(%r8) vpmullw %ymm4, %ymm10, %ymm12 vmovdqa %ymm12, 6528(%r8) vpaddw 0(%r9), %ymm0, %ymm0 vpaddw 1408(%r9), %ymm6, %ymm6 vpaddw 352(%r9), %ymm0, %ymm0 vpaddw 1760(%r9), %ymm6, %ymm6 vpaddw 32(%r9), %ymm1, %ymm1 vpaddw 1440(%r9), %ymm7, %ymm7 vpaddw 384(%r9), %ymm1, %ymm1 vpaddw 1792(%r9), %ymm7, %ymm7 vpaddw 64(%r9), %ymm2, %ymm2 vpaddw 1472(%r9), %ymm8, %ymm8 vpaddw 416(%r9), %ymm2, %ymm2 vpaddw 1824(%r9), %ymm8, %ymm8 vpaddw 96(%r9), %ymm3, %ymm3 vpaddw 1504(%r9), %ymm9, %ymm9 vpaddw 448(%r9), %ymm3, %ymm3 vpaddw 1856(%r9), %ymm9, %ymm9 vpaddw 128(%r9), %ymm4, %ymm4 vpaddw 1536(%r9), %ymm10, %ymm10 vpaddw 480(%r9), %ymm4, %ymm4 vpaddw 1888(%r9), %ymm10, %ymm10 vpmullw %ymm0, %ymm11, %ymm12 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpsubw 6048(%r8), %ymm12, %ymm12 vpsubw 6432(%r8), %ymm12, %ymm12 vmovdqa %ymm12, 6240(%r8) vpmullw %ymm5, %ymm7, %ymm12 vpmullw %ymm5, %ymm8, %ymm13 vpmullw %ymm5, %ymm9, %ymm14 vpmullw %ymm5, %ymm10, %ymm15 vpmullw %ymm1, %ymm11, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm10, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm3, %ymm9, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm4, %ymm8, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm11, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm10, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm4, %ymm9, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm11, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm10, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm11, %ymm5 vpaddw %ymm5, %ymm15, %ymm15 vpmullw %ymm0, %ymm10, %ymm11 vpmullw %ymm1, %ymm9, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm2, %ymm8, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm3, %ymm7, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm4, %ymm6, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm0, %ymm9, %ymm10 vpmullw %ymm1, %ymm8, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm2, %ymm7, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm3, %ymm6, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm0, %ymm8, %ymm9 vpmullw %ymm1, %ymm7, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm2, %ymm6, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm0, %ymm7, %ymm8 vpmullw %ymm1, %ymm6, %ymm5 vpaddw %ymm5, %ymm8, %ymm8 vpmullw %ymm0, %ymm6, %ymm7 vmovdqa 6080(%r8), %ymm0 vpsubw 6272(%r8), %ymm0, %ymm0 vpsubw %ymm0, %ymm12, %ymm6 vpsubw 6464(%r8), %ymm6, %ymm6 vmovdqa %ymm6, 6272(%r8) vpaddw %ymm7, %ymm0, %ymm0 vpsubw 5888(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 6080(%r8) vmovdqa 6112(%r8), %ymm1 vpsubw 6304(%r8), %ymm1, %ymm1 vpsubw %ymm1, %ymm13, %ymm7 vpsubw 6496(%r8), %ymm7, %ymm7 vmovdqa %ymm7, 6304(%r8) vpaddw %ymm8, %ymm1, %ymm1 vpsubw 5920(%r8), %ymm1, %ymm1 vmovdqa %ymm1, 6112(%r8) vmovdqa 6144(%r8), %ymm2 vpsubw 6336(%r8), %ymm2, %ymm2 vpsubw %ymm2, %ymm14, %ymm8 vpsubw 6528(%r8), %ymm8, %ymm8 vmovdqa %ymm8, 6336(%r8) vpaddw %ymm9, %ymm2, %ymm2 vpsubw 5952(%r8), %ymm2, %ymm2 vmovdqa %ymm2, 6144(%r8) vmovdqa 6176(%r8), %ymm3 vpsubw 6368(%r8), %ymm3, %ymm3 vpsubw %ymm3, %ymm15, %ymm9 vmovdqa %ymm9, 6368(%r8) vpaddw %ymm10, %ymm3, %ymm3 vpsubw 5984(%r8), %ymm3, %ymm3 vmovdqa %ymm3, 6176(%r8) vmovdqa 6208(%r8), %ymm4 vpsubw 6400(%r8), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vpsubw 6016(%r8), %ymm4, %ymm4 vmovdqa %ymm4, 6208(%r8) vmovdqa 6208(%r8), 
%ymm0 vpsubw 3136(%r10), %ymm0, %ymm0 vpsubw 3840(%r10), %ymm0, %ymm0 vmovdqa %ymm0, 3488(%r10) vmovdqa 3168(%r10), %ymm0 vpsubw 3520(%r10), %ymm0, %ymm0 vmovdqa 6240(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 3872(%r10), %ymm1, %ymm1 vpsubw 2816(%r10), %ymm0, %ymm0 vpaddw 5888(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3168(%r10) vmovdqa %ymm1, 3520(%r10) vmovdqa 3200(%r10), %ymm0 vpsubw 3552(%r10), %ymm0, %ymm0 vmovdqa 6272(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 3904(%r10), %ymm1, %ymm1 vpsubw 2848(%r10), %ymm0, %ymm0 vpaddw 5920(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3200(%r10) vmovdqa %ymm1, 3552(%r10) vmovdqa 3232(%r10), %ymm0 vpsubw 3584(%r10), %ymm0, %ymm0 vmovdqa 6304(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 3936(%r10), %ymm1, %ymm1 vpsubw 2880(%r10), %ymm0, %ymm0 vpaddw 5952(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3232(%r10) vmovdqa %ymm1, 3584(%r10) vmovdqa 3264(%r10), %ymm0 vpsubw 3616(%r10), %ymm0, %ymm0 vmovdqa 6336(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 3968(%r10), %ymm1, %ymm1 vpsubw 2912(%r10), %ymm0, %ymm0 vpaddw 5984(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3264(%r10) vmovdqa %ymm1, 3616(%r10) vmovdqa 3296(%r10), %ymm0 vpsubw 3648(%r10), %ymm0, %ymm0 vmovdqa 6368(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 4000(%r10), %ymm1, %ymm1 vpsubw 2944(%r10), %ymm0, %ymm0 vpaddw 6016(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3296(%r10) vmovdqa %ymm1, 3648(%r10) vmovdqa 3328(%r10), %ymm0 vpsubw 3680(%r10), %ymm0, %ymm0 vmovdqa 6400(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 4032(%r10), %ymm1, %ymm1 vpsubw 2976(%r10), %ymm0, %ymm0 vpaddw 6048(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3328(%r10) vmovdqa %ymm1, 3680(%r10) vmovdqa 3360(%r10), %ymm0 vpsubw 3712(%r10), %ymm0, %ymm0 vmovdqa 6432(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 4064(%r10), %ymm1, %ymm1 vpsubw 3008(%r10), %ymm0, %ymm0 vpaddw 6080(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3360(%r10) vmovdqa %ymm1, 3712(%r10) vmovdqa 3392(%r10), %ymm0 vpsubw 3744(%r10), %ymm0, %ymm0 vmovdqa 6464(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 4096(%r10), %ymm1, %ymm1 vpsubw 3040(%r10), %ymm0, %ymm0 vpaddw 6112(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3392(%r10) vmovdqa %ymm1, 3744(%r10) vmovdqa 3424(%r10), %ymm0 vpsubw 3776(%r10), %ymm0, %ymm0 vmovdqa 6496(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 4128(%r10), %ymm1, %ymm1 vpsubw 3072(%r10), %ymm0, %ymm0 vpaddw 6144(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3424(%r10) vmovdqa %ymm1, 3776(%r10) vmovdqa 3456(%r10), %ymm0 vpsubw 3808(%r10), %ymm0, %ymm0 vmovdqa 6528(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 4160(%r10), %ymm1, %ymm1 vpsubw 3104(%r10), %ymm0, %ymm0 vpaddw 6176(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3456(%r10) vmovdqa %ymm1, 3808(%r10) neg %ecx jns done_4eced63f144beffcb0247f9c6f67d165 add $704, %r9 add $1408, %r10 jmp innerloop_4eced63f144beffcb0247f9c6f67d165 done_4eced63f144beffcb0247f9c6f67d165: sub $704, %r9 sub $1408, %r10 vmovdqa 0(%r9), %ymm0 vpaddw 704(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6592(%r8) vmovdqa 1408(%r9), %ymm0 vpaddw 2112(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7296(%r8) vmovdqa 32(%r9), %ymm0 vpaddw 736(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6624(%r8) vmovdqa 1440(%r9), %ymm0 vpaddw 2144(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7328(%r8) vmovdqa 64(%r9), %ymm0 vpaddw 768(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6656(%r8) vmovdqa 1472(%r9), %ymm0 vpaddw 2176(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7360(%r8) vmovdqa 96(%r9), %ymm0 vpaddw 800(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6688(%r8) vmovdqa 1504(%r9), %ymm0 vpaddw 2208(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7392(%r8) vmovdqa 128(%r9), %ymm0 vpaddw 832(%r9), %ymm0, %ymm0 
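# NOTE (annotation): the inner loop above appears to run twice, stepping %r9 by
# 704 and %r10 by 1408 between passes, so both 22-limb chunk products are now in
# place; the code here forms the top-level half-sums a_lo+a_hi and b_lo+b_hi,
# stored from 6592(%r8) and 7296(%r8), which feed one more 22x22 multiply whose
# result supplies the middle third of the final product.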
vmovdqa %ymm0, 6720(%r8) vmovdqa 1536(%r9), %ymm0 vpaddw 2240(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7424(%r8) vmovdqa 160(%r9), %ymm0 vpaddw 864(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6752(%r8) vmovdqa 1568(%r9), %ymm0 vpaddw 2272(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7456(%r8) vmovdqa 192(%r9), %ymm0 vpaddw 896(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6784(%r8) vmovdqa 1600(%r9), %ymm0 vpaddw 2304(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7488(%r8) vmovdqa 224(%r9), %ymm0 vpaddw 928(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6816(%r8) vmovdqa 1632(%r9), %ymm0 vpaddw 2336(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7520(%r8) vmovdqa 256(%r9), %ymm0 vpaddw 960(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6848(%r8) vmovdqa 1664(%r9), %ymm0 vpaddw 2368(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7552(%r8) vmovdqa 288(%r9), %ymm0 vpaddw 992(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6880(%r8) vmovdqa 1696(%r9), %ymm0 vpaddw 2400(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7584(%r8) vmovdqa 320(%r9), %ymm0 vpaddw 1024(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6912(%r8) vmovdqa 1728(%r9), %ymm0 vpaddw 2432(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7616(%r8) vmovdqa 352(%r9), %ymm0 vpaddw 1056(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6944(%r8) vmovdqa 1760(%r9), %ymm0 vpaddw 2464(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7648(%r8) vmovdqa 384(%r9), %ymm0 vpaddw 1088(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 6976(%r8) vmovdqa 1792(%r9), %ymm0 vpaddw 2496(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7680(%r8) vmovdqa 416(%r9), %ymm0 vpaddw 1120(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7008(%r8) vmovdqa 1824(%r9), %ymm0 vpaddw 2528(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7712(%r8) vmovdqa 448(%r9), %ymm0 vpaddw 1152(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7040(%r8) vmovdqa 1856(%r9), %ymm0 vpaddw 2560(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7744(%r8) vmovdqa 480(%r9), %ymm0 vpaddw 1184(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7072(%r8) vmovdqa 1888(%r9), %ymm0 vpaddw 2592(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7776(%r8) vmovdqa 512(%r9), %ymm0 vpaddw 1216(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7104(%r8) vmovdqa 1920(%r9), %ymm0 vpaddw 2624(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7808(%r8) vmovdqa 544(%r9), %ymm0 vpaddw 1248(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7136(%r8) vmovdqa 1952(%r9), %ymm0 vpaddw 2656(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7840(%r8) vmovdqa 576(%r9), %ymm0 vpaddw 1280(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7168(%r8) vmovdqa 1984(%r9), %ymm0 vpaddw 2688(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7872(%r8) vmovdqa 608(%r9), %ymm0 vpaddw 1312(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7200(%r8) vmovdqa 2016(%r9), %ymm0 vpaddw 2720(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7904(%r8) vmovdqa 640(%r9), %ymm0 vpaddw 1344(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7232(%r8) vmovdqa 2048(%r9), %ymm0 vpaddw 2752(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7936(%r8) vmovdqa 672(%r9), %ymm0 vpaddw 1376(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7264(%r8) vmovdqa 2080(%r9), %ymm0 vpaddw 2784(%r9), %ymm0, %ymm0 vmovdqa %ymm0, 7968(%r8) vmovdqa 6592(%r8), %ymm0 vmovdqa 7296(%r8), %ymm6 vmovdqa 6624(%r8), %ymm1 vmovdqa 7328(%r8), %ymm7 vmovdqa 6656(%r8), %ymm2 vmovdqa 7360(%r8), %ymm8 vmovdqa 6688(%r8), %ymm3 vmovdqa 7392(%r8), %ymm9 vmovdqa 6720(%r8), %ymm4 vmovdqa 7424(%r8), %ymm10 vmovdqa 6752(%r8), %ymm5 vmovdqa 7456(%r8), %ymm11 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 8000(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8032(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8064(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, 
%ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8096(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8128(%r8) vpmullw %ymm0, %ymm11, %ymm13 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8160(%r8) vpmullw %ymm1, %ymm11, %ymm12 vpmullw %ymm2, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8192(%r8) vpmullw %ymm2, %ymm11, %ymm13 vpmullw %ymm3, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8224(%r8) vpmullw %ymm3, %ymm11, %ymm12 vpmullw %ymm4, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8256(%r8) vpmullw %ymm4, %ymm11, %ymm13 vpmullw %ymm5, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8288(%r8) vpmullw %ymm5, %ymm11, %ymm12 vmovdqa %ymm12, 8320(%r8) vmovdqa 6784(%r8), %ymm0 vmovdqa 7488(%r8), %ymm6 vmovdqa 6816(%r8), %ymm1 vmovdqa 7520(%r8), %ymm7 vmovdqa 6848(%r8), %ymm2 vmovdqa 7552(%r8), %ymm8 vmovdqa 6880(%r8), %ymm3 vmovdqa 7584(%r8), %ymm9 vmovdqa 6912(%r8), %ymm4 vmovdqa 7616(%r8), %ymm10 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 8384(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8416(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8448(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8480(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8512(%r8) vpmullw %ymm1, %ymm10, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8544(%r8) vpmullw %ymm2, %ymm10, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8576(%r8) vpmullw %ymm3, %ymm10, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8608(%r8) vpmullw %ymm4, %ymm10, %ymm12 vmovdqa %ymm12, 8640(%r8) vpaddw 6592(%r8), %ymm0, %ymm0 vpaddw 7296(%r8), %ymm6, %ymm6 vpaddw 6624(%r8), %ymm1, %ymm1 vpaddw 7328(%r8), %ymm7, %ymm7 vpaddw 6656(%r8), %ymm2, %ymm2 vpaddw 7360(%r8), %ymm8, 
%ymm8 vpaddw 6688(%r8), %ymm3, %ymm3 vpaddw 7392(%r8), %ymm9, %ymm9 vpaddw 6720(%r8), %ymm4, %ymm4 vpaddw 7424(%r8), %ymm10, %ymm10 vpmullw %ymm0, %ymm11, %ymm12 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpsubw 8160(%r8), %ymm12, %ymm12 vpsubw 8544(%r8), %ymm12, %ymm12 vmovdqa %ymm12, 8352(%r8) vpmullw %ymm5, %ymm7, %ymm12 vpmullw %ymm5, %ymm8, %ymm13 vpmullw %ymm5, %ymm9, %ymm14 vpmullw %ymm5, %ymm10, %ymm15 vpmullw %ymm1, %ymm11, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm10, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm3, %ymm9, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm4, %ymm8, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm11, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm10, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm4, %ymm9, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm11, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm10, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm11, %ymm5 vpaddw %ymm5, %ymm15, %ymm15 vpmullw %ymm0, %ymm10, %ymm11 vpmullw %ymm1, %ymm9, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm2, %ymm8, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm3, %ymm7, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm4, %ymm6, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm0, %ymm9, %ymm10 vpmullw %ymm1, %ymm8, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm2, %ymm7, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm3, %ymm6, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm0, %ymm8, %ymm9 vpmullw %ymm1, %ymm7, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm2, %ymm6, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm0, %ymm7, %ymm8 vpmullw %ymm1, %ymm6, %ymm5 vpaddw %ymm5, %ymm8, %ymm8 vpmullw %ymm0, %ymm6, %ymm7 vmovdqa 8192(%r8), %ymm0 vpsubw 8384(%r8), %ymm0, %ymm0 vpsubw %ymm0, %ymm12, %ymm6 vpsubw 8576(%r8), %ymm6, %ymm6 vmovdqa %ymm6, 8384(%r8) vpaddw %ymm7, %ymm0, %ymm0 vpsubw 8000(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8192(%r8) vmovdqa 8224(%r8), %ymm1 vpsubw 8416(%r8), %ymm1, %ymm1 vpsubw %ymm1, %ymm13, %ymm7 vpsubw 8608(%r8), %ymm7, %ymm7 vmovdqa %ymm7, 8416(%r8) vpaddw %ymm8, %ymm1, %ymm1 vpsubw 8032(%r8), %ymm1, %ymm1 vmovdqa %ymm1, 8224(%r8) vmovdqa 8256(%r8), %ymm2 vpsubw 8448(%r8), %ymm2, %ymm2 vpsubw %ymm2, %ymm14, %ymm8 vpsubw 8640(%r8), %ymm8, %ymm8 vmovdqa %ymm8, 8448(%r8) vpaddw %ymm9, %ymm2, %ymm2 vpsubw 8064(%r8), %ymm2, %ymm2 vmovdqa %ymm2, 8256(%r8) vmovdqa 8288(%r8), %ymm3 vpsubw 8480(%r8), %ymm3, %ymm3 vpsubw %ymm3, %ymm15, %ymm9 vmovdqa %ymm9, 8480(%r8) vpaddw %ymm10, %ymm3, %ymm3 vpsubw 8096(%r8), %ymm3, %ymm3 vmovdqa %ymm3, 8288(%r8) vmovdqa 8320(%r8), %ymm4 vpsubw 8512(%r8), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vpsubw 8128(%r8), %ymm4, %ymm4 vmovdqa %ymm4, 8320(%r8) vmovdqa 6944(%r8), %ymm0 vmovdqa 7648(%r8), %ymm6 vmovdqa 6976(%r8), %ymm1 vmovdqa 7680(%r8), %ymm7 vmovdqa 7008(%r8), %ymm2 vmovdqa 7712(%r8), %ymm8 vmovdqa 7040(%r8), %ymm3 vmovdqa 7744(%r8), %ymm9 vmovdqa 7072(%r8), %ymm4 vmovdqa 7776(%r8), %ymm10 vmovdqa 7104(%r8), %ymm5 vmovdqa 7808(%r8), %ymm11 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 8704(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8736(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw 
%ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8768(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8800(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8832(%r8) vpmullw %ymm0, %ymm11, %ymm13 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8864(%r8) vpmullw %ymm1, %ymm11, %ymm12 vpmullw %ymm2, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8896(%r8) vpmullw %ymm2, %ymm11, %ymm13 vpmullw %ymm3, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8928(%r8) vpmullw %ymm3, %ymm11, %ymm12 vpmullw %ymm4, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 8960(%r8) vpmullw %ymm4, %ymm11, %ymm13 vpmullw %ymm5, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 8992(%r8) vpmullw %ymm5, %ymm11, %ymm12 vmovdqa %ymm12, 9024(%r8) vmovdqa 7136(%r8), %ymm0 vmovdqa 7840(%r8), %ymm6 vmovdqa 7168(%r8), %ymm1 vmovdqa 7872(%r8), %ymm7 vmovdqa 7200(%r8), %ymm2 vmovdqa 7904(%r8), %ymm8 vmovdqa 7232(%r8), %ymm3 vmovdqa 7936(%r8), %ymm9 vmovdqa 7264(%r8), %ymm4 vmovdqa 7968(%r8), %ymm10 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 9088(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 9120(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 9152(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 9184(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 9216(%r8) vpmullw %ymm1, %ymm10, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 9248(%r8) vpmullw %ymm2, %ymm10, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 9280(%r8) vpmullw %ymm3, %ymm10, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 9312(%r8) vpmullw %ymm4, %ymm10, %ymm12 vmovdqa %ymm12, 9344(%r8) vpaddw 6944(%r8), %ymm0, %ymm0 vpaddw 7648(%r8), %ymm6, 
%ymm6 vpaddw 6976(%r8), %ymm1, %ymm1 vpaddw 7680(%r8), %ymm7, %ymm7 vpaddw 7008(%r8), %ymm2, %ymm2 vpaddw 7712(%r8), %ymm8, %ymm8 vpaddw 7040(%r8), %ymm3, %ymm3 vpaddw 7744(%r8), %ymm9, %ymm9 vpaddw 7072(%r8), %ymm4, %ymm4 vpaddw 7776(%r8), %ymm10, %ymm10 vpmullw %ymm0, %ymm11, %ymm12 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpsubw 8864(%r8), %ymm12, %ymm12 vpsubw 9248(%r8), %ymm12, %ymm12 vmovdqa %ymm12, 9056(%r8) vpmullw %ymm5, %ymm7, %ymm12 vpmullw %ymm5, %ymm8, %ymm13 vpmullw %ymm5, %ymm9, %ymm14 vpmullw %ymm5, %ymm10, %ymm15 vpmullw %ymm1, %ymm11, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm10, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm3, %ymm9, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm4, %ymm8, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm11, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm10, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm4, %ymm9, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm11, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm10, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm11, %ymm5 vpaddw %ymm5, %ymm15, %ymm15 vpmullw %ymm0, %ymm10, %ymm11 vpmullw %ymm1, %ymm9, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm2, %ymm8, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm3, %ymm7, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm4, %ymm6, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm0, %ymm9, %ymm10 vpmullw %ymm1, %ymm8, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm2, %ymm7, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm3, %ymm6, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm0, %ymm8, %ymm9 vpmullw %ymm1, %ymm7, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm2, %ymm6, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm0, %ymm7, %ymm8 vpmullw %ymm1, %ymm6, %ymm5 vpaddw %ymm5, %ymm8, %ymm8 vpmullw %ymm0, %ymm6, %ymm7 vmovdqa 8896(%r8), %ymm0 vpsubw 9088(%r8), %ymm0, %ymm0 vpsubw %ymm0, %ymm12, %ymm6 vpsubw 9280(%r8), %ymm6, %ymm6 vmovdqa %ymm6, 9088(%r8) vpaddw %ymm7, %ymm0, %ymm0 vpsubw 8704(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8896(%r8) vmovdqa 8928(%r8), %ymm1 vpsubw 9120(%r8), %ymm1, %ymm1 vpsubw %ymm1, %ymm13, %ymm7 vpsubw 9312(%r8), %ymm7, %ymm7 vmovdqa %ymm7, 9120(%r8) vpaddw %ymm8, %ymm1, %ymm1 vpsubw 8736(%r8), %ymm1, %ymm1 vmovdqa %ymm1, 8928(%r8) vmovdqa 8960(%r8), %ymm2 vpsubw 9152(%r8), %ymm2, %ymm2 vpsubw %ymm2, %ymm14, %ymm8 vpsubw 9344(%r8), %ymm8, %ymm8 vmovdqa %ymm8, 9152(%r8) vpaddw %ymm9, %ymm2, %ymm2 vpsubw 8768(%r8), %ymm2, %ymm2 vmovdqa %ymm2, 8960(%r8) vmovdqa 8992(%r8), %ymm3 vpsubw 9184(%r8), %ymm3, %ymm3 vpsubw %ymm3, %ymm15, %ymm9 vmovdqa %ymm9, 9184(%r8) vpaddw %ymm10, %ymm3, %ymm3 vpsubw 8800(%r8), %ymm3, %ymm3 vmovdqa %ymm3, 8992(%r8) vmovdqa 9024(%r8), %ymm4 vpsubw 9216(%r8), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vpsubw 8832(%r8), %ymm4, %ymm4 vmovdqa %ymm4, 9024(%r8) vmovdqa 6592(%r8), %ymm0 vmovdqa 7296(%r8), %ymm6 vpaddw 6944(%r8), %ymm0, %ymm0 vpaddw 7648(%r8), %ymm6, %ymm6 vmovdqa 6624(%r8), %ymm1 vmovdqa 7328(%r8), %ymm7 vpaddw 6976(%r8), %ymm1, %ymm1 vpaddw 7680(%r8), %ymm7, %ymm7 vmovdqa 6656(%r8), %ymm2 vmovdqa 7360(%r8), %ymm8 vpaddw 7008(%r8), %ymm2, %ymm2 vpaddw 7712(%r8), %ymm8, %ymm8 vmovdqa 6688(%r8), %ymm3 vmovdqa 7392(%r8), %ymm9 vpaddw 7040(%r8), %ymm3, %ymm3 vpaddw 7744(%r8), %ymm9, %ymm9 vmovdqa 6720(%r8), %ymm4 
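# NOTE (annotation): outer level of the 22x22 multiply of the summed operands:
# the values loaded from 6592(%r8)/7296(%r8) get their upper 11-limb halves
# (6944(%r8)/7648(%r8) onward) added in, and the resulting product is rebuilt in
# the 5888-6528(%r8) scratch area before the final recombination below.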
vmovdqa 7424(%r8), %ymm10 vpaddw 7072(%r8), %ymm4, %ymm4 vpaddw 7776(%r8), %ymm10, %ymm10 vmovdqa 6752(%r8), %ymm5 vmovdqa 7456(%r8), %ymm11 vpaddw 7104(%r8), %ymm5, %ymm5 vpaddw 7808(%r8), %ymm11, %ymm11 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 5888(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 5920(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 5952(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 5984(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6016(%r8) vpmullw %ymm0, %ymm11, %ymm13 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6048(%r8) vpmullw %ymm1, %ymm11, %ymm12 vpmullw %ymm2, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6080(%r8) vpmullw %ymm2, %ymm11, %ymm13 vpmullw %ymm3, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm5, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6112(%r8) vpmullw %ymm3, %ymm11, %ymm12 vpmullw %ymm4, %ymm10, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm5, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6144(%r8) vpmullw %ymm4, %ymm11, %ymm13 vpmullw %ymm5, %ymm10, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6176(%r8) vpmullw %ymm5, %ymm11, %ymm12 vmovdqa %ymm12, 6208(%r8) vmovdqa 6784(%r8), %ymm0 vmovdqa 7488(%r8), %ymm6 vpaddw 7136(%r8), %ymm0, %ymm0 vpaddw 7840(%r8), %ymm6, %ymm6 vmovdqa 6816(%r8), %ymm1 vmovdqa 7520(%r8), %ymm7 vpaddw 7168(%r8), %ymm1, %ymm1 vpaddw 7872(%r8), %ymm7, %ymm7 vmovdqa 6848(%r8), %ymm2 vmovdqa 7552(%r8), %ymm8 vpaddw 7200(%r8), %ymm2, %ymm2 vpaddw 7904(%r8), %ymm8, %ymm8 vmovdqa 6880(%r8), %ymm3 vmovdqa 7584(%r8), %ymm9 vpaddw 7232(%r8), %ymm3, %ymm3 vpaddw 7936(%r8), %ymm9, %ymm9 vmovdqa 6912(%r8), %ymm4 vmovdqa 7616(%r8), %ymm10 vpaddw 7264(%r8), %ymm4, %ymm4 vpaddw 7968(%r8), %ymm10, %ymm10 vpmullw %ymm0, %ymm6, %ymm12 vmovdqa %ymm12, 6272(%r8) vpmullw %ymm0, %ymm7, %ymm13 vpmullw %ymm1, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6304(%r8) vpmullw %ymm0, %ymm8, %ymm12 vpmullw %ymm1, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6336(%r8) vpmullw %ymm0, %ymm9, %ymm13 vpmullw %ymm1, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm2, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm6, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6368(%r8) vpmullw %ymm0, %ymm10, %ymm12 vpmullw %ymm1, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm2, %ymm8, %ymm15 
vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm3, %ymm7, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm6, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6400(%r8) vpmullw %ymm1, %ymm10, %ymm13 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6432(%r8) vpmullw %ymm2, %ymm10, %ymm12 vpmullw %ymm3, %ymm9, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vpmullw %ymm4, %ymm8, %ymm15 vpaddw %ymm12, %ymm15, %ymm12 vmovdqa %ymm12, 6464(%r8) vpmullw %ymm3, %ymm10, %ymm13 vpmullw %ymm4, %ymm9, %ymm15 vpaddw %ymm13, %ymm15, %ymm13 vmovdqa %ymm13, 6496(%r8) vpmullw %ymm4, %ymm10, %ymm12 vmovdqa %ymm12, 6528(%r8) vpaddw 6592(%r8), %ymm0, %ymm0 vpaddw 7296(%r8), %ymm6, %ymm6 vpaddw 6944(%r8), %ymm0, %ymm0 vpaddw 7648(%r8), %ymm6, %ymm6 vpaddw 6624(%r8), %ymm1, %ymm1 vpaddw 7328(%r8), %ymm7, %ymm7 vpaddw 6976(%r8), %ymm1, %ymm1 vpaddw 7680(%r8), %ymm7, %ymm7 vpaddw 6656(%r8), %ymm2, %ymm2 vpaddw 7360(%r8), %ymm8, %ymm8 vpaddw 7008(%r8), %ymm2, %ymm2 vpaddw 7712(%r8), %ymm8, %ymm8 vpaddw 6688(%r8), %ymm3, %ymm3 vpaddw 7392(%r8), %ymm9, %ymm9 vpaddw 7040(%r8), %ymm3, %ymm3 vpaddw 7744(%r8), %ymm9, %ymm9 vpaddw 6720(%r8), %ymm4, %ymm4 vpaddw 7424(%r8), %ymm10, %ymm10 vpaddw 7072(%r8), %ymm4, %ymm4 vpaddw 7776(%r8), %ymm10, %ymm10 vpmullw %ymm0, %ymm11, %ymm12 vpmullw %ymm1, %ymm10, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm2, %ymm9, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm3, %ymm8, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm4, %ymm7, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpmullw %ymm5, %ymm6, %ymm15 vpaddw %ymm15, %ymm12, %ymm12 vpsubw 6048(%r8), %ymm12, %ymm12 vpsubw 6432(%r8), %ymm12, %ymm12 vmovdqa %ymm12, 6240(%r8) vpmullw %ymm5, %ymm7, %ymm12 vpmullw %ymm5, %ymm8, %ymm13 vpmullw %ymm5, %ymm9, %ymm14 vpmullw %ymm5, %ymm10, %ymm15 vpmullw %ymm1, %ymm11, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm10, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm3, %ymm9, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm4, %ymm8, %ymm5 vpaddw %ymm5, %ymm12, %ymm12 vpmullw %ymm2, %ymm11, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm10, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm4, %ymm9, %ymm5 vpaddw %ymm5, %ymm13, %ymm13 vpmullw %ymm3, %ymm11, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm10, %ymm5 vpaddw %ymm5, %ymm14, %ymm14 vpmullw %ymm4, %ymm11, %ymm5 vpaddw %ymm5, %ymm15, %ymm15 vpmullw %ymm0, %ymm10, %ymm11 vpmullw %ymm1, %ymm9, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm2, %ymm8, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm3, %ymm7, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm4, %ymm6, %ymm5 vpaddw %ymm5, %ymm11, %ymm11 vpmullw %ymm0, %ymm9, %ymm10 vpmullw %ymm1, %ymm8, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm2, %ymm7, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm3, %ymm6, %ymm5 vpaddw %ymm5, %ymm10, %ymm10 vpmullw %ymm0, %ymm8, %ymm9 vpmullw %ymm1, %ymm7, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm2, %ymm6, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vpmullw %ymm0, %ymm7, %ymm8 vpmullw %ymm1, %ymm6, %ymm5 vpaddw %ymm5, %ymm8, %ymm8 vpmullw %ymm0, %ymm6, %ymm7 vmovdqa 6080(%r8), %ymm0 vpsubw 6272(%r8), %ymm0, %ymm0 vpsubw %ymm0, %ymm12, %ymm6 vpsubw 6464(%r8), %ymm6, %ymm6 vmovdqa %ymm6, 6272(%r8) vpaddw %ymm7, %ymm0, %ymm0 vpsubw 5888(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 6080(%r8) vmovdqa 6112(%r8), %ymm1 vpsubw 6304(%r8), %ymm1, %ymm1 vpsubw %ymm1, %ymm13, %ymm7 vpsubw 6496(%r8), %ymm7, %ymm7 vmovdqa %ymm7, 
6304(%r8) vpaddw %ymm8, %ymm1, %ymm1 vpsubw 5920(%r8), %ymm1, %ymm1 vmovdqa %ymm1, 6112(%r8) vmovdqa 6144(%r8), %ymm2 vpsubw 6336(%r8), %ymm2, %ymm2 vpsubw %ymm2, %ymm14, %ymm8 vpsubw 6528(%r8), %ymm8, %ymm8 vmovdqa %ymm8, 6336(%r8) vpaddw %ymm9, %ymm2, %ymm2 vpsubw 5952(%r8), %ymm2, %ymm2 vmovdqa %ymm2, 6144(%r8) vmovdqa 6176(%r8), %ymm3 vpsubw 6368(%r8), %ymm3, %ymm3 vpsubw %ymm3, %ymm15, %ymm9 vmovdqa %ymm9, 6368(%r8) vpaddw %ymm10, %ymm3, %ymm3 vpsubw 5984(%r8), %ymm3, %ymm3 vmovdqa %ymm3, 6176(%r8) vmovdqa 6208(%r8), %ymm4 vpsubw 6400(%r8), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vpsubw 6016(%r8), %ymm4, %ymm4 vmovdqa %ymm4, 6208(%r8) vmovdqa 8352(%r8), %ymm0 vpsubw 8704(%r8), %ymm0, %ymm0 vmovdqa 6240(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9056(%r8), %ymm1, %ymm6 vpsubw 8000(%r8), %ymm0, %ymm0 vpaddw 5888(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8352(%r8) vmovdqa 8384(%r8), %ymm0 vpsubw 8736(%r8), %ymm0, %ymm0 vmovdqa 6272(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9088(%r8), %ymm1, %ymm7 vpsubw 8032(%r8), %ymm0, %ymm0 vpaddw 5920(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8384(%r8) vmovdqa 8416(%r8), %ymm0 vpsubw 8768(%r8), %ymm0, %ymm0 vmovdqa 6304(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9120(%r8), %ymm1, %ymm8 vpsubw 8064(%r8), %ymm0, %ymm0 vpaddw 5952(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8416(%r8) vmovdqa 8448(%r8), %ymm0 vpsubw 8800(%r8), %ymm0, %ymm0 vmovdqa 6336(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9152(%r8), %ymm1, %ymm9 vpsubw 8096(%r8), %ymm0, %ymm0 vpaddw 5984(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8448(%r8) vmovdqa 8480(%r8), %ymm0 vpsubw 8832(%r8), %ymm0, %ymm0 vmovdqa 6368(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9184(%r8), %ymm1, %ymm10 vpsubw 8128(%r8), %ymm0, %ymm0 vpaddw 6016(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8480(%r8) vmovdqa 8512(%r8), %ymm0 vpsubw 8864(%r8), %ymm0, %ymm0 vmovdqa 6400(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9216(%r8), %ymm1, %ymm11 vpsubw 8160(%r8), %ymm0, %ymm0 vpaddw 6048(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8512(%r8) vmovdqa 8544(%r8), %ymm0 vpsubw 8896(%r8), %ymm0, %ymm0 vmovdqa 6432(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9248(%r8), %ymm1, %ymm12 vpsubw 8192(%r8), %ymm0, %ymm0 vpaddw 6080(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8544(%r8) vmovdqa 8576(%r8), %ymm0 vpsubw 8928(%r8), %ymm0, %ymm0 vmovdqa 6464(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9280(%r8), %ymm1, %ymm13 vpsubw 8224(%r8), %ymm0, %ymm0 vpaddw 6112(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8576(%r8) vmovdqa 8608(%r8), %ymm0 vpsubw 8960(%r8), %ymm0, %ymm0 vmovdqa 6496(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9312(%r8), %ymm1, %ymm14 vpsubw 8256(%r8), %ymm0, %ymm0 vpaddw 6144(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8608(%r8) vmovdqa 8640(%r8), %ymm0 vpsubw 8992(%r8), %ymm0, %ymm0 vmovdqa 6528(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 9344(%r8), %ymm1, %ymm15 vpsubw 8288(%r8), %ymm0, %ymm0 vpaddw 6176(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 8640(%r8) vmovdqa 6208(%r8), %ymm0 vpsubw 8320(%r8), %ymm0, %ymm0 vpsubw 9024(%r8), %ymm0, %ymm0 vpsubw 3488(%r10), %ymm0, %ymm0 vpsubw 4896(%r10), %ymm0, %ymm0 vmovdqa %ymm0, 4192(%r10) vmovdqa 3520(%r10), %ymm0 vpsubw 4224(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm6, %ymm6 vpsubw 4928(%r10), %ymm6, %ymm6 vpsubw 2816(%r10), %ymm0, %ymm0 vpaddw 8000(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3520(%r10) vmovdqa %ymm6, 4224(%r10) vmovdqa 3552(%r10), %ymm0 vpsubw 4256(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm7, %ymm7 vpsubw 4960(%r10), %ymm7, %ymm7 vpsubw 2848(%r10), %ymm0, %ymm0 vpaddw 8032(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3552(%r10) vmovdqa %ymm7, 
4256(%r10) vmovdqa 3584(%r10), %ymm0 vpsubw 4288(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm8, %ymm8 vpsubw 4992(%r10), %ymm8, %ymm8 vpsubw 2880(%r10), %ymm0, %ymm0 vpaddw 8064(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3584(%r10) vmovdqa %ymm8, 4288(%r10) vmovdqa 3616(%r10), %ymm0 vpsubw 4320(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm9, %ymm9 vpsubw 5024(%r10), %ymm9, %ymm9 vpsubw 2912(%r10), %ymm0, %ymm0 vpaddw 8096(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3616(%r10) vmovdqa %ymm9, 4320(%r10) vmovdqa 3648(%r10), %ymm0 vpsubw 4352(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm10, %ymm10 vpsubw 5056(%r10), %ymm10, %ymm10 vpsubw 2944(%r10), %ymm0, %ymm0 vpaddw 8128(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3648(%r10) vmovdqa %ymm10, 4352(%r10) vmovdqa 3680(%r10), %ymm0 vpsubw 4384(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm11, %ymm11 vpsubw 5088(%r10), %ymm11, %ymm11 vpsubw 2976(%r10), %ymm0, %ymm0 vpaddw 8160(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3680(%r10) vmovdqa %ymm11, 4384(%r10) vmovdqa 3712(%r10), %ymm0 vpsubw 4416(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm12, %ymm12 vpsubw 5120(%r10), %ymm12, %ymm12 vpsubw 3008(%r10), %ymm0, %ymm0 vpaddw 8192(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3712(%r10) vmovdqa %ymm12, 4416(%r10) vmovdqa 3744(%r10), %ymm0 vpsubw 4448(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm13, %ymm13 vpsubw 5152(%r10), %ymm13, %ymm13 vpsubw 3040(%r10), %ymm0, %ymm0 vpaddw 8224(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3744(%r10) vmovdqa %ymm13, 4448(%r10) vmovdqa 3776(%r10), %ymm0 vpsubw 4480(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm14, %ymm14 vpsubw 5184(%r10), %ymm14, %ymm14 vpsubw 3072(%r10), %ymm0, %ymm0 vpaddw 8256(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3776(%r10) vmovdqa %ymm14, 4480(%r10) vmovdqa 3808(%r10), %ymm0 vpsubw 4512(%r10), %ymm0, %ymm0 vpsubw %ymm0, %ymm15, %ymm15 vpsubw 5216(%r10), %ymm15, %ymm15 vpsubw 3104(%r10), %ymm0, %ymm0 vpaddw 8288(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3808(%r10) vmovdqa %ymm15, 4512(%r10) vmovdqa 3840(%r10), %ymm0 vpsubw 4544(%r10), %ymm0, %ymm0 vmovdqa 9024(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5248(%r10), %ymm1, %ymm1 vpsubw 3136(%r10), %ymm0, %ymm0 vpaddw 8320(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3840(%r10) vmovdqa %ymm1, 4544(%r10) vmovdqa 3872(%r10), %ymm0 vpsubw 4576(%r10), %ymm0, %ymm0 vmovdqa 9056(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5280(%r10), %ymm1, %ymm1 vpsubw 3168(%r10), %ymm0, %ymm0 vpaddw 8352(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3872(%r10) vmovdqa %ymm1, 4576(%r10) vmovdqa 3904(%r10), %ymm0 vpsubw 4608(%r10), %ymm0, %ymm0 vmovdqa 9088(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5312(%r10), %ymm1, %ymm1 vpsubw 3200(%r10), %ymm0, %ymm0 vpaddw 8384(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3904(%r10) vmovdqa %ymm1, 4608(%r10) vmovdqa 3936(%r10), %ymm0 vpsubw 4640(%r10), %ymm0, %ymm0 vmovdqa 9120(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5344(%r10), %ymm1, %ymm1 vpsubw 3232(%r10), %ymm0, %ymm0 vpaddw 8416(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3936(%r10) vmovdqa %ymm1, 4640(%r10) vmovdqa 3968(%r10), %ymm0 vpsubw 4672(%r10), %ymm0, %ymm0 vmovdqa 9152(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5376(%r10), %ymm1, %ymm1 vpsubw 3264(%r10), %ymm0, %ymm0 vpaddw 8448(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 3968(%r10) vmovdqa %ymm1, 4672(%r10) vmovdqa 4000(%r10), %ymm0 vpsubw 4704(%r10), %ymm0, %ymm0 vmovdqa 9184(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5408(%r10), %ymm1, %ymm1 vpsubw 3296(%r10), %ymm0, %ymm0 vpaddw 8480(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 4000(%r10) vmovdqa %ymm1, 4704(%r10) vmovdqa 4032(%r10), %ymm0 vpsubw 4736(%r10), %ymm0, %ymm0 vmovdqa 9216(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 
vpsubw 5440(%r10), %ymm1, %ymm1 vpsubw 3328(%r10), %ymm0, %ymm0 vpaddw 8512(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 4032(%r10) vmovdqa %ymm1, 4736(%r10) vmovdqa 4064(%r10), %ymm0 vpsubw 4768(%r10), %ymm0, %ymm0 vmovdqa 9248(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5472(%r10), %ymm1, %ymm1 vpsubw 3360(%r10), %ymm0, %ymm0 vpaddw 8544(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 4064(%r10) vmovdqa %ymm1, 4768(%r10) vmovdqa 4096(%r10), %ymm0 vpsubw 4800(%r10), %ymm0, %ymm0 vmovdqa 9280(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5504(%r10), %ymm1, %ymm1 vpsubw 3392(%r10), %ymm0, %ymm0 vpaddw 8576(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 4096(%r10) vmovdqa %ymm1, 4800(%r10) vmovdqa 4128(%r10), %ymm0 vpsubw 4832(%r10), %ymm0, %ymm0 vmovdqa 9312(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5536(%r10), %ymm1, %ymm1 vpsubw 3424(%r10), %ymm0, %ymm0 vpaddw 8608(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 4128(%r10) vmovdqa %ymm1, 4832(%r10) vmovdqa 4160(%r10), %ymm0 vpsubw 4864(%r10), %ymm0, %ymm0 vmovdqa 9344(%r8), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5568(%r10), %ymm1, %ymm1 vpsubw 3456(%r10), %ymm0, %ymm0 vpaddw 8640(%r8), %ymm0, %ymm0 vmovdqa %ymm0, 4160(%r10) vmovdqa %ymm1, 4864(%r10) vpxor %ymm1, %ymm1, %ymm1 vmovdqa %ymm1, 5600(%r10) subq $32, %r8 vmovdqa 2816(%r10), %ymm0 vmovdqa 2880(%r10), %ymm1 vmovdqa 2944(%r10), %ymm2 vmovdqa 3008(%r10), %ymm3 vpunpcklwd 2848(%r10), %ymm0, %ymm4 vpunpckhwd 2848(%r10), %ymm0, %ymm5 vpunpcklwd 2912(%r10), %ymm1, %ymm6 vpunpckhwd 2912(%r10), %ymm1, %ymm7 vpunpcklwd 2976(%r10), %ymm2, %ymm8 vpunpckhwd 2976(%r10), %ymm2, %ymm9 vpunpcklwd 3040(%r10), %ymm3, %ymm10 vpunpckhwd 3040(%r10), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 3072(%r10), %ymm0 vmovdqa 3136(%r10), %ymm1 vmovdqa 3200(%r10), %ymm2 vmovdqa 3264(%r10), %ymm3 vpunpcklwd 3104(%r10), %ymm0, %ymm12 vpunpckhwd 3104(%r10), %ymm0, %ymm13 vpunpcklwd 3168(%r10), %ymm1, %ymm14 vpunpckhwd 3168(%r10), %ymm1, %ymm15 vpunpcklwd 3232(%r10), %ymm2, %ymm0 vpunpckhwd 3232(%r10), %ymm2, %ymm1 vpunpcklwd 3296(%r10), %ymm3, %ymm2 vpunpckhwd 3296(%r10), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 0(%r12) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 192(%r12) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 384(%r12) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 576(%r12) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 768(%r12) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 960(%r12) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1152(%r12) 
vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1536(%r12) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1728(%r12) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 1920(%r12) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2112(%r12) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 2304(%r12) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 2496(%r12) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 2688(%r12) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 1344(%r12) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 2880(%r12) vmovdqa 3328(%r10), %ymm0 vmovdqa 3392(%r10), %ymm1 vmovdqa 3456(%r10), %ymm2 vmovdqa 3520(%r10), %ymm3 vpunpcklwd 3360(%r10), %ymm0, %ymm4 vpunpckhwd 3360(%r10), %ymm0, %ymm5 vpunpcklwd 3424(%r10), %ymm1, %ymm6 vpunpckhwd 3424(%r10), %ymm1, %ymm7 vpunpcklwd 3488(%r10), %ymm2, %ymm8 vpunpckhwd 3488(%r10), %ymm2, %ymm9 vpunpcklwd 3552(%r10), %ymm3, %ymm10 vpunpckhwd 3552(%r10), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 3584(%r10), %ymm0 vmovdqa 3648(%r10), %ymm1 vmovdqa 3712(%r10), %ymm2 vmovdqa 3776(%r10), %ymm3 vpunpcklwd 3616(%r10), %ymm0, %ymm12 vpunpckhwd 3616(%r10), %ymm0, %ymm13 vpunpcklwd 3680(%r10), %ymm1, %ymm14 vpunpckhwd 3680(%r10), %ymm1, %ymm15 vpunpcklwd 3744(%r10), %ymm2, %ymm0 vpunpckhwd 3744(%r10), %ymm2, %ymm1 vpunpcklwd 3808(%r10), %ymm3, %ymm2 vpunpckhwd 3808(%r10), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 32(%r12) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 224(%r12) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 416(%r12) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 608(%r12) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 800(%r12) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 992(%r12) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1184(%r12) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1568(%r12) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1760(%r12) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 1952(%r12) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2144(%r12) 
vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 2336(%r12) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 2528(%r12) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 2720(%r12) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 1376(%r12) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 2912(%r12) vmovdqa 3840(%r10), %ymm0 vmovdqa 3904(%r10), %ymm1 vmovdqa 3968(%r10), %ymm2 vmovdqa 4032(%r10), %ymm3 vpunpcklwd 3872(%r10), %ymm0, %ymm4 vpunpckhwd 3872(%r10), %ymm0, %ymm5 vpunpcklwd 3936(%r10), %ymm1, %ymm6 vpunpckhwd 3936(%r10), %ymm1, %ymm7 vpunpcklwd 4000(%r10), %ymm2, %ymm8 vpunpckhwd 4000(%r10), %ymm2, %ymm9 vpunpcklwd 4064(%r10), %ymm3, %ymm10 vpunpckhwd 4064(%r10), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 4096(%r10), %ymm0 vmovdqa 4160(%r10), %ymm1 vmovdqa 4224(%r10), %ymm2 vmovdqa 4288(%r10), %ymm3 vpunpcklwd 4128(%r10), %ymm0, %ymm12 vpunpckhwd 4128(%r10), %ymm0, %ymm13 vpunpcklwd 4192(%r10), %ymm1, %ymm14 vpunpckhwd 4192(%r10), %ymm1, %ymm15 vpunpcklwd 4256(%r10), %ymm2, %ymm0 vpunpckhwd 4256(%r10), %ymm2, %ymm1 vpunpcklwd 4320(%r10), %ymm3, %ymm2 vpunpckhwd 4320(%r10), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 64(%r12) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 256(%r12) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 448(%r12) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 640(%r12) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 832(%r12) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 1024(%r12) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1216(%r12) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1600(%r12) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1792(%r12) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 1984(%r12) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2176(%r12) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 2368(%r12) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 2560(%r12) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 2752(%r12) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 1408(%r12) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 2944(%r12) vmovdqa 4224(%r10), %ymm0 vmovdqa 4288(%r10), %ymm1 vmovdqa 
4352(%r10), %ymm2 vmovdqa 4416(%r10), %ymm3 vpunpcklwd 4256(%r10), %ymm0, %ymm4 vpunpckhwd 4256(%r10), %ymm0, %ymm5 vpunpcklwd 4320(%r10), %ymm1, %ymm6 vpunpckhwd 4320(%r10), %ymm1, %ymm7 vpunpcklwd 4384(%r10), %ymm2, %ymm8 vpunpckhwd 4384(%r10), %ymm2, %ymm9 vpunpcklwd 4448(%r10), %ymm3, %ymm10 vpunpckhwd 4448(%r10), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 4480(%r10), %ymm0 vmovdqa 4544(%r10), %ymm1 vmovdqa 4608(%r10), %ymm2 vmovdqa 4672(%r10), %ymm3 vpunpcklwd 4512(%r10), %ymm0, %ymm12 vpunpckhwd 4512(%r10), %ymm0, %ymm13 vpunpcklwd 4576(%r10), %ymm1, %ymm14 vpunpckhwd 4576(%r10), %ymm1, %ymm15 vpunpcklwd 4640(%r10), %ymm2, %ymm0 vpunpckhwd 4640(%r10), %ymm2, %ymm1 vpunpcklwd 4704(%r10), %ymm3, %ymm2 vpunpckhwd 4704(%r10), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 96(%r12) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 288(%r12) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 480(%r12) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 672(%r12) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 864(%r12) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 1056(%r12) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1248(%r12) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1632(%r12) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1824(%r12) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 2016(%r12) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2208(%r12) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 2400(%r12) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 2592(%r12) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 2784(%r12) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 1440(%r12) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 2976(%r12) vmovdqa 4736(%r10), %ymm0 vmovdqa 4800(%r10), %ymm1 vmovdqa 4864(%r10), %ymm2 vmovdqa 4928(%r10), %ymm3 vpunpcklwd 4768(%r10), %ymm0, %ymm4 vpunpckhwd 4768(%r10), %ymm0, %ymm5 vpunpcklwd 4832(%r10), %ymm1, %ymm6 vpunpckhwd 4832(%r10), %ymm1, %ymm7 vpunpcklwd 4896(%r10), %ymm2, %ymm8 vpunpckhwd 4896(%r10), %ymm2, %ymm9 vpunpcklwd 4960(%r10), %ymm3, %ymm10 vpunpckhwd 4960(%r10), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 
vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 vmovdqa 4992(%r10), %ymm0 vmovdqa 5056(%r10), %ymm1 vmovdqa 5120(%r10), %ymm2 vmovdqa 5184(%r10), %ymm3 vpunpcklwd 5024(%r10), %ymm0, %ymm12 vpunpckhwd 5024(%r10), %ymm0, %ymm13 vpunpcklwd 5088(%r10), %ymm1, %ymm14 vpunpckhwd 5088(%r10), %ymm1, %ymm15 vpunpcklwd 5152(%r10), %ymm2, %ymm0 vpunpckhwd 5152(%r10), %ymm2, %ymm1 vpunpcklwd 5216(%r10), %ymm3, %ymm2 vpunpckhwd 5216(%r10), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 128(%r12) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 320(%r12) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 512(%r12) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 704(%r12) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 896(%r12) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 1088(%r12) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1280(%r12) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1664(%r12) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1856(%r12) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 2048(%r12) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2240(%r12) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 2432(%r12) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 2624(%r12) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 2816(%r12) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 1472(%r12) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 3008(%r12) vmovdqa 5248(%r10), %ymm0 vmovdqa 5312(%r10), %ymm1 vmovdqa 5376(%r10), %ymm2 vmovdqa 5440(%r10), %ymm3 vpunpcklwd 5280(%r10), %ymm0, %ymm4 vpunpckhwd 5280(%r10), %ymm0, %ymm5 vpunpcklwd 5344(%r10), %ymm1, %ymm6 vpunpckhwd 5344(%r10), %ymm1, %ymm7 vpunpcklwd 5408(%r10), %ymm2, %ymm8 vpunpckhwd 5408(%r10), %ymm2, %ymm9 vpunpcklwd 5472(%r10), %ymm3, %ymm10 vpunpckhwd 5472(%r10), %ymm3, %ymm11 vpunpckldq %ymm6, %ymm4, %ymm0 vpunpckhdq %ymm6, %ymm4, %ymm1 vpunpckldq %ymm7, %ymm5, %ymm2 vpunpckhdq %ymm7, %ymm5, %ymm3 vpunpckldq %ymm10, %ymm8, %ymm12 vpunpckhdq %ymm10, %ymm8, %ymm13 vpunpckldq %ymm11, %ymm9, %ymm14 vpunpckhdq %ymm11, %ymm9, %ymm15 vpunpcklqdq %ymm12, %ymm0, %ymm4 vpunpckhqdq %ymm12, %ymm0, %ymm5 vpunpcklqdq %ymm13, %ymm1, %ymm6 vpunpckhqdq %ymm13, %ymm1, %ymm7 vpunpcklqdq %ymm14, %ymm2, %ymm8 vpunpckhqdq %ymm14, %ymm2, %ymm9 vpunpcklqdq %ymm15, %ymm3, %ymm10 vpunpckhqdq %ymm15, %ymm3, %ymm11 
vmovdqa 5504(%r10), %ymm0 vmovdqa 5568(%r10), %ymm1 vmovdqa 5632(%r10), %ymm2 vmovdqa 5696(%r10), %ymm3 vpunpcklwd 5536(%r10), %ymm0, %ymm12 vpunpckhwd 5536(%r10), %ymm0, %ymm13 vpunpcklwd 5600(%r10), %ymm1, %ymm14 vpunpckhwd 5600(%r10), %ymm1, %ymm15 vpunpcklwd 5664(%r10), %ymm2, %ymm0 vpunpckhwd 5664(%r10), %ymm2, %ymm1 vpunpcklwd 5728(%r10), %ymm3, %ymm2 vpunpckhwd 5728(%r10), %ymm3, %ymm3 vmovdqa %ymm11, 0(%r8) vpunpckldq %ymm14, %ymm12, %ymm11 vpunpckhdq %ymm14, %ymm12, %ymm12 vpunpckldq %ymm15, %ymm13, %ymm14 vpunpckhdq %ymm15, %ymm13, %ymm15 vpunpckldq %ymm2, %ymm0, %ymm13 vpunpckhdq %ymm2, %ymm0, %ymm0 vpunpckldq %ymm3, %ymm1, %ymm2 vpunpckhdq %ymm3, %ymm1, %ymm1 vpunpcklqdq %ymm13, %ymm11, %ymm3 vpunpckhqdq %ymm13, %ymm11, %ymm13 vpunpcklqdq %ymm0, %ymm12, %ymm11 vpunpckhqdq %ymm0, %ymm12, %ymm0 vpunpcklqdq %ymm2, %ymm14, %ymm12 vpunpckhqdq %ymm2, %ymm14, %ymm2 vpunpcklqdq %ymm1, %ymm15, %ymm14 vpunpckhqdq %ymm1, %ymm15, %ymm1 vinserti128 $1, %xmm3, %ymm4, %ymm15 vmovdqa %ymm15, 160(%r12) vinserti128 $1, %xmm13, %ymm5, %ymm15 vmovdqa %ymm15, 352(%r12) vinserti128 $1, %xmm11, %ymm6, %ymm15 vmovdqa %ymm15, 544(%r12) vinserti128 $1, %xmm0, %ymm7, %ymm15 vmovdqa %ymm15, 736(%r12) vinserti128 $1, %xmm12, %ymm8, %ymm15 vmovdqa %ymm15, 928(%r12) vinserti128 $1, %xmm2, %ymm9, %ymm15 vmovdqa %ymm15, 1120(%r12) vinserti128 $1, %xmm14, %ymm10, %ymm15 vmovdqa %ymm15, 1312(%r12) vpermq $78, %ymm4, %ymm4 vpermq $78, %ymm5, %ymm5 vpermq $78, %ymm6, %ymm6 vpermq $78, %ymm7, %ymm7 vpermq $78, %ymm8, %ymm8 vpermq $78, %ymm9, %ymm9 vpermq $78, %ymm10, %ymm10 vinserti128 $0, %xmm4, %ymm3, %ymm15 vmovdqa %ymm15, 1696(%r12) vinserti128 $0, %xmm5, %ymm13, %ymm15 vmovdqa %ymm15, 1888(%r12) vinserti128 $0, %xmm6, %ymm11, %ymm15 vmovdqa %ymm15, 2080(%r12) vinserti128 $0, %xmm7, %ymm0, %ymm15 vmovdqa %ymm15, 2272(%r12) vinserti128 $0, %xmm8, %ymm12, %ymm15 vmovdqa %ymm15, 2464(%r12) vinserti128 $0, %xmm9, %ymm2, %ymm15 vmovdqa %ymm15, 2656(%r12) vinserti128 $0, %xmm10, %ymm14, %ymm15 vmovdqa %ymm15, 2848(%r12) vmovdqa 0(%r8), %ymm11 vinserti128 $1, %xmm1, %ymm11, %ymm14 vmovdqa %ymm14, 1504(%r12) vpermq $78, %ymm11, %ymm11 vinserti128 $0, %xmm11, %ymm1, %ymm1 vmovdqa %ymm1, 3040(%r12) addq $32, %r8 add $1536, %rax add $1536, %r11 add $3072, %r12 dec %ecx jnz karatsuba_loop_4eced63f144beffcb0247f9c6f67d165 sub $12288, %r12 add $9408-2400, %r8 vpxor %ymm0, %ymm0, %ymm0 vmovdqa %ymm0, 1792(%r8) vmovdqa %ymm0, 1824(%r8) vmovdqa %ymm0, 1856(%r8) vmovdqa %ymm0, 1888(%r8) vmovdqa %ymm0, 1920(%r8) vmovdqa %ymm0, 1952(%r8) vmovdqa %ymm0, 1984(%r8) vmovdqa %ymm0, 2016(%r8) vmovdqa %ymm0, 2048(%r8) vmovdqa %ymm0, 2080(%r8) vmovdqa %ymm0, 2112(%r8) vmovdqa %ymm0, 2144(%r8) vmovdqa %ymm0, 2176(%r8) vmovdqa %ymm0, 2208(%r8) vmovdqa %ymm0, 2240(%r8) vmovdqa %ymm0, 2272(%r8) vmovdqa %ymm0, 2304(%r8) vmovdqa %ymm0, 2336(%r8) vmovdqa %ymm0, 2368(%r8) vmovdqa %ymm0, 2400(%r8) vmovdqa %ymm0, 2432(%r8) vmovdqa %ymm0, 2464(%r8) vmovdqa %ymm0, 2496(%r8) vmovdqa %ymm0, 2528(%r8) vmovdqa %ymm0, 2560(%r8) vmovdqa %ymm0, 2592(%r8) vmovdqa %ymm0, 2624(%r8) vmovdqa %ymm0, 2656(%r8) vmovdqa %ymm0, 2688(%r8) vmovdqa %ymm0, 2720(%r8) vmovdqa %ymm0, 2752(%r8) vmovdqa %ymm0, 2784(%r8) vmovdqa const729(%rip), %ymm15 vmovdqa const3_inv(%rip), %ymm14 vmovdqa const5_inv(%rip), %ymm13 vmovdqa const9(%rip), %ymm12 vmovdqa 96(%r12), %ymm0 vpsubw 192(%r12), %ymm0, %ymm0 vmovdqa 480(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 288(%r12), %ymm1, %ymm1 vpsubw 0(%r12), %ymm0, %ymm0 vpaddw 384(%r12), %ymm0, %ymm0 vmovdqa 672(%r12), %ymm2 vpsubw 
768(%r12), %ymm2, %ymm2 vmovdqa 1056(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 864(%r12), %ymm3, %ymm3 vpsubw 576(%r12), %ymm2, %ymm2 vpaddw 960(%r12), %ymm2, %ymm2 vmovdqa 1248(%r12), %ymm4 vpsubw 1344(%r12), %ymm4, %ymm4 vmovdqa 1632(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 1440(%r12), %ymm5, %ymm5 vpsubw 1152(%r12), %ymm4, %ymm4 vpaddw 1536(%r12), %ymm4, %ymm4 vpsubw 576(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 0(%r12), %ymm1, %ymm1 vpaddw 1152(%r12), %ymm1, %ymm1 vmovdqa 288(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 1440(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 864(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 0(%r12), %ymm8 vmovdqa 864(%r12), %ymm9 vmovdqa %ymm8, 0(%r8) vmovdqa %ymm0, 32(%r8) vmovdqa %ymm1, 64(%r8) vmovdqa %ymm7, 96(%r8) vmovdqa %ymm5, 128(%r8) vmovdqa %ymm2, 160(%r8) vmovdqa %ymm3, 192(%r8) vmovdqa %ymm9, 224(%r8) vmovdqa 1824(%r12), %ymm0 vpsubw 1920(%r12), %ymm0, %ymm0 vmovdqa 2208(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 2016(%r12), %ymm1, %ymm1 vpsubw 1728(%r12), %ymm0, %ymm0 vpaddw 2112(%r12), %ymm0, %ymm0 vmovdqa 2400(%r12), %ymm2 vpsubw 2496(%r12), %ymm2, %ymm2 vmovdqa 2784(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 2592(%r12), %ymm3, %ymm3 vpsubw 2304(%r12), %ymm2, %ymm2 vpaddw 2688(%r12), %ymm2, %ymm2 vmovdqa 2976(%r12), %ymm4 vpsubw 3072(%r12), %ymm4, %ymm4 vmovdqa 3360(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 3168(%r12), %ymm5, %ymm5 vpsubw 2880(%r12), %ymm4, %ymm4 vpaddw 3264(%r12), %ymm4, %ymm4 vpsubw 2304(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 1728(%r12), %ymm1, %ymm1 vpaddw 2880(%r12), %ymm1, %ymm1 vmovdqa 2016(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 3168(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 2592(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 1728(%r12), %ymm8 vmovdqa 2592(%r12), %ymm9 vmovdqa %ymm8, 256(%r8) vmovdqa %ymm0, 288(%r8) vmovdqa %ymm1, 320(%r8) vmovdqa %ymm7, 352(%r8) vmovdqa %ymm5, 384(%r8) vmovdqa %ymm2, 416(%r8) vmovdqa %ymm3, 448(%r8) vmovdqa %ymm9, 480(%r8) vmovdqa 3552(%r12), %ymm0 vpsubw 3648(%r12), %ymm0, %ymm0 vmovdqa 3936(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 3744(%r12), %ymm1, %ymm1 vpsubw 3456(%r12), %ymm0, %ymm0 vpaddw 3840(%r12), %ymm0, %ymm0 vmovdqa 4128(%r12), %ymm2 vpsubw 4224(%r12), %ymm2, %ymm2 vmovdqa 4512(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 4320(%r12), %ymm3, %ymm3 vpsubw 4032(%r12), %ymm2, %ymm2 vpaddw 4416(%r12), %ymm2, %ymm2 vmovdqa 4704(%r12), %ymm4 vpsubw 4800(%r12), %ymm4, %ymm4 vmovdqa 5088(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 4896(%r12), %ymm5, %ymm5 vpsubw 4608(%r12), %ymm4, %ymm4 vpaddw 4992(%r12), %ymm4, %ymm4 vpsubw 4032(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 3456(%r12), %ymm1, %ymm1 vpaddw 4608(%r12), %ymm1, %ymm1 vmovdqa 3744(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 4896(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 4320(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 3456(%r12), %ymm8 vmovdqa 4320(%r12), %ymm9 vmovdqa %ymm8, 512(%r8) vmovdqa %ymm0, 544(%r8) vmovdqa %ymm1, 576(%r8) vmovdqa %ymm7, 608(%r8) vmovdqa %ymm5, 640(%r8) vmovdqa %ymm2, 672(%r8) vmovdqa %ymm3, 704(%r8) vmovdqa %ymm9, 736(%r8) vmovdqa 5280(%r12), %ymm0 vpsubw 5376(%r12), %ymm0, %ymm0 vmovdqa 5664(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5472(%r12), %ymm1, %ymm1 vpsubw 5184(%r12), %ymm0, %ymm0 vpaddw 
5568(%r12), %ymm0, %ymm0 vmovdqa 5856(%r12), %ymm2 vpsubw 5952(%r12), %ymm2, %ymm2 vmovdqa 6240(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 6048(%r12), %ymm3, %ymm3 vpsubw 5760(%r12), %ymm2, %ymm2 vpaddw 6144(%r12), %ymm2, %ymm2 vmovdqa 6432(%r12), %ymm4 vpsubw 6528(%r12), %ymm4, %ymm4 vmovdqa 6816(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 6624(%r12), %ymm5, %ymm5 vpsubw 6336(%r12), %ymm4, %ymm4 vpaddw 6720(%r12), %ymm4, %ymm4 vpsubw 5760(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 5184(%r12), %ymm1, %ymm1 vpaddw 6336(%r12), %ymm1, %ymm1 vmovdqa 5472(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 6624(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 6048(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 5184(%r12), %ymm8 vmovdqa 6048(%r12), %ymm9 vmovdqa %ymm8, 768(%r8) vmovdqa %ymm0, 800(%r8) vmovdqa %ymm1, 832(%r8) vmovdqa %ymm7, 864(%r8) vmovdqa %ymm5, 896(%r8) vmovdqa %ymm2, 928(%r8) vmovdqa %ymm3, 960(%r8) vmovdqa %ymm9, 992(%r8) vmovdqa 7008(%r12), %ymm0 vpsubw 7104(%r12), %ymm0, %ymm0 vmovdqa 7392(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 7200(%r12), %ymm1, %ymm1 vpsubw 6912(%r12), %ymm0, %ymm0 vpaddw 7296(%r12), %ymm0, %ymm0 vmovdqa 7584(%r12), %ymm2 vpsubw 7680(%r12), %ymm2, %ymm2 vmovdqa 7968(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 7776(%r12), %ymm3, %ymm3 vpsubw 7488(%r12), %ymm2, %ymm2 vpaddw 7872(%r12), %ymm2, %ymm2 vmovdqa 8160(%r12), %ymm4 vpsubw 8256(%r12), %ymm4, %ymm4 vmovdqa 8544(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 8352(%r12), %ymm5, %ymm5 vpsubw 8064(%r12), %ymm4, %ymm4 vpaddw 8448(%r12), %ymm4, %ymm4 vpsubw 7488(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 6912(%r12), %ymm1, %ymm1 vpaddw 8064(%r12), %ymm1, %ymm1 vmovdqa 7200(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 8352(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 7776(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 6912(%r12), %ymm8 vmovdqa 7776(%r12), %ymm9 vmovdqa %ymm8, 1024(%r8) vmovdqa %ymm0, 1056(%r8) vmovdqa %ymm1, 1088(%r8) vmovdqa %ymm7, 1120(%r8) vmovdqa %ymm5, 1152(%r8) vmovdqa %ymm2, 1184(%r8) vmovdqa %ymm3, 1216(%r8) vmovdqa %ymm9, 1248(%r8) vmovdqa 8736(%r12), %ymm0 vpsubw 8832(%r12), %ymm0, %ymm0 vmovdqa 9120(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 8928(%r12), %ymm1, %ymm1 vpsubw 8640(%r12), %ymm0, %ymm0 vpaddw 9024(%r12), %ymm0, %ymm0 vmovdqa 9312(%r12), %ymm2 vpsubw 9408(%r12), %ymm2, %ymm2 vmovdqa 9696(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 9504(%r12), %ymm3, %ymm3 vpsubw 9216(%r12), %ymm2, %ymm2 vpaddw 9600(%r12), %ymm2, %ymm2 vmovdqa 9888(%r12), %ymm4 vpsubw 9984(%r12), %ymm4, %ymm4 vmovdqa 10272(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 10080(%r12), %ymm5, %ymm5 vpsubw 9792(%r12), %ymm4, %ymm4 vpaddw 10176(%r12), %ymm4, %ymm4 vpsubw 9216(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 8640(%r12), %ymm1, %ymm1 vpaddw 9792(%r12), %ymm1, %ymm1 vmovdqa 8928(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 10080(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 9504(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 8640(%r12), %ymm8 vmovdqa 9504(%r12), %ymm9 vmovdqa %ymm8, 1280(%r8) vmovdqa %ymm0, 1312(%r8) vmovdqa %ymm1, 1344(%r8) vmovdqa %ymm7, 1376(%r8) vmovdqa %ymm5, 1408(%r8) vmovdqa %ymm2, 1440(%r8) vmovdqa %ymm3, 1472(%r8) vmovdqa %ymm9, 1504(%r8) vmovdqa 10464(%r12), %ymm0 vpsubw 10560(%r12), %ymm0, %ymm0 vmovdqa 10848(%r12), %ymm1 vpsubw 
%ymm0, %ymm1, %ymm1 vpsubw 10656(%r12), %ymm1, %ymm1 vpsubw 10368(%r12), %ymm0, %ymm0 vpaddw 10752(%r12), %ymm0, %ymm0 vmovdqa 11040(%r12), %ymm2 vpsubw 11136(%r12), %ymm2, %ymm2 vmovdqa 11424(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 11232(%r12), %ymm3, %ymm3 vpsubw 10944(%r12), %ymm2, %ymm2 vpaddw 11328(%r12), %ymm2, %ymm2 vmovdqa 11616(%r12), %ymm4 vpsubw 11712(%r12), %ymm4, %ymm4 vmovdqa 12000(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 11808(%r12), %ymm5, %ymm5 vpsubw 11520(%r12), %ymm4, %ymm4 vpaddw 11904(%r12), %ymm4, %ymm4 vpsubw 10944(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 10368(%r12), %ymm1, %ymm1 vpaddw 11520(%r12), %ymm1, %ymm1 vmovdqa 10656(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 11808(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 11232(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 10368(%r12), %ymm8 vmovdqa 11232(%r12), %ymm9 vmovdqa %ymm8, 1536(%r8) vmovdqa %ymm0, 1568(%r8) vmovdqa %ymm1, 1600(%r8) vmovdqa %ymm7, 1632(%r8) vmovdqa %ymm5, 1664(%r8) vmovdqa %ymm2, 1696(%r8) vmovdqa %ymm3, 1728(%r8) vmovdqa %ymm9, 1760(%r8) vmovdqa 0(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm10 vpunpckhwd const0(%rip), %ymm11, %ymm9 vpslld $1, %ymm10, %ymm10 vpslld $1, %ymm9, %ymm9 vmovdqa 256(%r8), %ymm8 vpunpcklwd const0(%rip), %ymm8, %ymm7 vpunpckhwd const0(%rip), %ymm8, %ymm8 vmovdqa 512(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm7, %ymm4 vpaddd %ymm6, %ymm8, %ymm3 vpsubd %ymm10, %ymm4, %ymm4 vpsubd %ymm9, %ymm3, %ymm3 vpsubd %ymm5, %ymm7, %ymm5 vpsubd %ymm6, %ymm8, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1536(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm8 vpunpckhwd const0(%rip), %ymm5, %ymm7 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm7, %ymm7 vpsubd %ymm8, %ymm4, %ymm4 vpsubd %ymm7, %ymm3, %ymm3 vpsrld $1, %ymm4, %ymm4 vpsrld $1, %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpackusdw %ymm3, %ymm4, %ymm3 vmovdqa 768(%r8), %ymm4 vpaddw 1024(%r8), %ymm4, %ymm7 vpsubw 1024(%r8), %ymm4, %ymm4 vpsrlw $2, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsllw $1, %ymm11, %ymm8 vpsubw %ymm8, %ymm7, %ymm8 vpsllw $7, %ymm5, %ymm7 vpsubw %ymm7, %ymm8, %ymm7 vpsrlw $3, %ymm7, %ymm7 vpsubw %ymm3, %ymm7, %ymm7 vmovdqa 1280(%r8), %ymm8 vpsubw %ymm11, %ymm8, %ymm8 vpmullw %ymm15, %ymm5, %ymm9 vpsubw %ymm9, %ymm8, %ymm9 vpmullw %ymm14, %ymm7, %ymm7 vpsubw %ymm7, %ymm3, %ymm3 vpmullw %ymm12, %ymm7, %ymm8 vpaddw %ymm8, %ymm3, %ymm8 vpmullw %ymm12, %ymm8, %ymm8 vpsubw %ymm8, %ymm9, %ymm8 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm6, %ymm8, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm4, %ymm8, %ymm8 vpsubw %ymm8, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vpmullw %ymm13, %ymm8, %ymm8 vpsubw %ymm8, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_3_5(%rip), %ymm7, %ymm9 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm10 vpor %ymm10, %ymm7, %ymm7 vpaddw %ymm7, %ymm11, %ymm11 vmovdqa %xmm9, 2048(%r8) vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_3_5(%rip), %ymm8, %ymm9 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm10 vpor %ymm10, %ymm8, %ymm8 vpaddw %ymm8, %ymm6, %ymm6 vmovdqa %xmm9, 2304(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand 
mask3_5_3_5(%rip), %ymm5, %ymm9 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm10 vpor %ymm10, %ymm5, %ymm5 vpaddw %ymm5, %ymm3, %ymm3 vmovdqa %xmm9, 2560(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 0(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 352(%rdi) vpand mask_mod8192(%rip), %ymm3, %ymm3 vmovdqu %ymm3, 704(%rdi) vpand mask_mod8192(%rip), %ymm4, %ymm4 vmovdqu %ymm4, 1056(%rdi) vmovdqa 32(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm8 vpunpckhwd const0(%rip), %ymm5, %ymm7 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm7, %ymm7 vmovdqa 288(%r8), %ymm4 vpunpcklwd const0(%rip), %ymm4, %ymm3 vpunpckhwd const0(%rip), %ymm4, %ymm4 vmovdqa 544(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm3, %ymm9 vpaddd %ymm6, %ymm4, %ymm10 vpsubd %ymm8, %ymm9, %ymm9 vpsubd %ymm7, %ymm10, %ymm10 vpsubd %ymm11, %ymm3, %ymm11 vpsubd %ymm6, %ymm4, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1568(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm4 vpunpckhwd const0(%rip), %ymm11, %ymm3 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm3, %ymm3 vpsubd %ymm4, %ymm9, %ymm9 vpsubd %ymm3, %ymm10, %ymm10 vpsrld $1, %ymm9, %ymm9 vpsrld $1, %ymm10, %ymm10 vpand mask32_to_16(%rip), %ymm9, %ymm9 vpand mask32_to_16(%rip), %ymm10, %ymm10 vpackusdw %ymm10, %ymm9, %ymm10 vmovdqa 800(%r8), %ymm9 vpaddw 1056(%r8), %ymm9, %ymm3 vpsubw 1056(%r8), %ymm9, %ymm9 vpsrlw $2, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsllw $1, %ymm5, %ymm4 vpsubw %ymm4, %ymm3, %ymm4 vpsllw $7, %ymm11, %ymm3 vpsubw %ymm3, %ymm4, %ymm3 vpsrlw $3, %ymm3, %ymm3 vpsubw %ymm10, %ymm3, %ymm3 vmovdqa 1312(%r8), %ymm4 vpsubw %ymm5, %ymm4, %ymm4 vpmullw %ymm15, %ymm11, %ymm7 vpsubw %ymm7, %ymm4, %ymm7 vpmullw %ymm14, %ymm3, %ymm3 vpsubw %ymm3, %ymm10, %ymm10 vpmullw %ymm12, %ymm3, %ymm4 vpaddw %ymm4, %ymm10, %ymm4 vpmullw %ymm12, %ymm4, %ymm4 vpsubw %ymm4, %ymm7, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpsrlw $3, %ymm4, %ymm4 vpsubw %ymm9, %ymm4, %ymm4 vpsubw %ymm4, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vpmullw %ymm13, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm3, %ymm3 vpand mask3_5_3_5(%rip), %ymm3, %ymm7 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $206, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm8 vpor %ymm8, %ymm3, %ymm3 vpaddw %ymm3, %ymm5, %ymm5 vmovdqa %xmm7, 2080(%r8) vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_3_5(%rip), %ymm4, %ymm7 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $206, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm8 vpor %ymm8, %ymm4, %ymm4 vpaddw %ymm4, %ymm6, %ymm6 vmovdqa %xmm7, 2336(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm7 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm8 vpor %ymm8, %ymm11, %ymm11 vpaddw %ymm11, %ymm10, %ymm10 vmovdqa %xmm7, 2592(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 88(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 440(%rdi) vpand mask_mod8192(%rip), %ymm10, %ymm10 vmovdqu %ymm10, 792(%rdi) vpand mask_mod8192(%rip), %ymm9, %ymm9 vmovdqu %ymm9, 1144(%rdi) vmovdqa 64(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm4 vpunpckhwd const0(%rip), %ymm11, %ymm3 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm3, %ymm3 vmovdqa 320(%r8), %ymm9 
vpunpcklwd const0(%rip), %ymm9, %ymm10 vpunpckhwd const0(%rip), %ymm9, %ymm9 vmovdqa 576(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm10, %ymm7 vpaddd %ymm6, %ymm9, %ymm8 vpsubd %ymm4, %ymm7, %ymm7 vpsubd %ymm3, %ymm8, %ymm8 vpsubd %ymm5, %ymm10, %ymm5 vpsubd %ymm6, %ymm9, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1600(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm9 vpunpckhwd const0(%rip), %ymm5, %ymm10 vpslld $1, %ymm9, %ymm9 vpslld $1, %ymm10, %ymm10 vpsubd %ymm9, %ymm7, %ymm7 vpsubd %ymm10, %ymm8, %ymm8 vpsrld $1, %ymm7, %ymm7 vpsrld $1, %ymm8, %ymm8 vpand mask32_to_16(%rip), %ymm7, %ymm7 vpand mask32_to_16(%rip), %ymm8, %ymm8 vpackusdw %ymm8, %ymm7, %ymm8 vmovdqa 832(%r8), %ymm7 vpaddw 1088(%r8), %ymm7, %ymm10 vpsubw 1088(%r8), %ymm7, %ymm7 vpsrlw $2, %ymm7, %ymm7 vpsubw %ymm6, %ymm7, %ymm7 vpmullw %ymm14, %ymm7, %ymm7 vpsllw $1, %ymm11, %ymm9 vpsubw %ymm9, %ymm10, %ymm9 vpsllw $7, %ymm5, %ymm10 vpsubw %ymm10, %ymm9, %ymm10 vpsrlw $3, %ymm10, %ymm10 vpsubw %ymm8, %ymm10, %ymm10 vmovdqa 1344(%r8), %ymm9 vpsubw %ymm11, %ymm9, %ymm9 vpmullw %ymm15, %ymm5, %ymm3 vpsubw %ymm3, %ymm9, %ymm3 vpmullw %ymm14, %ymm10, %ymm10 vpsubw %ymm10, %ymm8, %ymm8 vpmullw %ymm12, %ymm10, %ymm9 vpaddw %ymm9, %ymm8, %ymm9 vpmullw %ymm12, %ymm9, %ymm9 vpsubw %ymm9, %ymm3, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpsrlw $3, %ymm9, %ymm9 vpsubw %ymm7, %ymm9, %ymm9 vpsubw %ymm9, %ymm7, %ymm7 vpsubw %ymm7, %ymm6, %ymm6 vpmullw %ymm13, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_3_5(%rip), %ymm10, %ymm3 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm4 vpor %ymm4, %ymm10, %ymm10 vpaddw %ymm10, %ymm11, %ymm11 vmovdqa %xmm3, 2112(%r8) vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_3_5(%rip), %ymm9, %ymm3 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm4 vpor %ymm4, %ymm9, %ymm9 vpaddw %ymm9, %ymm6, %ymm6 vmovdqa %xmm3, 2368(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_3_5(%rip), %ymm5, %ymm3 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm4 vpor %ymm4, %ymm5, %ymm5 vpaddw %ymm5, %ymm8, %ymm8 vmovdqa %xmm3, 2624(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 176(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 528(%rdi) vpand mask_mod8192(%rip), %ymm8, %ymm8 vmovdqu %ymm8, 880(%rdi) vpand mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %ymm7, 1232(%rdi) vmovdqa 96(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm9 vpunpckhwd const0(%rip), %ymm5, %ymm10 vpslld $1, %ymm9, %ymm9 vpslld $1, %ymm10, %ymm10 vmovdqa 352(%r8), %ymm7 vpunpcklwd const0(%rip), %ymm7, %ymm8 vpunpckhwd const0(%rip), %ymm7, %ymm7 vmovdqa 608(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm8, %ymm3 vpaddd %ymm6, %ymm7, %ymm4 vpsubd %ymm9, %ymm3, %ymm3 vpsubd %ymm10, %ymm4, %ymm4 vpsubd %ymm11, %ymm8, %ymm11 vpsubd %ymm6, %ymm7, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1632(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm7 vpunpckhwd const0(%rip), %ymm11, %ymm8 vpslld $1, %ymm7, %ymm7 vpslld $1, %ymm8, 
%ymm8 vpsubd %ymm7, %ymm3, %ymm3 vpsubd %ymm8, %ymm4, %ymm4 vpsrld $1, %ymm3, %ymm3 vpsrld $1, %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpackusdw %ymm4, %ymm3, %ymm4 vmovdqa 864(%r8), %ymm3 vpaddw 1120(%r8), %ymm3, %ymm8 vpsubw 1120(%r8), %ymm3, %ymm3 vpsrlw $2, %ymm3, %ymm3 vpsubw %ymm6, %ymm3, %ymm3 vpmullw %ymm14, %ymm3, %ymm3 vpsllw $1, %ymm5, %ymm7 vpsubw %ymm7, %ymm8, %ymm7 vpsllw $7, %ymm11, %ymm8 vpsubw %ymm8, %ymm7, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm4, %ymm8, %ymm8 vmovdqa 1376(%r8), %ymm7 vpsubw %ymm5, %ymm7, %ymm7 vpmullw %ymm15, %ymm11, %ymm10 vpsubw %ymm10, %ymm7, %ymm10 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm8, %ymm4, %ymm4 vpmullw %ymm12, %ymm8, %ymm7 vpaddw %ymm7, %ymm4, %ymm7 vpmullw %ymm12, %ymm7, %ymm7 vpsubw %ymm7, %ymm10, %ymm7 vpmullw %ymm14, %ymm7, %ymm7 vpsubw %ymm6, %ymm7, %ymm7 vpsrlw $3, %ymm7, %ymm7 vpsubw %ymm3, %ymm7, %ymm7 vpsubw %ymm7, %ymm3, %ymm3 vpsubw %ymm3, %ymm6, %ymm6 vpmullw %ymm13, %ymm7, %ymm7 vpsubw %ymm7, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_3_5(%rip), %ymm8, %ymm10 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $206, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm9 vpor %ymm9, %ymm8, %ymm8 vpaddw %ymm8, %ymm5, %ymm5 vmovdqa %xmm10, 2144(%r8) vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_3_5(%rip), %ymm7, %ymm10 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $206, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm9 vpor %ymm9, %ymm7, %ymm7 vpaddw %ymm7, %ymm6, %ymm6 vmovdqa %xmm10, 2400(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm10 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm9 vpor %ymm9, %ymm11, %ymm11 vpaddw %ymm11, %ymm4, %ymm4 vmovdqa %xmm10, 2656(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 264(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 616(%rdi) vpand mask_mod8192(%rip), %ymm4, %ymm4 vmovdqu %ymm4, 968(%rdi) vpand mask_mod8192(%rip), %ymm3, %ymm3 vmovdqu %ymm3, 1320(%rdi) vmovdqa 128(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm7 vpunpckhwd const0(%rip), %ymm11, %ymm8 vpslld $1, %ymm7, %ymm7 vpslld $1, %ymm8, %ymm8 vmovdqa 384(%r8), %ymm3 vpunpcklwd const0(%rip), %ymm3, %ymm4 vpunpckhwd const0(%rip), %ymm3, %ymm3 vmovdqa 640(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm4, %ymm10 vpaddd %ymm6, %ymm3, %ymm9 vpsubd %ymm7, %ymm10, %ymm10 vpsubd %ymm8, %ymm9, %ymm9 vpsubd %ymm5, %ymm4, %ymm5 vpsubd %ymm6, %ymm3, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1664(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm3 vpunpckhwd const0(%rip), %ymm5, %ymm4 vpslld $1, %ymm3, %ymm3 vpslld $1, %ymm4, %ymm4 vpsubd %ymm3, %ymm10, %ymm10 vpsubd %ymm4, %ymm9, %ymm9 vpsrld $1, %ymm10, %ymm10 vpsrld $1, %ymm9, %ymm9 vpand mask32_to_16(%rip), %ymm10, %ymm10 vpand mask32_to_16(%rip), %ymm9, %ymm9 vpackusdw %ymm9, %ymm10, %ymm9 vmovdqa 896(%r8), %ymm10 vpaddw 1152(%r8), %ymm10, %ymm4 vpsubw 1152(%r8), %ymm10, %ymm10 vpsrlw $2, %ymm10, %ymm10 vpsubw %ymm6, %ymm10, %ymm10 vpmullw %ymm14, %ymm10, %ymm10 vpsllw $1, %ymm11, %ymm3 vpsubw %ymm3, %ymm4, %ymm3 vpsllw $7, %ymm5, %ymm4 vpsubw %ymm4, %ymm3, %ymm4 vpsrlw $3, %ymm4, %ymm4 vpsubw %ymm9, %ymm4, %ymm4 vmovdqa 1408(%r8), %ymm3 vpsubw %ymm11, %ymm3, %ymm3 vpmullw %ymm15, %ymm5, %ymm8 vpsubw %ymm8, 
%ymm3, %ymm8 vpmullw %ymm14, %ymm4, %ymm4 vpsubw %ymm4, %ymm9, %ymm9 vpmullw %ymm12, %ymm4, %ymm3 vpaddw %ymm3, %ymm9, %ymm3 vpmullw %ymm12, %ymm3, %ymm3 vpsubw %ymm3, %ymm8, %ymm3 vpmullw %ymm14, %ymm3, %ymm3 vpsubw %ymm6, %ymm3, %ymm3 vpsrlw $3, %ymm3, %ymm3 vpsubw %ymm10, %ymm3, %ymm3 vpsubw %ymm3, %ymm10, %ymm10 vpsubw %ymm10, %ymm6, %ymm6 vpmullw %ymm13, %ymm3, %ymm3 vpsubw %ymm3, %ymm6, %ymm6 vmovdqu 352(%rdi), %ymm8 vmovdqu 704(%rdi), %ymm7 vmovdqu 1056(%rdi), %ymm2 vpaddw %ymm11, %ymm8, %ymm11 vpaddw %ymm6, %ymm7, %ymm6 vpaddw %ymm9, %ymm2, %ymm9 vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_3_5(%rip), %ymm10, %ymm2 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $206, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm7 vpor %ymm7, %ymm10, %ymm10 vmovdqu 0(%rdi), %ymm7 vpaddw %ymm10, %ymm7, %ymm7 vpand mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %ymm7, 0(%rdi) vmovdqa %xmm2, 1920(%r8) vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_3_5(%rip), %ymm4, %ymm2 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $206, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm7 vpor %ymm7, %ymm4, %ymm4 vpaddw %ymm4, %ymm11, %ymm11 vmovdqa %xmm2, 2176(%r8) vpshufb shuf48_16(%rip), %ymm3, %ymm3 vpand mask3_5_3_5(%rip), %ymm3, %ymm2 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $206, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm7 vpor %ymm7, %ymm3, %ymm3 vpaddw %ymm3, %ymm6, %ymm6 vmovdqa %xmm2, 2432(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_3_5(%rip), %ymm5, %ymm2 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $206, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm7 vpor %ymm7, %ymm5, %ymm5 vpaddw %ymm5, %ymm9, %ymm9 vmovdqa %xmm2, 2688(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 352(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 704(%rdi) vpand mask_mod8192(%rip), %ymm9, %ymm9 vmovdqu %ymm9, 1056(%rdi) vmovdqa 160(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm3 vpunpckhwd const0(%rip), %ymm5, %ymm4 vpslld $1, %ymm3, %ymm3 vpslld $1, %ymm4, %ymm4 vmovdqa 416(%r8), %ymm10 vpunpcklwd const0(%rip), %ymm10, %ymm9 vpunpckhwd const0(%rip), %ymm10, %ymm10 vmovdqa 672(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm9, %ymm2 vpaddd %ymm6, %ymm10, %ymm7 vpsubd %ymm3, %ymm2, %ymm2 vpsubd %ymm4, %ymm7, %ymm7 vpsubd %ymm11, %ymm9, %ymm11 vpsubd %ymm6, %ymm10, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1696(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm10 vpunpckhwd const0(%rip), %ymm11, %ymm9 vpslld $1, %ymm10, %ymm10 vpslld $1, %ymm9, %ymm9 vpsubd %ymm10, %ymm2, %ymm2 vpsubd %ymm9, %ymm7, %ymm7 vpsrld $1, %ymm2, %ymm2 vpsrld $1, %ymm7, %ymm7 vpand mask32_to_16(%rip), %ymm2, %ymm2 vpand mask32_to_16(%rip), %ymm7, %ymm7 vpackusdw %ymm7, %ymm2, %ymm7 vmovdqa 928(%r8), %ymm2 vpaddw 1184(%r8), %ymm2, %ymm9 vpsubw 1184(%r8), %ymm2, %ymm2 vpsrlw $2, %ymm2, %ymm2 vpsubw %ymm6, %ymm2, %ymm2 vpmullw %ymm14, %ymm2, %ymm2 vpsllw $1, %ymm5, %ymm10 vpsubw %ymm10, %ymm9, %ymm10 vpsllw $7, %ymm11, %ymm9 vpsubw %ymm9, %ymm10, %ymm9 vpsrlw $3, %ymm9, %ymm9 vpsubw %ymm7, %ymm9, %ymm9 vmovdqa 1440(%r8), %ymm10 vpsubw %ymm5, %ymm10, %ymm10 vpmullw %ymm15, %ymm11, %ymm4 vpsubw %ymm4, %ymm10, %ymm4 vpmullw %ymm14, %ymm9, %ymm9 vpsubw %ymm9, %ymm7, %ymm7 vpmullw %ymm12, %ymm9, %ymm10 vpaddw %ymm10, %ymm7, %ymm10 vpmullw %ymm12, %ymm10, %ymm10 vpsubw %ymm10, %ymm4, %ymm10 vpmullw %ymm14, 
%ymm10, %ymm10 vpsubw %ymm6, %ymm10, %ymm10 vpsrlw $3, %ymm10, %ymm10 vpsubw %ymm2, %ymm10, %ymm10 vpsubw %ymm10, %ymm2, %ymm2 vpsubw %ymm2, %ymm6, %ymm6 vpmullw %ymm13, %ymm10, %ymm10 vpsubw %ymm10, %ymm6, %ymm6 vmovdqu 440(%rdi), %ymm4 vmovdqu 792(%rdi), %ymm3 vmovdqu 1144(%rdi), %ymm8 vpaddw %ymm5, %ymm4, %ymm5 vpaddw %ymm6, %ymm3, %ymm6 vpaddw %ymm7, %ymm8, %ymm7 vpshufb shuf48_16(%rip), %ymm2, %ymm2 vpand mask3_5_3_5(%rip), %ymm2, %ymm8 vpand mask5_3_5_3(%rip), %ymm2, %ymm2 vpermq $206, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm3 vpor %ymm3, %ymm2, %ymm2 vmovdqu 88(%rdi), %ymm3 vpaddw %ymm2, %ymm3, %ymm3 vpand mask_mod8192(%rip), %ymm3, %ymm3 vmovdqu %ymm3, 88(%rdi) vmovdqa %xmm8, 1952(%r8) vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_3_5(%rip), %ymm9, %ymm8 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $206, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm3 vpor %ymm3, %ymm9, %ymm9 vpaddw %ymm9, %ymm5, %ymm5 vmovdqa %xmm8, 2208(%r8) vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_3_5(%rip), %ymm10, %ymm8 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $206, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm3 vpor %ymm3, %ymm10, %ymm10 vpaddw %ymm10, %ymm6, %ymm6 vmovdqa %xmm8, 2464(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm8 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm3 vpor %ymm3, %ymm11, %ymm11 vpaddw %ymm11, %ymm7, %ymm7 vmovdqa %xmm8, 2720(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 440(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 792(%rdi) vpand mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %ymm7, 1144(%rdi) vmovdqa 192(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm10 vpunpckhwd const0(%rip), %ymm11, %ymm9 vpslld $1, %ymm10, %ymm10 vpslld $1, %ymm9, %ymm9 vmovdqa 448(%r8), %ymm2 vpunpcklwd const0(%rip), %ymm2, %ymm7 vpunpckhwd const0(%rip), %ymm2, %ymm2 vmovdqa 704(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm7, %ymm8 vpaddd %ymm6, %ymm2, %ymm3 vpsubd %ymm10, %ymm8, %ymm8 vpsubd %ymm9, %ymm3, %ymm3 vpsubd %ymm5, %ymm7, %ymm5 vpsubd %ymm6, %ymm2, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1728(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm2 vpunpckhwd const0(%rip), %ymm5, %ymm7 vpslld $1, %ymm2, %ymm2 vpslld $1, %ymm7, %ymm7 vpsubd %ymm2, %ymm8, %ymm8 vpsubd %ymm7, %ymm3, %ymm3 vpsrld $1, %ymm8, %ymm8 vpsrld $1, %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm8, %ymm8 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpackusdw %ymm3, %ymm8, %ymm3 vmovdqa 960(%r8), %ymm8 vpaddw 1216(%r8), %ymm8, %ymm7 vpsubw 1216(%r8), %ymm8, %ymm8 vpsrlw $2, %ymm8, %ymm8 vpsubw %ymm6, %ymm8, %ymm8 vpmullw %ymm14, %ymm8, %ymm8 vpsllw $1, %ymm11, %ymm2 vpsubw %ymm2, %ymm7, %ymm2 vpsllw $7, %ymm5, %ymm7 vpsubw %ymm7, %ymm2, %ymm7 vpsrlw $3, %ymm7, %ymm7 vpsubw %ymm3, %ymm7, %ymm7 vmovdqa 1472(%r8), %ymm2 vpsubw %ymm11, %ymm2, %ymm2 vpmullw %ymm15, %ymm5, %ymm9 vpsubw %ymm9, %ymm2, %ymm9 vpmullw %ymm14, %ymm7, %ymm7 vpsubw %ymm7, %ymm3, %ymm3 vpmullw %ymm12, %ymm7, %ymm2 vpaddw %ymm2, %ymm3, %ymm2 vpmullw %ymm12, %ymm2, %ymm2 vpsubw %ymm2, %ymm9, %ymm2 vpmullw %ymm14, %ymm2, %ymm2 vpsubw %ymm6, %ymm2, %ymm2 vpsrlw $3, %ymm2, %ymm2 vpsubw %ymm8, %ymm2, %ymm2 vpsubw %ymm2, %ymm8, %ymm8 vpsubw %ymm8, %ymm6, %ymm6 vpmullw %ymm13, %ymm2, %ymm2 vpsubw %ymm2, %ymm6, %ymm6 vmovdqu 
528(%rdi), %ymm9 vmovdqu 880(%rdi), %ymm10 vmovdqu 1232(%rdi), %ymm4 vpaddw %ymm11, %ymm9, %ymm11 vpaddw %ymm6, %ymm10, %ymm6 vpaddw %ymm3, %ymm4, %ymm3 vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_3_5(%rip), %ymm8, %ymm4 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm10 vpor %ymm10, %ymm8, %ymm8 vmovdqu 176(%rdi), %ymm10 vpaddw %ymm8, %ymm10, %ymm10 vpand mask_mod8192(%rip), %ymm10, %ymm10 vmovdqu %ymm10, 176(%rdi) vmovdqa %xmm4, 1984(%r8) vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_3_5(%rip), %ymm7, %ymm4 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm10 vpor %ymm10, %ymm7, %ymm7 vpaddw %ymm7, %ymm11, %ymm11 vmovdqa %xmm4, 2240(%r8) vpshufb shuf48_16(%rip), %ymm2, %ymm2 vpand mask3_5_3_5(%rip), %ymm2, %ymm4 vpand mask5_3_5_3(%rip), %ymm2, %ymm2 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm10 vpor %ymm10, %ymm2, %ymm2 vpaddw %ymm2, %ymm6, %ymm6 vmovdqa %xmm4, 2496(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_3_5(%rip), %ymm5, %ymm4 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm10 vpor %ymm10, %ymm5, %ymm5 vpaddw %ymm5, %ymm3, %ymm3 vmovdqa %xmm4, 2752(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 528(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 880(%rdi) vpand mask_mod8192(%rip), %ymm3, %ymm3 vmovdqu %ymm3, 1232(%rdi) vmovdqa 224(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm2 vpunpckhwd const0(%rip), %ymm5, %ymm7 vpslld $1, %ymm2, %ymm2 vpslld $1, %ymm7, %ymm7 vmovdqa 480(%r8), %ymm8 vpunpcklwd const0(%rip), %ymm8, %ymm3 vpunpckhwd const0(%rip), %ymm8, %ymm8 vmovdqa 736(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm3, %ymm4 vpaddd %ymm6, %ymm8, %ymm10 vpsubd %ymm2, %ymm4, %ymm4 vpsubd %ymm7, %ymm10, %ymm10 vpsubd %ymm11, %ymm3, %ymm11 vpsubd %ymm6, %ymm8, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1760(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm8 vpunpckhwd const0(%rip), %ymm11, %ymm3 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm3, %ymm3 vpsubd %ymm8, %ymm4, %ymm4 vpsubd %ymm3, %ymm10, %ymm10 vpsrld $1, %ymm4, %ymm4 vpsrld $1, %ymm10, %ymm10 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm10, %ymm10 vpackusdw %ymm10, %ymm4, %ymm10 vmovdqa 992(%r8), %ymm4 vpaddw 1248(%r8), %ymm4, %ymm3 vpsubw 1248(%r8), %ymm4, %ymm4 vpsrlw $2, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsllw $1, %ymm5, %ymm8 vpsubw %ymm8, %ymm3, %ymm8 vpsllw $7, %ymm11, %ymm3 vpsubw %ymm3, %ymm8, %ymm3 vpsrlw $3, %ymm3, %ymm3 vpsubw %ymm10, %ymm3, %ymm3 vmovdqa 1504(%r8), %ymm8 vpsubw %ymm5, %ymm8, %ymm8 vpmullw %ymm15, %ymm11, %ymm7 vpsubw %ymm7, %ymm8, %ymm7 vpmullw %ymm14, %ymm3, %ymm3 vpsubw %ymm3, %ymm10, %ymm10 vpmullw %ymm12, %ymm3, %ymm8 vpaddw %ymm8, %ymm10, %ymm8 vpmullw %ymm12, %ymm8, %ymm8 vpsubw %ymm8, %ymm7, %ymm8 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm6, %ymm8, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm4, %ymm8, %ymm8 vpsubw %ymm8, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vpmullw %ymm13, %ymm8, %ymm8 vpsubw %ymm8, %ymm6, %ymm6 vmovdqu 616(%rdi), %ymm7 vmovdqu 968(%rdi), %ymm2 vmovdqu 1320(%rdi), %ymm9 vpaddw %ymm5, %ymm7, %ymm5 vpaddw %ymm6, %ymm2, %ymm6 vpaddw %ymm10, %ymm9, %ymm10 vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand 
mask3_5_3_5(%rip), %ymm4, %ymm9 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm2 vpor %ymm2, %ymm4, %ymm4 vmovdqu 264(%rdi), %ymm2 vpaddw %ymm4, %ymm2, %ymm2 vpand mask_mod8192(%rip), %ymm2, %ymm2 vmovdqu %ymm2, 264(%rdi) vmovdqa %xmm9, 2016(%r8) vpshufb shuf48_16(%rip), %ymm3, %ymm3 vpand mask3_5_3_5(%rip), %ymm3, %ymm9 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm2 vpor %ymm2, %ymm3, %ymm3 vpaddw %ymm3, %ymm5, %ymm5 vmovdqa %xmm9, 2272(%r8) vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_3_5(%rip), %ymm8, %ymm9 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm2 vpor %ymm2, %ymm8, %ymm8 vpaddw %ymm8, %ymm6, %ymm6 vmovdqa %xmm9, 2528(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm9 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm2 vpor %ymm2, %ymm11, %ymm11 vpaddw %ymm11, %ymm10, %ymm10 vmovdqa %xmm9, 2784(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 616(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 968(%rdi) vpand mask_mod8192(%rip), %ymm10, %ymm10 vmovdqu %ymm10, 1320(%rdi) vmovdqa 128(%r12), %ymm0 vpsubw 224(%r12), %ymm0, %ymm0 vmovdqa 512(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 320(%r12), %ymm1, %ymm1 vpsubw 32(%r12), %ymm0, %ymm0 vpaddw 416(%r12), %ymm0, %ymm0 vmovdqa 704(%r12), %ymm2 vpsubw 800(%r12), %ymm2, %ymm2 vmovdqa 1088(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 896(%r12), %ymm3, %ymm3 vpsubw 608(%r12), %ymm2, %ymm2 vpaddw 992(%r12), %ymm2, %ymm2 vmovdqa 1280(%r12), %ymm4 vpsubw 1376(%r12), %ymm4, %ymm4 vmovdqa 1664(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 1472(%r12), %ymm5, %ymm5 vpsubw 1184(%r12), %ymm4, %ymm4 vpaddw 1568(%r12), %ymm4, %ymm4 vpsubw 608(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 32(%r12), %ymm1, %ymm1 vpaddw 1184(%r12), %ymm1, %ymm1 vmovdqa 320(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 1472(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 896(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 32(%r12), %ymm8 vmovdqa 896(%r12), %ymm9 vmovdqa %ymm8, 0(%r8) vmovdqa %ymm0, 32(%r8) vmovdqa %ymm1, 64(%r8) vmovdqa %ymm7, 96(%r8) vmovdqa %ymm5, 128(%r8) vmovdqa %ymm2, 160(%r8) vmovdqa %ymm3, 192(%r8) vmovdqa %ymm9, 224(%r8) vmovdqa 1856(%r12), %ymm0 vpsubw 1952(%r12), %ymm0, %ymm0 vmovdqa 2240(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 2048(%r12), %ymm1, %ymm1 vpsubw 1760(%r12), %ymm0, %ymm0 vpaddw 2144(%r12), %ymm0, %ymm0 vmovdqa 2432(%r12), %ymm2 vpsubw 2528(%r12), %ymm2, %ymm2 vmovdqa 2816(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 2624(%r12), %ymm3, %ymm3 vpsubw 2336(%r12), %ymm2, %ymm2 vpaddw 2720(%r12), %ymm2, %ymm2 vmovdqa 3008(%r12), %ymm4 vpsubw 3104(%r12), %ymm4, %ymm4 vmovdqa 3392(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 3200(%r12), %ymm5, %ymm5 vpsubw 2912(%r12), %ymm4, %ymm4 vpaddw 3296(%r12), %ymm4, %ymm4 vpsubw 2336(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 1760(%r12), %ymm1, %ymm1 vpaddw 2912(%r12), %ymm1, %ymm1 vmovdqa 2048(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 3200(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 2624(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 1760(%r12), %ymm8 vmovdqa 2624(%r12), %ymm9 vmovdqa %ymm8, 256(%r8) vmovdqa %ymm0, 288(%r8) vmovdqa %ymm1, 320(%r8) 
vmovdqa %ymm7, 352(%r8) vmovdqa %ymm5, 384(%r8) vmovdqa %ymm2, 416(%r8) vmovdqa %ymm3, 448(%r8) vmovdqa %ymm9, 480(%r8) vmovdqa 3584(%r12), %ymm0 vpsubw 3680(%r12), %ymm0, %ymm0 vmovdqa 3968(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 3776(%r12), %ymm1, %ymm1 vpsubw 3488(%r12), %ymm0, %ymm0 vpaddw 3872(%r12), %ymm0, %ymm0 vmovdqa 4160(%r12), %ymm2 vpsubw 4256(%r12), %ymm2, %ymm2 vmovdqa 4544(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 4352(%r12), %ymm3, %ymm3 vpsubw 4064(%r12), %ymm2, %ymm2 vpaddw 4448(%r12), %ymm2, %ymm2 vmovdqa 4736(%r12), %ymm4 vpsubw 4832(%r12), %ymm4, %ymm4 vmovdqa 5120(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 4928(%r12), %ymm5, %ymm5 vpsubw 4640(%r12), %ymm4, %ymm4 vpaddw 5024(%r12), %ymm4, %ymm4 vpsubw 4064(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 3488(%r12), %ymm1, %ymm1 vpaddw 4640(%r12), %ymm1, %ymm1 vmovdqa 3776(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 4928(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 4352(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 3488(%r12), %ymm8 vmovdqa 4352(%r12), %ymm9 vmovdqa %ymm8, 512(%r8) vmovdqa %ymm0, 544(%r8) vmovdqa %ymm1, 576(%r8) vmovdqa %ymm7, 608(%r8) vmovdqa %ymm5, 640(%r8) vmovdqa %ymm2, 672(%r8) vmovdqa %ymm3, 704(%r8) vmovdqa %ymm9, 736(%r8) vmovdqa 5312(%r12), %ymm0 vpsubw 5408(%r12), %ymm0, %ymm0 vmovdqa 5696(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5504(%r12), %ymm1, %ymm1 vpsubw 5216(%r12), %ymm0, %ymm0 vpaddw 5600(%r12), %ymm0, %ymm0 vmovdqa 5888(%r12), %ymm2 vpsubw 5984(%r12), %ymm2, %ymm2 vmovdqa 6272(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 6080(%r12), %ymm3, %ymm3 vpsubw 5792(%r12), %ymm2, %ymm2 vpaddw 6176(%r12), %ymm2, %ymm2 vmovdqa 6464(%r12), %ymm4 vpsubw 6560(%r12), %ymm4, %ymm4 vmovdqa 6848(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 6656(%r12), %ymm5, %ymm5 vpsubw 6368(%r12), %ymm4, %ymm4 vpaddw 6752(%r12), %ymm4, %ymm4 vpsubw 5792(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 5216(%r12), %ymm1, %ymm1 vpaddw 6368(%r12), %ymm1, %ymm1 vmovdqa 5504(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 6656(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 6080(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 5216(%r12), %ymm8 vmovdqa 6080(%r12), %ymm9 vmovdqa %ymm8, 768(%r8) vmovdqa %ymm0, 800(%r8) vmovdqa %ymm1, 832(%r8) vmovdqa %ymm7, 864(%r8) vmovdqa %ymm5, 896(%r8) vmovdqa %ymm2, 928(%r8) vmovdqa %ymm3, 960(%r8) vmovdqa %ymm9, 992(%r8) vmovdqa 7040(%r12), %ymm0 vpsubw 7136(%r12), %ymm0, %ymm0 vmovdqa 7424(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 7232(%r12), %ymm1, %ymm1 vpsubw 6944(%r12), %ymm0, %ymm0 vpaddw 7328(%r12), %ymm0, %ymm0 vmovdqa 7616(%r12), %ymm2 vpsubw 7712(%r12), %ymm2, %ymm2 vmovdqa 8000(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 7808(%r12), %ymm3, %ymm3 vpsubw 7520(%r12), %ymm2, %ymm2 vpaddw 7904(%r12), %ymm2, %ymm2 vmovdqa 8192(%r12), %ymm4 vpsubw 8288(%r12), %ymm4, %ymm4 vmovdqa 8576(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 8384(%r12), %ymm5, %ymm5 vpsubw 8096(%r12), %ymm4, %ymm4 vpaddw 8480(%r12), %ymm4, %ymm4 vpsubw 7520(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 6944(%r12), %ymm1, %ymm1 vpaddw 8096(%r12), %ymm1, %ymm1 vmovdqa 7232(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 8384(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 7808(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 6944(%r12), %ymm8 vmovdqa 7808(%r12), %ymm9 
vmovdqa %ymm8, 1024(%r8) vmovdqa %ymm0, 1056(%r8) vmovdqa %ymm1, 1088(%r8) vmovdqa %ymm7, 1120(%r8) vmovdqa %ymm5, 1152(%r8) vmovdqa %ymm2, 1184(%r8) vmovdqa %ymm3, 1216(%r8) vmovdqa %ymm9, 1248(%r8) vmovdqa 8768(%r12), %ymm0 vpsubw 8864(%r12), %ymm0, %ymm0 vmovdqa 9152(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 8960(%r12), %ymm1, %ymm1 vpsubw 8672(%r12), %ymm0, %ymm0 vpaddw 9056(%r12), %ymm0, %ymm0 vmovdqa 9344(%r12), %ymm2 vpsubw 9440(%r12), %ymm2, %ymm2 vmovdqa 9728(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 9536(%r12), %ymm3, %ymm3 vpsubw 9248(%r12), %ymm2, %ymm2 vpaddw 9632(%r12), %ymm2, %ymm2 vmovdqa 9920(%r12), %ymm4 vpsubw 10016(%r12), %ymm4, %ymm4 vmovdqa 10304(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 10112(%r12), %ymm5, %ymm5 vpsubw 9824(%r12), %ymm4, %ymm4 vpaddw 10208(%r12), %ymm4, %ymm4 vpsubw 9248(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 8672(%r12), %ymm1, %ymm1 vpaddw 9824(%r12), %ymm1, %ymm1 vmovdqa 8960(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 10112(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 9536(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 8672(%r12), %ymm8 vmovdqa 9536(%r12), %ymm9 vmovdqa %ymm8, 1280(%r8) vmovdqa %ymm0, 1312(%r8) vmovdqa %ymm1, 1344(%r8) vmovdqa %ymm7, 1376(%r8) vmovdqa %ymm5, 1408(%r8) vmovdqa %ymm2, 1440(%r8) vmovdqa %ymm3, 1472(%r8) vmovdqa %ymm9, 1504(%r8) vmovdqa 10496(%r12), %ymm0 vpsubw 10592(%r12), %ymm0, %ymm0 vmovdqa 10880(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 10688(%r12), %ymm1, %ymm1 vpsubw 10400(%r12), %ymm0, %ymm0 vpaddw 10784(%r12), %ymm0, %ymm0 vmovdqa 11072(%r12), %ymm2 vpsubw 11168(%r12), %ymm2, %ymm2 vmovdqa 11456(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 11264(%r12), %ymm3, %ymm3 vpsubw 10976(%r12), %ymm2, %ymm2 vpaddw 11360(%r12), %ymm2, %ymm2 vmovdqa 11648(%r12), %ymm4 vpsubw 11744(%r12), %ymm4, %ymm4 vmovdqa 12032(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 11840(%r12), %ymm5, %ymm5 vpsubw 11552(%r12), %ymm4, %ymm4 vpaddw 11936(%r12), %ymm4, %ymm4 vpsubw 10976(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 10400(%r12), %ymm1, %ymm1 vpaddw 11552(%r12), %ymm1, %ymm1 vmovdqa 10688(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 11840(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 11264(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 10400(%r12), %ymm8 vmovdqa 11264(%r12), %ymm9 vmovdqa %ymm8, 1536(%r8) vmovdqa %ymm0, 1568(%r8) vmovdqa %ymm1, 1600(%r8) vmovdqa %ymm7, 1632(%r8) vmovdqa %ymm5, 1664(%r8) vmovdqa %ymm2, 1696(%r8) vmovdqa %ymm3, 1728(%r8) vmovdqa %ymm9, 1760(%r8) vmovdqa 0(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm8 vpunpckhwd const0(%rip), %ymm11, %ymm3 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm3, %ymm3 vmovdqa 256(%r8), %ymm4 vpunpcklwd const0(%rip), %ymm4, %ymm10 vpunpckhwd const0(%rip), %ymm4, %ymm4 vmovdqa 512(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm10, %ymm9 vpaddd %ymm6, %ymm4, %ymm2 vpsubd %ymm8, %ymm9, %ymm9 vpsubd %ymm3, %ymm2, %ymm2 vpsubd %ymm5, %ymm10, %ymm5 vpsubd %ymm6, %ymm4, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1536(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm4 vpunpckhwd const0(%rip), %ymm5, %ymm10 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm10, %ymm10 vpsubd %ymm4, %ymm9, %ymm9 vpsubd %ymm10, %ymm2, %ymm2 vpsrld $1, 
%ymm9, %ymm9 vpsrld $1, %ymm2, %ymm2 vpand mask32_to_16(%rip), %ymm9, %ymm9 vpand mask32_to_16(%rip), %ymm2, %ymm2 vpackusdw %ymm2, %ymm9, %ymm2 vmovdqa 768(%r8), %ymm9 vpaddw 1024(%r8), %ymm9, %ymm10 vpsubw 1024(%r8), %ymm9, %ymm9 vpsrlw $2, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsllw $1, %ymm11, %ymm4 vpsubw %ymm4, %ymm10, %ymm4 vpsllw $7, %ymm5, %ymm10 vpsubw %ymm10, %ymm4, %ymm10 vpsrlw $3, %ymm10, %ymm10 vpsubw %ymm2, %ymm10, %ymm10 vmovdqa 1280(%r8), %ymm4 vpsubw %ymm11, %ymm4, %ymm4 vpmullw %ymm15, %ymm5, %ymm3 vpsubw %ymm3, %ymm4, %ymm3 vpmullw %ymm14, %ymm10, %ymm10 vpsubw %ymm10, %ymm2, %ymm2 vpmullw %ymm12, %ymm10, %ymm4 vpaddw %ymm4, %ymm2, %ymm4 vpmullw %ymm12, %ymm4, %ymm4 vpsubw %ymm4, %ymm3, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpsrlw $3, %ymm4, %ymm4 vpsubw %ymm9, %ymm4, %ymm4 vpsubw %ymm4, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vpmullw %ymm13, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_3_5(%rip), %ymm10, %ymm3 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm8 vpor %ymm8, %ymm10, %ymm10 vpaddw 2048(%r8), %ymm11, %ymm11 vpaddw %ymm10, %ymm11, %ymm11 vmovdqa %xmm3, 2048(%r8) vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_3_5(%rip), %ymm4, %ymm3 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm8 vpor %ymm8, %ymm4, %ymm4 vpaddw 2304(%r8), %ymm6, %ymm6 vpaddw %ymm4, %ymm6, %ymm6 vmovdqa %xmm3, 2304(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_3_5(%rip), %ymm5, %ymm3 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm8 vpor %ymm8, %ymm5, %ymm5 vpaddw 2560(%r8), %ymm2, %ymm2 vpaddw %ymm5, %ymm2, %ymm2 vmovdqa %xmm3, 2560(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 32(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 384(%rdi) vpand mask_mod8192(%rip), %ymm2, %ymm2 vmovdqu %ymm2, 736(%rdi) vpand mask_mod8192(%rip), %ymm9, %ymm9 vmovdqu %ymm9, 1088(%rdi) vmovdqa 32(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm4 vpunpckhwd const0(%rip), %ymm5, %ymm10 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm10, %ymm10 vmovdqa 288(%r8), %ymm9 vpunpcklwd const0(%rip), %ymm9, %ymm2 vpunpckhwd const0(%rip), %ymm9, %ymm9 vmovdqa 544(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm2, %ymm3 vpaddd %ymm6, %ymm9, %ymm8 vpsubd %ymm4, %ymm3, %ymm3 vpsubd %ymm10, %ymm8, %ymm8 vpsubd %ymm11, %ymm2, %ymm11 vpsubd %ymm6, %ymm9, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1568(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm9 vpunpckhwd const0(%rip), %ymm11, %ymm2 vpslld $1, %ymm9, %ymm9 vpslld $1, %ymm2, %ymm2 vpsubd %ymm9, %ymm3, %ymm3 vpsubd %ymm2, %ymm8, %ymm8 vpsrld $1, %ymm3, %ymm3 vpsrld $1, %ymm8, %ymm8 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm8, %ymm8 vpackusdw %ymm8, %ymm3, %ymm8 vmovdqa 800(%r8), %ymm3 vpaddw 1056(%r8), %ymm3, %ymm2 vpsubw 1056(%r8), %ymm3, %ymm3 vpsrlw $2, %ymm3, %ymm3 vpsubw %ymm6, %ymm3, %ymm3 vpmullw %ymm14, %ymm3, %ymm3 vpsllw $1, %ymm5, %ymm9 vpsubw %ymm9, %ymm2, %ymm9 vpsllw $7, %ymm11, %ymm2 vpsubw %ymm2, %ymm9, %ymm2 vpsrlw $3, %ymm2, %ymm2 vpsubw %ymm8, %ymm2, %ymm2 vmovdqa 1312(%r8), %ymm9 vpsubw %ymm5, %ymm9, %ymm9 vpmullw %ymm15, %ymm11, %ymm10 
vpsubw %ymm10, %ymm9, %ymm10 vpmullw %ymm14, %ymm2, %ymm2 vpsubw %ymm2, %ymm8, %ymm8 vpmullw %ymm12, %ymm2, %ymm9 vpaddw %ymm9, %ymm8, %ymm9 vpmullw %ymm12, %ymm9, %ymm9 vpsubw %ymm9, %ymm10, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpsrlw $3, %ymm9, %ymm9 vpsubw %ymm3, %ymm9, %ymm9 vpsubw %ymm9, %ymm3, %ymm3 vpsubw %ymm3, %ymm6, %ymm6 vpmullw %ymm13, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm2, %ymm2 vpand mask3_5_3_5(%rip), %ymm2, %ymm10 vpand mask5_3_5_3(%rip), %ymm2, %ymm2 vpermq $206, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm4 vpor %ymm4, %ymm2, %ymm2 vpaddw 2080(%r8), %ymm5, %ymm5 vpaddw %ymm2, %ymm5, %ymm5 vmovdqa %xmm10, 2080(%r8) vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_3_5(%rip), %ymm9, %ymm10 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $206, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm4 vpor %ymm4, %ymm9, %ymm9 vpaddw 2336(%r8), %ymm6, %ymm6 vpaddw %ymm9, %ymm6, %ymm6 vmovdqa %xmm10, 2336(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm10 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm4 vpor %ymm4, %ymm11, %ymm11 vpaddw 2592(%r8), %ymm8, %ymm8 vpaddw %ymm11, %ymm8, %ymm8 vmovdqa %xmm10, 2592(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 120(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 472(%rdi) vpand mask_mod8192(%rip), %ymm8, %ymm8 vmovdqu %ymm8, 824(%rdi) vpand mask_mod8192(%rip), %ymm3, %ymm3 vmovdqu %ymm3, 1176(%rdi) vmovdqa 64(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm9 vpunpckhwd const0(%rip), %ymm11, %ymm2 vpslld $1, %ymm9, %ymm9 vpslld $1, %ymm2, %ymm2 vmovdqa 320(%r8), %ymm3 vpunpcklwd const0(%rip), %ymm3, %ymm8 vpunpckhwd const0(%rip), %ymm3, %ymm3 vmovdqa 576(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm8, %ymm10 vpaddd %ymm6, %ymm3, %ymm4 vpsubd %ymm9, %ymm10, %ymm10 vpsubd %ymm2, %ymm4, %ymm4 vpsubd %ymm5, %ymm8, %ymm5 vpsubd %ymm6, %ymm3, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1600(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm3 vpunpckhwd const0(%rip), %ymm5, %ymm8 vpslld $1, %ymm3, %ymm3 vpslld $1, %ymm8, %ymm8 vpsubd %ymm3, %ymm10, %ymm10 vpsubd %ymm8, %ymm4, %ymm4 vpsrld $1, %ymm10, %ymm10 vpsrld $1, %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm10, %ymm10 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpackusdw %ymm4, %ymm10, %ymm4 vmovdqa 832(%r8), %ymm10 vpaddw 1088(%r8), %ymm10, %ymm8 vpsubw 1088(%r8), %ymm10, %ymm10 vpsrlw $2, %ymm10, %ymm10 vpsubw %ymm6, %ymm10, %ymm10 vpmullw %ymm14, %ymm10, %ymm10 vpsllw $1, %ymm11, %ymm3 vpsubw %ymm3, %ymm8, %ymm3 vpsllw $7, %ymm5, %ymm8 vpsubw %ymm8, %ymm3, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm4, %ymm8, %ymm8 vmovdqa 1344(%r8), %ymm3 vpsubw %ymm11, %ymm3, %ymm3 vpmullw %ymm15, %ymm5, %ymm2 vpsubw %ymm2, %ymm3, %ymm2 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm8, %ymm4, %ymm4 vpmullw %ymm12, %ymm8, %ymm3 vpaddw %ymm3, %ymm4, %ymm3 vpmullw %ymm12, %ymm3, %ymm3 vpsubw %ymm3, %ymm2, %ymm3 vpmullw %ymm14, %ymm3, %ymm3 vpsubw %ymm6, %ymm3, %ymm3 vpsrlw $3, %ymm3, %ymm3 vpsubw %ymm10, %ymm3, %ymm3 vpsubw %ymm3, %ymm10, %ymm10 vpsubw %ymm10, %ymm6, %ymm6 vpmullw %ymm13, %ymm3, %ymm3 vpsubw %ymm3, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_3_5(%rip), %ymm8, %ymm2 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $206, 
%ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm9 vpor %ymm9, %ymm8, %ymm8 vpaddw 2112(%r8), %ymm11, %ymm11 vpaddw %ymm8, %ymm11, %ymm11 vmovdqa %xmm2, 2112(%r8) vpshufb shuf48_16(%rip), %ymm3, %ymm3 vpand mask3_5_3_5(%rip), %ymm3, %ymm2 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $206, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm9 vpor %ymm9, %ymm3, %ymm3 vpaddw 2368(%r8), %ymm6, %ymm6 vpaddw %ymm3, %ymm6, %ymm6 vmovdqa %xmm2, 2368(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_3_5(%rip), %ymm5, %ymm2 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $206, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm9 vpor %ymm9, %ymm5, %ymm5 vpaddw 2624(%r8), %ymm4, %ymm4 vpaddw %ymm5, %ymm4, %ymm4 vmovdqa %xmm2, 2624(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 208(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 560(%rdi) vpand mask_mod8192(%rip), %ymm4, %ymm4 vmovdqu %ymm4, 912(%rdi) vpand mask_mod8192(%rip), %ymm10, %ymm10 vmovdqu %ymm10, 1264(%rdi) vmovdqa 96(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm3 vpunpckhwd const0(%rip), %ymm5, %ymm8 vpslld $1, %ymm3, %ymm3 vpslld $1, %ymm8, %ymm8 vmovdqa 352(%r8), %ymm10 vpunpcklwd const0(%rip), %ymm10, %ymm4 vpunpckhwd const0(%rip), %ymm10, %ymm10 vmovdqa 608(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm4, %ymm2 vpaddd %ymm6, %ymm10, %ymm9 vpsubd %ymm3, %ymm2, %ymm2 vpsubd %ymm8, %ymm9, %ymm9 vpsubd %ymm11, %ymm4, %ymm11 vpsubd %ymm6, %ymm10, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1632(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm10 vpunpckhwd const0(%rip), %ymm11, %ymm4 vpslld $1, %ymm10, %ymm10 vpslld $1, %ymm4, %ymm4 vpsubd %ymm10, %ymm2, %ymm2 vpsubd %ymm4, %ymm9, %ymm9 vpsrld $1, %ymm2, %ymm2 vpsrld $1, %ymm9, %ymm9 vpand mask32_to_16(%rip), %ymm2, %ymm2 vpand mask32_to_16(%rip), %ymm9, %ymm9 vpackusdw %ymm9, %ymm2, %ymm9 vmovdqa 864(%r8), %ymm2 vpaddw 1120(%r8), %ymm2, %ymm4 vpsubw 1120(%r8), %ymm2, %ymm2 vpsrlw $2, %ymm2, %ymm2 vpsubw %ymm6, %ymm2, %ymm2 vpmullw %ymm14, %ymm2, %ymm2 vpsllw $1, %ymm5, %ymm10 vpsubw %ymm10, %ymm4, %ymm10 vpsllw $7, %ymm11, %ymm4 vpsubw %ymm4, %ymm10, %ymm4 vpsrlw $3, %ymm4, %ymm4 vpsubw %ymm9, %ymm4, %ymm4 vmovdqa 1376(%r8), %ymm10 vpsubw %ymm5, %ymm10, %ymm10 vpmullw %ymm15, %ymm11, %ymm8 vpsubw %ymm8, %ymm10, %ymm8 vpmullw %ymm14, %ymm4, %ymm4 vpsubw %ymm4, %ymm9, %ymm9 vpmullw %ymm12, %ymm4, %ymm10 vpaddw %ymm10, %ymm9, %ymm10 vpmullw %ymm12, %ymm10, %ymm10 vpsubw %ymm10, %ymm8, %ymm10 vpmullw %ymm14, %ymm10, %ymm10 vpsubw %ymm6, %ymm10, %ymm10 vpsrlw $3, %ymm10, %ymm10 vpsubw %ymm2, %ymm10, %ymm10 vpsubw %ymm10, %ymm2, %ymm2 vpsubw %ymm2, %ymm6, %ymm6 vpmullw %ymm13, %ymm10, %ymm10 vpsubw %ymm10, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_3_5(%rip), %ymm4, %ymm8 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $206, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm3 vpor %ymm3, %ymm4, %ymm4 vpaddw 2144(%r8), %ymm5, %ymm5 vpaddw %ymm4, %ymm5, %ymm5 vmovdqa %xmm8, 2144(%r8) vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_3_5(%rip), %ymm10, %ymm8 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $206, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm3 vpor %ymm3, %ymm10, %ymm10 vpaddw 2400(%r8), %ymm6, %ymm6 vpaddw %ymm10, %ymm6, %ymm6 vmovdqa %xmm8, 2400(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm8 
vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm3 vpor %ymm3, %ymm11, %ymm11 vpaddw 2656(%r8), %ymm9, %ymm9 vpaddw %ymm11, %ymm9, %ymm9 vmovdqa %xmm8, 2656(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 296(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 648(%rdi) vpand mask_mod8192(%rip), %ymm9, %ymm9 vmovdqu %ymm9, 1000(%rdi) vpand mask_mod8192(%rip), %ymm2, %ymm2 vmovdqu %ymm2, 1352(%rdi) vmovdqa 128(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm10 vpunpckhwd const0(%rip), %ymm11, %ymm4 vpslld $1, %ymm10, %ymm10 vpslld $1, %ymm4, %ymm4 vmovdqa 384(%r8), %ymm2 vpunpcklwd const0(%rip), %ymm2, %ymm9 vpunpckhwd const0(%rip), %ymm2, %ymm2 vmovdqa 640(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm9, %ymm8 vpaddd %ymm6, %ymm2, %ymm3 vpsubd %ymm10, %ymm8, %ymm8 vpsubd %ymm4, %ymm3, %ymm3 vpsubd %ymm5, %ymm9, %ymm5 vpsubd %ymm6, %ymm2, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1664(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm2 vpunpckhwd const0(%rip), %ymm5, %ymm9 vpslld $1, %ymm2, %ymm2 vpslld $1, %ymm9, %ymm9 vpsubd %ymm2, %ymm8, %ymm8 vpsubd %ymm9, %ymm3, %ymm3 vpsrld $1, %ymm8, %ymm8 vpsrld $1, %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm8, %ymm8 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpackusdw %ymm3, %ymm8, %ymm3 vmovdqa 896(%r8), %ymm8 vpaddw 1152(%r8), %ymm8, %ymm9 vpsubw 1152(%r8), %ymm8, %ymm8 vpsrlw $2, %ymm8, %ymm8 vpsubw %ymm6, %ymm8, %ymm8 vpmullw %ymm14, %ymm8, %ymm8 vpsllw $1, %ymm11, %ymm2 vpsubw %ymm2, %ymm9, %ymm2 vpsllw $7, %ymm5, %ymm9 vpsubw %ymm9, %ymm2, %ymm9 vpsrlw $3, %ymm9, %ymm9 vpsubw %ymm3, %ymm9, %ymm9 vmovdqa 1408(%r8), %ymm2 vpsubw %ymm11, %ymm2, %ymm2 vpmullw %ymm15, %ymm5, %ymm4 vpsubw %ymm4, %ymm2, %ymm4 vpmullw %ymm14, %ymm9, %ymm9 vpsubw %ymm9, %ymm3, %ymm3 vpmullw %ymm12, %ymm9, %ymm2 vpaddw %ymm2, %ymm3, %ymm2 vpmullw %ymm12, %ymm2, %ymm2 vpsubw %ymm2, %ymm4, %ymm2 vpmullw %ymm14, %ymm2, %ymm2 vpsubw %ymm6, %ymm2, %ymm2 vpsrlw $3, %ymm2, %ymm2 vpsubw %ymm8, %ymm2, %ymm2 vpsubw %ymm2, %ymm8, %ymm8 vpsubw %ymm8, %ymm6, %ymm6 vpmullw %ymm13, %ymm2, %ymm2 vpsubw %ymm2, %ymm6, %ymm6 vmovdqu 384(%rdi), %ymm4 vmovdqu 736(%rdi), %ymm10 vmovdqu 1088(%rdi), %ymm7 vpaddw %ymm11, %ymm4, %ymm11 vpaddw %ymm6, %ymm10, %ymm6 vpaddw %ymm3, %ymm7, %ymm3 vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_3_5(%rip), %ymm8, %ymm7 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $206, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm10 vpor %ymm10, %ymm8, %ymm8 vmovdqu 32(%rdi), %ymm10 vpaddw 1920(%r8), %ymm10, %ymm10 vpaddw %ymm8, %ymm10, %ymm10 vpand mask_mod8192(%rip), %ymm10, %ymm10 vmovdqu %ymm10, 32(%rdi) vmovdqa %xmm7, 1920(%r8) vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_3_5(%rip), %ymm9, %ymm7 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $206, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm10 vpor %ymm10, %ymm9, %ymm9 vpaddw 2176(%r8), %ymm11, %ymm11 vpaddw %ymm9, %ymm11, %ymm11 vmovdqa %xmm7, 2176(%r8) vpshufb shuf48_16(%rip), %ymm2, %ymm2 vpand mask3_5_3_5(%rip), %ymm2, %ymm7 vpand mask5_3_5_3(%rip), %ymm2, %ymm2 vpermq $206, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm10 vpor %ymm10, %ymm2, %ymm2 vpaddw 2432(%r8), %ymm6, %ymm6 vpaddw %ymm2, %ymm6, %ymm6 vmovdqa %xmm7, 2432(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_3_5(%rip), %ymm5, %ymm7 vpand mask5_3_5_3(%rip), 
%ymm5, %ymm5 vpermq $206, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm10 vpor %ymm10, %ymm5, %ymm5 vpaddw 2688(%r8), %ymm3, %ymm3 vpaddw %ymm5, %ymm3, %ymm3 vmovdqa %xmm7, 2688(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 384(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 736(%rdi) vpand mask_mod8192(%rip), %ymm3, %ymm3 vmovdqu %ymm3, 1088(%rdi) vmovdqa 160(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm2 vpunpckhwd const0(%rip), %ymm5, %ymm9 vpslld $1, %ymm2, %ymm2 vpslld $1, %ymm9, %ymm9 vmovdqa 416(%r8), %ymm8 vpunpcklwd const0(%rip), %ymm8, %ymm3 vpunpckhwd const0(%rip), %ymm8, %ymm8 vmovdqa 672(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm3, %ymm7 vpaddd %ymm6, %ymm8, %ymm10 vpsubd %ymm2, %ymm7, %ymm7 vpsubd %ymm9, %ymm10, %ymm10 vpsubd %ymm11, %ymm3, %ymm11 vpsubd %ymm6, %ymm8, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1696(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm8 vpunpckhwd const0(%rip), %ymm11, %ymm3 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm3, %ymm3 vpsubd %ymm8, %ymm7, %ymm7 vpsubd %ymm3, %ymm10, %ymm10 vpsrld $1, %ymm7, %ymm7 vpsrld $1, %ymm10, %ymm10 vpand mask32_to_16(%rip), %ymm7, %ymm7 vpand mask32_to_16(%rip), %ymm10, %ymm10 vpackusdw %ymm10, %ymm7, %ymm10 vmovdqa 928(%r8), %ymm7 vpaddw 1184(%r8), %ymm7, %ymm3 vpsubw 1184(%r8), %ymm7, %ymm7 vpsrlw $2, %ymm7, %ymm7 vpsubw %ymm6, %ymm7, %ymm7 vpmullw %ymm14, %ymm7, %ymm7 vpsllw $1, %ymm5, %ymm8 vpsubw %ymm8, %ymm3, %ymm8 vpsllw $7, %ymm11, %ymm3 vpsubw %ymm3, %ymm8, %ymm3 vpsrlw $3, %ymm3, %ymm3 vpsubw %ymm10, %ymm3, %ymm3 vmovdqa 1440(%r8), %ymm8 vpsubw %ymm5, %ymm8, %ymm8 vpmullw %ymm15, %ymm11, %ymm9 vpsubw %ymm9, %ymm8, %ymm9 vpmullw %ymm14, %ymm3, %ymm3 vpsubw %ymm3, %ymm10, %ymm10 vpmullw %ymm12, %ymm3, %ymm8 vpaddw %ymm8, %ymm10, %ymm8 vpmullw %ymm12, %ymm8, %ymm8 vpsubw %ymm8, %ymm9, %ymm8 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm6, %ymm8, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm7, %ymm8, %ymm8 vpsubw %ymm8, %ymm7, %ymm7 vpsubw %ymm7, %ymm6, %ymm6 vpmullw %ymm13, %ymm8, %ymm8 vpsubw %ymm8, %ymm6, %ymm6 vmovdqu 472(%rdi), %ymm9 vmovdqu 824(%rdi), %ymm2 vmovdqu 1176(%rdi), %ymm4 vpaddw %ymm5, %ymm9, %ymm5 vpaddw %ymm6, %ymm2, %ymm6 vpaddw %ymm10, %ymm4, %ymm10 vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_3_5(%rip), %ymm7, %ymm4 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm2 vpor %ymm2, %ymm7, %ymm7 vmovdqu 120(%rdi), %ymm2 vpaddw 1952(%r8), %ymm2, %ymm2 vpaddw %ymm7, %ymm2, %ymm2 vpand mask_mod8192(%rip), %ymm2, %ymm2 vmovdqu %ymm2, 120(%rdi) vmovdqa %xmm4, 1952(%r8) vpshufb shuf48_16(%rip), %ymm3, %ymm3 vpand mask3_5_3_5(%rip), %ymm3, %ymm4 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm2 vpor %ymm2, %ymm3, %ymm3 vpaddw 2208(%r8), %ymm5, %ymm5 vpaddw %ymm3, %ymm5, %ymm5 vmovdqa %xmm4, 2208(%r8) vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_3_5(%rip), %ymm8, %ymm4 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm2 vpor %ymm2, %ymm8, %ymm8 vpaddw 2464(%r8), %ymm6, %ymm6 vpaddw %ymm8, %ymm6, %ymm6 vmovdqa %xmm4, 2464(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm4 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm2 vpor 
%ymm2, %ymm11, %ymm11 vpaddw 2720(%r8), %ymm10, %ymm10 vpaddw %ymm11, %ymm10, %ymm10 vmovdqa %xmm4, 2720(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 472(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 824(%rdi) vpand mask_mod8192(%rip), %ymm10, %ymm10 vmovdqu %ymm10, 1176(%rdi) vmovdqa 192(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm8 vpunpckhwd const0(%rip), %ymm11, %ymm3 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm3, %ymm3 vmovdqa 448(%r8), %ymm7 vpunpcklwd const0(%rip), %ymm7, %ymm10 vpunpckhwd const0(%rip), %ymm7, %ymm7 vmovdqa 704(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm10, %ymm4 vpaddd %ymm6, %ymm7, %ymm2 vpsubd %ymm8, %ymm4, %ymm4 vpsubd %ymm3, %ymm2, %ymm2 vpsubd %ymm5, %ymm10, %ymm5 vpsubd %ymm6, %ymm7, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1728(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm7 vpunpckhwd const0(%rip), %ymm5, %ymm10 vpslld $1, %ymm7, %ymm7 vpslld $1, %ymm10, %ymm10 vpsubd %ymm7, %ymm4, %ymm4 vpsubd %ymm10, %ymm2, %ymm2 vpsrld $1, %ymm4, %ymm4 vpsrld $1, %ymm2, %ymm2 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm2, %ymm2 vpackusdw %ymm2, %ymm4, %ymm2 vmovdqa 960(%r8), %ymm4 vpaddw 1216(%r8), %ymm4, %ymm10 vpsubw 1216(%r8), %ymm4, %ymm4 vpsrlw $2, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsllw $1, %ymm11, %ymm7 vpsubw %ymm7, %ymm10, %ymm7 vpsllw $7, %ymm5, %ymm10 vpsubw %ymm10, %ymm7, %ymm10 vpsrlw $3, %ymm10, %ymm10 vpsubw %ymm2, %ymm10, %ymm10 vmovdqa 1472(%r8), %ymm7 vpsubw %ymm11, %ymm7, %ymm7 vpmullw %ymm15, %ymm5, %ymm3 vpsubw %ymm3, %ymm7, %ymm3 vpmullw %ymm14, %ymm10, %ymm10 vpsubw %ymm10, %ymm2, %ymm2 vpmullw %ymm12, %ymm10, %ymm7 vpaddw %ymm7, %ymm2, %ymm7 vpmullw %ymm12, %ymm7, %ymm7 vpsubw %ymm7, %ymm3, %ymm7 vpmullw %ymm14, %ymm7, %ymm7 vpsubw %ymm6, %ymm7, %ymm7 vpsrlw $3, %ymm7, %ymm7 vpsubw %ymm4, %ymm7, %ymm7 vpsubw %ymm7, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vpmullw %ymm13, %ymm7, %ymm7 vpsubw %ymm7, %ymm6, %ymm6 vmovdqu 560(%rdi), %ymm3 vmovdqu 912(%rdi), %ymm8 vmovdqu 1264(%rdi), %ymm9 vpaddw %ymm11, %ymm3, %ymm11 vpaddw %ymm6, %ymm8, %ymm6 vpaddw %ymm2, %ymm9, %ymm2 vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_3_5(%rip), %ymm4, %ymm9 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm8 vpor %ymm8, %ymm4, %ymm4 vmovdqu 208(%rdi), %ymm8 vpaddw 1984(%r8), %ymm8, %ymm8 vpaddw %ymm4, %ymm8, %ymm8 vpand mask_mod8192(%rip), %ymm8, %ymm8 vmovdqu %ymm8, 208(%rdi) vmovdqa %xmm9, 1984(%r8) vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_3_5(%rip), %ymm10, %ymm9 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm8 vpor %ymm8, %ymm10, %ymm10 vpaddw 2240(%r8), %ymm11, %ymm11 vpaddw %ymm10, %ymm11, %ymm11 vmovdqa %xmm9, 2240(%r8) vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_3_5(%rip), %ymm7, %ymm9 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm8 vpor %ymm8, %ymm7, %ymm7 vpaddw 2496(%r8), %ymm6, %ymm6 vpaddw %ymm7, %ymm6, %ymm6 vmovdqa %xmm9, 2496(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_3_5(%rip), %ymm5, %ymm9 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $206, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm8 vpor %ymm8, %ymm5, %ymm5 vpaddw 2752(%r8), %ymm2, %ymm2 vpaddw %ymm5, %ymm2, %ymm2 
vmovdqa %xmm9, 2752(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 560(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 912(%rdi) vpand mask_mod8192(%rip), %ymm2, %ymm2 vmovdqu %ymm2, 1264(%rdi) vmovdqa 224(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm7 vpunpckhwd const0(%rip), %ymm5, %ymm10 vpslld $1, %ymm7, %ymm7 vpslld $1, %ymm10, %ymm10 vmovdqa 480(%r8), %ymm4 vpunpcklwd const0(%rip), %ymm4, %ymm2 vpunpckhwd const0(%rip), %ymm4, %ymm4 vmovdqa 736(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm2, %ymm9 vpaddd %ymm6, %ymm4, %ymm8 vpsubd %ymm7, %ymm9, %ymm9 vpsubd %ymm10, %ymm8, %ymm8 vpsubd %ymm11, %ymm2, %ymm11 vpsubd %ymm6, %ymm4, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1760(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm4 vpunpckhwd const0(%rip), %ymm11, %ymm2 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm2, %ymm2 vpsubd %ymm4, %ymm9, %ymm9 vpsubd %ymm2, %ymm8, %ymm8 vpsrld $1, %ymm9, %ymm9 vpsrld $1, %ymm8, %ymm8 vpand mask32_to_16(%rip), %ymm9, %ymm9 vpand mask32_to_16(%rip), %ymm8, %ymm8 vpackusdw %ymm8, %ymm9, %ymm8 vmovdqa 992(%r8), %ymm9 vpaddw 1248(%r8), %ymm9, %ymm2 vpsubw 1248(%r8), %ymm9, %ymm9 vpsrlw $2, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsllw $1, %ymm5, %ymm4 vpsubw %ymm4, %ymm2, %ymm4 vpsllw $7, %ymm11, %ymm2 vpsubw %ymm2, %ymm4, %ymm2 vpsrlw $3, %ymm2, %ymm2 vpsubw %ymm8, %ymm2, %ymm2 vmovdqa 1504(%r8), %ymm4 vpsubw %ymm5, %ymm4, %ymm4 vpmullw %ymm15, %ymm11, %ymm10 vpsubw %ymm10, %ymm4, %ymm10 vpmullw %ymm14, %ymm2, %ymm2 vpsubw %ymm2, %ymm8, %ymm8 vpmullw %ymm12, %ymm2, %ymm4 vpaddw %ymm4, %ymm8, %ymm4 vpmullw %ymm12, %ymm4, %ymm4 vpsubw %ymm4, %ymm10, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpsrlw $3, %ymm4, %ymm4 vpsubw %ymm9, %ymm4, %ymm4 vpsubw %ymm4, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vpmullw %ymm13, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vmovdqu 648(%rdi), %ymm10 vmovdqu 1000(%rdi), %ymm7 vmovdqu 1352(%rdi), %ymm3 vpaddw %ymm5, %ymm10, %ymm5 vpaddw %ymm6, %ymm7, %ymm6 vpaddw %ymm8, %ymm3, %ymm8 vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_3_5(%rip), %ymm9, %ymm3 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm7 vpor %ymm7, %ymm9, %ymm9 vmovdqu 296(%rdi), %ymm7 vpaddw 2016(%r8), %ymm7, %ymm7 vpaddw %ymm9, %ymm7, %ymm7 vpand mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %ymm7, 296(%rdi) vmovdqa %xmm3, 2016(%r8) vpshufb shuf48_16(%rip), %ymm2, %ymm2 vpand mask3_5_3_5(%rip), %ymm2, %ymm3 vpand mask5_3_5_3(%rip), %ymm2, %ymm2 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm7 vpor %ymm7, %ymm2, %ymm2 vpaddw 2272(%r8), %ymm5, %ymm5 vpaddw %ymm2, %ymm5, %ymm5 vmovdqa %xmm3, 2272(%r8) vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_3_5(%rip), %ymm4, %ymm3 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm7 vpor %ymm7, %ymm4, %ymm4 vpaddw 2528(%r8), %ymm6, %ymm6 vpaddw %ymm4, %ymm6, %ymm6 vmovdqa %xmm3, 2528(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_3_5(%rip), %ymm11, %ymm3 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $206, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm7 vpor %ymm7, %ymm11, %ymm11 vpaddw 2784(%r8), %ymm8, %ymm8 vpaddw %ymm11, %ymm8, %ymm8 vmovdqa %xmm3, 2784(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %ymm5, 648(%rdi) 
vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %ymm6, 1000(%rdi) vpand mask_mod8192(%rip), %ymm8, %ymm8 vmovdqu %ymm8, 1352(%rdi) vmovdqa 160(%r12), %ymm0 vpsubw 256(%r12), %ymm0, %ymm0 vmovdqa 544(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 352(%r12), %ymm1, %ymm1 vpsubw 64(%r12), %ymm0, %ymm0 vpaddw 448(%r12), %ymm0, %ymm0 vmovdqa 736(%r12), %ymm2 vpsubw 832(%r12), %ymm2, %ymm2 vmovdqa 1120(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 928(%r12), %ymm3, %ymm3 vpsubw 640(%r12), %ymm2, %ymm2 vpaddw 1024(%r12), %ymm2, %ymm2 vmovdqa 1312(%r12), %ymm4 vpsubw 1408(%r12), %ymm4, %ymm4 vmovdqa 1696(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 1504(%r12), %ymm5, %ymm5 vpsubw 1216(%r12), %ymm4, %ymm4 vpaddw 1600(%r12), %ymm4, %ymm4 vpsubw 640(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 64(%r12), %ymm1, %ymm1 vpaddw 1216(%r12), %ymm1, %ymm1 vmovdqa 352(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 1504(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 928(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 64(%r12), %ymm8 vmovdqa 928(%r12), %ymm9 vmovdqa %ymm8, 0(%r8) vmovdqa %ymm0, 32(%r8) vmovdqa %ymm1, 64(%r8) vmovdqa %ymm7, 96(%r8) vmovdqa %ymm5, 128(%r8) vmovdqa %ymm2, 160(%r8) vmovdqa %ymm3, 192(%r8) vmovdqa %ymm9, 224(%r8) vmovdqa 1888(%r12), %ymm0 vpsubw 1984(%r12), %ymm0, %ymm0 vmovdqa 2272(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 2080(%r12), %ymm1, %ymm1 vpsubw 1792(%r12), %ymm0, %ymm0 vpaddw 2176(%r12), %ymm0, %ymm0 vmovdqa 2464(%r12), %ymm2 vpsubw 2560(%r12), %ymm2, %ymm2 vmovdqa 2848(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 2656(%r12), %ymm3, %ymm3 vpsubw 2368(%r12), %ymm2, %ymm2 vpaddw 2752(%r12), %ymm2, %ymm2 vmovdqa 3040(%r12), %ymm4 vpsubw 3136(%r12), %ymm4, %ymm4 vmovdqa 3424(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 3232(%r12), %ymm5, %ymm5 vpsubw 2944(%r12), %ymm4, %ymm4 vpaddw 3328(%r12), %ymm4, %ymm4 vpsubw 2368(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 1792(%r12), %ymm1, %ymm1 vpaddw 2944(%r12), %ymm1, %ymm1 vmovdqa 2080(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 3232(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 2656(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 1792(%r12), %ymm8 vmovdqa 2656(%r12), %ymm9 vmovdqa %ymm8, 256(%r8) vmovdqa %ymm0, 288(%r8) vmovdqa %ymm1, 320(%r8) vmovdqa %ymm7, 352(%r8) vmovdqa %ymm5, 384(%r8) vmovdqa %ymm2, 416(%r8) vmovdqa %ymm3, 448(%r8) vmovdqa %ymm9, 480(%r8) vmovdqa 3616(%r12), %ymm0 vpsubw 3712(%r12), %ymm0, %ymm0 vmovdqa 4000(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 3808(%r12), %ymm1, %ymm1 vpsubw 3520(%r12), %ymm0, %ymm0 vpaddw 3904(%r12), %ymm0, %ymm0 vmovdqa 4192(%r12), %ymm2 vpsubw 4288(%r12), %ymm2, %ymm2 vmovdqa 4576(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 4384(%r12), %ymm3, %ymm3 vpsubw 4096(%r12), %ymm2, %ymm2 vpaddw 4480(%r12), %ymm2, %ymm2 vmovdqa 4768(%r12), %ymm4 vpsubw 4864(%r12), %ymm4, %ymm4 vmovdqa 5152(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 4960(%r12), %ymm5, %ymm5 vpsubw 4672(%r12), %ymm4, %ymm4 vpaddw 5056(%r12), %ymm4, %ymm4 vpsubw 4096(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 3520(%r12), %ymm1, %ymm1 vpaddw 4672(%r12), %ymm1, %ymm1 vmovdqa 3808(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 4960(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 4384(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 3520(%r12), %ymm8 vmovdqa 4384(%r12), %ymm9 vmovdqa %ymm8, 
512(%r8) vmovdqa %ymm0, 544(%r8) vmovdqa %ymm1, 576(%r8) vmovdqa %ymm7, 608(%r8) vmovdqa %ymm5, 640(%r8) vmovdqa %ymm2, 672(%r8) vmovdqa %ymm3, 704(%r8) vmovdqa %ymm9, 736(%r8) vmovdqa 5344(%r12), %ymm0 vpsubw 5440(%r12), %ymm0, %ymm0 vmovdqa 5728(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 5536(%r12), %ymm1, %ymm1 vpsubw 5248(%r12), %ymm0, %ymm0 vpaddw 5632(%r12), %ymm0, %ymm0 vmovdqa 5920(%r12), %ymm2 vpsubw 6016(%r12), %ymm2, %ymm2 vmovdqa 6304(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 6112(%r12), %ymm3, %ymm3 vpsubw 5824(%r12), %ymm2, %ymm2 vpaddw 6208(%r12), %ymm2, %ymm2 vmovdqa 6496(%r12), %ymm4 vpsubw 6592(%r12), %ymm4, %ymm4 vmovdqa 6880(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 6688(%r12), %ymm5, %ymm5 vpsubw 6400(%r12), %ymm4, %ymm4 vpaddw 6784(%r12), %ymm4, %ymm4 vpsubw 5824(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 5248(%r12), %ymm1, %ymm1 vpaddw 6400(%r12), %ymm1, %ymm1 vmovdqa 5536(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 6688(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 6112(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 5248(%r12), %ymm8 vmovdqa 6112(%r12), %ymm9 vmovdqa %ymm8, 768(%r8) vmovdqa %ymm0, 800(%r8) vmovdqa %ymm1, 832(%r8) vmovdqa %ymm7, 864(%r8) vmovdqa %ymm5, 896(%r8) vmovdqa %ymm2, 928(%r8) vmovdqa %ymm3, 960(%r8) vmovdqa %ymm9, 992(%r8) vmovdqa 7072(%r12), %ymm0 vpsubw 7168(%r12), %ymm0, %ymm0 vmovdqa 7456(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 7264(%r12), %ymm1, %ymm1 vpsubw 6976(%r12), %ymm0, %ymm0 vpaddw 7360(%r12), %ymm0, %ymm0 vmovdqa 7648(%r12), %ymm2 vpsubw 7744(%r12), %ymm2, %ymm2 vmovdqa 8032(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 7840(%r12), %ymm3, %ymm3 vpsubw 7552(%r12), %ymm2, %ymm2 vpaddw 7936(%r12), %ymm2, %ymm2 vmovdqa 8224(%r12), %ymm4 vpsubw 8320(%r12), %ymm4, %ymm4 vmovdqa 8608(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 8416(%r12), %ymm5, %ymm5 vpsubw 8128(%r12), %ymm4, %ymm4 vpaddw 8512(%r12), %ymm4, %ymm4 vpsubw 7552(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 6976(%r12), %ymm1, %ymm1 vpaddw 8128(%r12), %ymm1, %ymm1 vmovdqa 7264(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 8416(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 7840(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 6976(%r12), %ymm8 vmovdqa 7840(%r12), %ymm9 vmovdqa %ymm8, 1024(%r8) vmovdqa %ymm0, 1056(%r8) vmovdqa %ymm1, 1088(%r8) vmovdqa %ymm7, 1120(%r8) vmovdqa %ymm5, 1152(%r8) vmovdqa %ymm2, 1184(%r8) vmovdqa %ymm3, 1216(%r8) vmovdqa %ymm9, 1248(%r8) vmovdqa 8800(%r12), %ymm0 vpsubw 8896(%r12), %ymm0, %ymm0 vmovdqa 9184(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 8992(%r12), %ymm1, %ymm1 vpsubw 8704(%r12), %ymm0, %ymm0 vpaddw 9088(%r12), %ymm0, %ymm0 vmovdqa 9376(%r12), %ymm2 vpsubw 9472(%r12), %ymm2, %ymm2 vmovdqa 9760(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 9568(%r12), %ymm3, %ymm3 vpsubw 9280(%r12), %ymm2, %ymm2 vpaddw 9664(%r12), %ymm2, %ymm2 vmovdqa 9952(%r12), %ymm4 vpsubw 10048(%r12), %ymm4, %ymm4 vmovdqa 10336(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 10144(%r12), %ymm5, %ymm5 vpsubw 9856(%r12), %ymm4, %ymm4 vpaddw 10240(%r12), %ymm4, %ymm4 vpsubw 9280(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 8704(%r12), %ymm1, %ymm1 vpaddw 9856(%r12), %ymm1, %ymm1 vmovdqa 8992(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 10144(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 9568(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, 
%ymm7, %ymm7 vmovdqa 8704(%r12), %ymm8 vmovdqa 9568(%r12), %ymm9 vmovdqa %ymm8, 1280(%r8) vmovdqa %ymm0, 1312(%r8) vmovdqa %ymm1, 1344(%r8) vmovdqa %ymm7, 1376(%r8) vmovdqa %ymm5, 1408(%r8) vmovdqa %ymm2, 1440(%r8) vmovdqa %ymm3, 1472(%r8) vmovdqa %ymm9, 1504(%r8) vmovdqa 10528(%r12), %ymm0 vpsubw 10624(%r12), %ymm0, %ymm0 vmovdqa 10912(%r12), %ymm1 vpsubw %ymm0, %ymm1, %ymm1 vpsubw 10720(%r12), %ymm1, %ymm1 vpsubw 10432(%r12), %ymm0, %ymm0 vpaddw 10816(%r12), %ymm0, %ymm0 vmovdqa 11104(%r12), %ymm2 vpsubw 11200(%r12), %ymm2, %ymm2 vmovdqa 11488(%r12), %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw 11296(%r12), %ymm3, %ymm3 vpsubw 11008(%r12), %ymm2, %ymm2 vpaddw 11392(%r12), %ymm2, %ymm2 vmovdqa 11680(%r12), %ymm4 vpsubw 11776(%r12), %ymm4, %ymm4 vmovdqa 12064(%r12), %ymm5 vpsubw %ymm4, %ymm5, %ymm5 vpsubw 11872(%r12), %ymm5, %ymm5 vpsubw 11584(%r12), %ymm4, %ymm4 vpaddw 11968(%r12), %ymm4, %ymm4 vpsubw 11008(%r12), %ymm1, %ymm1 vpsubw %ymm1, %ymm5, %ymm5 vpsubw %ymm3, %ymm5, %ymm5 vpsubw 10432(%r12), %ymm1, %ymm1 vpaddw 11584(%r12), %ymm1, %ymm1 vmovdqa 10720(%r12), %ymm6 vpsubw %ymm2, %ymm6, %ymm7 vmovdqa 11872(%r12), %ymm2 vpsubw %ymm7, %ymm2, %ymm2 vpsubw 11296(%r12), %ymm2, %ymm2 vpsubw %ymm0, %ymm7, %ymm7 vpaddw %ymm4, %ymm7, %ymm7 vmovdqa 10432(%r12), %ymm8 vmovdqa 11296(%r12), %ymm9 vmovdqa %ymm8, 1536(%r8) vmovdqa %ymm0, 1568(%r8) vmovdqa %ymm1, 1600(%r8) vmovdqa %ymm7, 1632(%r8) vmovdqa %ymm5, 1664(%r8) vmovdqa %ymm2, 1696(%r8) vmovdqa %ymm3, 1728(%r8) vmovdqa %ymm9, 1760(%r8) vmovdqa 0(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm4 vpunpckhwd const0(%rip), %ymm11, %ymm2 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm2, %ymm2 vmovdqa 256(%r8), %ymm9 vpunpcklwd const0(%rip), %ymm9, %ymm8 vpunpckhwd const0(%rip), %ymm9, %ymm9 vmovdqa 512(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm8, %ymm3 vpaddd %ymm6, %ymm9, %ymm7 vpsubd %ymm4, %ymm3, %ymm3 vpsubd %ymm2, %ymm7, %ymm7 vpsubd %ymm5, %ymm8, %ymm5 vpsubd %ymm6, %ymm9, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1536(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm9 vpunpckhwd const0(%rip), %ymm5, %ymm8 vpslld $1, %ymm9, %ymm9 vpslld $1, %ymm8, %ymm8 vpsubd %ymm9, %ymm3, %ymm3 vpsubd %ymm8, %ymm7, %ymm7 vpsrld $1, %ymm3, %ymm3 vpsrld $1, %ymm7, %ymm7 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm7, %ymm7 vpackusdw %ymm7, %ymm3, %ymm7 vmovdqa 768(%r8), %ymm3 vpaddw 1024(%r8), %ymm3, %ymm8 vpsubw 1024(%r8), %ymm3, %ymm3 vpsrlw $2, %ymm3, %ymm3 vpsubw %ymm6, %ymm3, %ymm3 vpmullw %ymm14, %ymm3, %ymm3 vpsllw $1, %ymm11, %ymm9 vpsubw %ymm9, %ymm8, %ymm9 vpsllw $7, %ymm5, %ymm8 vpsubw %ymm8, %ymm9, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm7, %ymm8, %ymm8 vmovdqa 1280(%r8), %ymm9 vpsubw %ymm11, %ymm9, %ymm9 vpmullw %ymm15, %ymm5, %ymm2 vpsubw %ymm2, %ymm9, %ymm2 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm8, %ymm7, %ymm7 vpmullw %ymm12, %ymm8, %ymm9 vpaddw %ymm9, %ymm7, %ymm9 vpmullw %ymm12, %ymm9, %ymm9 vpsubw %ymm9, %ymm2, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpsrlw $3, %ymm9, %ymm9 vpsubw %ymm3, %ymm9, %ymm9 vpsubw %ymm9, %ymm3, %ymm3 vpsubw %ymm3, %ymm6, %ymm6 vpmullw %ymm13, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_4_3_1(%rip), %ymm8, %ymm2 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $139, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm4 vpor %ymm4, %ymm8, %ymm8 
vpaddw 2048(%r8), %ymm11, %ymm11 vpaddw %ymm8, %ymm11, %ymm11 vmovdqa %xmm2, 2048(%r8) vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_4_3_1(%rip), %ymm9, %ymm2 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $139, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm4 vpor %ymm4, %ymm9, %ymm9 vpaddw 2304(%r8), %ymm6, %ymm6 vpaddw %ymm9, %ymm6, %ymm6 vmovdqa %xmm2, 2304(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_4_3_1(%rip), %ymm5, %ymm2 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $139, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm4 vpor %ymm4, %ymm5, %ymm5 vpaddw 2560(%r8), %ymm7, %ymm7 vpaddw %ymm5, %ymm7, %ymm7 vmovdqa %xmm2, 2560(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %xmm11, 64(%rdi) vextracti128 $1, %ymm11, %xmm11 vmovq %xmm11, 80(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 416(%rdi) vextracti128 $1, %ymm6, %xmm6 vmovq %xmm6, 432(%rdi) vpand mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %xmm7, 768(%rdi) vextracti128 $1, %ymm7, %xmm7 vmovq %xmm7, 784(%rdi) vpand mask_mod8192(%rip), %ymm3, %ymm3 vmovdqu %xmm3, 1120(%rdi) vextracti128 $1, %ymm3, %xmm3 vmovq %xmm3, 1136(%rdi) vmovdqa 32(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm9 vpunpckhwd const0(%rip), %ymm5, %ymm8 vpslld $1, %ymm9, %ymm9 vpslld $1, %ymm8, %ymm8 vmovdqa 288(%r8), %ymm3 vpunpcklwd const0(%rip), %ymm3, %ymm7 vpunpckhwd const0(%rip), %ymm3, %ymm3 vmovdqa 544(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm7, %ymm2 vpaddd %ymm6, %ymm3, %ymm4 vpsubd %ymm9, %ymm2, %ymm2 vpsubd %ymm8, %ymm4, %ymm4 vpsubd %ymm11, %ymm7, %ymm11 vpsubd %ymm6, %ymm3, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1568(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm3 vpunpckhwd const0(%rip), %ymm11, %ymm7 vpslld $1, %ymm3, %ymm3 vpslld $1, %ymm7, %ymm7 vpsubd %ymm3, %ymm2, %ymm2 vpsubd %ymm7, %ymm4, %ymm4 vpsrld $1, %ymm2, %ymm2 vpsrld $1, %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm2, %ymm2 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpackusdw %ymm4, %ymm2, %ymm4 vmovdqa 800(%r8), %ymm2 vpaddw 1056(%r8), %ymm2, %ymm7 vpsubw 1056(%r8), %ymm2, %ymm2 vpsrlw $2, %ymm2, %ymm2 vpsubw %ymm6, %ymm2, %ymm2 vpmullw %ymm14, %ymm2, %ymm2 vpsllw $1, %ymm5, %ymm3 vpsubw %ymm3, %ymm7, %ymm3 vpsllw $7, %ymm11, %ymm7 vpsubw %ymm7, %ymm3, %ymm7 vpsrlw $3, %ymm7, %ymm7 vpsubw %ymm4, %ymm7, %ymm7 vmovdqa 1312(%r8), %ymm3 vpsubw %ymm5, %ymm3, %ymm3 vpmullw %ymm15, %ymm11, %ymm8 vpsubw %ymm8, %ymm3, %ymm8 vpmullw %ymm14, %ymm7, %ymm7 vpsubw %ymm7, %ymm4, %ymm4 vpmullw %ymm12, %ymm7, %ymm3 vpaddw %ymm3, %ymm4, %ymm3 vpmullw %ymm12, %ymm3, %ymm3 vpsubw %ymm3, %ymm8, %ymm3 vpmullw %ymm14, %ymm3, %ymm3 vpsubw %ymm6, %ymm3, %ymm3 vpsrlw $3, %ymm3, %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vpsubw %ymm3, %ymm2, %ymm2 vpsubw %ymm2, %ymm6, %ymm6 vpmullw %ymm13, %ymm3, %ymm3 vpsubw %ymm3, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_4_3_1(%rip), %ymm7, %ymm8 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $139, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm9 vpor %ymm9, %ymm7, %ymm7 vpaddw 2080(%r8), %ymm5, %ymm5 vpaddw %ymm7, %ymm5, %ymm5 vmovdqa %xmm8, 2080(%r8) vpshufb shuf48_16(%rip), %ymm3, %ymm3 vpand mask3_5_4_3_1(%rip), %ymm3, %ymm8 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $139, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm9 vpor %ymm9, %ymm3, %ymm3 vpaddw 2336(%r8), %ymm6, %ymm6 vpaddw %ymm3, %ymm6, %ymm6 vmovdqa 
%xmm8, 2336(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_4_3_1(%rip), %ymm11, %ymm8 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $139, %ymm8, %ymm8 vpand mask_keephigh(%rip), %ymm8, %ymm9 vpor %ymm9, %ymm11, %ymm11 vpaddw 2592(%r8), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vmovdqa %xmm8, 2592(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %xmm5, 152(%rdi) vextracti128 $1, %ymm5, %xmm5 vmovq %xmm5, 168(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 504(%rdi) vextracti128 $1, %ymm6, %xmm6 vmovq %xmm6, 520(%rdi) vpand mask_mod8192(%rip), %ymm4, %ymm4 vmovdqu %xmm4, 856(%rdi) vextracti128 $1, %ymm4, %xmm4 vmovq %xmm4, 872(%rdi) vpand mask_mod8192(%rip), %ymm2, %ymm2 vmovdqu %xmm2, 1208(%rdi) vextracti128 $1, %ymm2, %xmm2 vmovq %xmm2, 1224(%rdi) vmovdqa 64(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm3 vpunpckhwd const0(%rip), %ymm11, %ymm7 vpslld $1, %ymm3, %ymm3 vpslld $1, %ymm7, %ymm7 vmovdqa 320(%r8), %ymm2 vpunpcklwd const0(%rip), %ymm2, %ymm4 vpunpckhwd const0(%rip), %ymm2, %ymm2 vmovdqa 576(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm4, %ymm8 vpaddd %ymm6, %ymm2, %ymm9 vpsubd %ymm3, %ymm8, %ymm8 vpsubd %ymm7, %ymm9, %ymm9 vpsubd %ymm5, %ymm4, %ymm5 vpsubd %ymm6, %ymm2, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1600(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm2 vpunpckhwd const0(%rip), %ymm5, %ymm4 vpslld $1, %ymm2, %ymm2 vpslld $1, %ymm4, %ymm4 vpsubd %ymm2, %ymm8, %ymm8 vpsubd %ymm4, %ymm9, %ymm9 vpsrld $1, %ymm8, %ymm8 vpsrld $1, %ymm9, %ymm9 vpand mask32_to_16(%rip), %ymm8, %ymm8 vpand mask32_to_16(%rip), %ymm9, %ymm9 vpackusdw %ymm9, %ymm8, %ymm9 vmovdqa 832(%r8), %ymm8 vpaddw 1088(%r8), %ymm8, %ymm4 vpsubw 1088(%r8), %ymm8, %ymm8 vpsrlw $2, %ymm8, %ymm8 vpsubw %ymm6, %ymm8, %ymm8 vpmullw %ymm14, %ymm8, %ymm8 vpsllw $1, %ymm11, %ymm2 vpsubw %ymm2, %ymm4, %ymm2 vpsllw $7, %ymm5, %ymm4 vpsubw %ymm4, %ymm2, %ymm4 vpsrlw $3, %ymm4, %ymm4 vpsubw %ymm9, %ymm4, %ymm4 vmovdqa 1344(%r8), %ymm2 vpsubw %ymm11, %ymm2, %ymm2 vpmullw %ymm15, %ymm5, %ymm7 vpsubw %ymm7, %ymm2, %ymm7 vpmullw %ymm14, %ymm4, %ymm4 vpsubw %ymm4, %ymm9, %ymm9 vpmullw %ymm12, %ymm4, %ymm2 vpaddw %ymm2, %ymm9, %ymm2 vpmullw %ymm12, %ymm2, %ymm2 vpsubw %ymm2, %ymm7, %ymm2 vpmullw %ymm14, %ymm2, %ymm2 vpsubw %ymm6, %ymm2, %ymm2 vpsrlw $3, %ymm2, %ymm2 vpsubw %ymm8, %ymm2, %ymm2 vpsubw %ymm2, %ymm8, %ymm8 vpsubw %ymm8, %ymm6, %ymm6 vpmullw %ymm13, %ymm2, %ymm2 vpsubw %ymm2, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_4_3_1(%rip), %ymm4, %ymm7 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $139, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm3 vpor %ymm3, %ymm4, %ymm4 vpaddw 2112(%r8), %ymm11, %ymm11 vpaddw %ymm4, %ymm11, %ymm11 vmovdqa %xmm7, 2112(%r8) vpshufb shuf48_16(%rip), %ymm2, %ymm2 vpand mask3_5_4_3_1(%rip), %ymm2, %ymm7 vpand mask5_3_5_3(%rip), %ymm2, %ymm2 vpermq $139, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm3 vpor %ymm3, %ymm2, %ymm2 vpaddw 2368(%r8), %ymm6, %ymm6 vpaddw %ymm2, %ymm6, %ymm6 vmovdqa %xmm7, 2368(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_4_3_1(%rip), %ymm5, %ymm7 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $139, %ymm7, %ymm7 vpand mask_keephigh(%rip), %ymm7, %ymm3 vpor %ymm3, %ymm5, %ymm5 vpaddw 2624(%r8), %ymm9, %ymm9 vpaddw %ymm5, %ymm9, %ymm9 vmovdqa %xmm7, 2624(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu 
%xmm11, 240(%rdi) vextracti128 $1, %ymm11, %xmm11 vmovq %xmm11, 256(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 592(%rdi) vextracti128 $1, %ymm6, %xmm6 vmovq %xmm6, 608(%rdi) vpand mask_mod8192(%rip), %ymm9, %ymm9 vmovdqu %xmm9, 944(%rdi) vextracti128 $1, %ymm9, %xmm9 vmovq %xmm9, 960(%rdi) vpand mask_mod8192(%rip), %ymm8, %ymm8 vmovdqu %xmm8, 1296(%rdi) vextracti128 $1, %ymm8, %xmm8 vmovq %xmm8, 1312(%rdi) vmovdqa 96(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm2 vpunpckhwd const0(%rip), %ymm5, %ymm4 vpslld $1, %ymm2, %ymm2 vpslld $1, %ymm4, %ymm4 vmovdqa 352(%r8), %ymm8 vpunpcklwd const0(%rip), %ymm8, %ymm9 vpunpckhwd const0(%rip), %ymm8, %ymm8 vmovdqa 608(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm9, %ymm7 vpaddd %ymm6, %ymm8, %ymm3 vpsubd %ymm2, %ymm7, %ymm7 vpsubd %ymm4, %ymm3, %ymm3 vpsubd %ymm11, %ymm9, %ymm11 vpsubd %ymm6, %ymm8, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1632(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm8 vpunpckhwd const0(%rip), %ymm11, %ymm9 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm9, %ymm9 vpsubd %ymm8, %ymm7, %ymm7 vpsubd %ymm9, %ymm3, %ymm3 vpsrld $1, %ymm7, %ymm7 vpsrld $1, %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm7, %ymm7 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpackusdw %ymm3, %ymm7, %ymm3 vmovdqa 864(%r8), %ymm7 vpaddw 1120(%r8), %ymm7, %ymm9 vpsubw 1120(%r8), %ymm7, %ymm7 vpsrlw $2, %ymm7, %ymm7 vpsubw %ymm6, %ymm7, %ymm7 vpmullw %ymm14, %ymm7, %ymm7 vpsllw $1, %ymm5, %ymm8 vpsubw %ymm8, %ymm9, %ymm8 vpsllw $7, %ymm11, %ymm9 vpsubw %ymm9, %ymm8, %ymm9 vpsrlw $3, %ymm9, %ymm9 vpsubw %ymm3, %ymm9, %ymm9 vmovdqa 1376(%r8), %ymm8 vpsubw %ymm5, %ymm8, %ymm8 vpmullw %ymm15, %ymm11, %ymm4 vpsubw %ymm4, %ymm8, %ymm4 vpmullw %ymm14, %ymm9, %ymm9 vpsubw %ymm9, %ymm3, %ymm3 vpmullw %ymm12, %ymm9, %ymm8 vpaddw %ymm8, %ymm3, %ymm8 vpmullw %ymm12, %ymm8, %ymm8 vpsubw %ymm8, %ymm4, %ymm8 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm6, %ymm8, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm7, %ymm8, %ymm8 vpsubw %ymm8, %ymm7, %ymm7 vpsubw %ymm7, %ymm6, %ymm6 vpmullw %ymm13, %ymm8, %ymm8 vpsubw %ymm8, %ymm6, %ymm6 vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_4_3_1(%rip), %ymm9, %ymm4 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $139, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm2 vpor %ymm2, %ymm9, %ymm9 vpaddw 2144(%r8), %ymm5, %ymm5 vpaddw %ymm9, %ymm5, %ymm5 vmovdqa %xmm4, 2144(%r8) vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_4_3_1(%rip), %ymm8, %ymm4 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $139, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm2 vpor %ymm2, %ymm8, %ymm8 vpaddw 2400(%r8), %ymm6, %ymm6 vpaddw %ymm8, %ymm6, %ymm6 vmovdqa %xmm4, 2400(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_4_3_1(%rip), %ymm11, %ymm4 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $139, %ymm4, %ymm4 vpand mask_keephigh(%rip), %ymm4, %ymm2 vpor %ymm2, %ymm11, %ymm11 vpaddw 2656(%r8), %ymm3, %ymm3 vpaddw %ymm11, %ymm3, %ymm3 vmovdqa %xmm4, 2656(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %xmm5, 328(%rdi) vextracti128 $1, %ymm5, %xmm5 vmovq %xmm5, 344(%rdi) vpshufb shufmin1_mask3(%rip), %ymm5, %ymm5 vmovdqa %xmm5, 1792(%r8) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 680(%rdi) vextracti128 $1, %ymm6, %xmm6 vmovq %xmm6, 696(%rdi) vpshufb shufmin1_mask3(%rip), %ymm6, %ymm6 vmovdqa %xmm6, 1824(%r8) vpand mask_mod8192(%rip), 
%ymm3, %ymm3 vmovdqu %xmm3, 1032(%rdi) vextracti128 $1, %ymm3, %xmm3 vmovq %xmm3, 1048(%rdi) vpshufb shufmin1_mask3(%rip), %ymm3, %ymm3 vmovdqa %xmm3, 1856(%r8) vpand mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %xmm7, 1384(%rdi) vextracti128 $1, %ymm7, %xmm7 vpextrw $0, %xmm7, 1400(%rdi) vpshufb shufmin1_mask3(%rip), %ymm7, %ymm7 vmovdqa %xmm7, 1888(%r8) vmovdqa 128(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm8 vpunpckhwd const0(%rip), %ymm11, %ymm9 vpslld $1, %ymm8, %ymm8 vpslld $1, %ymm9, %ymm9 vmovdqa 384(%r8), %ymm7 vpunpcklwd const0(%rip), %ymm7, %ymm3 vpunpckhwd const0(%rip), %ymm7, %ymm7 vmovdqa 640(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm3, %ymm4 vpaddd %ymm6, %ymm7, %ymm2 vpsubd %ymm8, %ymm4, %ymm4 vpsubd %ymm9, %ymm2, %ymm2 vpsubd %ymm5, %ymm3, %ymm5 vpsubd %ymm6, %ymm7, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1664(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm7 vpunpckhwd const0(%rip), %ymm5, %ymm3 vpslld $1, %ymm7, %ymm7 vpslld $1, %ymm3, %ymm3 vpsubd %ymm7, %ymm4, %ymm4 vpsubd %ymm3, %ymm2, %ymm2 vpsrld $1, %ymm4, %ymm4 vpsrld $1, %ymm2, %ymm2 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm2, %ymm2 vpackusdw %ymm2, %ymm4, %ymm2 vmovdqa 896(%r8), %ymm4 vpaddw 1152(%r8), %ymm4, %ymm3 vpsubw 1152(%r8), %ymm4, %ymm4 vpsrlw $2, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsllw $1, %ymm11, %ymm7 vpsubw %ymm7, %ymm3, %ymm7 vpsllw $7, %ymm5, %ymm3 vpsubw %ymm3, %ymm7, %ymm3 vpsrlw $3, %ymm3, %ymm3 vpsubw %ymm2, %ymm3, %ymm3 vmovdqa 1408(%r8), %ymm7 vpsubw %ymm11, %ymm7, %ymm7 vpmullw %ymm15, %ymm5, %ymm9 vpsubw %ymm9, %ymm7, %ymm9 vpmullw %ymm14, %ymm3, %ymm3 vpsubw %ymm3, %ymm2, %ymm2 vpmullw %ymm12, %ymm3, %ymm7 vpaddw %ymm7, %ymm2, %ymm7 vpmullw %ymm12, %ymm7, %ymm7 vpsubw %ymm7, %ymm9, %ymm7 vpmullw %ymm14, %ymm7, %ymm7 vpsubw %ymm6, %ymm7, %ymm7 vpsrlw $3, %ymm7, %ymm7 vpsubw %ymm4, %ymm7, %ymm7 vpsubw %ymm7, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vpmullw %ymm13, %ymm7, %ymm7 vpsubw %ymm7, %ymm6, %ymm6 vmovdqu 416(%rdi), %ymm9 vmovdqu 768(%rdi), %ymm8 vmovdqu 1120(%rdi), %ymm10 vpaddw %ymm11, %ymm9, %ymm11 vpaddw %ymm6, %ymm8, %ymm6 vpaddw %ymm2, %ymm10, %ymm2 vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_4_3_1(%rip), %ymm4, %ymm10 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $139, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm8 vpor %ymm8, %ymm4, %ymm4 vmovdqu 64(%rdi), %ymm8 vpaddw 1920(%r8), %ymm8, %ymm8 vpaddw %ymm4, %ymm8, %ymm8 vpand mask_mod8192(%rip), %ymm8, %ymm8 vmovdqu %xmm8, 64(%rdi) vextracti128 $1, %ymm8, %xmm8 vmovq %xmm8, 80(%rdi) vmovdqa %xmm10, 1920(%r8) vpshufb shuf48_16(%rip), %ymm3, %ymm3 vpand mask3_5_4_3_1(%rip), %ymm3, %ymm10 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $139, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm8 vpor %ymm8, %ymm3, %ymm3 vpaddw 2176(%r8), %ymm11, %ymm11 vpaddw %ymm3, %ymm11, %ymm11 vmovdqa %xmm10, 2176(%r8) vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_4_3_1(%rip), %ymm7, %ymm10 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $139, %ymm10, %ymm10 vpand mask_keephigh(%rip), %ymm10, %ymm8 vpor %ymm8, %ymm7, %ymm7 vpaddw 2432(%r8), %ymm6, %ymm6 vpaddw %ymm7, %ymm6, %ymm6 vmovdqa %xmm10, 2432(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_4_3_1(%rip), %ymm5, %ymm10 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $139, %ymm10, %ymm10 vpand 
mask_keephigh(%rip), %ymm10, %ymm8 vpor %ymm8, %ymm5, %ymm5 vpaddw 2688(%r8), %ymm2, %ymm2 vpaddw %ymm5, %ymm2, %ymm2 vmovdqa %xmm10, 2688(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %xmm11, 416(%rdi) vextracti128 $1, %ymm11, %xmm11 vmovq %xmm11, 432(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 768(%rdi) vextracti128 $1, %ymm6, %xmm6 vmovq %xmm6, 784(%rdi) vpand mask_mod8192(%rip), %ymm2, %ymm2 vmovdqu %xmm2, 1120(%rdi) vextracti128 $1, %ymm2, %xmm2 vmovq %xmm2, 1136(%rdi) vmovdqa 160(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm7 vpunpckhwd const0(%rip), %ymm5, %ymm3 vpslld $1, %ymm7, %ymm7 vpslld $1, %ymm3, %ymm3 vmovdqa 416(%r8), %ymm4 vpunpcklwd const0(%rip), %ymm4, %ymm2 vpunpckhwd const0(%rip), %ymm4, %ymm4 vmovdqa 672(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm2, %ymm10 vpaddd %ymm6, %ymm4, %ymm8 vpsubd %ymm7, %ymm10, %ymm10 vpsubd %ymm3, %ymm8, %ymm8 vpsubd %ymm11, %ymm2, %ymm11 vpsubd %ymm6, %ymm4, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1696(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm4 vpunpckhwd const0(%rip), %ymm11, %ymm2 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm2, %ymm2 vpsubd %ymm4, %ymm10, %ymm10 vpsubd %ymm2, %ymm8, %ymm8 vpsrld $1, %ymm10, %ymm10 vpsrld $1, %ymm8, %ymm8 vpand mask32_to_16(%rip), %ymm10, %ymm10 vpand mask32_to_16(%rip), %ymm8, %ymm8 vpackusdw %ymm8, %ymm10, %ymm8 vmovdqa 928(%r8), %ymm10 vpaddw 1184(%r8), %ymm10, %ymm2 vpsubw 1184(%r8), %ymm10, %ymm10 vpsrlw $2, %ymm10, %ymm10 vpsubw %ymm6, %ymm10, %ymm10 vpmullw %ymm14, %ymm10, %ymm10 vpsllw $1, %ymm5, %ymm4 vpsubw %ymm4, %ymm2, %ymm4 vpsllw $7, %ymm11, %ymm2 vpsubw %ymm2, %ymm4, %ymm2 vpsrlw $3, %ymm2, %ymm2 vpsubw %ymm8, %ymm2, %ymm2 vmovdqa 1440(%r8), %ymm4 vpsubw %ymm5, %ymm4, %ymm4 vpmullw %ymm15, %ymm11, %ymm3 vpsubw %ymm3, %ymm4, %ymm3 vpmullw %ymm14, %ymm2, %ymm2 vpsubw %ymm2, %ymm8, %ymm8 vpmullw %ymm12, %ymm2, %ymm4 vpaddw %ymm4, %ymm8, %ymm4 vpmullw %ymm12, %ymm4, %ymm4 vpsubw %ymm4, %ymm3, %ymm4 vpmullw %ymm14, %ymm4, %ymm4 vpsubw %ymm6, %ymm4, %ymm4 vpsrlw $3, %ymm4, %ymm4 vpsubw %ymm10, %ymm4, %ymm4 vpsubw %ymm4, %ymm10, %ymm10 vpsubw %ymm10, %ymm6, %ymm6 vpmullw %ymm13, %ymm4, %ymm4 vpsubw %ymm4, %ymm6, %ymm6 vmovdqu 504(%rdi), %ymm3 vmovdqu 856(%rdi), %ymm7 vmovdqu 1208(%rdi), %ymm9 vpaddw %ymm5, %ymm3, %ymm5 vpaddw %ymm6, %ymm7, %ymm6 vpaddw %ymm8, %ymm9, %ymm8 vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_4_3_1(%rip), %ymm10, %ymm9 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $139, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm7 vpor %ymm7, %ymm10, %ymm10 vmovdqu 152(%rdi), %ymm7 vpaddw 1952(%r8), %ymm7, %ymm7 vpaddw %ymm10, %ymm7, %ymm7 vpand mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %xmm7, 152(%rdi) vextracti128 $1, %ymm7, %xmm7 vmovq %xmm7, 168(%rdi) vmovdqa %xmm9, 1952(%r8) vpshufb shuf48_16(%rip), %ymm2, %ymm2 vpand mask3_5_4_3_1(%rip), %ymm2, %ymm9 vpand mask5_3_5_3(%rip), %ymm2, %ymm2 vpermq $139, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm7 vpor %ymm7, %ymm2, %ymm2 vpaddw 2208(%r8), %ymm5, %ymm5 vpaddw %ymm2, %ymm5, %ymm5 vmovdqa %xmm9, 2208(%r8) vpshufb shuf48_16(%rip), %ymm4, %ymm4 vpand mask3_5_4_3_1(%rip), %ymm4, %ymm9 vpand mask5_3_5_3(%rip), %ymm4, %ymm4 vpermq $139, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm7 vpor %ymm7, %ymm4, %ymm4 vpaddw 2464(%r8), %ymm6, %ymm6 vpaddw %ymm4, %ymm6, %ymm6 vmovdqa %xmm9, 2464(%r8) 
vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_4_3_1(%rip), %ymm11, %ymm9 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $139, %ymm9, %ymm9 vpand mask_keephigh(%rip), %ymm9, %ymm7 vpor %ymm7, %ymm11, %ymm11 vpaddw 2720(%r8), %ymm8, %ymm8 vpaddw %ymm11, %ymm8, %ymm8 vmovdqa %xmm9, 2720(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %xmm5, 504(%rdi) vextracti128 $1, %ymm5, %xmm5 vmovq %xmm5, 520(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 856(%rdi) vextracti128 $1, %ymm6, %xmm6 vmovq %xmm6, 872(%rdi) vpand mask_mod8192(%rip), %ymm8, %ymm8 vmovdqu %xmm8, 1208(%rdi) vextracti128 $1, %ymm8, %xmm8 vmovq %xmm8, 1224(%rdi) vmovdqa 192(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm4 vpunpckhwd const0(%rip), %ymm11, %ymm2 vpslld $1, %ymm4, %ymm4 vpslld $1, %ymm2, %ymm2 vmovdqa 448(%r8), %ymm10 vpunpcklwd const0(%rip), %ymm10, %ymm8 vpunpckhwd const0(%rip), %ymm10, %ymm10 vmovdqa 704(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm5 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm5, %ymm8, %ymm9 vpaddd %ymm6, %ymm10, %ymm7 vpsubd %ymm4, %ymm9, %ymm9 vpsubd %ymm2, %ymm7, %ymm7 vpsubd %ymm5, %ymm8, %ymm5 vpsubd %ymm6, %ymm10, %ymm6 vpsrld $1, %ymm5, %ymm5 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm5, %ymm5 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm5, %ymm6 vmovdqa 1728(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm10 vpunpckhwd const0(%rip), %ymm5, %ymm8 vpslld $1, %ymm10, %ymm10 vpslld $1, %ymm8, %ymm8 vpsubd %ymm10, %ymm9, %ymm9 vpsubd %ymm8, %ymm7, %ymm7 vpsrld $1, %ymm9, %ymm9 vpsrld $1, %ymm7, %ymm7 vpand mask32_to_16(%rip), %ymm9, %ymm9 vpand mask32_to_16(%rip), %ymm7, %ymm7 vpackusdw %ymm7, %ymm9, %ymm7 vmovdqa 960(%r8), %ymm9 vpaddw 1216(%r8), %ymm9, %ymm8 vpsubw 1216(%r8), %ymm9, %ymm9 vpsrlw $2, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsllw $1, %ymm11, %ymm10 vpsubw %ymm10, %ymm8, %ymm10 vpsllw $7, %ymm5, %ymm8 vpsubw %ymm8, %ymm10, %ymm8 vpsrlw $3, %ymm8, %ymm8 vpsubw %ymm7, %ymm8, %ymm8 vmovdqa 1472(%r8), %ymm10 vpsubw %ymm11, %ymm10, %ymm10 vpmullw %ymm15, %ymm5, %ymm2 vpsubw %ymm2, %ymm10, %ymm2 vpmullw %ymm14, %ymm8, %ymm8 vpsubw %ymm8, %ymm7, %ymm7 vpmullw %ymm12, %ymm8, %ymm10 vpaddw %ymm10, %ymm7, %ymm10 vpmullw %ymm12, %ymm10, %ymm10 vpsubw %ymm10, %ymm2, %ymm10 vpmullw %ymm14, %ymm10, %ymm10 vpsubw %ymm6, %ymm10, %ymm10 vpsrlw $3, %ymm10, %ymm10 vpsubw %ymm9, %ymm10, %ymm10 vpsubw %ymm10, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vpmullw %ymm13, %ymm10, %ymm10 vpsubw %ymm10, %ymm6, %ymm6 vmovdqu 592(%rdi), %ymm2 vmovdqu 944(%rdi), %ymm4 vmovdqu 1296(%rdi), %ymm3 vpaddw %ymm11, %ymm2, %ymm11 vpaddw %ymm6, %ymm4, %ymm6 vpaddw %ymm7, %ymm3, %ymm7 vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_4_3_1(%rip), %ymm9, %ymm3 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $139, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm4 vpor %ymm4, %ymm9, %ymm9 vmovdqu 240(%rdi), %ymm4 vpaddw 1984(%r8), %ymm4, %ymm4 vpaddw %ymm9, %ymm4, %ymm4 vpand mask_mod8192(%rip), %ymm4, %ymm4 vmovdqu %xmm4, 240(%rdi) vextracti128 $1, %ymm4, %xmm4 vmovq %xmm4, 256(%rdi) vmovdqa %xmm3, 1984(%r8) vpshufb shuf48_16(%rip), %ymm8, %ymm8 vpand mask3_5_4_3_1(%rip), %ymm8, %ymm3 vpand mask5_3_5_3(%rip), %ymm8, %ymm8 vpermq $139, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm4 vpor %ymm4, %ymm8, %ymm8 vpaddw 2240(%r8), %ymm11, %ymm11 vpaddw %ymm8, %ymm11, %ymm11 vmovdqa %xmm3, 2240(%r8) vpshufb shuf48_16(%rip), %ymm10, %ymm10 vpand mask3_5_4_3_1(%rip), %ymm10, %ymm3 vpand mask5_3_5_3(%rip), %ymm10, %ymm10 vpermq $139, 
%ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm4 vpor %ymm4, %ymm10, %ymm10 vpaddw 2496(%r8), %ymm6, %ymm6 vpaddw %ymm10, %ymm6, %ymm6 vmovdqa %xmm3, 2496(%r8) vpshufb shuf48_16(%rip), %ymm5, %ymm5 vpand mask3_5_4_3_1(%rip), %ymm5, %ymm3 vpand mask5_3_5_3(%rip), %ymm5, %ymm5 vpermq $139, %ymm3, %ymm3 vpand mask_keephigh(%rip), %ymm3, %ymm4 vpor %ymm4, %ymm5, %ymm5 vpaddw 2752(%r8), %ymm7, %ymm7 vpaddw %ymm5, %ymm7, %ymm7 vmovdqa %xmm3, 2752(%r8) vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %xmm11, 592(%rdi) vextracti128 $1, %ymm11, %xmm11 vmovq %xmm11, 608(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 944(%rdi) vextracti128 $1, %ymm6, %xmm6 vmovq %xmm6, 960(%rdi) vpand mask_mod8192(%rip), %ymm7, %ymm7 vmovdqu %xmm7, 1296(%rdi) vextracti128 $1, %ymm7, %xmm7 vmovq %xmm7, 1312(%rdi) vmovdqa 224(%r8), %ymm5 vpunpcklwd const0(%rip), %ymm5, %ymm10 vpunpckhwd const0(%rip), %ymm5, %ymm8 vpslld $1, %ymm10, %ymm10 vpslld $1, %ymm8, %ymm8 vmovdqa 480(%r8), %ymm9 vpunpcklwd const0(%rip), %ymm9, %ymm7 vpunpckhwd const0(%rip), %ymm9, %ymm9 vmovdqa 736(%r8), %ymm6 vpunpcklwd const0(%rip), %ymm6, %ymm11 vpunpckhwd const0(%rip), %ymm6, %ymm6 vpaddd %ymm11, %ymm7, %ymm3 vpaddd %ymm6, %ymm9, %ymm4 vpsubd %ymm10, %ymm3, %ymm3 vpsubd %ymm8, %ymm4, %ymm4 vpsubd %ymm11, %ymm7, %ymm11 vpsubd %ymm6, %ymm9, %ymm6 vpsrld $1, %ymm11, %ymm11 vpsrld $1, %ymm6, %ymm6 vpand mask32_to_16(%rip), %ymm11, %ymm11 vpand mask32_to_16(%rip), %ymm6, %ymm6 vpackusdw %ymm6, %ymm11, %ymm6 vmovdqa 1760(%r8), %ymm11 vpunpcklwd const0(%rip), %ymm11, %ymm9 vpunpckhwd const0(%rip), %ymm11, %ymm7 vpslld $1, %ymm9, %ymm9 vpslld $1, %ymm7, %ymm7 vpsubd %ymm9, %ymm3, %ymm3 vpsubd %ymm7, %ymm4, %ymm4 vpsrld $1, %ymm3, %ymm3 vpsrld $1, %ymm4, %ymm4 vpand mask32_to_16(%rip), %ymm3, %ymm3 vpand mask32_to_16(%rip), %ymm4, %ymm4 vpackusdw %ymm4, %ymm3, %ymm4 vmovdqa 992(%r8), %ymm3 vpaddw 1248(%r8), %ymm3, %ymm7 vpsubw 1248(%r8), %ymm3, %ymm3 vpsrlw $2, %ymm3, %ymm3 vpsubw %ymm6, %ymm3, %ymm3 vpmullw %ymm14, %ymm3, %ymm3 vpsllw $1, %ymm5, %ymm9 vpsubw %ymm9, %ymm7, %ymm9 vpsllw $7, %ymm11, %ymm7 vpsubw %ymm7, %ymm9, %ymm7 vpsrlw $3, %ymm7, %ymm7 vpsubw %ymm4, %ymm7, %ymm7 vmovdqa 1504(%r8), %ymm9 vpsubw %ymm5, %ymm9, %ymm9 vpmullw %ymm15, %ymm11, %ymm8 vpsubw %ymm8, %ymm9, %ymm8 vpmullw %ymm14, %ymm7, %ymm7 vpsubw %ymm7, %ymm4, %ymm4 vpmullw %ymm12, %ymm7, %ymm9 vpaddw %ymm9, %ymm4, %ymm9 vpmullw %ymm12, %ymm9, %ymm9 vpsubw %ymm9, %ymm8, %ymm9 vpmullw %ymm14, %ymm9, %ymm9 vpsubw %ymm6, %ymm9, %ymm9 vpsrlw $3, %ymm9, %ymm9 vpsubw %ymm3, %ymm9, %ymm9 vpsubw %ymm9, %ymm3, %ymm3 vpsubw %ymm3, %ymm6, %ymm6 vpmullw %ymm13, %ymm9, %ymm9 vpsubw %ymm9, %ymm6, %ymm6 vextracti128 $1, %ymm4, %xmm8 vpshufb shufmin1_mask3(%rip), %ymm8, %ymm8 vmovdqa %ymm8, 2816(%r8) vextracti128 $1, %ymm3, %xmm8 vpshufb shufmin1_mask3(%rip), %ymm8, %ymm8 vmovdqa %ymm8, 2848(%r8) vextracti128 $1, %ymm7, %xmm8 vpshufb shufmin1_mask3(%rip), %ymm8, %ymm8 vmovdqa %ymm8, 2880(%r8) vmovdqu 680(%rdi), %ymm8 vmovdqu 1032(%rdi), %ymm10 # Only 18 bytes can be read at 1384, but vmovdqu reads 32. # Copy 18 bytes to the red zone and zero pad to 32 bytes. 
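# The scratch buffer lives in the red zone: the System V AMD64 ABI reserves the
# 128 bytes below %rsp, so -32(%rsp)..-1(%rsp) may be written without adjusting
# the stack pointer. The zeroing stores clear -16(%rsp)..-1(%rsp) first, the two
# movq stores then place data bytes 0-15 at -32(%rsp), the movw store places
# bytes 16-17, and bytes 18-31 remain zero, so the following vmovdqu reads a
# fully defined 32-byte vector without touching memory past the 18 valid bytes.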
xor %r9, %r9 movq %r9, -16(%rsp) movq %r9, -8(%rsp) movq 1384(%rdi), %r9 movq %r9, -32(%rsp) movq 1384+8(%rdi), %r9 movq %r9, -24(%rsp) movw 1384+16(%rdi), %r9w movw %r9w, -16(%rsp) vmovdqu -32(%rsp), %ymm2 vpaddw %ymm5, %ymm8, %ymm5 vpaddw %ymm6, %ymm10, %ymm6 vpaddw %ymm4, %ymm2, %ymm4 vpshufb shuf48_16(%rip), %ymm3, %ymm3 vpand mask3_5_4_3_1(%rip), %ymm3, %ymm2 vpand mask5_3_5_3(%rip), %ymm3, %ymm3 vpermq $139, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm10 vpor %ymm10, %ymm3, %ymm3 vmovdqu 328(%rdi), %ymm10 vpaddw 2016(%r8), %ymm10, %ymm10 vpaddw %ymm3, %ymm10, %ymm10 vpand mask_mod8192(%rip), %ymm10, %ymm10 vmovdqu %xmm10, 328(%rdi) vextracti128 $1, %ymm10, %xmm10 vmovq %xmm10, 344(%rdi) vpshufb shufmin1_mask3(%rip), %ymm10, %ymm10 vmovdqa %xmm10, 1792(%r8) vmovdqa %xmm2, 2016(%r8) vpshufb shuf48_16(%rip), %ymm7, %ymm7 vpand mask3_5_4_3_1(%rip), %ymm7, %ymm2 vpand mask5_3_5_3(%rip), %ymm7, %ymm7 vpermq $139, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm10 vpor %ymm10, %ymm7, %ymm7 vpaddw 2272(%r8), %ymm5, %ymm5 vpaddw %ymm7, %ymm5, %ymm5 vmovdqa %xmm2, 2272(%r8) vpshufb shuf48_16(%rip), %ymm9, %ymm9 vpand mask3_5_4_3_1(%rip), %ymm9, %ymm2 vpand mask5_3_5_3(%rip), %ymm9, %ymm9 vpermq $139, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm10 vpor %ymm10, %ymm9, %ymm9 vpaddw 2528(%r8), %ymm6, %ymm6 vpaddw %ymm9, %ymm6, %ymm6 vmovdqa %xmm2, 2528(%r8) vpshufb shuf48_16(%rip), %ymm11, %ymm11 vpand mask3_5_4_3_1(%rip), %ymm11, %ymm2 vpand mask5_3_5_3(%rip), %ymm11, %ymm11 vpermq $139, %ymm2, %ymm2 vpand mask_keephigh(%rip), %ymm2, %ymm10 vpor %ymm10, %ymm11, %ymm11 vpaddw 2784(%r8), %ymm4, %ymm4 vpaddw %ymm11, %ymm4, %ymm4 vmovdqa %xmm2, 2784(%r8) vpand mask_mod8192(%rip), %ymm5, %ymm5 vmovdqu %xmm5, 680(%rdi) vextracti128 $1, %ymm5, %xmm5 vmovq %xmm5, 696(%rdi) vpand mask_mod8192(%rip), %ymm6, %ymm6 vmovdqu %xmm6, 1032(%rdi) vextracti128 $1, %ymm6, %xmm6 vmovq %xmm6, 1048(%rdi) vpand mask_mod8192(%rip), %ymm4, %ymm4 vmovdqu %xmm4, 1384(%rdi) vextracti128 $1, %ymm4, %xmm4 vpextrw $0, %xmm4, 1400(%rdi) vmovdqu 0(%rdi), %ymm11 vpaddw 1888(%r8), %ymm11, %ymm11 vpaddw 2816(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 0(%rdi) vmovdqu 352(%rdi), %ymm11 vpaddw 2528(%r8), %ymm11, %ymm11 vpaddw 2848(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 352(%rdi) vmovdqu 704(%rdi), %ymm11 vpaddw 2784(%r8), %ymm11, %ymm11 vpaddw 2880(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 704(%rdi) vmovdqu 88(%rdi), %ymm11 vpaddw 2048(%r8), %ymm11, %ymm11 vpaddw 1920(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 88(%rdi) vmovdqu 440(%rdi), %ymm11 vpaddw 2304(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 440(%rdi) vmovdqu 792(%rdi), %ymm11 vpaddw 2560(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 792(%rdi) vmovdqu 176(%rdi), %ymm11 vpaddw 2080(%r8), %ymm11, %ymm11 vpaddw 1952(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 176(%rdi) vmovdqu 528(%rdi), %ymm11 vpaddw 2336(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 528(%rdi) vmovdqu 880(%rdi), %ymm11 vpaddw 2592(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 880(%rdi) vmovdqu 264(%rdi), %ymm11 vpaddw 2112(%r8), %ymm11, %ymm11 vpaddw 1984(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 264(%rdi) vmovdqu 616(%rdi), %ymm11 vpaddw 2368(%r8), %ymm11, 
%ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 616(%rdi) vmovdqu 968(%rdi), %ymm11 vpaddw 2624(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 968(%rdi) vmovdqu 352(%rdi), %ymm11 vpaddw 2144(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 352(%rdi) vmovdqu 704(%rdi), %ymm11 vpaddw 2400(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 704(%rdi) vmovdqu 1056(%rdi), %ymm11 vpaddw 2656(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 1056(%rdi) vmovdqu 440(%rdi), %ymm11 vpaddw 2176(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 440(%rdi) vmovdqu 792(%rdi), %ymm11 vpaddw 2432(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 792(%rdi) vmovdqu 1144(%rdi), %ymm11 vpaddw 2688(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 1144(%rdi) vmovdqu 528(%rdi), %ymm11 vpaddw 2208(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 528(%rdi) vmovdqu 880(%rdi), %ymm11 vpaddw 2464(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 880(%rdi) vmovdqu 1232(%rdi), %ymm11 vpaddw 2720(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 1232(%rdi) vmovdqu 616(%rdi), %ymm11 vpaddw 2240(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 616(%rdi) vmovdqu 968(%rdi), %ymm11 vpaddw 2496(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 968(%rdi) vmovdqu 1320(%rdi), %ymm11 vpaddw 2752(%r8), %ymm11, %ymm11 vpand mask_mod8192(%rip), %ymm11, %ymm11 vmovdqu %ymm11, 1320(%rdi) pop %r12 .cfi_restore r12 pop %rbp .cfi_restore rbp .cfi_def_cfa_register rsp .cfi_adjust_cfa_offset -8 ret .cfi_endproc .size poly_Rq_mul,.-poly_Rq_mul #endif
wlsfx/bnbb
5,330
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86/crypto/test/trampoline-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl abi_test_trampoline .hidden abi_test_trampoline .type abi_test_trampoline,@function .align 16 abi_test_trampoline: .L_abi_test_trampoline_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 24(%esp),%ecx movl (%ecx),%esi movl 4(%ecx),%edi movl 8(%ecx),%ebx movl 12(%ecx),%ebp subl $44,%esp movl 72(%esp),%eax xorl %ecx,%ecx .L000loop: cmpl 76(%esp),%ecx jae .L001loop_done movl (%eax,%ecx,4),%edx movl %edx,(%esp,%ecx,4) addl $1,%ecx jmp .L000loop .L001loop_done: call *64(%esp) addl $44,%esp movl 24(%esp),%ecx movl %esi,(%ecx) movl %edi,4(%ecx) movl %ebx,8(%ecx) movl %ebp,12(%ecx) popl %edi popl %esi popl %ebx popl %ebp ret .size abi_test_trampoline,.-.L_abi_test_trampoline_begin .globl abi_test_get_and_clear_direction_flag .hidden abi_test_get_and_clear_direction_flag .type abi_test_get_and_clear_direction_flag,@function .align 16 abi_test_get_and_clear_direction_flag: .L_abi_test_get_and_clear_direction_flag_begin: pushfl popl %eax andl $1024,%eax shrl $10,%eax cld ret .size abi_test_get_and_clear_direction_flag,.-.L_abi_test_get_and_clear_direction_flag_begin .globl abi_test_set_direction_flag .hidden abi_test_set_direction_flag .type abi_test_set_direction_flag,@function .align 16 abi_test_set_direction_flag: .L_abi_test_set_direction_flag_begin: std ret .size abi_test_set_direction_flag,.-.L_abi_test_set_direction_flag_begin .globl abi_test_clobber_eax .hidden abi_test_clobber_eax .type abi_test_clobber_eax,@function .align 16 abi_test_clobber_eax: .L_abi_test_clobber_eax_begin: xorl %eax,%eax ret .size abi_test_clobber_eax,.-.L_abi_test_clobber_eax_begin .globl abi_test_clobber_ebx .hidden abi_test_clobber_ebx .type abi_test_clobber_ebx,@function .align 16 abi_test_clobber_ebx: .L_abi_test_clobber_ebx_begin: xorl %ebx,%ebx ret .size abi_test_clobber_ebx,.-.L_abi_test_clobber_ebx_begin .globl abi_test_clobber_ecx .hidden abi_test_clobber_ecx .type abi_test_clobber_ecx,@function .align 16 abi_test_clobber_ecx: .L_abi_test_clobber_ecx_begin: xorl %ecx,%ecx ret .size abi_test_clobber_ecx,.-.L_abi_test_clobber_ecx_begin .globl abi_test_clobber_edx .hidden abi_test_clobber_edx .type abi_test_clobber_edx,@function .align 16 abi_test_clobber_edx: .L_abi_test_clobber_edx_begin: xorl %edx,%edx ret .size abi_test_clobber_edx,.-.L_abi_test_clobber_edx_begin .globl abi_test_clobber_edi .hidden abi_test_clobber_edi .type abi_test_clobber_edi,@function .align 16 abi_test_clobber_edi: .L_abi_test_clobber_edi_begin: xorl %edi,%edi ret .size abi_test_clobber_edi,.-.L_abi_test_clobber_edi_begin .globl abi_test_clobber_esi .hidden abi_test_clobber_esi .type abi_test_clobber_esi,@function .align 16 abi_test_clobber_esi: .L_abi_test_clobber_esi_begin: xorl %esi,%esi ret .size abi_test_clobber_esi,.-.L_abi_test_clobber_esi_begin .globl abi_test_clobber_ebp .hidden abi_test_clobber_ebp .type abi_test_clobber_ebp,@function .align 16 abi_test_clobber_ebp: .L_abi_test_clobber_ebp_begin: xorl %ebp,%ebp ret .size abi_test_clobber_ebp,.-.L_abi_test_clobber_ebp_begin .globl abi_test_clobber_xmm0 .hidden abi_test_clobber_xmm0 .type abi_test_clobber_xmm0,@function .align 16 abi_test_clobber_xmm0: .L_abi_test_clobber_xmm0_begin: pxor %xmm0,%xmm0 ret .size abi_test_clobber_xmm0,.-.L_abi_test_clobber_xmm0_begin .globl abi_test_clobber_xmm1 .hidden abi_test_clobber_xmm1 .type 
abi_test_clobber_xmm1,@function .align 16 abi_test_clobber_xmm1: .L_abi_test_clobber_xmm1_begin: pxor %xmm1,%xmm1 ret .size abi_test_clobber_xmm1,.-.L_abi_test_clobber_xmm1_begin .globl abi_test_clobber_xmm2 .hidden abi_test_clobber_xmm2 .type abi_test_clobber_xmm2,@function .align 16 abi_test_clobber_xmm2: .L_abi_test_clobber_xmm2_begin: pxor %xmm2,%xmm2 ret .size abi_test_clobber_xmm2,.-.L_abi_test_clobber_xmm2_begin .globl abi_test_clobber_xmm3 .hidden abi_test_clobber_xmm3 .type abi_test_clobber_xmm3,@function .align 16 abi_test_clobber_xmm3: .L_abi_test_clobber_xmm3_begin: pxor %xmm3,%xmm3 ret .size abi_test_clobber_xmm3,.-.L_abi_test_clobber_xmm3_begin .globl abi_test_clobber_xmm4 .hidden abi_test_clobber_xmm4 .type abi_test_clobber_xmm4,@function .align 16 abi_test_clobber_xmm4: .L_abi_test_clobber_xmm4_begin: pxor %xmm4,%xmm4 ret .size abi_test_clobber_xmm4,.-.L_abi_test_clobber_xmm4_begin .globl abi_test_clobber_xmm5 .hidden abi_test_clobber_xmm5 .type abi_test_clobber_xmm5,@function .align 16 abi_test_clobber_xmm5: .L_abi_test_clobber_xmm5_begin: pxor %xmm5,%xmm5 ret .size abi_test_clobber_xmm5,.-.L_abi_test_clobber_xmm5_begin .globl abi_test_clobber_xmm6 .hidden abi_test_clobber_xmm6 .type abi_test_clobber_xmm6,@function .align 16 abi_test_clobber_xmm6: .L_abi_test_clobber_xmm6_begin: pxor %xmm6,%xmm6 ret .size abi_test_clobber_xmm6,.-.L_abi_test_clobber_xmm6_begin .globl abi_test_clobber_xmm7 .hidden abi_test_clobber_xmm7 .type abi_test_clobber_xmm7,@function .align 16 abi_test_clobber_xmm7: .L_abi_test_clobber_xmm7_begin: pxor %xmm7,%xmm7 ret .size abi_test_clobber_xmm7,.-.L_abi_test_clobber_xmm7_begin #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
wlsfx/bnbb
98,892
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86/crypto/fipsmodule/sha256-586.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl sha256_block_data_order_nohw .hidden sha256_block_data_order_nohw .type sha256_block_data_order_nohw,@function .align 16 sha256_block_data_order_nohw: .L_sha256_block_data_order_nohw_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl %esp,%ebx call .L000pic_point .L000pic_point: popl %ebp leal .LK256-.L000pic_point(%ebp),%ebp subl $16,%esp andl $-64,%esp shll $6,%eax addl %edi,%eax movl %esi,(%esp) movl %edi,4(%esp) movl %eax,8(%esp) movl %ebx,12(%esp) .L001no_xmm: subl %edi,%eax cmpl $256,%eax jae .L002unrolled jmp .L003loop .align 16 .L003loop: movl (%edi),%eax movl 4(%edi),%ebx movl 8(%edi),%ecx bswap %eax movl 12(%edi),%edx bswap %ebx pushl %eax bswap %ecx pushl %ebx bswap %edx pushl %ecx pushl %edx movl 16(%edi),%eax movl 20(%edi),%ebx movl 24(%edi),%ecx bswap %eax movl 28(%edi),%edx bswap %ebx pushl %eax bswap %ecx pushl %ebx bswap %edx pushl %ecx pushl %edx movl 32(%edi),%eax movl 36(%edi),%ebx movl 40(%edi),%ecx bswap %eax movl 44(%edi),%edx bswap %ebx pushl %eax bswap %ecx pushl %ebx bswap %edx pushl %ecx pushl %edx movl 48(%edi),%eax movl 52(%edi),%ebx movl 56(%edi),%ecx bswap %eax movl 60(%edi),%edx bswap %ebx pushl %eax bswap %ecx pushl %ebx bswap %edx pushl %ecx pushl %edx addl $64,%edi leal -36(%esp),%esp movl %edi,104(%esp) movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edi movl %ebx,8(%esp) xorl %ecx,%ebx movl %ecx,12(%esp) movl %edi,16(%esp) movl %ebx,(%esp) movl 16(%esi),%edx movl 20(%esi),%ebx movl 24(%esi),%ecx movl 28(%esi),%edi movl %ebx,24(%esp) movl %ecx,28(%esp) movl %edi,32(%esp) .align 16 .L00400_15: movl %edx,%ecx movl 24(%esp),%esi rorl $14,%ecx movl 28(%esp),%edi xorl %edx,%ecx xorl %edi,%esi movl 96(%esp),%ebx rorl $5,%ecx andl %edx,%esi movl %edx,20(%esp) xorl %ecx,%edx addl 32(%esp),%ebx xorl %edi,%esi rorl $6,%edx movl %eax,%ecx addl %esi,%ebx rorl $9,%ecx addl %edx,%ebx movl 8(%esp),%edi xorl %eax,%ecx movl %eax,4(%esp) leal -4(%esp),%esp rorl $11,%ecx movl (%ebp),%esi xorl %eax,%ecx movl 20(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %esi,%ebx movl %eax,(%esp) addl %ebx,%edx andl 4(%esp),%eax addl %ecx,%ebx xorl %edi,%eax addl $4,%ebp addl %ebx,%eax cmpl $3248222580,%esi jne .L00400_15 movl 156(%esp),%ecx jmp .L00516_63 .align 16 .L00516_63: movl %ecx,%ebx movl 104(%esp),%esi rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 160(%esp),%ebx shrl $10,%edi addl 124(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 24(%esp),%esi rorl $14,%ecx addl %edi,%ebx movl 28(%esp),%edi xorl %edx,%ecx xorl %edi,%esi movl %ebx,96(%esp) rorl $5,%ecx andl %edx,%esi movl %edx,20(%esp) xorl %ecx,%edx addl 32(%esp),%ebx xorl %edi,%esi rorl $6,%edx movl %eax,%ecx addl %esi,%ebx rorl $9,%ecx addl %edx,%ebx movl 8(%esp),%edi xorl %eax,%ecx movl %eax,4(%esp) leal -4(%esp),%esp rorl $11,%ecx movl (%ebp),%esi xorl %eax,%ecx movl 20(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %esi,%ebx movl %eax,(%esp) addl %ebx,%edx andl 4(%esp),%eax addl %ecx,%ebx xorl %edi,%eax movl 156(%esp),%ecx addl $4,%ebp addl %ebx,%eax cmpl $3329325298,%esi jne .L00516_63 movl 356(%esp),%esi movl 8(%esp),%ebx movl 16(%esp),%ecx addl (%esi),%eax addl 4(%esi),%ebx addl 8(%esi),%edi addl 12(%esi),%ecx movl %eax,(%esi) movl 
%ebx,4(%esi) movl %edi,8(%esi) movl %ecx,12(%esi) movl 24(%esp),%eax movl 28(%esp),%ebx movl 32(%esp),%ecx movl 360(%esp),%edi addl 16(%esi),%edx addl 20(%esi),%eax addl 24(%esi),%ebx addl 28(%esi),%ecx movl %edx,16(%esi) movl %eax,20(%esi) movl %ebx,24(%esi) movl %ecx,28(%esi) leal 356(%esp),%esp subl $256,%ebp cmpl 8(%esp),%edi jb .L003loop movl 12(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .align 64 .LK256: .long 1116352408,1899447441,3049323471,3921009573,961987163,1508970993,2453635748,2870763221,3624381080,310598401,607225278,1426881987,1925078388,2162078206,2614888103,3248222580,3835390401,4022224774,264347078,604807628,770255983,1249150122,1555081692,1996064986,2554220882,2821834349,2952996808,3210313671,3336571891,3584528711,113926993,338241895,666307205,773529912,1294757372,1396182291,1695183700,1986661051,2177026350,2456956037,2730485921,2820302411,3259730800,3345764771,3516065817,3600352804,4094571909,275423344,430227734,506948616,659060556,883997877,958139571,1322822218,1537002063,1747873779,1955562222,2024104815,2227730452,2361852424,2428436474,2756734187,3204031479,3329325298 .long 66051,67438087,134810123,202182159 .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97 .byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32 .byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 .byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 .byte 62,0 .align 16 .L002unrolled: leal -96(%esp),%esp movl (%esi),%eax movl 4(%esi),%ebp movl 8(%esi),%ecx movl 12(%esi),%ebx movl %ebp,4(%esp) xorl %ecx,%ebp movl %ecx,8(%esp) movl %ebx,12(%esp) movl 16(%esi),%edx movl 20(%esi),%ebx movl 24(%esi),%ecx movl 28(%esi),%esi movl %ebx,20(%esp) movl %ecx,24(%esp) movl %esi,28(%esp) jmp .L006grand_loop .align 16 .L006grand_loop: movl (%edi),%ebx movl 4(%edi),%ecx bswap %ebx movl 8(%edi),%esi bswap %ecx movl %ebx,32(%esp) bswap %esi movl %ecx,36(%esp) movl %esi,40(%esp) movl 12(%edi),%ebx movl 16(%edi),%ecx bswap %ebx movl 20(%edi),%esi bswap %ecx movl %ebx,44(%esp) bswap %esi movl %ecx,48(%esp) movl %esi,52(%esp) movl 24(%edi),%ebx movl 28(%edi),%ecx bswap %ebx movl 32(%edi),%esi bswap %ecx movl %ebx,56(%esp) bswap %esi movl %ecx,60(%esp) movl %esi,64(%esp) movl 36(%edi),%ebx movl 40(%edi),%ecx bswap %ebx movl 44(%edi),%esi bswap %ecx movl %ebx,68(%esp) bswap %esi movl %ecx,72(%esp) movl %esi,76(%esp) movl 48(%edi),%ebx movl 52(%edi),%ecx bswap %ebx movl 56(%edi),%esi bswap %ecx movl %ebx,80(%esp) bswap %esi movl %ecx,84(%esp) movl %esi,88(%esp) movl 60(%edi),%ebx addl $64,%edi bswap %ebx movl %edi,100(%esp) movl %ebx,92(%esp) movl %edx,%ecx movl 20(%esp),%esi rorl $14,%edx movl 24(%esp),%edi xorl %ecx,%edx movl 32(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1116352408(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 16(%esp),%ecx rorl $14,%edx movl 20(%esp),%edi xorl %esi,%edx movl 36(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1899447441(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl 
$2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl %edx,%ecx movl 12(%esp),%esi rorl $14,%edx movl 16(%esp),%edi xorl %ecx,%edx movl 40(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3049323471(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 8(%esp),%ecx rorl $14,%edx movl 12(%esp),%edi xorl %esi,%edx movl 44(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3921009573(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl %edx,%ecx movl 4(%esp),%esi rorl $14,%edx movl 8(%esp),%edi xorl %ecx,%edx movl 48(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 961987163(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl (%esp),%ecx rorl $14,%edx movl 4(%esp),%edi xorl %esi,%edx movl 52(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1508970993(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl %edx,%ecx movl 28(%esp),%esi rorl $14,%edx movl (%esp),%edi xorl %ecx,%edx movl 56(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2453635748(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 24(%esp),%ecx rorl $14,%edx movl 28(%esp),%edi xorl %esi,%edx movl 60(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2870763221(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl %edx,%ecx movl 20(%esp),%esi rorl $14,%edx movl 24(%esp),%edi xorl %ecx,%edx movl 64(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3624381080(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 
12(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 16(%esp),%ecx rorl $14,%edx movl 20(%esp),%edi xorl %esi,%edx movl 68(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 310598401(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl %edx,%ecx movl 12(%esp),%esi rorl $14,%edx movl 16(%esp),%edi xorl %ecx,%edx movl 72(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 607225278(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 8(%esp),%ecx rorl $14,%edx movl 12(%esp),%edi xorl %esi,%edx movl 76(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1426881987(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl %edx,%ecx movl 4(%esp),%esi rorl $14,%edx movl 8(%esp),%edi xorl %ecx,%edx movl 80(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1925078388(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl (%esp),%ecx rorl $14,%edx movl 4(%esp),%edi xorl %esi,%edx movl 84(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2162078206(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl %edx,%ecx movl 28(%esp),%esi rorl $14,%edx movl (%esp),%edi xorl %ecx,%edx movl 88(%esp),%ebx xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2614888103(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl %edx,%esi movl 24(%esp),%ecx rorl $14,%edx movl 28(%esp),%edi xorl %esi,%edx movl 92(%esp),%ebx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3248222580(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 36(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl 
%esi,%eax movl 88(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 32(%esp),%ebx shrl $10,%edi addl 68(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,32(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3835390401(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 40(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 92(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 36(%esp),%ebx shrl $10,%edi addl 72(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,36(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 4022224774(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 44(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 32(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 40(%esp),%ebx shrl $10,%edi addl 76(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,40(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 264347078(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 48(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 36(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 44(%esp),%ebx shrl $10,%edi addl 80(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,44(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 604807628(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 52(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 40(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 48(%esp),%ebx shrl $10,%edi addl 84(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,48(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl 
%edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 770255983(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 56(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 44(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 52(%esp),%ebx shrl $10,%edi addl 88(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,52(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1249150122(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 60(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 48(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 56(%esp),%ebx shrl $10,%edi addl 92(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx movl %ebx,56(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1555081692(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 64(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 52(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 60(%esp),%ebx shrl $10,%edi addl 32(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx movl %ebx,60(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1996064986(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 68(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 56(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 64(%esp),%ebx shrl $10,%edi addl 36(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,64(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2554220882(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 72(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 60(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 68(%esp),%ebx shrl $10,%edi addl 40(%esp),%ebx movl 
%edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,68(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2821834349(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 76(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 64(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 72(%esp),%ebx shrl $10,%edi addl 44(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,72(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2952996808(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 80(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 68(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 76(%esp),%ebx shrl $10,%edi addl 48(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,76(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3210313671(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 84(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 72(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 80(%esp),%ebx shrl $10,%edi addl 52(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,80(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3336571891(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 88(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 76(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 84(%esp),%ebx shrl $10,%edi addl 56(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,84(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3584528711(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 92(%esp),%ecx rorl 
$2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 80(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 88(%esp),%ebx shrl $10,%edi addl 60(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx movl %ebx,88(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 113926993(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 32(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 84(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 92(%esp),%ebx shrl $10,%edi addl 64(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx movl %ebx,92(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 338241895(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 36(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 88(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 32(%esp),%ebx shrl $10,%edi addl 68(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,32(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 666307205(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 40(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 92(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 36(%esp),%ebx shrl $10,%edi addl 72(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,36(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 773529912(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 44(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 32(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 40(%esp),%ebx shrl $10,%edi addl 76(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,40(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl 
%esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1294757372(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 48(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 36(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 44(%esp),%ebx shrl $10,%edi addl 80(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,44(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1396182291(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 52(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 40(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 48(%esp),%ebx shrl $10,%edi addl 84(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,48(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1695183700(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 56(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 44(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 52(%esp),%ebx shrl $10,%edi addl 88(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,52(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1986661051(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 60(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 48(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 56(%esp),%ebx shrl $10,%edi addl 92(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx movl %ebx,56(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2177026350(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 64(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 52(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 
60(%esp),%ebx shrl $10,%edi addl 32(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx movl %ebx,60(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2456956037(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 68(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 56(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 64(%esp),%ebx shrl $10,%edi addl 36(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,64(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2730485921(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 72(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 60(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 68(%esp),%ebx shrl $10,%edi addl 40(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,68(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2820302411(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 76(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 64(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 72(%esp),%ebx shrl $10,%edi addl 44(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,72(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3259730800(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 80(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 68(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 76(%esp),%ebx shrl $10,%edi addl 48(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,76(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3345764771(%ebx,%edx,1),%edx xorl 
%ecx,%esi xorl %edi,%eax movl 84(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 72(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 80(%esp),%ebx shrl $10,%edi addl 52(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,80(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3516065817(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 88(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 76(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 84(%esp),%ebx shrl $10,%edi addl 56(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,84(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3600352804(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 92(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 80(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 88(%esp),%ebx shrl $10,%edi addl 60(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx movl %ebx,88(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 4094571909(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 32(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 84(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 92(%esp),%ebx shrl $10,%edi addl 64(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx movl %ebx,92(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 275423344(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 36(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 88(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 32(%esp),%ebx shrl $10,%edi addl 68(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,32(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl 
%ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 430227734(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 40(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 92(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 36(%esp),%ebx shrl $10,%edi addl 72(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,36(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 506948616(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 44(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 32(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 40(%esp),%ebx shrl $10,%edi addl 76(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,40(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 659060556(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 48(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 36(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 44(%esp),%ebx shrl $10,%edi addl 80(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,44(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 883997877(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 52(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 40(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 48(%esp),%ebx shrl $10,%edi addl 84(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,48(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 958139571(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 56(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 44(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl 
%edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 52(%esp),%ebx shrl $10,%edi addl 88(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,52(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1322822218(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 60(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 48(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 56(%esp),%ebx shrl $10,%edi addl 92(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx movl %ebx,56(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1537002063(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 64(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 52(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 60(%esp),%ebx shrl $10,%edi addl 32(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx movl %ebx,60(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 1747873779(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 68(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 56(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 64(%esp),%ebx shrl $10,%edi addl 36(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 20(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 24(%esp),%edi xorl %ecx,%edx movl %ebx,64(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx addl 28(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 4(%esp),%edi xorl %eax,%ecx movl %eax,(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 1955562222(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 72(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 12(%esp),%edx addl %ecx,%ebp movl 60(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 68(%esp),%ebx shrl $10,%edi addl 40(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 16(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 20(%esp),%edi xorl %esi,%edx movl %ebx,68(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,12(%esp) xorl %esi,%edx addl 24(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl (%esp),%edi xorl %ebp,%esi movl %ebp,28(%esp) xorl %edi,%ebp rorl $11,%esi andl 
%ebp,%eax leal 2024104815(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 76(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 8(%esp),%edx addl %esi,%eax movl 64(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 72(%esp),%ebx shrl $10,%edi addl 44(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 12(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 16(%esp),%edi xorl %ecx,%edx movl %ebx,72(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx addl 20(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 28(%esp),%edi xorl %eax,%ecx movl %eax,24(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2227730452(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 80(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 4(%esp),%edx addl %ecx,%ebp movl 68(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 76(%esp),%ebx shrl $10,%edi addl 48(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 8(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 12(%esp),%edi xorl %esi,%edx movl %ebx,76(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,4(%esp) xorl %esi,%edx addl 16(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 24(%esp),%edi xorl %ebp,%esi movl %ebp,20(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2361852424(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 84(%esp),%ecx rorl $2,%esi addl %edx,%eax addl (%esp),%edx addl %esi,%eax movl 72(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 80(%esp),%ebx shrl $10,%edi addl 52(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 4(%esp),%esi rorl $14,%edx addl %edi,%ebx movl 8(%esp),%edi xorl %ecx,%edx movl %ebx,80(%esp) xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx addl 12(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 20(%esp),%edi xorl %eax,%ecx movl %eax,16(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 2428436474(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 88(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 28(%esp),%edx addl %ecx,%ebp movl 76(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 84(%esp),%ebx shrl $10,%edi addl 56(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl (%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 4(%esp),%edi xorl %esi,%edx movl %ebx,84(%esp) xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,28(%esp) xorl %esi,%edx addl 8(%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 16(%esp),%edi xorl %ebp,%esi movl %ebp,12(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 2756734187(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax movl 92(%esp),%ecx rorl $2,%esi addl %edx,%eax addl 24(%esp),%edx addl %esi,%eax movl 80(%esp),%esi movl %ecx,%ebx rorl $11,%ecx movl %esi,%edi rorl $2,%esi xorl %ebx,%ecx shrl $3,%ebx rorl $7,%ecx xorl %edi,%esi xorl %ecx,%ebx rorl $17,%esi addl 88(%esp),%ebx shrl $10,%edi addl 60(%esp),%ebx movl %edx,%ecx xorl %esi,%edi movl 28(%esp),%esi rorl $14,%edx addl %edi,%ebx movl (%esp),%edi xorl %ecx,%edx xorl %edi,%esi rorl 
$5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx addl 4(%esp),%ebx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%ebx rorl $9,%ecx movl %eax,%esi movl 12(%esp),%edi xorl %eax,%ecx movl %eax,8(%esp) xorl %edi,%eax rorl $11,%ecx andl %eax,%ebp leal 3204031479(%ebx,%edx,1),%edx xorl %esi,%ecx xorl %edi,%ebp movl 32(%esp),%esi rorl $2,%ecx addl %edx,%ebp addl 20(%esp),%edx addl %ecx,%ebp movl 84(%esp),%ecx movl %esi,%ebx rorl $11,%esi movl %ecx,%edi rorl $2,%ecx xorl %ebx,%esi shrl $3,%ebx rorl $7,%esi xorl %edi,%ecx xorl %esi,%ebx rorl $17,%ecx addl 92(%esp),%ebx shrl $10,%edi addl 64(%esp),%ebx movl %edx,%esi xorl %ecx,%edi movl 24(%esp),%ecx rorl $14,%edx addl %edi,%ebx movl 28(%esp),%edi xorl %esi,%edx xorl %edi,%ecx rorl $5,%edx andl %esi,%ecx movl %esi,20(%esp) xorl %esi,%edx addl (%esp),%ebx xorl %ecx,%edi rorl $6,%edx movl %ebp,%esi addl %edi,%ebx rorl $9,%esi movl %ebp,%ecx movl 8(%esp),%edi xorl %ebp,%esi movl %ebp,4(%esp) xorl %edi,%ebp rorl $11,%esi andl %ebp,%eax leal 3329325298(%ebx,%edx,1),%edx xorl %ecx,%esi xorl %edi,%eax rorl $2,%esi addl %edx,%eax addl 16(%esp),%edx addl %esi,%eax movl 96(%esp),%esi xorl %edi,%ebp movl 12(%esp),%ecx addl (%esi),%eax addl 4(%esi),%ebp addl 8(%esi),%edi addl 12(%esi),%ecx movl %eax,(%esi) movl %ebp,4(%esi) movl %edi,8(%esi) movl %ecx,12(%esi) movl %ebp,4(%esp) xorl %edi,%ebp movl %edi,8(%esp) movl %ecx,12(%esp) movl 20(%esp),%edi movl 24(%esp),%ebx movl 28(%esp),%ecx addl 16(%esi),%edx addl 20(%esi),%edi addl 24(%esi),%ebx addl 28(%esi),%ecx movl %edx,16(%esi) movl %edi,20(%esi) movl %ebx,24(%esi) movl %ecx,28(%esi) movl %edi,20(%esp) movl 100(%esp),%edi movl %ebx,24(%esp) movl %ecx,28(%esp) cmpl 104(%esp),%edi jb .L006grand_loop movl 108(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size sha256_block_data_order_nohw,.-.L_sha256_block_data_order_nohw_begin .globl sha256_block_data_order_ssse3 .hidden sha256_block_data_order_ssse3 .type sha256_block_data_order_ssse3,@function .align 16 sha256_block_data_order_ssse3: .L_sha256_block_data_order_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl %esp,%ebx call .L007pic_point .L007pic_point: popl %ebp leal .LK256-.L007pic_point(%ebp),%ebp subl $16,%esp andl $-64,%esp shll $6,%eax addl %edi,%eax movl %esi,(%esp) movl %edi,4(%esp) movl %eax,8(%esp) movl %ebx,12(%esp) leal -96(%esp),%esp movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edi movl %ebx,4(%esp) xorl %ecx,%ebx movl %ecx,8(%esp) movl %edi,12(%esp) movl 16(%esi),%edx movl 20(%esi),%edi movl 24(%esi),%ecx movl 28(%esi),%esi movl %edi,20(%esp) movl 100(%esp),%edi movl %ecx,24(%esp) movl %esi,28(%esp) movdqa 256(%ebp),%xmm7 jmp .L008grand_ssse3 .align 16 .L008grand_ssse3: movdqu (%edi),%xmm0 movdqu 16(%edi),%xmm1 movdqu 32(%edi),%xmm2 movdqu 48(%edi),%xmm3 addl $64,%edi .byte 102,15,56,0,199 movl %edi,100(%esp) .byte 102,15,56,0,207 movdqa (%ebp),%xmm4 .byte 102,15,56,0,215 movdqa 16(%ebp),%xmm5 paddd %xmm0,%xmm4 .byte 102,15,56,0,223 movdqa 32(%ebp),%xmm6 paddd %xmm1,%xmm5 movdqa 48(%ebp),%xmm7 movdqa %xmm4,32(%esp) paddd %xmm2,%xmm6 movdqa %xmm5,48(%esp) paddd %xmm3,%xmm7 movdqa %xmm6,64(%esp) movdqa %xmm7,80(%esp) jmp .L009ssse3_00_47 .align 16 .L009ssse3_00_47: addl $64,%ebp movl %edx,%ecx movdqa %xmm1,%xmm4 rorl $14,%edx movl 20(%esp),%esi movdqa %xmm3,%xmm7 xorl %ecx,%edx movl 24(%esp),%edi .byte 102,15,58,15,224,4 xorl %edi,%esi rorl $5,%edx andl %ecx,%esi .byte 102,15,58,15,250,4 movl %ecx,16(%esp) xorl %ecx,%edx xorl 
%esi,%edi movdqa %xmm4,%xmm5 rorl $6,%edx movl %eax,%ecx movdqa %xmm4,%xmm6 addl %edi,%edx movl 4(%esp),%edi psrld $3,%xmm4 movl %eax,%esi rorl $9,%ecx paddd %xmm7,%xmm0 movl %eax,(%esp) xorl %eax,%ecx psrld $7,%xmm6 xorl %edi,%eax addl 28(%esp),%edx rorl $11,%ecx andl %eax,%ebx pshufd $250,%xmm3,%xmm7 xorl %esi,%ecx addl 32(%esp),%edx pslld $14,%xmm5 xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm4 addl %edx,%ebx addl 12(%esp),%edx psrld $11,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm5,%xmm4 movl 16(%esp),%esi xorl %ecx,%edx pslld $11,%xmm5 movl 20(%esp),%edi xorl %edi,%esi rorl $5,%edx pxor %xmm6,%xmm4 andl %ecx,%esi movl %ecx,12(%esp) movdqa %xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx pxor %xmm5,%xmm4 movl %ebx,%ecx addl %edi,%edx psrld $10,%xmm7 movl (%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm4,%xmm0 movl %ebx,28(%esp) xorl %ebx,%ecx psrlq $17,%xmm6 xorl %edi,%ebx addl 24(%esp),%edx rorl $11,%ecx pxor %xmm6,%xmm7 andl %ebx,%eax xorl %esi,%ecx psrlq $2,%xmm6 addl 36(%esp),%edx xorl %edi,%eax rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%eax addl 8(%esp),%edx pshufd $128,%xmm7,%xmm7 addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi psrldq $8,%xmm7 movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi paddd %xmm7,%xmm0 rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,24(%esp) pshufd $80,%xmm0,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx movdqa %xmm7,%xmm6 rorl $11,%ecx psrld $10,%xmm7 andl %eax,%ebx psrlq $17,%xmm6 xorl %esi,%ecx addl 40(%esp),%edx xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%ebx addl 4(%esp),%edx psrlq $2,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm6,%xmm7 movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi pshufd $8,%xmm7,%xmm7 xorl %edi,%esi rorl $5,%edx movdqa (%ebp),%xmm6 andl %ecx,%esi movl %ecx,4(%esp) pslldq $8,%xmm7 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm7,%xmm0 movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx paddd %xmm0,%xmm6 rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 44(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movdqa %xmm6,32(%esp) movl %edx,%ecx movdqa %xmm2,%xmm4 rorl $14,%edx movl 4(%esp),%esi movdqa %xmm0,%xmm7 xorl %ecx,%edx movl 8(%esp),%edi .byte 102,15,58,15,225,4 xorl %edi,%esi rorl $5,%edx andl %ecx,%esi .byte 102,15,58,15,251,4 movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi movdqa %xmm4,%xmm5 rorl $6,%edx movl %eax,%ecx movdqa %xmm4,%xmm6 addl %edi,%edx movl 20(%esp),%edi psrld $3,%xmm4 movl %eax,%esi rorl $9,%ecx paddd %xmm7,%xmm1 movl %eax,16(%esp) xorl %eax,%ecx psrld $7,%xmm6 xorl %edi,%eax addl 12(%esp),%edx rorl $11,%ecx andl %eax,%ebx pshufd $250,%xmm0,%xmm7 xorl %esi,%ecx addl 48(%esp),%edx pslld $14,%xmm5 xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm4 addl %edx,%ebx addl 28(%esp),%edx psrld $11,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm5,%xmm4 movl (%esp),%esi xorl %ecx,%edx pslld $11,%xmm5 movl 4(%esp),%edi xorl %edi,%esi rorl $5,%edx pxor %xmm6,%xmm4 andl %ecx,%esi movl %ecx,28(%esp) movdqa %xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx pxor %xmm5,%xmm4 movl %ebx,%ecx addl %edi,%edx psrld $10,%xmm7 movl 16(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm4,%xmm1 movl %ebx,12(%esp) xorl %ebx,%ecx psrlq $17,%xmm6 xorl %edi,%ebx addl 8(%esp),%edx rorl $11,%ecx pxor 
%xmm6,%xmm7 andl %ebx,%eax xorl %esi,%ecx psrlq $2,%xmm6 addl 52(%esp),%edx xorl %edi,%eax rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%eax addl 24(%esp),%edx pshufd $128,%xmm7,%xmm7 addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi psrldq $8,%xmm7 movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi paddd %xmm7,%xmm1 rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,8(%esp) pshufd $80,%xmm1,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx movdqa %xmm7,%xmm6 rorl $11,%ecx psrld $10,%xmm7 andl %eax,%ebx psrlq $17,%xmm6 xorl %esi,%ecx addl 56(%esp),%edx xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%ebx addl 20(%esp),%edx psrlq $2,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm6,%xmm7 movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi pshufd $8,%xmm7,%xmm7 xorl %edi,%esi rorl $5,%edx movdqa 16(%ebp),%xmm6 andl %ecx,%esi movl %ecx,20(%esp) pslldq $8,%xmm7 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm7,%xmm1 movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx paddd %xmm1,%xmm6 rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 60(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movdqa %xmm6,48(%esp) movl %edx,%ecx movdqa %xmm3,%xmm4 rorl $14,%edx movl 20(%esp),%esi movdqa %xmm1,%xmm7 xorl %ecx,%edx movl 24(%esp),%edi .byte 102,15,58,15,226,4 xorl %edi,%esi rorl $5,%edx andl %ecx,%esi .byte 102,15,58,15,248,4 movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi movdqa %xmm4,%xmm5 rorl $6,%edx movl %eax,%ecx movdqa %xmm4,%xmm6 addl %edi,%edx movl 4(%esp),%edi psrld $3,%xmm4 movl %eax,%esi rorl $9,%ecx paddd %xmm7,%xmm2 movl %eax,(%esp) xorl %eax,%ecx psrld $7,%xmm6 xorl %edi,%eax addl 28(%esp),%edx rorl $11,%ecx andl %eax,%ebx pshufd $250,%xmm1,%xmm7 xorl %esi,%ecx addl 64(%esp),%edx pslld $14,%xmm5 xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm4 addl %edx,%ebx addl 12(%esp),%edx psrld $11,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm5,%xmm4 movl 16(%esp),%esi xorl %ecx,%edx pslld $11,%xmm5 movl 20(%esp),%edi xorl %edi,%esi rorl $5,%edx pxor %xmm6,%xmm4 andl %ecx,%esi movl %ecx,12(%esp) movdqa %xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx pxor %xmm5,%xmm4 movl %ebx,%ecx addl %edi,%edx psrld $10,%xmm7 movl (%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm4,%xmm2 movl %ebx,28(%esp) xorl %ebx,%ecx psrlq $17,%xmm6 xorl %edi,%ebx addl 24(%esp),%edx rorl $11,%ecx pxor %xmm6,%xmm7 andl %ebx,%eax xorl %esi,%ecx psrlq $2,%xmm6 addl 68(%esp),%edx xorl %edi,%eax rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%eax addl 8(%esp),%edx pshufd $128,%xmm7,%xmm7 addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi psrldq $8,%xmm7 movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi paddd %xmm7,%xmm2 rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,24(%esp) pshufd $80,%xmm2,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx movdqa %xmm7,%xmm6 rorl $11,%ecx psrld $10,%xmm7 andl %eax,%ebx psrlq $17,%xmm6 xorl %esi,%ecx addl 72(%esp),%edx xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%ebx addl 4(%esp),%edx psrlq $2,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm6,%xmm7 movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi pshufd $8,%xmm7,%xmm7 xorl %edi,%esi rorl 
$5,%edx movdqa 32(%ebp),%xmm6 andl %ecx,%esi movl %ecx,4(%esp) pslldq $8,%xmm7 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm7,%xmm2 movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx paddd %xmm2,%xmm6 rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 76(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movdqa %xmm6,64(%esp) movl %edx,%ecx movdqa %xmm0,%xmm4 rorl $14,%edx movl 4(%esp),%esi movdqa %xmm2,%xmm7 xorl %ecx,%edx movl 8(%esp),%edi .byte 102,15,58,15,227,4 xorl %edi,%esi rorl $5,%edx andl %ecx,%esi .byte 102,15,58,15,249,4 movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi movdqa %xmm4,%xmm5 rorl $6,%edx movl %eax,%ecx movdqa %xmm4,%xmm6 addl %edi,%edx movl 20(%esp),%edi psrld $3,%xmm4 movl %eax,%esi rorl $9,%ecx paddd %xmm7,%xmm3 movl %eax,16(%esp) xorl %eax,%ecx psrld $7,%xmm6 xorl %edi,%eax addl 12(%esp),%edx rorl $11,%ecx andl %eax,%ebx pshufd $250,%xmm2,%xmm7 xorl %esi,%ecx addl 80(%esp),%edx pslld $14,%xmm5 xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm4 addl %edx,%ebx addl 28(%esp),%edx psrld $11,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm5,%xmm4 movl (%esp),%esi xorl %ecx,%edx pslld $11,%xmm5 movl 4(%esp),%edi xorl %edi,%esi rorl $5,%edx pxor %xmm6,%xmm4 andl %ecx,%esi movl %ecx,28(%esp) movdqa %xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx pxor %xmm5,%xmm4 movl %ebx,%ecx addl %edi,%edx psrld $10,%xmm7 movl 16(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm4,%xmm3 movl %ebx,12(%esp) xorl %ebx,%ecx psrlq $17,%xmm6 xorl %edi,%ebx addl 8(%esp),%edx rorl $11,%ecx pxor %xmm6,%xmm7 andl %ebx,%eax xorl %esi,%ecx psrlq $2,%xmm6 addl 84(%esp),%edx xorl %edi,%eax rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%eax addl 24(%esp),%edx pshufd $128,%xmm7,%xmm7 addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi psrldq $8,%xmm7 movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi paddd %xmm7,%xmm3 rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,8(%esp) pshufd $80,%xmm3,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx movdqa %xmm7,%xmm6 rorl $11,%ecx psrld $10,%xmm7 andl %eax,%ebx psrlq $17,%xmm6 xorl %esi,%ecx addl 88(%esp),%edx xorl %edi,%ebx rorl $2,%ecx pxor %xmm6,%xmm7 addl %edx,%ebx addl 20(%esp),%edx psrlq $2,%xmm6 addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx pxor %xmm6,%xmm7 movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi pshufd $8,%xmm7,%xmm7 xorl %edi,%esi rorl $5,%edx movdqa 48(%ebp),%xmm6 andl %ecx,%esi movl %ecx,20(%esp) pslldq $8,%xmm7 xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi rorl $9,%ecx paddd %xmm7,%xmm3 movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx paddd %xmm3,%xmm6 rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 92(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movdqa %xmm6,80(%esp) cmpl $66051,64(%ebp) jne .L009ssse3_00_47 movl %edx,%ecx rorl $14,%edx movl 20(%esp),%esi xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,(%esp) xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 32(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 
12(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 16(%esp),%esi xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,12(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,28(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 36(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,24(%esp) xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 40(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 44(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 4(%esp),%esi xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,16(%esp) xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 48(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl (%esp),%esi xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,28(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,12(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 52(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,8(%esp) xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 56(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 60(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 20(%esp),%esi xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi movl 
%eax,%esi rorl $9,%ecx movl %eax,(%esp) xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 64(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 16(%esp),%esi xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,12(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,28(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 68(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,24(%esp) xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 72(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 76(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 4(%esp),%esi xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,16(%esp) xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 80(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl (%esp),%esi xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,28(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,12(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 84(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax movl %edx,%ecx rorl $14,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi rorl $9,%ecx movl %eax,8(%esp) xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx rorl $11,%ecx andl %eax,%ebx xorl %esi,%ecx addl 88(%esp),%edx xorl %edi,%ebx rorl $2,%ecx addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx movl %edx,%ecx rorl $14,%edx movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi rorl $5,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi rorl $6,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi rorl $9,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx rorl $11,%ecx andl %ebx,%eax xorl %esi,%ecx addl 92(%esp),%edx xorl %edi,%eax rorl $2,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movl 96(%esp),%esi xorl %edi,%ebx movl 
12(%esp),%ecx addl (%esi),%eax addl 4(%esi),%ebx addl 8(%esi),%edi addl 12(%esi),%ecx movl %eax,(%esi) movl %ebx,4(%esi) movl %edi,8(%esi) movl %ecx,12(%esi) movl %ebx,4(%esp) xorl %edi,%ebx movl %edi,8(%esp) movl %ecx,12(%esp) movl 20(%esp),%edi movl 24(%esp),%ecx addl 16(%esi),%edx addl 20(%esi),%edi addl 24(%esi),%ecx movl %edx,16(%esi) movl %edi,20(%esi) movl %edi,20(%esp) movl 28(%esp),%edi movl %ecx,24(%esi) addl 28(%esi),%edi movl %ecx,24(%esp) movl %edi,28(%esi) movl %edi,28(%esp) movl 100(%esp),%edi movdqa 64(%ebp),%xmm7 subl $192,%ebp cmpl 104(%esp),%edi jb .L008grand_ssse3 movl 108(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size sha256_block_data_order_ssse3,.-.L_sha256_block_data_order_ssse3_begin .globl sha256_block_data_order_avx .hidden sha256_block_data_order_avx .type sha256_block_data_order_avx,@function .align 16 sha256_block_data_order_avx: .L_sha256_block_data_order_avx_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl %esp,%ebx call .L010pic_point .L010pic_point: popl %ebp leal .LK256-.L010pic_point(%ebp),%ebp subl $16,%esp andl $-64,%esp shll $6,%eax addl %edi,%eax movl %esi,(%esp) movl %edi,4(%esp) movl %eax,8(%esp) movl %ebx,12(%esp) leal -96(%esp),%esp vzeroall movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edi movl %ebx,4(%esp) xorl %ecx,%ebx movl %ecx,8(%esp) movl %edi,12(%esp) movl 16(%esi),%edx movl 20(%esi),%edi movl 24(%esi),%ecx movl 28(%esi),%esi movl %edi,20(%esp) movl 100(%esp),%edi movl %ecx,24(%esp) movl %esi,28(%esp) vmovdqa 256(%ebp),%xmm7 jmp .L011grand_avx .align 32 .L011grand_avx: vmovdqu (%edi),%xmm0 vmovdqu 16(%edi),%xmm1 vmovdqu 32(%edi),%xmm2 vmovdqu 48(%edi),%xmm3 addl $64,%edi vpshufb %xmm7,%xmm0,%xmm0 movl %edi,100(%esp) vpshufb %xmm7,%xmm1,%xmm1 vpshufb %xmm7,%xmm2,%xmm2 vpaddd (%ebp),%xmm0,%xmm4 vpshufb %xmm7,%xmm3,%xmm3 vpaddd 16(%ebp),%xmm1,%xmm5 vpaddd 32(%ebp),%xmm2,%xmm6 vpaddd 48(%ebp),%xmm3,%xmm7 vmovdqa %xmm4,32(%esp) vmovdqa %xmm5,48(%esp) vmovdqa %xmm6,64(%esp) vmovdqa %xmm7,80(%esp) jmp .L012avx_00_47 .align 16 .L012avx_00_47: addl $64,%ebp vpalignr $4,%xmm0,%xmm1,%xmm4 movl %edx,%ecx shrdl $14,%edx,%edx movl 20(%esp),%esi vpalignr $4,%xmm2,%xmm3,%xmm7 xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi vpsrld $7,%xmm4,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,16(%esp) vpaddd %xmm7,%xmm0,%xmm0 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrld $3,%xmm4,%xmm7 movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi vpslld $14,%xmm4,%xmm5 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,(%esp) vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx vpshufd $250,%xmm3,%xmm7 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpsrld $11,%xmm6,%xmm6 addl 32(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpxor %xmm5,%xmm4,%xmm4 addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx vpslld $11,%xmm5,%xmm5 movl %edx,%ecx shrdl $14,%edx,%edx movl 16(%esp),%esi vpxor %xmm6,%xmm4,%xmm4 xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi vpsrld $10,%xmm7,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,12(%esp) vpxor %xmm5,%xmm4,%xmm4 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi vpaddd %xmm4,%xmm0,%xmm0 movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,28(%esp) vpxor %xmm5,%xmm6,%xmm6 xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx vpsrlq $19,%xmm7,%xmm7 shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx vpxor %xmm7,%xmm6,%xmm6 addl 36(%esp),%edx xorl %edi,%eax shrdl 
$2,%ecx,%ecx vpshufd $132,%xmm6,%xmm7 addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax vpsrldq $8,%xmm7,%xmm7 movl %edx,%ecx shrdl $14,%edx,%edx movl 12(%esp),%esi vpaddd %xmm7,%xmm0,%xmm0 xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi vpshufd $80,%xmm0,%xmm7 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,8(%esp) vpsrld $10,%xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi vpxor %xmm5,%xmm6,%xmm6 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,24(%esp) vpsrlq $19,%xmm7,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx vpxor %xmm7,%xmm6,%xmm6 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpshufd $232,%xmm6,%xmm7 addl 40(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpslldq $8,%xmm7,%xmm7 addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx vpaddd %xmm7,%xmm0,%xmm0 movl %edx,%ecx shrdl $14,%edx,%edx movl 8(%esp),%esi vpaddd (%ebp),%xmm0,%xmm6 xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 44(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax vmovdqa %xmm6,32(%esp) vpalignr $4,%xmm1,%xmm2,%xmm4 movl %edx,%ecx shrdl $14,%edx,%edx movl 4(%esp),%esi vpalignr $4,%xmm3,%xmm0,%xmm7 xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi vpsrld $7,%xmm4,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,(%esp) vpaddd %xmm7,%xmm1,%xmm1 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrld $3,%xmm4,%xmm7 movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi vpslld $14,%xmm4,%xmm5 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,16(%esp) vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx vpshufd $250,%xmm0,%xmm7 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpsrld $11,%xmm6,%xmm6 addl 48(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpxor %xmm5,%xmm4,%xmm4 addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx vpslld $11,%xmm5,%xmm5 movl %edx,%ecx shrdl $14,%edx,%edx movl (%esp),%esi vpxor %xmm6,%xmm4,%xmm4 xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi vpsrld $10,%xmm7,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,28(%esp) vpxor %xmm5,%xmm4,%xmm4 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi vpaddd %xmm4,%xmm1,%xmm1 movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,12(%esp) vpxor %xmm5,%xmm6,%xmm6 xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx vpsrlq $19,%xmm7,%xmm7 shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx vpxor %xmm7,%xmm6,%xmm6 addl 52(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx vpshufd $132,%xmm6,%xmm7 addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax vpsrldq $8,%xmm7,%xmm7 movl %edx,%ecx shrdl $14,%edx,%edx movl 28(%esp),%esi vpaddd %xmm7,%xmm1,%xmm1 xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi vpshufd $80,%xmm1,%xmm7 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,24(%esp) vpsrld $10,%xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi vpxor %xmm5,%xmm6,%xmm6 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,8(%esp) vpsrlq $19,%xmm7,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx vpxor %xmm7,%xmm6,%xmm6 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpshufd $232,%xmm6,%xmm7 addl 56(%esp),%edx xorl %edi,%ebx 
shrdl $2,%ecx,%ecx vpslldq $8,%xmm7,%xmm7 addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx vpaddd %xmm7,%xmm1,%xmm1 movl %edx,%ecx shrdl $14,%edx,%edx movl 24(%esp),%esi vpaddd 16(%ebp),%xmm1,%xmm6 xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 60(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax vmovdqa %xmm6,48(%esp) vpalignr $4,%xmm2,%xmm3,%xmm4 movl %edx,%ecx shrdl $14,%edx,%edx movl 20(%esp),%esi vpalignr $4,%xmm0,%xmm1,%xmm7 xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi vpsrld $7,%xmm4,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,16(%esp) vpaddd %xmm7,%xmm2,%xmm2 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrld $3,%xmm4,%xmm7 movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi vpslld $14,%xmm4,%xmm5 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,(%esp) vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx vpshufd $250,%xmm1,%xmm7 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpsrld $11,%xmm6,%xmm6 addl 64(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpxor %xmm5,%xmm4,%xmm4 addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx vpslld $11,%xmm5,%xmm5 movl %edx,%ecx shrdl $14,%edx,%edx movl 16(%esp),%esi vpxor %xmm6,%xmm4,%xmm4 xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi vpsrld $10,%xmm7,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,12(%esp) vpxor %xmm5,%xmm4,%xmm4 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi vpaddd %xmm4,%xmm2,%xmm2 movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,28(%esp) vpxor %xmm5,%xmm6,%xmm6 xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx vpsrlq $19,%xmm7,%xmm7 shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx vpxor %xmm7,%xmm6,%xmm6 addl 68(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx vpshufd $132,%xmm6,%xmm7 addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax vpsrldq $8,%xmm7,%xmm7 movl %edx,%ecx shrdl $14,%edx,%edx movl 12(%esp),%esi vpaddd %xmm7,%xmm2,%xmm2 xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi vpshufd $80,%xmm2,%xmm7 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,8(%esp) vpsrld $10,%xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi vpxor %xmm5,%xmm6,%xmm6 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,24(%esp) vpsrlq $19,%xmm7,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx vpxor %xmm7,%xmm6,%xmm6 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpshufd $232,%xmm6,%xmm7 addl 72(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpslldq $8,%xmm7,%xmm7 addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx vpaddd %xmm7,%xmm2,%xmm2 movl %edx,%ecx shrdl $14,%edx,%edx movl 8(%esp),%esi vpaddd 32(%ebp),%xmm2,%xmm6 xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 76(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax vmovdqa %xmm6,64(%esp) vpalignr $4,%xmm3,%xmm0,%xmm4 movl %edx,%ecx shrdl $14,%edx,%edx 
movl 4(%esp),%esi vpalignr $4,%xmm1,%xmm2,%xmm7 xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi vpsrld $7,%xmm4,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,(%esp) vpaddd %xmm7,%xmm3,%xmm3 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrld $3,%xmm4,%xmm7 movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi vpslld $14,%xmm4,%xmm5 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,16(%esp) vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx vpshufd $250,%xmm2,%xmm7 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpsrld $11,%xmm6,%xmm6 addl 80(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpxor %xmm5,%xmm4,%xmm4 addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx vpslld $11,%xmm5,%xmm5 movl %edx,%ecx shrdl $14,%edx,%edx movl (%esp),%esi vpxor %xmm6,%xmm4,%xmm4 xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi vpsrld $10,%xmm7,%xmm6 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,28(%esp) vpxor %xmm5,%xmm4,%xmm4 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi vpaddd %xmm4,%xmm3,%xmm3 movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,12(%esp) vpxor %xmm5,%xmm6,%xmm6 xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx vpsrlq $19,%xmm7,%xmm7 shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx vpxor %xmm7,%xmm6,%xmm6 addl 84(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx vpshufd $132,%xmm6,%xmm7 addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax vpsrldq $8,%xmm7,%xmm7 movl %edx,%ecx shrdl $14,%edx,%edx movl 28(%esp),%esi vpaddd %xmm7,%xmm3,%xmm3 xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi vpshufd $80,%xmm3,%xmm7 shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,24(%esp) vpsrld $10,%xmm7,%xmm6 xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx vpsrlq $17,%xmm7,%xmm5 movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi vpxor %xmm5,%xmm6,%xmm6 movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,8(%esp) vpsrlq $19,%xmm7,%xmm7 xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx vpxor %xmm7,%xmm6,%xmm6 shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx vpshufd $232,%xmm6,%xmm7 addl 88(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx vpslldq $8,%xmm7,%xmm7 addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx vpaddd %xmm7,%xmm3,%xmm3 movl %edx,%ecx shrdl $14,%edx,%edx movl 24(%esp),%esi vpaddd 48(%ebp),%xmm3,%xmm6 xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 92(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax vmovdqa %xmm6,80(%esp) cmpl $66051,64(%ebp) jne .L012avx_00_47 movl %edx,%ecx shrdl $14,%edx,%edx movl 20(%esp),%esi xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,(%esp) xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 32(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 16(%esp),%esi xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,12(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi movl 
%ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,28(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 36(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,24(%esp) xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 40(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 44(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 4(%esp),%esi xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,16(%esp) xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 48(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl (%esp),%esi xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,28(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,12(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 52(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,8(%esp) xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 56(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 60(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 20(%esp),%esi xorl %ecx,%edx movl 24(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,16(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 4(%esp),%edi movl %eax,%esi shrdl 
$9,%ecx,%ecx movl %eax,(%esp) xorl %eax,%ecx xorl %edi,%eax addl 28(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 64(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 12(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 16(%esp),%esi xorl %ecx,%edx movl 20(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,12(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl (%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,28(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 24(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 68(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 8(%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 12(%esp),%esi xorl %ecx,%edx movl 16(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,8(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 28(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,24(%esp) xorl %eax,%ecx xorl %edi,%eax addl 20(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 72(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 4(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 8(%esp),%esi xorl %ecx,%edx movl 12(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,4(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 24(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,20(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 16(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 76(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl (%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 4(%esp),%esi xorl %ecx,%edx movl 8(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 20(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,16(%esp) xorl %eax,%ecx xorl %edi,%eax addl 12(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 80(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 28(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl (%esp),%esi xorl %ecx,%edx movl 4(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,28(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 16(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl %ebx,12(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl 8(%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 84(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 24(%esp),%edx addl %ecx,%eax movl %edx,%ecx shrdl $14,%edx,%edx movl 28(%esp),%esi xorl %ecx,%edx movl (%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,24(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %eax,%ecx addl %edi,%edx movl 12(%esp),%edi movl %eax,%esi shrdl $9,%ecx,%ecx movl %eax,8(%esp) xorl %eax,%ecx xorl %edi,%eax addl 4(%esp),%edx shrdl $11,%ecx,%ecx andl %eax,%ebx xorl %esi,%ecx addl 88(%esp),%edx xorl %edi,%ebx shrdl $2,%ecx,%ecx addl %edx,%ebx addl 20(%esp),%edx addl %ecx,%ebx movl %edx,%ecx shrdl $14,%edx,%edx movl 24(%esp),%esi xorl %ecx,%edx movl 28(%esp),%edi xorl %edi,%esi shrdl $5,%edx,%edx andl %ecx,%esi movl %ecx,20(%esp) xorl %ecx,%edx xorl %esi,%edi shrdl $6,%edx,%edx movl %ebx,%ecx addl %edi,%edx movl 8(%esp),%edi movl %ebx,%esi shrdl $9,%ecx,%ecx movl 
%ebx,4(%esp) xorl %ebx,%ecx xorl %edi,%ebx addl (%esp),%edx shrdl $11,%ecx,%ecx andl %ebx,%eax xorl %esi,%ecx addl 92(%esp),%edx xorl %edi,%eax shrdl $2,%ecx,%ecx addl %edx,%eax addl 16(%esp),%edx addl %ecx,%eax movl 96(%esp),%esi xorl %edi,%ebx movl 12(%esp),%ecx addl (%esi),%eax addl 4(%esi),%ebx addl 8(%esi),%edi addl 12(%esi),%ecx movl %eax,(%esi) movl %ebx,4(%esi) movl %edi,8(%esi) movl %ecx,12(%esi) movl %ebx,4(%esp) xorl %edi,%ebx movl %edi,8(%esp) movl %ecx,12(%esp) movl 20(%esp),%edi movl 24(%esp),%ecx addl 16(%esi),%edx addl 20(%esi),%edi addl 24(%esi),%ecx movl %edx,16(%esi) movl %edi,20(%esi) movl %edi,20(%esp) movl 28(%esp),%edi movl %ecx,24(%esi) addl 28(%esi),%edi movl %ecx,24(%esp) movl %edi,28(%esi) movl %edi,28(%esp) movl 100(%esp),%edi vmovdqa 64(%ebp),%xmm7 subl $192,%ebp cmpl 104(%esp),%edi jb .L011grand_avx movl 108(%esp),%esp vzeroall popl %edi popl %esi popl %ebx popl %ebp ret .size sha256_block_data_order_avx,.-.L_sha256_block_data_order_avx_begin #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
wlsfx/bnbb
67,571
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86/crypto/fipsmodule/sha1-586.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl sha1_block_data_order_nohw .hidden sha1_block_data_order_nohw .type sha1_block_data_order_nohw,@function .align 16 sha1_block_data_order_nohw: .L_sha1_block_data_order_nohw_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%ebp movl 24(%esp),%esi movl 28(%esp),%eax subl $76,%esp shll $6,%eax addl %esi,%eax movl %eax,104(%esp) movl 16(%ebp),%edi jmp .L000loop .align 16 .L000loop: movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx movl %eax,(%esp) movl %ebx,4(%esp) movl %ecx,8(%esp) movl %edx,12(%esp) movl 16(%esi),%eax movl 20(%esi),%ebx movl 24(%esi),%ecx movl 28(%esi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx movl %eax,16(%esp) movl %ebx,20(%esp) movl %ecx,24(%esp) movl %edx,28(%esp) movl 32(%esi),%eax movl 36(%esi),%ebx movl 40(%esi),%ecx movl 44(%esi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx movl %eax,32(%esp) movl %ebx,36(%esp) movl %ecx,40(%esp) movl %edx,44(%esp) movl 48(%esi),%eax movl 52(%esi),%ebx movl 56(%esi),%ecx movl 60(%esi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx movl %eax,48(%esp) movl %ebx,52(%esp) movl %ecx,56(%esp) movl %edx,60(%esp) movl %esi,100(%esp) movl (%ebp),%eax movl 4(%ebp),%ebx movl 8(%ebp),%ecx movl 12(%ebp),%edx movl %ecx,%esi movl %eax,%ebp roll $5,%ebp xorl %edx,%esi addl %edi,%ebp movl (%esp),%edi andl %ebx,%esi rorl $2,%ebx xorl %edx,%esi leal 1518500249(%ebp,%edi,1),%ebp addl %esi,%ebp movl %ebx,%edi movl %ebp,%esi roll $5,%ebp xorl %ecx,%edi addl %edx,%ebp movl 4(%esp),%edx andl %eax,%edi rorl $2,%eax xorl %ecx,%edi leal 1518500249(%ebp,%edx,1),%ebp addl %edi,%ebp movl %eax,%edx movl %ebp,%edi roll $5,%ebp xorl %ebx,%edx addl %ecx,%ebp movl 8(%esp),%ecx andl %esi,%edx rorl $2,%esi xorl %ebx,%edx leal 1518500249(%ebp,%ecx,1),%ebp addl %edx,%ebp movl %esi,%ecx movl %ebp,%edx roll $5,%ebp xorl %eax,%ecx addl %ebx,%ebp movl 12(%esp),%ebx andl %edi,%ecx rorl $2,%edi xorl %eax,%ecx leal 1518500249(%ebp,%ebx,1),%ebp addl %ecx,%ebp movl %edi,%ebx movl %ebp,%ecx roll $5,%ebp xorl %esi,%ebx addl %eax,%ebp movl 16(%esp),%eax andl %edx,%ebx rorl $2,%edx xorl %esi,%ebx leal 1518500249(%ebp,%eax,1),%ebp addl %ebx,%ebp movl %edx,%eax movl %ebp,%ebx roll $5,%ebp xorl %edi,%eax addl %esi,%ebp movl 20(%esp),%esi andl %ecx,%eax rorl $2,%ecx xorl %edi,%eax leal 1518500249(%ebp,%esi,1),%ebp addl %eax,%ebp movl %ecx,%esi movl %ebp,%eax roll $5,%ebp xorl %edx,%esi addl %edi,%ebp movl 24(%esp),%edi andl %ebx,%esi rorl $2,%ebx xorl %edx,%esi leal 1518500249(%ebp,%edi,1),%ebp addl %esi,%ebp movl %ebx,%edi movl %ebp,%esi roll $5,%ebp xorl %ecx,%edi addl %edx,%ebp movl 28(%esp),%edx andl %eax,%edi rorl $2,%eax xorl %ecx,%edi leal 1518500249(%ebp,%edx,1),%ebp addl %edi,%ebp movl %eax,%edx movl %ebp,%edi roll $5,%ebp xorl %ebx,%edx addl %ecx,%ebp movl 32(%esp),%ecx andl %esi,%edx rorl $2,%esi xorl %ebx,%edx leal 1518500249(%ebp,%ecx,1),%ebp addl %edx,%ebp movl %esi,%ecx movl %ebp,%edx roll $5,%ebp xorl %eax,%ecx addl %ebx,%ebp movl 36(%esp),%ebx andl %edi,%ecx rorl $2,%edi xorl %eax,%ecx leal 1518500249(%ebp,%ebx,1),%ebp addl %ecx,%ebp movl %edi,%ebx movl %ebp,%ecx roll $5,%ebp xorl %esi,%ebx addl %eax,%ebp movl 40(%esp),%eax andl %edx,%ebx rorl $2,%edx xorl %esi,%ebx leal 1518500249(%ebp,%eax,1),%ebp addl %ebx,%ebp movl %edx,%eax movl %ebp,%ebx roll 
$5,%ebp xorl %edi,%eax addl %esi,%ebp movl 44(%esp),%esi andl %ecx,%eax rorl $2,%ecx xorl %edi,%eax leal 1518500249(%ebp,%esi,1),%ebp addl %eax,%ebp movl %ecx,%esi movl %ebp,%eax roll $5,%ebp xorl %edx,%esi addl %edi,%ebp movl 48(%esp),%edi andl %ebx,%esi rorl $2,%ebx xorl %edx,%esi leal 1518500249(%ebp,%edi,1),%ebp addl %esi,%ebp movl %ebx,%edi movl %ebp,%esi roll $5,%ebp xorl %ecx,%edi addl %edx,%ebp movl 52(%esp),%edx andl %eax,%edi rorl $2,%eax xorl %ecx,%edi leal 1518500249(%ebp,%edx,1),%ebp addl %edi,%ebp movl %eax,%edx movl %ebp,%edi roll $5,%ebp xorl %ebx,%edx addl %ecx,%ebp movl 56(%esp),%ecx andl %esi,%edx rorl $2,%esi xorl %ebx,%edx leal 1518500249(%ebp,%ecx,1),%ebp addl %edx,%ebp movl %esi,%ecx movl %ebp,%edx roll $5,%ebp xorl %eax,%ecx addl %ebx,%ebp movl 60(%esp),%ebx andl %edi,%ecx rorl $2,%edi xorl %eax,%ecx leal 1518500249(%ebp,%ebx,1),%ebp movl (%esp),%ebx addl %ebp,%ecx movl %edi,%ebp xorl 8(%esp),%ebx xorl %esi,%ebp xorl 32(%esp),%ebx andl %edx,%ebp xorl 52(%esp),%ebx roll $1,%ebx xorl %esi,%ebp addl %ebp,%eax movl %ecx,%ebp rorl $2,%edx movl %ebx,(%esp) roll $5,%ebp leal 1518500249(%ebx,%eax,1),%ebx movl 4(%esp),%eax addl %ebp,%ebx movl %edx,%ebp xorl 12(%esp),%eax xorl %edi,%ebp xorl 36(%esp),%eax andl %ecx,%ebp xorl 56(%esp),%eax roll $1,%eax xorl %edi,%ebp addl %ebp,%esi movl %ebx,%ebp rorl $2,%ecx movl %eax,4(%esp) roll $5,%ebp leal 1518500249(%eax,%esi,1),%eax movl 8(%esp),%esi addl %ebp,%eax movl %ecx,%ebp xorl 16(%esp),%esi xorl %edx,%ebp xorl 40(%esp),%esi andl %ebx,%ebp xorl 60(%esp),%esi roll $1,%esi xorl %edx,%ebp addl %ebp,%edi movl %eax,%ebp rorl $2,%ebx movl %esi,8(%esp) roll $5,%ebp leal 1518500249(%esi,%edi,1),%esi movl 12(%esp),%edi addl %ebp,%esi movl %ebx,%ebp xorl 20(%esp),%edi xorl %ecx,%ebp xorl 44(%esp),%edi andl %eax,%ebp xorl (%esp),%edi roll $1,%edi xorl %ecx,%ebp addl %ebp,%edx movl %esi,%ebp rorl $2,%eax movl %edi,12(%esp) roll $5,%ebp leal 1518500249(%edi,%edx,1),%edi movl 16(%esp),%edx addl %ebp,%edi movl %esi,%ebp xorl 24(%esp),%edx xorl %eax,%ebp xorl 48(%esp),%edx xorl %ebx,%ebp xorl 4(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,16(%esp) leal 1859775393(%edx,%ecx,1),%edx movl 20(%esp),%ecx addl %ebp,%edx movl %edi,%ebp xorl 28(%esp),%ecx xorl %esi,%ebp xorl 52(%esp),%ecx xorl %eax,%ebp xorl 8(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,20(%esp) leal 1859775393(%ecx,%ebx,1),%ecx movl 24(%esp),%ebx addl %ebp,%ecx movl %edx,%ebp xorl 32(%esp),%ebx xorl %edi,%ebp xorl 56(%esp),%ebx xorl %esi,%ebp xorl 12(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,24(%esp) leal 1859775393(%ebx,%eax,1),%ebx movl 28(%esp),%eax addl %ebp,%ebx movl %ecx,%ebp xorl 36(%esp),%eax xorl %edx,%ebp xorl 60(%esp),%eax xorl %edi,%ebp xorl 16(%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp movl %eax,28(%esp) leal 1859775393(%eax,%esi,1),%eax movl 32(%esp),%esi addl %ebp,%eax movl %ebx,%ebp xorl 40(%esp),%esi xorl %ecx,%ebp xorl (%esp),%esi xorl %edx,%ebp xorl 20(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,32(%esp) leal 1859775393(%esi,%edi,1),%esi movl 36(%esp),%edi addl %ebp,%esi movl %eax,%ebp xorl 44(%esp),%edi xorl %ebx,%ebp xorl 4(%esp),%edi xorl %ecx,%ebp xorl 24(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,36(%esp) leal 1859775393(%edi,%edx,1),%edi movl 40(%esp),%edx addl %ebp,%edi movl %esi,%ebp xorl 
48(%esp),%edx xorl %eax,%ebp xorl 8(%esp),%edx xorl %ebx,%ebp xorl 28(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,40(%esp) leal 1859775393(%edx,%ecx,1),%edx movl 44(%esp),%ecx addl %ebp,%edx movl %edi,%ebp xorl 52(%esp),%ecx xorl %esi,%ebp xorl 12(%esp),%ecx xorl %eax,%ebp xorl 32(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,44(%esp) leal 1859775393(%ecx,%ebx,1),%ecx movl 48(%esp),%ebx addl %ebp,%ecx movl %edx,%ebp xorl 56(%esp),%ebx xorl %edi,%ebp xorl 16(%esp),%ebx xorl %esi,%ebp xorl 36(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,48(%esp) leal 1859775393(%ebx,%eax,1),%ebx movl 52(%esp),%eax addl %ebp,%ebx movl %ecx,%ebp xorl 60(%esp),%eax xorl %edx,%ebp xorl 20(%esp),%eax xorl %edi,%ebp xorl 40(%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp movl %eax,52(%esp) leal 1859775393(%eax,%esi,1),%eax movl 56(%esp),%esi addl %ebp,%eax movl %ebx,%ebp xorl (%esp),%esi xorl %ecx,%ebp xorl 24(%esp),%esi xorl %edx,%ebp xorl 44(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,56(%esp) leal 1859775393(%esi,%edi,1),%esi movl 60(%esp),%edi addl %ebp,%esi movl %eax,%ebp xorl 4(%esp),%edi xorl %ebx,%ebp xorl 28(%esp),%edi xorl %ecx,%ebp xorl 48(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,60(%esp) leal 1859775393(%edi,%edx,1),%edi movl (%esp),%edx addl %ebp,%edi movl %esi,%ebp xorl 8(%esp),%edx xorl %eax,%ebp xorl 32(%esp),%edx xorl %ebx,%ebp xorl 52(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,(%esp) leal 1859775393(%edx,%ecx,1),%edx movl 4(%esp),%ecx addl %ebp,%edx movl %edi,%ebp xorl 12(%esp),%ecx xorl %esi,%ebp xorl 36(%esp),%ecx xorl %eax,%ebp xorl 56(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,4(%esp) leal 1859775393(%ecx,%ebx,1),%ecx movl 8(%esp),%ebx addl %ebp,%ecx movl %edx,%ebp xorl 16(%esp),%ebx xorl %edi,%ebp xorl 40(%esp),%ebx xorl %esi,%ebp xorl 60(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,8(%esp) leal 1859775393(%ebx,%eax,1),%ebx movl 12(%esp),%eax addl %ebp,%ebx movl %ecx,%ebp xorl 20(%esp),%eax xorl %edx,%ebp xorl 44(%esp),%eax xorl %edi,%ebp xorl (%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp movl %eax,12(%esp) leal 1859775393(%eax,%esi,1),%eax movl 16(%esp),%esi addl %ebp,%eax movl %ebx,%ebp xorl 24(%esp),%esi xorl %ecx,%ebp xorl 48(%esp),%esi xorl %edx,%ebp xorl 4(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,16(%esp) leal 1859775393(%esi,%edi,1),%esi movl 20(%esp),%edi addl %ebp,%esi movl %eax,%ebp xorl 28(%esp),%edi xorl %ebx,%ebp xorl 52(%esp),%edi xorl %ecx,%ebp xorl 8(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,20(%esp) leal 1859775393(%edi,%edx,1),%edi movl 24(%esp),%edx addl %ebp,%edi movl %esi,%ebp xorl 32(%esp),%edx xorl %eax,%ebp xorl 56(%esp),%edx xorl %ebx,%ebp xorl 12(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,24(%esp) leal 1859775393(%edx,%ecx,1),%edx movl 28(%esp),%ecx addl %ebp,%edx movl %edi,%ebp xorl 36(%esp),%ecx xorl %esi,%ebp xorl 60(%esp),%ecx xorl %eax,%ebp xorl 16(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,28(%esp) leal 1859775393(%ecx,%ebx,1),%ecx movl 
32(%esp),%ebx addl %ebp,%ecx movl %edi,%ebp xorl 40(%esp),%ebx xorl %esi,%ebp xorl (%esp),%ebx andl %edx,%ebp xorl 20(%esp),%ebx roll $1,%ebx addl %eax,%ebp rorl $2,%edx movl %ecx,%eax roll $5,%eax movl %ebx,32(%esp) leal 2400959708(%ebx,%ebp,1),%ebx movl %edi,%ebp addl %eax,%ebx andl %esi,%ebp movl 36(%esp),%eax addl %ebp,%ebx movl %edx,%ebp xorl 44(%esp),%eax xorl %edi,%ebp xorl 4(%esp),%eax andl %ecx,%ebp xorl 24(%esp),%eax roll $1,%eax addl %esi,%ebp rorl $2,%ecx movl %ebx,%esi roll $5,%esi movl %eax,36(%esp) leal 2400959708(%eax,%ebp,1),%eax movl %edx,%ebp addl %esi,%eax andl %edi,%ebp movl 40(%esp),%esi addl %ebp,%eax movl %ecx,%ebp xorl 48(%esp),%esi xorl %edx,%ebp xorl 8(%esp),%esi andl %ebx,%ebp xorl 28(%esp),%esi roll $1,%esi addl %edi,%ebp rorl $2,%ebx movl %eax,%edi roll $5,%edi movl %esi,40(%esp) leal 2400959708(%esi,%ebp,1),%esi movl %ecx,%ebp addl %edi,%esi andl %edx,%ebp movl 44(%esp),%edi addl %ebp,%esi movl %ebx,%ebp xorl 52(%esp),%edi xorl %ecx,%ebp xorl 12(%esp),%edi andl %eax,%ebp xorl 32(%esp),%edi roll $1,%edi addl %edx,%ebp rorl $2,%eax movl %esi,%edx roll $5,%edx movl %edi,44(%esp) leal 2400959708(%edi,%ebp,1),%edi movl %ebx,%ebp addl %edx,%edi andl %ecx,%ebp movl 48(%esp),%edx addl %ebp,%edi movl %eax,%ebp xorl 56(%esp),%edx xorl %ebx,%ebp xorl 16(%esp),%edx andl %esi,%ebp xorl 36(%esp),%edx roll $1,%edx addl %ecx,%ebp rorl $2,%esi movl %edi,%ecx roll $5,%ecx movl %edx,48(%esp) leal 2400959708(%edx,%ebp,1),%edx movl %eax,%ebp addl %ecx,%edx andl %ebx,%ebp movl 52(%esp),%ecx addl %ebp,%edx movl %esi,%ebp xorl 60(%esp),%ecx xorl %eax,%ebp xorl 20(%esp),%ecx andl %edi,%ebp xorl 40(%esp),%ecx roll $1,%ecx addl %ebx,%ebp rorl $2,%edi movl %edx,%ebx roll $5,%ebx movl %ecx,52(%esp) leal 2400959708(%ecx,%ebp,1),%ecx movl %esi,%ebp addl %ebx,%ecx andl %eax,%ebp movl 56(%esp),%ebx addl %ebp,%ecx movl %edi,%ebp xorl (%esp),%ebx xorl %esi,%ebp xorl 24(%esp),%ebx andl %edx,%ebp xorl 44(%esp),%ebx roll $1,%ebx addl %eax,%ebp rorl $2,%edx movl %ecx,%eax roll $5,%eax movl %ebx,56(%esp) leal 2400959708(%ebx,%ebp,1),%ebx movl %edi,%ebp addl %eax,%ebx andl %esi,%ebp movl 60(%esp),%eax addl %ebp,%ebx movl %edx,%ebp xorl 4(%esp),%eax xorl %edi,%ebp xorl 28(%esp),%eax andl %ecx,%ebp xorl 48(%esp),%eax roll $1,%eax addl %esi,%ebp rorl $2,%ecx movl %ebx,%esi roll $5,%esi movl %eax,60(%esp) leal 2400959708(%eax,%ebp,1),%eax movl %edx,%ebp addl %esi,%eax andl %edi,%ebp movl (%esp),%esi addl %ebp,%eax movl %ecx,%ebp xorl 8(%esp),%esi xorl %edx,%ebp xorl 32(%esp),%esi andl %ebx,%ebp xorl 52(%esp),%esi roll $1,%esi addl %edi,%ebp rorl $2,%ebx movl %eax,%edi roll $5,%edi movl %esi,(%esp) leal 2400959708(%esi,%ebp,1),%esi movl %ecx,%ebp addl %edi,%esi andl %edx,%ebp movl 4(%esp),%edi addl %ebp,%esi movl %ebx,%ebp xorl 12(%esp),%edi xorl %ecx,%ebp xorl 36(%esp),%edi andl %eax,%ebp xorl 56(%esp),%edi roll $1,%edi addl %edx,%ebp rorl $2,%eax movl %esi,%edx roll $5,%edx movl %edi,4(%esp) leal 2400959708(%edi,%ebp,1),%edi movl %ebx,%ebp addl %edx,%edi andl %ecx,%ebp movl 8(%esp),%edx addl %ebp,%edi movl %eax,%ebp xorl 16(%esp),%edx xorl %ebx,%ebp xorl 40(%esp),%edx andl %esi,%ebp xorl 60(%esp),%edx roll $1,%edx addl %ecx,%ebp rorl $2,%esi movl %edi,%ecx roll $5,%ecx movl %edx,8(%esp) leal 2400959708(%edx,%ebp,1),%edx movl %eax,%ebp addl %ecx,%edx andl %ebx,%ebp movl 12(%esp),%ecx addl %ebp,%edx movl %esi,%ebp xorl 20(%esp),%ecx xorl %eax,%ebp xorl 44(%esp),%ecx andl %edi,%ebp xorl (%esp),%ecx roll $1,%ecx addl %ebx,%ebp rorl $2,%edi movl %edx,%ebx roll $5,%ebx movl %ecx,12(%esp) leal 
2400959708(%ecx,%ebp,1),%ecx movl %esi,%ebp addl %ebx,%ecx andl %eax,%ebp movl 16(%esp),%ebx addl %ebp,%ecx movl %edi,%ebp xorl 24(%esp),%ebx xorl %esi,%ebp xorl 48(%esp),%ebx andl %edx,%ebp xorl 4(%esp),%ebx roll $1,%ebx addl %eax,%ebp rorl $2,%edx movl %ecx,%eax roll $5,%eax movl %ebx,16(%esp) leal 2400959708(%ebx,%ebp,1),%ebx movl %edi,%ebp addl %eax,%ebx andl %esi,%ebp movl 20(%esp),%eax addl %ebp,%ebx movl %edx,%ebp xorl 28(%esp),%eax xorl %edi,%ebp xorl 52(%esp),%eax andl %ecx,%ebp xorl 8(%esp),%eax roll $1,%eax addl %esi,%ebp rorl $2,%ecx movl %ebx,%esi roll $5,%esi movl %eax,20(%esp) leal 2400959708(%eax,%ebp,1),%eax movl %edx,%ebp addl %esi,%eax andl %edi,%ebp movl 24(%esp),%esi addl %ebp,%eax movl %ecx,%ebp xorl 32(%esp),%esi xorl %edx,%ebp xorl 56(%esp),%esi andl %ebx,%ebp xorl 12(%esp),%esi roll $1,%esi addl %edi,%ebp rorl $2,%ebx movl %eax,%edi roll $5,%edi movl %esi,24(%esp) leal 2400959708(%esi,%ebp,1),%esi movl %ecx,%ebp addl %edi,%esi andl %edx,%ebp movl 28(%esp),%edi addl %ebp,%esi movl %ebx,%ebp xorl 36(%esp),%edi xorl %ecx,%ebp xorl 60(%esp),%edi andl %eax,%ebp xorl 16(%esp),%edi roll $1,%edi addl %edx,%ebp rorl $2,%eax movl %esi,%edx roll $5,%edx movl %edi,28(%esp) leal 2400959708(%edi,%ebp,1),%edi movl %ebx,%ebp addl %edx,%edi andl %ecx,%ebp movl 32(%esp),%edx addl %ebp,%edi movl %eax,%ebp xorl 40(%esp),%edx xorl %ebx,%ebp xorl (%esp),%edx andl %esi,%ebp xorl 20(%esp),%edx roll $1,%edx addl %ecx,%ebp rorl $2,%esi movl %edi,%ecx roll $5,%ecx movl %edx,32(%esp) leal 2400959708(%edx,%ebp,1),%edx movl %eax,%ebp addl %ecx,%edx andl %ebx,%ebp movl 36(%esp),%ecx addl %ebp,%edx movl %esi,%ebp xorl 44(%esp),%ecx xorl %eax,%ebp xorl 4(%esp),%ecx andl %edi,%ebp xorl 24(%esp),%ecx roll $1,%ecx addl %ebx,%ebp rorl $2,%edi movl %edx,%ebx roll $5,%ebx movl %ecx,36(%esp) leal 2400959708(%ecx,%ebp,1),%ecx movl %esi,%ebp addl %ebx,%ecx andl %eax,%ebp movl 40(%esp),%ebx addl %ebp,%ecx movl %edi,%ebp xorl 48(%esp),%ebx xorl %esi,%ebp xorl 8(%esp),%ebx andl %edx,%ebp xorl 28(%esp),%ebx roll $1,%ebx addl %eax,%ebp rorl $2,%edx movl %ecx,%eax roll $5,%eax movl %ebx,40(%esp) leal 2400959708(%ebx,%ebp,1),%ebx movl %edi,%ebp addl %eax,%ebx andl %esi,%ebp movl 44(%esp),%eax addl %ebp,%ebx movl %edx,%ebp xorl 52(%esp),%eax xorl %edi,%ebp xorl 12(%esp),%eax andl %ecx,%ebp xorl 32(%esp),%eax roll $1,%eax addl %esi,%ebp rorl $2,%ecx movl %ebx,%esi roll $5,%esi movl %eax,44(%esp) leal 2400959708(%eax,%ebp,1),%eax movl %edx,%ebp addl %esi,%eax andl %edi,%ebp movl 48(%esp),%esi addl %ebp,%eax movl %ebx,%ebp xorl 56(%esp),%esi xorl %ecx,%ebp xorl 16(%esp),%esi xorl %edx,%ebp xorl 36(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,48(%esp) leal 3395469782(%esi,%edi,1),%esi movl 52(%esp),%edi addl %ebp,%esi movl %eax,%ebp xorl 60(%esp),%edi xorl %ebx,%ebp xorl 20(%esp),%edi xorl %ecx,%ebp xorl 40(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,52(%esp) leal 3395469782(%edi,%edx,1),%edi movl 56(%esp),%edx addl %ebp,%edi movl %esi,%ebp xorl (%esp),%edx xorl %eax,%ebp xorl 24(%esp),%edx xorl %ebx,%ebp xorl 44(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,56(%esp) leal 3395469782(%edx,%ecx,1),%edx movl 60(%esp),%ecx addl %ebp,%edx movl %edi,%ebp xorl 4(%esp),%ecx xorl %esi,%ebp xorl 28(%esp),%ecx xorl %eax,%ebp xorl 48(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,60(%esp) leal 3395469782(%ecx,%ebx,1),%ecx movl (%esp),%ebx addl %ebp,%ecx 
movl %edx,%ebp xorl 8(%esp),%ebx xorl %edi,%ebp xorl 32(%esp),%ebx xorl %esi,%ebp xorl 52(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,(%esp) leal 3395469782(%ebx,%eax,1),%ebx movl 4(%esp),%eax addl %ebp,%ebx movl %ecx,%ebp xorl 12(%esp),%eax xorl %edx,%ebp xorl 36(%esp),%eax xorl %edi,%ebp xorl 56(%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp movl %eax,4(%esp) leal 3395469782(%eax,%esi,1),%eax movl 8(%esp),%esi addl %ebp,%eax movl %ebx,%ebp xorl 16(%esp),%esi xorl %ecx,%ebp xorl 40(%esp),%esi xorl %edx,%ebp xorl 60(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,8(%esp) leal 3395469782(%esi,%edi,1),%esi movl 12(%esp),%edi addl %ebp,%esi movl %eax,%ebp xorl 20(%esp),%edi xorl %ebx,%ebp xorl 44(%esp),%edi xorl %ecx,%ebp xorl (%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,12(%esp) leal 3395469782(%edi,%edx,1),%edi movl 16(%esp),%edx addl %ebp,%edi movl %esi,%ebp xorl 24(%esp),%edx xorl %eax,%ebp xorl 48(%esp),%edx xorl %ebx,%ebp xorl 4(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,16(%esp) leal 3395469782(%edx,%ecx,1),%edx movl 20(%esp),%ecx addl %ebp,%edx movl %edi,%ebp xorl 28(%esp),%ecx xorl %esi,%ebp xorl 52(%esp),%ecx xorl %eax,%ebp xorl 8(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,20(%esp) leal 3395469782(%ecx,%ebx,1),%ecx movl 24(%esp),%ebx addl %ebp,%ecx movl %edx,%ebp xorl 32(%esp),%ebx xorl %edi,%ebp xorl 56(%esp),%ebx xorl %esi,%ebp xorl 12(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,24(%esp) leal 3395469782(%ebx,%eax,1),%ebx movl 28(%esp),%eax addl %ebp,%ebx movl %ecx,%ebp xorl 36(%esp),%eax xorl %edx,%ebp xorl 60(%esp),%eax xorl %edi,%ebp xorl 16(%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp movl %eax,28(%esp) leal 3395469782(%eax,%esi,1),%eax movl 32(%esp),%esi addl %ebp,%eax movl %ebx,%ebp xorl 40(%esp),%esi xorl %ecx,%ebp xorl (%esp),%esi xorl %edx,%ebp xorl 20(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp movl %esi,32(%esp) leal 3395469782(%esi,%edi,1),%esi movl 36(%esp),%edi addl %ebp,%esi movl %eax,%ebp xorl 44(%esp),%edi xorl %ebx,%ebp xorl 4(%esp),%edi xorl %ecx,%ebp xorl 24(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp movl %edi,36(%esp) leal 3395469782(%edi,%edx,1),%edi movl 40(%esp),%edx addl %ebp,%edi movl %esi,%ebp xorl 48(%esp),%edx xorl %eax,%ebp xorl 8(%esp),%edx xorl %ebx,%ebp xorl 28(%esp),%edx roll $1,%edx addl %ebp,%ecx rorl $2,%esi movl %edi,%ebp roll $5,%ebp movl %edx,40(%esp) leal 3395469782(%edx,%ecx,1),%edx movl 44(%esp),%ecx addl %ebp,%edx movl %edi,%ebp xorl 52(%esp),%ecx xorl %esi,%ebp xorl 12(%esp),%ecx xorl %eax,%ebp xorl 32(%esp),%ecx roll $1,%ecx addl %ebp,%ebx rorl $2,%edi movl %edx,%ebp roll $5,%ebp movl %ecx,44(%esp) leal 3395469782(%ecx,%ebx,1),%ecx movl 48(%esp),%ebx addl %ebp,%ecx movl %edx,%ebp xorl 56(%esp),%ebx xorl %edi,%ebp xorl 16(%esp),%ebx xorl %esi,%ebp xorl 36(%esp),%ebx roll $1,%ebx addl %ebp,%eax rorl $2,%edx movl %ecx,%ebp roll $5,%ebp movl %ebx,48(%esp) leal 3395469782(%ebx,%eax,1),%ebx movl 52(%esp),%eax addl %ebp,%ebx movl %ecx,%ebp xorl 60(%esp),%eax xorl %edx,%ebp xorl 20(%esp),%eax xorl %edi,%ebp xorl 40(%esp),%eax roll $1,%eax addl %ebp,%esi rorl $2,%ecx movl %ebx,%ebp roll $5,%ebp leal 3395469782(%eax,%esi,1),%eax movl 
56(%esp),%esi addl %ebp,%eax movl %ebx,%ebp xorl (%esp),%esi xorl %ecx,%ebp xorl 24(%esp),%esi xorl %edx,%ebp xorl 44(%esp),%esi roll $1,%esi addl %ebp,%edi rorl $2,%ebx movl %eax,%ebp roll $5,%ebp leal 3395469782(%esi,%edi,1),%esi movl 60(%esp),%edi addl %ebp,%esi movl %eax,%ebp xorl 4(%esp),%edi xorl %ebx,%ebp xorl 28(%esp),%edi xorl %ecx,%ebp xorl 48(%esp),%edi roll $1,%edi addl %ebp,%edx rorl $2,%eax movl %esi,%ebp roll $5,%ebp leal 3395469782(%edi,%edx,1),%edi addl %ebp,%edi movl 96(%esp),%ebp movl 100(%esp),%edx addl (%ebp),%edi addl 4(%ebp),%esi addl 8(%ebp),%eax addl 12(%ebp),%ebx addl 16(%ebp),%ecx movl %edi,(%ebp) addl $64,%edx movl %esi,4(%ebp) cmpl 104(%esp),%edx movl %eax,8(%ebp) movl %ecx,%edi movl %ebx,12(%ebp) movl %edx,%esi movl %ecx,16(%ebp) jb .L000loop addl $76,%esp popl %edi popl %esi popl %ebx popl %ebp ret .size sha1_block_data_order_nohw,.-.L_sha1_block_data_order_nohw_begin .globl sha1_block_data_order_ssse3 .hidden sha1_block_data_order_ssse3 .type sha1_block_data_order_ssse3,@function .align 16 sha1_block_data_order_ssse3: .L_sha1_block_data_order_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi call .L001pic_point .L001pic_point: popl %ebp leal .LK_XX_XX-.L001pic_point(%ebp),%ebp movdqa (%ebp),%xmm7 movdqa 16(%ebp),%xmm0 movdqa 32(%ebp),%xmm1 movdqa 48(%ebp),%xmm2 movdqa 64(%ebp),%xmm6 movl 20(%esp),%edi movl 24(%esp),%ebp movl 28(%esp),%edx movl %esp,%esi subl $208,%esp andl $-64,%esp movdqa %xmm0,112(%esp) movdqa %xmm1,128(%esp) movdqa %xmm2,144(%esp) shll $6,%edx movdqa %xmm7,160(%esp) addl %ebp,%edx movdqa %xmm6,176(%esp) addl $64,%ebp movl %edi,192(%esp) movl %ebp,196(%esp) movl %edx,200(%esp) movl %esi,204(%esp) movl (%edi),%eax movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx movl 16(%edi),%edi movl %ebx,%esi movdqu -64(%ebp),%xmm0 movdqu -48(%ebp),%xmm1 movdqu -32(%ebp),%xmm2 movdqu -16(%ebp),%xmm3 .byte 102,15,56,0,198 .byte 102,15,56,0,206 .byte 102,15,56,0,214 movdqa %xmm7,96(%esp) .byte 102,15,56,0,222 paddd %xmm7,%xmm0 paddd %xmm7,%xmm1 paddd %xmm7,%xmm2 movdqa %xmm0,(%esp) psubd %xmm7,%xmm0 movdqa %xmm1,16(%esp) psubd %xmm7,%xmm1 movdqa %xmm2,32(%esp) movl %ecx,%ebp psubd %xmm7,%xmm2 xorl %edx,%ebp pshufd $238,%xmm0,%xmm4 andl %ebp,%esi jmp .L002loop .align 16 .L002loop: rorl $2,%ebx xorl %edx,%esi movl %eax,%ebp punpcklqdq %xmm1,%xmm4 movdqa %xmm3,%xmm6 addl (%esp),%edi xorl %ecx,%ebx paddd %xmm3,%xmm7 movdqa %xmm0,64(%esp) roll $5,%eax addl %esi,%edi psrldq $4,%xmm6 andl %ebx,%ebp xorl %ecx,%ebx pxor %xmm0,%xmm4 addl %eax,%edi rorl $7,%eax pxor %xmm2,%xmm6 xorl %ecx,%ebp movl %edi,%esi addl 4(%esp),%edx pxor %xmm6,%xmm4 xorl %ebx,%eax roll $5,%edi movdqa %xmm7,48(%esp) addl %ebp,%edx andl %eax,%esi movdqa %xmm4,%xmm0 xorl %ebx,%eax addl %edi,%edx rorl $7,%edi movdqa %xmm4,%xmm6 xorl %ebx,%esi pslldq $12,%xmm0 paddd %xmm4,%xmm4 movl %edx,%ebp addl 8(%esp),%ecx psrld $31,%xmm6 xorl %eax,%edi roll $5,%edx movdqa %xmm0,%xmm7 addl %esi,%ecx andl %edi,%ebp xorl %eax,%edi psrld $30,%xmm0 addl %edx,%ecx rorl $7,%edx por %xmm6,%xmm4 xorl %eax,%ebp movl %ecx,%esi addl 12(%esp),%ebx pslld $2,%xmm7 xorl %edi,%edx roll $5,%ecx pxor %xmm0,%xmm4 movdqa 96(%esp),%xmm0 addl %ebp,%ebx andl %edx,%esi pxor %xmm7,%xmm4 pshufd $238,%xmm1,%xmm5 xorl %edi,%edx addl %ecx,%ebx rorl $7,%ecx xorl %edi,%esi movl %ebx,%ebp punpcklqdq %xmm2,%xmm5 movdqa %xmm4,%xmm7 addl 16(%esp),%eax xorl %edx,%ecx paddd %xmm4,%xmm0 movdqa %xmm1,80(%esp) roll $5,%ebx addl %esi,%eax psrldq $4,%xmm7 andl %ecx,%ebp xorl %edx,%ecx pxor %xmm1,%xmm5 addl %ebx,%eax rorl $7,%ebx pxor 
%xmm3,%xmm7 xorl %edx,%ebp movl %eax,%esi addl 20(%esp),%edi pxor %xmm7,%xmm5 xorl %ecx,%ebx roll $5,%eax movdqa %xmm0,(%esp) addl %ebp,%edi andl %ebx,%esi movdqa %xmm5,%xmm1 xorl %ecx,%ebx addl %eax,%edi rorl $7,%eax movdqa %xmm5,%xmm7 xorl %ecx,%esi pslldq $12,%xmm1 paddd %xmm5,%xmm5 movl %edi,%ebp addl 24(%esp),%edx psrld $31,%xmm7 xorl %ebx,%eax roll $5,%edi movdqa %xmm1,%xmm0 addl %esi,%edx andl %eax,%ebp xorl %ebx,%eax psrld $30,%xmm1 addl %edi,%edx rorl $7,%edi por %xmm7,%xmm5 xorl %ebx,%ebp movl %edx,%esi addl 28(%esp),%ecx pslld $2,%xmm0 xorl %eax,%edi roll $5,%edx pxor %xmm1,%xmm5 movdqa 112(%esp),%xmm1 addl %ebp,%ecx andl %edi,%esi pxor %xmm0,%xmm5 pshufd $238,%xmm2,%xmm6 xorl %eax,%edi addl %edx,%ecx rorl $7,%edx xorl %eax,%esi movl %ecx,%ebp punpcklqdq %xmm3,%xmm6 movdqa %xmm5,%xmm0 addl 32(%esp),%ebx xorl %edi,%edx paddd %xmm5,%xmm1 movdqa %xmm2,96(%esp) roll $5,%ecx addl %esi,%ebx psrldq $4,%xmm0 andl %edx,%ebp xorl %edi,%edx pxor %xmm2,%xmm6 addl %ecx,%ebx rorl $7,%ecx pxor %xmm4,%xmm0 xorl %edi,%ebp movl %ebx,%esi addl 36(%esp),%eax pxor %xmm0,%xmm6 xorl %edx,%ecx roll $5,%ebx movdqa %xmm1,16(%esp) addl %ebp,%eax andl %ecx,%esi movdqa %xmm6,%xmm2 xorl %edx,%ecx addl %ebx,%eax rorl $7,%ebx movdqa %xmm6,%xmm0 xorl %edx,%esi pslldq $12,%xmm2 paddd %xmm6,%xmm6 movl %eax,%ebp addl 40(%esp),%edi psrld $31,%xmm0 xorl %ecx,%ebx roll $5,%eax movdqa %xmm2,%xmm1 addl %esi,%edi andl %ebx,%ebp xorl %ecx,%ebx psrld $30,%xmm2 addl %eax,%edi rorl $7,%eax por %xmm0,%xmm6 xorl %ecx,%ebp movdqa 64(%esp),%xmm0 movl %edi,%esi addl 44(%esp),%edx pslld $2,%xmm1 xorl %ebx,%eax roll $5,%edi pxor %xmm2,%xmm6 movdqa 112(%esp),%xmm2 addl %ebp,%edx andl %eax,%esi pxor %xmm1,%xmm6 pshufd $238,%xmm3,%xmm7 xorl %ebx,%eax addl %edi,%edx rorl $7,%edi xorl %ebx,%esi movl %edx,%ebp punpcklqdq %xmm4,%xmm7 movdqa %xmm6,%xmm1 addl 48(%esp),%ecx xorl %eax,%edi paddd %xmm6,%xmm2 movdqa %xmm3,64(%esp) roll $5,%edx addl %esi,%ecx psrldq $4,%xmm1 andl %edi,%ebp xorl %eax,%edi pxor %xmm3,%xmm7 addl %edx,%ecx rorl $7,%edx pxor %xmm5,%xmm1 xorl %eax,%ebp movl %ecx,%esi addl 52(%esp),%ebx pxor %xmm1,%xmm7 xorl %edi,%edx roll $5,%ecx movdqa %xmm2,32(%esp) addl %ebp,%ebx andl %edx,%esi movdqa %xmm7,%xmm3 xorl %edi,%edx addl %ecx,%ebx rorl $7,%ecx movdqa %xmm7,%xmm1 xorl %edi,%esi pslldq $12,%xmm3 paddd %xmm7,%xmm7 movl %ebx,%ebp addl 56(%esp),%eax psrld $31,%xmm1 xorl %edx,%ecx roll $5,%ebx movdqa %xmm3,%xmm2 addl %esi,%eax andl %ecx,%ebp xorl %edx,%ecx psrld $30,%xmm3 addl %ebx,%eax rorl $7,%ebx por %xmm1,%xmm7 xorl %edx,%ebp movdqa 80(%esp),%xmm1 movl %eax,%esi addl 60(%esp),%edi pslld $2,%xmm2 xorl %ecx,%ebx roll $5,%eax pxor %xmm3,%xmm7 movdqa 112(%esp),%xmm3 addl %ebp,%edi andl %ebx,%esi pxor %xmm2,%xmm7 pshufd $238,%xmm6,%xmm2 xorl %ecx,%ebx addl %eax,%edi rorl $7,%eax pxor %xmm4,%xmm0 punpcklqdq %xmm7,%xmm2 xorl %ecx,%esi movl %edi,%ebp addl (%esp),%edx pxor %xmm1,%xmm0 movdqa %xmm4,80(%esp) xorl %ebx,%eax roll $5,%edi movdqa %xmm3,%xmm4 addl %esi,%edx paddd %xmm7,%xmm3 andl %eax,%ebp pxor %xmm2,%xmm0 xorl %ebx,%eax addl %edi,%edx rorl $7,%edi xorl %ebx,%ebp movdqa %xmm0,%xmm2 movdqa %xmm3,48(%esp) movl %edx,%esi addl 4(%esp),%ecx xorl %eax,%edi roll $5,%edx pslld $2,%xmm0 addl %ebp,%ecx andl %edi,%esi psrld $30,%xmm2 xorl %eax,%edi addl %edx,%ecx rorl $7,%edx xorl %eax,%esi movl %ecx,%ebp addl 8(%esp),%ebx xorl %edi,%edx roll $5,%ecx por %xmm2,%xmm0 addl %esi,%ebx andl %edx,%ebp movdqa 96(%esp),%xmm2 xorl %edi,%edx addl %ecx,%ebx addl 12(%esp),%eax xorl %edi,%ebp movl %ebx,%esi pshufd $238,%xmm7,%xmm3 roll $5,%ebx 
addl %ebp,%eax xorl %edx,%esi rorl $7,%ecx addl %ebx,%eax addl 16(%esp),%edi pxor %xmm5,%xmm1 punpcklqdq %xmm0,%xmm3 xorl %ecx,%esi movl %eax,%ebp roll $5,%eax pxor %xmm2,%xmm1 movdqa %xmm5,96(%esp) addl %esi,%edi xorl %ecx,%ebp movdqa %xmm4,%xmm5 rorl $7,%ebx paddd %xmm0,%xmm4 addl %eax,%edi pxor %xmm3,%xmm1 addl 20(%esp),%edx xorl %ebx,%ebp movl %edi,%esi roll $5,%edi movdqa %xmm1,%xmm3 movdqa %xmm4,(%esp) addl %ebp,%edx xorl %ebx,%esi rorl $7,%eax addl %edi,%edx pslld $2,%xmm1 addl 24(%esp),%ecx xorl %eax,%esi psrld $30,%xmm3 movl %edx,%ebp roll $5,%edx addl %esi,%ecx xorl %eax,%ebp rorl $7,%edi addl %edx,%ecx por %xmm3,%xmm1 addl 28(%esp),%ebx xorl %edi,%ebp movdqa 64(%esp),%xmm3 movl %ecx,%esi roll $5,%ecx addl %ebp,%ebx xorl %edi,%esi rorl $7,%edx pshufd $238,%xmm0,%xmm4 addl %ecx,%ebx addl 32(%esp),%eax pxor %xmm6,%xmm2 punpcklqdq %xmm1,%xmm4 xorl %edx,%esi movl %ebx,%ebp roll $5,%ebx pxor %xmm3,%xmm2 movdqa %xmm6,64(%esp) addl %esi,%eax xorl %edx,%ebp movdqa 128(%esp),%xmm6 rorl $7,%ecx paddd %xmm1,%xmm5 addl %ebx,%eax pxor %xmm4,%xmm2 addl 36(%esp),%edi xorl %ecx,%ebp movl %eax,%esi roll $5,%eax movdqa %xmm2,%xmm4 movdqa %xmm5,16(%esp) addl %ebp,%edi xorl %ecx,%esi rorl $7,%ebx addl %eax,%edi pslld $2,%xmm2 addl 40(%esp),%edx xorl %ebx,%esi psrld $30,%xmm4 movl %edi,%ebp roll $5,%edi addl %esi,%edx xorl %ebx,%ebp rorl $7,%eax addl %edi,%edx por %xmm4,%xmm2 addl 44(%esp),%ecx xorl %eax,%ebp movdqa 80(%esp),%xmm4 movl %edx,%esi roll $5,%edx addl %ebp,%ecx xorl %eax,%esi rorl $7,%edi pshufd $238,%xmm1,%xmm5 addl %edx,%ecx addl 48(%esp),%ebx pxor %xmm7,%xmm3 punpcklqdq %xmm2,%xmm5 xorl %edi,%esi movl %ecx,%ebp roll $5,%ecx pxor %xmm4,%xmm3 movdqa %xmm7,80(%esp) addl %esi,%ebx xorl %edi,%ebp movdqa %xmm6,%xmm7 rorl $7,%edx paddd %xmm2,%xmm6 addl %ecx,%ebx pxor %xmm5,%xmm3 addl 52(%esp),%eax xorl %edx,%ebp movl %ebx,%esi roll $5,%ebx movdqa %xmm3,%xmm5 movdqa %xmm6,32(%esp) addl %ebp,%eax xorl %edx,%esi rorl $7,%ecx addl %ebx,%eax pslld $2,%xmm3 addl 56(%esp),%edi xorl %ecx,%esi psrld $30,%xmm5 movl %eax,%ebp roll $5,%eax addl %esi,%edi xorl %ecx,%ebp rorl $7,%ebx addl %eax,%edi por %xmm5,%xmm3 addl 60(%esp),%edx xorl %ebx,%ebp movdqa 96(%esp),%xmm5 movl %edi,%esi roll $5,%edi addl %ebp,%edx xorl %ebx,%esi rorl $7,%eax pshufd $238,%xmm2,%xmm6 addl %edi,%edx addl (%esp),%ecx pxor %xmm0,%xmm4 punpcklqdq %xmm3,%xmm6 xorl %eax,%esi movl %edx,%ebp roll $5,%edx pxor %xmm5,%xmm4 movdqa %xmm0,96(%esp) addl %esi,%ecx xorl %eax,%ebp movdqa %xmm7,%xmm0 rorl $7,%edi paddd %xmm3,%xmm7 addl %edx,%ecx pxor %xmm6,%xmm4 addl 4(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi roll $5,%ecx movdqa %xmm4,%xmm6 movdqa %xmm7,48(%esp) addl %ebp,%ebx xorl %edi,%esi rorl $7,%edx addl %ecx,%ebx pslld $2,%xmm4 addl 8(%esp),%eax xorl %edx,%esi psrld $30,%xmm6 movl %ebx,%ebp roll $5,%ebx addl %esi,%eax xorl %edx,%ebp rorl $7,%ecx addl %ebx,%eax por %xmm6,%xmm4 addl 12(%esp),%edi xorl %ecx,%ebp movdqa 64(%esp),%xmm6 movl %eax,%esi roll $5,%eax addl %ebp,%edi xorl %ecx,%esi rorl $7,%ebx pshufd $238,%xmm3,%xmm7 addl %eax,%edi addl 16(%esp),%edx pxor %xmm1,%xmm5 punpcklqdq %xmm4,%xmm7 xorl %ebx,%esi movl %edi,%ebp roll $5,%edi pxor %xmm6,%xmm5 movdqa %xmm1,64(%esp) addl %esi,%edx xorl %ebx,%ebp movdqa %xmm0,%xmm1 rorl $7,%eax paddd %xmm4,%xmm0 addl %edi,%edx pxor %xmm7,%xmm5 addl 20(%esp),%ecx xorl %eax,%ebp movl %edx,%esi roll $5,%edx movdqa %xmm5,%xmm7 movdqa %xmm0,(%esp) addl %ebp,%ecx xorl %eax,%esi rorl $7,%edi addl %edx,%ecx pslld $2,%xmm5 addl 24(%esp),%ebx xorl %edi,%esi psrld $30,%xmm7 movl %ecx,%ebp roll $5,%ecx addl 
%esi,%ebx xorl %edi,%ebp rorl $7,%edx addl %ecx,%ebx por %xmm7,%xmm5 addl 28(%esp),%eax movdqa 80(%esp),%xmm7 rorl $7,%ecx movl %ebx,%esi xorl %edx,%ebp roll $5,%ebx pshufd $238,%xmm4,%xmm0 addl %ebp,%eax xorl %ecx,%esi xorl %edx,%ecx addl %ebx,%eax addl 32(%esp),%edi pxor %xmm2,%xmm6 punpcklqdq %xmm5,%xmm0 andl %ecx,%esi xorl %edx,%ecx rorl $7,%ebx pxor %xmm7,%xmm6 movdqa %xmm2,80(%esp) movl %eax,%ebp xorl %ecx,%esi roll $5,%eax movdqa %xmm1,%xmm2 addl %esi,%edi paddd %xmm5,%xmm1 xorl %ebx,%ebp pxor %xmm0,%xmm6 xorl %ecx,%ebx addl %eax,%edi addl 36(%esp),%edx andl %ebx,%ebp movdqa %xmm6,%xmm0 movdqa %xmm1,16(%esp) xorl %ecx,%ebx rorl $7,%eax movl %edi,%esi xorl %ebx,%ebp roll $5,%edi pslld $2,%xmm6 addl %ebp,%edx xorl %eax,%esi psrld $30,%xmm0 xorl %ebx,%eax addl %edi,%edx addl 40(%esp),%ecx andl %eax,%esi xorl %ebx,%eax rorl $7,%edi por %xmm0,%xmm6 movl %edx,%ebp xorl %eax,%esi movdqa 96(%esp),%xmm0 roll $5,%edx addl %esi,%ecx xorl %edi,%ebp xorl %eax,%edi addl %edx,%ecx pshufd $238,%xmm5,%xmm1 addl 44(%esp),%ebx andl %edi,%ebp xorl %eax,%edi rorl $7,%edx movl %ecx,%esi xorl %edi,%ebp roll $5,%ecx addl %ebp,%ebx xorl %edx,%esi xorl %edi,%edx addl %ecx,%ebx addl 48(%esp),%eax pxor %xmm3,%xmm7 punpcklqdq %xmm6,%xmm1 andl %edx,%esi xorl %edi,%edx rorl $7,%ecx pxor %xmm0,%xmm7 movdqa %xmm3,96(%esp) movl %ebx,%ebp xorl %edx,%esi roll $5,%ebx movdqa 144(%esp),%xmm3 addl %esi,%eax paddd %xmm6,%xmm2 xorl %ecx,%ebp pxor %xmm1,%xmm7 xorl %edx,%ecx addl %ebx,%eax addl 52(%esp),%edi andl %ecx,%ebp movdqa %xmm7,%xmm1 movdqa %xmm2,32(%esp) xorl %edx,%ecx rorl $7,%ebx movl %eax,%esi xorl %ecx,%ebp roll $5,%eax pslld $2,%xmm7 addl %ebp,%edi xorl %ebx,%esi psrld $30,%xmm1 xorl %ecx,%ebx addl %eax,%edi addl 56(%esp),%edx andl %ebx,%esi xorl %ecx,%ebx rorl $7,%eax por %xmm1,%xmm7 movl %edi,%ebp xorl %ebx,%esi movdqa 64(%esp),%xmm1 roll $5,%edi addl %esi,%edx xorl %eax,%ebp xorl %ebx,%eax addl %edi,%edx pshufd $238,%xmm6,%xmm2 addl 60(%esp),%ecx andl %eax,%ebp xorl %ebx,%eax rorl $7,%edi movl %edx,%esi xorl %eax,%ebp roll $5,%edx addl %ebp,%ecx xorl %edi,%esi xorl %eax,%edi addl %edx,%ecx addl (%esp),%ebx pxor %xmm4,%xmm0 punpcklqdq %xmm7,%xmm2 andl %edi,%esi xorl %eax,%edi rorl $7,%edx pxor %xmm1,%xmm0 movdqa %xmm4,64(%esp) movl %ecx,%ebp xorl %edi,%esi roll $5,%ecx movdqa %xmm3,%xmm4 addl %esi,%ebx paddd %xmm7,%xmm3 xorl %edx,%ebp pxor %xmm2,%xmm0 xorl %edi,%edx addl %ecx,%ebx addl 4(%esp),%eax andl %edx,%ebp movdqa %xmm0,%xmm2 movdqa %xmm3,48(%esp) xorl %edi,%edx rorl $7,%ecx movl %ebx,%esi xorl %edx,%ebp roll $5,%ebx pslld $2,%xmm0 addl %ebp,%eax xorl %ecx,%esi psrld $30,%xmm2 xorl %edx,%ecx addl %ebx,%eax addl 8(%esp),%edi andl %ecx,%esi xorl %edx,%ecx rorl $7,%ebx por %xmm2,%xmm0 movl %eax,%ebp xorl %ecx,%esi movdqa 80(%esp),%xmm2 roll $5,%eax addl %esi,%edi xorl %ebx,%ebp xorl %ecx,%ebx addl %eax,%edi pshufd $238,%xmm7,%xmm3 addl 12(%esp),%edx andl %ebx,%ebp xorl %ecx,%ebx rorl $7,%eax movl %edi,%esi xorl %ebx,%ebp roll $5,%edi addl %ebp,%edx xorl %eax,%esi xorl %ebx,%eax addl %edi,%edx addl 16(%esp),%ecx pxor %xmm5,%xmm1 punpcklqdq %xmm0,%xmm3 andl %eax,%esi xorl %ebx,%eax rorl $7,%edi pxor %xmm2,%xmm1 movdqa %xmm5,80(%esp) movl %edx,%ebp xorl %eax,%esi roll $5,%edx movdqa %xmm4,%xmm5 addl %esi,%ecx paddd %xmm0,%xmm4 xorl %edi,%ebp pxor %xmm3,%xmm1 xorl %eax,%edi addl %edx,%ecx addl 20(%esp),%ebx andl %edi,%ebp movdqa %xmm1,%xmm3 movdqa %xmm4,(%esp) xorl %eax,%edi rorl $7,%edx movl %ecx,%esi xorl %edi,%ebp roll $5,%ecx pslld $2,%xmm1 addl %ebp,%ebx xorl %edx,%esi psrld $30,%xmm3 xorl %edi,%edx addl 
%ecx,%ebx addl 24(%esp),%eax andl %edx,%esi xorl %edi,%edx rorl $7,%ecx por %xmm3,%xmm1 movl %ebx,%ebp xorl %edx,%esi movdqa 96(%esp),%xmm3 roll $5,%ebx addl %esi,%eax xorl %ecx,%ebp xorl %edx,%ecx addl %ebx,%eax pshufd $238,%xmm0,%xmm4 addl 28(%esp),%edi andl %ecx,%ebp xorl %edx,%ecx rorl $7,%ebx movl %eax,%esi xorl %ecx,%ebp roll $5,%eax addl %ebp,%edi xorl %ebx,%esi xorl %ecx,%ebx addl %eax,%edi addl 32(%esp),%edx pxor %xmm6,%xmm2 punpcklqdq %xmm1,%xmm4 andl %ebx,%esi xorl %ecx,%ebx rorl $7,%eax pxor %xmm3,%xmm2 movdqa %xmm6,96(%esp) movl %edi,%ebp xorl %ebx,%esi roll $5,%edi movdqa %xmm5,%xmm6 addl %esi,%edx paddd %xmm1,%xmm5 xorl %eax,%ebp pxor %xmm4,%xmm2 xorl %ebx,%eax addl %edi,%edx addl 36(%esp),%ecx andl %eax,%ebp movdqa %xmm2,%xmm4 movdqa %xmm5,16(%esp) xorl %ebx,%eax rorl $7,%edi movl %edx,%esi xorl %eax,%ebp roll $5,%edx pslld $2,%xmm2 addl %ebp,%ecx xorl %edi,%esi psrld $30,%xmm4 xorl %eax,%edi addl %edx,%ecx addl 40(%esp),%ebx andl %edi,%esi xorl %eax,%edi rorl $7,%edx por %xmm4,%xmm2 movl %ecx,%ebp xorl %edi,%esi movdqa 64(%esp),%xmm4 roll $5,%ecx addl %esi,%ebx xorl %edx,%ebp xorl %edi,%edx addl %ecx,%ebx pshufd $238,%xmm1,%xmm5 addl 44(%esp),%eax andl %edx,%ebp xorl %edi,%edx rorl $7,%ecx movl %ebx,%esi xorl %edx,%ebp roll $5,%ebx addl %ebp,%eax xorl %edx,%esi addl %ebx,%eax addl 48(%esp),%edi pxor %xmm7,%xmm3 punpcklqdq %xmm2,%xmm5 xorl %ecx,%esi movl %eax,%ebp roll $5,%eax pxor %xmm4,%xmm3 movdqa %xmm7,64(%esp) addl %esi,%edi xorl %ecx,%ebp movdqa %xmm6,%xmm7 rorl $7,%ebx paddd %xmm2,%xmm6 addl %eax,%edi pxor %xmm5,%xmm3 addl 52(%esp),%edx xorl %ebx,%ebp movl %edi,%esi roll $5,%edi movdqa %xmm3,%xmm5 movdqa %xmm6,32(%esp) addl %ebp,%edx xorl %ebx,%esi rorl $7,%eax addl %edi,%edx pslld $2,%xmm3 addl 56(%esp),%ecx xorl %eax,%esi psrld $30,%xmm5 movl %edx,%ebp roll $5,%edx addl %esi,%ecx xorl %eax,%ebp rorl $7,%edi addl %edx,%ecx por %xmm5,%xmm3 addl 60(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi roll $5,%ecx addl %ebp,%ebx xorl %edi,%esi rorl $7,%edx addl %ecx,%ebx addl (%esp),%eax xorl %edx,%esi movl %ebx,%ebp roll $5,%ebx addl %esi,%eax xorl %edx,%ebp rorl $7,%ecx paddd %xmm3,%xmm7 addl %ebx,%eax addl 4(%esp),%edi xorl %ecx,%ebp movl %eax,%esi movdqa %xmm7,48(%esp) roll $5,%eax addl %ebp,%edi xorl %ecx,%esi rorl $7,%ebx addl %eax,%edi addl 8(%esp),%edx xorl %ebx,%esi movl %edi,%ebp roll $5,%edi addl %esi,%edx xorl %ebx,%ebp rorl $7,%eax addl %edi,%edx addl 12(%esp),%ecx xorl %eax,%ebp movl %edx,%esi roll $5,%edx addl %ebp,%ecx xorl %eax,%esi rorl $7,%edi addl %edx,%ecx movl 196(%esp),%ebp cmpl 200(%esp),%ebp je .L003done movdqa 160(%esp),%xmm7 movdqa 176(%esp),%xmm6 movdqu (%ebp),%xmm0 movdqu 16(%ebp),%xmm1 movdqu 32(%ebp),%xmm2 movdqu 48(%ebp),%xmm3 addl $64,%ebp .byte 102,15,56,0,198 movl %ebp,196(%esp) movdqa %xmm7,96(%esp) addl 16(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp roll $5,%ecx addl %esi,%ebx xorl %edi,%ebp rorl $7,%edx .byte 102,15,56,0,206 addl %ecx,%ebx addl 20(%esp),%eax xorl %edx,%ebp movl %ebx,%esi paddd %xmm7,%xmm0 roll $5,%ebx addl %ebp,%eax xorl %edx,%esi rorl $7,%ecx movdqa %xmm0,(%esp) addl %ebx,%eax addl 24(%esp),%edi xorl %ecx,%esi movl %eax,%ebp psubd %xmm7,%xmm0 roll $5,%eax addl %esi,%edi xorl %ecx,%ebp rorl $7,%ebx addl %eax,%edi addl 28(%esp),%edx xorl %ebx,%ebp movl %edi,%esi roll $5,%edi addl %ebp,%edx xorl %ebx,%esi rorl $7,%eax addl %edi,%edx addl 32(%esp),%ecx xorl %eax,%esi movl %edx,%ebp roll $5,%edx addl %esi,%ecx xorl %eax,%ebp rorl $7,%edi .byte 102,15,56,0,214 addl %edx,%ecx addl 36(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi paddd 
%xmm7,%xmm1 roll $5,%ecx addl %ebp,%ebx xorl %edi,%esi rorl $7,%edx movdqa %xmm1,16(%esp) addl %ecx,%ebx addl 40(%esp),%eax xorl %edx,%esi movl %ebx,%ebp psubd %xmm7,%xmm1 roll $5,%ebx addl %esi,%eax xorl %edx,%ebp rorl $7,%ecx addl %ebx,%eax addl 44(%esp),%edi xorl %ecx,%ebp movl %eax,%esi roll $5,%eax addl %ebp,%edi xorl %ecx,%esi rorl $7,%ebx addl %eax,%edi addl 48(%esp),%edx xorl %ebx,%esi movl %edi,%ebp roll $5,%edi addl %esi,%edx xorl %ebx,%ebp rorl $7,%eax .byte 102,15,56,0,222 addl %edi,%edx addl 52(%esp),%ecx xorl %eax,%ebp movl %edx,%esi paddd %xmm7,%xmm2 roll $5,%edx addl %ebp,%ecx xorl %eax,%esi rorl $7,%edi movdqa %xmm2,32(%esp) addl %edx,%ecx addl 56(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp psubd %xmm7,%xmm2 roll $5,%ecx addl %esi,%ebx xorl %edi,%ebp rorl $7,%edx addl %ecx,%ebx addl 60(%esp),%eax xorl %edx,%ebp movl %ebx,%esi roll $5,%ebx addl %ebp,%eax rorl $7,%ecx addl %ebx,%eax movl 192(%esp),%ebp addl (%ebp),%eax addl 4(%ebp),%esi addl 8(%ebp),%ecx movl %eax,(%ebp) addl 12(%ebp),%edx movl %esi,4(%ebp) addl 16(%ebp),%edi movl %ecx,8(%ebp) movl %ecx,%ebx movl %edx,12(%ebp) xorl %edx,%ebx movl %edi,16(%ebp) movl %esi,%ebp pshufd $238,%xmm0,%xmm4 andl %ebx,%esi movl %ebp,%ebx jmp .L002loop .align 16 .L003done: addl 16(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp roll $5,%ecx addl %esi,%ebx xorl %edi,%ebp rorl $7,%edx addl %ecx,%ebx addl 20(%esp),%eax xorl %edx,%ebp movl %ebx,%esi roll $5,%ebx addl %ebp,%eax xorl %edx,%esi rorl $7,%ecx addl %ebx,%eax addl 24(%esp),%edi xorl %ecx,%esi movl %eax,%ebp roll $5,%eax addl %esi,%edi xorl %ecx,%ebp rorl $7,%ebx addl %eax,%edi addl 28(%esp),%edx xorl %ebx,%ebp movl %edi,%esi roll $5,%edi addl %ebp,%edx xorl %ebx,%esi rorl $7,%eax addl %edi,%edx addl 32(%esp),%ecx xorl %eax,%esi movl %edx,%ebp roll $5,%edx addl %esi,%ecx xorl %eax,%ebp rorl $7,%edi addl %edx,%ecx addl 36(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi roll $5,%ecx addl %ebp,%ebx xorl %edi,%esi rorl $7,%edx addl %ecx,%ebx addl 40(%esp),%eax xorl %edx,%esi movl %ebx,%ebp roll $5,%ebx addl %esi,%eax xorl %edx,%ebp rorl $7,%ecx addl %ebx,%eax addl 44(%esp),%edi xorl %ecx,%ebp movl %eax,%esi roll $5,%eax addl %ebp,%edi xorl %ecx,%esi rorl $7,%ebx addl %eax,%edi addl 48(%esp),%edx xorl %ebx,%esi movl %edi,%ebp roll $5,%edi addl %esi,%edx xorl %ebx,%ebp rorl $7,%eax addl %edi,%edx addl 52(%esp),%ecx xorl %eax,%ebp movl %edx,%esi roll $5,%edx addl %ebp,%ecx xorl %eax,%esi rorl $7,%edi addl %edx,%ecx addl 56(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp roll $5,%ecx addl %esi,%ebx xorl %edi,%ebp rorl $7,%edx addl %ecx,%ebx addl 60(%esp),%eax xorl %edx,%ebp movl %ebx,%esi roll $5,%ebx addl %ebp,%eax rorl $7,%ecx addl %ebx,%eax movl 192(%esp),%ebp addl (%ebp),%eax movl 204(%esp),%esp addl 4(%ebp),%esi addl 8(%ebp),%ecx movl %eax,(%ebp) addl 12(%ebp),%edx movl %esi,4(%ebp) addl 16(%ebp),%edi movl %ecx,8(%ebp) movl %edx,12(%ebp) movl %edi,16(%ebp) popl %edi popl %esi popl %ebx popl %ebp ret .size sha1_block_data_order_ssse3,.-.L_sha1_block_data_order_ssse3_begin .globl sha1_block_data_order_avx .hidden sha1_block_data_order_avx .type sha1_block_data_order_avx,@function .align 16 sha1_block_data_order_avx: .L_sha1_block_data_order_avx_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi call .L004pic_point .L004pic_point: popl %ebp leal .LK_XX_XX-.L004pic_point(%ebp),%ebp vzeroall vmovdqa (%ebp),%xmm7 vmovdqa 16(%ebp),%xmm0 vmovdqa 32(%ebp),%xmm1 vmovdqa 48(%ebp),%xmm2 vmovdqa 64(%ebp),%xmm6 movl 20(%esp),%edi movl 24(%esp),%ebp movl 28(%esp),%edx movl %esp,%esi subl $208,%esp andl $-64,%esp vmovdqa 
%xmm0,112(%esp) vmovdqa %xmm1,128(%esp) vmovdqa %xmm2,144(%esp) shll $6,%edx vmovdqa %xmm7,160(%esp) addl %ebp,%edx vmovdqa %xmm6,176(%esp) addl $64,%ebp movl %edi,192(%esp) movl %ebp,196(%esp) movl %edx,200(%esp) movl %esi,204(%esp) movl (%edi),%eax movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx movl 16(%edi),%edi movl %ebx,%esi vmovdqu -64(%ebp),%xmm0 vmovdqu -48(%ebp),%xmm1 vmovdqu -32(%ebp),%xmm2 vmovdqu -16(%ebp),%xmm3 vpshufb %xmm6,%xmm0,%xmm0 vpshufb %xmm6,%xmm1,%xmm1 vpshufb %xmm6,%xmm2,%xmm2 vmovdqa %xmm7,96(%esp) vpshufb %xmm6,%xmm3,%xmm3 vpaddd %xmm7,%xmm0,%xmm4 vpaddd %xmm7,%xmm1,%xmm5 vpaddd %xmm7,%xmm2,%xmm6 vmovdqa %xmm4,(%esp) movl %ecx,%ebp vmovdqa %xmm5,16(%esp) xorl %edx,%ebp vmovdqa %xmm6,32(%esp) andl %ebp,%esi jmp .L005loop .align 16 .L005loop: shrdl $2,%ebx,%ebx xorl %edx,%esi vpalignr $8,%xmm0,%xmm1,%xmm4 movl %eax,%ebp addl (%esp),%edi vpaddd %xmm3,%xmm7,%xmm7 vmovdqa %xmm0,64(%esp) xorl %ecx,%ebx shldl $5,%eax,%eax vpsrldq $4,%xmm3,%xmm6 addl %esi,%edi andl %ebx,%ebp vpxor %xmm0,%xmm4,%xmm4 xorl %ecx,%ebx addl %eax,%edi vpxor %xmm2,%xmm6,%xmm6 shrdl $7,%eax,%eax xorl %ecx,%ebp vmovdqa %xmm7,48(%esp) movl %edi,%esi addl 4(%esp),%edx vpxor %xmm6,%xmm4,%xmm4 xorl %ebx,%eax shldl $5,%edi,%edi addl %ebp,%edx andl %eax,%esi vpsrld $31,%xmm4,%xmm6 xorl %ebx,%eax addl %edi,%edx shrdl $7,%edi,%edi xorl %ebx,%esi vpslldq $12,%xmm4,%xmm0 vpaddd %xmm4,%xmm4,%xmm4 movl %edx,%ebp addl 8(%esp),%ecx xorl %eax,%edi shldl $5,%edx,%edx vpsrld $30,%xmm0,%xmm7 vpor %xmm6,%xmm4,%xmm4 addl %esi,%ecx andl %edi,%ebp xorl %eax,%edi addl %edx,%ecx vpslld $2,%xmm0,%xmm0 shrdl $7,%edx,%edx xorl %eax,%ebp vpxor %xmm7,%xmm4,%xmm4 movl %ecx,%esi addl 12(%esp),%ebx xorl %edi,%edx shldl $5,%ecx,%ecx vpxor %xmm0,%xmm4,%xmm4 addl %ebp,%ebx andl %edx,%esi vmovdqa 96(%esp),%xmm0 xorl %edi,%edx addl %ecx,%ebx shrdl $7,%ecx,%ecx xorl %edi,%esi vpalignr $8,%xmm1,%xmm2,%xmm5 movl %ebx,%ebp addl 16(%esp),%eax vpaddd %xmm4,%xmm0,%xmm0 vmovdqa %xmm1,80(%esp) xorl %edx,%ecx shldl $5,%ebx,%ebx vpsrldq $4,%xmm4,%xmm7 addl %esi,%eax andl %ecx,%ebp vpxor %xmm1,%xmm5,%xmm5 xorl %edx,%ecx addl %ebx,%eax vpxor %xmm3,%xmm7,%xmm7 shrdl $7,%ebx,%ebx xorl %edx,%ebp vmovdqa %xmm0,(%esp) movl %eax,%esi addl 20(%esp),%edi vpxor %xmm7,%xmm5,%xmm5 xorl %ecx,%ebx shldl $5,%eax,%eax addl %ebp,%edi andl %ebx,%esi vpsrld $31,%xmm5,%xmm7 xorl %ecx,%ebx addl %eax,%edi shrdl $7,%eax,%eax xorl %ecx,%esi vpslldq $12,%xmm5,%xmm1 vpaddd %xmm5,%xmm5,%xmm5 movl %edi,%ebp addl 24(%esp),%edx xorl %ebx,%eax shldl $5,%edi,%edi vpsrld $30,%xmm1,%xmm0 vpor %xmm7,%xmm5,%xmm5 addl %esi,%edx andl %eax,%ebp xorl %ebx,%eax addl %edi,%edx vpslld $2,%xmm1,%xmm1 shrdl $7,%edi,%edi xorl %ebx,%ebp vpxor %xmm0,%xmm5,%xmm5 movl %edx,%esi addl 28(%esp),%ecx xorl %eax,%edi shldl $5,%edx,%edx vpxor %xmm1,%xmm5,%xmm5 addl %ebp,%ecx andl %edi,%esi vmovdqa 112(%esp),%xmm1 xorl %eax,%edi addl %edx,%ecx shrdl $7,%edx,%edx xorl %eax,%esi vpalignr $8,%xmm2,%xmm3,%xmm6 movl %ecx,%ebp addl 32(%esp),%ebx vpaddd %xmm5,%xmm1,%xmm1 vmovdqa %xmm2,96(%esp) xorl %edi,%edx shldl $5,%ecx,%ecx vpsrldq $4,%xmm5,%xmm0 addl %esi,%ebx andl %edx,%ebp vpxor %xmm2,%xmm6,%xmm6 xorl %edi,%edx addl %ecx,%ebx vpxor %xmm4,%xmm0,%xmm0 shrdl $7,%ecx,%ecx xorl %edi,%ebp vmovdqa %xmm1,16(%esp) movl %ebx,%esi addl 36(%esp),%eax vpxor %xmm0,%xmm6,%xmm6 xorl %edx,%ecx shldl $5,%ebx,%ebx addl %ebp,%eax andl %ecx,%esi vpsrld $31,%xmm6,%xmm0 xorl %edx,%ecx addl %ebx,%eax shrdl $7,%ebx,%ebx xorl %edx,%esi vpslldq $12,%xmm6,%xmm2 vpaddd %xmm6,%xmm6,%xmm6 movl %eax,%ebp addl 40(%esp),%edi xorl 
%ecx,%ebx shldl $5,%eax,%eax vpsrld $30,%xmm2,%xmm1 vpor %xmm0,%xmm6,%xmm6 addl %esi,%edi andl %ebx,%ebp xorl %ecx,%ebx addl %eax,%edi vpslld $2,%xmm2,%xmm2 vmovdqa 64(%esp),%xmm0 shrdl $7,%eax,%eax xorl %ecx,%ebp vpxor %xmm1,%xmm6,%xmm6 movl %edi,%esi addl 44(%esp),%edx xorl %ebx,%eax shldl $5,%edi,%edi vpxor %xmm2,%xmm6,%xmm6 addl %ebp,%edx andl %eax,%esi vmovdqa 112(%esp),%xmm2 xorl %ebx,%eax addl %edi,%edx shrdl $7,%edi,%edi xorl %ebx,%esi vpalignr $8,%xmm3,%xmm4,%xmm7 movl %edx,%ebp addl 48(%esp),%ecx vpaddd %xmm6,%xmm2,%xmm2 vmovdqa %xmm3,64(%esp) xorl %eax,%edi shldl $5,%edx,%edx vpsrldq $4,%xmm6,%xmm1 addl %esi,%ecx andl %edi,%ebp vpxor %xmm3,%xmm7,%xmm7 xorl %eax,%edi addl %edx,%ecx vpxor %xmm5,%xmm1,%xmm1 shrdl $7,%edx,%edx xorl %eax,%ebp vmovdqa %xmm2,32(%esp) movl %ecx,%esi addl 52(%esp),%ebx vpxor %xmm1,%xmm7,%xmm7 xorl %edi,%edx shldl $5,%ecx,%ecx addl %ebp,%ebx andl %edx,%esi vpsrld $31,%xmm7,%xmm1 xorl %edi,%edx addl %ecx,%ebx shrdl $7,%ecx,%ecx xorl %edi,%esi vpslldq $12,%xmm7,%xmm3 vpaddd %xmm7,%xmm7,%xmm7 movl %ebx,%ebp addl 56(%esp),%eax xorl %edx,%ecx shldl $5,%ebx,%ebx vpsrld $30,%xmm3,%xmm2 vpor %xmm1,%xmm7,%xmm7 addl %esi,%eax andl %ecx,%ebp xorl %edx,%ecx addl %ebx,%eax vpslld $2,%xmm3,%xmm3 vmovdqa 80(%esp),%xmm1 shrdl $7,%ebx,%ebx xorl %edx,%ebp vpxor %xmm2,%xmm7,%xmm7 movl %eax,%esi addl 60(%esp),%edi xorl %ecx,%ebx shldl $5,%eax,%eax vpxor %xmm3,%xmm7,%xmm7 addl %ebp,%edi andl %ebx,%esi vmovdqa 112(%esp),%xmm3 xorl %ecx,%ebx addl %eax,%edi vpalignr $8,%xmm6,%xmm7,%xmm2 vpxor %xmm4,%xmm0,%xmm0 shrdl $7,%eax,%eax xorl %ecx,%esi movl %edi,%ebp addl (%esp),%edx vpxor %xmm1,%xmm0,%xmm0 vmovdqa %xmm4,80(%esp) xorl %ebx,%eax shldl $5,%edi,%edi vmovdqa %xmm3,%xmm4 vpaddd %xmm7,%xmm3,%xmm3 addl %esi,%edx andl %eax,%ebp vpxor %xmm2,%xmm0,%xmm0 xorl %ebx,%eax addl %edi,%edx shrdl $7,%edi,%edi xorl %ebx,%ebp vpsrld $30,%xmm0,%xmm2 vmovdqa %xmm3,48(%esp) movl %edx,%esi addl 4(%esp),%ecx xorl %eax,%edi shldl $5,%edx,%edx vpslld $2,%xmm0,%xmm0 addl %ebp,%ecx andl %edi,%esi xorl %eax,%edi addl %edx,%ecx shrdl $7,%edx,%edx xorl %eax,%esi movl %ecx,%ebp addl 8(%esp),%ebx vpor %xmm2,%xmm0,%xmm0 xorl %edi,%edx shldl $5,%ecx,%ecx vmovdqa 96(%esp),%xmm2 addl %esi,%ebx andl %edx,%ebp xorl %edi,%edx addl %ecx,%ebx addl 12(%esp),%eax xorl %edi,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx addl %ebp,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax vpalignr $8,%xmm7,%xmm0,%xmm3 vpxor %xmm5,%xmm1,%xmm1 addl 16(%esp),%edi xorl %ecx,%esi movl %eax,%ebp shldl $5,%eax,%eax vpxor %xmm2,%xmm1,%xmm1 vmovdqa %xmm5,96(%esp) addl %esi,%edi xorl %ecx,%ebp vmovdqa %xmm4,%xmm5 vpaddd %xmm0,%xmm4,%xmm4 shrdl $7,%ebx,%ebx addl %eax,%edi vpxor %xmm3,%xmm1,%xmm1 addl 20(%esp),%edx xorl %ebx,%ebp movl %edi,%esi shldl $5,%edi,%edi vpsrld $30,%xmm1,%xmm3 vmovdqa %xmm4,(%esp) addl %ebp,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %edi,%edx vpslld $2,%xmm1,%xmm1 addl 24(%esp),%ecx xorl %eax,%esi movl %edx,%ebp shldl $5,%edx,%edx addl %esi,%ecx xorl %eax,%ebp shrdl $7,%edi,%edi addl %edx,%ecx vpor %xmm3,%xmm1,%xmm1 addl 28(%esp),%ebx xorl %edi,%ebp vmovdqa 64(%esp),%xmm3 movl %ecx,%esi shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edi,%esi shrdl $7,%edx,%edx addl %ecx,%ebx vpalignr $8,%xmm0,%xmm1,%xmm4 vpxor %xmm6,%xmm2,%xmm2 addl 32(%esp),%eax xorl %edx,%esi movl %ebx,%ebp shldl $5,%ebx,%ebx vpxor %xmm3,%xmm2,%xmm2 vmovdqa %xmm6,64(%esp) addl %esi,%eax xorl %edx,%ebp vmovdqa 128(%esp),%xmm6 vpaddd %xmm1,%xmm5,%xmm5 shrdl $7,%ecx,%ecx addl %ebx,%eax vpxor %xmm4,%xmm2,%xmm2 addl 36(%esp),%edi xorl %ecx,%ebp movl 
%eax,%esi shldl $5,%eax,%eax vpsrld $30,%xmm2,%xmm4 vmovdqa %xmm5,16(%esp) addl %ebp,%edi xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%edi vpslld $2,%xmm2,%xmm2 addl 40(%esp),%edx xorl %ebx,%esi movl %edi,%ebp shldl $5,%edi,%edi addl %esi,%edx xorl %ebx,%ebp shrdl $7,%eax,%eax addl %edi,%edx vpor %xmm4,%xmm2,%xmm2 addl 44(%esp),%ecx xorl %eax,%ebp vmovdqa 80(%esp),%xmm4 movl %edx,%esi shldl $5,%edx,%edx addl %ebp,%ecx xorl %eax,%esi shrdl $7,%edi,%edi addl %edx,%ecx vpalignr $8,%xmm1,%xmm2,%xmm5 vpxor %xmm7,%xmm3,%xmm3 addl 48(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp shldl $5,%ecx,%ecx vpxor %xmm4,%xmm3,%xmm3 vmovdqa %xmm7,80(%esp) addl %esi,%ebx xorl %edi,%ebp vmovdqa %xmm6,%xmm7 vpaddd %xmm2,%xmm6,%xmm6 shrdl $7,%edx,%edx addl %ecx,%ebx vpxor %xmm5,%xmm3,%xmm3 addl 52(%esp),%eax xorl %edx,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx vpsrld $30,%xmm3,%xmm5 vmovdqa %xmm6,32(%esp) addl %ebp,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax vpslld $2,%xmm3,%xmm3 addl 56(%esp),%edi xorl %ecx,%esi movl %eax,%ebp shldl $5,%eax,%eax addl %esi,%edi xorl %ecx,%ebp shrdl $7,%ebx,%ebx addl %eax,%edi vpor %xmm5,%xmm3,%xmm3 addl 60(%esp),%edx xorl %ebx,%ebp vmovdqa 96(%esp),%xmm5 movl %edi,%esi shldl $5,%edi,%edi addl %ebp,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %edi,%edx vpalignr $8,%xmm2,%xmm3,%xmm6 vpxor %xmm0,%xmm4,%xmm4 addl (%esp),%ecx xorl %eax,%esi movl %edx,%ebp shldl $5,%edx,%edx vpxor %xmm5,%xmm4,%xmm4 vmovdqa %xmm0,96(%esp) addl %esi,%ecx xorl %eax,%ebp vmovdqa %xmm7,%xmm0 vpaddd %xmm3,%xmm7,%xmm7 shrdl $7,%edi,%edi addl %edx,%ecx vpxor %xmm6,%xmm4,%xmm4 addl 4(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi shldl $5,%ecx,%ecx vpsrld $30,%xmm4,%xmm6 vmovdqa %xmm7,48(%esp) addl %ebp,%ebx xorl %edi,%esi shrdl $7,%edx,%edx addl %ecx,%ebx vpslld $2,%xmm4,%xmm4 addl 8(%esp),%eax xorl %edx,%esi movl %ebx,%ebp shldl $5,%ebx,%ebx addl %esi,%eax xorl %edx,%ebp shrdl $7,%ecx,%ecx addl %ebx,%eax vpor %xmm6,%xmm4,%xmm4 addl 12(%esp),%edi xorl %ecx,%ebp vmovdqa 64(%esp),%xmm6 movl %eax,%esi shldl $5,%eax,%eax addl %ebp,%edi xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%edi vpalignr $8,%xmm3,%xmm4,%xmm7 vpxor %xmm1,%xmm5,%xmm5 addl 16(%esp),%edx xorl %ebx,%esi movl %edi,%ebp shldl $5,%edi,%edi vpxor %xmm6,%xmm5,%xmm5 vmovdqa %xmm1,64(%esp) addl %esi,%edx xorl %ebx,%ebp vmovdqa %xmm0,%xmm1 vpaddd %xmm4,%xmm0,%xmm0 shrdl $7,%eax,%eax addl %edi,%edx vpxor %xmm7,%xmm5,%xmm5 addl 20(%esp),%ecx xorl %eax,%ebp movl %edx,%esi shldl $5,%edx,%edx vpsrld $30,%xmm5,%xmm7 vmovdqa %xmm0,(%esp) addl %ebp,%ecx xorl %eax,%esi shrdl $7,%edi,%edi addl %edx,%ecx vpslld $2,%xmm5,%xmm5 addl 24(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edi,%ebp shrdl $7,%edx,%edx addl %ecx,%ebx vpor %xmm7,%xmm5,%xmm5 addl 28(%esp),%eax vmovdqa 80(%esp),%xmm7 shrdl $7,%ecx,%ecx movl %ebx,%esi xorl %edx,%ebp shldl $5,%ebx,%ebx addl %ebp,%eax xorl %ecx,%esi xorl %edx,%ecx addl %ebx,%eax vpalignr $8,%xmm4,%xmm5,%xmm0 vpxor %xmm2,%xmm6,%xmm6 addl 32(%esp),%edi andl %ecx,%esi xorl %edx,%ecx shrdl $7,%ebx,%ebx vpxor %xmm7,%xmm6,%xmm6 vmovdqa %xmm2,80(%esp) movl %eax,%ebp xorl %ecx,%esi vmovdqa %xmm1,%xmm2 vpaddd %xmm5,%xmm1,%xmm1 shldl $5,%eax,%eax addl %esi,%edi vpxor %xmm0,%xmm6,%xmm6 xorl %ebx,%ebp xorl %ecx,%ebx addl %eax,%edi addl 36(%esp),%edx vpsrld $30,%xmm6,%xmm0 vmovdqa %xmm1,16(%esp) andl %ebx,%ebp xorl %ecx,%ebx shrdl $7,%eax,%eax movl %edi,%esi vpslld $2,%xmm6,%xmm6 xorl %ebx,%ebp shldl $5,%edi,%edi addl %ebp,%edx xorl %eax,%esi xorl %ebx,%eax addl %edi,%edx addl 40(%esp),%ecx andl %eax,%esi vpor 
%xmm0,%xmm6,%xmm6 xorl %ebx,%eax shrdl $7,%edi,%edi vmovdqa 96(%esp),%xmm0 movl %edx,%ebp xorl %eax,%esi shldl $5,%edx,%edx addl %esi,%ecx xorl %edi,%ebp xorl %eax,%edi addl %edx,%ecx addl 44(%esp),%ebx andl %edi,%ebp xorl %eax,%edi shrdl $7,%edx,%edx movl %ecx,%esi xorl %edi,%ebp shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edx,%esi xorl %edi,%edx addl %ecx,%ebx vpalignr $8,%xmm5,%xmm6,%xmm1 vpxor %xmm3,%xmm7,%xmm7 addl 48(%esp),%eax andl %edx,%esi xorl %edi,%edx shrdl $7,%ecx,%ecx vpxor %xmm0,%xmm7,%xmm7 vmovdqa %xmm3,96(%esp) movl %ebx,%ebp xorl %edx,%esi vmovdqa 144(%esp),%xmm3 vpaddd %xmm6,%xmm2,%xmm2 shldl $5,%ebx,%ebx addl %esi,%eax vpxor %xmm1,%xmm7,%xmm7 xorl %ecx,%ebp xorl %edx,%ecx addl %ebx,%eax addl 52(%esp),%edi vpsrld $30,%xmm7,%xmm1 vmovdqa %xmm2,32(%esp) andl %ecx,%ebp xorl %edx,%ecx shrdl $7,%ebx,%ebx movl %eax,%esi vpslld $2,%xmm7,%xmm7 xorl %ecx,%ebp shldl $5,%eax,%eax addl %ebp,%edi xorl %ebx,%esi xorl %ecx,%ebx addl %eax,%edi addl 56(%esp),%edx andl %ebx,%esi vpor %xmm1,%xmm7,%xmm7 xorl %ecx,%ebx shrdl $7,%eax,%eax vmovdqa 64(%esp),%xmm1 movl %edi,%ebp xorl %ebx,%esi shldl $5,%edi,%edi addl %esi,%edx xorl %eax,%ebp xorl %ebx,%eax addl %edi,%edx addl 60(%esp),%ecx andl %eax,%ebp xorl %ebx,%eax shrdl $7,%edi,%edi movl %edx,%esi xorl %eax,%ebp shldl $5,%edx,%edx addl %ebp,%ecx xorl %edi,%esi xorl %eax,%edi addl %edx,%ecx vpalignr $8,%xmm6,%xmm7,%xmm2 vpxor %xmm4,%xmm0,%xmm0 addl (%esp),%ebx andl %edi,%esi xorl %eax,%edi shrdl $7,%edx,%edx vpxor %xmm1,%xmm0,%xmm0 vmovdqa %xmm4,64(%esp) movl %ecx,%ebp xorl %edi,%esi vmovdqa %xmm3,%xmm4 vpaddd %xmm7,%xmm3,%xmm3 shldl $5,%ecx,%ecx addl %esi,%ebx vpxor %xmm2,%xmm0,%xmm0 xorl %edx,%ebp xorl %edi,%edx addl %ecx,%ebx addl 4(%esp),%eax vpsrld $30,%xmm0,%xmm2 vmovdqa %xmm3,48(%esp) andl %edx,%ebp xorl %edi,%edx shrdl $7,%ecx,%ecx movl %ebx,%esi vpslld $2,%xmm0,%xmm0 xorl %edx,%ebp shldl $5,%ebx,%ebx addl %ebp,%eax xorl %ecx,%esi xorl %edx,%ecx addl %ebx,%eax addl 8(%esp),%edi andl %ecx,%esi vpor %xmm2,%xmm0,%xmm0 xorl %edx,%ecx shrdl $7,%ebx,%ebx vmovdqa 80(%esp),%xmm2 movl %eax,%ebp xorl %ecx,%esi shldl $5,%eax,%eax addl %esi,%edi xorl %ebx,%ebp xorl %ecx,%ebx addl %eax,%edi addl 12(%esp),%edx andl %ebx,%ebp xorl %ecx,%ebx shrdl $7,%eax,%eax movl %edi,%esi xorl %ebx,%ebp shldl $5,%edi,%edi addl %ebp,%edx xorl %eax,%esi xorl %ebx,%eax addl %edi,%edx vpalignr $8,%xmm7,%xmm0,%xmm3 vpxor %xmm5,%xmm1,%xmm1 addl 16(%esp),%ecx andl %eax,%esi xorl %ebx,%eax shrdl $7,%edi,%edi vpxor %xmm2,%xmm1,%xmm1 vmovdqa %xmm5,80(%esp) movl %edx,%ebp xorl %eax,%esi vmovdqa %xmm4,%xmm5 vpaddd %xmm0,%xmm4,%xmm4 shldl $5,%edx,%edx addl %esi,%ecx vpxor %xmm3,%xmm1,%xmm1 xorl %edi,%ebp xorl %eax,%edi addl %edx,%ecx addl 20(%esp),%ebx vpsrld $30,%xmm1,%xmm3 vmovdqa %xmm4,(%esp) andl %edi,%ebp xorl %eax,%edi shrdl $7,%edx,%edx movl %ecx,%esi vpslld $2,%xmm1,%xmm1 xorl %edi,%ebp shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edx,%esi xorl %edi,%edx addl %ecx,%ebx addl 24(%esp),%eax andl %edx,%esi vpor %xmm3,%xmm1,%xmm1 xorl %edi,%edx shrdl $7,%ecx,%ecx vmovdqa 96(%esp),%xmm3 movl %ebx,%ebp xorl %edx,%esi shldl $5,%ebx,%ebx addl %esi,%eax xorl %ecx,%ebp xorl %edx,%ecx addl %ebx,%eax addl 28(%esp),%edi andl %ecx,%ebp xorl %edx,%ecx shrdl $7,%ebx,%ebx movl %eax,%esi xorl %ecx,%ebp shldl $5,%eax,%eax addl %ebp,%edi xorl %ebx,%esi xorl %ecx,%ebx addl %eax,%edi vpalignr $8,%xmm0,%xmm1,%xmm4 vpxor %xmm6,%xmm2,%xmm2 addl 32(%esp),%edx andl %ebx,%esi xorl %ecx,%ebx shrdl $7,%eax,%eax vpxor %xmm3,%xmm2,%xmm2 vmovdqa %xmm6,96(%esp) movl %edi,%ebp xorl %ebx,%esi vmovdqa %xmm5,%xmm6 
vpaddd %xmm1,%xmm5,%xmm5 shldl $5,%edi,%edi addl %esi,%edx vpxor %xmm4,%xmm2,%xmm2 xorl %eax,%ebp xorl %ebx,%eax addl %edi,%edx addl 36(%esp),%ecx vpsrld $30,%xmm2,%xmm4 vmovdqa %xmm5,16(%esp) andl %eax,%ebp xorl %ebx,%eax shrdl $7,%edi,%edi movl %edx,%esi vpslld $2,%xmm2,%xmm2 xorl %eax,%ebp shldl $5,%edx,%edx addl %ebp,%ecx xorl %edi,%esi xorl %eax,%edi addl %edx,%ecx addl 40(%esp),%ebx andl %edi,%esi vpor %xmm4,%xmm2,%xmm2 xorl %eax,%edi shrdl $7,%edx,%edx vmovdqa 64(%esp),%xmm4 movl %ecx,%ebp xorl %edi,%esi shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edx,%ebp xorl %edi,%edx addl %ecx,%ebx addl 44(%esp),%eax andl %edx,%ebp xorl %edi,%edx shrdl $7,%ecx,%ecx movl %ebx,%esi xorl %edx,%ebp shldl $5,%ebx,%ebx addl %ebp,%eax xorl %edx,%esi addl %ebx,%eax vpalignr $8,%xmm1,%xmm2,%xmm5 vpxor %xmm7,%xmm3,%xmm3 addl 48(%esp),%edi xorl %ecx,%esi movl %eax,%ebp shldl $5,%eax,%eax vpxor %xmm4,%xmm3,%xmm3 vmovdqa %xmm7,64(%esp) addl %esi,%edi xorl %ecx,%ebp vmovdqa %xmm6,%xmm7 vpaddd %xmm2,%xmm6,%xmm6 shrdl $7,%ebx,%ebx addl %eax,%edi vpxor %xmm5,%xmm3,%xmm3 addl 52(%esp),%edx xorl %ebx,%ebp movl %edi,%esi shldl $5,%edi,%edi vpsrld $30,%xmm3,%xmm5 vmovdqa %xmm6,32(%esp) addl %ebp,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %edi,%edx vpslld $2,%xmm3,%xmm3 addl 56(%esp),%ecx xorl %eax,%esi movl %edx,%ebp shldl $5,%edx,%edx addl %esi,%ecx xorl %eax,%ebp shrdl $7,%edi,%edi addl %edx,%ecx vpor %xmm5,%xmm3,%xmm3 addl 60(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edi,%esi shrdl $7,%edx,%edx addl %ecx,%ebx addl (%esp),%eax vpaddd %xmm3,%xmm7,%xmm7 xorl %edx,%esi movl %ebx,%ebp shldl $5,%ebx,%ebx addl %esi,%eax vmovdqa %xmm7,48(%esp) xorl %edx,%ebp shrdl $7,%ecx,%ecx addl %ebx,%eax addl 4(%esp),%edi xorl %ecx,%ebp movl %eax,%esi shldl $5,%eax,%eax addl %ebp,%edi xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%edi addl 8(%esp),%edx xorl %ebx,%esi movl %edi,%ebp shldl $5,%edi,%edi addl %esi,%edx xorl %ebx,%ebp shrdl $7,%eax,%eax addl %edi,%edx addl 12(%esp),%ecx xorl %eax,%ebp movl %edx,%esi shldl $5,%edx,%edx addl %ebp,%ecx xorl %eax,%esi shrdl $7,%edi,%edi addl %edx,%ecx movl 196(%esp),%ebp cmpl 200(%esp),%ebp je .L006done vmovdqa 160(%esp),%xmm7 vmovdqa 176(%esp),%xmm6 vmovdqu (%ebp),%xmm0 vmovdqu 16(%ebp),%xmm1 vmovdqu 32(%ebp),%xmm2 vmovdqu 48(%ebp),%xmm3 addl $64,%ebp vpshufb %xmm6,%xmm0,%xmm0 movl %ebp,196(%esp) vmovdqa %xmm7,96(%esp) addl 16(%esp),%ebx xorl %edi,%esi vpshufb %xmm6,%xmm1,%xmm1 movl %ecx,%ebp shldl $5,%ecx,%ecx vpaddd %xmm7,%xmm0,%xmm4 addl %esi,%ebx xorl %edi,%ebp shrdl $7,%edx,%edx addl %ecx,%ebx vmovdqa %xmm4,(%esp) addl 20(%esp),%eax xorl %edx,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx addl %ebp,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 24(%esp),%edi xorl %ecx,%esi movl %eax,%ebp shldl $5,%eax,%eax addl %esi,%edi xorl %ecx,%ebp shrdl $7,%ebx,%ebx addl %eax,%edi addl 28(%esp),%edx xorl %ebx,%ebp movl %edi,%esi shldl $5,%edi,%edi addl %ebp,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %edi,%edx addl 32(%esp),%ecx xorl %eax,%esi vpshufb %xmm6,%xmm2,%xmm2 movl %edx,%ebp shldl $5,%edx,%edx vpaddd %xmm7,%xmm1,%xmm5 addl %esi,%ecx xorl %eax,%ebp shrdl $7,%edi,%edi addl %edx,%ecx vmovdqa %xmm5,16(%esp) addl 36(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edi,%esi shrdl $7,%edx,%edx addl %ecx,%ebx addl 40(%esp),%eax xorl %edx,%esi movl %ebx,%ebp shldl $5,%ebx,%ebx addl %esi,%eax xorl %edx,%ebp shrdl $7,%ecx,%ecx addl %ebx,%eax addl 44(%esp),%edi xorl %ecx,%ebp movl %eax,%esi shldl $5,%eax,%eax addl %ebp,%edi xorl 
%ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%edi addl 48(%esp),%edx xorl %ebx,%esi vpshufb %xmm6,%xmm3,%xmm3 movl %edi,%ebp shldl $5,%edi,%edi vpaddd %xmm7,%xmm2,%xmm6 addl %esi,%edx xorl %ebx,%ebp shrdl $7,%eax,%eax addl %edi,%edx vmovdqa %xmm6,32(%esp) addl 52(%esp),%ecx xorl %eax,%ebp movl %edx,%esi shldl $5,%edx,%edx addl %ebp,%ecx xorl %eax,%esi shrdl $7,%edi,%edi addl %edx,%ecx addl 56(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edi,%ebp shrdl $7,%edx,%edx addl %ecx,%ebx addl 60(%esp),%eax xorl %edx,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx addl %ebp,%eax shrdl $7,%ecx,%ecx addl %ebx,%eax movl 192(%esp),%ebp addl (%ebp),%eax addl 4(%ebp),%esi addl 8(%ebp),%ecx movl %eax,(%ebp) addl 12(%ebp),%edx movl %esi,4(%ebp) addl 16(%ebp),%edi movl %ecx,%ebx movl %ecx,8(%ebp) xorl %edx,%ebx movl %edx,12(%ebp) movl %edi,16(%ebp) movl %esi,%ebp andl %ebx,%esi movl %ebp,%ebx jmp .L005loop .align 16 .L006done: addl 16(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edi,%ebp shrdl $7,%edx,%edx addl %ecx,%ebx addl 20(%esp),%eax xorl %edx,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx addl %ebp,%eax xorl %edx,%esi shrdl $7,%ecx,%ecx addl %ebx,%eax addl 24(%esp),%edi xorl %ecx,%esi movl %eax,%ebp shldl $5,%eax,%eax addl %esi,%edi xorl %ecx,%ebp shrdl $7,%ebx,%ebx addl %eax,%edi addl 28(%esp),%edx xorl %ebx,%ebp movl %edi,%esi shldl $5,%edi,%edi addl %ebp,%edx xorl %ebx,%esi shrdl $7,%eax,%eax addl %edi,%edx addl 32(%esp),%ecx xorl %eax,%esi movl %edx,%ebp shldl $5,%edx,%edx addl %esi,%ecx xorl %eax,%ebp shrdl $7,%edi,%edi addl %edx,%ecx addl 36(%esp),%ebx xorl %edi,%ebp movl %ecx,%esi shldl $5,%ecx,%ecx addl %ebp,%ebx xorl %edi,%esi shrdl $7,%edx,%edx addl %ecx,%ebx addl 40(%esp),%eax xorl %edx,%esi movl %ebx,%ebp shldl $5,%ebx,%ebx addl %esi,%eax xorl %edx,%ebp shrdl $7,%ecx,%ecx addl %ebx,%eax addl 44(%esp),%edi xorl %ecx,%ebp movl %eax,%esi shldl $5,%eax,%eax addl %ebp,%edi xorl %ecx,%esi shrdl $7,%ebx,%ebx addl %eax,%edi addl 48(%esp),%edx xorl %ebx,%esi movl %edi,%ebp shldl $5,%edi,%edi addl %esi,%edx xorl %ebx,%ebp shrdl $7,%eax,%eax addl %edi,%edx addl 52(%esp),%ecx xorl %eax,%ebp movl %edx,%esi shldl $5,%edx,%edx addl %ebp,%ecx xorl %eax,%esi shrdl $7,%edi,%edi addl %edx,%ecx addl 56(%esp),%ebx xorl %edi,%esi movl %ecx,%ebp shldl $5,%ecx,%ecx addl %esi,%ebx xorl %edi,%ebp shrdl $7,%edx,%edx addl %ecx,%ebx addl 60(%esp),%eax xorl %edx,%ebp movl %ebx,%esi shldl $5,%ebx,%ebx addl %ebp,%eax shrdl $7,%ecx,%ecx addl %ebx,%eax vzeroall movl 192(%esp),%ebp addl (%ebp),%eax movl 204(%esp),%esp addl 4(%ebp),%esi addl 8(%ebp),%ecx movl %eax,(%ebp) addl 12(%ebp),%edx movl %esi,4(%ebp) addl 16(%ebp),%edi movl %ecx,8(%ebp) movl %edx,12(%ebp) movl %edi,16(%ebp) popl %edi popl %esi popl %ebx popl %ebp ret .size sha1_block_data_order_avx,.-.L_sha1_block_data_order_avx_begin .align 64 .LK_XX_XX: .long 1518500249,1518500249,1518500249,1518500249 .long 1859775393,1859775393,1859775393,1859775393 .long 2400959708,2400959708,2400959708,2400959708 .long 3395469782,3395469782,3395469782,3395469782 .long 66051,67438087,134810123,202182159 .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115 .byte 102,111,114,109,32,102,111,114,32,120,56,54,44,32,67,82 .byte 89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112 .byte 114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
wlsfx/bnbb
16,212
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86/crypto/fipsmodule/vpaes-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text #ifdef BORINGSSL_DISPATCH_TEST #endif .align 64 .L_vpaes_consts: .long 218628480,235210255,168496130,67568393 .long 252381056,17041926,33884169,51187212 .long 252645135,252645135,252645135,252645135 .long 1512730624,3266504856,1377990664,3401244816 .long 830229760,1275146365,2969422977,3447763452 .long 3411033600,2979783055,338359620,2782886510 .long 4209124096,907596821,221174255,1006095553 .long 191964160,3799684038,3164090317,1589111125 .long 182528256,1777043520,2877432650,3265356744 .long 1874708224,3503451415,3305285752,363511674 .long 1606117888,3487855781,1093350906,2384367825 .long 197121,67569157,134941193,202313229 .long 67569157,134941193,202313229,197121 .long 134941193,202313229,197121,67569157 .long 202313229,197121,67569157,134941193 .long 33619971,100992007,168364043,235736079 .long 235736079,33619971,100992007,168364043 .long 168364043,235736079,33619971,100992007 .long 100992007,168364043,235736079,33619971 .long 50462976,117835012,185207048,252579084 .long 252314880,51251460,117574920,184942860 .long 184682752,252054788,50987272,118359308 .long 118099200,185467140,251790600,50727180 .long 2946363062,528716217,1300004225,1881839624 .long 1532713819,1532713819,1532713819,1532713819 .long 3602276352,4288629033,3737020424,4153884961 .long 1354558464,32357713,2958822624,3775749553 .long 1201988352,132424512,1572796698,503232858 .long 2213177600,1597421020,4103937655,675398315 .long 2749646592,4273543773,1511898873,121693092 .long 3040248576,1103263732,2871565598,1608280554 .long 2236667136,2588920351,482954393,64377734 .long 3069987328,291237287,2117370568,3650299247 .long 533321216,3573750986,2572112006,1401264716 .long 1339849704,2721158661,548607111,3445553514 .long 2128193280,3054596040,2183486460,1257083700 .long 655635200,1165381986,3923443150,2344132524 .long 190078720,256924420,290342170,357187870 .long 1610966272,2263057382,4103205268,309794674 .long 2592527872,2233205587,1335446729,3402964816 .long 3973531904,3225098121,3002836325,1918774430 .long 3870401024,2102906079,2284471353,4117666579 .long 617007872,1021508343,366931923,691083277 .long 2528395776,3491914898,2968704004,1613121270 .long 3445188352,3247741094,844474987,4093578302 .long 651481088,1190302358,1689581232,574775300 .long 4289380608,206939853,2555985458,2489840491 .long 2130264064,327674451,3566485037,3349835193 .long 2470714624,316102159,3636825756,3393945945 .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105 .byte 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83 .byte 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117 .byte 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105 .byte 118,101,114,115,105,116,121,41,0 .align 64 .hidden _vpaes_preheat .type _vpaes_preheat,@function .align 16 _vpaes_preheat: addl (%esp),%ebp movdqa -48(%ebp),%xmm7 movdqa -16(%ebp),%xmm6 ret .size _vpaes_preheat,.-_vpaes_preheat .hidden _vpaes_encrypt_core .type _vpaes_encrypt_core,@function .align 16 _vpaes_encrypt_core: movl $16,%ecx movl 240(%edx),%eax movdqa %xmm6,%xmm1 movdqa (%ebp),%xmm2 pandn %xmm0,%xmm1 pand %xmm6,%xmm0 movdqu (%edx),%xmm5 .byte 102,15,56,0,208 movdqa 16(%ebp),%xmm0 pxor %xmm5,%xmm2 psrld $4,%xmm1 addl $16,%edx .byte 102,15,56,0,193 leal 192(%ebp),%ebx pxor %xmm2,%xmm0 jmp .L000enc_entry .align 16 .L001enc_loop: movdqa 32(%ebp),%xmm4 movdqa 
48(%ebp),%xmm0 .byte 102,15,56,0,226 .byte 102,15,56,0,195 pxor %xmm5,%xmm4 movdqa 64(%ebp),%xmm5 pxor %xmm4,%xmm0 movdqa -64(%ebx,%ecx,1),%xmm1 .byte 102,15,56,0,234 movdqa 80(%ebp),%xmm2 movdqa (%ebx,%ecx,1),%xmm4 .byte 102,15,56,0,211 movdqa %xmm0,%xmm3 pxor %xmm5,%xmm2 .byte 102,15,56,0,193 addl $16,%edx pxor %xmm2,%xmm0 .byte 102,15,56,0,220 addl $16,%ecx pxor %xmm0,%xmm3 .byte 102,15,56,0,193 andl $48,%ecx subl $1,%eax pxor %xmm3,%xmm0 .L000enc_entry: movdqa %xmm6,%xmm1 movdqa -32(%ebp),%xmm5 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm6,%xmm0 .byte 102,15,56,0,232 movdqa %xmm7,%xmm3 pxor %xmm1,%xmm0 .byte 102,15,56,0,217 movdqa %xmm7,%xmm4 pxor %xmm5,%xmm3 .byte 102,15,56,0,224 movdqa %xmm7,%xmm2 pxor %xmm5,%xmm4 .byte 102,15,56,0,211 movdqa %xmm7,%xmm3 pxor %xmm0,%xmm2 .byte 102,15,56,0,220 movdqu (%edx),%xmm5 pxor %xmm1,%xmm3 jnz .L001enc_loop movdqa 96(%ebp),%xmm4 movdqa 112(%ebp),%xmm0 .byte 102,15,56,0,226 pxor %xmm5,%xmm4 .byte 102,15,56,0,195 movdqa 64(%ebx,%ecx,1),%xmm1 pxor %xmm4,%xmm0 .byte 102,15,56,0,193 ret .size _vpaes_encrypt_core,.-_vpaes_encrypt_core .hidden _vpaes_decrypt_core .type _vpaes_decrypt_core,@function .align 16 _vpaes_decrypt_core: leal 608(%ebp),%ebx movl 240(%edx),%eax movdqa %xmm6,%xmm1 movdqa -64(%ebx),%xmm2 pandn %xmm0,%xmm1 movl %eax,%ecx psrld $4,%xmm1 movdqu (%edx),%xmm5 shll $4,%ecx pand %xmm6,%xmm0 .byte 102,15,56,0,208 movdqa -48(%ebx),%xmm0 xorl $48,%ecx .byte 102,15,56,0,193 andl $48,%ecx pxor %xmm5,%xmm2 movdqa 176(%ebp),%xmm5 pxor %xmm2,%xmm0 addl $16,%edx leal -352(%ebx,%ecx,1),%ecx jmp .L002dec_entry .align 16 .L003dec_loop: movdqa -32(%ebx),%xmm4 movdqa -16(%ebx),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 movdqa (%ebx),%xmm4 pxor %xmm1,%xmm0 movdqa 16(%ebx),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,197 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 movdqa 32(%ebx),%xmm4 pxor %xmm1,%xmm0 movdqa 48(%ebx),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,197 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 movdqa 64(%ebx),%xmm4 pxor %xmm1,%xmm0 movdqa 80(%ebx),%xmm1 .byte 102,15,56,0,226 .byte 102,15,56,0,197 .byte 102,15,56,0,203 pxor %xmm4,%xmm0 addl $16,%edx .byte 102,15,58,15,237,12 pxor %xmm1,%xmm0 subl $1,%eax .L002dec_entry: movdqa %xmm6,%xmm1 movdqa -32(%ebp),%xmm2 pandn %xmm0,%xmm1 pand %xmm6,%xmm0 psrld $4,%xmm1 .byte 102,15,56,0,208 movdqa %xmm7,%xmm3 pxor %xmm1,%xmm0 .byte 102,15,56,0,217 movdqa %xmm7,%xmm4 pxor %xmm2,%xmm3 .byte 102,15,56,0,224 pxor %xmm2,%xmm4 movdqa %xmm7,%xmm2 .byte 102,15,56,0,211 movdqa %xmm7,%xmm3 pxor %xmm0,%xmm2 .byte 102,15,56,0,220 movdqu (%edx),%xmm0 pxor %xmm1,%xmm3 jnz .L003dec_loop movdqa 96(%ebx),%xmm4 .byte 102,15,56,0,226 pxor %xmm0,%xmm4 movdqa 112(%ebx),%xmm0 movdqa (%ecx),%xmm2 .byte 102,15,56,0,195 pxor %xmm4,%xmm0 .byte 102,15,56,0,194 ret .size _vpaes_decrypt_core,.-_vpaes_decrypt_core .hidden _vpaes_schedule_core .type _vpaes_schedule_core,@function .align 16 _vpaes_schedule_core: addl (%esp),%ebp movdqu (%esi),%xmm0 movdqa 320(%ebp),%xmm2 movdqa %xmm0,%xmm3 leal (%ebp),%ebx movdqa %xmm2,4(%esp) call _vpaes_schedule_transform movdqa %xmm0,%xmm7 testl %edi,%edi jnz .L004schedule_am_decrypting movdqu %xmm0,(%edx) jmp .L005schedule_go .L004schedule_am_decrypting: movdqa 256(%ebp,%ecx,1),%xmm1 .byte 102,15,56,0,217 movdqu %xmm3,(%edx) xorl $48,%ecx .L005schedule_go: cmpl $192,%eax ja .L006schedule_256 je .L007schedule_192 .L008schedule_128: movl $10,%eax .L009loop_schedule_128: call _vpaes_schedule_round decl %eax jz .L010schedule_mangle_last call _vpaes_schedule_mangle jmp 
.L009loop_schedule_128 .align 16 .L007schedule_192: movdqu 8(%esi),%xmm0 call _vpaes_schedule_transform movdqa %xmm0,%xmm6 pxor %xmm4,%xmm4 movhlps %xmm4,%xmm6 movl $4,%eax .L011loop_schedule_192: call _vpaes_schedule_round .byte 102,15,58,15,198,8 call _vpaes_schedule_mangle call _vpaes_schedule_192_smear call _vpaes_schedule_mangle call _vpaes_schedule_round decl %eax jz .L010schedule_mangle_last call _vpaes_schedule_mangle call _vpaes_schedule_192_smear jmp .L011loop_schedule_192 .align 16 .L006schedule_256: movdqu 16(%esi),%xmm0 call _vpaes_schedule_transform movl $7,%eax .L012loop_schedule_256: call _vpaes_schedule_mangle movdqa %xmm0,%xmm6 call _vpaes_schedule_round decl %eax jz .L010schedule_mangle_last call _vpaes_schedule_mangle pshufd $255,%xmm0,%xmm0 movdqa %xmm7,20(%esp) movdqa %xmm6,%xmm7 call .L_vpaes_schedule_low_round movdqa 20(%esp),%xmm7 jmp .L012loop_schedule_256 .align 16 .L010schedule_mangle_last: leal 384(%ebp),%ebx testl %edi,%edi jnz .L013schedule_mangle_last_dec movdqa 256(%ebp,%ecx,1),%xmm1 .byte 102,15,56,0,193 leal 352(%ebp),%ebx addl $32,%edx .L013schedule_mangle_last_dec: addl $-16,%edx pxor 336(%ebp),%xmm0 call _vpaes_schedule_transform movdqu %xmm0,(%edx) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 ret .size _vpaes_schedule_core,.-_vpaes_schedule_core .hidden _vpaes_schedule_192_smear .type _vpaes_schedule_192_smear,@function .align 16 _vpaes_schedule_192_smear: pshufd $128,%xmm6,%xmm1 pshufd $254,%xmm7,%xmm0 pxor %xmm1,%xmm6 pxor %xmm1,%xmm1 pxor %xmm0,%xmm6 movdqa %xmm6,%xmm0 movhlps %xmm1,%xmm6 ret .size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear .hidden _vpaes_schedule_round .type _vpaes_schedule_round,@function .align 16 _vpaes_schedule_round: movdqa 8(%esp),%xmm2 pxor %xmm1,%xmm1 .byte 102,15,58,15,202,15 .byte 102,15,58,15,210,15 pxor %xmm1,%xmm7 pshufd $255,%xmm0,%xmm0 .byte 102,15,58,15,192,1 movdqa %xmm2,8(%esp) .L_vpaes_schedule_low_round: movdqa %xmm7,%xmm1 pslldq $4,%xmm7 pxor %xmm1,%xmm7 movdqa %xmm7,%xmm1 pslldq $8,%xmm7 pxor %xmm1,%xmm7 pxor 336(%ebp),%xmm7 movdqa -16(%ebp),%xmm4 movdqa -48(%ebp),%xmm5 movdqa %xmm4,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm4,%xmm0 movdqa -32(%ebp),%xmm2 .byte 102,15,56,0,208 pxor %xmm1,%xmm0 movdqa %xmm5,%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 movdqa %xmm5,%xmm4 .byte 102,15,56,0,224 pxor %xmm2,%xmm4 movdqa %xmm5,%xmm2 .byte 102,15,56,0,211 pxor %xmm0,%xmm2 movdqa %xmm5,%xmm3 .byte 102,15,56,0,220 pxor %xmm1,%xmm3 movdqa 32(%ebp),%xmm4 .byte 102,15,56,0,226 movdqa 48(%ebp),%xmm0 .byte 102,15,56,0,195 pxor %xmm4,%xmm0 pxor %xmm7,%xmm0 movdqa %xmm0,%xmm7 ret .size _vpaes_schedule_round,.-_vpaes_schedule_round .hidden _vpaes_schedule_transform .type _vpaes_schedule_transform,@function .align 16 _vpaes_schedule_transform: movdqa -16(%ebp),%xmm2 movdqa %xmm2,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm0 movdqa (%ebx),%xmm2 .byte 102,15,56,0,208 movdqa 16(%ebx),%xmm0 .byte 102,15,56,0,193 pxor %xmm2,%xmm0 ret .size _vpaes_schedule_transform,.-_vpaes_schedule_transform .hidden _vpaes_schedule_mangle .type _vpaes_schedule_mangle,@function .align 16 _vpaes_schedule_mangle: movdqa %xmm0,%xmm4 movdqa 128(%ebp),%xmm5 testl %edi,%edi jnz .L014schedule_mangle_dec addl $16,%edx pxor 336(%ebp),%xmm4 .byte 102,15,56,0,229 movdqa %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 jmp .L015schedule_mangle_both .align 16 .L014schedule_mangle_dec: movdqa -16(%ebp),%xmm2 
leal 416(%ebp),%esi movdqa %xmm2,%xmm1 pandn %xmm4,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm4 movdqa (%esi),%xmm2 .byte 102,15,56,0,212 movdqa 16(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 32(%esi),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 48(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 64(%esi),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 80(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 96(%esi),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 112(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 addl $-16,%edx .L015schedule_mangle_both: movdqa 256(%ebp,%ecx,1),%xmm1 .byte 102,15,56,0,217 addl $-16,%ecx andl $48,%ecx movdqu %xmm3,(%edx) ret .size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle .globl vpaes_set_encrypt_key .hidden vpaes_set_encrypt_key .type vpaes_set_encrypt_key,@function .align 16 vpaes_set_encrypt_key: .L_vpaes_set_encrypt_key_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L016pic .L016pic: popl %ebx leal BORINGSSL_function_hit+5-.L016pic(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 20(%esp),%esi leal -56(%esp),%ebx movl 24(%esp),%eax andl $-16,%ebx movl 28(%esp),%edx xchgl %esp,%ebx movl %ebx,48(%esp) movl %eax,%ebx shrl $5,%ebx addl $5,%ebx movl %ebx,240(%edx) movl $48,%ecx movl $0,%edi leal .L_vpaes_consts+0x30-.L017pic_point,%ebp call _vpaes_schedule_core .L017pic_point: movl 48(%esp),%esp xorl %eax,%eax popl %edi popl %esi popl %ebx popl %ebp ret .size vpaes_set_encrypt_key,.-.L_vpaes_set_encrypt_key_begin .globl vpaes_set_decrypt_key .hidden vpaes_set_decrypt_key .type vpaes_set_decrypt_key,@function .align 16 vpaes_set_decrypt_key: .L_vpaes_set_decrypt_key_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi leal -56(%esp),%ebx movl 24(%esp),%eax andl $-16,%ebx movl 28(%esp),%edx xchgl %esp,%ebx movl %ebx,48(%esp) movl %eax,%ebx shrl $5,%ebx addl $5,%ebx movl %ebx,240(%edx) shll $4,%ebx leal 16(%edx,%ebx,1),%edx movl $1,%edi movl %eax,%ecx shrl $1,%ecx andl $32,%ecx xorl $32,%ecx leal .L_vpaes_consts+0x30-.L018pic_point,%ebp call _vpaes_schedule_core .L018pic_point: movl 48(%esp),%esp xorl %eax,%eax popl %edi popl %esi popl %ebx popl %ebp ret .size vpaes_set_decrypt_key,.-.L_vpaes_set_decrypt_key_begin .globl vpaes_encrypt .hidden vpaes_encrypt .type vpaes_encrypt,@function .align 16 vpaes_encrypt: .L_vpaes_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L019pic .L019pic: popl %ebx leal BORINGSSL_function_hit+4-.L019pic(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif leal .L_vpaes_consts+0x30-.L020pic_point,%ebp call _vpaes_preheat .L020pic_point: movl 20(%esp),%esi leal -56(%esp),%ebx movl 24(%esp),%edi andl $-16,%ebx movl 28(%esp),%edx xchgl %esp,%ebx movl %ebx,48(%esp) movdqu (%esi),%xmm0 call _vpaes_encrypt_core movdqu %xmm0,(%edi) movl 48(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size vpaes_encrypt,.-.L_vpaes_encrypt_begin .globl vpaes_decrypt .hidden vpaes_decrypt .type vpaes_decrypt,@function .align 16 vpaes_decrypt: .L_vpaes_decrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi leal .L_vpaes_consts+0x30-.L021pic_point,%ebp call _vpaes_preheat .L021pic_point: movl 20(%esp),%esi leal -56(%esp),%ebx movl 24(%esp),%edi andl $-16,%ebx movl 28(%esp),%edx xchgl %esp,%ebx movl %ebx,48(%esp) movdqu (%esi),%xmm0 call 
_vpaes_decrypt_core movdqu %xmm0,(%edi) movl 48(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size vpaes_decrypt,.-.L_vpaes_decrypt_begin .globl vpaes_cbc_encrypt .hidden vpaes_cbc_encrypt .type vpaes_cbc_encrypt,@function .align 16 vpaes_cbc_encrypt: .L_vpaes_cbc_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx subl $16,%eax jc .L022cbc_abort leal -56(%esp),%ebx movl 36(%esp),%ebp andl $-16,%ebx movl 40(%esp),%ecx xchgl %esp,%ebx movdqu (%ebp),%xmm1 subl %esi,%edi movl %ebx,48(%esp) movl %edi,(%esp) movl %edx,4(%esp) movl %ebp,8(%esp) movl %eax,%edi leal .L_vpaes_consts+0x30-.L023pic_point,%ebp call _vpaes_preheat .L023pic_point: cmpl $0,%ecx je .L024cbc_dec_loop jmp .L025cbc_enc_loop .align 16 .L025cbc_enc_loop: movdqu (%esi),%xmm0 pxor %xmm1,%xmm0 call _vpaes_encrypt_core movl (%esp),%ebx movl 4(%esp),%edx movdqa %xmm0,%xmm1 movdqu %xmm0,(%ebx,%esi,1) leal 16(%esi),%esi subl $16,%edi jnc .L025cbc_enc_loop jmp .L026cbc_done .align 16 .L024cbc_dec_loop: movdqu (%esi),%xmm0 movdqa %xmm1,16(%esp) movdqa %xmm0,32(%esp) call _vpaes_decrypt_core movl (%esp),%ebx movl 4(%esp),%edx pxor 16(%esp),%xmm0 movdqa 32(%esp),%xmm1 movdqu %xmm0,(%ebx,%esi,1) leal 16(%esi),%esi subl $16,%edi jnc .L024cbc_dec_loop .L026cbc_done: movl 8(%esp),%ebx movl 48(%esp),%esp movdqu %xmm1,(%ebx) .L022cbc_abort: popl %edi popl %esi popl %ebx popl %ebp ret .size vpaes_cbc_encrypt,.-.L_vpaes_cbc_encrypt_begin #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
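The vpaes-x86.S routines above implement AES with the SSSE3 vector-permutation approach: every table lookup is performed by splitting bytes into 4-bit halves (the recurring movdqa/pandn/pand/psrld $4 sequence) and indexing 16-byte tables held in XMM registers with pshufb (the .byte 102,15,56,0 sequences). A rough C-intrinsics sketch of just that nibble-split-and-lookup primitive follows; nibble_lookup is a made-up helper name, the table arguments are placeholders, and how the real affine-transform tables are derived is outside this sketch.

#include <tmmintrin.h>  /* SSSE3: _mm_shuffle_epi8 */

/* Split each byte of x into its low and high nibble and use each nibble
 * to index a 16-entry table kept in an XMM register, combining the two
 * lookups with XOR, mirroring the pand/pandn/psrld/pshufb/pxor pattern. */
static __m128i nibble_lookup(__m128i x, __m128i lo_table, __m128i hi_table) {
    const __m128i mask_0f = _mm_set1_epi8(0x0f);
    __m128i lo = _mm_and_si128(x, mask_0f);                        /* pand          */
    __m128i hi = _mm_srli_epi32(_mm_andnot_si128(mask_0f, x), 4);  /* pandn + psrld */
    return _mm_xor_si128(_mm_shuffle_epi8(lo_table, lo),           /* pshufb        */
                         _mm_shuffle_epi8(hi_table, hi));          /* pxor          */
}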
wlsfx/bnbb
17,356
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86/crypto/fipsmodule/co-586.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl bn_mul_comba8 .hidden bn_mul_comba8 .type bn_mul_comba8,@function .align 16 bn_mul_comba8: .L_bn_mul_comba8_begin: pushl %esi movl 12(%esp),%esi pushl %edi movl 20(%esp),%edi pushl %ebp pushl %ebx xorl %ebx,%ebx movl (%esi),%eax xorl %ecx,%ecx movl (%edi),%edx xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,(%eax) movl 4(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl (%edi),%edx adcl $0,%ebx movl %ecx,4(%eax) movl 8(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 4(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl (%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl (%edi),%edx adcl $0,%ecx movl %ebp,8(%eax) movl 12(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 4(%esi),%eax adcl %edx,%ecx movl 8(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl (%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,12(%eax) movl 16(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%ecx movl 12(%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 8(%esi),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 4(%esi),%eax adcl %edx,%ebp movl 12(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl (%edi),%edx adcl $0,%ebx movl %ecx,16(%eax) movl 20(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%ebp movl 16(%esi),%eax adcl %edx,%ebx movl 4(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 12(%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 8(%esi),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 16(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl (%esi),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl (%edi),%edx adcl $0,%ecx movl %ebp,20(%eax) movl 24(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 20(%esi),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 16(%esi),%eax adcl %edx,%ecx movl 8(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 12(%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 16(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 4(%esi),%eax adcl %edx,%ecx movl 20(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl (%esi),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,24(%eax) movl 28(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%ecx movl 24(%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esi),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx 
mull %edx addl %eax,%ecx movl 16(%esi),%eax adcl %edx,%ebp movl 12(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 12(%esi),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 8(%esi),%eax adcl %edx,%ebp movl 20(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 4(%esi),%eax adcl %edx,%ebp movl 24(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx movl %ecx,28(%eax) movl 28(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%ebp movl 24(%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esi),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 16(%esi),%eax adcl %edx,%ebx movl 16(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 12(%esi),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 8(%esi),%eax adcl %edx,%ebx movl 24(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 28(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx movl %ebp,32(%eax) movl 28(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 24(%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 20(%esi),%eax adcl %edx,%ecx movl 16(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 16(%esi),%eax adcl %edx,%ecx movl 20(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 12(%esi),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 28(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp movl %ebx,36(%eax) movl 28(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%ecx movl 24(%esi),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esi),%eax adcl %edx,%ebp movl 20(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 16(%esi),%eax adcl %edx,%ebp movl 24(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 12(%esi),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 16(%edi),%edx adcl $0,%ebx movl %ecx,40(%eax) movl 28(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%ebp movl 24(%esi),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esi),%eax adcl %edx,%ebx movl 24(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 16(%esi),%eax adcl %edx,%ebx movl 28(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl 20(%edi),%edx adcl $0,%ecx movl %ebp,44(%eax) movl 28(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 24(%esi),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 20(%esi),%eax adcl %edx,%ecx movl 28(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl 24(%edi),%edx adcl $0,%ebp movl %ebx,48(%eax) movl 28(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%ecx movl 24(%esi),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 28(%edi),%edx adcl $0,%ebx movl %ecx,52(%eax) movl 28(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx adcl $0,%ecx movl %ebp,56(%eax) movl %ebx,60(%eax) popl %ebx popl %ebp popl %edi popl %esi ret .size 
bn_mul_comba8,.-.L_bn_mul_comba8_begin .globl bn_mul_comba4 .hidden bn_mul_comba4 .type bn_mul_comba4,@function .align 16 bn_mul_comba4: .L_bn_mul_comba4_begin: pushl %esi movl 12(%esp),%esi pushl %edi movl 20(%esp),%edi pushl %ebp pushl %ebx xorl %ebx,%ebx movl (%esi),%eax xorl %ecx,%ecx movl (%edi),%edx xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl (%edi),%edx adcl $0,%ebp movl %ebx,(%eax) movl 4(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%ecx movl (%esi),%eax adcl %edx,%ebp movl 4(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl (%edi),%edx adcl $0,%ebx movl %ecx,4(%eax) movl 8(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%ebp movl 4(%esi),%eax adcl %edx,%ebx movl 4(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl (%esi),%eax adcl %edx,%ebx movl 8(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl (%edi),%edx adcl $0,%ecx movl %ebp,8(%eax) movl 12(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 8(%esi),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 4(%esi),%eax adcl %edx,%ecx movl 8(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl (%esi),%eax adcl %edx,%ecx movl 12(%edi),%edx adcl $0,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx movl 4(%edi),%edx adcl $0,%ebp movl %ebx,12(%eax) movl 12(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%ecx movl 8(%esi),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 4(%esi),%eax adcl %edx,%ebp movl 12(%edi),%edx adcl $0,%ebx mull %edx addl %eax,%ecx movl 20(%esp),%eax adcl %edx,%ebp movl 8(%edi),%edx adcl $0,%ebx movl %ecx,16(%eax) movl 12(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%ebp movl 8(%esi),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx mull %edx addl %eax,%ebp movl 20(%esp),%eax adcl %edx,%ebx movl 12(%edi),%edx adcl $0,%ecx movl %ebp,20(%eax) movl 12(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%ebx movl 20(%esp),%eax adcl %edx,%ecx adcl $0,%ebp movl %ebx,24(%eax) movl %ecx,28(%eax) popl %ebx popl %ebp popl %edi popl %esi ret .size bn_mul_comba4,.-.L_bn_mul_comba4_begin .globl bn_sqr_comba8 .hidden bn_sqr_comba8 .type bn_sqr_comba8,@function .align 16 bn_sqr_comba8: .L_bn_sqr_comba8_begin: pushl %esi pushl %edi pushl %ebp pushl %ebx movl 20(%esp),%edi movl 24(%esp),%esi xorl %ebx,%ebx xorl %ecx,%ecx movl (%esi),%eax xorl %ebp,%ebp mull %eax addl %eax,%ebx adcl %edx,%ecx movl (%esi),%edx adcl $0,%ebp movl %ebx,(%edi) movl 4(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx movl %ecx,4(%edi) movl (%esi),%edx xorl %ecx,%ecx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 4(%esi),%eax adcl $0,%ecx mull %eax addl %eax,%ebp adcl %edx,%ebx movl (%esi),%edx adcl $0,%ecx movl %ebp,8(%edi) movl 12(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 8(%esi),%eax adcl $0,%ebp movl 4(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 16(%esi),%eax adcl $0,%ebp movl %ebx,12(%edi) movl (%esi),%edx xorl %ebx,%ebx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 12(%esi),%eax adcl $0,%ebx movl 4(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx mull %eax addl %eax,%ecx adcl %edx,%ebp movl 
(%esi),%edx adcl $0,%ebx movl %ecx,16(%edi) movl 20(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 16(%esi),%eax adcl $0,%ecx movl 4(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 12(%esi),%eax adcl $0,%ecx movl 8(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 24(%esi),%eax adcl $0,%ecx movl %ebp,20(%edi) movl (%esi),%edx xorl %ebp,%ebp mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 20(%esi),%eax adcl $0,%ebp movl 4(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 16(%esi),%eax adcl $0,%ebp movl 8(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 12(%esi),%eax adcl $0,%ebp mull %eax addl %eax,%ebx adcl %edx,%ecx movl (%esi),%edx adcl $0,%ebp movl %ebx,24(%edi) movl 28(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 24(%esi),%eax adcl $0,%ebx movl 4(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 20(%esi),%eax adcl $0,%ebx movl 8(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 16(%esi),%eax adcl $0,%ebx movl 12(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 28(%esi),%eax adcl $0,%ebx movl %ecx,28(%edi) movl 4(%esi),%edx xorl %ecx,%ecx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 24(%esi),%eax adcl $0,%ecx movl 8(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 20(%esi),%eax adcl $0,%ecx movl 12(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 16(%esi),%eax adcl $0,%ecx mull %eax addl %eax,%ebp adcl %edx,%ebx movl 8(%esi),%edx adcl $0,%ecx movl %ebp,32(%edi) movl 28(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 24(%esi),%eax adcl $0,%ebp movl 12(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 20(%esi),%eax adcl $0,%ebp movl 16(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 28(%esi),%eax adcl $0,%ebp movl %ebx,36(%edi) movl 12(%esi),%edx xorl %ebx,%ebx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 24(%esi),%eax adcl $0,%ebx movl 16(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 20(%esi),%eax adcl $0,%ebx mull %eax addl %eax,%ecx adcl %edx,%ebp movl 16(%esi),%edx adcl $0,%ebx movl %ecx,40(%edi) movl 28(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 24(%esi),%eax adcl $0,%ecx movl 20(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 28(%esi),%eax adcl $0,%ecx movl %ebp,44(%edi) movl 20(%esi),%edx xorl %ebp,%ebp mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 24(%esi),%eax adcl $0,%ebp mull %eax addl %eax,%ebx adcl %edx,%ecx movl 24(%esi),%edx adcl $0,%ebp movl %ebx,48(%edi) movl 28(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 28(%esi),%eax adcl $0,%ebx 
movl %ecx,52(%edi) xorl %ecx,%ecx mull %eax addl %eax,%ebp adcl %edx,%ebx adcl $0,%ecx movl %ebp,56(%edi) movl %ebx,60(%edi) popl %ebx popl %ebp popl %edi popl %esi ret .size bn_sqr_comba8,.-.L_bn_sqr_comba8_begin .globl bn_sqr_comba4 .hidden bn_sqr_comba4 .type bn_sqr_comba4,@function .align 16 bn_sqr_comba4: .L_bn_sqr_comba4_begin: pushl %esi pushl %edi pushl %ebp pushl %ebx movl 20(%esp),%edi movl 24(%esp),%esi xorl %ebx,%ebx xorl %ecx,%ecx movl (%esi),%eax xorl %ebp,%ebp mull %eax addl %eax,%ebx adcl %edx,%ecx movl (%esi),%edx adcl $0,%ebp movl %ebx,(%edi) movl 4(%esi),%eax xorl %ebx,%ebx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx movl %ecx,4(%edi) movl (%esi),%edx xorl %ecx,%ecx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 4(%esi),%eax adcl $0,%ecx mull %eax addl %eax,%ebp adcl %edx,%ebx movl (%esi),%edx adcl $0,%ecx movl %ebp,8(%edi) movl 12(%esi),%eax xorl %ebp,%ebp mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 8(%esi),%eax adcl $0,%ebp movl 4(%esi),%edx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebp addl %eax,%ebx adcl %edx,%ecx movl 12(%esi),%eax adcl $0,%ebp movl %ebx,12(%edi) movl 4(%esi),%edx xorl %ebx,%ebx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ebx addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%eax adcl $0,%ebx mull %eax addl %eax,%ecx adcl %edx,%ebp movl 8(%esi),%edx adcl $0,%ebx movl %ecx,16(%edi) movl 12(%esi),%eax xorl %ecx,%ecx mull %edx addl %eax,%eax adcl %edx,%edx adcl $0,%ecx addl %eax,%ebp adcl %edx,%ebx movl 12(%esi),%eax adcl $0,%ecx movl %ebp,20(%edi) xorl %ebp,%ebp mull %eax addl %eax,%ebx adcl %edx,%ecx adcl $0,%ebp movl %ebx,24(%edi) movl %ecx,28(%edi) popl %ebx popl %ebp popl %edi popl %esi ret .size bn_sqr_comba4,.-.L_bn_sqr_comba4_begin #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
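bn_mul_comba8 and bn_mul_comba4 above walk the product column by column, accumulating each diagonal of 32x32-to-64-bit partial products in three carry registers before storing one result word (the repeating mull / addl / adcl / adcl $0 groups). A plain-C sketch of the same column-wise (Comba) schedule for the 4-word case is below; mul_comba4 is an illustrative name and the code favours clarity over the register scheduling the assembly performs.

#include <stdint.h>

/* Column-wise multiplication of two 4-word numbers; r receives 8 words.
 * acc plus carry together play the role of the three accumulator
 * registers in the assembly. */
static void mul_comba4(uint32_t r[8], const uint32_t a[4], const uint32_t b[4]) {
    uint64_t acc = 0;      /* low 64 bits of the running column sum    */
    uint32_t carry = 0;    /* overflow above 64 bits (the third word)  */
    for (int k = 0; k < 7; k++) {
        for (int i = 0; i < 4; i++) {
            int j = k - i;
            if (j < 0 || j > 3) continue;
            uint64_t p = (uint64_t)a[i] * b[j];
            uint64_t prev = acc;
            acc += p;
            if (acc < prev) carry++;   /* propagate into the third word */
        }
        r[k] = (uint32_t)acc;
        acc = (acc >> 32) | ((uint64_t)carry << 32);
        carry = 0;
    }
    r[7] = (uint32_t)acc;
}

The assembly avoids the inner bounds test by fully unrolling every column, which is why each result word is produced by its own block of mull/adcl instructions.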
wlsfx/bnbb
11,526
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86/crypto/fipsmodule/md5-586.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl md5_block_asm_data_order .hidden md5_block_asm_data_order .type md5_block_asm_data_order,@function .align 16 md5_block_asm_data_order: .L_md5_block_asm_data_order_begin: pushl %esi pushl %edi movl 12(%esp),%edi movl 16(%esp),%esi movl 20(%esp),%ecx pushl %ebp shll $6,%ecx pushl %ebx addl %esi,%ecx subl $64,%ecx movl (%edi),%eax pushl %ecx movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx .L000start: movl %ecx,%edi movl (%esi),%ebp xorl %edx,%edi andl %ebx,%edi leal 3614090360(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 4(%esi),%ebp addl %ebx,%eax xorl %ecx,%edi andl %eax,%edi leal 3905402710(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 8(%esi),%ebp addl %eax,%edx xorl %ebx,%edi andl %edx,%edi leal 606105819(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx movl 12(%esi),%ebp addl %edx,%ecx xorl %eax,%edi andl %ecx,%edi leal 3250441966(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 16(%esi),%ebp addl %ecx,%ebx xorl %edx,%edi andl %ebx,%edi leal 4118548399(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 20(%esi),%ebp addl %ebx,%eax xorl %ecx,%edi andl %eax,%edi leal 1200080426(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 24(%esi),%ebp addl %eax,%edx xorl %ebx,%edi andl %edx,%edi leal 2821735955(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx movl 28(%esi),%ebp addl %edx,%ecx xorl %eax,%edi andl %ecx,%edi leal 4249261313(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 32(%esi),%ebp addl %ecx,%ebx xorl %edx,%edi andl %ebx,%edi leal 1770035416(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 36(%esi),%ebp addl %ebx,%eax xorl %ecx,%edi andl %eax,%edi leal 2336552879(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 40(%esi),%ebp addl %eax,%edx xorl %ebx,%edi andl %edx,%edi leal 4294925233(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx movl 44(%esi),%ebp addl %edx,%ecx xorl %eax,%edi andl %ecx,%edi leal 2304563134(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 48(%esi),%ebp addl %ecx,%ebx xorl %edx,%edi andl %ebx,%edi leal 1804603682(%eax,%ebp,1),%eax xorl %edx,%edi addl %edi,%eax movl %ebx,%edi roll $7,%eax movl 52(%esi),%ebp addl %ebx,%eax xorl %ecx,%edi andl %eax,%edi leal 4254626195(%edx,%ebp,1),%edx xorl %ecx,%edi addl %edi,%edx movl %eax,%edi roll $12,%edx movl 56(%esi),%ebp addl %eax,%edx xorl %ebx,%edi andl %edx,%edi leal 2792965006(%ecx,%ebp,1),%ecx xorl %ebx,%edi addl %edi,%ecx movl %edx,%edi roll $17,%ecx movl 60(%esi),%ebp addl %edx,%ecx xorl %eax,%edi andl %ecx,%edi leal 1236535329(%ebx,%ebp,1),%ebx xorl %eax,%edi addl %edi,%ebx movl %ecx,%edi roll $22,%ebx movl 4(%esi),%ebp addl %ecx,%ebx leal 4129170786(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 24(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax leal 3225465664(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 44(%esi),%ebp xorl %ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx leal 643717713(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl 
(%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll $14,%ecx addl %edx,%ecx leal 3921069994(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 20(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx leal 3593408605(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 40(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax leal 38016083(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 60(%esi),%ebp xorl %ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx leal 3634488961(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl 16(%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll $14,%ecx addl %edx,%ecx leal 3889429448(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 36(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx leal 568446438(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 56(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax leal 3275163606(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 12(%esi),%ebp xorl %ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx leal 4107603335(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl 32(%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll $14,%ecx addl %edx,%ecx leal 1163531501(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 52(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx leal 2850285829(%eax,%ebp,1),%eax xorl %ebx,%edi andl %edx,%edi movl 8(%esi),%ebp xorl %ecx,%edi addl %edi,%eax movl %ebx,%edi roll $5,%eax addl %ebx,%eax leal 4243563512(%edx,%ebp,1),%edx xorl %eax,%edi andl %ecx,%edi movl 28(%esi),%ebp xorl %ebx,%edi addl %edi,%edx movl %eax,%edi roll $9,%edx addl %eax,%edx leal 1735328473(%ecx,%ebp,1),%ecx xorl %edx,%edi andl %ebx,%edi movl 48(%esi),%ebp xorl %eax,%edi addl %edi,%ecx movl %edx,%edi roll $14,%ecx addl %edx,%ecx leal 2368359562(%ebx,%ebp,1),%ebx xorl %ecx,%edi andl %eax,%edi movl 20(%esi),%ebp xorl %edx,%edi addl %edi,%ebx movl %ecx,%edi roll $20,%ebx addl %ecx,%ebx xorl %edx,%edi xorl %ebx,%edi leal 4294588738(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl 32(%esi),%ebp movl %ebx,%edi leal 2272392833(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 44(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx xorl %ebx,%edi xorl %edx,%edi leal 1839030562(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 56(%esi),%ebp movl %edx,%edi leal 4259657740(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl 4(%esi),%ebp addl %edi,%ebx movl %ecx,%edi roll $23,%ebx addl %ecx,%ebx xorl %edx,%edi xorl %ebx,%edi leal 2763975236(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl 16(%esi),%ebp movl %ebx,%edi leal 1272893353(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 28(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx xorl %ebx,%edi xorl %edx,%edi leal 4139469664(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 40(%esi),%ebp movl %edx,%edi leal 3200236656(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl 52(%esi),%ebp addl %edi,%ebx movl %ecx,%edi roll $23,%ebx addl %ecx,%ebx xorl %edx,%edi xorl %ebx,%edi leal 681279174(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl (%esi),%ebp movl %ebx,%edi leal 3936430074(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 12(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx xorl %ebx,%edi xorl 
%edx,%edi leal 3572445317(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 24(%esi),%ebp movl %edx,%edi leal 76029189(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl 36(%esi),%ebp addl %edi,%ebx movl %ecx,%edi roll $23,%ebx addl %ecx,%ebx xorl %edx,%edi xorl %ebx,%edi leal 3654602809(%eax,%ebp,1),%eax addl %edi,%eax roll $4,%eax movl 48(%esi),%ebp movl %ebx,%edi leal 3873151461(%edx,%ebp,1),%edx addl %ebx,%eax xorl %ecx,%edi xorl %eax,%edi movl 60(%esi),%ebp addl %edi,%edx movl %eax,%edi roll $11,%edx addl %eax,%edx xorl %ebx,%edi xorl %edx,%edi leal 530742520(%ecx,%ebp,1),%ecx addl %edi,%ecx roll $16,%ecx movl 8(%esi),%ebp movl %edx,%edi leal 3299628645(%ebx,%ebp,1),%ebx addl %edx,%ecx xorl %eax,%edi xorl %ecx,%edi movl (%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $23,%ebx addl %ecx,%ebx xorl %edx,%edi orl %ebx,%edi leal 4096336452(%eax,%ebp,1),%eax xorl %ecx,%edi movl 28(%esi),%ebp addl %edi,%eax movl $-1,%edi roll $6,%eax xorl %ecx,%edi addl %ebx,%eax orl %eax,%edi leal 1126891415(%edx,%ebp,1),%edx xorl %ebx,%edi movl 56(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl %ebx,%edi addl %eax,%edx orl %edx,%edi leal 2878612391(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 20(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx orl %ecx,%edi leal 4237533241(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 48(%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $21,%ebx xorl %edx,%edi addl %ecx,%ebx orl %ebx,%edi leal 1700485571(%eax,%ebp,1),%eax xorl %ecx,%edi movl 12(%esi),%ebp addl %edi,%eax movl $-1,%edi roll $6,%eax xorl %ecx,%edi addl %ebx,%eax orl %eax,%edi leal 2399980690(%edx,%ebp,1),%edx xorl %ebx,%edi movl 40(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl %ebx,%edi addl %eax,%edx orl %edx,%edi leal 4293915773(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 4(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx orl %ecx,%edi leal 2240044497(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 32(%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $21,%ebx xorl %edx,%edi addl %ecx,%ebx orl %ebx,%edi leal 1873313359(%eax,%ebp,1),%eax xorl %ecx,%edi movl 60(%esi),%ebp addl %edi,%eax movl $-1,%edi roll $6,%eax xorl %ecx,%edi addl %ebx,%eax orl %eax,%edi leal 4264355552(%edx,%ebp,1),%edx xorl %ebx,%edi movl 24(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl %ebx,%edi addl %eax,%edx orl %edx,%edi leal 2734768916(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 52(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx orl %ecx,%edi leal 1309151649(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 16(%esi),%ebp addl %edi,%ebx movl $-1,%edi roll $21,%ebx xorl %edx,%edi addl %ecx,%ebx orl %ebx,%edi leal 4149444226(%eax,%ebp,1),%eax xorl %ecx,%edi movl 44(%esi),%ebp addl %edi,%eax movl $-1,%edi roll $6,%eax xorl %ecx,%edi addl %ebx,%eax orl %eax,%edi leal 3174756917(%edx,%ebp,1),%edx xorl %ebx,%edi movl 8(%esi),%ebp addl %edi,%edx movl $-1,%edi roll $10,%edx xorl %ebx,%edi addl %eax,%edx orl %edx,%edi leal 718787259(%ecx,%ebp,1),%ecx xorl %eax,%edi movl 36(%esi),%ebp addl %edi,%ecx movl $-1,%edi roll $15,%ecx xorl %eax,%edi addl %edx,%ecx orl %ecx,%edi leal 3951481745(%ebx,%ebp,1),%ebx xorl %edx,%edi movl 24(%esp),%ebp addl %edi,%ebx addl $64,%esi roll $21,%ebx movl (%ebp),%edi addl %ecx,%ebx addl %edi,%eax movl 4(%ebp),%edi addl %edi,%ebx movl 8(%ebp),%edi addl %edi,%ecx movl 12(%ebp),%edi addl %edi,%edx movl %eax,(%ebp) movl %ebx,4(%ebp) movl (%esp),%edi movl %ecx,8(%ebp) movl %edx,12(%ebp) cmpl %esi,%edi jae .L000start popl 
%eax popl %ebx popl %ebp popl %edi popl %esi ret .size md5_block_asm_data_order,.-.L_md5_block_asm_data_order_begin #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
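md5_block_asm_data_order above is the fully unrolled MD5 compression function: each step adds a message word and one of the 64 per-step constants (e.g. 3614090360 = 0xd76aa478), mixes in a boolean function of three state words, rotates, and adds the neighbouring state word. A minimal C sketch of a round-1 step follows; md5_ff and rol32 are illustrative names, and the F function is written the way the assembly computes it, as ((c ^ d) & b) ^ d.

#include <stdint.h>

static uint32_t rol32(uint32_t x, int n) { return (x << n) | (x >> (32 - n)); }

/* One round-1 (F) step: a = b + rol(a + F(b,c,d) + m + t, s). */
static uint32_t md5_ff(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
                       uint32_t m, uint32_t t, int s) {
    uint32_t f = ((c ^ d) & b) ^ d;          /* xorl / andl / xorl        */
    return b + rol32(a + f + m + t, s);      /* leal / addl / roll / addl */
}

/* First step of a block, matching the code above:
 *   a = md5_ff(a, b, c, d, M[0], 0xd76aa478, 7);   (0xd76aa478 == 3614090360) */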
wlsfx/bnbb
51,315
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86/crypto/fipsmodule/aesni-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text #ifdef BORINGSSL_DISPATCH_TEST #endif .globl aes_hw_encrypt .hidden aes_hw_encrypt .type aes_hw_encrypt,@function .align 16 aes_hw_encrypt: .L_aes_hw_encrypt_begin: #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L000pic .L000pic: popl %ebx leal BORINGSSL_function_hit+1-.L000pic(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 4(%esp),%eax movl 12(%esp),%edx movups (%eax),%xmm2 movl 240(%edx),%ecx movl 8(%esp),%eax movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L001enc1_loop_1: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L001enc1_loop_1 .byte 102,15,56,221,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movups %xmm2,(%eax) pxor %xmm2,%xmm2 ret .size aes_hw_encrypt,.-.L_aes_hw_encrypt_begin .globl aes_hw_decrypt .hidden aes_hw_decrypt .type aes_hw_decrypt,@function .align 16 aes_hw_decrypt: .L_aes_hw_decrypt_begin: movl 4(%esp),%eax movl 12(%esp),%edx movups (%eax),%xmm2 movl 240(%edx),%ecx movl 8(%esp),%eax movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L002dec1_loop_2: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L002dec1_loop_2 .byte 102,15,56,223,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movups %xmm2,(%eax) pxor %xmm2,%xmm2 ret .size aes_hw_decrypt,.-.L_aes_hw_decrypt_begin .hidden _aesni_encrypt2 .type _aesni_encrypt2,@function .align 16 _aesni_encrypt2: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx .L003enc2_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%edx,%ecx,1),%xmm0 jnz .L003enc2_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,221,208 .byte 102,15,56,221,216 ret .size _aesni_encrypt2,.-_aesni_encrypt2 .hidden _aesni_decrypt2 .type _aesni_decrypt2,@function .align 16 _aesni_decrypt2: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx .L004dec2_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,222,208 .byte 102,15,56,222,216 movups -16(%edx,%ecx,1),%xmm0 jnz .L004dec2_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,223,208 .byte 102,15,56,223,216 ret .size _aesni_decrypt2,.-_aesni_decrypt2 .hidden _aesni_encrypt3 .type _aesni_encrypt3,@function .align 16 _aesni_encrypt3: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx .L005enc3_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 movups -16(%edx,%ecx,1),%xmm0 jnz .L005enc3_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 ret .size _aesni_encrypt3,.-_aesni_encrypt3 .hidden _aesni_decrypt3 .type _aesni_decrypt3,@function .align 16 _aesni_decrypt3: movups (%edx),%xmm0 shll 
$4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx .L006dec3_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 movups -16(%edx,%ecx,1),%xmm0 jnz .L006dec3_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 ret .size _aesni_decrypt3,.-_aesni_decrypt3 .hidden _aesni_encrypt4 .type _aesni_encrypt4,@function .align 16 _aesni_encrypt4: movups (%edx),%xmm0 movups 16(%edx),%xmm1 shll $4,%ecx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 15,31,64,0 addl $16,%ecx .L007enc4_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movups -16(%edx,%ecx,1),%xmm0 jnz .L007enc4_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 ret .size _aesni_encrypt4,.-_aesni_encrypt4 .hidden _aesni_decrypt4 .type _aesni_decrypt4,@function .align 16 _aesni_decrypt4: movups (%edx),%xmm0 movups 16(%edx),%xmm1 shll $4,%ecx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 15,31,64,0 addl $16,%ecx .L008dec4_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 movups -16(%edx,%ecx,1),%xmm0 jnz .L008dec4_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 .byte 102,15,56,223,232 ret .size _aesni_decrypt4,.-_aesni_decrypt4 .hidden _aesni_encrypt6 .type _aesni_encrypt6,@function .align 16 _aesni_encrypt6: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 .byte 102,15,56,220,209 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 .byte 102,15,56,220,217 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 102,15,56,220,225 pxor %xmm0,%xmm7 movups (%edx,%ecx,1),%xmm0 addl $16,%ecx jmp .L009_aesni_encrypt6_inner .align 16 .L010enc6_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .L009_aesni_encrypt6_inner: .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .L_aesni_encrypt6_enter: movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 movups -16(%edx,%ecx,1),%xmm0 jnz .L010enc6_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 .byte 102,15,56,221,240 .byte 102,15,56,221,248 ret .size _aesni_encrypt6,.-_aesni_encrypt6 .hidden _aesni_decrypt6 .type _aesni_decrypt6,@function .align 16 _aesni_decrypt6: movups 
(%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 .byte 102,15,56,222,209 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 .byte 102,15,56,222,217 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 102,15,56,222,225 pxor %xmm0,%xmm7 movups (%edx,%ecx,1),%xmm0 addl $16,%ecx jmp .L011_aesni_decrypt6_inner .align 16 .L012dec6_loop: .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .L011_aesni_decrypt6_inner: .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .L_aesni_decrypt6_enter: movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,222,208 .byte 102,15,56,222,216 .byte 102,15,56,222,224 .byte 102,15,56,222,232 .byte 102,15,56,222,240 .byte 102,15,56,222,248 movups -16(%edx,%ecx,1),%xmm0 jnz .L012dec6_loop .byte 102,15,56,222,209 .byte 102,15,56,222,217 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 .byte 102,15,56,223,208 .byte 102,15,56,223,216 .byte 102,15,56,223,224 .byte 102,15,56,223,232 .byte 102,15,56,223,240 .byte 102,15,56,223,248 ret .size _aesni_decrypt6,.-_aesni_decrypt6 .globl aes_hw_ecb_encrypt .hidden aes_hw_ecb_encrypt .type aes_hw_ecb_encrypt,@function .align 16 aes_hw_ecb_encrypt: .L_aes_hw_ecb_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx andl $-16,%eax jz .L013ecb_ret movl 240(%edx),%ecx testl %ebx,%ebx jz .L014ecb_decrypt movl %edx,%ebp movl %ecx,%ebx cmpl $96,%eax jb .L015ecb_enc_tail movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 movdqu 48(%esi),%xmm5 movdqu 64(%esi),%xmm6 movdqu 80(%esi),%xmm7 leal 96(%esi),%esi subl $96,%eax jmp .L016ecb_enc_loop6_enter .align 16 .L017ecb_enc_loop6: movups %xmm2,(%edi) movdqu (%esi),%xmm2 movups %xmm3,16(%edi) movdqu 16(%esi),%xmm3 movups %xmm4,32(%edi) movdqu 32(%esi),%xmm4 movups %xmm5,48(%edi) movdqu 48(%esi),%xmm5 movups %xmm6,64(%edi) movdqu 64(%esi),%xmm6 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqu 80(%esi),%xmm7 leal 96(%esi),%esi .L016ecb_enc_loop6_enter: call _aesni_encrypt6 movl %ebp,%edx movl %ebx,%ecx subl $96,%eax jnc .L017ecb_enc_loop6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) movups %xmm7,80(%edi) leal 96(%edi),%edi addl $96,%eax jz .L013ecb_ret .L015ecb_enc_tail: movups (%esi),%xmm2 cmpl $32,%eax jb .L018ecb_enc_one movups 16(%esi),%xmm3 je .L019ecb_enc_two movups 32(%esi),%xmm4 cmpl $64,%eax jb .L020ecb_enc_three movups 48(%esi),%xmm5 je .L021ecb_enc_four movups 64(%esi),%xmm6 xorps %xmm7,%xmm7 call _aesni_encrypt6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) jmp .L013ecb_ret .align 16 .L018ecb_enc_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L022enc1_loop_3: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L022enc1_loop_3 .byte 102,15,56,221,209 movups %xmm2,(%edi) jmp .L013ecb_ret .align 16 .L019ecb_enc_two: call _aesni_encrypt2 movups %xmm2,(%edi) movups %xmm3,16(%edi) jmp .L013ecb_ret .align 16 .L020ecb_enc_three: call _aesni_encrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) jmp .L013ecb_ret .align 16 .L021ecb_enc_four: call _aesni_encrypt4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) jmp .L013ecb_ret .align 16 .L014ecb_decrypt: movl %edx,%ebp movl %ecx,%ebx cmpl $96,%eax jb .L023ecb_dec_tail movdqu 
(%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 movdqu 48(%esi),%xmm5 movdqu 64(%esi),%xmm6 movdqu 80(%esi),%xmm7 leal 96(%esi),%esi subl $96,%eax jmp .L024ecb_dec_loop6_enter .align 16 .L025ecb_dec_loop6: movups %xmm2,(%edi) movdqu (%esi),%xmm2 movups %xmm3,16(%edi) movdqu 16(%esi),%xmm3 movups %xmm4,32(%edi) movdqu 32(%esi),%xmm4 movups %xmm5,48(%edi) movdqu 48(%esi),%xmm5 movups %xmm6,64(%edi) movdqu 64(%esi),%xmm6 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqu 80(%esi),%xmm7 leal 96(%esi),%esi .L024ecb_dec_loop6_enter: call _aesni_decrypt6 movl %ebp,%edx movl %ebx,%ecx subl $96,%eax jnc .L025ecb_dec_loop6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) movups %xmm7,80(%edi) leal 96(%edi),%edi addl $96,%eax jz .L013ecb_ret .L023ecb_dec_tail: movups (%esi),%xmm2 cmpl $32,%eax jb .L026ecb_dec_one movups 16(%esi),%xmm3 je .L027ecb_dec_two movups 32(%esi),%xmm4 cmpl $64,%eax jb .L028ecb_dec_three movups 48(%esi),%xmm5 je .L029ecb_dec_four movups 64(%esi),%xmm6 xorps %xmm7,%xmm7 call _aesni_decrypt6 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) jmp .L013ecb_ret .align 16 .L026ecb_dec_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L030dec1_loop_4: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L030dec1_loop_4 .byte 102,15,56,223,209 movups %xmm2,(%edi) jmp .L013ecb_ret .align 16 .L027ecb_dec_two: call _aesni_decrypt2 movups %xmm2,(%edi) movups %xmm3,16(%edi) jmp .L013ecb_ret .align 16 .L028ecb_dec_three: call _aesni_decrypt3 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) jmp .L013ecb_ret .align 16 .L029ecb_dec_four: call _aesni_decrypt4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) .L013ecb_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx popl %ebp ret .size aes_hw_ecb_encrypt,.-.L_aes_hw_ecb_encrypt_begin .globl aes_hw_ccm64_encrypt_blocks .hidden aes_hw_ccm64_encrypt_blocks .type aes_hw_ccm64_encrypt_blocks,@function .align 16 aes_hw_ccm64_encrypt_blocks: .L_aes_hw_ccm64_encrypt_blocks_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx movl 40(%esp),%ecx movl %esp,%ebp subl $60,%esp andl $-16,%esp movl %ebp,48(%esp) movdqu (%ebx),%xmm7 movdqu (%ecx),%xmm3 movl 240(%edx),%ecx movl $202182159,(%esp) movl $134810123,4(%esp) movl $67438087,8(%esp) movl $66051,12(%esp) movl $1,%ebx xorl %ebp,%ebp movl %ebx,16(%esp) movl %ebp,20(%esp) movl %ebp,24(%esp) movl %ebp,28(%esp) shll $4,%ecx movl $16,%ebx leal (%edx),%ebp movdqa (%esp),%xmm5 movdqa %xmm7,%xmm2 leal 32(%edx,%ecx,1),%edx subl %ecx,%ebx .byte 102,15,56,0,253 .L031ccm64_enc_outer: movups (%ebp),%xmm0 movl %ebx,%ecx movups (%esi),%xmm6 xorps %xmm0,%xmm2 movups 16(%ebp),%xmm1 xorps %xmm6,%xmm0 xorps %xmm0,%xmm3 movups 32(%ebp),%xmm0 .L032ccm64_enc2_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%edx,%ecx,1),%xmm0 jnz .L032ccm64_enc2_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 paddq 16(%esp),%xmm7 decl %eax .byte 102,15,56,221,208 .byte 102,15,56,221,216 leal 16(%esi),%esi xorps %xmm2,%xmm6 movdqa %xmm7,%xmm2 movups %xmm6,(%edi) .byte 102,15,56,0,213 leal 
16(%edi),%edi jnz .L031ccm64_enc_outer movl 48(%esp),%esp movl 40(%esp),%edi movups %xmm3,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx popl %ebp ret .size aes_hw_ccm64_encrypt_blocks,.-.L_aes_hw_ccm64_encrypt_blocks_begin .globl aes_hw_ccm64_decrypt_blocks .hidden aes_hw_ccm64_decrypt_blocks .type aes_hw_ccm64_decrypt_blocks,@function .align 16 aes_hw_ccm64_decrypt_blocks: .L_aes_hw_ccm64_decrypt_blocks_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx movl 40(%esp),%ecx movl %esp,%ebp subl $60,%esp andl $-16,%esp movl %ebp,48(%esp) movdqu (%ebx),%xmm7 movdqu (%ecx),%xmm3 movl 240(%edx),%ecx movl $202182159,(%esp) movl $134810123,4(%esp) movl $67438087,8(%esp) movl $66051,12(%esp) movl $1,%ebx xorl %ebp,%ebp movl %ebx,16(%esp) movl %ebp,20(%esp) movl %ebp,24(%esp) movl %ebp,28(%esp) movdqa (%esp),%xmm5 movdqa %xmm7,%xmm2 movl %edx,%ebp movl %ecx,%ebx .byte 102,15,56,0,253 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L033enc1_loop_5: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L033enc1_loop_5 .byte 102,15,56,221,209 shll $4,%ebx movl $16,%ecx movups (%esi),%xmm6 paddq 16(%esp),%xmm7 leal 16(%esi),%esi subl %ebx,%ecx leal 32(%ebp,%ebx,1),%edx movl %ecx,%ebx jmp .L034ccm64_dec_outer .align 16 .L034ccm64_dec_outer: xorps %xmm2,%xmm6 movdqa %xmm7,%xmm2 movups %xmm6,(%edi) leal 16(%edi),%edi .byte 102,15,56,0,213 subl $1,%eax jz .L035ccm64_dec_break movups (%ebp),%xmm0 movl %ebx,%ecx movups 16(%ebp),%xmm1 xorps %xmm0,%xmm6 xorps %xmm0,%xmm2 xorps %xmm6,%xmm3 movups 32(%ebp),%xmm0 .L036ccm64_dec2_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%edx,%ecx,1),%xmm0 jnz .L036ccm64_dec2_loop movups (%esi),%xmm6 paddq 16(%esp),%xmm7 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,221,208 .byte 102,15,56,221,216 leal 16(%esi),%esi jmp .L034ccm64_dec_outer .align 16 .L035ccm64_dec_break: movl 240(%ebp),%ecx movl %ebp,%edx movups (%edx),%xmm0 movups 16(%edx),%xmm1 xorps %xmm0,%xmm6 leal 32(%edx),%edx xorps %xmm6,%xmm3 .L037enc1_loop_6: .byte 102,15,56,220,217 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L037enc1_loop_6 .byte 102,15,56,221,217 movl 48(%esp),%esp movl 40(%esp),%edi movups %xmm3,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 popl %edi popl %esi popl %ebx popl %ebp ret .size aes_hw_ccm64_decrypt_blocks,.-.L_aes_hw_ccm64_decrypt_blocks_begin .globl aes_hw_ctr32_encrypt_blocks .hidden aes_hw_ctr32_encrypt_blocks .type aes_hw_ctr32_encrypt_blocks,@function .align 16 aes_hw_ctr32_encrypt_blocks: .L_aes_hw_ctr32_encrypt_blocks_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L038pic .L038pic: popl %ebx leal BORINGSSL_function_hit+0-.L038pic(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx movl %esp,%ebp subl $88,%esp andl $-16,%esp movl %ebp,80(%esp) cmpl $1,%eax jb .L039ctr32_ret je .L040ctr32_one_shortcut movdqu (%ebx),%xmm7 movl $202182159,(%esp) movl $134810123,4(%esp) movl $67438087,8(%esp) movl $66051,12(%esp) 
movl $6,%ecx xorl %ebp,%ebp movl %ecx,16(%esp) movl %ecx,20(%esp) movl %ecx,24(%esp) movl %ebp,28(%esp) .byte 102,15,58,22,251,3 .byte 102,15,58,34,253,3 movl 240(%edx),%ecx bswap %ebx pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movdqa (%esp),%xmm2 .byte 102,15,58,34,195,0 leal 3(%ebx),%ebp .byte 102,15,58,34,205,0 incl %ebx .byte 102,15,58,34,195,1 incl %ebp .byte 102,15,58,34,205,1 incl %ebx .byte 102,15,58,34,195,2 incl %ebp .byte 102,15,58,34,205,2 movdqa %xmm0,48(%esp) .byte 102,15,56,0,194 movdqu (%edx),%xmm6 movdqa %xmm1,64(%esp) .byte 102,15,56,0,202 pshufd $192,%xmm0,%xmm2 pshufd $128,%xmm0,%xmm3 cmpl $6,%eax jb .L041ctr32_tail pxor %xmm6,%xmm7 shll $4,%ecx movl $16,%ebx movdqa %xmm7,32(%esp) movl %edx,%ebp subl %ecx,%ebx leal 32(%edx,%ecx,1),%edx subl $6,%eax jmp .L042ctr32_loop6 .align 16 .L042ctr32_loop6: pshufd $64,%xmm0,%xmm4 movdqa 32(%esp),%xmm0 pshufd $192,%xmm1,%xmm5 pxor %xmm0,%xmm2 pshufd $128,%xmm1,%xmm6 pxor %xmm0,%xmm3 pshufd $64,%xmm1,%xmm7 movups 16(%ebp),%xmm1 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 .byte 102,15,56,220,209 pxor %xmm0,%xmm6 pxor %xmm0,%xmm7 .byte 102,15,56,220,217 movups 32(%ebp),%xmm0 movl %ebx,%ecx .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 call .L_aesni_encrypt6_enter movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps %xmm1,%xmm2 movups 32(%esi),%xmm1 xorps %xmm0,%xmm3 movups %xmm2,(%edi) movdqa 16(%esp),%xmm0 xorps %xmm1,%xmm4 movdqa 64(%esp),%xmm1 movups %xmm3,16(%edi) movups %xmm4,32(%edi) paddd %xmm0,%xmm1 paddd 48(%esp),%xmm0 movdqa (%esp),%xmm2 movups 48(%esi),%xmm3 movups 64(%esi),%xmm4 xorps %xmm3,%xmm5 movups 80(%esi),%xmm3 leal 96(%esi),%esi movdqa %xmm0,48(%esp) .byte 102,15,56,0,194 xorps %xmm4,%xmm6 movups %xmm5,48(%edi) xorps %xmm3,%xmm7 movdqa %xmm1,64(%esp) .byte 102,15,56,0,202 movups %xmm6,64(%edi) pshufd $192,%xmm0,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi pshufd $128,%xmm0,%xmm3 subl $6,%eax jnc .L042ctr32_loop6 addl $6,%eax jz .L039ctr32_ret movdqu (%ebp),%xmm7 movl %ebp,%edx pxor 32(%esp),%xmm7 movl 240(%ebp),%ecx .L041ctr32_tail: por %xmm7,%xmm2 cmpl $2,%eax jb .L043ctr32_one pshufd $64,%xmm0,%xmm4 por %xmm7,%xmm3 je .L044ctr32_two pshufd $192,%xmm1,%xmm5 por %xmm7,%xmm4 cmpl $4,%eax jb .L045ctr32_three pshufd $128,%xmm1,%xmm6 por %xmm7,%xmm5 je .L046ctr32_four por %xmm7,%xmm6 call _aesni_encrypt6 movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps %xmm1,%xmm2 movups 32(%esi),%xmm1 xorps %xmm0,%xmm3 movups 48(%esi),%xmm0 xorps %xmm1,%xmm4 movups 64(%esi),%xmm1 xorps %xmm0,%xmm5 movups %xmm2,(%edi) xorps %xmm1,%xmm6 movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) jmp .L039ctr32_ret .align 16 .L040ctr32_one_shortcut: movups (%ebx),%xmm2 movl 240(%edx),%ecx .L043ctr32_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L047enc1_loop_7: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L047enc1_loop_7 .byte 102,15,56,221,209 movups (%esi),%xmm6 xorps %xmm2,%xmm6 movups %xmm6,(%edi) jmp .L039ctr32_ret .align 16 .L044ctr32_two: call _aesni_encrypt2 movups (%esi),%xmm5 movups 16(%esi),%xmm6 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) jmp .L039ctr32_ret .align 16 .L045ctr32_three: call _aesni_encrypt3 movups (%esi),%xmm5 movups 16(%esi),%xmm6 xorps %xmm5,%xmm2 movups 32(%esi),%xmm7 xorps %xmm6,%xmm3 movups %xmm2,(%edi) xorps %xmm7,%xmm4 movups %xmm3,16(%edi) movups %xmm4,32(%edi) jmp .L039ctr32_ret .align 16 .L046ctr32_four: call _aesni_encrypt4 movups 
(%esi),%xmm6 movups 16(%esi),%xmm7 movups 32(%esi),%xmm1 xorps %xmm6,%xmm2 movups 48(%esi),%xmm0 xorps %xmm7,%xmm3 movups %xmm2,(%edi) xorps %xmm1,%xmm4 movups %xmm3,16(%edi) xorps %xmm0,%xmm5 movups %xmm4,32(%edi) movups %xmm5,48(%edi) .L039ctr32_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 movdqa %xmm0,32(%esp) pxor %xmm5,%xmm5 movdqa %xmm0,48(%esp) pxor %xmm6,%xmm6 movdqa %xmm0,64(%esp) pxor %xmm7,%xmm7 movl 80(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size aes_hw_ctr32_encrypt_blocks,.-.L_aes_hw_ctr32_encrypt_blocks_begin .globl aes_hw_xts_encrypt .hidden aes_hw_xts_encrypt .type aes_hw_xts_encrypt,@function .align 16 aes_hw_xts_encrypt: .L_aes_hw_xts_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 36(%esp),%edx movl 40(%esp),%esi movl 240(%edx),%ecx movups (%esi),%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L048enc1_loop_8: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L048enc1_loop_8 .byte 102,15,56,221,209 movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl %esp,%ebp subl $120,%esp movl 240(%edx),%ecx andl $-16,%esp movl $135,96(%esp) movl $0,100(%esp) movl $1,104(%esp) movl $0,108(%esp) movl %eax,112(%esp) movl %ebp,116(%esp) movdqa %xmm2,%xmm1 pxor %xmm0,%xmm0 movdqa 96(%esp),%xmm3 pcmpgtd %xmm1,%xmm0 andl $-16,%eax movl %edx,%ebp movl %ecx,%ebx subl $96,%eax jc .L049xts_enc_short shll $4,%ecx movl $16,%ebx subl %ecx,%ebx leal 32(%edx,%ecx,1),%edx jmp .L050xts_enc_loop6 .align 16 .L050xts_enc_loop6: pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,16(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,32(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm7 movdqa %xmm1,64(%esp) paddq %xmm1,%xmm1 movups (%ebp),%xmm0 pand %xmm3,%xmm7 movups (%esi),%xmm2 pxor %xmm1,%xmm7 movl %ebx,%ecx movdqu 16(%esi),%xmm3 xorps %xmm0,%xmm2 movdqu 32(%esi),%xmm4 pxor %xmm0,%xmm3 movdqu 48(%esi),%xmm5 pxor %xmm0,%xmm4 movdqu 64(%esi),%xmm6 pxor %xmm0,%xmm5 movdqu 80(%esi),%xmm1 pxor %xmm0,%xmm6 leal 96(%esi),%esi pxor (%esp),%xmm2 movdqa %xmm7,80(%esp) pxor %xmm1,%xmm7 movups 16(%ebp),%xmm1 pxor 16(%esp),%xmm3 pxor 32(%esp),%xmm4 .byte 102,15,56,220,209 pxor 48(%esp),%xmm5 pxor 64(%esp),%xmm6 .byte 102,15,56,220,217 pxor %xmm0,%xmm7 movups 32(%ebp),%xmm0 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 call .L_aesni_encrypt6_enter movdqa 80(%esp),%xmm1 pxor %xmm0,%xmm0 xorps (%esp),%xmm2 pcmpgtd %xmm1,%xmm0 xorps 16(%esp),%xmm3 movups %xmm2,(%edi) xorps 32(%esp),%xmm4 movups %xmm3,16(%edi) xorps 48(%esp),%xmm5 movups %xmm4,32(%edi) xorps 64(%esp),%xmm6 movups %xmm5,48(%edi) xorps %xmm1,%xmm7 movups %xmm6,64(%edi) pshufd $19,%xmm0,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqa 96(%esp),%xmm3 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 subl $96,%eax jnc .L050xts_enc_loop6 movl 240(%ebp),%ecx movl %ebp,%edx movl %ecx,%ebx .L049xts_enc_short: addl $96,%eax jz .L051xts_enc_done6x movdqa %xmm1,%xmm5 cmpl $32,%eax jb .L052xts_enc_one pshufd 
$19,%xmm0,%xmm2 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 je .L053xts_enc_two pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm6 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 cmpl $64,%eax jb .L054xts_enc_three pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm7 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 movdqa %xmm5,(%esp) movdqa %xmm6,16(%esp) je .L055xts_enc_four movdqa %xmm7,32(%esp) pshufd $19,%xmm0,%xmm7 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm7 pxor %xmm1,%xmm7 movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 pxor (%esp),%xmm2 movdqu 48(%esi),%xmm5 pxor 16(%esp),%xmm3 movdqu 64(%esi),%xmm6 pxor 32(%esp),%xmm4 leal 80(%esi),%esi pxor 48(%esp),%xmm5 movdqa %xmm7,64(%esp) pxor %xmm7,%xmm6 call _aesni_encrypt6 movaps 64(%esp),%xmm1 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps 32(%esp),%xmm4 movups %xmm2,(%edi) xorps 48(%esp),%xmm5 movups %xmm3,16(%edi) xorps %xmm1,%xmm6 movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) leal 80(%edi),%edi jmp .L056xts_enc_done .align 16 .L052xts_enc_one: movups (%esi),%xmm2 leal 16(%esi),%esi xorps %xmm5,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L057enc1_loop_9: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L057enc1_loop_9 .byte 102,15,56,221,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) leal 16(%edi),%edi movdqa %xmm5,%xmm1 jmp .L056xts_enc_done .align 16 .L053xts_enc_two: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 leal 32(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 call _aesni_encrypt2 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 32(%edi),%edi movdqa %xmm6,%xmm1 jmp .L056xts_enc_done .align 16 .L054xts_enc_three: movaps %xmm1,%xmm7 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 leal 48(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 call _aesni_encrypt3 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) leal 48(%edi),%edi movdqa %xmm7,%xmm1 jmp .L056xts_enc_done .align 16 .L055xts_enc_four: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 xorps (%esp),%xmm2 movups 48(%esi),%xmm5 leal 64(%esi),%esi xorps 16(%esp),%xmm3 xorps %xmm7,%xmm4 xorps %xmm6,%xmm5 call _aesni_encrypt4 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) xorps %xmm6,%xmm5 movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) leal 64(%edi),%edi movdqa %xmm6,%xmm1 jmp .L056xts_enc_done .align 16 .L051xts_enc_done6x: movl 112(%esp),%eax andl $15,%eax jz .L058xts_enc_ret movdqa %xmm1,%xmm5 movl %eax,112(%esp) jmp .L059xts_enc_steal .align 16 .L056xts_enc_done: movl 112(%esp),%eax pxor %xmm0,%xmm0 andl $15,%eax jz .L058xts_enc_ret pcmpgtd %xmm1,%xmm0 movl %eax,112(%esp) pshufd $19,%xmm0,%xmm5 paddq %xmm1,%xmm1 pand 96(%esp),%xmm5 pxor %xmm1,%xmm5 .L059xts_enc_steal: movzbl (%esi),%ecx movzbl -16(%edi),%edx leal 1(%esi),%esi movb %cl,-16(%edi) movb %dl,(%edi) leal 1(%edi),%edi subl $1,%eax jnz .L059xts_enc_steal subl 112(%esp),%edi movl %ebp,%edx movl %ebx,%ecx movups -16(%edi),%xmm2 xorps %xmm5,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L060enc1_loop_10: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L060enc1_loop_10 .byte 102,15,56,221,209 xorps 
%xmm5,%xmm2 movups %xmm2,-16(%edi) .L058xts_enc_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 movdqa %xmm0,(%esp) pxor %xmm3,%xmm3 movdqa %xmm0,16(%esp) pxor %xmm4,%xmm4 movdqa %xmm0,32(%esp) pxor %xmm5,%xmm5 movdqa %xmm0,48(%esp) pxor %xmm6,%xmm6 movdqa %xmm0,64(%esp) pxor %xmm7,%xmm7 movdqa %xmm0,80(%esp) movl 116(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size aes_hw_xts_encrypt,.-.L_aes_hw_xts_encrypt_begin .globl aes_hw_xts_decrypt .hidden aes_hw_xts_decrypt .type aes_hw_xts_decrypt,@function .align 16 aes_hw_xts_decrypt: .L_aes_hw_xts_decrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 36(%esp),%edx movl 40(%esp),%esi movl 240(%edx),%ecx movups (%esi),%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L061enc1_loop_11: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L061enc1_loop_11 .byte 102,15,56,221,209 movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl %esp,%ebp subl $120,%esp andl $-16,%esp xorl %ebx,%ebx testl $15,%eax setnz %bl shll $4,%ebx subl %ebx,%eax movl $135,96(%esp) movl $0,100(%esp) movl $1,104(%esp) movl $0,108(%esp) movl %eax,112(%esp) movl %ebp,116(%esp) movl 240(%edx),%ecx movl %edx,%ebp movl %ecx,%ebx movdqa %xmm2,%xmm1 pxor %xmm0,%xmm0 movdqa 96(%esp),%xmm3 pcmpgtd %xmm1,%xmm0 andl $-16,%eax subl $96,%eax jc .L062xts_dec_short shll $4,%ecx movl $16,%ebx subl %ecx,%ebx leal 32(%edx,%ecx,1),%edx jmp .L063xts_dec_loop6 .align 16 .L063xts_dec_loop6: pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,16(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,32(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 pshufd $19,%xmm0,%xmm7 movdqa %xmm1,64(%esp) paddq %xmm1,%xmm1 movups (%ebp),%xmm0 pand %xmm3,%xmm7 movups (%esi),%xmm2 pxor %xmm1,%xmm7 movl %ebx,%ecx movdqu 16(%esi),%xmm3 xorps %xmm0,%xmm2 movdqu 32(%esi),%xmm4 pxor %xmm0,%xmm3 movdqu 48(%esi),%xmm5 pxor %xmm0,%xmm4 movdqu 64(%esi),%xmm6 pxor %xmm0,%xmm5 movdqu 80(%esi),%xmm1 pxor %xmm0,%xmm6 leal 96(%esi),%esi pxor (%esp),%xmm2 movdqa %xmm7,80(%esp) pxor %xmm1,%xmm7 movups 16(%ebp),%xmm1 pxor 16(%esp),%xmm3 pxor 32(%esp),%xmm4 .byte 102,15,56,222,209 pxor 48(%esp),%xmm5 pxor 64(%esp),%xmm6 .byte 102,15,56,222,217 pxor %xmm0,%xmm7 movups 32(%ebp),%xmm0 .byte 102,15,56,222,225 .byte 102,15,56,222,233 .byte 102,15,56,222,241 .byte 102,15,56,222,249 call .L_aesni_decrypt6_enter movdqa 80(%esp),%xmm1 pxor %xmm0,%xmm0 xorps (%esp),%xmm2 pcmpgtd %xmm1,%xmm0 xorps 16(%esp),%xmm3 movups %xmm2,(%edi) xorps 32(%esp),%xmm4 movups %xmm3,16(%edi) xorps 48(%esp),%xmm5 movups %xmm4,32(%edi) xorps 64(%esp),%xmm6 movups %xmm5,48(%edi) xorps %xmm1,%xmm7 movups %xmm6,64(%edi) pshufd $19,%xmm0,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi movdqa 96(%esp),%xmm3 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 subl $96,%eax jnc .L063xts_dec_loop6 movl 240(%ebp),%ecx movl %ebp,%edx movl %ecx,%ebx .L062xts_dec_short: addl $96,%eax jz .L064xts_dec_done6x movdqa %xmm1,%xmm5 cmpl $32,%eax jb .L065xts_dec_one pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 
pxor %xmm2,%xmm1 je .L066xts_dec_two pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm6 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 cmpl $64,%eax jb .L067xts_dec_three pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa %xmm1,%xmm7 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 movdqa %xmm5,(%esp) movdqa %xmm6,16(%esp) je .L068xts_dec_four movdqa %xmm7,32(%esp) pshufd $19,%xmm0,%xmm7 movdqa %xmm1,48(%esp) paddq %xmm1,%xmm1 pand %xmm3,%xmm7 pxor %xmm1,%xmm7 movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 pxor (%esp),%xmm2 movdqu 48(%esi),%xmm5 pxor 16(%esp),%xmm3 movdqu 64(%esi),%xmm6 pxor 32(%esp),%xmm4 leal 80(%esi),%esi pxor 48(%esp),%xmm5 movdqa %xmm7,64(%esp) pxor %xmm7,%xmm6 call _aesni_decrypt6 movaps 64(%esp),%xmm1 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps 32(%esp),%xmm4 movups %xmm2,(%edi) xorps 48(%esp),%xmm5 movups %xmm3,16(%edi) xorps %xmm1,%xmm6 movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) leal 80(%edi),%edi jmp .L069xts_dec_done .align 16 .L065xts_dec_one: movups (%esi),%xmm2 leal 16(%esi),%esi xorps %xmm5,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L070dec1_loop_12: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L070dec1_loop_12 .byte 102,15,56,223,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) leal 16(%edi),%edi movdqa %xmm5,%xmm1 jmp .L069xts_dec_done .align 16 .L066xts_dec_two: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 leal 32(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 call _aesni_decrypt2 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 32(%edi),%edi movdqa %xmm6,%xmm1 jmp .L069xts_dec_done .align 16 .L067xts_dec_three: movaps %xmm1,%xmm7 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 leal 48(%esi),%esi xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 call _aesni_decrypt3 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) movups %xmm3,16(%edi) movups %xmm4,32(%edi) leal 48(%edi),%edi movdqa %xmm7,%xmm1 jmp .L069xts_dec_done .align 16 .L068xts_dec_four: movaps %xmm1,%xmm6 movups (%esi),%xmm2 movups 16(%esi),%xmm3 movups 32(%esi),%xmm4 xorps (%esp),%xmm2 movups 48(%esi),%xmm5 leal 64(%esi),%esi xorps 16(%esp),%xmm3 xorps %xmm7,%xmm4 xorps %xmm6,%xmm5 call _aesni_decrypt4 xorps (%esp),%xmm2 xorps 16(%esp),%xmm3 xorps %xmm7,%xmm4 movups %xmm2,(%edi) xorps %xmm6,%xmm5 movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) leal 64(%edi),%edi movdqa %xmm6,%xmm1 jmp .L069xts_dec_done .align 16 .L064xts_dec_done6x: movl 112(%esp),%eax andl $15,%eax jz .L071xts_dec_ret movl %eax,112(%esp) jmp .L072xts_dec_only_one_more .align 16 .L069xts_dec_done: movl 112(%esp),%eax pxor %xmm0,%xmm0 andl $15,%eax jz .L071xts_dec_ret pcmpgtd %xmm1,%xmm0 movl %eax,112(%esp) pshufd $19,%xmm0,%xmm2 pxor %xmm0,%xmm0 movdqa 96(%esp),%xmm3 paddq %xmm1,%xmm1 pand %xmm3,%xmm2 pcmpgtd %xmm1,%xmm0 pxor %xmm2,%xmm1 .L072xts_dec_only_one_more: pshufd $19,%xmm0,%xmm5 movdqa %xmm1,%xmm6 paddq %xmm1,%xmm1 pand %xmm3,%xmm5 pxor %xmm1,%xmm5 movl %ebp,%edx movl %ebx,%ecx movups (%esi),%xmm2 xorps %xmm5,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L073dec1_loop_13: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L073dec1_loop_13 .byte 102,15,56,223,209 xorps %xmm5,%xmm2 movups %xmm2,(%edi) .L074xts_dec_steal: movzbl 16(%esi),%ecx movzbl (%edi),%edx leal 1(%esi),%esi movb 
%cl,(%edi) movb %dl,16(%edi) leal 1(%edi),%edi subl $1,%eax jnz .L074xts_dec_steal subl 112(%esp),%edi movl %ebp,%edx movl %ebx,%ecx movups (%edi),%xmm2 xorps %xmm6,%xmm2 movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L075dec1_loop_14: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L075dec1_loop_14 .byte 102,15,56,223,209 xorps %xmm6,%xmm2 movups %xmm2,(%edi) .L071xts_dec_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 movdqa %xmm0,(%esp) pxor %xmm3,%xmm3 movdqa %xmm0,16(%esp) pxor %xmm4,%xmm4 movdqa %xmm0,32(%esp) pxor %xmm5,%xmm5 movdqa %xmm0,48(%esp) pxor %xmm6,%xmm6 movdqa %xmm0,64(%esp) pxor %xmm7,%xmm7 movdqa %xmm0,80(%esp) movl 116(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size aes_hw_xts_decrypt,.-.L_aes_hw_xts_decrypt_begin .globl aes_hw_cbc_encrypt .hidden aes_hw_cbc_encrypt .type aes_hw_cbc_encrypt,@function .align 16 aes_hw_cbc_encrypt: .L_aes_hw_cbc_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl %esp,%ebx movl 24(%esp),%edi subl $24,%ebx movl 28(%esp),%eax andl $-16,%ebx movl 32(%esp),%edx movl 36(%esp),%ebp testl %eax,%eax jz .L076cbc_abort cmpl $0,40(%esp) xchgl %esp,%ebx movups (%ebp),%xmm7 movl 240(%edx),%ecx movl %edx,%ebp movl %ebx,16(%esp) movl %ecx,%ebx je .L077cbc_decrypt movaps %xmm7,%xmm2 cmpl $16,%eax jb .L078cbc_enc_tail subl $16,%eax jmp .L079cbc_enc_loop .align 16 .L079cbc_enc_loop: movups (%esi),%xmm7 leal 16(%esi),%esi movups (%edx),%xmm0 movups 16(%edx),%xmm1 xorps %xmm0,%xmm7 leal 32(%edx),%edx xorps %xmm7,%xmm2 .L080enc1_loop_15: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L080enc1_loop_15 .byte 102,15,56,221,209 movl %ebx,%ecx movl %ebp,%edx movups %xmm2,(%edi) leal 16(%edi),%edi subl $16,%eax jnc .L079cbc_enc_loop addl $16,%eax jnz .L078cbc_enc_tail movaps %xmm2,%xmm7 pxor %xmm2,%xmm2 jmp .L081cbc_ret .L078cbc_enc_tail: movl %eax,%ecx .long 2767451785 movl $16,%ecx subl %eax,%ecx xorl %eax,%eax .long 2868115081 leal -16(%edi),%edi movl %ebx,%ecx movl %edi,%esi movl %ebp,%edx jmp .L079cbc_enc_loop .align 16 .L077cbc_decrypt: cmpl $80,%eax jbe .L082cbc_dec_tail movaps %xmm7,(%esp) subl $80,%eax jmp .L083cbc_dec_loop6_enter .align 16 .L084cbc_dec_loop6: movaps %xmm0,(%esp) movups %xmm7,(%edi) leal 16(%edi),%edi .L083cbc_dec_loop6_enter: movdqu (%esi),%xmm2 movdqu 16(%esi),%xmm3 movdqu 32(%esi),%xmm4 movdqu 48(%esi),%xmm5 movdqu 64(%esi),%xmm6 movdqu 80(%esi),%xmm7 call _aesni_decrypt6 movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps (%esp),%xmm2 xorps %xmm1,%xmm3 movups 32(%esi),%xmm1 xorps %xmm0,%xmm4 movups 48(%esi),%xmm0 xorps %xmm1,%xmm5 movups 64(%esi),%xmm1 xorps %xmm0,%xmm6 movups 80(%esi),%xmm0 xorps %xmm1,%xmm7 movups %xmm2,(%edi) movups %xmm3,16(%edi) leal 96(%esi),%esi movups %xmm4,32(%edi) movl %ebx,%ecx movups %xmm5,48(%edi) movl %ebp,%edx movups %xmm6,64(%edi) leal 80(%edi),%edi subl $96,%eax ja .L084cbc_dec_loop6 movaps %xmm7,%xmm2 movaps %xmm0,%xmm7 addl $80,%eax jle .L085cbc_dec_clear_tail_collected movups %xmm2,(%edi) leal 16(%edi),%edi .L082cbc_dec_tail: movups (%esi),%xmm2 movaps %xmm2,%xmm6 cmpl $16,%eax jbe .L086cbc_dec_one movups 16(%esi),%xmm3 movaps %xmm3,%xmm5 cmpl $32,%eax jbe .L087cbc_dec_two movups 32(%esi),%xmm4 cmpl $48,%eax jbe .L088cbc_dec_three movups 48(%esi),%xmm5 cmpl $64,%eax jbe .L089cbc_dec_four movups 64(%esi),%xmm6 movaps %xmm7,(%esp) movups (%esi),%xmm2 xorps %xmm7,%xmm7 call _aesni_decrypt6 movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps (%esp),%xmm2 xorps 
%xmm1,%xmm3 movups 32(%esi),%xmm1 xorps %xmm0,%xmm4 movups 48(%esi),%xmm0 xorps %xmm1,%xmm5 movups 64(%esi),%xmm7 xorps %xmm0,%xmm6 movups %xmm2,(%edi) movups %xmm3,16(%edi) pxor %xmm3,%xmm3 movups %xmm4,32(%edi) pxor %xmm4,%xmm4 movups %xmm5,48(%edi) pxor %xmm5,%xmm5 leal 64(%edi),%edi movaps %xmm6,%xmm2 pxor %xmm6,%xmm6 subl $80,%eax jmp .L090cbc_dec_tail_collected .align 16 .L086cbc_dec_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L091dec1_loop_16: .byte 102,15,56,222,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L091dec1_loop_16 .byte 102,15,56,223,209 xorps %xmm7,%xmm2 movaps %xmm6,%xmm7 subl $16,%eax jmp .L090cbc_dec_tail_collected .align 16 .L087cbc_dec_two: call _aesni_decrypt2 xorps %xmm7,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movaps %xmm3,%xmm2 pxor %xmm3,%xmm3 leal 16(%edi),%edi movaps %xmm5,%xmm7 subl $32,%eax jmp .L090cbc_dec_tail_collected .align 16 .L088cbc_dec_three: call _aesni_decrypt3 xorps %xmm7,%xmm2 xorps %xmm6,%xmm3 xorps %xmm5,%xmm4 movups %xmm2,(%edi) movaps %xmm4,%xmm2 pxor %xmm4,%xmm4 movups %xmm3,16(%edi) pxor %xmm3,%xmm3 leal 32(%edi),%edi movups 32(%esi),%xmm7 subl $48,%eax jmp .L090cbc_dec_tail_collected .align 16 .L089cbc_dec_four: call _aesni_decrypt4 movups 16(%esi),%xmm1 movups 32(%esi),%xmm0 xorps %xmm7,%xmm2 movups 48(%esi),%xmm7 xorps %xmm6,%xmm3 movups %xmm2,(%edi) xorps %xmm1,%xmm4 movups %xmm3,16(%edi) pxor %xmm3,%xmm3 xorps %xmm0,%xmm5 movups %xmm4,32(%edi) pxor %xmm4,%xmm4 leal 48(%edi),%edi movaps %xmm5,%xmm2 pxor %xmm5,%xmm5 subl $64,%eax jmp .L090cbc_dec_tail_collected .align 16 .L085cbc_dec_clear_tail_collected: pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 .L090cbc_dec_tail_collected: andl $15,%eax jnz .L092cbc_dec_tail_partial movups %xmm2,(%edi) pxor %xmm0,%xmm0 jmp .L081cbc_ret .align 16 .L092cbc_dec_tail_partial: movaps %xmm2,(%esp) pxor %xmm0,%xmm0 movl $16,%ecx movl %esp,%esi subl %eax,%ecx .long 2767451785 movdqa %xmm2,(%esp) .L081cbc_ret: movl 16(%esp),%esp movl 36(%esp),%ebp pxor %xmm2,%xmm2 pxor %xmm1,%xmm1 movups %xmm7,(%ebp) pxor %xmm7,%xmm7 .L076cbc_abort: popl %edi popl %esi popl %ebx popl %ebp ret .size aes_hw_cbc_encrypt,.-.L_aes_hw_cbc_encrypt_begin .hidden _aesni_set_encrypt_key .type _aesni_set_encrypt_key,@function .align 16 _aesni_set_encrypt_key: pushl %ebp pushl %ebx testl %eax,%eax jz .L093bad_pointer testl %edx,%edx jz .L093bad_pointer call .L094pic .L094pic: popl %ebx leal .Lkey_const-.L094pic(%ebx),%ebx leal OPENSSL_ia32cap_P-.Lkey_const(%ebx),%ebp movups (%eax),%xmm0 xorps %xmm4,%xmm4 movl 4(%ebp),%ebp leal 16(%edx),%edx andl $268437504,%ebp cmpl $256,%ecx je .L09514rounds cmpl $192,%ecx je .L09612rounds cmpl $128,%ecx jne .L097bad_keybits .align 16 .L09810rounds: cmpl $268435456,%ebp je .L09910rounds_alt movl $9,%ecx movups %xmm0,-16(%edx) .byte 102,15,58,223,200,1 call .L100key_128_cold .byte 102,15,58,223,200,2 call .L101key_128 .byte 102,15,58,223,200,4 call .L101key_128 .byte 102,15,58,223,200,8 call .L101key_128 .byte 102,15,58,223,200,16 call .L101key_128 .byte 102,15,58,223,200,32 call .L101key_128 .byte 102,15,58,223,200,64 call .L101key_128 .byte 102,15,58,223,200,128 call .L101key_128 .byte 102,15,58,223,200,27 call .L101key_128 .byte 102,15,58,223,200,54 call .L101key_128 movups %xmm0,(%edx) movl %ecx,80(%edx) jmp .L102good_key .align 16 .L101key_128: movups %xmm0,(%edx) leal 16(%edx),%edx .L100key_128_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps 
%xmm1,%xmm0 ret .align 16 .L09910rounds_alt: movdqa (%ebx),%xmm5 movl $8,%ecx movdqa 32(%ebx),%xmm4 movdqa %xmm0,%xmm2 movdqu %xmm0,-16(%edx) .L103loop_key128: .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 leal 16(%edx),%edx movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,-16(%edx) movdqa %xmm0,%xmm2 decl %ecx jnz .L103loop_key128 movdqa 48(%ebx),%xmm4 .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,(%edx) movdqa %xmm0,%xmm2 .byte 102,15,56,0,197 .byte 102,15,56,221,196 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,16(%edx) movl $9,%ecx movl %ecx,96(%edx) jmp .L102good_key .align 16 .L09612rounds: movq 16(%eax),%xmm2 cmpl $268435456,%ebp je .L10412rounds_alt movl $11,%ecx movups %xmm0,-16(%edx) .byte 102,15,58,223,202,1 call .L105key_192a_cold .byte 102,15,58,223,202,2 call .L106key_192b .byte 102,15,58,223,202,4 call .L107key_192a .byte 102,15,58,223,202,8 call .L106key_192b .byte 102,15,58,223,202,16 call .L107key_192a .byte 102,15,58,223,202,32 call .L106key_192b .byte 102,15,58,223,202,64 call .L107key_192a .byte 102,15,58,223,202,128 call .L106key_192b movups %xmm0,(%edx) movl %ecx,48(%edx) jmp .L102good_key .align 16 .L107key_192a: movups %xmm0,(%edx) leal 16(%edx),%edx .align 16 .L105key_192a_cold: movaps %xmm2,%xmm5 .L108key_192b_warm: shufps $16,%xmm0,%xmm4 movdqa %xmm2,%xmm3 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 pslldq $4,%xmm3 xorps %xmm4,%xmm0 pshufd $85,%xmm1,%xmm1 pxor %xmm3,%xmm2 pxor %xmm1,%xmm0 pshufd $255,%xmm0,%xmm3 pxor %xmm3,%xmm2 ret .align 16 .L106key_192b: movaps %xmm0,%xmm3 shufps $68,%xmm0,%xmm5 movups %xmm5,(%edx) shufps $78,%xmm2,%xmm3 movups %xmm3,16(%edx) leal 32(%edx),%edx jmp .L108key_192b_warm .align 16 .L10412rounds_alt: movdqa 16(%ebx),%xmm5 movdqa 32(%ebx),%xmm4 movl $8,%ecx movdqu %xmm0,-16(%edx) .L109loop_key192: movq %xmm2,(%edx) movdqa %xmm2,%xmm1 .byte 102,15,56,0,213 .byte 102,15,56,221,212 pslld $1,%xmm4 leal 24(%edx),%edx movdqa %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm3,%xmm0 pshufd $255,%xmm0,%xmm3 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pxor %xmm2,%xmm0 pxor %xmm3,%xmm2 movdqu %xmm0,-16(%edx) decl %ecx jnz .L109loop_key192 movl $11,%ecx movl %ecx,32(%edx) jmp .L102good_key .align 16 .L09514rounds: movups 16(%eax),%xmm2 leal 16(%edx),%edx cmpl $268435456,%ebp je .L11014rounds_alt movl $13,%ecx movups %xmm0,-32(%edx) movups %xmm2,-16(%edx) .byte 102,15,58,223,202,1 call .L111key_256a_cold .byte 102,15,58,223,200,1 call .L112key_256b .byte 102,15,58,223,202,2 call .L113key_256a .byte 102,15,58,223,200,2 call .L112key_256b .byte 102,15,58,223,202,4 call .L113key_256a .byte 102,15,58,223,200,4 call .L112key_256b .byte 102,15,58,223,202,8 call .L113key_256a .byte 102,15,58,223,200,8 call .L112key_256b .byte 102,15,58,223,202,16 call .L113key_256a .byte 102,15,58,223,200,16 call .L112key_256b .byte 102,15,58,223,202,32 call .L113key_256a .byte 102,15,58,223,200,32 call .L112key_256b .byte 102,15,58,223,202,64 call .L113key_256a movups %xmm0,(%edx) movl %ecx,16(%edx) xorl %eax,%eax jmp .L102good_key .align 16 .L113key_256a: movups %xmm2,(%edx) leal 16(%edx),%edx .L111key_256a_cold: shufps $16,%xmm0,%xmm4 
xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .align 16 .L112key_256b: movups %xmm0,(%edx) leal 16(%edx),%edx shufps $16,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $140,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 ret .align 16 .L11014rounds_alt: movdqa (%ebx),%xmm5 movdqa 32(%ebx),%xmm4 movl $7,%ecx movdqu %xmm0,-32(%edx) movdqa %xmm2,%xmm1 movdqu %xmm2,-16(%edx) .L114loop_key256: .byte 102,15,56,0,213 .byte 102,15,56,221,212 movdqa %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm3,%xmm0 pslld $1,%xmm4 pxor %xmm2,%xmm0 movdqu %xmm0,(%edx) decl %ecx jz .L115done_key256 pshufd $255,%xmm0,%xmm2 pxor %xmm3,%xmm3 .byte 102,15,56,221,211 movdqa %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm3,%xmm1 pxor %xmm1,%xmm2 movdqu %xmm2,16(%edx) leal 32(%edx),%edx movdqa %xmm2,%xmm1 jmp .L114loop_key256 .L115done_key256: movl $13,%ecx movl %ecx,16(%edx) .L102good_key: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 xorl %eax,%eax popl %ebx popl %ebp ret .align 4 .L093bad_pointer: movl $-1,%eax popl %ebx popl %ebp ret .align 4 .L097bad_keybits: pxor %xmm0,%xmm0 movl $-2,%eax popl %ebx popl %ebp ret .size _aesni_set_encrypt_key,.-_aesni_set_encrypt_key .globl aes_hw_set_encrypt_key .hidden aes_hw_set_encrypt_key .type aes_hw_set_encrypt_key,@function .align 16 aes_hw_set_encrypt_key: .L_aes_hw_set_encrypt_key_begin: #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L116pic .L116pic: popl %ebx leal BORINGSSL_function_hit+3-.L116pic(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 4(%esp),%eax movl 8(%esp),%ecx movl 12(%esp),%edx call _aesni_set_encrypt_key ret .size aes_hw_set_encrypt_key,.-.L_aes_hw_set_encrypt_key_begin .globl aes_hw_set_decrypt_key .hidden aes_hw_set_decrypt_key .type aes_hw_set_decrypt_key,@function .align 16 aes_hw_set_decrypt_key: .L_aes_hw_set_decrypt_key_begin: movl 4(%esp),%eax movl 8(%esp),%ecx movl 12(%esp),%edx call _aesni_set_encrypt_key movl 12(%esp),%edx shll $4,%ecx testl %eax,%eax jnz .L117dec_key_ret leal 16(%edx,%ecx,1),%eax movups (%edx),%xmm0 movups (%eax),%xmm1 movups %xmm0,(%eax) movups %xmm1,(%edx) leal 16(%edx),%edx leal -16(%eax),%eax .L118dec_key_inverse: movups (%edx),%xmm0 movups (%eax),%xmm1 .byte 102,15,56,219,192 .byte 102,15,56,219,201 leal 16(%edx),%edx leal -16(%eax),%eax movups %xmm0,16(%eax) movups %xmm1,-16(%edx) cmpl %edx,%eax ja .L118dec_key_inverse movups (%edx),%xmm0 .byte 102,15,56,219,192 movups %xmm0,(%edx) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 xorl %eax,%eax .L117dec_key_ret: ret .size aes_hw_set_decrypt_key,.-.L_aes_hw_set_decrypt_key_begin .align 64 .Lkey_const: .long 202313229,202313229,202313229,202313229 .long 67569157,67569157,67569157,67569157 .long 1,1,1,1 .long 27,27,27,27 .byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69 .byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83 .byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115 .byte 115,108,46,111,114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
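The module above exports the AES-NI entry points (aes_hw_set_encrypt_key, aes_hw_set_decrypt_key, aes_hw_ecb_encrypt, the ccm64 and ctr32 block routines, aes_hw_xts_encrypt/decrypt and aes_hw_cbc_encrypt). For orientation, a minimal C caller sketch follows; the prototypes are inferred from the stack-argument order visible in the assembly (in, out, length, key, ivec, enc for CBC), and the AES_KEY_SKETCH type is a stand-in assumption, not the library's published header.

/* Caller sketch only: prototypes inferred from the assembly above
 * (20(%esp)=in, 24(%esp)=out, 28(%esp)=len, 32(%esp)=key, 36(%esp)=ivec,
 * 40(%esp)=enc); the key type is an assumption sized so the round count
 * sits at byte offset 240, where the code reads it. */
#include <stdint.h>
#include <stddef.h>

typedef struct {
    uint32_t rd_key[4 * (14 + 1)];  /* room for up to 14 round keys */
    int rounds;                     /* read by the asm at offset 240 */
} AES_KEY_SKETCH;

int  aes_hw_set_encrypt_key(const uint8_t *user_key, int bits, AES_KEY_SKETCH *key);
void aes_hw_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
                        const AES_KEY_SKETCH *key, uint8_t ivec[16], int enc);

/* CBC-encrypt a buffer (length a multiple of 16) under a 128-bit key. */
static void cbc_encrypt_sketch(uint8_t *buf, size_t len,
                               const uint8_t key_bytes[16], uint8_t iv[16]) {
    AES_KEY_SKETCH key;
    if (aes_hw_set_encrypt_key(key_bytes, 128, &key) == 0) {
        aes_hw_cbc_encrypt(buf, buf, len, &key, iv, 1 /* encrypt */);
    }
}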
wlsfx/bnbb
5,668
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86/crypto/fipsmodule/ghash-ssse3-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl gcm_gmult_ssse3 .hidden gcm_gmult_ssse3 .type gcm_gmult_ssse3,@function .align 16 gcm_gmult_ssse3: .L_gcm_gmult_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%edi movl 24(%esp),%esi movdqu (%edi),%xmm0 call .L000pic_point .L000pic_point: popl %eax movdqa .Lreverse_bytes-.L000pic_point(%eax),%xmm7 movdqa .Llow4_mask-.L000pic_point(%eax),%xmm2 .byte 102,15,56,0,199 movdqa %xmm2,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm0 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 movl $5,%eax .L001loop_row_1: movdqa (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz .L001loop_row_1 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $5,%eax .L002loop_row_2: movdqa (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz .L002loop_row_2 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $6,%eax .L003loop_row_3: movdqa (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz .L003loop_row_3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 .byte 102,15,56,0,215 movdqu %xmm2,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 popl %edi popl %esi popl %ebx popl %ebp ret .size gcm_gmult_ssse3,.-.L_gcm_gmult_ssse3_begin .globl gcm_ghash_ssse3 .hidden gcm_ghash_ssse3 .type gcm_ghash_ssse3,@function .align 16 gcm_ghash_ssse3: .L_gcm_ghash_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%edi movl 24(%esp),%esi movl 28(%esp),%edx movl 32(%esp),%ecx movdqu (%edi),%xmm0 call .L004pic_point .L004pic_point: popl %ebx movdqa .Lreverse_bytes-.L004pic_point(%ebx),%xmm7 andl $-16,%ecx .byte 102,15,56,0,199 pxor %xmm3,%xmm3 .L005loop_ghash: movdqa .Llow4_mask-.L004pic_point(%ebx),%xmm2 movdqu (%edx),%xmm1 .byte 102,15,56,0,207 pxor %xmm1,%xmm0 movdqa %xmm2,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm0 pxor %xmm2,%xmm2 movl $5,%eax .L006loop_row_4: movdqa (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 
psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz .L006loop_row_4 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $5,%eax .L007loop_row_5: movdqa (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz .L007loop_row_5 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movl $6,%eax .L008loop_row_6: movdqa (%esi),%xmm4 leal 16(%esi),%esi movdqa %xmm2,%xmm6 .byte 102,15,58,15,243,1 movdqa %xmm6,%xmm3 psrldq $1,%xmm2 movdqa %xmm4,%xmm5 .byte 102,15,56,0,224 .byte 102,15,56,0,233 pxor %xmm5,%xmm2 movdqa %xmm4,%xmm5 psllq $60,%xmm5 movdqa %xmm5,%xmm6 pslldq $8,%xmm6 pxor %xmm6,%xmm3 psrldq $8,%xmm5 pxor %xmm5,%xmm2 psrlq $4,%xmm4 pxor %xmm4,%xmm2 subl $1,%eax jnz .L008loop_row_6 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $1,%xmm3 pxor %xmm3,%xmm2 psrlq $5,%xmm3 pxor %xmm3,%xmm2 pxor %xmm3,%xmm3 movdqa %xmm2,%xmm0 leal -256(%esi),%esi leal 16(%edx),%edx subl $16,%ecx jnz .L005loop_ghash .byte 102,15,56,0,199 movdqu %xmm0,(%edi) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 popl %edi popl %esi popl %ebx popl %ebp ret .size gcm_ghash_ssse3,.-.L_gcm_ghash_ssse3_begin .align 16 .Lreverse_bytes: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .align 16 .Llow4_mask: .long 252645135,252645135,252645135,252645135 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
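This ghash-ssse3-x86.S module is the constant-time SSSE3 fallback for GHASH, used when carry-less multiplication is unavailable: gcm_gmult_ssse3 multiplies the accumulator Xi by H once, and gcm_ghash_ssse3 folds whole 16-byte input blocks into Xi (the byte length is masked down to a multiple of 16). A hedged declaration sketch follows, with the argument order taken from the loads at 20(%esp)..32(%esp); the Htable element type is an assumption based on the usual 16-entry precomputed-table layout, not a published header.

/* Sketch only: argument order inferred from the assembly above;
 * the Htable element type is an assumption. */
#include <stdint.h>
#include <stddef.h>

typedef struct { uint64_t hi, lo; } u128_sketch;

void gcm_gmult_ssse3(uint8_t Xi[16], const u128_sketch Htable[16]);
void gcm_ghash_ssse3(uint8_t Xi[16], const u128_sketch Htable[16],
                     const uint8_t *inp, size_t len);  /* processes len & ~15 bytes */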
wlsfx/bnbb
15,486
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86/crypto/fipsmodule/bn-586.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl bn_mul_add_words .hidden bn_mul_add_words .type bn_mul_add_words,@function .align 16 bn_mul_add_words: .L_bn_mul_add_words_begin: call .L000PIC_me_up .L000PIC_me_up: popl %eax leal OPENSSL_ia32cap_P-.L000PIC_me_up(%eax),%eax btl $26,(%eax) jnc .L001maw_non_sse2 movl 4(%esp),%eax movl 8(%esp),%edx movl 12(%esp),%ecx movd 16(%esp),%mm0 pxor %mm1,%mm1 jmp .L002maw_sse2_entry .align 16 .L003maw_sse2_unrolled: movd (%eax),%mm3 paddq %mm3,%mm1 movd (%edx),%mm2 pmuludq %mm0,%mm2 movd 4(%edx),%mm4 pmuludq %mm0,%mm4 movd 8(%edx),%mm6 pmuludq %mm0,%mm6 movd 12(%edx),%mm7 pmuludq %mm0,%mm7 paddq %mm2,%mm1 movd 4(%eax),%mm3 paddq %mm4,%mm3 movd 8(%eax),%mm5 paddq %mm6,%mm5 movd 12(%eax),%mm4 paddq %mm4,%mm7 movd %mm1,(%eax) movd 16(%edx),%mm2 pmuludq %mm0,%mm2 psrlq $32,%mm1 movd 20(%edx),%mm4 pmuludq %mm0,%mm4 paddq %mm3,%mm1 movd 24(%edx),%mm6 pmuludq %mm0,%mm6 movd %mm1,4(%eax) psrlq $32,%mm1 movd 28(%edx),%mm3 addl $32,%edx pmuludq %mm0,%mm3 paddq %mm5,%mm1 movd 16(%eax),%mm5 paddq %mm5,%mm2 movd %mm1,8(%eax) psrlq $32,%mm1 paddq %mm7,%mm1 movd 20(%eax),%mm5 paddq %mm5,%mm4 movd %mm1,12(%eax) psrlq $32,%mm1 paddq %mm2,%mm1 movd 24(%eax),%mm5 paddq %mm5,%mm6 movd %mm1,16(%eax) psrlq $32,%mm1 paddq %mm4,%mm1 movd 28(%eax),%mm5 paddq %mm5,%mm3 movd %mm1,20(%eax) psrlq $32,%mm1 paddq %mm6,%mm1 movd %mm1,24(%eax) psrlq $32,%mm1 paddq %mm3,%mm1 movd %mm1,28(%eax) leal 32(%eax),%eax psrlq $32,%mm1 subl $8,%ecx jz .L004maw_sse2_exit .L002maw_sse2_entry: testl $4294967288,%ecx jnz .L003maw_sse2_unrolled .align 4 .L005maw_sse2_loop: movd (%edx),%mm2 movd (%eax),%mm3 pmuludq %mm0,%mm2 leal 4(%edx),%edx paddq %mm3,%mm1 paddq %mm2,%mm1 movd %mm1,(%eax) subl $1,%ecx psrlq $32,%mm1 leal 4(%eax),%eax jnz .L005maw_sse2_loop .L004maw_sse2_exit: movd %mm1,%eax emms ret .align 16 .L001maw_non_sse2: pushl %ebp pushl %ebx pushl %esi pushl %edi xorl %esi,%esi movl 20(%esp),%edi movl 28(%esp),%ecx movl 24(%esp),%ebx andl $4294967288,%ecx movl 32(%esp),%ebp pushl %ecx jz .L006maw_finish .align 16 .L007maw_loop: movl (%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl (%edi),%eax adcl $0,%edx movl %eax,(%edi) movl %edx,%esi movl 4(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 4(%edi),%eax adcl $0,%edx movl %eax,4(%edi) movl %edx,%esi movl 8(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 8(%edi),%eax adcl $0,%edx movl %eax,8(%edi) movl %edx,%esi movl 12(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 12(%edi),%eax adcl $0,%edx movl %eax,12(%edi) movl %edx,%esi movl 16(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 16(%edi),%eax adcl $0,%edx movl %eax,16(%edi) movl %edx,%esi movl 20(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 20(%edi),%eax adcl $0,%edx movl %eax,20(%edi) movl %edx,%esi movl 24(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 24(%edi),%eax adcl $0,%edx movl %eax,24(%edi) movl %edx,%esi movl 28(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 28(%edi),%eax adcl $0,%edx movl %eax,28(%edi) movl %edx,%esi subl $8,%ecx leal 32(%ebx),%ebx leal 32(%edi),%edi jnz .L007maw_loop .L006maw_finish: movl 32(%esp),%ecx andl $7,%ecx jnz .L008maw_finish2 jmp .L009maw_end .L008maw_finish2: movl (%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl (%edi),%eax adcl $0,%edx decl %ecx movl %eax,(%edi) movl %edx,%esi jz .L009maw_end movl 4(%ebx),%eax 
mull %ebp addl %esi,%eax adcl $0,%edx addl 4(%edi),%eax adcl $0,%edx decl %ecx movl %eax,4(%edi) movl %edx,%esi jz .L009maw_end movl 8(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 8(%edi),%eax adcl $0,%edx decl %ecx movl %eax,8(%edi) movl %edx,%esi jz .L009maw_end movl 12(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 12(%edi),%eax adcl $0,%edx decl %ecx movl %eax,12(%edi) movl %edx,%esi jz .L009maw_end movl 16(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 16(%edi),%eax adcl $0,%edx decl %ecx movl %eax,16(%edi) movl %edx,%esi jz .L009maw_end movl 20(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 20(%edi),%eax adcl $0,%edx decl %ecx movl %eax,20(%edi) movl %edx,%esi jz .L009maw_end movl 24(%ebx),%eax mull %ebp addl %esi,%eax adcl $0,%edx addl 24(%edi),%eax adcl $0,%edx movl %eax,24(%edi) movl %edx,%esi .L009maw_end: movl %esi,%eax popl %ecx popl %edi popl %esi popl %ebx popl %ebp ret .size bn_mul_add_words,.-.L_bn_mul_add_words_begin .globl bn_mul_words .hidden bn_mul_words .type bn_mul_words,@function .align 16 bn_mul_words: .L_bn_mul_words_begin: call .L010PIC_me_up .L010PIC_me_up: popl %eax leal OPENSSL_ia32cap_P-.L010PIC_me_up(%eax),%eax btl $26,(%eax) jnc .L011mw_non_sse2 movl 4(%esp),%eax movl 8(%esp),%edx movl 12(%esp),%ecx movd 16(%esp),%mm0 pxor %mm1,%mm1 .align 16 .L012mw_sse2_loop: movd (%edx),%mm2 pmuludq %mm0,%mm2 leal 4(%edx),%edx paddq %mm2,%mm1 movd %mm1,(%eax) subl $1,%ecx psrlq $32,%mm1 leal 4(%eax),%eax jnz .L012mw_sse2_loop movd %mm1,%eax emms ret .align 16 .L011mw_non_sse2: pushl %ebp pushl %ebx pushl %esi pushl %edi xorl %esi,%esi movl 20(%esp),%edi movl 24(%esp),%ebx movl 28(%esp),%ebp movl 32(%esp),%ecx andl $4294967288,%ebp jz .L013mw_finish .L014mw_loop: movl (%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,(%edi) movl %edx,%esi movl 4(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,4(%edi) movl %edx,%esi movl 8(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,8(%edi) movl %edx,%esi movl 12(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,12(%edi) movl %edx,%esi movl 16(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,16(%edi) movl %edx,%esi movl 20(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,20(%edi) movl %edx,%esi movl 24(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,24(%edi) movl %edx,%esi movl 28(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,28(%edi) movl %edx,%esi addl $32,%ebx addl $32,%edi subl $8,%ebp jz .L013mw_finish jmp .L014mw_loop .L013mw_finish: movl 28(%esp),%ebp andl $7,%ebp jnz .L015mw_finish2 jmp .L016mw_end .L015mw_finish2: movl (%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,(%edi) movl %edx,%esi decl %ebp jz .L016mw_end movl 4(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,4(%edi) movl %edx,%esi decl %ebp jz .L016mw_end movl 8(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,8(%edi) movl %edx,%esi decl %ebp jz .L016mw_end movl 12(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,12(%edi) movl %edx,%esi decl %ebp jz .L016mw_end movl 16(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,16(%edi) movl %edx,%esi decl %ebp jz .L016mw_end movl 20(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,20(%edi) movl %edx,%esi decl %ebp jz .L016mw_end movl 24(%ebx),%eax mull %ecx addl %esi,%eax adcl $0,%edx movl %eax,24(%edi) movl %edx,%esi .L016mw_end: movl %esi,%eax popl %edi popl %esi popl %ebx popl %ebp ret .size bn_mul_words,.-.L_bn_mul_words_begin .globl bn_sqr_words 
.hidden bn_sqr_words .type bn_sqr_words,@function .align 16 bn_sqr_words: .L_bn_sqr_words_begin: call .L017PIC_me_up .L017PIC_me_up: popl %eax leal OPENSSL_ia32cap_P-.L017PIC_me_up(%eax),%eax btl $26,(%eax) jnc .L018sqr_non_sse2 movl 4(%esp),%eax movl 8(%esp),%edx movl 12(%esp),%ecx .align 16 .L019sqr_sse2_loop: movd (%edx),%mm0 pmuludq %mm0,%mm0 leal 4(%edx),%edx movq %mm0,(%eax) subl $1,%ecx leal 8(%eax),%eax jnz .L019sqr_sse2_loop emms ret .align 16 .L018sqr_non_sse2: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%ebx andl $4294967288,%ebx jz .L020sw_finish .L021sw_loop: movl (%edi),%eax mull %eax movl %eax,(%esi) movl %edx,4(%esi) movl 4(%edi),%eax mull %eax movl %eax,8(%esi) movl %edx,12(%esi) movl 8(%edi),%eax mull %eax movl %eax,16(%esi) movl %edx,20(%esi) movl 12(%edi),%eax mull %eax movl %eax,24(%esi) movl %edx,28(%esi) movl 16(%edi),%eax mull %eax movl %eax,32(%esi) movl %edx,36(%esi) movl 20(%edi),%eax mull %eax movl %eax,40(%esi) movl %edx,44(%esi) movl 24(%edi),%eax mull %eax movl %eax,48(%esi) movl %edx,52(%esi) movl 28(%edi),%eax mull %eax movl %eax,56(%esi) movl %edx,60(%esi) addl $32,%edi addl $64,%esi subl $8,%ebx jnz .L021sw_loop .L020sw_finish: movl 28(%esp),%ebx andl $7,%ebx jz .L022sw_end movl (%edi),%eax mull %eax movl %eax,(%esi) decl %ebx movl %edx,4(%esi) jz .L022sw_end movl 4(%edi),%eax mull %eax movl %eax,8(%esi) decl %ebx movl %edx,12(%esi) jz .L022sw_end movl 8(%edi),%eax mull %eax movl %eax,16(%esi) decl %ebx movl %edx,20(%esi) jz .L022sw_end movl 12(%edi),%eax mull %eax movl %eax,24(%esi) decl %ebx movl %edx,28(%esi) jz .L022sw_end movl 16(%edi),%eax mull %eax movl %eax,32(%esi) decl %ebx movl %edx,36(%esi) jz .L022sw_end movl 20(%edi),%eax mull %eax movl %eax,40(%esi) decl %ebx movl %edx,44(%esi) jz .L022sw_end movl 24(%edi),%eax mull %eax movl %eax,48(%esi) movl %edx,52(%esi) .L022sw_end: popl %edi popl %esi popl %ebx popl %ebp ret .size bn_sqr_words,.-.L_bn_sqr_words_begin .globl bn_div_words .hidden bn_div_words .type bn_div_words,@function .align 16 bn_div_words: .L_bn_div_words_begin: movl 4(%esp),%edx movl 8(%esp),%eax movl 12(%esp),%ecx divl %ecx ret .size bn_div_words,.-.L_bn_div_words_begin .globl bn_add_words .hidden bn_add_words .type bn_add_words,@function .align 16 bn_add_words: .L_bn_add_words_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%ebx movl 24(%esp),%esi movl 28(%esp),%edi movl 32(%esp),%ebp xorl %eax,%eax andl $4294967288,%ebp jz .L023aw_finish .L024aw_loop: movl (%esi),%ecx movl (%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,(%ebx) movl 4(%esi),%ecx movl 4(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,4(%ebx) movl 8(%esi),%ecx movl 8(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,8(%ebx) movl 12(%esi),%ecx movl 12(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,12(%ebx) movl 16(%esi),%ecx movl 16(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,16(%ebx) movl 20(%esi),%ecx movl 20(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,20(%ebx) movl 24(%esi),%ecx movl 24(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) movl 28(%esi),%ecx movl 28(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,28(%ebx) addl $32,%esi 
addl $32,%edi addl $32,%ebx subl $8,%ebp jnz .L024aw_loop .L023aw_finish: movl 32(%esp),%ebp andl $7,%ebp jz .L025aw_end movl (%esi),%ecx movl (%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,(%ebx) jz .L025aw_end movl 4(%esi),%ecx movl 4(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,4(%ebx) jz .L025aw_end movl 8(%esi),%ecx movl 8(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,8(%ebx) jz .L025aw_end movl 12(%esi),%ecx movl 12(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,12(%ebx) jz .L025aw_end movl 16(%esi),%ecx movl 16(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,16(%ebx) jz .L025aw_end movl 20(%esi),%ecx movl 20(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,20(%ebx) jz .L025aw_end movl 24(%esi),%ecx movl 24(%edi),%edx addl %eax,%ecx movl $0,%eax adcl %eax,%eax addl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) .L025aw_end: popl %edi popl %esi popl %ebx popl %ebp ret .size bn_add_words,.-.L_bn_add_words_begin .globl bn_sub_words .hidden bn_sub_words .type bn_sub_words,@function .align 16 bn_sub_words: .L_bn_sub_words_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%ebx movl 24(%esp),%esi movl 28(%esp),%edi movl 32(%esp),%ebp xorl %eax,%eax andl $4294967288,%ebp jz .L026aw_finish .L027aw_loop: movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,(%ebx) movl 4(%esi),%ecx movl 4(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,4(%ebx) movl 8(%esi),%ecx movl 8(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,8(%ebx) movl 12(%esi),%ecx movl 12(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,12(%ebx) movl 16(%esi),%ecx movl 16(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,16(%ebx) movl 20(%esi),%ecx movl 20(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,20(%ebx) movl 24(%esi),%ecx movl 24(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) movl 28(%esi),%ecx movl 28(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,28(%ebx) addl $32,%esi addl $32,%edi addl $32,%ebx subl $8,%ebp jnz .L027aw_loop .L026aw_finish: movl 32(%esp),%ebp andl $7,%ebp jz .L028aw_end movl (%esi),%ecx movl (%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,(%ebx) jz .L028aw_end movl 4(%esi),%ecx movl 4(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,4(%ebx) jz .L028aw_end movl 8(%esi),%ecx movl 8(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,8(%ebx) jz .L028aw_end movl 12(%esi),%ecx movl 12(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,12(%ebx) jz .L028aw_end movl 16(%esi),%ecx movl 16(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax decl %ebp movl %ecx,16(%ebx) jz .L028aw_end movl 20(%esi),%ecx movl 20(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl 
%edx,%ecx adcl $0,%eax decl %ebp movl %ecx,20(%ebx) jz .L028aw_end movl 24(%esi),%ecx movl 24(%edi),%edx subl %eax,%ecx movl $0,%eax adcl %eax,%eax subl %edx,%ecx adcl $0,%eax movl %ecx,24(%ebx) .L028aw_end: popl %edi popl %esi popl %ebx popl %ebp ret .size bn_sub_words,.-.L_bn_sub_words_begin #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
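bn-586.S above supplies the word-level kernels behind the generic big-number code: bn_mul_add_words, bn_mul_words, bn_sqr_words, bn_div_words, bn_add_words and bn_sub_words, each with an SSE2 fast path guarded by an OPENSSL_ia32cap_P check and an eight-fold unrolled integer fallback. As a reference for the semantics of the central routine, here is a portable C sketch of what bn_mul_add_words computes (multiply-accumulate with carry across 32-bit words, returning the final carry); it illustrates the loop, it is not the library's implementation.

/* Reference semantics sketch for bn_mul_add_words on 32-bit words:
 *   rp[i] += ap[i] * w for i in [0, num), propagating the carry,
 *   and return the carry left over after the last word. */
#include <stdint.h>
#include <stddef.h>

uint32_t bn_mul_add_words_ref(uint32_t *rp, const uint32_t *ap,
                              size_t num, uint32_t w) {
    uint32_t carry = 0;
    for (size_t i = 0; i < num; i++) {
        uint64_t t = (uint64_t)ap[i] * w + rp[i] + carry;  /* widened so no carry is lost */
        rp[i] = (uint32_t)t;
        carry = (uint32_t)(t >> 32);
    }
    return carry;
}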
wlsfx/bnbb
8,981
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86/crypto/fipsmodule/x86-mont.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl bn_mul_mont .hidden bn_mul_mont .type bn_mul_mont,@function .align 16 bn_mul_mont: .L_bn_mul_mont_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi xorl %eax,%eax movl 40(%esp),%edi cmpl $4,%edi jl .L000just_leave leal 20(%esp),%esi leal 24(%esp),%edx addl $2,%edi negl %edi leal -32(%esp,%edi,4),%ebp negl %edi movl %ebp,%eax subl %edx,%eax andl $2047,%eax subl %eax,%ebp xorl %ebp,%edx andl $2048,%edx xorl $2048,%edx subl %edx,%ebp andl $-64,%ebp movl %esp,%eax subl %ebp,%eax andl $-4096,%eax movl %esp,%edx leal (%ebp,%eax,1),%esp movl (%esp),%eax cmpl %ebp,%esp ja .L001page_walk jmp .L002page_walk_done .align 16 .L001page_walk: leal -4096(%esp),%esp movl (%esp),%eax cmpl %ebp,%esp ja .L001page_walk .L002page_walk_done: movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%ebp movl 16(%esi),%esi movl (%esi),%esi movl %eax,4(%esp) movl %ebx,8(%esp) movl %ecx,12(%esp) movl %ebp,16(%esp) movl %esi,20(%esp) leal -3(%edi),%ebx movl %edx,24(%esp) call .L003PIC_me_up .L003PIC_me_up: popl %eax leal OPENSSL_ia32cap_P-.L003PIC_me_up(%eax),%eax btl $26,(%eax) jnc .L004non_sse2 movl $-1,%eax movd %eax,%mm7 movl 8(%esp),%esi movl 12(%esp),%edi movl 16(%esp),%ebp xorl %edx,%edx xorl %ecx,%ecx movd (%edi),%mm4 movd (%esi),%mm5 movd (%ebp),%mm3 pmuludq %mm4,%mm5 movq %mm5,%mm2 movq %mm5,%mm0 pand %mm7,%mm0 pmuludq 20(%esp),%mm5 pmuludq %mm5,%mm3 paddq %mm0,%mm3 movd 4(%ebp),%mm1 movd 4(%esi),%mm0 psrlq $32,%mm2 psrlq $32,%mm3 incl %ecx .align 16 .L0051st: pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 pand %mm7,%mm0 movd 4(%ebp,%ecx,4),%mm1 paddq %mm0,%mm3 movd 4(%esi,%ecx,4),%mm0 psrlq $32,%mm2 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm3 leal 1(%ecx),%ecx cmpl %ebx,%ecx jl .L0051st pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 pand %mm7,%mm0 paddq %mm0,%mm3 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm2 psrlq $32,%mm3 paddq %mm2,%mm3 movq %mm3,32(%esp,%ebx,4) incl %edx .L006outer: xorl %ecx,%ecx movd (%edi,%edx,4),%mm4 movd (%esi),%mm5 movd 32(%esp),%mm6 movd (%ebp),%mm3 pmuludq %mm4,%mm5 paddq %mm6,%mm5 movq %mm5,%mm0 movq %mm5,%mm2 pand %mm7,%mm0 pmuludq 20(%esp),%mm5 pmuludq %mm5,%mm3 paddq %mm0,%mm3 movd 36(%esp),%mm6 movd 4(%ebp),%mm1 movd 4(%esi),%mm0 psrlq $32,%mm2 psrlq $32,%mm3 paddq %mm6,%mm2 incl %ecx decl %ebx .L007inner: pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 movd 36(%esp,%ecx,4),%mm6 pand %mm7,%mm0 movd 4(%ebp,%ecx,4),%mm1 paddq %mm0,%mm3 movd 4(%esi,%ecx,4),%mm0 psrlq $32,%mm2 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm3 paddq %mm6,%mm2 decl %ebx leal 1(%ecx),%ecx jnz .L007inner movl %ecx,%ebx pmuludq %mm4,%mm0 pmuludq %mm5,%mm1 paddq %mm0,%mm2 paddq %mm1,%mm3 movq %mm2,%mm0 pand %mm7,%mm0 paddq %mm0,%mm3 movd %mm3,28(%esp,%ecx,4) psrlq $32,%mm2 psrlq $32,%mm3 movd 36(%esp,%ebx,4),%mm6 paddq %mm2,%mm3 paddq %mm6,%mm3 movq %mm3,32(%esp,%ebx,4) leal 1(%edx),%edx cmpl %ebx,%edx jle .L006outer emms jmp .L008common_tail .align 16 .L004non_sse2: movl 8(%esp),%esi leal 1(%ebx),%ebp movl 12(%esp),%edi xorl %ecx,%ecx movl %esi,%edx andl $1,%ebp subl %edi,%edx leal 4(%edi,%ebx,4),%eax orl %edx,%ebp movl (%edi),%edi jz .L009bn_sqr_mont movl %eax,28(%esp) movl (%esi),%eax xorl %edx,%edx .align 16 .L010mull: movl %edx,%ebp mull %edi addl %eax,%ebp leal 
1(%ecx),%ecx adcl $0,%edx movl (%esi,%ecx,4),%eax cmpl %ebx,%ecx movl %ebp,28(%esp,%ecx,4) jl .L010mull movl %edx,%ebp mull %edi movl 20(%esp),%edi addl %ebp,%eax movl 16(%esp),%esi adcl $0,%edx imull 32(%esp),%edi movl %eax,32(%esp,%ebx,4) xorl %ecx,%ecx movl %edx,36(%esp,%ebx,4) movl %ecx,40(%esp,%ebx,4) movl (%esi),%eax mull %edi addl 32(%esp),%eax movl 4(%esi),%eax adcl $0,%edx incl %ecx jmp .L0112ndmadd .align 16 .L0121stmadd: movl %edx,%ebp mull %edi addl 32(%esp,%ecx,4),%ebp leal 1(%ecx),%ecx adcl $0,%edx addl %eax,%ebp movl (%esi,%ecx,4),%eax adcl $0,%edx cmpl %ebx,%ecx movl %ebp,28(%esp,%ecx,4) jl .L0121stmadd movl %edx,%ebp mull %edi addl 32(%esp,%ebx,4),%eax movl 20(%esp),%edi adcl $0,%edx movl 16(%esp),%esi addl %eax,%ebp adcl $0,%edx imull 32(%esp),%edi xorl %ecx,%ecx addl 36(%esp,%ebx,4),%edx movl %ebp,32(%esp,%ebx,4) adcl $0,%ecx movl (%esi),%eax movl %edx,36(%esp,%ebx,4) movl %ecx,40(%esp,%ebx,4) mull %edi addl 32(%esp),%eax movl 4(%esi),%eax adcl $0,%edx movl $1,%ecx .align 16 .L0112ndmadd: movl %edx,%ebp mull %edi addl 32(%esp,%ecx,4),%ebp leal 1(%ecx),%ecx adcl $0,%edx addl %eax,%ebp movl (%esi,%ecx,4),%eax adcl $0,%edx cmpl %ebx,%ecx movl %ebp,24(%esp,%ecx,4) jl .L0112ndmadd movl %edx,%ebp mull %edi addl 32(%esp,%ebx,4),%ebp adcl $0,%edx addl %eax,%ebp adcl $0,%edx movl %ebp,28(%esp,%ebx,4) xorl %eax,%eax movl 12(%esp),%ecx addl 36(%esp,%ebx,4),%edx adcl 40(%esp,%ebx,4),%eax leal 4(%ecx),%ecx movl %edx,32(%esp,%ebx,4) cmpl 28(%esp),%ecx movl %eax,36(%esp,%ebx,4) je .L008common_tail movl (%ecx),%edi movl 8(%esp),%esi movl %ecx,12(%esp) xorl %ecx,%ecx xorl %edx,%edx movl (%esi),%eax jmp .L0121stmadd .align 16 .L009bn_sqr_mont: movl %ebx,(%esp) movl %ecx,12(%esp) movl %edi,%eax mull %edi movl %eax,32(%esp) movl %edx,%ebx shrl $1,%edx andl $1,%ebx incl %ecx .align 16 .L013sqr: movl (%esi,%ecx,4),%eax movl %edx,%ebp mull %edi addl %ebp,%eax leal 1(%ecx),%ecx adcl $0,%edx leal (%ebx,%eax,2),%ebp shrl $31,%eax cmpl (%esp),%ecx movl %eax,%ebx movl %ebp,28(%esp,%ecx,4) jl .L013sqr movl (%esi,%ecx,4),%eax movl %edx,%ebp mull %edi addl %ebp,%eax movl 20(%esp),%edi adcl $0,%edx movl 16(%esp),%esi leal (%ebx,%eax,2),%ebp imull 32(%esp),%edi shrl $31,%eax movl %ebp,32(%esp,%ecx,4) leal (%eax,%edx,2),%ebp movl (%esi),%eax shrl $31,%edx movl %ebp,36(%esp,%ecx,4) movl %edx,40(%esp,%ecx,4) mull %edi addl 32(%esp),%eax movl %ecx,%ebx adcl $0,%edx movl 4(%esi),%eax movl $1,%ecx .align 16 .L0143rdmadd: movl %edx,%ebp mull %edi addl 32(%esp,%ecx,4),%ebp adcl $0,%edx addl %eax,%ebp movl 4(%esi,%ecx,4),%eax adcl $0,%edx movl %ebp,28(%esp,%ecx,4) movl %edx,%ebp mull %edi addl 36(%esp,%ecx,4),%ebp leal 2(%ecx),%ecx adcl $0,%edx addl %eax,%ebp movl (%esi,%ecx,4),%eax adcl $0,%edx cmpl %ebx,%ecx movl %ebp,24(%esp,%ecx,4) jl .L0143rdmadd movl %edx,%ebp mull %edi addl 32(%esp,%ebx,4),%ebp adcl $0,%edx addl %eax,%ebp adcl $0,%edx movl %ebp,28(%esp,%ebx,4) movl 12(%esp),%ecx xorl %eax,%eax movl 8(%esp),%esi addl 36(%esp,%ebx,4),%edx adcl 40(%esp,%ebx,4),%eax movl %edx,32(%esp,%ebx,4) cmpl %ebx,%ecx movl %eax,36(%esp,%ebx,4) je .L008common_tail movl 4(%esi,%ecx,4),%edi leal 1(%ecx),%ecx movl %edi,%eax movl %ecx,12(%esp) mull %edi addl 32(%esp,%ecx,4),%eax adcl $0,%edx movl %eax,32(%esp,%ecx,4) xorl %ebp,%ebp cmpl %ebx,%ecx leal 1(%ecx),%ecx je .L015sqrlast movl %edx,%ebx shrl $1,%edx andl $1,%ebx .align 16 .L016sqradd: movl (%esi,%ecx,4),%eax movl %edx,%ebp mull %edi addl %ebp,%eax leal (%eax,%eax,1),%ebp adcl $0,%edx shrl $31,%eax addl 32(%esp,%ecx,4),%ebp leal 1(%ecx),%ecx adcl $0,%eax addl %ebx,%ebp 
adcl $0,%eax cmpl (%esp),%ecx movl %ebp,28(%esp,%ecx,4) movl %eax,%ebx jle .L016sqradd movl %edx,%ebp addl %edx,%edx shrl $31,%ebp addl %ebx,%edx adcl $0,%ebp .L015sqrlast: movl 20(%esp),%edi movl 16(%esp),%esi imull 32(%esp),%edi addl 32(%esp,%ecx,4),%edx movl (%esi),%eax adcl $0,%ebp movl %edx,32(%esp,%ecx,4) movl %ebp,36(%esp,%ecx,4) mull %edi addl 32(%esp),%eax leal -1(%ecx),%ebx adcl $0,%edx movl $1,%ecx movl 4(%esi),%eax jmp .L0143rdmadd .align 16 .L008common_tail: movl 16(%esp),%ebp movl 4(%esp),%edi leal 32(%esp),%esi movl (%esi),%eax movl %ebx,%ecx xorl %edx,%edx .align 16 .L017sub: sbbl (%ebp,%edx,4),%eax movl %eax,(%edi,%edx,4) decl %ecx movl 4(%esi,%edx,4),%eax leal 1(%edx),%edx jge .L017sub sbbl $0,%eax movl $-1,%edx xorl %eax,%edx jmp .L018copy .align 16 .L018copy: movl 32(%esp,%ebx,4),%esi movl (%edi,%ebx,4),%ebp movl %ecx,32(%esp,%ebx,4) andl %eax,%esi andl %edx,%ebp orl %esi,%ebp movl %ebp,(%edi,%ebx,4) decl %ebx jge .L018copy movl 24(%esp),%esp movl $1,%eax .L000just_leave: popl %edi popl %esi popl %ebx popl %ebp ret .size bn_mul_mont,.-.L_bn_mul_mont_begin .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105 .byte 112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56 .byte 54,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121 .byte 32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46 .byte 111,114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
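The bn_mul_mont routine above is a word-level Montgomery multiplication: it computes a*b*R^-1 mod n for R = 2^(32*num), interleaving the multiply-accumulate passes (.L0121stmadd/.L0112ndmadd) with the reduction and finishing with one conditional subtraction (.L008common_tail). The C sketch below shows the same CIOS-style computation for reference; montmul_ref and its arguments are illustrative names only, it is not the library's bn_mul_mont ABI, and it is deliberately simple rather than constant-time.

#include <stdint.h>
#include <stddef.h>

/* Illustrative Montgomery multiplication (CIOS), 32-bit limbs.
 * Computes r = a*b*R^-1 mod n with R = 2^(32*num) and n0 = -n^-1 mod 2^32.
 * Reference sketch only: not constant-time, not the assembly's interface. */
static void montmul_ref(uint32_t *r, const uint32_t *a, const uint32_t *b,
                        const uint32_t *n, uint32_t n0, size_t num) {
    uint32_t t[num + 2];                       /* C99 VLA, fine for a sketch */
    for (size_t i = 0; i < num + 2; i++) t[i] = 0;

    for (size_t i = 0; i < num; i++) {
        /* t += b[i] * a  (the "1stmadd" pass) */
        uint64_t c = 0;
        for (size_t j = 0; j < num; j++) {
            uint64_t p = (uint64_t)a[j] * b[i] + t[j] + c;
            t[j] = (uint32_t)p;
            c = p >> 32;
        }
        uint64_t s = (uint64_t)t[num] + c;
        t[num] = (uint32_t)s;
        t[num + 1] = (uint32_t)(s >> 32);

        /* pick m so t + m*n is divisible by 2^32, add it, then shift one word
         * (the "2ndmadd" pass, m = t[0]*n0 mod 2^32) */
        uint32_t m = t[0] * n0;
        c = ((uint64_t)m * n[0] + t[0]) >> 32;  /* low word becomes zero */
        for (size_t j = 1; j < num; j++) {
            uint64_t p = (uint64_t)m * n[j] + t[j] + c;
            t[j - 1] = (uint32_t)p;
            c = p >> 32;
        }
        s = (uint64_t)t[num] + c;
        t[num - 1] = (uint32_t)s;
        t[num] = t[num + 1] + (uint32_t)(s >> 32);
    }

    /* conditional final subtraction (the "common_tail"): if t >= n, t -= n */
    uint32_t d[num];
    uint64_t borrow = 0;
    for (size_t j = 0; j < num; j++) {
        uint64_t s = (uint64_t)t[j] - n[j] - borrow;
        d[j] = (uint32_t)s;
        borrow = (s >> 32) & 1;
    }
    int use_d = (t[num] != 0) || (borrow == 0);   /* true when t >= n */
    for (size_t j = 0; j < num; j++) r[j] = use_d ? d[j] : t[j];
}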
wlsfx/bnbb
49,847
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86/crypto/fipsmodule/sha512-586.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl sha512_block_data_order .hidden sha512_block_data_order .type sha512_block_data_order,@function .align 16 sha512_block_data_order: .L_sha512_block_data_order_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl %esp,%ebx call .L000pic_point .L000pic_point: popl %ebp leal .L001K512-.L000pic_point(%ebp),%ebp subl $16,%esp andl $-64,%esp shll $7,%eax addl %edi,%eax movl %esi,(%esp) movl %edi,4(%esp) movl %eax,8(%esp) movl %ebx,12(%esp) leal OPENSSL_ia32cap_P-.L001K512(%ebp),%edx movl (%edx),%ecx testl $67108864,%ecx jz .L002loop_x86 movl 4(%edx),%edx movq (%esi),%mm0 andl $16777216,%ecx movq 8(%esi),%mm1 andl $512,%edx movq 16(%esi),%mm2 orl %edx,%ecx movq 24(%esi),%mm3 movq 32(%esi),%mm4 movq 40(%esi),%mm5 movq 48(%esi),%mm6 movq 56(%esi),%mm7 cmpl $16777728,%ecx je .L003SSSE3 subl $80,%esp jmp .L004loop_sse2 .align 16 .L004loop_sse2: movq %mm1,8(%esp) movq %mm2,16(%esp) movq %mm3,24(%esp) movq %mm5,40(%esp) movq %mm6,48(%esp) pxor %mm1,%mm2 movq %mm7,56(%esp) movq %mm0,%mm3 movl (%edi),%eax movl 4(%edi),%ebx addl $8,%edi movl $15,%edx bswap %eax bswap %ebx jmp .L00500_14_sse2 .align 16 .L00500_14_sse2: movd %eax,%mm1 movl (%edi),%eax movd %ebx,%mm7 movl 4(%edi),%ebx addl $8,%edi bswap %eax bswap %ebx punpckldq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm3,%mm0 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm2,%mm3 movq %mm0,%mm2 addl $8,%ebp paddq %mm6,%mm3 movq 48(%esp),%mm6 decl %edx jnz .L00500_14_sse2 movd %eax,%mm1 movd %ebx,%mm7 punpckldq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm3,%mm0 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 192(%esp),%mm7 paddq %mm2,%mm3 movq %mm0,%mm2 addl $8,%ebp paddq %mm6,%mm3 pxor %mm0,%mm0 movl $32,%edx jmp .L00616_79_sse2 .align 16 .L00616_79_sse2: movq 88(%esp),%mm5 movq %mm7,%mm1 psrlq $1,%mm7 movq %mm5,%mm6 psrlq $6,%mm5 psllq $56,%mm1 paddq %mm3,%mm0 movq %mm7,%mm3 psrlq $6,%mm7 pxor %mm1,%mm3 psllq $7,%mm1 pxor %mm7,%mm3 psrlq $1,%mm7 pxor %mm1,%mm3 movq %mm5,%mm1 psrlq $13,%mm5 pxor %mm3,%mm7 psllq $3,%mm6 pxor %mm5,%mm1 paddq 
200(%esp),%mm7 pxor %mm6,%mm1 psrlq $42,%mm5 paddq 128(%esp),%mm7 pxor %mm5,%mm1 psllq $42,%mm6 movq 40(%esp),%mm5 pxor %mm6,%mm1 movq 48(%esp),%mm6 paddq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 192(%esp),%mm7 paddq %mm6,%mm2 addl $8,%ebp movq 88(%esp),%mm5 movq %mm7,%mm1 psrlq $1,%mm7 movq %mm5,%mm6 psrlq $6,%mm5 psllq $56,%mm1 paddq %mm3,%mm2 movq %mm7,%mm3 psrlq $6,%mm7 pxor %mm1,%mm3 psllq $7,%mm1 pxor %mm7,%mm3 psrlq $1,%mm7 pxor %mm1,%mm3 movq %mm5,%mm1 psrlq $13,%mm5 pxor %mm3,%mm7 psllq $3,%mm6 pxor %mm5,%mm1 paddq 200(%esp),%mm7 pxor %mm6,%mm1 psrlq $42,%mm5 paddq 128(%esp),%mm7 pxor %mm5,%mm1 psllq $42,%mm6 movq 40(%esp),%mm5 pxor %mm6,%mm1 movq 48(%esp),%mm6 paddq %mm1,%mm7 movq %mm4,%mm1 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 movq %mm7,72(%esp) movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 paddq (%ebp),%mm7 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 subl $8,%esp psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 192(%esp),%mm7 paddq %mm6,%mm0 addl $8,%ebp decl %edx jnz .L00616_79_sse2 paddq %mm3,%mm0 movq 8(%esp),%mm1 movq 24(%esp),%mm3 movq 40(%esp),%mm5 movq 48(%esp),%mm6 movq 56(%esp),%mm7 pxor %mm1,%mm2 paddq (%esi),%mm0 paddq 8(%esi),%mm1 paddq 16(%esi),%mm2 paddq 24(%esi),%mm3 paddq 32(%esi),%mm4 paddq 40(%esi),%mm5 paddq 48(%esi),%mm6 paddq 56(%esi),%mm7 movl $640,%eax movq %mm0,(%esi) movq %mm1,8(%esi) movq %mm2,16(%esi) movq %mm3,24(%esi) movq %mm4,32(%esi) movq %mm5,40(%esi) movq %mm6,48(%esi) movq %mm7,56(%esi) leal (%esp,%eax,1),%esp subl %eax,%ebp cmpl 88(%esp),%edi jb .L004loop_sse2 movl 92(%esp),%esp emms popl %edi popl %esi popl %ebx popl %ebp ret .align 32 .L003SSSE3: leal -64(%esp),%edx subl $256,%esp movdqa 640(%ebp),%xmm1 movdqu (%edi),%xmm0 .byte 102,15,56,0,193 movdqa (%ebp),%xmm3 movdqa %xmm1,%xmm2 movdqu 16(%edi),%xmm1 paddq %xmm0,%xmm3 .byte 102,15,56,0,202 movdqa %xmm3,-128(%edx) movdqa 16(%ebp),%xmm4 movdqa %xmm2,%xmm3 movdqu 32(%edi),%xmm2 paddq %xmm1,%xmm4 .byte 102,15,56,0,211 movdqa %xmm4,-112(%edx) movdqa 32(%ebp),%xmm5 movdqa %xmm3,%xmm4 movdqu 48(%edi),%xmm3 paddq %xmm2,%xmm5 .byte 102,15,56,0,220 movdqa %xmm5,-96(%edx) movdqa 48(%ebp),%xmm6 movdqa %xmm4,%xmm5 movdqu 64(%edi),%xmm4 paddq %xmm3,%xmm6 .byte 102,15,56,0,229 movdqa %xmm6,-80(%edx) movdqa 64(%ebp),%xmm7 movdqa %xmm5,%xmm6 movdqu 80(%edi),%xmm5 paddq %xmm4,%xmm7 .byte 102,15,56,0,238 movdqa %xmm7,-64(%edx) movdqa %xmm0,(%edx) movdqa 80(%ebp),%xmm0 movdqa %xmm6,%xmm7 movdqu 96(%edi),%xmm6 paddq %xmm5,%xmm0 
.byte 102,15,56,0,247 movdqa %xmm0,-48(%edx) movdqa %xmm1,16(%edx) movdqa 96(%ebp),%xmm1 movdqa %xmm7,%xmm0 movdqu 112(%edi),%xmm7 paddq %xmm6,%xmm1 .byte 102,15,56,0,248 movdqa %xmm1,-32(%edx) movdqa %xmm2,32(%edx) movdqa 112(%ebp),%xmm2 movdqa (%edx),%xmm0 paddq %xmm7,%xmm2 movdqa %xmm2,-16(%edx) nop .align 32 .L007loop_ssse3: movdqa 16(%edx),%xmm2 movdqa %xmm3,48(%edx) leal 128(%ebp),%ebp movq %mm1,8(%esp) movl %edi,%ebx movq %mm2,16(%esp) leal 128(%edi),%edi movq %mm3,24(%esp) cmpl %eax,%edi movq %mm5,40(%esp) cmovbl %edi,%ebx movq %mm6,48(%esp) movl $4,%ecx pxor %mm1,%mm2 movq %mm7,56(%esp) pxor %mm3,%mm3 jmp .L00800_47_ssse3 .align 32 .L00800_47_ssse3: movdqa %xmm5,%xmm3 movdqa %xmm2,%xmm1 .byte 102,15,58,15,208,8 movdqa %xmm4,(%edx) .byte 102,15,58,15,220,8 movdqa %xmm2,%xmm4 psrlq $7,%xmm2 paddq %xmm3,%xmm0 movdqa %xmm4,%xmm3 psrlq $1,%xmm4 psllq $56,%xmm3 pxor %xmm4,%xmm2 psrlq $7,%xmm4 pxor %xmm3,%xmm2 psllq $7,%xmm3 pxor %xmm4,%xmm2 movdqa %xmm7,%xmm4 pxor %xmm3,%xmm2 movdqa %xmm7,%xmm3 psrlq $6,%xmm4 paddq %xmm2,%xmm0 movdqa %xmm7,%xmm2 psrlq $19,%xmm3 psllq $3,%xmm2 pxor %xmm3,%xmm4 psrlq $42,%xmm3 pxor %xmm2,%xmm4 psllq $42,%xmm2 pxor %xmm3,%xmm4 movdqa 32(%edx),%xmm3 pxor %xmm2,%xmm4 movdqa (%ebp),%xmm2 movq %mm4,%mm1 paddq %xmm4,%xmm0 movq -128(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) paddq %xmm0,%xmm2 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -120(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 16(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm2,-128(%edx) movdqa %xmm6,%xmm4 movdqa %xmm3,%xmm2 .byte 102,15,58,15,217,8 movdqa %xmm5,16(%edx) .byte 102,15,58,15,229,8 movdqa %xmm3,%xmm5 psrlq $7,%xmm3 paddq %xmm4,%xmm1 movdqa %xmm5,%xmm4 psrlq $1,%xmm5 psllq $56,%xmm4 pxor %xmm5,%xmm3 psrlq $7,%xmm5 pxor %xmm4,%xmm3 psllq $7,%xmm4 pxor %xmm5,%xmm3 movdqa %xmm0,%xmm5 pxor %xmm4,%xmm3 movdqa %xmm0,%xmm4 psrlq $6,%xmm5 paddq %xmm3,%xmm1 movdqa %xmm0,%xmm3 psrlq $19,%xmm4 psllq $3,%xmm3 pxor %xmm4,%xmm5 psrlq $42,%xmm4 pxor %xmm3,%xmm5 psllq $42,%xmm3 pxor %xmm4,%xmm5 movdqa 48(%edx),%xmm4 pxor %xmm3,%xmm5 movdqa 16(%ebp),%xmm3 movq %mm4,%mm1 paddq %xmm5,%xmm1 movq -112(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) paddq %xmm1,%xmm3 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq 
$23,%mm4 pxor %mm1,%mm3 movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -104(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm3,-112(%edx) movdqa %xmm7,%xmm5 movdqa %xmm4,%xmm3 .byte 102,15,58,15,226,8 movdqa %xmm6,32(%edx) .byte 102,15,58,15,238,8 movdqa %xmm4,%xmm6 psrlq $7,%xmm4 paddq %xmm5,%xmm2 movdqa %xmm6,%xmm5 psrlq $1,%xmm6 psllq $56,%xmm5 pxor %xmm6,%xmm4 psrlq $7,%xmm6 pxor %xmm5,%xmm4 psllq $7,%xmm5 pxor %xmm6,%xmm4 movdqa %xmm1,%xmm6 pxor %xmm5,%xmm4 movdqa %xmm1,%xmm5 psrlq $6,%xmm6 paddq %xmm4,%xmm2 movdqa %xmm1,%xmm4 psrlq $19,%xmm5 psllq $3,%xmm4 pxor %xmm5,%xmm6 psrlq $42,%xmm5 pxor %xmm4,%xmm6 psllq $42,%xmm4 pxor %xmm5,%xmm6 movdqa (%edx),%xmm5 pxor %xmm4,%xmm6 movdqa 32(%ebp),%xmm4 movq %mm4,%mm1 paddq %xmm6,%xmm2 movq -96(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) paddq %xmm2,%xmm4 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 40(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq (%esp),%mm5 paddq %mm6,%mm2 movq 8(%esp),%mm6 movq %mm4,%mm1 movq -88(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm4,-96(%edx) movdqa %xmm0,%xmm6 movdqa %xmm5,%xmm4 .byte 102,15,58,15,235,8 movdqa %xmm7,48(%edx) .byte 102,15,58,15,247,8 movdqa %xmm5,%xmm7 psrlq $7,%xmm5 paddq %xmm6,%xmm3 movdqa %xmm7,%xmm6 psrlq $1,%xmm7 psllq $56,%xmm6 
pxor %xmm7,%xmm5 psrlq $7,%xmm7 pxor %xmm6,%xmm5 psllq $7,%xmm6 pxor %xmm7,%xmm5 movdqa %xmm2,%xmm7 pxor %xmm6,%xmm5 movdqa %xmm2,%xmm6 psrlq $6,%xmm7 paddq %xmm5,%xmm3 movdqa %xmm2,%xmm5 psrlq $19,%xmm6 psllq $3,%xmm5 pxor %xmm6,%xmm7 psrlq $42,%xmm6 pxor %xmm5,%xmm7 psllq $42,%xmm5 pxor %xmm6,%xmm7 movdqa 16(%edx),%xmm6 pxor %xmm5,%xmm7 movdqa 48(%ebp),%xmm5 movq %mm4,%mm1 paddq %xmm7,%xmm3 movq -80(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,48(%esp) paddq %xmm3,%xmm5 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -72(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm5,-80(%edx) movdqa %xmm1,%xmm7 movdqa %xmm6,%xmm5 .byte 102,15,58,15,244,8 movdqa %xmm0,(%edx) .byte 102,15,58,15,248,8 movdqa %xmm6,%xmm0 psrlq $7,%xmm6 paddq %xmm7,%xmm4 movdqa %xmm0,%xmm7 psrlq $1,%xmm0 psllq $56,%xmm7 pxor %xmm0,%xmm6 psrlq $7,%xmm0 pxor %xmm7,%xmm6 psllq $7,%xmm7 pxor %xmm0,%xmm6 movdqa %xmm3,%xmm0 pxor %xmm7,%xmm6 movdqa %xmm3,%xmm7 psrlq $6,%xmm0 paddq %xmm6,%xmm4 movdqa %xmm3,%xmm6 psrlq $19,%xmm7 psllq $3,%xmm6 pxor %xmm7,%xmm0 psrlq $42,%xmm7 pxor %xmm6,%xmm0 psllq $42,%xmm6 pxor %xmm7,%xmm0 movdqa 32(%edx),%xmm7 pxor %xmm6,%xmm0 movdqa 64(%ebp),%xmm6 movq %mm4,%mm1 paddq %xmm0,%xmm4 movq -64(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) paddq %xmm4,%xmm6 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -56(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 
16(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm6,-64(%edx) movdqa %xmm2,%xmm0 movdqa %xmm7,%xmm6 .byte 102,15,58,15,253,8 movdqa %xmm1,16(%edx) .byte 102,15,58,15,193,8 movdqa %xmm7,%xmm1 psrlq $7,%xmm7 paddq %xmm0,%xmm5 movdqa %xmm1,%xmm0 psrlq $1,%xmm1 psllq $56,%xmm0 pxor %xmm1,%xmm7 psrlq $7,%xmm1 pxor %xmm0,%xmm7 psllq $7,%xmm0 pxor %xmm1,%xmm7 movdqa %xmm4,%xmm1 pxor %xmm0,%xmm7 movdqa %xmm4,%xmm0 psrlq $6,%xmm1 paddq %xmm7,%xmm5 movdqa %xmm4,%xmm7 psrlq $19,%xmm0 psllq $3,%xmm7 pxor %xmm0,%xmm1 psrlq $42,%xmm0 pxor %xmm7,%xmm1 psllq $42,%xmm7 pxor %xmm0,%xmm1 movdqa 48(%edx),%xmm0 pxor %xmm7,%xmm1 movdqa 80(%ebp),%xmm7 movq %mm4,%mm1 paddq %xmm1,%xmm5 movq -48(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) paddq %xmm5,%xmm7 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -40(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm7,-48(%edx) movdqa %xmm3,%xmm1 movdqa %xmm0,%xmm7 .byte 102,15,58,15,198,8 movdqa %xmm2,32(%edx) .byte 102,15,58,15,202,8 movdqa %xmm0,%xmm2 psrlq $7,%xmm0 paddq %xmm1,%xmm6 movdqa %xmm2,%xmm1 psrlq $1,%xmm2 psllq $56,%xmm1 pxor %xmm2,%xmm0 psrlq $7,%xmm2 pxor %xmm1,%xmm0 psllq $7,%xmm1 pxor %xmm2,%xmm0 movdqa %xmm5,%xmm2 pxor %xmm1,%xmm0 movdqa %xmm5,%xmm1 psrlq $6,%xmm2 paddq %xmm0,%xmm6 movdqa %xmm5,%xmm0 psrlq $19,%xmm1 psllq $3,%xmm0 pxor %xmm1,%xmm2 psrlq $42,%xmm1 pxor %xmm0,%xmm2 psllq $42,%xmm0 pxor %xmm1,%xmm2 movdqa (%edx),%xmm1 pxor %xmm0,%xmm2 movdqa 96(%ebp),%xmm0 movq %mm4,%mm1 paddq %xmm2,%xmm6 movq -32(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) paddq %xmm6,%xmm0 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 
40(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq (%esp),%mm5 paddq %mm6,%mm2 movq 8(%esp),%mm6 movq %mm4,%mm1 movq -24(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm0,-32(%edx) movdqa %xmm4,%xmm2 movdqa %xmm1,%xmm0 .byte 102,15,58,15,207,8 movdqa %xmm3,48(%edx) .byte 102,15,58,15,211,8 movdqa %xmm1,%xmm3 psrlq $7,%xmm1 paddq %xmm2,%xmm7 movdqa %xmm3,%xmm2 psrlq $1,%xmm3 psllq $56,%xmm2 pxor %xmm3,%xmm1 psrlq $7,%xmm3 pxor %xmm2,%xmm1 psllq $7,%xmm2 pxor %xmm3,%xmm1 movdqa %xmm6,%xmm3 pxor %xmm2,%xmm1 movdqa %xmm6,%xmm2 psrlq $6,%xmm3 paddq %xmm1,%xmm7 movdqa %xmm6,%xmm1 psrlq $19,%xmm2 psllq $3,%xmm1 pxor %xmm2,%xmm3 psrlq $42,%xmm2 pxor %xmm1,%xmm3 psllq $42,%xmm1 pxor %xmm2,%xmm3 movdqa 16(%edx),%xmm2 pxor %xmm1,%xmm3 movdqa 112(%ebp),%xmm1 movq %mm4,%mm1 paddq %xmm3,%xmm7 movq -16(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,48(%esp) paddq %xmm7,%xmm1 pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -8(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm1,-16(%edx) leal 128(%ebp),%ebp decl %ecx jnz .L00800_47_ssse3 movdqa (%ebp),%xmm1 leal -640(%ebp),%ebp movdqu (%ebx),%xmm0 .byte 102,15,56,0,193 movdqa (%ebp),%xmm3 movdqa %xmm1,%xmm2 movdqu 16(%ebx),%xmm1 paddq %xmm0,%xmm3 .byte 102,15,56,0,202 movq %mm4,%mm1 movq -128(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor 
%mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -120(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 16(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm3,-128(%edx) movdqa 16(%ebp),%xmm4 movdqa %xmm2,%xmm3 movdqu 32(%ebx),%xmm2 paddq %xmm1,%xmm4 .byte 102,15,56,0,211 movq %mm4,%mm1 movq -112(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -104(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm4,-112(%edx) movdqa 32(%ebp),%xmm5 movdqa %xmm3,%xmm4 movdqu 48(%ebx),%xmm3 paddq %xmm2,%xmm5 .byte 102,15,56,0,220 movq %mm4,%mm1 movq -96(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 40(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 
(%esp),%mm5 paddq %mm6,%mm2 movq 8(%esp),%mm6 movq %mm4,%mm1 movq -88(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm5,-96(%edx) movdqa 48(%ebp),%xmm6 movdqa %xmm4,%xmm5 movdqu 64(%ebx),%xmm4 paddq %xmm3,%xmm6 .byte 102,15,56,0,229 movq %mm4,%mm1 movq -80(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,48(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -72(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm6,-80(%edx) movdqa 64(%ebp),%xmm7 movdqa %xmm5,%xmm6 movdqu 80(%ebx),%xmm5 paddq %xmm4,%xmm7 .byte 102,15,56,0,238 movq %mm4,%mm1 movq -64(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,32(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 56(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 24(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 8(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 32(%esp),%mm5 paddq %mm6,%mm2 movq 40(%esp),%mm6 movq %mm4,%mm1 movq -56(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,24(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,56(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 48(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 16(%esp),%mm4 paddq 
%mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq (%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 24(%esp),%mm5 paddq %mm6,%mm0 movq 32(%esp),%mm6 movdqa %xmm7,-64(%edx) movdqa %xmm0,(%edx) movdqa 80(%ebp),%xmm0 movdqa %xmm6,%xmm7 movdqu 96(%ebx),%xmm6 paddq %xmm5,%xmm0 .byte 102,15,56,0,247 movq %mm4,%mm1 movq -48(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,16(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,48(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 40(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 8(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 56(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 16(%esp),%mm5 paddq %mm6,%mm2 movq 24(%esp),%mm6 movq %mm4,%mm1 movq -40(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,8(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,40(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 32(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq (%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 48(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 8(%esp),%mm5 paddq %mm6,%mm0 movq 16(%esp),%mm6 movdqa %xmm0,-48(%edx) movdqa %xmm1,16(%edx) movdqa 96(%ebp),%xmm1 movdqa %xmm7,%xmm0 movdqu 112(%ebx),%xmm7 paddq %xmm6,%xmm1 .byte 102,15,56,0,248 movq %mm4,%mm1 movq -32(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,32(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 24(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 56(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 40(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq (%esp),%mm5 paddq %mm6,%mm2 movq 8(%esp),%mm6 movq %mm4,%mm1 movq -24(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,56(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,24(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 16(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 48(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 32(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 56(%esp),%mm5 paddq %mm6,%mm0 movq (%esp),%mm6 movdqa %xmm1,-32(%edx) movdqa 
%xmm2,32(%edx) movdqa 112(%ebp),%xmm2 movdqa (%edx),%xmm0 paddq %xmm7,%xmm2 movq %mm4,%mm1 movq -16(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,48(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm0 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm0,16(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq 8(%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 40(%esp),%mm4 paddq %mm7,%mm3 movq %mm0,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm0,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 24(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm0,%mm2 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 pxor %mm7,%mm6 movq 48(%esp),%mm5 paddq %mm6,%mm2 movq 56(%esp),%mm6 movq %mm4,%mm1 movq -8(%edx),%mm7 pxor %mm6,%mm5 psrlq $14,%mm1 movq %mm4,40(%esp) pand %mm4,%mm5 psllq $23,%mm4 paddq %mm3,%mm2 movq %mm1,%mm3 psrlq $4,%mm1 pxor %mm6,%mm5 pxor %mm4,%mm3 psllq $23,%mm4 pxor %mm1,%mm3 movq %mm2,8(%esp) paddq %mm5,%mm7 pxor %mm4,%mm3 psrlq $23,%mm1 paddq (%esp),%mm7 pxor %mm1,%mm3 psllq $4,%mm4 pxor %mm4,%mm3 movq 32(%esp),%mm4 paddq %mm7,%mm3 movq %mm2,%mm5 psrlq $28,%mm5 paddq %mm3,%mm4 movq %mm2,%mm6 movq %mm5,%mm7 psllq $25,%mm6 movq 16(%esp),%mm1 psrlq $6,%mm5 pxor %mm6,%mm7 psllq $5,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm2 psrlq $5,%mm5 pxor %mm6,%mm7 pand %mm2,%mm0 psllq $6,%mm6 pxor %mm5,%mm7 pxor %mm1,%mm0 pxor %mm7,%mm6 movq 40(%esp),%mm5 paddq %mm6,%mm0 movq 48(%esp),%mm6 movdqa %xmm2,-16(%edx) movq 8(%esp),%mm1 paddq %mm3,%mm0 movq 24(%esp),%mm3 movq 56(%esp),%mm7 pxor %mm1,%mm2 paddq (%esi),%mm0 paddq 8(%esi),%mm1 paddq 16(%esi),%mm2 paddq 24(%esi),%mm3 paddq 32(%esi),%mm4 paddq 40(%esi),%mm5 paddq 48(%esi),%mm6 paddq 56(%esi),%mm7 movq %mm0,(%esi) movq %mm1,8(%esi) movq %mm2,16(%esi) movq %mm3,24(%esi) movq %mm4,32(%esi) movq %mm5,40(%esi) movq %mm6,48(%esi) movq %mm7,56(%esi) cmpl %eax,%edi jb .L007loop_ssse3 movl 76(%edx),%esp emms popl %edi popl %esi popl %ebx popl %ebp ret .align 16 .L002loop_x86: movl (%edi),%eax movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx movl 16(%edi),%eax movl 20(%edi),%ebx movl 24(%edi),%ecx movl 28(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx movl 32(%edi),%eax movl 36(%edi),%ebx movl 40(%edi),%ecx movl 44(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx movl 48(%edi),%eax movl 52(%edi),%ebx movl 56(%edi),%ecx movl 60(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx movl 64(%edi),%eax movl 68(%edi),%ebx movl 72(%edi),%ecx movl 76(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx movl 80(%edi),%eax movl 84(%edi),%ebx movl 88(%edi),%ecx movl 92(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx movl 96(%edi),%eax movl 100(%edi),%ebx movl 104(%edi),%ecx movl 108(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx movl 112(%edi),%eax movl 116(%edi),%ebx movl 120(%edi),%ecx movl 124(%edi),%edx bswap %eax bswap %ebx bswap %ecx bswap %edx pushl %eax pushl %ebx pushl %ecx pushl %edx addl $128,%edi subl $72,%esp movl %edi,204(%esp) leal 8(%esp),%edi movl $16,%ecx .long 2784229001 .align 16 .L00900_15_x86: movl 40(%esp),%ecx movl 44(%esp),%edx movl %ecx,%esi 
shrl $9,%ecx movl %edx,%edi shrl $9,%edx movl %ecx,%ebx shll $14,%esi movl %edx,%eax shll $14,%edi xorl %esi,%ebx shrl $5,%ecx xorl %edi,%eax shrl $5,%edx xorl %ecx,%eax shll $4,%esi xorl %edx,%ebx shll $4,%edi xorl %esi,%ebx shrl $4,%ecx xorl %edi,%eax shrl $4,%edx xorl %ecx,%eax shll $5,%esi xorl %edx,%ebx shll $5,%edi xorl %esi,%eax xorl %edi,%ebx movl 48(%esp),%ecx movl 52(%esp),%edx movl 56(%esp),%esi movl 60(%esp),%edi addl 64(%esp),%eax adcl 68(%esp),%ebx xorl %esi,%ecx xorl %edi,%edx andl 40(%esp),%ecx andl 44(%esp),%edx addl 192(%esp),%eax adcl 196(%esp),%ebx xorl %esi,%ecx xorl %edi,%edx movl (%ebp),%esi movl 4(%ebp),%edi addl %ecx,%eax adcl %edx,%ebx movl 32(%esp),%ecx movl 36(%esp),%edx addl %esi,%eax adcl %edi,%ebx movl %eax,(%esp) movl %ebx,4(%esp) addl %ecx,%eax adcl %edx,%ebx movl 8(%esp),%ecx movl 12(%esp),%edx movl %eax,32(%esp) movl %ebx,36(%esp) movl %ecx,%esi shrl $2,%ecx movl %edx,%edi shrl $2,%edx movl %ecx,%ebx shll $4,%esi movl %edx,%eax shll $4,%edi xorl %esi,%ebx shrl $5,%ecx xorl %edi,%eax shrl $5,%edx xorl %ecx,%ebx shll $21,%esi xorl %edx,%eax shll $21,%edi xorl %esi,%eax shrl $21,%ecx xorl %edi,%ebx shrl $21,%edx xorl %ecx,%eax shll $5,%esi xorl %edx,%ebx shll $5,%edi xorl %esi,%eax xorl %edi,%ebx movl 8(%esp),%ecx movl 12(%esp),%edx movl 16(%esp),%esi movl 20(%esp),%edi addl (%esp),%eax adcl 4(%esp),%ebx orl %esi,%ecx orl %edi,%edx andl 24(%esp),%ecx andl 28(%esp),%edx andl 8(%esp),%esi andl 12(%esp),%edi orl %esi,%ecx orl %edi,%edx addl %ecx,%eax adcl %edx,%ebx movl %eax,(%esp) movl %ebx,4(%esp) movb (%ebp),%dl subl $8,%esp leal 8(%ebp),%ebp cmpb $148,%dl jne .L00900_15_x86 .align 16 .L01016_79_x86: movl 312(%esp),%ecx movl 316(%esp),%edx movl %ecx,%esi shrl $1,%ecx movl %edx,%edi shrl $1,%edx movl %ecx,%eax shll $24,%esi movl %edx,%ebx shll $24,%edi xorl %esi,%ebx shrl $6,%ecx xorl %edi,%eax shrl $6,%edx xorl %ecx,%eax shll $7,%esi xorl %edx,%ebx shll $1,%edi xorl %esi,%ebx shrl $1,%ecx xorl %edi,%eax shrl $1,%edx xorl %ecx,%eax shll $6,%edi xorl %edx,%ebx xorl %edi,%eax movl %eax,(%esp) movl %ebx,4(%esp) movl 208(%esp),%ecx movl 212(%esp),%edx movl %ecx,%esi shrl $6,%ecx movl %edx,%edi shrl $6,%edx movl %ecx,%eax shll $3,%esi movl %edx,%ebx shll $3,%edi xorl %esi,%eax shrl $13,%ecx xorl %edi,%ebx shrl $13,%edx xorl %ecx,%eax shll $10,%esi xorl %edx,%ebx shll $10,%edi xorl %esi,%ebx shrl $10,%ecx xorl %edi,%eax shrl $10,%edx xorl %ecx,%ebx shll $13,%edi xorl %edx,%eax xorl %edi,%eax movl 320(%esp),%ecx movl 324(%esp),%edx addl (%esp),%eax adcl 4(%esp),%ebx movl 248(%esp),%esi movl 252(%esp),%edi addl %ecx,%eax adcl %edx,%ebx addl %esi,%eax adcl %edi,%ebx movl %eax,192(%esp) movl %ebx,196(%esp) movl 40(%esp),%ecx movl 44(%esp),%edx movl %ecx,%esi shrl $9,%ecx movl %edx,%edi shrl $9,%edx movl %ecx,%ebx shll $14,%esi movl %edx,%eax shll $14,%edi xorl %esi,%ebx shrl $5,%ecx xorl %edi,%eax shrl $5,%edx xorl %ecx,%eax shll $4,%esi xorl %edx,%ebx shll $4,%edi xorl %esi,%ebx shrl $4,%ecx xorl %edi,%eax shrl $4,%edx xorl %ecx,%eax shll $5,%esi xorl %edx,%ebx shll $5,%edi xorl %esi,%eax xorl %edi,%ebx movl 48(%esp),%ecx movl 52(%esp),%edx movl 56(%esp),%esi movl 60(%esp),%edi addl 64(%esp),%eax adcl 68(%esp),%ebx xorl %esi,%ecx xorl %edi,%edx andl 40(%esp),%ecx andl 44(%esp),%edx addl 192(%esp),%eax adcl 196(%esp),%ebx xorl %esi,%ecx xorl %edi,%edx movl (%ebp),%esi movl 4(%ebp),%edi addl %ecx,%eax adcl %edx,%ebx movl 32(%esp),%ecx movl 36(%esp),%edx addl %esi,%eax adcl %edi,%ebx movl %eax,(%esp) movl %ebx,4(%esp) addl %ecx,%eax adcl %edx,%ebx movl 8(%esp),%ecx movl 
12(%esp),%edx movl %eax,32(%esp) movl %ebx,36(%esp) movl %ecx,%esi shrl $2,%ecx movl %edx,%edi shrl $2,%edx movl %ecx,%ebx shll $4,%esi movl %edx,%eax shll $4,%edi xorl %esi,%ebx shrl $5,%ecx xorl %edi,%eax shrl $5,%edx xorl %ecx,%ebx shll $21,%esi xorl %edx,%eax shll $21,%edi xorl %esi,%eax shrl $21,%ecx xorl %edi,%ebx shrl $21,%edx xorl %ecx,%eax shll $5,%esi xorl %edx,%ebx shll $5,%edi xorl %esi,%eax xorl %edi,%ebx movl 8(%esp),%ecx movl 12(%esp),%edx movl 16(%esp),%esi movl 20(%esp),%edi addl (%esp),%eax adcl 4(%esp),%ebx orl %esi,%ecx orl %edi,%edx andl 24(%esp),%ecx andl 28(%esp),%edx andl 8(%esp),%esi andl 12(%esp),%edi orl %esi,%ecx orl %edi,%edx addl %ecx,%eax adcl %edx,%ebx movl %eax,(%esp) movl %ebx,4(%esp) movb (%ebp),%dl subl $8,%esp leal 8(%ebp),%ebp cmpb $23,%dl jne .L01016_79_x86 movl 840(%esp),%esi movl 844(%esp),%edi movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edx addl 8(%esp),%eax adcl 12(%esp),%ebx movl %eax,(%esi) movl %ebx,4(%esi) addl 16(%esp),%ecx adcl 20(%esp),%edx movl %ecx,8(%esi) movl %edx,12(%esi) movl 16(%esi),%eax movl 20(%esi),%ebx movl 24(%esi),%ecx movl 28(%esi),%edx addl 24(%esp),%eax adcl 28(%esp),%ebx movl %eax,16(%esi) movl %ebx,20(%esi) addl 32(%esp),%ecx adcl 36(%esp),%edx movl %ecx,24(%esi) movl %edx,28(%esi) movl 32(%esi),%eax movl 36(%esi),%ebx movl 40(%esi),%ecx movl 44(%esi),%edx addl 40(%esp),%eax adcl 44(%esp),%ebx movl %eax,32(%esi) movl %ebx,36(%esi) addl 48(%esp),%ecx adcl 52(%esp),%edx movl %ecx,40(%esi) movl %edx,44(%esi) movl 48(%esi),%eax movl 52(%esi),%ebx movl 56(%esi),%ecx movl 60(%esi),%edx addl 56(%esp),%eax adcl 60(%esp),%ebx movl %eax,48(%esi) movl %ebx,52(%esi) addl 64(%esp),%ecx adcl 68(%esp),%edx movl %ecx,56(%esi) movl %edx,60(%esi) addl $840,%esp subl $640,%ebp cmpl 8(%esp),%edi jb .L002loop_x86 movl 12(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .align 64 .L001K512: .long 3609767458,1116352408 .long 602891725,1899447441 .long 3964484399,3049323471 .long 2173295548,3921009573 .long 4081628472,961987163 .long 3053834265,1508970993 .long 2937671579,2453635748 .long 3664609560,2870763221 .long 2734883394,3624381080 .long 1164996542,310598401 .long 1323610764,607225278 .long 3590304994,1426881987 .long 4068182383,1925078388 .long 991336113,2162078206 .long 633803317,2614888103 .long 3479774868,3248222580 .long 2666613458,3835390401 .long 944711139,4022224774 .long 2341262773,264347078 .long 2007800933,604807628 .long 1495990901,770255983 .long 1856431235,1249150122 .long 3175218132,1555081692 .long 2198950837,1996064986 .long 3999719339,2554220882 .long 766784016,2821834349 .long 2566594879,2952996808 .long 3203337956,3210313671 .long 1034457026,3336571891 .long 2466948901,3584528711 .long 3758326383,113926993 .long 168717936,338241895 .long 1188179964,666307205 .long 1546045734,773529912 .long 1522805485,1294757372 .long 2643833823,1396182291 .long 2343527390,1695183700 .long 1014477480,1986661051 .long 1206759142,2177026350 .long 344077627,2456956037 .long 1290863460,2730485921 .long 3158454273,2820302411 .long 3505952657,3259730800 .long 106217008,3345764771 .long 3606008344,3516065817 .long 1432725776,3600352804 .long 1467031594,4094571909 .long 851169720,275423344 .long 3100823752,430227734 .long 1363258195,506948616 .long 3750685593,659060556 .long 3785050280,883997877 .long 3318307427,958139571 .long 3812723403,1322822218 .long 2003034995,1537002063 .long 3602036899,1747873779 .long 1575990012,1955562222 .long 1125592928,2024104815 .long 2716904306,2227730452 .long 442776044,2361852424 
.long 593698344,2428436474 .long 3733110249,2756734187 .long 2999351573,3204031479 .long 3815920427,3329325298 .long 3928383900,3391569614 .long 566280711,3515267271 .long 3454069534,3940187606 .long 4000239992,4118630271 .long 1914138554,116418474 .long 2731055270,174292421 .long 3203993006,289380356 .long 320620315,460393269 .long 587496836,685471733 .long 1086792851,852142971 .long 365543100,1017036298 .long 2618297676,1126000580 .long 3409855158,1288033470 .long 4234509866,1501505948 .long 987167468,1607167915 .long 1246189591,1816402316 .long 67438087,66051 .long 202182159,134810123 .size sha512_block_data_order,.-.L_sha512_block_data_order_begin .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97 .byte 110,115,102,111,114,109,32,102,111,114,32,120,56,54,44,32 .byte 67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97 .byte 112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103 .byte 62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
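The long shift/xor ladders in sha512_block_data_order above are the FIPS 180-4 SHA-512 Sigma and sigma functions evaluated on 64-bit lanes (MMX/SSE2 have no 64-bit rotate, so each rotation is built from a shift pair plus xor), and the .L001K512 table stores each 64-bit round constant as two 32-bit halves, low word first. For reference, the same round function and message-schedule step in plain C; sha512_round and its parameters are illustrative names, not the file's interface.

#include <stdint.h>

/* FIPS 180-4 SHA-512 building blocks, shown in plain C for reference. */
static inline uint64_t rotr64(uint64_t x, unsigned n) {
    return (x >> n) | (x << (64 - n));
}

#define SIGMA0(a) (rotr64((a), 28) ^ rotr64((a), 34) ^ rotr64((a), 39))
#define SIGMA1(e) (rotr64((e), 14) ^ rotr64((e), 18) ^ rotr64((e), 41))
#define sigma0(x) (rotr64((x), 1)  ^ rotr64((x), 8)  ^ ((x) >> 7))
#define sigma1(x) (rotr64((x), 19) ^ rotr64((x), 61) ^ ((x) >> 6))
#define CH(e, f, g)  (((e) & (f)) ^ (~(e) & (g)))
#define MAJ(a, b, c) (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))

/* One compression round t on the working state st[0..7] = a..h.
 * K is the 80-entry round-constant table, W the message schedule. */
static void sha512_round(uint64_t st[8], const uint64_t K[80],
                         uint64_t W[80], int t) {
    if (t >= 16)
        W[t] = sigma1(W[t - 2]) + W[t - 7] + sigma0(W[t - 15]) + W[t - 16];
    uint64_t T1 = st[7] + SIGMA1(st[4]) + CH(st[4], st[5], st[6]) + K[t] + W[t];
    uint64_t T2 = SIGMA0(st[0]) + MAJ(st[0], st[1], st[2]);
    st[7] = st[6]; st[6] = st[5]; st[5] = st[4]; st[4] = st[3] + T1;
    st[3] = st[2]; st[2] = st[1]; st[1] = st[0]; st[0] = T1 + T2;
}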
wlsfx/bnbb
6,610
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86/crypto/fipsmodule/ghash-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl gcm_init_clmul .hidden gcm_init_clmul .type gcm_init_clmul,@function .align 16 gcm_init_clmul: .L_gcm_init_clmul_begin: movl 4(%esp),%edx movl 8(%esp),%eax call .L000pic .L000pic: popl %ecx leal .Lbswap-.L000pic(%ecx),%ecx movdqu (%eax),%xmm2 pshufd $78,%xmm2,%xmm2 pshufd $255,%xmm2,%xmm4 movdqa %xmm2,%xmm3 psllq $1,%xmm2 pxor %xmm5,%xmm5 psrlq $63,%xmm3 pcmpgtd %xmm4,%xmm5 pslldq $8,%xmm3 por %xmm3,%xmm2 pand 16(%ecx),%xmm5 pxor %xmm5,%xmm2 movdqa %xmm2,%xmm0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pshufd $78,%xmm2,%xmm4 pxor %xmm0,%xmm3 pxor %xmm2,%xmm4 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 xorps %xmm0,%xmm3 xorps %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 pshufd $78,%xmm2,%xmm3 pshufd $78,%xmm0,%xmm4 pxor %xmm2,%xmm3 movdqu %xmm2,(%edx) pxor %xmm0,%xmm4 movdqu %xmm0,16(%edx) .byte 102,15,58,15,227,8 movdqu %xmm4,32(%edx) ret .size gcm_init_clmul,.-.L_gcm_init_clmul_begin .globl gcm_gmult_clmul .hidden gcm_gmult_clmul .type gcm_gmult_clmul,@function .align 16 gcm_gmult_clmul: .L_gcm_gmult_clmul_begin: movl 4(%esp),%eax movl 8(%esp),%edx call .L001pic .L001pic: popl %ecx leal .Lbswap-.L001pic(%ecx),%ecx movdqu (%eax),%xmm0 movdqa (%ecx),%xmm5 movups (%edx),%xmm2 .byte 102,15,56,0,197 movups 32(%edx),%xmm4 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 xorps %xmm0,%xmm3 xorps %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 .byte 102,15,56,0,197 movdqu %xmm0,(%eax) ret .size gcm_gmult_clmul,.-.L_gcm_gmult_clmul_begin .globl gcm_ghash_clmul .hidden gcm_ghash_clmul .type gcm_ghash_clmul,@function .align 16 gcm_ghash_clmul: .L_gcm_ghash_clmul_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%eax movl 24(%esp),%edx movl 28(%esp),%esi movl 32(%esp),%ebx call .L002pic .L002pic: popl %ecx leal .Lbswap-.L002pic(%ecx),%ecx movdqu (%eax),%xmm0 movdqa (%ecx),%xmm5 movdqu (%edx),%xmm2 .byte 102,15,56,0,197 subl $16,%ebx jz .L003odd_tail movdqu (%esi),%xmm3 movdqu 16(%esi),%xmm6 .byte 102,15,56,0,221 .byte 102,15,56,0,245 movdqu 32(%edx),%xmm5 pxor %xmm3,%xmm0 pshufd $78,%xmm6,%xmm3 movdqa %xmm6,%xmm7 pxor %xmm6,%xmm3 leal 32(%esi),%esi .byte 102,15,58,68,242,0 .byte 102,15,58,68,250,17 .byte 102,15,58,68,221,0 movups 16(%edx),%xmm2 nop subl $32,%ebx jbe .L004even_tail jmp .L005mod_loop .align 32 .L005mod_loop: pshufd $78,%xmm0,%xmm4 movdqa %xmm0,%xmm1 pxor %xmm0,%xmm4 nop .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,229,16 movups (%edx),%xmm2 xorps 
%xmm6,%xmm0 movdqa (%ecx),%xmm5 xorps %xmm7,%xmm1 movdqu (%esi),%xmm7 pxor %xmm0,%xmm3 movdqu 16(%esi),%xmm6 pxor %xmm1,%xmm3 .byte 102,15,56,0,253 pxor %xmm3,%xmm4 movdqa %xmm4,%xmm3 psrldq $8,%xmm4 pslldq $8,%xmm3 pxor %xmm4,%xmm1 pxor %xmm3,%xmm0 .byte 102,15,56,0,245 pxor %xmm7,%xmm1 movdqa %xmm6,%xmm7 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 .byte 102,15,58,68,242,0 movups 32(%edx),%xmm5 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 pshufd $78,%xmm7,%xmm3 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm7,%xmm3 pxor %xmm4,%xmm1 .byte 102,15,58,68,250,17 movups 16(%edx),%xmm2 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 .byte 102,15,58,68,221,0 leal 32(%esi),%esi subl $32,%ebx ja .L005mod_loop .L004even_tail: pshufd $78,%xmm0,%xmm4 movdqa %xmm0,%xmm1 pxor %xmm0,%xmm4 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,229,16 movdqa (%ecx),%xmm5 xorps %xmm6,%xmm0 xorps %xmm7,%xmm1 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 pxor %xmm3,%xmm4 movdqa %xmm4,%xmm3 psrldq $8,%xmm4 pslldq $8,%xmm3 pxor %xmm4,%xmm1 pxor %xmm3,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 testl %ebx,%ebx jnz .L006done movups (%edx),%xmm2 .L003odd_tail: movdqu (%esi),%xmm3 .byte 102,15,56,0,221 pxor %xmm3,%xmm0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pshufd $78,%xmm2,%xmm4 pxor %xmm0,%xmm3 pxor %xmm2,%xmm4 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 xorps %xmm0,%xmm3 xorps %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 .L006done: .byte 102,15,56,0,197 movdqu %xmm0,(%eax) popl %edi popl %esi popl %ebx popl %ebp ret .size gcm_ghash_clmul,.-.L_gcm_ghash_clmul_begin .align 64 .Lbswap: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,194 .byte 71,72,65,83,72,32,102,111,114,32,120,56,54,44,32,67 .byte 82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112 .byte 112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62 .byte 0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
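gcm_init_clmul, gcm_gmult_clmul and gcm_ghash_clmul above evaluate GHASH: multiplication in GF(2^128) reduced by x^128 + x^7 + x^2 + x + 1, using carry-less multiplies (the .byte 102,15,58,68,... sequences encode pclmulqdq) and shift/xor reduction, with the .Lbswap table providing the byte-swap mask. The bit-serial description of the same field multiplication from NIST SP 800-38D looks like this in C; be128 and gf128_mul are illustrative names only, and this sketch is neither constant-time nor how the file computes it.

#include <stdint.h>

/* Bit-serial multiplication in GF(2^128) with the GCM polynomial.
 * Blocks are held as two big-endian 64-bit halves:
 * hi = bytes 0..7, lo = bytes 8..15 of the 16-byte block. */
typedef struct { uint64_t hi, lo; } be128;

static be128 gf128_mul(be128 x, be128 y) {
    be128 z = {0, 0};
    be128 v = y;
    for (int i = 0; i < 128; i++) {
        /* bit i of x, counted from the most significant bit of byte 0 */
        uint64_t xi = (i < 64) ? (x.hi >> (63 - i)) & 1
                               : (x.lo >> (127 - i)) & 1;
        if (xi) {
            z.hi ^= v.hi;
            z.lo ^= v.lo;
        }
        /* V <- V >> 1, xoring in R = 0xE1 || 0^120 when a bit falls off */
        uint64_t lsb = v.lo & 1;
        v.lo = (v.lo >> 1) | (v.hi << 63);
        v.hi >>= 1;
        if (lsb)
            v.hi ^= 0xE100000000000000ULL;
    }
    return z;
}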
wlsfx/bnbb
19,257
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/linux-x86/crypto/chacha/chacha-x86.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl ChaCha20_ctr32_nohw .hidden ChaCha20_ctr32_nohw .type ChaCha20_ctr32_nohw,@function .align 16 ChaCha20_ctr32_nohw: .L_ChaCha20_ctr32_nohw_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 32(%esp),%esi movl 36(%esp),%edi subl $132,%esp movl (%esi),%eax movl 4(%esi),%ebx movl 8(%esi),%ecx movl 12(%esi),%edx movl %eax,80(%esp) movl %ebx,84(%esp) movl %ecx,88(%esp) movl %edx,92(%esp) movl 16(%esi),%eax movl 20(%esi),%ebx movl 24(%esi),%ecx movl 28(%esi),%edx movl %eax,96(%esp) movl %ebx,100(%esp) movl %ecx,104(%esp) movl %edx,108(%esp) movl (%edi),%eax movl 4(%edi),%ebx movl 8(%edi),%ecx movl 12(%edi),%edx subl $1,%eax movl %eax,112(%esp) movl %ebx,116(%esp) movl %ecx,120(%esp) movl %edx,124(%esp) jmp .L000entry .align 16 .L001outer_loop: movl %ebx,156(%esp) movl %eax,152(%esp) movl %ecx,160(%esp) .L000entry: movl $1634760805,%eax movl $857760878,4(%esp) movl $2036477234,8(%esp) movl $1797285236,12(%esp) movl 84(%esp),%ebx movl 88(%esp),%ebp movl 104(%esp),%ecx movl 108(%esp),%esi movl 116(%esp),%edx movl 120(%esp),%edi movl %ebx,20(%esp) movl %ebp,24(%esp) movl %ecx,40(%esp) movl %esi,44(%esp) movl %edx,52(%esp) movl %edi,56(%esp) movl 92(%esp),%ebx movl 124(%esp),%edi movl 112(%esp),%edx movl 80(%esp),%ebp movl 96(%esp),%ecx movl 100(%esp),%esi addl $1,%edx movl %ebx,28(%esp) movl %edi,60(%esp) movl %edx,112(%esp) movl $10,%ebx jmp .L002loop .align 16 .L002loop: addl %ebp,%eax movl %ebx,128(%esp) movl %ebp,%ebx xorl %eax,%edx roll $16,%edx addl %edx,%ecx xorl %ecx,%ebx movl 52(%esp),%edi roll $12,%ebx movl 20(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,(%esp) roll $8,%edx movl 4(%esp),%eax addl %edx,%ecx movl %edx,48(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi movl %ecx,32(%esp) roll $16,%edi movl %ebx,16(%esp) addl %edi,%esi movl 40(%esp),%ecx xorl %esi,%ebp movl 56(%esp),%edx roll $12,%ebp movl 24(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,4(%esp) roll $8,%edi movl 8(%esp),%eax addl %edi,%esi movl %edi,52(%esp) xorl %esi,%ebp addl %ebx,%eax roll $7,%ebp xorl %eax,%edx movl %esi,36(%esp) roll $16,%edx movl %ebp,20(%esp) addl %edx,%ecx movl 44(%esp),%esi xorl %ecx,%ebx movl 60(%esp),%edi roll $12,%ebx movl 28(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,8(%esp) roll $8,%edx movl 12(%esp),%eax addl %edx,%ecx movl %edx,56(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi roll $16,%edi movl %ebx,24(%esp) addl %edi,%esi xorl %esi,%ebp roll $12,%ebp movl 20(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,12(%esp) roll $8,%edi movl (%esp),%eax addl %edi,%esi movl %edi,%edx xorl %esi,%ebp addl %ebx,%eax roll $7,%ebp xorl %eax,%edx roll $16,%edx movl %ebp,28(%esp) addl %edx,%ecx xorl %ecx,%ebx movl 48(%esp),%edi roll $12,%ebx movl 24(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,(%esp) roll $8,%edx movl 4(%esp),%eax addl %edx,%ecx movl %edx,60(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi movl %ecx,40(%esp) roll $16,%edi movl %ebx,20(%esp) addl %edi,%esi movl 32(%esp),%ecx xorl %esi,%ebp movl 52(%esp),%edx roll $12,%ebp movl 28(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,4(%esp) roll $8,%edi movl 8(%esp),%eax addl %edi,%esi movl %edi,48(%esp) xorl %esi,%ebp addl %ebx,%eax roll $7,%ebp xorl %eax,%edx movl %esi,44(%esp) roll $16,%edx movl %ebp,24(%esp) addl %edx,%ecx movl 
36(%esp),%esi xorl %ecx,%ebx movl 56(%esp),%edi roll $12,%ebx movl 16(%esp),%ebp addl %ebx,%eax xorl %eax,%edx movl %eax,8(%esp) roll $8,%edx movl 12(%esp),%eax addl %edx,%ecx movl %edx,52(%esp) xorl %ecx,%ebx addl %ebp,%eax roll $7,%ebx xorl %eax,%edi roll $16,%edi movl %ebx,28(%esp) addl %edi,%esi xorl %esi,%ebp movl 48(%esp),%edx roll $12,%ebp movl 128(%esp),%ebx addl %ebp,%eax xorl %eax,%edi movl %eax,12(%esp) roll $8,%edi movl (%esp),%eax addl %edi,%esi movl %edi,56(%esp) xorl %esi,%ebp roll $7,%ebp decl %ebx jnz .L002loop movl 160(%esp),%ebx addl $1634760805,%eax addl 80(%esp),%ebp addl 96(%esp),%ecx addl 100(%esp),%esi cmpl $64,%ebx jb .L003tail movl 156(%esp),%ebx addl 112(%esp),%edx addl 120(%esp),%edi xorl (%ebx),%eax xorl 16(%ebx),%ebp movl %eax,(%esp) movl 152(%esp),%eax xorl 32(%ebx),%ecx xorl 36(%ebx),%esi xorl 48(%ebx),%edx xorl 56(%ebx),%edi movl %ebp,16(%eax) movl %ecx,32(%eax) movl %esi,36(%eax) movl %edx,48(%eax) movl %edi,56(%eax) movl 4(%esp),%ebp movl 8(%esp),%ecx movl 12(%esp),%esi movl 20(%esp),%edx movl 24(%esp),%edi addl $857760878,%ebp addl $2036477234,%ecx addl $1797285236,%esi addl 84(%esp),%edx addl 88(%esp),%edi xorl 4(%ebx),%ebp xorl 8(%ebx),%ecx xorl 12(%ebx),%esi xorl 20(%ebx),%edx xorl 24(%ebx),%edi movl %ebp,4(%eax) movl %ecx,8(%eax) movl %esi,12(%eax) movl %edx,20(%eax) movl %edi,24(%eax) movl 28(%esp),%ebp movl 40(%esp),%ecx movl 44(%esp),%esi movl 52(%esp),%edx movl 60(%esp),%edi addl 92(%esp),%ebp addl 104(%esp),%ecx addl 108(%esp),%esi addl 116(%esp),%edx addl 124(%esp),%edi xorl 28(%ebx),%ebp xorl 40(%ebx),%ecx xorl 44(%ebx),%esi xorl 52(%ebx),%edx xorl 60(%ebx),%edi leal 64(%ebx),%ebx movl %ebp,28(%eax) movl (%esp),%ebp movl %ecx,40(%eax) movl 160(%esp),%ecx movl %esi,44(%eax) movl %edx,52(%eax) movl %edi,60(%eax) movl %ebp,(%eax) leal 64(%eax),%eax subl $64,%ecx jnz .L001outer_loop jmp .L004done .L003tail: addl 112(%esp),%edx addl 120(%esp),%edi movl %eax,(%esp) movl %ebp,16(%esp) movl %ecx,32(%esp) movl %esi,36(%esp) movl %edx,48(%esp) movl %edi,56(%esp) movl 4(%esp),%ebp movl 8(%esp),%ecx movl 12(%esp),%esi movl 20(%esp),%edx movl 24(%esp),%edi addl $857760878,%ebp addl $2036477234,%ecx addl $1797285236,%esi addl 84(%esp),%edx addl 88(%esp),%edi movl %ebp,4(%esp) movl %ecx,8(%esp) movl %esi,12(%esp) movl %edx,20(%esp) movl %edi,24(%esp) movl 28(%esp),%ebp movl 40(%esp),%ecx movl 44(%esp),%esi movl 52(%esp),%edx movl 60(%esp),%edi addl 92(%esp),%ebp addl 104(%esp),%ecx addl 108(%esp),%esi addl 116(%esp),%edx addl 124(%esp),%edi movl %ebp,28(%esp) movl 156(%esp),%ebp movl %ecx,40(%esp) movl 152(%esp),%ecx movl %esi,44(%esp) xorl %esi,%esi movl %edx,52(%esp) movl %edi,60(%esp) xorl %eax,%eax xorl %edx,%edx .L005tail_loop: movb (%esi,%ebp,1),%al movb (%esp,%esi,1),%dl leal 1(%esi),%esi xorb %dl,%al movb %al,-1(%ecx,%esi,1) decl %ebx jnz .L005tail_loop .L004done: addl $132,%esp popl %edi popl %esi popl %ebx popl %ebp ret .size ChaCha20_ctr32_nohw,.-.L_ChaCha20_ctr32_nohw_begin .globl ChaCha20_ctr32_ssse3 .hidden ChaCha20_ctr32_ssse3 .type ChaCha20_ctr32_ssse3,@function .align 16 ChaCha20_ctr32_ssse3: .L_ChaCha20_ctr32_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi call .Lpic_point .Lpic_point: popl %eax movl 20(%esp),%edi movl 24(%esp),%esi movl 28(%esp),%ecx movl 32(%esp),%edx movl 36(%esp),%ebx movl %esp,%ebp subl $524,%esp andl $-64,%esp movl %ebp,512(%esp) leal .Lssse3_data-.Lpic_point(%eax),%eax movdqu (%ebx),%xmm3 cmpl $256,%ecx jb .L0061x movl %edx,516(%esp) movl %ebx,520(%esp) subl $256,%ecx leal 384(%esp),%ebp movdqu (%edx),%xmm7 
pshufd $0,%xmm3,%xmm0 pshufd $85,%xmm3,%xmm1 pshufd $170,%xmm3,%xmm2 pshufd $255,%xmm3,%xmm3 paddd 48(%eax),%xmm0 pshufd $0,%xmm7,%xmm4 pshufd $85,%xmm7,%xmm5 psubd 64(%eax),%xmm0 pshufd $170,%xmm7,%xmm6 pshufd $255,%xmm7,%xmm7 movdqa %xmm0,64(%ebp) movdqa %xmm1,80(%ebp) movdqa %xmm2,96(%ebp) movdqa %xmm3,112(%ebp) movdqu 16(%edx),%xmm3 movdqa %xmm4,-64(%ebp) movdqa %xmm5,-48(%ebp) movdqa %xmm6,-32(%ebp) movdqa %xmm7,-16(%ebp) movdqa 32(%eax),%xmm7 leal 128(%esp),%ebx pshufd $0,%xmm3,%xmm0 pshufd $85,%xmm3,%xmm1 pshufd $170,%xmm3,%xmm2 pshufd $255,%xmm3,%xmm3 pshufd $0,%xmm7,%xmm4 pshufd $85,%xmm7,%xmm5 pshufd $170,%xmm7,%xmm6 pshufd $255,%xmm7,%xmm7 movdqa %xmm0,(%ebp) movdqa %xmm1,16(%ebp) movdqa %xmm2,32(%ebp) movdqa %xmm3,48(%ebp) movdqa %xmm4,-128(%ebp) movdqa %xmm5,-112(%ebp) movdqa %xmm6,-96(%ebp) movdqa %xmm7,-80(%ebp) leal 128(%esi),%esi leal 128(%edi),%edi jmp .L007outer_loop .align 16 .L007outer_loop: movdqa -112(%ebp),%xmm1 movdqa -96(%ebp),%xmm2 movdqa -80(%ebp),%xmm3 movdqa -48(%ebp),%xmm5 movdqa -32(%ebp),%xmm6 movdqa -16(%ebp),%xmm7 movdqa %xmm1,-112(%ebx) movdqa %xmm2,-96(%ebx) movdqa %xmm3,-80(%ebx) movdqa %xmm5,-48(%ebx) movdqa %xmm6,-32(%ebx) movdqa %xmm7,-16(%ebx) movdqa 32(%ebp),%xmm2 movdqa 48(%ebp),%xmm3 movdqa 64(%ebp),%xmm4 movdqa 80(%ebp),%xmm5 movdqa 96(%ebp),%xmm6 movdqa 112(%ebp),%xmm7 paddd 64(%eax),%xmm4 movdqa %xmm2,32(%ebx) movdqa %xmm3,48(%ebx) movdqa %xmm4,64(%ebx) movdqa %xmm5,80(%ebx) movdqa %xmm6,96(%ebx) movdqa %xmm7,112(%ebx) movdqa %xmm4,64(%ebp) movdqa -128(%ebp),%xmm0 movdqa %xmm4,%xmm6 movdqa -64(%ebp),%xmm3 movdqa (%ebp),%xmm4 movdqa 16(%ebp),%xmm5 movl $10,%edx nop .align 16 .L008loop: paddd %xmm3,%xmm0 movdqa %xmm3,%xmm2 pxor %xmm0,%xmm6 pshufb (%eax),%xmm6 paddd %xmm6,%xmm4 pxor %xmm4,%xmm2 movdqa -48(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -112(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 80(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-128(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,64(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 movdqa %xmm4,(%ebx) pshufb (%eax),%xmm7 movdqa %xmm2,-64(%ebx) paddd %xmm7,%xmm5 movdqa 32(%ebx),%xmm4 pxor %xmm5,%xmm3 movdqa -32(%ebx),%xmm2 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -96(%ebx),%xmm0 paddd %xmm3,%xmm1 movdqa 96(%ebx),%xmm6 pxor %xmm1,%xmm7 movdqa %xmm1,-112(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,80(%ebx) pxor %xmm5,%xmm3 paddd %xmm2,%xmm0 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 pxor %xmm0,%xmm6 por %xmm1,%xmm3 movdqa %xmm5,16(%ebx) pshufb (%eax),%xmm6 movdqa %xmm3,-48(%ebx) paddd %xmm6,%xmm4 movdqa 48(%ebx),%xmm5 pxor %xmm4,%xmm2 movdqa -16(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -80(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 112(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-96(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,96(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 pshufb (%eax),%xmm7 movdqa %xmm2,-32(%ebx) paddd %xmm7,%xmm5 pxor %xmm5,%xmm3 movdqa -48(%ebx),%xmm2 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -128(%ebx),%xmm0 paddd %xmm3,%xmm1 pxor %xmm1,%xmm7 movdqa %xmm1,-80(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,%xmm6 pxor %xmm5,%xmm3 paddd %xmm2,%xmm0 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 pxor %xmm0,%xmm6 por 
%xmm1,%xmm3 pshufb (%eax),%xmm6 movdqa %xmm3,-16(%ebx) paddd %xmm6,%xmm4 pxor %xmm4,%xmm2 movdqa -32(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -112(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 64(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-128(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,112(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 movdqa %xmm4,32(%ebx) pshufb (%eax),%xmm7 movdqa %xmm2,-48(%ebx) paddd %xmm7,%xmm5 movdqa (%ebx),%xmm4 pxor %xmm5,%xmm3 movdqa -16(%ebx),%xmm2 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -96(%ebx),%xmm0 paddd %xmm3,%xmm1 movdqa 80(%ebx),%xmm6 pxor %xmm1,%xmm7 movdqa %xmm1,-112(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,64(%ebx) pxor %xmm5,%xmm3 paddd %xmm2,%xmm0 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 pxor %xmm0,%xmm6 por %xmm1,%xmm3 movdqa %xmm5,48(%ebx) pshufb (%eax),%xmm6 movdqa %xmm3,-32(%ebx) paddd %xmm6,%xmm4 movdqa 16(%ebx),%xmm5 pxor %xmm4,%xmm2 movdqa -64(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -80(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 96(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-96(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,80(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 pshufb (%eax),%xmm7 movdqa %xmm2,-16(%ebx) paddd %xmm7,%xmm5 pxor %xmm5,%xmm3 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -128(%ebx),%xmm0 paddd %xmm3,%xmm1 movdqa 64(%ebx),%xmm6 pxor %xmm1,%xmm7 movdqa %xmm1,-80(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,96(%ebx) pxor %xmm5,%xmm3 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 por %xmm1,%xmm3 decl %edx jnz .L008loop movdqa %xmm3,-64(%ebx) movdqa %xmm4,(%ebx) movdqa %xmm5,16(%ebx) movdqa %xmm6,64(%ebx) movdqa %xmm7,96(%ebx) movdqa -112(%ebx),%xmm1 movdqa -96(%ebx),%xmm2 movdqa -80(%ebx),%xmm3 paddd -128(%ebp),%xmm0 paddd -112(%ebp),%xmm1 paddd -96(%ebp),%xmm2 paddd -80(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 16(%esi),%esi pxor %xmm0,%xmm4 movdqa -64(%ebx),%xmm0 pxor %xmm1,%xmm5 movdqa -48(%ebx),%xmm1 pxor %xmm2,%xmm6 movdqa -32(%ebx),%xmm2 pxor %xmm3,%xmm7 movdqa -16(%ebx),%xmm3 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 16(%edi),%edi paddd -64(%ebp),%xmm0 paddd -48(%ebp),%xmm1 paddd -32(%ebp),%xmm2 paddd -16(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 16(%esi),%esi pxor %xmm0,%xmm4 movdqa (%ebx),%xmm0 pxor %xmm1,%xmm5 movdqa 16(%ebx),%xmm1 pxor %xmm2,%xmm6 movdqa 32(%ebx),%xmm2 pxor %xmm3,%xmm7 movdqa 48(%ebx),%xmm3 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 16(%edi),%edi paddd (%ebp),%xmm0 paddd 16(%ebp),%xmm1 paddd 32(%ebp),%xmm2 paddd 
48(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 16(%esi),%esi pxor %xmm0,%xmm4 movdqa 64(%ebx),%xmm0 pxor %xmm1,%xmm5 movdqa 80(%ebx),%xmm1 pxor %xmm2,%xmm6 movdqa 96(%ebx),%xmm2 pxor %xmm3,%xmm7 movdqa 112(%ebx),%xmm3 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 16(%edi),%edi paddd 64(%ebp),%xmm0 paddd 80(%ebp),%xmm1 paddd 96(%ebp),%xmm2 paddd 112(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 208(%esi),%esi pxor %xmm0,%xmm4 pxor %xmm1,%xmm5 pxor %xmm2,%xmm6 pxor %xmm3,%xmm7 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 208(%edi),%edi subl $256,%ecx jnc .L007outer_loop addl $256,%ecx jz .L009done movl 520(%esp),%ebx leal -128(%esi),%esi movl 516(%esp),%edx leal -128(%edi),%edi movd 64(%ebp),%xmm2 movdqu (%ebx),%xmm3 paddd 96(%eax),%xmm2 pand 112(%eax),%xmm3 por %xmm2,%xmm3 .L0061x: movdqa 32(%eax),%xmm0 movdqu (%edx),%xmm1 movdqu 16(%edx),%xmm2 movdqa (%eax),%xmm6 movdqa 16(%eax),%xmm7 movl %ebp,48(%esp) movdqa %xmm0,(%esp) movdqa %xmm1,16(%esp) movdqa %xmm2,32(%esp) movdqa %xmm3,48(%esp) movl $10,%edx jmp .L010loop1x .align 16 .L011outer1x: movdqa 80(%eax),%xmm3 movdqa (%esp),%xmm0 movdqa 16(%esp),%xmm1 movdqa 32(%esp),%xmm2 paddd 48(%esp),%xmm3 movl $10,%edx movdqa %xmm3,48(%esp) jmp .L010loop1x .align 16 .L010loop1x: paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,222 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $20,%xmm1 pslld $12,%xmm4 por %xmm4,%xmm1 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,223 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $25,%xmm1 pslld $7,%xmm4 por %xmm4,%xmm1 pshufd $78,%xmm2,%xmm2 pshufd $57,%xmm1,%xmm1 pshufd $147,%xmm3,%xmm3 nop paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,222 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $20,%xmm1 pslld $12,%xmm4 por %xmm4,%xmm1 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,223 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $25,%xmm1 pslld $7,%xmm4 por %xmm4,%xmm1 pshufd $78,%xmm2,%xmm2 pshufd $147,%xmm1,%xmm1 pshufd $57,%xmm3,%xmm3 decl %edx jnz .L010loop1x paddd (%esp),%xmm0 paddd 16(%esp),%xmm1 paddd 32(%esp),%xmm2 paddd 48(%esp),%xmm3 cmpl $64,%ecx jb .L012tail movdqu (%esi),%xmm4 movdqu 16(%esi),%xmm5 pxor %xmm4,%xmm0 movdqu 32(%esi),%xmm4 pxor %xmm5,%xmm1 movdqu 48(%esi),%xmm5 pxor %xmm4,%xmm2 pxor %xmm5,%xmm3 leal 64(%esi),%esi movdqu %xmm0,(%edi) movdqu %xmm1,16(%edi) movdqu %xmm2,32(%edi) movdqu %xmm3,48(%edi) leal 64(%edi),%edi subl $64,%ecx jnz .L011outer1x jmp .L009done .L012tail: movdqa %xmm0,(%esp) movdqa %xmm1,16(%esp) movdqa %xmm2,32(%esp) movdqa %xmm3,48(%esp) xorl %eax,%eax xorl %edx,%edx xorl %ebp,%ebp .L013tail_loop: movb (%esp,%ebp,1),%al movb (%esi,%ebp,1),%dl leal 1(%ebp),%ebp xorb %dl,%al movb %al,-1(%edi,%ebp,1) decl %ecx jnz .L013tail_loop .L009done: movl 512(%esp),%esp popl %edi popl %esi popl 
%ebx popl %ebp ret .size ChaCha20_ctr32_ssse3,.-.L_ChaCha20_ctr32_ssse3_begin .align 64 .Lssse3_data: .byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13 .byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14 .long 1634760805,857760878,2036477234,1797285236 .long 0,1,2,3 .long 4,4,4,4 .long 1,0,0,0 .long 4,0,0,0 .long 0,-1,-1,-1 .align 64 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54 .byte 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32 .byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111 .byte 114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
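Both ChaCha20_ctr32_nohw and ChaCha20_ctr32_ssse3 above generate the ChaCha20 keystream from a 16-word state: the four constants 1634760805, 857760878, 2036477234 and 1797285236 (the ASCII "expand 32-byte k"), eight key words, a 32-bit block counter and three nonce words, run through 20 rounds of quarter-rounds and then added back to the input state; the roll $16/$12/$8/$7 sequences in the scalar path are exactly the quarter-round rotations. The sketch below is only the reference block function (chacha20_block is an illustrative name, not the scheduling or buffering used by either routine above); the _ctr32 routines additionally XOR this keystream into the input and bump state word 12 per 64-byte block.

#include <stdint.h>
#include <string.h>

#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))
#define QR(a, b, c, d)                          \
    do {                                        \
        a += b; d ^= a; d = ROTL32(d, 16);      \
        c += d; b ^= c; b = ROTL32(b, 12);      \
        a += b; d ^= a; d = ROTL32(d, 8);       \
        c += d; b ^= c; b = ROTL32(b, 7);       \
    } while (0)

/* One 64-byte ChaCha20 block: in[16] is the initial state (constants, key,
   counter, nonce); out[16] receives the keystream words. */
static void chacha20_block(uint32_t out[16], const uint32_t in[16]) {
    uint32_t x[16];
    memcpy(x, in, sizeof(x));
    for (int i = 0; i < 10; i++) {          /* 10 double rounds = 20 rounds */
        QR(x[0], x[4], x[8],  x[12]);       /* column rounds */
        QR(x[1], x[5], x[9],  x[13]);
        QR(x[2], x[6], x[10], x[14]);
        QR(x[3], x[7], x[11], x[15]);
        QR(x[0], x[5], x[10], x[15]);       /* diagonal rounds */
        QR(x[1], x[6], x[11], x[12]);
        QR(x[2], x[7], x[8],  x[13]);
        QR(x[3], x[4], x[9],  x[14]);
    }
    for (int i = 0; i < 16; i++) out[i] = x[i] + in[i];  /* feed-forward */
}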
wlsfx/bnbb
10,917
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/test/trampoline-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <openssl/arm_arch.h> .text // abi_test_trampoline loads callee-saved registers from |state|, calls |func| // with |argv|, then saves the callee-saved registers into |state|. It returns // the result of |func|. The |unwind| argument is unused. // uint64_t abi_test_trampoline(void (*func)(...), CallerState *state, // const uint64_t *argv, size_t argc, // uint64_t unwind); .globl abi_test_trampoline .align 4 abi_test_trampoline: Labi_test_trampoline_begin: AARCH64_SIGN_LINK_REGISTER // Stack layout (low to high addresses) // x29,x30 (16 bytes) // d8-d15 (64 bytes) // x19-x28 (80 bytes) // x1 (8 bytes) // padding (8 bytes) stp x29, x30, [sp, #-176]! mov x29, sp // Saved callee-saved registers and |state|. stp d8, d9, [sp, #16] stp d10, d11, [sp, #32] stp d12, d13, [sp, #48] stp d14, d15, [sp, #64] stp x19, x20, [sp, #80] stp x21, x22, [sp, #96] stp x23, x24, [sp, #112] stp x25, x26, [sp, #128] stp x27, x28, [sp, #144] str x1, [sp, #160] // Load registers from |state|, with the exception of x29. x29 is the // frame pointer and also callee-saved, but AAPCS64 allows platforms to // mandate that x29 always point to a frame. iOS64 does so, which means // we cannot fill x29 with entropy without violating ABI rules // ourselves. x29 is tested separately below. ldp d8, d9, [x1], #16 ldp d10, d11, [x1], #16 ldp d12, d13, [x1], #16 ldp d14, d15, [x1], #16 ldp x19, x20, [x1], #16 ldp x21, x22, [x1], #16 ldp x23, x24, [x1], #16 ldp x25, x26, [x1], #16 ldp x27, x28, [x1], #16 // Move parameters into temporary registers. mov x9, x0 mov x10, x2 mov x11, x3 // Load parameters into registers. cbz x11, Largs_done ldr x0, [x10], #8 subs x11, x11, #1 b.eq Largs_done ldr x1, [x10], #8 subs x11, x11, #1 b.eq Largs_done ldr x2, [x10], #8 subs x11, x11, #1 b.eq Largs_done ldr x3, [x10], #8 subs x11, x11, #1 b.eq Largs_done ldr x4, [x10], #8 subs x11, x11, #1 b.eq Largs_done ldr x5, [x10], #8 subs x11, x11, #1 b.eq Largs_done ldr x6, [x10], #8 subs x11, x11, #1 b.eq Largs_done ldr x7, [x10], #8 Largs_done: blr x9 // Reload |state| and store registers. ldr x1, [sp, #160] stp d8, d9, [x1], #16 stp d10, d11, [x1], #16 stp d12, d13, [x1], #16 stp d14, d15, [x1], #16 stp x19, x20, [x1], #16 stp x21, x22, [x1], #16 stp x23, x24, [x1], #16 stp x25, x26, [x1], #16 stp x27, x28, [x1], #16 // |func| is required to preserve x29, the frame pointer. We cannot load // random values into x29 (see comment above), so compare it against the // expected value and zero the field of |state| if corrupted. mov x9, sp cmp x29, x9 b.eq Lx29_ok str xzr, [x1] Lx29_ok: // Restore callee-saved registers. 
ldp d8, d9, [sp, #16] ldp d10, d11, [sp, #32] ldp d12, d13, [sp, #48] ldp d14, d15, [sp, #64] ldp x19, x20, [sp, #80] ldp x21, x22, [sp, #96] ldp x23, x24, [sp, #112] ldp x25, x26, [sp, #128] ldp x27, x28, [sp, #144] ldp x29, x30, [sp], #176 AARCH64_VALIDATE_LINK_REGISTER ret .globl abi_test_clobber_x0 .align 4 abi_test_clobber_x0: AARCH64_VALID_CALL_TARGET mov x0, xzr ret .globl abi_test_clobber_x1 .align 4 abi_test_clobber_x1: AARCH64_VALID_CALL_TARGET mov x1, xzr ret .globl abi_test_clobber_x2 .align 4 abi_test_clobber_x2: AARCH64_VALID_CALL_TARGET mov x2, xzr ret .globl abi_test_clobber_x3 .align 4 abi_test_clobber_x3: AARCH64_VALID_CALL_TARGET mov x3, xzr ret .globl abi_test_clobber_x4 .align 4 abi_test_clobber_x4: AARCH64_VALID_CALL_TARGET mov x4, xzr ret .globl abi_test_clobber_x5 .align 4 abi_test_clobber_x5: AARCH64_VALID_CALL_TARGET mov x5, xzr ret .globl abi_test_clobber_x6 .align 4 abi_test_clobber_x6: AARCH64_VALID_CALL_TARGET mov x6, xzr ret .globl abi_test_clobber_x7 .align 4 abi_test_clobber_x7: AARCH64_VALID_CALL_TARGET mov x7, xzr ret .globl abi_test_clobber_x8 .align 4 abi_test_clobber_x8: AARCH64_VALID_CALL_TARGET mov x8, xzr ret .globl abi_test_clobber_x9 .align 4 abi_test_clobber_x9: AARCH64_VALID_CALL_TARGET mov x9, xzr ret .globl abi_test_clobber_x10 .align 4 abi_test_clobber_x10: AARCH64_VALID_CALL_TARGET mov x10, xzr ret .globl abi_test_clobber_x11 .align 4 abi_test_clobber_x11: AARCH64_VALID_CALL_TARGET mov x11, xzr ret .globl abi_test_clobber_x12 .align 4 abi_test_clobber_x12: AARCH64_VALID_CALL_TARGET mov x12, xzr ret .globl abi_test_clobber_x13 .align 4 abi_test_clobber_x13: AARCH64_VALID_CALL_TARGET mov x13, xzr ret .globl abi_test_clobber_x14 .align 4 abi_test_clobber_x14: AARCH64_VALID_CALL_TARGET mov x14, xzr ret .globl abi_test_clobber_x15 .align 4 abi_test_clobber_x15: AARCH64_VALID_CALL_TARGET mov x15, xzr ret .globl abi_test_clobber_x16 .align 4 abi_test_clobber_x16: AARCH64_VALID_CALL_TARGET mov x16, xzr ret .globl abi_test_clobber_x17 .align 4 abi_test_clobber_x17: AARCH64_VALID_CALL_TARGET mov x17, xzr ret .globl abi_test_clobber_x19 .align 4 abi_test_clobber_x19: AARCH64_VALID_CALL_TARGET mov x19, xzr ret .globl abi_test_clobber_x20 .align 4 abi_test_clobber_x20: AARCH64_VALID_CALL_TARGET mov x20, xzr ret .globl abi_test_clobber_x21 .align 4 abi_test_clobber_x21: AARCH64_VALID_CALL_TARGET mov x21, xzr ret .globl abi_test_clobber_x22 .align 4 abi_test_clobber_x22: AARCH64_VALID_CALL_TARGET mov x22, xzr ret .globl abi_test_clobber_x23 .align 4 abi_test_clobber_x23: AARCH64_VALID_CALL_TARGET mov x23, xzr ret .globl abi_test_clobber_x24 .align 4 abi_test_clobber_x24: AARCH64_VALID_CALL_TARGET mov x24, xzr ret .globl abi_test_clobber_x25 .align 4 abi_test_clobber_x25: AARCH64_VALID_CALL_TARGET mov x25, xzr ret .globl abi_test_clobber_x26 .align 4 abi_test_clobber_x26: AARCH64_VALID_CALL_TARGET mov x26, xzr ret .globl abi_test_clobber_x27 .align 4 abi_test_clobber_x27: AARCH64_VALID_CALL_TARGET mov x27, xzr ret .globl abi_test_clobber_x28 .align 4 abi_test_clobber_x28: AARCH64_VALID_CALL_TARGET mov x28, xzr ret .globl abi_test_clobber_x29 .align 4 abi_test_clobber_x29: AARCH64_VALID_CALL_TARGET mov x29, xzr ret .globl abi_test_clobber_d0 .align 4 abi_test_clobber_d0: AARCH64_VALID_CALL_TARGET fmov d0, xzr ret .globl abi_test_clobber_d1 .align 4 abi_test_clobber_d1: AARCH64_VALID_CALL_TARGET fmov d1, xzr ret .globl abi_test_clobber_d2 .align 4 abi_test_clobber_d2: AARCH64_VALID_CALL_TARGET fmov d2, xzr ret .globl abi_test_clobber_d3 .align 4 
abi_test_clobber_d3: AARCH64_VALID_CALL_TARGET fmov d3, xzr ret .globl abi_test_clobber_d4 .align 4 abi_test_clobber_d4: AARCH64_VALID_CALL_TARGET fmov d4, xzr ret .globl abi_test_clobber_d5 .align 4 abi_test_clobber_d5: AARCH64_VALID_CALL_TARGET fmov d5, xzr ret .globl abi_test_clobber_d6 .align 4 abi_test_clobber_d6: AARCH64_VALID_CALL_TARGET fmov d6, xzr ret .globl abi_test_clobber_d7 .align 4 abi_test_clobber_d7: AARCH64_VALID_CALL_TARGET fmov d7, xzr ret .globl abi_test_clobber_d8 .align 4 abi_test_clobber_d8: AARCH64_VALID_CALL_TARGET fmov d8, xzr ret .globl abi_test_clobber_d9 .align 4 abi_test_clobber_d9: AARCH64_VALID_CALL_TARGET fmov d9, xzr ret .globl abi_test_clobber_d10 .align 4 abi_test_clobber_d10: AARCH64_VALID_CALL_TARGET fmov d10, xzr ret .globl abi_test_clobber_d11 .align 4 abi_test_clobber_d11: AARCH64_VALID_CALL_TARGET fmov d11, xzr ret .globl abi_test_clobber_d12 .align 4 abi_test_clobber_d12: AARCH64_VALID_CALL_TARGET fmov d12, xzr ret .globl abi_test_clobber_d13 .align 4 abi_test_clobber_d13: AARCH64_VALID_CALL_TARGET fmov d13, xzr ret .globl abi_test_clobber_d14 .align 4 abi_test_clobber_d14: AARCH64_VALID_CALL_TARGET fmov d14, xzr ret .globl abi_test_clobber_d15 .align 4 abi_test_clobber_d15: AARCH64_VALID_CALL_TARGET fmov d15, xzr ret .globl abi_test_clobber_d16 .align 4 abi_test_clobber_d16: AARCH64_VALID_CALL_TARGET fmov d16, xzr ret .globl abi_test_clobber_d17 .align 4 abi_test_clobber_d17: AARCH64_VALID_CALL_TARGET fmov d17, xzr ret .globl abi_test_clobber_d18 .align 4 abi_test_clobber_d18: AARCH64_VALID_CALL_TARGET fmov d18, xzr ret .globl abi_test_clobber_d19 .align 4 abi_test_clobber_d19: AARCH64_VALID_CALL_TARGET fmov d19, xzr ret .globl abi_test_clobber_d20 .align 4 abi_test_clobber_d20: AARCH64_VALID_CALL_TARGET fmov d20, xzr ret .globl abi_test_clobber_d21 .align 4 abi_test_clobber_d21: AARCH64_VALID_CALL_TARGET fmov d21, xzr ret .globl abi_test_clobber_d22 .align 4 abi_test_clobber_d22: AARCH64_VALID_CALL_TARGET fmov d22, xzr ret .globl abi_test_clobber_d23 .align 4 abi_test_clobber_d23: AARCH64_VALID_CALL_TARGET fmov d23, xzr ret .globl abi_test_clobber_d24 .align 4 abi_test_clobber_d24: AARCH64_VALID_CALL_TARGET fmov d24, xzr ret .globl abi_test_clobber_d25 .align 4 abi_test_clobber_d25: AARCH64_VALID_CALL_TARGET fmov d25, xzr ret .globl abi_test_clobber_d26 .align 4 abi_test_clobber_d26: AARCH64_VALID_CALL_TARGET fmov d26, xzr ret .globl abi_test_clobber_d27 .align 4 abi_test_clobber_d27: AARCH64_VALID_CALL_TARGET fmov d27, xzr ret .globl abi_test_clobber_d28 .align 4 abi_test_clobber_d28: AARCH64_VALID_CALL_TARGET fmov d28, xzr ret .globl abi_test_clobber_d29 .align 4 abi_test_clobber_d29: AARCH64_VALID_CALL_TARGET fmov d29, xzr ret .globl abi_test_clobber_d30 .align 4 abi_test_clobber_d30: AARCH64_VALID_CALL_TARGET fmov d30, xzr ret .globl abi_test_clobber_d31 .align 4 abi_test_clobber_d31: AARCH64_VALID_CALL_TARGET fmov d31, xzr ret .globl abi_test_clobber_v8_upper .align 4 abi_test_clobber_v8_upper: AARCH64_VALID_CALL_TARGET fmov v8.d[1], xzr ret .globl abi_test_clobber_v9_upper .align 4 abi_test_clobber_v9_upper: AARCH64_VALID_CALL_TARGET fmov v9.d[1], xzr ret .globl abi_test_clobber_v10_upper .align 4 abi_test_clobber_v10_upper: AARCH64_VALID_CALL_TARGET fmov v10.d[1], xzr ret .globl abi_test_clobber_v11_upper .align 4 abi_test_clobber_v11_upper: AARCH64_VALID_CALL_TARGET fmov v11.d[1], xzr ret .globl abi_test_clobber_v12_upper .align 4 abi_test_clobber_v12_upper: AARCH64_VALID_CALL_TARGET fmov v12.d[1], xzr ret .globl 
abi_test_clobber_v13_upper .align 4 abi_test_clobber_v13_upper: AARCH64_VALID_CALL_TARGET fmov v13.d[1], xzr ret .globl abi_test_clobber_v14_upper .align 4 abi_test_clobber_v14_upper: AARCH64_VALID_CALL_TARGET fmov v14.d[1], xzr ret .globl abi_test_clobber_v15_upper .align 4 abi_test_clobber_v15_upper: AARCH64_VALID_CALL_TARGET fmov v15.d[1], xzr ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
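The header comment of the trampoline above documents its contract: abi_test_trampoline seeds the AArch64 callee-saved registers (d8-d15, x19-x28) from |state|, calls |func| with up to eight arguments taken from |argv|, and writes the callee-saved registers back into |state| so a test harness can detect any register the callee failed to preserve (x29 is instead compared against the frame pointer, as the comments explain). The sketch below only mirrors the load/store order visible in the assembly; CallerStateSketch and preserves_callee_saved are illustrative names, and the real CallerState type and ABI-checking macros live in the C++ abi_test harness, not in this file.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Register block in the order the trampoline loads and stores it:
   d8..d15 first (low 64 bits of v8..v15), then x19..x28. */
typedef struct {
    uint64_t d8_d15[8];
    uint64_t x19_x28[10];
} CallerStateSketch;

/* Provided by the assembly above (prototype per its header comment, narrowed
   to a no-argument callee for this sketch). */
uint64_t abi_test_trampoline(void (*func)(void), CallerStateSketch *state,
                             const uint64_t *argv, size_t argc, uint64_t unwind);

/* Conceptual use: seed the callee-saved registers, run |func| through the
   trampoline, then check that every register came back unchanged. */
static int preserves_callee_saved(void (*func)(void)) {
    CallerStateSketch seed, got;
    for (size_t i = 0; i < 8; i++)  seed.d8_d15[i]  = 0x0101010101010101u * (i + 1);
    for (size_t i = 0; i < 10; i++) seed.x19_x28[i] = 0xA5A5A5A5A5A5A5A5u ^ i;
    got = seed;
    abi_test_trampoline(func, &got, NULL, 0, 0);
    return memcmp(&seed, &got, sizeof(seed)) == 0;   /* 1 if the ABI was respected */
}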
wlsfx/bnbb
16,886
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/aesv8-armx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <openssl/arm_arch.h> #if __ARM_MAX_ARCH__>=7 .text .arch armv8-a+crypto .section .rodata .align 5 Lrcon: .long 0x01,0x01,0x01,0x01 .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat .long 0x1b,0x1b,0x1b,0x1b .text .globl aes_hw_set_encrypt_key .def aes_hw_set_encrypt_key .type 32 .endef .align 5 aes_hw_set_encrypt_key: Lenc_key: #ifdef BORINGSSL_DISPATCH_TEST adrp x9,BORINGSSL_function_hit add x9, x9, :lo12:BORINGSSL_function_hit mov w10, #1 strb w10, [x9,#3] // kFlag_aes_hw_set_encrypt_key #endif // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 mov x3,#-1 cmp x0,#0 b.eq Lenc_key_abort cmp x2,#0 b.eq Lenc_key_abort mov x3,#-2 cmp w1,#128 b.lt Lenc_key_abort cmp w1,#256 b.gt Lenc_key_abort tst w1,#0x3f b.ne Lenc_key_abort adrp x3,Lrcon add x3,x3,:lo12:Lrcon cmp w1,#192 eor v0.16b,v0.16b,v0.16b ld1 {v3.16b},[x0],#16 mov w1,#8 // reuse w1 ld1 {v1.4s,v2.4s},[x3],#32 b.lt Loop128 b.eq L192 b L256 .align 4 Loop128: tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b b.ne Loop128 ld1 {v1.4s},[x3] tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b eor v3.16b,v3.16b,v6.16b st1 {v3.4s},[x2] add x2,x2,#0x50 mov w12,#10 b Ldone .align 4 L192: ld1 {v4.8b},[x0],#8 movi v6.16b,#8 // borrow v6.16b st1 {v3.4s},[x2],#16 sub v2.16b,v2.16b,v6.16b // adjust the mask Loop192: tbl v6.16b,{v4.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v4.8b},[x2],#8 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b dup v5.4s,v3.s[3] eor v5.16b,v5.16b,v4.16b eor v6.16b,v6.16b,v1.16b ext v4.16b,v0.16b,v4.16b,#12 shl v1.16b,v1.16b,#1 eor v4.16b,v4.16b,v5.16b eor v3.16b,v3.16b,v6.16b eor v4.16b,v4.16b,v6.16b st1 {v3.4s},[x2],#16 b.ne Loop192 mov w12,#12 add x2,x2,#0x20 b Ldone .align 4 L256: ld1 {v4.16b},[x0] mov w1,#7 mov w12,#14 st1 {v3.4s},[x2],#16 Loop256: tbl v6.16b,{v4.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v4.4s},[x2],#16 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b st1 {v3.4s},[x2],#16 b.eq Ldone dup v6.4s,v3.s[3] // just splat ext v5.16b,v0.16b,v4.16b,#12 aese v6.16b,v0.16b eor v4.16b,v4.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v4.16b,v4.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v4.16b,v4.16b,v5.16b eor v4.16b,v4.16b,v6.16b 
b Loop256 Ldone: str w12,[x2] mov x3,#0 Lenc_key_abort: mov x0,x3 // return value ldr x29,[sp],#16 ret .globl aes_hw_set_decrypt_key .def aes_hw_set_decrypt_key .type 32 .endef .align 5 aes_hw_set_decrypt_key: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 bl Lenc_key cmp x0,#0 b.ne Ldec_key_abort sub x2,x2,#240 // restore original x2 mov x4,#-16 add x0,x2,x12,lsl#4 // end of key schedule ld1 {v0.4s},[x2] ld1 {v1.4s},[x0] st1 {v0.4s},[x0],x4 st1 {v1.4s},[x2],#16 Loop_imc: ld1 {v0.4s},[x2] ld1 {v1.4s},[x0] aesimc v0.16b,v0.16b aesimc v1.16b,v1.16b st1 {v0.4s},[x0],x4 st1 {v1.4s},[x2],#16 cmp x0,x2 b.hi Loop_imc ld1 {v0.4s},[x2] aesimc v0.16b,v0.16b st1 {v0.4s},[x0] eor x0,x0,x0 // return value Ldec_key_abort: ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .globl aes_hw_encrypt .def aes_hw_encrypt .type 32 .endef .align 5 aes_hw_encrypt: #ifdef BORINGSSL_DISPATCH_TEST adrp x9,BORINGSSL_function_hit add x9, x9, :lo12:BORINGSSL_function_hit mov w10, #1 strb w10, [x9,#1] // kFlag_aes_hw_encrypt #endif AARCH64_VALID_CALL_TARGET ldr w3,[x2,#240] ld1 {v0.4s},[x2],#16 ld1 {v2.16b},[x0] sub w3,w3,#2 ld1 {v1.4s},[x2],#16 Loop_enc: aese v2.16b,v0.16b aesmc v2.16b,v2.16b ld1 {v0.4s},[x2],#16 subs w3,w3,#2 aese v2.16b,v1.16b aesmc v2.16b,v2.16b ld1 {v1.4s},[x2],#16 b.gt Loop_enc aese v2.16b,v0.16b aesmc v2.16b,v2.16b ld1 {v0.4s},[x2] aese v2.16b,v1.16b eor v2.16b,v2.16b,v0.16b st1 {v2.16b},[x1] ret .globl aes_hw_decrypt .def aes_hw_decrypt .type 32 .endef .align 5 aes_hw_decrypt: #ifdef BORINGSSL_DISPATCH_TEST adrp x9,BORINGSSL_function_hit add x9, x9, :lo12:BORINGSSL_function_hit mov w10, #1 strb w10, [x9,#1] // kFlag_aes_hw_encrypt #endif AARCH64_VALID_CALL_TARGET ldr w3,[x2,#240] ld1 {v0.4s},[x2],#16 ld1 {v2.16b},[x0] sub w3,w3,#2 ld1 {v1.4s},[x2],#16 Loop_dec: aesd v2.16b,v0.16b aesimc v2.16b,v2.16b ld1 {v0.4s},[x2],#16 subs w3,w3,#2 aesd v2.16b,v1.16b aesimc v2.16b,v2.16b ld1 {v1.4s},[x2],#16 b.gt Loop_dec aesd v2.16b,v0.16b aesimc v2.16b,v2.16b ld1 {v0.4s},[x2] aesd v2.16b,v1.16b eor v2.16b,v2.16b,v0.16b st1 {v2.16b},[x1] ret .globl aes_hw_cbc_encrypt .def aes_hw_cbc_encrypt .type 32 .endef .align 5 aes_hw_cbc_encrypt: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 subs x2,x2,#16 mov x8,#16 b.lo Lcbc_abort csel x8,xzr,x8,eq cmp w5,#0 // en- or decrypting? ldr w5,[x3,#240] and x2,x2,#-16 ld1 {v6.16b},[x4] ld1 {v0.16b},[x0],x8 ld1 {v16.4s,v17.4s},[x3] // load key schedule... 
sub w5,w5,#6 add x7,x3,x5,lsl#4 // pointer to last 7 round keys sub w5,w5,#2 ld1 {v18.4s,v19.4s},[x7],#32 ld1 {v20.4s,v21.4s},[x7],#32 ld1 {v22.4s,v23.4s},[x7],#32 ld1 {v7.4s},[x7] add x7,x3,#32 mov w6,w5 b.eq Lcbc_dec cmp w5,#2 eor v0.16b,v0.16b,v6.16b eor v5.16b,v16.16b,v7.16b b.eq Lcbc_enc128 ld1 {v2.4s,v3.4s},[x7] add x7,x3,#16 add x6,x3,#16*4 add x12,x3,#16*5 aese v0.16b,v16.16b aesmc v0.16b,v0.16b add x14,x3,#16*6 add x3,x3,#16*7 b Lenter_cbc_enc .align 4 Loop_cbc_enc: aese v0.16b,v16.16b aesmc v0.16b,v0.16b st1 {v6.16b},[x1],#16 Lenter_cbc_enc: aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v0.16b,v2.16b aesmc v0.16b,v0.16b ld1 {v16.4s},[x6] cmp w5,#4 aese v0.16b,v3.16b aesmc v0.16b,v0.16b ld1 {v17.4s},[x12] b.eq Lcbc_enc192 aese v0.16b,v16.16b aesmc v0.16b,v0.16b ld1 {v16.4s},[x14] aese v0.16b,v17.16b aesmc v0.16b,v0.16b ld1 {v17.4s},[x3] nop Lcbc_enc192: aese v0.16b,v16.16b aesmc v0.16b,v0.16b subs x2,x2,#16 aese v0.16b,v17.16b aesmc v0.16b,v0.16b csel x8,xzr,x8,eq aese v0.16b,v18.16b aesmc v0.16b,v0.16b aese v0.16b,v19.16b aesmc v0.16b,v0.16b ld1 {v16.16b},[x0],x8 aese v0.16b,v20.16b aesmc v0.16b,v0.16b eor v16.16b,v16.16b,v5.16b aese v0.16b,v21.16b aesmc v0.16b,v0.16b ld1 {v17.4s},[x7] // re-pre-load rndkey[1] aese v0.16b,v22.16b aesmc v0.16b,v0.16b aese v0.16b,v23.16b eor v6.16b,v0.16b,v7.16b b.hs Loop_cbc_enc st1 {v6.16b},[x1],#16 b Lcbc_done .align 5 Lcbc_enc128: ld1 {v2.4s,v3.4s},[x7] aese v0.16b,v16.16b aesmc v0.16b,v0.16b b Lenter_cbc_enc128 Loop_cbc_enc128: aese v0.16b,v16.16b aesmc v0.16b,v0.16b st1 {v6.16b},[x1],#16 Lenter_cbc_enc128: aese v0.16b,v17.16b aesmc v0.16b,v0.16b subs x2,x2,#16 aese v0.16b,v2.16b aesmc v0.16b,v0.16b csel x8,xzr,x8,eq aese v0.16b,v3.16b aesmc v0.16b,v0.16b aese v0.16b,v18.16b aesmc v0.16b,v0.16b aese v0.16b,v19.16b aesmc v0.16b,v0.16b ld1 {v16.16b},[x0],x8 aese v0.16b,v20.16b aesmc v0.16b,v0.16b aese v0.16b,v21.16b aesmc v0.16b,v0.16b aese v0.16b,v22.16b aesmc v0.16b,v0.16b eor v16.16b,v16.16b,v5.16b aese v0.16b,v23.16b eor v6.16b,v0.16b,v7.16b b.hs Loop_cbc_enc128 st1 {v6.16b},[x1],#16 b Lcbc_done .align 5 Lcbc_dec: ld1 {v18.16b},[x0],#16 subs x2,x2,#32 // bias add w6,w5,#2 orr v3.16b,v0.16b,v0.16b orr v1.16b,v0.16b,v0.16b orr v19.16b,v18.16b,v18.16b b.lo Lcbc_dec_tail orr v1.16b,v18.16b,v18.16b ld1 {v18.16b},[x0],#16 orr v2.16b,v0.16b,v0.16b orr v3.16b,v1.16b,v1.16b orr v19.16b,v18.16b,v18.16b Loop3x_cbc_dec: aesd v0.16b,v16.16b aesimc v0.16b,v0.16b aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aesd v0.16b,v17.16b aesimc v0.16b,v0.16b aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b ld1 {v17.4s},[x7],#16 b.gt Loop3x_cbc_dec aesd v0.16b,v16.16b aesimc v0.16b,v0.16b aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b eor v4.16b,v6.16b,v7.16b subs x2,x2,#0x30 eor v5.16b,v2.16b,v7.16b csel x6,x2,x6,lo // x6, w6, is zero at this point aesd v0.16b,v17.16b aesimc v0.16b,v0.16b aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b eor v17.16b,v3.16b,v7.16b add x0,x0,x6 // x0 is adjusted in such way that // at exit from the loop v1.16b-v18.16b // are loaded with last "words" orr v6.16b,v19.16b,v19.16b mov x7,x3 aesd v0.16b,v20.16b aesimc v0.16b,v0.16b aesd v1.16b,v20.16b aesimc v1.16b,v1.16b aesd v18.16b,v20.16b aesimc v18.16b,v18.16b ld1 {v2.16b},[x0],#16 aesd v0.16b,v21.16b aesimc v0.16b,v0.16b aesd v1.16b,v21.16b aesimc v1.16b,v1.16b aesd v18.16b,v21.16b aesimc v18.16b,v18.16b 
ld1 {v3.16b},[x0],#16 aesd v0.16b,v22.16b aesimc v0.16b,v0.16b aesd v1.16b,v22.16b aesimc v1.16b,v1.16b aesd v18.16b,v22.16b aesimc v18.16b,v18.16b ld1 {v19.16b},[x0],#16 aesd v0.16b,v23.16b aesd v1.16b,v23.16b aesd v18.16b,v23.16b ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0] add w6,w5,#2 eor v4.16b,v4.16b,v0.16b eor v5.16b,v5.16b,v1.16b eor v18.16b,v18.16b,v17.16b ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1] st1 {v4.16b},[x1],#16 orr v0.16b,v2.16b,v2.16b st1 {v5.16b},[x1],#16 orr v1.16b,v3.16b,v3.16b st1 {v18.16b},[x1],#16 orr v18.16b,v19.16b,v19.16b b.hs Loop3x_cbc_dec cmn x2,#0x30 b.eq Lcbc_done nop Lcbc_dec_tail: aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b ld1 {v17.4s},[x7],#16 b.gt Lcbc_dec_tail aesd v1.16b,v16.16b aesimc v1.16b,v1.16b aesd v18.16b,v16.16b aesimc v18.16b,v18.16b aesd v1.16b,v17.16b aesimc v1.16b,v1.16b aesd v18.16b,v17.16b aesimc v18.16b,v18.16b aesd v1.16b,v20.16b aesimc v1.16b,v1.16b aesd v18.16b,v20.16b aesimc v18.16b,v18.16b cmn x2,#0x20 aesd v1.16b,v21.16b aesimc v1.16b,v1.16b aesd v18.16b,v21.16b aesimc v18.16b,v18.16b eor v5.16b,v6.16b,v7.16b aesd v1.16b,v22.16b aesimc v1.16b,v1.16b aesd v18.16b,v22.16b aesimc v18.16b,v18.16b eor v17.16b,v3.16b,v7.16b aesd v1.16b,v23.16b aesd v18.16b,v23.16b b.eq Lcbc_dec_one eor v5.16b,v5.16b,v1.16b eor v17.16b,v17.16b,v18.16b orr v6.16b,v19.16b,v19.16b st1 {v5.16b},[x1],#16 st1 {v17.16b},[x1],#16 b Lcbc_done Lcbc_dec_one: eor v5.16b,v5.16b,v18.16b orr v6.16b,v19.16b,v19.16b st1 {v5.16b},[x1],#16 Lcbc_done: st1 {v6.16b},[x4] Lcbc_abort: ldr x29,[sp],#16 ret .globl aes_hw_ctr32_encrypt_blocks .def aes_hw_ctr32_encrypt_blocks .type 32 .endef .align 5 aes_hw_ctr32_encrypt_blocks: #ifdef BORINGSSL_DISPATCH_TEST adrp x9,BORINGSSL_function_hit add x9, x9, :lo12:BORINGSSL_function_hit mov w10, #1 strb w10, [x9] // kFlag_aes_hw_ctr32_encrypt_blocks #endif // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 ldr w5,[x3,#240] ldr w8, [x4, #12] ld1 {v0.4s},[x4] ld1 {v16.4s,v17.4s},[x3] // load key schedule... sub w5,w5,#4 mov x12,#16 cmp x2,#2 add x7,x3,x5,lsl#4 // pointer to last 5 round keys sub w5,w5,#2 ld1 {v20.4s,v21.4s},[x7],#32 ld1 {v22.4s,v23.4s},[x7],#32 ld1 {v7.4s},[x7] add x7,x3,#32 mov w6,w5 // ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are // affected by silicon errata #1742098 [0] and #1655431 [1], // respectively, where the second instruction of an aese/aesmc // instruction pair may execute twice if an interrupt is taken right // after the first instruction consumes an input register of which a // single 32-bit lane has been updated the last time it was modified. // // This function uses a counter in one 32-bit lane. The vmov lines // could write to v1.16b and v18.16b directly, but that trips this bugs. // We write to v6.16b and copy to the final register as a workaround. 
// // [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice // [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice #ifndef __AARCH64EB__ rev w8, w8 #endif add w10, w8, #1 orr v6.16b,v0.16b,v0.16b rev w10, w10 mov v6.s[3],w10 add w8, w8, #2 orr v1.16b,v6.16b,v6.16b b.ls Lctr32_tail rev w12, w8 mov v6.s[3],w12 sub x2,x2,#3 // bias orr v18.16b,v6.16b,v6.16b b Loop3x_ctr32 .align 4 Loop3x_ctr32: aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b aese v18.16b,v17.16b aesmc v18.16b,v18.16b ld1 {v17.4s},[x7],#16 b.gt Loop3x_ctr32 aese v0.16b,v16.16b aesmc v4.16b,v0.16b aese v1.16b,v16.16b aesmc v5.16b,v1.16b ld1 {v2.16b},[x0],#16 add w9,w8,#1 aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v3.16b},[x0],#16 rev w9,w9 aese v4.16b,v17.16b aesmc v4.16b,v4.16b aese v5.16b,v17.16b aesmc v5.16b,v5.16b ld1 {v19.16b},[x0],#16 mov x7,x3 aese v18.16b,v17.16b aesmc v17.16b,v18.16b aese v4.16b,v20.16b aesmc v4.16b,v4.16b aese v5.16b,v20.16b aesmc v5.16b,v5.16b eor v2.16b,v2.16b,v7.16b add w10,w8,#2 aese v17.16b,v20.16b aesmc v17.16b,v17.16b eor v3.16b,v3.16b,v7.16b add w8,w8,#3 aese v4.16b,v21.16b aesmc v4.16b,v4.16b aese v5.16b,v21.16b aesmc v5.16b,v5.16b // Note the logic to update v0.16b, v1.16b, and v1.16b is written to work // around a bug in ARM Cortex-A57 and Cortex-A72 cores running in // 32-bit mode. See the comment above. eor v19.16b,v19.16b,v7.16b mov v6.s[3], w9 aese v17.16b,v21.16b aesmc v17.16b,v17.16b orr v0.16b,v6.16b,v6.16b rev w10,w10 aese v4.16b,v22.16b aesmc v4.16b,v4.16b mov v6.s[3], w10 rev w12,w8 aese v5.16b,v22.16b aesmc v5.16b,v5.16b orr v1.16b,v6.16b,v6.16b mov v6.s[3], w12 aese v17.16b,v22.16b aesmc v17.16b,v17.16b orr v18.16b,v6.16b,v6.16b subs x2,x2,#3 aese v4.16b,v23.16b aese v5.16b,v23.16b aese v17.16b,v23.16b eor v2.16b,v2.16b,v4.16b ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0] st1 {v2.16b},[x1],#16 eor v3.16b,v3.16b,v5.16b mov w6,w5 st1 {v3.16b},[x1],#16 eor v19.16b,v19.16b,v17.16b ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1] st1 {v19.16b},[x1],#16 b.hs Loop3x_ctr32 adds x2,x2,#3 b.eq Lctr32_done Lctr32_tail: cmp x2,#1 b.lt Lctr32_done // if len = 0, go to done mov x12,#16 csel x12,xzr,x12,eq aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b ld1 {v17.4s},[x7],#16 b.gt Lctr32_tail aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b ld1 {v2.16b},[x0],x12 aese v0.16b,v20.16b aesmc v0.16b,v0.16b aese v1.16b,v20.16b aesmc v1.16b,v1.16b ld1 {v3.16b},[x0] aese v0.16b,v21.16b aesmc v0.16b,v0.16b aese v1.16b,v21.16b aesmc v1.16b,v1.16b eor v2.16b,v2.16b,v7.16b aese v0.16b,v22.16b aesmc v0.16b,v0.16b aese v1.16b,v22.16b aesmc v1.16b,v1.16b eor v3.16b,v3.16b,v7.16b aese v0.16b,v23.16b aese v1.16b,v23.16b eor v2.16b,v2.16b,v0.16b eor v3.16b,v3.16b,v1.16b st1 {v2.16b},[x1],#16 cbz x12,Lctr32_done // if step = 0 (len = 1), go to done st1 {v3.16b},[x1] Lctr32_done: ldr x29,[sp],#16 ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
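Of the routines above, aes_hw_ctr32_encrypt_blocks is the CTR-mode workhorse: it reads the last four bytes of the 16-byte IV as a 32-bit big-endian block counter (the ldr w8,[x4,#12] / rev w8,w8 pair), encrypts successive counter blocks with the expanded key — three blocks per Loop3x_ctr32 iteration — and XORs the result into the input, with the errata workaround described in the comments governing how the counter lane is written. Below is a minimal portable sketch of that 32-bit-counter CTR convention; block128_f and ctr32_encrypt_blocks_sketch are illustrative names, the per-block cipher (something shaped like aes_hw_encrypt above) is abstracted behind a function pointer, and only whole blocks are handled.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical single-block encrypt callback: 16 bytes in, 16 bytes out. */
typedef void (*block128_f)(const uint8_t in[16], uint8_t out[16], const void *key);

/* CTR mode with a 32-bit big-endian counter held in ivec[12..15]; the other
   twelve IV bytes are never modified, matching the ctr32 convention above. */
static void ctr32_encrypt_blocks_sketch(const uint8_t *in, uint8_t *out,
                                        size_t blocks, const void *key,
                                        uint8_t ivec[16], block128_f block) {
    uint8_t keystream[16];
    uint32_t ctr = ((uint32_t)ivec[12] << 24) | ((uint32_t)ivec[13] << 16) |
                   ((uint32_t)ivec[14] << 8)  |  (uint32_t)ivec[15];
    while (blocks--) {
        block(ivec, keystream, key);           /* E_K(counter block) */
        for (int i = 0; i < 16; i++) out[i] = in[i] ^ keystream[i];
        in += 16;
        out += 16;
        ctr++;                                 /* only the low 32 bits advance */
        ivec[12] = (uint8_t)(ctr >> 24);
        ivec[13] = (uint8_t)(ctr >> 16);
        ivec[14] = (uint8_t)(ctr >> 8);
        ivec[15] = (uint8_t)ctr;
    }
}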
wlsfx/bnbb
34,129
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/sha256-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) // Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy // in the file LICENSE in the source distribution or at // https://www.openssl.org/source/license.html // ==================================================================== // Written by Andy Polyakov <appro@openssl.org> for the OpenSSL // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. // // Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. // // Performance in cycles per processed byte and improvement coefficient // over code generated with "default" compiler: // // SHA256-hw SHA256(*) SHA512 // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) // Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. // (**) The result is a trade-off: it's possible to improve it by // 10% (or by 1 cycle per round), but at the cost of 20% loss // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code // generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ # include <openssl/arm_arch.h> #endif .text .globl sha256_block_data_order_nohw .def sha256_block_data_order_nohw .type 32 .endef .align 6 sha256_block_data_order_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#4*4 ldp w20,w21,[x0] // load context ldp w22,w23,[x0,#2*4] ldp w24,w25,[x0,#4*4] add x2,x1,x2,lsl#6 // end of input ldp w26,w27,[x0,#6*4] adrp x30,LK256 add x30,x30,:lo12:LK256 stp x0,x2,[x29,#96] Loop: ldp w3,w4,[x1],#2*4 ldr w19,[x30],#4 // *K++ eor w28,w21,w22 // magic seed str x1,[x29,#112] #ifndef __AARCH64EB__ rev w3,w3 // 0 #endif ror w16,w24,#6 add w27,w27,w19 // h+=K[i] eor w6,w24,w24,ror#14 and w17,w25,w24 bic w19,w26,w24 add w27,w27,w3 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w6,ror#11 // Sigma1(e) ror w6,w20,#2 add w27,w27,w17 // h+=Ch(e,f,g) eor w17,w20,w20,ror#9 add w27,w27,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w23,w23,w27 // d+=h eor w28,w28,w21 // Maj(a,b,c) eor w17,w6,w17,ror#13 // Sigma0(a) add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w4,w4 // 1 #endif ldp w5,w6,[x1],#2*4 add w27,w27,w17 // h+=Sigma0(a) ror w16,w23,#6 add w26,w26,w28 // h+=K[i] eor w7,w23,w23,ror#14 and w17,w24,w23 bic w28,w25,w23 add w26,w26,w4 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w7,ror#11 // Sigma1(e) ror w7,w27,#2 add w26,w26,w17 // h+=Ch(e,f,g) eor w17,w27,w27,ror#9 add w26,w26,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w22,w22,w26 // d+=h eor w19,w19,w20 // Maj(a,b,c) eor w17,w7,w17,ror#13 // Sigma0(a) add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w5,w5 // 2 #endif add w26,w26,w17 // h+=Sigma0(a) ror w16,w22,#6 add w25,w25,w19 // h+=K[i] eor w8,w22,w22,ror#14 and w17,w23,w22 bic w19,w24,w22 add w25,w25,w5 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w8,ror#11 // Sigma1(e) ror w8,w26,#2 add w25,w25,w17 // h+=Ch(e,f,g) eor w17,w26,w26,ror#9 add w25,w25,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w21,w21,w25 // d+=h eor w28,w28,w27 // Maj(a,b,c) eor w17,w8,w17,ror#13 // Sigma0(a) add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w6,w6 // 3 #endif ldp w7,w8,[x1],#2*4 add w25,w25,w17 // h+=Sigma0(a) ror w16,w21,#6 add w24,w24,w28 // h+=K[i] eor w9,w21,w21,ror#14 and w17,w22,w21 bic w28,w23,w21 add w24,w24,w6 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w9,ror#11 // Sigma1(e) ror w9,w25,#2 add w24,w24,w17 // h+=Ch(e,f,g) eor w17,w25,w25,ror#9 add w24,w24,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w20,w20,w24 // d+=h eor w19,w19,w26 // Maj(a,b,c) eor w17,w9,w17,ror#13 // Sigma0(a) add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w7,w7 // 4 #endif add w24,w24,w17 // h+=Sigma0(a) ror w16,w20,#6 add w23,w23,w19 // h+=K[i] eor w10,w20,w20,ror#14 and w17,w21,w20 bic w19,w22,w20 add w23,w23,w7 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w10,ror#11 // Sigma1(e) ror w10,w24,#2 add w23,w23,w17 // h+=Ch(e,f,g) eor w17,w24,w24,ror#9 add w23,w23,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w27,w27,w23 // d+=h eor w28,w28,w25 // Maj(a,b,c) eor w17,w10,w17,ror#13 // Sigma0(a) add w23,w23,w28 // h+=Maj(a,b,c) ldr 
w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w8,w8 // 5 #endif ldp w9,w10,[x1],#2*4 add w23,w23,w17 // h+=Sigma0(a) ror w16,w27,#6 add w22,w22,w28 // h+=K[i] eor w11,w27,w27,ror#14 and w17,w20,w27 bic w28,w21,w27 add w22,w22,w8 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w11,ror#11 // Sigma1(e) ror w11,w23,#2 add w22,w22,w17 // h+=Ch(e,f,g) eor w17,w23,w23,ror#9 add w22,w22,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w26,w26,w22 // d+=h eor w19,w19,w24 // Maj(a,b,c) eor w17,w11,w17,ror#13 // Sigma0(a) add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w9,w9 // 6 #endif add w22,w22,w17 // h+=Sigma0(a) ror w16,w26,#6 add w21,w21,w19 // h+=K[i] eor w12,w26,w26,ror#14 and w17,w27,w26 bic w19,w20,w26 add w21,w21,w9 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w12,ror#11 // Sigma1(e) ror w12,w22,#2 add w21,w21,w17 // h+=Ch(e,f,g) eor w17,w22,w22,ror#9 add w21,w21,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w25,w25,w21 // d+=h eor w28,w28,w23 // Maj(a,b,c) eor w17,w12,w17,ror#13 // Sigma0(a) add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w10,w10 // 7 #endif ldp w11,w12,[x1],#2*4 add w21,w21,w17 // h+=Sigma0(a) ror w16,w25,#6 add w20,w20,w28 // h+=K[i] eor w13,w25,w25,ror#14 and w17,w26,w25 bic w28,w27,w25 add w20,w20,w10 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w13,ror#11 // Sigma1(e) ror w13,w21,#2 add w20,w20,w17 // h+=Ch(e,f,g) eor w17,w21,w21,ror#9 add w20,w20,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w24,w24,w20 // d+=h eor w19,w19,w22 // Maj(a,b,c) eor w17,w13,w17,ror#13 // Sigma0(a) add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w20,w20,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w11,w11 // 8 #endif add w20,w20,w17 // h+=Sigma0(a) ror w16,w24,#6 add w27,w27,w19 // h+=K[i] eor w14,w24,w24,ror#14 and w17,w25,w24 bic w19,w26,w24 add w27,w27,w11 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w14,ror#11 // Sigma1(e) ror w14,w20,#2 add w27,w27,w17 // h+=Ch(e,f,g) eor w17,w20,w20,ror#9 add w27,w27,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w23,w23,w27 // d+=h eor w28,w28,w21 // Maj(a,b,c) eor w17,w14,w17,ror#13 // Sigma0(a) add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w12,w12 // 9 #endif ldp w13,w14,[x1],#2*4 add w27,w27,w17 // h+=Sigma0(a) ror w16,w23,#6 add w26,w26,w28 // h+=K[i] eor w15,w23,w23,ror#14 and w17,w24,w23 bic w28,w25,w23 add w26,w26,w12 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w15,ror#11 // Sigma1(e) ror w15,w27,#2 add w26,w26,w17 // h+=Ch(e,f,g) eor w17,w27,w27,ror#9 add w26,w26,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w22,w22,w26 // d+=h eor w19,w19,w20 // Maj(a,b,c) eor w17,w15,w17,ror#13 // Sigma0(a) add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w13,w13 // 10 #endif add w26,w26,w17 // h+=Sigma0(a) ror w16,w22,#6 add w25,w25,w19 // h+=K[i] eor w0,w22,w22,ror#14 and w17,w23,w22 bic w19,w24,w22 add w25,w25,w13 // h+=X[i] orr 
w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w0,ror#11 // Sigma1(e) ror w0,w26,#2 add w25,w25,w17 // h+=Ch(e,f,g) eor w17,w26,w26,ror#9 add w25,w25,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w21,w21,w25 // d+=h eor w28,w28,w27 // Maj(a,b,c) eor w17,w0,w17,ror#13 // Sigma0(a) add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w14,w14 // 11 #endif ldp w15,w0,[x1],#2*4 add w25,w25,w17 // h+=Sigma0(a) str w6,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] eor w6,w21,w21,ror#14 and w17,w22,w21 bic w28,w23,w21 add w24,w24,w14 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w6,ror#11 // Sigma1(e) ror w6,w25,#2 add w24,w24,w17 // h+=Ch(e,f,g) eor w17,w25,w25,ror#9 add w24,w24,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w20,w20,w24 // d+=h eor w19,w19,w26 // Maj(a,b,c) eor w17,w6,w17,ror#13 // Sigma0(a) add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w15,w15 // 12 #endif add w24,w24,w17 // h+=Sigma0(a) str w7,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] eor w7,w20,w20,ror#14 and w17,w21,w20 bic w19,w22,w20 add w23,w23,w15 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w7,ror#11 // Sigma1(e) ror w7,w24,#2 add w23,w23,w17 // h+=Ch(e,f,g) eor w17,w24,w24,ror#9 add w23,w23,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w27,w27,w23 // d+=h eor w28,w28,w25 // Maj(a,b,c) eor w17,w7,w17,ror#13 // Sigma0(a) add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w0,w0 // 13 #endif ldp w1,w2,[x1] add w23,w23,w17 // h+=Sigma0(a) str w8,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] eor w8,w27,w27,ror#14 and w17,w20,w27 bic w28,w21,w27 add w22,w22,w0 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w8,ror#11 // Sigma1(e) ror w8,w23,#2 add w22,w22,w17 // h+=Ch(e,f,g) eor w17,w23,w23,ror#9 add w22,w22,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w26,w26,w22 // d+=h eor w19,w19,w24 // Maj(a,b,c) eor w17,w8,w17,ror#13 // Sigma0(a) add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w1,w1 // 14 #endif ldr w6,[sp,#12] add w22,w22,w17 // h+=Sigma0(a) str w9,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] eor w9,w26,w26,ror#14 and w17,w27,w26 bic w19,w20,w26 add w21,w21,w1 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w9,ror#11 // Sigma1(e) ror w9,w22,#2 add w21,w21,w17 // h+=Ch(e,f,g) eor w17,w22,w22,ror#9 add w21,w21,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w25,w25,w21 // d+=h eor w28,w28,w23 // Maj(a,b,c) eor w17,w9,w17,ror#13 // Sigma0(a) add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w2,w2 // 15 #endif ldr w7,[sp,#0] add w21,w21,w17 // h+=Sigma0(a) str w10,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w9,w4,#7 and w17,w26,w25 ror w8,w1,#17 bic w28,w27,w25 ror w10,w21,#2 add w20,w20,w2 // h+=X[i] eor w16,w16,w25,ror#11 eor w9,w9,w4,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w10,w10,w21,ror#13 add w20,w20,w17 // 
h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w8,w8,w1,ror#19 eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w10,w21,ror#22 // Sigma0(a) eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) add w3,w3,w12 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w3,w3,w9 add w20,w20,w17 // h+=Sigma0(a) add w3,w3,w8 Loop_16_xx: ldr w8,[sp,#4] str w11,[sp,#0] ror w16,w24,#6 add w27,w27,w19 // h+=K[i] ror w10,w5,#7 and w17,w25,w24 ror w9,w2,#17 bic w19,w26,w24 ror w11,w20,#2 add w27,w27,w3 // h+=X[i] eor w16,w16,w24,ror#11 eor w10,w10,w5,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w24,ror#25 // Sigma1(e) eor w11,w11,w20,ror#13 add w27,w27,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w9,w9,w2,ror#19 eor w10,w10,w5,lsr#3 // sigma0(X[i+1]) add w27,w27,w16 // h+=Sigma1(e) eor w28,w28,w21 // Maj(a,b,c) eor w17,w11,w20,ror#22 // Sigma0(a) eor w9,w9,w2,lsr#10 // sigma1(X[i+14]) add w4,w4,w13 add w23,w23,w27 // d+=h add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w4,w4,w10 add w27,w27,w17 // h+=Sigma0(a) add w4,w4,w9 ldr w9,[sp,#8] str w12,[sp,#4] ror w16,w23,#6 add w26,w26,w28 // h+=K[i] ror w11,w6,#7 and w17,w24,w23 ror w10,w3,#17 bic w28,w25,w23 ror w12,w27,#2 add w26,w26,w4 // h+=X[i] eor w16,w16,w23,ror#11 eor w11,w11,w6,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w23,ror#25 // Sigma1(e) eor w12,w12,w27,ror#13 add w26,w26,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w10,w10,w3,ror#19 eor w11,w11,w6,lsr#3 // sigma0(X[i+1]) add w26,w26,w16 // h+=Sigma1(e) eor w19,w19,w20 // Maj(a,b,c) eor w17,w12,w27,ror#22 // Sigma0(a) eor w10,w10,w3,lsr#10 // sigma1(X[i+14]) add w5,w5,w14 add w22,w22,w26 // d+=h add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w5,w5,w11 add w26,w26,w17 // h+=Sigma0(a) add w5,w5,w10 ldr w10,[sp,#12] str w13,[sp,#8] ror w16,w22,#6 add w25,w25,w19 // h+=K[i] ror w12,w7,#7 and w17,w23,w22 ror w11,w4,#17 bic w19,w24,w22 ror w13,w26,#2 add w25,w25,w5 // h+=X[i] eor w16,w16,w22,ror#11 eor w12,w12,w7,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w22,ror#25 // Sigma1(e) eor w13,w13,w26,ror#13 add w25,w25,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w11,w11,w4,ror#19 eor w12,w12,w7,lsr#3 // sigma0(X[i+1]) add w25,w25,w16 // h+=Sigma1(e) eor w28,w28,w27 // Maj(a,b,c) eor w17,w13,w26,ror#22 // Sigma0(a) eor w11,w11,w4,lsr#10 // sigma1(X[i+14]) add w6,w6,w15 add w21,w21,w25 // d+=h add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w6,w6,w12 add w25,w25,w17 // h+=Sigma0(a) add w6,w6,w11 ldr w11,[sp,#0] str w14,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] ror w13,w8,#7 and w17,w22,w21 ror w12,w5,#17 bic w28,w23,w21 ror w14,w25,#2 add w24,w24,w6 // h+=X[i] eor w16,w16,w21,ror#11 eor w13,w13,w8,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w21,ror#25 // Sigma1(e) eor w14,w14,w25,ror#13 add w24,w24,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w12,w12,w5,ror#19 eor w13,w13,w8,lsr#3 // sigma0(X[i+1]) add w24,w24,w16 // h+=Sigma1(e) eor w19,w19,w26 // Maj(a,b,c) eor w17,w14,w25,ror#22 // Sigma0(a) eor w12,w12,w5,lsr#10 // sigma1(X[i+14]) add w7,w7,w0 add w20,w20,w24 // d+=h add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w7,w7,w13 add w24,w24,w17 // 
h+=Sigma0(a) add w7,w7,w12 ldr w12,[sp,#4] str w15,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] ror w14,w9,#7 and w17,w21,w20 ror w13,w6,#17 bic w19,w22,w20 ror w15,w24,#2 add w23,w23,w7 // h+=X[i] eor w16,w16,w20,ror#11 eor w14,w14,w9,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w20,ror#25 // Sigma1(e) eor w15,w15,w24,ror#13 add w23,w23,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w13,w13,w6,ror#19 eor w14,w14,w9,lsr#3 // sigma0(X[i+1]) add w23,w23,w16 // h+=Sigma1(e) eor w28,w28,w25 // Maj(a,b,c) eor w17,w15,w24,ror#22 // Sigma0(a) eor w13,w13,w6,lsr#10 // sigma1(X[i+14]) add w8,w8,w1 add w27,w27,w23 // d+=h add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w8,w8,w14 add w23,w23,w17 // h+=Sigma0(a) add w8,w8,w13 ldr w13,[sp,#8] str w0,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] ror w15,w10,#7 and w17,w20,w27 ror w14,w7,#17 bic w28,w21,w27 ror w0,w23,#2 add w22,w22,w8 // h+=X[i] eor w16,w16,w27,ror#11 eor w15,w15,w10,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w27,ror#25 // Sigma1(e) eor w0,w0,w23,ror#13 add w22,w22,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w14,w14,w7,ror#19 eor w15,w15,w10,lsr#3 // sigma0(X[i+1]) add w22,w22,w16 // h+=Sigma1(e) eor w19,w19,w24 // Maj(a,b,c) eor w17,w0,w23,ror#22 // Sigma0(a) eor w14,w14,w7,lsr#10 // sigma1(X[i+14]) add w9,w9,w2 add w26,w26,w22 // d+=h add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w9,w9,w15 add w22,w22,w17 // h+=Sigma0(a) add w9,w9,w14 ldr w14,[sp,#12] str w1,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] ror w0,w11,#7 and w17,w27,w26 ror w15,w8,#17 bic w19,w20,w26 ror w1,w22,#2 add w21,w21,w9 // h+=X[i] eor w16,w16,w26,ror#11 eor w0,w0,w11,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w26,ror#25 // Sigma1(e) eor w1,w1,w22,ror#13 add w21,w21,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w15,w15,w8,ror#19 eor w0,w0,w11,lsr#3 // sigma0(X[i+1]) add w21,w21,w16 // h+=Sigma1(e) eor w28,w28,w23 // Maj(a,b,c) eor w17,w1,w22,ror#22 // Sigma0(a) eor w15,w15,w8,lsr#10 // sigma1(X[i+14]) add w10,w10,w3 add w25,w25,w21 // d+=h add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w10,w10,w0 add w21,w21,w17 // h+=Sigma0(a) add w10,w10,w15 ldr w15,[sp,#0] str w2,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w1,w12,#7 and w17,w26,w25 ror w0,w9,#17 bic w28,w27,w25 ror w2,w21,#2 add w20,w20,w10 // h+=X[i] eor w16,w16,w25,ror#11 eor w1,w1,w12,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w2,w2,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w0,w0,w9,ror#19 eor w1,w1,w12,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w2,w21,ror#22 // Sigma0(a) eor w0,w0,w9,lsr#10 // sigma1(X[i+14]) add w11,w11,w4 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w11,w11,w1 add w20,w20,w17 // h+=Sigma0(a) add w11,w11,w0 ldr w0,[sp,#4] str w3,[sp,#0] ror w16,w24,#6 add w27,w27,w19 // h+=K[i] ror w2,w13,#7 and w17,w25,w24 ror w1,w10,#17 bic w19,w26,w24 ror w3,w20,#2 add w27,w27,w11 // h+=X[i] eor w16,w16,w24,ror#11 eor w2,w2,w13,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w24,ror#25 // Sigma1(e) eor w3,w3,w20,ror#13 add w27,w27,w17 // h+=Ch(e,f,g) 
and w28,w28,w19 // (b^c)&=(a^b) eor w1,w1,w10,ror#19 eor w2,w2,w13,lsr#3 // sigma0(X[i+1]) add w27,w27,w16 // h+=Sigma1(e) eor w28,w28,w21 // Maj(a,b,c) eor w17,w3,w20,ror#22 // Sigma0(a) eor w1,w1,w10,lsr#10 // sigma1(X[i+14]) add w12,w12,w5 add w23,w23,w27 // d+=h add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w12,w12,w2 add w27,w27,w17 // h+=Sigma0(a) add w12,w12,w1 ldr w1,[sp,#8] str w4,[sp,#4] ror w16,w23,#6 add w26,w26,w28 // h+=K[i] ror w3,w14,#7 and w17,w24,w23 ror w2,w11,#17 bic w28,w25,w23 ror w4,w27,#2 add w26,w26,w12 // h+=X[i] eor w16,w16,w23,ror#11 eor w3,w3,w14,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w23,ror#25 // Sigma1(e) eor w4,w4,w27,ror#13 add w26,w26,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w2,w2,w11,ror#19 eor w3,w3,w14,lsr#3 // sigma0(X[i+1]) add w26,w26,w16 // h+=Sigma1(e) eor w19,w19,w20 // Maj(a,b,c) eor w17,w4,w27,ror#22 // Sigma0(a) eor w2,w2,w11,lsr#10 // sigma1(X[i+14]) add w13,w13,w6 add w22,w22,w26 // d+=h add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w13,w13,w3 add w26,w26,w17 // h+=Sigma0(a) add w13,w13,w2 ldr w2,[sp,#12] str w5,[sp,#8] ror w16,w22,#6 add w25,w25,w19 // h+=K[i] ror w4,w15,#7 and w17,w23,w22 ror w3,w12,#17 bic w19,w24,w22 ror w5,w26,#2 add w25,w25,w13 // h+=X[i] eor w16,w16,w22,ror#11 eor w4,w4,w15,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w22,ror#25 // Sigma1(e) eor w5,w5,w26,ror#13 add w25,w25,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w3,w3,w12,ror#19 eor w4,w4,w15,lsr#3 // sigma0(X[i+1]) add w25,w25,w16 // h+=Sigma1(e) eor w28,w28,w27 // Maj(a,b,c) eor w17,w5,w26,ror#22 // Sigma0(a) eor w3,w3,w12,lsr#10 // sigma1(X[i+14]) add w14,w14,w7 add w21,w21,w25 // d+=h add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w14,w14,w4 add w25,w25,w17 // h+=Sigma0(a) add w14,w14,w3 ldr w3,[sp,#0] str w6,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] ror w5,w0,#7 and w17,w22,w21 ror w4,w13,#17 bic w28,w23,w21 ror w6,w25,#2 add w24,w24,w14 // h+=X[i] eor w16,w16,w21,ror#11 eor w5,w5,w0,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w21,ror#25 // Sigma1(e) eor w6,w6,w25,ror#13 add w24,w24,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w4,w4,w13,ror#19 eor w5,w5,w0,lsr#3 // sigma0(X[i+1]) add w24,w24,w16 // h+=Sigma1(e) eor w19,w19,w26 // Maj(a,b,c) eor w17,w6,w25,ror#22 // Sigma0(a) eor w4,w4,w13,lsr#10 // sigma1(X[i+14]) add w15,w15,w8 add w20,w20,w24 // d+=h add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w15,w15,w5 add w24,w24,w17 // h+=Sigma0(a) add w15,w15,w4 ldr w4,[sp,#4] str w7,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] ror w6,w1,#7 and w17,w21,w20 ror w5,w14,#17 bic w19,w22,w20 ror w7,w24,#2 add w23,w23,w15 // h+=X[i] eor w16,w16,w20,ror#11 eor w6,w6,w1,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w20,ror#25 // Sigma1(e) eor w7,w7,w24,ror#13 add w23,w23,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w5,w5,w14,ror#19 eor w6,w6,w1,lsr#3 // sigma0(X[i+1]) add w23,w23,w16 // h+=Sigma1(e) eor w28,w28,w25 // Maj(a,b,c) eor w17,w7,w24,ror#22 // Sigma0(a) eor w5,w5,w14,lsr#10 // sigma1(X[i+14]) add w0,w0,w9 add w27,w27,w23 // d+=h add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w0,w0,w6 add w23,w23,w17 // h+=Sigma0(a) add w0,w0,w5 ldr w5,[sp,#8] str 
w8,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] ror w7,w2,#7 and w17,w20,w27 ror w6,w15,#17 bic w28,w21,w27 ror w8,w23,#2 add w22,w22,w0 // h+=X[i] eor w16,w16,w27,ror#11 eor w7,w7,w2,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w27,ror#25 // Sigma1(e) eor w8,w8,w23,ror#13 add w22,w22,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w6,w6,w15,ror#19 eor w7,w7,w2,lsr#3 // sigma0(X[i+1]) add w22,w22,w16 // h+=Sigma1(e) eor w19,w19,w24 // Maj(a,b,c) eor w17,w8,w23,ror#22 // Sigma0(a) eor w6,w6,w15,lsr#10 // sigma1(X[i+14]) add w1,w1,w10 add w26,w26,w22 // d+=h add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w1,w1,w7 add w22,w22,w17 // h+=Sigma0(a) add w1,w1,w6 ldr w6,[sp,#12] str w9,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] ror w8,w3,#7 and w17,w27,w26 ror w7,w0,#17 bic w19,w20,w26 ror w9,w22,#2 add w21,w21,w1 // h+=X[i] eor w16,w16,w26,ror#11 eor w8,w8,w3,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w26,ror#25 // Sigma1(e) eor w9,w9,w22,ror#13 add w21,w21,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w7,w7,w0,ror#19 eor w8,w8,w3,lsr#3 // sigma0(X[i+1]) add w21,w21,w16 // h+=Sigma1(e) eor w28,w28,w23 // Maj(a,b,c) eor w17,w9,w22,ror#22 // Sigma0(a) eor w7,w7,w0,lsr#10 // sigma1(X[i+14]) add w2,w2,w11 add w25,w25,w21 // d+=h add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w2,w2,w8 add w21,w21,w17 // h+=Sigma0(a) add w2,w2,w7 ldr w7,[sp,#0] str w10,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w9,w4,#7 and w17,w26,w25 ror w8,w1,#17 bic w28,w27,w25 ror w10,w21,#2 add w20,w20,w2 // h+=X[i] eor w16,w16,w25,ror#11 eor w9,w9,w4,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w10,w10,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w8,w8,w1,ror#19 eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w10,w21,ror#22 // Sigma0(a) eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) add w3,w3,w12 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w3,w3,w9 add w20,w20,w17 // h+=Sigma0(a) add w3,w3,w8 cbnz w19,Loop_16_xx ldp x0,x2,[x29,#96] ldr x1,[x29,#112] sub x30,x30,#260 // rewind ldp w3,w4,[x0] ldp w5,w6,[x0,#2*4] add x1,x1,#14*4 // advance input pointer ldp w7,w8,[x0,#4*4] add w20,w20,w3 ldp w9,w10,[x0,#6*4] add w21,w21,w4 add w22,w22,w5 add w23,w23,w6 stp w20,w21,[x0] add w24,w24,w7 add w25,w25,w8 stp w22,w23,[x0,#2*4] add w26,w26,w9 add w27,w27,w10 cmp x1,x2 stp w24,w25,[x0,#4*4] stp w26,w27,[x0,#6*4] b.ne Loop ldp x19,x20,[x29,#16] add sp,sp,#4*4 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 AARCH64_VALIDATE_LINK_REGISTER ret .section .rodata .align 6 LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 
0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0 //terminator .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 .text #ifndef __KERNEL__ .globl sha256_block_data_order_hw .def sha256_block_data_order_hw .type 32 .endef .align 6 sha256_block_data_order_hw: #ifdef BORINGSSL_DISPATCH_TEST adrp x9,BORINGSSL_function_hit add x9, x9, :lo12:BORINGSSL_function_hit mov w10, #1 strb w10, [x9,#6] // kFlag_sha256_hw #endif // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 ld1 {v0.4s,v1.4s},[x0] adrp x3,LK256 add x3,x3,:lo12:LK256 Loop_hw: ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 sub x2,x2,#1 ld1 {v16.4s},[x3],#16 rev32 v4.16b,v4.16b rev32 v5.16b,v5.16b rev32 v6.16b,v6.16b rev32 v7.16b,v7.16b orr v18.16b,v0.16b,v0.16b // offload orr v19.16b,v1.16b,v1.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .long 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .long 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .long 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .long 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .long 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .long 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .long 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .long 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .long 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b 
ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .long 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .long 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .long 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s ld1 {v17.4s},[x3] add v16.4s,v16.4s,v6.4s sub x3,x3,#64*4-16 // rewind orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s add v17.4s,v17.4s,v7.4s orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s add v0.4s,v0.4s,v18.4s add v1.4s,v1.4s,v19.4s cbnz x2,Loop_hw st1 {v0.4s,v1.4s},[x0] ldr x29,[sp],#16 ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
wlsfx/bnbb
285,754
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/aesv8-gcm-armv8-unroll8.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include "openssl/arm_arch.h" #if __ARM_MAX_ARCH__>=8 .text .arch armv8.2-a+crypto .globl aesv8_gcm_8x_enc_128 .def aesv8_gcm_8x_enc_128 .type 32 .endef .align 4 aesv8_gcm_8x_enc_128: #ifdef BORINGSSL_DISPATCH_TEST adrp x9,BORINGSSL_function_hit add x9, x9, :lo12:BORINGSSL_function_hit mov w10, #1 strb w10, [x9,#7] // kFlag_aesv8_gcm_8x_enc_128 #endif AARCH64_VALID_CALL_TARGET cbz x1, L128_enc_ret stp d8, d9, [sp, #-80]! lsr x9, x1, #3 mov x16, x4 mov x11, x5 stp d10, d11, [sp, #16] stp d12, d13, [sp, #32] stp d14, d15, [sp, #48] mov x5, #0xc200000000000000 stp x5, xzr, [sp, #64] add x10, sp, #64 mov x15, #0x100000000 //set up counter increment movi v31.16b, #0x0 mov v31.d[1], x15 mov x5, x9 ld1 { v0.16b}, [x16] //CTR block 0 sub x5, x5, #1 //byte_len - 1 and x5, x5, #0xffffffffffffff80 //number of bytes to be processed in main loop (at least 1 byte must be handled by tail) rev32 v30.16b, v0.16b //set up reversed counter add v30.4s, v30.4s, v31.4s //CTR block 0 rev32 v1.16b, v30.16b //CTR block 1 add v30.4s, v30.4s, v31.4s //CTR block 1 rev32 v2.16b, v30.16b //CTR block 2 add v30.4s, v30.4s, v31.4s //CTR block 2 rev32 v3.16b, v30.16b //CTR block 3 add v30.4s, v30.4s, v31.4s //CTR block 3 rev32 v4.16b, v30.16b //CTR block 4 add v30.4s, v30.4s, v31.4s //CTR block 4 rev32 v5.16b, v30.16b //CTR block 5 add v30.4s, v30.4s, v31.4s //CTR block 5 ldp q26, q27, [x11, #0] //load rk0, rk1 rev32 v6.16b, v30.16b //CTR block 6 add v30.4s, v30.4s, v31.4s //CTR block 6 rev32 v7.16b, v30.16b //CTR block 7 add v30.4s, v30.4s, v31.4s //CTR block 7 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 0 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 0 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 0 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 0 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 0 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 0 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 0 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 0 ldp q28, q26, [x11, #32] //load rk2, rk3 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 1 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 1 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 1 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 1 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 1 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 1 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 1 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 2 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 1 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 2 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 2 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 2 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 2 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 2 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 2 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 2 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 3 ldp q27, q28, [x11, #64] //load rk4, rk5 aese v5.16b, v26.16b aesmc v5.16b, v5.16b 
//AES block 5 - round 3 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 3 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 3 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 3 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 3 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 3 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 4 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 3 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 4 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 4 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 4 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 4 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 4 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 4 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 5 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 5 ldp q26, q27, [x11, #96] //load rk6, rk7 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 5 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 5 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 5 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 5 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 5 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 5 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 6 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 6 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 6 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 6 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 6 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 6 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 6 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 6 ldp q28, q26, [x11, #128] //load rk8, rk9 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 7 ld1 { v19.16b}, [x3] ext v19.16b, v19.16b, v19.16b, #8 rev64 v19.16b, v19.16b aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 7 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 7 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 7 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 7 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 7 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 7 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 7 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 8 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 8 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 8 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 8 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 8 ldr q27, [x11, #160] //load rk10 aese v3.16b, v26.16b //AES block 8k+11 - round 9 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 8 aese v2.16b, v26.16b //AES block 8k+10 - round 9 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 8 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 8 aese v6.16b, v26.16b //AES block 8k+14 - round 9 aese v4.16b, v26.16b //AES block 8k+12 - round 9 add x5, x5, x0 aese v0.16b, v26.16b //AES block 8k+8 - round 9 aese 
v7.16b, v26.16b //AES block 8k+15 - round 9 aese v5.16b, v26.16b //AES block 8k+13 - round 9 aese v1.16b, v26.16b //AES block 8k+9 - round 9 add x4, x0, x1, lsr #3 //end_input_ptr cmp x0, x5 //check if we have <= 8 blocks b.ge L128_enc_tail //handle tail ldp q8, q9, [x0], #32 //AES block 0, 1 - load plaintext ldp q10, q11, [x0], #32 //AES block 2, 3 - load plaintext ldp q12, q13, [x0], #32 //AES block 4, 5 - load plaintext ldp q14, q15, [x0], #32 //AES block 6, 7 - load plaintext cmp x0, x5 //check if we have <= 8 blocks .long 0xce006d08 //eor3 v8.16b, v8.16b, v0.16b, v27.16b //AES block 0 - result rev32 v0.16b, v30.16b //CTR block 8 add v30.4s, v30.4s, v31.4s //CTR block 8 .long 0xce016d29 //eor3 v9.16b, v9.16b, v1.16b, v27.16b //AES block 1 - result stp q8, q9, [x2], #32 //AES block 0, 1 - store result rev32 v1.16b, v30.16b //CTR block 9 .long 0xce056dad //eor3 v13.16b, v13.16b, v5.16b, v27.16b //AES block 5 - result add v30.4s, v30.4s, v31.4s //CTR block 9 .long 0xce026d4a //eor3 v10.16b, v10.16b, v2.16b, v27.16b //AES block 2 - result .long 0xce066dce //eor3 v14.16b, v14.16b, v6.16b, v27.16b //AES block 6 - result .long 0xce046d8c //eor3 v12.16b, v12.16b, v4.16b, v27.16b //AES block 4 - result rev32 v2.16b, v30.16b //CTR block 10 add v30.4s, v30.4s, v31.4s //CTR block 10 .long 0xce036d6b //eor3 v11.16b, v11.16b, v3.16b, v27.16b //AES block 3 - result .long 0xce076def //eor3 v15.16b, v15.16b, v7.16b,v27.16b //AES block 7 - result stp q10, q11, [x2], #32 //AES block 2, 3 - store result rev32 v3.16b, v30.16b //CTR block 11 add v30.4s, v30.4s, v31.4s //CTR block 11 stp q12, q13, [x2], #32 //AES block 4, 5 - store result stp q14, q15, [x2], #32 //AES block 6, 7 - store result rev32 v4.16b, v30.16b //CTR block 12 add v30.4s, v30.4s, v31.4s //CTR block 12 b.ge L128_enc_prepretail //do prepretail L128_enc_main_loop: //main loop start rev32 v5.16b, v30.16b //CTR block 8k+13 ldr q20, [x6, #96] //load h5l | h5h ldr q22, [x6, #128] //load h6l | h6h add v30.4s, v30.4s, v31.4s //CTR block 8k+13 rev64 v9.16b, v9.16b //GHASH block 8k+1 rev64 v8.16b, v8.16b //GHASH block 8k ldr q23, [x6, #144] //load h7l | h7h ldr q25, [x6, #176] //load h8l | h8h rev32 v6.16b, v30.16b //CTR block 8k+14 add v30.4s, v30.4s, v31.4s //CTR block 8k+14 ext v19.16b, v19.16b, v19.16b, #8 //PRE 0 ldr q21, [x6, #112] //load h6k | h5k ldr q24, [x6, #160] //load h8k | h7k rev64 v13.16b, v13.16b //GHASH block 8k+5 (t0, t1, t2 and t3 free) rev64 v11.16b, v11.16b //GHASH block 8k+3 ldp q26, q27, [x11, #0] //load rk0, rk1 eor v8.16b, v8.16b, v19.16b //PRE 1 rev32 v7.16b, v30.16b //CTR block 8k+15 rev64 v15.16b, v15.16b //GHASH block 8k+7 (t0, t1, t2 and t3 free) pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high rev64 v10.16b, v10.16b //GHASH block 8k+2 pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low ldr q23, [x6, #48] //load h3l | h3h ldr q25, [x6, #80] //load h3l | h3h aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 0 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 0 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 0 eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high add v30.4s, v30.4s, 
v31.4s //CTR block 8k+15 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 0 eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 0 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 1 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 0 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 1 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 0 pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 1 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 0 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 1 .long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b,v9.16b //GHASH block 8k+2, 8k+3 - high trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid ldp q28, q26, [x11, #32] //load rk2, rk3 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 1 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 1 pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 1 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 1 pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid rev64 v14.16b, v14.16b //GHASH block 8k+6 (t0, t1, and t2 free) .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 2 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 2 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 2 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 2 .long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 2 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 2 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 2 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 2 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 3 ldr q21, [x6, #16] //load h2k | h1k ldr q24, [x6, #64] //load h4k | h3k rev64 v12.16b, v12.16b //GHASH block 8k+4 (t0, t1, and t2 free) ldp q27, q28, [x11, #64] //load rk4, rk5 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 3 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 3 ldr q20, [x6] //load h1l | h1h ldr q22, [x6, #32] //load h1l | h1h pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 3 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 3 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 3 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 3 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 3 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES 
block 8k+8 - round 4 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 4 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 4 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 4 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 4 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 4 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 4 pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 4 ldp q26, q27, [x11, #96] //load rk6, rk7 trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 5 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 5 pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low .long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid .long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 5 eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 5 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 5 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 5 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 5 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 5 .long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid ldr d16, [x10] //MODULO - load modulo constant pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 6 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 6 pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 6 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 6 pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low ldp q8, q9, [x0], #32 //AES block 8k+8, 8k+9 - load plaintext aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 6 rev32 v20.16b, v30.16b //CTR block 8k+16 add v30.4s, v30.4s, v31.4s //CTR block 8k+16 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 6 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 6 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 6 .long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid ldp q28, q26, [x11, #128] //load rk8, rk9 .long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 7 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 7 ldp q10, q11, [x0], #32 //AES block 8k+10, 8k+11 - load plaintext aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 7 aese v6.16b, v27.16b aesmc v6.16b, v6.16b 
//AES block 8k+14 - round 7 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 7 pmull v21.1q, v17.1d, v16.1d //MODULO - top 64b align with mid aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 7 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 7 rev32 v22.16b, v30.16b //CTR block 8k+17 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 7 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 8 ldp q12, q13, [x0], #32 //AES block 8k+12, 8k+13 - load plaintext add v30.4s, v30.4s, v31.4s //CTR block 8k+17 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 8 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 8 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 8 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 8 .long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up ldr q27, [x11, #160] //load rk10 ext v29.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment rev32 v23.16b, v30.16b //CTR block 8k+18 add v30.4s, v30.4s, v31.4s //CTR block 8k+18 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 8 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 8 .long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 8 aese v2.16b, v26.16b //AES block 8k+10 - round 9 aese v4.16b, v26.16b //AES block 8k+12 - round 9 aese v1.16b, v26.16b //AES block 8k+9 - round 9 ldp q14, q15, [x0], #32 //AES block 8k+14, 8k+15 - load plaintext rev32 v25.16b, v30.16b //CTR block 8k+19 add v30.4s, v30.4s, v31.4s //CTR block 8k+19 cmp x0, x5 //LOOP CONTROL .long 0xce046d8c //eor3 v12.16b, v12.16b, v4.16b, v27.16b //AES block 4 - result aese v7.16b, v26.16b //AES block 8k+15 - round 9 aese v6.16b, v26.16b //AES block 8k+14 - round 9 aese v3.16b, v26.16b //AES block 8k+11 - round 9 .long 0xce026d4a //eor3 v10.16b, v10.16b, v2.16b, v27.16b //AES block 8k+10 - result mov v2.16b, v23.16b //CTR block 8k+18 aese v0.16b, v26.16b //AES block 8k+8 - round 9 rev32 v4.16b, v30.16b //CTR block 8k+20 add v30.4s, v30.4s, v31.4s //CTR block 8k+20 .long 0xce076def //eor3 v15.16b, v15.16b, v7.16b, v27.16b //AES block 7 - result aese v5.16b, v26.16b //AES block 8k+13 - round 9 pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low .long 0xce016d29 //eor3 v9.16b, v9.16b, v1.16b, v27.16b //AES block 8k+9 - result .long 0xce036d6b //eor3 v11.16b, v11.16b, v3.16b, v27.16b //AES block 8k+11 - result mov v3.16b, v25.16b //CTR block 8k+19 ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment .long 0xce056dad //eor3 v13.16b, v13.16b, v5.16b, v27.16b //AES block 5 - result mov v1.16b, v22.16b //CTR block 8k+17 .long 0xce006d08 //eor3 v8.16b, v8.16b, v0.16b, v27.16b //AES block 8k+8 - result mov v0.16b, v20.16b //CTR block 8k+16 stp q8, q9, [x2], #32 //AES block 8k+8, 8k+9 - store result stp q10, q11, [x2], #32 //AES block 8k+10, 8k+11 - store result .long 0xce066dce //eor3 v14.16b, v14.16b, v6.16b, v27.16b //AES block 6 - result stp q12, q13, [x2], #32 //AES block 8k+12, 8k+13 - store result .long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low stp q14, q15, [x2], #32 //AES block 8k+14, 8k+15 - store result b.lt L128_enc_main_loop L128_enc_prepretail: //PREPRETAIL rev32 v5.16b, v30.16b //CTR block 8k+13 ldr q23, [x6, #144] //load h7l | h7h ldr q25, [x6, #176] //load h8l | h8h ext v19.16b, 
v19.16b, v19.16b, #8 //PRE 0 ldr q20, [x6, #96] //load h5l | h5h ldr q22, [x6, #128] //load h6l | h6h rev64 v8.16b, v8.16b //GHASH block 8k rev64 v9.16b, v9.16b //GHASH block 8k+1 ldr q21, [x6, #112] //load h6k | h5k ldr q24, [x6, #160] //load h6k | h5k add v30.4s, v30.4s, v31.4s //CTR block 8k+13 rev64 v11.16b, v11.16b //GHASH block 8k+3 rev64 v10.16b, v10.16b //GHASH block 8k+2 eor v8.16b, v8.16b, v19.16b //PRE 1 rev32 v6.16b, v30.16b //CTR block 8k+14 pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high rev64 v13.16b, v13.16b //GHASH block 8k+5 (t0, t1, t2 and t3 free) trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid ldp q26, q27, [x11, #0] //load rk0, rk1 add v30.4s, v30.4s, v31.4s //CTR block 8k+14 pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid rev64 v12.16b, v12.16b //GHASH block 8k+4 (t0, t1, and t2 free) rev64 v15.16b, v15.16b //GHASH block 8k+7 (t0, t1, t2 and t3 free) eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid rev32 v7.16b, v30.16b //CTR block 8k+15 rev64 v14.16b, v14.16b //GHASH block 8k+6 (t0, t1, and t2 free) aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 0 pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 0 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 0 pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 0 .long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 0 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 0 eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 0 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 0 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 1 pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low ldr q23, [x6, #48] //load h3l | h3h ldr q25, [x6, #80] //load h4l | h4h ldp q28, q26, [x11, #32] //load rk2, rk3 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 1 pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 1 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 1 .long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid ldr q21, [x6, #16] //load h2k | h1k ldr q24, [x6, #64] //load h4k | h3k aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 1 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 1 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 1 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES 
block 8k+13 - round 2 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 2 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 2 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 2 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 1 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 2 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 3 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 2 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 2 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 2 ldp q27, q28, [x11, #64] //load rk4, rk5 ldr q20, [x6] //load h1l | h1h ldr q22, [x6, #32] //load h1l | h1h trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 3 pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 3 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 3 pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 3 add v30.4s, v30.4s, v31.4s //CTR block 8k+15 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 3 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 3 eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 3 pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 4 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 4 .long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high .long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 5 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 4 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 4 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 4 pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 4 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 4 pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high ldp q26, q27, [x11, #96] //load rk6, rk7 pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low .long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 5 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 5 ldr d16, [x10] //MODULO - load modulo constant aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 5 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 
8k+12 - round 5 .long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 5 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 5 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 5 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 6 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 6 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 6 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 6 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 6 .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low .long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 6 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 6 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 6 pmull v21.1q, v17.1d, v16.1d //MODULO - top 64b align with mid .long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up ldp q28, q26, [x11, #128] //load rk8, rk9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 7 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 7 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 7 ext v29.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 7 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 7 .long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 7 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 7 pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 7 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 8 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 8 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 8 ext v18.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 8 .long 0xce114a73 //eor3 v19.16b, v19.16b, v17.16b, v18.16b //MODULO - fold into low aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 8 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 8 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 8 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 8 ldr q27, [x11, #160] //load rk10 aese v6.16b, v26.16b //AES block 8k+14 - round 9 aese v2.16b, v26.16b //AES block 8k+10 - round 9 aese v0.16b, v26.16b //AES block 8k+8 - round 9 aese v1.16b, v26.16b //AES block 8k+9 - round 9 aese v3.16b, v26.16b //AES block 8k+11 - round 9 aese v5.16b, v26.16b //AES block 8k+13 - round 9 aese v4.16b, v26.16b //AES block 8k+12 - round 9 aese v7.16b, v26.16b //AES block 8k+15 - round 9 L128_enc_tail: //TAIL sub x5, x4, x0 //main_end_input_ptr is number of bytes left to process ldr q8, [x0], #16 //AES block 8k+8 - load plaintext mov v29.16b, v27.16b ldp q20, q21, [x6, #96] //load h5l | h5h .long 0xce007509 //eor3 v9.16b, v8.16b, v0.16b, v29.16b //AES block 8k+8 - result ext v16.16b, v19.16b, v19.16b, #8 //prepare final partial tag ldp q22, q23, [x6, #128] //load h6l | h6h ldp q24, q25, 
[x6, #160] //load h8k | h7k cmp x5, #112 b.gt L128_enc_blocks_more_than_7 mov v7.16b, v6.16b mov v6.16b, v5.16b movi v17.8b, #0 cmp x5, #96 sub v30.4s, v30.4s, v31.4s mov v5.16b, v4.16b mov v4.16b, v3.16b mov v3.16b, v2.16b mov v2.16b, v1.16b movi v19.8b, #0 movi v18.8b, #0 b.gt L128_enc_blocks_more_than_6 mov v7.16b, v6.16b cmp x5, #80 sub v30.4s, v30.4s, v31.4s mov v6.16b, v5.16b mov v5.16b, v4.16b mov v4.16b, v3.16b mov v3.16b, v1.16b b.gt L128_enc_blocks_more_than_5 cmp x5, #64 sub v30.4s, v30.4s, v31.4s mov v7.16b, v6.16b mov v6.16b, v5.16b mov v5.16b, v4.16b mov v4.16b, v1.16b b.gt L128_enc_blocks_more_than_4 mov v7.16b, v6.16b sub v30.4s, v30.4s, v31.4s mov v6.16b, v5.16b mov v5.16b, v1.16b cmp x5, #48 b.gt L128_enc_blocks_more_than_3 sub v30.4s, v30.4s, v31.4s mov v7.16b, v6.16b mov v6.16b, v1.16b cmp x5, #32 ldr q24, [x6, #64] //load h4k | h3k b.gt L128_enc_blocks_more_than_2 cmp x5, #16 sub v30.4s, v30.4s, v31.4s mov v7.16b, v1.16b b.gt L128_enc_blocks_more_than_1 ldr q21, [x6, #16] //load h2k | h1k sub v30.4s, v30.4s, v31.4s b L128_enc_blocks_less_than_1 L128_enc_blocks_more_than_7: //blocks left > 7 st1 { v9.16b}, [x2], #16 //AES final-7 block - store result rev64 v8.16b, v9.16b //GHASH final-7 block ldr q9, [x0], #16 //AES final-6 block - load plaintext eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-7 block - mid pmull2 v17.1q, v8.2d, v25.2d //GHASH final-7 block - high ins v18.d[0], v24.d[1] //GHASH final-7 block - mid eor v27.8b, v27.8b, v8.8b //GHASH final-7 block - mid movi v16.8b, #0 //supress further partial tag feed in .long 0xce017529 //eor3 v9.16b, v9.16b, v1.16b, v29.16b //AES final-6 block - result pmull v18.1q, v27.1d, v18.1d //GHASH final-7 block - mid pmull v19.1q, v8.1d, v25.1d //GHASH final-7 block - low L128_enc_blocks_more_than_6: //blocks left > 6 st1 { v9.16b}, [x2], #16 //AES final-6 block - store result rev64 v8.16b, v9.16b //GHASH final-6 block ldr q9, [x0], #16 //AES final-5 block - load plaintext eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-6 block - mid .long 0xce027529 //eor3 v9.16b, v9.16b, v2.16b, v29.16b //AES final-5 block - result pmull v26.1q, v8.1d, v23.1d //GHASH final-6 block - low eor v27.8b, v27.8b, v8.8b //GHASH final-6 block - mid movi v16.8b, #0 //supress further partial tag feed in pmull v27.1q, v27.1d, v24.1d //GHASH final-6 block - mid pmull2 v28.1q, v8.2d, v23.2d //GHASH final-6 block - high eor v19.16b, v19.16b, v26.16b //GHASH final-6 block - low eor v18.16b, v18.16b, v27.16b //GHASH final-6 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-6 block - high L128_enc_blocks_more_than_5: //blocks left > 5 st1 { v9.16b}, [x2], #16 //AES final-5 block - store result rev64 v8.16b, v9.16b //GHASH final-5 block eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-5 block - mid ldr q9, [x0], #16 //AES final-4 block - load plaintext pmull2 v28.1q, v8.2d, v22.2d //GHASH final-5 block - high eor v17.16b, v17.16b, v28.16b //GHASH final-5 block - high eor v27.8b, v27.8b, v8.8b //GHASH final-5 block - mid ins v27.d[1], v27.d[0] //GHASH final-5 block - mid .long 0xce037529 //eor3 v9.16b, v9.16b, v3.16b, v29.16b //AES final-4 block - result pmull v26.1q, v8.1d, v22.1d //GHASH final-5 block - low movi v16.8b, #0 //supress further partial tag feed in pmull2 v27.1q, v27.2d, v21.2d //GHASH final-5 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-5 block - low eor v18.16b, v18.16b, v27.16b //GHASH final-5 block - mid 
L128_enc_blocks_more_than_4: //blocks left > 4 st1 { v9.16b}, [x2], #16 //AES final-4 block - store result rev64 v8.16b, v9.16b //GHASH final-4 block ldr q9, [x0], #16 //AES final-3 block - load plaintext eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-4 block - mid movi v16.8b, #0 //supress further partial tag feed in pmull2 v28.1q, v8.2d, v20.2d //GHASH final-4 block - high eor v27.8b, v27.8b, v8.8b //GHASH final-4 block - mid pmull v26.1q, v8.1d, v20.1d //GHASH final-4 block - low eor v17.16b, v17.16b, v28.16b //GHASH final-4 block - high pmull v27.1q, v27.1d, v21.1d //GHASH final-4 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-4 block - low .long 0xce047529 //eor3 v9.16b, v9.16b, v4.16b, v29.16b //AES final-3 block - result eor v18.16b, v18.16b, v27.16b //GHASH final-4 block - mid L128_enc_blocks_more_than_3: //blocks left > 3 st1 { v9.16b}, [x2], #16 //AES final-3 block - store result ldr q25, [x6, #80] //load h4l | h4h rev64 v8.16b, v9.16b //GHASH final-3 block eor v8.16b, v8.16b, v16.16b //feed in partial tag movi v16.8b, #0 //supress further partial tag feed in ins v27.d[0], v8.d[1] //GHASH final-3 block - mid ldr q24, [x6, #64] //load h4k | h3k pmull v26.1q, v8.1d, v25.1d //GHASH final-3 block - low ldr q9, [x0], #16 //AES final-2 block - load plaintext eor v27.8b, v27.8b, v8.8b //GHASH final-3 block - mid ins v27.d[1], v27.d[0] //GHASH final-3 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-3 block - low .long 0xce057529 //eor3 v9.16b, v9.16b, v5.16b, v29.16b //AES final-2 block - result pmull2 v27.1q, v27.2d, v24.2d //GHASH final-3 block - mid pmull2 v28.1q, v8.2d, v25.2d //GHASH final-3 block - high eor v18.16b, v18.16b, v27.16b //GHASH final-3 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-3 block - high L128_enc_blocks_more_than_2: //blocks left > 2 st1 { v9.16b}, [x2], #16 //AES final-2 block - store result rev64 v8.16b, v9.16b //GHASH final-2 block eor v8.16b, v8.16b, v16.16b //feed in partial tag ldr q9, [x0], #16 //AES final-1 block - load plaintext ins v27.d[0], v8.d[1] //GHASH final-2 block - mid ldr q23, [x6, #48] //load h3l | h3h movi v16.8b, #0 //supress further partial tag feed in eor v27.8b, v27.8b, v8.8b //GHASH final-2 block - mid .long 0xce067529 //eor3 v9.16b, v9.16b, v6.16b, v29.16b //AES final-1 block - result pmull2 v28.1q, v8.2d, v23.2d //GHASH final-2 block - high pmull v26.1q, v8.1d, v23.1d //GHASH final-2 block - low pmull v27.1q, v27.1d, v24.1d //GHASH final-2 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-2 block - high eor v18.16b, v18.16b, v27.16b //GHASH final-2 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-2 block - low L128_enc_blocks_more_than_1: //blocks left > 1 st1 { v9.16b}, [x2], #16 //AES final-1 block - store result ldr q22, [x6, #32] //load h2l | h2h rev64 v8.16b, v9.16b //GHASH final-1 block ldr q9, [x0], #16 //AES final block - load plaintext eor v8.16b, v8.16b, v16.16b //feed in partial tag movi v16.8b, #0 //supress further partial tag feed in ins v27.d[0], v8.d[1] //GHASH final-1 block - mid .long 0xce077529 //eor3 v9.16b, v9.16b, v7.16b, v29.16b //AES final block - result pmull2 v28.1q, v8.2d, v22.2d //GHASH final-1 block - high eor v27.8b, v27.8b, v8.8b //GHASH final-1 block - mid ldr q21, [x6, #16] //load h2k | h1k ins v27.d[1], v27.d[0] //GHASH final-1 block - mid pmull v26.1q, v8.1d, v22.1d //GHASH final-1 block - low pmull2 v27.1q, v27.2d, v21.2d //GHASH final-1 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-1 block - high eor v18.16b, 
v18.16b, v27.16b //GHASH final-1 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-1 block - low L128_enc_blocks_less_than_1: //blocks left <= 1 rev32 v30.16b, v30.16b str q30, [x16] //store the updated counter and x1, x1, #127 //bit_length %= 128 sub x1, x1, #128 //bit_length -= 128 neg x1, x1 //bit_length = 128 - #bits in input (in range [1,128]) mvn x7, xzr //temp0_x = 0xffffffffffffffff ld1 { v26.16b}, [x2] //load existing bytes where the possibly partial last block is to be stored and x1, x1, #127 //bit_length %= 128 lsr x7, x7, x1 //temp0_x is mask for top 64b of last block mvn x8, xzr //temp1_x = 0xffffffffffffffff cmp x1, #64 csel x13, x8, x7, lt csel x14, x7, xzr, lt mov v0.d[1], x14 mov v0.d[0], x13 //ctr0b is mask for last block and v9.16b, v9.16b, v0.16b //possibly partial last block has zeroes in highest bits rev64 v8.16b, v9.16b //GHASH final block bif v9.16b, v26.16b, v0.16b //insert existing bytes in top end of result before storing st1 { v9.16b}, [x2] //store all 16B eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v16.d[0], v8.d[1] //GHASH final block - mid eor v16.8b, v16.8b, v8.8b //GHASH final block - mid ldr q20, [x6] //load h1l | h1h pmull v16.1q, v16.1d, v21.1d //GHASH final block - mid pmull2 v28.1q, v8.2d, v20.2d //GHASH final block - high eor v18.16b, v18.16b, v16.16b //GHASH final block - mid ldr d16, [x10] //MODULO - load modulo constant pmull v26.1q, v8.1d, v20.1d //GHASH final block - low eor v17.16b, v17.16b, v28.16b //GHASH final block - high eor v19.16b, v19.16b, v26.16b //GHASH final block - low ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid .long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up .long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment .long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low ext v19.16b, v19.16b, v19.16b, #8 rev64 v19.16b, v19.16b st1 { v19.16b }, [x3] mov x0, x9 ldp d10, d11, [sp, #16] ldp d12, d13, [sp, #32] ldp d14, d15, [sp, #48] ldp d8, d9, [sp], #80 ret L128_enc_ret: mov w0, #0x0 ret .globl aesv8_gcm_8x_dec_128 .def aesv8_gcm_8x_dec_128 .type 32 .endef .align 4 aesv8_gcm_8x_dec_128: AARCH64_VALID_CALL_TARGET cbz x1, L128_dec_ret stp d8, d9, [sp, #-80]! 
lsr x9, x1, #3 mov x16, x4 mov x11, x5 stp d10, d11, [sp, #16] stp d12, d13, [sp, #32] stp d14, d15, [sp, #48] mov x5, #0xc200000000000000 stp x5, xzr, [sp, #64] add x10, sp, #64 mov x5, x9 ld1 { v0.16b}, [x16] //CTR block 0 ldp q26, q27, [x11, #0] //load rk0, rk1 sub x5, x5, #1 //byte_len - 1 mov x15, #0x100000000 //set up counter increment movi v31.16b, #0x0 mov v31.d[1], x15 ld1 { v19.16b}, [x3] ext v19.16b, v19.16b, v19.16b, #8 rev64 v19.16b, v19.16b rev32 v30.16b, v0.16b //set up reversed counter aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 0 add v30.4s, v30.4s, v31.4s //CTR block 0 rev32 v1.16b, v30.16b //CTR block 1 add v30.4s, v30.4s, v31.4s //CTR block 1 and x5, x5, #0xffffffffffffff80 //number of bytes to be processed in main loop (at least 1 byte must be handled by tail) rev32 v2.16b, v30.16b //CTR block 2 add v30.4s, v30.4s, v31.4s //CTR block 2 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 0 rev32 v3.16b, v30.16b //CTR block 3 add v30.4s, v30.4s, v31.4s //CTR block 3 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 1 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 1 rev32 v4.16b, v30.16b //CTR block 4 add v30.4s, v30.4s, v31.4s //CTR block 4 rev32 v5.16b, v30.16b //CTR block 5 add v30.4s, v30.4s, v31.4s //CTR block 5 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 0 rev32 v6.16b, v30.16b //CTR block 6 add v30.4s, v30.4s, v31.4s //CTR block 6 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 0 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 0 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 0 rev32 v7.16b, v30.16b //CTR block 7 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 0 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 1 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 0 ldp q28, q26, [x11, #32] //load rk2, rk3 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 1 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 1 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 1 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 1 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 2 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 2 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 1 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 2 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 2 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 2 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 2 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 2 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 2 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 3 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 3 ldp q27, q28, [x11, #64] //load rk4, rk5 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 3 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 3 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 3 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 3 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 3 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 4 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 4 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 3 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - 
round 4 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 4 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 4 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 4 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 4 ldp q26, q27, [x11, #96] //load rk6, rk7 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 5 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 5 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 5 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 5 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 5 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 5 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 5 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 6 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 6 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 5 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 6 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 6 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 6 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 6 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 6 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 6 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 7 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 7 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 7 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 7 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 7 ldp q28, q26, [x11, #128] //load rk8, rk9 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 7 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 7 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 7 add x5, x5, x0 add v30.4s, v30.4s, v31.4s //CTR block 7 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 8 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 8 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 8 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 8 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 8 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 8 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 8 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 8 aese v0.16b, v26.16b //AES block 0 - round 9 aese v1.16b, v26.16b //AES block 1 - round 9 aese v6.16b, v26.16b //AES block 6 - round 9 ldr q27, [x11, #160] //load rk10 aese v4.16b, v26.16b //AES block 4 - round 9 aese v3.16b, v26.16b //AES block 3 - round 9 aese v2.16b, v26.16b //AES block 2 - round 9 aese v5.16b, v26.16b //AES block 5 - round 9 aese v7.16b, v26.16b //AES block 7 - round 9 add x4, x0, x1, lsr #3 //end_input_ptr cmp x0, x5 //check if we have <= 8 blocks b.ge L128_dec_tail //handle tail ldp q8, q9, [x0], #32 //AES block 0, 1 - load ciphertext .long 0xce006d00 //eor3 v0.16b, v8.16b, v0.16b, v27.16b //AES block 0 - result .long 0xce016d21 //eor3 v1.16b, v9.16b, v1.16b, v27.16b //AES block 1 - result stp q0, q1, [x2], #32 //AES block 0, 1 - store result rev32 v0.16b, v30.16b //CTR block 8 add v30.4s, v30.4s, v31.4s //CTR block 8 ldp q10, q11, [x0], #32 //AES block 2, 3 - load ciphertext ldp q12, q13, [x0], #32 //AES block 4, 5 - load 
ciphertext rev32 v1.16b, v30.16b //CTR block 9 add v30.4s, v30.4s, v31.4s //CTR block 9 ldp q14, q15, [x0], #32 //AES block 6, 7 - load ciphertext .long 0xce036d63 //eor3 v3.16b, v11.16b, v3.16b, v27.16b //AES block 3 - result .long 0xce026d42 //eor3 v2.16b, v10.16b, v2.16b, v27.16b //AES block 2 - result stp q2, q3, [x2], #32 //AES block 2, 3 - store result rev32 v2.16b, v30.16b //CTR block 10 add v30.4s, v30.4s, v31.4s //CTR block 10 .long 0xce066dc6 //eor3 v6.16b, v14.16b, v6.16b, v27.16b //AES block 6 - result rev32 v3.16b, v30.16b //CTR block 11 add v30.4s, v30.4s, v31.4s //CTR block 11 .long 0xce046d84 //eor3 v4.16b, v12.16b, v4.16b, v27.16b //AES block 4 - result .long 0xce056da5 //eor3 v5.16b, v13.16b, v5.16b, v27.16b //AES block 5 - result stp q4, q5, [x2], #32 //AES block 4, 5 - store result .long 0xce076de7 //eor3 v7.16b, v15.16b, v7.16b, v27.16b //AES block 7 - result stp q6, q7, [x2], #32 //AES block 6, 7 - store result rev32 v4.16b, v30.16b //CTR block 12 cmp x0, x5 //check if we have <= 8 blocks add v30.4s, v30.4s, v31.4s //CTR block 12 b.ge L128_dec_prepretail //do prepretail L128_dec_main_loop: //main loop start ldr q23, [x6, #144] //load h7l | h7h ldr q25, [x6, #176] //load h8l | h8h rev64 v9.16b, v9.16b //GHASH block 8k+1 rev64 v8.16b, v8.16b //GHASH block 8k ext v19.16b, v19.16b, v19.16b, #8 //PRE 0 rev64 v14.16b, v14.16b //GHASH block 8k+6 ldr q20, [x6, #96] //load h5l | h5h ldr q22, [x6, #128] //load h6l | h6h eor v8.16b, v8.16b, v19.16b //PRE 1 rev32 v5.16b, v30.16b //CTR block 8k+13 add v30.4s, v30.4s, v31.4s //CTR block 8k+13 rev64 v10.16b, v10.16b //GHASH block 8k+2 rev64 v12.16b, v12.16b //GHASH block 8k+4 ldp q26, q27, [x11, #0] //load rk0, rk1 rev32 v6.16b, v30.16b //CTR block 8k+14 add v30.4s, v30.4s, v31.4s //CTR block 8k+14 ldr q21, [x6, #112] //load h6k | h5k ldr q24, [x6, #160] //load h8k | h7k pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high rev64 v11.16b, v11.16b //GHASH block 8k+3 rev32 v7.16b, v30.16b //CTR block 8k+15 trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid rev64 v13.16b, v13.16b //GHASH block 8k+5 pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 0 pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 0 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 0 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 0 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 0 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 0 eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 0 eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 0 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 1 eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low .long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high ldp q28, q26, [x11, #32] //load rk2, rk3 trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 1 pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low trn2 v10.2d, v11.2d, v10.2d 
//GHASH block 8k+2, 8k+3 - mid pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid ldr q23, [x6, #48] //load h3l | h3h ldr q25, [x6, #80] //load h4l | h4h pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 1 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 1 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 1 pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 1 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 1 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 1 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 2 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 2 .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 2 eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid ldr q20, [x6] //load h1l | h1h ldr q22, [x6, #32] //load h2l | h2h eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 2 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 2 trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 2 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 2 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 2 pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 3 rev64 v15.16b, v15.16b //GHASH block 8k+7 pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high ldp q27, q28, [x11, #64] //load rk4, rk5 pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low .long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid ldr q21, [x6, #16] //load h2k | h1k ldr q24, [x6, #64] //load h4k | h3k aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 3 trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 3 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 3 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 3 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 3 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 3 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 3 pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 4 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 4 eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 4 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 4 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 4 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 4 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 4 trn2 
v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid ldp q26, q27, [x11, #96] //load rk6, rk7 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 5 pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 5 eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 5 pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 5 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 5 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 5 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 5 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 5 pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high .long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid .long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 6 .long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 6 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 6 pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 6 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 6 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 6 pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 6 add v30.4s, v30.4s, v31.4s //CTR block 8k+15 .long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 6 ldp q28, q26, [x11, #128] //load rk8, rk9 ldr d16, [x10] //MODULO - load modulo constant .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 7 rev32 v20.16b, v30.16b //CTR block 8k+16 .long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid add v30.4s, v30.4s, v31.4s //CTR block 8k+16 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 7 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 7 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 7 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 7 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 7 rev32 v22.16b, v30.16b //CTR block 8k+17 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 7 ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid .long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 7 add v30.4s, v30.4s, v31.4s //CTR block 8k+17 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 8 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 8 ldp q8, q9, [x0], #32 //AES block 8k+8, 8k+9 - load ciphertext ldp q10, q11, [x0], #32 //AES block 8k+10, 8k+11 - load ciphertext aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - 
round 8 rev32 v23.16b, v30.16b //CTR block 8k+18 ldp q12, q13, [x0], #32 //AES block 8k+12, 8k+13 - load ciphertext aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 8 .long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid ldp q14, q15, [x0], #32 //AES block 8k+14, 8k+15 - load ciphertext aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 8 add v30.4s, v30.4s, v31.4s //CTR block 8k+18 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 8 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 8 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 8 aese v0.16b, v26.16b //AES block 8k+8 - round 9 aese v1.16b, v26.16b //AES block 8k+9 - round 9 ldr q27, [x11, #160] //load rk10 aese v6.16b, v26.16b //AES block 8k+14 - round 9 pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low aese v2.16b, v26.16b //AES block 8k+10 - round 9 aese v7.16b, v26.16b //AES block 8k+15 - round 9 aese v4.16b, v26.16b //AES block 8k+12 - round 9 ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment rev32 v25.16b, v30.16b //CTR block 8k+19 add v30.4s, v30.4s, v31.4s //CTR block 8k+19 aese v3.16b, v26.16b //AES block 8k+11 - round 9 aese v5.16b, v26.16b //AES block 8k+13 - round 9 .long 0xce016d21 //eor3 v1.16b, v9.16b, v1.16b, v27.16b //AES block 8k+9 - result .long 0xce006d00 //eor3 v0.16b, v8.16b, v0.16b, v27.16b //AES block 8k+8 - result .long 0xce076de7 //eor3 v7.16b, v15.16b, v7.16b, v27.16b //AES block 8k+15 - result .long 0xce066dc6 //eor3 v6.16b, v14.16b, v6.16b, v27.16b //AES block 8k+14 - result .long 0xce026d42 //eor3 v2.16b, v10.16b, v2.16b, v27.16b //AES block 8k+10 - result stp q0, q1, [x2], #32 //AES block 8k+8, 8k+9 - store result mov v1.16b, v22.16b //CTR block 8k+17 .long 0xce046d84 //eor3 v4.16b, v12.16b, v4.16b, v27.16b //AES block 8k+12 - result .long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low mov v0.16b, v20.16b //CTR block 8k+16 .long 0xce036d63 //eor3 v3.16b, v11.16b, v3.16b, v27.16b //AES block 8k+11 - result cmp x0, x5 //LOOP CONTROL stp q2, q3, [x2], #32 //AES block 8k+10, 8k+11 - store result .long 0xce056da5 //eor3 v5.16b, v13.16b, v5.16b, v27.16b //AES block 8k+13 - result mov v2.16b, v23.16b //CTR block 8k+18 stp q4, q5, [x2], #32 //AES block 8k+12, 8k+13 - store result rev32 v4.16b, v30.16b //CTR block 8k+20 add v30.4s, v30.4s, v31.4s //CTR block 8k+20 stp q6, q7, [x2], #32 //AES block 8k+14, 8k+15 - store result mov v3.16b, v25.16b //CTR block 8k+19 b.lt L128_dec_main_loop L128_dec_prepretail: //PREPRETAIL rev64 v11.16b, v11.16b //GHASH block 8k+3 ext v19.16b, v19.16b, v19.16b, #8 //PRE 0 rev64 v8.16b, v8.16b //GHASH block 8k rev64 v10.16b, v10.16b //GHASH block 8k+2 rev32 v5.16b, v30.16b //CTR block 8k+13 ldp q26, q27, [x11, #0] //load rk0, rk1 ldr q23, [x6, #144] //load h7l | h7h ldr q25, [x6, #176] //load h8l | h8h eor v8.16b, v8.16b, v19.16b //PRE 1 rev64 v9.16b, v9.16b //GHASH block 8k+1 add v30.4s, v30.4s, v31.4s //CTR block 8k+13 ldr q20, [x6, #96] //load h5l | h5h ldr q22, [x6, #128] //load h6l | h6h rev64 v13.16b, v13.16b //GHASH block 8k+5 rev64 v12.16b, v12.16b //GHASH block 8k+4 rev64 v14.16b, v14.16b //GHASH block 8k+6 ldr q21, [x6, #112] //load h6k | h5k ldr q24, [x6, #160] //load h8k | h7k rev32 v6.16b, v30.16b //CTR block 8k+14 add v30.4s, v30.4s, v31.4s //CTR block 8k+14 pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low pmull2 v17.1q, v8.2d, v25.2d 
//GHASH block 8k - high trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 0 eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 0 eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low rev32 v7.16b, v30.16b //CTR block 8k+15 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 0 .long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 0 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 0 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 0 pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 1 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 0 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 0 eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 1 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 1 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 1 ldp q28, q26, [x11, #32] //load rk2, rk3 .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid ldr q23, [x6, #48] //load h3l | h3h ldr q25, [x6, #80] //load h4l | h4h aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 1 pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 1 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 1 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 1 ldr q20, [x6] //load h1l | h1h ldr q22, [x6, #32] //load h2l | h2h .long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 2 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 2 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 2 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 2 trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 2 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 2 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 2 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 2 pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid ldp q27, q28, [x11, #64] //load rk4, rk5 rev64 v15.16b, v15.16b //GHASH block 8k+7 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 
- round 3 ldr q21, [x6, #16] //load h2k | h1k ldr q24, [x6, #64] //load h4k | h3k pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 3 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 3 trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 3 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 3 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 3 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 3 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 3 eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid .long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 4 eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 4 pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 4 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 4 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 4 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 4 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 4 pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid ldp q26, q27, [x11, #96] //load rk6, rk7 .long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 5 ldr d16, [x10] //MODULO - load modulo constant pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low .long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 5 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 5 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 5 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 5 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 5 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 5 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 5 .long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 6 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 6 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 6 .long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 6 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 6 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 6 aese v6.16b, v26.16b aesmc 
v6.16b, v6.16b //AES block 8k+14 - round 6 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 6 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 7 .long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up ldp q28, q26, [x11, #128] //load rk8, rk9 pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 7 ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 7 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 7 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 7 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 7 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 7 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 7 .long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid ldr q27, [x11, #160] //load rk10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 8 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 8 pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 8 ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 8 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 8 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 8 aese v6.16b, v26.16b //AES block 8k+14 - round 9 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 8 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 8 .long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low add v30.4s, v30.4s, v31.4s //CTR block 8k+15 aese v2.16b, v26.16b //AES block 8k+10 - round 9 aese v3.16b, v26.16b //AES block 8k+11 - round 9 aese v5.16b, v26.16b //AES block 8k+13 - round 9 aese v0.16b, v26.16b //AES block 8k+8 - round 9 aese v4.16b, v26.16b //AES block 8k+12 - round 9 aese v1.16b, v26.16b //AES block 8k+9 - round 9 aese v7.16b, v26.16b //AES block 8k+15 - round 9 L128_dec_tail: //TAIL mov v29.16b, v27.16b sub x5, x4, x0 //main_end_input_ptr is number of bytes left to process cmp x5, #112 ldp q24, q25, [x6, #160] //load h8k | h7k ldr q9, [x0], #16 //AES block 8k+8 - load ciphertext ldp q20, q21, [x6, #96] //load h5l | h5h ext v16.16b, v19.16b, v19.16b, #8 //prepare final partial tag ldp q22, q23, [x6, #128] //load h6l | h6h .long 0xce00752c //eor3 v12.16b, v9.16b, v0.16b, v29.16b //AES block 8k+8 - result b.gt L128_dec_blocks_more_than_7 cmp x5, #96 mov v7.16b, v6.16b movi v19.8b, #0 movi v17.8b, #0 mov v6.16b, v5.16b mov v5.16b, v4.16b mov v4.16b, v3.16b mov v3.16b, v2.16b mov v2.16b, v1.16b movi v18.8b, #0 sub v30.4s, v30.4s, v31.4s b.gt L128_dec_blocks_more_than_6 cmp x5, #80 sub v30.4s, v30.4s, v31.4s mov v7.16b, v6.16b mov v6.16b, v5.16b mov v5.16b, v4.16b mov v4.16b, v3.16b mov v3.16b, v1.16b b.gt L128_dec_blocks_more_than_5 cmp x5, #64 mov v7.16b, v6.16b mov v6.16b, v5.16b mov v5.16b, v4.16b mov v4.16b, v1.16b sub v30.4s, v30.4s, v31.4s b.gt L128_dec_blocks_more_than_4 sub v30.4s, v30.4s, v31.4s mov v7.16b, v6.16b mov v6.16b, v5.16b mov v5.16b, v1.16b cmp x5, #48 b.gt L128_dec_blocks_more_than_3 sub v30.4s, v30.4s, v31.4s mov v7.16b, v6.16b cmp x5, #32 ldr q24, [x6, #64] //load h4k | h3k mov v6.16b, v1.16b b.gt 
L128_dec_blocks_more_than_2 cmp x5, #16 mov v7.16b, v1.16b sub v30.4s, v30.4s, v31.4s b.gt L128_dec_blocks_more_than_1 sub v30.4s, v30.4s, v31.4s ldr q21, [x6, #16] //load h2k | h1k b L128_dec_blocks_less_than_1 L128_dec_blocks_more_than_7: //blocks left > 7 rev64 v8.16b, v9.16b //GHASH final-7 block eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v18.d[0], v24.d[1] //GHASH final-7 block - mid pmull v19.1q, v8.1d, v25.1d //GHASH final-7 block - low ins v27.d[0], v8.d[1] //GHASH final-7 block - mid movi v16.8b, #0 //supress further partial tag feed in ldr q9, [x0], #16 //AES final-6 block - load ciphertext eor v27.8b, v27.8b, v8.8b //GHASH final-7 block - mid pmull2 v17.1q, v8.2d, v25.2d //GHASH final-7 block - high st1 { v12.16b}, [x2], #16 //AES final-7 block - store result .long 0xce01752c //eor3 v12.16b, v9.16b, v1.16b, v29.16b //AES final-6 block - result pmull v18.1q, v27.1d, v18.1d //GHASH final-7 block - mid L128_dec_blocks_more_than_6: //blocks left > 6 rev64 v8.16b, v9.16b //GHASH final-6 block eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-6 block - mid eor v27.8b, v27.8b, v8.8b //GHASH final-6 block - mid pmull v26.1q, v8.1d, v23.1d //GHASH final-6 block - low ldr q9, [x0], #16 //AES final-5 block - load ciphertext movi v16.8b, #0 //supress further partial tag feed in pmull v27.1q, v27.1d, v24.1d //GHASH final-6 block - mid st1 { v12.16b}, [x2], #16 //AES final-6 block - store result pmull2 v28.1q, v8.2d, v23.2d //GHASH final-6 block - high eor v19.16b, v19.16b, v26.16b //GHASH final-6 block - low eor v17.16b, v17.16b, v28.16b //GHASH final-6 block - high eor v18.16b, v18.16b, v27.16b //GHASH final-6 block - mid .long 0xce02752c //eor3 v12.16b, v9.16b, v2.16b, v29.16b //AES final-5 block - result L128_dec_blocks_more_than_5: //blocks left > 5 rev64 v8.16b, v9.16b //GHASH final-5 block ldr q9, [x0], #16 //AES final-4 block - load ciphertext st1 { v12.16b}, [x2], #16 //AES final-5 block - store result eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-5 block - mid .long 0xce03752c //eor3 v12.16b, v9.16b, v3.16b, v29.16b //AES final-4 block - result eor v27.8b, v27.8b, v8.8b //GHASH final-5 block - mid ins v27.d[1], v27.d[0] //GHASH final-5 block - mid pmull v26.1q, v8.1d, v22.1d //GHASH final-5 block - low movi v16.8b, #0 //supress further partial tag feed in pmull2 v27.1q, v27.2d, v21.2d //GHASH final-5 block - mid pmull2 v28.1q, v8.2d, v22.2d //GHASH final-5 block - high eor v19.16b, v19.16b, v26.16b //GHASH final-5 block - low eor v18.16b, v18.16b, v27.16b //GHASH final-5 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-5 block - high L128_dec_blocks_more_than_4: //blocks left > 4 rev64 v8.16b, v9.16b //GHASH final-4 block eor v8.16b, v8.16b, v16.16b //feed in partial tag ldr q9, [x0], #16 //AES final-3 block - load ciphertext ins v27.d[0], v8.d[1] //GHASH final-4 block - mid movi v16.8b, #0 //supress further partial tag feed in pmull2 v28.1q, v8.2d, v20.2d //GHASH final-4 block - high pmull v26.1q, v8.1d, v20.1d //GHASH final-4 block - low eor v17.16b, v17.16b, v28.16b //GHASH final-4 block - high st1 { v12.16b}, [x2], #16 //AES final-4 block - store result eor v27.8b, v27.8b, v8.8b //GHASH final-4 block - mid .long 0xce04752c //eor3 v12.16b, v9.16b, v4.16b, v29.16b //AES final-3 block - result eor v19.16b, v19.16b, v26.16b //GHASH final-4 block - low pmull v27.1q, v27.1d, v21.1d //GHASH final-4 block - mid eor v18.16b, v18.16b, v27.16b //GHASH final-4 block - mid 
L128_dec_blocks_more_than_3: //blocks left > 3 st1 { v12.16b}, [x2], #16 //AES final-3 block - store result rev64 v8.16b, v9.16b //GHASH final-3 block eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-3 block - mid ldr q25, [x6, #80] //load h4l | h4h ldr q24, [x6, #64] //load h4k | h3k eor v27.8b, v27.8b, v8.8b //GHASH final-3 block - mid ldr q9, [x0], #16 //AES final-2 block - load ciphertext ins v27.d[1], v27.d[0] //GHASH final-3 block - mid pmull v26.1q, v8.1d, v25.1d //GHASH final-3 block - low pmull2 v28.1q, v8.2d, v25.2d //GHASH final-3 block - high movi v16.8b, #0 //supress further partial tag feed in .long 0xce05752c //eor3 v12.16b, v9.16b, v5.16b, v29.16b //AES final-2 block - result eor v19.16b, v19.16b, v26.16b //GHASH final-3 block - low pmull2 v27.1q, v27.2d, v24.2d //GHASH final-3 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-3 block - high eor v18.16b, v18.16b, v27.16b //GHASH final-3 block - mid L128_dec_blocks_more_than_2: //blocks left > 2 rev64 v8.16b, v9.16b //GHASH final-2 block st1 { v12.16b}, [x2], #16 //AES final-2 block - store result eor v8.16b, v8.16b, v16.16b //feed in partial tag ldr q23, [x6, #48] //load h3l | h3h movi v16.8b, #0 //supress further partial tag feed in ins v27.d[0], v8.d[1] //GHASH final-2 block - mid eor v27.8b, v27.8b, v8.8b //GHASH final-2 block - mid pmull v26.1q, v8.1d, v23.1d //GHASH final-2 block - low pmull2 v28.1q, v8.2d, v23.2d //GHASH final-2 block - high pmull v27.1q, v27.1d, v24.1d //GHASH final-2 block - mid ldr q9, [x0], #16 //AES final-1 block - load ciphertext eor v18.16b, v18.16b, v27.16b //GHASH final-2 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-2 block - low .long 0xce06752c //eor3 v12.16b, v9.16b, v6.16b, v29.16b //AES final-1 block - result eor v17.16b, v17.16b, v28.16b //GHASH final-2 block - high L128_dec_blocks_more_than_1: //blocks left > 1 st1 { v12.16b}, [x2], #16 //AES final-1 block - store result rev64 v8.16b, v9.16b //GHASH final-1 block ldr q22, [x6, #32] //load h2l | h2h eor v8.16b, v8.16b, v16.16b //feed in partial tag movi v16.8b, #0 //supress further partial tag feed in ins v27.d[0], v8.d[1] //GHASH final-1 block - mid ldr q9, [x0], #16 //AES final block - load ciphertext pmull2 v28.1q, v8.2d, v22.2d //GHASH final-1 block - high eor v27.8b, v27.8b, v8.8b //GHASH final-1 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-1 block - high ldr q21, [x6, #16] //load h2k | h1k ins v27.d[1], v27.d[0] //GHASH final-1 block - mid .long 0xce07752c //eor3 v12.16b, v9.16b, v7.16b, v29.16b //AES final block - result pmull v26.1q, v8.1d, v22.1d //GHASH final-1 block - low pmull2 v27.1q, v27.2d, v21.2d //GHASH final-1 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-1 block - low eor v18.16b, v18.16b, v27.16b //GHASH final-1 block - mid L128_dec_blocks_less_than_1: //blocks left <= 1 and x1, x1, #127 //bit_length %= 128 sub x1, x1, #128 //bit_length -= 128 neg x1, x1 //bit_length = 128 - #bits in input (in range [1,128]) mvn x7, xzr //temp0_x = 0xffffffffffffffff and x1, x1, #127 //bit_length %= 128 lsr x7, x7, x1 //temp0_x is mask for top 64b of last block cmp x1, #64 mvn x8, xzr //temp1_x = 0xffffffffffffffff csel x13, x8, x7, lt csel x14, x7, xzr, lt mov v0.d[1], x14 mov v0.d[0], x13 //ctr0b is mask for last block ldr q20, [x6] //load h1l | h1h ld1 { v26.16b}, [x2] //load existing bytes where the possibly partial last block is to be stored and v9.16b, v9.16b, v0.16b //possibly partial last block has zeroes in highest bits rev64 v8.16b, v9.16b //GHASH 
final block eor v8.16b, v8.16b, v16.16b //feed in partial tag pmull2 v28.1q, v8.2d, v20.2d //GHASH final block - high ins v16.d[0], v8.d[1] //GHASH final block - mid eor v17.16b, v17.16b, v28.16b //GHASH final block - high eor v16.8b, v16.8b, v8.8b //GHASH final block - mid bif v12.16b, v26.16b, v0.16b //insert existing bytes in top end of result before storing pmull v16.1q, v16.1d, v21.1d //GHASH final block - mid st1 { v12.16b}, [x2] //store all 16B pmull v26.1q, v8.1d, v20.1d //GHASH final block - low eor v18.16b, v18.16b, v16.16b //GHASH final block - mid ldr d16, [x10] //MODULO - load modulo constant eor v19.16b, v19.16b, v26.16b //GHASH final block - low eor v14.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up pmull v21.1q, v17.1d, v16.1d //MODULO - top 64b align with mid ext v17.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment eor v18.16b, v18.16b, v14.16b //MODULO - karatsuba tidy up .long 0xce115652 //eor3 v18.16b, v18.16b, v17.16b, v21.16b //MODULO - fold into mid pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low ext v18.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment .long 0xce124673 //eor3 v19.16b, v19.16b, v18.16b, v17.16b //MODULO - fold into low ext v19.16b, v19.16b, v19.16b, #8 rev64 v19.16b, v19.16b st1 { v19.16b }, [x3] rev32 v30.16b, v30.16b str q30, [x16] //store the updated counter mov x0, x9 ldp d10, d11, [sp, #16] ldp d12, d13, [sp, #32] ldp d14, d15, [sp, #48] ldp d8, d9, [sp], #80 ret L128_dec_ret: mov w0, #0x0 ret .globl aesv8_gcm_8x_enc_192 .def aesv8_gcm_8x_enc_192 .type 32 .endef .align 4 aesv8_gcm_8x_enc_192: AARCH64_VALID_CALL_TARGET cbz x1, L192_enc_ret stp d8, d9, [sp, #-80]! lsr x9, x1, #3 mov x16, x4 mov x11, x5 stp d10, d11, [sp, #16] stp d12, d13, [sp, #32] stp d14, d15, [sp, #48] mov x5, #0xc200000000000000 stp x5, xzr, [sp, #64] add x10, sp, #64 mov x5, x9 ld1 { v0.16b}, [x16] //CTR block 0 mov x15, #0x100000000 //set up counter increment movi v31.16b, #0x0 mov v31.d[1], x15 rev32 v30.16b, v0.16b //set up reversed counter add v30.4s, v30.4s, v31.4s //CTR block 0 rev32 v1.16b, v30.16b //CTR block 1 add v30.4s, v30.4s, v31.4s //CTR block 1 rev32 v2.16b, v30.16b //CTR block 2 add v30.4s, v30.4s, v31.4s //CTR block 2 rev32 v3.16b, v30.16b //CTR block 3 add v30.4s, v30.4s, v31.4s //CTR block 3 rev32 v4.16b, v30.16b //CTR block 4 add v30.4s, v30.4s, v31.4s //CTR block 4 sub x5, x5, #1 //byte_len - 1 and x5, x5, #0xffffffffffffff80 //number of bytes to be processed in main loop (at least 1 byte must be handled by tail) rev32 v5.16b, v30.16b //CTR block 5 add v30.4s, v30.4s, v31.4s //CTR block 5 ldp q26, q27, [x11, #0] //load rk0, rk1 add x5, x5, x0 rev32 v6.16b, v30.16b //CTR block 6 add v30.4s, v30.4s, v31.4s //CTR block 6 rev32 v7.16b, v30.16b //CTR block 7 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 0 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 0 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 0 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 0 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 0 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 0 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 0 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 0 ldp q28, q26, [x11, #32] //load rk2, rk3 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 1 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 1 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 1 aese v3.16b, 
v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 1 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 1 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 2 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 1 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 1 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 1 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 2 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 2 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 2 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 2 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 2 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 2 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 2 ldp q27, q28, [x11, #64] //load rk4, rk5 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 3 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 3 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 3 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 3 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 3 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 3 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 3 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 4 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 4 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 3 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 4 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 4 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 4 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 4 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 4 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 5 ldp q26, q27, [x11, #96] //load rk6, rk7 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 5 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 5 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 5 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 5 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 5 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 5 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 5 add v30.4s, v30.4s, v31.4s //CTR block 7 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 6 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 6 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 6 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 6 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 6 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 6 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 6 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 6 ldp q28, q26, [x11, #128] //load rk8, rk9 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 7 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 7 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 7 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 7 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 7 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 7 aese v2.16b, v27.16b aesmc v2.16b, 
v2.16b //AES block 2 - round 7 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 7 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 8 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 8 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 8 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 8 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 8 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 8 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 8 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 8 add x4, x0, x1, lsr #3 //end_input_ptr cmp x0, x5 //check if we have <= 8 blocks aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 9 ld1 { v19.16b}, [x3] ext v19.16b, v19.16b, v19.16b, #8 rev64 v19.16b, v19.16b ldp q27, q28, [x11, #160] //load rk10, rk11 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 9 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 9 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 9 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 9 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 9 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 9 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 14 - round 10 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 11 - round 10 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 9 - round 10 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 13 - round 10 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 12 - round 10 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8 - round 10 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 10 - round 10 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 15 - round 10 aese v6.16b, v28.16b //AES block 14 - round 11 aese v3.16b, v28.16b //AES block 11 - round 11 aese v4.16b, v28.16b //AES block 12 - round 11 aese v7.16b, v28.16b //AES block 15 - round 11 ldr q26, [x11, #192] //load rk12 aese v1.16b, v28.16b //AES block 9 - round 11 aese v5.16b, v28.16b //AES block 13 - round 11 aese v2.16b, v28.16b //AES block 10 - round 11 aese v0.16b, v28.16b //AES block 8 - round 11 b.ge L192_enc_tail //handle tail ldp q8, q9, [x0], #32 //AES block 0, 1 - load plaintext ldp q10, q11, [x0], #32 //AES block 2, 3 - load plaintext ldp q12, q13, [x0], #32 //AES block 4, 5 - load plaintext ldp q14, q15, [x0], #32 //AES block 6, 7 - load plaintext .long 0xce006908 //eor3 v8.16b, v8.16b, v0.16b, v26.16b //AES block 0 - result rev32 v0.16b, v30.16b //CTR block 8 add v30.4s, v30.4s, v31.4s //CTR block 8 .long 0xce03696b //eor3 v11.16b, v11.16b, v3.16b, v26.16b //AES block 3 - result .long 0xce016929 //eor3 v9.16b, v9.16b, v1.16b, v26.16b //AES block 1 - result rev32 v1.16b, v30.16b //CTR block 9 add v30.4s, v30.4s, v31.4s //CTR block 9 .long 0xce04698c //eor3 v12.16b, v12.16b, v4.16b, v26.16b //AES block 4 - result .long 0xce0569ad //eor3 v13.16b, v13.16b, v5.16b, v26.16b //AES block 5 - result .long 0xce0769ef //eor3 v15.16b, v15.16b, v7.16b, v26.16b //AES block 7 - result stp q8, q9, [x2], #32 //AES block 0, 1 - store result .long 0xce02694a //eor3 v10.16b, v10.16b, v2.16b, v26.16b //AES block 2 - result rev32 v2.16b, v30.16b //CTR block 10 add v30.4s, v30.4s, v31.4s //CTR block 10 stp q10, q11, [x2], #32 //AES block 2, 3 - store result cmp x0, x5 //check if we have <= 8 blocks rev32 v3.16b, v30.16b //CTR block 11 
add v30.4s, v30.4s, v31.4s //CTR block 11 .long 0xce0669ce //eor3 v14.16b, v14.16b, v6.16b, v26.16b //AES block 6 - result stp q12, q13, [x2], #32 //AES block 4, 5 - store result rev32 v4.16b, v30.16b //CTR block 12 stp q14, q15, [x2], #32 //AES block 6, 7 - store result add v30.4s, v30.4s, v31.4s //CTR block 12 b.ge L192_enc_prepretail //do prepretail L192_enc_main_loop: //main loop start rev64 v12.16b, v12.16b //GHASH block 8k+4 (t0, t1, and t2 free) ldp q26, q27, [x11, #0] //load rk0, rk1 rev64 v10.16b, v10.16b //GHASH block 8k+2 rev32 v5.16b, v30.16b //CTR block 8k+13 add v30.4s, v30.4s, v31.4s //CTR block 8k+13 ldr q23, [x6, #144] //load h7l | h7h ldr q25, [x6, #176] //load h8l | h8h ext v19.16b, v19.16b, v19.16b, #8 //PRE 0 rev64 v8.16b, v8.16b //GHASH block 8k ldr q20, [x6, #96] //load h5l | h5h ldr q22, [x6, #128] //load h6l | h6h rev64 v9.16b, v9.16b //GHASH block 8k+1 rev32 v6.16b, v30.16b //CTR block 8k+14 add v30.4s, v30.4s, v31.4s //CTR block 8k+14 eor v8.16b, v8.16b, v19.16b //PRE 1 rev64 v11.16b, v11.16b //GHASH block 8k+3 rev64 v13.16b, v13.16b //GHASH block 8k+5 (t0, t1, t2 and t3 free) aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 0 rev32 v7.16b, v30.16b //CTR block 8k+15 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 0 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 0 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 0 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 0 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 0 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 0 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 0 ldp q28, q26, [x11, #32] //load rk2, rk3 pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 1 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 1 pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 1 ldr q21, [x6, #112] //load h6k | h5k ldr q24, [x6, #160] //load h8k | h7k pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 1 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 1 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 1 eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 1 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 1 pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 2 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 2 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 2 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 2 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 2 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 3 .long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 
8k+15 - round 2 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 3 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 2 trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 2 trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 3 ldp q27, q28, [x11, #64] //load rk4, rk5 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 3 eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low ldr q23, [x6, #48] //load h3l | h3h ldr q25, [x6, #80] //load h4l | h4h pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 3 eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 3 .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 4 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 4 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 3 pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 4 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 3 pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 4 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 4 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 4 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 4 .long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 5 ldr q20, [x6] //load h1l | h1h ldr q22, [x6, #32] //load h2l | h2h ldp q26, q27, [x11, #96] //load rk6, rk7 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 5 rev64 v15.16b, v15.16b //GHASH block 8k+7 (t0, t1, t2 and t3 free) rev64 v14.16b, v14.16b //GHASH block 8k+6 (t0, t1, and t2 free) pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 5 trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 5 ldr q21, [x6, #16] //load h2k | h1k ldr q24, [x6, #64] //load h4k | h3k aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 5 pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 5 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 5 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 5 pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 6 trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 6 aese v3.16b, v26.16b aesmc v3.16b, 
v3.16b //AES block 8k+11 - round 6 pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 6 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 6 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 6 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 6 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 7 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 6 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 7 eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid ldp q28, q26, [x11, #128] //load rk8, rk9 pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 7 pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 7 .long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 7 add v30.4s, v30.4s, v31.4s //CTR block 8k+15 ldr d16, [x10] //MODULO - load modulo constant .long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 7 pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 7 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 8 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 8 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 8 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 8 .long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 7 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 8 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 8 pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 8 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 8 ldp q27, q28, [x11, #160] //load rk10, rk11 .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low rev32 v20.16b, v30.16b //CTR block 8k+16 add v30.4s, v30.4s, v31.4s //CTR block 8k+16 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 9 .long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid .long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 9 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 9 ldp q8, q9, [x0], #32 //AES block 8k+8, 8k+9 - load plaintext pmull v21.1q, v17.1d, v16.1d //MODULO - top 64b align with mid rev32 v22.16b, v30.16b //CTR block 8k+17 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 9 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 9 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 9 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 9 .long 0xce114e52 //eor3 v18.16b, 
v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 9 add v30.4s, v30.4s, v31.4s //CTR block 8k+17 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 10 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 10 ldr q26, [x11, #192] //load rk12 ext v29.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 10 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 10 ldp q10, q11, [x0], #32 //AES block 8k+10, 8k+11 - load plaintext aese v4.16b, v28.16b //AES block 8k+12 - round 11 .long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid ldp q12, q13, [x0], #32 //AES block 8k+12, 8k+13 - load plaintext ldp q14, q15, [x0], #32 //AES block 8k+14, 8k+15 - load plaintext aese v2.16b, v28.16b //AES block 8k+10 - round 11 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 10 rev32 v23.16b, v30.16b //CTR block 8k+18 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 10 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 10 pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 10 aese v5.16b, v28.16b //AES block 8k+13 - round 11 add v30.4s, v30.4s, v31.4s //CTR block 8k+18 aese v7.16b, v28.16b //AES block 8k+15 - round 11 aese v0.16b, v28.16b //AES block 8k+8 - round 11 .long 0xce04698c //eor3 v12.16b, v12.16b, v4.16b, v26.16b //AES block 4 - result aese v6.16b, v28.16b //AES block 8k+14 - round 11 aese v3.16b, v28.16b //AES block 8k+11 - round 11 aese v1.16b, v28.16b //AES block 8k+9 - round 11 rev32 v25.16b, v30.16b //CTR block 8k+19 add v30.4s, v30.4s, v31.4s //CTR block 8k+19 .long 0xce0769ef //eor3 v15.16b, v15.16b, v7.16b, v26.16b //AES block 7 - result .long 0xce02694a //eor3 v10.16b, v10.16b, v2.16b, v26.16b //AES block 8k+10 - result .long 0xce006908 //eor3 v8.16b, v8.16b, v0.16b, v26.16b //AES block 8k+8 - result mov v2.16b, v23.16b //CTR block 8k+18 .long 0xce016929 //eor3 v9.16b, v9.16b, v1.16b, v26.16b //AES block 8k+9 - result mov v1.16b, v22.16b //CTR block 8k+17 stp q8, q9, [x2], #32 //AES block 8k+8, 8k+9 - store result ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment .long 0xce0669ce //eor3 v14.16b, v14.16b, v6.16b, v26.16b //AES block 6 - result mov v0.16b, v20.16b //CTR block 8k+16 rev32 v4.16b, v30.16b //CTR block 8k+20 add v30.4s, v30.4s, v31.4s //CTR block 8k+20 .long 0xce0569ad //eor3 v13.16b, v13.16b, v5.16b, v26.16b //AES block 5 - result .long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low .long 0xce03696b //eor3 v11.16b, v11.16b, v3.16b, v26.16b //AES block 8k+11 - result mov v3.16b, v25.16b //CTR block 8k+19 stp q10, q11, [x2], #32 //AES block 8k+10, 8k+11 - store result stp q12, q13, [x2], #32 //AES block 8k+12, 8k+13 - store result cmp x0, x5 //LOOP CONTROL stp q14, q15, [x2], #32 //AES block 8k+14, 8k+15 - store result b.lt L192_enc_main_loop L192_enc_prepretail: //PREPRETAIL rev32 v5.16b, v30.16b //CTR block 8k+13 ldp q26, q27, [x11, #0] //load rk0, rk1 add v30.4s, v30.4s, v31.4s //CTR block 8k+13 ldr q23, [x6, #144] //load h7l | h7h ldr q25, [x6, #176] //load h8l | h8h rev64 v8.16b, v8.16b //GHASH block 8k ext v19.16b, v19.16b, v19.16b, #8 //PRE 0 rev32 v6.16b, v30.16b //CTR block 8k+14 add v30.4s, v30.4s, v31.4s //CTR block 8k+14 ldr q21, [x6, #112] //load h6k | h5k ldr q24, [x6, #160] 
//load h8k | h7k rev64 v11.16b, v11.16b //GHASH block 8k+3 rev64 v10.16b, v10.16b //GHASH block 8k+2 ldr q20, [x6, #96] //load h5l | h5h ldr q22, [x6, #128] //load h6l | h6h eor v8.16b, v8.16b, v19.16b //PRE 1 rev32 v7.16b, v30.16b //CTR block 8k+15 rev64 v9.16b, v9.16b //GHASH block 8k+1 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 0 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 0 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 0 pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 0 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 0 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 0 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 0 pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 1 pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 0 ldp q28, q26, [x11, #32] //load rk2, rk3 pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 1 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 1 eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 1 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 1 pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 1 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 1 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 1 pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 2 eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 2 .long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 3 trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 2 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 2 pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 2 rev64 v13.16b, v13.16b //GHASH block 8k+5 (t0, t1, t2 and t3 free) rev64 v14.16b, v14.16b //GHASH block 8k+6 (t0, t1, and t2 free) aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 2 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 2 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 2 eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid ldp q27, q28, [x11, #64] //load rk4, rk5 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 3 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 3 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 3 eor v18.16b, 
v18.16b, v24.16b //GHASH block 8k+1 - mid .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 3 ldr q23, [x6, #48] //load h3l | h3h ldr q25, [x6, #80] //load h4l | h4h aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 3 pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid ldr q20, [x6] //load h1l | h1h ldr q22, [x6, #32] //load h2l | h2h aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 3 rev64 v12.16b, v12.16b //GHASH block 8k+4 (t0, t1, and t2 free) aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 3 pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 4 trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 4 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 4 .long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 4 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 4 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 4 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 4 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 5 rev64 v15.16b, v15.16b //GHASH block 8k+7 (t0, t1, t2 and t3 free) ldr q21, [x6, #16] //load h2k | h1k ldr q24, [x6, #64] //load h4k | h3k aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 5 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 5 ldp q26, q27, [x11, #96] //load rk6, rk7 pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 5 trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 5 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 6 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 5 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 5 eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 5 pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 6 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 6 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 7 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 6 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 6 .long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 6 .long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 
8k+4, 8k+5 - high aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 7 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 6 ldr d16, [x10] //MODULO - load modulo constant aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 6 pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 7 .long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 7 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 7 ldp q28, q26, [x11, #128] //load rk8, rk9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 7 .long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low .long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high .long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up ext v29.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 7 pmull v21.1q, v17.1d, v16.1d //MODULO - top 64b align with mid aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 8 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 8 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 7 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 8 .long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 8 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 9 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 8 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 8 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 8 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 8 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 9 ldp q27, q28, [x11, #160] //load rk10, rk11 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 9 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 9 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 9 ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 9 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 9 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 9 pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low ldr q26, [x11, #192] //load rk12 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 10 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 10 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 10 .long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 10 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 10 aese v1.16b, v28.16b //AES block 8k+9 - round 11 aese v7.16b, v28.16b //AES block 8k+15 - round 11 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 10 aese 
v3.16b, v28.16b //AES block 8k+11 - round 11
    aese v5.16b, v27.16b
    aesmc v5.16b, v5.16b //AES block 8k+13 - round 10
    aese v6.16b, v27.16b
    aesmc v6.16b, v6.16b //AES block 8k+14 - round 10
    add v30.4s, v30.4s, v31.4s //CTR block 8k+15
    aese v2.16b, v28.16b //AES block 8k+10 - round 11
    aese v0.16b, v28.16b //AES block 8k+8 - round 11
    aese v6.16b, v28.16b //AES block 8k+14 - round 11
    aese v4.16b, v28.16b //AES block 8k+12 - round 11
    aese v5.16b, v28.16b //AES block 8k+13 - round 11
L192_enc_tail: //TAIL
    ldp q20, q21, [x6, #96] //load h5l | h5h
    sub x5, x4, x0 //main_end_input_ptr is number of bytes left to process
    ldr q8, [x0], #16 //AES block 8k+8 - load plaintext
    ldp q24, q25, [x6, #160] //load h8k | h7k
    mov v29.16b, v26.16b
    ldp q22, q23, [x6, #128] //load h6l | h6h
    cmp x5, #112
    .long 0xce007509 //eor3 v9.16b, v8.16b, v0.16b, v29.16b //AES block 8k+8 - result
    ext v16.16b, v19.16b, v19.16b, #8 //prepare final partial tag
    b.gt L192_enc_blocks_more_than_7
    cmp x5, #96
    mov v7.16b, v6.16b
    movi v17.8b, #0
    mov v6.16b, v5.16b
    movi v19.8b, #0
    sub v30.4s, v30.4s, v31.4s
    mov v5.16b, v4.16b
    mov v4.16b, v3.16b
    mov v3.16b, v2.16b
    mov v2.16b, v1.16b
    movi v18.8b, #0
    b.gt L192_enc_blocks_more_than_6
    mov v7.16b, v6.16b
    cmp x5, #80
    mov v6.16b, v5.16b
    mov v5.16b, v4.16b
    mov v4.16b, v3.16b
    mov v3.16b, v1.16b
    sub v30.4s, v30.4s, v31.4s
    b.gt L192_enc_blocks_more_than_5
    cmp x5, #64
    sub v30.4s, v30.4s, v31.4s
    mov v7.16b, v6.16b
    mov v6.16b, v5.16b
    mov v5.16b, v4.16b
    mov v4.16b, v1.16b
    b.gt L192_enc_blocks_more_than_4
    mov v7.16b, v6.16b
    mov v6.16b, v5.16b
    mov v5.16b, v1.16b
    sub v30.4s, v30.4s, v31.4s
    cmp x5, #48
    b.gt L192_enc_blocks_more_than_3
    mov v7.16b, v6.16b
    mov v6.16b, v1.16b
    sub v30.4s, v30.4s, v31.4s
    ldr q24, [x6, #64] //load h4k | h3k
    cmp x5, #32
    b.gt L192_enc_blocks_more_than_2
    sub v30.4s, v30.4s, v31.4s
    cmp x5, #16
    mov v7.16b, v1.16b
    b.gt L192_enc_blocks_more_than_1
    sub v30.4s, v30.4s, v31.4s
    ldr q21, [x6, #16] //load h2k | h1k
    b L192_enc_blocks_less_than_1
L192_enc_blocks_more_than_7: //blocks left > 7
    st1 { v9.16b}, [x2], #16 //AES final-7 block - store result
    rev64 v8.16b, v9.16b //GHASH final-7 block
    ins v18.d[0], v24.d[1] //GHASH final-7 block - mid
    eor v8.16b, v8.16b, v16.16b //feed in partial tag
    ins v27.d[0], v8.d[1] //GHASH final-7 block - mid
    ldr q9, [x0], #16 //AES final-6 block - load plaintext
    eor v27.8b, v27.8b, v8.8b //GHASH final-7 block - mid
    movi v16.8b, #0 //suppress further partial tag feed in
    pmull v19.1q, v8.1d, v25.1d //GHASH final-7 block - low
    pmull2 v17.1q, v8.2d, v25.2d //GHASH final-7 block - high
    pmull v18.1q, v27.1d, v18.1d //GHASH final-7 block - mid
    .long 0xce017529 //eor3 v9.16b, v9.16b, v1.16b, v29.16b //AES final-6 block - result
L192_enc_blocks_more_than_6: //blocks left > 6
    st1 { v9.16b}, [x2], #16 //AES final-6 block - store result
    rev64 v8.16b, v9.16b //GHASH final-6 block
    ldr q9, [x0], #16 //AES final-5 block - load plaintext
    eor v8.16b, v8.16b, v16.16b //feed in partial tag
    ins v27.d[0], v8.d[1] //GHASH final-6 block - mid
    pmull v26.1q, v8.1d, v23.1d //GHASH final-6 block - low
    .long 0xce027529 //eor3 v9.16b, v9.16b, v2.16b, v29.16b //AES final-5 block - result
    movi v16.8b, #0 //suppress further partial tag feed in
    pmull2 v28.1q, v8.2d, v23.2d //GHASH final-6 block - high
    eor v27.8b, v27.8b, v8.8b //GHASH final-6 block - mid
    pmull v27.1q, v27.1d, v24.1d //GHASH final-6 block - mid
    eor v17.16b, v17.16b, v28.16b //GHASH final-6 block - high
    eor v19.16b, v19.16b, v26.16b //GHASH final-6 block - low
    eor v18.16b, v18.16b, v27.16b //GHASH final-6 block - mid
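    // Each of the remaining final-N handlers emits one more ciphertext block (v9)
    // and folds it into the GHASH accumulators (v19 = low, v18 = mid, v17 = high);
    // v16 is zeroed after the first tail block so the carried-in partial tag is
    // only folded in once.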
L192_enc_blocks_more_than_5: //blocks left > 5
    st1 { v9.16b}, [x2], #16 //AES final-5 block - store result
    rev64 v8.16b, v9.16b //GHASH final-5 block
    eor v8.16b, v8.16b, v16.16b //feed in partial tag
    ins v27.d[0], v8.d[1] //GHASH final-5 block - mid
    ldr q9, [x0], #16 //AES final-4 block - load plaintext
    pmull2 v28.1q, v8.2d, v22.2d //GHASH final-5 block - high
    eor v27.8b, v27.8b, v8.8b //GHASH final-5 block - mid
    eor v17.16b, v17.16b, v28.16b //GHASH final-5 block - high
    ins v27.d[1], v27.d[0] //GHASH final-5 block - mid
    pmull v26.1q, v8.1d, v22.1d //GHASH final-5 block - low
    eor v19.16b, v19.16b, v26.16b //GHASH final-5 block - low
    pmull2 v27.1q, v27.2d, v21.2d //GHASH final-5 block - mid
    .long 0xce037529 //eor3 v9.16b, v9.16b, v3.16b, v29.16b //AES final-4 block - result
    movi v16.8b, #0 //suppress further partial tag feed in
    eor v18.16b, v18.16b, v27.16b //GHASH final-5 block - mid
L192_enc_blocks_more_than_4: //blocks left > 4
    st1 { v9.16b}, [x2], #16 //AES final-4 block - store result
    rev64 v8.16b, v9.16b //GHASH final-4 block
    eor v8.16b, v8.16b, v16.16b //feed in partial tag
    ldr q9, [x0], #16 //AES final-3 block - load plaintext
    pmull2 v28.1q, v8.2d, v20.2d //GHASH final-4 block - high
    ins v27.d[0], v8.d[1] //GHASH final-4 block - mid
    pmull v26.1q, v8.1d, v20.1d //GHASH final-4 block - low
    eor v17.16b, v17.16b, v28.16b //GHASH final-4 block - high
    eor v27.8b, v27.8b, v8.8b //GHASH final-4 block - mid
    movi v16.8b, #0 //suppress further partial tag feed in
    eor v19.16b, v19.16b, v26.16b //GHASH final-4 block - low
    pmull v27.1q, v27.1d, v21.1d //GHASH final-4 block - mid
    eor v18.16b, v18.16b, v27.16b //GHASH final-4 block - mid
    .long 0xce047529 //eor3 v9.16b, v9.16b, v4.16b, v29.16b //AES final-3 block - result
L192_enc_blocks_more_than_3: //blocks left > 3
    ldr q24, [x6, #64] //load h4k | h3k
    st1 { v9.16b}, [x2], #16 //AES final-3 block - store result
    rev64 v8.16b, v9.16b //GHASH final-3 block
    eor v8.16b, v8.16b, v16.16b //feed in partial tag
    movi v16.8b, #0 //suppress further partial tag feed in
    ldr q9, [x0], #16 //AES final-2 block - load plaintext
    ldr q25, [x6, #80] //load h4l | h4h
    ins v27.d[0], v8.d[1] //GHASH final-3 block - mid
    .long 0xce057529 //eor3 v9.16b, v9.16b, v5.16b, v29.16b //AES final-2 block - result
    eor v27.8b, v27.8b, v8.8b //GHASH final-3 block - mid
    ins v27.d[1], v27.d[0] //GHASH final-3 block - mid
    pmull v26.1q, v8.1d, v25.1d //GHASH final-3 block - low
    pmull2 v28.1q, v8.2d, v25.2d //GHASH final-3 block - high
    pmull2 v27.1q, v27.2d, v24.2d //GHASH final-3 block - mid
    eor v19.16b, v19.16b, v26.16b //GHASH final-3 block - low
    eor v18.16b, v18.16b, v27.16b //GHASH final-3 block - mid
    eor v17.16b, v17.16b, v28.16b //GHASH final-3 block - high
L192_enc_blocks_more_than_2: //blocks left > 2
    st1 { v9.16b}, [x2], #16 //AES final-2 block - store result
    rev64 v8.16b, v9.16b //GHASH final-2 block
    ldr q23, [x6, #48] //load h3l | h3h
    eor v8.16b, v8.16b, v16.16b //feed in partial tag
    ldr q9, [x0], #16 //AES final-1 block - load plaintext
    ins v27.d[0], v8.d[1] //GHASH final-2 block - mid
    eor v27.8b, v27.8b, v8.8b //GHASH final-2 block - mid
    pmull v26.1q, v8.1d, v23.1d //GHASH final-2 block - low
    pmull2 v28.1q, v8.2d, v23.2d //GHASH final-2 block - high
    movi v16.8b, #0 //suppress further partial tag feed in
    pmull v27.1q, v27.1d, v24.1d //GHASH final-2 block - mid
    eor v19.16b, v19.16b, v26.16b //GHASH final-2 block - low
    eor v17.16b, v17.16b, v28.16b //GHASH final-2 block - high
    eor v18.16b, v18.16b, v27.16b //GHASH final-2 block - mid
    .long 0xce067529 //eor3 v9.16b, v9.16b, v6.16b, v29.16b //AES final-1 block - result
L192_enc_blocks_more_than_1: //blocks left > 1
    ldr q22, [x6, #32] //load h1l | h1h
    st1 { v9.16b}, [x2], #16 //AES final-1 block - store result
    rev64 v8.16b, v9.16b //GHASH final-1 block
    eor v8.16b, v8.16b, v16.16b //feed in partial tag
    ins v27.d[0], v8.d[1] //GHASH final-1 block - mid
    pmull v26.1q, v8.1d, v22.1d //GHASH final-1 block - low
    eor v19.16b, v19.16b, v26.16b //GHASH final-1 block - low
    pmull2 v28.1q, v8.2d, v22.2d //GHASH final-1 block - high
    eor v27.8b, v27.8b, v8.8b //GHASH final-1 block - mid
    ldr q9, [x0], #16 //AES final block - load plaintext
    ldr q21, [x6, #16] //load h2k | h1k
    ins v27.d[1], v27.d[0] //GHASH final-1 block - mid
    .long 0xce077529 //eor3 v9.16b, v9.16b, v7.16b, v29.16b //AES final block - result
    pmull2 v27.1q, v27.2d, v21.2d //GHASH final-1 block - mid
    movi v16.8b, #0 //suppress further partial tag feed in
    eor v18.16b, v18.16b, v27.16b //GHASH final-1 block - mid
    eor v17.16b, v17.16b, v28.16b //GHASH final-1 block - high
L192_enc_blocks_less_than_1: //blocks left <= 1
    mvn x7, xzr //temp0_x = 0xffffffffffffffff
    and x1, x1, #127 //bit_length %= 128
    sub x1, x1, #128 //bit_length -= 128
    neg x1, x1 //bit_length = 128 - #bits in input (in range [1,128])
    and x1, x1, #127 //bit_length %= 128
    lsr x7, x7, x1 //temp0_x is mask for top 64b of last block
    cmp x1, #64
    mvn x8, xzr //temp1_x = 0xffffffffffffffff
    csel x13, x8, x7, lt
    csel x14, x7, xzr, lt
    mov v0.d[1], x14
    ldr q20, [x6] //load h1l | h1h
    ld1 { v26.16b}, [x2] //load existing bytes where the possibly partial last block is to be stored
    mov v0.d[0], x13 //ctr0b is mask for last block
    and v9.16b, v9.16b, v0.16b //possibly partial last block has zeroes in highest bits
    rev64 v8.16b, v9.16b //GHASH final block
    bif v9.16b, v26.16b, v0.16b //insert existing bytes in top end of result before storing
    st1 { v9.16b}, [x2] //store all 16B
    eor v8.16b, v8.16b, v16.16b //feed in partial tag
    ins v16.d[0], v8.d[1] //GHASH final block - mid
    pmull2 v28.1q, v8.2d, v20.2d //GHASH final block - high
    eor v17.16b, v17.16b, v28.16b //GHASH final block - high
    pmull v26.1q, v8.1d, v20.1d //GHASH final block - low
    eor v16.8b, v16.8b, v8.8b //GHASH final block - mid
    pmull v16.1q, v16.1d, v21.1d //GHASH final block - mid
    eor v18.16b, v18.16b, v16.16b //GHASH final block - mid
    ldr d16, [x10] //MODULO - load modulo constant
    eor v19.16b, v19.16b, v26.16b //GHASH final block - low
    ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment
    rev32 v30.16b, v30.16b
    str q30, [x16] //store the updated counter
    .long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up
    pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid
    .long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid
    pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low
    ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment
    .long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low
    ext v19.16b, v19.16b, v19.16b, #8
    rev64 v19.16b, v19.16b
    st1 { v19.16b }, [x3]
    mov x0, x9 //return sizes
    ldp d10, d11, [sp, #16]
    ldp d12, d13, [sp, #32]
    ldp d14, d15, [sp, #48]
    ldp d8, d9, [sp], #80
    ret
L192_enc_ret:
    mov w0, #0x0
    ret
.globl aesv8_gcm_8x_dec_192
.def aesv8_gcm_8x_dec_192
   .type 32
.endef
.align 4
aesv8_gcm_8x_dec_192:
    AARCH64_VALID_CALL_TARGET
    cbz x1, L192_dec_ret
    stp d8, d9, [sp, #-80]!
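    // Decrypt path set-up mirrors the encrypt path above: x1 carries the input
    // length in bits (x9 = x1 >> 3 converts it to bytes), x16 keeps the counter
    // pointer, and v30/v31 hold the reversed CTR value and its per-block increment.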
lsr x9, x1, #3 mov x16, x4 mov x11, x5 stp d10, d11, [sp, #16] stp d12, d13, [sp, #32] stp d14, d15, [sp, #48] mov x5, #0xc200000000000000 stp x5, xzr, [sp, #64] add x10, sp, #64 mov x5, x9 ld1 { v0.16b}, [x16] //CTR block 0 ld1 { v19.16b}, [x3] mov x15, #0x100000000 //set up counter increment movi v31.16b, #0x0 mov v31.d[1], x15 rev32 v30.16b, v0.16b //set up reversed counter add v30.4s, v30.4s, v31.4s //CTR block 0 rev32 v1.16b, v30.16b //CTR block 1 add v30.4s, v30.4s, v31.4s //CTR block 1 rev32 v2.16b, v30.16b //CTR block 2 add v30.4s, v30.4s, v31.4s //CTR block 2 rev32 v3.16b, v30.16b //CTR block 3 add v30.4s, v30.4s, v31.4s //CTR block 3 rev32 v4.16b, v30.16b //CTR block 4 add v30.4s, v30.4s, v31.4s //CTR block 4 rev32 v5.16b, v30.16b //CTR block 5 add v30.4s, v30.4s, v31.4s //CTR block 5 ldp q26, q27, [x11, #0] //load rk0, rk1 rev32 v6.16b, v30.16b //CTR block 6 add v30.4s, v30.4s, v31.4s //CTR block 6 rev32 v7.16b, v30.16b //CTR block 7 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 0 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 0 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 0 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 0 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 0 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 0 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 0 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 0 ldp q28, q26, [x11, #32] //load rk2, rk3 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 1 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 1 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 1 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 1 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 1 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 1 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 1 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 2 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 2 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 1 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 2 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 2 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 2 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 2 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 2 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 2 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 3 ldp q27, q28, [x11, #64] //load rk4, rk5 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 3 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 3 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 3 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 3 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 3 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 3 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 3 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 4 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 4 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 4 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 4 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 4 aese 
v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 4 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 5 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 4 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 5 ldp q26, q27, [x11, #96] //load rk6, rk7 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 5 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 5 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 5 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 5 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 5 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 5 sub x5, x5, #1 //byte_len - 1 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 6 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 6 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 6 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 6 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 6 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 6 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 6 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 6 ldp q28, q26, [x11, #128] //load rk8, rk9 add v30.4s, v30.4s, v31.4s //CTR block 7 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 7 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 7 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 7 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 7 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 7 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 7 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 7 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 7 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 8 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 8 and x5, x5, #0xffffffffffffff80 //number of bytes to be processed in main loop (at least 1 byte must be handled by tail) aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 8 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 8 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 8 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 8 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 8 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 8 add x4, x0, x1, lsr #3 //end_input_ptr aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 9 ld1 { v19.16b}, [x3] ext v19.16b, v19.16b, v19.16b, #8 rev64 v19.16b, v19.16b ldp q27, q28, [x11, #160] //load rk10, rk11 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 9 add x5, x5, x0 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 9 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 9 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 9 cmp x0, x5 //check if we have <= 8 blocks aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 9 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 9 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 10 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 10 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 10 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 10 aese v0.16b, v27.16b aesmc 
v0.16b, v0.16b //AES block 0 - round 10 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 10 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 10 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 10 ldr q26, [x11, #192] //load rk12 aese v0.16b, v28.16b //AES block 0 - round 11 aese v1.16b, v28.16b //AES block 1 - round 11 aese v4.16b, v28.16b //AES block 4 - round 11 aese v6.16b, v28.16b //AES block 6 - round 11 aese v5.16b, v28.16b //AES block 5 - round 11 aese v7.16b, v28.16b //AES block 7 - round 11 aese v2.16b, v28.16b //AES block 2 - round 11 aese v3.16b, v28.16b //AES block 3 - round 11 b.ge L192_dec_tail //handle tail ldp q8, q9, [x0], #32 //AES block 0, 1 - load ciphertext ldp q10, q11, [x0], #32 //AES block 2, 3 - load ciphertext ldp q12, q13, [x0], #32 //AES block 4, 5 - load ciphertext .long 0xce016921 //eor3 v1.16b, v9.16b, v1.16b, v26.16b //AES block 1 - result .long 0xce006900 //eor3 v0.16b, v8.16b, v0.16b, v26.16b //AES block 0 - result stp q0, q1, [x2], #32 //AES block 0, 1 - store result rev32 v0.16b, v30.16b //CTR block 8 add v30.4s, v30.4s, v31.4s //CTR block 8 rev32 v1.16b, v30.16b //CTR block 9 add v30.4s, v30.4s, v31.4s //CTR block 9 .long 0xce036963 //eor3 v3.16b, v11.16b, v3.16b, v26.16b //AES block 3 - result .long 0xce026942 //eor3 v2.16b, v10.16b, v2.16b, v26.16b //AES block 2 - result stp q2, q3, [x2], #32 //AES block 2, 3 - store result ldp q14, q15, [x0], #32 //AES block 6, 7 - load ciphertext rev32 v2.16b, v30.16b //CTR block 10 add v30.4s, v30.4s, v31.4s //CTR block 10 .long 0xce046984 //eor3 v4.16b, v12.16b, v4.16b, v26.16b //AES block 4 - result rev32 v3.16b, v30.16b //CTR block 11 add v30.4s, v30.4s, v31.4s //CTR block 11 .long 0xce0569a5 //eor3 v5.16b, v13.16b, v5.16b, v26.16b //AES block 5 - result stp q4, q5, [x2], #32 //AES block 4, 5 - store result cmp x0, x5 //check if we have <= 8 blocks .long 0xce0669c6 //eor3 v6.16b, v14.16b, v6.16b, v26.16b //AES block 6 - result .long 0xce0769e7 //eor3 v7.16b, v15.16b, v7.16b, v26.16b //AES block 7 - result rev32 v4.16b, v30.16b //CTR block 12 add v30.4s, v30.4s, v31.4s //CTR block 12 stp q6, q7, [x2], #32 //AES block 6, 7 - store result b.ge L192_dec_prepretail //do prepretail L192_dec_main_loop: //main loop start rev64 v9.16b, v9.16b //GHASH block 8k+1 ldp q26, q27, [x11, #0] //load rk0, rk1 ext v19.16b, v19.16b, v19.16b, #8 //PRE 0 rev64 v8.16b, v8.16b //GHASH block 8k rev32 v5.16b, v30.16b //CTR block 8k+13 add v30.4s, v30.4s, v31.4s //CTR block 8k+13 ldr q23, [x6, #144] //load h7l | h7h ldr q25, [x6, #176] //load h8l | h8h rev64 v12.16b, v12.16b //GHASH block 8k+4 rev64 v11.16b, v11.16b //GHASH block 8k+3 eor v8.16b, v8.16b, v19.16b //PRE 1 rev32 v6.16b, v30.16b //CTR block 8k+14 add v30.4s, v30.4s, v31.4s //CTR block 8k+14 rev64 v13.16b, v13.16b //GHASH block 8k+5 rev32 v7.16b, v30.16b //CTR block 8k+15 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 0 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 0 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 0 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 0 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 0 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 0 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 0 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 0 pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high ldp q28, 
q26, [x11, #32] //load rk2, rk3 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 1 pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low ldr q20, [x6, #96] //load h5l | h5h ldr q22, [x6, #128] //load h6l | h6h aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 1 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 1 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 1 pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 1 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 1 trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid rev64 v10.16b, v10.16b //GHASH block 8k+2 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 1 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 1 ldr q21, [x6, #112] //load h6k | h5k ldr q24, [x6, #160] //load h8k | h7k trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 2 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 2 pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low .long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 2 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 3 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 2 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 2 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 2 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 2 ldr q23, [x6, #48] //load h3l | h3h ldr q25, [x6, #80] //load h4l | h4h aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 2 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 3 pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 3 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 3 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 3 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 3 ldp q27, q28, [x11, #64] //load rk4, rk5 eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 3 trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid add v30.4s, v30.4s, v31.4s //CTR block 8k+15 pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 3 pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 4 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 4 eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid aese v5.16b, 
v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 4 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 4 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 4 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 4 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 4 ldr q20, [x6] //load h1l | h1h ldr q22, [x6, #32] //load h2l | h2h aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 5 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 5 ldp q26, q27, [x11, #96] //load rk6, rk7 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 5 rev64 v15.16b, v15.16b //GHASH block 8k+7 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 5 .long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 5 pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 5 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 5 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 5 rev64 v14.16b, v14.16b //GHASH block 8k+6 ldr q21, [x6, #16] //load h2k | h1k ldr q24, [x6, #64] //load h4k | h3k pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 6 eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 6 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 6 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 6 pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 6 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 6 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 7 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 7 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 6 pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid .long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high .long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 6 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 7 ldp q28, q26, [x11, #128] //load rk8, rk9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 7 eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 7 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 7 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 7 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 7 .long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high pmull v21.1q, 
v14.1d, v21.1d //GHASH block 8k+7 - mid ldr d16, [x10] //MODULO - load modulo constant pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 8 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 8 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 8 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 8 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 8 .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 8 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 8 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 8 .long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high rev32 v20.16b, v30.16b //CTR block 8k+16 add v30.4s, v30.4s, v31.4s //CTR block 8k+16 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 9 .long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 9 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 9 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 9 ldp q27, q28, [x11, #160] //load rk10, rk11 .long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up ldp q8, q9, [x0], #32 //AES block 8k+8, 8k+9 - load ciphertext aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 9 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 9 ldp q10, q11, [x0], #32 //AES block 8k+10, 8k+11 - load ciphertext rev32 v22.16b, v30.16b //CTR block 8k+17 pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid add v30.4s, v30.4s, v31.4s //CTR block 8k+17 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 9 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 9 ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 10 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 10 ldp q12, q13, [x0], #32 //AES block 8k+12, 8k+13 - load ciphertext rev32 v23.16b, v30.16b //CTR block 8k+18 add v30.4s, v30.4s, v31.4s //CTR block 8k+18 .long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 10 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 10 ldr q26, [x11, #192] //load rk12 ldp q14, q15, [x0], #32 //AES block 8k+14, 8k+15 - load ciphertext aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 10 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 10 aese v0.16b, v28.16b //AES block 8k+8 - round 11 ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment aese v1.16b, v28.16b //AES block 8k+9 - round 11 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 10 aese v6.16b, v28.16b //AES block 8k+14 - round 11 aese v3.16b, v28.16b //AES block 8k+11 - round 11 .long 0xce006900 //eor3 v0.16b, v8.16b, v0.16b, v26.16b //AES block 8k+8 - result rev32 v25.16b, v30.16b //CTR block 8k+19 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 10 aese v4.16b, v28.16b //AES block 8k+12 - round 11 aese v2.16b, v28.16b //AES block 8k+10 - round 11 add v30.4s, v30.4s, v31.4s //CTR block 8k+19 aese v7.16b, v28.16b //AES 
block 8k+15 - round 11 aese v5.16b, v28.16b //AES block 8k+13 - round 11 pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low .long 0xce016921 //eor3 v1.16b, v9.16b, v1.16b, v26.16b //AES block 8k+9 - result stp q0, q1, [x2], #32 //AES block 8k+8, 8k+9 - store result .long 0xce036963 //eor3 v3.16b, v11.16b, v3.16b, v26.16b //AES block 8k+11 - result .long 0xce026942 //eor3 v2.16b, v10.16b, v2.16b, v26.16b //AES block 8k+10 - result .long 0xce0769e7 //eor3 v7.16b, v15.16b, v7.16b, v26.16b //AES block 8k+15 - result stp q2, q3, [x2], #32 //AES block 8k+10, 8k+11 - store result .long 0xce0569a5 //eor3 v5.16b, v13.16b, v5.16b, v26.16b //AES block 8k+13 - result .long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low mov v3.16b, v25.16b //CTR block 8k+19 .long 0xce046984 //eor3 v4.16b, v12.16b, v4.16b, v26.16b //AES block 8k+12 - result stp q4, q5, [x2], #32 //AES block 8k+12, 8k+13 - store result cmp x0, x5 //LOOP CONTROL .long 0xce0669c6 //eor3 v6.16b, v14.16b, v6.16b, v26.16b //AES block 8k+14 - result stp q6, q7, [x2], #32 //AES block 8k+14, 8k+15 - store result mov v0.16b, v20.16b //CTR block 8k+16 mov v1.16b, v22.16b //CTR block 8k+17 mov v2.16b, v23.16b //CTR block 8k+18 rev32 v4.16b, v30.16b //CTR block 8k+20 add v30.4s, v30.4s, v31.4s //CTR block 8k+20 b.lt L192_dec_main_loop L192_dec_prepretail: //PREPRETAIL ldp q26, q27, [x11, #0] //load rk0, rk1 rev32 v5.16b, v30.16b //CTR block 8k+13 add v30.4s, v30.4s, v31.4s //CTR block 8k+13 ldr q23, [x6, #144] //load h7l | h7h ldr q25, [x6, #176] //load h8l | h8h rev64 v8.16b, v8.16b //GHASH block 8k ext v19.16b, v19.16b, v19.16b, #8 //PRE 0 rev64 v11.16b, v11.16b //GHASH block 8k+3 rev32 v6.16b, v30.16b //CTR block 8k+14 add v30.4s, v30.4s, v31.4s //CTR block 8k+14 eor v8.16b, v8.16b, v19.16b //PRE 1 rev64 v10.16b, v10.16b //GHASH block 8k+2 rev64 v9.16b, v9.16b //GHASH block 8k+1 ldr q20, [x6, #96] //load h5l | h5h ldr q22, [x6, #128] //load h6l | h6h rev32 v7.16b, v30.16b //CTR block 8k+15 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 0 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 0 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 0 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 0 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 0 pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 0 pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 0 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 1 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 0 ldp q28, q26, [x11, #32] //load rk2, rk3 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 1 pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 1 pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 1 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 1 trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 
8k+10 - round 1 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 1 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 1 ldr q21, [x6, #112] //load h6k | h5k ldr q24, [x6, #160] //load h8k | h7k aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 2 eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 2 rev64 v13.16b, v13.16b //GHASH block 8k+5 pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low .long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 2 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 2 trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 3 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 2 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 2 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 2 trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 2 pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 3 eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 3 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 3 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 3 .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low ldp q27, q28, [x11, #64] //load rk4, rk5 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 3 ldr q23, [x6, #48] //load h3l | h3h ldr q25, [x6, #80] //load h4l | h4h pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid ldr q20, [x6] //load h1l | h1h ldr q22, [x6, #32] //load h2l | h2h eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 3 rev64 v15.16b, v15.16b //GHASH block 8k+7 .long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid rev64 v12.16b, v12.16b //GHASH block 8k+4 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 4 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 4 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 3 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 4 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 4 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 4 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 4 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 4 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 4 rev64 v14.16b, v14.16b //GHASH block 8k+6 ldr q21, [x6, #16] //load h2k | h1k ldr q24, [x6, #64] //load h4k | h3k trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 5 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 5 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 5 ldp q26, q27, [x11, #96] //load rk6, rk7 aese v6.16b, v28.16b aesmc v6.16b, 
v6.16b //AES block 8k+14 - round 5 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 5 pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 5 pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 5 trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 5 eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 6 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 6 eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 6 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 6 pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 6 pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 6 pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high .long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 7 .long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 6 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 6 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 7 ldp q28, q26, [x11, #128] //load rk8, rk9 pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 7 ldr d16, [x10] //MODULO - load modulo constant .long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 7 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 7 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 7 .long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low .long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 7 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 7 .long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 8 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 8 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 8 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 8 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 8 pmull v29.1q, v17.1d, v16.1d 
//MODULO - top 64b align with mid aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 8 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 8 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 8 ldp q27, q28, [x11, #160] //load rk10, rk11 .long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 9 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 9 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 9 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 9 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 9 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 9 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 9 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 9 pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low ldr q26, [x11, #192] //load rk12 ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 10 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 10 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 10 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 10 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 10 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 10 aese v0.16b, v28.16b //AES block 8k+8 - round 11 .long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low aese v5.16b, v28.16b //AES block 8k+13 - round 11 aese v2.16b, v28.16b //AES block 8k+10 - round 11 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 10 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 10 aese v6.16b, v28.16b //AES block 8k+14 - round 11 aese v4.16b, v28.16b //AES block 8k+12 - round 11 add v30.4s, v30.4s, v31.4s //CTR block 8k+15 aese v3.16b, v28.16b //AES block 8k+11 - round 11 aese v1.16b, v28.16b //AES block 8k+9 - round 11 aese v7.16b, v28.16b //AES block 8k+15 - round 11 L192_dec_tail: //TAIL sub x5, x4, x0 //main_end_input_ptr is number of bytes left to process ldp q20, q21, [x6, #96] //load h5l | h5h ldr q9, [x0], #16 //AES block 8k+8 - load ciphertext ldp q24, q25, [x6, #160] //load h8k | h7k mov v29.16b, v26.16b ldp q22, q23, [x6, #128] //load h6l | h6h ext v16.16b, v19.16b, v19.16b, #8 //prepare final partial tag .long 0xce00752c //eor3 v12.16b, v9.16b, v0.16b, v29.16b //AES block 8k+8 - result cmp x5, #112 b.gt L192_dec_blocks_more_than_7 mov v7.16b, v6.16b movi v17.8b, #0 sub v30.4s, v30.4s, v31.4s mov v6.16b, v5.16b mov v5.16b, v4.16b mov v4.16b, v3.16b cmp x5, #96 movi v19.8b, #0 mov v3.16b, v2.16b mov v2.16b, v1.16b movi v18.8b, #0 b.gt L192_dec_blocks_more_than_6 mov v7.16b, v6.16b mov v6.16b, v5.16b mov v5.16b, v4.16b mov v4.16b, v3.16b mov v3.16b, v1.16b sub v30.4s, v30.4s, v31.4s cmp x5, #80 b.gt L192_dec_blocks_more_than_5 mov v7.16b, v6.16b mov v6.16b, v5.16b mov v5.16b, v4.16b mov v4.16b, v1.16b cmp x5, #64 sub v30.4s, v30.4s, v31.4s b.gt L192_dec_blocks_more_than_4 sub v30.4s, v30.4s, v31.4s mov v7.16b, v6.16b mov v6.16b, v5.16b mov v5.16b, v1.16b cmp x5, #48 b.gt L192_dec_blocks_more_than_3 sub v30.4s, v30.4s, v31.4s mov v7.16b, v6.16b cmp x5, #32 mov v6.16b, v1.16b ldr q24, [x6, #64] //load h4k | h3k b.gt L192_dec_blocks_more_than_2 sub v30.4s, v30.4s, v31.4s mov v7.16b, v1.16b cmp x5, #16 b.gt 
L192_dec_blocks_more_than_1 sub v30.4s, v30.4s, v31.4s ldr q21, [x6, #16] //load h2k | h1k b L192_dec_blocks_less_than_1 L192_dec_blocks_more_than_7: //blocks left > 7 rev64 v8.16b, v9.16b //GHASH final-7 block ins v18.d[0], v24.d[1] //GHASH final-7 block - mid eor v8.16b, v8.16b, v16.16b //feed in partial tag pmull2 v17.1q, v8.2d, v25.2d //GHASH final-7 block - high ins v27.d[0], v8.d[1] //GHASH final-7 block - mid ldr q9, [x0], #16 //AES final-6 block - load ciphertext pmull v19.1q, v8.1d, v25.1d //GHASH final-7 block - low eor v27.8b, v27.8b, v8.8b //GHASH final-7 block - mid st1 { v12.16b}, [x2], #16 //AES final-7 block - store result .long 0xce01752c //eor3 v12.16b, v9.16b, v1.16b, v29.16b //AES final-6 block - result pmull v18.1q, v27.1d, v18.1d //GHASH final-7 block - mid movi v16.8b, #0 //supress further partial tag feed in L192_dec_blocks_more_than_6: //blocks left > 6 rev64 v8.16b, v9.16b //GHASH final-6 block eor v8.16b, v8.16b, v16.16b //feed in partial tag ldr q9, [x0], #16 //AES final-5 block - load ciphertext ins v27.d[0], v8.d[1] //GHASH final-6 block - mid eor v27.8b, v27.8b, v8.8b //GHASH final-6 block - mid movi v16.8b, #0 //supress further partial tag feed in pmull2 v28.1q, v8.2d, v23.2d //GHASH final-6 block - high st1 { v12.16b}, [x2], #16 //AES final-6 block - store result .long 0xce02752c //eor3 v12.16b, v9.16b, v2.16b, v29.16b //AES final-5 block - result eor v17.16b, v17.16b, v28.16b //GHASH final-6 block - high pmull v27.1q, v27.1d, v24.1d //GHASH final-6 block - mid pmull v26.1q, v8.1d, v23.1d //GHASH final-6 block - low eor v18.16b, v18.16b, v27.16b //GHASH final-6 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-6 block - low L192_dec_blocks_more_than_5: //blocks left > 5 rev64 v8.16b, v9.16b //GHASH final-5 block eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-5 block - mid eor v27.8b, v27.8b, v8.8b //GHASH final-5 block - mid ins v27.d[1], v27.d[0] //GHASH final-5 block - mid pmull2 v28.1q, v8.2d, v22.2d //GHASH final-5 block - high ldr q9, [x0], #16 //AES final-4 block - load ciphertext eor v17.16b, v17.16b, v28.16b //GHASH final-5 block - high pmull v26.1q, v8.1d, v22.1d //GHASH final-5 block - low pmull2 v27.1q, v27.2d, v21.2d //GHASH final-5 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-5 block - low movi v16.8b, #0 //supress further partial tag feed in st1 { v12.16b}, [x2], #16 //AES final-5 block - store result eor v18.16b, v18.16b, v27.16b //GHASH final-5 block - mid .long 0xce03752c //eor3 v12.16b, v9.16b, v3.16b, v29.16b //AES final-4 block - result L192_dec_blocks_more_than_4: //blocks left > 4 rev64 v8.16b, v9.16b //GHASH final-4 block eor v8.16b, v8.16b, v16.16b //feed in partial tag movi v16.8b, #0 //supress further partial tag feed in ldr q9, [x0], #16 //AES final-3 block - load ciphertext ins v27.d[0], v8.d[1] //GHASH final-4 block - mid pmull v26.1q, v8.1d, v20.1d //GHASH final-4 block - low eor v27.8b, v27.8b, v8.8b //GHASH final-4 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-4 block - low pmull v27.1q, v27.1d, v21.1d //GHASH final-4 block - mid st1 { v12.16b}, [x2], #16 //AES final-4 block - store result pmull2 v28.1q, v8.2d, v20.2d //GHASH final-4 block - high .long 0xce04752c //eor3 v12.16b, v9.16b, v4.16b, v29.16b //AES final-3 block - result eor v18.16b, v18.16b, v27.16b //GHASH final-4 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-4 block - high L192_dec_blocks_more_than_3: //blocks left > 3 ldr q25, [x6, #80] //load h4l | h4h rev64 v8.16b, v9.16b //GHASH 
final-3 block ldr q9, [x0], #16 //AES final-2 block - load ciphertext eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-3 block - mid pmull2 v28.1q, v8.2d, v25.2d //GHASH final-3 block - high eor v17.16b, v17.16b, v28.16b //GHASH final-3 block - high movi v16.8b, #0 //supress further partial tag feed in pmull v26.1q, v8.1d, v25.1d //GHASH final-3 block - low st1 { v12.16b}, [x2], #16 //AES final-3 block - store result eor v27.8b, v27.8b, v8.8b //GHASH final-3 block - mid .long 0xce05752c //eor3 v12.16b, v9.16b, v5.16b, v29.16b //AES final-2 block - result eor v19.16b, v19.16b, v26.16b //GHASH final-3 block - low ldr q24, [x6, #64] //load h4k | h3k ins v27.d[1], v27.d[0] //GHASH final-3 block - mid pmull2 v27.1q, v27.2d, v24.2d //GHASH final-3 block - mid eor v18.16b, v18.16b, v27.16b //GHASH final-3 block - mid L192_dec_blocks_more_than_2: //blocks left > 2 rev64 v8.16b, v9.16b //GHASH final-2 block ldr q23, [x6, #48] //load h3l | h3h eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-2 block - mid ldr q9, [x0], #16 //AES final-1 block - load ciphertext pmull2 v28.1q, v8.2d, v23.2d //GHASH final-2 block - high eor v27.8b, v27.8b, v8.8b //GHASH final-2 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-2 block - high pmull v26.1q, v8.1d, v23.1d //GHASH final-2 block - low pmull v27.1q, v27.1d, v24.1d //GHASH final-2 block - mid movi v16.8b, #0 //supress further partial tag feed in eor v19.16b, v19.16b, v26.16b //GHASH final-2 block - low st1 { v12.16b}, [x2], #16 //AES final-2 block - store result eor v18.16b, v18.16b, v27.16b //GHASH final-2 block - mid .long 0xce06752c //eor3 v12.16b, v9.16b, v6.16b, v29.16b //AES final-1 block - result L192_dec_blocks_more_than_1: //blocks left > 1 rev64 v8.16b, v9.16b //GHASH final-1 block ldr q9, [x0], #16 //AES final block - load ciphertext ldr q22, [x6, #32] //load h1l | h1h eor v8.16b, v8.16b, v16.16b //feed in partial tag movi v16.8b, #0 //supress further partial tag feed in ldr q21, [x6, #16] //load h2k | h1k pmull v26.1q, v8.1d, v22.1d //GHASH final-1 block - low ins v27.d[0], v8.d[1] //GHASH final-1 block - mid st1 { v12.16b}, [x2], #16 //AES final-1 block - store result pmull2 v28.1q, v8.2d, v22.2d //GHASH final-1 block - high .long 0xce07752c //eor3 v12.16b, v9.16b, v7.16b, v29.16b //AES final block - result eor v27.8b, v27.8b, v8.8b //GHASH final-1 block - mid ins v27.d[1], v27.d[0] //GHASH final-1 block - mid pmull2 v27.1q, v27.2d, v21.2d //GHASH final-1 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-1 block - low eor v18.16b, v18.16b, v27.16b //GHASH final-1 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-1 block - high L192_dec_blocks_less_than_1: //blocks left <= 1 rev32 v30.16b, v30.16b and x1, x1, #127 //bit_length %= 128 sub x1, x1, #128 //bit_length -= 128 str q30, [x16] //store the updated counter neg x1, x1 //bit_length = 128 - #bits in input (in range [1,128]) mvn x7, xzr //temp0_x = 0xffffffffffffffff and x1, x1, #127 //bit_length %= 128 mvn x8, xzr //temp1_x = 0xffffffffffffffff lsr x7, x7, x1 //temp0_x is mask for top 64b of last block cmp x1, #64 csel x13, x8, x7, lt csel x14, x7, xzr, lt ldr q20, [x6] //load h1l | h1h mov v0.d[1], x14 ld1 { v26.16b}, [x2] //load existing bytes where the possibly partial last block is to be stored mov v0.d[0], x13 //ctr0b is mask for last block and v9.16b, v9.16b, v0.16b //possibly partial last block has zeroes in highest bits bif v12.16b, v26.16b, v0.16b //insert existing bytes in top end of result 
before storing rev64 v8.16b, v9.16b //GHASH final block st1 { v12.16b}, [x2] //store all 16B eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v16.d[0], v8.d[1] //GHASH final block - mid pmull v26.1q, v8.1d, v20.1d //GHASH final block - low eor v16.8b, v16.8b, v8.8b //GHASH final block - mid pmull2 v28.1q, v8.2d, v20.2d //GHASH final block - high eor v19.16b, v19.16b, v26.16b //GHASH final block - low pmull v16.1q, v16.1d, v21.1d //GHASH final block - mid eor v17.16b, v17.16b, v28.16b //GHASH final block - high eor v14.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up eor v18.16b, v18.16b, v16.16b //GHASH final block - mid ldr d16, [x10] //MODULO - load modulo constant pmull v21.1q, v17.1d, v16.1d //MODULO - top 64b align with mid ext v17.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment eor v18.16b, v18.16b, v14.16b //MODULO - karatsuba tidy up .long 0xce115652 //eor3 v18.16b, v18.16b, v17.16b, v21.16b //MODULO - fold into mid pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low ext v18.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment .long 0xce124673 //eor3 v19.16b, v19.16b, v18.16b, v17.16b //MODULO - fold into low ext v19.16b, v19.16b, v19.16b, #8 rev64 v19.16b, v19.16b st1 { v19.16b }, [x3] mov x0, x9 ldp d10, d11, [sp, #16] ldp d12, d13, [sp, #32] ldp d14, d15, [sp, #48] ldp d8, d9, [sp], #80 ret L192_dec_ret: mov w0, #0x0 ret .globl aesv8_gcm_8x_enc_256 .def aesv8_gcm_8x_enc_256 .type 32 .endef .align 4 aesv8_gcm_8x_enc_256: AARCH64_VALID_CALL_TARGET cbz x1, L256_enc_ret stp d8, d9, [sp, #-80]! lsr x9, x1, #3 mov x16, x4 mov x11, x5 stp d10, d11, [sp, #16] stp d12, d13, [sp, #32] stp d14, d15, [sp, #48] mov x5, #0xc200000000000000 stp x5, xzr, [sp, #64] add x10, sp, #64 ld1 { v0.16b}, [x16] //CTR block 0 mov x5, x9 mov x15, #0x100000000 //set up counter increment movi v31.16b, #0x0 mov v31.d[1], x15 sub x5, x5, #1 //byte_len - 1 and x5, x5, #0xffffffffffffff80 //number of bytes to be processed in main loop (at least 1 byte must be handled by tail) add x5, x5, x0 rev32 v30.16b, v0.16b //set up reversed counter add v30.4s, v30.4s, v31.4s //CTR block 0 rev32 v1.16b, v30.16b //CTR block 1 add v30.4s, v30.4s, v31.4s //CTR block 1 rev32 v2.16b, v30.16b //CTR block 2 add v30.4s, v30.4s, v31.4s //CTR block 2 rev32 v3.16b, v30.16b //CTR block 3 add v30.4s, v30.4s, v31.4s //CTR block 3 rev32 v4.16b, v30.16b //CTR block 4 add v30.4s, v30.4s, v31.4s //CTR block 4 rev32 v5.16b, v30.16b //CTR block 5 add v30.4s, v30.4s, v31.4s //CTR block 5 ldp q26, q27, [x11, #0] //load rk0, rk1 rev32 v6.16b, v30.16b //CTR block 6 add v30.4s, v30.4s, v31.4s //CTR block 6 rev32 v7.16b, v30.16b //CTR block 7 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 0 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 0 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 0 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 0 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 0 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 0 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 0 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 0 ldp q28, q26, [x11, #32] //load rk2, rk3 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 1 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 1 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 1 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 1 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - 
round 1 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 1 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 1 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 2 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 2 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 1 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 2 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 2 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 2 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 2 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 2 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 2 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 3 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 3 ldp q27, q28, [x11, #64] //load rk4, rk5 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 3 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 3 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 3 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 3 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 3 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 3 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 4 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 4 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 4 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 4 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 4 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 4 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 4 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 5 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 5 ldp q26, q27, [x11, #96] //load rk6, rk7 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 5 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 5 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 5 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 5 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 5 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 5 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 6 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 6 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 6 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 6 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 6 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 6 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 6 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 6 ldp q28, q26, [x11, #128] //load rk8, rk9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 7 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 7 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 7 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 7 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 7 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 7 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 7 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 7 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 
- round 8 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 8 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 8 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 8 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 8 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 8 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 8 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 8 ld1 { v19.16b}, [x3] ext v19.16b, v19.16b, v19.16b, #8 rev64 v19.16b, v19.16b ldp q27, q28, [x11, #160] //load rk10, rk11 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 9 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 9 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 9 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 9 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 9 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 9 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 9 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 10 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 10 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 10 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 10 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 10 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 10 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 10 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 10 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 11 ldp q26, q27, [x11, #192] //load rk12, rk13 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 11 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 11 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 11 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 11 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 11 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 11 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 11 add v30.4s, v30.4s, v31.4s //CTR block 7 ldr q28, [x11, #224] //load rk14 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 12 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 12 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 12 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 12 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 12 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 12 aese v2.16b, v27.16b //AES block 2 - round 13 aese v1.16b, v27.16b //AES block 1 - round 13 aese v4.16b, v27.16b //AES block 4 - round 13 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 12 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 12 aese v0.16b, v27.16b //AES block 0 - round 13 aese v5.16b, v27.16b //AES block 5 - round 13 aese v6.16b, v27.16b //AES block 6 - round 13 aese v7.16b, v27.16b //AES block 7 - round 13 aese v3.16b, v27.16b //AES block 3 - round 13 add x4, x0, x1, lsr #3 //end_input_ptr cmp x0, x5 //check if we have <= 8 blocks b.ge L256_enc_tail //handle tail ldp q8, q9, [x0], #32 //AES block 0, 1 - load plaintext ldp q10, q11, [x0], #32 //AES block 2, 3 - load plaintext .long 0xce007108 //eor3 v8.16b, v8.16b, v0.16b, v28.16b //AES block 0 - result rev32 v0.16b, v30.16b //CTR 
block 8 add v30.4s, v30.4s, v31.4s //CTR block 8 .long 0xce017129 //eor3 v9.16b, v9.16b, v1.16b, v28.16b //AES block 1 - result .long 0xce03716b //eor3 v11.16b, v11.16b, v3.16b, v28.16b //AES block 3 - result rev32 v1.16b, v30.16b //CTR block 9 add v30.4s, v30.4s, v31.4s //CTR block 9 ldp q12, q13, [x0], #32 //AES block 4, 5 - load plaintext ldp q14, q15, [x0], #32 //AES block 6, 7 - load plaintext .long 0xce02714a //eor3 v10.16b, v10.16b, v2.16b, v28.16b //AES block 2 - result cmp x0, x5 //check if we have <= 8 blocks rev32 v2.16b, v30.16b //CTR block 10 add v30.4s, v30.4s, v31.4s //CTR block 10 stp q8, q9, [x2], #32 //AES block 0, 1 - store result stp q10, q11, [x2], #32 //AES block 2, 3 - store result rev32 v3.16b, v30.16b //CTR block 11 add v30.4s, v30.4s, v31.4s //CTR block 11 .long 0xce04718c //eor3 v12.16b, v12.16b, v4.16b, v28.16b //AES block 4 - result .long 0xce0771ef //eor3 v15.16b, v15.16b, v7.16b, v28.16b //AES block 7 - result .long 0xce0671ce //eor3 v14.16b, v14.16b, v6.16b, v28.16b //AES block 6 - result .long 0xce0571ad //eor3 v13.16b, v13.16b, v5.16b, v28.16b //AES block 5 - result stp q12, q13, [x2], #32 //AES block 4, 5 - store result rev32 v4.16b, v30.16b //CTR block 12 stp q14, q15, [x2], #32 //AES block 6, 7 - store result add v30.4s, v30.4s, v31.4s //CTR block 12 b.ge L256_enc_prepretail //do prepretail L256_enc_main_loop: //main loop start ldp q26, q27, [x11, #0] //load rk0, rk1 rev32 v5.16b, v30.16b //CTR block 8k+13 add v30.4s, v30.4s, v31.4s //CTR block 8k+13 ldr q21, [x6, #112] //load h6k | h5k ldr q24, [x6, #160] //load h8k | h7k rev64 v11.16b, v11.16b //GHASH block 8k+3 ldr q20, [x6, #96] //load h5l | h5h ldr q22, [x6, #128] //load h6l | h6h rev64 v9.16b, v9.16b //GHASH block 8k+1 rev32 v6.16b, v30.16b //CTR block 8k+14 add v30.4s, v30.4s, v31.4s //CTR block 8k+14 rev64 v8.16b, v8.16b //GHASH block 8k rev64 v12.16b, v12.16b //GHASH block 8k+4 ext v19.16b, v19.16b, v19.16b, #8 //PRE 0 ldr q23, [x6, #144] //load h7l | h7h ldr q25, [x6, #176] //load h8l | h8h aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 0 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 0 rev32 v7.16b, v30.16b //CTR block 8k+15 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 0 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 0 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 0 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 0 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 0 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 0 ldp q28, q26, [x11, #32] //load rk2, rk3 eor v8.16b, v8.16b, v19.16b //PRE 1 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 1 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 1 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 1 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 1 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 1 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 1 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 1 pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 1 aese v1.16b, v28.16b aesmc 
v1.16b, v1.16b //AES block 8k+9 - round 2 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 2 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 2 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 2 pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 2 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 3 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 3 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 2 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 3 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 2 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 2 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 3 rev64 v14.16b, v14.16b //GHASH block 8k+6 pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 3 ldp q27, q28, [x11, #64] //load rk4, rk5 rev64 v10.16b, v10.16b //GHASH block 8k+2 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 3 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 3 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 3 eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high rev64 v13.16b, v13.16b //GHASH block 8k+5 pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low ldr q23, [x6, #48] //load h3l | h3h ldr q25, [x6, #80] //load h4l | h4h trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid .long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 4 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 4 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 4 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 4 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 4 trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 4 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 4 trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid ldp q26, q27, [x11, #96] //load rk6, rk7 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 5 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 5 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 5 eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 5 rev64 v15.16b, v15.16b //GHASH block 8k+7 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 5 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 5 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 5 pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 5 pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 6 aese v2.16b, v26.16b aesmc 
v2.16b, v2.16b //AES block 8k+10 - round 6 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 6 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 6 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 6 eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 6 .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 6 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 6 ldp q28, q26, [x11, #128] //load rk8, rk9 pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 7 ldr q20, [x6] //load h1l | h1h ldr q22, [x6, #32] //load h2l | h2h aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 7 .long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid ldr q21, [x6, #16] //load h2k | h1k ldr q24, [x6, #64] //load h4k | h3k aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 7 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 7 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 7 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 7 pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 7 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 7 pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 8 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 8 pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 9 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 8 pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 8 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 8 pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 8 trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 8 .long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 9 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 9 eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 9 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 9 ldp q27, q28, [x11, #160] //load rk10, rk11 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 9 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 9 pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high .long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low 
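// NOTE: the MODULO steps below reduce the GHASH product, which is accumulated
// Karatsuba-style in v17 (high), v18 (mid) and v19 (low), back to a single
// 128-bit value. The 64-bit constant 0xc200000000000000 loaded from [x10] is
// the reduction constant for the bit-reflected GHASH polynomial; the two
// pmull-by-constant folds first fold the high half into the mid accumulator,
// then the mid into the low accumulator, leaving the reduced value in v19.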
pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low ldr d16, [x10] //MODULO - load modulo constant pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 9 .long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low .long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 10 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 10 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 10 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 10 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 10 add v30.4s, v30.4s, v31.4s //CTR block 8k+15 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 10 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 10 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 10 .long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high ldp q26, q27, [x11, #192] //load rk12, rk13 rev32 v20.16b, v30.16b //CTR block 8k+16 ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment ldp q8, q9, [x0], #32 //AES block 8k+8, 8k+9 - load plaintext aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 11 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 11 add v30.4s, v30.4s, v31.4s //CTR block 8k+16 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 11 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 11 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 11 pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 11 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 12 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 11 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 12 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 12 rev32 v22.16b, v30.16b //CTR block 8k+17 add v30.4s, v30.4s, v31.4s //CTR block 8k+17 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 11 .long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 12 ldr q28, [x11, #224] //load rk14 aese v7.16b, v27.16b //AES block 8k+15 - round 13 ldp q10, q11, [x0], #32 //AES block 8k+10, 8k+11 - load plaintext aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 12 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 12 .long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 12 ldp q12, q13, [x0], #32 //AES block 4, 5 - load plaintext ldp q14, q15, [x0], #32 //AES block 6, 7 - load plaintext aese v2.16b, v27.16b //AES block 8k+10 - round 13 aese v4.16b, v27.16b //AES block 8k+12 - round 13 rev32 v23.16b, v30.16b //CTR block 8k+18 add v30.4s, v30.4s, v31.4s //CTR block 8k+18 aese v5.16b, v27.16b //AES block 8k+13 - round 13 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 12 aese v3.16b, v27.16b //AES block 8k+11 - round 13 cmp x0, x5 //LOOP CONTROL .long 0xce02714a 
//eor3 v10.16b, v10.16b, v2.16b, v28.16b //AES block 8k+10 - result rev32 v25.16b, v30.16b //CTR block 8k+19 add v30.4s, v30.4s, v31.4s //CTR block 8k+19 aese v0.16b, v27.16b //AES block 8k+8 - round 13 aese v6.16b, v27.16b //AES block 8k+14 - round 13 .long 0xce0571ad //eor3 v13.16b, v13.16b, v5.16b, v28.16b //AES block 5 - result ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low aese v1.16b, v27.16b //AES block 8k+9 - round 13 .long 0xce04718c //eor3 v12.16b, v12.16b, v4.16b, v28.16b //AES block 4 - result rev32 v4.16b, v30.16b //CTR block 8k+20 .long 0xce03716b //eor3 v11.16b, v11.16b, v3.16b, v28.16b //AES block 8k+11 - result mov v3.16b, v25.16b //CTR block 8k+19 .long 0xce017129 //eor3 v9.16b, v9.16b, v1.16b, v28.16b //AES block 8k+9 - result .long 0xce007108 //eor3 v8.16b, v8.16b, v0.16b, v28.16b //AES block 8k+8 - result add v30.4s, v30.4s, v31.4s //CTR block 8k+20 stp q8, q9, [x2], #32 //AES block 8k+8, 8k+9 - store result mov v2.16b, v23.16b //CTR block 8k+18 .long 0xce0771ef //eor3 v15.16b, v15.16b, v7.16b, v28.16b //AES block 7 - result .long 0xce154673 //eor3 v19.16b, v19.16b, v21.16b, v17.16b //MODULO - fold into low stp q10, q11, [x2], #32 //AES block 8k+10, 8k+11 - store result .long 0xce0671ce //eor3 v14.16b, v14.16b, v6.16b, v28.16b //AES block 6 - result mov v1.16b, v22.16b //CTR block 8k+17 stp q12, q13, [x2], #32 //AES block 4, 5 - store result stp q14, q15, [x2], #32 //AES block 6, 7 - store result mov v0.16b, v20.16b //CTR block 8k+16 b.lt L256_enc_main_loop L256_enc_prepretail: //PREPRETAIL rev32 v5.16b, v30.16b //CTR block 8k+13 ldp q26, q27, [x11, #0] //load rk0, rk1 add v30.4s, v30.4s, v31.4s //CTR block 8k+13 rev64 v10.16b, v10.16b //GHASH block 8k+2 rev32 v6.16b, v30.16b //CTR block 8k+14 add v30.4s, v30.4s, v31.4s //CTR block 8k+14 rev64 v13.16b, v13.16b //GHASH block 8k+5 ldr q21, [x6, #112] //load h6k | h5k ldr q24, [x6, #160] //load h8k | h7k rev32 v7.16b, v30.16b //CTR block 8k+15 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 0 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 0 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 0 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 0 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 0 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 0 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 0 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 0 ext v19.16b, v19.16b, v19.16b, #8 //PRE 0 rev64 v8.16b, v8.16b //GHASH block 8k aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 1 rev64 v9.16b, v9.16b //GHASH block 8k+1 ldp q28, q26, [x11, #32] //load rk2, rk3 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 1 ldr q23, [x6, #144] //load h7l | h7h ldr q25, [x6, #176] //load h8l | h8h aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 1 ldr q20, [x6, #96] //load h5l | h5h ldr q22, [x6, #128] //load h6l | h6h aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 1 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 1 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 1 eor v8.16b, v8.16b, v19.16b //PRE 1 rev64 v11.16b, v11.16b //GHASH block 8k+3 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 1 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 2 aese v2.16b, v28.16b aesmc 
v2.16b, v2.16b //AES block 8k+10 - round 2 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 1 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 2 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 2 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 2 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 2 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 2 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 2 ldp q27, q28, [x11, #64] //load rk4, rk5 trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high rev64 v14.16b, v14.16b //GHASH block 8k+6 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 3 pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 3 pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 3 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 3 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 3 eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 3 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 3 eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 3 pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 4 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 4 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 4 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 4 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 5 pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid .long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 4 trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 4 eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 4 pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid rev64 v12.16b, v12.16b //GHASH block 8k+4 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 5 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 5 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 5 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 5 ldp q26, q27, [x11, #96] //load rk6, rk7 ldr q23, [x6, #48] //load h3l | h3h ldr q25, [x6, #80] //load h4l | h4h pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low eor v18.16b, v18.16b, v24.16b //GHASH 
block 8k+1 - mid aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 5 rev64 v15.16b, v15.16b //GHASH block 8k+7 trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 5 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 5 .long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 6 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 6 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 6 ldr q21, [x6, #16] //load h2k | h1k ldr q24, [x6, #64] //load h4k | h3k aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 6 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 6 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 6 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 6 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 6 pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low ldr q20, [x6] //load h1l | h1h ldr q22, [x6, #32] //load h2l | h2h ldp q28, q26, [x11, #128] //load rk8, rk9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 7 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 7 pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 7 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 7 pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 7 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 7 eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 7 trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 7 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 8 .long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 8 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 8 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 8 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 8 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 8 eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 8 pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 8 pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low .long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid .long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high ldp q27, q28, [x11, #160] //load rk10, rk11 aese 
v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 9 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 9 .long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high .long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid ldr d16, [x10] //MODULO - load modulo constant .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 9 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 9 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 9 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 9 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 9 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 10 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 10 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 9 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 10 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 10 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 10 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 10 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 10 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 10 pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid .long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 11 ldp q26, q27, [x11, #192] //load rk12, rk13 ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 11 .long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 11 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 11 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 11 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 11 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 11 pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 11 ldr q28, [x11, #224] //load rk14 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 12 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 12 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 12 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 12 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 12 ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 12 add v30.4s, v30.4s, v31.4s //CTR block 8k+15 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 12 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 12 aese v0.16b, v27.16b //AES block 8k+8 - round 13 .long 0xce154673 //eor3 v19.16b, v19.16b, v21.16b, v17.16b //MODULO - fold into low aese v5.16b, v27.16b //AES block 8k+13 - round 13 aese v1.16b, v27.16b //AES block 8k+9 - round 13 aese v3.16b, v27.16b //AES block 8k+11 - round 13 aese v4.16b, v27.16b //AES block 8k+12 - round 13 aese v7.16b, v27.16b //AES block 8k+15 - round 13 aese v2.16b, v27.16b //AES block 8k+10 - round 13 
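// NOTE: the last aese of each block is issued without a trailing aesmc, as
// required for the final AES round; the last round key (rk14, held in v28 and
// copied to v29 in the tail) is not applied here but is folded into the
// three-way eor3 with the data blocks, merging the final key XOR with the
// CTR-mode XOR.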
aese v6.16b, v27.16b //AES block 8k+14 - round 13 L256_enc_tail: //TAIL ldp q24, q25, [x6, #160] //load h8l | h8h sub x5, x4, x0 //main_end_input_ptr is number of bytes left to process ldr q8, [x0], #16 //AES block 8k+8 - load plaintext ldp q20, q21, [x6, #96] //load h5l | h5h ext v16.16b, v19.16b, v19.16b, #8 //prepare final partial tag ldp q22, q23, [x6, #128] //load h6l | h6h mov v29.16b, v28.16b cmp x5, #112 .long 0xce007509 //eor3 v9.16b, v8.16b, v0.16b, v29.16b //AES block 8k+8 - result b.gt L256_enc_blocks_more_than_7 movi v19.8b, #0 mov v7.16b, v6.16b movi v17.8b, #0 mov v6.16b, v5.16b mov v5.16b, v4.16b mov v4.16b, v3.16b mov v3.16b, v2.16b sub v30.4s, v30.4s, v31.4s mov v2.16b, v1.16b movi v18.8b, #0 cmp x5, #96 b.gt L256_enc_blocks_more_than_6 mov v7.16b, v6.16b mov v6.16b, v5.16b cmp x5, #80 mov v5.16b, v4.16b mov v4.16b, v3.16b mov v3.16b, v1.16b sub v30.4s, v30.4s, v31.4s b.gt L256_enc_blocks_more_than_5 mov v7.16b, v6.16b sub v30.4s, v30.4s, v31.4s mov v6.16b, v5.16b mov v5.16b, v4.16b cmp x5, #64 mov v4.16b, v1.16b b.gt L256_enc_blocks_more_than_4 cmp x5, #48 mov v7.16b, v6.16b mov v6.16b, v5.16b mov v5.16b, v1.16b sub v30.4s, v30.4s, v31.4s b.gt L256_enc_blocks_more_than_3 cmp x5, #32 mov v7.16b, v6.16b ldr q24, [x6, #64] //load h4k | h3k mov v6.16b, v1.16b sub v30.4s, v30.4s, v31.4s b.gt L256_enc_blocks_more_than_2 mov v7.16b, v1.16b sub v30.4s, v30.4s, v31.4s cmp x5, #16 b.gt L256_enc_blocks_more_than_1 sub v30.4s, v30.4s, v31.4s ldr q21, [x6, #16] //load h2k | h1k b L256_enc_blocks_less_than_1 L256_enc_blocks_more_than_7: //blocks left > 7 st1 { v9.16b}, [x2], #16 //AES final-7 block - store result rev64 v8.16b, v9.16b //GHASH final-7 block eor v8.16b, v8.16b, v16.16b //feed in partial tag ldr q9, [x0], #16 //AES final-6 block - load plaintext pmull2 v17.1q, v8.2d, v25.2d //GHASH final-7 block - high ins v27.d[0], v8.d[1] //GHASH final-7 block - mid ins v18.d[0], v24.d[1] //GHASH final-7 block - mid movi v16.8b, #0 //supress further partial tag feed in eor v27.8b, v27.8b, v8.8b //GHASH final-7 block - mid .long 0xce017529 //eor3 v9.16b, v9.16b, v1.16b, v29.16b //AES final-6 block - result pmull v18.1q, v27.1d, v18.1d //GHASH final-7 block - mid pmull v19.1q, v8.1d, v25.1d //GHASH final-7 block - low L256_enc_blocks_more_than_6: //blocks left > 6 st1 { v9.16b}, [x2], #16 //AES final-6 block - store result rev64 v8.16b, v9.16b //GHASH final-6 block eor v8.16b, v8.16b, v16.16b //feed in partial tag pmull v26.1q, v8.1d, v23.1d //GHASH final-6 block - low ins v27.d[0], v8.d[1] //GHASH final-6 block - mid pmull2 v28.1q, v8.2d, v23.2d //GHASH final-6 block - high ldr q9, [x0], #16 //AES final-5 block - load plaintext eor v19.16b, v19.16b, v26.16b //GHASH final-6 block - low eor v27.8b, v27.8b, v8.8b //GHASH final-6 block - mid pmull v27.1q, v27.1d, v24.1d //GHASH final-6 block - mid .long 0xce027529 //eor3 v9.16b, v9.16b, v2.16b, v29.16b //AES final-5 block - result movi v16.8b, #0 //supress further partial tag feed in eor v18.16b, v18.16b, v27.16b //GHASH final-6 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-6 block - high L256_enc_blocks_more_than_5: //blocks left > 5 st1 { v9.16b}, [x2], #16 //AES final-5 block - store result rev64 v8.16b, v9.16b //GHASH final-5 block eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-5 block - mid pmull2 v28.1q, v8.2d, v22.2d //GHASH final-5 block - high eor v17.16b, v17.16b, v28.16b //GHASH final-5 block - high eor v27.8b, v27.8b, v8.8b //GHASH final-5 block - mid ins v27.d[1], v27.d[0] //GHASH 
final-5 block - mid ldr q9, [x0], #16 //AES final-4 block - load plaintext pmull v26.1q, v8.1d, v22.1d //GHASH final-5 block - low pmull2 v27.1q, v27.2d, v21.2d //GHASH final-5 block - mid movi v16.8b, #0 //supress further partial tag feed in eor v19.16b, v19.16b, v26.16b //GHASH final-5 block - low eor v18.16b, v18.16b, v27.16b //GHASH final-5 block - mid .long 0xce037529 //eor3 v9.16b, v9.16b, v3.16b, v29.16b //AES final-4 block - result L256_enc_blocks_more_than_4: //blocks left > 4 st1 { v9.16b}, [x2], #16 //AES final-4 block - store result rev64 v8.16b, v9.16b //GHASH final-4 block ldr q9, [x0], #16 //AES final-3 block - load plaintext eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-4 block - mid pmull2 v28.1q, v8.2d, v20.2d //GHASH final-4 block - high .long 0xce047529 //eor3 v9.16b, v9.16b, v4.16b, v29.16b //AES final-3 block - result pmull v26.1q, v8.1d, v20.1d //GHASH final-4 block - low eor v27.8b, v27.8b, v8.8b //GHASH final-4 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-4 block - low pmull v27.1q, v27.1d, v21.1d //GHASH final-4 block - mid movi v16.8b, #0 //supress further partial tag feed in eor v18.16b, v18.16b, v27.16b //GHASH final-4 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-4 block - high L256_enc_blocks_more_than_3: //blocks left > 3 st1 { v9.16b}, [x2], #16 //AES final-3 block - store result ldr q25, [x6, #80] //load h4l | h4h rev64 v8.16b, v9.16b //GHASH final-3 block eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-3 block - mid pmull2 v28.1q, v8.2d, v25.2d //GHASH final-3 block - high eor v17.16b, v17.16b, v28.16b //GHASH final-3 block - high eor v27.8b, v27.8b, v8.8b //GHASH final-3 block - mid ldr q24, [x6, #64] //load h4k | h3k ins v27.d[1], v27.d[0] //GHASH final-3 block - mid ldr q9, [x0], #16 //AES final-2 block - load plaintext pmull2 v27.1q, v27.2d, v24.2d //GHASH final-3 block - mid pmull v26.1q, v8.1d, v25.1d //GHASH final-3 block - low .long 0xce057529 //eor3 v9.16b, v9.16b, v5.16b, v29.16b //AES final-2 block - result movi v16.8b, #0 //supress further partial tag feed in eor v18.16b, v18.16b, v27.16b //GHASH final-3 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-3 block - low L256_enc_blocks_more_than_2: //blocks left > 2 ldr q23, [x6, #48] //load h3l | h3h st1 { v9.16b}, [x2], #16 //AES final-2 block - store result rev64 v8.16b, v9.16b //GHASH final-2 block ldr q9, [x0], #16 //AES final-1 block - load plaintext eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-2 block - mid movi v16.8b, #0 //supress further partial tag feed in pmull2 v28.1q, v8.2d, v23.2d //GHASH final-2 block - high .long 0xce067529 //eor3 v9.16b, v9.16b, v6.16b, v29.16b //AES final-1 block - result eor v27.8b, v27.8b, v8.8b //GHASH final-2 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-2 block - high pmull v27.1q, v27.1d, v24.1d //GHASH final-2 block - mid pmull v26.1q, v8.1d, v23.1d //GHASH final-2 block - low eor v18.16b, v18.16b, v27.16b //GHASH final-2 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-2 block - low L256_enc_blocks_more_than_1: //blocks left > 1 st1 { v9.16b}, [x2], #16 //AES final-1 block - store result ldr q22, [x6, #32] //load h2l | h2h rev64 v8.16b, v9.16b //GHASH final-1 block ldr q9, [x0], #16 //AES final block - load plaintext eor v8.16b, v8.16b, v16.16b //feed in partial tag movi v16.8b, #0 //supress further partial tag feed in ins v27.d[0], v8.d[1] //GHASH final-1 block - mid pmull2 v28.1q, v8.2d, 
v22.2d //GHASH final-1 block - high .long 0xce077529 //eor3 v9.16b, v9.16b, v7.16b, v29.16b //AES final block - result eor v17.16b, v17.16b, v28.16b //GHASH final-1 block - high pmull v26.1q, v8.1d, v22.1d //GHASH final-1 block - low eor v27.8b, v27.8b, v8.8b //GHASH final-1 block - mid ldr q21, [x6, #16] //load h2k | h1k eor v19.16b, v19.16b, v26.16b //GHASH final-1 block - low ins v27.d[1], v27.d[0] //GHASH final-1 block - mid pmull2 v27.1q, v27.2d, v21.2d //GHASH final-1 block - mid eor v18.16b, v18.16b, v27.16b //GHASH final-1 block - mid L256_enc_blocks_less_than_1: //blocks left <= 1 and x1, x1, #127 //bit_length %= 128 sub x1, x1, #128 //bit_length -= 128 neg x1, x1 //bit_length = 128 - #bits in input (in range [1,128]) mvn x7, xzr //temp0_x = 0xffffffffffffffff and x1, x1, #127 //bit_length %= 128 lsr x7, x7, x1 //temp0_x is mask for top 64b of last block cmp x1, #64 mvn x8, xzr //temp1_x = 0xffffffffffffffff csel x14, x7, xzr, lt csel x13, x8, x7, lt mov v0.d[0], x13 //ctr0b is mask for last block ldr q20, [x6] //load h1l | h1h ld1 { v26.16b}, [x2] //load existing bytes where the possibly partial last block is to be stored mov v0.d[1], x14 and v9.16b, v9.16b, v0.16b //possibly partial last block has zeroes in highest bits rev64 v8.16b, v9.16b //GHASH final block rev32 v30.16b, v30.16b bif v9.16b, v26.16b, v0.16b //insert existing bytes in top end of result before storing str q30, [x16] //store the updated counter eor v8.16b, v8.16b, v16.16b //feed in partial tag st1 { v9.16b}, [x2] //store all 16B ins v16.d[0], v8.d[1] //GHASH final block - mid pmull2 v28.1q, v8.2d, v20.2d //GHASH final block - high pmull v26.1q, v8.1d, v20.1d //GHASH final block - low eor v17.16b, v17.16b, v28.16b //GHASH final block - high eor v19.16b, v19.16b, v26.16b //GHASH final block - low eor v16.8b, v16.8b, v8.8b //GHASH final block - mid pmull v16.1q, v16.1d, v21.1d //GHASH final block - mid eor v18.16b, v18.16b, v16.16b //GHASH final block - mid ldr d16, [x10] //MODULO - load modulo constant ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment .long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid .long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment .long 0xce115673 //eor3 v19.16b, v19.16b, v17.16b, v21.16b //MODULO - fold into low ext v19.16b, v19.16b, v19.16b, #8 rev64 v19.16b, v19.16b st1 { v19.16b }, [x3] mov x0, x9 //return sizes ldp d10, d11, [sp, #16] ldp d12, d13, [sp, #32] ldp d14, d15, [sp, #48] ldp d8, d9, [sp], #80 ret L256_enc_ret: mov w0, #0x0 ret .globl aesv8_gcm_8x_dec_256 .def aesv8_gcm_8x_dec_256 .type 32 .endef .align 4 aesv8_gcm_8x_dec_256: AARCH64_VALID_CALL_TARGET cbz x1, L256_dec_ret stp d8, d9, [sp, #-80]! 
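// NOTE: prologue/argument summary, mirroring the encrypt path above: x0 = input,
// x2 = output, x1 = length in bits, x4 = counter block pointer (copied to x16),
// x5 = AES round-key schedule (copied to x11), x3 = current GHASH tag, x6 =
// table of precomputed powers of H. d8-d15 are saved as callee-saved SIMD
// registers, 16 bytes of stack at [x10] hold the 0xc200000000000000 GHASH
// reduction constant, and x9 = x1 >> 3 is the byte count returned in x0 on exit.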
lsr x9, x1, #3 mov x16, x4 mov x11, x5 stp d10, d11, [sp, #16] stp d12, d13, [sp, #32] stp d14, d15, [sp, #48] mov x5, #0xc200000000000000 stp x5, xzr, [sp, #64] add x10, sp, #64 ld1 { v0.16b}, [x16] //CTR block 0 mov x15, #0x100000000 //set up counter increment movi v31.16b, #0x0 mov v31.d[1], x15 mov x5, x9 sub x5, x5, #1 //byte_len - 1 rev32 v30.16b, v0.16b //set up reversed counter add v30.4s, v30.4s, v31.4s //CTR block 0 rev32 v1.16b, v30.16b //CTR block 1 add v30.4s, v30.4s, v31.4s //CTR block 1 rev32 v2.16b, v30.16b //CTR block 2 add v30.4s, v30.4s, v31.4s //CTR block 2 ldp q26, q27, [x11, #0] //load rk0, rk1 rev32 v3.16b, v30.16b //CTR block 3 add v30.4s, v30.4s, v31.4s //CTR block 3 rev32 v4.16b, v30.16b //CTR block 4 add v30.4s, v30.4s, v31.4s //CTR block 4 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 0 rev32 v5.16b, v30.16b //CTR block 5 add v30.4s, v30.4s, v31.4s //CTR block 5 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 0 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 0 rev32 v6.16b, v30.16b //CTR block 6 add v30.4s, v30.4s, v31.4s //CTR block 6 rev32 v7.16b, v30.16b //CTR block 7 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 0 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 0 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 0 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 0 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 0 ldp q28, q26, [x11, #32] //load rk2, rk3 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 1 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 1 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 1 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 1 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 1 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 1 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 1 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 1 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 2 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 2 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 2 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 2 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 2 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 2 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 2 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 2 ldp q27, q28, [x11, #64] //load rk4, rk5 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 3 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 3 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 3 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 3 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 3 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 3 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 3 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 3 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 4 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 4 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 4 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 4 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - 
round 4 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 4 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 4 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 5 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 5 ldp q26, q27, [x11, #96] //load rk6, rk7 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 5 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 5 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 5 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 5 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 5 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 5 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 6 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 6 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 6 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 6 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 6 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 6 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 6 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 6 ldp q28, q26, [x11, #128] //load rk8, rk9 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 7 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - round 7 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 7 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 7 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 7 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 7 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 7 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 7 and x5, x5, #0xffffffffffffff80 //number of bytes to be processed in main loop (at least 1 byte must be handled by tail) aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 8 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 8 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 8 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 8 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 8 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 8 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 8 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 8 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 9 ld1 { v19.16b}, [x3] ext v19.16b, v19.16b, v19.16b, #8 rev64 v19.16b, v19.16b ldp q27, q28, [x11, #160] //load rk10, rk11 add x4, x0, x1, lsr #3 //end_input_ptr add x5, x5, x0 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 9 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 9 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 9 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 9 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 9 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 9 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 9 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 4 - round 10 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 7 - round 10 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 5 - round 10 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 1 - round 10 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 2 - round 10 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 0 - 
round 10 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 6 - round 10 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 3 - round 10 ldp q26, q27, [x11, #192] //load rk12, rk13 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 0 - round 11 add v30.4s, v30.4s, v31.4s //CTR block 7 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 7 - round 11 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 3 - round 11 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 1 - round 11 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 5 - round 11 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 4 - round 11 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 2 - round 11 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 6 - round 11 ldr q28, [x11, #224] //load rk14 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 1 - round 12 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 4 - round 12 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 5 - round 12 cmp x0, x5 //check if we have <= 8 blocks aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 3 - round 12 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 2 - round 12 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 6 - round 12 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 0 - round 12 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 7 - round 12 aese v5.16b, v27.16b //AES block 5 - round 13 aese v1.16b, v27.16b //AES block 1 - round 13 aese v2.16b, v27.16b //AES block 2 - round 13 aese v0.16b, v27.16b //AES block 0 - round 13 aese v4.16b, v27.16b //AES block 4 - round 13 aese v6.16b, v27.16b //AES block 6 - round 13 aese v3.16b, v27.16b //AES block 3 - round 13 aese v7.16b, v27.16b //AES block 7 - round 13 b.ge L256_dec_tail //handle tail ldp q8, q9, [x0], #32 //AES block 0, 1 - load ciphertext ldp q10, q11, [x0], #32 //AES block 2, 3 - load ciphertext ldp q12, q13, [x0], #32 //AES block 4, 5 - load ciphertext ldp q14, q15, [x0], #32 //AES block 6, 7 - load ciphertext cmp x0, x5 //check if we have <= 8 blocks .long 0xce017121 //eor3 v1.16b, v9.16b, v1.16b, v28.16b //AES block 1 - result .long 0xce007100 //eor3 v0.16b, v8.16b, v0.16b, v28.16b //AES block 0 - result stp q0, q1, [x2], #32 //AES block 0, 1 - store result rev32 v0.16b, v30.16b //CTR block 8 add v30.4s, v30.4s, v31.4s //CTR block 8 .long 0xce037163 //eor3 v3.16b, v11.16b, v3.16b, v28.16b //AES block 3 - result .long 0xce0571a5 //eor3 v5.16b, v13.16b, v5.16b, v28.16b //AES block 5 - result .long 0xce047184 //eor3 v4.16b, v12.16b, v4.16b, v28.16b //AES block 4 - result rev32 v1.16b, v30.16b //CTR block 9 add v30.4s, v30.4s, v31.4s //CTR block 9 .long 0xce027142 //eor3 v2.16b, v10.16b, v2.16b, v28.16b //AES block 2 - result stp q2, q3, [x2], #32 //AES block 2, 3 - store result rev32 v2.16b, v30.16b //CTR block 10 add v30.4s, v30.4s, v31.4s //CTR block 10 .long 0xce0671c6 //eor3 v6.16b, v14.16b, v6.16b, v28.16b //AES block 6 - result rev32 v3.16b, v30.16b //CTR block 11 add v30.4s, v30.4s, v31.4s //CTR block 11 stp q4, q5, [x2], #32 //AES block 4, 5 - store result .long 0xce0771e7 //eor3 v7.16b, v15.16b, v7.16b, v28.16b //AES block 7 - result stp q6, q7, [x2], #32 //AES block 6, 7 - store result rev32 v4.16b, v30.16b //CTR block 12 add v30.4s, v30.4s, v31.4s //CTR block 12 b.ge L256_dec_prepretail //do prepretail L256_dec_main_loop: //main loop start rev32 v5.16b, v30.16b //CTR block 8k+13 ldp q26, q27, [x11, #0] //load rk0, rk1 add v30.4s, v30.4s, v31.4s //CTR block 8k+13 rev64 v9.16b, v9.16b //GHASH block 8k+1 ldr q23, [x6, 
#144] //load h7l | h7h ldr q25, [x6, #176] //load h8l | h8h rev32 v6.16b, v30.16b //CTR block 8k+14 add v30.4s, v30.4s, v31.4s //CTR block 8k+14 rev64 v8.16b, v8.16b //GHASH block 8k ext v19.16b, v19.16b, v19.16b, #8 //PRE 0 rev64 v12.16b, v12.16b //GHASH block 8k+4 rev64 v11.16b, v11.16b //GHASH block 8k+3 rev32 v7.16b, v30.16b //CTR block 8k+15 rev64 v15.16b, v15.16b //GHASH block 8k+7 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 0 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 0 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 0 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 0 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 0 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 0 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 0 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 0 ldp q28, q26, [x11, #32] //load rk2, rk3 eor v8.16b, v8.16b, v19.16b //PRE 1 ldr q20, [x6, #96] //load h5l | h5h ldr q22, [x6, #128] //load h6l | h6h aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 1 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 1 rev64 v10.16b, v10.16b //GHASH block 8k+2 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 1 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 1 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 1 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 1 trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 1 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 1 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 2 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 2 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 2 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 2 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 2 pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 2 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 2 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 2 ldp q27, q28, [x11, #64] //load rk4, rk5 pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 3 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 3 pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 3 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 3 pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 3 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 3 trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 3 eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 4 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 3 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 4 
aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 4 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 4 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 4 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 4 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 4 ldr q21, [x6, #112] //load h6k | h5k ldr q24, [x6, #160] //load h8k | h7k eor v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low ldp q26, q27, [x11, #96] //load rk6, rk7 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 5 eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 5 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 5 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 5 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 5 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 5 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 5 .long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid rev64 v13.16b, v13.16b //GHASH block 8k+5 pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 6 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 6 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 5 trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 6 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 6 eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 6 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 6 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 6 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 6 pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low ldr q23, [x6, #48] //load h3l | h3h ldr q25, [x6, #80] //load h4l | h4h rev64 v14.16b, v14.16b //GHASH block 8k+6 eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 7 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 7 ldp q28, q26, [x11, #128] //load rk8, rk9 ldr q20, [x6] //load h1l | h1h ldr q22, [x6, #32] //load h2l | h2h .long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 7 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 7 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 7 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 7 ldr q21, [x6, #16] //load h2k | h1k ldr q24, [x6, #64] //load h4k | h3k aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 7 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 7 pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high pmull v25.1q, v12.1d, v25.1d 
//GHASH block 8k+4 - low trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 8 pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 8 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 8 pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 8 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 8 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 8 pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 8 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 8 ldp q27, q28, [x11, #160] //load rk10, rk11 pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid add v30.4s, v30.4s, v31.4s //CTR block 8k+15 .long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 9 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 9 eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 9 ldp q8, q9, [x0], #32 //AES block 8k+8, 8k+9 - load ciphertext eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 9 pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 9 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 9 pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 10 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 10 pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 9 .long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 9 .long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid .long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 10 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 10 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 10 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 10 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 10 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 10 .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low rev32 v20.16b, v30.16b //CTR block 8k+16 ldr d16, [x10] //MODULO - load modulo constant add v30.4s, v30.4s, v31.4s //CTR block 8k+16 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 11 ldp q26, q27, [x11, #192] //load rk12, rk13 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 11 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 11 .long 
0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid rev32 v22.16b, v30.16b //CTR block 8k+17 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 11 ldp q10, q11, [x0], #32 //AES block 8k+10, 8k+11 - load ciphertext aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 11 ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 11 add v30.4s, v30.4s, v31.4s //CTR block 8k+17 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 11 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 12 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 12 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 12 rev32 v23.16b, v30.16b //CTR block 8k+18 add v30.4s, v30.4s, v31.4s //CTR block 8k+18 pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid .long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 12 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 11 ldr q28, [x11, #224] //load rk14 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 12 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 12 .long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 12 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 12 ldp q12, q13, [x0], #32 //AES block 8k+12, 8k+13 - load ciphertext aese v1.16b, v27.16b //AES block 8k+9 - round 13 aese v2.16b, v27.16b //AES block 8k+10 - round 13 ldp q14, q15, [x0], #32 //AES block 8k+14, 8k+15 - load ciphertext aese v0.16b, v27.16b //AES block 8k+8 - round 13 aese v5.16b, v27.16b //AES block 8k+13 - round 13 rev32 v25.16b, v30.16b //CTR block 8k+19 .long 0xce027142 //eor3 v2.16b, v10.16b, v2.16b, v28.16b //AES block 8k+10 - result .long 0xce017121 //eor3 v1.16b, v9.16b, v1.16b, v28.16b //AES block 8k+9 - result ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment aese v7.16b, v27.16b //AES block 8k+15 - round 13 add v30.4s, v30.4s, v31.4s //CTR block 8k+19 pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low aese v4.16b, v27.16b //AES block 8k+12 - round 13 .long 0xce0571a5 //eor3 v5.16b, v13.16b, v5.16b, v28.16b //AES block 8k+13 - result .long 0xce007100 //eor3 v0.16b, v8.16b, v0.16b, v28.16b //AES block 8k+8 - result aese v3.16b, v27.16b //AES block 8k+11 - round 13 stp q0, q1, [x2], #32 //AES block 8k+8, 8k+9 - store result mov v0.16b, v20.16b //CTR block 8k+16 .long 0xce047184 //eor3 v4.16b, v12.16b, v4.16b, v28.16b //AES block 8k+12 - result .long 0xce154673 //eor3 v19.16b, v19.16b, v21.16b, v17.16b //MODULO - fold into low .long 0xce037163 //eor3 v3.16b, v11.16b, v3.16b, v28.16b //AES block 8k+11 - result stp q2, q3, [x2], #32 //AES block 8k+10, 8k+11 - store result mov v3.16b, v25.16b //CTR block 8k+19 mov v2.16b, v23.16b //CTR block 8k+18 aese v6.16b, v27.16b //AES block 8k+14 - round 13 mov v1.16b, v22.16b //CTR block 8k+17 stp q4, q5, [x2], #32 //AES block 8k+12, 8k+13 - store result .long 0xce0771e7 //eor3 v7.16b, v15.16b, v7.16b, v28.16b //AES block 8k+15 - result .long 0xce0671c6 //eor3 v6.16b, v14.16b, v6.16b, v28.16b //AES block 8k+14 - result rev32 v4.16b, v30.16b //CTR block 8k+20 add v30.4s, v30.4s, v31.4s //CTR block 8k+20 cmp x0, x5 //LOOP CONTROL stp q6, q7, [x2], #32 //AES 
block 8k+14, 8k+15 - store result b.lt L256_dec_main_loop L256_dec_prepretail: //PREPRETAIL ldp q26, q27, [x11, #0] //load rk0, rk1 rev32 v5.16b, v30.16b //CTR block 8k+13 add v30.4s, v30.4s, v31.4s //CTR block 8k+13 rev64 v12.16b, v12.16b //GHASH block 8k+4 ldr q21, [x6, #112] //load h6k | h5k ldr q24, [x6, #160] //load h8k | h7k rev32 v6.16b, v30.16b //CTR block 8k+14 rev64 v8.16b, v8.16b //GHASH block 8k add v30.4s, v30.4s, v31.4s //CTR block 8k+14 ext v19.16b, v19.16b, v19.16b, #8 //PRE 0 ldr q23, [x6, #144] //load h7l | h7h ldr q25, [x6, #176] //load h8l | h8h rev64 v9.16b, v9.16b //GHASH block 8k+1 rev32 v7.16b, v30.16b //CTR block 8k+15 rev64 v10.16b, v10.16b //GHASH block 8k+2 ldr q20, [x6, #96] //load h5l | h5h ldr q22, [x6, #128] //load h6l | h6h aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 0 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 0 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 0 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 0 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 0 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 0 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 1 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 0 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 0 ldp q28, q26, [x11, #32] //load rk2, rk3 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 1 eor v8.16b, v8.16b, v19.16b //PRE 1 aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 1 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 1 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 1 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 1 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 1 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 1 pmull2 v16.1q, v9.2d, v23.2d //GHASH block 8k+1 - high trn1 v18.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid pmull v19.1q, v8.1d, v25.1d //GHASH block 8k - low rev64 v11.16b, v11.16b //GHASH block 8k+3 pmull v23.1q, v9.1d, v23.1d //GHASH block 8k+1 - low aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 2 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 2 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 2 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 2 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 2 pmull2 v17.1q, v8.2d, v25.2d //GHASH block 8k - high aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 2 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 3 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 3 rev64 v14.16b, v14.16b //GHASH block 8k+6 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 3 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 2 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 3 pmull2 v29.1q, v10.2d, v22.2d //GHASH block 8k+2 - high trn2 v8.2d, v9.2d, v8.2d //GHASH block 8k, 8k+1 - mid aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 2 ldp q27, q28, [x11, #64] //load rk4, rk5 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 3 pmull2 v9.1q, v11.2d, v20.2d //GHASH block 8k+3 - high aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 3 eor v17.16b, v17.16b, v16.16b //GHASH block 8k+1 - high eor 
v8.16b, v8.16b, v18.16b //GHASH block 8k, 8k+1 - mid aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 3 pmull v22.1q, v10.1d, v22.1d //GHASH block 8k+2 - low aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 3 .long 0xce1d2631 //eor3 v17.16b, v17.16b, v29.16b, v9.16b //GHASH block 8k+2, 8k+3 - high trn1 v29.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid trn2 v10.2d, v11.2d, v10.2d //GHASH block 8k+2, 8k+3 - mid pmull2 v18.1q, v8.2d, v24.2d //GHASH block 8k - mid pmull v20.1q, v11.1d, v20.1d //GHASH block 8k+3 - low eor v19.16b, v19.16b, v23.16b //GHASH block 8k+1 - low pmull v24.1q, v8.1d, v24.1d //GHASH block 8k+1 - mid aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 4 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 4 .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+2, 8k+3 - low ldr q20, [x6] //load h1l | h1h ldr q22, [x6, #32] //load h2l | h2h aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 4 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 4 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 4 eor v18.16b, v18.16b, v24.16b //GHASH block 8k+1 - mid eor v10.16b, v10.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 5 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 4 aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 5 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 4 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 4 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 5 pmull2 v29.1q, v10.2d, v21.2d //GHASH block 8k+2 - mid aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 5 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 5 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 5 pmull v21.1q, v10.1d, v21.1d //GHASH block 8k+3 - mid aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 5 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 5 ldp q26, q27, [x11, #96] //load rk6, rk7 ldr q23, [x6, #48] //load h3l | h3h ldr q25, [x6, #80] //load h4l | h4h rev64 v15.16b, v15.16b //GHASH block 8k+7 rev64 v13.16b, v13.16b //GHASH block 8k+5 .long 0xce157652 //eor3 v18.16b, v18.16b, v21.16b, v29.16b //GHASH block 8k+2, 8k+3 - mid trn1 v16.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 6 ldr q21, [x6, #16] //load h2k | h1k ldr q24, [x6, #64] //load h4k | h3k aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 6 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 6 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 6 pmull2 v8.1q, v12.2d, v25.2d //GHASH block 8k+4 - high pmull2 v10.1q, v13.2d, v23.2d //GHASH block 8k+5 - high pmull v25.1q, v12.1d, v25.1d //GHASH block 8k+4 - low trn2 v12.2d, v13.2d, v12.2d //GHASH block 8k+4, 8k+5 - mid pmull v23.1q, v13.1d, v23.1d //GHASH block 8k+5 - low trn1 v13.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 7 pmull2 v11.1q, v14.2d, v22.2d //GHASH block 8k+6 - high aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 6 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 6 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 6 aese v4.16b, 
v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 6 ldp q28, q26, [x11, #128] //load rk8, rk9 pmull v22.1q, v14.1d, v22.1d //GHASH block 8k+6 - low aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 7 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 7 aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 7 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 7 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 7 .long 0xce082a31 //eor3 v17.16b, v17.16b, v8.16b, v10.16b //GHASH block 8k+4, 8k+5 - high aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 7 trn2 v14.2d, v15.2d, v14.2d //GHASH block 8k+6, 8k+7 - mid aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 7 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 8 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 8 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 8 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 8 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 8 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 8 aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 8 aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 9 eor v12.16b, v12.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 9 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 9 eor v14.16b, v14.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 9 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 9 pmull2 v16.1q, v12.2d, v24.2d //GHASH block 8k+4 - mid aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 8 pmull v24.1q, v12.1d, v24.1d //GHASH block 8k+5 - mid pmull2 v12.1q, v15.2d, v20.2d //GHASH block 8k+7 - high pmull2 v13.1q, v14.2d, v21.2d //GHASH block 8k+6 - mid pmull v21.1q, v14.1d, v21.1d //GHASH block 8k+7 - mid pmull v20.1q, v15.1d, v20.1d //GHASH block 8k+7 - low ldp q27, q28, [x11, #160] //load rk10, rk11 .long 0xce195e73 //eor3 v19.16b, v19.16b, v25.16b, v23.16b //GHASH block 8k+4, 8k+5 - low .long 0xce184252 //eor3 v18.16b, v18.16b, v24.16b, v16.16b //GHASH block 8k+4, 8k+5 - mid aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 9 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 9 aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 9 .long 0xce0b3231 //eor3 v17.16b, v17.16b, v11.16b, v12.16b //GHASH block 8k+6, 8k+7 - high .long 0xce165273 //eor3 v19.16b, v19.16b, v22.16b, v20.16b //GHASH block 8k+6, 8k+7 - low ldr d16, [x10] //MODULO - load modulo constant .long 0xce153652 //eor3 v18.16b, v18.16b, v21.16b, v13.16b //GHASH block 8k+6, 8k+7 - mid aese v4.16b, v27.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 10 aese v6.16b, v27.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 10 aese v5.16b, v27.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 10 aese v0.16b, v27.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 10 aese v2.16b, v27.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 10 aese v3.16b, v27.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 10 .long 0xce114e52 //eor3 v18.16b, v18.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up aese v7.16b, v27.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 10 aese v1.16b, v27.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 10 
ldp q26, q27, [x11, #192] //load rk12, rk13 ext v21.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment aese v2.16b, v28.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 11 aese v1.16b, v28.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 11 aese v0.16b, v28.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 11 pmull v29.1q, v17.1d, v16.1d //MODULO - top 64b align with mid aese v3.16b, v28.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 11 aese v7.16b, v28.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 11 aese v6.16b, v28.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 11 aese v4.16b, v28.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 11 aese v5.16b, v28.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 11 aese v3.16b, v26.16b aesmc v3.16b, v3.16b //AES block 8k+11 - round 12 .long 0xce1d5652 //eor3 v18.16b, v18.16b, v29.16b, v21.16b //MODULO - fold into mid aese v3.16b, v27.16b //AES block 8k+11 - round 13 aese v2.16b, v26.16b aesmc v2.16b, v2.16b //AES block 8k+10 - round 12 aese v6.16b, v26.16b aesmc v6.16b, v6.16b //AES block 8k+14 - round 12 pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low aese v4.16b, v26.16b aesmc v4.16b, v4.16b //AES block 8k+12 - round 12 aese v7.16b, v26.16b aesmc v7.16b, v7.16b //AES block 8k+15 - round 12 aese v0.16b, v26.16b aesmc v0.16b, v0.16b //AES block 8k+8 - round 12 ldr q28, [x11, #224] //load rk14 aese v1.16b, v26.16b aesmc v1.16b, v1.16b //AES block 8k+9 - round 12 aese v4.16b, v27.16b //AES block 8k+12 - round 13 ext v21.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment aese v5.16b, v26.16b aesmc v5.16b, v5.16b //AES block 8k+13 - round 12 aese v6.16b, v27.16b //AES block 8k+14 - round 13 aese v2.16b, v27.16b //AES block 8k+10 - round 13 aese v1.16b, v27.16b //AES block 8k+9 - round 13 aese v5.16b, v27.16b //AES block 8k+13 - round 13 .long 0xce154673 //eor3 v19.16b, v19.16b, v21.16b, v17.16b //MODULO - fold into low add v30.4s, v30.4s, v31.4s //CTR block 8k+15 aese v7.16b, v27.16b //AES block 8k+15 - round 13 aese v0.16b, v27.16b //AES block 8k+8 - round 13 L256_dec_tail: //TAIL ext v16.16b, v19.16b, v19.16b, #8 //prepare final partial tag sub x5, x4, x0 //main_end_input_ptr is number of bytes left to process cmp x5, #112 ldr q9, [x0], #16 //AES block 8k+8 - load ciphertext ldp q24, q25, [x6, #160] //load h8k | h7k mov v29.16b, v28.16b ldp q20, q21, [x6, #96] //load h5l | h5h .long 0xce00752c //eor3 v12.16b, v9.16b, v0.16b, v29.16b //AES block 8k+8 - result ldp q22, q23, [x6, #128] //load h6l | h6h b.gt L256_dec_blocks_more_than_7 mov v7.16b, v6.16b sub v30.4s, v30.4s, v31.4s mov v6.16b, v5.16b mov v5.16b, v4.16b mov v4.16b, v3.16b movi v19.8b, #0 movi v17.8b, #0 movi v18.8b, #0 mov v3.16b, v2.16b cmp x5, #96 mov v2.16b, v1.16b b.gt L256_dec_blocks_more_than_6 mov v7.16b, v6.16b mov v6.16b, v5.16b mov v5.16b, v4.16b cmp x5, #80 sub v30.4s, v30.4s, v31.4s mov v4.16b, v3.16b mov v3.16b, v1.16b b.gt L256_dec_blocks_more_than_5 cmp x5, #64 mov v7.16b, v6.16b sub v30.4s, v30.4s, v31.4s mov v6.16b, v5.16b mov v5.16b, v4.16b mov v4.16b, v1.16b b.gt L256_dec_blocks_more_than_4 sub v30.4s, v30.4s, v31.4s mov v7.16b, v6.16b cmp x5, #48 mov v6.16b, v5.16b mov v5.16b, v1.16b b.gt L256_dec_blocks_more_than_3 ldr q24, [x6, #64] //load h4k | h3k sub v30.4s, v30.4s, v31.4s mov v7.16b, v6.16b cmp x5, #32 mov v6.16b, v1.16b b.gt L256_dec_blocks_more_than_2 sub v30.4s, v30.4s, v31.4s mov v7.16b, v1.16b cmp x5, #16 b.gt L256_dec_blocks_more_than_1 sub v30.4s, v30.4s, v31.4s ldr q21, [x6, #16] //load h2k | h1k b 
L256_dec_blocks_less_than_1 L256_dec_blocks_more_than_7: //blocks left > 7 rev64 v8.16b, v9.16b //GHASH final-7 block ldr q9, [x0], #16 //AES final-6 block - load ciphertext st1 { v12.16b}, [x2], #16 //AES final-7 block - store result ins v18.d[0], v24.d[1] //GHASH final-7 block - mid eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-7 block - mid .long 0xce01752c //eor3 v12.16b, v9.16b, v1.16b, v29.16b //AES final-6 block - result pmull2 v17.1q, v8.2d, v25.2d //GHASH final-7 block - high eor v27.8b, v27.8b, v8.8b //GHASH final-7 block - mid movi v16.8b, #0 //supress further partial tag feed in pmull v19.1q, v8.1d, v25.1d //GHASH final-7 block - low pmull v18.1q, v27.1d, v18.1d //GHASH final-7 block - mid L256_dec_blocks_more_than_6: //blocks left > 6 rev64 v8.16b, v9.16b //GHASH final-6 block eor v8.16b, v8.16b, v16.16b //feed in partial tag ldr q9, [x0], #16 //AES final-5 block - load ciphertext movi v16.8b, #0 //supress further partial tag feed in ins v27.d[0], v8.d[1] //GHASH final-6 block - mid st1 { v12.16b}, [x2], #16 //AES final-6 block - store result pmull2 v28.1q, v8.2d, v23.2d //GHASH final-6 block - high pmull v26.1q, v8.1d, v23.1d //GHASH final-6 block - low .long 0xce02752c //eor3 v12.16b, v9.16b, v2.16b, v29.16b //AES final-5 block - result eor v19.16b, v19.16b, v26.16b //GHASH final-6 block - low eor v27.8b, v27.8b, v8.8b //GHASH final-6 block - mid pmull v27.1q, v27.1d, v24.1d //GHASH final-6 block - mid eor v18.16b, v18.16b, v27.16b //GHASH final-6 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-6 block - high L256_dec_blocks_more_than_5: //blocks left > 5 rev64 v8.16b, v9.16b //GHASH final-5 block eor v8.16b, v8.16b, v16.16b //feed in partial tag pmull2 v28.1q, v8.2d, v22.2d //GHASH final-5 block - high ins v27.d[0], v8.d[1] //GHASH final-5 block - mid ldr q9, [x0], #16 //AES final-4 block - load ciphertext eor v27.8b, v27.8b, v8.8b //GHASH final-5 block - mid st1 { v12.16b}, [x2], #16 //AES final-5 block - store result pmull v26.1q, v8.1d, v22.1d //GHASH final-5 block - low ins v27.d[1], v27.d[0] //GHASH final-5 block - mid pmull2 v27.1q, v27.2d, v21.2d //GHASH final-5 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-5 block - high .long 0xce03752c //eor3 v12.16b, v9.16b, v3.16b, v29.16b //AES final-4 block - result eor v19.16b, v19.16b, v26.16b //GHASH final-5 block - low eor v18.16b, v18.16b, v27.16b //GHASH final-5 block - mid movi v16.8b, #0 //supress further partial tag feed in L256_dec_blocks_more_than_4: //blocks left > 4 rev64 v8.16b, v9.16b //GHASH final-4 block eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-4 block - mid ldr q9, [x0], #16 //AES final-3 block - load ciphertext movi v16.8b, #0 //supress further partial tag feed in pmull v26.1q, v8.1d, v20.1d //GHASH final-4 block - low pmull2 v28.1q, v8.2d, v20.2d //GHASH final-4 block - high eor v27.8b, v27.8b, v8.8b //GHASH final-4 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-4 block - high pmull v27.1q, v27.1d, v21.1d //GHASH final-4 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-4 block - low st1 { v12.16b}, [x2], #16 //AES final-4 block - store result eor v18.16b, v18.16b, v27.16b //GHASH final-4 block - mid .long 0xce04752c //eor3 v12.16b, v9.16b, v4.16b, v29.16b //AES final-3 block - result L256_dec_blocks_more_than_3: //blocks left > 3 ldr q25, [x6, #80] //load h4l | h4h rev64 v8.16b, v9.16b //GHASH final-3 block eor v8.16b, v8.16b, v16.16b //feed in partial tag ldr q9, [x0], #16 //AES final-2 
block - load ciphertext ldr q24, [x6, #64] //load h4k | h3k ins v27.d[0], v8.d[1] //GHASH final-3 block - mid st1 { v12.16b}, [x2], #16 //AES final-3 block - store result .long 0xce05752c //eor3 v12.16b, v9.16b, v5.16b, v29.16b //AES final-2 block - result eor v27.8b, v27.8b, v8.8b //GHASH final-3 block - mid ins v27.d[1], v27.d[0] //GHASH final-3 block - mid pmull v26.1q, v8.1d, v25.1d //GHASH final-3 block - low pmull2 v28.1q, v8.2d, v25.2d //GHASH final-3 block - high movi v16.8b, #0 //supress further partial tag feed in pmull2 v27.1q, v27.2d, v24.2d //GHASH final-3 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-3 block - low eor v17.16b, v17.16b, v28.16b //GHASH final-3 block - high eor v18.16b, v18.16b, v27.16b //GHASH final-3 block - mid L256_dec_blocks_more_than_2: //blocks left > 2 rev64 v8.16b, v9.16b //GHASH final-2 block ldr q23, [x6, #48] //load h3l | h3h ldr q9, [x0], #16 //AES final-1 block - load ciphertext eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-2 block - mid pmull v26.1q, v8.1d, v23.1d //GHASH final-2 block - low st1 { v12.16b}, [x2], #16 //AES final-2 block - store result .long 0xce06752c //eor3 v12.16b, v9.16b, v6.16b, v29.16b //AES final-1 block - result eor v27.8b, v27.8b, v8.8b //GHASH final-2 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-2 block - low movi v16.8b, #0 //supress further partial tag feed in pmull v27.1q, v27.1d, v24.1d //GHASH final-2 block - mid pmull2 v28.1q, v8.2d, v23.2d //GHASH final-2 block - high eor v18.16b, v18.16b, v27.16b //GHASH final-2 block - mid eor v17.16b, v17.16b, v28.16b //GHASH final-2 block - high L256_dec_blocks_more_than_1: //blocks left > 1 rev64 v8.16b, v9.16b //GHASH final-1 block eor v8.16b, v8.16b, v16.16b //feed in partial tag ins v27.d[0], v8.d[1] //GHASH final-1 block - mid ldr q22, [x6, #32] //load h2l | h2h eor v27.8b, v27.8b, v8.8b //GHASH final-1 block - mid ldr q9, [x0], #16 //AES final block - load ciphertext st1 { v12.16b}, [x2], #16 //AES final-1 block - store result ldr q21, [x6, #16] //load h2k | h1k pmull v26.1q, v8.1d, v22.1d //GHASH final-1 block - low ins v27.d[1], v27.d[0] //GHASH final-1 block - mid eor v19.16b, v19.16b, v26.16b //GHASH final-1 block - low .long 0xce07752c //eor3 v12.16b, v9.16b, v7.16b, v29.16b //AES final block - result pmull2 v28.1q, v8.2d, v22.2d //GHASH final-1 block - high pmull2 v27.1q, v27.2d, v21.2d //GHASH final-1 block - mid movi v16.8b, #0 //supress further partial tag feed in eor v17.16b, v17.16b, v28.16b //GHASH final-1 block - high eor v18.16b, v18.16b, v27.16b //GHASH final-1 block - mid L256_dec_blocks_less_than_1: //blocks left <= 1 ld1 { v26.16b}, [x2] //load existing bytes where the possibly partial last block is to be stored mvn x7, xzr //temp0_x = 0xffffffffffffffff and x1, x1, #127 //bit_length %= 128 sub x1, x1, #128 //bit_length -= 128 rev32 v30.16b, v30.16b str q30, [x16] //store the updated counter neg x1, x1 //bit_length = 128 - #bits in input (in range [1,128]) and x1, x1, #127 //bit_length %= 128 lsr x7, x7, x1 //temp0_x is mask for top 64b of last block cmp x1, #64 mvn x8, xzr //temp1_x = 0xffffffffffffffff csel x14, x7, xzr, lt csel x13, x8, x7, lt mov v0.d[0], x13 //ctr0b is mask for last block mov v0.d[1], x14 and v9.16b, v9.16b, v0.16b //possibly partial last block has zeroes in highest bits ldr q20, [x6] //load h1l | h1h bif v12.16b, v26.16b, v0.16b //insert existing bytes in top end of result before storing rev64 v8.16b, v9.16b //GHASH final block eor v8.16b, v8.16b, v16.16b //feed in 
partial tag ins v16.d[0], v8.d[1] //GHASH final block - mid pmull2 v28.1q, v8.2d, v20.2d //GHASH final block - high eor v16.8b, v16.8b, v8.8b //GHASH final block - mid pmull v26.1q, v8.1d, v20.1d //GHASH final block - low eor v17.16b, v17.16b, v28.16b //GHASH final block - high pmull v16.1q, v16.1d, v21.1d //GHASH final block - mid eor v18.16b, v18.16b, v16.16b //GHASH final block - mid ldr d16, [x10] //MODULO - load modulo constant eor v19.16b, v19.16b, v26.16b //GHASH final block - low pmull v21.1q, v17.1d, v16.1d //MODULO - top 64b align with mid eor v14.16b, v17.16b, v19.16b //MODULO - karatsuba tidy up ext v17.16b, v17.16b, v17.16b, #8 //MODULO - other top alignment st1 { v12.16b}, [x2] //store all 16B eor v18.16b, v18.16b, v14.16b //MODULO - karatsuba tidy up eor v21.16b, v17.16b, v21.16b //MODULO - fold into mid eor v18.16b, v18.16b, v21.16b //MODULO - fold into mid pmull v17.1q, v18.1d, v16.1d //MODULO - mid 64b align with low ext v18.16b, v18.16b, v18.16b, #8 //MODULO - other mid alignment eor v19.16b, v19.16b, v17.16b //MODULO - fold into low eor v19.16b, v19.16b, v18.16b //MODULO - fold into low ext v19.16b, v19.16b, v19.16b, #8 rev64 v19.16b, v19.16b st1 { v19.16b }, [x3] mov x0, x9 ldp d10, d11, [sp, #16] ldp d12, d13, [sp, #32] ldp d14, d15, [sp, #48] ldp d8, d9, [sp], #80 ret L256_dec_ret: mov w0, #0x0 ret .byte 65,69,83,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,65,82,77,118,56,44,32,83,80,68,88,32,66,83,68,45,51,45,67,108,97,117,115,101,32,98,121,32,60,120,105,97,111,107,97,110,103,46,113,105,97,110,64,97,114,109,46,99,111,109,62,0 .align 2 .align 2 #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
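Aside (editorial, not part of the generated file): the tail paths above (L256_enc_blocks_less_than_1 / L256_dec_blocks_less_than_1) build a byte mask from the remaining bit length so that only the bytes belonging to the message in the possibly partial final block are written and fed into GHASH. The following is a minimal Python sketch of that mask computation, written only to mirror the temp0_x/temp1_x register comments and the cmp/csel pair; the function name and the integer return form are illustrative assumptions, not part of the assembly.

def last_block_mask(bit_length):
    # Illustrative helper: rebuilds the 128-bit mask the tail code loads
    # into v0 before AND-ing the final block. The low 64 bits returned
    # here correspond to v0.d[0], the high 64 bits to v0.d[1].
    r = bit_length % 128 or 128           # bits present in the last block
    shift = (128 - r) % 128               # bits to clear at the top
    ones = (1 << 64) - 1                  # mvn temp0_x, xzr
    if shift < 64:                        # mirrors cmp x1, #64 / csel ..., lt
        lo, hi = ones, ones >> shift      # only the high word is trimmed
    else:
        lo, hi = ones >> (shift - 64), 0  # high word fully cleared
        # (the asm relies on lsr taking the shift amount mod 64 here)
    return lo | (hi << 64)

For example, a 9-byte (72-bit) final block gives a mask whose low eight bytes and ninth byte are 0xff and whose top seven bytes are zero, so the stored result keeps the existing bytes above the message via the bif instruction.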
wlsfx/bnbb
7,433
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/p256_beeu-armv8-asm.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include "openssl/arm_arch.h" .text .globl beeu_mod_inverse_vartime .align 4 beeu_mod_inverse_vartime: // Reserve enough space for 14 8-byte registers on the stack // in the first stp call for x29, x30. // Then store the remaining callee-saved registers. // // | x29 | x30 | x19 | x20 | ... | x27 | x28 | x0 | x2 | // ^ ^ // sp <------------------- 112 bytes ----------------> old sp // x29 (FP) // AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-112]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] stp x0,x2,[sp,#96] // B = b3..b0 := a ldp x25,x26,[x1] ldp x27,x28,[x1,#16] // n3..n0 := n // Note: the value of input params are changed in the following. ldp x0,x1,[x2] ldp x2,x30,[x2,#16] // A = a3..a0 := n mov x21, x0 mov x22, x1 mov x23, x2 mov x24, x30 // X = x4..x0 := 1 mov x3, #1 eor x4, x4, x4 eor x5, x5, x5 eor x6, x6, x6 eor x7, x7, x7 // Y = y4..y0 := 0 eor x8, x8, x8 eor x9, x9, x9 eor x10, x10, x10 eor x11, x11, x11 eor x12, x12, x12 Lbeeu_loop: // if B == 0, jump to .Lbeeu_loop_end orr x14, x25, x26 orr x14, x14, x27 // reverse the bit order of x25. This is needed for clz after this macro rbit x15, x25 orr x14, x14, x28 cbz x14,Lbeeu_loop_end // 0 < B < |n|, // 0 < A <= |n|, // (1) X*a == B (mod |n|), // (2) (-1)*Y*a == A (mod |n|) // Now divide B by the maximum possible power of two in the // integers, and divide X by the same value mod |n|. // When we're done, (1) still holds. // shift := number of trailing 0s in x25 // ( = number of leading 0s in x15; see the "rbit" instruction in TEST_B_ZERO) clz x13, x15 // If there is no shift, goto shift_A_Y cbz x13, Lbeeu_shift_A_Y // Shift B right by "x13" bits neg x14, x13 lsr x25, x25, x13 lsl x15, x26, x14 lsr x26, x26, x13 lsl x19, x27, x14 orr x25, x25, x15 lsr x27, x27, x13 lsl x20, x28, x14 orr x26, x26, x19 lsr x28, x28, x13 orr x27, x27, x20 // Shift X right by "x13" bits, adding n whenever X becomes odd. // x13--; // x14 := 0; needed in the addition to the most significant word in SHIFT1 eor x14, x14, x14 Lbeeu_shift_loop_X: tbz x3, #0, Lshift1_0 adds x3, x3, x0 adcs x4, x4, x1 adcs x5, x5, x2 adcs x6, x6, x30 adc x7, x7, x14 Lshift1_0: // var0 := [var1|var0]<64..1>; // i.e. concatenate var1 and var0, // extract bits <64..1> from the resulting 128-bit value // and put them in var0 extr x3, x4, x3, #1 extr x4, x5, x4, #1 extr x5, x6, x5, #1 extr x6, x7, x6, #1 lsr x7, x7, #1 subs x13, x13, #1 bne Lbeeu_shift_loop_X // Note: the steps above perform the same sequence as in p256_beeu-x86_64-asm.pl // with the following differences: // - "x13" is set directly to the number of trailing 0s in B // (using rbit and clz instructions) // - The loop is only used to call SHIFT1(X) // and x13 is decreased while executing the X loop. // - SHIFT256(B, x13) is performed before right-shifting X; they are independent Lbeeu_shift_A_Y: // Same for A and Y. // Afterwards, (2) still holds. 
// Reverse the bit order of x21 // x13 := number of trailing 0s in x21 (= number of leading 0s in x15) rbit x15, x21 clz x13, x15 // If there is no shift, goto |B-A|, X+Y update cbz x13, Lbeeu_update_B_X_or_A_Y // Shift A right by "x13" bits neg x14, x13 lsr x21, x21, x13 lsl x15, x22, x14 lsr x22, x22, x13 lsl x19, x23, x14 orr x21, x21, x15 lsr x23, x23, x13 lsl x20, x24, x14 orr x22, x22, x19 lsr x24, x24, x13 orr x23, x23, x20 // Shift Y right by "x13" bits, adding n whenever Y becomes odd. // x13--; // x14 := 0; needed in the addition to the most significant word in SHIFT1 eor x14, x14, x14 Lbeeu_shift_loop_Y: tbz x8, #0, Lshift1_1 adds x8, x8, x0 adcs x9, x9, x1 adcs x10, x10, x2 adcs x11, x11, x30 adc x12, x12, x14 Lshift1_1: // var0 := [var1|var0]<64..1>; // i.e. concatenate var1 and var0, // extract bits <64..1> from the resulting 128-bit value // and put them in var0 extr x8, x9, x8, #1 extr x9, x10, x9, #1 extr x10, x11, x10, #1 extr x11, x12, x11, #1 lsr x12, x12, #1 subs x13, x13, #1 bne Lbeeu_shift_loop_Y Lbeeu_update_B_X_or_A_Y: // Try T := B - A; if cs, continue with B > A (cs: carry set = no borrow) // Note: this is a case of unsigned arithmetic, where T fits in 4 64-bit words // without taking a sign bit if generated. The lack of a carry would // indicate a negative result. See, for example, // https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/condition-codes-1-condition-flags-and-codes subs x14, x25, x21 sbcs x15, x26, x22 sbcs x19, x27, x23 sbcs x20, x28, x24 bcs Lbeeu_B_greater_than_A // Else A > B => // A := A - B; Y := Y + X; goto beginning of the loop subs x21, x21, x25 sbcs x22, x22, x26 sbcs x23, x23, x27 sbcs x24, x24, x28 adds x8, x8, x3 adcs x9, x9, x4 adcs x10, x10, x5 adcs x11, x11, x6 adc x12, x12, x7 b Lbeeu_loop Lbeeu_B_greater_than_A: // Continue with B > A => // B := B - A; X := X + Y; goto beginning of the loop mov x25, x14 mov x26, x15 mov x27, x19 mov x28, x20 adds x3, x3, x8 adcs x4, x4, x9 adcs x5, x5, x10 adcs x6, x6, x11 adc x7, x7, x12 b Lbeeu_loop Lbeeu_loop_end: // The Euclid's algorithm loop ends when A == gcd(a,n); // this would be 1, when a and n are co-prime (i.e. do not have a common factor). // Since (-1)*Y*a == A (mod |n|), Y>0 // then out = -Y mod n // Verify that A = 1 ==> (-1)*Y*a = A = 1 (mod |n|) // Is A-1 == 0? // If not, fail. 
sub x14, x21, #1 orr x14, x14, x22 orr x14, x14, x23 orr x14, x14, x24 cbnz x14, Lbeeu_err // If Y>n ==> Y:=Y-n Lbeeu_reduction_loop: // x_i := y_i - n_i (X is no longer needed, use it as temp) // (x14 = 0 from above) subs x3, x8, x0 sbcs x4, x9, x1 sbcs x5, x10, x2 sbcs x6, x11, x30 sbcs x7, x12, x14 // If result is non-negative (i.e., cs = carry set = no borrow), // y_i := x_i; goto reduce again // else // y_i := y_i; continue csel x8, x3, x8, cs csel x9, x4, x9, cs csel x10, x5, x10, cs csel x11, x6, x11, cs csel x12, x7, x12, cs bcs Lbeeu_reduction_loop // Now Y < n (Y cannot be equal to n, since the inverse cannot be 0) // out = -Y = n-Y subs x8, x0, x8 sbcs x9, x1, x9 sbcs x10, x2, x10 sbcs x11, x30, x11 // Save Y in output (out (x0) was saved on the stack) ldr x3, [sp,#96] stp x8, x9, [x3] stp x10, x11, [x3,#16] // return 1 (success) mov x0, #1 b Lbeeu_finish Lbeeu_err: // return 0 (error) eor x0, x0, x0 Lbeeu_finish: // Restore callee-saved registers, except x0, x2 add sp,x29,#0 ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldp x25,x26,[sp,#64] ldp x27,x28,[sp,#80] ldp x29,x30,[sp],#112 AARCH64_VALIDATE_LINK_REGISTER ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
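Aside (editorial, not part of the generated file): beeu_mod_inverse_vartime above implements the binary extended Euclidean algorithm described in its comments: keep the invariants X*a == B (mod n) and (-1)*Y*a == A (mod n), strip powers of two from B and A while halving X and Y modulo n, subtract the smaller of A and B from the larger, and return -Y mod n once A reaches 1. Below is a small Python sketch under those invariants (assuming 0 < a < n and n odd, as for the P-256 group order); the arbitrary-precision integers stand in for the four 64-bit limbs used in the assembly, and the helper name simply reuses the exported symbol for readability.

def beeu_mod_inverse_vartime(a, n):
    # Illustrative sketch only: B ~ x25-x28, A ~ x21-x24, X ~ x3-x7, Y ~ x8-x12.
    # Loop invariants, as stated in the comments above (all modulo n):
    #   (1)  X*a ==  B
    #   (2) -Y*a ==  A
    A, B, X, Y = n, a, 1, 0
    while B != 0:
        # Divide B by its largest power of two; halve X mod n alongside,
        # adding n first whenever X is odd so the halving stays exact.
        while B & 1 == 0:
            B >>= 1
            X = (X + n if X & 1 else X) >> 1
        # Same treatment for A and Y, preserving invariant (2).
        while A & 1 == 0:
            A >>= 1
            Y = (Y + n if Y & 1 else Y) >> 1
        # Subtract the smaller from the larger; both invariants are additive.
        if B >= A:
            B, X = B - A, X + Y           # B := B - A, X := X + Y
        else:
            A, Y = A - B, Y + X           # A := A - B, Y := Y + X
    if A != 1:                            # gcd(a, n) != 1: no inverse exists
        return None
    return (n - Y % n) % n                # out = -Y mod n, since -Y*a == A == 1

For instance, beeu_mod_inverse_vartime(3, 7) returns 5, and 3*5 % 7 == 1, matching the final reduction and out = n - Y step in the assembly.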
wlsfx/bnbb
37,795
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/p256-armv8-asm.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include "openssl/arm_arch.h" .section .rodata .align 5 Lpoly: .quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001 LRR: // 2^512 mod P precomputed for NIST P256 polynomial .quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd Lone_mont: .quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe Lone: .quad 1,0,0,0 Lord: .quad 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000 LordK: .quad 0xccd1c8aaee00bc4f .byte 69,67,80,95,78,73,83,84,90,50,53,54,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .text // void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4], // const BN_ULONG x2[4]); .globl ecp_nistz256_mul_mont .def ecp_nistz256_mul_mont .type 32 .endef .align 4 ecp_nistz256_mul_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-32]! add x29,sp,#0 stp x19,x20,[sp,#16] ldr x3,[x2] // bp[0] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_mul_mont ldp x19,x20,[sp,#16] ldp x29,x30,[sp],#32 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_sqr_mont .def ecp_nistz256_sqr_mont .type 32 .endef .align 4 ecp_nistz256_sqr_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-32]! add x29,sp,#0 stp x19,x20,[sp,#16] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sqr_mont ldp x19,x20,[sp,#16] ldp x29,x30,[sp],#32 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_div_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_div_by_2 .def ecp_nistz256_div_by_2 .type 32 .endef .align 4 ecp_nistz256_div_by_2: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_div_by_2 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_mul_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_mul_by_2 .def ecp_nistz256_mul_by_2 .type 32 .endef .align 4 ecp_nistz256_mul_by_2: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 bl __ecp_nistz256_add_to // ret = a+a // 2*a ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_mul_by_3(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_mul_by_3 .def ecp_nistz256_mul_by_3 .type 32 .endef .align 4 ecp_nistz256_mul_by_3: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 mov x4,x14 mov x5,x15 mov x6,x16 mov x7,x17 bl __ecp_nistz256_add_to // ret = a+a // 2*a mov x8,x4 mov x9,x5 mov x10,x6 mov x11,x7 bl __ecp_nistz256_add_to // ret += a // 2*a+a=3*a ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_sub(BN_ULONG x0[4],const BN_ULONG x1[4], // const BN_ULONG x2[4]); .globl ecp_nistz256_sub .def ecp_nistz256_sub .type 32 .endef .align 4 ecp_nistz256_sub: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ldp x14,x15,[x1] ldp x16,x17,[x1,#16] adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sub_from ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_neg .def ecp_nistz256_neg .type 32 .endef .align 4 ecp_nistz256_neg: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 mov x2,x1 mov x14,xzr // a = 0 mov x15,xzr mov x16,xzr mov x17,xzr adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sub_from ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded // to x4-x7 and b[0] - to x3 .def __ecp_nistz256_mul_mont .type 32 .endef .align 4 __ecp_nistz256_mul_mont: mul x14,x4,x3 // a[0]*b[0] umulh x8,x4,x3 mul x15,x5,x3 // a[1]*b[0] umulh x9,x5,x3 mul x16,x6,x3 // a[2]*b[0] umulh x10,x6,x3 mul x17,x7,x3 // a[3]*b[0] umulh x11,x7,x3 ldr x3,[x2,#8] // b[1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adc x19,xzr,x11 mov x20,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr ldr x3,[x2,#8*(1+1)] // b[1+1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr ldr x3,[x2,#8*(2+1)] // b[2+1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs 
x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr // last reduction subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 adcs x16,x17,x10 // +=acc[0]*0xffff0001 adcs x17,x19,x11 adc x19,x20,xzr adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x19,xzr // did it borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret // note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded // to x4-x7 .def __ecp_nistz256_sqr_mont .type 32 .endef .align 4 __ecp_nistz256_sqr_mont: // | | | | | |a1*a0| | // | | | | |a2*a0| | | // | |a3*a2|a3*a0| | | | // | | | |a2*a1| | | | // | | |a3*a1| | | | | // *| | | | | | | | 2| // +|a3*a3|a2*a2|a1*a1|a0*a0| // |--+--+--+--+--+--+--+--| // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is , i.e. follow // // "can't overflow" below mark carrying into high part of // multiplication result, which can't overflow, because it // can never be all ones. mul x15,x5,x4 // a[1]*a[0] umulh x9,x5,x4 mul x16,x6,x4 // a[2]*a[0] umulh x10,x6,x4 mul x17,x7,x4 // a[3]*a[0] umulh x19,x7,x4 adds x16,x16,x9 // accumulate high parts of multiplication mul x8,x6,x5 // a[2]*a[1] umulh x9,x6,x5 adcs x17,x17,x10 mul x10,x7,x5 // a[3]*a[1] umulh x11,x7,x5 adc x19,x19,xzr // can't overflow mul x20,x7,x6 // a[3]*a[2] umulh x1,x7,x6 adds x9,x9,x10 // accumulate high parts of multiplication mul x14,x4,x4 // a[0]*a[0] adc x10,x11,xzr // can't overflow adds x17,x17,x8 // accumulate low parts of multiplication umulh x4,x4,x4 adcs x19,x19,x9 mul x9,x5,x5 // a[1]*a[1] adcs x20,x20,x10 umulh x5,x5,x5 adc x1,x1,xzr // can't overflow adds x15,x15,x15 // acc[1-6]*=2 mul x10,x6,x6 // a[2]*a[2] adcs x16,x16,x16 umulh x6,x6,x6 adcs x17,x17,x17 mul x11,x7,x7 // a[3]*a[3] adcs x19,x19,x19 umulh x7,x7,x7 adcs x20,x20,x20 adcs x1,x1,x1 adc x2,xzr,xzr adds x15,x15,x4 // +a[i]*a[i] adcs x16,x16,x9 adcs x17,x17,x5 adcs x19,x19,x10 adcs x20,x20,x6 lsl x8,x14,#32 adcs x1,x1,x11 lsr x9,x14,#32 adc x2,x2,x7 subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 adcs x16,x17,x10 // +=acc[0]*0xffff0001 adc x17,x11,xzr // can't overflow adds x14,x14,x19 // accumulate upper half adcs x15,x15,x20 adcs x16,x16,x1 adcs x17,x17,x2 adc x19,xzr,xzr adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus 
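// Illustrative sketch, not taken from the generated code: the adds/sbcs/csel
// sequence around this point computes tmp = acc - p and keeps acc only when
// the subtraction borrows. Because the low limb of Lpoly is
// 0xffffffffffffffff (that is, -1), subtracting it is written as
// "adds acc0,#1", which is what the "subs x8,x14,#-1" comment refers to.
// A hedged C model of this final conditional reduction, assuming
// unsigned __int128 support and a separate top carry word as in x19;
// p256_cond_sub is a hypothetical helper name:
//
//   static void p256_cond_sub(uint64_t r[4], const uint64_t acc[4],
//                             uint64_t carry /* bit 256 of acc */) {
//       static const uint64_t p[4] = { 0xffffffffffffffffULL,
//                                      0x00000000ffffffffULL,
//                                      0x0000000000000000ULL,
//                                      0xffffffff00000001ULL };
//       uint64_t t[4], borrow = 0;
//       for (int i = 0; i < 4; i++) {
//           unsigned __int128 d = (unsigned __int128)acc[i] - p[i] - borrow;
//           t[i]   = (uint64_t)d;
//           borrow = (uint64_t)(d >> 64) & 1;
//       }
//       borrow = (borrow > carry);           /* sbcs xzr,x19,xzr: did it borrow? */
//       for (int i = 0; i < 4; i++)
//           r[i] = borrow ? acc[i] : t[i];   /* csel ..., lo (branch-free in asm) */
//   }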
sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x19,xzr // did it borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret // Note that __ecp_nistz256_add_to expects both input vectors pre-loaded to // x4-x7 and x8-x11. This is done because it's used in multiple // contexts, e.g. in multiplication by 2 and 3... .def __ecp_nistz256_add_to .type 32 .endef .align 4 __ecp_nistz256_add_to: adds x14,x14,x8 // ret = a+b adcs x15,x15,x9 adcs x16,x16,x10 adcs x17,x17,x11 adc x1,xzr,xzr // zap x1 adds x8,x14,#1 // subs x8,x4,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x1,xzr // did subtraction borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret .def __ecp_nistz256_sub_from .type 32 .endef .align 4 __ecp_nistz256_sub_from: ldp x8,x9,[x2] ldp x10,x11,[x2,#16] subs x14,x14,x8 // ret = a-b sbcs x15,x15,x9 sbcs x16,x16,x10 sbcs x17,x17,x11 sbc x1,xzr,xzr // zap x1 subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus adcs x9,x15,x12 adcs x10,x16,xzr adc x11,x17,x13 cmp x1,xzr // did subtraction borrow? csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret csel x15,x15,x9,eq csel x16,x16,x10,eq stp x14,x15,[x0] csel x17,x17,x11,eq stp x16,x17,[x0,#16] ret .def __ecp_nistz256_sub_morf .type 32 .endef .align 4 __ecp_nistz256_sub_morf: ldp x8,x9,[x2] ldp x10,x11,[x2,#16] subs x14,x8,x14 // ret = b-a sbcs x15,x9,x15 sbcs x16,x10,x16 sbcs x17,x11,x17 sbc x1,xzr,xzr // zap x1 subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus adcs x9,x15,x12 adcs x10,x16,xzr adc x11,x17,x13 cmp x1,xzr // did subtraction borrow? csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret csel x15,x15,x9,eq csel x16,x16,x10,eq stp x14,x15,[x0] csel x17,x17,x11,eq stp x16,x17,[x0,#16] ret .def __ecp_nistz256_div_by_2 .type 32 .endef .align 4 __ecp_nistz256_div_by_2: subs x8,x14,#1 // adds x8,x4,#-1 // tmp = a+modulus adcs x9,x15,x12 adcs x10,x16,xzr adcs x11,x17,x13 adc x1,xzr,xzr // zap x1 tst x14,#1 // is a even? csel x14,x14,x8,eq // ret = even ? a : a+modulus csel x15,x15,x9,eq csel x16,x16,x10,eq csel x17,x17,x11,eq csel x1,xzr,x1,eq lsr x14,x14,#1 // ret >>= 1 orr x14,x14,x15,lsl#63 lsr x15,x15,#1 orr x15,x15,x16,lsl#63 lsr x16,x16,#1 orr x16,x16,x17,lsl#63 lsr x17,x17,#1 stp x14,x15,[x0] orr x17,x17,x1,lsl#63 stp x16,x17,[x0,#16] ret .globl ecp_nistz256_point_double .def ecp_nistz256_point_double .type 32 .endef .align 5 ecp_nistz256_point_double: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] sub sp,sp,#32*4 Ldouble_shortcut: ldp x14,x15,[x1,#32] mov x21,x0 ldp x16,x17,[x1,#48] mov x22,x1 adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] mov x8,x14 ldr x13,[x13,#24] mov x9,x15 ldp x4,x5,[x22,#64] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[x22,#64+16] add x0,sp,#0 bl __ecp_nistz256_add_to // p256_mul_by_2(S, in_y); add x0,sp,#64 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z); ldp x8,x9,[x22] ldp x10,x11,[x22,#16] mov x4,x14 // put Zsqr aside for p256_sub mov x5,x15 mov x6,x16 mov x7,x17 add x0,sp,#32 bl __ecp_nistz256_add_to // p256_add(M, Zsqr, in_x); add x2,x22,#0 mov x14,x4 // restore Zsqr mov x15,x5 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont mov x16,x6 mov x17,x7 ldp x6,x7,[sp,#0+16] add x0,sp,#64 bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr); add x0,sp,#0 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S); ldr x3,[x22,#32] ldp x4,x5,[x22,#64] ldp x6,x7,[x22,#64+16] add x2,x22,#32 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y); mov x8,x14 mov x9,x15 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[sp,#0+16] add x0,x21,#64 bl __ecp_nistz256_add_to // p256_mul_by_2(res_z, tmp0); add x0,sp,#96 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S); ldr x3,[sp,#64] // forward load for p256_mul_mont ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x0,x21,#32 bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0); add x2,sp,#64 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr); mov x8,x14 // duplicate M mov x9,x15 mov x10,x16 mov x11,x17 mov x4,x14 // put M aside mov x5,x15 mov x6,x16 mov x7,x17 add x0,sp,#32 bl __ecp_nistz256_add_to mov x8,x4 // restore M mov x9,x5 ldr x3,[x22] // forward load for p256_mul_mont mov x10,x6 ldp x4,x5,[sp,#0] mov x11,x7 ldp x6,x7,[sp,#0+16] bl __ecp_nistz256_add_to // p256_mul_by_3(M, M); add x2,x22,#0 add x0,sp,#0 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x); mov x8,x14 mov x9,x15 ldp x4,x5,[sp,#32] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[sp,#32+16] add x0,sp,#96 bl __ecp_nistz256_add_to // p256_mul_by_2(tmp0, S); add x0,x21,#0 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M); add x2,sp,#96 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0); add x2,sp,#0 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x); ldr x3,[sp,#32] mov x4,x14 // copy S mov x5,x15 mov x6,x16 mov x7,x17 add x2,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M); add x2,x21,#32 add x0,x21,#32 bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y); add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .globl ecp_nistz256_point_add .def ecp_nistz256_point_add .type 32 .endef .align 5 ecp_nistz256_point_add: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#32*12 ldp x4,x5,[x2,#64] // in2_z ldp x6,x7,[x2,#64+16] mov x21,x0 mov x22,x1 mov x23,x2 adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] orr x8,x4,x5 orr x10,x6,x7 orr x25,x8,x10 cmp x25,#0 csetm x25,ne // ~in2infty add x0,sp,#192 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z); ldp x4,x5,[x22,#64] // in1_z ldp x6,x7,[x22,#64+16] orr x8,x4,x5 orr x10,x6,x7 orr x24,x8,x10 cmp x24,#0 csetm x24,ne // ~in1infty add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z); ldr x3,[x23,#64] ldp x4,x5,[sp,#192] ldp x6,x7,[sp,#192+16] add x2,x23,#64 add x0,sp,#320 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z); ldr x3,[x22,#64] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,x22,#64 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z); ldr x3,[x22,#32] ldp x4,x5,[sp,#320] ldp x6,x7,[sp,#320+16] add x2,x22,#32 add x0,sp,#320 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y); ldr x3,[x23,#32] ldp x4,x5,[sp,#352] ldp x6,x7,[sp,#352+16] add x2,x23,#32 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y); add x2,sp,#320 ldr x3,[sp,#192] // forward load for p256_mul_mont ldp x4,x5,[x22] ldp x6,x7,[x22,#16] add x0,sp,#160 bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1); orr x14,x14,x15 // see if result is zero orr x16,x16,x17 orr x26,x14,x16 // ~is_equal(S1,S2) add x2,sp,#192 add x0,sp,#256 bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr); ldr x3,[sp,#128] ldp x4,x5,[x23] ldp x6,x7,[x23,#16] add x2,sp,#128 add x0,sp,#288 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr); add x2,sp,#256 ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont ldp x6,x7,[sp,#160+16] add x0,sp,#96 bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1); orr x14,x14,x15 // see if result is zero orr x16,x16,x17 orr x14,x14,x16 // ~is_equal(U1,U2) mvn x27,x24 // -1/0 -> 0/-1 mvn x28,x25 // -1/0 -> 0/-1 orr x14,x14,x27 orr x14,x14,x28 orr x14,x14,x26 cbnz x14,Ladd_proceed // if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2)) Ladd_double: mov x1,x22 mov x0,x21 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] add sp,sp,#256 // #256 is from #32*(12-4). 
difference in stack frames b Ldouble_shortcut .align 4 Ladd_proceed: add x0,sp,#192 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R); ldr x3,[x22,#64] ldp x4,x5,[sp,#96] ldp x6,x7,[sp,#96+16] add x2,x22,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z); ldp x4,x5,[sp,#96] ldp x6,x7,[sp,#96+16] add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H); ldr x3,[x23,#64] ldp x4,x5,[sp,#64] ldp x6,x7,[sp,#64+16] add x2,x23,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z); ldr x3,[sp,#96] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,sp,#96 add x0,sp,#224 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H); ldr x3,[sp,#128] ldp x4,x5,[sp,#256] ldp x6,x7,[sp,#256+16] add x2,sp,#128 add x0,sp,#288 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr); mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 add x0,sp,#128 bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2); add x2,sp,#192 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr); add x2,sp,#224 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub); add x2,sp,#288 ldr x3,[sp,#224] // forward load for p256_mul_mont ldp x4,x5,[sp,#320] ldp x6,x7,[sp,#320+16] add x0,sp,#32 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x); add x2,sp,#224 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub); ldr x3,[sp,#160] ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x2,sp,#160 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R); add x2,sp,#352 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2); ldp x4,x5,[sp,#0] // res ldp x6,x7,[sp,#0+16] ldp x8,x9,[x23] // in2 ldp x10,x11,[x23,#16] ldp x14,x15,[x22,#0] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#0+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+0+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+0+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#0+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#0+48] stp x14,x15,[x21,#0] stp x16,x17,[x21,#0+16] ldp x14,x15,[x22,#32] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#32+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+32+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+32+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#32+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#32+48] stp x14,x15,[x21,#32] stp x16,x17,[x21,#32+16] ldp x14,x15,[x22,#64] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#64+16] csel x8,x4,x8,ne csel x9,x5,x9,ne csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? csel x14,x8,x14,ne csel x15,x9,x15,ne csel x16,x10,x16,ne csel x17,x11,x17,ne stp x14,x15,[x21,#64] stp x16,x17,[x21,#64+16] Ladd_done: add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .globl ecp_nistz256_point_add_affine .def ecp_nistz256_point_add_affine .type 32 .endef .align 5 ecp_nistz256_point_add_affine: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-80]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] sub sp,sp,#32*10 mov x21,x0 mov x22,x1 mov x23,x2 adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] ldp x4,x5,[x1,#64] // in1_z ldp x6,x7,[x1,#64+16] orr x8,x4,x5 orr x10,x6,x7 orr x24,x8,x10 cmp x24,#0 csetm x24,ne // ~in1infty ldp x14,x15,[x2] // in2_x ldp x16,x17,[x2,#16] ldp x8,x9,[x2,#32] // in2_y ldp x10,x11,[x2,#48] orr x14,x14,x15 orr x16,x16,x17 orr x8,x8,x9 orr x10,x10,x11 orr x14,x14,x16 orr x8,x8,x10 orr x25,x14,x8 cmp x25,#0 csetm x25,ne // ~in2infty add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z); mov x4,x14 mov x5,x15 mov x6,x16 mov x7,x17 ldr x3,[x23] add x2,x23,#0 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x); add x2,x22,#0 ldr x3,[x22,#64] // forward load for p256_mul_mont ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x0,sp,#160 bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x); add x2,x22,#64 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z); ldr x3,[x22,#64] ldp x4,x5,[sp,#160] ldp x6,x7,[sp,#160+16] add x2,x22,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z); ldr x3,[x23,#32] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,x23,#32 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y); add x2,x22,#32 ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont ldp x6,x7,[sp,#160+16] add x0,sp,#192 bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y); add x0,sp,#224 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H); ldp x4,x5,[sp,#192] ldp x6,x7,[sp,#192+16] add x0,sp,#288 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R); ldr x3,[sp,#160] ldp x4,x5,[sp,#224] ldp x6,x7,[sp,#224+16] add x2,sp,#160 add x0,sp,#256 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H); ldr x3,[x22] ldp x4,x5,[sp,#224] ldp x6,x7,[sp,#224+16] add x2,x22,#0 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in1_x, Hsqr); mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 add x0,sp,#224 bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2); add x2,sp,#288 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr); add x2,sp,#256 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub); add x2,sp,#96 ldr x3,[x22,#32] // forward load for p256_mul_mont ldp x4,x5,[sp,#256] ldp x6,x7,[sp,#256+16] add x0,sp,#32 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x); add x2,x22,#32 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub); ldr x3,[sp,#192] ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x2,sp,#192 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R); add x2,sp,#128 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2); ldp x4,x5,[sp,#0] // res ldp x6,x7,[sp,#0+16] ldp x8,x9,[x23] // in2 ldp x10,x11,[x23,#16] ldp x14,x15,[x22,#0] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#0+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+0+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+0+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#0+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#0+48] stp x14,x15,[x21,#0] stp x16,x17,[x21,#0+16] adrp x23,Lone_mont-64 add x23,x23,:lo12:Lone_mont-64 ldp x14,x15,[x22,#32] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#32+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+32+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? 
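// Illustrative sketch, not taken from the generated code: the csel chains in
// this output section pick, in constant time,
//   result = in2infty ? in1 : (in1infty ? in2 : computed)
// using the negated flags produced earlier with csetm (all-ones means "not
// infinity"); in this affine path the in2 Z-coordinate fallback comes from
// Lone_mont. A masked C model of one 4-limb coordinate, with select_coord as
// a hypothetical helper name (the assembly uses flag-based csel rather than
// masks, but the effect is the same):
//
//   static void select_coord(uint64_t out[4], const uint64_t res[4],
//                            const uint64_t in1[4], const uint64_t in2[4],
//                            uint64_t not_in1infty, uint64_t not_in2infty) {
//       for (int i = 0; i < 4; i++) {
//           /* csel ..., ne on ~in1infty: computed value vs. in2 */
//           uint64_t t = (res[i] & not_in1infty) | (in2[i] & ~not_in1infty);
//           /* csel ..., ne on ~in2infty: the value above vs. in1 */
//           out[i]     = (t      & not_in2infty) | (in1[i] & ~not_in2infty);
//       }
//   }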
ldp x6,x7,[sp,#0+32+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#32+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#32+48] stp x14,x15,[x21,#32] stp x16,x17,[x21,#32+16] ldp x14,x15,[x22,#64] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#64+16] csel x8,x4,x8,ne csel x9,x5,x9,ne csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? csel x14,x8,x14,ne csel x15,x9,x15,ne csel x16,x10,x16,ne csel x17,x11,x17,ne stp x14,x15,[x21,#64] stp x16,x17,[x21,#64+16] add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x29,x30,[sp],#80 AARCH64_VALIDATE_LINK_REGISTER ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4], // uint64_t b[4]); .globl ecp_nistz256_ord_mul_mont .def ecp_nistz256_ord_mul_mont .type 32 .endef .align 4 ecp_nistz256_ord_mul_mont: AARCH64_VALID_CALL_TARGET // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] adrp x23,Lord add x23,x23,:lo12:Lord ldr x3,[x2] // bp[0] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] ldp x12,x13,[x23,#0] ldp x21,x22,[x23,#16] ldr x23,[x23,#32] mul x14,x4,x3 // a[0]*b[0] umulh x8,x4,x3 mul x15,x5,x3 // a[1]*b[0] umulh x9,x5,x3 mul x16,x6,x3 // a[2]*b[0] umulh x10,x6,x3 mul x17,x7,x3 // a[3]*b[0] umulh x19,x7,x3 mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts of multiplication adcs x16,x16,x9 adcs x17,x17,x10 adc x19,x19,xzr mov x20,xzr ldr x3,[x2,#8*1] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr ldr x3,[x2,#8*2] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr ldr x3,[x2,#8*3] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc 
x20,xzr,xzr lsl x8,x24,#32 // last reduction subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr subs x8,x14,x12 // ret -= modulus sbcs x9,x15,x13 sbcs x10,x16,x21 sbcs x11,x17,x22 sbcs xzr,x19,xzr csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldr x29,[sp],#64 ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4], // uint64_t rep); .globl ecp_nistz256_ord_sqr_mont .def ecp_nistz256_ord_sqr_mont .type 32 .endef .align 4 ecp_nistz256_ord_sqr_mont: AARCH64_VALID_CALL_TARGET // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] adrp x23,Lord add x23,x23,:lo12:Lord ldp x4,x5,[x1] ldp x6,x7,[x1,#16] ldp x12,x13,[x23,#0] ldp x21,x22,[x23,#16] ldr x23,[x23,#32] b Loop_ord_sqr .align 4 Loop_ord_sqr: sub x2,x2,#1 //////////////////////////////////////////////////////////////// // | | | | | |a1*a0| | // | | | | |a2*a0| | | // | |a3*a2|a3*a0| | | | // | | | |a2*a1| | | | // | | |a3*a1| | | | | // *| | | | | | | | 2| // +|a3*a3|a2*a2|a1*a1|a0*a0| // |--+--+--+--+--+--+--+--| // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is , i.e. follow // // "can't overflow" below mark carrying into high part of // multiplication result, which can't overflow, because it // can never be all ones. mul x15,x5,x4 // a[1]*a[0] umulh x9,x5,x4 mul x16,x6,x4 // a[2]*a[0] umulh x10,x6,x4 mul x17,x7,x4 // a[3]*a[0] umulh x19,x7,x4 adds x16,x16,x9 // accumulate high parts of multiplication mul x8,x6,x5 // a[2]*a[1] umulh x9,x6,x5 adcs x17,x17,x10 mul x10,x7,x5 // a[3]*a[1] umulh x11,x7,x5 adc x19,x19,xzr // can't overflow mul x20,x7,x6 // a[3]*a[2] umulh x1,x7,x6 adds x9,x9,x10 // accumulate high parts of multiplication mul x14,x4,x4 // a[0]*a[0] adc x10,x11,xzr // can't overflow adds x17,x17,x8 // accumulate low parts of multiplication umulh x4,x4,x4 adcs x19,x19,x9 mul x9,x5,x5 // a[1]*a[1] adcs x20,x20,x10 umulh x5,x5,x5 adc x1,x1,xzr // can't overflow adds x15,x15,x15 // acc[1-6]*=2 mul x10,x6,x6 // a[2]*a[2] adcs x16,x16,x16 umulh x6,x6,x6 adcs x17,x17,x17 mul x11,x7,x7 // a[3]*a[3] adcs x19,x19,x19 umulh x7,x7,x7 adcs x20,x20,x20 adcs x1,x1,x1 adc x3,xzr,xzr adds x15,x15,x4 // +a[i]*a[i] mul x24,x14,x23 adcs x16,x16,x9 adcs x17,x17,x5 adcs x19,x19,x10 adcs x20,x20,x6 adcs x1,x1,x11 adc x3,x3,x7 subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adc x17,xzr,x24 // can't overflow mul x11,x14,x23 lsl x8,x24,#32 subs x15,x15,x24 lsr x9,x24,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x11 mul x10,x13,x11 umulh x24,x13,x11 adcs x10,x10,x9 adc x24,x24,xzr adds x14,x15,x10 adcs x15,x16,x24 adcs x16,x17,x11 adc x17,xzr,x11 // can't overflow mul x24,x14,x23 lsl x8,x11,#32 subs x15,x15,x11 lsr x9,x11,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adc x17,xzr,x24 // can't 
overflow mul x11,x14,x23 lsl x8,x24,#32 subs x15,x15,x24 lsr x9,x24,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x11 mul x10,x13,x11 umulh x24,x13,x11 adcs x10,x10,x9 adc x24,x24,xzr adds x14,x15,x10 adcs x15,x16,x24 adcs x16,x17,x11 adc x17,xzr,x11 // can't overflow lsl x8,x11,#32 subs x15,x15,x11 lsr x9,x11,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow adds x14,x14,x19 // accumulate upper half adcs x15,x15,x20 adcs x16,x16,x1 adcs x17,x17,x3 adc x19,xzr,xzr subs x8,x14,x12 // ret -= modulus sbcs x9,x15,x13 sbcs x10,x16,x21 sbcs x11,x17,x22 sbcs xzr,x19,xzr csel x4,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x5,x15,x9,lo csel x6,x16,x10,lo csel x7,x17,x11,lo cbnz x2,Loop_ord_sqr stp x4,x5,[x0] stp x6,x7,[x0,#16] ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldr x29,[sp],#64 ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_select_w5(uint64_t *val, uint64_t *in_t, int index); .globl ecp_nistz256_select_w5 .def ecp_nistz256_select_w5 .type 32 .endef .align 4 ecp_nistz256_select_w5: AARCH64_VALID_CALL_TARGET // x10 := x0 // w9 := 0; loop counter and incremented internal index mov x10, x0 mov w9, #0 // [v16-v21] := 0 movi v16.16b, #0 movi v17.16b, #0 movi v18.16b, #0 movi v19.16b, #0 movi v20.16b, #0 movi v21.16b, #0 Lselect_w5_loop: // Loop 16 times. // Increment index (loop counter); tested at the end of the loop add w9, w9, #1 // [v22-v27] := Load a (3*256-bit = 6*128-bit) table entry starting at x1 // and advance x1 to point to the next entry ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64 // x11 := (w9 == w2)? All 1s : All 0s cmp w9, w2 csetm x11, eq // continue loading ... ld1 {v26.2d, v27.2d}, [x1],#32 // duplicate mask_64 into Mask (all 0s or all 1s) dup v3.2d, x11 // [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19] // i.e., values in output registers will remain the same if w9 != w2 bit v16.16b, v22.16b, v3.16b bit v17.16b, v23.16b, v3.16b bit v18.16b, v24.16b, v3.16b bit v19.16b, v25.16b, v3.16b bit v20.16b, v26.16b, v3.16b bit v21.16b, v27.16b, v3.16b // If bit #4 is not 0 (i.e. idx_ctr < 16) loop back tbz w9, #4, Lselect_w5_loop // Write [v16-v21] to memory at the output pointer st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x10],#64 st1 {v20.2d, v21.2d}, [x10] ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_select_w7(uint64_t *val, uint64_t *in_t, int index); .globl ecp_nistz256_select_w7 .def ecp_nistz256_select_w7 .type 32 .endef .align 4 ecp_nistz256_select_w7: AARCH64_VALID_CALL_TARGET // w9 := 0; loop counter and incremented internal index mov w9, #0 // [v16-v21] := 0 movi v16.16b, #0 movi v17.16b, #0 movi v18.16b, #0 movi v19.16b, #0 Lselect_w7_loop: // Loop 64 times. // Increment index (loop counter); tested at the end of the loop add w9, w9, #1 // [v22-v25] := Load a (2*256-bit = 4*128-bit) table entry starting at x1 // and advance x1 to point to the next entry ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64 // x11 := (w9 == w2)? All 1s : All 0s cmp w9, w2 csetm x11, eq // duplicate mask_64 into Mask (all 0s or all 1s) dup v3.2d, x11 // [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19] // i.e., values in output registers will remain the same if w9 != w2 bit v16.16b, v22.16b, v3.16b bit v17.16b, v23.16b, v3.16b bit v18.16b, v24.16b, v3.16b bit v19.16b, v25.16b, v3.16b // If bit #6 is not 0 (i.e. 
idx_ctr < 64) loop back tbz w9, #6, Lselect_w7_loop // Write [v16-v19] to memory at the output pointer st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x0] ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
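// Illustrative sketch, not taken from the generated code: both
// ecp_nistz256_select_w5 and ecp_nistz256_select_w7 above scan the whole
// table and accumulate only the entry whose 1-based position matches the
// requested index, so the memory access pattern does not depend on the
// secret index. A hedged C model of the w7 variant, with select_w7_sketch as
// a hypothetical helper name (the real code does the accumulation with NEON
// bit-select instructions):
//
//   #include <stdint.h>
//
//   static void select_w7_sketch(uint64_t val[8],
//                                const uint64_t table[64][8],
//                                unsigned index /* 1..64; 0 selects nothing */) {
//       for (int j = 0; j < 8; j++) val[j] = 0;              /* movi v16-v19, #0 */
//       for (unsigned i = 1; i <= 64; i++) {
//           uint64_t mask = (i == index) ? ~UINT64_C(0) : 0;  /* cmp + csetm eq  */
//           for (int j = 0; j < 8; j++)
//               val[j] |= table[i - 1][j] & mask;             /* bit vX, vY, v3  */
//       }
//   }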
wlsfx/bnbb
44,289
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/md5-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) .text .globl md5_block_asm_data_order md5_block_asm_data_order: .cfi_startproc // Save all callee-saved registers stp x19,x20,[sp,#-80]! .cfi_def_cfa_offset 80 .cfi_offset x19, -80 .cfi_offset x20, -72 stp x21,x22,[sp,#16] .cfi_offset x21, -64 .cfi_offset x22, -56 stp x23,x24,[sp,#32] .cfi_offset x23, -48 .cfi_offset x24, -40 stp x25,x26,[sp,#48] .cfi_offset x25, -32 .cfi_offset x26, -24 stp x27,x28,[sp,#64] .cfi_offset x27, -16 .cfi_offset x28, -8 ldp w10, w11, [x0, #0] // Load MD5 state->A and state->B ldp w12, w13, [x0, #8] // Load MD5 state->C and state->D .align 5 Lmd5_blocks_loop: eor x17, x12, x13 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z) and x16, x17, x11 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) ldp x15, x3, [x1] // Load 4 words of input data0 M[0]/0 eor x14, x16, x13 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x9, #0xa478 // Load lower half of constant 0xd76aa478 movk x9, #0xd76a, lsl #16 // Load upper half of constant 0xd76aa478 add w8, w10, w15 // Add dest value add w7, w8, w9 // Add constant 0xd76aa478 add w6, w7, w14 // Add aux function result ror w6, w6, #25 // Rotate left s=7 bits eor x5, x11, x12 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z) add w4, w11, w6 // Add X parameter round 1 A=FF(A, B, C, D, 0xd76aa478, s=7, M[0]) and x8, x5, x4 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) eor x17, x8, x12 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x16, #0xb756 // Load lower half of constant 0xe8c7b756 movk x16, #0xe8c7, lsl #16 // Load upper half of constant 0xe8c7b756 lsr x20, x15, #32 // Right shift high input value containing M[1] add w9, w13, w20 // Add dest value add w7, w9, w16 // Add constant 0xe8c7b756 add w14, w7, w17 // Add aux function result ror w14, w14, #20 // Rotate left s=12 bits eor x6, x4, x11 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z) add w5, w4, w14 // Add X parameter round 1 D=FF(D, A, B, C, 0xe8c7b756, s=12, M[1]) and x8, x6, x5 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) eor x9, x8, x11 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x16, #0x70db // Load lower half of constant 0x242070db movk x16, #0x2420, lsl #16 // Load upper half of constant 0x242070db add w7, w12, w3 // Add dest value add w17, w7, w16 // Add constant 0x242070db add w14, w17, w9 // Add aux function result ror w14, w14, #15 // Rotate left s=17 bits eor x6, x5, x4 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z) add w8, w5, w14 // Add X parameter round 1 C=FF(C, D, A, B, 0x242070db, s=17, M[2]) and x7, x6, x8 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) eor x16, x7, x4 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x9, #0xceee // Load lower half of constant 0xc1bdceee movk x9, #0xc1bd, lsl #16 // Load upper half of constant 0xc1bdceee lsr x21, x3, #32 // Right shift high input value containing M[3] add w14, w11, w21 // Add dest value add w6, w14, w9 // Add constant 0xc1bdceee add w7, w6, w16 // Add aux function result ror w7, w7, #10 // Rotate left s=22 bits eor x17, x8, x5 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z) add w9, w8, w7 // Add X parameter round 1 B=FF(B, C, D, A, 0xc1bdceee, s=22, M[3]) ldp x14, x7, [x1, #16] // Load 4 words of input data0 M[4]/0w and x16, x17, x9 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) eor x6, x16, 
x5 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x16, #0xfaf // Load lower half of constant 0xf57c0faf movk x16, #0xf57c, lsl #16 // Load upper half of constant 0xf57c0faf add w17, w4, w14 // Add dest value add w16, w17, w16 // Add constant 0xf57c0faf add w4, w16, w6 // Add aux function result ror w4, w4, #25 // Rotate left s=7 bits eor x16, x9, x8 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z) add w17, w9, w4 // Add X parameter round 1 A=FF(A, B, C, D, 0xf57c0faf, s=7, M[4]) and x16, x16, x17 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) eor x6, x16, x8 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x4, #0xc62a // Load lower half of constant 0x4787c62a movk x4, #0x4787, lsl #16 // Load upper half of constant 0x4787c62a lsr x22, x14, #32 // Right shift high input value containing M[5] add w16, w5, w22 // Add dest value add w16, w16, w4 // Add constant 0x4787c62a add w5, w16, w6 // Add aux function result ror w5, w5, #20 // Rotate left s=12 bits eor x4, x17, x9 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z) add w19, w17, w5 // Add X parameter round 1 D=FF(D, A, B, C, 0x4787c62a, s=12, M[5]) and x6, x4, x19 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) eor x5, x6, x9 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x4, #0x4613 // Load lower half of constant 0xa8304613 movk x4, #0xa830, lsl #16 // Load upper half of constant 0xa8304613 add w6, w8, w7 // Add dest value add w8, w6, w4 // Add constant 0xa8304613 add w4, w8, w5 // Add aux function result ror w4, w4, #15 // Rotate left s=17 bits eor x6, x19, x17 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z) add w8, w19, w4 // Add X parameter round 1 C=FF(C, D, A, B, 0xa8304613, s=17, M[6]) and x5, x6, x8 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) eor x4, x5, x17 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x6, #0x9501 // Load lower half of constant 0xfd469501 movk x6, #0xfd46, lsl #16 // Load upper half of constant 0xfd469501 lsr x23, x7, #32 // Right shift high input value containing M[7] add w9, w9, w23 // Add dest value add w5, w9, w6 // Add constant 0xfd469501 add w9, w5, w4 // Add aux function result ror w9, w9, #10 // Rotate left s=22 bits eor x6, x8, x19 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z) add w4, w8, w9 // Add X parameter round 1 B=FF(B, C, D, A, 0xfd469501, s=22, M[7]) ldp x5, x16, [x1, #32] // Load 4 words of input data0 M[8]/0 and x9, x6, x4 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) eor x6, x9, x19 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x9, #0x98d8 // Load lower half of constant 0x698098d8 movk x9, #0x6980, lsl #16 // Load upper half of constant 0x698098d8 add w17, w17, w5 // Add dest value add w9, w17, w9 // Add constant 0x698098d8 add w17, w9, w6 // Add aux function result ror w17, w17, #25 // Rotate left s=7 bits eor x9, x4, x8 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z) add w6, w4, w17 // Add X parameter round 1 A=FF(A, B, C, D, 0x698098d8, s=7, M[8]) and x17, x9, x6 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) eor x9, x17, x8 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x17, #0xf7af // Load lower half of constant 0x8b44f7af movk x17, #0x8b44, lsl #16 // Load upper half of constant 0x8b44f7af lsr x24, x5, #32 // Right shift high input value containing M[9] add w19, w19, w24 // Add dest value add w17, w19, w17 // Add constant 0x8b44f7af add w19, w17, w9 // Add aux function result ror w19, w19, #20 // Rotate left s=12 bits eor x9, x6, x4 // Begin aux function round 1 
F(x,y,z)=(((y^z)&x)^z) add w17, w6, w19 // Add X parameter round 1 D=FF(D, A, B, C, 0x8b44f7af, s=12, M[9]) and x9, x9, x17 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) eor x9, x9, x4 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x11, #0x5bb1 // Load lower half of constant 0xffff5bb1 movk x11, #0xffff, lsl #16 // Load upper half of constant 0xffff5bb1 add w8, w8, w16 // Add dest value add w8, w8, w11 // Add constant 0xffff5bb1 add w8, w8, w9 // Add aux function result ror w8, w8, #15 // Rotate left s=17 bits eor x9, x17, x6 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z) add w8, w17, w8 // Add X parameter round 1 C=FF(C, D, A, B, 0xffff5bb1, s=17, M[10]) and x9, x9, x8 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) eor x9, x9, x6 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x11, #0xd7be // Load lower half of constant 0x895cd7be movk x11, #0x895c, lsl #16 // Load upper half of constant 0x895cd7be lsr x25, x16, #32 // Right shift high input value containing M[11] add w4, w4, w25 // Add dest value add w4, w4, w11 // Add constant 0x895cd7be add w9, w4, w9 // Add aux function result ror w9, w9, #10 // Rotate left s=22 bits eor x4, x8, x17 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z) add w9, w8, w9 // Add X parameter round 1 B=FF(B, C, D, A, 0x895cd7be, s=22, M[11]) ldp x11, x12, [x1, #48] // Load 4 words of input data0 M[12]/0 and x4, x4, x9 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) eor x4, x4, x17 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x19, #0x1122 // Load lower half of constant 0x6b901122 movk x19, #0x6b90, lsl #16 // Load upper half of constant 0x6b901122 add w6, w6, w11 // Add dest value add w6, w6, w19 // Add constant 0x6b901122 add w4, w6, w4 // Add aux function result ror w4, w4, #25 // Rotate left s=7 bits eor x6, x9, x8 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z) add w4, w9, w4 // Add X parameter round 1 A=FF(A, B, C, D, 0x6b901122, s=7, M[12]) and x6, x6, x4 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) eor x6, x6, x8 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x19, #0x7193 // Load lower half of constant 0xfd987193 movk x19, #0xfd98, lsl #16 // Load upper half of constant 0xfd987193 lsr x26, x11, #32 // Right shift high input value containing M[13] add w17, w17, w26 // Add dest value add w17, w17, w19 // Add constant 0xfd987193 add w17, w17, w6 // Add aux function result ror w17, w17, #20 // Rotate left s=12 bits eor x6, x4, x9 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z) add w17, w4, w17 // Add X parameter round 1 D=FF(D, A, B, C, 0xfd987193, s=12, M[13]) and x6, x6, x17 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) eor x6, x6, x9 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x13, #0x438e // Load lower half of constant 0xa679438e movk x13, #0xa679, lsl #16 // Load upper half of constant 0xa679438e add w8, w8, w12 // Add dest value add w8, w8, w13 // Add constant 0xa679438e add w8, w8, w6 // Add aux function result ror w8, w8, #15 // Rotate left s=17 bits eor x6, x17, x4 // Begin aux function round 1 F(x,y,z)=(((y^z)&x)^z) add w8, w17, w8 // Add X parameter round 1 C=FF(C, D, A, B, 0xa679438e, s=17, M[14]) and x6, x6, x8 // Continue aux function round 1 F(x,y,z)=(((y^z)&x)^z) eor x6, x6, x4 // End aux function round 1 F(x,y,z)=(((y^z)&x)^z) movz x13, #0x821 // Load lower half of constant 0x49b40821 movk x13, #0x49b4, lsl #16 // Load upper half of constant 0x49b40821 lsr x27, x12, #32 // Right shift high input value containing M[15] add w9, w9, 
w27 // Add dest value add w9, w9, w13 // Add constant 0x49b40821 add w9, w9, w6 // Add aux function result ror w9, w9, #10 // Rotate left s=22 bits bic x6, x8, x17 // Aux function round 2 (~z & y) add w9, w8, w9 // Add X parameter round 1 B=FF(B, C, D, A, 0x49b40821, s=22, M[15]) movz x13, #0x2562 // Load lower half of constant 0xf61e2562 movk x13, #0xf61e, lsl #16 // Load upper half of constant 0xf61e2562 add w4, w4, w20 // Add dest value add w4, w4, w13 // Add constant 0xf61e2562 and x13, x9, x17 // Aux function round 2 (x & z) add w4, w4, w6 // Add (~z & y) add w4, w4, w13 // Add (x & z) ror w4, w4, #27 // Rotate left s=5 bits bic x6, x9, x8 // Aux function round 2 (~z & y) add w4, w9, w4 // Add X parameter round 2 A=GG(A, B, C, D, 0xf61e2562, s=5, M[1]) movz x13, #0xb340 // Load lower half of constant 0xc040b340 movk x13, #0xc040, lsl #16 // Load upper half of constant 0xc040b340 add w17, w17, w7 // Add dest value add w17, w17, w13 // Add constant 0xc040b340 and x13, x4, x8 // Aux function round 2 (x & z) add w17, w17, w6 // Add (~z & y) add w17, w17, w13 // Add (x & z) ror w17, w17, #23 // Rotate left s=9 bits bic x6, x4, x9 // Aux function round 2 (~z & y) add w17, w4, w17 // Add X parameter round 2 D=GG(D, A, B, C, 0xc040b340, s=9, M[6]) movz x13, #0x5a51 // Load lower half of constant 0x265e5a51 movk x13, #0x265e, lsl #16 // Load upper half of constant 0x265e5a51 add w8, w8, w25 // Add dest value add w8, w8, w13 // Add constant 0x265e5a51 and x13, x17, x9 // Aux function round 2 (x & z) add w8, w8, w6 // Add (~z & y) add w8, w8, w13 // Add (x & z) ror w8, w8, #18 // Rotate left s=14 bits bic x6, x17, x4 // Aux function round 2 (~z & y) add w8, w17, w8 // Add X parameter round 2 C=GG(C, D, A, B, 0x265e5a51, s=14, M[11]) movz x13, #0xc7aa // Load lower half of constant 0xe9b6c7aa movk x13, #0xe9b6, lsl #16 // Load upper half of constant 0xe9b6c7aa add w9, w9, w15 // Add dest value add w9, w9, w13 // Add constant 0xe9b6c7aa and x13, x8, x4 // Aux function round 2 (x & z) add w9, w9, w6 // Add (~z & y) add w9, w9, w13 // Add (x & z) ror w9, w9, #12 // Rotate left s=20 bits bic x6, x8, x17 // Aux function round 2 (~z & y) add w9, w8, w9 // Add X parameter round 2 B=GG(B, C, D, A, 0xe9b6c7aa, s=20, M[0]) movz x13, #0x105d // Load lower half of constant 0xd62f105d movk x13, #0xd62f, lsl #16 // Load upper half of constant 0xd62f105d add w4, w4, w22 // Add dest value add w4, w4, w13 // Add constant 0xd62f105d and x13, x9, x17 // Aux function round 2 (x & z) add w4, w4, w6 // Add (~z & y) add w4, w4, w13 // Add (x & z) ror w4, w4, #27 // Rotate left s=5 bits bic x6, x9, x8 // Aux function round 2 (~z & y) add w4, w9, w4 // Add X parameter round 2 A=GG(A, B, C, D, 0xd62f105d, s=5, M[5]) movz x13, #0x1453 // Load lower half of constant 0x2441453 movk x13, #0x244, lsl #16 // Load upper half of constant 0x2441453 add w17, w17, w16 // Add dest value add w17, w17, w13 // Add constant 0x2441453 and x13, x4, x8 // Aux function round 2 (x & z) add w17, w17, w6 // Add (~z & y) add w17, w17, w13 // Add (x & z) ror w17, w17, #23 // Rotate left s=9 bits bic x6, x4, x9 // Aux function round 2 (~z & y) add w17, w4, w17 // Add X parameter round 2 D=GG(D, A, B, C, 0x2441453, s=9, M[10]) movz x13, #0xe681 // Load lower half of constant 0xd8a1e681 movk x13, #0xd8a1, lsl #16 // Load upper half of constant 0xd8a1e681 add w8, w8, w27 // Add dest value add w8, w8, w13 // Add constant 0xd8a1e681 and x13, x17, x9 // Aux function round 2 (x & z) add w8, w8, w6 // Add (~z & y) add w8, w8, w13 // Add (x & z) ror w8, 
w8, #18 // Rotate left s=14 bits bic x6, x17, x4 // Aux function round 2 (~z & y) add w8, w17, w8 // Add X parameter round 2 C=GG(C, D, A, B, 0xd8a1e681, s=14, M[15]) movz x13, #0xfbc8 // Load lower half of constant 0xe7d3fbc8 movk x13, #0xe7d3, lsl #16 // Load upper half of constant 0xe7d3fbc8 add w9, w9, w14 // Add dest value add w9, w9, w13 // Add constant 0xe7d3fbc8 and x13, x8, x4 // Aux function round 2 (x & z) add w9, w9, w6 // Add (~z & y) add w9, w9, w13 // Add (x & z) ror w9, w9, #12 // Rotate left s=20 bits bic x6, x8, x17 // Aux function round 2 (~z & y) add w9, w8, w9 // Add X parameter round 2 B=GG(B, C, D, A, 0xe7d3fbc8, s=20, M[4]) movz x13, #0xcde6 // Load lower half of constant 0x21e1cde6 movk x13, #0x21e1, lsl #16 // Load upper half of constant 0x21e1cde6 add w4, w4, w24 // Add dest value add w4, w4, w13 // Add constant 0x21e1cde6 and x13, x9, x17 // Aux function round 2 (x & z) add w4, w4, w6 // Add (~z & y) add w4, w4, w13 // Add (x & z) ror w4, w4, #27 // Rotate left s=5 bits bic x6, x9, x8 // Aux function round 2 (~z & y) add w4, w9, w4 // Add X parameter round 2 A=GG(A, B, C, D, 0x21e1cde6, s=5, M[9]) movz x13, #0x7d6 // Load lower half of constant 0xc33707d6 movk x13, #0xc337, lsl #16 // Load upper half of constant 0xc33707d6 add w17, w17, w12 // Add dest value add w17, w17, w13 // Add constant 0xc33707d6 and x13, x4, x8 // Aux function round 2 (x & z) add w17, w17, w6 // Add (~z & y) add w17, w17, w13 // Add (x & z) ror w17, w17, #23 // Rotate left s=9 bits bic x6, x4, x9 // Aux function round 2 (~z & y) add w17, w4, w17 // Add X parameter round 2 D=GG(D, A, B, C, 0xc33707d6, s=9, M[14]) movz x13, #0xd87 // Load lower half of constant 0xf4d50d87 movk x13, #0xf4d5, lsl #16 // Load upper half of constant 0xf4d50d87 add w8, w8, w21 // Add dest value add w8, w8, w13 // Add constant 0xf4d50d87 and x13, x17, x9 // Aux function round 2 (x & z) add w8, w8, w6 // Add (~z & y) add w8, w8, w13 // Add (x & z) ror w8, w8, #18 // Rotate left s=14 bits bic x6, x17, x4 // Aux function round 2 (~z & y) add w8, w17, w8 // Add X parameter round 2 C=GG(C, D, A, B, 0xf4d50d87, s=14, M[3]) movz x13, #0x14ed // Load lower half of constant 0x455a14ed movk x13, #0x455a, lsl #16 // Load upper half of constant 0x455a14ed add w9, w9, w5 // Add dest value add w9, w9, w13 // Add constant 0x455a14ed and x13, x8, x4 // Aux function round 2 (x & z) add w9, w9, w6 // Add (~z & y) add w9, w9, w13 // Add (x & z) ror w9, w9, #12 // Rotate left s=20 bits bic x6, x8, x17 // Aux function round 2 (~z & y) add w9, w8, w9 // Add X parameter round 2 B=GG(B, C, D, A, 0x455a14ed, s=20, M[8]) movz x13, #0xe905 // Load lower half of constant 0xa9e3e905 movk x13, #0xa9e3, lsl #16 // Load upper half of constant 0xa9e3e905 add w4, w4, w26 // Add dest value add w4, w4, w13 // Add constant 0xa9e3e905 and x13, x9, x17 // Aux function round 2 (x & z) add w4, w4, w6 // Add (~z & y) add w4, w4, w13 // Add (x & z) ror w4, w4, #27 // Rotate left s=5 bits bic x6, x9, x8 // Aux function round 2 (~z & y) add w4, w9, w4 // Add X parameter round 2 A=GG(A, B, C, D, 0xa9e3e905, s=5, M[13]) movz x13, #0xa3f8 // Load lower half of constant 0xfcefa3f8 movk x13, #0xfcef, lsl #16 // Load upper half of constant 0xfcefa3f8 add w17, w17, w3 // Add dest value add w17, w17, w13 // Add constant 0xfcefa3f8 and x13, x4, x8 // Aux function round 2 (x & z) add w17, w17, w6 // Add (~z & y) add w17, w17, w13 // Add (x & z) ror w17, w17, #23 // Rotate left s=9 bits bic x6, x4, x9 // Aux function round 2 (~z & y) add w17, w4, w17 // Add X 
parameter round 2 D=GG(D, A, B, C, 0xfcefa3f8, s=9, M[2]) movz x13, #0x2d9 // Load lower half of constant 0x676f02d9 movk x13, #0x676f, lsl #16 // Load upper half of constant 0x676f02d9 add w8, w8, w23 // Add dest value add w8, w8, w13 // Add constant 0x676f02d9 and x13, x17, x9 // Aux function round 2 (x & z) add w8, w8, w6 // Add (~z & y) add w8, w8, w13 // Add (x & z) ror w8, w8, #18 // Rotate left s=14 bits bic x6, x17, x4 // Aux function round 2 (~z & y) add w8, w17, w8 // Add X parameter round 2 C=GG(C, D, A, B, 0x676f02d9, s=14, M[7]) movz x13, #0x4c8a // Load lower half of constant 0x8d2a4c8a movk x13, #0x8d2a, lsl #16 // Load upper half of constant 0x8d2a4c8a add w9, w9, w11 // Add dest value add w9, w9, w13 // Add constant 0x8d2a4c8a and x13, x8, x4 // Aux function round 2 (x & z) add w9, w9, w6 // Add (~z & y) add w9, w9, w13 // Add (x & z) eor x6, x8, x17 // Begin aux function round 3 H(x,y,z)=(x^y^z) ror w9, w9, #12 // Rotate left s=20 bits movz x10, #0x3942 // Load lower half of constant 0xfffa3942 add w9, w8, w9 // Add X parameter round 2 B=GG(B, C, D, A, 0x8d2a4c8a, s=20, M[12]) movk x10, #0xfffa, lsl #16 // Load upper half of constant 0xfffa3942 add w4, w4, w22 // Add dest value eor x6, x6, x9 // End aux function round 3 H(x,y,z)=(x^y^z) add w4, w4, w10 // Add constant 0xfffa3942 add w4, w4, w6 // Add aux function result ror w4, w4, #28 // Rotate left s=4 bits eor x6, x9, x8 // Begin aux function round 3 H(x,y,z)=(x^y^z) movz x10, #0xf681 // Load lower half of constant 0x8771f681 add w4, w9, w4 // Add X parameter round 3 A=HH(A, B, C, D, 0xfffa3942, s=4, M[5]) movk x10, #0x8771, lsl #16 // Load upper half of constant 0x8771f681 add w17, w17, w5 // Add dest value eor x6, x6, x4 // End aux function round 3 H(x,y,z)=(x^y^z) add w17, w17, w10 // Add constant 0x8771f681 add w17, w17, w6 // Add aux function result eor x6, x4, x9 // Begin aux function round 3 H(x,y,z)=(x^y^z) ror w17, w17, #21 // Rotate left s=11 bits movz x13, #0x6122 // Load lower half of constant 0x6d9d6122 add w17, w4, w17 // Add X parameter round 3 D=HH(D, A, B, C, 0x8771f681, s=11, M[8]) movk x13, #0x6d9d, lsl #16 // Load upper half of constant 0x6d9d6122 add w8, w8, w25 // Add dest value eor x6, x6, x17 // End aux function round 3 H(x,y,z)=(x^y^z) add w8, w8, w13 // Add constant 0x6d9d6122 add w8, w8, w6 // Add aux function result ror w8, w8, #16 // Rotate left s=16 bits eor x6, x17, x4 // Begin aux function round 3 H(x,y,z)=(x^y^z) movz x13, #0x380c // Load lower half of constant 0xfde5380c add w8, w17, w8 // Add X parameter round 3 C=HH(C, D, A, B, 0x6d9d6122, s=16, M[11]) movk x13, #0xfde5, lsl #16 // Load upper half of constant 0xfde5380c add w9, w9, w12 // Add dest value eor x6, x6, x8 // End aux function round 3 H(x,y,z)=(x^y^z) add w9, w9, w13 // Add constant 0xfde5380c add w9, w9, w6 // Add aux function result eor x6, x8, x17 // Begin aux function round 3 H(x,y,z)=(x^y^z) ror w9, w9, #9 // Rotate left s=23 bits movz x10, #0xea44 // Load lower half of constant 0xa4beea44 add w9, w8, w9 // Add X parameter round 3 B=HH(B, C, D, A, 0xfde5380c, s=23, M[14]) movk x10, #0xa4be, lsl #16 // Load upper half of constant 0xa4beea44 add w4, w4, w20 // Add dest value eor x6, x6, x9 // End aux function round 3 H(x,y,z)=(x^y^z) add w4, w4, w10 // Add constant 0xa4beea44 add w4, w4, w6 // Add aux function result ror w4, w4, #28 // Rotate left s=4 bits eor x6, x9, x8 // Begin aux function round 3 H(x,y,z)=(x^y^z) movz x10, #0xcfa9 // Load lower half of constant 0x4bdecfa9 add w4, w9, w4 // Add X parameter round 3 
A=HH(A, B, C, D, 0xa4beea44, s=4, M[1]) movk x10, #0x4bde, lsl #16 // Load upper half of constant 0x4bdecfa9 add w17, w17, w14 // Add dest value eor x6, x6, x4 // End aux function round 3 H(x,y,z)=(x^y^z) add w17, w17, w10 // Add constant 0x4bdecfa9 add w17, w17, w6 // Add aux function result eor x6, x4, x9 // Begin aux function round 3 H(x,y,z)=(x^y^z) ror w17, w17, #21 // Rotate left s=11 bits movz x13, #0x4b60 // Load lower half of constant 0xf6bb4b60 add w17, w4, w17 // Add X parameter round 3 D=HH(D, A, B, C, 0x4bdecfa9, s=11, M[4]) movk x13, #0xf6bb, lsl #16 // Load upper half of constant 0xf6bb4b60 add w8, w8, w23 // Add dest value eor x6, x6, x17 // End aux function round 3 H(x,y,z)=(x^y^z) add w8, w8, w13 // Add constant 0xf6bb4b60 add w8, w8, w6 // Add aux function result ror w8, w8, #16 // Rotate left s=16 bits eor x6, x17, x4 // Begin aux function round 3 H(x,y,z)=(x^y^z) movz x13, #0xbc70 // Load lower half of constant 0xbebfbc70 add w8, w17, w8 // Add X parameter round 3 C=HH(C, D, A, B, 0xf6bb4b60, s=16, M[7]) movk x13, #0xbebf, lsl #16 // Load upper half of constant 0xbebfbc70 add w9, w9, w16 // Add dest value eor x6, x6, x8 // End aux function round 3 H(x,y,z)=(x^y^z) add w9, w9, w13 // Add constant 0xbebfbc70 add w9, w9, w6 // Add aux function result eor x6, x8, x17 // Begin aux function round 3 H(x,y,z)=(x^y^z) ror w9, w9, #9 // Rotate left s=23 bits movz x10, #0x7ec6 // Load lower half of constant 0x289b7ec6 add w9, w8, w9 // Add X parameter round 3 B=HH(B, C, D, A, 0xbebfbc70, s=23, M[10]) movk x10, #0x289b, lsl #16 // Load upper half of constant 0x289b7ec6 add w4, w4, w26 // Add dest value eor x6, x6, x9 // End aux function round 3 H(x,y,z)=(x^y^z) add w4, w4, w10 // Add constant 0x289b7ec6 add w4, w4, w6 // Add aux function result ror w4, w4, #28 // Rotate left s=4 bits eor x6, x9, x8 // Begin aux function round 3 H(x,y,z)=(x^y^z) movz x10, #0x27fa // Load lower half of constant 0xeaa127fa add w4, w9, w4 // Add X parameter round 3 A=HH(A, B, C, D, 0x289b7ec6, s=4, M[13]) movk x10, #0xeaa1, lsl #16 // Load upper half of constant 0xeaa127fa add w17, w17, w15 // Add dest value eor x6, x6, x4 // End aux function round 3 H(x,y,z)=(x^y^z) add w17, w17, w10 // Add constant 0xeaa127fa add w17, w17, w6 // Add aux function result eor x6, x4, x9 // Begin aux function round 3 H(x,y,z)=(x^y^z) ror w17, w17, #21 // Rotate left s=11 bits movz x13, #0x3085 // Load lower half of constant 0xd4ef3085 add w17, w4, w17 // Add X parameter round 3 D=HH(D, A, B, C, 0xeaa127fa, s=11, M[0]) movk x13, #0xd4ef, lsl #16 // Load upper half of constant 0xd4ef3085 add w8, w8, w21 // Add dest value eor x6, x6, x17 // End aux function round 3 H(x,y,z)=(x^y^z) add w8, w8, w13 // Add constant 0xd4ef3085 add w8, w8, w6 // Add aux function result ror w8, w8, #16 // Rotate left s=16 bits eor x6, x17, x4 // Begin aux function round 3 H(x,y,z)=(x^y^z) movz x13, #0x1d05 // Load lower half of constant 0x4881d05 add w8, w17, w8 // Add X parameter round 3 C=HH(C, D, A, B, 0xd4ef3085, s=16, M[3]) movk x13, #0x488, lsl #16 // Load upper half of constant 0x4881d05 add w9, w9, w7 // Add dest value eor x6, x6, x8 // End aux function round 3 H(x,y,z)=(x^y^z) add w9, w9, w13 // Add constant 0x4881d05 add w9, w9, w6 // Add aux function result eor x6, x8, x17 // Begin aux function round 3 H(x,y,z)=(x^y^z) ror w9, w9, #9 // Rotate left s=23 bits movz x10, #0xd039 // Load lower half of constant 0xd9d4d039 add w9, w8, w9 // Add X parameter round 3 B=HH(B, C, D, A, 0x4881d05, s=23, M[6]) movk x10, #0xd9d4, lsl #16 // Load 
upper half of constant 0xd9d4d039 add w4, w4, w24 // Add dest value eor x6, x6, x9 // End aux function round 3 H(x,y,z)=(x^y^z) add w4, w4, w10 // Add constant 0xd9d4d039 add w4, w4, w6 // Add aux function result ror w4, w4, #28 // Rotate left s=4 bits eor x6, x9, x8 // Begin aux function round 3 H(x,y,z)=(x^y^z) movz x10, #0x99e5 // Load lower half of constant 0xe6db99e5 add w4, w9, w4 // Add X parameter round 3 A=HH(A, B, C, D, 0xd9d4d039, s=4, M[9]) movk x10, #0xe6db, lsl #16 // Load upper half of constant 0xe6db99e5 add w17, w17, w11 // Add dest value eor x6, x6, x4 // End aux function round 3 H(x,y,z)=(x^y^z) add w17, w17, w10 // Add constant 0xe6db99e5 add w17, w17, w6 // Add aux function result eor x6, x4, x9 // Begin aux function round 3 H(x,y,z)=(x^y^z) ror w17, w17, #21 // Rotate left s=11 bits movz x13, #0x7cf8 // Load lower half of constant 0x1fa27cf8 add w17, w4, w17 // Add X parameter round 3 D=HH(D, A, B, C, 0xe6db99e5, s=11, M[12]) movk x13, #0x1fa2, lsl #16 // Load upper half of constant 0x1fa27cf8 add w8, w8, w27 // Add dest value eor x6, x6, x17 // End aux function round 3 H(x,y,z)=(x^y^z) add w8, w8, w13 // Add constant 0x1fa27cf8 add w8, w8, w6 // Add aux function result ror w8, w8, #16 // Rotate left s=16 bits eor x6, x17, x4 // Begin aux function round 3 H(x,y,z)=(x^y^z) movz x13, #0x5665 // Load lower half of constant 0xc4ac5665 add w8, w17, w8 // Add X parameter round 3 C=HH(C, D, A, B, 0x1fa27cf8, s=16, M[15]) movk x13, #0xc4ac, lsl #16 // Load upper half of constant 0xc4ac5665 add w9, w9, w3 // Add dest value eor x6, x6, x8 // End aux function round 3 H(x,y,z)=(x^y^z) add w9, w9, w13 // Add constant 0xc4ac5665 add w9, w9, w6 // Add aux function result ror w9, w9, #9 // Rotate left s=23 bits movz x6, #0x2244 // Load lower half of constant 0xf4292244 movk x6, #0xf429, lsl #16 // Load upper half of constant 0xf4292244 add w9, w8, w9 // Add X parameter round 3 B=HH(B, C, D, A, 0xc4ac5665, s=23, M[2]) add w4, w4, w15 // Add dest value orn x13, x9, x17 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w4, w4, w6 // Add constant 0xf4292244 eor x6, x8, x13 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w4, w4, w6 // Add aux function result ror w4, w4, #26 // Rotate left s=6 bits movz x6, #0xff97 // Load lower half of constant 0x432aff97 movk x6, #0x432a, lsl #16 // Load upper half of constant 0x432aff97 add w4, w9, w4 // Add X parameter round 4 A=II(A, B, C, D, 0xf4292244, s=6, M[0]) orn x10, x4, x8 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w17, w17, w23 // Add dest value eor x10, x9, x10 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w17, w17, w6 // Add constant 0x432aff97 add w6, w17, w10 // Add aux function result ror w6, w6, #22 // Rotate left s=10 bits movz x17, #0x23a7 // Load lower half of constant 0xab9423a7 movk x17, #0xab94, lsl #16 // Load upper half of constant 0xab9423a7 add w6, w4, w6 // Add X parameter round 4 D=II(D, A, B, C, 0x432aff97, s=10, M[7]) add w8, w8, w12 // Add dest value orn x10, x6, x9 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w8, w8, w17 // Add constant 0xab9423a7 eor x17, x4, x10 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w8, w8, w17 // Add aux function result ror w8, w8, #17 // Rotate left s=15 bits movz x17, #0xa039 // Load lower half of constant 0xfc93a039 movk x17, #0xfc93, lsl #16 // Load upper half of constant 0xfc93a039 add w8, w6, w8 // Add X parameter round 4 C=II(C, D, A, B, 0xab9423a7, s=15, M[14]) orn x13, x8, x4 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w9, w9, w22 // 
Add dest value eor x13, x6, x13 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w9, w9, w17 // Add constant 0xfc93a039 add w17, w9, w13 // Add aux function result ror w17, w17, #11 // Rotate left s=21 bits movz x9, #0x59c3 // Load lower half of constant 0x655b59c3 movk x9, #0x655b, lsl #16 // Load upper half of constant 0x655b59c3 add w17, w8, w17 // Add X parameter round 4 B=II(B, C, D, A, 0xfc93a039, s=21, M[5]) add w4, w4, w11 // Add dest value orn x13, x17, x6 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w9, w4, w9 // Add constant 0x655b59c3 eor x4, x8, x13 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w9, w9, w4 // Add aux function result ror w9, w9, #26 // Rotate left s=6 bits movz x4, #0xcc92 // Load lower half of constant 0x8f0ccc92 movk x4, #0x8f0c, lsl #16 // Load upper half of constant 0x8f0ccc92 add w9, w17, w9 // Add X parameter round 4 A=II(A, B, C, D, 0x655b59c3, s=6, M[12]) orn x10, x9, x8 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w6, w6, w21 // Add dest value eor x10, x17, x10 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w4, w6, w4 // Add constant 0x8f0ccc92 add w6, w4, w10 // Add aux function result ror w6, w6, #22 // Rotate left s=10 bits movz x4, #0xf47d // Load lower half of constant 0xffeff47d movk x4, #0xffef, lsl #16 // Load upper half of constant 0xffeff47d add w6, w9, w6 // Add X parameter round 4 D=II(D, A, B, C, 0x8f0ccc92, s=10, M[3]) add w8, w8, w16 // Add dest value orn x10, x6, x17 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w8, w8, w4 // Add constant 0xffeff47d eor x4, x9, x10 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w8, w8, w4 // Add aux function result ror w8, w8, #17 // Rotate left s=15 bits movz x4, #0x5dd1 // Load lower half of constant 0x85845dd1 movk x4, #0x8584, lsl #16 // Load upper half of constant 0x85845dd1 add w8, w6, w8 // Add X parameter round 4 C=II(C, D, A, B, 0xffeff47d, s=15, M[10]) orn x10, x8, x9 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w15, w17, w20 // Add dest value eor x17, x6, x10 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w15, w15, w4 // Add constant 0x85845dd1 add w4, w15, w17 // Add aux function result ror w4, w4, #11 // Rotate left s=21 bits movz x15, #0x7e4f // Load lower half of constant 0x6fa87e4f movk x15, #0x6fa8, lsl #16 // Load upper half of constant 0x6fa87e4f add w17, w8, w4 // Add X parameter round 4 B=II(B, C, D, A, 0x85845dd1, s=21, M[1]) add w4, w9, w5 // Add dest value orn x9, x17, x6 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w15, w4, w15 // Add constant 0x6fa87e4f eor x4, x8, x9 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w9, w15, w4 // Add aux function result ror w9, w9, #26 // Rotate left s=6 bits movz x15, #0xe6e0 // Load lower half of constant 0xfe2ce6e0 movk x15, #0xfe2c, lsl #16 // Load upper half of constant 0xfe2ce6e0 add w4, w17, w9 // Add X parameter round 4 A=II(A, B, C, D, 0x6fa87e4f, s=6, M[8]) orn x9, x4, x8 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w6, w6, w27 // Add dest value eor x9, x17, x9 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w15, w6, w15 // Add constant 0xfe2ce6e0 add w6, w15, w9 // Add aux function result ror w6, w6, #22 // Rotate left s=10 bits movz x9, #0x4314 // Load lower half of constant 0xa3014314 movk x9, #0xa301, lsl #16 // Load upper half of constant 0xa3014314 add w15, w4, w6 // Add X parameter round 4 D=II(D, A, B, C, 0xfe2ce6e0, s=10, M[15]) add w6, w8, w7 // Add dest value orn x7, x15, x17 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w8, w6, w9 
// Add constant 0xa3014314 eor x9, x4, x7 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w6, w8, w9 // Add aux function result ror w6, w6, #17 // Rotate left s=15 bits movz x7, #0x11a1 // Load lower half of constant 0x4e0811a1 movk x7, #0x4e08, lsl #16 // Load upper half of constant 0x4e0811a1 add w8, w15, w6 // Add X parameter round 4 C=II(C, D, A, B, 0xa3014314, s=15, M[6]) orn x9, x8, x4 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w6, w17, w26 // Add dest value eor x17, x15, x9 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w9, w6, w7 // Add constant 0x4e0811a1 add w7, w9, w17 // Add aux function result ror w7, w7, #11 // Rotate left s=21 bits movz x6, #0x7e82 // Load lower half of constant 0xf7537e82 movk x6, #0xf753, lsl #16 // Load upper half of constant 0xf7537e82 add w9, w8, w7 // Add X parameter round 4 B=II(B, C, D, A, 0x4e0811a1, s=21, M[13]) add w17, w4, w14 // Add dest value orn x7, x9, x15 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w14, w17, w6 // Add constant 0xf7537e82 eor x4, x8, x7 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w17, w14, w4 // Add aux function result ror w17, w17, #26 // Rotate left s=6 bits movz x6, #0xf235 // Load lower half of constant 0xbd3af235 movk x6, #0xbd3a, lsl #16 // Load upper half of constant 0xbd3af235 add w7, w9, w17 // Add X parameter round 4 A=II(A, B, C, D, 0xf7537e82, s=6, M[4]) orn x14, x7, x8 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w4, w15, w25 // Add dest value eor x17, x9, x14 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w15, w4, w6 // Add constant 0xbd3af235 add w16, w15, w17 // Add aux function result ror w16, w16, #22 // Rotate left s=10 bits movz x14, #0xd2bb // Load lower half of constant 0x2ad7d2bb movk x14, #0x2ad7, lsl #16 // Load upper half of constant 0x2ad7d2bb add w4, w7, w16 // Add X parameter round 4 D=II(D, A, B, C, 0xbd3af235, s=10, M[11]) add w6, w8, w3 // Add dest value orn x15, x4, x9 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w17, w6, w14 // Add constant 0x2ad7d2bb eor x16, x7, x15 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w8, w17, w16 // Add aux function result ror w8, w8, #17 // Rotate left s=15 bits movz x3, #0xd391 // Load lower half of constant 0xeb86d391 movk x3, #0xeb86, lsl #16 // Load upper half of constant 0xeb86d391 add w14, w4, w8 // Add X parameter round 4 C=II(C, D, A, B, 0x2ad7d2bb, s=15, M[2]) orn x6, x14, x7 // Begin aux function round 4 I(x,y,z)=((~z|x)^y) add w15, w9, w24 // Add dest value eor x17, x4, x6 // End aux function round 4 I(x,y,z)=((~z|x)^y) add w16, w15, w3 // Add constant 0xeb86d391 add w8, w16, w17 // Add aux function result ror w8, w8, #11 // Rotate left s=21 bits ldp w6, w15, [x0] // Reload MD5 state->A and state->B ldp w5, w9, [x0, #8] // Reload MD5 state->C and state->D add w3, w14, w8 // Add X parameter round 4 B=II(B, C, D, A, 0xeb86d391, s=21, M[9]) add w13, w4, w9 // Add result of MD5 rounds to state->D add w12, w14, w5 // Add result of MD5 rounds to state->C add w10, w7, w6 // Add result of MD5 rounds to state->A add w11, w3, w15 // Add result of MD5 rounds to state->B stp w12, w13, [x0, #8] // Store MD5 states C,D stp w10, w11, [x0] // Store MD5 states A,B add x1, x1, #64 // Increment data pointer subs w2, w2, #1 // Decrement block counter b.ne Lmd5_blocks_loop ldp x21,x22,[sp,#16] .cfi_restore x21 .cfi_restore x22 ldp x23,x24,[sp,#32] .cfi_restore x23 .cfi_restore x24 ldp x25,x26,[sp,#48] .cfi_restore x25 .cfi_restore x26 ldp x27,x28,[sp,#64] .cfi_restore x27 .cfi_restore x28 ldp 
x19,x20,[sp],#80 .cfi_restore x19 .cfi_restore x20 .cfi_def_cfa_offset 0 ret .cfi_endproc #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
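The generated MD5 block routine above spells every step out in full: the round-2, -3 and -4 auxiliary functions G, H and I appear as and/bic, eor and orn/eor sequences, each left-rotate by s is encoded as ror by 32-s, and the four working values are finally added back into state->A..D. A minimal C sketch of that per-step structure, using illustrative names only (this is not AWS-LC's reference code):

#include <stdint.h>

/* Rotate left; the assembly expresses this as "ror wN, wN, #(32 - s)",
 * which is the same rotation written as a rotate right. */
static inline uint32_t rotl32(uint32_t x, unsigned s) {
    return (x << s) | (x >> (32 - s));
}

/* The four MD5 auxiliary functions named in the comments above. */
static inline uint32_t F(uint32_t x, uint32_t y, uint32_t z) { return (x & y) | (~x & z); }
static inline uint32_t G(uint32_t x, uint32_t y, uint32_t z) { return (x & z) | (y & ~z); } /* round 2: (x & z) + (~z & y) */
static inline uint32_t H(uint32_t x, uint32_t y, uint32_t z) { return x ^ y ^ z; }          /* round 3: x^y^z */
static inline uint32_t I(uint32_t x, uint32_t y, uint32_t z) { return y ^ (x | ~z); }       /* round 4: (~z | x) ^ y */

/* One step, e.g. C = GG(C, D, A, B, K, s, M[i]): add dest value, add the
 * constant, add the aux-function result, rotate, then add the "X parameter"
 * (the previous word in the chain). */
static inline uint32_t md5_step(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
                                uint32_t m, uint32_t k, unsigned s,
                                uint32_t (*aux)(uint32_t, uint32_t, uint32_t)) {
    return b + rotl32(a + aux(b, c, d) + m + k, s);
}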
wlsfx/bnbb
49,131
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/sha512-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) // Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy // in the file LICENSE in the source distribution or at // https://www.openssl.org/source/license.html // ==================================================================== // Written by Andy Polyakov <appro@openssl.org> for the OpenSSL // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. // // Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. // // Performance in cycles per processed byte and improvement coefficient // over code generated with "default" compiler: // // SHA256-hw SHA256(*) SHA512 // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) // Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. // (**) The result is a trade-off: it's possible to improve it by // 10% (or by 1 cycle per round), but at the cost of 20% loss // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code // generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ # include <openssl/arm_arch.h> #endif .text .globl sha512_block_data_order_nohw .def sha512_block_data_order_nohw .type 32 .endef .align 6 sha512_block_data_order_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#4*8 ldp x20,x21,[x0] // load context ldp x22,x23,[x0,#2*8] ldp x24,x25,[x0,#4*8] add x2,x1,x2,lsl#7 // end of input ldp x26,x27,[x0,#6*8] adrp x30,LK512 add x30,x30,:lo12:LK512 stp x0,x2,[x29,#96] Loop: ldp x3,x4,[x1],#2*8 ldr x19,[x30],#8 // *K++ eor x28,x21,x22 // magic seed str x1,[x29,#112] #ifndef __AARCH64EB__ rev x3,x3 // 0 #endif ror x16,x24,#14 add x27,x27,x19 // h+=K[i] eor x6,x24,x24,ror#23 and x17,x25,x24 bic x19,x26,x24 add x27,x27,x3 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x6,ror#18 // Sigma1(e) ror x6,x20,#28 add x27,x27,x17 // h+=Ch(e,f,g) eor x17,x20,x20,ror#5 add x27,x27,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x23,x23,x27 // d+=h eor x28,x28,x21 // Maj(a,b,c) eor x17,x6,x17,ror#34 // Sigma0(a) add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x4,x4 // 1 #endif ldp x5,x6,[x1],#2*8 add x27,x27,x17 // h+=Sigma0(a) ror x16,x23,#14 add x26,x26,x28 // h+=K[i] eor x7,x23,x23,ror#23 and x17,x24,x23 bic x28,x25,x23 add x26,x26,x4 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x7,ror#18 // Sigma1(e) ror x7,x27,#28 add x26,x26,x17 // h+=Ch(e,f,g) eor x17,x27,x27,ror#5 add x26,x26,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x22,x22,x26 // d+=h eor x19,x19,x20 // Maj(a,b,c) eor x17,x7,x17,ror#34 // Sigma0(a) add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x5,x5 // 2 #endif add x26,x26,x17 // h+=Sigma0(a) ror x16,x22,#14 add x25,x25,x19 // h+=K[i] eor x8,x22,x22,ror#23 and x17,x23,x22 bic x19,x24,x22 add x25,x25,x5 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x8,ror#18 // Sigma1(e) ror x8,x26,#28 add x25,x25,x17 // h+=Ch(e,f,g) eor x17,x26,x26,ror#5 add x25,x25,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x21,x21,x25 // d+=h eor x28,x28,x27 // Maj(a,b,c) eor x17,x8,x17,ror#34 // Sigma0(a) add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x6,x6 // 3 #endif ldp x7,x8,[x1],#2*8 add x25,x25,x17 // h+=Sigma0(a) ror x16,x21,#14 add x24,x24,x28 // h+=K[i] eor x9,x21,x21,ror#23 and x17,x22,x21 bic x28,x23,x21 add x24,x24,x6 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x9,ror#18 // Sigma1(e) ror x9,x25,#28 add x24,x24,x17 // h+=Ch(e,f,g) eor x17,x25,x25,ror#5 add x24,x24,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x20,x20,x24 // d+=h eor x19,x19,x26 // Maj(a,b,c) eor x17,x9,x17,ror#34 // Sigma0(a) add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x7,x7 // 4 #endif add x24,x24,x17 // h+=Sigma0(a) ror x16,x20,#14 add x23,x23,x19 // h+=K[i] eor x10,x20,x20,ror#23 and x17,x21,x20 bic x19,x22,x20 add x23,x23,x7 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x10,ror#18 // Sigma1(e) ror x10,x24,#28 add x23,x23,x17 // h+=Ch(e,f,g) eor x17,x24,x24,ror#5 add x23,x23,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x27,x27,x23 // d+=h eor x28,x28,x25 // Maj(a,b,c) eor x17,x10,x17,ror#34 // Sigma0(a) add x23,x23,x28 // h+=Maj(a,b,c) ldr 
x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x8,x8 // 5 #endif ldp x9,x10,[x1],#2*8 add x23,x23,x17 // h+=Sigma0(a) ror x16,x27,#14 add x22,x22,x28 // h+=K[i] eor x11,x27,x27,ror#23 and x17,x20,x27 bic x28,x21,x27 add x22,x22,x8 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x11,ror#18 // Sigma1(e) ror x11,x23,#28 add x22,x22,x17 // h+=Ch(e,f,g) eor x17,x23,x23,ror#5 add x22,x22,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x26,x26,x22 // d+=h eor x19,x19,x24 // Maj(a,b,c) eor x17,x11,x17,ror#34 // Sigma0(a) add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x9,x9 // 6 #endif add x22,x22,x17 // h+=Sigma0(a) ror x16,x26,#14 add x21,x21,x19 // h+=K[i] eor x12,x26,x26,ror#23 and x17,x27,x26 bic x19,x20,x26 add x21,x21,x9 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x12,ror#18 // Sigma1(e) ror x12,x22,#28 add x21,x21,x17 // h+=Ch(e,f,g) eor x17,x22,x22,ror#5 add x21,x21,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x25,x25,x21 // d+=h eor x28,x28,x23 // Maj(a,b,c) eor x17,x12,x17,ror#34 // Sigma0(a) add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x10,x10 // 7 #endif ldp x11,x12,[x1],#2*8 add x21,x21,x17 // h+=Sigma0(a) ror x16,x25,#14 add x20,x20,x28 // h+=K[i] eor x13,x25,x25,ror#23 and x17,x26,x25 bic x28,x27,x25 add x20,x20,x10 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x13,ror#18 // Sigma1(e) ror x13,x21,#28 add x20,x20,x17 // h+=Ch(e,f,g) eor x17,x21,x21,ror#5 add x20,x20,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x24,x24,x20 // d+=h eor x19,x19,x22 // Maj(a,b,c) eor x17,x13,x17,ror#34 // Sigma0(a) add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x20,x20,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x11,x11 // 8 #endif add x20,x20,x17 // h+=Sigma0(a) ror x16,x24,#14 add x27,x27,x19 // h+=K[i] eor x14,x24,x24,ror#23 and x17,x25,x24 bic x19,x26,x24 add x27,x27,x11 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x14,ror#18 // Sigma1(e) ror x14,x20,#28 add x27,x27,x17 // h+=Ch(e,f,g) eor x17,x20,x20,ror#5 add x27,x27,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x23,x23,x27 // d+=h eor x28,x28,x21 // Maj(a,b,c) eor x17,x14,x17,ror#34 // Sigma0(a) add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x12,x12 // 9 #endif ldp x13,x14,[x1],#2*8 add x27,x27,x17 // h+=Sigma0(a) ror x16,x23,#14 add x26,x26,x28 // h+=K[i] eor x15,x23,x23,ror#23 and x17,x24,x23 bic x28,x25,x23 add x26,x26,x12 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x15,ror#18 // Sigma1(e) ror x15,x27,#28 add x26,x26,x17 // h+=Ch(e,f,g) eor x17,x27,x27,ror#5 add x26,x26,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x22,x22,x26 // d+=h eor x19,x19,x20 // Maj(a,b,c) eor x17,x15,x17,ror#34 // Sigma0(a) add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x13,x13 // 10 #endif add x26,x26,x17 // h+=Sigma0(a) ror x16,x22,#14 add x25,x25,x19 // h+=K[i] eor x0,x22,x22,ror#23 and x17,x23,x22 bic x19,x24,x22 add x25,x25,x13 // 
h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x0,ror#18 // Sigma1(e) ror x0,x26,#28 add x25,x25,x17 // h+=Ch(e,f,g) eor x17,x26,x26,ror#5 add x25,x25,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x21,x21,x25 // d+=h eor x28,x28,x27 // Maj(a,b,c) eor x17,x0,x17,ror#34 // Sigma0(a) add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x14,x14 // 11 #endif ldp x15,x0,[x1],#2*8 add x25,x25,x17 // h+=Sigma0(a) str x6,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] eor x6,x21,x21,ror#23 and x17,x22,x21 bic x28,x23,x21 add x24,x24,x14 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x6,ror#18 // Sigma1(e) ror x6,x25,#28 add x24,x24,x17 // h+=Ch(e,f,g) eor x17,x25,x25,ror#5 add x24,x24,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x20,x20,x24 // d+=h eor x19,x19,x26 // Maj(a,b,c) eor x17,x6,x17,ror#34 // Sigma0(a) add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x15,x15 // 12 #endif add x24,x24,x17 // h+=Sigma0(a) str x7,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] eor x7,x20,x20,ror#23 and x17,x21,x20 bic x19,x22,x20 add x23,x23,x15 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x7,ror#18 // Sigma1(e) ror x7,x24,#28 add x23,x23,x17 // h+=Ch(e,f,g) eor x17,x24,x24,ror#5 add x23,x23,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x27,x27,x23 // d+=h eor x28,x28,x25 // Maj(a,b,c) eor x17,x7,x17,ror#34 // Sigma0(a) add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x0,x0 // 13 #endif ldp x1,x2,[x1] add x23,x23,x17 // h+=Sigma0(a) str x8,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] eor x8,x27,x27,ror#23 and x17,x20,x27 bic x28,x21,x27 add x22,x22,x0 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x8,ror#18 // Sigma1(e) ror x8,x23,#28 add x22,x22,x17 // h+=Ch(e,f,g) eor x17,x23,x23,ror#5 add x22,x22,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x26,x26,x22 // d+=h eor x19,x19,x24 // Maj(a,b,c) eor x17,x8,x17,ror#34 // Sigma0(a) add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x1,x1 // 14 #endif ldr x6,[sp,#24] add x22,x22,x17 // h+=Sigma0(a) str x9,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] eor x9,x26,x26,ror#23 and x17,x27,x26 bic x19,x20,x26 add x21,x21,x1 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x9,ror#18 // Sigma1(e) ror x9,x22,#28 add x21,x21,x17 // h+=Ch(e,f,g) eor x17,x22,x22,ror#5 add x21,x21,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x25,x25,x21 // d+=h eor x28,x28,x23 // Maj(a,b,c) eor x17,x9,x17,ror#34 // Sigma0(a) add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x2,x2 // 15 #endif ldr x7,[sp,#0] add x21,x21,x17 // h+=Sigma0(a) str x10,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x9,x4,#1 and x17,x26,x25 ror x8,x1,#19 bic x28,x27,x25 ror x10,x21,#28 add x20,x20,x2 // h+=X[i] eor x16,x16,x25,ror#18 eor x9,x9,x4,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x10,x10,x21,ror#34 add 
x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x8,x8,x1,ror#61 eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x10,x21,ror#39 // Sigma0(a) eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) add x3,x3,x12 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x3,x3,x9 add x20,x20,x17 // h+=Sigma0(a) add x3,x3,x8 Loop_16_xx: ldr x8,[sp,#8] str x11,[sp,#0] ror x16,x24,#14 add x27,x27,x19 // h+=K[i] ror x10,x5,#1 and x17,x25,x24 ror x9,x2,#19 bic x19,x26,x24 ror x11,x20,#28 add x27,x27,x3 // h+=X[i] eor x16,x16,x24,ror#18 eor x10,x10,x5,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x24,ror#41 // Sigma1(e) eor x11,x11,x20,ror#34 add x27,x27,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x9,x9,x2,ror#61 eor x10,x10,x5,lsr#7 // sigma0(X[i+1]) add x27,x27,x16 // h+=Sigma1(e) eor x28,x28,x21 // Maj(a,b,c) eor x17,x11,x20,ror#39 // Sigma0(a) eor x9,x9,x2,lsr#6 // sigma1(X[i+14]) add x4,x4,x13 add x23,x23,x27 // d+=h add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x4,x4,x10 add x27,x27,x17 // h+=Sigma0(a) add x4,x4,x9 ldr x9,[sp,#16] str x12,[sp,#8] ror x16,x23,#14 add x26,x26,x28 // h+=K[i] ror x11,x6,#1 and x17,x24,x23 ror x10,x3,#19 bic x28,x25,x23 ror x12,x27,#28 add x26,x26,x4 // h+=X[i] eor x16,x16,x23,ror#18 eor x11,x11,x6,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x23,ror#41 // Sigma1(e) eor x12,x12,x27,ror#34 add x26,x26,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x10,x10,x3,ror#61 eor x11,x11,x6,lsr#7 // sigma0(X[i+1]) add x26,x26,x16 // h+=Sigma1(e) eor x19,x19,x20 // Maj(a,b,c) eor x17,x12,x27,ror#39 // Sigma0(a) eor x10,x10,x3,lsr#6 // sigma1(X[i+14]) add x5,x5,x14 add x22,x22,x26 // d+=h add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x5,x5,x11 add x26,x26,x17 // h+=Sigma0(a) add x5,x5,x10 ldr x10,[sp,#24] str x13,[sp,#16] ror x16,x22,#14 add x25,x25,x19 // h+=K[i] ror x12,x7,#1 and x17,x23,x22 ror x11,x4,#19 bic x19,x24,x22 ror x13,x26,#28 add x25,x25,x5 // h+=X[i] eor x16,x16,x22,ror#18 eor x12,x12,x7,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x22,ror#41 // Sigma1(e) eor x13,x13,x26,ror#34 add x25,x25,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x11,x11,x4,ror#61 eor x12,x12,x7,lsr#7 // sigma0(X[i+1]) add x25,x25,x16 // h+=Sigma1(e) eor x28,x28,x27 // Maj(a,b,c) eor x17,x13,x26,ror#39 // Sigma0(a) eor x11,x11,x4,lsr#6 // sigma1(X[i+14]) add x6,x6,x15 add x21,x21,x25 // d+=h add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x6,x6,x12 add x25,x25,x17 // h+=Sigma0(a) add x6,x6,x11 ldr x11,[sp,#0] str x14,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] ror x13,x8,#1 and x17,x22,x21 ror x12,x5,#19 bic x28,x23,x21 ror x14,x25,#28 add x24,x24,x6 // h+=X[i] eor x16,x16,x21,ror#18 eor x13,x13,x8,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x21,ror#41 // Sigma1(e) eor x14,x14,x25,ror#34 add x24,x24,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x12,x12,x5,ror#61 eor x13,x13,x8,lsr#7 // sigma0(X[i+1]) add x24,x24,x16 // h+=Sigma1(e) eor x19,x19,x26 // Maj(a,b,c) eor x17,x14,x25,ror#39 // Sigma0(a) eor x12,x12,x5,lsr#6 // sigma1(X[i+14]) add x7,x7,x0 add x20,x20,x24 // d+=h add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x7,x7,x13 add 
x24,x24,x17 // h+=Sigma0(a) add x7,x7,x12 ldr x12,[sp,#8] str x15,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] ror x14,x9,#1 and x17,x21,x20 ror x13,x6,#19 bic x19,x22,x20 ror x15,x24,#28 add x23,x23,x7 // h+=X[i] eor x16,x16,x20,ror#18 eor x14,x14,x9,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x20,ror#41 // Sigma1(e) eor x15,x15,x24,ror#34 add x23,x23,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x13,x13,x6,ror#61 eor x14,x14,x9,lsr#7 // sigma0(X[i+1]) add x23,x23,x16 // h+=Sigma1(e) eor x28,x28,x25 // Maj(a,b,c) eor x17,x15,x24,ror#39 // Sigma0(a) eor x13,x13,x6,lsr#6 // sigma1(X[i+14]) add x8,x8,x1 add x27,x27,x23 // d+=h add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x8,x8,x14 add x23,x23,x17 // h+=Sigma0(a) add x8,x8,x13 ldr x13,[sp,#16] str x0,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] ror x15,x10,#1 and x17,x20,x27 ror x14,x7,#19 bic x28,x21,x27 ror x0,x23,#28 add x22,x22,x8 // h+=X[i] eor x16,x16,x27,ror#18 eor x15,x15,x10,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x27,ror#41 // Sigma1(e) eor x0,x0,x23,ror#34 add x22,x22,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x14,x14,x7,ror#61 eor x15,x15,x10,lsr#7 // sigma0(X[i+1]) add x22,x22,x16 // h+=Sigma1(e) eor x19,x19,x24 // Maj(a,b,c) eor x17,x0,x23,ror#39 // Sigma0(a) eor x14,x14,x7,lsr#6 // sigma1(X[i+14]) add x9,x9,x2 add x26,x26,x22 // d+=h add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x9,x9,x15 add x22,x22,x17 // h+=Sigma0(a) add x9,x9,x14 ldr x14,[sp,#24] str x1,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] ror x0,x11,#1 and x17,x27,x26 ror x15,x8,#19 bic x19,x20,x26 ror x1,x22,#28 add x21,x21,x9 // h+=X[i] eor x16,x16,x26,ror#18 eor x0,x0,x11,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x26,ror#41 // Sigma1(e) eor x1,x1,x22,ror#34 add x21,x21,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x15,x15,x8,ror#61 eor x0,x0,x11,lsr#7 // sigma0(X[i+1]) add x21,x21,x16 // h+=Sigma1(e) eor x28,x28,x23 // Maj(a,b,c) eor x17,x1,x22,ror#39 // Sigma0(a) eor x15,x15,x8,lsr#6 // sigma1(X[i+14]) add x10,x10,x3 add x25,x25,x21 // d+=h add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x10,x10,x0 add x21,x21,x17 // h+=Sigma0(a) add x10,x10,x15 ldr x15,[sp,#0] str x2,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x1,x12,#1 and x17,x26,x25 ror x0,x9,#19 bic x28,x27,x25 ror x2,x21,#28 add x20,x20,x10 // h+=X[i] eor x16,x16,x25,ror#18 eor x1,x1,x12,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x2,x2,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x0,x0,x9,ror#61 eor x1,x1,x12,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x2,x21,ror#39 // Sigma0(a) eor x0,x0,x9,lsr#6 // sigma1(X[i+14]) add x11,x11,x4 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x11,x11,x1 add x20,x20,x17 // h+=Sigma0(a) add x11,x11,x0 ldr x0,[sp,#8] str x3,[sp,#0] ror x16,x24,#14 add x27,x27,x19 // h+=K[i] ror x2,x13,#1 and x17,x25,x24 ror x1,x10,#19 bic x19,x26,x24 ror x3,x20,#28 add x27,x27,x11 // h+=X[i] eor x16,x16,x24,ror#18 eor x2,x2,x13,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x24,ror#41 // Sigma1(e) eor x3,x3,x20,ror#34 add x27,x27,x17 
// h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x1,x1,x10,ror#61 eor x2,x2,x13,lsr#7 // sigma0(X[i+1]) add x27,x27,x16 // h+=Sigma1(e) eor x28,x28,x21 // Maj(a,b,c) eor x17,x3,x20,ror#39 // Sigma0(a) eor x1,x1,x10,lsr#6 // sigma1(X[i+14]) add x12,x12,x5 add x23,x23,x27 // d+=h add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x12,x12,x2 add x27,x27,x17 // h+=Sigma0(a) add x12,x12,x1 ldr x1,[sp,#16] str x4,[sp,#8] ror x16,x23,#14 add x26,x26,x28 // h+=K[i] ror x3,x14,#1 and x17,x24,x23 ror x2,x11,#19 bic x28,x25,x23 ror x4,x27,#28 add x26,x26,x12 // h+=X[i] eor x16,x16,x23,ror#18 eor x3,x3,x14,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x23,ror#41 // Sigma1(e) eor x4,x4,x27,ror#34 add x26,x26,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x2,x2,x11,ror#61 eor x3,x3,x14,lsr#7 // sigma0(X[i+1]) add x26,x26,x16 // h+=Sigma1(e) eor x19,x19,x20 // Maj(a,b,c) eor x17,x4,x27,ror#39 // Sigma0(a) eor x2,x2,x11,lsr#6 // sigma1(X[i+14]) add x13,x13,x6 add x22,x22,x26 // d+=h add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x13,x13,x3 add x26,x26,x17 // h+=Sigma0(a) add x13,x13,x2 ldr x2,[sp,#24] str x5,[sp,#16] ror x16,x22,#14 add x25,x25,x19 // h+=K[i] ror x4,x15,#1 and x17,x23,x22 ror x3,x12,#19 bic x19,x24,x22 ror x5,x26,#28 add x25,x25,x13 // h+=X[i] eor x16,x16,x22,ror#18 eor x4,x4,x15,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x22,ror#41 // Sigma1(e) eor x5,x5,x26,ror#34 add x25,x25,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x3,x3,x12,ror#61 eor x4,x4,x15,lsr#7 // sigma0(X[i+1]) add x25,x25,x16 // h+=Sigma1(e) eor x28,x28,x27 // Maj(a,b,c) eor x17,x5,x26,ror#39 // Sigma0(a) eor x3,x3,x12,lsr#6 // sigma1(X[i+14]) add x14,x14,x7 add x21,x21,x25 // d+=h add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x14,x14,x4 add x25,x25,x17 // h+=Sigma0(a) add x14,x14,x3 ldr x3,[sp,#0] str x6,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] ror x5,x0,#1 and x17,x22,x21 ror x4,x13,#19 bic x28,x23,x21 ror x6,x25,#28 add x24,x24,x14 // h+=X[i] eor x16,x16,x21,ror#18 eor x5,x5,x0,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x21,ror#41 // Sigma1(e) eor x6,x6,x25,ror#34 add x24,x24,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x4,x4,x13,ror#61 eor x5,x5,x0,lsr#7 // sigma0(X[i+1]) add x24,x24,x16 // h+=Sigma1(e) eor x19,x19,x26 // Maj(a,b,c) eor x17,x6,x25,ror#39 // Sigma0(a) eor x4,x4,x13,lsr#6 // sigma1(X[i+14]) add x15,x15,x8 add x20,x20,x24 // d+=h add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x15,x15,x5 add x24,x24,x17 // h+=Sigma0(a) add x15,x15,x4 ldr x4,[sp,#8] str x7,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] ror x6,x1,#1 and x17,x21,x20 ror x5,x14,#19 bic x19,x22,x20 ror x7,x24,#28 add x23,x23,x15 // h+=X[i] eor x16,x16,x20,ror#18 eor x6,x6,x1,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x20,ror#41 // Sigma1(e) eor x7,x7,x24,ror#34 add x23,x23,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x5,x5,x14,ror#61 eor x6,x6,x1,lsr#7 // sigma0(X[i+1]) add x23,x23,x16 // h+=Sigma1(e) eor x28,x28,x25 // Maj(a,b,c) eor x17,x7,x24,ror#39 // Sigma0(a) eor x5,x5,x14,lsr#6 // sigma1(X[i+14]) add x0,x0,x9 add x27,x27,x23 // d+=h add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x0,x0,x6 add x23,x23,x17 // h+=Sigma0(a) add x0,x0,x5 ldr 
x5,[sp,#16] str x8,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] ror x7,x2,#1 and x17,x20,x27 ror x6,x15,#19 bic x28,x21,x27 ror x8,x23,#28 add x22,x22,x0 // h+=X[i] eor x16,x16,x27,ror#18 eor x7,x7,x2,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x27,ror#41 // Sigma1(e) eor x8,x8,x23,ror#34 add x22,x22,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x6,x6,x15,ror#61 eor x7,x7,x2,lsr#7 // sigma0(X[i+1]) add x22,x22,x16 // h+=Sigma1(e) eor x19,x19,x24 // Maj(a,b,c) eor x17,x8,x23,ror#39 // Sigma0(a) eor x6,x6,x15,lsr#6 // sigma1(X[i+14]) add x1,x1,x10 add x26,x26,x22 // d+=h add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x1,x1,x7 add x22,x22,x17 // h+=Sigma0(a) add x1,x1,x6 ldr x6,[sp,#24] str x9,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] ror x8,x3,#1 and x17,x27,x26 ror x7,x0,#19 bic x19,x20,x26 ror x9,x22,#28 add x21,x21,x1 // h+=X[i] eor x16,x16,x26,ror#18 eor x8,x8,x3,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x26,ror#41 // Sigma1(e) eor x9,x9,x22,ror#34 add x21,x21,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x7,x7,x0,ror#61 eor x8,x8,x3,lsr#7 // sigma0(X[i+1]) add x21,x21,x16 // h+=Sigma1(e) eor x28,x28,x23 // Maj(a,b,c) eor x17,x9,x22,ror#39 // Sigma0(a) eor x7,x7,x0,lsr#6 // sigma1(X[i+14]) add x2,x2,x11 add x25,x25,x21 // d+=h add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x2,x2,x8 add x21,x21,x17 // h+=Sigma0(a) add x2,x2,x7 ldr x7,[sp,#0] str x10,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x9,x4,#1 and x17,x26,x25 ror x8,x1,#19 bic x28,x27,x25 ror x10,x21,#28 add x20,x20,x2 // h+=X[i] eor x16,x16,x25,ror#18 eor x9,x9,x4,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x10,x10,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x8,x8,x1,ror#61 eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x10,x21,ror#39 // Sigma0(a) eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) add x3,x3,x12 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x3,x3,x9 add x20,x20,x17 // h+=Sigma0(a) add x3,x3,x8 cbnz x19,Loop_16_xx ldp x0,x2,[x29,#96] ldr x1,[x29,#112] sub x30,x30,#648 // rewind ldp x3,x4,[x0] ldp x5,x6,[x0,#2*8] add x1,x1,#14*8 // advance input pointer ldp x7,x8,[x0,#4*8] add x20,x20,x3 ldp x9,x10,[x0,#6*8] add x21,x21,x4 add x22,x22,x5 add x23,x23,x6 stp x20,x21,[x0] add x24,x24,x7 add x25,x25,x8 stp x22,x23,[x0,#2*8] add x26,x26,x9 add x27,x27,x10 cmp x1,x2 stp x24,x25,[x0,#4*8] stp x26,x27,[x0,#6*8] b.ne Loop ldp x19,x20,[x29,#16] add sp,sp,#4*8 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 AARCH64_VALIDATE_LINK_REGISTER ret .section .rodata .align 6 LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 
0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0 // terminator .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 .text #ifndef __KERNEL__ .globl sha512_block_data_order_hw .def sha512_block_data_order_hw .type 32 .endef .align 6 sha512_block_data_order_hw: #ifdef BORINGSSL_DISPATCH_TEST adrp x9,BORINGSSL_function_hit add x9, x9, :lo12:BORINGSSL_function_hit mov w10, #1 strb w10, [x9,#8] // kFlag_sha512_hw #endif // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! 
add x29,sp,#0 ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context adrp x3,LK512 add x3,x3,:lo12:LK512 rev64 v16.16b,v16.16b rev64 v17.16b,v17.16b rev64 v18.16b,v18.16b rev64 v19.16b,v19.16b rev64 v20.16b,v20.16b rev64 v21.16b,v21.16b rev64 v22.16b,v22.16b rev64 v23.16b,v23.16b b Loop_hw .align 4 Loop_hw: ld1 {v24.2d},[x3],#16 subs x2,x2,#1 sub x4,x1,#128 orr v26.16b,v0.16b,v0.16b // offload orr v27.16b,v1.16b,v1.16b orr v28.16b,v2.16b,v2.16b orr v29.16b,v3.16b,v3.16b csel x1,x1,x4,ne // conditional rewind add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 
0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add 
v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 
v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v23.2d ld1 
{v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v16.2d ld1 {v16.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b rev64 v16.16b,v16.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v17.2d ld1 {v17.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b rev64 v17.16b,v17.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v18.2d ld1 {v18.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b rev64 v18.16b,v18.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v19.2d ld1 {v19.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b rev64 v19.16b,v19.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v20.2d ld1 {v20.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b rev64 v20.16b,v20.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v21.2d ld1 {v21.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b rev64 v21.16b,v21.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v22.2d ld1 {v22.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b rev64 v22.16b,v22.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b sub x3,x3,#80*8 // rewind add v25.2d,v25.2d,v23.2d ld1 {v23.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b rev64 v23.16b,v23.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v0.2d,v0.2d,v26.2d // accumulate add v1.2d,v1.2d,v27.2d add v2.2d,v2.2d,v28.2d add v3.2d,v3.2d,v29.2d cbnz x2,Loop_hw st1 
{v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context ldr x29,[sp],#16 ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
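The unrolled loop ending above relies on the ARMv8.2 SHA-512 extension: the sha512su0/sha512su1 pairs (emitted as raw .long opcodes) advance the 16-word message schedule two 64-bit words at a time, while sha512h/sha512h2 perform the round compression on the working state, with the per-round constants streamed in through the ld1 {...},[x3] loads — that is what the "T1 + H + K512[i]" comments track. For orientation only (this is not part of the generated file), the schedule recurrence those su instructions implement is the standard FIPS 180-4 one; the Python below is a minimal sketch of it, with helper names of my own choosing.

MASK64 = (1 << 64) - 1

def rotr64(x, n):
    # 64-bit rotate right
    return ((x >> n) | (x << (64 - n))) & MASK64

def small_sigma0(x):
    # FIPS 180-4 sigma0: ROTR^1 xor ROTR^8 xor SHR^7
    return rotr64(x, 1) ^ rotr64(x, 8) ^ (x >> 7)

def small_sigma1(x):
    # FIPS 180-4 sigma1: ROTR^19 xor ROTR^61 xor SHR^6
    return rotr64(x, 19) ^ rotr64(x, 61) ^ (x >> 6)

def expand_schedule(block):
    # block: list of 16 64-bit words; returns the 80-word schedule W[0..79]
    w = list(block)
    for t in range(16, 80):
        w.append((small_sigma1(w[t - 2]) + w[t - 7] +
                  small_sigma0(w[t - 15]) + w[t - 16]) & MASK64)
    return w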
wlsfx/bnbb
17,297
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/ghashv8-armx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <openssl/arm_arch.h> #if __ARM_MAX_ARCH__>=7 .text .arch armv8-a+crypto .globl gcm_init_v8 .def gcm_init_v8 .type 32 .endef .align 4 gcm_init_v8: AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x1] //load input H movi v19.16b,#0xe1 shl v19.2d,v19.2d,#57 //0xc2.0 ext v3.16b,v17.16b,v17.16b,#8 ushr v18.2d,v19.2d,#63 dup v17.4s,v17.s[1] ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01 ushr v18.2d,v3.2d,#63 sshr v17.4s,v17.4s,#31 //broadcast carry bit and v18.16b,v18.16b,v16.16b shl v3.2d,v3.2d,#1 ext v18.16b,v18.16b,v18.16b,#8 and v16.16b,v16.16b,v17.16b orr v3.16b,v3.16b,v18.16b //H<<<=1 eor v20.16b,v3.16b,v16.16b //twisted H ext v20.16b, v20.16b, v20.16b, #8 st1 {v20.2d},[x0],#16 //store Htable[0] //calculate H^2 ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing pmull2 v0.1q,v20.2d,v20.2d eor v16.16b,v16.16b,v20.16b pmull v2.1q,v20.1d,v20.1d pmull v1.1q,v16.1d,v16.1d ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v17.16b,v0.16b,v18.16b ext v22.16b,v17.16b,v17.16b,#8 //Karatsuba pre-processing eor v17.16b,v17.16b,v22.16b ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v21.2d},[x0],#16 //store Htable[1..2] st1 {v22.2d},[x0],#16 //store Htable[1..2] //calculate H^3 and H^4 pmull2 v0.1q,v20.2d, v22.2d pmull2 v5.1q,v22.2d,v22.2d pmull v2.1q,v20.1d, v22.1d pmull v7.1q,v22.1d,v22.1d pmull v1.1q,v16.1d,v17.1d pmull v6.1q,v17.1d,v17.1d ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing ext v17.16b,v5.16b,v7.16b,#8 eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v16.16b eor v4.16b,v5.16b,v7.16b eor v6.16b,v6.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase eor v6.16b,v6.16b,v4.16b pmull v4.1q,v5.1d,v19.1d ins v2.d[0],v1.d[1] ins v7.d[0],v6.d[1] ins v1.d[1],v0.d[0] ins v6.d[1],v5.d[0] eor v0.16b,v1.16b,v18.16b eor v5.16b,v6.16b,v4.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase ext v4.16b,v5.16b,v5.16b,#8 pmull v0.1q,v0.1d,v19.1d pmull v5.1q,v5.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v4.16b,v4.16b,v7.16b eor v16.16b, v0.16b,v18.16b //H^3 eor v17.16b, v5.16b,v4.16b //H^4 ext v23.16b,v16.16b,v16.16b,#8 //Karatsuba pre-processing ext v25.16b,v17.16b,v17.16b,#8 ext v18.16b,v22.16b,v22.16b,#8 eor v16.16b,v16.16b,v23.16b eor v17.16b,v17.16b,v25.16b eor v18.16b,v18.16b,v22.16b ext v24.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v23.2d,v24.2d,v25.2d},[x0],#48 //store Htable[3..5] //calculate H^5 and H^6 pmull2 v0.1q,v22.2d, v23.2d pmull2 v5.1q,v23.2d,v23.2d pmull v2.1q,v22.1d, v23.1d pmull v7.1q,v23.1d,v23.1d pmull v1.1q,v16.1d,v18.1d pmull v6.1q,v16.1d,v16.1d ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing ext v17.16b,v5.16b,v7.16b,#8 eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v16.16b eor v4.16b,v5.16b,v7.16b eor v6.16b,v6.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase eor v6.16b,v6.16b,v4.16b pmull v4.1q,v5.1d,v19.1d ins v2.d[0],v1.d[1] ins v7.d[0],v6.d[1] ins v1.d[1],v0.d[0] ins v6.d[1],v5.d[0] eor v0.16b,v1.16b,v18.16b eor v5.16b,v6.16b,v4.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase ext 
v4.16b,v5.16b,v5.16b,#8 pmull v0.1q,v0.1d,v19.1d pmull v5.1q,v5.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v4.16b,v4.16b,v7.16b eor v16.16b,v0.16b,v18.16b //H^5 eor v17.16b,v5.16b,v4.16b //H^6 ext v26.16b, v16.16b, v16.16b,#8 //Karatsuba pre-processing ext v28.16b, v17.16b, v17.16b,#8 ext v18.16b,v22.16b,v22.16b,#8 eor v16.16b,v16.16b,v26.16b eor v17.16b,v17.16b,v28.16b eor v18.16b,v18.16b,v22.16b ext v27.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v26.2d,v27.2d,v28.2d},[x0],#48 //store Htable[6..8] //calculate H^7 and H^8 pmull2 v0.1q,v22.2d,v26.2d pmull2 v5.1q,v22.2d,v28.2d pmull v2.1q,v22.1d,v26.1d pmull v7.1q,v22.1d,v28.1d pmull v1.1q,v16.1d,v18.1d pmull v6.1q,v17.1d,v18.1d ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing ext v17.16b,v5.16b,v7.16b,#8 eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v16.16b eor v4.16b,v5.16b,v7.16b eor v6.16b,v6.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase eor v6.16b,v6.16b,v4.16b pmull v4.1q,v5.1d,v19.1d ins v2.d[0],v1.d[1] ins v7.d[0],v6.d[1] ins v1.d[1],v0.d[0] ins v6.d[1],v5.d[0] eor v0.16b,v1.16b,v18.16b eor v5.16b,v6.16b,v4.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase ext v4.16b,v5.16b,v5.16b,#8 pmull v0.1q,v0.1d,v19.1d pmull v5.1q,v5.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v4.16b,v4.16b,v7.16b eor v16.16b,v0.16b,v18.16b //H^7 eor v17.16b,v5.16b,v4.16b //H^8 ext v29.16b,v16.16b,v16.16b,#8 //Karatsuba pre-processing ext v31.16b,v17.16b,v17.16b,#8 eor v16.16b,v16.16b,v29.16b eor v17.16b,v17.16b,v31.16b ext v30.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v29.2d,v30.2d,v31.2d},[x0] //store Htable[9..11] ret .globl gcm_gmult_v8 .def gcm_gmult_v8 .type 32 .endef .align 4 gcm_gmult_v8: AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x0] //load Xi movi v19.16b,#0xe1 ld1 {v20.2d,v21.2d},[x1] //load twisted H, ... ext v20.16b,v20.16b,v20.16b,#8 shl v19.2d,v19.2d,#57 #ifndef __AARCH64EB__ rev64 v17.16b,v17.16b #endif ext v3.16b,v17.16b,v17.16b,#8 pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi) ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b #endif ext v0.16b,v0.16b,v0.16b,#8 st1 {v0.2d},[x0] //write out Xi ret .globl gcm_ghash_v8 .def gcm_ghash_v8 .type 32 .endef .align 4 gcm_ghash_v8: AARCH64_VALID_CALL_TARGET cmp x3,#64 b.hs Lgcm_ghash_v8_4x ld1 {v0.2d},[x0] //load [rotated] Xi //"[rotated]" means that //loaded value would have //to be rotated in order to //make it appear as in //algorithm specification subs x3,x3,#32 //see if x3 is 32 or larger mov x12,#16 //x12 is used as post- //increment for input pointer; //as loop is modulo-scheduled //x12 is zeroed just in time //to preclude overstepping //inp[len], which means that //last block[s] are actually //loaded twice, but last //copy is not processed ld1 {v20.2d,v21.2d},[x1],#32 //load twisted H, ..., H^2 ext v20.16b,v20.16b,v20.16b,#8 movi v19.16b,#0xe1 ld1 {v22.2d},[x1] ext v22.16b,v22.16b,v22.16b,#8 csel x12,xzr,x12,eq //is it time to zero x12? 
ext v0.16b,v0.16b,v0.16b,#8 //rotate Xi ld1 {v16.2d},[x2],#16 //load [rotated] I[0] shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant #ifndef __AARCH64EB__ rev64 v16.16b,v16.16b rev64 v0.16b,v0.16b #endif ext v3.16b,v16.16b,v16.16b,#8 //rotate I[0] b.lo Lodd_tail_v8 //x3 was less than 32 ld1 {v17.2d},[x2],x12 //load [rotated] I[1] #ifndef __AARCH64EB__ rev64 v17.16b,v17.16b #endif ext v7.16b,v17.16b,v17.16b,#8 eor v3.16b,v3.16b,v0.16b //I[i]^=Xi pmull v4.1q,v20.1d,v7.1d //H·Ii+1 eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing pmull2 v6.1q,v20.2d,v7.2d b Loop_mod2x_v8 .align 4 Loop_mod2x_v8: ext v18.16b,v3.16b,v3.16b,#8 subs x3,x3,#32 //is there more data? pmull v0.1q,v22.1d,v3.1d //H^2.lo·Xi.lo csel x12,xzr,x12,lo //is it time to zero x12? pmull v5.1q,v21.1d,v17.1d eor v18.16b,v18.16b,v3.16b //Karatsuba pre-processing pmull2 v2.1q,v22.2d,v3.2d //H^2.hi·Xi.hi eor v0.16b,v0.16b,v4.16b //accumulate pmull2 v1.1q,v21.2d,v18.2d //(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) ld1 {v16.2d},[x2],x12 //load [rotated] I[i+2] eor v2.16b,v2.16b,v6.16b csel x12,xzr,x12,eq //is it time to zero x12? eor v1.16b,v1.16b,v5.16b ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 {v17.2d},[x2],x12 //load [rotated] I[i+3] #ifndef __AARCH64EB__ rev64 v16.16b,v16.16b #endif eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction #ifndef __AARCH64EB__ rev64 v17.16b,v17.16b #endif ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] ext v7.16b,v17.16b,v17.16b,#8 ext v3.16b,v16.16b,v16.16b,#8 eor v0.16b,v1.16b,v18.16b pmull v4.1q,v20.1d,v7.1d //H·Ii+1 eor v3.16b,v3.16b,v2.16b //accumulate v3.16b early ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v3.16b,v3.16b,v18.16b eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing eor v3.16b,v3.16b,v0.16b pmull2 v6.1q,v20.2d,v7.2d b.hs Loop_mod2x_v8 //there was at least 32 more bytes eor v2.16b,v2.16b,v18.16b ext v3.16b,v16.16b,v16.16b,#8 //re-construct v3.16b adds x3,x3,#32 //re-construct x3 eor v0.16b,v0.16b,v2.16b //re-construct v0.16b b.eq Ldone_v8 //is x3 zero? 
Lodd_tail_v8: ext v18.16b,v0.16b,v0.16b,#8 eor v3.16b,v3.16b,v0.16b //inp^=Xi eor v17.16b,v16.16b,v18.16b //v17.16b is rotated inp^Xi pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi) ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b Ldone_v8: #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b #endif ext v0.16b,v0.16b,v0.16b,#8 st1 {v0.2d},[x0] //write out Xi ret .def gcm_ghash_v8_4x .type 32 .endef .align 4 gcm_ghash_v8_4x: Lgcm_ghash_v8_4x: ld1 {v0.2d},[x0] //load [rotated] Xi ld1 {v20.2d,v21.2d,v22.2d},[x1],#48 //load twisted H, ..., H^2 ext v20.16b,v20.16b,v20.16b,#8 ext v22.16b,v22.16b,v22.16b,#8 movi v19.16b,#0xe1 ld1 {v26.2d,v27.2d,v28.2d},[x1] //load twisted H^3, ..., H^4 ext v26.16b,v26.16b,v26.16b,#8 ext v28.16b,v28.16b,v28.16b,#8 shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64 #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b rev64 v5.16b,v5.16b rev64 v6.16b,v6.16b rev64 v7.16b,v7.16b rev64 v4.16b,v4.16b #endif ext v25.16b,v7.16b,v7.16b,#8 ext v24.16b,v6.16b,v6.16b,#8 ext v23.16b,v5.16b,v5.16b,#8 pmull v29.1q,v20.1d,v25.1d //H·Ii+3 eor v7.16b,v7.16b,v25.16b pmull2 v31.1q,v20.2d,v25.2d pmull v30.1q,v21.1d,v7.1d pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2 eor v6.16b,v6.16b,v24.16b pmull2 v24.1q,v22.2d,v24.2d pmull2 v6.1q,v21.2d,v6.2d eor v29.16b,v29.16b,v16.16b eor v31.16b,v31.16b,v24.16b eor v30.16b,v30.16b,v6.16b pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1 eor v5.16b,v5.16b,v23.16b pmull2 v23.1q,v26.2d,v23.2d pmull v5.1q,v27.1d,v5.1d eor v29.16b,v29.16b,v7.16b eor v31.16b,v31.16b,v23.16b eor v30.16b,v30.16b,v5.16b subs x3,x3,#128 b.lo Ltail4x b Loop4x .align 4 Loop4x: eor v16.16b,v4.16b,v0.16b ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64 ext v3.16b,v16.16b,v16.16b,#8 #ifndef __AARCH64EB__ rev64 v5.16b,v5.16b rev64 v6.16b,v6.16b rev64 v7.16b,v7.16b rev64 v4.16b,v4.16b #endif pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v28.2d,v3.2d ext v25.16b,v7.16b,v7.16b,#8 pmull2 v1.1q,v27.2d,v16.2d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b ext v24.16b,v6.16b,v6.16b,#8 eor v1.16b,v1.16b,v30.16b ext v23.16b,v5.16b,v5.16b,#8 ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b pmull v29.1q,v20.1d,v25.1d //H·Ii+3 eor v7.16b,v7.16b,v25.16b eor v1.16b,v1.16b,v17.16b pmull2 v31.1q,v20.2d,v25.2d eor v1.16b,v1.16b,v18.16b pmull v30.1q,v21.1d,v7.1d pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2 eor v6.16b,v6.16b,v24.16b pmull2 v24.1q,v22.2d,v24.2d eor v0.16b,v1.16b,v18.16b pmull2 v6.1q,v21.2d,v6.2d eor v29.16b,v29.16b,v16.16b eor v31.16b,v31.16b,v24.16b eor v30.16b,v30.16b,v6.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1 eor v5.16b,v5.16b,v23.16b eor v18.16b,v18.16b,v2.16b pmull2 v23.1q,v26.2d,v23.2d pmull v5.1q,v27.1d,v5.1d eor v0.16b,v0.16b,v18.16b eor v29.16b,v29.16b,v7.16b eor v31.16b,v31.16b,v23.16b ext v0.16b,v0.16b,v0.16b,#8 eor v30.16b,v30.16b,v5.16b subs x3,x3,#64 b.hs Loop4x Ltail4x: eor 
v16.16b,v4.16b,v0.16b ext v3.16b,v16.16b,v16.16b,#8 pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v28.2d,v3.2d pmull2 v1.1q,v27.2d,v16.2d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b eor v1.16b,v1.16b,v30.16b adds x3,x3,#64 b.eq Ldone4x cmp x3,#32 b.lo Lone b.eq Ltwo Lthree: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 {v4.2d,v5.2d,v6.2d},[x2] eor v1.16b,v1.16b,v18.16b #ifndef __AARCH64EB__ rev64 v5.16b,v5.16b rev64 v6.16b,v6.16b rev64 v4.16b,v4.16b #endif pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] ext v24.16b,v6.16b,v6.16b,#8 ext v23.16b,v5.16b,v5.16b,#8 eor v0.16b,v1.16b,v18.16b pmull v29.1q,v20.1d,v24.1d //H·Ii+2 eor v6.16b,v6.16b,v24.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b pmull2 v31.1q,v20.2d,v24.2d pmull v30.1q,v21.1d,v6.1d eor v0.16b,v0.16b,v18.16b pmull v7.1q,v22.1d,v23.1d //H^2·Ii+1 eor v5.16b,v5.16b,v23.16b ext v0.16b,v0.16b,v0.16b,#8 pmull2 v23.1q,v22.2d,v23.2d eor v16.16b,v4.16b,v0.16b pmull2 v5.1q,v21.2d,v5.2d ext v3.16b,v16.16b,v16.16b,#8 eor v29.16b,v29.16b,v7.16b eor v31.16b,v31.16b,v23.16b eor v30.16b,v30.16b,v5.16b pmull v0.1q,v26.1d,v3.1d //H^3·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v26.2d,v3.2d pmull v1.1q,v27.1d,v16.1d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b eor v1.16b,v1.16b,v30.16b b Ldone4x .align 4 Ltwo: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 {v4.2d,v5.2d},[x2] eor v1.16b,v1.16b,v18.16b #ifndef __AARCH64EB__ rev64 v5.16b,v5.16b rev64 v4.16b,v4.16b #endif pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] ext v23.16b,v5.16b,v5.16b,#8 eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b ext v0.16b,v0.16b,v0.16b,#8 pmull v29.1q,v20.1d,v23.1d //H·Ii+1 eor v5.16b,v5.16b,v23.16b eor v16.16b,v4.16b,v0.16b ext v3.16b,v16.16b,v16.16b,#8 pmull2 v31.1q,v20.2d,v23.2d pmull v30.1q,v21.1d,v5.1d pmull v0.1q,v22.1d,v3.1d //H^2·(Xi+Ii) eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v22.2d,v3.2d pmull2 v1.1q,v21.2d,v16.2d eor v0.16b,v0.16b,v29.16b eor v2.16b,v2.16b,v31.16b eor v1.16b,v1.16b,v30.16b b Ldone4x .align 4 Lone: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b ld1 {v4.2d},[x2] eor v1.16b,v1.16b,v18.16b #ifndef __AARCH64EB__ rev64 v4.16b,v4.16b #endif pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b ext v0.16b,v0.16b,v0.16b,#8 eor v16.16b,v4.16b,v0.16b ext v3.16b,v16.16b,v16.16b,#8 pmull v0.1q,v20.1d,v3.1d eor v16.16b,v16.16b,v3.16b pmull2 v2.1q,v20.2d,v3.2d pmull v1.1q,v21.1d,v16.1d Ldone4x: ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b ext v0.16b,v0.16b,v0.16b,#8 #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b #endif st1 {v0.2d},[x0] 
//write out Xi ret .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
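The gcm_init_v8/gcm_gmult_v8/gcm_ghash_v8 routines above evaluate GHASH with PMULL/PMULL2 carry-less multiplies, a precomputed "twisted H" table for the Karatsuba splits, and a two-phase reduction built around the 0xc2..0 constant they compose at entry. Purely as a point of reference — the bit reflection and twisting used by the assembly are not reproduced here — the underlying operation is multiplication in GF(2^128) with the GCM reduction polynomial. A bit-at-a-time Python sketch, assuming the NIST SP 800-38D conventions:

R_GCM = 0xE1 << 120  # reduction constant for x^128 + x^7 + x^2 + x + 1 (GCM bit order)

def gf128_mul(x, y):
    # Schoolbook GF(2^128) multiply of two 128-bit integers in the GCM
    # bit ordering (bit 0 is the most significant bit of the block).
    z, v = 0, y
    for i in range(128):
        if (x >> (127 - i)) & 1:
            z ^= v
        v = (v >> 1) ^ R_GCM if v & 1 else v >> 1
    return z

def ghash(h, blocks):
    # h: hash key as a 128-bit integer; blocks: iterable of 16-byte blocks.
    y = 0
    for block in blocks:
        y = gf128_mul(y ^ int.from_bytes(block, "big"), h)
    return y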
wlsfx/bnbb
1,173
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/rndr-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#include <openssl/asm_base.h>
#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>
.arch armv8-a
.text
# int CRYPTO_rndr_multiple8(uint8_t *out, const size_t len)
.globl CRYPTO_rndr_multiple8
.def CRYPTO_rndr_multiple8
   .type 32
.endef
.align 4
CRYPTO_rndr_multiple8:
	cbz x1, Lrndr_multiple8_error // len = 0 is not supported
Lrndr_multiple8_loop:
	mrs x2, s3_3_c2_c4_0 // rndr instruction https://developer.arm.com/documentation/ddi0601/2024-09/Index-by-Encoding
	cbz x2, Lrndr_multiple8_error // Check if rndr failed
	str x2, [x0], #8 // Copy 8 bytes to *out and increment pointer by 8
	sub x1, x1, #8
	cbz x1, Lrndr_multiple8_done // If multiple of 8 this will be 0 eventually
	b Lrndr_multiple8_loop
Lrndr_multiple8_done:
	mov x0, #1 // Return value success
	ret
Lrndr_multiple8_error:
	mov x0, #0 // Return value error
	ret
#endif  // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
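CRYPTO_rndr_multiple8 only accepts positive lengths that are multiples of 8: it fills the output 64 bits at a time from the RNDR system register and reports success (1) or failure (0) in x0. Purely as an illustration of that contract — os.urandom stands in for the RNDR register here, and the fill_random wrapper is a hypothetical caller, not something taken from the library — a Python sketch:

import os

def rndr_multiple8(out_len):
    # Mirrors the assembly's contract: out_len must be a nonzero multiple of 8.
    # os.urandom is only a placeholder for reading the RNDR register.
    if out_len == 0 or out_len % 8 != 0:
        return None  # corresponds to the error return (x0 = 0)
    return b"".join(os.urandom(8) for _ in range(out_len // 8))

def fill_random(out_len):
    # Hypothetical caller: round the request up to 8 bytes, then truncate,
    # one way a multiple-of-8 primitive can serve arbitrary lengths.
    if out_len == 0:
        return b""
    buf = rndr_multiple8((out_len + 7) // 8 * 8)
    return None if buf is None else buf[:out_len]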
wlsfx/bnbb
42,639
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/vpaes-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <openssl/arm_arch.h> .section .rodata .align 7 // totally strategic alignment _vpaes_consts: Lk_mc_forward: // mc_forward .quad 0x0407060500030201, 0x0C0F0E0D080B0A09 .quad 0x080B0A0904070605, 0x000302010C0F0E0D .quad 0x0C0F0E0D080B0A09, 0x0407060500030201 .quad 0x000302010C0F0E0D, 0x080B0A0904070605 Lk_mc_backward: // mc_backward .quad 0x0605040702010003, 0x0E0D0C0F0A09080B .quad 0x020100030E0D0C0F, 0x0A09080B06050407 .quad 0x0E0D0C0F0A09080B, 0x0605040702010003 .quad 0x0A09080B06050407, 0x020100030E0D0C0F Lk_sr: // sr .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 .quad 0x030E09040F0A0500, 0x0B06010C07020D08 .quad 0x0F060D040B020900, 0x070E050C030A0108 .quad 0x0B0E0104070A0D00, 0x0306090C0F020508 // // "Hot" constants // Lk_inv: // inv, inva .quad 0x0E05060F0D080180, 0x040703090A0B0C02 .quad 0x01040A060F0B0780, 0x030D0E0C02050809 Lk_ipt: // input transform (lo, hi) .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 Lk_sbo: // sbou, sbot .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA Lk_sb1: // sb1u, sb1t .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 Lk_sb2: // sb2u, sb2t .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD // // Decryption stuff // Lk_dipt: // decryption input transform .quad 0x0F505B040B545F00, 0x154A411E114E451A .quad 0x86E383E660056500, 0x12771772F491F194 Lk_dsbo: // decryption sbox final output .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C Lk_dsb9: // decryption sbox output *9*u, *9*t .quad 0x851C03539A86D600, 0xCAD51F504F994CC9 .quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565 Lk_dsbd: // decryption sbox output *D*u, *D*t .quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439 .quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3 Lk_dsbb: // decryption sbox output *B*u, *B*t .quad 0xD022649296B44200, 0x602646F6B0F2D404 .quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B Lk_dsbe: // decryption sbox output *E*u, *E*t .quad 0x46F2929626D4D000, 0x2242600464B4F6B0 .quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32 // // Key schedule constants // Lk_dksd: // decryption key schedule: invskew x*D .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E Lk_dksb: // decryption key schedule: invskew x*B .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99 .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8 Lk_dkse: // decryption key schedule: invskew x*E + 0x63 .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086 .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487 Lk_dks9: // decryption key schedule: invskew x*9 .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE Lk_rcon: // rcon .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 Lk_opt: // output transform .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 Lk_deskew: // deskew tables: inverts the sbox's "skew" .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 .align 2 .align 6 
.text ## ## _aes_preheat ## ## Fills register %r10 -> .aes_consts (so you can -fPIC) ## and %xmm9-%xmm15 as specified below. ## .def _vpaes_encrypt_preheat .type 32 .endef .align 4 _vpaes_encrypt_preheat: adrp x10, Lk_inv add x10, x10, :lo12:Lk_inv movi v17.16b, #0x0f ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // Lk_ipt, Lk_sbo ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // Lk_sb1, Lk_sb2 ret ## ## _aes_encrypt_core ## ## AES-encrypt %xmm0. ## ## Inputs: ## %xmm0 = input ## %xmm9-%xmm15 as in _vpaes_preheat ## (%rdx) = scheduled keys ## ## Output in %xmm0 ## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax ## Preserves %xmm6 - %xmm8 so you get some local vectors ## ## .def _vpaes_encrypt_core .type 32 .endef .align 4 _vpaes_encrypt_core: mov x9, x2 ldr w8, [x2,#240] // pull rounds adrp x11, Lk_mc_forward+16 add x11, x11, :lo12:Lk_mc_forward+16 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 b Lenc_entry .align 4 Lenc_loop: // middle of middle round add x10, x11, #0x40 tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[] tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[] tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D and x11, x11, #~(1<<6) // and $0x30, %r11 # ... 
mod 4 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D sub w8, w8, #1 // nr-- Lenc_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 cbnz w8, Lenc_loop // middle of last round add x10, x11, #0x80 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[] tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 ret .globl vpaes_encrypt .def vpaes_encrypt .type 32 .endef .align 4 vpaes_encrypt: #ifdef BORINGSSL_DISPATCH_TEST adrp x9,BORINGSSL_function_hit add x9, x9, :lo12:BORINGSSL_function_hit mov w10, #1 strb w10, [x9,#4] // kFlag_vpaes_encrypt #endif AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 ld1 {v7.16b}, [x0] bl _vpaes_encrypt_preheat bl _vpaes_encrypt_core st1 {v0.16b}, [x1] ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .def _vpaes_encrypt_2x .type 32 .endef .align 4 _vpaes_encrypt_2x: mov x9, x2 ldr w8, [x2,#240] // pull rounds adrp x11, Lk_mc_forward+16 add x11, x11, :lo12:Lk_mc_forward+16 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0 and v9.16b, v15.16b, v17.16b ushr v8.16b, v15.16b, #4 tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 tbl v9.16b, {v20.16b}, v9.16b // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 tbl v10.16b, {v21.16b}, v8.16b eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 eor v8.16b, v9.16b, v16.16b eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 eor v8.16b, v8.16b, v10.16b b Lenc_2x_entry .align 4 Lenc_2x_loop: // middle of middle round add x10, x11, #0x40 tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u tbl v12.16b, {v25.16b}, v10.16b ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[] tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t tbl v8.16b, {v24.16b}, v11.16b eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u tbl v13.16b, {v27.16b}, v10.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A eor v8.16b, v8.16b, v12.16b tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t tbl v10.16b, {v26.16b}, v11.16b ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[] tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B tbl v11.16b, {v8.16b}, v1.16b eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A eor v10.16b, v10.16b, v13.16b tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D tbl v8.16b, {v8.16b}, v4.16b eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B eor v11.16b, v11.16b, v10.16b tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C tbl v12.16b, {v11.16b},v1.16b eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D eor v8.16b, v8.16b, v11.16b and x11, x11, #~(1<<6) // and $0x30, %r11 # ... 
mod 4 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D eor v8.16b, v8.16b, v12.16b sub w8, w8, #1 // nr-- Lenc_2x_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i and v9.16b, v8.16b, v17.16b ushr v8.16b, v8.16b, #4 tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k tbl v13.16b, {v19.16b},v9.16b eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j eor v9.16b, v9.16b, v8.16b tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v11.16b, {v18.16b},v8.16b tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j tbl v12.16b, {v18.16b},v9.16b eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v11.16b, v11.16b, v13.16b eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k eor v12.16b, v12.16b, v13.16b tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v10.16b, {v18.16b},v11.16b tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak tbl v11.16b, {v18.16b},v12.16b eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v10.16b, v10.16b, v9.16b eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo eor v11.16b, v11.16b, v8.16b ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 cbnz w8, Lenc_2x_loop // middle of last round add x10, x11, #0x80 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou tbl v12.16b, {v22.16b}, v10.16b ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[] tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t tbl v8.16b, {v23.16b}, v11.16b eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A eor v8.16b, v8.16b, v12.16b tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0 tbl v1.16b, {v8.16b},v1.16b ret .def _vpaes_decrypt_preheat .type 32 .endef .align 4 _vpaes_decrypt_preheat: adrp x10, Lk_inv add x10, x10, :lo12:Lk_inv movi v17.16b, #0x0f adrp x11, Lk_dipt add x11, x11, :lo12:Lk_dipt ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x11],#64 // Lk_dipt, Lk_dsbo ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x11],#64 // Lk_dsb9, Lk_dsbd ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x11] // Lk_dsbb, Lk_dsbe ret ## ## Decryption core ## ## Same API as encryption core. 
## .def _vpaes_decrypt_core .type 32 .endef .align 4 _vpaes_decrypt_core: mov x9, x2 ldr w8, [x2,#240] // pull rounds // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11 eor x11, x11, #0x30 // xor $0x30, %r11 adrp x10, Lk_sr add x10, x10, :lo12:Lk_sr and x11, x11, #0x30 // and $0x30, %r11 add x11, x11, x10 adrp x10, Lk_mc_forward+48 add x10, x10, :lo12:Lk_mc_forward+48 ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 ld1 {v5.2d}, [x10] // vmovdqa Lk_mc_forward+48(%rip), %xmm5 // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 b Ldec_entry .align 4 Ldec_loop: // // Inverse mix columns // // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u tbl v1.16b, {v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0 // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v1.16b, {v29.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v1.16b, {v31.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch sub w8, w8, #1 // sub $1,%rax # nr-- Ldec_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 
= jo ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0 cbnz w8, Ldec_loop // middle of last round // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # Lk_sr-Lk_dsbd=-0x160 tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A tbl v0.16b, {v0.16b}, v2.16b // vpshufb %xmm2, %xmm0, %xmm0 ret .globl vpaes_decrypt .def vpaes_decrypt .type 32 .endef .align 4 vpaes_decrypt: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 ld1 {v7.16b}, [x0] bl _vpaes_decrypt_preheat bl _vpaes_decrypt_core st1 {v0.16b}, [x1] ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // v14-v15 input, v0-v1 output .def _vpaes_decrypt_2x .type 32 .endef .align 4 _vpaes_decrypt_2x: mov x9, x2 ldr w8, [x2,#240] // pull rounds // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11 eor x11, x11, #0x30 // xor $0x30, %r11 adrp x10, Lk_sr add x10, x10, :lo12:Lk_sr and x11, x11, #0x30 // and $0x30, %r11 add x11, x11, x10 adrp x10, Lk_mc_forward+48 add x10, x10, :lo12:Lk_mc_forward+48 ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0 and v9.16b, v15.16b, v17.16b ushr v8.16b, v15.16b, #4 tbl v2.16b, {v20.16b},v1.16b // vpshufb %xmm1, %xmm2, %xmm2 tbl v10.16b, {v20.16b},v9.16b ld1 {v5.2d}, [x10] // vmovdqa Lk_mc_forward+48(%rip), %xmm5 // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi tbl v0.16b, {v21.16b},v0.16b // vpshufb %xmm0, %xmm1, %xmm0 tbl v8.16b, {v21.16b},v8.16b eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2 eor v10.16b, v10.16b, v16.16b eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 eor v8.16b, v8.16b, v10.16b b Ldec_2x_entry .align 4 Ldec_2x_loop: // // Inverse mix columns // // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u tbl v12.16b, {v24.16b}, v10.16b tbl v1.16b, {v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t tbl v9.16b, {v25.16b}, v11.16b eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0 eor v8.16b, v12.16b, v16.16b // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, v9.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu tbl v12.16b, {v26.16b}, v10.16b tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v8.16b, {v8.16b},v5.16b tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt tbl v9.16b, {v27.16b}, v11.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch eor v8.16b, v8.16b, v12.16b // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, v9.16b // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu tbl v12.16b, {v28.16b}, v10.16b tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v8.16b, {v8.16b},v5.16b tbl v1.16b, {v29.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt tbl v9.16b, {v29.16b}, v11.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, 
%xmm0, %xmm0 # 4 = ch eor v8.16b, v8.16b, v12.16b // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, v9.16b // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu tbl v12.16b, {v30.16b}, v10.16b tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch tbl v8.16b, {v8.16b},v5.16b tbl v1.16b, {v31.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet tbl v9.16b, {v31.16b}, v11.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch eor v8.16b, v8.16b, v12.16b ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch eor v8.16b, v8.16b, v9.16b sub w8, w8, #1 // sub $1,%rax # nr-- Ldec_2x_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i and v9.16b, v8.16b, v17.16b ushr v8.16b, v8.16b, #4 tbl v2.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k tbl v10.16b, {v19.16b},v9.16b eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j eor v9.16b, v9.16b, v8.16b tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v11.16b, {v18.16b},v8.16b tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j tbl v12.16b, {v18.16b},v9.16b eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v11.16b, v11.16b, v10.16b eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k eor v12.16b, v12.16b, v10.16b tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v10.16b, {v18.16b},v11.16b tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak tbl v11.16b, {v18.16b},v12.16b eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v10.16b, v10.16b, v9.16b eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo eor v11.16b, v11.16b, v8.16b ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0 cbnz w8, Ldec_2x_loop // middle of last round // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou tbl v12.16b, {v22.16b}, v10.16b // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t tbl v9.16b, {v23.16b}, v11.16b ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # Lk_sr-Lk_dsbd=-0x160 eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A eor v8.16b, v9.16b, v12.16b tbl v0.16b, {v0.16b},v2.16b // vpshufb %xmm2, %xmm0, %xmm0 tbl v1.16b, {v8.16b},v2.16b ret ######################################################## ## ## ## AES key schedule ## ## ## ######################################################## .def _vpaes_key_preheat .type 32 .endef .align 4 _vpaes_key_preheat: adrp x10, Lk_inv add x10, x10, :lo12:Lk_inv movi v16.16b, #0x5b // Lk_s63 adrp x11, Lk_sb1 add x11, x11, :lo12:Lk_sb1 movi v17.16b, #0x0f // Lk_s0F ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // Lk_inv, Lk_ipt adrp x10, Lk_dksd add x10, x10, :lo12:Lk_dksd ld1 {v22.2d,v23.2d}, [x11] // Lk_sb1 adrp x11, Lk_mc_forward add x11, x11, :lo12:Lk_mc_forward ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // Lk_dksd, Lk_dksb ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // Lk_dkse, Lk_dks9 ld1 {v8.2d}, [x10] // Lk_rcon ld1 {v9.2d}, [x11] // Lk_mc_forward[0] ret .def 
_vpaes_schedule_core .type 32 .endef .align 4 _vpaes_schedule_core: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp,#-16]! add x29,sp,#0 bl _vpaes_key_preheat // load the tables ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned) // input transform mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3 bl _vpaes_schedule_transform mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7 adrp x10, Lk_sr // lea Lk_sr(%rip),%r10 add x10, x10, :lo12:Lk_sr add x8, x8, x10 cbnz w3, Lschedule_am_decrypting // encrypting, output zeroth round key after transform st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) b Lschedule_go Lschedule_am_decrypting: // decrypting, output zeroth round key after shiftrows ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) eor x8, x8, #0x30 // xor $0x30, %r8 Lschedule_go: cmp w1, #192 // cmp $192, %esi b.hi Lschedule_256 b.eq Lschedule_192 // 128: fall though ## ## .schedule_128 ## ## 128-bit specific part of key schedule. ## ## This schedule is really simple, because all its parts ## are accomplished by the subroutines. ## Lschedule_128: mov x0, #10 // mov $10, %esi Loop_schedule_128: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_round cbz x0, Lschedule_mangle_last bl _vpaes_schedule_mangle // write output b Loop_schedule_128 ## ## .aes_schedule_192 ## ## 192-bit specific part of key schedule. ## ## The main body of this schedule is the same as the 128-bit ## schedule, but with more smearing. The long, high side is ## stored in %xmm7 as before, and the short, low side is in ## the high bits of %xmm6. ## ## This schedule is somewhat nastier, however, because each ## round produces 192 bits of key material, or 1.5 round keys. ## Therefore, on each cycle we do 2 rounds and produce 3 round ## keys. ## .align 4 Lschedule_192: sub x0, x0, #8 ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned) bl _vpaes_schedule_transform // input transform mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4 ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros mov x0, #4 // mov $4, %esi Loop_schedule_192: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_round ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0 bl _vpaes_schedule_mangle // save key n bl _vpaes_schedule_192_smear bl _vpaes_schedule_mangle // save key n+1 bl _vpaes_schedule_round cbz x0, Lschedule_mangle_last bl _vpaes_schedule_mangle // save key n+2 bl _vpaes_schedule_192_smear b Loop_schedule_192 ## ## .aes_schedule_256 ## ## 256-bit specific part of key schedule. ## ## The structure here is very similar to the 128-bit ## schedule, but with an additional "low side" in ## %xmm6. The low side's rounds are the same as the ## high side's, except no rcon and no rotation. ## .align 4 Lschedule_256: ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) bl _vpaes_schedule_transform // input transform mov x0, #7 // mov $7, %esi Loop_schedule_256: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_mangle // output low result mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6 // high round bl _vpaes_schedule_round cbz x0, Lschedule_mangle_last bl _vpaes_schedule_mangle // low round. 
swap xmm7 and xmm6 dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 movi v4.16b, #0 mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5 mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7 bl _vpaes_schedule_low_round mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7 b Loop_schedule_256 ## ## .aes_schedule_mangle_last ## ## Mangler for last round of key schedule ## Mangles %xmm0 ## when encrypting, outputs out(%xmm0) ^ 63 ## when decrypting, outputs unskew(%xmm0) ## ## Always called right before return... jumps to cleanup and exits ## .align 4 Lschedule_mangle_last: // schedule last round key from xmm0 adrp x11, Lk_deskew // lea Lk_deskew(%rip),%r11 # prepare to deskew add x11, x11, :lo12:Lk_deskew cbnz w3, Lschedule_mangle_last_dec // encrypting ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1 adrp x11, Lk_opt // lea Lk_opt(%rip), %r11 # prepare to output transform add x11, x11, :lo12:Lk_opt add x2, x2, #32 // add $32, %rdx tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute Lschedule_mangle_last_dec: ld1 {v20.2d,v21.2d}, [x11] // reload constants sub x2, x2, #16 // add $-16, %rdx eor v0.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm0 bl _vpaes_schedule_transform // output transform st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key // cleanup eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2 eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3 eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5 eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6 eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7 ldp x29, x30, [sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret ## ## .aes_schedule_192_smear ## ## Smear the short, low side in the 192-bit key schedule. ## ## Inputs: ## %xmm7: high side, b a x y ## %xmm6: low side, d c 0 0 ## %xmm13: 0 ## ## Outputs: ## %xmm6: b+c+d b+c 0 0 ## %xmm0: b+c+d b+c b a ## .def _vpaes_schedule_192_smear .type 32 .endef .align 4 _vpaes_schedule_192_smear: movi v1.16b, #0 dup v0.4s, v7.s[3] ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0 ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0 ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros ret ## ## .aes_schedule_round ## ## Runs one main round of the key schedule on %xmm0, %xmm7 ## ## Specifically, runs subbytes on the high dword of %xmm0 ## then rotates it by one byte and xors into the low dword of ## %xmm7. ## ## Adds rcon from low byte of %xmm8, then rotates %xmm8 for ## next rcon. ## ## Smears the dwords of %xmm7 by xoring the low into the ## second low, result into third, result into highest. ## ## Returns results in %xmm7 = %xmm0. ## Clobbers %xmm1-%xmm4, %r11. 
## .def _vpaes_schedule_round .type 32 .endef .align 4 _vpaes_schedule_round: // extract rcon from xmm8 movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4 ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1 ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 // rotate dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0 // fall through... // low round: same as high round, but no rotation and no rcon. _vpaes_schedule_low_round: // smear xmm7 ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4 // subbytes and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7 tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v7.16b, v7.16b, v16.16b // vpxor Lk_s63(%rip), %xmm7, %xmm7 tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output // add in smeared stuff eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0 eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7 ret ## ## .aes_schedule_transform ## ## Linear-transform %xmm0 according to tables at (%r11) ## ## Requires that %xmm9 = 0x0F0F... as in preheat ## Output in %xmm0 ## Clobbers %xmm1, %xmm2 ## .def _vpaes_schedule_transform .type 32 .endef .align 4 _vpaes_schedule_transform: and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 // vmovdqa (%r11), %xmm2 # lo tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 // vmovdqa 16(%r11), %xmm1 # hi tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 ret ## ## .aes_schedule_mangle ## ## Mangle xmm0 from (basis-transformed) standard version ## to our version. 
## ## On encrypt, ## xor with 0x63 ## multiply by circulant 0,1,1,1 ## apply shiftrows transform ## ## On decrypt, ## xor with 0x63 ## multiply by "inverse mixcolumns" circulant E,B,D,9 ## deskew ## apply shiftrows transform ## ## ## Writes out to (%rdx), and increments or decrements it ## Keeps track of round number mod 4 in %r8 ## Preserves xmm0 ## Clobbers xmm1-xmm5 ## .def _vpaes_schedule_mangle .type 32 .endef .align 4 _vpaes_schedule_mangle: mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later // vmovdqa .Lk_mc_forward(%rip),%xmm5 cbnz w3, Lschedule_mangle_dec // encrypting eor v4.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm4 add x2, x2, #16 // add $16, %rdx tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4 tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1 tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3 eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3 b Lschedule_mangle_both .align 4 Lschedule_mangle_dec: // inverse mix columns // lea .Lk_dksd(%rip),%r11 ushr v1.16b, v4.16b, #4 // vpsrlb $4, %xmm4, %xmm1 # 1 = hi and v4.16b, v4.16b, v17.16b // vpand %xmm9, %xmm4, %xmm4 # 4 = lo // vmovdqa 0x00(%r11), %xmm2 tbl v2.16b, {v24.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 // vmovdqa 0x10(%r11), %xmm3 tbl v3.16b, {v25.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 // vmovdqa 0x20(%r11), %xmm2 tbl v2.16b, {v26.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 // vmovdqa 0x30(%r11), %xmm3 tbl v3.16b, {v27.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 // vmovdqa 0x40(%r11), %xmm2 tbl v2.16b, {v28.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 // vmovdqa 0x50(%r11), %xmm3 tbl v3.16b, {v29.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 // vmovdqa 0x60(%r11), %xmm2 tbl v2.16b, {v30.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2 tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3 // vmovdqa 0x70(%r11), %xmm4 tbl v4.16b, {v31.16b}, v1.16b // vpshufb %xmm1, %xmm4, %xmm4 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2 eor v3.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm3 sub x2, x2, #16 // add $-16, %rdx Lschedule_mangle_both: tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 add x8, x8, #48 // add $-16, %r8 and x8, x8, #~(1<<6) // and $0x30, %r8 st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) ret .globl vpaes_set_encrypt_key .def vpaes_set_encrypt_key .type 32 .endef .align 4 vpaes_set_encrypt_key: #ifdef BORINGSSL_DISPATCH_TEST adrp x9,BORINGSSL_function_hit add x9, x9, :lo12:BORINGSSL_function_hit mov w10, #1 strb w10, [x9,#5] // kFlag_vpaes_set_encrypt_key #endif AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! 
// ABI spec says so lsr w9, w1, #5 // shr $5,%eax add w9, w9, #5 // $5,%eax str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; mov w3, #0 // mov $0,%ecx mov x8, #0x30 // mov $0x30,%r8d bl _vpaes_schedule_core eor x0, x0, x0 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .globl vpaes_set_decrypt_key .def vpaes_set_decrypt_key .type 32 .endef .align 4 vpaes_set_decrypt_key: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so lsr w9, w1, #5 // shr $5,%eax add w9, w9, #5 // $5,%eax str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; lsl w9, w9, #4 // shl $4,%eax add x2, x2, #16 // lea 16(%rdx,%rax),%rdx add x2, x2, x9 mov w3, #1 // mov $1,%ecx lsr w8, w1, #1 // shr $1,%r8d and x8, x8, #32 // and $32,%r8d eor x8, x8, #32 // xor $32,%r8d # nbits==192?0:32 bl _vpaes_schedule_core ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .globl vpaes_cbc_encrypt .def vpaes_cbc_encrypt .type 32 .endef .align 4 vpaes_cbc_encrypt: AARCH64_SIGN_LINK_REGISTER cbz x2, Lcbc_abort cmp w5, #0 // check direction b.eq vpaes_cbc_decrypt stp x29,x30,[sp,#-16]! add x29,sp,#0 mov x17, x2 // reassign mov x2, x3 // reassign ld1 {v0.16b}, [x4] // load ivec bl _vpaes_encrypt_preheat b Lcbc_enc_loop .align 4 Lcbc_enc_loop: ld1 {v7.16b}, [x0],#16 // load input eor v7.16b, v7.16b, v0.16b // xor with ivec bl _vpaes_encrypt_core st1 {v0.16b}, [x1],#16 // save output subs x17, x17, #16 b.hi Lcbc_enc_loop st1 {v0.16b}, [x4] // write ivec ldp x29,x30,[sp],#16 Lcbc_abort: AARCH64_VALIDATE_LINK_REGISTER ret .def vpaes_cbc_decrypt .type 32 .endef .align 4 vpaes_cbc_decrypt: // Not adding AARCH64_SIGN_LINK_REGISTER here because vpaes_cbc_decrypt is jumped to // only from vpaes_cbc_encrypt which has already signed the return address. stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so stp d10,d11,[sp,#-16]! stp d12,d13,[sp,#-16]! stp d14,d15,[sp,#-16]! mov x17, x2 // reassign mov x2, x3 // reassign ld1 {v6.16b}, [x4] // load ivec bl _vpaes_decrypt_preheat tst x17, #16 b.eq Lcbc_dec_loop2x ld1 {v7.16b}, [x0], #16 // load input bl _vpaes_decrypt_core eor v0.16b, v0.16b, v6.16b // xor with ivec orr v6.16b, v7.16b, v7.16b // next ivec value st1 {v0.16b}, [x1], #16 subs x17, x17, #16 b.ls Lcbc_dec_done .align 4 Lcbc_dec_loop2x: ld1 {v14.16b,v15.16b}, [x0], #32 bl _vpaes_decrypt_2x eor v0.16b, v0.16b, v6.16b // xor with ivec eor v1.16b, v1.16b, v14.16b orr v6.16b, v15.16b, v15.16b st1 {v0.16b,v1.16b}, [x1], #32 subs x17, x17, #32 b.hi Lcbc_dec_loop2x Lcbc_dec_done: st1 {v6.16b}, [x4] ldp d14,d15,[sp],#16 ldp d12,d13,[sp],#16 ldp d10,d11,[sp],#16 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .globl vpaes_ctr32_encrypt_blocks .def vpaes_ctr32_encrypt_blocks .type 32 .endef .align 4 vpaes_ctr32_encrypt_blocks: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so stp d10,d11,[sp,#-16]! stp d12,d13,[sp,#-16]! stp d14,d15,[sp,#-16]! cbz x2, Lctr32_done // Note, unlike the other functions, x2 here is measured in blocks, // not bytes. mov x17, x2 mov x2, x3 // Load the IV and counter portion. ldr w6, [x4, #12] ld1 {v7.16b}, [x4] bl _vpaes_encrypt_preheat tst x17, #1 rev w6, w6 // The counter is big-endian. b.eq Lctr32_prep_loop // Handle one block so the remaining block count is even for // _vpaes_encrypt_2x. 
ld1 {v6.16b}, [x0], #16 // Load input ahead of time bl _vpaes_encrypt_core eor v0.16b, v0.16b, v6.16b // XOR input and result st1 {v0.16b}, [x1], #16 subs x17, x17, #1 // Update the counter. add w6, w6, #1 rev w7, w6 mov v7.s[3], w7 b.ls Lctr32_done Lctr32_prep_loop: // _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x // uses v14 and v15. mov v15.16b, v7.16b mov v14.16b, v7.16b add w6, w6, #1 rev w7, w6 mov v15.s[3], w7 Lctr32_loop: ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time bl _vpaes_encrypt_2x eor v0.16b, v0.16b, v6.16b // XOR input and result eor v1.16b, v1.16b, v7.16b // XOR input and result (#2) st1 {v0.16b,v1.16b}, [x1], #32 subs x17, x17, #2 // Update the counter. add w7, w6, #1 add w6, w6, #2 rev w7, w7 mov v14.s[3], w7 rev w7, w6 mov v15.s[3], w7 b.hi Lctr32_loop Lctr32_done: ldp d14,d15,[sp],#16 ldp d12,d13,[sp],#16 ldp d10,d11,[sp],#16 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
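The vpaes_ctr32_encrypt_blocks routine above treats only the last 32-bit word of the IV as a big-endian counter: it loads that word (ldr w6, [x4, #12]), byte-swaps it (rev), increments it per block, and writes it back into lane 3 of the counter vector. The following is a minimal Python sketch of that counter-block generation only, for orientation; it is not taken from the generated file, and the names ctr32_blocks/iv16 are assumptions of this sketch.

    import struct

    def ctr32_blocks(iv16, nblocks):
        # Illustrative only: produce successive 16-byte counter blocks the way the
        # routine above does -- the first 12 IV bytes stay fixed and only the final
        # 32-bit word is incremented as a big-endian counter.
        prefix, ctr = iv16[:12], struct.unpack(">I", iv16[12:])[0]
        blocks = []
        for _ in range(nblocks):
            blocks.append(prefix + struct.pack(">I", ctr))
            ctr = (ctr + 1) & 0xFFFFFFFF  # wraps modulo 2^32, like the 32-bit add in the loop
        return blocks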
wlsfx/bnbb
10,892
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/ghash-neon-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <openssl/arm_arch.h> .text .globl gcm_init_neon .def gcm_init_neon .type 32 .endef .align 4 gcm_init_neon: AARCH64_VALID_CALL_TARGET // This function is adapted from gcm_init_v8. xC2 is t3. ld1 {v17.2d}, [x1] // load H movi v19.16b, #0xe1 shl v19.2d, v19.2d, #57 // 0xc2.0 ext v3.16b, v17.16b, v17.16b, #8 ushr v18.2d, v19.2d, #63 dup v17.4s, v17.s[1] ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01 ushr v18.2d, v3.2d, #63 sshr v17.4s, v17.4s, #31 // broadcast carry bit and v18.16b, v18.16b, v16.16b shl v3.2d, v3.2d, #1 ext v18.16b, v18.16b, v18.16b, #8 and v16.16b, v16.16b, v17.16b orr v3.16b, v3.16b, v18.16b // H<<<=1 eor v5.16b, v3.16b, v16.16b // twisted H st1 {v5.2d}, [x0] // store Htable[0] ret .globl gcm_gmult_neon .def gcm_gmult_neon .type 32 .endef .align 4 gcm_gmult_neon: AARCH64_VALID_CALL_TARGET ld1 {v3.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] adrp x9, Lmasks // load constants add x9, x9, :lo12:Lmasks ld1 {v24.2d, v25.2d}, [x9] rev64 v3.16b, v3.16b // byteswap Xi ext v3.16b, v3.16b, v3.16b, #8 eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing mov x3, #16 b Lgmult_neon .globl gcm_ghash_neon .def gcm_ghash_neon .type 32 .endef .align 4 gcm_ghash_neon: AARCH64_VALID_CALL_TARGET ld1 {v0.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] adrp x9, Lmasks // load constants add x9, x9, :lo12:Lmasks ld1 {v24.2d, v25.2d}, [x9] rev64 v0.16b, v0.16b // byteswap Xi ext v0.16b, v0.16b, v0.16b, #8 eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing Loop_neon: ld1 {v3.16b}, [x2], #16 // load inp rev64 v3.16b, v3.16b // byteswap inp ext v3.16b, v3.16b, v3.16b, #8 eor v3.16b, v3.16b, v0.16b // inp ^= Xi Lgmult_neon: // Split the input into v3 and v4. (The upper halves are unused, // so it is okay to leave them alone.) ins v4.d[0], v3.d[1] ext v16.8b, v5.8b, v5.8b, #1 // A1 pmull v16.8h, v16.8b, v3.8b // F = A1*B ext v0.8b, v3.8b, v3.8b, #1 // B1 pmull v0.8h, v5.8b, v0.8b // E = A*B1 ext v17.8b, v5.8b, v5.8b, #2 // A2 pmull v17.8h, v17.8b, v3.8b // H = A2*B ext v19.8b, v3.8b, v3.8b, #2 // B2 pmull v19.8h, v5.8b, v19.8b // G = A*B2 ext v18.8b, v5.8b, v5.8b, #3 // A3 eor v16.16b, v16.16b, v0.16b // L = E + F pmull v18.8h, v18.8b, v3.8b // J = A3*B ext v0.8b, v3.8b, v3.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v0.8h, v5.8b, v0.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v3.8b, v3.8b, #4 // B4 eor v18.16b, v18.16b, v0.16b // N = I + J pmull v19.8h, v5.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. 
For now, we just // pair up independent instructions. zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v0.8h, v5.8b, v3.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v0.16b, v0.16b, v16.16b eor v0.16b, v0.16b, v18.16b eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing ext v16.8b, v7.8b, v7.8b, #1 // A1 pmull v16.8h, v16.8b, v3.8b // F = A1*B ext v1.8b, v3.8b, v3.8b, #1 // B1 pmull v1.8h, v7.8b, v1.8b // E = A*B1 ext v17.8b, v7.8b, v7.8b, #2 // A2 pmull v17.8h, v17.8b, v3.8b // H = A2*B ext v19.8b, v3.8b, v3.8b, #2 // B2 pmull v19.8h, v7.8b, v19.8b // G = A*B2 ext v18.8b, v7.8b, v7.8b, #3 // A3 eor v16.16b, v16.16b, v1.16b // L = E + F pmull v18.8h, v18.8b, v3.8b // J = A3*B ext v1.8b, v3.8b, v3.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v1.8h, v7.8b, v1.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v3.8b, v3.8b, #4 // B4 eor v18.16b, v18.16b, v1.16b // N = I + J pmull v19.8h, v7.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. 
zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v1.8h, v7.8b, v3.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v1.16b, v1.16b, v16.16b eor v1.16b, v1.16b, v18.16b ext v16.8b, v6.8b, v6.8b, #1 // A1 pmull v16.8h, v16.8b, v4.8b // F = A1*B ext v2.8b, v4.8b, v4.8b, #1 // B1 pmull v2.8h, v6.8b, v2.8b // E = A*B1 ext v17.8b, v6.8b, v6.8b, #2 // A2 pmull v17.8h, v17.8b, v4.8b // H = A2*B ext v19.8b, v4.8b, v4.8b, #2 // B2 pmull v19.8h, v6.8b, v19.8b // G = A*B2 ext v18.8b, v6.8b, v6.8b, #3 // A3 eor v16.16b, v16.16b, v2.16b // L = E + F pmull v18.8h, v18.8b, v4.8b // J = A3*B ext v2.8b, v4.8b, v4.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v2.8h, v6.8b, v2.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v4.8b, v4.8b, #4 // B4 eor v18.16b, v18.16b, v2.16b // N = I + J pmull v19.8h, v6.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v2.8h, v6.8b, v4.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v2.16b, v2.16b, v16.16b eor v2.16b, v2.16b, v18.16b ext v16.16b, v0.16b, v2.16b, #8 eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing eor v1.16b, v1.16b, v2.16b eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result // This is a no-op due to the ins instruction below. 
// ins v2.d[0], v1.d[1] // equivalent of reduction_avx from ghash-x86_64.pl shl v17.2d, v0.2d, #57 // 1st phase shl v18.2d, v0.2d, #62 eor v18.16b, v18.16b, v17.16b // shl v17.2d, v0.2d, #63 eor v18.16b, v18.16b, v17.16b // // Note Xm contains {Xl.d[1], Xh.d[0]}. eor v18.16b, v18.16b, v1.16b ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0] ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1] ushr v18.2d, v0.2d, #1 // 2nd phase eor v2.16b, v2.16b,v0.16b eor v0.16b, v0.16b,v18.16b // ushr v18.2d, v18.2d, #6 ushr v0.2d, v0.2d, #1 // eor v0.16b, v0.16b, v2.16b // eor v0.16b, v0.16b, v18.16b // subs x3, x3, #16 bne Loop_neon rev64 v0.16b, v0.16b // byteswap Xi and write ext v0.16b, v0.16b, v0.16b, #8 st1 {v0.16b}, [x0] ret .section .rodata .align 4 Lmasks: .quad 0x0000ffffffffffff // k48 .quad 0x00000000ffffffff // k32 .quad 0x000000000000ffff // k16 .quad 0x0000000000000000 // k0 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
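The NEON GHASH code above builds the GF(2^128) product out of 8-bit pmull partial products, Karatsuba-style folding, and the two-phase reduction whose shift constants appear after the "reduction_avx" comment. The bit-level result it must match is the textbook multiplication from NIST SP 800-38D (Algorithm 1). The Python sketch below is that reference definition only, not the vectorized implementation; the names ghash_mul and R are local to this sketch.

    R = 0xE1000000000000000000000000000000  # x^128 + x^7 + x^2 + x + 1 in GCM bit order

    def ghash_mul(x, y):
        # Reference GF(2^128) multiplication per NIST SP 800-38D, Algorithm 1.
        # x and y are 128-bit block values interpreted as big-endian integers.
        z, v = 0, y
        for i in range(128):
            if (x >> (127 - i)) & 1:
                z ^= v
            v = (v >> 1) ^ R if v & 1 else v >> 1
        return z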
wlsfx/bnbb
28,215
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/sha1-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <openssl/arm_arch.h> .text .globl sha1_block_data_order_nohw .def sha1_block_data_order_nohw .type 32 .endef .align 6 sha1_block_data_order_nohw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-96]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] ldp w20,w21,[x0] ldp w22,w23,[x0,#8] ldr w24,[x0,#16] Loop: ldr x3,[x1],#64 movz w28,#0x7999 sub x2,x2,#1 movk w28,#0x5a82,lsl#16 #ifdef __AARCH64EB__ ror x3,x3,#32 #else rev32 x3,x3 #endif add w24,w24,w28 // warm it up add w24,w24,w3 lsr x4,x3,#32 ldr x5,[x1,#-56] bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 add w23,w23,w4 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x5,x5,#32 #else rev32 x5,x5 #endif bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K orr w25,w25,w26 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 add w22,w22,w5 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) lsr x6,x5,#32 ldr x7,[x1,#-48] bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 add w21,w21,w6 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x7,x7,#32 #else rev32 x7,x7 #endif bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 add w20,w20,w7 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) lsr x8,x7,#32 ldr x9,[x1,#-40] bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 add w24,w24,w8 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x9,x9,#32 #else rev32 x9,x9 #endif bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 add w23,w23,w9 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) lsr x10,x9,#32 ldr x11,[x1,#-32] bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K orr w25,w25,w26 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 add w22,w22,w10 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x11,x11,#32 #else rev32 x11,x11 #endif bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 add w21,w21,w11 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) lsr x12,x11,#32 ldr x13,[x1,#-24] bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 add w20,w20,w12 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x13,x13,#32 #else rev32 x13,x13 #endif bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 add w24,w24,w13 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) lsr x14,x13,#32 ldr x15,[x1,#-16] bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 
add w23,w23,w14 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x15,x15,#32 #else rev32 x15,x15 #endif bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K orr w25,w25,w26 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 add w22,w22,w15 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) lsr x16,x15,#32 ldr x17,[x1,#-8] bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 add w21,w21,w16 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) #ifdef __AARCH64EB__ ror x17,x17,#32 #else rev32 x17,x17 #endif bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 add w20,w20,w17 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) lsr x19,x17,#32 eor w3,w3,w5 bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 eor w3,w3,w11 add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) eor w3,w3,w16 ror w22,w22,#2 add w24,w24,w19 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w3,w3,#31 eor w4,w4,w6 bic w25,w23,w21 and w26,w22,w21 ror w27,w20,#27 eor w4,w4,w12 add w23,w23,w28 // future e+=K orr w25,w25,w26 add w24,w24,w27 // e+=rot(a,5) eor w4,w4,w17 ror w21,w21,#2 add w23,w23,w3 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w4,w4,#31 eor w5,w5,w7 bic w25,w22,w20 and w26,w21,w20 ror w27,w24,#27 eor w5,w5,w13 add w22,w22,w28 // future e+=K orr w25,w25,w26 add w23,w23,w27 // e+=rot(a,5) eor w5,w5,w19 ror w20,w20,#2 add w22,w22,w4 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w5,w5,#31 eor w6,w6,w8 bic w25,w21,w24 and w26,w20,w24 ror w27,w23,#27 eor w6,w6,w14 add w21,w21,w28 // future e+=K orr w25,w25,w26 add w22,w22,w27 // e+=rot(a,5) eor w6,w6,w3 ror w24,w24,#2 add w21,w21,w5 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w6,w6,#31 eor w7,w7,w9 bic w25,w20,w23 and w26,w24,w23 ror w27,w22,#27 eor w7,w7,w15 add w20,w20,w28 // future e+=K orr w25,w25,w26 add w21,w21,w27 // e+=rot(a,5) eor w7,w7,w4 ror w23,w23,#2 add w20,w20,w6 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w7,w7,#31 movz w28,#0xeba1 movk w28,#0x6ed9,lsl#16 eor w8,w8,w10 bic w25,w24,w22 and w26,w23,w22 ror w27,w21,#27 eor w8,w8,w16 add w24,w24,w28 // future e+=K orr w25,w25,w26 add w20,w20,w27 // e+=rot(a,5) eor w8,w8,w5 ror w22,w22,#2 add w24,w24,w7 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w8,w8,#31 eor w9,w9,w11 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w9,w9,w17 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w9,w9,w6 add w23,w23,w8 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w9,w9,#31 eor w10,w10,w12 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w10,w10,w19 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w10,w10,w7 add w22,w22,w9 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w10,w10,#31 eor w11,w11,w13 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w11,w11,w3 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w11,w11,w8 add w21,w21,w10 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w11,w11,#31 eor w12,w12,w14 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w12,w12,w4 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w12,w12,w9 add w20,w20,w11 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w12,w12,#31 eor w13,w13,w15 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w13,w13,w5 
eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w13,w13,w10 add w24,w24,w12 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w13,w13,#31 eor w14,w14,w16 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w14,w14,w6 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w14,w14,w11 add w23,w23,w13 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w14,w14,#31 eor w15,w15,w17 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w15,w15,w7 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w15,w15,w12 add w22,w22,w14 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w15,w15,#31 eor w16,w16,w19 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w16,w16,w8 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w16,w16,w13 add w21,w21,w15 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w16,w16,#31 eor w17,w17,w3 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w17,w17,w9 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w17,w17,w14 add w20,w20,w16 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w17,w17,#31 eor w19,w19,w4 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w19,w19,w10 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w19,w19,w15 add w24,w24,w17 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w19,w19,#31 eor w3,w3,w5 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w3,w3,w11 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w3,w3,w16 add w23,w23,w19 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w3,w3,#31 eor w4,w4,w6 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w4,w4,w12 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w4,w4,w17 add w22,w22,w3 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w4,w4,#31 eor w5,w5,w7 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w5,w5,w13 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w5,w5,w19 add w21,w21,w4 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w5,w5,#31 eor w6,w6,w8 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w6,w6,w14 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w6,w6,w3 add w20,w20,w5 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w6,w6,#31 eor w7,w7,w9 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w7,w7,w15 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w7,w7,w4 add w24,w24,w6 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w7,w7,#31 eor w8,w8,w10 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w8,w8,w16 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w8,w8,w5 add w23,w23,w7 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w8,w8,#31 eor w9,w9,w11 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w9,w9,w17 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w9,w9,w6 add w22,w22,w8 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w9,w9,#31 eor w10,w10,w12 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w10,w10,w19 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w10,w10,w7 add w21,w21,w9 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w10,w10,#31 eor w11,w11,w13 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w11,w11,w3 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w11,w11,w8 
add w20,w20,w10 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w11,w11,#31 movz w28,#0xbcdc movk w28,#0x8f1b,lsl#16 eor w12,w12,w14 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w12,w12,w4 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w12,w12,w9 add w24,w24,w11 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w12,w12,#31 orr w25,w21,w22 and w26,w21,w22 eor w13,w13,w15 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w13,w13,w5 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w13,w13,w10 add w23,w23,w12 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w13,w13,#31 orr w25,w20,w21 and w26,w20,w21 eor w14,w14,w16 ror w27,w24,#27 and w25,w25,w22 add w22,w22,w28 // future e+=K eor w14,w14,w6 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w14,w14,w11 add w22,w22,w13 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w14,w14,#31 orr w25,w24,w20 and w26,w24,w20 eor w15,w15,w17 ror w27,w23,#27 and w25,w25,w21 add w21,w21,w28 // future e+=K eor w15,w15,w7 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w15,w15,w12 add w21,w21,w14 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w15,w15,#31 orr w25,w23,w24 and w26,w23,w24 eor w16,w16,w19 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w16,w16,w8 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w16,w16,w13 add w20,w20,w15 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w16,w16,#31 orr w25,w22,w23 and w26,w22,w23 eor w17,w17,w3 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w17,w17,w9 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w17,w17,w14 add w24,w24,w16 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w17,w17,#31 orr w25,w21,w22 and w26,w21,w22 eor w19,w19,w4 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w19,w19,w10 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w19,w19,w15 add w23,w23,w17 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w19,w19,#31 orr w25,w20,w21 and w26,w20,w21 eor w3,w3,w5 ror w27,w24,#27 and w25,w25,w22 add w22,w22,w28 // future e+=K eor w3,w3,w11 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w3,w3,w16 add w22,w22,w19 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w3,w3,#31 orr w25,w24,w20 and w26,w24,w20 eor w4,w4,w6 ror w27,w23,#27 and w25,w25,w21 add w21,w21,w28 // future e+=K eor w4,w4,w12 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w4,w4,w17 add w21,w21,w3 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w4,w4,#31 orr w25,w23,w24 and w26,w23,w24 eor w5,w5,w7 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w5,w5,w13 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w5,w5,w19 add w20,w20,w4 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w5,w5,#31 orr w25,w22,w23 and w26,w22,w23 eor w6,w6,w8 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w6,w6,w14 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w6,w6,w3 add w24,w24,w5 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w6,w6,#31 orr w25,w21,w22 and w26,w21,w22 eor w7,w7,w9 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w7,w7,w15 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w7,w7,w4 add w23,w23,w6 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w7,w7,#31 orr w25,w20,w21 and w26,w20,w21 eor w8,w8,w10 ror w27,w24,#27 and w25,w25,w22 add 
w22,w22,w28 // future e+=K eor w8,w8,w16 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w8,w8,w5 add w22,w22,w7 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w8,w8,#31 orr w25,w24,w20 and w26,w24,w20 eor w9,w9,w11 ror w27,w23,#27 and w25,w25,w21 add w21,w21,w28 // future e+=K eor w9,w9,w17 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w9,w9,w6 add w21,w21,w8 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w9,w9,#31 orr w25,w23,w24 and w26,w23,w24 eor w10,w10,w12 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w10,w10,w19 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w10,w10,w7 add w20,w20,w9 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w10,w10,#31 orr w25,w22,w23 and w26,w22,w23 eor w11,w11,w13 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w11,w11,w3 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w11,w11,w8 add w24,w24,w10 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w11,w11,#31 orr w25,w21,w22 and w26,w21,w22 eor w12,w12,w14 ror w27,w20,#27 and w25,w25,w23 add w23,w23,w28 // future e+=K eor w12,w12,w4 add w24,w24,w27 // e+=rot(a,5) orr w25,w25,w26 ror w21,w21,#2 eor w12,w12,w9 add w23,w23,w11 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w12,w12,#31 orr w25,w20,w21 and w26,w20,w21 eor w13,w13,w15 ror w27,w24,#27 and w25,w25,w22 add w22,w22,w28 // future e+=K eor w13,w13,w5 add w23,w23,w27 // e+=rot(a,5) orr w25,w25,w26 ror w20,w20,#2 eor w13,w13,w10 add w22,w22,w12 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w13,w13,#31 orr w25,w24,w20 and w26,w24,w20 eor w14,w14,w16 ror w27,w23,#27 and w25,w25,w21 add w21,w21,w28 // future e+=K eor w14,w14,w6 add w22,w22,w27 // e+=rot(a,5) orr w25,w25,w26 ror w24,w24,#2 eor w14,w14,w11 add w21,w21,w13 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w14,w14,#31 orr w25,w23,w24 and w26,w23,w24 eor w15,w15,w17 ror w27,w22,#27 and w25,w25,w20 add w20,w20,w28 // future e+=K eor w15,w15,w7 add w21,w21,w27 // e+=rot(a,5) orr w25,w25,w26 ror w23,w23,#2 eor w15,w15,w12 add w20,w20,w14 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w15,w15,#31 movz w28,#0xc1d6 movk w28,#0xca62,lsl#16 orr w25,w22,w23 and w26,w22,w23 eor w16,w16,w19 ror w27,w21,#27 and w25,w25,w24 add w24,w24,w28 // future e+=K eor w16,w16,w8 add w20,w20,w27 // e+=rot(a,5) orr w25,w25,w26 ror w22,w22,#2 eor w16,w16,w13 add w24,w24,w15 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w16,w16,#31 eor w17,w17,w3 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w17,w17,w9 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w17,w17,w14 add w23,w23,w16 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w17,w17,#31 eor w19,w19,w4 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w19,w19,w10 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w19,w19,w15 add w22,w22,w17 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w19,w19,#31 eor w3,w3,w5 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w3,w3,w11 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w3,w3,w16 add w21,w21,w19 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w3,w3,#31 eor w4,w4,w6 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w4,w4,w12 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w4,w4,w17 add w20,w20,w3 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w4,w4,#31 eor w5,w5,w7 eor w25,w24,w22 ror w27,w21,#27 add 
w24,w24,w28 // future e+=K eor w5,w5,w13 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w5,w5,w19 add w24,w24,w4 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w5,w5,#31 eor w6,w6,w8 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w6,w6,w14 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w6,w6,w3 add w23,w23,w5 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w6,w6,#31 eor w7,w7,w9 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w7,w7,w15 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w7,w7,w4 add w22,w22,w6 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w7,w7,#31 eor w8,w8,w10 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w8,w8,w16 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w8,w8,w5 add w21,w21,w7 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w8,w8,#31 eor w9,w9,w11 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w9,w9,w17 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w9,w9,w6 add w20,w20,w8 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w9,w9,#31 eor w10,w10,w12 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w10,w10,w19 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w10,w10,w7 add w24,w24,w9 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w10,w10,#31 eor w11,w11,w13 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w11,w11,w3 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w11,w11,w8 add w23,w23,w10 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w11,w11,#31 eor w12,w12,w14 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w12,w12,w4 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w12,w12,w9 add w22,w22,w11 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w12,w12,#31 eor w13,w13,w15 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w13,w13,w5 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w13,w13,w10 add w21,w21,w12 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w13,w13,#31 eor w14,w14,w16 eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w14,w14,w6 eor w25,w25,w24 add w21,w21,w27 // e+=rot(a,5) ror w23,w23,#2 eor w14,w14,w11 add w20,w20,w13 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ror w14,w14,#31 eor w15,w15,w17 eor w25,w24,w22 ror w27,w21,#27 add w24,w24,w28 // future e+=K eor w15,w15,w7 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 eor w15,w15,w12 add w24,w24,w14 // future e+=X[i] add w20,w20,w25 // e+=F(b,c,d) ror w15,w15,#31 eor w16,w16,w19 eor w25,w23,w21 ror w27,w20,#27 add w23,w23,w28 // future e+=K eor w16,w16,w8 eor w25,w25,w22 add w24,w24,w27 // e+=rot(a,5) ror w21,w21,#2 eor w16,w16,w13 add w23,w23,w15 // future e+=X[i] add w24,w24,w25 // e+=F(b,c,d) ror w16,w16,#31 eor w17,w17,w3 eor w25,w22,w20 ror w27,w24,#27 add w22,w22,w28 // future e+=K eor w17,w17,w9 eor w25,w25,w21 add w23,w23,w27 // e+=rot(a,5) ror w20,w20,#2 eor w17,w17,w14 add w22,w22,w16 // future e+=X[i] add w23,w23,w25 // e+=F(b,c,d) ror w17,w17,#31 eor w19,w19,w4 eor w25,w21,w24 ror w27,w23,#27 add w21,w21,w28 // future e+=K eor w19,w19,w10 eor w25,w25,w20 add w22,w22,w27 // e+=rot(a,5) ror w24,w24,#2 eor w19,w19,w15 add w21,w21,w17 // future e+=X[i] add w22,w22,w25 // e+=F(b,c,d) ror w19,w19,#31 ldp w4,w5,[x0] eor w25,w20,w23 ror w27,w22,#27 add w20,w20,w28 // future e+=K eor w25,w25,w24 add w21,w21,w27 // 
e+=rot(a,5) ror w23,w23,#2 add w20,w20,w19 // future e+=X[i] add w21,w21,w25 // e+=F(b,c,d) ldp w6,w7,[x0,#8] eor w25,w24,w22 ror w27,w21,#27 eor w25,w25,w23 add w20,w20,w27 // e+=rot(a,5) ror w22,w22,#2 ldr w8,[x0,#16] add w20,w20,w25 // e+=F(b,c,d) add w21,w21,w5 add w22,w22,w6 add w20,w20,w4 add w23,w23,w7 add w24,w24,w8 stp w20,w21,[x0] stp w22,w23,[x0,#8] str w24,[x0,#16] cbnz x2,Loop ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldp x25,x26,[sp,#64] ldp x27,x28,[sp,#80] ldr x29,[sp],#96 ret .globl sha1_block_data_order_hw .def sha1_block_data_order_hw .type 32 .endef .align 6 sha1_block_data_order_hw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 adrp x4,Lconst add x4,x4,:lo12:Lconst eor v1.16b,v1.16b,v1.16b ld1 {v0.4s},[x0],#16 ld1 {v1.s}[0],[x0] sub x0,x0,#16 ld1 {v16.4s,v17.4s,v18.4s,v19.4s},[x4] Loop_hw: ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 sub x2,x2,#1 rev32 v4.16b,v4.16b rev32 v5.16b,v5.16b add v20.4s,v16.4s,v4.4s rev32 v6.16b,v6.16b orr v22.16b,v0.16b,v0.16b // offload add v21.4s,v16.4s,v5.4s rev32 v7.16b,v7.16b .long 0x5e280803 //sha1h v3.16b,v0.16b .long 0x5e140020 //sha1c v0.16b,v1.16b,v20.4s // 0 add v20.4s,v16.4s,v6.4s .long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 1 .long 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s add v21.4s,v16.4s,v7.4s .long 0x5e2818e4 //sha1su1 v4.16b,v7.16b .long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 2 .long 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s add v20.4s,v16.4s,v4.4s .long 0x5e281885 //sha1su1 v5.16b,v4.16b .long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 3 .long 0x5e150060 //sha1c v0.16b,v3.16b,v21.4s add v21.4s,v17.4s,v5.4s .long 0x5e2818a6 //sha1su1 v6.16b,v5.16b .long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 4 .long 0x5e140040 //sha1c v0.16b,v2.16b,v20.4s add v20.4s,v17.4s,v6.4s .long 0x5e2818c7 //sha1su1 v7.16b,v6.16b .long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 5 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v17.4s,v7.4s .long 0x5e2818e4 //sha1su1 v4.16b,v7.16b .long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 6 .long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s add v20.4s,v17.4s,v4.4s .long 0x5e281885 //sha1su1 v5.16b,v4.16b .long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 7 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v17.4s,v5.4s .long 0x5e2818a6 //sha1su1 v6.16b,v5.16b .long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 8 .long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s add v20.4s,v18.4s,v6.4s .long 0x5e2818c7 //sha1su1 v7.16b,v6.16b .long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 9 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v18.4s,v7.4s .long 0x5e2818e4 //sha1su1 v4.16b,v7.16b .long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 10 .long 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s add v20.4s,v18.4s,v4.4s .long 0x5e281885 //sha1su1 v5.16b,v4.16b .long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 11 .long 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s add v21.4s,v18.4s,v5.4s .long 0x5e2818a6 //sha1su1 v6.16b,v5.16b .long 0x5e053087 //sha1su0 
v7.16b,v4.16b,v5.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 12 .long 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s add v20.4s,v18.4s,v6.4s .long 0x5e2818c7 //sha1su1 v7.16b,v6.16b .long 0x5e0630a4 //sha1su0 v4.16b,v5.16b,v6.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 13 .long 0x5e152060 //sha1m v0.16b,v3.16b,v21.4s add v21.4s,v19.4s,v7.4s .long 0x5e2818e4 //sha1su1 v4.16b,v7.16b .long 0x5e0730c5 //sha1su0 v5.16b,v6.16b,v7.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 14 .long 0x5e142040 //sha1m v0.16b,v2.16b,v20.4s add v20.4s,v19.4s,v4.4s .long 0x5e281885 //sha1su1 v5.16b,v4.16b .long 0x5e0430e6 //sha1su0 v6.16b,v7.16b,v4.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 15 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v19.4s,v5.4s .long 0x5e2818a6 //sha1su1 v6.16b,v5.16b .long 0x5e053087 //sha1su0 v7.16b,v4.16b,v5.16b .long 0x5e280803 //sha1h v3.16b,v0.16b // 16 .long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s add v20.4s,v19.4s,v6.4s .long 0x5e2818c7 //sha1su1 v7.16b,v6.16b .long 0x5e280802 //sha1h v2.16b,v0.16b // 17 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v21.4s,v19.4s,v7.4s .long 0x5e280803 //sha1h v3.16b,v0.16b // 18 .long 0x5e141040 //sha1p v0.16b,v2.16b,v20.4s .long 0x5e280802 //sha1h v2.16b,v0.16b // 19 .long 0x5e151060 //sha1p v0.16b,v3.16b,v21.4s add v1.4s,v1.4s,v2.4s add v0.4s,v0.4s,v22.4s cbnz x2,Loop_hw st1 {v0.4s},[x0],#16 st1 {v1.s}[0],[x0] ldr x29,[sp],#16 ret .section .rodata .align 6 Lconst: .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 //K_00_19 .long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 //K_20_39 .long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc //K_40_59 .long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 //K_60_79 .byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
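The scalar sha1_block_data_order_nohw loop above fully unrolls the 80 SHA-1 rounds; its per-round comments ("e+=rot(a,5)", "e+=F(b,c,d)", "e+=K", "e+=X[i]", with b rotated via ror #2) and the four constants in Lconst correspond to the standard round update. A minimal Python sketch of a single round step follows, as a readable counterpart to the unrolled assembly; rol32, f, and sha1_round are names invented for this sketch, not symbols from the file.

    def rol32(x, n):
        return ((x << n) | (x >> (32 - n))) & 0xFFFFFFFF

    K = (0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6)  # same values as Lconst above

    def f(t, b, c, d):
        if t < 20:
            return (b & c) | (~b & d)            # Ch: the bic/and/orr rounds
        if 40 <= t < 60:
            return (b & c) | (b & d) | (c & d)   # Maj: the orr/and rounds
        return b ^ c ^ d                          # Parity: the eor rounds

    def sha1_round(t, a, b, c, d, e, w_t):
        # One round: T = rot(a,5) + F(b,c,d) + e + K[t//20] + W[t]; the new state is
        # (T, a, rot(b,30), c, d), matching the "e+=..." comments in the loop above.
        T = (rol32(a, 5) + f(t, b, c, d) + e + K[t // 20] + w_t) & 0xFFFFFFFF
        return T, a, rol32(b, 30), c, d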
wlsfx/bnbb
80,057
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/aesv8-gcm-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <openssl/arm_arch.h> #if __ARM_MAX_ARCH__ >= 8 .arch armv8-a+crypto .text .globl aes_gcm_enc_kernel .def aes_gcm_enc_kernel .type 32 .endef .align 4 aes_gcm_enc_kernel: #ifdef BORINGSSL_DISPATCH_TEST adrp x9,BORINGSSL_function_hit add x9, x9, :lo12:BORINGSSL_function_hit mov w10, #1 strb w10, [x9,#2] // kFlag_aes_gcm_enc_kernel #endif AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys add x4, x0, x1, lsr #3 // end_input_ptr lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible sub x5, x5, #1 // byte_len - 1 ldr q18, [x8, #0] // load rk0 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) ldr q25, [x8, #112] // load rk7 add x5, x5, x0 lsr x12, x11, #32 fmov d2, x10 // CTR block 2 orr w11, w11, w11 rev w12, w12 // rev_ctr32 fmov d1, x10 // CTR block 1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 0 add w12, w12, #1 // increment rev_ctr32 rev w9, w12 // CTR block 1 fmov d3, x10 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 1 add w12, w12, #1 // CTR block 1 ldr q19, [x8, #16] // load rk1 fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 ldr q20, [x8, #32] // load rk2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 orr x9, x11, x9, lsl #32 // CTR block 3 fmov v3.d[1], x9 // CTR block 3 aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q21, [x8, #48] // load rk3 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q24, [x8, #96] // load rk6 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q23, [x8, #80] // load rk5 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ldr q14, [x6, #48] // load h3l | h3h aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q22, [x8, #64] // load rk4 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 ldr q13, [x6, #32] // load h2l | h2h aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q15, [x6, #80] // load h4l | h4h aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 ldr q29, [x8, #176] // load rk11 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 ldr q26, [x8, #128] // load rk8 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 add w12, w12, #1 // CTR block 3 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 ld1 { v11.16b}, [x3] ext v11.16b, v11.16b, 
v11.16b, #8 rev64 v11.16b, v11.16b aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 2 - round 4 aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 0 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 2 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 trn2 v17.2d, v14.2d, v15.2d // h4l | h3l aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 ldr q27, [x8, #144] // load rk9 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 ldr q12, [x6] // load h1l | h1h aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 2 - round 6 ldr q28, [x8, #160] // load rk10 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 trn1 v9.2d, v14.2d, v15.2d // h4h | h3h aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 trn2 v16.2d, v12.2d, v13.2d // h2l | h1l aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 b.lt Lenc_finish_first_blocks // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 0 - round 9 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 3 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 b.eq Lenc_finish_first_blocks // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 2 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 Lenc_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks eor v17.16b, v17.16b, v9.16b // h4k | h3k aese v2.16b, v31.16b // AES block 2 - round N-1 trn1 v8.2d, v12.2d, v13.2d // h2h | h1h aese v1.16b, v31.16b // AES block 1 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 aese v3.16b, v31.16b // AES block 3 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k b.ge Lenc_tail // handle tail ldp x19, x20, [x0, #16] // AES block 1 - load plaintext rev w9, w12 // CTR block 4 ldp x6, x7, [x0, #0] // AES block 0 - load plaintext ldp x23, x24, [x0, #48] // AES block 3 - load plaintext ldp x21, x22, [x0, #32] // AES block 2 - load plaintext add x0, x0, #64 // AES input_ptr update eor x19, x19, x13 
// AES block 1 - round N low eor x20, x20, x14 // AES block 1 - round N high fmov d5, x19 // AES block 1 - mov low eor x6, x6, x13 // AES block 0 - round N low eor x7, x7, x14 // AES block 0 - round N high eor x24, x24, x14 // AES block 3 - round N high fmov d4, x6 // AES block 0 - mov low cmp x0, x5 // check if we have <= 8 blocks fmov v4.d[1], x7 // AES block 0 - mov high eor x23, x23, x13 // AES block 3 - round N low eor x21, x21, x13 // AES block 2 - round N low fmov v5.d[1], x20 // AES block 1 - mov high fmov d6, x21 // AES block 2 - mov low add w12, w12, #1 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov d7, x23 // AES block 3 - mov low eor x22, x22, x14 // AES block 2 - round N high fmov v6.d[1], x22 // AES block 2 - mov high eor v4.16b, v4.16b, v0.16b // AES block 0 - result fmov d0, x10 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add w12, w12, #1 // CTR block 5 eor v5.16b, v5.16b, v1.16b // AES block 1 - result fmov d1, x10 // CTR block 5 orr x9, x11, x9, lsl #32 // CTR block 5 fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 st1 { v4.16b}, [x2], #16 // AES block 0 - store result fmov v7.d[1], x24 // AES block 3 - mov high orr x9, x11, x9, lsl #32 // CTR block 6 eor v6.16b, v6.16b, v2.16b // AES block 2 - result st1 { v5.16b}, [x2], #16 // AES block 1 - store result add w12, w12, #1 // CTR block 6 fmov d2, x10 // CTR block 6 fmov v2.d[1], x9 // CTR block 6 st1 { v6.16b}, [x2], #16 // AES block 2 - store result rev w9, w12 // CTR block 7 orr x9, x11, x9, lsl #32 // CTR block 7 eor v7.16b, v7.16b, v3.16b // AES block 3 - result st1 { v7.16b}, [x2], #16 // AES block 3 - store result b.ge Lenc_prepretail // do prepretail Lenc_main_loop: // main loop start aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free) aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d3, x10 // CTR block 4k+3 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 fmov v3.d[1], x9 // CTR block 4k+3 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 ldp x21, x22, [x0, #32] // AES block 4k+6 - load plaintext aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 eor v4.16b, v4.16b, v11.16b // PRE 1 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x23, x23, x13 // AES block 4k+7 - round N low aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d10, v17.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high eor x22, x22, x14 // AES block 4k+6 - round N high mov d8, v4.d[1] // GHASH block 4k - mid aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - 
high pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free) pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor x19, x19, x13 // AES block 4k+5 - round N low aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 eor x21, x21, x13 // AES block 4k+6 - round N low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 movi v8.8b, #0xc2 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check fmov d5, x19 // AES block 4k+5 - mov low ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext b.lt Lenc_main_loop_continue // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc 
v1.16b, v1.16b // AES block 4k+5 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq Lenc_main_loop_continue // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 Lenc_main_loop_continue: shl d8, d8, #56 // mod_constant eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid add w12, w12, #1 // CTR block 4k+3 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up add x0, x0, #64 // AES input_ptr update pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid rev w9, w12 // CTR block 4k+8 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor x6, x6, x13 // AES block 4k+4 - round N low eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up eor x7, x7, x14 // AES block 4k+4 - round N high fmov d4, x6 // AES block 4k+4 - mov low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid eor x20, x20, x14 // AES block 4k+5 - round N high eor x24, x24, x14 // AES block 4k+7 - round N high add w12, w12, #1 // CTR block 4k+8 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 fmov v4.d[1], x7 // AES block 4k+4 - mov high eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid fmov d7, x23 // AES block 4k+7 - mov low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 fmov v5.d[1], x20 // AES block 4k+5 - mov high fmov d6, x21 // AES block 4k+6 - mov low cmp x0, x5 // LOOP CONTROL fmov v6.d[1], x22 // AES block 4k+6 - mov high pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 4k+8 rev w9, w12 // CTR block 4k+9 add w12, w12, #1 // CTR block 4k+9 eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result fmov d1, x10 // CTR block 4k+9 orr x9, x11, x9, lsl #32 // CTR block 4k+9 fmov v1.d[1], x9 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 rev w9, w12 // CTR block 4k+10 st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result orr x9, x11, x9, lsl #32 // CTR block 4k+10 eor v11.16b, v11.16b, v9.16b // MODULO - fold into low fmov v7.d[1], x24 // AES block 4k+7 - mov high ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result add w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result fmov d2, x10 // CTR block 4k+10 st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result fmov v2.d[1], x9 // CTR block 4k+10 rev w9, w12 // CTR block 4k+11 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low orr x9, x11, x9, lsl #32 // CTR block 4k+11 eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result b.lt Lenc_main_loop Lenc_prepretail: // PREPRETAIL aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 rev64 
v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free) aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov d3, x10 // CTR block 4k+3 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free) fmov v3.d[1], x9 // CTR block 4k+3 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid add w12, w12, #1 // CTR block 4k+3 pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, 
v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 movi v8.8b, #0xc2 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v10.16b, v10.16b, v9.16b // karatsuba tidy up aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 pmull v4.1q, v9.1d, v8.1d ext v9.16b, v9.16b, v9.16b, #8 eor v10.16b, v10.16b, v11.16b b.lt Lenc_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 b.eq Lenc_finish_prepretail // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 Lenc_finish_prepretail: eor v10.16b, v10.16b, v4.16b eor v10.16b, v10.16b, v9.16b pmull v4.1q, v10.1d, v8.1d ext v10.16b, v10.16b, v10.16b, #8 aese v1.16b, v31.16b // AES block 4k+5 - round N-1 eor v11.16b, v11.16b, v4.16b aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b Lenc_tail: // TAIL ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high cmp x5, #48 fmov d4, x6 // AES block 4k+4 - mov low fmov v4.d[1], x7 // AES block 4k+4 - mov high eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result b.gt Lenc_blocks_4_remaining cmp x5, #32 mov v3.16b, v2.16b movi v11.8b, #0 movi v9.8b, #0 sub w12, w12, #1 mov v2.16b, v1.16b movi v10.8b, #0 b.gt Lenc_blocks_3_remaining mov v3.16b, v1.16b sub w12, w12, #1 cmp x5, #16 b.gt Lenc_blocks_2_remaining sub w12, w12, #1 b Lenc_blocks_1_remaining 
Lenc_blocks_4_remaining: // blocks left = 4 st1 { v5.16b}, [x2], #16 // AES final-3 block - store result ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-3 block eor x6, x6, x13 // AES final-2 block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag eor x7, x7, x14 // AES final-2 block - round N high mov d22, v4.d[1] // GHASH final-3 block - mid fmov d5, x6 // AES final-2 block - mov low fmov v5.d[1], x7 // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in mov d10, v17.d[1] // GHASH final-3 block - mid pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor v5.16b, v5.16b, v1.16b // AES final-2 block - result Lenc_blocks_3_remaining: // blocks left = 3 st1 { v5.16b}, [x2], #16 // AES final-2 block - store result ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-2 block eor x6, x6, x13 // AES final-1 block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag fmov d5, x6 // AES final-1 block - mov low eor x7, x7, x14 // AES final-1 block - round N high fmov v5.d[1], x7 // AES final-1 block - mov high movi v8.8b, #0 // suppress further partial tag feed in pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid eor v5.16b, v5.16b, v2.16b // AES final-1 block - result eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid Lenc_blocks_2_remaining: // blocks left = 2 st1 { v5.16b}, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ldp x6, x7, [x0], #16 // AES final block - load input low & high eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in eor x6, x6, x13 // AES final block - round N low mov d22, v4.d[1] // GHASH final-1 block - mid pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor x7, x7, x14 // AES final block - round N high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high ins v22.d[1], v22.d[0] // GHASH final-1 block - mid fmov d5, x6 // AES final block - mov low fmov v5.d[1], x7 // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low eor v5.16b, v5.16b, v3.16b // AES final block - result eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low Lenc_blocks_1_remaining: // blocks_left = 1 rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high mov d8, v4.d[1] // GHASH final block - mid rev w9, w12 pmull v21.1q, v4.1d, v12.1d // GHASH final block - low eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block 
- mid movi v8.8b, #0xc2 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment str w9, [x16, #12] // store the updated counter st1 { v5.16b}, [x2] // store all 16B eor v11.16b, v11.16b, v9.16b // MODULO - fold into low eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] ldp x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret .globl aes_gcm_dec_kernel .def aes_gcm_dec_kernel .type 32 .endef .align 4 aes_gcm_dec_kernel: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ldr q26, [x8, #128] // load rk8 sub x5, x5, #1 // byte_len - 1 ldr q25, [x8, #112] // load rk7 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) add x4, x0, x1, lsr #3 // end_input_ptr ldr q24, [x8, #96] // load rk6 lsr x12, x11, #32 ldr q23, [x8, #80] // load rk5 orr w11, w11, w11 ldr q21, [x8, #48] // load rk3 add x5, x5, x0 rev w12, w12 // rev_ctr32 add w12, w12, #1 // increment rev_ctr32 fmov d3, x10 // CTR block 3 rev w9, w12 // CTR block 1 add w12, w12, #1 // CTR block 1 fmov d1, x10 // CTR block 1 orr x9, x11, x9, lsl #32 // CTR block 1 ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 fmov d2, x10 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 3 ldr q18, [x8, #0] // load rk0 fmov v3.d[1], x9 // CTR block 3 add w12, w12, #1 // CTR block 3 ldr q22, [x8, #64] // load rk4 ldr q19, [x8, #16] // load rk1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 0 ldr q14, [x6, #48] // load h3l | h3h aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 ldr q15, [x6, #80] // load h4l | h4h aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q13, [x6, #32] // load h2l | h2h aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q20, [x8, #32] // load rk2 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ld1 { v11.16b}, [x3] ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q27, [x8, #144] // load rk9 aese v3.16b, v19.16b aesmc 
v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q12, [x6] // load h1l | h1h aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q28, [x8, #160] // load rk10 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 0 - round 4 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 2 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 2 - round 5 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 cmp x17, #12 // setup flags for AES-128/192/256 check aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 2 - round 6 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 ldr q29, [x8, #176] // load rk11 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 b.lt Ldec_finish_first_blocks // branch if AES-128 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 0 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 3 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 b.eq Ldec_finish_first_blocks // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 2 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 Ldec_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks trn1 v9.2d, v14.2d, v15.2d // h4h | h3h trn2 v17.2d, v14.2d, v15.2d // h4l | h3l trn1 v8.2d, v12.2d, v13.2d // h2h | h1h trn2 v16.2d, v12.2d, v13.2d // h2l | h1l eor v17.16b, v17.16b, v9.16b // h4k 
| h3k aese v1.16b, v31.16b // AES block 1 - round N-1 aese v2.16b, v31.16b // AES block 2 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k aese v3.16b, v31.16b // AES block 3 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 b.ge Ldec_tail // handle tail ldr q4, [x0, #0] // AES block 0 - load ciphertext ldr q5, [x0, #16] // AES block 1 - load ciphertext rev w9, w12 // CTR block 4 eor v0.16b, v4.16b, v0.16b // AES block 0 - result eor v1.16b, v5.16b, v1.16b // AES block 1 - result rev64 v5.16b, v5.16b // GHASH block 1 ldr q7, [x0, #48] // AES block 3 - load ciphertext mov x7, v0.d[1] // AES block 0 - mov high mov x6, v0.d[0] // AES block 0 - mov low rev64 v4.16b, v4.16b // GHASH block 0 add w12, w12, #1 // CTR block 4 fmov d0, x10 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add w12, w12, #1 // CTR block 5 mov x19, v1.d[0] // AES block 1 - mov low orr x9, x11, x9, lsl #32 // CTR block 5 mov x20, v1.d[1] // AES block 1 - mov high eor x7, x7, x14 // AES block 0 - round N high eor x6, x6, x13 // AES block 0 - round N low stp x6, x7, [x2], #16 // AES block 0 - store result fmov d1, x10 // CTR block 5 ldr q6, [x0, #32] // AES block 2 - load ciphertext add x0, x0, #64 // AES input_ptr update fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 add w12, w12, #1 // CTR block 6 eor x19, x19, x13 // AES block 1 - round N low orr x9, x11, x9, lsl #32 // CTR block 6 eor x20, x20, x14 // AES block 1 - round N high stp x19, x20, [x2], #16 // AES block 1 - store result eor v2.16b, v6.16b, v2.16b // AES block 2 - result cmp x0, x5 // check if we have <= 8 blocks b.ge Ldec_prepretail // do prepretail Ldec_main_loop: // main loop start mov x21, v2.d[0] // AES block 4k+2 - mov low ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 mov x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 eor v4.16b, v4.16b, v11.16b // PRE 1 rev w9, w12 // CTR block 4k+7 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x23, v3.d[0] // AES block 4k+3 - mov low pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 orr x9, x11, x9, lsl #32 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov v3.d[1], x9 // CTR block 4k+7 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor x22, x22, x14 // AES block 4k+2 - round N high aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 rev64 v6.16b, v6.16b // GHASH block 4k+2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x21, x21, x13 // AES block 4k+2 - round N low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 stp x21, x22, [x2], #16 // AES block 4k+2 - store result pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low pmull2 v4.1q, v5.2d, 
v14.2d // GHASH block 4k+1 - high aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 rev64 v7.16b, v7.16b // GHASH block 4k+3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor x23, x23, x13 // AES block 4k+3 - round N low pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor x24, x24, x14 // AES block 4k+3 - round N high eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 add w12, w12, #1 // CTR block 4k+7 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid rev w9, w12 // CTR block 4k+8 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 add w12, w12, #1 // CTR block 4k+8 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high mov d6, v7.d[1] // GHASH block 4k+3 - mid aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid movi v8.8b, #0xc2 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 b.lt Ldec_main_loop_continue // branch if AES-128 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, 
v2.16b // AES block 4k+6 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq Ldec_main_loop_continue // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 Ldec_main_loop_continue: pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext aese v0.16b, v31.16b // AES block 4k+4 - round N-1 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext mov x7, v0.d[1] // AES block 4k+4 - mov high eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid aese v1.16b, v31.16b // AES block 4k+5 - round N-1 add x0, x0, #64 // AES input_ptr update mov x6, v0.d[0] // AES block 4k+4 - mov low fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 4k+8 pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result rev w9, w12 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+9 cmp x0, x5 // LOOP CONTROL add w12, w12, #1 // CTR block 4k+9 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high mov x20, v1.d[1] // AES block 4k+5 - mov high eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low mov x19, v1.d[0] // AES block 4k+5 - mov low fmov d1, x10 // CTR block 4k+9 ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment fmov v1.d[1], x9 // CTR block 4k+9 rev w9, w12 // CTR block 4k+10 add w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+10 rev64 v5.16b, v5.16b // GHASH block 4k+5 eor x20, x20, x14 // AES block 4k+5 - round N high stp x6, x7, [x2], #16 // AES block 4k+4 - store result eor x19, x19, x13 // AES block 4k+5 - round N low stp x19, x20, [x2], #16 // AES block 4k+5 - store result rev64 v4.16b, v4.16b // GHASH block 4k+4 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low b.lt Ldec_main_loop Ldec_prepretail: // PREPRETAIL ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 mov x21, v2.d[0] // AES block 4k+2 - mov low eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b 
// AES block 4k+4 - round 0 mov x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 rev w9, w12 // CTR block 4k+7 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v6.16b, v6.16b // GHASH block 4k+2 orr x9, x11, x9, lsl #32 // CTR block 4k+7 mov x23, v3.d[0] // AES block 4k+3 - mov low aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high fmov v3.d[1], x9 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 rev64 v7.16b, v7.16b // GHASH block 4k+3 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 mov d6, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v2.16b, 
v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 movi v8.8b, #0xc2 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 cmp x17, #12 // setup flags for AES-128/192/256 check eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 shl d8, d8, #56 // mod_constant aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 b.lt Ldec_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 b.eq Ldec_finish_prepretail // branch if AES-192 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 Ldec_finish_prepretail: eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor x22, x22, x14 // AES block 4k+2 - round N high eor x23, x23, x13 // AES block 4k+3 - round N low eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid add w12, w12, #1 // CTR block 4k+7 eor x21, x21, x13 // AES block 4k+2 - round N low pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor x24, x24, x14 // AES block 4k+3 - round N high stp x21, x22, [x2], #16 // AES block 4k+2 - store result ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low Ldec_tail: // TAIL sub x5, x4, x0 // main_end_input_ptr is number of bytes 
left to process ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result mov x6, v0.d[0] // AES block 4k+4 - mov low mov x7, v0.d[1] // AES block 4k+4 - mov high ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag cmp x5, #48 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high b.gt Ldec_blocks_4_remaining sub w12, w12, #1 mov v3.16b, v2.16b movi v10.8b, #0 movi v11.8b, #0 cmp x5, #32 movi v9.8b, #0 mov v2.16b, v1.16b b.gt Ldec_blocks_3_remaining sub w12, w12, #1 mov v3.16b, v1.16b cmp x5, #16 b.gt Ldec_blocks_2_remaining sub w12, w12, #1 b Ldec_blocks_1_remaining Ldec_blocks_4_remaining: // blocks left = 4 rev64 v4.16b, v5.16b // GHASH final-3 block ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext stp x6, x7, [x2], #16 // AES final-3 block - store result mov d10, v17.d[1] // GHASH final-3 block - mid eor v4.16b, v4.16b, v8.16b // feed in partial tag eor v0.16b, v5.16b, v1.16b // AES final-2 block - result mov d22, v4.d[1] // GHASH final-3 block - mid mov x6, v0.d[0] // AES final-2 block - mov low mov x7, v0.d[1] // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor x6, x6, x13 // AES final-2 block - round N low pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low eor x7, x7, x14 // AES final-2 block - round N high Ldec_blocks_3_remaining: // blocks left = 3 rev64 v4.16b, v5.16b // GHASH final-2 block ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag stp x6, x7, [x2], #16 // AES final-2 block - store result eor v0.16b, v5.16b, v2.16b // AES final-1 block - result mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid mov x6, v0.d[0] // AES final-1 block - mov low mov x7, v0.d[1] // AES final-1 block - mov high eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low movi v8.8b, #0 // suppress further partial tag feed in pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high eor x6, x6, x13 // AES final-1 block - round N low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid eor x7, x7, x14 // AES final-1 block - round N high Ldec_blocks_2_remaining: // blocks left = 2 stp x6, x7, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in mov d22, v4.d[1] // GHASH final-1 block - mid eor v0.16b, v5.16b, v3.16b // AES final block - result pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low mov x6, v0.d[0] // AES final block - mov low ins v22.d[1], v22.d[0] // GHASH final-1 block - mid mov x7, v0.d[1] // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid eor x6, x6, x13 // AES final block - round N low eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high eor 
v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor x7, x7, x14 // AES final block - round N high Ldec_blocks_1_remaining: // blocks_left = 1 rev w9, w12 rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag pmull v21.1q, v4.1d, v12.1d // GHASH final block - low mov d8, v4.d[1] // GHASH final block - mid eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block - mid movi v8.8b, #0xc2 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment eor v11.16b, v11.16b, v8.16b // MODULO - fold into low stp x6, x7, [x2] str w9, [x16, #12] // store the updated counter eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] ldp x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
wlsfx/bnbb
5,284
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/keccak1600-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <openssl/arm_arch.h> .text .align 8 // strategic alignment and padding that allows to use // address value as loop termination condition... .quad 0,0,0,0,0,0,0,0 iotas_hw: .quad 0x0000000000000001 .quad 0x0000000000008082 .quad 0x800000000000808a .quad 0x8000000080008000 .quad 0x000000000000808b .quad 0x0000000080000001 .quad 0x8000000080008081 .quad 0x8000000000008009 .quad 0x000000000000008a .quad 0x0000000000000088 .quad 0x0000000080008009 .quad 0x000000008000000a .quad 0x000000008000808b .quad 0x800000000000008b .quad 0x8000000000008089 .quad 0x8000000000008003 .quad 0x8000000000008002 .quad 0x8000000000000080 .quad 0x000000000000800a .quad 0x800000008000000a .quad 0x8000000080008081 .quad 0x8000000000008080 .quad 0x0000000080000001 .quad 0x8000000080008008 .def KeccakF1600_int .type 32 .endef .align 5 KeccakF1600_int: AARCH64_SIGN_LINK_REGISTER adr x28,iotas_hw stp x28,x30,[sp,#16] // 32 bytes on top are mine b Loop .align 4 Loop: ////////////////////////////////////////// Theta eor x26,x0,x5 stp x4,x9,[sp,#0] // offload pair... eor x27,x1,x6 eor x28,x2,x7 eor x30,x3,x8 eor x4,x4,x9 eor x26,x26,x10 eor x27,x27,x11 eor x28,x28,x12 eor x30,x30,x13 eor x4,x4,x14 eor x26,x26,x15 eor x27,x27,x16 eor x28,x28,x17 eor x30,x30,x25 eor x4,x4,x19 eor x26,x26,x20 eor x28,x28,x22 eor x27,x27,x21 eor x30,x30,x23 eor x4,x4,x24 eor x9,x26,x28,ror#63 eor x1,x1,x9 eor x6,x6,x9 eor x11,x11,x9 eor x16,x16,x9 eor x21,x21,x9 eor x9,x27,x30,ror#63 eor x28,x28,x4,ror#63 eor x30,x30,x26,ror#63 eor x4,x4,x27,ror#63 eor x27, x2,x9 // mov x27,x2 eor x7,x7,x9 eor x12,x12,x9 eor x17,x17,x9 eor x22,x22,x9 eor x0,x0,x4 eor x5,x5,x4 eor x10,x10,x4 eor x15,x15,x4 eor x20,x20,x4 ldp x4,x9,[sp,#0] // re-load offloaded data eor x26, x3,x28 // mov x26,x3 eor x8,x8,x28 eor x13,x13,x28 eor x25,x25,x28 eor x23,x23,x28 eor x28, x4,x30 // mov x28,x4 eor x9,x9,x30 eor x14,x14,x30 eor x19,x19,x30 eor x24,x24,x30 ////////////////////////////////////////// Rho+Pi mov x30,x1 ror x1,x6,#20 //mov x27,x2 ror x2,x12,#21 //mov x26,x3 ror x3,x25,#43 //mov x28,x4 ror x4,x24,#50 ror x6,x9,#44 ror x12,x13,#39 ror x25,x17,#49 ror x24,x21,#62 ror x9,x22,#3 ror x13,x19,#56 ror x17,x11,#54 ror x21,x8,#9 ror x22,x14,#25 ror x19,x23,#8 ror x11,x7,#58 ror x8,x16,#19 ror x14,x20,#46 ror x23,x15,#23 ror x7,x10,#61 ror x16,x5,#28 ror x5,x26,#36 ror x10,x30,#63 ror x15,x28,#37 ror x20,x27,#2 ////////////////////////////////////////// Chi+Iota bic x26,x2,x1 bic x27,x3,x2 bic x28,x0,x4 bic x30,x1,x0 eor x0,x0,x26 bic x26,x4,x3 eor x1,x1,x27 ldr x27,[sp,#16] eor x3,x3,x28 eor x4,x4,x30 eor x2,x2,x26 ldr x30,[x27],#8 // Iota[i++] bic x26,x7,x6 tst x27,#255 // are we done? 
str x27,[sp,#16] bic x27,x8,x7 bic x28,x5,x9 eor x0,x0,x30 // A[0][0] ^= Iota bic x30,x6,x5 eor x5,x5,x26 bic x26,x9,x8 eor x6,x6,x27 eor x8,x8,x28 eor x9,x9,x30 eor x7,x7,x26 bic x26,x12,x11 bic x27,x13,x12 bic x28,x10,x14 bic x30,x11,x10 eor x10,x10,x26 bic x26,x14,x13 eor x11,x11,x27 eor x13,x13,x28 eor x14,x14,x30 eor x12,x12,x26 bic x26,x17,x16 bic x27,x25,x17 bic x28,x15,x19 bic x30,x16,x15 eor x15,x15,x26 bic x26,x19,x25 eor x16,x16,x27 eor x25,x25,x28 eor x19,x19,x30 eor x17,x17,x26 bic x26,x22,x21 bic x27,x23,x22 bic x28,x20,x24 bic x30,x21,x20 eor x20,x20,x26 bic x26,x24,x23 eor x21,x21,x27 eor x23,x23,x28 eor x24,x24,x30 eor x22,x22,x26 bne Loop ldr x30,[sp,#24] AARCH64_VALIDATE_LINK_REGISTER ret .globl KeccakF1600_hw .def KeccakF1600_hw .type 32 .endef .align 5 KeccakF1600_hw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#48 str x0,[sp,#32] // offload argument mov x26,x0 ldp x0,x1,[x0,#16*0] ldp x2,x3,[x26,#16*1] ldp x4,x5,[x26,#16*2] ldp x6,x7,[x26,#16*3] ldp x8,x9,[x26,#16*4] ldp x10,x11,[x26,#16*5] ldp x12,x13,[x26,#16*6] ldp x14,x15,[x26,#16*7] ldp x16,x17,[x26,#16*8] ldp x25,x19,[x26,#16*9] ldp x20,x21,[x26,#16*10] ldp x22,x23,[x26,#16*11] ldr x24,[x26,#16*12] bl KeccakF1600_int ldr x26,[sp,#32] stp x0,x1,[x26,#16*0] stp x2,x3,[x26,#16*1] stp x4,x5,[x26,#16*2] stp x6,x7,[x26,#16*3] stp x8,x9,[x26,#16*4] stp x10,x11,[x26,#16*5] stp x12,x13,[x26,#16*6] stp x14,x15,[x26,#16*7] stp x16,x17,[x26,#16*8] stp x25,x19,[x26,#16*9] stp x20,x21,[x26,#16*10] stp x22,x23,[x26,#16*11] str x24,[x26,#16*12] ldp x19,x20,[x29,#16] add sp,sp,#48 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 AARCH64_VALIDATE_LINK_REGISTER ret .byte 75,101,99,99,97,107,45,49,54,48,48,32,112,101,114,109,117,116,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
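The Theta / Rho+Pi / Chi+Iota markers inside KeccakF1600_int above follow the standard Keccak-f[1600] round structure. As a reference for the Theta column-parity step, here is a spec-level C sketch (standard definition, not a transcription of the register allocation the assembly uses):

#include <stdint.h>

#define ROTL64(x, n) (((x) << (n)) | ((x) >> (64 - (n))))

/* Theta step of Keccak-f[1600] on a 5x5 state of 64-bit lanes:
 * compute the parity C[x] of each column, derive D[x] from the two
 * neighbouring columns, and XOR D[x] into every lane of column x.
 * The assembly computes the same C[]/D[] values with the 25 lanes
 * held in general-purpose registers. */
static void keccak_theta(uint64_t A[5][5]) {
    uint64_t C[5], D[5];
    for (int x = 0; x < 5; x++)
        C[x] = A[x][0] ^ A[x][1] ^ A[x][2] ^ A[x][3] ^ A[x][4];
    for (int x = 0; x < 5; x++)
        D[x] = C[(x + 4) % 5] ^ ROTL64(C[(x + 1) % 5], 1);
    for (int x = 0; x < 5; x++)
        for (int y = 0; y < 5; y++)
            A[x][y] ^= D[x];
}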
wlsfx/bnbb
1,979
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/bn-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <openssl/arm_arch.h> .text // BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, // size_t num); .globl bn_add_words .align 4 bn_add_words: .cfi_startproc AARCH64_VALID_CALL_TARGET # Clear the carry flag. cmn xzr, xzr # aarch64 can load two registers at a time, so we do two loop iterations at # at a time. Split x3 = 2 * x8 + x3. This allows loop # operations to use CBNZ without clobbering the carry flag. lsr x8, x3, #1 and x3, x3, #1 cbz x8, Ladd_tail Ladd_loop: ldp x4, x5, [x1], #16 ldp x6, x7, [x2], #16 sub x8, x8, #1 adcs x4, x4, x6 adcs x5, x5, x7 stp x4, x5, [x0], #16 cbnz x8, Ladd_loop Ladd_tail: cbz x3, Ladd_exit ldr x4, [x1], #8 ldr x6, [x2], #8 adcs x4, x4, x6 str x4, [x0], #8 Ladd_exit: cset x0, cs ret .cfi_endproc // BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, // size_t num); .globl bn_sub_words .align 4 bn_sub_words: .cfi_startproc AARCH64_VALID_CALL_TARGET # Set the carry flag. Arm's borrow bit is flipped from the carry flag, # so we want C = 1 here. cmp xzr, xzr # aarch64 can load two registers at a time, so we do two loop iterations at # at a time. Split x3 = 2 * x8 + x3. This allows loop # operations to use CBNZ without clobbering the carry flag. lsr x8, x3, #1 and x3, x3, #1 cbz x8, Lsub_tail Lsub_loop: ldp x4, x5, [x1], #16 ldp x6, x7, [x2], #16 sub x8, x8, #1 sbcs x4, x4, x6 sbcs x5, x5, x7 stp x4, x5, [x0], #16 cbnz x8, Lsub_loop Lsub_tail: cbz x3, Lsub_exit ldr x4, [x1], #8 ldr x6, [x2], #8 sbcs x4, x4, x6 str x4, [x0], #8 Lsub_exit: cset x0, cc ret .cfi_endproc #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
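The carry discipline described in the bn_add_words comments above (prime the flag with cmn/cmp, keep it live across the two-word unrolled loop, return it with cset) corresponds to ordinary word-by-word big-number addition. A reference C sketch of the same contract, assuming BN_ULONG is a 64-bit word as on aarch64 (sketch only, not the library's implementation):

#include <stdint.h>
#include <stddef.h>

/* Reference semantics for bn_add_words: rp[i] = ap[i] + bp[i] + carry,
 * propagating and finally returning the carry.  The assembly keeps this
 * carry in the CPU flags (adcs/cset) rather than in a variable. */
static uint64_t ref_bn_add_words(uint64_t *rp, const uint64_t *ap,
                                 const uint64_t *bp, size_t num) {
    uint64_t carry = 0;
    for (size_t i = 0; i < num; i++) {
        uint64_t t = ap[i] + carry;
        carry = (t < carry);        /* carry out of ap[i] + carry */
        rp[i] = t + bp[i];
        carry += (rp[i] < t);       /* carry out of t + bp[i] */
    }
    return carry;
}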
wlsfx/bnbb
32,541
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/fipsmodule/armv8-mont.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <openssl/arm_arch.h> .text .globl bn_mul_mont .def bn_mul_mont .type 32 .endef .align 5 bn_mul_mont: .cfi_startproc AARCH64_SIGN_LINK_REGISTER tst x5,#7 b.eq __bn_sqr8x_mont tst x5,#3 b.eq __bn_mul4x_mont Lmul_mont: stp x29,x30,[sp,#-64]! .cfi_def_cfa_offset 64 .cfi_offset x29, -64 .cfi_offset x30, -56 add x29,sp,#0 .cfi_def_cfa x29, 64 stp x19,x20,[sp,#16] .cfi_offset x19, -48 .cfi_offset x20, -40 stp x21,x22,[sp,#32] .cfi_offset x21, -32 .cfi_offset x22, -24 stp x23,x24,[sp,#48] .cfi_offset x23, -16 .cfi_offset x24, -8 ldr x9,[x2],#8 // bp[0] sub x22,sp,x5,lsl#3 ldp x7,x8,[x1],#16 // ap[0..1] lsl x5,x5,#3 ldr x4,[x4] // *n0 and x22,x22,#-16 // ABI says so ldp x13,x14,[x3],#16 // np[0..1] mul x6,x7,x9 // ap[0]*bp[0] sub x21,x5,#16 // j=num-2 umulh x7,x7,x9 mul x10,x8,x9 // ap[1]*bp[0] umulh x11,x8,x9 mul x15,x6,x4 // "tp[0]"*n0 mov sp,x22 // alloca // (*) mul x12,x13,x15 // np[0]*m1 umulh x13,x13,x15 mul x16,x14,x15 // np[1]*m1 // (*) adds x12,x12,x6 // discarded // (*) As for removal of first multiplication and addition // instructions. The outcome of first addition is // guaranteed to be zero, which leaves two computationally // significant outcomes: it either carries or not. Then // question is when does it carry? Is there alternative // way to deduce it? If you follow operations, you can // observe that condition for carry is quite simple: // x6 being non-zero. So that carry can be calculated // by adding -1 to x6. That's what next instruction does. subs xzr,x6,#1 // (*) umulh x17,x14,x15 adc x13,x13,xzr cbz x21,L1st_skip L1st: ldr x8,[x1],#8 adds x6,x10,x7 sub x21,x21,#8 // j-- adc x7,x11,xzr ldr x14,[x3],#8 adds x12,x16,x13 mul x10,x8,x9 // ap[j]*bp[0] adc x13,x17,xzr umulh x11,x8,x9 adds x12,x12,x6 mul x16,x14,x15 // np[j]*m1 adc x13,x13,xzr umulh x17,x14,x15 str x12,[x22],#8 // tp[j-1] cbnz x21,L1st L1st_skip: adds x6,x10,x7 sub x1,x1,x5 // rewind x1 adc x7,x11,xzr adds x12,x16,x13 sub x3,x3,x5 // rewind x3 adc x13,x17,xzr adds x12,x12,x6 sub x20,x5,#8 // i=num-1 adcs x13,x13,x7 adc x19,xzr,xzr // upmost overflow bit stp x12,x13,[x22] Louter: ldr x9,[x2],#8 // bp[i] ldp x7,x8,[x1],#16 ldr x23,[sp] // tp[0] add x22,sp,#8 mul x6,x7,x9 // ap[0]*bp[i] sub x21,x5,#16 // j=num-2 umulh x7,x7,x9 ldp x13,x14,[x3],#16 mul x10,x8,x9 // ap[1]*bp[i] adds x6,x6,x23 umulh x11,x8,x9 adc x7,x7,xzr mul x15,x6,x4 sub x20,x20,#8 // i-- // (*) mul x12,x13,x15 // np[0]*m1 umulh x13,x13,x15 mul x16,x14,x15 // np[1]*m1 // (*) adds x12,x12,x6 subs xzr,x6,#1 // (*) umulh x17,x14,x15 cbz x21,Linner_skip Linner: ldr x8,[x1],#8 adc x13,x13,xzr ldr x23,[x22],#8 // tp[j] adds x6,x10,x7 sub x21,x21,#8 // j-- adc x7,x11,xzr adds x12,x16,x13 ldr x14,[x3],#8 adc x13,x17,xzr mul x10,x8,x9 // ap[j]*bp[i] adds x6,x6,x23 umulh x11,x8,x9 adc x7,x7,xzr mul x16,x14,x15 // np[j]*m1 adds x12,x12,x6 umulh x17,x14,x15 str x12,[x22,#-16] // tp[j-1] cbnz x21,Linner Linner_skip: ldr x23,[x22],#8 // tp[j] adc x13,x13,xzr adds x6,x10,x7 sub x1,x1,x5 // rewind x1 adc x7,x11,xzr adds x12,x16,x13 sub x3,x3,x5 // rewind x3 adcs x13,x17,x19 adc x19,xzr,xzr adds x6,x6,x23 adc x7,x7,xzr adds x12,x12,x6 adcs x13,x13,x7 adc x19,x19,xzr // upmost overflow bit stp x12,x13,[x22,#-16] cbnz x20,Louter // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. 
But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. ldr x23,[sp] // tp[0] add x22,sp,#8 ldr x14,[x3],#8 // np[0] subs x21,x5,#8 // j=num-1 and clear borrow mov x1,x0 Lsub: sbcs x8,x23,x14 // tp[j]-np[j] ldr x23,[x22],#8 sub x21,x21,#8 // j-- ldr x14,[x3],#8 str x8,[x1],#8 // rp[j]=tp[j]-np[j] cbnz x21,Lsub sbcs x8,x23,x14 sbcs x19,x19,xzr // did it borrow? str x8,[x1],#8 // rp[num-1] ldr x23,[sp] // tp[0] add x22,sp,#8 ldr x8,[x0],#8 // rp[0] sub x5,x5,#8 // num-- nop Lcond_copy: sub x5,x5,#8 // num-- csel x14,x23,x8,lo // did it borrow? ldr x23,[x22],#8 ldr x8,[x0],#8 str xzr,[x22,#-16] // wipe tp str x14,[x0,#-16] cbnz x5,Lcond_copy csel x14,x23,x8,lo str xzr,[x22,#-8] // wipe tp str x14,[x0,#-8] ldp x19,x20,[x29,#16] .cfi_restore x19 .cfi_restore x20 mov sp,x29 .cfi_def_cfa sp, 64 ldp x21,x22,[x29,#32] .cfi_restore x21 .cfi_restore x22 mov x0,#1 ldp x23,x24,[x29,#48] .cfi_restore x23 .cfi_restore x24 ldr x29,[sp],#64 .cfi_restore x29 .cfi_def_cfa_offset 0 AARCH64_VALIDATE_LINK_REGISTER ret .cfi_endproc .def __bn_sqr8x_mont .type 32 .endef .align 5 __bn_sqr8x_mont: .cfi_startproc // Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_sqr8x_mont is jumped to // only from bn_mul_mont which has already signed the return address. cmp x1,x2 b.ne __bn_mul4x_mont Lsqr8x_mont: stp x29,x30,[sp,#-128]! .cfi_def_cfa_offset 128 .cfi_offset x29, -128 .cfi_offset x30, -120 add x29,sp,#0 .cfi_def_cfa x29, 128 stp x19,x20,[sp,#16] .cfi_offset x19, -112 .cfi_offset x20, -104 stp x21,x22,[sp,#32] .cfi_offset x21, -96 .cfi_offset x22, -88 stp x23,x24,[sp,#48] .cfi_offset x23, -80 .cfi_offset x24, -72 stp x25,x26,[sp,#64] .cfi_offset x25, -64 .cfi_offset x26, -56 stp x27,x28,[sp,#80] .cfi_offset x27, -48 .cfi_offset x28, -40 stp x0,x3,[sp,#96] // offload rp and np ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] ldp x10,x11,[x1,#8*4] ldp x12,x13,[x1,#8*6] sub x2,sp,x5,lsl#4 lsl x5,x5,#3 ldr x4,[x4] // *n0 mov sp,x2 // alloca sub x27,x5,#8*8 b Lsqr8x_zero_start Lsqr8x_zero: sub x27,x27,#8*8 stp xzr,xzr,[x2,#8*0] stp xzr,xzr,[x2,#8*2] stp xzr,xzr,[x2,#8*4] stp xzr,xzr,[x2,#8*6] Lsqr8x_zero_start: stp xzr,xzr,[x2,#8*8] stp xzr,xzr,[x2,#8*10] stp xzr,xzr,[x2,#8*12] stp xzr,xzr,[x2,#8*14] add x2,x2,#8*16 cbnz x27,Lsqr8x_zero add x3,x1,x5 add x1,x1,#8*8 mov x19,xzr mov x20,xzr mov x21,xzr mov x22,xzr mov x23,xzr mov x24,xzr mov x25,xzr mov x26,xzr mov x2,sp str x4,[x29,#112] // offload n0 // Multiply everything but a[i]*a[i] .align 4 Lsqr8x_outer_loop: // a[1]a[0] (i) // a[2]a[0] // a[3]a[0] // a[4]a[0] // a[5]a[0] // a[6]a[0] // a[7]a[0] // a[2]a[1] (ii) // a[3]a[1] // a[4]a[1] // a[5]a[1] // a[6]a[1] // a[7]a[1] // a[3]a[2] (iii) // a[4]a[2] // a[5]a[2] // a[6]a[2] // a[7]a[2] // a[4]a[3] (iv) // a[5]a[3] // a[6]a[3] // a[7]a[3] // a[5]a[4] (v) // a[6]a[4] // a[7]a[4] // a[6]a[5] (vi) // a[7]a[5] // a[7]a[6] (vii) mul x14,x7,x6 // lo(a[1..7]*a[0]) (i) mul x15,x8,x6 mul x16,x9,x6 mul x17,x10,x6 adds x20,x20,x14 // t[1]+lo(a[1]*a[0]) mul x14,x11,x6 adcs x21,x21,x15 mul x15,x12,x6 adcs x22,x22,x16 mul x16,x13,x6 adcs x23,x23,x17 umulh x17,x7,x6 // hi(a[1..7]*a[0]) adcs x24,x24,x14 umulh x14,x8,x6 adcs x25,x25,x15 umulh x15,x9,x6 adcs x26,x26,x16 umulh x16,x10,x6 stp x19,x20,[x2],#8*2 // t[0..1] adc x19,xzr,xzr // t[8] adds x21,x21,x17 // t[2]+lo(a[1]*a[0]) umulh x17,x11,x6 adcs x22,x22,x14 umulh x14,x12,x6 adcs x23,x23,x15 umulh x15,x13,x6 adcs x24,x24,x16 mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii) adcs x25,x25,x17 mul x17,x9,x7 adcs x26,x26,x14 mul 
x14,x10,x7 adc x19,x19,x15 mul x15,x11,x7 adds x22,x22,x16 mul x16,x12,x7 adcs x23,x23,x17 mul x17,x13,x7 adcs x24,x24,x14 umulh x14,x8,x7 // hi(a[2..7]*a[1]) adcs x25,x25,x15 umulh x15,x9,x7 adcs x26,x26,x16 umulh x16,x10,x7 adcs x19,x19,x17 umulh x17,x11,x7 stp x21,x22,[x2],#8*2 // t[2..3] adc x20,xzr,xzr // t[9] adds x23,x23,x14 umulh x14,x12,x7 adcs x24,x24,x15 umulh x15,x13,x7 adcs x25,x25,x16 mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii) adcs x26,x26,x17 mul x17,x10,x8 adcs x19,x19,x14 mul x14,x11,x8 adc x20,x20,x15 mul x15,x12,x8 adds x24,x24,x16 mul x16,x13,x8 adcs x25,x25,x17 umulh x17,x9,x8 // hi(a[3..7]*a[2]) adcs x26,x26,x14 umulh x14,x10,x8 adcs x19,x19,x15 umulh x15,x11,x8 adcs x20,x20,x16 umulh x16,x12,x8 stp x23,x24,[x2],#8*2 // t[4..5] adc x21,xzr,xzr // t[10] adds x25,x25,x17 umulh x17,x13,x8 adcs x26,x26,x14 mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv) adcs x19,x19,x15 mul x15,x11,x9 adcs x20,x20,x16 mul x16,x12,x9 adc x21,x21,x17 mul x17,x13,x9 adds x26,x26,x14 umulh x14,x10,x9 // hi(a[4..7]*a[3]) adcs x19,x19,x15 umulh x15,x11,x9 adcs x20,x20,x16 umulh x16,x12,x9 adcs x21,x21,x17 umulh x17,x13,x9 stp x25,x26,[x2],#8*2 // t[6..7] adc x22,xzr,xzr // t[11] adds x19,x19,x14 mul x14,x11,x10 // lo(a[5..7]*a[4]) (v) adcs x20,x20,x15 mul x15,x12,x10 adcs x21,x21,x16 mul x16,x13,x10 adc x22,x22,x17 umulh x17,x11,x10 // hi(a[5..7]*a[4]) adds x20,x20,x14 umulh x14,x12,x10 adcs x21,x21,x15 umulh x15,x13,x10 adcs x22,x22,x16 mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi) adc x23,xzr,xzr // t[12] adds x21,x21,x17 mul x17,x13,x11 adcs x22,x22,x14 umulh x14,x12,x11 // hi(a[6..7]*a[5]) adc x23,x23,x15 umulh x15,x13,x11 adds x22,x22,x16 mul x16,x13,x12 // lo(a[7]*a[6]) (vii) adcs x23,x23,x17 umulh x17,x13,x12 // hi(a[7]*a[6]) adc x24,xzr,xzr // t[13] adds x23,x23,x14 sub x27,x3,x1 // done yet? adc x24,x24,x15 adds x24,x24,x16 sub x14,x3,x5 // rewinded ap adc x25,xzr,xzr // t[14] add x25,x25,x17 cbz x27,Lsqr8x_outer_break mov x4,x6 ldp x6,x7,[x2,#8*0] ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] adds x19,x19,x6 adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x0,x1 adcs x26,xzr,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved below mov x27,#-8*8 // a[8]a[0] // a[9]a[0] // a[a]a[0] // a[b]a[0] // a[c]a[0] // a[d]a[0] // a[e]a[0] // a[f]a[0] // a[8]a[1] // a[f]a[1]........................ // a[8]a[2] // a[f]a[2]........................ // a[8]a[3] // a[f]a[3]........................ // a[8]a[4] // a[f]a[4]........................ // a[8]a[5] // a[f]a[5]........................ // a[8]a[6] // a[f]a[6]........................ // a[8]a[7] // a[f]a[7]........................ Lsqr8x_mul: mul x14,x6,x4 adc x28,xzr,xzr // carry bit, modulo-scheduled mul x15,x7,x4 add x27,x27,#8 mul x16,x8,x4 mul x17,x9,x4 adds x19,x19,x14 mul x14,x10,x4 adcs x20,x20,x15 mul x15,x11,x4 adcs x21,x21,x16 mul x16,x12,x4 adcs x22,x22,x17 mul x17,x13,x4 adcs x23,x23,x14 umulh x14,x6,x4 adcs x24,x24,x15 umulh x15,x7,x4 adcs x25,x25,x16 umulh x16,x8,x4 adcs x26,x26,x17 umulh x17,x9,x4 adc x28,x28,xzr str x19,[x2],#8 adds x19,x20,x14 umulh x14,x10,x4 adcs x20,x21,x15 umulh x15,x11,x4 adcs x21,x22,x16 umulh x16,x12,x4 adcs x22,x23,x17 umulh x17,x13,x4 ldr x4,[x0,x27] adcs x23,x24,x14 adcs x24,x25,x15 adcs x25,x26,x16 adcs x26,x28,x17 //adc x28,xzr,xzr // moved above cbnz x27,Lsqr8x_mul // note that carry flag is guaranteed // to be zero at this point cmp x1,x3 // done yet? 
b.eq Lsqr8x_break ldp x6,x7,[x2,#8*0] ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] adds x19,x19,x6 ldr x4,[x0,#-8*8] adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x27,#-8*8 adcs x26,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved above b Lsqr8x_mul .align 4 Lsqr8x_break: ldp x6,x7,[x0,#8*0] add x1,x0,#8*8 ldp x8,x9,[x0,#8*2] sub x14,x3,x1 // is it last iteration? ldp x10,x11,[x0,#8*4] sub x15,x2,x14 ldp x12,x13,[x0,#8*6] cbz x14,Lsqr8x_outer_loop stp x19,x20,[x2,#8*0] ldp x19,x20,[x15,#8*0] stp x21,x22,[x2,#8*2] ldp x21,x22,[x15,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[x15,#8*4] stp x25,x26,[x2,#8*6] mov x2,x15 ldp x25,x26,[x15,#8*6] b Lsqr8x_outer_loop .align 4 Lsqr8x_outer_break: // Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0] ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0] ldp x15,x16,[sp,#8*1] ldp x11,x13,[x14,#8*2] add x1,x14,#8*4 ldp x17,x14,[sp,#8*3] stp x19,x20,[x2,#8*0] mul x19,x7,x7 stp x21,x22,[x2,#8*2] umulh x7,x7,x7 stp x23,x24,[x2,#8*4] mul x8,x9,x9 stp x25,x26,[x2,#8*6] mov x2,sp umulh x9,x9,x9 adds x20,x7,x15,lsl#1 extr x15,x16,x15,#63 sub x27,x5,#8*4 Lsqr4x_shift_n_add: adcs x21,x8,x15 extr x16,x17,x16,#63 sub x27,x27,#8*4 adcs x22,x9,x16 ldp x15,x16,[x2,#8*5] mul x10,x11,x11 ldp x7,x9,[x1],#8*2 umulh x11,x11,x11 mul x12,x13,x13 umulh x13,x13,x13 extr x17,x14,x17,#63 stp x19,x20,[x2,#8*0] adcs x23,x10,x17 extr x14,x15,x14,#63 stp x21,x22,[x2,#8*2] adcs x24,x11,x14 ldp x17,x14,[x2,#8*7] extr x15,x16,x15,#63 adcs x25,x12,x15 extr x16,x17,x16,#63 adcs x26,x13,x16 ldp x15,x16,[x2,#8*9] mul x6,x7,x7 ldp x11,x13,[x1],#8*2 umulh x7,x7,x7 mul x8,x9,x9 umulh x9,x9,x9 stp x23,x24,[x2,#8*4] extr x17,x14,x17,#63 stp x25,x26,[x2,#8*6] add x2,x2,#8*8 adcs x19,x6,x17 extr x14,x15,x14,#63 adcs x20,x7,x14 ldp x17,x14,[x2,#8*3] extr x15,x16,x15,#63 cbnz x27,Lsqr4x_shift_n_add ldp x1,x4,[x29,#104] // pull np and n0 adcs x21,x8,x15 extr x16,x17,x16,#63 adcs x22,x9,x16 ldp x15,x16,[x2,#8*5] mul x10,x11,x11 umulh x11,x11,x11 stp x19,x20,[x2,#8*0] mul x12,x13,x13 umulh x13,x13,x13 stp x21,x22,[x2,#8*2] extr x17,x14,x17,#63 adcs x23,x10,x17 extr x14,x15,x14,#63 ldp x19,x20,[sp,#8*0] adcs x24,x11,x14 extr x15,x16,x15,#63 ldp x6,x7,[x1,#8*0] adcs x25,x12,x15 extr x16,xzr,x16,#63 ldp x8,x9,[x1,#8*2] adc x26,x13,x16 ldp x10,x11,[x1,#8*4] // Reduce by 512 bits per iteration mul x28,x4,x19 // t[0]*n0 ldp x12,x13,[x1,#8*6] add x3,x1,x5 ldp x21,x22,[sp,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[sp,#8*4] stp x25,x26,[x2,#8*6] ldp x25,x26,[sp,#8*6] add x1,x1,#8*8 mov x30,xzr // initial top-most carry mov x2,sp mov x27,#8 Lsqr8x_reduction: // (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0) mul x15,x7,x28 sub x27,x27,#1 mul x16,x8,x28 str x28,[x2],#8 // put aside t[0]*n0 for tail processing mul x17,x9,x28 // (*) adds xzr,x19,x14 subs xzr,x19,#1 // (*) mul x14,x10,x28 adcs x19,x20,x15 mul x15,x11,x28 adcs x20,x21,x16 mul x16,x12,x28 adcs x21,x22,x17 mul x17,x13,x28 adcs x22,x23,x14 umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0) adcs x23,x24,x15 umulh x15,x7,x28 adcs x24,x25,x16 umulh x16,x8,x28 adcs x25,x26,x17 umulh x17,x9,x28 adc x26,xzr,xzr adds x19,x19,x14 umulh x14,x10,x28 adcs x20,x20,x15 umulh x15,x11,x28 adcs x21,x21,x16 umulh x16,x12,x28 adcs x22,x22,x17 umulh x17,x13,x28 mul x28,x4,x19 // next t[0]*n0 adcs x23,x23,x14 adcs x24,x24,x15 adcs x25,x25,x16 adc x26,x26,x17 cbnz x27,Lsqr8x_reduction ldp x14,x15,[x2,#8*0] ldp 
x16,x17,[x2,#8*2] mov x0,x2 sub x27,x3,x1 // done yet? adds x19,x19,x14 adcs x20,x20,x15 ldp x14,x15,[x2,#8*4] adcs x21,x21,x16 adcs x22,x22,x17 ldp x16,x17,[x2,#8*6] adcs x23,x23,x14 adcs x24,x24,x15 adcs x25,x25,x16 adcs x26,x26,x17 //adc x28,xzr,xzr // moved below cbz x27,Lsqr8x8_post_condition ldr x4,[x2,#-8*8] ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] ldp x10,x11,[x1,#8*4] mov x27,#-8*8 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 Lsqr8x_tail: mul x14,x6,x4 adc x28,xzr,xzr // carry bit, modulo-scheduled mul x15,x7,x4 add x27,x27,#8 mul x16,x8,x4 mul x17,x9,x4 adds x19,x19,x14 mul x14,x10,x4 adcs x20,x20,x15 mul x15,x11,x4 adcs x21,x21,x16 mul x16,x12,x4 adcs x22,x22,x17 mul x17,x13,x4 adcs x23,x23,x14 umulh x14,x6,x4 adcs x24,x24,x15 umulh x15,x7,x4 adcs x25,x25,x16 umulh x16,x8,x4 adcs x26,x26,x17 umulh x17,x9,x4 adc x28,x28,xzr str x19,[x2],#8 adds x19,x20,x14 umulh x14,x10,x4 adcs x20,x21,x15 umulh x15,x11,x4 adcs x21,x22,x16 umulh x16,x12,x4 adcs x22,x23,x17 umulh x17,x13,x4 ldr x4,[x0,x27] adcs x23,x24,x14 adcs x24,x25,x15 adcs x25,x26,x16 adcs x26,x28,x17 //adc x28,xzr,xzr // moved above cbnz x27,Lsqr8x_tail // note that carry flag is guaranteed // to be zero at this point ldp x6,x7,[x2,#8*0] sub x27,x3,x1 // done yet? sub x16,x3,x5 // rewinded np ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] cbz x27,Lsqr8x_tail_break ldr x4,[x0,#-8*8] adds x19,x19,x6 adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x27,#-8*8 adcs x26,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved above b Lsqr8x_tail .align 4 Lsqr8x_tail_break: ldr x4,[x29,#112] // pull n0 add x27,x2,#8*8 // end of current t[num] window subs xzr,x30,#1 // "move" top-most carry to carry bit adcs x14,x19,x6 adcs x15,x20,x7 ldp x19,x20,[x0,#8*0] adcs x21,x21,x8 ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0] adcs x22,x22,x9 ldp x8,x9,[x16,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x16,#8*4] adcs x25,x25,x12 adcs x26,x26,x13 ldp x12,x13,[x16,#8*6] add x1,x16,#8*8 adc x30,xzr,xzr // top-most carry mul x28,x4,x19 stp x14,x15,[x2,#8*0] stp x21,x22,[x2,#8*2] ldp x21,x22,[x0,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[x0,#8*4] cmp x27,x29 // did we hit the bottom? stp x25,x26,[x2,#8*6] mov x2,x0 // slide the window ldp x25,x26,[x0,#8*6] mov x27,#8 b.ne Lsqr8x_reduction // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. ldr x0,[x29,#96] // pull rp add x2,x2,#8*8 subs x14,x19,x6 sbcs x15,x20,x7 sub x27,x5,#8*8 mov x3,x0 // x0 copy Lsqr8x_sub: sbcs x16,x21,x8 ldp x6,x7,[x1,#8*0] sbcs x17,x22,x9 stp x14,x15,[x0,#8*0] sbcs x14,x23,x10 ldp x8,x9,[x1,#8*2] sbcs x15,x24,x11 stp x16,x17,[x0,#8*2] sbcs x16,x25,x12 ldp x10,x11,[x1,#8*4] sbcs x17,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 ldp x19,x20,[x2,#8*0] sub x27,x27,#8*8 ldp x21,x22,[x2,#8*2] ldp x23,x24,[x2,#8*4] ldp x25,x26,[x2,#8*6] add x2,x2,#8*8 stp x14,x15,[x0,#8*4] sbcs x14,x19,x6 stp x16,x17,[x0,#8*6] add x0,x0,#8*8 sbcs x15,x20,x7 cbnz x27,Lsqr8x_sub sbcs x16,x21,x8 mov x2,sp add x1,sp,x5 ldp x6,x7,[x3,#8*0] sbcs x17,x22,x9 stp x14,x15,[x0,#8*0] sbcs x14,x23,x10 ldp x8,x9,[x3,#8*2] sbcs x15,x24,x11 stp x16,x17,[x0,#8*2] sbcs x16,x25,x12 ldp x19,x20,[x1,#8*0] sbcs x17,x26,x13 ldp x21,x22,[x1,#8*2] sbcs xzr,x30,xzr // did it borrow? 
ldr x30,[x29,#8] // pull return address stp x14,x15,[x0,#8*4] stp x16,x17,[x0,#8*6] sub x27,x5,#8*4 Lsqr4x_cond_copy: sub x27,x27,#8*4 csel x14,x19,x6,lo stp xzr,xzr,[x2,#8*0] csel x15,x20,x7,lo ldp x6,x7,[x3,#8*4] ldp x19,x20,[x1,#8*4] csel x16,x21,x8,lo stp xzr,xzr,[x2,#8*2] add x2,x2,#8*4 csel x17,x22,x9,lo ldp x8,x9,[x3,#8*6] ldp x21,x22,[x1,#8*6] add x1,x1,#8*4 stp x14,x15,[x3,#8*0] stp x16,x17,[x3,#8*2] add x3,x3,#8*4 stp xzr,xzr,[x1,#8*0] stp xzr,xzr,[x1,#8*2] cbnz x27,Lsqr4x_cond_copy csel x14,x19,x6,lo stp xzr,xzr,[x2,#8*0] csel x15,x20,x7,lo stp xzr,xzr,[x2,#8*2] csel x16,x21,x8,lo csel x17,x22,x9,lo stp x14,x15,[x3,#8*0] stp x16,x17,[x3,#8*2] b Lsqr8x_done .align 4 Lsqr8x8_post_condition: adc x28,xzr,xzr ldr x30,[x29,#8] // pull return address .cfi_restore x30 // x19-7,x28 hold result, x6-7 hold modulus subs x6,x19,x6 ldr x1,[x29,#96] // pull rp sbcs x7,x20,x7 stp xzr,xzr,[sp,#8*0] sbcs x8,x21,x8 stp xzr,xzr,[sp,#8*2] sbcs x9,x22,x9 stp xzr,xzr,[sp,#8*4] sbcs x10,x23,x10 stp xzr,xzr,[sp,#8*6] sbcs x11,x24,x11 stp xzr,xzr,[sp,#8*8] sbcs x12,x25,x12 stp xzr,xzr,[sp,#8*10] sbcs x13,x26,x13 stp xzr,xzr,[sp,#8*12] sbcs x28,x28,xzr // did it borrow? stp xzr,xzr,[sp,#8*14] // x6-7 hold result-modulus csel x6,x19,x6,lo csel x7,x20,x7,lo csel x8,x21,x8,lo csel x9,x22,x9,lo stp x6,x7,[x1,#8*0] csel x10,x23,x10,lo csel x11,x24,x11,lo stp x8,x9,[x1,#8*2] csel x12,x25,x12,lo csel x13,x26,x13,lo stp x10,x11,[x1,#8*4] stp x12,x13,[x1,#8*6] Lsqr8x_done: ldp x19,x20,[x29,#16] .cfi_restore x19 .cfi_restore x20 mov sp,x29 ldp x21,x22,[x29,#32] .cfi_restore x21 .cfi_restore x22 mov x0,#1 ldp x23,x24,[x29,#48] .cfi_restore x23 .cfi_restore x24 ldp x25,x26,[x29,#64] .cfi_restore x25 .cfi_restore x26 ldp x27,x28,[x29,#80] .cfi_restore x27 .cfi_restore x28 ldr x29,[sp],#128 .cfi_restore x29 .cfi_def_cfa_offset 0 // x30 is popped earlier AARCH64_VALIDATE_LINK_REGISTER ret .cfi_endproc .def __bn_mul4x_mont .type 32 .endef .align 5 __bn_mul4x_mont: .cfi_startproc // Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_mul4x_mont is jumped to // only from bn_mul_mont or __bn_mul8x_mont which have already signed the // return address. stp x29,x30,[sp,#-128]! 
.cfi_def_cfa_offset 128 .cfi_offset x30, -120 .cfi_offset x29, -128 add x29,sp,#0 .cfi_def_cfa x29, 128 stp x19,x20,[sp,#16] .cfi_offset x19, -112 .cfi_offset x20, -104 stp x21,x22,[sp,#32] .cfi_offset x21, -96 .cfi_offset x22, -88 stp x23,x24,[sp,#48] .cfi_offset x23, -80 .cfi_offset x24, -72 stp x25,x26,[sp,#64] .cfi_offset x25, -64 .cfi_offset x26, -56 stp x27,x28,[sp,#80] .cfi_offset x27, -48 .cfi_offset x28, -40 sub x26,sp,x5,lsl#3 lsl x5,x5,#3 ldr x4,[x4] // *n0 sub sp,x26,#8*4 // alloca add x10,x2,x5 add x27,x1,x5 stp x0,x10,[x29,#96] // offload rp and &b[num] ldr x24,[x2,#8*0] // b[0] ldp x6,x7,[x1,#8*0] // a[0..3] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 mov x19,xzr mov x20,xzr mov x21,xzr mov x22,xzr ldp x14,x15,[x3,#8*0] // n[0..3] ldp x16,x17,[x3,#8*2] adds x3,x3,#8*4 // clear carry bit mov x0,xzr mov x28,#0 mov x26,sp Loop_mul4x_1st_reduction: mul x10,x6,x24 // lo(a[0..3]*b[0]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[0..3]*b[0]) adcs x20,x20,x11 mul x25,x19,x4 // t[0]*n0 adcs x21,x21,x12 umulh x11,x7,x24 adcs x22,x22,x13 umulh x12,x8,x24 adc x23,xzr,xzr umulh x13,x9,x24 ldr x24,[x2,x28] // next b[i] (or b[0]) adds x20,x20,x10 // (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0) str x25,[x26],#8 // put aside t[0]*n0 for tail processing adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 // (*) adds xzr,x19,x10 subs xzr,x19,#1 // (*) umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0) adcs x19,x20,x11 umulh x11,x15,x25 adcs x20,x21,x12 umulh x12,x16,x25 adcs x21,x22,x13 umulh x13,x17,x25 adcs x22,x23,x0 adc x0,xzr,xzr adds x19,x19,x10 sub x10,x27,x1 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_1st_reduction cbz x10,Lmul4x4_post_condition ldp x6,x7,[x1,#8*0] // a[4..7] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 ldr x25,[sp] // a[0]*n0 ldp x14,x15,[x3,#8*0] // n[4..7] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 Loop_mul4x_1st_tail: mul x10,x6,x24 // lo(a[4..7]*b[i]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[4..7]*b[i]) adcs x20,x20,x11 umulh x11,x7,x24 adcs x21,x21,x12 umulh x12,x8,x24 adcs x22,x22,x13 umulh x13,x9,x24 adc x23,xzr,xzr ldr x24,[x2,x28] // next b[i] (or b[0]) adds x20,x20,x10 mul x10,x14,x25 // lo(n[4..7]*a[0]*n0) adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 adds x19,x19,x10 umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0) adcs x20,x20,x11 umulh x11,x15,x25 adcs x21,x21,x12 umulh x12,x16,x25 adcs x22,x22,x13 adcs x23,x23,x0 umulh x13,x17,x25 adc x0,xzr,xzr ldr x25,[sp,x28] // next t[0]*n0 str x19,[x26],#8 // result!!! adds x19,x20,x10 sub x10,x27,x1 // done yet? adcs x20,x21,x11 adcs x21,x22,x12 adcs x22,x23,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_1st_tail sub x11,x27,x5 // rewinded x1 cbz x10,Lmul4x_proceed ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 ldp x14,x15,[x3,#8*0] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 b Loop_mul4x_1st_tail .align 5 Lmul4x_proceed: ldr x24,[x2,#8*4]! // *++b adc x30,x0,xzr ldp x6,x7,[x11,#8*0] // a[0..3] sub x3,x3,x5 // rewind np ldp x8,x9,[x11,#8*2] add x1,x11,#8*4 stp x19,x20,[x26,#8*0] // result!!! ldp x19,x20,[sp,#8*4] // t[0..3] stp x21,x22,[x26,#8*2] // result!!! 
ldp x21,x22,[sp,#8*6] ldp x14,x15,[x3,#8*0] // n[0..3] mov x26,sp ldp x16,x17,[x3,#8*2] adds x3,x3,#8*4 // clear carry bit mov x0,xzr .align 4 Loop_mul4x_reduction: mul x10,x6,x24 // lo(a[0..3]*b[4]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[0..3]*b[4]) adcs x20,x20,x11 mul x25,x19,x4 // t[0]*n0 adcs x21,x21,x12 umulh x11,x7,x24 adcs x22,x22,x13 umulh x12,x8,x24 adc x23,xzr,xzr umulh x13,x9,x24 ldr x24,[x2,x28] // next b[i] adds x20,x20,x10 // (*) mul x10,x14,x25 str x25,[x26],#8 // put aside t[0]*n0 for tail processing adcs x21,x21,x11 mul x11,x15,x25 // lo(n[0..3]*t[0]*n0 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 // (*) adds xzr,x19,x10 subs xzr,x19,#1 // (*) umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0 adcs x19,x20,x11 umulh x11,x15,x25 adcs x20,x21,x12 umulh x12,x16,x25 adcs x21,x22,x13 umulh x13,x17,x25 adcs x22,x23,x0 adc x0,xzr,xzr adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_reduction adc x0,x0,xzr ldp x10,x11,[x26,#8*4] // t[4..7] ldp x12,x13,[x26,#8*6] ldp x6,x7,[x1,#8*0] // a[4..7] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr ldr x25,[sp] // t[0]*n0 ldp x14,x15,[x3,#8*0] // n[4..7] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 .align 4 Loop_mul4x_tail: mul x10,x6,x24 // lo(a[4..7]*b[4]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[4..7]*b[4]) adcs x20,x20,x11 umulh x11,x7,x24 adcs x21,x21,x12 umulh x12,x8,x24 adcs x22,x22,x13 umulh x13,x9,x24 adc x23,xzr,xzr ldr x24,[x2,x28] // next b[i] adds x20,x20,x10 mul x10,x14,x25 // lo(n[4..7]*t[0]*n0) adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 adds x19,x19,x10 umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0) adcs x20,x20,x11 umulh x11,x15,x25 adcs x21,x21,x12 umulh x12,x16,x25 adcs x22,x22,x13 umulh x13,x17,x25 adcs x23,x23,x0 ldr x25,[sp,x28] // next a[0]*n0 adc x0,xzr,xzr str x19,[x26],#8 // result!!! adds x19,x20,x10 sub x10,x27,x1 // done yet? adcs x20,x21,x11 adcs x21,x22,x12 adcs x22,x23,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_tail sub x11,x3,x5 // rewinded np? adc x0,x0,xzr cbz x10,Loop_mul4x_break ldp x10,x11,[x26,#8*4] ldp x12,x13,[x26,#8*6] ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr ldp x14,x15,[x3,#8*0] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 b Loop_mul4x_tail .align 4 Loop_mul4x_break: ldp x12,x13,[x29,#96] // pull rp and &b[num] adds x19,x19,x30 add x2,x2,#8*4 // bp++ adcs x20,x20,xzr sub x1,x1,x5 // rewind ap adcs x21,x21,xzr stp x19,x20,[x26,#8*0] // result!!! adcs x22,x22,xzr ldp x19,x20,[sp,#8*4] // t[0..3] adc x30,x0,xzr stp x21,x22,[x26,#8*2] // result!!! cmp x2,x13 // done yet? ldp x21,x22,[sp,#8*6] ldp x14,x15,[x11,#8*0] // n[0..3] ldp x16,x17,[x11,#8*2] add x3,x11,#8*4 b.eq Lmul4x_post ldr x24,[x2] ldp x6,x7,[x1,#8*0] // a[0..3] ldp x8,x9,[x1,#8*2] adds x1,x1,#8*4 // clear carry bit mov x0,xzr mov x26,sp b Loop_mul4x_reduction .align 4 Lmul4x_post: // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
mov x0,x12 mov x27,x12 // x0 copy subs x10,x19,x14 add x26,sp,#8*8 sbcs x11,x20,x15 sub x28,x5,#8*4 Lmul4x_sub: sbcs x12,x21,x16 ldp x14,x15,[x3,#8*0] sub x28,x28,#8*4 ldp x19,x20,[x26,#8*0] sbcs x13,x22,x17 ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 ldp x21,x22,[x26,#8*2] add x26,x26,#8*4 stp x10,x11,[x0,#8*0] sbcs x10,x19,x14 stp x12,x13,[x0,#8*2] add x0,x0,#8*4 sbcs x11,x20,x15 cbnz x28,Lmul4x_sub sbcs x12,x21,x16 mov x26,sp add x1,sp,#8*4 ldp x6,x7,[x27,#8*0] sbcs x13,x22,x17 stp x10,x11,[x0,#8*0] ldp x8,x9,[x27,#8*2] stp x12,x13,[x0,#8*2] ldp x19,x20,[x1,#8*0] ldp x21,x22,[x1,#8*2] sbcs xzr,x30,xzr // did it borrow? ldr x30,[x29,#8] // pull return address sub x28,x5,#8*4 Lmul4x_cond_copy: sub x28,x28,#8*4 csel x10,x19,x6,lo stp xzr,xzr,[x26,#8*0] csel x11,x20,x7,lo ldp x6,x7,[x27,#8*4] ldp x19,x20,[x1,#8*4] csel x12,x21,x8,lo stp xzr,xzr,[x26,#8*2] add x26,x26,#8*4 csel x13,x22,x9,lo ldp x8,x9,[x27,#8*6] ldp x21,x22,[x1,#8*6] add x1,x1,#8*4 stp x10,x11,[x27,#8*0] stp x12,x13,[x27,#8*2] add x27,x27,#8*4 cbnz x28,Lmul4x_cond_copy csel x10,x19,x6,lo stp xzr,xzr,[x26,#8*0] csel x11,x20,x7,lo stp xzr,xzr,[x26,#8*2] csel x12,x21,x8,lo stp xzr,xzr,[x26,#8*3] csel x13,x22,x9,lo stp xzr,xzr,[x26,#8*4] stp x10,x11,[x27,#8*0] stp x12,x13,[x27,#8*2] b Lmul4x_done .align 4 Lmul4x4_post_condition: adc x0,x0,xzr ldr x1,[x29,#96] // pull rp // x19-3,x0 hold result, x14-7 hold modulus subs x6,x19,x14 ldr x30,[x29,#8] // pull return address .cfi_restore x30 sbcs x7,x20,x15 stp xzr,xzr,[sp,#8*0] sbcs x8,x21,x16 stp xzr,xzr,[sp,#8*2] sbcs x9,x22,x17 stp xzr,xzr,[sp,#8*4] sbcs xzr,x0,xzr // did it borrow? stp xzr,xzr,[sp,#8*6] // x6-3 hold result-modulus csel x6,x19,x6,lo csel x7,x20,x7,lo csel x8,x21,x8,lo csel x9,x22,x9,lo stp x6,x7,[x1,#8*0] stp x8,x9,[x1,#8*2] Lmul4x_done: ldp x19,x20,[x29,#16] .cfi_restore x19 .cfi_restore x20 mov sp,x29 ldp x21,x22,[x29,#32] .cfi_restore x21 .cfi_restore x22 mov x0,#1 ldp x23,x24,[x29,#48] .cfi_restore x23 .cfi_restore x24 ldp x25,x26,[x29,#64] .cfi_restore x25 .cfi_restore x26 ldp x27,x28,[x29,#80] .cfi_restore x27 .cfi_restore x28 ldr x29,[sp],#128 .cfi_restore x29 .cfi_def_cfa_offset 0 // x30 is popped earlier AARCH64_VALIDATE_LINK_REGISTER ret .cfi_endproc .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 4 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
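// ----------------------------------------------------------------------------
// Editor's note (hypothetical sketch, not part of the generated file): the
// Lsqr8x_sub / Lmul4x_post tails above implement the usual constant-time
// final reduction: subtract the modulus, check whether the subtraction
// borrowed, and conditionally keep either the original value or the
// difference.  A minimal standalone illustration of that pattern for a
// 2-limb value is kept under "#if 0" so it is never assembled; the symbol
// name and register roles are illustrative only.
#if 0
// value t in x0:x1 (lo:hi), modulus n in x2:x3; returns t if t < n, else t-n
cond_sub_2limb_sketch:
    subs x4,x0,x2        // lo(t - n); carry set means "no borrow"
    sbcs x5,x1,x3        // hi(t - n), consuming the borrow
    csel x0,x0,x4,lo     // borrow (lo): keep t ...
    csel x1,x1,x5,lo     // ... otherwise take t - n
    ret
#endif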
wlsfx/bnbb
40,231
.local/share/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/aws-lc-sys-0.32.0/aws-lc/generated-src/win-aarch64/crypto/chacha/chacha-armv8.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <openssl/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <openssl/arm_arch.h> .section .rodata .align 5 Lsigma: .quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral Lone: .long 1,0,0,0 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .text .globl ChaCha20_ctr32_nohw .def ChaCha20_ctr32_nohw .type 32 .endef .align 5 ChaCha20_ctr32_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 adrp x5,Lsigma add x5,x5,:lo12:Lsigma stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#64 ldp x22,x23,[x5] // load sigma ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ldp x28,x30,[x4] // load counter #ifdef __AARCH64EB__ ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif Loop_outer: mov w5,w22 // unpack key block lsr x6,x22,#32 mov w7,w23 lsr x8,x23,#32 mov w9,w24 lsr x10,x24,#32 mov w11,w25 lsr x12,x25,#32 mov w13,w26 lsr x14,x26,#32 mov w15,w27 lsr x16,x27,#32 mov w17,w28 lsr x19,x28,#32 mov w20,w30 lsr x21,x30,#32 mov x4,#10 subs x2,x2,#64 Loop: sub x4,x4,#1 add w5,w5,w9 add w6,w6,w10 add w7,w7,w11 add w8,w8,w12 eor w17,w17,w5 eor w19,w19,w6 eor w20,w20,w7 eor w21,w21,w8 ror w17,w17,#16 ror w19,w19,#16 ror w20,w20,#16 ror w21,w21,#16 add w13,w13,w17 add w14,w14,w19 add w15,w15,w20 add w16,w16,w21 eor w9,w9,w13 eor w10,w10,w14 eor w11,w11,w15 eor w12,w12,w16 ror w9,w9,#20 ror w10,w10,#20 ror w11,w11,#20 ror w12,w12,#20 add w5,w5,w9 add w6,w6,w10 add w7,w7,w11 add w8,w8,w12 eor w17,w17,w5 eor w19,w19,w6 eor w20,w20,w7 eor w21,w21,w8 ror w17,w17,#24 ror w19,w19,#24 ror w20,w20,#24 ror w21,w21,#24 add w13,w13,w17 add w14,w14,w19 add w15,w15,w20 add w16,w16,w21 eor w9,w9,w13 eor w10,w10,w14 eor w11,w11,w15 eor w12,w12,w16 ror w9,w9,#25 ror w10,w10,#25 ror w11,w11,#25 ror w12,w12,#25 add w5,w5,w10 add w6,w6,w11 add w7,w7,w12 add w8,w8,w9 eor w21,w21,w5 eor w17,w17,w6 eor w19,w19,w7 eor w20,w20,w8 ror w21,w21,#16 ror w17,w17,#16 ror w19,w19,#16 ror w20,w20,#16 add w15,w15,w21 add w16,w16,w17 add w13,w13,w19 add w14,w14,w20 eor w10,w10,w15 eor w11,w11,w16 eor w12,w12,w13 eor w9,w9,w14 ror w10,w10,#20 ror w11,w11,#20 ror w12,w12,#20 ror w9,w9,#20 add w5,w5,w10 add w6,w6,w11 add w7,w7,w12 add w8,w8,w9 eor w21,w21,w5 eor w17,w17,w6 eor w19,w19,w7 eor w20,w20,w8 ror w21,w21,#24 ror w17,w17,#24 ror w19,w19,#24 ror w20,w20,#24 add w15,w15,w21 add w16,w16,w17 add w13,w13,w19 add w14,w14,w20 eor w10,w10,w15 eor w11,w11,w16 eor w12,w12,w13 eor w9,w9,w14 ror w10,w10,#25 ror w11,w11,#25 ror w12,w12,#25 ror w9,w9,#25 cbnz x4,Loop add w5,w5,w22 // accumulate key block add x6,x6,x22,lsr#32 add w7,w7,w23 add x8,x8,x23,lsr#32 add w9,w9,w24 add x10,x10,x24,lsr#32 add w11,w11,w25 add x12,x12,x25,lsr#32 add w13,w13,w26 add x14,x14,x26,lsr#32 add w15,w15,w27 add x16,x16,x27,lsr#32 add w17,w17,w28 add x19,x19,x28,lsr#32 add w20,w20,w30 add x21,x21,x30,lsr#32 b.lo Ltail add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ 
rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#1 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 b.hi Loop_outer ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .align 4 Ltail: add x2,x2,#64 Less_than_64: sub x0,x0,#1 add x1,x1,x2 add x0,x0,x2 add x4,sp,x2 neg x2,x2 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif stp x5,x7,[sp,#0] stp x9,x11,[sp,#16] stp x13,x15,[sp,#32] stp x17,x20,[sp,#48] Loop_tail: ldrb w10,[x1,x2] ldrb w11,[x4,x2] add x2,x2,#1 eor w10,w10,w11 strb w10,[x0,x2] cbnz x2,Loop_tail stp xzr,xzr,[sp,#0] stp xzr,xzr,[sp,#16] stp xzr,xzr,[sp,#32] stp xzr,xzr,[sp,#48] ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .globl ChaCha20_ctr32_neon .def ChaCha20_ctr32_neon .type 32 .endef .align 5 ChaCha20_ctr32_neon: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 adrp x5,Lsigma add x5,x5,:lo12:Lsigma stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] cmp x2,#512 b.hs L512_or_more_neon sub sp,sp,#64 ldp x22,x23,[x5] // load sigma ld1 {v24.4s},[x5],#16 ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ld1 {v25.4s,v26.4s},[x3] ldp x28,x30,[x4] // load counter ld1 {v27.4s},[x4] ld1 {v31.4s},[x5] #ifdef __AARCH64EB__ rev64 v24.4s,v24.4s ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif add v27.4s,v27.4s,v31.4s // += 1 add v28.4s,v27.4s,v31.4s add v29.4s,v28.4s,v31.4s shl v31.4s,v31.4s,#2 // 1 -> 4 Loop_outer_neon: mov w5,w22 // unpack key block lsr x6,x22,#32 mov v0.16b,v24.16b mov w7,w23 lsr x8,x23,#32 mov v4.16b,v24.16b mov w9,w24 lsr x10,x24,#32 mov v16.16b,v24.16b mov w11,w25 mov v1.16b,v25.16b lsr x12,x25,#32 mov v5.16b,v25.16b mov w13,w26 mov v17.16b,v25.16b lsr x14,x26,#32 mov v3.16b,v27.16b mov w15,w27 mov v7.16b,v28.16b lsr x16,x27,#32 mov v19.16b,v29.16b mov w17,w28 mov v2.16b,v26.16b lsr x19,x28,#32 mov v6.16b,v26.16b mov w20,w30 mov v18.16b,v26.16b lsr x21,x30,#32 mov x4,#10 subs x2,x2,#256 Loop_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v16.4s,v16.4s,v17.4s add w7,w7,w11 eor v3.16b,v3.16b,v0.16b add w8,w8,w12 eor v7.16b,v7.16b,v4.16b eor w17,w17,w5 eor v19.16b,v19.16b,v16.16b eor w19,w19,w6 rev32 v3.8h,v3.8h eor w20,w20,w7 rev32 v7.8h,v7.8h eor w21,w21,w8 rev32 v19.8h,v19.8h ror w17,w17,#16 add v2.4s,v2.4s,v3.4s ror w19,w19,#16 add v6.4s,v6.4s,v7.4s ror w20,w20,#16 add v18.4s,v18.4s,v19.4s ror w21,w21,#16 eor v20.16b,v1.16b,v2.16b add w13,w13,w17 eor v21.16b,v5.16b,v6.16b add w14,w14,w19 eor v22.16b,v17.16b,v18.16b add w15,w15,w20 ushr v1.4s,v20.4s,#20 add w16,w16,w21 ushr v5.4s,v21.4s,#20 eor w9,w9,w13 ushr v17.4s,v22.4s,#20 eor w10,w10,w14 sli v1.4s,v20.4s,#12 eor w11,w11,w15 sli v5.4s,v21.4s,#12 eor w12,w12,w16 sli v17.4s,v22.4s,#12 
ror w9,w9,#20 add v0.4s,v0.4s,v1.4s ror w10,w10,#20 add v4.4s,v4.4s,v5.4s ror w11,w11,#20 add v16.4s,v16.4s,v17.4s ror w12,w12,#20 eor v20.16b,v3.16b,v0.16b add w5,w5,w9 eor v21.16b,v7.16b,v4.16b add w6,w6,w10 eor v22.16b,v19.16b,v16.16b add w7,w7,w11 ushr v3.4s,v20.4s,#24 add w8,w8,w12 ushr v7.4s,v21.4s,#24 eor w17,w17,w5 ushr v19.4s,v22.4s,#24 eor w19,w19,w6 sli v3.4s,v20.4s,#8 eor w20,w20,w7 sli v7.4s,v21.4s,#8 eor w21,w21,w8 sli v19.4s,v22.4s,#8 ror w17,w17,#24 add v2.4s,v2.4s,v3.4s ror w19,w19,#24 add v6.4s,v6.4s,v7.4s ror w20,w20,#24 add v18.4s,v18.4s,v19.4s ror w21,w21,#24 eor v20.16b,v1.16b,v2.16b add w13,w13,w17 eor v21.16b,v5.16b,v6.16b add w14,w14,w19 eor v22.16b,v17.16b,v18.16b add w15,w15,w20 ushr v1.4s,v20.4s,#25 add w16,w16,w21 ushr v5.4s,v21.4s,#25 eor w9,w9,w13 ushr v17.4s,v22.4s,#25 eor w10,w10,w14 sli v1.4s,v20.4s,#7 eor w11,w11,w15 sli v5.4s,v21.4s,#7 eor w12,w12,w16 sli v17.4s,v22.4s,#7 ror w9,w9,#25 ext v2.16b,v2.16b,v2.16b,#8 ror w10,w10,#25 ext v6.16b,v6.16b,v6.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w10 add v4.4s,v4.4s,v5.4s add w6,w6,w11 add v16.4s,v16.4s,v17.4s add w7,w7,w12 eor v3.16b,v3.16b,v0.16b add w8,w8,w9 eor v7.16b,v7.16b,v4.16b eor w21,w21,w5 eor v19.16b,v19.16b,v16.16b eor w17,w17,w6 rev32 v3.8h,v3.8h eor w19,w19,w7 rev32 v7.8h,v7.8h eor w20,w20,w8 rev32 v19.8h,v19.8h ror w21,w21,#16 add v2.4s,v2.4s,v3.4s ror w17,w17,#16 add v6.4s,v6.4s,v7.4s ror w19,w19,#16 add v18.4s,v18.4s,v19.4s ror w20,w20,#16 eor v20.16b,v1.16b,v2.16b add w15,w15,w21 eor v21.16b,v5.16b,v6.16b add w16,w16,w17 eor v22.16b,v17.16b,v18.16b add w13,w13,w19 ushr v1.4s,v20.4s,#20 add w14,w14,w20 ushr v5.4s,v21.4s,#20 eor w10,w10,w15 ushr v17.4s,v22.4s,#20 eor w11,w11,w16 sli v1.4s,v20.4s,#12 eor w12,w12,w13 sli v5.4s,v21.4s,#12 eor w9,w9,w14 sli v17.4s,v22.4s,#12 ror w10,w10,#20 add v0.4s,v0.4s,v1.4s ror w11,w11,#20 add v4.4s,v4.4s,v5.4s ror w12,w12,#20 add v16.4s,v16.4s,v17.4s ror w9,w9,#20 eor v20.16b,v3.16b,v0.16b add w5,w5,w10 eor v21.16b,v7.16b,v4.16b add w6,w6,w11 eor v22.16b,v19.16b,v16.16b add w7,w7,w12 ushr v3.4s,v20.4s,#24 add w8,w8,w9 ushr v7.4s,v21.4s,#24 eor w21,w21,w5 ushr v19.4s,v22.4s,#24 eor w17,w17,w6 sli v3.4s,v20.4s,#8 eor w19,w19,w7 sli v7.4s,v21.4s,#8 eor w20,w20,w8 sli v19.4s,v22.4s,#8 ror w21,w21,#24 add v2.4s,v2.4s,v3.4s ror w17,w17,#24 add v6.4s,v6.4s,v7.4s ror w19,w19,#24 add v18.4s,v18.4s,v19.4s ror w20,w20,#24 eor v20.16b,v1.16b,v2.16b add w15,w15,w21 eor v21.16b,v5.16b,v6.16b add w16,w16,w17 eor v22.16b,v17.16b,v18.16b add w13,w13,w19 ushr v1.4s,v20.4s,#25 add w14,w14,w20 ushr v5.4s,v21.4s,#25 eor w10,w10,w15 ushr v17.4s,v22.4s,#25 eor w11,w11,w16 sli v1.4s,v20.4s,#7 eor w12,w12,w13 sli v5.4s,v21.4s,#7 eor w9,w9,w14 sli v17.4s,v22.4s,#7 ror w10,w10,#25 ext v2.16b,v2.16b,v2.16b,#8 ror w11,w11,#25 ext v6.16b,v6.16b,v6.16b,#8 ror w12,w12,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 cbnz x4,Loop_neon add w5,w5,w22 // accumulate key block add v0.4s,v0.4s,v24.4s add x6,x6,x22,lsr#32 add v4.4s,v4.4s,v24.4s add w7,w7,w23 add v16.4s,v16.4s,v24.4s add x8,x8,x23,lsr#32 add v2.4s,v2.4s,v26.4s add w9,w9,w24 add v6.4s,v6.4s,v26.4s add x10,x10,x24,lsr#32 add 
v18.4s,v18.4s,v26.4s add w11,w11,w25 add v3.4s,v3.4s,v27.4s add x12,x12,x25,lsr#32 add w13,w13,w26 add v7.4s,v7.4s,v28.4s add x14,x14,x26,lsr#32 add w15,w15,w27 add v19.4s,v19.4s,v29.4s add x16,x16,x27,lsr#32 add w17,w17,w28 add v1.4s,v1.4s,v25.4s add x19,x19,x28,lsr#32 add w20,w20,w30 add v5.4s,v5.4s,v25.4s add x21,x21,x30,lsr#32 add v17.4s,v17.4s,v25.4s b.lo Ltail_neon add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor v0.16b,v0.16b,v20.16b eor x15,x15,x16 eor v1.16b,v1.16b,v21.16b eor x17,x17,x19 eor v2.16b,v2.16b,v22.16b eor x20,x20,x21 eor v3.16b,v3.16b,v23.16b ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 stp x5,x7,[x0,#0] // store output add x28,x28,#4 // increment counter stp x9,x11,[x0,#16] add v27.4s,v27.4s,v31.4s // += 4 stp x13,x15,[x0,#32] add v28.4s,v28.4s,v31.4s stp x17,x20,[x0,#48] add v29.4s,v29.4s,v31.4s add x0,x0,#64 st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 eor v4.16b,v4.16b,v20.16b eor v5.16b,v5.16b,v21.16b eor v6.16b,v6.16b,v22.16b eor v7.16b,v7.16b,v23.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 eor v16.16b,v16.16b,v0.16b eor v17.16b,v17.16b,v1.16b eor v18.16b,v18.16b,v2.16b eor v19.16b,v19.16b,v3.16b st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 b.hi Loop_outer_neon ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret Ltail_neon: add x2,x2,#256 cmp x2,#64 b.lo Less_than_64 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#4 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 b.eq Ldone_neon sub x2,x2,#64 cmp x2,#64 b.lo Less_than_128 ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor v0.16b,v0.16b,v20.16b eor v1.16b,v1.16b,v21.16b eor v2.16b,v2.16b,v22.16b eor v3.16b,v3.16b,v23.16b st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 b.eq Ldone_neon sub x2,x2,#64 cmp x2,#64 b.lo Less_than_192 ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor v4.16b,v4.16b,v20.16b eor v5.16b,v5.16b,v21.16b eor v6.16b,v6.16b,v22.16b eor v7.16b,v7.16b,v23.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 b.eq Ldone_neon sub x2,x2,#64 st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp] b Last_neon Less_than_128: st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp] b Last_neon Less_than_192: st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp] b Last_neon .align 4 Last_neon: sub x0,x0,#1 add x1,x1,x2 add x0,x0,x2 add x4,sp,x2 neg x2,x2 Loop_tail_neon: ldrb w10,[x1,x2] ldrb w11,[x4,x2] add x2,x2,#1 eor w10,w10,w11 strb 
w10,[x0,x2] cbnz x2,Loop_tail_neon stp xzr,xzr,[sp,#0] stp xzr,xzr,[sp,#16] stp xzr,xzr,[sp,#32] stp xzr,xzr,[sp,#48] Ldone_neon: ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .def ChaCha20_512_neon .type 32 .endef .align 5 ChaCha20_512_neon: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 adrp x5,Lsigma add x5,x5,:lo12:Lsigma stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] L512_or_more_neon: sub sp,sp,#128+64 ldp x22,x23,[x5] // load sigma ld1 {v24.4s},[x5],#16 ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ld1 {v25.4s,v26.4s},[x3] ldp x28,x30,[x4] // load counter ld1 {v27.4s},[x4] ld1 {v31.4s},[x5] #ifdef __AARCH64EB__ rev64 v24.4s,v24.4s ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif add v27.4s,v27.4s,v31.4s // += 1 stp q24,q25,[sp,#0] // off-load key block, invariant part add v27.4s,v27.4s,v31.4s // not typo str q26,[sp,#32] add v28.4s,v27.4s,v31.4s add v29.4s,v28.4s,v31.4s add v30.4s,v29.4s,v31.4s shl v31.4s,v31.4s,#2 // 1 -> 4 stp d8,d9,[sp,#128+0] // meet ABI requirements stp d10,d11,[sp,#128+16] stp d12,d13,[sp,#128+32] stp d14,d15,[sp,#128+48] sub x2,x2,#512 // not typo Loop_outer_512_neon: mov v0.16b,v24.16b mov v4.16b,v24.16b mov v8.16b,v24.16b mov v12.16b,v24.16b mov v16.16b,v24.16b mov v20.16b,v24.16b mov v1.16b,v25.16b mov w5,w22 // unpack key block mov v5.16b,v25.16b lsr x6,x22,#32 mov v9.16b,v25.16b mov w7,w23 mov v13.16b,v25.16b lsr x8,x23,#32 mov v17.16b,v25.16b mov w9,w24 mov v21.16b,v25.16b lsr x10,x24,#32 mov v3.16b,v27.16b mov w11,w25 mov v7.16b,v28.16b lsr x12,x25,#32 mov v11.16b,v29.16b mov w13,w26 mov v15.16b,v30.16b lsr x14,x26,#32 mov v2.16b,v26.16b mov w15,w27 mov v6.16b,v26.16b lsr x16,x27,#32 add v19.4s,v3.4s,v31.4s // +4 mov w17,w28 add v23.4s,v7.4s,v31.4s // +4 lsr x19,x28,#32 mov v10.16b,v26.16b mov w20,w30 mov v14.16b,v26.16b lsr x21,x30,#32 mov v18.16b,v26.16b stp q27,q28,[sp,#48] // off-load key block, variable part mov v22.16b,v26.16b str q29,[sp,#80] mov x4,#5 subs x2,x2,#512 Loop_upper_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 
ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v11.16b,v11.16b,v11.16b,#12 ext v15.16b,v15.16b,v15.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v23.16b,v23.16b,v23.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v9.16b,v9.16b,v9.16b,#4 ext v13.16b,v13.16b,v13.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 ext v21.16b,v21.16b,v21.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add 
v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v11.16b,v11.16b,v11.16b,#4 ext v15.16b,v15.16b,v15.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v23.16b,v23.16b,v23.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v9.16b,v9.16b,v9.16b,#12 ext v13.16b,v13.16b,v13.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 ext v21.16b,v21.16b,v21.16b,#12 cbnz x4,Loop_upper_neon add w5,w5,w22 // accumulate key block add x6,x6,x22,lsr#32 add w7,w7,w23 add x8,x8,x23,lsr#32 add w9,w9,w24 add x10,x10,x24,lsr#32 add 
w11,w11,w25 add x12,x12,x25,lsr#32 add w13,w13,w26 add x14,x14,x26,lsr#32 add w15,w15,w27 add x16,x16,x27,lsr#32 add w17,w17,w28 add x19,x19,x28,lsr#32 add w20,w20,w30 add x21,x21,x30,lsr#32 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#1 // increment counter mov w5,w22 // unpack key block lsr x6,x22,#32 stp x9,x11,[x0,#16] mov w7,w23 lsr x8,x23,#32 stp x13,x15,[x0,#32] mov w9,w24 lsr x10,x24,#32 stp x17,x20,[x0,#48] add x0,x0,#64 mov w11,w25 lsr x12,x25,#32 mov w13,w26 lsr x14,x26,#32 mov w15,w27 lsr x16,x27,#32 mov w17,w28 lsr x19,x28,#32 mov w20,w30 lsr x21,x30,#32 mov x4,#5 Loop_lower_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add 
w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v11.16b,v11.16b,v11.16b,#12 ext v15.16b,v15.16b,v15.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v23.16b,v23.16b,v23.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v9.16b,v9.16b,v9.16b,#4 ext v13.16b,v13.16b,v13.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 ext v21.16b,v21.16b,v21.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add 
v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v11.16b,v11.16b,v11.16b,#4 ext v15.16b,v15.16b,v15.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v23.16b,v23.16b,v23.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v9.16b,v9.16b,v9.16b,#12 ext v13.16b,v13.16b,v13.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 ext v21.16b,v21.16b,v21.16b,#12 cbnz x4,Loop_lower_neon add w5,w5,w22 // accumulate key block ldp q24,q25,[sp,#0] add x6,x6,x22,lsr#32 ldp q26,q27,[sp,#32] add w7,w7,w23 ldp q28,q29,[sp,#64] add x8,x8,x23,lsr#32 add v0.4s,v0.4s,v24.4s add w9,w9,w24 add v4.4s,v4.4s,v24.4s add x10,x10,x24,lsr#32 add v8.4s,v8.4s,v24.4s add w11,w11,w25 add v12.4s,v12.4s,v24.4s add x12,x12,x25,lsr#32 add v16.4s,v16.4s,v24.4s add w13,w13,w26 add v20.4s,v20.4s,v24.4s add x14,x14,x26,lsr#32 add v2.4s,v2.4s,v26.4s add w15,w15,w27 add v6.4s,v6.4s,v26.4s add x16,x16,x27,lsr#32 add v10.4s,v10.4s,v26.4s add w17,w17,w28 add v14.4s,v14.4s,v26.4s add x19,x19,x28,lsr#32 add v18.4s,v18.4s,v26.4s add w20,w20,w30 add v22.4s,v22.4s,v26.4s add x21,x21,x30,lsr#32 add v19.4s,v19.4s,v31.4s // +4 add x5,x5,x6,lsl#32 // pack add v23.4s,v23.4s,v31.4s // +4 add x7,x7,x8,lsl#32 add v3.4s,v3.4s,v27.4s ldp x6,x8,[x1,#0] // load input add v7.4s,v7.4s,v28.4s add x9,x9,x10,lsl#32 add v11.4s,v11.4s,v29.4s add x11,x11,x12,lsl#32 add v15.4s,v15.4s,v30.4s ldp x10,x12,[x1,#16] add v19.4s,v19.4s,v27.4s add x13,x13,x14,lsl#32 add v23.4s,v23.4s,v28.4s add x15,x15,x16,lsl#32 add v1.4s,v1.4s,v25.4s ldp x14,x16,[x1,#32] add v5.4s,v5.4s,v25.4s add x17,x17,x19,lsl#32 add v9.4s,v9.4s,v25.4s add x20,x20,x21,lsl#32 add 
v13.4s,v13.4s,v25.4s ldp x19,x21,[x1,#48] add v17.4s,v17.4s,v25.4s add x1,x1,#64 add v21.4s,v21.4s,v25.4s #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor v0.16b,v0.16b,v24.16b eor x15,x15,x16 eor v1.16b,v1.16b,v25.16b eor x17,x17,x19 eor v2.16b,v2.16b,v26.16b eor x20,x20,x21 eor v3.16b,v3.16b,v27.16b ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 stp x5,x7,[x0,#0] // store output add x28,x28,#7 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 eor v4.16b,v4.16b,v24.16b eor v5.16b,v5.16b,v25.16b eor v6.16b,v6.16b,v26.16b eor v7.16b,v7.16b,v27.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 eor v8.16b,v8.16b,v0.16b ldp q24,q25,[sp,#0] eor v9.16b,v9.16b,v1.16b ldp q26,q27,[sp,#32] eor v10.16b,v10.16b,v2.16b eor v11.16b,v11.16b,v3.16b st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64 ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64 eor v12.16b,v12.16b,v4.16b eor v13.16b,v13.16b,v5.16b eor v14.16b,v14.16b,v6.16b eor v15.16b,v15.16b,v7.16b st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64 ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64 eor v16.16b,v16.16b,v8.16b eor v17.16b,v17.16b,v9.16b eor v18.16b,v18.16b,v10.16b eor v19.16b,v19.16b,v11.16b st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 shl v0.4s,v31.4s,#1 // 4 -> 8 eor v20.16b,v20.16b,v12.16b eor v21.16b,v21.16b,v13.16b eor v22.16b,v22.16b,v14.16b eor v23.16b,v23.16b,v15.16b st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64 add v27.4s,v27.4s,v0.4s // += 8 add v28.4s,v28.4s,v0.4s add v29.4s,v29.4s,v0.4s add v30.4s,v30.4s,v0.4s b.hs Loop_outer_512_neon adds x2,x2,#512 ushr v0.4s,v31.4s,#2 // 4 -> 1 ldp d8,d9,[sp,#128+0] // meet ABI requirements ldp d10,d11,[sp,#128+16] ldp d12,d13,[sp,#128+32] ldp d14,d15,[sp,#128+48] stp q24,q31,[sp,#0] // wipe off-load area stp q24,q31,[sp,#32] stp q24,q31,[sp,#64] b.eq Ldone_512_neon cmp x2,#192 sub v27.4s,v27.4s,v0.4s // -= 1 sub v28.4s,v28.4s,v0.4s sub v29.4s,v29.4s,v0.4s add sp,sp,#128 b.hs Loop_outer_neon eor v25.16b,v25.16b,v25.16b eor v26.16b,v26.16b,v26.16b eor v27.16b,v27.16b,v27.16b eor v28.16b,v28.16b,v28.16b eor v29.16b,v29.16b,v29.16b eor v30.16b,v30.16b,v30.16b b Loop_outer Ldone_512_neon: ldp x19,x20,[x29,#16] add sp,sp,#128+64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
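// ----------------------------------------------------------------------------
// Editor's note (hypothetical sketch, not part of the generated file): both
// the scalar Loop and the NEON loops above are unrolled column/diagonal
// applications of the ChaCha20 quarter-round
//   a += b; d ^= a; d = rotl32(d,16)
//   c += d; b ^= c; b = rotl32(b,12)
//   a += b; d ^= a; d = rotl32(d, 8)
//   c += d; b ^= c; b = rotl32(b, 7)
// with rotl32(x,k) written as "ror #(32-k)", as in the generated code.  A
// minimal standalone illustration on w0..w3 = (a,b,c,d) is kept under
// "#if 0" so it is never assembled; the symbol name is illustrative only.
#if 0
chacha20_quarter_round_sketch:
    add w0,w0,w1
    eor w3,w3,w0
    ror w3,w3,#16        // d = rotl32(d,16)
    add w2,w2,w3
    eor w1,w1,w2
    ror w1,w1,#20        // b = rotl32(b,12)
    add w0,w0,w1
    eor w3,w3,w0
    ror w3,w3,#24        // d = rotl32(d,8)
    add w2,w2,w3
    eor w1,w1,w2
    ror w1,w1,#25        // b = rotl32(b,7)
    ret
#endif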