Dataset columns: repo_id (string, length 5-115), size (int64, 590-5.01M), file_path (string, length 4-212), content (string, length 590-5.01M)
repo_id: marvin-hansen/iggy-streaming-system
size: 82,208
file_path: thirdparty/crates/ring-0.17.9/pregenerated/aesv8-gcm-armv8-win64.S
content:
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <ring-core/arm_arch.h> #if __ARM_MAX_ARCH__ >= 8 .arch armv8-a+crypto .text .globl aes_gcm_enc_kernel .def aes_gcm_enc_kernel .type 32 .endef .align 4 aes_gcm_enc_kernel: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys add x4, x0, x1, lsr #3 // end_input_ptr lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible sub x5, x5, #1 // byte_len - 1 ldr q18, [x8, #0] // load rk0 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) ldr q25, [x8, #112] // load rk7 add x5, x5, x0 lsr x12, x11, #32 fmov d2, x10 // CTR block 2 orr w11, w11, w11 rev w12, w12 // rev_ctr32 fmov d1, x10 // CTR block 1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 0 add w12, w12, #1 // increment rev_ctr32 rev w9, w12 // CTR block 1 fmov d3, x10 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 1 add w12, w12, #1 // CTR block 1 ldr q19, [x8, #16] // load rk1 fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 ldr q20, [x8, #32] // load rk2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 orr x9, x11, x9, lsl #32 // CTR block 3 fmov v3.d[1], x9 // CTR block 3 aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q21, [x8, #48] // load rk3 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q24, [x8, #96] // load rk6 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q23, [x8, #80] // load rk5 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ldr q14, [x6, #48] // load h3l | h3h ext v14.16b, v14.16b, v14.16b, #8 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q22, [x8, #64] // load rk4 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 ldr q13, [x6, #32] // load h2l | h2h ext v13.16b, v13.16b, v13.16b, #8 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q15, [x6, #80] // load h4l | h4h ext v15.16b, v15.16b, v15.16b, #8 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 ldr q29, [x8, #176] // load rk11 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 ldr q26, [x8, #128] // load rk8 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 add w12, w12, #1 // CTR block 3 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 ld1 { v11.16b}, [x3] ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b aese v2.16b, v22.16b aesmc v2.16b, 
v2.16b // AES block 2 - round 4 aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 0 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 2 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 trn2 v17.2d, v14.2d, v15.2d // h4l | h3l aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 ldr q27, [x8, #144] // load rk9 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 ldr q12, [x6] // load h1l | h1h ext v12.16b, v12.16b, v12.16b, #8 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 2 - round 6 ldr q28, [x8, #160] // load rk10 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 trn1 v9.2d, v14.2d, v15.2d // h4h | h3h aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 trn2 v16.2d, v12.2d, v13.2d // h2l | h1l aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 b.lt Lenc_finish_first_blocks // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 0 - round 9 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 3 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 b.eq Lenc_finish_first_blocks // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 2 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 Lenc_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks eor v17.16b, v17.16b, v9.16b // h4k | h3k aese v2.16b, v31.16b // AES block 2 - round N-1 trn1 v8.2d, v12.2d, v13.2d // h2h | h1h aese v1.16b, v31.16b // AES block 1 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 aese v3.16b, v31.16b // AES block 3 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k b.ge Lenc_tail // handle tail ldp x19, x20, [x0, #16] // AES block 1 - load plaintext rev w9, w12 // CTR block 4 ldp x6, x7, [x0, #0] // AES block 0 - load plaintext ldp x23, x24, [x0, #48] // AES block 3 - load plaintext ldp x21, x22, [x0, #32] // AES block 2 - load plaintext add x0, x0, #64 // AES input_ptr update eor x19, x19, x13 // AES block 1 - round N low eor 
x20, x20, x14 // AES block 1 - round N high fmov d5, x19 // AES block 1 - mov low eor x6, x6, x13 // AES block 0 - round N low eor x7, x7, x14 // AES block 0 - round N high eor x24, x24, x14 // AES block 3 - round N high fmov d4, x6 // AES block 0 - mov low cmp x0, x5 // check if we have <= 8 blocks fmov v4.d[1], x7 // AES block 0 - mov high eor x23, x23, x13 // AES block 3 - round N low eor x21, x21, x13 // AES block 2 - round N low fmov v5.d[1], x20 // AES block 1 - mov high fmov d6, x21 // AES block 2 - mov low add w12, w12, #1 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov d7, x23 // AES block 3 - mov low eor x22, x22, x14 // AES block 2 - round N high fmov v6.d[1], x22 // AES block 2 - mov high eor v4.16b, v4.16b, v0.16b // AES block 0 - result fmov d0, x10 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add w12, w12, #1 // CTR block 5 eor v5.16b, v5.16b, v1.16b // AES block 1 - result fmov d1, x10 // CTR block 5 orr x9, x11, x9, lsl #32 // CTR block 5 fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 st1 { v4.16b}, [x2], #16 // AES block 0 - store result fmov v7.d[1], x24 // AES block 3 - mov high orr x9, x11, x9, lsl #32 // CTR block 6 eor v6.16b, v6.16b, v2.16b // AES block 2 - result st1 { v5.16b}, [x2], #16 // AES block 1 - store result add w12, w12, #1 // CTR block 6 fmov d2, x10 // CTR block 6 fmov v2.d[1], x9 // CTR block 6 st1 { v6.16b}, [x2], #16 // AES block 2 - store result rev w9, w12 // CTR block 7 orr x9, x11, x9, lsl #32 // CTR block 7 eor v7.16b, v7.16b, v3.16b // AES block 3 - result st1 { v7.16b}, [x2], #16 // AES block 3 - store result b.ge Lenc_prepretail // do prepretail Lenc_main_loop: // main loop start aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free) aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d3, x10 // CTR block 4k+3 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 fmov v3.d[1], x9 // CTR block 4k+3 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 ldp x21, x22, [x0, #32] // AES block 4k+6 - load plaintext aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 eor v4.16b, v4.16b, v11.16b // PRE 1 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x23, x23, x13 // AES block 4k+7 - round N low aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d10, v17.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high eor x22, x22, x14 // AES block 4k+6 - round N high mov d8, v4.d[1] // GHASH block 4k - mid aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high pmull v10.1q, v8.1d, v10.1d // 
GHASH block 4k - mid rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free) pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor x19, x19, x13 // AES block 4k+5 - round N low aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 eor x21, x21, x13 // AES block 4k+6 - round N low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 movi v8.8b, #0xc2 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check fmov d5, x19 // AES block 4k+5 - mov low ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext b.lt Lenc_main_loop_continue // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 
10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq Lenc_main_loop_continue // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 Lenc_main_loop_continue: shl d8, d8, #56 // mod_constant eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid add w12, w12, #1 // CTR block 4k+3 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up add x0, x0, #64 // AES input_ptr update pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid rev w9, w12 // CTR block 4k+8 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor x6, x6, x13 // AES block 4k+4 - round N low eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up eor x7, x7, x14 // AES block 4k+4 - round N high fmov d4, x6 // AES block 4k+4 - mov low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid eor x20, x20, x14 // AES block 4k+5 - round N high eor x24, x24, x14 // AES block 4k+7 - round N high add w12, w12, #1 // CTR block 4k+8 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 fmov v4.d[1], x7 // AES block 4k+4 - mov high eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid fmov d7, x23 // AES block 4k+7 - mov low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 fmov v5.d[1], x20 // AES block 4k+5 - mov high fmov d6, x21 // AES block 4k+6 - mov low cmp x0, x5 // LOOP CONTROL fmov v6.d[1], x22 // AES block 4k+6 - mov high pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 4k+8 rev w9, w12 // CTR block 4k+9 add w12, w12, #1 // CTR block 4k+9 eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result fmov d1, x10 // CTR block 4k+9 orr x9, x11, x9, lsl #32 // CTR block 4k+9 fmov v1.d[1], x9 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 rev w9, w12 // CTR block 4k+10 st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result orr x9, x11, x9, lsl #32 // CTR block 4k+10 eor v11.16b, v11.16b, v9.16b // MODULO - fold into low fmov v7.d[1], x24 // AES block 4k+7 - mov high ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result add w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result fmov d2, x10 // CTR block 4k+10 st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result fmov v2.d[1], x9 // CTR block 4k+10 rev w9, w12 // CTR block 4k+11 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low orr x9, x11, x9, lsl #32 // CTR block 4k+11 eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result b.lt Lenc_main_loop Lenc_prepretail: // PREPRETAIL aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, 
and t2 free) aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov d3, x10 // CTR block 4k+3 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free) fmov v3.d[1], x9 // CTR block 4k+3 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid add w12, w12, #1 // CTR block 4k+3 pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 
- round 6 movi v8.8b, #0xc2 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v10.16b, v10.16b, v9.16b // karatsuba tidy up aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 pmull v4.1q, v9.1d, v8.1d ext v9.16b, v9.16b, v9.16b, #8 eor v10.16b, v10.16b, v11.16b b.lt Lenc_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 b.eq Lenc_finish_prepretail // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 Lenc_finish_prepretail: eor v10.16b, v10.16b, v4.16b eor v10.16b, v10.16b, v9.16b pmull v4.1q, v10.1d, v8.1d ext v10.16b, v10.16b, v10.16b, #8 aese v1.16b, v31.16b // AES block 4k+5 - round N-1 eor v11.16b, v11.16b, v4.16b aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b Lenc_tail: // TAIL ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high cmp x5, #48 fmov d4, x6 // AES block 4k+4 - mov low fmov v4.d[1], x7 // AES block 4k+4 - mov high eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result b.gt Lenc_blocks_more_than_3 cmp x5, #32 mov v3.16b, v2.16b movi v11.8b, #0 movi v9.8b, #0 sub w12, w12, #1 mov v2.16b, v1.16b movi v10.8b, #0 b.gt Lenc_blocks_more_than_2 mov v3.16b, v1.16b sub w12, w12, #1 cmp x5, #16 b.gt Lenc_blocks_more_than_1 sub w12, w12, #1 b Lenc_blocks_less_than_1 Lenc_blocks_more_than_3: // blocks left > 3 st1 { v5.16b}, [x2], #16 
// AES final-3 block - store result ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-3 block eor x6, x6, x13 // AES final-2 block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag eor x7, x7, x14 // AES final-2 block - round N high mov d22, v4.d[1] // GHASH final-3 block - mid fmov d5, x6 // AES final-2 block - mov low fmov v5.d[1], x7 // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in mov d10, v17.d[1] // GHASH final-3 block - mid pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor v5.16b, v5.16b, v1.16b // AES final-2 block - result Lenc_blocks_more_than_2: // blocks left > 2 st1 { v5.16b}, [x2], #16 // AES final-2 block - store result ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-2 block eor x6, x6, x13 // AES final-1 block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag fmov d5, x6 // AES final-1 block - mov low eor x7, x7, x14 // AES final-1 block - round N high fmov v5.d[1], x7 // AES final-1 block - mov high movi v8.8b, #0 // suppress further partial tag feed in pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid eor v5.16b, v5.16b, v2.16b // AES final-1 block - result eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid Lenc_blocks_more_than_1: // blocks left > 1 st1 { v5.16b}, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ldp x6, x7, [x0], #16 // AES final block - load input low & high eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in eor x6, x6, x13 // AES final block - round N low mov d22, v4.d[1] // GHASH final-1 block - mid pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor x7, x7, x14 // AES final block - round N high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high ins v22.d[1], v22.d[0] // GHASH final-1 block - mid fmov d5, x6 // AES final block - mov low fmov v5.d[1], x7 // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low eor v5.16b, v5.16b, v3.16b // AES final block - result eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low Lenc_blocks_less_than_1: // blocks left <= 1 and x1, x1, #127 // bit_length %= 128 mvn x13, xzr // rkN_l = 0xffffffffffffffff sub x1, x1, #128 // bit_length -= 128 neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128]) ld1 { v18.16b}, [x2] // load existing bytes where the possibly partial last block is to be stored mvn x14, xzr // rkN_h = 0xffffffffffffffff and x1, x1, #127 // bit_length %= 128 lsr x14, x14, x1 // rkN_h is mask for top 64b of last block cmp x1, #64 csel x6, x13, x14, lt csel x7, x14, xzr, lt fmov d0, x6 // ctr0b is mask for last block fmov v0.d[1], x7 and v5.16b, v5.16b, v0.16b // possibly partial last 
block has zeroes in highest bits rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag bif v5.16b, v18.16b, v0.16b // insert existing bytes in top end of result before storing pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high mov d8, v4.d[1] // GHASH final block - mid rev w9, w12 pmull v21.1q, v4.1d, v12.1d // GHASH final block - low eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block - mid movi v8.8b, #0xc2 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment str w9, [x16, #12] // store the updated counter st1 { v5.16b}, [x2] // store all 16B eor v11.16b, v11.16b, v9.16b // MODULO - fold into low eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] ldp x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret .globl aes_gcm_dec_kernel .def aes_gcm_dec_kernel .type 32 .endef .align 4 aes_gcm_dec_kernel: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! 
mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ldr q26, [x8, #128] // load rk8 sub x5, x5, #1 // byte_len - 1 ldr q25, [x8, #112] // load rk7 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) add x4, x0, x1, lsr #3 // end_input_ptr ldr q24, [x8, #96] // load rk6 lsr x12, x11, #32 ldr q23, [x8, #80] // load rk5 orr w11, w11, w11 ldr q21, [x8, #48] // load rk3 add x5, x5, x0 rev w12, w12 // rev_ctr32 add w12, w12, #1 // increment rev_ctr32 fmov d3, x10 // CTR block 3 rev w9, w12 // CTR block 1 add w12, w12, #1 // CTR block 1 fmov d1, x10 // CTR block 1 orr x9, x11, x9, lsl #32 // CTR block 1 ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 fmov d2, x10 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 3 ldr q18, [x8, #0] // load rk0 fmov v3.d[1], x9 // CTR block 3 add w12, w12, #1 // CTR block 3 ldr q22, [x8, #64] // load rk4 ldr q19, [x8, #16] // load rk1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 0 ldr q14, [x6, #48] // load h3l | h3h ext v14.16b, v14.16b, v14.16b, #8 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 ldr q15, [x6, #80] // load h4l | h4h ext v15.16b, v15.16b, v15.16b, #8 aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q13, [x6, #32] // load h2l | h2h ext v13.16b, v13.16b, v13.16b, #8 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q20, [x8, #32] // load rk2 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ld1 { v11.16b}, [x3] ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q27, [x8, #144] // load rk9 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q12, [x6] // load h1l | h1h ext v12.16b, v12.16b, v12.16b, #8 aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q28, [x8, #160] // load rk10 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 0 - round 4 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 2 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - 
round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 2 - round 5 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 cmp x17, #12 // setup flags for AES-128/192/256 check aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 2 - round 6 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 ldr q29, [x8, #176] // load rk11 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 b.lt Ldec_finish_first_blocks // branch if AES-128 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 0 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 3 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 b.eq Ldec_finish_first_blocks // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 2 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 Ldec_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks trn1 v9.2d, v14.2d, v15.2d // h4h | h3h trn2 v17.2d, v14.2d, v15.2d // h4l | h3l trn1 v8.2d, v12.2d, v13.2d // h2h | h1h trn2 v16.2d, v12.2d, v13.2d // h2l | h1l eor v17.16b, v17.16b, v9.16b // h4k | h3k aese v1.16b, v31.16b // AES block 1 - round N-1 aese v2.16b, v31.16b // AES block 2 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k aese v3.16b, v31.16b // AES block 3 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 b.ge Ldec_tail // handle tail ldr q4, [x0, #0] // AES block 0 - load ciphertext ldr q5, [x0, #16] // AES block 1 - load ciphertext rev w9, w12 // CTR block 4 eor v0.16b, v4.16b, v0.16b // AES block 0 - result eor v1.16b, v5.16b, v1.16b // AES block 1 - result rev64 v5.16b, v5.16b // GHASH block 1 ldr q7, [x0, #48] // AES block 3 - load ciphertext mov x7, v0.d[1] // AES block 0 - mov high mov x6, v0.d[0] // AES block 0 - mov low rev64 v4.16b, v4.16b // GHASH block 0 add w12, w12, #1 // CTR block 4 fmov d0, x10 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add w12, w12, #1 // CTR block 5 mov x19, v1.d[0] // AES block 1 - mov low orr x9, x11, x9, lsl #32 // CTR block 5 mov x20, v1.d[1] // AES block 1 - mov high eor x7, x7, x14 // AES block 0 - round N 
high eor x6, x6, x13 // AES block 0 - round N low stp x6, x7, [x2], #16 // AES block 0 - store result fmov d1, x10 // CTR block 5 ldr q6, [x0, #32] // AES block 2 - load ciphertext add x0, x0, #64 // AES input_ptr update fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 add w12, w12, #1 // CTR block 6 eor x19, x19, x13 // AES block 1 - round N low orr x9, x11, x9, lsl #32 // CTR block 6 eor x20, x20, x14 // AES block 1 - round N high stp x19, x20, [x2], #16 // AES block 1 - store result eor v2.16b, v6.16b, v2.16b // AES block 2 - result cmp x0, x5 // check if we have <= 8 blocks b.ge Ldec_prepretail // do prepretail Ldec_main_loop: // main loop start mov x21, v2.d[0] // AES block 4k+2 - mov low ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 mov x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 eor v4.16b, v4.16b, v11.16b // PRE 1 rev w9, w12 // CTR block 4k+7 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x23, v3.d[0] // AES block 4k+3 - mov low pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 orr x9, x11, x9, lsl #32 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov v3.d[1], x9 // CTR block 4k+7 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor x22, x22, x14 // AES block 4k+2 - round N high aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 rev64 v6.16b, v6.16b // GHASH block 4k+2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x21, x21, x13 // AES block 4k+2 - round N low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 stp x21, x22, [x2], #16 // AES block 4k+2 - store result pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 rev64 v7.16b, v7.16b // GHASH block 4k+3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor x23, x23, x13 // AES block 4k+3 - round N low pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor x24, x24, x14 // AES block 4k+3 - round N high eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 add w12, w12, #1 // CTR block 4k+7 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, v6.1d, 
v13.1d // GHASH block 4k+2 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid rev w9, w12 // CTR block 4k+8 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 add w12, w12, #1 // CTR block 4k+8 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high mov d6, v7.d[1] // GHASH block 4k+3 - mid aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid movi v8.8b, #0xc2 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 b.lt Ldec_main_loop_continue // branch if AES-128 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq Ldec_main_loop_continue // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - 
round 12 Ldec_main_loop_continue: pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext aese v0.16b, v31.16b // AES block 4k+4 - round N-1 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext mov x7, v0.d[1] // AES block 4k+4 - mov high eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid aese v1.16b, v31.16b // AES block 4k+5 - round N-1 add x0, x0, #64 // AES input_ptr update mov x6, v0.d[0] // AES block 4k+4 - mov low fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 4k+8 pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result rev w9, w12 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+9 cmp x0, x5 // LOOP CONTROL add w12, w12, #1 // CTR block 4k+9 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high mov x20, v1.d[1] // AES block 4k+5 - mov high eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low mov x19, v1.d[0] // AES block 4k+5 - mov low fmov d1, x10 // CTR block 4k+9 ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment fmov v1.d[1], x9 // CTR block 4k+9 rev w9, w12 // CTR block 4k+10 add w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+10 rev64 v5.16b, v5.16b // GHASH block 4k+5 eor x20, x20, x14 // AES block 4k+5 - round N high stp x6, x7, [x2], #16 // AES block 4k+4 - store result eor x19, x19, x13 // AES block 4k+5 - round N low stp x19, x20, [x2], #16 // AES block 4k+5 - store result rev64 v4.16b, v4.16b // GHASH block 4k+4 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low b.lt Ldec_main_loop Ldec_prepretail: // PREPRETAIL ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 mov x21, v2.d[0] // AES block 4k+2 - mov low eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 mov x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 rev w9, w12 // CTR block 4k+7 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v6.16b, v6.16b // GHASH block 4k+2 orr x9, x11, x9, lsl #32 // CTR block 4k+7 mov x23, v3.d[0] // AES block 4k+3 - mov low aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high fmov v3.d[1], x9 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES 
block 4k+6 - round 1 rev64 v7.16b, v7.16b // GHASH block 4k+3 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 mov d6, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 movi v8.8b, #0xc2 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 cmp x17, #12 // setup flags for AES-128/192/256 check eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 shl d8, d8, #56 // mod_constant aese 
v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 b.lt Ldec_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 b.eq Ldec_finish_prepretail // branch if AES-192 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 Ldec_finish_prepretail: eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor x22, x22, x14 // AES block 4k+2 - round N high eor x23, x23, x13 // AES block 4k+3 - round N low eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid add w12, w12, #1 // CTR block 4k+7 eor x21, x21, x13 // AES block 4k+2 - round N low pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor x24, x24, x14 // AES block 4k+3 - round N high stp x21, x22, [x2], #16 // AES block 4k+2 - store result ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low Ldec_tail: // TAIL sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result mov x6, v0.d[0] // AES block 4k+4 - mov low mov x7, v0.d[1] // AES block 4k+4 - mov high ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag cmp x5, #48 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high b.gt Ldec_blocks_more_than_3 sub w12, w12, #1 mov v3.16b, v2.16b movi v10.8b, #0 movi v11.8b, #0 cmp x5, #32 movi v9.8b, #0 mov v2.16b, v1.16b b.gt Ldec_blocks_more_than_2 sub w12, w12, #1 mov v3.16b, v1.16b cmp x5, #16 b.gt Ldec_blocks_more_than_1 sub w12, w12, #1 b Ldec_blocks_less_than_1 Ldec_blocks_more_than_3: // blocks left > 3 rev64 v4.16b, v5.16b // GHASH final-3 block ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext stp x6, x7, [x2], #16 // AES final-3 block - store result mov d10, v17.d[1] // GHASH final-3 block - mid eor v4.16b, v4.16b, v8.16b // feed in partial tag eor v0.16b, v5.16b, v1.16b // AES final-2 block - result mov d22, v4.d[1] // GHASH 
final-3 block - mid mov x6, v0.d[0] // AES final-2 block - mov low mov x7, v0.d[1] // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor x6, x6, x13 // AES final-2 block - round N low pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low eor x7, x7, x14 // AES final-2 block - round N high Ldec_blocks_more_than_2: // blocks left > 2 rev64 v4.16b, v5.16b // GHASH final-2 block ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag stp x6, x7, [x2], #16 // AES final-2 block - store result eor v0.16b, v5.16b, v2.16b // AES final-1 block - result mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid mov x6, v0.d[0] // AES final-1 block - mov low mov x7, v0.d[1] // AES final-1 block - mov high eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low movi v8.8b, #0 // suppress further partial tag feed in pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high eor x6, x6, x13 // AES final-1 block - round N low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid eor x7, x7, x14 // AES final-1 block - round N high Ldec_blocks_more_than_1: // blocks left > 1 stp x6, x7, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in mov d22, v4.d[1] // GHASH final-1 block - mid eor v0.16b, v5.16b, v3.16b // AES final block - result pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low mov x6, v0.d[0] // AES final block - mov low ins v22.d[1], v22.d[0] // GHASH final-1 block - mid mov x7, v0.d[1] // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid eor x6, x6, x13 // AES final block - round N low eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor x7, x7, x14 // AES final block - round N high Ldec_blocks_less_than_1: // blocks left <= 1 and x1, x1, #127 // bit_length %= 128 mvn x14, xzr // rkN_h = 0xffffffffffffffff sub x1, x1, #128 // bit_length -= 128 mvn x13, xzr // rkN_l = 0xffffffffffffffff ldp x4, x5, [x2] // load existing bytes we need to not overwrite neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128]) and x1, x1, #127 // bit_length %= 128 lsr x14, x14, x1 // rkN_h is mask for top 64b of last block cmp x1, #64 csel x9, x13, x14, lt csel x10, x14, xzr, lt fmov d0, x9 // ctr0b is mask for last block and x6, x6, x9 mov v0.d[1], x10 bic x4, x4, x9 // mask out low existing bytes rev w9, w12 bic x5, x5, x10 // mask out high existing bytes orr x6, x6, x4 and x7, x7, x10 orr x7, x7, x5 and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag pmull v21.1q, v4.1d, v12.1d // GHASH final block - low mov d8, v4.d[1] 
// GHASH final block - mid eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block - mid movi v8.8b, #0xc2 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment eor v11.16b, v11.16b, v8.16b // MODULO - fold into low stp x6, x7, [x2] str w9, [x16, #12] // store the updated counter eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] ldp x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
marvin-hansen/iggy-streaming-system
60,166
thirdparty/crates/ring-0.17.9/pregenerated/sha256-armv4-linux32.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) @ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved. @ @ Licensed under the OpenSSL license (the "License"). You may not use @ this file except in compliance with the License. You can obtain a copy @ in the file LICENSE in the source distribution or at @ https://www.openssl.org/source/license.html @ ==================================================================== @ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL @ project. The module is, however, dual licensed under OpenSSL and @ CRYPTOGAMS licenses depending on where you obtain it. For further @ details see http://www.openssl.org/~appro/cryptogams/. @ @ Permission to use under GPL terms is granted. @ ==================================================================== @ SHA256 block procedure for ARMv4. May 2007. @ Performance is ~2x better than gcc 3.4 generated code and in "abso- @ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per @ byte [on single-issue Xscale PXA250 core]. @ July 2010. @ @ Rescheduling for dual-issue pipeline resulted in 22% improvement on @ Cortex A8 core and ~20 cycles per processed byte. @ February 2011. @ @ Profiler-assisted and platform-specific optimization resulted in 16% @ improvement on Cortex A8 core and ~15.4 cycles per processed byte. @ September 2013. @ @ Add NEON implementation. On Cortex A8 it was measured to process one @ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon @ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only @ code (meaning that latter performs sub-optimally, nothing was done @ about it). @ May 2014. @ @ Add ARMv8 code path performing at 2.0 cpb on Apple A7. #ifndef __KERNEL__ # include <ring-core/arm_arch.h> #else # define __ARM_ARCH __LINUX_ARM_ARCH__ # define __ARM_MAX_ARCH__ 7 #endif @ Silence ARMv8 deprecated IT instruction warnings. This file is used by both @ ARMv7 and ARMv8 processors. It does have ARMv8-only code, but those @ instructions are manually-encoded. (See unsha256.) 
.arch armv7-a .text #if defined(__thumb2__) .syntax unified .thumb #else .code 32 #endif .type K256,%object .align 5 K256: .word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .size K256,.-K256 .word 0 @ terminator .align 5 .globl sha256_block_data_order_nohw .hidden sha256_block_data_order_nohw .type sha256_block_data_order_nohw,%function sha256_block_data_order_nohw: add r2,r1,r2,lsl#6 @ len to point at the end of inp stmdb sp!,{r0,r1,r2,r4-r11,lr} ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} adr r14,K256 sub sp,sp,#16*4 @ alloca(X[16]) .Loop: # if __ARM_ARCH>=7 ldr r2,[r1],#4 # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ magic eor r12,r12,r12 #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 0 # if 0==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 add r4,r4,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r8,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 0 add r4,r4,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 0==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r8,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#0*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 0==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 0<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#2*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#15*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 1 # if 1==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 add r11,r11,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r7,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 1 add r11,r11,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 1==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r7,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#1*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 1==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 1<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#3*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#0*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 2 # if 2==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 add r10,r10,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r6,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 2 add r10,r10,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 2==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r6,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#2*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 2==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 2<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#4*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#1*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 3 # if 3==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 add r9,r9,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r5,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 3 add r9,r9,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 3==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r5,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#3*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 3==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 3<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#5*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#2*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 4 # if 4==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 add r8,r8,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r4,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 4 add r8,r8,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 4==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r4,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#4*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 4==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 4<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#6*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#3*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 5 # if 5==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 add r7,r7,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r11,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 5 add r7,r7,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 5==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r11,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#5*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 5==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 5<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#7*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#4*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 6 # if 6==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 add r6,r6,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r10,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 6 add r6,r6,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 6==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r10,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#6*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 6==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 6<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#8*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#5*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 7 # if 7==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 add r5,r5,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r9,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 7 add r5,r5,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 7==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r9,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#7*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 7==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 7<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#9*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#6*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 8 # if 8==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 add r4,r4,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r8,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 8 add r4,r4,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 8==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r8,r8,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r8,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#8*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 8==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 8<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#10*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#7*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 9 # if 9==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 add r11,r11,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r7,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 9 add r11,r11,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 9==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r7,r7,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r7,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#9*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 9==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 9<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#11*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#8*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 10 # if 10==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 add r10,r10,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r6,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 10 add r10,r10,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 10==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r6,r6,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r6,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#10*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 10==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 10<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#12*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#9*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 11 # if 11==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 add r9,r9,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r5,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 11 add r9,r9,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 11==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r5,r5,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r5,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#11*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 11==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 11<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#13*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#10*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 12 # if 12==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 add r8,r8,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r4,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 12 add r8,r8,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 12==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r4,r4,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r4,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#12*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 12==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 12<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#14*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#11*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 13 # if 13==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 add r7,r7,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r11,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 13 add r7,r7,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 13==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r11,r11,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r11,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#13*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 13==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 13<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#15*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#12*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 14 # if 14==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 add r6,r6,r12 @ h+=Maj(a,b,c) from the past eor r0,r0,r10,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 14 add r6,r6,r12 @ h+=Maj(a,b,c) from the past ldrb r12,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r12,lsl#8 ldrb r12,[r1],#4 orr r2,r2,r0,lsl#16 # if 14==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r10,r10,ror#5 orr r2,r2,r12,lsl#24 eor r0,r0,r10,ror#19 @ Sigma1(e) #endif ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#14*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 14==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 14<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#0*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#13*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 @ ldr r2,[r1],#4 @ 15 # if 15==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 add r5,r5,r3 @ h+=Maj(a,b,c) from the past eor r0,r0,r9,ror#19 @ Sigma1(e) # ifndef __ARMEB__ rev r2,r2 # endif #else @ ldrb r2,[r1,#3] @ 15 add r5,r5,r3 @ h+=Maj(a,b,c) from the past ldrb r3,[r1,#2] ldrb r0,[r1,#1] orr r2,r2,r3,lsl#8 ldrb r3,[r1],#4 orr r2,r2,r0,lsl#16 # if 15==15 str r1,[sp,#17*4] @ make room for r1 # endif eor r0,r9,r9,ror#5 orr r2,r2,r3,lsl#24 eor r0,r0,r9,ror#19 @ Sigma1(e) #endif ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#15*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 15==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 15<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#1*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#14*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) .Lrounds_16_xx: @ ldr r2,[sp,#1*4] @ 16 @ ldr r1,[sp,#14*4] mov r0,r2,ror#7 add r4,r4,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#0*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#9*4] add r12,r12,r0 eor r0,r8,r8,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r8,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#0*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 16==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 16<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#2*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#15*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#2*4] @ 17 @ ldr r1,[sp,#15*4] mov r0,r2,ror#7 add r11,r11,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#1*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#10*4] add r3,r3,r0 eor r0,r7,r7,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r7,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#1*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 17==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 17<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#3*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#0*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#3*4] @ 18 @ ldr r1,[sp,#0*4] mov r0,r2,ror#7 add r10,r10,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#2*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#11*4] add r12,r12,r0 eor r0,r6,r6,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r6,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#2*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 18==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 18<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#4*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#1*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#4*4] @ 19 @ ldr r1,[sp,#1*4] mov r0,r2,ror#7 add r9,r9,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#3*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#12*4] add r3,r3,r0 eor r0,r5,r5,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r5,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#3*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 19==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 19<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#5*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#2*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#5*4] @ 20 @ ldr r1,[sp,#2*4] mov r0,r2,ror#7 add r8,r8,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#4*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#13*4] add r12,r12,r0 eor r0,r4,r4,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r4,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#4*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 20==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 20<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#6*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#3*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#6*4] @ 21 @ ldr r1,[sp,#3*4] mov r0,r2,ror#7 add r7,r7,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#5*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#14*4] add r3,r3,r0 eor r0,r11,r11,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r11,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#5*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 21==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 21<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#7*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#4*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#7*4] @ 22 @ ldr r1,[sp,#4*4] mov r0,r2,ror#7 add r6,r6,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#6*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#15*4] add r12,r12,r0 eor r0,r10,r10,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r10,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#6*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 22==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 22<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#8*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#5*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#8*4] @ 23 @ ldr r1,[sp,#5*4] mov r0,r2,ror#7 add r5,r5,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#7*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#0*4] add r3,r3,r0 eor r0,r9,r9,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r9,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#7*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 23==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 23<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#9*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#6*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#9*4] @ 24 @ ldr r1,[sp,#6*4] mov r0,r2,ror#7 add r4,r4,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#8*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#1*4] add r12,r12,r0 eor r0,r8,r8,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r8,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r11,r11,r2 @ h+=X[i] str r2,[sp,#8*4] eor r2,r9,r10 add r11,r11,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r8 add r11,r11,r12 @ h+=K256[i] eor r2,r2,r10 @ Ch(e,f,g) eor r0,r4,r4,ror#11 add r11,r11,r2 @ h+=Ch(e,f,g) #if 24==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 24<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r4,r5 @ a^b, b^c in next round #else ldr r2,[sp,#10*4] @ from future BODY_16_xx eor r12,r4,r5 @ a^b, b^c in next round ldr r1,[sp,#7*4] @ from future BODY_16_xx #endif eor r0,r0,r4,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r7,r7,r11 @ d+=h eor r3,r3,r5 @ Maj(a,b,c) add r11,r11,r0,ror#2 @ h+=Sigma0(a) @ add r11,r11,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#10*4] @ 25 @ ldr r1,[sp,#7*4] mov r0,r2,ror#7 add r11,r11,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#9*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#2*4] add r3,r3,r0 eor r0,r7,r7,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r7,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r10,r10,r2 @ h+=X[i] str r2,[sp,#9*4] eor r2,r8,r9 add r10,r10,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r7 add r10,r10,r3 @ h+=K256[i] eor r2,r2,r9 @ Ch(e,f,g) eor r0,r11,r11,ror#11 add r10,r10,r2 @ h+=Ch(e,f,g) #if 25==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 25<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r11,r4 @ a^b, b^c in next round #else ldr r2,[sp,#11*4] @ from future BODY_16_xx eor r3,r11,r4 @ a^b, b^c in next round ldr r1,[sp,#8*4] @ from future BODY_16_xx #endif eor r0,r0,r11,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r6,r6,r10 @ d+=h eor r12,r12,r4 @ Maj(a,b,c) add r10,r10,r0,ror#2 @ h+=Sigma0(a) @ add r10,r10,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#11*4] @ 26 @ ldr r1,[sp,#8*4] mov r0,r2,ror#7 add r10,r10,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#10*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#3*4] add r12,r12,r0 eor r0,r6,r6,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r6,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r9,r9,r2 @ h+=X[i] str r2,[sp,#10*4] eor r2,r7,r8 add r9,r9,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r6 add r9,r9,r12 @ h+=K256[i] eor r2,r2,r8 @ Ch(e,f,g) eor r0,r10,r10,ror#11 add r9,r9,r2 @ h+=Ch(e,f,g) #if 26==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 26<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r10,r11 @ a^b, b^c in next round #else ldr r2,[sp,#12*4] @ from future BODY_16_xx eor r12,r10,r11 @ a^b, b^c in next round ldr r1,[sp,#9*4] @ from future BODY_16_xx #endif eor r0,r0,r10,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r5,r5,r9 @ d+=h eor r3,r3,r11 @ Maj(a,b,c) add r9,r9,r0,ror#2 @ h+=Sigma0(a) @ add r9,r9,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#12*4] @ 27 @ ldr r1,[sp,#9*4] mov r0,r2,ror#7 add r9,r9,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#11*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#4*4] add r3,r3,r0 eor r0,r5,r5,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r5,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r8,r8,r2 @ h+=X[i] str r2,[sp,#11*4] eor r2,r6,r7 add r8,r8,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r5 add r8,r8,r3 @ h+=K256[i] eor r2,r2,r7 @ Ch(e,f,g) eor r0,r9,r9,ror#11 add r8,r8,r2 @ h+=Ch(e,f,g) #if 27==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? 
#endif #if 27<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r9,r10 @ a^b, b^c in next round #else ldr r2,[sp,#13*4] @ from future BODY_16_xx eor r3,r9,r10 @ a^b, b^c in next round ldr r1,[sp,#10*4] @ from future BODY_16_xx #endif eor r0,r0,r9,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r4,r4,r8 @ d+=h eor r12,r12,r10 @ Maj(a,b,c) add r8,r8,r0,ror#2 @ h+=Sigma0(a) @ add r8,r8,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#13*4] @ 28 @ ldr r1,[sp,#10*4] mov r0,r2,ror#7 add r8,r8,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#12*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#5*4] add r12,r12,r0 eor r0,r4,r4,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r4,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r7,r7,r2 @ h+=X[i] str r2,[sp,#12*4] eor r2,r5,r6 add r7,r7,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r4 add r7,r7,r12 @ h+=K256[i] eor r2,r2,r6 @ Ch(e,f,g) eor r0,r8,r8,ror#11 add r7,r7,r2 @ h+=Ch(e,f,g) #if 28==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? #endif #if 28<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r8,r9 @ a^b, b^c in next round #else ldr r2,[sp,#14*4] @ from future BODY_16_xx eor r12,r8,r9 @ a^b, b^c in next round ldr r1,[sp,#11*4] @ from future BODY_16_xx #endif eor r0,r0,r8,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r11,r11,r7 @ d+=h eor r3,r3,r9 @ Maj(a,b,c) add r7,r7,r0,ror#2 @ h+=Sigma0(a) @ add r7,r7,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#14*4] @ 29 @ ldr r1,[sp,#11*4] mov r0,r2,ror#7 add r7,r7,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#13*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#6*4] add r3,r3,r0 eor r0,r11,r11,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r11,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r6,r6,r2 @ h+=X[i] str r2,[sp,#13*4] eor r2,r4,r5 add r6,r6,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r11 add r6,r6,r3 @ h+=K256[i] eor r2,r2,r5 @ Ch(e,f,g) eor r0,r7,r7,ror#11 add r6,r6,r2 @ h+=Ch(e,f,g) #if 29==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 29<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r7,r8 @ a^b, b^c in next round #else ldr r2,[sp,#15*4] @ from future BODY_16_xx eor r3,r7,r8 @ a^b, b^c in next round ldr r1,[sp,#12*4] @ from future BODY_16_xx #endif eor r0,r0,r7,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r10,r10,r6 @ d+=h eor r12,r12,r8 @ Maj(a,b,c) add r6,r6,r0,ror#2 @ h+=Sigma0(a) @ add r6,r6,r12 @ h+=Maj(a,b,c) @ ldr r2,[sp,#15*4] @ 30 @ ldr r1,[sp,#12*4] mov r0,r2,ror#7 add r6,r6,r12 @ h+=Maj(a,b,c) from the past mov r12,r1,ror#17 eor r0,r0,r2,ror#18 eor r12,r12,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#14*4] eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#7*4] add r12,r12,r0 eor r0,r10,r10,ror#5 @ from BODY_00_15 add r2,r2,r12 eor r0,r0,r10,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r12,[r14],#4 @ *K256++ add r5,r5,r2 @ h+=X[i] str r2,[sp,#14*4] eor r2,r11,r4 add r5,r5,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r10 add r5,r5,r12 @ h+=K256[i] eor r2,r2,r4 @ Ch(e,f,g) eor r0,r6,r6,ror#11 add r5,r5,r2 @ h+=Ch(e,f,g) #if 30==31 and r12,r12,#0xff cmp r12,#0xf2 @ done? 
#endif #if 30<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r12,r6,r7 @ a^b, b^c in next round #else ldr r2,[sp,#0*4] @ from future BODY_16_xx eor r12,r6,r7 @ a^b, b^c in next round ldr r1,[sp,#13*4] @ from future BODY_16_xx #endif eor r0,r0,r6,ror#20 @ Sigma0(a) and r3,r3,r12 @ (b^c)&=(a^b) add r9,r9,r5 @ d+=h eor r3,r3,r7 @ Maj(a,b,c) add r5,r5,r0,ror#2 @ h+=Sigma0(a) @ add r5,r5,r3 @ h+=Maj(a,b,c) @ ldr r2,[sp,#0*4] @ 31 @ ldr r1,[sp,#13*4] mov r0,r2,ror#7 add r5,r5,r3 @ h+=Maj(a,b,c) from the past mov r3,r1,ror#17 eor r0,r0,r2,ror#18 eor r3,r3,r1,ror#19 eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) ldr r2,[sp,#15*4] eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) ldr r1,[sp,#8*4] add r3,r3,r0 eor r0,r9,r9,ror#5 @ from BODY_00_15 add r2,r2,r3 eor r0,r0,r9,ror#19 @ Sigma1(e) add r2,r2,r1 @ X[i] ldr r3,[r14],#4 @ *K256++ add r4,r4,r2 @ h+=X[i] str r2,[sp,#15*4] eor r2,r10,r11 add r4,r4,r0,ror#6 @ h+=Sigma1(e) and r2,r2,r9 add r4,r4,r3 @ h+=K256[i] eor r2,r2,r11 @ Ch(e,f,g) eor r0,r5,r5,ror#11 add r4,r4,r2 @ h+=Ch(e,f,g) #if 31==31 and r3,r3,#0xff cmp r3,#0xf2 @ done? #endif #if 31<15 # if __ARM_ARCH>=7 ldr r2,[r1],#4 @ prefetch # else ldrb r2,[r1,#3] # endif eor r3,r5,r6 @ a^b, b^c in next round #else ldr r2,[sp,#1*4] @ from future BODY_16_xx eor r3,r5,r6 @ a^b, b^c in next round ldr r1,[sp,#14*4] @ from future BODY_16_xx #endif eor r0,r0,r5,ror#20 @ Sigma0(a) and r12,r12,r3 @ (b^c)&=(a^b) add r8,r8,r4 @ d+=h eor r12,r12,r6 @ Maj(a,b,c) add r4,r4,r0,ror#2 @ h+=Sigma0(a) @ add r4,r4,r12 @ h+=Maj(a,b,c) #if __ARM_ARCH>=7 ite eq @ Thumb2 thing, sanity check in ARM #endif ldreq r3,[sp,#16*4] @ pull ctx bne .Lrounds_16_xx add r4,r4,r12 @ h+=Maj(a,b,c) from the past ldr r0,[r3,#0] ldr r2,[r3,#4] ldr r12,[r3,#8] add r4,r4,r0 ldr r0,[r3,#12] add r5,r5,r2 ldr r2,[r3,#16] add r6,r6,r12 ldr r12,[r3,#20] add r7,r7,r0 ldr r0,[r3,#24] add r8,r8,r2 ldr r2,[r3,#28] add r9,r9,r12 ldr r1,[sp,#17*4] @ pull inp ldr r12,[sp,#18*4] @ pull inp+len add r10,r10,r0 add r11,r11,r2 stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} cmp r1,r12 sub r14,r14,#256 @ rewind Ktbl bne .Loop add sp,sp,#19*4 @ destroy frame #if __ARM_ARCH>=5 ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} #else ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr} tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) #endif .size sha256_block_data_order_nohw,.-sha256_block_data_order_nohw #if __ARM_MAX_ARCH__>=7 .arch armv7-a .fpu neon .LK256_shortcut_neon: @ PC is 8 bytes ahead in Arm mode and 4 bytes ahead in Thumb mode. #if defined(__thumb2__) .word K256-(.LK256_add_neon+4) #else .word K256-(.LK256_add_neon+8) #endif .globl sha256_block_data_order_neon .hidden sha256_block_data_order_neon .type sha256_block_data_order_neon,%function .align 5 .skip 16 sha256_block_data_order_neon: stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} sub r11,sp,#16*4+16 @ K256 is just at the boundary of being easily referenced by an ADR from @ this function. In Arm mode, when building with __ARM_ARCH=6, it does @ not fit. By moving code around, we could make it fit, but this is too @ fragile. For simplicity, just load the offset from @ .LK256_shortcut_neon. @ @ TODO(davidben): adrl would avoid a load, but clang-assembler does not @ support it. We might be able to emulate it with a macro, but Android's @ did not work when I tried it. 
@ https://android.googlesource.com/platform/ndk/+/refs/heads/master/docs/ClangMigration.md#arm ldr r14,.LK256_shortcut_neon .LK256_add_neon: add r14,pc,r14 bic r11,r11,#15 @ align for 128-bit stores mov r12,sp mov sp,r11 @ alloca add r2,r1,r2,lsl#6 @ len to point at the end of inp vld1.8 {q0},[r1]! vld1.8 {q1},[r1]! vld1.8 {q2},[r1]! vld1.8 {q3},[r1]! vld1.32 {q8},[r14,:128]! vld1.32 {q9},[r14,:128]! vld1.32 {q10},[r14,:128]! vld1.32 {q11},[r14,:128]! vrev32.8 q0,q0 @ yes, even on str r0,[sp,#64] vrev32.8 q1,q1 @ big-endian str r1,[sp,#68] mov r1,sp vrev32.8 q2,q2 str r2,[sp,#72] vrev32.8 q3,q3 str r12,[sp,#76] @ save original sp vadd.i32 q8,q8,q0 vadd.i32 q9,q9,q1 vst1.32 {q8},[r1,:128]! vadd.i32 q10,q10,q2 vst1.32 {q9},[r1,:128]! vadd.i32 q11,q11,q3 vst1.32 {q10},[r1,:128]! vst1.32 {q11},[r1,:128]! ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} sub r1,r1,#64 ldr r2,[sp,#0] eor r12,r12,r12 eor r3,r5,r6 b .L_00_48 .align 4 .L_00_48: vext.8 q8,q0,q1,#4 add r11,r11,r2 eor r2,r9,r10 eor r0,r8,r8,ror#5 vext.8 q9,q2,q3,#4 add r4,r4,r12 and r2,r2,r8 eor r12,r0,r8,ror#19 vshr.u32 q10,q8,#7 eor r0,r4,r4,ror#11 eor r2,r2,r10 vadd.i32 q0,q0,q9 add r11,r11,r12,ror#6 eor r12,r4,r5 vshr.u32 q9,q8,#3 eor r0,r0,r4,ror#20 add r11,r11,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#4] and r3,r3,r12 vshr.u32 q11,q8,#18 add r7,r7,r11 add r11,r11,r0,ror#2 eor r3,r3,r5 veor q9,q9,q10 add r10,r10,r2 vsli.32 q11,q8,#14 eor r2,r8,r9 eor r0,r7,r7,ror#5 vshr.u32 d24,d7,#17 add r11,r11,r3 and r2,r2,r7 veor q9,q9,q11 eor r3,r0,r7,ror#19 eor r0,r11,r11,ror#11 vsli.32 d24,d7,#15 eor r2,r2,r9 add r10,r10,r3,ror#6 vshr.u32 d25,d7,#10 eor r3,r11,r4 eor r0,r0,r11,ror#20 vadd.i32 q0,q0,q9 add r10,r10,r2 ldr r2,[sp,#8] veor d25,d25,d24 and r12,r12,r3 add r6,r6,r10 vshr.u32 d24,d7,#19 add r10,r10,r0,ror#2 eor r12,r12,r4 vsli.32 d24,d7,#13 add r9,r9,r2 eor r2,r7,r8 veor d25,d25,d24 eor r0,r6,r6,ror#5 add r10,r10,r12 vadd.i32 d0,d0,d25 and r2,r2,r6 eor r12,r0,r6,ror#19 vshr.u32 d24,d0,#17 eor r0,r10,r10,ror#11 eor r2,r2,r8 vsli.32 d24,d0,#15 add r9,r9,r12,ror#6 eor r12,r10,r11 vshr.u32 d25,d0,#10 eor r0,r0,r10,ror#20 add r9,r9,r2 veor d25,d25,d24 ldr r2,[sp,#12] and r3,r3,r12 vshr.u32 d24,d0,#19 add r5,r5,r9 add r9,r9,r0,ror#2 eor r3,r3,r11 vld1.32 {q8},[r14,:128]! add r8,r8,r2 vsli.32 d24,d0,#13 eor r2,r6,r7 eor r0,r5,r5,ror#5 veor d25,d25,d24 add r9,r9,r3 and r2,r2,r5 vadd.i32 d1,d1,d25 eor r3,r0,r5,ror#19 eor r0,r9,r9,ror#11 vadd.i32 q8,q8,q0 eor r2,r2,r7 add r8,r8,r3,ror#6 eor r3,r9,r10 eor r0,r0,r9,ror#20 add r8,r8,r2 ldr r2,[sp,#16] and r12,r12,r3 add r4,r4,r8 vst1.32 {q8},[r1,:128]! 
add r8,r8,r0,ror#2 eor r12,r12,r10 vext.8 q8,q1,q2,#4 add r7,r7,r2 eor r2,r5,r6 eor r0,r4,r4,ror#5 vext.8 q9,q3,q0,#4 add r8,r8,r12 and r2,r2,r4 eor r12,r0,r4,ror#19 vshr.u32 q10,q8,#7 eor r0,r8,r8,ror#11 eor r2,r2,r6 vadd.i32 q1,q1,q9 add r7,r7,r12,ror#6 eor r12,r8,r9 vshr.u32 q9,q8,#3 eor r0,r0,r8,ror#20 add r7,r7,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#20] and r3,r3,r12 vshr.u32 q11,q8,#18 add r11,r11,r7 add r7,r7,r0,ror#2 eor r3,r3,r9 veor q9,q9,q10 add r6,r6,r2 vsli.32 q11,q8,#14 eor r2,r4,r5 eor r0,r11,r11,ror#5 vshr.u32 d24,d1,#17 add r7,r7,r3 and r2,r2,r11 veor q9,q9,q11 eor r3,r0,r11,ror#19 eor r0,r7,r7,ror#11 vsli.32 d24,d1,#15 eor r2,r2,r5 add r6,r6,r3,ror#6 vshr.u32 d25,d1,#10 eor r3,r7,r8 eor r0,r0,r7,ror#20 vadd.i32 q1,q1,q9 add r6,r6,r2 ldr r2,[sp,#24] veor d25,d25,d24 and r12,r12,r3 add r10,r10,r6 vshr.u32 d24,d1,#19 add r6,r6,r0,ror#2 eor r12,r12,r8 vsli.32 d24,d1,#13 add r5,r5,r2 eor r2,r11,r4 veor d25,d25,d24 eor r0,r10,r10,ror#5 add r6,r6,r12 vadd.i32 d2,d2,d25 and r2,r2,r10 eor r12,r0,r10,ror#19 vshr.u32 d24,d2,#17 eor r0,r6,r6,ror#11 eor r2,r2,r4 vsli.32 d24,d2,#15 add r5,r5,r12,ror#6 eor r12,r6,r7 vshr.u32 d25,d2,#10 eor r0,r0,r6,ror#20 add r5,r5,r2 veor d25,d25,d24 ldr r2,[sp,#28] and r3,r3,r12 vshr.u32 d24,d2,#19 add r9,r9,r5 add r5,r5,r0,ror#2 eor r3,r3,r7 vld1.32 {q8},[r14,:128]! add r4,r4,r2 vsli.32 d24,d2,#13 eor r2,r10,r11 eor r0,r9,r9,ror#5 veor d25,d25,d24 add r5,r5,r3 and r2,r2,r9 vadd.i32 d3,d3,d25 eor r3,r0,r9,ror#19 eor r0,r5,r5,ror#11 vadd.i32 q8,q8,q1 eor r2,r2,r11 add r4,r4,r3,ror#6 eor r3,r5,r6 eor r0,r0,r5,ror#20 add r4,r4,r2 ldr r2,[sp,#32] and r12,r12,r3 add r8,r8,r4 vst1.32 {q8},[r1,:128]! add r4,r4,r0,ror#2 eor r12,r12,r6 vext.8 q8,q2,q3,#4 add r11,r11,r2 eor r2,r9,r10 eor r0,r8,r8,ror#5 vext.8 q9,q0,q1,#4 add r4,r4,r12 and r2,r2,r8 eor r12,r0,r8,ror#19 vshr.u32 q10,q8,#7 eor r0,r4,r4,ror#11 eor r2,r2,r10 vadd.i32 q2,q2,q9 add r11,r11,r12,ror#6 eor r12,r4,r5 vshr.u32 q9,q8,#3 eor r0,r0,r4,ror#20 add r11,r11,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#36] and r3,r3,r12 vshr.u32 q11,q8,#18 add r7,r7,r11 add r11,r11,r0,ror#2 eor r3,r3,r5 veor q9,q9,q10 add r10,r10,r2 vsli.32 q11,q8,#14 eor r2,r8,r9 eor r0,r7,r7,ror#5 vshr.u32 d24,d3,#17 add r11,r11,r3 and r2,r2,r7 veor q9,q9,q11 eor r3,r0,r7,ror#19 eor r0,r11,r11,ror#11 vsli.32 d24,d3,#15 eor r2,r2,r9 add r10,r10,r3,ror#6 vshr.u32 d25,d3,#10 eor r3,r11,r4 eor r0,r0,r11,ror#20 vadd.i32 q2,q2,q9 add r10,r10,r2 ldr r2,[sp,#40] veor d25,d25,d24 and r12,r12,r3 add r6,r6,r10 vshr.u32 d24,d3,#19 add r10,r10,r0,ror#2 eor r12,r12,r4 vsli.32 d24,d3,#13 add r9,r9,r2 eor r2,r7,r8 veor d25,d25,d24 eor r0,r6,r6,ror#5 add r10,r10,r12 vadd.i32 d4,d4,d25 and r2,r2,r6 eor r12,r0,r6,ror#19 vshr.u32 d24,d4,#17 eor r0,r10,r10,ror#11 eor r2,r2,r8 vsli.32 d24,d4,#15 add r9,r9,r12,ror#6 eor r12,r10,r11 vshr.u32 d25,d4,#10 eor r0,r0,r10,ror#20 add r9,r9,r2 veor d25,d25,d24 ldr r2,[sp,#44] and r3,r3,r12 vshr.u32 d24,d4,#19 add r5,r5,r9 add r9,r9,r0,ror#2 eor r3,r3,r11 vld1.32 {q8},[r14,:128]! add r8,r8,r2 vsli.32 d24,d4,#13 eor r2,r6,r7 eor r0,r5,r5,ror#5 veor d25,d25,d24 add r9,r9,r3 and r2,r2,r5 vadd.i32 d5,d5,d25 eor r3,r0,r5,ror#19 eor r0,r9,r9,ror#11 vadd.i32 q8,q8,q2 eor r2,r2,r7 add r8,r8,r3,ror#6 eor r3,r9,r10 eor r0,r0,r9,ror#20 add r8,r8,r2 ldr r2,[sp,#48] and r12,r12,r3 add r4,r4,r8 vst1.32 {q8},[r1,:128]! 
add r8,r8,r0,ror#2 eor r12,r12,r10 vext.8 q8,q3,q0,#4 add r7,r7,r2 eor r2,r5,r6 eor r0,r4,r4,ror#5 vext.8 q9,q1,q2,#4 add r8,r8,r12 and r2,r2,r4 eor r12,r0,r4,ror#19 vshr.u32 q10,q8,#7 eor r0,r8,r8,ror#11 eor r2,r2,r6 vadd.i32 q3,q3,q9 add r7,r7,r12,ror#6 eor r12,r8,r9 vshr.u32 q9,q8,#3 eor r0,r0,r8,ror#20 add r7,r7,r2 vsli.32 q10,q8,#25 ldr r2,[sp,#52] and r3,r3,r12 vshr.u32 q11,q8,#18 add r11,r11,r7 add r7,r7,r0,ror#2 eor r3,r3,r9 veor q9,q9,q10 add r6,r6,r2 vsli.32 q11,q8,#14 eor r2,r4,r5 eor r0,r11,r11,ror#5 vshr.u32 d24,d5,#17 add r7,r7,r3 and r2,r2,r11 veor q9,q9,q11 eor r3,r0,r11,ror#19 eor r0,r7,r7,ror#11 vsli.32 d24,d5,#15 eor r2,r2,r5 add r6,r6,r3,ror#6 vshr.u32 d25,d5,#10 eor r3,r7,r8 eor r0,r0,r7,ror#20 vadd.i32 q3,q3,q9 add r6,r6,r2 ldr r2,[sp,#56] veor d25,d25,d24 and r12,r12,r3 add r10,r10,r6 vshr.u32 d24,d5,#19 add r6,r6,r0,ror#2 eor r12,r12,r8 vsli.32 d24,d5,#13 add r5,r5,r2 eor r2,r11,r4 veor d25,d25,d24 eor r0,r10,r10,ror#5 add r6,r6,r12 vadd.i32 d6,d6,d25 and r2,r2,r10 eor r12,r0,r10,ror#19 vshr.u32 d24,d6,#17 eor r0,r6,r6,ror#11 eor r2,r2,r4 vsli.32 d24,d6,#15 add r5,r5,r12,ror#6 eor r12,r6,r7 vshr.u32 d25,d6,#10 eor r0,r0,r6,ror#20 add r5,r5,r2 veor d25,d25,d24 ldr r2,[sp,#60] and r3,r3,r12 vshr.u32 d24,d6,#19 add r9,r9,r5 add r5,r5,r0,ror#2 eor r3,r3,r7 vld1.32 {q8},[r14,:128]! add r4,r4,r2 vsli.32 d24,d6,#13 eor r2,r10,r11 eor r0,r9,r9,ror#5 veor d25,d25,d24 add r5,r5,r3 and r2,r2,r9 vadd.i32 d7,d7,d25 eor r3,r0,r9,ror#19 eor r0,r5,r5,ror#11 vadd.i32 q8,q8,q3 eor r2,r2,r11 add r4,r4,r3,ror#6 eor r3,r5,r6 eor r0,r0,r5,ror#20 add r4,r4,r2 ldr r2,[r14] and r12,r12,r3 add r8,r8,r4 vst1.32 {q8},[r1,:128]! add r4,r4,r0,ror#2 eor r12,r12,r6 teq r2,#0 @ check for K256 terminator ldr r2,[sp,#0] sub r1,r1,#64 bne .L_00_48 ldr r1,[sp,#68] ldr r0,[sp,#72] sub r14,r14,#256 @ rewind r14 teq r1,r0 it eq subeq r1,r1,#64 @ avoid SEGV vld1.8 {q0},[r1]! @ load next input block vld1.8 {q1},[r1]! vld1.8 {q2},[r1]! vld1.8 {q3},[r1]! it ne strne r1,[sp,#68] mov r1,sp add r11,r11,r2 eor r2,r9,r10 eor r0,r8,r8,ror#5 add r4,r4,r12 vld1.32 {q8},[r14,:128]! and r2,r2,r8 eor r12,r0,r8,ror#19 eor r0,r4,r4,ror#11 eor r2,r2,r10 vrev32.8 q0,q0 add r11,r11,r12,ror#6 eor r12,r4,r5 eor r0,r0,r4,ror#20 add r11,r11,r2 vadd.i32 q8,q8,q0 ldr r2,[sp,#4] and r3,r3,r12 add r7,r7,r11 add r11,r11,r0,ror#2 eor r3,r3,r5 add r10,r10,r2 eor r2,r8,r9 eor r0,r7,r7,ror#5 add r11,r11,r3 and r2,r2,r7 eor r3,r0,r7,ror#19 eor r0,r11,r11,ror#11 eor r2,r2,r9 add r10,r10,r3,ror#6 eor r3,r11,r4 eor r0,r0,r11,ror#20 add r10,r10,r2 ldr r2,[sp,#8] and r12,r12,r3 add r6,r6,r10 add r10,r10,r0,ror#2 eor r12,r12,r4 add r9,r9,r2 eor r2,r7,r8 eor r0,r6,r6,ror#5 add r10,r10,r12 and r2,r2,r6 eor r12,r0,r6,ror#19 eor r0,r10,r10,ror#11 eor r2,r2,r8 add r9,r9,r12,ror#6 eor r12,r10,r11 eor r0,r0,r10,ror#20 add r9,r9,r2 ldr r2,[sp,#12] and r3,r3,r12 add r5,r5,r9 add r9,r9,r0,ror#2 eor r3,r3,r11 add r8,r8,r2 eor r2,r6,r7 eor r0,r5,r5,ror#5 add r9,r9,r3 and r2,r2,r5 eor r3,r0,r5,ror#19 eor r0,r9,r9,ror#11 eor r2,r2,r7 add r8,r8,r3,ror#6 eor r3,r9,r10 eor r0,r0,r9,ror#20 add r8,r8,r2 ldr r2,[sp,#16] and r12,r12,r3 add r4,r4,r8 add r8,r8,r0,ror#2 eor r12,r12,r10 vst1.32 {q8},[r1,:128]! add r7,r7,r2 eor r2,r5,r6 eor r0,r4,r4,ror#5 add r8,r8,r12 vld1.32 {q8},[r14,:128]! 
and r2,r2,r4 eor r12,r0,r4,ror#19 eor r0,r8,r8,ror#11 eor r2,r2,r6 vrev32.8 q1,q1 add r7,r7,r12,ror#6 eor r12,r8,r9 eor r0,r0,r8,ror#20 add r7,r7,r2 vadd.i32 q8,q8,q1 ldr r2,[sp,#20] and r3,r3,r12 add r11,r11,r7 add r7,r7,r0,ror#2 eor r3,r3,r9 add r6,r6,r2 eor r2,r4,r5 eor r0,r11,r11,ror#5 add r7,r7,r3 and r2,r2,r11 eor r3,r0,r11,ror#19 eor r0,r7,r7,ror#11 eor r2,r2,r5 add r6,r6,r3,ror#6 eor r3,r7,r8 eor r0,r0,r7,ror#20 add r6,r6,r2 ldr r2,[sp,#24] and r12,r12,r3 add r10,r10,r6 add r6,r6,r0,ror#2 eor r12,r12,r8 add r5,r5,r2 eor r2,r11,r4 eor r0,r10,r10,ror#5 add r6,r6,r12 and r2,r2,r10 eor r12,r0,r10,ror#19 eor r0,r6,r6,ror#11 eor r2,r2,r4 add r5,r5,r12,ror#6 eor r12,r6,r7 eor r0,r0,r6,ror#20 add r5,r5,r2 ldr r2,[sp,#28] and r3,r3,r12 add r9,r9,r5 add r5,r5,r0,ror#2 eor r3,r3,r7 add r4,r4,r2 eor r2,r10,r11 eor r0,r9,r9,ror#5 add r5,r5,r3 and r2,r2,r9 eor r3,r0,r9,ror#19 eor r0,r5,r5,ror#11 eor r2,r2,r11 add r4,r4,r3,ror#6 eor r3,r5,r6 eor r0,r0,r5,ror#20 add r4,r4,r2 ldr r2,[sp,#32] and r12,r12,r3 add r8,r8,r4 add r4,r4,r0,ror#2 eor r12,r12,r6 vst1.32 {q8},[r1,:128]! add r11,r11,r2 eor r2,r9,r10 eor r0,r8,r8,ror#5 add r4,r4,r12 vld1.32 {q8},[r14,:128]! and r2,r2,r8 eor r12,r0,r8,ror#19 eor r0,r4,r4,ror#11 eor r2,r2,r10 vrev32.8 q2,q2 add r11,r11,r12,ror#6 eor r12,r4,r5 eor r0,r0,r4,ror#20 add r11,r11,r2 vadd.i32 q8,q8,q2 ldr r2,[sp,#36] and r3,r3,r12 add r7,r7,r11 add r11,r11,r0,ror#2 eor r3,r3,r5 add r10,r10,r2 eor r2,r8,r9 eor r0,r7,r7,ror#5 add r11,r11,r3 and r2,r2,r7 eor r3,r0,r7,ror#19 eor r0,r11,r11,ror#11 eor r2,r2,r9 add r10,r10,r3,ror#6 eor r3,r11,r4 eor r0,r0,r11,ror#20 add r10,r10,r2 ldr r2,[sp,#40] and r12,r12,r3 add r6,r6,r10 add r10,r10,r0,ror#2 eor r12,r12,r4 add r9,r9,r2 eor r2,r7,r8 eor r0,r6,r6,ror#5 add r10,r10,r12 and r2,r2,r6 eor r12,r0,r6,ror#19 eor r0,r10,r10,ror#11 eor r2,r2,r8 add r9,r9,r12,ror#6 eor r12,r10,r11 eor r0,r0,r10,ror#20 add r9,r9,r2 ldr r2,[sp,#44] and r3,r3,r12 add r5,r5,r9 add r9,r9,r0,ror#2 eor r3,r3,r11 add r8,r8,r2 eor r2,r6,r7 eor r0,r5,r5,ror#5 add r9,r9,r3 and r2,r2,r5 eor r3,r0,r5,ror#19 eor r0,r9,r9,ror#11 eor r2,r2,r7 add r8,r8,r3,ror#6 eor r3,r9,r10 eor r0,r0,r9,ror#20 add r8,r8,r2 ldr r2,[sp,#48] and r12,r12,r3 add r4,r4,r8 add r8,r8,r0,ror#2 eor r12,r12,r10 vst1.32 {q8},[r1,:128]! add r7,r7,r2 eor r2,r5,r6 eor r0,r4,r4,ror#5 add r8,r8,r12 vld1.32 {q8},[r14,:128]! and r2,r2,r4 eor r12,r0,r4,ror#19 eor r0,r8,r8,ror#11 eor r2,r2,r6 vrev32.8 q3,q3 add r7,r7,r12,ror#6 eor r12,r8,r9 eor r0,r0,r8,ror#20 add r7,r7,r2 vadd.i32 q8,q8,q3 ldr r2,[sp,#52] and r3,r3,r12 add r11,r11,r7 add r7,r7,r0,ror#2 eor r3,r3,r9 add r6,r6,r2 eor r2,r4,r5 eor r0,r11,r11,ror#5 add r7,r7,r3 and r2,r2,r11 eor r3,r0,r11,ror#19 eor r0,r7,r7,ror#11 eor r2,r2,r5 add r6,r6,r3,ror#6 eor r3,r7,r8 eor r0,r0,r7,ror#20 add r6,r6,r2 ldr r2,[sp,#56] and r12,r12,r3 add r10,r10,r6 add r6,r6,r0,ror#2 eor r12,r12,r8 add r5,r5,r2 eor r2,r11,r4 eor r0,r10,r10,ror#5 add r6,r6,r12 and r2,r2,r10 eor r12,r0,r10,ror#19 eor r0,r6,r6,ror#11 eor r2,r2,r4 add r5,r5,r12,ror#6 eor r12,r6,r7 eor r0,r0,r6,ror#20 add r5,r5,r2 ldr r2,[sp,#60] and r3,r3,r12 add r9,r9,r5 add r5,r5,r0,ror#2 eor r3,r3,r7 add r4,r4,r2 eor r2,r10,r11 eor r0,r9,r9,ror#5 add r5,r5,r3 and r2,r2,r9 eor r3,r0,r9,ror#19 eor r0,r5,r5,ror#11 eor r2,r2,r11 add r4,r4,r3,ror#6 eor r3,r5,r6 eor r0,r0,r5,ror#20 add r4,r4,r2 ldr r2,[sp,#64] and r12,r12,r3 add r8,r8,r4 add r4,r4,r0,ror#2 eor r12,r12,r6 vst1.32 {q8},[r1,:128]! 
ldr r0,[r2,#0] add r4,r4,r12 @ h+=Maj(a,b,c) from the past ldr r12,[r2,#4] ldr r3,[r2,#8] ldr r1,[r2,#12] add r4,r4,r0 @ accumulate ldr r0,[r2,#16] add r5,r5,r12 ldr r12,[r2,#20] add r6,r6,r3 ldr r3,[r2,#24] add r7,r7,r1 ldr r1,[r2,#28] add r8,r8,r0 str r4,[r2],#4 add r9,r9,r12 str r5,[r2],#4 add r10,r10,r3 str r6,[r2],#4 add r11,r11,r1 str r7,[r2],#4 stmia r2,{r8,r9,r10,r11} ittte ne movne r1,sp ldrne r2,[sp,#0] eorne r12,r12,r12 ldreq sp,[sp,#76] @ restore original sp itt ne eorne r3,r5,r6 bne .L_00_48 ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc} .size sha256_block_data_order_neon,.-sha256_block_data_order_neon #endif .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
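The flattened block above is the tail of ring's sha256-armv4 NEON routine: the eor/ror/and/add chains are SHA-256 compression rounds, and the vshr/vsli/veor work on q8-q11/d24-d25 is the message schedule. As a reading aid only (not part of the generated file), here is a minimal Rust sketch of what one round and one schedule step compute; the function and variable names are mine, not from the source.

```rust
/// One SHA-256 compression round. The assembly above keeps a..h in r4-r11,
/// interleaves rounds with the NEON message schedule, and defers the
/// "+ Maj(a,b,c)" term to the following round ("h+=Maj(a,b,c) from the past").
fn sha256_round(state: &mut [u32; 8], k_t: u32, w_t: u32) {
    let [a, b, c, d, e, f, g, h] = *state;
    let sigma1 = e.rotate_right(6) ^ e.rotate_right(11) ^ e.rotate_right(25);
    let ch = (e & f) ^ (!e & g);
    let t1 = h
        .wrapping_add(sigma1)
        .wrapping_add(ch)
        .wrapping_add(k_t)
        .wrapping_add(w_t);
    let sigma0 = a.rotate_right(2) ^ a.rotate_right(13) ^ a.rotate_right(22);
    let maj = (a & b) ^ (a & c) ^ (b & c);
    let t2 = sigma0.wrapping_add(maj);
    *state = [t1.wrapping_add(t2), a, b, c, d.wrapping_add(t1), e, f, g];
}

/// One message-schedule step, W[t] = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16],
/// over a 16-word circular buffer. The vshr/vsli/veor sequences above compute
/// four of these at once (s0: rotr 7/18 and shr 3; s1: rotr 17/19 and shr 10).
fn sha256_schedule(w: &mut [u32; 16], t: usize) -> u32 {
    let s0 = |x: u32| x.rotate_right(7) ^ x.rotate_right(18) ^ (x >> 3);
    let s1 = |x: u32| x.rotate_right(17) ^ x.rotate_right(19) ^ (x >> 10);
    let next = s1(w[(t + 14) % 16])
        .wrapping_add(w[(t + 9) % 16])
        .wrapping_add(s0(w[(t + 1) % 16]))
        .wrapping_add(w[t % 16]);
    w[t % 16] = next;
    next
}
```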
marvin-hansen/iggy-streaming-system
5,546
thirdparty/crates/ring-0.17.9/pregenerated/ghash-x86-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl gcm_init_clmul .hidden gcm_init_clmul .type gcm_init_clmul,@function .align 16 gcm_init_clmul: .L_gcm_init_clmul_begin: movl 4(%esp),%edx movl 8(%esp),%eax call .L000pic .L000pic: popl %ecx leal .Lbswap-.L000pic(%ecx),%ecx movdqu (%eax),%xmm2 pshufd $78,%xmm2,%xmm2 pshufd $255,%xmm2,%xmm4 movdqa %xmm2,%xmm3 psllq $1,%xmm2 pxor %xmm5,%xmm5 psrlq $63,%xmm3 pcmpgtd %xmm4,%xmm5 pslldq $8,%xmm3 por %xmm3,%xmm2 pand 16(%ecx),%xmm5 pxor %xmm5,%xmm2 movdqa %xmm2,%xmm0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pshufd $78,%xmm2,%xmm4 pxor %xmm0,%xmm3 pxor %xmm2,%xmm4 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 xorps %xmm0,%xmm3 xorps %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 pshufd $78,%xmm2,%xmm3 pshufd $78,%xmm0,%xmm4 pxor %xmm2,%xmm3 movdqu %xmm2,(%edx) pxor %xmm0,%xmm4 movdqu %xmm0,16(%edx) .byte 102,15,58,15,227,8 movdqu %xmm4,32(%edx) ret .size gcm_init_clmul,.-.L_gcm_init_clmul_begin .globl gcm_ghash_clmul .hidden gcm_ghash_clmul .type gcm_ghash_clmul,@function .align 16 gcm_ghash_clmul: .L_gcm_ghash_clmul_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi movl 20(%esp),%eax movl 24(%esp),%edx movl 28(%esp),%esi movl 32(%esp),%ebx call .L001pic .L001pic: popl %ecx leal .Lbswap-.L001pic(%ecx),%ecx movdqu (%eax),%xmm0 movdqa (%ecx),%xmm5 movdqu (%edx),%xmm2 .byte 102,15,56,0,197 subl $16,%ebx jz .L002odd_tail movdqu (%esi),%xmm3 movdqu 16(%esi),%xmm6 .byte 102,15,56,0,221 .byte 102,15,56,0,245 movdqu 32(%edx),%xmm5 pxor %xmm3,%xmm0 pshufd $78,%xmm6,%xmm3 movdqa %xmm6,%xmm7 pxor %xmm6,%xmm3 leal 32(%esi),%esi .byte 102,15,58,68,242,0 .byte 102,15,58,68,250,17 .byte 102,15,58,68,221,0 movups 16(%edx),%xmm2 nop subl $32,%ebx jbe .L003even_tail jmp .L004mod_loop .align 32 .L004mod_loop: pshufd $78,%xmm0,%xmm4 movdqa %xmm0,%xmm1 pxor %xmm0,%xmm4 nop .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,229,16 movups (%edx),%xmm2 xorps %xmm6,%xmm0 movdqa (%ecx),%xmm5 xorps %xmm7,%xmm1 movdqu (%esi),%xmm7 pxor %xmm0,%xmm3 movdqu 16(%esi),%xmm6 pxor %xmm1,%xmm3 .byte 102,15,56,0,253 pxor %xmm3,%xmm4 movdqa %xmm4,%xmm3 psrldq $8,%xmm4 pslldq $8,%xmm3 pxor %xmm4,%xmm1 pxor %xmm3,%xmm0 .byte 102,15,56,0,245 pxor %xmm7,%xmm1 movdqa %xmm6,%xmm7 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 .byte 102,15,58,68,242,0 movups 32(%edx),%xmm5 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 pshufd $78,%xmm7,%xmm3 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm7,%xmm3 pxor %xmm4,%xmm1 .byte 102,15,58,68,250,17 movups 16(%edx),%xmm2 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 .byte 102,15,58,68,221,0 leal 32(%esi),%esi subl $32,%ebx ja .L004mod_loop .L003even_tail: pshufd $78,%xmm0,%xmm4 movdqa %xmm0,%xmm1 pxor %xmm0,%xmm4 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,229,16 movdqa (%ecx),%xmm5 
xorps %xmm6,%xmm0 xorps %xmm7,%xmm1 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 pxor %xmm3,%xmm4 movdqa %xmm4,%xmm3 psrldq $8,%xmm4 pslldq $8,%xmm3 pxor %xmm4,%xmm1 pxor %xmm3,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 testl %ebx,%ebx jnz .L005done movups (%edx),%xmm2 .L002odd_tail: movdqu (%esi),%xmm3 .byte 102,15,56,0,221 pxor %xmm3,%xmm0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pshufd $78,%xmm2,%xmm4 pxor %xmm0,%xmm3 pxor %xmm2,%xmm4 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,220,0 xorps %xmm0,%xmm3 xorps %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 .L005done: .byte 102,15,56,0,197 movdqu %xmm0,(%eax) popl %edi popl %esi popl %ebx popl %ebp ret .size gcm_ghash_clmul,.-.L_gcm_ghash_clmul_begin .align 64 .Lbswap: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,194 .byte 71,72,65,83,72,32,102,111,114,32,120,56,54,44,32,67 .byte 82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112 .byte 112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62 .byte 0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
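gcm_init_clmul and gcm_ghash_clmul above implement GHASH with pclmulqdq (emitted as the .byte 102,15,58,68,... sequences) plus an inline reduction. For orientation only, here is a minimal Rust sketch of the reference bit-shift multiplication in GF(2^128) that those routines accelerate; the names are mine, and the byte-swapping the assembly performs through the .Lbswap table is elided.

```rust
/// Reference multiplication in GF(2^128) with the GCM bit ordering
/// (the bit-shift method from NIST SP 800-38D). Blocks are read big-endian;
/// the reduction polynomial x^128 + x^7 + x^2 + x + 1 shows up as R below.
fn gf128_mul(x: u128, y: u128) -> u128 {
    const R: u128 = 0xE1 << 120;
    let mut z = 0u128;
    let mut v = x;
    for i in 0..128 {
        if (y >> (127 - i)) & 1 == 1 {
            z ^= v;
        }
        let lsb_set = v & 1 == 1;
        v >>= 1;
        if lsb_set {
            v ^= R;
        }
    }
    z
}

/// What gcm_ghash_clmul computes per 16-byte block: Xi = (Xi ^ block) * H.
fn ghash_update(xi: &mut u128, h: u128, block: &[u8; 16]) {
    *xi = gf128_mul(*xi ^ u128::from_be_bytes(*block), h);
}
```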
marvin-hansen/iggy-streaming-system
4,192
thirdparty/crates/ring-0.17.9/pregenerated/ghashv8-armx-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <ring-core/arm_arch.h> #if __ARM_MAX_ARCH__>=7 .text .arch armv8-a+crypto .globl gcm_init_clmul .def gcm_init_clmul .type 32 .endef .align 4 gcm_init_clmul: AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x1] //load input H movi v19.16b,#0xe1 shl v19.2d,v19.2d,#57 //0xc2.0 ext v3.16b,v17.16b,v17.16b,#8 ushr v18.2d,v19.2d,#63 dup v17.4s,v17.s[1] ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01 ushr v18.2d,v3.2d,#63 sshr v17.4s,v17.4s,#31 //broadcast carry bit and v18.16b,v18.16b,v16.16b shl v3.2d,v3.2d,#1 ext v18.16b,v18.16b,v18.16b,#8 and v16.16b,v16.16b,v17.16b orr v3.16b,v3.16b,v18.16b //H<<<=1 eor v20.16b,v3.16b,v16.16b //twisted H st1 {v20.2d},[x0],#16 //store Htable[0] //calculate H^2 ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing pmull v0.1q,v20.1d,v20.1d eor v16.16b,v16.16b,v20.16b pmull2 v2.1q,v20.2d,v20.2d pmull v1.1q,v16.1d,v16.1d ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v22.16b,v0.16b,v18.16b ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing eor v17.16b,v17.16b,v22.16b ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2] //calculate H^3 and H^4 pmull v0.1q,v20.1d, v22.1d pmull v5.1q,v22.1d,v22.1d pmull2 v2.1q,v20.2d, v22.2d pmull2 v7.1q,v22.2d,v22.2d pmull v1.1q,v16.1d,v17.1d pmull v6.1q,v17.1d,v17.1d ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing ext v17.16b,v5.16b,v7.16b,#8 eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v16.16b eor v4.16b,v5.16b,v7.16b eor v6.16b,v6.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase eor v6.16b,v6.16b,v4.16b pmull v4.1q,v5.1d,v19.1d ins v2.d[0],v1.d[1] ins v7.d[0],v6.d[1] ins v1.d[1],v0.d[0] ins v6.d[1],v5.d[0] eor v0.16b,v1.16b,v18.16b eor v5.16b,v6.16b,v4.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase ext v4.16b,v5.16b,v5.16b,#8 pmull v0.1q,v0.1d,v19.1d pmull v5.1q,v5.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v4.16b,v4.16b,v7.16b eor v20.16b, v0.16b,v18.16b //H^3 eor v22.16b,v5.16b,v4.16b //H^4 ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing ext v17.16b,v22.16b,v22.16b,#8 eor v16.16b,v16.16b,v20.16b eor v17.16b,v17.16b,v22.16b ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5] ret .globl gcm_gmult_clmul .def gcm_gmult_clmul .type 32 .endef .align 4 gcm_gmult_clmul: AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x0] //load Xi movi v19.16b,#0xe1 ld1 {v20.2d,v21.2d},[x1] //load twisted H, ... 
shl v19.2d,v19.2d,#57 #ifndef __AARCH64EB__ rev64 v17.16b,v17.16b #endif ext v3.16b,v17.16b,v17.16b,#8 pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi) ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b #endif ext v0.16b,v0.16b,v0.16b,#8 st1 {v0.2d},[x0] //write out Xi ret .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
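The comments above ("Karatsuba pre-processing", "1st/2nd phase") describe how each 128x128-bit carry-less product is built from three pmull/pmull2 instructions and then folded back to 128 bits with the 0xc2-derived constant in v19. Below is a minimal Rust sketch of the Karatsuba identity over GF(2) that makes three multiplies sufficient; clmul64 stands in for a single pmull, and all names are mine.

```rust
/// Carry-less (polynomial) multiply of two 64-bit values, returning (hi, lo).
/// Each pmull/pmull2 above does this in one instruction.
fn clmul64(a: u64, b: u64) -> (u64, u64) {
    let (mut hi, mut lo) = (0u64, 0u64);
    for i in 0..64 {
        if (b >> i) & 1 == 1 {
            lo ^= a << i;
            if i != 0 {
                hi ^= a >> (64 - i);
            }
        }
    }
    (hi, lo)
}

/// Karatsuba over GF(2): the cross term a_lo*b_hi ^ a_hi*b_lo equals
/// (a_lo ^ a_hi)*(b_lo ^ b_hi) ^ a_lo*b_lo ^ a_hi*b_hi, so a 128x128 product
/// needs only three 64x64 multiplies ("Karatsuba pre/post-processing").
/// The result is the 256-bit product as four little-endian 64-bit words; the
/// assembly then reduces it modulo the GHASH polynomial (the pmull by v19).
fn clmul128(a: (u64, u64), b: (u64, u64)) -> [u64; 4] {
    let (a_hi, a_lo) = a;
    let (b_hi, b_lo) = b;
    let lo = clmul64(a_lo, b_lo);
    let hi = clmul64(a_hi, b_hi);
    let m = clmul64(a_lo ^ a_hi, b_lo ^ b_hi);
    let mid = (m.0 ^ lo.0 ^ hi.0, m.1 ^ lo.1 ^ hi.1);
    [lo.1, lo.0 ^ mid.1, hi.1 ^ mid.0, hi.0]
}
```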
marvin-hansen/iggy-streaming-system
30,909
thirdparty/crates/ring-0.17.9/pregenerated/armv8-mont-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include <ring-core/arm_arch.h> .text .globl bn_mul_mont_nohw .hidden bn_mul_mont_nohw .type bn_mul_mont_nohw,%function .align 5 bn_mul_mont_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] ldr x9,[x2],#8 // bp[0] sub x22,sp,x5,lsl#3 ldp x7,x8,[x1],#16 // ap[0..1] lsl x5,x5,#3 ldr x4,[x4] // *n0 and x22,x22,#-16 // ABI says so ldp x13,x14,[x3],#16 // np[0..1] mul x6,x7,x9 // ap[0]*bp[0] sub x21,x5,#16 // j=num-2 umulh x7,x7,x9 mul x10,x8,x9 // ap[1]*bp[0] umulh x11,x8,x9 mul x15,x6,x4 // "tp[0]"*n0 mov sp,x22 // alloca // (*) mul x12,x13,x15 // np[0]*m1 umulh x13,x13,x15 mul x16,x14,x15 // np[1]*m1 // (*) adds x12,x12,x6 // discarded // (*) As for removal of first multiplication and addition // instructions. The outcome of first addition is // guaranteed to be zero, which leaves two computationally // significant outcomes: it either carries or not. Then // question is when does it carry? Is there alternative // way to deduce it? If you follow operations, you can // observe that condition for carry is quite simple: // x6 being non-zero. So that carry can be calculated // by adding -1 to x6. That's what next instruction does. subs xzr,x6,#1 // (*) umulh x17,x14,x15 adc x13,x13,xzr cbz x21,.L1st_skip .L1st: ldr x8,[x1],#8 adds x6,x10,x7 sub x21,x21,#8 // j-- adc x7,x11,xzr ldr x14,[x3],#8 adds x12,x16,x13 mul x10,x8,x9 // ap[j]*bp[0] adc x13,x17,xzr umulh x11,x8,x9 adds x12,x12,x6 mul x16,x14,x15 // np[j]*m1 adc x13,x13,xzr umulh x17,x14,x15 str x12,[x22],#8 // tp[j-1] cbnz x21,.L1st .L1st_skip: adds x6,x10,x7 sub x1,x1,x5 // rewind x1 adc x7,x11,xzr adds x12,x16,x13 sub x3,x3,x5 // rewind x3 adc x13,x17,xzr adds x12,x12,x6 sub x20,x5,#8 // i=num-1 adcs x13,x13,x7 adc x19,xzr,xzr // upmost overflow bit stp x12,x13,[x22] .Louter: ldr x9,[x2],#8 // bp[i] ldp x7,x8,[x1],#16 ldr x23,[sp] // tp[0] add x22,sp,#8 mul x6,x7,x9 // ap[0]*bp[i] sub x21,x5,#16 // j=num-2 umulh x7,x7,x9 ldp x13,x14,[x3],#16 mul x10,x8,x9 // ap[1]*bp[i] adds x6,x6,x23 umulh x11,x8,x9 adc x7,x7,xzr mul x15,x6,x4 sub x20,x20,#8 // i-- // (*) mul x12,x13,x15 // np[0]*m1 umulh x13,x13,x15 mul x16,x14,x15 // np[1]*m1 // (*) adds x12,x12,x6 subs xzr,x6,#1 // (*) umulh x17,x14,x15 cbz x21,.Linner_skip .Linner: ldr x8,[x1],#8 adc x13,x13,xzr ldr x23,[x22],#8 // tp[j] adds x6,x10,x7 sub x21,x21,#8 // j-- adc x7,x11,xzr adds x12,x16,x13 ldr x14,[x3],#8 adc x13,x17,xzr mul x10,x8,x9 // ap[j]*bp[i] adds x6,x6,x23 umulh x11,x8,x9 adc x7,x7,xzr mul x16,x14,x15 // np[j]*m1 adds x12,x12,x6 umulh x17,x14,x15 str x12,[x22,#-16] // tp[j-1] cbnz x21,.Linner .Linner_skip: ldr x23,[x22],#8 // tp[j] adc x13,x13,xzr adds x6,x10,x7 sub x1,x1,x5 // rewind x1 adc x7,x11,xzr adds x12,x16,x13 sub x3,x3,x5 // rewind x3 adcs x13,x17,x19 adc x19,xzr,xzr adds x6,x6,x23 adc x7,x7,xzr adds x12,x12,x6 adcs x13,x13,x7 adc x19,x19,xzr // upmost overflow bit stp x12,x13,[x22,#-16] cbnz x20,.Louter // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
ldr x23,[sp] // tp[0] add x22,sp,#8 ldr x14,[x3],#8 // np[0] subs x21,x5,#8 // j=num-1 and clear borrow mov x1,x0 .Lsub: sbcs x8,x23,x14 // tp[j]-np[j] ldr x23,[x22],#8 sub x21,x21,#8 // j-- ldr x14,[x3],#8 str x8,[x1],#8 // rp[j]=tp[j]-np[j] cbnz x21,.Lsub sbcs x8,x23,x14 sbcs x19,x19,xzr // did it borrow? str x8,[x1],#8 // rp[num-1] ldr x23,[sp] // tp[0] add x22,sp,#8 ldr x8,[x0],#8 // rp[0] sub x5,x5,#8 // num-- nop .Lcond_copy: sub x5,x5,#8 // num-- csel x14,x23,x8,lo // did it borrow? ldr x23,[x22],#8 ldr x8,[x0],#8 str xzr,[x22,#-16] // wipe tp str x14,[x0,#-16] cbnz x5,.Lcond_copy csel x14,x23,x8,lo str xzr,[x22,#-8] // wipe tp str x14,[x0,#-8] ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldr x29,[sp],#64 AARCH64_VALIDATE_LINK_REGISTER ret .size bn_mul_mont_nohw,.-bn_mul_mont_nohw .globl bn_sqr8x_mont .hidden bn_sqr8x_mont .type bn_sqr8x_mont,%function .align 5 bn_sqr8x_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] stp x0,x3,[sp,#96] // offload rp and np ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] ldp x10,x11,[x1,#8*4] ldp x12,x13,[x1,#8*6] sub x2,sp,x5,lsl#4 lsl x5,x5,#3 ldr x4,[x4] // *n0 mov sp,x2 // alloca sub x27,x5,#8*8 b .Lsqr8x_zero_start .Lsqr8x_zero: sub x27,x27,#8*8 stp xzr,xzr,[x2,#8*0] stp xzr,xzr,[x2,#8*2] stp xzr,xzr,[x2,#8*4] stp xzr,xzr,[x2,#8*6] .Lsqr8x_zero_start: stp xzr,xzr,[x2,#8*8] stp xzr,xzr,[x2,#8*10] stp xzr,xzr,[x2,#8*12] stp xzr,xzr,[x2,#8*14] add x2,x2,#8*16 cbnz x27,.Lsqr8x_zero add x3,x1,x5 add x1,x1,#8*8 mov x19,xzr mov x20,xzr mov x21,xzr mov x22,xzr mov x23,xzr mov x24,xzr mov x25,xzr mov x26,xzr mov x2,sp str x4,[x29,#112] // offload n0 // Multiply everything but a[i]*a[i] .align 4 .Lsqr8x_outer_loop: // a[1]a[0] (i) // a[2]a[0] // a[3]a[0] // a[4]a[0] // a[5]a[0] // a[6]a[0] // a[7]a[0] // a[2]a[1] (ii) // a[3]a[1] // a[4]a[1] // a[5]a[1] // a[6]a[1] // a[7]a[1] // a[3]a[2] (iii) // a[4]a[2] // a[5]a[2] // a[6]a[2] // a[7]a[2] // a[4]a[3] (iv) // a[5]a[3] // a[6]a[3] // a[7]a[3] // a[5]a[4] (v) // a[6]a[4] // a[7]a[4] // a[6]a[5] (vi) // a[7]a[5] // a[7]a[6] (vii) mul x14,x7,x6 // lo(a[1..7]*a[0]) (i) mul x15,x8,x6 mul x16,x9,x6 mul x17,x10,x6 adds x20,x20,x14 // t[1]+lo(a[1]*a[0]) mul x14,x11,x6 adcs x21,x21,x15 mul x15,x12,x6 adcs x22,x22,x16 mul x16,x13,x6 adcs x23,x23,x17 umulh x17,x7,x6 // hi(a[1..7]*a[0]) adcs x24,x24,x14 umulh x14,x8,x6 adcs x25,x25,x15 umulh x15,x9,x6 adcs x26,x26,x16 umulh x16,x10,x6 stp x19,x20,[x2],#8*2 // t[0..1] adc x19,xzr,xzr // t[8] adds x21,x21,x17 // t[2]+lo(a[1]*a[0]) umulh x17,x11,x6 adcs x22,x22,x14 umulh x14,x12,x6 adcs x23,x23,x15 umulh x15,x13,x6 adcs x24,x24,x16 mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii) adcs x25,x25,x17 mul x17,x9,x7 adcs x26,x26,x14 mul x14,x10,x7 adc x19,x19,x15 mul x15,x11,x7 adds x22,x22,x16 mul x16,x12,x7 adcs x23,x23,x17 mul x17,x13,x7 adcs x24,x24,x14 umulh x14,x8,x7 // hi(a[2..7]*a[1]) adcs x25,x25,x15 umulh x15,x9,x7 adcs x26,x26,x16 umulh x16,x10,x7 adcs x19,x19,x17 umulh x17,x11,x7 stp x21,x22,[x2],#8*2 // t[2..3] adc x20,xzr,xzr // t[9] adds x23,x23,x14 umulh x14,x12,x7 adcs x24,x24,x15 umulh x15,x13,x7 adcs x25,x25,x16 mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii) adcs x26,x26,x17 mul x17,x10,x8 adcs x19,x19,x14 mul x14,x11,x8 adc x20,x20,x15 mul x15,x12,x8 adds x24,x24,x16 mul x16,x13,x8 adcs x25,x25,x17 umulh x17,x9,x8 // hi(a[3..7]*a[2]) adcs x26,x26,x14 umulh x14,x10,x8 adcs x19,x19,x15 umulh x15,x11,x8 adcs 
x20,x20,x16 umulh x16,x12,x8 stp x23,x24,[x2],#8*2 // t[4..5] adc x21,xzr,xzr // t[10] adds x25,x25,x17 umulh x17,x13,x8 adcs x26,x26,x14 mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv) adcs x19,x19,x15 mul x15,x11,x9 adcs x20,x20,x16 mul x16,x12,x9 adc x21,x21,x17 mul x17,x13,x9 adds x26,x26,x14 umulh x14,x10,x9 // hi(a[4..7]*a[3]) adcs x19,x19,x15 umulh x15,x11,x9 adcs x20,x20,x16 umulh x16,x12,x9 adcs x21,x21,x17 umulh x17,x13,x9 stp x25,x26,[x2],#8*2 // t[6..7] adc x22,xzr,xzr // t[11] adds x19,x19,x14 mul x14,x11,x10 // lo(a[5..7]*a[4]) (v) adcs x20,x20,x15 mul x15,x12,x10 adcs x21,x21,x16 mul x16,x13,x10 adc x22,x22,x17 umulh x17,x11,x10 // hi(a[5..7]*a[4]) adds x20,x20,x14 umulh x14,x12,x10 adcs x21,x21,x15 umulh x15,x13,x10 adcs x22,x22,x16 mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi) adc x23,xzr,xzr // t[12] adds x21,x21,x17 mul x17,x13,x11 adcs x22,x22,x14 umulh x14,x12,x11 // hi(a[6..7]*a[5]) adc x23,x23,x15 umulh x15,x13,x11 adds x22,x22,x16 mul x16,x13,x12 // lo(a[7]*a[6]) (vii) adcs x23,x23,x17 umulh x17,x13,x12 // hi(a[7]*a[6]) adc x24,xzr,xzr // t[13] adds x23,x23,x14 sub x27,x3,x1 // done yet? adc x24,x24,x15 adds x24,x24,x16 sub x14,x3,x5 // rewinded ap adc x25,xzr,xzr // t[14] add x25,x25,x17 cbz x27,.Lsqr8x_outer_break mov x4,x6 ldp x6,x7,[x2,#8*0] ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] adds x19,x19,x6 adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x0,x1 adcs x26,xzr,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved below mov x27,#-8*8 // a[8]a[0] // a[9]a[0] // a[a]a[0] // a[b]a[0] // a[c]a[0] // a[d]a[0] // a[e]a[0] // a[f]a[0] // a[8]a[1] // a[f]a[1]........................ // a[8]a[2] // a[f]a[2]........................ // a[8]a[3] // a[f]a[3]........................ // a[8]a[4] // a[f]a[4]........................ // a[8]a[5] // a[f]a[5]........................ // a[8]a[6] // a[f]a[6]........................ // a[8]a[7] // a[f]a[7]........................ .Lsqr8x_mul: mul x14,x6,x4 adc x28,xzr,xzr // carry bit, modulo-scheduled mul x15,x7,x4 add x27,x27,#8 mul x16,x8,x4 mul x17,x9,x4 adds x19,x19,x14 mul x14,x10,x4 adcs x20,x20,x15 mul x15,x11,x4 adcs x21,x21,x16 mul x16,x12,x4 adcs x22,x22,x17 mul x17,x13,x4 adcs x23,x23,x14 umulh x14,x6,x4 adcs x24,x24,x15 umulh x15,x7,x4 adcs x25,x25,x16 umulh x16,x8,x4 adcs x26,x26,x17 umulh x17,x9,x4 adc x28,x28,xzr str x19,[x2],#8 adds x19,x20,x14 umulh x14,x10,x4 adcs x20,x21,x15 umulh x15,x11,x4 adcs x21,x22,x16 umulh x16,x12,x4 adcs x22,x23,x17 umulh x17,x13,x4 ldr x4,[x0,x27] adcs x23,x24,x14 adcs x24,x25,x15 adcs x25,x26,x16 adcs x26,x28,x17 //adc x28,xzr,xzr // moved above cbnz x27,.Lsqr8x_mul // note that carry flag is guaranteed // to be zero at this point cmp x1,x3 // done yet? b.eq .Lsqr8x_break ldp x6,x7,[x2,#8*0] ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] adds x19,x19,x6 ldr x4,[x0,#-8*8] adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x27,#-8*8 adcs x26,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved above b .Lsqr8x_mul .align 4 .Lsqr8x_break: ldp x6,x7,[x0,#8*0] add x1,x0,#8*8 ldp x8,x9,[x0,#8*2] sub x14,x3,x1 // is it last iteration? 
ldp x10,x11,[x0,#8*4] sub x15,x2,x14 ldp x12,x13,[x0,#8*6] cbz x14,.Lsqr8x_outer_loop stp x19,x20,[x2,#8*0] ldp x19,x20,[x15,#8*0] stp x21,x22,[x2,#8*2] ldp x21,x22,[x15,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[x15,#8*4] stp x25,x26,[x2,#8*6] mov x2,x15 ldp x25,x26,[x15,#8*6] b .Lsqr8x_outer_loop .align 4 .Lsqr8x_outer_break: // Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0] ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0] ldp x15,x16,[sp,#8*1] ldp x11,x13,[x14,#8*2] add x1,x14,#8*4 ldp x17,x14,[sp,#8*3] stp x19,x20,[x2,#8*0] mul x19,x7,x7 stp x21,x22,[x2,#8*2] umulh x7,x7,x7 stp x23,x24,[x2,#8*4] mul x8,x9,x9 stp x25,x26,[x2,#8*6] mov x2,sp umulh x9,x9,x9 adds x20,x7,x15,lsl#1 extr x15,x16,x15,#63 sub x27,x5,#8*4 .Lsqr4x_shift_n_add: adcs x21,x8,x15 extr x16,x17,x16,#63 sub x27,x27,#8*4 adcs x22,x9,x16 ldp x15,x16,[x2,#8*5] mul x10,x11,x11 ldp x7,x9,[x1],#8*2 umulh x11,x11,x11 mul x12,x13,x13 umulh x13,x13,x13 extr x17,x14,x17,#63 stp x19,x20,[x2,#8*0] adcs x23,x10,x17 extr x14,x15,x14,#63 stp x21,x22,[x2,#8*2] adcs x24,x11,x14 ldp x17,x14,[x2,#8*7] extr x15,x16,x15,#63 adcs x25,x12,x15 extr x16,x17,x16,#63 adcs x26,x13,x16 ldp x15,x16,[x2,#8*9] mul x6,x7,x7 ldp x11,x13,[x1],#8*2 umulh x7,x7,x7 mul x8,x9,x9 umulh x9,x9,x9 stp x23,x24,[x2,#8*4] extr x17,x14,x17,#63 stp x25,x26,[x2,#8*6] add x2,x2,#8*8 adcs x19,x6,x17 extr x14,x15,x14,#63 adcs x20,x7,x14 ldp x17,x14,[x2,#8*3] extr x15,x16,x15,#63 cbnz x27,.Lsqr4x_shift_n_add ldp x1,x4,[x29,#104] // pull np and n0 adcs x21,x8,x15 extr x16,x17,x16,#63 adcs x22,x9,x16 ldp x15,x16,[x2,#8*5] mul x10,x11,x11 umulh x11,x11,x11 stp x19,x20,[x2,#8*0] mul x12,x13,x13 umulh x13,x13,x13 stp x21,x22,[x2,#8*2] extr x17,x14,x17,#63 adcs x23,x10,x17 extr x14,x15,x14,#63 ldp x19,x20,[sp,#8*0] adcs x24,x11,x14 extr x15,x16,x15,#63 ldp x6,x7,[x1,#8*0] adcs x25,x12,x15 extr x16,xzr,x16,#63 ldp x8,x9,[x1,#8*2] adc x26,x13,x16 ldp x10,x11,[x1,#8*4] // Reduce by 512 bits per iteration mul x28,x4,x19 // t[0]*n0 ldp x12,x13,[x1,#8*6] add x3,x1,x5 ldp x21,x22,[sp,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[sp,#8*4] stp x25,x26,[x2,#8*6] ldp x25,x26,[sp,#8*6] add x1,x1,#8*8 mov x30,xzr // initial top-most carry mov x2,sp mov x27,#8 .Lsqr8x_reduction: // (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0) mul x15,x7,x28 sub x27,x27,#1 mul x16,x8,x28 str x28,[x2],#8 // put aside t[0]*n0 for tail processing mul x17,x9,x28 // (*) adds xzr,x19,x14 subs xzr,x19,#1 // (*) mul x14,x10,x28 adcs x19,x20,x15 mul x15,x11,x28 adcs x20,x21,x16 mul x16,x12,x28 adcs x21,x22,x17 mul x17,x13,x28 adcs x22,x23,x14 umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0) adcs x23,x24,x15 umulh x15,x7,x28 adcs x24,x25,x16 umulh x16,x8,x28 adcs x25,x26,x17 umulh x17,x9,x28 adc x26,xzr,xzr adds x19,x19,x14 umulh x14,x10,x28 adcs x20,x20,x15 umulh x15,x11,x28 adcs x21,x21,x16 umulh x16,x12,x28 adcs x22,x22,x17 umulh x17,x13,x28 mul x28,x4,x19 // next t[0]*n0 adcs x23,x23,x14 adcs x24,x24,x15 adcs x25,x25,x16 adc x26,x26,x17 cbnz x27,.Lsqr8x_reduction ldp x14,x15,[x2,#8*0] ldp x16,x17,[x2,#8*2] mov x0,x2 sub x27,x3,x1 // done yet? 
adds x19,x19,x14 adcs x20,x20,x15 ldp x14,x15,[x2,#8*4] adcs x21,x21,x16 adcs x22,x22,x17 ldp x16,x17,[x2,#8*6] adcs x23,x23,x14 adcs x24,x24,x15 adcs x25,x25,x16 adcs x26,x26,x17 //adc x28,xzr,xzr // moved below cbz x27,.Lsqr8x8_post_condition ldr x4,[x2,#-8*8] ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] ldp x10,x11,[x1,#8*4] mov x27,#-8*8 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 .Lsqr8x_tail: mul x14,x6,x4 adc x28,xzr,xzr // carry bit, modulo-scheduled mul x15,x7,x4 add x27,x27,#8 mul x16,x8,x4 mul x17,x9,x4 adds x19,x19,x14 mul x14,x10,x4 adcs x20,x20,x15 mul x15,x11,x4 adcs x21,x21,x16 mul x16,x12,x4 adcs x22,x22,x17 mul x17,x13,x4 adcs x23,x23,x14 umulh x14,x6,x4 adcs x24,x24,x15 umulh x15,x7,x4 adcs x25,x25,x16 umulh x16,x8,x4 adcs x26,x26,x17 umulh x17,x9,x4 adc x28,x28,xzr str x19,[x2],#8 adds x19,x20,x14 umulh x14,x10,x4 adcs x20,x21,x15 umulh x15,x11,x4 adcs x21,x22,x16 umulh x16,x12,x4 adcs x22,x23,x17 umulh x17,x13,x4 ldr x4,[x0,x27] adcs x23,x24,x14 adcs x24,x25,x15 adcs x25,x26,x16 adcs x26,x28,x17 //adc x28,xzr,xzr // moved above cbnz x27,.Lsqr8x_tail // note that carry flag is guaranteed // to be zero at this point ldp x6,x7,[x2,#8*0] sub x27,x3,x1 // done yet? sub x16,x3,x5 // rewinded np ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] cbz x27,.Lsqr8x_tail_break ldr x4,[x0,#-8*8] adds x19,x19,x6 adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x27,#-8*8 adcs x26,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved above b .Lsqr8x_tail .align 4 .Lsqr8x_tail_break: ldr x4,[x29,#112] // pull n0 add x27,x2,#8*8 // end of current t[num] window subs xzr,x30,#1 // "move" top-most carry to carry bit adcs x14,x19,x6 adcs x15,x20,x7 ldp x19,x20,[x0,#8*0] adcs x21,x21,x8 ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0] adcs x22,x22,x9 ldp x8,x9,[x16,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x16,#8*4] adcs x25,x25,x12 adcs x26,x26,x13 ldp x12,x13,[x16,#8*6] add x1,x16,#8*8 adc x30,xzr,xzr // top-most carry mul x28,x4,x19 stp x14,x15,[x2,#8*0] stp x21,x22,[x2,#8*2] ldp x21,x22,[x0,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[x0,#8*4] cmp x27,x29 // did we hit the bottom? stp x25,x26,[x2,#8*6] mov x2,x0 // slide the window ldp x25,x26,[x0,#8*6] mov x27,#8 b.ne .Lsqr8x_reduction // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. ldr x0,[x29,#96] // pull rp add x2,x2,#8*8 subs x14,x19,x6 sbcs x15,x20,x7 sub x27,x5,#8*8 mov x3,x0 // x0 copy .Lsqr8x_sub: sbcs x16,x21,x8 ldp x6,x7,[x1,#8*0] sbcs x17,x22,x9 stp x14,x15,[x0,#8*0] sbcs x14,x23,x10 ldp x8,x9,[x1,#8*2] sbcs x15,x24,x11 stp x16,x17,[x0,#8*2] sbcs x16,x25,x12 ldp x10,x11,[x1,#8*4] sbcs x17,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 ldp x19,x20,[x2,#8*0] sub x27,x27,#8*8 ldp x21,x22,[x2,#8*2] ldp x23,x24,[x2,#8*4] ldp x25,x26,[x2,#8*6] add x2,x2,#8*8 stp x14,x15,[x0,#8*4] sbcs x14,x19,x6 stp x16,x17,[x0,#8*6] add x0,x0,#8*8 sbcs x15,x20,x7 cbnz x27,.Lsqr8x_sub sbcs x16,x21,x8 mov x2,sp add x1,sp,x5 ldp x6,x7,[x3,#8*0] sbcs x17,x22,x9 stp x14,x15,[x0,#8*0] sbcs x14,x23,x10 ldp x8,x9,[x3,#8*2] sbcs x15,x24,x11 stp x16,x17,[x0,#8*2] sbcs x16,x25,x12 ldp x19,x20,[x1,#8*0] sbcs x17,x26,x13 ldp x21,x22,[x1,#8*2] sbcs xzr,x30,xzr // did it borrow? 
ldr x30,[x29,#8] // pull return address stp x14,x15,[x0,#8*4] stp x16,x17,[x0,#8*6] sub x27,x5,#8*4 .Lsqr4x_cond_copy: sub x27,x27,#8*4 csel x14,x19,x6,lo stp xzr,xzr,[x2,#8*0] csel x15,x20,x7,lo ldp x6,x7,[x3,#8*4] ldp x19,x20,[x1,#8*4] csel x16,x21,x8,lo stp xzr,xzr,[x2,#8*2] add x2,x2,#8*4 csel x17,x22,x9,lo ldp x8,x9,[x3,#8*6] ldp x21,x22,[x1,#8*6] add x1,x1,#8*4 stp x14,x15,[x3,#8*0] stp x16,x17,[x3,#8*2] add x3,x3,#8*4 stp xzr,xzr,[x1,#8*0] stp xzr,xzr,[x1,#8*2] cbnz x27,.Lsqr4x_cond_copy csel x14,x19,x6,lo stp xzr,xzr,[x2,#8*0] csel x15,x20,x7,lo stp xzr,xzr,[x2,#8*2] csel x16,x21,x8,lo csel x17,x22,x9,lo stp x14,x15,[x3,#8*0] stp x16,x17,[x3,#8*2] b .Lsqr8x_done .align 4 .Lsqr8x8_post_condition: adc x28,xzr,xzr ldr x30,[x29,#8] // pull return address // x19-7,x28 hold result, x6-7 hold modulus subs x6,x19,x6 ldr x1,[x29,#96] // pull rp sbcs x7,x20,x7 stp xzr,xzr,[sp,#8*0] sbcs x8,x21,x8 stp xzr,xzr,[sp,#8*2] sbcs x9,x22,x9 stp xzr,xzr,[sp,#8*4] sbcs x10,x23,x10 stp xzr,xzr,[sp,#8*6] sbcs x11,x24,x11 stp xzr,xzr,[sp,#8*8] sbcs x12,x25,x12 stp xzr,xzr,[sp,#8*10] sbcs x13,x26,x13 stp xzr,xzr,[sp,#8*12] sbcs x28,x28,xzr // did it borrow? stp xzr,xzr,[sp,#8*14] // x6-7 hold result-modulus csel x6,x19,x6,lo csel x7,x20,x7,lo csel x8,x21,x8,lo csel x9,x22,x9,lo stp x6,x7,[x1,#8*0] csel x10,x23,x10,lo csel x11,x24,x11,lo stp x8,x9,[x1,#8*2] csel x12,x25,x12,lo csel x13,x26,x13,lo stp x10,x11,[x1,#8*4] stp x12,x13,[x1,#8*6] .Lsqr8x_done: ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 // x30 is popped earlier AARCH64_VALIDATE_LINK_REGISTER ret .size bn_sqr8x_mont,.-bn_sqr8x_mont .globl bn_mul4x_mont .hidden bn_mul4x_mont .type bn_mul4x_mont,%function .align 5 bn_mul4x_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub x26,sp,x5,lsl#3 lsl x5,x5,#3 ldr x4,[x4] // *n0 sub sp,x26,#8*4 // alloca add x10,x2,x5 add x27,x1,x5 stp x0,x10,[x29,#96] // offload rp and &b[num] ldr x24,[x2,#8*0] // b[0] ldp x6,x7,[x1,#8*0] // a[0..3] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 mov x19,xzr mov x20,xzr mov x21,xzr mov x22,xzr ldp x14,x15,[x3,#8*0] // n[0..3] ldp x16,x17,[x3,#8*2] adds x3,x3,#8*4 // clear carry bit mov x0,xzr mov x28,#0 mov x26,sp .Loop_mul4x_1st_reduction: mul x10,x6,x24 // lo(a[0..3]*b[0]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[0..3]*b[0]) adcs x20,x20,x11 mul x25,x19,x4 // t[0]*n0 adcs x21,x21,x12 umulh x11,x7,x24 adcs x22,x22,x13 umulh x12,x8,x24 adc x23,xzr,xzr umulh x13,x9,x24 ldr x24,[x2,x28] // next b[i] (or b[0]) adds x20,x20,x10 // (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0) str x25,[x26],#8 // put aside t[0]*n0 for tail processing adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 // (*) adds xzr,x19,x10 subs xzr,x19,#1 // (*) umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0) adcs x19,x20,x11 umulh x11,x15,x25 adcs x20,x21,x12 umulh x12,x16,x25 adcs x21,x22,x13 umulh x13,x17,x25 adcs x22,x23,x0 adc x0,xzr,xzr adds x19,x19,x10 sub x10,x27,x1 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr cbnz x28,.Loop_mul4x_1st_reduction cbz x10,.Lmul4x4_post_condition ldp x6,x7,[x1,#8*0] // a[4..7] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 ldr x25,[sp] // a[0]*n0 ldp x14,x15,[x3,#8*0] // n[4..7] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 .Loop_mul4x_1st_tail: mul x10,x6,x24 // lo(a[4..7]*b[i]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[4..7]*b[i]) adcs x20,x20,x11 umulh x11,x7,x24 adcs x21,x21,x12 umulh x12,x8,x24 adcs x22,x22,x13 umulh x13,x9,x24 adc x23,xzr,xzr ldr x24,[x2,x28] // next b[i] (or b[0]) adds x20,x20,x10 mul x10,x14,x25 // lo(n[4..7]*a[0]*n0) adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 adds x19,x19,x10 umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0) adcs x20,x20,x11 umulh x11,x15,x25 adcs x21,x21,x12 umulh x12,x16,x25 adcs x22,x22,x13 adcs x23,x23,x0 umulh x13,x17,x25 adc x0,xzr,xzr ldr x25,[sp,x28] // next t[0]*n0 str x19,[x26],#8 // result!!! adds x19,x20,x10 sub x10,x27,x1 // done yet? adcs x20,x21,x11 adcs x21,x22,x12 adcs x22,x23,x13 //adc x0,x0,xzr cbnz x28,.Loop_mul4x_1st_tail sub x11,x27,x5 // rewinded x1 cbz x10,.Lmul4x_proceed ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 ldp x14,x15,[x3,#8*0] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 b .Loop_mul4x_1st_tail .align 5 .Lmul4x_proceed: ldr x24,[x2,#8*4]! // *++b adc x30,x0,xzr ldp x6,x7,[x11,#8*0] // a[0..3] sub x3,x3,x5 // rewind np ldp x8,x9,[x11,#8*2] add x1,x11,#8*4 stp x19,x20,[x26,#8*0] // result!!! ldp x19,x20,[sp,#8*4] // t[0..3] stp x21,x22,[x26,#8*2] // result!!! 
ldp x21,x22,[sp,#8*6] ldp x14,x15,[x3,#8*0] // n[0..3] mov x26,sp ldp x16,x17,[x3,#8*2] adds x3,x3,#8*4 // clear carry bit mov x0,xzr .align 4 .Loop_mul4x_reduction: mul x10,x6,x24 // lo(a[0..3]*b[4]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[0..3]*b[4]) adcs x20,x20,x11 mul x25,x19,x4 // t[0]*n0 adcs x21,x21,x12 umulh x11,x7,x24 adcs x22,x22,x13 umulh x12,x8,x24 adc x23,xzr,xzr umulh x13,x9,x24 ldr x24,[x2,x28] // next b[i] adds x20,x20,x10 // (*) mul x10,x14,x25 str x25,[x26],#8 // put aside t[0]*n0 for tail processing adcs x21,x21,x11 mul x11,x15,x25 // lo(n[0..3]*t[0]*n0 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 // (*) adds xzr,x19,x10 subs xzr,x19,#1 // (*) umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0 adcs x19,x20,x11 umulh x11,x15,x25 adcs x20,x21,x12 umulh x12,x16,x25 adcs x21,x22,x13 umulh x13,x17,x25 adcs x22,x23,x0 adc x0,xzr,xzr adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr cbnz x28,.Loop_mul4x_reduction adc x0,x0,xzr ldp x10,x11,[x26,#8*4] // t[4..7] ldp x12,x13,[x26,#8*6] ldp x6,x7,[x1,#8*0] // a[4..7] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr ldr x25,[sp] // t[0]*n0 ldp x14,x15,[x3,#8*0] // n[4..7] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 .align 4 .Loop_mul4x_tail: mul x10,x6,x24 // lo(a[4..7]*b[4]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[4..7]*b[4]) adcs x20,x20,x11 umulh x11,x7,x24 adcs x21,x21,x12 umulh x12,x8,x24 adcs x22,x22,x13 umulh x13,x9,x24 adc x23,xzr,xzr ldr x24,[x2,x28] // next b[i] adds x20,x20,x10 mul x10,x14,x25 // lo(n[4..7]*t[0]*n0) adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 adds x19,x19,x10 umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0) adcs x20,x20,x11 umulh x11,x15,x25 adcs x21,x21,x12 umulh x12,x16,x25 adcs x22,x22,x13 umulh x13,x17,x25 adcs x23,x23,x0 ldr x25,[sp,x28] // next a[0]*n0 adc x0,xzr,xzr str x19,[x26],#8 // result!!! adds x19,x20,x10 sub x10,x27,x1 // done yet? adcs x20,x21,x11 adcs x21,x22,x12 adcs x22,x23,x13 //adc x0,x0,xzr cbnz x28,.Loop_mul4x_tail sub x11,x3,x5 // rewinded np? adc x0,x0,xzr cbz x10,.Loop_mul4x_break ldp x10,x11,[x26,#8*4] ldp x12,x13,[x26,#8*6] ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr ldp x14,x15,[x3,#8*0] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 b .Loop_mul4x_tail .align 4 .Loop_mul4x_break: ldp x12,x13,[x29,#96] // pull rp and &b[num] adds x19,x19,x30 add x2,x2,#8*4 // bp++ adcs x20,x20,xzr sub x1,x1,x5 // rewind ap adcs x21,x21,xzr stp x19,x20,[x26,#8*0] // result!!! adcs x22,x22,xzr ldp x19,x20,[sp,#8*4] // t[0..3] adc x30,x0,xzr stp x21,x22,[x26,#8*2] // result!!! cmp x2,x13 // done yet? ldp x21,x22,[sp,#8*6] ldp x14,x15,[x11,#8*0] // n[0..3] ldp x16,x17,[x11,#8*2] add x3,x11,#8*4 b.eq .Lmul4x_post ldr x24,[x2] ldp x6,x7,[x1,#8*0] // a[0..3] ldp x8,x9,[x1,#8*2] adds x1,x1,#8*4 // clear carry bit mov x0,xzr mov x26,sp b .Loop_mul4x_reduction .align 4 .Lmul4x_post: // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
mov x0,x12 mov x27,x12 // x0 copy subs x10,x19,x14 add x26,sp,#8*8 sbcs x11,x20,x15 sub x28,x5,#8*4 .Lmul4x_sub: sbcs x12,x21,x16 ldp x14,x15,[x3,#8*0] sub x28,x28,#8*4 ldp x19,x20,[x26,#8*0] sbcs x13,x22,x17 ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 ldp x21,x22,[x26,#8*2] add x26,x26,#8*4 stp x10,x11,[x0,#8*0] sbcs x10,x19,x14 stp x12,x13,[x0,#8*2] add x0,x0,#8*4 sbcs x11,x20,x15 cbnz x28,.Lmul4x_sub sbcs x12,x21,x16 mov x26,sp add x1,sp,#8*4 ldp x6,x7,[x27,#8*0] sbcs x13,x22,x17 stp x10,x11,[x0,#8*0] ldp x8,x9,[x27,#8*2] stp x12,x13,[x0,#8*2] ldp x19,x20,[x1,#8*0] ldp x21,x22,[x1,#8*2] sbcs xzr,x30,xzr // did it borrow? ldr x30,[x29,#8] // pull return address sub x28,x5,#8*4 .Lmul4x_cond_copy: sub x28,x28,#8*4 csel x10,x19,x6,lo stp xzr,xzr,[x26,#8*0] csel x11,x20,x7,lo ldp x6,x7,[x27,#8*4] ldp x19,x20,[x1,#8*4] csel x12,x21,x8,lo stp xzr,xzr,[x26,#8*2] add x26,x26,#8*4 csel x13,x22,x9,lo ldp x8,x9,[x27,#8*6] ldp x21,x22,[x1,#8*6] add x1,x1,#8*4 stp x10,x11,[x27,#8*0] stp x12,x13,[x27,#8*2] add x27,x27,#8*4 cbnz x28,.Lmul4x_cond_copy csel x10,x19,x6,lo stp xzr,xzr,[x26,#8*0] csel x11,x20,x7,lo stp xzr,xzr,[x26,#8*2] csel x12,x21,x8,lo stp xzr,xzr,[x26,#8*3] csel x13,x22,x9,lo stp xzr,xzr,[x26,#8*4] stp x10,x11,[x27,#8*0] stp x12,x13,[x27,#8*2] b .Lmul4x_done .align 4 .Lmul4x4_post_condition: adc x0,x0,xzr ldr x1,[x29,#96] // pull rp // x19-3,x0 hold result, x14-7 hold modulus subs x6,x19,x14 ldr x30,[x29,#8] // pull return address sbcs x7,x20,x15 stp xzr,xzr,[sp,#8*0] sbcs x8,x21,x16 stp xzr,xzr,[sp,#8*2] sbcs x9,x22,x17 stp xzr,xzr,[sp,#8*4] sbcs xzr,x0,xzr // did it borrow? stp xzr,xzr,[sp,#8*6] // x6-3 hold result-modulus csel x6,x19,x6,lo csel x7,x20,x7,lo csel x8,x21,x8,lo csel x9,x22,x9,lo stp x6,x7,[x1,#8*0] stp x8,x9,[x1,#8*2] .Lmul4x_done: ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 // x30 is popped earlier AARCH64_VALIDATE_LINK_REGISTER ret .size bn_mul4x_mont,.-bn_mul4x_mont .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 4 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
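Two of the comments in the Montgomery routines above carry the interesting reasoning: the "(*)" note about replacing the first, always-zero multiply-add with `subs xzr,x6,#1`, and the "Final step" note about subtracting the modulus and conditionally keeping the original value. Here is a minimal Rust sketch of both ideas, under the usual Montgomery assumption that n0 == -n[0]^-1 mod 2^64 (n[0] odd); all names are mine and nothing here is part of the generated file.

```rust
/// The "(*)" carry trick: m1 = t0*n0 is chosen so t0 + lo(n[0]*m1) == 0 mod 2^64,
/// so the discarded addition's carry-out is simply (t0 != 0), which the assembly
/// materializes with `subs xzr, x6, #1` instead of doing the multiply-add.
fn discarded_add_carry(t0: u64, np0: u64, n0: u64) -> bool {
    let m1 = t0.wrapping_mul(n0);        // "tp[0]"*n0
    let p = np0.wrapping_mul(m1);        // lo(np[0]*m1), normally discarded
    let (sum, carry) = t0.overflowing_add(p);
    debug_assert_eq!(sum, 0);            // the first addition is guaranteed zero
    debug_assert_eq!(carry, t0 != 0);    // carry iff t0 was non-zero
    carry
}

/// The "Final step": subtract the modulus, see whether the subtraction
/// (including the top-most carry word) borrowed, and keep the original value
/// if it did. The assembly performs the selection with csel/str in .Lcond_copy,
/// .Lsqr4x_cond_copy and .Lmul4x_cond_copy rather than branching on the result.
fn montgomery_final_step(tmp: &[u64], n: &[u64], top_carry: u64) -> Vec<u64> {
    let mut diff = vec![0u64; n.len()];
    let mut borrow = 0u64;
    for i in 0..n.len() {
        let (d1, b1) = tmp[i].overflowing_sub(n[i]);
        let (d2, b2) = d1.overflowing_sub(borrow);
        diff[i] = d2;
        borrow = (b1 | b2) as u64;
    }
    if top_carry < borrow {
        tmp[..n.len()].to_vec()          // it borrowed: result was already < n
    } else {
        diff                             // no borrow: keep the reduced value
    }
}
```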
marvin-hansen/iggy-streaming-system
74,349
thirdparty/crates/ring-0.17.9/pregenerated/chacha20_poly1305_armv8-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include <ring-core/arm_arch.h> .section .rodata .align 7 .Lchacha20_consts: .byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' .Linc: .long 1,2,3,4 .Lrol8: .byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 .Lclamp: .quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC .text .type .Lpoly_hash_ad_internal,%function .align 6 .Lpoly_hash_ad_internal: .cfi_startproc cbnz x4, .Lpoly_hash_intro ret .Lpoly_hash_intro: cmp x4, #16 b.lt .Lpoly_hash_ad_tail ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #16 b .Lpoly_hash_ad_internal .Lpoly_hash_ad_tail: cbz x4, .Lpoly_hash_ad_ret eor v20.16b, v20.16b, v20.16b // Use T0 to load the AAD sub x4, x4, #1 .Lpoly_hash_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, x4] mov v20.b[0], w11 subs x4, x4, #1 b.ge .Lpoly_hash_tail_16_compose mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most .Lpoly_hash_ad_ret: ret .cfi_endproc .size .Lpoly_hash_ad_internal, .-.Lpoly_hash_ad_internal ///////////////////////////////// // // void chacha20_poly1305_seal(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *seal_data); // .globl chacha20_poly1305_seal .hidden chacha20_poly1305_seal .type chacha20_poly1305_seal,%function .align 6 chacha20_poly1305_seal: AARCH64_SIGN_LINK_REGISTER .cfi_startproc stp x29, x30, [sp, #-80]! .cfi_def_cfa_offset 80 .cfi_offset w30, -72 .cfi_offset w29, -80 mov x29, sp // We probably could do .cfi_def_cfa w29, 80 at this point, but since // we don't actually use the frame pointer like that, it's probably not // worth bothering. 
stp d8, d9, [sp, #16] stp d10, d11, [sp, #32] stp d12, d13, [sp, #48] stp d14, d15, [sp, #64] .cfi_offset b15, -8 .cfi_offset b14, -16 .cfi_offset b13, -24 .cfi_offset b12, -32 .cfi_offset b11, -40 .cfi_offset b10, -48 .cfi_offset b9, -56 .cfi_offset b8, -64 adrp x11, .Lchacha20_consts add x11, x11, :lo12:.Lchacha20_consts ld1 {v24.16b - v27.16b}, [x11] // .Load the CONSTS, INC, ROL8 and CLAMP values ld1 {v28.16b - v30.16b}, [x5] mov x15, #1 // Prepare the Poly1305 state mov x8, #0 mov x9, #0 mov x10, #0 ldr x12, [x5, #56] // The total cipher text length includes extra_in_len add x12, x12, x2 mov v31.d[0], x4 // Store the input and aad lengths mov v31.d[1], x12 cmp x2, #128 b.le .Lseal_128 // Optimization for smaller buffers // Initially we prepare 5 ChaCha20 blocks. Four to encrypt up to 4 blocks (256 bytes) of plaintext, // and one for the Poly1305 R and S keys. The first four blocks (A0-A3..D0-D3) are computed vertically, // the fifth block (A4-D4) horizontally. ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b sub x5, x5, #32 mov x6, #10 .align 5 .Lseal_init_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, 
v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x6, x6, #1 b.hi .Lseal_init_rounds add v15.4s, v15.4s, v25.4s mov x11, #4 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s and v4.16b, v4.16b, v27.16b add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s mov x16, v4.d[0] // Move the R key to GPRs mov x17, v4.d[1] mov v27.16b, v9.16b // Store the S key bl .Lpoly_hash_ad_internal mov x3, x0 cmp x2, #256 b.le .Lseal_tail ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 
{v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #256 mov x6, #4 // In the first run of the loop we need to hash 256 bytes, therefore we hash one block for the first 4 rounds mov x7, #6 // and two blocks for the remaining 6, for a total of (1 * 4 + 2 * 6) * 16 = 256 .Lseal_main_loop: adrp x11, .Lchacha20_consts add x11, x11, :lo12:.Lchacha20_consts ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s sub x5, x5, #32 .align 5 .Lseal_main_loop_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 
adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x6, x6, #1 b.ge .Lseal_main_loop_rounds ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most subs x7, x7, #1 b.gt .Lseal_main_loop_rounds eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s add v15.4s, v15.4s, v25.4s mov x11, #5 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 
v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s add v14.4s, v14.4s, v29.4s add v19.4s, v19.4s, v30.4s cmp x2, #320 b.le .Lseal_tail ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v4.16b eor v21.16b, v21.16b, v9.16b eor v22.16b, v22.16b, v14.16b eor v23.16b, v23.16b, v19.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #320 mov x6, #0 mov x7, #10 // For the remainder of the loop we always hash and encrypt 320 bytes per iteration b .Lseal_main_loop .Lseal_tail: // This part of the function handles the storage and authentication of the last [0,320) bytes // We assume A0-A4 ... D0-D4 hold at least inl (320 max) bytes of the stream data. 
cmp x2, #64 b.lt .Lseal_tail_64 // Store and authenticate 64B blocks per iteration ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v22.d[0] mov x12, v22.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v23.d[0] mov x12, v23.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 // Shift the state left by 64 bytes for the next iteration of the loop mov v0.16b, v1.16b 
mov v5.16b, v6.16b mov v10.16b, v11.16b mov v15.16b, v16.16b mov v1.16b, v2.16b mov v6.16b, v7.16b mov v11.16b, v12.16b mov v16.16b, v17.16b mov v2.16b, v3.16b mov v7.16b, v8.16b mov v12.16b, v13.16b mov v17.16b, v18.16b mov v3.16b, v4.16b mov v8.16b, v9.16b mov v13.16b, v14.16b mov v18.16b, v19.16b b .Lseal_tail .Lseal_tail_64: ldp x3, x4, [x5, #48] // extra_in_len and extra_in_ptr // Here we handle the last [0,64) bytes of plaintext cmp x2, #16 b.lt .Lseal_tail_16 // Each iteration encrypt and authenticate a 16B block ld1 {v20.16b}, [x1], #16 eor v20.16b, v20.16b, v0.16b mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most st1 {v20.16b}, [x0], #16 sub x2, x2, #16 // Shift the state left by 16 bytes for the next iteration of the loop mov v0.16b, v5.16b mov v5.16b, v10.16b mov v10.16b, v15.16b b .Lseal_tail_64 .Lseal_tail_16: // Here we handle the last [0,16) bytes of ciphertext that require a padded block cbz x2, .Lseal_hash_extra eor v20.16b, v20.16b, v20.16b // Use T0 to load the plaintext/extra in eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask that will only mask the ciphertext bytes not v22.16b, v20.16b mov x6, x2 add x1, x1, x2 cbz x4, .Lseal_tail_16_compose // No extra data to pad with, zero padding mov x7, #16 // We need to load some extra_in first for padding sub x7, x7, x2 cmp x4, x7 csel x7, x4, x7, lt // .Load the minimum of extra_in_len and the amount needed to fill the register mov x12, x7 add x3, x3, x7 sub x4, x4, x7 .Lseal_tail16_compose_extra_in: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, #-1]! mov v20.b[0], w11 subs x7, x7, #1 b.gt .Lseal_tail16_compose_extra_in add x3, x3, x12 .Lseal_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x1, #-1]! 
mov v20.b[0], w11 ext v21.16b, v22.16b, v21.16b, #15 subs x2, x2, #1 b.gt .Lseal_tail_16_compose and v0.16b, v0.16b, v21.16b eor v20.16b, v20.16b, v0.16b mov v21.16b, v20.16b .Lseal_tail_16_store: umov w11, v20.b[0] strb w11, [x0], #1 ext v20.16b, v20.16b, v20.16b, #1 subs x6, x6, #1 b.gt .Lseal_tail_16_store // Hash in the final ct block concatenated with extra_in mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most .Lseal_hash_extra: cbz x4, .Lseal_finalize .Lseal_hash_extra_loop: cmp x4, #16 b.lt .Lseal_hash_extra_tail ld1 {v20.16b}, [x3], #16 mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #16 b .Lseal_hash_extra_loop .Lseal_hash_extra_tail: cbz x4, .Lseal_finalize eor v20.16b, v20.16b, v20.16b // Use T0 to load the remaining extra ciphertext add x3, x3, x4 .Lseal_hash_extra_load: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, #-1]! 
mov v20.b[0], w11 subs x4, x4, #1 b.gt .Lseal_hash_extra_load // Hash in the final padded extra_in blcok mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most .Lseal_finalize: mov x11, v31.d[0] mov x12, v31.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most // Final reduction step sub x12, xzr, x15 orr x13, xzr, #3 subs x11, x8, #-5 sbcs x12, x9, x12 sbcs x13, x10, x13 csel x8, x11, x8, cs csel x9, x12, x9, cs csel x10, x13, x10, cs mov x11, v27.d[0] mov x12, v27.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 stp x8, x9, [x5] ldp d8, d9, [sp, #16] ldp d10, d11, [sp, #32] ldp d12, d13, [sp, #48] ldp d14, d15, [sp, #64] .cfi_restore b15 .cfi_restore b14 .cfi_restore b13 .cfi_restore b12 .cfi_restore b11 .cfi_restore b10 .cfi_restore b9 .cfi_restore b8 ldp x29, x30, [sp], 80 .cfi_restore w29 .cfi_restore w30 .cfi_def_cfa_offset 0 AARCH64_VALIDATE_LINK_REGISTER ret .Lseal_128: // On some architectures preparing 5 blocks for small buffers is wasteful eor v25.16b, v25.16b, v25.16b mov x11, #1 mov v25.s[0], w11 mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v17.16b, v30.16b add v15.4s, v17.4s, v25.4s add v16.4s, v15.4s, v25.4s mov x6, #10 .Lseal_128_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add 
v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x6, x6, #1 b.hi .Lseal_128_rounds add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s // Only the first 32 bytes of the third block (counter = 0) are needed, // so skip updating v12 and v17. add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v30.4s, v30.4s, v25.4s add v15.4s, v15.4s, v30.4s add v30.4s, v30.4s, v25.4s add v16.4s, v16.4s, v30.4s and v2.16b, v2.16b, v27.16b mov x16, v2.d[0] // Move the R key to GPRs mov x17, v2.d[1] mov v27.16b, v7.16b // Store the S key bl .Lpoly_hash_ad_internal b .Lseal_tail .cfi_endproc .size chacha20_poly1305_seal,.-chacha20_poly1305_seal ///////////////////////////////// // // void chacha20_poly1305_open(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *aead_data); // .globl chacha20_poly1305_open .hidden chacha20_poly1305_open .type chacha20_poly1305_open,%function .align 6 chacha20_poly1305_open: AARCH64_SIGN_LINK_REGISTER .cfi_startproc stp x29, x30, [sp, #-80]! .cfi_def_cfa_offset 80 .cfi_offset w30, -72 .cfi_offset w29, -80 mov x29, sp // We probably could do .cfi_def_cfa w29, 80 at this point, but since // we don't actually use the frame pointer like that, it's probably not // worth bothering. 
stp d8, d9, [sp, #16] stp d10, d11, [sp, #32] stp d12, d13, [sp, #48] stp d14, d15, [sp, #64] .cfi_offset b15, -8 .cfi_offset b14, -16 .cfi_offset b13, -24 .cfi_offset b12, -32 .cfi_offset b11, -40 .cfi_offset b10, -48 .cfi_offset b9, -56 .cfi_offset b8, -64 adrp x11, .Lchacha20_consts add x11, x11, :lo12:.Lchacha20_consts ld1 {v24.16b - v27.16b}, [x11] // .Load the CONSTS, INC, ROL8 and CLAMP values ld1 {v28.16b - v30.16b}, [x5] mov x15, #1 // Prepare the Poly1305 state mov x8, #0 mov x9, #0 mov x10, #0 mov v31.d[0], x4 // Store the input and aad lengths mov v31.d[1], x2 cmp x2, #128 b.le .Lopen_128 // Optimization for smaller buffers // Initially we prepare a single ChaCha20 block for the Poly1305 R and S keys mov v0.16b, v24.16b mov v5.16b, v28.16b mov v10.16b, v29.16b mov v15.16b, v30.16b mov x6, #10 .align 5 .Lopen_init_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 subs x6, x6, #1 b.hi .Lopen_init_rounds add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s and v0.16b, v0.16b, v27.16b mov x16, v0.d[0] // Move the R key to GPRs mov x17, v0.d[1] mov v27.16b, v5.16b // Store the S key bl .Lpoly_hash_ad_internal .Lopen_ad_done: mov x3, x1 // Each iteration of the loop hash 320 bytes, and prepare stream for 320 bytes .Lopen_main_loop: cmp x2, #192 b.lt .Lopen_tail adrp x11, .Lchacha20_consts add x11, x11, :lo12:.Lchacha20_consts ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] sub x5, x5, #32 add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s lsr x4, x2, #4 // How many whole blocks we have to hash, will always be at least 12 sub x4, x4, #10 mov x7, #10 subs x6, x7, x4 subs x6, x7, x4 // itr1 can be negative if we have more than 320 bytes to hash csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are full cbz x7, .Lopen_main_loop_rounds_short .align 5 .Lopen_main_loop_rounds: ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 
adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most .Lopen_main_loop_rounds_short: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor 
v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x7, x7, #1 b.gt .Lopen_main_loop_rounds subs x6, x6, #1 b.ge .Lopen_main_loop_rounds_short eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s add v15.4s, v15.4s, v25.4s mov x11, #5 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s add v14.4s, v14.4s, v29.4s add v19.4s, v19.4s, v30.4s // We can always safely store 192 bytes ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - 
v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #192 mov v0.16b, v3.16b mov v5.16b, v8.16b mov v10.16b, v13.16b mov v15.16b, v18.16b cmp x2, #64 b.lt .Lopen_tail_64_store ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 mov v0.16b, v4.16b mov v5.16b, v9.16b mov v10.16b, v14.16b mov v15.16b, v19.16b cmp x2, #64 b.lt .Lopen_tail_64_store ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v4.16b eor v21.16b, v21.16b, v9.16b eor v22.16b, v22.16b, v14.16b eor v23.16b, v23.16b, v19.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 b .Lopen_main_loop .Lopen_tail: cbz x2, .Lopen_finalize lsr x4, x2, #4 // How many whole blocks we have to hash cmp x2, #64 b.le .Lopen_tail_64 cmp x2, #128 b.le .Lopen_tail_128 .Lopen_tail_192: // We need three more blocks mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v15.16b, v30.16b mov v16.16b, v30.16b mov v17.16b, v30.16b eor v23.16b, v23.16b, v23.16b eor v21.16b, v21.16b, v21.16b ins v23.s[0], v25.s[0] ins v21.d[0], x15 add v22.4s, v23.4s, v21.4s add v21.4s, v22.4s, v21.4s add v15.4s, v15.4s, v21.4s add v16.4s, v16.4s, v23.4s add v17.4s, v17.4s, v22.4s mov x7, #10 subs x6, x7, x4 // itr1 can be negative if we have more than 160 bytes to hash csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are hashing sub x4, x4, x7 cbz x7, .Lopen_tail_192_rounds_no_hash .Lopen_tail_192_rounds: ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most .Lopen_tail_192_rounds_no_hash: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b 
eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x7, x7, #1 b.gt .Lopen_tail_192_rounds subs x6, x6, #1 b.ge .Lopen_tail_192_rounds_no_hash // We hashed 160 bytes at most, may still have 32 bytes left .Lopen_tail_192_hash: cbz x4, .Lopen_tail_192_hash_done ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #1 b .Lopen_tail_192_hash .Lopen_tail_192_hash_done: add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v12.4s, v12.4s, v29.4s add v15.4s, v15.4s, v30.4s add v16.4s, v16.4s, v30.4s add v17.4s, v17.4s, v30.4s add v15.4s, v15.4s, v21.4s add v16.4s, v16.4s, v23.4s add v17.4s, v17.4s, v22.4s ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor 
v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #128 b .Lopen_tail_64_store .Lopen_tail_128: // We need two more blocks mov v0.16b, v24.16b mov v1.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v15.16b, v30.16b mov v16.16b, v30.16b eor v23.16b, v23.16b, v23.16b eor v22.16b, v22.16b, v22.16b ins v23.s[0], v25.s[0] ins v22.d[0], x15 add v22.4s, v22.4s, v23.4s add v15.4s, v15.4s, v22.4s add v16.4s, v16.4s, v23.4s mov x6, #10 sub x6, x6, x4 .Lopen_tail_128_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v1.4s, v1.4s, v6.4s eor v16.16b, v16.16b, v1.16b rev32 v16.8h, v16.8h add v11.4s, v11.4s, v16.4s eor v6.16b, v6.16b, v11.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 add v1.4s, v1.4s, v20.4s eor v16.16b, v16.16b, v1.16b tbl v16.16b, {v16.16b}, v26.16b add v11.4s, v11.4s, v16.4s eor v20.16b, v20.16b, v11.16b ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v6.16b, v6.16b, v6.16b, #4 ext v11.16b, v11.16b, v11.16b, #8 ext v16.16b, v16.16b, v16.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 add v1.4s, v1.4s, v6.4s eor v16.16b, v16.16b, v1.16b rev32 v16.8h, v16.8h add v11.4s, v11.4s, v16.4s eor v6.16b, v6.16b, v11.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 add v1.4s, v1.4s, v20.4s eor v16.16b, v16.16b, v1.16b tbl v16.16b, {v16.16b}, v26.16b add v11.4s, v11.4s, v16.4s eor v20.16b, v20.16b, v11.16b ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v6.16b, v6.16b, v6.16b, #12 ext v11.16b, v11.16b, v11.16b, #8 ext v16.16b, v16.16b, v16.16b, #4 subs x6, x6, #1 b.gt .Lopen_tail_128_rounds cbz x4, .Lopen_tail_128_rounds_done subs x4, x4, #1 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most b .Lopen_tail_128_rounds .Lopen_tail_128_rounds_done: add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v10.4s, v10.4s, v29.4s add 
v11.4s, v11.4s, v29.4s add v15.4s, v15.4s, v30.4s add v16.4s, v16.4s, v30.4s add v15.4s, v15.4s, v22.4s add v16.4s, v16.4s, v23.4s ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 b .Lopen_tail_64_store .Lopen_tail_64: // We just need a single block mov v0.16b, v24.16b mov v5.16b, v28.16b mov v10.16b, v29.16b mov v15.16b, v30.16b eor v23.16b, v23.16b, v23.16b ins v23.s[0], v25.s[0] add v15.4s, v15.4s, v23.4s mov x6, #10 sub x6, x6, x4 .Lopen_tail_64_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 subs x6, x6, #1 b.gt .Lopen_tail_64_rounds cbz x4, .Lopen_tail_64_rounds_done subs x4, x4, #1 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most b .Lopen_tail_64_rounds .Lopen_tail_64_rounds_done: add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v15.4s, v15.4s, v23.4s .Lopen_tail_64_store: cmp x2, #16 b.lt .Lopen_tail_16 ld1 {v20.16b}, [x1], #16 eor v20.16b, v20.16b, v0.16b st1 {v20.16b}, [x0], #16 mov v0.16b, v5.16b mov v5.16b, v10.16b mov v10.16b, v15.16b sub x2, x2, #16 b .Lopen_tail_64_store .Lopen_tail_16: // Here we handle the last [0,16) bytes that require a padded block cbz x2, .Lopen_finalize eor v20.16b, v20.16b, v20.16b // Use T0 to load the ciphertext eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask not v22.16b, v20.16b add x7, x1, x2 mov x6, x2 .Lopen_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x7, #-1]! 
mov v20.b[0], w11 ext v21.16b, v22.16b, v21.16b, #15 subs x2, x2, #1 b.gt .Lopen_tail_16_compose and v20.16b, v20.16b, v21.16b // Hash in the final padded block mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most eor v20.16b, v20.16b, v0.16b .Lopen_tail_16_store: umov w11, v20.b[0] strb w11, [x0], #1 ext v20.16b, v20.16b, v20.16b, #1 subs x6, x6, #1 b.gt .Lopen_tail_16_store .Lopen_finalize: mov x11, v31.d[0] mov x12, v31.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most // Final reduction step sub x12, xzr, x15 orr x13, xzr, #3 subs x11, x8, #-5 sbcs x12, x9, x12 sbcs x13, x10, x13 csel x8, x11, x8, cs csel x9, x12, x9, cs csel x10, x13, x10, cs mov x11, v27.d[0] mov x12, v27.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 stp x8, x9, [x5] ldp d8, d9, [sp, #16] ldp d10, d11, [sp, #32] ldp d12, d13, [sp, #48] ldp d14, d15, [sp, #64] .cfi_restore b15 .cfi_restore b14 .cfi_restore b13 .cfi_restore b12 .cfi_restore b11 .cfi_restore b10 .cfi_restore b9 .cfi_restore b8 ldp x29, x30, [sp], 80 .cfi_restore w29 .cfi_restore w30 .cfi_def_cfa_offset 0 AARCH64_VALIDATE_LINK_REGISTER ret .Lopen_128: // On some architectures preparing 5 blocks for small buffers is wasteful eor v25.16b, v25.16b, v25.16b mov x11, #1 mov v25.s[0], w11 mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v17.16b, v30.16b add v15.4s, v17.4s, v25.4s add v16.4s, v15.4s, v25.4s mov x6, #10 .Lopen_128_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, 
v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x6, x6, #1 b.hi .Lopen_128_rounds add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v30.4s, v30.4s, v25.4s add v15.4s, v15.4s, v30.4s add v30.4s, v30.4s, v25.4s add v16.4s, v16.4s, v30.4s and v2.16b, v2.16b, v27.16b mov x16, v2.d[0] // Move the R key to GPRs mov x17, v2.d[1] mov v27.16b, v7.16b // Store the S key bl .Lpoly_hash_ad_internal .Lopen_128_store: cmp x2, #64 b.lt .Lopen_128_store_64 ld1 {v20.16b - v23.16b}, [x1], #64 mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this 
point acc2 has the value of 4 at most mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v22.d[0] mov x12, v22.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v23.d[0] mov x12, v23.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 mov v0.16b, v1.16b mov v5.16b, v6.16b mov v10.16b, v11.16b mov v15.16b, v16.16b .Lopen_128_store_64: lsr x4, x2, #4 mov x3, x1 .Lopen_128_hash_64: cbz x4, .Lopen_tail_64_store ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, 
x4, #1 b .Lopen_128_hash_64 .cfi_endproc .size chacha20_poly1305_open,.-chacha20_poly1305_open #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
marvin-hansen/iggy-streaming-system
15,121
thirdparty/crates/ring-0.17.9/pregenerated/aesni-x86-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text #ifdef BORINGSSL_DISPATCH_TEST #endif .hidden _aesni_encrypt2 .type _aesni_encrypt2,@function .align 16 _aesni_encrypt2: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx .L000enc2_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%edx,%ecx,1),%xmm0 jnz .L000enc2_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,221,208 .byte 102,15,56,221,216 ret .size _aesni_encrypt2,.-_aesni_encrypt2 .hidden _aesni_encrypt3 .type _aesni_encrypt3,@function .align 16 _aesni_encrypt3: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx addl $16,%ecx .L001enc3_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 movups -16(%edx,%ecx,1),%xmm0 jnz .L001enc3_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 ret .size _aesni_encrypt3,.-_aesni_encrypt3 .hidden _aesni_encrypt4 .type _aesni_encrypt4,@function .align 16 _aesni_encrypt4: movups (%edx),%xmm0 movups 16(%edx),%xmm1 shll $4,%ecx xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 movups 32(%edx),%xmm0 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 15,31,64,0 addl $16,%ecx .L002enc4_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movups -16(%edx,%ecx,1),%xmm0 jnz .L002enc4_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 ret .size _aesni_encrypt4,.-_aesni_encrypt4 .hidden _aesni_encrypt6 .type _aesni_encrypt6,@function .align 16 _aesni_encrypt6: movups (%edx),%xmm0 shll $4,%ecx movups 16(%edx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 .byte 102,15,56,220,209 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 .byte 102,15,56,220,217 leal 32(%edx,%ecx,1),%edx negl %ecx .byte 102,15,56,220,225 pxor %xmm0,%xmm7 movups (%edx,%ecx,1),%xmm0 addl $16,%ecx jmp .L003_aesni_encrypt6_inner .align 16 .L004enc6_loop: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .L003_aesni_encrypt6_inner: .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .L_aesni_encrypt6_enter: movups (%edx,%ecx,1),%xmm1 addl $32,%ecx .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 movups -16(%edx,%ecx,1),%xmm0 jnz .L004enc6_loop .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 .byte 102,15,56,221,240 .byte 102,15,56,221,248 ret 
.size _aesni_encrypt6,.-_aesni_encrypt6 .globl aes_hw_ctr32_encrypt_blocks .hidden aes_hw_ctr32_encrypt_blocks .type aes_hw_ctr32_encrypt_blocks,@function .align 16 aes_hw_ctr32_encrypt_blocks: .L_aes_hw_ctr32_encrypt_blocks_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L005pic_for_function_hit .L005pic_for_function_hit: popl %ebx leal BORINGSSL_function_hit+0-.L005pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 20(%esp),%esi movl 24(%esp),%edi movl 28(%esp),%eax movl 32(%esp),%edx movl 36(%esp),%ebx movl %esp,%ebp subl $88,%esp andl $-16,%esp movl %ebp,80(%esp) cmpl $1,%eax je .L006ctr32_one_shortcut movdqu (%ebx),%xmm7 movl $202182159,(%esp) movl $134810123,4(%esp) movl $67438087,8(%esp) movl $66051,12(%esp) movl $6,%ecx xorl %ebp,%ebp movl %ecx,16(%esp) movl %ecx,20(%esp) movl %ecx,24(%esp) movl %ebp,28(%esp) .byte 102,15,58,22,251,3 .byte 102,15,58,34,253,3 movl 240(%edx),%ecx bswap %ebx pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 movdqa (%esp),%xmm2 .byte 102,15,58,34,195,0 leal 3(%ebx),%ebp .byte 102,15,58,34,205,0 incl %ebx .byte 102,15,58,34,195,1 incl %ebp .byte 102,15,58,34,205,1 incl %ebx .byte 102,15,58,34,195,2 incl %ebp .byte 102,15,58,34,205,2 movdqa %xmm0,48(%esp) .byte 102,15,56,0,194 movdqu (%edx),%xmm6 movdqa %xmm1,64(%esp) .byte 102,15,56,0,202 pshufd $192,%xmm0,%xmm2 pshufd $128,%xmm0,%xmm3 cmpl $6,%eax jb .L007ctr32_tail pxor %xmm6,%xmm7 shll $4,%ecx movl $16,%ebx movdqa %xmm7,32(%esp) movl %edx,%ebp subl %ecx,%ebx leal 32(%edx,%ecx,1),%edx subl $6,%eax jmp .L008ctr32_loop6 .align 16 .L008ctr32_loop6: pshufd $64,%xmm0,%xmm4 movdqa 32(%esp),%xmm0 pshufd $192,%xmm1,%xmm5 pxor %xmm0,%xmm2 pshufd $128,%xmm1,%xmm6 pxor %xmm0,%xmm3 pshufd $64,%xmm1,%xmm7 movups 16(%ebp),%xmm1 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 .byte 102,15,56,220,209 pxor %xmm0,%xmm6 pxor %xmm0,%xmm7 .byte 102,15,56,220,217 movups 32(%ebp),%xmm0 movl %ebx,%ecx .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 call .L_aesni_encrypt6_enter movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps %xmm1,%xmm2 movups 32(%esi),%xmm1 xorps %xmm0,%xmm3 movups %xmm2,(%edi) movdqa 16(%esp),%xmm0 xorps %xmm1,%xmm4 movdqa 64(%esp),%xmm1 movups %xmm3,16(%edi) movups %xmm4,32(%edi) paddd %xmm0,%xmm1 paddd 48(%esp),%xmm0 movdqa (%esp),%xmm2 movups 48(%esi),%xmm3 movups 64(%esi),%xmm4 xorps %xmm3,%xmm5 movups 80(%esi),%xmm3 leal 96(%esi),%esi movdqa %xmm0,48(%esp) .byte 102,15,56,0,194 xorps %xmm4,%xmm6 movups %xmm5,48(%edi) xorps %xmm3,%xmm7 movdqa %xmm1,64(%esp) .byte 102,15,56,0,202 movups %xmm6,64(%edi) pshufd $192,%xmm0,%xmm2 movups %xmm7,80(%edi) leal 96(%edi),%edi pshufd $128,%xmm0,%xmm3 subl $6,%eax jnc .L008ctr32_loop6 addl $6,%eax jz .L009ctr32_ret movdqu (%ebp),%xmm7 movl %ebp,%edx pxor 32(%esp),%xmm7 movl 240(%ebp),%ecx .L007ctr32_tail: por %xmm7,%xmm2 cmpl $2,%eax jb .L010ctr32_one pshufd $64,%xmm0,%xmm4 por %xmm7,%xmm3 je .L011ctr32_two pshufd $192,%xmm1,%xmm5 por %xmm7,%xmm4 cmpl $4,%eax jb .L012ctr32_three pshufd $128,%xmm1,%xmm6 por %xmm7,%xmm5 je .L013ctr32_four por %xmm7,%xmm6 call _aesni_encrypt6 movups (%esi),%xmm1 movups 16(%esi),%xmm0 xorps %xmm1,%xmm2 movups 32(%esi),%xmm1 xorps %xmm0,%xmm3 movups 48(%esi),%xmm0 xorps %xmm1,%xmm4 movups 64(%esi),%xmm1 xorps %xmm0,%xmm5 movups %xmm2,(%edi) xorps %xmm1,%xmm6 movups %xmm3,16(%edi) movups %xmm4,32(%edi) movups %xmm5,48(%edi) movups %xmm6,64(%edi) jmp .L009ctr32_ret .align 16 .L006ctr32_one_shortcut: movups (%ebx),%xmm2 
movl 240(%edx),%ecx .L010ctr32_one: movups (%edx),%xmm0 movups 16(%edx),%xmm1 leal 32(%edx),%edx xorps %xmm0,%xmm2 .L014enc1_loop_1: .byte 102,15,56,220,209 decl %ecx movups (%edx),%xmm1 leal 16(%edx),%edx jnz .L014enc1_loop_1 .byte 102,15,56,221,209 movups (%esi),%xmm6 xorps %xmm2,%xmm6 movups %xmm6,(%edi) jmp .L009ctr32_ret .align 16 .L011ctr32_two: call _aesni_encrypt2 movups (%esi),%xmm5 movups 16(%esi),%xmm6 xorps %xmm5,%xmm2 xorps %xmm6,%xmm3 movups %xmm2,(%edi) movups %xmm3,16(%edi) jmp .L009ctr32_ret .align 16 .L012ctr32_three: call _aesni_encrypt3 movups (%esi),%xmm5 movups 16(%esi),%xmm6 xorps %xmm5,%xmm2 movups 32(%esi),%xmm7 xorps %xmm6,%xmm3 movups %xmm2,(%edi) xorps %xmm7,%xmm4 movups %xmm3,16(%edi) movups %xmm4,32(%edi) jmp .L009ctr32_ret .align 16 .L013ctr32_four: call _aesni_encrypt4 movups (%esi),%xmm6 movups 16(%esi),%xmm7 movups 32(%esi),%xmm1 xorps %xmm6,%xmm2 movups 48(%esi),%xmm0 xorps %xmm7,%xmm3 movups %xmm2,(%edi) xorps %xmm1,%xmm4 movups %xmm3,16(%edi) xorps %xmm0,%xmm5 movups %xmm4,32(%edi) movups %xmm5,48(%edi) .L009ctr32_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 movdqa %xmm0,32(%esp) pxor %xmm5,%xmm5 movdqa %xmm0,48(%esp) pxor %xmm6,%xmm6 movdqa %xmm0,64(%esp) pxor %xmm7,%xmm7 movl 80(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size aes_hw_ctr32_encrypt_blocks,.-.L_aes_hw_ctr32_encrypt_blocks_begin .globl aes_hw_set_encrypt_key_base .hidden aes_hw_set_encrypt_key_base .type aes_hw_set_encrypt_key_base,@function .align 16 aes_hw_set_encrypt_key_base: .L_aes_hw_set_encrypt_key_base_begin: #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L015pic_for_function_hit .L015pic_for_function_hit: popl %ebx leal BORINGSSL_function_hit+3-.L015pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 4(%esp),%eax movl 8(%esp),%ecx movl 12(%esp),%edx pushl %ebx call .L016pic .L016pic: popl %ebx leal .Lkey_const-.L016pic(%ebx),%ebx movups (%eax),%xmm0 xorps %xmm4,%xmm4 leal 16(%edx),%edx cmpl $256,%ecx je .L01714rounds cmpl $128,%ecx jne .L018bad_keybits .align 16 .L01910rounds: movl $9,%ecx movups %xmm0,-16(%edx) .byte 102,15,58,223,200,1 call .L020key_128_cold .byte 102,15,58,223,200,2 call .L021key_128 .byte 102,15,58,223,200,4 call .L021key_128 .byte 102,15,58,223,200,8 call .L021key_128 .byte 102,15,58,223,200,16 call .L021key_128 .byte 102,15,58,223,200,32 call .L021key_128 .byte 102,15,58,223,200,64 call .L021key_128 .byte 102,15,58,223,200,128 call .L021key_128 .byte 102,15,58,223,200,27 call .L021key_128 .byte 102,15,58,223,200,54 call .L021key_128 movups %xmm0,(%edx) movl %ecx,80(%edx) jmp .L022good_key .align 16 .L021key_128: movups %xmm0,(%edx) leal 16(%edx),%edx .L020key_128_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .align 16 .L01714rounds: movups 16(%eax),%xmm2 leal 16(%edx),%edx movl $13,%ecx movups %xmm0,-32(%edx) movups %xmm2,-16(%edx) .byte 102,15,58,223,202,1 call .L023key_256a_cold .byte 102,15,58,223,200,1 call .L024key_256b .byte 102,15,58,223,202,2 call .L025key_256a .byte 102,15,58,223,200,2 call .L024key_256b .byte 102,15,58,223,202,4 call .L025key_256a .byte 102,15,58,223,200,4 call .L024key_256b .byte 102,15,58,223,202,8 call .L025key_256a .byte 102,15,58,223,200,8 call .L024key_256b .byte 102,15,58,223,202,16 call .L025key_256a .byte 102,15,58,223,200,16 call .L024key_256b .byte 102,15,58,223,202,32 call .L025key_256a .byte 102,15,58,223,200,32 call 
.L024key_256b .byte 102,15,58,223,202,64 call .L025key_256a movups %xmm0,(%edx) movl %ecx,16(%edx) xorl %eax,%eax jmp .L022good_key .align 16 .L025key_256a: movups %xmm2,(%edx) leal 16(%edx),%edx .L023key_256a_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .align 16 .L024key_256b: movups %xmm0,(%edx) leal 16(%edx),%edx shufps $16,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $140,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 ret .L022good_key: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 xorl %eax,%eax popl %ebx ret .align 4 .L018bad_keybits: pxor %xmm0,%xmm0 movl $-2,%eax popl %ebx ret .size aes_hw_set_encrypt_key_base,.-.L_aes_hw_set_encrypt_key_base_begin .globl aes_hw_set_encrypt_key_alt .hidden aes_hw_set_encrypt_key_alt .type aes_hw_set_encrypt_key_alt,@function .align 16 aes_hw_set_encrypt_key_alt: .L_aes_hw_set_encrypt_key_alt_begin: #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L026pic_for_function_hit .L026pic_for_function_hit: popl %ebx leal BORINGSSL_function_hit+3-.L026pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 4(%esp),%eax movl 8(%esp),%ecx movl 12(%esp),%edx pushl %ebx call .L027pic .L027pic: popl %ebx leal .Lkey_const-.L027pic(%ebx),%ebx movups (%eax),%xmm0 xorps %xmm4,%xmm4 leal 16(%edx),%edx cmpl $256,%ecx je .L02814rounds_alt cmpl $128,%ecx jne .L029bad_keybits .align 16 .L03010rounds_alt: movdqa (%ebx),%xmm5 movl $8,%ecx movdqa 32(%ebx),%xmm4 movdqa %xmm0,%xmm2 movdqu %xmm0,-16(%edx) .L031loop_key128: .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 leal 16(%edx),%edx movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,-16(%edx) movdqa %xmm0,%xmm2 decl %ecx jnz .L031loop_key128 movdqa 48(%ebx),%xmm4 .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,(%edx) movdqa %xmm0,%xmm2 .byte 102,15,56,0,197 .byte 102,15,56,221,196 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,16(%edx) movl $9,%ecx movl %ecx,96(%edx) jmp .L032good_key .align 16 .L02814rounds_alt: movups 16(%eax),%xmm2 leal 16(%edx),%edx movdqa (%ebx),%xmm5 movdqa 32(%ebx),%xmm4 movl $7,%ecx movdqu %xmm0,-32(%edx) movdqa %xmm2,%xmm1 movdqu %xmm2,-16(%edx) .L033loop_key256: .byte 102,15,56,0,213 .byte 102,15,56,221,212 movdqa %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm3,%xmm0 pslld $1,%xmm4 pxor %xmm2,%xmm0 movdqu %xmm0,(%edx) decl %ecx jz .L034done_key256 pshufd $255,%xmm0,%xmm2 pxor %xmm3,%xmm3 .byte 102,15,56,221,211 movdqa %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm3,%xmm1 pxor %xmm1,%xmm2 movdqu %xmm2,16(%edx) leal 32(%edx),%edx movdqa %xmm2,%xmm1 jmp .L033loop_key256 .L034done_key256: movl $13,%ecx movl %ecx,16(%edx) .L032good_key: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 xorl %eax,%eax popl %ebx ret .align 4 .L029bad_keybits: pxor %xmm0,%xmm0 movl $-2,%eax popl %ebx ret .size aes_hw_set_encrypt_key_alt,.-.L_aes_hw_set_encrypt_key_alt_begin .align 64 
.Lkey_const: .long 202313229,202313229,202313229,202313229 .long 67569157,67569157,67569157,67569157 .long 1,1,1,1 .long 27,27,27,27 .byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69 .byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83 .byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115 .byte 115,108,46,111,114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
marvin-hansen/iggy-streaming-system
47,706
thirdparty/crates/ring-0.17.9/pregenerated/sha512-x86_64-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .globl _sha512_block_data_order_nohw .private_extern _sha512_block_data_order_nohw .p2align 4 _sha512_block_data_order_nohw: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 shlq $4,%rdx subq $128+32,%rsp leaq (%rsi,%rdx,8),%rdx andq $-64,%rsp movq %rdi,128+0(%rsp) movq %rsi,128+8(%rsp) movq %rdx,128+16(%rsp) movq %rax,152(%rsp) L$prologue: movq 0(%rdi),%rax movq 8(%rdi),%rbx movq 16(%rdi),%rcx movq 24(%rdi),%rdx movq 32(%rdi),%r8 movq 40(%rdi),%r9 movq 48(%rdi),%r10 movq 56(%rdi),%r11 jmp L$loop .p2align 4 L$loop: movq %rbx,%rdi leaq K512(%rip),%rbp xorq %rcx,%rdi movq 0(%rsi),%r12 movq %r8,%r13 movq %rax,%r14 bswapq %r12 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,0(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp addq %r14,%r11 movq 8(%rsi),%r12 movq %rdx,%r13 movq %r11,%r14 bswapq %r12 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,8(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq %rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq %r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp addq %r14,%r10 movq 16(%rsi),%r12 movq %rcx,%r13 movq %r10,%r14 bswapq %r12 rorq $23,%r13 movq %rdx,%r15 xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,16(%rsp) xorq %r10,%r14 andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 xorq %rcx,%r13 addq %r15,%r12 movq %r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp addq %r14,%r9 movq 24(%rsi),%r12 movq %rbx,%r13 movq %r9,%r14 bswapq %r12 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,24(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp addq %r14,%r8 movq 32(%rsi),%r12 movq %rax,%r13 movq %r8,%r14 bswapq %r12 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,32(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq %rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 addq (%rbp),%r12 xorq %r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp addq %r14,%rdx movq 40(%rsi),%r12 movq %r11,%r13 movq %rdx,%r14 bswapq %r12 rorq $23,%r13 movq %rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,40(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq 
$14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp addq %r14,%rcx movq 48(%rsi),%r12 movq %r10,%r13 movq %rcx,%r14 bswapq %r12 rorq $23,%r13 movq %r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,48(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq $4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq %r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp addq %r14,%rbx movq 56(%rsi),%r12 movq %r9,%r13 movq %rbx,%r14 bswapq %r12 rorq $23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,56(%rsp) xorq %rbx,%r14 andq %r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq %rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp addq %r14,%rax movq 64(%rsi),%r12 movq %r8,%r13 movq %rax,%r14 bswapq %r12 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,64(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp addq %r14,%r11 movq 72(%rsi),%r12 movq %rdx,%r13 movq %r11,%r14 bswapq %r12 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,72(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq %rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq %r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp addq %r14,%r10 movq 80(%rsi),%r12 movq %rcx,%r13 movq %r10,%r14 bswapq %r12 rorq $23,%r13 movq %rdx,%r15 xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,80(%rsp) xorq %r10,%r14 andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 xorq %rcx,%r13 addq %r15,%r12 movq %r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp addq %r14,%r9 movq 88(%rsi),%r12 movq %rbx,%r13 movq %r9,%r14 bswapq %r12 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,88(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp addq %r14,%r8 movq 96(%rsi),%r12 movq %rax,%r13 movq %r8,%r14 bswapq %r12 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,96(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq %rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 addq (%rbp),%r12 xorq %r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp addq %r14,%rdx movq 104(%rsi),%r12 movq %r11,%r13 movq %rdx,%r14 bswapq %r12 rorq 
$23,%r13 movq %rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,104(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq $14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp addq %r14,%rcx movq 112(%rsi),%r12 movq %r10,%r13 movq %rcx,%r14 bswapq %r12 rorq $23,%r13 movq %r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,112(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq $4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq %r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp addq %r14,%rbx movq 120(%rsi),%r12 movq %r9,%r13 movq %rbx,%r14 bswapq %r12 rorq $23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,120(%rsp) xorq %rbx,%r14 andq %r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq %rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp jmp L$rounds_16_xx .p2align 4 L$rounds_16_xx: movq 8(%rsp),%r13 movq 112(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%rax movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 72(%rsp),%r12 addq 0(%rsp),%r12 movq %r8,%r13 addq %r15,%r12 movq %rax,%r14 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,0(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp movq 16(%rsp),%r13 movq 120(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r11 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 80(%rsp),%r12 addq 8(%rsp),%r12 movq %rdx,%r13 addq %rdi,%r12 movq %r11,%r14 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,8(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq %rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq %r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp movq 24(%rsp),%r13 movq 0(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r10 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 88(%rsp),%r12 addq 16(%rsp),%r12 movq %rcx,%r13 addq %r15,%r12 movq %r10,%r14 rorq $23,%r13 movq %rdx,%r15 xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,16(%rsp) xorq %r10,%r14 andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 xorq %rcx,%r13 addq %r15,%r12 movq %r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp movq 32(%rsp),%r13 movq 
8(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r9 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 96(%rsp),%r12 addq 24(%rsp),%r12 movq %rbx,%r13 addq %rdi,%r12 movq %r9,%r14 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,24(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp movq 40(%rsp),%r13 movq 16(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r8 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 104(%rsp),%r12 addq 32(%rsp),%r12 movq %rax,%r13 addq %r15,%r12 movq %r8,%r14 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,32(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq %rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 addq (%rbp),%r12 xorq %r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp movq 48(%rsp),%r13 movq 24(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%rdx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 112(%rsp),%r12 addq 40(%rsp),%r12 movq %r11,%r13 addq %rdi,%r12 movq %rdx,%r14 rorq $23,%r13 movq %rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,40(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq $14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp movq 56(%rsp),%r13 movq 32(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%rcx movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 120(%rsp),%r12 addq 48(%rsp),%r12 movq %r10,%r13 addq %r15,%r12 movq %rcx,%r14 rorq $23,%r13 movq %r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,48(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq $4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq %r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp movq 64(%rsp),%r13 movq 40(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%rbx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 0(%rsp),%r12 addq 56(%rsp),%r12 movq %r9,%r13 addq %rdi,%r12 movq %rbx,%r14 rorq $23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,56(%rsp) xorq %rbx,%r14 andq %r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq %rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp movq 72(%rsp),%r13 movq 48(%rsp),%r15 movq 
%r13,%r12 rorq $7,%r13 addq %r14,%rax movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 8(%rsp),%r12 addq 64(%rsp),%r12 movq %r8,%r13 addq %r15,%r12 movq %rax,%r14 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,64(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp movq 80(%rsp),%r13 movq 56(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r11 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 16(%rsp),%r12 addq 72(%rsp),%r12 movq %rdx,%r13 addq %rdi,%r12 movq %r11,%r14 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,72(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq %rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq %r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp movq 88(%rsp),%r13 movq 64(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r10 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 24(%rsp),%r12 addq 80(%rsp),%r12 movq %rcx,%r13 addq %r15,%r12 movq %r10,%r14 rorq $23,%r13 movq %rdx,%r15 xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,80(%rsp) xorq %r10,%r14 andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 xorq %rcx,%r13 addq %r15,%r12 movq %r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp movq 96(%rsp),%r13 movq 72(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r9 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 32(%rsp),%r12 addq 88(%rsp),%r12 movq %rbx,%r13 addq %rdi,%r12 movq %r9,%r14 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,88(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp movq 104(%rsp),%r13 movq 80(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r8 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 40(%rsp),%r12 addq 96(%rsp),%r12 movq %rax,%r13 addq %r15,%r12 movq %r8,%r14 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,96(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq %rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 addq (%rbp),%r12 xorq %r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp movq 112(%rsp),%r13 movq 88(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 
addq %r14,%rdx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 48(%rsp),%r12 addq 104(%rsp),%r12 movq %r11,%r13 addq %rdi,%r12 movq %rdx,%r14 rorq $23,%r13 movq %rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,104(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq $14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp movq 120(%rsp),%r13 movq 96(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%rcx movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 56(%rsp),%r12 addq 112(%rsp),%r12 movq %r10,%r13 addq %r15,%r12 movq %rcx,%r14 rorq $23,%r13 movq %r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,112(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq $4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq %r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp movq 0(%rsp),%r13 movq 104(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%rbx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 64(%rsp),%r12 addq 120(%rsp),%r12 movq %r9,%r13 addq %rdi,%r12 movq %rbx,%r14 rorq $23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,120(%rsp) xorq %rbx,%r14 andq %r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq %rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp cmpb $0,7(%rbp) jnz L$rounds_16_xx movq 128+0(%rsp),%rdi addq %r14,%rax leaq 128(%rsi),%rsi addq 0(%rdi),%rax addq 8(%rdi),%rbx addq 16(%rdi),%rcx addq 24(%rdi),%rdx addq 32(%rdi),%r8 addq 40(%rdi),%r9 addq 48(%rdi),%r10 addq 56(%rdi),%r11 cmpq 128+16(%rsp),%rsi movq %rax,0(%rdi) movq %rbx,8(%rdi) movq %rcx,16(%rdi) movq %rdx,24(%rdi) movq %r8,32(%rdi) movq %r9,40(%rdi) movq %r10,48(%rdi) movq %r11,56(%rdi) jb L$loop movq 152(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$epilogue: ret .section __DATA,__const .p2align 6 K512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 
0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0x0001020304050607,0x08090a0b0c0d0e0f .quad 0x0001020304050607,0x08090a0b0c0d0e0f .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text .globl _sha512_block_data_order_avx .private_extern _sha512_block_data_order_avx .p2align 6 _sha512_block_data_order_avx: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 shlq $4,%rdx subq $160,%rsp leaq (%rsi,%rdx,8),%rdx andq $-64,%rsp movq %rdi,128+0(%rsp) movq %rsi,128+8(%rsp) movq %rdx,128+16(%rsp) movq %rax,152(%rsp) L$prologue_avx: vzeroupper movq 0(%rdi),%rax movq 8(%rdi),%rbx movq 16(%rdi),%rcx movq 24(%rdi),%rdx movq 32(%rdi),%r8 movq 40(%rdi),%r9 movq 48(%rdi),%r10 movq 
56(%rdi),%r11 jmp L$loop_avx .p2align 4 L$loop_avx: vmovdqa K512+1280(%rip),%xmm11 vmovdqu 0(%rsi),%xmm0 leaq K512+128(%rip),%rbp vmovdqu 16(%rsi),%xmm1 vmovdqu 32(%rsi),%xmm2 vpshufb %xmm11,%xmm0,%xmm0 vmovdqu 48(%rsi),%xmm3 vpshufb %xmm11,%xmm1,%xmm1 vmovdqu 64(%rsi),%xmm4 vpshufb %xmm11,%xmm2,%xmm2 vmovdqu 80(%rsi),%xmm5 vpshufb %xmm11,%xmm3,%xmm3 vmovdqu 96(%rsi),%xmm6 vpshufb %xmm11,%xmm4,%xmm4 vmovdqu 112(%rsi),%xmm7 vpshufb %xmm11,%xmm5,%xmm5 vpaddq -128(%rbp),%xmm0,%xmm8 vpshufb %xmm11,%xmm6,%xmm6 vpaddq -96(%rbp),%xmm1,%xmm9 vpshufb %xmm11,%xmm7,%xmm7 vpaddq -64(%rbp),%xmm2,%xmm10 vpaddq -32(%rbp),%xmm3,%xmm11 vmovdqa %xmm8,0(%rsp) vpaddq 0(%rbp),%xmm4,%xmm8 vmovdqa %xmm9,16(%rsp) vpaddq 32(%rbp),%xmm5,%xmm9 vmovdqa %xmm10,32(%rsp) vpaddq 64(%rbp),%xmm6,%xmm10 vmovdqa %xmm11,48(%rsp) vpaddq 96(%rbp),%xmm7,%xmm11 vmovdqa %xmm8,64(%rsp) movq %rax,%r14 vmovdqa %xmm9,80(%rsp) movq %rbx,%rdi vmovdqa %xmm10,96(%rsp) xorq %rcx,%rdi vmovdqa %xmm11,112(%rsp) movq %r8,%r13 jmp L$avx_00_47 .p2align 4 L$avx_00_47: addq $256,%rbp vpalignr $8,%xmm0,%xmm1,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rax vpalignr $8,%xmm4,%xmm5,%xmm11 movq %r9,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r8,%r13 xorq %r10,%r12 vpaddq %xmm11,%xmm0,%xmm0 shrdq $4,%r13,%r13 xorq %rax,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r8,%r12 xorq %r8,%r13 vpsllq $56,%xmm8,%xmm9 addq 0(%rsp),%r11 movq %rax,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %r10,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rbx,%r15 addq %r12,%r11 vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rax,%r14 addq %r13,%r11 vpxor %xmm10,%xmm8,%xmm8 xorq %rbx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm7,%xmm11 addq %r11,%rdx addq %rdi,%r11 vpxor %xmm9,%xmm8,%xmm8 movq %rdx,%r13 addq %r11,%r14 vpsllq $3,%xmm7,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r11 vpaddq %xmm8,%xmm0,%xmm0 movq %r8,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm7,%xmm9 xorq %rdx,%r13 xorq %r9,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r11,%r14 vpsllq $42,%xmm10,%xmm10 andq %rdx,%r12 xorq %rdx,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 8(%rsp),%r10 movq %r11,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r9,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rax,%rdi addq %r12,%r10 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm0,%xmm0 xorq %r11,%r14 addq %r13,%r10 vpaddq -128(%rbp),%xmm0,%xmm10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 vmovdqa %xmm10,0(%rsp) vpalignr $8,%xmm1,%xmm2,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r10 vpalignr $8,%xmm5,%xmm6,%xmm11 movq %rdx,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %rcx,%r13 xorq %r8,%r12 vpaddq %xmm11,%xmm1,%xmm1 shrdq $4,%r13,%r13 xorq %r10,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rcx,%r12 xorq %rcx,%r13 vpsllq $56,%xmm8,%xmm9 addq 16(%rsp),%r9 movq %r10,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %r8,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r11,%r15 addq %r12,%r9 vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %r10,%r14 addq %r13,%r9 vpxor %xmm10,%xmm8,%xmm8 xorq %r11,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm0,%xmm11 addq %r9,%rbx addq %rdi,%r9 vpxor %xmm9,%xmm8,%xmm8 movq %rbx,%r13 addq %r9,%r14 vpsllq $3,%xmm0,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r9 vpaddq %xmm8,%xmm1,%xmm1 movq %rcx,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm0,%xmm9 xorq %rbx,%r13 xorq %rdx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r9,%r14 vpsllq $42,%xmm10,%xmm10 andq %rbx,%r12 xorq %rbx,%r13 vpxor %xmm9,%xmm11,%xmm11 
addq 24(%rsp),%r8 movq %r9,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rdx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r10,%rdi addq %r12,%r8 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm1,%xmm1 xorq %r9,%r14 addq %r13,%r8 vpaddq -96(%rbp),%xmm1,%xmm10 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 vmovdqa %xmm10,16(%rsp) vpalignr $8,%xmm2,%xmm3,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r8 vpalignr $8,%xmm6,%xmm7,%xmm11 movq %rbx,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %rax,%r13 xorq %rcx,%r12 vpaddq %xmm11,%xmm2,%xmm2 shrdq $4,%r13,%r13 xorq %r8,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rax,%r12 xorq %rax,%r13 vpsllq $56,%xmm8,%xmm9 addq 32(%rsp),%rdx movq %r8,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rcx,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r9,%r15 addq %r12,%rdx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %r8,%r14 addq %r13,%rdx vpxor %xmm10,%xmm8,%xmm8 xorq %r9,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm1,%xmm11 addq %rdx,%r11 addq %rdi,%rdx vpxor %xmm9,%xmm8,%xmm8 movq %r11,%r13 addq %rdx,%r14 vpsllq $3,%xmm1,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rdx vpaddq %xmm8,%xmm2,%xmm2 movq %rax,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm1,%xmm9 xorq %r11,%r13 xorq %rbx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %rdx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r11,%r12 xorq %r11,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 40(%rsp),%rcx movq %rdx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rbx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r8,%rdi addq %r12,%rcx vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm2,%xmm2 xorq %rdx,%r14 addq %r13,%rcx vpaddq -64(%rbp),%xmm2,%xmm10 xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 vmovdqa %xmm10,32(%rsp) vpalignr $8,%xmm3,%xmm4,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rcx vpalignr $8,%xmm7,%xmm0,%xmm11 movq %r11,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r10,%r13 xorq %rax,%r12 vpaddq %xmm11,%xmm3,%xmm3 shrdq $4,%r13,%r13 xorq %rcx,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r10,%r12 xorq %r10,%r13 vpsllq $56,%xmm8,%xmm9 addq 48(%rsp),%rbx movq %rcx,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rax,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rdx,%r15 addq %r12,%rbx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rcx,%r14 addq %r13,%rbx vpxor %xmm10,%xmm8,%xmm8 xorq %rdx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm2,%xmm11 addq %rbx,%r9 addq %rdi,%rbx vpxor %xmm9,%xmm8,%xmm8 movq %r9,%r13 addq %rbx,%r14 vpsllq $3,%xmm2,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rbx vpaddq %xmm8,%xmm3,%xmm3 movq %r10,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm2,%xmm9 xorq %r9,%r13 xorq %r11,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %rbx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r9,%r12 xorq %r9,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 56(%rsp),%rax movq %rbx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r11,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rcx,%rdi addq %r12,%rax vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm3,%xmm3 xorq %rbx,%r14 addq %r13,%rax vpaddq -32(%rbp),%xmm3,%xmm10 xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq %rax,%r14 vmovdqa %xmm10,48(%rsp) vpalignr $8,%xmm4,%xmm5,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rax vpalignr $8,%xmm0,%xmm1,%xmm11 movq %r9,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r8,%r13 xorq %r10,%r12 vpaddq %xmm11,%xmm4,%xmm4 shrdq 
$4,%r13,%r13 xorq %rax,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r8,%r12 xorq %r8,%r13 vpsllq $56,%xmm8,%xmm9 addq 64(%rsp),%r11 movq %rax,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %r10,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rbx,%r15 addq %r12,%r11 vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rax,%r14 addq %r13,%r11 vpxor %xmm10,%xmm8,%xmm8 xorq %rbx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm3,%xmm11 addq %r11,%rdx addq %rdi,%r11 vpxor %xmm9,%xmm8,%xmm8 movq %rdx,%r13 addq %r11,%r14 vpsllq $3,%xmm3,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r11 vpaddq %xmm8,%xmm4,%xmm4 movq %r8,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm3,%xmm9 xorq %rdx,%r13 xorq %r9,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r11,%r14 vpsllq $42,%xmm10,%xmm10 andq %rdx,%r12 xorq %rdx,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 72(%rsp),%r10 movq %r11,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r9,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rax,%rdi addq %r12,%r10 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm4,%xmm4 xorq %r11,%r14 addq %r13,%r10 vpaddq 0(%rbp),%xmm4,%xmm10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 vmovdqa %xmm10,64(%rsp) vpalignr $8,%xmm5,%xmm6,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r10 vpalignr $8,%xmm1,%xmm2,%xmm11 movq %rdx,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %rcx,%r13 xorq %r8,%r12 vpaddq %xmm11,%xmm5,%xmm5 shrdq $4,%r13,%r13 xorq %r10,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rcx,%r12 xorq %rcx,%r13 vpsllq $56,%xmm8,%xmm9 addq 80(%rsp),%r9 movq %r10,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %r8,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r11,%r15 addq %r12,%r9 vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %r10,%r14 addq %r13,%r9 vpxor %xmm10,%xmm8,%xmm8 xorq %r11,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm4,%xmm11 addq %r9,%rbx addq %rdi,%r9 vpxor %xmm9,%xmm8,%xmm8 movq %rbx,%r13 addq %r9,%r14 vpsllq $3,%xmm4,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r9 vpaddq %xmm8,%xmm5,%xmm5 movq %rcx,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm4,%xmm9 xorq %rbx,%r13 xorq %rdx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r9,%r14 vpsllq $42,%xmm10,%xmm10 andq %rbx,%r12 xorq %rbx,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 88(%rsp),%r8 movq %r9,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rdx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r10,%rdi addq %r12,%r8 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm5,%xmm5 xorq %r9,%r14 addq %r13,%r8 vpaddq 32(%rbp),%xmm5,%xmm10 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 vmovdqa %xmm10,80(%rsp) vpalignr $8,%xmm6,%xmm7,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r8 vpalignr $8,%xmm2,%xmm3,%xmm11 movq %rbx,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %rax,%r13 xorq %rcx,%r12 vpaddq %xmm11,%xmm6,%xmm6 shrdq $4,%r13,%r13 xorq %r8,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rax,%r12 xorq %rax,%r13 vpsllq $56,%xmm8,%xmm9 addq 96(%rsp),%rdx movq %r8,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rcx,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r9,%r15 addq %r12,%rdx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %r8,%r14 addq %r13,%rdx vpxor %xmm10,%xmm8,%xmm8 xorq %r9,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm5,%xmm11 addq %rdx,%r11 addq %rdi,%rdx vpxor %xmm9,%xmm8,%xmm8 movq %r11,%r13 addq %rdx,%r14 vpsllq $3,%xmm5,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rdx vpaddq %xmm8,%xmm6,%xmm6 movq %rax,%r12 shrdq 
$5,%r14,%r14 vpsrlq $19,%xmm5,%xmm9 xorq %r11,%r13 xorq %rbx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %rdx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r11,%r12 xorq %r11,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 104(%rsp),%rcx movq %rdx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rbx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r8,%rdi addq %r12,%rcx vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm6,%xmm6 xorq %rdx,%r14 addq %r13,%rcx vpaddq 64(%rbp),%xmm6,%xmm10 xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 vmovdqa %xmm10,96(%rsp) vpalignr $8,%xmm7,%xmm0,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rcx vpalignr $8,%xmm3,%xmm4,%xmm11 movq %r11,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r10,%r13 xorq %rax,%r12 vpaddq %xmm11,%xmm7,%xmm7 shrdq $4,%r13,%r13 xorq %rcx,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r10,%r12 xorq %r10,%r13 vpsllq $56,%xmm8,%xmm9 addq 112(%rsp),%rbx movq %rcx,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rax,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rdx,%r15 addq %r12,%rbx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rcx,%r14 addq %r13,%rbx vpxor %xmm10,%xmm8,%xmm8 xorq %rdx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm6,%xmm11 addq %rbx,%r9 addq %rdi,%rbx vpxor %xmm9,%xmm8,%xmm8 movq %r9,%r13 addq %rbx,%r14 vpsllq $3,%xmm6,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rbx vpaddq %xmm8,%xmm7,%xmm7 movq %r10,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm6,%xmm9 xorq %r9,%r13 xorq %r11,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %rbx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r9,%r12 xorq %r9,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 120(%rsp),%rax movq %rbx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r11,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rcx,%rdi addq %r12,%rax vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm7,%xmm7 xorq %rbx,%r14 addq %r13,%rax vpaddq 96(%rbp),%xmm7,%xmm10 xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq %rax,%r14 vmovdqa %xmm10,112(%rsp) cmpb $0,135(%rbp) jne L$avx_00_47 shrdq $23,%r13,%r13 movq %r14,%rax movq %r9,%r12 shrdq $5,%r14,%r14 xorq %r8,%r13 xorq %r10,%r12 shrdq $4,%r13,%r13 xorq %rax,%r14 andq %r8,%r12 xorq %r8,%r13 addq 0(%rsp),%r11 movq %rax,%r15 xorq %r10,%r12 shrdq $6,%r14,%r14 xorq %rbx,%r15 addq %r12,%r11 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rax,%r14 addq %r13,%r11 xorq %rbx,%rdi shrdq $28,%r14,%r14 addq %r11,%rdx addq %rdi,%r11 movq %rdx,%r13 addq %r11,%r14 shrdq $23,%r13,%r13 movq %r14,%r11 movq %r8,%r12 shrdq $5,%r14,%r14 xorq %rdx,%r13 xorq %r9,%r12 shrdq $4,%r13,%r13 xorq %r11,%r14 andq %rdx,%r12 xorq %rdx,%r13 addq 8(%rsp),%r10 movq %r11,%rdi xorq %r9,%r12 shrdq $6,%r14,%r14 xorq %rax,%rdi addq %r12,%r10 shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %r11,%r14 addq %r13,%r10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 shrdq $23,%r13,%r13 movq %r14,%r10 movq %rdx,%r12 shrdq $5,%r14,%r14 xorq %rcx,%r13 xorq %r8,%r12 shrdq $4,%r13,%r13 xorq %r10,%r14 andq %rcx,%r12 xorq %rcx,%r13 addq 16(%rsp),%r9 movq %r10,%r15 xorq %r8,%r12 shrdq $6,%r14,%r14 xorq %r11,%r15 addq %r12,%r9 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r10,%r14 addq %r13,%r9 xorq %r11,%rdi shrdq $28,%r14,%r14 addq %r9,%rbx addq %rdi,%r9 movq %rbx,%r13 addq %r9,%r14 shrdq $23,%r13,%r13 movq %r14,%r9 movq %rcx,%r12 shrdq $5,%r14,%r14 xorq %rbx,%r13 xorq %rdx,%r12 shrdq $4,%r13,%r13 xorq %r9,%r14 andq %rbx,%r12 xorq %rbx,%r13 addq 24(%rsp),%r8 movq 
%r9,%rdi xorq %rdx,%r12 shrdq $6,%r14,%r14 xorq %r10,%rdi addq %r12,%r8 shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %r9,%r14 addq %r13,%r8 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 shrdq $23,%r13,%r13 movq %r14,%r8 movq %rbx,%r12 shrdq $5,%r14,%r14 xorq %rax,%r13 xorq %rcx,%r12 shrdq $4,%r13,%r13 xorq %r8,%r14 andq %rax,%r12 xorq %rax,%r13 addq 32(%rsp),%rdx movq %r8,%r15 xorq %rcx,%r12 shrdq $6,%r14,%r14 xorq %r9,%r15 addq %r12,%rdx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r8,%r14 addq %r13,%rdx xorq %r9,%rdi shrdq $28,%r14,%r14 addq %rdx,%r11 addq %rdi,%rdx movq %r11,%r13 addq %rdx,%r14 shrdq $23,%r13,%r13 movq %r14,%rdx movq %rax,%r12 shrdq $5,%r14,%r14 xorq %r11,%r13 xorq %rbx,%r12 shrdq $4,%r13,%r13 xorq %rdx,%r14 andq %r11,%r12 xorq %r11,%r13 addq 40(%rsp),%rcx movq %rdx,%rdi xorq %rbx,%r12 shrdq $6,%r14,%r14 xorq %r8,%rdi addq %r12,%rcx shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %rdx,%r14 addq %r13,%rcx xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 shrdq $23,%r13,%r13 movq %r14,%rcx movq %r11,%r12 shrdq $5,%r14,%r14 xorq %r10,%r13 xorq %rax,%r12 shrdq $4,%r13,%r13 xorq %rcx,%r14 andq %r10,%r12 xorq %r10,%r13 addq 48(%rsp),%rbx movq %rcx,%r15 xorq %rax,%r12 shrdq $6,%r14,%r14 xorq %rdx,%r15 addq %r12,%rbx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rcx,%r14 addq %r13,%rbx xorq %rdx,%rdi shrdq $28,%r14,%r14 addq %rbx,%r9 addq %rdi,%rbx movq %r9,%r13 addq %rbx,%r14 shrdq $23,%r13,%r13 movq %r14,%rbx movq %r10,%r12 shrdq $5,%r14,%r14 xorq %r9,%r13 xorq %r11,%r12 shrdq $4,%r13,%r13 xorq %rbx,%r14 andq %r9,%r12 xorq %r9,%r13 addq 56(%rsp),%rax movq %rbx,%rdi xorq %r11,%r12 shrdq $6,%r14,%r14 xorq %rcx,%rdi addq %r12,%rax shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %rbx,%r14 addq %r13,%rax xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq %rax,%r14 shrdq $23,%r13,%r13 movq %r14,%rax movq %r9,%r12 shrdq $5,%r14,%r14 xorq %r8,%r13 xorq %r10,%r12 shrdq $4,%r13,%r13 xorq %rax,%r14 andq %r8,%r12 xorq %r8,%r13 addq 64(%rsp),%r11 movq %rax,%r15 xorq %r10,%r12 shrdq $6,%r14,%r14 xorq %rbx,%r15 addq %r12,%r11 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rax,%r14 addq %r13,%r11 xorq %rbx,%rdi shrdq $28,%r14,%r14 addq %r11,%rdx addq %rdi,%r11 movq %rdx,%r13 addq %r11,%r14 shrdq $23,%r13,%r13 movq %r14,%r11 movq %r8,%r12 shrdq $5,%r14,%r14 xorq %rdx,%r13 xorq %r9,%r12 shrdq $4,%r13,%r13 xorq %r11,%r14 andq %rdx,%r12 xorq %rdx,%r13 addq 72(%rsp),%r10 movq %r11,%rdi xorq %r9,%r12 shrdq $6,%r14,%r14 xorq %rax,%rdi addq %r12,%r10 shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %r11,%r14 addq %r13,%r10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 shrdq $23,%r13,%r13 movq %r14,%r10 movq %rdx,%r12 shrdq $5,%r14,%r14 xorq %rcx,%r13 xorq %r8,%r12 shrdq $4,%r13,%r13 xorq %r10,%r14 andq %rcx,%r12 xorq %rcx,%r13 addq 80(%rsp),%r9 movq %r10,%r15 xorq %r8,%r12 shrdq $6,%r14,%r14 xorq %r11,%r15 addq %r12,%r9 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r10,%r14 addq %r13,%r9 xorq %r11,%rdi shrdq $28,%r14,%r14 addq %r9,%rbx addq %rdi,%r9 movq %rbx,%r13 addq %r9,%r14 shrdq $23,%r13,%r13 movq %r14,%r9 movq %rcx,%r12 shrdq $5,%r14,%r14 xorq %rbx,%r13 xorq %rdx,%r12 shrdq $4,%r13,%r13 xorq %r9,%r14 andq %rbx,%r12 xorq %rbx,%r13 addq 88(%rsp),%r8 movq %r9,%rdi xorq %rdx,%r12 shrdq $6,%r14,%r14 xorq %r10,%rdi addq %r12,%r8 shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %r9,%r14 addq %r13,%r8 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 
shrdq $23,%r13,%r13 movq %r14,%r8 movq %rbx,%r12 shrdq $5,%r14,%r14 xorq %rax,%r13 xorq %rcx,%r12 shrdq $4,%r13,%r13 xorq %r8,%r14 andq %rax,%r12 xorq %rax,%r13 addq 96(%rsp),%rdx movq %r8,%r15 xorq %rcx,%r12 shrdq $6,%r14,%r14 xorq %r9,%r15 addq %r12,%rdx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r8,%r14 addq %r13,%rdx xorq %r9,%rdi shrdq $28,%r14,%r14 addq %rdx,%r11 addq %rdi,%rdx movq %r11,%r13 addq %rdx,%r14 shrdq $23,%r13,%r13 movq %r14,%rdx movq %rax,%r12 shrdq $5,%r14,%r14 xorq %r11,%r13 xorq %rbx,%r12 shrdq $4,%r13,%r13 xorq %rdx,%r14 andq %r11,%r12 xorq %r11,%r13 addq 104(%rsp),%rcx movq %rdx,%rdi xorq %rbx,%r12 shrdq $6,%r14,%r14 xorq %r8,%rdi addq %r12,%rcx shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %rdx,%r14 addq %r13,%rcx xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 shrdq $23,%r13,%r13 movq %r14,%rcx movq %r11,%r12 shrdq $5,%r14,%r14 xorq %r10,%r13 xorq %rax,%r12 shrdq $4,%r13,%r13 xorq %rcx,%r14 andq %r10,%r12 xorq %r10,%r13 addq 112(%rsp),%rbx movq %rcx,%r15 xorq %rax,%r12 shrdq $6,%r14,%r14 xorq %rdx,%r15 addq %r12,%rbx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rcx,%r14 addq %r13,%rbx xorq %rdx,%rdi shrdq $28,%r14,%r14 addq %rbx,%r9 addq %rdi,%rbx movq %r9,%r13 addq %rbx,%r14 shrdq $23,%r13,%r13 movq %r14,%rbx movq %r10,%r12 shrdq $5,%r14,%r14 xorq %r9,%r13 xorq %r11,%r12 shrdq $4,%r13,%r13 xorq %rbx,%r14 andq %r9,%r12 xorq %r9,%r13 addq 120(%rsp),%rax movq %rbx,%rdi xorq %r11,%r12 shrdq $6,%r14,%r14 xorq %rcx,%rdi addq %r12,%rax shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %rbx,%r14 addq %r13,%rax xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq %rax,%r14 movq 128+0(%rsp),%rdi movq %r14,%rax addq 0(%rdi),%rax leaq 128(%rsi),%rsi addq 8(%rdi),%rbx addq 16(%rdi),%rcx addq 24(%rdi),%rdx addq 32(%rdi),%r8 addq 40(%rdi),%r9 addq 48(%rdi),%r10 addq 56(%rdi),%r11 cmpq 128+16(%rsp),%rsi movq %rax,0(%rdi) movq %rbx,8(%rdi) movq %rcx,16(%rdi) movq %rdx,24(%rdi) movq %r8,32(%rdi) movq %r9,40(%rdi) movq %r10,48(%rdi) movq %r11,56(%rdi) jb L$loop_avx movq 152(%rsp),%rsi vzeroupper movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$epilogue_avx: ret #endif
marvin-hansen/iggy-streaming-system
20,463
thirdparty/crates/ring-0.17.9/pregenerated/x86_64-mont-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .globl bn_mul_mont_nohw .hidden bn_mul_mont_nohw .type bn_mul_mont_nohw,@function .align 16 bn_mul_mont_nohw: .cfi_startproc _CET_ENDBR movl %r9d,%r9d movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 negq %r9 movq %rsp,%r11 leaq -16(%rsp,%r9,8),%r10 negq %r9 andq $-1024,%r10 subq %r10,%r11 andq $-4096,%r11 leaq (%r10,%r11,1),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja .Lmul_page_walk jmp .Lmul_page_walk_done .align 16 .Lmul_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja .Lmul_page_walk .Lmul_page_walk_done: movq %rax,8(%rsp,%r9,8) .cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08 .Lmul_body: movq %rdx,%r12 movq (%r8),%r8 movq (%r12),%rbx movq (%rsi),%rax xorq %r14,%r14 xorq %r15,%r15 movq %r8,%rbp mulq %rbx movq %rax,%r10 movq (%rcx),%rax imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq %rdx,%r13 leaq 1(%r15),%r15 jmp .L1st_enter .align 16 .L1st: addq %rax,%r13 movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%r13 movq %r10,%r11 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 .L1st_enter: mulq %rbx addq %rax,%r11 movq (%rcx,%r15,8),%rax adcq $0,%rdx leaq 1(%r15),%r15 movq %rdx,%r10 mulq %rbp cmpq %r9,%r15 jne .L1st addq %rax,%r13 movq (%rsi),%rax adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 movq %r10,%r11 xorq %rdx,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r9,8) movq %rdx,(%rsp,%r9,8) leaq 1(%r14),%r14 jmp .Louter .align 16 .Louter: movq (%r12,%r14,8),%rbx xorq %r15,%r15 movq %r8,%rbp movq (%rsp),%r10 mulq %rbx addq %rax,%r10 movq (%rcx),%rax adcq $0,%rdx imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq 8(%rsp),%r10 movq %rdx,%r13 leaq 1(%r15),%r15 jmp .Linner_enter .align 16 .Linner: addq %rax,%r13 movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 movq (%rsp,%r15,8),%r10 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 .Linner_enter: mulq %rbx addq %rax,%r11 movq (%rcx,%r15,8),%rax adcq $0,%rdx addq %r11,%r10 movq %rdx,%r11 adcq $0,%r11 leaq 1(%r15),%r15 mulq %rbp cmpq %r9,%r15 jne .Linner addq %rax,%r13 movq (%rsi),%rax adcq $0,%rdx addq %r10,%r13 movq (%rsp,%r15,8),%r10 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 xorq %rdx,%rdx addq %r11,%r13 adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r9,8) movq %rdx,(%rsp,%r9,8) leaq 1(%r14),%r14 cmpq %r9,%r14 jb .Louter xorq %r14,%r14 movq (%rsp),%rax movq %r9,%r15 .align 16 .Lsub: sbbq (%rcx,%r14,8),%rax movq %rax,(%rdi,%r14,8) movq 8(%rsp,%r14,8),%rax leaq 1(%r14),%r14 decq %r15 jnz .Lsub sbbq $0,%rax movq $-1,%rbx xorq %rax,%rbx xorq %r14,%r14 movq %r9,%r15 .Lcopy: movq (%rdi,%r14,8),%rcx movq (%rsp,%r14,8),%rdx andq %rbx,%rcx andq %rax,%rdx movq %r9,(%rsp,%r14,8) orq %rcx,%rdx movq %rdx,(%rdi,%r14,8) leaq 1(%r14),%r14 subq $1,%r15 jnz .Lcopy movq 8(%rsp,%r9,8),%rsi .cfi_def_cfa %rsi,8 movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq 
(%rsi),%rsp .cfi_def_cfa_register %rsp .Lmul_epilogue: ret .cfi_endproc .size bn_mul_mont_nohw,.-bn_mul_mont_nohw .globl bn_mul4x_mont .hidden bn_mul4x_mont .type bn_mul4x_mont,@function .align 16 bn_mul4x_mont: .cfi_startproc _CET_ENDBR movl %r9d,%r9d movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 negq %r9 movq %rsp,%r11 leaq -32(%rsp,%r9,8),%r10 negq %r9 andq $-1024,%r10 subq %r10,%r11 andq $-4096,%r11 leaq (%r10,%r11,1),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja .Lmul4x_page_walk jmp .Lmul4x_page_walk_done .Lmul4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja .Lmul4x_page_walk .Lmul4x_page_walk_done: movq %rax,8(%rsp,%r9,8) .cfi_escape 0x0f,0x0a,0x77,0x08,0x79,0x00,0x38,0x1e,0x22,0x06,0x23,0x08 .Lmul4x_body: movq %rdi,16(%rsp,%r9,8) movq %rdx,%r12 movq (%r8),%r8 movq (%r12),%rbx movq (%rsi),%rax xorq %r14,%r14 xorq %r15,%r15 movq %r8,%rbp mulq %rbx movq %rax,%r10 movq (%rcx),%rax imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi),%rax adcq $0,%rdx addq %r11,%rdi leaq 4(%r15),%r15 adcq $0,%rdx movq %rdi,(%rsp) movq %rdx,%r13 jmp .L1st4x .align 16 .L1st4x: mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 mulq %rbx addq %rax,%r10 movq (%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx,%r15,8),%rax adcq $0,%rdx leaq 4(%r15),%r15 movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq -16(%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-32(%rsp,%r15,8) movq %rdx,%r13 cmpq %r9,%r15 jb .L1st4x mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi movq %r13,-8(%rsp,%r15,8) movq %rdi,(%rsp,%r15,8) leaq 1(%r14),%r14 .align 4 .Louter4x: movq (%r12,%r14,8),%rbx xorq %r15,%r15 movq (%rsp),%r10 movq %r8,%rbp mulq %rbx addq %rax,%r10 movq (%rcx),%rax adcq $0,%rdx imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx addq 8(%rsp),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi),%rax adcq $0,%rdx addq %r11,%rdi leaq 4(%r15),%r15 adcq $0,%rdx movq %rdi,(%rsp) movq %rdx,%r13 jmp .Linner4x .align 16 .Linner4x: mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx addq -16(%rsp,%r15,8),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq 
$0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx addq -8(%rsp,%r15,8),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 mulq %rbx addq %rax,%r10 movq (%rcx,%r15,8),%rax adcq $0,%rdx addq (%rsp,%r15,8),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx,%r15,8),%rax adcq $0,%rdx addq 8(%rsp,%r15,8),%r11 adcq $0,%rdx leaq 4(%r15),%r15 movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq -16(%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-32(%rsp,%r15,8) movq %rdx,%r13 cmpq %r9,%r15 jb .Linner4x mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx addq -16(%rsp,%r15,8),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx addq -8(%rsp,%r15,8),%r11 adcq $0,%rdx leaq 1(%r14),%r14 movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi addq (%rsp,%r9,8),%r13 adcq $0,%rdi movq %r13,-8(%rsp,%r15,8) movq %rdi,(%rsp,%r15,8) cmpq %r9,%r14 jb .Louter4x movq 16(%rsp,%r9,8),%rdi leaq -4(%r9),%r15 movq 0(%rsp),%rax movq 8(%rsp),%rdx shrq $2,%r15 leaq (%rsp),%rsi xorq %r14,%r14 subq 0(%rcx),%rax movq 16(%rsi),%rbx movq 24(%rsi),%rbp sbbq 8(%rcx),%rdx .Lsub4x: movq %rax,0(%rdi,%r14,8) movq %rdx,8(%rdi,%r14,8) sbbq 16(%rcx,%r14,8),%rbx movq 32(%rsi,%r14,8),%rax movq 40(%rsi,%r14,8),%rdx sbbq 24(%rcx,%r14,8),%rbp movq %rbx,16(%rdi,%r14,8) movq %rbp,24(%rdi,%r14,8) sbbq 32(%rcx,%r14,8),%rax movq 48(%rsi,%r14,8),%rbx movq 56(%rsi,%r14,8),%rbp sbbq 40(%rcx,%r14,8),%rdx leaq 4(%r14),%r14 decq %r15 jnz .Lsub4x movq %rax,0(%rdi,%r14,8) movq 32(%rsi,%r14,8),%rax sbbq 16(%rcx,%r14,8),%rbx movq %rdx,8(%rdi,%r14,8) sbbq 24(%rcx,%r14,8),%rbp movq %rbx,16(%rdi,%r14,8) sbbq $0,%rax movq %rbp,24(%rdi,%r14,8) pxor %xmm0,%xmm0 .byte 102,72,15,110,224 pcmpeqd %xmm5,%xmm5 pshufd $0,%xmm4,%xmm4 movq %r9,%r15 pxor %xmm4,%xmm5 shrq $2,%r15 xorl %eax,%eax jmp .Lcopy4x .align 16 .Lcopy4x: movdqa (%rsp,%rax,1),%xmm1 movdqu (%rdi,%rax,1),%xmm2 pand %xmm4,%xmm1 pand %xmm5,%xmm2 movdqa 16(%rsp,%rax,1),%xmm3 movdqa %xmm0,(%rsp,%rax,1) por %xmm2,%xmm1 movdqu 16(%rdi,%rax,1),%xmm2 movdqu %xmm1,(%rdi,%rax,1) pand %xmm4,%xmm3 pand %xmm5,%xmm2 movdqa %xmm0,16(%rsp,%rax,1) por %xmm2,%xmm3 movdqu %xmm3,16(%rdi,%rax,1) leaq 32(%rax),%rax decq %r15 jnz .Lcopy4x movq 8(%rsp,%r9,8),%rsi .cfi_def_cfa %rsi, 8 movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lmul4x_epilogue: ret .cfi_endproc .size bn_mul4x_mont,.-bn_mul4x_mont .extern bn_sqrx8x_internal .hidden bn_sqrx8x_internal .extern bn_sqr8x_internal .hidden bn_sqr8x_internal .globl bn_sqr8x_mont .hidden bn_sqr8x_mont .type bn_sqr8x_mont,@function .align 32 bn_sqr8x_mont: .cfi_startproc _CET_ENDBR movl %r9d,%r9d movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 
pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 .Lsqr8x_prologue: movl %r9d,%r10d shll $3,%r9d shlq $3+2,%r10 negq %r9 leaq -64(%rsp,%r9,2),%r11 movq %rsp,%rbp movq (%r8),%r8 subq %rsi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb .Lsqr8x_sp_alt subq %r11,%rbp leaq -64(%rbp,%r9,2),%rbp jmp .Lsqr8x_sp_done .align 32 .Lsqr8x_sp_alt: leaq 4096-64(,%r9,2),%r10 leaq -64(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp .Lsqr8x_sp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lsqr8x_page_walk jmp .Lsqr8x_page_walk_done .align 16 .Lsqr8x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lsqr8x_page_walk .Lsqr8x_page_walk_done: movq %r9,%r10 negq %r9 movq %r8,32(%rsp) movq %rax,40(%rsp) .cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 .Lsqr8x_body: .byte 102,72,15,110,209 pxor %xmm0,%xmm0 .byte 102,72,15,110,207 .byte 102,73,15,110,218 testq %rdx,%rdx jz .Lsqr8x_nox call bn_sqrx8x_internal leaq (%r8,%rcx,1),%rbx movq %rcx,%r9 movq %rcx,%rdx .byte 102,72,15,126,207 sarq $3+2,%rcx jmp .Lsqr8x_sub .align 32 .Lsqr8x_nox: call bn_sqr8x_internal leaq (%rdi,%r9,1),%rbx movq %r9,%rcx movq %r9,%rdx .byte 102,72,15,126,207 sarq $3+2,%rcx jmp .Lsqr8x_sub .align 32 .Lsqr8x_sub: movq 0(%rbx),%r12 movq 8(%rbx),%r13 movq 16(%rbx),%r14 movq 24(%rbx),%r15 leaq 32(%rbx),%rbx sbbq 0(%rbp),%r12 sbbq 8(%rbp),%r13 sbbq 16(%rbp),%r14 sbbq 24(%rbp),%r15 leaq 32(%rbp),%rbp movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r14,16(%rdi) movq %r15,24(%rdi) leaq 32(%rdi),%rdi incq %rcx jnz .Lsqr8x_sub sbbq $0,%rax leaq (%rbx,%r9,1),%rbx leaq (%rdi,%r9,1),%rdi .byte 102,72,15,110,200 pxor %xmm0,%xmm0 pshufd $0,%xmm1,%xmm1 movq 40(%rsp),%rsi .cfi_def_cfa %rsi,8 jmp .Lsqr8x_cond_copy .align 32 .Lsqr8x_cond_copy: movdqa 0(%rbx),%xmm2 movdqa 16(%rbx),%xmm3 leaq 32(%rbx),%rbx movdqu 0(%rdi),%xmm4 movdqu 16(%rdi),%xmm5 leaq 32(%rdi),%rdi movdqa %xmm0,-32(%rbx) movdqa %xmm0,-16(%rbx) movdqa %xmm0,-32(%rbx,%rdx,1) movdqa %xmm0,-16(%rbx,%rdx,1) pcmpeqd %xmm1,%xmm0 pand %xmm1,%xmm2 pand %xmm1,%xmm3 pand %xmm0,%xmm4 pand %xmm0,%xmm5 pxor %xmm0,%xmm0 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqu %xmm4,-32(%rdi) movdqu %xmm5,-16(%rdi) addq $32,%r9 jnz .Lsqr8x_cond_copy movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lsqr8x_epilogue: ret .cfi_endproc .size bn_sqr8x_mont,.-bn_sqr8x_mont .globl bn_mulx4x_mont .hidden bn_mulx4x_mont .type bn_mulx4x_mont,@function .align 32 bn_mulx4x_mont: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 .Lmulx4x_prologue: shll $3,%r9d xorq %r10,%r10 subq %r9,%r10 movq (%r8),%r8 leaq -72(%rsp,%r10,1),%rbp andq $-128,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lmulx4x_page_walk jmp .Lmulx4x_page_walk_done .align 16 .Lmulx4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lmulx4x_page_walk .Lmulx4x_page_walk_done: leaq (%rdx,%r9,1),%r10 movq %r9,0(%rsp) shrq $5,%r9 movq 
%r10,16(%rsp) subq $1,%r9 movq %r8,24(%rsp) movq %rdi,32(%rsp) movq %rax,40(%rsp) .cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 movq %r9,48(%rsp) jmp .Lmulx4x_body .align 32 .Lmulx4x_body: leaq 8(%rdx),%rdi movq (%rdx),%rdx leaq 64+32(%rsp),%rbx movq %rdx,%r9 mulxq 0(%rsi),%r8,%rax mulxq 8(%rsi),%r11,%r14 addq %rax,%r11 movq %rdi,8(%rsp) mulxq 16(%rsi),%r12,%r13 adcq %r14,%r12 adcq $0,%r13 movq %r8,%rdi imulq 24(%rsp),%r8 xorq %rbp,%rbp mulxq 24(%rsi),%rax,%r14 movq %r8,%rdx leaq 32(%rsi),%rsi adcxq %rax,%r13 adcxq %rbp,%r14 mulxq 0(%rcx),%rax,%r10 adcxq %rax,%rdi adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00 movq 48(%rsp),%rdi movq %r10,-32(%rbx) adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r11,-24(%rbx) adcxq %rax,%r12 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r12,-16(%rbx) jmp .Lmulx4x_1st .align 32 .Lmulx4x_1st: adcxq %rbp,%r15 mulxq 0(%rsi),%r10,%rax adcxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 .byte 0x67,0x67 movq %r8,%rdx adcxq %rax,%r13 adcxq %rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adoxq %r15,%r10 mulxq 0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 movq %r11,-32(%rbx) adoxq %r15,%r13 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r13,-16(%rbx) decq %rdi jnz .Lmulx4x_1st movq 0(%rsp),%rax movq 8(%rsp),%rdi adcq %rbp,%r15 addq %r15,%r14 sbbq %r15,%r15 movq %r14,-8(%rbx) jmp .Lmulx4x_outer .align 32 .Lmulx4x_outer: movq (%rdi),%rdx leaq 8(%rdi),%rdi subq %rax,%rsi movq %r15,(%rbx) leaq 64+32(%rsp),%rbx subq %rax,%rcx mulxq 0(%rsi),%r8,%r11 xorl %ebp,%ebp movq %rdx,%r9 mulxq 8(%rsi),%r14,%r12 adoxq -32(%rbx),%r8 adcxq %r14,%r11 mulxq 16(%rsi),%r15,%r13 adoxq -24(%rbx),%r11 adcxq %r15,%r12 adoxq -16(%rbx),%r12 adcxq %rbp,%r13 adoxq %rbp,%r13 movq %rdi,8(%rsp) movq %r8,%r15 imulq 24(%rsp),%r8 xorl %ebp,%ebp mulxq 24(%rsi),%rax,%r14 movq %r8,%rdx adcxq %rax,%r13 adoxq -8(%rbx),%r13 adcxq %rbp,%r14 leaq 32(%rsi),%rsi adoxq %rbp,%r14 mulxq 0(%rcx),%rax,%r10 adcxq %rax,%r15 adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 mulxq 16(%rcx),%rax,%r12 movq %r10,-32(%rbx) adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r11,-24(%rbx) leaq 32(%rcx),%rcx adcxq %rax,%r12 adoxq %rbp,%r15 movq 48(%rsp),%rdi movq %r12,-16(%rbx) jmp .Lmulx4x_inner .align 32 .Lmulx4x_inner: mulxq 0(%rsi),%r10,%rax adcxq %rbp,%r15 adoxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq 0(%rbx),%r10 adoxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq 8(%rbx),%r11 adoxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 movq %r8,%rdx adcxq 16(%rbx),%r12 adoxq %rax,%r13 adcxq 24(%rbx),%r13 adoxq %rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adcxq %rbp,%r14 adoxq %r15,%r10 mulxq 0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 adoxq %r15,%r13 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r11,-32(%rbx) movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r13,-16(%rbx) decq %rdi jnz .Lmulx4x_inner movq 0(%rsp),%rax movq 8(%rsp),%rdi adcq %rbp,%r15 subq 0(%rbx),%rbp adcq %r15,%r14 sbbq %r15,%r15 movq %r14,-8(%rbx) cmpq 16(%rsp),%rdi jne .Lmulx4x_outer leaq 64(%rsp),%rbx subq %rax,%rcx negq 
%r15 movq %rax,%rdx shrq $3+2,%rax movq 32(%rsp),%rdi jmp .Lmulx4x_sub .align 32 .Lmulx4x_sub: movq 0(%rbx),%r11 movq 8(%rbx),%r12 movq 16(%rbx),%r13 movq 24(%rbx),%r14 leaq 32(%rbx),%rbx sbbq 0(%rcx),%r11 sbbq 8(%rcx),%r12 sbbq 16(%rcx),%r13 sbbq 24(%rcx),%r14 leaq 32(%rcx),%rcx movq %r11,0(%rdi) movq %r12,8(%rdi) movq %r13,16(%rdi) movq %r14,24(%rdi) leaq 32(%rdi),%rdi decq %rax jnz .Lmulx4x_sub sbbq $0,%r15 leaq 64(%rsp),%rbx subq %rdx,%rdi .byte 102,73,15,110,207 pxor %xmm0,%xmm0 pshufd $0,%xmm1,%xmm1 movq 40(%rsp),%rsi .cfi_def_cfa %rsi,8 jmp .Lmulx4x_cond_copy .align 32 .Lmulx4x_cond_copy: movdqa 0(%rbx),%xmm2 movdqa 16(%rbx),%xmm3 leaq 32(%rbx),%rbx movdqu 0(%rdi),%xmm4 movdqu 16(%rdi),%xmm5 leaq 32(%rdi),%rdi movdqa %xmm0,-32(%rbx) movdqa %xmm0,-16(%rbx) pcmpeqd %xmm1,%xmm0 pand %xmm1,%xmm2 pand %xmm1,%xmm3 pand %xmm0,%xmm4 pand %xmm0,%xmm5 pxor %xmm0,%xmm0 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqu %xmm4,-32(%rdi) movdqu %xmm5,-16(%rdi) subq $32,%rdx jnz .Lmulx4x_cond_copy movq %rdx,(%rbx) movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lmulx4x_epilogue: ret .cfi_endproc .size bn_mulx4x_mont,.-bn_mulx4x_mont .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 16 #endif
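Background note (not part of the generated file above): the routines in this file, bn_mul_mont_nohw, bn_mul4x_mont and bn_mulx4x_mont, all compute a*b*R^{-1} mod n with R = 2^(64*num), interleaving a multiply-accumulate pass over the limbs with a reduction pass driven by n0 = -n^{-1} mod 2^64. The Rust sketch below shows that word-by-word algorithm in readable form under stated assumptions; it is a plain illustration, not the crate's API, it is not constant-time, and the limb layout, function names and the example modulus are all illustrative choices.

// Word-by-word Montgomery multiplication: returns a*b*R^{-1} mod n, where
// R = 2^(64*num) and n0 = -n^{-1} mod 2^64 (the value the asm loads as *n0).
// Limbs are little-endian u64. Readable sketch only; not constant-time.
fn mont_mul(a: &[u64], b: &[u64], n: &[u64], n0: u64) -> Vec<u64> {
    let num = n.len();
    let mut t = vec![0u64; num + 2]; // accumulator "tp" plus two carry words

    for i in 0..num {
        // Pass 1: t += a * b[i]  (the ".L1st"/".Linner" multiply-accumulate)
        let mut c: u128 = 0;
        for j in 0..num {
            let v = t[j] as u128 + (a[j] as u128) * (b[i] as u128) + c;
            t[j] = v as u64;
            c = v >> 64;
        }
        let v = t[num] as u128 + c;
        t[num] = v as u64;
        t[num + 1] += (v >> 64) as u64;

        // Pass 2: m = t[0]*n0 mod 2^64, then t += m*n, which forces t[0] to 0,
        // so dividing by 2^64 is just dropping the lowest word.
        let m = t[0].wrapping_mul(n0);
        let mut c: u128 = 0;
        for j in 0..num {
            let v = t[j] as u128 + (m as u128) * (n[j] as u128) + c;
            t[j] = v as u64;
            c = v >> 64;
        }
        let v = t[num] as u128 + c;
        t[num] = v as u64;
        t[num + 1] += (v >> 64) as u64;

        for j in 0..=num {
            t[j] = t[j + 1]; // shift right by one 64-bit word
        }
        t[num + 1] = 0;
    }

    // Final conditional subtraction (the ".Lsub" tail): result < 2n,
    // so subtract n once if the value is still >= n.
    let mut r: Vec<u64> = t[..num].to_vec();
    let mut ge = t[num] != 0;
    if !ge {
        ge = true;
        for j in (0..num).rev() {
            if r[j] != n[j] {
                ge = r[j] > n[j];
                break;
            }
        }
    }
    if ge {
        let mut borrow = 0u64;
        for j in 0..num {
            let (d1, b1) = r[j].overflowing_sub(n[j]);
            let (d2, b2) = d1.overflowing_sub(borrow);
            r[j] = d2;
            borrow = (b1 as u64) + (b2 as u64);
        }
    }
    r
}

fn main() {
    // One-limb demo with an arbitrary odd modulus (hypothetical value).
    let n = [0xffff_ffff_ffff_ffc5u64];
    // n^{-1} mod 2^64 by Newton/Hensel lifting (n must be odd), then negate.
    let mut inv = n[0]; // correct to 3 bits for any odd n
    for _ in 0..5 {
        inv = inv.wrapping_mul(2u64.wrapping_sub(n[0].wrapping_mul(inv)));
    }
    let n0 = inv.wrapping_neg();

    let (a, b) = (0x1234_5678_9abc_def0u64 % n[0], 0x0fed_cba9_8765_4321u64 % n[0]);
    let r2 = {
        let r = (1u128 << 64) % (n[0] as u128); // R mod n
        ((r * r) % (n[0] as u128)) as u64       // R^2 mod n, used to enter Montgomery form
    };
    let am = mont_mul(&[a], &[r2], &n, n0)[0]; // a*R mod n
    let bm = mont_mul(&[b], &[r2], &n, n0)[0]; // b*R mod n
    let ab = mont_mul(&[mont_mul(&[am], &[bm], &n, n0)[0]], &[1], &n, n0)[0];
    assert_eq!(ab as u128, (a as u128 * b as u128) % (n[0] as u128));
    println!("a*b mod n = {:#x}", ab);
}

The round trip in main (multiply by R^2 mod n to enter Montgomery form, multiply by 1 to leave it) is the usual way these primitives are driven; the assembly above only provides the inner multiplication.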
marvin-hansen/iggy-streaming-system
21,844
thirdparty/crates/ring-0.17.9/pregenerated/armv4-mont-linux32.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) #include <ring-core/arm_arch.h> @ Silence ARMv8 deprecated IT instruction warnings. This file is used by both @ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. .arch armv7-a .text #if defined(__thumb2__) .syntax unified .thumb #else .code 32 #endif .globl bn_mul_mont_nohw .hidden bn_mul_mont_nohw .type bn_mul_mont_nohw,%function .align 5 bn_mul_mont_nohw: ldr ip,[sp,#4] @ load num stmdb sp!,{r0,r2} @ sp points at argument block cmp ip,#2 mov r0,ip @ load num #ifdef __thumb2__ ittt lt #endif movlt r0,#0 addlt sp,sp,#2*4 blt .Labrt stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ save 10 registers mov r0,r0,lsl#2 @ rescale r0 for byte count sub sp,sp,r0 @ alloca(4*num) sub sp,sp,#4 @ +extra dword sub r0,r0,#4 @ "num=num-1" add r4,r2,r0 @ &bp[num-1] add r0,sp,r0 @ r0 to point at &tp[num-1] ldr r8,[r0,#14*4] @ &n0 ldr r2,[r2] @ bp[0] ldr r5,[r1],#4 @ ap[0],ap++ ldr r6,[r3],#4 @ np[0],np++ ldr r8,[r8] @ *n0 str r4,[r0,#15*4] @ save &bp[num] umull r10,r11,r5,r2 @ ap[0]*bp[0] str r8,[r0,#14*4] @ save n0 value mul r8,r10,r8 @ "tp[0]"*n0 mov r12,#0 umlal r10,r12,r6,r8 @ np[0]*n0+"t[0]" mov r4,sp .L1st: ldr r5,[r1],#4 @ ap[j],ap++ mov r10,r11 ldr r6,[r3],#4 @ np[j],np++ mov r11,#0 umlal r10,r11,r5,r2 @ ap[j]*bp[0] mov r14,#0 umlal r12,r14,r6,r8 @ np[j]*n0 adds r12,r12,r10 str r12,[r4],#4 @ tp[j-1]=,tp++ adc r12,r14,#0 cmp r4,r0 bne .L1st adds r12,r12,r11 ldr r4,[r0,#13*4] @ restore bp mov r14,#0 ldr r8,[r0,#14*4] @ restore n0 adc r14,r14,#0 str r12,[r0] @ tp[num-1]= mov r7,sp str r14,[r0,#4] @ tp[num]= .Louter: sub r7,r0,r7 @ "original" r0-1 value sub r1,r1,r7 @ "rewind" ap to &ap[1] ldr r2,[r4,#4]! 
@ *(++bp) sub r3,r3,r7 @ "rewind" np to &np[1] ldr r5,[r1,#-4] @ ap[0] ldr r10,[sp] @ tp[0] ldr r6,[r3,#-4] @ np[0] ldr r7,[sp,#4] @ tp[1] mov r11,#0 umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0] str r4,[r0,#13*4] @ save bp mul r8,r10,r8 mov r12,#0 umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]" mov r4,sp .Linner: ldr r5,[r1],#4 @ ap[j],ap++ adds r10,r11,r7 @ +=tp[j] ldr r6,[r3],#4 @ np[j],np++ mov r11,#0 umlal r10,r11,r5,r2 @ ap[j]*bp[i] mov r14,#0 umlal r12,r14,r6,r8 @ np[j]*n0 adc r11,r11,#0 ldr r7,[r4,#8] @ tp[j+1] adds r12,r12,r10 str r12,[r4],#4 @ tp[j-1]=,tp++ adc r12,r14,#0 cmp r4,r0 bne .Linner adds r12,r12,r11 mov r14,#0 ldr r4,[r0,#13*4] @ restore bp adc r14,r14,#0 ldr r8,[r0,#14*4] @ restore n0 adds r12,r12,r7 ldr r7,[r0,#15*4] @ restore &bp[num] adc r14,r14,#0 str r12,[r0] @ tp[num-1]= str r14,[r0,#4] @ tp[num]= cmp r4,r7 #ifdef __thumb2__ itt ne #endif movne r7,sp bne .Louter ldr r2,[r0,#12*4] @ pull rp mov r5,sp add r0,r0,#4 @ r0 to point at &tp[num] sub r5,r0,r5 @ "original" num value mov r4,sp @ "rewind" r4 mov r1,r4 @ "borrow" r1 sub r3,r3,r5 @ "rewind" r3 to &np[0] subs r7,r7,r7 @ "clear" carry flag .Lsub: ldr r7,[r4],#4 ldr r6,[r3],#4 sbcs r7,r7,r6 @ tp[j]-np[j] str r7,[r2],#4 @ rp[j]= teq r4,r0 @ preserve carry bne .Lsub sbcs r14,r14,#0 @ upmost carry mov r4,sp @ "rewind" r4 sub r2,r2,r5 @ "rewind" r2 .Lcopy: ldr r7,[r4] @ conditional copy ldr r5,[r2] str sp,[r4],#4 @ zap tp #ifdef __thumb2__ it cc #endif movcc r5,r7 str r5,[r2],#4 teq r4,r0 @ preserve carry bne .Lcopy mov sp,r0 add sp,sp,#4 @ skip over tp[num+1] ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ restore registers add sp,sp,#2*4 @ skip over {r0,r2} mov r0,#1 .Labrt: #if __ARM_ARCH>=5 bx lr @ bx lr #else tst lr,#1 moveq pc,lr @ be binary compatible with V4, yet .word 0xe12fff1e @ interoperable with Thumb ISA:-) #endif .size bn_mul_mont_nohw,.-bn_mul_mont_nohw #if __ARM_MAX_ARCH__>=7 .arch armv7-a .fpu neon .globl bn_mul8x_mont_neon .hidden bn_mul8x_mont_neon .type bn_mul8x_mont_neon,%function .align 5 bn_mul8x_mont_neon: mov ip,sp stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so ldmia ip,{r4,r5} @ load rest of parameter block mov ip,sp cmp r5,#8 bhi .LNEON_8n @ special case for r5==8, everything is in register bank... vld1.32 {d28[0]}, [r2,:32]! veor d8,d8,d8 sub r7,sp,r5,lsl#4 vld1.32 {d0,d1,d2,d3}, [r1]! @ can't specify :32 :-( and r7,r7,#-64 vld1.32 {d30[0]}, [r4,:32] mov sp,r7 @ alloca vzip.16 d28,d8 vmull.u32 q6,d28,d0[0] vmull.u32 q7,d28,d0[1] vmull.u32 q8,d28,d1[0] vshl.i64 d29,d13,#16 vmull.u32 q9,d28,d1[1] vadd.u64 d29,d29,d12 veor d8,d8,d8 vmul.u32 d29,d29,d30 vmull.u32 q10,d28,d2[0] vld1.32 {d4,d5,d6,d7}, [r3]! vmull.u32 q11,d28,d2[1] vmull.u32 q12,d28,d3[0] vzip.16 d29,d8 vmull.u32 q13,d28,d3[1] vmlal.u32 q6,d29,d4[0] sub r9,r5,#1 vmlal.u32 q7,d29,d4[1] vmlal.u32 q8,d29,d5[0] vmlal.u32 q9,d29,d5[1] vmlal.u32 q10,d29,d6[0] vmov q5,q6 vmlal.u32 q11,d29,d6[1] vmov q6,q7 vmlal.u32 q12,d29,d7[0] vmov q7,q8 vmlal.u32 q13,d29,d7[1] vmov q8,q9 vmov q9,q10 vshr.u64 d10,d10,#16 vmov q10,q11 vmov q11,q12 vadd.u64 d10,d10,d11 vmov q12,q13 veor q13,q13 vshr.u64 d10,d10,#16 b .LNEON_outer8 .align 4 .LNEON_outer8: vld1.32 {d28[0]}, [r2,:32]! 
veor d8,d8,d8 vzip.16 d28,d8 vadd.u64 d12,d12,d10 vmlal.u32 q6,d28,d0[0] vmlal.u32 q7,d28,d0[1] vmlal.u32 q8,d28,d1[0] vshl.i64 d29,d13,#16 vmlal.u32 q9,d28,d1[1] vadd.u64 d29,d29,d12 veor d8,d8,d8 subs r9,r9,#1 vmul.u32 d29,d29,d30 vmlal.u32 q10,d28,d2[0] vmlal.u32 q11,d28,d2[1] vmlal.u32 q12,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q13,d28,d3[1] vmlal.u32 q6,d29,d4[0] vmlal.u32 q7,d29,d4[1] vmlal.u32 q8,d29,d5[0] vmlal.u32 q9,d29,d5[1] vmlal.u32 q10,d29,d6[0] vmov q5,q6 vmlal.u32 q11,d29,d6[1] vmov q6,q7 vmlal.u32 q12,d29,d7[0] vmov q7,q8 vmlal.u32 q13,d29,d7[1] vmov q8,q9 vmov q9,q10 vshr.u64 d10,d10,#16 vmov q10,q11 vmov q11,q12 vadd.u64 d10,d10,d11 vmov q12,q13 veor q13,q13 vshr.u64 d10,d10,#16 bne .LNEON_outer8 vadd.u64 d12,d12,d10 mov r7,sp vshr.u64 d10,d12,#16 mov r8,r5 vadd.u64 d13,d13,d10 add r6,sp,#96 vshr.u64 d10,d13,#16 vzip.16 d12,d13 b .LNEON_tail_entry .align 4 .LNEON_8n: veor q6,q6,q6 sub r7,sp,#128 veor q7,q7,q7 sub r7,r7,r5,lsl#4 veor q8,q8,q8 and r7,r7,#-64 veor q9,q9,q9 mov sp,r7 @ alloca veor q10,q10,q10 add r7,r7,#256 veor q11,q11,q11 sub r8,r5,#8 veor q12,q12,q12 veor q13,q13,q13 .LNEON_8n_init: vst1.64 {q6,q7},[r7,:256]! subs r8,r8,#8 vst1.64 {q8,q9},[r7,:256]! vst1.64 {q10,q11},[r7,:256]! vst1.64 {q12,q13},[r7,:256]! bne .LNEON_8n_init add r6,sp,#256 vld1.32 {d0,d1,d2,d3},[r1]! add r10,sp,#8 vld1.32 {d30[0]},[r4,:32] mov r9,r5 b .LNEON_8n_outer .align 4 .LNEON_8n_outer: vld1.32 {d28[0]},[r2,:32]! @ *b++ veor d8,d8,d8 vzip.16 d28,d8 add r7,sp,#128 vld1.32 {d4,d5,d6,d7},[r3]! vmlal.u32 q6,d28,d0[0] vmlal.u32 q7,d28,d0[1] veor d8,d8,d8 vmlal.u32 q8,d28,d1[0] vshl.i64 d29,d13,#16 vmlal.u32 q9,d28,d1[1] vadd.u64 d29,d29,d12 vmlal.u32 q10,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q11,d28,d2[1] vst1.32 {d28},[sp,:64] @ put aside smashed b[8*i+0] vmlal.u32 q12,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q13,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q6,d29,d4[0] veor d10,d10,d10 vmlal.u32 q7,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q8,d29,d5[0] vshr.u64 d12,d12,#16 vmlal.u32 q9,d29,d5[1] vmlal.u32 q10,d29,d6[0] vadd.u64 d12,d12,d13 vmlal.u32 q11,d29,d6[1] vshr.u64 d12,d12,#16 vmlal.u32 q12,d29,d7[0] vmlal.u32 q13,d29,d7[1] vadd.u64 d14,d14,d12 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+0] vmlal.u32 q7,d28,d0[0] vld1.64 {q6},[r6,:128]! vmlal.u32 q8,d28,d0[1] veor d8,d8,d8 vmlal.u32 q9,d28,d1[0] vshl.i64 d29,d15,#16 vmlal.u32 q10,d28,d1[1] vadd.u64 d29,d29,d14 vmlal.u32 q11,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q12,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+1] vmlal.u32 q13,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q6,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q7,d29,d4[0] veor d10,d10,d10 vmlal.u32 q8,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q9,d29,d5[0] vshr.u64 d14,d14,#16 vmlal.u32 q10,d29,d5[1] vmlal.u32 q11,d29,d6[0] vadd.u64 d14,d14,d15 vmlal.u32 q12,d29,d6[1] vshr.u64 d14,d14,#16 vmlal.u32 q13,d29,d7[0] vmlal.u32 q6,d29,d7[1] vadd.u64 d16,d16,d14 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+1] vmlal.u32 q8,d28,d0[0] vld1.64 {q7},[r6,:128]! vmlal.u32 q9,d28,d0[1] veor d8,d8,d8 vmlal.u32 q10,d28,d1[0] vshl.i64 d29,d17,#16 vmlal.u32 q11,d28,d1[1] vadd.u64 d29,d29,d16 vmlal.u32 q12,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q13,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+2] vmlal.u32 q6,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q7,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! 
@ *b++ vmlal.u32 q8,d29,d4[0] veor d10,d10,d10 vmlal.u32 q9,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q10,d29,d5[0] vshr.u64 d16,d16,#16 vmlal.u32 q11,d29,d5[1] vmlal.u32 q12,d29,d6[0] vadd.u64 d16,d16,d17 vmlal.u32 q13,d29,d6[1] vshr.u64 d16,d16,#16 vmlal.u32 q6,d29,d7[0] vmlal.u32 q7,d29,d7[1] vadd.u64 d18,d18,d16 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+2] vmlal.u32 q9,d28,d0[0] vld1.64 {q8},[r6,:128]! vmlal.u32 q10,d28,d0[1] veor d8,d8,d8 vmlal.u32 q11,d28,d1[0] vshl.i64 d29,d19,#16 vmlal.u32 q12,d28,d1[1] vadd.u64 d29,d29,d18 vmlal.u32 q13,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q6,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+3] vmlal.u32 q7,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q8,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q9,d29,d4[0] veor d10,d10,d10 vmlal.u32 q10,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q11,d29,d5[0] vshr.u64 d18,d18,#16 vmlal.u32 q12,d29,d5[1] vmlal.u32 q13,d29,d6[0] vadd.u64 d18,d18,d19 vmlal.u32 q6,d29,d6[1] vshr.u64 d18,d18,#16 vmlal.u32 q7,d29,d7[0] vmlal.u32 q8,d29,d7[1] vadd.u64 d20,d20,d18 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+3] vmlal.u32 q10,d28,d0[0] vld1.64 {q9},[r6,:128]! vmlal.u32 q11,d28,d0[1] veor d8,d8,d8 vmlal.u32 q12,d28,d1[0] vshl.i64 d29,d21,#16 vmlal.u32 q13,d28,d1[1] vadd.u64 d29,d29,d20 vmlal.u32 q6,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q7,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+4] vmlal.u32 q8,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q9,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q10,d29,d4[0] veor d10,d10,d10 vmlal.u32 q11,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q12,d29,d5[0] vshr.u64 d20,d20,#16 vmlal.u32 q13,d29,d5[1] vmlal.u32 q6,d29,d6[0] vadd.u64 d20,d20,d21 vmlal.u32 q7,d29,d6[1] vshr.u64 d20,d20,#16 vmlal.u32 q8,d29,d7[0] vmlal.u32 q9,d29,d7[1] vadd.u64 d22,d22,d20 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+4] vmlal.u32 q11,d28,d0[0] vld1.64 {q10},[r6,:128]! vmlal.u32 q12,d28,d0[1] veor d8,d8,d8 vmlal.u32 q13,d28,d1[0] vshl.i64 d29,d23,#16 vmlal.u32 q6,d28,d1[1] vadd.u64 d29,d29,d22 vmlal.u32 q7,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q8,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+5] vmlal.u32 q9,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q10,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q11,d29,d4[0] veor d10,d10,d10 vmlal.u32 q12,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q13,d29,d5[0] vshr.u64 d22,d22,#16 vmlal.u32 q6,d29,d5[1] vmlal.u32 q7,d29,d6[0] vadd.u64 d22,d22,d23 vmlal.u32 q8,d29,d6[1] vshr.u64 d22,d22,#16 vmlal.u32 q9,d29,d7[0] vmlal.u32 q10,d29,d7[1] vadd.u64 d24,d24,d22 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+5] vmlal.u32 q12,d28,d0[0] vld1.64 {q11},[r6,:128]! vmlal.u32 q13,d28,d0[1] veor d8,d8,d8 vmlal.u32 q6,d28,d1[0] vshl.i64 d29,d25,#16 vmlal.u32 q7,d28,d1[1] vadd.u64 d29,d29,d24 vmlal.u32 q8,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q9,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+6] vmlal.u32 q10,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q11,d28,d3[1] vld1.32 {d28[0]},[r2,:32]! @ *b++ vmlal.u32 q12,d29,d4[0] veor d10,d10,d10 vmlal.u32 q13,d29,d4[1] vzip.16 d28,d10 vmlal.u32 q6,d29,d5[0] vshr.u64 d24,d24,#16 vmlal.u32 q7,d29,d5[1] vmlal.u32 q8,d29,d6[0] vadd.u64 d24,d24,d25 vmlal.u32 q9,d29,d6[1] vshr.u64 d24,d24,#16 vmlal.u32 q10,d29,d7[0] vmlal.u32 q11,d29,d7[1] vadd.u64 d26,d26,d24 vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+6] vmlal.u32 q13,d28,d0[0] vld1.64 {q12},[r6,:128]! 
vmlal.u32 q6,d28,d0[1] veor d8,d8,d8 vmlal.u32 q7,d28,d1[0] vshl.i64 d29,d27,#16 vmlal.u32 q8,d28,d1[1] vadd.u64 d29,d29,d26 vmlal.u32 q9,d28,d2[0] vmul.u32 d29,d29,d30 vmlal.u32 q10,d28,d2[1] vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+7] vmlal.u32 q11,d28,d3[0] vzip.16 d29,d8 vmlal.u32 q12,d28,d3[1] vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0] vmlal.u32 q13,d29,d4[0] vld1.32 {d0,d1,d2,d3},[r1]! vmlal.u32 q6,d29,d4[1] vmlal.u32 q7,d29,d5[0] vshr.u64 d26,d26,#16 vmlal.u32 q8,d29,d5[1] vmlal.u32 q9,d29,d6[0] vadd.u64 d26,d26,d27 vmlal.u32 q10,d29,d6[1] vshr.u64 d26,d26,#16 vmlal.u32 q11,d29,d7[0] vmlal.u32 q12,d29,d7[1] vadd.u64 d12,d12,d26 vst1.32 {d29},[r10,:64] @ put aside smashed m[8*i+7] add r10,sp,#8 @ rewind sub r8,r5,#8 b .LNEON_8n_inner .align 4 .LNEON_8n_inner: subs r8,r8,#8 vmlal.u32 q6,d28,d0[0] vld1.64 {q13},[r6,:128] vmlal.u32 q7,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+0] vmlal.u32 q8,d28,d1[0] vld1.32 {d4,d5,d6,d7},[r3]! vmlal.u32 q9,d28,d1[1] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q10,d28,d2[0] vmlal.u32 q11,d28,d2[1] vmlal.u32 q12,d28,d3[0] vmlal.u32 q13,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+1] vmlal.u32 q6,d29,d4[0] vmlal.u32 q7,d29,d4[1] vmlal.u32 q8,d29,d5[0] vmlal.u32 q9,d29,d5[1] vmlal.u32 q10,d29,d6[0] vmlal.u32 q11,d29,d6[1] vmlal.u32 q12,d29,d7[0] vmlal.u32 q13,d29,d7[1] vst1.64 {q6},[r7,:128]! vmlal.u32 q7,d28,d0[0] vld1.64 {q6},[r6,:128] vmlal.u32 q8,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+1] vmlal.u32 q9,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q10,d28,d1[1] vmlal.u32 q11,d28,d2[0] vmlal.u32 q12,d28,d2[1] vmlal.u32 q13,d28,d3[0] vmlal.u32 q6,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+2] vmlal.u32 q7,d29,d4[0] vmlal.u32 q8,d29,d4[1] vmlal.u32 q9,d29,d5[0] vmlal.u32 q10,d29,d5[1] vmlal.u32 q11,d29,d6[0] vmlal.u32 q12,d29,d6[1] vmlal.u32 q13,d29,d7[0] vmlal.u32 q6,d29,d7[1] vst1.64 {q7},[r7,:128]! vmlal.u32 q8,d28,d0[0] vld1.64 {q7},[r6,:128] vmlal.u32 q9,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+2] vmlal.u32 q10,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q11,d28,d1[1] vmlal.u32 q12,d28,d2[0] vmlal.u32 q13,d28,d2[1] vmlal.u32 q6,d28,d3[0] vmlal.u32 q7,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+3] vmlal.u32 q8,d29,d4[0] vmlal.u32 q9,d29,d4[1] vmlal.u32 q10,d29,d5[0] vmlal.u32 q11,d29,d5[1] vmlal.u32 q12,d29,d6[0] vmlal.u32 q13,d29,d6[1] vmlal.u32 q6,d29,d7[0] vmlal.u32 q7,d29,d7[1] vst1.64 {q8},[r7,:128]! vmlal.u32 q9,d28,d0[0] vld1.64 {q8},[r6,:128] vmlal.u32 q10,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+3] vmlal.u32 q11,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q12,d28,d1[1] vmlal.u32 q13,d28,d2[0] vmlal.u32 q6,d28,d2[1] vmlal.u32 q7,d28,d3[0] vmlal.u32 q8,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+4] vmlal.u32 q9,d29,d4[0] vmlal.u32 q10,d29,d4[1] vmlal.u32 q11,d29,d5[0] vmlal.u32 q12,d29,d5[1] vmlal.u32 q13,d29,d6[0] vmlal.u32 q6,d29,d6[1] vmlal.u32 q7,d29,d7[0] vmlal.u32 q8,d29,d7[1] vst1.64 {q9},[r7,:128]! vmlal.u32 q10,d28,d0[0] vld1.64 {q9},[r6,:128] vmlal.u32 q11,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+4] vmlal.u32 q12,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q13,d28,d1[1] vmlal.u32 q6,d28,d2[0] vmlal.u32 q7,d28,d2[1] vmlal.u32 q8,d28,d3[0] vmlal.u32 q9,d28,d3[1] vld1.32 {d28},[r10,:64]! 
@ pull smashed b[8*i+5] vmlal.u32 q10,d29,d4[0] vmlal.u32 q11,d29,d4[1] vmlal.u32 q12,d29,d5[0] vmlal.u32 q13,d29,d5[1] vmlal.u32 q6,d29,d6[0] vmlal.u32 q7,d29,d6[1] vmlal.u32 q8,d29,d7[0] vmlal.u32 q9,d29,d7[1] vst1.64 {q10},[r7,:128]! vmlal.u32 q11,d28,d0[0] vld1.64 {q10},[r6,:128] vmlal.u32 q12,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+5] vmlal.u32 q13,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q6,d28,d1[1] vmlal.u32 q7,d28,d2[0] vmlal.u32 q8,d28,d2[1] vmlal.u32 q9,d28,d3[0] vmlal.u32 q10,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+6] vmlal.u32 q11,d29,d4[0] vmlal.u32 q12,d29,d4[1] vmlal.u32 q13,d29,d5[0] vmlal.u32 q6,d29,d5[1] vmlal.u32 q7,d29,d6[0] vmlal.u32 q8,d29,d6[1] vmlal.u32 q9,d29,d7[0] vmlal.u32 q10,d29,d7[1] vst1.64 {q11},[r7,:128]! vmlal.u32 q12,d28,d0[0] vld1.64 {q11},[r6,:128] vmlal.u32 q13,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+6] vmlal.u32 q6,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q7,d28,d1[1] vmlal.u32 q8,d28,d2[0] vmlal.u32 q9,d28,d2[1] vmlal.u32 q10,d28,d3[0] vmlal.u32 q11,d28,d3[1] vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+7] vmlal.u32 q12,d29,d4[0] vmlal.u32 q13,d29,d4[1] vmlal.u32 q6,d29,d5[0] vmlal.u32 q7,d29,d5[1] vmlal.u32 q8,d29,d6[0] vmlal.u32 q9,d29,d6[1] vmlal.u32 q10,d29,d7[0] vmlal.u32 q11,d29,d7[1] vst1.64 {q12},[r7,:128]! vmlal.u32 q13,d28,d0[0] vld1.64 {q12},[r6,:128] vmlal.u32 q6,d28,d0[1] vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+7] vmlal.u32 q7,d28,d1[0] it ne addne r6,r6,#16 @ don't advance in last iteration vmlal.u32 q8,d28,d1[1] vmlal.u32 q9,d28,d2[0] vmlal.u32 q10,d28,d2[1] vmlal.u32 q11,d28,d3[0] vmlal.u32 q12,d28,d3[1] it eq subeq r1,r1,r5,lsl#2 @ rewind vmlal.u32 q13,d29,d4[0] vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0] vmlal.u32 q6,d29,d4[1] vld1.32 {d0,d1,d2,d3},[r1]! vmlal.u32 q7,d29,d5[0] add r10,sp,#8 @ rewind vmlal.u32 q8,d29,d5[1] vmlal.u32 q9,d29,d6[0] vmlal.u32 q10,d29,d6[1] vmlal.u32 q11,d29,d7[0] vst1.64 {q13},[r7,:128]! vmlal.u32 q12,d29,d7[1] bne .LNEON_8n_inner add r6,sp,#128 vst1.64 {q6,q7},[r7,:256]! veor q2,q2,q2 @ d4-d5 vst1.64 {q8,q9},[r7,:256]! veor q3,q3,q3 @ d6-d7 vst1.64 {q10,q11},[r7,:256]! vst1.64 {q12},[r7,:128] subs r9,r9,#8 vld1.64 {q6,q7},[r6,:256]! vld1.64 {q8,q9},[r6,:256]! vld1.64 {q10,q11},[r6,:256]! vld1.64 {q12,q13},[r6,:256]! itt ne subne r3,r3,r5,lsl#2 @ rewind bne .LNEON_8n_outer add r7,sp,#128 vst1.64 {q2,q3}, [sp,:256]! @ start wiping stack frame vshr.u64 d10,d12,#16 vst1.64 {q2,q3},[sp,:256]! vadd.u64 d13,d13,d10 vst1.64 {q2,q3}, [sp,:256]! vshr.u64 d10,d13,#16 vst1.64 {q2,q3}, [sp,:256]! vzip.16 d12,d13 mov r8,r5 b .LNEON_tail_entry .align 4 .LNEON_tail: vadd.u64 d12,d12,d10 vshr.u64 d10,d12,#16 vld1.64 {q8,q9}, [r6, :256]! vadd.u64 d13,d13,d10 vld1.64 {q10,q11}, [r6, :256]! vshr.u64 d10,d13,#16 vld1.64 {q12,q13}, [r6, :256]! vzip.16 d12,d13 .LNEON_tail_entry: vadd.u64 d14,d14,d10 vst1.32 {d12[0]}, [r7, :32]! vshr.u64 d10,d14,#16 vadd.u64 d15,d15,d10 vshr.u64 d10,d15,#16 vzip.16 d14,d15 vadd.u64 d16,d16,d10 vst1.32 {d14[0]}, [r7, :32]! vshr.u64 d10,d16,#16 vadd.u64 d17,d17,d10 vshr.u64 d10,d17,#16 vzip.16 d16,d17 vadd.u64 d18,d18,d10 vst1.32 {d16[0]}, [r7, :32]! vshr.u64 d10,d18,#16 vadd.u64 d19,d19,d10 vshr.u64 d10,d19,#16 vzip.16 d18,d19 vadd.u64 d20,d20,d10 vst1.32 {d18[0]}, [r7, :32]! vshr.u64 d10,d20,#16 vadd.u64 d21,d21,d10 vshr.u64 d10,d21,#16 vzip.16 d20,d21 vadd.u64 d22,d22,d10 vst1.32 {d20[0]}, [r7, :32]! 
vshr.u64 d10,d22,#16 vadd.u64 d23,d23,d10 vshr.u64 d10,d23,#16 vzip.16 d22,d23 vadd.u64 d24,d24,d10 vst1.32 {d22[0]}, [r7, :32]! vshr.u64 d10,d24,#16 vadd.u64 d25,d25,d10 vshr.u64 d10,d25,#16 vzip.16 d24,d25 vadd.u64 d26,d26,d10 vst1.32 {d24[0]}, [r7, :32]! vshr.u64 d10,d26,#16 vadd.u64 d27,d27,d10 vshr.u64 d10,d27,#16 vzip.16 d26,d27 vld1.64 {q6,q7}, [r6, :256]! subs r8,r8,#8 vst1.32 {d26[0]}, [r7, :32]! bne .LNEON_tail vst1.32 {d10[0]}, [r7, :32] @ top-most bit sub r3,r3,r5,lsl#2 @ rewind r3 subs r1,sp,#0 @ clear carry flag add r2,sp,r5,lsl#2 .LNEON_sub: ldmia r1!, {r4,r5,r6,r7} ldmia r3!, {r8,r9,r10,r11} sbcs r8, r4,r8 sbcs r9, r5,r9 sbcs r10,r6,r10 sbcs r11,r7,r11 teq r1,r2 @ preserves carry stmia r0!, {r8,r9,r10,r11} bne .LNEON_sub ldr r10, [r1] @ load top-most bit mov r11,sp veor q0,q0,q0 sub r11,r2,r11 @ this is num*4 veor q1,q1,q1 mov r1,sp sub r0,r0,r11 @ rewind r0 mov r3,r2 @ second 3/4th of frame sbcs r10,r10,#0 @ result is carry flag .LNEON_copy_n_zap: ldmia r1!, {r4,r5,r6,r7} ldmia r0, {r8,r9,r10,r11} it cc movcc r8, r4 vst1.64 {q0,q1}, [r3,:256]! @ wipe itt cc movcc r9, r5 movcc r10,r6 vst1.64 {q0,q1}, [r3,:256]! @ wipe it cc movcc r11,r7 ldmia r1, {r4,r5,r6,r7} stmia r0!, {r8,r9,r10,r11} sub r1,r1,#16 ldmia r0, {r8,r9,r10,r11} it cc movcc r8, r4 vst1.64 {q0,q1}, [r1,:256]! @ wipe itt cc movcc r9, r5 movcc r10,r6 vst1.64 {q0,q1}, [r3,:256]! @ wipe it cc movcc r11,r7 teq r1,r2 @ preserves carry stmia r0!, {r8,r9,r10,r11} bne .LNEON_copy_n_zap mov sp,ip vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11} bx lr @ bx lr .size bn_mul8x_mont_neon,.-bn_mul8x_mont_neon #endif .byte 77,111,110,116,103,111,109,101,114,121,32,109,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
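Background note (not part of the generated file above): both the scalar and NEON paths in this file end with a conditional subtract-and-copy tail (.Lsub/.Lcopy and .LNEON_sub/.LNEON_copy_n_zap), where the final borrow decides between the reduced and unreduced value using conditional moves rather than a data-dependent branch. The Rust sketch below shows the mask-based equivalent of that selection; the function name and slice layout are illustrative assumptions, not anything taken from the crate.

// Branch-free select between the candidate result `sub` (value - n) and the
// original `val`, keyed on the final borrow bit: borrow == 1 means the
// subtraction underflowed, so keep `val`; borrow == 0 means keep `sub`.
fn ct_copy(borrow: u64, val: &[u64], sub: &[u64], out: &mut [u64]) {
    let keep_val = borrow.wrapping_neg(); // 1 -> all ones, 0 -> all zeros
    for i in 0..out.len() {
        out[i] = (val[i] & keep_val) | (sub[i] & !keep_val);
    }
}

fn main() {
    let val = [5u64, 0, 0];
    let sub = [1u64, 0, 0];
    let mut out = [0u64; 3];
    ct_copy(1, &val, &sub, &mut out); // borrow set: subtraction underflowed, keep val
    assert_eq!(out, val);
    ct_copy(0, &val, &sub, &mut out); // no borrow: the subtracted value is the answer
    assert_eq!(out, sub);
    println!("{:?}", out);
}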
marvin-hansen/iggy-streaming-system
19,873
thirdparty/crates/ring-0.17.9/pregenerated/aesni-x86_64-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .p2align 4 _aesni_encrypt2: movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax addq $16,%rax L$enc_loop2: .byte 102,15,56,220,209 .byte 102,15,56,220,217 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 movups -16(%rcx,%rax,1),%xmm0 jnz L$enc_loop2 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,221,208 .byte 102,15,56,221,216 ret .p2align 4 _aesni_encrypt3: movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 xorps %xmm0,%xmm4 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax addq $16,%rax L$enc_loop3: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 movups -16(%rcx,%rax,1),%xmm0 jnz L$enc_loop3 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 ret .p2align 4 _aesni_encrypt4: movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 xorps %xmm0,%xmm4 xorps %xmm0,%xmm5 movups 32(%rcx),%xmm0 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 0x0f,0x1f,0x00 addq $16,%rax L$enc_loop4: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movups -16(%rcx,%rax,1),%xmm0 jnz L$enc_loop4 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 ret .p2align 4 _aesni_encrypt6: movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 pxor %xmm0,%xmm3 pxor %xmm0,%xmm4 .byte 102,15,56,220,209 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,220,217 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 .byte 102,15,56,220,225 pxor %xmm0,%xmm7 movups (%rcx,%rax,1),%xmm0 addq $16,%rax jmp L$enc_loop6_enter .p2align 4 L$enc_loop6: .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 L$enc_loop6_enter: .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 movups -16(%rcx,%rax,1),%xmm0 jnz L$enc_loop6 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 .byte 102,15,56,221,240 .byte 102,15,56,221,248 ret .p2align 4 _aesni_encrypt8: movups (%rcx),%xmm0 shll $4,%eax movups 16(%rcx),%xmm1 xorps %xmm0,%xmm2 xorps %xmm0,%xmm3 pxor %xmm0,%xmm4 pxor %xmm0,%xmm5 pxor %xmm0,%xmm6 leaq 32(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,220,209 pxor %xmm0,%xmm7 pxor %xmm0,%xmm8 .byte 102,15,56,220,217 pxor %xmm0,%xmm9 movups (%rcx,%rax,1),%xmm0 addq $16,%rax jmp L$enc_loop8_inner .p2align 4 L$enc_loop8: .byte 102,15,56,220,209 .byte 
102,15,56,220,217 L$enc_loop8_inner: .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 L$enc_loop8_enter: movups (%rcx,%rax,1),%xmm1 addq $32,%rax .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups -16(%rcx,%rax,1),%xmm0 jnz L$enc_loop8 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 .byte 102,15,56,221,208 .byte 102,15,56,221,216 .byte 102,15,56,221,224 .byte 102,15,56,221,232 .byte 102,15,56,221,240 .byte 102,15,56,221,248 .byte 102,68,15,56,221,192 .byte 102,68,15,56,221,200 ret .globl _aes_hw_ctr32_encrypt_blocks .private_extern _aes_hw_ctr32_encrypt_blocks .p2align 4 _aes_hw_ctr32_encrypt_blocks: _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,BORINGSSL_function_hit(%rip) #endif cmpq $1,%rdx jne L$ctr32_bulk movups (%r8),%xmm2 movups (%rdi),%xmm3 movl 240(%rcx),%edx movups (%rcx),%xmm0 movups 16(%rcx),%xmm1 leaq 32(%rcx),%rcx xorps %xmm0,%xmm2 L$oop_enc1_1: .byte 102,15,56,220,209 decl %edx movups (%rcx),%xmm1 leaq 16(%rcx),%rcx jnz L$oop_enc1_1 .byte 102,15,56,221,209 pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 xorps %xmm3,%xmm2 pxor %xmm3,%xmm3 movups %xmm2,(%rsi) xorps %xmm2,%xmm2 jmp L$ctr32_epilogue .p2align 4 L$ctr32_bulk: leaq (%rsp),%r11 pushq %rbp subq $128,%rsp andq $-16,%rsp movdqu (%r8),%xmm2 movdqu (%rcx),%xmm0 movl 12(%r8),%r8d pxor %xmm0,%xmm2 movl 12(%rcx),%ebp movdqa %xmm2,0(%rsp) bswapl %r8d movdqa %xmm2,%xmm3 movdqa %xmm2,%xmm4 movdqa %xmm2,%xmm5 movdqa %xmm2,64(%rsp) movdqa %xmm2,80(%rsp) movdqa %xmm2,96(%rsp) movq %rdx,%r10 movdqa %xmm2,112(%rsp) leaq 1(%r8),%rax leaq 2(%r8),%rdx bswapl %eax bswapl %edx xorl %ebp,%eax xorl %ebp,%edx .byte 102,15,58,34,216,3 leaq 3(%r8),%rax movdqa %xmm3,16(%rsp) .byte 102,15,58,34,226,3 bswapl %eax movq %r10,%rdx leaq 4(%r8),%r10 movdqa %xmm4,32(%rsp) xorl %ebp,%eax bswapl %r10d .byte 102,15,58,34,232,3 xorl %ebp,%r10d movdqa %xmm5,48(%rsp) leaq 5(%r8),%r9 movl %r10d,64+12(%rsp) bswapl %r9d leaq 6(%r8),%r10 movl 240(%rcx),%eax xorl %ebp,%r9d bswapl %r10d movl %r9d,80+12(%rsp) xorl %ebp,%r10d leaq 7(%r8),%r9 movl %r10d,96+12(%rsp) bswapl %r9d xorl %ebp,%r9d movl %r9d,112+12(%rsp) movups 16(%rcx),%xmm1 movdqa 64(%rsp),%xmm6 movdqa 80(%rsp),%xmm7 cmpq $8,%rdx jb L$ctr32_tail leaq 128(%rcx),%rcx subq $8,%rdx jmp L$ctr32_loop8 .p2align 5 L$ctr32_loop8: addl $8,%r8d movdqa 96(%rsp),%xmm8 .byte 102,15,56,220,209 movl %r8d,%r9d movdqa 112(%rsp),%xmm9 .byte 102,15,56,220,217 bswapl %r9d movups 32-128(%rcx),%xmm0 .byte 102,15,56,220,225 xorl %ebp,%r9d nop .byte 102,15,56,220,233 movl %r9d,0+12(%rsp) leaq 1(%r8),%r9 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 48-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movl %r9d,16+12(%rsp) leaq 2(%r8),%r9 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 64-128(%rcx),%xmm0 bswapl %r9d .byte 102,15,56,220,209 .byte 102,15,56,220,217 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movl %r9d,32+12(%rsp) leaq 3(%r8),%r9 .byte 102,15,56,220,241 .byte 
102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 80-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movl %r9d,48+12(%rsp) leaq 4(%r8),%r9 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 96-128(%rcx),%xmm0 bswapl %r9d .byte 102,15,56,220,209 .byte 102,15,56,220,217 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movl %r9d,64+12(%rsp) leaq 5(%r8),%r9 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 112-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,224 .byte 102,15,56,220,232 movl %r9d,80+12(%rsp) leaq 6(%r8),%r9 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 128-128(%rcx),%xmm0 bswapl %r9d .byte 102,15,56,220,209 .byte 102,15,56,220,217 xorl %ebp,%r9d .byte 0x66,0x90 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movl %r9d,96+12(%rsp) leaq 7(%r8),%r9 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 144-128(%rcx),%xmm1 bswapl %r9d .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 xorl %ebp,%r9d movdqu 0(%rdi),%xmm10 .byte 102,15,56,220,232 movl %r9d,112+12(%rsp) cmpl $11,%eax .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 160-128(%rcx),%xmm0 jb L$ctr32_enc_done .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 176-128(%rcx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 192-128(%rcx),%xmm0 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movups 208-128(%rcx),%xmm1 .byte 102,15,56,220,208 .byte 102,15,56,220,216 .byte 102,15,56,220,224 .byte 102,15,56,220,232 .byte 102,15,56,220,240 .byte 102,15,56,220,248 .byte 102,68,15,56,220,192 .byte 102,68,15,56,220,200 movups 224-128(%rcx),%xmm0 jmp L$ctr32_enc_done .p2align 4 L$ctr32_enc_done: movdqu 16(%rdi),%xmm11 pxor %xmm0,%xmm10 movdqu 32(%rdi),%xmm12 pxor %xmm0,%xmm11 movdqu 48(%rdi),%xmm13 pxor %xmm0,%xmm12 movdqu 64(%rdi),%xmm14 pxor %xmm0,%xmm13 movdqu 80(%rdi),%xmm15 pxor %xmm0,%xmm14 prefetcht0 448(%rdi) prefetcht0 512(%rdi) pxor %xmm0,%xmm15 .byte 102,15,56,220,209 .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 .byte 102,15,56,220,241 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 .byte 102,68,15,56,220,201 movdqu 96(%rdi),%xmm1 leaq 128(%rdi),%rdi .byte 102,65,15,56,221,210 pxor %xmm0,%xmm1 movdqu 112-128(%rdi),%xmm10 .byte 102,65,15,56,221,219 pxor %xmm0,%xmm10 movdqa 0(%rsp),%xmm11 .byte 102,65,15,56,221,228 .byte 102,65,15,56,221,237 movdqa 16(%rsp),%xmm12 movdqa 32(%rsp),%xmm13 .byte 102,65,15,56,221,246 .byte 102,65,15,56,221,255 movdqa 48(%rsp),%xmm14 movdqa 64(%rsp),%xmm15 .byte 102,68,15,56,221,193 movdqa 80(%rsp),%xmm0 movups 16-128(%rcx),%xmm1 .byte 102,69,15,56,221,202 movups 
%xmm2,(%rsi) movdqa %xmm11,%xmm2 movups %xmm3,16(%rsi) movdqa %xmm12,%xmm3 movups %xmm4,32(%rsi) movdqa %xmm13,%xmm4 movups %xmm5,48(%rsi) movdqa %xmm14,%xmm5 movups %xmm6,64(%rsi) movdqa %xmm15,%xmm6 movups %xmm7,80(%rsi) movdqa %xmm0,%xmm7 movups %xmm8,96(%rsi) movups %xmm9,112(%rsi) leaq 128(%rsi),%rsi subq $8,%rdx jnc L$ctr32_loop8 addq $8,%rdx jz L$ctr32_done leaq -128(%rcx),%rcx L$ctr32_tail: leaq 16(%rcx),%rcx cmpq $4,%rdx jb L$ctr32_loop3 je L$ctr32_loop4 shll $4,%eax movdqa 96(%rsp),%xmm8 pxor %xmm9,%xmm9 movups 16(%rcx),%xmm0 .byte 102,15,56,220,209 .byte 102,15,56,220,217 leaq 32-16(%rcx,%rax,1),%rcx negq %rax .byte 102,15,56,220,225 addq $16,%rax movups (%rdi),%xmm10 .byte 102,15,56,220,233 .byte 102,15,56,220,241 movups 16(%rdi),%xmm11 movups 32(%rdi),%xmm12 .byte 102,15,56,220,249 .byte 102,68,15,56,220,193 call L$enc_loop8_enter movdqu 48(%rdi),%xmm13 pxor %xmm10,%xmm2 movdqu 64(%rdi),%xmm10 pxor %xmm11,%xmm3 movdqu %xmm2,(%rsi) pxor %xmm12,%xmm4 movdqu %xmm3,16(%rsi) pxor %xmm13,%xmm5 movdqu %xmm4,32(%rsi) pxor %xmm10,%xmm6 movdqu %xmm5,48(%rsi) movdqu %xmm6,64(%rsi) cmpq $6,%rdx jb L$ctr32_done movups 80(%rdi),%xmm11 xorps %xmm11,%xmm7 movups %xmm7,80(%rsi) je L$ctr32_done movups 96(%rdi),%xmm12 xorps %xmm12,%xmm8 movups %xmm8,96(%rsi) jmp L$ctr32_done .p2align 5 L$ctr32_loop4: .byte 102,15,56,220,209 leaq 16(%rcx),%rcx decl %eax .byte 102,15,56,220,217 .byte 102,15,56,220,225 .byte 102,15,56,220,233 movups (%rcx),%xmm1 jnz L$ctr32_loop4 .byte 102,15,56,221,209 .byte 102,15,56,221,217 movups (%rdi),%xmm10 movups 16(%rdi),%xmm11 .byte 102,15,56,221,225 .byte 102,15,56,221,233 movups 32(%rdi),%xmm12 movups 48(%rdi),%xmm13 xorps %xmm10,%xmm2 movups %xmm2,(%rsi) xorps %xmm11,%xmm3 movups %xmm3,16(%rsi) pxor %xmm12,%xmm4 movdqu %xmm4,32(%rsi) pxor %xmm13,%xmm5 movdqu %xmm5,48(%rsi) jmp L$ctr32_done .p2align 5 L$ctr32_loop3: .byte 102,15,56,220,209 leaq 16(%rcx),%rcx decl %eax .byte 102,15,56,220,217 .byte 102,15,56,220,225 movups (%rcx),%xmm1 jnz L$ctr32_loop3 .byte 102,15,56,221,209 .byte 102,15,56,221,217 .byte 102,15,56,221,225 movups (%rdi),%xmm10 xorps %xmm10,%xmm2 movups %xmm2,(%rsi) cmpq $2,%rdx jb L$ctr32_done movups 16(%rdi),%xmm11 xorps %xmm11,%xmm3 movups %xmm3,16(%rsi) je L$ctr32_done movups 32(%rdi),%xmm12 xorps %xmm12,%xmm4 movups %xmm4,32(%rsi) L$ctr32_done: xorps %xmm0,%xmm0 xorl %ebp,%ebp pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 movaps %xmm0,0(%rsp) pxor %xmm8,%xmm8 movaps %xmm0,16(%rsp) pxor %xmm9,%xmm9 movaps %xmm0,32(%rsp) pxor %xmm10,%xmm10 movaps %xmm0,48(%rsp) pxor %xmm11,%xmm11 movaps %xmm0,64(%rsp) pxor %xmm12,%xmm12 movaps %xmm0,80(%rsp) pxor %xmm13,%xmm13 movaps %xmm0,96(%rsp) pxor %xmm14,%xmm14 movaps %xmm0,112(%rsp) pxor %xmm15,%xmm15 movq -8(%r11),%rbp leaq (%r11),%rsp L$ctr32_epilogue: ret .globl _aes_hw_set_encrypt_key_base .private_extern _aes_hw_set_encrypt_key_base .p2align 4 _aes_hw_set_encrypt_key_base: _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,BORINGSSL_function_hit+3(%rip) #endif subq $8,%rsp movups (%rdi),%xmm0 xorps %xmm4,%xmm4 leaq 16(%rdx),%rax cmpl $256,%esi je L$14rounds cmpl $128,%esi jne L$bad_keybits L$10rounds: movl $9,%esi movups %xmm0,(%rdx) .byte 102,15,58,223,200,1 call L$key_expansion_128_cold .byte 102,15,58,223,200,2 call L$key_expansion_128 .byte 102,15,58,223,200,4 call L$key_expansion_128 .byte 102,15,58,223,200,8 call L$key_expansion_128 .byte 102,15,58,223,200,16 call L$key_expansion_128 .byte 102,15,58,223,200,32 call 
L$key_expansion_128 .byte 102,15,58,223,200,64 call L$key_expansion_128 .byte 102,15,58,223,200,128 call L$key_expansion_128 .byte 102,15,58,223,200,27 call L$key_expansion_128 .byte 102,15,58,223,200,54 call L$key_expansion_128 movups %xmm0,(%rax) movl %esi,80(%rax) xorl %eax,%eax jmp L$enc_key_ret .p2align 4 L$14rounds: movups 16(%rdi),%xmm2 movl $13,%esi leaq 16(%rax),%rax movups %xmm0,(%rdx) movups %xmm2,16(%rdx) .byte 102,15,58,223,202,1 call L$key_expansion_256a_cold .byte 102,15,58,223,200,1 call L$key_expansion_256b .byte 102,15,58,223,202,2 call L$key_expansion_256a .byte 102,15,58,223,200,2 call L$key_expansion_256b .byte 102,15,58,223,202,4 call L$key_expansion_256a .byte 102,15,58,223,200,4 call L$key_expansion_256b .byte 102,15,58,223,202,8 call L$key_expansion_256a .byte 102,15,58,223,200,8 call L$key_expansion_256b .byte 102,15,58,223,202,16 call L$key_expansion_256a .byte 102,15,58,223,200,16 call L$key_expansion_256b .byte 102,15,58,223,202,32 call L$key_expansion_256a .byte 102,15,58,223,200,32 call L$key_expansion_256b .byte 102,15,58,223,202,64 call L$key_expansion_256a movups %xmm0,(%rax) movl %esi,16(%rax) xorq %rax,%rax jmp L$enc_key_ret .p2align 4 L$bad_keybits: movq $-2,%rax L$enc_key_ret: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 addq $8,%rsp ret .p2align 4 L$key_expansion_128: movups %xmm0,(%rax) leaq 16(%rax),%rax L$key_expansion_128_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .p2align 4 L$key_expansion_256a: movups %xmm2,(%rax) leaq 16(%rax),%rax L$key_expansion_256a_cold: shufps $16,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $140,%xmm0,%xmm4 xorps %xmm4,%xmm0 shufps $255,%xmm1,%xmm1 xorps %xmm1,%xmm0 ret .p2align 4 L$key_expansion_256b: movups %xmm0,(%rax) leaq 16(%rax),%rax shufps $16,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $140,%xmm2,%xmm4 xorps %xmm4,%xmm2 shufps $170,%xmm1,%xmm1 xorps %xmm1,%xmm2 ret .globl _aes_hw_set_encrypt_key_alt .private_extern _aes_hw_set_encrypt_key_alt .p2align 4 _aes_hw_set_encrypt_key_alt: _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST movb $1,BORINGSSL_function_hit+3(%rip) #endif subq $8,%rsp movups (%rdi),%xmm0 xorps %xmm4,%xmm4 leaq 16(%rdx),%rax cmpl $256,%esi je L$14rounds_alt cmpl $128,%esi jne L$bad_keybits_alt movl $9,%esi movdqa L$key_rotate(%rip),%xmm5 movl $8,%r10d movdqa L$key_rcon1(%rip),%xmm4 movdqa %xmm0,%xmm2 movdqu %xmm0,(%rdx) jmp L$oop_key128 .p2align 4 L$oop_key128: .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 leaq 16(%rax),%rax movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,-16(%rax) movdqa %xmm0,%xmm2 decl %r10d jnz L$oop_key128 movdqa L$key_rcon1b(%rip),%xmm4 .byte 102,15,56,0,197 .byte 102,15,56,221,196 pslld $1,%xmm4 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,(%rax) movdqa %xmm0,%xmm2 .byte 102,15,56,0,197 .byte 102,15,56,221,196 movdqa %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm2,%xmm3 pslldq $4,%xmm2 pxor %xmm3,%xmm2 pxor %xmm2,%xmm0 movdqu %xmm0,16(%rax) movl %esi,96(%rax) xorl %eax,%eax jmp L$enc_key_ret_alt .p2align 4 L$14rounds_alt: movups 16(%rdi),%xmm2 movl $13,%esi leaq 16(%rax),%rax movdqa L$key_rotate(%rip),%xmm5 movdqa L$key_rcon1(%rip),%xmm4 movl $7,%r10d movdqu %xmm0,0(%rdx) movdqa %xmm2,%xmm1 movdqu 
%xmm2,16(%rdx) jmp L$oop_key256 .p2align 4 L$oop_key256: .byte 102,15,56,0,213 .byte 102,15,56,221,212 movdqa %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm0,%xmm3 pslldq $4,%xmm0 pxor %xmm3,%xmm0 pslld $1,%xmm4 pxor %xmm2,%xmm0 movdqu %xmm0,(%rax) decl %r10d jz L$done_key256 pshufd $0xff,%xmm0,%xmm2 pxor %xmm3,%xmm3 .byte 102,15,56,221,211 movdqa %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm1,%xmm3 pslldq $4,%xmm1 pxor %xmm3,%xmm1 pxor %xmm1,%xmm2 movdqu %xmm2,16(%rax) leaq 32(%rax),%rax movdqa %xmm2,%xmm1 jmp L$oop_key256 L$done_key256: movl %esi,16(%rax) xorl %eax,%eax jmp L$enc_key_ret_alt .p2align 4 L$bad_keybits_alt: movq $-2,%rax L$enc_key_ret_alt: pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 addq $8,%rsp ret .section __DATA,__const .p2align 6 L$bswap_mask: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 L$increment32: .long 6,6,6,0 L$increment64: .long 1,0,0,0 L$increment1: .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 L$key_rotate: .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d L$key_rotate192: .long 0x04070605,0x04070605,0x04070605,0x04070605 L$key_rcon1: .long 1,1,1,1 L$key_rcon1b: .long 0x1b,0x1b,0x1b,0x1b .byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .p2align 6 .text #endif
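Background note (not part of the generated file above): aes_hw_ctr32_encrypt_blocks treats only the last 32 bits of the counter block as a big-endian counter (hence the bswapl/increment juggling before each batch of blocks) and XORs each encrypted counter block into the data. The Rust sketch below shows that CTR-32 scheduling in scalar form; `encrypt_block` is a stand-in closure for the AES rounds and the names are assumptions for illustration, not the crate's API.

// CTR mode with a 32-bit big-endian counter in the last four bytes of the IV
// block: encrypt the counter block, XOR it into the data, bump the low 32 bits
// (wrapping), repeat. `encrypt_block` is a placeholder for the per-block AES
// permutation that the assembly performs with AES-NI.
fn ctr32_xor(iv: &[u8; 16], data: &mut [u8], mut encrypt_block: impl FnMut(&[u8; 16]) -> [u8; 16]) {
    let mut block = *iv;
    let mut ctr = u32::from_be_bytes([iv[12], iv[13], iv[14], iv[15]]);
    for chunk in data.chunks_mut(16) {
        block[12..16].copy_from_slice(&ctr.to_be_bytes());
        let keystream = encrypt_block(&block);
        for (d, k) in chunk.iter_mut().zip(keystream.iter()) {
            *d ^= k; // the same operation encrypts and decrypts
        }
        ctr = ctr.wrapping_add(1); // only the low 32 bits roll over
    }
}

fn main() {
    // Toy "cipher" (byte-wise NOT) just to exercise the counter scheduling.
    let toy = |b: &[u8; 16]| {
        let mut o = *b;
        for x in o.iter_mut() { *x = !*x; }
        o
    };
    let iv = [0u8; 16];
    let mut msg = *b"hello, counter mode!";
    ctr32_xor(&iv, &mut msg, toy); // "encrypt"
    ctr32_xor(&iv, &mut msg, toy); // "decrypt": same keystream XORed again
    assert_eq!(&msg, b"hello, counter mode!");
    println!("{}", core::str::from_utf8(&msg).unwrap());
}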
marvin-hansen/iggy-streaming-system
51,084
thirdparty/crates/ring-0.17.9/pregenerated/x86_64-mont5-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .globl bn_mul4x_mont_gather5 .hidden bn_mul4x_mont_gather5 .type bn_mul4x_mont_gather5,@function .align 32 bn_mul4x_mont_gather5: .cfi_startproc _CET_ENDBR .byte 0x67 movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 .Lmul4x_prologue: .byte 0x67 shll $3,%r9d leaq (%r9,%r9,2),%r10 negq %r9 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb .Lmul4xsp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp .Lmul4xsp_done .align 32 .Lmul4xsp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp .Lmul4xsp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lmul4x_page_walk jmp .Lmul4x_page_walk_done .Lmul4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lmul4x_page_walk .Lmul4x_page_walk_done: negq %r9 movq %rax,40(%rsp) .cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 .Lmul4x_body: call mul4x_internal movq 40(%rsp),%rsi .cfi_def_cfa %rsi,8 movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lmul4x_epilogue: ret .cfi_endproc .size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5 .type mul4x_internal,@function .align 32 mul4x_internal: .cfi_startproc shlq $5,%r9 movd 8(%rax),%xmm5 leaq .Linc(%rip),%rax leaq 128(%rdx,%r9,1),%r13 shrq $5,%r9 movdqa 0(%rax),%xmm0 movdqa 16(%rax),%xmm1 leaq 88-112(%rsp,%r9,1),%r10 leaq 128(%rdx),%r12 pshufd $0,%xmm5,%xmm5 movdqa %xmm1,%xmm4 .byte 0x67,0x67 movdqa %xmm1,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 .byte 0x67 movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,112(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,128(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,144(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,160(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,176(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,192(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,208(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,224(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,240(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,256(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,272(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,288(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,304(%r10) paddd %xmm2,%xmm3 .byte 0x67 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,320(%r10) pcmpeqd %xmm5,%xmm3 movdqa %xmm2,336(%r10) pand 64(%r12),%xmm0 pand 80(%r12),%xmm1 pand 96(%r12),%xmm2 movdqa %xmm3,352(%r10) pand 112(%r12),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -128(%r12),%xmm4 movdqa -112(%r12),%xmm5 movdqa 
-96(%r12),%xmm2 pand 112(%r10),%xmm4 movdqa -80(%r12),%xmm3 pand 128(%r10),%xmm5 por %xmm4,%xmm0 pand 144(%r10),%xmm2 por %xmm5,%xmm1 pand 160(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -64(%r12),%xmm4 movdqa -48(%r12),%xmm5 movdqa -32(%r12),%xmm2 pand 176(%r10),%xmm4 movdqa -16(%r12),%xmm3 pand 192(%r10),%xmm5 por %xmm4,%xmm0 pand 208(%r10),%xmm2 por %xmm5,%xmm1 pand 224(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa 0(%r12),%xmm4 movdqa 16(%r12),%xmm5 movdqa 32(%r12),%xmm2 pand 240(%r10),%xmm4 movdqa 48(%r12),%xmm3 pand 256(%r10),%xmm5 por %xmm4,%xmm0 pand 272(%r10),%xmm2 por %xmm5,%xmm1 pand 288(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 por %xmm1,%xmm0 pshufd $0x4e,%xmm0,%xmm1 por %xmm1,%xmm0 leaq 256(%r12),%r12 .byte 102,72,15,126,195 movq %r13,16+8(%rsp) movq %rdi,56+8(%rsp) movq (%r8),%r8 movq (%rsi),%rax leaq (%rsi,%r9,1),%rsi negq %r9 movq %r8,%rbp mulq %rbx movq %rax,%r10 movq (%rcx),%rax imulq %r10,%rbp leaq 64+8(%rsp),%r14 movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi,%r9,1),%rax adcq $0,%rdx movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%r9),%r15 leaq 32(%rcx),%rcx adcq $0,%rdx movq %rdi,(%r14) movq %rdx,%r13 jmp .L1st4x .align 32 .L1st4x: mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%r14) movq %rdx,%r13 mulq %rbx addq %rax,%r10 movq 0(%rcx),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%rcx),%rcx adcq $0,%rdx movq %rdi,(%r14) movq %rdx,%r13 addq $32,%r15 jnz .L1st4x mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%r14) movq %rdx,%r13 leaq (%rcx,%r9,1),%rcx xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi movq %r13,-8(%r14) jmp .Louter4x .align 32 .Louter4x: leaq 16+128(%r14),%rdx pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 movdqa -128(%r12),%xmm0 movdqa -112(%r12),%xmm1 movdqa -96(%r12),%xmm2 movdqa -80(%r12),%xmm3 pand -128(%rdx),%xmm0 pand -112(%rdx),%xmm1 por %xmm0,%xmm4 pand -96(%rdx),%xmm2 por %xmm1,%xmm5 pand -80(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa -64(%r12),%xmm0 movdqa -48(%r12),%xmm1 movdqa -32(%r12),%xmm2 movdqa -16(%r12),%xmm3 pand -64(%rdx),%xmm0 pand -48(%rdx),%xmm1 por %xmm0,%xmm4 pand -32(%rdx),%xmm2 por %xmm1,%xmm5 pand -16(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 0(%r12),%xmm0 movdqa 16(%r12),%xmm1 movdqa 32(%r12),%xmm2 movdqa 48(%r12),%xmm3 pand 0(%rdx),%xmm0 pand 16(%rdx),%xmm1 por %xmm0,%xmm4 pand 32(%rdx),%xmm2 por %xmm1,%xmm5 pand 48(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 64(%r12),%xmm0 movdqa 80(%r12),%xmm1 movdqa 96(%r12),%xmm2 movdqa 
112(%r12),%xmm3 pand 64(%rdx),%xmm0 pand 80(%rdx),%xmm1 por %xmm0,%xmm4 pand 96(%rdx),%xmm2 por %xmm1,%xmm5 pand 112(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 por %xmm5,%xmm4 pshufd $0x4e,%xmm4,%xmm0 por %xmm4,%xmm0 leaq 256(%r12),%r12 .byte 102,72,15,126,195 movq (%r14,%r9,1),%r10 movq %r8,%rbp mulq %rbx addq %rax,%r10 movq (%rcx),%rax adcq $0,%rdx imulq %r10,%rbp movq %rdx,%r11 movq %rdi,(%r14) leaq (%r14,%r9,1),%r14 mulq %rbp addq %rax,%r10 movq 8(%rsi,%r9,1),%rax adcq $0,%rdx movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx addq 8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%r9),%r15 leaq 32(%rcx),%rcx adcq $0,%rdx movq %rdx,%r13 jmp .Linner4x .align 32 .Linner4x: mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax adcq $0,%rdx addq 16(%r14),%r10 leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %rdi,-32(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx),%rax adcq $0,%rdx addq -8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%r13 mulq %rbx addq %rax,%r10 movq 0(%rcx),%rax adcq $0,%rdx addq (%r14),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %rdi,-16(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx addq 8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%rcx),%rcx adcq $0,%rdx movq %r13,-8(%r14) movq %rdx,%r13 addq $32,%r15 jnz .Linner4x mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax adcq $0,%rdx addq 16(%r14),%r10 leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %rdi,-32(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq %rbp,%rax movq -8(%rcx),%rbp adcq $0,%rdx addq -8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%r13 movq %rdi,-16(%r14) leaq (%rcx,%r9,1),%rcx xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi addq (%r14),%r13 adcq $0,%rdi movq %r13,-8(%r14) cmpq 16+8(%rsp),%r12 jb .Louter4x xorq %rax,%rax subq %r13,%rbp adcq %r15,%r15 orq %r15,%rdi subq %rdi,%rax leaq (%r14,%r9,1),%rbx movq (%rcx),%r12 leaq (%rcx),%rbp movq %r9,%rcx sarq $3+2,%rcx movq 56+8(%rsp),%rdi decq %r12 xorq %r10,%r10 movq 8(%rbp),%r13 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqr4x_sub_entry .cfi_endproc .size mul4x_internal,.-mul4x_internal .globl bn_power5_nohw .hidden bn_power5_nohw .type bn_power5_nohw,@function .align 32 bn_power5_nohw: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 .Lpower5_prologue: shll $3,%r9d leal (%r9,%r9,2),%r10d negq %r9 movq (%r8),%r8 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb .Lpwr_sp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp .Lpwr_sp_done .align 32 .Lpwr_sp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp .Lpwr_sp_done: andq $-64,%rbp movq %rsp,%r11 subq 
%rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lpwr_page_walk jmp .Lpwr_page_walk_done .Lpwr_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lpwr_page_walk .Lpwr_page_walk_done: movq %r9,%r10 negq %r9 movq %r8,32(%rsp) movq %rax,40(%rsp) .cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 .Lpower5_body: .byte 102,72,15,110,207 .byte 102,72,15,110,209 .byte 102,73,15,110,218 .byte 102,72,15,110,226 call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal .byte 102,72,15,126,209 .byte 102,72,15,126,226 movq %rsi,%rdi movq 40(%rsp),%rax leaq 32(%rsp),%r8 call mul4x_internal movq 40(%rsp),%rsi .cfi_def_cfa %rsi,8 movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lpower5_epilogue: ret .cfi_endproc .size bn_power5_nohw,.-bn_power5_nohw .globl bn_sqr8x_internal .hidden bn_sqr8x_internal .hidden bn_sqr8x_internal .type bn_sqr8x_internal,@function .align 32 bn_sqr8x_internal: __bn_sqr8x_internal: .cfi_startproc _CET_ENDBR leaq 32(%r10),%rbp leaq (%rsi,%r9,1),%rsi movq %r9,%rcx movq -32(%rsi,%rbp,1),%r14 leaq 48+8(%rsp,%r9,2),%rdi movq -24(%rsi,%rbp,1),%rax leaq -32(%rdi,%rbp,1),%rdi movq -16(%rsi,%rbp,1),%rbx movq %rax,%r15 mulq %r14 movq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 movq %r10,-24(%rdi,%rbp,1) mulq %r14 addq %rax,%r11 movq %rbx,%rax adcq $0,%rdx movq %r11,-16(%rdi,%rbp,1) movq %rdx,%r10 movq -8(%rsi,%rbp,1),%rbx mulq %r15 movq %rax,%r12 movq %rbx,%rax movq %rdx,%r13 leaq (%rbp),%rcx mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 movq %r10,-8(%rdi,%rcx,1) jmp .Lsqr4x_1st .align 32 .Lsqr4x_1st: movq (%rsi,%rcx,1),%rbx mulq %r15 addq %rax,%r13 movq %rbx,%rax movq %rdx,%r12 adcq $0,%r12 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq 8(%rsi,%rcx,1),%rbx movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %rbx,%rax movq %r11,(%rdi,%rcx,1) movq %rdx,%r13 adcq $0,%r13 mulq %r14 addq %rax,%r10 movq %rbx,%rax movq 16(%rsi,%rcx,1),%rbx movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 mulq %r15 addq %rax,%r13 movq %rbx,%rax movq %r10,8(%rdi,%rcx,1) movq %rdx,%r12 adcq $0,%r12 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq 24(%rsi,%rcx,1),%rbx movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %rbx,%rax movq %r11,16(%rdi,%rcx,1) movq %rdx,%r13 adcq $0,%r13 leaq 32(%rcx),%rcx mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 movq %r10,-8(%rdi,%rcx,1) cmpq $0,%rcx jne .Lsqr4x_1st mulq %r15 addq %rax,%r13 leaq 16(%rbp),%rbp adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,(%rdi) movq %rdx,%r12 movq %rdx,8(%rdi) jmp .Lsqr4x_outer .align 32 .Lsqr4x_outer: movq -32(%rsi,%rbp,1),%r14 leaq 48+8(%rsp,%r9,2),%rdi movq -24(%rsi,%rbp,1),%rax leaq -32(%rdi,%rbp,1),%rdi movq -16(%rsi,%rbp,1),%rbx movq %rax,%r15 mulq %r14 movq -24(%rdi,%rbp,1),%r10 addq %rax,%r10 movq %rbx,%rax adcq $0,%rdx movq %r10,-24(%rdi,%rbp,1) movq %rdx,%r11 mulq %r14 addq %rax,%r11 movq %rbx,%rax adcq $0,%rdx addq -16(%rdi,%rbp,1),%r11 movq %rdx,%r10 adcq $0,%r10 movq %r11,-16(%rdi,%rbp,1) xorq 
%r12,%r12 movq -8(%rsi,%rbp,1),%rbx mulq %r15 addq %rax,%r12 movq %rbx,%rax adcq $0,%rdx addq -8(%rdi,%rbp,1),%r12 movq %rdx,%r13 adcq $0,%r13 mulq %r14 addq %rax,%r10 movq %rbx,%rax adcq $0,%rdx addq %r12,%r10 movq %rdx,%r11 adcq $0,%r11 movq %r10,-8(%rdi,%rbp,1) leaq (%rbp),%rcx jmp .Lsqr4x_inner .align 32 .Lsqr4x_inner: movq (%rsi,%rcx,1),%rbx mulq %r15 addq %rax,%r13 movq %rbx,%rax movq %rdx,%r12 adcq $0,%r12 addq (%rdi,%rcx,1),%r13 adcq $0,%r12 .byte 0x67 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq 8(%rsi,%rcx,1),%rbx movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %r11,(%rdi,%rcx,1) movq %rbx,%rax movq %rdx,%r13 adcq $0,%r13 addq 8(%rdi,%rcx,1),%r12 leaq 16(%rcx),%rcx adcq $0,%r13 mulq %r14 addq %rax,%r10 movq %rbx,%rax adcq $0,%rdx addq %r12,%r10 movq %rdx,%r11 adcq $0,%r11 movq %r10,-8(%rdi,%rcx,1) cmpq $0,%rcx jne .Lsqr4x_inner .byte 0x67 mulq %r15 addq %rax,%r13 adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,(%rdi) movq %rdx,%r12 movq %rdx,8(%rdi) addq $16,%rbp jnz .Lsqr4x_outer movq -32(%rsi),%r14 leaq 48+8(%rsp,%r9,2),%rdi movq -24(%rsi),%rax leaq -32(%rdi,%rbp,1),%rdi movq -16(%rsi),%rbx movq %rax,%r15 mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq %r10,-24(%rdi) movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 movq -8(%rsi),%rbx adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %rbx,%rax movq %r11,-16(%rdi) movq %rdx,%r13 adcq $0,%r13 mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 movq %r10,-8(%rdi) mulq %r15 addq %rax,%r13 movq -16(%rsi),%rax adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,(%rdi) movq %rdx,%r12 movq %rdx,8(%rdi) mulq %rbx addq $16,%rbp xorq %r14,%r14 subq %r9,%rbp xorq %r15,%r15 addq %r12,%rax adcq $0,%rdx movq %rax,8(%rdi) movq %rdx,16(%rdi) movq %r15,24(%rdi) movq -16(%rsi,%rbp,1),%rax leaq 48+8(%rsp),%rdi xorq %r10,%r10 movq 8(%rdi),%r11 leaq (%r14,%r10,2),%r12 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq 16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 24(%rdi),%r11 adcq %rax,%r12 movq -8(%rsi,%rbp,1),%rax movq %r12,(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,8(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 movq 32(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 40(%rdi),%r11 adcq %rax,%rbx movq 0(%rsi,%rbp,1),%rax movq %rbx,16(%rdi) adcq %rdx,%r8 leaq 16(%rbp),%rbp movq %r8,24(%rdi) sbbq %r15,%r15 leaq 64(%rdi),%rdi jmp .Lsqr4x_shift_n_add .align 32 .Lsqr4x_shift_n_add: leaq (%r14,%r10,2),%r12 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq -16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq -8(%rdi),%r11 adcq %rax,%r12 movq -8(%rsi,%rbp,1),%rax movq %r12,-32(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,-24(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 movq 0(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 8(%rdi),%r11 adcq %rax,%rbx movq 0(%rsi,%rbp,1),%rax movq %rbx,-16(%rdi) adcq %rdx,%r8 leaq (%r14,%r10,2),%r12 movq %r8,-8(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq 16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 24(%rdi),%r11 adcq %rax,%r12 movq 8(%rsi,%rbp,1),%rax movq %r12,0(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,8(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 movq 32(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 40(%rdi),%r11 
adcq %rax,%rbx movq 16(%rsi,%rbp,1),%rax movq %rbx,16(%rdi) adcq %rdx,%r8 movq %r8,24(%rdi) sbbq %r15,%r15 leaq 64(%rdi),%rdi addq $32,%rbp jnz .Lsqr4x_shift_n_add leaq (%r14,%r10,2),%r12 .byte 0x67 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq -16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq -8(%rdi),%r11 adcq %rax,%r12 movq -8(%rsi),%rax movq %r12,-32(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,-24(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 mulq %rax negq %r15 adcq %rax,%rbx adcq %rdx,%r8 movq %rbx,-16(%rdi) movq %r8,-8(%rdi) .byte 102,72,15,126,213 __bn_sqr8x_reduction: xorq %rax,%rax leaq (%r9,%rbp,1),%rcx leaq 48+8(%rsp,%r9,2),%rdx movq %rcx,0+8(%rsp) leaq 48+8(%rsp,%r9,1),%rdi movq %rdx,8+8(%rsp) negq %r9 jmp .L8x_reduction_loop .align 32 .L8x_reduction_loop: leaq (%rdi,%r9,1),%rdi .byte 0x66 movq 0(%rdi),%rbx movq 8(%rdi),%r9 movq 16(%rdi),%r10 movq 24(%rdi),%r11 movq 32(%rdi),%r12 movq 40(%rdi),%r13 movq 48(%rdi),%r14 movq 56(%rdi),%r15 movq %rax,(%rdx) leaq 64(%rdi),%rdi .byte 0x67 movq %rbx,%r8 imulq 32+8(%rsp),%rbx movq 0(%rbp),%rax movl $8,%ecx jmp .L8x_reduce .align 32 .L8x_reduce: mulq %rbx movq 8(%rbp),%rax negq %r8 movq %rdx,%r8 adcq $0,%r8 mulq %rbx addq %rax,%r9 movq 16(%rbp),%rax adcq $0,%rdx addq %r9,%r8 movq %rbx,48-8+8(%rsp,%rcx,8) movq %rdx,%r9 adcq $0,%r9 mulq %rbx addq %rax,%r10 movq 24(%rbp),%rax adcq $0,%rdx addq %r10,%r9 movq 32+8(%rsp),%rsi movq %rdx,%r10 adcq $0,%r10 mulq %rbx addq %rax,%r11 movq 32(%rbp),%rax adcq $0,%rdx imulq %r8,%rsi addq %r11,%r10 movq %rdx,%r11 adcq $0,%r11 mulq %rbx addq %rax,%r12 movq 40(%rbp),%rax adcq $0,%rdx addq %r12,%r11 movq %rdx,%r12 adcq $0,%r12 mulq %rbx addq %rax,%r13 movq 48(%rbp),%rax adcq $0,%rdx addq %r13,%r12 movq %rdx,%r13 adcq $0,%r13 mulq %rbx addq %rax,%r14 movq 56(%rbp),%rax adcq $0,%rdx addq %r14,%r13 movq %rdx,%r14 adcq $0,%r14 mulq %rbx movq %rsi,%rbx addq %rax,%r15 movq 0(%rbp),%rax adcq $0,%rdx addq %r15,%r14 movq %rdx,%r15 adcq $0,%r15 decl %ecx jnz .L8x_reduce leaq 64(%rbp),%rbp xorq %rax,%rax movq 8+8(%rsp),%rdx cmpq 0+8(%rsp),%rbp jae .L8x_no_tail .byte 0x66 addq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 sbbq %rsi,%rsi movq 48+56+8(%rsp),%rbx movl $8,%ecx movq 0(%rbp),%rax jmp .L8x_tail .align 32 .L8x_tail: mulq %rbx addq %rax,%r8 movq 8(%rbp),%rax movq %r8,(%rdi) movq %rdx,%r8 adcq $0,%r8 mulq %rbx addq %rax,%r9 movq 16(%rbp),%rax adcq $0,%rdx addq %r9,%r8 leaq 8(%rdi),%rdi movq %rdx,%r9 adcq $0,%r9 mulq %rbx addq %rax,%r10 movq 24(%rbp),%rax adcq $0,%rdx addq %r10,%r9 movq %rdx,%r10 adcq $0,%r10 mulq %rbx addq %rax,%r11 movq 32(%rbp),%rax adcq $0,%rdx addq %r11,%r10 movq %rdx,%r11 adcq $0,%r11 mulq %rbx addq %rax,%r12 movq 40(%rbp),%rax adcq $0,%rdx addq %r12,%r11 movq %rdx,%r12 adcq $0,%r12 mulq %rbx addq %rax,%r13 movq 48(%rbp),%rax adcq $0,%rdx addq %r13,%r12 movq %rdx,%r13 adcq $0,%r13 mulq %rbx addq %rax,%r14 movq 56(%rbp),%rax adcq $0,%rdx addq %r14,%r13 movq %rdx,%r14 adcq $0,%r14 mulq %rbx movq 48-16+8(%rsp,%rcx,8),%rbx addq %rax,%r15 adcq $0,%rdx addq %r15,%r14 movq 0(%rbp),%rax movq %rdx,%r15 adcq $0,%r15 decl %ecx jnz .L8x_tail leaq 64(%rbp),%rbp movq 8+8(%rsp),%rdx cmpq 0+8(%rsp),%rbp jae .L8x_tail_done movq 48+56+8(%rsp),%rbx negq %rsi movq 0(%rbp),%rax adcq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 sbbq 
%rsi,%rsi movl $8,%ecx jmp .L8x_tail .align 32 .L8x_tail_done: xorq %rax,%rax addq (%rdx),%r8 adcq $0,%r9 adcq $0,%r10 adcq $0,%r11 adcq $0,%r12 adcq $0,%r13 adcq $0,%r14 adcq $0,%r15 adcq $0,%rax negq %rsi .L8x_no_tail: adcq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 adcq $0,%rax movq -8(%rbp),%rcx xorq %rsi,%rsi .byte 102,72,15,126,213 movq %r8,0(%rdi) movq %r9,8(%rdi) .byte 102,73,15,126,217 movq %r10,16(%rdi) movq %r11,24(%rdi) movq %r12,32(%rdi) movq %r13,40(%rdi) movq %r14,48(%rdi) movq %r15,56(%rdi) leaq 64(%rdi),%rdi cmpq %rdx,%rdi jb .L8x_reduction_loop ret .cfi_endproc .size bn_sqr8x_internal,.-bn_sqr8x_internal .type __bn_post4x_internal,@function .align 32 __bn_post4x_internal: .cfi_startproc movq 0(%rbp),%r12 leaq (%rdi,%r9,1),%rbx movq %r9,%rcx .byte 102,72,15,126,207 negq %rax .byte 102,72,15,126,206 sarq $3+2,%rcx decq %r12 xorq %r10,%r10 movq 8(%rbp),%r13 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqr4x_sub_entry .align 16 .Lsqr4x_sub: movq 0(%rbp),%r12 movq 8(%rbp),%r13 movq 16(%rbp),%r14 movq 24(%rbp),%r15 .Lsqr4x_sub_entry: leaq 32(%rbp),%rbp notq %r12 notq %r13 notq %r14 notq %r15 andq %rax,%r12 andq %rax,%r13 andq %rax,%r14 andq %rax,%r15 negq %r10 adcq 0(%rbx),%r12 adcq 8(%rbx),%r13 adcq 16(%rbx),%r14 adcq 24(%rbx),%r15 movq %r12,0(%rdi) leaq 32(%rbx),%rbx movq %r13,8(%rdi) sbbq %r10,%r10 movq %r14,16(%rdi) movq %r15,24(%rdi) leaq 32(%rdi),%rdi incq %rcx jnz .Lsqr4x_sub movq %r9,%r10 negq %r9 ret .cfi_endproc .size __bn_post4x_internal,.-__bn_post4x_internal .globl bn_mulx4x_mont_gather5 .hidden bn_mulx4x_mont_gather5 .type bn_mulx4x_mont_gather5,@function .align 32 bn_mulx4x_mont_gather5: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 .Lmulx4x_prologue: shll $3,%r9d leaq (%r9,%r9,2),%r10 negq %r9 movq (%r8),%r8 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb .Lmulx4xsp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp .Lmulx4xsp_done .Lmulx4xsp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp .Lmulx4xsp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lmulx4x_page_walk jmp .Lmulx4x_page_walk_done .Lmulx4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lmulx4x_page_walk .Lmulx4x_page_walk_done: movq %r8,32(%rsp) movq %rax,40(%rsp) .cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 .Lmulx4x_body: call mulx4x_internal movq 40(%rsp),%rsi .cfi_def_cfa %rsi,8 movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lmulx4x_epilogue: ret .cfi_endproc .size bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5 .type mulx4x_internal,@function .align 32 mulx4x_internal: .cfi_startproc movq %r9,8(%rsp) movq %r9,%r10 negq %r9 shlq $5,%r9 negq %r10 leaq 128(%rdx,%r9,1),%r13 shrq $5+5,%r9 movd 8(%rax),%xmm5 subq $1,%r9 leaq .Linc(%rip),%rax movq %r13,16+8(%rsp) movq %r9,24+8(%rsp) movq %rdi,56+8(%rsp) movdqa 0(%rax),%xmm0 movdqa 16(%rax),%xmm1 leaq 
88-112(%rsp,%r10,1),%r10 leaq 128(%rdx),%rdi pshufd $0,%xmm5,%xmm5 movdqa %xmm1,%xmm4 .byte 0x67 movdqa %xmm1,%xmm2 .byte 0x67 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,112(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,128(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,144(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,160(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,176(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,192(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,208(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,224(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,240(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,256(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,272(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,288(%r10) movdqa %xmm4,%xmm3 .byte 0x67 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,304(%r10) paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,320(%r10) pcmpeqd %xmm5,%xmm3 movdqa %xmm2,336(%r10) pand 64(%rdi),%xmm0 pand 80(%rdi),%xmm1 pand 96(%rdi),%xmm2 movdqa %xmm3,352(%r10) pand 112(%rdi),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -128(%rdi),%xmm4 movdqa -112(%rdi),%xmm5 movdqa -96(%rdi),%xmm2 pand 112(%r10),%xmm4 movdqa -80(%rdi),%xmm3 pand 128(%r10),%xmm5 por %xmm4,%xmm0 pand 144(%r10),%xmm2 por %xmm5,%xmm1 pand 160(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -64(%rdi),%xmm4 movdqa -48(%rdi),%xmm5 movdqa -32(%rdi),%xmm2 pand 176(%r10),%xmm4 movdqa -16(%rdi),%xmm3 pand 192(%r10),%xmm5 por %xmm4,%xmm0 pand 208(%r10),%xmm2 por %xmm5,%xmm1 pand 224(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa 0(%rdi),%xmm4 movdqa 16(%rdi),%xmm5 movdqa 32(%rdi),%xmm2 pand 240(%r10),%xmm4 movdqa 48(%rdi),%xmm3 pand 256(%r10),%xmm5 por %xmm4,%xmm0 pand 272(%r10),%xmm2 por %xmm5,%xmm1 pand 288(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 pxor %xmm1,%xmm0 pshufd $0x4e,%xmm0,%xmm1 por %xmm1,%xmm0 leaq 256(%rdi),%rdi .byte 102,72,15,126,194 leaq 64+32+8(%rsp),%rbx movq %rdx,%r9 mulxq 0(%rsi),%r8,%rax mulxq 8(%rsi),%r11,%r12 addq %rax,%r11 mulxq 16(%rsi),%rax,%r13 adcq %rax,%r12 adcq $0,%r13 mulxq 24(%rsi),%rax,%r14 movq %r8,%r15 imulq 32+8(%rsp),%r8 xorq %rbp,%rbp movq %r8,%rdx movq %rdi,8+8(%rsp) leaq 32(%rsi),%rsi adcxq %rax,%r13 adcxq %rbp,%r14 mulxq 0(%rcx),%rax,%r10 adcxq %rax,%r15 adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 mulxq 16(%rcx),%rax,%r12 movq 24+8(%rsp),%rdi movq %r10,-32(%rbx) adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r11,-24(%rbx) adcxq %rax,%r12 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r12,-16(%rbx) jmp .Lmulx4x_1st .align 32 .Lmulx4x_1st: adcxq %rbp,%r15 mulxq 0(%rsi),%r10,%rax adcxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 .byte 0x67,0x67 movq %r8,%rdx adcxq %rax,%r13 adcxq %rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adoxq %r15,%r10 mulxq 0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 movq %r11,-32(%rbx) adoxq %r15,%r13 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 leaq 
32(%rcx),%rcx movq %r13,-16(%rbx) decq %rdi jnz .Lmulx4x_1st movq 8(%rsp),%rax adcq %rbp,%r15 leaq (%rsi,%rax,1),%rsi addq %r15,%r14 movq 8+8(%rsp),%rdi adcq %rbp,%rbp movq %r14,-8(%rbx) jmp .Lmulx4x_outer .align 32 .Lmulx4x_outer: leaq 16-256(%rbx),%r10 pxor %xmm4,%xmm4 .byte 0x67,0x67 pxor %xmm5,%xmm5 movdqa -128(%rdi),%xmm0 movdqa -112(%rdi),%xmm1 movdqa -96(%rdi),%xmm2 pand 256(%r10),%xmm0 movdqa -80(%rdi),%xmm3 pand 272(%r10),%xmm1 por %xmm0,%xmm4 pand 288(%r10),%xmm2 por %xmm1,%xmm5 pand 304(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa -64(%rdi),%xmm0 movdqa -48(%rdi),%xmm1 movdqa -32(%rdi),%xmm2 pand 320(%r10),%xmm0 movdqa -16(%rdi),%xmm3 pand 336(%r10),%xmm1 por %xmm0,%xmm4 pand 352(%r10),%xmm2 por %xmm1,%xmm5 pand 368(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 0(%rdi),%xmm0 movdqa 16(%rdi),%xmm1 movdqa 32(%rdi),%xmm2 pand 384(%r10),%xmm0 movdqa 48(%rdi),%xmm3 pand 400(%r10),%xmm1 por %xmm0,%xmm4 pand 416(%r10),%xmm2 por %xmm1,%xmm5 pand 432(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 64(%rdi),%xmm0 movdqa 80(%rdi),%xmm1 movdqa 96(%rdi),%xmm2 pand 448(%r10),%xmm0 movdqa 112(%rdi),%xmm3 pand 464(%r10),%xmm1 por %xmm0,%xmm4 pand 480(%r10),%xmm2 por %xmm1,%xmm5 pand 496(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 por %xmm5,%xmm4 pshufd $0x4e,%xmm4,%xmm0 por %xmm4,%xmm0 leaq 256(%rdi),%rdi .byte 102,72,15,126,194 movq %rbp,(%rbx) leaq 32(%rbx,%rax,1),%rbx mulxq 0(%rsi),%r8,%r11 xorq %rbp,%rbp movq %rdx,%r9 mulxq 8(%rsi),%r14,%r12 adoxq -32(%rbx),%r8 adcxq %r14,%r11 mulxq 16(%rsi),%r15,%r13 adoxq -24(%rbx),%r11 adcxq %r15,%r12 mulxq 24(%rsi),%rdx,%r14 adoxq -16(%rbx),%r12 adcxq %rdx,%r13 leaq (%rcx,%rax,1),%rcx leaq 32(%rsi),%rsi adoxq -8(%rbx),%r13 adcxq %rbp,%r14 adoxq %rbp,%r14 movq %r8,%r15 imulq 32+8(%rsp),%r8 movq %r8,%rdx xorq %rbp,%rbp movq %rdi,8+8(%rsp) mulxq 0(%rcx),%rax,%r10 adcxq %rax,%r15 adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 mulxq 16(%rcx),%rax,%r12 adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq 24+8(%rsp),%rdi movq %r10,-32(%rbx) adcxq %rax,%r12 movq %r11,-24(%rbx) adoxq %rbp,%r15 movq %r12,-16(%rbx) leaq 32(%rcx),%rcx jmp .Lmulx4x_inner .align 32 .Lmulx4x_inner: mulxq 0(%rsi),%r10,%rax adcxq %rbp,%r15 adoxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq 0(%rbx),%r10 adoxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq 8(%rbx),%r11 adoxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 movq %r8,%rdx adcxq 16(%rbx),%r12 adoxq %rax,%r13 adcxq 24(%rbx),%r13 adoxq %rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adcxq %rbp,%r14 adoxq %r15,%r10 mulxq 0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 adoxq %r15,%r13 movq %r11,-32(%rbx) mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx leaq 32(%rcx),%rcx movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 movq %r13,-16(%rbx) decq %rdi jnz .Lmulx4x_inner movq 0+8(%rsp),%rax adcq %rbp,%r15 subq 0(%rbx),%rdi movq 8+8(%rsp),%rdi movq 16+8(%rsp),%r10 adcq %r15,%r14 leaq (%rsi,%rax,1),%rsi adcq %rbp,%rbp movq %r14,-8(%rbx) cmpq %r10,%rdi jb .Lmulx4x_outer movq -8(%rcx),%r10 movq %rbp,%r8 movq (%rcx,%rax,1),%r12 leaq (%rcx,%rax,1),%rbp movq %rax,%rcx leaq (%rbx,%rax,1),%rdi xorl %eax,%eax xorq %r15,%r15 subq %r14,%r10 adcq %r15,%r15 orq %r15,%r8 sarq $3+2,%rcx subq %r8,%rax movq 56+8(%rsp),%rdx decq %r12 movq 8(%rbp),%r13 xorq %r8,%r8 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqrx4x_sub_entry .cfi_endproc .size mulx4x_internal,.-mulx4x_internal .globl bn_powerx5 .hidden 
bn_powerx5 .type bn_powerx5,@function .align 32 bn_powerx5: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 .Lpowerx5_prologue: shll $3,%r9d leaq (%r9,%r9,2),%r10 negq %r9 movq (%r8),%r8 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb .Lpwrx_sp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp .Lpwrx_sp_done .align 32 .Lpwrx_sp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp .Lpwrx_sp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lpwrx_page_walk jmp .Lpwrx_page_walk_done .Lpwrx_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja .Lpwrx_page_walk .Lpwrx_page_walk_done: movq %r9,%r10 negq %r9 pxor %xmm0,%xmm0 .byte 102,72,15,110,207 .byte 102,72,15,110,209 .byte 102,73,15,110,218 .byte 102,72,15,110,226 movq %r8,32(%rsp) movq %rax,40(%rsp) .cfi_escape 0x0f,0x05,0x77,0x28,0x06,0x23,0x08 .Lpowerx5_body: call __bn_sqrx8x_internal call __bn_postx4x_internal call __bn_sqrx8x_internal call __bn_postx4x_internal call __bn_sqrx8x_internal call __bn_postx4x_internal call __bn_sqrx8x_internal call __bn_postx4x_internal call __bn_sqrx8x_internal call __bn_postx4x_internal movq %r10,%r9 movq %rsi,%rdi .byte 102,72,15,126,209 .byte 102,72,15,126,226 movq 40(%rsp),%rax call mulx4x_internal movq 40(%rsp),%rsi .cfi_def_cfa %rsi,8 movq $1,%rax movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lpowerx5_epilogue: ret .cfi_endproc .size bn_powerx5,.-bn_powerx5 .globl bn_sqrx8x_internal .hidden bn_sqrx8x_internal .hidden bn_sqrx8x_internal .type bn_sqrx8x_internal,@function .align 32 bn_sqrx8x_internal: __bn_sqrx8x_internal: .cfi_startproc _CET_ENDBR leaq 48+8(%rsp),%rdi leaq (%rsi,%r9,1),%rbp movq %r9,0+8(%rsp) movq %rbp,8+8(%rsp) jmp .Lsqr8x_zero_start .align 32 .byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00 .Lsqrx8x_zero: .byte 0x3e movdqa %xmm0,0(%rdi) movdqa %xmm0,16(%rdi) movdqa %xmm0,32(%rdi) movdqa %xmm0,48(%rdi) .Lsqr8x_zero_start: movdqa %xmm0,64(%rdi) movdqa %xmm0,80(%rdi) movdqa %xmm0,96(%rdi) movdqa %xmm0,112(%rdi) leaq 128(%rdi),%rdi subq $64,%r9 jnz .Lsqrx8x_zero movq 0(%rsi),%rdx xorq %r10,%r10 xorq %r11,%r11 xorq %r12,%r12 xorq %r13,%r13 xorq %r14,%r14 xorq %r15,%r15 leaq 48+8(%rsp),%rdi xorq %rbp,%rbp jmp .Lsqrx8x_outer_loop .align 32 .Lsqrx8x_outer_loop: mulxq 8(%rsi),%r8,%rax adcxq %r9,%r8 adoxq %rax,%r10 mulxq 16(%rsi),%r9,%rax adcxq %r10,%r9 adoxq %rax,%r11 .byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 adcxq %r11,%r10 adoxq %rax,%r12 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 adcxq %r12,%r11 adoxq %rax,%r13 mulxq 40(%rsi),%r12,%rax adcxq %r13,%r12 adoxq %rax,%r14 mulxq 48(%rsi),%r13,%rax adcxq %r14,%r13 adoxq %r15,%rax mulxq 56(%rsi),%r14,%r15 movq 8(%rsi),%rdx adcxq %rax,%r14 adoxq %rbp,%r15 adcq 64(%rdi),%r15 movq %r8,8(%rdi) movq %r9,16(%rdi) sbbq %rcx,%rcx xorq %rbp,%rbp mulxq 16(%rsi),%r8,%rbx mulxq 24(%rsi),%r9,%rax adcxq %r10,%r8 adoxq %rbx,%r9 mulxq 32(%rsi),%r10,%rbx adcxq %r11,%r9 adoxq %rax,%r10 .byte 
0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 adcxq %r12,%r10 adoxq %rbx,%r11 .byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 adcxq %r13,%r11 adoxq %r14,%r12 .byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 movq 16(%rsi),%rdx adcxq %rax,%r12 adoxq %rbx,%r13 adcxq %r15,%r13 adoxq %rbp,%r14 adcxq %rbp,%r14 movq %r8,24(%rdi) movq %r9,32(%rdi) mulxq 24(%rsi),%r8,%rbx mulxq 32(%rsi),%r9,%rax adcxq %r10,%r8 adoxq %rbx,%r9 mulxq 40(%rsi),%r10,%rbx adcxq %r11,%r9 adoxq %rax,%r10 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 adcxq %r12,%r10 adoxq %r13,%r11 .byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 .byte 0x3e movq 24(%rsi),%rdx adcxq %rbx,%r11 adoxq %rax,%r12 adcxq %r14,%r12 movq %r8,40(%rdi) movq %r9,48(%rdi) mulxq 32(%rsi),%r8,%rax adoxq %rbp,%r13 adcxq %rbp,%r13 mulxq 40(%rsi),%r9,%rbx adcxq %r10,%r8 adoxq %rax,%r9 mulxq 48(%rsi),%r10,%rax adcxq %r11,%r9 adoxq %r12,%r10 mulxq 56(%rsi),%r11,%r12 movq 32(%rsi),%rdx movq 40(%rsi),%r14 adcxq %rbx,%r10 adoxq %rax,%r11 movq 48(%rsi),%r15 adcxq %r13,%r11 adoxq %rbp,%r12 adcxq %rbp,%r12 movq %r8,56(%rdi) movq %r9,64(%rdi) mulxq %r14,%r9,%rax movq 56(%rsi),%r8 adcxq %r10,%r9 mulxq %r15,%r10,%rbx adoxq %rax,%r10 adcxq %r11,%r10 mulxq %r8,%r11,%rax movq %r14,%rdx adoxq %rbx,%r11 adcxq %r12,%r11 adcxq %rbp,%rax mulxq %r15,%r14,%rbx mulxq %r8,%r12,%r13 movq %r15,%rdx leaq 64(%rsi),%rsi adcxq %r14,%r11 adoxq %rbx,%r12 adcxq %rax,%r12 adoxq %rbp,%r13 .byte 0x67,0x67 mulxq %r8,%r8,%r14 adcxq %r8,%r13 adcxq %rbp,%r14 cmpq 8+8(%rsp),%rsi je .Lsqrx8x_outer_break negq %rcx movq $-8,%rcx movq %rbp,%r15 movq 64(%rdi),%r8 adcxq 72(%rdi),%r9 adcxq 80(%rdi),%r10 adcxq 88(%rdi),%r11 adcq 96(%rdi),%r12 adcq 104(%rdi),%r13 adcq 112(%rdi),%r14 adcq 120(%rdi),%r15 leaq (%rsi),%rbp leaq 128(%rdi),%rdi sbbq %rax,%rax movq -64(%rsi),%rdx movq %rax,16+8(%rsp) movq %rdi,24+8(%rsp) xorl %eax,%eax jmp .Lsqrx8x_loop .align 32 .Lsqrx8x_loop: movq %r8,%rbx mulxq 0(%rbp),%rax,%r8 adcxq %rax,%rbx adoxq %r9,%r8 mulxq 8(%rbp),%rax,%r9 adcxq %rax,%r8 adoxq %r10,%r9 mulxq 16(%rbp),%rax,%r10 adcxq %rax,%r9 adoxq %r11,%r10 mulxq 24(%rbp),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 adcxq %rax,%r11 adoxq %r13,%r12 mulxq 40(%rbp),%rax,%r13 adcxq %rax,%r12 adoxq %r14,%r13 mulxq 48(%rbp),%rax,%r14 movq %rbx,(%rdi,%rcx,8) movl $0,%ebx adcxq %rax,%r13 adoxq %r15,%r14 .byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 movq 8(%rsi,%rcx,8),%rdx adcxq %rax,%r14 adoxq %rbx,%r15 adcxq %rbx,%r15 .byte 0x67 incq %rcx jnz .Lsqrx8x_loop leaq 64(%rbp),%rbp movq $-8,%rcx cmpq 8+8(%rsp),%rbp je .Lsqrx8x_break subq 16+8(%rsp),%rbx .byte 0x66 movq -64(%rsi),%rdx adcxq 0(%rdi),%r8 adcxq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 leaq 64(%rdi),%rdi .byte 0x67 sbbq %rax,%rax xorl %ebx,%ebx movq %rax,16+8(%rsp) jmp .Lsqrx8x_loop .align 32 .Lsqrx8x_break: xorq %rbp,%rbp subq 16+8(%rsp),%rbx adcxq %rbp,%r8 movq 24+8(%rsp),%rcx adcxq %rbp,%r9 movq 0(%rsi),%rdx adcq $0,%r10 movq %r8,0(%rdi) adcq $0,%r11 adcq $0,%r12 adcq $0,%r13 adcq $0,%r14 adcq $0,%r15 cmpq %rcx,%rdi je .Lsqrx8x_outer_loop movq %r9,8(%rdi) movq 8(%rcx),%r9 movq %r10,16(%rdi) movq 16(%rcx),%r10 movq %r11,24(%rdi) movq 24(%rcx),%r11 movq %r12,32(%rdi) movq 32(%rcx),%r12 movq %r13,40(%rdi) movq 40(%rcx),%r13 movq %r14,48(%rdi) movq 48(%rcx),%r14 movq %r15,56(%rdi) movq 56(%rcx),%r15 movq %rcx,%rdi jmp .Lsqrx8x_outer_loop .align 32 .Lsqrx8x_outer_break: movq %r9,72(%rdi) .byte 102,72,15,126,217 movq 
%r10,80(%rdi) movq %r11,88(%rdi) movq %r12,96(%rdi) movq %r13,104(%rdi) movq %r14,112(%rdi) leaq 48+8(%rsp),%rdi movq (%rsi,%rcx,1),%rdx movq 8(%rdi),%r11 xorq %r10,%r10 movq 0+8(%rsp),%r9 adoxq %r11,%r11 movq 16(%rdi),%r12 movq 24(%rdi),%r13 .align 32 .Lsqrx4x_shift_n_add: mulxq %rdx,%rax,%rbx adoxq %r12,%r12 adcxq %r10,%rax .byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 .byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 adoxq %r13,%r13 adcxq %r11,%rbx movq 40(%rdi),%r11 movq %rax,0(%rdi) movq %rbx,8(%rdi) mulxq %rdx,%rax,%rbx adoxq %r10,%r10 adcxq %r12,%rax movq 16(%rsi,%rcx,1),%rdx movq 48(%rdi),%r12 adoxq %r11,%r11 adcxq %r13,%rbx movq 56(%rdi),%r13 movq %rax,16(%rdi) movq %rbx,24(%rdi) mulxq %rdx,%rax,%rbx adoxq %r12,%r12 adcxq %r10,%rax movq 24(%rsi,%rcx,1),%rdx leaq 32(%rcx),%rcx movq 64(%rdi),%r10 adoxq %r13,%r13 adcxq %r11,%rbx movq 72(%rdi),%r11 movq %rax,32(%rdi) movq %rbx,40(%rdi) mulxq %rdx,%rax,%rbx adoxq %r10,%r10 adcxq %r12,%rax jrcxz .Lsqrx4x_shift_n_add_break .byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 adoxq %r11,%r11 adcxq %r13,%rbx movq 80(%rdi),%r12 movq 88(%rdi),%r13 movq %rax,48(%rdi) movq %rbx,56(%rdi) leaq 64(%rdi),%rdi nop jmp .Lsqrx4x_shift_n_add .align 32 .Lsqrx4x_shift_n_add_break: adcxq %r13,%rbx movq %rax,48(%rdi) movq %rbx,56(%rdi) leaq 64(%rdi),%rdi .byte 102,72,15,126,213 __bn_sqrx8x_reduction: xorl %eax,%eax movq 32+8(%rsp),%rbx movq 48+8(%rsp),%rdx leaq -64(%rbp,%r9,1),%rcx movq %rcx,0+8(%rsp) movq %rdi,8+8(%rsp) leaq 48+8(%rsp),%rdi jmp .Lsqrx8x_reduction_loop .align 32 .Lsqrx8x_reduction_loop: movq 8(%rdi),%r9 movq 16(%rdi),%r10 movq 24(%rdi),%r11 movq 32(%rdi),%r12 movq %rdx,%r8 imulq %rbx,%rdx movq 40(%rdi),%r13 movq 48(%rdi),%r14 movq 56(%rdi),%r15 movq %rax,24+8(%rsp) leaq 64(%rdi),%rdi xorq %rsi,%rsi movq $-8,%rcx jmp .Lsqrx8x_reduce .align 32 .Lsqrx8x_reduce: movq %r8,%rbx mulxq 0(%rbp),%rax,%r8 adcxq %rbx,%rax adoxq %r9,%r8 mulxq 8(%rbp),%rbx,%r9 adcxq %rbx,%r8 adoxq %r10,%r9 mulxq 16(%rbp),%rbx,%r10 adcxq %rbx,%r9 adoxq %r11,%r10 mulxq 24(%rbp),%rbx,%r11 adcxq %rbx,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 movq %rdx,%rax movq %r8,%rdx adcxq %rbx,%r11 adoxq %r13,%r12 mulxq 32+8(%rsp),%rbx,%rdx movq %rax,%rdx movq %rax,64+48+8(%rsp,%rcx,8) mulxq 40(%rbp),%rax,%r13 adcxq %rax,%r12 adoxq %r14,%r13 mulxq 48(%rbp),%rax,%r14 adcxq %rax,%r13 adoxq %r15,%r14 mulxq 56(%rbp),%rax,%r15 movq %rbx,%rdx adcxq %rax,%r14 adoxq %rsi,%r15 adcxq %rsi,%r15 .byte 0x67,0x67,0x67 incq %rcx jnz .Lsqrx8x_reduce movq %rsi,%rax cmpq 0+8(%rsp),%rbp jae .Lsqrx8x_no_tail movq 48+8(%rsp),%rdx addq 0(%rdi),%r8 leaq 64(%rbp),%rbp movq $-8,%rcx adcxq 8(%rdi),%r9 adcxq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 leaq 64(%rdi),%rdi sbbq %rax,%rax xorq %rsi,%rsi movq %rax,16+8(%rsp) jmp .Lsqrx8x_tail .align 32 .Lsqrx8x_tail: movq %r8,%rbx mulxq 0(%rbp),%rax,%r8 adcxq %rax,%rbx adoxq %r9,%r8 mulxq 8(%rbp),%rax,%r9 adcxq %rax,%r8 adoxq %r10,%r9 mulxq 16(%rbp),%rax,%r10 adcxq %rax,%r9 adoxq %r11,%r10 mulxq 24(%rbp),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 adcxq %rax,%r11 adoxq %r13,%r12 mulxq 40(%rbp),%rax,%r13 adcxq %rax,%r12 adoxq %r14,%r13 mulxq 48(%rbp),%rax,%r14 adcxq %rax,%r13 adoxq %r15,%r14 mulxq 56(%rbp),%rax,%r15 movq 72+48+8(%rsp,%rcx,8),%rdx adcxq %rax,%r14 adoxq %rsi,%r15 movq %rbx,(%rdi,%rcx,8) movq %r8,%rbx adcxq %rsi,%r15 incq %rcx jnz .Lsqrx8x_tail cmpq 0+8(%rsp),%rbp jae .Lsqrx8x_tail_done subq 16+8(%rsp),%rsi movq 48+8(%rsp),%rdx 
leaq 64(%rbp),%rbp adcq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 leaq 64(%rdi),%rdi sbbq %rax,%rax subq $8,%rcx xorq %rsi,%rsi movq %rax,16+8(%rsp) jmp .Lsqrx8x_tail .align 32 .Lsqrx8x_tail_done: xorq %rax,%rax addq 24+8(%rsp),%r8 adcq $0,%r9 adcq $0,%r10 adcq $0,%r11 adcq $0,%r12 adcq $0,%r13 adcq $0,%r14 adcq $0,%r15 adcq $0,%rax subq 16+8(%rsp),%rsi .Lsqrx8x_no_tail: adcq 0(%rdi),%r8 .byte 102,72,15,126,217 adcq 8(%rdi),%r9 movq 56(%rbp),%rsi .byte 102,72,15,126,213 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 adcq $0,%rax movq 32+8(%rsp),%rbx movq 64(%rdi,%rcx,1),%rdx movq %r8,0(%rdi) leaq 64(%rdi),%r8 movq %r9,8(%rdi) movq %r10,16(%rdi) movq %r11,24(%rdi) movq %r12,32(%rdi) movq %r13,40(%rdi) movq %r14,48(%rdi) movq %r15,56(%rdi) leaq 64(%rdi,%rcx,1),%rdi cmpq 8+8(%rsp),%r8 jb .Lsqrx8x_reduction_loop ret .cfi_endproc .size bn_sqrx8x_internal,.-bn_sqrx8x_internal .align 32 .type __bn_postx4x_internal,@function __bn_postx4x_internal: .cfi_startproc movq 0(%rbp),%r12 movq %rcx,%r10 movq %rcx,%r9 negq %rax sarq $3+2,%rcx .byte 102,72,15,126,202 .byte 102,72,15,126,206 decq %r12 movq 8(%rbp),%r13 xorq %r8,%r8 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp .Lsqrx4x_sub_entry .align 16 .Lsqrx4x_sub: movq 0(%rbp),%r12 movq 8(%rbp),%r13 movq 16(%rbp),%r14 movq 24(%rbp),%r15 .Lsqrx4x_sub_entry: andnq %rax,%r12,%r12 leaq 32(%rbp),%rbp andnq %rax,%r13,%r13 andnq %rax,%r14,%r14 andnq %rax,%r15,%r15 negq %r8 adcq 0(%rdi),%r12 adcq 8(%rdi),%r13 adcq 16(%rdi),%r14 adcq 24(%rdi),%r15 movq %r12,0(%rdx) leaq 32(%rdi),%rdi movq %r13,8(%rdx) sbbq %r8,%r8 movq %r14,16(%rdx) movq %r15,24(%rdx) leaq 32(%rdx),%rdx incq %rcx jnz .Lsqrx4x_sub negq %r9 ret .cfi_endproc .size __bn_postx4x_internal,.-__bn_postx4x_internal .globl bn_scatter5 .hidden bn_scatter5 .type bn_scatter5,@function .align 16 bn_scatter5: .cfi_startproc _CET_ENDBR cmpl $0,%esi jz .Lscatter_epilogue leaq (%rdx,%rcx,8),%rdx .Lscatter: movq (%rdi),%rax leaq 8(%rdi),%rdi movq %rax,(%rdx) leaq 256(%rdx),%rdx subl $1,%esi jnz .Lscatter .Lscatter_epilogue: ret .cfi_endproc .size bn_scatter5,.-bn_scatter5 .globl bn_gather5 .hidden bn_gather5 .type bn_gather5,@function .align 32 bn_gather5: .cfi_startproc .LSEH_begin_bn_gather5: _CET_ENDBR .byte 0x4c,0x8d,0x14,0x24 .cfi_def_cfa_register %r10 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 leaq .Linc(%rip),%rax andq $-16,%rsp movd %ecx,%xmm5 movdqa 0(%rax),%xmm0 movdqa 16(%rax),%xmm1 leaq 128(%rdx),%r11 leaq 128(%rsp),%rax pshufd $0,%xmm5,%xmm5 movdqa %xmm1,%xmm4 movdqa %xmm1,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,-128(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,-112(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,-96(%rax) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,-80(%rax) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,-64(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,-48(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,-32(%rax) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,-16(%rax) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,0(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,16(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd 
%xmm5,%xmm3 movdqa %xmm2,32(%rax) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,48(%rax) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,64(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,80(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,96(%rax) movdqa %xmm4,%xmm2 movdqa %xmm3,112(%rax) jmp .Lgather .align 32 .Lgather: pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 movdqa -128(%r11),%xmm0 movdqa -112(%r11),%xmm1 movdqa -96(%r11),%xmm2 pand -128(%rax),%xmm0 movdqa -80(%r11),%xmm3 pand -112(%rax),%xmm1 por %xmm0,%xmm4 pand -96(%rax),%xmm2 por %xmm1,%xmm5 pand -80(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa -64(%r11),%xmm0 movdqa -48(%r11),%xmm1 movdqa -32(%r11),%xmm2 pand -64(%rax),%xmm0 movdqa -16(%r11),%xmm3 pand -48(%rax),%xmm1 por %xmm0,%xmm4 pand -32(%rax),%xmm2 por %xmm1,%xmm5 pand -16(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 0(%r11),%xmm0 movdqa 16(%r11),%xmm1 movdqa 32(%r11),%xmm2 pand 0(%rax),%xmm0 movdqa 48(%r11),%xmm3 pand 16(%rax),%xmm1 por %xmm0,%xmm4 pand 32(%rax),%xmm2 por %xmm1,%xmm5 pand 48(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 64(%r11),%xmm0 movdqa 80(%r11),%xmm1 movdqa 96(%r11),%xmm2 pand 64(%rax),%xmm0 movdqa 112(%r11),%xmm3 pand 80(%rax),%xmm1 por %xmm0,%xmm4 pand 96(%rax),%xmm2 por %xmm1,%xmm5 pand 112(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 por %xmm5,%xmm4 leaq 256(%r11),%r11 pshufd $0x4e,%xmm4,%xmm0 por %xmm4,%xmm0 movq %xmm0,(%rdi) leaq 8(%rdi),%rdi subl $1,%esi jnz .Lgather leaq (%r10),%rsp .cfi_def_cfa_register %rsp ret .LSEH_end_bn_gather5: .cfi_endproc .size bn_gather5,.-bn_gather5 .section .rodata .align 64 .Linc: .long 0,0, 1,1 .long 2,2, 2,2 .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,119,105,116,104,32,115,99,97,116,116,101,114,47,103,97,116,104,101,114,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text #endif
marvin-hansen/iggy-streaming-system
21,653
thirdparty/crates/ring-0.17.9/pregenerated/ghash-x86_64-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .globl _gcm_init_clmul .private_extern _gcm_init_clmul .p2align 4 _gcm_init_clmul: _CET_ENDBR L$_init_clmul: movdqu (%rsi),%xmm2 pshufd $78,%xmm2,%xmm2 pshufd $255,%xmm2,%xmm4 movdqa %xmm2,%xmm3 psllq $1,%xmm2 pxor %xmm5,%xmm5 psrlq $63,%xmm3 pcmpgtd %xmm4,%xmm5 pslldq $8,%xmm3 por %xmm3,%xmm2 pand L$0x1c2_polynomial(%rip),%xmm5 pxor %xmm5,%xmm2 pshufd $78,%xmm2,%xmm6 movdqa %xmm2,%xmm0 pxor %xmm2,%xmm6 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,222,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 pshufd $78,%xmm2,%xmm3 pshufd $78,%xmm0,%xmm4 pxor %xmm2,%xmm3 movdqu %xmm2,0(%rdi) pxor %xmm0,%xmm4 movdqu %xmm0,16(%rdi) .byte 102,15,58,15,227,8 movdqu %xmm4,32(%rdi) movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,222,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 movdqa %xmm0,%xmm5 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,222,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 pshufd $78,%xmm5,%xmm3 pshufd $78,%xmm0,%xmm4 pxor %xmm5,%xmm3 movdqu %xmm5,48(%rdi) pxor %xmm0,%xmm4 movdqu %xmm0,64(%rdi) .byte 102,15,58,15,227,8 movdqu %xmm4,80(%rdi) ret .globl _gcm_ghash_clmul .private_extern _gcm_ghash_clmul .p2align 5 _gcm_ghash_clmul: _CET_ENDBR L$_ghash_clmul: movdqa L$bswap_mask(%rip),%xmm10 movdqu (%rdi),%xmm0 movdqu (%rsi),%xmm2 movdqu 32(%rsi),%xmm7 .byte 102,65,15,56,0,194 subq $0x10,%rcx jz L$odd_tail movdqu 16(%rsi),%xmm6 cmpq $0x30,%rcx jb L$skip4x subq $0x30,%rcx movq $0xA040608020C0E000,%rax movdqu 48(%rsi),%xmm14 movdqu 64(%rsi),%xmm15 movdqu 48(%rdx),%xmm3 movdqu 32(%rdx),%xmm11 .byte 102,65,15,56,0,218 .byte 102,69,15,56,0,218 movdqa %xmm3,%xmm5 pshufd $78,%xmm3,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,68,218,0 .byte 102,15,58,68,234,17 .byte 102,15,58,68,231,0 movdqa %xmm11,%xmm13 pshufd $78,%xmm11,%xmm12 pxor %xmm11,%xmm12 .byte 102,68,15,58,68,222,0 .byte 102,68,15,58,68,238,17 .byte 102,68,15,58,68,231,16 
xorps %xmm11,%xmm3 xorps %xmm13,%xmm5 movups 80(%rsi),%xmm7 xorps %xmm12,%xmm4 movdqu 16(%rdx),%xmm11 movdqu 0(%rdx),%xmm8 .byte 102,69,15,56,0,218 .byte 102,69,15,56,0,194 movdqa %xmm11,%xmm13 pshufd $78,%xmm11,%xmm12 pxor %xmm8,%xmm0 pxor %xmm11,%xmm12 .byte 102,69,15,58,68,222,0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm8 pxor %xmm0,%xmm8 .byte 102,69,15,58,68,238,17 .byte 102,68,15,58,68,231,0 xorps %xmm11,%xmm3 xorps %xmm13,%xmm5 leaq 64(%rdx),%rdx subq $0x40,%rcx jc L$tail4x jmp L$mod4_loop .p2align 5 L$mod4_loop: .byte 102,65,15,58,68,199,0 xorps %xmm12,%xmm4 movdqu 48(%rdx),%xmm11 .byte 102,69,15,56,0,218 .byte 102,65,15,58,68,207,17 xorps %xmm3,%xmm0 movdqu 32(%rdx),%xmm3 movdqa %xmm11,%xmm13 .byte 102,68,15,58,68,199,16 pshufd $78,%xmm11,%xmm12 xorps %xmm5,%xmm1 pxor %xmm11,%xmm12 .byte 102,65,15,56,0,218 movups 32(%rsi),%xmm7 xorps %xmm4,%xmm8 .byte 102,68,15,58,68,218,0 pshufd $78,%xmm3,%xmm4 pxor %xmm0,%xmm8 movdqa %xmm3,%xmm5 pxor %xmm1,%xmm8 pxor %xmm3,%xmm4 movdqa %xmm8,%xmm9 .byte 102,68,15,58,68,234,17 pslldq $8,%xmm8 psrldq $8,%xmm9 pxor %xmm8,%xmm0 movdqa L$7_mask(%rip),%xmm8 pxor %xmm9,%xmm1 .byte 102,76,15,110,200 pand %xmm0,%xmm8 .byte 102,69,15,56,0,200 pxor %xmm0,%xmm9 .byte 102,68,15,58,68,231,0 psllq $57,%xmm9 movdqa %xmm9,%xmm8 pslldq $8,%xmm9 .byte 102,15,58,68,222,0 psrldq $8,%xmm8 pxor %xmm9,%xmm0 pxor %xmm8,%xmm1 movdqu 0(%rdx),%xmm8 movdqa %xmm0,%xmm9 psrlq $1,%xmm0 .byte 102,15,58,68,238,17 xorps %xmm11,%xmm3 movdqu 16(%rdx),%xmm11 .byte 102,69,15,56,0,218 .byte 102,15,58,68,231,16 xorps %xmm13,%xmm5 movups 80(%rsi),%xmm7 .byte 102,69,15,56,0,194 pxor %xmm9,%xmm1 pxor %xmm0,%xmm9 psrlq $5,%xmm0 movdqa %xmm11,%xmm13 pxor %xmm12,%xmm4 pshufd $78,%xmm11,%xmm12 pxor %xmm9,%xmm0 pxor %xmm8,%xmm1 pxor %xmm11,%xmm12 .byte 102,69,15,58,68,222,0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 movdqa %xmm0,%xmm1 .byte 102,69,15,58,68,238,17 xorps %xmm11,%xmm3 pshufd $78,%xmm0,%xmm8 pxor %xmm0,%xmm8 .byte 102,68,15,58,68,231,0 xorps %xmm13,%xmm5 leaq 64(%rdx),%rdx subq $0x40,%rcx jnc L$mod4_loop L$tail4x: .byte 102,65,15,58,68,199,0 .byte 102,65,15,58,68,207,17 .byte 102,68,15,58,68,199,16 xorps %xmm12,%xmm4 xorps %xmm3,%xmm0 xorps %xmm5,%xmm1 pxor %xmm0,%xmm1 pxor %xmm4,%xmm8 pxor %xmm1,%xmm8 pxor %xmm0,%xmm1 movdqa %xmm8,%xmm9 psrldq $8,%xmm8 pslldq $8,%xmm9 pxor %xmm8,%xmm1 pxor %xmm9,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 addq $0x40,%rcx jz L$done movdqu 32(%rsi),%xmm7 subq $0x10,%rcx jz L$odd_tail L$skip4x: movdqu (%rdx),%xmm8 movdqu 16(%rdx),%xmm3 .byte 102,69,15,56,0,194 .byte 102,65,15,56,0,218 pxor %xmm8,%xmm0 movdqa %xmm3,%xmm5 pshufd $78,%xmm3,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,68,218,0 .byte 102,15,58,68,234,17 .byte 102,15,58,68,231,0 leaq 32(%rdx),%rdx nop subq $0x20,%rcx jbe L$even_tail nop jmp L$mod_loop .p2align 5 L$mod_loop: movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm8 pshufd $78,%xmm0,%xmm4 pxor %xmm0,%xmm4 .byte 102,15,58,68,198,0 .byte 102,15,58,68,206,17 .byte 102,15,58,68,231,16 pxor %xmm3,%xmm0 pxor %xmm5,%xmm1 movdqu (%rdx),%xmm9 pxor %xmm0,%xmm8 .byte 102,69,15,56,0,202 movdqu 16(%rdx),%xmm3 pxor %xmm1,%xmm8 pxor %xmm9,%xmm1 pxor %xmm8,%xmm4 .byte 102,65,15,56,0,218 movdqa %xmm4,%xmm8 psrldq $8,%xmm8 pslldq $8,%xmm4 pxor %xmm8,%xmm1 pxor %xmm4,%xmm0 movdqa 
%xmm3,%xmm5 movdqa %xmm0,%xmm9 movdqa %xmm0,%xmm8 psllq $5,%xmm0 pxor %xmm0,%xmm8 .byte 102,15,58,68,218,0 psllq $1,%xmm0 pxor %xmm8,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm8 pslldq $8,%xmm0 psrldq $8,%xmm8 pxor %xmm9,%xmm0 pshufd $78,%xmm5,%xmm4 pxor %xmm8,%xmm1 pxor %xmm5,%xmm4 movdqa %xmm0,%xmm9 psrlq $1,%xmm0 .byte 102,15,58,68,234,17 pxor %xmm9,%xmm1 pxor %xmm0,%xmm9 psrlq $5,%xmm0 pxor %xmm9,%xmm0 leaq 32(%rdx),%rdx psrlq $1,%xmm0 .byte 102,15,58,68,231,0 pxor %xmm1,%xmm0 subq $0x20,%rcx ja L$mod_loop L$even_tail: movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm8 pshufd $78,%xmm0,%xmm4 pxor %xmm0,%xmm4 .byte 102,15,58,68,198,0 .byte 102,15,58,68,206,17 .byte 102,15,58,68,231,16 pxor %xmm3,%xmm0 pxor %xmm5,%xmm1 pxor %xmm0,%xmm8 pxor %xmm1,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm8 psrldq $8,%xmm8 pslldq $8,%xmm4 pxor %xmm8,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 testq %rcx,%rcx jnz L$done L$odd_tail: movdqu (%rdx),%xmm8 .byte 102,69,15,56,0,194 pxor %xmm8,%xmm0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,223,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 L$done: .byte 102,65,15,56,0,194 movdqu %xmm0,(%rdi) ret .globl _gcm_init_avx .private_extern _gcm_init_avx .p2align 5 _gcm_init_avx: _CET_ENDBR vzeroupper vmovdqu (%rsi),%xmm2 vpshufd $78,%xmm2,%xmm2 vpshufd $255,%xmm2,%xmm4 vpsrlq $63,%xmm2,%xmm3 vpsllq $1,%xmm2,%xmm2 vpxor %xmm5,%xmm5,%xmm5 vpcmpgtd %xmm4,%xmm5,%xmm5 vpslldq $8,%xmm3,%xmm3 vpor %xmm3,%xmm2,%xmm2 vpand L$0x1c2_polynomial(%rip),%xmm5,%xmm5 vpxor %xmm5,%xmm2,%xmm2 vpunpckhqdq %xmm2,%xmm2,%xmm6 vmovdqa %xmm2,%xmm0 vpxor %xmm2,%xmm6,%xmm6 movq $4,%r10 jmp L$init_start_avx .p2align 5 L$init_loop_avx: vpalignr $8,%xmm3,%xmm4,%xmm5 vmovdqu %xmm5,-16(%rdi) vpunpckhqdq %xmm0,%xmm0,%xmm3 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3 vpxor %xmm0,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $8,%xmm3,%xmm4 vpsrldq $8,%xmm3,%xmm3 vpxor %xmm4,%xmm0,%xmm0 vpxor %xmm3,%xmm1,%xmm1 vpsllq $57,%xmm0,%xmm3 vpsllq $62,%xmm0,%xmm4 vpxor %xmm3,%xmm4,%xmm4 vpsllq $63,%xmm0,%xmm3 vpxor %xmm3,%xmm4,%xmm4 vpslldq $8,%xmm4,%xmm3 vpsrldq $8,%xmm4,%xmm4 vpxor %xmm3,%xmm0,%xmm0 vpxor %xmm4,%xmm1,%xmm1 vpsrlq $1,%xmm0,%xmm4 vpxor %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm0,%xmm0 vpsrlq $5,%xmm4,%xmm4 vpxor %xmm4,%xmm0,%xmm0 vpsrlq $1,%xmm0,%xmm0 vpxor %xmm1,%xmm0,%xmm0 L$init_start_avx: vmovdqa %xmm0,%xmm5 vpunpckhqdq %xmm0,%xmm0,%xmm3 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3 vpxor %xmm0,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $8,%xmm3,%xmm4 vpsrldq $8,%xmm3,%xmm3 vpxor %xmm4,%xmm0,%xmm0 vpxor %xmm3,%xmm1,%xmm1 vpsllq $57,%xmm0,%xmm3 vpsllq 
$62,%xmm0,%xmm4 vpxor %xmm3,%xmm4,%xmm4 vpsllq $63,%xmm0,%xmm3 vpxor %xmm3,%xmm4,%xmm4 vpslldq $8,%xmm4,%xmm3 vpsrldq $8,%xmm4,%xmm4 vpxor %xmm3,%xmm0,%xmm0 vpxor %xmm4,%xmm1,%xmm1 vpsrlq $1,%xmm0,%xmm4 vpxor %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm0,%xmm0 vpsrlq $5,%xmm4,%xmm4 vpxor %xmm4,%xmm0,%xmm0 vpsrlq $1,%xmm0,%xmm0 vpxor %xmm1,%xmm0,%xmm0 vpshufd $78,%xmm5,%xmm3 vpshufd $78,%xmm0,%xmm4 vpxor %xmm5,%xmm3,%xmm3 vmovdqu %xmm5,0(%rdi) vpxor %xmm0,%xmm4,%xmm4 vmovdqu %xmm0,16(%rdi) leaq 48(%rdi),%rdi subq $1,%r10 jnz L$init_loop_avx vpalignr $8,%xmm4,%xmm3,%xmm5 vmovdqu %xmm5,-16(%rdi) vzeroupper ret .globl _gcm_ghash_avx .private_extern _gcm_ghash_avx .p2align 5 _gcm_ghash_avx: _CET_ENDBR vzeroupper vmovdqu (%rdi),%xmm10 leaq L$0x1c2_polynomial(%rip),%r10 leaq 64(%rsi),%rsi vmovdqu L$bswap_mask(%rip),%xmm13 vpshufb %xmm13,%xmm10,%xmm10 cmpq $0x80,%rcx jb L$short_avx subq $0x80,%rcx vmovdqu 112(%rdx),%xmm14 vmovdqu 0-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm14 vmovdqu 32-64(%rsi),%xmm7 vpunpckhqdq %xmm14,%xmm14,%xmm9 vmovdqu 96(%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm14,%xmm9,%xmm9 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 16-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vmovdqu 80(%rdx),%xmm14 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vpshufb %xmm13,%xmm14,%xmm14 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 48-64(%rsi),%xmm6 vpxor %xmm14,%xmm9,%xmm9 vmovdqu 64(%rdx),%xmm15 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 80-64(%rsi),%xmm7 vpshufb %xmm13,%xmm15,%xmm15 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm1,%xmm4,%xmm4 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 64-64(%rsi),%xmm6 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vmovdqu 48(%rdx),%xmm14 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpxor %xmm4,%xmm1,%xmm1 vpshufb %xmm13,%xmm14,%xmm14 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 96-64(%rsi),%xmm6 vpxor %xmm5,%xmm2,%xmm2 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 128-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vmovdqu 32(%rdx),%xmm15 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm1,%xmm4,%xmm4 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 112-64(%rsi),%xmm6 vpxor %xmm2,%xmm5,%xmm5 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vmovdqu 16(%rdx),%xmm14 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpxor %xmm4,%xmm1,%xmm1 vpshufb %xmm13,%xmm14,%xmm14 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 144-64(%rsi),%xmm6 vpxor %xmm5,%xmm2,%xmm2 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 176-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vmovdqu (%rdx),%xmm15 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm1,%xmm4,%xmm4 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 160-64(%rsi),%xmm6 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2 leaq 128(%rdx),%rdx cmpq $0x80,%rcx jb L$tail_avx vpxor %xmm10,%xmm15,%xmm15 subq $0x80,%rcx jmp L$oop8x_avx .p2align 5 L$oop8x_avx: vpunpckhqdq %xmm15,%xmm15,%xmm8 vmovdqu 112(%rdx),%xmm14 vpxor %xmm0,%xmm3,%xmm3 vpxor %xmm15,%xmm8,%xmm8 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm10 vpshufb %xmm13,%xmm14,%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm11 vmovdqu 0-64(%rsi),%xmm6 vpunpckhqdq 
%xmm14,%xmm14,%xmm9 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm12 vmovdqu 32-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vmovdqu 96(%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm3,%xmm10,%xmm10 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vxorps %xmm4,%xmm11,%xmm11 vmovdqu 16-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm5,%xmm12,%xmm12 vxorps %xmm15,%xmm8,%xmm8 vmovdqu 80(%rdx),%xmm14 vpxor %xmm10,%xmm12,%xmm12 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpxor %xmm11,%xmm12,%xmm12 vpslldq $8,%xmm12,%xmm9 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vpsrldq $8,%xmm12,%xmm12 vpxor %xmm9,%xmm10,%xmm10 vmovdqu 48-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm14 vxorps %xmm12,%xmm11,%xmm11 vpxor %xmm1,%xmm4,%xmm4 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 80-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vpxor %xmm2,%xmm5,%xmm5 vmovdqu 64(%rdx),%xmm15 vpalignr $8,%xmm10,%xmm10,%xmm12 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpshufb %xmm13,%xmm15,%xmm15 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 64-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vxorps %xmm15,%xmm8,%xmm8 vpxor %xmm5,%xmm2,%xmm2 vmovdqu 48(%rdx),%xmm14 vpclmulqdq $0x10,(%r10),%xmm10,%xmm10 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpshufb %xmm13,%xmm14,%xmm14 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 96-64(%rsi),%xmm6 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 128-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vpxor %xmm2,%xmm5,%xmm5 vmovdqu 32(%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpshufb %xmm13,%xmm15,%xmm15 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 112-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vpxor %xmm5,%xmm2,%xmm2 vxorps %xmm12,%xmm10,%xmm10 vmovdqu 16(%rdx),%xmm14 vpalignr $8,%xmm10,%xmm10,%xmm12 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpshufb %xmm13,%xmm14,%xmm14 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 144-64(%rsi),%xmm6 vpclmulqdq $0x10,(%r10),%xmm10,%xmm10 vxorps %xmm11,%xmm12,%xmm12 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 176-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vpxor %xmm2,%xmm5,%xmm5 vmovdqu (%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 160-64(%rsi),%xmm6 vpxor %xmm12,%xmm15,%xmm15 vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2 vpxor %xmm10,%xmm15,%xmm15 leaq 128(%rdx),%rdx subq $0x80,%rcx jnc L$oop8x_avx addq $0x80,%rcx jmp L$tail_no_xor_avx .p2align 5 L$short_avx: vmovdqu -16(%rdx,%rcx,1),%xmm14 leaq (%rdx,%rcx,1),%rdx vmovdqu 0-64(%rsi),%xmm6 vmovdqu 32-64(%rsi),%xmm7 vpshufb %xmm13,%xmm14,%xmm15 vmovdqa %xmm0,%xmm3 vmovdqa %xmm1,%xmm4 vmovdqa %xmm2,%xmm5 subq $0x10,%rcx jz L$tail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -32(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 16-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vpsrldq $8,%xmm7,%xmm7 subq $0x10,%rcx jz L$tail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu 
-48(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 48-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vmovdqu 80-64(%rsi),%xmm7 subq $0x10,%rcx jz L$tail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -64(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 64-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vpsrldq $8,%xmm7,%xmm7 subq $0x10,%rcx jz L$tail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -80(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 96-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vmovdqu 128-64(%rsi),%xmm7 subq $0x10,%rcx jz L$tail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -96(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 112-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vpsrldq $8,%xmm7,%xmm7 subq $0x10,%rcx jz L$tail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -112(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 144-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vmovq 184-64(%rsi),%xmm7 subq $0x10,%rcx jmp L$tail_avx .p2align 5 L$tail_avx: vpxor %xmm10,%xmm15,%xmm15 L$tail_no_xor_avx: vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vmovdqu (%r10),%xmm12 vpxor %xmm0,%xmm3,%xmm10 vpxor %xmm1,%xmm4,%xmm11 vpxor %xmm2,%xmm5,%xmm5 vpxor %xmm10,%xmm5,%xmm5 vpxor %xmm11,%xmm5,%xmm5 vpslldq $8,%xmm5,%xmm9 vpsrldq $8,%xmm5,%xmm5 vpxor %xmm9,%xmm10,%xmm10 vpxor %xmm5,%xmm11,%xmm11 vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9 vpalignr $8,%xmm10,%xmm10,%xmm10 vpxor %xmm9,%xmm10,%xmm10 vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9 vpalignr $8,%xmm10,%xmm10,%xmm10 vpxor %xmm11,%xmm10,%xmm10 vpxor %xmm9,%xmm10,%xmm10 cmpq $0,%rcx jne L$short_avx vpshufb %xmm13,%xmm10,%xmm10 vmovdqu %xmm10,(%rdi) vzeroupper ret .section __DATA,__const .p2align 6 L$bswap_mask: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 L$0x1c2_polynomial: .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 L$7_mask: .long 7,0,7,0 .p2align 6 .byte 71,72,65,83,72,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .p2align 6 .text #endif
marvin-hansen/iggy-streaming-system
6,302
thirdparty/crates/ring-0.17.9/pregenerated/ghash-armv4-linux32.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) #include <ring-core/arm_arch.h> @ Silence ARMv8 deprecated IT instruction warnings. This file is used by both @ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. (ARMv8 PMULL @ instructions are in aesv8-armx.pl.) .arch armv7-a .text #if defined(__thumb2__) || defined(__clang__) .syntax unified #define ldrplb ldrbpl #define ldrneb ldrbne #endif #if defined(__thumb2__) .thumb #else .code 32 #endif #if __ARM_MAX_ARCH__>=7 .arch armv7-a .fpu neon .globl gcm_init_neon .hidden gcm_init_neon .type gcm_init_neon,%function .align 4 gcm_init_neon: vld1.64 d7,[r1]! @ load H vmov.i8 q8,#0xe1 vld1.64 d6,[r1] vshl.i64 d17,#57 vshr.u64 d16,#63 @ t0=0xc2....01 vdup.8 q9,d7[7] vshr.u64 d26,d6,#63 vshr.s8 q9,#7 @ broadcast carry bit vshl.i64 q3,q3,#1 vand q8,q8,q9 vorr d7,d26 @ H<<<=1 veor q3,q3,q8 @ twisted H vstmia r0,{q3} bx lr @ bx lr .size gcm_init_neon,.-gcm_init_neon .globl gcm_gmult_neon .hidden gcm_gmult_neon .type gcm_gmult_neon,%function .align 4 gcm_gmult_neon: vld1.64 d7,[r0]! @ load Xi vld1.64 d6,[r0]! vmov.i64 d29,#0x0000ffffffffffff vldmia r1,{d26,d27} @ load twisted H vmov.i64 d30,#0x00000000ffffffff #ifdef __ARMEL__ vrev64.8 q3,q3 #endif vmov.i64 d31,#0x000000000000ffff veor d28,d26,d27 @ Karatsuba pre-processing mov r3,#16 b .Lgmult_neon .size gcm_gmult_neon,.-gcm_gmult_neon .globl gcm_ghash_neon .hidden gcm_ghash_neon .type gcm_ghash_neon,%function .align 4 gcm_ghash_neon: vld1.64 d1,[r0]! @ load Xi vld1.64 d0,[r0]! vmov.i64 d29,#0x0000ffffffffffff vldmia r1,{d26,d27} @ load twisted H vmov.i64 d30,#0x00000000ffffffff #ifdef __ARMEL__ vrev64.8 q0,q0 #endif vmov.i64 d31,#0x000000000000ffff veor d28,d26,d27 @ Karatsuba pre-processing .Loop_neon: vld1.64 d7,[r2]! @ load inp vld1.64 d6,[r2]! 
#ifdef __ARMEL__ vrev64.8 q3,q3 #endif veor q3,q0 @ inp^=Xi .Lgmult_neon: vext.8 d16, d26, d26, #1 @ A1 vmull.p8 q8, d16, d6 @ F = A1*B vext.8 d0, d6, d6, #1 @ B1 vmull.p8 q0, d26, d0 @ E = A*B1 vext.8 d18, d26, d26, #2 @ A2 vmull.p8 q9, d18, d6 @ H = A2*B vext.8 d22, d6, d6, #2 @ B2 vmull.p8 q11, d26, d22 @ G = A*B2 vext.8 d20, d26, d26, #3 @ A3 veor q8, q8, q0 @ L = E + F vmull.p8 q10, d20, d6 @ J = A3*B vext.8 d0, d6, d6, #3 @ B3 veor q9, q9, q11 @ M = G + H vmull.p8 q0, d26, d0 @ I = A*B3 veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8 vand d17, d17, d29 vext.8 d22, d6, d6, #4 @ B4 veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16 vand d19, d19, d30 vmull.p8 q11, d26, d22 @ K = A*B4 veor q10, q10, q0 @ N = I + J veor d16, d16, d17 veor d18, d18, d19 veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24 vand d21, d21, d31 vext.8 q8, q8, q8, #15 veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32 vmov.i64 d23, #0 vext.8 q9, q9, q9, #14 veor d20, d20, d21 vmull.p8 q0, d26, d6 @ D = A*B vext.8 q11, q11, q11, #12 vext.8 q10, q10, q10, #13 veor q8, q8, q9 veor q10, q10, q11 veor q0, q0, q8 veor q0, q0, q10 veor d6,d6,d7 @ Karatsuba pre-processing vext.8 d16, d28, d28, #1 @ A1 vmull.p8 q8, d16, d6 @ F = A1*B vext.8 d2, d6, d6, #1 @ B1 vmull.p8 q1, d28, d2 @ E = A*B1 vext.8 d18, d28, d28, #2 @ A2 vmull.p8 q9, d18, d6 @ H = A2*B vext.8 d22, d6, d6, #2 @ B2 vmull.p8 q11, d28, d22 @ G = A*B2 vext.8 d20, d28, d28, #3 @ A3 veor q8, q8, q1 @ L = E + F vmull.p8 q10, d20, d6 @ J = A3*B vext.8 d2, d6, d6, #3 @ B3 veor q9, q9, q11 @ M = G + H vmull.p8 q1, d28, d2 @ I = A*B3 veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8 vand d17, d17, d29 vext.8 d22, d6, d6, #4 @ B4 veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16 vand d19, d19, d30 vmull.p8 q11, d28, d22 @ K = A*B4 veor q10, q10, q1 @ N = I + J veor d16, d16, d17 veor d18, d18, d19 veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24 vand d21, d21, d31 vext.8 q8, q8, q8, #15 veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32 vmov.i64 d23, #0 vext.8 q9, q9, q9, #14 veor d20, d20, d21 vmull.p8 q1, d28, d6 @ D = A*B vext.8 q11, q11, q11, #12 vext.8 q10, q10, q10, #13 veor q8, q8, q9 veor q10, q10, q11 veor q1, q1, q8 veor q1, q1, q10 vext.8 d16, d27, d27, #1 @ A1 vmull.p8 q8, d16, d7 @ F = A1*B vext.8 d4, d7, d7, #1 @ B1 vmull.p8 q2, d27, d4 @ E = A*B1 vext.8 d18, d27, d27, #2 @ A2 vmull.p8 q9, d18, d7 @ H = A2*B vext.8 d22, d7, d7, #2 @ B2 vmull.p8 q11, d27, d22 @ G = A*B2 vext.8 d20, d27, d27, #3 @ A3 veor q8, q8, q2 @ L = E + F vmull.p8 q10, d20, d7 @ J = A3*B vext.8 d4, d7, d7, #3 @ B3 veor q9, q9, q11 @ M = G + H vmull.p8 q2, d27, d4 @ I = A*B3 veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8 vand d17, d17, d29 vext.8 d22, d7, d7, #4 @ B4 veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16 vand d19, d19, d30 vmull.p8 q11, d27, d22 @ K = A*B4 veor q10, q10, q2 @ N = I + J veor d16, d16, d17 veor d18, d18, d19 veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24 vand d21, d21, d31 vext.8 q8, q8, q8, #15 veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32 vmov.i64 d23, #0 vext.8 q9, q9, q9, #14 veor d20, d20, d21 vmull.p8 q2, d27, d7 @ D = A*B vext.8 q11, q11, q11, #12 vext.8 q10, q10, q10, #13 veor q8, q8, q9 veor q10, q10, q11 veor q2, q2, q8 veor q2, q2, q10 veor q1,q1,q0 @ Karatsuba post-processing veor q1,q1,q2 veor d1,d1,d2 veor d4,d4,d3 @ Xh|Xl - 256-bit result @ equivalent of reduction_avx from ghash-x86_64.pl vshl.i64 q9,q0,#57 @ 1st phase vshl.i64 q10,q0,#62 veor q10,q10,q9 @ vshl.i64 q9,q0,#63 veor q10, q10, q9 @ veor d1,d1,d20 @ veor d4,d4,d21 vshr.u64 q10,q0,#1 @ 2nd phase veor q2,q2,q0 veor 
q0,q0,q10 @ vshr.u64 q10,q10,#6 vshr.u64 q0,q0,#1 @ veor q0,q0,q2 @ veor q0,q0,q10 @ subs r3,#16 bne .Loop_neon #ifdef __ARMEL__ vrev64.8 q0,q0 #endif sub r0,#16 vst1.64 d1,[r0]! @ write out Xi vst1.64 d0,[r0] bx lr @ bx lr .size gcm_ghash_neon,.-gcm_ghash_neon #endif .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
marvin-hansen/iggy-streaming-system
35,434
thirdparty/crates/ring-0.17.9/pregenerated/p256-armv8-asm-ios64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include "ring-core/arm_arch.h" .section __TEXT,__const .align 5 Lpoly: .quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001 LRR: // 2^512 mod P precomputed for NIST P256 polynomial .quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd Lone_mont: .quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe Lone: .quad 1,0,0,0 Lord: .quad 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000 LordK: .quad 0xccd1c8aaee00bc4f .byte 69,67,80,95,78,73,83,84,90,50,53,54,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .text // void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4], // const BN_ULONG x2[4]); .globl _ecp_nistz256_mul_mont .private_extern _ecp_nistz256_mul_mont .align 4 _ecp_nistz256_mul_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-32]! add x29,sp,#0 stp x19,x20,[sp,#16] ldr x3,[x2] // bp[0] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_mul_mont ldp x19,x20,[sp,#16] ldp x29,x30,[sp],#32 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl _ecp_nistz256_sqr_mont .private_extern _ecp_nistz256_sqr_mont .align 4 _ecp_nistz256_sqr_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-32]! add x29,sp,#0 stp x19,x20,[sp,#16] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sqr_mont ldp x19,x20,[sp,#16] ldp x29,x30,[sp],#32 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl _ecp_nistz256_neg .private_extern _ecp_nistz256_neg .align 4 _ecp_nistz256_neg: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 mov x2,x1 mov x14,xzr // a = 0 mov x15,xzr mov x16,xzr mov x17,xzr adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sub_from ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded // to x4-x7 and b[0] - to x3 .align 4 __ecp_nistz256_mul_mont: mul x14,x4,x3 // a[0]*b[0] umulh x8,x4,x3 mul x15,x5,x3 // a[1]*b[0] umulh x9,x5,x3 mul x16,x6,x3 // a[2]*b[0] umulh x10,x6,x3 mul x17,x7,x3 // a[3]*b[0] umulh x11,x7,x3 ldr x3,[x2,#8] // b[1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adc x19,xzr,x11 mov x20,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr ldr x3,[x2,#8*(1+1)] // b[1+1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr ldr x3,[x2,#8*(2+1)] // b[2+1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr // last reduction subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 adcs x16,x17,x10 // +=acc[0]*0xffff0001 adcs x17,x19,x11 adc x19,x20,xzr adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x19,xzr // did it borrow? csel x14,x14,x8,lo // ret = borrow ? 
ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret // note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded // to x4-x7 .align 4 __ecp_nistz256_sqr_mont: // | | | | | |a1*a0| | // | | | | |a2*a0| | | // | |a3*a2|a3*a0| | | | // | | | |a2*a1| | | | // | | |a3*a1| | | | | // *| | | | | | | | 2| // +|a3*a3|a2*a2|a1*a1|a0*a0| // |--+--+--+--+--+--+--+--| // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is , i.e. follow // // "can't overflow" below mark carrying into high part of // multiplication result, which can't overflow, because it // can never be all ones. mul x15,x5,x4 // a[1]*a[0] umulh x9,x5,x4 mul x16,x6,x4 // a[2]*a[0] umulh x10,x6,x4 mul x17,x7,x4 // a[3]*a[0] umulh x19,x7,x4 adds x16,x16,x9 // accumulate high parts of multiplication mul x8,x6,x5 // a[2]*a[1] umulh x9,x6,x5 adcs x17,x17,x10 mul x10,x7,x5 // a[3]*a[1] umulh x11,x7,x5 adc x19,x19,xzr // can't overflow mul x20,x7,x6 // a[3]*a[2] umulh x1,x7,x6 adds x9,x9,x10 // accumulate high parts of multiplication mul x14,x4,x4 // a[0]*a[0] adc x10,x11,xzr // can't overflow adds x17,x17,x8 // accumulate low parts of multiplication umulh x4,x4,x4 adcs x19,x19,x9 mul x9,x5,x5 // a[1]*a[1] adcs x20,x20,x10 umulh x5,x5,x5 adc x1,x1,xzr // can't overflow adds x15,x15,x15 // acc[1-6]*=2 mul x10,x6,x6 // a[2]*a[2] adcs x16,x16,x16 umulh x6,x6,x6 adcs x17,x17,x17 mul x11,x7,x7 // a[3]*a[3] adcs x19,x19,x19 umulh x7,x7,x7 adcs x20,x20,x20 adcs x1,x1,x1 adc x2,xzr,xzr adds x15,x15,x4 // +a[i]*a[i] adcs x16,x16,x9 adcs x17,x17,x5 adcs x19,x19,x10 adcs x20,x20,x6 lsl x8,x14,#32 adcs x1,x1,x11 lsr x9,x14,#32 adc x2,x2,x7 subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 adcs x16,x17,x10 // +=acc[0]*0xffff0001 adc x17,x11,xzr // can't overflow adds x14,x14,x19 // accumulate upper half adcs x15,x15,x20 adcs x16,x16,x1 adcs x17,x17,x2 adc x19,xzr,xzr adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x19,xzr // did it borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret // Note that __ecp_nistz256_add_to expects both input vectors pre-loaded to // x4-x7 and x8-x11. This is done because it's used in multiple // contexts, e.g. in multiplication by 2 and 3... .align 4 __ecp_nistz256_add_to: adds x14,x14,x8 // ret = a+b adcs x15,x15,x9 adcs x16,x16,x10 adcs x17,x17,x11 adc x1,xzr,xzr // zap x1 adds x8,x14,#1 // subs x8,x4,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x1,xzr // did subtraction borrow? csel x14,x14,x8,lo // ret = borrow ? 
ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret .align 4 __ecp_nistz256_sub_from: ldp x8,x9,[x2] ldp x10,x11,[x2,#16] subs x14,x14,x8 // ret = a-b sbcs x15,x15,x9 sbcs x16,x16,x10 sbcs x17,x17,x11 sbc x1,xzr,xzr // zap x1 subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus adcs x9,x15,x12 adcs x10,x16,xzr adc x11,x17,x13 cmp x1,xzr // did subtraction borrow? csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret csel x15,x15,x9,eq csel x16,x16,x10,eq stp x14,x15,[x0] csel x17,x17,x11,eq stp x16,x17,[x0,#16] ret .align 4 __ecp_nistz256_sub_morf: ldp x8,x9,[x2] ldp x10,x11,[x2,#16] subs x14,x8,x14 // ret = b-a sbcs x15,x9,x15 sbcs x16,x10,x16 sbcs x17,x11,x17 sbc x1,xzr,xzr // zap x1 subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus adcs x9,x15,x12 adcs x10,x16,xzr adc x11,x17,x13 cmp x1,xzr // did subtraction borrow? csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret csel x15,x15,x9,eq csel x16,x16,x10,eq stp x14,x15,[x0] csel x17,x17,x11,eq stp x16,x17,[x0,#16] ret .align 4 __ecp_nistz256_div_by_2: subs x8,x14,#1 // adds x8,x4,#-1 // tmp = a+modulus adcs x9,x15,x12 adcs x10,x16,xzr adcs x11,x17,x13 adc x1,xzr,xzr // zap x1 tst x14,#1 // is a even? csel x14,x14,x8,eq // ret = even ? a : a+modulus csel x15,x15,x9,eq csel x16,x16,x10,eq csel x17,x17,x11,eq csel x1,xzr,x1,eq lsr x14,x14,#1 // ret >>= 1 orr x14,x14,x15,lsl#63 lsr x15,x15,#1 orr x15,x15,x16,lsl#63 lsr x16,x16,#1 orr x16,x16,x17,lsl#63 lsr x17,x17,#1 stp x14,x15,[x0] orr x17,x17,x1,lsl#63 stp x16,x17,[x0,#16] ret .globl _ecp_nistz256_point_double .private_extern _ecp_nistz256_point_double .align 5 _ecp_nistz256_point_double: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] sub sp,sp,#32*4 Ldouble_shortcut: ldp x14,x15,[x1,#32] mov x21,x0 ldp x16,x17,[x1,#48] mov x22,x1 adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] mov x8,x14 ldr x13,[x13,#24] mov x9,x15 ldp x4,x5,[x22,#64] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[x22,#64+16] add x0,sp,#0 bl __ecp_nistz256_add_to // p256_mul_by_2(S, in_y); add x0,sp,#64 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z); ldp x8,x9,[x22] ldp x10,x11,[x22,#16] mov x4,x14 // put Zsqr aside for p256_sub mov x5,x15 mov x6,x16 mov x7,x17 add x0,sp,#32 bl __ecp_nistz256_add_to // p256_add(M, Zsqr, in_x); add x2,x22,#0 mov x14,x4 // restore Zsqr mov x15,x5 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont mov x16,x6 mov x17,x7 ldp x6,x7,[sp,#0+16] add x0,sp,#64 bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr); add x0,sp,#0 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S); ldr x3,[x22,#32] ldp x4,x5,[x22,#64] ldp x6,x7,[x22,#64+16] add x2,x22,#32 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y); mov x8,x14 mov x9,x15 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[sp,#0+16] add x0,x21,#64 bl __ecp_nistz256_add_to // p256_mul_by_2(res_z, tmp0); add x0,sp,#96 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S); ldr x3,[sp,#64] // forward load for p256_mul_mont ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x0,x21,#32 bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0); add x2,sp,#64 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr); mov x8,x14 // duplicate M mov x9,x15 mov x10,x16 mov x11,x17 mov x4,x14 // put M aside mov x5,x15 mov x6,x16 mov x7,x17 add x0,sp,#32 bl __ecp_nistz256_add_to mov x8,x4 // restore M mov x9,x5 ldr 
x3,[x22] // forward load for p256_mul_mont mov x10,x6 ldp x4,x5,[sp,#0] mov x11,x7 ldp x6,x7,[sp,#0+16] bl __ecp_nistz256_add_to // p256_mul_by_3(M, M); add x2,x22,#0 add x0,sp,#0 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x); mov x8,x14 mov x9,x15 ldp x4,x5,[sp,#32] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[sp,#32+16] add x0,sp,#96 bl __ecp_nistz256_add_to // p256_mul_by_2(tmp0, S); add x0,x21,#0 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M); add x2,sp,#96 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0); add x2,sp,#0 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x); ldr x3,[sp,#32] mov x4,x14 // copy S mov x5,x15 mov x6,x16 mov x7,x17 add x2,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M); add x2,x21,#32 add x0,x21,#32 bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y); add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .globl _ecp_nistz256_point_add .private_extern _ecp_nistz256_point_add .align 5 _ecp_nistz256_point_add: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#32*12 ldp x4,x5,[x2,#64] // in2_z ldp x6,x7,[x2,#64+16] mov x21,x0 mov x22,x1 mov x23,x2 adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] ldr x13,[x13,#24] orr x8,x4,x5 orr x10,x6,x7 orr x25,x8,x10 cmp x25,#0 csetm x25,ne // ~in2infty add x0,sp,#192 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z); ldp x4,x5,[x22,#64] // in1_z ldp x6,x7,[x22,#64+16] orr x8,x4,x5 orr x10,x6,x7 orr x24,x8,x10 cmp x24,#0 csetm x24,ne // ~in1infty add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z); ldr x3,[x23,#64] ldp x4,x5,[sp,#192] ldp x6,x7,[sp,#192+16] add x2,x23,#64 add x0,sp,#320 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z); ldr x3,[x22,#64] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,x22,#64 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z); ldr x3,[x22,#32] ldp x4,x5,[sp,#320] ldp x6,x7,[sp,#320+16] add x2,x22,#32 add x0,sp,#320 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y); ldr x3,[x23,#32] ldp x4,x5,[sp,#352] ldp x6,x7,[sp,#352+16] add x2,x23,#32 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y); add x2,sp,#320 ldr x3,[sp,#192] // forward load for p256_mul_mont ldp x4,x5,[x22] ldp x6,x7,[x22,#16] add x0,sp,#160 bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1); orr x14,x14,x15 // see if result is zero orr x16,x16,x17 orr x26,x14,x16 // ~is_equal(S1,S2) add x2,sp,#192 add x0,sp,#256 bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr); ldr x3,[sp,#128] ldp x4,x5,[x23] ldp x6,x7,[x23,#16] add x2,sp,#128 add x0,sp,#288 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr); add x2,sp,#256 ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont ldp x6,x7,[sp,#160+16] add x0,sp,#96 bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1); orr x14,x14,x15 // see if result is zero orr x16,x16,x17 orr x14,x14,x16 // ~is_equal(U1,U2) mvn x27,x24 // -1/0 -> 0/-1 mvn x28,x25 // -1/0 -> 0/-1 orr x14,x14,x27 orr x14,x14,x28 orr x14,x14,x26 cbnz x14,Ladd_proceed // if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2)) Ladd_double: mov x1,x22 mov x0,x21 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] add sp,sp,#256 // #256 is from #32*(12-4). 
difference in stack frames b Ldouble_shortcut .align 4 Ladd_proceed: add x0,sp,#192 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R); ldr x3,[x22,#64] ldp x4,x5,[sp,#96] ldp x6,x7,[sp,#96+16] add x2,x22,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z); ldp x4,x5,[sp,#96] ldp x6,x7,[sp,#96+16] add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H); ldr x3,[x23,#64] ldp x4,x5,[sp,#64] ldp x6,x7,[sp,#64+16] add x2,x23,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z); ldr x3,[sp,#96] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,sp,#96 add x0,sp,#224 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H); ldr x3,[sp,#128] ldp x4,x5,[sp,#256] ldp x6,x7,[sp,#256+16] add x2,sp,#128 add x0,sp,#288 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr); mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 add x0,sp,#128 bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2); add x2,sp,#192 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr); add x2,sp,#224 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub); add x2,sp,#288 ldr x3,[sp,#224] // forward load for p256_mul_mont ldp x4,x5,[sp,#320] ldp x6,x7,[sp,#320+16] add x0,sp,#32 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x); add x2,sp,#224 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub); ldr x3,[sp,#160] ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x2,sp,#160 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R); add x2,sp,#352 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2); ldp x4,x5,[sp,#0] // res ldp x6,x7,[sp,#0+16] ldp x8,x9,[x23] // in2 ldp x10,x11,[x23,#16] ldp x14,x15,[x22,#0] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#0+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+0+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+0+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#0+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#0+48] stp x14,x15,[x21,#0] stp x16,x17,[x21,#0+16] ldp x14,x15,[x22,#32] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#32+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+32+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+32+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#32+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#32+48] stp x14,x15,[x21,#32] stp x16,x17,[x21,#32+16] ldp x14,x15,[x22,#64] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#64+16] csel x8,x4,x8,ne csel x9,x5,x9,ne csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? csel x14,x8,x14,ne csel x15,x9,x15,ne csel x16,x10,x16,ne csel x17,x11,x17,ne stp x14,x15,[x21,#64] stp x16,x17,[x21,#64+16] Ladd_done: add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .globl _ecp_nistz256_point_add_affine .private_extern _ecp_nistz256_point_add_affine .align 5 _ecp_nistz256_point_add_affine: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-80]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] sub sp,sp,#32*10 mov x21,x0 mov x22,x1 mov x23,x2 adrp x13,Lpoly@PAGE add x13,x13,Lpoly@PAGEOFF ldr x12,[x13,#8] ldr x13,[x13,#24] ldp x4,x5,[x1,#64] // in1_z ldp x6,x7,[x1,#64+16] orr x8,x4,x5 orr x10,x6,x7 orr x24,x8,x10 cmp x24,#0 csetm x24,ne // ~in1infty ldp x14,x15,[x2] // in2_x ldp x16,x17,[x2,#16] ldp x8,x9,[x2,#32] // in2_y ldp x10,x11,[x2,#48] orr x14,x14,x15 orr x16,x16,x17 orr x8,x8,x9 orr x10,x10,x11 orr x14,x14,x16 orr x8,x8,x10 orr x25,x14,x8 cmp x25,#0 csetm x25,ne // ~in2infty add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z); mov x4,x14 mov x5,x15 mov x6,x16 mov x7,x17 ldr x3,[x23] add x2,x23,#0 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x); add x2,x22,#0 ldr x3,[x22,#64] // forward load for p256_mul_mont ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x0,sp,#160 bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x); add x2,x22,#64 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z); ldr x3,[x22,#64] ldp x4,x5,[sp,#160] ldp x6,x7,[sp,#160+16] add x2,x22,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z); ldr x3,[x23,#32] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,x23,#32 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y); add x2,x22,#32 ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont ldp x6,x7,[sp,#160+16] add x0,sp,#192 bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y); add x0,sp,#224 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H); ldp x4,x5,[sp,#192] ldp x6,x7,[sp,#192+16] add x0,sp,#288 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R); ldr x3,[sp,#160] ldp x4,x5,[sp,#224] ldp x6,x7,[sp,#224+16] add x2,sp,#160 add x0,sp,#256 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H); ldr x3,[x22] ldp x4,x5,[sp,#224] ldp x6,x7,[sp,#224+16] add x2,x22,#0 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in1_x, Hsqr); mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 add x0,sp,#224 bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2); add x2,sp,#288 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr); add x2,sp,#256 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub); add x2,sp,#96 ldr x3,[x22,#32] // forward load for p256_mul_mont ldp x4,x5,[sp,#256] ldp x6,x7,[sp,#256+16] add x0,sp,#32 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x); add x2,x22,#32 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub); ldr x3,[sp,#192] ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x2,sp,#192 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R); add x2,sp,#128 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2); ldp x4,x5,[sp,#0] // res ldp x6,x7,[sp,#0+16] ldp x8,x9,[x23] // in2 ldp x10,x11,[x23,#16] ldp x14,x15,[x22,#0] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#0+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+0+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+0+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#0+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#0+48] stp x14,x15,[x21,#0] stp x16,x17,[x21,#0+16] adrp x23,Lone_mont@PAGE-64 add x23,x23,Lone_mont@PAGEOFF-64 ldp x14,x15,[x22,#32] // in1 cmp x24,#0 // ~, remember? 
ldp x16,x17,[x22,#32+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+32+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+32+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#32+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#32+48] stp x14,x15,[x21,#32] stp x16,x17,[x21,#32+16] ldp x14,x15,[x22,#64] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#64+16] csel x8,x4,x8,ne csel x9,x5,x9,ne csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? csel x14,x8,x14,ne csel x15,x9,x15,ne csel x16,x10,x16,ne csel x17,x11,x17,ne stp x14,x15,[x21,#64] stp x16,x17,[x21,#64+16] add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x29,x30,[sp],#80 AARCH64_VALIDATE_LINK_REGISTER ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4], // uint64_t b[4]); .globl _ecp_nistz256_ord_mul_mont .private_extern _ecp_nistz256_ord_mul_mont .align 4 _ecp_nistz256_ord_mul_mont: AARCH64_VALID_CALL_TARGET // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] adrp x23,Lord@PAGE add x23,x23,Lord@PAGEOFF ldr x3,[x2] // bp[0] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] ldp x12,x13,[x23,#0] ldp x21,x22,[x23,#16] ldr x23,[x23,#32] mul x14,x4,x3 // a[0]*b[0] umulh x8,x4,x3 mul x15,x5,x3 // a[1]*b[0] umulh x9,x5,x3 mul x16,x6,x3 // a[2]*b[0] umulh x10,x6,x3 mul x17,x7,x3 // a[3]*b[0] umulh x19,x7,x3 mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts of multiplication adcs x16,x16,x9 adcs x17,x17,x10 adc x19,x19,xzr mov x20,xzr ldr x3,[x2,#8*1] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr ldr x3,[x2,#8*2] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr ldr x3,[x2,#8*3] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 
adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr lsl x8,x24,#32 // last reduction subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr subs x8,x14,x12 // ret -= modulus sbcs x9,x15,x13 sbcs x10,x16,x21 sbcs x11,x17,x22 sbcs xzr,x19,xzr csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldr x29,[sp],#64 ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4], // uint64_t rep); .globl _ecp_nistz256_ord_sqr_mont .private_extern _ecp_nistz256_ord_sqr_mont .align 4 _ecp_nistz256_ord_sqr_mont: AARCH64_VALID_CALL_TARGET // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] adrp x23,Lord@PAGE add x23,x23,Lord@PAGEOFF ldp x4,x5,[x1] ldp x6,x7,[x1,#16] ldp x12,x13,[x23,#0] ldp x21,x22,[x23,#16] ldr x23,[x23,#32] b Loop_ord_sqr .align 4 Loop_ord_sqr: sub x2,x2,#1 //////////////////////////////////////////////////////////////// // | | | | | |a1*a0| | // | | | | |a2*a0| | | // | |a3*a2|a3*a0| | | | // | | | |a2*a1| | | | // | | |a3*a1| | | | | // *| | | | | | | | 2| // +|a3*a3|a2*a2|a1*a1|a0*a0| // |--+--+--+--+--+--+--+--| // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is , i.e. follow // // "can't overflow" below mark carrying into high part of // multiplication result, which can't overflow, because it // can never be all ones. 
mul x15,x5,x4 // a[1]*a[0] umulh x9,x5,x4 mul x16,x6,x4 // a[2]*a[0] umulh x10,x6,x4 mul x17,x7,x4 // a[3]*a[0] umulh x19,x7,x4 adds x16,x16,x9 // accumulate high parts of multiplication mul x8,x6,x5 // a[2]*a[1] umulh x9,x6,x5 adcs x17,x17,x10 mul x10,x7,x5 // a[3]*a[1] umulh x11,x7,x5 adc x19,x19,xzr // can't overflow mul x20,x7,x6 // a[3]*a[2] umulh x1,x7,x6 adds x9,x9,x10 // accumulate high parts of multiplication mul x14,x4,x4 // a[0]*a[0] adc x10,x11,xzr // can't overflow adds x17,x17,x8 // accumulate low parts of multiplication umulh x4,x4,x4 adcs x19,x19,x9 mul x9,x5,x5 // a[1]*a[1] adcs x20,x20,x10 umulh x5,x5,x5 adc x1,x1,xzr // can't overflow adds x15,x15,x15 // acc[1-6]*=2 mul x10,x6,x6 // a[2]*a[2] adcs x16,x16,x16 umulh x6,x6,x6 adcs x17,x17,x17 mul x11,x7,x7 // a[3]*a[3] adcs x19,x19,x19 umulh x7,x7,x7 adcs x20,x20,x20 adcs x1,x1,x1 adc x3,xzr,xzr adds x15,x15,x4 // +a[i]*a[i] mul x24,x14,x23 adcs x16,x16,x9 adcs x17,x17,x5 adcs x19,x19,x10 adcs x20,x20,x6 adcs x1,x1,x11 adc x3,x3,x7 subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adc x17,xzr,x24 // can't overflow mul x11,x14,x23 lsl x8,x24,#32 subs x15,x15,x24 lsr x9,x24,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x11 mul x10,x13,x11 umulh x24,x13,x11 adcs x10,x10,x9 adc x24,x24,xzr adds x14,x15,x10 adcs x15,x16,x24 adcs x16,x17,x11 adc x17,xzr,x11 // can't overflow mul x24,x14,x23 lsl x8,x11,#32 subs x15,x15,x11 lsr x9,x11,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adc x17,xzr,x24 // can't overflow mul x11,x14,x23 lsl x8,x24,#32 subs x15,x15,x24 lsr x9,x24,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x11 mul x10,x13,x11 umulh x24,x13,x11 adcs x10,x10,x9 adc x24,x24,xzr adds x14,x15,x10 adcs x15,x16,x24 adcs x16,x17,x11 adc x17,xzr,x11 // can't overflow lsl x8,x11,#32 subs x15,x15,x11 lsr x9,x11,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow adds x14,x14,x19 // accumulate upper half adcs x15,x15,x20 adcs x16,x16,x1 adcs x17,x17,x3 adc x19,xzr,xzr subs x8,x14,x12 // ret -= modulus sbcs x9,x15,x13 sbcs x10,x16,x21 sbcs x11,x17,x22 sbcs xzr,x19,xzr csel x4,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x5,x15,x9,lo csel x6,x16,x10,lo csel x7,x17,x11,lo cbnz x2,Loop_ord_sqr stp x4,x5,[x0] stp x6,x7,[x0,#16] ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldr x29,[sp],#64 ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_select_w5(uint64_t *val, uint64_t *in_t, int index); .globl _ecp_nistz256_select_w5 .private_extern _ecp_nistz256_select_w5 .align 4 _ecp_nistz256_select_w5: AARCH64_VALID_CALL_TARGET // x10 := x0 // w9 := 0; loop counter and incremented internal index mov x10, x0 mov w9, #0 // [v16-v21] := 0 movi v16.16b, #0 movi v17.16b, #0 movi v18.16b, #0 movi v19.16b, #0 movi v20.16b, #0 movi v21.16b, #0 Lselect_w5_loop: // Loop 16 times. // Increment index (loop counter); tested at the end of the loop add w9, w9, #1 // [v22-v27] := Load a (3*256-bit = 6*128-bit) table entry starting at x1 // and advance x1 to point to the next entry ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64 // x11 := (w9 == w2)? All 1s : All 0s cmp w9, w2 csetm x11, eq // continue loading ... 
ld1 {v26.2d, v27.2d}, [x1],#32 // duplicate mask_64 into Mask (all 0s or all 1s) dup v3.2d, x11 // [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19] // i.e., values in output registers will remain the same if w9 != w2 bit v16.16b, v22.16b, v3.16b bit v17.16b, v23.16b, v3.16b bit v18.16b, v24.16b, v3.16b bit v19.16b, v25.16b, v3.16b bit v20.16b, v26.16b, v3.16b bit v21.16b, v27.16b, v3.16b // If bit #4 is not 0 (i.e. idx_ctr < 16) loop back tbz w9, #4, Lselect_w5_loop // Write [v16-v21] to memory at the output pointer st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x10],#64 st1 {v20.2d, v21.2d}, [x10] ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_select_w7(uint64_t *val, uint64_t *in_t, int index); .globl _ecp_nistz256_select_w7 .private_extern _ecp_nistz256_select_w7 .align 4 _ecp_nistz256_select_w7: AARCH64_VALID_CALL_TARGET // w9 := 0; loop counter and incremented internal index mov w9, #0 // [v16-v21] := 0 movi v16.16b, #0 movi v17.16b, #0 movi v18.16b, #0 movi v19.16b, #0 Lselect_w7_loop: // Loop 64 times. // Increment index (loop counter); tested at the end of the loop add w9, w9, #1 // [v22-v25] := Load a (2*256-bit = 4*128-bit) table entry starting at x1 // and advance x1 to point to the next entry ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64 // x11 := (w9 == w2)? All 1s : All 0s cmp w9, w2 csetm x11, eq // duplicate mask_64 into Mask (all 0s or all 1s) dup v3.2d, x11 // [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19] // i.e., values in output registers will remain the same if w9 != w2 bit v16.16b, v22.16b, v3.16b bit v17.16b, v23.16b, v3.16b bit v18.16b, v24.16b, v3.16b bit v19.16b, v25.16b, v3.16b // If bit #6 is not 0 (i.e. idx_ctr < 64) loop back tbz w9, #6, Lselect_w7_loop // Write [v16-v19] to memory at the output pointer st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x0] ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
marvin-hansen/iggy-streaming-system
74,004
thirdparty/crates/ring-0.17.9/pregenerated/chacha20_poly1305_armv8-ios64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include <ring-core/arm_arch.h> .section __TEXT,__const .align 7 Lchacha20_consts: .byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' Linc: .long 1,2,3,4 Lrol8: .byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 Lclamp: .quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC .text .align 6 Lpoly_hash_ad_internal: .cfi_startproc cbnz x4, Lpoly_hash_intro ret Lpoly_hash_intro: cmp x4, #16 b.lt Lpoly_hash_ad_tail ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #16 b Lpoly_hash_ad_internal Lpoly_hash_ad_tail: cbz x4, Lpoly_hash_ad_ret eor v20.16b, v20.16b, v20.16b // Use T0 to load the AAD sub x4, x4, #1 Lpoly_hash_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, x4] mov v20.b[0], w11 subs x4, x4, #1 b.ge Lpoly_hash_tail_16_compose mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lpoly_hash_ad_ret: ret .cfi_endproc ///////////////////////////////// // // void chacha20_poly1305_seal(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *seal_data); // .globl _chacha20_poly1305_seal .private_extern _chacha20_poly1305_seal .align 6 _chacha20_poly1305_seal: AARCH64_SIGN_LINK_REGISTER .cfi_startproc stp x29, x30, [sp, #-80]! .cfi_def_cfa_offset 80 .cfi_offset w30, -72 .cfi_offset w29, -80 mov x29, sp // We probably could do .cfi_def_cfa w29, 80 at this point, but since // we don't actually use the frame pointer like that, it's probably not // worth bothering. 
stp d8, d9, [sp, #16] stp d10, d11, [sp, #32] stp d12, d13, [sp, #48] stp d14, d15, [sp, #64] .cfi_offset b15, -8 .cfi_offset b14, -16 .cfi_offset b13, -24 .cfi_offset b12, -32 .cfi_offset b11, -40 .cfi_offset b10, -48 .cfi_offset b9, -56 .cfi_offset b8, -64 adrp x11, Lchacha20_consts@PAGE add x11, x11, Lchacha20_consts@PAGEOFF ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values ld1 {v28.16b - v30.16b}, [x5] mov x15, #1 // Prepare the Poly1305 state mov x8, #0 mov x9, #0 mov x10, #0 ldr x12, [x5, #56] // The total cipher text length includes extra_in_len add x12, x12, x2 mov v31.d[0], x4 // Store the input and aad lengths mov v31.d[1], x12 cmp x2, #128 b.le Lseal_128 // Optimization for smaller buffers // Initially we prepare 5 ChaCha20 blocks. Four to encrypt up to 4 blocks (256 bytes) of plaintext, // and one for the Poly1305 R and S keys. The first four blocks (A0-A3..D0-D3) are computed vertically, // the fifth block (A4-D4) horizontally. ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b sub x5, x5, #32 mov x6, #10 .align 5 Lseal_init_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, 
v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x6, x6, #1 b.hi Lseal_init_rounds add v15.4s, v15.4s, v25.4s mov x11, #4 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s and v4.16b, v4.16b, v27.16b add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s mov x16, v4.d[0] // Move the R key to GPRs mov x17, v4.d[1] mov v27.16b, v9.16b // Store the S key bl Lpoly_hash_ad_internal mov x3, x0 cmp x2, #256 b.le Lseal_tail ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 
{v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #256 mov x6, #4 // In the first run of the loop we need to hash 256 bytes, therefore we hash one block for the first 4 rounds mov x7, #6 // and two blocks for the remaining 6, for a total of (1 * 4 + 2 * 6) * 16 = 256 Lseal_main_loop: adrp x11, Lchacha20_consts@PAGE add x11, x11, Lchacha20_consts@PAGEOFF ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s sub x5, x5, #32 .align 5 Lseal_main_loop_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 
adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x6, x6, #1 b.ge Lseal_main_loop_rounds ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most subs x7, x7, #1 b.gt Lseal_main_loop_rounds eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s add v15.4s, v15.4s, v25.4s mov x11, #5 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 
v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s add v14.4s, v14.4s, v29.4s add v19.4s, v19.4s, v30.4s cmp x2, #320 b.le Lseal_tail ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v4.16b eor v21.16b, v21.16b, v9.16b eor v22.16b, v22.16b, v14.16b eor v23.16b, v23.16b, v19.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #320 mov x6, #0 mov x7, #10 // For the remainder of the loop we always hash and encrypt 320 bytes per iteration b Lseal_main_loop Lseal_tail: // This part of the function handles the storage and authentication of the last [0,320) bytes // We assume A0-A4 ... D0-D4 hold at least inl (320 max) bytes of the stream data. 
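// Tail layout: v0/v5/v10/v15 through v4/v9/v14/v19 still hold up to five
// unused 64-byte key-stream blocks. Whole 64-byte blocks are encrypted,
// absorbed into Poly1305 sixteen bytes at a time and stored, with the
// remaining key-stream registers shifted down after each block; 16-byte
// blocks are then handled one at a time, and any final partial block is
// padded (together with extra_in, if present) before it is hashed.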
cmp x2, #64 b.lt Lseal_tail_64 // Store and authenticate 64B blocks per iteration ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v22.d[0] mov x12, v22.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v23.d[0] mov x12, v23.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 // Shift the state left by 64 bytes for the next iteration of the loop mov v0.16b, v1.16b 
mov v5.16b, v6.16b mov v10.16b, v11.16b mov v15.16b, v16.16b mov v1.16b, v2.16b mov v6.16b, v7.16b mov v11.16b, v12.16b mov v16.16b, v17.16b mov v2.16b, v3.16b mov v7.16b, v8.16b mov v12.16b, v13.16b mov v17.16b, v18.16b mov v3.16b, v4.16b mov v8.16b, v9.16b mov v13.16b, v14.16b mov v18.16b, v19.16b b Lseal_tail Lseal_tail_64: ldp x3, x4, [x5, #48] // extra_in_len and extra_in_ptr // Here we handle the last [0,64) bytes of plaintext cmp x2, #16 b.lt Lseal_tail_16 // Each iteration encrypts and authenticates a 16B block ld1 {v20.16b}, [x1], #16 eor v20.16b, v20.16b, v0.16b mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most st1 {v20.16b}, [x0], #16 sub x2, x2, #16 // Shift the state left by 16 bytes for the next iteration of the loop mov v0.16b, v5.16b mov v5.16b, v10.16b mov v10.16b, v15.16b b Lseal_tail_64 Lseal_tail_16: // Here we handle the last [0,16) bytes of ciphertext that require a padded block cbz x2, Lseal_hash_extra eor v20.16b, v20.16b, v20.16b // Use T0 to load the plaintext/extra in eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask that will only mask the ciphertext bytes not v22.16b, v20.16b mov x6, x2 add x1, x1, x2 cbz x4, Lseal_tail_16_compose // No extra data to pad with, zero padding mov x7, #16 // We need to load some extra_in first for padding sub x7, x7, x2 cmp x4, x7 csel x7, x4, x7, lt // Load the minimum of extra_in_len and the amount needed to fill the register mov x12, x7 add x3, x3, x7 sub x4, x4, x7 Lseal_tail16_compose_extra_in: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, #-1]! mov v20.b[0], w11 subs x7, x7, #1 b.gt Lseal_tail16_compose_extra_in add x3, x3, x12 Lseal_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x1, #-1]!
mov v20.b[0], w11 ext v21.16b, v22.16b, v21.16b, #15 subs x2, x2, #1 b.gt Lseal_tail_16_compose and v0.16b, v0.16b, v21.16b eor v20.16b, v20.16b, v0.16b mov v21.16b, v20.16b Lseal_tail_16_store: umov w11, v20.b[0] strb w11, [x0], #1 ext v20.16b, v20.16b, v20.16b, #1 subs x6, x6, #1 b.gt Lseal_tail_16_store // Hash in the final ct block concatenated with extra_in mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lseal_hash_extra: cbz x4, Lseal_finalize Lseal_hash_extra_loop: cmp x4, #16 b.lt Lseal_hash_extra_tail ld1 {v20.16b}, [x3], #16 mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #16 b Lseal_hash_extra_loop Lseal_hash_extra_tail: cbz x4, Lseal_finalize eor v20.16b, v20.16b, v20.16b // Use T0 to load the remaining extra ciphertext add x3, x3, x4 Lseal_hash_extra_load: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x3, #-1]! 
mov v20.b[0], w11 subs x4, x4, #1 b.gt Lseal_hash_extra_load // Hash in the final padded extra_in block mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lseal_finalize: mov x11, v31.d[0] mov x12, v31.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most // Final reduction step sub x12, xzr, x15 orr x13, xzr, #3 subs x11, x8, #-5 sbcs x12, x9, x12 sbcs x13, x10, x13 csel x8, x11, x8, cs csel x9, x12, x9, cs csel x10, x13, x10, cs mov x11, v27.d[0] mov x12, v27.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 stp x8, x9, [x5] ldp d8, d9, [sp, #16] ldp d10, d11, [sp, #32] ldp d12, d13, [sp, #48] ldp d14, d15, [sp, #64] .cfi_restore b15 .cfi_restore b14 .cfi_restore b13 .cfi_restore b12 .cfi_restore b11 .cfi_restore b10 .cfi_restore b9 .cfi_restore b8 ldp x29, x30, [sp], 80 .cfi_restore w29 .cfi_restore w30 .cfi_def_cfa_offset 0 AARCH64_VALIDATE_LINK_REGISTER ret Lseal_128: // On some architectures preparing 5 blocks for small buffers is wasteful eor v25.16b, v25.16b, v25.16b mov x11, #1 mov v25.s[0], w11 mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v17.16b, v30.16b add v15.4s, v17.4s, v25.4s add v16.4s, v15.4s, v25.4s mov x6, #10 Lseal_128_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add
v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x6, x6, #1 b.hi Lseal_128_rounds add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s // Only the first 32 bytes of the third block (counter = 0) are needed, // so skip updating v12 and v17. add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v30.4s, v30.4s, v25.4s add v15.4s, v15.4s, v30.4s add v30.4s, v30.4s, v25.4s add v16.4s, v16.4s, v30.4s and v2.16b, v2.16b, v27.16b mov x16, v2.d[0] // Move the R key to GPRs mov x17, v2.d[1] mov v27.16b, v7.16b // Store the S key bl Lpoly_hash_ad_internal b Lseal_tail .cfi_endproc ///////////////////////////////// // // void chacha20_poly1305_open(uint8_t *pt, uint8_t *ct, size_t len_in, uint8_t *ad, size_t len_ad, union open_data *aead_data); // .globl _chacha20_poly1305_open .private_extern _chacha20_poly1305_open .align 6 _chacha20_poly1305_open: AARCH64_SIGN_LINK_REGISTER .cfi_startproc stp x29, x30, [sp, #-80]! .cfi_def_cfa_offset 80 .cfi_offset w30, -72 .cfi_offset w29, -80 mov x29, sp // We probably could do .cfi_def_cfa w29, 80 at this point, but since // we don't actually use the frame pointer like that, it's probably not // worth bothering. 
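// Arguments arrive per AAPCS64, matching the prototype above:
//   x0 = pt, x1 = ct, x2 = len_in, x3 = ad, x4 = len_ad, x5 = aead_data.
// x5 points into the open_data union: the ChaCha20 key/nonce words are loaded
// from it below, and the computed Poly1305 tag is stored back through it on
// exit.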
stp d8, d9, [sp, #16] stp d10, d11, [sp, #32] stp d12, d13, [sp, #48] stp d14, d15, [sp, #64] .cfi_offset b15, -8 .cfi_offset b14, -16 .cfi_offset b13, -24 .cfi_offset b12, -32 .cfi_offset b11, -40 .cfi_offset b10, -48 .cfi_offset b9, -56 .cfi_offset b8, -64 adrp x11, Lchacha20_consts@PAGE add x11, x11, Lchacha20_consts@PAGEOFF ld1 {v24.16b - v27.16b}, [x11] // Load the CONSTS, INC, ROL8 and CLAMP values ld1 {v28.16b - v30.16b}, [x5] mov x15, #1 // Prepare the Poly1305 state mov x8, #0 mov x9, #0 mov x10, #0 mov v31.d[0], x4 // Store the input and aad lengths mov v31.d[1], x2 cmp x2, #128 b.le Lopen_128 // Optimization for smaller buffers // Initially we prepare a single ChaCha20 block for the Poly1305 R and S keys mov v0.16b, v24.16b mov v5.16b, v28.16b mov v10.16b, v29.16b mov v15.16b, v30.16b mov x6, #10 .align 5 Lopen_init_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 subs x6, x6, #1 b.hi Lopen_init_rounds add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s and v0.16b, v0.16b, v27.16b mov x16, v0.d[0] // Move the R key to GPRs mov x17, v0.d[1] mov v27.16b, v5.16b // Store the S key bl Lpoly_hash_ad_internal Lopen_ad_done: mov x3, x1 // Each iteration of the loop hashes 320 bytes, and prepares stream for 320 bytes Lopen_main_loop: cmp x2, #192 b.lt Lopen_tail adrp x11, Lchacha20_consts@PAGE add x11, x11, Lchacha20_consts@PAGEOFF ld4r {v0.4s,v1.4s,v2.4s,v3.4s}, [x11] mov v4.16b, v24.16b ld4r {v5.4s,v6.4s,v7.4s,v8.4s}, [x5], #16 mov v9.16b, v28.16b ld4r {v10.4s,v11.4s,v12.4s,v13.4s}, [x5], #16 mov v14.16b, v29.16b ld4r {v15.4s,v16.4s,v17.4s,v18.4s}, [x5] sub x5, x5, #32 add v15.4s, v15.4s, v25.4s mov v19.16b, v30.16b eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s lsr x4, x2, #4 // How many whole blocks we have to hash, will always be at least 12 sub x4, x4, #10 mov x7, #10 subs x6, x7, x4 subs x6, x7, x4 // itr1 can be negative if we have more than 320 bytes to hash csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are full cbz x7, Lopen_main_loop_rounds_short .align 5 Lopen_main_loop_rounds: ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14
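// The vector code that follows computes five ChaCha20 blocks per main-loop
// iteration: four of them word-sliced across v0-v3 / v5-v8 / v10-v13 /
// v15-v18 (transposed back with zip1/zip2 once the rounds finish) and a fifth
// in row form in v4/v9/v14/v19. Rotations use rev32 for <<<16, ushr+sli pairs
// for <<<12 and <<<7, and a tbl byte shuffle through v26 for <<<8. Because
// this is the open (decrypt) path the ciphertext to authenticate is already
// available, so Poly1305 blocks are interleaved with the rounds: a full
// iteration absorbs two 16-byte blocks, the _short entry only the one between
// the two half-rounds.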
adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lopen_main_loop_rounds_short: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v9.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v18.8h, v18.8h rev32 v19.8h, v19.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b eor v8.16b, v8.16b, v13.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v9.4s, #20 sli v8.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s add v3.4s, v3.4s, v7.4s add v4.4s, v4.4s, v8.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b eor v18.16b, v18.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v18.16b, {v18.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s add v13.4s, v13.4s, v18.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v14.16b ushr v9.4s, v8.4s, #25 sli v9.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #4 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #12 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most add v0.4s, v0.4s, v6.4s add v1.4s, v1.4s, v7.4s add v2.4s, v2.4s, v8.4s add v3.4s, v3.4s, v5.4s add v4.4s, v4.4s, v9.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b rev32 v18.8h, v18.8h rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h rev32 v19.8h, v19.8h add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor 
v6.16b, v6.16b, v12.16b eor v7.16b, v7.16b, v13.16b eor v8.16b, v8.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v9.16b, v9.16b, v14.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 ushr v7.4s, v8.4s, #20 sli v7.4s, v8.4s, #12 ushr v8.4s, v5.4s, #20 sli v8.4s, v5.4s, #12 ushr v5.4s, v9.4s, #20 sli v5.4s, v9.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s add v3.4s, v3.4s, v8.4s add v4.4s, v4.4s, v5.4s eor v18.16b, v18.16b, v0.16b eor v15.16b, v15.16b, v1.16b eor v16.16b, v16.16b, v2.16b eor v17.16b, v17.16b, v3.16b eor v19.16b, v19.16b, v4.16b tbl v18.16b, {v18.16b}, v26.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b tbl v19.16b, {v19.16b}, v26.16b add v12.4s, v12.4s, v18.4s add v13.4s, v13.4s, v15.4s add v10.4s, v10.4s, v16.4s add v11.4s, v11.4s, v17.4s add v14.4s, v14.4s, v19.4s eor v20.16b, v20.16b, v12.16b eor v6.16b, v6.16b, v13.16b eor v7.16b, v7.16b, v10.16b eor v8.16b, v8.16b, v11.16b eor v5.16b, v5.16b, v14.16b ushr v9.4s, v5.4s, #25 sli v9.4s, v5.4s, #7 ushr v5.4s, v8.4s, #25 sli v5.4s, v8.4s, #7 ushr v8.4s, v7.4s, #25 sli v8.4s, v7.4s, #7 ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v9.16b, v9.16b, v9.16b, #12 ext v14.16b, v14.16b, v14.16b, #8 ext v19.16b, v19.16b, v19.16b, #4 subs x7, x7, #1 b.gt Lopen_main_loop_rounds subs x6, x6, #1 b.ge Lopen_main_loop_rounds_short eor v20.16b, v20.16b, v20.16b //zero not v21.16b, v20.16b // -1 sub v21.4s, v25.4s, v21.4s // Add +1 ext v20.16b, v21.16b, v20.16b, #12 // Get the last element (counter) add v19.4s, v19.4s, v20.4s add v15.4s, v15.4s, v25.4s mov x11, #5 dup v20.4s, w11 add v25.4s, v25.4s, v20.4s zip1 v20.4s, v0.4s, v1.4s zip2 v21.4s, v0.4s, v1.4s zip1 v22.4s, v2.4s, v3.4s zip2 v23.4s, v2.4s, v3.4s zip1 v0.2d, v20.2d, v22.2d zip2 v1.2d, v20.2d, v22.2d zip1 v2.2d, v21.2d, v23.2d zip2 v3.2d, v21.2d, v23.2d zip1 v20.4s, v5.4s, v6.4s zip2 v21.4s, v5.4s, v6.4s zip1 v22.4s, v7.4s, v8.4s zip2 v23.4s, v7.4s, v8.4s zip1 v5.2d, v20.2d, v22.2d zip2 v6.2d, v20.2d, v22.2d zip1 v7.2d, v21.2d, v23.2d zip2 v8.2d, v21.2d, v23.2d zip1 v20.4s, v10.4s, v11.4s zip2 v21.4s, v10.4s, v11.4s zip1 v22.4s, v12.4s, v13.4s zip2 v23.4s, v12.4s, v13.4s zip1 v10.2d, v20.2d, v22.2d zip2 v11.2d, v20.2d, v22.2d zip1 v12.2d, v21.2d, v23.2d zip2 v13.2d, v21.2d, v23.2d zip1 v20.4s, v15.4s, v16.4s zip2 v21.4s, v15.4s, v16.4s zip1 v22.4s, v17.4s, v18.4s zip2 v23.4s, v17.4s, v18.4s zip1 v15.2d, v20.2d, v22.2d zip2 v16.2d, v20.2d, v22.2d zip1 v17.2d, v21.2d, v23.2d zip2 v18.2d, v21.2d, v23.2d add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v1.4s, v1.4s, v24.4s add v6.4s, v6.4s, v28.4s add v11.4s, v11.4s, v29.4s add v16.4s, v16.4s, v30.4s add v2.4s, v2.4s, v24.4s add v7.4s, v7.4s, v28.4s add v12.4s, v12.4s, v29.4s add v17.4s, v17.4s, v30.4s add v3.4s, v3.4s, v24.4s add v8.4s, v8.4s, v28.4s add v13.4s, v13.4s, v29.4s add v18.4s, v18.4s, v30.4s add v4.4s, v4.4s, v24.4s add v9.4s, v9.4s, v28.4s add v14.4s, v14.4s, v29.4s add v19.4s, v19.4s, v30.4s // We can always safely store 192 bytes ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - 
v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #192 mov v0.16b, v3.16b mov v5.16b, v8.16b mov v10.16b, v13.16b mov v15.16b, v18.16b cmp x2, #64 b.lt Lopen_tail_64_store ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v3.16b eor v21.16b, v21.16b, v8.16b eor v22.16b, v22.16b, v13.16b eor v23.16b, v23.16b, v18.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 mov v0.16b, v4.16b mov v5.16b, v9.16b mov v10.16b, v14.16b mov v15.16b, v19.16b cmp x2, #64 b.lt Lopen_tail_64_store ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v4.16b eor v21.16b, v21.16b, v9.16b eor v22.16b, v22.16b, v14.16b eor v23.16b, v23.16b, v19.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 b Lopen_main_loop Lopen_tail: cbz x2, Lopen_finalize lsr x4, x2, #4 // How many whole blocks we have to hash cmp x2, #64 b.le Lopen_tail_64 cmp x2, #128 b.le Lopen_tail_128 Lopen_tail_192: // We need three more blocks mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v15.16b, v30.16b mov v16.16b, v30.16b mov v17.16b, v30.16b eor v23.16b, v23.16b, v23.16b eor v21.16b, v21.16b, v21.16b ins v23.s[0], v25.s[0] ins v21.d[0], x15 add v22.4s, v23.4s, v21.4s add v21.4s, v22.4s, v21.4s add v15.4s, v15.4s, v21.4s add v16.4s, v16.4s, v23.4s add v17.4s, v17.4s, v22.4s mov x7, #10 subs x6, x7, x4 // itr1 can be negative if we have more than 160 bytes to hash csel x7, x7, x4, le // if itr1 is zero or less, itr2 should be 10 to indicate all 10 rounds are hashing sub x4, x4, x7 cbz x7, Lopen_tail_192_rounds_no_hash Lopen_tail_192_rounds: ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most Lopen_tail_192_rounds_no_hash: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, 
v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x7, x7, #1 b.gt Lopen_tail_192_rounds subs x6, x6, #1 b.ge Lopen_tail_192_rounds_no_hash // We hashed 160 bytes at most, may still have 32 bytes left Lopen_tail_192_hash: cbz x4, Lopen_tail_192_hash_done ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #1 b Lopen_tail_192_hash Lopen_tail_192_hash_done: add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v12.4s, v12.4s, v29.4s add v15.4s, v15.4s, v30.4s add v16.4s, v16.4s, v30.4s add v17.4s, v17.4s, v30.4s add v15.4s, v15.4s, v21.4s add v16.4s, v16.4s, v23.4s add v17.4s, v17.4s, v22.4s ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, 
v2.16b eor v21.16b, v21.16b, v7.16b eor v22.16b, v22.16b, v12.16b eor v23.16b, v23.16b, v17.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #128 b Lopen_tail_64_store Lopen_tail_128: // We need two more blocks mov v0.16b, v24.16b mov v1.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v15.16b, v30.16b mov v16.16b, v30.16b eor v23.16b, v23.16b, v23.16b eor v22.16b, v22.16b, v22.16b ins v23.s[0], v25.s[0] ins v22.d[0], x15 add v22.4s, v22.4s, v23.4s add v15.4s, v15.4s, v22.4s add v16.4s, v16.4s, v23.4s mov x6, #10 sub x6, x6, x4 Lopen_tail_128_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v1.4s, v1.4s, v6.4s eor v16.16b, v16.16b, v1.16b rev32 v16.8h, v16.8h add v11.4s, v11.4s, v16.4s eor v6.16b, v6.16b, v11.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 add v1.4s, v1.4s, v20.4s eor v16.16b, v16.16b, v1.16b tbl v16.16b, {v16.16b}, v26.16b add v11.4s, v11.4s, v16.4s eor v20.16b, v20.16b, v11.16b ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v6.16b, v6.16b, v6.16b, #4 ext v11.16b, v11.16b, v11.16b, #8 ext v16.16b, v16.16b, v16.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 add v1.4s, v1.4s, v6.4s eor v16.16b, v16.16b, v1.16b rev32 v16.8h, v16.8h add v11.4s, v11.4s, v16.4s eor v6.16b, v6.16b, v11.16b ushr v20.4s, v6.4s, #20 sli v20.4s, v6.4s, #12 add v1.4s, v1.4s, v20.4s eor v16.16b, v16.16b, v1.16b tbl v16.16b, {v16.16b}, v26.16b add v11.4s, v11.4s, v16.4s eor v20.16b, v20.16b, v11.16b ushr v6.4s, v20.4s, #25 sli v6.4s, v20.4s, #7 ext v6.16b, v6.16b, v6.16b, #12 ext v11.16b, v11.16b, v11.16b, #8 ext v16.16b, v16.16b, v16.16b, #4 subs x6, x6, #1 b.gt Lopen_tail_128_rounds cbz x4, Lopen_tail_128_rounds_done subs x4, x4, #1 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most b Lopen_tail_128_rounds Lopen_tail_128_rounds_done: add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add 
v15.4s, v15.4s, v30.4s add v16.4s, v16.4s, v30.4s add v15.4s, v15.4s, v22.4s add v16.4s, v16.4s, v23.4s ld1 {v20.16b - v23.16b}, [x1], #64 eor v20.16b, v20.16b, v1.16b eor v21.16b, v21.16b, v6.16b eor v22.16b, v22.16b, v11.16b eor v23.16b, v23.16b, v16.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 b Lopen_tail_64_store Lopen_tail_64: // We just need a single block mov v0.16b, v24.16b mov v5.16b, v28.16b mov v10.16b, v29.16b mov v15.16b, v30.16b eor v23.16b, v23.16b, v23.16b ins v23.s[0], v25.s[0] add v15.4s, v15.4s, v23.4s mov x6, #10 sub x6, x6, x4 Lopen_tail_64_rounds: add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 add v0.4s, v0.4s, v5.4s eor v15.16b, v15.16b, v0.16b rev32 v15.8h, v15.8h add v10.4s, v10.4s, v15.4s eor v5.16b, v5.16b, v10.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 add v0.4s, v0.4s, v20.4s eor v15.16b, v15.16b, v0.16b tbl v15.16b, {v15.16b}, v26.16b add v10.4s, v10.4s, v15.4s eor v20.16b, v20.16b, v10.16b ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 subs x6, x6, #1 b.gt Lopen_tail_64_rounds cbz x4, Lopen_tail_64_rounds_done subs x4, x4, #1 ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most b Lopen_tail_64_rounds Lopen_tail_64_rounds_done: add v0.4s, v0.4s, v24.4s add v5.4s, v5.4s, v28.4s add v10.4s, v10.4s, v29.4s add v15.4s, v15.4s, v30.4s add v15.4s, v15.4s, v23.4s Lopen_tail_64_store: cmp x2, #16 b.lt Lopen_tail_16 ld1 {v20.16b}, [x1], #16 eor v20.16b, v20.16b, v0.16b st1 {v20.16b}, [x0], #16 mov v0.16b, v5.16b mov v5.16b, v10.16b mov v10.16b, v15.16b sub x2, x2, #16 b Lopen_tail_64_store Lopen_tail_16: // Here we handle the last [0,16) bytes that require a padded block cbz x2, Lopen_finalize eor v20.16b, v20.16b, v20.16b // Use T0 to load the ciphertext eor v21.16b, v21.16b, v21.16b // Use T1 to generate an AND mask not v22.16b, v20.16b add x7, x1, x2 mov x6, x2 Lopen_tail_16_compose: ext v20.16b, v20.16b, v20.16b, #15 ldrb w11, [x7, #-1]! 
mov v20.b[0], w11 ext v21.16b, v22.16b, v21.16b, #15 subs x2, x2, #1 b.gt Lopen_tail_16_compose and v20.16b, v20.16b, v21.16b // Hash in the final padded block mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most eor v20.16b, v20.16b, v0.16b Lopen_tail_16_store: umov w11, v20.b[0] strb w11, [x0], #1 ext v20.16b, v20.16b, v20.16b, #1 subs x6, x6, #1 b.gt Lopen_tail_16_store Lopen_finalize: mov x11, v31.d[0] mov x12, v31.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most // Final reduction step sub x12, xzr, x15 orr x13, xzr, #3 subs x11, x8, #-5 sbcs x12, x9, x12 sbcs x13, x10, x13 csel x8, x11, x8, cs csel x9, x12, x9, cs csel x10, x13, x10, cs mov x11, v27.d[0] mov x12, v27.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 stp x8, x9, [x5] ldp d8, d9, [sp, #16] ldp d10, d11, [sp, #32] ldp d12, d13, [sp, #48] ldp d14, d15, [sp, #64] .cfi_restore b15 .cfi_restore b14 .cfi_restore b13 .cfi_restore b12 .cfi_restore b11 .cfi_restore b10 .cfi_restore b9 .cfi_restore b8 ldp x29, x30, [sp], 80 .cfi_restore w29 .cfi_restore w30 .cfi_def_cfa_offset 0 AARCH64_VALIDATE_LINK_REGISTER ret Lopen_128: // On some architectures preparing 5 blocks for small buffers is wasteful eor v25.16b, v25.16b, v25.16b mov x11, #1 mov v25.s[0], w11 mov v0.16b, v24.16b mov v1.16b, v24.16b mov v2.16b, v24.16b mov v5.16b, v28.16b mov v6.16b, v28.16b mov v7.16b, v28.16b mov v10.16b, v29.16b mov v11.16b, v29.16b mov v12.16b, v29.16b mov v17.16b, v30.16b add v15.4s, v17.4s, v25.4s add v16.4s, v15.4s, v25.4s mov x6, #10 Lopen_128_rounds: add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, 
v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #4 ext v6.16b, v6.16b, v6.16b, #4 ext v7.16b, v7.16b, v7.16b, #4 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #12 ext v16.16b, v16.16b, v16.16b, #12 ext v17.16b, v17.16b, v17.16b, #12 add v0.4s, v0.4s, v5.4s add v1.4s, v1.4s, v6.4s add v2.4s, v2.4s, v7.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b rev32 v15.8h, v15.8h rev32 v16.8h, v16.8h rev32 v17.8h, v17.8h add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v5.16b, v5.16b, v10.16b eor v6.16b, v6.16b, v11.16b eor v7.16b, v7.16b, v12.16b ushr v20.4s, v5.4s, #20 sli v20.4s, v5.4s, #12 ushr v5.4s, v6.4s, #20 sli v5.4s, v6.4s, #12 ushr v6.4s, v7.4s, #20 sli v6.4s, v7.4s, #12 add v0.4s, v0.4s, v20.4s add v1.4s, v1.4s, v5.4s add v2.4s, v2.4s, v6.4s eor v15.16b, v15.16b, v0.16b eor v16.16b, v16.16b, v1.16b eor v17.16b, v17.16b, v2.16b tbl v15.16b, {v15.16b}, v26.16b tbl v16.16b, {v16.16b}, v26.16b tbl v17.16b, {v17.16b}, v26.16b add v10.4s, v10.4s, v15.4s add v11.4s, v11.4s, v16.4s add v12.4s, v12.4s, v17.4s eor v20.16b, v20.16b, v10.16b eor v5.16b, v5.16b, v11.16b eor v6.16b, v6.16b, v12.16b ushr v7.4s, v6.4s, #25 sli v7.4s, v6.4s, #7 ushr v6.4s, v5.4s, #25 sli v6.4s, v5.4s, #7 ushr v5.4s, v20.4s, #25 sli v5.4s, v20.4s, #7 ext v5.16b, v5.16b, v5.16b, #12 ext v6.16b, v6.16b, v6.16b, #12 ext v7.16b, v7.16b, v7.16b, #12 ext v10.16b, v10.16b, v10.16b, #8 ext v11.16b, v11.16b, v11.16b, #8 ext v12.16b, v12.16b, v12.16b, #8 ext v15.16b, v15.16b, v15.16b, #4 ext v16.16b, v16.16b, v16.16b, #4 ext v17.16b, v17.16b, v17.16b, #4 subs x6, x6, #1 b.hi Lopen_128_rounds add v0.4s, v0.4s, v24.4s add v1.4s, v1.4s, v24.4s add v2.4s, v2.4s, v24.4s add v5.4s, v5.4s, v28.4s add v6.4s, v6.4s, v28.4s add v7.4s, v7.4s, v28.4s add v10.4s, v10.4s, v29.4s add v11.4s, v11.4s, v29.4s add v30.4s, v30.4s, v25.4s add v15.4s, v15.4s, v30.4s add v30.4s, v30.4s, v25.4s add v16.4s, v16.4s, v30.4s and v2.16b, v2.16b, v27.16b mov x16, v2.d[0] // Move the R key to GPRs mov x17, v2.d[1] mov v27.16b, v7.16b // Store the S key bl Lpoly_hash_ad_internal Lopen_128_store: cmp x2, #64 b.lt Lopen_128_store_64 ld1 {v20.16b - v23.16b}, [x1], #64 mov x11, v20.d[0] mov x12, v20.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 
has the value of 4 at most mov x11, v21.d[0] mov x12, v21.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v22.d[0] mov x12, v22.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most mov x11, v23.d[0] mov x12, v23.d[1] adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most eor v20.16b, v20.16b, v0.16b eor v21.16b, v21.16b, v5.16b eor v22.16b, v22.16b, v10.16b eor v23.16b, v23.16b, v15.16b st1 {v20.16b - v23.16b}, [x0], #64 sub x2, x2, #64 mov v0.16b, v1.16b mov v5.16b, v6.16b mov v10.16b, v11.16b mov v15.16b, v16.16b Lopen_128_store_64: lsr x4, x2, #4 mov x3, x1 Lopen_128_hash_64: cbz x4, Lopen_tail_64_store ldp x11, x12, [x3], 16 adds x8, x8, x11 adcs x9, x9, x12 adc x10, x10, x15 mul x11, x8, x16 // [t2:t1:t0] = [acc2:acc1:acc0] * r0 umulh x12, x8, x16 mul x13, x9, x16 umulh x14, x9, x16 adds x12, x12, x13 mul x13, x10, x16 adc x13, x13, x14 mul x14, x8, x17 // [t3:t2:t1:t0] = [acc2:acc1:acc0] * [r1:r0] umulh x8, x8, x17 adds x12, x12, x14 mul x14, x9, x17 umulh x9, x9, x17 adcs x14, x14, x8 mul x10, x10, x17 adc x10, x10, x9 adds x13, x13, x14 adc x14, x10, xzr and x10, x13, #3 // At this point acc2 is 2 bits at most (value of 3) and x8, x13, #-4 extr x13, x14, x13, #2 adds x8, x8, x11 lsr x11, x14, #2 adc x9, x14, x11 // No carry out since t0 is 61 bits and t3 is 63 bits adds x8, x8, x13 adcs x9, x9, x12 adc x10, x10, xzr // At this point acc2 has the value of 4 at most sub x4, x4, #1 b 
Lopen_128_hash_64 .cfi_endproc #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
marvin-hansen/iggy-streaming-system
40,235
thirdparty/crates/ring-0.17.9/pregenerated/chacha-armv8-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <ring-core/arm_arch.h> .section .rodata .align 5 Lsigma: .quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral Lone: .long 1,0,0,0 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .text .globl ChaCha20_ctr32_nohw .def ChaCha20_ctr32_nohw .type 32 .endef .align 5 ChaCha20_ctr32_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 adrp x5,Lsigma add x5,x5,:lo12:Lsigma stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#64 ldp x22,x23,[x5] // load sigma ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ldp x28,x30,[x4] // load counter #ifdef __AARCH64EB__ ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif Loop_outer: mov w5,w22 // unpack key block lsr x6,x22,#32 mov w7,w23 lsr x8,x23,#32 mov w9,w24 lsr x10,x24,#32 mov w11,w25 lsr x12,x25,#32 mov w13,w26 lsr x14,x26,#32 mov w15,w27 lsr x16,x27,#32 mov w17,w28 lsr x19,x28,#32 mov w20,w30 lsr x21,x30,#32 mov x4,#10 subs x2,x2,#64 Loop: sub x4,x4,#1 add w5,w5,w9 add w6,w6,w10 add w7,w7,w11 add w8,w8,w12 eor w17,w17,w5 eor w19,w19,w6 eor w20,w20,w7 eor w21,w21,w8 ror w17,w17,#16 ror w19,w19,#16 ror w20,w20,#16 ror w21,w21,#16 add w13,w13,w17 add w14,w14,w19 add w15,w15,w20 add w16,w16,w21 eor w9,w9,w13 eor w10,w10,w14 eor w11,w11,w15 eor w12,w12,w16 ror w9,w9,#20 ror w10,w10,#20 ror w11,w11,#20 ror w12,w12,#20 add w5,w5,w9 add w6,w6,w10 add w7,w7,w11 add w8,w8,w12 eor w17,w17,w5 eor w19,w19,w6 eor w20,w20,w7 eor w21,w21,w8 ror w17,w17,#24 ror w19,w19,#24 ror w20,w20,#24 ror w21,w21,#24 add w13,w13,w17 add w14,w14,w19 add w15,w15,w20 add w16,w16,w21 eor w9,w9,w13 eor w10,w10,w14 eor w11,w11,w15 eor w12,w12,w16 ror w9,w9,#25 ror w10,w10,#25 ror w11,w11,#25 ror w12,w12,#25 add w5,w5,w10 add w6,w6,w11 add w7,w7,w12 add w8,w8,w9 eor w21,w21,w5 eor w17,w17,w6 eor w19,w19,w7 eor w20,w20,w8 ror w21,w21,#16 ror w17,w17,#16 ror w19,w19,#16 ror w20,w20,#16 add w15,w15,w21 add w16,w16,w17 add w13,w13,w19 add w14,w14,w20 eor w10,w10,w15 eor w11,w11,w16 eor w12,w12,w13 eor w9,w9,w14 ror w10,w10,#20 ror w11,w11,#20 ror w12,w12,#20 ror w9,w9,#20 add w5,w5,w10 add w6,w6,w11 add w7,w7,w12 add w8,w8,w9 eor w21,w21,w5 eor w17,w17,w6 eor w19,w19,w7 eor w20,w20,w8 ror w21,w21,#24 ror w17,w17,#24 ror w19,w19,#24 ror w20,w20,#24 add w15,w15,w21 add w16,w16,w17 add w13,w13,w19 add w14,w14,w20 eor w10,w10,w15 eor w11,w11,w16 eor w12,w12,w13 eor w9,w9,w14 ror w10,w10,#25 ror w11,w11,#25 ror w12,w12,#25 ror w9,w9,#25 cbnz x4,Loop add w5,w5,w22 // accumulate key block add x6,x6,x22,lsr#32 add w7,w7,w23 add x8,x8,x23,lsr#32 add w9,w9,w24 add x10,x10,x24,lsr#32 add w11,w11,w25 add x12,x12,x25,lsr#32 add w13,w13,w26 add x14,x14,x26,lsr#32 add w15,w15,w27 add x16,x16,x27,lsr#32 add w17,w17,w28 add x19,x19,x28,lsr#32 add w20,w20,w30 add x21,x21,x30,lsr#32 b.lo Ltail add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef 
__AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#1 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 b.hi Loop_outer ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .align 4 Ltail: add x2,x2,#64 Less_than_64: sub x0,x0,#1 add x1,x1,x2 add x0,x0,x2 add x4,sp,x2 neg x2,x2 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif stp x5,x7,[sp,#0] stp x9,x11,[sp,#16] stp x13,x15,[sp,#32] stp x17,x20,[sp,#48] Loop_tail: ldrb w10,[x1,x2] ldrb w11,[x4,x2] add x2,x2,#1 eor w10,w10,w11 strb w10,[x0,x2] cbnz x2,Loop_tail stp xzr,xzr,[sp,#0] stp xzr,xzr,[sp,#16] stp xzr,xzr,[sp,#32] stp xzr,xzr,[sp,#48] ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .globl ChaCha20_ctr32_neon .def ChaCha20_ctr32_neon .type 32 .endef .align 5 ChaCha20_ctr32_neon: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 adrp x5,Lsigma add x5,x5,:lo12:Lsigma stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] cmp x2,#512 b.hs L512_or_more_neon sub sp,sp,#64 ldp x22,x23,[x5] // load sigma ld1 {v24.4s},[x5],#16 ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ld1 {v25.4s,v26.4s},[x3] ldp x28,x30,[x4] // load counter ld1 {v27.4s},[x4] ld1 {v31.4s},[x5] #ifdef __AARCH64EB__ rev64 v24.4s,v24.4s ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif add v27.4s,v27.4s,v31.4s // += 1 add v28.4s,v27.4s,v31.4s add v29.4s,v28.4s,v31.4s shl v31.4s,v31.4s,#2 // 1 -> 4 Loop_outer_neon: mov w5,w22 // unpack key block lsr x6,x22,#32 mov v0.16b,v24.16b mov w7,w23 lsr x8,x23,#32 mov v4.16b,v24.16b mov w9,w24 lsr x10,x24,#32 mov v16.16b,v24.16b mov w11,w25 mov v1.16b,v25.16b lsr x12,x25,#32 mov v5.16b,v25.16b mov w13,w26 mov v17.16b,v25.16b lsr x14,x26,#32 mov v3.16b,v27.16b mov w15,w27 mov v7.16b,v28.16b lsr x16,x27,#32 mov v19.16b,v29.16b mov w17,w28 mov v2.16b,v26.16b lsr x19,x28,#32 mov v6.16b,v26.16b mov w20,w30 mov v18.16b,v26.16b lsr x21,x30,#32 mov x4,#10 subs x2,x2,#256 Loop_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v16.4s,v16.4s,v17.4s add w7,w7,w11 eor v3.16b,v3.16b,v0.16b add w8,w8,w12 eor v7.16b,v7.16b,v4.16b eor w17,w17,w5 eor v19.16b,v19.16b,v16.16b eor w19,w19,w6 rev32 v3.8h,v3.8h eor w20,w20,w7 rev32 v7.8h,v7.8h eor w21,w21,w8 rev32 v19.8h,v19.8h ror w17,w17,#16 add v2.4s,v2.4s,v3.4s ror w19,w19,#16 add v6.4s,v6.4s,v7.4s ror w20,w20,#16 add v18.4s,v18.4s,v19.4s ror w21,w21,#16 eor v20.16b,v1.16b,v2.16b add w13,w13,w17 eor v21.16b,v5.16b,v6.16b add w14,w14,w19 eor v22.16b,v17.16b,v18.16b add w15,w15,w20 ushr v1.4s,v20.4s,#20 add w16,w16,w21 ushr v5.4s,v21.4s,#20 eor w9,w9,w13 ushr v17.4s,v22.4s,#20 eor w10,w10,w14 sli v1.4s,v20.4s,#12 eor w11,w11,w15 sli v5.4s,v21.4s,#12 eor w12,w12,w16 sli 
v17.4s,v22.4s,#12 ror w9,w9,#20 add v0.4s,v0.4s,v1.4s ror w10,w10,#20 add v4.4s,v4.4s,v5.4s ror w11,w11,#20 add v16.4s,v16.4s,v17.4s ror w12,w12,#20 eor v20.16b,v3.16b,v0.16b add w5,w5,w9 eor v21.16b,v7.16b,v4.16b add w6,w6,w10 eor v22.16b,v19.16b,v16.16b add w7,w7,w11 ushr v3.4s,v20.4s,#24 add w8,w8,w12 ushr v7.4s,v21.4s,#24 eor w17,w17,w5 ushr v19.4s,v22.4s,#24 eor w19,w19,w6 sli v3.4s,v20.4s,#8 eor w20,w20,w7 sli v7.4s,v21.4s,#8 eor w21,w21,w8 sli v19.4s,v22.4s,#8 ror w17,w17,#24 add v2.4s,v2.4s,v3.4s ror w19,w19,#24 add v6.4s,v6.4s,v7.4s ror w20,w20,#24 add v18.4s,v18.4s,v19.4s ror w21,w21,#24 eor v20.16b,v1.16b,v2.16b add w13,w13,w17 eor v21.16b,v5.16b,v6.16b add w14,w14,w19 eor v22.16b,v17.16b,v18.16b add w15,w15,w20 ushr v1.4s,v20.4s,#25 add w16,w16,w21 ushr v5.4s,v21.4s,#25 eor w9,w9,w13 ushr v17.4s,v22.4s,#25 eor w10,w10,w14 sli v1.4s,v20.4s,#7 eor w11,w11,w15 sli v5.4s,v21.4s,#7 eor w12,w12,w16 sli v17.4s,v22.4s,#7 ror w9,w9,#25 ext v2.16b,v2.16b,v2.16b,#8 ror w10,w10,#25 ext v6.16b,v6.16b,v6.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w10 add v4.4s,v4.4s,v5.4s add w6,w6,w11 add v16.4s,v16.4s,v17.4s add w7,w7,w12 eor v3.16b,v3.16b,v0.16b add w8,w8,w9 eor v7.16b,v7.16b,v4.16b eor w21,w21,w5 eor v19.16b,v19.16b,v16.16b eor w17,w17,w6 rev32 v3.8h,v3.8h eor w19,w19,w7 rev32 v7.8h,v7.8h eor w20,w20,w8 rev32 v19.8h,v19.8h ror w21,w21,#16 add v2.4s,v2.4s,v3.4s ror w17,w17,#16 add v6.4s,v6.4s,v7.4s ror w19,w19,#16 add v18.4s,v18.4s,v19.4s ror w20,w20,#16 eor v20.16b,v1.16b,v2.16b add w15,w15,w21 eor v21.16b,v5.16b,v6.16b add w16,w16,w17 eor v22.16b,v17.16b,v18.16b add w13,w13,w19 ushr v1.4s,v20.4s,#20 add w14,w14,w20 ushr v5.4s,v21.4s,#20 eor w10,w10,w15 ushr v17.4s,v22.4s,#20 eor w11,w11,w16 sli v1.4s,v20.4s,#12 eor w12,w12,w13 sli v5.4s,v21.4s,#12 eor w9,w9,w14 sli v17.4s,v22.4s,#12 ror w10,w10,#20 add v0.4s,v0.4s,v1.4s ror w11,w11,#20 add v4.4s,v4.4s,v5.4s ror w12,w12,#20 add v16.4s,v16.4s,v17.4s ror w9,w9,#20 eor v20.16b,v3.16b,v0.16b add w5,w5,w10 eor v21.16b,v7.16b,v4.16b add w6,w6,w11 eor v22.16b,v19.16b,v16.16b add w7,w7,w12 ushr v3.4s,v20.4s,#24 add w8,w8,w9 ushr v7.4s,v21.4s,#24 eor w21,w21,w5 ushr v19.4s,v22.4s,#24 eor w17,w17,w6 sli v3.4s,v20.4s,#8 eor w19,w19,w7 sli v7.4s,v21.4s,#8 eor w20,w20,w8 sli v19.4s,v22.4s,#8 ror w21,w21,#24 add v2.4s,v2.4s,v3.4s ror w17,w17,#24 add v6.4s,v6.4s,v7.4s ror w19,w19,#24 add v18.4s,v18.4s,v19.4s ror w20,w20,#24 eor v20.16b,v1.16b,v2.16b add w15,w15,w21 eor v21.16b,v5.16b,v6.16b add w16,w16,w17 eor v22.16b,v17.16b,v18.16b add w13,w13,w19 ushr v1.4s,v20.4s,#25 add w14,w14,w20 ushr v5.4s,v21.4s,#25 eor w10,w10,w15 ushr v17.4s,v22.4s,#25 eor w11,w11,w16 sli v1.4s,v20.4s,#7 eor w12,w12,w13 sli v5.4s,v21.4s,#7 eor w9,w9,w14 sli v17.4s,v22.4s,#7 ror w10,w10,#25 ext v2.16b,v2.16b,v2.16b,#8 ror w11,w11,#25 ext v6.16b,v6.16b,v6.16b,#8 ror w12,w12,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 cbnz x4,Loop_neon add w5,w5,w22 // accumulate key block add v0.4s,v0.4s,v24.4s add x6,x6,x22,lsr#32 add v4.4s,v4.4s,v24.4s add w7,w7,w23 add v16.4s,v16.4s,v24.4s add x8,x8,x23,lsr#32 add v2.4s,v2.4s,v26.4s add w9,w9,w24 add v6.4s,v6.4s,v26.4s add 
x10,x10,x24,lsr#32 add v18.4s,v18.4s,v26.4s add w11,w11,w25 add v3.4s,v3.4s,v27.4s add x12,x12,x25,lsr#32 add w13,w13,w26 add v7.4s,v7.4s,v28.4s add x14,x14,x26,lsr#32 add w15,w15,w27 add v19.4s,v19.4s,v29.4s add x16,x16,x27,lsr#32 add w17,w17,w28 add v1.4s,v1.4s,v25.4s add x19,x19,x28,lsr#32 add w20,w20,w30 add v5.4s,v5.4s,v25.4s add x21,x21,x30,lsr#32 add v17.4s,v17.4s,v25.4s b.lo Ltail_neon add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor v0.16b,v0.16b,v20.16b eor x15,x15,x16 eor v1.16b,v1.16b,v21.16b eor x17,x17,x19 eor v2.16b,v2.16b,v22.16b eor x20,x20,x21 eor v3.16b,v3.16b,v23.16b ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 stp x5,x7,[x0,#0] // store output add x28,x28,#4 // increment counter stp x9,x11,[x0,#16] add v27.4s,v27.4s,v31.4s // += 4 stp x13,x15,[x0,#32] add v28.4s,v28.4s,v31.4s stp x17,x20,[x0,#48] add v29.4s,v29.4s,v31.4s add x0,x0,#64 st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 eor v4.16b,v4.16b,v20.16b eor v5.16b,v5.16b,v21.16b eor v6.16b,v6.16b,v22.16b eor v7.16b,v7.16b,v23.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 eor v16.16b,v16.16b,v0.16b eor v17.16b,v17.16b,v1.16b eor v18.16b,v18.16b,v2.16b eor v19.16b,v19.16b,v3.16b st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 b.hi Loop_outer_neon ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret Ltail_neon: add x2,x2,#256 cmp x2,#64 b.lo Less_than_64 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#4 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 b.eq Ldone_neon sub x2,x2,#64 cmp x2,#64 b.lo Less_than_128 ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor v0.16b,v0.16b,v20.16b eor v1.16b,v1.16b,v21.16b eor v2.16b,v2.16b,v22.16b eor v3.16b,v3.16b,v23.16b st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 b.eq Ldone_neon sub x2,x2,#64 cmp x2,#64 b.lo Less_than_192 ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor v4.16b,v4.16b,v20.16b eor v5.16b,v5.16b,v21.16b eor v6.16b,v6.16b,v22.16b eor v7.16b,v7.16b,v23.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 b.eq Ldone_neon sub x2,x2,#64 st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp] b Last_neon Less_than_128: st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp] b Last_neon Less_than_192: st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp] b Last_neon .align 4 Last_neon: sub x0,x0,#1 add x1,x1,x2 add x0,x0,x2 add x4,sp,x2 neg x2,x2 Loop_tail_neon: ldrb w10,[x1,x2] ldrb w11,[x4,x2] add x2,x2,#1 eor 
w10,w10,w11 strb w10,[x0,x2] cbnz x2,Loop_tail_neon stp xzr,xzr,[sp,#0] stp xzr,xzr,[sp,#16] stp xzr,xzr,[sp,#32] stp xzr,xzr,[sp,#48] Ldone_neon: ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .def ChaCha20_512_neon .type 32 .endef .align 5 ChaCha20_512_neon: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 adrp x5,Lsigma add x5,x5,:lo12:Lsigma stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] L512_or_more_neon: sub sp,sp,#128+64 ldp x22,x23,[x5] // load sigma ld1 {v24.4s},[x5],#16 ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ld1 {v25.4s,v26.4s},[x3] ldp x28,x30,[x4] // load counter ld1 {v27.4s},[x4] ld1 {v31.4s},[x5] #ifdef __AARCH64EB__ rev64 v24.4s,v24.4s ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif add v27.4s,v27.4s,v31.4s // += 1 stp q24,q25,[sp,#0] // off-load key block, invariant part add v27.4s,v27.4s,v31.4s // not typo str q26,[sp,#32] add v28.4s,v27.4s,v31.4s add v29.4s,v28.4s,v31.4s add v30.4s,v29.4s,v31.4s shl v31.4s,v31.4s,#2 // 1 -> 4 stp d8,d9,[sp,#128+0] // meet ABI requirements stp d10,d11,[sp,#128+16] stp d12,d13,[sp,#128+32] stp d14,d15,[sp,#128+48] sub x2,x2,#512 // not typo Loop_outer_512_neon: mov v0.16b,v24.16b mov v4.16b,v24.16b mov v8.16b,v24.16b mov v12.16b,v24.16b mov v16.16b,v24.16b mov v20.16b,v24.16b mov v1.16b,v25.16b mov w5,w22 // unpack key block mov v5.16b,v25.16b lsr x6,x22,#32 mov v9.16b,v25.16b mov w7,w23 mov v13.16b,v25.16b lsr x8,x23,#32 mov v17.16b,v25.16b mov w9,w24 mov v21.16b,v25.16b lsr x10,x24,#32 mov v3.16b,v27.16b mov w11,w25 mov v7.16b,v28.16b lsr x12,x25,#32 mov v11.16b,v29.16b mov w13,w26 mov v15.16b,v30.16b lsr x14,x26,#32 mov v2.16b,v26.16b mov w15,w27 mov v6.16b,v26.16b lsr x16,x27,#32 add v19.4s,v3.4s,v31.4s // +4 mov w17,w28 add v23.4s,v7.4s,v31.4s // +4 lsr x19,x28,#32 mov v10.16b,v26.16b mov w20,w30 mov v14.16b,v26.16b lsr x21,x30,#32 mov v18.16b,v26.16b stp q27,q28,[sp,#48] // off-load key block, variable part mov v22.16b,v26.16b str q29,[sp,#80] mov x4,#5 subs x2,x2,#512 Loop_upper_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr 
v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v11.16b,v11.16b,v11.16b,#12 ext v15.16b,v15.16b,v15.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v23.16b,v23.16b,v23.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v9.16b,v9.16b,v9.16b,#4 ext v13.16b,v13.16b,v13.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 ext v21.16b,v21.16b,v21.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor 
w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v11.16b,v11.16b,v11.16b,#4 ext v15.16b,v15.16b,v15.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v23.16b,v23.16b,v23.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v9.16b,v9.16b,v9.16b,#12 ext v13.16b,v13.16b,v13.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 ext v21.16b,v21.16b,v21.16b,#12 cbnz x4,Loop_upper_neon add w5,w5,w22 // accumulate key block add x6,x6,x22,lsr#32 add w7,w7,w23 add x8,x8,x23,lsr#32 add w9,w9,w24 add 
x10,x10,x24,lsr#32 add w11,w11,w25 add x12,x12,x25,lsr#32 add w13,w13,w26 add x14,x14,x26,lsr#32 add w15,w15,w27 add x16,x16,x27,lsr#32 add w17,w17,w28 add x19,x19,x28,lsr#32 add w20,w20,w30 add x21,x21,x30,lsr#32 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#1 // increment counter mov w5,w22 // unpack key block lsr x6,x22,#32 stp x9,x11,[x0,#16] mov w7,w23 lsr x8,x23,#32 stp x13,x15,[x0,#32] mov w9,w24 lsr x10,x24,#32 stp x17,x20,[x0,#48] add x0,x0,#64 mov w11,w25 lsr x12,x25,#32 mov w13,w26 lsr x14,x26,#32 mov w15,w27 lsr x16,x27,#32 mov w17,w28 lsr x19,x28,#32 mov w20,w30 lsr x21,x30,#32 mov x4,#5 Loop_lower_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli 
v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v11.16b,v11.16b,v11.16b,#12 ext v15.16b,v15.16b,v15.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v23.16b,v23.16b,v23.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v9.16b,v9.16b,v9.16b,#4 ext v13.16b,v13.16b,v13.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 ext v21.16b,v21.16b,v21.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s 
ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v11.16b,v11.16b,v11.16b,#4 ext v15.16b,v15.16b,v15.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v23.16b,v23.16b,v23.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v9.16b,v9.16b,v9.16b,#12 ext v13.16b,v13.16b,v13.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 ext v21.16b,v21.16b,v21.16b,#12 cbnz x4,Loop_lower_neon add w5,w5,w22 // accumulate key block ldp q24,q25,[sp,#0] add x6,x6,x22,lsr#32 ldp q26,q27,[sp,#32] add w7,w7,w23 ldp q28,q29,[sp,#64] add x8,x8,x23,lsr#32 add v0.4s,v0.4s,v24.4s add w9,w9,w24 add v4.4s,v4.4s,v24.4s add x10,x10,x24,lsr#32 add v8.4s,v8.4s,v24.4s add w11,w11,w25 add v12.4s,v12.4s,v24.4s add x12,x12,x25,lsr#32 add v16.4s,v16.4s,v24.4s add w13,w13,w26 add v20.4s,v20.4s,v24.4s add x14,x14,x26,lsr#32 add v2.4s,v2.4s,v26.4s add w15,w15,w27 add v6.4s,v6.4s,v26.4s add x16,x16,x27,lsr#32 add v10.4s,v10.4s,v26.4s add w17,w17,w28 add v14.4s,v14.4s,v26.4s add x19,x19,x28,lsr#32 add v18.4s,v18.4s,v26.4s add w20,w20,w30 add v22.4s,v22.4s,v26.4s add x21,x21,x30,lsr#32 add v19.4s,v19.4s,v31.4s // +4 add x5,x5,x6,lsl#32 // pack add v23.4s,v23.4s,v31.4s // +4 add x7,x7,x8,lsl#32 add v3.4s,v3.4s,v27.4s ldp x6,x8,[x1,#0] // load input add v7.4s,v7.4s,v28.4s add x9,x9,x10,lsl#32 add v11.4s,v11.4s,v29.4s add x11,x11,x12,lsl#32 add v15.4s,v15.4s,v30.4s ldp x10,x12,[x1,#16] add v19.4s,v19.4s,v27.4s add x13,x13,x14,lsl#32 add v23.4s,v23.4s,v28.4s add x15,x15,x16,lsl#32 add v1.4s,v1.4s,v25.4s ldp x14,x16,[x1,#32] add v5.4s,v5.4s,v25.4s add x17,x17,x19,lsl#32 add v9.4s,v9.4s,v25.4s add 
x20,x20,x21,lsl#32 add v13.4s,v13.4s,v25.4s ldp x19,x21,[x1,#48] add v17.4s,v17.4s,v25.4s add x1,x1,#64 add v21.4s,v21.4s,v25.4s #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor v0.16b,v0.16b,v24.16b eor x15,x15,x16 eor v1.16b,v1.16b,v25.16b eor x17,x17,x19 eor v2.16b,v2.16b,v26.16b eor x20,x20,x21 eor v3.16b,v3.16b,v27.16b ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 stp x5,x7,[x0,#0] // store output add x28,x28,#7 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 eor v4.16b,v4.16b,v24.16b eor v5.16b,v5.16b,v25.16b eor v6.16b,v6.16b,v26.16b eor v7.16b,v7.16b,v27.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 eor v8.16b,v8.16b,v0.16b ldp q24,q25,[sp,#0] eor v9.16b,v9.16b,v1.16b ldp q26,q27,[sp,#32] eor v10.16b,v10.16b,v2.16b eor v11.16b,v11.16b,v3.16b st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64 ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64 eor v12.16b,v12.16b,v4.16b eor v13.16b,v13.16b,v5.16b eor v14.16b,v14.16b,v6.16b eor v15.16b,v15.16b,v7.16b st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64 ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64 eor v16.16b,v16.16b,v8.16b eor v17.16b,v17.16b,v9.16b eor v18.16b,v18.16b,v10.16b eor v19.16b,v19.16b,v11.16b st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 shl v0.4s,v31.4s,#1 // 4 -> 8 eor v20.16b,v20.16b,v12.16b eor v21.16b,v21.16b,v13.16b eor v22.16b,v22.16b,v14.16b eor v23.16b,v23.16b,v15.16b st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64 add v27.4s,v27.4s,v0.4s // += 8 add v28.4s,v28.4s,v0.4s add v29.4s,v29.4s,v0.4s add v30.4s,v30.4s,v0.4s b.hs Loop_outer_512_neon adds x2,x2,#512 ushr v0.4s,v31.4s,#2 // 4 -> 1 ldp d8,d9,[sp,#128+0] // meet ABI requirements ldp d10,d11,[sp,#128+16] ldp d12,d13,[sp,#128+32] ldp d14,d15,[sp,#128+48] stp q24,q31,[sp,#0] // wipe off-load area stp q24,q31,[sp,#32] stp q24,q31,[sp,#64] b.eq Ldone_512_neon cmp x2,#192 sub v27.4s,v27.4s,v0.4s // -= 1 sub v28.4s,v28.4s,v0.4s sub v29.4s,v29.4s,v0.4s add sp,sp,#128 b.hs Loop_outer_neon eor v25.16b,v25.16b,v25.16b eor v26.16b,v26.16b,v26.16b eor v27.16b,v27.16b,v27.16b eor v28.16b,v28.16b,v28.16b eor v29.16b,v29.16b,v29.16b eor v30.16b,v30.16b,v30.16b b Loop_outer Ldone_512_neon: ldp x19,x20,[x29,#16] add sp,sp,#128+64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
marvin-hansen/iggy-streaming-system
31,065
thirdparty/crates/ring-0.17.9/pregenerated/chacha-x86_64-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .section .rodata .align 64 .Lzero: .long 0,0,0,0 .Lone: .long 1,0,0,0 .Linc: .long 0,1,2,3 .Lfour: .long 4,4,4,4 .Lincy: .long 0,2,4,6,1,3,5,7 .Leight: .long 8,8,8,8,8,8,8,8 .Lrot16: .byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd .Lrot24: .byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe .Lsigma: .byte 101,120,112,97,110,100,32,51,50,45,98,121,116,101,32,107,0 .align 64 .Lzeroz: .long 0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0 .Lfourz: .long 4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0 .Lincz: .long 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 .Lsixteen: .long 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text .globl ChaCha20_ctr32_nohw .hidden ChaCha20_ctr32_nohw .type ChaCha20_ctr32_nohw,@function .align 64 ChaCha20_ctr32_nohw: .cfi_startproc _CET_ENDBR pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset rbx,-16 pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset rbp,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset r15,-56 subq $64+24,%rsp .cfi_adjust_cfa_offset 88 .Lctr32_body: movdqu (%rcx),%xmm1 movdqu 16(%rcx),%xmm2 movdqu (%r8),%xmm3 movdqa .Lone(%rip),%xmm4 movdqa %xmm1,16(%rsp) movdqa %xmm2,32(%rsp) movdqa %xmm3,48(%rsp) movq %rdx,%rbp jmp .Loop_outer .align 32 .Loop_outer: movl $0x61707865,%eax movl $0x3320646e,%ebx movl $0x79622d32,%ecx movl $0x6b206574,%edx movl 16(%rsp),%r8d movl 20(%rsp),%r9d movl 24(%rsp),%r10d movl 28(%rsp),%r11d movd %xmm3,%r12d movl 52(%rsp),%r13d movl 56(%rsp),%r14d movl 60(%rsp),%r15d movq %rbp,64+0(%rsp) movl $10,%ebp movq %rsi,64+8(%rsp) .byte 102,72,15,126,214 movq %rdi,64+16(%rsp) movq %rsi,%rdi shrq $32,%rdi jmp .Loop .align 32 .Loop: addl %r8d,%eax xorl %eax,%r12d roll $16,%r12d addl %r9d,%ebx xorl %ebx,%r13d roll $16,%r13d addl %r12d,%esi xorl %esi,%r8d roll $12,%r8d addl %r13d,%edi xorl %edi,%r9d roll $12,%r9d addl %r8d,%eax xorl %eax,%r12d roll $8,%r12d addl %r9d,%ebx xorl %ebx,%r13d roll $8,%r13d addl %r12d,%esi xorl %esi,%r8d roll $7,%r8d addl %r13d,%edi xorl %edi,%r9d roll $7,%r9d movl %esi,32(%rsp) movl %edi,36(%rsp) movl 40(%rsp),%esi movl 44(%rsp),%edi addl %r10d,%ecx xorl %ecx,%r14d roll $16,%r14d addl %r11d,%edx xorl %edx,%r15d roll $16,%r15d addl %r14d,%esi xorl %esi,%r10d roll $12,%r10d addl %r15d,%edi xorl %edi,%r11d roll $12,%r11d addl %r10d,%ecx xorl %ecx,%r14d roll $8,%r14d addl %r11d,%edx xorl %edx,%r15d roll $8,%r15d addl %r14d,%esi xorl %esi,%r10d roll $7,%r10d addl %r15d,%edi xorl %edi,%r11d roll $7,%r11d addl %r9d,%eax xorl %eax,%r15d roll $16,%r15d addl %r10d,%ebx xorl %ebx,%r12d roll $16,%r12d addl %r15d,%esi xorl %esi,%r9d roll $12,%r9d addl %r12d,%edi xorl %edi,%r10d roll $12,%r10d addl %r9d,%eax xorl %eax,%r15d roll $8,%r15d addl %r10d,%ebx xorl %ebx,%r12d roll $8,%r12d addl %r15d,%esi xorl %esi,%r9d roll $7,%r9d addl %r12d,%edi xorl %edi,%r10d roll $7,%r10d movl %esi,40(%rsp) movl %edi,44(%rsp) movl 32(%rsp),%esi movl 36(%rsp),%edi addl %r11d,%ecx xorl %ecx,%r13d roll $16,%r13d addl %r8d,%edx xorl %edx,%r14d roll 
$16,%r14d addl %r13d,%esi xorl %esi,%r11d roll $12,%r11d addl %r14d,%edi xorl %edi,%r8d roll $12,%r8d addl %r11d,%ecx xorl %ecx,%r13d roll $8,%r13d addl %r8d,%edx xorl %edx,%r14d roll $8,%r14d addl %r13d,%esi xorl %esi,%r11d roll $7,%r11d addl %r14d,%edi xorl %edi,%r8d roll $7,%r8d decl %ebp jnz .Loop movl %edi,36(%rsp) movl %esi,32(%rsp) movq 64(%rsp),%rbp movdqa %xmm2,%xmm1 movq 64+8(%rsp),%rsi paddd %xmm4,%xmm3 movq 64+16(%rsp),%rdi addl $0x61707865,%eax addl $0x3320646e,%ebx addl $0x79622d32,%ecx addl $0x6b206574,%edx addl 16(%rsp),%r8d addl 20(%rsp),%r9d addl 24(%rsp),%r10d addl 28(%rsp),%r11d addl 48(%rsp),%r12d addl 52(%rsp),%r13d addl 56(%rsp),%r14d addl 60(%rsp),%r15d paddd 32(%rsp),%xmm1 cmpq $64,%rbp jb .Ltail xorl 0(%rsi),%eax xorl 4(%rsi),%ebx xorl 8(%rsi),%ecx xorl 12(%rsi),%edx xorl 16(%rsi),%r8d xorl 20(%rsi),%r9d xorl 24(%rsi),%r10d xorl 28(%rsi),%r11d movdqu 32(%rsi),%xmm0 xorl 48(%rsi),%r12d xorl 52(%rsi),%r13d xorl 56(%rsi),%r14d xorl 60(%rsi),%r15d leaq 64(%rsi),%rsi pxor %xmm1,%xmm0 movdqa %xmm2,32(%rsp) movd %xmm3,48(%rsp) movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) movdqu %xmm0,32(%rdi) movl %r12d,48(%rdi) movl %r13d,52(%rdi) movl %r14d,56(%rdi) movl %r15d,60(%rdi) leaq 64(%rdi),%rdi subq $64,%rbp jnz .Loop_outer jmp .Ldone .align 16 .Ltail: movl %eax,0(%rsp) movl %ebx,4(%rsp) xorq %rbx,%rbx movl %ecx,8(%rsp) movl %edx,12(%rsp) movl %r8d,16(%rsp) movl %r9d,20(%rsp) movl %r10d,24(%rsp) movl %r11d,28(%rsp) movdqa %xmm1,32(%rsp) movl %r12d,48(%rsp) movl %r13d,52(%rsp) movl %r14d,56(%rsp) movl %r15d,60(%rsp) .Loop_tail: movzbl (%rsi,%rbx,1),%eax movzbl (%rsp,%rbx,1),%edx leaq 1(%rbx),%rbx xorl %edx,%eax movb %al,-1(%rdi,%rbx,1) decq %rbp jnz .Loop_tail .Ldone: leaq 64+24+48(%rsp),%rsi movq -48(%rsi),%r15 .cfi_restore r15 movq -40(%rsi),%r14 .cfi_restore r14 movq -32(%rsi),%r13 .cfi_restore r13 movq -24(%rsi),%r12 .cfi_restore r12 movq -16(%rsi),%rbp .cfi_restore rbp movq -8(%rsi),%rbx .cfi_restore rbx leaq (%rsi),%rsp .cfi_adjust_cfa_offset -136 .Lno_data: ret .cfi_endproc .size ChaCha20_ctr32_nohw,.-ChaCha20_ctr32_nohw .globl ChaCha20_ctr32_ssse3_4x .hidden ChaCha20_ctr32_ssse3_4x .type ChaCha20_ctr32_ssse3_4x,@function .align 32 ChaCha20_ctr32_ssse3_4x: .cfi_startproc _CET_ENDBR movq %rsp,%r9 .cfi_def_cfa_register r9 subq $0x140+8,%rsp movdqa .Lsigma(%rip),%xmm11 movdqu (%rcx),%xmm15 movdqu 16(%rcx),%xmm7 movdqu (%r8),%xmm3 leaq 256(%rsp),%rcx leaq .Lrot16(%rip),%r10 leaq .Lrot24(%rip),%r11 pshufd $0x00,%xmm11,%xmm8 pshufd $0x55,%xmm11,%xmm9 movdqa %xmm8,64(%rsp) pshufd $0xaa,%xmm11,%xmm10 movdqa %xmm9,80(%rsp) pshufd $0xff,%xmm11,%xmm11 movdqa %xmm10,96(%rsp) movdqa %xmm11,112(%rsp) pshufd $0x00,%xmm15,%xmm12 pshufd $0x55,%xmm15,%xmm13 movdqa %xmm12,128-256(%rcx) pshufd $0xaa,%xmm15,%xmm14 movdqa %xmm13,144-256(%rcx) pshufd $0xff,%xmm15,%xmm15 movdqa %xmm14,160-256(%rcx) movdqa %xmm15,176-256(%rcx) pshufd $0x00,%xmm7,%xmm4 pshufd $0x55,%xmm7,%xmm5 movdqa %xmm4,192-256(%rcx) pshufd $0xaa,%xmm7,%xmm6 movdqa %xmm5,208-256(%rcx) pshufd $0xff,%xmm7,%xmm7 movdqa %xmm6,224-256(%rcx) movdqa %xmm7,240-256(%rcx) pshufd $0x00,%xmm3,%xmm0 pshufd $0x55,%xmm3,%xmm1 paddd .Linc(%rip),%xmm0 pshufd $0xaa,%xmm3,%xmm2 movdqa %xmm1,272-256(%rcx) pshufd $0xff,%xmm3,%xmm3 movdqa %xmm2,288-256(%rcx) movdqa %xmm3,304-256(%rcx) jmp .Loop_enter4x .align 32 .Loop_outer4x: movdqa 64(%rsp),%xmm8 movdqa 80(%rsp),%xmm9 movdqa 96(%rsp),%xmm10 movdqa 112(%rsp),%xmm11 movdqa 128-256(%rcx),%xmm12 
movdqa 144-256(%rcx),%xmm13 movdqa 160-256(%rcx),%xmm14 movdqa 176-256(%rcx),%xmm15 movdqa 192-256(%rcx),%xmm4 movdqa 208-256(%rcx),%xmm5 movdqa 224-256(%rcx),%xmm6 movdqa 240-256(%rcx),%xmm7 movdqa 256-256(%rcx),%xmm0 movdqa 272-256(%rcx),%xmm1 movdqa 288-256(%rcx),%xmm2 movdqa 304-256(%rcx),%xmm3 paddd .Lfour(%rip),%xmm0 .Loop_enter4x: movdqa %xmm6,32(%rsp) movdqa %xmm7,48(%rsp) movdqa (%r10),%xmm7 movl $10,%eax movdqa %xmm0,256-256(%rcx) jmp .Loop4x .align 32 .Loop4x: paddd %xmm12,%xmm8 paddd %xmm13,%xmm9 pxor %xmm8,%xmm0 pxor %xmm9,%xmm1 .byte 102,15,56,0,199 .byte 102,15,56,0,207 paddd %xmm0,%xmm4 paddd %xmm1,%xmm5 pxor %xmm4,%xmm12 pxor %xmm5,%xmm13 movdqa %xmm12,%xmm6 pslld $12,%xmm12 psrld $20,%xmm6 movdqa %xmm13,%xmm7 pslld $12,%xmm13 por %xmm6,%xmm12 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm13 paddd %xmm12,%xmm8 paddd %xmm13,%xmm9 pxor %xmm8,%xmm0 pxor %xmm9,%xmm1 .byte 102,15,56,0,198 .byte 102,15,56,0,206 paddd %xmm0,%xmm4 paddd %xmm1,%xmm5 pxor %xmm4,%xmm12 pxor %xmm5,%xmm13 movdqa %xmm12,%xmm7 pslld $7,%xmm12 psrld $25,%xmm7 movdqa %xmm13,%xmm6 pslld $7,%xmm13 por %xmm7,%xmm12 psrld $25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm13 movdqa %xmm4,0(%rsp) movdqa %xmm5,16(%rsp) movdqa 32(%rsp),%xmm4 movdqa 48(%rsp),%xmm5 paddd %xmm14,%xmm10 paddd %xmm15,%xmm11 pxor %xmm10,%xmm2 pxor %xmm11,%xmm3 .byte 102,15,56,0,215 .byte 102,15,56,0,223 paddd %xmm2,%xmm4 paddd %xmm3,%xmm5 pxor %xmm4,%xmm14 pxor %xmm5,%xmm15 movdqa %xmm14,%xmm6 pslld $12,%xmm14 psrld $20,%xmm6 movdqa %xmm15,%xmm7 pslld $12,%xmm15 por %xmm6,%xmm14 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm15 paddd %xmm14,%xmm10 paddd %xmm15,%xmm11 pxor %xmm10,%xmm2 pxor %xmm11,%xmm3 .byte 102,15,56,0,214 .byte 102,15,56,0,222 paddd %xmm2,%xmm4 paddd %xmm3,%xmm5 pxor %xmm4,%xmm14 pxor %xmm5,%xmm15 movdqa %xmm14,%xmm7 pslld $7,%xmm14 psrld $25,%xmm7 movdqa %xmm15,%xmm6 pslld $7,%xmm15 por %xmm7,%xmm14 psrld $25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm15 paddd %xmm13,%xmm8 paddd %xmm14,%xmm9 pxor %xmm8,%xmm3 pxor %xmm9,%xmm0 .byte 102,15,56,0,223 .byte 102,15,56,0,199 paddd %xmm3,%xmm4 paddd %xmm0,%xmm5 pxor %xmm4,%xmm13 pxor %xmm5,%xmm14 movdqa %xmm13,%xmm6 pslld $12,%xmm13 psrld $20,%xmm6 movdqa %xmm14,%xmm7 pslld $12,%xmm14 por %xmm6,%xmm13 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm14 paddd %xmm13,%xmm8 paddd %xmm14,%xmm9 pxor %xmm8,%xmm3 pxor %xmm9,%xmm0 .byte 102,15,56,0,222 .byte 102,15,56,0,198 paddd %xmm3,%xmm4 paddd %xmm0,%xmm5 pxor %xmm4,%xmm13 pxor %xmm5,%xmm14 movdqa %xmm13,%xmm7 pslld $7,%xmm13 psrld $25,%xmm7 movdqa %xmm14,%xmm6 pslld $7,%xmm14 por %xmm7,%xmm13 psrld $25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm14 movdqa %xmm4,32(%rsp) movdqa %xmm5,48(%rsp) movdqa 0(%rsp),%xmm4 movdqa 16(%rsp),%xmm5 paddd %xmm15,%xmm10 paddd %xmm12,%xmm11 pxor %xmm10,%xmm1 pxor %xmm11,%xmm2 .byte 102,15,56,0,207 .byte 102,15,56,0,215 paddd %xmm1,%xmm4 paddd %xmm2,%xmm5 pxor %xmm4,%xmm15 pxor %xmm5,%xmm12 movdqa %xmm15,%xmm6 pslld $12,%xmm15 psrld $20,%xmm6 movdqa %xmm12,%xmm7 pslld $12,%xmm12 por %xmm6,%xmm15 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm12 paddd %xmm15,%xmm10 paddd %xmm12,%xmm11 pxor %xmm10,%xmm1 pxor %xmm11,%xmm2 .byte 102,15,56,0,206 .byte 102,15,56,0,214 paddd %xmm1,%xmm4 paddd %xmm2,%xmm5 pxor %xmm4,%xmm15 pxor %xmm5,%xmm12 movdqa %xmm15,%xmm7 pslld $7,%xmm15 psrld $25,%xmm7 movdqa %xmm12,%xmm6 pslld $7,%xmm12 por %xmm7,%xmm15 psrld $25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm12 decl %eax jnz .Loop4x paddd 64(%rsp),%xmm8 paddd 80(%rsp),%xmm9 paddd 96(%rsp),%xmm10 paddd 112(%rsp),%xmm11 
movdqa %xmm8,%xmm6 punpckldq %xmm9,%xmm8 movdqa %xmm10,%xmm7 punpckldq %xmm11,%xmm10 punpckhdq %xmm9,%xmm6 punpckhdq %xmm11,%xmm7 movdqa %xmm8,%xmm9 punpcklqdq %xmm10,%xmm8 movdqa %xmm6,%xmm11 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm10,%xmm9 punpckhqdq %xmm7,%xmm11 paddd 128-256(%rcx),%xmm12 paddd 144-256(%rcx),%xmm13 paddd 160-256(%rcx),%xmm14 paddd 176-256(%rcx),%xmm15 movdqa %xmm8,0(%rsp) movdqa %xmm9,16(%rsp) movdqa 32(%rsp),%xmm8 movdqa 48(%rsp),%xmm9 movdqa %xmm12,%xmm10 punpckldq %xmm13,%xmm12 movdqa %xmm14,%xmm7 punpckldq %xmm15,%xmm14 punpckhdq %xmm13,%xmm10 punpckhdq %xmm15,%xmm7 movdqa %xmm12,%xmm13 punpcklqdq %xmm14,%xmm12 movdqa %xmm10,%xmm15 punpcklqdq %xmm7,%xmm10 punpckhqdq %xmm14,%xmm13 punpckhqdq %xmm7,%xmm15 paddd 192-256(%rcx),%xmm4 paddd 208-256(%rcx),%xmm5 paddd 224-256(%rcx),%xmm8 paddd 240-256(%rcx),%xmm9 movdqa %xmm6,32(%rsp) movdqa %xmm11,48(%rsp) movdqa %xmm4,%xmm14 punpckldq %xmm5,%xmm4 movdqa %xmm8,%xmm7 punpckldq %xmm9,%xmm8 punpckhdq %xmm5,%xmm14 punpckhdq %xmm9,%xmm7 movdqa %xmm4,%xmm5 punpcklqdq %xmm8,%xmm4 movdqa %xmm14,%xmm9 punpcklqdq %xmm7,%xmm14 punpckhqdq %xmm8,%xmm5 punpckhqdq %xmm7,%xmm9 paddd 256-256(%rcx),%xmm0 paddd 272-256(%rcx),%xmm1 paddd 288-256(%rcx),%xmm2 paddd 304-256(%rcx),%xmm3 movdqa %xmm0,%xmm8 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm8 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm8,%xmm3 punpcklqdq %xmm7,%xmm8 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 cmpq $256,%rdx jb .Ltail4x movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu %xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu %xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 leaq 128(%rsi),%rsi pxor 16(%rsp),%xmm6 pxor %xmm13,%xmm11 pxor %xmm5,%xmm2 pxor %xmm1,%xmm7 movdqu %xmm6,64(%rdi) movdqu 0(%rsi),%xmm6 movdqu %xmm11,80(%rdi) movdqu 16(%rsi),%xmm11 movdqu %xmm2,96(%rdi) movdqu 32(%rsi),%xmm2 movdqu %xmm7,112(%rdi) leaq 128(%rdi),%rdi movdqu 48(%rsi),%xmm7 pxor 32(%rsp),%xmm6 pxor %xmm10,%xmm11 pxor %xmm14,%xmm2 pxor %xmm8,%xmm7 movdqu %xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu %xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 leaq 128(%rsi),%rsi pxor 48(%rsp),%xmm6 pxor %xmm15,%xmm11 pxor %xmm9,%xmm2 pxor %xmm3,%xmm7 movdqu %xmm6,64(%rdi) movdqu %xmm11,80(%rdi) movdqu %xmm2,96(%rdi) movdqu %xmm7,112(%rdi) leaq 128(%rdi),%rdi subq $256,%rdx jnz .Loop_outer4x jmp .Ldone4x .Ltail4x: cmpq $192,%rdx jae .L192_or_more4x cmpq $128,%rdx jae .L128_or_more4x cmpq $64,%rdx jae .L64_or_more4x xorq %r10,%r10 movdqa %xmm12,16(%rsp) movdqa %xmm4,32(%rsp) movdqa %xmm0,48(%rsp) jmp .Loop_tail4x .align 32 .L64_or_more4x: movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu %xmm6,0(%rdi) movdqu %xmm11,16(%rdi) movdqu %xmm2,32(%rdi) movdqu %xmm7,48(%rdi) je .Ldone4x movdqa 16(%rsp),%xmm6 leaq 64(%rsi),%rsi xorq %r10,%r10 movdqa %xmm6,0(%rsp) movdqa %xmm13,16(%rsp) leaq 64(%rdi),%rdi movdqa %xmm5,32(%rsp) subq $64,%rdx movdqa %xmm1,48(%rsp) jmp .Loop_tail4x .align 32 .L128_or_more4x: movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu 
%xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu %xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 pxor 16(%rsp),%xmm6 pxor %xmm13,%xmm11 pxor %xmm5,%xmm2 pxor %xmm1,%xmm7 movdqu %xmm6,64(%rdi) movdqu %xmm11,80(%rdi) movdqu %xmm2,96(%rdi) movdqu %xmm7,112(%rdi) je .Ldone4x movdqa 32(%rsp),%xmm6 leaq 128(%rsi),%rsi xorq %r10,%r10 movdqa %xmm6,0(%rsp) movdqa %xmm10,16(%rsp) leaq 128(%rdi),%rdi movdqa %xmm14,32(%rsp) subq $128,%rdx movdqa %xmm8,48(%rsp) jmp .Loop_tail4x .align 32 .L192_or_more4x: movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu %xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu %xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 leaq 128(%rsi),%rsi pxor 16(%rsp),%xmm6 pxor %xmm13,%xmm11 pxor %xmm5,%xmm2 pxor %xmm1,%xmm7 movdqu %xmm6,64(%rdi) movdqu 0(%rsi),%xmm6 movdqu %xmm11,80(%rdi) movdqu 16(%rsi),%xmm11 movdqu %xmm2,96(%rdi) movdqu 32(%rsi),%xmm2 movdqu %xmm7,112(%rdi) leaq 128(%rdi),%rdi movdqu 48(%rsi),%xmm7 pxor 32(%rsp),%xmm6 pxor %xmm10,%xmm11 pxor %xmm14,%xmm2 pxor %xmm8,%xmm7 movdqu %xmm6,0(%rdi) movdqu %xmm11,16(%rdi) movdqu %xmm2,32(%rdi) movdqu %xmm7,48(%rdi) je .Ldone4x movdqa 48(%rsp),%xmm6 leaq 64(%rsi),%rsi xorq %r10,%r10 movdqa %xmm6,0(%rsp) movdqa %xmm15,16(%rsp) leaq 64(%rdi),%rdi movdqa %xmm9,32(%rsp) subq $192,%rdx movdqa %xmm3,48(%rsp) .Loop_tail4x: movzbl (%rsi,%r10,1),%eax movzbl (%rsp,%r10,1),%ecx leaq 1(%r10),%r10 xorl %ecx,%eax movb %al,-1(%rdi,%r10,1) decq %rdx jnz .Loop_tail4x .Ldone4x: leaq (%r9),%rsp .cfi_def_cfa_register rsp .L4x_epilogue: ret .cfi_endproc .size ChaCha20_ctr32_ssse3_4x,.-ChaCha20_ctr32_ssse3_4x .globl ChaCha20_ctr32_avx2 .hidden ChaCha20_ctr32_avx2 .type ChaCha20_ctr32_avx2,@function .align 32 ChaCha20_ctr32_avx2: .cfi_startproc _CET_ENDBR movq %rsp,%r9 .cfi_def_cfa_register r9 subq $0x280+8,%rsp andq $-32,%rsp vzeroupper vbroadcasti128 .Lsigma(%rip),%ymm11 vbroadcasti128 (%rcx),%ymm3 vbroadcasti128 16(%rcx),%ymm15 vbroadcasti128 (%r8),%ymm7 leaq 256(%rsp),%rcx leaq 512(%rsp),%rax leaq .Lrot16(%rip),%r10 leaq .Lrot24(%rip),%r11 vpshufd $0x00,%ymm11,%ymm8 vpshufd $0x55,%ymm11,%ymm9 vmovdqa %ymm8,128-256(%rcx) vpshufd $0xaa,%ymm11,%ymm10 vmovdqa %ymm9,160-256(%rcx) vpshufd $0xff,%ymm11,%ymm11 vmovdqa %ymm10,192-256(%rcx) vmovdqa %ymm11,224-256(%rcx) vpshufd $0x00,%ymm3,%ymm0 vpshufd $0x55,%ymm3,%ymm1 vmovdqa %ymm0,256-256(%rcx) vpshufd $0xaa,%ymm3,%ymm2 vmovdqa %ymm1,288-256(%rcx) vpshufd $0xff,%ymm3,%ymm3 vmovdqa %ymm2,320-256(%rcx) vmovdqa %ymm3,352-256(%rcx) vpshufd $0x00,%ymm15,%ymm12 vpshufd $0x55,%ymm15,%ymm13 vmovdqa %ymm12,384-512(%rax) vpshufd $0xaa,%ymm15,%ymm14 vmovdqa %ymm13,416-512(%rax) vpshufd $0xff,%ymm15,%ymm15 vmovdqa %ymm14,448-512(%rax) vmovdqa %ymm15,480-512(%rax) vpshufd $0x00,%ymm7,%ymm4 vpshufd $0x55,%ymm7,%ymm5 vpaddd .Lincy(%rip),%ymm4,%ymm4 vpshufd $0xaa,%ymm7,%ymm6 vmovdqa %ymm5,544-512(%rax) vpshufd $0xff,%ymm7,%ymm7 vmovdqa %ymm6,576-512(%rax) vmovdqa %ymm7,608-512(%rax) jmp .Loop_enter8x .align 32 .Loop_outer8x: vmovdqa 128-256(%rcx),%ymm8 vmovdqa 160-256(%rcx),%ymm9 vmovdqa 192-256(%rcx),%ymm10 vmovdqa 224-256(%rcx),%ymm11 vmovdqa 256-256(%rcx),%ymm0 vmovdqa 288-256(%rcx),%ymm1 vmovdqa 320-256(%rcx),%ymm2 vmovdqa 352-256(%rcx),%ymm3 vmovdqa 384-512(%rax),%ymm12 vmovdqa 416-512(%rax),%ymm13 vmovdqa 448-512(%rax),%ymm14 vmovdqa 
480-512(%rax),%ymm15 vmovdqa 512-512(%rax),%ymm4 vmovdqa 544-512(%rax),%ymm5 vmovdqa 576-512(%rax),%ymm6 vmovdqa 608-512(%rax),%ymm7 vpaddd .Leight(%rip),%ymm4,%ymm4 .Loop_enter8x: vmovdqa %ymm14,64(%rsp) vmovdqa %ymm15,96(%rsp) vbroadcasti128 (%r10),%ymm15 vmovdqa %ymm4,512-512(%rax) movl $10,%eax jmp .Loop8x .align 32 .Loop8x: vpaddd %ymm0,%ymm8,%ymm8 vpxor %ymm4,%ymm8,%ymm4 vpshufb %ymm15,%ymm4,%ymm4 vpaddd %ymm1,%ymm9,%ymm9 vpxor %ymm5,%ymm9,%ymm5 vpshufb %ymm15,%ymm5,%ymm5 vpaddd %ymm4,%ymm12,%ymm12 vpxor %ymm0,%ymm12,%ymm0 vpslld $12,%ymm0,%ymm14 vpsrld $20,%ymm0,%ymm0 vpor %ymm0,%ymm14,%ymm0 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm5,%ymm13,%ymm13 vpxor %ymm1,%ymm13,%ymm1 vpslld $12,%ymm1,%ymm15 vpsrld $20,%ymm1,%ymm1 vpor %ymm1,%ymm15,%ymm1 vpaddd %ymm0,%ymm8,%ymm8 vpxor %ymm4,%ymm8,%ymm4 vpshufb %ymm14,%ymm4,%ymm4 vpaddd %ymm1,%ymm9,%ymm9 vpxor %ymm5,%ymm9,%ymm5 vpshufb %ymm14,%ymm5,%ymm5 vpaddd %ymm4,%ymm12,%ymm12 vpxor %ymm0,%ymm12,%ymm0 vpslld $7,%ymm0,%ymm15 vpsrld $25,%ymm0,%ymm0 vpor %ymm0,%ymm15,%ymm0 vbroadcasti128 (%r10),%ymm15 vpaddd %ymm5,%ymm13,%ymm13 vpxor %ymm1,%ymm13,%ymm1 vpslld $7,%ymm1,%ymm14 vpsrld $25,%ymm1,%ymm1 vpor %ymm1,%ymm14,%ymm1 vmovdqa %ymm12,0(%rsp) vmovdqa %ymm13,32(%rsp) vmovdqa 64(%rsp),%ymm12 vmovdqa 96(%rsp),%ymm13 vpaddd %ymm2,%ymm10,%ymm10 vpxor %ymm6,%ymm10,%ymm6 vpshufb %ymm15,%ymm6,%ymm6 vpaddd %ymm3,%ymm11,%ymm11 vpxor %ymm7,%ymm11,%ymm7 vpshufb %ymm15,%ymm7,%ymm7 vpaddd %ymm6,%ymm12,%ymm12 vpxor %ymm2,%ymm12,%ymm2 vpslld $12,%ymm2,%ymm14 vpsrld $20,%ymm2,%ymm2 vpor %ymm2,%ymm14,%ymm2 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm7,%ymm13,%ymm13 vpxor %ymm3,%ymm13,%ymm3 vpslld $12,%ymm3,%ymm15 vpsrld $20,%ymm3,%ymm3 vpor %ymm3,%ymm15,%ymm3 vpaddd %ymm2,%ymm10,%ymm10 vpxor %ymm6,%ymm10,%ymm6 vpshufb %ymm14,%ymm6,%ymm6 vpaddd %ymm3,%ymm11,%ymm11 vpxor %ymm7,%ymm11,%ymm7 vpshufb %ymm14,%ymm7,%ymm7 vpaddd %ymm6,%ymm12,%ymm12 vpxor %ymm2,%ymm12,%ymm2 vpslld $7,%ymm2,%ymm15 vpsrld $25,%ymm2,%ymm2 vpor %ymm2,%ymm15,%ymm2 vbroadcasti128 (%r10),%ymm15 vpaddd %ymm7,%ymm13,%ymm13 vpxor %ymm3,%ymm13,%ymm3 vpslld $7,%ymm3,%ymm14 vpsrld $25,%ymm3,%ymm3 vpor %ymm3,%ymm14,%ymm3 vpaddd %ymm1,%ymm8,%ymm8 vpxor %ymm7,%ymm8,%ymm7 vpshufb %ymm15,%ymm7,%ymm7 vpaddd %ymm2,%ymm9,%ymm9 vpxor %ymm4,%ymm9,%ymm4 vpshufb %ymm15,%ymm4,%ymm4 vpaddd %ymm7,%ymm12,%ymm12 vpxor %ymm1,%ymm12,%ymm1 vpslld $12,%ymm1,%ymm14 vpsrld $20,%ymm1,%ymm1 vpor %ymm1,%ymm14,%ymm1 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm4,%ymm13,%ymm13 vpxor %ymm2,%ymm13,%ymm2 vpslld $12,%ymm2,%ymm15 vpsrld $20,%ymm2,%ymm2 vpor %ymm2,%ymm15,%ymm2 vpaddd %ymm1,%ymm8,%ymm8 vpxor %ymm7,%ymm8,%ymm7 vpshufb %ymm14,%ymm7,%ymm7 vpaddd %ymm2,%ymm9,%ymm9 vpxor %ymm4,%ymm9,%ymm4 vpshufb %ymm14,%ymm4,%ymm4 vpaddd %ymm7,%ymm12,%ymm12 vpxor %ymm1,%ymm12,%ymm1 vpslld $7,%ymm1,%ymm15 vpsrld $25,%ymm1,%ymm1 vpor %ymm1,%ymm15,%ymm1 vbroadcasti128 (%r10),%ymm15 vpaddd %ymm4,%ymm13,%ymm13 vpxor %ymm2,%ymm13,%ymm2 vpslld $7,%ymm2,%ymm14 vpsrld $25,%ymm2,%ymm2 vpor %ymm2,%ymm14,%ymm2 vmovdqa %ymm12,64(%rsp) vmovdqa %ymm13,96(%rsp) vmovdqa 0(%rsp),%ymm12 vmovdqa 32(%rsp),%ymm13 vpaddd %ymm3,%ymm10,%ymm10 vpxor %ymm5,%ymm10,%ymm5 vpshufb %ymm15,%ymm5,%ymm5 vpaddd %ymm0,%ymm11,%ymm11 vpxor %ymm6,%ymm11,%ymm6 vpshufb %ymm15,%ymm6,%ymm6 vpaddd %ymm5,%ymm12,%ymm12 vpxor %ymm3,%ymm12,%ymm3 vpslld $12,%ymm3,%ymm14 vpsrld $20,%ymm3,%ymm3 vpor %ymm3,%ymm14,%ymm3 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm6,%ymm13,%ymm13 vpxor %ymm0,%ymm13,%ymm0 vpslld $12,%ymm0,%ymm15 vpsrld $20,%ymm0,%ymm0 vpor %ymm0,%ymm15,%ymm0 vpaddd %ymm3,%ymm10,%ymm10 vpxor 
%ymm5,%ymm10,%ymm5 vpshufb %ymm14,%ymm5,%ymm5 vpaddd %ymm0,%ymm11,%ymm11 vpxor %ymm6,%ymm11,%ymm6 vpshufb %ymm14,%ymm6,%ymm6 vpaddd %ymm5,%ymm12,%ymm12 vpxor %ymm3,%ymm12,%ymm3 vpslld $7,%ymm3,%ymm15 vpsrld $25,%ymm3,%ymm3 vpor %ymm3,%ymm15,%ymm3 vbroadcasti128 (%r10),%ymm15 vpaddd %ymm6,%ymm13,%ymm13 vpxor %ymm0,%ymm13,%ymm0 vpslld $7,%ymm0,%ymm14 vpsrld $25,%ymm0,%ymm0 vpor %ymm0,%ymm14,%ymm0 decl %eax jnz .Loop8x leaq 512(%rsp),%rax vpaddd 128-256(%rcx),%ymm8,%ymm8 vpaddd 160-256(%rcx),%ymm9,%ymm9 vpaddd 192-256(%rcx),%ymm10,%ymm10 vpaddd 224-256(%rcx),%ymm11,%ymm11 vpunpckldq %ymm9,%ymm8,%ymm14 vpunpckldq %ymm11,%ymm10,%ymm15 vpunpckhdq %ymm9,%ymm8,%ymm8 vpunpckhdq %ymm11,%ymm10,%ymm10 vpunpcklqdq %ymm15,%ymm14,%ymm9 vpunpckhqdq %ymm15,%ymm14,%ymm14 vpunpcklqdq %ymm10,%ymm8,%ymm11 vpunpckhqdq %ymm10,%ymm8,%ymm8 vpaddd 256-256(%rcx),%ymm0,%ymm0 vpaddd 288-256(%rcx),%ymm1,%ymm1 vpaddd 320-256(%rcx),%ymm2,%ymm2 vpaddd 352-256(%rcx),%ymm3,%ymm3 vpunpckldq %ymm1,%ymm0,%ymm10 vpunpckldq %ymm3,%ymm2,%ymm15 vpunpckhdq %ymm1,%ymm0,%ymm0 vpunpckhdq %ymm3,%ymm2,%ymm2 vpunpcklqdq %ymm15,%ymm10,%ymm1 vpunpckhqdq %ymm15,%ymm10,%ymm10 vpunpcklqdq %ymm2,%ymm0,%ymm3 vpunpckhqdq %ymm2,%ymm0,%ymm0 vperm2i128 $0x20,%ymm1,%ymm9,%ymm15 vperm2i128 $0x31,%ymm1,%ymm9,%ymm1 vperm2i128 $0x20,%ymm10,%ymm14,%ymm9 vperm2i128 $0x31,%ymm10,%ymm14,%ymm10 vperm2i128 $0x20,%ymm3,%ymm11,%ymm14 vperm2i128 $0x31,%ymm3,%ymm11,%ymm3 vperm2i128 $0x20,%ymm0,%ymm8,%ymm11 vperm2i128 $0x31,%ymm0,%ymm8,%ymm0 vmovdqa %ymm15,0(%rsp) vmovdqa %ymm9,32(%rsp) vmovdqa 64(%rsp),%ymm15 vmovdqa 96(%rsp),%ymm9 vpaddd 384-512(%rax),%ymm12,%ymm12 vpaddd 416-512(%rax),%ymm13,%ymm13 vpaddd 448-512(%rax),%ymm15,%ymm15 vpaddd 480-512(%rax),%ymm9,%ymm9 vpunpckldq %ymm13,%ymm12,%ymm2 vpunpckldq %ymm9,%ymm15,%ymm8 vpunpckhdq %ymm13,%ymm12,%ymm12 vpunpckhdq %ymm9,%ymm15,%ymm15 vpunpcklqdq %ymm8,%ymm2,%ymm13 vpunpckhqdq %ymm8,%ymm2,%ymm2 vpunpcklqdq %ymm15,%ymm12,%ymm9 vpunpckhqdq %ymm15,%ymm12,%ymm12 vpaddd 512-512(%rax),%ymm4,%ymm4 vpaddd 544-512(%rax),%ymm5,%ymm5 vpaddd 576-512(%rax),%ymm6,%ymm6 vpaddd 608-512(%rax),%ymm7,%ymm7 vpunpckldq %ymm5,%ymm4,%ymm15 vpunpckldq %ymm7,%ymm6,%ymm8 vpunpckhdq %ymm5,%ymm4,%ymm4 vpunpckhdq %ymm7,%ymm6,%ymm6 vpunpcklqdq %ymm8,%ymm15,%ymm5 vpunpckhqdq %ymm8,%ymm15,%ymm15 vpunpcklqdq %ymm6,%ymm4,%ymm7 vpunpckhqdq %ymm6,%ymm4,%ymm4 vperm2i128 $0x20,%ymm5,%ymm13,%ymm8 vperm2i128 $0x31,%ymm5,%ymm13,%ymm5 vperm2i128 $0x20,%ymm15,%ymm2,%ymm13 vperm2i128 $0x31,%ymm15,%ymm2,%ymm15 vperm2i128 $0x20,%ymm7,%ymm9,%ymm2 vperm2i128 $0x31,%ymm7,%ymm9,%ymm7 vperm2i128 $0x20,%ymm4,%ymm12,%ymm9 vperm2i128 $0x31,%ymm4,%ymm12,%ymm4 vmovdqa 0(%rsp),%ymm6 vmovdqa 32(%rsp),%ymm12 cmpq $512,%rdx jb .Ltail8x vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 leaq 128(%rsi),%rsi vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) leaq 128(%rdi),%rdi vpxor 0(%rsi),%ymm12,%ymm12 vpxor 32(%rsi),%ymm13,%ymm13 vpxor 64(%rsi),%ymm10,%ymm10 vpxor 96(%rsi),%ymm15,%ymm15 leaq 128(%rsi),%rsi vmovdqu %ymm12,0(%rdi) vmovdqu %ymm13,32(%rdi) vmovdqu %ymm10,64(%rdi) vmovdqu %ymm15,96(%rdi) leaq 128(%rdi),%rdi vpxor 0(%rsi),%ymm14,%ymm14 vpxor 32(%rsi),%ymm2,%ymm2 vpxor 64(%rsi),%ymm3,%ymm3 vpxor 96(%rsi),%ymm7,%ymm7 leaq 128(%rsi),%rsi vmovdqu %ymm14,0(%rdi) vmovdqu %ymm2,32(%rdi) vmovdqu %ymm3,64(%rdi) vmovdqu %ymm7,96(%rdi) leaq 128(%rdi),%rdi vpxor 0(%rsi),%ymm11,%ymm11 vpxor 32(%rsi),%ymm9,%ymm9 vpxor 64(%rsi),%ymm0,%ymm0 vpxor 96(%rsi),%ymm4,%ymm4 leaq 
128(%rsi),%rsi vmovdqu %ymm11,0(%rdi) vmovdqu %ymm9,32(%rdi) vmovdqu %ymm0,64(%rdi) vmovdqu %ymm4,96(%rdi) leaq 128(%rdi),%rdi subq $512,%rdx jnz .Loop_outer8x jmp .Ldone8x .Ltail8x: cmpq $448,%rdx jae .L448_or_more8x cmpq $384,%rdx jae .L384_or_more8x cmpq $320,%rdx jae .L320_or_more8x cmpq $256,%rdx jae .L256_or_more8x cmpq $192,%rdx jae .L192_or_more8x cmpq $128,%rdx jae .L128_or_more8x cmpq $64,%rdx jae .L64_or_more8x xorq %r10,%r10 vmovdqa %ymm6,0(%rsp) vmovdqa %ymm8,32(%rsp) jmp .Loop_tail8x .align 32 .L64_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) je .Ldone8x leaq 64(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm1,0(%rsp) leaq 64(%rdi),%rdi subq $64,%rdx vmovdqa %ymm5,32(%rsp) jmp .Loop_tail8x .align 32 .L128_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) je .Ldone8x leaq 128(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm12,0(%rsp) leaq 128(%rdi),%rdi subq $128,%rdx vmovdqa %ymm13,32(%rsp) jmp .Loop_tail8x .align 32 .L192_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) je .Ldone8x leaq 192(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm10,0(%rsp) leaq 192(%rdi),%rdi subq $192,%rdx vmovdqa %ymm15,32(%rsp) jmp .Loop_tail8x .align 32 .L256_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu %ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) je .Ldone8x leaq 256(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm14,0(%rsp) leaq 256(%rdi),%rdi subq $256,%rdx vmovdqa %ymm2,32(%rsp) jmp .Loop_tail8x .align 32 .L320_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vpxor 256(%rsi),%ymm14,%ymm14 vpxor 288(%rsi),%ymm2,%ymm2 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu %ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) vmovdqu %ymm14,256(%rdi) vmovdqu %ymm2,288(%rdi) je .Ldone8x leaq 320(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm3,0(%rsp) leaq 320(%rdi),%rdi subq $320,%rdx vmovdqa %ymm7,32(%rsp) jmp .Loop_tail8x .align 32 .L384_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vpxor 256(%rsi),%ymm14,%ymm14 vpxor 288(%rsi),%ymm2,%ymm2 vpxor 320(%rsi),%ymm3,%ymm3 vpxor 352(%rsi),%ymm7,%ymm7 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu %ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) vmovdqu %ymm14,256(%rdi) vmovdqu %ymm2,288(%rdi) vmovdqu %ymm3,320(%rdi) vmovdqu 
%ymm7,352(%rdi) je .Ldone8x leaq 384(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm11,0(%rsp) leaq 384(%rdi),%rdi subq $384,%rdx vmovdqa %ymm9,32(%rsp) jmp .Loop_tail8x .align 32 .L448_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vpxor 256(%rsi),%ymm14,%ymm14 vpxor 288(%rsi),%ymm2,%ymm2 vpxor 320(%rsi),%ymm3,%ymm3 vpxor 352(%rsi),%ymm7,%ymm7 vpxor 384(%rsi),%ymm11,%ymm11 vpxor 416(%rsi),%ymm9,%ymm9 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu %ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) vmovdqu %ymm14,256(%rdi) vmovdqu %ymm2,288(%rdi) vmovdqu %ymm3,320(%rdi) vmovdqu %ymm7,352(%rdi) vmovdqu %ymm11,384(%rdi) vmovdqu %ymm9,416(%rdi) je .Ldone8x leaq 448(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm0,0(%rsp) leaq 448(%rdi),%rdi subq $448,%rdx vmovdqa %ymm4,32(%rsp) .Loop_tail8x: movzbl (%rsi,%r10,1),%eax movzbl (%rsp,%r10,1),%ecx leaq 1(%r10),%r10 xorl %ecx,%eax movb %al,-1(%rdi,%r10,1) decq %rdx jnz .Loop_tail8x .Ldone8x: vzeroall leaq (%r9),%rsp .cfi_def_cfa_register rsp .L8x_epilogue: ret .cfi_endproc .size ChaCha20_ctr32_avx2,.-ChaCha20_ctr32_avx2 #endif
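For readers skimming the dump: the .Ltail8x / .Loop_tail8x paths above finish any residue shorter than a full 64-byte block by spilling the next unused keystream block to the stack and XOR-ing it into the input one byte at a time (the movzbl / xorl / movb sequence). A minimal C sketch of that final byte-wise step, assuming keystream points at the spilled block; the identifiers are illustrative and not taken from the source:

#include <stddef.h>
#include <stdint.h>

/* Byte-wise tail XOR, mirroring .Loop_tail8x: `remaining` is the
 * leftover byte count (< 64) and `keystream` is the block the vector
 * code parked on the stack before falling through to the tail loop. */
static void chacha_tail_xor(uint8_t *out, const uint8_t *in,
                            const uint8_t *keystream, size_t remaining) {
    for (size_t i = 0; i < remaining; i++) {
        out[i] = (uint8_t)(in[i] ^ keystream[i]);
    }
}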
marvin-hansen/iggy-streaming-system
19,660
thirdparty/crates/ring-0.17.9/pregenerated/aesni-gcm-x86_64-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .type _aesni_ctr32_ghash_6x,@function .align 32 _aesni_ctr32_ghash_6x: .cfi_startproc vmovdqu 32(%r11),%xmm2 subq $6,%rdx vpxor %xmm4,%xmm4,%xmm4 vmovdqu 0-128(%rcx),%xmm15 vpaddb %xmm2,%xmm1,%xmm10 vpaddb %xmm2,%xmm10,%xmm11 vpaddb %xmm2,%xmm11,%xmm12 vpaddb %xmm2,%xmm12,%xmm13 vpaddb %xmm2,%xmm13,%xmm14 vpxor %xmm15,%xmm1,%xmm9 vmovdqu %xmm4,16+8(%rsp) jmp .Loop6x .align 32 .Loop6x: addl $100663296,%ebx jc .Lhandle_ctr32 vmovdqu 0-32(%r9),%xmm3 vpaddb %xmm2,%xmm14,%xmm1 vpxor %xmm15,%xmm10,%xmm10 vpxor %xmm15,%xmm11,%xmm11 .Lresume_ctr32: vmovdqu %xmm1,(%r8) vpclmulqdq $0x10,%xmm3,%xmm7,%xmm5 vpxor %xmm15,%xmm12,%xmm12 vmovups 16-128(%rcx),%xmm2 vpclmulqdq $0x01,%xmm3,%xmm7,%xmm6 xorq %r12,%r12 cmpq %r14,%r15 vaesenc %xmm2,%xmm9,%xmm9 vmovdqu 48+8(%rsp),%xmm0 vpxor %xmm15,%xmm13,%xmm13 vpclmulqdq $0x00,%xmm3,%xmm7,%xmm1 vaesenc %xmm2,%xmm10,%xmm10 vpxor %xmm15,%xmm14,%xmm14 setnc %r12b vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7 vaesenc %xmm2,%xmm11,%xmm11 vmovdqu 16-32(%r9),%xmm3 negq %r12 vaesenc %xmm2,%xmm12,%xmm12 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm3,%xmm0,%xmm5 vpxor %xmm4,%xmm8,%xmm8 vaesenc %xmm2,%xmm13,%xmm13 vpxor %xmm5,%xmm1,%xmm4 andq $0x60,%r12 vmovups 32-128(%rcx),%xmm15 vpclmulqdq $0x10,%xmm3,%xmm0,%xmm1 vaesenc %xmm2,%xmm14,%xmm14 vpclmulqdq $0x01,%xmm3,%xmm0,%xmm2 leaq (%r14,%r12,1),%r14 vaesenc %xmm15,%xmm9,%xmm9 vpxor 16+8(%rsp),%xmm8,%xmm8 vpclmulqdq $0x11,%xmm3,%xmm0,%xmm3 vmovdqu 64+8(%rsp),%xmm0 vaesenc %xmm15,%xmm10,%xmm10 movbeq 88(%r14),%r13 vaesenc %xmm15,%xmm11,%xmm11 movbeq 80(%r14),%r12 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,32+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,40+8(%rsp) vmovdqu 48-32(%r9),%xmm5 vaesenc %xmm15,%xmm14,%xmm14 vmovups 48-128(%rcx),%xmm15 vpxor %xmm1,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm5,%xmm0,%xmm1 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm2,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm5,%xmm0,%xmm2 vaesenc %xmm15,%xmm10,%xmm10 vpxor %xmm3,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm5,%xmm0,%xmm3 vaesenc %xmm15,%xmm11,%xmm11 vpclmulqdq $0x11,%xmm5,%xmm0,%xmm5 vmovdqu 80+8(%rsp),%xmm0 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vpxor %xmm1,%xmm4,%xmm4 vmovdqu 64-32(%r9),%xmm1 vaesenc %xmm15,%xmm14,%xmm14 vmovups 64-128(%rcx),%xmm15 vpxor %xmm2,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm3,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3 vaesenc %xmm15,%xmm10,%xmm10 movbeq 72(%r14),%r13 vpxor %xmm5,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm1,%xmm0,%xmm5 vaesenc %xmm15,%xmm11,%xmm11 movbeq 64(%r14),%r12 vpclmulqdq $0x11,%xmm1,%xmm0,%xmm1 vmovdqu 96+8(%rsp),%xmm0 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,48+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,56+8(%rsp) vpxor %xmm2,%xmm4,%xmm4 vmovdqu 96-32(%r9),%xmm2 vaesenc %xmm15,%xmm14,%xmm14 vmovups 80-128(%rcx),%xmm15 vpxor %xmm3,%xmm6,%xmm6 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm2,%xmm0,%xmm5 vaesenc %xmm15,%xmm10,%xmm10 movbeq 56(%r14),%r13 vpxor %xmm1,%xmm7,%xmm7 vpclmulqdq $0x01,%xmm2,%xmm0,%xmm1 vpxor 112+8(%rsp),%xmm8,%xmm8 vaesenc %xmm15,%xmm11,%xmm11 movbeq 48(%r14),%r12 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm2 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,64+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,72+8(%rsp) vpxor %xmm3,%xmm4,%xmm4 vmovdqu 112-32(%r9),%xmm3 vaesenc 
%xmm15,%xmm14,%xmm14 vmovups 96-128(%rcx),%xmm15 vpxor %xmm5,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm5 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm1,%xmm6,%xmm6 vpclmulqdq $0x01,%xmm3,%xmm8,%xmm1 vaesenc %xmm15,%xmm10,%xmm10 movbeq 40(%r14),%r13 vpxor %xmm2,%xmm7,%xmm7 vpclmulqdq $0x00,%xmm3,%xmm8,%xmm2 vaesenc %xmm15,%xmm11,%xmm11 movbeq 32(%r14),%r12 vpclmulqdq $0x11,%xmm3,%xmm8,%xmm8 vaesenc %xmm15,%xmm12,%xmm12 movq %r13,80+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 movq %r12,88+8(%rsp) vpxor %xmm5,%xmm6,%xmm6 vaesenc %xmm15,%xmm14,%xmm14 vpxor %xmm1,%xmm6,%xmm6 vmovups 112-128(%rcx),%xmm15 vpslldq $8,%xmm6,%xmm5 vpxor %xmm2,%xmm4,%xmm4 vmovdqu 16(%r11),%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor %xmm8,%xmm7,%xmm7 vaesenc %xmm15,%xmm10,%xmm10 vpxor %xmm5,%xmm4,%xmm4 movbeq 24(%r14),%r13 vaesenc %xmm15,%xmm11,%xmm11 movbeq 16(%r14),%r12 vpalignr $8,%xmm4,%xmm4,%xmm0 vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 movq %r13,96+8(%rsp) vaesenc %xmm15,%xmm12,%xmm12 movq %r12,104+8(%rsp) vaesenc %xmm15,%xmm13,%xmm13 vmovups 128-128(%rcx),%xmm1 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vmovups 144-128(%rcx),%xmm15 vaesenc %xmm1,%xmm10,%xmm10 vpsrldq $8,%xmm6,%xmm6 vaesenc %xmm1,%xmm11,%xmm11 vpxor %xmm6,%xmm7,%xmm7 vaesenc %xmm1,%xmm12,%xmm12 vpxor %xmm0,%xmm4,%xmm4 movbeq 8(%r14),%r13 vaesenc %xmm1,%xmm13,%xmm13 movbeq 0(%r14),%r12 vaesenc %xmm1,%xmm14,%xmm14 vmovups 160-128(%rcx),%xmm1 cmpl $11,%r10d jb .Lenc_tail vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovups 176-128(%rcx),%xmm15 vaesenc %xmm1,%xmm14,%xmm14 vmovups 192-128(%rcx),%xmm1 vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vaesenc %xmm1,%xmm9,%xmm9 vaesenc %xmm1,%xmm10,%xmm10 vaesenc %xmm1,%xmm11,%xmm11 vaesenc %xmm1,%xmm12,%xmm12 vaesenc %xmm1,%xmm13,%xmm13 vmovups 208-128(%rcx),%xmm15 vaesenc %xmm1,%xmm14,%xmm14 vmovups 224-128(%rcx),%xmm1 jmp .Lenc_tail .align 32 .Lhandle_ctr32: vmovdqu (%r11),%xmm0 vpshufb %xmm0,%xmm1,%xmm6 vmovdqu 48(%r11),%xmm5 vpaddd 64(%r11),%xmm6,%xmm10 vpaddd %xmm5,%xmm6,%xmm11 vmovdqu 0-32(%r9),%xmm3 vpaddd %xmm5,%xmm10,%xmm12 vpshufb %xmm0,%xmm10,%xmm10 vpaddd %xmm5,%xmm11,%xmm13 vpshufb %xmm0,%xmm11,%xmm11 vpxor %xmm15,%xmm10,%xmm10 vpaddd %xmm5,%xmm12,%xmm14 vpshufb %xmm0,%xmm12,%xmm12 vpxor %xmm15,%xmm11,%xmm11 vpaddd %xmm5,%xmm13,%xmm1 vpshufb %xmm0,%xmm13,%xmm13 vpshufb %xmm0,%xmm14,%xmm14 vpshufb %xmm0,%xmm1,%xmm1 jmp .Lresume_ctr32 .align 32 .Lenc_tail: vaesenc %xmm15,%xmm9,%xmm9 vmovdqu %xmm7,16+8(%rsp) vpalignr $8,%xmm4,%xmm4,%xmm8 vaesenc %xmm15,%xmm10,%xmm10 vpclmulqdq $0x10,%xmm3,%xmm4,%xmm4 vpxor 0(%rdi),%xmm1,%xmm2 vaesenc %xmm15,%xmm11,%xmm11 vpxor 16(%rdi),%xmm1,%xmm0 vaesenc %xmm15,%xmm12,%xmm12 vpxor 32(%rdi),%xmm1,%xmm5 vaesenc %xmm15,%xmm13,%xmm13 vpxor 48(%rdi),%xmm1,%xmm6 vaesenc %xmm15,%xmm14,%xmm14 vpxor 64(%rdi),%xmm1,%xmm7 vpxor 80(%rdi),%xmm1,%xmm3 vmovdqu (%r8),%xmm1 vaesenclast %xmm2,%xmm9,%xmm9 vmovdqu 32(%r11),%xmm2 vaesenclast %xmm0,%xmm10,%xmm10 vpaddb %xmm2,%xmm1,%xmm0 movq %r13,112+8(%rsp) leaq 96(%rdi),%rdi prefetcht0 512(%rdi) prefetcht0 576(%rdi) vaesenclast %xmm5,%xmm11,%xmm11 vpaddb %xmm2,%xmm0,%xmm5 movq %r12,120+8(%rsp) leaq 96(%rsi),%rsi vmovdqu 0-128(%rcx),%xmm15 vaesenclast 
%xmm6,%xmm12,%xmm12 vpaddb %xmm2,%xmm5,%xmm6 vaesenclast %xmm7,%xmm13,%xmm13 vpaddb %xmm2,%xmm6,%xmm7 vaesenclast %xmm3,%xmm14,%xmm14 vpaddb %xmm2,%xmm7,%xmm3 addq $0x60,%rax subq $0x6,%rdx jc .L6x_done vmovups %xmm9,-96(%rsi) vpxor %xmm15,%xmm1,%xmm9 vmovups %xmm10,-80(%rsi) vmovdqa %xmm0,%xmm10 vmovups %xmm11,-64(%rsi) vmovdqa %xmm5,%xmm11 vmovups %xmm12,-48(%rsi) vmovdqa %xmm6,%xmm12 vmovups %xmm13,-32(%rsi) vmovdqa %xmm7,%xmm13 vmovups %xmm14,-16(%rsi) vmovdqa %xmm3,%xmm14 vmovdqu 32+8(%rsp),%xmm7 jmp .Loop6x .L6x_done: vpxor 16+8(%rsp),%xmm8,%xmm8 vpxor %xmm4,%xmm8,%xmm8 ret .cfi_endproc .size _aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x .globl aesni_gcm_decrypt .hidden aesni_gcm_decrypt .type aesni_gcm_decrypt,@function .align 32 aesni_gcm_decrypt: .cfi_startproc _CET_ENDBR xorq %rax,%rax cmpq $0x60,%rdx jb .Lgcm_dec_abort pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 movq %rsp,%rbp .cfi_def_cfa_register %rbp pushq %rbx .cfi_offset %rbx,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 vzeroupper movq 16(%rbp),%r12 vmovdqu (%r8),%xmm1 addq $-128,%rsp movl 12(%r8),%ebx leaq .Lbswap_mask(%rip),%r11 leaq -128(%rcx),%r14 movq $0xf80,%r15 vmovdqu (%r12),%xmm8 andq $-128,%rsp vmovdqu (%r11),%xmm0 leaq 128(%rcx),%rcx leaq 32(%r9),%r9 movl 240-128(%rcx),%r10d vpshufb %xmm0,%xmm8,%xmm8 andq %r15,%r14 andq %rsp,%r15 subq %r14,%r15 jc .Ldec_no_key_aliasing cmpq $768,%r15 jnc .Ldec_no_key_aliasing subq %r15,%rsp .Ldec_no_key_aliasing: vmovdqu 80(%rdi),%xmm7 movq %rdi,%r14 vmovdqu 64(%rdi),%xmm4 leaq -192(%rdi,%rdx,1),%r15 vmovdqu 48(%rdi),%xmm5 shrq $4,%rdx xorq %rax,%rax vmovdqu 32(%rdi),%xmm6 vpshufb %xmm0,%xmm7,%xmm7 vmovdqu 16(%rdi),%xmm2 vpshufb %xmm0,%xmm4,%xmm4 vmovdqu (%rdi),%xmm3 vpshufb %xmm0,%xmm5,%xmm5 vmovdqu %xmm4,48(%rsp) vpshufb %xmm0,%xmm6,%xmm6 vmovdqu %xmm5,64(%rsp) vpshufb %xmm0,%xmm2,%xmm2 vmovdqu %xmm6,80(%rsp) vpshufb %xmm0,%xmm3,%xmm3 vmovdqu %xmm2,96(%rsp) vmovdqu %xmm3,112(%rsp) call _aesni_ctr32_ghash_6x movq 16(%rbp),%r12 vmovups %xmm9,-96(%rsi) vmovups %xmm10,-80(%rsi) vmovups %xmm11,-64(%rsi) vmovups %xmm12,-48(%rsi) vmovups %xmm13,-32(%rsi) vmovups %xmm14,-16(%rsi) vpshufb (%r11),%xmm8,%xmm8 vmovdqu %xmm8,(%r12) vzeroupper leaq -40(%rbp),%rsp .cfi_def_cfa %rsp, 0x38 popq %r15 .cfi_adjust_cfa_offset -8 .cfi_restore %r15 popq %r14 .cfi_adjust_cfa_offset -8 .cfi_restore %r14 popq %r13 .cfi_adjust_cfa_offset -8 .cfi_restore %r13 popq %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 popq %rbx .cfi_adjust_cfa_offset -8 .cfi_restore %rbx popq %rbp .cfi_adjust_cfa_offset -8 .cfi_restore %rbp .Lgcm_dec_abort: ret .cfi_endproc .size aesni_gcm_decrypt,.-aesni_gcm_decrypt .type _aesni_ctr32_6x,@function .align 32 _aesni_ctr32_6x: .cfi_startproc vmovdqu 0-128(%rcx),%xmm4 vmovdqu 32(%r11),%xmm2 leaq -1(%r10),%r13 vmovups 16-128(%rcx),%xmm15 leaq 32-128(%rcx),%r12 vpxor %xmm4,%xmm1,%xmm9 addl $100663296,%ebx jc .Lhandle_ctr32_2 vpaddb %xmm2,%xmm1,%xmm10 vpaddb %xmm2,%xmm10,%xmm11 vpxor %xmm4,%xmm10,%xmm10 vpaddb %xmm2,%xmm11,%xmm12 vpxor %xmm4,%xmm11,%xmm11 vpaddb %xmm2,%xmm12,%xmm13 vpxor %xmm4,%xmm12,%xmm12 vpaddb %xmm2,%xmm13,%xmm14 vpxor %xmm4,%xmm13,%xmm13 vpaddb %xmm2,%xmm14,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp .Loop_ctr32 .align 16 .Loop_ctr32: vaesenc %xmm15,%xmm9,%xmm9 vaesenc %xmm15,%xmm10,%xmm10 vaesenc %xmm15,%xmm11,%xmm11 vaesenc %xmm15,%xmm12,%xmm12 vaesenc %xmm15,%xmm13,%xmm13 vaesenc %xmm15,%xmm14,%xmm14 vmovups (%r12),%xmm15 leaq 16(%r12),%r12 decl %r13d jnz .Loop_ctr32 
vmovdqu (%r12),%xmm3 vaesenc %xmm15,%xmm9,%xmm9 vpxor 0(%rdi),%xmm3,%xmm4 vaesenc %xmm15,%xmm10,%xmm10 vpxor 16(%rdi),%xmm3,%xmm5 vaesenc %xmm15,%xmm11,%xmm11 vpxor 32(%rdi),%xmm3,%xmm6 vaesenc %xmm15,%xmm12,%xmm12 vpxor 48(%rdi),%xmm3,%xmm8 vaesenc %xmm15,%xmm13,%xmm13 vpxor 64(%rdi),%xmm3,%xmm2 vaesenc %xmm15,%xmm14,%xmm14 vpxor 80(%rdi),%xmm3,%xmm3 leaq 96(%rdi),%rdi vaesenclast %xmm4,%xmm9,%xmm9 vaesenclast %xmm5,%xmm10,%xmm10 vaesenclast %xmm6,%xmm11,%xmm11 vaesenclast %xmm8,%xmm12,%xmm12 vaesenclast %xmm2,%xmm13,%xmm13 vaesenclast %xmm3,%xmm14,%xmm14 vmovups %xmm9,0(%rsi) vmovups %xmm10,16(%rsi) vmovups %xmm11,32(%rsi) vmovups %xmm12,48(%rsi) vmovups %xmm13,64(%rsi) vmovups %xmm14,80(%rsi) leaq 96(%rsi),%rsi ret .align 32 .Lhandle_ctr32_2: vpshufb %xmm0,%xmm1,%xmm6 vmovdqu 48(%r11),%xmm5 vpaddd 64(%r11),%xmm6,%xmm10 vpaddd %xmm5,%xmm6,%xmm11 vpaddd %xmm5,%xmm10,%xmm12 vpshufb %xmm0,%xmm10,%xmm10 vpaddd %xmm5,%xmm11,%xmm13 vpshufb %xmm0,%xmm11,%xmm11 vpxor %xmm4,%xmm10,%xmm10 vpaddd %xmm5,%xmm12,%xmm14 vpshufb %xmm0,%xmm12,%xmm12 vpxor %xmm4,%xmm11,%xmm11 vpaddd %xmm5,%xmm13,%xmm1 vpshufb %xmm0,%xmm13,%xmm13 vpxor %xmm4,%xmm12,%xmm12 vpshufb %xmm0,%xmm14,%xmm14 vpxor %xmm4,%xmm13,%xmm13 vpshufb %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm14,%xmm14 jmp .Loop_ctr32 .cfi_endproc .size _aesni_ctr32_6x,.-_aesni_ctr32_6x .globl aesni_gcm_encrypt .hidden aesni_gcm_encrypt .type aesni_gcm_encrypt,@function .align 32 aesni_gcm_encrypt: .cfi_startproc _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST .extern BORINGSSL_function_hit .hidden BORINGSSL_function_hit movb $1,BORINGSSL_function_hit+2(%rip) #endif xorq %rax,%rax cmpq $288,%rdx jb .Lgcm_enc_abort pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 movq %rsp,%rbp .cfi_def_cfa_register %rbp pushq %rbx .cfi_offset %rbx,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 vzeroupper vmovdqu (%r8),%xmm1 addq $-128,%rsp movl 12(%r8),%ebx leaq .Lbswap_mask(%rip),%r11 leaq -128(%rcx),%r14 movq $0xf80,%r15 leaq 128(%rcx),%rcx vmovdqu (%r11),%xmm0 andq $-128,%rsp movl 240-128(%rcx),%r10d andq %r15,%r14 andq %rsp,%r15 subq %r14,%r15 jc .Lenc_no_key_aliasing cmpq $768,%r15 jnc .Lenc_no_key_aliasing subq %r15,%rsp .Lenc_no_key_aliasing: movq %rsi,%r14 leaq -192(%rsi,%rdx,1),%r15 shrq $4,%rdx call _aesni_ctr32_6x vpshufb %xmm0,%xmm9,%xmm8 vpshufb %xmm0,%xmm10,%xmm2 vmovdqu %xmm8,112(%rsp) vpshufb %xmm0,%xmm11,%xmm4 vmovdqu %xmm2,96(%rsp) vpshufb %xmm0,%xmm12,%xmm5 vmovdqu %xmm4,80(%rsp) vpshufb %xmm0,%xmm13,%xmm6 vmovdqu %xmm5,64(%rsp) vpshufb %xmm0,%xmm14,%xmm7 vmovdqu %xmm6,48(%rsp) call _aesni_ctr32_6x movq 16(%rbp),%r12 leaq 32(%r9),%r9 vmovdqu (%r12),%xmm8 subq $12,%rdx movq $192,%rax vpshufb %xmm0,%xmm8,%xmm8 call _aesni_ctr32_ghash_6x vmovdqu 32(%rsp),%xmm7 vmovdqu (%r11),%xmm0 vmovdqu 0-32(%r9),%xmm3 vpunpckhqdq %xmm7,%xmm7,%xmm1 vmovdqu 32-32(%r9),%xmm15 vmovups %xmm9,-96(%rsi) vpshufb %xmm0,%xmm9,%xmm9 vpxor %xmm7,%xmm1,%xmm1 vmovups %xmm10,-80(%rsi) vpshufb %xmm0,%xmm10,%xmm10 vmovups %xmm11,-64(%rsi) vpshufb %xmm0,%xmm11,%xmm11 vmovups %xmm12,-48(%rsi) vpshufb %xmm0,%xmm12,%xmm12 vmovups %xmm13,-32(%rsi) vpshufb %xmm0,%xmm13,%xmm13 vmovups %xmm14,-16(%rsi) vpshufb %xmm0,%xmm14,%xmm14 vmovdqu %xmm9,16(%rsp) vmovdqu 48(%rsp),%xmm6 vmovdqu 16-32(%r9),%xmm0 vpunpckhqdq %xmm6,%xmm6,%xmm2 vpclmulqdq $0x00,%xmm3,%xmm7,%xmm5 vpxor %xmm6,%xmm2,%xmm2 vpclmulqdq $0x11,%xmm3,%xmm7,%xmm7 vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1 vmovdqu 64(%rsp),%xmm9 vpclmulqdq $0x00,%xmm0,%xmm6,%xmm4 
vmovdqu 48-32(%r9),%xmm3 vpxor %xmm5,%xmm4,%xmm4 vpunpckhqdq %xmm9,%xmm9,%xmm5 vpclmulqdq $0x11,%xmm0,%xmm6,%xmm6 vpxor %xmm9,%xmm5,%xmm5 vpxor %xmm7,%xmm6,%xmm6 vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2 vmovdqu 80-32(%r9),%xmm15 vpxor %xmm1,%xmm2,%xmm2 vmovdqu 80(%rsp),%xmm1 vpclmulqdq $0x00,%xmm3,%xmm9,%xmm7 vmovdqu 64-32(%r9),%xmm0 vpxor %xmm4,%xmm7,%xmm7 vpunpckhqdq %xmm1,%xmm1,%xmm4 vpclmulqdq $0x11,%xmm3,%xmm9,%xmm9 vpxor %xmm1,%xmm4,%xmm4 vpxor %xmm6,%xmm9,%xmm9 vpclmulqdq $0x00,%xmm15,%xmm5,%xmm5 vpxor %xmm2,%xmm5,%xmm5 vmovdqu 96(%rsp),%xmm2 vpclmulqdq $0x00,%xmm0,%xmm1,%xmm6 vmovdqu 96-32(%r9),%xmm3 vpxor %xmm7,%xmm6,%xmm6 vpunpckhqdq %xmm2,%xmm2,%xmm7 vpclmulqdq $0x11,%xmm0,%xmm1,%xmm1 vpxor %xmm2,%xmm7,%xmm7 vpxor %xmm9,%xmm1,%xmm1 vpclmulqdq $0x10,%xmm15,%xmm4,%xmm4 vmovdqu 128-32(%r9),%xmm15 vpxor %xmm5,%xmm4,%xmm4 vpxor 112(%rsp),%xmm8,%xmm8 vpclmulqdq $0x00,%xmm3,%xmm2,%xmm5 vmovdqu 112-32(%r9),%xmm0 vpunpckhqdq %xmm8,%xmm8,%xmm9 vpxor %xmm6,%xmm5,%xmm5 vpclmulqdq $0x11,%xmm3,%xmm2,%xmm2 vpxor %xmm8,%xmm9,%xmm9 vpxor %xmm1,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm15,%xmm7,%xmm7 vpxor %xmm4,%xmm7,%xmm4 vpclmulqdq $0x00,%xmm0,%xmm8,%xmm6 vmovdqu 0-32(%r9),%xmm3 vpunpckhqdq %xmm14,%xmm14,%xmm1 vpclmulqdq $0x11,%xmm0,%xmm8,%xmm8 vpxor %xmm14,%xmm1,%xmm1 vpxor %xmm5,%xmm6,%xmm5 vpclmulqdq $0x10,%xmm15,%xmm9,%xmm9 vmovdqu 32-32(%r9),%xmm15 vpxor %xmm2,%xmm8,%xmm7 vpxor %xmm4,%xmm9,%xmm6 vmovdqu 16-32(%r9),%xmm0 vpxor %xmm5,%xmm7,%xmm9 vpclmulqdq $0x00,%xmm3,%xmm14,%xmm4 vpxor %xmm9,%xmm6,%xmm6 vpunpckhqdq %xmm13,%xmm13,%xmm2 vpclmulqdq $0x11,%xmm3,%xmm14,%xmm14 vpxor %xmm13,%xmm2,%xmm2 vpslldq $8,%xmm6,%xmm9 vpclmulqdq $0x00,%xmm15,%xmm1,%xmm1 vpxor %xmm9,%xmm5,%xmm8 vpsrldq $8,%xmm6,%xmm6 vpxor %xmm6,%xmm7,%xmm7 vpclmulqdq $0x00,%xmm0,%xmm13,%xmm5 vmovdqu 48-32(%r9),%xmm3 vpxor %xmm4,%xmm5,%xmm5 vpunpckhqdq %xmm12,%xmm12,%xmm9 vpclmulqdq $0x11,%xmm0,%xmm13,%xmm13 vpxor %xmm12,%xmm9,%xmm9 vpxor %xmm14,%xmm13,%xmm13 vpalignr $8,%xmm8,%xmm8,%xmm14 vpclmulqdq $0x10,%xmm15,%xmm2,%xmm2 vmovdqu 80-32(%r9),%xmm15 vpxor %xmm1,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm3,%xmm12,%xmm4 vmovdqu 64-32(%r9),%xmm0 vpxor %xmm5,%xmm4,%xmm4 vpunpckhqdq %xmm11,%xmm11,%xmm1 vpclmulqdq $0x11,%xmm3,%xmm12,%xmm12 vpxor %xmm11,%xmm1,%xmm1 vpxor %xmm13,%xmm12,%xmm12 vxorps 16(%rsp),%xmm7,%xmm7 vpclmulqdq $0x00,%xmm15,%xmm9,%xmm9 vpxor %xmm2,%xmm9,%xmm9 vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8 vxorps %xmm14,%xmm8,%xmm8 vpclmulqdq $0x00,%xmm0,%xmm11,%xmm5 vmovdqu 96-32(%r9),%xmm3 vpxor %xmm4,%xmm5,%xmm5 vpunpckhqdq %xmm10,%xmm10,%xmm2 vpclmulqdq $0x11,%xmm0,%xmm11,%xmm11 vpxor %xmm10,%xmm2,%xmm2 vpalignr $8,%xmm8,%xmm8,%xmm14 vpxor %xmm12,%xmm11,%xmm11 vpclmulqdq $0x10,%xmm15,%xmm1,%xmm1 vmovdqu 128-32(%r9),%xmm15 vpxor %xmm9,%xmm1,%xmm1 vxorps %xmm7,%xmm14,%xmm14 vpclmulqdq $0x10,16(%r11),%xmm8,%xmm8 vxorps %xmm14,%xmm8,%xmm8 vpclmulqdq $0x00,%xmm3,%xmm10,%xmm4 vmovdqu 112-32(%r9),%xmm0 vpxor %xmm5,%xmm4,%xmm4 vpunpckhqdq %xmm8,%xmm8,%xmm9 vpclmulqdq $0x11,%xmm3,%xmm10,%xmm10 vpxor %xmm8,%xmm9,%xmm9 vpxor %xmm11,%xmm10,%xmm10 vpclmulqdq $0x00,%xmm15,%xmm2,%xmm2 vpxor %xmm1,%xmm2,%xmm2 vpclmulqdq $0x00,%xmm0,%xmm8,%xmm5 vpclmulqdq $0x11,%xmm0,%xmm8,%xmm7 vpxor %xmm4,%xmm5,%xmm5 vpclmulqdq $0x10,%xmm15,%xmm9,%xmm6 vpxor %xmm10,%xmm7,%xmm7 vpxor %xmm2,%xmm6,%xmm6 vpxor %xmm5,%xmm7,%xmm4 vpxor %xmm4,%xmm6,%xmm6 vpslldq $8,%xmm6,%xmm1 vmovdqu 16(%r11),%xmm3 vpsrldq $8,%xmm6,%xmm6 vpxor %xmm1,%xmm5,%xmm8 vpxor %xmm6,%xmm7,%xmm7 vpalignr $8,%xmm8,%xmm8,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8 vpxor %xmm2,%xmm8,%xmm8 vpalignr 
$8,%xmm8,%xmm8,%xmm2 vpclmulqdq $0x10,%xmm3,%xmm8,%xmm8 vpxor %xmm7,%xmm2,%xmm2 vpxor %xmm2,%xmm8,%xmm8 movq 16(%rbp),%r12 vpshufb (%r11),%xmm8,%xmm8 vmovdqu %xmm8,(%r12) vzeroupper leaq -40(%rbp),%rsp .cfi_def_cfa %rsp, 0x38 popq %r15 .cfi_adjust_cfa_offset -8 .cfi_restore %r15 popq %r14 .cfi_adjust_cfa_offset -8 .cfi_restore %r14 popq %r13 .cfi_adjust_cfa_offset -8 .cfi_restore %r13 popq %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 popq %rbx .cfi_adjust_cfa_offset -8 .cfi_restore %rbx popq %rbp .cfi_adjust_cfa_offset -8 .cfi_restore %rbp .Lgcm_enc_abort: ret .cfi_endproc .size aesni_gcm_encrypt,.-aesni_gcm_encrypt .section .rodata .align 64 .Lbswap_mask: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .Lpoly: .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 .Lone_msb: .byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 .Ltwo_lsb: .byte 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 .Lone_lsb: .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 .byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 64 .text #endif
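One detail of the file above that is easy to miss: inside _aesni_ctr32_ghash_6x (and _aesni_ctr32_6x) the big-endian 32-bit counter is normally bumped with cheap byte-wide vpaddb adds on the last byte of the counter block, so each iteration first probes whether that byte would wrap within the next six increments (addl $100663296, %ebx, i.e. 6 << 24, followed by jc .Lhandle_ctr32); only on a wrap does it fall back to byte-swapping the counter and doing full 32-bit vpaddd increments. A hedged C sketch of that probe, assuming ctr_last4 holds bytes 12..15 of the counter block as loaded little-endian (the variable name is illustrative, not from the source):

#include <stdint.h>

/* Returns nonzero when adding 6 to the low byte of the big-endian
 * counter would wrap.  Loaded little-endian, that byte sits in bits
 * 24..31 of the dword, so the probe is an add of 6 << 24 plus a carry
 * check, as in: addl $0x06000000, %ebx ; jc .Lhandle_ctr32 */
static int ctr_low_byte_would_wrap(uint32_t ctr_last4) {
    uint64_t sum = (uint64_t)ctr_last4 + ((uint32_t)6 << 24);
    return sum > (uint64_t)UINT32_MAX;
}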
marvin-hansen/iggy-streaming-system
82,384
thirdparty/crates/ring-0.17.9/pregenerated/aesv8-gcm-armv8-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include <ring-core/arm_arch.h> #if __ARM_MAX_ARCH__ >= 8 .arch armv8-a+crypto .text .globl aes_gcm_enc_kernel .hidden aes_gcm_enc_kernel .type aes_gcm_enc_kernel,%function .align 4 aes_gcm_enc_kernel: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys add x4, x0, x1, lsr #3 // end_input_ptr lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible sub x5, x5, #1 // byte_len - 1 ldr q18, [x8, #0] // load rk0 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) ldr q25, [x8, #112] // load rk7 add x5, x5, x0 lsr x12, x11, #32 fmov d2, x10 // CTR block 2 orr w11, w11, w11 rev w12, w12 // rev_ctr32 fmov d1, x10 // CTR block 1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 0 add w12, w12, #1 // increment rev_ctr32 rev w9, w12 // CTR block 1 fmov d3, x10 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 1 add w12, w12, #1 // CTR block 1 ldr q19, [x8, #16] // load rk1 fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 ldr q20, [x8, #32] // load rk2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 orr x9, x11, x9, lsl #32 // CTR block 3 fmov v3.d[1], x9 // CTR block 3 aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q21, [x8, #48] // load rk3 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q24, [x8, #96] // load rk6 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q23, [x8, #80] // load rk5 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ldr q14, [x6, #48] // load h3l | h3h ext v14.16b, v14.16b, v14.16b, #8 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q22, [x8, #64] // load rk4 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 ldr q13, [x6, #32] // load h2l | h2h ext v13.16b, v13.16b, v13.16b, #8 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q15, [x6, #80] // load h4l | h4h ext v15.16b, v15.16b, v15.16b, #8 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 ldr q29, [x8, #176] // load rk11 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 ldr q26, [x8, #128] // load rk8 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 add w12, w12, #1 // CTR block 3 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 ld1 { v11.16b}, [x3] ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b aese 
v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 2 - round 4 aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 0 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 2 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 trn2 v17.2d, v14.2d, v15.2d // h4l | h3l aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 ldr q27, [x8, #144] // load rk9 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 ldr q12, [x6] // load h1l | h1h ext v12.16b, v12.16b, v12.16b, #8 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 2 - round 6 ldr q28, [x8, #160] // load rk10 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 trn1 v9.2d, v14.2d, v15.2d // h4h | h3h aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 trn2 v16.2d, v12.2d, v13.2d // h2l | h1l aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 b.lt .Lenc_finish_first_blocks // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 0 - round 9 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 3 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 b.eq .Lenc_finish_first_blocks // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 2 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 .Lenc_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks eor v17.16b, v17.16b, v9.16b // h4k | h3k aese v2.16b, v31.16b // AES block 2 - round N-1 trn1 v8.2d, v12.2d, v13.2d // h2h | h1h aese v1.16b, v31.16b // AES block 1 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 aese v3.16b, v31.16b // AES block 3 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k b.ge .Lenc_tail // handle tail ldp x19, x20, [x0, #16] // AES block 1 - load plaintext rev w9, w12 // CTR block 4 ldp x6, x7, [x0, #0] // AES block 0 - load plaintext ldp x23, x24, [x0, #48] // AES block 3 - load plaintext ldp x21, x22, [x0, #32] // AES block 2 - load plaintext add x0, x0, #64 // AES input_ptr update eor x19, x19, x13 
// AES block 1 - round N low eor x20, x20, x14 // AES block 1 - round N high fmov d5, x19 // AES block 1 - mov low eor x6, x6, x13 // AES block 0 - round N low eor x7, x7, x14 // AES block 0 - round N high eor x24, x24, x14 // AES block 3 - round N high fmov d4, x6 // AES block 0 - mov low cmp x0, x5 // check if we have <= 8 blocks fmov v4.d[1], x7 // AES block 0 - mov high eor x23, x23, x13 // AES block 3 - round N low eor x21, x21, x13 // AES block 2 - round N low fmov v5.d[1], x20 // AES block 1 - mov high fmov d6, x21 // AES block 2 - mov low add w12, w12, #1 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov d7, x23 // AES block 3 - mov low eor x22, x22, x14 // AES block 2 - round N high fmov v6.d[1], x22 // AES block 2 - mov high eor v4.16b, v4.16b, v0.16b // AES block 0 - result fmov d0, x10 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add w12, w12, #1 // CTR block 5 eor v5.16b, v5.16b, v1.16b // AES block 1 - result fmov d1, x10 // CTR block 5 orr x9, x11, x9, lsl #32 // CTR block 5 fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 st1 { v4.16b}, [x2], #16 // AES block 0 - store result fmov v7.d[1], x24 // AES block 3 - mov high orr x9, x11, x9, lsl #32 // CTR block 6 eor v6.16b, v6.16b, v2.16b // AES block 2 - result st1 { v5.16b}, [x2], #16 // AES block 1 - store result add w12, w12, #1 // CTR block 6 fmov d2, x10 // CTR block 6 fmov v2.d[1], x9 // CTR block 6 st1 { v6.16b}, [x2], #16 // AES block 2 - store result rev w9, w12 // CTR block 7 orr x9, x11, x9, lsl #32 // CTR block 7 eor v7.16b, v7.16b, v3.16b // AES block 3 - result st1 { v7.16b}, [x2], #16 // AES block 3 - store result b.ge .Lenc_prepretail // do prepretail .Lenc_main_loop: // main loop start aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free) aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d3, x10 // CTR block 4k+3 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 fmov v3.d[1], x9 // CTR block 4k+3 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 ldp x21, x22, [x0, #32] // AES block 4k+6 - load plaintext aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 eor v4.16b, v4.16b, v11.16b // PRE 1 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x23, x23, x13 // AES block 4k+7 - round N low aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d10, v17.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high eor x22, x22, x14 // AES block 4k+6 - round N high mov d8, v4.d[1] // GHASH block 4k - mid aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - 
high pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free) pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor x19, x19, x13 // AES block 4k+5 - round N low aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 eor x21, x21, x13 // AES block 4k+6 - round N low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 movi v8.8b, #0xc2 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check fmov d5, x19 // AES block 4k+5 - mov low ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext b.lt .Lenc_main_loop_continue // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc 
v1.16b, v1.16b // AES block 4k+5 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq .Lenc_main_loop_continue // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 .Lenc_main_loop_continue: shl d8, d8, #56 // mod_constant eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid add w12, w12, #1 // CTR block 4k+3 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up add x0, x0, #64 // AES input_ptr update pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid rev w9, w12 // CTR block 4k+8 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor x6, x6, x13 // AES block 4k+4 - round N low eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up eor x7, x7, x14 // AES block 4k+4 - round N high fmov d4, x6 // AES block 4k+4 - mov low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid eor x20, x20, x14 // AES block 4k+5 - round N high eor x24, x24, x14 // AES block 4k+7 - round N high add w12, w12, #1 // CTR block 4k+8 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 fmov v4.d[1], x7 // AES block 4k+4 - mov high eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid fmov d7, x23 // AES block 4k+7 - mov low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 fmov v5.d[1], x20 // AES block 4k+5 - mov high fmov d6, x21 // AES block 4k+6 - mov low cmp x0, x5 // .LOOP CONTROL fmov v6.d[1], x22 // AES block 4k+6 - mov high pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 4k+8 rev w9, w12 // CTR block 4k+9 add w12, w12, #1 // CTR block 4k+9 eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result fmov d1, x10 // CTR block 4k+9 orr x9, x11, x9, lsl #32 // CTR block 4k+9 fmov v1.d[1], x9 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 rev w9, w12 // CTR block 4k+10 st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result orr x9, x11, x9, lsl #32 // CTR block 4k+10 eor v11.16b, v11.16b, v9.16b // MODULO - fold into low fmov v7.d[1], x24 // AES block 4k+7 - mov high ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result add w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result fmov d2, x10 // CTR block 4k+10 st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result fmov v2.d[1], x9 // CTR block 4k+10 rev w9, w12 // CTR block 4k+11 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low orr x9, x11, x9, lsl #32 // CTR block 4k+11 eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result b.lt .Lenc_main_loop .Lenc_prepretail: // PREPRETAIL aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 rev64 
v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free) aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov d3, x10 // CTR block 4k+3 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free) fmov v3.d[1], x9 // CTR block 4k+3 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid add w12, w12, #1 // CTR block 4k+3 pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, 
v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 movi v8.8b, #0xc2 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v10.16b, v10.16b, v9.16b // karatsuba tidy up aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 pmull v4.1q, v9.1d, v8.1d ext v9.16b, v9.16b, v9.16b, #8 eor v10.16b, v10.16b, v11.16b b.lt .Lenc_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 b.eq .Lenc_finish_prepretail // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 .Lenc_finish_prepretail: eor v10.16b, v10.16b, v4.16b eor v10.16b, v10.16b, v9.16b pmull v4.1q, v10.1d, v8.1d ext v10.16b, v10.16b, v10.16b, #8 aese v1.16b, v31.16b // AES block 4k+5 - round N-1 eor v11.16b, v11.16b, v4.16b aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b .Lenc_tail: // TAIL ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high cmp x5, #48 fmov d4, x6 // AES block 4k+4 - mov low fmov v4.d[1], x7 // AES block 4k+4 - mov high eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result b.gt .Lenc_blocks_more_than_3 cmp x5, #32 mov v3.16b, v2.16b movi v11.8b, #0 movi v9.8b, #0 sub w12, w12, #1 mov v2.16b, v1.16b movi v10.8b, #0 b.gt .Lenc_blocks_more_than_2 mov v3.16b, v1.16b sub w12, w12, #1 cmp x5, #16 b.gt .Lenc_blocks_more_than_1 sub w12, w12, #1 b .Lenc_blocks_less_than_1 
.Lenc_blocks_more_than_3: // blocks left > 3 st1 { v5.16b}, [x2], #16 // AES final-3 block - store result ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-3 block eor x6, x6, x13 // AES final-2 block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag eor x7, x7, x14 // AES final-2 block - round N high mov d22, v4.d[1] // GHASH final-3 block - mid fmov d5, x6 // AES final-2 block - mov low fmov v5.d[1], x7 // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in mov d10, v17.d[1] // GHASH final-3 block - mid pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor v5.16b, v5.16b, v1.16b // AES final-2 block - result .Lenc_blocks_more_than_2: // blocks left > 2 st1 { v5.16b}, [x2], #16 // AES final-2 block - store result ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-2 block eor x6, x6, x13 // AES final-1 block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag fmov d5, x6 // AES final-1 block - mov low eor x7, x7, x14 // AES final-1 block - round N high fmov v5.d[1], x7 // AES final-1 block - mov high movi v8.8b, #0 // suppress further partial tag feed in pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid eor v5.16b, v5.16b, v2.16b // AES final-1 block - result eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid .Lenc_blocks_more_than_1: // blocks left > 1 st1 { v5.16b}, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ldp x6, x7, [x0], #16 // AES final block - load input low & high eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in eor x6, x6, x13 // AES final block - round N low mov d22, v4.d[1] // GHASH final-1 block - mid pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor x7, x7, x14 // AES final block - round N high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high ins v22.d[1], v22.d[0] // GHASH final-1 block - mid fmov d5, x6 // AES final block - mov low fmov v5.d[1], x7 // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low eor v5.16b, v5.16b, v3.16b // AES final block - result eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low .Lenc_blocks_less_than_1: // blocks left <= 1 and x1, x1, #127 // bit_length %= 128 mvn x13, xzr // rkN_l = 0xffffffffffffffff sub x1, x1, #128 // bit_length -= 128 neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128]) ld1 { v18.16b}, [x2] // load existing bytes where the possibly partial last block is to be stored mvn x14, xzr // rkN_h = 0xffffffffffffffff and x1, x1, #127 // bit_length %= 128 lsr x14, x14, x1 // rkN_h is mask for top 64b of last block cmp x1, #64 csel x6, x13, x14, lt csel x7, x14, xzr, lt fmov d0, x6 // ctr0b is mask for last 
block fmov v0.d[1], x7 and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag bif v5.16b, v18.16b, v0.16b // insert existing bytes in top end of result before storing pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high mov d8, v4.d[1] // GHASH final block - mid rev w9, w12 pmull v21.1q, v4.1d, v12.1d // GHASH final block - low eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block - mid movi v8.8b, #0xc2 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment str w9, [x16, #12] // store the updated counter st1 { v5.16b}, [x2] // store all 16B eor v11.16b, v11.16b, v9.16b // MODULO - fold into low eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] ldp x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret .size aes_gcm_enc_kernel,.-aes_gcm_enc_kernel .globl aes_gcm_dec_kernel .hidden aes_gcm_dec_kernel .type aes_gcm_dec_kernel,%function .align 4 aes_gcm_dec_kernel: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! 
mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ldr q26, [x8, #128] // load rk8 sub x5, x5, #1 // byte_len - 1 ldr q25, [x8, #112] // load rk7 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) add x4, x0, x1, lsr #3 // end_input_ptr ldr q24, [x8, #96] // load rk6 lsr x12, x11, #32 ldr q23, [x8, #80] // load rk5 orr w11, w11, w11 ldr q21, [x8, #48] // load rk3 add x5, x5, x0 rev w12, w12 // rev_ctr32 add w12, w12, #1 // increment rev_ctr32 fmov d3, x10 // CTR block 3 rev w9, w12 // CTR block 1 add w12, w12, #1 // CTR block 1 fmov d1, x10 // CTR block 1 orr x9, x11, x9, lsl #32 // CTR block 1 ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 fmov d2, x10 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 3 ldr q18, [x8, #0] // load rk0 fmov v3.d[1], x9 // CTR block 3 add w12, w12, #1 // CTR block 3 ldr q22, [x8, #64] // load rk4 ldr q19, [x8, #16] // load rk1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 0 ldr q14, [x6, #48] // load h3l | h3h ext v14.16b, v14.16b, v14.16b, #8 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 ldr q15, [x6, #80] // load h4l | h4h ext v15.16b, v15.16b, v15.16b, #8 aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q13, [x6, #32] // load h2l | h2h ext v13.16b, v13.16b, v13.16b, #8 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q20, [x8, #32] // load rk2 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ld1 { v11.16b}, [x3] ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q27, [x8, #144] // load rk9 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q12, [x6] // load h1l | h1h ext v12.16b, v12.16b, v12.16b, #8 aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q28, [x8, #160] // load rk10 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 0 - round 4 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 2 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - 
round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 2 - round 5 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 cmp x17, #12 // setup flags for AES-128/192/256 check aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 2 - round 6 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 ldr q29, [x8, #176] // load rk11 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 b.lt .Ldec_finish_first_blocks // branch if AES-128 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 0 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 3 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 b.eq .Ldec_finish_first_blocks // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 2 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 .Ldec_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks trn1 v9.2d, v14.2d, v15.2d // h4h | h3h trn2 v17.2d, v14.2d, v15.2d // h4l | h3l trn1 v8.2d, v12.2d, v13.2d // h2h | h1h trn2 v16.2d, v12.2d, v13.2d // h2l | h1l eor v17.16b, v17.16b, v9.16b // h4k | h3k aese v1.16b, v31.16b // AES block 1 - round N-1 aese v2.16b, v31.16b // AES block 2 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k aese v3.16b, v31.16b // AES block 3 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 b.ge .Ldec_tail // handle tail ldr q4, [x0, #0] // AES block 0 - load ciphertext ldr q5, [x0, #16] // AES block 1 - load ciphertext rev w9, w12 // CTR block 4 eor v0.16b, v4.16b, v0.16b // AES block 0 - result eor v1.16b, v5.16b, v1.16b // AES block 1 - result rev64 v5.16b, v5.16b // GHASH block 1 ldr q7, [x0, #48] // AES block 3 - load ciphertext mov x7, v0.d[1] // AES block 0 - mov high mov x6, v0.d[0] // AES block 0 - mov low rev64 v4.16b, v4.16b // GHASH block 0 add w12, w12, #1 // CTR block 4 fmov d0, x10 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add w12, w12, #1 // CTR block 5 mov x19, v1.d[0] // AES block 1 - mov low orr x9, x11, x9, lsl #32 // CTR block 5 mov x20, v1.d[1] // AES block 1 - mov high eor x7, x7, x14 // AES block 0 - round 
N high eor x6, x6, x13 // AES block 0 - round N low stp x6, x7, [x2], #16 // AES block 0 - store result fmov d1, x10 // CTR block 5 ldr q6, [x0, #32] // AES block 2 - load ciphertext add x0, x0, #64 // AES input_ptr update fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 add w12, w12, #1 // CTR block 6 eor x19, x19, x13 // AES block 1 - round N low orr x9, x11, x9, lsl #32 // CTR block 6 eor x20, x20, x14 // AES block 1 - round N high stp x19, x20, [x2], #16 // AES block 1 - store result eor v2.16b, v6.16b, v2.16b // AES block 2 - result cmp x0, x5 // check if we have <= 8 blocks b.ge .Ldec_prepretail // do prepretail .Ldec_main_loop: // main loop start mov x21, v2.d[0] // AES block 4k+2 - mov low ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 mov x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 eor v4.16b, v4.16b, v11.16b // PRE 1 rev w9, w12 // CTR block 4k+7 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x23, v3.d[0] // AES block 4k+3 - mov low pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 orr x9, x11, x9, lsl #32 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov v3.d[1], x9 // CTR block 4k+7 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor x22, x22, x14 // AES block 4k+2 - round N high aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 rev64 v6.16b, v6.16b // GHASH block 4k+2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x21, x21, x13 // AES block 4k+2 - round N low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 stp x21, x22, [x2], #16 // AES block 4k+2 - store result pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 rev64 v7.16b, v7.16b // GHASH block 4k+3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor x23, x23, x13 // AES block 4k+3 - round N low pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor x24, x24, x14 // AES block 4k+3 - round N high eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 add w12, w12, #1 // CTR block 4k+7 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, 
v6.1d, v13.1d // GHASH block 4k+2 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid rev w9, w12 // CTR block 4k+8 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 add w12, w12, #1 // CTR block 4k+8 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high mov d6, v7.d[1] // GHASH block 4k+3 - mid aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid movi v8.8b, #0xc2 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 b.lt .Ldec_main_loop_continue // branch if AES-128 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq .Ldec_main_loop_continue // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 
4k+7 - round 12 .Ldec_main_loop_continue: pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext aese v0.16b, v31.16b // AES block 4k+4 - round N-1 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext mov x7, v0.d[1] // AES block 4k+4 - mov high eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid aese v1.16b, v31.16b // AES block 4k+5 - round N-1 add x0, x0, #64 // AES input_ptr update mov x6, v0.d[0] // AES block 4k+4 - mov low fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 4k+8 pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result rev w9, w12 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+9 cmp x0, x5 // .LOOP CONTROL add w12, w12, #1 // CTR block 4k+9 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high mov x20, v1.d[1] // AES block 4k+5 - mov high eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low mov x19, v1.d[0] // AES block 4k+5 - mov low fmov d1, x10 // CTR block 4k+9 ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment fmov v1.d[1], x9 // CTR block 4k+9 rev w9, w12 // CTR block 4k+10 add w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+10 rev64 v5.16b, v5.16b // GHASH block 4k+5 eor x20, x20, x14 // AES block 4k+5 - round N high stp x6, x7, [x2], #16 // AES block 4k+4 - store result eor x19, x19, x13 // AES block 4k+5 - round N low stp x19, x20, [x2], #16 // AES block 4k+5 - store result rev64 v4.16b, v4.16b // GHASH block 4k+4 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low b.lt .Ldec_main_loop .Ldec_prepretail: // PREPRETAIL ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 mov x21, v2.d[0] // AES block 4k+2 - mov low eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 mov x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 rev w9, w12 // CTR block 4k+7 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v6.16b, v6.16b // GHASH block 4k+2 orr x9, x11, x9, lsl #32 // CTR block 4k+7 mov x23, v3.d[0] // AES block 4k+3 - mov low aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high fmov v3.d[1], x9 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high aese v2.16b, v19.16b aesmc v2.16b, 
v2.16b // AES block 4k+6 - round 1 rev64 v7.16b, v7.16b // GHASH block 4k+3 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 mov d6, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 movi v8.8b, #0xc2 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 cmp x17, #12 // setup flags for AES-128/192/256 check eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 shl d8, d8, #56 // 
mod_constant aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 b.lt .Ldec_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 b.eq .Ldec_finish_prepretail // branch if AES-192 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 .Ldec_finish_prepretail: eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor x22, x22, x14 // AES block 4k+2 - round N high eor x23, x23, x13 // AES block 4k+3 - round N low eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid add w12, w12, #1 // CTR block 4k+7 eor x21, x21, x13 // AES block 4k+2 - round N low pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor x24, x24, x14 // AES block 4k+3 - round N high stp x21, x22, [x2], #16 // AES block 4k+2 - store result ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low .Ldec_tail: // TAIL sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result mov x6, v0.d[0] // AES block 4k+4 - mov low mov x7, v0.d[1] // AES block 4k+4 - mov high ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag cmp x5, #48 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high b.gt .Ldec_blocks_more_than_3 sub w12, w12, #1 mov v3.16b, v2.16b movi v10.8b, #0 movi v11.8b, #0 cmp x5, #32 movi v9.8b, #0 mov v2.16b, v1.16b b.gt .Ldec_blocks_more_than_2 sub w12, w12, #1 mov v3.16b, v1.16b cmp x5, #16 b.gt .Ldec_blocks_more_than_1 sub w12, w12, #1 b .Ldec_blocks_less_than_1 .Ldec_blocks_more_than_3: // blocks left > 3 rev64 v4.16b, v5.16b // GHASH final-3 block ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext stp x6, x7, [x2], #16 // AES final-3 block - store result mov d10, v17.d[1] // GHASH final-3 block - mid eor v4.16b, v4.16b, v8.16b // feed in partial tag eor v0.16b, v5.16b, v1.16b // AES final-2 block - result 
mov d22, v4.d[1] // GHASH final-3 block - mid mov x6, v0.d[0] // AES final-2 block - mov low mov x7, v0.d[1] // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor x6, x6, x13 // AES final-2 block - round N low pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low eor x7, x7, x14 // AES final-2 block - round N high .Ldec_blocks_more_than_2: // blocks left > 2 rev64 v4.16b, v5.16b // GHASH final-2 block ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag stp x6, x7, [x2], #16 // AES final-2 block - store result eor v0.16b, v5.16b, v2.16b // AES final-1 block - result mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid mov x6, v0.d[0] // AES final-1 block - mov low mov x7, v0.d[1] // AES final-1 block - mov high eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low movi v8.8b, #0 // suppress further partial tag feed in pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high eor x6, x6, x13 // AES final-1 block - round N low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid eor x7, x7, x14 // AES final-1 block - round N high .Ldec_blocks_more_than_1: // blocks left > 1 stp x6, x7, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in mov d22, v4.d[1] // GHASH final-1 block - mid eor v0.16b, v5.16b, v3.16b // AES final block - result pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low mov x6, v0.d[0] // AES final block - mov low ins v22.d[1], v22.d[0] // GHASH final-1 block - mid mov x7, v0.d[1] // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid eor x6, x6, x13 // AES final block - round N low eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor x7, x7, x14 // AES final block - round N high .Ldec_blocks_less_than_1: // blocks left <= 1 and x1, x1, #127 // bit_length %= 128 mvn x14, xzr // rkN_h = 0xffffffffffffffff sub x1, x1, #128 // bit_length -= 128 mvn x13, xzr // rkN_l = 0xffffffffffffffff ldp x4, x5, [x2] // load existing bytes we need to not overwrite neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128]) and x1, x1, #127 // bit_length %= 128 lsr x14, x14, x1 // rkN_h is mask for top 64b of last block cmp x1, #64 csel x9, x13, x14, lt csel x10, x14, xzr, lt fmov d0, x9 // ctr0b is mask for last block and x6, x6, x9 mov v0.d[1], x10 bic x4, x4, x9 // mask out low existing bytes rev w9, w12 bic x5, x5, x10 // mask out high existing bytes orr x6, x6, x4 and x7, x7, x10 orr x7, x7, x5 and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag pmull v21.1q, v4.1d, v12.1d // GHASH final 
block - low mov d8, v4.d[1] // GHASH final block - mid eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block - mid movi v8.8b, #0xc2 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment eor v11.16b, v11.16b, v8.16b // MODULO - fold into low stp x6, x7, [x2] str w9, [x16, #12] // store the updated counter eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] ldp x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret .size aes_gcm_dec_kernel,.-aes_gcm_dec_kernel #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
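Editor's note: the tail handling above (.Ldec_blocks_less_than_1) folds the message bit length into two 64-bit masks so that a partial final block contributes only its valid bytes to the stored output and to the final GHASH block. As a rough illustration only — not ring's code, using the hypothetical names last_block_masks and bit_len, and assuming a non-empty input — the mask derivation corresponds to this Rust sketch:

fn last_block_masks(bit_len: u64) -> (u64, u64) {
    // Valid bits in the final (possibly partial) 16-byte block, in 1..=128;
    // a multiple of 128 bits means the last block is full.
    let valid = if bit_len % 128 == 0 { 128 } else { bit_len % 128 };
    let invalid = 128 - valid; // bits to clear at the top of the block, 0..=127
    if invalid < 64 {
        // More than 64 valid bits: keep the whole low half, trim the high half.
        (u64::MAX, u64::MAX >> invalid)
    } else {
        // At most 64 valid bits: trim the low half, discard the high half entirely.
        (u64::MAX >> (invalid - 64), 0)
    }
}

The returned pair plays the role of the x9/x10 masks in the assembly: they are ANDed into the decrypted words before merging with the bytes already present at the output pointer, and the same pair (moved into v0) zeroes the stale ciphertext lanes fed into the last GHASH block.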
marvin-hansen/iggy-streaming-system
30,674
thirdparty/crates/ring-0.17.9/pregenerated/armv8-mont-ios64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include <ring-core/arm_arch.h> .text .globl _bn_mul_mont_nohw .private_extern _bn_mul_mont_nohw .align 5 _bn_mul_mont_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] ldr x9,[x2],#8 // bp[0] sub x22,sp,x5,lsl#3 ldp x7,x8,[x1],#16 // ap[0..1] lsl x5,x5,#3 ldr x4,[x4] // *n0 and x22,x22,#-16 // ABI says so ldp x13,x14,[x3],#16 // np[0..1] mul x6,x7,x9 // ap[0]*bp[0] sub x21,x5,#16 // j=num-2 umulh x7,x7,x9 mul x10,x8,x9 // ap[1]*bp[0] umulh x11,x8,x9 mul x15,x6,x4 // "tp[0]"*n0 mov sp,x22 // alloca // (*) mul x12,x13,x15 // np[0]*m1 umulh x13,x13,x15 mul x16,x14,x15 // np[1]*m1 // (*) adds x12,x12,x6 // discarded // (*) As for removal of first multiplication and addition // instructions. The outcome of first addition is // guaranteed to be zero, which leaves two computationally // significant outcomes: it either carries or not. Then // question is when does it carry? Is there alternative // way to deduce it? If you follow operations, you can // observe that condition for carry is quite simple: // x6 being non-zero. So that carry can be calculated // by adding -1 to x6. That's what next instruction does. subs xzr,x6,#1 // (*) umulh x17,x14,x15 adc x13,x13,xzr cbz x21,L1st_skip L1st: ldr x8,[x1],#8 adds x6,x10,x7 sub x21,x21,#8 // j-- adc x7,x11,xzr ldr x14,[x3],#8 adds x12,x16,x13 mul x10,x8,x9 // ap[j]*bp[0] adc x13,x17,xzr umulh x11,x8,x9 adds x12,x12,x6 mul x16,x14,x15 // np[j]*m1 adc x13,x13,xzr umulh x17,x14,x15 str x12,[x22],#8 // tp[j-1] cbnz x21,L1st L1st_skip: adds x6,x10,x7 sub x1,x1,x5 // rewind x1 adc x7,x11,xzr adds x12,x16,x13 sub x3,x3,x5 // rewind x3 adc x13,x17,xzr adds x12,x12,x6 sub x20,x5,#8 // i=num-1 adcs x13,x13,x7 adc x19,xzr,xzr // upmost overflow bit stp x12,x13,[x22] Louter: ldr x9,[x2],#8 // bp[i] ldp x7,x8,[x1],#16 ldr x23,[sp] // tp[0] add x22,sp,#8 mul x6,x7,x9 // ap[0]*bp[i] sub x21,x5,#16 // j=num-2 umulh x7,x7,x9 ldp x13,x14,[x3],#16 mul x10,x8,x9 // ap[1]*bp[i] adds x6,x6,x23 umulh x11,x8,x9 adc x7,x7,xzr mul x15,x6,x4 sub x20,x20,#8 // i-- // (*) mul x12,x13,x15 // np[0]*m1 umulh x13,x13,x15 mul x16,x14,x15 // np[1]*m1 // (*) adds x12,x12,x6 subs xzr,x6,#1 // (*) umulh x17,x14,x15 cbz x21,Linner_skip Linner: ldr x8,[x1],#8 adc x13,x13,xzr ldr x23,[x22],#8 // tp[j] adds x6,x10,x7 sub x21,x21,#8 // j-- adc x7,x11,xzr adds x12,x16,x13 ldr x14,[x3],#8 adc x13,x17,xzr mul x10,x8,x9 // ap[j]*bp[i] adds x6,x6,x23 umulh x11,x8,x9 adc x7,x7,xzr mul x16,x14,x15 // np[j]*m1 adds x12,x12,x6 umulh x17,x14,x15 str x12,[x22,#-16] // tp[j-1] cbnz x21,Linner Linner_skip: ldr x23,[x22],#8 // tp[j] adc x13,x13,xzr adds x6,x10,x7 sub x1,x1,x5 // rewind x1 adc x7,x11,xzr adds x12,x16,x13 sub x3,x3,x5 // rewind x3 adcs x13,x17,x19 adc x19,xzr,xzr adds x6,x6,x23 adc x7,x7,xzr adds x12,x12,x6 adcs x13,x13,x7 adc x19,x19,xzr // upmost overflow bit stp x12,x13,[x22,#-16] cbnz x20,Louter // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
ldr x23,[sp] // tp[0] add x22,sp,#8 ldr x14,[x3],#8 // np[0] subs x21,x5,#8 // j=num-1 and clear borrow mov x1,x0 Lsub: sbcs x8,x23,x14 // tp[j]-np[j] ldr x23,[x22],#8 sub x21,x21,#8 // j-- ldr x14,[x3],#8 str x8,[x1],#8 // rp[j]=tp[j]-np[j] cbnz x21,Lsub sbcs x8,x23,x14 sbcs x19,x19,xzr // did it borrow? str x8,[x1],#8 // rp[num-1] ldr x23,[sp] // tp[0] add x22,sp,#8 ldr x8,[x0],#8 // rp[0] sub x5,x5,#8 // num-- nop Lcond_copy: sub x5,x5,#8 // num-- csel x14,x23,x8,lo // did it borrow? ldr x23,[x22],#8 ldr x8,[x0],#8 str xzr,[x22,#-16] // wipe tp str x14,[x0,#-16] cbnz x5,Lcond_copy csel x14,x23,x8,lo str xzr,[x22,#-8] // wipe tp str x14,[x0,#-8] ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldr x29,[sp],#64 AARCH64_VALIDATE_LINK_REGISTER ret .globl _bn_sqr8x_mont .private_extern _bn_sqr8x_mont .align 5 _bn_sqr8x_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] stp x0,x3,[sp,#96] // offload rp and np ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] ldp x10,x11,[x1,#8*4] ldp x12,x13,[x1,#8*6] sub x2,sp,x5,lsl#4 lsl x5,x5,#3 ldr x4,[x4] // *n0 mov sp,x2 // alloca sub x27,x5,#8*8 b Lsqr8x_zero_start Lsqr8x_zero: sub x27,x27,#8*8 stp xzr,xzr,[x2,#8*0] stp xzr,xzr,[x2,#8*2] stp xzr,xzr,[x2,#8*4] stp xzr,xzr,[x2,#8*6] Lsqr8x_zero_start: stp xzr,xzr,[x2,#8*8] stp xzr,xzr,[x2,#8*10] stp xzr,xzr,[x2,#8*12] stp xzr,xzr,[x2,#8*14] add x2,x2,#8*16 cbnz x27,Lsqr8x_zero add x3,x1,x5 add x1,x1,#8*8 mov x19,xzr mov x20,xzr mov x21,xzr mov x22,xzr mov x23,xzr mov x24,xzr mov x25,xzr mov x26,xzr mov x2,sp str x4,[x29,#112] // offload n0 // Multiply everything but a[i]*a[i] .align 4 Lsqr8x_outer_loop: // a[1]a[0] (i) // a[2]a[0] // a[3]a[0] // a[4]a[0] // a[5]a[0] // a[6]a[0] // a[7]a[0] // a[2]a[1] (ii) // a[3]a[1] // a[4]a[1] // a[5]a[1] // a[6]a[1] // a[7]a[1] // a[3]a[2] (iii) // a[4]a[2] // a[5]a[2] // a[6]a[2] // a[7]a[2] // a[4]a[3] (iv) // a[5]a[3] // a[6]a[3] // a[7]a[3] // a[5]a[4] (v) // a[6]a[4] // a[7]a[4] // a[6]a[5] (vi) // a[7]a[5] // a[7]a[6] (vii) mul x14,x7,x6 // lo(a[1..7]*a[0]) (i) mul x15,x8,x6 mul x16,x9,x6 mul x17,x10,x6 adds x20,x20,x14 // t[1]+lo(a[1]*a[0]) mul x14,x11,x6 adcs x21,x21,x15 mul x15,x12,x6 adcs x22,x22,x16 mul x16,x13,x6 adcs x23,x23,x17 umulh x17,x7,x6 // hi(a[1..7]*a[0]) adcs x24,x24,x14 umulh x14,x8,x6 adcs x25,x25,x15 umulh x15,x9,x6 adcs x26,x26,x16 umulh x16,x10,x6 stp x19,x20,[x2],#8*2 // t[0..1] adc x19,xzr,xzr // t[8] adds x21,x21,x17 // t[2]+lo(a[1]*a[0]) umulh x17,x11,x6 adcs x22,x22,x14 umulh x14,x12,x6 adcs x23,x23,x15 umulh x15,x13,x6 adcs x24,x24,x16 mul x16,x8,x7 // lo(a[2..7]*a[1]) (ii) adcs x25,x25,x17 mul x17,x9,x7 adcs x26,x26,x14 mul x14,x10,x7 adc x19,x19,x15 mul x15,x11,x7 adds x22,x22,x16 mul x16,x12,x7 adcs x23,x23,x17 mul x17,x13,x7 adcs x24,x24,x14 umulh x14,x8,x7 // hi(a[2..7]*a[1]) adcs x25,x25,x15 umulh x15,x9,x7 adcs x26,x26,x16 umulh x16,x10,x7 adcs x19,x19,x17 umulh x17,x11,x7 stp x21,x22,[x2],#8*2 // t[2..3] adc x20,xzr,xzr // t[9] adds x23,x23,x14 umulh x14,x12,x7 adcs x24,x24,x15 umulh x15,x13,x7 adcs x25,x25,x16 mul x16,x9,x8 // lo(a[3..7]*a[2]) (iii) adcs x26,x26,x17 mul x17,x10,x8 adcs x19,x19,x14 mul x14,x11,x8 adc x20,x20,x15 mul x15,x12,x8 adds x24,x24,x16 mul x16,x13,x8 adcs x25,x25,x17 umulh x17,x9,x8 // hi(a[3..7]*a[2]) adcs x26,x26,x14 umulh x14,x10,x8 adcs x19,x19,x15 umulh x15,x11,x8 adcs x20,x20,x16 umulh x16,x12,x8 stp x23,x24,[x2],#8*2 // t[4..5] adc x21,xzr,xzr 
// t[10] adds x25,x25,x17 umulh x17,x13,x8 adcs x26,x26,x14 mul x14,x10,x9 // lo(a[4..7]*a[3]) (iv) adcs x19,x19,x15 mul x15,x11,x9 adcs x20,x20,x16 mul x16,x12,x9 adc x21,x21,x17 mul x17,x13,x9 adds x26,x26,x14 umulh x14,x10,x9 // hi(a[4..7]*a[3]) adcs x19,x19,x15 umulh x15,x11,x9 adcs x20,x20,x16 umulh x16,x12,x9 adcs x21,x21,x17 umulh x17,x13,x9 stp x25,x26,[x2],#8*2 // t[6..7] adc x22,xzr,xzr // t[11] adds x19,x19,x14 mul x14,x11,x10 // lo(a[5..7]*a[4]) (v) adcs x20,x20,x15 mul x15,x12,x10 adcs x21,x21,x16 mul x16,x13,x10 adc x22,x22,x17 umulh x17,x11,x10 // hi(a[5..7]*a[4]) adds x20,x20,x14 umulh x14,x12,x10 adcs x21,x21,x15 umulh x15,x13,x10 adcs x22,x22,x16 mul x16,x12,x11 // lo(a[6..7]*a[5]) (vi) adc x23,xzr,xzr // t[12] adds x21,x21,x17 mul x17,x13,x11 adcs x22,x22,x14 umulh x14,x12,x11 // hi(a[6..7]*a[5]) adc x23,x23,x15 umulh x15,x13,x11 adds x22,x22,x16 mul x16,x13,x12 // lo(a[7]*a[6]) (vii) adcs x23,x23,x17 umulh x17,x13,x12 // hi(a[7]*a[6]) adc x24,xzr,xzr // t[13] adds x23,x23,x14 sub x27,x3,x1 // done yet? adc x24,x24,x15 adds x24,x24,x16 sub x14,x3,x5 // rewinded ap adc x25,xzr,xzr // t[14] add x25,x25,x17 cbz x27,Lsqr8x_outer_break mov x4,x6 ldp x6,x7,[x2,#8*0] ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] adds x19,x19,x6 adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x0,x1 adcs x26,xzr,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved below mov x27,#-8*8 // a[8]a[0] // a[9]a[0] // a[a]a[0] // a[b]a[0] // a[c]a[0] // a[d]a[0] // a[e]a[0] // a[f]a[0] // a[8]a[1] // a[f]a[1]........................ // a[8]a[2] // a[f]a[2]........................ // a[8]a[3] // a[f]a[3]........................ // a[8]a[4] // a[f]a[4]........................ // a[8]a[5] // a[f]a[5]........................ // a[8]a[6] // a[f]a[6]........................ // a[8]a[7] // a[f]a[7]........................ Lsqr8x_mul: mul x14,x6,x4 adc x28,xzr,xzr // carry bit, modulo-scheduled mul x15,x7,x4 add x27,x27,#8 mul x16,x8,x4 mul x17,x9,x4 adds x19,x19,x14 mul x14,x10,x4 adcs x20,x20,x15 mul x15,x11,x4 adcs x21,x21,x16 mul x16,x12,x4 adcs x22,x22,x17 mul x17,x13,x4 adcs x23,x23,x14 umulh x14,x6,x4 adcs x24,x24,x15 umulh x15,x7,x4 adcs x25,x25,x16 umulh x16,x8,x4 adcs x26,x26,x17 umulh x17,x9,x4 adc x28,x28,xzr str x19,[x2],#8 adds x19,x20,x14 umulh x14,x10,x4 adcs x20,x21,x15 umulh x15,x11,x4 adcs x21,x22,x16 umulh x16,x12,x4 adcs x22,x23,x17 umulh x17,x13,x4 ldr x4,[x0,x27] adcs x23,x24,x14 adcs x24,x25,x15 adcs x25,x26,x16 adcs x26,x28,x17 //adc x28,xzr,xzr // moved above cbnz x27,Lsqr8x_mul // note that carry flag is guaranteed // to be zero at this point cmp x1,x3 // done yet? b.eq Lsqr8x_break ldp x6,x7,[x2,#8*0] ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] adds x19,x19,x6 ldr x4,[x0,#-8*8] adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x27,#-8*8 adcs x26,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved above b Lsqr8x_mul .align 4 Lsqr8x_break: ldp x6,x7,[x0,#8*0] add x1,x0,#8*8 ldp x8,x9,[x0,#8*2] sub x14,x3,x1 // is it last iteration? 
ldp x10,x11,[x0,#8*4] sub x15,x2,x14 ldp x12,x13,[x0,#8*6] cbz x14,Lsqr8x_outer_loop stp x19,x20,[x2,#8*0] ldp x19,x20,[x15,#8*0] stp x21,x22,[x2,#8*2] ldp x21,x22,[x15,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[x15,#8*4] stp x25,x26,[x2,#8*6] mov x2,x15 ldp x25,x26,[x15,#8*6] b Lsqr8x_outer_loop .align 4 Lsqr8x_outer_break: // Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0] ldp x7,x9,[x14,#8*0] // recall that x14 is &a[0] ldp x15,x16,[sp,#8*1] ldp x11,x13,[x14,#8*2] add x1,x14,#8*4 ldp x17,x14,[sp,#8*3] stp x19,x20,[x2,#8*0] mul x19,x7,x7 stp x21,x22,[x2,#8*2] umulh x7,x7,x7 stp x23,x24,[x2,#8*4] mul x8,x9,x9 stp x25,x26,[x2,#8*6] mov x2,sp umulh x9,x9,x9 adds x20,x7,x15,lsl#1 extr x15,x16,x15,#63 sub x27,x5,#8*4 Lsqr4x_shift_n_add: adcs x21,x8,x15 extr x16,x17,x16,#63 sub x27,x27,#8*4 adcs x22,x9,x16 ldp x15,x16,[x2,#8*5] mul x10,x11,x11 ldp x7,x9,[x1],#8*2 umulh x11,x11,x11 mul x12,x13,x13 umulh x13,x13,x13 extr x17,x14,x17,#63 stp x19,x20,[x2,#8*0] adcs x23,x10,x17 extr x14,x15,x14,#63 stp x21,x22,[x2,#8*2] adcs x24,x11,x14 ldp x17,x14,[x2,#8*7] extr x15,x16,x15,#63 adcs x25,x12,x15 extr x16,x17,x16,#63 adcs x26,x13,x16 ldp x15,x16,[x2,#8*9] mul x6,x7,x7 ldp x11,x13,[x1],#8*2 umulh x7,x7,x7 mul x8,x9,x9 umulh x9,x9,x9 stp x23,x24,[x2,#8*4] extr x17,x14,x17,#63 stp x25,x26,[x2,#8*6] add x2,x2,#8*8 adcs x19,x6,x17 extr x14,x15,x14,#63 adcs x20,x7,x14 ldp x17,x14,[x2,#8*3] extr x15,x16,x15,#63 cbnz x27,Lsqr4x_shift_n_add ldp x1,x4,[x29,#104] // pull np and n0 adcs x21,x8,x15 extr x16,x17,x16,#63 adcs x22,x9,x16 ldp x15,x16,[x2,#8*5] mul x10,x11,x11 umulh x11,x11,x11 stp x19,x20,[x2,#8*0] mul x12,x13,x13 umulh x13,x13,x13 stp x21,x22,[x2,#8*2] extr x17,x14,x17,#63 adcs x23,x10,x17 extr x14,x15,x14,#63 ldp x19,x20,[sp,#8*0] adcs x24,x11,x14 extr x15,x16,x15,#63 ldp x6,x7,[x1,#8*0] adcs x25,x12,x15 extr x16,xzr,x16,#63 ldp x8,x9,[x1,#8*2] adc x26,x13,x16 ldp x10,x11,[x1,#8*4] // Reduce by 512 bits per iteration mul x28,x4,x19 // t[0]*n0 ldp x12,x13,[x1,#8*6] add x3,x1,x5 ldp x21,x22,[sp,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[sp,#8*4] stp x25,x26,[x2,#8*6] ldp x25,x26,[sp,#8*6] add x1,x1,#8*8 mov x30,xzr // initial top-most carry mov x2,sp mov x27,#8 Lsqr8x_reduction: // (*) mul x14,x6,x28 // lo(n[0-7])*lo(t[0]*n0) mul x15,x7,x28 sub x27,x27,#1 mul x16,x8,x28 str x28,[x2],#8 // put aside t[0]*n0 for tail processing mul x17,x9,x28 // (*) adds xzr,x19,x14 subs xzr,x19,#1 // (*) mul x14,x10,x28 adcs x19,x20,x15 mul x15,x11,x28 adcs x20,x21,x16 mul x16,x12,x28 adcs x21,x22,x17 mul x17,x13,x28 adcs x22,x23,x14 umulh x14,x6,x28 // hi(n[0-7])*lo(t[0]*n0) adcs x23,x24,x15 umulh x15,x7,x28 adcs x24,x25,x16 umulh x16,x8,x28 adcs x25,x26,x17 umulh x17,x9,x28 adc x26,xzr,xzr adds x19,x19,x14 umulh x14,x10,x28 adcs x20,x20,x15 umulh x15,x11,x28 adcs x21,x21,x16 umulh x16,x12,x28 adcs x22,x22,x17 umulh x17,x13,x28 mul x28,x4,x19 // next t[0]*n0 adcs x23,x23,x14 adcs x24,x24,x15 adcs x25,x25,x16 adc x26,x26,x17 cbnz x27,Lsqr8x_reduction ldp x14,x15,[x2,#8*0] ldp x16,x17,[x2,#8*2] mov x0,x2 sub x27,x3,x1 // done yet? 
adds x19,x19,x14 adcs x20,x20,x15 ldp x14,x15,[x2,#8*4] adcs x21,x21,x16 adcs x22,x22,x17 ldp x16,x17,[x2,#8*6] adcs x23,x23,x14 adcs x24,x24,x15 adcs x25,x25,x16 adcs x26,x26,x17 //adc x28,xzr,xzr // moved below cbz x27,Lsqr8x8_post_condition ldr x4,[x2,#-8*8] ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] ldp x10,x11,[x1,#8*4] mov x27,#-8*8 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 Lsqr8x_tail: mul x14,x6,x4 adc x28,xzr,xzr // carry bit, modulo-scheduled mul x15,x7,x4 add x27,x27,#8 mul x16,x8,x4 mul x17,x9,x4 adds x19,x19,x14 mul x14,x10,x4 adcs x20,x20,x15 mul x15,x11,x4 adcs x21,x21,x16 mul x16,x12,x4 adcs x22,x22,x17 mul x17,x13,x4 adcs x23,x23,x14 umulh x14,x6,x4 adcs x24,x24,x15 umulh x15,x7,x4 adcs x25,x25,x16 umulh x16,x8,x4 adcs x26,x26,x17 umulh x17,x9,x4 adc x28,x28,xzr str x19,[x2],#8 adds x19,x20,x14 umulh x14,x10,x4 adcs x20,x21,x15 umulh x15,x11,x4 adcs x21,x22,x16 umulh x16,x12,x4 adcs x22,x23,x17 umulh x17,x13,x4 ldr x4,[x0,x27] adcs x23,x24,x14 adcs x24,x25,x15 adcs x25,x26,x16 adcs x26,x28,x17 //adc x28,xzr,xzr // moved above cbnz x27,Lsqr8x_tail // note that carry flag is guaranteed // to be zero at this point ldp x6,x7,[x2,#8*0] sub x27,x3,x1 // done yet? sub x16,x3,x5 // rewinded np ldp x8,x9,[x2,#8*2] ldp x10,x11,[x2,#8*4] ldp x12,x13,[x2,#8*6] cbz x27,Lsqr8x_tail_break ldr x4,[x0,#-8*8] adds x19,x19,x6 adcs x20,x20,x7 ldp x6,x7,[x1,#8*0] adcs x21,x21,x8 adcs x22,x22,x9 ldp x8,x9,[x1,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x1,#8*4] adcs x25,x25,x12 mov x27,#-8*8 adcs x26,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 //adc x28,xzr,xzr // moved above b Lsqr8x_tail .align 4 Lsqr8x_tail_break: ldr x4,[x29,#112] // pull n0 add x27,x2,#8*8 // end of current t[num] window subs xzr,x30,#1 // "move" top-most carry to carry bit adcs x14,x19,x6 adcs x15,x20,x7 ldp x19,x20,[x0,#8*0] adcs x21,x21,x8 ldp x6,x7,[x16,#8*0] // recall that x16 is &n[0] adcs x22,x22,x9 ldp x8,x9,[x16,#8*2] adcs x23,x23,x10 adcs x24,x24,x11 ldp x10,x11,[x16,#8*4] adcs x25,x25,x12 adcs x26,x26,x13 ldp x12,x13,[x16,#8*6] add x1,x16,#8*8 adc x30,xzr,xzr // top-most carry mul x28,x4,x19 stp x14,x15,[x2,#8*0] stp x21,x22,[x2,#8*2] ldp x21,x22,[x0,#8*2] stp x23,x24,[x2,#8*4] ldp x23,x24,[x0,#8*4] cmp x27,x29 // did we hit the bottom? stp x25,x26,[x2,#8*6] mov x2,x0 // slide the window ldp x25,x26,[x0,#8*6] mov x27,#8 b.ne Lsqr8x_reduction // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. ldr x0,[x29,#96] // pull rp add x2,x2,#8*8 subs x14,x19,x6 sbcs x15,x20,x7 sub x27,x5,#8*8 mov x3,x0 // x0 copy Lsqr8x_sub: sbcs x16,x21,x8 ldp x6,x7,[x1,#8*0] sbcs x17,x22,x9 stp x14,x15,[x0,#8*0] sbcs x14,x23,x10 ldp x8,x9,[x1,#8*2] sbcs x15,x24,x11 stp x16,x17,[x0,#8*2] sbcs x16,x25,x12 ldp x10,x11,[x1,#8*4] sbcs x17,x26,x13 ldp x12,x13,[x1,#8*6] add x1,x1,#8*8 ldp x19,x20,[x2,#8*0] sub x27,x27,#8*8 ldp x21,x22,[x2,#8*2] ldp x23,x24,[x2,#8*4] ldp x25,x26,[x2,#8*6] add x2,x2,#8*8 stp x14,x15,[x0,#8*4] sbcs x14,x19,x6 stp x16,x17,[x0,#8*6] add x0,x0,#8*8 sbcs x15,x20,x7 cbnz x27,Lsqr8x_sub sbcs x16,x21,x8 mov x2,sp add x1,sp,x5 ldp x6,x7,[x3,#8*0] sbcs x17,x22,x9 stp x14,x15,[x0,#8*0] sbcs x14,x23,x10 ldp x8,x9,[x3,#8*2] sbcs x15,x24,x11 stp x16,x17,[x0,#8*2] sbcs x16,x25,x12 ldp x19,x20,[x1,#8*0] sbcs x17,x26,x13 ldp x21,x22,[x1,#8*2] sbcs xzr,x30,xzr // did it borrow? 
ldr x30,[x29,#8] // pull return address stp x14,x15,[x0,#8*4] stp x16,x17,[x0,#8*6] sub x27,x5,#8*4 Lsqr4x_cond_copy: sub x27,x27,#8*4 csel x14,x19,x6,lo stp xzr,xzr,[x2,#8*0] csel x15,x20,x7,lo ldp x6,x7,[x3,#8*4] ldp x19,x20,[x1,#8*4] csel x16,x21,x8,lo stp xzr,xzr,[x2,#8*2] add x2,x2,#8*4 csel x17,x22,x9,lo ldp x8,x9,[x3,#8*6] ldp x21,x22,[x1,#8*6] add x1,x1,#8*4 stp x14,x15,[x3,#8*0] stp x16,x17,[x3,#8*2] add x3,x3,#8*4 stp xzr,xzr,[x1,#8*0] stp xzr,xzr,[x1,#8*2] cbnz x27,Lsqr4x_cond_copy csel x14,x19,x6,lo stp xzr,xzr,[x2,#8*0] csel x15,x20,x7,lo stp xzr,xzr,[x2,#8*2] csel x16,x21,x8,lo csel x17,x22,x9,lo stp x14,x15,[x3,#8*0] stp x16,x17,[x3,#8*2] b Lsqr8x_done .align 4 Lsqr8x8_post_condition: adc x28,xzr,xzr ldr x30,[x29,#8] // pull return address // x19-7,x28 hold result, x6-7 hold modulus subs x6,x19,x6 ldr x1,[x29,#96] // pull rp sbcs x7,x20,x7 stp xzr,xzr,[sp,#8*0] sbcs x8,x21,x8 stp xzr,xzr,[sp,#8*2] sbcs x9,x22,x9 stp xzr,xzr,[sp,#8*4] sbcs x10,x23,x10 stp xzr,xzr,[sp,#8*6] sbcs x11,x24,x11 stp xzr,xzr,[sp,#8*8] sbcs x12,x25,x12 stp xzr,xzr,[sp,#8*10] sbcs x13,x26,x13 stp xzr,xzr,[sp,#8*12] sbcs x28,x28,xzr // did it borrow? stp xzr,xzr,[sp,#8*14] // x6-7 hold result-modulus csel x6,x19,x6,lo csel x7,x20,x7,lo csel x8,x21,x8,lo csel x9,x22,x9,lo stp x6,x7,[x1,#8*0] csel x10,x23,x10,lo csel x11,x24,x11,lo stp x8,x9,[x1,#8*2] csel x12,x25,x12,lo csel x13,x26,x13,lo stp x10,x11,[x1,#8*4] stp x12,x13,[x1,#8*6] Lsqr8x_done: ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 // x30 is popped earlier AARCH64_VALIDATE_LINK_REGISTER ret .globl _bn_mul4x_mont .private_extern _bn_mul4x_mont .align 5 _bn_mul4x_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub x26,sp,x5,lsl#3 lsl x5,x5,#3 ldr x4,[x4] // *n0 sub sp,x26,#8*4 // alloca add x10,x2,x5 add x27,x1,x5 stp x0,x10,[x29,#96] // offload rp and &b[num] ldr x24,[x2,#8*0] // b[0] ldp x6,x7,[x1,#8*0] // a[0..3] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 mov x19,xzr mov x20,xzr mov x21,xzr mov x22,xzr ldp x14,x15,[x3,#8*0] // n[0..3] ldp x16,x17,[x3,#8*2] adds x3,x3,#8*4 // clear carry bit mov x0,xzr mov x28,#0 mov x26,sp Loop_mul4x_1st_reduction: mul x10,x6,x24 // lo(a[0..3]*b[0]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[0..3]*b[0]) adcs x20,x20,x11 mul x25,x19,x4 // t[0]*n0 adcs x21,x21,x12 umulh x11,x7,x24 adcs x22,x22,x13 umulh x12,x8,x24 adc x23,xzr,xzr umulh x13,x9,x24 ldr x24,[x2,x28] // next b[i] (or b[0]) adds x20,x20,x10 // (*) mul x10,x14,x25 // lo(n[0..3]*t[0]*n0) str x25,[x26],#8 // put aside t[0]*n0 for tail processing adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 // (*) adds xzr,x19,x10 subs xzr,x19,#1 // (*) umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0) adcs x19,x20,x11 umulh x11,x15,x25 adcs x20,x21,x12 umulh x12,x16,x25 adcs x21,x22,x13 umulh x13,x17,x25 adcs x22,x23,x0 adc x0,xzr,xzr adds x19,x19,x10 sub x10,x27,x1 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_1st_reduction cbz x10,Lmul4x4_post_condition ldp x6,x7,[x1,#8*0] // a[4..7] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 ldr x25,[sp] // a[0]*n0 ldp x14,x15,[x3,#8*0] // n[4..7] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 Loop_mul4x_1st_tail: mul x10,x6,x24 // lo(a[4..7]*b[i]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[4..7]*b[i]) adcs x20,x20,x11 umulh x11,x7,x24 adcs x21,x21,x12 umulh x12,x8,x24 adcs x22,x22,x13 umulh x13,x9,x24 adc x23,xzr,xzr ldr x24,[x2,x28] // next b[i] (or b[0]) adds x20,x20,x10 mul x10,x14,x25 // lo(n[4..7]*a[0]*n0) adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 adds x19,x19,x10 umulh x10,x14,x25 // hi(n[4..7]*a[0]*n0) adcs x20,x20,x11 umulh x11,x15,x25 adcs x21,x21,x12 umulh x12,x16,x25 adcs x22,x22,x13 adcs x23,x23,x0 umulh x13,x17,x25 adc x0,xzr,xzr ldr x25,[sp,x28] // next t[0]*n0 str x19,[x26],#8 // result!!! adds x19,x20,x10 sub x10,x27,x1 // done yet? adcs x20,x21,x11 adcs x21,x22,x12 adcs x22,x23,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_1st_tail sub x11,x27,x5 // rewinded x1 cbz x10,Lmul4x_proceed ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 ldp x14,x15,[x3,#8*0] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 b Loop_mul4x_1st_tail .align 5 Lmul4x_proceed: ldr x24,[x2,#8*4]! // *++b adc x30,x0,xzr ldp x6,x7,[x11,#8*0] // a[0..3] sub x3,x3,x5 // rewind np ldp x8,x9,[x11,#8*2] add x1,x11,#8*4 stp x19,x20,[x26,#8*0] // result!!! ldp x19,x20,[sp,#8*4] // t[0..3] stp x21,x22,[x26,#8*2] // result!!! 
ldp x21,x22,[sp,#8*6] ldp x14,x15,[x3,#8*0] // n[0..3] mov x26,sp ldp x16,x17,[x3,#8*2] adds x3,x3,#8*4 // clear carry bit mov x0,xzr .align 4 Loop_mul4x_reduction: mul x10,x6,x24 // lo(a[0..3]*b[4]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[0..3]*b[4]) adcs x20,x20,x11 mul x25,x19,x4 // t[0]*n0 adcs x21,x21,x12 umulh x11,x7,x24 adcs x22,x22,x13 umulh x12,x8,x24 adc x23,xzr,xzr umulh x13,x9,x24 ldr x24,[x2,x28] // next b[i] adds x20,x20,x10 // (*) mul x10,x14,x25 str x25,[x26],#8 // put aside t[0]*n0 for tail processing adcs x21,x21,x11 mul x11,x15,x25 // lo(n[0..3]*t[0]*n0 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 // (*) adds xzr,x19,x10 subs xzr,x19,#1 // (*) umulh x10,x14,x25 // hi(n[0..3]*t[0]*n0 adcs x19,x20,x11 umulh x11,x15,x25 adcs x20,x21,x12 umulh x12,x16,x25 adcs x21,x22,x13 umulh x13,x17,x25 adcs x22,x23,x0 adc x0,xzr,xzr adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_reduction adc x0,x0,xzr ldp x10,x11,[x26,#8*4] // t[4..7] ldp x12,x13,[x26,#8*6] ldp x6,x7,[x1,#8*0] // a[4..7] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr ldr x25,[sp] // t[0]*n0 ldp x14,x15,[x3,#8*0] // n[4..7] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 .align 4 Loop_mul4x_tail: mul x10,x6,x24 // lo(a[4..7]*b[4]) adc x0,x0,xzr // modulo-scheduled mul x11,x7,x24 add x28,x28,#8 mul x12,x8,x24 and x28,x28,#31 mul x13,x9,x24 adds x19,x19,x10 umulh x10,x6,x24 // hi(a[4..7]*b[4]) adcs x20,x20,x11 umulh x11,x7,x24 adcs x21,x21,x12 umulh x12,x8,x24 adcs x22,x22,x13 umulh x13,x9,x24 adc x23,xzr,xzr ldr x24,[x2,x28] // next b[i] adds x20,x20,x10 mul x10,x14,x25 // lo(n[4..7]*t[0]*n0) adcs x21,x21,x11 mul x11,x15,x25 adcs x22,x22,x12 mul x12,x16,x25 adc x23,x23,x13 // can't overflow mul x13,x17,x25 adds x19,x19,x10 umulh x10,x14,x25 // hi(n[4..7]*t[0]*n0) adcs x20,x20,x11 umulh x11,x15,x25 adcs x21,x21,x12 umulh x12,x16,x25 adcs x22,x22,x13 umulh x13,x17,x25 adcs x23,x23,x0 ldr x25,[sp,x28] // next a[0]*n0 adc x0,xzr,xzr str x19,[x26],#8 // result!!! adds x19,x20,x10 sub x10,x27,x1 // done yet? adcs x20,x21,x11 adcs x21,x22,x12 adcs x22,x23,x13 //adc x0,x0,xzr cbnz x28,Loop_mul4x_tail sub x11,x3,x5 // rewinded np? adc x0,x0,xzr cbz x10,Loop_mul4x_break ldp x10,x11,[x26,#8*4] ldp x12,x13,[x26,#8*6] ldp x6,x7,[x1,#8*0] ldp x8,x9,[x1,#8*2] add x1,x1,#8*4 adds x19,x19,x10 adcs x20,x20,x11 adcs x21,x21,x12 adcs x22,x22,x13 //adc x0,x0,xzr ldp x14,x15,[x3,#8*0] ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 b Loop_mul4x_tail .align 4 Loop_mul4x_break: ldp x12,x13,[x29,#96] // pull rp and &b[num] adds x19,x19,x30 add x2,x2,#8*4 // bp++ adcs x20,x20,xzr sub x1,x1,x5 // rewind ap adcs x21,x21,xzr stp x19,x20,[x26,#8*0] // result!!! adcs x22,x22,xzr ldp x19,x20,[sp,#8*4] // t[0..3] adc x30,x0,xzr stp x21,x22,[x26,#8*2] // result!!! cmp x2,x13 // done yet? ldp x21,x22,[sp,#8*6] ldp x14,x15,[x11,#8*0] // n[0..3] ldp x16,x17,[x11,#8*2] add x3,x11,#8*4 b.eq Lmul4x_post ldr x24,[x2] ldp x6,x7,[x1,#8*0] // a[0..3] ldp x8,x9,[x1,#8*2] adds x1,x1,#8*4 // clear carry bit mov x0,xzr mov x26,sp b Loop_mul4x_reduction .align 4 Lmul4x_post: // Final step. We see if result is larger than modulus, and // if it is, subtract the modulus. But comparison implies // subtraction. So we subtract modulus, see if it borrowed, // and conditionally copy original value. 
mov x0,x12 mov x27,x12 // x0 copy subs x10,x19,x14 add x26,sp,#8*8 sbcs x11,x20,x15 sub x28,x5,#8*4 Lmul4x_sub: sbcs x12,x21,x16 ldp x14,x15,[x3,#8*0] sub x28,x28,#8*4 ldp x19,x20,[x26,#8*0] sbcs x13,x22,x17 ldp x16,x17,[x3,#8*2] add x3,x3,#8*4 ldp x21,x22,[x26,#8*2] add x26,x26,#8*4 stp x10,x11,[x0,#8*0] sbcs x10,x19,x14 stp x12,x13,[x0,#8*2] add x0,x0,#8*4 sbcs x11,x20,x15 cbnz x28,Lmul4x_sub sbcs x12,x21,x16 mov x26,sp add x1,sp,#8*4 ldp x6,x7,[x27,#8*0] sbcs x13,x22,x17 stp x10,x11,[x0,#8*0] ldp x8,x9,[x27,#8*2] stp x12,x13,[x0,#8*2] ldp x19,x20,[x1,#8*0] ldp x21,x22,[x1,#8*2] sbcs xzr,x30,xzr // did it borrow? ldr x30,[x29,#8] // pull return address sub x28,x5,#8*4 Lmul4x_cond_copy: sub x28,x28,#8*4 csel x10,x19,x6,lo stp xzr,xzr,[x26,#8*0] csel x11,x20,x7,lo ldp x6,x7,[x27,#8*4] ldp x19,x20,[x1,#8*4] csel x12,x21,x8,lo stp xzr,xzr,[x26,#8*2] add x26,x26,#8*4 csel x13,x22,x9,lo ldp x8,x9,[x27,#8*6] ldp x21,x22,[x1,#8*6] add x1,x1,#8*4 stp x10,x11,[x27,#8*0] stp x12,x13,[x27,#8*2] add x27,x27,#8*4 cbnz x28,Lmul4x_cond_copy csel x10,x19,x6,lo stp xzr,xzr,[x26,#8*0] csel x11,x20,x7,lo stp xzr,xzr,[x26,#8*2] csel x12,x21,x8,lo stp xzr,xzr,[x26,#8*3] csel x13,x22,x9,lo stp xzr,xzr,[x26,#8*4] stp x10,x11,[x27,#8*0] stp x12,x13,[x27,#8*2] b Lmul4x_done .align 4 Lmul4x4_post_condition: adc x0,x0,xzr ldr x1,[x29,#96] // pull rp // x19-3,x0 hold result, x14-7 hold modulus subs x6,x19,x14 ldr x30,[x29,#8] // pull return address sbcs x7,x20,x15 stp xzr,xzr,[sp,#8*0] sbcs x8,x21,x16 stp xzr,xzr,[sp,#8*2] sbcs x9,x22,x17 stp xzr,xzr,[sp,#8*4] sbcs xzr,x0,xzr // did it borrow? stp xzr,xzr,[sp,#8*6] // x6-3 hold result-modulus csel x6,x19,x6,lo csel x7,x20,x7,lo csel x8,x21,x8,lo csel x9,x22,x9,lo stp x6,x7,[x1,#8*0] stp x8,x9,[x1,#8*2] Lmul4x_done: ldp x19,x20,[x29,#16] mov sp,x29 ldp x21,x22,[x29,#32] mov x0,#1 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 // x30 is popped earlier AARCH64_VALIDATE_LINK_REGISTER ret .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 4 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
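Editor's note: both _bn_mul_mont_nohw and the 4x/8x variants above end with the step their "Final step" comments describe: subtract the modulus once, inspect the borrow, and keep either the difference or the original value without a data-dependent branch (the csel sequences in Lcond_copy / Lsqr4x_cond_copy / Lmul4x_cond_copy). A minimal sketch of that idea — not ring's implementation, with the hypothetical names montgomery_final_sub, t and top_carry — in Rust:

fn montgomery_final_sub(t: &[u64], top_carry: u64, n: &[u64], rp: &mut [u64]) {
    assert!(t.len() == n.len() && n.len() == rp.len());
    // rp = t - n, limb by limb, propagating the borrow (the sbcs chain in Lsub).
    let mut borrow = 0u64;
    for i in 0..t.len() {
        let (d1, b1) = t[i].overflowing_sub(n[i]);
        let (d2, b2) = d1.overflowing_sub(borrow);
        rp[i] = d2;
        borrow = (b1 as u64) + (b2 as u64);
    }
    // A borrow that exceeds the top carry bit means t < n, so the original value
    // must be kept. Select with a mask instead of a branch, mirroring csel.
    let keep_t = 0u64.wrapping_sub((borrow > top_carry) as u64); // all-ones or all-zeros
    for i in 0..rp.len() {
        rp[i] = (t[i] & keep_t) | (rp[i] & !keep_t);
    }
}

The same files also rely on the (*) trick documented near the top of _bn_mul_mont_nohw: the discarded first addition can only signal a carry, and that carry is exactly "x6 is non-zero", which subs xzr,x6,#1 recovers without performing the addition.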
marvin-hansen/iggy-streaming-system
30,277
thirdparty/crates/ring-0.17.9/pregenerated/chacha-x86_64-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .section __DATA,__const .p2align 6 L$zero: .long 0,0,0,0 L$one: .long 1,0,0,0 L$inc: .long 0,1,2,3 L$four: .long 4,4,4,4 L$incy: .long 0,2,4,6,1,3,5,7 L$eight: .long 8,8,8,8,8,8,8,8 L$rot16: .byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd L$rot24: .byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe L$sigma: .byte 101,120,112,97,110,100,32,51,50,45,98,121,116,101,32,107,0 .p2align 6 L$zeroz: .long 0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0 L$fourz: .long 4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0 L$incz: .long 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 L$sixteen: .long 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text .globl _ChaCha20_ctr32_nohw .private_extern _ChaCha20_ctr32_nohw .p2align 6 _ChaCha20_ctr32_nohw: _CET_ENDBR pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $64+24,%rsp L$ctr32_body: movdqu (%rcx),%xmm1 movdqu 16(%rcx),%xmm2 movdqu (%r8),%xmm3 movdqa L$one(%rip),%xmm4 movdqa %xmm1,16(%rsp) movdqa %xmm2,32(%rsp) movdqa %xmm3,48(%rsp) movq %rdx,%rbp jmp L$oop_outer .p2align 5 L$oop_outer: movl $0x61707865,%eax movl $0x3320646e,%ebx movl $0x79622d32,%ecx movl $0x6b206574,%edx movl 16(%rsp),%r8d movl 20(%rsp),%r9d movl 24(%rsp),%r10d movl 28(%rsp),%r11d movd %xmm3,%r12d movl 52(%rsp),%r13d movl 56(%rsp),%r14d movl 60(%rsp),%r15d movq %rbp,64+0(%rsp) movl $10,%ebp movq %rsi,64+8(%rsp) .byte 102,72,15,126,214 movq %rdi,64+16(%rsp) movq %rsi,%rdi shrq $32,%rdi jmp L$oop .p2align 5 L$oop: addl %r8d,%eax xorl %eax,%r12d roll $16,%r12d addl %r9d,%ebx xorl %ebx,%r13d roll $16,%r13d addl %r12d,%esi xorl %esi,%r8d roll $12,%r8d addl %r13d,%edi xorl %edi,%r9d roll $12,%r9d addl %r8d,%eax xorl %eax,%r12d roll $8,%r12d addl %r9d,%ebx xorl %ebx,%r13d roll $8,%r13d addl %r12d,%esi xorl %esi,%r8d roll $7,%r8d addl %r13d,%edi xorl %edi,%r9d roll $7,%r9d movl %esi,32(%rsp) movl %edi,36(%rsp) movl 40(%rsp),%esi movl 44(%rsp),%edi addl %r10d,%ecx xorl %ecx,%r14d roll $16,%r14d addl %r11d,%edx xorl %edx,%r15d roll $16,%r15d addl %r14d,%esi xorl %esi,%r10d roll $12,%r10d addl %r15d,%edi xorl %edi,%r11d roll $12,%r11d addl %r10d,%ecx xorl %ecx,%r14d roll $8,%r14d addl %r11d,%edx xorl %edx,%r15d roll $8,%r15d addl %r14d,%esi xorl %esi,%r10d roll $7,%r10d addl %r15d,%edi xorl %edi,%r11d roll $7,%r11d addl %r9d,%eax xorl %eax,%r15d roll $16,%r15d addl %r10d,%ebx xorl %ebx,%r12d roll $16,%r12d addl %r15d,%esi xorl %esi,%r9d roll $12,%r9d addl %r12d,%edi xorl %edi,%r10d roll $12,%r10d addl %r9d,%eax xorl %eax,%r15d roll $8,%r15d addl %r10d,%ebx xorl %ebx,%r12d roll $8,%r12d addl %r15d,%esi xorl %esi,%r9d roll $7,%r9d addl %r12d,%edi xorl %edi,%r10d roll $7,%r10d movl %esi,40(%rsp) movl %edi,44(%rsp) movl 32(%rsp),%esi movl 36(%rsp),%edi addl %r11d,%ecx xorl %ecx,%r13d roll $16,%r13d addl %r8d,%edx xorl %edx,%r14d roll $16,%r14d addl %r13d,%esi xorl %esi,%r11d roll $12,%r11d addl %r14d,%edi xorl %edi,%r8d roll $12,%r8d addl %r11d,%ecx xorl %ecx,%r13d roll $8,%r13d addl %r8d,%edx xorl %edx,%r14d roll $8,%r14d addl %r13d,%esi xorl %esi,%r11d roll $7,%r11d addl %r14d,%edi xorl %edi,%r8d roll $7,%r8d decl %ebp jnz L$oop movl %edi,36(%rsp) movl 
%esi,32(%rsp) movq 64(%rsp),%rbp movdqa %xmm2,%xmm1 movq 64+8(%rsp),%rsi paddd %xmm4,%xmm3 movq 64+16(%rsp),%rdi addl $0x61707865,%eax addl $0x3320646e,%ebx addl $0x79622d32,%ecx addl $0x6b206574,%edx addl 16(%rsp),%r8d addl 20(%rsp),%r9d addl 24(%rsp),%r10d addl 28(%rsp),%r11d addl 48(%rsp),%r12d addl 52(%rsp),%r13d addl 56(%rsp),%r14d addl 60(%rsp),%r15d paddd 32(%rsp),%xmm1 cmpq $64,%rbp jb L$tail xorl 0(%rsi),%eax xorl 4(%rsi),%ebx xorl 8(%rsi),%ecx xorl 12(%rsi),%edx xorl 16(%rsi),%r8d xorl 20(%rsi),%r9d xorl 24(%rsi),%r10d xorl 28(%rsi),%r11d movdqu 32(%rsi),%xmm0 xorl 48(%rsi),%r12d xorl 52(%rsi),%r13d xorl 56(%rsi),%r14d xorl 60(%rsi),%r15d leaq 64(%rsi),%rsi pxor %xmm1,%xmm0 movdqa %xmm2,32(%rsp) movd %xmm3,48(%rsp) movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) movdqu %xmm0,32(%rdi) movl %r12d,48(%rdi) movl %r13d,52(%rdi) movl %r14d,56(%rdi) movl %r15d,60(%rdi) leaq 64(%rdi),%rdi subq $64,%rbp jnz L$oop_outer jmp L$done .p2align 4 L$tail: movl %eax,0(%rsp) movl %ebx,4(%rsp) xorq %rbx,%rbx movl %ecx,8(%rsp) movl %edx,12(%rsp) movl %r8d,16(%rsp) movl %r9d,20(%rsp) movl %r10d,24(%rsp) movl %r11d,28(%rsp) movdqa %xmm1,32(%rsp) movl %r12d,48(%rsp) movl %r13d,52(%rsp) movl %r14d,56(%rsp) movl %r15d,60(%rsp) L$oop_tail: movzbl (%rsi,%rbx,1),%eax movzbl (%rsp,%rbx,1),%edx leaq 1(%rbx),%rbx xorl %edx,%eax movb %al,-1(%rdi,%rbx,1) decq %rbp jnz L$oop_tail L$done: leaq 64+24+48(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$no_data: ret .globl _ChaCha20_ctr32_ssse3_4x .private_extern _ChaCha20_ctr32_ssse3_4x .p2align 5 _ChaCha20_ctr32_ssse3_4x: _CET_ENDBR movq %rsp,%r9 subq $0x140+8,%rsp movdqa L$sigma(%rip),%xmm11 movdqu (%rcx),%xmm15 movdqu 16(%rcx),%xmm7 movdqu (%r8),%xmm3 leaq 256(%rsp),%rcx leaq L$rot16(%rip),%r10 leaq L$rot24(%rip),%r11 pshufd $0x00,%xmm11,%xmm8 pshufd $0x55,%xmm11,%xmm9 movdqa %xmm8,64(%rsp) pshufd $0xaa,%xmm11,%xmm10 movdqa %xmm9,80(%rsp) pshufd $0xff,%xmm11,%xmm11 movdqa %xmm10,96(%rsp) movdqa %xmm11,112(%rsp) pshufd $0x00,%xmm15,%xmm12 pshufd $0x55,%xmm15,%xmm13 movdqa %xmm12,128-256(%rcx) pshufd $0xaa,%xmm15,%xmm14 movdqa %xmm13,144-256(%rcx) pshufd $0xff,%xmm15,%xmm15 movdqa %xmm14,160-256(%rcx) movdqa %xmm15,176-256(%rcx) pshufd $0x00,%xmm7,%xmm4 pshufd $0x55,%xmm7,%xmm5 movdqa %xmm4,192-256(%rcx) pshufd $0xaa,%xmm7,%xmm6 movdqa %xmm5,208-256(%rcx) pshufd $0xff,%xmm7,%xmm7 movdqa %xmm6,224-256(%rcx) movdqa %xmm7,240-256(%rcx) pshufd $0x00,%xmm3,%xmm0 pshufd $0x55,%xmm3,%xmm1 paddd L$inc(%rip),%xmm0 pshufd $0xaa,%xmm3,%xmm2 movdqa %xmm1,272-256(%rcx) pshufd $0xff,%xmm3,%xmm3 movdqa %xmm2,288-256(%rcx) movdqa %xmm3,304-256(%rcx) jmp L$oop_enter4x .p2align 5 L$oop_outer4x: movdqa 64(%rsp),%xmm8 movdqa 80(%rsp),%xmm9 movdqa 96(%rsp),%xmm10 movdqa 112(%rsp),%xmm11 movdqa 128-256(%rcx),%xmm12 movdqa 144-256(%rcx),%xmm13 movdqa 160-256(%rcx),%xmm14 movdqa 176-256(%rcx),%xmm15 movdqa 192-256(%rcx),%xmm4 movdqa 208-256(%rcx),%xmm5 movdqa 224-256(%rcx),%xmm6 movdqa 240-256(%rcx),%xmm7 movdqa 256-256(%rcx),%xmm0 movdqa 272-256(%rcx),%xmm1 movdqa 288-256(%rcx),%xmm2 movdqa 304-256(%rcx),%xmm3 paddd L$four(%rip),%xmm0 L$oop_enter4x: movdqa %xmm6,32(%rsp) movdqa %xmm7,48(%rsp) movdqa (%r10),%xmm7 movl $10,%eax movdqa %xmm0,256-256(%rcx) jmp L$oop4x .p2align 5 L$oop4x: paddd %xmm12,%xmm8 paddd %xmm13,%xmm9 pxor %xmm8,%xmm0 pxor %xmm9,%xmm1 .byte 102,15,56,0,199 .byte 
102,15,56,0,207 paddd %xmm0,%xmm4 paddd %xmm1,%xmm5 pxor %xmm4,%xmm12 pxor %xmm5,%xmm13 movdqa %xmm12,%xmm6 pslld $12,%xmm12 psrld $20,%xmm6 movdqa %xmm13,%xmm7 pslld $12,%xmm13 por %xmm6,%xmm12 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm13 paddd %xmm12,%xmm8 paddd %xmm13,%xmm9 pxor %xmm8,%xmm0 pxor %xmm9,%xmm1 .byte 102,15,56,0,198 .byte 102,15,56,0,206 paddd %xmm0,%xmm4 paddd %xmm1,%xmm5 pxor %xmm4,%xmm12 pxor %xmm5,%xmm13 movdqa %xmm12,%xmm7 pslld $7,%xmm12 psrld $25,%xmm7 movdqa %xmm13,%xmm6 pslld $7,%xmm13 por %xmm7,%xmm12 psrld $25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm13 movdqa %xmm4,0(%rsp) movdqa %xmm5,16(%rsp) movdqa 32(%rsp),%xmm4 movdqa 48(%rsp),%xmm5 paddd %xmm14,%xmm10 paddd %xmm15,%xmm11 pxor %xmm10,%xmm2 pxor %xmm11,%xmm3 .byte 102,15,56,0,215 .byte 102,15,56,0,223 paddd %xmm2,%xmm4 paddd %xmm3,%xmm5 pxor %xmm4,%xmm14 pxor %xmm5,%xmm15 movdqa %xmm14,%xmm6 pslld $12,%xmm14 psrld $20,%xmm6 movdqa %xmm15,%xmm7 pslld $12,%xmm15 por %xmm6,%xmm14 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm15 paddd %xmm14,%xmm10 paddd %xmm15,%xmm11 pxor %xmm10,%xmm2 pxor %xmm11,%xmm3 .byte 102,15,56,0,214 .byte 102,15,56,0,222 paddd %xmm2,%xmm4 paddd %xmm3,%xmm5 pxor %xmm4,%xmm14 pxor %xmm5,%xmm15 movdqa %xmm14,%xmm7 pslld $7,%xmm14 psrld $25,%xmm7 movdqa %xmm15,%xmm6 pslld $7,%xmm15 por %xmm7,%xmm14 psrld $25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm15 paddd %xmm13,%xmm8 paddd %xmm14,%xmm9 pxor %xmm8,%xmm3 pxor %xmm9,%xmm0 .byte 102,15,56,0,223 .byte 102,15,56,0,199 paddd %xmm3,%xmm4 paddd %xmm0,%xmm5 pxor %xmm4,%xmm13 pxor %xmm5,%xmm14 movdqa %xmm13,%xmm6 pslld $12,%xmm13 psrld $20,%xmm6 movdqa %xmm14,%xmm7 pslld $12,%xmm14 por %xmm6,%xmm13 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm14 paddd %xmm13,%xmm8 paddd %xmm14,%xmm9 pxor %xmm8,%xmm3 pxor %xmm9,%xmm0 .byte 102,15,56,0,222 .byte 102,15,56,0,198 paddd %xmm3,%xmm4 paddd %xmm0,%xmm5 pxor %xmm4,%xmm13 pxor %xmm5,%xmm14 movdqa %xmm13,%xmm7 pslld $7,%xmm13 psrld $25,%xmm7 movdqa %xmm14,%xmm6 pslld $7,%xmm14 por %xmm7,%xmm13 psrld $25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm14 movdqa %xmm4,32(%rsp) movdqa %xmm5,48(%rsp) movdqa 0(%rsp),%xmm4 movdqa 16(%rsp),%xmm5 paddd %xmm15,%xmm10 paddd %xmm12,%xmm11 pxor %xmm10,%xmm1 pxor %xmm11,%xmm2 .byte 102,15,56,0,207 .byte 102,15,56,0,215 paddd %xmm1,%xmm4 paddd %xmm2,%xmm5 pxor %xmm4,%xmm15 pxor %xmm5,%xmm12 movdqa %xmm15,%xmm6 pslld $12,%xmm15 psrld $20,%xmm6 movdqa %xmm12,%xmm7 pslld $12,%xmm12 por %xmm6,%xmm15 psrld $20,%xmm7 movdqa (%r11),%xmm6 por %xmm7,%xmm12 paddd %xmm15,%xmm10 paddd %xmm12,%xmm11 pxor %xmm10,%xmm1 pxor %xmm11,%xmm2 .byte 102,15,56,0,206 .byte 102,15,56,0,214 paddd %xmm1,%xmm4 paddd %xmm2,%xmm5 pxor %xmm4,%xmm15 pxor %xmm5,%xmm12 movdqa %xmm15,%xmm7 pslld $7,%xmm15 psrld $25,%xmm7 movdqa %xmm12,%xmm6 pslld $7,%xmm12 por %xmm7,%xmm15 psrld $25,%xmm6 movdqa (%r10),%xmm7 por %xmm6,%xmm12 decl %eax jnz L$oop4x paddd 64(%rsp),%xmm8 paddd 80(%rsp),%xmm9 paddd 96(%rsp),%xmm10 paddd 112(%rsp),%xmm11 movdqa %xmm8,%xmm6 punpckldq %xmm9,%xmm8 movdqa %xmm10,%xmm7 punpckldq %xmm11,%xmm10 punpckhdq %xmm9,%xmm6 punpckhdq %xmm11,%xmm7 movdqa %xmm8,%xmm9 punpcklqdq %xmm10,%xmm8 movdqa %xmm6,%xmm11 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm10,%xmm9 punpckhqdq %xmm7,%xmm11 paddd 128-256(%rcx),%xmm12 paddd 144-256(%rcx),%xmm13 paddd 160-256(%rcx),%xmm14 paddd 176-256(%rcx),%xmm15 movdqa %xmm8,0(%rsp) movdqa %xmm9,16(%rsp) movdqa 32(%rsp),%xmm8 movdqa 48(%rsp),%xmm9 movdqa %xmm12,%xmm10 punpckldq %xmm13,%xmm12 movdqa %xmm14,%xmm7 punpckldq %xmm15,%xmm14 punpckhdq %xmm13,%xmm10 
punpckhdq %xmm15,%xmm7 movdqa %xmm12,%xmm13 punpcklqdq %xmm14,%xmm12 movdqa %xmm10,%xmm15 punpcklqdq %xmm7,%xmm10 punpckhqdq %xmm14,%xmm13 punpckhqdq %xmm7,%xmm15 paddd 192-256(%rcx),%xmm4 paddd 208-256(%rcx),%xmm5 paddd 224-256(%rcx),%xmm8 paddd 240-256(%rcx),%xmm9 movdqa %xmm6,32(%rsp) movdqa %xmm11,48(%rsp) movdqa %xmm4,%xmm14 punpckldq %xmm5,%xmm4 movdqa %xmm8,%xmm7 punpckldq %xmm9,%xmm8 punpckhdq %xmm5,%xmm14 punpckhdq %xmm9,%xmm7 movdqa %xmm4,%xmm5 punpcklqdq %xmm8,%xmm4 movdqa %xmm14,%xmm9 punpcklqdq %xmm7,%xmm14 punpckhqdq %xmm8,%xmm5 punpckhqdq %xmm7,%xmm9 paddd 256-256(%rcx),%xmm0 paddd 272-256(%rcx),%xmm1 paddd 288-256(%rcx),%xmm2 paddd 304-256(%rcx),%xmm3 movdqa %xmm0,%xmm8 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm8 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm8,%xmm3 punpcklqdq %xmm7,%xmm8 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 cmpq $256,%rdx jb L$tail4x movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu %xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu %xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 leaq 128(%rsi),%rsi pxor 16(%rsp),%xmm6 pxor %xmm13,%xmm11 pxor %xmm5,%xmm2 pxor %xmm1,%xmm7 movdqu %xmm6,64(%rdi) movdqu 0(%rsi),%xmm6 movdqu %xmm11,80(%rdi) movdqu 16(%rsi),%xmm11 movdqu %xmm2,96(%rdi) movdqu 32(%rsi),%xmm2 movdqu %xmm7,112(%rdi) leaq 128(%rdi),%rdi movdqu 48(%rsi),%xmm7 pxor 32(%rsp),%xmm6 pxor %xmm10,%xmm11 pxor %xmm14,%xmm2 pxor %xmm8,%xmm7 movdqu %xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu %xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 leaq 128(%rsi),%rsi pxor 48(%rsp),%xmm6 pxor %xmm15,%xmm11 pxor %xmm9,%xmm2 pxor %xmm3,%xmm7 movdqu %xmm6,64(%rdi) movdqu %xmm11,80(%rdi) movdqu %xmm2,96(%rdi) movdqu %xmm7,112(%rdi) leaq 128(%rdi),%rdi subq $256,%rdx jnz L$oop_outer4x jmp L$done4x L$tail4x: cmpq $192,%rdx jae L$192_or_more4x cmpq $128,%rdx jae L$128_or_more4x cmpq $64,%rdx jae L$64_or_more4x xorq %r10,%r10 movdqa %xmm12,16(%rsp) movdqa %xmm4,32(%rsp) movdqa %xmm0,48(%rsp) jmp L$oop_tail4x .p2align 5 L$64_or_more4x: movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu %xmm6,0(%rdi) movdqu %xmm11,16(%rdi) movdqu %xmm2,32(%rdi) movdqu %xmm7,48(%rdi) je L$done4x movdqa 16(%rsp),%xmm6 leaq 64(%rsi),%rsi xorq %r10,%r10 movdqa %xmm6,0(%rsp) movdqa %xmm13,16(%rsp) leaq 64(%rdi),%rdi movdqa %xmm5,32(%rsp) subq $64,%rdx movdqa %xmm1,48(%rsp) jmp L$oop_tail4x .p2align 5 L$128_or_more4x: movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu %xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu %xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 pxor 16(%rsp),%xmm6 pxor %xmm13,%xmm11 pxor %xmm5,%xmm2 pxor %xmm1,%xmm7 movdqu %xmm6,64(%rdi) movdqu %xmm11,80(%rdi) movdqu %xmm2,96(%rdi) movdqu %xmm7,112(%rdi) je L$done4x movdqa 32(%rsp),%xmm6 leaq 128(%rsi),%rsi xorq %r10,%r10 movdqa %xmm6,0(%rsp) movdqa %xmm10,16(%rsp) leaq 128(%rdi),%rdi movdqa %xmm14,32(%rsp) subq $128,%rdx movdqa %xmm8,48(%rsp) jmp L$oop_tail4x .p2align 5 L$192_or_more4x: 
movdqu 0(%rsi),%xmm6 movdqu 16(%rsi),%xmm11 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm7 pxor 0(%rsp),%xmm6 pxor %xmm12,%xmm11 pxor %xmm4,%xmm2 pxor %xmm0,%xmm7 movdqu %xmm6,0(%rdi) movdqu 64(%rsi),%xmm6 movdqu %xmm11,16(%rdi) movdqu 80(%rsi),%xmm11 movdqu %xmm2,32(%rdi) movdqu 96(%rsi),%xmm2 movdqu %xmm7,48(%rdi) movdqu 112(%rsi),%xmm7 leaq 128(%rsi),%rsi pxor 16(%rsp),%xmm6 pxor %xmm13,%xmm11 pxor %xmm5,%xmm2 pxor %xmm1,%xmm7 movdqu %xmm6,64(%rdi) movdqu 0(%rsi),%xmm6 movdqu %xmm11,80(%rdi) movdqu 16(%rsi),%xmm11 movdqu %xmm2,96(%rdi) movdqu 32(%rsi),%xmm2 movdqu %xmm7,112(%rdi) leaq 128(%rdi),%rdi movdqu 48(%rsi),%xmm7 pxor 32(%rsp),%xmm6 pxor %xmm10,%xmm11 pxor %xmm14,%xmm2 pxor %xmm8,%xmm7 movdqu %xmm6,0(%rdi) movdqu %xmm11,16(%rdi) movdqu %xmm2,32(%rdi) movdqu %xmm7,48(%rdi) je L$done4x movdqa 48(%rsp),%xmm6 leaq 64(%rsi),%rsi xorq %r10,%r10 movdqa %xmm6,0(%rsp) movdqa %xmm15,16(%rsp) leaq 64(%rdi),%rdi movdqa %xmm9,32(%rsp) subq $192,%rdx movdqa %xmm3,48(%rsp) L$oop_tail4x: movzbl (%rsi,%r10,1),%eax movzbl (%rsp,%r10,1),%ecx leaq 1(%r10),%r10 xorl %ecx,%eax movb %al,-1(%rdi,%r10,1) decq %rdx jnz L$oop_tail4x L$done4x: leaq (%r9),%rsp L$4x_epilogue: ret .globl _ChaCha20_ctr32_avx2 .private_extern _ChaCha20_ctr32_avx2 .p2align 5 _ChaCha20_ctr32_avx2: _CET_ENDBR movq %rsp,%r9 subq $0x280+8,%rsp andq $-32,%rsp vzeroupper vbroadcasti128 L$sigma(%rip),%ymm11 vbroadcasti128 (%rcx),%ymm3 vbroadcasti128 16(%rcx),%ymm15 vbroadcasti128 (%r8),%ymm7 leaq 256(%rsp),%rcx leaq 512(%rsp),%rax leaq L$rot16(%rip),%r10 leaq L$rot24(%rip),%r11 vpshufd $0x00,%ymm11,%ymm8 vpshufd $0x55,%ymm11,%ymm9 vmovdqa %ymm8,128-256(%rcx) vpshufd $0xaa,%ymm11,%ymm10 vmovdqa %ymm9,160-256(%rcx) vpshufd $0xff,%ymm11,%ymm11 vmovdqa %ymm10,192-256(%rcx) vmovdqa %ymm11,224-256(%rcx) vpshufd $0x00,%ymm3,%ymm0 vpshufd $0x55,%ymm3,%ymm1 vmovdqa %ymm0,256-256(%rcx) vpshufd $0xaa,%ymm3,%ymm2 vmovdqa %ymm1,288-256(%rcx) vpshufd $0xff,%ymm3,%ymm3 vmovdqa %ymm2,320-256(%rcx) vmovdqa %ymm3,352-256(%rcx) vpshufd $0x00,%ymm15,%ymm12 vpshufd $0x55,%ymm15,%ymm13 vmovdqa %ymm12,384-512(%rax) vpshufd $0xaa,%ymm15,%ymm14 vmovdqa %ymm13,416-512(%rax) vpshufd $0xff,%ymm15,%ymm15 vmovdqa %ymm14,448-512(%rax) vmovdqa %ymm15,480-512(%rax) vpshufd $0x00,%ymm7,%ymm4 vpshufd $0x55,%ymm7,%ymm5 vpaddd L$incy(%rip),%ymm4,%ymm4 vpshufd $0xaa,%ymm7,%ymm6 vmovdqa %ymm5,544-512(%rax) vpshufd $0xff,%ymm7,%ymm7 vmovdqa %ymm6,576-512(%rax) vmovdqa %ymm7,608-512(%rax) jmp L$oop_enter8x .p2align 5 L$oop_outer8x: vmovdqa 128-256(%rcx),%ymm8 vmovdqa 160-256(%rcx),%ymm9 vmovdqa 192-256(%rcx),%ymm10 vmovdqa 224-256(%rcx),%ymm11 vmovdqa 256-256(%rcx),%ymm0 vmovdqa 288-256(%rcx),%ymm1 vmovdqa 320-256(%rcx),%ymm2 vmovdqa 352-256(%rcx),%ymm3 vmovdqa 384-512(%rax),%ymm12 vmovdqa 416-512(%rax),%ymm13 vmovdqa 448-512(%rax),%ymm14 vmovdqa 480-512(%rax),%ymm15 vmovdqa 512-512(%rax),%ymm4 vmovdqa 544-512(%rax),%ymm5 vmovdqa 576-512(%rax),%ymm6 vmovdqa 608-512(%rax),%ymm7 vpaddd L$eight(%rip),%ymm4,%ymm4 L$oop_enter8x: vmovdqa %ymm14,64(%rsp) vmovdqa %ymm15,96(%rsp) vbroadcasti128 (%r10),%ymm15 vmovdqa %ymm4,512-512(%rax) movl $10,%eax jmp L$oop8x .p2align 5 L$oop8x: vpaddd %ymm0,%ymm8,%ymm8 vpxor %ymm4,%ymm8,%ymm4 vpshufb %ymm15,%ymm4,%ymm4 vpaddd %ymm1,%ymm9,%ymm9 vpxor %ymm5,%ymm9,%ymm5 vpshufb %ymm15,%ymm5,%ymm5 vpaddd %ymm4,%ymm12,%ymm12 vpxor %ymm0,%ymm12,%ymm0 vpslld $12,%ymm0,%ymm14 vpsrld $20,%ymm0,%ymm0 vpor %ymm0,%ymm14,%ymm0 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm5,%ymm13,%ymm13 vpxor %ymm1,%ymm13,%ymm1 vpslld $12,%ymm1,%ymm15 vpsrld $20,%ymm1,%ymm1 vpor 
%ymm1,%ymm15,%ymm1 vpaddd %ymm0,%ymm8,%ymm8 vpxor %ymm4,%ymm8,%ymm4 vpshufb %ymm14,%ymm4,%ymm4 vpaddd %ymm1,%ymm9,%ymm9 vpxor %ymm5,%ymm9,%ymm5 vpshufb %ymm14,%ymm5,%ymm5 vpaddd %ymm4,%ymm12,%ymm12 vpxor %ymm0,%ymm12,%ymm0 vpslld $7,%ymm0,%ymm15 vpsrld $25,%ymm0,%ymm0 vpor %ymm0,%ymm15,%ymm0 vbroadcasti128 (%r10),%ymm15 vpaddd %ymm5,%ymm13,%ymm13 vpxor %ymm1,%ymm13,%ymm1 vpslld $7,%ymm1,%ymm14 vpsrld $25,%ymm1,%ymm1 vpor %ymm1,%ymm14,%ymm1 vmovdqa %ymm12,0(%rsp) vmovdqa %ymm13,32(%rsp) vmovdqa 64(%rsp),%ymm12 vmovdqa 96(%rsp),%ymm13 vpaddd %ymm2,%ymm10,%ymm10 vpxor %ymm6,%ymm10,%ymm6 vpshufb %ymm15,%ymm6,%ymm6 vpaddd %ymm3,%ymm11,%ymm11 vpxor %ymm7,%ymm11,%ymm7 vpshufb %ymm15,%ymm7,%ymm7 vpaddd %ymm6,%ymm12,%ymm12 vpxor %ymm2,%ymm12,%ymm2 vpslld $12,%ymm2,%ymm14 vpsrld $20,%ymm2,%ymm2 vpor %ymm2,%ymm14,%ymm2 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm7,%ymm13,%ymm13 vpxor %ymm3,%ymm13,%ymm3 vpslld $12,%ymm3,%ymm15 vpsrld $20,%ymm3,%ymm3 vpor %ymm3,%ymm15,%ymm3 vpaddd %ymm2,%ymm10,%ymm10 vpxor %ymm6,%ymm10,%ymm6 vpshufb %ymm14,%ymm6,%ymm6 vpaddd %ymm3,%ymm11,%ymm11 vpxor %ymm7,%ymm11,%ymm7 vpshufb %ymm14,%ymm7,%ymm7 vpaddd %ymm6,%ymm12,%ymm12 vpxor %ymm2,%ymm12,%ymm2 vpslld $7,%ymm2,%ymm15 vpsrld $25,%ymm2,%ymm2 vpor %ymm2,%ymm15,%ymm2 vbroadcasti128 (%r10),%ymm15 vpaddd %ymm7,%ymm13,%ymm13 vpxor %ymm3,%ymm13,%ymm3 vpslld $7,%ymm3,%ymm14 vpsrld $25,%ymm3,%ymm3 vpor %ymm3,%ymm14,%ymm3 vpaddd %ymm1,%ymm8,%ymm8 vpxor %ymm7,%ymm8,%ymm7 vpshufb %ymm15,%ymm7,%ymm7 vpaddd %ymm2,%ymm9,%ymm9 vpxor %ymm4,%ymm9,%ymm4 vpshufb %ymm15,%ymm4,%ymm4 vpaddd %ymm7,%ymm12,%ymm12 vpxor %ymm1,%ymm12,%ymm1 vpslld $12,%ymm1,%ymm14 vpsrld $20,%ymm1,%ymm1 vpor %ymm1,%ymm14,%ymm1 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm4,%ymm13,%ymm13 vpxor %ymm2,%ymm13,%ymm2 vpslld $12,%ymm2,%ymm15 vpsrld $20,%ymm2,%ymm2 vpor %ymm2,%ymm15,%ymm2 vpaddd %ymm1,%ymm8,%ymm8 vpxor %ymm7,%ymm8,%ymm7 vpshufb %ymm14,%ymm7,%ymm7 vpaddd %ymm2,%ymm9,%ymm9 vpxor %ymm4,%ymm9,%ymm4 vpshufb %ymm14,%ymm4,%ymm4 vpaddd %ymm7,%ymm12,%ymm12 vpxor %ymm1,%ymm12,%ymm1 vpslld $7,%ymm1,%ymm15 vpsrld $25,%ymm1,%ymm1 vpor %ymm1,%ymm15,%ymm1 vbroadcasti128 (%r10),%ymm15 vpaddd %ymm4,%ymm13,%ymm13 vpxor %ymm2,%ymm13,%ymm2 vpslld $7,%ymm2,%ymm14 vpsrld $25,%ymm2,%ymm2 vpor %ymm2,%ymm14,%ymm2 vmovdqa %ymm12,64(%rsp) vmovdqa %ymm13,96(%rsp) vmovdqa 0(%rsp),%ymm12 vmovdqa 32(%rsp),%ymm13 vpaddd %ymm3,%ymm10,%ymm10 vpxor %ymm5,%ymm10,%ymm5 vpshufb %ymm15,%ymm5,%ymm5 vpaddd %ymm0,%ymm11,%ymm11 vpxor %ymm6,%ymm11,%ymm6 vpshufb %ymm15,%ymm6,%ymm6 vpaddd %ymm5,%ymm12,%ymm12 vpxor %ymm3,%ymm12,%ymm3 vpslld $12,%ymm3,%ymm14 vpsrld $20,%ymm3,%ymm3 vpor %ymm3,%ymm14,%ymm3 vbroadcasti128 (%r11),%ymm14 vpaddd %ymm6,%ymm13,%ymm13 vpxor %ymm0,%ymm13,%ymm0 vpslld $12,%ymm0,%ymm15 vpsrld $20,%ymm0,%ymm0 vpor %ymm0,%ymm15,%ymm0 vpaddd %ymm3,%ymm10,%ymm10 vpxor %ymm5,%ymm10,%ymm5 vpshufb %ymm14,%ymm5,%ymm5 vpaddd %ymm0,%ymm11,%ymm11 vpxor %ymm6,%ymm11,%ymm6 vpshufb %ymm14,%ymm6,%ymm6 vpaddd %ymm5,%ymm12,%ymm12 vpxor %ymm3,%ymm12,%ymm3 vpslld $7,%ymm3,%ymm15 vpsrld $25,%ymm3,%ymm3 vpor %ymm3,%ymm15,%ymm3 vbroadcasti128 (%r10),%ymm15 vpaddd %ymm6,%ymm13,%ymm13 vpxor %ymm0,%ymm13,%ymm0 vpslld $7,%ymm0,%ymm14 vpsrld $25,%ymm0,%ymm0 vpor %ymm0,%ymm14,%ymm0 decl %eax jnz L$oop8x leaq 512(%rsp),%rax vpaddd 128-256(%rcx),%ymm8,%ymm8 vpaddd 160-256(%rcx),%ymm9,%ymm9 vpaddd 192-256(%rcx),%ymm10,%ymm10 vpaddd 224-256(%rcx),%ymm11,%ymm11 vpunpckldq %ymm9,%ymm8,%ymm14 vpunpckldq %ymm11,%ymm10,%ymm15 vpunpckhdq %ymm9,%ymm8,%ymm8 vpunpckhdq %ymm11,%ymm10,%ymm10 vpunpcklqdq %ymm15,%ymm14,%ymm9 
vpunpckhqdq %ymm15,%ymm14,%ymm14 vpunpcklqdq %ymm10,%ymm8,%ymm11 vpunpckhqdq %ymm10,%ymm8,%ymm8 vpaddd 256-256(%rcx),%ymm0,%ymm0 vpaddd 288-256(%rcx),%ymm1,%ymm1 vpaddd 320-256(%rcx),%ymm2,%ymm2 vpaddd 352-256(%rcx),%ymm3,%ymm3 vpunpckldq %ymm1,%ymm0,%ymm10 vpunpckldq %ymm3,%ymm2,%ymm15 vpunpckhdq %ymm1,%ymm0,%ymm0 vpunpckhdq %ymm3,%ymm2,%ymm2 vpunpcklqdq %ymm15,%ymm10,%ymm1 vpunpckhqdq %ymm15,%ymm10,%ymm10 vpunpcklqdq %ymm2,%ymm0,%ymm3 vpunpckhqdq %ymm2,%ymm0,%ymm0 vperm2i128 $0x20,%ymm1,%ymm9,%ymm15 vperm2i128 $0x31,%ymm1,%ymm9,%ymm1 vperm2i128 $0x20,%ymm10,%ymm14,%ymm9 vperm2i128 $0x31,%ymm10,%ymm14,%ymm10 vperm2i128 $0x20,%ymm3,%ymm11,%ymm14 vperm2i128 $0x31,%ymm3,%ymm11,%ymm3 vperm2i128 $0x20,%ymm0,%ymm8,%ymm11 vperm2i128 $0x31,%ymm0,%ymm8,%ymm0 vmovdqa %ymm15,0(%rsp) vmovdqa %ymm9,32(%rsp) vmovdqa 64(%rsp),%ymm15 vmovdqa 96(%rsp),%ymm9 vpaddd 384-512(%rax),%ymm12,%ymm12 vpaddd 416-512(%rax),%ymm13,%ymm13 vpaddd 448-512(%rax),%ymm15,%ymm15 vpaddd 480-512(%rax),%ymm9,%ymm9 vpunpckldq %ymm13,%ymm12,%ymm2 vpunpckldq %ymm9,%ymm15,%ymm8 vpunpckhdq %ymm13,%ymm12,%ymm12 vpunpckhdq %ymm9,%ymm15,%ymm15 vpunpcklqdq %ymm8,%ymm2,%ymm13 vpunpckhqdq %ymm8,%ymm2,%ymm2 vpunpcklqdq %ymm15,%ymm12,%ymm9 vpunpckhqdq %ymm15,%ymm12,%ymm12 vpaddd 512-512(%rax),%ymm4,%ymm4 vpaddd 544-512(%rax),%ymm5,%ymm5 vpaddd 576-512(%rax),%ymm6,%ymm6 vpaddd 608-512(%rax),%ymm7,%ymm7 vpunpckldq %ymm5,%ymm4,%ymm15 vpunpckldq %ymm7,%ymm6,%ymm8 vpunpckhdq %ymm5,%ymm4,%ymm4 vpunpckhdq %ymm7,%ymm6,%ymm6 vpunpcklqdq %ymm8,%ymm15,%ymm5 vpunpckhqdq %ymm8,%ymm15,%ymm15 vpunpcklqdq %ymm6,%ymm4,%ymm7 vpunpckhqdq %ymm6,%ymm4,%ymm4 vperm2i128 $0x20,%ymm5,%ymm13,%ymm8 vperm2i128 $0x31,%ymm5,%ymm13,%ymm5 vperm2i128 $0x20,%ymm15,%ymm2,%ymm13 vperm2i128 $0x31,%ymm15,%ymm2,%ymm15 vperm2i128 $0x20,%ymm7,%ymm9,%ymm2 vperm2i128 $0x31,%ymm7,%ymm9,%ymm7 vperm2i128 $0x20,%ymm4,%ymm12,%ymm9 vperm2i128 $0x31,%ymm4,%ymm12,%ymm4 vmovdqa 0(%rsp),%ymm6 vmovdqa 32(%rsp),%ymm12 cmpq $512,%rdx jb L$tail8x vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 leaq 128(%rsi),%rsi vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) leaq 128(%rdi),%rdi vpxor 0(%rsi),%ymm12,%ymm12 vpxor 32(%rsi),%ymm13,%ymm13 vpxor 64(%rsi),%ymm10,%ymm10 vpxor 96(%rsi),%ymm15,%ymm15 leaq 128(%rsi),%rsi vmovdqu %ymm12,0(%rdi) vmovdqu %ymm13,32(%rdi) vmovdqu %ymm10,64(%rdi) vmovdqu %ymm15,96(%rdi) leaq 128(%rdi),%rdi vpxor 0(%rsi),%ymm14,%ymm14 vpxor 32(%rsi),%ymm2,%ymm2 vpxor 64(%rsi),%ymm3,%ymm3 vpxor 96(%rsi),%ymm7,%ymm7 leaq 128(%rsi),%rsi vmovdqu %ymm14,0(%rdi) vmovdqu %ymm2,32(%rdi) vmovdqu %ymm3,64(%rdi) vmovdqu %ymm7,96(%rdi) leaq 128(%rdi),%rdi vpxor 0(%rsi),%ymm11,%ymm11 vpxor 32(%rsi),%ymm9,%ymm9 vpxor 64(%rsi),%ymm0,%ymm0 vpxor 96(%rsi),%ymm4,%ymm4 leaq 128(%rsi),%rsi vmovdqu %ymm11,0(%rdi) vmovdqu %ymm9,32(%rdi) vmovdqu %ymm0,64(%rdi) vmovdqu %ymm4,96(%rdi) leaq 128(%rdi),%rdi subq $512,%rdx jnz L$oop_outer8x jmp L$done8x L$tail8x: cmpq $448,%rdx jae L$448_or_more8x cmpq $384,%rdx jae L$384_or_more8x cmpq $320,%rdx jae L$320_or_more8x cmpq $256,%rdx jae L$256_or_more8x cmpq $192,%rdx jae L$192_or_more8x cmpq $128,%rdx jae L$128_or_more8x cmpq $64,%rdx jae L$64_or_more8x xorq %r10,%r10 vmovdqa %ymm6,0(%rsp) vmovdqa %ymm8,32(%rsp) jmp L$oop_tail8x .p2align 5 L$64_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) je L$done8x leaq 64(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm1,0(%rsp) leaq 64(%rdi),%rdi subq $64,%rdx vmovdqa 
%ymm5,32(%rsp) jmp L$oop_tail8x .p2align 5 L$128_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) je L$done8x leaq 128(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm12,0(%rsp) leaq 128(%rdi),%rdi subq $128,%rdx vmovdqa %ymm13,32(%rsp) jmp L$oop_tail8x .p2align 5 L$192_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) je L$done8x leaq 192(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm10,0(%rsp) leaq 192(%rdi),%rdi subq $192,%rdx vmovdqa %ymm15,32(%rsp) jmp L$oop_tail8x .p2align 5 L$256_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu %ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) je L$done8x leaq 256(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm14,0(%rsp) leaq 256(%rdi),%rdi subq $256,%rdx vmovdqa %ymm2,32(%rsp) jmp L$oop_tail8x .p2align 5 L$320_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vpxor 256(%rsi),%ymm14,%ymm14 vpxor 288(%rsi),%ymm2,%ymm2 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu %ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) vmovdqu %ymm14,256(%rdi) vmovdqu %ymm2,288(%rdi) je L$done8x leaq 320(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm3,0(%rsp) leaq 320(%rdi),%rdi subq $320,%rdx vmovdqa %ymm7,32(%rsp) jmp L$oop_tail8x .p2align 5 L$384_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vpxor 256(%rsi),%ymm14,%ymm14 vpxor 288(%rsi),%ymm2,%ymm2 vpxor 320(%rsi),%ymm3,%ymm3 vpxor 352(%rsi),%ymm7,%ymm7 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu %ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) vmovdqu %ymm14,256(%rdi) vmovdqu %ymm2,288(%rdi) vmovdqu %ymm3,320(%rdi) vmovdqu %ymm7,352(%rdi) je L$done8x leaq 384(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm11,0(%rsp) leaq 384(%rdi),%rdi subq $384,%rdx vmovdqa %ymm9,32(%rsp) jmp L$oop_tail8x .p2align 5 L$448_or_more8x: vpxor 0(%rsi),%ymm6,%ymm6 vpxor 32(%rsi),%ymm8,%ymm8 vpxor 64(%rsi),%ymm1,%ymm1 vpxor 96(%rsi),%ymm5,%ymm5 vpxor 128(%rsi),%ymm12,%ymm12 vpxor 160(%rsi),%ymm13,%ymm13 vpxor 192(%rsi),%ymm10,%ymm10 vpxor 224(%rsi),%ymm15,%ymm15 vpxor 256(%rsi),%ymm14,%ymm14 vpxor 288(%rsi),%ymm2,%ymm2 vpxor 320(%rsi),%ymm3,%ymm3 vpxor 352(%rsi),%ymm7,%ymm7 vpxor 384(%rsi),%ymm11,%ymm11 vpxor 416(%rsi),%ymm9,%ymm9 vmovdqu %ymm6,0(%rdi) vmovdqu %ymm8,32(%rdi) vmovdqu %ymm1,64(%rdi) vmovdqu %ymm5,96(%rdi) vmovdqu %ymm12,128(%rdi) vmovdqu %ymm13,160(%rdi) vmovdqu 
%ymm10,192(%rdi) vmovdqu %ymm15,224(%rdi) vmovdqu %ymm14,256(%rdi) vmovdqu %ymm2,288(%rdi) vmovdqu %ymm3,320(%rdi) vmovdqu %ymm7,352(%rdi) vmovdqu %ymm11,384(%rdi) vmovdqu %ymm9,416(%rdi) je L$done8x leaq 448(%rsi),%rsi xorq %r10,%r10 vmovdqa %ymm0,0(%rsp) leaq 448(%rdi),%rdi subq $448,%rdx vmovdqa %ymm4,32(%rsp) L$oop_tail8x: movzbl (%rsi,%r10,1),%eax movzbl (%rsp,%r10,1),%ecx leaq 1(%r10),%r10 xorl %ecx,%eax movb %al,-1(%rdi,%r10,1) decq %rdx jnz L$oop_tail8x L$done8x: vzeroall leaq (%r9),%rsp L$8x_epilogue: ret #endif
marvin-hansen/iggy-streaming-system
25,531
thirdparty/crates/ring-0.17.9/pregenerated/vpaes-armv8-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <ring-core/arm_arch.h> .section .rodata .align 7 // totally strategic alignment _vpaes_consts: Lk_mc_forward: // mc_forward .quad 0x0407060500030201, 0x0C0F0E0D080B0A09 .quad 0x080B0A0904070605, 0x000302010C0F0E0D .quad 0x0C0F0E0D080B0A09, 0x0407060500030201 .quad 0x000302010C0F0E0D, 0x080B0A0904070605 Lk_mc_backward: // mc_backward .quad 0x0605040702010003, 0x0E0D0C0F0A09080B .quad 0x020100030E0D0C0F, 0x0A09080B06050407 .quad 0x0E0D0C0F0A09080B, 0x0605040702010003 .quad 0x0A09080B06050407, 0x020100030E0D0C0F Lk_sr: // sr .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 .quad 0x030E09040F0A0500, 0x0B06010C07020D08 .quad 0x0F060D040B020900, 0x070E050C030A0108 .quad 0x0B0E0104070A0D00, 0x0306090C0F020508 // // "Hot" constants // Lk_inv: // inv, inva .quad 0x0E05060F0D080180, 0x040703090A0B0C02 .quad 0x01040A060F0B0780, 0x030D0E0C02050809 Lk_ipt: // input transform (lo, hi) .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 Lk_sbo: // sbou, sbot .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA Lk_sb1: // sb1u, sb1t .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 Lk_sb2: // sb2u, sb2t .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD // // Key schedule constants // Lk_dksd: // decryption key schedule: invskew x*D .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E Lk_dksb: // decryption key schedule: invskew x*B .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99 .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8 Lk_dkse: // decryption key schedule: invskew x*E + 0x63 .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086 .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487 Lk_dks9: // decryption key schedule: invskew x*9 .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE Lk_rcon: // rcon .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 Lk_opt: // output transform .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 Lk_deskew: // deskew tables: inverts the sbox's "skew" .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 .align 2 .align 6 .text ## ## _aes_preheat ## ## Fills register %r10 -> .aes_consts (so you can -fPIC) ## and %xmm9-%xmm15 as specified below. ## .def _vpaes_encrypt_preheat .type 32 .endef .align 4 _vpaes_encrypt_preheat: adrp x10, Lk_inv add x10, x10, :lo12:Lk_inv movi v17.16b, #0x0f ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // Lk_ipt, Lk_sbo ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // Lk_sb1, Lk_sb2 ret ## ## _aes_encrypt_core ## ## AES-encrypt %xmm0. 
## ## Inputs: ## %xmm0 = input ## %xmm9-%xmm15 as in _vpaes_preheat ## (%rdx) = scheduled keys ## ## Output in %xmm0 ## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax ## Preserves %xmm6 - %xmm8 so you get some local vectors ## ## .def _vpaes_encrypt_core .type 32 .endef .align 4 _vpaes_encrypt_core: mov x9, x2 ldr w8, [x2,#240] // pull rounds adrp x11, Lk_mc_forward+16 add x11, x11, :lo12:Lk_mc_forward+16 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 b Lenc_entry .align 4 Lenc_loop: // middle of middle round add x10, x11, #0x40 tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[] tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[] tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D and x11, x11, #~(1<<6) // and $0x30, %r11 # ... 
mod 4 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D sub w8, w8, #1 // nr-- Lenc_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 cbnz w8, Lenc_loop // middle of last round add x10, x11, #0x80 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[] tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 ret .def _vpaes_encrypt_2x .type 32 .endef .align 4 _vpaes_encrypt_2x: mov x9, x2 ldr w8, [x2,#240] // pull rounds adrp x11, Lk_mc_forward+16 add x11, x11, :lo12:Lk_mc_forward+16 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0 and v9.16b, v15.16b, v17.16b ushr v8.16b, v15.16b, #4 tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 tbl v9.16b, {v20.16b}, v9.16b // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 tbl v10.16b, {v21.16b}, v8.16b eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 eor v8.16b, v9.16b, v16.16b eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 eor v8.16b, v8.16b, v10.16b b Lenc_2x_entry .align 4 Lenc_2x_loop: // middle of middle round add x10, x11, #0x40 tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u tbl v12.16b, {v25.16b}, v10.16b ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[] tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t tbl v8.16b, {v24.16b}, v11.16b eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u tbl v13.16b, {v27.16b}, v10.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A eor v8.16b, v8.16b, v12.16b tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t tbl v10.16b, {v26.16b}, v11.16b ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[] tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B tbl v11.16b, {v8.16b}, v1.16b eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A eor v10.16b, v10.16b, v13.16b tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D tbl v8.16b, {v8.16b}, v4.16b 
eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B eor v11.16b, v11.16b, v10.16b tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C tbl v12.16b, {v11.16b},v1.16b eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D eor v8.16b, v8.16b, v11.16b and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D eor v8.16b, v8.16b, v12.16b sub w8, w8, #1 // nr-- Lenc_2x_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i and v9.16b, v8.16b, v17.16b ushr v8.16b, v8.16b, #4 tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k tbl v13.16b, {v19.16b},v9.16b eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j eor v9.16b, v9.16b, v8.16b tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v11.16b, {v18.16b},v8.16b tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j tbl v12.16b, {v18.16b},v9.16b eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v11.16b, v11.16b, v13.16b eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k eor v12.16b, v12.16b, v13.16b tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v10.16b, {v18.16b},v11.16b tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak tbl v11.16b, {v18.16b},v12.16b eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v10.16b, v10.16b, v9.16b eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo eor v11.16b, v11.16b, v8.16b ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 cbnz w8, Lenc_2x_loop // middle of last round add x10, x11, #0x80 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou tbl v12.16b, {v22.16b}, v10.16b ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[] tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t tbl v8.16b, {v23.16b}, v11.16b eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A eor v8.16b, v8.16b, v12.16b tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0 tbl v1.16b, {v8.16b},v1.16b ret ######################################################## ## ## ## AES key schedule ## ## ## ######################################################## .def _vpaes_key_preheat .type 32 .endef .align 4 _vpaes_key_preheat: adrp x10, Lk_inv add x10, x10, :lo12:Lk_inv movi v16.16b, #0x5b // Lk_s63 adrp x11, Lk_sb1 add x11, x11, :lo12:Lk_sb1 movi v17.16b, #0x0f // Lk_s0F ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // Lk_inv, Lk_ipt adrp x10, Lk_dksd add x10, x10, :lo12:Lk_dksd ld1 {v22.2d,v23.2d}, [x11] // Lk_sb1 adrp x11, Lk_mc_forward add x11, x11, :lo12:Lk_mc_forward ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // Lk_dksd, Lk_dksb ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // Lk_dkse, Lk_dks9 ld1 {v8.2d}, [x10] // Lk_rcon ld1 {v9.2d}, [x11] // Lk_mc_forward[0] ret .def _vpaes_schedule_core .type 32 .endef .align 4 _vpaes_schedule_core: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp,#-16]! 
add x29,sp,#0 bl _vpaes_key_preheat // load the tables ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned) // input transform mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3 bl _vpaes_schedule_transform mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7 adrp x10, Lk_sr // lea Lk_sr(%rip),%r10 add x10, x10, :lo12:Lk_sr add x8, x8, x10 // encrypting, output zeroth round key after transform st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) cmp w1, #192 // cmp $192, %esi b.hi Lschedule_256 b.eq Lschedule_192 // 128: fall though ## ## .schedule_128 ## ## 128-bit specific part of key schedule. ## ## This schedule is really simple, because all its parts ## are accomplished by the subroutines. ## Lschedule_128: mov x0, #10 // mov $10, %esi Loop_schedule_128: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_round cbz x0, Lschedule_mangle_last bl _vpaes_schedule_mangle // write output b Loop_schedule_128 ## ## .aes_schedule_192 ## ## 192-bit specific part of key schedule. ## ## The main body of this schedule is the same as the 128-bit ## schedule, but with more smearing. The long, high side is ## stored in %xmm7 as before, and the short, low side is in ## the high bits of %xmm6. ## ## This schedule is somewhat nastier, however, because each ## round produces 192 bits of key material, or 1.5 round keys. ## Therefore, on each cycle we do 2 rounds and produce 3 round ## keys. ## .align 4 Lschedule_192: sub x0, x0, #8 ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned) bl _vpaes_schedule_transform // input transform mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4 ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros mov x0, #4 // mov $4, %esi Loop_schedule_192: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_round ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0 bl _vpaes_schedule_mangle // save key n bl _vpaes_schedule_192_smear bl _vpaes_schedule_mangle // save key n+1 bl _vpaes_schedule_round cbz x0, Lschedule_mangle_last bl _vpaes_schedule_mangle // save key n+2 bl _vpaes_schedule_192_smear b Loop_schedule_192 ## ## .aes_schedule_256 ## ## 256-bit specific part of key schedule. ## ## The structure here is very similar to the 128-bit ## schedule, but with an additional "low side" in ## %xmm6. The low side's rounds are the same as the ## high side's, except no rcon and no rotation. ## .align 4 Lschedule_256: ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) bl _vpaes_schedule_transform // input transform mov x0, #7 // mov $7, %esi Loop_schedule_256: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_mangle // output low result mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6 // high round bl _vpaes_schedule_round cbz x0, Lschedule_mangle_last bl _vpaes_schedule_mangle // low round. swap xmm7 and xmm6 dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 movi v4.16b, #0 mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5 mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7 bl _vpaes_schedule_low_round mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7 b Loop_schedule_256 ## ## .aes_schedule_mangle_last ## ## Mangler for last round of key schedule ## Mangles %xmm0 ## when encrypting, outputs out(%xmm0) ^ 63 ## when decrypting, outputs unskew(%xmm0) ## ## Always called right before return... 
jumps to cleanup and exits ## .align 4 Lschedule_mangle_last: // schedule last round key from xmm0 adrp x11, Lk_deskew // lea Lk_deskew(%rip),%r11 # prepare to deskew add x11, x11, :lo12:Lk_deskew cbnz w3, Lschedule_mangle_last_dec // encrypting ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1 adrp x11, Lk_opt // lea Lk_opt(%rip), %r11 # prepare to output transform add x11, x11, :lo12:Lk_opt add x2, x2, #32 // add $32, %rdx tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute Lschedule_mangle_last_dec: ld1 {v20.2d,v21.2d}, [x11] // reload constants sub x2, x2, #16 // add $-16, %rdx eor v0.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm0 bl _vpaes_schedule_transform // output transform st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key // cleanup eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2 eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3 eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5 eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6 eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7 ldp x29, x30, [sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret ## ## .aes_schedule_192_smear ## ## Smear the short, low side in the 192-bit key schedule. ## ## Inputs: ## %xmm7: high side, b a x y ## %xmm6: low side, d c 0 0 ## %xmm13: 0 ## ## Outputs: ## %xmm6: b+c+d b+c 0 0 ## %xmm0: b+c+d b+c b a ## .def _vpaes_schedule_192_smear .type 32 .endef .align 4 _vpaes_schedule_192_smear: movi v1.16b, #0 dup v0.4s, v7.s[3] ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0 ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0 ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros ret ## ## .aes_schedule_round ## ## Runs one main round of the key schedule on %xmm0, %xmm7 ## ## Specifically, runs subbytes on the high dword of %xmm0 ## then rotates it by one byte and xors into the low dword of ## %xmm7. ## ## Adds rcon from low byte of %xmm8, then rotates %xmm8 for ## next rcon. ## ## Smears the dwords of %xmm7 by xoring the low into the ## second low, result into third, result into highest. ## ## Returns results in %xmm7 = %xmm0. ## Clobbers %xmm1-%xmm4, %r11. ## .def _vpaes_schedule_round .type 32 .endef .align 4 _vpaes_schedule_round: // extract rcon from xmm8 movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4 ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1 ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 // rotate dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0 // fall through... // low round: same as high round, but no rotation and no rcon. 
_vpaes_schedule_low_round: // smear xmm7 ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4 // subbytes and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7 tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v7.16b, v7.16b, v16.16b // vpxor Lk_s63(%rip), %xmm7, %xmm7 tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output // add in smeared stuff eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0 eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7 ret ## ## .aes_schedule_transform ## ## Linear-transform %xmm0 according to tables at (%r11) ## ## Requires that %xmm9 = 0x0F0F... as in preheat ## Output in %xmm0 ## Clobbers %xmm1, %xmm2 ## .def _vpaes_schedule_transform .type 32 .endef .align 4 _vpaes_schedule_transform: and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 // vmovdqa (%r11), %xmm2 # lo tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 // vmovdqa 16(%r11), %xmm1 # hi tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 ret ## ## .aes_schedule_mangle ## ## Mangle xmm0 from (basis-transformed) standard version ## to our version. 
## ## On encrypt, ## xor with 0x63 ## multiply by circulant 0,1,1,1 ## apply shiftrows transform ## ## On decrypt, ## xor with 0x63 ## multiply by "inverse mixcolumns" circulant E,B,D,9 ## deskew ## apply shiftrows transform ## ## ## Writes out to (%rdx), and increments or decrements it ## Keeps track of round number mod 4 in %r8 ## Preserves xmm0 ## Clobbers xmm1-xmm5 ## .def _vpaes_schedule_mangle .type 32 .endef .align 4 _vpaes_schedule_mangle: mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later // vmovdqa .Lk_mc_forward(%rip),%xmm5 // encrypting eor v4.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm4 add x2, x2, #16 // add $16, %rdx tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4 tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1 tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3 eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3 Lschedule_mangle_both: tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 add x8, x8, #48 // add $-16, %r8 and x8, x8, #~(1<<6) // and $0x30, %r8 st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) ret .globl vpaes_set_encrypt_key .def vpaes_set_encrypt_key .type 32 .endef .align 4 vpaes_set_encrypt_key: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so lsr w9, w1, #5 // shr $5,%eax add w9, w9, #5 // $5,%eax str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; mov w3, #0 // mov $0,%ecx mov x8, #0x30 // mov $0x30,%r8d bl _vpaes_schedule_core eor x0, x0, x0 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .globl vpaes_ctr32_encrypt_blocks .def vpaes_ctr32_encrypt_blocks .type 32 .endef .align 4 vpaes_ctr32_encrypt_blocks: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so stp d10,d11,[sp,#-16]! stp d12,d13,[sp,#-16]! stp d14,d15,[sp,#-16]! cbz x2, Lctr32_done // Note, unlike the other functions, x2 here is measured in blocks, // not bytes. mov x17, x2 mov x2, x3 // Load the IV and counter portion. ldr w6, [x4, #12] ld1 {v7.16b}, [x4] bl _vpaes_encrypt_preheat tst x17, #1 rev w6, w6 // The counter is big-endian. b.eq Lctr32_prep_loop // Handle one block so the remaining block count is even for // _vpaes_encrypt_2x. ld1 {v6.16b}, [x0], #16 // Load input ahead of time bl _vpaes_encrypt_core eor v0.16b, v0.16b, v6.16b // XOR input and result st1 {v0.16b}, [x1], #16 subs x17, x17, #1 // Update the counter. add w6, w6, #1 rev w7, w6 mov v7.s[3], w7 b.ls Lctr32_done Lctr32_prep_loop: // _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x // uses v14 and v15. mov v15.16b, v7.16b mov v14.16b, v7.16b add w6, w6, #1 rev w7, w6 mov v15.s[3], w7 Lctr32_loop: ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time bl _vpaes_encrypt_2x eor v0.16b, v0.16b, v6.16b // XOR input and result eor v1.16b, v1.16b, v7.16b // XOR input and result (#2) st1 {v0.16b,v1.16b}, [x1], #32 subs x17, x17, #2 // Update the counter. add w7, w6, #1 add w6, w6, #2 rev w7, w7 mov v14.s[3], w7 rev w7, w6 mov v15.s[3], w7 b.hi Lctr32_loop Lctr32_done: ldp d14,d15,[sp],#16 ldp d12,d13,[sp],#16 ldp d10,d11,[sp],#16 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
marvin-hansen/iggy-streaming-system
69,148
thirdparty/crates/ring-0.17.9/pregenerated/sha256-x86_64-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .globl _sha256_block_data_order_nohw .private_extern _sha256_block_data_order_nohw .p2align 4 _sha256_block_data_order_nohw: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 shlq $4,%rdx subq $64+32,%rsp leaq (%rsi,%rdx,4),%rdx andq $-64,%rsp movq %rdi,64+0(%rsp) movq %rsi,64+8(%rsp) movq %rdx,64+16(%rsp) movq %rax,88(%rsp) L$prologue: movl 0(%rdi),%eax movl 4(%rdi),%ebx movl 8(%rdi),%ecx movl 12(%rdi),%edx movl 16(%rdi),%r8d movl 20(%rdi),%r9d movl 24(%rdi),%r10d movl 28(%rdi),%r11d jmp L$loop .p2align 4 L$loop: movl %ebx,%edi leaq K256(%rip),%rbp xorl %ecx,%edi movl 0(%rsi),%r12d movl %r8d,%r13d movl %eax,%r14d bswapl %r12d rorl $14,%r13d movl %r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,0(%rsp) xorl %eax,%r14d andl %r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d leaq 4(%rbp),%rbp addl %r14d,%r11d movl 4(%rsi),%r12d movl %edx,%r13d movl %r11d,%r14d bswapl %r12d rorl $14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,4(%rsp) xorl %r11d,%r14d andl %edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp addl %r14d,%r10d movl 8(%rsi),%r12d movl %ecx,%r13d movl %r10d,%r14d bswapl %r12d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,8(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp addl %r14d,%r9d movl 12(%rsi),%r12d movl %ebx,%r13d movl %r9d,%r14d bswapl %r12d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,12(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp addl %r14d,%r8d movl 16(%rsi),%r12d movl %eax,%r13d movl %r8d,%r14d bswapl %r12d rorl $14,%r13d movl %ebx,%r15d xorl %eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,16(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp addl %r14d,%edx movl 20(%rsi),%r12d movl %r11d,%r13d movl %edx,%r14d bswapl %r12d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl %ebx,%edi movl 
%r12d,20(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl %r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl %r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp addl %r14d,%ecx movl 24(%rsi),%r12d movl %r10d,%r13d movl %ecx,%r14d bswapl %r12d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,24(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp addl %r14d,%ebx movl 28(%rsi),%r12d movl %r9d,%r13d movl %ebx,%r14d bswapl %r12d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,28(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl %ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp addl %r14d,%eax movl 32(%rsi),%r12d movl %r8d,%r13d movl %eax,%r14d bswapl %r12d rorl $14,%r13d movl %r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,32(%rsp) xorl %eax,%r14d andl %r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d leaq 4(%rbp),%rbp addl %r14d,%r11d movl 36(%rsi),%r12d movl %edx,%r13d movl %r11d,%r14d bswapl %r12d rorl $14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,36(%rsp) xorl %r11d,%r14d andl %edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp addl %r14d,%r10d movl 40(%rsi),%r12d movl %ecx,%r13d movl %r10d,%r14d bswapl %r12d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,40(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp addl %r14d,%r9d movl 44(%rsi),%r12d movl %ebx,%r13d movl %r9d,%r14d bswapl %r12d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,44(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp addl %r14d,%r8d movl 48(%rsi),%r12d movl %eax,%r13d movl %r8d,%r14d bswapl %r12d rorl $14,%r13d movl %ebx,%r15d xorl 
%eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,48(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp addl %r14d,%edx movl 52(%rsi),%r12d movl %r11d,%r13d movl %edx,%r14d bswapl %r12d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl %ebx,%edi movl %r12d,52(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl %r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl %r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp addl %r14d,%ecx movl 56(%rsi),%r12d movl %r10d,%r13d movl %ecx,%r14d bswapl %r12d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,56(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp addl %r14d,%ebx movl 60(%rsi),%r12d movl %r9d,%r13d movl %ebx,%r14d bswapl %r12d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,60(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl %ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp jmp L$rounds_16_xx .p2align 4 L$rounds_16_xx: movl 4(%rsp),%r13d movl 56(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%eax movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 36(%rsp),%r12d addl 0(%rsp),%r12d movl %r8d,%r13d addl %r15d,%r12d movl %eax,%r14d rorl $14,%r13d movl %r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,0(%rsp) xorl %eax,%r14d andl %r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d leaq 4(%rbp),%rbp movl 8(%rsp),%r13d movl 60(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r11d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 40(%rsp),%r12d addl 4(%rsp),%r12d movl %edx,%r13d addl %edi,%r12d movl %r11d,%r14d rorl $14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,4(%rsp) xorl %r11d,%r14d andl %edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp movl 12(%rsp),%r13d movl 0(%rsp),%r15d movl 
%r13d,%r12d rorl $11,%r13d addl %r14d,%r10d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 44(%rsp),%r12d addl 8(%rsp),%r12d movl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r14d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,8(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp movl 16(%rsp),%r13d movl 4(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r9d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 48(%rsp),%r12d addl 12(%rsp),%r12d movl %ebx,%r13d addl %edi,%r12d movl %r9d,%r14d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,12(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp movl 20(%rsp),%r13d movl 8(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r8d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 52(%rsp),%r12d addl 16(%rsp),%r12d movl %eax,%r13d addl %r15d,%r12d movl %r8d,%r14d rorl $14,%r13d movl %ebx,%r15d xorl %eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,16(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp movl 24(%rsp),%r13d movl 12(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%edx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 56(%rsp),%r12d addl 20(%rsp),%r12d movl %r11d,%r13d addl %edi,%r12d movl %edx,%r14d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl %ebx,%edi movl %r12d,20(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl %r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl %r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp movl 28(%rsp),%r13d movl 16(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ecx movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 60(%rsp),%r12d addl 24(%rsp),%r12d movl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r14d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,24(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl 
%r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp movl 32(%rsp),%r13d movl 20(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ebx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 0(%rsp),%r12d addl 28(%rsp),%r12d movl %r9d,%r13d addl %edi,%r12d movl %ebx,%r14d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,28(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl %ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp movl 36(%rsp),%r13d movl 24(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%eax movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 4(%rsp),%r12d addl 32(%rsp),%r12d movl %r8d,%r13d addl %r15d,%r12d movl %eax,%r14d rorl $14,%r13d movl %r9d,%r15d xorl %r8d,%r13d rorl $9,%r14d xorl %r10d,%r15d movl %r12d,32(%rsp) xorl %eax,%r14d andl %r8d,%r15d rorl $5,%r13d addl %r11d,%r12d xorl %r10d,%r15d rorl $11,%r14d xorl %r8d,%r13d addl %r15d,%r12d movl %eax,%r15d addl (%rbp),%r12d xorl %eax,%r14d xorl %ebx,%r15d rorl $6,%r13d movl %ebx,%r11d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r11d addl %r12d,%edx addl %r12d,%r11d leaq 4(%rbp),%rbp movl 40(%rsp),%r13d movl 28(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r11d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 8(%rsp),%r12d addl 36(%rsp),%r12d movl %edx,%r13d addl %edi,%r12d movl %r11d,%r14d rorl $14,%r13d movl %r8d,%edi xorl %edx,%r13d rorl $9,%r14d xorl %r9d,%edi movl %r12d,36(%rsp) xorl %r11d,%r14d andl %edx,%edi rorl $5,%r13d addl %r10d,%r12d xorl %r9d,%edi rorl $11,%r14d xorl %edx,%r13d addl %edi,%r12d movl %r11d,%edi addl (%rbp),%r12d xorl %r11d,%r14d xorl %eax,%edi rorl $6,%r13d movl %eax,%r10d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r10d addl %r12d,%ecx addl %r12d,%r10d leaq 4(%rbp),%rbp movl 44(%rsp),%r13d movl 32(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r10d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 12(%rsp),%r12d addl 40(%rsp),%r12d movl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r14d rorl $14,%r13d movl %edx,%r15d xorl %ecx,%r13d rorl $9,%r14d xorl %r8d,%r15d movl %r12d,40(%rsp) xorl %r10d,%r14d andl %ecx,%r15d rorl $5,%r13d addl %r9d,%r12d xorl %r8d,%r15d rorl $11,%r14d xorl %ecx,%r13d addl %r15d,%r12d movl %r10d,%r15d addl (%rbp),%r12d xorl %r10d,%r14d xorl %r11d,%r15d rorl $6,%r13d movl %r11d,%r9d andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%r9d addl %r12d,%ebx addl %r12d,%r9d leaq 4(%rbp),%rbp movl 48(%rsp),%r13d movl 36(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r9d movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 16(%rsp),%r12d addl 44(%rsp),%r12d 
movl %ebx,%r13d addl %edi,%r12d movl %r9d,%r14d rorl $14,%r13d movl %ecx,%edi xorl %ebx,%r13d rorl $9,%r14d xorl %edx,%edi movl %r12d,44(%rsp) xorl %r9d,%r14d andl %ebx,%edi rorl $5,%r13d addl %r8d,%r12d xorl %edx,%edi rorl $11,%r14d xorl %ebx,%r13d addl %edi,%r12d movl %r9d,%edi addl (%rbp),%r12d xorl %r9d,%r14d xorl %r10d,%edi rorl $6,%r13d movl %r10d,%r8d andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%r8d addl %r12d,%eax addl %r12d,%r8d leaq 20(%rbp),%rbp movl 52(%rsp),%r13d movl 40(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%r8d movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 20(%rsp),%r12d addl 48(%rsp),%r12d movl %eax,%r13d addl %r15d,%r12d movl %r8d,%r14d rorl $14,%r13d movl %ebx,%r15d xorl %eax,%r13d rorl $9,%r14d xorl %ecx,%r15d movl %r12d,48(%rsp) xorl %r8d,%r14d andl %eax,%r15d rorl $5,%r13d addl %edx,%r12d xorl %ecx,%r15d rorl $11,%r14d xorl %eax,%r13d addl %r15d,%r12d movl %r8d,%r15d addl (%rbp),%r12d xorl %r8d,%r14d xorl %r9d,%r15d rorl $6,%r13d movl %r9d,%edx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%edx addl %r12d,%r11d addl %r12d,%edx leaq 4(%rbp),%rbp movl 56(%rsp),%r13d movl 44(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%edx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 24(%rsp),%r12d addl 52(%rsp),%r12d movl %r11d,%r13d addl %edi,%r12d movl %edx,%r14d rorl $14,%r13d movl %eax,%edi xorl %r11d,%r13d rorl $9,%r14d xorl %ebx,%edi movl %r12d,52(%rsp) xorl %edx,%r14d andl %r11d,%edi rorl $5,%r13d addl %ecx,%r12d xorl %ebx,%edi rorl $11,%r14d xorl %r11d,%r13d addl %edi,%r12d movl %edx,%edi addl (%rbp),%r12d xorl %edx,%r14d xorl %r8d,%edi rorl $6,%r13d movl %r8d,%ecx andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%ecx addl %r12d,%r10d addl %r12d,%ecx leaq 4(%rbp),%rbp movl 60(%rsp),%r13d movl 48(%rsp),%r15d movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ecx movl %r15d,%r14d rorl $2,%r15d xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%r15d shrl $10,%r14d rorl $17,%r15d xorl %r13d,%r12d xorl %r14d,%r15d addl 28(%rsp),%r12d addl 56(%rsp),%r12d movl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r14d rorl $14,%r13d movl %r11d,%r15d xorl %r10d,%r13d rorl $9,%r14d xorl %eax,%r15d movl %r12d,56(%rsp) xorl %ecx,%r14d andl %r10d,%r15d rorl $5,%r13d addl %ebx,%r12d xorl %eax,%r15d rorl $11,%r14d xorl %r10d,%r13d addl %r15d,%r12d movl %ecx,%r15d addl (%rbp),%r12d xorl %ecx,%r14d xorl %edx,%r15d rorl $6,%r13d movl %edx,%ebx andl %r15d,%edi rorl $2,%r14d addl %r13d,%r12d xorl %edi,%ebx addl %r12d,%r9d addl %r12d,%ebx leaq 4(%rbp),%rbp movl 0(%rsp),%r13d movl 52(%rsp),%edi movl %r13d,%r12d rorl $11,%r13d addl %r14d,%ebx movl %edi,%r14d rorl $2,%edi xorl %r12d,%r13d shrl $3,%r12d rorl $7,%r13d xorl %r14d,%edi shrl $10,%r14d rorl $17,%edi xorl %r13d,%r12d xorl %r14d,%edi addl 32(%rsp),%r12d addl 60(%rsp),%r12d movl %r9d,%r13d addl %edi,%r12d movl %ebx,%r14d rorl $14,%r13d movl %r10d,%edi xorl %r9d,%r13d rorl $9,%r14d xorl %r11d,%edi movl %r12d,60(%rsp) xorl %ebx,%r14d andl %r9d,%edi rorl $5,%r13d addl %eax,%r12d xorl %r11d,%edi rorl $11,%r14d xorl %r9d,%r13d addl %edi,%r12d movl %ebx,%edi addl (%rbp),%r12d xorl %ebx,%r14d xorl %ecx,%edi rorl $6,%r13d movl %ecx,%eax andl %edi,%r15d rorl $2,%r14d addl %r13d,%r12d xorl %r15d,%eax addl %r12d,%r8d addl %r12d,%eax leaq 20(%rbp),%rbp cmpb $0,3(%rbp) jnz L$rounds_16_xx 
movq 64+0(%rsp),%rdi addl %r14d,%eax leaq 64(%rsi),%rsi addl 0(%rdi),%eax addl 4(%rdi),%ebx addl 8(%rdi),%ecx addl 12(%rdi),%edx addl 16(%rdi),%r8d addl 20(%rdi),%r9d addl 24(%rdi),%r10d addl 28(%rdi),%r11d cmpq 64+16(%rsp),%rsi movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) jb L$loop movq 88(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$epilogue: ret .section __DATA,__const .p2align 6 K256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f .long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff .long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff .long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 .long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908 .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text .globl _sha256_block_data_order_hw .private_extern _sha256_block_data_order_hw .p2align 6 _sha256_block_data_order_hw: _CET_ENDBR leaq K256+128(%rip),%rcx movdqu (%rdi),%xmm1 movdqu 16(%rdi),%xmm2 movdqa 512-128(%rcx),%xmm7 pshufd $0x1b,%xmm1,%xmm0 pshufd $0xb1,%xmm1,%xmm1 pshufd $0x1b,%xmm2,%xmm2 movdqa %xmm7,%xmm8 .byte 102,15,58,15,202,8 punpcklqdq %xmm0,%xmm2 jmp L$oop_shaext .p2align 4 L$oop_shaext: movdqu (%rsi),%xmm3 movdqu 16(%rsi),%xmm4 movdqu 32(%rsi),%xmm5 .byte 102,15,56,0,223 movdqu 48(%rsi),%xmm6 movdqa 0-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 102,15,56,0,231 movdqa %xmm2,%xmm10 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 nop movdqa %xmm1,%xmm9 .byte 15,56,203,202 movdqa 32-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 102,15,56,0,239 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 leaq 
64(%rsi),%rsi .byte 15,56,204,220 .byte 15,56,203,202 movdqa 64-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 102,15,56,0,247 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm6,%xmm7 .byte 102,15,58,15,253,4 nop paddd %xmm7,%xmm3 .byte 15,56,204,229 .byte 15,56,203,202 movdqa 96-128(%rcx),%xmm0 paddd %xmm6,%xmm0 .byte 15,56,205,222 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm3,%xmm7 .byte 102,15,58,15,254,4 nop paddd %xmm7,%xmm4 .byte 15,56,204,238 .byte 15,56,203,202 movdqa 128-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 15,56,205,227 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm4,%xmm7 .byte 102,15,58,15,251,4 nop paddd %xmm7,%xmm5 .byte 15,56,204,243 .byte 15,56,203,202 movdqa 160-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 15,56,205,236 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm5,%xmm7 .byte 102,15,58,15,252,4 nop paddd %xmm7,%xmm6 .byte 15,56,204,220 .byte 15,56,203,202 movdqa 192-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 15,56,205,245 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm6,%xmm7 .byte 102,15,58,15,253,4 nop paddd %xmm7,%xmm3 .byte 15,56,204,229 .byte 15,56,203,202 movdqa 224-128(%rcx),%xmm0 paddd %xmm6,%xmm0 .byte 15,56,205,222 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm3,%xmm7 .byte 102,15,58,15,254,4 nop paddd %xmm7,%xmm4 .byte 15,56,204,238 .byte 15,56,203,202 movdqa 256-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 15,56,205,227 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm4,%xmm7 .byte 102,15,58,15,251,4 nop paddd %xmm7,%xmm5 .byte 15,56,204,243 .byte 15,56,203,202 movdqa 288-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 15,56,205,236 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm5,%xmm7 .byte 102,15,58,15,252,4 nop paddd %xmm7,%xmm6 .byte 15,56,204,220 .byte 15,56,203,202 movdqa 320-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 15,56,205,245 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm6,%xmm7 .byte 102,15,58,15,253,4 nop paddd %xmm7,%xmm3 .byte 15,56,204,229 .byte 15,56,203,202 movdqa 352-128(%rcx),%xmm0 paddd %xmm6,%xmm0 .byte 15,56,205,222 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm3,%xmm7 .byte 102,15,58,15,254,4 nop paddd %xmm7,%xmm4 .byte 15,56,204,238 .byte 15,56,203,202 movdqa 384-128(%rcx),%xmm0 paddd %xmm3,%xmm0 .byte 15,56,205,227 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm4,%xmm7 .byte 102,15,58,15,251,4 nop paddd %xmm7,%xmm5 .byte 15,56,204,243 .byte 15,56,203,202 movdqa 416-128(%rcx),%xmm0 paddd %xmm4,%xmm0 .byte 15,56,205,236 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 movdqa %xmm5,%xmm7 .byte 102,15,58,15,252,4 .byte 15,56,203,202 paddd %xmm7,%xmm6 movdqa 448-128(%rcx),%xmm0 paddd %xmm5,%xmm0 .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 .byte 15,56,205,245 movdqa %xmm8,%xmm7 .byte 15,56,203,202 movdqa 480-128(%rcx),%xmm0 paddd %xmm6,%xmm0 nop .byte 15,56,203,209 pshufd $0x0e,%xmm0,%xmm0 decq %rdx nop .byte 15,56,203,202 paddd %xmm10,%xmm2 paddd %xmm9,%xmm1 jnz L$oop_shaext pshufd $0xb1,%xmm2,%xmm2 pshufd $0x1b,%xmm1,%xmm7 pshufd $0xb1,%xmm1,%xmm1 punpckhqdq %xmm2,%xmm1 .byte 102,15,58,15,215,8 movdqu %xmm1,(%rdi) movdqu %xmm2,16(%rdi) ret .globl _sha256_block_data_order_ssse3 .private_extern _sha256_block_data_order_ssse3 .p2align 6 _sha256_block_data_order_ssse3: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 shlq $4,%rdx subq $96,%rsp leaq (%rsi,%rdx,4),%rdx andq $-64,%rsp movq %rdi,64+0(%rsp) movq %rsi,64+8(%rsp) movq %rdx,64+16(%rsp) movq %rax,88(%rsp) L$prologue_ssse3: movl 0(%rdi),%eax movl 4(%rdi),%ebx movl 
8(%rdi),%ecx movl 12(%rdi),%edx movl 16(%rdi),%r8d movl 20(%rdi),%r9d movl 24(%rdi),%r10d movl 28(%rdi),%r11d jmp L$loop_ssse3 .p2align 4 L$loop_ssse3: movdqa K256+512(%rip),%xmm7 movdqu 0(%rsi),%xmm0 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 .byte 102,15,56,0,199 movdqu 48(%rsi),%xmm3 leaq K256(%rip),%rbp .byte 102,15,56,0,207 movdqa 0(%rbp),%xmm4 movdqa 32(%rbp),%xmm5 .byte 102,15,56,0,215 paddd %xmm0,%xmm4 movdqa 64(%rbp),%xmm6 .byte 102,15,56,0,223 movdqa 96(%rbp),%xmm7 paddd %xmm1,%xmm5 paddd %xmm2,%xmm6 paddd %xmm3,%xmm7 movdqa %xmm4,0(%rsp) movl %eax,%r14d movdqa %xmm5,16(%rsp) movl %ebx,%edi movdqa %xmm6,32(%rsp) xorl %ecx,%edi movdqa %xmm7,48(%rsp) movl %r8d,%r13d jmp L$ssse3_00_47 .p2align 4 L$ssse3_00_47: subq $-128,%rbp rorl $14,%r13d movdqa %xmm1,%xmm4 movl %r14d,%eax movl %r9d,%r12d movdqa %xmm3,%xmm7 rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d .byte 102,15,58,15,224,4 andl %r8d,%r12d xorl %r8d,%r13d .byte 102,15,58,15,250,4 addl 0(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %ebx,%r15d addl %r12d,%r11d movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi paddd %xmm7,%xmm0 rorl $2,%r14d addl %r11d,%edx psrld $7,%xmm6 addl %edi,%r11d movl %edx,%r13d pshufd $250,%xmm3,%xmm7 addl %r11d,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%r11d movl %r8d,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d psrld $11,%xmm6 xorl %r11d,%r14d pxor %xmm5,%xmm4 andl %edx,%r12d xorl %edx,%r13d pslld $11,%xmm5 addl 4(%rsp),%r10d movl %r11d,%edi pxor %xmm6,%xmm4 xorl %r9d,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl %eax,%edi addl %r12d,%r10d pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d psrld $10,%xmm7 addl %r13d,%r10d xorl %eax,%r15d paddd %xmm4,%xmm0 rorl $2,%r14d addl %r10d,%ecx psrlq $17,%xmm6 addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%r10d movl %edx,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %ecx,%r13d xorl %r8d,%r12d pxor %xmm6,%xmm7 rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d pshufd $128,%xmm7,%xmm7 xorl %ecx,%r13d addl 8(%rsp),%r9d movl %r10d,%r15d psrldq $8,%xmm7 xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d paddd %xmm7,%xmm0 andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d pshufd $80,%xmm0,%xmm7 xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx movdqa %xmm7,%xmm6 addl %edi,%r9d movl %ebx,%r13d psrld $10,%xmm7 addl %r9d,%r14d rorl $14,%r13d psrlq $17,%xmm6 movl %r14d,%r9d movl %ecx,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl $5,%r13d xorl %r9d,%r14d psrlq $2,%xmm6 andl %ebx,%r12d xorl %ebx,%r13d addl 12(%rsp),%r8d pxor %xmm6,%xmm7 movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %r10d,%edi addl %r12d,%r8d movdqa 0(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq $8,%xmm7 xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d paddd %xmm7,%xmm0 rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d paddd %xmm0,%xmm6 movl %eax,%r13d addl %r8d,%r14d movdqa %xmm6,0(%rsp) rorl $14,%r13d movdqa %xmm2,%xmm4 movl %r14d,%r8d movl %ebx,%r12d movdqa %xmm0,%xmm7 rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d .byte 102,15,58,15,225,4 andl %eax,%r12d xorl %eax,%r13d .byte 102,15,58,15,251,4 addl 16(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %r9d,%r15d addl %r12d,%edx movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl 
%r8d,%r14d addl %r13d,%edx xorl %r9d,%edi paddd %xmm7,%xmm1 rorl $2,%r14d addl %edx,%r11d psrld $7,%xmm6 addl %edi,%edx movl %r11d,%r13d pshufd $250,%xmm0,%xmm7 addl %edx,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%edx movl %eax,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl $5,%r13d psrld $11,%xmm6 xorl %edx,%r14d pxor %xmm5,%xmm4 andl %r11d,%r12d xorl %r11d,%r13d pslld $11,%xmm5 addl 20(%rsp),%ecx movl %edx,%edi pxor %xmm6,%xmm4 xorl %ebx,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl %r8d,%edi addl %r12d,%ecx pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d psrld $10,%xmm7 addl %r13d,%ecx xorl %r8d,%r15d paddd %xmm4,%xmm1 rorl $2,%r14d addl %ecx,%r10d psrlq $17,%xmm6 addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %r10d,%r13d xorl %eax,%r12d pxor %xmm6,%xmm7 rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d pshufd $128,%xmm7,%xmm7 xorl %r10d,%r13d addl 24(%rsp),%ebx movl %ecx,%r15d psrldq $8,%xmm7 xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d paddd %xmm7,%xmm1 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx pshufd $80,%xmm1,%xmm7 xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d movdqa %xmm7,%xmm6 addl %edi,%ebx movl %r9d,%r13d psrld $10,%xmm7 addl %ebx,%r14d rorl $14,%r13d psrlq $17,%xmm6 movl %r14d,%ebx movl %r10d,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl %ebx,%r14d psrlq $2,%xmm6 andl %r9d,%r12d xorl %r9d,%r13d addl 28(%rsp),%eax pxor %xmm6,%xmm7 movl %ebx,%edi xorl %r11d,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %ecx,%edi addl %r12d,%eax movdqa 32(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq $8,%xmm7 xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d paddd %xmm7,%xmm1 rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax paddd %xmm1,%xmm6 movl %r8d,%r13d addl %eax,%r14d movdqa %xmm6,16(%rsp) rorl $14,%r13d movdqa %xmm3,%xmm4 movl %r14d,%eax movl %r9d,%r12d movdqa %xmm1,%xmm7 rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d .byte 102,15,58,15,226,4 andl %r8d,%r12d xorl %r8d,%r13d .byte 102,15,58,15,248,4 addl 32(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %ebx,%r15d addl %r12d,%r11d movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi paddd %xmm7,%xmm2 rorl $2,%r14d addl %r11d,%edx psrld $7,%xmm6 addl %edi,%r11d movl %edx,%r13d pshufd $250,%xmm1,%xmm7 addl %r11d,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%r11d movl %r8d,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d psrld $11,%xmm6 xorl %r11d,%r14d pxor %xmm5,%xmm4 andl %edx,%r12d xorl %edx,%r13d pslld $11,%xmm5 addl 36(%rsp),%r10d movl %r11d,%edi pxor %xmm6,%xmm4 xorl %r9d,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl %eax,%edi addl %r12d,%r10d pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d psrld $10,%xmm7 addl %r13d,%r10d xorl %eax,%r15d paddd %xmm4,%xmm2 rorl $2,%r14d addl %r10d,%ecx psrlq $17,%xmm6 addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%r10d movl %edx,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %ecx,%r13d xorl %r8d,%r12d pxor %xmm6,%xmm7 rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d pshufd $128,%xmm7,%xmm7 xorl %ecx,%r13d addl 40(%rsp),%r9d movl %r10d,%r15d psrldq $8,%xmm7 xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d paddd %xmm7,%xmm2 andl %r15d,%edi xorl 
%r10d,%r14d addl %r13d,%r9d pshufd $80,%xmm2,%xmm7 xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx movdqa %xmm7,%xmm6 addl %edi,%r9d movl %ebx,%r13d psrld $10,%xmm7 addl %r9d,%r14d rorl $14,%r13d psrlq $17,%xmm6 movl %r14d,%r9d movl %ecx,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl $5,%r13d xorl %r9d,%r14d psrlq $2,%xmm6 andl %ebx,%r12d xorl %ebx,%r13d addl 44(%rsp),%r8d pxor %xmm6,%xmm7 movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %r10d,%edi addl %r12d,%r8d movdqa 64(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq $8,%xmm7 xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d paddd %xmm7,%xmm2 rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d paddd %xmm2,%xmm6 movl %eax,%r13d addl %r8d,%r14d movdqa %xmm6,32(%rsp) rorl $14,%r13d movdqa %xmm0,%xmm4 movl %r14d,%r8d movl %ebx,%r12d movdqa %xmm2,%xmm7 rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d .byte 102,15,58,15,227,4 andl %eax,%r12d xorl %eax,%r13d .byte 102,15,58,15,249,4 addl 48(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d movdqa %xmm4,%xmm5 xorl %r9d,%r15d addl %r12d,%edx movdqa %xmm4,%xmm6 rorl $6,%r13d andl %r15d,%edi psrld $3,%xmm4 xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi paddd %xmm7,%xmm3 rorl $2,%r14d addl %edx,%r11d psrld $7,%xmm6 addl %edi,%edx movl %r11d,%r13d pshufd $250,%xmm2,%xmm7 addl %edx,%r14d rorl $14,%r13d pslld $14,%xmm5 movl %r14d,%edx movl %eax,%r12d pxor %xmm6,%xmm4 rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl $5,%r13d psrld $11,%xmm6 xorl %edx,%r14d pxor %xmm5,%xmm4 andl %r11d,%r12d xorl %r11d,%r13d pslld $11,%xmm5 addl 52(%rsp),%ecx movl %edx,%edi pxor %xmm6,%xmm4 xorl %ebx,%r12d rorl $11,%r14d movdqa %xmm7,%xmm6 xorl %r8d,%edi addl %r12d,%ecx pxor %xmm5,%xmm4 rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d psrld $10,%xmm7 addl %r13d,%ecx xorl %r8d,%r15d paddd %xmm4,%xmm3 rorl $2,%r14d addl %ecx,%r10d psrlq $17,%xmm6 addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d pxor %xmm6,%xmm7 rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d psrlq $2,%xmm6 xorl %r10d,%r13d xorl %eax,%r12d pxor %xmm6,%xmm7 rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d pshufd $128,%xmm7,%xmm7 xorl %r10d,%r13d addl 56(%rsp),%ebx movl %ecx,%r15d psrldq $8,%xmm7 xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d paddd %xmm7,%xmm3 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx pshufd $80,%xmm3,%xmm7 xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d movdqa %xmm7,%xmm6 addl %edi,%ebx movl %r9d,%r13d psrld $10,%xmm7 addl %ebx,%r14d rorl $14,%r13d psrlq $17,%xmm6 movl %r14d,%ebx movl %r10d,%r12d pxor %xmm6,%xmm7 rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl %ebx,%r14d psrlq $2,%xmm6 andl %r9d,%r12d xorl %r9d,%r13d addl 60(%rsp),%eax pxor %xmm6,%xmm7 movl %ebx,%edi xorl %r11d,%r12d rorl $11,%r14d pshufd $8,%xmm7,%xmm7 xorl %ecx,%edi addl %r12d,%eax movdqa 96(%rbp),%xmm6 rorl $6,%r13d andl %edi,%r15d pslldq $8,%xmm7 xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d paddd %xmm7,%xmm3 rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax paddd %xmm3,%xmm6 movl %r8d,%r13d addl %eax,%r14d movdqa %xmm6,48(%rsp) cmpb $0,131(%rbp) jne L$ssse3_00_47 rorl $14,%r13d movl %r14d,%eax movl %r9d,%r12d rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 0(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d xorl %ebx,%r15d addl %r12d,%r11d rorl $6,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi rorl $2,%r14d addl %r11d,%edx addl %edi,%r11d 
movl %edx,%r13d addl %r11d,%r14d rorl $14,%r13d movl %r14d,%r11d movl %r8d,%r12d rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 4(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d rorl $11,%r14d xorl %eax,%edi addl %r12d,%r10d rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d rorl $2,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d rorl $14,%r13d movl %r14d,%r10d movl %edx,%r12d rorl $9,%r14d xorl %ecx,%r13d xorl %r8d,%r12d rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 8(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d rorl $14,%r13d movl %r14d,%r9d movl %ecx,%r12d rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl $5,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 12(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d xorl %r10d,%edi addl %r12d,%r8d rorl $6,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d rorl $14,%r13d movl %r14d,%r8d movl %ebx,%r12d rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 16(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d xorl %r9d,%r15d addl %r12d,%edx rorl $6,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi rorl $2,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d rorl $14,%r13d movl %r14d,%edx movl %eax,%r12d rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl $5,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 20(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d rorl $11,%r14d xorl %r8d,%edi addl %r12d,%ecx rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d rorl $2,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d xorl %r10d,%r13d xorl %eax,%r12d rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 24(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d rorl $14,%r13d movl %r14d,%ebx movl %r10d,%r12d rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl %r9d,%r13d addl 28(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d rorl $11,%r14d xorl %ecx,%edi addl %r12d,%eax rorl $6,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d rorl $14,%r13d movl %r14d,%eax movl %r9d,%r12d rorl $9,%r14d xorl %r8d,%r13d xorl %r10d,%r12d rorl $5,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 32(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d rorl $11,%r14d xorl %ebx,%r15d addl %r12d,%r11d rorl $6,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi rorl $2,%r14d addl %r11d,%edx addl %edi,%r11d movl %edx,%r13d addl %r11d,%r14d rorl $14,%r13d movl %r14d,%r11d movl %r8d,%r12d rorl $9,%r14d xorl %edx,%r13d xorl %r9d,%r12d rorl $5,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 36(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d rorl $11,%r14d xorl 
%eax,%edi addl %r12d,%r10d rorl $6,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d rorl $2,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d rorl $14,%r13d movl %r14d,%r10d movl %edx,%r12d rorl $9,%r14d xorl %ecx,%r13d xorl %r8d,%r12d rorl $5,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 40(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d rorl $11,%r14d xorl %r11d,%r15d addl %r12d,%r9d rorl $6,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi rorl $2,%r14d addl %r9d,%ebx addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d rorl $14,%r13d movl %r14d,%r9d movl %ecx,%r12d rorl $9,%r14d xorl %ebx,%r13d xorl %edx,%r12d rorl $5,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 44(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d rorl $11,%r14d xorl %r10d,%edi addl %r12d,%r8d rorl $6,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d rorl $2,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d rorl $14,%r13d movl %r14d,%r8d movl %ebx,%r12d rorl $9,%r14d xorl %eax,%r13d xorl %ecx,%r12d rorl $5,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 48(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d rorl $11,%r14d xorl %r9d,%r15d addl %r12d,%edx rorl $6,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi rorl $2,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d rorl $14,%r13d movl %r14d,%edx movl %eax,%r12d rorl $9,%r14d xorl %r11d,%r13d xorl %ebx,%r12d rorl $5,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 52(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d rorl $11,%r14d xorl %r8d,%edi addl %r12d,%ecx rorl $6,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d rorl $2,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d rorl $14,%r13d movl %r14d,%ecx movl %r11d,%r12d rorl $9,%r14d xorl %r10d,%r13d xorl %eax,%r12d rorl $5,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 56(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d rorl $11,%r14d xorl %edx,%r15d addl %r12d,%ebx rorl $6,%r13d andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi rorl $2,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d rorl $14,%r13d movl %r14d,%ebx movl %r10d,%r12d rorl $9,%r14d xorl %r9d,%r13d xorl %r11d,%r12d rorl $5,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl %r9d,%r13d addl 60(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d rorl $11,%r14d xorl %ecx,%edi addl %r12d,%eax rorl $6,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d rorl $2,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d movq 64+0(%rsp),%rdi movl %r14d,%eax addl 0(%rdi),%eax leaq 64(%rsi),%rsi addl 4(%rdi),%ebx addl 8(%rdi),%ecx addl 12(%rdi),%edx addl 16(%rdi),%r8d addl 20(%rdi),%r9d addl 24(%rdi),%r10d addl 28(%rdi),%r11d cmpq 64+16(%rsp),%rsi movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) jb L$loop_ssse3 movq 88(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$epilogue_ssse3: ret .globl _sha256_block_data_order_avx .private_extern _sha256_block_data_order_avx .p2align 6 _sha256_block_data_order_avx: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 shlq $4,%rdx subq $96,%rsp leaq (%rsi,%rdx,4),%rdx andq $-64,%rsp movq %rdi,64+0(%rsp) movq %rsi,64+8(%rsp) movq 
%rdx,64+16(%rsp) movq %rax,88(%rsp) L$prologue_avx: vzeroupper movl 0(%rdi),%eax movl 4(%rdi),%ebx movl 8(%rdi),%ecx movl 12(%rdi),%edx movl 16(%rdi),%r8d movl 20(%rdi),%r9d movl 24(%rdi),%r10d movl 28(%rdi),%r11d vmovdqa K256+512+32(%rip),%xmm8 vmovdqa K256+512+64(%rip),%xmm9 jmp L$loop_avx .p2align 4 L$loop_avx: vmovdqa K256+512(%rip),%xmm7 vmovdqu 0(%rsi),%xmm0 vmovdqu 16(%rsi),%xmm1 vmovdqu 32(%rsi),%xmm2 vmovdqu 48(%rsi),%xmm3 vpshufb %xmm7,%xmm0,%xmm0 leaq K256(%rip),%rbp vpshufb %xmm7,%xmm1,%xmm1 vpshufb %xmm7,%xmm2,%xmm2 vpaddd 0(%rbp),%xmm0,%xmm4 vpshufb %xmm7,%xmm3,%xmm3 vpaddd 32(%rbp),%xmm1,%xmm5 vpaddd 64(%rbp),%xmm2,%xmm6 vpaddd 96(%rbp),%xmm3,%xmm7 vmovdqa %xmm4,0(%rsp) movl %eax,%r14d vmovdqa %xmm5,16(%rsp) movl %ebx,%edi vmovdqa %xmm6,32(%rsp) xorl %ecx,%edi vmovdqa %xmm7,48(%rsp) movl %r8d,%r13d jmp L$avx_00_47 .p2align 4 L$avx_00_47: subq $-128,%rbp vpalignr $4,%xmm0,%xmm1,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d vpalignr $4,%xmm2,%xmm3,%xmm7 shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl %r10d,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d vpaddd %xmm7,%xmm0,%xmm0 xorl %r8d,%r13d addl 0(%rsp),%r11d movl %eax,%r15d vpsrld $3,%xmm4,%xmm7 xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%r11d shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi vpshufd $250,%xmm3,%xmm7 shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d vpsrld $11,%xmm6,%xmm6 movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d vpsrld $10,%xmm7,%xmm6 addl 4(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d vpsrlq $17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d vpaddd %xmm4,%xmm0,%xmm0 addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d vpsrlq $2,%xmm7,%xmm7 addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d vpxor %xmm7,%xmm6,%xmm6 movl %edx,%r12d shrdl $9,%r14d,%r14d xorl %ecx,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d vpaddd %xmm6,%xmm0,%xmm0 andl %ecx,%r12d xorl %ecx,%r13d addl 8(%rsp),%r9d vpshufd $80,%xmm0,%xmm7 movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d vpxor %xmm7,%xmm6,%xmm6 xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx vpsrlq $2,%xmm7,%xmm7 addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d vpshufb %xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d vpaddd %xmm6,%xmm0,%xmm0 shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d vpaddd 0(%rbp),%xmm0,%xmm6 xorl %ebx,%r13d addl 12(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl %r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d vmovdqa %xmm6,0(%rsp) vpalignr $4,%xmm1,%xmm2,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d vpalignr 
$4,%xmm3,%xmm0,%xmm7 shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl %ecx,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d vpaddd %xmm7,%xmm1,%xmm1 xorl %eax,%r13d addl 16(%rsp),%edx movl %r8d,%r15d vpsrld $3,%xmm4,%xmm7 xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi vpshufd $250,%xmm0,%xmm7 shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx vpsrld $11,%xmm6,%xmm6 movl %r11d,%r13d addl %edx,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl %r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d vpsrld $10,%xmm7,%xmm6 addl 20(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx vpsrlq $17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d vpaddd %xmm4,%xmm1,%xmm1 addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d vpsrlq $2,%xmm7,%xmm7 addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx vpxor %xmm7,%xmm6,%xmm6 movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d vpaddd %xmm6,%xmm1,%xmm1 andl %r10d,%r12d xorl %r10d,%r13d addl 24(%rsp),%ebx vpshufd $80,%xmm1,%xmm7 movl %ecx,%r15d xorl %eax,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx vpxor %xmm7,%xmm6,%xmm6 xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d vpsrlq $2,%xmm7,%xmm7 addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d vpshufb %xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d vpaddd %xmm6,%xmm1,%xmm1 shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d vpaddd 32(%rbp),%xmm1,%xmm6 xorl %r9d,%r13d addl 28(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d vmovdqa %xmm6,16(%rsp) vpalignr $4,%xmm2,%xmm3,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d vpalignr $4,%xmm0,%xmm1,%xmm7 shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl %r10d,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d vpaddd %xmm7,%xmm2,%xmm2 xorl %r8d,%r13d addl 32(%rsp),%r11d movl %eax,%r15d vpsrld $3,%xmm4,%xmm7 xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%r11d shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi vpshufd $250,%xmm1,%xmm7 shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d vpsrld $11,%xmm6,%xmm6 movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d vpsrld $10,%xmm7,%xmm6 addl 36(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d vpsrlq 
$17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d vpaddd %xmm4,%xmm2,%xmm2 addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d vpsrlq $2,%xmm7,%xmm7 addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d vpxor %xmm7,%xmm6,%xmm6 movl %edx,%r12d shrdl $9,%r14d,%r14d xorl %ecx,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d vpaddd %xmm6,%xmm2,%xmm2 andl %ecx,%r12d xorl %ecx,%r13d addl 40(%rsp),%r9d vpshufd $80,%xmm2,%xmm7 movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d vpxor %xmm7,%xmm6,%xmm6 xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx vpsrlq $2,%xmm7,%xmm7 addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d vpshufb %xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d vpaddd %xmm6,%xmm2,%xmm2 shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d vpaddd 64(%rbp),%xmm2,%xmm6 xorl %ebx,%r13d addl 44(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl %r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d vmovdqa %xmm6,32(%rsp) vpalignr $4,%xmm3,%xmm0,%xmm4 shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d vpalignr $4,%xmm1,%xmm2,%xmm7 shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl %ecx,%r12d vpsrld $7,%xmm4,%xmm6 shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d vpaddd %xmm7,%xmm3,%xmm3 xorl %eax,%r13d addl 48(%rsp),%edx movl %r8d,%r15d vpsrld $3,%xmm4,%xmm7 xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d vpslld $14,%xmm4,%xmm5 addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi vpxor %xmm6,%xmm7,%xmm4 xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi vpshufd $250,%xmm2,%xmm7 shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx vpsrld $11,%xmm6,%xmm6 movl %r11d,%r13d addl %edx,%r14d shrdl $14,%r13d,%r13d vpxor %xmm5,%xmm4,%xmm4 movl %r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d vpslld $11,%xmm5,%xmm5 xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d vpxor %xmm6,%xmm4,%xmm4 xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d vpsrld $10,%xmm7,%xmm6 addl 52(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d vpxor %xmm5,%xmm4,%xmm4 shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx vpsrlq $17,%xmm7,%xmm7 shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d vpaddd %xmm4,%xmm3,%xmm3 addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d vpxor %xmm7,%xmm6,%xmm6 addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d vpsrlq $2,%xmm7,%xmm7 addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx vpxor %xmm7,%xmm6,%xmm6 movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d vpshufb %xmm8,%xmm6,%xmm6 xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d vpaddd %xmm6,%xmm3,%xmm3 andl %r10d,%r12d xorl %r10d,%r13d addl 56(%rsp),%ebx vpshufd $80,%xmm3,%xmm7 movl %ecx,%r15d xorl %eax,%r12d shrdl $11,%r14d,%r14d vpsrld $10,%xmm7,%xmm6 xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d vpsrlq $17,%xmm7,%xmm7 andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx vpxor %xmm7,%xmm6,%xmm6 xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d vpsrlq $2,%xmm7,%xmm7 addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d vpxor %xmm7,%xmm6,%xmm6 shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d vpshufb 
%xmm9,%xmm6,%xmm6 shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d vpaddd %xmm6,%xmm3,%xmm3 shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d vpaddd 96(%rbp),%xmm3,%xmm6 xorl %r9d,%r13d addl 60(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d vmovdqa %xmm6,48(%rsp) cmpb $0,131(%rbp) jne L$avx_00_47 shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl %r10d,%r12d shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 0(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d addl %r12d,%r11d shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 4(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d movl %edx,%r12d shrdl $9,%r14d,%r14d xorl %ecx,%r13d xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 8(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 12(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl %r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl %ecx,%r12d shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 16(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 20(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 24(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d shrdl $11,%r14d,%r14d xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d andl 
%r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl %r9d,%r13d addl 28(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d shrdl $14,%r13d,%r13d movl %r14d,%eax movl %r9d,%r12d shrdl $9,%r14d,%r14d xorl %r8d,%r13d xorl %r10d,%r12d shrdl $5,%r13d,%r13d xorl %eax,%r14d andl %r8d,%r12d xorl %r8d,%r13d addl 32(%rsp),%r11d movl %eax,%r15d xorl %r10d,%r12d shrdl $11,%r14d,%r14d xorl %ebx,%r15d addl %r12d,%r11d shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %eax,%r14d addl %r13d,%r11d xorl %ebx,%edi shrdl $2,%r14d,%r14d addl %r11d,%edx addl %edi,%r11d movl %edx,%r13d addl %r11d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r11d movl %r8d,%r12d shrdl $9,%r14d,%r14d xorl %edx,%r13d xorl %r9d,%r12d shrdl $5,%r13d,%r13d xorl %r11d,%r14d andl %edx,%r12d xorl %edx,%r13d addl 36(%rsp),%r10d movl %r11d,%edi xorl %r9d,%r12d shrdl $11,%r14d,%r14d xorl %eax,%edi addl %r12d,%r10d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r11d,%r14d addl %r13d,%r10d xorl %eax,%r15d shrdl $2,%r14d,%r14d addl %r10d,%ecx addl %r15d,%r10d movl %ecx,%r13d addl %r10d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r10d movl %edx,%r12d shrdl $9,%r14d,%r14d xorl %ecx,%r13d xorl %r8d,%r12d shrdl $5,%r13d,%r13d xorl %r10d,%r14d andl %ecx,%r12d xorl %ecx,%r13d addl 40(%rsp),%r9d movl %r10d,%r15d xorl %r8d,%r12d shrdl $11,%r14d,%r14d xorl %r11d,%r15d addl %r12d,%r9d shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r10d,%r14d addl %r13d,%r9d xorl %r11d,%edi shrdl $2,%r14d,%r14d addl %r9d,%ebx addl %edi,%r9d movl %ebx,%r13d addl %r9d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r9d movl %ecx,%r12d shrdl $9,%r14d,%r14d xorl %ebx,%r13d xorl %edx,%r12d shrdl $5,%r13d,%r13d xorl %r9d,%r14d andl %ebx,%r12d xorl %ebx,%r13d addl 44(%rsp),%r8d movl %r9d,%edi xorl %edx,%r12d shrdl $11,%r14d,%r14d xorl %r10d,%edi addl %r12d,%r8d shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %r9d,%r14d addl %r13d,%r8d xorl %r10d,%r15d shrdl $2,%r14d,%r14d addl %r8d,%eax addl %r15d,%r8d movl %eax,%r13d addl %r8d,%r14d shrdl $14,%r13d,%r13d movl %r14d,%r8d movl %ebx,%r12d shrdl $9,%r14d,%r14d xorl %eax,%r13d xorl %ecx,%r12d shrdl $5,%r13d,%r13d xorl %r8d,%r14d andl %eax,%r12d xorl %eax,%r13d addl 48(%rsp),%edx movl %r8d,%r15d xorl %ecx,%r12d shrdl $11,%r14d,%r14d xorl %r9d,%r15d addl %r12d,%edx shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %r8d,%r14d addl %r13d,%edx xorl %r9d,%edi shrdl $2,%r14d,%r14d addl %edx,%r11d addl %edi,%edx movl %r11d,%r13d addl %edx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%edx movl %eax,%r12d shrdl $9,%r14d,%r14d xorl %r11d,%r13d xorl %ebx,%r12d shrdl $5,%r13d,%r13d xorl %edx,%r14d andl %r11d,%r12d xorl %r11d,%r13d addl 52(%rsp),%ecx movl %edx,%edi xorl %ebx,%r12d shrdl $11,%r14d,%r14d xorl %r8d,%edi addl %r12d,%ecx shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %edx,%r14d addl %r13d,%ecx xorl %r8d,%r15d shrdl $2,%r14d,%r14d addl %ecx,%r10d addl %r15d,%ecx movl %r10d,%r13d addl %ecx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ecx movl %r11d,%r12d shrdl $9,%r14d,%r14d xorl %r10d,%r13d xorl %eax,%r12d shrdl $5,%r13d,%r13d xorl %ecx,%r14d andl %r10d,%r12d xorl %r10d,%r13d addl 56(%rsp),%ebx movl %ecx,%r15d xorl %eax,%r12d shrdl 
$11,%r14d,%r14d xorl %edx,%r15d addl %r12d,%ebx shrdl $6,%r13d,%r13d andl %r15d,%edi xorl %ecx,%r14d addl %r13d,%ebx xorl %edx,%edi shrdl $2,%r14d,%r14d addl %ebx,%r9d addl %edi,%ebx movl %r9d,%r13d addl %ebx,%r14d shrdl $14,%r13d,%r13d movl %r14d,%ebx movl %r10d,%r12d shrdl $9,%r14d,%r14d xorl %r9d,%r13d xorl %r11d,%r12d shrdl $5,%r13d,%r13d xorl %ebx,%r14d andl %r9d,%r12d xorl %r9d,%r13d addl 60(%rsp),%eax movl %ebx,%edi xorl %r11d,%r12d shrdl $11,%r14d,%r14d xorl %ecx,%edi addl %r12d,%eax shrdl $6,%r13d,%r13d andl %edi,%r15d xorl %ebx,%r14d addl %r13d,%eax xorl %ecx,%r15d shrdl $2,%r14d,%r14d addl %eax,%r8d addl %r15d,%eax movl %r8d,%r13d addl %eax,%r14d movq 64+0(%rsp),%rdi movl %r14d,%eax addl 0(%rdi),%eax leaq 64(%rsi),%rsi addl 4(%rdi),%ebx addl 8(%rdi),%ecx addl 12(%rdi),%edx addl 16(%rdi),%r8d addl 20(%rdi),%r9d addl 24(%rdi),%r10d addl 28(%rdi),%r11d cmpq 64+16(%rsp),%rsi movl %eax,0(%rdi) movl %ebx,4(%rdi) movl %ecx,8(%rdi) movl %edx,12(%rdi) movl %r8d,16(%rdi) movl %r9d,20(%rdi) movl %r10d,24(%rdi) movl %r11d,28(%rdi) jb L$loop_avx movq 88(%rsp),%rsi vzeroupper movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$epilogue_avx: ret #endif
marvin-hansen/iggy-streaming-system
48,645
thirdparty/crates/ring-0.17.9/pregenerated/sha512-x86_64-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .globl sha512_block_data_order_nohw .hidden sha512_block_data_order_nohw .type sha512_block_data_order_nohw,@function .align 16 sha512_block_data_order_nohw: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 shlq $4,%rdx subq $128+32,%rsp leaq (%rsi,%rdx,8),%rdx andq $-64,%rsp movq %rdi,128+0(%rsp) movq %rsi,128+8(%rsp) movq %rdx,128+16(%rsp) movq %rax,152(%rsp) .cfi_escape 0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x08 .Lprologue: movq 0(%rdi),%rax movq 8(%rdi),%rbx movq 16(%rdi),%rcx movq 24(%rdi),%rdx movq 32(%rdi),%r8 movq 40(%rdi),%r9 movq 48(%rdi),%r10 movq 56(%rdi),%r11 jmp .Lloop .align 16 .Lloop: movq %rbx,%rdi leaq K512(%rip),%rbp xorq %rcx,%rdi movq 0(%rsi),%r12 movq %r8,%r13 movq %rax,%r14 bswapq %r12 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,0(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp addq %r14,%r11 movq 8(%rsi),%r12 movq %rdx,%r13 movq %r11,%r14 bswapq %r12 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,8(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq %rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq %r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp addq %r14,%r10 movq 16(%rsi),%r12 movq %rcx,%r13 movq %r10,%r14 bswapq %r12 rorq $23,%r13 movq %rdx,%r15 xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,16(%rsp) xorq %r10,%r14 andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 xorq %rcx,%r13 addq %r15,%r12 movq %r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp addq %r14,%r9 movq 24(%rsi),%r12 movq %rbx,%r13 movq %r9,%r14 bswapq %r12 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,24(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp addq %r14,%r8 movq 32(%rsi),%r12 movq %rax,%r13 movq %r8,%r14 bswapq %r12 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,32(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq %rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 addq (%rbp),%r12 xorq %r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp addq %r14,%rdx movq 40(%rsi),%r12 movq %r11,%r13 movq %rdx,%r14 bswapq %r12 rorq $23,%r13 movq 
%rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,40(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq $14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp addq %r14,%rcx movq 48(%rsi),%r12 movq %r10,%r13 movq %rcx,%r14 bswapq %r12 rorq $23,%r13 movq %r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,48(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq $4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq %r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp addq %r14,%rbx movq 56(%rsi),%r12 movq %r9,%r13 movq %rbx,%r14 bswapq %r12 rorq $23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,56(%rsp) xorq %rbx,%r14 andq %r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq %rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp addq %r14,%rax movq 64(%rsi),%r12 movq %r8,%r13 movq %rax,%r14 bswapq %r12 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,64(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp addq %r14,%r11 movq 72(%rsi),%r12 movq %rdx,%r13 movq %r11,%r14 bswapq %r12 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,72(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq %rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq %r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp addq %r14,%r10 movq 80(%rsi),%r12 movq %rcx,%r13 movq %r10,%r14 bswapq %r12 rorq $23,%r13 movq %rdx,%r15 xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,80(%rsp) xorq %r10,%r14 andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 xorq %rcx,%r13 addq %r15,%r12 movq %r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp addq %r14,%r9 movq 88(%rsi),%r12 movq %rbx,%r13 movq %r9,%r14 bswapq %r12 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,88(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp addq %r14,%r8 movq 96(%rsi),%r12 movq %rax,%r13 movq %r8,%r14 bswapq %r12 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,96(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq %rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 addq 
(%rbp),%r12 xorq %r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp addq %r14,%rdx movq 104(%rsi),%r12 movq %r11,%r13 movq %rdx,%r14 bswapq %r12 rorq $23,%r13 movq %rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,104(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq $14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp addq %r14,%rcx movq 112(%rsi),%r12 movq %r10,%r13 movq %rcx,%r14 bswapq %r12 rorq $23,%r13 movq %r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,112(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq $4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq %r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp addq %r14,%rbx movq 120(%rsi),%r12 movq %r9,%r13 movq %rbx,%r14 bswapq %r12 rorq $23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,120(%rsp) xorq %rbx,%r14 andq %r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq %rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp jmp .Lrounds_16_xx .align 16 .Lrounds_16_xx: movq 8(%rsp),%r13 movq 112(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%rax movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 72(%rsp),%r12 addq 0(%rsp),%r12 movq %r8,%r13 addq %r15,%r12 movq %rax,%r14 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,0(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp movq 16(%rsp),%r13 movq 120(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r11 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 80(%rsp),%r12 addq 8(%rsp),%r12 movq %rdx,%r13 addq %rdi,%r12 movq %r11,%r14 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,8(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq %rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq %r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp movq 24(%rsp),%r13 movq 0(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r10 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 88(%rsp),%r12 addq 16(%rsp),%r12 movq %rcx,%r13 addq %r15,%r12 movq %r10,%r14 rorq $23,%r13 movq %rdx,%r15 xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,16(%rsp) xorq %r10,%r14 andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 
xorq %rcx,%r13 addq %r15,%r12 movq %r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp movq 32(%rsp),%r13 movq 8(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r9 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 96(%rsp),%r12 addq 24(%rsp),%r12 movq %rbx,%r13 addq %rdi,%r12 movq %r9,%r14 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,24(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp movq 40(%rsp),%r13 movq 16(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r8 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 104(%rsp),%r12 addq 32(%rsp),%r12 movq %rax,%r13 addq %r15,%r12 movq %r8,%r14 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,32(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq %rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 addq (%rbp),%r12 xorq %r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp movq 48(%rsp),%r13 movq 24(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%rdx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 112(%rsp),%r12 addq 40(%rsp),%r12 movq %r11,%r13 addq %rdi,%r12 movq %rdx,%r14 rorq $23,%r13 movq %rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,40(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq $14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp movq 56(%rsp),%r13 movq 32(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%rcx movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 120(%rsp),%r12 addq 48(%rsp),%r12 movq %r10,%r13 addq %r15,%r12 movq %rcx,%r14 rorq $23,%r13 movq %r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,48(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq $4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq %r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp movq 64(%rsp),%r13 movq 40(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%rbx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 0(%rsp),%r12 addq 56(%rsp),%r12 movq %r9,%r13 addq %rdi,%r12 movq %rbx,%r14 rorq $23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,56(%rsp) xorq %rbx,%r14 andq %r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq 
%rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp movq 72(%rsp),%r13 movq 48(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%rax movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 8(%rsp),%r12 addq 64(%rsp),%r12 movq %r8,%r13 addq %r15,%r12 movq %rax,%r14 rorq $23,%r13 movq %r9,%r15 xorq %r8,%r13 rorq $5,%r14 xorq %r10,%r15 movq %r12,64(%rsp) xorq %rax,%r14 andq %r8,%r15 rorq $4,%r13 addq %r11,%r12 xorq %r10,%r15 rorq $6,%r14 xorq %r8,%r13 addq %r15,%r12 movq %rax,%r15 addq (%rbp),%r12 xorq %rax,%r14 xorq %rbx,%r15 rorq $14,%r13 movq %rbx,%r11 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r11 addq %r12,%rdx addq %r12,%r11 leaq 8(%rbp),%rbp movq 80(%rsp),%r13 movq 56(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r11 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 16(%rsp),%r12 addq 72(%rsp),%r12 movq %rdx,%r13 addq %rdi,%r12 movq %r11,%r14 rorq $23,%r13 movq %r8,%rdi xorq %rdx,%r13 rorq $5,%r14 xorq %r9,%rdi movq %r12,72(%rsp) xorq %r11,%r14 andq %rdx,%rdi rorq $4,%r13 addq %r10,%r12 xorq %r9,%rdi rorq $6,%r14 xorq %rdx,%r13 addq %rdi,%r12 movq %r11,%rdi addq (%rbp),%r12 xorq %r11,%r14 xorq %rax,%rdi rorq $14,%r13 movq %rax,%r10 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r10 addq %r12,%rcx addq %r12,%r10 leaq 24(%rbp),%rbp movq 88(%rsp),%r13 movq 64(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r10 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 24(%rsp),%r12 addq 80(%rsp),%r12 movq %rcx,%r13 addq %r15,%r12 movq %r10,%r14 rorq $23,%r13 movq %rdx,%r15 xorq %rcx,%r13 rorq $5,%r14 xorq %r8,%r15 movq %r12,80(%rsp) xorq %r10,%r14 andq %rcx,%r15 rorq $4,%r13 addq %r9,%r12 xorq %r8,%r15 rorq $6,%r14 xorq %rcx,%r13 addq %r15,%r12 movq %r10,%r15 addq (%rbp),%r12 xorq %r10,%r14 xorq %r11,%r15 rorq $14,%r13 movq %r11,%r9 andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%r9 addq %r12,%rbx addq %r12,%r9 leaq 8(%rbp),%rbp movq 96(%rsp),%r13 movq 72(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%r9 movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 32(%rsp),%r12 addq 88(%rsp),%r12 movq %rbx,%r13 addq %rdi,%r12 movq %r9,%r14 rorq $23,%r13 movq %rcx,%rdi xorq %rbx,%r13 rorq $5,%r14 xorq %rdx,%rdi movq %r12,88(%rsp) xorq %r9,%r14 andq %rbx,%rdi rorq $4,%r13 addq %r8,%r12 xorq %rdx,%rdi rorq $6,%r14 xorq %rbx,%r13 addq %rdi,%r12 movq %r9,%rdi addq (%rbp),%r12 xorq %r9,%r14 xorq %r10,%rdi rorq $14,%r13 movq %r10,%r8 andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%r8 addq %r12,%rax addq %r12,%r8 leaq 24(%rbp),%rbp movq 104(%rsp),%r13 movq 80(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%r8 movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 40(%rsp),%r12 addq 96(%rsp),%r12 movq %rax,%r13 addq %r15,%r12 movq %r8,%r14 rorq $23,%r13 movq %rbx,%r15 xorq %rax,%r13 rorq $5,%r14 xorq %rcx,%r15 movq %r12,96(%rsp) xorq %r8,%r14 andq %rax,%r15 rorq $4,%r13 addq %rdx,%r12 xorq %rcx,%r15 rorq $6,%r14 xorq %rax,%r13 addq %r15,%r12 movq %r8,%r15 
addq (%rbp),%r12 xorq %r8,%r14 xorq %r9,%r15 rorq $14,%r13 movq %r9,%rdx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rdx addq %r12,%r11 addq %r12,%rdx leaq 8(%rbp),%rbp movq 112(%rsp),%r13 movq 88(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%rdx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 48(%rsp),%r12 addq 104(%rsp),%r12 movq %r11,%r13 addq %rdi,%r12 movq %rdx,%r14 rorq $23,%r13 movq %rax,%rdi xorq %r11,%r13 rorq $5,%r14 xorq %rbx,%rdi movq %r12,104(%rsp) xorq %rdx,%r14 andq %r11,%rdi rorq $4,%r13 addq %rcx,%r12 xorq %rbx,%rdi rorq $6,%r14 xorq %r11,%r13 addq %rdi,%r12 movq %rdx,%rdi addq (%rbp),%r12 xorq %rdx,%r14 xorq %r8,%rdi rorq $14,%r13 movq %r8,%rcx andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rcx addq %r12,%r10 addq %r12,%rcx leaq 24(%rbp),%rbp movq 120(%rsp),%r13 movq 96(%rsp),%r15 movq %r13,%r12 rorq $7,%r13 addq %r14,%rcx movq %r15,%r14 rorq $42,%r15 xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%r15 shrq $6,%r14 rorq $19,%r15 xorq %r13,%r12 xorq %r14,%r15 addq 56(%rsp),%r12 addq 112(%rsp),%r12 movq %r10,%r13 addq %r15,%r12 movq %rcx,%r14 rorq $23,%r13 movq %r11,%r15 xorq %r10,%r13 rorq $5,%r14 xorq %rax,%r15 movq %r12,112(%rsp) xorq %rcx,%r14 andq %r10,%r15 rorq $4,%r13 addq %rbx,%r12 xorq %rax,%r15 rorq $6,%r14 xorq %r10,%r13 addq %r15,%r12 movq %rcx,%r15 addq (%rbp),%r12 xorq %rcx,%r14 xorq %rdx,%r15 rorq $14,%r13 movq %rdx,%rbx andq %r15,%rdi rorq $28,%r14 addq %r13,%r12 xorq %rdi,%rbx addq %r12,%r9 addq %r12,%rbx leaq 8(%rbp),%rbp movq 0(%rsp),%r13 movq 104(%rsp),%rdi movq %r13,%r12 rorq $7,%r13 addq %r14,%rbx movq %rdi,%r14 rorq $42,%rdi xorq %r12,%r13 shrq $7,%r12 rorq $1,%r13 xorq %r14,%rdi shrq $6,%r14 rorq $19,%rdi xorq %r13,%r12 xorq %r14,%rdi addq 64(%rsp),%r12 addq 120(%rsp),%r12 movq %r9,%r13 addq %rdi,%r12 movq %rbx,%r14 rorq $23,%r13 movq %r10,%rdi xorq %r9,%r13 rorq $5,%r14 xorq %r11,%rdi movq %r12,120(%rsp) xorq %rbx,%r14 andq %r9,%rdi rorq $4,%r13 addq %rax,%r12 xorq %r11,%rdi rorq $6,%r14 xorq %r9,%r13 addq %rdi,%r12 movq %rbx,%rdi addq (%rbp),%r12 xorq %rbx,%r14 xorq %rcx,%rdi rorq $14,%r13 movq %rcx,%rax andq %rdi,%r15 rorq $28,%r14 addq %r13,%r12 xorq %r15,%rax addq %r12,%r8 addq %r12,%rax leaq 24(%rbp),%rbp cmpb $0,7(%rbp) jnz .Lrounds_16_xx movq 128+0(%rsp),%rdi addq %r14,%rax leaq 128(%rsi),%rsi addq 0(%rdi),%rax addq 8(%rdi),%rbx addq 16(%rdi),%rcx addq 24(%rdi),%rdx addq 32(%rdi),%r8 addq 40(%rdi),%r9 addq 48(%rdi),%r10 addq 56(%rdi),%r11 cmpq 128+16(%rsp),%rsi movq %rax,0(%rdi) movq %rbx,8(%rdi) movq %rcx,16(%rdi) movq %rdx,24(%rdi) movq %r8,32(%rdi) movq %r9,40(%rdi) movq %r10,48(%rdi) movq %r11,56(%rdi) jb .Lloop movq 152(%rsp),%rsi .cfi_def_cfa %rsi,8 movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lepilogue: ret .cfi_endproc .size sha512_block_data_order_nohw,.-sha512_block_data_order_nohw .section .rodata .align 64 .type K512,@object K512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 
0xd807aa98a3030242,0x12835b0145706fbe .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0x0001020304050607,0x08090a0b0c0d0e0f .quad 0x0001020304050607,0x08090a0b0c0d0e0f .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text .globl sha512_block_data_order_avx .hidden 
sha512_block_data_order_avx .type sha512_block_data_order_avx,@function .align 64 sha512_block_data_order_avx: .cfi_startproc _CET_ENDBR movq %rsp,%rax .cfi_def_cfa_register %rax pushq %rbx .cfi_offset %rbx,-16 pushq %rbp .cfi_offset %rbp,-24 pushq %r12 .cfi_offset %r12,-32 pushq %r13 .cfi_offset %r13,-40 pushq %r14 .cfi_offset %r14,-48 pushq %r15 .cfi_offset %r15,-56 shlq $4,%rdx subq $160,%rsp leaq (%rsi,%rdx,8),%rdx andq $-64,%rsp movq %rdi,128+0(%rsp) movq %rsi,128+8(%rsp) movq %rdx,128+16(%rsp) movq %rax,152(%rsp) .cfi_escape 0x0f,0x06,0x77,0x98,0x01,0x06,0x23,0x08 .Lprologue_avx: vzeroupper movq 0(%rdi),%rax movq 8(%rdi),%rbx movq 16(%rdi),%rcx movq 24(%rdi),%rdx movq 32(%rdi),%r8 movq 40(%rdi),%r9 movq 48(%rdi),%r10 movq 56(%rdi),%r11 jmp .Lloop_avx .align 16 .Lloop_avx: vmovdqa K512+1280(%rip),%xmm11 vmovdqu 0(%rsi),%xmm0 leaq K512+128(%rip),%rbp vmovdqu 16(%rsi),%xmm1 vmovdqu 32(%rsi),%xmm2 vpshufb %xmm11,%xmm0,%xmm0 vmovdqu 48(%rsi),%xmm3 vpshufb %xmm11,%xmm1,%xmm1 vmovdqu 64(%rsi),%xmm4 vpshufb %xmm11,%xmm2,%xmm2 vmovdqu 80(%rsi),%xmm5 vpshufb %xmm11,%xmm3,%xmm3 vmovdqu 96(%rsi),%xmm6 vpshufb %xmm11,%xmm4,%xmm4 vmovdqu 112(%rsi),%xmm7 vpshufb %xmm11,%xmm5,%xmm5 vpaddq -128(%rbp),%xmm0,%xmm8 vpshufb %xmm11,%xmm6,%xmm6 vpaddq -96(%rbp),%xmm1,%xmm9 vpshufb %xmm11,%xmm7,%xmm7 vpaddq -64(%rbp),%xmm2,%xmm10 vpaddq -32(%rbp),%xmm3,%xmm11 vmovdqa %xmm8,0(%rsp) vpaddq 0(%rbp),%xmm4,%xmm8 vmovdqa %xmm9,16(%rsp) vpaddq 32(%rbp),%xmm5,%xmm9 vmovdqa %xmm10,32(%rsp) vpaddq 64(%rbp),%xmm6,%xmm10 vmovdqa %xmm11,48(%rsp) vpaddq 96(%rbp),%xmm7,%xmm11 vmovdqa %xmm8,64(%rsp) movq %rax,%r14 vmovdqa %xmm9,80(%rsp) movq %rbx,%rdi vmovdqa %xmm10,96(%rsp) xorq %rcx,%rdi vmovdqa %xmm11,112(%rsp) movq %r8,%r13 jmp .Lavx_00_47 .align 16 .Lavx_00_47: addq $256,%rbp vpalignr $8,%xmm0,%xmm1,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rax vpalignr $8,%xmm4,%xmm5,%xmm11 movq %r9,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r8,%r13 xorq %r10,%r12 vpaddq %xmm11,%xmm0,%xmm0 shrdq $4,%r13,%r13 xorq %rax,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r8,%r12 xorq %r8,%r13 vpsllq $56,%xmm8,%xmm9 addq 0(%rsp),%r11 movq %rax,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %r10,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rbx,%r15 addq %r12,%r11 vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rax,%r14 addq %r13,%r11 vpxor %xmm10,%xmm8,%xmm8 xorq %rbx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm7,%xmm11 addq %r11,%rdx addq %rdi,%r11 vpxor %xmm9,%xmm8,%xmm8 movq %rdx,%r13 addq %r11,%r14 vpsllq $3,%xmm7,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r11 vpaddq %xmm8,%xmm0,%xmm0 movq %r8,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm7,%xmm9 xorq %rdx,%r13 xorq %r9,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r11,%r14 vpsllq $42,%xmm10,%xmm10 andq %rdx,%r12 xorq %rdx,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 8(%rsp),%r10 movq %r11,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r9,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rax,%rdi addq %r12,%r10 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm0,%xmm0 xorq %r11,%r14 addq %r13,%r10 vpaddq -128(%rbp),%xmm0,%xmm10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 vmovdqa %xmm10,0(%rsp) vpalignr $8,%xmm1,%xmm2,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r10 vpalignr $8,%xmm5,%xmm6,%xmm11 movq %rdx,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %rcx,%r13 xorq %r8,%r12 vpaddq %xmm11,%xmm1,%xmm1 shrdq $4,%r13,%r13 xorq %r10,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rcx,%r12 xorq %rcx,%r13 vpsllq 
$56,%xmm8,%xmm9 addq 16(%rsp),%r9 movq %r10,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %r8,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r11,%r15 addq %r12,%r9 vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %r10,%r14 addq %r13,%r9 vpxor %xmm10,%xmm8,%xmm8 xorq %r11,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm0,%xmm11 addq %r9,%rbx addq %rdi,%r9 vpxor %xmm9,%xmm8,%xmm8 movq %rbx,%r13 addq %r9,%r14 vpsllq $3,%xmm0,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r9 vpaddq %xmm8,%xmm1,%xmm1 movq %rcx,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm0,%xmm9 xorq %rbx,%r13 xorq %rdx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r9,%r14 vpsllq $42,%xmm10,%xmm10 andq %rbx,%r12 xorq %rbx,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 24(%rsp),%r8 movq %r9,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rdx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r10,%rdi addq %r12,%r8 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm1,%xmm1 xorq %r9,%r14 addq %r13,%r8 vpaddq -96(%rbp),%xmm1,%xmm10 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 vmovdqa %xmm10,16(%rsp) vpalignr $8,%xmm2,%xmm3,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r8 vpalignr $8,%xmm6,%xmm7,%xmm11 movq %rbx,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %rax,%r13 xorq %rcx,%r12 vpaddq %xmm11,%xmm2,%xmm2 shrdq $4,%r13,%r13 xorq %r8,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rax,%r12 xorq %rax,%r13 vpsllq $56,%xmm8,%xmm9 addq 32(%rsp),%rdx movq %r8,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rcx,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r9,%r15 addq %r12,%rdx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %r8,%r14 addq %r13,%rdx vpxor %xmm10,%xmm8,%xmm8 xorq %r9,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm1,%xmm11 addq %rdx,%r11 addq %rdi,%rdx vpxor %xmm9,%xmm8,%xmm8 movq %r11,%r13 addq %rdx,%r14 vpsllq $3,%xmm1,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rdx vpaddq %xmm8,%xmm2,%xmm2 movq %rax,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm1,%xmm9 xorq %r11,%r13 xorq %rbx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %rdx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r11,%r12 xorq %r11,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 40(%rsp),%rcx movq %rdx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rbx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r8,%rdi addq %r12,%rcx vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm2,%xmm2 xorq %rdx,%r14 addq %r13,%rcx vpaddq -64(%rbp),%xmm2,%xmm10 xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 vmovdqa %xmm10,32(%rsp) vpalignr $8,%xmm3,%xmm4,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rcx vpalignr $8,%xmm7,%xmm0,%xmm11 movq %r11,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r10,%r13 xorq %rax,%r12 vpaddq %xmm11,%xmm3,%xmm3 shrdq $4,%r13,%r13 xorq %rcx,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r10,%r12 xorq %r10,%r13 vpsllq $56,%xmm8,%xmm9 addq 48(%rsp),%rbx movq %rcx,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rax,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rdx,%r15 addq %r12,%rbx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rcx,%r14 addq %r13,%rbx vpxor %xmm10,%xmm8,%xmm8 xorq %rdx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm2,%xmm11 addq %rbx,%r9 addq %rdi,%rbx vpxor %xmm9,%xmm8,%xmm8 movq %r9,%r13 addq %rbx,%r14 vpsllq $3,%xmm2,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rbx vpaddq %xmm8,%xmm3,%xmm3 movq %r10,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm2,%xmm9 xorq %r9,%r13 xorq %r11,%r12 vpxor %xmm10,%xmm11,%xmm11 
shrdq $4,%r13,%r13 xorq %rbx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r9,%r12 xorq %r9,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 56(%rsp),%rax movq %rbx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r11,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rcx,%rdi addq %r12,%rax vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm3,%xmm3 xorq %rbx,%r14 addq %r13,%rax vpaddq -32(%rbp),%xmm3,%xmm10 xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq %rax,%r14 vmovdqa %xmm10,48(%rsp) vpalignr $8,%xmm4,%xmm5,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rax vpalignr $8,%xmm0,%xmm1,%xmm11 movq %r9,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r8,%r13 xorq %r10,%r12 vpaddq %xmm11,%xmm4,%xmm4 shrdq $4,%r13,%r13 xorq %rax,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r8,%r12 xorq %r8,%r13 vpsllq $56,%xmm8,%xmm9 addq 64(%rsp),%r11 movq %rax,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %r10,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rbx,%r15 addq %r12,%r11 vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rax,%r14 addq %r13,%r11 vpxor %xmm10,%xmm8,%xmm8 xorq %rbx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm3,%xmm11 addq %r11,%rdx addq %rdi,%r11 vpxor %xmm9,%xmm8,%xmm8 movq %rdx,%r13 addq %r11,%r14 vpsllq $3,%xmm3,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r11 vpaddq %xmm8,%xmm4,%xmm4 movq %r8,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm3,%xmm9 xorq %rdx,%r13 xorq %r9,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r11,%r14 vpsllq $42,%xmm10,%xmm10 andq %rdx,%r12 xorq %rdx,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 72(%rsp),%r10 movq %r11,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r9,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rax,%rdi addq %r12,%r10 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm4,%xmm4 xorq %r11,%r14 addq %r13,%r10 vpaddq 0(%rbp),%xmm4,%xmm10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 vmovdqa %xmm10,64(%rsp) vpalignr $8,%xmm5,%xmm6,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r10 vpalignr $8,%xmm1,%xmm2,%xmm11 movq %rdx,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %rcx,%r13 xorq %r8,%r12 vpaddq %xmm11,%xmm5,%xmm5 shrdq $4,%r13,%r13 xorq %r10,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rcx,%r12 xorq %rcx,%r13 vpsllq $56,%xmm8,%xmm9 addq 80(%rsp),%r9 movq %r10,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %r8,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r11,%r15 addq %r12,%r9 vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %r10,%r14 addq %r13,%r9 vpxor %xmm10,%xmm8,%xmm8 xorq %r11,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm4,%xmm11 addq %r9,%rbx addq %rdi,%r9 vpxor %xmm9,%xmm8,%xmm8 movq %rbx,%r13 addq %r9,%r14 vpsllq $3,%xmm4,%xmm10 shrdq $23,%r13,%r13 movq %r14,%r9 vpaddq %xmm8,%xmm5,%xmm5 movq %rcx,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm4,%xmm9 xorq %rbx,%r13 xorq %rdx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %r9,%r14 vpsllq $42,%xmm10,%xmm10 andq %rbx,%r12 xorq %rbx,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 88(%rsp),%r8 movq %r9,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rdx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r10,%rdi addq %r12,%r8 vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm5,%xmm5 xorq %r9,%r14 addq %r13,%r8 vpaddq 32(%rbp),%xmm5,%xmm10 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 vmovdqa %xmm10,80(%rsp) vpalignr $8,%xmm6,%xmm7,%xmm8 shrdq $23,%r13,%r13 movq %r14,%r8 vpalignr $8,%xmm2,%xmm3,%xmm11 movq %rbx,%r12 shrdq 
$5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %rax,%r13 xorq %rcx,%r12 vpaddq %xmm11,%xmm6,%xmm6 shrdq $4,%r13,%r13 xorq %r8,%r14 vpsrlq $7,%xmm8,%xmm11 andq %rax,%r12 xorq %rax,%r13 vpsllq $56,%xmm8,%xmm9 addq 96(%rsp),%rdx movq %r8,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rcx,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %r9,%r15 addq %r12,%rdx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %r8,%r14 addq %r13,%rdx vpxor %xmm10,%xmm8,%xmm8 xorq %r9,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm5,%xmm11 addq %rdx,%r11 addq %rdi,%rdx vpxor %xmm9,%xmm8,%xmm8 movq %r11,%r13 addq %rdx,%r14 vpsllq $3,%xmm5,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rdx vpaddq %xmm8,%xmm6,%xmm6 movq %rax,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm5,%xmm9 xorq %r11,%r13 xorq %rbx,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %rdx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r11,%r12 xorq %r11,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 104(%rsp),%rcx movq %rdx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %rbx,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %r8,%rdi addq %r12,%rcx vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm6,%xmm6 xorq %rdx,%r14 addq %r13,%rcx vpaddq 64(%rbp),%xmm6,%xmm10 xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 vmovdqa %xmm10,96(%rsp) vpalignr $8,%xmm7,%xmm0,%xmm8 shrdq $23,%r13,%r13 movq %r14,%rcx vpalignr $8,%xmm3,%xmm4,%xmm11 movq %r11,%r12 shrdq $5,%r14,%r14 vpsrlq $1,%xmm8,%xmm10 xorq %r10,%r13 xorq %rax,%r12 vpaddq %xmm11,%xmm7,%xmm7 shrdq $4,%r13,%r13 xorq %rcx,%r14 vpsrlq $7,%xmm8,%xmm11 andq %r10,%r12 xorq %r10,%r13 vpsllq $56,%xmm8,%xmm9 addq 112(%rsp),%rbx movq %rcx,%r15 vpxor %xmm10,%xmm11,%xmm8 xorq %rax,%r12 shrdq $6,%r14,%r14 vpsrlq $7,%xmm10,%xmm10 xorq %rdx,%r15 addq %r12,%rbx vpxor %xmm9,%xmm8,%xmm8 shrdq $14,%r13,%r13 andq %r15,%rdi vpsllq $7,%xmm9,%xmm9 xorq %rcx,%r14 addq %r13,%rbx vpxor %xmm10,%xmm8,%xmm8 xorq %rdx,%rdi shrdq $28,%r14,%r14 vpsrlq $6,%xmm6,%xmm11 addq %rbx,%r9 addq %rdi,%rbx vpxor %xmm9,%xmm8,%xmm8 movq %r9,%r13 addq %rbx,%r14 vpsllq $3,%xmm6,%xmm10 shrdq $23,%r13,%r13 movq %r14,%rbx vpaddq %xmm8,%xmm7,%xmm7 movq %r10,%r12 shrdq $5,%r14,%r14 vpsrlq $19,%xmm6,%xmm9 xorq %r9,%r13 xorq %r11,%r12 vpxor %xmm10,%xmm11,%xmm11 shrdq $4,%r13,%r13 xorq %rbx,%r14 vpsllq $42,%xmm10,%xmm10 andq %r9,%r12 xorq %r9,%r13 vpxor %xmm9,%xmm11,%xmm11 addq 120(%rsp),%rax movq %rbx,%rdi vpsrlq $42,%xmm9,%xmm9 xorq %r11,%r12 shrdq $6,%r14,%r14 vpxor %xmm10,%xmm11,%xmm11 xorq %rcx,%rdi addq %r12,%rax vpxor %xmm9,%xmm11,%xmm11 shrdq $14,%r13,%r13 andq %rdi,%r15 vpaddq %xmm11,%xmm7,%xmm7 xorq %rbx,%r14 addq %r13,%rax vpaddq 96(%rbp),%xmm7,%xmm10 xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq %rax,%r14 vmovdqa %xmm10,112(%rsp) cmpb $0,135(%rbp) jne .Lavx_00_47 shrdq $23,%r13,%r13 movq %r14,%rax movq %r9,%r12 shrdq $5,%r14,%r14 xorq %r8,%r13 xorq %r10,%r12 shrdq $4,%r13,%r13 xorq %rax,%r14 andq %r8,%r12 xorq %r8,%r13 addq 0(%rsp),%r11 movq %rax,%r15 xorq %r10,%r12 shrdq $6,%r14,%r14 xorq %rbx,%r15 addq %r12,%r11 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rax,%r14 addq %r13,%r11 xorq %rbx,%rdi shrdq $28,%r14,%r14 addq %r11,%rdx addq %rdi,%r11 movq %rdx,%r13 addq %r11,%r14 shrdq $23,%r13,%r13 movq %r14,%r11 movq %r8,%r12 shrdq $5,%r14,%r14 xorq %rdx,%r13 xorq %r9,%r12 shrdq $4,%r13,%r13 xorq %r11,%r14 andq %rdx,%r12 xorq %rdx,%r13 addq 8(%rsp),%r10 movq %r11,%rdi xorq %r9,%r12 shrdq $6,%r14,%r14 xorq %rax,%rdi addq %r12,%r10 shrdq $14,%r13,%r13 andq %rdi,%r15 
xorq %r11,%r14 addq %r13,%r10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 shrdq $23,%r13,%r13 movq %r14,%r10 movq %rdx,%r12 shrdq $5,%r14,%r14 xorq %rcx,%r13 xorq %r8,%r12 shrdq $4,%r13,%r13 xorq %r10,%r14 andq %rcx,%r12 xorq %rcx,%r13 addq 16(%rsp),%r9 movq %r10,%r15 xorq %r8,%r12 shrdq $6,%r14,%r14 xorq %r11,%r15 addq %r12,%r9 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r10,%r14 addq %r13,%r9 xorq %r11,%rdi shrdq $28,%r14,%r14 addq %r9,%rbx addq %rdi,%r9 movq %rbx,%r13 addq %r9,%r14 shrdq $23,%r13,%r13 movq %r14,%r9 movq %rcx,%r12 shrdq $5,%r14,%r14 xorq %rbx,%r13 xorq %rdx,%r12 shrdq $4,%r13,%r13 xorq %r9,%r14 andq %rbx,%r12 xorq %rbx,%r13 addq 24(%rsp),%r8 movq %r9,%rdi xorq %rdx,%r12 shrdq $6,%r14,%r14 xorq %r10,%rdi addq %r12,%r8 shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %r9,%r14 addq %r13,%r8 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 shrdq $23,%r13,%r13 movq %r14,%r8 movq %rbx,%r12 shrdq $5,%r14,%r14 xorq %rax,%r13 xorq %rcx,%r12 shrdq $4,%r13,%r13 xorq %r8,%r14 andq %rax,%r12 xorq %rax,%r13 addq 32(%rsp),%rdx movq %r8,%r15 xorq %rcx,%r12 shrdq $6,%r14,%r14 xorq %r9,%r15 addq %r12,%rdx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r8,%r14 addq %r13,%rdx xorq %r9,%rdi shrdq $28,%r14,%r14 addq %rdx,%r11 addq %rdi,%rdx movq %r11,%r13 addq %rdx,%r14 shrdq $23,%r13,%r13 movq %r14,%rdx movq %rax,%r12 shrdq $5,%r14,%r14 xorq %r11,%r13 xorq %rbx,%r12 shrdq $4,%r13,%r13 xorq %rdx,%r14 andq %r11,%r12 xorq %r11,%r13 addq 40(%rsp),%rcx movq %rdx,%rdi xorq %rbx,%r12 shrdq $6,%r14,%r14 xorq %r8,%rdi addq %r12,%rcx shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %rdx,%r14 addq %r13,%rcx xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 shrdq $23,%r13,%r13 movq %r14,%rcx movq %r11,%r12 shrdq $5,%r14,%r14 xorq %r10,%r13 xorq %rax,%r12 shrdq $4,%r13,%r13 xorq %rcx,%r14 andq %r10,%r12 xorq %r10,%r13 addq 48(%rsp),%rbx movq %rcx,%r15 xorq %rax,%r12 shrdq $6,%r14,%r14 xorq %rdx,%r15 addq %r12,%rbx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rcx,%r14 addq %r13,%rbx xorq %rdx,%rdi shrdq $28,%r14,%r14 addq %rbx,%r9 addq %rdi,%rbx movq %r9,%r13 addq %rbx,%r14 shrdq $23,%r13,%r13 movq %r14,%rbx movq %r10,%r12 shrdq $5,%r14,%r14 xorq %r9,%r13 xorq %r11,%r12 shrdq $4,%r13,%r13 xorq %rbx,%r14 andq %r9,%r12 xorq %r9,%r13 addq 56(%rsp),%rax movq %rbx,%rdi xorq %r11,%r12 shrdq $6,%r14,%r14 xorq %rcx,%rdi addq %r12,%rax shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %rbx,%r14 addq %r13,%rax xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq %rax,%r14 shrdq $23,%r13,%r13 movq %r14,%rax movq %r9,%r12 shrdq $5,%r14,%r14 xorq %r8,%r13 xorq %r10,%r12 shrdq $4,%r13,%r13 xorq %rax,%r14 andq %r8,%r12 xorq %r8,%r13 addq 64(%rsp),%r11 movq %rax,%r15 xorq %r10,%r12 shrdq $6,%r14,%r14 xorq %rbx,%r15 addq %r12,%r11 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rax,%r14 addq %r13,%r11 xorq %rbx,%rdi shrdq $28,%r14,%r14 addq %r11,%rdx addq %rdi,%r11 movq %rdx,%r13 addq %r11,%r14 shrdq $23,%r13,%r13 movq %r14,%r11 movq %r8,%r12 shrdq $5,%r14,%r14 xorq %rdx,%r13 xorq %r9,%r12 shrdq $4,%r13,%r13 xorq %r11,%r14 andq %rdx,%r12 xorq %rdx,%r13 addq 72(%rsp),%r10 movq %r11,%rdi xorq %r9,%r12 shrdq $6,%r14,%r14 xorq %rax,%rdi addq %r12,%r10 shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %r11,%r14 addq %r13,%r10 xorq %rax,%r15 shrdq $28,%r14,%r14 addq %r10,%rcx addq %r15,%r10 movq %rcx,%r13 addq %r10,%r14 shrdq $23,%r13,%r13 movq %r14,%r10 movq %rdx,%r12 shrdq $5,%r14,%r14 xorq %rcx,%r13 xorq %r8,%r12 
shrdq $4,%r13,%r13 xorq %r10,%r14 andq %rcx,%r12 xorq %rcx,%r13 addq 80(%rsp),%r9 movq %r10,%r15 xorq %r8,%r12 shrdq $6,%r14,%r14 xorq %r11,%r15 addq %r12,%r9 shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r10,%r14 addq %r13,%r9 xorq %r11,%rdi shrdq $28,%r14,%r14 addq %r9,%rbx addq %rdi,%r9 movq %rbx,%r13 addq %r9,%r14 shrdq $23,%r13,%r13 movq %r14,%r9 movq %rcx,%r12 shrdq $5,%r14,%r14 xorq %rbx,%r13 xorq %rdx,%r12 shrdq $4,%r13,%r13 xorq %r9,%r14 andq %rbx,%r12 xorq %rbx,%r13 addq 88(%rsp),%r8 movq %r9,%rdi xorq %rdx,%r12 shrdq $6,%r14,%r14 xorq %r10,%rdi addq %r12,%r8 shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %r9,%r14 addq %r13,%r8 xorq %r10,%r15 shrdq $28,%r14,%r14 addq %r8,%rax addq %r15,%r8 movq %rax,%r13 addq %r8,%r14 shrdq $23,%r13,%r13 movq %r14,%r8 movq %rbx,%r12 shrdq $5,%r14,%r14 xorq %rax,%r13 xorq %rcx,%r12 shrdq $4,%r13,%r13 xorq %r8,%r14 andq %rax,%r12 xorq %rax,%r13 addq 96(%rsp),%rdx movq %r8,%r15 xorq %rcx,%r12 shrdq $6,%r14,%r14 xorq %r9,%r15 addq %r12,%rdx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %r8,%r14 addq %r13,%rdx xorq %r9,%rdi shrdq $28,%r14,%r14 addq %rdx,%r11 addq %rdi,%rdx movq %r11,%r13 addq %rdx,%r14 shrdq $23,%r13,%r13 movq %r14,%rdx movq %rax,%r12 shrdq $5,%r14,%r14 xorq %r11,%r13 xorq %rbx,%r12 shrdq $4,%r13,%r13 xorq %rdx,%r14 andq %r11,%r12 xorq %r11,%r13 addq 104(%rsp),%rcx movq %rdx,%rdi xorq %rbx,%r12 shrdq $6,%r14,%r14 xorq %r8,%rdi addq %r12,%rcx shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %rdx,%r14 addq %r13,%rcx xorq %r8,%r15 shrdq $28,%r14,%r14 addq %rcx,%r10 addq %r15,%rcx movq %r10,%r13 addq %rcx,%r14 shrdq $23,%r13,%r13 movq %r14,%rcx movq %r11,%r12 shrdq $5,%r14,%r14 xorq %r10,%r13 xorq %rax,%r12 shrdq $4,%r13,%r13 xorq %rcx,%r14 andq %r10,%r12 xorq %r10,%r13 addq 112(%rsp),%rbx movq %rcx,%r15 xorq %rax,%r12 shrdq $6,%r14,%r14 xorq %rdx,%r15 addq %r12,%rbx shrdq $14,%r13,%r13 andq %r15,%rdi xorq %rcx,%r14 addq %r13,%rbx xorq %rdx,%rdi shrdq $28,%r14,%r14 addq %rbx,%r9 addq %rdi,%rbx movq %r9,%r13 addq %rbx,%r14 shrdq $23,%r13,%r13 movq %r14,%rbx movq %r10,%r12 shrdq $5,%r14,%r14 xorq %r9,%r13 xorq %r11,%r12 shrdq $4,%r13,%r13 xorq %rbx,%r14 andq %r9,%r12 xorq %r9,%r13 addq 120(%rsp),%rax movq %rbx,%rdi xorq %r11,%r12 shrdq $6,%r14,%r14 xorq %rcx,%rdi addq %r12,%rax shrdq $14,%r13,%r13 andq %rdi,%r15 xorq %rbx,%r14 addq %r13,%rax xorq %rcx,%r15 shrdq $28,%r14,%r14 addq %rax,%r8 addq %r15,%rax movq %r8,%r13 addq %rax,%r14 movq 128+0(%rsp),%rdi movq %r14,%rax addq 0(%rdi),%rax leaq 128(%rsi),%rsi addq 8(%rdi),%rbx addq 16(%rdi),%rcx addq 24(%rdi),%rdx addq 32(%rdi),%r8 addq 40(%rdi),%r9 addq 48(%rdi),%r10 addq 56(%rdi),%r11 cmpq 128+16(%rsp),%rsi movq %rax,0(%rdi) movq %rbx,8(%rdi) movq %rcx,16(%rdi) movq %rdx,24(%rdi) movq %r8,32(%rdi) movq %r9,40(%rdi) movq %r10,48(%rdi) movq %r11,56(%rdi) jb .Lloop_avx movq 152(%rsp),%rsi .cfi_def_cfa %rsi,8 vzeroupper movq -48(%rsi),%r15 .cfi_restore %r15 movq -40(%rsi),%r14 .cfi_restore %r14 movq -32(%rsi),%r13 .cfi_restore %r13 movq -24(%rsi),%r12 .cfi_restore %r12 movq -16(%rsi),%rbp .cfi_restore %rbp movq -8(%rsi),%rbx .cfi_restore %rbx leaq (%rsi),%rsp .cfi_def_cfa_register %rsp .Lepilogue_avx: ret .cfi_endproc .size sha512_block_data_order_avx,.-sha512_block_data_order_avx #endif
marvin-hansen/iggy-streaming-system
48,620
thirdparty/crates/ring-0.17.9/pregenerated/x86_64-mont5-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .globl _bn_mul4x_mont_gather5 .private_extern _bn_mul4x_mont_gather5 .p2align 5 _bn_mul4x_mont_gather5: _CET_ENDBR .byte 0x67 movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$mul4x_prologue: .byte 0x67 shll $3,%r9d leaq (%r9,%r9,2),%r10 negq %r9 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb L$mul4xsp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp L$mul4xsp_done .p2align 5 L$mul4xsp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp L$mul4xsp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$mul4x_page_walk jmp L$mul4x_page_walk_done L$mul4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$mul4x_page_walk L$mul4x_page_walk_done: negq %r9 movq %rax,40(%rsp) L$mul4x_body: call mul4x_internal movq 40(%rsp),%rsi movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$mul4x_epilogue: ret .p2align 5 mul4x_internal: shlq $5,%r9 movd 8(%rax),%xmm5 leaq L$inc(%rip),%rax leaq 128(%rdx,%r9,1),%r13 shrq $5,%r9 movdqa 0(%rax),%xmm0 movdqa 16(%rax),%xmm1 leaq 88-112(%rsp,%r9,1),%r10 leaq 128(%rdx),%r12 pshufd $0,%xmm5,%xmm5 movdqa %xmm1,%xmm4 .byte 0x67,0x67 movdqa %xmm1,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 .byte 0x67 movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,112(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,128(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,144(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,160(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,176(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,192(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,208(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,224(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,240(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,256(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,272(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,288(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,304(%r10) paddd %xmm2,%xmm3 .byte 0x67 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,320(%r10) pcmpeqd %xmm5,%xmm3 movdqa %xmm2,336(%r10) pand 64(%r12),%xmm0 pand 80(%r12),%xmm1 pand 96(%r12),%xmm2 movdqa %xmm3,352(%r10) pand 112(%r12),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -128(%r12),%xmm4 movdqa -112(%r12),%xmm5 movdqa -96(%r12),%xmm2 pand 112(%r10),%xmm4 movdqa -80(%r12),%xmm3 pand 128(%r10),%xmm5 por %xmm4,%xmm0 pand 144(%r10),%xmm2 por %xmm5,%xmm1 pand 160(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -64(%r12),%xmm4 movdqa -48(%r12),%xmm5 movdqa -32(%r12),%xmm2 pand 176(%r10),%xmm4 movdqa -16(%r12),%xmm3 pand 192(%r10),%xmm5 por %xmm4,%xmm0 pand 208(%r10),%xmm2 por %xmm5,%xmm1 pand 224(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa 0(%r12),%xmm4 movdqa 16(%r12),%xmm5 movdqa 32(%r12),%xmm2 pand 
240(%r10),%xmm4 movdqa 48(%r12),%xmm3 pand 256(%r10),%xmm5 por %xmm4,%xmm0 pand 272(%r10),%xmm2 por %xmm5,%xmm1 pand 288(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 por %xmm1,%xmm0 pshufd $0x4e,%xmm0,%xmm1 por %xmm1,%xmm0 leaq 256(%r12),%r12 .byte 102,72,15,126,195 movq %r13,16+8(%rsp) movq %rdi,56+8(%rsp) movq (%r8),%r8 movq (%rsi),%rax leaq (%rsi,%r9,1),%rsi negq %r9 movq %r8,%rbp mulq %rbx movq %rax,%r10 movq (%rcx),%rax imulq %r10,%rbp leaq 64+8(%rsp),%r14 movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi,%r9,1),%rax adcq $0,%rdx movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%r9),%r15 leaq 32(%rcx),%rcx adcq $0,%rdx movq %rdi,(%r14) movq %rdx,%r13 jmp L$1st4x .p2align 5 L$1st4x: mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%r14) movq %rdx,%r13 mulq %rbx addq %rax,%r10 movq 0(%rcx),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%rcx),%rcx adcq $0,%rdx movq %rdi,(%r14) movq %rdx,%r13 addq $32,%r15 jnz L$1st4x mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%r14) movq %rdx,%r13 leaq (%rcx,%r9,1),%rcx xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi movq %r13,-8(%r14) jmp L$outer4x .p2align 5 L$outer4x: leaq 16+128(%r14),%rdx pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 movdqa -128(%r12),%xmm0 movdqa -112(%r12),%xmm1 movdqa -96(%r12),%xmm2 movdqa -80(%r12),%xmm3 pand -128(%rdx),%xmm0 pand -112(%rdx),%xmm1 por %xmm0,%xmm4 pand -96(%rdx),%xmm2 por %xmm1,%xmm5 pand -80(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa -64(%r12),%xmm0 movdqa -48(%r12),%xmm1 movdqa -32(%r12),%xmm2 movdqa -16(%r12),%xmm3 pand -64(%rdx),%xmm0 pand -48(%rdx),%xmm1 por %xmm0,%xmm4 pand -32(%rdx),%xmm2 por %xmm1,%xmm5 pand -16(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 0(%r12),%xmm0 movdqa 16(%r12),%xmm1 movdqa 32(%r12),%xmm2 movdqa 48(%r12),%xmm3 pand 0(%rdx),%xmm0 pand 16(%rdx),%xmm1 por %xmm0,%xmm4 pand 32(%rdx),%xmm2 por %xmm1,%xmm5 pand 48(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 64(%r12),%xmm0 movdqa 80(%r12),%xmm1 movdqa 96(%r12),%xmm2 movdqa 112(%r12),%xmm3 pand 64(%rdx),%xmm0 pand 80(%rdx),%xmm1 por %xmm0,%xmm4 pand 96(%rdx),%xmm2 por %xmm1,%xmm5 pand 112(%rdx),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 por %xmm5,%xmm4 pshufd $0x4e,%xmm4,%xmm0 por %xmm4,%xmm0 leaq 256(%r12),%r12 .byte 102,72,15,126,195 movq (%r14,%r9,1),%r10 movq %r8,%rbp mulq %rbx addq %rax,%r10 movq (%rcx),%rax adcq $0,%rdx imulq %r10,%rbp movq %rdx,%r11 movq %rdi,(%r14) leaq (%r14,%r9,1),%r14 mulq %rbp addq %rax,%r10 movq 8(%rsi,%r9,1),%rax adcq $0,%rdx movq %rdx,%rdi mulq 
%rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx addq 8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%r9),%r15 leaq 32(%rcx),%rcx adcq $0,%rdx movq %rdx,%r13 jmp L$inner4x .p2align 5 L$inner4x: mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax adcq $0,%rdx addq 16(%r14),%r10 leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %rdi,-32(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx),%rax adcq $0,%rdx addq -8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%r13 mulq %rbx addq %rax,%r10 movq 0(%rcx),%rax adcq $0,%rdx addq (%r14),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,1),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %rdi,-16(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx addq 8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi,%r15,1),%rax adcq $0,%rdx addq %r11,%rdi leaq 32(%rcx),%rcx adcq $0,%rdx movq %r13,-8(%r14) movq %rdx,%r13 addq $32,%r15 jnz L$inner4x mulq %rbx addq %rax,%r10 movq -16(%rcx),%rax adcq $0,%rdx addq 16(%r14),%r10 leaq 32(%r14),%r14 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %rdi,-32(%r14) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq %rbp,%rax movq -8(%rcx),%rbp adcq $0,%rdx addq -8(%r14),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r9,1),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %r13,-24(%r14) movq %rdx,%r13 movq %rdi,-16(%r14) leaq (%rcx,%r9,1),%rcx xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi addq (%r14),%r13 adcq $0,%rdi movq %r13,-8(%r14) cmpq 16+8(%rsp),%r12 jb L$outer4x xorq %rax,%rax subq %r13,%rbp adcq %r15,%r15 orq %r15,%rdi subq %rdi,%rax leaq (%r14,%r9,1),%rbx movq (%rcx),%r12 leaq (%rcx),%rbp movq %r9,%rcx sarq $3+2,%rcx movq 56+8(%rsp),%rdi decq %r12 xorq %r10,%r10 movq 8(%rbp),%r13 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp L$sqr4x_sub_entry .globl _bn_power5_nohw .private_extern _bn_power5_nohw .p2align 5 _bn_power5_nohw: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$power5_prologue: shll $3,%r9d leal (%r9,%r9,2),%r10d negq %r9 movq (%r8),%r8 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb L$pwr_sp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp L$pwr_sp_done .p2align 5 L$pwr_sp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp L$pwr_sp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$pwr_page_walk jmp L$pwr_page_walk_done L$pwr_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$pwr_page_walk L$pwr_page_walk_done: movq %r9,%r10 negq %r9 movq %r8,32(%rsp) movq %rax,40(%rsp) L$power5_body: .byte 102,72,15,110,207 .byte 102,72,15,110,209 .byte 102,73,15,110,218 .byte 102,72,15,110,226 call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal call __bn_sqr8x_internal call __bn_post4x_internal .byte 102,72,15,126,209 .byte 102,72,15,126,226 movq %rsi,%rdi movq 40(%rsp),%rax 
leaq 32(%rsp),%r8 call mul4x_internal movq 40(%rsp),%rsi movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$power5_epilogue: ret .globl _bn_sqr8x_internal .private_extern _bn_sqr8x_internal .private_extern _bn_sqr8x_internal .p2align 5 _bn_sqr8x_internal: __bn_sqr8x_internal: _CET_ENDBR leaq 32(%r10),%rbp leaq (%rsi,%r9,1),%rsi movq %r9,%rcx movq -32(%rsi,%rbp,1),%r14 leaq 48+8(%rsp,%r9,2),%rdi movq -24(%rsi,%rbp,1),%rax leaq -32(%rdi,%rbp,1),%rdi movq -16(%rsi,%rbp,1),%rbx movq %rax,%r15 mulq %r14 movq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 movq %r10,-24(%rdi,%rbp,1) mulq %r14 addq %rax,%r11 movq %rbx,%rax adcq $0,%rdx movq %r11,-16(%rdi,%rbp,1) movq %rdx,%r10 movq -8(%rsi,%rbp,1),%rbx mulq %r15 movq %rax,%r12 movq %rbx,%rax movq %rdx,%r13 leaq (%rbp),%rcx mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 movq %r10,-8(%rdi,%rcx,1) jmp L$sqr4x_1st .p2align 5 L$sqr4x_1st: movq (%rsi,%rcx,1),%rbx mulq %r15 addq %rax,%r13 movq %rbx,%rax movq %rdx,%r12 adcq $0,%r12 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq 8(%rsi,%rcx,1),%rbx movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %rbx,%rax movq %r11,(%rdi,%rcx,1) movq %rdx,%r13 adcq $0,%r13 mulq %r14 addq %rax,%r10 movq %rbx,%rax movq 16(%rsi,%rcx,1),%rbx movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 mulq %r15 addq %rax,%r13 movq %rbx,%rax movq %r10,8(%rdi,%rcx,1) movq %rdx,%r12 adcq $0,%r12 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq 24(%rsi,%rcx,1),%rbx movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %rbx,%rax movq %r11,16(%rdi,%rcx,1) movq %rdx,%r13 adcq $0,%r13 leaq 32(%rcx),%rcx mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 movq %r10,-8(%rdi,%rcx,1) cmpq $0,%rcx jne L$sqr4x_1st mulq %r15 addq %rax,%r13 leaq 16(%rbp),%rbp adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,(%rdi) movq %rdx,%r12 movq %rdx,8(%rdi) jmp L$sqr4x_outer .p2align 5 L$sqr4x_outer: movq -32(%rsi,%rbp,1),%r14 leaq 48+8(%rsp,%r9,2),%rdi movq -24(%rsi,%rbp,1),%rax leaq -32(%rdi,%rbp,1),%rdi movq -16(%rsi,%rbp,1),%rbx movq %rax,%r15 mulq %r14 movq -24(%rdi,%rbp,1),%r10 addq %rax,%r10 movq %rbx,%rax adcq $0,%rdx movq %r10,-24(%rdi,%rbp,1) movq %rdx,%r11 mulq %r14 addq %rax,%r11 movq %rbx,%rax adcq $0,%rdx addq -16(%rdi,%rbp,1),%r11 movq %rdx,%r10 adcq $0,%r10 movq %r11,-16(%rdi,%rbp,1) xorq %r12,%r12 movq -8(%rsi,%rbp,1),%rbx mulq %r15 addq %rax,%r12 movq %rbx,%rax adcq $0,%rdx addq -8(%rdi,%rbp,1),%r12 movq %rdx,%r13 adcq $0,%r13 mulq %r14 addq %rax,%r10 movq %rbx,%rax adcq $0,%rdx addq %r12,%r10 movq %rdx,%r11 adcq $0,%r11 movq %r10,-8(%rdi,%rbp,1) leaq (%rbp),%rcx jmp L$sqr4x_inner .p2align 5 L$sqr4x_inner: movq (%rsi,%rcx,1),%rbx mulq %r15 addq %rax,%r13 movq %rbx,%rax movq %rdx,%r12 adcq $0,%r12 addq (%rdi,%rcx,1),%r13 adcq $0,%r12 .byte 0x67 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq 8(%rsi,%rcx,1),%rbx movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %r11,(%rdi,%rcx,1) movq %rbx,%rax movq %rdx,%r13 adcq $0,%r13 addq 8(%rdi,%rcx,1),%r12 leaq 16(%rcx),%rcx adcq $0,%r13 mulq %r14 addq %rax,%r10 movq %rbx,%rax adcq $0,%rdx addq %r12,%r10 movq %rdx,%r11 adcq $0,%r11 movq %r10,-8(%rdi,%rcx,1) cmpq $0,%rcx jne L$sqr4x_inner .byte 0x67 mulq %r15 addq %rax,%r13 adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,(%rdi) movq %rdx,%r12 movq %rdx,8(%rdi) addq 
$16,%rbp jnz L$sqr4x_outer movq -32(%rsi),%r14 leaq 48+8(%rsp,%r9,2),%rdi movq -24(%rsi),%rax leaq -32(%rdi,%rbp,1),%rdi movq -16(%rsi),%rbx movq %rax,%r15 mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 mulq %r14 addq %rax,%r11 movq %rbx,%rax movq %r10,-24(%rdi) movq %rdx,%r10 adcq $0,%r10 addq %r13,%r11 movq -8(%rsi),%rbx adcq $0,%r10 mulq %r15 addq %rax,%r12 movq %rbx,%rax movq %r11,-16(%rdi) movq %rdx,%r13 adcq $0,%r13 mulq %r14 addq %rax,%r10 movq %rbx,%rax movq %rdx,%r11 adcq $0,%r11 addq %r12,%r10 adcq $0,%r11 movq %r10,-8(%rdi) mulq %r15 addq %rax,%r13 movq -16(%rsi),%rax adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,(%rdi) movq %rdx,%r12 movq %rdx,8(%rdi) mulq %rbx addq $16,%rbp xorq %r14,%r14 subq %r9,%rbp xorq %r15,%r15 addq %r12,%rax adcq $0,%rdx movq %rax,8(%rdi) movq %rdx,16(%rdi) movq %r15,24(%rdi) movq -16(%rsi,%rbp,1),%rax leaq 48+8(%rsp),%rdi xorq %r10,%r10 movq 8(%rdi),%r11 leaq (%r14,%r10,2),%r12 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq 16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 24(%rdi),%r11 adcq %rax,%r12 movq -8(%rsi,%rbp,1),%rax movq %r12,(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,8(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 movq 32(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 40(%rdi),%r11 adcq %rax,%rbx movq 0(%rsi,%rbp,1),%rax movq %rbx,16(%rdi) adcq %rdx,%r8 leaq 16(%rbp),%rbp movq %r8,24(%rdi) sbbq %r15,%r15 leaq 64(%rdi),%rdi jmp L$sqr4x_shift_n_add .p2align 5 L$sqr4x_shift_n_add: leaq (%r14,%r10,2),%r12 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq -16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq -8(%rdi),%r11 adcq %rax,%r12 movq -8(%rsi,%rbp,1),%rax movq %r12,-32(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,-24(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 movq 0(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 8(%rdi),%r11 adcq %rax,%rbx movq 0(%rsi,%rbp,1),%rax movq %rbx,-16(%rdi) adcq %rdx,%r8 leaq (%r14,%r10,2),%r12 movq %r8,-8(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq 16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 24(%rdi),%r11 adcq %rax,%r12 movq 8(%rsi,%rbp,1),%rax movq %r12,0(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,8(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 movq 32(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq 40(%rdi),%r11 adcq %rax,%rbx movq 16(%rsi,%rbp,1),%rax movq %rbx,16(%rdi) adcq %rdx,%r8 movq %r8,24(%rdi) sbbq %r15,%r15 leaq 64(%rdi),%rdi addq $32,%rbp jnz L$sqr4x_shift_n_add leaq (%r14,%r10,2),%r12 .byte 0x67 shrq $63,%r10 leaq (%rcx,%r11,2),%r13 shrq $63,%r11 orq %r10,%r13 movq -16(%rdi),%r10 movq %r11,%r14 mulq %rax negq %r15 movq -8(%rdi),%r11 adcq %rax,%r12 movq -8(%rsi),%rax movq %r12,-32(%rdi) adcq %rdx,%r13 leaq (%r14,%r10,2),%rbx movq %r13,-24(%rdi) sbbq %r15,%r15 shrq $63,%r10 leaq (%rcx,%r11,2),%r8 shrq $63,%r11 orq %r10,%r8 mulq %rax negq %r15 adcq %rax,%rbx adcq %rdx,%r8 movq %rbx,-16(%rdi) movq %r8,-8(%rdi) .byte 102,72,15,126,213 __bn_sqr8x_reduction: xorq %rax,%rax leaq (%r9,%rbp,1),%rcx leaq 48+8(%rsp,%r9,2),%rdx movq %rcx,0+8(%rsp) leaq 48+8(%rsp,%r9,1),%rdi movq %rdx,8+8(%rsp) negq %r9 jmp L$8x_reduction_loop .p2align 5 L$8x_reduction_loop: leaq (%rdi,%r9,1),%rdi .byte 0x66 movq 0(%rdi),%rbx movq 8(%rdi),%r9 movq 16(%rdi),%r10 movq 24(%rdi),%r11 movq 32(%rdi),%r12 movq 40(%rdi),%r13 movq 
48(%rdi),%r14 movq 56(%rdi),%r15 movq %rax,(%rdx) leaq 64(%rdi),%rdi .byte 0x67 movq %rbx,%r8 imulq 32+8(%rsp),%rbx movq 0(%rbp),%rax movl $8,%ecx jmp L$8x_reduce .p2align 5 L$8x_reduce: mulq %rbx movq 8(%rbp),%rax negq %r8 movq %rdx,%r8 adcq $0,%r8 mulq %rbx addq %rax,%r9 movq 16(%rbp),%rax adcq $0,%rdx addq %r9,%r8 movq %rbx,48-8+8(%rsp,%rcx,8) movq %rdx,%r9 adcq $0,%r9 mulq %rbx addq %rax,%r10 movq 24(%rbp),%rax adcq $0,%rdx addq %r10,%r9 movq 32+8(%rsp),%rsi movq %rdx,%r10 adcq $0,%r10 mulq %rbx addq %rax,%r11 movq 32(%rbp),%rax adcq $0,%rdx imulq %r8,%rsi addq %r11,%r10 movq %rdx,%r11 adcq $0,%r11 mulq %rbx addq %rax,%r12 movq 40(%rbp),%rax adcq $0,%rdx addq %r12,%r11 movq %rdx,%r12 adcq $0,%r12 mulq %rbx addq %rax,%r13 movq 48(%rbp),%rax adcq $0,%rdx addq %r13,%r12 movq %rdx,%r13 adcq $0,%r13 mulq %rbx addq %rax,%r14 movq 56(%rbp),%rax adcq $0,%rdx addq %r14,%r13 movq %rdx,%r14 adcq $0,%r14 mulq %rbx movq %rsi,%rbx addq %rax,%r15 movq 0(%rbp),%rax adcq $0,%rdx addq %r15,%r14 movq %rdx,%r15 adcq $0,%r15 decl %ecx jnz L$8x_reduce leaq 64(%rbp),%rbp xorq %rax,%rax movq 8+8(%rsp),%rdx cmpq 0+8(%rsp),%rbp jae L$8x_no_tail .byte 0x66 addq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 sbbq %rsi,%rsi movq 48+56+8(%rsp),%rbx movl $8,%ecx movq 0(%rbp),%rax jmp L$8x_tail .p2align 5 L$8x_tail: mulq %rbx addq %rax,%r8 movq 8(%rbp),%rax movq %r8,(%rdi) movq %rdx,%r8 adcq $0,%r8 mulq %rbx addq %rax,%r9 movq 16(%rbp),%rax adcq $0,%rdx addq %r9,%r8 leaq 8(%rdi),%rdi movq %rdx,%r9 adcq $0,%r9 mulq %rbx addq %rax,%r10 movq 24(%rbp),%rax adcq $0,%rdx addq %r10,%r9 movq %rdx,%r10 adcq $0,%r10 mulq %rbx addq %rax,%r11 movq 32(%rbp),%rax adcq $0,%rdx addq %r11,%r10 movq %rdx,%r11 adcq $0,%r11 mulq %rbx addq %rax,%r12 movq 40(%rbp),%rax adcq $0,%rdx addq %r12,%r11 movq %rdx,%r12 adcq $0,%r12 mulq %rbx addq %rax,%r13 movq 48(%rbp),%rax adcq $0,%rdx addq %r13,%r12 movq %rdx,%r13 adcq $0,%r13 mulq %rbx addq %rax,%r14 movq 56(%rbp),%rax adcq $0,%rdx addq %r14,%r13 movq %rdx,%r14 adcq $0,%r14 mulq %rbx movq 48-16+8(%rsp,%rcx,8),%rbx addq %rax,%r15 adcq $0,%rdx addq %r15,%r14 movq 0(%rbp),%rax movq %rdx,%r15 adcq $0,%r15 decl %ecx jnz L$8x_tail leaq 64(%rbp),%rbp movq 8+8(%rsp),%rdx cmpq 0+8(%rsp),%rbp jae L$8x_tail_done movq 48+56+8(%rsp),%rbx negq %rsi movq 0(%rbp),%rax adcq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 sbbq %rsi,%rsi movl $8,%ecx jmp L$8x_tail .p2align 5 L$8x_tail_done: xorq %rax,%rax addq (%rdx),%r8 adcq $0,%r9 adcq $0,%r10 adcq $0,%r11 adcq $0,%r12 adcq $0,%r13 adcq $0,%r14 adcq $0,%r15 adcq $0,%rax negq %rsi L$8x_no_tail: adcq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 adcq $0,%rax movq -8(%rbp),%rcx xorq %rsi,%rsi .byte 102,72,15,126,213 movq %r8,0(%rdi) movq %r9,8(%rdi) .byte 102,73,15,126,217 movq %r10,16(%rdi) movq %r11,24(%rdi) movq %r12,32(%rdi) movq %r13,40(%rdi) movq %r14,48(%rdi) movq %r15,56(%rdi) leaq 64(%rdi),%rdi cmpq %rdx,%rdi jb L$8x_reduction_loop ret .p2align 5 __bn_post4x_internal: movq 0(%rbp),%r12 leaq (%rdi,%r9,1),%rbx movq %r9,%rcx .byte 102,72,15,126,207 negq %rax .byte 102,72,15,126,206 sarq $3+2,%rcx decq %r12 xorq %r10,%r10 movq 8(%rbp),%r13 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp L$sqr4x_sub_entry .p2align 4 L$sqr4x_sub: movq 0(%rbp),%r12 movq 8(%rbp),%r13 movq 
16(%rbp),%r14 movq 24(%rbp),%r15 L$sqr4x_sub_entry: leaq 32(%rbp),%rbp notq %r12 notq %r13 notq %r14 notq %r15 andq %rax,%r12 andq %rax,%r13 andq %rax,%r14 andq %rax,%r15 negq %r10 adcq 0(%rbx),%r12 adcq 8(%rbx),%r13 adcq 16(%rbx),%r14 adcq 24(%rbx),%r15 movq %r12,0(%rdi) leaq 32(%rbx),%rbx movq %r13,8(%rdi) sbbq %r10,%r10 movq %r14,16(%rdi) movq %r15,24(%rdi) leaq 32(%rdi),%rdi incq %rcx jnz L$sqr4x_sub movq %r9,%r10 negq %r9 ret .globl _bn_mulx4x_mont_gather5 .private_extern _bn_mulx4x_mont_gather5 .p2align 5 _bn_mulx4x_mont_gather5: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$mulx4x_prologue: shll $3,%r9d leaq (%r9,%r9,2),%r10 negq %r9 movq (%r8),%r8 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb L$mulx4xsp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp L$mulx4xsp_done L$mulx4xsp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp L$mulx4xsp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$mulx4x_page_walk jmp L$mulx4x_page_walk_done L$mulx4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$mulx4x_page_walk L$mulx4x_page_walk_done: movq %r8,32(%rsp) movq %rax,40(%rsp) L$mulx4x_body: call mulx4x_internal movq 40(%rsp),%rsi movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$mulx4x_epilogue: ret .p2align 5 mulx4x_internal: movq %r9,8(%rsp) movq %r9,%r10 negq %r9 shlq $5,%r9 negq %r10 leaq 128(%rdx,%r9,1),%r13 shrq $5+5,%r9 movd 8(%rax),%xmm5 subq $1,%r9 leaq L$inc(%rip),%rax movq %r13,16+8(%rsp) movq %r9,24+8(%rsp) movq %rdi,56+8(%rsp) movdqa 0(%rax),%xmm0 movdqa 16(%rax),%xmm1 leaq 88-112(%rsp,%r10,1),%r10 leaq 128(%rdx),%rdi pshufd $0,%xmm5,%xmm5 movdqa %xmm1,%xmm4 .byte 0x67 movdqa %xmm1,%xmm2 .byte 0x67 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,112(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,128(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,144(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,160(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,176(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,192(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,208(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,224(%r10) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,240(%r10) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,256(%r10) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,272(%r10) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,288(%r10) movdqa %xmm4,%xmm3 .byte 0x67 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,304(%r10) paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,320(%r10) pcmpeqd %xmm5,%xmm3 movdqa %xmm2,336(%r10) pand 64(%rdi),%xmm0 pand 80(%rdi),%xmm1 pand 96(%rdi),%xmm2 movdqa %xmm3,352(%r10) pand 112(%rdi),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa -128(%rdi),%xmm4 movdqa -112(%rdi),%xmm5 movdqa -96(%rdi),%xmm2 pand 112(%r10),%xmm4 movdqa -80(%rdi),%xmm3 pand 128(%r10),%xmm5 por %xmm4,%xmm0 pand 144(%r10),%xmm2 por %xmm5,%xmm1 pand 160(%r10),%xmm3 por %xmm2,%xmm0 
por %xmm3,%xmm1 movdqa -64(%rdi),%xmm4 movdqa -48(%rdi),%xmm5 movdqa -32(%rdi),%xmm2 pand 176(%r10),%xmm4 movdqa -16(%rdi),%xmm3 pand 192(%r10),%xmm5 por %xmm4,%xmm0 pand 208(%r10),%xmm2 por %xmm5,%xmm1 pand 224(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 movdqa 0(%rdi),%xmm4 movdqa 16(%rdi),%xmm5 movdqa 32(%rdi),%xmm2 pand 240(%r10),%xmm4 movdqa 48(%rdi),%xmm3 pand 256(%r10),%xmm5 por %xmm4,%xmm0 pand 272(%r10),%xmm2 por %xmm5,%xmm1 pand 288(%r10),%xmm3 por %xmm2,%xmm0 por %xmm3,%xmm1 pxor %xmm1,%xmm0 pshufd $0x4e,%xmm0,%xmm1 por %xmm1,%xmm0 leaq 256(%rdi),%rdi .byte 102,72,15,126,194 leaq 64+32+8(%rsp),%rbx movq %rdx,%r9 mulxq 0(%rsi),%r8,%rax mulxq 8(%rsi),%r11,%r12 addq %rax,%r11 mulxq 16(%rsi),%rax,%r13 adcq %rax,%r12 adcq $0,%r13 mulxq 24(%rsi),%rax,%r14 movq %r8,%r15 imulq 32+8(%rsp),%r8 xorq %rbp,%rbp movq %r8,%rdx movq %rdi,8+8(%rsp) leaq 32(%rsi),%rsi adcxq %rax,%r13 adcxq %rbp,%r14 mulxq 0(%rcx),%rax,%r10 adcxq %rax,%r15 adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 mulxq 16(%rcx),%rax,%r12 movq 24+8(%rsp),%rdi movq %r10,-32(%rbx) adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r11,-24(%rbx) adcxq %rax,%r12 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r12,-16(%rbx) jmp L$mulx4x_1st .p2align 5 L$mulx4x_1st: adcxq %rbp,%r15 mulxq 0(%rsi),%r10,%rax adcxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 .byte 0x67,0x67 movq %r8,%rdx adcxq %rax,%r13 adcxq %rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adoxq %r15,%r10 mulxq 0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 movq %r11,-32(%rbx) adoxq %r15,%r13 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r13,-16(%rbx) decq %rdi jnz L$mulx4x_1st movq 8(%rsp),%rax adcq %rbp,%r15 leaq (%rsi,%rax,1),%rsi addq %r15,%r14 movq 8+8(%rsp),%rdi adcq %rbp,%rbp movq %r14,-8(%rbx) jmp L$mulx4x_outer .p2align 5 L$mulx4x_outer: leaq 16-256(%rbx),%r10 pxor %xmm4,%xmm4 .byte 0x67,0x67 pxor %xmm5,%xmm5 movdqa -128(%rdi),%xmm0 movdqa -112(%rdi),%xmm1 movdqa -96(%rdi),%xmm2 pand 256(%r10),%xmm0 movdqa -80(%rdi),%xmm3 pand 272(%r10),%xmm1 por %xmm0,%xmm4 pand 288(%r10),%xmm2 por %xmm1,%xmm5 pand 304(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa -64(%rdi),%xmm0 movdqa -48(%rdi),%xmm1 movdqa -32(%rdi),%xmm2 pand 320(%r10),%xmm0 movdqa -16(%rdi),%xmm3 pand 336(%r10),%xmm1 por %xmm0,%xmm4 pand 352(%r10),%xmm2 por %xmm1,%xmm5 pand 368(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 0(%rdi),%xmm0 movdqa 16(%rdi),%xmm1 movdqa 32(%rdi),%xmm2 pand 384(%r10),%xmm0 movdqa 48(%rdi),%xmm3 pand 400(%r10),%xmm1 por %xmm0,%xmm4 pand 416(%r10),%xmm2 por %xmm1,%xmm5 pand 432(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 64(%rdi),%xmm0 movdqa 80(%rdi),%xmm1 movdqa 96(%rdi),%xmm2 pand 448(%r10),%xmm0 movdqa 112(%rdi),%xmm3 pand 464(%r10),%xmm1 por %xmm0,%xmm4 pand 480(%r10),%xmm2 por %xmm1,%xmm5 pand 496(%r10),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 por %xmm5,%xmm4 pshufd $0x4e,%xmm4,%xmm0 por %xmm4,%xmm0 leaq 256(%rdi),%rdi .byte 102,72,15,126,194 movq %rbp,(%rbx) leaq 32(%rbx,%rax,1),%rbx mulxq 0(%rsi),%r8,%r11 xorq %rbp,%rbp movq %rdx,%r9 mulxq 8(%rsi),%r14,%r12 adoxq -32(%rbx),%r8 adcxq %r14,%r11 mulxq 16(%rsi),%r15,%r13 adoxq -24(%rbx),%r11 adcxq %r15,%r12 mulxq 24(%rsi),%rdx,%r14 adoxq -16(%rbx),%r12 adcxq %rdx,%r13 leaq (%rcx,%rax,1),%rcx leaq 32(%rsi),%rsi 
adoxq -8(%rbx),%r13 adcxq %rbp,%r14 adoxq %rbp,%r14 movq %r8,%r15 imulq 32+8(%rsp),%r8 movq %r8,%rdx xorq %rbp,%rbp movq %rdi,8+8(%rsp) mulxq 0(%rcx),%rax,%r10 adcxq %rax,%r15 adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 mulxq 16(%rcx),%rax,%r12 adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq 24+8(%rsp),%rdi movq %r10,-32(%rbx) adcxq %rax,%r12 movq %r11,-24(%rbx) adoxq %rbp,%r15 movq %r12,-16(%rbx) leaq 32(%rcx),%rcx jmp L$mulx4x_inner .p2align 5 L$mulx4x_inner: mulxq 0(%rsi),%r10,%rax adcxq %rbp,%r15 adoxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq 0(%rbx),%r10 adoxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq 8(%rbx),%r11 adoxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 movq %r8,%rdx adcxq 16(%rbx),%r12 adoxq %rax,%r13 adcxq 24(%rbx),%r13 adoxq %rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adcxq %rbp,%r14 adoxq %r15,%r10 mulxq 0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 adoxq %r15,%r13 movq %r11,-32(%rbx) mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx leaq 32(%rcx),%rcx movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 movq %r13,-16(%rbx) decq %rdi jnz L$mulx4x_inner movq 0+8(%rsp),%rax adcq %rbp,%r15 subq 0(%rbx),%rdi movq 8+8(%rsp),%rdi movq 16+8(%rsp),%r10 adcq %r15,%r14 leaq (%rsi,%rax,1),%rsi adcq %rbp,%rbp movq %r14,-8(%rbx) cmpq %r10,%rdi jb L$mulx4x_outer movq -8(%rcx),%r10 movq %rbp,%r8 movq (%rcx,%rax,1),%r12 leaq (%rcx,%rax,1),%rbp movq %rax,%rcx leaq (%rbx,%rax,1),%rdi xorl %eax,%eax xorq %r15,%r15 subq %r14,%r10 adcq %r15,%r15 orq %r15,%r8 sarq $3+2,%rcx subq %r8,%rax movq 56+8(%rsp),%rdx decq %r12 movq 8(%rbp),%r13 xorq %r8,%r8 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp L$sqrx4x_sub_entry .globl _bn_powerx5 .private_extern _bn_powerx5 .p2align 5 _bn_powerx5: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$powerx5_prologue: shll $3,%r9d leaq (%r9,%r9,2),%r10 negq %r9 movq (%r8),%r8 leaq -320(%rsp,%r9,2),%r11 movq %rsp,%rbp subq %rdi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb L$pwrx_sp_alt subq %r11,%rbp leaq -320(%rbp,%r9,2),%rbp jmp L$pwrx_sp_done .p2align 5 L$pwrx_sp_alt: leaq 4096-320(,%r9,2),%r10 leaq -320(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp L$pwrx_sp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$pwrx_page_walk jmp L$pwrx_page_walk_done L$pwrx_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$pwrx_page_walk L$pwrx_page_walk_done: movq %r9,%r10 negq %r9 pxor %xmm0,%xmm0 .byte 102,72,15,110,207 .byte 102,72,15,110,209 .byte 102,73,15,110,218 .byte 102,72,15,110,226 movq %r8,32(%rsp) movq %rax,40(%rsp) L$powerx5_body: call __bn_sqrx8x_internal call __bn_postx4x_internal call __bn_sqrx8x_internal call __bn_postx4x_internal call __bn_sqrx8x_internal call __bn_postx4x_internal call __bn_sqrx8x_internal call __bn_postx4x_internal call __bn_sqrx8x_internal call __bn_postx4x_internal movq %r10,%r9 movq %rsi,%rdi .byte 102,72,15,126,209 .byte 102,72,15,126,226 movq 40(%rsp),%rax call mulx4x_internal movq 40(%rsp),%rsi movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$powerx5_epilogue: ret .globl _bn_sqrx8x_internal .private_extern _bn_sqrx8x_internal .private_extern _bn_sqrx8x_internal .p2align 5 _bn_sqrx8x_internal: __bn_sqrx8x_internal: 
_CET_ENDBR leaq 48+8(%rsp),%rdi leaq (%rsi,%r9,1),%rbp movq %r9,0+8(%rsp) movq %rbp,8+8(%rsp) jmp L$sqr8x_zero_start .p2align 5 .byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00 L$sqrx8x_zero: .byte 0x3e movdqa %xmm0,0(%rdi) movdqa %xmm0,16(%rdi) movdqa %xmm0,32(%rdi) movdqa %xmm0,48(%rdi) L$sqr8x_zero_start: movdqa %xmm0,64(%rdi) movdqa %xmm0,80(%rdi) movdqa %xmm0,96(%rdi) movdqa %xmm0,112(%rdi) leaq 128(%rdi),%rdi subq $64,%r9 jnz L$sqrx8x_zero movq 0(%rsi),%rdx xorq %r10,%r10 xorq %r11,%r11 xorq %r12,%r12 xorq %r13,%r13 xorq %r14,%r14 xorq %r15,%r15 leaq 48+8(%rsp),%rdi xorq %rbp,%rbp jmp L$sqrx8x_outer_loop .p2align 5 L$sqrx8x_outer_loop: mulxq 8(%rsi),%r8,%rax adcxq %r9,%r8 adoxq %rax,%r10 mulxq 16(%rsi),%r9,%rax adcxq %r10,%r9 adoxq %rax,%r11 .byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 adcxq %r11,%r10 adoxq %rax,%r12 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 adcxq %r12,%r11 adoxq %rax,%r13 mulxq 40(%rsi),%r12,%rax adcxq %r13,%r12 adoxq %rax,%r14 mulxq 48(%rsi),%r13,%rax adcxq %r14,%r13 adoxq %r15,%rax mulxq 56(%rsi),%r14,%r15 movq 8(%rsi),%rdx adcxq %rax,%r14 adoxq %rbp,%r15 adcq 64(%rdi),%r15 movq %r8,8(%rdi) movq %r9,16(%rdi) sbbq %rcx,%rcx xorq %rbp,%rbp mulxq 16(%rsi),%r8,%rbx mulxq 24(%rsi),%r9,%rax adcxq %r10,%r8 adoxq %rbx,%r9 mulxq 32(%rsi),%r10,%rbx adcxq %r11,%r9 adoxq %rax,%r10 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 adcxq %r12,%r10 adoxq %rbx,%r11 .byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 adcxq %r13,%r11 adoxq %r14,%r12 .byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 movq 16(%rsi),%rdx adcxq %rax,%r12 adoxq %rbx,%r13 adcxq %r15,%r13 adoxq %rbp,%r14 adcxq %rbp,%r14 movq %r8,24(%rdi) movq %r9,32(%rdi) mulxq 24(%rsi),%r8,%rbx mulxq 32(%rsi),%r9,%rax adcxq %r10,%r8 adoxq %rbx,%r9 mulxq 40(%rsi),%r10,%rbx adcxq %r11,%r9 adoxq %rax,%r10 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 adcxq %r12,%r10 adoxq %r13,%r11 .byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 .byte 0x3e movq 24(%rsi),%rdx adcxq %rbx,%r11 adoxq %rax,%r12 adcxq %r14,%r12 movq %r8,40(%rdi) movq %r9,48(%rdi) mulxq 32(%rsi),%r8,%rax adoxq %rbp,%r13 adcxq %rbp,%r13 mulxq 40(%rsi),%r9,%rbx adcxq %r10,%r8 adoxq %rax,%r9 mulxq 48(%rsi),%r10,%rax adcxq %r11,%r9 adoxq %r12,%r10 mulxq 56(%rsi),%r11,%r12 movq 32(%rsi),%rdx movq 40(%rsi),%r14 adcxq %rbx,%r10 adoxq %rax,%r11 movq 48(%rsi),%r15 adcxq %r13,%r11 adoxq %rbp,%r12 adcxq %rbp,%r12 movq %r8,56(%rdi) movq %r9,64(%rdi) mulxq %r14,%r9,%rax movq 56(%rsi),%r8 adcxq %r10,%r9 mulxq %r15,%r10,%rbx adoxq %rax,%r10 adcxq %r11,%r10 mulxq %r8,%r11,%rax movq %r14,%rdx adoxq %rbx,%r11 adcxq %r12,%r11 adcxq %rbp,%rax mulxq %r15,%r14,%rbx mulxq %r8,%r12,%r13 movq %r15,%rdx leaq 64(%rsi),%rsi adcxq %r14,%r11 adoxq %rbx,%r12 adcxq %rax,%r12 adoxq %rbp,%r13 .byte 0x67,0x67 mulxq %r8,%r8,%r14 adcxq %r8,%r13 adcxq %rbp,%r14 cmpq 8+8(%rsp),%rsi je L$sqrx8x_outer_break negq %rcx movq $-8,%rcx movq %rbp,%r15 movq 64(%rdi),%r8 adcxq 72(%rdi),%r9 adcxq 80(%rdi),%r10 adcxq 88(%rdi),%r11 adcq 96(%rdi),%r12 adcq 104(%rdi),%r13 adcq 112(%rdi),%r14 adcq 120(%rdi),%r15 leaq (%rsi),%rbp leaq 128(%rdi),%rdi sbbq %rax,%rax movq -64(%rsi),%rdx movq %rax,16+8(%rsp) movq %rdi,24+8(%rsp) xorl %eax,%eax jmp L$sqrx8x_loop .p2align 5 L$sqrx8x_loop: movq %r8,%rbx mulxq 0(%rbp),%rax,%r8 adcxq %rax,%rbx adoxq %r9,%r8 mulxq 8(%rbp),%rax,%r9 adcxq %rax,%r8 adoxq %r10,%r9 mulxq 16(%rbp),%rax,%r10 adcxq %rax,%r9 adoxq %r11,%r10 mulxq 24(%rbp),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 adcxq %rax,%r11 
adoxq %r13,%r12 mulxq 40(%rbp),%rax,%r13 adcxq %rax,%r12 adoxq %r14,%r13 mulxq 48(%rbp),%rax,%r14 movq %rbx,(%rdi,%rcx,8) movl $0,%ebx adcxq %rax,%r13 adoxq %r15,%r14 .byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 movq 8(%rsi,%rcx,8),%rdx adcxq %rax,%r14 adoxq %rbx,%r15 adcxq %rbx,%r15 .byte 0x67 incq %rcx jnz L$sqrx8x_loop leaq 64(%rbp),%rbp movq $-8,%rcx cmpq 8+8(%rsp),%rbp je L$sqrx8x_break subq 16+8(%rsp),%rbx .byte 0x66 movq -64(%rsi),%rdx adcxq 0(%rdi),%r8 adcxq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 leaq 64(%rdi),%rdi .byte 0x67 sbbq %rax,%rax xorl %ebx,%ebx movq %rax,16+8(%rsp) jmp L$sqrx8x_loop .p2align 5 L$sqrx8x_break: xorq %rbp,%rbp subq 16+8(%rsp),%rbx adcxq %rbp,%r8 movq 24+8(%rsp),%rcx adcxq %rbp,%r9 movq 0(%rsi),%rdx adcq $0,%r10 movq %r8,0(%rdi) adcq $0,%r11 adcq $0,%r12 adcq $0,%r13 adcq $0,%r14 adcq $0,%r15 cmpq %rcx,%rdi je L$sqrx8x_outer_loop movq %r9,8(%rdi) movq 8(%rcx),%r9 movq %r10,16(%rdi) movq 16(%rcx),%r10 movq %r11,24(%rdi) movq 24(%rcx),%r11 movq %r12,32(%rdi) movq 32(%rcx),%r12 movq %r13,40(%rdi) movq 40(%rcx),%r13 movq %r14,48(%rdi) movq 48(%rcx),%r14 movq %r15,56(%rdi) movq 56(%rcx),%r15 movq %rcx,%rdi jmp L$sqrx8x_outer_loop .p2align 5 L$sqrx8x_outer_break: movq %r9,72(%rdi) .byte 102,72,15,126,217 movq %r10,80(%rdi) movq %r11,88(%rdi) movq %r12,96(%rdi) movq %r13,104(%rdi) movq %r14,112(%rdi) leaq 48+8(%rsp),%rdi movq (%rsi,%rcx,1),%rdx movq 8(%rdi),%r11 xorq %r10,%r10 movq 0+8(%rsp),%r9 adoxq %r11,%r11 movq 16(%rdi),%r12 movq 24(%rdi),%r13 .p2align 5 L$sqrx4x_shift_n_add: mulxq %rdx,%rax,%rbx adoxq %r12,%r12 adcxq %r10,%rax .byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 .byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 adoxq %r13,%r13 adcxq %r11,%rbx movq 40(%rdi),%r11 movq %rax,0(%rdi) movq %rbx,8(%rdi) mulxq %rdx,%rax,%rbx adoxq %r10,%r10 adcxq %r12,%rax movq 16(%rsi,%rcx,1),%rdx movq 48(%rdi),%r12 adoxq %r11,%r11 adcxq %r13,%rbx movq 56(%rdi),%r13 movq %rax,16(%rdi) movq %rbx,24(%rdi) mulxq %rdx,%rax,%rbx adoxq %r12,%r12 adcxq %r10,%rax movq 24(%rsi,%rcx,1),%rdx leaq 32(%rcx),%rcx movq 64(%rdi),%r10 adoxq %r13,%r13 adcxq %r11,%rbx movq 72(%rdi),%r11 movq %rax,32(%rdi) movq %rbx,40(%rdi) mulxq %rdx,%rax,%rbx adoxq %r10,%r10 adcxq %r12,%rax jrcxz L$sqrx4x_shift_n_add_break .byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 adoxq %r11,%r11 adcxq %r13,%rbx movq 80(%rdi),%r12 movq 88(%rdi),%r13 movq %rax,48(%rdi) movq %rbx,56(%rdi) leaq 64(%rdi),%rdi nop jmp L$sqrx4x_shift_n_add .p2align 5 L$sqrx4x_shift_n_add_break: adcxq %r13,%rbx movq %rax,48(%rdi) movq %rbx,56(%rdi) leaq 64(%rdi),%rdi .byte 102,72,15,126,213 __bn_sqrx8x_reduction: xorl %eax,%eax movq 32+8(%rsp),%rbx movq 48+8(%rsp),%rdx leaq -64(%rbp,%r9,1),%rcx movq %rcx,0+8(%rsp) movq %rdi,8+8(%rsp) leaq 48+8(%rsp),%rdi jmp L$sqrx8x_reduction_loop .p2align 5 L$sqrx8x_reduction_loop: movq 8(%rdi),%r9 movq 16(%rdi),%r10 movq 24(%rdi),%r11 movq 32(%rdi),%r12 movq %rdx,%r8 imulq %rbx,%rdx movq 40(%rdi),%r13 movq 48(%rdi),%r14 movq 56(%rdi),%r15 movq %rax,24+8(%rsp) leaq 64(%rdi),%rdi xorq %rsi,%rsi movq $-8,%rcx jmp L$sqrx8x_reduce .p2align 5 L$sqrx8x_reduce: movq %r8,%rbx mulxq 0(%rbp),%rax,%r8 adcxq %rbx,%rax adoxq %r9,%r8 mulxq 8(%rbp),%rbx,%r9 adcxq %rbx,%r8 adoxq %r10,%r9 mulxq 16(%rbp),%rbx,%r10 adcxq %rbx,%r9 adoxq %r11,%r10 mulxq 24(%rbp),%rbx,%r11 adcxq %rbx,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 movq %rdx,%rax movq %r8,%rdx adcxq %rbx,%r11 adoxq %r13,%r12 mulxq 32+8(%rsp),%rbx,%rdx movq 
%rax,%rdx movq %rax,64+48+8(%rsp,%rcx,8) mulxq 40(%rbp),%rax,%r13 adcxq %rax,%r12 adoxq %r14,%r13 mulxq 48(%rbp),%rax,%r14 adcxq %rax,%r13 adoxq %r15,%r14 mulxq 56(%rbp),%rax,%r15 movq %rbx,%rdx adcxq %rax,%r14 adoxq %rsi,%r15 adcxq %rsi,%r15 .byte 0x67,0x67,0x67 incq %rcx jnz L$sqrx8x_reduce movq %rsi,%rax cmpq 0+8(%rsp),%rbp jae L$sqrx8x_no_tail movq 48+8(%rsp),%rdx addq 0(%rdi),%r8 leaq 64(%rbp),%rbp movq $-8,%rcx adcxq 8(%rdi),%r9 adcxq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 leaq 64(%rdi),%rdi sbbq %rax,%rax xorq %rsi,%rsi movq %rax,16+8(%rsp) jmp L$sqrx8x_tail .p2align 5 L$sqrx8x_tail: movq %r8,%rbx mulxq 0(%rbp),%rax,%r8 adcxq %rax,%rbx adoxq %r9,%r8 mulxq 8(%rbp),%rax,%r9 adcxq %rax,%r8 adoxq %r10,%r9 mulxq 16(%rbp),%rax,%r10 adcxq %rax,%r9 adoxq %r11,%r10 mulxq 24(%rbp),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 adcxq %rax,%r11 adoxq %r13,%r12 mulxq 40(%rbp),%rax,%r13 adcxq %rax,%r12 adoxq %r14,%r13 mulxq 48(%rbp),%rax,%r14 adcxq %rax,%r13 adoxq %r15,%r14 mulxq 56(%rbp),%rax,%r15 movq 72+48+8(%rsp,%rcx,8),%rdx adcxq %rax,%r14 adoxq %rsi,%r15 movq %rbx,(%rdi,%rcx,8) movq %r8,%rbx adcxq %rsi,%r15 incq %rcx jnz L$sqrx8x_tail cmpq 0+8(%rsp),%rbp jae L$sqrx8x_tail_done subq 16+8(%rsp),%rsi movq 48+8(%rsp),%rdx leaq 64(%rbp),%rbp adcq 0(%rdi),%r8 adcq 8(%rdi),%r9 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 leaq 64(%rdi),%rdi sbbq %rax,%rax subq $8,%rcx xorq %rsi,%rsi movq %rax,16+8(%rsp) jmp L$sqrx8x_tail .p2align 5 L$sqrx8x_tail_done: xorq %rax,%rax addq 24+8(%rsp),%r8 adcq $0,%r9 adcq $0,%r10 adcq $0,%r11 adcq $0,%r12 adcq $0,%r13 adcq $0,%r14 adcq $0,%r15 adcq $0,%rax subq 16+8(%rsp),%rsi L$sqrx8x_no_tail: adcq 0(%rdi),%r8 .byte 102,72,15,126,217 adcq 8(%rdi),%r9 movq 56(%rbp),%rsi .byte 102,72,15,126,213 adcq 16(%rdi),%r10 adcq 24(%rdi),%r11 adcq 32(%rdi),%r12 adcq 40(%rdi),%r13 adcq 48(%rdi),%r14 adcq 56(%rdi),%r15 adcq $0,%rax movq 32+8(%rsp),%rbx movq 64(%rdi,%rcx,1),%rdx movq %r8,0(%rdi) leaq 64(%rdi),%r8 movq %r9,8(%rdi) movq %r10,16(%rdi) movq %r11,24(%rdi) movq %r12,32(%rdi) movq %r13,40(%rdi) movq %r14,48(%rdi) movq %r15,56(%rdi) leaq 64(%rdi,%rcx,1),%rdi cmpq 8+8(%rsp),%r8 jb L$sqrx8x_reduction_loop ret .p2align 5 __bn_postx4x_internal: movq 0(%rbp),%r12 movq %rcx,%r10 movq %rcx,%r9 negq %rax sarq $3+2,%rcx .byte 102,72,15,126,202 .byte 102,72,15,126,206 decq %r12 movq 8(%rbp),%r13 xorq %r8,%r8 movq 16(%rbp),%r14 movq 24(%rbp),%r15 jmp L$sqrx4x_sub_entry .p2align 4 L$sqrx4x_sub: movq 0(%rbp),%r12 movq 8(%rbp),%r13 movq 16(%rbp),%r14 movq 24(%rbp),%r15 L$sqrx4x_sub_entry: andnq %rax,%r12,%r12 leaq 32(%rbp),%rbp andnq %rax,%r13,%r13 andnq %rax,%r14,%r14 andnq %rax,%r15,%r15 negq %r8 adcq 0(%rdi),%r12 adcq 8(%rdi),%r13 adcq 16(%rdi),%r14 adcq 24(%rdi),%r15 movq %r12,0(%rdx) leaq 32(%rdi),%rdi movq %r13,8(%rdx) sbbq %r8,%r8 movq %r14,16(%rdx) movq %r15,24(%rdx) leaq 32(%rdx),%rdx incq %rcx jnz L$sqrx4x_sub negq %r9 ret .globl _bn_scatter5 .private_extern _bn_scatter5 .p2align 4 _bn_scatter5: _CET_ENDBR cmpl $0,%esi jz L$scatter_epilogue leaq (%rdx,%rcx,8),%rdx L$scatter: movq (%rdi),%rax leaq 8(%rdi),%rdi movq %rax,(%rdx) leaq 256(%rdx),%rdx subl $1,%esi jnz L$scatter L$scatter_epilogue: ret .globl _bn_gather5 .private_extern _bn_gather5 .p2align 5 _bn_gather5: L$SEH_begin_bn_gather5: _CET_ENDBR .byte 0x4c,0x8d,0x14,0x24 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 leaq L$inc(%rip),%rax andq 
$-16,%rsp movd %ecx,%xmm5 movdqa 0(%rax),%xmm0 movdqa 16(%rax),%xmm1 leaq 128(%rdx),%r11 leaq 128(%rsp),%rax pshufd $0,%xmm5,%xmm5 movdqa %xmm1,%xmm4 movdqa %xmm1,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,-128(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,-112(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,-96(%rax) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,-80(%rax) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,-64(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,-48(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,-32(%rax) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,-16(%rax) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,0(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,16(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,32(%rax) movdqa %xmm4,%xmm2 paddd %xmm0,%xmm1 pcmpeqd %xmm5,%xmm0 movdqa %xmm3,48(%rax) movdqa %xmm4,%xmm3 paddd %xmm1,%xmm2 pcmpeqd %xmm5,%xmm1 movdqa %xmm0,64(%rax) movdqa %xmm4,%xmm0 paddd %xmm2,%xmm3 pcmpeqd %xmm5,%xmm2 movdqa %xmm1,80(%rax) movdqa %xmm4,%xmm1 paddd %xmm3,%xmm0 pcmpeqd %xmm5,%xmm3 movdqa %xmm2,96(%rax) movdqa %xmm4,%xmm2 movdqa %xmm3,112(%rax) jmp L$gather .p2align 5 L$gather: pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 movdqa -128(%r11),%xmm0 movdqa -112(%r11),%xmm1 movdqa -96(%r11),%xmm2 pand -128(%rax),%xmm0 movdqa -80(%r11),%xmm3 pand -112(%rax),%xmm1 por %xmm0,%xmm4 pand -96(%rax),%xmm2 por %xmm1,%xmm5 pand -80(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa -64(%r11),%xmm0 movdqa -48(%r11),%xmm1 movdqa -32(%r11),%xmm2 pand -64(%rax),%xmm0 movdqa -16(%r11),%xmm3 pand -48(%rax),%xmm1 por %xmm0,%xmm4 pand -32(%rax),%xmm2 por %xmm1,%xmm5 pand -16(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 0(%r11),%xmm0 movdqa 16(%r11),%xmm1 movdqa 32(%r11),%xmm2 pand 0(%rax),%xmm0 movdqa 48(%r11),%xmm3 pand 16(%rax),%xmm1 por %xmm0,%xmm4 pand 32(%rax),%xmm2 por %xmm1,%xmm5 pand 48(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqa 64(%r11),%xmm0 movdqa 80(%r11),%xmm1 movdqa 96(%r11),%xmm2 pand 64(%rax),%xmm0 movdqa 112(%r11),%xmm3 pand 80(%rax),%xmm1 por %xmm0,%xmm4 pand 96(%rax),%xmm2 por %xmm1,%xmm5 pand 112(%rax),%xmm3 por %xmm2,%xmm4 por %xmm3,%xmm5 por %xmm5,%xmm4 leaq 256(%r11),%r11 pshufd $0x4e,%xmm4,%xmm0 por %xmm4,%xmm0 movq %xmm0,(%rdi) leaq 8(%rdi),%rdi subl $1,%esi jnz L$gather leaq (%r10),%rsp ret L$SEH_end_bn_gather5: .section __DATA,__const .p2align 6 L$inc: .long 0,0, 1,1 .long 2,2, 2,2 .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,119,105,116,104,32,115,99,97,116,116,101,114,47,103,97,116,104,101,114,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .text #endif
marvin-hansen/iggy-streaming-system
25,199
thirdparty/crates/ring-0.17.9/pregenerated/vpaes-armv8-ios64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include <ring-core/arm_arch.h> .section __TEXT,__const .align 7 // totally strategic alignment _vpaes_consts: Lk_mc_forward: // mc_forward .quad 0x0407060500030201, 0x0C0F0E0D080B0A09 .quad 0x080B0A0904070605, 0x000302010C0F0E0D .quad 0x0C0F0E0D080B0A09, 0x0407060500030201 .quad 0x000302010C0F0E0D, 0x080B0A0904070605 Lk_mc_backward: // mc_backward .quad 0x0605040702010003, 0x0E0D0C0F0A09080B .quad 0x020100030E0D0C0F, 0x0A09080B06050407 .quad 0x0E0D0C0F0A09080B, 0x0605040702010003 .quad 0x0A09080B06050407, 0x020100030E0D0C0F Lk_sr: // sr .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 .quad 0x030E09040F0A0500, 0x0B06010C07020D08 .quad 0x0F060D040B020900, 0x070E050C030A0108 .quad 0x0B0E0104070A0D00, 0x0306090C0F020508 // // "Hot" constants // Lk_inv: // inv, inva .quad 0x0E05060F0D080180, 0x040703090A0B0C02 .quad 0x01040A060F0B0780, 0x030D0E0C02050809 Lk_ipt: // input transform (lo, hi) .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 Lk_sbo: // sbou, sbot .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA Lk_sb1: // sb1u, sb1t .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 Lk_sb2: // sb2u, sb2t .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD // // Key schedule constants // Lk_dksd: // decryption key schedule: invskew x*D .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E Lk_dksb: // decryption key schedule: invskew x*B .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99 .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8 Lk_dkse: // decryption key schedule: invskew x*E + 0x63 .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086 .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487 Lk_dks9: // decryption key schedule: invskew x*9 .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE Lk_rcon: // rcon .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 Lk_opt: // output transform .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 Lk_deskew: // deskew tables: inverts the sbox's "skew" .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 .align 2 .align 6 .text ## ## _aes_preheat ## ## Fills register %r10 -> .aes_consts (so you can -fPIC) ## and %xmm9-%xmm15 as specified below. ## .align 4 _vpaes_encrypt_preheat: adrp x10, Lk_inv@PAGE add x10, x10, Lk_inv@PAGEOFF movi v17.16b, #0x0f ld1 {v18.2d,v19.2d}, [x10],#32 // Lk_inv ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // Lk_ipt, Lk_sbo ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // Lk_sb1, Lk_sb2 ret ## ## _aes_encrypt_core ## ## AES-encrypt %xmm0. 
## ## Inputs: ## %xmm0 = input ## %xmm9-%xmm15 as in _vpaes_preheat ## (%rdx) = scheduled keys ## ## Output in %xmm0 ## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax ## Preserves %xmm6 - %xmm8 so you get some local vectors ## ## .align 4 _vpaes_encrypt_core: mov x9, x2 ldr w8, [x2,#240] // pull rounds adrp x11, Lk_mc_forward@PAGE+16 add x11, x11, Lk_mc_forward@PAGEOFF+16 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 b Lenc_entry .align 4 Lenc_loop: // middle of middle round add x10, x11, #0x40 tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[] tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[] tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D and x11, x11, #~(1<<6) // and $0x30, %r11 # ... 
mod 4 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D sub w8, w8, #1 // nr-- Lenc_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 cbnz w8, Lenc_loop // middle of last round add x10, x11, #0x80 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[] tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 ret .align 4 _vpaes_encrypt_2x: mov x9, x2 ldr w8, [x2,#240] // pull rounds adrp x11, Lk_mc_forward@PAGE+16 add x11, x11, Lk_mc_forward@PAGEOFF+16 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0 and v9.16b, v15.16b, v17.16b ushr v8.16b, v15.16b, #4 tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 tbl v9.16b, {v20.16b}, v9.16b // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 tbl v10.16b, {v21.16b}, v8.16b eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 eor v8.16b, v9.16b, v16.16b eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 eor v8.16b, v8.16b, v10.16b b Lenc_2x_entry .align 4 Lenc_2x_loop: // middle of middle round add x10, x11, #0x40 tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u tbl v12.16b, {v25.16b}, v10.16b ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # Lk_mc_forward[] tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t tbl v8.16b, {v24.16b}, v11.16b eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u tbl v13.16b, {v27.16b}, v10.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A eor v8.16b, v8.16b, v12.16b tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t tbl v10.16b, {v26.16b}, v11.16b ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # Lk_mc_backward[] tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B tbl v11.16b, {v8.16b}, v1.16b eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A eor v10.16b, v10.16b, v13.16b tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D tbl v8.16b, {v8.16b}, v4.16b eor v3.16b, v3.16b, v2.16b // 
vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B eor v11.16b, v11.16b, v10.16b tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C tbl v12.16b, {v11.16b},v1.16b eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D eor v8.16b, v8.16b, v11.16b and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D eor v8.16b, v8.16b, v12.16b sub w8, w8, #1 // nr-- Lenc_2x_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i and v9.16b, v8.16b, v17.16b ushr v8.16b, v8.16b, #4 tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k tbl v13.16b, {v19.16b},v9.16b eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j eor v9.16b, v9.16b, v8.16b tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v11.16b, {v18.16b},v8.16b tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j tbl v12.16b, {v18.16b},v9.16b eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v11.16b, v11.16b, v13.16b eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k eor v12.16b, v12.16b, v13.16b tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v10.16b, {v18.16b},v11.16b tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak tbl v11.16b, {v18.16b},v12.16b eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v10.16b, v10.16b, v9.16b eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo eor v11.16b, v11.16b, v8.16b ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 cbnz w8, Lenc_2x_loop // middle of last round add x10, x11, #0x80 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou tbl v12.16b, {v22.16b}, v10.16b ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # Lk_sr[] tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t tbl v8.16b, {v23.16b}, v11.16b eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A eor v8.16b, v8.16b, v12.16b tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0 tbl v1.16b, {v8.16b},v1.16b ret ######################################################## ## ## ## AES key schedule ## ## ## ######################################################## .align 4 _vpaes_key_preheat: adrp x10, Lk_inv@PAGE add x10, x10, Lk_inv@PAGEOFF movi v16.16b, #0x5b // Lk_s63 adrp x11, Lk_sb1@PAGE add x11, x11, Lk_sb1@PAGEOFF movi v17.16b, #0x0f // Lk_s0F ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // Lk_inv, Lk_ipt adrp x10, Lk_dksd@PAGE add x10, x10, Lk_dksd@PAGEOFF ld1 {v22.2d,v23.2d}, [x11] // Lk_sb1 adrp x11, Lk_mc_forward@PAGE add x11, x11, Lk_mc_forward@PAGEOFF ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // Lk_dksd, Lk_dksb ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // Lk_dkse, Lk_dks9 ld1 {v8.2d}, [x10] // Lk_rcon ld1 {v9.2d}, [x11] // Lk_mc_forward[0] ret .align 4 _vpaes_schedule_core: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp,#-16]! 
add x29,sp,#0 bl _vpaes_key_preheat // load the tables ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned) // input transform mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3 bl _vpaes_schedule_transform mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7 adrp x10, Lk_sr@PAGE // lea Lk_sr(%rip),%r10 add x10, x10, Lk_sr@PAGEOFF add x8, x8, x10 // encrypting, output zeroth round key after transform st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) cmp w1, #192 // cmp $192, %esi b.hi Lschedule_256 b.eq Lschedule_192 // 128: fall though ## ## .schedule_128 ## ## 128-bit specific part of key schedule. ## ## This schedule is really simple, because all its parts ## are accomplished by the subroutines. ## Lschedule_128: mov x0, #10 // mov $10, %esi Loop_schedule_128: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_round cbz x0, Lschedule_mangle_last bl _vpaes_schedule_mangle // write output b Loop_schedule_128 ## ## .aes_schedule_192 ## ## 192-bit specific part of key schedule. ## ## The main body of this schedule is the same as the 128-bit ## schedule, but with more smearing. The long, high side is ## stored in %xmm7 as before, and the short, low side is in ## the high bits of %xmm6. ## ## This schedule is somewhat nastier, however, because each ## round produces 192 bits of key material, or 1.5 round keys. ## Therefore, on each cycle we do 2 rounds and produce 3 round ## keys. ## .align 4 Lschedule_192: sub x0, x0, #8 ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned) bl _vpaes_schedule_transform // input transform mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4 ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros mov x0, #4 // mov $4, %esi Loop_schedule_192: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_round ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0 bl _vpaes_schedule_mangle // save key n bl _vpaes_schedule_192_smear bl _vpaes_schedule_mangle // save key n+1 bl _vpaes_schedule_round cbz x0, Lschedule_mangle_last bl _vpaes_schedule_mangle // save key n+2 bl _vpaes_schedule_192_smear b Loop_schedule_192 ## ## .aes_schedule_256 ## ## 256-bit specific part of key schedule. ## ## The structure here is very similar to the 128-bit ## schedule, but with an additional "low side" in ## %xmm6. The low side's rounds are the same as the ## high side's, except no rcon and no rotation. ## .align 4 Lschedule_256: ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) bl _vpaes_schedule_transform // input transform mov x0, #7 // mov $7, %esi Loop_schedule_256: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_mangle // output low result mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6 // high round bl _vpaes_schedule_round cbz x0, Lschedule_mangle_last bl _vpaes_schedule_mangle // low round. swap xmm7 and xmm6 dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 movi v4.16b, #0 mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5 mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7 bl _vpaes_schedule_low_round mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7 b Loop_schedule_256 ## ## .aes_schedule_mangle_last ## ## Mangler for last round of key schedule ## Mangles %xmm0 ## when encrypting, outputs out(%xmm0) ^ 63 ## when decrypting, outputs unskew(%xmm0) ## ## Always called right before return... 
jumps to cleanup and exits ## .align 4 Lschedule_mangle_last: // schedule last round key from xmm0 adrp x11, Lk_deskew@PAGE // lea Lk_deskew(%rip),%r11 # prepare to deskew add x11, x11, Lk_deskew@PAGEOFF cbnz w3, Lschedule_mangle_last_dec // encrypting ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1 adrp x11, Lk_opt@PAGE // lea Lk_opt(%rip), %r11 # prepare to output transform add x11, x11, Lk_opt@PAGEOFF add x2, x2, #32 // add $32, %rdx tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute Lschedule_mangle_last_dec: ld1 {v20.2d,v21.2d}, [x11] // reload constants sub x2, x2, #16 // add $-16, %rdx eor v0.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm0 bl _vpaes_schedule_transform // output transform st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key // cleanup eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2 eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3 eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5 eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6 eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7 ldp x29, x30, [sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret ## ## .aes_schedule_192_smear ## ## Smear the short, low side in the 192-bit key schedule. ## ## Inputs: ## %xmm7: high side, b a x y ## %xmm6: low side, d c 0 0 ## %xmm13: 0 ## ## Outputs: ## %xmm6: b+c+d b+c 0 0 ## %xmm0: b+c+d b+c b a ## .align 4 _vpaes_schedule_192_smear: movi v1.16b, #0 dup v0.4s, v7.s[3] ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0 ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0 ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros ret ## ## .aes_schedule_round ## ## Runs one main round of the key schedule on %xmm0, %xmm7 ## ## Specifically, runs subbytes on the high dword of %xmm0 ## then rotates it by one byte and xors into the low dword of ## %xmm7. ## ## Adds rcon from low byte of %xmm8, then rotates %xmm8 for ## next rcon. ## ## Smears the dwords of %xmm7 by xoring the low into the ## second low, result into third, result into highest. ## ## Returns results in %xmm7 = %xmm0. ## Clobbers %xmm1-%xmm4, %r11. ## .align 4 _vpaes_schedule_round: // extract rcon from xmm8 movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4 ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1 ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 // rotate dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0 // fall through... // low round: same as high round, but no rotation and no rcon. 
_vpaes_schedule_low_round: // smear xmm7 ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4 // subbytes and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7 tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v7.16b, v7.16b, v16.16b // vpxor Lk_s63(%rip), %xmm7, %xmm7 tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output // add in smeared stuff eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0 eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7 ret ## ## .aes_schedule_transform ## ## Linear-transform %xmm0 according to tables at (%r11) ## ## Requires that %xmm9 = 0x0F0F... as in preheat ## Output in %xmm0 ## Clobbers %xmm1, %xmm2 ## .align 4 _vpaes_schedule_transform: and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 // vmovdqa (%r11), %xmm2 # lo tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 // vmovdqa 16(%r11), %xmm1 # hi tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 ret ## ## .aes_schedule_mangle ## ## Mangle xmm0 from (basis-transformed) standard version ## to our version. ## ## On encrypt, ## xor with 0x63 ## multiply by circulant 0,1,1,1 ## apply shiftrows transform ## ## On decrypt, ## xor with 0x63 ## multiply by "inverse mixcolumns" circulant E,B,D,9 ## deskew ## apply shiftrows transform ## ## ## Writes out to (%rdx), and increments or decrements it ## Keeps track of round number mod 4 in %r8 ## Preserves xmm0 ## Clobbers xmm1-xmm5 ## .align 4 _vpaes_schedule_mangle: mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later // vmovdqa .Lk_mc_forward(%rip),%xmm5 // encrypting eor v4.16b, v0.16b, v16.16b // vpxor Lk_s63(%rip), %xmm0, %xmm4 add x2, x2, #16 // add $16, %rdx tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4 tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1 tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3 eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3 Lschedule_mangle_both: tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 add x8, x8, #48 // add $-16, %r8 and x8, x8, #~(1<<6) // and $0x30, %r8 st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) ret .globl _vpaes_set_encrypt_key .private_extern _vpaes_set_encrypt_key .align 4 _vpaes_set_encrypt_key: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so lsr w9, w1, #5 // shr $5,%eax add w9, w9, #5 // $5,%eax str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; mov w3, #0 // mov $0,%ecx mov x8, #0x30 // mov $0x30,%r8d bl _vpaes_schedule_core eor x0, x0, x0 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .globl _vpaes_ctr32_encrypt_blocks .private_extern _vpaes_ctr32_encrypt_blocks .align 4 _vpaes_ctr32_encrypt_blocks: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so stp d10,d11,[sp,#-16]! stp d12,d13,[sp,#-16]! stp d14,d15,[sp,#-16]! cbz x2, Lctr32_done // Note, unlike the other functions, x2 here is measured in blocks, // not bytes. mov x17, x2 mov x2, x3 // Load the IV and counter portion. ldr w6, [x4, #12] ld1 {v7.16b}, [x4] bl _vpaes_encrypt_preheat tst x17, #1 rev w6, w6 // The counter is big-endian. b.eq Lctr32_prep_loop // Handle one block so the remaining block count is even for // _vpaes_encrypt_2x. ld1 {v6.16b}, [x0], #16 // Load input ahead of time bl _vpaes_encrypt_core eor v0.16b, v0.16b, v6.16b // XOR input and result st1 {v0.16b}, [x1], #16 subs x17, x17, #1 // Update the counter. add w6, w6, #1 rev w7, w6 mov v7.s[3], w7 b.ls Lctr32_done Lctr32_prep_loop: // _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x // uses v14 and v15. mov v15.16b, v7.16b mov v14.16b, v7.16b add w6, w6, #1 rev w7, w6 mov v15.s[3], w7 Lctr32_loop: ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time bl _vpaes_encrypt_2x eor v0.16b, v0.16b, v6.16b // XOR input and result eor v1.16b, v1.16b, v7.16b // XOR input and result (#2) st1 {v0.16b,v1.16b}, [x1], #32 subs x17, x17, #2 // Update the counter. add w7, w6, #1 add w6, w6, #2 rev w7, w7 mov v14.s[3], w7 rev w7, w6 mov v15.s[3], w7 b.hi Lctr32_loop Lctr32_done: ldp d14,d15,[sp],#16 ldp d12,d13,[sp],#16 ldp d10,d11,[sp],#16 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
marvin-hansen/iggy-streaming-system
40,454
thirdparty/crates/ring-0.17.9/pregenerated/chacha-armv8-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include <ring-core/arm_arch.h> .section .rodata .align 5 .Lsigma: .quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral .Lone: .long 1,0,0,0 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .text .globl ChaCha20_ctr32_nohw .hidden ChaCha20_ctr32_nohw .type ChaCha20_ctr32_nohw,%function .align 5 ChaCha20_ctr32_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 adrp x5,.Lsigma add x5,x5,:lo12:.Lsigma stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#64 ldp x22,x23,[x5] // load sigma ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ldp x28,x30,[x4] // load counter #ifdef __AARCH64EB__ ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif .Loop_outer: mov w5,w22 // unpack key block lsr x6,x22,#32 mov w7,w23 lsr x8,x23,#32 mov w9,w24 lsr x10,x24,#32 mov w11,w25 lsr x12,x25,#32 mov w13,w26 lsr x14,x26,#32 mov w15,w27 lsr x16,x27,#32 mov w17,w28 lsr x19,x28,#32 mov w20,w30 lsr x21,x30,#32 mov x4,#10 subs x2,x2,#64 .Loop: sub x4,x4,#1 add w5,w5,w9 add w6,w6,w10 add w7,w7,w11 add w8,w8,w12 eor w17,w17,w5 eor w19,w19,w6 eor w20,w20,w7 eor w21,w21,w8 ror w17,w17,#16 ror w19,w19,#16 ror w20,w20,#16 ror w21,w21,#16 add w13,w13,w17 add w14,w14,w19 add w15,w15,w20 add w16,w16,w21 eor w9,w9,w13 eor w10,w10,w14 eor w11,w11,w15 eor w12,w12,w16 ror w9,w9,#20 ror w10,w10,#20 ror w11,w11,#20 ror w12,w12,#20 add w5,w5,w9 add w6,w6,w10 add w7,w7,w11 add w8,w8,w12 eor w17,w17,w5 eor w19,w19,w6 eor w20,w20,w7 eor w21,w21,w8 ror w17,w17,#24 ror w19,w19,#24 ror w20,w20,#24 ror w21,w21,#24 add w13,w13,w17 add w14,w14,w19 add w15,w15,w20 add w16,w16,w21 eor w9,w9,w13 eor w10,w10,w14 eor w11,w11,w15 eor w12,w12,w16 ror w9,w9,#25 ror w10,w10,#25 ror w11,w11,#25 ror w12,w12,#25 add w5,w5,w10 add w6,w6,w11 add w7,w7,w12 add w8,w8,w9 eor w21,w21,w5 eor w17,w17,w6 eor w19,w19,w7 eor w20,w20,w8 ror w21,w21,#16 ror w17,w17,#16 ror w19,w19,#16 ror w20,w20,#16 add w15,w15,w21 add w16,w16,w17 add w13,w13,w19 add w14,w14,w20 eor w10,w10,w15 eor w11,w11,w16 eor w12,w12,w13 eor w9,w9,w14 ror w10,w10,#20 ror w11,w11,#20 ror w12,w12,#20 ror w9,w9,#20 add w5,w5,w10 add w6,w6,w11 add w7,w7,w12 add w8,w8,w9 eor w21,w21,w5 eor w17,w17,w6 eor w19,w19,w7 eor w20,w20,w8 ror w21,w21,#24 ror w17,w17,#24 ror w19,w19,#24 ror w20,w20,#24 add w15,w15,w21 add w16,w16,w17 add w13,w13,w19 add w14,w14,w20 eor w10,w10,w15 eor w11,w11,w16 eor w12,w12,w13 eor w9,w9,w14 ror w10,w10,#25 ror w11,w11,#25 ror w12,w12,#25 ror w9,w9,#25 cbnz x4,.Loop add w5,w5,w22 // accumulate key block add x6,x6,x22,lsr#32 add w7,w7,w23 add x8,x8,x23,lsr#32 add w9,w9,w24 add x10,x10,x24,lsr#32 add w11,w11,w25 add x12,x12,x25,lsr#32 add w13,w13,w26 add x14,x14,x26,lsr#32 add w15,w15,w27 add x16,x16,x27,lsr#32 add w17,w17,w28 add x19,x19,x28,lsr#32 add w20,w20,w30 add x21,x21,x30,lsr#32 b.lo .Ltail add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp 
x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#1 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 b.hi .Loop_outer ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .align 4 .Ltail: add x2,x2,#64 .Less_than_64: sub x0,x0,#1 add x1,x1,x2 add x0,x0,x2 add x4,sp,x2 neg x2,x2 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif stp x5,x7,[sp,#0] stp x9,x11,[sp,#16] stp x13,x15,[sp,#32] stp x17,x20,[sp,#48] .Loop_tail: ldrb w10,[x1,x2] ldrb w11,[x4,x2] add x2,x2,#1 eor w10,w10,w11 strb w10,[x0,x2] cbnz x2,.Loop_tail stp xzr,xzr,[sp,#0] stp xzr,xzr,[sp,#16] stp xzr,xzr,[sp,#32] stp xzr,xzr,[sp,#48] ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .size ChaCha20_ctr32_nohw,.-ChaCha20_ctr32_nohw .globl ChaCha20_ctr32_neon .hidden ChaCha20_ctr32_neon .type ChaCha20_ctr32_neon,%function .align 5 ChaCha20_ctr32_neon: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 adrp x5,.Lsigma add x5,x5,:lo12:.Lsigma stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] cmp x2,#512 b.hs .L512_or_more_neon sub sp,sp,#64 ldp x22,x23,[x5] // load sigma ld1 {v24.4s},[x5],#16 ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ld1 {v25.4s,v26.4s},[x3] ldp x28,x30,[x4] // load counter ld1 {v27.4s},[x4] ld1 {v31.4s},[x5] #ifdef __AARCH64EB__ rev64 v24.4s,v24.4s ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif add v27.4s,v27.4s,v31.4s // += 1 add v28.4s,v27.4s,v31.4s add v29.4s,v28.4s,v31.4s shl v31.4s,v31.4s,#2 // 1 -> 4 .Loop_outer_neon: mov w5,w22 // unpack key block lsr x6,x22,#32 mov v0.16b,v24.16b mov w7,w23 lsr x8,x23,#32 mov v4.16b,v24.16b mov w9,w24 lsr x10,x24,#32 mov v16.16b,v24.16b mov w11,w25 mov v1.16b,v25.16b lsr x12,x25,#32 mov v5.16b,v25.16b mov w13,w26 mov v17.16b,v25.16b lsr x14,x26,#32 mov v3.16b,v27.16b mov w15,w27 mov v7.16b,v28.16b lsr x16,x27,#32 mov v19.16b,v29.16b mov w17,w28 mov v2.16b,v26.16b lsr x19,x28,#32 mov v6.16b,v26.16b mov w20,w30 mov v18.16b,v26.16b lsr x21,x30,#32 mov x4,#10 subs x2,x2,#256 .Loop_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v16.4s,v16.4s,v17.4s add w7,w7,w11 eor v3.16b,v3.16b,v0.16b add w8,w8,w12 eor v7.16b,v7.16b,v4.16b eor w17,w17,w5 eor v19.16b,v19.16b,v16.16b eor w19,w19,w6 rev32 v3.8h,v3.8h eor w20,w20,w7 rev32 v7.8h,v7.8h eor w21,w21,w8 rev32 v19.8h,v19.8h ror w17,w17,#16 add v2.4s,v2.4s,v3.4s ror w19,w19,#16 add v6.4s,v6.4s,v7.4s ror w20,w20,#16 add v18.4s,v18.4s,v19.4s ror w21,w21,#16 eor v20.16b,v1.16b,v2.16b add w13,w13,w17 eor v21.16b,v5.16b,v6.16b add w14,w14,w19 eor v22.16b,v17.16b,v18.16b add w15,w15,w20 ushr v1.4s,v20.4s,#20 add w16,w16,w21 ushr v5.4s,v21.4s,#20 eor w9,w9,w13 
ushr v17.4s,v22.4s,#20 eor w10,w10,w14 sli v1.4s,v20.4s,#12 eor w11,w11,w15 sli v5.4s,v21.4s,#12 eor w12,w12,w16 sli v17.4s,v22.4s,#12 ror w9,w9,#20 add v0.4s,v0.4s,v1.4s ror w10,w10,#20 add v4.4s,v4.4s,v5.4s ror w11,w11,#20 add v16.4s,v16.4s,v17.4s ror w12,w12,#20 eor v20.16b,v3.16b,v0.16b add w5,w5,w9 eor v21.16b,v7.16b,v4.16b add w6,w6,w10 eor v22.16b,v19.16b,v16.16b add w7,w7,w11 ushr v3.4s,v20.4s,#24 add w8,w8,w12 ushr v7.4s,v21.4s,#24 eor w17,w17,w5 ushr v19.4s,v22.4s,#24 eor w19,w19,w6 sli v3.4s,v20.4s,#8 eor w20,w20,w7 sli v7.4s,v21.4s,#8 eor w21,w21,w8 sli v19.4s,v22.4s,#8 ror w17,w17,#24 add v2.4s,v2.4s,v3.4s ror w19,w19,#24 add v6.4s,v6.4s,v7.4s ror w20,w20,#24 add v18.4s,v18.4s,v19.4s ror w21,w21,#24 eor v20.16b,v1.16b,v2.16b add w13,w13,w17 eor v21.16b,v5.16b,v6.16b add w14,w14,w19 eor v22.16b,v17.16b,v18.16b add w15,w15,w20 ushr v1.4s,v20.4s,#25 add w16,w16,w21 ushr v5.4s,v21.4s,#25 eor w9,w9,w13 ushr v17.4s,v22.4s,#25 eor w10,w10,w14 sli v1.4s,v20.4s,#7 eor w11,w11,w15 sli v5.4s,v21.4s,#7 eor w12,w12,w16 sli v17.4s,v22.4s,#7 ror w9,w9,#25 ext v2.16b,v2.16b,v2.16b,#8 ror w10,w10,#25 ext v6.16b,v6.16b,v6.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w10 add v4.4s,v4.4s,v5.4s add w6,w6,w11 add v16.4s,v16.4s,v17.4s add w7,w7,w12 eor v3.16b,v3.16b,v0.16b add w8,w8,w9 eor v7.16b,v7.16b,v4.16b eor w21,w21,w5 eor v19.16b,v19.16b,v16.16b eor w17,w17,w6 rev32 v3.8h,v3.8h eor w19,w19,w7 rev32 v7.8h,v7.8h eor w20,w20,w8 rev32 v19.8h,v19.8h ror w21,w21,#16 add v2.4s,v2.4s,v3.4s ror w17,w17,#16 add v6.4s,v6.4s,v7.4s ror w19,w19,#16 add v18.4s,v18.4s,v19.4s ror w20,w20,#16 eor v20.16b,v1.16b,v2.16b add w15,w15,w21 eor v21.16b,v5.16b,v6.16b add w16,w16,w17 eor v22.16b,v17.16b,v18.16b add w13,w13,w19 ushr v1.4s,v20.4s,#20 add w14,w14,w20 ushr v5.4s,v21.4s,#20 eor w10,w10,w15 ushr v17.4s,v22.4s,#20 eor w11,w11,w16 sli v1.4s,v20.4s,#12 eor w12,w12,w13 sli v5.4s,v21.4s,#12 eor w9,w9,w14 sli v17.4s,v22.4s,#12 ror w10,w10,#20 add v0.4s,v0.4s,v1.4s ror w11,w11,#20 add v4.4s,v4.4s,v5.4s ror w12,w12,#20 add v16.4s,v16.4s,v17.4s ror w9,w9,#20 eor v20.16b,v3.16b,v0.16b add w5,w5,w10 eor v21.16b,v7.16b,v4.16b add w6,w6,w11 eor v22.16b,v19.16b,v16.16b add w7,w7,w12 ushr v3.4s,v20.4s,#24 add w8,w8,w9 ushr v7.4s,v21.4s,#24 eor w21,w21,w5 ushr v19.4s,v22.4s,#24 eor w17,w17,w6 sli v3.4s,v20.4s,#8 eor w19,w19,w7 sli v7.4s,v21.4s,#8 eor w20,w20,w8 sli v19.4s,v22.4s,#8 ror w21,w21,#24 add v2.4s,v2.4s,v3.4s ror w17,w17,#24 add v6.4s,v6.4s,v7.4s ror w19,w19,#24 add v18.4s,v18.4s,v19.4s ror w20,w20,#24 eor v20.16b,v1.16b,v2.16b add w15,w15,w21 eor v21.16b,v5.16b,v6.16b add w16,w16,w17 eor v22.16b,v17.16b,v18.16b add w13,w13,w19 ushr v1.4s,v20.4s,#25 add w14,w14,w20 ushr v5.4s,v21.4s,#25 eor w10,w10,w15 ushr v17.4s,v22.4s,#25 eor w11,w11,w16 sli v1.4s,v20.4s,#7 eor w12,w12,w13 sli v5.4s,v21.4s,#7 eor w9,w9,w14 sli v17.4s,v22.4s,#7 ror w10,w10,#25 ext v2.16b,v2.16b,v2.16b,#8 ror w11,w11,#25 ext v6.16b,v6.16b,v6.16b,#8 ror w12,w12,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 cbnz x4,.Loop_neon add w5,w5,w22 // accumulate key block add v0.4s,v0.4s,v24.4s add x6,x6,x22,lsr#32 add v4.4s,v4.4s,v24.4s add w7,w7,w23 
add v16.4s,v16.4s,v24.4s add x8,x8,x23,lsr#32 add v2.4s,v2.4s,v26.4s add w9,w9,w24 add v6.4s,v6.4s,v26.4s add x10,x10,x24,lsr#32 add v18.4s,v18.4s,v26.4s add w11,w11,w25 add v3.4s,v3.4s,v27.4s add x12,x12,x25,lsr#32 add w13,w13,w26 add v7.4s,v7.4s,v28.4s add x14,x14,x26,lsr#32 add w15,w15,w27 add v19.4s,v19.4s,v29.4s add x16,x16,x27,lsr#32 add w17,w17,w28 add v1.4s,v1.4s,v25.4s add x19,x19,x28,lsr#32 add w20,w20,w30 add v5.4s,v5.4s,v25.4s add x21,x21,x30,lsr#32 add v17.4s,v17.4s,v25.4s b.lo .Ltail_neon add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor v0.16b,v0.16b,v20.16b eor x15,x15,x16 eor v1.16b,v1.16b,v21.16b eor x17,x17,x19 eor v2.16b,v2.16b,v22.16b eor x20,x20,x21 eor v3.16b,v3.16b,v23.16b ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 stp x5,x7,[x0,#0] // store output add x28,x28,#4 // increment counter stp x9,x11,[x0,#16] add v27.4s,v27.4s,v31.4s // += 4 stp x13,x15,[x0,#32] add v28.4s,v28.4s,v31.4s stp x17,x20,[x0,#48] add v29.4s,v29.4s,v31.4s add x0,x0,#64 st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 eor v4.16b,v4.16b,v20.16b eor v5.16b,v5.16b,v21.16b eor v6.16b,v6.16b,v22.16b eor v7.16b,v7.16b,v23.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 eor v16.16b,v16.16b,v0.16b eor v17.16b,v17.16b,v1.16b eor v18.16b,v18.16b,v2.16b eor v19.16b,v19.16b,v3.16b st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 b.hi .Loop_outer_neon ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .Ltail_neon: add x2,x2,#256 cmp x2,#64 b.lo .Less_than_64 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#4 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 b.eq .Ldone_neon sub x2,x2,#64 cmp x2,#64 b.lo .Less_than_128 ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor v0.16b,v0.16b,v20.16b eor v1.16b,v1.16b,v21.16b eor v2.16b,v2.16b,v22.16b eor v3.16b,v3.16b,v23.16b st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 b.eq .Ldone_neon sub x2,x2,#64 cmp x2,#64 b.lo .Less_than_192 ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 eor v4.16b,v4.16b,v20.16b eor v5.16b,v5.16b,v21.16b eor v6.16b,v6.16b,v22.16b eor v7.16b,v7.16b,v23.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 b.eq .Ldone_neon sub x2,x2,#64 st1 {v16.16b,v17.16b,v18.16b,v19.16b},[sp] b .Last_neon .Less_than_128: st1 {v0.16b,v1.16b,v2.16b,v3.16b},[sp] b .Last_neon .Less_than_192: st1 {v4.16b,v5.16b,v6.16b,v7.16b},[sp] b .Last_neon .align 4 .Last_neon: sub 
x0,x0,#1 add x1,x1,x2 add x0,x0,x2 add x4,sp,x2 neg x2,x2 .Loop_tail_neon: ldrb w10,[x1,x2] ldrb w11,[x4,x2] add x2,x2,#1 eor w10,w10,w11 strb w10,[x0,x2] cbnz x2,.Loop_tail_neon stp xzr,xzr,[sp,#0] stp xzr,xzr,[sp,#16] stp xzr,xzr,[sp,#32] stp xzr,xzr,[sp,#48] .Ldone_neon: ldp x19,x20,[x29,#16] add sp,sp,#64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .size ChaCha20_ctr32_neon,.-ChaCha20_ctr32_neon .type ChaCha20_512_neon,%function .align 5 ChaCha20_512_neon: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! add x29,sp,#0 adrp x5,.Lsigma add x5,x5,:lo12:.Lsigma stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] .L512_or_more_neon: sub sp,sp,#128+64 ldp x22,x23,[x5] // load sigma ld1 {v24.4s},[x5],#16 ldp x24,x25,[x3] // load key ldp x26,x27,[x3,#16] ld1 {v25.4s,v26.4s},[x3] ldp x28,x30,[x4] // load counter ld1 {v27.4s},[x4] ld1 {v31.4s},[x5] #ifdef __AARCH64EB__ rev64 v24.4s,v24.4s ror x24,x24,#32 ror x25,x25,#32 ror x26,x26,#32 ror x27,x27,#32 ror x28,x28,#32 ror x30,x30,#32 #endif add v27.4s,v27.4s,v31.4s // += 1 stp q24,q25,[sp,#0] // off-load key block, invariant part add v27.4s,v27.4s,v31.4s // not typo str q26,[sp,#32] add v28.4s,v27.4s,v31.4s add v29.4s,v28.4s,v31.4s add v30.4s,v29.4s,v31.4s shl v31.4s,v31.4s,#2 // 1 -> 4 stp d8,d9,[sp,#128+0] // meet ABI requirements stp d10,d11,[sp,#128+16] stp d12,d13,[sp,#128+32] stp d14,d15,[sp,#128+48] sub x2,x2,#512 // not typo .Loop_outer_512_neon: mov v0.16b,v24.16b mov v4.16b,v24.16b mov v8.16b,v24.16b mov v12.16b,v24.16b mov v16.16b,v24.16b mov v20.16b,v24.16b mov v1.16b,v25.16b mov w5,w22 // unpack key block mov v5.16b,v25.16b lsr x6,x22,#32 mov v9.16b,v25.16b mov w7,w23 mov v13.16b,v25.16b lsr x8,x23,#32 mov v17.16b,v25.16b mov w9,w24 mov v21.16b,v25.16b lsr x10,x24,#32 mov v3.16b,v27.16b mov w11,w25 mov v7.16b,v28.16b lsr x12,x25,#32 mov v11.16b,v29.16b mov w13,w26 mov v15.16b,v30.16b lsr x14,x26,#32 mov v2.16b,v26.16b mov w15,w27 mov v6.16b,v26.16b lsr x16,x27,#32 add v19.4s,v3.4s,v31.4s // +4 mov w17,w28 add v23.4s,v7.4s,v31.4s // +4 lsr x19,x28,#32 mov v10.16b,v26.16b mov w20,w30 mov v14.16b,v26.16b lsr x21,x30,#32 mov v18.16b,v26.16b stp q27,q28,[sp,#48] // off-load key block, variable part mov v22.16b,v26.16b str q29,[sp,#80] mov x4,#5 subs x2,x2,#512 .Loop_upper_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b 
eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v11.16b,v11.16b,v11.16b,#12 ext v15.16b,v15.16b,v15.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v23.16b,v23.16b,v23.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v9.16b,v9.16b,v9.16b,#4 ext v13.16b,v13.16b,v13.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 ext v21.16b,v21.16b,v21.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add 
w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v11.16b,v11.16b,v11.16b,#4 ext v15.16b,v15.16b,v15.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v23.16b,v23.16b,v23.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v9.16b,v9.16b,v9.16b,#12 ext v13.16b,v13.16b,v13.16b,#12 ext 
v17.16b,v17.16b,v17.16b,#12 ext v21.16b,v21.16b,v21.16b,#12 cbnz x4,.Loop_upper_neon add w5,w5,w22 // accumulate key block add x6,x6,x22,lsr#32 add w7,w7,w23 add x8,x8,x23,lsr#32 add w9,w9,w24 add x10,x10,x24,lsr#32 add w11,w11,w25 add x12,x12,x25,lsr#32 add w13,w13,w26 add x14,x14,x26,lsr#32 add w15,w15,w27 add x16,x16,x27,lsr#32 add w17,w17,w28 add x19,x19,x28,lsr#32 add w20,w20,w30 add x21,x21,x30,lsr#32 add x5,x5,x6,lsl#32 // pack add x7,x7,x8,lsl#32 ldp x6,x8,[x1,#0] // load input add x9,x9,x10,lsl#32 add x11,x11,x12,lsl#32 ldp x10,x12,[x1,#16] add x13,x13,x14,lsl#32 add x15,x15,x16,lsl#32 ldp x14,x16,[x1,#32] add x17,x17,x19,lsl#32 add x20,x20,x21,lsl#32 ldp x19,x21,[x1,#48] add x1,x1,#64 #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor x15,x15,x16 eor x17,x17,x19 eor x20,x20,x21 stp x5,x7,[x0,#0] // store output add x28,x28,#1 // increment counter mov w5,w22 // unpack key block lsr x6,x22,#32 stp x9,x11,[x0,#16] mov w7,w23 lsr x8,x23,#32 stp x13,x15,[x0,#32] mov w9,w24 lsr x10,x24,#32 stp x17,x20,[x0,#48] add x0,x0,#64 mov w11,w25 lsr x12,x25,#32 mov w13,w26 lsr x14,x26,#32 mov w15,w27 lsr x16,x27,#32 mov w17,w28 lsr x19,x28,#32 mov w20,w30 lsr x21,x30,#32 mov x4,#5 .Loop_lower_neon: sub x4,x4,#1 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 
ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#12 ext v7.16b,v7.16b,v7.16b,#12 ext v11.16b,v11.16b,v11.16b,#12 ext v15.16b,v15.16b,v15.16b,#12 ext v19.16b,v19.16b,v19.16b,#12 ext v23.16b,v23.16b,v23.16b,#12 ext v1.16b,v1.16b,v1.16b,#4 ext v5.16b,v5.16b,v5.16b,#4 ext v9.16b,v9.16b,v9.16b,#4 ext v13.16b,v13.16b,v13.16b,#4 ext v17.16b,v17.16b,v17.16b,#4 ext v21.16b,v21.16b,v21.16b,#4 add v0.4s,v0.4s,v1.4s add w5,w5,w9 add v4.4s,v4.4s,v5.4s add w6,w6,w10 add v8.4s,v8.4s,v9.4s add w7,w7,w11 add v12.4s,v12.4s,v13.4s add w8,w8,w12 add v16.4s,v16.4s,v17.4s eor w17,w17,w5 add v20.4s,v20.4s,v21.4s eor w19,w19,w6 eor v3.16b,v3.16b,v0.16b eor w20,w20,w7 eor v7.16b,v7.16b,v4.16b eor w21,w21,w8 eor v11.16b,v11.16b,v8.16b ror w17,w17,#16 eor v15.16b,v15.16b,v12.16b ror w19,w19,#16 eor v19.16b,v19.16b,v16.16b ror w20,w20,#16 eor v23.16b,v23.16b,v20.16b ror w21,w21,#16 rev32 v3.8h,v3.8h add w13,w13,w17 rev32 v7.8h,v7.8h add w14,w14,w19 rev32 v11.8h,v11.8h add w15,w15,w20 rev32 v15.8h,v15.8h add w16,w16,w21 rev32 v19.8h,v19.8h eor w9,w9,w13 rev32 v23.8h,v23.8h eor w10,w10,w14 add v2.4s,v2.4s,v3.4s eor w11,w11,w15 add v6.4s,v6.4s,v7.4s eor w12,w12,w16 add v10.4s,v10.4s,v11.4s ror w9,w9,#20 add v14.4s,v14.4s,v15.4s ror w10,w10,#20 add v18.4s,v18.4s,v19.4s ror w11,w11,#20 add v22.4s,v22.4s,v23.4s ror w12,w12,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w9 eor v25.16b,v5.16b,v6.16b add w6,w6,w10 eor v26.16b,v9.16b,v10.16b add w7,w7,w11 eor v27.16b,v13.16b,v14.16b add w8,w8,w12 eor v28.16b,v17.16b,v18.16b eor w17,w17,w5 eor v29.16b,v21.16b,v22.16b eor w19,w19,w6 ushr v1.4s,v24.4s,#20 eor w20,w20,w7 ushr v5.4s,v25.4s,#20 eor w21,w21,w8 ushr v9.4s,v26.4s,#20 ror w17,w17,#24 ushr v13.4s,v27.4s,#20 ror w19,w19,#24 ushr v17.4s,v28.4s,#20 ror w20,w20,#24 ushr v21.4s,v29.4s,#20 ror w21,w21,#24 sli v1.4s,v24.4s,#12 add w13,w13,w17 sli v5.4s,v25.4s,#12 add w14,w14,w19 sli v9.4s,v26.4s,#12 add w15,w15,w20 sli v13.4s,v27.4s,#12 add w16,w16,w21 sli v17.4s,v28.4s,#12 eor w9,w9,w13 sli 
v21.4s,v29.4s,#12 eor w10,w10,w14 add v0.4s,v0.4s,v1.4s eor w11,w11,w15 add v4.4s,v4.4s,v5.4s eor w12,w12,w16 add v8.4s,v8.4s,v9.4s ror w9,w9,#25 add v12.4s,v12.4s,v13.4s ror w10,w10,#25 add v16.4s,v16.4s,v17.4s ror w11,w11,#25 add v20.4s,v20.4s,v21.4s ror w12,w12,#25 eor v24.16b,v3.16b,v0.16b add w5,w5,w10 eor v25.16b,v7.16b,v4.16b add w6,w6,w11 eor v26.16b,v11.16b,v8.16b add w7,w7,w12 eor v27.16b,v15.16b,v12.16b add w8,w8,w9 eor v28.16b,v19.16b,v16.16b eor w21,w21,w5 eor v29.16b,v23.16b,v20.16b eor w17,w17,w6 ushr v3.4s,v24.4s,#24 eor w19,w19,w7 ushr v7.4s,v25.4s,#24 eor w20,w20,w8 ushr v11.4s,v26.4s,#24 ror w21,w21,#16 ushr v15.4s,v27.4s,#24 ror w17,w17,#16 ushr v19.4s,v28.4s,#24 ror w19,w19,#16 ushr v23.4s,v29.4s,#24 ror w20,w20,#16 sli v3.4s,v24.4s,#8 add w15,w15,w21 sli v7.4s,v25.4s,#8 add w16,w16,w17 sli v11.4s,v26.4s,#8 add w13,w13,w19 sli v15.4s,v27.4s,#8 add w14,w14,w20 sli v19.4s,v28.4s,#8 eor w10,w10,w15 sli v23.4s,v29.4s,#8 eor w11,w11,w16 add v2.4s,v2.4s,v3.4s eor w12,w12,w13 add v6.4s,v6.4s,v7.4s eor w9,w9,w14 add v10.4s,v10.4s,v11.4s ror w10,w10,#20 add v14.4s,v14.4s,v15.4s ror w11,w11,#20 add v18.4s,v18.4s,v19.4s ror w12,w12,#20 add v22.4s,v22.4s,v23.4s ror w9,w9,#20 eor v24.16b,v1.16b,v2.16b add w5,w5,w10 eor v25.16b,v5.16b,v6.16b add w6,w6,w11 eor v26.16b,v9.16b,v10.16b add w7,w7,w12 eor v27.16b,v13.16b,v14.16b add w8,w8,w9 eor v28.16b,v17.16b,v18.16b eor w21,w21,w5 eor v29.16b,v21.16b,v22.16b eor w17,w17,w6 ushr v1.4s,v24.4s,#25 eor w19,w19,w7 ushr v5.4s,v25.4s,#25 eor w20,w20,w8 ushr v9.4s,v26.4s,#25 ror w21,w21,#24 ushr v13.4s,v27.4s,#25 ror w17,w17,#24 ushr v17.4s,v28.4s,#25 ror w19,w19,#24 ushr v21.4s,v29.4s,#25 ror w20,w20,#24 sli v1.4s,v24.4s,#7 add w15,w15,w21 sli v5.4s,v25.4s,#7 add w16,w16,w17 sli v9.4s,v26.4s,#7 add w13,w13,w19 sli v13.4s,v27.4s,#7 add w14,w14,w20 sli v17.4s,v28.4s,#7 eor w10,w10,w15 sli v21.4s,v29.4s,#7 eor w11,w11,w16 ext v2.16b,v2.16b,v2.16b,#8 eor w12,w12,w13 ext v6.16b,v6.16b,v6.16b,#8 eor w9,w9,w14 ext v10.16b,v10.16b,v10.16b,#8 ror w10,w10,#25 ext v14.16b,v14.16b,v14.16b,#8 ror w11,w11,#25 ext v18.16b,v18.16b,v18.16b,#8 ror w12,w12,#25 ext v22.16b,v22.16b,v22.16b,#8 ror w9,w9,#25 ext v3.16b,v3.16b,v3.16b,#4 ext v7.16b,v7.16b,v7.16b,#4 ext v11.16b,v11.16b,v11.16b,#4 ext v15.16b,v15.16b,v15.16b,#4 ext v19.16b,v19.16b,v19.16b,#4 ext v23.16b,v23.16b,v23.16b,#4 ext v1.16b,v1.16b,v1.16b,#12 ext v5.16b,v5.16b,v5.16b,#12 ext v9.16b,v9.16b,v9.16b,#12 ext v13.16b,v13.16b,v13.16b,#12 ext v17.16b,v17.16b,v17.16b,#12 ext v21.16b,v21.16b,v21.16b,#12 cbnz x4,.Loop_lower_neon add w5,w5,w22 // accumulate key block ldp q24,q25,[sp,#0] add x6,x6,x22,lsr#32 ldp q26,q27,[sp,#32] add w7,w7,w23 ldp q28,q29,[sp,#64] add x8,x8,x23,lsr#32 add v0.4s,v0.4s,v24.4s add w9,w9,w24 add v4.4s,v4.4s,v24.4s add x10,x10,x24,lsr#32 add v8.4s,v8.4s,v24.4s add w11,w11,w25 add v12.4s,v12.4s,v24.4s add x12,x12,x25,lsr#32 add v16.4s,v16.4s,v24.4s add w13,w13,w26 add v20.4s,v20.4s,v24.4s add x14,x14,x26,lsr#32 add v2.4s,v2.4s,v26.4s add w15,w15,w27 add v6.4s,v6.4s,v26.4s add x16,x16,x27,lsr#32 add v10.4s,v10.4s,v26.4s add w17,w17,w28 add v14.4s,v14.4s,v26.4s add x19,x19,x28,lsr#32 add v18.4s,v18.4s,v26.4s add w20,w20,w30 add v22.4s,v22.4s,v26.4s add x21,x21,x30,lsr#32 add v19.4s,v19.4s,v31.4s // +4 add x5,x5,x6,lsl#32 // pack add v23.4s,v23.4s,v31.4s // +4 add x7,x7,x8,lsl#32 add v3.4s,v3.4s,v27.4s ldp x6,x8,[x1,#0] // load input add v7.4s,v7.4s,v28.4s add x9,x9,x10,lsl#32 add v11.4s,v11.4s,v29.4s add x11,x11,x12,lsl#32 add v15.4s,v15.4s,v30.4s ldp x10,x12,[x1,#16] add 
v19.4s,v19.4s,v27.4s add x13,x13,x14,lsl#32 add v23.4s,v23.4s,v28.4s add x15,x15,x16,lsl#32 add v1.4s,v1.4s,v25.4s ldp x14,x16,[x1,#32] add v5.4s,v5.4s,v25.4s add x17,x17,x19,lsl#32 add v9.4s,v9.4s,v25.4s add x20,x20,x21,lsl#32 add v13.4s,v13.4s,v25.4s ldp x19,x21,[x1,#48] add v17.4s,v17.4s,v25.4s add x1,x1,#64 add v21.4s,v21.4s,v25.4s #ifdef __AARCH64EB__ rev x5,x5 rev x7,x7 rev x9,x9 rev x11,x11 rev x13,x13 rev x15,x15 rev x17,x17 rev x20,x20 #endif ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 eor x5,x5,x6 eor x7,x7,x8 eor x9,x9,x10 eor x11,x11,x12 eor x13,x13,x14 eor v0.16b,v0.16b,v24.16b eor x15,x15,x16 eor v1.16b,v1.16b,v25.16b eor x17,x17,x19 eor v2.16b,v2.16b,v26.16b eor x20,x20,x21 eor v3.16b,v3.16b,v27.16b ld1 {v24.16b,v25.16b,v26.16b,v27.16b},[x1],#64 stp x5,x7,[x0,#0] // store output add x28,x28,#7 // increment counter stp x9,x11,[x0,#16] stp x13,x15,[x0,#32] stp x17,x20,[x0,#48] add x0,x0,#64 st1 {v0.16b,v1.16b,v2.16b,v3.16b},[x0],#64 ld1 {v0.16b,v1.16b,v2.16b,v3.16b},[x1],#64 eor v4.16b,v4.16b,v24.16b eor v5.16b,v5.16b,v25.16b eor v6.16b,v6.16b,v26.16b eor v7.16b,v7.16b,v27.16b st1 {v4.16b,v5.16b,v6.16b,v7.16b},[x0],#64 ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 eor v8.16b,v8.16b,v0.16b ldp q24,q25,[sp,#0] eor v9.16b,v9.16b,v1.16b ldp q26,q27,[sp,#32] eor v10.16b,v10.16b,v2.16b eor v11.16b,v11.16b,v3.16b st1 {v8.16b,v9.16b,v10.16b,v11.16b},[x0],#64 ld1 {v8.16b,v9.16b,v10.16b,v11.16b},[x1],#64 eor v12.16b,v12.16b,v4.16b eor v13.16b,v13.16b,v5.16b eor v14.16b,v14.16b,v6.16b eor v15.16b,v15.16b,v7.16b st1 {v12.16b,v13.16b,v14.16b,v15.16b},[x0],#64 ld1 {v12.16b,v13.16b,v14.16b,v15.16b},[x1],#64 eor v16.16b,v16.16b,v8.16b eor v17.16b,v17.16b,v9.16b eor v18.16b,v18.16b,v10.16b eor v19.16b,v19.16b,v11.16b st1 {v16.16b,v17.16b,v18.16b,v19.16b},[x0],#64 shl v0.4s,v31.4s,#1 // 4 -> 8 eor v20.16b,v20.16b,v12.16b eor v21.16b,v21.16b,v13.16b eor v22.16b,v22.16b,v14.16b eor v23.16b,v23.16b,v15.16b st1 {v20.16b,v21.16b,v22.16b,v23.16b},[x0],#64 add v27.4s,v27.4s,v0.4s // += 8 add v28.4s,v28.4s,v0.4s add v29.4s,v29.4s,v0.4s add v30.4s,v30.4s,v0.4s b.hs .Loop_outer_512_neon adds x2,x2,#512 ushr v0.4s,v31.4s,#2 // 4 -> 1 ldp d8,d9,[sp,#128+0] // meet ABI requirements ldp d10,d11,[sp,#128+16] ldp d12,d13,[sp,#128+32] ldp d14,d15,[sp,#128+48] stp q24,q31,[sp,#0] // wipe off-load area stp q24,q31,[sp,#32] stp q24,q31,[sp,#64] b.eq .Ldone_512_neon cmp x2,#192 sub v27.4s,v27.4s,v0.4s // -= 1 sub v28.4s,v28.4s,v0.4s sub v29.4s,v29.4s,v0.4s add sp,sp,#128 b.hs .Loop_outer_neon eor v25.16b,v25.16b,v25.16b eor v26.16b,v26.16b,v26.16b eor v27.16b,v27.16b,v27.16b eor v28.16b,v28.16b,v28.16b eor v29.16b,v29.16b,v29.16b eor v30.16b,v30.16b,v30.16b b .Loop_outer .Ldone_512_neon: ldp x19,x20,[x29,#16] add sp,sp,#128+64 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .size ChaCha20_512_neon,.-ChaCha20_512_neon #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
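
The two ChaCha20 entry points above (ChaCha20_ctr32_nohw and ChaCha20_ctr32_neon) interleave scalar (w5..w21) and NEON (v0..v23) evaluations of the same primitive, the ChaCha20 quarter round: add, xor, then rotate by 16, 12, 8 and 7 bits, which is what the ror #16/#20/#24/#25 instructions and the matching rev32 / ushr+sli pairs compute. A minimal reference sketch in C follows; it is not part of the generated file, and the function names are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Left rotate; the assembly uses right rotates, e.g. ror #20 == rotl 12. */
static uint32_t rotl32(uint32_t x, int n) {
    return (x << n) | (x >> (32 - n));
}

/* One ChaCha quarter round on four 32-bit state words. */
static void quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d) {
    *a += *b; *d ^= *a; *d = rotl32(*d, 16);
    *c += *d; *b ^= *c; *b = rotl32(*b, 12);
    *a += *b; *d ^= *a; *d = rotl32(*d, 8);
    *c += *d; *b ^= *c; *b = rotl32(*b, 7);
}

int main(void) {
    /* Quarter-round test vector from RFC 8439, section 2.1.1. */
    uint32_t a = 0x11111111, b = 0x01020304, c = 0x9b8d6f43, d = 0x01234567;
    quarter_round(&a, &b, &c, &d);
    /* Expected output: ea2a92f4 cb1cf8ce 4581472e 5881c4bb */
    printf("%08x %08x %08x %08x\n", a, b, c, d);
    return 0;
}

The full block transform runs ten such double rounds (the mov x4,#10 loop counter above, column rounds then diagonal rounds), adds the original input words back in (the "accumulate key block" steps), and XORs the resulting 64-byte keystream blocks with the input.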
marvin-hansen/iggy-streaming-system
18,647
thirdparty/crates/ring-0.17.9/pregenerated/x86_64-mont-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .globl _bn_mul_mont_nohw .private_extern _bn_mul_mont_nohw .p2align 4 _bn_mul_mont_nohw: _CET_ENDBR movl %r9d,%r9d movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 negq %r9 movq %rsp,%r11 leaq -16(%rsp,%r9,8),%r10 negq %r9 andq $-1024,%r10 subq %r10,%r11 andq $-4096,%r11 leaq (%r10,%r11,1),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja L$mul_page_walk jmp L$mul_page_walk_done .p2align 4 L$mul_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja L$mul_page_walk L$mul_page_walk_done: movq %rax,8(%rsp,%r9,8) L$mul_body: movq %rdx,%r12 movq (%r8),%r8 movq (%r12),%rbx movq (%rsi),%rax xorq %r14,%r14 xorq %r15,%r15 movq %r8,%rbp mulq %rbx movq %rax,%r10 movq (%rcx),%rax imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq %rdx,%r13 leaq 1(%r15),%r15 jmp L$1st_enter .p2align 4 L$1st: addq %rax,%r13 movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%r13 movq %r10,%r11 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 L$1st_enter: mulq %rbx addq %rax,%r11 movq (%rcx,%r15,8),%rax adcq $0,%rdx leaq 1(%r15),%r15 movq %rdx,%r10 mulq %rbp cmpq %r9,%r15 jne L$1st addq %rax,%r13 movq (%rsi),%rax adcq $0,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 movq %r10,%r11 xorq %rdx,%rdx addq %r11,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r9,8) movq %rdx,(%rsp,%r9,8) leaq 1(%r14),%r14 jmp L$outer .p2align 4 L$outer: movq (%r12,%r14,8),%rbx xorq %r15,%r15 movq %r8,%rbp movq (%rsp),%r10 mulq %rbx addq %rax,%r10 movq (%rcx),%rax adcq $0,%rdx imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq 8(%rsp),%r10 movq %rdx,%r13 leaq 1(%r15),%r15 jmp L$inner_enter .p2align 4 L$inner: addq %rax,%r13 movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 movq (%rsp,%r15,8),%r10 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 L$inner_enter: mulq %rbx addq %rax,%r11 movq (%rcx,%r15,8),%rax adcq $0,%rdx addq %r11,%r10 movq %rdx,%r11 adcq $0,%r11 leaq 1(%r15),%r15 mulq %rbp cmpq %r9,%r15 jne L$inner addq %rax,%r13 movq (%rsi),%rax adcq $0,%rdx addq %r10,%r13 movq (%rsp,%r15,8),%r10 adcq $0,%rdx movq %r13,-16(%rsp,%r15,8) movq %rdx,%r13 xorq %rdx,%rdx addq %r11,%r13 adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r9,8) movq %rdx,(%rsp,%r9,8) leaq 1(%r14),%r14 cmpq %r9,%r14 jb L$outer xorq %r14,%r14 movq (%rsp),%rax movq %r9,%r15 .p2align 4 L$sub: sbbq (%rcx,%r14,8),%rax movq %rax,(%rdi,%r14,8) movq 8(%rsp,%r14,8),%rax leaq 1(%r14),%r14 decq %r15 jnz L$sub sbbq $0,%rax movq $-1,%rbx xorq %rax,%rbx xorq %r14,%r14 movq %r9,%r15 L$copy: movq (%rdi,%r14,8),%rcx movq (%rsp,%r14,8),%rdx andq %rbx,%rcx andq %rax,%rdx movq %r9,(%rsp,%r14,8) orq %rcx,%rdx movq %rdx,(%rdi,%r14,8) leaq 1(%r14),%r14 subq $1,%r15 jnz L$copy movq 8(%rsp,%r9,8),%rsi movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$mul_epilogue: ret .globl _bn_mul4x_mont .private_extern _bn_mul4x_mont .p2align 4 _bn_mul4x_mont: _CET_ENDBR movl %r9d,%r9d movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 negq %r9 movq %rsp,%r11 leaq -32(%rsp,%r9,8),%r10 negq %r9 andq $-1024,%r10 subq %r10,%r11 andq $-4096,%r11 leaq (%r10,%r11,1),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja 
L$mul4x_page_walk jmp L$mul4x_page_walk_done L$mul4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r11 cmpq %r10,%rsp ja L$mul4x_page_walk L$mul4x_page_walk_done: movq %rax,8(%rsp,%r9,8) L$mul4x_body: movq %rdi,16(%rsp,%r9,8) movq %rdx,%r12 movq (%r8),%r8 movq (%r12),%rbx movq (%rsi),%rax xorq %r14,%r14 xorq %r15,%r15 movq %r8,%rbp mulq %rbx movq %rax,%r10 movq (%rcx),%rax imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi),%rax adcq $0,%rdx addq %r11,%rdi leaq 4(%r15),%r15 adcq $0,%rdx movq %rdi,(%rsp) movq %rdx,%r13 jmp L$1st4x .p2align 4 L$1st4x: mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 mulq %rbx addq %rax,%r10 movq (%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx,%r15,8),%rax adcq $0,%rdx leaq 4(%r15),%r15 movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq -16(%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-32(%rsp,%r15,8) movq %rdx,%r13 cmpq %r9,%r15 jb L$1st4x mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi movq %r13,-8(%rsp,%r15,8) movq %rdi,(%rsp,%r15,8) leaq 1(%r14),%r14 .p2align 2 L$outer4x: movq (%r12,%r14,8),%rbx xorq %r15,%r15 movq (%rsp),%r10 movq %r8,%rbp mulq %rbx addq %rax,%r10 movq (%rcx),%rax adcq $0,%rdx imulq %r10,%rbp movq %rdx,%r11 mulq %rbp addq %rax,%r10 movq 8(%rsi),%rax adcq $0,%rdx movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx),%rax adcq $0,%rdx addq 8(%rsp),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq 16(%rsi),%rax adcq $0,%rdx addq %r11,%rdi leaq 4(%r15),%r15 adcq $0,%rdx movq %rdi,(%rsp) movq %rdx,%r13 jmp L$inner4x .p2align 4 L$inner4x: mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx addq -16(%rsp,%r15,8),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx addq -8(%rsp,%r15,8),%r11 adcq $0,%rdx movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 mulq %rbx addq %rax,%r10 movq (%rcx,%r15,8),%rax adcq $0,%rdx addq (%rsp,%r15,8),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq 8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-8(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq 8(%rcx,%r15,8),%rax adcq $0,%rdx addq 8(%rsp,%r15,8),%r11 adcq $0,%rdx leaq 4(%r15),%r15 movq %rdx,%r10 mulq %rbp 
addq %rax,%rdi movq -16(%rsi,%r15,8),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-32(%rsp,%r15,8) movq %rdx,%r13 cmpq %r9,%r15 jb L$inner4x mulq %rbx addq %rax,%r10 movq -16(%rcx,%r15,8),%rax adcq $0,%rdx addq -16(%rsp,%r15,8),%r10 adcq $0,%rdx movq %rdx,%r11 mulq %rbp addq %rax,%r13 movq -8(%rsi,%r15,8),%rax adcq $0,%rdx addq %r10,%r13 adcq $0,%rdx movq %r13,-24(%rsp,%r15,8) movq %rdx,%rdi mulq %rbx addq %rax,%r11 movq -8(%rcx,%r15,8),%rax adcq $0,%rdx addq -8(%rsp,%r15,8),%r11 adcq $0,%rdx leaq 1(%r14),%r14 movq %rdx,%r10 mulq %rbp addq %rax,%rdi movq (%rsi),%rax adcq $0,%rdx addq %r11,%rdi adcq $0,%rdx movq %rdi,-16(%rsp,%r15,8) movq %rdx,%r13 xorq %rdi,%rdi addq %r10,%r13 adcq $0,%rdi addq (%rsp,%r9,8),%r13 adcq $0,%rdi movq %r13,-8(%rsp,%r15,8) movq %rdi,(%rsp,%r15,8) cmpq %r9,%r14 jb L$outer4x movq 16(%rsp,%r9,8),%rdi leaq -4(%r9),%r15 movq 0(%rsp),%rax movq 8(%rsp),%rdx shrq $2,%r15 leaq (%rsp),%rsi xorq %r14,%r14 subq 0(%rcx),%rax movq 16(%rsi),%rbx movq 24(%rsi),%rbp sbbq 8(%rcx),%rdx L$sub4x: movq %rax,0(%rdi,%r14,8) movq %rdx,8(%rdi,%r14,8) sbbq 16(%rcx,%r14,8),%rbx movq 32(%rsi,%r14,8),%rax movq 40(%rsi,%r14,8),%rdx sbbq 24(%rcx,%r14,8),%rbp movq %rbx,16(%rdi,%r14,8) movq %rbp,24(%rdi,%r14,8) sbbq 32(%rcx,%r14,8),%rax movq 48(%rsi,%r14,8),%rbx movq 56(%rsi,%r14,8),%rbp sbbq 40(%rcx,%r14,8),%rdx leaq 4(%r14),%r14 decq %r15 jnz L$sub4x movq %rax,0(%rdi,%r14,8) movq 32(%rsi,%r14,8),%rax sbbq 16(%rcx,%r14,8),%rbx movq %rdx,8(%rdi,%r14,8) sbbq 24(%rcx,%r14,8),%rbp movq %rbx,16(%rdi,%r14,8) sbbq $0,%rax movq %rbp,24(%rdi,%r14,8) pxor %xmm0,%xmm0 .byte 102,72,15,110,224 pcmpeqd %xmm5,%xmm5 pshufd $0,%xmm4,%xmm4 movq %r9,%r15 pxor %xmm4,%xmm5 shrq $2,%r15 xorl %eax,%eax jmp L$copy4x .p2align 4 L$copy4x: movdqa (%rsp,%rax,1),%xmm1 movdqu (%rdi,%rax,1),%xmm2 pand %xmm4,%xmm1 pand %xmm5,%xmm2 movdqa 16(%rsp,%rax,1),%xmm3 movdqa %xmm0,(%rsp,%rax,1) por %xmm2,%xmm1 movdqu 16(%rdi,%rax,1),%xmm2 movdqu %xmm1,(%rdi,%rax,1) pand %xmm4,%xmm3 pand %xmm5,%xmm2 movdqa %xmm0,16(%rsp,%rax,1) por %xmm2,%xmm3 movdqu %xmm3,16(%rdi,%rax,1) leaq 32(%rax),%rax decq %r15 jnz L$copy4x movq 8(%rsp,%r9,8),%rsi movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$mul4x_epilogue: ret .globl _bn_sqr8x_mont .private_extern _bn_sqr8x_mont .p2align 5 _bn_sqr8x_mont: _CET_ENDBR movl %r9d,%r9d movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$sqr8x_prologue: movl %r9d,%r10d shll $3,%r9d shlq $3+2,%r10 negq %r9 leaq -64(%rsp,%r9,2),%r11 movq %rsp,%rbp movq (%r8),%r8 subq %rsi,%r11 andq $4095,%r11 cmpq %r11,%r10 jb L$sqr8x_sp_alt subq %r11,%rbp leaq -64(%rbp,%r9,2),%rbp jmp L$sqr8x_sp_done .p2align 5 L$sqr8x_sp_alt: leaq 4096-64(,%r9,2),%r10 leaq -64(%rbp,%r9,2),%rbp subq %r10,%r11 movq $0,%r10 cmovcq %r10,%r11 subq %r11,%rbp L$sqr8x_sp_done: andq $-64,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$sqr8x_page_walk jmp L$sqr8x_page_walk_done .p2align 4 L$sqr8x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$sqr8x_page_walk L$sqr8x_page_walk_done: movq %r9,%r10 negq %r9 movq %r8,32(%rsp) movq %rax,40(%rsp) L$sqr8x_body: .byte 102,72,15,110,209 pxor %xmm0,%xmm0 .byte 102,72,15,110,207 .byte 102,73,15,110,218 testq %rdx,%rdx jz L$sqr8x_nox call _bn_sqrx8x_internal leaq (%r8,%rcx,1),%rbx movq %rcx,%r9 movq %rcx,%rdx .byte 102,72,15,126,207 sarq $3+2,%rcx jmp L$sqr8x_sub .p2align 5 L$sqr8x_nox: call 
_bn_sqr8x_internal leaq (%rdi,%r9,1),%rbx movq %r9,%rcx movq %r9,%rdx .byte 102,72,15,126,207 sarq $3+2,%rcx jmp L$sqr8x_sub .p2align 5 L$sqr8x_sub: movq 0(%rbx),%r12 movq 8(%rbx),%r13 movq 16(%rbx),%r14 movq 24(%rbx),%r15 leaq 32(%rbx),%rbx sbbq 0(%rbp),%r12 sbbq 8(%rbp),%r13 sbbq 16(%rbp),%r14 sbbq 24(%rbp),%r15 leaq 32(%rbp),%rbp movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r14,16(%rdi) movq %r15,24(%rdi) leaq 32(%rdi),%rdi incq %rcx jnz L$sqr8x_sub sbbq $0,%rax leaq (%rbx,%r9,1),%rbx leaq (%rdi,%r9,1),%rdi .byte 102,72,15,110,200 pxor %xmm0,%xmm0 pshufd $0,%xmm1,%xmm1 movq 40(%rsp),%rsi jmp L$sqr8x_cond_copy .p2align 5 L$sqr8x_cond_copy: movdqa 0(%rbx),%xmm2 movdqa 16(%rbx),%xmm3 leaq 32(%rbx),%rbx movdqu 0(%rdi),%xmm4 movdqu 16(%rdi),%xmm5 leaq 32(%rdi),%rdi movdqa %xmm0,-32(%rbx) movdqa %xmm0,-16(%rbx) movdqa %xmm0,-32(%rbx,%rdx,1) movdqa %xmm0,-16(%rbx,%rdx,1) pcmpeqd %xmm1,%xmm0 pand %xmm1,%xmm2 pand %xmm1,%xmm3 pand %xmm0,%xmm4 pand %xmm0,%xmm5 pxor %xmm0,%xmm0 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqu %xmm4,-32(%rdi) movdqu %xmm5,-16(%rdi) addq $32,%r9 jnz L$sqr8x_cond_copy movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$sqr8x_epilogue: ret .globl _bn_mulx4x_mont .private_extern _bn_mulx4x_mont .p2align 5 _bn_mulx4x_mont: _CET_ENDBR movq %rsp,%rax pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$mulx4x_prologue: shll $3,%r9d xorq %r10,%r10 subq %r9,%r10 movq (%r8),%r8 leaq -72(%rsp,%r10,1),%rbp andq $-128,%rbp movq %rsp,%r11 subq %rbp,%r11 andq $-4096,%r11 leaq (%r11,%rbp,1),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$mulx4x_page_walk jmp L$mulx4x_page_walk_done .p2align 4 L$mulx4x_page_walk: leaq -4096(%rsp),%rsp movq (%rsp),%r10 cmpq %rbp,%rsp ja L$mulx4x_page_walk L$mulx4x_page_walk_done: leaq (%rdx,%r9,1),%r10 movq %r9,0(%rsp) shrq $5,%r9 movq %r10,16(%rsp) subq $1,%r9 movq %r8,24(%rsp) movq %rdi,32(%rsp) movq %rax,40(%rsp) movq %r9,48(%rsp) jmp L$mulx4x_body .p2align 5 L$mulx4x_body: leaq 8(%rdx),%rdi movq (%rdx),%rdx leaq 64+32(%rsp),%rbx movq %rdx,%r9 mulxq 0(%rsi),%r8,%rax mulxq 8(%rsi),%r11,%r14 addq %rax,%r11 movq %rdi,8(%rsp) mulxq 16(%rsi),%r12,%r13 adcq %r14,%r12 adcq $0,%r13 movq %r8,%rdi imulq 24(%rsp),%r8 xorq %rbp,%rbp mulxq 24(%rsi),%rax,%r14 movq %r8,%rdx leaq 32(%rsi),%rsi adcxq %rax,%r13 adcxq %rbp,%r14 mulxq 0(%rcx),%rax,%r10 adcxq %rax,%rdi adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 .byte 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00 movq 48(%rsp),%rdi movq %r10,-32(%rbx) adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r11,-24(%rbx) adcxq %rax,%r12 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r12,-16(%rbx) jmp L$mulx4x_1st .p2align 5 L$mulx4x_1st: adcxq %rbp,%r15 mulxq 0(%rsi),%r10,%rax adcxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 .byte 0x67,0x67 movq %r8,%rdx adcxq %rax,%r13 adcxq %rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adoxq %r15,%r10 mulxq 0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 movq %r11,-32(%rbx) adoxq %r15,%r13 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r13,-16(%rbx) decq %rdi jnz L$mulx4x_1st movq 0(%rsp),%rax movq 8(%rsp),%rdi adcq %rbp,%r15 addq %r15,%r14 sbbq %r15,%r15 movq %r14,-8(%rbx) jmp 
L$mulx4x_outer .p2align 5 L$mulx4x_outer: movq (%rdi),%rdx leaq 8(%rdi),%rdi subq %rax,%rsi movq %r15,(%rbx) leaq 64+32(%rsp),%rbx subq %rax,%rcx mulxq 0(%rsi),%r8,%r11 xorl %ebp,%ebp movq %rdx,%r9 mulxq 8(%rsi),%r14,%r12 adoxq -32(%rbx),%r8 adcxq %r14,%r11 mulxq 16(%rsi),%r15,%r13 adoxq -24(%rbx),%r11 adcxq %r15,%r12 adoxq -16(%rbx),%r12 adcxq %rbp,%r13 adoxq %rbp,%r13 movq %rdi,8(%rsp) movq %r8,%r15 imulq 24(%rsp),%r8 xorl %ebp,%ebp mulxq 24(%rsi),%rax,%r14 movq %r8,%rdx adcxq %rax,%r13 adoxq -8(%rbx),%r13 adcxq %rbp,%r14 leaq 32(%rsi),%rsi adoxq %rbp,%r14 mulxq 0(%rcx),%rax,%r10 adcxq %rax,%r15 adoxq %r11,%r10 mulxq 8(%rcx),%rax,%r11 adcxq %rax,%r10 adoxq %r12,%r11 mulxq 16(%rcx),%rax,%r12 movq %r10,-32(%rbx) adcxq %rax,%r11 adoxq %r13,%r12 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r11,-24(%rbx) leaq 32(%rcx),%rcx adcxq %rax,%r12 adoxq %rbp,%r15 movq 48(%rsp),%rdi movq %r12,-16(%rbx) jmp L$mulx4x_inner .p2align 5 L$mulx4x_inner: mulxq 0(%rsi),%r10,%rax adcxq %rbp,%r15 adoxq %r14,%r10 mulxq 8(%rsi),%r11,%r14 adcxq 0(%rbx),%r10 adoxq %rax,%r11 mulxq 16(%rsi),%r12,%rax adcxq 8(%rbx),%r11 adoxq %r14,%r12 mulxq 24(%rsi),%r13,%r14 movq %r8,%rdx adcxq 16(%rbx),%r12 adoxq %rax,%r13 adcxq 24(%rbx),%r13 adoxq %rbp,%r14 leaq 32(%rsi),%rsi leaq 32(%rbx),%rbx adcxq %rbp,%r14 adoxq %r15,%r10 mulxq 0(%rcx),%rax,%r15 adcxq %rax,%r10 adoxq %r15,%r11 mulxq 8(%rcx),%rax,%r15 adcxq %rax,%r11 adoxq %r15,%r12 mulxq 16(%rcx),%rax,%r15 movq %r10,-40(%rbx) adcxq %rax,%r12 adoxq %r15,%r13 mulxq 24(%rcx),%rax,%r15 movq %r9,%rdx movq %r11,-32(%rbx) movq %r12,-24(%rbx) adcxq %rax,%r13 adoxq %rbp,%r15 leaq 32(%rcx),%rcx movq %r13,-16(%rbx) decq %rdi jnz L$mulx4x_inner movq 0(%rsp),%rax movq 8(%rsp),%rdi adcq %rbp,%r15 subq 0(%rbx),%rbp adcq %r15,%r14 sbbq %r15,%r15 movq %r14,-8(%rbx) cmpq 16(%rsp),%rdi jne L$mulx4x_outer leaq 64(%rsp),%rbx subq %rax,%rcx negq %r15 movq %rax,%rdx shrq $3+2,%rax movq 32(%rsp),%rdi jmp L$mulx4x_sub .p2align 5 L$mulx4x_sub: movq 0(%rbx),%r11 movq 8(%rbx),%r12 movq 16(%rbx),%r13 movq 24(%rbx),%r14 leaq 32(%rbx),%rbx sbbq 0(%rcx),%r11 sbbq 8(%rcx),%r12 sbbq 16(%rcx),%r13 sbbq 24(%rcx),%r14 leaq 32(%rcx),%rcx movq %r11,0(%rdi) movq %r12,8(%rdi) movq %r13,16(%rdi) movq %r14,24(%rdi) leaq 32(%rdi),%rdi decq %rax jnz L$mulx4x_sub sbbq $0,%r15 leaq 64(%rsp),%rbx subq %rdx,%rdi .byte 102,73,15,110,207 pxor %xmm0,%xmm0 pshufd $0,%xmm1,%xmm1 movq 40(%rsp),%rsi jmp L$mulx4x_cond_copy .p2align 5 L$mulx4x_cond_copy: movdqa 0(%rbx),%xmm2 movdqa 16(%rbx),%xmm3 leaq 32(%rbx),%rbx movdqu 0(%rdi),%xmm4 movdqu 16(%rdi),%xmm5 leaq 32(%rdi),%rdi movdqa %xmm0,-32(%rbx) movdqa %xmm0,-16(%rbx) pcmpeqd %xmm1,%xmm0 pand %xmm1,%xmm2 pand %xmm1,%xmm3 pand %xmm0,%xmm4 pand %xmm0,%xmm5 pxor %xmm0,%xmm0 por %xmm2,%xmm4 por %xmm3,%xmm5 movdqu %xmm4,-32(%rdi) movdqu %xmm5,-16(%rdi) subq $32,%rdx jnz L$mulx4x_cond_copy movq %rdx,(%rbx) movq $1,%rax movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbp movq -8(%rsi),%rbx leaq (%rsi),%rsp L$mulx4x_epilogue: ret .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .p2align 4 #endif
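
The _bn_mul_mont_nohw, _bn_mul4x_mont and _bn_mulx4x_mont entry points above are word-serial Montgomery multiplications: for each word of the second operand they accumulate ap*bp[i] into a scratch row (the L$1st*/L$inner* loops), add a multiple m = t[0]*n0 of the modulus so the low word cancels, drop that word, and finish with a conditional subtraction done as a constant-time masked copy (the L$sub/L$copy tails). The C sketch below shows that algorithm in the usual word-by-word (CIOS) formulation; mont_mul, neg_inv64 and their parameter layout are illustrative only and do not reproduce the exact calling convention of the assembly.

#include <stdint.h>
#include <stdio.h>

typedef unsigned __int128 u128;

/* -n^-1 mod 2^64 for odd n, by Newton iteration (doubles correct bits). */
static uint64_t neg_inv64(uint64_t n) {
    uint64_t x = n;                       /* correct to the 3 low bits */
    for (int i = 0; i < 5; i++) x *= 2 - n * x;
    return (uint64_t)0 - x;
}

/* rp = ap * bp * R^-1 mod np, with R = 2^(64*num).
   t must point to num+2 zeroed scratch limbs. */
static void mont_mul(uint64_t *rp, const uint64_t *ap, const uint64_t *bp,
                     const uint64_t *np, uint64_t n0, int num, uint64_t *t) {
    for (int i = 0; i < num; i++) {
        u128 c = 0;
        for (int j = 0; j < num; j++) {           /* t += ap * bp[i] */
            u128 acc = (u128)ap[j] * bp[i] + t[j] + c;
            t[j] = (uint64_t)acc;
            c = acc >> 64;
        }
        u128 s = (u128)t[num] + c;
        t[num] = (uint64_t)s;
        t[num + 1] = (uint64_t)(s >> 64);

        uint64_t m = t[0] * n0;                   /* makes t[0] vanish */
        c = 0;
        for (int j = 0; j < num; j++) {           /* t += m * np */
            u128 acc = (u128)m * np[j] + t[j] + c;
            t[j] = (uint64_t)acc;
            c = acc >> 64;
        }
        s = (u128)t[num] + c;
        t[num] = (uint64_t)s;
        t[num + 1] += (uint64_t)(s >> 64);

        for (int j = 0; j <= num; j++) t[j] = t[j + 1];   /* drop low limb */
        t[num + 1] = 0;
    }
    /* Final reduction: subtract np once if t >= np.  The assembly does this
       with a constant-time masked copy; a branch is enough for a sketch. */
    int ge = 1;
    if (t[num] == 0) {
        for (int j = num - 1; j >= 0; j--) {
            if (t[j] != np[j]) { ge = (t[j] > np[j]); break; }
        }
    }
    uint64_t borrow = 0;
    for (int j = 0; j < num; j++) {
        uint64_t diff = t[j] - np[j] - borrow;
        borrow = (t[j] < np[j]) || (t[j] == np[j] && borrow);
        rp[j] = ge ? diff : t[j];
    }
}

int main(void) {
    /* One-limb demo: n odd, a and b below n. */
    uint64_t n[1] = { 0xffffffff00000001ULL };
    uint64_t a[1] = { 0x0123456789abcdefULL };
    uint64_t b[1] = { 0x0fedcba987654321ULL };
    uint64_t t[3] = { 0, 0, 0 }, r[1];
    mont_mul(r, a, b, n, neg_inv64(n[0]), 1, t);
    /* r == a*b*2^-64 mod n, so r*2^64 mod n must equal a*b mod n. */
    uint64_t want = (uint64_t)(((u128)a[0] * b[0]) % n[0]);
    uint64_t got = (uint64_t)((((u128)r[0]) << 64) % n[0]);
    printf("montgomery ok: %s (%016llx vs %016llx)\n",
           want == got ? "yes" : "no",
           (unsigned long long)got, (unsigned long long)want);
    return 0;
}

The mul4x and mulx4x variants unroll the same inner loop four limbs at a time, the mulx path additionally using MULX/ADCX/ADOX to keep two carry chains in flight.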
marvin-hansen/iggy-streaming-system
33,969
thirdparty/crates/ring-0.17.9/pregenerated/sha256-armv8-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) // Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy // in the file LICENSE in the source distribution or at // https://www.openssl.org/source/license.html // ==================================================================== // Written by Andy Polyakov <appro@openssl.org> for the OpenSSL // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. // // Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. // // Performance in cycles per processed byte and improvement coefficient // over code generated with "default" compiler: // // SHA256-hw SHA256(*) SHA512 // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) // Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. // (**) The result is a trade-off: it's possible to improve it by // 10% (or by 1 cycle per round), but at the cost of 20% loss // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code // generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ # include <ring-core/arm_arch.h> #endif .text .globl sha256_block_data_order_nohw .def sha256_block_data_order_nohw .type 32 .endef .align 6 sha256_block_data_order_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#4*4 ldp w20,w21,[x0] // load context ldp w22,w23,[x0,#2*4] ldp w24,w25,[x0,#4*4] add x2,x1,x2,lsl#6 // end of input ldp w26,w27,[x0,#6*4] adrp x30,LK256 add x30,x30,:lo12:LK256 stp x0,x2,[x29,#96] Loop: ldp w3,w4,[x1],#2*4 ldr w19,[x30],#4 // *K++ eor w28,w21,w22 // magic seed str x1,[x29,#112] #ifndef __AARCH64EB__ rev w3,w3 // 0 #endif ror w16,w24,#6 add w27,w27,w19 // h+=K[i] eor w6,w24,w24,ror#14 and w17,w25,w24 bic w19,w26,w24 add w27,w27,w3 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w6,ror#11 // Sigma1(e) ror w6,w20,#2 add w27,w27,w17 // h+=Ch(e,f,g) eor w17,w20,w20,ror#9 add w27,w27,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w23,w23,w27 // d+=h eor w28,w28,w21 // Maj(a,b,c) eor w17,w6,w17,ror#13 // Sigma0(a) add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w4,w4 // 1 #endif ldp w5,w6,[x1],#2*4 add w27,w27,w17 // h+=Sigma0(a) ror w16,w23,#6 add w26,w26,w28 // h+=K[i] eor w7,w23,w23,ror#14 and w17,w24,w23 bic w28,w25,w23 add w26,w26,w4 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w7,ror#11 // Sigma1(e) ror w7,w27,#2 add w26,w26,w17 // h+=Ch(e,f,g) eor w17,w27,w27,ror#9 add w26,w26,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w22,w22,w26 // d+=h eor w19,w19,w20 // Maj(a,b,c) eor w17,w7,w17,ror#13 // Sigma0(a) add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w5,w5 // 2 #endif add w26,w26,w17 // h+=Sigma0(a) ror w16,w22,#6 add w25,w25,w19 // h+=K[i] eor w8,w22,w22,ror#14 and w17,w23,w22 bic w19,w24,w22 add w25,w25,w5 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w8,ror#11 // Sigma1(e) ror w8,w26,#2 add w25,w25,w17 // h+=Ch(e,f,g) eor w17,w26,w26,ror#9 add w25,w25,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w21,w21,w25 // d+=h eor w28,w28,w27 // Maj(a,b,c) eor w17,w8,w17,ror#13 // Sigma0(a) add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w6,w6 // 3 #endif ldp w7,w8,[x1],#2*4 add w25,w25,w17 // h+=Sigma0(a) ror w16,w21,#6 add w24,w24,w28 // h+=K[i] eor w9,w21,w21,ror#14 and w17,w22,w21 bic w28,w23,w21 add w24,w24,w6 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w9,ror#11 // Sigma1(e) ror w9,w25,#2 add w24,w24,w17 // h+=Ch(e,f,g) eor w17,w25,w25,ror#9 add w24,w24,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w20,w20,w24 // d+=h eor w19,w19,w26 // Maj(a,b,c) eor w17,w9,w17,ror#13 // Sigma0(a) add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w7,w7 // 4 #endif add w24,w24,w17 // h+=Sigma0(a) ror w16,w20,#6 add w23,w23,w19 // h+=K[i] eor w10,w20,w20,ror#14 and w17,w21,w20 bic w19,w22,w20 add w23,w23,w7 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w10,ror#11 // Sigma1(e) ror w10,w24,#2 add w23,w23,w17 // h+=Ch(e,f,g) eor w17,w24,w24,ror#9 add w23,w23,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w27,w27,w23 // d+=h eor w28,w28,w25 // Maj(a,b,c) eor w17,w10,w17,ror#13 // Sigma0(a) add w23,w23,w28 // h+=Maj(a,b,c) ldr 
w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w8,w8 // 5 #endif ldp w9,w10,[x1],#2*4 add w23,w23,w17 // h+=Sigma0(a) ror w16,w27,#6 add w22,w22,w28 // h+=K[i] eor w11,w27,w27,ror#14 and w17,w20,w27 bic w28,w21,w27 add w22,w22,w8 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w11,ror#11 // Sigma1(e) ror w11,w23,#2 add w22,w22,w17 // h+=Ch(e,f,g) eor w17,w23,w23,ror#9 add w22,w22,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w26,w26,w22 // d+=h eor w19,w19,w24 // Maj(a,b,c) eor w17,w11,w17,ror#13 // Sigma0(a) add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w9,w9 // 6 #endif add w22,w22,w17 // h+=Sigma0(a) ror w16,w26,#6 add w21,w21,w19 // h+=K[i] eor w12,w26,w26,ror#14 and w17,w27,w26 bic w19,w20,w26 add w21,w21,w9 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w12,ror#11 // Sigma1(e) ror w12,w22,#2 add w21,w21,w17 // h+=Ch(e,f,g) eor w17,w22,w22,ror#9 add w21,w21,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w25,w25,w21 // d+=h eor w28,w28,w23 // Maj(a,b,c) eor w17,w12,w17,ror#13 // Sigma0(a) add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w10,w10 // 7 #endif ldp w11,w12,[x1],#2*4 add w21,w21,w17 // h+=Sigma0(a) ror w16,w25,#6 add w20,w20,w28 // h+=K[i] eor w13,w25,w25,ror#14 and w17,w26,w25 bic w28,w27,w25 add w20,w20,w10 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w13,ror#11 // Sigma1(e) ror w13,w21,#2 add w20,w20,w17 // h+=Ch(e,f,g) eor w17,w21,w21,ror#9 add w20,w20,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w24,w24,w20 // d+=h eor w19,w19,w22 // Maj(a,b,c) eor w17,w13,w17,ror#13 // Sigma0(a) add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w20,w20,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w11,w11 // 8 #endif add w20,w20,w17 // h+=Sigma0(a) ror w16,w24,#6 add w27,w27,w19 // h+=K[i] eor w14,w24,w24,ror#14 and w17,w25,w24 bic w19,w26,w24 add w27,w27,w11 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w14,ror#11 // Sigma1(e) ror w14,w20,#2 add w27,w27,w17 // h+=Ch(e,f,g) eor w17,w20,w20,ror#9 add w27,w27,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w23,w23,w27 // d+=h eor w28,w28,w21 // Maj(a,b,c) eor w17,w14,w17,ror#13 // Sigma0(a) add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w12,w12 // 9 #endif ldp w13,w14,[x1],#2*4 add w27,w27,w17 // h+=Sigma0(a) ror w16,w23,#6 add w26,w26,w28 // h+=K[i] eor w15,w23,w23,ror#14 and w17,w24,w23 bic w28,w25,w23 add w26,w26,w12 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w15,ror#11 // Sigma1(e) ror w15,w27,#2 add w26,w26,w17 // h+=Ch(e,f,g) eor w17,w27,w27,ror#9 add w26,w26,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w22,w22,w26 // d+=h eor w19,w19,w20 // Maj(a,b,c) eor w17,w15,w17,ror#13 // Sigma0(a) add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w13,w13 // 10 #endif add w26,w26,w17 // h+=Sigma0(a) ror w16,w22,#6 add w25,w25,w19 // h+=K[i] eor w0,w22,w22,ror#14 and w17,w23,w22 bic w19,w24,w22 add w25,w25,w13 // h+=X[i] orr 
w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w0,ror#11 // Sigma1(e) ror w0,w26,#2 add w25,w25,w17 // h+=Ch(e,f,g) eor w17,w26,w26,ror#9 add w25,w25,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w21,w21,w25 // d+=h eor w28,w28,w27 // Maj(a,b,c) eor w17,w0,w17,ror#13 // Sigma0(a) add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w14,w14 // 11 #endif ldp w15,w0,[x1],#2*4 add w25,w25,w17 // h+=Sigma0(a) str w6,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] eor w6,w21,w21,ror#14 and w17,w22,w21 bic w28,w23,w21 add w24,w24,w14 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w6,ror#11 // Sigma1(e) ror w6,w25,#2 add w24,w24,w17 // h+=Ch(e,f,g) eor w17,w25,w25,ror#9 add w24,w24,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w20,w20,w24 // d+=h eor w19,w19,w26 // Maj(a,b,c) eor w17,w6,w17,ror#13 // Sigma0(a) add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w15,w15 // 12 #endif add w24,w24,w17 // h+=Sigma0(a) str w7,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] eor w7,w20,w20,ror#14 and w17,w21,w20 bic w19,w22,w20 add w23,w23,w15 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w7,ror#11 // Sigma1(e) ror w7,w24,#2 add w23,w23,w17 // h+=Ch(e,f,g) eor w17,w24,w24,ror#9 add w23,w23,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w27,w27,w23 // d+=h eor w28,w28,w25 // Maj(a,b,c) eor w17,w7,w17,ror#13 // Sigma0(a) add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w0,w0 // 13 #endif ldp w1,w2,[x1] add w23,w23,w17 // h+=Sigma0(a) str w8,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] eor w8,w27,w27,ror#14 and w17,w20,w27 bic w28,w21,w27 add w22,w22,w0 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w8,ror#11 // Sigma1(e) ror w8,w23,#2 add w22,w22,w17 // h+=Ch(e,f,g) eor w17,w23,w23,ror#9 add w22,w22,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w26,w26,w22 // d+=h eor w19,w19,w24 // Maj(a,b,c) eor w17,w8,w17,ror#13 // Sigma0(a) add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w1,w1 // 14 #endif ldr w6,[sp,#12] add w22,w22,w17 // h+=Sigma0(a) str w9,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] eor w9,w26,w26,ror#14 and w17,w27,w26 bic w19,w20,w26 add w21,w21,w1 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w9,ror#11 // Sigma1(e) ror w9,w22,#2 add w21,w21,w17 // h+=Ch(e,f,g) eor w17,w22,w22,ror#9 add w21,w21,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w25,w25,w21 // d+=h eor w28,w28,w23 // Maj(a,b,c) eor w17,w9,w17,ror#13 // Sigma0(a) add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w2,w2 // 15 #endif ldr w7,[sp,#0] add w21,w21,w17 // h+=Sigma0(a) str w10,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w9,w4,#7 and w17,w26,w25 ror w8,w1,#17 bic w28,w27,w25 ror w10,w21,#2 add w20,w20,w2 // h+=X[i] eor w16,w16,w25,ror#11 eor w9,w9,w4,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w10,w10,w21,ror#13 add w20,w20,w17 // 
h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w8,w8,w1,ror#19 eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w10,w21,ror#22 // Sigma0(a) eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) add w3,w3,w12 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w3,w3,w9 add w20,w20,w17 // h+=Sigma0(a) add w3,w3,w8 Loop_16_xx: ldr w8,[sp,#4] str w11,[sp,#0] ror w16,w24,#6 add w27,w27,w19 // h+=K[i] ror w10,w5,#7 and w17,w25,w24 ror w9,w2,#17 bic w19,w26,w24 ror w11,w20,#2 add w27,w27,w3 // h+=X[i] eor w16,w16,w24,ror#11 eor w10,w10,w5,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w24,ror#25 // Sigma1(e) eor w11,w11,w20,ror#13 add w27,w27,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w9,w9,w2,ror#19 eor w10,w10,w5,lsr#3 // sigma0(X[i+1]) add w27,w27,w16 // h+=Sigma1(e) eor w28,w28,w21 // Maj(a,b,c) eor w17,w11,w20,ror#22 // Sigma0(a) eor w9,w9,w2,lsr#10 // sigma1(X[i+14]) add w4,w4,w13 add w23,w23,w27 // d+=h add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w4,w4,w10 add w27,w27,w17 // h+=Sigma0(a) add w4,w4,w9 ldr w9,[sp,#8] str w12,[sp,#4] ror w16,w23,#6 add w26,w26,w28 // h+=K[i] ror w11,w6,#7 and w17,w24,w23 ror w10,w3,#17 bic w28,w25,w23 ror w12,w27,#2 add w26,w26,w4 // h+=X[i] eor w16,w16,w23,ror#11 eor w11,w11,w6,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w23,ror#25 // Sigma1(e) eor w12,w12,w27,ror#13 add w26,w26,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w10,w10,w3,ror#19 eor w11,w11,w6,lsr#3 // sigma0(X[i+1]) add w26,w26,w16 // h+=Sigma1(e) eor w19,w19,w20 // Maj(a,b,c) eor w17,w12,w27,ror#22 // Sigma0(a) eor w10,w10,w3,lsr#10 // sigma1(X[i+14]) add w5,w5,w14 add w22,w22,w26 // d+=h add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w5,w5,w11 add w26,w26,w17 // h+=Sigma0(a) add w5,w5,w10 ldr w10,[sp,#12] str w13,[sp,#8] ror w16,w22,#6 add w25,w25,w19 // h+=K[i] ror w12,w7,#7 and w17,w23,w22 ror w11,w4,#17 bic w19,w24,w22 ror w13,w26,#2 add w25,w25,w5 // h+=X[i] eor w16,w16,w22,ror#11 eor w12,w12,w7,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w22,ror#25 // Sigma1(e) eor w13,w13,w26,ror#13 add w25,w25,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w11,w11,w4,ror#19 eor w12,w12,w7,lsr#3 // sigma0(X[i+1]) add w25,w25,w16 // h+=Sigma1(e) eor w28,w28,w27 // Maj(a,b,c) eor w17,w13,w26,ror#22 // Sigma0(a) eor w11,w11,w4,lsr#10 // sigma1(X[i+14]) add w6,w6,w15 add w21,w21,w25 // d+=h add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w6,w6,w12 add w25,w25,w17 // h+=Sigma0(a) add w6,w6,w11 ldr w11,[sp,#0] str w14,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] ror w13,w8,#7 and w17,w22,w21 ror w12,w5,#17 bic w28,w23,w21 ror w14,w25,#2 add w24,w24,w6 // h+=X[i] eor w16,w16,w21,ror#11 eor w13,w13,w8,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w21,ror#25 // Sigma1(e) eor w14,w14,w25,ror#13 add w24,w24,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w12,w12,w5,ror#19 eor w13,w13,w8,lsr#3 // sigma0(X[i+1]) add w24,w24,w16 // h+=Sigma1(e) eor w19,w19,w26 // Maj(a,b,c) eor w17,w14,w25,ror#22 // Sigma0(a) eor w12,w12,w5,lsr#10 // sigma1(X[i+14]) add w7,w7,w0 add w20,w20,w24 // d+=h add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w7,w7,w13 add w24,w24,w17 // 
h+=Sigma0(a) add w7,w7,w12 ldr w12,[sp,#4] str w15,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] ror w14,w9,#7 and w17,w21,w20 ror w13,w6,#17 bic w19,w22,w20 ror w15,w24,#2 add w23,w23,w7 // h+=X[i] eor w16,w16,w20,ror#11 eor w14,w14,w9,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w20,ror#25 // Sigma1(e) eor w15,w15,w24,ror#13 add w23,w23,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w13,w13,w6,ror#19 eor w14,w14,w9,lsr#3 // sigma0(X[i+1]) add w23,w23,w16 // h+=Sigma1(e) eor w28,w28,w25 // Maj(a,b,c) eor w17,w15,w24,ror#22 // Sigma0(a) eor w13,w13,w6,lsr#10 // sigma1(X[i+14]) add w8,w8,w1 add w27,w27,w23 // d+=h add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w8,w8,w14 add w23,w23,w17 // h+=Sigma0(a) add w8,w8,w13 ldr w13,[sp,#8] str w0,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] ror w15,w10,#7 and w17,w20,w27 ror w14,w7,#17 bic w28,w21,w27 ror w0,w23,#2 add w22,w22,w8 // h+=X[i] eor w16,w16,w27,ror#11 eor w15,w15,w10,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w27,ror#25 // Sigma1(e) eor w0,w0,w23,ror#13 add w22,w22,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w14,w14,w7,ror#19 eor w15,w15,w10,lsr#3 // sigma0(X[i+1]) add w22,w22,w16 // h+=Sigma1(e) eor w19,w19,w24 // Maj(a,b,c) eor w17,w0,w23,ror#22 // Sigma0(a) eor w14,w14,w7,lsr#10 // sigma1(X[i+14]) add w9,w9,w2 add w26,w26,w22 // d+=h add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w9,w9,w15 add w22,w22,w17 // h+=Sigma0(a) add w9,w9,w14 ldr w14,[sp,#12] str w1,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] ror w0,w11,#7 and w17,w27,w26 ror w15,w8,#17 bic w19,w20,w26 ror w1,w22,#2 add w21,w21,w9 // h+=X[i] eor w16,w16,w26,ror#11 eor w0,w0,w11,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w26,ror#25 // Sigma1(e) eor w1,w1,w22,ror#13 add w21,w21,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w15,w15,w8,ror#19 eor w0,w0,w11,lsr#3 // sigma0(X[i+1]) add w21,w21,w16 // h+=Sigma1(e) eor w28,w28,w23 // Maj(a,b,c) eor w17,w1,w22,ror#22 // Sigma0(a) eor w15,w15,w8,lsr#10 // sigma1(X[i+14]) add w10,w10,w3 add w25,w25,w21 // d+=h add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w10,w10,w0 add w21,w21,w17 // h+=Sigma0(a) add w10,w10,w15 ldr w15,[sp,#0] str w2,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w1,w12,#7 and w17,w26,w25 ror w0,w9,#17 bic w28,w27,w25 ror w2,w21,#2 add w20,w20,w10 // h+=X[i] eor w16,w16,w25,ror#11 eor w1,w1,w12,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w2,w2,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w0,w0,w9,ror#19 eor w1,w1,w12,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w2,w21,ror#22 // Sigma0(a) eor w0,w0,w9,lsr#10 // sigma1(X[i+14]) add w11,w11,w4 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w11,w11,w1 add w20,w20,w17 // h+=Sigma0(a) add w11,w11,w0 ldr w0,[sp,#4] str w3,[sp,#0] ror w16,w24,#6 add w27,w27,w19 // h+=K[i] ror w2,w13,#7 and w17,w25,w24 ror w1,w10,#17 bic w19,w26,w24 ror w3,w20,#2 add w27,w27,w11 // h+=X[i] eor w16,w16,w24,ror#11 eor w2,w2,w13,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w24,ror#25 // Sigma1(e) eor w3,w3,w20,ror#13 add w27,w27,w17 // h+=Ch(e,f,g) 
and w28,w28,w19 // (b^c)&=(a^b) eor w1,w1,w10,ror#19 eor w2,w2,w13,lsr#3 // sigma0(X[i+1]) add w27,w27,w16 // h+=Sigma1(e) eor w28,w28,w21 // Maj(a,b,c) eor w17,w3,w20,ror#22 // Sigma0(a) eor w1,w1,w10,lsr#10 // sigma1(X[i+14]) add w12,w12,w5 add w23,w23,w27 // d+=h add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w12,w12,w2 add w27,w27,w17 // h+=Sigma0(a) add w12,w12,w1 ldr w1,[sp,#8] str w4,[sp,#4] ror w16,w23,#6 add w26,w26,w28 // h+=K[i] ror w3,w14,#7 and w17,w24,w23 ror w2,w11,#17 bic w28,w25,w23 ror w4,w27,#2 add w26,w26,w12 // h+=X[i] eor w16,w16,w23,ror#11 eor w3,w3,w14,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w23,ror#25 // Sigma1(e) eor w4,w4,w27,ror#13 add w26,w26,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w2,w2,w11,ror#19 eor w3,w3,w14,lsr#3 // sigma0(X[i+1]) add w26,w26,w16 // h+=Sigma1(e) eor w19,w19,w20 // Maj(a,b,c) eor w17,w4,w27,ror#22 // Sigma0(a) eor w2,w2,w11,lsr#10 // sigma1(X[i+14]) add w13,w13,w6 add w22,w22,w26 // d+=h add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w13,w13,w3 add w26,w26,w17 // h+=Sigma0(a) add w13,w13,w2 ldr w2,[sp,#12] str w5,[sp,#8] ror w16,w22,#6 add w25,w25,w19 // h+=K[i] ror w4,w15,#7 and w17,w23,w22 ror w3,w12,#17 bic w19,w24,w22 ror w5,w26,#2 add w25,w25,w13 // h+=X[i] eor w16,w16,w22,ror#11 eor w4,w4,w15,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w22,ror#25 // Sigma1(e) eor w5,w5,w26,ror#13 add w25,w25,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w3,w3,w12,ror#19 eor w4,w4,w15,lsr#3 // sigma0(X[i+1]) add w25,w25,w16 // h+=Sigma1(e) eor w28,w28,w27 // Maj(a,b,c) eor w17,w5,w26,ror#22 // Sigma0(a) eor w3,w3,w12,lsr#10 // sigma1(X[i+14]) add w14,w14,w7 add w21,w21,w25 // d+=h add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w14,w14,w4 add w25,w25,w17 // h+=Sigma0(a) add w14,w14,w3 ldr w3,[sp,#0] str w6,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] ror w5,w0,#7 and w17,w22,w21 ror w4,w13,#17 bic w28,w23,w21 ror w6,w25,#2 add w24,w24,w14 // h+=X[i] eor w16,w16,w21,ror#11 eor w5,w5,w0,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w21,ror#25 // Sigma1(e) eor w6,w6,w25,ror#13 add w24,w24,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w4,w4,w13,ror#19 eor w5,w5,w0,lsr#3 // sigma0(X[i+1]) add w24,w24,w16 // h+=Sigma1(e) eor w19,w19,w26 // Maj(a,b,c) eor w17,w6,w25,ror#22 // Sigma0(a) eor w4,w4,w13,lsr#10 // sigma1(X[i+14]) add w15,w15,w8 add w20,w20,w24 // d+=h add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w15,w15,w5 add w24,w24,w17 // h+=Sigma0(a) add w15,w15,w4 ldr w4,[sp,#4] str w7,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] ror w6,w1,#7 and w17,w21,w20 ror w5,w14,#17 bic w19,w22,w20 ror w7,w24,#2 add w23,w23,w15 // h+=X[i] eor w16,w16,w20,ror#11 eor w6,w6,w1,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w20,ror#25 // Sigma1(e) eor w7,w7,w24,ror#13 add w23,w23,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w5,w5,w14,ror#19 eor w6,w6,w1,lsr#3 // sigma0(X[i+1]) add w23,w23,w16 // h+=Sigma1(e) eor w28,w28,w25 // Maj(a,b,c) eor w17,w7,w24,ror#22 // Sigma0(a) eor w5,w5,w14,lsr#10 // sigma1(X[i+14]) add w0,w0,w9 add w27,w27,w23 // d+=h add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w0,w0,w6 add w23,w23,w17 // h+=Sigma0(a) add w0,w0,w5 ldr w5,[sp,#8] str 
w8,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] ror w7,w2,#7 and w17,w20,w27 ror w6,w15,#17 bic w28,w21,w27 ror w8,w23,#2 add w22,w22,w0 // h+=X[i] eor w16,w16,w27,ror#11 eor w7,w7,w2,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w27,ror#25 // Sigma1(e) eor w8,w8,w23,ror#13 add w22,w22,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w6,w6,w15,ror#19 eor w7,w7,w2,lsr#3 // sigma0(X[i+1]) add w22,w22,w16 // h+=Sigma1(e) eor w19,w19,w24 // Maj(a,b,c) eor w17,w8,w23,ror#22 // Sigma0(a) eor w6,w6,w15,lsr#10 // sigma1(X[i+14]) add w1,w1,w10 add w26,w26,w22 // d+=h add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w1,w1,w7 add w22,w22,w17 // h+=Sigma0(a) add w1,w1,w6 ldr w6,[sp,#12] str w9,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] ror w8,w3,#7 and w17,w27,w26 ror w7,w0,#17 bic w19,w20,w26 ror w9,w22,#2 add w21,w21,w1 // h+=X[i] eor w16,w16,w26,ror#11 eor w8,w8,w3,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w26,ror#25 // Sigma1(e) eor w9,w9,w22,ror#13 add w21,w21,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w7,w7,w0,ror#19 eor w8,w8,w3,lsr#3 // sigma0(X[i+1]) add w21,w21,w16 // h+=Sigma1(e) eor w28,w28,w23 // Maj(a,b,c) eor w17,w9,w22,ror#22 // Sigma0(a) eor w7,w7,w0,lsr#10 // sigma1(X[i+14]) add w2,w2,w11 add w25,w25,w21 // d+=h add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w2,w2,w8 add w21,w21,w17 // h+=Sigma0(a) add w2,w2,w7 ldr w7,[sp,#0] str w10,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w9,w4,#7 and w17,w26,w25 ror w8,w1,#17 bic w28,w27,w25 ror w10,w21,#2 add w20,w20,w2 // h+=X[i] eor w16,w16,w25,ror#11 eor w9,w9,w4,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w10,w10,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w8,w8,w1,ror#19 eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w10,w21,ror#22 // Sigma0(a) eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) add w3,w3,w12 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w3,w3,w9 add w20,w20,w17 // h+=Sigma0(a) add w3,w3,w8 cbnz w19,Loop_16_xx ldp x0,x2,[x29,#96] ldr x1,[x29,#112] sub x30,x30,#260 // rewind ldp w3,w4,[x0] ldp w5,w6,[x0,#2*4] add x1,x1,#14*4 // advance input pointer ldp w7,w8,[x0,#4*4] add w20,w20,w3 ldp w9,w10,[x0,#6*4] add w21,w21,w4 add w22,w22,w5 add w23,w23,w6 stp w20,w21,[x0] add w24,w24,w7 add w25,w25,w8 stp w22,w23,[x0,#2*4] add w26,w26,w9 add w27,w27,w10 cmp x1,x2 stp w24,w25,[x0,#4*4] stp w26,w27,[x0,#6*4] b.ne Loop ldp x19,x20,[x29,#16] add sp,sp,#4*4 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 AARCH64_VALIDATE_LINK_REGISTER ret .section .rodata .align 6 LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 
0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0 //terminator .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 .text #ifndef __KERNEL__ .globl sha256_block_data_order_hw .def sha256_block_data_order_hw .type 32 .endef .align 6 sha256_block_data_order_hw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 ld1 {v0.4s,v1.4s},[x0] adrp x3,LK256 add x3,x3,:lo12:LK256 Loop_hw: ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 sub x2,x2,#1 ld1 {v16.4s},[x3],#16 rev32 v4.16b,v4.16b rev32 v5.16b,v5.16b rev32 v6.16b,v6.16b rev32 v7.16b,v7.16b orr v18.16b,v0.16b,v0.16b // offload orr v19.16b,v1.16b,v1.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .long 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .long 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .long 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .long 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .long 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .long 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .long 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .long 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .long 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .long 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h 
v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .long 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .long 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s ld1 {v17.4s},[x3] add v16.4s,v16.4s,v6.4s sub x3,x3,#64*4-16 // rewind orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s add v17.4s,v17.4s,v7.4s orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s add v0.4s,v0.4s,v18.4s add v1.4s,v1.4s,v19.4s cbnz x2,Loop_hw st1 {v0.4s,v1.4s},[x0] ldr x29,[sp],#16 ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
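For reference, the scalar Loop_16_xx rounds in the file above annotate every step as Sigma1(e), Ch(e,f,g), Maj(a,b,c), sigma0(X[i+1]) and sigma1(X[i+14]). The following is a minimal Rust sketch of the SHA-256 round and message-schedule step those annotations describe; the function names are hypothetical and not part of ring's API, they only restate what the annotated rotates and adds compute.

```rust
// Illustrative sketch only. The assembly computes Ch with and/bic/orr and Maj
// with the (a^b)&(b^c) shortcut; the textbook forms below are equivalent.

fn small_sigma0(x: u32) -> u32 { x.rotate_right(7) ^ x.rotate_right(18) ^ (x >> 3) }
fn small_sigma1(x: u32) -> u32 { x.rotate_right(17) ^ x.rotate_right(19) ^ (x >> 10) }
fn big_sigma0(x: u32) -> u32 { x.rotate_right(2) ^ x.rotate_right(13) ^ x.rotate_right(22) }
fn big_sigma1(x: u32) -> u32 { x.rotate_right(6) ^ x.rotate_right(11) ^ x.rotate_right(25) }
fn ch(e: u32, f: u32, g: u32) -> u32 { (e & f) ^ (!e & g) }
fn maj(a: u32, b: u32, c: u32) -> u32 { (a & b) ^ (a & c) ^ (b & c) }

/// One compression round; `state` is [a, b, c, d, e, f, g, h].
fn round(state: &mut [u32; 8], k: u32, w: u32) {
    let [a, b, c, d, e, f, g, h] = *state;
    // h += Sigma1(e) + Ch(e,f,g) + K[i] + X[i]; then d += h and
    // h += Sigma0(a) + Maj(a,b,c) -- the same adds the assembly comments mark.
    let t1 = h
        .wrapping_add(big_sigma1(e))
        .wrapping_add(ch(e, f, g))
        .wrapping_add(k)
        .wrapping_add(w);
    let t2 = big_sigma0(a).wrapping_add(maj(a, b, c));
    *state = [t1.wrapping_add(t2), a, b, c, d.wrapping_add(t1), e, f, g];
}

/// Message-schedule step for i >= 16:
/// W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16],
/// written in the assembly with the sliding-window indices X[i+14] / X[i+1].
fn schedule(w: &mut [u32; 64], i: usize) {
    w[i] = small_sigma1(w[i - 2])
        .wrapping_add(w[i - 7])
        .wrapping_add(small_sigma0(w[i - 15]))
        .wrapping_add(w[i - 16]);
}
```

The assembly keeps the 16-entry schedule window in registers and on the stack and defers the final "h += Sigma0(a)" into the next round, but the arithmetic is exactly the above.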
marvin-hansen/iggy-streaming-system
35,688
thirdparty/crates/ring-0.17.9/pregenerated/p256-armv8-asm-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include "ring-core/arm_arch.h" .section .rodata .align 5 Lpoly: .quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001 LRR: // 2^512 mod P precomputed for NIST P256 polynomial .quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd Lone_mont: .quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe Lone: .quad 1,0,0,0 Lord: .quad 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000 LordK: .quad 0xccd1c8aaee00bc4f .byte 69,67,80,95,78,73,83,84,90,50,53,54,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .text // void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4], // const BN_ULONG x2[4]); .globl ecp_nistz256_mul_mont .def ecp_nistz256_mul_mont .type 32 .endef .align 4 ecp_nistz256_mul_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-32]! add x29,sp,#0 stp x19,x20,[sp,#16] ldr x3,[x2] // bp[0] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_mul_mont ldp x19,x20,[sp,#16] ldp x29,x30,[sp],#32 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_sqr_mont .def ecp_nistz256_sqr_mont .type 32 .endef .align 4 ecp_nistz256_sqr_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-32]! add x29,sp,#0 stp x19,x20,[sp,#16] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sqr_mont ldp x19,x20,[sp,#16] ldp x29,x30,[sp],#32 AARCH64_VALIDATE_LINK_REGISTER ret // void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_neg .def ecp_nistz256_neg .type 32 .endef .align 4 ecp_nistz256_neg: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 mov x2,x1 mov x14,xzr // a = 0 mov x15,xzr mov x16,xzr mov x17,xzr adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sub_from ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret // note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded // to x4-x7 and b[0] - to x3 .def __ecp_nistz256_mul_mont .type 32 .endef .align 4 __ecp_nistz256_mul_mont: mul x14,x4,x3 // a[0]*b[0] umulh x8,x4,x3 mul x15,x5,x3 // a[1]*b[0] umulh x9,x5,x3 mul x16,x6,x3 // a[2]*b[0] umulh x10,x6,x3 mul x17,x7,x3 // a[3]*b[0] umulh x11,x7,x3 ldr x3,[x2,#8] // b[1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adc x19,xzr,x11 mov x20,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr ldr x3,[x2,#8*(1+1)] // b[1+1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr ldr x3,[x2,#8*(2+1)] // b[2+1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr // last reduction subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 adcs x16,x17,x10 // +=acc[0]*0xffff0001 adcs x17,x19,x11 adc x19,x20,xzr adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x19,xzr // did it borrow? csel x14,x14,x8,lo // ret = borrow ? 
ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret // note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded // to x4-x7 .def __ecp_nistz256_sqr_mont .type 32 .endef .align 4 __ecp_nistz256_sqr_mont: // | | | | | |a1*a0| | // | | | | |a2*a0| | | // | |a3*a2|a3*a0| | | | // | | | |a2*a1| | | | // | | |a3*a1| | | | | // *| | | | | | | | 2| // +|a3*a3|a2*a2|a1*a1|a0*a0| // |--+--+--+--+--+--+--+--| // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is , i.e. follow // // "can't overflow" below mark carrying into high part of // multiplication result, which can't overflow, because it // can never be all ones. mul x15,x5,x4 // a[1]*a[0] umulh x9,x5,x4 mul x16,x6,x4 // a[2]*a[0] umulh x10,x6,x4 mul x17,x7,x4 // a[3]*a[0] umulh x19,x7,x4 adds x16,x16,x9 // accumulate high parts of multiplication mul x8,x6,x5 // a[2]*a[1] umulh x9,x6,x5 adcs x17,x17,x10 mul x10,x7,x5 // a[3]*a[1] umulh x11,x7,x5 adc x19,x19,xzr // can't overflow mul x20,x7,x6 // a[3]*a[2] umulh x1,x7,x6 adds x9,x9,x10 // accumulate high parts of multiplication mul x14,x4,x4 // a[0]*a[0] adc x10,x11,xzr // can't overflow adds x17,x17,x8 // accumulate low parts of multiplication umulh x4,x4,x4 adcs x19,x19,x9 mul x9,x5,x5 // a[1]*a[1] adcs x20,x20,x10 umulh x5,x5,x5 adc x1,x1,xzr // can't overflow adds x15,x15,x15 // acc[1-6]*=2 mul x10,x6,x6 // a[2]*a[2] adcs x16,x16,x16 umulh x6,x6,x6 adcs x17,x17,x17 mul x11,x7,x7 // a[3]*a[3] adcs x19,x19,x19 umulh x7,x7,x7 adcs x20,x20,x20 adcs x1,x1,x1 adc x2,xzr,xzr adds x15,x15,x4 // +a[i]*a[i] adcs x16,x16,x9 adcs x17,x17,x5 adcs x19,x19,x10 adcs x20,x20,x6 lsl x8,x14,#32 adcs x1,x1,x11 lsr x9,x14,#32 adc x2,x2,x7 subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 adcs x16,x17,x10 // +=acc[0]*0xffff0001 adc x17,x11,xzr // can't overflow adds x14,x14,x19 // accumulate upper half adcs x15,x15,x20 adcs x16,x16,x1 adcs x17,x17,x2 adc x19,xzr,xzr adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x19,xzr // did it borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret // Note that __ecp_nistz256_add_to expects both input vectors pre-loaded to // x4-x7 and x8-x11. This is done because it's used in multiple // contexts, e.g. in multiplication by 2 and 3... .def __ecp_nistz256_add_to .type 32 .endef .align 4 __ecp_nistz256_add_to: adds x14,x14,x8 // ret = a+b adcs x15,x15,x9 adcs x16,x16,x10 adcs x17,x17,x11 adc x1,xzr,xzr // zap x1 adds x8,x14,#1 // subs x8,x4,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x1,xzr // did subtraction borrow? csel x14,x14,x8,lo // ret = borrow ? 
ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret .def __ecp_nistz256_sub_from .type 32 .endef .align 4 __ecp_nistz256_sub_from: ldp x8,x9,[x2] ldp x10,x11,[x2,#16] subs x14,x14,x8 // ret = a-b sbcs x15,x15,x9 sbcs x16,x16,x10 sbcs x17,x17,x11 sbc x1,xzr,xzr // zap x1 subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus adcs x9,x15,x12 adcs x10,x16,xzr adc x11,x17,x13 cmp x1,xzr // did subtraction borrow? csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret csel x15,x15,x9,eq csel x16,x16,x10,eq stp x14,x15,[x0] csel x17,x17,x11,eq stp x16,x17,[x0,#16] ret .def __ecp_nistz256_sub_morf .type 32 .endef .align 4 __ecp_nistz256_sub_morf: ldp x8,x9,[x2] ldp x10,x11,[x2,#16] subs x14,x8,x14 // ret = b-a sbcs x15,x9,x15 sbcs x16,x10,x16 sbcs x17,x11,x17 sbc x1,xzr,xzr // zap x1 subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus adcs x9,x15,x12 adcs x10,x16,xzr adc x11,x17,x13 cmp x1,xzr // did subtraction borrow? csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret csel x15,x15,x9,eq csel x16,x16,x10,eq stp x14,x15,[x0] csel x17,x17,x11,eq stp x16,x17,[x0,#16] ret .def __ecp_nistz256_div_by_2 .type 32 .endef .align 4 __ecp_nistz256_div_by_2: subs x8,x14,#1 // adds x8,x4,#-1 // tmp = a+modulus adcs x9,x15,x12 adcs x10,x16,xzr adcs x11,x17,x13 adc x1,xzr,xzr // zap x1 tst x14,#1 // is a even? csel x14,x14,x8,eq // ret = even ? a : a+modulus csel x15,x15,x9,eq csel x16,x16,x10,eq csel x17,x17,x11,eq csel x1,xzr,x1,eq lsr x14,x14,#1 // ret >>= 1 orr x14,x14,x15,lsl#63 lsr x15,x15,#1 orr x15,x15,x16,lsl#63 lsr x16,x16,#1 orr x16,x16,x17,lsl#63 lsr x17,x17,#1 stp x14,x15,[x0] orr x17,x17,x1,lsl#63 stp x16,x17,[x0,#16] ret .globl ecp_nistz256_point_double .def ecp_nistz256_point_double .type 32 .endef .align 5 ecp_nistz256_point_double: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] sub sp,sp,#32*4 Ldouble_shortcut: ldp x14,x15,[x1,#32] mov x21,x0 ldp x16,x17,[x1,#48] mov x22,x1 adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] mov x8,x14 ldr x13,[x13,#24] mov x9,x15 ldp x4,x5,[x22,#64] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[x22,#64+16] add x0,sp,#0 bl __ecp_nistz256_add_to // p256_mul_by_2(S, in_y); add x0,sp,#64 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z); ldp x8,x9,[x22] ldp x10,x11,[x22,#16] mov x4,x14 // put Zsqr aside for p256_sub mov x5,x15 mov x6,x16 mov x7,x17 add x0,sp,#32 bl __ecp_nistz256_add_to // p256_add(M, Zsqr, in_x); add x2,x22,#0 mov x14,x4 // restore Zsqr mov x15,x5 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont mov x16,x6 mov x17,x7 ldp x6,x7,[sp,#0+16] add x0,sp,#64 bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr); add x0,sp,#0 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S); ldr x3,[x22,#32] ldp x4,x5,[x22,#64] ldp x6,x7,[x22,#64+16] add x2,x22,#32 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y); mov x8,x14 mov x9,x15 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[sp,#0+16] add x0,x21,#64 bl __ecp_nistz256_add_to // p256_mul_by_2(res_z, tmp0); add x0,sp,#96 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S); ldr x3,[sp,#64] // forward load for p256_mul_mont ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x0,x21,#32 bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0); add x2,sp,#64 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr); mov x8,x14 // duplicate M mov x9,x15 mov x10,x16 mov x11,x17 mov x4,x14 // put M aside mov x5,x15 mov x6,x16 mov x7,x17 add x0,sp,#32 bl __ecp_nistz256_add_to mov x8,x4 // restore M mov x9,x5 ldr x3,[x22] // forward load for p256_mul_mont mov x10,x6 ldp x4,x5,[sp,#0] mov x11,x7 ldp x6,x7,[sp,#0+16] bl __ecp_nistz256_add_to // p256_mul_by_3(M, M); add x2,x22,#0 add x0,sp,#0 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x); mov x8,x14 mov x9,x15 ldp x4,x5,[sp,#32] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[sp,#32+16] add x0,sp,#96 bl __ecp_nistz256_add_to // p256_mul_by_2(tmp0, S); add x0,x21,#0 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M); add x2,sp,#96 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0); add x2,sp,#0 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x); ldr x3,[sp,#32] mov x4,x14 // copy S mov x5,x15 mov x6,x16 mov x7,x17 add x2,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M); add x2,x21,#32 add x0,x21,#32 bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y); add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .globl ecp_nistz256_point_add .def ecp_nistz256_point_add .type 32 .endef .align 5 ecp_nistz256_point_add: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#32*12 ldp x4,x5,[x2,#64] // in2_z ldp x6,x7,[x2,#64+16] mov x21,x0 mov x22,x1 mov x23,x2 adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] orr x8,x4,x5 orr x10,x6,x7 orr x25,x8,x10 cmp x25,#0 csetm x25,ne // ~in2infty add x0,sp,#192 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z); ldp x4,x5,[x22,#64] // in1_z ldp x6,x7,[x22,#64+16] orr x8,x4,x5 orr x10,x6,x7 orr x24,x8,x10 cmp x24,#0 csetm x24,ne // ~in1infty add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z); ldr x3,[x23,#64] ldp x4,x5,[sp,#192] ldp x6,x7,[sp,#192+16] add x2,x23,#64 add x0,sp,#320 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z); ldr x3,[x22,#64] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,x22,#64 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z); ldr x3,[x22,#32] ldp x4,x5,[sp,#320] ldp x6,x7,[sp,#320+16] add x2,x22,#32 add x0,sp,#320 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y); ldr x3,[x23,#32] ldp x4,x5,[sp,#352] ldp x6,x7,[sp,#352+16] add x2,x23,#32 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y); add x2,sp,#320 ldr x3,[sp,#192] // forward load for p256_mul_mont ldp x4,x5,[x22] ldp x6,x7,[x22,#16] add x0,sp,#160 bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1); orr x14,x14,x15 // see if result is zero orr x16,x16,x17 orr x26,x14,x16 // ~is_equal(S1,S2) add x2,sp,#192 add x0,sp,#256 bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr); ldr x3,[sp,#128] ldp x4,x5,[x23] ldp x6,x7,[x23,#16] add x2,sp,#128 add x0,sp,#288 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr); add x2,sp,#256 ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont ldp x6,x7,[sp,#160+16] add x0,sp,#96 bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1); orr x14,x14,x15 // see if result is zero orr x16,x16,x17 orr x14,x14,x16 // ~is_equal(U1,U2) mvn x27,x24 // -1/0 -> 0/-1 mvn x28,x25 // -1/0 -> 0/-1 orr x14,x14,x27 orr x14,x14,x28 orr x14,x14,x26 cbnz x14,Ladd_proceed // if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2)) Ladd_double: mov x1,x22 mov x0,x21 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] add sp,sp,#256 // #256 is from #32*(12-4). 
difference in stack frames b Ldouble_shortcut .align 4 Ladd_proceed: add x0,sp,#192 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R); ldr x3,[x22,#64] ldp x4,x5,[sp,#96] ldp x6,x7,[sp,#96+16] add x2,x22,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z); ldp x4,x5,[sp,#96] ldp x6,x7,[sp,#96+16] add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H); ldr x3,[x23,#64] ldp x4,x5,[sp,#64] ldp x6,x7,[sp,#64+16] add x2,x23,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z); ldr x3,[sp,#96] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,sp,#96 add x0,sp,#224 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H); ldr x3,[sp,#128] ldp x4,x5,[sp,#256] ldp x6,x7,[sp,#256+16] add x2,sp,#128 add x0,sp,#288 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr); mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 add x0,sp,#128 bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2); add x2,sp,#192 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr); add x2,sp,#224 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub); add x2,sp,#288 ldr x3,[sp,#224] // forward load for p256_mul_mont ldp x4,x5,[sp,#320] ldp x6,x7,[sp,#320+16] add x0,sp,#32 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x); add x2,sp,#224 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub); ldr x3,[sp,#160] ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x2,sp,#160 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R); add x2,sp,#352 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2); ldp x4,x5,[sp,#0] // res ldp x6,x7,[sp,#0+16] ldp x8,x9,[x23] // in2 ldp x10,x11,[x23,#16] ldp x14,x15,[x22,#0] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#0+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+0+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+0+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#0+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#0+48] stp x14,x15,[x21,#0] stp x16,x17,[x21,#0+16] ldp x14,x15,[x22,#32] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#32+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+32+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+32+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#32+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#32+48] stp x14,x15,[x21,#32] stp x16,x17,[x21,#32+16] ldp x14,x15,[x22,#64] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#64+16] csel x8,x4,x8,ne csel x9,x5,x9,ne csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? csel x14,x8,x14,ne csel x15,x9,x15,ne csel x16,x10,x16,ne csel x17,x11,x17,ne stp x14,x15,[x21,#64] stp x16,x17,[x21,#64+16] Ladd_done: add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .globl ecp_nistz256_point_add_affine .def ecp_nistz256_point_add_affine .type 32 .endef .align 5 ecp_nistz256_point_add_affine: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-80]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] sub sp,sp,#32*10 mov x21,x0 mov x22,x1 mov x23,x2 adrp x13,Lpoly add x13,x13,:lo12:Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] ldp x4,x5,[x1,#64] // in1_z ldp x6,x7,[x1,#64+16] orr x8,x4,x5 orr x10,x6,x7 orr x24,x8,x10 cmp x24,#0 csetm x24,ne // ~in1infty ldp x14,x15,[x2] // in2_x ldp x16,x17,[x2,#16] ldp x8,x9,[x2,#32] // in2_y ldp x10,x11,[x2,#48] orr x14,x14,x15 orr x16,x16,x17 orr x8,x8,x9 orr x10,x10,x11 orr x14,x14,x16 orr x8,x8,x10 orr x25,x14,x8 cmp x25,#0 csetm x25,ne // ~in2infty add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z); mov x4,x14 mov x5,x15 mov x6,x16 mov x7,x17 ldr x3,[x23] add x2,x23,#0 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x); add x2,x22,#0 ldr x3,[x22,#64] // forward load for p256_mul_mont ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x0,sp,#160 bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x); add x2,x22,#64 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z); ldr x3,[x22,#64] ldp x4,x5,[sp,#160] ldp x6,x7,[sp,#160+16] add x2,x22,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z); ldr x3,[x23,#32] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,x23,#32 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y); add x2,x22,#32 ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont ldp x6,x7,[sp,#160+16] add x0,sp,#192 bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y); add x0,sp,#224 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H); ldp x4,x5,[sp,#192] ldp x6,x7,[sp,#192+16] add x0,sp,#288 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R); ldr x3,[sp,#160] ldp x4,x5,[sp,#224] ldp x6,x7,[sp,#224+16] add x2,sp,#160 add x0,sp,#256 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H); ldr x3,[x22] ldp x4,x5,[sp,#224] ldp x6,x7,[sp,#224+16] add x2,x22,#0 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in1_x, Hsqr); mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 add x0,sp,#224 bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2); add x2,sp,#288 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr); add x2,sp,#256 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub); add x2,sp,#96 ldr x3,[x22,#32] // forward load for p256_mul_mont ldp x4,x5,[sp,#256] ldp x6,x7,[sp,#256+16] add x0,sp,#32 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x); add x2,x22,#32 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub); ldr x3,[sp,#192] ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x2,sp,#192 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R); add x2,sp,#128 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2); ldp x4,x5,[sp,#0] // res ldp x6,x7,[sp,#0+16] ldp x8,x9,[x23] // in2 ldp x10,x11,[x23,#16] ldp x14,x15,[x22,#0] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#0+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+0+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+0+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#0+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#0+48] stp x14,x15,[x21,#0] stp x16,x17,[x21,#0+16] adrp x23,Lone_mont-64 add x23,x23,:lo12:Lone_mont-64 ldp x14,x15,[x22,#32] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#32+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+32+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? 
ldp x6,x7,[sp,#0+32+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#32+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#32+48] stp x14,x15,[x21,#32] stp x16,x17,[x21,#32+16] ldp x14,x15,[x22,#64] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#64+16] csel x8,x4,x8,ne csel x9,x5,x9,ne csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? csel x14,x8,x14,ne csel x15,x9,x15,ne csel x16,x10,x16,ne csel x17,x11,x17,ne stp x14,x15,[x21,#64] stp x16,x17,[x21,#64+16] add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x29,x30,[sp],#80 AARCH64_VALIDATE_LINK_REGISTER ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4], // uint64_t b[4]); .globl ecp_nistz256_ord_mul_mont .def ecp_nistz256_ord_mul_mont .type 32 .endef .align 4 ecp_nistz256_ord_mul_mont: AARCH64_VALID_CALL_TARGET // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] adrp x23,Lord add x23,x23,:lo12:Lord ldr x3,[x2] // bp[0] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] ldp x12,x13,[x23,#0] ldp x21,x22,[x23,#16] ldr x23,[x23,#32] mul x14,x4,x3 // a[0]*b[0] umulh x8,x4,x3 mul x15,x5,x3 // a[1]*b[0] umulh x9,x5,x3 mul x16,x6,x3 // a[2]*b[0] umulh x10,x6,x3 mul x17,x7,x3 // a[3]*b[0] umulh x19,x7,x3 mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts of multiplication adcs x16,x16,x9 adcs x17,x17,x10 adc x19,x19,xzr mov x20,xzr ldr x3,[x2,#8*1] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr ldr x3,[x2,#8*2] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr ldr x3,[x2,#8*3] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc 
x20,xzr,xzr lsl x8,x24,#32 // last reduction subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr subs x8,x14,x12 // ret -= modulus sbcs x9,x15,x13 sbcs x10,x16,x21 sbcs x11,x17,x22 sbcs xzr,x19,xzr csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldr x29,[sp],#64 ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4], // uint64_t rep); .globl ecp_nistz256_ord_sqr_mont .def ecp_nistz256_ord_sqr_mont .type 32 .endef .align 4 ecp_nistz256_ord_sqr_mont: AARCH64_VALID_CALL_TARGET // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] adrp x23,Lord add x23,x23,:lo12:Lord ldp x4,x5,[x1] ldp x6,x7,[x1,#16] ldp x12,x13,[x23,#0] ldp x21,x22,[x23,#16] ldr x23,[x23,#32] b Loop_ord_sqr .align 4 Loop_ord_sqr: sub x2,x2,#1 //////////////////////////////////////////////////////////////// // | | | | | |a1*a0| | // | | | | |a2*a0| | | // | |a3*a2|a3*a0| | | | // | | | |a2*a1| | | | // | | |a3*a1| | | | | // *| | | | | | | | 2| // +|a3*a3|a2*a2|a1*a1|a0*a0| // |--+--+--+--+--+--+--+--| // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is , i.e. follow // // "can't overflow" below mark carrying into high part of // multiplication result, which can't overflow, because it // can never be all ones. mul x15,x5,x4 // a[1]*a[0] umulh x9,x5,x4 mul x16,x6,x4 // a[2]*a[0] umulh x10,x6,x4 mul x17,x7,x4 // a[3]*a[0] umulh x19,x7,x4 adds x16,x16,x9 // accumulate high parts of multiplication mul x8,x6,x5 // a[2]*a[1] umulh x9,x6,x5 adcs x17,x17,x10 mul x10,x7,x5 // a[3]*a[1] umulh x11,x7,x5 adc x19,x19,xzr // can't overflow mul x20,x7,x6 // a[3]*a[2] umulh x1,x7,x6 adds x9,x9,x10 // accumulate high parts of multiplication mul x14,x4,x4 // a[0]*a[0] adc x10,x11,xzr // can't overflow adds x17,x17,x8 // accumulate low parts of multiplication umulh x4,x4,x4 adcs x19,x19,x9 mul x9,x5,x5 // a[1]*a[1] adcs x20,x20,x10 umulh x5,x5,x5 adc x1,x1,xzr // can't overflow adds x15,x15,x15 // acc[1-6]*=2 mul x10,x6,x6 // a[2]*a[2] adcs x16,x16,x16 umulh x6,x6,x6 adcs x17,x17,x17 mul x11,x7,x7 // a[3]*a[3] adcs x19,x19,x19 umulh x7,x7,x7 adcs x20,x20,x20 adcs x1,x1,x1 adc x3,xzr,xzr adds x15,x15,x4 // +a[i]*a[i] mul x24,x14,x23 adcs x16,x16,x9 adcs x17,x17,x5 adcs x19,x19,x10 adcs x20,x20,x6 adcs x1,x1,x11 adc x3,x3,x7 subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adc x17,xzr,x24 // can't overflow mul x11,x14,x23 lsl x8,x24,#32 subs x15,x15,x24 lsr x9,x24,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x11 mul x10,x13,x11 umulh x24,x13,x11 adcs x10,x10,x9 adc x24,x24,xzr adds x14,x15,x10 adcs x15,x16,x24 adcs x16,x17,x11 adc x17,xzr,x11 // can't overflow mul x24,x14,x23 lsl x8,x11,#32 subs x15,x15,x11 lsr x9,x11,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adc x17,xzr,x24 // can't 
overflow mul x11,x14,x23 lsl x8,x24,#32 subs x15,x15,x24 lsr x9,x24,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x11 mul x10,x13,x11 umulh x24,x13,x11 adcs x10,x10,x9 adc x24,x24,xzr adds x14,x15,x10 adcs x15,x16,x24 adcs x16,x17,x11 adc x17,xzr,x11 // can't overflow lsl x8,x11,#32 subs x15,x15,x11 lsr x9,x11,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow adds x14,x14,x19 // accumulate upper half adcs x15,x15,x20 adcs x16,x16,x1 adcs x17,x17,x3 adc x19,xzr,xzr subs x8,x14,x12 // ret -= modulus sbcs x9,x15,x13 sbcs x10,x16,x21 sbcs x11,x17,x22 sbcs xzr,x19,xzr csel x4,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x5,x15,x9,lo csel x6,x16,x10,lo csel x7,x17,x11,lo cbnz x2,Loop_ord_sqr stp x4,x5,[x0] stp x6,x7,[x0,#16] ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldr x29,[sp],#64 ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_select_w5(uint64_t *val, uint64_t *in_t, int index); .globl ecp_nistz256_select_w5 .def ecp_nistz256_select_w5 .type 32 .endef .align 4 ecp_nistz256_select_w5: AARCH64_VALID_CALL_TARGET // x10 := x0 // w9 := 0; loop counter and incremented internal index mov x10, x0 mov w9, #0 // [v16-v21] := 0 movi v16.16b, #0 movi v17.16b, #0 movi v18.16b, #0 movi v19.16b, #0 movi v20.16b, #0 movi v21.16b, #0 Lselect_w5_loop: // Loop 16 times. // Increment index (loop counter); tested at the end of the loop add w9, w9, #1 // [v22-v27] := Load a (3*256-bit = 6*128-bit) table entry starting at x1 // and advance x1 to point to the next entry ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64 // x11 := (w9 == w2)? All 1s : All 0s cmp w9, w2 csetm x11, eq // continue loading ... ld1 {v26.2d, v27.2d}, [x1],#32 // duplicate mask_64 into Mask (all 0s or all 1s) dup v3.2d, x11 // [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19] // i.e., values in output registers will remain the same if w9 != w2 bit v16.16b, v22.16b, v3.16b bit v17.16b, v23.16b, v3.16b bit v18.16b, v24.16b, v3.16b bit v19.16b, v25.16b, v3.16b bit v20.16b, v26.16b, v3.16b bit v21.16b, v27.16b, v3.16b // If bit #4 is not 0 (i.e. idx_ctr < 16) loop back tbz w9, #4, Lselect_w5_loop // Write [v16-v21] to memory at the output pointer st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x10],#64 st1 {v20.2d, v21.2d}, [x10] ret //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_select_w7(uint64_t *val, uint64_t *in_t, int index); .globl ecp_nistz256_select_w7 .def ecp_nistz256_select_w7 .type 32 .endef .align 4 ecp_nistz256_select_w7: AARCH64_VALID_CALL_TARGET // w9 := 0; loop counter and incremented internal index mov w9, #0 // [v16-v21] := 0 movi v16.16b, #0 movi v17.16b, #0 movi v18.16b, #0 movi v19.16b, #0 Lselect_w7_loop: // Loop 64 times. // Increment index (loop counter); tested at the end of the loop add w9, w9, #1 // [v22-v25] := Load a (2*256-bit = 4*128-bit) table entry starting at x1 // and advance x1 to point to the next entry ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64 // x11 := (w9 == w2)? All 1s : All 0s cmp w9, w2 csetm x11, eq // duplicate mask_64 into Mask (all 0s or all 1s) dup v3.2d, x11 // [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19] // i.e., values in output registers will remain the same if w9 != w2 bit v16.16b, v22.16b, v3.16b bit v17.16b, v23.16b, v3.16b bit v18.16b, v24.16b, v3.16b bit v19.16b, v25.16b, v3.16b // If bit #6 is not 0 (i.e. 
idx_ctr < 64) loop back tbz w9, #6, Lselect_w7_loop // Write [v16-v19] to memory at the output pointer st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x0] ret #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
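The ecp_nistz256_select_w5/_w7 routines at the end of this file perform a constant-time table lookup: the loop counter is compared against the (secret) index, csetm turns the comparison into an all-ones or all-zeros mask, and NEON `bit` instructions merge the matching entry into the accumulator registers. A minimal scalar sketch of that mask-and-merge idea follows, assuming plain u64 limbs and a hypothetical helper name rather than ring's real vectorized code.

```rust
// Illustrative sketch only: 16 entries of 3*256 bits (12 u64 limbs each),
// matching the w5 table shape loaded by Lselect_w5_loop.

/// Select entry `index` (1-based, as in the assembly; 0 selects nothing and
/// leaves the output all-zero) without a secret-dependent branch or address.
fn select_w5(table: &[[u64; 12]; 16], index: u32) -> [u64; 12] {
    let mut out = [0u64; 12];
    for (i, entry) in table.iter().enumerate() {
        // mask = all ones when (i + 1) == index, all zeros otherwise,
        // mirroring `cmp w9, w2; csetm x11, eq; dup v3.2d, x11`.
        let hit = (i as u32 + 1) == index;
        let mask = 0u64.wrapping_sub(hit as u64);
        for (o, &e) in out.iter_mut().zip(entry.iter()) {
            // like `bit vD, vS, vMask`: copy entry bits only where the mask is set.
            *o = (*o & !mask) | (e & mask);
        }
    }
    out
}
```

Every table entry is read regardless of the index, so neither the branch pattern nor the memory addresses depend on the secret value; the Rust comparison above is only for illustration, while the assembly guarantees branchlessness with cmp/csetm.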
marvin-hansen/iggy-streaming-system
21,961
thirdparty/crates/ring-0.17.9/pregenerated/ghash-x86_64-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .globl gcm_init_clmul .hidden gcm_init_clmul .type gcm_init_clmul,@function .align 16 gcm_init_clmul: .cfi_startproc _CET_ENDBR .L_init_clmul: movdqu (%rsi),%xmm2 pshufd $78,%xmm2,%xmm2 pshufd $255,%xmm2,%xmm4 movdqa %xmm2,%xmm3 psllq $1,%xmm2 pxor %xmm5,%xmm5 psrlq $63,%xmm3 pcmpgtd %xmm4,%xmm5 pslldq $8,%xmm3 por %xmm3,%xmm2 pand .L0x1c2_polynomial(%rip),%xmm5 pxor %xmm5,%xmm2 pshufd $78,%xmm2,%xmm6 movdqa %xmm2,%xmm0 pxor %xmm2,%xmm6 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,222,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 pshufd $78,%xmm2,%xmm3 pshufd $78,%xmm0,%xmm4 pxor %xmm2,%xmm3 movdqu %xmm2,0(%rdi) pxor %xmm0,%xmm4 movdqu %xmm0,16(%rdi) .byte 102,15,58,15,227,8 movdqu %xmm4,32(%rdi) movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,222,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 movdqa %xmm0,%xmm5 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,222,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 pshufd $78,%xmm5,%xmm3 pshufd $78,%xmm0,%xmm4 pxor %xmm5,%xmm3 movdqu %xmm5,48(%rdi) pxor %xmm0,%xmm4 movdqu %xmm0,64(%rdi) .byte 102,15,58,15,227,8 movdqu %xmm4,80(%rdi) ret .cfi_endproc .size gcm_init_clmul,.-gcm_init_clmul .globl gcm_ghash_clmul .hidden gcm_ghash_clmul .type gcm_ghash_clmul,@function .align 32 gcm_ghash_clmul: .cfi_startproc _CET_ENDBR .L_ghash_clmul: movdqa .Lbswap_mask(%rip),%xmm10 movdqu (%rdi),%xmm0 movdqu (%rsi),%xmm2 movdqu 32(%rsi),%xmm7 .byte 102,65,15,56,0,194 subq $0x10,%rcx jz .Lodd_tail movdqu 16(%rsi),%xmm6 cmpq $0x30,%rcx jb .Lskip4x subq $0x30,%rcx movq $0xA040608020C0E000,%rax movdqu 48(%rsi),%xmm14 movdqu 64(%rsi),%xmm15 movdqu 48(%rdx),%xmm3 movdqu 32(%rdx),%xmm11 .byte 102,65,15,56,0,218 .byte 102,69,15,56,0,218 movdqa %xmm3,%xmm5 pshufd $78,%xmm3,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,68,218,0 .byte 102,15,58,68,234,17 .byte 102,15,58,68,231,0 movdqa %xmm11,%xmm13 pshufd 
$78,%xmm11,%xmm12 pxor %xmm11,%xmm12 .byte 102,68,15,58,68,222,0 .byte 102,68,15,58,68,238,17 .byte 102,68,15,58,68,231,16 xorps %xmm11,%xmm3 xorps %xmm13,%xmm5 movups 80(%rsi),%xmm7 xorps %xmm12,%xmm4 movdqu 16(%rdx),%xmm11 movdqu 0(%rdx),%xmm8 .byte 102,69,15,56,0,218 .byte 102,69,15,56,0,194 movdqa %xmm11,%xmm13 pshufd $78,%xmm11,%xmm12 pxor %xmm8,%xmm0 pxor %xmm11,%xmm12 .byte 102,69,15,58,68,222,0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm8 pxor %xmm0,%xmm8 .byte 102,69,15,58,68,238,17 .byte 102,68,15,58,68,231,0 xorps %xmm11,%xmm3 xorps %xmm13,%xmm5 leaq 64(%rdx),%rdx subq $0x40,%rcx jc .Ltail4x jmp .Lmod4_loop .align 32 .Lmod4_loop: .byte 102,65,15,58,68,199,0 xorps %xmm12,%xmm4 movdqu 48(%rdx),%xmm11 .byte 102,69,15,56,0,218 .byte 102,65,15,58,68,207,17 xorps %xmm3,%xmm0 movdqu 32(%rdx),%xmm3 movdqa %xmm11,%xmm13 .byte 102,68,15,58,68,199,16 pshufd $78,%xmm11,%xmm12 xorps %xmm5,%xmm1 pxor %xmm11,%xmm12 .byte 102,65,15,56,0,218 movups 32(%rsi),%xmm7 xorps %xmm4,%xmm8 .byte 102,68,15,58,68,218,0 pshufd $78,%xmm3,%xmm4 pxor %xmm0,%xmm8 movdqa %xmm3,%xmm5 pxor %xmm1,%xmm8 pxor %xmm3,%xmm4 movdqa %xmm8,%xmm9 .byte 102,68,15,58,68,234,17 pslldq $8,%xmm8 psrldq $8,%xmm9 pxor %xmm8,%xmm0 movdqa .L7_mask(%rip),%xmm8 pxor %xmm9,%xmm1 .byte 102,76,15,110,200 pand %xmm0,%xmm8 .byte 102,69,15,56,0,200 pxor %xmm0,%xmm9 .byte 102,68,15,58,68,231,0 psllq $57,%xmm9 movdqa %xmm9,%xmm8 pslldq $8,%xmm9 .byte 102,15,58,68,222,0 psrldq $8,%xmm8 pxor %xmm9,%xmm0 pxor %xmm8,%xmm1 movdqu 0(%rdx),%xmm8 movdqa %xmm0,%xmm9 psrlq $1,%xmm0 .byte 102,15,58,68,238,17 xorps %xmm11,%xmm3 movdqu 16(%rdx),%xmm11 .byte 102,69,15,56,0,218 .byte 102,15,58,68,231,16 xorps %xmm13,%xmm5 movups 80(%rsi),%xmm7 .byte 102,69,15,56,0,194 pxor %xmm9,%xmm1 pxor %xmm0,%xmm9 psrlq $5,%xmm0 movdqa %xmm11,%xmm13 pxor %xmm12,%xmm4 pshufd $78,%xmm11,%xmm12 pxor %xmm9,%xmm0 pxor %xmm8,%xmm1 pxor %xmm11,%xmm12 .byte 102,69,15,58,68,222,0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 movdqa %xmm0,%xmm1 .byte 102,69,15,58,68,238,17 xorps %xmm11,%xmm3 pshufd $78,%xmm0,%xmm8 pxor %xmm0,%xmm8 .byte 102,68,15,58,68,231,0 xorps %xmm13,%xmm5 leaq 64(%rdx),%rdx subq $0x40,%rcx jnc .Lmod4_loop .Ltail4x: .byte 102,65,15,58,68,199,0 .byte 102,65,15,58,68,207,17 .byte 102,68,15,58,68,199,16 xorps %xmm12,%xmm4 xorps %xmm3,%xmm0 xorps %xmm5,%xmm1 pxor %xmm0,%xmm1 pxor %xmm4,%xmm8 pxor %xmm1,%xmm8 pxor %xmm0,%xmm1 movdqa %xmm8,%xmm9 psrldq $8,%xmm8 pslldq $8,%xmm9 pxor %xmm8,%xmm1 pxor %xmm9,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 addq $0x40,%rcx jz .Ldone movdqu 32(%rsi),%xmm7 subq $0x10,%rcx jz .Lodd_tail .Lskip4x: movdqu (%rdx),%xmm8 movdqu 16(%rdx),%xmm3 .byte 102,69,15,56,0,194 .byte 102,65,15,56,0,218 pxor %xmm8,%xmm0 movdqa %xmm3,%xmm5 pshufd $78,%xmm3,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,68,218,0 .byte 102,15,58,68,234,17 .byte 102,15,58,68,231,0 leaq 32(%rdx),%rdx nop subq $0x20,%rcx jbe .Leven_tail nop jmp .Lmod_loop .align 32 .Lmod_loop: movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm8 pshufd $78,%xmm0,%xmm4 pxor %xmm0,%xmm4 .byte 102,15,58,68,198,0 .byte 102,15,58,68,206,17 .byte 102,15,58,68,231,16 pxor %xmm3,%xmm0 pxor %xmm5,%xmm1 movdqu (%rdx),%xmm9 pxor %xmm0,%xmm8 .byte 102,69,15,56,0,202 movdqu 16(%rdx),%xmm3 pxor %xmm1,%xmm8 pxor %xmm9,%xmm1 pxor %xmm8,%xmm4 .byte 
102,65,15,56,0,218 movdqa %xmm4,%xmm8 psrldq $8,%xmm8 pslldq $8,%xmm4 pxor %xmm8,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm3,%xmm5 movdqa %xmm0,%xmm9 movdqa %xmm0,%xmm8 psllq $5,%xmm0 pxor %xmm0,%xmm8 .byte 102,15,58,68,218,0 psllq $1,%xmm0 pxor %xmm8,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm8 pslldq $8,%xmm0 psrldq $8,%xmm8 pxor %xmm9,%xmm0 pshufd $78,%xmm5,%xmm4 pxor %xmm8,%xmm1 pxor %xmm5,%xmm4 movdqa %xmm0,%xmm9 psrlq $1,%xmm0 .byte 102,15,58,68,234,17 pxor %xmm9,%xmm1 pxor %xmm0,%xmm9 psrlq $5,%xmm0 pxor %xmm9,%xmm0 leaq 32(%rdx),%rdx psrlq $1,%xmm0 .byte 102,15,58,68,231,0 pxor %xmm1,%xmm0 subq $0x20,%rcx ja .Lmod_loop .Leven_tail: movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm8 pshufd $78,%xmm0,%xmm4 pxor %xmm0,%xmm4 .byte 102,15,58,68,198,0 .byte 102,15,58,68,206,17 .byte 102,15,58,68,231,16 pxor %xmm3,%xmm0 pxor %xmm5,%xmm1 pxor %xmm0,%xmm8 pxor %xmm1,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm8 psrldq $8,%xmm8 pslldq $8,%xmm4 pxor %xmm8,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 testq %rcx,%rcx jnz .Ldone .Lodd_tail: movdqu (%rdx),%xmm8 .byte 102,69,15,56,0,194 pxor %xmm8,%xmm0 movdqa %xmm0,%xmm1 pshufd $78,%xmm0,%xmm3 pxor %xmm0,%xmm3 .byte 102,15,58,68,194,0 .byte 102,15,58,68,202,17 .byte 102,15,58,68,223,0 pxor %xmm0,%xmm3 pxor %xmm1,%xmm3 movdqa %xmm3,%xmm4 psrldq $8,%xmm3 pslldq $8,%xmm4 pxor %xmm3,%xmm1 pxor %xmm4,%xmm0 movdqa %xmm0,%xmm4 movdqa %xmm0,%xmm3 psllq $5,%xmm0 pxor %xmm0,%xmm3 psllq $1,%xmm0 pxor %xmm3,%xmm0 psllq $57,%xmm0 movdqa %xmm0,%xmm3 pslldq $8,%xmm0 psrldq $8,%xmm3 pxor %xmm4,%xmm0 pxor %xmm3,%xmm1 movdqa %xmm0,%xmm4 psrlq $1,%xmm0 pxor %xmm4,%xmm1 pxor %xmm0,%xmm4 psrlq $5,%xmm0 pxor %xmm4,%xmm0 psrlq $1,%xmm0 pxor %xmm1,%xmm0 .Ldone: .byte 102,65,15,56,0,194 movdqu %xmm0,(%rdi) ret .cfi_endproc .size gcm_ghash_clmul,.-gcm_ghash_clmul .globl gcm_init_avx .hidden gcm_init_avx .type gcm_init_avx,@function .align 32 gcm_init_avx: .cfi_startproc _CET_ENDBR vzeroupper vmovdqu (%rsi),%xmm2 vpshufd $78,%xmm2,%xmm2 vpshufd $255,%xmm2,%xmm4 vpsrlq $63,%xmm2,%xmm3 vpsllq $1,%xmm2,%xmm2 vpxor %xmm5,%xmm5,%xmm5 vpcmpgtd %xmm4,%xmm5,%xmm5 vpslldq $8,%xmm3,%xmm3 vpor %xmm3,%xmm2,%xmm2 vpand .L0x1c2_polynomial(%rip),%xmm5,%xmm5 vpxor %xmm5,%xmm2,%xmm2 vpunpckhqdq %xmm2,%xmm2,%xmm6 vmovdqa %xmm2,%xmm0 vpxor %xmm2,%xmm6,%xmm6 movq $4,%r10 jmp .Linit_start_avx .align 32 .Linit_loop_avx: vpalignr $8,%xmm3,%xmm4,%xmm5 vmovdqu %xmm5,-16(%rdi) vpunpckhqdq %xmm0,%xmm0,%xmm3 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm6,%xmm3,%xmm3 vpxor %xmm0,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $8,%xmm3,%xmm4 vpsrldq $8,%xmm3,%xmm3 vpxor %xmm4,%xmm0,%xmm0 vpxor %xmm3,%xmm1,%xmm1 vpsllq $57,%xmm0,%xmm3 vpsllq $62,%xmm0,%xmm4 vpxor %xmm3,%xmm4,%xmm4 vpsllq $63,%xmm0,%xmm3 vpxor %xmm3,%xmm4,%xmm4 vpslldq $8,%xmm4,%xmm3 vpsrldq $8,%xmm4,%xmm4 vpxor %xmm3,%xmm0,%xmm0 vpxor %xmm4,%xmm1,%xmm1 vpsrlq $1,%xmm0,%xmm4 vpxor %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm0,%xmm0 vpsrlq $5,%xmm4,%xmm4 vpxor %xmm4,%xmm0,%xmm0 vpsrlq $1,%xmm0,%xmm0 vpxor %xmm1,%xmm0,%xmm0 .Linit_start_avx: vmovdqa %xmm0,%xmm5 vpunpckhqdq %xmm0,%xmm0,%xmm3 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm2,%xmm0,%xmm1 vpclmulqdq $0x00,%xmm2,%xmm0,%xmm0 vpclmulqdq 
$0x00,%xmm6,%xmm3,%xmm3 vpxor %xmm0,%xmm1,%xmm4 vpxor %xmm4,%xmm3,%xmm3 vpslldq $8,%xmm3,%xmm4 vpsrldq $8,%xmm3,%xmm3 vpxor %xmm4,%xmm0,%xmm0 vpxor %xmm3,%xmm1,%xmm1 vpsllq $57,%xmm0,%xmm3 vpsllq $62,%xmm0,%xmm4 vpxor %xmm3,%xmm4,%xmm4 vpsllq $63,%xmm0,%xmm3 vpxor %xmm3,%xmm4,%xmm4 vpslldq $8,%xmm4,%xmm3 vpsrldq $8,%xmm4,%xmm4 vpxor %xmm3,%xmm0,%xmm0 vpxor %xmm4,%xmm1,%xmm1 vpsrlq $1,%xmm0,%xmm4 vpxor %xmm0,%xmm1,%xmm1 vpxor %xmm4,%xmm0,%xmm0 vpsrlq $5,%xmm4,%xmm4 vpxor %xmm4,%xmm0,%xmm0 vpsrlq $1,%xmm0,%xmm0 vpxor %xmm1,%xmm0,%xmm0 vpshufd $78,%xmm5,%xmm3 vpshufd $78,%xmm0,%xmm4 vpxor %xmm5,%xmm3,%xmm3 vmovdqu %xmm5,0(%rdi) vpxor %xmm0,%xmm4,%xmm4 vmovdqu %xmm0,16(%rdi) leaq 48(%rdi),%rdi subq $1,%r10 jnz .Linit_loop_avx vpalignr $8,%xmm4,%xmm3,%xmm5 vmovdqu %xmm5,-16(%rdi) vzeroupper ret .cfi_endproc .size gcm_init_avx,.-gcm_init_avx .globl gcm_ghash_avx .hidden gcm_ghash_avx .type gcm_ghash_avx,@function .align 32 gcm_ghash_avx: .cfi_startproc _CET_ENDBR vzeroupper vmovdqu (%rdi),%xmm10 leaq .L0x1c2_polynomial(%rip),%r10 leaq 64(%rsi),%rsi vmovdqu .Lbswap_mask(%rip),%xmm13 vpshufb %xmm13,%xmm10,%xmm10 cmpq $0x80,%rcx jb .Lshort_avx subq $0x80,%rcx vmovdqu 112(%rdx),%xmm14 vmovdqu 0-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm14 vmovdqu 32-64(%rsi),%xmm7 vpunpckhqdq %xmm14,%xmm14,%xmm9 vmovdqu 96(%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm14,%xmm9,%xmm9 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 16-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vmovdqu 80(%rdx),%xmm14 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vpshufb %xmm13,%xmm14,%xmm14 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 48-64(%rsi),%xmm6 vpxor %xmm14,%xmm9,%xmm9 vmovdqu 64(%rdx),%xmm15 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 80-64(%rsi),%xmm7 vpshufb %xmm13,%xmm15,%xmm15 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm1,%xmm4,%xmm4 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 64-64(%rsi),%xmm6 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vmovdqu 48(%rdx),%xmm14 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpxor %xmm4,%xmm1,%xmm1 vpshufb %xmm13,%xmm14,%xmm14 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 96-64(%rsi),%xmm6 vpxor %xmm5,%xmm2,%xmm2 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 128-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vmovdqu 32(%rdx),%xmm15 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm1,%xmm4,%xmm4 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 112-64(%rsi),%xmm6 vpxor %xmm2,%xmm5,%xmm5 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vmovdqu 16(%rdx),%xmm14 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpxor %xmm4,%xmm1,%xmm1 vpshufb %xmm13,%xmm14,%xmm14 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 144-64(%rsi),%xmm6 vpxor %xmm5,%xmm2,%xmm2 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 176-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vmovdqu (%rdx),%xmm15 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm1,%xmm4,%xmm4 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 160-64(%rsi),%xmm6 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2 leaq 128(%rdx),%rdx cmpq $0x80,%rcx jb .Ltail_avx vpxor %xmm10,%xmm15,%xmm15 subq $0x80,%rcx jmp .Loop8x_avx .align 32 .Loop8x_avx: 
vpunpckhqdq %xmm15,%xmm15,%xmm8 vmovdqu 112(%rdx),%xmm14 vpxor %xmm0,%xmm3,%xmm3 vpxor %xmm15,%xmm8,%xmm8 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm10 vpshufb %xmm13,%xmm14,%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm11 vmovdqu 0-64(%rsi),%xmm6 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm12 vmovdqu 32-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vmovdqu 96(%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpxor %xmm3,%xmm10,%xmm10 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vxorps %xmm4,%xmm11,%xmm11 vmovdqu 16-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm5,%xmm12,%xmm12 vxorps %xmm15,%xmm8,%xmm8 vmovdqu 80(%rdx),%xmm14 vpxor %xmm10,%xmm12,%xmm12 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpxor %xmm11,%xmm12,%xmm12 vpslldq $8,%xmm12,%xmm9 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vpsrldq $8,%xmm12,%xmm12 vpxor %xmm9,%xmm10,%xmm10 vmovdqu 48-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm14 vxorps %xmm12,%xmm11,%xmm11 vpxor %xmm1,%xmm4,%xmm4 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 80-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vpxor %xmm2,%xmm5,%xmm5 vmovdqu 64(%rdx),%xmm15 vpalignr $8,%xmm10,%xmm10,%xmm12 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpshufb %xmm13,%xmm15,%xmm15 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 64-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vxorps %xmm15,%xmm8,%xmm8 vpxor %xmm5,%xmm2,%xmm2 vmovdqu 48(%rdx),%xmm14 vpclmulqdq $0x10,(%r10),%xmm10,%xmm10 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpshufb %xmm13,%xmm14,%xmm14 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 96-64(%rsi),%xmm6 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 128-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vpxor %xmm2,%xmm5,%xmm5 vmovdqu 32(%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpshufb %xmm13,%xmm15,%xmm15 vpxor %xmm3,%xmm0,%xmm0 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 112-64(%rsi),%xmm6 vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm4,%xmm1,%xmm1 vpclmulqdq $0x00,%xmm7,%xmm9,%xmm2 vpxor %xmm15,%xmm8,%xmm8 vpxor %xmm5,%xmm2,%xmm2 vxorps %xmm12,%xmm10,%xmm10 vmovdqu 16(%rdx),%xmm14 vpalignr $8,%xmm10,%xmm10,%xmm12 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm3 vpshufb %xmm13,%xmm14,%xmm14 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm4 vmovdqu 144-64(%rsi),%xmm6 vpclmulqdq $0x10,(%r10),%xmm10,%xmm10 vxorps %xmm11,%xmm12,%xmm12 vpunpckhqdq %xmm14,%xmm14,%xmm9 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x10,%xmm7,%xmm8,%xmm5 vmovdqu 176-64(%rsi),%xmm7 vpxor %xmm14,%xmm9,%xmm9 vpxor %xmm2,%xmm5,%xmm5 vmovdqu (%rdx),%xmm15 vpclmulqdq $0x00,%xmm6,%xmm14,%xmm0 vpshufb %xmm13,%xmm15,%xmm15 vpclmulqdq $0x11,%xmm6,%xmm14,%xmm1 vmovdqu 160-64(%rsi),%xmm6 vpxor %xmm12,%xmm15,%xmm15 vpclmulqdq $0x10,%xmm7,%xmm9,%xmm2 vpxor %xmm10,%xmm15,%xmm15 leaq 128(%rdx),%rdx subq $0x80,%rcx jnc .Loop8x_avx addq $0x80,%rcx jmp .Ltail_no_xor_avx .align 32 .Lshort_avx: vmovdqu -16(%rdx,%rcx,1),%xmm14 leaq (%rdx,%rcx,1),%rdx vmovdqu 0-64(%rsi),%xmm6 vmovdqu 32-64(%rsi),%xmm7 vpshufb %xmm13,%xmm14,%xmm15 vmovdqa %xmm0,%xmm3 vmovdqa %xmm1,%xmm4 vmovdqa %xmm2,%xmm5 subq $0x10,%rcx jz .Ltail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -32(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 16-64(%rsi),%xmm6 vpshufb 
%xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vpsrldq $8,%xmm7,%xmm7 subq $0x10,%rcx jz .Ltail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -48(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 48-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vmovdqu 80-64(%rsi),%xmm7 subq $0x10,%rcx jz .Ltail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -64(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 64-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vpsrldq $8,%xmm7,%xmm7 subq $0x10,%rcx jz .Ltail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -80(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 96-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vmovdqu 128-64(%rsi),%xmm7 subq $0x10,%rcx jz .Ltail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -96(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 112-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vpsrldq $8,%xmm7,%xmm7 subq $0x10,%rcx jz .Ltail_avx vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vmovdqu -112(%rdx),%xmm14 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vmovdqu 144-64(%rsi),%xmm6 vpshufb %xmm13,%xmm14,%xmm15 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vmovq 184-64(%rsi),%xmm7 subq $0x10,%rcx jmp .Ltail_avx .align 32 .Ltail_avx: vpxor %xmm10,%xmm15,%xmm15 .Ltail_no_xor_avx: vpunpckhqdq %xmm15,%xmm15,%xmm8 vpxor %xmm0,%xmm3,%xmm3 vpclmulqdq $0x00,%xmm6,%xmm15,%xmm0 vpxor %xmm15,%xmm8,%xmm8 vpxor %xmm1,%xmm4,%xmm4 vpclmulqdq $0x11,%xmm6,%xmm15,%xmm1 vpxor %xmm2,%xmm5,%xmm5 vpclmulqdq $0x00,%xmm7,%xmm8,%xmm2 vmovdqu (%r10),%xmm12 vpxor %xmm0,%xmm3,%xmm10 vpxor %xmm1,%xmm4,%xmm11 vpxor %xmm2,%xmm5,%xmm5 vpxor %xmm10,%xmm5,%xmm5 vpxor %xmm11,%xmm5,%xmm5 vpslldq $8,%xmm5,%xmm9 vpsrldq $8,%xmm5,%xmm5 vpxor %xmm9,%xmm10,%xmm10 vpxor %xmm5,%xmm11,%xmm11 vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9 vpalignr $8,%xmm10,%xmm10,%xmm10 vpxor %xmm9,%xmm10,%xmm10 vpclmulqdq $0x10,%xmm12,%xmm10,%xmm9 vpalignr $8,%xmm10,%xmm10,%xmm10 vpxor %xmm11,%xmm10,%xmm10 vpxor %xmm9,%xmm10,%xmm10 cmpq $0,%rcx jne .Lshort_avx vpshufb %xmm13,%xmm10,%xmm10 vmovdqu %xmm10,(%rdi) vzeroupper ret .cfi_endproc .size gcm_ghash_avx,.-gcm_ghash_avx .section .rodata .align 64 .Lbswap_mask: .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 .L0x1c2_polynomial: .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 .L7_mask: .long 7,0,7,0 .align 64 .byte 71,72,65,83,72,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 64 .text #endif
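The file above provides the PCLMULQDQ and AVX implementations of GHASH (gcm_init_clmul, gcm_ghash_clmul, gcm_init_avx, gcm_ghash_avx), the GF(2^128) universal hash used by AES-GCM. As a reading aid only, here is a minimal bit-at-a-time reference of the same function in Rust, following the textbook algorithm from the GCM specification; the names gf128_mul and ghash are illustrative, not part of ring's API, and the code is neither constant-time nor optimized.

// Reference GHASH over GF(2^128) with GCM's bit ordering (illustrative sketch only).
// R encodes the reduction polynomial x^128 + x^7 + x^2 + x + 1 ("0xE1" || 0^120).
const R: u128 = 0xe100_0000_0000_0000_0000_0000_0000_0000;

fn gf128_mul(x: u128, y: u128) -> u128 {
    let mut z: u128 = 0;
    let mut v = y;
    for i in 0..128 {
        // Bit i of x, counting from the most significant bit, per the GCM spec.
        if (x >> (127 - i)) & 1 == 1 {
            z ^= v;
        }
        // Multiply v by x: in GCM's reflected representation this is a right shift,
        // folding the bit that falls off the low end back in via XOR with R.
        v = if v & 1 == 1 { (v >> 1) ^ R } else { v >> 1 };
    }
    z
}

fn ghash(h: u128, blocks: &[[u8; 16]]) -> u128 {
    // Y_0 = 0; Y_i = (Y_{i-1} XOR X_i) * H -- the same recurrence the gcm_ghash_*
    // routines compute, just one block at a time.
    blocks
        .iter()
        .fold(0u128, |y, b| gf128_mul(y ^ u128::from_be_bytes(*b), h))
}

The assembly reaches the same result with carry-less multiplies (pclmulqdq/vpclmulqdq) and a deferred reduction, batching four blocks per iteration on the CLMUL path (.Lmod4_loop) and eight on the AVX path (.Loop8x_avx).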
marvin-hansen/iggy-streaming-system
12,170
thirdparty/crates/ring-0.17.9/pregenerated/vpaes-x86_64-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .text .type _vpaes_encrypt_core,@function .align 16 _vpaes_encrypt_core: .cfi_startproc movq %rdx,%r9 movq $16,%r11 movl 240(%rdx),%eax movdqa %xmm9,%xmm1 movdqa .Lk_ipt(%rip),%xmm2 pandn %xmm0,%xmm1 movdqu (%r9),%xmm5 psrld $4,%xmm1 pand %xmm9,%xmm0 .byte 102,15,56,0,208 movdqa .Lk_ipt+16(%rip),%xmm0 .byte 102,15,56,0,193 pxor %xmm5,%xmm2 addq $16,%r9 pxor %xmm2,%xmm0 leaq .Lk_mc_backward(%rip),%r10 jmp .Lenc_entry .align 16 .Lenc_loop: movdqa %xmm13,%xmm4 movdqa %xmm12,%xmm0 .byte 102,15,56,0,226 .byte 102,15,56,0,195 pxor %xmm5,%xmm4 movdqa %xmm15,%xmm5 pxor %xmm4,%xmm0 movdqa -64(%r11,%r10,1),%xmm1 .byte 102,15,56,0,234 movdqa (%r11,%r10,1),%xmm4 movdqa %xmm14,%xmm2 .byte 102,15,56,0,211 movdqa %xmm0,%xmm3 pxor %xmm5,%xmm2 .byte 102,15,56,0,193 addq $16,%r9 pxor %xmm2,%xmm0 .byte 102,15,56,0,220 addq $16,%r11 pxor %xmm0,%xmm3 .byte 102,15,56,0,193 andq $0x30,%r11 subq $1,%rax pxor %xmm3,%xmm0 .Lenc_entry: movdqa %xmm9,%xmm1 movdqa %xmm11,%xmm5 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm9,%xmm0 .byte 102,15,56,0,232 movdqa %xmm10,%xmm3 pxor %xmm1,%xmm0 .byte 102,15,56,0,217 movdqa %xmm10,%xmm4 pxor %xmm5,%xmm3 .byte 102,15,56,0,224 movdqa %xmm10,%xmm2 pxor %xmm5,%xmm4 .byte 102,15,56,0,211 movdqa %xmm10,%xmm3 pxor %xmm0,%xmm2 .byte 102,15,56,0,220 movdqu (%r9),%xmm5 pxor %xmm1,%xmm3 jnz .Lenc_loop movdqa -96(%r10),%xmm4 movdqa -80(%r10),%xmm0 .byte 102,15,56,0,226 pxor %xmm5,%xmm4 .byte 102,15,56,0,195 movdqa 64(%r11,%r10,1),%xmm1 pxor %xmm4,%xmm0 .byte 102,15,56,0,193 ret .cfi_endproc .size _vpaes_encrypt_core,.-_vpaes_encrypt_core .type _vpaes_encrypt_core_2x,@function .align 16 _vpaes_encrypt_core_2x: .cfi_startproc movq %rdx,%r9 movq $16,%r11 movl 240(%rdx),%eax movdqa %xmm9,%xmm1 movdqa %xmm9,%xmm7 movdqa .Lk_ipt(%rip),%xmm2 movdqa %xmm2,%xmm8 pandn %xmm0,%xmm1 pandn %xmm6,%xmm7 movdqu (%r9),%xmm5 psrld $4,%xmm1 psrld $4,%xmm7 pand %xmm9,%xmm0 pand %xmm9,%xmm6 .byte 102,15,56,0,208 .byte 102,68,15,56,0,198 movdqa .Lk_ipt+16(%rip),%xmm0 movdqa %xmm0,%xmm6 .byte 102,15,56,0,193 .byte 102,15,56,0,247 pxor %xmm5,%xmm2 pxor %xmm5,%xmm8 addq $16,%r9 pxor %xmm2,%xmm0 pxor %xmm8,%xmm6 leaq .Lk_mc_backward(%rip),%r10 jmp .Lenc2x_entry .align 16 .Lenc2x_loop: movdqa .Lk_sb1(%rip),%xmm4 movdqa .Lk_sb1+16(%rip),%xmm0 movdqa %xmm4,%xmm12 movdqa %xmm0,%xmm6 .byte 102,15,56,0,226 .byte 102,69,15,56,0,224 .byte 102,15,56,0,195 .byte 102,65,15,56,0,243 pxor %xmm5,%xmm4 pxor %xmm5,%xmm12 movdqa .Lk_sb2(%rip),%xmm5 movdqa %xmm5,%xmm13 pxor %xmm4,%xmm0 pxor %xmm12,%xmm6 movdqa -64(%r11,%r10,1),%xmm1 .byte 102,15,56,0,234 .byte 102,69,15,56,0,232 movdqa (%r11,%r10,1),%xmm4 movdqa .Lk_sb2+16(%rip),%xmm2 movdqa %xmm2,%xmm8 .byte 102,15,56,0,211 .byte 102,69,15,56,0,195 movdqa %xmm0,%xmm3 movdqa %xmm6,%xmm11 pxor %xmm5,%xmm2 pxor %xmm13,%xmm8 .byte 102,15,56,0,193 .byte 102,15,56,0,241 addq $16,%r9 pxor %xmm2,%xmm0 pxor %xmm8,%xmm6 .byte 102,15,56,0,220 .byte 102,68,15,56,0,220 addq $16,%r11 pxor %xmm0,%xmm3 pxor %xmm6,%xmm11 .byte 102,15,56,0,193 .byte 102,15,56,0,241 andq $0x30,%r11 subq $1,%rax pxor %xmm3,%xmm0 pxor %xmm11,%xmm6 .Lenc2x_entry: movdqa %xmm9,%xmm1 movdqa %xmm9,%xmm7 movdqa .Lk_inv+16(%rip),%xmm5 movdqa %xmm5,%xmm13 pandn %xmm0,%xmm1 pandn %xmm6,%xmm7 psrld $4,%xmm1 psrld $4,%xmm7 pand %xmm9,%xmm0 pand %xmm9,%xmm6 .byte 102,15,56,0,232 .byte 102,68,15,56,0,238 movdqa 
%xmm10,%xmm3 movdqa %xmm10,%xmm11 pxor %xmm1,%xmm0 pxor %xmm7,%xmm6 .byte 102,15,56,0,217 .byte 102,68,15,56,0,223 movdqa %xmm10,%xmm4 movdqa %xmm10,%xmm12 pxor %xmm5,%xmm3 pxor %xmm13,%xmm11 .byte 102,15,56,0,224 .byte 102,68,15,56,0,230 movdqa %xmm10,%xmm2 movdqa %xmm10,%xmm8 pxor %xmm5,%xmm4 pxor %xmm13,%xmm12 .byte 102,15,56,0,211 .byte 102,69,15,56,0,195 movdqa %xmm10,%xmm3 movdqa %xmm10,%xmm11 pxor %xmm0,%xmm2 pxor %xmm6,%xmm8 .byte 102,15,56,0,220 .byte 102,69,15,56,0,220 movdqu (%r9),%xmm5 pxor %xmm1,%xmm3 pxor %xmm7,%xmm11 jnz .Lenc2x_loop movdqa -96(%r10),%xmm4 movdqa -80(%r10),%xmm0 movdqa %xmm4,%xmm12 movdqa %xmm0,%xmm6 .byte 102,15,56,0,226 .byte 102,69,15,56,0,224 pxor %xmm5,%xmm4 pxor %xmm5,%xmm12 .byte 102,15,56,0,195 .byte 102,65,15,56,0,243 movdqa 64(%r11,%r10,1),%xmm1 pxor %xmm4,%xmm0 pxor %xmm12,%xmm6 .byte 102,15,56,0,193 .byte 102,15,56,0,241 ret .cfi_endproc .size _vpaes_encrypt_core_2x,.-_vpaes_encrypt_core_2x .type _vpaes_schedule_core,@function .align 16 _vpaes_schedule_core: .cfi_startproc call _vpaes_preheat movdqa .Lk_rcon(%rip),%xmm8 movdqu (%rdi),%xmm0 movdqa %xmm0,%xmm3 leaq .Lk_ipt(%rip),%r11 call _vpaes_schedule_transform movdqa %xmm0,%xmm7 leaq .Lk_sr(%rip),%r10 movdqu %xmm0,(%rdx) .Lschedule_go: cmpl $192,%esi ja .Lschedule_256 .Lschedule_128: movl $10,%esi .Loop_schedule_128: call _vpaes_schedule_round decq %rsi jz .Lschedule_mangle_last call _vpaes_schedule_mangle jmp .Loop_schedule_128 .align 16 .Lschedule_256: movdqu 16(%rdi),%xmm0 call _vpaes_schedule_transform movl $7,%esi .Loop_schedule_256: call _vpaes_schedule_mangle movdqa %xmm0,%xmm6 call _vpaes_schedule_round decq %rsi jz .Lschedule_mangle_last call _vpaes_schedule_mangle pshufd $0xFF,%xmm0,%xmm0 movdqa %xmm7,%xmm5 movdqa %xmm6,%xmm7 call _vpaes_schedule_low_round movdqa %xmm5,%xmm7 jmp .Loop_schedule_256 .align 16 .Lschedule_mangle_last: leaq .Lk_deskew(%rip),%r11 movdqa (%r8,%r10,1),%xmm1 .byte 102,15,56,0,193 leaq .Lk_opt(%rip),%r11 addq $32,%rdx .Lschedule_mangle_last_dec: addq $-16,%rdx pxor .Lk_s63(%rip),%xmm0 call _vpaes_schedule_transform movdqu %xmm0,(%rdx) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 ret .cfi_endproc .size _vpaes_schedule_core,.-_vpaes_schedule_core .type _vpaes_schedule_round,@function .align 16 _vpaes_schedule_round: .cfi_startproc pxor %xmm1,%xmm1 .byte 102,65,15,58,15,200,15 .byte 102,69,15,58,15,192,15 pxor %xmm1,%xmm7 pshufd $0xFF,%xmm0,%xmm0 .byte 102,15,58,15,192,1 _vpaes_schedule_low_round: movdqa %xmm7,%xmm1 pslldq $4,%xmm7 pxor %xmm1,%xmm7 movdqa %xmm7,%xmm1 pslldq $8,%xmm7 pxor %xmm1,%xmm7 pxor .Lk_s63(%rip),%xmm7 movdqa %xmm9,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm9,%xmm0 movdqa %xmm11,%xmm2 .byte 102,15,56,0,208 pxor %xmm1,%xmm0 movdqa %xmm10,%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 movdqa %xmm10,%xmm4 .byte 102,15,56,0,224 pxor %xmm2,%xmm4 movdqa %xmm10,%xmm2 .byte 102,15,56,0,211 pxor %xmm0,%xmm2 movdqa %xmm10,%xmm3 .byte 102,15,56,0,220 pxor %xmm1,%xmm3 movdqa %xmm13,%xmm4 .byte 102,15,56,0,226 movdqa %xmm12,%xmm0 .byte 102,15,56,0,195 pxor %xmm4,%xmm0 pxor %xmm7,%xmm0 movdqa %xmm0,%xmm7 ret .cfi_endproc .size _vpaes_schedule_round,.-_vpaes_schedule_round .type _vpaes_schedule_transform,@function .align 16 _vpaes_schedule_transform: .cfi_startproc movdqa %xmm9,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm9,%xmm0 movdqa (%r11),%xmm2 .byte 102,15,56,0,208 movdqa 16(%r11),%xmm0 .byte 102,15,56,0,193 pxor %xmm2,%xmm0 ret .cfi_endproc .size 
_vpaes_schedule_transform,.-_vpaes_schedule_transform .type _vpaes_schedule_mangle,@function .align 16 _vpaes_schedule_mangle: .cfi_startproc movdqa %xmm0,%xmm4 movdqa .Lk_mc_forward(%rip),%xmm5 addq $16,%rdx pxor .Lk_s63(%rip),%xmm4 .byte 102,15,56,0,229 movdqa %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 .Lschedule_mangle_both: movdqa (%r8,%r10,1),%xmm1 .byte 102,15,56,0,217 addq $-16,%r8 andq $0x30,%r8 movdqu %xmm3,(%rdx) ret .cfi_endproc .size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle .globl vpaes_set_encrypt_key .hidden vpaes_set_encrypt_key .type vpaes_set_encrypt_key,@function .align 16 vpaes_set_encrypt_key: .cfi_startproc _CET_ENDBR #ifdef BORINGSSL_DISPATCH_TEST .extern BORINGSSL_function_hit .hidden BORINGSSL_function_hit movb $1,BORINGSSL_function_hit+5(%rip) #endif movl %esi,%eax shrl $5,%eax addl $5,%eax movl %eax,240(%rdx) movl $0,%ecx movl $0x30,%r8d call _vpaes_schedule_core xorl %eax,%eax ret .cfi_endproc .size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key .globl vpaes_ctr32_encrypt_blocks .hidden vpaes_ctr32_encrypt_blocks .type vpaes_ctr32_encrypt_blocks,@function .align 16 vpaes_ctr32_encrypt_blocks: .cfi_startproc _CET_ENDBR xchgq %rcx,%rdx testq %rcx,%rcx jz .Lctr32_abort movdqu (%r8),%xmm0 movdqa .Lctr_add_one(%rip),%xmm8 subq %rdi,%rsi call _vpaes_preheat movdqa %xmm0,%xmm6 pshufb .Lrev_ctr(%rip),%xmm6 testq $1,%rcx jz .Lctr32_prep_loop movdqu (%rdi),%xmm7 call _vpaes_encrypt_core pxor %xmm7,%xmm0 paddd %xmm8,%xmm6 movdqu %xmm0,(%rsi,%rdi,1) subq $1,%rcx leaq 16(%rdi),%rdi jz .Lctr32_done .Lctr32_prep_loop: movdqa %xmm6,%xmm14 movdqa %xmm6,%xmm15 paddd %xmm8,%xmm15 .Lctr32_loop: movdqa .Lrev_ctr(%rip),%xmm1 movdqa %xmm14,%xmm0 movdqa %xmm15,%xmm6 .byte 102,15,56,0,193 .byte 102,15,56,0,241 call _vpaes_encrypt_core_2x movdqu (%rdi),%xmm1 movdqu 16(%rdi),%xmm2 movdqa .Lctr_add_two(%rip),%xmm3 pxor %xmm1,%xmm0 pxor %xmm2,%xmm6 paddd %xmm3,%xmm14 paddd %xmm3,%xmm15 movdqu %xmm0,(%rsi,%rdi,1) movdqu %xmm6,16(%rsi,%rdi,1) subq $2,%rcx leaq 32(%rdi),%rdi jnz .Lctr32_loop .Lctr32_done: .Lctr32_abort: ret .cfi_endproc .size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks .type _vpaes_preheat,@function .align 16 _vpaes_preheat: .cfi_startproc leaq .Lk_s0F(%rip),%r10 movdqa -32(%r10),%xmm10 movdqa -16(%r10),%xmm11 movdqa 0(%r10),%xmm9 movdqa 48(%r10),%xmm13 movdqa 64(%r10),%xmm12 movdqa 80(%r10),%xmm15 movdqa 96(%r10),%xmm14 ret .cfi_endproc .size _vpaes_preheat,.-_vpaes_preheat .type _vpaes_consts,@object .section .rodata .align 64 _vpaes_consts: .Lk_inv: .quad 0x0E05060F0D080180, 0x040703090A0B0C02 .quad 0x01040A060F0B0780, 0x030D0E0C02050809 .Lk_s0F: .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F .Lk_ipt: .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 .Lk_sb1: .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF .Lk_sb2: .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A .Lk_sbo: .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA .Lk_mc_forward: .quad 0x0407060500030201, 0x0C0F0E0D080B0A09 .quad 0x080B0A0904070605, 0x000302010C0F0E0D .quad 0x0C0F0E0D080B0A09, 0x0407060500030201 .quad 0x000302010C0F0E0D, 0x080B0A0904070605 .Lk_mc_backward: .quad 0x0605040702010003, 0x0E0D0C0F0A09080B .quad 0x020100030E0D0C0F, 0x0A09080B06050407 .quad 0x0E0D0C0F0A09080B, 0x0605040702010003 .quad 0x0A09080B06050407, 0x020100030E0D0C0F .Lk_sr: .quad 0x0706050403020100, 
0x0F0E0D0C0B0A0908 .quad 0x030E09040F0A0500, 0x0B06010C07020D08 .quad 0x0F060D040B020900, 0x070E050C030A0108 .quad 0x0B0E0104070A0D00, 0x0306090C0F020508 .Lk_rcon: .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 .Lk_s63: .quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B .Lk_opt: .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 .Lk_deskew: .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 .Lrev_ctr: .quad 0x0706050403020100, 0x0c0d0e0f0b0a0908 .Lctr_add_one: .quad 0x0000000000000000, 0x0000000100000000 .Lctr_add_two: .quad 0x0000000000000000, 0x0000000200000000 .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 .align 64 .size _vpaes_consts,.-_vpaes_consts .text #endif
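The vpaes file above is the vector-permutation AES technique credited in its trailing string to Mike Hamburg (Stanford): instead of table lookups it splits each state byte into 4-bit nibbles and resolves them with pshufb permutations of small constant tables, and it exports vpaes_set_encrypt_key plus vpaes_ctr32_encrypt_blocks. The Rust sketch below only illustrates the CTR32 framing that the latter routine wraps around the block cipher (a big-endian 32-bit counter in the last word of the IV, keystream XORed into the data); aes_encrypt_block and ctr32_encrypt are hypothetical placeholders for this sketch, not functions exported by the file.

// Illustrative CTR32 framing (assumptions: round keys prepared elsewhere and a
// stand-in single-block encryptor; this is not ring's implementation).
fn aes_encrypt_block(_round_keys: &[[u8; 16]], block: [u8; 16]) -> [u8; 16] {
    block // placeholder: a real implementation would run the AES rounds here
}

fn ctr32_encrypt(round_keys: &[[u8; 16]], iv: [u8; 16], data: &mut [u8]) {
    // Only the last 32 bits of the IV act as the block counter, big-endian,
    // mirroring the .Lrev_ctr byte-reversal and .Lctr_add_one increment above.
    let mut ctr = u32::from_be_bytes([iv[12], iv[13], iv[14], iv[15]]);
    for chunk in data.chunks_mut(16) {
        let mut block = iv;
        block[12..16].copy_from_slice(&ctr.to_be_bytes());
        let keystream = aes_encrypt_block(round_keys, block);
        for (b, k) in chunk.iter_mut().zip(keystream.iter()) {
            *b ^= k; // also handles a short final chunk
        }
        ctr = ctr.wrapping_add(1); // the counter wraps within 32 bits only
    }
}

The assembly applies the same framing but pipelines two counter blocks per iteration through _vpaes_encrypt_core_2x, with a one-block prologue when the block count is odd.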
marvin-hansen/iggy-streaming-system
48,971
thirdparty/crates/ring-0.17.9/pregenerated/sha512-armv8-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) // Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy // in the file LICENSE in the source distribution or at // https://www.openssl.org/source/license.html // ==================================================================== // Written by Andy Polyakov <appro@openssl.org> for the OpenSSL // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. // // Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. // // Performance in cycles per processed byte and improvement coefficient // over code generated with "default" compiler: // // SHA256-hw SHA256(*) SHA512 // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) // Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. // (**) The result is a trade-off: it's possible to improve it by // 10% (or by 1 cycle per round), but at the cost of 20% loss // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code // generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ # include <ring-core/arm_arch.h> #endif .text .globl sha512_block_data_order_nohw .def sha512_block_data_order_nohw .type 32 .endef .align 6 sha512_block_data_order_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#4*8 ldp x20,x21,[x0] // load context ldp x22,x23,[x0,#2*8] ldp x24,x25,[x0,#4*8] add x2,x1,x2,lsl#7 // end of input ldp x26,x27,[x0,#6*8] adrp x30,LK512 add x30,x30,:lo12:LK512 stp x0,x2,[x29,#96] Loop: ldp x3,x4,[x1],#2*8 ldr x19,[x30],#8 // *K++ eor x28,x21,x22 // magic seed str x1,[x29,#112] #ifndef __AARCH64EB__ rev x3,x3 // 0 #endif ror x16,x24,#14 add x27,x27,x19 // h+=K[i] eor x6,x24,x24,ror#23 and x17,x25,x24 bic x19,x26,x24 add x27,x27,x3 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x6,ror#18 // Sigma1(e) ror x6,x20,#28 add x27,x27,x17 // h+=Ch(e,f,g) eor x17,x20,x20,ror#5 add x27,x27,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x23,x23,x27 // d+=h eor x28,x28,x21 // Maj(a,b,c) eor x17,x6,x17,ror#34 // Sigma0(a) add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x4,x4 // 1 #endif ldp x5,x6,[x1],#2*8 add x27,x27,x17 // h+=Sigma0(a) ror x16,x23,#14 add x26,x26,x28 // h+=K[i] eor x7,x23,x23,ror#23 and x17,x24,x23 bic x28,x25,x23 add x26,x26,x4 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x7,ror#18 // Sigma1(e) ror x7,x27,#28 add x26,x26,x17 // h+=Ch(e,f,g) eor x17,x27,x27,ror#5 add x26,x26,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x22,x22,x26 // d+=h eor x19,x19,x20 // Maj(a,b,c) eor x17,x7,x17,ror#34 // Sigma0(a) add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x5,x5 // 2 #endif add x26,x26,x17 // h+=Sigma0(a) ror x16,x22,#14 add x25,x25,x19 // h+=K[i] eor x8,x22,x22,ror#23 and x17,x23,x22 bic x19,x24,x22 add x25,x25,x5 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x8,ror#18 // Sigma1(e) ror x8,x26,#28 add x25,x25,x17 // h+=Ch(e,f,g) eor x17,x26,x26,ror#5 add x25,x25,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x21,x21,x25 // d+=h eor x28,x28,x27 // Maj(a,b,c) eor x17,x8,x17,ror#34 // Sigma0(a) add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x6,x6 // 3 #endif ldp x7,x8,[x1],#2*8 add x25,x25,x17 // h+=Sigma0(a) ror x16,x21,#14 add x24,x24,x28 // h+=K[i] eor x9,x21,x21,ror#23 and x17,x22,x21 bic x28,x23,x21 add x24,x24,x6 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x9,ror#18 // Sigma1(e) ror x9,x25,#28 add x24,x24,x17 // h+=Ch(e,f,g) eor x17,x25,x25,ror#5 add x24,x24,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x20,x20,x24 // d+=h eor x19,x19,x26 // Maj(a,b,c) eor x17,x9,x17,ror#34 // Sigma0(a) add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x7,x7 // 4 #endif add x24,x24,x17 // h+=Sigma0(a) ror x16,x20,#14 add x23,x23,x19 // h+=K[i] eor x10,x20,x20,ror#23 and x17,x21,x20 bic x19,x22,x20 add x23,x23,x7 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x10,ror#18 // Sigma1(e) ror x10,x24,#28 add x23,x23,x17 // h+=Ch(e,f,g) eor x17,x24,x24,ror#5 add x23,x23,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x27,x27,x23 // d+=h eor x28,x28,x25 // Maj(a,b,c) eor x17,x10,x17,ror#34 // Sigma0(a) add x23,x23,x28 // h+=Maj(a,b,c) ldr 
x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x8,x8 // 5 #endif ldp x9,x10,[x1],#2*8 add x23,x23,x17 // h+=Sigma0(a) ror x16,x27,#14 add x22,x22,x28 // h+=K[i] eor x11,x27,x27,ror#23 and x17,x20,x27 bic x28,x21,x27 add x22,x22,x8 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x11,ror#18 // Sigma1(e) ror x11,x23,#28 add x22,x22,x17 // h+=Ch(e,f,g) eor x17,x23,x23,ror#5 add x22,x22,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x26,x26,x22 // d+=h eor x19,x19,x24 // Maj(a,b,c) eor x17,x11,x17,ror#34 // Sigma0(a) add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x9,x9 // 6 #endif add x22,x22,x17 // h+=Sigma0(a) ror x16,x26,#14 add x21,x21,x19 // h+=K[i] eor x12,x26,x26,ror#23 and x17,x27,x26 bic x19,x20,x26 add x21,x21,x9 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x12,ror#18 // Sigma1(e) ror x12,x22,#28 add x21,x21,x17 // h+=Ch(e,f,g) eor x17,x22,x22,ror#5 add x21,x21,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x25,x25,x21 // d+=h eor x28,x28,x23 // Maj(a,b,c) eor x17,x12,x17,ror#34 // Sigma0(a) add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x10,x10 // 7 #endif ldp x11,x12,[x1],#2*8 add x21,x21,x17 // h+=Sigma0(a) ror x16,x25,#14 add x20,x20,x28 // h+=K[i] eor x13,x25,x25,ror#23 and x17,x26,x25 bic x28,x27,x25 add x20,x20,x10 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x13,ror#18 // Sigma1(e) ror x13,x21,#28 add x20,x20,x17 // h+=Ch(e,f,g) eor x17,x21,x21,ror#5 add x20,x20,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x24,x24,x20 // d+=h eor x19,x19,x22 // Maj(a,b,c) eor x17,x13,x17,ror#34 // Sigma0(a) add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x20,x20,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x11,x11 // 8 #endif add x20,x20,x17 // h+=Sigma0(a) ror x16,x24,#14 add x27,x27,x19 // h+=K[i] eor x14,x24,x24,ror#23 and x17,x25,x24 bic x19,x26,x24 add x27,x27,x11 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x14,ror#18 // Sigma1(e) ror x14,x20,#28 add x27,x27,x17 // h+=Ch(e,f,g) eor x17,x20,x20,ror#5 add x27,x27,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x23,x23,x27 // d+=h eor x28,x28,x21 // Maj(a,b,c) eor x17,x14,x17,ror#34 // Sigma0(a) add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x12,x12 // 9 #endif ldp x13,x14,[x1],#2*8 add x27,x27,x17 // h+=Sigma0(a) ror x16,x23,#14 add x26,x26,x28 // h+=K[i] eor x15,x23,x23,ror#23 and x17,x24,x23 bic x28,x25,x23 add x26,x26,x12 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x15,ror#18 // Sigma1(e) ror x15,x27,#28 add x26,x26,x17 // h+=Ch(e,f,g) eor x17,x27,x27,ror#5 add x26,x26,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x22,x22,x26 // d+=h eor x19,x19,x20 // Maj(a,b,c) eor x17,x15,x17,ror#34 // Sigma0(a) add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x13,x13 // 10 #endif add x26,x26,x17 // h+=Sigma0(a) ror x16,x22,#14 add x25,x25,x19 // h+=K[i] eor x0,x22,x22,ror#23 and x17,x23,x22 bic x19,x24,x22 add x25,x25,x13 // 
h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x0,ror#18 // Sigma1(e) ror x0,x26,#28 add x25,x25,x17 // h+=Ch(e,f,g) eor x17,x26,x26,ror#5 add x25,x25,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x21,x21,x25 // d+=h eor x28,x28,x27 // Maj(a,b,c) eor x17,x0,x17,ror#34 // Sigma0(a) add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x14,x14 // 11 #endif ldp x15,x0,[x1],#2*8 add x25,x25,x17 // h+=Sigma0(a) str x6,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] eor x6,x21,x21,ror#23 and x17,x22,x21 bic x28,x23,x21 add x24,x24,x14 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x6,ror#18 // Sigma1(e) ror x6,x25,#28 add x24,x24,x17 // h+=Ch(e,f,g) eor x17,x25,x25,ror#5 add x24,x24,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x20,x20,x24 // d+=h eor x19,x19,x26 // Maj(a,b,c) eor x17,x6,x17,ror#34 // Sigma0(a) add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x15,x15 // 12 #endif add x24,x24,x17 // h+=Sigma0(a) str x7,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] eor x7,x20,x20,ror#23 and x17,x21,x20 bic x19,x22,x20 add x23,x23,x15 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x7,ror#18 // Sigma1(e) ror x7,x24,#28 add x23,x23,x17 // h+=Ch(e,f,g) eor x17,x24,x24,ror#5 add x23,x23,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x27,x27,x23 // d+=h eor x28,x28,x25 // Maj(a,b,c) eor x17,x7,x17,ror#34 // Sigma0(a) add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x0,x0 // 13 #endif ldp x1,x2,[x1] add x23,x23,x17 // h+=Sigma0(a) str x8,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] eor x8,x27,x27,ror#23 and x17,x20,x27 bic x28,x21,x27 add x22,x22,x0 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x8,ror#18 // Sigma1(e) ror x8,x23,#28 add x22,x22,x17 // h+=Ch(e,f,g) eor x17,x23,x23,ror#5 add x22,x22,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x26,x26,x22 // d+=h eor x19,x19,x24 // Maj(a,b,c) eor x17,x8,x17,ror#34 // Sigma0(a) add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x1,x1 // 14 #endif ldr x6,[sp,#24] add x22,x22,x17 // h+=Sigma0(a) str x9,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] eor x9,x26,x26,ror#23 and x17,x27,x26 bic x19,x20,x26 add x21,x21,x1 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x9,ror#18 // Sigma1(e) ror x9,x22,#28 add x21,x21,x17 // h+=Ch(e,f,g) eor x17,x22,x22,ror#5 add x21,x21,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x25,x25,x21 // d+=h eor x28,x28,x23 // Maj(a,b,c) eor x17,x9,x17,ror#34 // Sigma0(a) add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x2,x2 // 15 #endif ldr x7,[sp,#0] add x21,x21,x17 // h+=Sigma0(a) str x10,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x9,x4,#1 and x17,x26,x25 ror x8,x1,#19 bic x28,x27,x25 ror x10,x21,#28 add x20,x20,x2 // h+=X[i] eor x16,x16,x25,ror#18 eor x9,x9,x4,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x10,x10,x21,ror#34 add 
x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x8,x8,x1,ror#61 eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x10,x21,ror#39 // Sigma0(a) eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) add x3,x3,x12 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x3,x3,x9 add x20,x20,x17 // h+=Sigma0(a) add x3,x3,x8 Loop_16_xx: ldr x8,[sp,#8] str x11,[sp,#0] ror x16,x24,#14 add x27,x27,x19 // h+=K[i] ror x10,x5,#1 and x17,x25,x24 ror x9,x2,#19 bic x19,x26,x24 ror x11,x20,#28 add x27,x27,x3 // h+=X[i] eor x16,x16,x24,ror#18 eor x10,x10,x5,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x24,ror#41 // Sigma1(e) eor x11,x11,x20,ror#34 add x27,x27,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x9,x9,x2,ror#61 eor x10,x10,x5,lsr#7 // sigma0(X[i+1]) add x27,x27,x16 // h+=Sigma1(e) eor x28,x28,x21 // Maj(a,b,c) eor x17,x11,x20,ror#39 // Sigma0(a) eor x9,x9,x2,lsr#6 // sigma1(X[i+14]) add x4,x4,x13 add x23,x23,x27 // d+=h add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x4,x4,x10 add x27,x27,x17 // h+=Sigma0(a) add x4,x4,x9 ldr x9,[sp,#16] str x12,[sp,#8] ror x16,x23,#14 add x26,x26,x28 // h+=K[i] ror x11,x6,#1 and x17,x24,x23 ror x10,x3,#19 bic x28,x25,x23 ror x12,x27,#28 add x26,x26,x4 // h+=X[i] eor x16,x16,x23,ror#18 eor x11,x11,x6,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x23,ror#41 // Sigma1(e) eor x12,x12,x27,ror#34 add x26,x26,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x10,x10,x3,ror#61 eor x11,x11,x6,lsr#7 // sigma0(X[i+1]) add x26,x26,x16 // h+=Sigma1(e) eor x19,x19,x20 // Maj(a,b,c) eor x17,x12,x27,ror#39 // Sigma0(a) eor x10,x10,x3,lsr#6 // sigma1(X[i+14]) add x5,x5,x14 add x22,x22,x26 // d+=h add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x5,x5,x11 add x26,x26,x17 // h+=Sigma0(a) add x5,x5,x10 ldr x10,[sp,#24] str x13,[sp,#16] ror x16,x22,#14 add x25,x25,x19 // h+=K[i] ror x12,x7,#1 and x17,x23,x22 ror x11,x4,#19 bic x19,x24,x22 ror x13,x26,#28 add x25,x25,x5 // h+=X[i] eor x16,x16,x22,ror#18 eor x12,x12,x7,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x22,ror#41 // Sigma1(e) eor x13,x13,x26,ror#34 add x25,x25,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x11,x11,x4,ror#61 eor x12,x12,x7,lsr#7 // sigma0(X[i+1]) add x25,x25,x16 // h+=Sigma1(e) eor x28,x28,x27 // Maj(a,b,c) eor x17,x13,x26,ror#39 // Sigma0(a) eor x11,x11,x4,lsr#6 // sigma1(X[i+14]) add x6,x6,x15 add x21,x21,x25 // d+=h add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x6,x6,x12 add x25,x25,x17 // h+=Sigma0(a) add x6,x6,x11 ldr x11,[sp,#0] str x14,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] ror x13,x8,#1 and x17,x22,x21 ror x12,x5,#19 bic x28,x23,x21 ror x14,x25,#28 add x24,x24,x6 // h+=X[i] eor x16,x16,x21,ror#18 eor x13,x13,x8,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x21,ror#41 // Sigma1(e) eor x14,x14,x25,ror#34 add x24,x24,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x12,x12,x5,ror#61 eor x13,x13,x8,lsr#7 // sigma0(X[i+1]) add x24,x24,x16 // h+=Sigma1(e) eor x19,x19,x26 // Maj(a,b,c) eor x17,x14,x25,ror#39 // Sigma0(a) eor x12,x12,x5,lsr#6 // sigma1(X[i+14]) add x7,x7,x0 add x20,x20,x24 // d+=h add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x7,x7,x13 add 
x24,x24,x17 // h+=Sigma0(a) add x7,x7,x12 ldr x12,[sp,#8] str x15,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] ror x14,x9,#1 and x17,x21,x20 ror x13,x6,#19 bic x19,x22,x20 ror x15,x24,#28 add x23,x23,x7 // h+=X[i] eor x16,x16,x20,ror#18 eor x14,x14,x9,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x20,ror#41 // Sigma1(e) eor x15,x15,x24,ror#34 add x23,x23,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x13,x13,x6,ror#61 eor x14,x14,x9,lsr#7 // sigma0(X[i+1]) add x23,x23,x16 // h+=Sigma1(e) eor x28,x28,x25 // Maj(a,b,c) eor x17,x15,x24,ror#39 // Sigma0(a) eor x13,x13,x6,lsr#6 // sigma1(X[i+14]) add x8,x8,x1 add x27,x27,x23 // d+=h add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x8,x8,x14 add x23,x23,x17 // h+=Sigma0(a) add x8,x8,x13 ldr x13,[sp,#16] str x0,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] ror x15,x10,#1 and x17,x20,x27 ror x14,x7,#19 bic x28,x21,x27 ror x0,x23,#28 add x22,x22,x8 // h+=X[i] eor x16,x16,x27,ror#18 eor x15,x15,x10,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x27,ror#41 // Sigma1(e) eor x0,x0,x23,ror#34 add x22,x22,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x14,x14,x7,ror#61 eor x15,x15,x10,lsr#7 // sigma0(X[i+1]) add x22,x22,x16 // h+=Sigma1(e) eor x19,x19,x24 // Maj(a,b,c) eor x17,x0,x23,ror#39 // Sigma0(a) eor x14,x14,x7,lsr#6 // sigma1(X[i+14]) add x9,x9,x2 add x26,x26,x22 // d+=h add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x9,x9,x15 add x22,x22,x17 // h+=Sigma0(a) add x9,x9,x14 ldr x14,[sp,#24] str x1,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] ror x0,x11,#1 and x17,x27,x26 ror x15,x8,#19 bic x19,x20,x26 ror x1,x22,#28 add x21,x21,x9 // h+=X[i] eor x16,x16,x26,ror#18 eor x0,x0,x11,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x26,ror#41 // Sigma1(e) eor x1,x1,x22,ror#34 add x21,x21,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x15,x15,x8,ror#61 eor x0,x0,x11,lsr#7 // sigma0(X[i+1]) add x21,x21,x16 // h+=Sigma1(e) eor x28,x28,x23 // Maj(a,b,c) eor x17,x1,x22,ror#39 // Sigma0(a) eor x15,x15,x8,lsr#6 // sigma1(X[i+14]) add x10,x10,x3 add x25,x25,x21 // d+=h add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x10,x10,x0 add x21,x21,x17 // h+=Sigma0(a) add x10,x10,x15 ldr x15,[sp,#0] str x2,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x1,x12,#1 and x17,x26,x25 ror x0,x9,#19 bic x28,x27,x25 ror x2,x21,#28 add x20,x20,x10 // h+=X[i] eor x16,x16,x25,ror#18 eor x1,x1,x12,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x2,x2,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x0,x0,x9,ror#61 eor x1,x1,x12,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x2,x21,ror#39 // Sigma0(a) eor x0,x0,x9,lsr#6 // sigma1(X[i+14]) add x11,x11,x4 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x11,x11,x1 add x20,x20,x17 // h+=Sigma0(a) add x11,x11,x0 ldr x0,[sp,#8] str x3,[sp,#0] ror x16,x24,#14 add x27,x27,x19 // h+=K[i] ror x2,x13,#1 and x17,x25,x24 ror x1,x10,#19 bic x19,x26,x24 ror x3,x20,#28 add x27,x27,x11 // h+=X[i] eor x16,x16,x24,ror#18 eor x2,x2,x13,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x24,ror#41 // Sigma1(e) eor x3,x3,x20,ror#34 add x27,x27,x17 
// h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x1,x1,x10,ror#61 eor x2,x2,x13,lsr#7 // sigma0(X[i+1]) add x27,x27,x16 // h+=Sigma1(e) eor x28,x28,x21 // Maj(a,b,c) eor x17,x3,x20,ror#39 // Sigma0(a) eor x1,x1,x10,lsr#6 // sigma1(X[i+14]) add x12,x12,x5 add x23,x23,x27 // d+=h add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x12,x12,x2 add x27,x27,x17 // h+=Sigma0(a) add x12,x12,x1 ldr x1,[sp,#16] str x4,[sp,#8] ror x16,x23,#14 add x26,x26,x28 // h+=K[i] ror x3,x14,#1 and x17,x24,x23 ror x2,x11,#19 bic x28,x25,x23 ror x4,x27,#28 add x26,x26,x12 // h+=X[i] eor x16,x16,x23,ror#18 eor x3,x3,x14,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x23,ror#41 // Sigma1(e) eor x4,x4,x27,ror#34 add x26,x26,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x2,x2,x11,ror#61 eor x3,x3,x14,lsr#7 // sigma0(X[i+1]) add x26,x26,x16 // h+=Sigma1(e) eor x19,x19,x20 // Maj(a,b,c) eor x17,x4,x27,ror#39 // Sigma0(a) eor x2,x2,x11,lsr#6 // sigma1(X[i+14]) add x13,x13,x6 add x22,x22,x26 // d+=h add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x13,x13,x3 add x26,x26,x17 // h+=Sigma0(a) add x13,x13,x2 ldr x2,[sp,#24] str x5,[sp,#16] ror x16,x22,#14 add x25,x25,x19 // h+=K[i] ror x4,x15,#1 and x17,x23,x22 ror x3,x12,#19 bic x19,x24,x22 ror x5,x26,#28 add x25,x25,x13 // h+=X[i] eor x16,x16,x22,ror#18 eor x4,x4,x15,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x22,ror#41 // Sigma1(e) eor x5,x5,x26,ror#34 add x25,x25,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x3,x3,x12,ror#61 eor x4,x4,x15,lsr#7 // sigma0(X[i+1]) add x25,x25,x16 // h+=Sigma1(e) eor x28,x28,x27 // Maj(a,b,c) eor x17,x5,x26,ror#39 // Sigma0(a) eor x3,x3,x12,lsr#6 // sigma1(X[i+14]) add x14,x14,x7 add x21,x21,x25 // d+=h add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x14,x14,x4 add x25,x25,x17 // h+=Sigma0(a) add x14,x14,x3 ldr x3,[sp,#0] str x6,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] ror x5,x0,#1 and x17,x22,x21 ror x4,x13,#19 bic x28,x23,x21 ror x6,x25,#28 add x24,x24,x14 // h+=X[i] eor x16,x16,x21,ror#18 eor x5,x5,x0,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x21,ror#41 // Sigma1(e) eor x6,x6,x25,ror#34 add x24,x24,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x4,x4,x13,ror#61 eor x5,x5,x0,lsr#7 // sigma0(X[i+1]) add x24,x24,x16 // h+=Sigma1(e) eor x19,x19,x26 // Maj(a,b,c) eor x17,x6,x25,ror#39 // Sigma0(a) eor x4,x4,x13,lsr#6 // sigma1(X[i+14]) add x15,x15,x8 add x20,x20,x24 // d+=h add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x15,x15,x5 add x24,x24,x17 // h+=Sigma0(a) add x15,x15,x4 ldr x4,[sp,#8] str x7,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] ror x6,x1,#1 and x17,x21,x20 ror x5,x14,#19 bic x19,x22,x20 ror x7,x24,#28 add x23,x23,x15 // h+=X[i] eor x16,x16,x20,ror#18 eor x6,x6,x1,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x20,ror#41 // Sigma1(e) eor x7,x7,x24,ror#34 add x23,x23,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x5,x5,x14,ror#61 eor x6,x6,x1,lsr#7 // sigma0(X[i+1]) add x23,x23,x16 // h+=Sigma1(e) eor x28,x28,x25 // Maj(a,b,c) eor x17,x7,x24,ror#39 // Sigma0(a) eor x5,x5,x14,lsr#6 // sigma1(X[i+14]) add x0,x0,x9 add x27,x27,x23 // d+=h add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x0,x0,x6 add x23,x23,x17 // h+=Sigma0(a) add x0,x0,x5 ldr 
x5,[sp,#16] str x8,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] ror x7,x2,#1 and x17,x20,x27 ror x6,x15,#19 bic x28,x21,x27 ror x8,x23,#28 add x22,x22,x0 // h+=X[i] eor x16,x16,x27,ror#18 eor x7,x7,x2,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x27,ror#41 // Sigma1(e) eor x8,x8,x23,ror#34 add x22,x22,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x6,x6,x15,ror#61 eor x7,x7,x2,lsr#7 // sigma0(X[i+1]) add x22,x22,x16 // h+=Sigma1(e) eor x19,x19,x24 // Maj(a,b,c) eor x17,x8,x23,ror#39 // Sigma0(a) eor x6,x6,x15,lsr#6 // sigma1(X[i+14]) add x1,x1,x10 add x26,x26,x22 // d+=h add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x1,x1,x7 add x22,x22,x17 // h+=Sigma0(a) add x1,x1,x6 ldr x6,[sp,#24] str x9,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] ror x8,x3,#1 and x17,x27,x26 ror x7,x0,#19 bic x19,x20,x26 ror x9,x22,#28 add x21,x21,x1 // h+=X[i] eor x16,x16,x26,ror#18 eor x8,x8,x3,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x26,ror#41 // Sigma1(e) eor x9,x9,x22,ror#34 add x21,x21,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x7,x7,x0,ror#61 eor x8,x8,x3,lsr#7 // sigma0(X[i+1]) add x21,x21,x16 // h+=Sigma1(e) eor x28,x28,x23 // Maj(a,b,c) eor x17,x9,x22,ror#39 // Sigma0(a) eor x7,x7,x0,lsr#6 // sigma1(X[i+14]) add x2,x2,x11 add x25,x25,x21 // d+=h add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x2,x2,x8 add x21,x21,x17 // h+=Sigma0(a) add x2,x2,x7 ldr x7,[sp,#0] str x10,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x9,x4,#1 and x17,x26,x25 ror x8,x1,#19 bic x28,x27,x25 ror x10,x21,#28 add x20,x20,x2 // h+=X[i] eor x16,x16,x25,ror#18 eor x9,x9,x4,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x10,x10,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x8,x8,x1,ror#61 eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x10,x21,ror#39 // Sigma0(a) eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) add x3,x3,x12 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x3,x3,x9 add x20,x20,x17 // h+=Sigma0(a) add x3,x3,x8 cbnz x19,Loop_16_xx ldp x0,x2,[x29,#96] ldr x1,[x29,#112] sub x30,x30,#648 // rewind ldp x3,x4,[x0] ldp x5,x6,[x0,#2*8] add x1,x1,#14*8 // advance input pointer ldp x7,x8,[x0,#4*8] add x20,x20,x3 ldp x9,x10,[x0,#6*8] add x21,x21,x4 add x22,x22,x5 add x23,x23,x6 stp x20,x21,[x0] add x24,x24,x7 add x25,x25,x8 stp x22,x23,[x0,#2*8] add x26,x26,x9 add x27,x27,x10 cmp x1,x2 stp x24,x25,[x0,#4*8] stp x26,x27,[x0,#6*8] b.ne Loop ldp x19,x20,[x29,#16] add sp,sp,#4*8 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 AARCH64_VALIDATE_LINK_REGISTER ret .section .rodata .align 6 LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 
0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0 // terminator .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 .text #ifndef __KERNEL__ .globl sha512_block_data_order_hw .def sha512_block_data_order_hw .type 32 .endef .align 6 sha512_block_data_order_hw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context adrp x3,LK512 add x3,x3,:lo12:LK512 rev64 v16.16b,v16.16b rev64 v17.16b,v17.16b rev64 v18.16b,v18.16b rev64 v19.16b,v19.16b rev64 v20.16b,v20.16b rev64 v21.16b,v21.16b rev64 v22.16b,v22.16b rev64 v23.16b,v23.16b b Loop_hw .align 4 Loop_hw: ld1 {v24.2d},[x3],#16 subs x2,x2,#1 sub x4,x1,#128 orr v26.16b,v0.16b,v0.16b // offload orr v27.16b,v1.16b,v1.16b orr v28.16b,v2.16b,v2.16b orr v29.16b,v3.16b,v3.16b csel x1,x1,x4,ne // conditional rewind add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a32 
//sha512su1 v18.16b,v17.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v4.2d,v1.2d,v3.2d 
// "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 
v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v19.2d ld1 
{v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v16.2d ld1 {v16.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b rev64 v16.16b,v16.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v17.2d ld1 {v17.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b rev64 v17.16b,v17.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v18.2d ld1 {v18.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b rev64 v18.16b,v18.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v19.2d ld1 {v19.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b rev64 
v19.16b,v19.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v20.2d ld1 {v20.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b rev64 v20.16b,v20.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v21.2d ld1 {v21.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b rev64 v21.16b,v21.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v22.2d ld1 {v22.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b rev64 v22.16b,v22.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b sub x3,x3,#80*8 // rewind add v25.2d,v25.2d,v23.2d ld1 {v23.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b rev64 v23.16b,v23.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v0.2d,v0.2d,v26.2d // accumulate add v1.2d,v1.2d,v27.2d add v2.2d,v2.2d,v28.2d add v3.2d,v3.2d,v29.2d cbnz x2,Loop_hw st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context ldr x29,[sp],#16 ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
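The scalar rounds in this file fold K[i] and the message word X[i] into h together with Sigma1(e) and Ch(e,f,g), then add Sigma0(a) and Maj(a,b,c), exactly as the ror/eor/bic comments indicate; the Loop_16_xx section keeps a rolling 16-word schedule using sigma0(X[i+1]) and sigma1(X[i+14]). As a plain cross-reference for the rotate amounts, here is a minimal Rust sketch of those helpers. It is illustrative only, not ring code, and all function names are invented.

// Illustrative only: portable versions of the helpers named in the assembly comments.
fn big_sigma0(a: u64) -> u64 { a.rotate_right(28) ^ a.rotate_right(34) ^ a.rotate_right(39) }
fn big_sigma1(e: u64) -> u64 { e.rotate_right(14) ^ e.rotate_right(18) ^ e.rotate_right(41) }
fn small_sigma0(x: u64) -> u64 { x.rotate_right(1) ^ x.rotate_right(8) ^ (x >> 7) }   // sigma0(X[i+1])
fn small_sigma1(x: u64) -> u64 { x.rotate_right(19) ^ x.rotate_right(61) ^ (x >> 6) } // sigma1(X[i+14])
fn ch(e: u64, f: u64, g: u64) -> u64 { (e & f) ^ (!e & g) }           // and/bic/orr in the asm
fn maj(a: u64, b: u64, c: u64) -> u64 { (a & b) ^ (a & c) ^ (b & c) }

// One round: T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + X[i]; d += T1; new h = T1 + Sigma0(a) + Maj(a,b,c).
fn sha512_round(h: u64, a: u64, b: u64, c: u64, d: &mut u64, e: u64, f: u64, g: u64, k: u64, x: u64) -> u64 {
    let t1 = h
        .wrapping_add(big_sigma1(e))
        .wrapping_add(ch(e, f, g))
        .wrapping_add(k)
        .wrapping_add(x);
    *d = d.wrapping_add(t1);
    t1.wrapping_add(big_sigma0(a)).wrapping_add(maj(a, b, c))
}

// Rolling 16-word schedule, as in Loop_16_xx:
// W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16].
fn next_schedule_word(w: &[u64; 16], t: usize) -> u64 {
    small_sigma1(w[(t + 14) % 16])
        .wrapping_add(w[(t + 9) % 16])
        .wrapping_add(small_sigma0(w[(t + 1) % 16]))
        .wrapping_add(w[t % 16])
}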
marvin-hansen/iggy-streaming-system
4,168
thirdparty/crates/ring-0.17.9/pregenerated/ghashv8-armx-ios64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include <ring-core/arm_arch.h> #if __ARM_MAX_ARCH__>=7 .text .globl _gcm_init_clmul .private_extern _gcm_init_clmul .align 4 _gcm_init_clmul: AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x1] //load input H movi v19.16b,#0xe1 shl v19.2d,v19.2d,#57 //0xc2.0 ext v3.16b,v17.16b,v17.16b,#8 ushr v18.2d,v19.2d,#63 dup v17.4s,v17.s[1] ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01 ushr v18.2d,v3.2d,#63 sshr v17.4s,v17.4s,#31 //broadcast carry bit and v18.16b,v18.16b,v16.16b shl v3.2d,v3.2d,#1 ext v18.16b,v18.16b,v18.16b,#8 and v16.16b,v16.16b,v17.16b orr v3.16b,v3.16b,v18.16b //H<<<=1 eor v20.16b,v3.16b,v16.16b //twisted H st1 {v20.2d},[x0],#16 //store Htable[0] //calculate H^2 ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing pmull v0.1q,v20.1d,v20.1d eor v16.16b,v16.16b,v20.16b pmull2 v2.1q,v20.2d,v20.2d pmull v1.1q,v16.1d,v16.1d ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v22.16b,v0.16b,v18.16b ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing eor v17.16b,v17.16b,v22.16b ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2] //calculate H^3 and H^4 pmull v0.1q,v20.1d, v22.1d pmull v5.1q,v22.1d,v22.1d pmull2 v2.1q,v20.2d, v22.2d pmull2 v7.1q,v22.2d,v22.2d pmull v1.1q,v16.1d,v17.1d pmull v6.1q,v17.1d,v17.1d ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing ext v17.16b,v5.16b,v7.16b,#8 eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v16.16b eor v4.16b,v5.16b,v7.16b eor v6.16b,v6.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase eor v6.16b,v6.16b,v4.16b pmull v4.1q,v5.1d,v19.1d ins v2.d[0],v1.d[1] ins v7.d[0],v6.d[1] ins v1.d[1],v0.d[0] ins v6.d[1],v5.d[0] eor v0.16b,v1.16b,v18.16b eor v5.16b,v6.16b,v4.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase ext v4.16b,v5.16b,v5.16b,#8 pmull v0.1q,v0.1d,v19.1d pmull v5.1q,v5.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v4.16b,v4.16b,v7.16b eor v20.16b, v0.16b,v18.16b //H^3 eor v22.16b,v5.16b,v4.16b //H^4 ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing ext v17.16b,v22.16b,v22.16b,#8 eor v16.16b,v16.16b,v20.16b eor v17.16b,v17.16b,v22.16b ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5] ret .globl _gcm_gmult_clmul .private_extern _gcm_gmult_clmul .align 4 _gcm_gmult_clmul: AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x0] //load Xi movi v19.16b,#0xe1 ld1 {v20.2d,v21.2d},[x1] //load twisted H, ... 
shl v19.2d,v19.2d,#57 #ifndef __AARCH64EB__ rev64 v17.16b,v17.16b #endif ext v3.16b,v17.16b,v17.16b,#8 pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi) ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing eor v18.16b,v0.16b,v2.16b eor v1.16b,v1.16b,v17.16b eor v1.16b,v1.16b,v18.16b pmull v18.1q,v0.1d,v19.1d //1st phase of reduction ins v2.d[0],v1.d[1] ins v1.d[1],v0.d[0] eor v0.16b,v1.16b,v18.16b ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction pmull v0.1q,v0.1d,v19.1d eor v18.16b,v18.16b,v2.16b eor v0.16b,v0.16b,v18.16b #ifndef __AARCH64EB__ rev64 v0.16b,v0.16b #endif ext v0.16b,v0.16b,v0.16b,#8 st1 {v0.2d},[x0] //write out Xi ret .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
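gcm_init_clmul and gcm_gmult_clmul above build the 256-bit carry-less product out of three pmull results (low halves, high halves, and the xor of the halves), which is what the "Karatsuba pre/post-processing" comments refer to, before folding the result back with the 0xc2-based reduction. A rough Rust sketch of just the Karatsuba step follows; clmul64 and clmul128_karatsuba are invented reference helpers, not ring APIs, and the reduction step is omitted.

// Illustrative only: bit-by-bit reference carry-less multiply of two 64-bit values.
fn clmul64(a: u64, b: u64) -> u128 {
    let mut acc = 0u128;
    for i in 0..64 {
        if (b >> i) & 1 == 1 {
            acc ^= (a as u128) << i;
        }
    }
    acc
}

// Carry-less 128x128 -> 256-bit multiply from three 64x64 multiplies (Karatsuba).
fn clmul128_karatsuba(x: u128, y: u128) -> (u128, u128) {
    let (xl, xh) = (x as u64, (x >> 64) as u64);
    let (yl, yh) = (y as u64, (y >> 64) as u64);
    let lo = clmul64(xl, yl);                      // pmull  (low halves)
    let hi = clmul64(xh, yh);                      // pmull2 (high halves)
    let mid = clmul64(xl ^ xh, yl ^ yh) ^ lo ^ hi; // Karatsuba middle term ("post-processing")
    (lo ^ (mid << 64), hi ^ (mid >> 64))           // (low 128 bits, high 128 bits) of the product
}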
marvin-hansen/iggy-streaming-system
12,622
thirdparty/crates/ring-0.17.9/pregenerated/chacha-x86-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text .globl ChaCha20_ctr32_ssse3 .hidden ChaCha20_ctr32_ssse3 .type ChaCha20_ctr32_ssse3,@function .align 16 ChaCha20_ctr32_ssse3: .L_ChaCha20_ctr32_ssse3_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi call .Lpic_point .Lpic_point: popl %eax movl 20(%esp),%edi movl 24(%esp),%esi movl 28(%esp),%ecx movl 32(%esp),%edx movl 36(%esp),%ebx movl %esp,%ebp subl $524,%esp andl $-64,%esp movl %ebp,512(%esp) leal .Lssse3_data-.Lpic_point(%eax),%eax movdqu (%ebx),%xmm3 cmpl $256,%ecx jb .L0001x movl %edx,516(%esp) movl %ebx,520(%esp) subl $256,%ecx leal 384(%esp),%ebp movdqu (%edx),%xmm7 pshufd $0,%xmm3,%xmm0 pshufd $85,%xmm3,%xmm1 pshufd $170,%xmm3,%xmm2 pshufd $255,%xmm3,%xmm3 paddd 48(%eax),%xmm0 pshufd $0,%xmm7,%xmm4 pshufd $85,%xmm7,%xmm5 psubd 64(%eax),%xmm0 pshufd $170,%xmm7,%xmm6 pshufd $255,%xmm7,%xmm7 movdqa %xmm0,64(%ebp) movdqa %xmm1,80(%ebp) movdqa %xmm2,96(%ebp) movdqa %xmm3,112(%ebp) movdqu 16(%edx),%xmm3 movdqa %xmm4,-64(%ebp) movdqa %xmm5,-48(%ebp) movdqa %xmm6,-32(%ebp) movdqa %xmm7,-16(%ebp) movdqa 32(%eax),%xmm7 leal 128(%esp),%ebx pshufd $0,%xmm3,%xmm0 pshufd $85,%xmm3,%xmm1 pshufd $170,%xmm3,%xmm2 pshufd $255,%xmm3,%xmm3 pshufd $0,%xmm7,%xmm4 pshufd $85,%xmm7,%xmm5 pshufd $170,%xmm7,%xmm6 pshufd $255,%xmm7,%xmm7 movdqa %xmm0,(%ebp) movdqa %xmm1,16(%ebp) movdqa %xmm2,32(%ebp) movdqa %xmm3,48(%ebp) movdqa %xmm4,-128(%ebp) movdqa %xmm5,-112(%ebp) movdqa %xmm6,-96(%ebp) movdqa %xmm7,-80(%ebp) leal 128(%esi),%esi leal 128(%edi),%edi jmp .L001outer_loop .align 16 .L001outer_loop: movdqa -112(%ebp),%xmm1 movdqa -96(%ebp),%xmm2 movdqa -80(%ebp),%xmm3 movdqa -48(%ebp),%xmm5 movdqa -32(%ebp),%xmm6 movdqa -16(%ebp),%xmm7 movdqa %xmm1,-112(%ebx) movdqa %xmm2,-96(%ebx) movdqa %xmm3,-80(%ebx) movdqa %xmm5,-48(%ebx) movdqa %xmm6,-32(%ebx) movdqa %xmm7,-16(%ebx) movdqa 32(%ebp),%xmm2 movdqa 48(%ebp),%xmm3 movdqa 64(%ebp),%xmm4 movdqa 80(%ebp),%xmm5 movdqa 96(%ebp),%xmm6 movdqa 112(%ebp),%xmm7 paddd 64(%eax),%xmm4 movdqa %xmm2,32(%ebx) movdqa %xmm3,48(%ebx) movdqa %xmm4,64(%ebx) movdqa %xmm5,80(%ebx) movdqa %xmm6,96(%ebx) movdqa %xmm7,112(%ebx) movdqa %xmm4,64(%ebp) movdqa -128(%ebp),%xmm0 movdqa %xmm4,%xmm6 movdqa -64(%ebp),%xmm3 movdqa (%ebp),%xmm4 movdqa 16(%ebp),%xmm5 movl $10,%edx nop .align 16 .L002loop: paddd %xmm3,%xmm0 movdqa %xmm3,%xmm2 pxor %xmm0,%xmm6 pshufb (%eax),%xmm6 paddd %xmm6,%xmm4 pxor %xmm4,%xmm2 movdqa -48(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -112(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 80(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-128(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,64(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 movdqa %xmm4,(%ebx) pshufb (%eax),%xmm7 movdqa %xmm2,-64(%ebx) paddd %xmm7,%xmm5 movdqa 32(%ebx),%xmm4 pxor %xmm5,%xmm3 movdqa -32(%ebx),%xmm2 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -96(%ebx),%xmm0 paddd %xmm3,%xmm1 movdqa 96(%ebx),%xmm6 pxor %xmm1,%xmm7 movdqa %xmm1,-112(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,80(%ebx) pxor %xmm5,%xmm3 paddd %xmm2,%xmm0 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 pxor %xmm0,%xmm6 por %xmm1,%xmm3 movdqa %xmm5,16(%ebx) pshufb (%eax),%xmm6 movdqa %xmm3,-48(%ebx) paddd %xmm6,%xmm4 movdqa 48(%ebx),%xmm5 
pxor %xmm4,%xmm2 movdqa -16(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -80(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 112(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-96(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,96(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 pshufb (%eax),%xmm7 movdqa %xmm2,-32(%ebx) paddd %xmm7,%xmm5 pxor %xmm5,%xmm3 movdqa -48(%ebx),%xmm2 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -128(%ebx),%xmm0 paddd %xmm3,%xmm1 pxor %xmm1,%xmm7 movdqa %xmm1,-80(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,%xmm6 pxor %xmm5,%xmm3 paddd %xmm2,%xmm0 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 pxor %xmm0,%xmm6 por %xmm1,%xmm3 pshufb (%eax),%xmm6 movdqa %xmm3,-16(%ebx) paddd %xmm6,%xmm4 pxor %xmm4,%xmm2 movdqa -32(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -112(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 64(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-128(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,112(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 movdqa %xmm4,32(%ebx) pshufb (%eax),%xmm7 movdqa %xmm2,-48(%ebx) paddd %xmm7,%xmm5 movdqa (%ebx),%xmm4 pxor %xmm5,%xmm3 movdqa -16(%ebx),%xmm2 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -96(%ebx),%xmm0 paddd %xmm3,%xmm1 movdqa 80(%ebx),%xmm6 pxor %xmm1,%xmm7 movdqa %xmm1,-112(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,64(%ebx) pxor %xmm5,%xmm3 paddd %xmm2,%xmm0 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 pxor %xmm0,%xmm6 por %xmm1,%xmm3 movdqa %xmm5,48(%ebx) pshufb (%eax),%xmm6 movdqa %xmm3,-32(%ebx) paddd %xmm6,%xmm4 movdqa 16(%ebx),%xmm5 pxor %xmm4,%xmm2 movdqa -64(%ebx),%xmm3 movdqa %xmm2,%xmm1 pslld $12,%xmm2 psrld $20,%xmm1 por %xmm1,%xmm2 movdqa -80(%ebx),%xmm1 paddd %xmm2,%xmm0 movdqa 96(%ebx),%xmm7 pxor %xmm0,%xmm6 movdqa %xmm0,-96(%ebx) pshufb 16(%eax),%xmm6 paddd %xmm6,%xmm4 movdqa %xmm6,80(%ebx) pxor %xmm4,%xmm2 paddd %xmm3,%xmm1 movdqa %xmm2,%xmm0 pslld $7,%xmm2 psrld $25,%xmm0 pxor %xmm1,%xmm7 por %xmm0,%xmm2 pshufb (%eax),%xmm7 movdqa %xmm2,-16(%ebx) paddd %xmm7,%xmm5 pxor %xmm5,%xmm3 movdqa %xmm3,%xmm0 pslld $12,%xmm3 psrld $20,%xmm0 por %xmm0,%xmm3 movdqa -128(%ebx),%xmm0 paddd %xmm3,%xmm1 movdqa 64(%ebx),%xmm6 pxor %xmm1,%xmm7 movdqa %xmm1,-80(%ebx) pshufb 16(%eax),%xmm7 paddd %xmm7,%xmm5 movdqa %xmm7,96(%ebx) pxor %xmm5,%xmm3 movdqa %xmm3,%xmm1 pslld $7,%xmm3 psrld $25,%xmm1 por %xmm1,%xmm3 decl %edx jnz .L002loop movdqa %xmm3,-64(%ebx) movdqa %xmm4,(%ebx) movdqa %xmm5,16(%ebx) movdqa %xmm6,64(%ebx) movdqa %xmm7,96(%ebx) movdqa -112(%ebx),%xmm1 movdqa -96(%ebx),%xmm2 movdqa -80(%ebx),%xmm3 paddd -128(%ebp),%xmm0 paddd -112(%ebp),%xmm1 paddd -96(%ebp),%xmm2 paddd -80(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 16(%esi),%esi pxor %xmm0,%xmm4 movdqa -64(%ebx),%xmm0 pxor %xmm1,%xmm5 movdqa -48(%ebx),%xmm1 pxor %xmm2,%xmm6 movdqa -32(%ebx),%xmm2 pxor %xmm3,%xmm7 movdqa -16(%ebx),%xmm3 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu 
%xmm7,64(%edi) leal 16(%edi),%edi paddd -64(%ebp),%xmm0 paddd -48(%ebp),%xmm1 paddd -32(%ebp),%xmm2 paddd -16(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 16(%esi),%esi pxor %xmm0,%xmm4 movdqa (%ebx),%xmm0 pxor %xmm1,%xmm5 movdqa 16(%ebx),%xmm1 pxor %xmm2,%xmm6 movdqa 32(%ebx),%xmm2 pxor %xmm3,%xmm7 movdqa 48(%ebx),%xmm3 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 16(%edi),%edi paddd (%ebp),%xmm0 paddd 16(%ebp),%xmm1 paddd 32(%ebp),%xmm2 paddd 48(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 16(%esi),%esi pxor %xmm0,%xmm4 movdqa 64(%ebx),%xmm0 pxor %xmm1,%xmm5 movdqa 80(%ebx),%xmm1 pxor %xmm2,%xmm6 movdqa 96(%ebx),%xmm2 pxor %xmm3,%xmm7 movdqa 112(%ebx),%xmm3 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 16(%edi),%edi paddd 64(%ebp),%xmm0 paddd 80(%ebp),%xmm1 paddd 96(%ebp),%xmm2 paddd 112(%ebp),%xmm3 movdqa %xmm0,%xmm6 punpckldq %xmm1,%xmm0 movdqa %xmm2,%xmm7 punpckldq %xmm3,%xmm2 punpckhdq %xmm1,%xmm6 punpckhdq %xmm3,%xmm7 movdqa %xmm0,%xmm1 punpcklqdq %xmm2,%xmm0 movdqa %xmm6,%xmm3 punpcklqdq %xmm7,%xmm6 punpckhqdq %xmm2,%xmm1 punpckhqdq %xmm7,%xmm3 movdqu -128(%esi),%xmm4 movdqu -64(%esi),%xmm5 movdqu (%esi),%xmm2 movdqu 64(%esi),%xmm7 leal 208(%esi),%esi pxor %xmm0,%xmm4 pxor %xmm1,%xmm5 pxor %xmm2,%xmm6 pxor %xmm3,%xmm7 movdqu %xmm4,-128(%edi) movdqu %xmm5,-64(%edi) movdqu %xmm6,(%edi) movdqu %xmm7,64(%edi) leal 208(%edi),%edi subl $256,%ecx jnc .L001outer_loop addl $256,%ecx jz .L003done movl 520(%esp),%ebx leal -128(%esi),%esi movl 516(%esp),%edx leal -128(%edi),%edi movd 64(%ebp),%xmm2 movdqu (%ebx),%xmm3 paddd 96(%eax),%xmm2 pand 112(%eax),%xmm3 por %xmm2,%xmm3 .L0001x: movdqa 32(%eax),%xmm0 movdqu (%edx),%xmm1 movdqu 16(%edx),%xmm2 movdqa (%eax),%xmm6 movdqa 16(%eax),%xmm7 movl %ebp,48(%esp) movdqa %xmm0,(%esp) movdqa %xmm1,16(%esp) movdqa %xmm2,32(%esp) movdqa %xmm3,48(%esp) movl $10,%edx jmp .L004loop1x .align 16 .L005outer1x: movdqa 80(%eax),%xmm3 movdqa (%esp),%xmm0 movdqa 16(%esp),%xmm1 movdqa 32(%esp),%xmm2 paddd 48(%esp),%xmm3 movl $10,%edx movdqa %xmm3,48(%esp) jmp .L004loop1x .align 16 .L004loop1x: paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,222 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $20,%xmm1 pslld $12,%xmm4 por %xmm4,%xmm1 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,223 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $25,%xmm1 pslld $7,%xmm4 por %xmm4,%xmm1 pshufd $78,%xmm2,%xmm2 pshufd $57,%xmm1,%xmm1 pshufd $147,%xmm3,%xmm3 nop paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,222 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $20,%xmm1 pslld $12,%xmm4 por %xmm4,%xmm1 paddd %xmm1,%xmm0 pxor %xmm0,%xmm3 .byte 102,15,56,0,223 paddd %xmm3,%xmm2 pxor %xmm2,%xmm1 movdqa %xmm1,%xmm4 psrld $25,%xmm1 pslld $7,%xmm4 por %xmm4,%xmm1 pshufd $78,%xmm2,%xmm2 pshufd $147,%xmm1,%xmm1 pshufd 
$57,%xmm3,%xmm3 decl %edx jnz .L004loop1x paddd (%esp),%xmm0 paddd 16(%esp),%xmm1 paddd 32(%esp),%xmm2 paddd 48(%esp),%xmm3 cmpl $64,%ecx jb .L006tail movdqu (%esi),%xmm4 movdqu 16(%esi),%xmm5 pxor %xmm4,%xmm0 movdqu 32(%esi),%xmm4 pxor %xmm5,%xmm1 movdqu 48(%esi),%xmm5 pxor %xmm4,%xmm2 pxor %xmm5,%xmm3 leal 64(%esi),%esi movdqu %xmm0,(%edi) movdqu %xmm1,16(%edi) movdqu %xmm2,32(%edi) movdqu %xmm3,48(%edi) leal 64(%edi),%edi subl $64,%ecx jnz .L005outer1x jmp .L003done .L006tail: movdqa %xmm0,(%esp) movdqa %xmm1,16(%esp) movdqa %xmm2,32(%esp) movdqa %xmm3,48(%esp) xorl %eax,%eax xorl %edx,%edx xorl %ebp,%ebp .L007tail_loop: movb (%esp,%ebp,1),%al movb (%esi,%ebp,1),%dl leal 1(%ebp),%ebp xorb %dl,%al movb %al,-1(%edi,%ebp,1) decl %ecx jnz .L007tail_loop .L003done: movl 512(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size ChaCha20_ctr32_ssse3,.-.L_ChaCha20_ctr32_ssse3_begin .align 64 .Lssse3_data: .byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13 .byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14 .long 1634760805,857760878,2036477234,1797285236 .long 0,1,2,3 .long 4,4,4,4 .long 1,0,0,0 .long 4,0,0,0 .long 0,-1,-1,-1 .align 64 .byte 67,104,97,67,104,97,50,48,32,102,111,114,32,120,56,54 .byte 44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32 .byte 60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111 .byte 114,103,62,0 #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
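ChaCha20_ctr32_ssse3 above runs the ChaCha20 quarter round with paddd/pxor pairs for the add-xor steps, pshufb against the byte tables at .Lssse3_data for the 16- and 8-bit rotations, and pslld/psrld/por pairs for the 12- and 7-bit rotations, with a multi-block main path and a single-block tail path. The scalar quarter round it vectorizes looks roughly like this in Rust (an illustrative sketch, not ring code):

// Illustrative only: the RFC 8439 quarter round on a 16-word ChaCha20 state.
fn quarter_round(state: &mut [u32; 16], a: usize, b: usize, c: usize, d: usize) {
    state[a] = state[a].wrapping_add(state[b]);
    state[d] = (state[d] ^ state[a]).rotate_left(16); // pshufb with the word-swap table
    state[c] = state[c].wrapping_add(state[d]);
    state[b] = (state[b] ^ state[c]).rotate_left(12); // pslld $12 / psrld $20 / por
    state[a] = state[a].wrapping_add(state[b]);
    state[d] = (state[d] ^ state[a]).rotate_left(8);  // pshufb with the byte-rotate table
    state[c] = state[c].wrapping_add(state[d]);
    state[b] = (state[b] ^ state[c]).rotate_left(7);  // pslld $7 / psrld $25 / por
}

A double round is four column quarter rounds on indices (0,4,8,12), (1,5,9,13), (2,6,10,14), (3,7,11,15) followed by four diagonal ones on (0,5,10,15), (1,6,11,12), (2,7,8,13), (3,4,9,14); the pshufd shuffles in the single-block path above rearrange the rows between the two halves of a double round.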
marvin-hansen/iggy-streaming-system
11,057
thirdparty/crates/ring-0.17.9/pregenerated/ghash-neon-armv8-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include <ring-core/arm_arch.h> .text .globl gcm_init_neon .hidden gcm_init_neon .type gcm_init_neon,%function .align 4 gcm_init_neon: AARCH64_VALID_CALL_TARGET // This function is adapted from gcm_init_v8. xC2 is t3. ld1 {v17.2d}, [x1] // load H movi v19.16b, #0xe1 shl v19.2d, v19.2d, #57 // 0xc2.0 ext v3.16b, v17.16b, v17.16b, #8 ushr v18.2d, v19.2d, #63 dup v17.4s, v17.s[1] ext v16.16b, v18.16b, v19.16b, #8 // t0=0xc2....01 ushr v18.2d, v3.2d, #63 sshr v17.4s, v17.4s, #31 // broadcast carry bit and v18.16b, v18.16b, v16.16b shl v3.2d, v3.2d, #1 ext v18.16b, v18.16b, v18.16b, #8 and v16.16b, v16.16b, v17.16b orr v3.16b, v3.16b, v18.16b // H<<<=1 eor v5.16b, v3.16b, v16.16b // twisted H st1 {v5.2d}, [x0] // store Htable[0] ret .size gcm_init_neon,.-gcm_init_neon .globl gcm_gmult_neon .hidden gcm_gmult_neon .type gcm_gmult_neon,%function .align 4 gcm_gmult_neon: AARCH64_VALID_CALL_TARGET ld1 {v3.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] adrp x9, .Lmasks // load constants add x9, x9, :lo12:.Lmasks ld1 {v24.2d, v25.2d}, [x9] rev64 v3.16b, v3.16b // byteswap Xi ext v3.16b, v3.16b, v3.16b, #8 eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing mov x3, #16 b .Lgmult_neon .size gcm_gmult_neon,.-gcm_gmult_neon .globl gcm_ghash_neon .hidden gcm_ghash_neon .type gcm_ghash_neon,%function .align 4 gcm_ghash_neon: AARCH64_VALID_CALL_TARGET ld1 {v0.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] adrp x9, .Lmasks // load constants add x9, x9, :lo12:.Lmasks ld1 {v24.2d, v25.2d}, [x9] rev64 v0.16b, v0.16b // byteswap Xi ext v0.16b, v0.16b, v0.16b, #8 eor v7.8b, v5.8b, v6.8b // Karatsuba pre-processing .Loop_neon: ld1 {v3.16b}, [x2], #16 // load inp rev64 v3.16b, v3.16b // byteswap inp ext v3.16b, v3.16b, v3.16b, #8 eor v3.16b, v3.16b, v0.16b // inp ^= Xi .Lgmult_neon: // Split the input into v3 and v4. (The upper halves are unused, // so it is okay to leave them alone.) ins v4.d[0], v3.d[1] ext v16.8b, v5.8b, v5.8b, #1 // A1 pmull v16.8h, v16.8b, v3.8b // F = A1*B ext v0.8b, v3.8b, v3.8b, #1 // B1 pmull v0.8h, v5.8b, v0.8b // E = A*B1 ext v17.8b, v5.8b, v5.8b, #2 // A2 pmull v17.8h, v17.8b, v3.8b // H = A2*B ext v19.8b, v3.8b, v3.8b, #2 // B2 pmull v19.8h, v5.8b, v19.8b // G = A*B2 ext v18.8b, v5.8b, v5.8b, #3 // A3 eor v16.16b, v16.16b, v0.16b // L = E + F pmull v18.8h, v18.8b, v3.8b // J = A3*B ext v0.8b, v3.8b, v3.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v0.8h, v5.8b, v0.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. 
ext v19.8b, v3.8b, v3.8b, #4 // B4 eor v18.16b, v18.16b, v0.16b // N = I + J pmull v19.8h, v5.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v0.8h, v5.8b, v3.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v0.16b, v0.16b, v16.16b eor v0.16b, v0.16b, v18.16b eor v3.8b, v3.8b, v4.8b // Karatsuba pre-processing ext v16.8b, v7.8b, v7.8b, #1 // A1 pmull v16.8h, v16.8b, v3.8b // F = A1*B ext v1.8b, v3.8b, v3.8b, #1 // B1 pmull v1.8h, v7.8b, v1.8b // E = A*B1 ext v17.8b, v7.8b, v7.8b, #2 // A2 pmull v17.8h, v17.8b, v3.8b // H = A2*B ext v19.8b, v3.8b, v3.8b, #2 // B2 pmull v19.8h, v7.8b, v19.8b // G = A*B2 ext v18.8b, v7.8b, v7.8b, #3 // A3 eor v16.16b, v16.16b, v1.16b // L = E + F pmull v18.8h, v18.8b, v3.8b // J = A3*B ext v1.8b, v3.8b, v3.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v1.8h, v7.8b, v1.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v3.8b, v3.8b, #4 // B4 eor v18.16b, v18.16b, v1.16b // N = I + J pmull v19.8h, v7.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. 
zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v1.8h, v7.8b, v3.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v1.16b, v1.16b, v16.16b eor v1.16b, v1.16b, v18.16b ext v16.8b, v6.8b, v6.8b, #1 // A1 pmull v16.8h, v16.8b, v4.8b // F = A1*B ext v2.8b, v4.8b, v4.8b, #1 // B1 pmull v2.8h, v6.8b, v2.8b // E = A*B1 ext v17.8b, v6.8b, v6.8b, #2 // A2 pmull v17.8h, v17.8b, v4.8b // H = A2*B ext v19.8b, v4.8b, v4.8b, #2 // B2 pmull v19.8h, v6.8b, v19.8b // G = A*B2 ext v18.8b, v6.8b, v6.8b, #3 // A3 eor v16.16b, v16.16b, v2.16b // L = E + F pmull v18.8h, v18.8b, v4.8b // J = A3*B ext v2.8b, v4.8b, v4.8b, #3 // B3 eor v17.16b, v17.16b, v19.16b // M = G + H pmull v2.8h, v6.8b, v2.8b // I = A*B3 // Here we diverge from the 32-bit version. It computes the following // (instructions reordered for clarity): // // veor $t0#lo, $t0#lo, $t0#hi @ t0 = P0 + P1 (L) // vand $t0#hi, $t0#hi, $k48 // veor $t0#lo, $t0#lo, $t0#hi // // veor $t1#lo, $t1#lo, $t1#hi @ t1 = P2 + P3 (M) // vand $t1#hi, $t1#hi, $k32 // veor $t1#lo, $t1#lo, $t1#hi // // veor $t2#lo, $t2#lo, $t2#hi @ t2 = P4 + P5 (N) // vand $t2#hi, $t2#hi, $k16 // veor $t2#lo, $t2#lo, $t2#hi // // veor $t3#lo, $t3#lo, $t3#hi @ t3 = P6 + P7 (K) // vmov.i64 $t3#hi, #0 // // $kN is a mask with the bottom N bits set. AArch64 cannot compute on // upper halves of SIMD registers, so we must split each half into // separate registers. To compensate, we pair computations up and // parallelize. ext v19.8b, v4.8b, v4.8b, #4 // B4 eor v18.16b, v18.16b, v2.16b // N = I + J pmull v19.8h, v6.8b, v19.8b // K = A*B4 // This can probably be scheduled more efficiently. For now, we just // pair up independent instructions. zip1 v20.2d, v16.2d, v17.2d zip1 v22.2d, v18.2d, v19.2d zip2 v21.2d, v16.2d, v17.2d zip2 v23.2d, v18.2d, v19.2d eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b and v21.16b, v21.16b, v24.16b and v23.16b, v23.16b, v25.16b eor v20.16b, v20.16b, v21.16b eor v22.16b, v22.16b, v23.16b zip1 v16.2d, v20.2d, v21.2d zip1 v18.2d, v22.2d, v23.2d zip2 v17.2d, v20.2d, v21.2d zip2 v19.2d, v22.2d, v23.2d ext v16.16b, v16.16b, v16.16b, #15 // t0 = t0 << 8 ext v17.16b, v17.16b, v17.16b, #14 // t1 = t1 << 16 pmull v2.8h, v6.8b, v4.8b // D = A*B ext v19.16b, v19.16b, v19.16b, #12 // t3 = t3 << 32 ext v18.16b, v18.16b, v18.16b, #13 // t2 = t2 << 24 eor v16.16b, v16.16b, v17.16b eor v18.16b, v18.16b, v19.16b eor v2.16b, v2.16b, v16.16b eor v2.16b, v2.16b, v18.16b ext v16.16b, v0.16b, v2.16b, #8 eor v1.16b, v1.16b, v0.16b // Karatsuba post-processing eor v1.16b, v1.16b, v2.16b eor v1.16b, v1.16b, v16.16b // Xm overlaps Xh.lo and Xl.hi ins v0.d[1], v1.d[0] // Xh|Xl - 256-bit result // This is a no-op due to the ins instruction below. 
// ins v2.d[0], v1.d[1] // equivalent of reduction_avx from ghash-x86_64.pl shl v17.2d, v0.2d, #57 // 1st phase shl v18.2d, v0.2d, #62 eor v18.16b, v18.16b, v17.16b // shl v17.2d, v0.2d, #63 eor v18.16b, v18.16b, v17.16b // // Note Xm contains {Xl.d[1], Xh.d[0]}. eor v18.16b, v18.16b, v1.16b ins v0.d[1], v18.d[0] // Xl.d[1] ^= t2.d[0] ins v2.d[0], v18.d[1] // Xh.d[0] ^= t2.d[1] ushr v18.2d, v0.2d, #1 // 2nd phase eor v2.16b, v2.16b,v0.16b eor v0.16b, v0.16b,v18.16b // ushr v18.2d, v18.2d, #6 ushr v0.2d, v0.2d, #1 // eor v0.16b, v0.16b, v2.16b // eor v0.16b, v0.16b, v18.16b // subs x3, x3, #16 bne .Loop_neon rev64 v0.16b, v0.16b // byteswap Xi and write ext v0.16b, v0.16b, v0.16b, #8 st1 {v0.16b}, [x0] ret .size gcm_ghash_neon,.-gcm_ghash_neon .section .rodata .align 4 .Lmasks: .quad 0x0000ffffffffffff // k48 .quad 0x00000000ffffffff // k32 .quad 0x000000000000ffff // k16 .quad 0x0000000000000000 // k0 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,100,101,114,105,118,101,100,32,102,114,111,109,32,65,82,77,118,52,32,118,101,114,115,105,111,110,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
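gcm_ghash_neon above absorbs each 16-byte block by xoring it into Xi and multiplying by the hash key in GF(2^128), synthesizing the carry-less multiply from 8-bit pmull partial products (the A1..A4/B1..B4 comments) and then applying the two-phase reduction. The operation it implements is easiest to see in the bit-serial reference form from SP 800-38D; below is a minimal Rust sketch that treats each block as a big-endian u128. gf128_mul is an invented name, not a ring API, and this form is far slower than the vectorized code.

// Illustrative only: bit-serial GHASH multiplication in GF(2^128).
fn gf128_mul(x: u128, h: u128) -> u128 {
    const R: u128 = 0xe1u128 << 120; // GHASH reduction constant from SP 800-38D
    let mut z = 0u128;
    let mut v = h;
    for i in 0..128 {
        if (x >> (127 - i)) & 1 == 1 {
            z ^= v; // accumulate V for each set bit of X, MSB first
        }
        let carry = v & 1;
        v >>= 1;
        if carry == 1 {
            v ^= R; // reduce when a bit falls off the low end
        }
    }
    z
}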
marvin-hansen/iggy-streaming-system
7,888
thirdparty/crates/ring-0.17.9/pregenerated/aesv8-armx-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include <ring-core/arm_arch.h> #if __ARM_MAX_ARCH__>=7 .text .arch armv8-a+crypto .section .rodata .align 5 .Lrcon: .long 0x01,0x01,0x01,0x01 .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat .long 0x1b,0x1b,0x1b,0x1b .text .globl aes_hw_set_encrypt_key .hidden aes_hw_set_encrypt_key .type aes_hw_set_encrypt_key,%function .align 5 aes_hw_set_encrypt_key: .Lenc_key: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 mov x3,#-2 cmp w1,#128 b.lt .Lenc_key_abort cmp w1,#256 b.gt .Lenc_key_abort tst w1,#0x3f b.ne .Lenc_key_abort adrp x3,.Lrcon add x3,x3,:lo12:.Lrcon cmp w1,#192 eor v0.16b,v0.16b,v0.16b ld1 {v3.16b},[x0],#16 mov w1,#8 // reuse w1 ld1 {v1.4s,v2.4s},[x3],#32 b.lt .Loop128 // 192-bit key support was removed. b .L256 .align 4 .Loop128: tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b b.ne .Loop128 ld1 {v1.4s},[x3] tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b eor v3.16b,v3.16b,v6.16b st1 {v3.4s},[x2] add x2,x2,#0x50 mov w12,#10 b .Ldone // 192-bit key support was removed. .align 4 .L256: ld1 {v4.16b},[x0] mov w1,#7 mov w12,#14 st1 {v3.4s},[x2],#16 .Loop256: tbl v6.16b,{v4.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v4.4s},[x2],#16 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b st1 {v3.4s},[x2],#16 b.eq .Ldone dup v6.4s,v3.s[3] // just splat ext v5.16b,v0.16b,v4.16b,#12 aese v6.16b,v0.16b eor v4.16b,v4.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v4.16b,v4.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v4.16b,v4.16b,v5.16b eor v4.16b,v4.16b,v6.16b b .Loop256 .Ldone: str w12,[x2] mov x3,#0 .Lenc_key_abort: mov x0,x3 // return value ldr x29,[sp],#16 ret .size aes_hw_set_encrypt_key,.-aes_hw_set_encrypt_key .globl aes_hw_ctr32_encrypt_blocks .hidden aes_hw_ctr32_encrypt_blocks .type aes_hw_ctr32_encrypt_blocks,%function .align 5 aes_hw_ctr32_encrypt_blocks: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 ldr w5,[x3,#240] ldr w8, [x4, #12] ld1 {v0.4s},[x4] ld1 {v16.4s,v17.4s},[x3] // load key schedule... 
sub w5,w5,#4 mov x12,#16 cmp x2,#2 add x7,x3,x5,lsl#4 // pointer to last 5 round keys sub w5,w5,#2 ld1 {v20.4s,v21.4s},[x7],#32 ld1 {v22.4s,v23.4s},[x7],#32 ld1 {v7.4s},[x7] add x7,x3,#32 mov w6,w5 csel x12,xzr,x12,lo // ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are // affected by silicon errata #1742098 [0] and #1655431 [1], // respectively, where the second instruction of an aese/aesmc // instruction pair may execute twice if an interrupt is taken right // after the first instruction consumes an input register of which a // single 32-bit lane has been updated the last time it was modified. // // This function uses a counter in one 32-bit lane. The vmov lines // could write to v1.16b and v18.16b directly, but that trips this bugs. // We write to v6.16b and copy to the final register as a workaround. // // [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice // [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice #ifndef __AARCH64EB__ rev w8, w8 #endif add w10, w8, #1 orr v6.16b,v0.16b,v0.16b rev w10, w10 mov v6.s[3],w10 add w8, w8, #2 orr v1.16b,v6.16b,v6.16b b.ls .Lctr32_tail rev w12, w8 mov v6.s[3],w12 sub x2,x2,#3 // bias orr v18.16b,v6.16b,v6.16b b .Loop3x_ctr32 .align 4 .Loop3x_ctr32: aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b aese v18.16b,v17.16b aesmc v18.16b,v18.16b ld1 {v17.4s},[x7],#16 b.gt .Loop3x_ctr32 aese v0.16b,v16.16b aesmc v4.16b,v0.16b aese v1.16b,v16.16b aesmc v5.16b,v1.16b ld1 {v2.16b},[x0],#16 add w9,w8,#1 aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v3.16b},[x0],#16 rev w9,w9 aese v4.16b,v17.16b aesmc v4.16b,v4.16b aese v5.16b,v17.16b aesmc v5.16b,v5.16b ld1 {v19.16b},[x0],#16 mov x7,x3 aese v18.16b,v17.16b aesmc v17.16b,v18.16b aese v4.16b,v20.16b aesmc v4.16b,v4.16b aese v5.16b,v20.16b aesmc v5.16b,v5.16b eor v2.16b,v2.16b,v7.16b add w10,w8,#2 aese v17.16b,v20.16b aesmc v17.16b,v17.16b eor v3.16b,v3.16b,v7.16b add w8,w8,#3 aese v4.16b,v21.16b aesmc v4.16b,v4.16b aese v5.16b,v21.16b aesmc v5.16b,v5.16b // Note the logic to update v0.16b, v1.16b, and v1.16b is written to work // around a bug in ARM Cortex-A57 and Cortex-A72 cores running in // 32-bit mode. See the comment above. 
eor v19.16b,v19.16b,v7.16b mov v6.s[3], w9 aese v17.16b,v21.16b aesmc v17.16b,v17.16b orr v0.16b,v6.16b,v6.16b rev w10,w10 aese v4.16b,v22.16b aesmc v4.16b,v4.16b mov v6.s[3], w10 rev w12,w8 aese v5.16b,v22.16b aesmc v5.16b,v5.16b orr v1.16b,v6.16b,v6.16b mov v6.s[3], w12 aese v17.16b,v22.16b aesmc v17.16b,v17.16b orr v18.16b,v6.16b,v6.16b subs x2,x2,#3 aese v4.16b,v23.16b aese v5.16b,v23.16b aese v17.16b,v23.16b eor v2.16b,v2.16b,v4.16b ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0] st1 {v2.16b},[x1],#16 eor v3.16b,v3.16b,v5.16b mov w6,w5 st1 {v3.16b},[x1],#16 eor v19.16b,v19.16b,v17.16b ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1] st1 {v19.16b},[x1],#16 b.hs .Loop3x_ctr32 adds x2,x2,#3 b.eq .Lctr32_done cmp x2,#1 mov x12,#16 csel x12,xzr,x12,eq .Lctr32_tail: aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b ld1 {v17.4s},[x7],#16 b.gt .Lctr32_tail aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b ld1 {v2.16b},[x0],x12 aese v0.16b,v20.16b aesmc v0.16b,v0.16b aese v1.16b,v20.16b aesmc v1.16b,v1.16b ld1 {v3.16b},[x0] aese v0.16b,v21.16b aesmc v0.16b,v0.16b aese v1.16b,v21.16b aesmc v1.16b,v1.16b eor v2.16b,v2.16b,v7.16b aese v0.16b,v22.16b aesmc v0.16b,v0.16b aese v1.16b,v22.16b aesmc v1.16b,v1.16b eor v3.16b,v3.16b,v7.16b aese v0.16b,v23.16b aese v1.16b,v23.16b cmp x2,#1 eor v2.16b,v2.16b,v0.16b eor v3.16b,v3.16b,v1.16b st1 {v2.16b},[x1],#16 b.eq .Lctr32_done st1 {v3.16b},[x1] .Lctr32_done: ldr x29,[sp],#16 ret .size aes_hw_ctr32_encrypt_blocks,.-aes_hw_ctr32_encrypt_blocks #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
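aes_hw_ctr32_encrypt_blocks above keeps the 32-bit block counter in the last lane of the counter vector: it byte-reverses the loaded word on little-endian targets, increments it once per block, and writes it back through v6 to sidestep the Cortex-A57/A72 errata described in the comments. The counter handling amounts to the following (a sketch with an invented helper name, not a ring API); only the low 32 bits of the block wrap, which matches the ctr32 naming.

// Illustrative only: increment the big-endian 32-bit counter in the last 4 bytes
// of a 16-byte counter block, as the rev/add/rev + mov v6.s[3] sequence does.
fn ctr32_increment(counter_block: &mut [u8; 16]) {
    let mut ctr = u32::from_be_bytes(counter_block[12..16].try_into().unwrap());
    ctr = ctr.wrapping_add(1);
    counter_block[12..16].copy_from_slice(&ctr.to_be_bytes());
}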
marvin-hansen/iggy-streaming-system
49,206
thirdparty/crates/ring-0.17.9/pregenerated/sha512-armv8-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) // Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy // in the file LICENSE in the source distribution or at // https://www.openssl.org/source/license.html // ==================================================================== // Written by Andy Polyakov <appro@openssl.org> for the OpenSSL // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. // // Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. // // Performance in cycles per processed byte and improvement coefficient // over code generated with "default" compiler: // // SHA256-hw SHA256(*) SHA512 // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) // Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. // (**) The result is a trade-off: it's possible to improve it by // 10% (or by 1 cycle per round), but at the cost of 20% loss // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code // generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ # include <ring-core/arm_arch.h> #endif .text .globl sha512_block_data_order_nohw .hidden sha512_block_data_order_nohw .type sha512_block_data_order_nohw,%function .align 6 sha512_block_data_order_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#4*8 ldp x20,x21,[x0] // load context ldp x22,x23,[x0,#2*8] ldp x24,x25,[x0,#4*8] add x2,x1,x2,lsl#7 // end of input ldp x26,x27,[x0,#6*8] adrp x30,.LK512 add x30,x30,:lo12:.LK512 stp x0,x2,[x29,#96] .Loop: ldp x3,x4,[x1],#2*8 ldr x19,[x30],#8 // *K++ eor x28,x21,x22 // magic seed str x1,[x29,#112] #ifndef __AARCH64EB__ rev x3,x3 // 0 #endif ror x16,x24,#14 add x27,x27,x19 // h+=K[i] eor x6,x24,x24,ror#23 and x17,x25,x24 bic x19,x26,x24 add x27,x27,x3 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x6,ror#18 // Sigma1(e) ror x6,x20,#28 add x27,x27,x17 // h+=Ch(e,f,g) eor x17,x20,x20,ror#5 add x27,x27,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x23,x23,x27 // d+=h eor x28,x28,x21 // Maj(a,b,c) eor x17,x6,x17,ror#34 // Sigma0(a) add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x4,x4 // 1 #endif ldp x5,x6,[x1],#2*8 add x27,x27,x17 // h+=Sigma0(a) ror x16,x23,#14 add x26,x26,x28 // h+=K[i] eor x7,x23,x23,ror#23 and x17,x24,x23 bic x28,x25,x23 add x26,x26,x4 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x7,ror#18 // Sigma1(e) ror x7,x27,#28 add x26,x26,x17 // h+=Ch(e,f,g) eor x17,x27,x27,ror#5 add x26,x26,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x22,x22,x26 // d+=h eor x19,x19,x20 // Maj(a,b,c) eor x17,x7,x17,ror#34 // Sigma0(a) add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x5,x5 // 2 #endif add x26,x26,x17 // h+=Sigma0(a) ror x16,x22,#14 add x25,x25,x19 // h+=K[i] eor x8,x22,x22,ror#23 and x17,x23,x22 bic x19,x24,x22 add x25,x25,x5 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x8,ror#18 // Sigma1(e) ror x8,x26,#28 add x25,x25,x17 // h+=Ch(e,f,g) eor x17,x26,x26,ror#5 add x25,x25,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x21,x21,x25 // d+=h eor x28,x28,x27 // Maj(a,b,c) eor x17,x8,x17,ror#34 // Sigma0(a) add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x6,x6 // 3 #endif ldp x7,x8,[x1],#2*8 add x25,x25,x17 // h+=Sigma0(a) ror x16,x21,#14 add x24,x24,x28 // h+=K[i] eor x9,x21,x21,ror#23 and x17,x22,x21 bic x28,x23,x21 add x24,x24,x6 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x9,ror#18 // Sigma1(e) ror x9,x25,#28 add x24,x24,x17 // h+=Ch(e,f,g) eor x17,x25,x25,ror#5 add x24,x24,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x20,x20,x24 // d+=h eor x19,x19,x26 // Maj(a,b,c) eor x17,x9,x17,ror#34 // Sigma0(a) add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x7,x7 // 4 #endif add x24,x24,x17 // h+=Sigma0(a) ror x16,x20,#14 add x23,x23,x19 // h+=K[i] eor x10,x20,x20,ror#23 and x17,x21,x20 bic x19,x22,x20 add x23,x23,x7 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x10,ror#18 // Sigma1(e) ror x10,x24,#28 add x23,x23,x17 // h+=Ch(e,f,g) eor x17,x24,x24,ror#5 add x23,x23,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x27,x27,x23 // d+=h eor x28,x28,x25 // Maj(a,b,c) eor x17,x10,x17,ror#34 // Sigma0(a) add x23,x23,x28 // h+=Maj(a,b,c) 
ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x8,x8 // 5 #endif ldp x9,x10,[x1],#2*8 add x23,x23,x17 // h+=Sigma0(a) ror x16,x27,#14 add x22,x22,x28 // h+=K[i] eor x11,x27,x27,ror#23 and x17,x20,x27 bic x28,x21,x27 add x22,x22,x8 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x11,ror#18 // Sigma1(e) ror x11,x23,#28 add x22,x22,x17 // h+=Ch(e,f,g) eor x17,x23,x23,ror#5 add x22,x22,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x26,x26,x22 // d+=h eor x19,x19,x24 // Maj(a,b,c) eor x17,x11,x17,ror#34 // Sigma0(a) add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x9,x9 // 6 #endif add x22,x22,x17 // h+=Sigma0(a) ror x16,x26,#14 add x21,x21,x19 // h+=K[i] eor x12,x26,x26,ror#23 and x17,x27,x26 bic x19,x20,x26 add x21,x21,x9 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x12,ror#18 // Sigma1(e) ror x12,x22,#28 add x21,x21,x17 // h+=Ch(e,f,g) eor x17,x22,x22,ror#5 add x21,x21,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x25,x25,x21 // d+=h eor x28,x28,x23 // Maj(a,b,c) eor x17,x12,x17,ror#34 // Sigma0(a) add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x10,x10 // 7 #endif ldp x11,x12,[x1],#2*8 add x21,x21,x17 // h+=Sigma0(a) ror x16,x25,#14 add x20,x20,x28 // h+=K[i] eor x13,x25,x25,ror#23 and x17,x26,x25 bic x28,x27,x25 add x20,x20,x10 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x13,ror#18 // Sigma1(e) ror x13,x21,#28 add x20,x20,x17 // h+=Ch(e,f,g) eor x17,x21,x21,ror#5 add x20,x20,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x24,x24,x20 // d+=h eor x19,x19,x22 // Maj(a,b,c) eor x17,x13,x17,ror#34 // Sigma0(a) add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x20,x20,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x11,x11 // 8 #endif add x20,x20,x17 // h+=Sigma0(a) ror x16,x24,#14 add x27,x27,x19 // h+=K[i] eor x14,x24,x24,ror#23 and x17,x25,x24 bic x19,x26,x24 add x27,x27,x11 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x14,ror#18 // Sigma1(e) ror x14,x20,#28 add x27,x27,x17 // h+=Ch(e,f,g) eor x17,x20,x20,ror#5 add x27,x27,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x23,x23,x27 // d+=h eor x28,x28,x21 // Maj(a,b,c) eor x17,x14,x17,ror#34 // Sigma0(a) add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x12,x12 // 9 #endif ldp x13,x14,[x1],#2*8 add x27,x27,x17 // h+=Sigma0(a) ror x16,x23,#14 add x26,x26,x28 // h+=K[i] eor x15,x23,x23,ror#23 and x17,x24,x23 bic x28,x25,x23 add x26,x26,x12 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x15,ror#18 // Sigma1(e) ror x15,x27,#28 add x26,x26,x17 // h+=Ch(e,f,g) eor x17,x27,x27,ror#5 add x26,x26,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x22,x22,x26 // d+=h eor x19,x19,x20 // Maj(a,b,c) eor x17,x15,x17,ror#34 // Sigma0(a) add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x13,x13 // 10 #endif add x26,x26,x17 // h+=Sigma0(a) ror x16,x22,#14 add x25,x25,x19 // h+=K[i] eor x0,x22,x22,ror#23 and x17,x23,x22 bic x19,x24,x22 add x25,x25,x13 
// h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x0,ror#18 // Sigma1(e) ror x0,x26,#28 add x25,x25,x17 // h+=Ch(e,f,g) eor x17,x26,x26,ror#5 add x25,x25,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x21,x21,x25 // d+=h eor x28,x28,x27 // Maj(a,b,c) eor x17,x0,x17,ror#34 // Sigma0(a) add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x14,x14 // 11 #endif ldp x15,x0,[x1],#2*8 add x25,x25,x17 // h+=Sigma0(a) str x6,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] eor x6,x21,x21,ror#23 and x17,x22,x21 bic x28,x23,x21 add x24,x24,x14 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x6,ror#18 // Sigma1(e) ror x6,x25,#28 add x24,x24,x17 // h+=Ch(e,f,g) eor x17,x25,x25,ror#5 add x24,x24,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x20,x20,x24 // d+=h eor x19,x19,x26 // Maj(a,b,c) eor x17,x6,x17,ror#34 // Sigma0(a) add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x15,x15 // 12 #endif add x24,x24,x17 // h+=Sigma0(a) str x7,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] eor x7,x20,x20,ror#23 and x17,x21,x20 bic x19,x22,x20 add x23,x23,x15 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x7,ror#18 // Sigma1(e) ror x7,x24,#28 add x23,x23,x17 // h+=Ch(e,f,g) eor x17,x24,x24,ror#5 add x23,x23,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x27,x27,x23 // d+=h eor x28,x28,x25 // Maj(a,b,c) eor x17,x7,x17,ror#34 // Sigma0(a) add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x0,x0 // 13 #endif ldp x1,x2,[x1] add x23,x23,x17 // h+=Sigma0(a) str x8,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] eor x8,x27,x27,ror#23 and x17,x20,x27 bic x28,x21,x27 add x22,x22,x0 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x8,ror#18 // Sigma1(e) ror x8,x23,#28 add x22,x22,x17 // h+=Ch(e,f,g) eor x17,x23,x23,ror#5 add x22,x22,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x26,x26,x22 // d+=h eor x19,x19,x24 // Maj(a,b,c) eor x17,x8,x17,ror#34 // Sigma0(a) add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x1,x1 // 14 #endif ldr x6,[sp,#24] add x22,x22,x17 // h+=Sigma0(a) str x9,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] eor x9,x26,x26,ror#23 and x17,x27,x26 bic x19,x20,x26 add x21,x21,x1 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x9,ror#18 // Sigma1(e) ror x9,x22,#28 add x21,x21,x17 // h+=Ch(e,f,g) eor x17,x22,x22,ror#5 add x21,x21,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x25,x25,x21 // d+=h eor x28,x28,x23 // Maj(a,b,c) eor x17,x9,x17,ror#34 // Sigma0(a) add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x2,x2 // 15 #endif ldr x7,[sp,#0] add x21,x21,x17 // h+=Sigma0(a) str x10,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x9,x4,#1 and x17,x26,x25 ror x8,x1,#19 bic x28,x27,x25 ror x10,x21,#28 add x20,x20,x2 // h+=X[i] eor x16,x16,x25,ror#18 eor x9,x9,x4,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x10,x10,x21,ror#34 add 
x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x8,x8,x1,ror#61 eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x10,x21,ror#39 // Sigma0(a) eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) add x3,x3,x12 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x3,x3,x9 add x20,x20,x17 // h+=Sigma0(a) add x3,x3,x8 .Loop_16_xx: ldr x8,[sp,#8] str x11,[sp,#0] ror x16,x24,#14 add x27,x27,x19 // h+=K[i] ror x10,x5,#1 and x17,x25,x24 ror x9,x2,#19 bic x19,x26,x24 ror x11,x20,#28 add x27,x27,x3 // h+=X[i] eor x16,x16,x24,ror#18 eor x10,x10,x5,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x24,ror#41 // Sigma1(e) eor x11,x11,x20,ror#34 add x27,x27,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x9,x9,x2,ror#61 eor x10,x10,x5,lsr#7 // sigma0(X[i+1]) add x27,x27,x16 // h+=Sigma1(e) eor x28,x28,x21 // Maj(a,b,c) eor x17,x11,x20,ror#39 // Sigma0(a) eor x9,x9,x2,lsr#6 // sigma1(X[i+14]) add x4,x4,x13 add x23,x23,x27 // d+=h add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x4,x4,x10 add x27,x27,x17 // h+=Sigma0(a) add x4,x4,x9 ldr x9,[sp,#16] str x12,[sp,#8] ror x16,x23,#14 add x26,x26,x28 // h+=K[i] ror x11,x6,#1 and x17,x24,x23 ror x10,x3,#19 bic x28,x25,x23 ror x12,x27,#28 add x26,x26,x4 // h+=X[i] eor x16,x16,x23,ror#18 eor x11,x11,x6,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x23,ror#41 // Sigma1(e) eor x12,x12,x27,ror#34 add x26,x26,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x10,x10,x3,ror#61 eor x11,x11,x6,lsr#7 // sigma0(X[i+1]) add x26,x26,x16 // h+=Sigma1(e) eor x19,x19,x20 // Maj(a,b,c) eor x17,x12,x27,ror#39 // Sigma0(a) eor x10,x10,x3,lsr#6 // sigma1(X[i+14]) add x5,x5,x14 add x22,x22,x26 // d+=h add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x5,x5,x11 add x26,x26,x17 // h+=Sigma0(a) add x5,x5,x10 ldr x10,[sp,#24] str x13,[sp,#16] ror x16,x22,#14 add x25,x25,x19 // h+=K[i] ror x12,x7,#1 and x17,x23,x22 ror x11,x4,#19 bic x19,x24,x22 ror x13,x26,#28 add x25,x25,x5 // h+=X[i] eor x16,x16,x22,ror#18 eor x12,x12,x7,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x22,ror#41 // Sigma1(e) eor x13,x13,x26,ror#34 add x25,x25,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x11,x11,x4,ror#61 eor x12,x12,x7,lsr#7 // sigma0(X[i+1]) add x25,x25,x16 // h+=Sigma1(e) eor x28,x28,x27 // Maj(a,b,c) eor x17,x13,x26,ror#39 // Sigma0(a) eor x11,x11,x4,lsr#6 // sigma1(X[i+14]) add x6,x6,x15 add x21,x21,x25 // d+=h add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x6,x6,x12 add x25,x25,x17 // h+=Sigma0(a) add x6,x6,x11 ldr x11,[sp,#0] str x14,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] ror x13,x8,#1 and x17,x22,x21 ror x12,x5,#19 bic x28,x23,x21 ror x14,x25,#28 add x24,x24,x6 // h+=X[i] eor x16,x16,x21,ror#18 eor x13,x13,x8,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x21,ror#41 // Sigma1(e) eor x14,x14,x25,ror#34 add x24,x24,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x12,x12,x5,ror#61 eor x13,x13,x8,lsr#7 // sigma0(X[i+1]) add x24,x24,x16 // h+=Sigma1(e) eor x19,x19,x26 // Maj(a,b,c) eor x17,x14,x25,ror#39 // Sigma0(a) eor x12,x12,x5,lsr#6 // sigma1(X[i+14]) add x7,x7,x0 add x20,x20,x24 // d+=h add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x7,x7,x13 add 
x24,x24,x17 // h+=Sigma0(a) add x7,x7,x12 ldr x12,[sp,#8] str x15,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] ror x14,x9,#1 and x17,x21,x20 ror x13,x6,#19 bic x19,x22,x20 ror x15,x24,#28 add x23,x23,x7 // h+=X[i] eor x16,x16,x20,ror#18 eor x14,x14,x9,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x20,ror#41 // Sigma1(e) eor x15,x15,x24,ror#34 add x23,x23,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x13,x13,x6,ror#61 eor x14,x14,x9,lsr#7 // sigma0(X[i+1]) add x23,x23,x16 // h+=Sigma1(e) eor x28,x28,x25 // Maj(a,b,c) eor x17,x15,x24,ror#39 // Sigma0(a) eor x13,x13,x6,lsr#6 // sigma1(X[i+14]) add x8,x8,x1 add x27,x27,x23 // d+=h add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x8,x8,x14 add x23,x23,x17 // h+=Sigma0(a) add x8,x8,x13 ldr x13,[sp,#16] str x0,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] ror x15,x10,#1 and x17,x20,x27 ror x14,x7,#19 bic x28,x21,x27 ror x0,x23,#28 add x22,x22,x8 // h+=X[i] eor x16,x16,x27,ror#18 eor x15,x15,x10,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x27,ror#41 // Sigma1(e) eor x0,x0,x23,ror#34 add x22,x22,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x14,x14,x7,ror#61 eor x15,x15,x10,lsr#7 // sigma0(X[i+1]) add x22,x22,x16 // h+=Sigma1(e) eor x19,x19,x24 // Maj(a,b,c) eor x17,x0,x23,ror#39 // Sigma0(a) eor x14,x14,x7,lsr#6 // sigma1(X[i+14]) add x9,x9,x2 add x26,x26,x22 // d+=h add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x9,x9,x15 add x22,x22,x17 // h+=Sigma0(a) add x9,x9,x14 ldr x14,[sp,#24] str x1,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] ror x0,x11,#1 and x17,x27,x26 ror x15,x8,#19 bic x19,x20,x26 ror x1,x22,#28 add x21,x21,x9 // h+=X[i] eor x16,x16,x26,ror#18 eor x0,x0,x11,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x26,ror#41 // Sigma1(e) eor x1,x1,x22,ror#34 add x21,x21,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x15,x15,x8,ror#61 eor x0,x0,x11,lsr#7 // sigma0(X[i+1]) add x21,x21,x16 // h+=Sigma1(e) eor x28,x28,x23 // Maj(a,b,c) eor x17,x1,x22,ror#39 // Sigma0(a) eor x15,x15,x8,lsr#6 // sigma1(X[i+14]) add x10,x10,x3 add x25,x25,x21 // d+=h add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x10,x10,x0 add x21,x21,x17 // h+=Sigma0(a) add x10,x10,x15 ldr x15,[sp,#0] str x2,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x1,x12,#1 and x17,x26,x25 ror x0,x9,#19 bic x28,x27,x25 ror x2,x21,#28 add x20,x20,x10 // h+=X[i] eor x16,x16,x25,ror#18 eor x1,x1,x12,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x2,x2,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x0,x0,x9,ror#61 eor x1,x1,x12,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x2,x21,ror#39 // Sigma0(a) eor x0,x0,x9,lsr#6 // sigma1(X[i+14]) add x11,x11,x4 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x11,x11,x1 add x20,x20,x17 // h+=Sigma0(a) add x11,x11,x0 ldr x0,[sp,#8] str x3,[sp,#0] ror x16,x24,#14 add x27,x27,x19 // h+=K[i] ror x2,x13,#1 and x17,x25,x24 ror x1,x10,#19 bic x19,x26,x24 ror x3,x20,#28 add x27,x27,x11 // h+=X[i] eor x16,x16,x24,ror#18 eor x2,x2,x13,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x24,ror#41 // Sigma1(e) eor x3,x3,x20,ror#34 add x27,x27,x17 
// h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x1,x1,x10,ror#61 eor x2,x2,x13,lsr#7 // sigma0(X[i+1]) add x27,x27,x16 // h+=Sigma1(e) eor x28,x28,x21 // Maj(a,b,c) eor x17,x3,x20,ror#39 // Sigma0(a) eor x1,x1,x10,lsr#6 // sigma1(X[i+14]) add x12,x12,x5 add x23,x23,x27 // d+=h add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x12,x12,x2 add x27,x27,x17 // h+=Sigma0(a) add x12,x12,x1 ldr x1,[sp,#16] str x4,[sp,#8] ror x16,x23,#14 add x26,x26,x28 // h+=K[i] ror x3,x14,#1 and x17,x24,x23 ror x2,x11,#19 bic x28,x25,x23 ror x4,x27,#28 add x26,x26,x12 // h+=X[i] eor x16,x16,x23,ror#18 eor x3,x3,x14,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x23,ror#41 // Sigma1(e) eor x4,x4,x27,ror#34 add x26,x26,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x2,x2,x11,ror#61 eor x3,x3,x14,lsr#7 // sigma0(X[i+1]) add x26,x26,x16 // h+=Sigma1(e) eor x19,x19,x20 // Maj(a,b,c) eor x17,x4,x27,ror#39 // Sigma0(a) eor x2,x2,x11,lsr#6 // sigma1(X[i+14]) add x13,x13,x6 add x22,x22,x26 // d+=h add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x13,x13,x3 add x26,x26,x17 // h+=Sigma0(a) add x13,x13,x2 ldr x2,[sp,#24] str x5,[sp,#16] ror x16,x22,#14 add x25,x25,x19 // h+=K[i] ror x4,x15,#1 and x17,x23,x22 ror x3,x12,#19 bic x19,x24,x22 ror x5,x26,#28 add x25,x25,x13 // h+=X[i] eor x16,x16,x22,ror#18 eor x4,x4,x15,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x22,ror#41 // Sigma1(e) eor x5,x5,x26,ror#34 add x25,x25,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x3,x3,x12,ror#61 eor x4,x4,x15,lsr#7 // sigma0(X[i+1]) add x25,x25,x16 // h+=Sigma1(e) eor x28,x28,x27 // Maj(a,b,c) eor x17,x5,x26,ror#39 // Sigma0(a) eor x3,x3,x12,lsr#6 // sigma1(X[i+14]) add x14,x14,x7 add x21,x21,x25 // d+=h add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x14,x14,x4 add x25,x25,x17 // h+=Sigma0(a) add x14,x14,x3 ldr x3,[sp,#0] str x6,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] ror x5,x0,#1 and x17,x22,x21 ror x4,x13,#19 bic x28,x23,x21 ror x6,x25,#28 add x24,x24,x14 // h+=X[i] eor x16,x16,x21,ror#18 eor x5,x5,x0,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x21,ror#41 // Sigma1(e) eor x6,x6,x25,ror#34 add x24,x24,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x4,x4,x13,ror#61 eor x5,x5,x0,lsr#7 // sigma0(X[i+1]) add x24,x24,x16 // h+=Sigma1(e) eor x19,x19,x26 // Maj(a,b,c) eor x17,x6,x25,ror#39 // Sigma0(a) eor x4,x4,x13,lsr#6 // sigma1(X[i+14]) add x15,x15,x8 add x20,x20,x24 // d+=h add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x15,x15,x5 add x24,x24,x17 // h+=Sigma0(a) add x15,x15,x4 ldr x4,[sp,#8] str x7,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] ror x6,x1,#1 and x17,x21,x20 ror x5,x14,#19 bic x19,x22,x20 ror x7,x24,#28 add x23,x23,x15 // h+=X[i] eor x16,x16,x20,ror#18 eor x6,x6,x1,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x20,ror#41 // Sigma1(e) eor x7,x7,x24,ror#34 add x23,x23,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x5,x5,x14,ror#61 eor x6,x6,x1,lsr#7 // sigma0(X[i+1]) add x23,x23,x16 // h+=Sigma1(e) eor x28,x28,x25 // Maj(a,b,c) eor x17,x7,x24,ror#39 // Sigma0(a) eor x5,x5,x14,lsr#6 // sigma1(X[i+14]) add x0,x0,x9 add x27,x27,x23 // d+=h add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x0,x0,x6 add x23,x23,x17 // h+=Sigma0(a) add x0,x0,x5 ldr 
x5,[sp,#16] str x8,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] ror x7,x2,#1 and x17,x20,x27 ror x6,x15,#19 bic x28,x21,x27 ror x8,x23,#28 add x22,x22,x0 // h+=X[i] eor x16,x16,x27,ror#18 eor x7,x7,x2,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x27,ror#41 // Sigma1(e) eor x8,x8,x23,ror#34 add x22,x22,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x6,x6,x15,ror#61 eor x7,x7,x2,lsr#7 // sigma0(X[i+1]) add x22,x22,x16 // h+=Sigma1(e) eor x19,x19,x24 // Maj(a,b,c) eor x17,x8,x23,ror#39 // Sigma0(a) eor x6,x6,x15,lsr#6 // sigma1(X[i+14]) add x1,x1,x10 add x26,x26,x22 // d+=h add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x1,x1,x7 add x22,x22,x17 // h+=Sigma0(a) add x1,x1,x6 ldr x6,[sp,#24] str x9,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] ror x8,x3,#1 and x17,x27,x26 ror x7,x0,#19 bic x19,x20,x26 ror x9,x22,#28 add x21,x21,x1 // h+=X[i] eor x16,x16,x26,ror#18 eor x8,x8,x3,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x26,ror#41 // Sigma1(e) eor x9,x9,x22,ror#34 add x21,x21,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x7,x7,x0,ror#61 eor x8,x8,x3,lsr#7 // sigma0(X[i+1]) add x21,x21,x16 // h+=Sigma1(e) eor x28,x28,x23 // Maj(a,b,c) eor x17,x9,x22,ror#39 // Sigma0(a) eor x7,x7,x0,lsr#6 // sigma1(X[i+14]) add x2,x2,x11 add x25,x25,x21 // d+=h add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x2,x2,x8 add x21,x21,x17 // h+=Sigma0(a) add x2,x2,x7 ldr x7,[sp,#0] str x10,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x9,x4,#1 and x17,x26,x25 ror x8,x1,#19 bic x28,x27,x25 ror x10,x21,#28 add x20,x20,x2 // h+=X[i] eor x16,x16,x25,ror#18 eor x9,x9,x4,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x10,x10,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x8,x8,x1,ror#61 eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x10,x21,ror#39 // Sigma0(a) eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) add x3,x3,x12 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x3,x3,x9 add x20,x20,x17 // h+=Sigma0(a) add x3,x3,x8 cbnz x19,.Loop_16_xx ldp x0,x2,[x29,#96] ldr x1,[x29,#112] sub x30,x30,#648 // rewind ldp x3,x4,[x0] ldp x5,x6,[x0,#2*8] add x1,x1,#14*8 // advance input pointer ldp x7,x8,[x0,#4*8] add x20,x20,x3 ldp x9,x10,[x0,#6*8] add x21,x21,x4 add x22,x22,x5 add x23,x23,x6 stp x20,x21,[x0] add x24,x24,x7 add x25,x25,x8 stp x22,x23,[x0,#2*8] add x26,x26,x9 add x27,x27,x10 cmp x1,x2 stp x24,x25,[x0,#4*8] stp x26,x27,[x0,#6*8] b.ne .Loop ldp x19,x20,[x29,#16] add sp,sp,#4*8 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 AARCH64_VALIDATE_LINK_REGISTER ret .size sha512_block_data_order_nohw,.-sha512_block_data_order_nohw .section .rodata .align 6 .type .LK512,%object .LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 
0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0 // terminator .size .LK512,.-.LK512 .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 .text #ifndef __KERNEL__ .globl sha512_block_data_order_hw .hidden sha512_block_data_order_hw .type sha512_block_data_order_hw,%function .align 6 sha512_block_data_order_hw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! 
add x29,sp,#0 ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context adrp x3,.LK512 add x3,x3,:lo12:.LK512 rev64 v16.16b,v16.16b rev64 v17.16b,v17.16b rev64 v18.16b,v18.16b rev64 v19.16b,v19.16b rev64 v20.16b,v20.16b rev64 v21.16b,v21.16b rev64 v22.16b,v22.16b rev64 v23.16b,v23.16b b .Loop_hw .align 4 .Loop_hw: ld1 {v24.2d},[x3],#16 subs x2,x2,#1 sub x4,x1,#128 orr v26.16b,v0.16b,v0.16b // offload orr v27.16b,v1.16b,v1.16b orr v28.16b,v2.16b,v2.16b orr v29.16b,v3.16b,v3.16b csel x1,x1,x4,ne // conditional rewind add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .inst 
0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add 
v2.2d,v4.2d,v0.2d // "D + T1" .inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .inst 0xce628464 //sha512h2 
v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .inst 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v23.2d ld1 
{v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .inst 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v16.2d ld1 {v16.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b rev64 v16.16b,v16.16b add v0.2d,v3.2d,v4.2d // "D + T1" .inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v17.2d ld1 {v17.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b rev64 v17.16b,v17.16b add v3.2d,v2.2d,v1.2d // "D + T1" .inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v18.2d ld1 {v18.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b rev64 v18.16b,v18.16b add v2.2d,v4.2d,v0.2d // "D + T1" .inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v19.2d ld1 {v19.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b rev64 v19.16b,v19.16b add v4.2d,v1.2d,v3.2d // "D + T1" .inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v20.2d ld1 {v20.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b rev64 v20.16b,v20.16b add v1.2d,v0.2d,v2.2d // "D + T1" .inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v21.2d ld1 {v21.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b rev64 v21.16b,v21.16b add v0.2d,v3.2d,v4.2d // "D + T1" .inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v22.2d ld1 {v22.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b rev64 v22.16b,v22.16b add v3.2d,v2.2d,v1.2d // "D + T1" .inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b sub x3,x3,#80*8 // rewind add v25.2d,v25.2d,v23.2d ld1 {v23.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b rev64 v23.16b,v23.16b add v2.2d,v4.2d,v0.2d // "D + T1" .inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v0.2d,v0.2d,v26.2d // accumulate add v1.2d,v1.2d,v27.2d add v2.2d,v2.2d,v28.2d add v3.2d,v3.2d,v29.2d cbnz x2,.Loop_hw st1 
{v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context
	ldr x29,[sp],#16
	ret
.size sha512_block_data_order_hw,.-sha512_block_data_order_hw
#endif
#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
marvin-hansen/iggy-streaming-system
193,277
thirdparty/crates/ring-0.17.9/pregenerated/chacha20_poly1305_x86_64-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__) .section .rodata .align 64 chacha20_poly1305_constants: .Lchacha20_consts: .byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' .byte 'e','x','p','a','n','d',' ','3','2','-','b','y','t','e',' ','k' .Lrol8: .byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 .byte 3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14 .Lrol16: .byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13 .byte 2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13 .Lavx2_init: .long 0,0,0,0 .Lsse_inc: .long 1,0,0,0 .Lavx2_inc: .long 2,0,0,0,2,0,0,0 .Lclamp: .quad 0x0FFFFFFC0FFFFFFF, 0x0FFFFFFC0FFFFFFC .quad 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF .align 16 .Land_masks: .byte 0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00 .byte 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff .text .type poly_hash_ad_internal,@function .align 64 poly_hash_ad_internal: .cfi_startproc .cfi_def_cfa rsp, 8 xorq %r10,%r10 xorq %r11,%r11 xorq %r12,%r12 cmpq $13,%r8 jne .Lhash_ad_loop .Lpoly_fast_tls_ad: movq (%rcx),%r10 movq 5(%rcx),%r11 shrq $24,%r11 movq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 ret .Lhash_ad_loop: cmpq $16,%r8 jb .Lhash_ad_tail addq 0+0(%rcx),%r10 adcq 8+0(%rcx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq 
$3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rcx),%rcx subq $16,%r8 jmp .Lhash_ad_loop .Lhash_ad_tail: cmpq $0,%r8 je .Lhash_ad_done xorq %r13,%r13 xorq %r14,%r14 xorq %r15,%r15 addq %r8,%rcx .Lhash_ad_tail_loop: shldq $8,%r13,%r14 shlq $8,%r13 movzbq -1(%rcx),%r15 xorq %r15,%r13 decq %rcx decq %r8 jne .Lhash_ad_tail_loop addq %r13,%r10 adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Lhash_ad_done: ret .cfi_endproc .size poly_hash_ad_internal, .-poly_hash_ad_internal .globl chacha20_poly1305_open_nohw .hidden chacha20_poly1305_open_nohw .type chacha20_poly1305_open_nohw,@function .align 64 chacha20_poly1305_open_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 pushq %r9 .cfi_adjust_cfa_offset 8 .cfi_offset %r9,-64 subq $288 + 0 + 32,%rsp .cfi_adjust_cfa_offset 288 + 32 leaq 32(%rsp),%rbp andq $-32,%rbp movq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) cmpq $128,%rbx jbe .Lopen_sse_128 movdqa .Lchacha20_consts(%rip),%xmm0 movdqu 0(%r9),%xmm4 movdqu 16(%r9),%xmm8 movdqu 32(%r9),%xmm12 movdqa %xmm12,%xmm7 movdqa %xmm4,0+48(%rbp) movdqa %xmm8,0+64(%rbp) movdqa %xmm12,0+96(%rbp) movq $10,%r10 .Lopen_sse_init_rounds: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 decq %r10 jne .Lopen_sse_init_rounds paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 pand .Lclamp(%rip),%xmm0 movdqa %xmm0,0+0(%rbp) movdqa %xmm4,0+16(%rbp) movq %r8,%r8 call poly_hash_ad_internal .Lopen_sse_main_loop: cmpq $256,%rbx jb .Lopen_sse_tail movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa 0+96(%rbp),%xmm15 paddd .Lsse_inc(%rip),%xmm15 movdqa %xmm15,%xmm14 paddd 
.Lsse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) movq $4,%rcx movq %rsi,%r8 .Lopen_sse_main_loop_rounds: movdqa %xmm8,0+80(%rbp) movdqa .Lrol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 leaq 16(%r8),%r8 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movdqa .Lrol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 movdqa %xmm8,0+80(%rbp) movdqa .Lrol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 
movdqa .Lrol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 decq %rcx jge .Lopen_sse_main_loop_rounds addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 cmpq $-6,%rcx jg .Lopen_sse_main_loop_rounds paddd .Lchacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd .Lchacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqa %xmm12,0+80(%rbp) movdqu 0 + 0(%rsi),%xmm12 pxor %xmm3,%xmm12 movdqu %xmm12,0 + 0(%rdi) movdqu 16 + 0(%rsi),%xmm12 pxor %xmm7,%xmm12 movdqu %xmm12,16 + 0(%rdi) movdqu 32 + 0(%rsi),%xmm12 pxor %xmm11,%xmm12 movdqu %xmm12,32 + 0(%rdi) movdqu 48 + 0(%rsi),%xmm12 pxor %xmm15,%xmm12 movdqu %xmm12,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 64(%rdi) movdqu %xmm6,16 + 64(%rdi) movdqu %xmm10,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 128(%rdi) movdqu %xmm5,16 + 128(%rdi) movdqu %xmm9,32 + 128(%rdi) movdqu %xmm15,48 + 128(%rdi) movdqu 0 + 192(%rsi),%xmm3 movdqu 16 + 192(%rsi),%xmm7 movdqu 32 + 192(%rsi),%xmm11 movdqu 48 + 192(%rsi),%xmm15 pxor %xmm3,%xmm0 pxor %xmm7,%xmm4 pxor %xmm11,%xmm8 pxor 0+80(%rbp),%xmm15 movdqu %xmm0,0 + 192(%rdi) movdqu %xmm4,16 + 192(%rdi) movdqu %xmm8,32 + 192(%rdi) movdqu %xmm15,48 + 192(%rdi) leaq 256(%rsi),%rsi leaq 256(%rdi),%rdi subq $256,%rbx jmp .Lopen_sse_main_loop .Lopen_sse_tail: testq %rbx,%rbx jz 
.Lopen_sse_finalize cmpq $192,%rbx ja .Lopen_sse_tail_256 cmpq $128,%rbx ja .Lopen_sse_tail_192 cmpq $64,%rbx ja .Lopen_sse_tail_128 movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa 0+96(%rbp),%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) xorq %r8,%r8 movq %rbx,%rcx cmpq $16,%rcx jb .Lopen_sse_tail_64_rounds .Lopen_sse_tail_64_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 subq $16,%rcx .Lopen_sse_tail_64_rounds: addq $16,%r8 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 cmpq $16,%rcx jae .Lopen_sse_tail_64_rounds_and_x1hash cmpq $160,%r8 jne .Lopen_sse_tail_64_rounds paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 jmp .Lopen_sse_tail_64_dec_loop .Lopen_sse_tail_128: movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa 0+96(%rbp),%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movq %rbx,%rcx andq $-16,%rcx xorq %r8,%r8 .Lopen_sse_tail_128_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Lopen_sse_tail_128_rounds: addq $16,%r8 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 
102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 cmpq %rcx,%r8 jb .Lopen_sse_tail_128_rounds_and_x1hash cmpq $160,%r8 jne .Lopen_sse_tail_128_rounds paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 0(%rdi) movdqu %xmm5,16 + 0(%rdi) movdqu %xmm9,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) subq $64,%rbx leaq 64(%rsi),%rsi leaq 64(%rdi),%rdi jmp .Lopen_sse_tail_64_dec_loop .Lopen_sse_tail_192: movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa 0+96(%rbp),%xmm14 paddd .Lsse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movq %rbx,%rcx movq $160,%r8 cmpq $160,%rcx cmovgq %r8,%rcx andq $-16,%rcx xorq %r8,%r8 .Lopen_sse_tail_192_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Lopen_sse_tail_192_rounds: addq $16,%r8 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 
pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 cmpq %rcx,%r8 jb .Lopen_sse_tail_192_rounds_and_x1hash cmpq $160,%r8 jne .Lopen_sse_tail_192_rounds cmpq $176,%rbx jb .Lopen_sse_tail_192_finish addq 0+160(%rsi),%r10 adcq 8+160(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 cmpq $192,%rbx jb .Lopen_sse_tail_192_finish addq 0+176(%rsi),%r10 adcq 8+176(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq 
%r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Lopen_sse_tail_192_finish: paddd .Lchacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 0(%rdi) movdqu %xmm6,16 + 0(%rdi) movdqu %xmm10,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 64(%rdi) movdqu %xmm5,16 + 64(%rdi) movdqu %xmm9,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) subq $128,%rbx leaq 128(%rsi),%rsi leaq 128(%rdi),%rdi jmp .Lopen_sse_tail_64_dec_loop .Lopen_sse_tail_256: movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa 0+96(%rbp),%xmm15 paddd .Lsse_inc(%rip),%xmm15 movdqa %xmm15,%xmm14 paddd .Lsse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) xorq %r8,%r8 .Lopen_sse_tail_256_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movdqa %xmm11,0+80(%rbp) paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $12,%xmm11 psrld $20,%xmm4 pxor %xmm11,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $7,%xmm11 psrld $25,%xmm4 pxor %xmm11,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $12,%xmm11 psrld $20,%xmm5 pxor %xmm11,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $7,%xmm11 psrld $25,%xmm5 pxor %xmm11,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $12,%xmm11 psrld $20,%xmm6 pxor %xmm11,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $7,%xmm11 psrld $25,%xmm6 pxor %xmm11,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 movdqa 0+80(%rbp),%xmm11 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movdqa %xmm9,0+80(%rbp) paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb .Lrol16(%rip),%xmm15 paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $12,%xmm9 psrld $20,%xmm7 pxor %xmm9,%xmm7 paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb .Lrol8(%rip),%xmm15 paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $7,%xmm9 
psrld $25,%xmm7 pxor %xmm9,%xmm7 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 movdqa 0+80(%rbp),%xmm9 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx movdqa %xmm11,0+80(%rbp) paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $12,%xmm11 psrld $20,%xmm4 pxor %xmm11,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm11 pslld $7,%xmm11 psrld $25,%xmm4 pxor %xmm11,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $12,%xmm11 psrld $20,%xmm5 pxor %xmm11,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm11 pslld $7,%xmm11 psrld $25,%xmm5 pxor %xmm11,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $12,%xmm11 psrld $20,%xmm6 pxor %xmm11,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm11 pslld $7,%xmm11 psrld $25,%xmm6 pxor %xmm11,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 movdqa 0+80(%rbp),%xmm11 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movdqa %xmm9,0+80(%rbp) paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb .Lrol16(%rip),%xmm15 paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $12,%xmm9 psrld $20,%xmm7 pxor %xmm9,%xmm7 paddd %xmm7,%xmm3 pxor %xmm3,%xmm15 pshufb .Lrol8(%rip),%xmm15 paddd %xmm15,%xmm11 pxor %xmm11,%xmm7 movdqa %xmm7,%xmm9 pslld $7,%xmm9 psrld $25,%xmm7 pxor %xmm9,%xmm7 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 movdqa 0+80(%rbp),%xmm9 addq $16,%r8 cmpq $160,%r8 jb .Lopen_sse_tail_256_rounds_and_x1hash movq %rbx,%rcx andq $-16,%rcx .Lopen_sse_tail_256_hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq $16,%r8 cmpq %rcx,%r8 jb .Lopen_sse_tail_256_hash paddd .Lchacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd .Lchacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqa 
%xmm12,0+80(%rbp) movdqu 0 + 0(%rsi),%xmm12 pxor %xmm3,%xmm12 movdqu %xmm12,0 + 0(%rdi) movdqu 16 + 0(%rsi),%xmm12 pxor %xmm7,%xmm12 movdqu %xmm12,16 + 0(%rdi) movdqu 32 + 0(%rsi),%xmm12 pxor %xmm11,%xmm12 movdqu %xmm12,32 + 0(%rdi) movdqu 48 + 0(%rsi),%xmm12 pxor %xmm15,%xmm12 movdqu %xmm12,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 64(%rdi) movdqu %xmm6,16 + 64(%rdi) movdqu %xmm10,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 128(%rdi) movdqu %xmm5,16 + 128(%rdi) movdqu %xmm9,32 + 128(%rdi) movdqu %xmm15,48 + 128(%rdi) movdqa 0+80(%rbp),%xmm12 subq $192,%rbx leaq 192(%rsi),%rsi leaq 192(%rdi),%rdi .Lopen_sse_tail_64_dec_loop: cmpq $16,%rbx jb .Lopen_sse_tail_16_init subq $16,%rbx movdqu (%rsi),%xmm3 pxor %xmm3,%xmm0 movdqu %xmm0,(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi movdqa %xmm4,%xmm0 movdqa %xmm8,%xmm4 movdqa %xmm12,%xmm8 jmp .Lopen_sse_tail_64_dec_loop .Lopen_sse_tail_16_init: movdqa %xmm0,%xmm1 .Lopen_sse_tail_16: testq %rbx,%rbx jz .Lopen_sse_finalize pxor %xmm3,%xmm3 leaq -1(%rsi,%rbx,1),%rsi movq %rbx,%r8 .Lopen_sse_tail_16_compose: pslldq $1,%xmm3 pinsrb $0,(%rsi),%xmm3 subq $1,%rsi subq $1,%r8 jnz .Lopen_sse_tail_16_compose .byte 102,73,15,126,221 pextrq $1,%xmm3,%r14 pxor %xmm1,%xmm3 .Lopen_sse_tail_16_extract: pextrb $0,%xmm3,(%rdi) psrldq $1,%xmm3 addq $1,%rdi subq $1,%rbx jne .Lopen_sse_tail_16_extract addq %r13,%r10 adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Lopen_sse_finalize: addq 0+0+32(%rbp),%r10 adcq 8+0+32(%rbp),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movq %r10,%r13 movq %r11,%r14 movq %r12,%r15 subq $-5,%r10 sbbq $-1,%r11 sbbq $3,%r12 cmovcq %r13,%r10 cmovcq %r14,%r11 cmovcq %r15,%r12 addq 0+0+16(%rbp),%r10 adcq 8+0+16(%rbp),%r11 .cfi_remember_state addq $288 + 0 + 32,%rsp .cfi_adjust_cfa_offset -(288 + 32) popq %r9 .cfi_adjust_cfa_offset -8 .cfi_restore %r9 movq %r10,(%r9) movq %r11,8(%r9) popq %r15 .cfi_adjust_cfa_offset -8 .cfi_restore %r15 popq %r14 .cfi_adjust_cfa_offset -8 .cfi_restore %r14 popq %r13 .cfi_adjust_cfa_offset -8 .cfi_restore %r13 popq %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 popq %rbx .cfi_adjust_cfa_offset -8 .cfi_restore %rbx popq 
%rbp .cfi_adjust_cfa_offset -8 .cfi_restore %rbp ret .Lopen_sse_128: .cfi_restore_state movdqu .Lchacha20_consts(%rip),%xmm0 movdqa %xmm0,%xmm1 movdqa %xmm0,%xmm2 movdqu 0(%r9),%xmm4 movdqa %xmm4,%xmm5 movdqa %xmm4,%xmm6 movdqu 16(%r9),%xmm8 movdqa %xmm8,%xmm9 movdqa %xmm8,%xmm10 movdqu 32(%r9),%xmm12 movdqa %xmm12,%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm14 paddd .Lsse_inc(%rip),%xmm14 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa %xmm13,%xmm15 movq $10,%r10 .Lopen_sse_128_rounds: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 decq %r10 jnz .Lopen_sse_128_rounds paddd .Lchacha20_consts(%rip),%xmm0 paddd .Lchacha20_consts(%rip),%xmm1 paddd .Lchacha20_consts(%rip),%xmm2 paddd %xmm7,%xmm4 paddd %xmm7,%xmm5 paddd %xmm7,%xmm6 paddd %xmm11,%xmm9 paddd %xmm11,%xmm10 paddd %xmm15,%xmm13 paddd .Lsse_inc(%rip),%xmm15 paddd %xmm15,%xmm14 pand .Lclamp(%rip),%xmm0 movdqa %xmm0,0+0(%rbp) movdqa %xmm4,0+16(%rbp) movq %r8,%r8 call poly_hash_ad_internal .Lopen_sse_128_xor_hash: cmpq $16,%rbx jb .Lopen_sse_tail_16 subq $16,%rbx addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 
movdqu 0(%rsi),%xmm3 pxor %xmm3,%xmm1 movdqu %xmm1,0(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movdqa %xmm5,%xmm1 movdqa %xmm9,%xmm5 movdqa %xmm13,%xmm9 movdqa %xmm2,%xmm13 movdqa %xmm6,%xmm2 movdqa %xmm10,%xmm6 movdqa %xmm14,%xmm10 jmp .Lopen_sse_128_xor_hash .size chacha20_poly1305_open_nohw, .-chacha20_poly1305_open_nohw .cfi_endproc .globl chacha20_poly1305_seal_nohw .hidden chacha20_poly1305_seal_nohw .type chacha20_poly1305_seal_nohw,@function .align 64 chacha20_poly1305_seal_nohw: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 pushq %r9 .cfi_adjust_cfa_offset 8 .cfi_offset %r9,-64 subq $288 + 0 + 32,%rsp .cfi_adjust_cfa_offset 288 + 32 leaq 32(%rsp),%rbp andq $-32,%rbp movq 56(%r9),%rbx addq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) movq %rdx,%rbx cmpq $128,%rbx jbe .Lseal_sse_128 movdqa .Lchacha20_consts(%rip),%xmm0 movdqu 0(%r9),%xmm4 movdqu 16(%r9),%xmm8 movdqu 32(%r9),%xmm12 movdqa %xmm0,%xmm1 movdqa %xmm0,%xmm2 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm5 movdqa %xmm4,%xmm6 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm9 movdqa %xmm8,%xmm10 movdqa %xmm8,%xmm11 movdqa %xmm12,%xmm15 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,%xmm14 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,%xmm13 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm4,0+48(%rbp) movdqa %xmm8,0+64(%rbp) movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) movq $10,%r10 .Lseal_sse_init_rounds: movdqa %xmm8,0+80(%rbp) movdqa .Lrol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movdqa .Lrol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa 
%xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 movdqa %xmm8,0+80(%rbp) movdqa .Lrol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movdqa .Lrol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 decq %r10 jnz .Lseal_sse_init_rounds paddd .Lchacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd .Lchacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 pand .Lclamp(%rip),%xmm3 movdqa %xmm3,0+0(%rbp) movdqa %xmm7,0+16(%rbp) movq %r8,%r8 call poly_hash_ad_internal movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 0(%rdi) movdqu %xmm6,16 + 0(%rdi) movdqu %xmm10,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu 
%xmm1,0 + 64(%rdi) movdqu %xmm5,16 + 64(%rdi) movdqu %xmm9,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) cmpq $192,%rbx ja .Lseal_sse_main_init movq $128,%rcx subq $128,%rbx leaq 128(%rsi),%rsi jmp .Lseal_sse_128_tail_hash .Lseal_sse_main_init: movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm0 pxor %xmm7,%xmm4 pxor %xmm11,%xmm8 pxor %xmm12,%xmm15 movdqu %xmm0,0 + 128(%rdi) movdqu %xmm4,16 + 128(%rdi) movdqu %xmm8,32 + 128(%rdi) movdqu %xmm15,48 + 128(%rdi) movq $192,%rcx subq $192,%rbx leaq 192(%rsi),%rsi movq $2,%rcx movq $8,%r8 cmpq $64,%rbx jbe .Lseal_sse_tail_64 cmpq $128,%rbx jbe .Lseal_sse_tail_128 cmpq $192,%rbx jbe .Lseal_sse_tail_192 .Lseal_sse_main_loop: movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa %xmm0,%xmm3 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa 0+96(%rbp),%xmm15 paddd .Lsse_inc(%rip),%xmm15 movdqa %xmm15,%xmm14 paddd .Lsse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) movdqa %xmm15,0+144(%rbp) .align 32 .Lseal_sse_main_rounds: movdqa %xmm8,0+80(%rbp) movdqa .Lrol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movdqa .Lrol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 .byte 102,15,58,15,255,4 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,12 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 .byte 
102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 movdqa %xmm8,0+80(%rbp) movdqa .Lrol16(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $20,%xmm8 pslld $32-20,%xmm4 pxor %xmm8,%xmm4 movdqa .Lrol8(%rip),%xmm8 paddd %xmm7,%xmm3 paddd %xmm6,%xmm2 paddd %xmm5,%xmm1 paddd %xmm4,%xmm0 pxor %xmm3,%xmm15 pxor %xmm2,%xmm14 pxor %xmm1,%xmm13 pxor %xmm0,%xmm12 .byte 102,69,15,56,0,248 .byte 102,69,15,56,0,240 .byte 102,69,15,56,0,232 .byte 102,69,15,56,0,224 movdqa 0+80(%rbp),%xmm8 paddd %xmm15,%xmm11 paddd %xmm14,%xmm10 paddd %xmm13,%xmm9 paddd %xmm12,%xmm8 pxor %xmm11,%xmm7 pxor %xmm10,%xmm6 pxor %xmm9,%xmm5 pxor %xmm8,%xmm4 movdqa %xmm8,0+80(%rbp) movdqa %xmm7,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm7 pxor %xmm8,%xmm7 movdqa %xmm6,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm6 pxor %xmm8,%xmm6 movdqa %xmm5,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm5 pxor %xmm8,%xmm5 movdqa %xmm4,%xmm8 psrld $25,%xmm8 pslld $32-25,%xmm4 pxor %xmm8,%xmm4 movdqa 0+80(%rbp),%xmm8 .byte 102,15,58,15,255,12 .byte 102,69,15,58,15,219,8 .byte 102,69,15,58,15,255,4 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 leaq 16(%rdi),%rdi decq %r8 jge .Lseal_sse_main_rounds addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi decq %rcx jg .Lseal_sse_main_rounds paddd .Lchacha20_consts(%rip),%xmm3 paddd 0+48(%rbp),%xmm7 paddd 0+64(%rbp),%xmm11 paddd 0+144(%rbp),%xmm15 paddd .Lchacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqa %xmm14,0+80(%rbp) movdqa %xmm14,0+80(%rbp) movdqu 0 + 0(%rsi),%xmm14 pxor %xmm3,%xmm14 movdqu %xmm14,0 + 0(%rdi) movdqu 16 + 0(%rsi),%xmm14 pxor %xmm7,%xmm14 movdqu %xmm14,16 + 0(%rdi) movdqu 32 + 0(%rsi),%xmm14 pxor %xmm11,%xmm14 movdqu %xmm14,32 + 
0(%rdi) movdqu 48 + 0(%rsi),%xmm14 pxor %xmm15,%xmm14 movdqu %xmm14,48 + 0(%rdi) movdqa 0+80(%rbp),%xmm14 movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 64(%rdi) movdqu %xmm6,16 + 64(%rdi) movdqu %xmm10,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) movdqu 0 + 128(%rsi),%xmm3 movdqu 16 + 128(%rsi),%xmm7 movdqu 32 + 128(%rsi),%xmm11 movdqu 48 + 128(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 128(%rdi) movdqu %xmm5,16 + 128(%rdi) movdqu %xmm9,32 + 128(%rdi) movdqu %xmm15,48 + 128(%rdi) cmpq $256,%rbx ja .Lseal_sse_main_loop_xor movq $192,%rcx subq $192,%rbx leaq 192(%rsi),%rsi jmp .Lseal_sse_128_tail_hash .Lseal_sse_main_loop_xor: movdqu 0 + 192(%rsi),%xmm3 movdqu 16 + 192(%rsi),%xmm7 movdqu 32 + 192(%rsi),%xmm11 movdqu 48 + 192(%rsi),%xmm15 pxor %xmm3,%xmm0 pxor %xmm7,%xmm4 pxor %xmm11,%xmm8 pxor %xmm12,%xmm15 movdqu %xmm0,0 + 192(%rdi) movdqu %xmm4,16 + 192(%rdi) movdqu %xmm8,32 + 192(%rdi) movdqu %xmm15,48 + 192(%rdi) leaq 256(%rsi),%rsi subq $256,%rbx movq $6,%rcx movq $4,%r8 cmpq $192,%rbx jg .Lseal_sse_main_loop movq %rbx,%rcx testq %rbx,%rbx je .Lseal_sse_128_tail_hash movq $6,%rcx cmpq $128,%rbx ja .Lseal_sse_tail_192 cmpq $64,%rbx ja .Lseal_sse_tail_128 .Lseal_sse_tail_64: movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa 0+96(%rbp),%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) .Lseal_sse_tail_64_rounds_and_x2hash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi .Lseal_sse_tail_64_rounds_and_x1hash: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 
movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi decq %rcx jg .Lseal_sse_tail_64_rounds_and_x2hash decq %r8 jge .Lseal_sse_tail_64_rounds_and_x1hash paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 jmp .Lseal_sse_128_tail_xor .Lseal_sse_tail_128: movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa 0+96(%rbp),%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) .Lseal_sse_tail_128_rounds_and_x2hash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi .Lseal_sse_tail_128_rounds_and_x1hash: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 
pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 leaq 16(%rdi),%rdi decq %rcx jg .Lseal_sse_tail_128_rounds_and_x2hash decq %r8 jge .Lseal_sse_tail_128_rounds_and_x1hash paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 0(%rdi) movdqu %xmm5,16 + 0(%rdi) movdqu %xmm9,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) movq $64,%rcx subq $64,%rbx leaq 64(%rsi),%rsi jmp .Lseal_sse_128_tail_hash .Lseal_sse_tail_192: movdqa .Lchacha20_consts(%rip),%xmm0 movdqa 0+48(%rbp),%xmm4 movdqa 0+64(%rbp),%xmm8 movdqa %xmm0,%xmm1 movdqa %xmm4,%xmm5 movdqa %xmm8,%xmm9 movdqa %xmm0,%xmm2 movdqa %xmm4,%xmm6 movdqa %xmm8,%xmm10 movdqa 0+96(%rbp),%xmm14 paddd .Lsse_inc(%rip),%xmm14 movdqa %xmm14,%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm13,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,0+96(%rbp) movdqa %xmm13,0+112(%rbp) movdqa %xmm14,0+128(%rbp) .Lseal_sse_tail_192_rounds_and_x2hash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi .Lseal_sse_tail_192_rounds_and_x1hash: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 
addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 leaq 16(%rdi),%rdi decq %rcx jg .Lseal_sse_tail_192_rounds_and_x2hash decq %r8 jge .Lseal_sse_tail_192_rounds_and_x1hash paddd .Lchacha20_consts(%rip),%xmm2 paddd 0+48(%rbp),%xmm6 paddd 0+64(%rbp),%xmm10 paddd 0+128(%rbp),%xmm14 paddd .Lchacha20_consts(%rip),%xmm1 paddd 0+48(%rbp),%xmm5 paddd 0+64(%rbp),%xmm9 paddd 0+112(%rbp),%xmm13 paddd .Lchacha20_consts(%rip),%xmm0 paddd 0+48(%rbp),%xmm4 paddd 0+64(%rbp),%xmm8 paddd 0+96(%rbp),%xmm12 movdqu 0 + 0(%rsi),%xmm3 movdqu 16 + 0(%rsi),%xmm7 movdqu 32 + 0(%rsi),%xmm11 movdqu 48 + 0(%rsi),%xmm15 pxor %xmm3,%xmm2 pxor %xmm7,%xmm6 pxor %xmm11,%xmm10 pxor %xmm14,%xmm15 movdqu %xmm2,0 + 0(%rdi) movdqu %xmm6,16 + 0(%rdi) movdqu %xmm10,32 + 0(%rdi) movdqu %xmm15,48 + 0(%rdi) movdqu 0 + 64(%rsi),%xmm3 movdqu 16 + 64(%rsi),%xmm7 movdqu 32 + 64(%rsi),%xmm11 movdqu 48 + 64(%rsi),%xmm15 pxor %xmm3,%xmm1 pxor %xmm7,%xmm5 pxor %xmm11,%xmm9 pxor %xmm13,%xmm15 movdqu %xmm1,0 + 64(%rdi) movdqu %xmm5,16 + 64(%rdi) movdqu %xmm9,32 + 64(%rdi) movdqu %xmm15,48 + 64(%rdi) movq $128,%rcx subq $128,%rbx leaq 128(%rsi),%rsi .Lseal_sse_128_tail_hash: cmpq $16,%rcx jb .Lseal_sse_128_tail_xor addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 subq $16,%rcx leaq 16(%rdi),%rdi jmp .Lseal_sse_128_tail_hash .Lseal_sse_128_tail_xor: cmpq $16,%rbx jb .Lseal_sse_tail_16 subq $16,%rbx movdqu 0(%rsi),%xmm3 pxor %xmm3,%xmm0 movdqu %xmm0,0(%rdi) addq 0(%rdi),%r10 adcq 8(%rdi),%r11 adcq $1,%r12 leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi movq 
0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movdqa %xmm4,%xmm0 movdqa %xmm8,%xmm4 movdqa %xmm12,%xmm8 movdqa %xmm1,%xmm12 movdqa %xmm5,%xmm1 movdqa %xmm9,%xmm5 movdqa %xmm13,%xmm9 jmp .Lseal_sse_128_tail_xor .Lseal_sse_tail_16: testq %rbx,%rbx jz .Lprocess_blocks_of_extra_in movq %rbx,%r8 movq %rbx,%rcx leaq -1(%rsi,%rbx,1),%rsi pxor %xmm15,%xmm15 .Lseal_sse_tail_16_compose: pslldq $1,%xmm15 pinsrb $0,(%rsi),%xmm15 leaq -1(%rsi),%rsi decq %rcx jne .Lseal_sse_tail_16_compose pxor %xmm0,%xmm15 movq %rbx,%rcx movdqu %xmm15,%xmm0 .Lseal_sse_tail_16_extract: pextrb $0,%xmm0,(%rdi) psrldq $1,%xmm0 addq $1,%rdi subq $1,%rcx jnz .Lseal_sse_tail_16_extract movq 288 + 0 + 32(%rsp),%r9 movq 56(%r9),%r14 movq 48(%r9),%r13 testq %r14,%r14 jz .Lprocess_partial_block movq $16,%r15 subq %rbx,%r15 cmpq %r15,%r14 jge .Lload_extra_in movq %r14,%r15 .Lload_extra_in: leaq -1(%r13,%r15,1),%rsi addq %r15,%r13 subq %r15,%r14 movq %r13,48(%r9) movq %r14,56(%r9) addq %r15,%r8 pxor %xmm11,%xmm11 .Lload_extra_load_loop: pslldq $1,%xmm11 pinsrb $0,(%rsi),%xmm11 leaq -1(%rsi),%rsi subq $1,%r15 jnz .Lload_extra_load_loop movq %rbx,%r15 .Lload_extra_shift_loop: pslldq $1,%xmm11 subq $1,%r15 jnz .Lload_extra_shift_loop leaq .Land_masks(%rip),%r15 shlq $4,%rbx pand -16(%r15,%rbx,1),%xmm15 por %xmm11,%xmm15 .byte 102,77,15,126,253 pextrq $1,%xmm15,%r14 addq %r13,%r10 adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Lprocess_blocks_of_extra_in: movq 288+32+0 (%rsp),%r9 movq 48(%r9),%rsi movq 56(%r9),%r8 movq %r8,%rcx shrq $4,%r8 .Lprocess_extra_hash_loop: jz process_extra_in_trailer addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rsi),%rsi subq $1,%r8 jmp .Lprocess_extra_hash_loop process_extra_in_trailer: andq $15,%rcx movq %rcx,%rbx jz .Ldo_length_block leaq -1(%rsi,%rcx,1),%rsi .Lprocess_extra_in_trailer_load: pslldq $1,%xmm15 pinsrb $0,(%rsi),%xmm15 leaq -1(%rsi),%rsi subq $1,%rcx jnz .Lprocess_extra_in_trailer_load .Lprocess_partial_block: leaq 
.Land_masks(%rip),%r15 shlq $4,%rbx pand -16(%r15,%rbx,1),%xmm15 .byte 102,77,15,126,253 pextrq $1,%xmm15,%r14 addq %r13,%r10 adcq %r14,%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Ldo_length_block: addq 0+0+32(%rbp),%r10 adcq 8+0+32(%rbp),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 movq %r10,%r13 movq %r11,%r14 movq %r12,%r15 subq $-5,%r10 sbbq $-1,%r11 sbbq $3,%r12 cmovcq %r13,%r10 cmovcq %r14,%r11 cmovcq %r15,%r12 addq 0+0+16(%rbp),%r10 adcq 8+0+16(%rbp),%r11 .cfi_remember_state addq $288 + 0 + 32,%rsp .cfi_adjust_cfa_offset -(288 + 32) popq %r9 .cfi_adjust_cfa_offset -8 .cfi_restore %r9 movq %r10,(%r9) movq %r11,8(%r9) popq %r15 .cfi_adjust_cfa_offset -8 .cfi_restore %r15 popq %r14 .cfi_adjust_cfa_offset -8 .cfi_restore %r14 popq %r13 .cfi_adjust_cfa_offset -8 .cfi_restore %r13 popq %r12 .cfi_adjust_cfa_offset -8 .cfi_restore %r12 popq %rbx .cfi_adjust_cfa_offset -8 .cfi_restore %rbx popq %rbp .cfi_adjust_cfa_offset -8 .cfi_restore %rbp ret .Lseal_sse_128: .cfi_restore_state movdqu .Lchacha20_consts(%rip),%xmm0 movdqa %xmm0,%xmm1 movdqa %xmm0,%xmm2 movdqu 0(%r9),%xmm4 movdqa %xmm4,%xmm5 movdqa %xmm4,%xmm6 movdqu 16(%r9),%xmm8 movdqa %xmm8,%xmm9 movdqa %xmm8,%xmm10 movdqu 32(%r9),%xmm14 movdqa %xmm14,%xmm12 paddd .Lsse_inc(%rip),%xmm12 movdqa %xmm12,%xmm13 paddd .Lsse_inc(%rip),%xmm13 movdqa %xmm4,%xmm7 movdqa %xmm8,%xmm11 movdqa %xmm12,%xmm15 movq $10,%r10 .Lseal_sse_128_rounds: paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,4 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,12 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,4 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,12 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor 
%xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,4 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,12 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol16(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $12,%xmm3 psrld $20,%xmm4 pxor %xmm3,%xmm4 paddd %xmm4,%xmm0 pxor %xmm0,%xmm12 pshufb .Lrol8(%rip),%xmm12 paddd %xmm12,%xmm8 pxor %xmm8,%xmm4 movdqa %xmm4,%xmm3 pslld $7,%xmm3 psrld $25,%xmm4 pxor %xmm3,%xmm4 .byte 102,15,58,15,228,12 .byte 102,69,15,58,15,192,8 .byte 102,69,15,58,15,228,4 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol16(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $12,%xmm3 psrld $20,%xmm5 pxor %xmm3,%xmm5 paddd %xmm5,%xmm1 pxor %xmm1,%xmm13 pshufb .Lrol8(%rip),%xmm13 paddd %xmm13,%xmm9 pxor %xmm9,%xmm5 movdqa %xmm5,%xmm3 pslld $7,%xmm3 psrld $25,%xmm5 pxor %xmm3,%xmm5 .byte 102,15,58,15,237,12 .byte 102,69,15,58,15,201,8 .byte 102,69,15,58,15,237,4 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol16(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $12,%xmm3 psrld $20,%xmm6 pxor %xmm3,%xmm6 paddd %xmm6,%xmm2 pxor %xmm2,%xmm14 pshufb .Lrol8(%rip),%xmm14 paddd %xmm14,%xmm10 pxor %xmm10,%xmm6 movdqa %xmm6,%xmm3 pslld $7,%xmm3 psrld $25,%xmm6 pxor %xmm3,%xmm6 .byte 102,15,58,15,246,12 .byte 102,69,15,58,15,210,8 .byte 102,69,15,58,15,246,4 decq %r10 jnz .Lseal_sse_128_rounds paddd .Lchacha20_consts(%rip),%xmm0 paddd .Lchacha20_consts(%rip),%xmm1 paddd .Lchacha20_consts(%rip),%xmm2 paddd %xmm7,%xmm4 paddd %xmm7,%xmm5 paddd %xmm7,%xmm6 paddd %xmm11,%xmm8 paddd %xmm11,%xmm9 paddd %xmm15,%xmm12 paddd .Lsse_inc(%rip),%xmm15 paddd %xmm15,%xmm13 pand .Lclamp(%rip),%xmm2 movdqa %xmm2,0+0(%rbp) movdqa %xmm6,0+16(%rbp) movq %r8,%r8 call poly_hash_ad_internal jmp .Lseal_sse_128_tail_xor .size chacha20_poly1305_seal_nohw, .-chacha20_poly1305_seal_nohw .cfi_endproc .globl chacha20_poly1305_open_avx2 .hidden chacha20_poly1305_open_avx2 .type chacha20_poly1305_open_avx2,@function .align 64 chacha20_poly1305_open_avx2: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 pushq %r9 .cfi_adjust_cfa_offset 8 .cfi_offset %r9,-64 subq $288 + 0 + 32,%rsp .cfi_adjust_cfa_offset 288 + 32 leaq 32(%rsp),%rbp andq $-32,%rbp movq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) vzeroupper vmovdqa .Lchacha20_consts(%rip),%ymm0 vbroadcasti128 0(%r9),%ymm4 vbroadcasti128 16(%r9),%ymm8 vbroadcasti128 32(%r9),%ymm12 vpaddd .Lavx2_init(%rip),%ymm12,%ymm12 cmpq $192,%rbx jbe .Lopen_avx2_192 cmpq $320,%rbx jbe .Lopen_avx2_320 vmovdqa %ymm4,0+64(%rbp) vmovdqa %ymm8,0+96(%rbp) vmovdqa %ymm12,0+160(%rbp) movq $10,%r10 .Lopen_avx2_init_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd 
%ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 decq %r10 jne .Lopen_avx2_init_rounds vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand .Lclamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 movq %r8,%r8 call poly_hash_ad_internal xorq %rcx,%rcx .Lopen_avx2_init_hash: addq 0+0(%rsi,%rcx,1),%r10 adcq 8+0(%rsi,%rcx,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq $16,%rcx cmpq $64,%rcx jne .Lopen_avx2_init_hash vpxor 0(%rsi),%ymm0,%ymm0 vpxor 32(%rsi),%ymm4,%ymm4 vmovdqu %ymm0,0(%rdi) vmovdqu %ymm4,32(%rdi) leaq 64(%rsi),%rsi leaq 64(%rdi),%rdi subq $64,%rbx .Lopen_avx2_main_loop: cmpq $512,%rbx jb .Lopen_avx2_main_loop_done vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) xorq %rcx,%rcx .Lopen_avx2_main_loop_rounds: addq 0+0(%rsi,%rcx,1),%r10 adcq 8+0(%rsi,%rcx,1),%r11 adcq $1,%r12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 addq %rax,%r15 adcq %rdx,%r9 vpxor %ymm8,%ymm5,%ymm5 vpsrld 
$20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 addq 0+16(%rsi,%rcx,1),%r10 adcq 8+16(%rsi,%rcx,1),%r11 adcq $1,%r12 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 addq %rax,%r15 adcq %rdx,%r9 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq 0+32(%rsi,%rcx,1),%r10 adcq 8+32(%rsi,%rcx,1),%r11 adcq $1,%r12 leaq 48(%rcx),%rcx vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpshufb %ymm8,%ymm12,%ymm12 
vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq %rax,%r15 adcq %rdx,%r9 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpalignr $4,%ymm12,%ymm12,%ymm12 cmpq $60*8,%rcx jne .Lopen_avx2_main_loop_rounds vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) addq 0+60*8(%rsi),%r10 adcq 8+60*8(%rsi),%r11 adcq $1,%r12 vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu %ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) addq 0+60*8+16(%rsi),%r10 adcq 8+60*8+16(%rsi),%r11 adcq $1,%r12 vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 
32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vperm2i128 $0x13,%ymm0,%ymm4,%ymm4 vperm2i128 $0x02,%ymm8,%ymm12,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm8 vpxor 0+384(%rsi),%ymm3,%ymm3 vpxor 32+384(%rsi),%ymm0,%ymm0 vpxor 64+384(%rsi),%ymm4,%ymm4 vpxor 96+384(%rsi),%ymm8,%ymm8 vmovdqu %ymm3,0+384(%rdi) vmovdqu %ymm0,32+384(%rdi) vmovdqu %ymm4,64+384(%rdi) vmovdqu %ymm8,96+384(%rdi) leaq 512(%rsi),%rsi leaq 512(%rdi),%rdi subq $512,%rbx jmp .Lopen_avx2_main_loop .Lopen_avx2_main_loop_done: testq %rbx,%rbx vzeroupper je .Lopen_sse_finalize cmpq $384,%rbx ja .Lopen_avx2_tail_512 cmpq $256,%rbx ja .Lopen_avx2_tail_384 cmpq $128,%rbx ja .Lopen_avx2_tail_256 vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) xorq %r8,%r8 movq %rbx,%rcx andq $-16,%rcx testq %rcx,%rcx je .Lopen_avx2_tail_128_rounds .Lopen_avx2_tail_128_rounds_and_x1hash: addq 0+0(%rsi,%r8,1),%r10 adcq 8+0(%rsi,%r8,1),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Lopen_avx2_tail_128_rounds: addq $16,%r8 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 cmpq %rcx,%r8 jb .Lopen_avx2_tail_128_rounds_and_x1hash cmpq $160,%r8 jne .Lopen_avx2_tail_128_rounds vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 
0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 jmp .Lopen_avx2_tail_128_xor .Lopen_avx2_tail_256: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) movq %rbx,0+128(%rbp) movq %rbx,%rcx subq $128,%rcx shrq $4,%rcx movq $10,%r8 cmpq $10,%rcx cmovgq %r8,%rcx movq %rsi,%rbx xorq %r8,%r8 .Lopen_avx2_tail_256_rounds_and_x1hash: addq 0+0(%rbx),%r10 adcq 8+0(%rbx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rbx),%rbx .Lopen_avx2_tail_256_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 incq %r8 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor 
%ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 cmpq %rcx,%r8 jb .Lopen_avx2_tail_256_rounds_and_x1hash cmpq $10,%r8 jne .Lopen_avx2_tail_256_rounds movq %rbx,%r8 subq %rsi,%rbx movq %rbx,%rcx movq 0+128(%rbp),%rbx .Lopen_avx2_tail_256_hash: addq $16,%rcx cmpq %rbx,%rcx jg .Lopen_avx2_tail_256_done addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 jmp .Lopen_avx2_tail_256_hash .Lopen_avx2_tail_256_done: vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm1,%ymm1 vpxor 64+0(%rsi),%ymm5,%ymm5 vpxor 96+0(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm1,32+0(%rdi) vmovdqu %ymm5,64+0(%rdi) vmovdqu %ymm9,96+0(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 leaq 128(%rsi),%rsi leaq 128(%rdi),%rdi subq $128,%rbx jmp .Lopen_avx2_tail_128_xor .Lopen_avx2_tail_384: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) movq %rbx,0+128(%rbp) movq %rbx,%rcx subq $256,%rcx shrq $4,%rcx addq $6,%rcx movq $10,%r8 cmpq $10,%rcx cmovgq %r8,%rcx movq %rsi,%rbx xorq %r8,%r8 .Lopen_avx2_tail_384_rounds_and_x2hash: addq 0+0(%rbx),%r10 adcq 8+0(%rbx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rbx),%rbx .Lopen_avx2_tail_384_rounds_and_x1hash: vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb 
.Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 addq 0+0(%rbx),%r10 adcq 8+0(%rbx),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rbx),%rbx incq %r8 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 cmpq %rcx,%r8 jb .Lopen_avx2_tail_384_rounds_and_x2hash cmpq $10,%r8 jne .Lopen_avx2_tail_384_rounds_and_x1hash movq %rbx,%r8 subq 
%rsi,%rbx movq %rbx,%rcx movq 0+128(%rbp),%rbx .Lopen_avx2_384_tail_hash: addq $16,%rcx cmpq %rbx,%rcx jg .Lopen_avx2_384_tail_done addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 jmp .Lopen_avx2_384_tail_hash .Lopen_avx2_384_tail_done: vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm2,%ymm2 vpxor 64+0(%rsi),%ymm6,%ymm6 vpxor 96+0(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm2,32+0(%rdi) vmovdqu %ymm6,64+0(%rdi) vmovdqu %ymm10,96+0(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm1,%ymm1 vpxor 64+128(%rsi),%ymm5,%ymm5 vpxor 96+128(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm1,32+128(%rdi) vmovdqu %ymm5,64+128(%rdi) vmovdqu %ymm9,96+128(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 leaq 256(%rsi),%rsi leaq 256(%rdi),%rdi subq $256,%rbx jmp .Lopen_avx2_tail_128_xor .Lopen_avx2_tail_512: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) xorq %rcx,%rcx movq %rsi,%r8 .Lopen_avx2_tail_512_rounds_and_x2hash: addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 .Lopen_avx2_tail_512_rounds_and_x1hash: vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 
vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 addq 0+16(%r8),%r10 adcq 8+16(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%r8),%r8 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 
vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 incq %rcx cmpq $4,%rcx jl .Lopen_avx2_tail_512_rounds_and_x2hash cmpq $10,%rcx jne .Lopen_avx2_tail_512_rounds_and_x1hash movq %rbx,%rcx subq $384,%rcx andq $-16,%rcx .Lopen_avx2_tail_512_hash: testq %rcx,%rcx je .Lopen_avx2_tail_512_done addq 0+0(%r8),%r10 adcq 8+0(%r8),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%r8),%r8 subq $16,%rcx jmp .Lopen_avx2_tail_512_hash .Lopen_avx2_tail_512_done: vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu %ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 
vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 leaq 384(%rsi),%rsi leaq 384(%rdi),%rdi subq $384,%rbx .Lopen_avx2_tail_128_xor: cmpq $32,%rbx jb .Lopen_avx2_tail_32_xor subq $32,%rbx vpxor (%rsi),%ymm0,%ymm0 vmovdqu %ymm0,(%rdi) leaq 32(%rsi),%rsi leaq 32(%rdi),%rdi vmovdqa %ymm4,%ymm0 vmovdqa %ymm8,%ymm4 vmovdqa %ymm12,%ymm8 jmp .Lopen_avx2_tail_128_xor .Lopen_avx2_tail_32_xor: cmpq $16,%rbx vmovdqa %xmm0,%xmm1 jb .Lopen_avx2_exit subq $16,%rbx vpxor (%rsi),%xmm0,%xmm1 vmovdqu %xmm1,(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi vperm2i128 $0x11,%ymm0,%ymm0,%ymm0 vmovdqa %xmm0,%xmm1 .Lopen_avx2_exit: vzeroupper jmp .Lopen_sse_tail_16 .Lopen_avx2_192: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13 vmovdqa %ymm12,%ymm11 vmovdqa %ymm13,%ymm15 movq $10,%r10 .Lopen_avx2_192_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb 
.Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 decq %r10 jne .Lopen_avx2_192_rounds vpaddd %ymm2,%ymm0,%ymm0 vpaddd %ymm2,%ymm1,%ymm1 vpaddd %ymm6,%ymm4,%ymm4 vpaddd %ymm6,%ymm5,%ymm5 vpaddd %ymm10,%ymm8,%ymm8 vpaddd %ymm10,%ymm9,%ymm9 vpaddd %ymm11,%ymm12,%ymm12 vpaddd %ymm15,%ymm13,%ymm13 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand .Lclamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 .Lopen_avx2_short: movq %r8,%r8 call poly_hash_ad_internal .Lopen_avx2_short_hash_and_xor_loop: cmpq $32,%rbx jb .Lopen_avx2_short_tail_32 subq $32,%rbx addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rsi),%r10 adcq 8+16(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor (%rsi),%ymm0,%ymm0 vmovdqu %ymm0,(%rdi) leaq 32(%rsi),%rsi leaq 32(%rdi),%rdi vmovdqa %ymm4,%ymm0 vmovdqa %ymm8,%ymm4 vmovdqa %ymm12,%ymm8 vmovdqa %ymm1,%ymm12 vmovdqa %ymm5,%ymm1 vmovdqa %ymm9,%ymm5 vmovdqa %ymm13,%ymm9 vmovdqa %ymm2,%ymm13 vmovdqa %ymm6,%ymm2 jmp .Lopen_avx2_short_hash_and_xor_loop .Lopen_avx2_short_tail_32: cmpq $16,%rbx vmovdqa %xmm0,%xmm1 jb .Lopen_avx2_short_tail_32_exit subq $16,%rbx addq 0+0(%rsi),%r10 adcq 8+0(%rsi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor (%rsi),%xmm0,%xmm3 vmovdqu %xmm3,(%rdi) leaq 16(%rsi),%rsi leaq 16(%rdi),%rdi vextracti128 $1,%ymm0,%xmm1 .Lopen_avx2_short_tail_32_exit: vzeroupper jmp .Lopen_sse_tail_16 .Lopen_avx2_320: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13 vpaddd 
.Lavx2_inc(%rip),%ymm13,%ymm14 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) movq $10,%r10 .Lopen_avx2_320_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 decq %r10 jne .Lopen_avx2_320_rounds vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd %ymm7,%ymm4,%ymm4 vpaddd %ymm7,%ymm5,%ymm5 vpaddd %ymm7,%ymm6,%ymm6 vpaddd %ymm11,%ymm8,%ymm8 vpaddd %ymm11,%ymm9,%ymm9 vpaddd 
%ymm11,%ymm10,%ymm10 vpaddd 0+160(%rbp),%ymm12,%ymm12 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd 0+224(%rbp),%ymm14,%ymm14 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand .Lclamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 vperm2i128 $0x02,%ymm2,%ymm6,%ymm9 vperm2i128 $0x02,%ymm10,%ymm14,%ymm13 vperm2i128 $0x13,%ymm2,%ymm6,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm6 jmp .Lopen_avx2_short .size chacha20_poly1305_open_avx2, .-chacha20_poly1305_open_avx2 .cfi_endproc .globl chacha20_poly1305_seal_avx2 .hidden chacha20_poly1305_seal_avx2 .type chacha20_poly1305_seal_avx2,@function .align 64 chacha20_poly1305_seal_avx2: .cfi_startproc _CET_ENDBR pushq %rbp .cfi_adjust_cfa_offset 8 .cfi_offset %rbp,-16 pushq %rbx .cfi_adjust_cfa_offset 8 .cfi_offset %rbx,-24 pushq %r12 .cfi_adjust_cfa_offset 8 .cfi_offset %r12,-32 pushq %r13 .cfi_adjust_cfa_offset 8 .cfi_offset %r13,-40 pushq %r14 .cfi_adjust_cfa_offset 8 .cfi_offset %r14,-48 pushq %r15 .cfi_adjust_cfa_offset 8 .cfi_offset %r15,-56 pushq %r9 .cfi_adjust_cfa_offset 8 .cfi_offset %r9,-64 subq $288 + 0 + 32,%rsp .cfi_adjust_cfa_offset 288 + 32 leaq 32(%rsp),%rbp andq $-32,%rbp movq 56(%r9),%rbx addq %rdx,%rbx movq %r8,0+0+32(%rbp) movq %rbx,8+0+32(%rbp) movq %rdx,%rbx vzeroupper vmovdqa .Lchacha20_consts(%rip),%ymm0 vbroadcasti128 0(%r9),%ymm4 vbroadcasti128 16(%r9),%ymm8 vbroadcasti128 32(%r9),%ymm12 vpaddd .Lavx2_init(%rip),%ymm12,%ymm12 cmpq $192,%rbx jbe .Lseal_avx2_192 cmpq $320,%rbx jbe .Lseal_avx2_320 vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm4,%ymm7 vmovdqa %ymm4,0+64(%rbp) vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vmovdqa %ymm8,%ymm11 vmovdqa %ymm8,0+96(%rbp) vmovdqa %ymm12,%ymm15 vpaddd .Lavx2_inc(%rip),%ymm15,%ymm14 vpaddd .Lavx2_inc(%rip),%ymm14,%ymm13 vpaddd .Lavx2_inc(%rip),%ymm13,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm15,0+256(%rbp) movq $10,%r10 .Lseal_avx2_init_rounds: vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor 
%ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 decq %r10 jnz .Lseal_avx2_init_rounds vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 
vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vperm2i128 $0x02,%ymm3,%ymm7,%ymm15 vperm2i128 $0x13,%ymm3,%ymm7,%ymm3 vpand .Lclamp(%rip),%ymm15,%ymm15 vmovdqa %ymm15,0+0(%rbp) movq %r8,%r8 call poly_hash_ad_internal vpxor 0(%rsi),%ymm3,%ymm3 vpxor 32(%rsi),%ymm11,%ymm11 vmovdqu %ymm3,0(%rdi) vmovdqu %ymm11,32(%rdi) vperm2i128 $0x02,%ymm2,%ymm6,%ymm15 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+64(%rsi),%ymm15,%ymm15 vpxor 32+64(%rsi),%ymm2,%ymm2 vpxor 64+64(%rsi),%ymm6,%ymm6 vpxor 96+64(%rsi),%ymm10,%ymm10 vmovdqu %ymm15,0+64(%rdi) vmovdqu %ymm2,32+64(%rdi) vmovdqu %ymm6,64+64(%rdi) vmovdqu %ymm10,96+64(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm15 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+192(%rsi),%ymm15,%ymm15 vpxor 32+192(%rsi),%ymm1,%ymm1 vpxor 64+192(%rsi),%ymm5,%ymm5 vpxor 96+192(%rsi),%ymm9,%ymm9 vmovdqu %ymm15,0+192(%rdi) vmovdqu %ymm1,32+192(%rdi) vmovdqu %ymm5,64+192(%rdi) vmovdqu %ymm9,96+192(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm15 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm15,%ymm8 leaq 320(%rsi),%rsi subq $320,%rbx movq $320,%rcx cmpq $128,%rbx jbe .Lseal_avx2_short_hash_remainder vpxor 0(%rsi),%ymm0,%ymm0 vpxor 32(%rsi),%ymm4,%ymm4 vpxor 64(%rsi),%ymm8,%ymm8 vpxor 96(%rsi),%ymm12,%ymm12 vmovdqu %ymm0,320(%rdi) vmovdqu %ymm4,352(%rdi) vmovdqu %ymm8,384(%rdi) vmovdqu %ymm12,416(%rdi) leaq 128(%rsi),%rsi subq $128,%rbx movq $8,%rcx movq $2,%r8 cmpq $128,%rbx jbe .Lseal_avx2_tail_128 cmpq $256,%rbx jbe .Lseal_avx2_tail_256 cmpq $384,%rbx jbe .Lseal_avx2_tail_384 cmpq $512,%rbx jbe .Lseal_avx2_tail_512 vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 
vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd 
%ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 subq $16,%rdi movq $9,%rcx jmp .Lseal_avx2_main_loop_rounds_entry .align 32 .Lseal_avx2_main_loop: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) movq $10,%rcx .align 32 .Lseal_avx2_main_loop_rounds: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 addq %rax,%r15 adcq %rdx,%r9 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 .Lseal_avx2_main_loop_rounds_entry: vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 
vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 addq %rax,%r15 adcq %rdx,%r9 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq 0+32(%rdi),%r10 adcq 8+32(%rdi),%r11 adcq $1,%r12 leaq 48(%rdi),%rdi vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 addq %rax,%r15 adcq %rdx,%r9 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 movq %r13,%r10 movq 
%r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpalignr $4,%ymm12,%ymm12,%ymm12 decq %rcx jne .Lseal_avx2_main_loop_rounds vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu %ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vperm2i128 $0x13,%ymm0,%ymm4,%ymm4 vperm2i128 $0x02,%ymm8,%ymm12,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm8 vpxor 0+384(%rsi),%ymm3,%ymm3 vpxor 32+384(%rsi),%ymm0,%ymm0 vpxor 64+384(%rsi),%ymm4,%ymm4 vpxor 96+384(%rsi),%ymm8,%ymm8 vmovdqu %ymm3,0+384(%rdi) vmovdqu %ymm0,32+384(%rdi) vmovdqu %ymm4,64+384(%rdi) vmovdqu %ymm8,96+384(%rdi) leaq 512(%rsi),%rsi subq $512,%rbx cmpq $512,%rbx jg .Lseal_avx2_main_loop addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 
movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi movq $10,%rcx xorq %r8,%r8 cmpq $384,%rbx ja .Lseal_avx2_tail_512 cmpq $256,%rbx ja .Lseal_avx2_tail_384 cmpq $128,%rbx ja .Lseal_avx2_tail_256 .Lseal_avx2_tail_128: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) .Lseal_avx2_tail_128_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi .Lseal_avx2_tail_128_rounds_and_2xhash: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 addq 
0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi decq %rcx jg .Lseal_avx2_tail_128_rounds_and_3xhash decq %r8 jge .Lseal_avx2_tail_128_rounds_and_2xhash vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 jmp .Lseal_avx2_short_loop .Lseal_avx2_tail_256: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) .Lseal_avx2_tail_256_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi .Lseal_avx2_tail_256_rounds_and_2xhash: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 
adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi decq %rcx jg .Lseal_avx2_tail_256_rounds_and_3xhash decq %r8 jge .Lseal_avx2_tail_256_rounds_and_2xhash vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm1,%ymm1 vpxor 64+0(%rsi),%ymm5,%ymm5 vpxor 96+0(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm1,32+0(%rdi) vmovdqu %ymm5,64+0(%rdi) vmovdqu %ymm9,96+0(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 movq $128,%rcx leaq 128(%rsi),%rsi subq $128,%rbx jmp .Lseal_avx2_short_hash_remainder .Lseal_avx2_tail_384: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) .Lseal_avx2_tail_384_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq 
%r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi .Lseal_avx2_tail_384_rounds_and_2xhash: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 
vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 leaq 32(%rdi),%rdi decq %rcx jg .Lseal_avx2_tail_384_rounds_and_3xhash decq %r8 jge .Lseal_avx2_tail_384_rounds_and_2xhash vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+0(%rsi),%ymm3,%ymm3 vpxor 32+0(%rsi),%ymm2,%ymm2 vpxor 64+0(%rsi),%ymm6,%ymm6 vpxor 96+0(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+0(%rdi) vmovdqu %ymm2,32+0(%rdi) vmovdqu %ymm6,64+0(%rdi) vmovdqu %ymm10,96+0(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm1,%ymm1 vpxor 64+128(%rsi),%ymm5,%ymm5 vpxor 96+128(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm1,32+128(%rdi) vmovdqu %ymm5,64+128(%rdi) vmovdqu %ymm9,96+128(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 movq $256,%rcx leaq 256(%rsi),%rsi subq $256,%rbx jmp .Lseal_avx2_short_hash_remainder .Lseal_avx2_tail_512: vmovdqa .Lchacha20_consts(%rip),%ymm0 vmovdqa 0+64(%rbp),%ymm4 vmovdqa 0+96(%rbp),%ymm8 vmovdqa %ymm0,%ymm1 vmovdqa %ymm4,%ymm5 vmovdqa %ymm8,%ymm9 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm10 vmovdqa %ymm0,%ymm3 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa .Lavx2_inc(%rip),%ymm12 vpaddd 0+160(%rbp),%ymm12,%ymm15 vpaddd %ymm15,%ymm12,%ymm14 vpaddd %ymm14,%ymm12,%ymm13 vpaddd %ymm13,%ymm12,%ymm12 vmovdqa %ymm15,0+256(%rbp) vmovdqa %ymm14,0+224(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm12,0+160(%rbp) .Lseal_avx2_tail_512_rounds_and_3xhash: addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq 
%r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi .Lseal_avx2_tail_512_rounds_and_2xhash: vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $4,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $12,%ymm15,%ymm15,%ymm15 vpalignr $4,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $4,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $4,%ymm4,%ymm4,%ymm4 addq %rax,%r15 adcq %rdx,%r9 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm12,%ymm12,%ymm12 vmovdqa %ymm8,0+128(%rbp) vmovdqa .Lrol16(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $20,%ymm7,%ymm8 vpslld $32-20,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $20,%ymm6,%ymm8 vpslld $32-20,%ymm6,%ymm6 
vpxor %ymm8,%ymm6,%ymm6 vpsrld $20,%ymm5,%ymm8 vpslld $32-20,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $20,%ymm4,%ymm8 vpslld $32-20,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa .Lrol8(%rip),%ymm8 vpaddd %ymm7,%ymm3,%ymm3 vpaddd %ymm6,%ymm2,%ymm2 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 vpaddd %ymm5,%ymm1,%ymm1 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm3,%ymm15,%ymm15 vpxor %ymm2,%ymm14,%ymm14 vpxor %ymm1,%ymm13,%ymm13 vpxor %ymm0,%ymm12,%ymm12 vpshufb %ymm8,%ymm15,%ymm15 vpshufb %ymm8,%ymm14,%ymm14 vpshufb %ymm8,%ymm13,%ymm13 vpshufb %ymm8,%ymm12,%ymm12 vpaddd %ymm15,%ymm11,%ymm11 vpaddd %ymm14,%ymm10,%ymm10 vpaddd %ymm13,%ymm9,%ymm9 vpaddd 0+128(%rbp),%ymm12,%ymm8 vpxor %ymm11,%ymm7,%ymm7 vpxor %ymm10,%ymm6,%ymm6 vpxor %ymm9,%ymm5,%ymm5 vpxor %ymm8,%ymm4,%ymm4 vmovdqa %ymm8,0+128(%rbp) vpsrld $25,%ymm7,%ymm8 movq 0+0+0(%rbp),%rdx movq %rdx,%r15 mulxq %r10,%r13,%r14 mulxq %r11,%rax,%rdx imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 vpslld $32-25,%ymm7,%ymm7 vpxor %ymm8,%ymm7,%ymm7 vpsrld $25,%ymm6,%ymm8 vpslld $32-25,%ymm6,%ymm6 vpxor %ymm8,%ymm6,%ymm6 vpsrld $25,%ymm5,%ymm8 vpslld $32-25,%ymm5,%ymm5 vpxor %ymm8,%ymm5,%ymm5 vpsrld $25,%ymm4,%ymm8 vpslld $32-25,%ymm4,%ymm4 vpxor %ymm8,%ymm4,%ymm4 vmovdqa 0+128(%rbp),%ymm8 vpalignr $12,%ymm7,%ymm7,%ymm7 vpalignr $8,%ymm11,%ymm11,%ymm11 vpalignr $4,%ymm15,%ymm15,%ymm15 vpalignr $12,%ymm6,%ymm6,%ymm6 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $12,%ymm5,%ymm5,%ymm5 vpalignr $8,%ymm9,%ymm9,%ymm9 movq 8+0+0(%rbp),%rdx mulxq %r10,%r10,%rax addq %r10,%r14 mulxq %r11,%r11,%r9 adcq %r11,%r15 adcq $0,%r9 imulq %r12,%rdx vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $12,%ymm4,%ymm4,%ymm4 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm12,%ymm12,%ymm12 addq %rax,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi decq %rcx jg .Lseal_avx2_tail_512_rounds_and_3xhash decq %r8 jge .Lseal_avx2_tail_512_rounds_and_2xhash vpaddd .Lchacha20_consts(%rip),%ymm3,%ymm3 vpaddd 0+64(%rbp),%ymm7,%ymm7 vpaddd 0+96(%rbp),%ymm11,%ymm11 vpaddd 0+256(%rbp),%ymm15,%ymm15 vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd 0+64(%rbp),%ymm6,%ymm6 vpaddd 0+96(%rbp),%ymm10,%ymm10 vpaddd 0+224(%rbp),%ymm14,%ymm14 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd 0+64(%rbp),%ymm5,%ymm5 vpaddd 0+96(%rbp),%ymm9,%ymm9 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd 0+64(%rbp),%ymm4,%ymm4 vpaddd 0+96(%rbp),%ymm8,%ymm8 vpaddd 0+160(%rbp),%ymm12,%ymm12 vmovdqa %ymm0,0+128(%rbp) vperm2i128 $0x02,%ymm3,%ymm7,%ymm0 vperm2i128 $0x13,%ymm3,%ymm7,%ymm7 vperm2i128 $0x02,%ymm11,%ymm15,%ymm3 vperm2i128 $0x13,%ymm11,%ymm15,%ymm11 vpxor 0+0(%rsi),%ymm0,%ymm0 vpxor 32+0(%rsi),%ymm3,%ymm3 vpxor 64+0(%rsi),%ymm7,%ymm7 vpxor 96+0(%rsi),%ymm11,%ymm11 vmovdqu %ymm0,0+0(%rdi) vmovdqu %ymm3,32+0(%rdi) vmovdqu %ymm7,64+0(%rdi) vmovdqu %ymm11,96+0(%rdi) vmovdqa 0+128(%rbp),%ymm0 vperm2i128 $0x02,%ymm2,%ymm6,%ymm3 vperm2i128 $0x13,%ymm2,%ymm6,%ymm6 vperm2i128 $0x02,%ymm10,%ymm14,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm10 vpxor 0+128(%rsi),%ymm3,%ymm3 vpxor 32+128(%rsi),%ymm2,%ymm2 vpxor 64+128(%rsi),%ymm6,%ymm6 vpxor 96+128(%rsi),%ymm10,%ymm10 vmovdqu %ymm3,0+128(%rdi) vmovdqu %ymm2,32+128(%rdi) vmovdqu %ymm6,64+128(%rdi) vmovdqu %ymm10,96+128(%rdi) vperm2i128 $0x02,%ymm1,%ymm5,%ymm3 vperm2i128 $0x13,%ymm1,%ymm5,%ymm5 vperm2i128 $0x02,%ymm9,%ymm13,%ymm1 
vperm2i128 $0x13,%ymm9,%ymm13,%ymm9 vpxor 0+256(%rsi),%ymm3,%ymm3 vpxor 32+256(%rsi),%ymm1,%ymm1 vpxor 64+256(%rsi),%ymm5,%ymm5 vpxor 96+256(%rsi),%ymm9,%ymm9 vmovdqu %ymm3,0+256(%rdi) vmovdqu %ymm1,32+256(%rdi) vmovdqu %ymm5,64+256(%rdi) vmovdqu %ymm9,96+256(%rdi) vperm2i128 $0x13,%ymm0,%ymm4,%ymm3 vperm2i128 $0x02,%ymm0,%ymm4,%ymm0 vperm2i128 $0x02,%ymm8,%ymm12,%ymm4 vperm2i128 $0x13,%ymm8,%ymm12,%ymm12 vmovdqa %ymm3,%ymm8 movq $384,%rcx leaq 384(%rsi),%rsi subq $384,%rbx jmp .Lseal_avx2_short_hash_remainder .Lseal_avx2_320: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13 vpaddd .Lavx2_inc(%rip),%ymm13,%ymm14 vmovdqa %ymm4,%ymm7 vmovdqa %ymm8,%ymm11 vmovdqa %ymm12,0+160(%rbp) vmovdqa %ymm13,0+192(%rbp) vmovdqa %ymm14,0+224(%rbp) movq $10,%r10 .Lseal_avx2_320_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $12,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $4,%ymm6,%ymm6,%ymm6 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $12,%ymm5,%ymm5,%ymm5 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol16(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor 
%ymm10,%ymm6,%ymm6 vpsrld $20,%ymm6,%ymm3 vpslld $12,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpaddd %ymm6,%ymm2,%ymm2 vpxor %ymm2,%ymm14,%ymm14 vpshufb .Lrol8(%rip),%ymm14,%ymm14 vpaddd %ymm14,%ymm10,%ymm10 vpxor %ymm10,%ymm6,%ymm6 vpslld $7,%ymm6,%ymm3 vpsrld $25,%ymm6,%ymm6 vpxor %ymm3,%ymm6,%ymm6 vpalignr $4,%ymm14,%ymm14,%ymm14 vpalignr $8,%ymm10,%ymm10,%ymm10 vpalignr $12,%ymm6,%ymm6,%ymm6 decq %r10 jne .Lseal_avx2_320_rounds vpaddd .Lchacha20_consts(%rip),%ymm0,%ymm0 vpaddd .Lchacha20_consts(%rip),%ymm1,%ymm1 vpaddd .Lchacha20_consts(%rip),%ymm2,%ymm2 vpaddd %ymm7,%ymm4,%ymm4 vpaddd %ymm7,%ymm5,%ymm5 vpaddd %ymm7,%ymm6,%ymm6 vpaddd %ymm11,%ymm8,%ymm8 vpaddd %ymm11,%ymm9,%ymm9 vpaddd %ymm11,%ymm10,%ymm10 vpaddd 0+160(%rbp),%ymm12,%ymm12 vpaddd 0+192(%rbp),%ymm13,%ymm13 vpaddd 0+224(%rbp),%ymm14,%ymm14 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand .Lclamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 vperm2i128 $0x02,%ymm2,%ymm6,%ymm9 vperm2i128 $0x02,%ymm10,%ymm14,%ymm13 vperm2i128 $0x13,%ymm2,%ymm6,%ymm2 vperm2i128 $0x13,%ymm10,%ymm14,%ymm6 jmp .Lseal_avx2_short .Lseal_avx2_192: vmovdqa %ymm0,%ymm1 vmovdqa %ymm0,%ymm2 vmovdqa %ymm4,%ymm5 vmovdqa %ymm4,%ymm6 vmovdqa %ymm8,%ymm9 vmovdqa %ymm8,%ymm10 vpaddd .Lavx2_inc(%rip),%ymm12,%ymm13 vmovdqa %ymm12,%ymm11 vmovdqa %ymm13,%ymm15 movq $10,%r10 .Lseal_avx2_192_rounds: vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $12,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $4,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $12,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr $4,%ymm5,%ymm5,%ymm5 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol16(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpsrld $20,%ymm4,%ymm3 vpslld $12,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpaddd %ymm4,%ymm0,%ymm0 vpxor %ymm0,%ymm12,%ymm12 vpshufb .Lrol8(%rip),%ymm12,%ymm12 vpaddd %ymm12,%ymm8,%ymm8 vpxor %ymm8,%ymm4,%ymm4 vpslld $7,%ymm4,%ymm3 vpsrld $25,%ymm4,%ymm4 vpxor %ymm3,%ymm4,%ymm4 vpalignr $4,%ymm12,%ymm12,%ymm12 vpalignr $8,%ymm8,%ymm8,%ymm8 vpalignr $12,%ymm4,%ymm4,%ymm4 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol16(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpsrld $20,%ymm5,%ymm3 vpslld $12,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpaddd %ymm5,%ymm1,%ymm1 vpxor %ymm1,%ymm13,%ymm13 vpshufb .Lrol8(%rip),%ymm13,%ymm13 vpaddd %ymm13,%ymm9,%ymm9 vpxor %ymm9,%ymm5,%ymm5 vpslld $7,%ymm5,%ymm3 vpsrld $25,%ymm5,%ymm5 vpxor %ymm3,%ymm5,%ymm5 vpalignr $4,%ymm13,%ymm13,%ymm13 vpalignr $8,%ymm9,%ymm9,%ymm9 vpalignr 
$12,%ymm5,%ymm5,%ymm5 decq %r10 jne .Lseal_avx2_192_rounds vpaddd %ymm2,%ymm0,%ymm0 vpaddd %ymm2,%ymm1,%ymm1 vpaddd %ymm6,%ymm4,%ymm4 vpaddd %ymm6,%ymm5,%ymm5 vpaddd %ymm10,%ymm8,%ymm8 vpaddd %ymm10,%ymm9,%ymm9 vpaddd %ymm11,%ymm12,%ymm12 vpaddd %ymm15,%ymm13,%ymm13 vperm2i128 $0x02,%ymm0,%ymm4,%ymm3 vpand .Lclamp(%rip),%ymm3,%ymm3 vmovdqa %ymm3,0+0(%rbp) vperm2i128 $0x13,%ymm0,%ymm4,%ymm0 vperm2i128 $0x13,%ymm8,%ymm12,%ymm4 vperm2i128 $0x02,%ymm1,%ymm5,%ymm8 vperm2i128 $0x02,%ymm9,%ymm13,%ymm12 vperm2i128 $0x13,%ymm1,%ymm5,%ymm1 vperm2i128 $0x13,%ymm9,%ymm13,%ymm5 .Lseal_avx2_short: movq %r8,%r8 call poly_hash_ad_internal xorq %rcx,%rcx .Lseal_avx2_short_hash_remainder: cmpq $16,%rcx jb .Lseal_avx2_short_loop addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 subq $16,%rcx addq $16,%rdi jmp .Lseal_avx2_short_hash_remainder .Lseal_avx2_short_loop: cmpq $32,%rbx jb .Lseal_avx2_short_tail subq $32,%rbx vpxor (%rsi),%ymm0,%ymm0 vmovdqu %ymm0,(%rdi) leaq 32(%rsi),%rsi addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 addq 0+16(%rdi),%r10 adcq 8+16(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 32(%rdi),%rdi vmovdqa %ymm4,%ymm0 vmovdqa %ymm8,%ymm4 vmovdqa %ymm12,%ymm8 vmovdqa %ymm1,%ymm12 vmovdqa %ymm5,%ymm1 vmovdqa %ymm9,%ymm5 vmovdqa %ymm13,%ymm9 vmovdqa %ymm2,%ymm13 vmovdqa %ymm6,%ymm2 jmp .Lseal_avx2_short_loop .Lseal_avx2_short_tail: cmpq $16,%rbx jb .Lseal_avx2_exit subq $16,%rbx vpxor (%rsi),%xmm0,%xmm3 vmovdqu %xmm3,(%rdi) leaq 16(%rsi),%rsi addq 0+0(%rdi),%r10 adcq 8+0(%rdi),%r11 adcq $1,%r12 movq 0+0+0(%rbp),%rax movq %rax,%r15 mulq %r10 movq %rax,%r13 movq %rdx,%r14 movq 0+0+0(%rbp),%rax mulq %r11 imulq %r12,%r15 addq %rax,%r14 adcq %rdx,%r15 movq 8+0+0(%rbp),%rax movq %rax,%r9 mulq %r10 addq %rax,%r14 adcq $0,%rdx movq %rdx,%r10 movq 8+0+0(%rbp),%rax mulq %r11 addq %rax,%r15 adcq $0,%rdx imulq %r12,%r9 addq %r10,%r15 adcq %rdx,%r9 movq %r13,%r10 movq %r14,%r11 movq %r15,%r12 
andq $3,%r12 movq %r15,%r13 andq $-4,%r13 movq %r9,%r14 shrdq $2,%r9,%r15 shrq $2,%r9 addq %r13,%r15 adcq %r14,%r9 addq %r15,%r10 adcq %r9,%r11 adcq $0,%r12 leaq 16(%rdi),%rdi vextracti128 $1,%ymm0,%xmm0 .Lseal_avx2_exit: vzeroupper jmp .Lseal_sse_tail_16 .cfi_endproc .size chacha20_poly1305_seal_avx2, .-chacha20_poly1305_seal_avx2 #endif
marvin-hansen/iggy-streaming-system
33,986
thirdparty/crates/ring-0.17.9/pregenerated/sha256-armv8-ios64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) // Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy // in the file LICENSE in the source distribution or at // https://www.openssl.org/source/license.html // ==================================================================== // Written by Andy Polyakov <appro@openssl.org> for the OpenSSL // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. // // Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. // // Performance in cycles per processed byte and improvement coefficient // over code generated with "default" compiler: // // SHA256-hw SHA256(*) SHA512 // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) // Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. // (**) The result is a trade-off: it's possible to improve it by // 10% (or by 1 cycle per round), but at the cost of 20% loss // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code // generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ # include <ring-core/arm_arch.h> #endif .text .globl _sha256_block_data_order_nohw .private_extern _sha256_block_data_order_nohw .align 6 _sha256_block_data_order_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#4*4 ldp w20,w21,[x0] // load context ldp w22,w23,[x0,#2*4] ldp w24,w25,[x0,#4*4] add x2,x1,x2,lsl#6 // end of input ldp w26,w27,[x0,#6*4] adrp x30,LK256@PAGE add x30,x30,LK256@PAGEOFF stp x0,x2,[x29,#96] Loop: ldp w3,w4,[x1],#2*4 ldr w19,[x30],#4 // *K++ eor w28,w21,w22 // magic seed str x1,[x29,#112] #ifndef __AARCH64EB__ rev w3,w3 // 0 #endif ror w16,w24,#6 add w27,w27,w19 // h+=K[i] eor w6,w24,w24,ror#14 and w17,w25,w24 bic w19,w26,w24 add w27,w27,w3 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w6,ror#11 // Sigma1(e) ror w6,w20,#2 add w27,w27,w17 // h+=Ch(e,f,g) eor w17,w20,w20,ror#9 add w27,w27,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w23,w23,w27 // d+=h eor w28,w28,w21 // Maj(a,b,c) eor w17,w6,w17,ror#13 // Sigma0(a) add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w4,w4 // 1 #endif ldp w5,w6,[x1],#2*4 add w27,w27,w17 // h+=Sigma0(a) ror w16,w23,#6 add w26,w26,w28 // h+=K[i] eor w7,w23,w23,ror#14 and w17,w24,w23 bic w28,w25,w23 add w26,w26,w4 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w7,ror#11 // Sigma1(e) ror w7,w27,#2 add w26,w26,w17 // h+=Ch(e,f,g) eor w17,w27,w27,ror#9 add w26,w26,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w22,w22,w26 // d+=h eor w19,w19,w20 // Maj(a,b,c) eor w17,w7,w17,ror#13 // Sigma0(a) add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w5,w5 // 2 #endif add w26,w26,w17 // h+=Sigma0(a) ror w16,w22,#6 add w25,w25,w19 // h+=K[i] eor w8,w22,w22,ror#14 and w17,w23,w22 bic w19,w24,w22 add w25,w25,w5 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w8,ror#11 // Sigma1(e) ror w8,w26,#2 add w25,w25,w17 // h+=Ch(e,f,g) eor w17,w26,w26,ror#9 add w25,w25,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w21,w21,w25 // d+=h eor w28,w28,w27 // Maj(a,b,c) eor w17,w8,w17,ror#13 // Sigma0(a) add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w6,w6 // 3 #endif ldp w7,w8,[x1],#2*4 add w25,w25,w17 // h+=Sigma0(a) ror w16,w21,#6 add w24,w24,w28 // h+=K[i] eor w9,w21,w21,ror#14 and w17,w22,w21 bic w28,w23,w21 add w24,w24,w6 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w9,ror#11 // Sigma1(e) ror w9,w25,#2 add w24,w24,w17 // h+=Ch(e,f,g) eor w17,w25,w25,ror#9 add w24,w24,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w20,w20,w24 // d+=h eor w19,w19,w26 // Maj(a,b,c) eor w17,w9,w17,ror#13 // Sigma0(a) add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w7,w7 // 4 #endif add w24,w24,w17 // h+=Sigma0(a) ror w16,w20,#6 add w23,w23,w19 // h+=K[i] eor w10,w20,w20,ror#14 and w17,w21,w20 bic w19,w22,w20 add w23,w23,w7 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w10,ror#11 // Sigma1(e) ror w10,w24,#2 add w23,w23,w17 // h+=Ch(e,f,g) eor w17,w24,w24,ror#9 add w23,w23,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w27,w27,w23 // d+=h eor w28,w28,w25 // Maj(a,b,c) eor w17,w10,w17,ror#13 // Sigma0(a) add w23,w23,w28 // h+=Maj(a,b,c) ldr 
w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w8,w8 // 5 #endif ldp w9,w10,[x1],#2*4 add w23,w23,w17 // h+=Sigma0(a) ror w16,w27,#6 add w22,w22,w28 // h+=K[i] eor w11,w27,w27,ror#14 and w17,w20,w27 bic w28,w21,w27 add w22,w22,w8 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w11,ror#11 // Sigma1(e) ror w11,w23,#2 add w22,w22,w17 // h+=Ch(e,f,g) eor w17,w23,w23,ror#9 add w22,w22,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w26,w26,w22 // d+=h eor w19,w19,w24 // Maj(a,b,c) eor w17,w11,w17,ror#13 // Sigma0(a) add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w9,w9 // 6 #endif add w22,w22,w17 // h+=Sigma0(a) ror w16,w26,#6 add w21,w21,w19 // h+=K[i] eor w12,w26,w26,ror#14 and w17,w27,w26 bic w19,w20,w26 add w21,w21,w9 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w12,ror#11 // Sigma1(e) ror w12,w22,#2 add w21,w21,w17 // h+=Ch(e,f,g) eor w17,w22,w22,ror#9 add w21,w21,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w25,w25,w21 // d+=h eor w28,w28,w23 // Maj(a,b,c) eor w17,w12,w17,ror#13 // Sigma0(a) add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w10,w10 // 7 #endif ldp w11,w12,[x1],#2*4 add w21,w21,w17 // h+=Sigma0(a) ror w16,w25,#6 add w20,w20,w28 // h+=K[i] eor w13,w25,w25,ror#14 and w17,w26,w25 bic w28,w27,w25 add w20,w20,w10 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w13,ror#11 // Sigma1(e) ror w13,w21,#2 add w20,w20,w17 // h+=Ch(e,f,g) eor w17,w21,w21,ror#9 add w20,w20,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w24,w24,w20 // d+=h eor w19,w19,w22 // Maj(a,b,c) eor w17,w13,w17,ror#13 // Sigma0(a) add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w20,w20,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w11,w11 // 8 #endif add w20,w20,w17 // h+=Sigma0(a) ror w16,w24,#6 add w27,w27,w19 // h+=K[i] eor w14,w24,w24,ror#14 and w17,w25,w24 bic w19,w26,w24 add w27,w27,w11 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w14,ror#11 // Sigma1(e) ror w14,w20,#2 add w27,w27,w17 // h+=Ch(e,f,g) eor w17,w20,w20,ror#9 add w27,w27,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w23,w23,w27 // d+=h eor w28,w28,w21 // Maj(a,b,c) eor w17,w14,w17,ror#13 // Sigma0(a) add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w12,w12 // 9 #endif ldp w13,w14,[x1],#2*4 add w27,w27,w17 // h+=Sigma0(a) ror w16,w23,#6 add w26,w26,w28 // h+=K[i] eor w15,w23,w23,ror#14 and w17,w24,w23 bic w28,w25,w23 add w26,w26,w12 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w15,ror#11 // Sigma1(e) ror w15,w27,#2 add w26,w26,w17 // h+=Ch(e,f,g) eor w17,w27,w27,ror#9 add w26,w26,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w22,w22,w26 // d+=h eor w19,w19,w20 // Maj(a,b,c) eor w17,w15,w17,ror#13 // Sigma0(a) add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w13,w13 // 10 #endif add w26,w26,w17 // h+=Sigma0(a) ror w16,w22,#6 add w25,w25,w19 // h+=K[i] eor w0,w22,w22,ror#14 and w17,w23,w22 bic w19,w24,w22 add w25,w25,w13 // h+=X[i] orr 
w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w0,ror#11 // Sigma1(e) ror w0,w26,#2 add w25,w25,w17 // h+=Ch(e,f,g) eor w17,w26,w26,ror#9 add w25,w25,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w21,w21,w25 // d+=h eor w28,w28,w27 // Maj(a,b,c) eor w17,w0,w17,ror#13 // Sigma0(a) add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w14,w14 // 11 #endif ldp w15,w0,[x1],#2*4 add w25,w25,w17 // h+=Sigma0(a) str w6,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] eor w6,w21,w21,ror#14 and w17,w22,w21 bic w28,w23,w21 add w24,w24,w14 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w6,ror#11 // Sigma1(e) ror w6,w25,#2 add w24,w24,w17 // h+=Ch(e,f,g) eor w17,w25,w25,ror#9 add w24,w24,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w20,w20,w24 // d+=h eor w19,w19,w26 // Maj(a,b,c) eor w17,w6,w17,ror#13 // Sigma0(a) add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w15,w15 // 12 #endif add w24,w24,w17 // h+=Sigma0(a) str w7,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] eor w7,w20,w20,ror#14 and w17,w21,w20 bic w19,w22,w20 add w23,w23,w15 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w7,ror#11 // Sigma1(e) ror w7,w24,#2 add w23,w23,w17 // h+=Ch(e,f,g) eor w17,w24,w24,ror#9 add w23,w23,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w27,w27,w23 // d+=h eor w28,w28,w25 // Maj(a,b,c) eor w17,w7,w17,ror#13 // Sigma0(a) add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w0,w0 // 13 #endif ldp w1,w2,[x1] add w23,w23,w17 // h+=Sigma0(a) str w8,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] eor w8,w27,w27,ror#14 and w17,w20,w27 bic w28,w21,w27 add w22,w22,w0 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w8,ror#11 // Sigma1(e) ror w8,w23,#2 add w22,w22,w17 // h+=Ch(e,f,g) eor w17,w23,w23,ror#9 add w22,w22,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w26,w26,w22 // d+=h eor w19,w19,w24 // Maj(a,b,c) eor w17,w8,w17,ror#13 // Sigma0(a) add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w1,w1 // 14 #endif ldr w6,[sp,#12] add w22,w22,w17 // h+=Sigma0(a) str w9,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] eor w9,w26,w26,ror#14 and w17,w27,w26 bic w19,w20,w26 add w21,w21,w1 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w9,ror#11 // Sigma1(e) ror w9,w22,#2 add w21,w21,w17 // h+=Ch(e,f,g) eor w17,w22,w22,ror#9 add w21,w21,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w25,w25,w21 // d+=h eor w28,w28,w23 // Maj(a,b,c) eor w17,w9,w17,ror#13 // Sigma0(a) add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w2,w2 // 15 #endif ldr w7,[sp,#0] add w21,w21,w17 // h+=Sigma0(a) str w10,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w9,w4,#7 and w17,w26,w25 ror w8,w1,#17 bic w28,w27,w25 ror w10,w21,#2 add w20,w20,w2 // h+=X[i] eor w16,w16,w25,ror#11 eor w9,w9,w4,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w10,w10,w21,ror#13 add w20,w20,w17 // 
h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w8,w8,w1,ror#19 eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w10,w21,ror#22 // Sigma0(a) eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) add w3,w3,w12 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w3,w3,w9 add w20,w20,w17 // h+=Sigma0(a) add w3,w3,w8 Loop_16_xx: ldr w8,[sp,#4] str w11,[sp,#0] ror w16,w24,#6 add w27,w27,w19 // h+=K[i] ror w10,w5,#7 and w17,w25,w24 ror w9,w2,#17 bic w19,w26,w24 ror w11,w20,#2 add w27,w27,w3 // h+=X[i] eor w16,w16,w24,ror#11 eor w10,w10,w5,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w24,ror#25 // Sigma1(e) eor w11,w11,w20,ror#13 add w27,w27,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w9,w9,w2,ror#19 eor w10,w10,w5,lsr#3 // sigma0(X[i+1]) add w27,w27,w16 // h+=Sigma1(e) eor w28,w28,w21 // Maj(a,b,c) eor w17,w11,w20,ror#22 // Sigma0(a) eor w9,w9,w2,lsr#10 // sigma1(X[i+14]) add w4,w4,w13 add w23,w23,w27 // d+=h add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w4,w4,w10 add w27,w27,w17 // h+=Sigma0(a) add w4,w4,w9 ldr w9,[sp,#8] str w12,[sp,#4] ror w16,w23,#6 add w26,w26,w28 // h+=K[i] ror w11,w6,#7 and w17,w24,w23 ror w10,w3,#17 bic w28,w25,w23 ror w12,w27,#2 add w26,w26,w4 // h+=X[i] eor w16,w16,w23,ror#11 eor w11,w11,w6,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w23,ror#25 // Sigma1(e) eor w12,w12,w27,ror#13 add w26,w26,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w10,w10,w3,ror#19 eor w11,w11,w6,lsr#3 // sigma0(X[i+1]) add w26,w26,w16 // h+=Sigma1(e) eor w19,w19,w20 // Maj(a,b,c) eor w17,w12,w27,ror#22 // Sigma0(a) eor w10,w10,w3,lsr#10 // sigma1(X[i+14]) add w5,w5,w14 add w22,w22,w26 // d+=h add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w5,w5,w11 add w26,w26,w17 // h+=Sigma0(a) add w5,w5,w10 ldr w10,[sp,#12] str w13,[sp,#8] ror w16,w22,#6 add w25,w25,w19 // h+=K[i] ror w12,w7,#7 and w17,w23,w22 ror w11,w4,#17 bic w19,w24,w22 ror w13,w26,#2 add w25,w25,w5 // h+=X[i] eor w16,w16,w22,ror#11 eor w12,w12,w7,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w22,ror#25 // Sigma1(e) eor w13,w13,w26,ror#13 add w25,w25,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w11,w11,w4,ror#19 eor w12,w12,w7,lsr#3 // sigma0(X[i+1]) add w25,w25,w16 // h+=Sigma1(e) eor w28,w28,w27 // Maj(a,b,c) eor w17,w13,w26,ror#22 // Sigma0(a) eor w11,w11,w4,lsr#10 // sigma1(X[i+14]) add w6,w6,w15 add w21,w21,w25 // d+=h add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w6,w6,w12 add w25,w25,w17 // h+=Sigma0(a) add w6,w6,w11 ldr w11,[sp,#0] str w14,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] ror w13,w8,#7 and w17,w22,w21 ror w12,w5,#17 bic w28,w23,w21 ror w14,w25,#2 add w24,w24,w6 // h+=X[i] eor w16,w16,w21,ror#11 eor w13,w13,w8,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w21,ror#25 // Sigma1(e) eor w14,w14,w25,ror#13 add w24,w24,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w12,w12,w5,ror#19 eor w13,w13,w8,lsr#3 // sigma0(X[i+1]) add w24,w24,w16 // h+=Sigma1(e) eor w19,w19,w26 // Maj(a,b,c) eor w17,w14,w25,ror#22 // Sigma0(a) eor w12,w12,w5,lsr#10 // sigma1(X[i+14]) add w7,w7,w0 add w20,w20,w24 // d+=h add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w7,w7,w13 add w24,w24,w17 // 
h+=Sigma0(a) add w7,w7,w12 ldr w12,[sp,#4] str w15,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] ror w14,w9,#7 and w17,w21,w20 ror w13,w6,#17 bic w19,w22,w20 ror w15,w24,#2 add w23,w23,w7 // h+=X[i] eor w16,w16,w20,ror#11 eor w14,w14,w9,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w20,ror#25 // Sigma1(e) eor w15,w15,w24,ror#13 add w23,w23,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w13,w13,w6,ror#19 eor w14,w14,w9,lsr#3 // sigma0(X[i+1]) add w23,w23,w16 // h+=Sigma1(e) eor w28,w28,w25 // Maj(a,b,c) eor w17,w15,w24,ror#22 // Sigma0(a) eor w13,w13,w6,lsr#10 // sigma1(X[i+14]) add w8,w8,w1 add w27,w27,w23 // d+=h add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w8,w8,w14 add w23,w23,w17 // h+=Sigma0(a) add w8,w8,w13 ldr w13,[sp,#8] str w0,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] ror w15,w10,#7 and w17,w20,w27 ror w14,w7,#17 bic w28,w21,w27 ror w0,w23,#2 add w22,w22,w8 // h+=X[i] eor w16,w16,w27,ror#11 eor w15,w15,w10,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w27,ror#25 // Sigma1(e) eor w0,w0,w23,ror#13 add w22,w22,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w14,w14,w7,ror#19 eor w15,w15,w10,lsr#3 // sigma0(X[i+1]) add w22,w22,w16 // h+=Sigma1(e) eor w19,w19,w24 // Maj(a,b,c) eor w17,w0,w23,ror#22 // Sigma0(a) eor w14,w14,w7,lsr#10 // sigma1(X[i+14]) add w9,w9,w2 add w26,w26,w22 // d+=h add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w9,w9,w15 add w22,w22,w17 // h+=Sigma0(a) add w9,w9,w14 ldr w14,[sp,#12] str w1,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] ror w0,w11,#7 and w17,w27,w26 ror w15,w8,#17 bic w19,w20,w26 ror w1,w22,#2 add w21,w21,w9 // h+=X[i] eor w16,w16,w26,ror#11 eor w0,w0,w11,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w26,ror#25 // Sigma1(e) eor w1,w1,w22,ror#13 add w21,w21,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w15,w15,w8,ror#19 eor w0,w0,w11,lsr#3 // sigma0(X[i+1]) add w21,w21,w16 // h+=Sigma1(e) eor w28,w28,w23 // Maj(a,b,c) eor w17,w1,w22,ror#22 // Sigma0(a) eor w15,w15,w8,lsr#10 // sigma1(X[i+14]) add w10,w10,w3 add w25,w25,w21 // d+=h add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w10,w10,w0 add w21,w21,w17 // h+=Sigma0(a) add w10,w10,w15 ldr w15,[sp,#0] str w2,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w1,w12,#7 and w17,w26,w25 ror w0,w9,#17 bic w28,w27,w25 ror w2,w21,#2 add w20,w20,w10 // h+=X[i] eor w16,w16,w25,ror#11 eor w1,w1,w12,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w2,w2,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w0,w0,w9,ror#19 eor w1,w1,w12,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w2,w21,ror#22 // Sigma0(a) eor w0,w0,w9,lsr#10 // sigma1(X[i+14]) add w11,w11,w4 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w11,w11,w1 add w20,w20,w17 // h+=Sigma0(a) add w11,w11,w0 ldr w0,[sp,#4] str w3,[sp,#0] ror w16,w24,#6 add w27,w27,w19 // h+=K[i] ror w2,w13,#7 and w17,w25,w24 ror w1,w10,#17 bic w19,w26,w24 ror w3,w20,#2 add w27,w27,w11 // h+=X[i] eor w16,w16,w24,ror#11 eor w2,w2,w13,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w24,ror#25 // Sigma1(e) eor w3,w3,w20,ror#13 add w27,w27,w17 // h+=Ch(e,f,g) 
and w28,w28,w19 // (b^c)&=(a^b) eor w1,w1,w10,ror#19 eor w2,w2,w13,lsr#3 // sigma0(X[i+1]) add w27,w27,w16 // h+=Sigma1(e) eor w28,w28,w21 // Maj(a,b,c) eor w17,w3,w20,ror#22 // Sigma0(a) eor w1,w1,w10,lsr#10 // sigma1(X[i+14]) add w12,w12,w5 add w23,w23,w27 // d+=h add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w12,w12,w2 add w27,w27,w17 // h+=Sigma0(a) add w12,w12,w1 ldr w1,[sp,#8] str w4,[sp,#4] ror w16,w23,#6 add w26,w26,w28 // h+=K[i] ror w3,w14,#7 and w17,w24,w23 ror w2,w11,#17 bic w28,w25,w23 ror w4,w27,#2 add w26,w26,w12 // h+=X[i] eor w16,w16,w23,ror#11 eor w3,w3,w14,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w23,ror#25 // Sigma1(e) eor w4,w4,w27,ror#13 add w26,w26,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w2,w2,w11,ror#19 eor w3,w3,w14,lsr#3 // sigma0(X[i+1]) add w26,w26,w16 // h+=Sigma1(e) eor w19,w19,w20 // Maj(a,b,c) eor w17,w4,w27,ror#22 // Sigma0(a) eor w2,w2,w11,lsr#10 // sigma1(X[i+14]) add w13,w13,w6 add w22,w22,w26 // d+=h add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w13,w13,w3 add w26,w26,w17 // h+=Sigma0(a) add w13,w13,w2 ldr w2,[sp,#12] str w5,[sp,#8] ror w16,w22,#6 add w25,w25,w19 // h+=K[i] ror w4,w15,#7 and w17,w23,w22 ror w3,w12,#17 bic w19,w24,w22 ror w5,w26,#2 add w25,w25,w13 // h+=X[i] eor w16,w16,w22,ror#11 eor w4,w4,w15,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w22,ror#25 // Sigma1(e) eor w5,w5,w26,ror#13 add w25,w25,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w3,w3,w12,ror#19 eor w4,w4,w15,lsr#3 // sigma0(X[i+1]) add w25,w25,w16 // h+=Sigma1(e) eor w28,w28,w27 // Maj(a,b,c) eor w17,w5,w26,ror#22 // Sigma0(a) eor w3,w3,w12,lsr#10 // sigma1(X[i+14]) add w14,w14,w7 add w21,w21,w25 // d+=h add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w14,w14,w4 add w25,w25,w17 // h+=Sigma0(a) add w14,w14,w3 ldr w3,[sp,#0] str w6,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] ror w5,w0,#7 and w17,w22,w21 ror w4,w13,#17 bic w28,w23,w21 ror w6,w25,#2 add w24,w24,w14 // h+=X[i] eor w16,w16,w21,ror#11 eor w5,w5,w0,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w21,ror#25 // Sigma1(e) eor w6,w6,w25,ror#13 add w24,w24,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w4,w4,w13,ror#19 eor w5,w5,w0,lsr#3 // sigma0(X[i+1]) add w24,w24,w16 // h+=Sigma1(e) eor w19,w19,w26 // Maj(a,b,c) eor w17,w6,w25,ror#22 // Sigma0(a) eor w4,w4,w13,lsr#10 // sigma1(X[i+14]) add w15,w15,w8 add w20,w20,w24 // d+=h add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w15,w15,w5 add w24,w24,w17 // h+=Sigma0(a) add w15,w15,w4 ldr w4,[sp,#4] str w7,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] ror w6,w1,#7 and w17,w21,w20 ror w5,w14,#17 bic w19,w22,w20 ror w7,w24,#2 add w23,w23,w15 // h+=X[i] eor w16,w16,w20,ror#11 eor w6,w6,w1,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w20,ror#25 // Sigma1(e) eor w7,w7,w24,ror#13 add w23,w23,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w5,w5,w14,ror#19 eor w6,w6,w1,lsr#3 // sigma0(X[i+1]) add w23,w23,w16 // h+=Sigma1(e) eor w28,w28,w25 // Maj(a,b,c) eor w17,w7,w24,ror#22 // Sigma0(a) eor w5,w5,w14,lsr#10 // sigma1(X[i+14]) add w0,w0,w9 add w27,w27,w23 // d+=h add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w0,w0,w6 add w23,w23,w17 // h+=Sigma0(a) add w0,w0,w5 ldr w5,[sp,#8] str 
w8,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] ror w7,w2,#7 and w17,w20,w27 ror w6,w15,#17 bic w28,w21,w27 ror w8,w23,#2 add w22,w22,w0 // h+=X[i] eor w16,w16,w27,ror#11 eor w7,w7,w2,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w27,ror#25 // Sigma1(e) eor w8,w8,w23,ror#13 add w22,w22,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w6,w6,w15,ror#19 eor w7,w7,w2,lsr#3 // sigma0(X[i+1]) add w22,w22,w16 // h+=Sigma1(e) eor w19,w19,w24 // Maj(a,b,c) eor w17,w8,w23,ror#22 // Sigma0(a) eor w6,w6,w15,lsr#10 // sigma1(X[i+14]) add w1,w1,w10 add w26,w26,w22 // d+=h add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w1,w1,w7 add w22,w22,w17 // h+=Sigma0(a) add w1,w1,w6 ldr w6,[sp,#12] str w9,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] ror w8,w3,#7 and w17,w27,w26 ror w7,w0,#17 bic w19,w20,w26 ror w9,w22,#2 add w21,w21,w1 // h+=X[i] eor w16,w16,w26,ror#11 eor w8,w8,w3,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w26,ror#25 // Sigma1(e) eor w9,w9,w22,ror#13 add w21,w21,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w7,w7,w0,ror#19 eor w8,w8,w3,lsr#3 // sigma0(X[i+1]) add w21,w21,w16 // h+=Sigma1(e) eor w28,w28,w23 // Maj(a,b,c) eor w17,w9,w22,ror#22 // Sigma0(a) eor w7,w7,w0,lsr#10 // sigma1(X[i+14]) add w2,w2,w11 add w25,w25,w21 // d+=h add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w2,w2,w8 add w21,w21,w17 // h+=Sigma0(a) add w2,w2,w7 ldr w7,[sp,#0] str w10,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w9,w4,#7 and w17,w26,w25 ror w8,w1,#17 bic w28,w27,w25 ror w10,w21,#2 add w20,w20,w2 // h+=X[i] eor w16,w16,w25,ror#11 eor w9,w9,w4,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w10,w10,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w8,w8,w1,ror#19 eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w10,w21,ror#22 // Sigma0(a) eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) add w3,w3,w12 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w3,w3,w9 add w20,w20,w17 // h+=Sigma0(a) add w3,w3,w8 cbnz w19,Loop_16_xx ldp x0,x2,[x29,#96] ldr x1,[x29,#112] sub x30,x30,#260 // rewind ldp w3,w4,[x0] ldp w5,w6,[x0,#2*4] add x1,x1,#14*4 // advance input pointer ldp w7,w8,[x0,#4*4] add w20,w20,w3 ldp w9,w10,[x0,#6*4] add w21,w21,w4 add w22,w22,w5 add w23,w23,w6 stp w20,w21,[x0] add w24,w24,w7 add w25,w25,w8 stp w22,w23,[x0,#2*4] add w26,w26,w9 add w27,w27,w10 cmp x1,x2 stp w24,w25,[x0,#4*4] stp w26,w27,[x0,#6*4] b.ne Loop ldp x19,x20,[x29,#16] add sp,sp,#4*4 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 AARCH64_VALIDATE_LINK_REGISTER ret .section __TEXT,__const .align 6 LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 
.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0 //terminator .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 .text #ifndef __KERNEL__ .globl _sha256_block_data_order_hw .private_extern _sha256_block_data_order_hw .align 6 _sha256_block_data_order_hw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 ld1 {v0.4s,v1.4s},[x0] adrp x3,LK256@PAGE add x3,x3,LK256@PAGEOFF Loop_hw: ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 sub x2,x2,#1 ld1 {v16.4s},[x3],#16 rev32 v4.16b,v4.16b rev32 v5.16b,v5.16b rev32 v6.16b,v6.16b rev32 v7.16b,v7.16b orr v18.16b,v0.16b,v0.16b // offload orr v19.16b,v1.16b,v1.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .long 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .long 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .long 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .long 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .long 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .long 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .long 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .long 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .long 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .long 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h 
v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .long 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .long 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .long 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .long 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s ld1 {v17.4s},[x3] add v16.4s,v16.4s,v6.4s sub x3,x3,#64*4-16 // rewind orr v2.16b,v0.16b,v0.16b .long 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .long 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s add v17.4s,v17.4s,v7.4s orr v2.16b,v0.16b,v0.16b .long 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .long 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s add v0.4s,v0.4s,v18.4s add v1.4s,v1.4s,v19.4s cbnz x2,Loop_hw st1 {v0.4s,v1.4s},[x0] ldr x29,[sp],#16 ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
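Note on the hardware path above (_sha256_block_data_order_hw): the .long words such as 0x5e104020 //sha256h are the raw encodings of the Armv8 SHA-256 instructions, emitted numerically so assemblers without crypto support still build the file; the trailing comments name the instruction each word encodes. Purely as an illustrative sketch (not part of ring, and assuming a compiler targeting armv8-a with the SHA-2 extension), one ld1/add/sha256su0/sha256h/sha256h2/sha256su1 group from that loop corresponds to the Arm ACLE intrinsics below; the helper name sha256_hw_step and its argument layout are assumptions made for clarity.

#include <arm_neon.h>  // ACLE SHA-2 intrinsics; needs e.g. -march=armv8-a+crypto

// Minimal sketch of one scheduled step of the hardware loop above.
static inline void sha256_hw_step(uint32x4_t *abcd, uint32x4_t *efgh,
                                  uint32x4_t *w0, uint32x4_t w1,
                                  uint32x4_t w2, uint32x4_t w3,
                                  uint32x4_t k) {
    uint32x4_t wk = vaddq_u32(*w0, k);               // add  vN.4s, vN.4s, vK.4s
    uint32x4_t abcd_saved = *abcd;                   // orr  v2.16b, v0.16b, v0.16b (offload)
    *w0 = vsha256su1q_u32(vsha256su0q_u32(*w0, w1),  // sha256su0 / sha256su1:
                          w2, w3);                   // extend W for a later step
    *abcd = vsha256hq_u32(*abcd, *efgh, wk);         // sha256h  v0, v1, wk
    *efgh = vsha256h2q_u32(*efgh, abcd_saved, wk);   // sha256h2 v1, v2, wk
}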
marvin-hansen/iggy-streaming-system
34,203
thirdparty/crates/ring-0.17.9/pregenerated/sha256-armv8-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) // Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy // in the file LICENSE in the source distribution or at // https://www.openssl.org/source/license.html // ==================================================================== // Written by Andy Polyakov <appro@openssl.org> for the OpenSSL // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. // // Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. // // Performance in cycles per processed byte and improvement coefficient // over code generated with "default" compiler: // // SHA256-hw SHA256(*) SHA512 // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) // Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. // (**) The result is a trade-off: it's possible to improve it by // 10% (or by 1 cycle per round), but at the cost of 20% loss // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code // generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ # include <ring-core/arm_arch.h> #endif .text .globl sha256_block_data_order_nohw .hidden sha256_block_data_order_nohw .type sha256_block_data_order_nohw,%function .align 6 sha256_block_data_order_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#4*4 ldp w20,w21,[x0] // load context ldp w22,w23,[x0,#2*4] ldp w24,w25,[x0,#4*4] add x2,x1,x2,lsl#6 // end of input ldp w26,w27,[x0,#6*4] adrp x30,.LK256 add x30,x30,:lo12:.LK256 stp x0,x2,[x29,#96] .Loop: ldp w3,w4,[x1],#2*4 ldr w19,[x30],#4 // *K++ eor w28,w21,w22 // magic seed str x1,[x29,#112] #ifndef __AARCH64EB__ rev w3,w3 // 0 #endif ror w16,w24,#6 add w27,w27,w19 // h+=K[i] eor w6,w24,w24,ror#14 and w17,w25,w24 bic w19,w26,w24 add w27,w27,w3 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w6,ror#11 // Sigma1(e) ror w6,w20,#2 add w27,w27,w17 // h+=Ch(e,f,g) eor w17,w20,w20,ror#9 add w27,w27,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w23,w23,w27 // d+=h eor w28,w28,w21 // Maj(a,b,c) eor w17,w6,w17,ror#13 // Sigma0(a) add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w4,w4 // 1 #endif ldp w5,w6,[x1],#2*4 add w27,w27,w17 // h+=Sigma0(a) ror w16,w23,#6 add w26,w26,w28 // h+=K[i] eor w7,w23,w23,ror#14 and w17,w24,w23 bic w28,w25,w23 add w26,w26,w4 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w7,ror#11 // Sigma1(e) ror w7,w27,#2 add w26,w26,w17 // h+=Ch(e,f,g) eor w17,w27,w27,ror#9 add w26,w26,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w22,w22,w26 // d+=h eor w19,w19,w20 // Maj(a,b,c) eor w17,w7,w17,ror#13 // Sigma0(a) add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w5,w5 // 2 #endif add w26,w26,w17 // h+=Sigma0(a) ror w16,w22,#6 add w25,w25,w19 // h+=K[i] eor w8,w22,w22,ror#14 and w17,w23,w22 bic w19,w24,w22 add w25,w25,w5 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w8,ror#11 // Sigma1(e) ror w8,w26,#2 add w25,w25,w17 // h+=Ch(e,f,g) eor w17,w26,w26,ror#9 add w25,w25,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w21,w21,w25 // d+=h eor w28,w28,w27 // Maj(a,b,c) eor w17,w8,w17,ror#13 // Sigma0(a) add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w6,w6 // 3 #endif ldp w7,w8,[x1],#2*4 add w25,w25,w17 // h+=Sigma0(a) ror w16,w21,#6 add w24,w24,w28 // h+=K[i] eor w9,w21,w21,ror#14 and w17,w22,w21 bic w28,w23,w21 add w24,w24,w6 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w9,ror#11 // Sigma1(e) ror w9,w25,#2 add w24,w24,w17 // h+=Ch(e,f,g) eor w17,w25,w25,ror#9 add w24,w24,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w20,w20,w24 // d+=h eor w19,w19,w26 // Maj(a,b,c) eor w17,w9,w17,ror#13 // Sigma0(a) add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w7,w7 // 4 #endif add w24,w24,w17 // h+=Sigma0(a) ror w16,w20,#6 add w23,w23,w19 // h+=K[i] eor w10,w20,w20,ror#14 and w17,w21,w20 bic w19,w22,w20 add w23,w23,w7 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w10,ror#11 // Sigma1(e) ror w10,w24,#2 add w23,w23,w17 // h+=Ch(e,f,g) eor w17,w24,w24,ror#9 add w23,w23,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w27,w27,w23 // d+=h eor w28,w28,w25 // Maj(a,b,c) eor w17,w10,w17,ror#13 // Sigma0(a) add w23,w23,w28 // h+=Maj(a,b,c) ldr 
w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w8,w8 // 5 #endif ldp w9,w10,[x1],#2*4 add w23,w23,w17 // h+=Sigma0(a) ror w16,w27,#6 add w22,w22,w28 // h+=K[i] eor w11,w27,w27,ror#14 and w17,w20,w27 bic w28,w21,w27 add w22,w22,w8 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w11,ror#11 // Sigma1(e) ror w11,w23,#2 add w22,w22,w17 // h+=Ch(e,f,g) eor w17,w23,w23,ror#9 add w22,w22,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w26,w26,w22 // d+=h eor w19,w19,w24 // Maj(a,b,c) eor w17,w11,w17,ror#13 // Sigma0(a) add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w9,w9 // 6 #endif add w22,w22,w17 // h+=Sigma0(a) ror w16,w26,#6 add w21,w21,w19 // h+=K[i] eor w12,w26,w26,ror#14 and w17,w27,w26 bic w19,w20,w26 add w21,w21,w9 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w12,ror#11 // Sigma1(e) ror w12,w22,#2 add w21,w21,w17 // h+=Ch(e,f,g) eor w17,w22,w22,ror#9 add w21,w21,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w25,w25,w21 // d+=h eor w28,w28,w23 // Maj(a,b,c) eor w17,w12,w17,ror#13 // Sigma0(a) add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w10,w10 // 7 #endif ldp w11,w12,[x1],#2*4 add w21,w21,w17 // h+=Sigma0(a) ror w16,w25,#6 add w20,w20,w28 // h+=K[i] eor w13,w25,w25,ror#14 and w17,w26,w25 bic w28,w27,w25 add w20,w20,w10 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w13,ror#11 // Sigma1(e) ror w13,w21,#2 add w20,w20,w17 // h+=Ch(e,f,g) eor w17,w21,w21,ror#9 add w20,w20,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w24,w24,w20 // d+=h eor w19,w19,w22 // Maj(a,b,c) eor w17,w13,w17,ror#13 // Sigma0(a) add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w20,w20,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w11,w11 // 8 #endif add w20,w20,w17 // h+=Sigma0(a) ror w16,w24,#6 add w27,w27,w19 // h+=K[i] eor w14,w24,w24,ror#14 and w17,w25,w24 bic w19,w26,w24 add w27,w27,w11 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w14,ror#11 // Sigma1(e) ror w14,w20,#2 add w27,w27,w17 // h+=Ch(e,f,g) eor w17,w20,w20,ror#9 add w27,w27,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w23,w23,w27 // d+=h eor w28,w28,w21 // Maj(a,b,c) eor w17,w14,w17,ror#13 // Sigma0(a) add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w12,w12 // 9 #endif ldp w13,w14,[x1],#2*4 add w27,w27,w17 // h+=Sigma0(a) ror w16,w23,#6 add w26,w26,w28 // h+=K[i] eor w15,w23,w23,ror#14 and w17,w24,w23 bic w28,w25,w23 add w26,w26,w12 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w15,ror#11 // Sigma1(e) ror w15,w27,#2 add w26,w26,w17 // h+=Ch(e,f,g) eor w17,w27,w27,ror#9 add w26,w26,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w22,w22,w26 // d+=h eor w19,w19,w20 // Maj(a,b,c) eor w17,w15,w17,ror#13 // Sigma0(a) add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w13,w13 // 10 #endif add w26,w26,w17 // h+=Sigma0(a) ror w16,w22,#6 add w25,w25,w19 // h+=K[i] eor w0,w22,w22,ror#14 and w17,w23,w22 bic w19,w24,w22 add w25,w25,w13 // h+=X[i] orr 
w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w0,ror#11 // Sigma1(e) ror w0,w26,#2 add w25,w25,w17 // h+=Ch(e,f,g) eor w17,w26,w26,ror#9 add w25,w25,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w21,w21,w25 // d+=h eor w28,w28,w27 // Maj(a,b,c) eor w17,w0,w17,ror#13 // Sigma0(a) add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w14,w14 // 11 #endif ldp w15,w0,[x1],#2*4 add w25,w25,w17 // h+=Sigma0(a) str w6,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] eor w6,w21,w21,ror#14 and w17,w22,w21 bic w28,w23,w21 add w24,w24,w14 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w6,ror#11 // Sigma1(e) ror w6,w25,#2 add w24,w24,w17 // h+=Ch(e,f,g) eor w17,w25,w25,ror#9 add w24,w24,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w20,w20,w24 // d+=h eor w19,w19,w26 // Maj(a,b,c) eor w17,w6,w17,ror#13 // Sigma0(a) add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w15,w15 // 12 #endif add w24,w24,w17 // h+=Sigma0(a) str w7,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] eor w7,w20,w20,ror#14 and w17,w21,w20 bic w19,w22,w20 add w23,w23,w15 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w7,ror#11 // Sigma1(e) ror w7,w24,#2 add w23,w23,w17 // h+=Ch(e,f,g) eor w17,w24,w24,ror#9 add w23,w23,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w27,w27,w23 // d+=h eor w28,w28,w25 // Maj(a,b,c) eor w17,w7,w17,ror#13 // Sigma0(a) add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w0,w0 // 13 #endif ldp w1,w2,[x1] add w23,w23,w17 // h+=Sigma0(a) str w8,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] eor w8,w27,w27,ror#14 and w17,w20,w27 bic w28,w21,w27 add w22,w22,w0 // h+=X[i] orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w8,ror#11 // Sigma1(e) ror w8,w23,#2 add w22,w22,w17 // h+=Ch(e,f,g) eor w17,w23,w23,ror#9 add w22,w22,w16 // h+=Sigma1(e) and w19,w19,w28 // (b^c)&=(a^b) add w26,w26,w22 // d+=h eor w19,w19,w24 // Maj(a,b,c) eor w17,w8,w17,ror#13 // Sigma0(a) add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w1,w1 // 14 #endif ldr w6,[sp,#12] add w22,w22,w17 // h+=Sigma0(a) str w9,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] eor w9,w26,w26,ror#14 and w17,w27,w26 bic w19,w20,w26 add w21,w21,w1 // h+=X[i] orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w9,ror#11 // Sigma1(e) ror w9,w22,#2 add w21,w21,w17 // h+=Ch(e,f,g) eor w17,w22,w22,ror#9 add w21,w21,w16 // h+=Sigma1(e) and w28,w28,w19 // (b^c)&=(a^b) add w25,w25,w21 // d+=h eor w28,w28,w23 // Maj(a,b,c) eor w17,w9,w17,ror#13 // Sigma0(a) add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev w2,w2 // 15 #endif ldr w7,[sp,#0] add w21,w21,w17 // h+=Sigma0(a) str w10,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w9,w4,#7 and w17,w26,w25 ror w8,w1,#17 bic w28,w27,w25 ror w10,w21,#2 add w20,w20,w2 // h+=X[i] eor w16,w16,w25,ror#11 eor w9,w9,w4,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w10,w10,w21,ror#13 add w20,w20,w17 // 
h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w8,w8,w1,ror#19 eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w10,w21,ror#22 // Sigma0(a) eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) add w3,w3,w12 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w3,w3,w9 add w20,w20,w17 // h+=Sigma0(a) add w3,w3,w8 .Loop_16_xx: ldr w8,[sp,#4] str w11,[sp,#0] ror w16,w24,#6 add w27,w27,w19 // h+=K[i] ror w10,w5,#7 and w17,w25,w24 ror w9,w2,#17 bic w19,w26,w24 ror w11,w20,#2 add w27,w27,w3 // h+=X[i] eor w16,w16,w24,ror#11 eor w10,w10,w5,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w24,ror#25 // Sigma1(e) eor w11,w11,w20,ror#13 add w27,w27,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w9,w9,w2,ror#19 eor w10,w10,w5,lsr#3 // sigma0(X[i+1]) add w27,w27,w16 // h+=Sigma1(e) eor w28,w28,w21 // Maj(a,b,c) eor w17,w11,w20,ror#22 // Sigma0(a) eor w9,w9,w2,lsr#10 // sigma1(X[i+14]) add w4,w4,w13 add w23,w23,w27 // d+=h add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w4,w4,w10 add w27,w27,w17 // h+=Sigma0(a) add w4,w4,w9 ldr w9,[sp,#8] str w12,[sp,#4] ror w16,w23,#6 add w26,w26,w28 // h+=K[i] ror w11,w6,#7 and w17,w24,w23 ror w10,w3,#17 bic w28,w25,w23 ror w12,w27,#2 add w26,w26,w4 // h+=X[i] eor w16,w16,w23,ror#11 eor w11,w11,w6,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w23,ror#25 // Sigma1(e) eor w12,w12,w27,ror#13 add w26,w26,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w10,w10,w3,ror#19 eor w11,w11,w6,lsr#3 // sigma0(X[i+1]) add w26,w26,w16 // h+=Sigma1(e) eor w19,w19,w20 // Maj(a,b,c) eor w17,w12,w27,ror#22 // Sigma0(a) eor w10,w10,w3,lsr#10 // sigma1(X[i+14]) add w5,w5,w14 add w22,w22,w26 // d+=h add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w5,w5,w11 add w26,w26,w17 // h+=Sigma0(a) add w5,w5,w10 ldr w10,[sp,#12] str w13,[sp,#8] ror w16,w22,#6 add w25,w25,w19 // h+=K[i] ror w12,w7,#7 and w17,w23,w22 ror w11,w4,#17 bic w19,w24,w22 ror w13,w26,#2 add w25,w25,w5 // h+=X[i] eor w16,w16,w22,ror#11 eor w12,w12,w7,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w22,ror#25 // Sigma1(e) eor w13,w13,w26,ror#13 add w25,w25,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w11,w11,w4,ror#19 eor w12,w12,w7,lsr#3 // sigma0(X[i+1]) add w25,w25,w16 // h+=Sigma1(e) eor w28,w28,w27 // Maj(a,b,c) eor w17,w13,w26,ror#22 // Sigma0(a) eor w11,w11,w4,lsr#10 // sigma1(X[i+14]) add w6,w6,w15 add w21,w21,w25 // d+=h add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w6,w6,w12 add w25,w25,w17 // h+=Sigma0(a) add w6,w6,w11 ldr w11,[sp,#0] str w14,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] ror w13,w8,#7 and w17,w22,w21 ror w12,w5,#17 bic w28,w23,w21 ror w14,w25,#2 add w24,w24,w6 // h+=X[i] eor w16,w16,w21,ror#11 eor w13,w13,w8,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w21,ror#25 // Sigma1(e) eor w14,w14,w25,ror#13 add w24,w24,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w12,w12,w5,ror#19 eor w13,w13,w8,lsr#3 // sigma0(X[i+1]) add w24,w24,w16 // h+=Sigma1(e) eor w19,w19,w26 // Maj(a,b,c) eor w17,w14,w25,ror#22 // Sigma0(a) eor w12,w12,w5,lsr#10 // sigma1(X[i+14]) add w7,w7,w0 add w20,w20,w24 // d+=h add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w7,w7,w13 add w24,w24,w17 // 
h+=Sigma0(a) add w7,w7,w12 ldr w12,[sp,#4] str w15,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] ror w14,w9,#7 and w17,w21,w20 ror w13,w6,#17 bic w19,w22,w20 ror w15,w24,#2 add w23,w23,w7 // h+=X[i] eor w16,w16,w20,ror#11 eor w14,w14,w9,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w20,ror#25 // Sigma1(e) eor w15,w15,w24,ror#13 add w23,w23,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w13,w13,w6,ror#19 eor w14,w14,w9,lsr#3 // sigma0(X[i+1]) add w23,w23,w16 // h+=Sigma1(e) eor w28,w28,w25 // Maj(a,b,c) eor w17,w15,w24,ror#22 // Sigma0(a) eor w13,w13,w6,lsr#10 // sigma1(X[i+14]) add w8,w8,w1 add w27,w27,w23 // d+=h add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w8,w8,w14 add w23,w23,w17 // h+=Sigma0(a) add w8,w8,w13 ldr w13,[sp,#8] str w0,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] ror w15,w10,#7 and w17,w20,w27 ror w14,w7,#17 bic w28,w21,w27 ror w0,w23,#2 add w22,w22,w8 // h+=X[i] eor w16,w16,w27,ror#11 eor w15,w15,w10,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w27,ror#25 // Sigma1(e) eor w0,w0,w23,ror#13 add w22,w22,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w14,w14,w7,ror#19 eor w15,w15,w10,lsr#3 // sigma0(X[i+1]) add w22,w22,w16 // h+=Sigma1(e) eor w19,w19,w24 // Maj(a,b,c) eor w17,w0,w23,ror#22 // Sigma0(a) eor w14,w14,w7,lsr#10 // sigma1(X[i+14]) add w9,w9,w2 add w26,w26,w22 // d+=h add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w9,w9,w15 add w22,w22,w17 // h+=Sigma0(a) add w9,w9,w14 ldr w14,[sp,#12] str w1,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] ror w0,w11,#7 and w17,w27,w26 ror w15,w8,#17 bic w19,w20,w26 ror w1,w22,#2 add w21,w21,w9 // h+=X[i] eor w16,w16,w26,ror#11 eor w0,w0,w11,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w26,ror#25 // Sigma1(e) eor w1,w1,w22,ror#13 add w21,w21,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w15,w15,w8,ror#19 eor w0,w0,w11,lsr#3 // sigma0(X[i+1]) add w21,w21,w16 // h+=Sigma1(e) eor w28,w28,w23 // Maj(a,b,c) eor w17,w1,w22,ror#22 // Sigma0(a) eor w15,w15,w8,lsr#10 // sigma1(X[i+14]) add w10,w10,w3 add w25,w25,w21 // d+=h add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w10,w10,w0 add w21,w21,w17 // h+=Sigma0(a) add w10,w10,w15 ldr w15,[sp,#0] str w2,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w1,w12,#7 and w17,w26,w25 ror w0,w9,#17 bic w28,w27,w25 ror w2,w21,#2 add w20,w20,w10 // h+=X[i] eor w16,w16,w25,ror#11 eor w1,w1,w12,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w2,w2,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w0,w0,w9,ror#19 eor w1,w1,w12,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w2,w21,ror#22 // Sigma0(a) eor w0,w0,w9,lsr#10 // sigma1(X[i+14]) add w11,w11,w4 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w11,w11,w1 add w20,w20,w17 // h+=Sigma0(a) add w11,w11,w0 ldr w0,[sp,#4] str w3,[sp,#0] ror w16,w24,#6 add w27,w27,w19 // h+=K[i] ror w2,w13,#7 and w17,w25,w24 ror w1,w10,#17 bic w19,w26,w24 ror w3,w20,#2 add w27,w27,w11 // h+=X[i] eor w16,w16,w24,ror#11 eor w2,w2,w13,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w20,w21 // a^b, b^c in next round eor w16,w16,w24,ror#25 // Sigma1(e) eor w3,w3,w20,ror#13 add w27,w27,w17 // h+=Ch(e,f,g) 
and w28,w28,w19 // (b^c)&=(a^b) eor w1,w1,w10,ror#19 eor w2,w2,w13,lsr#3 // sigma0(X[i+1]) add w27,w27,w16 // h+=Sigma1(e) eor w28,w28,w21 // Maj(a,b,c) eor w17,w3,w20,ror#22 // Sigma0(a) eor w1,w1,w10,lsr#10 // sigma1(X[i+14]) add w12,w12,w5 add w23,w23,w27 // d+=h add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w12,w12,w2 add w27,w27,w17 // h+=Sigma0(a) add w12,w12,w1 ldr w1,[sp,#8] str w4,[sp,#4] ror w16,w23,#6 add w26,w26,w28 // h+=K[i] ror w3,w14,#7 and w17,w24,w23 ror w2,w11,#17 bic w28,w25,w23 ror w4,w27,#2 add w26,w26,w12 // h+=X[i] eor w16,w16,w23,ror#11 eor w3,w3,w14,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w27,w20 // a^b, b^c in next round eor w16,w16,w23,ror#25 // Sigma1(e) eor w4,w4,w27,ror#13 add w26,w26,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w2,w2,w11,ror#19 eor w3,w3,w14,lsr#3 // sigma0(X[i+1]) add w26,w26,w16 // h+=Sigma1(e) eor w19,w19,w20 // Maj(a,b,c) eor w17,w4,w27,ror#22 // Sigma0(a) eor w2,w2,w11,lsr#10 // sigma1(X[i+14]) add w13,w13,w6 add w22,w22,w26 // d+=h add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w13,w13,w3 add w26,w26,w17 // h+=Sigma0(a) add w13,w13,w2 ldr w2,[sp,#12] str w5,[sp,#8] ror w16,w22,#6 add w25,w25,w19 // h+=K[i] ror w4,w15,#7 and w17,w23,w22 ror w3,w12,#17 bic w19,w24,w22 ror w5,w26,#2 add w25,w25,w13 // h+=X[i] eor w16,w16,w22,ror#11 eor w4,w4,w15,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w26,w27 // a^b, b^c in next round eor w16,w16,w22,ror#25 // Sigma1(e) eor w5,w5,w26,ror#13 add w25,w25,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w3,w3,w12,ror#19 eor w4,w4,w15,lsr#3 // sigma0(X[i+1]) add w25,w25,w16 // h+=Sigma1(e) eor w28,w28,w27 // Maj(a,b,c) eor w17,w5,w26,ror#22 // Sigma0(a) eor w3,w3,w12,lsr#10 // sigma1(X[i+14]) add w14,w14,w7 add w21,w21,w25 // d+=h add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w14,w14,w4 add w25,w25,w17 // h+=Sigma0(a) add w14,w14,w3 ldr w3,[sp,#0] str w6,[sp,#12] ror w16,w21,#6 add w24,w24,w28 // h+=K[i] ror w5,w0,#7 and w17,w22,w21 ror w4,w13,#17 bic w28,w23,w21 ror w6,w25,#2 add w24,w24,w14 // h+=X[i] eor w16,w16,w21,ror#11 eor w5,w5,w0,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w25,w26 // a^b, b^c in next round eor w16,w16,w21,ror#25 // Sigma1(e) eor w6,w6,w25,ror#13 add w24,w24,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w4,w4,w13,ror#19 eor w5,w5,w0,lsr#3 // sigma0(X[i+1]) add w24,w24,w16 // h+=Sigma1(e) eor w19,w19,w26 // Maj(a,b,c) eor w17,w6,w25,ror#22 // Sigma0(a) eor w4,w4,w13,lsr#10 // sigma1(X[i+14]) add w15,w15,w8 add w20,w20,w24 // d+=h add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w15,w15,w5 add w24,w24,w17 // h+=Sigma0(a) add w15,w15,w4 ldr w4,[sp,#4] str w7,[sp,#0] ror w16,w20,#6 add w23,w23,w19 // h+=K[i] ror w6,w1,#7 and w17,w21,w20 ror w5,w14,#17 bic w19,w22,w20 ror w7,w24,#2 add w23,w23,w15 // h+=X[i] eor w16,w16,w20,ror#11 eor w6,w6,w1,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w24,w25 // a^b, b^c in next round eor w16,w16,w20,ror#25 // Sigma1(e) eor w7,w7,w24,ror#13 add w23,w23,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w5,w5,w14,ror#19 eor w6,w6,w1,lsr#3 // sigma0(X[i+1]) add w23,w23,w16 // h+=Sigma1(e) eor w28,w28,w25 // Maj(a,b,c) eor w17,w7,w24,ror#22 // Sigma0(a) eor w5,w5,w14,lsr#10 // sigma1(X[i+14]) add w0,w0,w9 add w27,w27,w23 // d+=h add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w0,w0,w6 add w23,w23,w17 // h+=Sigma0(a) add w0,w0,w5 ldr w5,[sp,#8] str 
w8,[sp,#4] ror w16,w27,#6 add w22,w22,w28 // h+=K[i] ror w7,w2,#7 and w17,w20,w27 ror w6,w15,#17 bic w28,w21,w27 ror w8,w23,#2 add w22,w22,w0 // h+=X[i] eor w16,w16,w27,ror#11 eor w7,w7,w2,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w23,w24 // a^b, b^c in next round eor w16,w16,w27,ror#25 // Sigma1(e) eor w8,w8,w23,ror#13 add w22,w22,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w6,w6,w15,ror#19 eor w7,w7,w2,lsr#3 // sigma0(X[i+1]) add w22,w22,w16 // h+=Sigma1(e) eor w19,w19,w24 // Maj(a,b,c) eor w17,w8,w23,ror#22 // Sigma0(a) eor w6,w6,w15,lsr#10 // sigma1(X[i+14]) add w1,w1,w10 add w26,w26,w22 // d+=h add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w1,w1,w7 add w22,w22,w17 // h+=Sigma0(a) add w1,w1,w6 ldr w6,[sp,#12] str w9,[sp,#8] ror w16,w26,#6 add w21,w21,w19 // h+=K[i] ror w8,w3,#7 and w17,w27,w26 ror w7,w0,#17 bic w19,w20,w26 ror w9,w22,#2 add w21,w21,w1 // h+=X[i] eor w16,w16,w26,ror#11 eor w8,w8,w3,ror#18 orr w17,w17,w19 // Ch(e,f,g) eor w19,w22,w23 // a^b, b^c in next round eor w16,w16,w26,ror#25 // Sigma1(e) eor w9,w9,w22,ror#13 add w21,w21,w17 // h+=Ch(e,f,g) and w28,w28,w19 // (b^c)&=(a^b) eor w7,w7,w0,ror#19 eor w8,w8,w3,lsr#3 // sigma0(X[i+1]) add w21,w21,w16 // h+=Sigma1(e) eor w28,w28,w23 // Maj(a,b,c) eor w17,w9,w22,ror#22 // Sigma0(a) eor w7,w7,w0,lsr#10 // sigma1(X[i+14]) add w2,w2,w11 add w25,w25,w21 // d+=h add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round add w2,w2,w8 add w21,w21,w17 // h+=Sigma0(a) add w2,w2,w7 ldr w7,[sp,#0] str w10,[sp,#12] ror w16,w25,#6 add w20,w20,w28 // h+=K[i] ror w9,w4,#7 and w17,w26,w25 ror w8,w1,#17 bic w28,w27,w25 ror w10,w21,#2 add w20,w20,w2 // h+=X[i] eor w16,w16,w25,ror#11 eor w9,w9,w4,ror#18 orr w17,w17,w28 // Ch(e,f,g) eor w28,w21,w22 // a^b, b^c in next round eor w16,w16,w25,ror#25 // Sigma1(e) eor w10,w10,w21,ror#13 add w20,w20,w17 // h+=Ch(e,f,g) and w19,w19,w28 // (b^c)&=(a^b) eor w8,w8,w1,ror#19 eor w9,w9,w4,lsr#3 // sigma0(X[i+1]) add w20,w20,w16 // h+=Sigma1(e) eor w19,w19,w22 // Maj(a,b,c) eor w17,w10,w21,ror#22 // Sigma0(a) eor w8,w8,w1,lsr#10 // sigma1(X[i+14]) add w3,w3,w12 add w24,w24,w20 // d+=h add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round add w3,w3,w9 add w20,w20,w17 // h+=Sigma0(a) add w3,w3,w8 cbnz w19,.Loop_16_xx ldp x0,x2,[x29,#96] ldr x1,[x29,#112] sub x30,x30,#260 // rewind ldp w3,w4,[x0] ldp w5,w6,[x0,#2*4] add x1,x1,#14*4 // advance input pointer ldp w7,w8,[x0,#4*4] add w20,w20,w3 ldp w9,w10,[x0,#6*4] add w21,w21,w4 add w22,w22,w5 add w23,w23,w6 stp w20,w21,[x0] add w24,w24,w7 add w25,w25,w8 stp w22,w23,[x0,#2*4] add w26,w26,w9 add w27,w27,w10 cmp x1,x2 stp w24,w25,[x0,#4*4] stp w26,w27,[x0,#6*4] b.ne .Loop ldp x19,x20,[x29,#16] add sp,sp,#4*4 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 AARCH64_VALIDATE_LINK_REGISTER ret .size sha256_block_data_order_nohw,.-sha256_block_data_order_nohw .section .rodata .align 6 .type .LK256,%object .LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0 //terminator .size .LK256,.-.LK256 .byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 .text #ifndef __KERNEL__ .globl sha256_block_data_order_hw .hidden sha256_block_data_order_hw .type sha256_block_data_order_hw,%function .align 6 sha256_block_data_order_hw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 ld1 {v0.4s,v1.4s},[x0] adrp x3,.LK256 add x3,x3,:lo12:.LK256 .Loop_hw: ld1 {v4.16b,v5.16b,v6.16b,v7.16b},[x1],#64 sub x2,x2,#1 ld1 {v16.4s},[x3],#16 rev32 v4.16b,v4.16b rev32 v5.16b,v5.16b rev32 v6.16b,v6.16b rev32 v7.16b,v7.16b orr v18.16b,v0.16b,v0.16b // offload orr v19.16b,v1.16b,v1.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .inst 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .inst 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b ld1 
{v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v6.4s .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v7.4s .inst 0x5e282887 //sha256su0 v7.16b,v4.16b orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b ld1 {v17.4s},[x3],#16 add v16.4s,v16.4s,v4.4s orr v2.16b,v0.16b,v0.16b .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s ld1 {v16.4s},[x3],#16 add v17.4s,v17.4s,v5.4s orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s ld1 {v17.4s},[x3] add v16.4s,v16.4s,v6.4s sub x3,x3,#64*4-16 // rewind orr v2.16b,v0.16b,v0.16b .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s add v17.4s,v17.4s,v7.4s orr v2.16b,v0.16b,v0.16b .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s add v0.4s,v0.4s,v18.4s add v1.4s,v1.4s,v19.4s cbnz x2,.Loop_hw st1 {v0.4s,v1.4s},[x0] ldr x29,[sp],#16 ret .size sha256_block_data_order_hw,.-sha256_block_data_order_hw #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
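Note on the scalar path above (sha256_block_data_order_nohw): it unrolls the textbook SHA-256 round, and its comments name the terms being computed — Sigma1(e), Ch(e,f,g), Sigma0(a), Maj(a,b,c), plus sigma0(X[i+1]) and sigma1(X[i+14]) for the message schedule in .Loop_16_xx (each three-rotation Sigma is folded into two eor+ror pairs). The C below is only an illustrative restatement of those comments; the helper names are assumptions, not ring API.

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, unsigned n) { return (x >> n) | (x << (32 - n)); }

// Minimal sketch of one compression round, matching the per-register comments above.
static inline void sha256_scalar_round(uint32_t s[8], uint32_t k, uint32_t w) {
    uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
    uint32_t e = s[4], f = s[5], g = s[6], h = s[7];
    uint32_t Sigma1 = rotr32(e, 6) ^ rotr32(e, 11) ^ rotr32(e, 25);
    uint32_t Ch     = (e & f) ^ (~e & g);            // and / bic / orr in the assembly
    uint32_t Sigma0 = rotr32(a, 2) ^ rotr32(a, 13) ^ rotr32(a, 22);
    uint32_t Maj    = (a & b) ^ (a & c) ^ (b & c);
    uint32_t t1 = h + Sigma1 + Ch + k + w;           // "h+=K[i]; h+=X[i]; h+=Sigma1(e); h+=Ch(e,f,g)"
    uint32_t t2 = Sigma0 + Maj;                      // "h+=Sigma0(a); h+=Maj(a,b,c)"
    s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;     // "d+=h"
    s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;
}

// Message-schedule extension used in .Loop_16_xx:
// W[i] = W[i-16] + sigma0(W[i-15]) + W[i-7] + sigma1(W[i-2]).
static inline uint32_t sha256_extend(uint32_t w16, uint32_t w15, uint32_t w7, uint32_t w2) {
    uint32_t sigma0 = rotr32(w15, 7) ^ rotr32(w15, 18) ^ (w15 >> 3);
    uint32_t sigma1 = rotr32(w2, 17) ^ rotr32(w2, 19) ^ (w2 >> 10);
    return w16 + sigma0 + w7 + sigma1;
}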
marvin-hansen/iggy-streaming-system
26,239
thirdparty/crates/ring-0.17.9/pregenerated/vpaes-armv8-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include <ring-core/arm_arch.h> .section .rodata .type _vpaes_consts,%object .align 7 // totally strategic alignment _vpaes_consts: .Lk_mc_forward: // mc_forward .quad 0x0407060500030201, 0x0C0F0E0D080B0A09 .quad 0x080B0A0904070605, 0x000302010C0F0E0D .quad 0x0C0F0E0D080B0A09, 0x0407060500030201 .quad 0x000302010C0F0E0D, 0x080B0A0904070605 .Lk_mc_backward: // mc_backward .quad 0x0605040702010003, 0x0E0D0C0F0A09080B .quad 0x020100030E0D0C0F, 0x0A09080B06050407 .quad 0x0E0D0C0F0A09080B, 0x0605040702010003 .quad 0x0A09080B06050407, 0x020100030E0D0C0F .Lk_sr: // sr .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 .quad 0x030E09040F0A0500, 0x0B06010C07020D08 .quad 0x0F060D040B020900, 0x070E050C030A0108 .quad 0x0B0E0104070A0D00, 0x0306090C0F020508 // // "Hot" constants // .Lk_inv: // inv, inva .quad 0x0E05060F0D080180, 0x040703090A0B0C02 .quad 0x01040A060F0B0780, 0x030D0E0C02050809 .Lk_ipt: // input transform (lo, hi) .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 .Lk_sbo: // sbou, sbot .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA .Lk_sb1: // sb1u, sb1t .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 .Lk_sb2: // sb2u, sb2t .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD // // Key schedule constants // .Lk_dksd: // decryption key schedule: invskew x*D .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9 .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E .Lk_dksb: // decryption key schedule: invskew x*B .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99 .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8 .Lk_dkse: // decryption key schedule: invskew x*E + 0x63 .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086 .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487 .Lk_dks9: // decryption key schedule: invskew x*9 .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE .Lk_rcon: // rcon .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 .Lk_opt: // output transform .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 .Lk_deskew: // deskew tables: inverts the sbox's "skew" .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 .align 2 .size _vpaes_consts,.-_vpaes_consts .align 6 .text ## ## _aes_preheat ## ## Fills register %r10 -> .aes_consts (so you can -fPIC) ## and %xmm9-%xmm15 as specified below. ## .type _vpaes_encrypt_preheat,%function .align 4 _vpaes_encrypt_preheat: adrp x10, .Lk_inv add x10, x10, :lo12:.Lk_inv movi v17.16b, #0x0f ld1 {v18.2d,v19.2d}, [x10],#32 // .Lk_inv ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // .Lk_ipt, .Lk_sbo ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // .Lk_sb1, .Lk_sb2 ret .size _vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat ## ## _aes_encrypt_core ## ## AES-encrypt %xmm0. 
## ## Inputs: ## %xmm0 = input ## %xmm9-%xmm15 as in _vpaes_preheat ## (%rdx) = scheduled keys ## ## Output in %xmm0 ## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax ## Preserves %xmm6 - %xmm8 so you get some local vectors ## ## .type _vpaes_encrypt_core,%function .align 4 _vpaes_encrypt_core: mov x9, x2 ldr w8, [x2,#240] // pull rounds adrp x11, .Lk_mc_forward+16 add x11, x11, :lo12:.Lk_mc_forward+16 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0 tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 b .Lenc_entry .align 4 .Lenc_loop: // middle of middle round add x10, x11, #0x40 tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[] tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[] tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D and x11, x11, #~(1<<6) // and $0x30, %r11 # ... 
mod 4 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D sub w8, w8, #1 // nr-- .Lenc_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 cbnz w8, .Lenc_loop // middle of last round add x10, x11, #0x80 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[] tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 ret .size _vpaes_encrypt_core,.-_vpaes_encrypt_core .type _vpaes_encrypt_2x,%function .align 4 _vpaes_encrypt_2x: mov x9, x2 ldr w8, [x2,#240] // pull rounds adrp x11, .Lk_mc_forward+16 add x11, x11, :lo12:.Lk_mc_forward+16 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0 and v9.16b, v15.16b, v17.16b ushr v8.16b, v15.16b, #4 tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1 tbl v9.16b, {v20.16b}, v9.16b // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2 tbl v10.16b, {v21.16b}, v8.16b eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0 eor v8.16b, v9.16b, v16.16b eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 eor v8.16b, v8.16b, v10.16b b .Lenc_2x_entry .align 4 .Lenc_2x_loop: // middle of middle round add x10, x11, #0x40 tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u tbl v12.16b, {v25.16b}, v10.16b ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[] tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t tbl v8.16b, {v24.16b}, v11.16b eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u tbl v13.16b, {v27.16b}, v10.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A eor v8.16b, v8.16b, v12.16b tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t tbl v10.16b, {v26.16b}, v11.16b ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[] tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B tbl v11.16b, {v8.16b}, v1.16b eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A eor v10.16b, v10.16b, v13.16b tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, 
%xmm0, %xmm0 # 3 = D tbl v8.16b, {v8.16b}, v4.16b eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B eor v11.16b, v11.16b, v10.16b tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C tbl v12.16b, {v11.16b},v1.16b eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D eor v8.16b, v8.16b, v11.16b and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D eor v8.16b, v8.16b, v12.16b sub w8, w8, #1 // nr-- .Lenc_2x_entry: // top of round and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i and v9.16b, v8.16b, v17.16b ushr v8.16b, v8.16b, #4 tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k tbl v13.16b, {v19.16b},v9.16b eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j eor v9.16b, v9.16b, v8.16b tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i tbl v11.16b, {v18.16b},v8.16b tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j tbl v12.16b, {v18.16b},v9.16b eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k eor v11.16b, v11.16b, v13.16b eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k eor v12.16b, v12.16b, v13.16b tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak tbl v10.16b, {v18.16b},v11.16b tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak tbl v11.16b, {v18.16b},v12.16b eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io eor v10.16b, v10.16b, v9.16b eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo eor v11.16b, v11.16b, v8.16b ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5 cbnz w8, .Lenc_2x_loop // middle of last round add x10, x11, #0x80 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou tbl v12.16b, {v22.16b}, v10.16b ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[] tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t tbl v8.16b, {v23.16b}, v11.16b eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k eor v12.16b, v12.16b, v16.16b eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A eor v8.16b, v8.16b, v12.16b tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0 tbl v1.16b, {v8.16b},v1.16b ret .size _vpaes_encrypt_2x,.-_vpaes_encrypt_2x ######################################################## ## ## ## AES key schedule ## ## ## ######################################################## .type _vpaes_key_preheat,%function .align 4 _vpaes_key_preheat: adrp x10, .Lk_inv add x10, x10, :lo12:.Lk_inv movi v16.16b, #0x5b // .Lk_s63 adrp x11, .Lk_sb1 add x11, x11, :lo12:.Lk_sb1 movi v17.16b, #0x0f // .Lk_s0F ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // .Lk_inv, .Lk_ipt adrp x10, .Lk_dksd add x10, x10, :lo12:.Lk_dksd ld1 {v22.2d,v23.2d}, [x11] // .Lk_sb1 adrp x11, .Lk_mc_forward add x11, x11, :lo12:.Lk_mc_forward ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // .Lk_dksd, .Lk_dksb ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // .Lk_dkse, .Lk_dks9 ld1 {v8.2d}, [x10] // .Lk_rcon ld1 {v9.2d}, [x11] // .Lk_mc_forward[0] ret .size _vpaes_key_preheat,.-_vpaes_key_preheat .type _vpaes_schedule_core,%function .align 4 _vpaes_schedule_core: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp,#-16]! 
add x29,sp,#0 bl _vpaes_key_preheat // load the tables ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned) // input transform mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3 bl _vpaes_schedule_transform mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7 adrp x10, .Lk_sr // lea .Lk_sr(%rip),%r10 add x10, x10, :lo12:.Lk_sr add x8, x8, x10 // encrypting, output zeroth round key after transform st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) cmp w1, #192 // cmp $192, %esi b.hi .Lschedule_256 b.eq .Lschedule_192 // 128: fall though ## ## .schedule_128 ## ## 128-bit specific part of key schedule. ## ## This schedule is really simple, because all its parts ## are accomplished by the subroutines. ## .Lschedule_128: mov x0, #10 // mov $10, %esi .Loop_schedule_128: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_round cbz x0, .Lschedule_mangle_last bl _vpaes_schedule_mangle // write output b .Loop_schedule_128 ## ## .aes_schedule_192 ## ## 192-bit specific part of key schedule. ## ## The main body of this schedule is the same as the 128-bit ## schedule, but with more smearing. The long, high side is ## stored in %xmm7 as before, and the short, low side is in ## the high bits of %xmm6. ## ## This schedule is somewhat nastier, however, because each ## round produces 192 bits of key material, or 1.5 round keys. ## Therefore, on each cycle we do 2 rounds and produce 3 round ## keys. ## .align 4 .Lschedule_192: sub x0, x0, #8 ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned) bl _vpaes_schedule_transform // input transform mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4 ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros mov x0, #4 // mov $4, %esi .Loop_schedule_192: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_round ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0 bl _vpaes_schedule_mangle // save key n bl _vpaes_schedule_192_smear bl _vpaes_schedule_mangle // save key n+1 bl _vpaes_schedule_round cbz x0, .Lschedule_mangle_last bl _vpaes_schedule_mangle // save key n+2 bl _vpaes_schedule_192_smear b .Loop_schedule_192 ## ## .aes_schedule_256 ## ## 256-bit specific part of key schedule. ## ## The structure here is very similar to the 128-bit ## schedule, but with an additional "low side" in ## %xmm6. The low side's rounds are the same as the ## high side's, except no rcon and no rotation. ## .align 4 .Lschedule_256: ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) bl _vpaes_schedule_transform // input transform mov x0, #7 // mov $7, %esi .Loop_schedule_256: sub x0, x0, #1 // dec %esi bl _vpaes_schedule_mangle // output low result mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6 // high round bl _vpaes_schedule_round cbz x0, .Lschedule_mangle_last bl _vpaes_schedule_mangle // low round. swap xmm7 and xmm6 dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 movi v4.16b, #0 mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5 mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7 bl _vpaes_schedule_low_round mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7 b .Loop_schedule_256 ## ## .aes_schedule_mangle_last ## ## Mangler for last round of key schedule ## Mangles %xmm0 ## when encrypting, outputs out(%xmm0) ^ 63 ## when decrypting, outputs unskew(%xmm0) ## ## Always called right before return... 
jumps to cleanup and exits ## .align 4 .Lschedule_mangle_last: // schedule last round key from xmm0 adrp x11, .Lk_deskew // lea .Lk_deskew(%rip),%r11 # prepare to deskew add x11, x11, :lo12:.Lk_deskew cbnz w3, .Lschedule_mangle_last_dec // encrypting ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1 adrp x11, .Lk_opt // lea .Lk_opt(%rip), %r11 # prepare to output transform add x11, x11, :lo12:.Lk_opt add x2, x2, #32 // add $32, %rdx tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute .Lschedule_mangle_last_dec: ld1 {v20.2d,v21.2d}, [x11] // reload constants sub x2, x2, #16 // add $-16, %rdx eor v0.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm0 bl _vpaes_schedule_transform // output transform st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key // cleanup eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2 eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3 eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5 eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6 eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7 ldp x29, x30, [sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size _vpaes_schedule_core,.-_vpaes_schedule_core ## ## .aes_schedule_192_smear ## ## Smear the short, low side in the 192-bit key schedule. ## ## Inputs: ## %xmm7: high side, b a x y ## %xmm6: low side, d c 0 0 ## %xmm13: 0 ## ## Outputs: ## %xmm6: b+c+d b+c 0 0 ## %xmm0: b+c+d b+c b a ## .type _vpaes_schedule_192_smear,%function .align 4 _vpaes_schedule_192_smear: movi v1.16b, #0 dup v0.4s, v7.s[3] ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0 ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1 eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0 ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros ret .size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear ## ## .aes_schedule_round ## ## Runs one main round of the key schedule on %xmm0, %xmm7 ## ## Specifically, runs subbytes on the high dword of %xmm0 ## then rotates it by one byte and xors into the low dword of ## %xmm7. ## ## Adds rcon from low byte of %xmm8, then rotates %xmm8 for ## next rcon. ## ## Smears the dwords of %xmm7 by xoring the low into the ## second low, result into third, result into highest. ## ## Returns results in %xmm7 = %xmm0. ## Clobbers %xmm1-%xmm4, %r11. ## .type _vpaes_schedule_round,%function .align 4 _vpaes_schedule_round: // extract rcon from xmm8 movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4 ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1 ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 // rotate dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0 ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0 // fall through... // low round: same as high round, but no rotation and no rcon. 
_vpaes_schedule_low_round: // smear xmm7 ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7 ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4 // subbytes and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7 tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j eor v7.16b, v7.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm7, %xmm7 tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output // add in smeared stuff eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0 eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7 ret .size _vpaes_schedule_round,.-_vpaes_schedule_round ## ## .aes_schedule_transform ## ## Linear-transform %xmm0 according to tables at (%r11) ## ## Requires that %xmm9 = 0x0F0F... as in preheat ## Output in %xmm0 ## Clobbers %xmm1, %xmm2 ## .type _vpaes_schedule_transform,%function .align 4 _vpaes_schedule_transform: and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 // vmovdqa (%r11), %xmm2 # lo tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2 // vmovdqa 16(%r11), %xmm1 # hi tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0 ret .size _vpaes_schedule_transform,.-_vpaes_schedule_transform ## ## .aes_schedule_mangle ## ## Mangle xmm0 from (basis-transformed) standard version ## to our version. 
## ## On encrypt, ## xor with 0x63 ## multiply by circulant 0,1,1,1 ## apply shiftrows transform ## ## On decrypt, ## xor with 0x63 ## multiply by "inverse mixcolumns" circulant E,B,D,9 ## deskew ## apply shiftrows transform ## ## ## Writes out to (%rdx), and increments or decrements it ## Keeps track of round number mod 4 in %r8 ## Preserves xmm0 ## Clobbers xmm1-xmm5 ## .type _vpaes_schedule_mangle,%function .align 4 _vpaes_schedule_mangle: mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later // vmovdqa .Lk_mc_forward(%rip),%xmm5 // encrypting eor v4.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm4 add x2, x2, #16 // add $16, %rdx tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4 tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1 tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3 eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1 eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3 .Lschedule_mangle_both: tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 add x8, x8, #48 // add $-16, %r8 and x8, x8, #~(1<<6) // and $0x30, %r8 st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) ret .size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle .globl vpaes_set_encrypt_key .hidden vpaes_set_encrypt_key .type vpaes_set_encrypt_key,%function .align 4 vpaes_set_encrypt_key: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so lsr w9, w1, #5 // shr $5,%eax add w9, w9, #5 // $5,%eax str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; mov w3, #0 // mov $0,%ecx mov x8, #0x30 // mov $0x30,%r8d bl _vpaes_schedule_core eor x0, x0, x0 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key .globl vpaes_ctr32_encrypt_blocks .hidden vpaes_ctr32_encrypt_blocks .type vpaes_ctr32_encrypt_blocks,%function .align 4 vpaes_ctr32_encrypt_blocks: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so stp d10,d11,[sp,#-16]! stp d12,d13,[sp,#-16]! stp d14,d15,[sp,#-16]! cbz x2, .Lctr32_done // Note, unlike the other functions, x2 here is measured in blocks, // not bytes. mov x17, x2 mov x2, x3 // Load the IV and counter portion. ldr w6, [x4, #12] ld1 {v7.16b}, [x4] bl _vpaes_encrypt_preheat tst x17, #1 rev w6, w6 // The counter is big-endian. b.eq .Lctr32_prep_loop // Handle one block so the remaining block count is even for // _vpaes_encrypt_2x. ld1 {v6.16b}, [x0], #16 // .Load input ahead of time bl _vpaes_encrypt_core eor v0.16b, v0.16b, v6.16b // XOR input and result st1 {v0.16b}, [x1], #16 subs x17, x17, #1 // Update the counter. add w6, w6, #1 rev w7, w6 mov v7.s[3], w7 b.ls .Lctr32_done .Lctr32_prep_loop: // _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x // uses v14 and v15. mov v15.16b, v7.16b mov v14.16b, v7.16b add w6, w6, #1 rev w7, w6 mov v15.s[3], w7 .Lctr32_loop: ld1 {v6.16b,v7.16b}, [x0], #32 // .Load input ahead of time bl _vpaes_encrypt_2x eor v0.16b, v0.16b, v6.16b // XOR input and result eor v1.16b, v1.16b, v7.16b // XOR input and result (#2) st1 {v0.16b,v1.16b}, [x1], #32 subs x17, x17, #2 // Update the counter. 
add w7, w6, #1 add w6, w6, #2 rev w7, w7 mov v14.s[3], w7 rev w7, w6 mov v15.s[3], w7 b.hi .Lctr32_loop .Lctr32_done: ldp d14,d15,[sp],#16 ldp d12,d13,[sp],#16 ldp d10,d11,[sp],#16 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
marvin-hansen/iggy-streaming-system
28,813
thirdparty/crates/ring-0.17.9/pregenerated/chacha-armv4-linux32.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) #include <ring-core/arm_arch.h> @ Silence ARMv8 deprecated IT instruction warnings. This file is used by both @ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. .arch armv7-a .text #if defined(__thumb2__) || defined(__clang__) .syntax unified #endif #if defined(__thumb2__) .thumb #else .code 32 #endif #if defined(__thumb2__) || defined(__clang__) #define ldrhsb ldrbhs #endif .align 5 .Lsigma: .long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral .Lone: .long 1,0,0,0 .globl ChaCha20_ctr32_nohw .hidden ChaCha20_ctr32_nohw .type ChaCha20_ctr32_nohw,%function .align 5 ChaCha20_ctr32_nohw: ldr r12,[sp,#0] @ pull pointer to counter and nonce stmdb sp!,{r0,r1,r2,r4-r11,lr} adr r14,.Lsigma ldmia r12,{r4,r5,r6,r7} @ load counter and nonce sub sp,sp,#4*(16) @ off-load area stmdb sp!,{r4,r5,r6,r7} @ copy counter and nonce ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key ldmia r14,{r0,r1,r2,r3} @ load sigma stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy key stmdb sp!,{r0,r1,r2,r3} @ copy sigma str r10,[sp,#4*(16+10)] @ off-load "rx" str r11,[sp,#4*(16+11)] @ off-load "rx" b .Loop_outer_enter .align 4 .Loop_outer: ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material str r11,[sp,#4*(32+2)] @ save len str r12, [sp,#4*(32+1)] @ save inp str r14, [sp,#4*(32+0)] @ save out .Loop_outer_enter: ldr r11, [sp,#4*(15)] ldr r12,[sp,#4*(12)] @ modulo-scheduled load ldr r10, [sp,#4*(13)] ldr r14,[sp,#4*(14)] str r11, [sp,#4*(16+15)] mov r11,#10 b .Loop .align 4 .Loop: subs r11,r11,#1 add r0,r0,r4 mov r12,r12,ror#16 add r1,r1,r5 mov r10,r10,ror#16 eor r12,r12,r0,ror#16 eor r10,r10,r1,ror#16 add r8,r8,r12 mov r4,r4,ror#20 add r9,r9,r10 mov r5,r5,ror#20 eor r4,r4,r8,ror#20 eor r5,r5,r9,ror#20 add r0,r0,r4 mov r12,r12,ror#24 add r1,r1,r5 mov r10,r10,ror#24 eor r12,r12,r0,ror#24 eor r10,r10,r1,ror#24 add r8,r8,r12 mov r4,r4,ror#25 add r9,r9,r10 mov r5,r5,ror#25 str r10,[sp,#4*(16+13)] ldr r10,[sp,#4*(16+15)] eor r4,r4,r8,ror#25 eor r5,r5,r9,ror#25 str r8,[sp,#4*(16+8)] ldr r8,[sp,#4*(16+10)] add r2,r2,r6 mov r14,r14,ror#16 str r9,[sp,#4*(16+9)] ldr r9,[sp,#4*(16+11)] add r3,r3,r7 mov r10,r10,ror#16 eor r14,r14,r2,ror#16 eor r10,r10,r3,ror#16 add r8,r8,r14 mov r6,r6,ror#20 add r9,r9,r10 mov r7,r7,ror#20 eor r6,r6,r8,ror#20 eor r7,r7,r9,ror#20 add r2,r2,r6 mov r14,r14,ror#24 add r3,r3,r7 mov r10,r10,ror#24 eor r14,r14,r2,ror#24 eor r10,r10,r3,ror#24 add r8,r8,r14 mov r6,r6,ror#25 add r9,r9,r10 mov r7,r7,ror#25 eor r6,r6,r8,ror#25 eor r7,r7,r9,ror#25 add r0,r0,r5 mov r10,r10,ror#16 add r1,r1,r6 mov r12,r12,ror#16 eor r10,r10,r0,ror#16 eor r12,r12,r1,ror#16 add r8,r8,r10 mov r5,r5,ror#20 add r9,r9,r12 mov r6,r6,ror#20 eor r5,r5,r8,ror#20 eor r6,r6,r9,ror#20 add r0,r0,r5 mov r10,r10,ror#24 add r1,r1,r6 mov r12,r12,ror#24 eor r10,r10,r0,ror#24 eor r12,r12,r1,ror#24 add r8,r8,r10 mov r5,r5,ror#25 str r10,[sp,#4*(16+15)] ldr r10,[sp,#4*(16+13)] add r9,r9,r12 mov r6,r6,ror#25 eor r5,r5,r8,ror#25 eor r6,r6,r9,ror#25 str r8,[sp,#4*(16+10)] ldr r8,[sp,#4*(16+8)] add r2,r2,r7 mov r10,r10,ror#16 str r9,[sp,#4*(16+11)] ldr r9,[sp,#4*(16+9)] add r3,r3,r4 mov r14,r14,ror#16 eor r10,r10,r2,ror#16 eor r14,r14,r3,ror#16 add r8,r8,r10 mov r7,r7,ror#20 add r9,r9,r14 mov r4,r4,ror#20 eor r7,r7,r8,ror#20 eor r4,r4,r9,ror#20 add r2,r2,r7 mov r10,r10,ror#24 add r3,r3,r4 mov r14,r14,ror#24 eor 
r10,r10,r2,ror#24 eor r14,r14,r3,ror#24 add r8,r8,r10 mov r7,r7,ror#25 add r9,r9,r14 mov r4,r4,ror#25 eor r7,r7,r8,ror#25 eor r4,r4,r9,ror#25 bne .Loop ldr r11,[sp,#4*(32+2)] @ load len str r8, [sp,#4*(16+8)] @ modulo-scheduled store str r9, [sp,#4*(16+9)] str r12,[sp,#4*(16+12)] str r10, [sp,#4*(16+13)] str r14,[sp,#4*(16+14)] @ at this point we have first half of 512-bit result in @ rx and second half at sp+4*(16+8) cmp r11,#64 @ done yet? #ifdef __thumb2__ itete lo #endif addlo r12,sp,#4*(0) @ shortcut or ... ldrhs r12,[sp,#4*(32+1)] @ ... load inp addlo r14,sp,#4*(0) @ shortcut or ... ldrhs r14,[sp,#4*(32+0)] @ ... load out ldr r8,[sp,#4*(0)] @ load key material ldr r9,[sp,#4*(1)] #if __ARM_ARCH>=6 || !defined(__ARMEB__) # if __ARM_ARCH<7 orr r10,r12,r14 tst r10,#3 @ are input and output aligned? ldr r10,[sp,#4*(2)] bne .Lunaligned cmp r11,#64 @ restore flags # else ldr r10,[sp,#4*(2)] # endif ldr r11,[sp,#4*(3)] add r0,r0,r8 @ accumulate key material add r1,r1,r9 # ifdef __thumb2__ itt hs # endif ldrhs r8,[r12],#16 @ load input ldrhs r9,[r12,#-12] add r2,r2,r10 add r3,r3,r11 # ifdef __thumb2__ itt hs # endif ldrhs r10,[r12,#-8] ldrhs r11,[r12,#-4] # if __ARM_ARCH>=6 && defined(__ARMEB__) rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 # endif # ifdef __thumb2__ itt hs # endif eorhs r0,r0,r8 @ xor with input eorhs r1,r1,r9 add r8,sp,#4*(4) str r0,[r14],#16 @ store output # ifdef __thumb2__ itt hs # endif eorhs r2,r2,r10 eorhs r3,r3,r11 ldmia r8,{r8,r9,r10,r11} @ load key material str r1,[r14,#-12] str r2,[r14,#-8] str r3,[r14,#-4] add r4,r4,r8 @ accumulate key material add r5,r5,r9 # ifdef __thumb2__ itt hs # endif ldrhs r8,[r12],#16 @ load input ldrhs r9,[r12,#-12] add r6,r6,r10 add r7,r7,r11 # ifdef __thumb2__ itt hs # endif ldrhs r10,[r12,#-8] ldrhs r11,[r12,#-4] # if __ARM_ARCH>=6 && defined(__ARMEB__) rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif # ifdef __thumb2__ itt hs # endif eorhs r4,r4,r8 eorhs r5,r5,r9 add r8,sp,#4*(8) str r4,[r14],#16 @ store output # ifdef __thumb2__ itt hs # endif eorhs r6,r6,r10 eorhs r7,r7,r11 str r5,[r14,#-12] ldmia r8,{r8,r9,r10,r11} @ load key material str r6,[r14,#-8] add r0,sp,#4*(16+8) str r7,[r14,#-4] ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half add r0,r0,r8 @ accumulate key material add r1,r1,r9 # ifdef __thumb2__ itt hs # endif ldrhs r8,[r12],#16 @ load input ldrhs r9,[r12,#-12] # ifdef __thumb2__ itt hi # endif strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it add r2,r2,r10 add r3,r3,r11 # ifdef __thumb2__ itt hs # endif ldrhs r10,[r12,#-8] ldrhs r11,[r12,#-4] # if __ARM_ARCH>=6 && defined(__ARMEB__) rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 # endif # ifdef __thumb2__ itt hs # endif eorhs r0,r0,r8 eorhs r1,r1,r9 add r8,sp,#4*(12) str r0,[r14],#16 @ store output # ifdef __thumb2__ itt hs # endif eorhs r2,r2,r10 eorhs r3,r3,r11 str r1,[r14,#-12] ldmia r8,{r8,r9,r10,r11} @ load key material str r2,[r14,#-8] str r3,[r14,#-4] add r4,r4,r8 @ accumulate key material add r5,r5,r9 # ifdef __thumb2__ itt hi # endif addhi r8,r8,#1 @ next counter value strhi r8,[sp,#4*(12)] @ save next counter value # ifdef __thumb2__ itt hs # endif ldrhs r8,[r12],#16 @ load input ldrhs r9,[r12,#-12] add r6,r6,r10 add r7,r7,r11 # ifdef __thumb2__ itt hs # endif ldrhs r10,[r12,#-8] ldrhs r11,[r12,#-4] # if __ARM_ARCH>=6 && defined(__ARMEB__) rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif # ifdef __thumb2__ itt hs # endif eorhs r4,r4,r8 eorhs r5,r5,r9 # ifdef __thumb2__ it ne # endif ldrne r8,[sp,#4*(32+2)] @ re-load len 
# ifdef __thumb2__ itt hs # endif eorhs r6,r6,r10 eorhs r7,r7,r11 str r4,[r14],#16 @ store output str r5,[r14,#-12] # ifdef __thumb2__ it hs # endif subhs r11,r8,#64 @ len-=64 str r6,[r14,#-8] str r7,[r14,#-4] bhi .Loop_outer beq .Ldone # if __ARM_ARCH<7 b .Ltail .align 4 .Lunaligned:@ unaligned endian-neutral path cmp r11,#64 @ restore flags # endif #endif #if __ARM_ARCH<7 ldr r11,[sp,#4*(3)] add r0,r0,r8 @ accumulate key material add r1,r1,r9 add r2,r2,r10 # ifdef __thumb2__ itete lo # endif eorlo r8,r8,r8 @ zero or ... ldrhsb r8,[r12],#16 @ ... load input eorlo r9,r9,r9 ldrhsb r9,[r12,#-12] add r3,r3,r11 # ifdef __thumb2__ itete lo # endif eorlo r10,r10,r10 ldrhsb r10,[r12,#-8] eorlo r11,r11,r11 ldrhsb r11,[r12,#-4] eor r0,r8,r0 @ xor with input (or zero) eor r1,r9,r1 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-15] @ load more input ldrhsb r9,[r12,#-11] eor r2,r10,r2 strb r0,[r14],#16 @ store output eor r3,r11,r3 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-7] ldrhsb r11,[r12,#-3] strb r1,[r14,#-12] eor r0,r8,r0,lsr#8 strb r2,[r14,#-8] eor r1,r9,r1,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-14] @ load more input ldrhsb r9,[r12,#-10] strb r3,[r14,#-4] eor r2,r10,r2,lsr#8 strb r0,[r14,#-15] eor r3,r11,r3,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-6] ldrhsb r11,[r12,#-2] strb r1,[r14,#-11] eor r0,r8,r0,lsr#8 strb r2,[r14,#-7] eor r1,r9,r1,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-13] @ load more input ldrhsb r9,[r12,#-9] strb r3,[r14,#-3] eor r2,r10,r2,lsr#8 strb r0,[r14,#-14] eor r3,r11,r3,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-5] ldrhsb r11,[r12,#-1] strb r1,[r14,#-10] strb r2,[r14,#-6] eor r0,r8,r0,lsr#8 strb r3,[r14,#-2] eor r1,r9,r1,lsr#8 strb r0,[r14,#-13] eor r2,r10,r2,lsr#8 strb r1,[r14,#-9] eor r3,r11,r3,lsr#8 strb r2,[r14,#-5] strb r3,[r14,#-1] add r8,sp,#4*(4+0) ldmia r8,{r8,r9,r10,r11} @ load key material add r0,sp,#4*(16+8) add r4,r4,r8 @ accumulate key material add r5,r5,r9 add r6,r6,r10 # ifdef __thumb2__ itete lo # endif eorlo r8,r8,r8 @ zero or ... ldrhsb r8,[r12],#16 @ ... 
load input eorlo r9,r9,r9 ldrhsb r9,[r12,#-12] add r7,r7,r11 # ifdef __thumb2__ itete lo # endif eorlo r10,r10,r10 ldrhsb r10,[r12,#-8] eorlo r11,r11,r11 ldrhsb r11,[r12,#-4] eor r4,r8,r4 @ xor with input (or zero) eor r5,r9,r5 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-15] @ load more input ldrhsb r9,[r12,#-11] eor r6,r10,r6 strb r4,[r14],#16 @ store output eor r7,r11,r7 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-7] ldrhsb r11,[r12,#-3] strb r5,[r14,#-12] eor r4,r8,r4,lsr#8 strb r6,[r14,#-8] eor r5,r9,r5,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-14] @ load more input ldrhsb r9,[r12,#-10] strb r7,[r14,#-4] eor r6,r10,r6,lsr#8 strb r4,[r14,#-15] eor r7,r11,r7,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-6] ldrhsb r11,[r12,#-2] strb r5,[r14,#-11] eor r4,r8,r4,lsr#8 strb r6,[r14,#-7] eor r5,r9,r5,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-13] @ load more input ldrhsb r9,[r12,#-9] strb r7,[r14,#-3] eor r6,r10,r6,lsr#8 strb r4,[r14,#-14] eor r7,r11,r7,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-5] ldrhsb r11,[r12,#-1] strb r5,[r14,#-10] strb r6,[r14,#-6] eor r4,r8,r4,lsr#8 strb r7,[r14,#-2] eor r5,r9,r5,lsr#8 strb r4,[r14,#-13] eor r6,r10,r6,lsr#8 strb r5,[r14,#-9] eor r7,r11,r7,lsr#8 strb r6,[r14,#-5] strb r7,[r14,#-1] add r8,sp,#4*(4+4) ldmia r8,{r8,r9,r10,r11} @ load key material ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half # ifdef __thumb2__ itt hi # endif strhi r10,[sp,#4*(16+10)] @ copy "rx" strhi r11,[sp,#4*(16+11)] @ copy "rx" add r0,r0,r8 @ accumulate key material add r1,r1,r9 add r2,r2,r10 # ifdef __thumb2__ itete lo # endif eorlo r8,r8,r8 @ zero or ... ldrhsb r8,[r12],#16 @ ... load input eorlo r9,r9,r9 ldrhsb r9,[r12,#-12] add r3,r3,r11 # ifdef __thumb2__ itete lo # endif eorlo r10,r10,r10 ldrhsb r10,[r12,#-8] eorlo r11,r11,r11 ldrhsb r11,[r12,#-4] eor r0,r8,r0 @ xor with input (or zero) eor r1,r9,r1 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-15] @ load more input ldrhsb r9,[r12,#-11] eor r2,r10,r2 strb r0,[r14],#16 @ store output eor r3,r11,r3 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-7] ldrhsb r11,[r12,#-3] strb r1,[r14,#-12] eor r0,r8,r0,lsr#8 strb r2,[r14,#-8] eor r1,r9,r1,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-14] @ load more input ldrhsb r9,[r12,#-10] strb r3,[r14,#-4] eor r2,r10,r2,lsr#8 strb r0,[r14,#-15] eor r3,r11,r3,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-6] ldrhsb r11,[r12,#-2] strb r1,[r14,#-11] eor r0,r8,r0,lsr#8 strb r2,[r14,#-7] eor r1,r9,r1,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-13] @ load more input ldrhsb r9,[r12,#-9] strb r3,[r14,#-3] eor r2,r10,r2,lsr#8 strb r0,[r14,#-14] eor r3,r11,r3,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-5] ldrhsb r11,[r12,#-1] strb r1,[r14,#-10] strb r2,[r14,#-6] eor r0,r8,r0,lsr#8 strb r3,[r14,#-2] eor r1,r9,r1,lsr#8 strb r0,[r14,#-13] eor r2,r10,r2,lsr#8 strb r1,[r14,#-9] eor r3,r11,r3,lsr#8 strb r2,[r14,#-5] strb r3,[r14,#-1] add r8,sp,#4*(4+8) ldmia r8,{r8,r9,r10,r11} @ load key material add r4,r4,r8 @ accumulate key material # ifdef __thumb2__ itt hi # endif addhi r8,r8,#1 @ next counter value strhi r8,[sp,#4*(12)] @ save next counter value add r5,r5,r9 add r6,r6,r10 # ifdef __thumb2__ itete lo # endif eorlo r8,r8,r8 @ zero or ... ldrhsb r8,[r12],#16 @ ... 
load input eorlo r9,r9,r9 ldrhsb r9,[r12,#-12] add r7,r7,r11 # ifdef __thumb2__ itete lo # endif eorlo r10,r10,r10 ldrhsb r10,[r12,#-8] eorlo r11,r11,r11 ldrhsb r11,[r12,#-4] eor r4,r8,r4 @ xor with input (or zero) eor r5,r9,r5 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-15] @ load more input ldrhsb r9,[r12,#-11] eor r6,r10,r6 strb r4,[r14],#16 @ store output eor r7,r11,r7 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-7] ldrhsb r11,[r12,#-3] strb r5,[r14,#-12] eor r4,r8,r4,lsr#8 strb r6,[r14,#-8] eor r5,r9,r5,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-14] @ load more input ldrhsb r9,[r12,#-10] strb r7,[r14,#-4] eor r6,r10,r6,lsr#8 strb r4,[r14,#-15] eor r7,r11,r7,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-6] ldrhsb r11,[r12,#-2] strb r5,[r14,#-11] eor r4,r8,r4,lsr#8 strb r6,[r14,#-7] eor r5,r9,r5,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r8,[r12,#-13] @ load more input ldrhsb r9,[r12,#-9] strb r7,[r14,#-3] eor r6,r10,r6,lsr#8 strb r4,[r14,#-14] eor r7,r11,r7,lsr#8 # ifdef __thumb2__ itt hs # endif ldrhsb r10,[r12,#-5] ldrhsb r11,[r12,#-1] strb r5,[r14,#-10] strb r6,[r14,#-6] eor r4,r8,r4,lsr#8 strb r7,[r14,#-2] eor r5,r9,r5,lsr#8 strb r4,[r14,#-13] eor r6,r10,r6,lsr#8 strb r5,[r14,#-9] eor r7,r11,r7,lsr#8 strb r6,[r14,#-5] strb r7,[r14,#-1] # ifdef __thumb2__ it ne # endif ldrne r8,[sp,#4*(32+2)] @ re-load len # ifdef __thumb2__ it hs # endif subhs r11,r8,#64 @ len-=64 bhi .Loop_outer beq .Ldone #endif .Ltail: ldr r12,[sp,#4*(32+1)] @ load inp add r9,sp,#4*(0) ldr r14,[sp,#4*(32+0)] @ load out .Loop_tail: ldrb r10,[r9],#1 @ read buffer on stack ldrb r11,[r12],#1 @ read input subs r8,r8,#1 eor r11,r11,r10 strb r11,[r14],#1 @ store output bne .Loop_tail .Ldone: add sp,sp,#4*(32+3) ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} .size ChaCha20_ctr32_nohw,.-ChaCha20_ctr32_nohw #if __ARM_MAX_ARCH__>=7 .arch armv7-a .fpu neon .globl ChaCha20_ctr32_neon .hidden ChaCha20_ctr32_neon .type ChaCha20_ctr32_neon,%function .align 5 ChaCha20_ctr32_neon: ldr r12,[sp,#0] @ pull pointer to counter and nonce stmdb sp!,{r0,r1,r2,r4-r11,lr} adr r14,.Lsigma vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI spec says so stmdb sp!,{r0,r1,r2,r3} vld1.32 {q1,q2},[r3] @ load key ldmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} @ load key sub sp,sp,#4*(16+16) vld1.32 {q3},[r12] @ load counter and nonce add r12,sp,#4*8 ldmia r14,{r0,r1,r2,r3} @ load sigma vld1.32 {q0},[r14]! 
@ load sigma vld1.32 {q12},[r14] @ one vst1.32 {q2,q3},[r12] @ copy 1/2key|counter|nonce vst1.32 {q0,q1},[sp] @ copy sigma|1/2key str r10,[sp,#4*(16+10)] @ off-load "rx" str r11,[sp,#4*(16+11)] @ off-load "rx" vshl.i32 d26,d24,#1 @ two vstr d24,[sp,#4*(16+0)] vshl.i32 d28,d24,#2 @ four vstr d26,[sp,#4*(16+2)] vmov q4,q0 vstr d28,[sp,#4*(16+4)] vmov q8,q0 vmov q5,q1 vmov q9,q1 b .Loop_neon_enter .align 4 .Loop_neon_outer: ldmia sp,{r0,r1,r2,r3,r4,r5,r6,r7,r8,r9} @ load key material cmp r11,#64*2 @ if len<=64*2 bls .Lbreak_neon @ switch to integer-only vmov q4,q0 str r11,[sp,#4*(32+2)] @ save len vmov q8,q0 str r12, [sp,#4*(32+1)] @ save inp vmov q5,q1 str r14, [sp,#4*(32+0)] @ save out vmov q9,q1 .Loop_neon_enter: ldr r11, [sp,#4*(15)] vadd.i32 q7,q3,q12 @ counter+1 ldr r12,[sp,#4*(12)] @ modulo-scheduled load vmov q6,q2 ldr r10, [sp,#4*(13)] vmov q10,q2 ldr r14,[sp,#4*(14)] vadd.i32 q11,q7,q12 @ counter+2 str r11, [sp,#4*(16+15)] mov r11,#10 add r12,r12,#3 @ counter+3 b .Loop_neon .align 4 .Loop_neon: subs r11,r11,#1 vadd.i32 q0,q0,q1 add r0,r0,r4 vadd.i32 q4,q4,q5 mov r12,r12,ror#16 vadd.i32 q8,q8,q9 add r1,r1,r5 veor q3,q3,q0 mov r10,r10,ror#16 veor q7,q7,q4 eor r12,r12,r0,ror#16 veor q11,q11,q8 eor r10,r10,r1,ror#16 vrev32.16 q3,q3 add r8,r8,r12 vrev32.16 q7,q7 mov r4,r4,ror#20 vrev32.16 q11,q11 add r9,r9,r10 vadd.i32 q2,q2,q3 mov r5,r5,ror#20 vadd.i32 q6,q6,q7 eor r4,r4,r8,ror#20 vadd.i32 q10,q10,q11 eor r5,r5,r9,ror#20 veor q12,q1,q2 add r0,r0,r4 veor q13,q5,q6 mov r12,r12,ror#24 veor q14,q9,q10 add r1,r1,r5 vshr.u32 q1,q12,#20 mov r10,r10,ror#24 vshr.u32 q5,q13,#20 eor r12,r12,r0,ror#24 vshr.u32 q9,q14,#20 eor r10,r10,r1,ror#24 vsli.32 q1,q12,#12 add r8,r8,r12 vsli.32 q5,q13,#12 mov r4,r4,ror#25 vsli.32 q9,q14,#12 add r9,r9,r10 vadd.i32 q0,q0,q1 mov r5,r5,ror#25 vadd.i32 q4,q4,q5 str r10,[sp,#4*(16+13)] vadd.i32 q8,q8,q9 ldr r10,[sp,#4*(16+15)] veor q12,q3,q0 eor r4,r4,r8,ror#25 veor q13,q7,q4 eor r5,r5,r9,ror#25 veor q14,q11,q8 str r8,[sp,#4*(16+8)] vshr.u32 q3,q12,#24 ldr r8,[sp,#4*(16+10)] vshr.u32 q7,q13,#24 add r2,r2,r6 vshr.u32 q11,q14,#24 mov r14,r14,ror#16 vsli.32 q3,q12,#8 str r9,[sp,#4*(16+9)] vsli.32 q7,q13,#8 ldr r9,[sp,#4*(16+11)] vsli.32 q11,q14,#8 add r3,r3,r7 vadd.i32 q2,q2,q3 mov r10,r10,ror#16 vadd.i32 q6,q6,q7 eor r14,r14,r2,ror#16 vadd.i32 q10,q10,q11 eor r10,r10,r3,ror#16 veor q12,q1,q2 add r8,r8,r14 veor q13,q5,q6 mov r6,r6,ror#20 veor q14,q9,q10 add r9,r9,r10 vshr.u32 q1,q12,#25 mov r7,r7,ror#20 vshr.u32 q5,q13,#25 eor r6,r6,r8,ror#20 vshr.u32 q9,q14,#25 eor r7,r7,r9,ror#20 vsli.32 q1,q12,#7 add r2,r2,r6 vsli.32 q5,q13,#7 mov r14,r14,ror#24 vsli.32 q9,q14,#7 add r3,r3,r7 vext.8 q2,q2,q2,#8 mov r10,r10,ror#24 vext.8 q6,q6,q6,#8 eor r14,r14,r2,ror#24 vext.8 q10,q10,q10,#8 eor r10,r10,r3,ror#24 vext.8 q1,q1,q1,#4 add r8,r8,r14 vext.8 q5,q5,q5,#4 mov r6,r6,ror#25 vext.8 q9,q9,q9,#4 add r9,r9,r10 vext.8 q3,q3,q3,#12 mov r7,r7,ror#25 vext.8 q7,q7,q7,#12 eor r6,r6,r8,ror#25 vext.8 q11,q11,q11,#12 eor r7,r7,r9,ror#25 vadd.i32 q0,q0,q1 add r0,r0,r5 vadd.i32 q4,q4,q5 mov r10,r10,ror#16 vadd.i32 q8,q8,q9 add r1,r1,r6 veor q3,q3,q0 mov r12,r12,ror#16 veor q7,q7,q4 eor r10,r10,r0,ror#16 veor q11,q11,q8 eor r12,r12,r1,ror#16 vrev32.16 q3,q3 add r8,r8,r10 vrev32.16 q7,q7 mov r5,r5,ror#20 vrev32.16 q11,q11 add r9,r9,r12 vadd.i32 q2,q2,q3 mov r6,r6,ror#20 vadd.i32 q6,q6,q7 eor r5,r5,r8,ror#20 vadd.i32 q10,q10,q11 eor r6,r6,r9,ror#20 veor q12,q1,q2 add r0,r0,r5 veor q13,q5,q6 mov r10,r10,ror#24 veor q14,q9,q10 add r1,r1,r6 vshr.u32 q1,q12,#20 mov r12,r12,ror#24 vshr.u32 
q5,q13,#20 eor r10,r10,r0,ror#24 vshr.u32 q9,q14,#20 eor r12,r12,r1,ror#24 vsli.32 q1,q12,#12 add r8,r8,r10 vsli.32 q5,q13,#12 mov r5,r5,ror#25 vsli.32 q9,q14,#12 str r10,[sp,#4*(16+15)] vadd.i32 q0,q0,q1 ldr r10,[sp,#4*(16+13)] vadd.i32 q4,q4,q5 add r9,r9,r12 vadd.i32 q8,q8,q9 mov r6,r6,ror#25 veor q12,q3,q0 eor r5,r5,r8,ror#25 veor q13,q7,q4 eor r6,r6,r9,ror#25 veor q14,q11,q8 str r8,[sp,#4*(16+10)] vshr.u32 q3,q12,#24 ldr r8,[sp,#4*(16+8)] vshr.u32 q7,q13,#24 add r2,r2,r7 vshr.u32 q11,q14,#24 mov r10,r10,ror#16 vsli.32 q3,q12,#8 str r9,[sp,#4*(16+11)] vsli.32 q7,q13,#8 ldr r9,[sp,#4*(16+9)] vsli.32 q11,q14,#8 add r3,r3,r4 vadd.i32 q2,q2,q3 mov r14,r14,ror#16 vadd.i32 q6,q6,q7 eor r10,r10,r2,ror#16 vadd.i32 q10,q10,q11 eor r14,r14,r3,ror#16 veor q12,q1,q2 add r8,r8,r10 veor q13,q5,q6 mov r7,r7,ror#20 veor q14,q9,q10 add r9,r9,r14 vshr.u32 q1,q12,#25 mov r4,r4,ror#20 vshr.u32 q5,q13,#25 eor r7,r7,r8,ror#20 vshr.u32 q9,q14,#25 eor r4,r4,r9,ror#20 vsli.32 q1,q12,#7 add r2,r2,r7 vsli.32 q5,q13,#7 mov r10,r10,ror#24 vsli.32 q9,q14,#7 add r3,r3,r4 vext.8 q2,q2,q2,#8 mov r14,r14,ror#24 vext.8 q6,q6,q6,#8 eor r10,r10,r2,ror#24 vext.8 q10,q10,q10,#8 eor r14,r14,r3,ror#24 vext.8 q1,q1,q1,#12 add r8,r8,r10 vext.8 q5,q5,q5,#12 mov r7,r7,ror#25 vext.8 q9,q9,q9,#12 add r9,r9,r14 vext.8 q3,q3,q3,#4 mov r4,r4,ror#25 vext.8 q7,q7,q7,#4 eor r7,r7,r8,ror#25 vext.8 q11,q11,q11,#4 eor r4,r4,r9,ror#25 bne .Loop_neon add r11,sp,#32 vld1.32 {q12,q13},[sp] @ load key material vld1.32 {q14,q15},[r11] ldr r11,[sp,#4*(32+2)] @ load len str r8, [sp,#4*(16+8)] @ modulo-scheduled store str r9, [sp,#4*(16+9)] str r12,[sp,#4*(16+12)] str r10, [sp,#4*(16+13)] str r14,[sp,#4*(16+14)] @ at this point we have first half of 512-bit result in @ rx and second half at sp+4*(16+8) ldr r12,[sp,#4*(32+1)] @ load inp ldr r14,[sp,#4*(32+0)] @ load out vadd.i32 q0,q0,q12 @ accumulate key material vadd.i32 q4,q4,q12 vadd.i32 q8,q8,q12 vldr d24,[sp,#4*(16+0)] @ one vadd.i32 q1,q1,q13 vadd.i32 q5,q5,q13 vadd.i32 q9,q9,q13 vldr d26,[sp,#4*(16+2)] @ two vadd.i32 q2,q2,q14 vadd.i32 q6,q6,q14 vadd.i32 q10,q10,q14 vadd.i32 d14,d14,d24 @ counter+1 vadd.i32 d22,d22,d26 @ counter+2 vadd.i32 q3,q3,q15 vadd.i32 q7,q7,q15 vadd.i32 q11,q11,q15 cmp r11,#64*4 blo .Ltail_neon vld1.8 {q12,q13},[r12]! @ load input mov r11,sp vld1.8 {q14,q15},[r12]! veor q0,q0,q12 @ xor with input veor q1,q1,q13 vld1.8 {q12,q13},[r12]! veor q2,q2,q14 veor q3,q3,q15 vld1.8 {q14,q15},[r12]! veor q4,q4,q12 vst1.8 {q0,q1},[r14]! @ store output veor q5,q5,q13 vld1.8 {q12,q13},[r12]! veor q6,q6,q14 vst1.8 {q2,q3},[r14]! veor q7,q7,q15 vld1.8 {q14,q15},[r12]! veor q8,q8,q12 vld1.32 {q0,q1},[r11]! @ load for next iteration veor d25,d25,d25 vldr d24,[sp,#4*(16+4)] @ four veor q9,q9,q13 vld1.32 {q2,q3},[r11] veor q10,q10,q14 vst1.8 {q4,q5},[r14]! veor q11,q11,q15 vst1.8 {q6,q7},[r14]! vadd.i32 d6,d6,d24 @ next counter value vldr d24,[sp,#4*(16+0)] @ one ldmia sp,{r8,r9,r10,r11} @ load key material add r0,r0,r8 @ accumulate key material ldr r8,[r12],#16 @ load input vst1.8 {q8,q9},[r14]! add r1,r1,r9 ldr r9,[r12,#-12] vst1.8 {q10,q11},[r14]! 
add r2,r2,r10 ldr r10,[r12,#-8] add r3,r3,r11 ldr r11,[r12,#-4] # ifdef __ARMEB__ rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 # endif eor r0,r0,r8 @ xor with input add r8,sp,#4*(4) eor r1,r1,r9 str r0,[r14],#16 @ store output eor r2,r2,r10 str r1,[r14,#-12] eor r3,r3,r11 ldmia r8,{r8,r9,r10,r11} @ load key material str r2,[r14,#-8] str r3,[r14,#-4] add r4,r4,r8 @ accumulate key material ldr r8,[r12],#16 @ load input add r5,r5,r9 ldr r9,[r12,#-12] add r6,r6,r10 ldr r10,[r12,#-8] add r7,r7,r11 ldr r11,[r12,#-4] # ifdef __ARMEB__ rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif eor r4,r4,r8 add r8,sp,#4*(8) eor r5,r5,r9 str r4,[r14],#16 @ store output eor r6,r6,r10 str r5,[r14,#-12] eor r7,r7,r11 ldmia r8,{r8,r9,r10,r11} @ load key material str r6,[r14,#-8] add r0,sp,#4*(16+8) str r7,[r14,#-4] ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half add r0,r0,r8 @ accumulate key material ldr r8,[r12],#16 @ load input add r1,r1,r9 ldr r9,[r12,#-12] # ifdef __thumb2__ it hi # endif strhi r10,[sp,#4*(16+10)] @ copy "rx" while at it add r2,r2,r10 ldr r10,[r12,#-8] # ifdef __thumb2__ it hi # endif strhi r11,[sp,#4*(16+11)] @ copy "rx" while at it add r3,r3,r11 ldr r11,[r12,#-4] # ifdef __ARMEB__ rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 # endif eor r0,r0,r8 add r8,sp,#4*(12) eor r1,r1,r9 str r0,[r14],#16 @ store output eor r2,r2,r10 str r1,[r14,#-12] eor r3,r3,r11 ldmia r8,{r8,r9,r10,r11} @ load key material str r2,[r14,#-8] str r3,[r14,#-4] add r4,r4,r8 @ accumulate key material add r8,r8,#4 @ next counter value add r5,r5,r9 str r8,[sp,#4*(12)] @ save next counter value ldr r8,[r12],#16 @ load input add r6,r6,r10 add r4,r4,#3 @ counter+3 ldr r9,[r12,#-12] add r7,r7,r11 ldr r10,[r12,#-8] ldr r11,[r12,#-4] # ifdef __ARMEB__ rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif eor r4,r4,r8 # ifdef __thumb2__ it hi # endif ldrhi r8,[sp,#4*(32+2)] @ re-load len eor r5,r5,r9 eor r6,r6,r10 str r4,[r14],#16 @ store output eor r7,r7,r11 str r5,[r14,#-12] sub r11,r8,#64*4 @ len-=64*4 str r6,[r14,#-8] str r7,[r14,#-4] bhi .Loop_neon_outer b .Ldone_neon .align 4 .Lbreak_neon: @ harmonize NEON and integer-only stack frames: load data @ from NEON frame, but save to integer-only one; distance @ between the two is 4*(32+4+16-32)=4*(20). str r11, [sp,#4*(20+32+2)] @ save len add r11,sp,#4*(32+4) str r12, [sp,#4*(20+32+1)] @ save inp str r14, [sp,#4*(20+32+0)] @ save out ldr r12,[sp,#4*(16+10)] ldr r14,[sp,#4*(16+11)] vldmia r11,{d8,d9,d10,d11,d12,d13,d14,d15} @ fulfill ABI requirement str r12,[sp,#4*(20+16+10)] @ copy "rx" str r14,[sp,#4*(20+16+11)] @ copy "rx" ldr r11, [sp,#4*(15)] ldr r12,[sp,#4*(12)] @ modulo-scheduled load ldr r10, [sp,#4*(13)] ldr r14,[sp,#4*(14)] str r11, [sp,#4*(20+16+15)] add r11,sp,#4*(20) vst1.32 {q0,q1},[r11]! @ copy key add sp,sp,#4*(20) @ switch frame vst1.32 {q2,q3},[r11] mov r11,#10 b .Loop @ go integer-only .align 4 .Ltail_neon: cmp r11,#64*3 bhs .L192_or_more_neon cmp r11,#64*2 bhs .L128_or_more_neon cmp r11,#64*1 bhs .L64_or_more_neon add r8,sp,#4*(8) vst1.8 {q0,q1},[sp] add r10,sp,#4*(0) vst1.8 {q2,q3},[r8] b .Loop_tail_neon .align 4 .L64_or_more_neon: vld1.8 {q12,q13},[r12]! vld1.8 {q14,q15},[r12]! veor q0,q0,q12 veor q1,q1,q13 veor q2,q2,q14 veor q3,q3,q15 vst1.8 {q0,q1},[r14]! vst1.8 {q2,q3},[r14]! beq .Ldone_neon add r8,sp,#4*(8) vst1.8 {q4,q5},[sp] add r10,sp,#4*(0) vst1.8 {q6,q7},[r8] sub r11,r11,#64*1 @ len-=64*1 b .Loop_tail_neon .align 4 .L128_or_more_neon: vld1.8 {q12,q13},[r12]! vld1.8 {q14,q15},[r12]! veor q0,q0,q12 veor q1,q1,q13 vld1.8 {q12,q13},[r12]! 
veor q2,q2,q14 veor q3,q3,q15 vld1.8 {q14,q15},[r12]! veor q4,q4,q12 veor q5,q5,q13 vst1.8 {q0,q1},[r14]! veor q6,q6,q14 vst1.8 {q2,q3},[r14]! veor q7,q7,q15 vst1.8 {q4,q5},[r14]! vst1.8 {q6,q7},[r14]! beq .Ldone_neon add r8,sp,#4*(8) vst1.8 {q8,q9},[sp] add r10,sp,#4*(0) vst1.8 {q10,q11},[r8] sub r11,r11,#64*2 @ len-=64*2 b .Loop_tail_neon .align 4 .L192_or_more_neon: vld1.8 {q12,q13},[r12]! vld1.8 {q14,q15},[r12]! veor q0,q0,q12 veor q1,q1,q13 vld1.8 {q12,q13},[r12]! veor q2,q2,q14 veor q3,q3,q15 vld1.8 {q14,q15},[r12]! veor q4,q4,q12 veor q5,q5,q13 vld1.8 {q12,q13},[r12]! veor q6,q6,q14 vst1.8 {q0,q1},[r14]! veor q7,q7,q15 vld1.8 {q14,q15},[r12]! veor q8,q8,q12 vst1.8 {q2,q3},[r14]! veor q9,q9,q13 vst1.8 {q4,q5},[r14]! veor q10,q10,q14 vst1.8 {q6,q7},[r14]! veor q11,q11,q15 vst1.8 {q8,q9},[r14]! vst1.8 {q10,q11},[r14]! beq .Ldone_neon ldmia sp,{r8,r9,r10,r11} @ load key material add r0,r0,r8 @ accumulate key material add r8,sp,#4*(4) add r1,r1,r9 add r2,r2,r10 add r3,r3,r11 ldmia r8,{r8,r9,r10,r11} @ load key material add r4,r4,r8 @ accumulate key material add r8,sp,#4*(8) add r5,r5,r9 add r6,r6,r10 add r7,r7,r11 ldmia r8,{r8,r9,r10,r11} @ load key material # ifdef __ARMEB__ rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif stmia sp,{r0,r1,r2,r3,r4,r5,r6,r7} add r0,sp,#4*(16+8) ldmia r0,{r0,r1,r2,r3,r4,r5,r6,r7} @ load second half add r0,r0,r8 @ accumulate key material add r8,sp,#4*(12) add r1,r1,r9 add r2,r2,r10 add r3,r3,r11 ldmia r8,{r8,r9,r10,r11} @ load key material add r4,r4,r8 @ accumulate key material add r8,sp,#4*(8) add r5,r5,r9 add r4,r4,#3 @ counter+3 add r6,r6,r10 add r7,r7,r11 ldr r11,[sp,#4*(32+2)] @ re-load len # ifdef __ARMEB__ rev r0,r0 rev r1,r1 rev r2,r2 rev r3,r3 rev r4,r4 rev r5,r5 rev r6,r6 rev r7,r7 # endif stmia r8,{r0,r1,r2,r3,r4,r5,r6,r7} add r10,sp,#4*(0) sub r11,r11,#64*3 @ len-=64*3 .Loop_tail_neon: ldrb r8,[r10],#1 @ read buffer on stack ldrb r9,[r12],#1 @ read input subs r11,r11,#1 eor r8,r8,r9 strb r8,[r14],#1 @ store output bne .Loop_tail_neon .Ldone_neon: add sp,sp,#4*(32+4) vldmia sp,{d8,d9,d10,d11,d12,d13,d14,d15} add sp,sp,#4*(16+3) ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc} .size ChaCha20_ctr32_neon,.-ChaCha20_ctr32_neon #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
marvin-hansen/iggy-streaming-system
68,707
thirdparty/crates/ring-0.17.9/pregenerated/p256-x86_64-asm-macosx.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__APPLE__) .text .section __DATA,__const .p2align 6 L$poly: .quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001 L$One: .long 1,1,1,1,1,1,1,1 L$Two: .long 2,2,2,2,2,2,2,2 L$Three: .long 3,3,3,3,3,3,3,3 L$ONE_mont: .quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe L$ord: .quad 0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000 L$ordK: .quad 0xccd1c8aaee00bc4f .text .globl _ecp_nistz256_neg .private_extern _ecp_nistz256_neg .p2align 5 _ecp_nistz256_neg: _CET_ENDBR pushq %r12 pushq %r13 L$neg_body: xorq %r8,%r8 xorq %r9,%r9 xorq %r10,%r10 xorq %r11,%r11 xorq %r13,%r13 subq 0(%rsi),%r8 sbbq 8(%rsi),%r9 sbbq 16(%rsi),%r10 movq %r8,%rax sbbq 24(%rsi),%r11 leaq L$poly(%rip),%rsi movq %r9,%rdx sbbq $0,%r13 addq 0(%rsi),%r8 movq %r10,%rcx adcq 8(%rsi),%r9 adcq 16(%rsi),%r10 movq %r11,%r12 adcq 24(%rsi),%r11 testq %r13,%r13 cmovzq %rax,%r8 cmovzq %rdx,%r9 movq %r8,0(%rdi) cmovzq %rcx,%r10 movq %r9,8(%rdi) cmovzq %r12,%r11 movq %r10,16(%rdi) movq %r11,24(%rdi) movq 0(%rsp),%r13 movq 8(%rsp),%r12 leaq 16(%rsp),%rsp L$neg_epilogue: ret .globl _ecp_nistz256_ord_mul_mont_nohw .private_extern _ecp_nistz256_ord_mul_mont_nohw .p2align 5 _ecp_nistz256_ord_mul_mont_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$ord_mul_body: movq 0(%rdx),%rax movq %rdx,%rbx leaq L$ord(%rip),%r14 movq L$ordK(%rip),%r15 movq %rax,%rcx mulq 0(%rsi) movq %rax,%r8 movq %rcx,%rax movq %rdx,%r9 mulq 8(%rsi) addq %rax,%r9 movq %rcx,%rax adcq $0,%rdx movq %rdx,%r10 mulq 16(%rsi) addq %rax,%r10 movq %rcx,%rax adcq $0,%rdx movq %r8,%r13 imulq %r15,%r8 movq %rdx,%r11 mulq 24(%rsi) addq %rax,%r11 movq %r8,%rax adcq $0,%rdx movq %rdx,%r12 mulq 0(%r14) movq %r8,%rbp addq %rax,%r13 movq %r8,%rax adcq $0,%rdx movq %rdx,%rcx subq %r8,%r10 sbbq $0,%r8 mulq 8(%r14) addq %rcx,%r9 adcq $0,%rdx addq %rax,%r9 movq %rbp,%rax adcq %rdx,%r10 movq %rbp,%rdx adcq $0,%r8 shlq $32,%rax shrq $32,%rdx subq %rax,%r11 movq 8(%rbx),%rax sbbq %rdx,%rbp addq %r8,%r11 adcq %rbp,%r12 adcq $0,%r13 movq %rax,%rcx mulq 0(%rsi) addq %rax,%r9 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 8(%rsi) addq %rbp,%r10 adcq $0,%rdx addq %rax,%r10 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 16(%rsi) addq %rbp,%r11 adcq $0,%rdx addq %rax,%r11 movq %rcx,%rax adcq $0,%rdx movq %r9,%rcx imulq %r15,%r9 movq %rdx,%rbp mulq 24(%rsi) addq %rbp,%r12 adcq $0,%rdx xorq %r8,%r8 addq %rax,%r12 movq %r9,%rax adcq %rdx,%r13 adcq $0,%r8 mulq 0(%r14) movq %r9,%rbp addq %rax,%rcx movq %r9,%rax adcq %rdx,%rcx subq %r9,%r11 sbbq $0,%r9 mulq 8(%r14) addq %rcx,%r10 adcq $0,%rdx addq %rax,%r10 movq %rbp,%rax adcq %rdx,%r11 movq %rbp,%rdx adcq $0,%r9 shlq $32,%rax shrq $32,%rdx subq %rax,%r12 movq 16(%rbx),%rax sbbq %rdx,%rbp addq %r9,%r12 adcq %rbp,%r13 adcq $0,%r8 movq %rax,%rcx mulq 0(%rsi) addq %rax,%r10 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 8(%rsi) addq %rbp,%r11 adcq $0,%rdx addq %rax,%r11 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 16(%rsi) addq %rbp,%r12 adcq $0,%rdx addq %rax,%r12 movq %rcx,%rax adcq $0,%rdx movq %r10,%rcx imulq %r15,%r10 movq %rdx,%rbp mulq 24(%rsi) addq %rbp,%r13 adcq $0,%rdx xorq %r9,%r9 addq %rax,%r13 movq %r10,%rax adcq %rdx,%r8 adcq $0,%r9 mulq 0(%r14) movq %r10,%rbp addq %rax,%rcx movq %r10,%rax adcq %rdx,%rcx 
subq %r10,%r12 sbbq $0,%r10 mulq 8(%r14) addq %rcx,%r11 adcq $0,%rdx addq %rax,%r11 movq %rbp,%rax adcq %rdx,%r12 movq %rbp,%rdx adcq $0,%r10 shlq $32,%rax shrq $32,%rdx subq %rax,%r13 movq 24(%rbx),%rax sbbq %rdx,%rbp addq %r10,%r13 adcq %rbp,%r8 adcq $0,%r9 movq %rax,%rcx mulq 0(%rsi) addq %rax,%r11 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 8(%rsi) addq %rbp,%r12 adcq $0,%rdx addq %rax,%r12 movq %rcx,%rax adcq $0,%rdx movq %rdx,%rbp mulq 16(%rsi) addq %rbp,%r13 adcq $0,%rdx addq %rax,%r13 movq %rcx,%rax adcq $0,%rdx movq %r11,%rcx imulq %r15,%r11 movq %rdx,%rbp mulq 24(%rsi) addq %rbp,%r8 adcq $0,%rdx xorq %r10,%r10 addq %rax,%r8 movq %r11,%rax adcq %rdx,%r9 adcq $0,%r10 mulq 0(%r14) movq %r11,%rbp addq %rax,%rcx movq %r11,%rax adcq %rdx,%rcx subq %r11,%r13 sbbq $0,%r11 mulq 8(%r14) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq %rbp,%rax adcq %rdx,%r13 movq %rbp,%rdx adcq $0,%r11 shlq $32,%rax shrq $32,%rdx subq %rax,%r8 sbbq %rdx,%rbp addq %r11,%r8 adcq %rbp,%r9 adcq $0,%r10 movq %r12,%rsi subq 0(%r14),%r12 movq %r13,%r11 sbbq 8(%r14),%r13 movq %r8,%rcx sbbq 16(%r14),%r8 movq %r9,%rbp sbbq 24(%r14),%r9 sbbq $0,%r10 cmovcq %rsi,%r12 cmovcq %r11,%r13 cmovcq %rcx,%r8 cmovcq %rbp,%r9 movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 0(%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$ord_mul_epilogue: ret .globl _ecp_nistz256_ord_sqr_mont_nohw .private_extern _ecp_nistz256_ord_sqr_mont_nohw .p2align 5 _ecp_nistz256_ord_sqr_mont_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$ord_sqr_body: movq 0(%rsi),%r8 movq 8(%rsi),%rax movq 16(%rsi),%r14 movq 24(%rsi),%r15 leaq L$ord(%rip),%rsi movq %rdx,%rbx jmp L$oop_ord_sqr .p2align 5 L$oop_ord_sqr: movq %rax,%rbp mulq %r8 movq %rax,%r9 .byte 102,72,15,110,205 movq %r14,%rax movq %rdx,%r10 mulq %r8 addq %rax,%r10 movq %r15,%rax .byte 102,73,15,110,214 adcq $0,%rdx movq %rdx,%r11 mulq %r8 addq %rax,%r11 movq %r15,%rax .byte 102,73,15,110,223 adcq $0,%rdx movq %rdx,%r12 mulq %r14 movq %rax,%r13 movq %r14,%rax movq %rdx,%r14 mulq %rbp addq %rax,%r11 movq %r15,%rax adcq $0,%rdx movq %rdx,%r15 mulq %rbp addq %rax,%r12 adcq $0,%rdx addq %r15,%r12 adcq %rdx,%r13 adcq $0,%r14 xorq %r15,%r15 movq %r8,%rax addq %r9,%r9 adcq %r10,%r10 adcq %r11,%r11 adcq %r12,%r12 adcq %r13,%r13 adcq %r14,%r14 adcq $0,%r15 mulq %rax movq %rax,%r8 .byte 102,72,15,126,200 movq %rdx,%rbp mulq %rax addq %rbp,%r9 adcq %rax,%r10 .byte 102,72,15,126,208 adcq $0,%rdx movq %rdx,%rbp mulq %rax addq %rbp,%r11 adcq %rax,%r12 .byte 102,72,15,126,216 adcq $0,%rdx movq %rdx,%rbp movq %r8,%rcx imulq 32(%rsi),%r8 mulq %rax addq %rbp,%r13 adcq %rax,%r14 movq 0(%rsi),%rax adcq %rdx,%r15 mulq %r8 movq %r8,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r8,%r10 sbbq $0,%rbp mulq %r8 addq %rcx,%r9 adcq $0,%rdx addq %rax,%r9 movq %r8,%rax adcq %rdx,%r10 movq %r8,%rdx adcq $0,%rbp movq %r9,%rcx imulq 32(%rsi),%r9 shlq $32,%rax shrq $32,%rdx subq %rax,%r11 movq 0(%rsi),%rax sbbq %rdx,%r8 addq %rbp,%r11 adcq $0,%r8 mulq %r9 movq %r9,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r9,%r11 sbbq $0,%rbp mulq %r9 addq %rcx,%r10 adcq $0,%rdx addq %rax,%r10 movq %r9,%rax adcq %rdx,%r11 movq %r9,%rdx adcq $0,%rbp movq %r10,%rcx imulq 32(%rsi),%r10 shlq $32,%rax shrq $32,%rdx subq %rax,%r8 movq 0(%rsi),%rax sbbq %rdx,%r9 addq %rbp,%r8 adcq $0,%r9 mulq %r10 movq %r10,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r10,%r8 sbbq $0,%rbp 
mulq %r10 addq %rcx,%r11 adcq $0,%rdx addq %rax,%r11 movq %r10,%rax adcq %rdx,%r8 movq %r10,%rdx adcq $0,%rbp movq %r11,%rcx imulq 32(%rsi),%r11 shlq $32,%rax shrq $32,%rdx subq %rax,%r9 movq 0(%rsi),%rax sbbq %rdx,%r10 addq %rbp,%r9 adcq $0,%r10 mulq %r11 movq %r11,%rbp addq %rax,%rcx movq 8(%rsi),%rax adcq %rdx,%rcx subq %r11,%r9 sbbq $0,%rbp mulq %r11 addq %rcx,%r8 adcq $0,%rdx addq %rax,%r8 movq %r11,%rax adcq %rdx,%r9 movq %r11,%rdx adcq $0,%rbp shlq $32,%rax shrq $32,%rdx subq %rax,%r10 sbbq %rdx,%r11 addq %rbp,%r10 adcq $0,%r11 xorq %rdx,%rdx addq %r12,%r8 adcq %r13,%r9 movq %r8,%r12 adcq %r14,%r10 adcq %r15,%r11 movq %r9,%rax adcq $0,%rdx subq 0(%rsi),%r8 movq %r10,%r14 sbbq 8(%rsi),%r9 sbbq 16(%rsi),%r10 movq %r11,%r15 sbbq 24(%rsi),%r11 sbbq $0,%rdx cmovcq %r12,%r8 cmovncq %r9,%rax cmovncq %r10,%r14 cmovncq %r11,%r15 decq %rbx jnz L$oop_ord_sqr movq %r8,0(%rdi) movq %rax,8(%rdi) pxor %xmm1,%xmm1 movq %r14,16(%rdi) pxor %xmm2,%xmm2 movq %r15,24(%rdi) pxor %xmm3,%xmm3 movq 0(%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$ord_sqr_epilogue: ret .globl _ecp_nistz256_ord_mul_mont_adx .private_extern _ecp_nistz256_ord_mul_mont_adx .p2align 5 _ecp_nistz256_ord_mul_mont_adx: L$ecp_nistz256_ord_mul_mont_adx: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$ord_mulx_body: movq %rdx,%rbx movq 0(%rdx),%rdx movq 0(%rsi),%r9 movq 8(%rsi),%r10 movq 16(%rsi),%r11 movq 24(%rsi),%r12 leaq -128(%rsi),%rsi leaq L$ord-128(%rip),%r14 movq L$ordK(%rip),%r15 mulxq %r9,%r8,%r9 mulxq %r10,%rcx,%r10 mulxq %r11,%rbp,%r11 addq %rcx,%r9 mulxq %r12,%rcx,%r12 movq %r8,%rdx mulxq %r15,%rdx,%rax adcq %rbp,%r10 adcq %rcx,%r11 adcq $0,%r12 xorq %r13,%r13 mulxq 0+128(%r14),%rcx,%rbp adcxq %rcx,%r8 adoxq %rbp,%r9 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 16+128(%r14),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 24+128(%r14),%rcx,%rbp movq 8(%rbx),%rdx adcxq %rcx,%r11 adoxq %rbp,%r12 adcxq %r8,%r12 adoxq %r8,%r13 adcq $0,%r13 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 24+128(%rsi),%rcx,%rbp movq %r9,%rdx mulxq %r15,%rdx,%rax adcxq %rcx,%r12 adoxq %rbp,%r13 adcxq %r8,%r13 adoxq %r8,%r8 adcq $0,%r8 mulxq 0+128(%r14),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 16+128(%r14),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 24+128(%r14),%rcx,%rbp movq 16(%rbx),%rdx adcxq %rcx,%r12 adoxq %rbp,%r13 adcxq %r9,%r13 adoxq %r9,%r8 adcq $0,%r8 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 24+128(%rsi),%rcx,%rbp movq %r10,%rdx mulxq %r15,%rdx,%rax adcxq %rcx,%r13 adoxq %rbp,%r8 adcxq %r9,%r8 adoxq %r9,%r9 adcq $0,%r9 mulxq 0+128(%r14),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 16+128(%r14),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 24+128(%r14),%rcx,%rbp movq 24(%rbx),%rdx adcxq %rcx,%r13 adoxq %rbp,%r8 adcxq %r10,%r8 adoxq %r10,%r9 adcq $0,%r9 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r13 adoxq %rbp,%r8 mulxq 24+128(%rsi),%rcx,%rbp movq %r11,%rdx mulxq %r15,%rdx,%rax adcxq 
%rcx,%r8 adoxq %rbp,%r9 adcxq %r10,%r9 adoxq %r10,%r10 adcq $0,%r10 mulxq 0+128(%r14),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 8+128(%r14),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 16+128(%r14),%rcx,%rbp adcxq %rcx,%r13 adoxq %rbp,%r8 mulxq 24+128(%r14),%rcx,%rbp leaq 128(%r14),%r14 movq %r12,%rbx adcxq %rcx,%r8 adoxq %rbp,%r9 movq %r13,%rdx adcxq %r11,%r9 adoxq %r11,%r10 adcq $0,%r10 movq %r8,%rcx subq 0(%r14),%r12 sbbq 8(%r14),%r13 sbbq 16(%r14),%r8 movq %r9,%rbp sbbq 24(%r14),%r9 sbbq $0,%r10 cmovcq %rbx,%r12 cmovcq %rdx,%r13 cmovcq %rcx,%r8 cmovcq %rbp,%r9 movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 0(%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$ord_mulx_epilogue: ret .globl _ecp_nistz256_ord_sqr_mont_adx .private_extern _ecp_nistz256_ord_sqr_mont_adx .p2align 5 _ecp_nistz256_ord_sqr_mont_adx: _CET_ENDBR L$ecp_nistz256_ord_sqr_mont_adx: pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$ord_sqrx_body: movq %rdx,%rbx movq 0(%rsi),%rdx movq 8(%rsi),%r14 movq 16(%rsi),%r15 movq 24(%rsi),%r8 leaq L$ord(%rip),%rsi jmp L$oop_ord_sqrx .p2align 5 L$oop_ord_sqrx: mulxq %r14,%r9,%r10 mulxq %r15,%rcx,%r11 movq %rdx,%rax .byte 102,73,15,110,206 mulxq %r8,%rbp,%r12 movq %r14,%rdx addq %rcx,%r10 .byte 102,73,15,110,215 adcq %rbp,%r11 adcq $0,%r12 xorq %r13,%r13 mulxq %r15,%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq %r8,%rcx,%rbp movq %r15,%rdx adcxq %rcx,%r12 adoxq %rbp,%r13 adcq $0,%r13 mulxq %r8,%rcx,%r14 movq %rax,%rdx .byte 102,73,15,110,216 xorq %r15,%r15 adcxq %r9,%r9 adoxq %rcx,%r13 adcxq %r10,%r10 adoxq %r15,%r14 mulxq %rdx,%r8,%rbp .byte 102,72,15,126,202 adcxq %r11,%r11 adoxq %rbp,%r9 adcxq %r12,%r12 mulxq %rdx,%rcx,%rax .byte 102,72,15,126,210 adcxq %r13,%r13 adoxq %rcx,%r10 adcxq %r14,%r14 mulxq %rdx,%rcx,%rbp .byte 0x67 .byte 102,72,15,126,218 adoxq %rax,%r11 adcxq %r15,%r15 adoxq %rcx,%r12 adoxq %rbp,%r13 mulxq %rdx,%rcx,%rax adoxq %rcx,%r14 adoxq %rax,%r15 movq %r8,%rdx mulxq 32(%rsi),%rdx,%rcx xorq %rax,%rax mulxq 0(%rsi),%rcx,%rbp adcxq %rcx,%r8 adoxq %rbp,%r9 mulxq 8(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 16(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 24(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r8 adcxq %rax,%r8 movq %r9,%rdx mulxq 32(%rsi),%rdx,%rcx mulxq 0(%rsi),%rcx,%rbp adoxq %rcx,%r9 adcxq %rbp,%r10 mulxq 8(%rsi),%rcx,%rbp adoxq %rcx,%r10 adcxq %rbp,%r11 mulxq 16(%rsi),%rcx,%rbp adoxq %rcx,%r11 adcxq %rbp,%r8 mulxq 24(%rsi),%rcx,%rbp adoxq %rcx,%r8 adcxq %rbp,%r9 adoxq %rax,%r9 movq %r10,%rdx mulxq 32(%rsi),%rdx,%rcx mulxq 0(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r8 mulxq 16(%rsi),%rcx,%rbp adcxq %rcx,%r8 adoxq %rbp,%r9 mulxq 24(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 adcxq %rax,%r10 movq %r11,%rdx mulxq 32(%rsi),%rdx,%rcx mulxq 0(%rsi),%rcx,%rbp adoxq %rcx,%r11 adcxq %rbp,%r8 mulxq 8(%rsi),%rcx,%rbp adoxq %rcx,%r8 adcxq %rbp,%r9 mulxq 16(%rsi),%rcx,%rbp adoxq %rcx,%r9 adcxq %rbp,%r10 mulxq 24(%rsi),%rcx,%rbp adoxq %rcx,%r10 adcxq %rbp,%r11 adoxq %rax,%r11 addq %r8,%r12 adcq %r13,%r9 movq %r12,%rdx adcq %r14,%r10 adcq %r15,%r11 movq %r9,%r14 adcq $0,%rax subq 0(%rsi),%r12 movq %r10,%r15 sbbq 8(%rsi),%r9 sbbq 16(%rsi),%r10 movq %r11,%r8 sbbq 24(%rsi),%r11 sbbq $0,%rax cmovncq %r12,%rdx cmovncq %r9,%r14 cmovncq %r10,%r15 cmovncq %r11,%r8 decq %rbx jnz L$oop_ord_sqrx movq %rdx,0(%rdi) movq %r14,8(%rdi) pxor %xmm1,%xmm1 movq %r15,16(%rdi) 
pxor %xmm2,%xmm2 movq %r8,24(%rdi) pxor %xmm3,%xmm3 movq 0(%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$ord_sqrx_epilogue: ret .globl _ecp_nistz256_mul_mont_nohw .private_extern _ecp_nistz256_mul_mont_nohw .p2align 5 _ecp_nistz256_mul_mont_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$mul_body: movq %rdx,%rbx movq 0(%rdx),%rax movq 0(%rsi),%r9 movq 8(%rsi),%r10 movq 16(%rsi),%r11 movq 24(%rsi),%r12 call __ecp_nistz256_mul_montq movq 0(%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$mul_epilogue: ret .p2align 5 __ecp_nistz256_mul_montq: movq %rax,%rbp mulq %r9 movq L$poly+8(%rip),%r14 movq %rax,%r8 movq %rbp,%rax movq %rdx,%r9 mulq %r10 movq L$poly+24(%rip),%r15 addq %rax,%r9 movq %rbp,%rax adcq $0,%rdx movq %rdx,%r10 mulq %r11 addq %rax,%r10 movq %rbp,%rax adcq $0,%rdx movq %rdx,%r11 mulq %r12 addq %rax,%r11 movq %r8,%rax adcq $0,%rdx xorq %r13,%r13 movq %rdx,%r12 movq %r8,%rbp shlq $32,%r8 mulq %r15 shrq $32,%rbp addq %r8,%r9 adcq %rbp,%r10 adcq %rax,%r11 movq 8(%rbx),%rax adcq %rdx,%r12 adcq $0,%r13 xorq %r8,%r8 movq %rax,%rbp mulq 0(%rsi) addq %rax,%r9 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 8(%rsi) addq %rcx,%r10 adcq $0,%rdx addq %rax,%r10 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 16(%rsi) addq %rcx,%r11 adcq $0,%rdx addq %rax,%r11 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 24(%rsi) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq %r9,%rax adcq %rdx,%r13 adcq $0,%r8 movq %r9,%rbp shlq $32,%r9 mulq %r15 shrq $32,%rbp addq %r9,%r10 adcq %rbp,%r11 adcq %rax,%r12 movq 16(%rbx),%rax adcq %rdx,%r13 adcq $0,%r8 xorq %r9,%r9 movq %rax,%rbp mulq 0(%rsi) addq %rax,%r10 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 8(%rsi) addq %rcx,%r11 adcq $0,%rdx addq %rax,%r11 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 16(%rsi) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 24(%rsi) addq %rcx,%r13 adcq $0,%rdx addq %rax,%r13 movq %r10,%rax adcq %rdx,%r8 adcq $0,%r9 movq %r10,%rbp shlq $32,%r10 mulq %r15 shrq $32,%rbp addq %r10,%r11 adcq %rbp,%r12 adcq %rax,%r13 movq 24(%rbx),%rax adcq %rdx,%r8 adcq $0,%r9 xorq %r10,%r10 movq %rax,%rbp mulq 0(%rsi) addq %rax,%r11 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 8(%rsi) addq %rcx,%r12 adcq $0,%rdx addq %rax,%r12 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 16(%rsi) addq %rcx,%r13 adcq $0,%rdx addq %rax,%r13 movq %rbp,%rax adcq $0,%rdx movq %rdx,%rcx mulq 24(%rsi) addq %rcx,%r8 adcq $0,%rdx addq %rax,%r8 movq %r11,%rax adcq %rdx,%r9 adcq $0,%r10 movq %r11,%rbp shlq $32,%r11 mulq %r15 shrq $32,%rbp addq %r11,%r12 adcq %rbp,%r13 movq %r12,%rcx adcq %rax,%r8 adcq %rdx,%r9 movq %r13,%rbp adcq $0,%r10 subq $-1,%r12 movq %r8,%rbx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%rdx sbbq %r15,%r9 sbbq $0,%r10 cmovcq %rcx,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rbx,%r8 movq %r13,8(%rdi) cmovcq %rdx,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .globl _ecp_nistz256_sqr_mont_nohw .private_extern _ecp_nistz256_sqr_mont_nohw .p2align 5 _ecp_nistz256_sqr_mont_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$sqr_body: movq 0(%rsi),%rax movq 8(%rsi),%r14 movq 16(%rsi),%r15 movq 24(%rsi),%r8 call __ecp_nistz256_sqr_montq movq 0(%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$sqr_epilogue: ret .p2align 5 
__ecp_nistz256_sqr_montq: movq %rax,%r13 mulq %r14 movq %rax,%r9 movq %r15,%rax movq %rdx,%r10 mulq %r13 addq %rax,%r10 movq %r8,%rax adcq $0,%rdx movq %rdx,%r11 mulq %r13 addq %rax,%r11 movq %r15,%rax adcq $0,%rdx movq %rdx,%r12 mulq %r14 addq %rax,%r11 movq %r8,%rax adcq $0,%rdx movq %rdx,%rbp mulq %r14 addq %rax,%r12 movq %r8,%rax adcq $0,%rdx addq %rbp,%r12 movq %rdx,%r13 adcq $0,%r13 mulq %r15 xorq %r15,%r15 addq %rax,%r13 movq 0(%rsi),%rax movq %rdx,%r14 adcq $0,%r14 addq %r9,%r9 adcq %r10,%r10 adcq %r11,%r11 adcq %r12,%r12 adcq %r13,%r13 adcq %r14,%r14 adcq $0,%r15 mulq %rax movq %rax,%r8 movq 8(%rsi),%rax movq %rdx,%rcx mulq %rax addq %rcx,%r9 adcq %rax,%r10 movq 16(%rsi),%rax adcq $0,%rdx movq %rdx,%rcx mulq %rax addq %rcx,%r11 adcq %rax,%r12 movq 24(%rsi),%rax adcq $0,%rdx movq %rdx,%rcx mulq %rax addq %rcx,%r13 adcq %rax,%r14 movq %r8,%rax adcq %rdx,%r15 movq L$poly+8(%rip),%rsi movq L$poly+24(%rip),%rbp movq %r8,%rcx shlq $32,%r8 mulq %rbp shrq $32,%rcx addq %r8,%r9 adcq %rcx,%r10 adcq %rax,%r11 movq %r9,%rax adcq $0,%rdx movq %r9,%rcx shlq $32,%r9 movq %rdx,%r8 mulq %rbp shrq $32,%rcx addq %r9,%r10 adcq %rcx,%r11 adcq %rax,%r8 movq %r10,%rax adcq $0,%rdx movq %r10,%rcx shlq $32,%r10 movq %rdx,%r9 mulq %rbp shrq $32,%rcx addq %r10,%r11 adcq %rcx,%r8 adcq %rax,%r9 movq %r11,%rax adcq $0,%rdx movq %r11,%rcx shlq $32,%r11 movq %rdx,%r10 mulq %rbp shrq $32,%rcx addq %r11,%r8 adcq %rcx,%r9 adcq %rax,%r10 adcq $0,%rdx xorq %r11,%r11 addq %r8,%r12 adcq %r9,%r13 movq %r12,%r8 adcq %r10,%r14 adcq %rdx,%r15 movq %r13,%r9 adcq $0,%r11 subq $-1,%r12 movq %r14,%r10 sbbq %rsi,%r13 sbbq $0,%r14 movq %r15,%rcx sbbq %rbp,%r15 sbbq $0,%r11 cmovcq %r8,%r12 cmovcq %r9,%r13 movq %r12,0(%rdi) cmovcq %r10,%r14 movq %r13,8(%rdi) cmovcq %rcx,%r15 movq %r14,16(%rdi) movq %r15,24(%rdi) ret .globl _ecp_nistz256_mul_mont_adx .private_extern _ecp_nistz256_mul_mont_adx .p2align 5 _ecp_nistz256_mul_mont_adx: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$mulx_body: movq %rdx,%rbx movq 0(%rdx),%rdx movq 0(%rsi),%r9 movq 8(%rsi),%r10 movq 16(%rsi),%r11 movq 24(%rsi),%r12 leaq -128(%rsi),%rsi call __ecp_nistz256_mul_montx movq 0(%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$mulx_epilogue: ret .p2align 5 __ecp_nistz256_mul_montx: mulxq %r9,%r8,%r9 mulxq %r10,%rcx,%r10 movq $32,%r14 xorq %r13,%r13 mulxq %r11,%rbp,%r11 movq L$poly+24(%rip),%r15 adcq %rcx,%r9 mulxq %r12,%rcx,%r12 movq %r8,%rdx adcq %rbp,%r10 shlxq %r14,%r8,%rbp adcq %rcx,%r11 shrxq %r14,%r8,%rcx adcq $0,%r12 addq %rbp,%r9 adcq %rcx,%r10 mulxq %r15,%rcx,%rbp movq 8(%rbx),%rdx adcq %rcx,%r11 adcq %rbp,%r12 adcq $0,%r13 xorq %r8,%r8 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r9 adoxq %rbp,%r10 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 24+128(%rsi),%rcx,%rbp movq %r9,%rdx adcxq %rcx,%r12 shlxq %r14,%r9,%rcx adoxq %rbp,%r13 shrxq %r14,%r9,%rbp adcxq %r8,%r13 adoxq %r8,%r8 adcq $0,%r8 addq %rcx,%r10 adcq %rbp,%r11 mulxq %r15,%rcx,%rbp movq 16(%rbx),%rdx adcq %rcx,%r12 adcq %rbp,%r13 adcq $0,%r8 xorq %r9,%r9 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r10 adoxq %rbp,%r11 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 24+128(%rsi),%rcx,%rbp movq %r10,%rdx adcxq %rcx,%r13 shlxq %r14,%r10,%rcx adoxq %rbp,%r8 shrxq %r14,%r10,%rbp adcxq %r9,%r8 adoxq %r9,%r9 adcq $0,%r9 addq %rcx,%r11 adcq 
%rbp,%r12 mulxq %r15,%rcx,%rbp movq 24(%rbx),%rdx adcq %rcx,%r13 adcq %rbp,%r8 adcq $0,%r9 xorq %r10,%r10 mulxq 0+128(%rsi),%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq 8+128(%rsi),%rcx,%rbp adcxq %rcx,%r12 adoxq %rbp,%r13 mulxq 16+128(%rsi),%rcx,%rbp adcxq %rcx,%r13 adoxq %rbp,%r8 mulxq 24+128(%rsi),%rcx,%rbp movq %r11,%rdx adcxq %rcx,%r8 shlxq %r14,%r11,%rcx adoxq %rbp,%r9 shrxq %r14,%r11,%rbp adcxq %r10,%r9 adoxq %r10,%r10 adcq $0,%r10 addq %rcx,%r12 adcq %rbp,%r13 mulxq %r15,%rcx,%rbp movq %r12,%rbx movq L$poly+8(%rip),%r14 adcq %rcx,%r8 movq %r13,%rdx adcq %rbp,%r9 adcq $0,%r10 xorl %eax,%eax movq %r8,%rcx sbbq $-1,%r12 sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%rbp sbbq %r15,%r9 sbbq $0,%r10 cmovcq %rbx,%r12 cmovcq %rdx,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %rbp,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .globl _ecp_nistz256_sqr_mont_adx .private_extern _ecp_nistz256_sqr_mont_adx .p2align 5 _ecp_nistz256_sqr_mont_adx: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 L$sqrx_body: movq 0(%rsi),%rdx movq 8(%rsi),%r14 movq 16(%rsi),%r15 movq 24(%rsi),%r8 leaq -128(%rsi),%rsi call __ecp_nistz256_sqr_montx movq 0(%rsp),%r15 movq 8(%rsp),%r14 movq 16(%rsp),%r13 movq 24(%rsp),%r12 movq 32(%rsp),%rbx movq 40(%rsp),%rbp leaq 48(%rsp),%rsp L$sqrx_epilogue: ret .p2align 5 __ecp_nistz256_sqr_montx: mulxq %r14,%r9,%r10 mulxq %r15,%rcx,%r11 xorl %eax,%eax adcq %rcx,%r10 mulxq %r8,%rbp,%r12 movq %r14,%rdx adcq %rbp,%r11 adcq $0,%r12 xorq %r13,%r13 mulxq %r15,%rcx,%rbp adcxq %rcx,%r11 adoxq %rbp,%r12 mulxq %r8,%rcx,%rbp movq %r15,%rdx adcxq %rcx,%r12 adoxq %rbp,%r13 adcq $0,%r13 mulxq %r8,%rcx,%r14 movq 0+128(%rsi),%rdx xorq %r15,%r15 adcxq %r9,%r9 adoxq %rcx,%r13 adcxq %r10,%r10 adoxq %r15,%r14 mulxq %rdx,%r8,%rbp movq 8+128(%rsi),%rdx adcxq %r11,%r11 adoxq %rbp,%r9 adcxq %r12,%r12 mulxq %rdx,%rcx,%rax movq 16+128(%rsi),%rdx adcxq %r13,%r13 adoxq %rcx,%r10 adcxq %r14,%r14 .byte 0x67 mulxq %rdx,%rcx,%rbp movq 24+128(%rsi),%rdx adoxq %rax,%r11 adcxq %r15,%r15 adoxq %rcx,%r12 movq $32,%rsi adoxq %rbp,%r13 .byte 0x67,0x67 mulxq %rdx,%rcx,%rax movq L$poly+24(%rip),%rdx adoxq %rcx,%r14 shlxq %rsi,%r8,%rcx adoxq %rax,%r15 shrxq %rsi,%r8,%rax movq %rdx,%rbp addq %rcx,%r9 adcq %rax,%r10 mulxq %r8,%rcx,%r8 adcq %rcx,%r11 shlxq %rsi,%r9,%rcx adcq $0,%r8 shrxq %rsi,%r9,%rax addq %rcx,%r10 adcq %rax,%r11 mulxq %r9,%rcx,%r9 adcq %rcx,%r8 shlxq %rsi,%r10,%rcx adcq $0,%r9 shrxq %rsi,%r10,%rax addq %rcx,%r11 adcq %rax,%r8 mulxq %r10,%rcx,%r10 adcq %rcx,%r9 shlxq %rsi,%r11,%rcx adcq $0,%r10 shrxq %rsi,%r11,%rax addq %rcx,%r8 adcq %rax,%r9 mulxq %r11,%rcx,%r11 adcq %rcx,%r10 adcq $0,%r11 xorq %rdx,%rdx addq %r8,%r12 movq L$poly+8(%rip),%rsi adcq %r9,%r13 movq %r12,%r8 adcq %r10,%r14 adcq %r11,%r15 movq %r13,%r9 adcq $0,%rdx subq $-1,%r12 movq %r14,%r10 sbbq %rsi,%r13 sbbq $0,%r14 movq %r15,%r11 sbbq %rbp,%r15 sbbq $0,%rdx cmovcq %r8,%r12 cmovcq %r9,%r13 movq %r12,0(%rdi) cmovcq %r10,%r14 movq %r13,8(%rdi) cmovcq %r11,%r15 movq %r14,16(%rdi) movq %r15,24(%rdi) ret .globl _ecp_nistz256_select_w5_nohw .private_extern _ecp_nistz256_select_w5_nohw .p2align 5 _ecp_nistz256_select_w5_nohw: _CET_ENDBR movdqa L$One(%rip),%xmm0 movd %edx,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 movdqa %xmm0,%xmm8 pshufd $0,%xmm1,%xmm1 movq $16,%rax L$select_loop_sse_w5: movdqa %xmm8,%xmm15 paddd %xmm0,%xmm8 pcmpeqd %xmm1,%xmm15 movdqa 0(%rsi),%xmm9 movdqa 16(%rsi),%xmm10 movdqa 32(%rsi),%xmm11 movdqa 48(%rsi),%xmm12 movdqa 
64(%rsi),%xmm13 movdqa 80(%rsi),%xmm14 leaq 96(%rsi),%rsi pand %xmm15,%xmm9 pand %xmm15,%xmm10 por %xmm9,%xmm2 pand %xmm15,%xmm11 por %xmm10,%xmm3 pand %xmm15,%xmm12 por %xmm11,%xmm4 pand %xmm15,%xmm13 por %xmm12,%xmm5 pand %xmm15,%xmm14 por %xmm13,%xmm6 por %xmm14,%xmm7 decq %rax jnz L$select_loop_sse_w5 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqu %xmm4,32(%rdi) movdqu %xmm5,48(%rdi) movdqu %xmm6,64(%rdi) movdqu %xmm7,80(%rdi) ret L$SEH_end_ecp_nistz256_select_w5_nohw: .globl _ecp_nistz256_select_w7_nohw .private_extern _ecp_nistz256_select_w7_nohw .p2align 5 _ecp_nistz256_select_w7_nohw: _CET_ENDBR movdqa L$One(%rip),%xmm8 movd %edx,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 movdqa %xmm8,%xmm0 pshufd $0,%xmm1,%xmm1 movq $64,%rax L$select_loop_sse_w7: movdqa %xmm8,%xmm15 paddd %xmm0,%xmm8 movdqa 0(%rsi),%xmm9 movdqa 16(%rsi),%xmm10 pcmpeqd %xmm1,%xmm15 movdqa 32(%rsi),%xmm11 movdqa 48(%rsi),%xmm12 leaq 64(%rsi),%rsi pand %xmm15,%xmm9 pand %xmm15,%xmm10 por %xmm9,%xmm2 pand %xmm15,%xmm11 por %xmm10,%xmm3 pand %xmm15,%xmm12 por %xmm11,%xmm4 prefetcht0 255(%rsi) por %xmm12,%xmm5 decq %rax jnz L$select_loop_sse_w7 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqu %xmm4,32(%rdi) movdqu %xmm5,48(%rdi) ret L$SEH_end_ecp_nistz256_select_w7_nohw: .globl _ecp_nistz256_select_w5_avx2 .private_extern _ecp_nistz256_select_w5_avx2 .p2align 5 _ecp_nistz256_select_w5_avx2: _CET_ENDBR vzeroupper vmovdqa L$Two(%rip),%ymm0 vpxor %ymm2,%ymm2,%ymm2 vpxor %ymm3,%ymm3,%ymm3 vpxor %ymm4,%ymm4,%ymm4 vmovdqa L$One(%rip),%ymm5 vmovdqa L$Two(%rip),%ymm10 vmovd %edx,%xmm1 vpermd %ymm1,%ymm2,%ymm1 movq $8,%rax L$select_loop_avx2_w5: vmovdqa 0(%rsi),%ymm6 vmovdqa 32(%rsi),%ymm7 vmovdqa 64(%rsi),%ymm8 vmovdqa 96(%rsi),%ymm11 vmovdqa 128(%rsi),%ymm12 vmovdqa 160(%rsi),%ymm13 vpcmpeqd %ymm1,%ymm5,%ymm9 vpcmpeqd %ymm1,%ymm10,%ymm14 vpaddd %ymm0,%ymm5,%ymm5 vpaddd %ymm0,%ymm10,%ymm10 leaq 192(%rsi),%rsi vpand %ymm9,%ymm6,%ymm6 vpand %ymm9,%ymm7,%ymm7 vpand %ymm9,%ymm8,%ymm8 vpand %ymm14,%ymm11,%ymm11 vpand %ymm14,%ymm12,%ymm12 vpand %ymm14,%ymm13,%ymm13 vpxor %ymm6,%ymm2,%ymm2 vpxor %ymm7,%ymm3,%ymm3 vpxor %ymm8,%ymm4,%ymm4 vpxor %ymm11,%ymm2,%ymm2 vpxor %ymm12,%ymm3,%ymm3 vpxor %ymm13,%ymm4,%ymm4 decq %rax jnz L$select_loop_avx2_w5 vmovdqu %ymm2,0(%rdi) vmovdqu %ymm3,32(%rdi) vmovdqu %ymm4,64(%rdi) vzeroupper ret L$SEH_end_ecp_nistz256_select_w5_avx2: .globl _ecp_nistz256_select_w7_avx2 .private_extern _ecp_nistz256_select_w7_avx2 .p2align 5 _ecp_nistz256_select_w7_avx2: _CET_ENDBR vzeroupper vmovdqa L$Three(%rip),%ymm0 vpxor %ymm2,%ymm2,%ymm2 vpxor %ymm3,%ymm3,%ymm3 vmovdqa L$One(%rip),%ymm4 vmovdqa L$Two(%rip),%ymm8 vmovdqa L$Three(%rip),%ymm12 vmovd %edx,%xmm1 vpermd %ymm1,%ymm2,%ymm1 movq $21,%rax L$select_loop_avx2_w7: vmovdqa 0(%rsi),%ymm5 vmovdqa 32(%rsi),%ymm6 vmovdqa 64(%rsi),%ymm9 vmovdqa 96(%rsi),%ymm10 vmovdqa 128(%rsi),%ymm13 vmovdqa 160(%rsi),%ymm14 vpcmpeqd %ymm1,%ymm4,%ymm7 vpcmpeqd %ymm1,%ymm8,%ymm11 vpcmpeqd %ymm1,%ymm12,%ymm15 vpaddd %ymm0,%ymm4,%ymm4 vpaddd %ymm0,%ymm8,%ymm8 vpaddd %ymm0,%ymm12,%ymm12 leaq 192(%rsi),%rsi vpand %ymm7,%ymm5,%ymm5 vpand %ymm7,%ymm6,%ymm6 vpand %ymm11,%ymm9,%ymm9 vpand %ymm11,%ymm10,%ymm10 vpand %ymm15,%ymm13,%ymm13 vpand %ymm15,%ymm14,%ymm14 vpxor %ymm5,%ymm2,%ymm2 vpxor %ymm6,%ymm3,%ymm3 vpxor %ymm9,%ymm2,%ymm2 vpxor %ymm10,%ymm3,%ymm3 vpxor %ymm13,%ymm2,%ymm2 vpxor %ymm14,%ymm3,%ymm3 decq %rax jnz L$select_loop_avx2_w7 vmovdqa 0(%rsi),%ymm5 vmovdqa 32(%rsi),%ymm6 vpcmpeqd %ymm1,%ymm4,%ymm7 vpand %ymm7,%ymm5,%ymm5 vpand 
%ymm7,%ymm6,%ymm6 vpxor %ymm5,%ymm2,%ymm2 vpxor %ymm6,%ymm3,%ymm3 vmovdqu %ymm2,0(%rdi) vmovdqu %ymm3,32(%rdi) vzeroupper ret L$SEH_end_ecp_nistz256_select_w7_avx2: .p2align 5 __ecp_nistz256_add_toq: xorq %r11,%r11 addq 0(%rbx),%r12 adcq 8(%rbx),%r13 movq %r12,%rax adcq 16(%rbx),%r8 adcq 24(%rbx),%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .p2align 5 __ecp_nistz256_sub_fromq: subq 0(%rbx),%r12 sbbq 8(%rbx),%r13 movq %r12,%rax sbbq 16(%rbx),%r8 sbbq 24(%rbx),%r9 movq %r13,%rbp sbbq %r11,%r11 addq $-1,%r12 movq %r8,%rcx adcq %r14,%r13 adcq $0,%r8 movq %r9,%r10 adcq %r15,%r9 testq %r11,%r11 cmovzq %rax,%r12 cmovzq %rbp,%r13 movq %r12,0(%rdi) cmovzq %rcx,%r8 movq %r13,8(%rdi) cmovzq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .p2align 5 __ecp_nistz256_subq: subq %r12,%rax sbbq %r13,%rbp movq %rax,%r12 sbbq %r8,%rcx sbbq %r9,%r10 movq %rbp,%r13 sbbq %r11,%r11 addq $-1,%rax movq %rcx,%r8 adcq %r14,%rbp adcq $0,%rcx movq %r10,%r9 adcq %r15,%r10 testq %r11,%r11 cmovnzq %rax,%r12 cmovnzq %rbp,%r13 cmovnzq %rcx,%r8 cmovnzq %r10,%r9 ret .p2align 5 __ecp_nistz256_mul_by_2q: xorq %r11,%r11 addq %r12,%r12 adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .globl _ecp_nistz256_point_double_nohw .private_extern _ecp_nistz256_point_double_nohw .p2align 5 _ecp_nistz256_point_double_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $160+8,%rsp L$point_doubleq_body: L$point_double_shortcutq: movdqu 0(%rsi),%xmm0 movq %rsi,%rbx movdqu 16(%rsi),%xmm1 movq 32+0(%rsi),%r12 movq 32+8(%rsi),%r13 movq 32+16(%rsi),%r8 movq 32+24(%rsi),%r9 movq L$poly+8(%rip),%r14 movq L$poly+24(%rip),%r15 movdqa %xmm0,96(%rsp) movdqa %xmm1,96+16(%rsp) leaq 32(%rdi),%r10 leaq 64(%rdi),%r11 .byte 102,72,15,110,199 .byte 102,73,15,110,202 .byte 102,73,15,110,211 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_by_2q movq 64+0(%rsi),%rax movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 leaq 64-0(%rsi),%rsi leaq 64(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 0+0(%rsp),%rax movq 8+0(%rsp),%r14 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 0(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 32(%rbx),%rax movq 64+0(%rbx),%r9 movq 64+8(%rbx),%r10 movq 64+16(%rbx),%r11 movq 64+24(%rbx),%r12 leaq 64-0(%rbx),%rsi leaq 32(%rbx),%rbx .byte 102,72,15,126,215 call __ecp_nistz256_mul_montq call __ecp_nistz256_mul_by_2q movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 32(%rsp),%rdi call __ecp_nistz256_add_toq movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 0+0(%rsp),%rax movq 8+0(%rsp),%r14 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 .byte 102,72,15,126,207 call __ecp_nistz256_sqr_montq xorq %r9,%r9 movq %r12,%rax addq $-1,%r12 movq %r13,%r10 adcq %rsi,%r13 movq %r14,%rcx adcq $0,%r14 movq %r15,%r8 adcq %rbp,%r15 adcq $0,%r9 xorq %rsi,%rsi testq $1,%rax cmovzq %rax,%r12 cmovzq %r10,%r13 cmovzq %rcx,%r14 cmovzq 
%r8,%r15 cmovzq %rsi,%r9 movq %r13,%rax shrq $1,%r12 shlq $63,%rax movq %r14,%r10 shrq $1,%r13 orq %rax,%r12 shlq $63,%r10 movq %r15,%rcx shrq $1,%r14 orq %r10,%r13 shlq $63,%rcx movq %r12,0(%rdi) shrq $1,%r15 movq %r13,8(%rdi) shlq $63,%r9 orq %rcx,%r14 orq %r9,%r15 movq %r14,16(%rdi) movq %r15,24(%rdi) movq 64(%rsp),%rax leaq 64(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 128(%rsp),%rdi call __ecp_nistz256_mul_by_2q leaq 32(%rsp),%rbx leaq 32(%rsp),%rdi call __ecp_nistz256_add_toq movq 96(%rsp),%rax leaq 96(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r11 movq 24+0(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 128(%rsp),%rdi call __ecp_nistz256_mul_by_2q movq 0+32(%rsp),%rax movq 8+32(%rsp),%r14 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r15 movq 24+32(%rsp),%r8 .byte 102,72,15,126,199 call __ecp_nistz256_sqr_montq leaq 128(%rsp),%rbx movq %r14,%r8 movq %r15,%r9 movq %rsi,%r14 movq %rbp,%r15 call __ecp_nistz256_sub_fromq movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 0(%rsp),%rdi call __ecp_nistz256_subq movq 32(%rsp),%rax leaq 32(%rsp),%rbx movq %r12,%r14 xorl %ecx,%ecx movq %r12,0+0(%rsp) movq %r13,%r10 movq %r13,0+8(%rsp) cmovzq %r8,%r11 movq %r8,0+16(%rsp) leaq 0-0(%rsp),%rsi cmovzq %r9,%r12 movq %r9,0+24(%rsp) movq %r14,%r9 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq .byte 102,72,15,126,203 .byte 102,72,15,126,207 call __ecp_nistz256_sub_fromq leaq 160+56(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbx movq -8(%rsi),%rbp leaq (%rsi),%rsp L$point_doubleq_epilogue: ret .globl _ecp_nistz256_point_add_nohw .private_extern _ecp_nistz256_point_add_nohw .p2align 5 _ecp_nistz256_point_add_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $576+8,%rsp L$point_addq_body: movdqu 0(%rsi),%xmm0 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq %rsi,%rbx movq %rdx,%rsi movdqa %xmm0,384(%rsp) movdqa %xmm1,384+16(%rsp) movdqa %xmm2,416(%rsp) movdqa %xmm3,416+16(%rsp) movdqa %xmm4,448(%rsp) movdqa %xmm5,448+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rsi),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 por %xmm3,%xmm5 movdqu 48(%rsi),%xmm3 movq 64+0(%rsi),%rax movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,480(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,480+16(%rsp) movdqu 64(%rsi),%xmm0 movdqu 80(%rsi),%xmm1 movdqa %xmm2,512(%rsp) movdqa %xmm3,512+16(%rsp) por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm0,%xmm1 .byte 102,72,15,110,199 leaq 64-0(%rsi),%rsi movq %rax,544+0(%rsp) movq %r14,544+8(%rsp) movq %r15,544+16(%rsp) movq %r8,544+24(%rsp) leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montq pcmpeqd %xmm4,%xmm5 pshufd $0xb1,%xmm1,%xmm4 por %xmm1,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 por %xmm3,%xmm4 pxor %xmm3,%xmm3 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 movq 64+0(%rbx),%rax movq 64+8(%rbx),%r14 movq 64+16(%rbx),%r15 movq 64+24(%rbx),%r8 .byte 102,72,15,110,203 leaq 64-0(%rbx),%rsi leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 544(%rsp),%rax leaq 544(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq 0+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 224(%rsp),%rdi call __ecp_nistz256_mul_montq movq 448(%rsp),%rax leaq 
448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montq movq 416(%rsp),%rax leaq 416(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq 0+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 224(%rsp),%rdi call __ecp_nistz256_mul_montq movq 512(%rsp),%rax leaq 512(%rsp),%rbx movq 0+256(%rsp),%r9 movq 8+256(%rsp),%r10 leaq 0+256(%rsp),%rsi movq 16+256(%rsp),%r11 movq 24+256(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 224(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromq orq %r13,%r12 movdqa %xmm4,%xmm2 orq %r8,%r12 orq %r9,%r12 por %xmm5,%xmm2 .byte 102,73,15,110,220 movq 384(%rsp),%rax leaq 384(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq 0+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 160(%rsp),%rdi call __ecp_nistz256_mul_montq movq 480(%rsp),%rax leaq 480(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 160(%rsp),%rbx leaq 0(%rsp),%rdi call __ecp_nistz256_sub_fromq orq %r13,%r12 orq %r8,%r12 orq %r9,%r12 .byte 102,73,15,126,208 .byte 102,73,15,126,217 orq %r8,%r12 .byte 0x3e jnz L$add_proceedq testq %r9,%r9 jz L$add_doubleq .byte 102,72,15,126,199 pxor %xmm0,%xmm0 movdqu %xmm0,0(%rdi) movdqu %xmm0,16(%rdi) movdqu %xmm0,32(%rdi) movdqu %xmm0,48(%rdi) movdqu %xmm0,64(%rdi) movdqu %xmm0,80(%rdi) jmp L$add_doneq .p2align 5 L$add_doubleq: .byte 102,72,15,126,206 .byte 102,72,15,126,199 addq $416,%rsp jmp L$point_double_shortcutq .p2align 5 L$add_proceedq: movq 0+64(%rsp),%rax movq 8+64(%rsp),%r14 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 448(%rsp),%rax leaq 448(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r11 movq 24+0(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montq movq 0+0(%rsp),%rax movq 8+0(%rsp),%r14 leaq 0+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 544(%rsp),%rax leaq 544(%rsp),%rbx movq 0+352(%rsp),%r9 movq 8+352(%rsp),%r10 leaq 0+352(%rsp),%rsi movq 16+352(%rsp),%r11 movq 24+352(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montq movq 0(%rsp),%rax leaq 0(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 128(%rsp),%rdi call __ecp_nistz256_mul_montq movq 160(%rsp),%rax leaq 160(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montq xorq %r11,%r11 addq %r12,%r12 leaq 96(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subq leaq 128(%rsp),%rbx leaq 288(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 192+0(%rsp),%rax movq 192+8(%rsp),%rbp movq 192+16(%rsp),%rcx movq 192+24(%rsp),%r10 leaq 320(%rsp),%rdi call __ecp_nistz256_subq movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 128(%rsp),%rax leaq 128(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq 
0+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montq movq 320(%rsp),%rax leaq 320(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 320(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 256(%rsp),%rbx leaq 320(%rsp),%rdi call __ecp_nistz256_sub_fromq .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 352(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 352+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 544(%rsp),%xmm2 pand 544+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 448(%rsp),%xmm2 pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 480(%rsp),%xmm2 pand 480+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 320(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 320+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 512(%rsp),%xmm2 pand 512+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 416(%rsp),%xmm2 pand 416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) L$add_doneq: leaq 576+56(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbx movq -8(%rsi),%rbp leaq (%rsi),%rsp L$point_addq_epilogue: ret .globl _ecp_nistz256_point_add_affine_nohw .private_extern _ecp_nistz256_point_add_affine_nohw .p2align 5 _ecp_nistz256_point_add_affine_nohw: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $480+8,%rsp L$add_affineq_body: movdqu 0(%rsi),%xmm0 movq %rdx,%rbx movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq 64+0(%rsi),%rax movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,320(%rsp) movdqa %xmm1,320+16(%rsp) movdqa %xmm2,352(%rsp) movdqa %xmm3,352+16(%rsp) movdqa %xmm4,384(%rsp) movdqa %xmm5,384+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rbx),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rbx),%xmm1 movdqu 32(%rbx),%xmm2 por %xmm3,%xmm5 movdqu 48(%rbx),%xmm3 movdqa %xmm0,416(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,416+16(%rsp) por %xmm0,%xmm1 .byte 102,72,15,110,199 movdqa %xmm2,448(%rsp) movdqa %xmm3,448+16(%rsp) por %xmm2,%xmm3 por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm1,%xmm3 leaq 64-0(%rsi),%rsi leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montq pcmpeqd %xmm4,%xmm5 pshufd $0xb1,%xmm3,%xmm4 movq 0(%rbx),%rax movq %r12,%r9 por %xmm3,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 movq %r13,%r10 por %xmm3,%xmm4 pxor %xmm3,%xmm3 movq %r14,%r11 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 leaq 32-0(%rsp),%rsi movq %r15,%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 320(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 384(%rsp),%rax leaq 384(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call 
__ecp_nistz256_mul_montq movq 384(%rsp),%rax leaq 384(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 288(%rsp),%rdi call __ecp_nistz256_mul_montq movq 448(%rsp),%rax leaq 448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq 0+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 352(%rsp),%rbx leaq 96(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 0+64(%rsp),%rax movq 8+64(%rsp),%r14 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 128(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 0+96(%rsp),%rax movq 8+96(%rsp),%r14 leaq 0+96(%rsp),%rsi movq 16+96(%rsp),%r15 movq 24+96(%rsp),%r8 leaq 192(%rsp),%rdi call __ecp_nistz256_sqr_montq movq 128(%rsp),%rax leaq 128(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 160(%rsp),%rdi call __ecp_nistz256_mul_montq movq 320(%rsp),%rax leaq 320(%rsp),%rbx movq 0+128(%rsp),%r9 movq 8+128(%rsp),%r10 leaq 0+128(%rsp),%rsi movq 16+128(%rsp),%r11 movq 24+128(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montq xorq %r11,%r11 addq %r12,%r12 leaq 192(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subq leaq 160(%rsp),%rbx leaq 224(%rsp),%rdi call __ecp_nistz256_sub_fromq movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 64(%rsp),%rdi call __ecp_nistz256_subq movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 352(%rsp),%rax leaq 352(%rsp),%rbx movq 0+160(%rsp),%r9 movq 8+160(%rsp),%r10 leaq 0+160(%rsp),%rsi movq 16+160(%rsp),%r11 movq 24+160(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montq movq 96(%rsp),%rax leaq 96(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq 0+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 64(%rsp),%rdi call __ecp_nistz256_mul_montq leaq 32(%rsp),%rbx leaq 256(%rsp),%rdi call __ecp_nistz256_sub_fromq .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand L$ONE_mont(%rip),%xmm2 pand L$ONE_mont+16(%rip),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 224(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 224+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 416(%rsp),%xmm2 pand 416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 320(%rsp),%xmm2 pand 320+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 256(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 256+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 448(%rsp),%xmm2 pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 352(%rsp),%xmm2 
pand 352+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) leaq 480+56(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbx movq -8(%rsi),%rbp leaq (%rsi),%rsp L$add_affineq_epilogue: ret .p2align 5 __ecp_nistz256_add_tox: xorq %r11,%r11 adcq 0(%rbx),%r12 adcq 8(%rbx),%r13 movq %r12,%rax adcq 16(%rbx),%r8 adcq 24(%rbx),%r9 movq %r13,%rbp adcq $0,%r11 xorq %r10,%r10 sbbq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .p2align 5 __ecp_nistz256_sub_fromx: xorq %r11,%r11 sbbq 0(%rbx),%r12 sbbq 8(%rbx),%r13 movq %r12,%rax sbbq 16(%rbx),%r8 sbbq 24(%rbx),%r9 movq %r13,%rbp sbbq $0,%r11 xorq %r10,%r10 adcq $-1,%r12 movq %r8,%rcx adcq %r14,%r13 adcq $0,%r8 movq %r9,%r10 adcq %r15,%r9 btq $0,%r11 cmovncq %rax,%r12 cmovncq %rbp,%r13 movq %r12,0(%rdi) cmovncq %rcx,%r8 movq %r13,8(%rdi) cmovncq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .p2align 5 __ecp_nistz256_subx: xorq %r11,%r11 sbbq %r12,%rax sbbq %r13,%rbp movq %rax,%r12 sbbq %r8,%rcx sbbq %r9,%r10 movq %rbp,%r13 sbbq $0,%r11 xorq %r9,%r9 adcq $-1,%rax movq %rcx,%r8 adcq %r14,%rbp adcq $0,%rcx movq %r10,%r9 adcq %r15,%r10 btq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 cmovcq %rcx,%r8 cmovcq %r10,%r9 ret .p2align 5 __ecp_nistz256_mul_by_2x: xorq %r11,%r11 adcq %r12,%r12 adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 xorq %r10,%r10 sbbq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 cmovcq %rbp,%r13 movq %r12,0(%rdi) cmovcq %rcx,%r8 movq %r13,8(%rdi) cmovcq %r10,%r9 movq %r8,16(%rdi) movq %r9,24(%rdi) ret .globl _ecp_nistz256_point_double_adx .private_extern _ecp_nistz256_point_double_adx .p2align 5 _ecp_nistz256_point_double_adx: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $160+8,%rsp L$point_doublex_body: L$point_double_shortcutx: movdqu 0(%rsi),%xmm0 movq %rsi,%rbx movdqu 16(%rsi),%xmm1 movq 32+0(%rsi),%r12 movq 32+8(%rsi),%r13 movq 32+16(%rsi),%r8 movq 32+24(%rsi),%r9 movq L$poly+8(%rip),%r14 movq L$poly+24(%rip),%r15 movdqa %xmm0,96(%rsp) movdqa %xmm1,96+16(%rsp) leaq 32(%rdi),%r10 leaq 64(%rdi),%r11 .byte 102,72,15,110,199 .byte 102,73,15,110,202 .byte 102,73,15,110,211 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_by_2x movq 64+0(%rsi),%rdx movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 leaq 64-128(%rsi),%rsi leaq 64(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 0+0(%rsp),%rdx movq 8+0(%rsp),%r14 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 0(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 32(%rbx),%rdx movq 64+0(%rbx),%r9 movq 64+8(%rbx),%r10 movq 64+16(%rbx),%r11 movq 64+24(%rbx),%r12 leaq 64-128(%rbx),%rsi leaq 32(%rbx),%rbx .byte 102,72,15,126,215 call __ecp_nistz256_mul_montx call __ecp_nistz256_mul_by_2x movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 32(%rsp),%rdi call __ecp_nistz256_add_tox movq 96+0(%rsp),%r12 movq 96+8(%rsp),%r13 leaq 64(%rsp),%rbx movq 96+16(%rsp),%r8 movq 96+24(%rsp),%r9 leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 0+0(%rsp),%rdx movq 8+0(%rsp),%r14 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 .byte 102,72,15,126,207 call __ecp_nistz256_sqr_montx xorq %r9,%r9 movq %r12,%rax 
addq $-1,%r12 movq %r13,%r10 adcq %rsi,%r13 movq %r14,%rcx adcq $0,%r14 movq %r15,%r8 adcq %rbp,%r15 adcq $0,%r9 xorq %rsi,%rsi testq $1,%rax cmovzq %rax,%r12 cmovzq %r10,%r13 cmovzq %rcx,%r14 cmovzq %r8,%r15 cmovzq %rsi,%r9 movq %r13,%rax shrq $1,%r12 shlq $63,%rax movq %r14,%r10 shrq $1,%r13 orq %rax,%r12 shlq $63,%r10 movq %r15,%rcx shrq $1,%r14 orq %r10,%r13 shlq $63,%rcx movq %r12,0(%rdi) shrq $1,%r15 movq %r13,8(%rdi) shlq $63,%r9 orq %rcx,%r14 orq %r9,%r15 movq %r14,16(%rdi) movq %r15,24(%rdi) movq 64(%rsp),%rdx leaq 64(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 128(%rsp),%rdi call __ecp_nistz256_mul_by_2x leaq 32(%rsp),%rbx leaq 32(%rsp),%rdi call __ecp_nistz256_add_tox movq 96(%rsp),%rdx leaq 96(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r11 movq 24+0(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 128(%rsp),%rdi call __ecp_nistz256_mul_by_2x movq 0+32(%rsp),%rdx movq 8+32(%rsp),%r14 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r15 movq 24+32(%rsp),%r8 .byte 102,72,15,126,199 call __ecp_nistz256_sqr_montx leaq 128(%rsp),%rbx movq %r14,%r8 movq %r15,%r9 movq %rsi,%r14 movq %rbp,%r15 call __ecp_nistz256_sub_fromx movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 0(%rsp),%rdi call __ecp_nistz256_subx movq 32(%rsp),%rdx leaq 32(%rsp),%rbx movq %r12,%r14 xorl %ecx,%ecx movq %r12,0+0(%rsp) movq %r13,%r10 movq %r13,0+8(%rsp) cmovzq %r8,%r11 movq %r8,0+16(%rsp) leaq 0-128(%rsp),%rsi cmovzq %r9,%r12 movq %r9,0+24(%rsp) movq %r14,%r9 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx .byte 102,72,15,126,203 .byte 102,72,15,126,207 call __ecp_nistz256_sub_fromx leaq 160+56(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbx movq -8(%rsi),%rbp leaq (%rsi),%rsp L$point_doublex_epilogue: ret .globl _ecp_nistz256_point_add_adx .private_extern _ecp_nistz256_point_add_adx .p2align 5 _ecp_nistz256_point_add_adx: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $576+8,%rsp L$point_addx_body: movdqu 0(%rsi),%xmm0 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq %rsi,%rbx movq %rdx,%rsi movdqa %xmm0,384(%rsp) movdqa %xmm1,384+16(%rsp) movdqa %xmm2,416(%rsp) movdqa %xmm3,416+16(%rsp) movdqa %xmm4,448(%rsp) movdqa %xmm5,448+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rsi),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 por %xmm3,%xmm5 movdqu 48(%rsi),%xmm3 movq 64+0(%rsi),%rdx movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,480(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,480+16(%rsp) movdqu 64(%rsi),%xmm0 movdqu 80(%rsi),%xmm1 movdqa %xmm2,512(%rsp) movdqa %xmm3,512+16(%rsp) por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm0,%xmm1 .byte 102,72,15,110,199 leaq 64-128(%rsi),%rsi movq %rdx,544+0(%rsp) movq %r14,544+8(%rsp) movq %r15,544+16(%rsp) movq %r8,544+24(%rsp) leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montx pcmpeqd %xmm4,%xmm5 pshufd $0xb1,%xmm1,%xmm4 por %xmm1,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 por %xmm3,%xmm4 pxor %xmm3,%xmm3 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 movq 64+0(%rbx),%rdx movq 64+8(%rbx),%r14 movq 64+16(%rbx),%r15 movq 64+24(%rbx),%r8 .byte 102,72,15,110,203 leaq 64-128(%rbx),%rsi leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 544(%rsp),%rdx 
leaq 544(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq -128+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 224(%rsp),%rdi call __ecp_nistz256_mul_montx movq 448(%rsp),%rdx leaq 448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montx movq 416(%rsp),%rdx leaq 416(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq -128+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 224(%rsp),%rdi call __ecp_nistz256_mul_montx movq 512(%rsp),%rdx leaq 512(%rsp),%rbx movq 0+256(%rsp),%r9 movq 8+256(%rsp),%r10 leaq -128+256(%rsp),%rsi movq 16+256(%rsp),%r11 movq 24+256(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 224(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromx orq %r13,%r12 movdqa %xmm4,%xmm2 orq %r8,%r12 orq %r9,%r12 por %xmm5,%xmm2 .byte 102,73,15,110,220 movq 384(%rsp),%rdx leaq 384(%rsp),%rbx movq 0+96(%rsp),%r9 movq 8+96(%rsp),%r10 leaq -128+96(%rsp),%rsi movq 16+96(%rsp),%r11 movq 24+96(%rsp),%r12 leaq 160(%rsp),%rdi call __ecp_nistz256_mul_montx movq 480(%rsp),%rdx leaq 480(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 160(%rsp),%rbx leaq 0(%rsp),%rdi call __ecp_nistz256_sub_fromx orq %r13,%r12 orq %r8,%r12 orq %r9,%r12 .byte 102,73,15,126,208 .byte 102,73,15,126,217 orq %r8,%r12 .byte 0x3e jnz L$add_proceedx testq %r9,%r9 jz L$add_doublex .byte 102,72,15,126,199 pxor %xmm0,%xmm0 movdqu %xmm0,0(%rdi) movdqu %xmm0,16(%rdi) movdqu %xmm0,32(%rdi) movdqu %xmm0,48(%rdi) movdqu %xmm0,64(%rdi) movdqu %xmm0,80(%rdi) jmp L$add_donex .p2align 5 L$add_doublex: .byte 102,72,15,126,206 .byte 102,72,15,126,199 addq $416,%rsp jmp L$point_double_shortcutx .p2align 5 L$add_proceedx: movq 0+64(%rsp),%rdx movq 8+64(%rsp),%r14 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 96(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 448(%rsp),%rdx leaq 448(%rsp),%rbx movq 0+0(%rsp),%r9 movq 8+0(%rsp),%r10 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r11 movq 24+0(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montx movq 0+0(%rsp),%rdx movq 8+0(%rsp),%r14 leaq -128+0(%rsp),%rsi movq 16+0(%rsp),%r15 movq 24+0(%rsp),%r8 leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 544(%rsp),%rdx leaq 544(%rsp),%rbx movq 0+352(%rsp),%r9 movq 8+352(%rsp),%r10 leaq -128+352(%rsp),%rsi movq 16+352(%rsp),%r11 movq 24+352(%rsp),%r12 leaq 352(%rsp),%rdi call __ecp_nistz256_mul_montx movq 0(%rsp),%rdx leaq 0(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 128(%rsp),%rdi call __ecp_nistz256_mul_montx movq 160(%rsp),%rdx leaq 160(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 192(%rsp),%rdi call __ecp_nistz256_mul_montx xorq %r11,%r11 addq %r12,%r12 leaq 96(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subx leaq 128(%rsp),%rbx leaq 288(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 192+0(%rsp),%rax movq 192+8(%rsp),%rbp movq 192+16(%rsp),%rcx movq 
192+24(%rsp),%r10 leaq 320(%rsp),%rdi call __ecp_nistz256_subx movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 128(%rsp),%rdx leaq 128(%rsp),%rbx movq 0+224(%rsp),%r9 movq 8+224(%rsp),%r10 leaq -128+224(%rsp),%rsi movq 16+224(%rsp),%r11 movq 24+224(%rsp),%r12 leaq 256(%rsp),%rdi call __ecp_nistz256_mul_montx movq 320(%rsp),%rdx leaq 320(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 320(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 256(%rsp),%rbx leaq 320(%rsp),%rdi call __ecp_nistz256_sub_fromx .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 352(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 352+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 544(%rsp),%xmm2 pand 544+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 448(%rsp),%xmm2 pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 480(%rsp),%xmm2 pand 480+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 320(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 320+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 512(%rsp),%xmm2 pand 512+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 416(%rsp),%xmm2 pand 416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) L$add_donex: leaq 576+56(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbx movq -8(%rsi),%rbp leaq (%rsi),%rsp L$point_addx_epilogue: ret .globl _ecp_nistz256_point_add_affine_adx .private_extern _ecp_nistz256_point_add_affine_adx .p2align 5 _ecp_nistz256_point_add_affine_adx: _CET_ENDBR pushq %rbp pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $480+8,%rsp L$add_affinex_body: movdqu 0(%rsi),%xmm0 movq %rdx,%rbx movdqu 16(%rsi),%xmm1 movdqu 32(%rsi),%xmm2 movdqu 48(%rsi),%xmm3 movdqu 64(%rsi),%xmm4 movdqu 80(%rsi),%xmm5 movq 64+0(%rsi),%rdx movq 64+8(%rsi),%r14 movq 64+16(%rsi),%r15 movq 64+24(%rsi),%r8 movdqa %xmm0,320(%rsp) movdqa %xmm1,320+16(%rsp) movdqa %xmm2,352(%rsp) movdqa %xmm3,352+16(%rsp) movdqa %xmm4,384(%rsp) movdqa %xmm5,384+16(%rsp) por %xmm4,%xmm5 movdqu 0(%rbx),%xmm0 pshufd $0xb1,%xmm5,%xmm3 movdqu 16(%rbx),%xmm1 movdqu 32(%rbx),%xmm2 por %xmm3,%xmm5 movdqu 48(%rbx),%xmm3 movdqa %xmm0,416(%rsp) pshufd $0x1e,%xmm5,%xmm4 movdqa %xmm1,416+16(%rsp) por %xmm0,%xmm1 .byte 102,72,15,110,199 movdqa %xmm2,448(%rsp) movdqa %xmm3,448+16(%rsp) por %xmm2,%xmm3 por %xmm4,%xmm5 pxor %xmm4,%xmm4 por %xmm1,%xmm3 leaq 64-128(%rsi),%rsi leaq 32(%rsp),%rdi call __ecp_nistz256_sqr_montx pcmpeqd %xmm4,%xmm5 pshufd $0xb1,%xmm3,%xmm4 movq 0(%rbx),%rdx movq %r12,%r9 por %xmm3,%xmm4 pshufd $0,%xmm5,%xmm5 pshufd $0x1e,%xmm4,%xmm3 movq %r13,%r10 por %xmm3,%xmm4 pxor %xmm3,%xmm3 movq %r14,%r11 pcmpeqd %xmm3,%xmm4 pshufd $0,%xmm4,%xmm4 leaq 32-128(%rsp),%rsi movq %r15,%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 
320(%rsp),%rbx leaq 64(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 384(%rsp),%rdx leaq 384(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx movq 384(%rsp),%rdx leaq 384(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 288(%rsp),%rdi call __ecp_nistz256_mul_montx movq 448(%rsp),%rdx leaq 448(%rsp),%rbx movq 0+32(%rsp),%r9 movq 8+32(%rsp),%r10 leaq -128+32(%rsp),%rsi movq 16+32(%rsp),%r11 movq 24+32(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 352(%rsp),%rbx leaq 96(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 0+64(%rsp),%rdx movq 8+64(%rsp),%r14 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r15 movq 24+64(%rsp),%r8 leaq 128(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 0+96(%rsp),%rdx movq 8+96(%rsp),%r14 leaq -128+96(%rsp),%rsi movq 16+96(%rsp),%r15 movq 24+96(%rsp),%r8 leaq 192(%rsp),%rdi call __ecp_nistz256_sqr_montx movq 128(%rsp),%rdx leaq 128(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 160(%rsp),%rdi call __ecp_nistz256_mul_montx movq 320(%rsp),%rdx leaq 320(%rsp),%rbx movq 0+128(%rsp),%r9 movq 8+128(%rsp),%r10 leaq -128+128(%rsp),%rsi movq 16+128(%rsp),%r11 movq 24+128(%rsp),%r12 leaq 0(%rsp),%rdi call __ecp_nistz256_mul_montx xorq %r11,%r11 addq %r12,%r12 leaq 192(%rsp),%rsi adcq %r13,%r13 movq %r12,%rax adcq %r8,%r8 adcq %r9,%r9 movq %r13,%rbp adcq $0,%r11 subq $-1,%r12 movq %r8,%rcx sbbq %r14,%r13 sbbq $0,%r8 movq %r9,%r10 sbbq %r15,%r9 sbbq $0,%r11 cmovcq %rax,%r12 movq 0(%rsi),%rax cmovcq %rbp,%r13 movq 8(%rsi),%rbp cmovcq %rcx,%r8 movq 16(%rsi),%rcx cmovcq %r10,%r9 movq 24(%rsi),%r10 call __ecp_nistz256_subx leaq 160(%rsp),%rbx leaq 224(%rsp),%rdi call __ecp_nistz256_sub_fromx movq 0+0(%rsp),%rax movq 0+8(%rsp),%rbp movq 0+16(%rsp),%rcx movq 0+24(%rsp),%r10 leaq 64(%rsp),%rdi call __ecp_nistz256_subx movq %r12,0(%rdi) movq %r13,8(%rdi) movq %r8,16(%rdi) movq %r9,24(%rdi) movq 352(%rsp),%rdx leaq 352(%rsp),%rbx movq 0+160(%rsp),%r9 movq 8+160(%rsp),%r10 leaq -128+160(%rsp),%rsi movq 16+160(%rsp),%r11 movq 24+160(%rsp),%r12 leaq 32(%rsp),%rdi call __ecp_nistz256_mul_montx movq 96(%rsp),%rdx leaq 96(%rsp),%rbx movq 0+64(%rsp),%r9 movq 8+64(%rsp),%r10 leaq -128+64(%rsp),%rsi movq 16+64(%rsp),%r11 movq 24+64(%rsp),%r12 leaq 64(%rsp),%rdi call __ecp_nistz256_mul_montx leaq 32(%rsp),%rbx leaq 256(%rsp),%rdi call __ecp_nistz256_sub_fromx .byte 102,72,15,126,199 movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 288(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 288+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand L$ONE_mont(%rip),%xmm2 pand L$ONE_mont+16(%rip),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 384(%rsp),%xmm2 pand 384+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,64(%rdi) movdqu %xmm3,80(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 224(%rsp),%xmm0 movdqa %xmm5,%xmm2 pandn 224+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 416(%rsp),%xmm2 pand 416+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 320(%rsp),%xmm2 pand 320+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,0(%rdi) movdqu %xmm3,16(%rdi) movdqa %xmm5,%xmm0 movdqa %xmm5,%xmm1 pandn 256(%rsp),%xmm0 movdqa 
%xmm5,%xmm2 pandn 256+16(%rsp),%xmm1 movdqa %xmm5,%xmm3 pand 448(%rsp),%xmm2 pand 448+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqa %xmm4,%xmm0 movdqa %xmm4,%xmm1 pandn %xmm2,%xmm0 movdqa %xmm4,%xmm2 pandn %xmm3,%xmm1 movdqa %xmm4,%xmm3 pand 352(%rsp),%xmm2 pand 352+16(%rsp),%xmm3 por %xmm0,%xmm2 por %xmm1,%xmm3 movdqu %xmm2,32(%rdi) movdqu %xmm3,48(%rdi) leaq 480+56(%rsp),%rsi movq -48(%rsi),%r15 movq -40(%rsi),%r14 movq -32(%rsi),%r13 movq -24(%rsi),%r12 movq -16(%rsi),%rbx movq -8(%rsi),%rbp leaq (%rsi),%rsp L$add_affinex_epilogue: ret #endif
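A note on the constant-time table lookups in the file above: _ecp_nistz256_select_w5_nohw scans all 16 precomputed points, compares the running counter against the requested index with pcmpeqd, and uses the resulting all-ones/all-zeros mask to AND/OR exactly one entry into the output, so the memory access pattern never depends on the secret index. The following is a minimal portable sketch of the same idea in Rust; the helper name select_w5, the 16-entry table of 12 limbs (96 bytes: x, y, z in Montgomery form), and the 1-based index convention are assumptions made for illustration and are not part of ring's API.

// Hedged sketch of the branch-free selection pattern used by
// ecp_nistz256_select_w5_nohw: every entry is read, and a mask that is
// all-ones only for the matching index decides which entry survives.
fn select_w5(table: &[[u64; 12]; 16], index: u32) -> [u64; 12] {
    let mut out = [0u64; 12];
    for (i, entry) in table.iter().enumerate() {
        // diff == 0 exactly when this is the requested entry (index is
        // 1-based; index 0 selects nothing, as in the assembly).
        let diff = ((i as u32) + 1) ^ index;
        // mask = u64::MAX when diff == 0, otherwise 0, with no branch.
        let mask = ((diff as u64).wrapping_sub(1) >> 63).wrapping_neg();
        for (limb_out, &limb) in out.iter_mut().zip(entry.iter()) {
            *limb_out |= limb & mask;
        }
    }
    out
}

The SSE and AVX2 variants above differ only in how many entries they mask per loop iteration (one for w5_nohw, two or three lanes at a time for the AVX2 versions); the masking principle is the same as in this sketch.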
marvin-hansen/iggy-streaming-system
23,124
thirdparty/crates/ring-0.17.9/pregenerated/vpaes-armv7-linux32.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) .syntax unified .arch armv7-a .fpu neon #if defined(__thumb2__) .thumb #else .code 32 #endif .text .type _vpaes_consts,%object .align 7 @ totally strategic alignment _vpaes_consts: .Lk_mc_forward:@ mc_forward .quad 0x0407060500030201, 0x0C0F0E0D080B0A09 .quad 0x080B0A0904070605, 0x000302010C0F0E0D .quad 0x0C0F0E0D080B0A09, 0x0407060500030201 .quad 0x000302010C0F0E0D, 0x080B0A0904070605 .Lk_mc_backward:@ mc_backward .quad 0x0605040702010003, 0x0E0D0C0F0A09080B .quad 0x020100030E0D0C0F, 0x0A09080B06050407 .quad 0x0E0D0C0F0A09080B, 0x0605040702010003 .quad 0x0A09080B06050407, 0x020100030E0D0C0F .Lk_sr:@ sr .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908 .quad 0x030E09040F0A0500, 0x0B06010C07020D08 .quad 0x0F060D040B020900, 0x070E050C030A0108 .quad 0x0B0E0104070A0D00, 0x0306090C0F020508 @ @ "Hot" constants @ .Lk_inv:@ inv, inva .quad 0x0E05060F0D080180, 0x040703090A0B0C02 .quad 0x01040A060F0B0780, 0x030D0E0C02050809 .Lk_ipt:@ input transform (lo, hi) .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808 .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81 .Lk_sbo:@ sbou, sbot .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878 .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA .Lk_sb1:@ sb1u, sb1t .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544 .Lk_sb2:@ sb2u, sb2t .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,55,32,78,69,79,78,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0 .align 2 .size _vpaes_consts,.-_vpaes_consts .align 6 @@ @@ _aes_preheat @@ @@ Fills q9-q15 as specified below. @@ .type _vpaes_preheat,%function .align 4 _vpaes_preheat: adr r10, .Lk_inv vmov.i8 q9, #0x0f @ .Lk_s0F vld1.64 {q10,q11}, [r10]! @ .Lk_inv add r10, r10, #64 @ Skip .Lk_ipt, .Lk_sbo vld1.64 {q12,q13}, [r10]! @ .Lk_sb1 vld1.64 {q14,q15}, [r10] @ .Lk_sb2 bx lr @@ @@ _aes_encrypt_core @@ @@ AES-encrypt q0. @@ @@ Inputs: @@ q0 = input @@ q9-q15 as in _vpaes_preheat @@ [r2] = scheduled keys @@ @@ Output in q0 @@ Clobbers q1-q5, r8-r11 @@ Preserves q6-q8 so you get some local vectors @@ @@ .type _vpaes_encrypt_core,%function .align 4 _vpaes_encrypt_core: mov r9, r2 ldr r8, [r2,#240] @ pull rounds adr r11, .Lk_ipt @ vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo @ vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi vld1.64 {q2, q3}, [r11] adr r11, .Lk_mc_forward+16 vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 # round0 key vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 vtbl.8 d2, {q2}, d2 @ vpshufb %xmm1, %xmm2, %xmm1 vtbl.8 d3, {q2}, d3 vtbl.8 d4, {q3}, d0 @ vpshufb %xmm0, %xmm3, %xmm2 vtbl.8 d5, {q3}, d1 veor q0, q1, q5 @ vpxor %xmm5, %xmm1, %xmm0 veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0 @ .Lenc_entry ends with a bnz instruction which is normally paired with @ subs in .Lenc_loop. tst r8, r8 b .Lenc_entry .align 4 .Lenc_loop: @ middle of middle round add r10, r11, #0x40 vtbl.8 d8, {q13}, d4 @ vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u vtbl.8 d9, {q13}, d5 vld1.64 {q1}, [r11]! 
@ vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[] vtbl.8 d0, {q12}, d6 @ vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t vtbl.8 d1, {q12}, d7 veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k vtbl.8 d10, {q15}, d4 @ vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u vtbl.8 d11, {q15}, d5 veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A vtbl.8 d4, {q14}, d6 @ vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t vtbl.8 d5, {q14}, d7 vld1.64 {q4}, [r10] @ vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[] vtbl.8 d6, {q0}, d2 @ vpshufb %xmm1, %xmm0, %xmm3 # 0 = B vtbl.8 d7, {q0}, d3 veor q2, q2, q5 @ vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A @ Write to q5 instead of q0, so the table and destination registers do @ not overlap. vtbl.8 d10, {q0}, d8 @ vpshufb %xmm4, %xmm0, %xmm0 # 3 = D vtbl.8 d11, {q0}, d9 veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B vtbl.8 d8, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C vtbl.8 d9, {q3}, d3 @ Here we restore the original q0/q5 usage. veor q0, q5, q3 @ vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D and r11, r11, #~(1<<6) @ and $0x30, %r11 # ... mod 4 veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D subs r8, r8, #1 @ nr-- .Lenc_entry: @ top of round vand q1, q0, q9 @ vpand %xmm0, %xmm9, %xmm1 # 0 = k vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i vtbl.8 d10, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k vtbl.8 d11, {q11}, d3 veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i vtbl.8 d7, {q10}, d1 vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j vtbl.8 d9, {q10}, d3 veor q3, q3, q5 @ vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k vtbl.8 d4, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak vtbl.8 d5, {q10}, d7 vtbl.8 d6, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak vtbl.8 d7, {q10}, d9 veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 bne .Lenc_loop @ middle of last round add r10, r11, #0x80 adr r11, .Lk_sbo @ Read to q1 instead of q4, so the vtbl.8 instruction below does not @ overlap table and destination registers. vld1.64 {q1}, [r11]! @ vmovdqa -0x60(%r10), %xmm4 # 3 : sbou vld1.64 {q0}, [r11] @ vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 vtbl.8 d8, {q1}, d4 @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou vtbl.8 d9, {q1}, d5 vld1.64 {q1}, [r10] @ vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[] @ Write to q2 instead of q0 below, to avoid overlapping table and @ destination registers. vtbl.8 d4, {q0}, d6 @ vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t vtbl.8 d5, {q0}, d7 veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k veor q2, q2, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A @ Here we restore the original q0/q2 usage. vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0 vtbl.8 d1, {q2}, d3 bx lr .size _vpaes_encrypt_core,.-_vpaes_encrypt_core @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @@ @@ @@ AES key schedule @@ @@ @@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ @ This function diverges from both x86_64 and armv7 in which constants are @ pinned. x86_64 has a common preheat function for all operations. aarch64 @ separates them because it has enough registers to pin nearly all constants. @ armv7 does not have enough registers, but needing explicit loads and stores @ also complicates using x86_64's register allocation directly. 
@ @ We pin some constants for convenience and leave q14 and q15 free to load @ others on demand. @ @ Key schedule constants @ .type _vpaes_key_consts,%object .align 4 _vpaes_key_consts: .Lk_rcon:@ rcon .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81 .Lk_opt:@ output transform .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808 .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0 .Lk_deskew:@ deskew tables: inverts the sbox's "skew" .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77 .size _vpaes_key_consts,.-_vpaes_key_consts .type _vpaes_key_preheat,%function .align 4 _vpaes_key_preheat: adr r11, .Lk_rcon vmov.i8 q12, #0x5b @ .Lk_s63 adr r10, .Lk_inv @ Must be aligned to 8 mod 16. vmov.i8 q9, #0x0f @ .Lk_s0F vld1.64 {q10,q11}, [r10] @ .Lk_inv vld1.64 {q8}, [r11] @ .Lk_rcon bx lr .size _vpaes_key_preheat,.-_vpaes_key_preheat .type _vpaes_schedule_core,%function .align 4 _vpaes_schedule_core: @ We only need to save lr, but ARM requires an 8-byte stack alignment, @ so save an extra register. stmdb sp!, {r3,lr} bl _vpaes_key_preheat @ load the tables adr r11, .Lk_ipt @ Must be aligned to 8 mod 16. vld1.64 {q0}, [r0]! @ vmovdqu (%rdi), %xmm0 # load key (unaligned) @ input transform @ Use q4 here rather than q3 so .Lschedule_am_decrypting does not @ overlap table and destination. vmov q4, q0 @ vmovdqa %xmm0, %xmm3 bl _vpaes_schedule_transform adr r10, .Lk_sr @ Must be aligned to 8 mod 16. vmov q7, q0 @ vmovdqa %xmm0, %xmm7 add r8, r8, r10 @ encrypting, output zeroth round key after transform vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx) @ *ring*: Decryption removed. .Lschedule_go: cmp r1, #192 @ cmp $192, %esi bhi .Lschedule_256 @ 128: fall though @@ @@ .schedule_128 @@ @@ 128-bit specific part of key schedule. @@ @@ This schedule is really simple, because all its parts @@ are accomplished by the subroutines. @@ .Lschedule_128: mov r0, #10 @ mov $10, %esi .Loop_schedule_128: bl _vpaes_schedule_round subs r0, r0, #1 @ dec %esi beq .Lschedule_mangle_last bl _vpaes_schedule_mangle @ write output b .Loop_schedule_128 @@ @@ .aes_schedule_256 @@ @@ 256-bit specific part of key schedule. @@ @@ The structure here is very similar to the 128-bit @@ schedule, but with an additional "low side" in @@ q6. The low side's rounds are the same as the @@ high side's, except no rcon and no rotation. @@ .align 4 .Lschedule_256: vld1.64 {q0}, [r0] @ vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) bl _vpaes_schedule_transform @ input transform mov r0, #7 @ mov $7, %esi .Loop_schedule_256: bl _vpaes_schedule_mangle @ output low result vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6 @ high round bl _vpaes_schedule_round subs r0, r0, #1 @ dec %esi beq .Lschedule_mangle_last bl _vpaes_schedule_mangle @ low round. swap xmm7 and xmm6 vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0 vmov.i8 q4, #0 vmov q5, q7 @ vmovdqa %xmm7, %xmm5 vmov q7, q6 @ vmovdqa %xmm6, %xmm7 bl _vpaes_schedule_low_round vmov q7, q5 @ vmovdqa %xmm5, %xmm7 b .Loop_schedule_256 @@ @@ .aes_schedule_mangle_last @@ @@ Mangler for last round of key schedule @@ Mangles q0 @@ when encrypting, outputs out(q0) ^ 63 @@ when decrypting, outputs unskew(q0) @@ @@ Always called right before return... 
jumps to cleanup and exits @@ .align 4 .Lschedule_mangle_last: @ schedule last round key from xmm0 adr r11, .Lk_deskew @ lea .Lk_deskew(%rip),%r11 # prepare to deskew @ encrypting vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10),%xmm1 adr r11, .Lk_opt @ lea .Lk_opt(%rip), %r11 # prepare to output transform add r2, r2, #32 @ add $32, %rdx vmov q2, q0 vtbl.8 d0, {q2}, d2 @ vpshufb %xmm1, %xmm0, %xmm0 # output permute vtbl.8 d1, {q2}, d3 .Lschedule_mangle_last_dec: sub r2, r2, #16 @ add $-16, %rdx veor q0, q0, q12 @ vpxor .Lk_s63(%rip), %xmm0, %xmm0 bl _vpaes_schedule_transform @ output transform vst1.64 {q0}, [r2] @ vmovdqu %xmm0, (%rdx) # save last key @ cleanup veor q0, q0, q0 @ vpxor %xmm0, %xmm0, %xmm0 veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1 veor q2, q2, q2 @ vpxor %xmm2, %xmm2, %xmm2 veor q3, q3, q3 @ vpxor %xmm3, %xmm3, %xmm3 veor q4, q4, q4 @ vpxor %xmm4, %xmm4, %xmm4 veor q5, q5, q5 @ vpxor %xmm5, %xmm5, %xmm5 veor q6, q6, q6 @ vpxor %xmm6, %xmm6, %xmm6 veor q7, q7, q7 @ vpxor %xmm7, %xmm7, %xmm7 ldmia sp!, {r3,pc} @ return .size _vpaes_schedule_core,.-_vpaes_schedule_core @@ @@ .aes_schedule_round @@ @@ Runs one main round of the key schedule on q0, q7 @@ @@ Specifically, runs subbytes on the high dword of q0 @@ then rotates it by one byte and xors into the low dword of @@ q7. @@ @@ Adds rcon from low byte of q8, then rotates q8 for @@ next rcon. @@ @@ Smears the dwords of q7 by xoring the low into the @@ second low, result into third, result into highest. @@ @@ Returns results in q7 = q0. @@ Clobbers q1-q4, r11. @@ .type _vpaes_schedule_round,%function .align 4 _vpaes_schedule_round: @ extract rcon from xmm8 vmov.i8 q4, #0 @ vpxor %xmm4, %xmm4, %xmm4 vext.8 q1, q8, q4, #15 @ vpalignr $15, %xmm8, %xmm4, %xmm1 vext.8 q8, q8, q8, #15 @ vpalignr $15, %xmm8, %xmm8, %xmm8 veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7 @ rotate vdup.32 q0, d1[1] @ vpshufd $0xFF, %xmm0, %xmm0 vext.8 q0, q0, q0, #1 @ vpalignr $1, %xmm0, %xmm0, %xmm0 @ fall through... @ low round: same as high round, but no rotation and no rcon. _vpaes_schedule_low_round: @ The x86_64 version pins .Lk_sb1 in %xmm13 and .Lk_sb1+16 in %xmm12. @ We pin other values in _vpaes_key_preheat, so load them now. 
adr r11, .Lk_sb1 vld1.64 {q14,q15}, [r11] @ smear xmm7 vext.8 q1, q4, q7, #12 @ vpslldq $4, %xmm7, %xmm1 veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7 vext.8 q4, q4, q7, #8 @ vpslldq $8, %xmm7, %xmm4 @ subbytes vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 # 1 = i veor q7, q7, q4 @ vpxor %xmm4, %xmm7, %xmm7 vtbl.8 d4, {q11}, d2 @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k vtbl.8 d5, {q11}, d3 veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j vtbl.8 d6, {q10}, d0 @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i vtbl.8 d7, {q10}, d1 veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k vtbl.8 d8, {q10}, d2 @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j vtbl.8 d9, {q10}, d3 veor q7, q7, q12 @ vpxor .Lk_s63(%rip), %xmm7, %xmm7 vtbl.8 d6, {q10}, d6 @ vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak vtbl.8 d7, {q10}, d7 veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k vtbl.8 d4, {q10}, d8 @ vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak vtbl.8 d5, {q10}, d9 veor q3, q3, q1 @ vpxor %xmm1, %xmm3, %xmm3 # 2 = io veor q2, q2, q0 @ vpxor %xmm0, %xmm2, %xmm2 # 3 = jo vtbl.8 d8, {q15}, d6 @ vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou vtbl.8 d9, {q15}, d7 vtbl.8 d2, {q14}, d4 @ vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t vtbl.8 d3, {q14}, d5 veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output @ add in smeared stuff veor q0, q1, q7 @ vpxor %xmm7, %xmm1, %xmm0 veor q7, q1, q7 @ vmovdqa %xmm0, %xmm7 bx lr .size _vpaes_schedule_round,.-_vpaes_schedule_round @@ @@ .aes_schedule_transform @@ @@ Linear-transform q0 according to tables at [r11] @@ @@ Requires that q9 = 0x0F0F... as in preheat @@ Output in q0 @@ Clobbers q1, q2, q14, q15 @@ .type _vpaes_schedule_transform,%function .align 4 _vpaes_schedule_transform: vld1.64 {q14,q15}, [r11] @ vmovdqa (%r11), %xmm2 # lo @ vmovdqa 16(%r11), %xmm1 # hi vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 vshr.u8 q0, q0, #4 @ vpsrlb $4, %xmm0, %xmm0 vtbl.8 d4, {q14}, d2 @ vpshufb %xmm1, %xmm2, %xmm2 vtbl.8 d5, {q14}, d3 vtbl.8 d0, {q15}, d0 @ vpshufb %xmm0, %xmm1, %xmm0 vtbl.8 d1, {q15}, d1 veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0 bx lr .size _vpaes_schedule_transform,.-_vpaes_schedule_transform @@ @@ .aes_schedule_mangle @@ @@ Mangles q0 from (basis-transformed) standard version @@ to our version. @@ @@ On encrypt, @@ xor with 0x63 @@ multiply by circulant 0,1,1,1 @@ apply shiftrows transform @@ @@ On decrypt, @@ xor with 0x63 @@ multiply by "inverse mixcolumns" circulant E,B,D,9 @@ deskew @@ apply shiftrows transform @@ @@ @@ Writes out to [r2], and increments or decrements it @@ Keeps track of round number mod 4 in r8 @@ Preserves q0 @@ Clobbers q1-q5 @@ .type _vpaes_schedule_mangle,%function .align 4 _vpaes_schedule_mangle: tst r3, r3 vmov q4, q0 @ vmovdqa %xmm0, %xmm4 # save xmm0 for later adr r11, .Lk_mc_forward @ Must be aligned to 8 mod 16. vld1.64 {q5}, [r11] @ vmovdqa .Lk_mc_forward(%rip),%xmm5 @ encrypting @ Write to q2 so we do not overlap table and destination below. veor q2, q0, q12 @ vpxor .Lk_s63(%rip), %xmm0, %xmm4 add r2, r2, #16 @ add $16, %rdx vtbl.8 d8, {q2}, d10 @ vpshufb %xmm5, %xmm4, %xmm4 vtbl.8 d9, {q2}, d11 vtbl.8 d2, {q4}, d10 @ vpshufb %xmm5, %xmm4, %xmm1 vtbl.8 d3, {q4}, d11 vtbl.8 d6, {q1}, d10 @ vpshufb %xmm5, %xmm1, %xmm3 vtbl.8 d7, {q1}, d11 veor q4, q4, q1 @ vpxor %xmm1, %xmm4, %xmm4 vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1 veor q3, q3, q4 @ vpxor %xmm4, %xmm3, %xmm3 .Lschedule_mangle_both: @ Write to q2 so table and destination do not overlap. 
vtbl.8 d4, {q3}, d2 @ vpshufb %xmm1, %xmm3, %xmm3 vtbl.8 d5, {q3}, d3 add r8, r8, #64-16 @ add $-16, %r8 and r8, r8, #~(1<<6) @ and $0x30, %r8 vst1.64 {q2}, [r2] @ vmovdqu %xmm3, (%rdx) bx lr .size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle .globl vpaes_set_encrypt_key .hidden vpaes_set_encrypt_key .type vpaes_set_encrypt_key,%function .align 4 vpaes_set_encrypt_key: stmdb sp!, {r7,r8,r9,r10,r11, lr} vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15} lsr r9, r1, #5 @ shr $5,%eax add r9, r9, #5 @ $5,%eax str r9, [r2,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; mov r3, #0 @ mov $0,%ecx mov r8, #0x30 @ mov $0x30,%r8d bl _vpaes_schedule_core eor r0, r0, r0 vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15} ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return .size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key @ Additional constants for converting to bsaes. .type _vpaes_convert_consts,%object .align 4 _vpaes_convert_consts: @ .Lk_opt_then_skew applies skew(opt(x)) XOR 0x63, where skew is the linear @ transform in the AES S-box. 0x63 is incorporated into the low half of the @ table. This was computed with the following script: @ @ def u64s_to_u128(x, y): @ return x | (y << 64) @ def u128_to_u64s(w): @ return w & ((1<<64)-1), w >> 64 @ def get_byte(w, i): @ return (w >> (i*8)) & 0xff @ def apply_table(table, b): @ lo = b & 0xf @ hi = b >> 4 @ return get_byte(table[0], lo) ^ get_byte(table[1], hi) @ def opt(b): @ table = [ @ u64s_to_u128(0xFF9F4929D6B66000, 0xF7974121DEBE6808), @ u64s_to_u128(0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0), @ ] @ return apply_table(table, b) @ def rot_byte(b, n): @ return 0xff & ((b << n) | (b >> (8-n))) @ def skew(x): @ return (x ^ rot_byte(x, 1) ^ rot_byte(x, 2) ^ rot_byte(x, 3) ^ @ rot_byte(x, 4)) @ table = [0, 0] @ for i in range(16): @ table[0] |= (skew(opt(i)) ^ 0x63) << (i*8) @ table[1] |= skew(opt(i<<4)) << (i*8) @ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[0])) @ print(" .quad 0x%016x, 0x%016x" % u128_to_u64s(table[1])) .Lk_opt_then_skew: .quad 0x9cb8436798bc4763, 0x6440bb9f6044bf9b .quad 0x1f30062936192f00, 0xb49bad829db284ab @ void vpaes_encrypt_key_to_bsaes(AES_KEY *bsaes, const AES_KEY *vpaes); .globl vpaes_encrypt_key_to_bsaes .hidden vpaes_encrypt_key_to_bsaes .type vpaes_encrypt_key_to_bsaes,%function .align 4 vpaes_encrypt_key_to_bsaes: stmdb sp!, {r11, lr} @ See _vpaes_schedule_core for the key schedule logic. In particular, @ _vpaes_schedule_transform(.Lk_ipt) (section 2.2 of the paper), @ _vpaes_schedule_mangle (section 4.3), and .Lschedule_mangle_last @ contain the transformations not in the bsaes representation. This @ function inverts those transforms. @ @ Note also that bsaes-armv7.pl expects aes-armv4.pl's key @ representation, which does not match the other aes_nohw_* @ implementations. The ARM aes_nohw_* stores each 32-bit word @ byteswapped, as a convenience for (unsupported) big-endian ARM, at the @ cost of extra REV and VREV32 operations in little-endian ARM. vmov.i8 q9, #0x0f @ Required by _vpaes_schedule_transform adr r2, .Lk_mc_forward @ Must be aligned to 8 mod 16. add r3, r2, 0x90 @ .Lk_sr+0x10-.Lk_mc_forward = 0x90 (Apple's toolchain doesn't support the expression) vld1.64 {q12}, [r2] vmov.i8 q10, #0x5b @ .Lk_s63 from vpaes-x86_64 adr r11, .Lk_opt @ Must be aligned to 8 mod 16. vmov.i8 q11, #0x63 @ .LK_s63 without .Lk_ipt applied @ vpaes stores one fewer round count than bsaes, but the number of keys @ is the same. 
ldr r2, [r1,#240] add r2, r2, #1 str r2, [r0,#240] @ The first key is transformed with _vpaes_schedule_transform(.Lk_ipt). @ Invert this with .Lk_opt. vld1.64 {q0}, [r1]! bl _vpaes_schedule_transform vrev32.8 q0, q0 vst1.64 {q0}, [r0]! @ The middle keys have _vpaes_schedule_transform(.Lk_ipt) applied, @ followed by _vpaes_schedule_mangle. _vpaes_schedule_mangle XORs 0x63, @ multiplies by the circulant 0,1,1,1, then applies ShiftRows. .Loop_enc_key_to_bsaes: vld1.64 {q0}, [r1]! @ Invert the ShiftRows step (see .Lschedule_mangle_both). Note we cycle @ r3 in the opposite direction and start at .Lk_sr+0x10 instead of 0x30. @ We use r3 rather than r8 to avoid a callee-saved register. vld1.64 {q1}, [r3] vtbl.8 d4, {q0}, d2 vtbl.8 d5, {q0}, d3 add r3, r3, #16 and r3, r3, #~(1<<6) vmov q0, q2 @ Handle the last key differently. subs r2, r2, #1 beq .Loop_enc_key_to_bsaes_last @ Multiply by the circulant. This is its own inverse. vtbl.8 d2, {q0}, d24 vtbl.8 d3, {q0}, d25 vmov q0, q1 vtbl.8 d4, {q1}, d24 vtbl.8 d5, {q1}, d25 veor q0, q0, q2 vtbl.8 d2, {q2}, d24 vtbl.8 d3, {q2}, d25 veor q0, q0, q1 @ XOR and finish. veor q0, q0, q10 bl _vpaes_schedule_transform vrev32.8 q0, q0 vst1.64 {q0}, [r0]! b .Loop_enc_key_to_bsaes .Loop_enc_key_to_bsaes_last: @ The final key does not have a basis transform (note @ .Lschedule_mangle_last inverts the original transform). It only XORs @ 0x63 and applies ShiftRows. The latter was already inverted in the @ loop. Note that, because we act on the original representation, we use @ q11, not q10. veor q0, q0, q11 vrev32.8 q0, q0 vst1.64 {q0}, [r0] @ Wipe registers which contained key material. veor q0, q0, q0 veor q1, q1, q1 veor q2, q2, q2 ldmia sp!, {r11, pc} @ return .size vpaes_encrypt_key_to_bsaes,.-vpaes_encrypt_key_to_bsaes .globl vpaes_ctr32_encrypt_blocks .hidden vpaes_ctr32_encrypt_blocks .type vpaes_ctr32_encrypt_blocks,%function .align 4 vpaes_ctr32_encrypt_blocks: mov ip, sp stmdb sp!, {r7,r8,r9,r10,r11, lr} @ This function uses q4-q7 (d8-d15), which are callee-saved. vstmdb sp!, {d8,d9,d10,d11,d12,d13,d14,d15} cmp r2, #0 @ r8 is passed on the stack. ldr r8, [ip] beq .Lctr32_done @ _vpaes_encrypt_core expects the key in r2, so swap r2 and r3. mov r9, r3 mov r3, r2 mov r2, r9 @ Load the IV and counter portion. ldr r7, [r8, #12] vld1.8 {q7}, [r8] bl _vpaes_preheat rev r7, r7 @ The counter is big-endian. .Lctr32_loop: vmov q0, q7 vld1.8 {q6}, [r0]! @ .Load input ahead of time bl _vpaes_encrypt_core veor q0, q0, q6 @ XOR input and result vst1.8 {q0}, [r1]! subs r3, r3, #1 @ Update the counter. add r7, r7, #1 rev r9, r7 vmov.32 d15[1], r9 bne .Lctr32_loop .Lctr32_done: vldmia sp!, {d8,d9,d10,d11,d12,d13,d14,d15} ldmia sp!, {r7,r8,r9,r10,r11, pc} @ return .size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks #endif // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)
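For reference, the vpaes_ctr32_encrypt_blocks routine above keeps a 32-bit big-endian block counter in the last word of the 16-byte IV (loaded via "ldr r7, [r8, #12]" / "rev r7, r7", incremented once per block, and written back into d15[1]). The following is a minimal C sketch of that counter handling only; block_encrypt is a hypothetical one-block callback for illustration, not a real ring or BoringSSL symbol.

/*
 * Illustrative sketch (not ring code): CTR32 counter handling as in
 * vpaes_ctr32_encrypt_blocks.  Bytes 12..15 of the counter block hold a
 * big-endian counter incremented once per 16-byte block; the rest of the
 * IV stays fixed.  block_encrypt() is an assumed callback.
 */
#include <stdint.h>
#include <stddef.h>

typedef void (*block_encrypt_fn)(const uint8_t in[16], uint8_t out[16],
                                 const void *key);

static void ctr32_encrypt(const uint8_t *in, uint8_t *out, size_t blocks,
                          const void *key, uint8_t ivec[16],
                          block_encrypt_fn block_encrypt)
{
    /* Pull the big-endian counter out of bytes 12..15, mirroring
     * "ldr r7, [r8, #12]" followed by "rev r7, r7" in the assembly. */
    uint32_t ctr = ((uint32_t)ivec[12] << 24) | ((uint32_t)ivec[13] << 16) |
                   ((uint32_t)ivec[14] << 8)  |  (uint32_t)ivec[15];
    uint8_t keystream[16];

    for (size_t i = 0; i < blocks; i++) {
        block_encrypt(ivec, keystream, key);   /* E_k(counter block)      */
        for (int j = 0; j < 16; j++)
            out[j] = in[j] ^ keystream[j];     /* XOR keystream into data */
        in += 16;
        out += 16;
        ctr++;                                 /* wraps modulo 2^32       */
        ivec[12] = (uint8_t)(ctr >> 24);       /* store back big-endian   */
        ivec[13] = (uint8_t)(ctr >> 16);
        ivec[14] = (uint8_t)(ctr >> 8);
        ivec[15] = (uint8_t)ctr;
    }
}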
marvin-hansen/iggy-streaming-system
7,683
thirdparty/crates/ring-0.17.9/pregenerated/aesv8-armx-ios64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include <ring-core/arm_arch.h> #if __ARM_MAX_ARCH__>=7 .text .section __TEXT,__const .align 5 Lrcon: .long 0x01,0x01,0x01,0x01 .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat .long 0x1b,0x1b,0x1b,0x1b .text .globl _aes_hw_set_encrypt_key .private_extern _aes_hw_set_encrypt_key .align 5 _aes_hw_set_encrypt_key: Lenc_key: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 mov x3,#-2 cmp w1,#128 b.lt Lenc_key_abort cmp w1,#256 b.gt Lenc_key_abort tst w1,#0x3f b.ne Lenc_key_abort adrp x3,Lrcon@PAGE add x3,x3,Lrcon@PAGEOFF cmp w1,#192 eor v0.16b,v0.16b,v0.16b ld1 {v3.16b},[x0],#16 mov w1,#8 // reuse w1 ld1 {v1.4s,v2.4s},[x3],#32 b.lt Loop128 // 192-bit key support was removed. b L256 .align 4 Loop128: tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b b.ne Loop128 ld1 {v1.4s},[x3] tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b eor v3.16b,v3.16b,v6.16b st1 {v3.4s},[x2] add x2,x2,#0x50 mov w12,#10 b Ldone // 192-bit key support was removed. .align 4 L256: ld1 {v4.16b},[x0] mov w1,#7 mov w12,#14 st1 {v3.4s},[x2],#16 Loop256: tbl v6.16b,{v4.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v4.4s},[x2],#16 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b st1 {v3.4s},[x2],#16 b.eq Ldone dup v6.4s,v3.s[3] // just splat ext v5.16b,v0.16b,v4.16b,#12 aese v6.16b,v0.16b eor v4.16b,v4.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v4.16b,v4.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v4.16b,v4.16b,v5.16b eor v4.16b,v4.16b,v6.16b b Loop256 Ldone: str w12,[x2] mov x3,#0 Lenc_key_abort: mov x0,x3 // return value ldr x29,[sp],#16 ret .globl _aes_hw_ctr32_encrypt_blocks .private_extern _aes_hw_ctr32_encrypt_blocks .align 5 _aes_hw_ctr32_encrypt_blocks: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 ldr w5,[x3,#240] ldr w8, [x4, #12] ld1 {v0.4s},[x4] ld1 {v16.4s,v17.4s},[x3] // load key schedule... 
sub w5,w5,#4 mov x12,#16 cmp x2,#2 add x7,x3,x5,lsl#4 // pointer to last 5 round keys sub w5,w5,#2 ld1 {v20.4s,v21.4s},[x7],#32 ld1 {v22.4s,v23.4s},[x7],#32 ld1 {v7.4s},[x7] add x7,x3,#32 mov w6,w5 csel x12,xzr,x12,lo // ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are // affected by silicon errata #1742098 [0] and #1655431 [1], // respectively, where the second instruction of an aese/aesmc // instruction pair may execute twice if an interrupt is taken right // after the first instruction consumes an input register of which a // single 32-bit lane has been updated the last time it was modified. // // This function uses a counter in one 32-bit lane. The vmov lines // could write to v1.16b and v18.16b directly, but that trips this bugs. // We write to v6.16b and copy to the final register as a workaround. // // [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice // [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice #ifndef __AARCH64EB__ rev w8, w8 #endif add w10, w8, #1 orr v6.16b,v0.16b,v0.16b rev w10, w10 mov v6.s[3],w10 add w8, w8, #2 orr v1.16b,v6.16b,v6.16b b.ls Lctr32_tail rev w12, w8 mov v6.s[3],w12 sub x2,x2,#3 // bias orr v18.16b,v6.16b,v6.16b b Loop3x_ctr32 .align 4 Loop3x_ctr32: aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b aese v18.16b,v17.16b aesmc v18.16b,v18.16b ld1 {v17.4s},[x7],#16 b.gt Loop3x_ctr32 aese v0.16b,v16.16b aesmc v4.16b,v0.16b aese v1.16b,v16.16b aesmc v5.16b,v1.16b ld1 {v2.16b},[x0],#16 add w9,w8,#1 aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v3.16b},[x0],#16 rev w9,w9 aese v4.16b,v17.16b aesmc v4.16b,v4.16b aese v5.16b,v17.16b aesmc v5.16b,v5.16b ld1 {v19.16b},[x0],#16 mov x7,x3 aese v18.16b,v17.16b aesmc v17.16b,v18.16b aese v4.16b,v20.16b aesmc v4.16b,v4.16b aese v5.16b,v20.16b aesmc v5.16b,v5.16b eor v2.16b,v2.16b,v7.16b add w10,w8,#2 aese v17.16b,v20.16b aesmc v17.16b,v17.16b eor v3.16b,v3.16b,v7.16b add w8,w8,#3 aese v4.16b,v21.16b aesmc v4.16b,v4.16b aese v5.16b,v21.16b aesmc v5.16b,v5.16b // Note the logic to update v0.16b, v1.16b, and v1.16b is written to work // around a bug in ARM Cortex-A57 and Cortex-A72 cores running in // 32-bit mode. See the comment above. 
eor v19.16b,v19.16b,v7.16b mov v6.s[3], w9 aese v17.16b,v21.16b aesmc v17.16b,v17.16b orr v0.16b,v6.16b,v6.16b rev w10,w10 aese v4.16b,v22.16b aesmc v4.16b,v4.16b mov v6.s[3], w10 rev w12,w8 aese v5.16b,v22.16b aesmc v5.16b,v5.16b orr v1.16b,v6.16b,v6.16b mov v6.s[3], w12 aese v17.16b,v22.16b aesmc v17.16b,v17.16b orr v18.16b,v6.16b,v6.16b subs x2,x2,#3 aese v4.16b,v23.16b aese v5.16b,v23.16b aese v17.16b,v23.16b eor v2.16b,v2.16b,v4.16b ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0] st1 {v2.16b},[x1],#16 eor v3.16b,v3.16b,v5.16b mov w6,w5 st1 {v3.16b},[x1],#16 eor v19.16b,v19.16b,v17.16b ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1] st1 {v19.16b},[x1],#16 b.hs Loop3x_ctr32 adds x2,x2,#3 b.eq Lctr32_done cmp x2,#1 mov x12,#16 csel x12,xzr,x12,eq Lctr32_tail: aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b ld1 {v17.4s},[x7],#16 b.gt Lctr32_tail aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b ld1 {v2.16b},[x0],x12 aese v0.16b,v20.16b aesmc v0.16b,v0.16b aese v1.16b,v20.16b aesmc v1.16b,v1.16b ld1 {v3.16b},[x0] aese v0.16b,v21.16b aesmc v0.16b,v0.16b aese v1.16b,v21.16b aesmc v1.16b,v1.16b eor v2.16b,v2.16b,v7.16b aese v0.16b,v22.16b aesmc v0.16b,v0.16b aese v1.16b,v22.16b aesmc v1.16b,v1.16b eor v3.16b,v3.16b,v7.16b aese v0.16b,v23.16b aese v1.16b,v23.16b cmp x2,#1 eor v2.16b,v2.16b,v0.16b eor v3.16b,v3.16b,v1.16b st1 {v2.16b},[x1],#16 b.eq Lctr32_done st1 {v3.16b},[x1] Lctr32_done: ldr x29,[sp],#16 ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
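The Loop128 key-expansion path above steps the round constant by "shl v1.16b,v1.16b,#1" for the first eight rounds and then reloads 0x1b from the Lrcon table for the remaining two, which is the standard GF(2^8) "xtime" recurrence for the AES-128 round constants. A small C sketch of that recurrence, offered only as an illustration of what the rcon handling computes:

/*
 * Illustrative sketch (not ring code): the AES-128 round-constant
 * sequence 01,02,04,...,80,1b,36 produced by repeatedly multiplying by x
 * modulo the AES polynomial x^8 + x^4 + x^3 + x + 1.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t xtime(uint8_t b)
{
    /* Multiply by x in GF(2^8); reduce by 0x1b when the high bit is set. */
    return (uint8_t)((b << 1) ^ ((b & 0x80) ? 0x1b : 0x00));
}

int main(void)
{
    uint8_t rcon = 0x01;
    for (int round = 1; round <= 10; round++) {
        printf("rcon[%2d] = 0x%02x\n", round, rcon);
        rcon = xtime(rcon);   /* 01,02,04,...,80,1b,36 */
    }
    return 0;
}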
marvin-hansen/iggy-streaming-system
36,779
thirdparty/crates/ring-0.17.9/pregenerated/p256-armv8-asm-linux64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__ELF__) #include "ring-core/arm_arch.h" .section .rodata .align 5 .Lpoly: .quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001 .LRR: // 2^512 mod P precomputed for NIST P256 polynomial .quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd .Lone_mont: .quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe .Lone: .quad 1,0,0,0 .Lord: .quad 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000 .LordK: .quad 0xccd1c8aaee00bc4f .byte 69,67,80,95,78,73,83,84,90,50,53,54,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .text // void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4], // const BN_ULONG x2[4]); .globl ecp_nistz256_mul_mont .hidden ecp_nistz256_mul_mont .type ecp_nistz256_mul_mont,%function .align 4 ecp_nistz256_mul_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-32]! add x29,sp,#0 stp x19,x20,[sp,#16] ldr x3,[x2] // bp[0] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_mul_mont ldp x19,x20,[sp,#16] ldp x29,x30,[sp],#32 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont // void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_sqr_mont .hidden ecp_nistz256_sqr_mont .type ecp_nistz256_sqr_mont,%function .align 4 ecp_nistz256_sqr_mont: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-32]! add x29,sp,#0 stp x19,x20,[sp,#16] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sqr_mont ldp x19,x20,[sp,#16] ldp x29,x30,[sp],#32 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont // void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]); .globl ecp_nistz256_neg .hidden ecp_nistz256_neg .type ecp_nistz256_neg,%function .align 4 ecp_nistz256_neg: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 mov x2,x1 mov x14,xzr // a = 0 mov x15,xzr mov x16,xzr mov x17,xzr adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] bl __ecp_nistz256_sub_from ldp x29,x30,[sp],#16 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_neg,.-ecp_nistz256_neg // note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded // to x4-x7 and b[0] - to x3 .type __ecp_nistz256_mul_mont,%function .align 4 __ecp_nistz256_mul_mont: mul x14,x4,x3 // a[0]*b[0] umulh x8,x4,x3 mul x15,x5,x3 // a[1]*b[0] umulh x9,x5,x3 mul x16,x6,x3 // a[2]*b[0] umulh x10,x6,x3 mul x17,x7,x3 // a[3]*b[0] umulh x11,x7,x3 ldr x3,[x2,#8] // b[1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adc x19,xzr,x11 mov x20,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr ldr x3,[x2,#8*(1+1)] // b[1+1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr ldr x3,[x2,#8*(2+1)] // b[2+1] adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] mul x8,x4,x3 // lo(a[0]*b[i]) adcs x15,x16,x9 mul x9,x5,x3 // lo(a[1]*b[i]) adcs x16,x17,x10 // +=acc[0]*0xffff0001 mul x10,x6,x3 // lo(a[2]*b[i]) adcs x17,x19,x11 mul x11,x7,x3 // lo(a[3]*b[i]) adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts of multiplication umulh x8,x4,x3 // hi(a[0]*b[i]) adcs x15,x15,x9 umulh x9,x5,x3 // hi(a[1]*b[i]) adcs x16,x16,x10 umulh x10,x6,x3 // hi(a[2]*b[i]) adcs x17,x17,x11 umulh x11,x7,x3 // hi(a[3]*b[i]) adc x19,x19,xzr adds x15,x15,x8 // accumulate high parts of multiplication lsl x8,x14,#32 adcs x16,x16,x9 lsr x9,x14,#32 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr // last reduction subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 adcs x16,x17,x10 // +=acc[0]*0xffff0001 adcs x17,x19,x11 adc x19,x20,xzr adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x19,xzr // did it borrow? csel x14,x14,x8,lo // ret = borrow ? 
ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret .size __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont // note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded // to x4-x7 .type __ecp_nistz256_sqr_mont,%function .align 4 __ecp_nistz256_sqr_mont: // | | | | | |a1*a0| | // | | | | |a2*a0| | | // | |a3*a2|a3*a0| | | | // | | | |a2*a1| | | | // | | |a3*a1| | | | | // *| | | | | | | | 2| // +|a3*a3|a2*a2|a1*a1|a0*a0| // |--+--+--+--+--+--+--+--| // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is , i.e. follow // // "can't overflow" below mark carrying into high part of // multiplication result, which can't overflow, because it // can never be all ones. mul x15,x5,x4 // a[1]*a[0] umulh x9,x5,x4 mul x16,x6,x4 // a[2]*a[0] umulh x10,x6,x4 mul x17,x7,x4 // a[3]*a[0] umulh x19,x7,x4 adds x16,x16,x9 // accumulate high parts of multiplication mul x8,x6,x5 // a[2]*a[1] umulh x9,x6,x5 adcs x17,x17,x10 mul x10,x7,x5 // a[3]*a[1] umulh x11,x7,x5 adc x19,x19,xzr // can't overflow mul x20,x7,x6 // a[3]*a[2] umulh x1,x7,x6 adds x9,x9,x10 // accumulate high parts of multiplication mul x14,x4,x4 // a[0]*a[0] adc x10,x11,xzr // can't overflow adds x17,x17,x8 // accumulate low parts of multiplication umulh x4,x4,x4 adcs x19,x19,x9 mul x9,x5,x5 // a[1]*a[1] adcs x20,x20,x10 umulh x5,x5,x5 adc x1,x1,xzr // can't overflow adds x15,x15,x15 // acc[1-6]*=2 mul x10,x6,x6 // a[2]*a[2] adcs x16,x16,x16 umulh x6,x6,x6 adcs x17,x17,x17 mul x11,x7,x7 // a[3]*a[3] adcs x19,x19,x19 umulh x7,x7,x7 adcs x20,x20,x20 adcs x1,x1,x1 adc x2,xzr,xzr adds x15,x15,x4 // +a[i]*a[i] adcs x16,x16,x9 adcs x17,x17,x5 adcs x19,x19,x10 adcs x20,x20,x6 lsl x8,x14,#32 adcs x1,x1,x11 lsr x9,x14,#32 adc x2,x2,x7 subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 lsl x8,x14,#32 adcs x16,x17,x10 // +=acc[0]*0xffff0001 lsr x9,x14,#32 adc x17,x11,xzr // can't overflow subs x10,x14,x8 // "*0xffff0001" sbc x11,x14,x9 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0] adcs x15,x16,x9 adcs x16,x17,x10 // +=acc[0]*0xffff0001 adc x17,x11,xzr // can't overflow adds x14,x14,x19 // accumulate upper half adcs x15,x15,x20 adcs x16,x16,x1 adcs x17,x17,x2 adc x19,xzr,xzr adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x19,xzr // did it borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret .size __ecp_nistz256_sqr_mont,.-__ecp_nistz256_sqr_mont // Note that __ecp_nistz256_add_to expects both input vectors pre-loaded to // x4-x7 and x8-x11. This is done because it's used in multiple // contexts, e.g. in multiplication by 2 and 3... 
.type __ecp_nistz256_add_to,%function .align 4 __ecp_nistz256_add_to: adds x14,x14,x8 // ret = a+b adcs x15,x15,x9 adcs x16,x16,x10 adcs x17,x17,x11 adc x1,xzr,xzr // zap x1 adds x8,x14,#1 // subs x8,x4,#-1 // tmp = ret-modulus sbcs x9,x15,x12 sbcs x10,x16,xzr sbcs x11,x17,x13 sbcs xzr,x1,xzr // did subtraction borrow? csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ret .size __ecp_nistz256_add_to,.-__ecp_nistz256_add_to .type __ecp_nistz256_sub_from,%function .align 4 __ecp_nistz256_sub_from: ldp x8,x9,[x2] ldp x10,x11,[x2,#16] subs x14,x14,x8 // ret = a-b sbcs x15,x15,x9 sbcs x16,x16,x10 sbcs x17,x17,x11 sbc x1,xzr,xzr // zap x1 subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus adcs x9,x15,x12 adcs x10,x16,xzr adc x11,x17,x13 cmp x1,xzr // did subtraction borrow? csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret csel x15,x15,x9,eq csel x16,x16,x10,eq stp x14,x15,[x0] csel x17,x17,x11,eq stp x16,x17,[x0,#16] ret .size __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from .type __ecp_nistz256_sub_morf,%function .align 4 __ecp_nistz256_sub_morf: ldp x8,x9,[x2] ldp x10,x11,[x2,#16] subs x14,x8,x14 // ret = b-a sbcs x15,x9,x15 sbcs x16,x10,x16 sbcs x17,x11,x17 sbc x1,xzr,xzr // zap x1 subs x8,x14,#1 // adds x8,x4,#-1 // tmp = ret+modulus adcs x9,x15,x12 adcs x10,x16,xzr adc x11,x17,x13 cmp x1,xzr // did subtraction borrow? csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret csel x15,x15,x9,eq csel x16,x16,x10,eq stp x14,x15,[x0] csel x17,x17,x11,eq stp x16,x17,[x0,#16] ret .size __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf .type __ecp_nistz256_div_by_2,%function .align 4 __ecp_nistz256_div_by_2: subs x8,x14,#1 // adds x8,x4,#-1 // tmp = a+modulus adcs x9,x15,x12 adcs x10,x16,xzr adcs x11,x17,x13 adc x1,xzr,xzr // zap x1 tst x14,#1 // is a even? csel x14,x14,x8,eq // ret = even ? a : a+modulus csel x15,x15,x9,eq csel x16,x16,x10,eq csel x17,x17,x11,eq csel x1,xzr,x1,eq lsr x14,x14,#1 // ret >>= 1 orr x14,x14,x15,lsl#63 lsr x15,x15,#1 orr x15,x15,x16,lsl#63 lsr x16,x16,#1 orr x16,x16,x17,lsl#63 lsr x17,x17,#1 stp x14,x15,[x0] orr x17,x17,x1,lsl#63 stp x16,x17,[x0,#16] ret .size __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2 .globl ecp_nistz256_point_double .hidden ecp_nistz256_point_double .type ecp_nistz256_point_double,%function .align 5 ecp_nistz256_point_double: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] sub sp,sp,#32*4 .Ldouble_shortcut: ldp x14,x15,[x1,#32] mov x21,x0 ldp x16,x17,[x1,#48] mov x22,x1 adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] mov x8,x14 ldr x13,[x13,#24] mov x9,x15 ldp x4,x5,[x22,#64] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[x22,#64+16] add x0,sp,#0 bl __ecp_nistz256_add_to // p256_mul_by_2(S, in_y); add x0,sp,#64 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z); ldp x8,x9,[x22] ldp x10,x11,[x22,#16] mov x4,x14 // put Zsqr aside for p256_sub mov x5,x15 mov x6,x16 mov x7,x17 add x0,sp,#32 bl __ecp_nistz256_add_to // p256_add(M, Zsqr, in_x); add x2,x22,#0 mov x14,x4 // restore Zsqr mov x15,x5 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont mov x16,x6 mov x17,x7 ldp x6,x7,[sp,#0+16] add x0,sp,#64 bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr); add x0,sp,#0 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S); ldr x3,[x22,#32] ldp x4,x5,[x22,#64] ldp x6,x7,[x22,#64+16] add x2,x22,#32 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y); mov x8,x14 mov x9,x15 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[sp,#0+16] add x0,x21,#64 bl __ecp_nistz256_add_to // p256_mul_by_2(res_z, tmp0); add x0,sp,#96 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S); ldr x3,[sp,#64] // forward load for p256_mul_mont ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x0,x21,#32 bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0); add x2,sp,#64 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr); mov x8,x14 // duplicate M mov x9,x15 mov x10,x16 mov x11,x17 mov x4,x14 // put M aside mov x5,x15 mov x6,x16 mov x7,x17 add x0,sp,#32 bl __ecp_nistz256_add_to mov x8,x4 // restore M mov x9,x5 ldr x3,[x22] // forward load for p256_mul_mont mov x10,x6 ldp x4,x5,[sp,#0] mov x11,x7 ldp x6,x7,[sp,#0+16] bl __ecp_nistz256_add_to // p256_mul_by_3(M, M); add x2,x22,#0 add x0,sp,#0 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x); mov x8,x14 mov x9,x15 ldp x4,x5,[sp,#32] // forward load for p256_sqr_mont mov x10,x16 mov x11,x17 ldp x6,x7,[sp,#32+16] add x0,sp,#96 bl __ecp_nistz256_add_to // p256_mul_by_2(tmp0, S); add x0,x21,#0 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M); add x2,sp,#96 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0); add x2,sp,#0 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x); ldr x3,[sp,#32] mov x4,x14 // copy S mov x5,x15 mov x6,x16 mov x7,x17 add x2,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M); add x2,x21,#32 add x0,x21,#32 bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y); add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_point_double,.-ecp_nistz256_point_double .globl ecp_nistz256_point_add .hidden ecp_nistz256_point_add .type ecp_nistz256_point_add,%function .align 5 ecp_nistz256_point_add: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-96]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#32*12 ldp x4,x5,[x2,#64] // in2_z ldp x6,x7,[x2,#64+16] mov x21,x0 mov x22,x1 mov x23,x2 adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] orr x8,x4,x5 orr x10,x6,x7 orr x25,x8,x10 cmp x25,#0 csetm x25,ne // ~in2infty add x0,sp,#192 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z); ldp x4,x5,[x22,#64] // in1_z ldp x6,x7,[x22,#64+16] orr x8,x4,x5 orr x10,x6,x7 orr x24,x8,x10 cmp x24,#0 csetm x24,ne // ~in1infty add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z); ldr x3,[x23,#64] ldp x4,x5,[sp,#192] ldp x6,x7,[sp,#192+16] add x2,x23,#64 add x0,sp,#320 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z); ldr x3,[x22,#64] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,x22,#64 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z); ldr x3,[x22,#32] ldp x4,x5,[sp,#320] ldp x6,x7,[sp,#320+16] add x2,x22,#32 add x0,sp,#320 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y); ldr x3,[x23,#32] ldp x4,x5,[sp,#352] ldp x6,x7,[sp,#352+16] add x2,x23,#32 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y); add x2,sp,#320 ldr x3,[sp,#192] // forward load for p256_mul_mont ldp x4,x5,[x22] ldp x6,x7,[x22,#16] add x0,sp,#160 bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1); orr x14,x14,x15 // see if result is zero orr x16,x16,x17 orr x26,x14,x16 // ~is_equal(S1,S2) add x2,sp,#192 add x0,sp,#256 bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr); ldr x3,[sp,#128] ldp x4,x5,[x23] ldp x6,x7,[x23,#16] add x2,sp,#128 add x0,sp,#288 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr); add x2,sp,#256 ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont ldp x6,x7,[sp,#160+16] add x0,sp,#96 bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1); orr x14,x14,x15 // see if result is zero orr x16,x16,x17 orr x14,x14,x16 // ~is_equal(U1,U2) mvn x27,x24 // -1/0 -> 0/-1 mvn x28,x25 // -1/0 -> 0/-1 orr x14,x14,x27 orr x14,x14,x28 orr x14,x14,x26 cbnz x14,.Ladd_proceed // if(~is_equal(U1,U2) | in1infty | in2infty | ~is_equal(S1,S2)) .Ladd_double: mov x1,x22 mov x0,x21 ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] add sp,sp,#256 // #256 is from #32*(12-4). 
difference in stack frames b .Ldouble_shortcut .align 4 .Ladd_proceed: add x0,sp,#192 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R); ldr x3,[x22,#64] ldp x4,x5,[sp,#96] ldp x6,x7,[sp,#96+16] add x2,x22,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z); ldp x4,x5,[sp,#96] ldp x6,x7,[sp,#96+16] add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H); ldr x3,[x23,#64] ldp x4,x5,[sp,#64] ldp x6,x7,[sp,#64+16] add x2,x23,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z); ldr x3,[sp,#96] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,sp,#96 add x0,sp,#224 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H); ldr x3,[sp,#128] ldp x4,x5,[sp,#256] ldp x6,x7,[sp,#256+16] add x2,sp,#128 add x0,sp,#288 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr); mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 add x0,sp,#128 bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2); add x2,sp,#192 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr); add x2,sp,#224 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub); add x2,sp,#288 ldr x3,[sp,#224] // forward load for p256_mul_mont ldp x4,x5,[sp,#320] ldp x6,x7,[sp,#320+16] add x0,sp,#32 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x); add x2,sp,#224 add x0,sp,#352 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub); ldr x3,[sp,#160] ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x2,sp,#160 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R); add x2,sp,#352 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2); ldp x4,x5,[sp,#0] // res ldp x6,x7,[sp,#0+16] ldp x8,x9,[x23] // in2 ldp x10,x11,[x23,#16] ldp x14,x15,[x22,#0] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#0+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+0+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+0+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#0+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#0+48] stp x14,x15,[x21,#0] stp x16,x17,[x21,#0+16] ldp x14,x15,[x22,#32] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#32+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+32+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+32+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#32+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#32+48] stp x14,x15,[x21,#32] stp x16,x17,[x21,#32+16] ldp x14,x15,[x22,#64] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#64+16] csel x8,x4,x8,ne csel x9,x5,x9,ne csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? csel x14,x8,x14,ne csel x15,x9,x15,ne csel x16,x10,x16,ne csel x17,x11,x17,ne stp x14,x15,[x21,#64] stp x16,x17,[x21,#64+16] .Ladd_done: add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#96 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_point_add,.-ecp_nistz256_point_add .globl ecp_nistz256_point_add_affine .hidden ecp_nistz256_point_add_affine .type ecp_nistz256_point_add_affine,%function .align 5 ecp_nistz256_point_add_affine: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-80]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] sub sp,sp,#32*10 mov x21,x0 mov x22,x1 mov x23,x2 adrp x13,.Lpoly add x13,x13,:lo12:.Lpoly ldr x12,[x13,#8] ldr x13,[x13,#24] ldp x4,x5,[x1,#64] // in1_z ldp x6,x7,[x1,#64+16] orr x8,x4,x5 orr x10,x6,x7 orr x24,x8,x10 cmp x24,#0 csetm x24,ne // ~in1infty ldp x14,x15,[x2] // in2_x ldp x16,x17,[x2,#16] ldp x8,x9,[x2,#32] // in2_y ldp x10,x11,[x2,#48] orr x14,x14,x15 orr x16,x16,x17 orr x8,x8,x9 orr x10,x10,x11 orr x14,x14,x16 orr x8,x8,x10 orr x25,x14,x8 cmp x25,#0 csetm x25,ne // ~in2infty add x0,sp,#128 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z); mov x4,x14 mov x5,x15 mov x6,x16 mov x7,x17 ldr x3,[x23] add x2,x23,#0 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x); add x2,x22,#0 ldr x3,[x22,#64] // forward load for p256_mul_mont ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x0,sp,#160 bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x); add x2,x22,#64 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z); ldr x3,[x22,#64] ldp x4,x5,[sp,#160] ldp x6,x7,[sp,#160+16] add x2,x22,#64 add x0,sp,#64 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z); ldr x3,[x23,#32] ldp x4,x5,[sp,#128] ldp x6,x7,[sp,#128+16] add x2,x23,#32 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y); add x2,x22,#32 ldp x4,x5,[sp,#160] // forward load for p256_sqr_mont ldp x6,x7,[sp,#160+16] add x0,sp,#192 bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y); add x0,sp,#224 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H); ldp x4,x5,[sp,#192] ldp x6,x7,[sp,#192+16] add x0,sp,#288 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R); ldr x3,[sp,#160] ldp x4,x5,[sp,#224] ldp x6,x7,[sp,#224+16] add x2,sp,#160 add x0,sp,#256 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H); ldr x3,[x22] ldp x4,x5,[sp,#224] ldp x6,x7,[sp,#224+16] add x2,x22,#0 add x0,sp,#96 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in1_x, Hsqr); mov x8,x14 mov x9,x15 mov x10,x16 mov x11,x17 add x0,sp,#224 bl __ecp_nistz256_add_to // p256_mul_by_2(Hsqr, U2); add x2,sp,#288 add x0,sp,#0 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr); add x2,sp,#256 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub); add x2,sp,#96 ldr x3,[x22,#32] // forward load for p256_mul_mont ldp x4,x5,[sp,#256] ldp x6,x7,[sp,#256+16] add x0,sp,#32 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x); add x2,x22,#32 add x0,sp,#128 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub); ldr x3,[sp,#192] ldp x4,x5,[sp,#32] ldp x6,x7,[sp,#32+16] add x2,sp,#192 add x0,sp,#32 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R); add x2,sp,#128 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2); ldp x4,x5,[sp,#0] // res ldp x6,x7,[sp,#0+16] ldp x8,x9,[x23] // in2 ldp x10,x11,[x23,#16] ldp x14,x15,[x22,#0] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#0+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+0+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+0+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#0+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#0+48] stp x14,x15,[x21,#0] stp x16,x17,[x21,#0+16] adrp x23,.Lone_mont-64 add x23,x23,:lo12:.Lone_mont-64 ldp x14,x15,[x22,#32] // in1 cmp x24,#0 // ~, remember? 
ldp x16,x17,[x22,#32+16] csel x8,x4,x8,ne csel x9,x5,x9,ne ldp x4,x5,[sp,#0+32+32] // res csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? ldp x6,x7,[sp,#0+32+48] csel x14,x8,x14,ne csel x15,x9,x15,ne ldp x8,x9,[x23,#32+32] // in2 csel x16,x10,x16,ne csel x17,x11,x17,ne ldp x10,x11,[x23,#32+48] stp x14,x15,[x21,#32] stp x16,x17,[x21,#32+16] ldp x14,x15,[x22,#64] // in1 cmp x24,#0 // ~, remember? ldp x16,x17,[x22,#64+16] csel x8,x4,x8,ne csel x9,x5,x9,ne csel x10,x6,x10,ne csel x11,x7,x11,ne cmp x25,#0 // ~, remember? csel x14,x8,x14,ne csel x15,x9,x15,ne csel x16,x10,x16,ne csel x17,x11,x17,ne stp x14,x15,[x21,#64] stp x16,x17,[x21,#64+16] add sp,x29,#0 // destroy frame ldp x19,x20,[x29,#16] ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x29,x30,[sp],#80 AARCH64_VALIDATE_LINK_REGISTER ret .size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4], // uint64_t b[4]); .globl ecp_nistz256_ord_mul_mont .hidden ecp_nistz256_ord_mul_mont .type ecp_nistz256_ord_mul_mont,%function .align 4 ecp_nistz256_ord_mul_mont: AARCH64_VALID_CALL_TARGET // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] adrp x23,.Lord add x23,x23,:lo12:.Lord ldr x3,[x2] // bp[0] ldp x4,x5,[x1] ldp x6,x7,[x1,#16] ldp x12,x13,[x23,#0] ldp x21,x22,[x23,#16] ldr x23,[x23,#32] mul x14,x4,x3 // a[0]*b[0] umulh x8,x4,x3 mul x15,x5,x3 // a[1]*b[0] umulh x9,x5,x3 mul x16,x6,x3 // a[2]*b[0] umulh x10,x6,x3 mul x17,x7,x3 // a[3]*b[0] umulh x19,x7,x3 mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts of multiplication adcs x16,x16,x9 adcs x17,x17,x10 adc x19,x19,xzr mov x20,xzr ldr x3,[x2,#8*1] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr ldr x3,[x2,#8*2] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr ldr x3,[x2,#8*3] // b[i] lsl x8,x24,#32 subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 mul x8,x4,x3 adc x11,x11,xzr mul x9,x5,x3 adds x14,x15,x10 mul x10,x6,x3 adcs x15,x16,x11 mul x11,x7,x3 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr adds x14,x14,x8 // 
accumulate low parts umulh x8,x4,x3 adcs x15,x15,x9 umulh x9,x5,x3 adcs x16,x16,x10 umulh x10,x6,x3 adcs x17,x17,x11 umulh x11,x7,x3 adc x19,x19,xzr mul x24,x14,x23 adds x15,x15,x8 // accumulate high parts adcs x16,x16,x9 adcs x17,x17,x10 adcs x19,x19,x11 adc x20,xzr,xzr lsl x8,x24,#32 // last reduction subs x16,x16,x24 lsr x9,x24,#32 sbcs x17,x17,x8 sbcs x19,x19,x9 sbc x20,x20,xzr subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adcs x17,x19,x24 adc x19,x20,xzr subs x8,x14,x12 // ret -= modulus sbcs x9,x15,x13 sbcs x10,x16,x21 sbcs x11,x17,x22 sbcs xzr,x19,xzr csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x15,x15,x9,lo csel x16,x16,x10,lo stp x14,x15,[x0] csel x17,x17,x11,lo stp x16,x17,[x0,#16] ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldr x29,[sp],#64 ret .size ecp_nistz256_ord_mul_mont,.-ecp_nistz256_ord_mul_mont //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4], // uint64_t rep); .globl ecp_nistz256_ord_sqr_mont .hidden ecp_nistz256_ord_sqr_mont .type ecp_nistz256_ord_sqr_mont,%function .align 4 ecp_nistz256_ord_sqr_mont: AARCH64_VALID_CALL_TARGET // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-64]! add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] adrp x23,.Lord add x23,x23,:lo12:.Lord ldp x4,x5,[x1] ldp x6,x7,[x1,#16] ldp x12,x13,[x23,#0] ldp x21,x22,[x23,#16] ldr x23,[x23,#32] b .Loop_ord_sqr .align 4 .Loop_ord_sqr: sub x2,x2,#1 //////////////////////////////////////////////////////////////// // | | | | | |a1*a0| | // | | | | |a2*a0| | | // | |a3*a2|a3*a0| | | | // | | | |a2*a1| | | | // | | |a3*a1| | | | | // *| | | | | | | | 2| // +|a3*a3|a2*a2|a1*a1|a0*a0| // |--+--+--+--+--+--+--+--| // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is , i.e. follow // // "can't overflow" below mark carrying into high part of // multiplication result, which can't overflow, because it // can never be all ones. 
mul x15,x5,x4 // a[1]*a[0] umulh x9,x5,x4 mul x16,x6,x4 // a[2]*a[0] umulh x10,x6,x4 mul x17,x7,x4 // a[3]*a[0] umulh x19,x7,x4 adds x16,x16,x9 // accumulate high parts of multiplication mul x8,x6,x5 // a[2]*a[1] umulh x9,x6,x5 adcs x17,x17,x10 mul x10,x7,x5 // a[3]*a[1] umulh x11,x7,x5 adc x19,x19,xzr // can't overflow mul x20,x7,x6 // a[3]*a[2] umulh x1,x7,x6 adds x9,x9,x10 // accumulate high parts of multiplication mul x14,x4,x4 // a[0]*a[0] adc x10,x11,xzr // can't overflow adds x17,x17,x8 // accumulate low parts of multiplication umulh x4,x4,x4 adcs x19,x19,x9 mul x9,x5,x5 // a[1]*a[1] adcs x20,x20,x10 umulh x5,x5,x5 adc x1,x1,xzr // can't overflow adds x15,x15,x15 // acc[1-6]*=2 mul x10,x6,x6 // a[2]*a[2] adcs x16,x16,x16 umulh x6,x6,x6 adcs x17,x17,x17 mul x11,x7,x7 // a[3]*a[3] adcs x19,x19,x19 umulh x7,x7,x7 adcs x20,x20,x20 adcs x1,x1,x1 adc x3,xzr,xzr adds x15,x15,x4 // +a[i]*a[i] mul x24,x14,x23 adcs x16,x16,x9 adcs x17,x17,x5 adcs x19,x19,x10 adcs x20,x20,x6 adcs x1,x1,x11 adc x3,x3,x7 subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adc x17,xzr,x24 // can't overflow mul x11,x14,x23 lsl x8,x24,#32 subs x15,x15,x24 lsr x9,x24,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x11 mul x10,x13,x11 umulh x24,x13,x11 adcs x10,x10,x9 adc x24,x24,xzr adds x14,x15,x10 adcs x15,x16,x24 adcs x16,x17,x11 adc x17,xzr,x11 // can't overflow mul x24,x14,x23 lsl x8,x11,#32 subs x15,x15,x11 lsr x9,x11,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x24 mul x10,x13,x24 umulh x11,x13,x24 adcs x10,x10,x9 adc x11,x11,xzr adds x14,x15,x10 adcs x15,x16,x11 adcs x16,x17,x24 adc x17,xzr,x24 // can't overflow mul x11,x14,x23 lsl x8,x24,#32 subs x15,x15,x24 lsr x9,x24,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow subs xzr,x14,#1 umulh x9,x12,x11 mul x10,x13,x11 umulh x24,x13,x11 adcs x10,x10,x9 adc x24,x24,xzr adds x14,x15,x10 adcs x15,x16,x24 adcs x16,x17,x11 adc x17,xzr,x11 // can't overflow lsl x8,x11,#32 subs x15,x15,x11 lsr x9,x11,#32 sbcs x16,x16,x8 sbc x17,x17,x9 // can't borrow adds x14,x14,x19 // accumulate upper half adcs x15,x15,x20 adcs x16,x16,x1 adcs x17,x17,x3 adc x19,xzr,xzr subs x8,x14,x12 // ret -= modulus sbcs x9,x15,x13 sbcs x10,x16,x21 sbcs x11,x17,x22 sbcs xzr,x19,xzr csel x4,x14,x8,lo // ret = borrow ? ret : ret-modulus csel x5,x15,x9,lo csel x6,x16,x10,lo csel x7,x17,x11,lo cbnz x2,.Loop_ord_sqr stp x4,x5,[x0] stp x6,x7,[x0,#16] ldp x19,x20,[sp,#16] ldp x21,x22,[sp,#32] ldp x23,x24,[sp,#48] ldr x29,[sp],#64 ret .size ecp_nistz256_ord_sqr_mont,.-ecp_nistz256_ord_sqr_mont //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_select_w5(uint64_t *val, uint64_t *in_t, int index); .globl ecp_nistz256_select_w5 .hidden ecp_nistz256_select_w5 .type ecp_nistz256_select_w5,%function .align 4 ecp_nistz256_select_w5: AARCH64_VALID_CALL_TARGET // x10 := x0 // w9 := 0; loop counter and incremented internal index mov x10, x0 mov w9, #0 // [v16-v21] := 0 movi v16.16b, #0 movi v17.16b, #0 movi v18.16b, #0 movi v19.16b, #0 movi v20.16b, #0 movi v21.16b, #0 .Lselect_w5_loop: // Loop 16 times. // Increment index (loop counter); tested at the end of the loop add w9, w9, #1 // [v22-v27] := Load a (3*256-bit = 6*128-bit) table entry starting at x1 // and advance x1 to point to the next entry ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64 // x11 := (w9 == w2)? 
All 1s : All 0s cmp w9, w2 csetm x11, eq // continue loading ... ld1 {v26.2d, v27.2d}, [x1],#32 // duplicate mask_64 into Mask (all 0s or all 1s) dup v3.2d, x11 // [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19] // i.e., values in output registers will remain the same if w9 != w2 bit v16.16b, v22.16b, v3.16b bit v17.16b, v23.16b, v3.16b bit v18.16b, v24.16b, v3.16b bit v19.16b, v25.16b, v3.16b bit v20.16b, v26.16b, v3.16b bit v21.16b, v27.16b, v3.16b // If bit #4 is not 0 (i.e. idx_ctr < 16) loop back tbz w9, #4, .Lselect_w5_loop // Write [v16-v21] to memory at the output pointer st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x10],#64 st1 {v20.2d, v21.2d}, [x10] ret .size ecp_nistz256_select_w5,.-ecp_nistz256_select_w5 //////////////////////////////////////////////////////////////////////// // void ecp_nistz256_select_w7(uint64_t *val, uint64_t *in_t, int index); .globl ecp_nistz256_select_w7 .hidden ecp_nistz256_select_w7 .type ecp_nistz256_select_w7,%function .align 4 ecp_nistz256_select_w7: AARCH64_VALID_CALL_TARGET // w9 := 0; loop counter and incremented internal index mov w9, #0 // [v16-v21] := 0 movi v16.16b, #0 movi v17.16b, #0 movi v18.16b, #0 movi v19.16b, #0 .Lselect_w7_loop: // Loop 64 times. // Increment index (loop counter); tested at the end of the loop add w9, w9, #1 // [v22-v25] := Load a (2*256-bit = 4*128-bit) table entry starting at x1 // and advance x1 to point to the next entry ld1 {v22.2d, v23.2d, v24.2d, v25.2d}, [x1],#64 // x11 := (w9 == w2)? All 1s : All 0s cmp w9, w2 csetm x11, eq // duplicate mask_64 into Mask (all 0s or all 1s) dup v3.2d, x11 // [v16-v19] := (Mask == all 1s)? [v22-v25] : [v16-v19] // i.e., values in output registers will remain the same if w9 != w2 bit v16.16b, v22.16b, v3.16b bit v17.16b, v23.16b, v3.16b bit v18.16b, v24.16b, v3.16b bit v19.16b, v25.16b, v3.16b // If bit #6 is not 0 (i.e. idx_ctr < 64) loop back tbz w9, #6, .Lselect_w7_loop // Write [v16-v19] to memory at the output pointer st1 {v16.2d, v17.2d, v18.2d, v19.2d}, [x0] ret .size ecp_nistz256_select_w7,.-ecp_nistz256_select_w7 #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__ELF__)
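The ecp_nistz256_select_w5/select_w7 routines above read every precomputed table entry and use an all-ones/all-zeros mask (cmp/csetm plus bit instructions) to keep only the requested one, so the memory access pattern does not depend on the secret index. A minimal C sketch of the same masking pattern follows; the entry layout and function name are assumptions for illustration, not the crate's API.

/*
 * Illustrative sketch (not ring code): constant-time table selection in
 * the style of ecp_nistz256_select_w7.  Every entry is touched; a mask of
 * all ones or all zeros decides which one survives.  The assembly uses
 * BIT (bitwise insert); OR-accumulating into a zeroed output is
 * equivalent here because at most one mask is all ones.
 */
#include <stdint.h>
#include <stddef.h>

/* One w7 table entry is an affine point: 2 * 256 bits = 8 limbs. */
#define LIMBS_PER_ENTRY 8

static void select_w7(uint64_t out[LIMBS_PER_ENTRY],
                      const uint64_t table[64][LIMBS_PER_ENTRY],
                      uint64_t index /* 1..64, matching the assembly */)
{
    for (size_t j = 0; j < LIMBS_PER_ENTRY; j++)
        out[j] = 0;

    for (uint64_t i = 1; i <= 64; i++) {
        /* mask = all ones when i == index, else all zeros (like csetm). */
        uint64_t mask = 0 - (uint64_t)(i == index);
        for (size_t j = 0; j < LIMBS_PER_ENTRY; j++)
            out[j] |= table[i - 1][j] & mask;
    }
}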
marvin-hansen/iggy-streaming-system
9,958
thirdparty/crates/ring-0.17.9/pregenerated/vpaes-x86-elf.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__) .text #ifdef BORINGSSL_DISPATCH_TEST #endif .align 64 .L_vpaes_consts: .long 218628480,235210255,168496130,67568393 .long 252381056,17041926,33884169,51187212 .long 252645135,252645135,252645135,252645135 .long 1512730624,3266504856,1377990664,3401244816 .long 830229760,1275146365,2969422977,3447763452 .long 3411033600,2979783055,338359620,2782886510 .long 4209124096,907596821,221174255,1006095553 .long 191964160,3799684038,3164090317,1589111125 .long 182528256,1777043520,2877432650,3265356744 .long 1874708224,3503451415,3305285752,363511674 .long 1606117888,3487855781,1093350906,2384367825 .long 197121,67569157,134941193,202313229 .long 67569157,134941193,202313229,197121 .long 134941193,202313229,197121,67569157 .long 202313229,197121,67569157,134941193 .long 33619971,100992007,168364043,235736079 .long 235736079,33619971,100992007,168364043 .long 168364043,235736079,33619971,100992007 .long 100992007,168364043,235736079,33619971 .long 50462976,117835012,185207048,252579084 .long 252314880,51251460,117574920,184942860 .long 184682752,252054788,50987272,118359308 .long 118099200,185467140,251790600,50727180 .long 2946363062,528716217,1300004225,1881839624 .long 1532713819,1532713819,1532713819,1532713819 .long 3602276352,4288629033,3737020424,4153884961 .long 1354558464,32357713,2958822624,3775749553 .long 1201988352,132424512,1572796698,503232858 .long 2213177600,1597421020,4103937655,675398315 .byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105 .byte 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83 .byte 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117 .byte 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105 .byte 118,101,114,115,105,116,121,41,0 .align 64 .hidden _vpaes_preheat .type _vpaes_preheat,@function .align 16 _vpaes_preheat: addl (%esp),%ebp movdqa -48(%ebp),%xmm7 movdqa -16(%ebp),%xmm6 ret .size _vpaes_preheat,.-_vpaes_preheat .hidden _vpaes_encrypt_core .type _vpaes_encrypt_core,@function .align 16 _vpaes_encrypt_core: movl $16,%ecx movl 240(%edx),%eax movdqa %xmm6,%xmm1 movdqa (%ebp),%xmm2 pandn %xmm0,%xmm1 pand %xmm6,%xmm0 movdqu (%edx),%xmm5 .byte 102,15,56,0,208 movdqa 16(%ebp),%xmm0 pxor %xmm5,%xmm2 psrld $4,%xmm1 addl $16,%edx .byte 102,15,56,0,193 leal 192(%ebp),%ebx pxor %xmm2,%xmm0 jmp .L000enc_entry .align 16 .L001enc_loop: movdqa 32(%ebp),%xmm4 movdqa 48(%ebp),%xmm0 .byte 102,15,56,0,226 .byte 102,15,56,0,195 pxor %xmm5,%xmm4 movdqa 64(%ebp),%xmm5 pxor %xmm4,%xmm0 movdqa -64(%ebx,%ecx,1),%xmm1 .byte 102,15,56,0,234 movdqa 80(%ebp),%xmm2 movdqa (%ebx,%ecx,1),%xmm4 .byte 102,15,56,0,211 movdqa %xmm0,%xmm3 pxor %xmm5,%xmm2 .byte 102,15,56,0,193 addl $16,%edx pxor %xmm2,%xmm0 .byte 102,15,56,0,220 addl $16,%ecx pxor %xmm0,%xmm3 .byte 102,15,56,0,193 andl $48,%ecx subl $1,%eax pxor %xmm3,%xmm0 .L000enc_entry: movdqa %xmm6,%xmm1 movdqa -32(%ebp),%xmm5 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm6,%xmm0 .byte 102,15,56,0,232 movdqa %xmm7,%xmm3 pxor %xmm1,%xmm0 .byte 102,15,56,0,217 movdqa %xmm7,%xmm4 pxor %xmm5,%xmm3 .byte 102,15,56,0,224 movdqa %xmm7,%xmm2 pxor %xmm5,%xmm4 .byte 102,15,56,0,211 movdqa %xmm7,%xmm3 pxor %xmm0,%xmm2 .byte 102,15,56,0,220 movdqu (%edx),%xmm5 pxor %xmm1,%xmm3 jnz .L001enc_loop movdqa 96(%ebp),%xmm4 movdqa 112(%ebp),%xmm0 .byte 102,15,56,0,226 pxor %xmm5,%xmm4 .byte 102,15,56,0,195 movdqa 
64(%ebx,%ecx,1),%xmm1 pxor %xmm4,%xmm0 .byte 102,15,56,0,193 ret .size _vpaes_encrypt_core,.-_vpaes_encrypt_core .hidden _vpaes_schedule_core .type _vpaes_schedule_core,@function .align 16 _vpaes_schedule_core: addl (%esp),%ebp movdqu (%esi),%xmm0 movdqa 320(%ebp),%xmm2 movdqa %xmm0,%xmm3 leal (%ebp),%ebx movdqa %xmm2,4(%esp) call _vpaes_schedule_transform movdqa %xmm0,%xmm7 testl %edi,%edi jnz .L002schedule_am_decrypting movdqu %xmm0,(%edx) jmp .L003schedule_go .L002schedule_am_decrypting: movdqa 256(%ebp,%ecx,1),%xmm1 .byte 102,15,56,0,217 movdqu %xmm3,(%edx) xorl $48,%ecx .L003schedule_go: cmpl $192,%eax ja .L004schedule_256 .L005schedule_128: movl $10,%eax .L006loop_schedule_128: call _vpaes_schedule_round decl %eax jz .L007schedule_mangle_last call _vpaes_schedule_mangle jmp .L006loop_schedule_128 .align 16 .L004schedule_256: movdqu 16(%esi),%xmm0 call _vpaes_schedule_transform movl $7,%eax .L008loop_schedule_256: call _vpaes_schedule_mangle movdqa %xmm0,%xmm6 call _vpaes_schedule_round decl %eax jz .L007schedule_mangle_last call _vpaes_schedule_mangle pshufd $255,%xmm0,%xmm0 movdqa %xmm7,20(%esp) movdqa %xmm6,%xmm7 call .L_vpaes_schedule_low_round movdqa 20(%esp),%xmm7 jmp .L008loop_schedule_256 .align 16 .L007schedule_mangle_last: leal 384(%ebp),%ebx testl %edi,%edi jnz .L009schedule_mangle_last_dec movdqa 256(%ebp,%ecx,1),%xmm1 .byte 102,15,56,0,193 leal 352(%ebp),%ebx addl $32,%edx .L009schedule_mangle_last_dec: addl $-16,%edx pxor 336(%ebp),%xmm0 call _vpaes_schedule_transform movdqu %xmm0,(%edx) pxor %xmm0,%xmm0 pxor %xmm1,%xmm1 pxor %xmm2,%xmm2 pxor %xmm3,%xmm3 pxor %xmm4,%xmm4 pxor %xmm5,%xmm5 pxor %xmm6,%xmm6 pxor %xmm7,%xmm7 ret .size _vpaes_schedule_core,.-_vpaes_schedule_core .hidden _vpaes_schedule_round .type _vpaes_schedule_round,@function .align 16 _vpaes_schedule_round: movdqa 8(%esp),%xmm2 pxor %xmm1,%xmm1 .byte 102,15,58,15,202,15 .byte 102,15,58,15,210,15 pxor %xmm1,%xmm7 pshufd $255,%xmm0,%xmm0 .byte 102,15,58,15,192,1 movdqa %xmm2,8(%esp) .L_vpaes_schedule_low_round: movdqa %xmm7,%xmm1 pslldq $4,%xmm7 pxor %xmm1,%xmm7 movdqa %xmm7,%xmm1 pslldq $8,%xmm7 pxor %xmm1,%xmm7 pxor 336(%ebp),%xmm7 movdqa -16(%ebp),%xmm4 movdqa -48(%ebp),%xmm5 movdqa %xmm4,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm4,%xmm0 movdqa -32(%ebp),%xmm2 .byte 102,15,56,0,208 pxor %xmm1,%xmm0 movdqa %xmm5,%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 movdqa %xmm5,%xmm4 .byte 102,15,56,0,224 pxor %xmm2,%xmm4 movdqa %xmm5,%xmm2 .byte 102,15,56,0,211 pxor %xmm0,%xmm2 movdqa %xmm5,%xmm3 .byte 102,15,56,0,220 pxor %xmm1,%xmm3 movdqa 32(%ebp),%xmm4 .byte 102,15,56,0,226 movdqa 48(%ebp),%xmm0 .byte 102,15,56,0,195 pxor %xmm4,%xmm0 pxor %xmm7,%xmm0 movdqa %xmm0,%xmm7 ret .size _vpaes_schedule_round,.-_vpaes_schedule_round .hidden _vpaes_schedule_transform .type _vpaes_schedule_transform,@function .align 16 _vpaes_schedule_transform: movdqa -16(%ebp),%xmm2 movdqa %xmm2,%xmm1 pandn %xmm0,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm0 movdqa (%ebx),%xmm2 .byte 102,15,56,0,208 movdqa 16(%ebx),%xmm0 .byte 102,15,56,0,193 pxor %xmm2,%xmm0 ret .size _vpaes_schedule_transform,.-_vpaes_schedule_transform .hidden _vpaes_schedule_mangle .type _vpaes_schedule_mangle,@function .align 16 _vpaes_schedule_mangle: movdqa %xmm0,%xmm4 movdqa 128(%ebp),%xmm5 testl %edi,%edi jnz .L010schedule_mangle_dec addl $16,%edx pxor 336(%ebp),%xmm4 .byte 102,15,56,0,229 movdqa %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 .byte 102,15,56,0,229 pxor %xmm4,%xmm3 jmp .L011schedule_mangle_both .align 16 .L010schedule_mangle_dec: movdqa 
-16(%ebp),%xmm2 leal (%ebp),%esi movdqa %xmm2,%xmm1 pandn %xmm4,%xmm1 psrld $4,%xmm1 pand %xmm2,%xmm4 movdqa (%esi),%xmm2 .byte 102,15,56,0,212 movdqa 16(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 32(%esi),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 48(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 64(%esi),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 80(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 .byte 102,15,56,0,221 movdqa 96(%esi),%xmm2 .byte 102,15,56,0,212 pxor %xmm3,%xmm2 movdqa 112(%esi),%xmm3 .byte 102,15,56,0,217 pxor %xmm2,%xmm3 addl $-16,%edx .L011schedule_mangle_both: movdqa 256(%ebp,%ecx,1),%xmm1 .byte 102,15,56,0,217 addl $-16,%ecx andl $48,%ecx movdqu %xmm3,(%edx) ret .size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle .globl vpaes_set_encrypt_key .hidden vpaes_set_encrypt_key .type vpaes_set_encrypt_key,@function .align 16 vpaes_set_encrypt_key: .L_vpaes_set_encrypt_key_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L012pic_for_function_hit .L012pic_for_function_hit: popl %ebx leal BORINGSSL_function_hit+5-.L012pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif movl 20(%esp),%esi leal -56(%esp),%ebx movl 24(%esp),%eax andl $-16,%ebx movl 28(%esp),%edx xchgl %esp,%ebx movl %ebx,48(%esp) movl %eax,%ebx shrl $5,%ebx addl $5,%ebx movl %ebx,240(%edx) movl $48,%ecx movl $0,%edi leal .L_vpaes_consts+0x30-.L013pic_point,%ebp call _vpaes_schedule_core .L013pic_point: movl 48(%esp),%esp xorl %eax,%eax popl %edi popl %esi popl %ebx popl %ebp ret .size vpaes_set_encrypt_key,.-.L_vpaes_set_encrypt_key_begin .globl vpaes_encrypt .hidden vpaes_encrypt .type vpaes_encrypt,@function .align 16 vpaes_encrypt: .L_vpaes_encrypt_begin: pushl %ebp pushl %ebx pushl %esi pushl %edi #ifdef BORINGSSL_DISPATCH_TEST pushl %ebx pushl %edx call .L014pic_for_function_hit .L014pic_for_function_hit: popl %ebx leal BORINGSSL_function_hit+4-.L014pic_for_function_hit(%ebx),%ebx movl $1,%edx movb %dl,(%ebx) popl %edx popl %ebx #endif leal .L_vpaes_consts+0x30-.L015pic_point,%ebp call _vpaes_preheat .L015pic_point: movl 20(%esp),%esi leal -56(%esp),%ebx movl 24(%esp),%edi andl $-16,%ebx movl 28(%esp),%edx xchgl %esp,%ebx movl %ebx,48(%esp) movdqu (%esi),%xmm0 call _vpaes_encrypt_core movdqu %xmm0,(%edi) movl 48(%esp),%esp popl %edi popl %esi popl %ebx popl %ebp ret .size vpaes_encrypt,.-.L_vpaes_encrypt_begin #endif // !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__ELF__)
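The vpaes code above ("Vector Permutation AES" for SSSE3, Mike Hamburg) avoids secret-dependent table lookups entirely: each input byte is masked to its low nibble and shifted to its high nibble (pand / psrld $4), and both nibbles drive 16-entry pshufb permutes whose results are combined with pxor. A rough Rust sketch of that nibble-split lookup pattern, with placeholder tables; the real code chains several such steps over a GF(2^4) tower field to realize the AES S-box, which is not reproduced here:

fn nibble_lookup(block: [u8; 16], lo_tab: &[u8; 16], hi_tab: &[u8; 16]) -> [u8; 16] {
    let mut out = [0u8; 16];
    for (o, &b) in out.iter_mut().zip(block.iter()) {
        let lo = (b & 0x0f) as usize; // pand with the 0x0f mask
        let hi = (b >> 4) as usize;   // psrld $4 then mask
        // In the SIMD code these are pshufb register permutes, so no
        // data-dependent memory access happens at all; this scalar loop
        // only mirrors the dataflow.
        *o = lo_tab[lo] ^ hi_tab[hi]; // pxor of the two partial results
    }
    out
}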
marvin-hansen/iggy-streaming-system
82,184
thirdparty/crates/ring-0.17.9/pregenerated/aesv8-gcm-armv8-ios64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) #include <ring-core/arm_arch.h> #if __ARM_MAX_ARCH__ >= 8 .text .globl _aes_gcm_enc_kernel .private_extern _aes_gcm_enc_kernel .align 4 _aes_gcm_enc_kernel: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys add x4, x0, x1, lsr #3 // end_input_ptr lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible sub x5, x5, #1 // byte_len - 1 ldr q18, [x8, #0] // load rk0 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) ldr q25, [x8, #112] // load rk7 add x5, x5, x0 lsr x12, x11, #32 fmov d2, x10 // CTR block 2 orr w11, w11, w11 rev w12, w12 // rev_ctr32 fmov d1, x10 // CTR block 1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 0 add w12, w12, #1 // increment rev_ctr32 rev w9, w12 // CTR block 1 fmov d3, x10 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 1 add w12, w12, #1 // CTR block 1 ldr q19, [x8, #16] // load rk1 fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 ldr q20, [x8, #32] // load rk2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 orr x9, x11, x9, lsl #32 // CTR block 3 fmov v3.d[1], x9 // CTR block 3 aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q21, [x8, #48] // load rk3 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q24, [x8, #96] // load rk6 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q23, [x8, #80] // load rk5 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ldr q14, [x6, #48] // load h3l | h3h ext v14.16b, v14.16b, v14.16b, #8 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q22, [x8, #64] // load rk4 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 ldr q13, [x6, #32] // load h2l | h2h ext v13.16b, v13.16b, v13.16b, #8 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q15, [x6, #80] // load h4l | h4h ext v15.16b, v15.16b, v15.16b, #8 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 ldr q29, [x8, #176] // load rk11 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 ldr q26, [x8, #128] // load rk8 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 add w12, w12, #1 // CTR block 3 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 ld1 { v11.16b}, [x3] ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 
2 - round 4 aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 0 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 2 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 trn2 v17.2d, v14.2d, v15.2d // h4l | h3l aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 ldr q27, [x8, #144] // load rk9 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 ldr q12, [x6] // load h1l | h1h ext v12.16b, v12.16b, v12.16b, #8 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 2 - round 6 ldr q28, [x8, #160] // load rk10 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 trn1 v9.2d, v14.2d, v15.2d // h4h | h3h aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 trn2 v16.2d, v12.2d, v13.2d // h2l | h1l aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 b.lt Lenc_finish_first_blocks // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 0 - round 9 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 3 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 b.eq Lenc_finish_first_blocks // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 2 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 Lenc_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks eor v17.16b, v17.16b, v9.16b // h4k | h3k aese v2.16b, v31.16b // AES block 2 - round N-1 trn1 v8.2d, v12.2d, v13.2d // h2h | h1h aese v1.16b, v31.16b // AES block 1 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 aese v3.16b, v31.16b // AES block 3 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k b.ge Lenc_tail // handle tail ldp x19, x20, [x0, #16] // AES block 1 - load plaintext rev w9, w12 // CTR block 4 ldp x6, x7, [x0, #0] // AES block 0 - load plaintext ldp x23, x24, [x0, #48] // AES block 3 - load plaintext ldp x21, x22, [x0, #32] // AES block 2 - load plaintext add x0, x0, #64 // AES input_ptr update eor x19, x19, x13 // AES block 1 - round N low eor x20, x20, x14 // AES 
block 1 - round N high fmov d5, x19 // AES block 1 - mov low eor x6, x6, x13 // AES block 0 - round N low eor x7, x7, x14 // AES block 0 - round N high eor x24, x24, x14 // AES block 3 - round N high fmov d4, x6 // AES block 0 - mov low cmp x0, x5 // check if we have <= 8 blocks fmov v4.d[1], x7 // AES block 0 - mov high eor x23, x23, x13 // AES block 3 - round N low eor x21, x21, x13 // AES block 2 - round N low fmov v5.d[1], x20 // AES block 1 - mov high fmov d6, x21 // AES block 2 - mov low add w12, w12, #1 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov d7, x23 // AES block 3 - mov low eor x22, x22, x14 // AES block 2 - round N high fmov v6.d[1], x22 // AES block 2 - mov high eor v4.16b, v4.16b, v0.16b // AES block 0 - result fmov d0, x10 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add w12, w12, #1 // CTR block 5 eor v5.16b, v5.16b, v1.16b // AES block 1 - result fmov d1, x10 // CTR block 5 orr x9, x11, x9, lsl #32 // CTR block 5 fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 st1 { v4.16b}, [x2], #16 // AES block 0 - store result fmov v7.d[1], x24 // AES block 3 - mov high orr x9, x11, x9, lsl #32 // CTR block 6 eor v6.16b, v6.16b, v2.16b // AES block 2 - result st1 { v5.16b}, [x2], #16 // AES block 1 - store result add w12, w12, #1 // CTR block 6 fmov d2, x10 // CTR block 6 fmov v2.d[1], x9 // CTR block 6 st1 { v6.16b}, [x2], #16 // AES block 2 - store result rev w9, w12 // CTR block 7 orr x9, x11, x9, lsl #32 // CTR block 7 eor v7.16b, v7.16b, v3.16b // AES block 3 - result st1 { v7.16b}, [x2], #16 // AES block 3 - store result b.ge Lenc_prepretail // do prepretail Lenc_main_loop: // main loop start aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free) aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d3, x10 // CTR block 4k+3 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 fmov v3.d[1], x9 // CTR block 4k+3 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 ldp x23, x24, [x0, #48] // AES block 4k+7 - load plaintext aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 ldp x21, x22, [x0, #32] // AES block 4k+6 - load plaintext aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 eor v4.16b, v4.16b, v11.16b // PRE 1 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x23, x23, x13 // AES block 4k+7 - round N low aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d10, v17.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high eor x22, x22, x14 // AES block 4k+6 - round N high mov d8, v4.d[1] // GHASH block 4k - mid aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid 
rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free) pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 ldp x19, x20, [x0, #16] // AES block 4k+5 - load plaintext aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor x19, x19, x13 // AES block 4k+5 - round N low aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 eor x21, x21, x13 // AES block 4k+6 - round N low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 movi v8.8b, #0xc2 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check fmov d5, x19 // AES block 4k+5 - mov low ldp x6, x7, [x0, #0] // AES block 4k+4 - load plaintext b.lt Lenc_main_loop_continue // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v2.16b, 
v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq Lenc_main_loop_continue // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 Lenc_main_loop_continue: shl d8, d8, #56 // mod_constant eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid add w12, w12, #1 // CTR block 4k+3 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up add x0, x0, #64 // AES input_ptr update pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid rev w9, w12 // CTR block 4k+8 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor x6, x6, x13 // AES block 4k+4 - round N low eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up eor x7, x7, x14 // AES block 4k+4 - round N high fmov d4, x6 // AES block 4k+4 - mov low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v7.16b, v9.16b, v7.16b // MODULO - fold into mid eor x20, x20, x14 // AES block 4k+5 - round N high eor x24, x24, x14 // AES block 4k+7 - round N high add w12, w12, #1 // CTR block 4k+8 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 fmov v4.d[1], x7 // AES block 4k+4 - mov high eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid fmov d7, x23 // AES block 4k+7 - mov low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 fmov v5.d[1], x20 // AES block 4k+5 - mov high fmov d6, x21 // AES block 4k+6 - mov low cmp x0, x5 // LOOP CONTROL fmov v6.d[1], x22 // AES block 4k+6 - mov high pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v4.16b, v4.16b, v0.16b // AES block 4k+4 - result fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 4k+8 rev w9, w12 // CTR block 4k+9 add w12, w12, #1 // CTR block 4k+9 eor v5.16b, v5.16b, v1.16b // AES block 4k+5 - result fmov d1, x10 // CTR block 4k+9 orr x9, x11, x9, lsl #32 // CTR block 4k+9 fmov v1.d[1], x9 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 rev w9, w12 // CTR block 4k+10 st1 { v4.16b}, [x2], #16 // AES block 4k+4 - store result orr x9, x11, x9, lsl #32 // CTR block 4k+10 eor v11.16b, v11.16b, v9.16b // MODULO - fold into low fmov v7.d[1], x24 // AES block 4k+7 - mov high ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment st1 { v5.16b}, [x2], #16 // AES block 4k+5 - store result add w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 eor v6.16b, v6.16b, v2.16b // AES block 4k+6 - result fmov d2, x10 // CTR block 4k+10 st1 { v6.16b}, [x2], #16 // AES block 4k+6 - store result fmov v2.d[1], x9 // CTR block 4k+10 rev w9, w12 // CTR block 4k+11 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low orr x9, x11, x9, lsl #32 // CTR block 4k+11 eor v7.16b, v7.16b, v3.16b // AES block 4k+7 - result st1 { v7.16b}, [x2], #16 // AES block 4k+7 - store result b.lt Lenc_main_loop Lenc_prepretail: // PREPRETAIL aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 rev64 v6.16b, v6.16b // GHASH block 4k+2 (t0, t1, and t2 free) aese 
v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov d3, x10 // CTR block 4k+3 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 rev64 v4.16b, v4.16b // GHASH block 4k (only t0 is free) fmov v3.d[1], x9 // CTR block 4k+3 ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v5.16b, v5.16b // GHASH block 4k+1 (t0 and t1 free) aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 rev64 v7.16b, v7.16b // GHASH block 4k+3 (t0, t1, t2 and t3 free) aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid add w12, w12, #1 // CTR block 4k+3 pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high mov d4, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid eor v4.8b, v4.8b, v7.8b // GHASH block 4k+3 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 pmull v4.1q, v4.1d, v16.1d // GHASH block 4k+3 - mid eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 movi 
v8.8b, #0xc2 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+3 - mid pmull v6.1q, v7.1d, v12.1d // GHASH block 4k+3 - low aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 cmp x17, #12 // setup flags for AES-128/192/256 check aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v11.16b, v11.16b, v6.16b // GHASH block 4k+3 - low aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v10.16b, v10.16b, v9.16b // karatsuba tidy up aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 pmull v4.1q, v9.1d, v8.1d ext v9.16b, v9.16b, v9.16b, #8 eor v10.16b, v10.16b, v11.16b b.lt Lenc_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 b.eq Lenc_finish_prepretail // branch if AES-192 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 Lenc_finish_prepretail: eor v10.16b, v10.16b, v4.16b eor v10.16b, v10.16b, v9.16b pmull v4.1q, v10.1d, v8.1d ext v10.16b, v10.16b, v10.16b, #8 aese v1.16b, v31.16b // AES block 4k+5 - round N-1 eor v11.16b, v11.16b, v4.16b aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b Lenc_tail: // TAIL ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process ldp x6, x7, [x0], #16 // AES block 4k+4 - load plaintext eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high cmp x5, #48 fmov d4, x6 // AES block 4k+4 - mov low fmov v4.d[1], x7 // AES block 4k+4 - mov high eor v5.16b, v4.16b, v0.16b // AES block 4k+4 - result b.gt Lenc_blocks_more_than_3 cmp x5, #32 mov v3.16b, v2.16b movi v11.8b, #0 movi v9.8b, #0 sub w12, w12, #1 mov v2.16b, v1.16b movi v10.8b, #0 b.gt Lenc_blocks_more_than_2 mov v3.16b, v1.16b sub w12, w12, #1 cmp x5, #16 b.gt Lenc_blocks_more_than_1 sub w12, w12, #1 b Lenc_blocks_less_than_1 Lenc_blocks_more_than_3: // blocks left > 3 st1 { v5.16b}, [x2], #16 // AES final-3 
block - store result ldp x6, x7, [x0], #16 // AES final-2 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-3 block eor x6, x6, x13 // AES final-2 block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag eor x7, x7, x14 // AES final-2 block - round N high mov d22, v4.d[1] // GHASH final-3 block - mid fmov d5, x6 // AES final-2 block - mov low fmov v5.d[1], x7 // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in mov d10, v17.d[1] // GHASH final-3 block - mid pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor v5.16b, v5.16b, v1.16b // AES final-2 block - result Lenc_blocks_more_than_2: // blocks left > 2 st1 { v5.16b}, [x2], #16 // AES final-2 block - store result ldp x6, x7, [x0], #16 // AES final-1 block - load input low & high rev64 v4.16b, v5.16b // GHASH final-2 block eor x6, x6, x13 // AES final-1 block - round N low eor v4.16b, v4.16b, v8.16b // feed in partial tag fmov d5, x6 // AES final-1 block - mov low eor x7, x7, x14 // AES final-1 block - round N high fmov v5.d[1], x7 // AES final-1 block - mov high movi v8.8b, #0 // suppress further partial tag feed in pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid eor v5.16b, v5.16b, v2.16b // AES final-1 block - result eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid Lenc_blocks_more_than_1: // blocks left > 1 st1 { v5.16b}, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ldp x6, x7, [x0], #16 // AES final block - load input low & high eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in eor x6, x6, x13 // AES final block - round N low mov d22, v4.d[1] // GHASH final-1 block - mid pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor x7, x7, x14 // AES final block - round N high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high ins v22.d[1], v22.d[0] // GHASH final-1 block - mid fmov d5, x6 // AES final block - mov low fmov v5.d[1], x7 // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low eor v5.16b, v5.16b, v3.16b // AES final block - result eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low Lenc_blocks_less_than_1: // blocks left <= 1 and x1, x1, #127 // bit_length %= 128 mvn x13, xzr // rkN_l = 0xffffffffffffffff sub x1, x1, #128 // bit_length -= 128 neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128]) ld1 { v18.16b}, [x2] // load existing bytes where the possibly partial last block is to be stored mvn x14, xzr // rkN_h = 0xffffffffffffffff and x1, x1, #127 // bit_length %= 128 lsr x14, x14, x1 // rkN_h is mask for top 64b of last block cmp x1, #64 csel x6, x13, x14, lt csel x7, x14, xzr, lt fmov d0, x6 // ctr0b is mask for last block fmov v0.d[1], x7 and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes 
in highest bits rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag bif v5.16b, v18.16b, v0.16b // insert existing bytes in top end of result before storing pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high mov d8, v4.d[1] // GHASH final block - mid rev w9, w12 pmull v21.1q, v4.1d, v12.1d // GHASH final block - low eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block - mid movi v8.8b, #0xc2 eor v4.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v4.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v9.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment str w9, [x16, #12] // store the updated counter st1 { v5.16b}, [x2] // store all 16B eor v11.16b, v11.16b, v9.16b // MODULO - fold into low eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] ldp x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret .globl _aes_gcm_dec_kernel .private_extern _aes_gcm_dec_kernel .align 4 _aes_gcm_dec_kernel: AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp, #-128]! 
mov x29, sp stp x19, x20, [sp, #16] mov x16, x4 mov x8, x5 stp x21, x22, [sp, #32] stp x23, x24, [sp, #48] stp d8, d9, [sp, #64] stp d10, d11, [sp, #80] stp d12, d13, [sp, #96] stp d14, d15, [sp, #112] ldr w17, [x8, #240] add x19, x8, x17, lsl #4 // borrow input_l1 for last key ldp x13, x14, [x19] // load round N keys ldr q31, [x19, #-16] // load round N-1 keys lsr x5, x1, #3 // byte_len mov x15, x5 ldp x10, x11, [x16] // ctr96_b64, ctr96_t32 ldr q26, [x8, #128] // load rk8 sub x5, x5, #1 // byte_len - 1 ldr q25, [x8, #112] // load rk7 and x5, x5, #0xffffffffffffffc0 // number of bytes to be processed in main loop (at least 1 byte must be handled by tail) add x4, x0, x1, lsr #3 // end_input_ptr ldr q24, [x8, #96] // load rk6 lsr x12, x11, #32 ldr q23, [x8, #80] // load rk5 orr w11, w11, w11 ldr q21, [x8, #48] // load rk3 add x5, x5, x0 rev w12, w12 // rev_ctr32 add w12, w12, #1 // increment rev_ctr32 fmov d3, x10 // CTR block 3 rev w9, w12 // CTR block 1 add w12, w12, #1 // CTR block 1 fmov d1, x10 // CTR block 1 orr x9, x11, x9, lsl #32 // CTR block 1 ld1 { v0.16b}, [x16] // special case vector load initial counter so we can start first AES block as quickly as possible fmov v1.d[1], x9 // CTR block 1 rev w9, w12 // CTR block 2 add w12, w12, #1 // CTR block 2 fmov d2, x10 // CTR block 2 orr x9, x11, x9, lsl #32 // CTR block 2 fmov v2.d[1], x9 // CTR block 2 rev w9, w12 // CTR block 3 orr x9, x11, x9, lsl #32 // CTR block 3 ldr q18, [x8, #0] // load rk0 fmov v3.d[1], x9 // CTR block 3 add w12, w12, #1 // CTR block 3 ldr q22, [x8, #64] // load rk4 ldr q19, [x8, #16] // load rk1 aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 0 - round 0 ldr q14, [x6, #48] // load h3l | h3h ext v14.16b, v14.16b, v14.16b, #8 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 3 - round 0 ldr q15, [x6, #80] // load h4l | h4h ext v15.16b, v15.16b, v15.16b, #8 aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 1 - round 0 ldr q13, [x6, #32] // load h2l | h2h ext v13.16b, v13.16b, v13.16b, #8 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 2 - round 0 ldr q20, [x8, #32] // load rk2 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 0 - round 1 aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 1 - round 1 ld1 { v11.16b}, [x3] ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 2 - round 1 ldr q27, [x8, #144] // load rk9 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 3 - round 1 ldr q30, [x8, #192] // load rk12 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 0 - round 2 ldr q12, [x6] // load h1l | h1h ext v12.16b, v12.16b, v12.16b, #8 aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 2 - round 2 ldr q28, [x8, #160] // load rk10 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 3 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 0 - round 3 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 1 - round 2 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 3 - round 3 aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 0 - round 4 aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 2 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 1 - round 3 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 3 - round 4 aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 2 - round 4 aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 1 - round 4 aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 3 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 0 - 
round 5 aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 1 - round 5 aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 2 - round 5 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 0 - round 6 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 3 - round 6 cmp x17, #12 // setup flags for AES-128/192/256 check aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 1 - round 6 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 2 - round 6 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 0 - round 7 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 1 - round 7 aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 3 - round 7 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 0 - round 8 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 2 - round 7 aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 3 - round 8 aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 1 - round 8 ldr q29, [x8, #176] // load rk11 aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 2 - round 8 b.lt Ldec_finish_first_blocks // branch if AES-128 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 0 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 1 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 3 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 2 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 0 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 1 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 3 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 2 - round 10 b.eq Ldec_finish_first_blocks // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 0 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 3 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 1 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 2 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 1 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 0 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 2 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 3 - round 12 Ldec_finish_first_blocks: cmp x0, x5 // check if we have <= 4 blocks trn1 v9.2d, v14.2d, v15.2d // h4h | h3h trn2 v17.2d, v14.2d, v15.2d // h4l | h3l trn1 v8.2d, v12.2d, v13.2d // h2h | h1h trn2 v16.2d, v12.2d, v13.2d // h2l | h1l eor v17.16b, v17.16b, v9.16b // h4k | h3k aese v1.16b, v31.16b // AES block 1 - round N-1 aese v2.16b, v31.16b // AES block 2 - round N-1 eor v16.16b, v16.16b, v8.16b // h2k | h1k aese v3.16b, v31.16b // AES block 3 - round N-1 aese v0.16b, v31.16b // AES block 0 - round N-1 b.ge Ldec_tail // handle tail ldr q4, [x0, #0] // AES block 0 - load ciphertext ldr q5, [x0, #16] // AES block 1 - load ciphertext rev w9, w12 // CTR block 4 eor v0.16b, v4.16b, v0.16b // AES block 0 - result eor v1.16b, v5.16b, v1.16b // AES block 1 - result rev64 v5.16b, v5.16b // GHASH block 1 ldr q7, [x0, #48] // AES block 3 - load ciphertext mov x7, v0.d[1] // AES block 0 - mov high mov x6, v0.d[0] // AES block 0 - mov low rev64 v4.16b, v4.16b // GHASH block 0 add w12, w12, #1 // CTR block 4 fmov d0, x10 // CTR block 4 orr x9, x11, x9, lsl #32 // CTR block 4 fmov v0.d[1], x9 // CTR block 4 rev w9, w12 // CTR block 5 add w12, w12, #1 // CTR block 5 mov x19, v1.d[0] // AES block 1 - mov low orr x9, x11, x9, lsl #32 // CTR block 5 mov x20, v1.d[1] // AES block 1 - mov high eor x7, x7, x14 // AES block 0 - round N 
high eor x6, x6, x13 // AES block 0 - round N low stp x6, x7, [x2], #16 // AES block 0 - store result fmov d1, x10 // CTR block 5 ldr q6, [x0, #32] // AES block 2 - load ciphertext add x0, x0, #64 // AES input_ptr update fmov v1.d[1], x9 // CTR block 5 rev w9, w12 // CTR block 6 add w12, w12, #1 // CTR block 6 eor x19, x19, x13 // AES block 1 - round N low orr x9, x11, x9, lsl #32 // CTR block 6 eor x20, x20, x14 // AES block 1 - round N high stp x19, x20, [x2], #16 // AES block 1 - store result eor v2.16b, v6.16b, v2.16b // AES block 2 - result cmp x0, x5 // check if we have <= 8 blocks b.ge Ldec_prepretail // do prepretail Ldec_main_loop: // main loop start mov x21, v2.d[0] // AES block 4k+2 - mov low ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 mov x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 eor v4.16b, v4.16b, v11.16b // PRE 1 rev w9, w12 // CTR block 4k+7 aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x23, v3.d[0] // AES block 4k+3 - mov low pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 orr x9, x11, x9, lsl #32 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 fmov v3.d[1], x9 // CTR block 4k+7 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 eor x22, x22, x14 // AES block 4k+2 - round N high aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 1 mov d10, v17.d[1] // GHASH block 4k - mid aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 rev64 v6.16b, v6.16b // GHASH block 4k+2 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 eor x21, x21, x13 // AES block 4k+2 - round N low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 stp x21, x22, [x2], #16 // AES block 4k+2 - store result pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 rev64 v7.16b, v7.16b // GHASH block 4k+3 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor x23, x23, x13 // AES block 4k+3 - round N low pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low eor x24, x24, x14 // AES block 4k+3 - round N high eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 add w12, w12, #1 // CTR block 4k+7 aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, v6.1d, 
v13.1d // GHASH block 4k+2 - low aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid rev w9, w12 // CTR block 4k+8 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 add w12, w12, #1 // CTR block 4k+8 aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high mov d6, v7.d[1] // GHASH block 4k+3 - mid aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low orr x9, x11, x9, lsl #32 // CTR block 4k+8 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high cmp x17, #12 // setup flags for AES-128/192/256 check eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid movi v8.8b, #0xc2 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 shl d8, d8, #56 // mod_constant aese v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 b.lt Ldec_main_loop_continue // branch if AES-128 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 b.eq Ldec_main_loop_continue // branch if AES-192 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - 
round 12 Ldec_main_loop_continue: pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up ldr q4, [x0, #0] // AES block 4k+4 - load ciphertext aese v0.16b, v31.16b // AES block 4k+4 - round N-1 ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up ldr q5, [x0, #16] // AES block 4k+5 - load ciphertext eor v0.16b, v4.16b, v0.16b // AES block 4k+4 - result stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid ldr q7, [x0, #48] // AES block 4k+7 - load ciphertext ldr q6, [x0, #32] // AES block 4k+6 - load ciphertext mov x7, v0.d[1] // AES block 4k+4 - mov high eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid aese v1.16b, v31.16b // AES block 4k+5 - round N-1 add x0, x0, #64 // AES input_ptr update mov x6, v0.d[0] // AES block 4k+4 - mov low fmov d0, x10 // CTR block 4k+8 fmov v0.d[1], x9 // CTR block 4k+8 pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor v1.16b, v5.16b, v1.16b // AES block 4k+5 - result rev w9, w12 // CTR block 4k+9 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+9 cmp x0, x5 // LOOP CONTROL add w12, w12, #1 // CTR block 4k+9 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high mov x20, v1.d[1] // AES block 4k+5 - mov high eor v2.16b, v6.16b, v2.16b // AES block 4k+6 - result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low mov x19, v1.d[0] // AES block 4k+5 - mov low fmov d1, x10 // CTR block 4k+9 ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment fmov v1.d[1], x9 // CTR block 4k+9 rev w9, w12 // CTR block 4k+10 add w12, w12, #1 // CTR block 4k+10 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 orr x9, x11, x9, lsl #32 // CTR block 4k+10 rev64 v5.16b, v5.16b // GHASH block 4k+5 eor x20, x20, x14 // AES block 4k+5 - round N high stp x6, x7, [x2], #16 // AES block 4k+4 - store result eor x19, x19, x13 // AES block 4k+5 - round N low stp x19, x20, [x2], #16 // AES block 4k+5 - store result rev64 v4.16b, v4.16b // GHASH block 4k+4 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low b.lt Ldec_main_loop Ldec_prepretail: // PREPRETAIL ext v11.16b, v11.16b, v11.16b, #8 // PRE 0 mov x21, v2.d[0] // AES block 4k+2 - mov low eor v3.16b, v7.16b, v3.16b // AES block 4k+3 - result aese v0.16b, v18.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 0 mov x22, v2.d[1] // AES block 4k+2 - mov high aese v1.16b, v18.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 0 fmov d2, x10 // CTR block 4k+6 fmov v2.d[1], x9 // CTR block 4k+6 rev w9, w12 // CTR block 4k+7 eor v4.16b, v4.16b, v11.16b // PRE 1 rev64 v6.16b, v6.16b // GHASH block 4k+2 orr x9, x11, x9, lsl #32 // CTR block 4k+7 mov x23, v3.d[0] // AES block 4k+3 - mov low aese v1.16b, v19.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 1 mov x24, v3.d[1] // AES block 4k+3 - mov high pmull v11.1q, v4.1d, v15.1d // GHASH block 4k - low mov d8, v4.d[1] // GHASH block 4k - mid fmov d3, x10 // CTR block 4k+7 pmull2 v9.1q, v4.2d, v15.2d // GHASH block 4k - high fmov v3.d[1], x9 // CTR block 4k+7 aese v2.16b, v18.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 0 mov d10, v17.d[1] // GHASH block 4k - mid aese v0.16b, v19.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 1 eor v8.8b, v8.8b, v4.8b // GHASH block 4k - mid pmull2 v4.1q, v5.2d, v14.2d // GHASH block 4k+1 - high aese v2.16b, v19.16b aesmc v2.16b, v2.16b // AES 
block 4k+6 - round 1 rev64 v7.16b, v7.16b // GHASH block 4k+3 aese v3.16b, v18.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 0 pmull v10.1q, v8.1d, v10.1d // GHASH block 4k - mid eor v9.16b, v9.16b, v4.16b // GHASH block 4k+1 - high pmull v8.1q, v5.1d, v14.1d // GHASH block 4k+1 - low aese v3.16b, v19.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 1 mov d4, v5.d[1] // GHASH block 4k+1 - mid aese v0.16b, v20.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 2 aese v1.16b, v20.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 2 eor v11.16b, v11.16b, v8.16b // GHASH block 4k+1 - low aese v2.16b, v20.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 2 aese v0.16b, v21.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 3 mov d8, v6.d[1] // GHASH block 4k+2 - mid aese v3.16b, v20.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 2 eor v4.8b, v4.8b, v5.8b // GHASH block 4k+1 - mid pmull v5.1q, v6.1d, v13.1d // GHASH block 4k+2 - low aese v0.16b, v22.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 4 aese v3.16b, v21.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 3 eor v8.8b, v8.8b, v6.8b // GHASH block 4k+2 - mid pmull v4.1q, v4.1d, v17.1d // GHASH block 4k+1 - mid aese v0.16b, v23.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 5 eor v11.16b, v11.16b, v5.16b // GHASH block 4k+2 - low aese v3.16b, v22.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 4 pmull2 v5.1q, v7.2d, v12.2d // GHASH block 4k+3 - high eor v10.16b, v10.16b, v4.16b // GHASH block 4k+1 - mid pmull2 v4.1q, v6.2d, v13.2d // GHASH block 4k+2 - high aese v3.16b, v23.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 5 ins v8.d[1], v8.d[0] // GHASH block 4k+2 - mid aese v2.16b, v21.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 3 aese v1.16b, v21.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 3 eor v9.16b, v9.16b, v4.16b // GHASH block 4k+2 - high pmull v4.1q, v7.1d, v12.1d // GHASH block 4k+3 - low aese v2.16b, v22.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 4 mov d6, v7.d[1] // GHASH block 4k+3 - mid aese v1.16b, v22.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 4 pmull2 v8.1q, v8.2d, v16.2d // GHASH block 4k+2 - mid aese v2.16b, v23.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 5 eor v6.8b, v6.8b, v7.8b // GHASH block 4k+3 - mid aese v1.16b, v23.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 5 aese v3.16b, v24.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 6 eor v10.16b, v10.16b, v8.16b // GHASH block 4k+2 - mid aese v2.16b, v24.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 6 aese v0.16b, v24.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 6 movi v8.8b, #0xc2 aese v1.16b, v24.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 6 eor v11.16b, v11.16b, v4.16b // GHASH block 4k+3 - low pmull v6.1q, v6.1d, v16.1d // GHASH block 4k+3 - mid aese v3.16b, v25.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 7 cmp x17, #12 // setup flags for AES-128/192/256 check eor v9.16b, v9.16b, v5.16b // GHASH block 4k+3 - high aese v1.16b, v25.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 7 aese v0.16b, v25.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 7 eor v10.16b, v10.16b, v6.16b // GHASH block 4k+3 - mid aese v3.16b, v26.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 8 aese v2.16b, v25.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 7 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up aese v1.16b, v26.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 8 aese v0.16b, v26.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 8 shl d8, d8, #56 // mod_constant aese 
v2.16b, v26.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 8 b.lt Ldec_finish_prepretail // branch if AES-128 aese v1.16b, v27.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 9 aese v2.16b, v27.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 9 aese v3.16b, v27.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 9 aese v0.16b, v27.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 9 aese v2.16b, v28.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 10 aese v3.16b, v28.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 10 aese v0.16b, v28.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 10 aese v1.16b, v28.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 10 b.eq Ldec_finish_prepretail // branch if AES-192 aese v2.16b, v29.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 11 aese v0.16b, v29.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 11 aese v1.16b, v29.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 11 aese v2.16b, v30.16b aesmc v2.16b, v2.16b // AES block 4k+6 - round 12 aese v3.16b, v29.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 11 aese v1.16b, v30.16b aesmc v1.16b, v1.16b // AES block 4k+5 - round 12 aese v0.16b, v30.16b aesmc v0.16b, v0.16b // AES block 4k+4 - round 12 aese v3.16b, v30.16b aesmc v3.16b, v3.16b // AES block 4k+7 - round 12 Ldec_finish_prepretail: eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor x22, x22, x14 // AES block 4k+2 - round N high eor x23, x23, x13 // AES block 4k+3 - round N low eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid add w12, w12, #1 // CTR block 4k+7 eor x21, x21, x13 // AES block 4k+2 - round N low pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low eor x24, x24, x14 // AES block 4k+3 - round N high stp x21, x22, [x2], #16 // AES block 4k+2 - store result ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment stp x23, x24, [x2], #16 // AES block 4k+3 - store result eor v11.16b, v11.16b, v8.16b // MODULO - fold into low aese v1.16b, v31.16b // AES block 4k+5 - round N-1 aese v0.16b, v31.16b // AES block 4k+4 - round N-1 aese v3.16b, v31.16b // AES block 4k+7 - round N-1 aese v2.16b, v31.16b // AES block 4k+6 - round N-1 eor v11.16b, v11.16b, v10.16b // MODULO - fold into low Ldec_tail: // TAIL sub x5, x4, x0 // main_end_input_ptr is number of bytes left to process ld1 { v5.16b}, [x0], #16 // AES block 4k+4 - load ciphertext eor v0.16b, v5.16b, v0.16b // AES block 4k+4 - result mov x6, v0.d[0] // AES block 4k+4 - mov low mov x7, v0.d[1] // AES block 4k+4 - mov high ext v8.16b, v11.16b, v11.16b, #8 // prepare final partial tag cmp x5, #48 eor x6, x6, x13 // AES block 4k+4 - round N low eor x7, x7, x14 // AES block 4k+4 - round N high b.gt Ldec_blocks_more_than_3 sub w12, w12, #1 mov v3.16b, v2.16b movi v10.8b, #0 movi v11.8b, #0 cmp x5, #32 movi v9.8b, #0 mov v2.16b, v1.16b b.gt Ldec_blocks_more_than_2 sub w12, w12, #1 mov v3.16b, v1.16b cmp x5, #16 b.gt Ldec_blocks_more_than_1 sub w12, w12, #1 b Ldec_blocks_less_than_1 Ldec_blocks_more_than_3: // blocks left > 3 rev64 v4.16b, v5.16b // GHASH final-3 block ld1 { v5.16b}, [x0], #16 // AES final-2 block - load ciphertext stp x6, x7, [x2], #16 // AES final-3 block - store result mov d10, v17.d[1] // GHASH final-3 block - mid eor v4.16b, v4.16b, v8.16b // feed in partial tag eor v0.16b, v5.16b, v1.16b // AES final-2 block - result mov d22, v4.d[1] // GHASH 
final-3 block - mid mov x6, v0.d[0] // AES final-2 block - mov low mov x7, v0.d[1] // AES final-2 block - mov high eor v22.8b, v22.8b, v4.8b // GHASH final-3 block - mid movi v8.8b, #0 // suppress further partial tag feed in pmull2 v9.1q, v4.2d, v15.2d // GHASH final-3 block - high pmull v10.1q, v22.1d, v10.1d // GHASH final-3 block - mid eor x6, x6, x13 // AES final-2 block - round N low pmull v11.1q, v4.1d, v15.1d // GHASH final-3 block - low eor x7, x7, x14 // AES final-2 block - round N high Ldec_blocks_more_than_2: // blocks left > 2 rev64 v4.16b, v5.16b // GHASH final-2 block ld1 { v5.16b}, [x0], #16 // AES final-1 block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag stp x6, x7, [x2], #16 // AES final-2 block - store result eor v0.16b, v5.16b, v2.16b // AES final-1 block - result mov d22, v4.d[1] // GHASH final-2 block - mid pmull v21.1q, v4.1d, v14.1d // GHASH final-2 block - low pmull2 v20.1q, v4.2d, v14.2d // GHASH final-2 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-2 block - mid mov x6, v0.d[0] // AES final-1 block - mov low mov x7, v0.d[1] // AES final-1 block - mov high eor v11.16b, v11.16b, v21.16b // GHASH final-2 block - low movi v8.8b, #0 // suppress further partial tag feed in pmull v22.1q, v22.1d, v17.1d // GHASH final-2 block - mid eor v9.16b, v9.16b, v20.16b // GHASH final-2 block - high eor x6, x6, x13 // AES final-1 block - round N low eor v10.16b, v10.16b, v22.16b // GHASH final-2 block - mid eor x7, x7, x14 // AES final-1 block - round N high Ldec_blocks_more_than_1: // blocks left > 1 stp x6, x7, [x2], #16 // AES final-1 block - store result rev64 v4.16b, v5.16b // GHASH final-1 block ld1 { v5.16b}, [x0], #16 // AES final block - load ciphertext eor v4.16b, v4.16b, v8.16b // feed in partial tag movi v8.8b, #0 // suppress further partial tag feed in mov d22, v4.d[1] // GHASH final-1 block - mid eor v0.16b, v5.16b, v3.16b // AES final block - result pmull2 v20.1q, v4.2d, v13.2d // GHASH final-1 block - high eor v22.8b, v22.8b, v4.8b // GHASH final-1 block - mid pmull v21.1q, v4.1d, v13.1d // GHASH final-1 block - low mov x6, v0.d[0] // AES final block - mov low ins v22.d[1], v22.d[0] // GHASH final-1 block - mid mov x7, v0.d[1] // AES final block - mov high pmull2 v22.1q, v22.2d, v16.2d // GHASH final-1 block - mid eor x6, x6, x13 // AES final block - round N low eor v11.16b, v11.16b, v21.16b // GHASH final-1 block - low eor v9.16b, v9.16b, v20.16b // GHASH final-1 block - high eor v10.16b, v10.16b, v22.16b // GHASH final-1 block - mid eor x7, x7, x14 // AES final block - round N high Ldec_blocks_less_than_1: // blocks left <= 1 and x1, x1, #127 // bit_length %= 128 mvn x14, xzr // rkN_h = 0xffffffffffffffff sub x1, x1, #128 // bit_length -= 128 mvn x13, xzr // rkN_l = 0xffffffffffffffff ldp x4, x5, [x2] // load existing bytes we need to not overwrite neg x1, x1 // bit_length = 128 - #bits in input (in range [1,128]) and x1, x1, #127 // bit_length %= 128 lsr x14, x14, x1 // rkN_h is mask for top 64b of last block cmp x1, #64 csel x9, x13, x14, lt csel x10, x14, xzr, lt fmov d0, x9 // ctr0b is mask for last block and x6, x6, x9 mov v0.d[1], x10 bic x4, x4, x9 // mask out low existing bytes rev w9, w12 bic x5, x5, x10 // mask out high existing bytes orr x6, x6, x4 and x7, x7, x10 orr x7, x7, x5 and v5.16b, v5.16b, v0.16b // possibly partial last block has zeroes in highest bits rev64 v4.16b, v5.16b // GHASH final block eor v4.16b, v4.16b, v8.16b // feed in partial tag pmull v21.1q, v4.1d, v12.1d // GHASH final block - low mov d8, v4.d[1] 
// GHASH final block - mid eor v8.8b, v8.8b, v4.8b // GHASH final block - mid pmull2 v20.1q, v4.2d, v12.2d // GHASH final block - high pmull v8.1q, v8.1d, v16.1d // GHASH final block - mid eor v9.16b, v9.16b, v20.16b // GHASH final block - high eor v11.16b, v11.16b, v21.16b // GHASH final block - low eor v10.16b, v10.16b, v8.16b // GHASH final block - mid movi v8.8b, #0xc2 eor v6.16b, v11.16b, v9.16b // MODULO - karatsuba tidy up shl d8, d8, #56 // mod_constant eor v10.16b, v10.16b, v6.16b // MODULO - karatsuba tidy up pmull v7.1q, v9.1d, v8.1d // MODULO - top 64b align with mid ext v9.16b, v9.16b, v9.16b, #8 // MODULO - other top alignment eor v10.16b, v10.16b, v7.16b // MODULO - fold into mid eor v10.16b, v10.16b, v9.16b // MODULO - fold into mid pmull v8.1q, v10.1d, v8.1d // MODULO - mid 64b align with low ext v10.16b, v10.16b, v10.16b, #8 // MODULO - other mid alignment eor v11.16b, v11.16b, v8.16b // MODULO - fold into low stp x6, x7, [x2] str w9, [x16, #12] // store the updated counter eor v11.16b, v11.16b, v10.16b // MODULO - fold into low ext v11.16b, v11.16b, v11.16b, #8 rev64 v11.16b, v11.16b mov x0, x15 st1 { v11.16b }, [x3] ldp x19, x20, [sp, #16] ldp x21, x22, [sp, #32] ldp x23, x24, [sp, #48] ldp d8, d9, [sp, #64] ldp d10, d11, [sp, #80] ldp d12, d13, [sp, #96] ldp d14, d15, [sp, #112] ldp x29, x30, [sp], #128 AARCH64_VALIDATE_LINK_REGISTER ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
marvin-hansen/iggy-streaming-system
48,988
thirdparty/crates/ring-0.17.9/pregenerated/sha512-armv8-ios64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__) // Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy // in the file LICENSE in the source distribution or at // https://www.openssl.org/source/license.html // ==================================================================== // Written by Andy Polyakov <appro@openssl.org> for the OpenSSL // project. The module is, however, dual licensed under OpenSSL and // CRYPTOGAMS licenses depending on where you obtain it. For further // details see http://www.openssl.org/~appro/cryptogams/. // // Permission to use under GPLv2 terms is granted. // ==================================================================== // // SHA256/512 for ARMv8. // // Performance in cycles per processed byte and improvement coefficient // over code generated with "default" compiler: // // SHA256-hw SHA256(*) SHA512 // Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**)) // Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***)) // Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***)) // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) // Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. // (**) The result is a trade-off: it's possible to improve it by // 10% (or by 1 cycle per round), but at the cost of 20% loss // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code // generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ # include <ring-core/arm_arch.h> #endif .text .globl _sha512_block_data_order_nohw .private_extern _sha512_block_data_order_nohw .align 6 _sha512_block_data_order_nohw: AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] stp x21,x22,[sp,#32] stp x23,x24,[sp,#48] stp x25,x26,[sp,#64] stp x27,x28,[sp,#80] sub sp,sp,#4*8 ldp x20,x21,[x0] // load context ldp x22,x23,[x0,#2*8] ldp x24,x25,[x0,#4*8] add x2,x1,x2,lsl#7 // end of input ldp x26,x27,[x0,#6*8] adrp x30,LK512@PAGE add x30,x30,LK512@PAGEOFF stp x0,x2,[x29,#96] Loop: ldp x3,x4,[x1],#2*8 ldr x19,[x30],#8 // *K++ eor x28,x21,x22 // magic seed str x1,[x29,#112] #ifndef __AARCH64EB__ rev x3,x3 // 0 #endif ror x16,x24,#14 add x27,x27,x19 // h+=K[i] eor x6,x24,x24,ror#23 and x17,x25,x24 bic x19,x26,x24 add x27,x27,x3 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x6,ror#18 // Sigma1(e) ror x6,x20,#28 add x27,x27,x17 // h+=Ch(e,f,g) eor x17,x20,x20,ror#5 add x27,x27,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x23,x23,x27 // d+=h eor x28,x28,x21 // Maj(a,b,c) eor x17,x6,x17,ror#34 // Sigma0(a) add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x4,x4 // 1 #endif ldp x5,x6,[x1],#2*8 add x27,x27,x17 // h+=Sigma0(a) ror x16,x23,#14 add x26,x26,x28 // h+=K[i] eor x7,x23,x23,ror#23 and x17,x24,x23 bic x28,x25,x23 add x26,x26,x4 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x7,ror#18 // Sigma1(e) ror x7,x27,#28 add x26,x26,x17 // h+=Ch(e,f,g) eor x17,x27,x27,ror#5 add x26,x26,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x22,x22,x26 // d+=h eor x19,x19,x20 // Maj(a,b,c) eor x17,x7,x17,ror#34 // Sigma0(a) add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x5,x5 // 2 #endif add x26,x26,x17 // h+=Sigma0(a) ror x16,x22,#14 add x25,x25,x19 // h+=K[i] eor x8,x22,x22,ror#23 and x17,x23,x22 bic x19,x24,x22 add x25,x25,x5 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x8,ror#18 // Sigma1(e) ror x8,x26,#28 add x25,x25,x17 // h+=Ch(e,f,g) eor x17,x26,x26,ror#5 add x25,x25,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x21,x21,x25 // d+=h eor x28,x28,x27 // Maj(a,b,c) eor x17,x8,x17,ror#34 // Sigma0(a) add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x6,x6 // 3 #endif ldp x7,x8,[x1],#2*8 add x25,x25,x17 // h+=Sigma0(a) ror x16,x21,#14 add x24,x24,x28 // h+=K[i] eor x9,x21,x21,ror#23 and x17,x22,x21 bic x28,x23,x21 add x24,x24,x6 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x9,ror#18 // Sigma1(e) ror x9,x25,#28 add x24,x24,x17 // h+=Ch(e,f,g) eor x17,x25,x25,ror#5 add x24,x24,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x20,x20,x24 // d+=h eor x19,x19,x26 // Maj(a,b,c) eor x17,x9,x17,ror#34 // Sigma0(a) add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x7,x7 // 4 #endif add x24,x24,x17 // h+=Sigma0(a) ror x16,x20,#14 add x23,x23,x19 // h+=K[i] eor x10,x20,x20,ror#23 and x17,x21,x20 bic x19,x22,x20 add x23,x23,x7 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x10,ror#18 // Sigma1(e) ror x10,x24,#28 add x23,x23,x17 // h+=Ch(e,f,g) eor x17,x24,x24,ror#5 add x23,x23,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x27,x27,x23 // d+=h eor x28,x28,x25 // Maj(a,b,c) eor x17,x10,x17,ror#34 // Sigma0(a) add x23,x23,x28 // 
h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x8,x8 // 5 #endif ldp x9,x10,[x1],#2*8 add x23,x23,x17 // h+=Sigma0(a) ror x16,x27,#14 add x22,x22,x28 // h+=K[i] eor x11,x27,x27,ror#23 and x17,x20,x27 bic x28,x21,x27 add x22,x22,x8 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x11,ror#18 // Sigma1(e) ror x11,x23,#28 add x22,x22,x17 // h+=Ch(e,f,g) eor x17,x23,x23,ror#5 add x22,x22,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x26,x26,x22 // d+=h eor x19,x19,x24 // Maj(a,b,c) eor x17,x11,x17,ror#34 // Sigma0(a) add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x9,x9 // 6 #endif add x22,x22,x17 // h+=Sigma0(a) ror x16,x26,#14 add x21,x21,x19 // h+=K[i] eor x12,x26,x26,ror#23 and x17,x27,x26 bic x19,x20,x26 add x21,x21,x9 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x12,ror#18 // Sigma1(e) ror x12,x22,#28 add x21,x21,x17 // h+=Ch(e,f,g) eor x17,x22,x22,ror#5 add x21,x21,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x25,x25,x21 // d+=h eor x28,x28,x23 // Maj(a,b,c) eor x17,x12,x17,ror#34 // Sigma0(a) add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x10,x10 // 7 #endif ldp x11,x12,[x1],#2*8 add x21,x21,x17 // h+=Sigma0(a) ror x16,x25,#14 add x20,x20,x28 // h+=K[i] eor x13,x25,x25,ror#23 and x17,x26,x25 bic x28,x27,x25 add x20,x20,x10 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x13,ror#18 // Sigma1(e) ror x13,x21,#28 add x20,x20,x17 // h+=Ch(e,f,g) eor x17,x21,x21,ror#5 add x20,x20,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x24,x24,x20 // d+=h eor x19,x19,x22 // Maj(a,b,c) eor x17,x13,x17,ror#34 // Sigma0(a) add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x20,x20,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x11,x11 // 8 #endif add x20,x20,x17 // h+=Sigma0(a) ror x16,x24,#14 add x27,x27,x19 // h+=K[i] eor x14,x24,x24,ror#23 and x17,x25,x24 bic x19,x26,x24 add x27,x27,x11 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x14,ror#18 // Sigma1(e) ror x14,x20,#28 add x27,x27,x17 // h+=Ch(e,f,g) eor x17,x20,x20,ror#5 add x27,x27,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x23,x23,x27 // d+=h eor x28,x28,x21 // Maj(a,b,c) eor x17,x14,x17,ror#34 // Sigma0(a) add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x12,x12 // 9 #endif ldp x13,x14,[x1],#2*8 add x27,x27,x17 // h+=Sigma0(a) ror x16,x23,#14 add x26,x26,x28 // h+=K[i] eor x15,x23,x23,ror#23 and x17,x24,x23 bic x28,x25,x23 add x26,x26,x12 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x15,ror#18 // Sigma1(e) ror x15,x27,#28 add x26,x26,x17 // h+=Ch(e,f,g) eor x17,x27,x27,ror#5 add x26,x26,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x22,x22,x26 // d+=h eor x19,x19,x20 // Maj(a,b,c) eor x17,x15,x17,ror#34 // Sigma0(a) add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x13,x13 // 10 #endif add x26,x26,x17 // h+=Sigma0(a) ror x16,x22,#14 add x25,x25,x19 // h+=K[i] eor x0,x22,x22,ror#23 and x17,x23,x22 bic x19,x24,x22 
add x25,x25,x13 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x0,ror#18 // Sigma1(e) ror x0,x26,#28 add x25,x25,x17 // h+=Ch(e,f,g) eor x17,x26,x26,ror#5 add x25,x25,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x21,x21,x25 // d+=h eor x28,x28,x27 // Maj(a,b,c) eor x17,x0,x17,ror#34 // Sigma0(a) add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x14,x14 // 11 #endif ldp x15,x0,[x1],#2*8 add x25,x25,x17 // h+=Sigma0(a) str x6,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] eor x6,x21,x21,ror#23 and x17,x22,x21 bic x28,x23,x21 add x24,x24,x14 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x6,ror#18 // Sigma1(e) ror x6,x25,#28 add x24,x24,x17 // h+=Ch(e,f,g) eor x17,x25,x25,ror#5 add x24,x24,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x20,x20,x24 // d+=h eor x19,x19,x26 // Maj(a,b,c) eor x17,x6,x17,ror#34 // Sigma0(a) add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x15,x15 // 12 #endif add x24,x24,x17 // h+=Sigma0(a) str x7,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] eor x7,x20,x20,ror#23 and x17,x21,x20 bic x19,x22,x20 add x23,x23,x15 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x7,ror#18 // Sigma1(e) ror x7,x24,#28 add x23,x23,x17 // h+=Ch(e,f,g) eor x17,x24,x24,ror#5 add x23,x23,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x27,x27,x23 // d+=h eor x28,x28,x25 // Maj(a,b,c) eor x17,x7,x17,ror#34 // Sigma0(a) add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x0,x0 // 13 #endif ldp x1,x2,[x1] add x23,x23,x17 // h+=Sigma0(a) str x8,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] eor x8,x27,x27,ror#23 and x17,x20,x27 bic x28,x21,x27 add x22,x22,x0 // h+=X[i] orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x8,ror#18 // Sigma1(e) ror x8,x23,#28 add x22,x22,x17 // h+=Ch(e,f,g) eor x17,x23,x23,ror#5 add x22,x22,x16 // h+=Sigma1(e) and x19,x19,x28 // (b^c)&=(a^b) add x26,x26,x22 // d+=h eor x19,x19,x24 // Maj(a,b,c) eor x17,x8,x17,ror#34 // Sigma0(a) add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x1,x1 // 14 #endif ldr x6,[sp,#24] add x22,x22,x17 // h+=Sigma0(a) str x9,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] eor x9,x26,x26,ror#23 and x17,x27,x26 bic x19,x20,x26 add x21,x21,x1 // h+=X[i] orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x9,ror#18 // Sigma1(e) ror x9,x22,#28 add x21,x21,x17 // h+=Ch(e,f,g) eor x17,x22,x22,ror#5 add x21,x21,x16 // h+=Sigma1(e) and x28,x28,x19 // (b^c)&=(a^b) add x25,x25,x21 // d+=h eor x28,x28,x23 // Maj(a,b,c) eor x17,x9,x17,ror#34 // Sigma0(a) add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) #ifndef __AARCH64EB__ rev x2,x2 // 15 #endif ldr x7,[sp,#0] add x21,x21,x17 // h+=Sigma0(a) str x10,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x9,x4,#1 and x17,x26,x25 ror x8,x1,#19 bic x28,x27,x25 ror x10,x21,#28 add x20,x20,x2 // h+=X[i] eor x16,x16,x25,ror#18 eor x9,x9,x4,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor 
x10,x10,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x8,x8,x1,ror#61 eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x10,x21,ror#39 // Sigma0(a) eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) add x3,x3,x12 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x3,x3,x9 add x20,x20,x17 // h+=Sigma0(a) add x3,x3,x8 Loop_16_xx: ldr x8,[sp,#8] str x11,[sp,#0] ror x16,x24,#14 add x27,x27,x19 // h+=K[i] ror x10,x5,#1 and x17,x25,x24 ror x9,x2,#19 bic x19,x26,x24 ror x11,x20,#28 add x27,x27,x3 // h+=X[i] eor x16,x16,x24,ror#18 eor x10,x10,x5,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x24,ror#41 // Sigma1(e) eor x11,x11,x20,ror#34 add x27,x27,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x9,x9,x2,ror#61 eor x10,x10,x5,lsr#7 // sigma0(X[i+1]) add x27,x27,x16 // h+=Sigma1(e) eor x28,x28,x21 // Maj(a,b,c) eor x17,x11,x20,ror#39 // Sigma0(a) eor x9,x9,x2,lsr#6 // sigma1(X[i+14]) add x4,x4,x13 add x23,x23,x27 // d+=h add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x4,x4,x10 add x27,x27,x17 // h+=Sigma0(a) add x4,x4,x9 ldr x9,[sp,#16] str x12,[sp,#8] ror x16,x23,#14 add x26,x26,x28 // h+=K[i] ror x11,x6,#1 and x17,x24,x23 ror x10,x3,#19 bic x28,x25,x23 ror x12,x27,#28 add x26,x26,x4 // h+=X[i] eor x16,x16,x23,ror#18 eor x11,x11,x6,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x23,ror#41 // Sigma1(e) eor x12,x12,x27,ror#34 add x26,x26,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x10,x10,x3,ror#61 eor x11,x11,x6,lsr#7 // sigma0(X[i+1]) add x26,x26,x16 // h+=Sigma1(e) eor x19,x19,x20 // Maj(a,b,c) eor x17,x12,x27,ror#39 // Sigma0(a) eor x10,x10,x3,lsr#6 // sigma1(X[i+14]) add x5,x5,x14 add x22,x22,x26 // d+=h add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x5,x5,x11 add x26,x26,x17 // h+=Sigma0(a) add x5,x5,x10 ldr x10,[sp,#24] str x13,[sp,#16] ror x16,x22,#14 add x25,x25,x19 // h+=K[i] ror x12,x7,#1 and x17,x23,x22 ror x11,x4,#19 bic x19,x24,x22 ror x13,x26,#28 add x25,x25,x5 // h+=X[i] eor x16,x16,x22,ror#18 eor x12,x12,x7,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x22,ror#41 // Sigma1(e) eor x13,x13,x26,ror#34 add x25,x25,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x11,x11,x4,ror#61 eor x12,x12,x7,lsr#7 // sigma0(X[i+1]) add x25,x25,x16 // h+=Sigma1(e) eor x28,x28,x27 // Maj(a,b,c) eor x17,x13,x26,ror#39 // Sigma0(a) eor x11,x11,x4,lsr#6 // sigma1(X[i+14]) add x6,x6,x15 add x21,x21,x25 // d+=h add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x6,x6,x12 add x25,x25,x17 // h+=Sigma0(a) add x6,x6,x11 ldr x11,[sp,#0] str x14,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] ror x13,x8,#1 and x17,x22,x21 ror x12,x5,#19 bic x28,x23,x21 ror x14,x25,#28 add x24,x24,x6 // h+=X[i] eor x16,x16,x21,ror#18 eor x13,x13,x8,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x21,ror#41 // Sigma1(e) eor x14,x14,x25,ror#34 add x24,x24,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x12,x12,x5,ror#61 eor x13,x13,x8,lsr#7 // sigma0(X[i+1]) add x24,x24,x16 // h+=Sigma1(e) eor x19,x19,x26 // Maj(a,b,c) eor x17,x14,x25,ror#39 // Sigma0(a) eor x12,x12,x5,lsr#6 // sigma1(X[i+14]) add x7,x7,x0 add x20,x20,x24 // d+=h add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next 
round add x7,x7,x13 add x24,x24,x17 // h+=Sigma0(a) add x7,x7,x12 ldr x12,[sp,#8] str x15,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] ror x14,x9,#1 and x17,x21,x20 ror x13,x6,#19 bic x19,x22,x20 ror x15,x24,#28 add x23,x23,x7 // h+=X[i] eor x16,x16,x20,ror#18 eor x14,x14,x9,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x20,ror#41 // Sigma1(e) eor x15,x15,x24,ror#34 add x23,x23,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x13,x13,x6,ror#61 eor x14,x14,x9,lsr#7 // sigma0(X[i+1]) add x23,x23,x16 // h+=Sigma1(e) eor x28,x28,x25 // Maj(a,b,c) eor x17,x15,x24,ror#39 // Sigma0(a) eor x13,x13,x6,lsr#6 // sigma1(X[i+14]) add x8,x8,x1 add x27,x27,x23 // d+=h add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x8,x8,x14 add x23,x23,x17 // h+=Sigma0(a) add x8,x8,x13 ldr x13,[sp,#16] str x0,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] ror x15,x10,#1 and x17,x20,x27 ror x14,x7,#19 bic x28,x21,x27 ror x0,x23,#28 add x22,x22,x8 // h+=X[i] eor x16,x16,x27,ror#18 eor x15,x15,x10,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x27,ror#41 // Sigma1(e) eor x0,x0,x23,ror#34 add x22,x22,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x14,x14,x7,ror#61 eor x15,x15,x10,lsr#7 // sigma0(X[i+1]) add x22,x22,x16 // h+=Sigma1(e) eor x19,x19,x24 // Maj(a,b,c) eor x17,x0,x23,ror#39 // Sigma0(a) eor x14,x14,x7,lsr#6 // sigma1(X[i+14]) add x9,x9,x2 add x26,x26,x22 // d+=h add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x9,x9,x15 add x22,x22,x17 // h+=Sigma0(a) add x9,x9,x14 ldr x14,[sp,#24] str x1,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] ror x0,x11,#1 and x17,x27,x26 ror x15,x8,#19 bic x19,x20,x26 ror x1,x22,#28 add x21,x21,x9 // h+=X[i] eor x16,x16,x26,ror#18 eor x0,x0,x11,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x26,ror#41 // Sigma1(e) eor x1,x1,x22,ror#34 add x21,x21,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x15,x15,x8,ror#61 eor x0,x0,x11,lsr#7 // sigma0(X[i+1]) add x21,x21,x16 // h+=Sigma1(e) eor x28,x28,x23 // Maj(a,b,c) eor x17,x1,x22,ror#39 // Sigma0(a) eor x15,x15,x8,lsr#6 // sigma1(X[i+14]) add x10,x10,x3 add x25,x25,x21 // d+=h add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x10,x10,x0 add x21,x21,x17 // h+=Sigma0(a) add x10,x10,x15 ldr x15,[sp,#0] str x2,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x1,x12,#1 and x17,x26,x25 ror x0,x9,#19 bic x28,x27,x25 ror x2,x21,#28 add x20,x20,x10 // h+=X[i] eor x16,x16,x25,ror#18 eor x1,x1,x12,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x2,x2,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x0,x0,x9,ror#61 eor x1,x1,x12,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x2,x21,ror#39 // Sigma0(a) eor x0,x0,x9,lsr#6 // sigma1(X[i+14]) add x11,x11,x4 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x11,x11,x1 add x20,x20,x17 // h+=Sigma0(a) add x11,x11,x0 ldr x0,[sp,#8] str x3,[sp,#0] ror x16,x24,#14 add x27,x27,x19 // h+=K[i] ror x2,x13,#1 and x17,x25,x24 ror x1,x10,#19 bic x19,x26,x24 ror x3,x20,#28 add x27,x27,x11 // h+=X[i] eor x16,x16,x24,ror#18 eor x2,x2,x13,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x20,x21 // a^b, b^c in next round eor x16,x16,x24,ror#41 // Sigma1(e) eor 
x3,x3,x20,ror#34 add x27,x27,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x1,x1,x10,ror#61 eor x2,x2,x13,lsr#7 // sigma0(X[i+1]) add x27,x27,x16 // h+=Sigma1(e) eor x28,x28,x21 // Maj(a,b,c) eor x17,x3,x20,ror#39 // Sigma0(a) eor x1,x1,x10,lsr#6 // sigma1(X[i+14]) add x12,x12,x5 add x23,x23,x27 // d+=h add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x12,x12,x2 add x27,x27,x17 // h+=Sigma0(a) add x12,x12,x1 ldr x1,[sp,#16] str x4,[sp,#8] ror x16,x23,#14 add x26,x26,x28 // h+=K[i] ror x3,x14,#1 and x17,x24,x23 ror x2,x11,#19 bic x28,x25,x23 ror x4,x27,#28 add x26,x26,x12 // h+=X[i] eor x16,x16,x23,ror#18 eor x3,x3,x14,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x27,x20 // a^b, b^c in next round eor x16,x16,x23,ror#41 // Sigma1(e) eor x4,x4,x27,ror#34 add x26,x26,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x2,x2,x11,ror#61 eor x3,x3,x14,lsr#7 // sigma0(X[i+1]) add x26,x26,x16 // h+=Sigma1(e) eor x19,x19,x20 // Maj(a,b,c) eor x17,x4,x27,ror#39 // Sigma0(a) eor x2,x2,x11,lsr#6 // sigma1(X[i+14]) add x13,x13,x6 add x22,x22,x26 // d+=h add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x13,x13,x3 add x26,x26,x17 // h+=Sigma0(a) add x13,x13,x2 ldr x2,[sp,#24] str x5,[sp,#16] ror x16,x22,#14 add x25,x25,x19 // h+=K[i] ror x4,x15,#1 and x17,x23,x22 ror x3,x12,#19 bic x19,x24,x22 ror x5,x26,#28 add x25,x25,x13 // h+=X[i] eor x16,x16,x22,ror#18 eor x4,x4,x15,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x26,x27 // a^b, b^c in next round eor x16,x16,x22,ror#41 // Sigma1(e) eor x5,x5,x26,ror#34 add x25,x25,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x3,x3,x12,ror#61 eor x4,x4,x15,lsr#7 // sigma0(X[i+1]) add x25,x25,x16 // h+=Sigma1(e) eor x28,x28,x27 // Maj(a,b,c) eor x17,x5,x26,ror#39 // Sigma0(a) eor x3,x3,x12,lsr#6 // sigma1(X[i+14]) add x14,x14,x7 add x21,x21,x25 // d+=h add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x14,x14,x4 add x25,x25,x17 // h+=Sigma0(a) add x14,x14,x3 ldr x3,[sp,#0] str x6,[sp,#24] ror x16,x21,#14 add x24,x24,x28 // h+=K[i] ror x5,x0,#1 and x17,x22,x21 ror x4,x13,#19 bic x28,x23,x21 ror x6,x25,#28 add x24,x24,x14 // h+=X[i] eor x16,x16,x21,ror#18 eor x5,x5,x0,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x25,x26 // a^b, b^c in next round eor x16,x16,x21,ror#41 // Sigma1(e) eor x6,x6,x25,ror#34 add x24,x24,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x4,x4,x13,ror#61 eor x5,x5,x0,lsr#7 // sigma0(X[i+1]) add x24,x24,x16 // h+=Sigma1(e) eor x19,x19,x26 // Maj(a,b,c) eor x17,x6,x25,ror#39 // Sigma0(a) eor x4,x4,x13,lsr#6 // sigma1(X[i+14]) add x15,x15,x8 add x20,x20,x24 // d+=h add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x15,x15,x5 add x24,x24,x17 // h+=Sigma0(a) add x15,x15,x4 ldr x4,[sp,#8] str x7,[sp,#0] ror x16,x20,#14 add x23,x23,x19 // h+=K[i] ror x6,x1,#1 and x17,x21,x20 ror x5,x14,#19 bic x19,x22,x20 ror x7,x24,#28 add x23,x23,x15 // h+=X[i] eor x16,x16,x20,ror#18 eor x6,x6,x1,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x24,x25 // a^b, b^c in next round eor x16,x16,x20,ror#41 // Sigma1(e) eor x7,x7,x24,ror#34 add x23,x23,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x5,x5,x14,ror#61 eor x6,x6,x1,lsr#7 // sigma0(X[i+1]) add x23,x23,x16 // h+=Sigma1(e) eor x28,x28,x25 // Maj(a,b,c) eor x17,x7,x24,ror#39 // Sigma0(a) eor x5,x5,x14,lsr#6 // sigma1(X[i+14]) add x0,x0,x9 add x27,x27,x23 // d+=h add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x0,x0,x6 add x23,x23,x17 
// h+=Sigma0(a) add x0,x0,x5 ldr x5,[sp,#16] str x8,[sp,#8] ror x16,x27,#14 add x22,x22,x28 // h+=K[i] ror x7,x2,#1 and x17,x20,x27 ror x6,x15,#19 bic x28,x21,x27 ror x8,x23,#28 add x22,x22,x0 // h+=X[i] eor x16,x16,x27,ror#18 eor x7,x7,x2,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x23,x24 // a^b, b^c in next round eor x16,x16,x27,ror#41 // Sigma1(e) eor x8,x8,x23,ror#34 add x22,x22,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x6,x6,x15,ror#61 eor x7,x7,x2,lsr#7 // sigma0(X[i+1]) add x22,x22,x16 // h+=Sigma1(e) eor x19,x19,x24 // Maj(a,b,c) eor x17,x8,x23,ror#39 // Sigma0(a) eor x6,x6,x15,lsr#6 // sigma1(X[i+14]) add x1,x1,x10 add x26,x26,x22 // d+=h add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x1,x1,x7 add x22,x22,x17 // h+=Sigma0(a) add x1,x1,x6 ldr x6,[sp,#24] str x9,[sp,#16] ror x16,x26,#14 add x21,x21,x19 // h+=K[i] ror x8,x3,#1 and x17,x27,x26 ror x7,x0,#19 bic x19,x20,x26 ror x9,x22,#28 add x21,x21,x1 // h+=X[i] eor x16,x16,x26,ror#18 eor x8,x8,x3,ror#8 orr x17,x17,x19 // Ch(e,f,g) eor x19,x22,x23 // a^b, b^c in next round eor x16,x16,x26,ror#41 // Sigma1(e) eor x9,x9,x22,ror#34 add x21,x21,x17 // h+=Ch(e,f,g) and x28,x28,x19 // (b^c)&=(a^b) eor x7,x7,x0,ror#61 eor x8,x8,x3,lsr#7 // sigma0(X[i+1]) add x21,x21,x16 // h+=Sigma1(e) eor x28,x28,x23 // Maj(a,b,c) eor x17,x9,x22,ror#39 // Sigma0(a) eor x7,x7,x0,lsr#6 // sigma1(X[i+14]) add x2,x2,x11 add x25,x25,x21 // d+=h add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round add x2,x2,x8 add x21,x21,x17 // h+=Sigma0(a) add x2,x2,x7 ldr x7,[sp,#0] str x10,[sp,#24] ror x16,x25,#14 add x20,x20,x28 // h+=K[i] ror x9,x4,#1 and x17,x26,x25 ror x8,x1,#19 bic x28,x27,x25 ror x10,x21,#28 add x20,x20,x2 // h+=X[i] eor x16,x16,x25,ror#18 eor x9,x9,x4,ror#8 orr x17,x17,x28 // Ch(e,f,g) eor x28,x21,x22 // a^b, b^c in next round eor x16,x16,x25,ror#41 // Sigma1(e) eor x10,x10,x21,ror#34 add x20,x20,x17 // h+=Ch(e,f,g) and x19,x19,x28 // (b^c)&=(a^b) eor x8,x8,x1,ror#61 eor x9,x9,x4,lsr#7 // sigma0(X[i+1]) add x20,x20,x16 // h+=Sigma1(e) eor x19,x19,x22 // Maj(a,b,c) eor x17,x10,x21,ror#39 // Sigma0(a) eor x8,x8,x1,lsr#6 // sigma1(X[i+14]) add x3,x3,x12 add x24,x24,x20 // d+=h add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round add x3,x3,x9 add x20,x20,x17 // h+=Sigma0(a) add x3,x3,x8 cbnz x19,Loop_16_xx ldp x0,x2,[x29,#96] ldr x1,[x29,#112] sub x30,x30,#648 // rewind ldp x3,x4,[x0] ldp x5,x6,[x0,#2*8] add x1,x1,#14*8 // advance input pointer ldp x7,x8,[x0,#4*8] add x20,x20,x3 ldp x9,x10,[x0,#6*8] add x21,x21,x4 add x22,x22,x5 add x23,x23,x6 stp x20,x21,[x0] add x24,x24,x7 add x25,x25,x8 stp x22,x23,[x0,#2*8] add x26,x26,x9 add x27,x27,x10 cmp x1,x2 stp x24,x25,[x0,#4*8] stp x26,x27,[x0,#6*8] b.ne Loop ldp x19,x20,[x29,#16] add sp,sp,#4*8 ldp x21,x22,[x29,#32] ldp x23,x24,[x29,#48] ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 AARCH64_VALIDATE_LINK_REGISTER ret .section __TEXT,__const .align 6 LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 
0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .quad 0 // terminator .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 .text #ifndef __KERNEL__ .globl _sha512_block_data_order_hw .private_extern _sha512_block_data_order_hw .align 6 _sha512_block_data_order_hw: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context adrp x3,LK512@PAGE add x3,x3,LK512@PAGEOFF rev64 v16.16b,v16.16b rev64 v17.16b,v17.16b rev64 v18.16b,v18.16b rev64 v19.16b,v19.16b rev64 v20.16b,v20.16b rev64 v21.16b,v21.16b rev64 v22.16b,v22.16b rev64 v23.16b,v23.16b b Loop_hw .align 4 Loop_hw: ld1 {v24.2d},[x3],#16 subs x2,x2,#1 sub x4,x1,#128 orr v26.16b,v0.16b,v0.16b // offload orr v27.16b,v1.16b,v1.16b orr v28.16b,v2.16b,v2.16b orr v29.16b,v3.16b,v3.16b csel x1,x1,x4,ne // conditional rewind add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a4 //sha512h 
v4.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 
v18.16b,v17.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" 
.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v24.2d,v24.2d,v16.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08230 //sha512su0 v16.16b,v17.16b ext v7.16b,v20.16b,v21.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v25.2d,v25.2d,v17.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08251 //sha512su0 v17.16b,v18.16b ext v7.16b,v21.16b,v22.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v24.2d,v24.2d,v18.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec08272 //sha512su0 v18.16b,v19.16b ext v7.16b,v22.16b,v23.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b add 
v25.2d,v25.2d,v19.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08293 //sha512su0 v19.16b,v20.16b ext v7.16b,v23.16b,v16.16b,#8 .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b .long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b add v24.2d,v24.2d,v20.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082b4 //sha512su0 v20.16b,v21.16b ext v7.16b,v16.16b,v17.16b,#8 .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b .long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b add v25.2d,v25.2d,v21.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec082d5 //sha512su0 v21.16b,v22.16b ext v7.16b,v17.16b,v18.16b,#8 .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b .long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v24.2d,v24.2d,v22.2d ld1 {v25.2d},[x3],#16 ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" .long 0xcec082f6 //sha512su0 v22.16b,v23.16b ext v7.16b,v18.16b,v19.16b,#8 .long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b .long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b add v25.2d,v25.2d,v23.2d ld1 {v24.2d},[x3],#16 ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" .long 0xcec08217 //sha512su0 v23.16b,v16.16b ext v7.16b,v19.16b,v20.16b,#8 .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b .long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v16.2d ld1 {v16.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b rev64 v16.16b,v16.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v17.2d ld1 {v17.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b rev64 v17.16b,v17.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v18.2d ld1 {v18.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b rev64 v18.16b,v18.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v19.2d ld1 {v19.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v2.16b,v3.16b,#8 ext v6.16b,v1.16b,v2.16b,#8 add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a3 //sha512h 
v3.16b,v5.16b,v6.16b rev64 v19.16b,v19.16b add v4.2d,v1.2d,v3.2d // "D + T1" .long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v20.2d ld1 {v20.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v4.16b,v2.16b,#8 ext v6.16b,v0.16b,v4.16b,#8 add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b rev64 v20.16b,v20.16b add v1.2d,v0.2d,v2.2d // "D + T1" .long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b ld1 {v24.2d},[x3],#16 add v25.2d,v25.2d,v21.2d ld1 {v21.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v1.16b,v4.16b,#8 ext v6.16b,v3.16b,v1.16b,#8 add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b rev64 v21.16b,v21.16b add v0.2d,v3.2d,v4.2d // "D + T1" .long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b ld1 {v25.2d},[x3],#16 add v24.2d,v24.2d,v22.2d ld1 {v22.16b},[x1],#16 // load next input ext v24.16b,v24.16b,v24.16b,#8 ext v5.16b,v0.16b,v1.16b,#8 ext v6.16b,v2.16b,v0.16b,#8 add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" .long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b rev64 v22.16b,v22.16b add v3.2d,v2.2d,v1.2d // "D + T1" .long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b sub x3,x3,#80*8 // rewind add v25.2d,v25.2d,v23.2d ld1 {v23.16b},[x1],#16 // load next input ext v25.16b,v25.16b,v25.16b,#8 ext v5.16b,v3.16b,v0.16b,#8 ext v6.16b,v4.16b,v3.16b,#8 add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" .long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b rev64 v23.16b,v23.16b add v2.2d,v4.2d,v0.2d // "D + T1" .long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b add v0.2d,v0.2d,v26.2d // accumulate add v1.2d,v1.2d,v27.2d add v2.2d,v2.2d,v28.2d add v3.2d,v3.2d,v29.2d cbnz x2,Loop_hw st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context ldr x29,[sp],#16 ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
marvin-hansen/iggy-streaming-system
7,693
thirdparty/crates/ring-0.17.9/pregenerated/aesv8-armx-win64.S
// This file is generated from a similarly-named Perl script in the BoringSSL // source tree. Do not edit by hand. #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32) #include <ring-core/arm_arch.h> #if __ARM_MAX_ARCH__>=7 .text .arch armv8-a+crypto .section .rodata .align 5 Lrcon: .long 0x01,0x01,0x01,0x01 .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat .long 0x1b,0x1b,0x1b,0x1b .text .globl aes_hw_set_encrypt_key .def aes_hw_set_encrypt_key .type 32 .endef .align 5 aes_hw_set_encrypt_key: Lenc_key: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 mov x3,#-2 cmp w1,#128 b.lt Lenc_key_abort cmp w1,#256 b.gt Lenc_key_abort tst w1,#0x3f b.ne Lenc_key_abort adrp x3,Lrcon add x3,x3,:lo12:Lrcon cmp w1,#192 eor v0.16b,v0.16b,v0.16b ld1 {v3.16b},[x0],#16 mov w1,#8 // reuse w1 ld1 {v1.4s,v2.4s},[x3],#32 b.lt Loop128 // 192-bit key support was removed. b L256 .align 4 Loop128: tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b b.ne Loop128 ld1 {v1.4s},[x3] tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b tbl v6.16b,{v3.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v3.4s},[x2],#16 aese v6.16b,v0.16b eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b eor v3.16b,v3.16b,v6.16b st1 {v3.4s},[x2] add x2,x2,#0x50 mov w12,#10 b Ldone // 192-bit key support was removed. .align 4 L256: ld1 {v4.16b},[x0] mov w1,#7 mov w12,#14 st1 {v3.4s},[x2],#16 Loop256: tbl v6.16b,{v4.16b},v2.16b ext v5.16b,v0.16b,v3.16b,#12 st1 {v4.4s},[x2],#16 aese v6.16b,v0.16b subs w1,w1,#1 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v3.16b,v3.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v6.16b,v6.16b,v1.16b eor v3.16b,v3.16b,v5.16b shl v1.16b,v1.16b,#1 eor v3.16b,v3.16b,v6.16b st1 {v3.4s},[x2],#16 b.eq Ldone dup v6.4s,v3.s[3] // just splat ext v5.16b,v0.16b,v4.16b,#12 aese v6.16b,v0.16b eor v4.16b,v4.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v4.16b,v4.16b,v5.16b ext v5.16b,v0.16b,v5.16b,#12 eor v4.16b,v4.16b,v5.16b eor v4.16b,v4.16b,v6.16b b Loop256 Ldone: str w12,[x2] mov x3,#0 Lenc_key_abort: mov x0,x3 // return value ldr x29,[sp],#16 ret .globl aes_hw_ctr32_encrypt_blocks .def aes_hw_ctr32_encrypt_blocks .type 32 .endef .align 5 aes_hw_ctr32_encrypt_blocks: // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 ldr w5,[x3,#240] ldr w8, [x4, #12] ld1 {v0.4s},[x4] ld1 {v16.4s,v17.4s},[x3] // load key schedule... 
sub w5,w5,#4 mov x12,#16 cmp x2,#2 add x7,x3,x5,lsl#4 // pointer to last 5 round keys sub w5,w5,#2 ld1 {v20.4s,v21.4s},[x7],#32 ld1 {v22.4s,v23.4s},[x7],#32 ld1 {v7.4s},[x7] add x7,x3,#32 mov w6,w5 csel x12,xzr,x12,lo // ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are // affected by silicon errata #1742098 [0] and #1655431 [1], // respectively, where the second instruction of an aese/aesmc // instruction pair may execute twice if an interrupt is taken right // after the first instruction consumes an input register of which a // single 32-bit lane has been updated the last time it was modified. // // This function uses a counter in one 32-bit lane. The vmov lines // could write to v1.16b and v18.16b directly, but that trips these bugs. // We write to v6.16b and copy to the final register as a workaround. // // [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice // [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice #ifndef __AARCH64EB__ rev w8, w8 #endif add w10, w8, #1 orr v6.16b,v0.16b,v0.16b rev w10, w10 mov v6.s[3],w10 add w8, w8, #2 orr v1.16b,v6.16b,v6.16b b.ls Lctr32_tail rev w12, w8 mov v6.s[3],w12 sub x2,x2,#3 // bias orr v18.16b,v6.16b,v6.16b b Loop3x_ctr32 .align 4 Loop3x_ctr32: aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b aese v18.16b,v17.16b aesmc v18.16b,v18.16b ld1 {v17.4s},[x7],#16 b.gt Loop3x_ctr32 aese v0.16b,v16.16b aesmc v4.16b,v0.16b aese v1.16b,v16.16b aesmc v5.16b,v1.16b ld1 {v2.16b},[x0],#16 add w9,w8,#1 aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v3.16b},[x0],#16 rev w9,w9 aese v4.16b,v17.16b aesmc v4.16b,v4.16b aese v5.16b,v17.16b aesmc v5.16b,v5.16b ld1 {v19.16b},[x0],#16 mov x7,x3 aese v18.16b,v17.16b aesmc v17.16b,v18.16b aese v4.16b,v20.16b aesmc v4.16b,v4.16b aese v5.16b,v20.16b aesmc v5.16b,v5.16b eor v2.16b,v2.16b,v7.16b add w10,w8,#2 aese v17.16b,v20.16b aesmc v17.16b,v17.16b eor v3.16b,v3.16b,v7.16b add w8,w8,#3 aese v4.16b,v21.16b aesmc v4.16b,v4.16b aese v5.16b,v21.16b aesmc v5.16b,v5.16b // Note the logic to update v0.16b, v1.16b, and v18.16b is written to work // around a bug in ARM Cortex-A57 and Cortex-A72 cores running in // 32-bit mode. See the comment above. 
eor v19.16b,v19.16b,v7.16b mov v6.s[3], w9 aese v17.16b,v21.16b aesmc v17.16b,v17.16b orr v0.16b,v6.16b,v6.16b rev w10,w10 aese v4.16b,v22.16b aesmc v4.16b,v4.16b mov v6.s[3], w10 rev w12,w8 aese v5.16b,v22.16b aesmc v5.16b,v5.16b orr v1.16b,v6.16b,v6.16b mov v6.s[3], w12 aese v17.16b,v22.16b aesmc v17.16b,v17.16b orr v18.16b,v6.16b,v6.16b subs x2,x2,#3 aese v4.16b,v23.16b aese v5.16b,v23.16b aese v17.16b,v23.16b eor v2.16b,v2.16b,v4.16b ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0] st1 {v2.16b},[x1],#16 eor v3.16b,v3.16b,v5.16b mov w6,w5 st1 {v3.16b},[x1],#16 eor v19.16b,v19.16b,v17.16b ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1] st1 {v19.16b},[x1],#16 b.hs Loop3x_ctr32 adds x2,x2,#3 b.eq Lctr32_done cmp x2,#1 mov x12,#16 csel x12,xzr,x12,eq Lctr32_tail: aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b ld1 {v16.4s},[x7],#16 subs w6,w6,#2 aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b ld1 {v17.4s},[x7],#16 b.gt Lctr32_tail aese v0.16b,v16.16b aesmc v0.16b,v0.16b aese v1.16b,v16.16b aesmc v1.16b,v1.16b aese v0.16b,v17.16b aesmc v0.16b,v0.16b aese v1.16b,v17.16b aesmc v1.16b,v1.16b ld1 {v2.16b},[x0],x12 aese v0.16b,v20.16b aesmc v0.16b,v0.16b aese v1.16b,v20.16b aesmc v1.16b,v1.16b ld1 {v3.16b},[x0] aese v0.16b,v21.16b aesmc v0.16b,v0.16b aese v1.16b,v21.16b aesmc v1.16b,v1.16b eor v2.16b,v2.16b,v7.16b aese v0.16b,v22.16b aesmc v0.16b,v0.16b aese v1.16b,v22.16b aesmc v1.16b,v1.16b eor v3.16b,v3.16b,v7.16b aese v0.16b,v23.16b aese v1.16b,v23.16b cmp x2,#1 eor v2.16b,v2.16b,v0.16b eor v3.16b,v3.16b,v1.16b st1 {v2.16b},[x1],#16 b.eq Lctr32_done st1 {v3.16b},[x1] Lctr32_done: ldr x29,[sp],#16 ret #endif #endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)
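The Cortex-A57/A72 erratum note inside aes_hw_ctr32_encrypt_blocks above amounts to one rule: never update a single 32-bit lane of a vector register that the next aese/aesmc pair will consume; instead update the lane in a scratch register and copy the whole vector across. A minimal AArch64 sketch of the two patterns, for illustration only (not part of the generated file), assuming the byte-reversed counter word is in w9 and a round key in v16:

    // Erratum-prone: single-lane insert straight into the AES input register,
    // which is then immediately consumed by the aese/aesmc pair.
    mov   v1.s[3], w9
    aese  v1.16b, v16.16b
    aesmc v1.16b, v1.16b

    // Workaround used above: stage the lane update in the scratch register v6,
    // then copy all 128 bits with an ORR register move before the AES round.
    mov   v6.s[3], w9
    orr   v1.16b, v6.16b, v6.16b
    aese  v1.16b, v16.16b
    aesmc v1.16b, v1.16b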
marvin-hansen/iggy-streaming-system
2,659
thirdparty/crates/ring-0.17.9/third_party/fiat/asm/fiat_curve25519_adx_square.S
#include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \ (defined(__APPLE__) || defined(__ELF__)) .intel_syntax noprefix .text #if defined(__APPLE__) .private_extern _fiat_curve25519_adx_square .global _fiat_curve25519_adx_square _fiat_curve25519_adx_square: #else .type fiat_curve25519_adx_square, @function .hidden fiat_curve25519_adx_square .global fiat_curve25519_adx_square fiat_curve25519_adx_square: #endif .cfi_startproc _CET_ENDBR push rbp .cfi_adjust_cfa_offset 8 .cfi_offset rbp, -16 mov rbp, rsp mov rdx, [ rsi + 0x0 ] mulx r10, rax, [ rsi + 0x8 ] mov rdx, [ rsi + 0x0 ] mulx rcx, r11, [ rsi + 0x10 ] xor rdx, rdx adox r11, r10 mov rdx, [ rsi + 0x0 ] mulx r9, r8, [ rsi + 0x18 ] mov rdx, [ rsi + 0x8 ] mov [ rsp - 0x80 ], rbx .cfi_offset rbx, -16-0x80 mulx rbx, r10, [ rsi + 0x18 ] adox r8, rcx mov [rsp - 0x48 ], rdi adox r10, r9 adcx rax, rax mov rdx, [ rsi + 0x10 ] mulx r9, rcx, [ rsi + 0x18 ] adox rcx, rbx mov rdx, [ rsi + 0x10 ] mulx rdi, rbx, [ rsi + 0x8 ] mov rdx, 0x0 adox r9, rdx mov [ rsp - 0x70 ], r12 .cfi_offset r12, -16-0x70 mov r12, -0x3 inc r12 adox rbx, r8 adox rdi, r10 adcx r11, r11 mov r8, rdx adox r8, rcx mov r10, rdx adox r10, r9 adcx rbx, rbx mov rdx, [ rsi + 0x0 ] mulx r9, rcx, rdx mov rdx, [ rsi + 0x8 ] mov [ rsp - 0x68 ], r13 .cfi_offset r13, -16-0x68 mov [ rsp - 0x60 ], r14 .cfi_offset r14, -16-0x60 mulx r14, r13, rdx seto dl inc r12 adox r9, rax adox r13, r11 adox r14, rbx adcx rdi, rdi mov al, dl mov rdx, [ rsi + 0x10 ] mulx rbx, r11, rdx adox r11, rdi adcx r8, r8 adox rbx, r8 adcx r10, r10 movzx rdx, al mov rdi, 0x0 adcx rdx, rdi movzx r8, al lea r8, [ r8 + rdx ] mov rdx, [ rsi + 0x18 ] mulx rdi, rax, rdx adox rax, r10 mov rdx, 0x26 mov [ rsp - 0x58 ], r15 .cfi_offset r15, -16-0x58 mulx r15, r10, r11 clc adcx r10, rcx mulx r11, rcx, rbx adox r8, rdi mulx rdi, rbx, r8 inc r12 adox rcx, r9 mulx r8, r9, rax adcx r15, rcx adox r9, r13 adcx r11, r9 adox rbx, r14 adox rdi, r12 adcx r8, rbx adc rdi, 0x0 mulx r14, r13, rdi test al, al mov rdi, [ rsp - 0x48 ] adox r13, r10 mov r14, r12 adox r14, r15 mov [ rdi + 0x8 ], r14 mov rax, r12 adox rax, r11 mov r10, r12 adox r10, r8 mov [ rdi + 0x10 ], rax mov rcx, r12 cmovo rcx, rdx adcx r13, rcx mov [ rdi + 0x0 ], r13 mov [ rdi + 0x18 ], r10 mov rbx, [ rsp - 0x80 ] .cfi_restore rbx mov r12, [ rsp - 0x70 ] .cfi_restore r12 mov r13, [ rsp - 0x68 ] .cfi_restore r13 mov r14, [ rsp - 0x60 ] .cfi_restore r14 mov r15, [ rsp - 0x58 ] .cfi_restore r15 pop rbp .cfi_restore rbp .cfi_adjust_cfa_offset -8 ret .cfi_endproc #if defined(__ELF__) .size fiat_curve25519_adx_square, .-fiat_curve25519_adx_square #endif #endif
marvin-hansen/iggy-streaming-system
3,464
thirdparty/crates/ring-0.17.9/third_party/fiat/asm/fiat_curve25519_adx_mul.S
#include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \ (defined(__APPLE__) || defined(__ELF__)) .intel_syntax noprefix .text #if defined(__APPLE__) .private_extern _fiat_curve25519_adx_mul .global _fiat_curve25519_adx_mul _fiat_curve25519_adx_mul: #else .type fiat_curve25519_adx_mul, @function .hidden fiat_curve25519_adx_mul .global fiat_curve25519_adx_mul fiat_curve25519_adx_mul: #endif .cfi_startproc _CET_ENDBR push rbp .cfi_adjust_cfa_offset 8 .cfi_offset rbp, -16 mov rbp, rsp mov rax, rdx mov rdx, [ rsi + 0x18 ] mulx r11, r10, [ rax + 0x8 ] mov rdx, [ rax + 0x0 ] mov [ rsp - 0x58 ], r15 .cfi_offset r15, -16-0x58 mulx r8, rcx, [ rsi + 0x18 ] mov rdx, [ rsi + 0x8 ] mov [ rsp - 0x80 ], rbx .cfi_offset rbx, -16-0x80 mulx rbx, r9, [ rax + 0x18 ] mov rdx, [ rsi + 0x8 ] mov [ rsp - 0x70 ], r12 .cfi_offset r12, -16-0x70 mulx r15, r12, [ rax + 0x8 ] mov rdx, [ rsi + 0x0 ] mov [ rsp - 0x68 ], r13 .cfi_offset r13, -16-0x68 mov [ rsp - 0x60 ], r14 .cfi_offset r14, -16-0x60 mulx r14, r13, [ rax + 0x0 ] mov rdx, [ rax + 0x10 ] mov [ rsp - 0x18 ], r15 mov [ rsp - 0x50 ], rdi mulx rdi, r15, [ rsi + 0x0 ] mov rdx, [ rax + 0x18 ] mov [ rsp - 0x48 ], r13 mov [ rsp - 0x40 ], r9 mulx r9, r13, [ rsi + 0x0 ] test al, al adox rcx, rdi mov rdx, [ rsi + 0x10 ] mov [ rsp - 0x38 ], r13 mulx r13, rdi, [ rax + 0x8 ] adox r10, r9 mov rdx, 0x0 adox rbx, rdx adcx rdi, rcx adcx r8, r10 mov r9, rdx adcx r9, rbx mov rdx, [ rsi + 0x10 ] mulx r10, rcx, [ rax + 0x0 ] mov rdx, [ rsi + 0x0 ] mov [ rsp - 0x30 ], r15 mulx r15, rbx, [ rax + 0x8 ] mov rdx, -0x2 inc rdx adox rcx, r15 setc r15b clc adcx rcx, r12 adox r10, rdi mov rdx, [ rax + 0x10 ] mov [ rsp - 0x78 ], rcx mulx rcx, rdi, [ rsi + 0x10 ] adox rdi, r8 mov rdx, [ rax + 0x18 ] mov [ rsp - 0x28 ], rcx mulx rcx, r8, [ rsi + 0x10 ] mov rdx, [ rax + 0x10 ] mov [ rsp - 0x20 ], r8 mulx r12, r8, [ rsi + 0x18 ] adox r8, r9 mov rdx, [ rsi + 0x8 ] mov [ rsp - 0x10 ], r12 mulx r12, r9, [ rax + 0x10 ] movzx rdx, r15b lea rdx, [ rdx + rcx ] adcx r9, r10 adcx r13, rdi mov r15, 0x0 mov r10, r15 adox r10, rdx mov rdx, [ rax + 0x18 ] mulx rcx, rdi, [ rsi + 0x18 ] adox rcx, r15 adcx r11, r8 mov rdx, r15 adcx rdx, r10 adcx rcx, r15 mov r8, rdx mov rdx, [ rax + 0x0 ] mulx r15, r10, [ rsi + 0x8 ] test al, al adox r10, r14 adcx rbx, r10 adox r15, [ rsp - 0x78 ] adcx r15, [ rsp - 0x30 ] adox r9, [ rsp - 0x18 ] adcx r9, [ rsp - 0x38 ] adox r13, [ rsp - 0x40 ] adcx r12, r13 adox r11, [ rsp - 0x20 ] adcx r11, [ rsp - 0x28 ] mov rdx, 0x26 mulx rsi, r14, r12 adox rdi, r8 adcx rdi, [ rsp - 0x10 ] mulx r10, r8, r11 mov r13, 0x0 adox rcx, r13 adcx rcx, r13 mulx r11, r12, rdi xor rdi, rdi adox r8, rbx adox r12, r15 mulx rbx, r13, rcx adcx r14, [ rsp - 0x48 ] adox r13, r9 adox rbx, rdi adcx rsi, r8 adcx r10, r12 adcx r11, r13 adc rbx, 0x0 mulx r9, r15, rbx xor r9, r9 adox r15, r14 mov rdi, r9 adox rdi, rsi mov rcx, r9 adox rcx, r10 mov r8, [ rsp - 0x50 ] mov [ r8 + 0x8 ], rdi mov r12, r9 adox r12, r11 mov r14, r9 cmovo r14, rdx mov [ r8 + 0x18 ], r12 adcx r15, r14 mov [ r8 + 0x0 ], r15 mov [ r8 + 0x10 ], rcx mov rbx, [ rsp - 0x80 ] .cfi_restore rbx mov r12, [ rsp - 0x70 ] .cfi_restore r12 mov r13, [ rsp - 0x68 ] .cfi_restore r13 mov r14, [ rsp - 0x60 ] .cfi_restore r14 mov r15, [ rsp - 0x58 ] .cfi_restore r15 pop rbp .cfi_restore rbp .cfi_adjust_cfa_offset -8 ret .cfi_endproc #if defined(__ELF__) .size fiat_curve25519_adx_mul, .-fiat_curve25519_adx_mul #endif #endif
marvin-hansen/iggy-streaming-system
62,534
thirdparty/crates/ring-0.17.9/crypto/poly1305/poly1305_arm_asm.S
#include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) #pragma GCC diagnostic ignored "-Wlanguage-extension-token" # This implementation was taken from the public domain, neon2 version in # SUPERCOP by D. J. Bernstein and Peter Schwabe. # qhasm: int32 input_0 # qhasm: int32 input_1 # qhasm: int32 input_2 # qhasm: int32 input_3 # qhasm: stack32 input_4 # qhasm: stack32 input_5 # qhasm: stack32 input_6 # qhasm: stack32 input_7 # qhasm: int32 caller_r4 # qhasm: int32 caller_r5 # qhasm: int32 caller_r6 # qhasm: int32 caller_r7 # qhasm: int32 caller_r8 # qhasm: int32 caller_r9 # qhasm: int32 caller_r10 # qhasm: int32 caller_r11 # qhasm: int32 caller_r12 # qhasm: int32 caller_r14 # qhasm: reg128 caller_q4 # qhasm: reg128 caller_q5 # qhasm: reg128 caller_q6 # qhasm: reg128 caller_q7 # qhasm: startcode .fpu neon .text # qhasm: reg128 r0 # qhasm: reg128 r1 # qhasm: reg128 r2 # qhasm: reg128 r3 # qhasm: reg128 r4 # qhasm: reg128 x01 # qhasm: reg128 x23 # qhasm: reg128 x4 # qhasm: reg128 y0 # qhasm: reg128 y12 # qhasm: reg128 y34 # qhasm: reg128 5y12 # qhasm: reg128 5y34 # qhasm: stack128 y0_stack # qhasm: stack128 y12_stack # qhasm: stack128 y34_stack # qhasm: stack128 5y12_stack # qhasm: stack128 5y34_stack # qhasm: reg128 z0 # qhasm: reg128 z12 # qhasm: reg128 z34 # qhasm: reg128 5z12 # qhasm: reg128 5z34 # qhasm: stack128 z0_stack # qhasm: stack128 z12_stack # qhasm: stack128 z34_stack # qhasm: stack128 5z12_stack # qhasm: stack128 5z34_stack # qhasm: stack128 two24 # qhasm: int32 ptr # qhasm: reg128 c01 # qhasm: reg128 c23 # qhasm: reg128 d01 # qhasm: reg128 d23 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 t2 # qhasm: reg128 t3 # qhasm: reg128 t4 # qhasm: reg128 mask # qhasm: reg128 u0 # qhasm: reg128 u1 # qhasm: reg128 u2 # qhasm: reg128 u3 # qhasm: reg128 u4 # qhasm: reg128 v01 # qhasm: reg128 mid # qhasm: reg128 v23 # qhasm: reg128 v4 # qhasm: int32 len # qhasm: qpushenter crypto_onetimeauth_poly1305_neon2_blocks .align 4 .global openssl_poly1305_neon2_blocks .hidden openssl_poly1305_neon2_blocks .type openssl_poly1305_neon2_blocks STT_FUNC openssl_poly1305_neon2_blocks: vpush {q4,q5,q6,q7} mov r12,sp sub sp,sp,#192 bic sp,sp,#31 # qhasm: len = input_3 # asm 1: mov >len=int32#4,<input_3=int32#4 # asm 2: mov >len=r3,<input_3=r3 mov r3,r3 # qhasm: new y0 # qhasm: y0 = mem64[input_1]y0[1]; input_1 += 8 # asm 1: vld1.8 {<y0=reg128#1%bot},[<input_1=int32#2]! # asm 2: vld1.8 {<y0=d0},[<input_1=r1]! vld1.8 {d0},[r1]! # qhasm: y12 = mem128[input_1]; input_1 += 16 # asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<input_1=int32#2]! # asm 2: vld1.8 {>y12=d2->y12=d3},[<input_1=r1]! vld1.8 {d2-d3},[r1]! # qhasm: y34 = mem128[input_1]; input_1 += 16 # asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<input_1=int32#2]! # asm 2: vld1.8 {>y34=d4->y34=d5},[<input_1=r1]! vld1.8 {d4-d5},[r1]! # qhasm: input_1 += 8 # asm 1: add >input_1=int32#2,<input_1=int32#2,#8 # asm 2: add >input_1=r1,<input_1=r1,#8 add r1,r1,#8 # qhasm: new z0 # qhasm: z0 = mem64[input_1]z0[1]; input_1 += 8 # asm 1: vld1.8 {<z0=reg128#4%bot},[<input_1=int32#2]! # asm 2: vld1.8 {<z0=d6},[<input_1=r1]! vld1.8 {d6},[r1]! # qhasm: z12 = mem128[input_1]; input_1 += 16 # asm 1: vld1.8 {>z12=reg128#5%bot->z12=reg128#5%top},[<input_1=int32#2]! # asm 2: vld1.8 {>z12=d8->z12=d9},[<input_1=r1]! vld1.8 {d8-d9},[r1]! # qhasm: z34 = mem128[input_1]; input_1 += 16 # asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<input_1=int32#2]! # asm 2: vld1.8 {>z34=d10->z34=d11},[<input_1=r1]! 
vld1.8 {d10-d11},[r1]! # qhasm: 2x mask = 0xffffffff # asm 1: vmov.i64 >mask=reg128#7,#0xffffffff # asm 2: vmov.i64 >mask=q6,#0xffffffff vmov.i64 q6,#0xffffffff # qhasm: 2x u4 = 0xff # asm 1: vmov.i64 >u4=reg128#8,#0xff # asm 2: vmov.i64 >u4=q7,#0xff vmov.i64 q7,#0xff # qhasm: x01 aligned= mem128[input_0];input_0+=16 # asm 1: vld1.8 {>x01=reg128#9%bot->x01=reg128#9%top},[<input_0=int32#1,: 128]! # asm 2: vld1.8 {>x01=d16->x01=d17},[<input_0=r0,: 128]! vld1.8 {d16-d17},[r0,: 128]! # qhasm: x23 aligned= mem128[input_0];input_0+=16 # asm 1: vld1.8 {>x23=reg128#10%bot->x23=reg128#10%top},[<input_0=int32#1,: 128]! # asm 2: vld1.8 {>x23=d18->x23=d19},[<input_0=r0,: 128]! vld1.8 {d18-d19},[r0,: 128]! # qhasm: x4 aligned= mem64[input_0]x4[1] # asm 1: vld1.8 {<x4=reg128#11%bot},[<input_0=int32#1,: 64] # asm 2: vld1.8 {<x4=d20},[<input_0=r0,: 64] vld1.8 {d20},[r0,: 64] # qhasm: input_0 -= 32 # asm 1: sub >input_0=int32#1,<input_0=int32#1,#32 # asm 2: sub >input_0=r0,<input_0=r0,#32 sub r0,r0,#32 # qhasm: 2x mask unsigned>>=6 # asm 1: vshr.u64 >mask=reg128#7,<mask=reg128#7,#6 # asm 2: vshr.u64 >mask=q6,<mask=q6,#6 vshr.u64 q6,q6,#6 # qhasm: 2x u4 unsigned>>= 7 # asm 1: vshr.u64 >u4=reg128#8,<u4=reg128#8,#7 # asm 2: vshr.u64 >u4=q7,<u4=q7,#7 vshr.u64 q7,q7,#7 # qhasm: 4x 5y12 = y12 << 2 # asm 1: vshl.i32 >5y12=reg128#12,<y12=reg128#2,#2 # asm 2: vshl.i32 >5y12=q11,<y12=q1,#2 vshl.i32 q11,q1,#2 # qhasm: 4x 5y34 = y34 << 2 # asm 1: vshl.i32 >5y34=reg128#13,<y34=reg128#3,#2 # asm 2: vshl.i32 >5y34=q12,<y34=q2,#2 vshl.i32 q12,q2,#2 # qhasm: 4x 5y12 += y12 # asm 1: vadd.i32 >5y12=reg128#12,<5y12=reg128#12,<y12=reg128#2 # asm 2: vadd.i32 >5y12=q11,<5y12=q11,<y12=q1 vadd.i32 q11,q11,q1 # qhasm: 4x 5y34 += y34 # asm 1: vadd.i32 >5y34=reg128#13,<5y34=reg128#13,<y34=reg128#3 # asm 2: vadd.i32 >5y34=q12,<5y34=q12,<y34=q2 vadd.i32 q12,q12,q2 # qhasm: 2x u4 <<= 24 # asm 1: vshl.i64 >u4=reg128#8,<u4=reg128#8,#24 # asm 2: vshl.i64 >u4=q7,<u4=q7,#24 vshl.i64 q7,q7,#24 # qhasm: 4x 5z12 = z12 << 2 # asm 1: vshl.i32 >5z12=reg128#14,<z12=reg128#5,#2 # asm 2: vshl.i32 >5z12=q13,<z12=q4,#2 vshl.i32 q13,q4,#2 # qhasm: 4x 5z34 = z34 << 2 # asm 1: vshl.i32 >5z34=reg128#15,<z34=reg128#6,#2 # asm 2: vshl.i32 >5z34=q14,<z34=q5,#2 vshl.i32 q14,q5,#2 # qhasm: 4x 5z12 += z12 # asm 1: vadd.i32 >5z12=reg128#14,<5z12=reg128#14,<z12=reg128#5 # asm 2: vadd.i32 >5z12=q13,<5z12=q13,<z12=q4 vadd.i32 q13,q13,q4 # qhasm: 4x 5z34 += z34 # asm 1: vadd.i32 >5z34=reg128#15,<5z34=reg128#15,<z34=reg128#6 # asm 2: vadd.i32 >5z34=q14,<5z34=q14,<z34=q5 vadd.i32 q14,q14,q5 # qhasm: new two24 # qhasm: new y0_stack # qhasm: new y12_stack # qhasm: new y34_stack # qhasm: new 5y12_stack # qhasm: new 5y34_stack # qhasm: new z0_stack # qhasm: new z12_stack # qhasm: new z34_stack # qhasm: new 5z12_stack # qhasm: new 5z34_stack # qhasm: ptr = &two24 # asm 1: lea >ptr=int32#2,<two24=stack128#1 # asm 2: lea >ptr=r1,<two24=[sp,#0] add r1,sp,#0 # qhasm: mem128[ptr] aligned= u4 # asm 1: vst1.8 {<u4=reg128#8%bot-<u4=reg128#8%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<u4=d14-<u4=d15},[<ptr=r1,: 128] vst1.8 {d14-d15},[r1,: 128] # qhasm: r4 = u4 # asm 1: vmov >r4=reg128#16,<u4=reg128#8 # asm 2: vmov >r4=q15,<u4=q7 vmov q15,q7 # qhasm: r0 = u4 # asm 1: vmov >r0=reg128#8,<u4=reg128#8 # asm 2: vmov >r0=q7,<u4=q7 vmov q7,q7 # qhasm: ptr = &y0_stack # asm 1: lea >ptr=int32#2,<y0_stack=stack128#2 # asm 2: lea >ptr=r1,<y0_stack=[sp,#16] add r1,sp,#16 # qhasm: mem128[ptr] aligned= y0 # asm 1: vst1.8 {<y0=reg128#1%bot-<y0=reg128#1%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 
{<y0=d0-<y0=d1},[<ptr=r1,: 128] vst1.8 {d0-d1},[r1,: 128] # qhasm: ptr = &y12_stack # asm 1: lea >ptr=int32#2,<y12_stack=stack128#3 # asm 2: lea >ptr=r1,<y12_stack=[sp,#32] add r1,sp,#32 # qhasm: mem128[ptr] aligned= y12 # asm 1: vst1.8 {<y12=reg128#2%bot-<y12=reg128#2%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<y12=d2-<y12=d3},[<ptr=r1,: 128] vst1.8 {d2-d3},[r1,: 128] # qhasm: ptr = &y34_stack # asm 1: lea >ptr=int32#2,<y34_stack=stack128#4 # asm 2: lea >ptr=r1,<y34_stack=[sp,#48] add r1,sp,#48 # qhasm: mem128[ptr] aligned= y34 # asm 1: vst1.8 {<y34=reg128#3%bot-<y34=reg128#3%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<y34=d4-<y34=d5},[<ptr=r1,: 128] vst1.8 {d4-d5},[r1,: 128] # qhasm: ptr = &z0_stack # asm 1: lea >ptr=int32#2,<z0_stack=stack128#7 # asm 2: lea >ptr=r1,<z0_stack=[sp,#96] add r1,sp,#96 # qhasm: mem128[ptr] aligned= z0 # asm 1: vst1.8 {<z0=reg128#4%bot-<z0=reg128#4%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<z0=d6-<z0=d7},[<ptr=r1,: 128] vst1.8 {d6-d7},[r1,: 128] # qhasm: ptr = &z12_stack # asm 1: lea >ptr=int32#2,<z12_stack=stack128#8 # asm 2: lea >ptr=r1,<z12_stack=[sp,#112] add r1,sp,#112 # qhasm: mem128[ptr] aligned= z12 # asm 1: vst1.8 {<z12=reg128#5%bot-<z12=reg128#5%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<z12=d8-<z12=d9},[<ptr=r1,: 128] vst1.8 {d8-d9},[r1,: 128] # qhasm: ptr = &z34_stack # asm 1: lea >ptr=int32#2,<z34_stack=stack128#9 # asm 2: lea >ptr=r1,<z34_stack=[sp,#128] add r1,sp,#128 # qhasm: mem128[ptr] aligned= z34 # asm 1: vst1.8 {<z34=reg128#6%bot-<z34=reg128#6%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<z34=d10-<z34=d11},[<ptr=r1,: 128] vst1.8 {d10-d11},[r1,: 128] # qhasm: ptr = &5y12_stack # asm 1: lea >ptr=int32#2,<5y12_stack=stack128#5 # asm 2: lea >ptr=r1,<5y12_stack=[sp,#64] add r1,sp,#64 # qhasm: mem128[ptr] aligned= 5y12 # asm 1: vst1.8 {<5y12=reg128#12%bot-<5y12=reg128#12%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<5y12=d22-<5y12=d23},[<ptr=r1,: 128] vst1.8 {d22-d23},[r1,: 128] # qhasm: ptr = &5y34_stack # asm 1: lea >ptr=int32#2,<5y34_stack=stack128#6 # asm 2: lea >ptr=r1,<5y34_stack=[sp,#80] add r1,sp,#80 # qhasm: mem128[ptr] aligned= 5y34 # asm 1: vst1.8 {<5y34=reg128#13%bot-<5y34=reg128#13%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<5y34=d24-<5y34=d25},[<ptr=r1,: 128] vst1.8 {d24-d25},[r1,: 128] # qhasm: ptr = &5z12_stack # asm 1: lea >ptr=int32#2,<5z12_stack=stack128#10 # asm 2: lea >ptr=r1,<5z12_stack=[sp,#144] add r1,sp,#144 # qhasm: mem128[ptr] aligned= 5z12 # asm 1: vst1.8 {<5z12=reg128#14%bot-<5z12=reg128#14%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<5z12=d26-<5z12=d27},[<ptr=r1,: 128] vst1.8 {d26-d27},[r1,: 128] # qhasm: ptr = &5z34_stack # asm 1: lea >ptr=int32#2,<5z34_stack=stack128#11 # asm 2: lea >ptr=r1,<5z34_stack=[sp,#160] add r1,sp,#160 # qhasm: mem128[ptr] aligned= 5z34 # asm 1: vst1.8 {<5z34=reg128#15%bot-<5z34=reg128#15%top},[<ptr=int32#2,: 128] # asm 2: vst1.8 {<5z34=d28-<5z34=d29},[<ptr=r1,: 128] vst1.8 {d28-d29},[r1,: 128] # qhasm: unsigned>? len - 64 # asm 1: cmp <len=int32#4,#64 # asm 2: cmp <len=r3,#64 cmp r3,#64 # qhasm: goto below64bytes if !unsigned> bls ._below64bytes # qhasm: input_2 += 32 # asm 1: add >input_2=int32#2,<input_2=int32#3,#32 # asm 2: add >input_2=r1,<input_2=r2,#32 add r1,r2,#32 # qhasm: mainloop2: ._mainloop2: # qhasm: c01 = mem128[input_2];input_2+=16 # asm 1: vld1.8 {>c01=reg128#1%bot->c01=reg128#1%top},[<input_2=int32#2]! # asm 2: vld1.8 {>c01=d0->c01=d1},[<input_2=r1]! vld1.8 {d0-d1},[r1]! 
# qhasm: c23 = mem128[input_2];input_2+=16 # asm 1: vld1.8 {>c23=reg128#2%bot->c23=reg128#2%top},[<input_2=int32#2]! # asm 2: vld1.8 {>c23=d2->c23=d3},[<input_2=r1]! vld1.8 {d2-d3},[r1]! # qhasm: r4[0,1] += x01[0] unsigned* z34[2]; r4[2,3] += x01[1] unsigned* z34[3] # asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%bot,<z34=reg128#6%top # asm 2: vmlal.u32 <r4=q15,<x01=d16,<z34=d11 vmlal.u32 q15,d16,d11 # qhasm: ptr = &z12_stack # asm 1: lea >ptr=int32#3,<z12_stack=stack128#8 # asm 2: lea >ptr=r2,<z12_stack=[sp,#112] add r2,sp,#112 # qhasm: z12 aligned= mem128[ptr] # asm 1: vld1.8 {>z12=reg128#3%bot->z12=reg128#3%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>z12=d4->z12=d5},[<ptr=r2,: 128] vld1.8 {d4-d5},[r2,: 128] # qhasm: r4[0,1] += x01[2] unsigned* z34[0]; r4[2,3] += x01[3] unsigned* z34[1] # asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%top,<z34=reg128#6%bot # asm 2: vmlal.u32 <r4=q15,<x01=d17,<z34=d10 vmlal.u32 q15,d17,d10 # qhasm: ptr = &z0_stack # asm 1: lea >ptr=int32#3,<z0_stack=stack128#7 # asm 2: lea >ptr=r2,<z0_stack=[sp,#96] add r2,sp,#96 # qhasm: z0 aligned= mem128[ptr] # asm 1: vld1.8 {>z0=reg128#4%bot->z0=reg128#4%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>z0=d6->z0=d7},[<ptr=r2,: 128] vld1.8 {d6-d7},[r2,: 128] # qhasm: r4[0,1] += x23[0] unsigned* z12[2]; r4[2,3] += x23[1] unsigned* z12[3] # asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%bot,<z12=reg128#3%top # asm 2: vmlal.u32 <r4=q15,<x23=d18,<z12=d5 vmlal.u32 q15,d18,d5 # qhasm: c01 c23 = c01[0]c01[1]c01[2]c23[2]c23[0]c23[1]c01[3]c23[3] # asm 1: vtrn.32 <c01=reg128#1%top,<c23=reg128#2%top # asm 2: vtrn.32 <c01=d1,<c23=d3 vtrn.32 d1,d3 # qhasm: r4[0,1] += x23[2] unsigned* z12[0]; r4[2,3] += x23[3] unsigned* z12[1] # asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%top,<z12=reg128#3%bot # asm 2: vmlal.u32 <r4=q15,<x23=d19,<z12=d4 vmlal.u32 q15,d19,d4 # qhasm: r4[0,1] += x4[0] unsigned* z0[0]; r4[2,3] += x4[1] unsigned* z0[1] # asm 1: vmlal.u32 <r4=reg128#16,<x4=reg128#11%bot,<z0=reg128#4%bot # asm 2: vmlal.u32 <r4=q15,<x4=d20,<z0=d6 vmlal.u32 q15,d20,d6 # qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18 # asm 1: vshll.u32 >r3=reg128#5,<c23=reg128#2%top,#18 # asm 2: vshll.u32 >r3=q4,<c23=d3,#18 vshll.u32 q4,d3,#18 # qhasm: c01 c23 = c01[0]c23[0]c01[2]c01[3]c01[1]c23[1]c23[2]c23[3] # asm 1: vtrn.32 <c01=reg128#1%bot,<c23=reg128#2%bot # asm 2: vtrn.32 <c01=d0,<c23=d2 vtrn.32 d0,d2 # qhasm: r3[0,1] += x01[0] unsigned* z34[0]; r3[2,3] += x01[1] unsigned* z34[1] # asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%bot,<z34=reg128#6%bot # asm 2: vmlal.u32 <r3=q4,<x01=d16,<z34=d10 vmlal.u32 q4,d16,d10 # qhasm: r3[0,1] += x01[2] unsigned* z12[2]; r3[2,3] += x01[3] unsigned* z12[3] # asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%top,<z12=reg128#3%top # asm 2: vmlal.u32 <r3=q4,<x01=d17,<z12=d5 vmlal.u32 q4,d17,d5 # qhasm: r0 = r0[1]c01[0]r0[2,3] # asm 1: vext.32 <r0=reg128#8%bot,<r0=reg128#8%bot,<c01=reg128#1%bot,#1 # asm 2: vext.32 <r0=d14,<r0=d14,<c01=d0,#1 vext.32 d14,d14,d0,#1 # qhasm: r3[0,1] += x23[0] unsigned* z12[0]; r3[2,3] += x23[1] unsigned* z12[1] # asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%bot,<z12=reg128#3%bot # asm 2: vmlal.u32 <r3=q4,<x23=d18,<z12=d4 vmlal.u32 q4,d18,d4 # qhasm: input_2 -= 64 # asm 1: sub >input_2=int32#2,<input_2=int32#2,#64 # asm 2: sub >input_2=r1,<input_2=r1,#64 sub r1,r1,#64 # qhasm: r3[0,1] += x23[2] unsigned* z0[0]; r3[2,3] += x23[3] unsigned* z0[1] # asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%top,<z0=reg128#4%bot # asm 2: vmlal.u32 <r3=q4,<x23=d19,<z0=d6 vmlal.u32 q4,d19,d6 # qhasm: ptr = &5z34_stack # asm 
1: lea >ptr=int32#3,<5z34_stack=stack128#11 # asm 2: lea >ptr=r2,<5z34_stack=[sp,#160] add r2,sp,#160 # qhasm: 5z34 aligned= mem128[ptr] # asm 1: vld1.8 {>5z34=reg128#6%bot->5z34=reg128#6%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>5z34=d10->5z34=d11},[<ptr=r2,: 128] vld1.8 {d10-d11},[r2,: 128] # qhasm: r3[0,1] += x4[0] unsigned* 5z34[2]; r3[2,3] += x4[1] unsigned* 5z34[3] # asm 1: vmlal.u32 <r3=reg128#5,<x4=reg128#11%bot,<5z34=reg128#6%top # asm 2: vmlal.u32 <r3=q4,<x4=d20,<5z34=d11 vmlal.u32 q4,d20,d11 # qhasm: r0 = r0[1]r0[0]r0[3]r0[2] # asm 1: vrev64.i32 >r0=reg128#8,<r0=reg128#8 # asm 2: vrev64.i32 >r0=q7,<r0=q7 vrev64.i32 q7,q7 # qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12 # asm 1: vshll.u32 >r2=reg128#14,<c01=reg128#1%top,#12 # asm 2: vshll.u32 >r2=q13,<c01=d1,#12 vshll.u32 q13,d1,#12 # qhasm: d01 = mem128[input_2];input_2+=16 # asm 1: vld1.8 {>d01=reg128#12%bot->d01=reg128#12%top},[<input_2=int32#2]! # asm 2: vld1.8 {>d01=d22->d01=d23},[<input_2=r1]! vld1.8 {d22-d23},[r1]! # qhasm: r2[0,1] += x01[0] unsigned* z12[2]; r2[2,3] += x01[1] unsigned* z12[3] # asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%bot,<z12=reg128#3%top # asm 2: vmlal.u32 <r2=q13,<x01=d16,<z12=d5 vmlal.u32 q13,d16,d5 # qhasm: r2[0,1] += x01[2] unsigned* z12[0]; r2[2,3] += x01[3] unsigned* z12[1] # asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%top,<z12=reg128#3%bot # asm 2: vmlal.u32 <r2=q13,<x01=d17,<z12=d4 vmlal.u32 q13,d17,d4 # qhasm: r2[0,1] += x23[0] unsigned* z0[0]; r2[2,3] += x23[1] unsigned* z0[1] # asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%bot,<z0=reg128#4%bot # asm 2: vmlal.u32 <r2=q13,<x23=d18,<z0=d6 vmlal.u32 q13,d18,d6 # qhasm: r2[0,1] += x23[2] unsigned* 5z34[2]; r2[2,3] += x23[3] unsigned* 5z34[3] # asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%top,<5z34=reg128#6%top # asm 2: vmlal.u32 <r2=q13,<x23=d19,<5z34=d11 vmlal.u32 q13,d19,d11 # qhasm: r2[0,1] += x4[0] unsigned* 5z34[0]; r2[2,3] += x4[1] unsigned* 5z34[1] # asm 1: vmlal.u32 <r2=reg128#14,<x4=reg128#11%bot,<5z34=reg128#6%bot # asm 2: vmlal.u32 <r2=q13,<x4=d20,<5z34=d10 vmlal.u32 q13,d20,d10 # qhasm: r0 = r0[0,1]c01[1]r0[2] # asm 1: vext.32 <r0=reg128#8%top,<c01=reg128#1%bot,<r0=reg128#8%top,#1 # asm 2: vext.32 <r0=d15,<c01=d0,<r0=d15,#1 vext.32 d15,d0,d15,#1 # qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6 # asm 1: vshll.u32 >r1=reg128#15,<c23=reg128#2%bot,#6 # asm 2: vshll.u32 >r1=q14,<c23=d2,#6 vshll.u32 q14,d2,#6 # qhasm: r1[0,1] += x01[0] unsigned* z12[0]; r1[2,3] += x01[1] unsigned* z12[1] # asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%bot,<z12=reg128#3%bot # asm 2: vmlal.u32 <r1=q14,<x01=d16,<z12=d4 vmlal.u32 q14,d16,d4 # qhasm: r1[0,1] += x01[2] unsigned* z0[0]; r1[2,3] += x01[3] unsigned* z0[1] # asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%top,<z0=reg128#4%bot # asm 2: vmlal.u32 <r1=q14,<x01=d17,<z0=d6 vmlal.u32 q14,d17,d6 # qhasm: r1[0,1] += x23[0] unsigned* 5z34[2]; r1[2,3] += x23[1] unsigned* 5z34[3] # asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%bot,<5z34=reg128#6%top # asm 2: vmlal.u32 <r1=q14,<x23=d18,<5z34=d11 vmlal.u32 q14,d18,d11 # qhasm: r1[0,1] += x23[2] unsigned* 5z34[0]; r1[2,3] += x23[3] unsigned* 5z34[1] # asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%top,<5z34=reg128#6%bot # asm 2: vmlal.u32 <r1=q14,<x23=d19,<5z34=d10 vmlal.u32 q14,d19,d10 # qhasm: ptr = &5z12_stack # asm 1: lea >ptr=int32#3,<5z12_stack=stack128#10 # asm 2: lea >ptr=r2,<5z12_stack=[sp,#144] add r2,sp,#144 # qhasm: 5z12 aligned= mem128[ptr] # asm 1: vld1.8 {>5z12=reg128#1%bot->5z12=reg128#1%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 
{>5z12=d0->5z12=d1},[<ptr=r2,: 128] vld1.8 {d0-d1},[r2,: 128] # qhasm: r1[0,1] += x4[0] unsigned* 5z12[2]; r1[2,3] += x4[1] unsigned* 5z12[3] # asm 1: vmlal.u32 <r1=reg128#15,<x4=reg128#11%bot,<5z12=reg128#1%top # asm 2: vmlal.u32 <r1=q14,<x4=d20,<5z12=d1 vmlal.u32 q14,d20,d1 # qhasm: d23 = mem128[input_2];input_2+=16 # asm 1: vld1.8 {>d23=reg128#2%bot->d23=reg128#2%top},[<input_2=int32#2]! # asm 2: vld1.8 {>d23=d2->d23=d3},[<input_2=r1]! vld1.8 {d2-d3},[r1]! # qhasm: input_2 += 32 # asm 1: add >input_2=int32#2,<input_2=int32#2,#32 # asm 2: add >input_2=r1,<input_2=r1,#32 add r1,r1,#32 # qhasm: r0[0,1] += x4[0] unsigned* 5z12[0]; r0[2,3] += x4[1] unsigned* 5z12[1] # asm 1: vmlal.u32 <r0=reg128#8,<x4=reg128#11%bot,<5z12=reg128#1%bot # asm 2: vmlal.u32 <r0=q7,<x4=d20,<5z12=d0 vmlal.u32 q7,d20,d0 # qhasm: r0[0,1] += x23[0] unsigned* 5z34[0]; r0[2,3] += x23[1] unsigned* 5z34[1] # asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%bot,<5z34=reg128#6%bot # asm 2: vmlal.u32 <r0=q7,<x23=d18,<5z34=d10 vmlal.u32 q7,d18,d10 # qhasm: d01 d23 = d01[0] d23[0] d01[1] d23[1] # asm 1: vswp <d23=reg128#2%bot,<d01=reg128#12%top # asm 2: vswp <d23=d2,<d01=d23 vswp d2,d23 # qhasm: r0[0,1] += x23[2] unsigned* 5z12[2]; r0[2,3] += x23[3] unsigned* 5z12[3] # asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%top,<5z12=reg128#1%top # asm 2: vmlal.u32 <r0=q7,<x23=d19,<5z12=d1 vmlal.u32 q7,d19,d1 # qhasm: r0[0,1] += x01[0] unsigned* z0[0]; r0[2,3] += x01[1] unsigned* z0[1] # asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%bot,<z0=reg128#4%bot # asm 2: vmlal.u32 <r0=q7,<x01=d16,<z0=d6 vmlal.u32 q7,d16,d6 # qhasm: new mid # qhasm: 2x v4 = d23 unsigned>> 40 # asm 1: vshr.u64 >v4=reg128#4,<d23=reg128#2,#40 # asm 2: vshr.u64 >v4=q3,<d23=q1,#40 vshr.u64 q3,q1,#40 # qhasm: mid = d01[1]d23[0] mid[2,3] # asm 1: vext.32 <mid=reg128#1%bot,<d01=reg128#12%bot,<d23=reg128#2%bot,#1 # asm 2: vext.32 <mid=d0,<d01=d22,<d23=d2,#1 vext.32 d0,d22,d2,#1 # qhasm: new v23 # qhasm: v23[2] = d23[0,1] unsigned>> 14; v23[3] = d23[2,3] unsigned>> 14 # asm 1: vshrn.u64 <v23=reg128#10%top,<d23=reg128#2,#14 # asm 2: vshrn.u64 <v23=d19,<d23=q1,#14 vshrn.u64 d19,q1,#14 # qhasm: mid = mid[0,1] d01[3]d23[2] # asm 1: vext.32 <mid=reg128#1%top,<d01=reg128#12%top,<d23=reg128#2%top,#1 # asm 2: vext.32 <mid=d1,<d01=d23,<d23=d3,#1 vext.32 d1,d23,d3,#1 # qhasm: new v01 # qhasm: v01[2] = d01[0,1] unsigned>> 26; v01[3] = d01[2,3] unsigned>> 26 # asm 1: vshrn.u64 <v01=reg128#11%top,<d01=reg128#12,#26 # asm 2: vshrn.u64 <v01=d21,<d01=q11,#26 vshrn.u64 d21,q11,#26 # qhasm: v01 = d01[1]d01[0] v01[2,3] # asm 1: vext.32 <v01=reg128#11%bot,<d01=reg128#12%bot,<d01=reg128#12%bot,#1 # asm 2: vext.32 <v01=d20,<d01=d22,<d01=d22,#1 vext.32 d20,d22,d22,#1 # qhasm: r0[0,1] += x01[2] unsigned* 5z34[2]; r0[2,3] += x01[3] unsigned* 5z34[3] # asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%top,<5z34=reg128#6%top # asm 2: vmlal.u32 <r0=q7,<x01=d17,<5z34=d11 vmlal.u32 q7,d17,d11 # qhasm: v01 = v01[1]d01[2] v01[2,3] # asm 1: vext.32 <v01=reg128#11%bot,<v01=reg128#11%bot,<d01=reg128#12%top,#1 # asm 2: vext.32 <v01=d20,<v01=d20,<d01=d23,#1 vext.32 d20,d20,d23,#1 # qhasm: v23[0] = mid[0,1] unsigned>> 20; v23[1] = mid[2,3] unsigned>> 20 # asm 1: vshrn.u64 <v23=reg128#10%bot,<mid=reg128#1,#20 # asm 2: vshrn.u64 <v23=d18,<mid=q0,#20 vshrn.u64 d18,q0,#20 # qhasm: v4 = v4[0]v4[2]v4[1]v4[3] # asm 1: vtrn.32 <v4=reg128#4%bot,<v4=reg128#4%top # asm 2: vtrn.32 <v4=d6,<v4=d7 vtrn.32 d6,d7 # qhasm: 4x v01 &= 0x03ffffff # asm 1: vand.i32 <v01=reg128#11,#0x03ffffff # asm 2: vand.i32 <v01=q10,#0x03ffffff vand.i32 
q10,#0x03ffffff # qhasm: ptr = &y34_stack # asm 1: lea >ptr=int32#3,<y34_stack=stack128#4 # asm 2: lea >ptr=r2,<y34_stack=[sp,#48] add r2,sp,#48 # qhasm: y34 aligned= mem128[ptr] # asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>y34=d4->y34=d5},[<ptr=r2,: 128] vld1.8 {d4-d5},[r2,: 128] # qhasm: 4x v23 &= 0x03ffffff # asm 1: vand.i32 <v23=reg128#10,#0x03ffffff # asm 2: vand.i32 <v23=q9,#0x03ffffff vand.i32 q9,#0x03ffffff # qhasm: ptr = &y12_stack # asm 1: lea >ptr=int32#3,<y12_stack=stack128#3 # asm 2: lea >ptr=r2,<y12_stack=[sp,#32] add r2,sp,#32 # qhasm: y12 aligned= mem128[ptr] # asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>y12=d2->y12=d3},[<ptr=r2,: 128] vld1.8 {d2-d3},[r2,: 128] # qhasm: 4x v4 |= 0x01000000 # asm 1: vorr.i32 <v4=reg128#4,#0x01000000 # asm 2: vorr.i32 <v4=q3,#0x01000000 vorr.i32 q3,#0x01000000 # qhasm: ptr = &y0_stack # asm 1: lea >ptr=int32#3,<y0_stack=stack128#2 # asm 2: lea >ptr=r2,<y0_stack=[sp,#16] add r2,sp,#16 # qhasm: y0 aligned= mem128[ptr] # asm 1: vld1.8 {>y0=reg128#1%bot->y0=reg128#1%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>y0=d0->y0=d1},[<ptr=r2,: 128] vld1.8 {d0-d1},[r2,: 128] # qhasm: r4[0,1] += v01[0] unsigned* y34[2]; r4[2,3] += v01[1] unsigned* y34[3] # asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%bot,<y34=reg128#3%top # asm 2: vmlal.u32 <r4=q15,<v01=d20,<y34=d5 vmlal.u32 q15,d20,d5 # qhasm: r4[0,1] += v01[2] unsigned* y34[0]; r4[2,3] += v01[3] unsigned* y34[1] # asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%top,<y34=reg128#3%bot # asm 2: vmlal.u32 <r4=q15,<v01=d21,<y34=d4 vmlal.u32 q15,d21,d4 # qhasm: r4[0,1] += v23[0] unsigned* y12[2]; r4[2,3] += v23[1] unsigned* y12[3] # asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%bot,<y12=reg128#2%top # asm 2: vmlal.u32 <r4=q15,<v23=d18,<y12=d3 vmlal.u32 q15,d18,d3 # qhasm: r4[0,1] += v23[2] unsigned* y12[0]; r4[2,3] += v23[3] unsigned* y12[1] # asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%top,<y12=reg128#2%bot # asm 2: vmlal.u32 <r4=q15,<v23=d19,<y12=d2 vmlal.u32 q15,d19,d2 # qhasm: r4[0,1] += v4[0] unsigned* y0[0]; r4[2,3] += v4[1] unsigned* y0[1] # asm 1: vmlal.u32 <r4=reg128#16,<v4=reg128#4%bot,<y0=reg128#1%bot # asm 2: vmlal.u32 <r4=q15,<v4=d6,<y0=d0 vmlal.u32 q15,d6,d0 # qhasm: ptr = &5y34_stack # asm 1: lea >ptr=int32#3,<5y34_stack=stack128#6 # asm 2: lea >ptr=r2,<5y34_stack=[sp,#80] add r2,sp,#80 # qhasm: 5y34 aligned= mem128[ptr] # asm 1: vld1.8 {>5y34=reg128#13%bot->5y34=reg128#13%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>5y34=d24->5y34=d25},[<ptr=r2,: 128] vld1.8 {d24-d25},[r2,: 128] # qhasm: r3[0,1] += v01[0] unsigned* y34[0]; r3[2,3] += v01[1] unsigned* y34[1] # asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%bot,<y34=reg128#3%bot # asm 2: vmlal.u32 <r3=q4,<v01=d20,<y34=d4 vmlal.u32 q4,d20,d4 # qhasm: r3[0,1] += v01[2] unsigned* y12[2]; r3[2,3] += v01[3] unsigned* y12[3] # asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%top,<y12=reg128#2%top # asm 2: vmlal.u32 <r3=q4,<v01=d21,<y12=d3 vmlal.u32 q4,d21,d3 # qhasm: r3[0,1] += v23[0] unsigned* y12[0]; r3[2,3] += v23[1] unsigned* y12[1] # asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%bot,<y12=reg128#2%bot # asm 2: vmlal.u32 <r3=q4,<v23=d18,<y12=d2 vmlal.u32 q4,d18,d2 # qhasm: r3[0,1] += v23[2] unsigned* y0[0]; r3[2,3] += v23[3] unsigned* y0[1] # asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%top,<y0=reg128#1%bot # asm 2: vmlal.u32 <r3=q4,<v23=d19,<y0=d0 vmlal.u32 q4,d19,d0 # qhasm: r3[0,1] += v4[0] unsigned* 5y34[2]; r3[2,3] += v4[1] unsigned* 5y34[3] # asm 1: vmlal.u32 
<r3=reg128#5,<v4=reg128#4%bot,<5y34=reg128#13%top # asm 2: vmlal.u32 <r3=q4,<v4=d6,<5y34=d25 vmlal.u32 q4,d6,d25 # qhasm: ptr = &5y12_stack # asm 1: lea >ptr=int32#3,<5y12_stack=stack128#5 # asm 2: lea >ptr=r2,<5y12_stack=[sp,#64] add r2,sp,#64 # qhasm: 5y12 aligned= mem128[ptr] # asm 1: vld1.8 {>5y12=reg128#12%bot->5y12=reg128#12%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>5y12=d22->5y12=d23},[<ptr=r2,: 128] vld1.8 {d22-d23},[r2,: 128] # qhasm: r0[0,1] += v4[0] unsigned* 5y12[0]; r0[2,3] += v4[1] unsigned* 5y12[1] # asm 1: vmlal.u32 <r0=reg128#8,<v4=reg128#4%bot,<5y12=reg128#12%bot # asm 2: vmlal.u32 <r0=q7,<v4=d6,<5y12=d22 vmlal.u32 q7,d6,d22 # qhasm: r0[0,1] += v23[0] unsigned* 5y34[0]; r0[2,3] += v23[1] unsigned* 5y34[1] # asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%bot,<5y34=reg128#13%bot # asm 2: vmlal.u32 <r0=q7,<v23=d18,<5y34=d24 vmlal.u32 q7,d18,d24 # qhasm: r0[0,1] += v23[2] unsigned* 5y12[2]; r0[2,3] += v23[3] unsigned* 5y12[3] # asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%top,<5y12=reg128#12%top # asm 2: vmlal.u32 <r0=q7,<v23=d19,<5y12=d23 vmlal.u32 q7,d19,d23 # qhasm: r0[0,1] += v01[0] unsigned* y0[0]; r0[2,3] += v01[1] unsigned* y0[1] # asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%bot,<y0=reg128#1%bot # asm 2: vmlal.u32 <r0=q7,<v01=d20,<y0=d0 vmlal.u32 q7,d20,d0 # qhasm: r0[0,1] += v01[2] unsigned* 5y34[2]; r0[2,3] += v01[3] unsigned* 5y34[3] # asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%top,<5y34=reg128#13%top # asm 2: vmlal.u32 <r0=q7,<v01=d21,<5y34=d25 vmlal.u32 q7,d21,d25 # qhasm: r1[0,1] += v01[0] unsigned* y12[0]; r1[2,3] += v01[1] unsigned* y12[1] # asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%bot,<y12=reg128#2%bot # asm 2: vmlal.u32 <r1=q14,<v01=d20,<y12=d2 vmlal.u32 q14,d20,d2 # qhasm: r1[0,1] += v01[2] unsigned* y0[0]; r1[2,3] += v01[3] unsigned* y0[1] # asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%top,<y0=reg128#1%bot # asm 2: vmlal.u32 <r1=q14,<v01=d21,<y0=d0 vmlal.u32 q14,d21,d0 # qhasm: r1[0,1] += v23[0] unsigned* 5y34[2]; r1[2,3] += v23[1] unsigned* 5y34[3] # asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%bot,<5y34=reg128#13%top # asm 2: vmlal.u32 <r1=q14,<v23=d18,<5y34=d25 vmlal.u32 q14,d18,d25 # qhasm: r1[0,1] += v23[2] unsigned* 5y34[0]; r1[2,3] += v23[3] unsigned* 5y34[1] # asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%top,<5y34=reg128#13%bot # asm 2: vmlal.u32 <r1=q14,<v23=d19,<5y34=d24 vmlal.u32 q14,d19,d24 # qhasm: r1[0,1] += v4[0] unsigned* 5y12[2]; r1[2,3] += v4[1] unsigned* 5y12[3] # asm 1: vmlal.u32 <r1=reg128#15,<v4=reg128#4%bot,<5y12=reg128#12%top # asm 2: vmlal.u32 <r1=q14,<v4=d6,<5y12=d23 vmlal.u32 q14,d6,d23 # qhasm: r2[0,1] += v01[0] unsigned* y12[2]; r2[2,3] += v01[1] unsigned* y12[3] # asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%bot,<y12=reg128#2%top # asm 2: vmlal.u32 <r2=q13,<v01=d20,<y12=d3 vmlal.u32 q13,d20,d3 # qhasm: r2[0,1] += v01[2] unsigned* y12[0]; r2[2,3] += v01[3] unsigned* y12[1] # asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%top,<y12=reg128#2%bot # asm 2: vmlal.u32 <r2=q13,<v01=d21,<y12=d2 vmlal.u32 q13,d21,d2 # qhasm: r2[0,1] += v23[0] unsigned* y0[0]; r2[2,3] += v23[1] unsigned* y0[1] # asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%bot,<y0=reg128#1%bot # asm 2: vmlal.u32 <r2=q13,<v23=d18,<y0=d0 vmlal.u32 q13,d18,d0 # qhasm: r2[0,1] += v23[2] unsigned* 5y34[2]; r2[2,3] += v23[3] unsigned* 5y34[3] # asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%top,<5y34=reg128#13%top # asm 2: vmlal.u32 <r2=q13,<v23=d19,<5y34=d25 vmlal.u32 q13,d19,d25 # qhasm: r2[0,1] += v4[0] unsigned* 5y34[0]; r2[2,3] += v4[1] unsigned* 5y34[1] # asm 1: 
vmlal.u32 <r2=reg128#14,<v4=reg128#4%bot,<5y34=reg128#13%bot # asm 2: vmlal.u32 <r2=q13,<v4=d6,<5y34=d24 vmlal.u32 q13,d6,d24 # qhasm: ptr = &two24 # asm 1: lea >ptr=int32#3,<two24=stack128#1 # asm 2: lea >ptr=r2,<two24=[sp,#0] add r2,sp,#0 # qhasm: 2x t1 = r0 unsigned>> 26 # asm 1: vshr.u64 >t1=reg128#4,<r0=reg128#8,#26 # asm 2: vshr.u64 >t1=q3,<r0=q7,#26 vshr.u64 q3,q7,#26 # qhasm: len -= 64 # asm 1: sub >len=int32#4,<len=int32#4,#64 # asm 2: sub >len=r3,<len=r3,#64 sub r3,r3,#64 # qhasm: r0 &= mask # asm 1: vand >r0=reg128#6,<r0=reg128#8,<mask=reg128#7 # asm 2: vand >r0=q5,<r0=q7,<mask=q6 vand q5,q7,q6 # qhasm: 2x r1 += t1 # asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#15,<t1=reg128#4 # asm 2: vadd.i64 >r1=q3,<r1=q14,<t1=q3 vadd.i64 q3,q14,q3 # qhasm: 2x t4 = r3 unsigned>> 26 # asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#5,#26 # asm 2: vshr.u64 >t4=q7,<r3=q4,#26 vshr.u64 q7,q4,#26 # qhasm: r3 &= mask # asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7 # asm 2: vand >r3=q4,<r3=q4,<mask=q6 vand q4,q4,q6 # qhasm: 2x x4 = r4 + t4 # asm 1: vadd.i64 >x4=reg128#8,<r4=reg128#16,<t4=reg128#8 # asm 2: vadd.i64 >x4=q7,<r4=q15,<t4=q7 vadd.i64 q7,q15,q7 # qhasm: r4 aligned= mem128[ptr] # asm 1: vld1.8 {>r4=reg128#16%bot->r4=reg128#16%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>r4=d30->r4=d31},[<ptr=r2,: 128] vld1.8 {d30-d31},[r2,: 128] # qhasm: 2x t2 = r1 unsigned>> 26 # asm 1: vshr.u64 >t2=reg128#9,<r1=reg128#4,#26 # asm 2: vshr.u64 >t2=q8,<r1=q3,#26 vshr.u64 q8,q3,#26 # qhasm: r1 &= mask # asm 1: vand >r1=reg128#4,<r1=reg128#4,<mask=reg128#7 # asm 2: vand >r1=q3,<r1=q3,<mask=q6 vand q3,q3,q6 # qhasm: 2x t0 = x4 unsigned>> 26 # asm 1: vshr.u64 >t0=reg128#10,<x4=reg128#8,#26 # asm 2: vshr.u64 >t0=q9,<x4=q7,#26 vshr.u64 q9,q7,#26 # qhasm: 2x r2 += t2 # asm 1: vadd.i64 >r2=reg128#9,<r2=reg128#14,<t2=reg128#9 # asm 2: vadd.i64 >r2=q8,<r2=q13,<t2=q8 vadd.i64 q8,q13,q8 # qhasm: x4 &= mask # asm 1: vand >x4=reg128#11,<x4=reg128#8,<mask=reg128#7 # asm 2: vand >x4=q10,<x4=q7,<mask=q6 vand q10,q7,q6 # qhasm: 2x x01 = r0 + t0 # asm 1: vadd.i64 >x01=reg128#6,<r0=reg128#6,<t0=reg128#10 # asm 2: vadd.i64 >x01=q5,<r0=q5,<t0=q9 vadd.i64 q5,q5,q9 # qhasm: r0 aligned= mem128[ptr] # asm 1: vld1.8 {>r0=reg128#8%bot->r0=reg128#8%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>r0=d14->r0=d15},[<ptr=r2,: 128] vld1.8 {d14-d15},[r2,: 128] # qhasm: ptr = &z34_stack # asm 1: lea >ptr=int32#3,<z34_stack=stack128#9 # asm 2: lea >ptr=r2,<z34_stack=[sp,#128] add r2,sp,#128 # qhasm: 2x t0 <<= 2 # asm 1: vshl.i64 >t0=reg128#10,<t0=reg128#10,#2 # asm 2: vshl.i64 >t0=q9,<t0=q9,#2 vshl.i64 q9,q9,#2 # qhasm: 2x t3 = r2 unsigned>> 26 # asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#9,#26 # asm 2: vshr.u64 >t3=q13,<r2=q8,#26 vshr.u64 q13,q8,#26 # qhasm: 2x x01 += t0 # asm 1: vadd.i64 >x01=reg128#15,<x01=reg128#6,<t0=reg128#10 # asm 2: vadd.i64 >x01=q14,<x01=q5,<t0=q9 vadd.i64 q14,q5,q9 # qhasm: z34 aligned= mem128[ptr] # asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<ptr=int32#3,: 128] # asm 2: vld1.8 {>z34=d10->z34=d11},[<ptr=r2,: 128] vld1.8 {d10-d11},[r2,: 128] # qhasm: x23 = r2 & mask # asm 1: vand >x23=reg128#10,<r2=reg128#9,<mask=reg128#7 # asm 2: vand >x23=q9,<r2=q8,<mask=q6 vand q9,q8,q6 # qhasm: 2x r3 += t3 # asm 1: vadd.i64 >r3=reg128#5,<r3=reg128#5,<t3=reg128#14 # asm 2: vadd.i64 >r3=q4,<r3=q4,<t3=q13 vadd.i64 q4,q4,q13 # qhasm: input_2 += 32 # asm 1: add >input_2=int32#2,<input_2=int32#2,#32 # asm 2: add >input_2=r1,<input_2=r1,#32 add r1,r1,#32 # qhasm: 2x t1 = x01 unsigned>> 26 # asm 1: vshr.u64 >t1=reg128#14,<x01=reg128#15,#26 # 
asm 2: vshr.u64 >t1=q13,<x01=q14,#26 vshr.u64 q13,q14,#26 # qhasm: x23 = x23[0,2,1,3] # asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top # asm 2: vtrn.32 <x23=d18,<x23=d19 vtrn.32 d18,d19 # qhasm: x01 = x01 & mask # asm 1: vand >x01=reg128#9,<x01=reg128#15,<mask=reg128#7 # asm 2: vand >x01=q8,<x01=q14,<mask=q6 vand q8,q14,q6 # qhasm: 2x r1 += t1 # asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#4,<t1=reg128#14 # asm 2: vadd.i64 >r1=q3,<r1=q3,<t1=q13 vadd.i64 q3,q3,q13 # qhasm: 2x t4 = r3 unsigned>> 26 # asm 1: vshr.u64 >t4=reg128#14,<r3=reg128#5,#26 # asm 2: vshr.u64 >t4=q13,<r3=q4,#26 vshr.u64 q13,q4,#26 # qhasm: x01 = x01[0,2,1,3] # asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top # asm 2: vtrn.32 <x01=d16,<x01=d17 vtrn.32 d16,d17 # qhasm: r3 &= mask # asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7 # asm 2: vand >r3=q4,<r3=q4,<mask=q6 vand q4,q4,q6 # qhasm: r1 = r1[0,2,1,3] # asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top # asm 2: vtrn.32 <r1=d6,<r1=d7 vtrn.32 d6,d7 # qhasm: 2x x4 += t4 # asm 1: vadd.i64 >x4=reg128#11,<x4=reg128#11,<t4=reg128#14 # asm 2: vadd.i64 >x4=q10,<x4=q10,<t4=q13 vadd.i64 q10,q10,q13 # qhasm: r3 = r3[0,2,1,3] # asm 1: vtrn.32 <r3=reg128#5%bot,<r3=reg128#5%top # asm 2: vtrn.32 <r3=d8,<r3=d9 vtrn.32 d8,d9 # qhasm: x01 = x01[0,1] r1[0,1] # asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0 # asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0 vext.32 d17,d6,d6,#0 # qhasm: x23 = x23[0,1] r3[0,1] # asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#5%bot,<r3=reg128#5%bot,#0 # asm 2: vext.32 <x23=d19,<r3=d8,<r3=d8,#0 vext.32 d19,d8,d8,#0 # qhasm: x4 = x4[0,2,1,3] # asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top # asm 2: vtrn.32 <x4=d20,<x4=d21 vtrn.32 d20,d21 # qhasm: unsigned>? len - 64 # asm 1: cmp <len=int32#4,#64 # asm 2: cmp <len=r3,#64 cmp r3,#64 # qhasm: goto mainloop2 if unsigned> bhi ._mainloop2 # qhasm: input_2 -= 32 # asm 1: sub >input_2=int32#3,<input_2=int32#2,#32 # asm 2: sub >input_2=r2,<input_2=r1,#32 sub r2,r1,#32 # qhasm: below64bytes: ._below64bytes: # qhasm: unsigned>? len - 32 # asm 1: cmp <len=int32#4,#32 # asm 2: cmp <len=r3,#32 cmp r3,#32 # qhasm: goto end if !unsigned> bls ._end # qhasm: mainloop: ._mainloop: # qhasm: new r0 # qhasm: ptr = &two24 # asm 1: lea >ptr=int32#2,<two24=stack128#1 # asm 2: lea >ptr=r1,<two24=[sp,#0] add r1,sp,#0 # qhasm: r4 aligned= mem128[ptr] # asm 1: vld1.8 {>r4=reg128#5%bot->r4=reg128#5%top},[<ptr=int32#2,: 128] # asm 2: vld1.8 {>r4=d8->r4=d9},[<ptr=r1,: 128] vld1.8 {d8-d9},[r1,: 128] # qhasm: u4 aligned= mem128[ptr] # asm 1: vld1.8 {>u4=reg128#6%bot->u4=reg128#6%top},[<ptr=int32#2,: 128] # asm 2: vld1.8 {>u4=d10->u4=d11},[<ptr=r1,: 128] vld1.8 {d10-d11},[r1,: 128] # qhasm: c01 = mem128[input_2];input_2+=16 # asm 1: vld1.8 {>c01=reg128#8%bot->c01=reg128#8%top},[<input_2=int32#3]! # asm 2: vld1.8 {>c01=d14->c01=d15},[<input_2=r2]! vld1.8 {d14-d15},[r2]! # qhasm: r4[0,1] += x01[0] unsigned* y34[2]; r4[2,3] += x01[1] unsigned* y34[3] # asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%bot,<y34=reg128#3%top # asm 2: vmlal.u32 <r4=q4,<x01=d16,<y34=d5 vmlal.u32 q4,d16,d5 # qhasm: c23 = mem128[input_2];input_2+=16 # asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_2=int32#3]! # asm 2: vld1.8 {>c23=d26->c23=d27},[<input_2=r2]! vld1.8 {d26-d27},[r2]! 
# qhasm: r4[0,1] += x01[2] unsigned* y34[0]; r4[2,3] += x01[3] unsigned* y34[1] # asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%top,<y34=reg128#3%bot # asm 2: vmlal.u32 <r4=q4,<x01=d17,<y34=d4 vmlal.u32 q4,d17,d4 # qhasm: r0 = u4[1]c01[0]r0[2,3] # asm 1: vext.32 <r0=reg128#4%bot,<u4=reg128#6%bot,<c01=reg128#8%bot,#1 # asm 2: vext.32 <r0=d6,<u4=d10,<c01=d14,#1 vext.32 d6,d10,d14,#1 # qhasm: r4[0,1] += x23[0] unsigned* y12[2]; r4[2,3] += x23[1] unsigned* y12[3] # asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%bot,<y12=reg128#2%top # asm 2: vmlal.u32 <r4=q4,<x23=d18,<y12=d3 vmlal.u32 q4,d18,d3 # qhasm: r0 = r0[0,1]u4[1]c23[0] # asm 1: vext.32 <r0=reg128#4%top,<u4=reg128#6%bot,<c23=reg128#14%bot,#1 # asm 2: vext.32 <r0=d7,<u4=d10,<c23=d26,#1 vext.32 d7,d10,d26,#1 # qhasm: r4[0,1] += x23[2] unsigned* y12[0]; r4[2,3] += x23[3] unsigned* y12[1] # asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%top,<y12=reg128#2%bot # asm 2: vmlal.u32 <r4=q4,<x23=d19,<y12=d2 vmlal.u32 q4,d19,d2 # qhasm: r0 = r0[1]r0[0]r0[3]r0[2] # asm 1: vrev64.i32 >r0=reg128#4,<r0=reg128#4 # asm 2: vrev64.i32 >r0=q3,<r0=q3 vrev64.i32 q3,q3 # qhasm: r4[0,1] += x4[0] unsigned* y0[0]; r4[2,3] += x4[1] unsigned* y0[1] # asm 1: vmlal.u32 <r4=reg128#5,<x4=reg128#11%bot,<y0=reg128#1%bot # asm 2: vmlal.u32 <r4=q4,<x4=d20,<y0=d0 vmlal.u32 q4,d20,d0 # qhasm: r0[0,1] += x4[0] unsigned* 5y12[0]; r0[2,3] += x4[1] unsigned* 5y12[1] # asm 1: vmlal.u32 <r0=reg128#4,<x4=reg128#11%bot,<5y12=reg128#12%bot # asm 2: vmlal.u32 <r0=q3,<x4=d20,<5y12=d22 vmlal.u32 q3,d20,d22 # qhasm: r0[0,1] += x23[0] unsigned* 5y34[0]; r0[2,3] += x23[1] unsigned* 5y34[1] # asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%bot,<5y34=reg128#13%bot # asm 2: vmlal.u32 <r0=q3,<x23=d18,<5y34=d24 vmlal.u32 q3,d18,d24 # qhasm: r0[0,1] += x23[2] unsigned* 5y12[2]; r0[2,3] += x23[3] unsigned* 5y12[3] # asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%top,<5y12=reg128#12%top # asm 2: vmlal.u32 <r0=q3,<x23=d19,<5y12=d23 vmlal.u32 q3,d19,d23 # qhasm: c01 c23 = c01[0]c23[0]c01[2]c23[2]c01[1]c23[1]c01[3]c23[3] # asm 1: vtrn.32 <c01=reg128#8,<c23=reg128#14 # asm 2: vtrn.32 <c01=q7,<c23=q13 vtrn.32 q7,q13 # qhasm: r0[0,1] += x01[0] unsigned* y0[0]; r0[2,3] += x01[1] unsigned* y0[1] # asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%bot,<y0=reg128#1%bot # asm 2: vmlal.u32 <r0=q3,<x01=d16,<y0=d0 vmlal.u32 q3,d16,d0 # qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18 # asm 1: vshll.u32 >r3=reg128#6,<c23=reg128#14%top,#18 # asm 2: vshll.u32 >r3=q5,<c23=d27,#18 vshll.u32 q5,d27,#18 # qhasm: r0[0,1] += x01[2] unsigned* 5y34[2]; r0[2,3] += x01[3] unsigned* 5y34[3] # asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%top,<5y34=reg128#13%top # asm 2: vmlal.u32 <r0=q3,<x01=d17,<5y34=d25 vmlal.u32 q3,d17,d25 # qhasm: r3[0,1] += x01[0] unsigned* y34[0]; r3[2,3] += x01[1] unsigned* y34[1] # asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%bot,<y34=reg128#3%bot # asm 2: vmlal.u32 <r3=q5,<x01=d16,<y34=d4 vmlal.u32 q5,d16,d4 # qhasm: r3[0,1] += x01[2] unsigned* y12[2]; r3[2,3] += x01[3] unsigned* y12[3] # asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%top,<y12=reg128#2%top # asm 2: vmlal.u32 <r3=q5,<x01=d17,<y12=d3 vmlal.u32 q5,d17,d3 # qhasm: r3[0,1] += x23[0] unsigned* y12[0]; r3[2,3] += x23[1] unsigned* y12[1] # asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%bot,<y12=reg128#2%bot # asm 2: vmlal.u32 <r3=q5,<x23=d18,<y12=d2 vmlal.u32 q5,d18,d2 # qhasm: r3[0,1] += x23[2] unsigned* y0[0]; r3[2,3] += x23[3] unsigned* y0[1] # asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%top,<y0=reg128#1%bot # asm 2: vmlal.u32 <r3=q5,<x23=d19,<y0=d0 vmlal.u32 
q5,d19,d0 # qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6 # asm 1: vshll.u32 >r1=reg128#14,<c23=reg128#14%bot,#6 # asm 2: vshll.u32 >r1=q13,<c23=d26,#6 vshll.u32 q13,d26,#6 # qhasm: r3[0,1] += x4[0] unsigned* 5y34[2]; r3[2,3] += x4[1] unsigned* 5y34[3] # asm 1: vmlal.u32 <r3=reg128#6,<x4=reg128#11%bot,<5y34=reg128#13%top # asm 2: vmlal.u32 <r3=q5,<x4=d20,<5y34=d25 vmlal.u32 q5,d20,d25 # qhasm: r1[0,1] += x01[0] unsigned* y12[0]; r1[2,3] += x01[1] unsigned* y12[1] # asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%bot,<y12=reg128#2%bot # asm 2: vmlal.u32 <r1=q13,<x01=d16,<y12=d2 vmlal.u32 q13,d16,d2 # qhasm: r1[0,1] += x01[2] unsigned* y0[0]; r1[2,3] += x01[3] unsigned* y0[1] # asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%top,<y0=reg128#1%bot # asm 2: vmlal.u32 <r1=q13,<x01=d17,<y0=d0 vmlal.u32 q13,d17,d0 # qhasm: r1[0,1] += x23[0] unsigned* 5y34[2]; r1[2,3] += x23[1] unsigned* 5y34[3] # asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%bot,<5y34=reg128#13%top # asm 2: vmlal.u32 <r1=q13,<x23=d18,<5y34=d25 vmlal.u32 q13,d18,d25 # qhasm: r1[0,1] += x23[2] unsigned* 5y34[0]; r1[2,3] += x23[3] unsigned* 5y34[1] # asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%top,<5y34=reg128#13%bot # asm 2: vmlal.u32 <r1=q13,<x23=d19,<5y34=d24 vmlal.u32 q13,d19,d24 # qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12 # asm 1: vshll.u32 >r2=reg128#8,<c01=reg128#8%top,#12 # asm 2: vshll.u32 >r2=q7,<c01=d15,#12 vshll.u32 q7,d15,#12 # qhasm: r1[0,1] += x4[0] unsigned* 5y12[2]; r1[2,3] += x4[1] unsigned* 5y12[3] # asm 1: vmlal.u32 <r1=reg128#14,<x4=reg128#11%bot,<5y12=reg128#12%top # asm 2: vmlal.u32 <r1=q13,<x4=d20,<5y12=d23 vmlal.u32 q13,d20,d23 # qhasm: r2[0,1] += x01[0] unsigned* y12[2]; r2[2,3] += x01[1] unsigned* y12[3] # asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%bot,<y12=reg128#2%top # asm 2: vmlal.u32 <r2=q7,<x01=d16,<y12=d3 vmlal.u32 q7,d16,d3 # qhasm: r2[0,1] += x01[2] unsigned* y12[0]; r2[2,3] += x01[3] unsigned* y12[1] # asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%top,<y12=reg128#2%bot # asm 2: vmlal.u32 <r2=q7,<x01=d17,<y12=d2 vmlal.u32 q7,d17,d2 # qhasm: r2[0,1] += x23[0] unsigned* y0[0]; r2[2,3] += x23[1] unsigned* y0[1] # asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%bot,<y0=reg128#1%bot # asm 2: vmlal.u32 <r2=q7,<x23=d18,<y0=d0 vmlal.u32 q7,d18,d0 # qhasm: r2[0,1] += x23[2] unsigned* 5y34[2]; r2[2,3] += x23[3] unsigned* 5y34[3] # asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%top,<5y34=reg128#13%top # asm 2: vmlal.u32 <r2=q7,<x23=d19,<5y34=d25 vmlal.u32 q7,d19,d25 # qhasm: r2[0,1] += x4[0] unsigned* 5y34[0]; r2[2,3] += x4[1] unsigned* 5y34[1] # asm 1: vmlal.u32 <r2=reg128#8,<x4=reg128#11%bot,<5y34=reg128#13%bot # asm 2: vmlal.u32 <r2=q7,<x4=d20,<5y34=d24 vmlal.u32 q7,d20,d24 # qhasm: 2x t1 = r0 unsigned>> 26 # asm 1: vshr.u64 >t1=reg128#9,<r0=reg128#4,#26 # asm 2: vshr.u64 >t1=q8,<r0=q3,#26 vshr.u64 q8,q3,#26 # qhasm: r0 &= mask # asm 1: vand >r0=reg128#4,<r0=reg128#4,<mask=reg128#7 # asm 2: vand >r0=q3,<r0=q3,<mask=q6 vand q3,q3,q6 # qhasm: 2x r1 += t1 # asm 1: vadd.i64 >r1=reg128#9,<r1=reg128#14,<t1=reg128#9 # asm 2: vadd.i64 >r1=q8,<r1=q13,<t1=q8 vadd.i64 q8,q13,q8 # qhasm: 2x t4 = r3 unsigned>> 26 # asm 1: vshr.u64 >t4=reg128#10,<r3=reg128#6,#26 # asm 2: vshr.u64 >t4=q9,<r3=q5,#26 vshr.u64 q9,q5,#26 # qhasm: r3 &= mask # asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7 # asm 2: vand >r3=q5,<r3=q5,<mask=q6 vand q5,q5,q6 # qhasm: 2x r4 += t4 # asm 1: vadd.i64 >r4=reg128#5,<r4=reg128#5,<t4=reg128#10 # asm 2: vadd.i64 >r4=q4,<r4=q4,<t4=q9 vadd.i64 q4,q4,q9 # qhasm: 2x t2 = r1 unsigned>> 26 # asm 1: 
vshr.u64 >t2=reg128#10,<r1=reg128#9,#26 # asm 2: vshr.u64 >t2=q9,<r1=q8,#26 vshr.u64 q9,q8,#26 # qhasm: r1 &= mask # asm 1: vand >r1=reg128#11,<r1=reg128#9,<mask=reg128#7 # asm 2: vand >r1=q10,<r1=q8,<mask=q6 vand q10,q8,q6 # qhasm: 2x t0 = r4 unsigned>> 26 # asm 1: vshr.u64 >t0=reg128#9,<r4=reg128#5,#26 # asm 2: vshr.u64 >t0=q8,<r4=q4,#26 vshr.u64 q8,q4,#26 # qhasm: 2x r2 += t2 # asm 1: vadd.i64 >r2=reg128#8,<r2=reg128#8,<t2=reg128#10 # asm 2: vadd.i64 >r2=q7,<r2=q7,<t2=q9 vadd.i64 q7,q7,q9 # qhasm: r4 &= mask # asm 1: vand >r4=reg128#5,<r4=reg128#5,<mask=reg128#7 # asm 2: vand >r4=q4,<r4=q4,<mask=q6 vand q4,q4,q6 # qhasm: 2x r0 += t0 # asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9 # asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8 vadd.i64 q3,q3,q8 # qhasm: 2x t0 <<= 2 # asm 1: vshl.i64 >t0=reg128#9,<t0=reg128#9,#2 # asm 2: vshl.i64 >t0=q8,<t0=q8,#2 vshl.i64 q8,q8,#2 # qhasm: 2x t3 = r2 unsigned>> 26 # asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#8,#26 # asm 2: vshr.u64 >t3=q13,<r2=q7,#26 vshr.u64 q13,q7,#26 # qhasm: 2x r0 += t0 # asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9 # asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8 vadd.i64 q3,q3,q8 # qhasm: x23 = r2 & mask # asm 1: vand >x23=reg128#10,<r2=reg128#8,<mask=reg128#7 # asm 2: vand >x23=q9,<r2=q7,<mask=q6 vand q9,q7,q6 # qhasm: 2x r3 += t3 # asm 1: vadd.i64 >r3=reg128#6,<r3=reg128#6,<t3=reg128#14 # asm 2: vadd.i64 >r3=q5,<r3=q5,<t3=q13 vadd.i64 q5,q5,q13 # qhasm: 2x t1 = r0 unsigned>> 26 # asm 1: vshr.u64 >t1=reg128#8,<r0=reg128#4,#26 # asm 2: vshr.u64 >t1=q7,<r0=q3,#26 vshr.u64 q7,q3,#26 # qhasm: x01 = r0 & mask # asm 1: vand >x01=reg128#9,<r0=reg128#4,<mask=reg128#7 # asm 2: vand >x01=q8,<r0=q3,<mask=q6 vand q8,q3,q6 # qhasm: 2x r1 += t1 # asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#11,<t1=reg128#8 # asm 2: vadd.i64 >r1=q3,<r1=q10,<t1=q7 vadd.i64 q3,q10,q7 # qhasm: 2x t4 = r3 unsigned>> 26 # asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#6,#26 # asm 2: vshr.u64 >t4=q7,<r3=q5,#26 vshr.u64 q7,q5,#26 # qhasm: r3 &= mask # asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7 # asm 2: vand >r3=q5,<r3=q5,<mask=q6 vand q5,q5,q6 # qhasm: 2x x4 = r4 + t4 # asm 1: vadd.i64 >x4=reg128#11,<r4=reg128#5,<t4=reg128#8 # asm 2: vadd.i64 >x4=q10,<r4=q4,<t4=q7 vadd.i64 q10,q4,q7 # qhasm: len -= 32 # asm 1: sub >len=int32#4,<len=int32#4,#32 # asm 2: sub >len=r3,<len=r3,#32 sub r3,r3,#32 # qhasm: x01 = x01[0,2,1,3] # asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top # asm 2: vtrn.32 <x01=d16,<x01=d17 vtrn.32 d16,d17 # qhasm: x23 = x23[0,2,1,3] # asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top # asm 2: vtrn.32 <x23=d18,<x23=d19 vtrn.32 d18,d19 # qhasm: r1 = r1[0,2,1,3] # asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top # asm 2: vtrn.32 <r1=d6,<r1=d7 vtrn.32 d6,d7 # qhasm: r3 = r3[0,2,1,3] # asm 1: vtrn.32 <r3=reg128#6%bot,<r3=reg128#6%top # asm 2: vtrn.32 <r3=d10,<r3=d11 vtrn.32 d10,d11 # qhasm: x4 = x4[0,2,1,3] # asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top # asm 2: vtrn.32 <x4=d20,<x4=d21 vtrn.32 d20,d21 # qhasm: x01 = x01[0,1] r1[0,1] # asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0 # asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0 vext.32 d17,d6,d6,#0 # qhasm: x23 = x23[0,1] r3[0,1] # asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#6%bot,<r3=reg128#6%bot,#0 # asm 2: vext.32 <x23=d19,<r3=d10,<r3=d10,#0 vext.32 d19,d10,d10,#0 # qhasm: unsigned>? 
len - 32 # asm 1: cmp <len=int32#4,#32 # asm 2: cmp <len=r3,#32 cmp r3,#32 # qhasm: goto mainloop if unsigned> bhi ._mainloop # qhasm: end: ._end: # qhasm: mem128[input_0] = x01;input_0+=16 # asm 1: vst1.8 {<x01=reg128#9%bot-<x01=reg128#9%top},[<input_0=int32#1]! # asm 2: vst1.8 {<x01=d16-<x01=d17},[<input_0=r0]! vst1.8 {d16-d17},[r0]! # qhasm: mem128[input_0] = x23;input_0+=16 # asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1]! # asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0]! vst1.8 {d18-d19},[r0]! # qhasm: mem64[input_0] = x4[0] # asm 1: vst1.8 <x4=reg128#11%bot,[<input_0=int32#1] # asm 2: vst1.8 <x4=d20,[<input_0=r0] vst1.8 d20,[r0] # qhasm: len = len # asm 1: mov >len=int32#1,<len=int32#4 # asm 2: mov >len=r0,<len=r3 mov r0,r3 # qhasm: qpopreturn len mov sp,r12 vpop {q4,q5,q6,q7} bx lr # qhasm: int32 input_0 # qhasm: int32 input_1 # qhasm: int32 input_2 # qhasm: int32 input_3 # qhasm: stack32 input_4 # qhasm: stack32 input_5 # qhasm: stack32 input_6 # qhasm: stack32 input_7 # qhasm: int32 caller_r4 # qhasm: int32 caller_r5 # qhasm: int32 caller_r6 # qhasm: int32 caller_r7 # qhasm: int32 caller_r8 # qhasm: int32 caller_r9 # qhasm: int32 caller_r10 # qhasm: int32 caller_r11 # qhasm: int32 caller_r12 # qhasm: int32 caller_r14 # qhasm: reg128 caller_q4 # qhasm: reg128 caller_q5 # qhasm: reg128 caller_q6 # qhasm: reg128 caller_q7 # qhasm: reg128 r0 # qhasm: reg128 r1 # qhasm: reg128 r2 # qhasm: reg128 r3 # qhasm: reg128 r4 # qhasm: reg128 x01 # qhasm: reg128 x23 # qhasm: reg128 x4 # qhasm: reg128 y01 # qhasm: reg128 y23 # qhasm: reg128 y4 # qhasm: reg128 _5y01 # qhasm: reg128 _5y23 # qhasm: reg128 _5y4 # qhasm: reg128 c01 # qhasm: reg128 c23 # qhasm: reg128 c4 # qhasm: reg128 t0 # qhasm: reg128 t1 # qhasm: reg128 t2 # qhasm: reg128 t3 # qhasm: reg128 t4 # qhasm: reg128 mask # qhasm: enter crypto_onetimeauth_poly1305_neon2_addmulmod .align 2 .global openssl_poly1305_neon2_addmulmod .hidden openssl_poly1305_neon2_addmulmod .type openssl_poly1305_neon2_addmulmod STT_FUNC openssl_poly1305_neon2_addmulmod: sub sp,sp,#0 # qhasm: 2x mask = 0xffffffff # asm 1: vmov.i64 >mask=reg128#1,#0xffffffff # asm 2: vmov.i64 >mask=q0,#0xffffffff vmov.i64 q0,#0xffffffff # qhasm: y01 aligned= mem128[input_2];input_2+=16 # asm 1: vld1.8 {>y01=reg128#2%bot->y01=reg128#2%top},[<input_2=int32#3,: 128]! # asm 2: vld1.8 {>y01=d2->y01=d3},[<input_2=r2,: 128]! vld1.8 {d2-d3},[r2,: 128]! # qhasm: 4x _5y01 = y01 << 2 # asm 1: vshl.i32 >_5y01=reg128#3,<y01=reg128#2,#2 # asm 2: vshl.i32 >_5y01=q2,<y01=q1,#2 vshl.i32 q2,q1,#2 # qhasm: y23 aligned= mem128[input_2];input_2+=16 # asm 1: vld1.8 {>y23=reg128#4%bot->y23=reg128#4%top},[<input_2=int32#3,: 128]! # asm 2: vld1.8 {>y23=d6->y23=d7},[<input_2=r2,: 128]! vld1.8 {d6-d7},[r2,: 128]! # qhasm: 4x _5y23 = y23 << 2 # asm 1: vshl.i32 >_5y23=reg128#9,<y23=reg128#4,#2 # asm 2: vshl.i32 >_5y23=q8,<y23=q3,#2 vshl.i32 q8,q3,#2 # qhasm: y4 aligned= mem64[input_2]y4[1] # asm 1: vld1.8 {<y4=reg128#10%bot},[<input_2=int32#3,: 64] # asm 2: vld1.8 {<y4=d18},[<input_2=r2,: 64] vld1.8 {d18},[r2,: 64] # qhasm: 4x _5y4 = y4 << 2 # asm 1: vshl.i32 >_5y4=reg128#11,<y4=reg128#10,#2 # asm 2: vshl.i32 >_5y4=q10,<y4=q9,#2 vshl.i32 q10,q9,#2 # qhasm: x01 aligned= mem128[input_1];input_1+=16 # asm 1: vld1.8 {>x01=reg128#12%bot->x01=reg128#12%top},[<input_1=int32#2,: 128]! # asm 2: vld1.8 {>x01=d22->x01=d23},[<input_1=r1,: 128]! vld1.8 {d22-d23},[r1,: 128]! 
# qhasm: 4x _5y01 += y01 # asm 1: vadd.i32 >_5y01=reg128#3,<_5y01=reg128#3,<y01=reg128#2 # asm 2: vadd.i32 >_5y01=q2,<_5y01=q2,<y01=q1 vadd.i32 q2,q2,q1 # qhasm: x23 aligned= mem128[input_1];input_1+=16 # asm 1: vld1.8 {>x23=reg128#13%bot->x23=reg128#13%top},[<input_1=int32#2,: 128]! # asm 2: vld1.8 {>x23=d24->x23=d25},[<input_1=r1,: 128]! vld1.8 {d24-d25},[r1,: 128]! # qhasm: 4x _5y23 += y23 # asm 1: vadd.i32 >_5y23=reg128#9,<_5y23=reg128#9,<y23=reg128#4 # asm 2: vadd.i32 >_5y23=q8,<_5y23=q8,<y23=q3 vadd.i32 q8,q8,q3 # qhasm: 4x _5y4 += y4 # asm 1: vadd.i32 >_5y4=reg128#11,<_5y4=reg128#11,<y4=reg128#10 # asm 2: vadd.i32 >_5y4=q10,<_5y4=q10,<y4=q9 vadd.i32 q10,q10,q9 # qhasm: c01 aligned= mem128[input_3];input_3+=16 # asm 1: vld1.8 {>c01=reg128#14%bot->c01=reg128#14%top},[<input_3=int32#4,: 128]! # asm 2: vld1.8 {>c01=d26->c01=d27},[<input_3=r3,: 128]! vld1.8 {d26-d27},[r3,: 128]! # qhasm: 4x x01 += c01 # asm 1: vadd.i32 >x01=reg128#12,<x01=reg128#12,<c01=reg128#14 # asm 2: vadd.i32 >x01=q11,<x01=q11,<c01=q13 vadd.i32 q11,q11,q13 # qhasm: c23 aligned= mem128[input_3];input_3+=16 # asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_3=int32#4,: 128]! # asm 2: vld1.8 {>c23=d26->c23=d27},[<input_3=r3,: 128]! vld1.8 {d26-d27},[r3,: 128]! # qhasm: 4x x23 += c23 # asm 1: vadd.i32 >x23=reg128#13,<x23=reg128#13,<c23=reg128#14 # asm 2: vadd.i32 >x23=q12,<x23=q12,<c23=q13 vadd.i32 q12,q12,q13 # qhasm: x4 aligned= mem64[input_1]x4[1] # asm 1: vld1.8 {<x4=reg128#14%bot},[<input_1=int32#2,: 64] # asm 2: vld1.8 {<x4=d26},[<input_1=r1,: 64] vld1.8 {d26},[r1,: 64] # qhasm: 2x mask unsigned>>=6 # asm 1: vshr.u64 >mask=reg128#1,<mask=reg128#1,#6 # asm 2: vshr.u64 >mask=q0,<mask=q0,#6 vshr.u64 q0,q0,#6 # qhasm: c4 aligned= mem64[input_3]c4[1] # asm 1: vld1.8 {<c4=reg128#15%bot},[<input_3=int32#4,: 64] # asm 2: vld1.8 {<c4=d28},[<input_3=r3,: 64] vld1.8 {d28},[r3,: 64] # qhasm: 4x x4 += c4 # asm 1: vadd.i32 >x4=reg128#14,<x4=reg128#14,<c4=reg128#15 # asm 2: vadd.i32 >x4=q13,<x4=q13,<c4=q14 vadd.i32 q13,q13,q14 # qhasm: r0[0,1] = x01[0] unsigned* y01[0]; r0[2,3] = x01[1] unsigned* y01[1] # asm 1: vmull.u32 >r0=reg128#15,<x01=reg128#12%bot,<y01=reg128#2%bot # asm 2: vmull.u32 >r0=q14,<x01=d22,<y01=d2 vmull.u32 q14,d22,d2 # qhasm: r0[0,1] += x01[2] unsigned* _5y4[0]; r0[2,3] += x01[3] unsigned* _5y4[1] # asm 1: vmlal.u32 <r0=reg128#15,<x01=reg128#12%top,<_5y4=reg128#11%bot # asm 2: vmlal.u32 <r0=q14,<x01=d23,<_5y4=d20 vmlal.u32 q14,d23,d20 # qhasm: r0[0,1] += x23[0] unsigned* _5y23[2]; r0[2,3] += x23[1] unsigned* _5y23[3] # asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%bot,<_5y23=reg128#9%top # asm 2: vmlal.u32 <r0=q14,<x23=d24,<_5y23=d17 vmlal.u32 q14,d24,d17 # qhasm: r0[0,1] += x23[2] unsigned* _5y23[0]; r0[2,3] += x23[3] unsigned* _5y23[1] # asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%top,<_5y23=reg128#9%bot # asm 2: vmlal.u32 <r0=q14,<x23=d25,<_5y23=d16 vmlal.u32 q14,d25,d16 # qhasm: r0[0,1] += x4[0] unsigned* _5y01[2]; r0[2,3] += x4[1] unsigned* _5y01[3] # asm 1: vmlal.u32 <r0=reg128#15,<x4=reg128#14%bot,<_5y01=reg128#3%top # asm 2: vmlal.u32 <r0=q14,<x4=d26,<_5y01=d5 vmlal.u32 q14,d26,d5 # qhasm: r1[0,1] = x01[0] unsigned* y01[2]; r1[2,3] = x01[1] unsigned* y01[3] # asm 1: vmull.u32 >r1=reg128#3,<x01=reg128#12%bot,<y01=reg128#2%top # asm 2: vmull.u32 >r1=q2,<x01=d22,<y01=d3 vmull.u32 q2,d22,d3 # qhasm: r1[0,1] += x01[2] unsigned* y01[0]; r1[2,3] += x01[3] unsigned* y01[1] # asm 1: vmlal.u32 <r1=reg128#3,<x01=reg128#12%top,<y01=reg128#2%bot # asm 2: vmlal.u32 <r1=q2,<x01=d23,<y01=d2 vmlal.u32 
q2,d23,d2 # qhasm: r1[0,1] += x23[0] unsigned* _5y4[0]; r1[2,3] += x23[1] unsigned* _5y4[1] # asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%bot,<_5y4=reg128#11%bot # asm 2: vmlal.u32 <r1=q2,<x23=d24,<_5y4=d20 vmlal.u32 q2,d24,d20 # qhasm: r1[0,1] += x23[2] unsigned* _5y23[2]; r1[2,3] += x23[3] unsigned* _5y23[3] # asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%top,<_5y23=reg128#9%top # asm 2: vmlal.u32 <r1=q2,<x23=d25,<_5y23=d17 vmlal.u32 q2,d25,d17 # qhasm: r1[0,1] += x4[0] unsigned* _5y23[0]; r1[2,3] += x4[1] unsigned* _5y23[1] # asm 1: vmlal.u32 <r1=reg128#3,<x4=reg128#14%bot,<_5y23=reg128#9%bot # asm 2: vmlal.u32 <r1=q2,<x4=d26,<_5y23=d16 vmlal.u32 q2,d26,d16 # qhasm: r2[0,1] = x01[0] unsigned* y23[0]; r2[2,3] = x01[1] unsigned* y23[1] # asm 1: vmull.u32 >r2=reg128#16,<x01=reg128#12%bot,<y23=reg128#4%bot # asm 2: vmull.u32 >r2=q15,<x01=d22,<y23=d6 vmull.u32 q15,d22,d6 # qhasm: r2[0,1] += x01[2] unsigned* y01[2]; r2[2,3] += x01[3] unsigned* y01[3] # asm 1: vmlal.u32 <r2=reg128#16,<x01=reg128#12%top,<y01=reg128#2%top # asm 2: vmlal.u32 <r2=q15,<x01=d23,<y01=d3 vmlal.u32 q15,d23,d3 # qhasm: r2[0,1] += x23[0] unsigned* y01[0]; r2[2,3] += x23[1] unsigned* y01[1] # asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%bot,<y01=reg128#2%bot # asm 2: vmlal.u32 <r2=q15,<x23=d24,<y01=d2 vmlal.u32 q15,d24,d2 # qhasm: r2[0,1] += x23[2] unsigned* _5y4[0]; r2[2,3] += x23[3] unsigned* _5y4[1] # asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%top,<_5y4=reg128#11%bot # asm 2: vmlal.u32 <r2=q15,<x23=d25,<_5y4=d20 vmlal.u32 q15,d25,d20 # qhasm: r2[0,1] += x4[0] unsigned* _5y23[2]; r2[2,3] += x4[1] unsigned* _5y23[3] # asm 1: vmlal.u32 <r2=reg128#16,<x4=reg128#14%bot,<_5y23=reg128#9%top # asm 2: vmlal.u32 <r2=q15,<x4=d26,<_5y23=d17 vmlal.u32 q15,d26,d17 # qhasm: r3[0,1] = x01[0] unsigned* y23[2]; r3[2,3] = x01[1] unsigned* y23[3] # asm 1: vmull.u32 >r3=reg128#9,<x01=reg128#12%bot,<y23=reg128#4%top # asm 2: vmull.u32 >r3=q8,<x01=d22,<y23=d7 vmull.u32 q8,d22,d7 # qhasm: r3[0,1] += x01[2] unsigned* y23[0]; r3[2,3] += x01[3] unsigned* y23[1] # asm 1: vmlal.u32 <r3=reg128#9,<x01=reg128#12%top,<y23=reg128#4%bot # asm 2: vmlal.u32 <r3=q8,<x01=d23,<y23=d6 vmlal.u32 q8,d23,d6 # qhasm: r3[0,1] += x23[0] unsigned* y01[2]; r3[2,3] += x23[1] unsigned* y01[3] # asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%bot,<y01=reg128#2%top # asm 2: vmlal.u32 <r3=q8,<x23=d24,<y01=d3 vmlal.u32 q8,d24,d3 # qhasm: r3[0,1] += x23[2] unsigned* y01[0]; r3[2,3] += x23[3] unsigned* y01[1] # asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%top,<y01=reg128#2%bot # asm 2: vmlal.u32 <r3=q8,<x23=d25,<y01=d2 vmlal.u32 q8,d25,d2 # qhasm: r3[0,1] += x4[0] unsigned* _5y4[0]; r3[2,3] += x4[1] unsigned* _5y4[1] # asm 1: vmlal.u32 <r3=reg128#9,<x4=reg128#14%bot,<_5y4=reg128#11%bot # asm 2: vmlal.u32 <r3=q8,<x4=d26,<_5y4=d20 vmlal.u32 q8,d26,d20 # qhasm: r4[0,1] = x01[0] unsigned* y4[0]; r4[2,3] = x01[1] unsigned* y4[1] # asm 1: vmull.u32 >r4=reg128#10,<x01=reg128#12%bot,<y4=reg128#10%bot # asm 2: vmull.u32 >r4=q9,<x01=d22,<y4=d18 vmull.u32 q9,d22,d18 # qhasm: r4[0,1] += x01[2] unsigned* y23[2]; r4[2,3] += x01[3] unsigned* y23[3] # asm 1: vmlal.u32 <r4=reg128#10,<x01=reg128#12%top,<y23=reg128#4%top # asm 2: vmlal.u32 <r4=q9,<x01=d23,<y23=d7 vmlal.u32 q9,d23,d7 # qhasm: r4[0,1] += x23[0] unsigned* y23[0]; r4[2,3] += x23[1] unsigned* y23[1] # asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%bot,<y23=reg128#4%bot # asm 2: vmlal.u32 <r4=q9,<x23=d24,<y23=d6 vmlal.u32 q9,d24,d6 # qhasm: r4[0,1] += x23[2] unsigned* y01[2]; r4[2,3] += x23[3] unsigned* y01[3] # asm 1: vmlal.u32 
<r4=reg128#10,<x23=reg128#13%top,<y01=reg128#2%top # asm 2: vmlal.u32 <r4=q9,<x23=d25,<y01=d3 vmlal.u32 q9,d25,d3 # qhasm: r4[0,1] += x4[0] unsigned* y01[0]; r4[2,3] += x4[1] unsigned* y01[1] # asm 1: vmlal.u32 <r4=reg128#10,<x4=reg128#14%bot,<y01=reg128#2%bot # asm 2: vmlal.u32 <r4=q9,<x4=d26,<y01=d2 vmlal.u32 q9,d26,d2 # qhasm: 2x t1 = r0 unsigned>> 26 # asm 1: vshr.u64 >t1=reg128#2,<r0=reg128#15,#26 # asm 2: vshr.u64 >t1=q1,<r0=q14,#26 vshr.u64 q1,q14,#26 # qhasm: r0 &= mask # asm 1: vand >r0=reg128#4,<r0=reg128#15,<mask=reg128#1 # asm 2: vand >r0=q3,<r0=q14,<mask=q0 vand q3,q14,q0 # qhasm: 2x r1 += t1 # asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#3,<t1=reg128#2 # asm 2: vadd.i64 >r1=q1,<r1=q2,<t1=q1 vadd.i64 q1,q2,q1 # qhasm: 2x t4 = r3 unsigned>> 26 # asm 1: vshr.u64 >t4=reg128#3,<r3=reg128#9,#26 # asm 2: vshr.u64 >t4=q2,<r3=q8,#26 vshr.u64 q2,q8,#26 # qhasm: r3 &= mask # asm 1: vand >r3=reg128#9,<r3=reg128#9,<mask=reg128#1 # asm 2: vand >r3=q8,<r3=q8,<mask=q0 vand q8,q8,q0 # qhasm: 2x r4 += t4 # asm 1: vadd.i64 >r4=reg128#3,<r4=reg128#10,<t4=reg128#3 # asm 2: vadd.i64 >r4=q2,<r4=q9,<t4=q2 vadd.i64 q2,q9,q2 # qhasm: 2x t2 = r1 unsigned>> 26 # asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#2,#26 # asm 2: vshr.u64 >t2=q9,<r1=q1,#26 vshr.u64 q9,q1,#26 # qhasm: r1 &= mask # asm 1: vand >r1=reg128#2,<r1=reg128#2,<mask=reg128#1 # asm 2: vand >r1=q1,<r1=q1,<mask=q0 vand q1,q1,q0 # qhasm: 2x t0 = r4 unsigned>> 26 # asm 1: vshr.u64 >t0=reg128#11,<r4=reg128#3,#26 # asm 2: vshr.u64 >t0=q10,<r4=q2,#26 vshr.u64 q10,q2,#26 # qhasm: 2x r2 += t2 # asm 1: vadd.i64 >r2=reg128#10,<r2=reg128#16,<t2=reg128#10 # asm 2: vadd.i64 >r2=q9,<r2=q15,<t2=q9 vadd.i64 q9,q15,q9 # qhasm: r4 &= mask # asm 1: vand >r4=reg128#3,<r4=reg128#3,<mask=reg128#1 # asm 2: vand >r4=q2,<r4=q2,<mask=q0 vand q2,q2,q0 # qhasm: 2x r0 += t0 # asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11 # asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10 vadd.i64 q3,q3,q10 # qhasm: 2x t0 <<= 2 # asm 1: vshl.i64 >t0=reg128#11,<t0=reg128#11,#2 # asm 2: vshl.i64 >t0=q10,<t0=q10,#2 vshl.i64 q10,q10,#2 # qhasm: 2x t3 = r2 unsigned>> 26 # asm 1: vshr.u64 >t3=reg128#12,<r2=reg128#10,#26 # asm 2: vshr.u64 >t3=q11,<r2=q9,#26 vshr.u64 q11,q9,#26 # qhasm: 2x r0 += t0 # asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11 # asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10 vadd.i64 q3,q3,q10 # qhasm: x23 = r2 & mask # asm 1: vand >x23=reg128#10,<r2=reg128#10,<mask=reg128#1 # asm 2: vand >x23=q9,<r2=q9,<mask=q0 vand q9,q9,q0 # qhasm: 2x r3 += t3 # asm 1: vadd.i64 >r3=reg128#9,<r3=reg128#9,<t3=reg128#12 # asm 2: vadd.i64 >r3=q8,<r3=q8,<t3=q11 vadd.i64 q8,q8,q11 # qhasm: 2x t1 = r0 unsigned>> 26 # asm 1: vshr.u64 >t1=reg128#11,<r0=reg128#4,#26 # asm 2: vshr.u64 >t1=q10,<r0=q3,#26 vshr.u64 q10,q3,#26 # qhasm: x23 = x23[0,2,1,3] # asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top # asm 2: vtrn.32 <x23=d18,<x23=d19 vtrn.32 d18,d19 # qhasm: x01 = r0 & mask # asm 1: vand >x01=reg128#4,<r0=reg128#4,<mask=reg128#1 # asm 2: vand >x01=q3,<r0=q3,<mask=q0 vand q3,q3,q0 # qhasm: 2x r1 += t1 # asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#2,<t1=reg128#11 # asm 2: vadd.i64 >r1=q1,<r1=q1,<t1=q10 vadd.i64 q1,q1,q10 # qhasm: 2x t4 = r3 unsigned>> 26 # asm 1: vshr.u64 >t4=reg128#11,<r3=reg128#9,#26 # asm 2: vshr.u64 >t4=q10,<r3=q8,#26 vshr.u64 q10,q8,#26 # qhasm: x01 = x01[0,2,1,3] # asm 1: vtrn.32 <x01=reg128#4%bot,<x01=reg128#4%top # asm 2: vtrn.32 <x01=d6,<x01=d7 vtrn.32 d6,d7 # qhasm: r3 &= mask # asm 1: vand >r3=reg128#1,<r3=reg128#9,<mask=reg128#1 # asm 2: vand >r3=q0,<r3=q8,<mask=q0 vand q0,q8,q0 # 
qhasm: r1 = r1[0,2,1,3] # asm 1: vtrn.32 <r1=reg128#2%bot,<r1=reg128#2%top # asm 2: vtrn.32 <r1=d2,<r1=d3 vtrn.32 d2,d3 # qhasm: 2x x4 = r4 + t4 # asm 1: vadd.i64 >x4=reg128#3,<r4=reg128#3,<t4=reg128#11 # asm 2: vadd.i64 >x4=q2,<r4=q2,<t4=q10 vadd.i64 q2,q2,q10 # qhasm: r3 = r3[0,2,1,3] # asm 1: vtrn.32 <r3=reg128#1%bot,<r3=reg128#1%top # asm 2: vtrn.32 <r3=d0,<r3=d1 vtrn.32 d0,d1 # qhasm: x01 = x01[0,1] r1[0,1] # asm 1: vext.32 <x01=reg128#4%top,<r1=reg128#2%bot,<r1=reg128#2%bot,#0 # asm 2: vext.32 <x01=d7,<r1=d2,<r1=d2,#0 vext.32 d7,d2,d2,#0 # qhasm: x23 = x23[0,1] r3[0,1] # asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#1%bot,<r3=reg128#1%bot,#0 # asm 2: vext.32 <x23=d19,<r3=d0,<r3=d0,#0 vext.32 d19,d0,d0,#0 # qhasm: x4 = x4[0,2,1,3] # asm 1: vtrn.32 <x4=reg128#3%bot,<x4=reg128#3%top # asm 2: vtrn.32 <x4=d4,<x4=d5 vtrn.32 d4,d5 # qhasm: mem128[input_0] aligned= x01;input_0+=16 # asm 1: vst1.8 {<x01=reg128#4%bot-<x01=reg128#4%top},[<input_0=int32#1,: 128]! # asm 2: vst1.8 {<x01=d6-<x01=d7},[<input_0=r0,: 128]! vst1.8 {d6-d7},[r0,: 128]! # qhasm: mem128[input_0] aligned= x23;input_0+=16 # asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1,: 128]! # asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0,: 128]! vst1.8 {d18-d19},[r0,: 128]! # qhasm: mem64[input_0] aligned= x4[0] # asm 1: vst1.8 <x4=reg128#3%bot,[<input_0=int32#1,: 64] # asm 2: vst1.8 <x4=d4,[<input_0=r0,: 64] vst1.8 d4,[r0,: 64] # qhasm: return add sp,sp,#0 bx lr #endif /* !OPENSSL_NO_ASM && OPENSSL_ARM && __ELF__ */
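The qhasm-annotated NEON code above evaluates Poly1305 in radix 2^26: each 130-bit value is held as five 26-bit limbs, products that wrap past 2^130 are folded back in multiplied by 5 (the precomputed "5y12"/"5y34" and "_5y01"/"_5y23"/"_5y4" vectors), and carries are propagated so every limb stays below 2^26. The following is a minimal scalar C sketch of that same multiply-and-reduce step, not the vectorized two-block routine itself; poly1305_mulmod and its limb layout are illustrative only.

#include <stdint.h>

/* Scalar model of one radix-2^26 Poly1305 step: h = (h * r) mod (2^130 - 5),
 * with h and r each held as five 26-bit limbs. */
void poly1305_mulmod(uint32_t h[5], const uint32_t r[5]) {
    /* Limbs that wrap past 2^130 come back multiplied by 5, so 5*r[i] is
     * precomputed, matching the 5y*/_5y* vectors in the code above. */
    uint32_t s1 = r[1] * 5, s2 = r[2] * 5, s3 = r[3] * 5, s4 = r[4] * 5;
    uint64_t d0, d1, d2, d3, d4, c;

    d0 = (uint64_t)h[0]*r[0] + (uint64_t)h[1]*s4 + (uint64_t)h[2]*s3 +
         (uint64_t)h[3]*s2 + (uint64_t)h[4]*s1;
    d1 = (uint64_t)h[0]*r[1] + (uint64_t)h[1]*r[0] + (uint64_t)h[2]*s4 +
         (uint64_t)h[3]*s3 + (uint64_t)h[4]*s2;
    d2 = (uint64_t)h[0]*r[2] + (uint64_t)h[1]*r[1] + (uint64_t)h[2]*r[0] +
         (uint64_t)h[3]*s4 + (uint64_t)h[4]*s3;
    d3 = (uint64_t)h[0]*r[3] + (uint64_t)h[1]*r[2] + (uint64_t)h[2]*r[1] +
         (uint64_t)h[3]*r[0] + (uint64_t)h[4]*s4;
    d4 = (uint64_t)h[0]*r[4] + (uint64_t)h[1]*r[3] + (uint64_t)h[2]*r[2] +
         (uint64_t)h[3]*r[1] + (uint64_t)h[4]*r[0];

    /* Carry chain: keep each limb below 2^26 and fold the top carry back
     * into limb 0 as carry*5 (the "t0 <<= 2; r0 += t0" trick above,
     * i.e. c*4 + c = c*5). */
    c = d0 >> 26; h[0] = (uint32_t)d0 & 0x3ffffff; d1 += c;
    c = d1 >> 26; h[1] = (uint32_t)d1 & 0x3ffffff; d2 += c;
    c = d2 >> 26; h[2] = (uint32_t)d2 & 0x3ffffff; d3 += c;
    c = d3 >> 26; h[3] = (uint32_t)d3 & 0x3ffffff; d4 += c;
    c = d4 >> 26; h[4] = (uint32_t)d4 & 0x3ffffff;
    h[0] += (uint32_t)(c * 5);
    c = h[0] >> 26; h[0] &= 0x3ffffff; h[1] += (uint32_t)c;
}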
marvin-hansen/iggy-streaming-system
41,612
thirdparty/crates/ring-0.17.9/crypto/curve25519/asm/x25519-asm-arm.S
/* Copyright (c) 2015, Google Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This file is taken from crypto_scalarmult/curve25519/neon2/scalarmult.s in * SUPERCOP 20141124 (http://bench.cr.yp.to/supercop.html). That code is public * domain licensed but the standard ISC license is included above to keep * licensing simple. */ #include <ring-core/asm_base.h> #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__) .fpu neon .text .align 4 .global x25519_NEON .hidden x25519_NEON .type x25519_NEON, %function x25519_NEON: vpush {q4,q5,q6,q7} mov r12,sp sub sp,sp,#736 and sp,sp,#0xffffffe0 strd r4,[sp,#0] strd r6,[sp,#8] strd r8,[sp,#16] strd r10,[sp,#24] str r12,[sp,#480] str r14,[sp,#484] mov r0,r0 mov r1,r1 mov r2,r2 add r3,sp,#32 ldr r4,=0 ldr r5,=254 vmov.i32 q0,#1 vshr.u64 q1,q0,#7 vshr.u64 q0,q0,#8 vmov.i32 d4,#19 vmov.i32 d5,#38 add r6,sp,#512 vst1.8 {d2-d3},[r6,: 128] add r6,sp,#528 vst1.8 {d0-d1},[r6,: 128] add r6,sp,#544 vst1.8 {d4-d5},[r6,: 128] add r6,r3,#0 vmov.i32 q2,#0 vst1.8 {d4-d5},[r6,: 128]! vst1.8 {d4-d5},[r6,: 128]! vst1.8 d4,[r6,: 64] add r6,r3,#0 ldr r7,=960 sub r7,r7,#2 neg r7,r7 sub r7,r7,r7,LSL #7 str r7,[r6] add r6,sp,#704 vld1.8 {d4-d5},[r1]! vld1.8 {d6-d7},[r1] vst1.8 {d4-d5},[r6,: 128]! 
vst1.8 {d6-d7},[r6,: 128] sub r1,r6,#16 ldrb r6,[r1] and r6,r6,#248 strb r6,[r1] ldrb r6,[r1,#31] and r6,r6,#127 orr r6,r6,#64 strb r6,[r1,#31] vmov.i64 q2,#0xffffffff vshr.u64 q3,q2,#7 vshr.u64 q2,q2,#6 vld1.8 {d8},[r2] vld1.8 {d10},[r2] add r2,r2,#6 vld1.8 {d12},[r2] vld1.8 {d14},[r2] add r2,r2,#6 vld1.8 {d16},[r2] add r2,r2,#4 vld1.8 {d18},[r2] vld1.8 {d20},[r2] add r2,r2,#6 vld1.8 {d22},[r2] add r2,r2,#2 vld1.8 {d24},[r2] vld1.8 {d26},[r2] vshr.u64 q5,q5,#26 vshr.u64 q6,q6,#3 vshr.u64 q7,q7,#29 vshr.u64 q8,q8,#6 vshr.u64 q10,q10,#25 vshr.u64 q11,q11,#3 vshr.u64 q12,q12,#12 vshr.u64 q13,q13,#38 vand q4,q4,q2 vand q6,q6,q2 vand q8,q8,q2 vand q10,q10,q2 vand q2,q12,q2 vand q5,q5,q3 vand q7,q7,q3 vand q9,q9,q3 vand q11,q11,q3 vand q3,q13,q3 add r2,r3,#48 vadd.i64 q12,q4,q1 vadd.i64 q13,q10,q1 vshr.s64 q12,q12,#26 vshr.s64 q13,q13,#26 vadd.i64 q5,q5,q12 vshl.i64 q12,q12,#26 vadd.i64 q14,q5,q0 vadd.i64 q11,q11,q13 vshl.i64 q13,q13,#26 vadd.i64 q15,q11,q0 vsub.i64 q4,q4,q12 vshr.s64 q12,q14,#25 vsub.i64 q10,q10,q13 vshr.s64 q13,q15,#25 vadd.i64 q6,q6,q12 vshl.i64 q12,q12,#25 vadd.i64 q14,q6,q1 vadd.i64 q2,q2,q13 vsub.i64 q5,q5,q12 vshr.s64 q12,q14,#26 vshl.i64 q13,q13,#25 vadd.i64 q14,q2,q1 vadd.i64 q7,q7,q12 vshl.i64 q12,q12,#26 vadd.i64 q15,q7,q0 vsub.i64 q11,q11,q13 vshr.s64 q13,q14,#26 vsub.i64 q6,q6,q12 vshr.s64 q12,q15,#25 vadd.i64 q3,q3,q13 vshl.i64 q13,q13,#26 vadd.i64 q14,q3,q0 vadd.i64 q8,q8,q12 vshl.i64 q12,q12,#25 vadd.i64 q15,q8,q1 add r2,r2,#8 vsub.i64 q2,q2,q13 vshr.s64 q13,q14,#25 vsub.i64 q7,q7,q12 vshr.s64 q12,q15,#26 vadd.i64 q14,q13,q13 vadd.i64 q9,q9,q12 vtrn.32 d12,d14 vshl.i64 q12,q12,#26 vtrn.32 d13,d15 vadd.i64 q0,q9,q0 vadd.i64 q4,q4,q14 vst1.8 d12,[r2,: 64]! vshl.i64 q6,q13,#4 vsub.i64 q7,q8,q12 vshr.s64 q0,q0,#25 vadd.i64 q4,q4,q6 vadd.i64 q6,q10,q0 vshl.i64 q0,q0,#25 vadd.i64 q8,q6,q1 vadd.i64 q4,q4,q13 vshl.i64 q10,q13,#25 vadd.i64 q1,q4,q1 vsub.i64 q0,q9,q0 vshr.s64 q8,q8,#26 vsub.i64 q3,q3,q10 vtrn.32 d14,d0 vshr.s64 q1,q1,#26 vtrn.32 d15,d1 vadd.i64 q0,q11,q8 vst1.8 d14,[r2,: 64] vshl.i64 q7,q8,#26 vadd.i64 q5,q5,q1 vtrn.32 d4,d6 vshl.i64 q1,q1,#26 vtrn.32 d5,d7 vsub.i64 q3,q6,q7 add r2,r2,#16 vsub.i64 q1,q4,q1 vst1.8 d4,[r2,: 64] vtrn.32 d6,d0 vtrn.32 d7,d1 sub r2,r2,#8 vtrn.32 d2,d10 vtrn.32 d3,d11 vst1.8 d6,[r2,: 64] sub r2,r2,#24 vst1.8 d2,[r2,: 64] add r2,r3,#96 vmov.i32 q0,#0 vmov.i64 d2,#0xff vmov.i64 d3,#0 vshr.u32 q1,q1,#7 vst1.8 {d2-d3},[r2,: 128]! vst1.8 {d0-d1},[r2,: 128]! vst1.8 d0,[r2,: 64] add r2,r3,#144 vmov.i32 q0,#0 vst1.8 {d0-d1},[r2,: 128]! vst1.8 {d0-d1},[r2,: 128]! vst1.8 d0,[r2,: 64] add r2,r3,#240 vmov.i32 q0,#0 vmov.i64 d2,#0xff vmov.i64 d3,#0 vshr.u32 q1,q1,#7 vst1.8 {d2-d3},[r2,: 128]! vst1.8 {d0-d1},[r2,: 128]! vst1.8 d0,[r2,: 64] add r2,r3,#48 add r6,r3,#192 vld1.8 {d0-d1},[r2,: 128]! vld1.8 {d2-d3},[r2,: 128]! vld1.8 {d4},[r2,: 64] vst1.8 {d0-d1},[r6,: 128]! vst1.8 {d2-d3},[r6,: 128]! vst1.8 d4,[r6,: 64] ._mainloop: mov r2,r5,LSR #3 and r6,r5,#7 ldrb r2,[r1,r2] mov r2,r2,LSR r6 and r2,r2,#1 str r5,[sp,#488] eor r4,r4,r2 str r2,[sp,#492] neg r2,r4 add r4,r3,#96 add r5,r3,#192 add r6,r3,#144 vld1.8 {d8-d9},[r4,: 128]! add r7,r3,#240 vld1.8 {d10-d11},[r5,: 128]! veor q6,q4,q5 vld1.8 {d14-d15},[r6,: 128]! vdup.i32 q8,r2 vld1.8 {d18-d19},[r7,: 128]! veor q10,q7,q9 vld1.8 {d22-d23},[r4,: 128]! vand q6,q6,q8 vld1.8 {d24-d25},[r5,: 128]! vand q10,q10,q8 vld1.8 {d26-d27},[r6,: 128]! veor q4,q4,q6 vld1.8 {d28-d29},[r7,: 128]! 
veor q5,q5,q6 vld1.8 {d0},[r4,: 64] veor q6,q7,q10 vld1.8 {d2},[r5,: 64] veor q7,q9,q10 vld1.8 {d4},[r6,: 64] veor q9,q11,q12 vld1.8 {d6},[r7,: 64] veor q10,q0,q1 sub r2,r4,#32 vand q9,q9,q8 sub r4,r5,#32 vand q10,q10,q8 sub r5,r6,#32 veor q11,q11,q9 sub r6,r7,#32 veor q0,q0,q10 veor q9,q12,q9 veor q1,q1,q10 veor q10,q13,q14 veor q12,q2,q3 vand q10,q10,q8 vand q8,q12,q8 veor q12,q13,q10 veor q2,q2,q8 veor q10,q14,q10 veor q3,q3,q8 vadd.i32 q8,q4,q6 vsub.i32 q4,q4,q6 vst1.8 {d16-d17},[r2,: 128]! vadd.i32 q6,q11,q12 vst1.8 {d8-d9},[r5,: 128]! vsub.i32 q4,q11,q12 vst1.8 {d12-d13},[r2,: 128]! vadd.i32 q6,q0,q2 vst1.8 {d8-d9},[r5,: 128]! vsub.i32 q0,q0,q2 vst1.8 d12,[r2,: 64] vadd.i32 q2,q5,q7 vst1.8 d0,[r5,: 64] vsub.i32 q0,q5,q7 vst1.8 {d4-d5},[r4,: 128]! vadd.i32 q2,q9,q10 vst1.8 {d0-d1},[r6,: 128]! vsub.i32 q0,q9,q10 vst1.8 {d4-d5},[r4,: 128]! vadd.i32 q2,q1,q3 vst1.8 {d0-d1},[r6,: 128]! vsub.i32 q0,q1,q3 vst1.8 d4,[r4,: 64] vst1.8 d0,[r6,: 64] add r2,sp,#544 add r4,r3,#96 add r5,r3,#144 vld1.8 {d0-d1},[r2,: 128] vld1.8 {d2-d3},[r4,: 128]! vld1.8 {d4-d5},[r5,: 128]! vzip.i32 q1,q2 vld1.8 {d6-d7},[r4,: 128]! vld1.8 {d8-d9},[r5,: 128]! vshl.i32 q5,q1,#1 vzip.i32 q3,q4 vshl.i32 q6,q2,#1 vld1.8 {d14},[r4,: 64] vshl.i32 q8,q3,#1 vld1.8 {d15},[r5,: 64] vshl.i32 q9,q4,#1 vmul.i32 d21,d7,d1 vtrn.32 d14,d15 vmul.i32 q11,q4,q0 vmul.i32 q0,q7,q0 vmull.s32 q12,d2,d2 vmlal.s32 q12,d11,d1 vmlal.s32 q12,d12,d0 vmlal.s32 q12,d13,d23 vmlal.s32 q12,d16,d22 vmlal.s32 q12,d7,d21 vmull.s32 q10,d2,d11 vmlal.s32 q10,d4,d1 vmlal.s32 q10,d13,d0 vmlal.s32 q10,d6,d23 vmlal.s32 q10,d17,d22 vmull.s32 q13,d10,d4 vmlal.s32 q13,d11,d3 vmlal.s32 q13,d13,d1 vmlal.s32 q13,d16,d0 vmlal.s32 q13,d17,d23 vmlal.s32 q13,d8,d22 vmull.s32 q1,d10,d5 vmlal.s32 q1,d11,d4 vmlal.s32 q1,d6,d1 vmlal.s32 q1,d17,d0 vmlal.s32 q1,d8,d23 vmull.s32 q14,d10,d6 vmlal.s32 q14,d11,d13 vmlal.s32 q14,d4,d4 vmlal.s32 q14,d17,d1 vmlal.s32 q14,d18,d0 vmlal.s32 q14,d9,d23 vmull.s32 q11,d10,d7 vmlal.s32 q11,d11,d6 vmlal.s32 q11,d12,d5 vmlal.s32 q11,d8,d1 vmlal.s32 q11,d19,d0 vmull.s32 q15,d10,d8 vmlal.s32 q15,d11,d17 vmlal.s32 q15,d12,d6 vmlal.s32 q15,d13,d5 vmlal.s32 q15,d19,d1 vmlal.s32 q15,d14,d0 vmull.s32 q2,d10,d9 vmlal.s32 q2,d11,d8 vmlal.s32 q2,d12,d7 vmlal.s32 q2,d13,d6 vmlal.s32 q2,d14,d1 vmull.s32 q0,d15,d1 vmlal.s32 q0,d10,d14 vmlal.s32 q0,d11,d19 vmlal.s32 q0,d12,d8 vmlal.s32 q0,d13,d17 vmlal.s32 q0,d6,d6 add r2,sp,#512 vld1.8 {d18-d19},[r2,: 128] vmull.s32 q3,d16,d7 vmlal.s32 q3,d10,d15 vmlal.s32 q3,d11,d14 vmlal.s32 q3,d12,d9 vmlal.s32 q3,d13,d8 add r2,sp,#528 vld1.8 {d8-d9},[r2,: 128] vadd.i64 q5,q12,q9 vadd.i64 q6,q15,q9 vshr.s64 q5,q5,#26 vshr.s64 q6,q6,#26 vadd.i64 q7,q10,q5 vshl.i64 q5,q5,#26 vadd.i64 q8,q7,q4 vadd.i64 q2,q2,q6 vshl.i64 q6,q6,#26 vadd.i64 q10,q2,q4 vsub.i64 q5,q12,q5 vshr.s64 q8,q8,#25 vsub.i64 q6,q15,q6 vshr.s64 q10,q10,#25 vadd.i64 q12,q13,q8 vshl.i64 q8,q8,#25 vadd.i64 q13,q12,q9 vadd.i64 q0,q0,q10 vsub.i64 q7,q7,q8 vshr.s64 q8,q13,#26 vshl.i64 q10,q10,#25 vadd.i64 q13,q0,q9 vadd.i64 q1,q1,q8 vshl.i64 q8,q8,#26 vadd.i64 q15,q1,q4 vsub.i64 q2,q2,q10 vshr.s64 q10,q13,#26 vsub.i64 q8,q12,q8 vshr.s64 q12,q15,#25 vadd.i64 q3,q3,q10 vshl.i64 q10,q10,#26 vadd.i64 q13,q3,q4 vadd.i64 q14,q14,q12 add r2,r3,#288 vshl.i64 q12,q12,#25 add r4,r3,#336 vadd.i64 q15,q14,q9 add r2,r2,#8 vsub.i64 q0,q0,q10 add r4,r4,#8 vshr.s64 q10,q13,#25 vsub.i64 q1,q1,q12 vshr.s64 q12,q15,#26 vadd.i64 q13,q10,q10 vadd.i64 q11,q11,q12 vtrn.32 d16,d2 vshl.i64 q12,q12,#26 vtrn.32 d17,d3 vadd.i64 q1,q11,q4 vadd.i64 q4,q5,q13 vst1.8 d16,[r2,: 64]! 
vshl.i64 q5,q10,#4 vst1.8 d17,[r4,: 64]! vsub.i64 q8,q14,q12 vshr.s64 q1,q1,#25 vadd.i64 q4,q4,q5 vadd.i64 q5,q6,q1 vshl.i64 q1,q1,#25 vadd.i64 q6,q5,q9 vadd.i64 q4,q4,q10 vshl.i64 q10,q10,#25 vadd.i64 q9,q4,q9 vsub.i64 q1,q11,q1 vshr.s64 q6,q6,#26 vsub.i64 q3,q3,q10 vtrn.32 d16,d2 vshr.s64 q9,q9,#26 vtrn.32 d17,d3 vadd.i64 q1,q2,q6 vst1.8 d16,[r2,: 64] vshl.i64 q2,q6,#26 vst1.8 d17,[r4,: 64] vadd.i64 q6,q7,q9 vtrn.32 d0,d6 vshl.i64 q7,q9,#26 vtrn.32 d1,d7 vsub.i64 q2,q5,q2 add r2,r2,#16 vsub.i64 q3,q4,q7 vst1.8 d0,[r2,: 64] add r4,r4,#16 vst1.8 d1,[r4,: 64] vtrn.32 d4,d2 vtrn.32 d5,d3 sub r2,r2,#8 sub r4,r4,#8 vtrn.32 d6,d12 vtrn.32 d7,d13 vst1.8 d4,[r2,: 64] vst1.8 d5,[r4,: 64] sub r2,r2,#24 sub r4,r4,#24 vst1.8 d6,[r2,: 64] vst1.8 d7,[r4,: 64] add r2,r3,#240 add r4,r3,#96 vld1.8 {d0-d1},[r4,: 128]! vld1.8 {d2-d3},[r4,: 128]! vld1.8 {d4},[r4,: 64] add r4,r3,#144 vld1.8 {d6-d7},[r4,: 128]! vtrn.32 q0,q3 vld1.8 {d8-d9},[r4,: 128]! vshl.i32 q5,q0,#4 vtrn.32 q1,q4 vshl.i32 q6,q3,#4 vadd.i32 q5,q5,q0 vadd.i32 q6,q6,q3 vshl.i32 q7,q1,#4 vld1.8 {d5},[r4,: 64] vshl.i32 q8,q4,#4 vtrn.32 d4,d5 vadd.i32 q7,q7,q1 vadd.i32 q8,q8,q4 vld1.8 {d18-d19},[r2,: 128]! vshl.i32 q10,q2,#4 vld1.8 {d22-d23},[r2,: 128]! vadd.i32 q10,q10,q2 vld1.8 {d24},[r2,: 64] vadd.i32 q5,q5,q0 add r2,r3,#192 vld1.8 {d26-d27},[r2,: 128]! vadd.i32 q6,q6,q3 vld1.8 {d28-d29},[r2,: 128]! vadd.i32 q8,q8,q4 vld1.8 {d25},[r2,: 64] vadd.i32 q10,q10,q2 vtrn.32 q9,q13 vadd.i32 q7,q7,q1 vadd.i32 q5,q5,q0 vtrn.32 q11,q14 vadd.i32 q6,q6,q3 add r2,sp,#560 vadd.i32 q10,q10,q2 vtrn.32 d24,d25 vst1.8 {d12-d13},[r2,: 128] vshl.i32 q6,q13,#1 add r2,sp,#576 vst1.8 {d20-d21},[r2,: 128] vshl.i32 q10,q14,#1 add r2,sp,#592 vst1.8 {d12-d13},[r2,: 128] vshl.i32 q15,q12,#1 vadd.i32 q8,q8,q4 vext.32 d10,d31,d30,#0 vadd.i32 q7,q7,q1 add r2,sp,#608 vst1.8 {d16-d17},[r2,: 128] vmull.s32 q8,d18,d5 vmlal.s32 q8,d26,d4 vmlal.s32 q8,d19,d9 vmlal.s32 q8,d27,d3 vmlal.s32 q8,d22,d8 vmlal.s32 q8,d28,d2 vmlal.s32 q8,d23,d7 vmlal.s32 q8,d29,d1 vmlal.s32 q8,d24,d6 vmlal.s32 q8,d25,d0 add r2,sp,#624 vst1.8 {d14-d15},[r2,: 128] vmull.s32 q2,d18,d4 vmlal.s32 q2,d12,d9 vmlal.s32 q2,d13,d8 vmlal.s32 q2,d19,d3 vmlal.s32 q2,d22,d2 vmlal.s32 q2,d23,d1 vmlal.s32 q2,d24,d0 add r2,sp,#640 vst1.8 {d20-d21},[r2,: 128] vmull.s32 q7,d18,d9 vmlal.s32 q7,d26,d3 vmlal.s32 q7,d19,d8 vmlal.s32 q7,d27,d2 vmlal.s32 q7,d22,d7 vmlal.s32 q7,d28,d1 vmlal.s32 q7,d23,d6 vmlal.s32 q7,d29,d0 add r2,sp,#656 vst1.8 {d10-d11},[r2,: 128] vmull.s32 q5,d18,d3 vmlal.s32 q5,d19,d2 vmlal.s32 q5,d22,d1 vmlal.s32 q5,d23,d0 vmlal.s32 q5,d12,d8 add r2,sp,#672 vst1.8 {d16-d17},[r2,: 128] vmull.s32 q4,d18,d8 vmlal.s32 q4,d26,d2 vmlal.s32 q4,d19,d7 vmlal.s32 q4,d27,d1 vmlal.s32 q4,d22,d6 vmlal.s32 q4,d28,d0 vmull.s32 q8,d18,d7 vmlal.s32 q8,d26,d1 vmlal.s32 q8,d19,d6 vmlal.s32 q8,d27,d0 add r2,sp,#576 vld1.8 {d20-d21},[r2,: 128] vmlal.s32 q7,d24,d21 vmlal.s32 q7,d25,d20 vmlal.s32 q4,d23,d21 vmlal.s32 q4,d29,d20 vmlal.s32 q8,d22,d21 vmlal.s32 q8,d28,d20 vmlal.s32 q5,d24,d20 add r2,sp,#576 vst1.8 {d14-d15},[r2,: 128] vmull.s32 q7,d18,d6 vmlal.s32 q7,d26,d0 add r2,sp,#656 vld1.8 {d30-d31},[r2,: 128] vmlal.s32 q2,d30,d21 vmlal.s32 q7,d19,d21 vmlal.s32 q7,d27,d20 add r2,sp,#624 vld1.8 {d26-d27},[r2,: 128] vmlal.s32 q4,d25,d27 vmlal.s32 q8,d29,d27 vmlal.s32 q8,d25,d26 vmlal.s32 q7,d28,d27 vmlal.s32 q7,d29,d26 add r2,sp,#608 vld1.8 {d28-d29},[r2,: 128] vmlal.s32 q4,d24,d29 vmlal.s32 q8,d23,d29 vmlal.s32 q8,d24,d28 vmlal.s32 q7,d22,d29 vmlal.s32 q7,d23,d28 add r2,sp,#608 vst1.8 {d8-d9},[r2,: 128] add r2,sp,#560 vld1.8 
{d8-d9},[r2,: 128] vmlal.s32 q7,d24,d9 vmlal.s32 q7,d25,d31 vmull.s32 q1,d18,d2 vmlal.s32 q1,d19,d1 vmlal.s32 q1,d22,d0 vmlal.s32 q1,d24,d27 vmlal.s32 q1,d23,d20 vmlal.s32 q1,d12,d7 vmlal.s32 q1,d13,d6 vmull.s32 q6,d18,d1 vmlal.s32 q6,d19,d0 vmlal.s32 q6,d23,d27 vmlal.s32 q6,d22,d20 vmlal.s32 q6,d24,d26 vmull.s32 q0,d18,d0 vmlal.s32 q0,d22,d27 vmlal.s32 q0,d23,d26 vmlal.s32 q0,d24,d31 vmlal.s32 q0,d19,d20 add r2,sp,#640 vld1.8 {d18-d19},[r2,: 128] vmlal.s32 q2,d18,d7 vmlal.s32 q2,d19,d6 vmlal.s32 q5,d18,d6 vmlal.s32 q5,d19,d21 vmlal.s32 q1,d18,d21 vmlal.s32 q1,d19,d29 vmlal.s32 q0,d18,d28 vmlal.s32 q0,d19,d9 vmlal.s32 q6,d18,d29 vmlal.s32 q6,d19,d28 add r2,sp,#592 vld1.8 {d18-d19},[r2,: 128] add r2,sp,#512 vld1.8 {d22-d23},[r2,: 128] vmlal.s32 q5,d19,d7 vmlal.s32 q0,d18,d21 vmlal.s32 q0,d19,d29 vmlal.s32 q6,d18,d6 add r2,sp,#528 vld1.8 {d6-d7},[r2,: 128] vmlal.s32 q6,d19,d21 add r2,sp,#576 vld1.8 {d18-d19},[r2,: 128] vmlal.s32 q0,d30,d8 add r2,sp,#672 vld1.8 {d20-d21},[r2,: 128] vmlal.s32 q5,d30,d29 add r2,sp,#608 vld1.8 {d24-d25},[r2,: 128] vmlal.s32 q1,d30,d28 vadd.i64 q13,q0,q11 vadd.i64 q14,q5,q11 vmlal.s32 q6,d30,d9 vshr.s64 q4,q13,#26 vshr.s64 q13,q14,#26 vadd.i64 q7,q7,q4 vshl.i64 q4,q4,#26 vadd.i64 q14,q7,q3 vadd.i64 q9,q9,q13 vshl.i64 q13,q13,#26 vadd.i64 q15,q9,q3 vsub.i64 q0,q0,q4 vshr.s64 q4,q14,#25 vsub.i64 q5,q5,q13 vshr.s64 q13,q15,#25 vadd.i64 q6,q6,q4 vshl.i64 q4,q4,#25 vadd.i64 q14,q6,q11 vadd.i64 q2,q2,q13 vsub.i64 q4,q7,q4 vshr.s64 q7,q14,#26 vshl.i64 q13,q13,#25 vadd.i64 q14,q2,q11 vadd.i64 q8,q8,q7 vshl.i64 q7,q7,#26 vadd.i64 q15,q8,q3 vsub.i64 q9,q9,q13 vshr.s64 q13,q14,#26 vsub.i64 q6,q6,q7 vshr.s64 q7,q15,#25 vadd.i64 q10,q10,q13 vshl.i64 q13,q13,#26 vadd.i64 q14,q10,q3 vadd.i64 q1,q1,q7 add r2,r3,#144 vshl.i64 q7,q7,#25 add r4,r3,#96 vadd.i64 q15,q1,q11 add r2,r2,#8 vsub.i64 q2,q2,q13 add r4,r4,#8 vshr.s64 q13,q14,#25 vsub.i64 q7,q8,q7 vshr.s64 q8,q15,#26 vadd.i64 q14,q13,q13 vadd.i64 q12,q12,q8 vtrn.32 d12,d14 vshl.i64 q8,q8,#26 vtrn.32 d13,d15 vadd.i64 q3,q12,q3 vadd.i64 q0,q0,q14 vst1.8 d12,[r2,: 64]! vshl.i64 q7,q13,#4 vst1.8 d13,[r4,: 64]! vsub.i64 q1,q1,q8 vshr.s64 q3,q3,#25 vadd.i64 q0,q0,q7 vadd.i64 q5,q5,q3 vshl.i64 q3,q3,#25 vadd.i64 q6,q5,q11 vadd.i64 q0,q0,q13 vshl.i64 q7,q13,#25 vadd.i64 q8,q0,q11 vsub.i64 q3,q12,q3 vshr.s64 q6,q6,#26 vsub.i64 q7,q10,q7 vtrn.32 d2,d6 vshr.s64 q8,q8,#26 vtrn.32 d3,d7 vadd.i64 q3,q9,q6 vst1.8 d2,[r2,: 64] vshl.i64 q6,q6,#26 vst1.8 d3,[r4,: 64] vadd.i64 q1,q4,q8 vtrn.32 d4,d14 vshl.i64 q4,q8,#26 vtrn.32 d5,d15 vsub.i64 q5,q5,q6 add r2,r2,#16 vsub.i64 q0,q0,q4 vst1.8 d4,[r2,: 64] add r4,r4,#16 vst1.8 d5,[r4,: 64] vtrn.32 d10,d6 vtrn.32 d11,d7 sub r2,r2,#8 sub r4,r4,#8 vtrn.32 d0,d2 vtrn.32 d1,d3 vst1.8 d10,[r2,: 64] vst1.8 d11,[r4,: 64] sub r2,r2,#24 sub r4,r4,#24 vst1.8 d0,[r2,: 64] vst1.8 d1,[r4,: 64] add r2,r3,#288 add r4,r3,#336 vld1.8 {d0-d1},[r2,: 128]! vld1.8 {d2-d3},[r4,: 128]! vsub.i32 q0,q0,q1 vld1.8 {d2-d3},[r2,: 128]! vld1.8 {d4-d5},[r4,: 128]! vsub.i32 q1,q1,q2 add r5,r3,#240 vld1.8 {d4},[r2,: 64] vld1.8 {d6},[r4,: 64] vsub.i32 q2,q2,q3 vst1.8 {d0-d1},[r5,: 128]! vst1.8 {d2-d3},[r5,: 128]! vst1.8 d4,[r5,: 64] add r2,r3,#144 add r4,r3,#96 add r5,r3,#144 add r6,r3,#192 vld1.8 {d0-d1},[r2,: 128]! vld1.8 {d2-d3},[r4,: 128]! vsub.i32 q2,q0,q1 vadd.i32 q0,q0,q1 vld1.8 {d2-d3},[r2,: 128]! vld1.8 {d6-d7},[r4,: 128]! vsub.i32 q4,q1,q3 vadd.i32 q1,q1,q3 vld1.8 {d6},[r2,: 64] vld1.8 {d10},[r4,: 64] vsub.i32 q6,q3,q5 vadd.i32 q3,q3,q5 vst1.8 {d4-d5},[r5,: 128]! vst1.8 {d0-d1},[r6,: 128]! vst1.8 {d8-d9},[r5,: 128]! 
vst1.8 {d2-d3},[r6,: 128]! vst1.8 d12,[r5,: 64] vst1.8 d6,[r6,: 64] add r2,r3,#0 add r4,r3,#240 vld1.8 {d0-d1},[r4,: 128]! vld1.8 {d2-d3},[r4,: 128]! vld1.8 {d4},[r4,: 64] add r4,r3,#336 vld1.8 {d6-d7},[r4,: 128]! vtrn.32 q0,q3 vld1.8 {d8-d9},[r4,: 128]! vshl.i32 q5,q0,#4 vtrn.32 q1,q4 vshl.i32 q6,q3,#4 vadd.i32 q5,q5,q0 vadd.i32 q6,q6,q3 vshl.i32 q7,q1,#4 vld1.8 {d5},[r4,: 64] vshl.i32 q8,q4,#4 vtrn.32 d4,d5 vadd.i32 q7,q7,q1 vadd.i32 q8,q8,q4 vld1.8 {d18-d19},[r2,: 128]! vshl.i32 q10,q2,#4 vld1.8 {d22-d23},[r2,: 128]! vadd.i32 q10,q10,q2 vld1.8 {d24},[r2,: 64] vadd.i32 q5,q5,q0 add r2,r3,#288 vld1.8 {d26-d27},[r2,: 128]! vadd.i32 q6,q6,q3 vld1.8 {d28-d29},[r2,: 128]! vadd.i32 q8,q8,q4 vld1.8 {d25},[r2,: 64] vadd.i32 q10,q10,q2 vtrn.32 q9,q13 vadd.i32 q7,q7,q1 vadd.i32 q5,q5,q0 vtrn.32 q11,q14 vadd.i32 q6,q6,q3 add r2,sp,#560 vadd.i32 q10,q10,q2 vtrn.32 d24,d25 vst1.8 {d12-d13},[r2,: 128] vshl.i32 q6,q13,#1 add r2,sp,#576 vst1.8 {d20-d21},[r2,: 128] vshl.i32 q10,q14,#1 add r2,sp,#592 vst1.8 {d12-d13},[r2,: 128] vshl.i32 q15,q12,#1 vadd.i32 q8,q8,q4 vext.32 d10,d31,d30,#0 vadd.i32 q7,q7,q1 add r2,sp,#608 vst1.8 {d16-d17},[r2,: 128] vmull.s32 q8,d18,d5 vmlal.s32 q8,d26,d4 vmlal.s32 q8,d19,d9 vmlal.s32 q8,d27,d3 vmlal.s32 q8,d22,d8 vmlal.s32 q8,d28,d2 vmlal.s32 q8,d23,d7 vmlal.s32 q8,d29,d1 vmlal.s32 q8,d24,d6 vmlal.s32 q8,d25,d0 add r2,sp,#624 vst1.8 {d14-d15},[r2,: 128] vmull.s32 q2,d18,d4 vmlal.s32 q2,d12,d9 vmlal.s32 q2,d13,d8 vmlal.s32 q2,d19,d3 vmlal.s32 q2,d22,d2 vmlal.s32 q2,d23,d1 vmlal.s32 q2,d24,d0 add r2,sp,#640 vst1.8 {d20-d21},[r2,: 128] vmull.s32 q7,d18,d9 vmlal.s32 q7,d26,d3 vmlal.s32 q7,d19,d8 vmlal.s32 q7,d27,d2 vmlal.s32 q7,d22,d7 vmlal.s32 q7,d28,d1 vmlal.s32 q7,d23,d6 vmlal.s32 q7,d29,d0 add r2,sp,#656 vst1.8 {d10-d11},[r2,: 128] vmull.s32 q5,d18,d3 vmlal.s32 q5,d19,d2 vmlal.s32 q5,d22,d1 vmlal.s32 q5,d23,d0 vmlal.s32 q5,d12,d8 add r2,sp,#672 vst1.8 {d16-d17},[r2,: 128] vmull.s32 q4,d18,d8 vmlal.s32 q4,d26,d2 vmlal.s32 q4,d19,d7 vmlal.s32 q4,d27,d1 vmlal.s32 q4,d22,d6 vmlal.s32 q4,d28,d0 vmull.s32 q8,d18,d7 vmlal.s32 q8,d26,d1 vmlal.s32 q8,d19,d6 vmlal.s32 q8,d27,d0 add r2,sp,#576 vld1.8 {d20-d21},[r2,: 128] vmlal.s32 q7,d24,d21 vmlal.s32 q7,d25,d20 vmlal.s32 q4,d23,d21 vmlal.s32 q4,d29,d20 vmlal.s32 q8,d22,d21 vmlal.s32 q8,d28,d20 vmlal.s32 q5,d24,d20 add r2,sp,#576 vst1.8 {d14-d15},[r2,: 128] vmull.s32 q7,d18,d6 vmlal.s32 q7,d26,d0 add r2,sp,#656 vld1.8 {d30-d31},[r2,: 128] vmlal.s32 q2,d30,d21 vmlal.s32 q7,d19,d21 vmlal.s32 q7,d27,d20 add r2,sp,#624 vld1.8 {d26-d27},[r2,: 128] vmlal.s32 q4,d25,d27 vmlal.s32 q8,d29,d27 vmlal.s32 q8,d25,d26 vmlal.s32 q7,d28,d27 vmlal.s32 q7,d29,d26 add r2,sp,#608 vld1.8 {d28-d29},[r2,: 128] vmlal.s32 q4,d24,d29 vmlal.s32 q8,d23,d29 vmlal.s32 q8,d24,d28 vmlal.s32 q7,d22,d29 vmlal.s32 q7,d23,d28 add r2,sp,#608 vst1.8 {d8-d9},[r2,: 128] add r2,sp,#560 vld1.8 {d8-d9},[r2,: 128] vmlal.s32 q7,d24,d9 vmlal.s32 q7,d25,d31 vmull.s32 q1,d18,d2 vmlal.s32 q1,d19,d1 vmlal.s32 q1,d22,d0 vmlal.s32 q1,d24,d27 vmlal.s32 q1,d23,d20 vmlal.s32 q1,d12,d7 vmlal.s32 q1,d13,d6 vmull.s32 q6,d18,d1 vmlal.s32 q6,d19,d0 vmlal.s32 q6,d23,d27 vmlal.s32 q6,d22,d20 vmlal.s32 q6,d24,d26 vmull.s32 q0,d18,d0 vmlal.s32 q0,d22,d27 vmlal.s32 q0,d23,d26 vmlal.s32 q0,d24,d31 vmlal.s32 q0,d19,d20 add r2,sp,#640 vld1.8 {d18-d19},[r2,: 128] vmlal.s32 q2,d18,d7 vmlal.s32 q2,d19,d6 vmlal.s32 q5,d18,d6 vmlal.s32 q5,d19,d21 vmlal.s32 q1,d18,d21 vmlal.s32 q1,d19,d29 vmlal.s32 q0,d18,d28 vmlal.s32 q0,d19,d9 vmlal.s32 q6,d18,d29 vmlal.s32 q6,d19,d28 add r2,sp,#592 vld1.8 
{d18-d19},[r2,: 128] add r2,sp,#512 vld1.8 {d22-d23},[r2,: 128] vmlal.s32 q5,d19,d7 vmlal.s32 q0,d18,d21 vmlal.s32 q0,d19,d29 vmlal.s32 q6,d18,d6 add r2,sp,#528 vld1.8 {d6-d7},[r2,: 128] vmlal.s32 q6,d19,d21 add r2,sp,#576 vld1.8 {d18-d19},[r2,: 128] vmlal.s32 q0,d30,d8 add r2,sp,#672 vld1.8 {d20-d21},[r2,: 128] vmlal.s32 q5,d30,d29 add r2,sp,#608 vld1.8 {d24-d25},[r2,: 128] vmlal.s32 q1,d30,d28 vadd.i64 q13,q0,q11 vadd.i64 q14,q5,q11 vmlal.s32 q6,d30,d9 vshr.s64 q4,q13,#26 vshr.s64 q13,q14,#26 vadd.i64 q7,q7,q4 vshl.i64 q4,q4,#26 vadd.i64 q14,q7,q3 vadd.i64 q9,q9,q13 vshl.i64 q13,q13,#26 vadd.i64 q15,q9,q3 vsub.i64 q0,q0,q4 vshr.s64 q4,q14,#25 vsub.i64 q5,q5,q13 vshr.s64 q13,q15,#25 vadd.i64 q6,q6,q4 vshl.i64 q4,q4,#25 vadd.i64 q14,q6,q11 vadd.i64 q2,q2,q13 vsub.i64 q4,q7,q4 vshr.s64 q7,q14,#26 vshl.i64 q13,q13,#25 vadd.i64 q14,q2,q11 vadd.i64 q8,q8,q7 vshl.i64 q7,q7,#26 vadd.i64 q15,q8,q3 vsub.i64 q9,q9,q13 vshr.s64 q13,q14,#26 vsub.i64 q6,q6,q7 vshr.s64 q7,q15,#25 vadd.i64 q10,q10,q13 vshl.i64 q13,q13,#26 vadd.i64 q14,q10,q3 vadd.i64 q1,q1,q7 add r2,r3,#288 vshl.i64 q7,q7,#25 add r4,r3,#96 vadd.i64 q15,q1,q11 add r2,r2,#8 vsub.i64 q2,q2,q13 add r4,r4,#8 vshr.s64 q13,q14,#25 vsub.i64 q7,q8,q7 vshr.s64 q8,q15,#26 vadd.i64 q14,q13,q13 vadd.i64 q12,q12,q8 vtrn.32 d12,d14 vshl.i64 q8,q8,#26 vtrn.32 d13,d15 vadd.i64 q3,q12,q3 vadd.i64 q0,q0,q14 vst1.8 d12,[r2,: 64]! vshl.i64 q7,q13,#4 vst1.8 d13,[r4,: 64]! vsub.i64 q1,q1,q8 vshr.s64 q3,q3,#25 vadd.i64 q0,q0,q7 vadd.i64 q5,q5,q3 vshl.i64 q3,q3,#25 vadd.i64 q6,q5,q11 vadd.i64 q0,q0,q13 vshl.i64 q7,q13,#25 vadd.i64 q8,q0,q11 vsub.i64 q3,q12,q3 vshr.s64 q6,q6,#26 vsub.i64 q7,q10,q7 vtrn.32 d2,d6 vshr.s64 q8,q8,#26 vtrn.32 d3,d7 vadd.i64 q3,q9,q6 vst1.8 d2,[r2,: 64] vshl.i64 q6,q6,#26 vst1.8 d3,[r4,: 64] vadd.i64 q1,q4,q8 vtrn.32 d4,d14 vshl.i64 q4,q8,#26 vtrn.32 d5,d15 vsub.i64 q5,q5,q6 add r2,r2,#16 vsub.i64 q0,q0,q4 vst1.8 d4,[r2,: 64] add r4,r4,#16 vst1.8 d5,[r4,: 64] vtrn.32 d10,d6 vtrn.32 d11,d7 sub r2,r2,#8 sub r4,r4,#8 vtrn.32 d0,d2 vtrn.32 d1,d3 vst1.8 d10,[r2,: 64] vst1.8 d11,[r4,: 64] sub r2,r2,#24 sub r4,r4,#24 vst1.8 d0,[r2,: 64] vst1.8 d1,[r4,: 64] add r2,sp,#544 add r4,r3,#144 add r5,r3,#192 vld1.8 {d0-d1},[r2,: 128] vld1.8 {d2-d3},[r4,: 128]! vld1.8 {d4-d5},[r5,: 128]! vzip.i32 q1,q2 vld1.8 {d6-d7},[r4,: 128]! vld1.8 {d8-d9},[r5,: 128]! 
vshl.i32 q5,q1,#1 vzip.i32 q3,q4 vshl.i32 q6,q2,#1 vld1.8 {d14},[r4,: 64] vshl.i32 q8,q3,#1 vld1.8 {d15},[r5,: 64] vshl.i32 q9,q4,#1 vmul.i32 d21,d7,d1 vtrn.32 d14,d15 vmul.i32 q11,q4,q0 vmul.i32 q0,q7,q0 vmull.s32 q12,d2,d2 vmlal.s32 q12,d11,d1 vmlal.s32 q12,d12,d0 vmlal.s32 q12,d13,d23 vmlal.s32 q12,d16,d22 vmlal.s32 q12,d7,d21 vmull.s32 q10,d2,d11 vmlal.s32 q10,d4,d1 vmlal.s32 q10,d13,d0 vmlal.s32 q10,d6,d23 vmlal.s32 q10,d17,d22 vmull.s32 q13,d10,d4 vmlal.s32 q13,d11,d3 vmlal.s32 q13,d13,d1 vmlal.s32 q13,d16,d0 vmlal.s32 q13,d17,d23 vmlal.s32 q13,d8,d22 vmull.s32 q1,d10,d5 vmlal.s32 q1,d11,d4 vmlal.s32 q1,d6,d1 vmlal.s32 q1,d17,d0 vmlal.s32 q1,d8,d23 vmull.s32 q14,d10,d6 vmlal.s32 q14,d11,d13 vmlal.s32 q14,d4,d4 vmlal.s32 q14,d17,d1 vmlal.s32 q14,d18,d0 vmlal.s32 q14,d9,d23 vmull.s32 q11,d10,d7 vmlal.s32 q11,d11,d6 vmlal.s32 q11,d12,d5 vmlal.s32 q11,d8,d1 vmlal.s32 q11,d19,d0 vmull.s32 q15,d10,d8 vmlal.s32 q15,d11,d17 vmlal.s32 q15,d12,d6 vmlal.s32 q15,d13,d5 vmlal.s32 q15,d19,d1 vmlal.s32 q15,d14,d0 vmull.s32 q2,d10,d9 vmlal.s32 q2,d11,d8 vmlal.s32 q2,d12,d7 vmlal.s32 q2,d13,d6 vmlal.s32 q2,d14,d1 vmull.s32 q0,d15,d1 vmlal.s32 q0,d10,d14 vmlal.s32 q0,d11,d19 vmlal.s32 q0,d12,d8 vmlal.s32 q0,d13,d17 vmlal.s32 q0,d6,d6 add r2,sp,#512 vld1.8 {d18-d19},[r2,: 128] vmull.s32 q3,d16,d7 vmlal.s32 q3,d10,d15 vmlal.s32 q3,d11,d14 vmlal.s32 q3,d12,d9 vmlal.s32 q3,d13,d8 add r2,sp,#528 vld1.8 {d8-d9},[r2,: 128] vadd.i64 q5,q12,q9 vadd.i64 q6,q15,q9 vshr.s64 q5,q5,#26 vshr.s64 q6,q6,#26 vadd.i64 q7,q10,q5 vshl.i64 q5,q5,#26 vadd.i64 q8,q7,q4 vadd.i64 q2,q2,q6 vshl.i64 q6,q6,#26 vadd.i64 q10,q2,q4 vsub.i64 q5,q12,q5 vshr.s64 q8,q8,#25 vsub.i64 q6,q15,q6 vshr.s64 q10,q10,#25 vadd.i64 q12,q13,q8 vshl.i64 q8,q8,#25 vadd.i64 q13,q12,q9 vadd.i64 q0,q0,q10 vsub.i64 q7,q7,q8 vshr.s64 q8,q13,#26 vshl.i64 q10,q10,#25 vadd.i64 q13,q0,q9 vadd.i64 q1,q1,q8 vshl.i64 q8,q8,#26 vadd.i64 q15,q1,q4 vsub.i64 q2,q2,q10 vshr.s64 q10,q13,#26 vsub.i64 q8,q12,q8 vshr.s64 q12,q15,#25 vadd.i64 q3,q3,q10 vshl.i64 q10,q10,#26 vadd.i64 q13,q3,q4 vadd.i64 q14,q14,q12 add r2,r3,#144 vshl.i64 q12,q12,#25 add r4,r3,#192 vadd.i64 q15,q14,q9 add r2,r2,#8 vsub.i64 q0,q0,q10 add r4,r4,#8 vshr.s64 q10,q13,#25 vsub.i64 q1,q1,q12 vshr.s64 q12,q15,#26 vadd.i64 q13,q10,q10 vadd.i64 q11,q11,q12 vtrn.32 d16,d2 vshl.i64 q12,q12,#26 vtrn.32 d17,d3 vadd.i64 q1,q11,q4 vadd.i64 q4,q5,q13 vst1.8 d16,[r2,: 64]! vshl.i64 q5,q10,#4 vst1.8 d17,[r4,: 64]! vsub.i64 q8,q14,q12 vshr.s64 q1,q1,#25 vadd.i64 q4,q4,q5 vadd.i64 q5,q6,q1 vshl.i64 q1,q1,#25 vadd.i64 q6,q5,q9 vadd.i64 q4,q4,q10 vshl.i64 q10,q10,#25 vadd.i64 q9,q4,q9 vsub.i64 q1,q11,q1 vshr.s64 q6,q6,#26 vsub.i64 q3,q3,q10 vtrn.32 d16,d2 vshr.s64 q9,q9,#26 vtrn.32 d17,d3 vadd.i64 q1,q2,q6 vst1.8 d16,[r2,: 64] vshl.i64 q2,q6,#26 vst1.8 d17,[r4,: 64] vadd.i64 q6,q7,q9 vtrn.32 d0,d6 vshl.i64 q7,q9,#26 vtrn.32 d1,d7 vsub.i64 q2,q5,q2 add r2,r2,#16 vsub.i64 q3,q4,q7 vst1.8 d0,[r2,: 64] add r4,r4,#16 vst1.8 d1,[r4,: 64] vtrn.32 d4,d2 vtrn.32 d5,d3 sub r2,r2,#8 sub r4,r4,#8 vtrn.32 d6,d12 vtrn.32 d7,d13 vst1.8 d4,[r2,: 64] vst1.8 d5,[r4,: 64] sub r2,r2,#24 sub r4,r4,#24 vst1.8 d6,[r2,: 64] vst1.8 d7,[r4,: 64] add r2,r3,#336 add r4,r3,#288 vld1.8 {d0-d1},[r2,: 128]! vld1.8 {d2-d3},[r4,: 128]! vadd.i32 q0,q0,q1 vld1.8 {d2-d3},[r2,: 128]! vld1.8 {d4-d5},[r4,: 128]! vadd.i32 q1,q1,q2 add r5,r3,#288 vld1.8 {d4},[r2,: 64] vld1.8 {d6},[r4,: 64] vadd.i32 q2,q2,q3 vst1.8 {d0-d1},[r5,: 128]! vst1.8 {d2-d3},[r5,: 128]! vst1.8 d4,[r5,: 64] add r2,r3,#48 add r4,r3,#144 vld1.8 {d0-d1},[r4,: 128]! 
vld1.8 {d2-d3},[r4,: 128]! vld1.8 {d4},[r4,: 64] add r4,r3,#288 vld1.8 {d6-d7},[r4,: 128]! vtrn.32 q0,q3 vld1.8 {d8-d9},[r4,: 128]! vshl.i32 q5,q0,#4 vtrn.32 q1,q4 vshl.i32 q6,q3,#4 vadd.i32 q5,q5,q0 vadd.i32 q6,q6,q3 vshl.i32 q7,q1,#4 vld1.8 {d5},[r4,: 64] vshl.i32 q8,q4,#4 vtrn.32 d4,d5 vadd.i32 q7,q7,q1 vadd.i32 q8,q8,q4 vld1.8 {d18-d19},[r2,: 128]! vshl.i32 q10,q2,#4 vld1.8 {d22-d23},[r2,: 128]! vadd.i32 q10,q10,q2 vld1.8 {d24},[r2,: 64] vadd.i32 q5,q5,q0 add r2,r3,#240 vld1.8 {d26-d27},[r2,: 128]! vadd.i32 q6,q6,q3 vld1.8 {d28-d29},[r2,: 128]! vadd.i32 q8,q8,q4 vld1.8 {d25},[r2,: 64] vadd.i32 q10,q10,q2 vtrn.32 q9,q13 vadd.i32 q7,q7,q1 vadd.i32 q5,q5,q0 vtrn.32 q11,q14 vadd.i32 q6,q6,q3 add r2,sp,#560 vadd.i32 q10,q10,q2 vtrn.32 d24,d25 vst1.8 {d12-d13},[r2,: 128] vshl.i32 q6,q13,#1 add r2,sp,#576 vst1.8 {d20-d21},[r2,: 128] vshl.i32 q10,q14,#1 add r2,sp,#592 vst1.8 {d12-d13},[r2,: 128] vshl.i32 q15,q12,#1 vadd.i32 q8,q8,q4 vext.32 d10,d31,d30,#0 vadd.i32 q7,q7,q1 add r2,sp,#608 vst1.8 {d16-d17},[r2,: 128] vmull.s32 q8,d18,d5 vmlal.s32 q8,d26,d4 vmlal.s32 q8,d19,d9 vmlal.s32 q8,d27,d3 vmlal.s32 q8,d22,d8 vmlal.s32 q8,d28,d2 vmlal.s32 q8,d23,d7 vmlal.s32 q8,d29,d1 vmlal.s32 q8,d24,d6 vmlal.s32 q8,d25,d0 add r2,sp,#624 vst1.8 {d14-d15},[r2,: 128] vmull.s32 q2,d18,d4 vmlal.s32 q2,d12,d9 vmlal.s32 q2,d13,d8 vmlal.s32 q2,d19,d3 vmlal.s32 q2,d22,d2 vmlal.s32 q2,d23,d1 vmlal.s32 q2,d24,d0 add r2,sp,#640 vst1.8 {d20-d21},[r2,: 128] vmull.s32 q7,d18,d9 vmlal.s32 q7,d26,d3 vmlal.s32 q7,d19,d8 vmlal.s32 q7,d27,d2 vmlal.s32 q7,d22,d7 vmlal.s32 q7,d28,d1 vmlal.s32 q7,d23,d6 vmlal.s32 q7,d29,d0 add r2,sp,#656 vst1.8 {d10-d11},[r2,: 128] vmull.s32 q5,d18,d3 vmlal.s32 q5,d19,d2 vmlal.s32 q5,d22,d1 vmlal.s32 q5,d23,d0 vmlal.s32 q5,d12,d8 add r2,sp,#672 vst1.8 {d16-d17},[r2,: 128] vmull.s32 q4,d18,d8 vmlal.s32 q4,d26,d2 vmlal.s32 q4,d19,d7 vmlal.s32 q4,d27,d1 vmlal.s32 q4,d22,d6 vmlal.s32 q4,d28,d0 vmull.s32 q8,d18,d7 vmlal.s32 q8,d26,d1 vmlal.s32 q8,d19,d6 vmlal.s32 q8,d27,d0 add r2,sp,#576 vld1.8 {d20-d21},[r2,: 128] vmlal.s32 q7,d24,d21 vmlal.s32 q7,d25,d20 vmlal.s32 q4,d23,d21 vmlal.s32 q4,d29,d20 vmlal.s32 q8,d22,d21 vmlal.s32 q8,d28,d20 vmlal.s32 q5,d24,d20 add r2,sp,#576 vst1.8 {d14-d15},[r2,: 128] vmull.s32 q7,d18,d6 vmlal.s32 q7,d26,d0 add r2,sp,#656 vld1.8 {d30-d31},[r2,: 128] vmlal.s32 q2,d30,d21 vmlal.s32 q7,d19,d21 vmlal.s32 q7,d27,d20 add r2,sp,#624 vld1.8 {d26-d27},[r2,: 128] vmlal.s32 q4,d25,d27 vmlal.s32 q8,d29,d27 vmlal.s32 q8,d25,d26 vmlal.s32 q7,d28,d27 vmlal.s32 q7,d29,d26 add r2,sp,#608 vld1.8 {d28-d29},[r2,: 128] vmlal.s32 q4,d24,d29 vmlal.s32 q8,d23,d29 vmlal.s32 q8,d24,d28 vmlal.s32 q7,d22,d29 vmlal.s32 q7,d23,d28 add r2,sp,#608 vst1.8 {d8-d9},[r2,: 128] add r2,sp,#560 vld1.8 {d8-d9},[r2,: 128] vmlal.s32 q7,d24,d9 vmlal.s32 q7,d25,d31 vmull.s32 q1,d18,d2 vmlal.s32 q1,d19,d1 vmlal.s32 q1,d22,d0 vmlal.s32 q1,d24,d27 vmlal.s32 q1,d23,d20 vmlal.s32 q1,d12,d7 vmlal.s32 q1,d13,d6 vmull.s32 q6,d18,d1 vmlal.s32 q6,d19,d0 vmlal.s32 q6,d23,d27 vmlal.s32 q6,d22,d20 vmlal.s32 q6,d24,d26 vmull.s32 q0,d18,d0 vmlal.s32 q0,d22,d27 vmlal.s32 q0,d23,d26 vmlal.s32 q0,d24,d31 vmlal.s32 q0,d19,d20 add r2,sp,#640 vld1.8 {d18-d19},[r2,: 128] vmlal.s32 q2,d18,d7 vmlal.s32 q2,d19,d6 vmlal.s32 q5,d18,d6 vmlal.s32 q5,d19,d21 vmlal.s32 q1,d18,d21 vmlal.s32 q1,d19,d29 vmlal.s32 q0,d18,d28 vmlal.s32 q0,d19,d9 vmlal.s32 q6,d18,d29 vmlal.s32 q6,d19,d28 add r2,sp,#592 vld1.8 {d18-d19},[r2,: 128] add r2,sp,#512 vld1.8 {d22-d23},[r2,: 128] vmlal.s32 q5,d19,d7 vmlal.s32 q0,d18,d21 vmlal.s32 q0,d19,d29 vmlal.s32 
q6,d18,d6 add r2,sp,#528 vld1.8 {d6-d7},[r2,: 128] vmlal.s32 q6,d19,d21 add r2,sp,#576 vld1.8 {d18-d19},[r2,: 128] vmlal.s32 q0,d30,d8 add r2,sp,#672 vld1.8 {d20-d21},[r2,: 128] vmlal.s32 q5,d30,d29 add r2,sp,#608 vld1.8 {d24-d25},[r2,: 128] vmlal.s32 q1,d30,d28 vadd.i64 q13,q0,q11 vadd.i64 q14,q5,q11 vmlal.s32 q6,d30,d9 vshr.s64 q4,q13,#26 vshr.s64 q13,q14,#26 vadd.i64 q7,q7,q4 vshl.i64 q4,q4,#26 vadd.i64 q14,q7,q3 vadd.i64 q9,q9,q13 vshl.i64 q13,q13,#26 vadd.i64 q15,q9,q3 vsub.i64 q0,q0,q4 vshr.s64 q4,q14,#25 vsub.i64 q5,q5,q13 vshr.s64 q13,q15,#25 vadd.i64 q6,q6,q4 vshl.i64 q4,q4,#25 vadd.i64 q14,q6,q11 vadd.i64 q2,q2,q13 vsub.i64 q4,q7,q4 vshr.s64 q7,q14,#26 vshl.i64 q13,q13,#25 vadd.i64 q14,q2,q11 vadd.i64 q8,q8,q7 vshl.i64 q7,q7,#26 vadd.i64 q15,q8,q3 vsub.i64 q9,q9,q13 vshr.s64 q13,q14,#26 vsub.i64 q6,q6,q7 vshr.s64 q7,q15,#25 vadd.i64 q10,q10,q13 vshl.i64 q13,q13,#26 vadd.i64 q14,q10,q3 vadd.i64 q1,q1,q7 add r2,r3,#240 vshl.i64 q7,q7,#25 add r4,r3,#144 vadd.i64 q15,q1,q11 add r2,r2,#8 vsub.i64 q2,q2,q13 add r4,r4,#8 vshr.s64 q13,q14,#25 vsub.i64 q7,q8,q7 vshr.s64 q8,q15,#26 vadd.i64 q14,q13,q13 vadd.i64 q12,q12,q8 vtrn.32 d12,d14 vshl.i64 q8,q8,#26 vtrn.32 d13,d15 vadd.i64 q3,q12,q3 vadd.i64 q0,q0,q14 vst1.8 d12,[r2,: 64]! vshl.i64 q7,q13,#4 vst1.8 d13,[r4,: 64]! vsub.i64 q1,q1,q8 vshr.s64 q3,q3,#25 vadd.i64 q0,q0,q7 vadd.i64 q5,q5,q3 vshl.i64 q3,q3,#25 vadd.i64 q6,q5,q11 vadd.i64 q0,q0,q13 vshl.i64 q7,q13,#25 vadd.i64 q8,q0,q11 vsub.i64 q3,q12,q3 vshr.s64 q6,q6,#26 vsub.i64 q7,q10,q7 vtrn.32 d2,d6 vshr.s64 q8,q8,#26 vtrn.32 d3,d7 vadd.i64 q3,q9,q6 vst1.8 d2,[r2,: 64] vshl.i64 q6,q6,#26 vst1.8 d3,[r4,: 64] vadd.i64 q1,q4,q8 vtrn.32 d4,d14 vshl.i64 q4,q8,#26 vtrn.32 d5,d15 vsub.i64 q5,q5,q6 add r2,r2,#16 vsub.i64 q0,q0,q4 vst1.8 d4,[r2,: 64] add r4,r4,#16 vst1.8 d5,[r4,: 64] vtrn.32 d10,d6 vtrn.32 d11,d7 sub r2,r2,#8 sub r4,r4,#8 vtrn.32 d0,d2 vtrn.32 d1,d3 vst1.8 d10,[r2,: 64] vst1.8 d11,[r4,: 64] sub r2,r2,#24 sub r4,r4,#24 vst1.8 d0,[r2,: 64] vst1.8 d1,[r4,: 64] ldr r2,[sp,#488] ldr r4,[sp,#492] subs r5,r2,#1 bge ._mainloop add r1,r3,#144 add r2,r3,#336 vld1.8 {d0-d1},[r1,: 128]! vld1.8 {d2-d3},[r1,: 128]! vld1.8 {d4},[r1,: 64] vst1.8 {d0-d1},[r2,: 128]! vst1.8 {d2-d3},[r2,: 128]! vst1.8 d4,[r2,: 64] ldr r1,=0 ._invertloop: add r2,r3,#144 ldr r4,=0 ldr r5,=2 cmp r1,#1 ldreq r5,=1 addeq r2,r3,#336 addeq r4,r3,#48 cmp r1,#2 ldreq r5,=1 addeq r2,r3,#48 cmp r1,#3 ldreq r5,=5 addeq r4,r3,#336 cmp r1,#4 ldreq r5,=10 cmp r1,#5 ldreq r5,=20 cmp r1,#6 ldreq r5,=10 addeq r2,r3,#336 addeq r4,r3,#336 cmp r1,#7 ldreq r5,=50 cmp r1,#8 ldreq r5,=100 cmp r1,#9 ldreq r5,=50 addeq r2,r3,#336 cmp r1,#10 ldreq r5,=5 addeq r2,r3,#48 cmp r1,#11 ldreq r5,=0 addeq r2,r3,#96 add r6,r3,#144 add r7,r3,#288 vld1.8 {d0-d1},[r6,: 128]! vld1.8 {d2-d3},[r6,: 128]! vld1.8 {d4},[r6,: 64] vst1.8 {d0-d1},[r7,: 128]! vst1.8 {d2-d3},[r7,: 128]! vst1.8 d4,[r7,: 64] cmp r5,#0 beq ._skipsquaringloop ._squaringloop: add r6,r3,#288 add r7,r3,#288 add r8,r3,#288 vmov.i32 q0,#19 vmov.i32 q1,#0 vmov.i32 q2,#1 vzip.i32 q1,q2 vld1.8 {d4-d5},[r7,: 128]! vld1.8 {d6-d7},[r7,: 128]! vld1.8 {d9},[r7,: 64] vld1.8 {d10-d11},[r6,: 128]! add r7,sp,#416 vld1.8 {d12-d13},[r6,: 128]! 
vmul.i32 q7,q2,q0 vld1.8 {d8},[r6,: 64] vext.32 d17,d11,d10,#1 vmul.i32 q9,q3,q0 vext.32 d16,d10,d8,#1 vshl.u32 q10,q5,q1 vext.32 d22,d14,d4,#1 vext.32 d24,d18,d6,#1 vshl.u32 q13,q6,q1 vshl.u32 d28,d8,d2 vrev64.i32 d22,d22 vmul.i32 d1,d9,d1 vrev64.i32 d24,d24 vext.32 d29,d8,d13,#1 vext.32 d0,d1,d9,#1 vrev64.i32 d0,d0 vext.32 d2,d9,d1,#1 vext.32 d23,d15,d5,#1 vmull.s32 q4,d20,d4 vrev64.i32 d23,d23 vmlal.s32 q4,d21,d1 vrev64.i32 d2,d2 vmlal.s32 q4,d26,d19 vext.32 d3,d5,d15,#1 vmlal.s32 q4,d27,d18 vrev64.i32 d3,d3 vmlal.s32 q4,d28,d15 vext.32 d14,d12,d11,#1 vmull.s32 q5,d16,d23 vext.32 d15,d13,d12,#1 vmlal.s32 q5,d17,d4 vst1.8 d8,[r7,: 64]! vmlal.s32 q5,d14,d1 vext.32 d12,d9,d8,#0 vmlal.s32 q5,d15,d19 vmov.i64 d13,#0 vmlal.s32 q5,d29,d18 vext.32 d25,d19,d7,#1 vmlal.s32 q6,d20,d5 vrev64.i32 d25,d25 vmlal.s32 q6,d21,d4 vst1.8 d11,[r7,: 64]! vmlal.s32 q6,d26,d1 vext.32 d9,d10,d10,#0 vmlal.s32 q6,d27,d19 vmov.i64 d8,#0 vmlal.s32 q6,d28,d18 vmlal.s32 q4,d16,d24 vmlal.s32 q4,d17,d5 vmlal.s32 q4,d14,d4 vst1.8 d12,[r7,: 64]! vmlal.s32 q4,d15,d1 vext.32 d10,d13,d12,#0 vmlal.s32 q4,d29,d19 vmov.i64 d11,#0 vmlal.s32 q5,d20,d6 vmlal.s32 q5,d21,d5 vmlal.s32 q5,d26,d4 vext.32 d13,d8,d8,#0 vmlal.s32 q5,d27,d1 vmov.i64 d12,#0 vmlal.s32 q5,d28,d19 vst1.8 d9,[r7,: 64]! vmlal.s32 q6,d16,d25 vmlal.s32 q6,d17,d6 vst1.8 d10,[r7,: 64] vmlal.s32 q6,d14,d5 vext.32 d8,d11,d10,#0 vmlal.s32 q6,d15,d4 vmov.i64 d9,#0 vmlal.s32 q6,d29,d1 vmlal.s32 q4,d20,d7 vmlal.s32 q4,d21,d6 vmlal.s32 q4,d26,d5 vext.32 d11,d12,d12,#0 vmlal.s32 q4,d27,d4 vmov.i64 d10,#0 vmlal.s32 q4,d28,d1 vmlal.s32 q5,d16,d0 sub r6,r7,#32 vmlal.s32 q5,d17,d7 vmlal.s32 q5,d14,d6 vext.32 d30,d9,d8,#0 vmlal.s32 q5,d15,d5 vld1.8 {d31},[r6,: 64]! vmlal.s32 q5,d29,d4 vmlal.s32 q15,d20,d0 vext.32 d0,d6,d18,#1 vmlal.s32 q15,d21,d25 vrev64.i32 d0,d0 vmlal.s32 q15,d26,d24 vext.32 d1,d7,d19,#1 vext.32 d7,d10,d10,#0 vmlal.s32 q15,d27,d23 vrev64.i32 d1,d1 vld1.8 {d6},[r6,: 64] vmlal.s32 q15,d28,d22 vmlal.s32 q3,d16,d4 add r6,r6,#24 vmlal.s32 q3,d17,d2 vext.32 d4,d31,d30,#0 vmov d17,d11 vmlal.s32 q3,d14,d1 vext.32 d11,d13,d13,#0 vext.32 d13,d30,d30,#0 vmlal.s32 q3,d15,d0 vext.32 d1,d8,d8,#0 vmlal.s32 q3,d29,d3 vld1.8 {d5},[r6,: 64] sub r6,r6,#16 vext.32 d10,d6,d6,#0 vmov.i32 q1,#0xffffffff vshl.i64 q4,q1,#25 add r7,sp,#512 vld1.8 {d14-d15},[r7,: 128] vadd.i64 q9,q2,q7 vshl.i64 q1,q1,#26 vshr.s64 q10,q9,#26 vld1.8 {d0},[r6,: 64]! vadd.i64 q5,q5,q10 vand q9,q9,q1 vld1.8 {d16},[r6,: 64]! 
add r6,sp,#528 vld1.8 {d20-d21},[r6,: 128] vadd.i64 q11,q5,q10 vsub.i64 q2,q2,q9 vshr.s64 q9,q11,#25 vext.32 d12,d5,d4,#0 vand q11,q11,q4 vadd.i64 q0,q0,q9 vmov d19,d7 vadd.i64 q3,q0,q7 vsub.i64 q5,q5,q11 vshr.s64 q11,q3,#26 vext.32 d18,d11,d10,#0 vand q3,q3,q1 vadd.i64 q8,q8,q11 vadd.i64 q11,q8,q10 vsub.i64 q0,q0,q3 vshr.s64 q3,q11,#25 vand q11,q11,q4 vadd.i64 q3,q6,q3 vadd.i64 q6,q3,q7 vsub.i64 q8,q8,q11 vshr.s64 q11,q6,#26 vand q6,q6,q1 vadd.i64 q9,q9,q11 vadd.i64 d25,d19,d21 vsub.i64 q3,q3,q6 vshr.s64 d23,d25,#25 vand q4,q12,q4 vadd.i64 d21,d23,d23 vshl.i64 d25,d23,#4 vadd.i64 d21,d21,d23 vadd.i64 d25,d25,d21 vadd.i64 d4,d4,d25 vzip.i32 q0,q8 vadd.i64 d12,d4,d14 add r6,r8,#8 vst1.8 d0,[r6,: 64] vsub.i64 d19,d19,d9 add r6,r6,#16 vst1.8 d16,[r6,: 64] vshr.s64 d22,d12,#26 vand q0,q6,q1 vadd.i64 d10,d10,d22 vzip.i32 q3,q9 vsub.i64 d4,d4,d0 sub r6,r6,#8 vst1.8 d6,[r6,: 64] add r6,r6,#16 vst1.8 d18,[r6,: 64] vzip.i32 q2,q5 sub r6,r6,#32 vst1.8 d4,[r6,: 64] subs r5,r5,#1 bhi ._squaringloop ._skipsquaringloop: mov r2,r2 add r5,r3,#288 add r6,r3,#144 vmov.i32 q0,#19 vmov.i32 q1,#0 vmov.i32 q2,#1 vzip.i32 q1,q2 vld1.8 {d4-d5},[r5,: 128]! vld1.8 {d6-d7},[r5,: 128]! vld1.8 {d9},[r5,: 64] vld1.8 {d10-d11},[r2,: 128]! add r5,sp,#416 vld1.8 {d12-d13},[r2,: 128]! vmul.i32 q7,q2,q0 vld1.8 {d8},[r2,: 64] vext.32 d17,d11,d10,#1 vmul.i32 q9,q3,q0 vext.32 d16,d10,d8,#1 vshl.u32 q10,q5,q1 vext.32 d22,d14,d4,#1 vext.32 d24,d18,d6,#1 vshl.u32 q13,q6,q1 vshl.u32 d28,d8,d2 vrev64.i32 d22,d22 vmul.i32 d1,d9,d1 vrev64.i32 d24,d24 vext.32 d29,d8,d13,#1 vext.32 d0,d1,d9,#1 vrev64.i32 d0,d0 vext.32 d2,d9,d1,#1 vext.32 d23,d15,d5,#1 vmull.s32 q4,d20,d4 vrev64.i32 d23,d23 vmlal.s32 q4,d21,d1 vrev64.i32 d2,d2 vmlal.s32 q4,d26,d19 vext.32 d3,d5,d15,#1 vmlal.s32 q4,d27,d18 vrev64.i32 d3,d3 vmlal.s32 q4,d28,d15 vext.32 d14,d12,d11,#1 vmull.s32 q5,d16,d23 vext.32 d15,d13,d12,#1 vmlal.s32 q5,d17,d4 vst1.8 d8,[r5,: 64]! vmlal.s32 q5,d14,d1 vext.32 d12,d9,d8,#0 vmlal.s32 q5,d15,d19 vmov.i64 d13,#0 vmlal.s32 q5,d29,d18 vext.32 d25,d19,d7,#1 vmlal.s32 q6,d20,d5 vrev64.i32 d25,d25 vmlal.s32 q6,d21,d4 vst1.8 d11,[r5,: 64]! vmlal.s32 q6,d26,d1 vext.32 d9,d10,d10,#0 vmlal.s32 q6,d27,d19 vmov.i64 d8,#0 vmlal.s32 q6,d28,d18 vmlal.s32 q4,d16,d24 vmlal.s32 q4,d17,d5 vmlal.s32 q4,d14,d4 vst1.8 d12,[r5,: 64]! vmlal.s32 q4,d15,d1 vext.32 d10,d13,d12,#0 vmlal.s32 q4,d29,d19 vmov.i64 d11,#0 vmlal.s32 q5,d20,d6 vmlal.s32 q5,d21,d5 vmlal.s32 q5,d26,d4 vext.32 d13,d8,d8,#0 vmlal.s32 q5,d27,d1 vmov.i64 d12,#0 vmlal.s32 q5,d28,d19 vst1.8 d9,[r5,: 64]! vmlal.s32 q6,d16,d25 vmlal.s32 q6,d17,d6 vst1.8 d10,[r5,: 64] vmlal.s32 q6,d14,d5 vext.32 d8,d11,d10,#0 vmlal.s32 q6,d15,d4 vmov.i64 d9,#0 vmlal.s32 q6,d29,d1 vmlal.s32 q4,d20,d7 vmlal.s32 q4,d21,d6 vmlal.s32 q4,d26,d5 vext.32 d11,d12,d12,#0 vmlal.s32 q4,d27,d4 vmov.i64 d10,#0 vmlal.s32 q4,d28,d1 vmlal.s32 q5,d16,d0 sub r2,r5,#32 vmlal.s32 q5,d17,d7 vmlal.s32 q5,d14,d6 vext.32 d30,d9,d8,#0 vmlal.s32 q5,d15,d5 vld1.8 {d31},[r2,: 64]! 
vmlal.s32 q5,d29,d4 vmlal.s32 q15,d20,d0 vext.32 d0,d6,d18,#1 vmlal.s32 q15,d21,d25 vrev64.i32 d0,d0 vmlal.s32 q15,d26,d24 vext.32 d1,d7,d19,#1 vext.32 d7,d10,d10,#0 vmlal.s32 q15,d27,d23 vrev64.i32 d1,d1 vld1.8 {d6},[r2,: 64] vmlal.s32 q15,d28,d22 vmlal.s32 q3,d16,d4 add r2,r2,#24 vmlal.s32 q3,d17,d2 vext.32 d4,d31,d30,#0 vmov d17,d11 vmlal.s32 q3,d14,d1 vext.32 d11,d13,d13,#0 vext.32 d13,d30,d30,#0 vmlal.s32 q3,d15,d0 vext.32 d1,d8,d8,#0 vmlal.s32 q3,d29,d3 vld1.8 {d5},[r2,: 64] sub r2,r2,#16 vext.32 d10,d6,d6,#0 vmov.i32 q1,#0xffffffff vshl.i64 q4,q1,#25 add r5,sp,#512 vld1.8 {d14-d15},[r5,: 128] vadd.i64 q9,q2,q7 vshl.i64 q1,q1,#26 vshr.s64 q10,q9,#26 vld1.8 {d0},[r2,: 64]! vadd.i64 q5,q5,q10 vand q9,q9,q1 vld1.8 {d16},[r2,: 64]! add r2,sp,#528 vld1.8 {d20-d21},[r2,: 128] vadd.i64 q11,q5,q10 vsub.i64 q2,q2,q9 vshr.s64 q9,q11,#25 vext.32 d12,d5,d4,#0 vand q11,q11,q4 vadd.i64 q0,q0,q9 vmov d19,d7 vadd.i64 q3,q0,q7 vsub.i64 q5,q5,q11 vshr.s64 q11,q3,#26 vext.32 d18,d11,d10,#0 vand q3,q3,q1 vadd.i64 q8,q8,q11 vadd.i64 q11,q8,q10 vsub.i64 q0,q0,q3 vshr.s64 q3,q11,#25 vand q11,q11,q4 vadd.i64 q3,q6,q3 vadd.i64 q6,q3,q7 vsub.i64 q8,q8,q11 vshr.s64 q11,q6,#26 vand q6,q6,q1 vadd.i64 q9,q9,q11 vadd.i64 d25,d19,d21 vsub.i64 q3,q3,q6 vshr.s64 d23,d25,#25 vand q4,q12,q4 vadd.i64 d21,d23,d23 vshl.i64 d25,d23,#4 vadd.i64 d21,d21,d23 vadd.i64 d25,d25,d21 vadd.i64 d4,d4,d25 vzip.i32 q0,q8 vadd.i64 d12,d4,d14 add r2,r6,#8 vst1.8 d0,[r2,: 64] vsub.i64 d19,d19,d9 add r2,r2,#16 vst1.8 d16,[r2,: 64] vshr.s64 d22,d12,#26 vand q0,q6,q1 vadd.i64 d10,d10,d22 vzip.i32 q3,q9 vsub.i64 d4,d4,d0 sub r2,r2,#8 vst1.8 d6,[r2,: 64] add r2,r2,#16 vst1.8 d18,[r2,: 64] vzip.i32 q2,q5 sub r2,r2,#32 vst1.8 d4,[r2,: 64] cmp r4,#0 beq ._skippostcopy add r2,r3,#144 mov r4,r4 vld1.8 {d0-d1},[r2,: 128]! vld1.8 {d2-d3},[r2,: 128]! vld1.8 {d4},[r2,: 64] vst1.8 {d0-d1},[r4,: 128]! vst1.8 {d2-d3},[r4,: 128]! vst1.8 d4,[r4,: 64] ._skippostcopy: cmp r1,#1 bne ._skipfinalcopy add r2,r3,#288 add r4,r3,#144 vld1.8 {d0-d1},[r2,: 128]! vld1.8 {d2-d3},[r2,: 128]! vld1.8 {d4},[r2,: 64] vst1.8 {d0-d1},[r4,: 128]! vst1.8 {d2-d3},[r4,: 128]! 
vst1.8 d4,[r4,: 64] ._skipfinalcopy: add r1,r1,#1 cmp r1,#12 blo ._invertloop add r1,r3,#144 ldr r2,[r1],#4 ldr r3,[r1],#4 ldr r4,[r1],#4 ldr r5,[r1],#4 ldr r6,[r1],#4 ldr r7,[r1],#4 ldr r8,[r1],#4 ldr r9,[r1],#4 ldr r10,[r1],#4 ldr r1,[r1] add r11,r1,r1,LSL #4 add r11,r11,r1,LSL #1 add r11,r11,#16777216 mov r11,r11,ASR #25 add r11,r11,r2 mov r11,r11,ASR #26 add r11,r11,r3 mov r11,r11,ASR #25 add r11,r11,r4 mov r11,r11,ASR #26 add r11,r11,r5 mov r11,r11,ASR #25 add r11,r11,r6 mov r11,r11,ASR #26 add r11,r11,r7 mov r11,r11,ASR #25 add r11,r11,r8 mov r11,r11,ASR #26 add r11,r11,r9 mov r11,r11,ASR #25 add r11,r11,r10 mov r11,r11,ASR #26 add r11,r11,r1 mov r11,r11,ASR #25 add r2,r2,r11 add r2,r2,r11,LSL #1 add r2,r2,r11,LSL #4 mov r11,r2,ASR #26 add r3,r3,r11 sub r2,r2,r11,LSL #26 mov r11,r3,ASR #25 add r4,r4,r11 sub r3,r3,r11,LSL #25 mov r11,r4,ASR #26 add r5,r5,r11 sub r4,r4,r11,LSL #26 mov r11,r5,ASR #25 add r6,r6,r11 sub r5,r5,r11,LSL #25 mov r11,r6,ASR #26 add r7,r7,r11 sub r6,r6,r11,LSL #26 mov r11,r7,ASR #25 add r8,r8,r11 sub r7,r7,r11,LSL #25 mov r11,r8,ASR #26 add r9,r9,r11 sub r8,r8,r11,LSL #26 mov r11,r9,ASR #25 add r10,r10,r11 sub r9,r9,r11,LSL #25 mov r11,r10,ASR #26 add r1,r1,r11 sub r10,r10,r11,LSL #26 mov r11,r1,ASR #25 sub r1,r1,r11,LSL #25 add r2,r2,r3,LSL #26 mov r3,r3,LSR #6 add r3,r3,r4,LSL #19 mov r4,r4,LSR #13 add r4,r4,r5,LSL #13 mov r5,r5,LSR #19 add r5,r5,r6,LSL #6 add r6,r7,r8,LSL #25 mov r7,r8,LSR #7 add r7,r7,r9,LSL #19 mov r8,r9,LSR #13 add r8,r8,r10,LSL #12 mov r9,r10,LSR #20 add r1,r9,r1,LSL #6 str r2,[r0],#4 str r3,[r0],#4 str r4,[r0],#4 str r5,[r0],#4 str r6,[r0],#4 str r7,[r0],#4 str r8,[r0],#4 str r1,[r0] ldrd r4,[sp,#0] ldrd r6,[sp,#8] ldrd r8,[sp,#16] ldrd r10,[sp,#24] ldr r12,[sp,#480] ldr r14,[sp,#484] ldr r0,=0 mov sp,r12 vpop {q4,q5,q6,q7} bx lr #endif /* !OPENSSL_NO_ASM && OPENSSL_ARM && __ELF__ */
marvin-hansen/iggy-streaming-system
104,832
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p256/p256_montjscalarmul.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Montgomery-Jacobian form scalar multiplication for P-256
// Input scalar[4], point[12]; output res[12]
//
// extern void p256_montjscalarmul
//   (uint64_t res[static 12],
//    uint64_t scalar[static 4],
//    uint64_t point[static 12]);
//
// This function is a variant of its affine point version p256_scalarmul.
// Here, input and output points are assumed to be in Jacobian form with
// their coordinates in the Montgomery domain. Thus, if priming indicates
// Montgomery form, x' = (2^256 * x) mod p_256 etc., each point argument
// is a triple (x',y',z') representing the affine point (x/z^2,y/z^3) when
// z' is nonzero or the point at infinity (group identity) if z' = 0.
//
// Given scalar = n and point = P, assumed to be on the NIST elliptic
// curve P-256, returns a representation of n * P. If the result is the
// point at infinity (either because the input point was or because the
// scalar was a multiple of p_256) then the output is guaranteed to
// represent the point at infinity, i.e. to have its z coordinate zero.
//
// Standard x86-64 ABI: RDI = res, RSI = scalar, RDX = point
// Microsoft x64 ABI: RCX = res, RDX = scalar, R8 = point
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(p256_montjscalarmul)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(p256_montjscalarmul)
        .text
        .balign 4

// Size of individual field elements

#define NUMSIZE 32

// Intermediate variables on the stack. Uppercase syntactic variants
// make x86_att version simpler to generate.

#define SCALARB (0*NUMSIZE)
#define scalarb (0*NUMSIZE)(%rsp)
#define ACC (1*NUMSIZE)
#define acc (1*NUMSIZE)(%rsp)
#define TABENT (4*NUMSIZE)
#define tabent (4*NUMSIZE)(%rsp)
#define TAB (7*NUMSIZE)
#define tab (7*NUMSIZE)(%rsp)
#define res (31*NUMSIZE)(%rsp)
#define NSPACE (32*NUMSIZE)

// Avoid using .rep for the sake of the BoringSSL/AWS-LC delocator,
// which doesn't accept repetitions, assembler macros etc.

#define selectblock(I) \
        cmpq $I, %rdi ; \
        cmovzq TAB+96*(I-1)(%rsp), %rax ; \
        cmovzq TAB+96*(I-1)+8(%rsp), %rbx ; \
        cmovzq TAB+96*(I-1)+16(%rsp), %rcx ; \
        cmovzq TAB+96*(I-1)+24(%rsp), %rdx ; \
        cmovzq TAB+96*(I-1)+32(%rsp), %r8 ; \
        cmovzq TAB+96*(I-1)+40(%rsp), %r9 ; \
        cmovzq TAB+96*(I-1)+48(%rsp), %r10 ; \
        cmovzq TAB+96*(I-1)+56(%rsp), %r11 ; \
        cmovzq TAB+96*(I-1)+64(%rsp), %r12 ; \
        cmovzq TAB+96*(I-1)+72(%rsp), %r13 ; \
        cmovzq TAB+96*(I-1)+80(%rsp), %r14 ; \
        cmovzq TAB+96*(I-1)+88(%rsp), %r15

S2N_BN_SYMBOL(p256_montjscalarmul):

// The Windows version literally calls the standard ABI version.
// This simplifies the proofs since subroutine offsets are fixed.

#if WINDOWS_ABI
        pushq %rdi
        pushq %rsi
        movq %rcx, %rdi
        movq %rdx, %rsi
        movq %r8, %rdx
        callq p256_montjscalarmul_standard
        popq %rsi
        popq %rdi
        ret

p256_montjscalarmul_standard:
#endif

// Real start of the standard ABI code.

        pushq %r15
        pushq %r14
        pushq %r13
        pushq %r12
        pushq %rbp
        pushq %rbx
        subq $NSPACE, %rsp

// Preserve the "res" and "point" input arguments. We load and process the
// scalar immediately so we don't bother preserving that input argument.
// Also, "point" is only needed early on and so its register gets re-used.
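A small aside on the representation the header comment pins down: coordinates are primed into the Montgomery domain with radix 2^256, and a Jacobian triple whose z coordinate is zero stands for the point at infinity. A minimal Python sketch of that mapping, using only the relations stated above (an illustration, not part of the assembly source):

p256 = 2**256 - 2**224 + 2**192 + 2**96 - 1    # the P-256 prime p_256
R = 2**256                                     # radix in x' = (2^256 * x) mod p_256

def from_mont(x):
    # Undo the priming x' = (2^256 * x) mod p_256.
    return x * pow(R, -1, p256) % p256

def jacobian_to_affine(xp, yp, zp):
    # (x', y', z') represents the affine point (x/z^2, y/z^3); z' = 0 is the identity.
    x, y, z = from_mont(xp), from_mont(yp), from_mont(zp)
    if z == 0:
        return None
    zi = pow(z, -1, p256)
    return (x * zi * zi % p256, y * pow(zi, 3, p256) % p256)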
movq %rdx, %rbx movq %rdi, res // Load the digits of group order n_256 = [%r15;%r14;%r13;%r12] movq $0xf3b9cac2fc632551, %r12 movq $0xbce6faada7179e84, %r13 movq $0xffffffffffffffff, %r14 movq $0xffffffff00000000, %r15 // First, reduce the input scalar mod n_256, i.e. conditionally subtract n_256 movq (%rsi), %r8 subq %r12, %r8 movq 8(%rsi), %r9 sbbq %r13, %r9 movq 16(%rsi), %r10 sbbq %r14, %r10 movq 24(%rsi), %r11 sbbq %r15, %r11 cmovcq (%rsi), %r8 cmovcq 8(%rsi), %r9 cmovcq 16(%rsi), %r10 cmovcq 24(%rsi), %r11 // Now if the top bit of the reduced scalar is set, negate it mod n_256, // i.e. do n |-> n_256 - n. Remember the sign in %rbp so we can // correspondingly negate the point below. subq %r8, %r12 sbbq %r9, %r13 sbbq %r10, %r14 sbbq %r11, %r15 movq %r11, %rbp shrq $63, %rbp cmovnzq %r12, %r8 cmovnzq %r13, %r9 cmovnzq %r14, %r10 cmovnzq %r15, %r11 // In either case then add the recoding constant 0x08888...888 to allow // signed digits. movq $0x8888888888888888, %rax addq %rax, %r8 adcq %rax, %r9 adcq %rax, %r10 adcq %rax, %r11 btc $63, %r11 movq %r8, SCALARB(%rsp) movq %r9, SCALARB+8(%rsp) movq %r10, SCALARB+16(%rsp) movq %r11, SCALARB+24(%rsp) // Set the tab[0] table entry to the input point = 1 * P, except // that we negate it if the top bit of the scalar was set. This // negation takes care over the y = 0 case to maintain all the // coordinates < p_256 throughout, even though triples (x,y,z) // with y = 0 can only represent a point on the curve when z = 0 // and it represents the point at infinity regardless of x and y. movq (%rbx), %rax movq %rax, TAB(%rsp) movq 8(%rbx), %rax movq %rax, TAB+8(%rsp) movq 16(%rbx), %rax movq %rax, TAB+16(%rsp) movq 24(%rbx), %rax movq %rax, TAB+24(%rsp) movq 32(%rbx), %r12 movq %r12, %rax movq 40(%rbx), %r13 orq %r13, %rax movq 48(%rbx), %r14 movq %r14, %rcx movq 56(%rbx), %r15 orq %r15, %rcx orq %rcx, %rax cmovzq %rax, %rbp xorl %r10d, %r10d leaq -1(%r10), %r8 movq $0x00000000ffffffff, %r11 movq %r11, %r9 negq %r11 subq %r12, %r8 sbbq %r13, %r9 sbbq %r14, %r10 sbbq %r15, %r11 testq %rbp, %rbp cmovzq %r12, %r8 cmovzq %r13, %r9 cmovzq %r14, %r10 cmovzq %r15, %r11 movq %r8, TAB+32(%rsp) movq %r9, TAB+40(%rsp) movq %r10, TAB+48(%rsp) movq %r11, TAB+56(%rsp) movq 64(%rbx), %rax movq %rax, TAB+64(%rsp) movq 72(%rbx), %rax movq %rax, TAB+72(%rsp) movq 80(%rbx), %rax movq %rax, TAB+80(%rsp) movq 88(%rbx), %rax movq %rax, TAB+88(%rsp) // Compute and record tab[1] = 2 * p, ..., tab[7] = 8 * P leaq TAB+96*1(%rsp), %rdi leaq TAB(%rsp), %rsi callq p256_montjscalarmul_p256_montjdouble leaq TAB+96*2(%rsp), %rdi leaq TAB+96*1(%rsp), %rsi leaq TAB(%rsp), %rdx callq p256_montjscalarmul_p256_montjadd leaq TAB+96*3(%rsp), %rdi leaq TAB+96*1(%rsp), %rsi callq p256_montjscalarmul_p256_montjdouble leaq TAB+96*4(%rsp), %rdi leaq TAB+96*3(%rsp), %rsi leaq TAB(%rsp), %rdx callq p256_montjscalarmul_p256_montjadd leaq TAB+96*5(%rsp), %rdi leaq TAB+96*2(%rsp), %rsi callq p256_montjscalarmul_p256_montjdouble leaq TAB+96*6(%rsp), %rdi leaq TAB+96*5(%rsp), %rsi leaq TAB(%rsp), %rdx callq p256_montjscalarmul_p256_montjadd leaq TAB+96*7(%rsp), %rdi leaq TAB+96*3(%rsp), %rsi callq p256_montjscalarmul_p256_montjdouble // Set up accumulator as table entry for top 4 bits (constant-time indexing) movq SCALARB+24(%rsp), %rdi shrq $60, %rdi xorl %eax, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx xorl %r8d, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d xorl %r12d, %r12d xorl %r13d, %r13d xorl %r14d, %r14d xorl %r15d, %r15d selectblock(1) selectblock(2) selectblock(3) 
selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) movq %rax, ACC(%rsp) movq %rbx, ACC+8(%rsp) movq %rcx, ACC+16(%rsp) movq %rdx, ACC+24(%rsp) movq %r8, ACC+32(%rsp) movq %r9, ACC+40(%rsp) movq %r10, ACC+48(%rsp) movq %r11, ACC+56(%rsp) movq %r12, ACC+64(%rsp) movq %r13, ACC+72(%rsp) movq %r14, ACC+80(%rsp) movq %r15, ACC+88(%rsp) // Main loop over size-4 bitfield movl $252, %ebp p256_montjscalarmul_mainloop: subq $4, %rbp leaq ACC(%rsp), %rsi leaq ACC(%rsp), %rdi callq p256_montjscalarmul_p256_montjdouble leaq ACC(%rsp), %rsi leaq ACC(%rsp), %rdi callq p256_montjscalarmul_p256_montjdouble leaq ACC(%rsp), %rsi leaq ACC(%rsp), %rdi callq p256_montjscalarmul_p256_montjdouble leaq ACC(%rsp), %rsi leaq ACC(%rsp), %rdi callq p256_montjscalarmul_p256_montjdouble movq %rbp, %rax shrq $6, %rax movq (%rsp,%rax,8), %rdi movq %rbp, %rcx shrq %cl, %rdi andq $15, %rdi subq $8, %rdi sbbq %rsi, %rsi // %rsi = sign of digit (-1 = negative) xorq %rsi, %rdi subq %rsi, %rdi // %rdi = absolute value of digit xorl %eax, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx xorl %r8d, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d xorl %r12d, %r12d xorl %r13d, %r13d xorl %r14d, %r14d xorl %r15d, %r15d selectblock(1) selectblock(2) selectblock(3) selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) // Store it to "tabent" with the y coordinate optionally negated // Again, do it carefully to give coordinates < p_256 even in // the degenerate case y = 0 (when z = 0 for points on the curve). movq %rax, TABENT(%rsp) movq %rbx, TABENT+8(%rsp) movq %rcx, TABENT+16(%rsp) movq %rdx, TABENT+24(%rsp) movq %r12, TABENT+64(%rsp) movq %r13, TABENT+72(%rsp) movq %r14, TABENT+80(%rsp) movq %r15, TABENT+88(%rsp) movq %r8, %rax xorl %r14d, %r14d orq %r9, %rax leaq -1(%r14), %r12 movq %r10, %rcx movq $0x00000000ffffffff, %r15 orq %r11, %rcx movq %r15, %r13 negq %r15 orq %rcx, %rax cmovzq %rax, %rsi subq %r8, %r12 sbbq %r9, %r13 sbbq %r10, %r14 sbbq %r11, %r15 testq %rsi, %rsi cmovnzq %r12, %r8 cmovnzq %r13, %r9 cmovnzq %r14, %r10 cmovnzq %r15, %r11 movq %r8, TABENT+32(%rsp) movq %r9, TABENT+40(%rsp) movq %r10, TABENT+48(%rsp) movq %r11, TABENT+56(%rsp) leaq TABENT(%rsp), %rdx leaq ACC(%rsp), %rsi leaq ACC(%rsp), %rdi callq p256_montjscalarmul_p256_montjadd testq %rbp, %rbp jne p256_montjscalarmul_mainloop // That's the end of the main loop, and we just need to copy the // result in "acc" to the output. 
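The scalar handling near the top of the routine (reduce mod n_256, negate when the top bit is set, add the 0x0888...888 bias) and the per-iteration digit step in the loop above can be summarized in a short Python sketch. This is an editorial illustration built only from the constants visible in the code; the helper names are mine, not from the source:

# Group order n_256 as loaded into %r12..%r15, and the recoding bias described in the
# comments ("add the recoding constant 0x08888...888 to allow signed digits").
n256 = 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551
BIAS = int("8" * 63, 16)

def recode(scalar):
    k = scalar % n256
    neg = k >> 255            # top bit set: use n_256 - k and negate the point instead
    if neg:
        k = n256 - k
    v = k + BIAS
    top = v >> 252            # the shrq $60 window that seeds the accumulator (0..8)
    digits = [((v >> i) & 15) - 8 for i in range(248, -4, -4)]   # signed digits, -8..7
    return neg, top, digits

def signed_window(v, bitpos):
    # The branch-free digit step above: shrq %cl / andq $15 / subq $8 / sbbq / xorq / subq.
    d = ((v >> bitpos) & 15) - 8
    sign = -(d < 0)           # 0 or -1, the all-ones mask left in %rsi
    mag = (d ^ sign) - sign   # absolute value, 0..8; indexes tab[], 0 keeps the accumulator
    return sign, mag

assert signed_window(0x3, 0) == (-1, 5)    # window value 3 encodes digit -5
assert signed_window(0xc0, 4) == (0, 4)    # window value 12 encodes digit +4

# Sanity check: the top window plus the signed digits reconstruct the reduced scalar.
neg, top, digits = recode(0xdeadbeef)
acc = top
for d in digits:
    acc = 16 * acc + d
assert neg == 0 and acc == 0xdeadbeef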
movq res, %rdi movq ACC(%rsp), %rax movq %rax, (%rdi) movq ACC+8(%rsp), %rax movq %rax, 8(%rdi) movq ACC+16(%rsp), %rax movq %rax, 16(%rdi) movq ACC+24(%rsp), %rax movq %rax, 24(%rdi) movq ACC+32(%rsp), %rax movq %rax, 32(%rdi) movq ACC+40(%rsp), %rax movq %rax, 40(%rdi) movq ACC+48(%rsp), %rax movq %rax, 48(%rdi) movq ACC+56(%rsp), %rax movq %rax, 56(%rdi) movq ACC+64(%rsp), %rax movq %rax, 64(%rdi) movq ACC+72(%rsp), %rax movq %rax, 72(%rdi) movq ACC+80(%rsp), %rax movq %rax, 80(%rdi) movq ACC+88(%rsp), %rax movq %rax, 88(%rdi) // Restore stack and registers and return addq $NSPACE, %rsp popq %rbx popq %rbp popq %r12 popq %r13 popq %r14 popq %r15 ret // Local copies of subroutines, complete clones at the moment p256_montjscalarmul_p256_montjadd: pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $0xe0, %rsp movq %rdx, %rbp movq 0x40(%rsi), %rdx mulxq %rdx, %r8, %r15 mulxq 0x48(%rsi), %r9, %r10 mulxq 0x58(%rsi), %r11, %r12 movq 0x50(%rsi), %rdx mulxq 0x58(%rsi), %r13, %r14 xorl %ecx, %ecx mulxq 0x40(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x48(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq 0x58(%rsi), %rdx mulxq 0x48(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rcx, %r13 adoxq %rcx, %r14 adcq %rcx, %r14 xorl %ecx, %ecx adcxq %r9, %r9 adoxq %r15, %r9 movq 0x48(%rsi), %rdx mulxq %rdx, %rax, %rdx adcxq %r10, %r10 adoxq %rax, %r10 adcxq %r11, %r11 adoxq %rdx, %r11 movq 0x50(%rsi), %rdx mulxq %rdx, %rax, %rdx adcxq %r12, %r12 adoxq %rax, %r12 adcxq %r13, %r13 adoxq %rdx, %r13 movq 0x58(%rsi), %rdx mulxq %rdx, %rax, %r15 adcxq %r14, %r14 adoxq %rax, %r14 adcxq %rcx, %r15 adoxq %rcx, %r15 xorl %ecx, %ecx movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq $0xffffffff00000001, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rcx, %r13 movl %ecx, %r9d adoxq %rcx, %r9 adcxq %rcx, %r9 addq %r9, %r14 adcq %rcx, %r15 movl %ecx, %r8d adcq %rcx, %r8 xorl %ecx, %ecx movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq $0xffffffff00000001, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %rcx, %r15 adoxq %rcx, %r8 adcq %rcx, %r8 movl $0x1, %r8d leaq -0x1(%rdx), %rdx leaq -0x1(%rcx), %rax movl $0xfffffffe, %r11d cmoveq %rcx, %r8 cmoveq %rcx, %rdx cmoveq %rcx, %rax cmoveq %rcx, %r11 addq %r8, %r12 adcq %rdx, %r13 adcq %rax, %r14 adcq %r11, %r15 movq %r12, (%rsp) movq %r13, 0x8(%rsp) movq %r14, 0x10(%rsp) movq %r15, 0x18(%rsp) movq 0x40(%rbp), %rdx mulxq %rdx, %r8, %r15 mulxq 0x48(%rbp), %r9, %r10 mulxq 0x58(%rbp), %r11, %r12 movq 0x50(%rbp), %rdx mulxq 0x58(%rbp), %r13, %r14 xorl %ecx, %ecx mulxq 0x40(%rbp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x48(%rbp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq 0x58(%rbp), %rdx mulxq 0x48(%rbp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rcx, %r13 adoxq %rcx, %r14 adcq %rcx, %r14 xorl %ecx, %ecx adcxq %r9, %r9 adoxq %r15, %r9 movq 0x48(%rbp), %rdx mulxq %rdx, %rax, %rdx adcxq %r10, %r10 adoxq %rax, %r10 adcxq %r11, %r11 adoxq %rdx, %r11 movq 0x50(%rbp), %rdx mulxq %rdx, %rax, %rdx adcxq %r12, %r12 adoxq %rax, %r12 adcxq %r13, %r13 adoxq %rdx, %r13 movq 0x58(%rbp), %rdx mulxq %rdx, %rax, %r15 adcxq %r14, %r14 adoxq %rax, %r14 adcxq %rcx, %r15 adoxq %rcx, 
%r15 xorl %ecx, %ecx movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq $0xffffffff00000001, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rcx, %r13 movl %ecx, %r9d adoxq %rcx, %r9 adcxq %rcx, %r9 addq %r9, %r14 adcq %rcx, %r15 movl %ecx, %r8d adcq %rcx, %r8 xorl %ecx, %ecx movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq $0xffffffff00000001, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %rcx, %r15 adoxq %rcx, %r8 adcq %rcx, %r8 movl $0x1, %r8d leaq -0x1(%rdx), %rdx leaq -0x1(%rcx), %rax movl $0xfffffffe, %r11d cmoveq %rcx, %r8 cmoveq %rcx, %rdx cmoveq %rcx, %rax cmoveq %rcx, %r11 addq %r8, %r12 adcq %rdx, %r13 adcq %rax, %r14 adcq %r11, %r15 movq %r12, 0xa0(%rsp) movq %r13, 0xa8(%rsp) movq %r14, 0xb0(%rsp) movq %r15, 0xb8(%rsp) xorl %r13d, %r13d movq 0x20(%rsi), %rdx mulxq 0x40(%rbp), %r8, %r9 mulxq 0x48(%rbp), %rbx, %r10 adcq %rbx, %r9 mulxq 0x50(%rbp), %rbx, %r11 adcq %rbx, %r10 mulxq 0x58(%rbp), %rbx, %r12 adcq %rbx, %r11 adcq %r13, %r12 movq 0x28(%rsi), %rdx xorl %r14d, %r14d mulxq 0x40(%rbp), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x48(%rbp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x50(%rbp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x58(%rbp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcq %r14, %r13 xorl %r15d, %r15d movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %r15, %r13 adoxq %r15, %r14 adcq %r15, %r14 movq 0x30(%rsi), %rdx xorl %r8d, %r8d mulxq 0x40(%rbp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x48(%rbp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x50(%rbp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adoxq %r8, %r14 mulxq 0x58(%rbp), %rax, %rbx adcq %rax, %r13 adcq %rbx, %r14 adcq %r8, %r15 movq 0x38(%rsi), %rdx xorl %r9d, %r9d mulxq 0x40(%rbp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x48(%rbp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x50(%rbp), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 adoxq %r9, %r15 mulxq 0x58(%rbp), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r9, %r8 xorl %r9d, %r9d movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %r9, %r15 adoxq %r9, %r8 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rdx adcq %r13, %rdx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0xc0(%rsp) movq %r13, 0xc8(%rsp) movq %r14, 0xd0(%rsp) movq %r15, 0xd8(%rsp) xorl %r13d, %r13d movq 0x20(%rbp), %rdx mulxq 0x40(%rsi), %r8, %r9 mulxq 0x48(%rsi), %rbx, %r10 adcq %rbx, %r9 mulxq 0x50(%rsi), %rbx, %r11 adcq %rbx, %r10 mulxq 0x58(%rsi), %rbx, %r12 adcq %rbx, %r11 adcq %r13, %r12 movq 0x28(%rbp), %rdx xorl %r14d, %r14d mulxq 0x40(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, 
%r10 mulxq 0x48(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x50(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x58(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcq %r14, %r13 xorl %r15d, %r15d movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %r15, %r13 adoxq %r15, %r14 adcq %r15, %r14 movq 0x30(%rbp), %rdx xorl %r8d, %r8d mulxq 0x40(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x48(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x50(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adoxq %r8, %r14 mulxq 0x58(%rsi), %rax, %rbx adcq %rax, %r13 adcq %rbx, %r14 adcq %r8, %r15 movq 0x38(%rbp), %rdx xorl %r9d, %r9d mulxq 0x40(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x48(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x50(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 adoxq %r9, %r15 mulxq 0x58(%rsi), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r9, %r8 xorl %r9d, %r9d movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %r9, %r15 adoxq %r9, %r8 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rdx adcq %r13, %rdx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x20(%rsp) movq %r13, 0x28(%rsp) movq %r14, 0x30(%rsp) movq %r15, 0x38(%rsp) xorl %r13d, %r13d movq 0x0(%rbp), %rdx mulxq (%rsp), %r8, %r9 mulxq 0x8(%rsp), %rbx, %r10 adcq %rbx, %r9 mulxq 0x10(%rsp), %rbx, %r11 adcq %rbx, %r10 mulxq 0x18(%rsp), %rbx, %r12 adcq %rbx, %r11 adcq %r13, %r12 movq 0x8(%rbp), %rdx xorl %r14d, %r14d mulxq (%rsp), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x8(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x10(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x18(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcq %r14, %r13 xorl %r15d, %r15d movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %r15, %r13 adoxq %r15, %r14 adcq %r15, %r14 movq 0x10(%rbp), %rdx xorl %r8d, %r8d mulxq (%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x8(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x10(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adoxq %r8, %r14 mulxq 0x18(%rsp), %rax, %rbx adcq %rax, %r13 adcq %rbx, %r14 adcq %r8, %r15 movq 0x18(%rbp), %rdx xorl %r9d, %r9d mulxq (%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x8(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x10(%rsp), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 adoxq %r9, %r15 mulxq 0x18(%rsp), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r9, %r8 xorl %r9d, %r9d movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r10, %rax, %rbx adcxq %rax, 
%r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %r9, %r15 adoxq %r9, %r8 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rdx adcq %r13, %rdx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x40(%rsp) movq %r13, 0x48(%rsp) movq %r14, 0x50(%rsp) movq %r15, 0x58(%rsp) xorl %r13d, %r13d movq (%rsi), %rdx mulxq 0xa0(%rsp), %r8, %r9 mulxq 0xa8(%rsp), %rbx, %r10 adcq %rbx, %r9 mulxq 0xb0(%rsp), %rbx, %r11 adcq %rbx, %r10 mulxq 0xb8(%rsp), %rbx, %r12 adcq %rbx, %r11 adcq %r13, %r12 movq 0x8(%rsi), %rdx xorl %r14d, %r14d mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0xb0(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0xb8(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcq %r14, %r13 xorl %r15d, %r15d movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %r15, %r13 adoxq %r15, %r14 adcq %r15, %r14 movq 0x10(%rsi), %rdx xorl %r8d, %r8d mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0xb0(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adoxq %r8, %r14 mulxq 0xb8(%rsp), %rax, %rbx adcq %rax, %r13 adcq %rbx, %r14 adcq %r8, %r15 movq 0x18(%rsi), %rdx xorl %r9d, %r9d mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0xb0(%rsp), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 adoxq %r9, %r15 mulxq 0xb8(%rsp), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r9, %r8 xorl %r9d, %r9d movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %r9, %r15 adoxq %r9, %r8 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rdx adcq %r13, %rdx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x80(%rsp) movq %r13, 0x88(%rsp) movq %r14, 0x90(%rsp) movq %r15, 0x98(%rsp) xorl %r13d, %r13d movq 0x20(%rsp), %rdx mulxq (%rsp), %r8, %r9 mulxq 0x8(%rsp), %rbx, %r10 adcq %rbx, %r9 mulxq 0x10(%rsp), %rbx, %r11 adcq %rbx, %r10 mulxq 0x18(%rsp), %rbx, %r12 adcq %rbx, %r11 adcq %r13, %r12 movq 0x28(%rsp), %rdx xorl %r14d, %r14d mulxq (%rsp), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x8(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x10(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x18(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcq %r14, %r13 xorl %r15d, %r15d movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %r15, %r13 adoxq %r15, %r14 adcq %r15, %r14 movq 0x30(%rsp), %rdx xorl %r8d, %r8d mulxq (%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 
0x8(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x10(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adoxq %r8, %r14 mulxq 0x18(%rsp), %rax, %rbx adcq %rax, %r13 adcq %rbx, %r14 adcq %r8, %r15 movq 0x38(%rsp), %rdx xorl %r9d, %r9d mulxq (%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x8(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x10(%rsp), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 adoxq %r9, %r15 mulxq 0x18(%rsp), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r9, %r8 xorl %r9d, %r9d movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %r9, %r15 adoxq %r9, %r8 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rdx adcq %r13, %rdx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x20(%rsp) movq %r13, 0x28(%rsp) movq %r14, 0x30(%rsp) movq %r15, 0x38(%rsp) xorl %r13d, %r13d movq 0xc0(%rsp), %rdx mulxq 0xa0(%rsp), %r8, %r9 mulxq 0xa8(%rsp), %rbx, %r10 adcq %rbx, %r9 mulxq 0xb0(%rsp), %rbx, %r11 adcq %rbx, %r10 mulxq 0xb8(%rsp), %rbx, %r12 adcq %rbx, %r11 adcq %r13, %r12 movq 0xc8(%rsp), %rdx xorl %r14d, %r14d mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0xb0(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0xb8(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcq %r14, %r13 xorl %r15d, %r15d movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %r15, %r13 adoxq %r15, %r14 adcq %r15, %r14 movq 0xd0(%rsp), %rdx xorl %r8d, %r8d mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0xb0(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adoxq %r8, %r14 mulxq 0xb8(%rsp), %rax, %rbx adcq %rax, %r13 adcq %rbx, %r14 adcq %r8, %r15 movq 0xd8(%rsp), %rdx xorl %r9d, %r9d mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0xb0(%rsp), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 adoxq %r9, %r15 mulxq 0xb8(%rsp), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r9, %r8 xorl %r9d, %r9d movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %r9, %r15 adoxq %r9, %r8 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rdx adcq %r13, %rdx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0xc0(%rsp) movq %r13, 0xc8(%rsp) movq %r14, 0xd0(%rsp) movq %r15, 0xd8(%rsp) movq 0x40(%rsp), %rax subq 0x80(%rsp), %rax movq 0x48(%rsp), %rcx sbbq 0x88(%rsp), %rcx movq 0x50(%rsp), %r8 sbbq 0x90(%rsp), %r8 movq 0x58(%rsp), %r9 sbbq 0x98(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 
xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0xa0(%rsp) adcq %r10, %rcx movq %rcx, 0xa8(%rsp) adcq $0x0, %r8 movq %r8, 0xb0(%rsp) adcq %rdx, %r9 movq %r9, 0xb8(%rsp) movq 0x20(%rsp), %rax subq 0xc0(%rsp), %rax movq 0x28(%rsp), %rcx sbbq 0xc8(%rsp), %rcx movq 0x30(%rsp), %r8 sbbq 0xd0(%rsp), %r8 movq 0x38(%rsp), %r9 sbbq 0xd8(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0x20(%rsp) adcq %r10, %rcx movq %rcx, 0x28(%rsp) adcq $0x0, %r8 movq %r8, 0x30(%rsp) adcq %rdx, %r9 movq %r9, 0x38(%rsp) movq 0xa0(%rsp), %rdx mulxq %rdx, %r8, %r15 mulxq 0xa8(%rsp), %r9, %r10 mulxq 0xb8(%rsp), %r11, %r12 movq 0xb0(%rsp), %rdx mulxq 0xb8(%rsp), %r13, %r14 xorl %ecx, %ecx mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq 0xb8(%rsp), %rdx mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rcx, %r13 adoxq %rcx, %r14 adcq %rcx, %r14 xorl %ecx, %ecx adcxq %r9, %r9 adoxq %r15, %r9 movq 0xa8(%rsp), %rdx mulxq %rdx, %rax, %rdx adcxq %r10, %r10 adoxq %rax, %r10 adcxq %r11, %r11 adoxq %rdx, %r11 movq 0xb0(%rsp), %rdx mulxq %rdx, %rax, %rdx adcxq %r12, %r12 adoxq %rax, %r12 adcxq %r13, %r13 adoxq %rdx, %r13 movq 0xb8(%rsp), %rdx mulxq %rdx, %rax, %r15 adcxq %r14, %r14 adoxq %rax, %r14 adcxq %rcx, %r15 adoxq %rcx, %r15 xorl %ecx, %ecx movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq $0xffffffff00000001, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rcx, %r13 movl %ecx, %r9d adoxq %rcx, %r9 adcxq %rcx, %r9 addq %r9, %r14 adcq %rcx, %r15 movl %ecx, %r8d adcq %rcx, %r8 xorl %ecx, %ecx movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq $0xffffffff00000001, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %rcx, %r15 adoxq %rcx, %r8 adcq %rcx, %r8 movl $0x1, %r8d leaq -0x1(%rdx), %rdx leaq -0x1(%rcx), %rax movl $0xfffffffe, %r11d cmoveq %rcx, %r8 cmoveq %rcx, %rdx cmoveq %rcx, %rax cmoveq %rcx, %r11 addq %r8, %r12 adcq %rdx, %r13 adcq %rax, %r14 adcq %r11, %r15 movq %r12, 0x60(%rsp) movq %r13, 0x68(%rsp) movq %r14, 0x70(%rsp) movq %r15, 0x78(%rsp) movq 0x20(%rsp), %rdx mulxq %rdx, %r8, %r15 mulxq 0x28(%rsp), %r9, %r10 mulxq 0x38(%rsp), %r11, %r12 movq 0x30(%rsp), %rdx mulxq 0x38(%rsp), %r13, %r14 xorl %ecx, %ecx mulxq 0x20(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x28(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq 0x38(%rsp), %rdx mulxq 0x28(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rcx, %r13 adoxq %rcx, %r14 adcq %rcx, %r14 xorl %ecx, %ecx adcxq %r9, %r9 adoxq %r15, %r9 movq 0x28(%rsp), %rdx mulxq %rdx, %rax, %rdx adcxq %r10, %r10 adoxq %rax, %r10 adcxq %r11, %r11 adoxq %rdx, %r11 movq 0x30(%rsp), %rdx mulxq %rdx, %rax, %rdx adcxq %r12, %r12 adoxq %rax, %r12 adcxq %r13, %r13 adoxq %rdx, %r13 movq 0x38(%rsp), %rdx mulxq %rdx, %rax, %r15 adcxq %r14, %r14 adoxq %rax, %r14 adcxq %rcx, %r15 adoxq %rcx, %r15 xorl %ecx, %ecx movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq $0xffffffff00000001, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 
%r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rcx, %r13 movl %ecx, %r9d adoxq %rcx, %r9 adcxq %rcx, %r9 addq %r9, %r14 adcq %rcx, %r15 movl %ecx, %r8d adcq %rcx, %r8 xorl %ecx, %ecx movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq $0xffffffff00000001, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %rcx, %r15 adoxq %rcx, %r8 adcq %rcx, %r8 movl $0x1, %ebx addq %r12, %rbx leaq -0x1(%rdx), %rdx adcq %r13, %rdx leaq -0x1(%rcx), %rcx movq %rcx, %rax adcq %r14, %rcx movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rbx, %r12 cmovbq %rdx, %r13 cmovbq %rcx, %r14 cmovbq %r11, %r15 movq %r12, (%rsp) movq %r13, 0x8(%rsp) movq %r14, 0x10(%rsp) movq %r15, 0x18(%rsp) xorl %r13d, %r13d movq 0x80(%rsp), %rdx mulxq 0x60(%rsp), %r8, %r9 mulxq 0x68(%rsp), %rbx, %r10 adcq %rbx, %r9 mulxq 0x70(%rsp), %rbx, %r11 adcq %rbx, %r10 mulxq 0x78(%rsp), %rbx, %r12 adcq %rbx, %r11 adcq %r13, %r12 movq 0x88(%rsp), %rdx xorl %r14d, %r14d mulxq 0x60(%rsp), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x68(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x70(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x78(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcq %r14, %r13 xorl %r15d, %r15d movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %r15, %r13 adoxq %r15, %r14 adcq %r15, %r14 movq 0x90(%rsp), %rdx xorl %r8d, %r8d mulxq 0x60(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x68(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x70(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adoxq %r8, %r14 mulxq 0x78(%rsp), %rax, %rbx adcq %rax, %r13 adcq %rbx, %r14 adcq %r8, %r15 movq 0x98(%rsp), %rdx xorl %r9d, %r9d mulxq 0x60(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x68(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x70(%rsp), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 adoxq %r9, %r15 mulxq 0x78(%rsp), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r9, %r8 xorl %r9d, %r9d movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %r9, %r15 adoxq %r9, %r8 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rdx adcq %r13, %rdx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x80(%rsp) movq %r13, 0x88(%rsp) movq %r14, 0x90(%rsp) movq %r15, 0x98(%rsp) xorl %r13d, %r13d movq 0x40(%rsp), %rdx mulxq 0x60(%rsp), %r8, %r9 mulxq 0x68(%rsp), %rbx, %r10 adcq %rbx, %r9 mulxq 0x70(%rsp), %rbx, %r11 adcq %rbx, %r10 mulxq 0x78(%rsp), %rbx, %r12 adcq %rbx, %r11 adcq %r13, %r12 movq 0x48(%rsp), %rdx xorl %r14d, %r14d mulxq 0x60(%rsp), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x68(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x70(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x78(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcq %r14, %r13 xorl 
%r15d, %r15d movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %r15, %r13 adoxq %r15, %r14 adcq %r15, %r14 movq 0x50(%rsp), %rdx xorl %r8d, %r8d mulxq 0x60(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x68(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x70(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adoxq %r8, %r14 mulxq 0x78(%rsp), %rax, %rbx adcq %rax, %r13 adcq %rbx, %r14 adcq %r8, %r15 movq 0x58(%rsp), %rdx xorl %r9d, %r9d mulxq 0x60(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x68(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x70(%rsp), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 adoxq %r9, %r15 mulxq 0x78(%rsp), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r9, %r8 xorl %r9d, %r9d movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %r9, %r15 adoxq %r9, %r8 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rdx adcq %r13, %rdx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x40(%rsp) movq %r13, 0x48(%rsp) movq %r14, 0x50(%rsp) movq %r15, 0x58(%rsp) movq (%rsp), %rax subq 0x80(%rsp), %rax movq 0x8(%rsp), %rcx sbbq 0x88(%rsp), %rcx movq 0x10(%rsp), %r8 sbbq 0x90(%rsp), %r8 movq 0x18(%rsp), %r9 sbbq 0x98(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, (%rsp) adcq %r10, %rcx movq %rcx, 0x8(%rsp) adcq $0x0, %r8 movq %r8, 0x10(%rsp) adcq %rdx, %r9 movq %r9, 0x18(%rsp) movq 0x40(%rsp), %rax subq 0x80(%rsp), %rax movq 0x48(%rsp), %rcx sbbq 0x88(%rsp), %rcx movq 0x50(%rsp), %r8 sbbq 0x90(%rsp), %r8 movq 0x58(%rsp), %r9 sbbq 0x98(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0x60(%rsp) adcq %r10, %rcx movq %rcx, 0x68(%rsp) adcq $0x0, %r8 movq %r8, 0x70(%rsp) adcq %rdx, %r9 movq %r9, 0x78(%rsp) xorl %r13d, %r13d movq 0x40(%rsi), %rdx mulxq 0xa0(%rsp), %r8, %r9 mulxq 0xa8(%rsp), %rbx, %r10 adcq %rbx, %r9 mulxq 0xb0(%rsp), %rbx, %r11 adcq %rbx, %r10 mulxq 0xb8(%rsp), %rbx, %r12 adcq %rbx, %r11 adcq %r13, %r12 movq 0x48(%rsi), %rdx xorl %r14d, %r14d mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0xb0(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0xb8(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcq %r14, %r13 xorl %r15d, %r15d movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %r15, %r13 adoxq %r15, %r14 adcq %r15, %r14 movq 0x50(%rsi), %rdx xorl %r8d, %r8d mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0xb0(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adoxq %r8, %r14 
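Each of the long mulx/adcx/adox blocks in these local subroutines forms a 4x4-word product and then runs two reduction rounds keyed by $0x100000000 and $0xffffffff00000001 (or the notq/leaq form of the latter); taken together with the header's note that coordinates stay in the Montgomery domain, each such block amounts to a Montgomery multiplication modulo p_256. A whole-integer Python sketch of what one block computes (not a word-by-word model, and the helper names are mine):

p256 = 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff
R = 2**256

def montmul(a, b):
    # Montgomery product: a * b * R^(-1) mod p_256.
    return a * b * pow(R, -1, p256) % p256

def to_mont(x):
    return x * R % p256

# Multiplying two Montgomery images keeps the result in Montgomery form,
# and Montgomery-multiplying by plain 1 maps back out of the domain.
assert montmul(to_mont(3), to_mont(5)) == to_mont(15)
assert montmul(to_mont(7), 1) == 7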
mulxq 0xb8(%rsp), %rax, %rbx adcq %rax, %r13 adcq %rbx, %r14 adcq %r8, %r15 movq 0x58(%rsi), %rdx xorl %r9d, %r9d mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0xb0(%rsp), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 adoxq %r9, %r15 mulxq 0xb8(%rsp), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r9, %r8 xorl %r9d, %r9d movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %r9, %r15 adoxq %r9, %r8 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rdx adcq %r13, %rdx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0xa0(%rsp) movq %r13, 0xa8(%rsp) movq %r14, 0xb0(%rsp) movq %r15, 0xb8(%rsp) movq (%rsp), %rax subq 0x40(%rsp), %rax movq 0x8(%rsp), %rcx sbbq 0x48(%rsp), %rcx movq 0x10(%rsp), %r8 sbbq 0x50(%rsp), %r8 movq 0x18(%rsp), %r9 sbbq 0x58(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, (%rsp) adcq %r10, %rcx movq %rcx, 0x8(%rsp) adcq $0x0, %r8 movq %r8, 0x10(%rsp) adcq %rdx, %r9 movq %r9, 0x18(%rsp) movq 0x80(%rsp), %rax subq (%rsp), %rax movq 0x88(%rsp), %rcx sbbq 0x8(%rsp), %rcx movq 0x90(%rsp), %r8 sbbq 0x10(%rsp), %r8 movq 0x98(%rsp), %r9 sbbq 0x18(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0x80(%rsp) adcq %r10, %rcx movq %rcx, 0x88(%rsp) adcq $0x0, %r8 movq %r8, 0x90(%rsp) adcq %rdx, %r9 movq %r9, 0x98(%rsp) xorl %r13d, %r13d movq 0xc0(%rsp), %rdx mulxq 0x60(%rsp), %r8, %r9 mulxq 0x68(%rsp), %rbx, %r10 adcq %rbx, %r9 mulxq 0x70(%rsp), %rbx, %r11 adcq %rbx, %r10 mulxq 0x78(%rsp), %rbx, %r12 adcq %rbx, %r11 adcq %r13, %r12 movq 0xc8(%rsp), %rdx xorl %r14d, %r14d mulxq 0x60(%rsp), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x68(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x70(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x78(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcq %r14, %r13 xorl %r15d, %r15d movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %r15, %r13 adoxq %r15, %r14 adcq %r15, %r14 movq 0xd0(%rsp), %rdx xorl %r8d, %r8d mulxq 0x60(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x68(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x70(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adoxq %r8, %r14 mulxq 0x78(%rsp), %rax, %rbx adcq %rax, %r13 adcq %rbx, %r14 adcq %r8, %r15 movq 0xd8(%rsp), %rdx xorl %r9d, %r9d mulxq 0x60(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x68(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x70(%rsp), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 adoxq %r9, %r15 mulxq 0x78(%rsp), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r9, %r8 xorl %r9d, %r9d movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r10, 
%rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %r9, %r15 adoxq %r9, %r8 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rdx adcq %r13, %rdx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x60(%rsp) movq %r13, 0x68(%rsp) movq %r14, 0x70(%rsp) movq %r15, 0x78(%rsp) xorl %r13d, %r13d movq 0x40(%rbp), %rdx mulxq 0xa0(%rsp), %r8, %r9 mulxq 0xa8(%rsp), %rbx, %r10 adcq %rbx, %r9 mulxq 0xb0(%rsp), %rbx, %r11 adcq %rbx, %r10 mulxq 0xb8(%rsp), %rbx, %r12 adcq %rbx, %r11 adcq %r13, %r12 movq 0x48(%rbp), %rdx xorl %r14d, %r14d mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0xb0(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0xb8(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcq %r14, %r13 xorl %r15d, %r15d movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %r15, %r13 adoxq %r15, %r14 adcq %r15, %r14 movq 0x50(%rbp), %rdx xorl %r8d, %r8d mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0xb0(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adoxq %r8, %r14 mulxq 0xb8(%rsp), %rax, %rbx adcq %rax, %r13 adcq %rbx, %r14 adcq %r8, %r15 movq 0x58(%rbp), %rdx xorl %r9d, %r9d mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0xb0(%rsp), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 adoxq %r9, %r15 mulxq 0xb8(%rsp), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r9, %r8 xorl %r9d, %r9d movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %r9, %r15 adoxq %r9, %r8 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rdx adcq %r13, %rdx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0xa0(%rsp) movq %r13, 0xa8(%rsp) movq %r14, 0xb0(%rsp) movq %r15, 0xb8(%rsp) xorl %r13d, %r13d movq 0x80(%rsp), %rdx mulxq 0x20(%rsp), %r8, %r9 mulxq 0x28(%rsp), %rbx, %r10 adcq %rbx, %r9 mulxq 0x30(%rsp), %rbx, %r11 adcq %rbx, %r10 mulxq 0x38(%rsp), %rbx, %r12 adcq %rbx, %r11 adcq %r13, %r12 movq 0x88(%rsp), %rdx xorl %r14d, %r14d mulxq 0x20(%rsp), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x28(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x30(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x38(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcq %r14, %r13 xorl %r15d, %r15d movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %r15, %r13 adoxq %r15, %r14 adcq %r15, %r14 movq 0x90(%rsp), %rdx xorl %r8d, %r8d mulxq 0x20(%rsp), %rax, 
%rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x28(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x30(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adoxq %r8, %r14 mulxq 0x38(%rsp), %rax, %rbx adcq %rax, %r13 adcq %rbx, %r14 adcq %r8, %r15 movq 0x98(%rsp), %rdx xorl %r9d, %r9d mulxq 0x20(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x28(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x30(%rsp), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 adoxq %r9, %r15 mulxq 0x38(%rsp), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r9, %r8 xorl %r9d, %r9d movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %r9, %r15 adoxq %r9, %r8 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rdx adcq %r13, %rdx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x80(%rsp) movq %r13, 0x88(%rsp) movq %r14, 0x90(%rsp) movq %r15, 0x98(%rsp) movq 0x80(%rsp), %rax subq 0x60(%rsp), %rax movq 0x88(%rsp), %rcx sbbq 0x68(%rsp), %rcx movq 0x90(%rsp), %r8 sbbq 0x70(%rsp), %r8 movq 0x98(%rsp), %r9 sbbq 0x78(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0x80(%rsp) adcq %r10, %rcx movq %rcx, 0x88(%rsp) adcq $0x0, %r8 movq %r8, 0x90(%rsp) adcq %rdx, %r9 movq %r9, 0x98(%rsp) movq 0x40(%rsi), %r8 movq 0x48(%rsi), %r9 movq 0x50(%rsi), %r10 movq 0x58(%rsi), %r11 movq %r8, %rax movq %r9, %rdx orq %r10, %rax orq %r11, %rdx orq %rdx, %rax negq %rax sbbq %rax, %rax movq 0x40(%rbp), %r12 movq 0x48(%rbp), %r13 movq 0x50(%rbp), %r14 movq 0x58(%rbp), %r15 movq %r12, %rbx movq %r13, %rdx orq %r14, %rbx orq %r15, %rdx orq %rdx, %rbx negq %rbx sbbq %rbx, %rbx cmpq %rax, %rbx cmovbq %r8, %r12 cmovbq %r9, %r13 cmovbq %r10, %r14 cmovbq %r11, %r15 cmoveq 0xa0(%rsp), %r12 cmoveq 0xa8(%rsp), %r13 cmoveq 0xb0(%rsp), %r14 cmoveq 0xb8(%rsp), %r15 movq (%rsp), %rax cmovbq (%rsi), %rax cmova 0x0(%rbp), %rax movq 0x8(%rsp), %rbx cmovbq 0x8(%rsi), %rbx cmova 0x8(%rbp), %rbx movq 0x10(%rsp), %rcx cmovbq 0x10(%rsi), %rcx cmova 0x10(%rbp), %rcx movq 0x18(%rsp), %rdx cmovbq 0x18(%rsi), %rdx cmova 0x18(%rbp), %rdx movq 0x80(%rsp), %r8 cmovbq 0x20(%rsi), %r8 cmova 0x20(%rbp), %r8 movq 0x88(%rsp), %r9 cmovbq 0x28(%rsi), %r9 cmova 0x28(%rbp), %r9 movq 0x90(%rsp), %r10 cmovbq 0x30(%rsi), %r10 cmova 0x30(%rbp), %r10 movq 0x98(%rsp), %r11 cmovbq 0x38(%rsi), %r11 cmova 0x38(%rbp), %r11 movq %rax, (%rdi) movq %rbx, 0x8(%rdi) movq %rcx, 0x10(%rdi) movq %rdx, 0x18(%rdi) movq %r8, 0x20(%rdi) movq %r9, 0x28(%rdi) movq %r10, 0x30(%rdi) movq %r11, 0x38(%rdi) movq %r12, 0x40(%rdi) movq %r13, 0x48(%rdi) movq %r14, 0x50(%rdi) movq %r15, 0x58(%rdi) addq $0xe0, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx ret p256_montjscalarmul_p256_montjdouble: pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $0xc0, %rsp movq 0x40(%rsi), %rdx mulxq %rdx, %r8, %r15 mulxq 0x48(%rsi), %r9, %r10 mulxq 0x58(%rsi), %r11, %r12 movq 0x50(%rsi), %rdx mulxq 0x58(%rsi), %r13, %r14 xorl %ebp, %ebp mulxq 0x40(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x48(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq 0x58(%rsi), %rdx mulxq 0x48(%rsi), %rax, %rbx adcxq %rax, %r12 
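Back at the tail of the addition subroutine above (just before the doubling routine begins), the orq/negq/sbbq tests on the two z coordinates feed a cmovb/cmova/cmove ladder; read plainly, it passes one operand through unchanged when the other has z = 0, i.e. is the point at infinity. This is my reading of the selection logic rather than something stated in a comment, so treat the sketch as an assumption:

def jadd_select(p1, p2, formula_sum):
    # p1, p2 and formula_sum are Jacobian triples (x, y, z); z == 0 marks the identity.
    if p1[2] == 0:
        return p2
    if p2[2] == 0:
        return p1
    return formula_sum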
adoxq %rbx, %r13 adcxq %rbp, %r13 adoxq %rbp, %r14 adcq %rbp, %r14 xorl %ebp, %ebp adcxq %r9, %r9 adoxq %r15, %r9 movq 0x48(%rsi), %rdx mulxq %rdx, %rax, %rdx adcxq %r10, %r10 adoxq %rax, %r10 adcxq %r11, %r11 adoxq %rdx, %r11 movq 0x50(%rsi), %rdx mulxq %rdx, %rax, %rdx adcxq %r12, %r12 adoxq %rax, %r12 adcxq %r13, %r13 adoxq %rdx, %r13 movq 0x58(%rsi), %rdx mulxq %rdx, %rax, %r15 adcxq %r14, %r14 adoxq %rax, %r14 adcxq %rbp, %r15 adoxq %rbp, %r15 xorl %ebp, %ebp movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq $0xffffffff00000001, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rbp, %r13 movl %ebp, %r9d adoxq %rbp, %r9 adcxq %rbp, %r9 addq %r9, %r14 adcq %rbp, %r15 movl %ebp, %r8d adcq %rbp, %r8 xorl %ebp, %ebp movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq $0xffffffff00000001, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %rbp, %r15 adoxq %rbp, %r8 adcq %rbp, %r8 movl $0x1, %ecx addq %r12, %rcx leaq -0x1(%rdx), %rdx adcq %r13, %rdx leaq -0x1(%rbp), %rbp movq %rbp, %rax adcq %r14, %rbp movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %rbp, %r14 cmovbq %r11, %r15 movq %r12, (%rsp) movq %r13, 0x8(%rsp) movq %r14, 0x10(%rsp) movq %r15, 0x18(%rsp) movq 0x20(%rsi), %rdx mulxq %rdx, %r8, %r15 mulxq 0x28(%rsi), %r9, %r10 mulxq 0x38(%rsi), %r11, %r12 movq 0x30(%rsi), %rdx mulxq 0x38(%rsi), %r13, %r14 xorl %ebp, %ebp mulxq 0x20(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq 0x38(%rsi), %rdx mulxq 0x28(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rbp, %r13 adoxq %rbp, %r14 adcq %rbp, %r14 xorl %ebp, %ebp adcxq %r9, %r9 adoxq %r15, %r9 movq 0x28(%rsi), %rdx mulxq %rdx, %rax, %rdx adcxq %r10, %r10 adoxq %rax, %r10 adcxq %r11, %r11 adoxq %rdx, %r11 movq 0x30(%rsi), %rdx mulxq %rdx, %rax, %rdx adcxq %r12, %r12 adoxq %rax, %r12 adcxq %r13, %r13 adoxq %rdx, %r13 movq 0x38(%rsi), %rdx mulxq %rdx, %rax, %r15 adcxq %r14, %r14 adoxq %rax, %r14 adcxq %rbp, %r15 adoxq %rbp, %r15 xorl %ebp, %ebp movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq $0xffffffff00000001, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rbp, %r13 movl %ebp, %r9d adoxq %rbp, %r9 adcxq %rbp, %r9 addq %r9, %r14 adcq %rbp, %r15 movl %ebp, %r8d adcq %rbp, %r8 xorl %ebp, %ebp movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq $0xffffffff00000001, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %rbp, %r15 adoxq %rbp, %r8 adcq %rbp, %r8 movl $0x1, %ecx addq %r12, %rcx leaq -0x1(%rdx), %rdx adcq %r13, %rdx leaq -0x1(%rbp), %rbp movq %rbp, %rax adcq %r14, %rbp movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %rbp, %r14 cmovbq %r11, %r15 movq %r12, 0x20(%rsp) movq %r13, 0x28(%rsp) movq %r14, 0x30(%rsp) movq %r15, 0x38(%rsp) movq (%rsi), %rax subq (%rsp), %rax movq 0x8(%rsi), %rcx sbbq 0x8(%rsp), %rcx 
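The subtract-with-borrow runs like the one passing through this point follow a fixed pattern: a 4-word subq/sbbq chain, then sbbq %r11, %r11 to turn the final borrow into an all-ones mask, and a masked add-back of the words of p_256. The net effect is a branch-free subtraction modulo p_256; a Python sketch of that effect, assuming both inputs are already reduced below p_256:

p256 = 2**256 - 2**224 + 2**192 + 2**96 - 1

def sub_p256(a, b):
    # Raw 256-bit subtraction, then add p_256 back when a borrow occurred
    # (the assembly does the add-back with masks rather than this branch).
    d = (a - b) % 2**256
    if a < b:
        d = (d + p256) % 2**256
    return d

assert sub_p256(7, 2) == 5
assert sub_p256(3, 5) == p256 - 2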
movq 0x10(%rsi), %r8 sbbq 0x10(%rsp), %r8 movq 0x18(%rsi), %r9 sbbq 0x18(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0x60(%rsp) adcq %r10, %rcx movq %rcx, 0x68(%rsp) adcq $0x0, %r8 movq %r8, 0x70(%rsp) adcq %rdx, %r9 movq %r9, 0x78(%rsp) movq (%rsi), %rax addq (%rsp), %rax movq 0x8(%rsi), %rcx adcq 0x8(%rsp), %rcx movq 0x10(%rsi), %r8 adcq 0x10(%rsp), %r8 movq 0x18(%rsi), %r9 adcq 0x18(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx subq %r11, %rax movq %rax, 0x40(%rsp) sbbq %r10, %rcx movq %rcx, 0x48(%rsp) sbbq $0x0, %r8 movq %r8, 0x50(%rsp) sbbq %rdx, %r9 movq %r9, 0x58(%rsp) xorl %r13d, %r13d movq 0x60(%rsp), %rdx mulxq 0x40(%rsp), %r8, %r9 mulxq 0x48(%rsp), %rbx, %r10 adcq %rbx, %r9 mulxq 0x50(%rsp), %rbx, %r11 adcq %rbx, %r10 mulxq 0x58(%rsp), %rbx, %r12 adcq %rbx, %r11 adcq %r13, %r12 movq 0x68(%rsp), %rdx xorl %r14d, %r14d mulxq 0x40(%rsp), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x48(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x50(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x58(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcq %r14, %r13 xorl %r15d, %r15d movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %r15, %r13 adoxq %r15, %r14 adcq %r15, %r14 movq 0x70(%rsp), %rdx xorl %r8d, %r8d mulxq 0x40(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x48(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x50(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adoxq %r8, %r14 mulxq 0x58(%rsp), %rax, %rbx adcq %rax, %r13 adcq %rbx, %r14 adcq %r8, %r15 movq 0x78(%rsp), %rdx xorl %r9d, %r9d mulxq 0x40(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x48(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x50(%rsp), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 adoxq %r9, %r15 mulxq 0x58(%rsp), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r9, %r8 xorl %r9d, %r9d movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %r9, %r15 adoxq %r9, %r8 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rdx adcq %r13, %rdx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x60(%rsp) movq %r13, 0x68(%rsp) movq %r14, 0x70(%rsp) movq %r15, 0x78(%rsp) xorq %r11, %r11 movq 0x20(%rsi), %rax addq 0x40(%rsi), %rax movq 0x28(%rsi), %rcx adcq 0x48(%rsi), %rcx movq 0x30(%rsi), %r8 adcq 0x50(%rsi), %r8 movq 0x38(%rsi), %r9 adcq 0x58(%rsi), %r9 adcq %r11, %r11 subq $0xffffffffffffffff, %rax movl $0xffffffff, %r10d sbbq %r10, %rcx sbbq $0x0, %r8 movq $0xffffffff00000001, %rdx sbbq %rdx, %r9 sbbq $0x0, %r11 andq %r11, %r10 andq %r11, %rdx addq %r11, %rax movq %rax, 0x40(%rsp) adcq %r10, %rcx movq %rcx, 0x48(%rsp) adcq $0x0, %r8 movq %r8, 0x50(%rsp) adcq %rdx, %r9 movq %r9, 0x58(%rsp) xorl %r13d, %r13d movq 0x20(%rsp), %rdx mulxq (%rsi), %r8, %r9 mulxq 0x8(%rsi), %rbx, %r10 adcq %rbx, %r9 mulxq 0x10(%rsi), %rbx, %r11 adcq 
%rbx, %r10 mulxq 0x18(%rsi), %rbx, %r12 adcq %rbx, %r11 adcq %r13, %r12 movq 0x28(%rsp), %rdx xorl %r14d, %r14d mulxq (%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x18(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcq %r14, %r13 xorl %r15d, %r15d movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %r15, %r13 adoxq %r15, %r14 adcq %r15, %r14 movq 0x30(%rsp), %rdx xorl %r8d, %r8d mulxq (%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adoxq %r8, %r14 mulxq 0x18(%rsi), %rax, %rbx adcq %rax, %r13 adcq %rbx, %r14 adcq %r8, %r15 movq 0x38(%rsp), %rdx xorl %r9d, %r9d mulxq (%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 adoxq %r9, %r15 mulxq 0x18(%rsi), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r9, %r8 xorl %r9d, %r9d movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %r9, %r15 adoxq %r9, %r8 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rdx adcq %r13, %rdx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x80(%rsp) movq %r13, 0x88(%rsp) movq %r14, 0x90(%rsp) movq %r15, 0x98(%rsp) movq 0x60(%rsp), %rdx mulxq %rdx, %r8, %r15 mulxq 0x68(%rsp), %r9, %r10 mulxq 0x78(%rsp), %r11, %r12 movq 0x70(%rsp), %rdx mulxq 0x78(%rsp), %r13, %r14 xorl %ebp, %ebp mulxq 0x60(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x68(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq 0x78(%rsp), %rdx mulxq 0x68(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rbp, %r13 adoxq %rbp, %r14 adcq %rbp, %r14 xorl %ebp, %ebp adcxq %r9, %r9 adoxq %r15, %r9 movq 0x68(%rsp), %rdx mulxq %rdx, %rax, %rdx adcxq %r10, %r10 adoxq %rax, %r10 adcxq %r11, %r11 adoxq %rdx, %r11 movq 0x70(%rsp), %rdx mulxq %rdx, %rax, %rdx adcxq %r12, %r12 adoxq %rax, %r12 adcxq %r13, %r13 adoxq %rdx, %r13 movq 0x78(%rsp), %rdx mulxq %rdx, %rax, %r15 adcxq %r14, %r14 adoxq %rax, %r14 adcxq %rbp, %r15 adoxq %rbp, %r15 xorl %ebp, %ebp movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq $0xffffffff00000001, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rbp, %r13 movl %ebp, %r9d adoxq %rbp, %r9 adcxq %rbp, %r9 addq %r9, %r14 adcq %rbp, %r15 movl %ebp, %r8d adcq %rbp, %r8 xorl %ebp, %ebp movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq $0xffffffff00000001, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %rbp, %r15 
adoxq %rbp, %r8 adcq %rbp, %r8 movl $0x1, %ecx addq %r12, %rcx leaq -0x1(%rdx), %rdx adcq %r13, %rdx leaq -0x1(%rbp), %rbp movq %rbp, %rax adcq %r14, %rbp movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %rbp, %r14 cmovbq %r11, %r15 movq %r12, 0xa0(%rsp) movq %r13, 0xa8(%rsp) movq %r14, 0xb0(%rsp) movq %r15, 0xb8(%rsp) movq 0x40(%rsp), %rdx mulxq %rdx, %r8, %r15 mulxq 0x48(%rsp), %r9, %r10 mulxq 0x58(%rsp), %r11, %r12 movq 0x50(%rsp), %rdx mulxq 0x58(%rsp), %r13, %r14 xorl %ebp, %ebp mulxq 0x40(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x48(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq 0x58(%rsp), %rdx mulxq 0x48(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rbp, %r13 adoxq %rbp, %r14 adcq %rbp, %r14 xorl %ebp, %ebp adcxq %r9, %r9 adoxq %r15, %r9 movq 0x48(%rsp), %rdx mulxq %rdx, %rax, %rdx adcxq %r10, %r10 adoxq %rax, %r10 adcxq %r11, %r11 adoxq %rdx, %r11 movq 0x50(%rsp), %rdx mulxq %rdx, %rax, %rdx adcxq %r12, %r12 adoxq %rax, %r12 adcxq %r13, %r13 adoxq %rdx, %r13 movq 0x58(%rsp), %rdx mulxq %rdx, %rax, %r15 adcxq %r14, %r14 adoxq %rax, %r14 adcxq %rbp, %r15 adoxq %rbp, %r15 xorl %ebp, %ebp movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq $0xffffffff00000001, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rbp, %r13 movl %ebp, %r9d adoxq %rbp, %r9 adcxq %rbp, %r9 addq %r9, %r14 adcq %rbp, %r15 movl %ebp, %r8d adcq %rbp, %r8 xorl %ebp, %ebp movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq $0xffffffff00000001, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %rbp, %r15 adoxq %rbp, %r8 adcq %rbp, %r8 movl $0x1, %ecx addq %r12, %rcx leaq -0x1(%rdx), %rdx adcq %r13, %rdx leaq -0x1(%rbp), %rbp movq %rbp, %rax adcq %r14, %rbp movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %rbp, %r14 cmovbq %r11, %r15 movq %r12, 0x40(%rsp) movq %r13, 0x48(%rsp) movq %r14, 0x50(%rsp) movq %r15, 0x58(%rsp) movq $0xffffffffffffffff, %r8 xorl %r10d, %r10d subq 0xa0(%rsp), %r8 movq $0xffffffff, %r9 sbbq 0xa8(%rsp), %r9 sbbq 0xb0(%rsp), %r10 movq $0xffffffff00000001, %r11 sbbq 0xb8(%rsp), %r11 xorl %r12d, %r12d movq $0x9, %rdx mulxq %r8, %r8, %rax mulxq %r9, %r9, %rcx addq %rax, %r9 mulxq %r10, %r10, %rax adcq %rcx, %r10 mulxq %r11, %r11, %rcx adcq %rax, %r11 adcq %rcx, %r12 movq $0xc, %rdx xorl %eax, %eax mulxq 0x80(%rsp), %rax, %rcx adcxq %rax, %r8 adoxq %rcx, %r9 mulxq 0x88(%rsp), %rax, %rcx adcxq %rax, %r9 adoxq %rcx, %r10 mulxq 0x90(%rsp), %rax, %rcx adcxq %rax, %r10 adoxq %rcx, %r11 mulxq 0x98(%rsp), %rax, %rdx adcxq %rax, %r11 adoxq %r12, %rdx adcq $0x1, %rdx addq %rdx, %r8 movq $0x100000000, %rax mulxq %rax, %rax, %rcx sbbq $0x0, %rax sbbq $0x0, %rcx subq %rax, %r9 sbbq %rcx, %r10 movq $0xffffffff00000001, %rax mulxq %rax, %rax, %rcx sbbq %rax, %r11 sbbq %rcx, %rdx decq %rdx movl $0xffffffff, %eax andq %rdx, %rax xorl %ecx, %ecx subq %rax, %rcx addq %rdx, %r8 movq %r8, 0xa0(%rsp) adcq %rax, %r9 movq %r9, 0xa8(%rsp) adcq $0x0, %r10 movq %r10, 0xb0(%rsp) adcq %rcx, %r11 movq %r11, 0xb8(%rsp) movq 0x40(%rsp), %rax subq (%rsp), %rax movq 0x48(%rsp), %rcx sbbq 0x8(%rsp), %rcx movq 0x50(%rsp), %r8 sbbq 0x10(%rsp), %r8 movq 0x58(%rsp), %r9 sbbq 
0x18(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0x40(%rsp) adcq %r10, %rcx movq %rcx, 0x48(%rsp) adcq $0x0, %r8 movq %r8, 0x50(%rsp) adcq %rdx, %r9 movq %r9, 0x58(%rsp) movq 0x20(%rsp), %rdx mulxq %rdx, %r8, %r15 mulxq 0x28(%rsp), %r9, %r10 mulxq 0x38(%rsp), %r11, %r12 movq 0x30(%rsp), %rdx mulxq 0x38(%rsp), %r13, %r14 xorl %ebp, %ebp mulxq 0x20(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x28(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 movq 0x38(%rsp), %rdx mulxq 0x28(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rbp, %r13 adoxq %rbp, %r14 adcq %rbp, %r14 xorl %ebp, %ebp adcxq %r9, %r9 adoxq %r15, %r9 movq 0x28(%rsp), %rdx mulxq %rdx, %rax, %rdx adcxq %r10, %r10 adoxq %rax, %r10 adcxq %r11, %r11 adoxq %rdx, %r11 movq 0x30(%rsp), %rdx mulxq %rdx, %rax, %rdx adcxq %r12, %r12 adoxq %rax, %r12 adcxq %r13, %r13 adoxq %rdx, %r13 movq 0x38(%rsp), %rdx mulxq %rdx, %rax, %r15 adcxq %r14, %r14 adoxq %rax, %r14 adcxq %rbp, %r15 adoxq %rbp, %r15 xorl %ebp, %ebp movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 movq $0xffffffff00000001, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %rbp, %r13 movl %ebp, %r9d adoxq %rbp, %r9 adcxq %rbp, %r9 addq %r9, %r14 adcq %rbp, %r15 movl %ebp, %r8d adcq %rbp, %r8 xorl %ebp, %ebp movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 movq $0xffffffff00000001, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %rbp, %r15 adoxq %rbp, %r8 adcq %rbp, %r8 movl $0x1, %ecx addq %r12, %rcx leaq -0x1(%rdx), %rdx adcq %r13, %rdx leaq -0x1(%rbp), %rbp movq %rbp, %rax adcq %r14, %rbp movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %rbp, %r14 cmovbq %r11, %r15 movq %r12, (%rsp) movq %r13, 0x8(%rsp) movq %r14, 0x10(%rsp) movq %r15, 0x18(%rsp) xorl %r13d, %r13d movq 0x60(%rsp), %rdx mulxq 0xa0(%rsp), %r8, %r9 mulxq 0xa8(%rsp), %rbx, %r10 adcq %rbx, %r9 mulxq 0xb0(%rsp), %rbx, %r11 adcq %rbx, %r10 mulxq 0xb8(%rsp), %rbx, %r12 adcq %rbx, %r11 adcq %r13, %r12 movq 0x68(%rsp), %rdx xorl %r14d, %r14d mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0xb0(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0xb8(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcq %r14, %r13 xorl %r15d, %r15d movq $0x100000000, %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r9, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r8, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r9, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adcxq %r15, %r13 adoxq %r15, %r14 adcq %r15, %r14 movq 0x70(%rsp), %rdx xorl %r8d, %r8d mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0xb0(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 adoxq %r8, %r14 mulxq 0xb8(%rsp), %rax, %rbx adcq %rax, %r13 adcq %rbx, %r14 adcq %r8, %r15 movq 0x78(%rsp), %rdx xorl %r9d, %r9d mulxq 0xa0(%rsp), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0xa8(%rsp), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0xb0(%rsp), %rax, %rbx 
adcxq %rax, %r13 adoxq %rbx, %r14 adoxq %r9, %r15 mulxq 0xb8(%rsp), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r9, %r8 xorl %r9d, %r9d movq $0x100000000, %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq %r11, %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 notq %rdx leaq 0x2(%rdx), %rdx mulxq %r10, %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq %r11, %rax, %rbx adcxq %rax, %r14 adoxq %rbx, %r15 adcxq %r9, %r15 adoxq %r9, %r8 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rdx adcq %r13, %rdx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rdx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x60(%rsp) movq %r13, 0x68(%rsp) movq %r14, 0x70(%rsp) movq %r15, 0x78(%rsp) movq 0x40(%rsp), %rax subq 0x20(%rsp), %rax movq 0x48(%rsp), %rcx sbbq 0x28(%rsp), %rcx movq 0x50(%rsp), %r8 sbbq 0x30(%rsp), %r8 movq 0x58(%rsp), %r9 sbbq 0x38(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0x40(%rdi) adcq %r10, %rcx movq %rcx, 0x48(%rdi) adcq $0x0, %r8 movq %r8, 0x50(%rdi) adcq %rdx, %r9 movq %r9, 0x58(%rdi) movq 0x98(%rsp), %r11 movq %r11, %rdx movq 0x90(%rsp), %r10 shldq $0x2, %r10, %r11 movq 0x88(%rsp), %r9 shldq $0x2, %r9, %r10 movq 0x80(%rsp), %r8 shldq $0x2, %r8, %r9 shlq $0x2, %r8 shrq $0x3e, %rdx addq $0x1, %rdx subq 0xa0(%rsp), %r8 sbbq 0xa8(%rsp), %r9 sbbq 0xb0(%rsp), %r10 sbbq 0xb8(%rsp), %r11 sbbq $0x0, %rdx addq %rdx, %r8 movq $0x100000000, %rax mulxq %rax, %rax, %rcx sbbq $0x0, %rax sbbq $0x0, %rcx subq %rax, %r9 sbbq %rcx, %r10 movq $0xffffffff00000001, %rax mulxq %rax, %rax, %rcx sbbq %rax, %r11 sbbq %rcx, %rdx decq %rdx movl $0xffffffff, %eax andq %rdx, %rax xorl %ecx, %ecx subq %rax, %rcx addq %rdx, %r8 movq %r8, (%rdi) adcq %rax, %r9 movq %r9, 0x8(%rdi) adcq $0x0, %r10 movq %r10, 0x10(%rdi) adcq %rcx, %r11 movq %r11, 0x18(%rdi) movq $0xffffffffffffffff, %r8 xorl %r10d, %r10d subq (%rsp), %r8 movq $0xffffffff, %r9 sbbq 0x8(%rsp), %r9 sbbq 0x10(%rsp), %r10 movq $0xffffffff00000001, %r11 sbbq 0x18(%rsp), %r11 movq %r11, %r12 shldq $0x3, %r10, %r11 shldq $0x3, %r9, %r10 shldq $0x3, %r8, %r9 shlq $0x3, %r8 shrq $0x3d, %r12 movq $0x3, %rdx xorl %eax, %eax mulxq 0x60(%rsp), %rax, %rcx adcxq %rax, %r8 adoxq %rcx, %r9 mulxq 0x68(%rsp), %rax, %rcx adcxq %rax, %r9 adoxq %rcx, %r10 mulxq 0x70(%rsp), %rax, %rcx adcxq %rax, %r10 adoxq %rcx, %r11 mulxq 0x78(%rsp), %rax, %rdx adcxq %rax, %r11 adoxq %r12, %rdx adcq $0x1, %rdx addq %rdx, %r8 movq $0x100000000, %rax mulxq %rax, %rax, %rcx sbbq $0x0, %rax sbbq $0x0, %rcx subq %rax, %r9 sbbq %rcx, %r10 movq $0xffffffff00000001, %rax mulxq %rax, %rax, %rcx sbbq %rax, %r11 sbbq %rcx, %rdx decq %rdx movl $0xffffffff, %eax andq %rdx, %rax xorl %ecx, %ecx subq %rax, %rcx addq %rdx, %r8 movq %r8, 0x20(%rdi) adcq %rax, %r9 movq %r9, 0x28(%rdi) adcq $0x0, %r10 movq %r10, 0x30(%rdi) adcq %rcx, %r11 movq %r11, 0x38(%rdi) addq $0xc0, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
marvin-hansen/iggy-streaming-system
129,123
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p256/p256_montjscalarmul_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Montgomery-Jacobian form scalar multiplication for P-256
// Input scalar[4], point[12]; output res[12]
//
// extern void p256_montjscalarmul_alt
//   (uint64_t res[static 12],
//    uint64_t scalar[static 4],
//    uint64_t point[static 12]);
//
// This function is a variant of its affine point version p256_scalarmul.
// Here, input and output points are assumed to be in Jacobian form with
// their coordinates in the Montgomery domain. Thus, if priming indicates
// Montgomery form, x' = (2^256 * x) mod p_256 etc., each point argument
// is a triple (x',y',z') representing the affine point (x/z^2,y/z^3) when
// z' is nonzero or the point at infinity (group identity) if z' = 0.
//
// Given scalar = n and point = P, assumed to be on the NIST elliptic
// curve P-256, returns a representation of n * P. If the result is the
// point at infinity (either because the input point was or because the
// scalar was a multiple of the group order n_256) then the output is
// guaranteed to represent the point at infinity, i.e. to have its z
// coordinate zero.
//
// Standard x86-64 ABI: RDI = res, RSI = scalar, RDX = point
// Microsoft x64 ABI:   RCX = res, RDX = scalar, R8 = point
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(p256_montjscalarmul_alt)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(p256_montjscalarmul_alt)
        .text
        .balign 4

// Size of individual field elements

#define NUMSIZE 32

// Intermediate variables on the stack. Uppercase syntactic variants
// make x86_att version simpler to generate.

#define SCALARB (0*NUMSIZE)
#define scalarb (0*NUMSIZE)(%rsp)
#define ACC (1*NUMSIZE)
#define acc (1*NUMSIZE)(%rsp)
#define TABENT (4*NUMSIZE)
#define tabent (4*NUMSIZE)(%rsp)

#define TAB (7*NUMSIZE)
#define tab (7*NUMSIZE)(%rsp)

#define res (31*NUMSIZE)(%rsp)

#define NSPACE (32*NUMSIZE)

// Avoid using .rep for the sake of the BoringSSL/AWS-LC delocator,
// which doesn't accept repetitions, assembler macros etc.

#define selectblock(I) \
        cmpq    $I, %rdi ; \
        cmovzq  TAB+96*(I-1)(%rsp), %rax ; \
        cmovzq  TAB+96*(I-1)+8(%rsp), %rbx ; \
        cmovzq  TAB+96*(I-1)+16(%rsp), %rcx ; \
        cmovzq  TAB+96*(I-1)+24(%rsp), %rdx ; \
        cmovzq  TAB+96*(I-1)+32(%rsp), %r8 ; \
        cmovzq  TAB+96*(I-1)+40(%rsp), %r9 ; \
        cmovzq  TAB+96*(I-1)+48(%rsp), %r10 ; \
        cmovzq  TAB+96*(I-1)+56(%rsp), %r11 ; \
        cmovzq  TAB+96*(I-1)+64(%rsp), %r12 ; \
        cmovzq  TAB+96*(I-1)+72(%rsp), %r13 ; \
        cmovzq  TAB+96*(I-1)+80(%rsp), %r14 ; \
        cmovzq  TAB+96*(I-1)+88(%rsp), %r15

S2N_BN_SYMBOL(p256_montjscalarmul_alt):

// The Windows version literally calls the standard ABI version.
// This simplifies the proofs since subroutine offsets are fixed.

#if WINDOWS_ABI
        pushq   %rdi
        pushq   %rsi
        movq    %rcx, %rdi
        movq    %rdx, %rsi
        movq    %r8, %rdx
        callq   p256_montjscalarmul_alt_standard
        popq    %rsi
        popq    %rdi
        ret

p256_montjscalarmul_alt_standard:
#endif

// Real start of the standard ABI code.

        pushq   %r15
        pushq   %r14
        pushq   %r13
        pushq   %r12
        pushq   %rbp
        pushq   %rbx
        subq    $NSPACE, %rsp

// Preserve the "res" and "point" input arguments. We load and process the
// scalar immediately so we don't bother preserving that input argument.
// Also, "point" is only needed early on and so its register gets re-used.
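// A brief informal sketch of the strategy used by the code below (the digit
// notation here is illustrative and does not name actual symbols):
//
//   1. Reduce the scalar mod the group order n_256; if bit 255 of the
//      reduced value n is set, replace n by n_256 - n and remember the
//      sign so the base point's y coordinate can be negated to match.
//   2. Recode n by adding the constant 0x0888...888 (63 low nibbles of 8);
//      for n < 2^255 the word-wise addition of 0x8888888888888888 plus the
//      btc $63 bit-flip below amounts to exactly this. Writing f_63..f_0
//      for the nibbles of the recoded value,
//          n = f_63 * 16^63 + sum_{i<63} (f_i - 8) * 16^i
//      so every window except the top one stands for a digit in [-8,7].
//   3. Tabulate tab[i-1] = i * P for i = 1..8 in Jacobian coordinates and
//      Montgomery form, with y already negated when the sign from step 1
//      was negative (and y = 0 left alone, as noted below).
//   4. Start the accumulator from the top window f_63 in [0,8], then for
//      each of the remaining 63 windows do four Jacobian doublings, select
//      |f_i - 8| * P from the table in constant time (selectblock), negate
//      its y mod p_256 when the digit is negative, and add it in; a zero
//      digit selects an all-zero entry, i.e. the point at infinity.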
movq %rdx, %rbx movq %rdi, res // Load the digits of group order n_256 = [%r15;%r14;%r13;%r12] movq $0xf3b9cac2fc632551, %r12 movq $0xbce6faada7179e84, %r13 movq $0xffffffffffffffff, %r14 movq $0xffffffff00000000, %r15 // First, reduce the input scalar mod n_256, i.e. conditionally subtract n_256 movq (%rsi), %r8 subq %r12, %r8 movq 8(%rsi), %r9 sbbq %r13, %r9 movq 16(%rsi), %r10 sbbq %r14, %r10 movq 24(%rsi), %r11 sbbq %r15, %r11 cmovcq (%rsi), %r8 cmovcq 8(%rsi), %r9 cmovcq 16(%rsi), %r10 cmovcq 24(%rsi), %r11 // Now if the top bit of the reduced scalar is set, negate it mod n_256, // i.e. do n |-> n_256 - n. Remember the sign in %rbp so we can // correspondingly negate the point below. subq %r8, %r12 sbbq %r9, %r13 sbbq %r10, %r14 sbbq %r11, %r15 movq %r11, %rbp shrq $63, %rbp cmovnzq %r12, %r8 cmovnzq %r13, %r9 cmovnzq %r14, %r10 cmovnzq %r15, %r11 // In either case then add the recoding constant 0x08888...888 to allow // signed digits. movq $0x8888888888888888, %rax addq %rax, %r8 adcq %rax, %r9 adcq %rax, %r10 adcq %rax, %r11 btc $63, %r11 movq %r8, SCALARB(%rsp) movq %r9, SCALARB+8(%rsp) movq %r10, SCALARB+16(%rsp) movq %r11, SCALARB+24(%rsp) // Set the tab[0] table entry to the input point = 1 * P, except // that we negate it if the top bit of the scalar was set. This // negation takes care over the y = 0 case to maintain all the // coordinates < p_256 throughout, even though triples (x,y,z) // with y = 0 can only represent a point on the curve when z = 0 // and it represents the point at infinity regardless of x and y. movq (%rbx), %rax movq %rax, TAB(%rsp) movq 8(%rbx), %rax movq %rax, TAB+8(%rsp) movq 16(%rbx), %rax movq %rax, TAB+16(%rsp) movq 24(%rbx), %rax movq %rax, TAB+24(%rsp) movq 32(%rbx), %r12 movq %r12, %rax movq 40(%rbx), %r13 orq %r13, %rax movq 48(%rbx), %r14 movq %r14, %rcx movq 56(%rbx), %r15 orq %r15, %rcx orq %rcx, %rax cmovzq %rax, %rbp xorl %r10d, %r10d leaq -1(%r10), %r8 movq $0x00000000ffffffff, %r11 movq %r11, %r9 negq %r11 subq %r12, %r8 sbbq %r13, %r9 sbbq %r14, %r10 sbbq %r15, %r11 testq %rbp, %rbp cmovzq %r12, %r8 cmovzq %r13, %r9 cmovzq %r14, %r10 cmovzq %r15, %r11 movq %r8, TAB+32(%rsp) movq %r9, TAB+40(%rsp) movq %r10, TAB+48(%rsp) movq %r11, TAB+56(%rsp) movq 64(%rbx), %rax movq %rax, TAB+64(%rsp) movq 72(%rbx), %rax movq %rax, TAB+72(%rsp) movq 80(%rbx), %rax movq %rax, TAB+80(%rsp) movq 88(%rbx), %rax movq %rax, TAB+88(%rsp) // Compute and record tab[1] = 2 * p, ..., tab[7] = 8 * P leaq TAB+96*1(%rsp), %rdi leaq TAB(%rsp), %rsi callq p256_montjscalarmul_alt_p256_montjdouble leaq TAB+96*2(%rsp), %rdi leaq TAB+96*1(%rsp), %rsi leaq TAB(%rsp), %rdx callq p256_montjscalarmul_alt_p256_montjadd leaq TAB+96*3(%rsp), %rdi leaq TAB+96*1(%rsp), %rsi callq p256_montjscalarmul_alt_p256_montjdouble leaq TAB+96*4(%rsp), %rdi leaq TAB+96*3(%rsp), %rsi leaq TAB(%rsp), %rdx callq p256_montjscalarmul_alt_p256_montjadd leaq TAB+96*5(%rsp), %rdi leaq TAB+96*2(%rsp), %rsi callq p256_montjscalarmul_alt_p256_montjdouble leaq TAB+96*6(%rsp), %rdi leaq TAB+96*5(%rsp), %rsi leaq TAB(%rsp), %rdx callq p256_montjscalarmul_alt_p256_montjadd leaq TAB+96*7(%rsp), %rdi leaq TAB+96*3(%rsp), %rsi callq p256_montjscalarmul_alt_p256_montjdouble // Set up accumulator as table entry for top 4 bits (constant-time indexing) movq SCALARB+24(%rsp), %rdi shrq $60, %rdi xorl %eax, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx xorl %r8d, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d xorl %r12d, %r12d xorl %r13d, %r13d xorl %r14d, %r14d xorl %r15d, %r15d selectblock(1) 
selectblock(2) selectblock(3) selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) movq %rax, ACC(%rsp) movq %rbx, ACC+8(%rsp) movq %rcx, ACC+16(%rsp) movq %rdx, ACC+24(%rsp) movq %r8, ACC+32(%rsp) movq %r9, ACC+40(%rsp) movq %r10, ACC+48(%rsp) movq %r11, ACC+56(%rsp) movq %r12, ACC+64(%rsp) movq %r13, ACC+72(%rsp) movq %r14, ACC+80(%rsp) movq %r15, ACC+88(%rsp) // Main loop over size-4 bitfield movl $252, %ebp p256_montjscalarmul_alt_mainloop: subq $4, %rbp leaq ACC(%rsp), %rsi leaq ACC(%rsp), %rdi callq p256_montjscalarmul_alt_p256_montjdouble leaq ACC(%rsp), %rsi leaq ACC(%rsp), %rdi callq p256_montjscalarmul_alt_p256_montjdouble leaq ACC(%rsp), %rsi leaq ACC(%rsp), %rdi callq p256_montjscalarmul_alt_p256_montjdouble leaq ACC(%rsp), %rsi leaq ACC(%rsp), %rdi callq p256_montjscalarmul_alt_p256_montjdouble movq %rbp, %rax shrq $6, %rax movq (%rsp,%rax,8), %rdi movq %rbp, %rcx shrq %cl, %rdi andq $15, %rdi subq $8, %rdi sbbq %rsi, %rsi // %rsi = sign of digit (-1 = negative) xorq %rsi, %rdi subq %rsi, %rdi // %rdi = absolute value of digit xorl %eax, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx xorl %r8d, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d xorl %r12d, %r12d xorl %r13d, %r13d xorl %r14d, %r14d xorl %r15d, %r15d selectblock(1) selectblock(2) selectblock(3) selectblock(4) selectblock(5) selectblock(6) selectblock(7) selectblock(8) // Store it to "tabent" with the y coordinate optionally negated // Again, do it carefully to give coordinates < p_256 even in // the degenerate case y = 0 (when z = 0 for points on the curve). movq %rax, TABENT(%rsp) movq %rbx, TABENT+8(%rsp) movq %rcx, TABENT+16(%rsp) movq %rdx, TABENT+24(%rsp) movq %r12, TABENT+64(%rsp) movq %r13, TABENT+72(%rsp) movq %r14, TABENT+80(%rsp) movq %r15, TABENT+88(%rsp) movq %r8, %rax xorl %r14d, %r14d orq %r9, %rax leaq -1(%r14), %r12 movq %r10, %rcx movq $0x00000000ffffffff, %r15 orq %r11, %rcx movq %r15, %r13 negq %r15 orq %rcx, %rax cmovzq %rax, %rsi subq %r8, %r12 sbbq %r9, %r13 sbbq %r10, %r14 sbbq %r11, %r15 testq %rsi, %rsi cmovnzq %r12, %r8 cmovnzq %r13, %r9 cmovnzq %r14, %r10 cmovnzq %r15, %r11 movq %r8, TABENT+32(%rsp) movq %r9, TABENT+40(%rsp) movq %r10, TABENT+48(%rsp) movq %r11, TABENT+56(%rsp) leaq TABENT(%rsp), %rdx leaq ACC(%rsp), %rsi leaq ACC(%rsp), %rdi callq p256_montjscalarmul_alt_p256_montjadd testq %rbp, %rbp jne p256_montjscalarmul_alt_mainloop // That's the end of the main loop, and we just need to copy the // result in "acc" to the output. 
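// For reference, after the four doublings each iteration of the loop above
// handles the next window as follows (informal pseudocode; the names are
// illustrative, not symbols defined in this file):
//
//   f = (scalarb[bitpos >> 6] >> (bitpos & 63)) & 15   // 4-bit window
//   d = f - 8                                          // signed digit
//   tabent = (|d| == 0) ? all-zeros : tab[|d| - 1]     // cmov-based select
//   if (d < 0 && tabent.y != 0) tabent.y = p_256 - tabent.y
//   acc = acc + tabent                                 // Jacobian addition
//
// Leaving y = 0 unnegated keeps every stored coordinate below p_256, and an
// all-zero table entry has z = 0, i.e. it encodes the point at infinity.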
movq res, %rdi movq ACC(%rsp), %rax movq %rax, (%rdi) movq ACC+8(%rsp), %rax movq %rax, 8(%rdi) movq ACC+16(%rsp), %rax movq %rax, 16(%rdi) movq ACC+24(%rsp), %rax movq %rax, 24(%rdi) movq ACC+32(%rsp), %rax movq %rax, 32(%rdi) movq ACC+40(%rsp), %rax movq %rax, 40(%rdi) movq ACC+48(%rsp), %rax movq %rax, 48(%rdi) movq ACC+56(%rsp), %rax movq %rax, 56(%rdi) movq ACC+64(%rsp), %rax movq %rax, 64(%rdi) movq ACC+72(%rsp), %rax movq %rax, 72(%rdi) movq ACC+80(%rsp), %rax movq %rax, 80(%rdi) movq ACC+88(%rsp), %rax movq %rax, 88(%rdi) // Restore stack and registers and return addq $NSPACE, %rsp popq %rbx popq %rbp popq %r12 popq %r13 popq %r14 popq %r15 ret // Local copies of subroutines, complete clones at the moment p256_montjscalarmul_alt_p256_montjadd: pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $0xe0, %rsp movq %rdx, %rbp movq 0x40(%rsi), %rax movq %rax, %rbx mulq %rax movq %rax, %r8 movq %rdx, %r15 movq 0x48(%rsi), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x58(%rsi), %rax movq %rax, %r13 mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x50(%rsi), %rax movq %rax, %rbx mulq %r13 movq %rax, %r13 movq %rdx, %r14 movq 0x40(%rsi), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx movq 0x48(%rsi), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq 0x58(%rsi), %rbx movq 0x48(%rsi), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq $0x0, %r14 xorl %ecx, %ecx addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %rcx, %rcx movq 0x48(%rsi), %rax mulq %rax addq %r15, %r9 adcq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 movq 0x50(%rsi), %rax mulq %rax negq %r15 adcq %rax, %r12 adcq %rdx, %r13 sbbq %r15, %r15 movq 0x58(%rsi), %rax mulq %rax negq %r15 adcq %rax, %r14 adcq %rcx, %rdx movq %rdx, %r15 movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %rcx, %rcx movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx xorl %r8d, %r8d movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r8, %r14 adcq %r8, %r15 adcq %r8, %r8 movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx xorl %r9d, %r9d movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx leaq -0x1(%rbx), %rbx adcq %r13, %rbx leaq -0x1(%r9), %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, (%rsp) movq %r13, 0x8(%rsp) movq %r14, 0x10(%rsp) movq %r15, 0x18(%rsp) movq 0x40(%rbp), %rax movq %rax, %rbx mulq %rax movq %rax, %r8 movq %rdx, %r15 movq 0x48(%rbp), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x58(%rbp), %rax movq %rax, %r13 mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x50(%rbp), %rax movq %rax, %rbx mulq %r13 movq %rax, %r13 movq %rdx, %r14 movq 0x40(%rbp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx movq 0x48(%rbp), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq 0x58(%rbp), %rbx movq 
0x48(%rbp), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq $0x0, %r14 xorl %ecx, %ecx addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %rcx, %rcx movq 0x48(%rbp), %rax mulq %rax addq %r15, %r9 adcq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 movq 0x50(%rbp), %rax mulq %rax negq %r15 adcq %rax, %r12 adcq %rdx, %r13 sbbq %r15, %r15 movq 0x58(%rbp), %rax mulq %rax negq %r15 adcq %rax, %r14 adcq %rcx, %rdx movq %rdx, %r15 movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %rcx, %rcx movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx xorl %r8d, %r8d movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r8, %r14 adcq %r8, %r15 adcq %r8, %r8 movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx xorl %r9d, %r9d movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx leaq -0x1(%rbx), %rbx adcq %r13, %rbx leaq -0x1(%r9), %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0xa0(%rsp) movq %r13, 0xa8(%rsp) movq %r14, 0xb0(%rsp) movq %r15, 0xb8(%rsp) movq 0x20(%rsi), %rbx movq 0x40(%rbp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x48(%rbp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x50(%rbp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x58(%rbp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x28(%rsi), %rbx xorl %r13d, %r13d movq 0x40(%rbp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r14, %r14 movq 0x48(%rbp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r14, %r14 movq 0x50(%rbp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r14, %r14 movq 0x58(%rbp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %r15, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0x30(%rsi), %rbx xorl %r15d, %r15d movq 0x40(%rbp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x48(%rbp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x50(%rbp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x58(%rbp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0x38(%rsi), %rbx xorl %r8d, %r8d movq 0x40(%rbp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x48(%rbp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x50(%rbp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x58(%rbp), %rax mulq %rbx 
subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r8, %r8 xorl %r9d, %r9d movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rbx adcq %r13, %rbx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0xc0(%rsp) movq %r13, 0xc8(%rsp) movq %r14, 0xd0(%rsp) movq %r15, 0xd8(%rsp) movq 0x20(%rbp), %rbx movq 0x40(%rsi), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x48(%rsi), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x50(%rsi), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x58(%rsi), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x28(%rbp), %rbx xorl %r13d, %r13d movq 0x40(%rsi), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r14, %r14 movq 0x48(%rsi), %rax mulq %rbx subq %r14, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r14, %r14 movq 0x50(%rsi), %rax mulq %rbx subq %r14, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r14, %r14 movq 0x58(%rsi), %rax mulq %rbx subq %r14, %rdx addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %r15, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0x30(%rbp), %rbx xorl %r15d, %r15d movq 0x40(%rsi), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x48(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x50(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x58(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0x38(%rbp), %rbx xorl %r8d, %r8d movq 0x40(%rsi), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x48(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x50(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x58(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r8, %r8 xorl %r9d, %r9d movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rbx adcq %r13, %rbx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x20(%rsp) movq %r13, 0x28(%rsp) movq %r14, 0x30(%rsp) movq %r15, 0x38(%rsp) movq 0x0(%rbp), %rbx movq (%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x8(%rsp), %rax mulq %rbx 
xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x10(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x18(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x8(%rbp), %rbx xorl %r13d, %r13d movq (%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r14, %r14 movq 0x8(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r14, %r14 movq 0x10(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r14, %r14 movq 0x18(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %r15, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0x10(%rbp), %rbx xorl %r15d, %r15d movq (%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x10(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x18(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0x18(%rbp), %rbx xorl %r8d, %r8d movq (%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x10(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x18(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r8, %r8 xorl %r9d, %r9d movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rbx adcq %r13, %rbx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x40(%rsp) movq %r13, 0x48(%rsp) movq %r14, 0x50(%rsp) movq %r15, 0x58(%rsp) movq (%rsi), %rbx movq 0xa0(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0xa8(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0xb0(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0xb8(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x8(%rsi), %rbx xorl %r13d, %r13d movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r14, %r14 movq 0xa8(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r14, %r14 movq 0xb0(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r14, %r14 movq 0xb8(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %r15, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq 
%r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0x10(%rsi), %rbx xorl %r15d, %r15d movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0xa8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0xb0(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0xb8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0x18(%rsi), %rbx xorl %r8d, %r8d movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0xa8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0xb0(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0xb8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r8, %r8 xorl %r9d, %r9d movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rbx adcq %r13, %rbx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x80(%rsp) movq %r13, 0x88(%rsp) movq %r14, 0x90(%rsp) movq %r15, 0x98(%rsp) movq 0x20(%rsp), %rbx movq (%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x8(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x10(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x18(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x28(%rsp), %rbx xorl %r13d, %r13d movq (%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r14, %r14 movq 0x8(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r14, %r14 movq 0x10(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r14, %r14 movq 0x18(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %r15, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0x30(%rsp), %rbx xorl %r15d, %r15d movq (%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x10(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x18(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0x38(%rsp), %rbx xorl %r8d, %r8d movq (%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x10(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x18(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r8, %r8 xorl %r9d, %r9d movabsq 
$0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rbx adcq %r13, %rbx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x20(%rsp) movq %r13, 0x28(%rsp) movq %r14, 0x30(%rsp) movq %r15, 0x38(%rsp) movq 0xc0(%rsp), %rbx movq 0xa0(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0xa8(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0xb0(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0xb8(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0xc8(%rsp), %rbx xorl %r13d, %r13d movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r14, %r14 movq 0xa8(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r14, %r14 movq 0xb0(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r14, %r14 movq 0xb8(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %r15, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0xd0(%rsp), %rbx xorl %r15d, %r15d movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0xa8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0xb0(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0xb8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0xd8(%rsp), %rbx xorl %r8d, %r8d movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0xa8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0xb0(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0xb8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r8, %r8 xorl %r9d, %r9d movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rbx adcq %r13, %rbx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0xc0(%rsp) movq %r13, 0xc8(%rsp) movq %r14, 0xd0(%rsp) movq %r15, 0xd8(%rsp) movq 0x40(%rsp), %rax subq 0x80(%rsp), %rax movq 0x48(%rsp), %rcx sbbq 0x88(%rsp), %rcx movq 0x50(%rsp), %r8 sbbq 0x90(%rsp), %r8 movq 0x58(%rsp), %r9 sbbq 0x98(%rsp), %r9 movl $0xffffffff, %r10d 
sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0xa0(%rsp) adcq %r10, %rcx movq %rcx, 0xa8(%rsp) adcq $0x0, %r8 movq %r8, 0xb0(%rsp) adcq %rdx, %r9 movq %r9, 0xb8(%rsp) movq 0x20(%rsp), %rax subq 0xc0(%rsp), %rax movq 0x28(%rsp), %rcx sbbq 0xc8(%rsp), %rcx movq 0x30(%rsp), %r8 sbbq 0xd0(%rsp), %r8 movq 0x38(%rsp), %r9 sbbq 0xd8(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0x20(%rsp) adcq %r10, %rcx movq %rcx, 0x28(%rsp) adcq $0x0, %r8 movq %r8, 0x30(%rsp) adcq %rdx, %r9 movq %r9, 0x38(%rsp) movq 0xa0(%rsp), %rax movq %rax, %rbx mulq %rax movq %rax, %r8 movq %rdx, %r15 movq 0xa8(%rsp), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0xb8(%rsp), %rax movq %rax, %r13 mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0xb0(%rsp), %rax movq %rax, %rbx mulq %r13 movq %rax, %r13 movq %rdx, %r14 movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx movq 0xa8(%rsp), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq 0xb8(%rsp), %rbx movq 0xa8(%rsp), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq $0x0, %r14 xorl %ecx, %ecx addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %rcx, %rcx movq 0xa8(%rsp), %rax mulq %rax addq %r15, %r9 adcq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 movq 0xb0(%rsp), %rax mulq %rax negq %r15 adcq %rax, %r12 adcq %rdx, %r13 sbbq %r15, %r15 movq 0xb8(%rsp), %rax mulq %rax negq %r15 adcq %rax, %r14 adcq %rcx, %rdx movq %rdx, %r15 movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %rcx, %rcx movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx xorl %r8d, %r8d movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r8, %r14 adcq %r8, %r15 adcq %r8, %r8 movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx xorl %r9d, %r9d movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx leaq -0x1(%rbx), %rbx adcq %r13, %rbx leaq -0x1(%r9), %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x60(%rsp) movq %r13, 0x68(%rsp) movq %r14, 0x70(%rsp) movq %r15, 0x78(%rsp) movq 0x20(%rsp), %rax movq %rax, %rbx mulq %rax movq %rax, %r8 movq %rdx, %r15 movq 0x28(%rsp), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x38(%rsp), %rax movq %rax, %r13 mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x30(%rsp), %rax movq %rax, %rbx mulq %r13 movq %rax, %r13 movq %rdx, %r14 movq 0x20(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx movq 0x28(%rsp), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq 0x38(%rsp), %rbx movq 0x28(%rsp), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq $0x0, %r14 xorl %ecx, %ecx addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %rcx, %rcx movq 0x28(%rsp), %rax 
mulq %rax addq %r15, %r9 adcq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 movq 0x30(%rsp), %rax mulq %rax negq %r15 adcq %rax, %r12 adcq %rdx, %r13 sbbq %r15, %r15 movq 0x38(%rsp), %rax mulq %rax negq %r15 adcq %rax, %r14 adcq %rcx, %rdx movq %rdx, %r15 movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %rcx, %rcx movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx xorl %r8d, %r8d movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r8, %r14 adcq %r8, %r15 adcq %r8, %r8 movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx xorl %r9d, %r9d movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx leaq -0x1(%rbx), %rbx adcq %r13, %rbx leaq -0x1(%r9), %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, (%rsp) movq %r13, 0x8(%rsp) movq %r14, 0x10(%rsp) movq %r15, 0x18(%rsp) movq 0x80(%rsp), %rbx movq 0x60(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x68(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x70(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x78(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x88(%rsp), %rbx xorl %r13d, %r13d movq 0x60(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r14, %r14 movq 0x68(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r14, %r14 movq 0x70(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r14, %r14 movq 0x78(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %r15, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0x90(%rsp), %rbx xorl %r15d, %r15d movq 0x60(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x68(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x70(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x78(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0x98(%rsp), %rbx xorl %r8d, %r8d movq 0x60(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x68(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x70(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x78(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r8, %r8 xorl %r9d, %r9d movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, 
%r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rbx adcq %r13, %rbx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x80(%rsp) movq %r13, 0x88(%rsp) movq %r14, 0x90(%rsp) movq %r15, 0x98(%rsp) movq 0x40(%rsp), %rbx movq 0x60(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x68(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x70(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x78(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x48(%rsp), %rbx xorl %r13d, %r13d movq 0x60(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r14, %r14 movq 0x68(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r14, %r14 movq 0x70(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r14, %r14 movq 0x78(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %r15, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0x50(%rsp), %rbx xorl %r15d, %r15d movq 0x60(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x68(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x70(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x78(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0x58(%rsp), %rbx xorl %r8d, %r8d movq 0x60(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x68(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x70(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x78(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r8, %r8 xorl %r9d, %r9d movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rbx adcq %r13, %rbx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x40(%rsp) movq %r13, 0x48(%rsp) movq %r14, 0x50(%rsp) movq %r15, 0x58(%rsp) movq (%rsp), %rax subq 0x80(%rsp), %rax movq 0x8(%rsp), %rcx sbbq 0x88(%rsp), %rcx movq 0x10(%rsp), %r8 sbbq 0x90(%rsp), %r8 movq 0x18(%rsp), %r9 sbbq 0x98(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, (%rsp) adcq %r10, %rcx movq %rcx, 0x8(%rsp) adcq $0x0, %r8 movq %r8, 
0x10(%rsp) adcq %rdx, %r9 movq %r9, 0x18(%rsp) movq 0x40(%rsp), %rax subq 0x80(%rsp), %rax movq 0x48(%rsp), %rcx sbbq 0x88(%rsp), %rcx movq 0x50(%rsp), %r8 sbbq 0x90(%rsp), %r8 movq 0x58(%rsp), %r9 sbbq 0x98(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0x60(%rsp) adcq %r10, %rcx movq %rcx, 0x68(%rsp) adcq $0x0, %r8 movq %r8, 0x70(%rsp) adcq %rdx, %r9 movq %r9, 0x78(%rsp) movq 0x40(%rsi), %rbx movq 0xa0(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0xa8(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0xb0(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0xb8(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x48(%rsi), %rbx xorl %r13d, %r13d movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r14, %r14 movq 0xa8(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r14, %r14 movq 0xb0(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r14, %r14 movq 0xb8(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %r15, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0x50(%rsi), %rbx xorl %r15d, %r15d movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0xa8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0xb0(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0xb8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0x58(%rsi), %rbx xorl %r8d, %r8d movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0xa8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0xb0(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0xb8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r8, %r8 xorl %r9d, %r9d movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rbx adcq %r13, %rbx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0xa0(%rsp) movq %r13, 0xa8(%rsp) movq %r14, 0xb0(%rsp) movq %r15, 0xb8(%rsp) movq (%rsp), %rax subq 0x40(%rsp), %rax movq 0x8(%rsp), %rcx sbbq 0x48(%rsp), %rcx movq 0x10(%rsp), %r8 sbbq 0x50(%rsp), %r8 movq 0x18(%rsp), %r9 sbbq 0x58(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, (%rsp) adcq %r10, %rcx movq %rcx, 0x8(%rsp) adcq $0x0, %r8 movq %r8, 0x10(%rsp) adcq %rdx, %r9 movq %r9, 0x18(%rsp) movq 0x80(%rsp), %rax subq (%rsp), %rax movq 
0x88(%rsp), %rcx sbbq 0x8(%rsp), %rcx movq 0x90(%rsp), %r8 sbbq 0x10(%rsp), %r8 movq 0x98(%rsp), %r9 sbbq 0x18(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0x80(%rsp) adcq %r10, %rcx movq %rcx, 0x88(%rsp) adcq $0x0, %r8 movq %r8, 0x90(%rsp) adcq %rdx, %r9 movq %r9, 0x98(%rsp) movq 0xc0(%rsp), %rbx movq 0x60(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x68(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x70(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x78(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0xc8(%rsp), %rbx xorl %r13d, %r13d movq 0x60(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r14, %r14 movq 0x68(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r14, %r14 movq 0x70(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r14, %r14 movq 0x78(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %r15, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0xd0(%rsp), %rbx xorl %r15d, %r15d movq 0x60(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x68(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x70(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x78(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0xd8(%rsp), %rbx xorl %r8d, %r8d movq 0x60(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x68(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x70(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x78(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r8, %r8 xorl %r9d, %r9d movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rbx adcq %r13, %rbx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x60(%rsp) movq %r13, 0x68(%rsp) movq %r14, 0x70(%rsp) movq %r15, 0x78(%rsp) movq 0x40(%rbp), %rbx movq 0xa0(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0xa8(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0xb0(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0xb8(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x48(%rbp), %rbx xorl %r13d, %r13d movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r14, %r14 movq 0xa8(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r14, 
%r14 movq 0xb0(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r14, %r14 movq 0xb8(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %r15, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0x50(%rbp), %rbx xorl %r15d, %r15d movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0xa8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0xb0(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0xb8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0x58(%rbp), %rbx xorl %r8d, %r8d movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0xa8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0xb0(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0xb8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r8, %r8 xorl %r9d, %r9d movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rbx adcq %r13, %rbx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0xa0(%rsp) movq %r13, 0xa8(%rsp) movq %r14, 0xb0(%rsp) movq %r15, 0xb8(%rsp) movq 0x80(%rsp), %rbx movq 0x20(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x28(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x30(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x38(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x88(%rsp), %rbx xorl %r13d, %r13d movq 0x20(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r14, %r14 movq 0x28(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r14, %r14 movq 0x30(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r14, %r14 movq 0x38(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %r15, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0x90(%rsp), %rbx xorl %r15d, %r15d movq 0x20(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x28(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x30(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, 
%r8 movq 0x38(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0x98(%rsp), %rbx xorl %r8d, %r8d movq 0x20(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x28(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x30(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x38(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r8, %r8 xorl %r9d, %r9d movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rbx adcq %r13, %rbx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x80(%rsp) movq %r13, 0x88(%rsp) movq %r14, 0x90(%rsp) movq %r15, 0x98(%rsp) movq 0x80(%rsp), %rax subq 0x60(%rsp), %rax movq 0x88(%rsp), %rcx sbbq 0x68(%rsp), %rcx movq 0x90(%rsp), %r8 sbbq 0x70(%rsp), %r8 movq 0x98(%rsp), %r9 sbbq 0x78(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0x80(%rsp) adcq %r10, %rcx movq %rcx, 0x88(%rsp) adcq $0x0, %r8 movq %r8, 0x90(%rsp) adcq %rdx, %r9 movq %r9, 0x98(%rsp) movq 0x40(%rsi), %r8 movq 0x48(%rsi), %r9 movq 0x50(%rsi), %r10 movq 0x58(%rsi), %r11 movq %r8, %rax movq %r9, %rdx orq %r10, %rax orq %r11, %rdx orq %rdx, %rax negq %rax sbbq %rax, %rax movq 0x40(%rbp), %r12 movq 0x48(%rbp), %r13 movq 0x50(%rbp), %r14 movq 0x58(%rbp), %r15 movq %r12, %rbx movq %r13, %rdx orq %r14, %rbx orq %r15, %rdx orq %rdx, %rbx negq %rbx sbbq %rbx, %rbx cmpq %rax, %rbx cmovbq %r8, %r12 cmovbq %r9, %r13 cmovbq %r10, %r14 cmovbq %r11, %r15 cmoveq 0xa0(%rsp), %r12 cmoveq 0xa8(%rsp), %r13 cmoveq 0xb0(%rsp), %r14 cmoveq 0xb8(%rsp), %r15 movq (%rsp), %rax cmovbq (%rsi), %rax cmova 0x0(%rbp), %rax movq 0x8(%rsp), %rbx cmovbq 0x8(%rsi), %rbx cmova 0x8(%rbp), %rbx movq 0x10(%rsp), %rcx cmovbq 0x10(%rsi), %rcx cmova 0x10(%rbp), %rcx movq 0x18(%rsp), %rdx cmovbq 0x18(%rsi), %rdx cmova 0x18(%rbp), %rdx movq 0x80(%rsp), %r8 cmovbq 0x20(%rsi), %r8 cmova 0x20(%rbp), %r8 movq 0x88(%rsp), %r9 cmovbq 0x28(%rsi), %r9 cmova 0x28(%rbp), %r9 movq 0x90(%rsp), %r10 cmovbq 0x30(%rsi), %r10 cmova 0x30(%rbp), %r10 movq 0x98(%rsp), %r11 cmovbq 0x38(%rsi), %r11 cmova 0x38(%rbp), %r11 movq %rax, (%rdi) movq %rbx, 0x8(%rdi) movq %rcx, 0x10(%rdi) movq %rdx, 0x18(%rdi) movq %r8, 0x20(%rdi) movq %r9, 0x28(%rdi) movq %r10, 0x30(%rdi) movq %r11, 0x38(%rdi) movq %r12, 0x40(%rdi) movq %r13, 0x48(%rdi) movq %r14, 0x50(%rdi) movq %r15, 0x58(%rdi) addq $0xe0, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx ret p256_montjscalarmul_alt_p256_montjdouble: pushq %rbx pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $0xc0, %rsp movq 0x40(%rsi), %rax movq %rax, %rbx mulq %rax movq %rax, %r8 movq %rdx, %r15 movq 0x48(%rsi), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x58(%rsi), %rax movq %rax, %r13 mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x50(%rsi), %rax movq %rax, %rbx mulq %r13 movq %rax, %r13 movq %rdx, %r14 movq 0x40(%rsi), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, 
%rcx movq 0x48(%rsi), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq 0x58(%rsi), %rbx movq 0x48(%rsi), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq $0x0, %r14 xorl %ecx, %ecx addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %rcx, %rcx movq 0x48(%rsi), %rax mulq %rax addq %r15, %r9 adcq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 movq 0x50(%rsi), %rax mulq %rax negq %r15 adcq %rax, %r12 adcq %rdx, %r13 sbbq %r15, %r15 movq 0x58(%rsi), %rax mulq %rax negq %r15 adcq %rax, %r14 adcq %rcx, %rdx movq %rdx, %r15 movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %rcx, %rcx movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx xorl %r8d, %r8d movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r8, %r14 adcq %r8, %r15 adcq %r8, %r8 movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx xorl %r9d, %r9d movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx leaq -0x1(%rbx), %rbx adcq %r13, %rbx leaq -0x1(%r9), %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, (%rsp) movq %r13, 0x8(%rsp) movq %r14, 0x10(%rsp) movq %r15, 0x18(%rsp) movq 0x20(%rsi), %rax movq %rax, %rbx mulq %rax movq %rax, %r8 movq %rdx, %r15 movq 0x28(%rsi), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x38(%rsi), %rax movq %rax, %r13 mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x30(%rsi), %rax movq %rax, %rbx mulq %r13 movq %rax, %r13 movq %rdx, %r14 movq 0x20(%rsi), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx movq 0x28(%rsi), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq 0x38(%rsi), %rbx movq 0x28(%rsi), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq $0x0, %r14 xorl %ecx, %ecx addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %rcx, %rcx movq 0x28(%rsi), %rax mulq %rax addq %r15, %r9 adcq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 movq 0x30(%rsi), %rax mulq %rax negq %r15 adcq %rax, %r12 adcq %rdx, %r13 sbbq %r15, %r15 movq 0x38(%rsi), %rax mulq %rax negq %r15 adcq %rax, %r14 adcq %rcx, %rdx movq %rdx, %r15 movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %rcx, %rcx movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx xorl %r8d, %r8d movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r8, %r14 adcq %r8, %r15 adcq %r8, %r8 movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, 
%rcx xorl %r9d, %r9d movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx leaq -0x1(%rbx), %rbx adcq %r13, %rbx leaq -0x1(%r9), %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x20(%rsp) movq %r13, 0x28(%rsp) movq %r14, 0x30(%rsp) movq %r15, 0x38(%rsp) movq (%rsi), %rax subq (%rsp), %rax movq 0x8(%rsi), %rcx sbbq 0x8(%rsp), %rcx movq 0x10(%rsi), %r8 sbbq 0x10(%rsp), %r8 movq 0x18(%rsi), %r9 sbbq 0x18(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0x60(%rsp) adcq %r10, %rcx movq %rcx, 0x68(%rsp) adcq $0x0, %r8 movq %r8, 0x70(%rsp) adcq %rdx, %r9 movq %r9, 0x78(%rsp) movq (%rsi), %rax addq (%rsp), %rax movq 0x8(%rsi), %rcx adcq 0x8(%rsp), %rcx movq 0x10(%rsi), %r8 adcq 0x10(%rsp), %r8 movq 0x18(%rsi), %r9 adcq 0x18(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx subq %r11, %rax movq %rax, 0x40(%rsp) sbbq %r10, %rcx movq %rcx, 0x48(%rsp) sbbq $0x0, %r8 movq %r8, 0x50(%rsp) sbbq %rdx, %r9 movq %r9, 0x58(%rsp) movq 0x60(%rsp), %rbx movq 0x40(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x48(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x50(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x58(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x68(%rsp), %rbx xorl %r13d, %r13d movq 0x40(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r14, %r14 movq 0x48(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r14, %r14 movq 0x50(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r14, %r14 movq 0x58(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %r15, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0x70(%rsp), %rbx xorl %r15d, %r15d movq 0x40(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x48(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x50(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x58(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0x78(%rsp), %rbx xorl %r8d, %r8d movq 0x40(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x48(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x50(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x58(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r8, %r8 xorl %r9d, %r9d movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 
adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rbx adcq %r13, %rbx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x60(%rsp) movq %r13, 0x68(%rsp) movq %r14, 0x70(%rsp) movq %r15, 0x78(%rsp) xorq %r11, %r11 movq 0x20(%rsi), %rax addq 0x40(%rsi), %rax movq 0x28(%rsi), %rcx adcq 0x48(%rsi), %rcx movq 0x30(%rsi), %r8 adcq 0x50(%rsi), %r8 movq 0x38(%rsi), %r9 adcq 0x58(%rsi), %r9 adcq %r11, %r11 subq $0xffffffffffffffff, %rax movl $0xffffffff, %r10d sbbq %r10, %rcx sbbq $0x0, %r8 movabsq $0xffffffff00000001, %rdx sbbq %rdx, %r9 sbbq $0x0, %r11 andq %r11, %r10 andq %r11, %rdx addq %r11, %rax movq %rax, 0x40(%rsp) adcq %r10, %rcx movq %rcx, 0x48(%rsp) adcq $0x0, %r8 movq %r8, 0x50(%rsp) adcq %rdx, %r9 movq %r9, 0x58(%rsp) movq 0x20(%rsp), %rbx movq (%rsi), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x8(%rsi), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x10(%rsi), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x18(%rsi), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x28(%rsp), %rbx xorl %r13d, %r13d movq (%rsi), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r14, %r14 movq 0x8(%rsi), %rax mulq %rbx subq %r14, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r14, %r14 movq 0x10(%rsi), %rax mulq %rbx subq %r14, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r14, %r14 movq 0x18(%rsi), %rax mulq %rbx subq %r14, %rdx addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %r15, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0x30(%rsp), %rbx xorl %r15d, %r15d movq (%rsi), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x8(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x10(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x18(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0x38(%rsp), %rbx xorl %r8d, %r8d movq (%rsi), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x8(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x10(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x18(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r8, %r8 xorl %r9d, %r9d movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rbx adcq %r13, %rbx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x80(%rsp) movq %r13, 0x88(%rsp) movq %r14, 0x90(%rsp) movq %r15, 0x98(%rsp) movq 0x60(%rsp), %rax movq %rax, %rbx mulq 
%rax movq %rax, %r8 movq %rdx, %r15 movq 0x68(%rsp), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x78(%rsp), %rax movq %rax, %r13 mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x70(%rsp), %rax movq %rax, %rbx mulq %r13 movq %rax, %r13 movq %rdx, %r14 movq 0x60(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx movq 0x68(%rsp), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq 0x78(%rsp), %rbx movq 0x68(%rsp), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq $0x0, %r14 xorl %ecx, %ecx addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %rcx, %rcx movq 0x68(%rsp), %rax mulq %rax addq %r15, %r9 adcq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 movq 0x70(%rsp), %rax mulq %rax negq %r15 adcq %rax, %r12 adcq %rdx, %r13 sbbq %r15, %r15 movq 0x78(%rsp), %rax mulq %rax negq %r15 adcq %rax, %r14 adcq %rcx, %rdx movq %rdx, %r15 movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %rcx, %rcx movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx xorl %r8d, %r8d movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r8, %r14 adcq %r8, %r15 adcq %r8, %r8 movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx xorl %r9d, %r9d movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx leaq -0x1(%rbx), %rbx adcq %r13, %rbx leaq -0x1(%r9), %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0xa0(%rsp) movq %r13, 0xa8(%rsp) movq %r14, 0xb0(%rsp) movq %r15, 0xb8(%rsp) movq 0x40(%rsp), %rax movq %rax, %rbx mulq %rax movq %rax, %r8 movq %rdx, %r15 movq 0x48(%rsp), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x58(%rsp), %rax movq %rax, %r13 mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x50(%rsp), %rax movq %rax, %rbx mulq %r13 movq %rax, %r13 movq %rdx, %r14 movq 0x40(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx movq 0x48(%rsp), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq 0x58(%rsp), %rbx movq 0x48(%rsp), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq $0x0, %r14 xorl %ecx, %ecx addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %rcx, %rcx movq 0x48(%rsp), %rax mulq %rax addq %r15, %r9 adcq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 movq 0x50(%rsp), %rax mulq %rax negq %r15 adcq %rax, %r12 adcq %rdx, %r13 sbbq %r15, %r15 movq 0x58(%rsp), %rax mulq %rax negq %r15 adcq %rax, %r14 adcq %rcx, %rdx movq %rdx, %r15 movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %rcx, %rcx movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx xorl %r8d, %r8d movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r8, %r14 
adcq %r8, %r15 adcq %r8, %r8 movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx xorl %r9d, %r9d movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx leaq -0x1(%rbx), %rbx adcq %r13, %rbx leaq -0x1(%r9), %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x40(%rsp) movq %r13, 0x48(%rsp) movq %r14, 0x50(%rsp) movq %r15, 0x58(%rsp) movq $0xffffffffffffffff, %r9 xorl %r11d, %r11d subq 0xa0(%rsp), %r9 movabsq $0xffffffff, %r10 sbbq 0xa8(%rsp), %r10 sbbq 0xb0(%rsp), %r11 movabsq $0xffffffff00000001, %r12 sbbq 0xb8(%rsp), %r12 movq $0x9, %rcx movq %r9, %rax mulq %rcx movq %rax, %r8 movq %rdx, %r9 movq %r10, %rax xorl %r10d, %r10d mulq %rcx addq %rax, %r9 adcq %rdx, %r10 movq %r11, %rax xorl %r11d, %r11d mulq %rcx addq %rax, %r10 adcq %rdx, %r11 movq %r12, %rax xorl %r12d, %r12d mulq %rcx addq %rax, %r11 adcq %rdx, %r12 movl $0xc, %ecx movq 0x80(%rsp), %rax mulq %rcx addq %rax, %r8 adcq %rdx, %r9 sbbq %rbx, %rbx movq 0x88(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %rbx, %rbx movq 0x90(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %rbx, %rbx movq 0x98(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r11 adcq %rdx, %r12 leaq 0x1(%r12), %rcx movabsq $0xffffffff00000001, %rax mulq %rcx movq %rcx, %rbx shlq $0x20, %rbx addq %rcx, %r8 sbbq $0x0, %rbx subq %rbx, %r9 sbbq $0x0, %r10 sbbq %rax, %r11 sbbq %rdx, %rcx decq %rcx movl $0xffffffff, %eax andq %rcx, %rax xorl %edx, %edx subq %rax, %rdx addq %rcx, %r8 movq %r8, 0xa0(%rsp) adcq %rax, %r9 movq %r9, 0xa8(%rsp) adcq $0x0, %r10 movq %r10, 0xb0(%rsp) adcq %rdx, %r11 movq %r11, 0xb8(%rsp) movq 0x40(%rsp), %rax subq (%rsp), %rax movq 0x48(%rsp), %rcx sbbq 0x8(%rsp), %rcx movq 0x50(%rsp), %r8 sbbq 0x10(%rsp), %r8 movq 0x58(%rsp), %r9 sbbq 0x18(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0x40(%rsp) adcq %r10, %rcx movq %rcx, 0x48(%rsp) adcq $0x0, %r8 movq %r8, 0x50(%rsp) adcq %rdx, %r9 movq %r9, 0x58(%rsp) movq 0x20(%rsp), %rax movq %rax, %rbx mulq %rax movq %rax, %r8 movq %rdx, %r15 movq 0x28(%rsp), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x38(%rsp), %rax movq %rax, %r13 mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x30(%rsp), %rax movq %rax, %rbx mulq %r13 movq %rax, %r13 movq %rdx, %r14 movq 0x20(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx movq 0x28(%rsp), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq 0x38(%rsp), %rbx movq 0x28(%rsp), %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq $0x0, %r14 xorl %ecx, %ecx addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %rcx, %rcx movq 0x28(%rsp), %rax mulq %rax addq %r15, %r9 adcq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 movq 0x30(%rsp), %rax mulq %rax negq %r15 adcq %rax, %r12 adcq %rdx, %r13 sbbq %r15, %r15 movq 0x38(%rsp), %rax mulq %rax negq %r15 adcq %rax, %r14 adcq %rcx, %rdx movq %rdx, %r15 movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %rcx, 
%rcx movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx xorl %r8d, %r8d movq %r9, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r8, %r14 adcq %r8, %r15 adcq %r8, %r8 movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx xorl %r9d, %r9d movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx leaq -0x1(%rbx), %rbx adcq %r13, %rbx leaq -0x1(%r9), %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, %r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, (%rsp) movq %r13, 0x8(%rsp) movq %r14, 0x10(%rsp) movq %r15, 0x18(%rsp) movq 0x60(%rsp), %rbx movq 0xa0(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0xa8(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0xb0(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0xb8(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x68(%rsp), %rbx xorl %r13d, %r13d movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r14, %r14 movq 0xa8(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r14, %r14 movq 0xb0(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r14, %r14 movq 0xb8(%rsp), %rax mulq %rbx subq %r14, %rdx addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movabsq $0x100000000, %rbx movq %r8, %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r15, %r15 notq %rbx leaq 0x2(%rbx), %rbx movq %r8, %rax mulq %rbx subq %r15, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r15, %r15 movq %r9, %rax mulq %rbx subq %r15, %rdx addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0x70(%rsp), %rbx xorl %r15d, %r15d movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0xa8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0xb0(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0xb8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0x78(%rsp), %rbx xorl %r8d, %r8d movq 0xa0(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0xa8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0xb0(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0xb8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r8, %r8 xorl %r9d, %r9d movabsq $0x100000000, %rbx movq %r10, %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rcx, %rcx notq %rbx leaq 0x2(%rbx), %rbx movq %r10, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rcx, %rcx movq %r11, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq %r9, %r8 movl $0x1, %ecx addq %r12, %rcx decq %rbx adcq %r13, %rbx decq %r9 movq %r9, %rax adcq %r14, %r9 movl $0xfffffffe, 
%r11d adcq %r15, %r11 adcq %r8, %rax cmovbq %rcx, %r12 cmovbq %rbx, %r13 cmovbq %r9, %r14 cmovbq %r11, %r15 movq %r12, 0x60(%rsp) movq %r13, 0x68(%rsp) movq %r14, 0x70(%rsp) movq %r15, 0x78(%rsp) movq 0x40(%rsp), %rax subq 0x20(%rsp), %rax movq 0x48(%rsp), %rcx sbbq 0x28(%rsp), %rcx movq 0x50(%rsp), %r8 sbbq 0x30(%rsp), %r8 movq 0x58(%rsp), %r9 sbbq 0x38(%rsp), %r9 movl $0xffffffff, %r10d sbbq %r11, %r11 xorq %rdx, %rdx andq %r11, %r10 subq %r10, %rdx addq %r11, %rax movq %rax, 0x40(%rdi) adcq %r10, %rcx movq %rcx, 0x48(%rdi) adcq $0x0, %r8 movq %r8, 0x50(%rdi) adcq %rdx, %r9 movq %r9, 0x58(%rdi) movq 0x98(%rsp), %r11 movq %r11, %rcx movq 0x90(%rsp), %r10 shldq $0x2, %r10, %r11 movq 0x88(%rsp), %r9 shldq $0x2, %r9, %r10 movq 0x80(%rsp), %r8 shldq $0x2, %r8, %r9 shlq $0x2, %r8 shrq $0x3e, %rcx addq $0x1, %rcx subq 0xa0(%rsp), %r8 sbbq 0xa8(%rsp), %r9 sbbq 0xb0(%rsp), %r10 sbbq 0xb8(%rsp), %r11 sbbq $0x0, %rcx movabsq $0xffffffff00000001, %rax mulq %rcx movq %rcx, %rbx shlq $0x20, %rbx addq %rcx, %r8 sbbq $0x0, %rbx subq %rbx, %r9 sbbq $0x0, %r10 sbbq %rax, %r11 sbbq %rdx, %rcx decq %rcx movl $0xffffffff, %eax andq %rcx, %rax xorl %edx, %edx subq %rax, %rdx addq %rcx, %r8 movq %r8, (%rdi) adcq %rax, %r9 movq %r9, 0x8(%rdi) adcq $0x0, %r10 movq %r10, 0x10(%rdi) adcq %rdx, %r11 movq %r11, 0x18(%rdi) movq $0xffffffffffffffff, %r8 xorl %r10d, %r10d subq (%rsp), %r8 movabsq $0xffffffff, %r9 sbbq 0x8(%rsp), %r9 sbbq 0x10(%rsp), %r10 movabsq $0xffffffff00000001, %r11 sbbq 0x18(%rsp), %r11 movq %r11, %r12 shldq $0x3, %r10, %r11 shldq $0x3, %r9, %r10 shldq $0x3, %r8, %r9 shlq $0x3, %r8 shrq $0x3d, %r12 movl $0x3, %ecx movq 0x60(%rsp), %rax mulq %rcx addq %rax, %r8 adcq %rdx, %r9 sbbq %rbx, %rbx movq 0x68(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %rbx, %rbx movq 0x70(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %rbx, %rbx movq 0x78(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r11 adcq %rdx, %r12 leaq 0x1(%r12), %rcx movabsq $0xffffffff00000001, %rax mulq %rcx movq %rcx, %rbx shlq $0x20, %rbx addq %rcx, %r8 sbbq $0x0, %rbx subq %rbx, %r9 sbbq $0x0, %r10 sbbq %rax, %r11 sbbq %rdx, %rcx decq %rcx movl $0xffffffff, %eax andq %rcx, %rax xorl %edx, %edx subq %rax, %rdx addq %rcx, %r8 movq %r8, 0x20(%rdi) adcq %rax, %r9 movq %r9, 0x28(%rdi) adcq $0x0, %r10 movq %r10, 0x30(%rdi) adcq %rdx, %r11 movq %r11, 0x38(%rdi) addq $0xc0, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbx ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
marvin-hansen/iggy-streaming-system
90,362
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p256/bignum_montinv_p256.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery inverse modulo p_256 = 2^256 - 2^224 + 2^192 + 2^96 - 1 // Input x[4]; output z[4] // // extern void bignum_montinv_p256(uint64_t z[static 4],uint64_t x[static 4]); // // If the 4-digit input x is coprime to p_256, i.e. is not divisible // by it, returns z < p_256 such that x * z == 2^512 (mod p_256). This // is effectively "Montgomery inverse" because if we consider x and z as // Montgomery forms of X and Z, i.e. x == 2^256 * X and z == 2^256 * Z // (both mod p_256) then X * Z == 1 (mod p_256). That is, this function // gives the analog of the modular inverse bignum_inv_p256 but with both // input and output in the Montgomery domain. Note that x does not need // to be reduced modulo p_256, but the output always is. If the input // is divisible (i.e. is 0 or p_256), then there can be no solution to // the congruence x * z == 2^512 (mod p_256), and z = 0 is returned. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montinv_p256) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montinv_p256) .text // Size in bytes of a 64-bit word #define N 8 // Pointer-offset pairs for temporaries on stack #define f 0(%rsp) #define g (5*N)(%rsp) #define u (10*N)(%rsp) #define v (15*N)(%rsp) #define tmp (20*N)(%rsp) #define tmp2 (21*N)(%rsp) #define i (22*N)(%rsp) #define d (23*N)(%rsp) #define mat (24*N)(%rsp) // Backup for the input pointer #define res (28*N)(%rsp) // Total size to reserve on the stack #define NSPACE (30*N) // Syntactic variants to make x86_att version simpler to generate #define F 0 #define G (5*N) #define U (10*N) #define V (15*N) #define MAT (24*N) #define ff (%rsp) #define gg (5*N)(%rsp) // --------------------------------------------------------------------------- // Core signed almost-Montgomery reduction macro from u[4..0] to u[3..0]. // --------------------------------------------------------------------------- #define amontred(P) \ /* We only know the input is -2^316 < x < 2^316. To do traditional */ \ /* unsigned Montgomery reduction, start by adding 2^61 * p_256. */ \ movq $0xe000000000000000, %r8 ; \ addq P, %r8 ; \ movq $0xffffffffffffffff, %r9 ; \ adcq 8+P, %r9 ; \ movq $0x000000001fffffff, %r10 ; \ adcq 16+P, %r10 ; \ movq $0x2000000000000000, %r11 ; \ adcq 24+P, %r11 ; \ movq $0x1fffffffe0000000, %r12 ; \ adcq 32+P, %r12 ; \ /* Let [%r8;%rbx] = 2^32 * w and [%rdx;%rax] = (2^64 - 2^32 + 1) * w */ \ /* where w is the lowest word */ \ movq %r8, %rbx ; \ shlq $32, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %r8; \ shrq $32, %r8 ; \ /* Hence basic addition of (2^256 - 2^224 + 2^192 + 2^96) * w */ \ addq %rbx, %r9 ; \ adcq %r8, %r10 ; \ adcq %rax, %r11 ; \ adcq %rdx, %r12 ; \ /* Now capture carry and subtract p_256 if set (almost-Montgomery) */ \ sbbq %rax, %rax ; \ movl $0x00000000ffffffff, %ebx ; \ andq %rax, %rbx ; \ movq $0xffffffff00000001, %rdx ; \ andq %rax, %rdx ; \ subq %rax, %r9 ; \ movq %r9, P ; \ sbbq %rbx, %r10 ; \ movq %r10, 8+P ; \ sbbq $0, %r11 ; \ movq %r11, 16+P ; \ sbbq %rdx, %r12 ; \ movq %r12, 24+P // Very similar to a subroutine call to the s2n-bignum word_divstep59. 
// But different in register usage and returning the final matrix as // // [ %r8 %r10] // [ %r12 %r14] // // and also returning the matrix still negated (which doesn't matter) #define divstep59(din,fin,gin) \ movq din, %rsi ; \ movq fin, %rdx ; \ movq gin, %rcx ; \ movq %rdx, %rbx ; \ andq $0xfffff, %rbx ; \ movabsq $0xfffffe0000000000, %rax ; \ orq %rax, %rbx ; \ andq $0xfffff, %rcx ; \ movabsq $0xc000000000000000, %rax ; \ orq %rax, %rcx ; \ movq $0xfffffffffffffffe, %rax ; \ xorl %ebp, %ebp ; \ movl $0x2, %edx ; \ movq %rbx, %rdi ; \ movq %rax, %r8 ; \ testq %rsi, %rsi ; \ cmovs %rbp, %r8 ; \ testq $0x1, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq 
%rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ sarq $1, %rcx ; \ movl $0x100000, %eax ; \ leaq (%rbx,%rax), %rdx ; \ leaq (%rcx,%rax), %rdi ; \ shlq $0x16, %rdx ; \ shlq $0x16, %rdi ; \ sarq $0x2b, %rdx ; \ sarq $0x2b, %rdi ; \ movabsq $0x20000100000, %rax ; \ leaq (%rbx,%rax), %rbx ; \ leaq (%rcx,%rax), %rcx ; \ sarq $0x2a, %rbx ; \ sarq $0x2a, %rcx ; \ movq %rdx, MAT(%rsp) ; \ movq %rbx, MAT+0x8(%rsp) ; \ movq %rdi, MAT+0x10(%rsp) ; \ movq %rcx, MAT+0x18(%rsp) ; \ movq fin, %r12 ; \ imulq %r12, %rdi ; \ imulq %rdx, %r12 ; \ movq gin, %r13 ; \ imulq %r13, %rbx ; \ imulq %rcx, %r13 ; \ addq %rbx, %r12 ; \ addq %rdi, %r13 ; \ sarq $0x14, %r12 ; \ sarq $0x14, %r13 ; \ movq %r12, %rbx ; \ andq $0xfffff, %rbx ; \ movabsq $0xfffffe0000000000, %rax ; \ orq %rax, %rbx ; \ movq %r13, %rcx ; \ andq $0xfffff, %rcx ; \ movabsq $0xc000000000000000, %rax ; \ orq %rax, %rcx ; \ movq $0xfffffffffffffffe, %rax ; \ movl $0x2, %edx ; \ movq %rbx, %rdi ; \ movq %rax, %r8 ; \ testq %rsi, %rsi ; \ cmovs %rbp, %r8 ; \ testq $0x1, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq 
%rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq 
%r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ sarq $1, %rcx ; \ movl $0x100000, %eax ; \ leaq (%rbx,%rax), %r8 ; \ leaq (%rcx,%rax), %r10 ; \ shlq $0x16, %r8 ; \ shlq $0x16, %r10 ; \ sarq $0x2b, %r8 ; \ sarq $0x2b, %r10 ; \ movabsq $0x20000100000, %rax ; \ leaq (%rbx,%rax), %r15 ; \ leaq (%rcx,%rax), %r11 ; \ sarq $0x2a, %r15 ; \ sarq $0x2a, %r11 ; \ movq %r13, %rbx ; \ movq %r12, %rcx ; \ imulq %r8, %r12 ; \ imulq %r15, %rbx ; \ addq %rbx, %r12 ; \ imulq %r11, %r13 ; \ imulq %r10, %rcx ; \ addq %rcx, %r13 ; \ sarq $0x14, %r12 ; \ sarq $0x14, %r13 ; \ movq %r12, %rbx ; \ andq $0xfffff, %rbx ; \ movabsq $0xfffffe0000000000, %rax ; \ orq %rax, %rbx ; \ movq %r13, %rcx ; \ andq $0xfffff, %rcx ; \ movabsq $0xc000000000000000, %rax ; \ orq %rax, %rcx ; \ movq MAT(%rsp), %rax ; \ imulq %r8, %rax ; \ movq MAT+0x10(%rsp), %rdx ; \ imulq %r15, %rdx ; \ imulq MAT+0x8(%rsp), %r8 ; \ imulq MAT+0x18(%rsp), %r15 ; \ addq %r8, %r15 ; \ leaq (%rax,%rdx), %r9 ; \ movq MAT(%rsp), %rax ; \ imulq %r10, %rax ; \ movq MAT+0x10(%rsp), %rdx ; \ imulq %r11, %rdx ; \ imulq MAT+0x8(%rsp), %r10 ; \ imulq MAT+0x18(%rsp), %r11 ; \ addq %r10, %r11 ; \ leaq (%rax,%rdx), %r13 ; \ movq $0xfffffffffffffffe, %rax ; \ movl $0x2, %edx ; \ movq %rbx, %rdi ; \ movq %rax, %r8 ; \ testq %rsi, %rsi ; \ cmovs %rbp, %r8 ; \ testq $0x1, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ 
cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq 
$0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ sarq $1, %rcx ; \ movl $0x100000, %eax ; \ leaq (%rbx,%rax), %r8 ; \ leaq (%rcx,%rax), %r12 ; \ shlq $0x15, %r8 ; \ shlq $0x15, %r12 ; \ sarq $0x2b, %r8 ; \ sarq $0x2b, %r12 ; \ movabsq $0x20000100000, %rax ; \ leaq (%rbx,%rax), %r10 ; \ leaq (%rcx,%rax), %r14 ; \ sarq $0x2b, %r10 ; \ sarq $0x2b, %r14 ; \ movq %r9, %rax ; \ imulq %r8, %rax ; \ movq %r13, %rdx ; \ imulq %r10, %rdx ; \ imulq %r15, %r8 ; \ imulq %r11, %r10 ; \ addq %r8, %r10 ; \ leaq (%rax,%rdx), %r8 ; \ movq %r9, %rax ; \ imulq %r12, %rax ; \ movq %r13, %rdx ; \ imulq %r14, %rdx ; \ imulq %r15, %r12 ; \ imulq %r11, %r14 ; \ addq %r12, %r14 ; \ leaq (%rax,%rdx), %r12 S2N_BN_SYMBOL(bignum_montinv_p256): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // Save registers and make room for temporaries pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp // Save the return pointer for the end so we can overwrite %rdi later movq %rdi, res // Create constant [%rdx;%rcx;%rbx;%rax] = p_256 and copy it into the variable f // including the 5th zero digit xorl %ecx, %ecx movl $0x00000000ffffffff, %edx movq %rdx, %rbx leaq -1(%rcx), %rax negq %rdx movq %rax, F(%rsp) movq %rbx, F+8(%rsp) movq %rcx, F+16(%rsp) movq %rdx, F+24(%rsp) movq %rcx, F+32(%rsp) // Now reduce the input modulo p_256, first negating the constant to get // [%rdx;%rcx;%rbx;%rax] = 2^256 - p_256, adding it to x and hence getting // the comparison x < p_256 <=> (2^256 - p_256) + x < 2^256 and choosing // g accordingly. movq (%rsi), %r8 movq 8(%rsi), %r9 movq 16(%rsi), %r10 movq 24(%rsi), %r11 leaq 1(%rcx), %rax addq %r8, %rax leaq -1(%rdx), %rbx adcq %r9, %rbx notq %rcx adcq %r10, %rcx notq %rdx adcq %r11, %rdx cmovncq %r8, %rax cmovncq %r9, %rbx cmovncq %r10, %rcx cmovncq %r11, %rdx movq %rax, G(%rsp) movq %rbx, G+8(%rsp) movq %rcx, G+16(%rsp) movq %rdx, G+24(%rsp) xorl %eax, %eax movq %rax, G+32(%rsp) // Also maintain reduced < 2^256 vector [u,v] such that // [f,g] == x * 2^{5*i-562} * [u,v] (mod p_256) // starting with [p_256,x] == x * 2^{5*0-562} * [0,2^562] (mod p_256) // The weird-looking 5*i modifications come in because we are doing // 64-bit word-sized Montgomery reductions at each stage, which is // 5 bits more than the 59-bit requirement to keep things stable. // After the 10th and last iteration and sign adjustment, when // f == 1 for in-scope cases, we have x * 2^{50-562} * u == 1, i.e. // x * u == 2^512 as required. xorl %eax, %eax movq %rax, U(%rsp) movq %rax, U+8(%rsp) movq %rax, U+16(%rsp) movq %rax, U+24(%rsp) movq $0x000c000000140000, %rax movq %rax, V(%rsp) movq $0xffe8000000000000, %rax movq %rax, V+8(%rsp) movq $0xfffbffffffefffff, %rax movq %rax, V+16(%rsp) movq $0x000bffffffebffff, %rax movq %rax, V+24(%rsp) // Start of main loop. We jump into the middle so that the divstep // portion is common to the special tenth iteration after a uniform // first 9. 
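Aside (not part of the original s2n-bignum source): the file header above states the contract x * z == 2^512 (mod p_256), and the amontred macro comments describe one word-level almost-Montgomery reduction step. The minimal Python sketch below models both purely as a reference; the names P256, montinv_ref and amontred_model are invented here for illustration and do not exist in the library.

import random

P256 = (1 << 256) - (1 << 224) + (1 << 192) + (1 << 96) - 1   # p_256

def montinv_ref(x):
    # Reference for the stated contract: z with x * z == 2^512 (mod p_256),
    # i.e. z = 2^512 * x^(-1) mod p_256, and z = 0 when x is divisible by p_256.
    if x % P256 == 0:
        return 0
    return (pow(x, P256 - 2, P256) * pow(2, 512, P256)) % P256

def amontred_model(a):
    # Model of one amontred step: map a signed value with -2^316 < a < 2^316
    # to a result below 2^256 that is congruent to a * 2^(-64) mod p_256
    # ("almost" Montgomery: the result may still be >= p_256).
    a += (1 << 61) * P256            # add 2^61 * p_256 to make the value nonnegative
    w = a & ((1 << 64) - 1)          # lowest word; p_256 == -1 (mod 2^64), so m = w
    a = (a + w * P256) >> 64         # low word cancels, then divide by 2^64
    if a >> 256:                     # carry out of four words: subtract p_256 once
        a -= P256
    return a

# Quick randomized self-checks (pow with a negative exponent needs Python 3.8+).
for _ in range(100):
    x = random.randrange(1, P256)
    assert (x * montinv_ref(x)) % P256 == pow(2, 512, P256)
    a = random.randrange(-(1 << 316) + 1, 1 << 316)
    r = amontred_model(a)
    assert 0 <= r < (1 << 256)
    assert (r - a * pow(2, -64, P256)) % P256 == 0
print("reference contract and amontred model: checks passed")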
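Also for illustration only: per its own comment, the divstep59 macro above batches 59 "divstep" iterations on packed 64-bit approximations of f and g and returns the (still negated) 2x2 transition matrix in %r8/%r10/%r12/%r14. The sketch below is the plain textbook Bernstein-Yang divstep, not the s2n-bignum variant (whose packing, batching and sign conventions differ); it only shows the underlying recurrence that drives g to 0 while f retains the gcd.

def divstep(delta, f, g):
    # Textbook divstep: f stays odd; each step halves g after an optional
    # conditional swap, so repeated steps drive g to 0 with |f| = gcd(f0, g0).
    if delta > 0 and g & 1:
        return 1 - delta, g, (g - f) >> 1
    return 1 + delta, f, (g + (g & 1) * f) >> 1

delta, f, g = 1, 15, 12              # toy inputs: f odd, gcd(15, 12) = 3
for _ in range(20):                  # a handful of steps suffices at this size
    delta, f, g = divstep(delta, f, g)
assert g == 0 and abs(f) == 3
print("divstep demo: g ->", g, ", |f| =", abs(f))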
movq $10, i movq $1, d jmp bignum_montinv_p256_midloop bignum_montinv_p256_loop: // Separate out the matrix into sign-magnitude pairs movq %r8, %r9 sarq $63, %r9 xorq %r9, %r8 subq %r9, %r8 movq %r10, %r11 sarq $63, %r11 xorq %r11, %r10 subq %r11, %r10 movq %r12, %r13 sarq $63, %r13 xorq %r13, %r12 subq %r13, %r12 movq %r14, %r15 sarq $63, %r15 xorq %r15, %r14 subq %r15, %r14 // Adjust the initial values to allow for complement instead of negation // This initial offset is the same for [f,g] and [u,v] compositions. // Save it in temporary storage for the [u,v] part and do [f,g] first. movq %r8, %rax andq %r9, %rax movq %r10, %rdi andq %r11, %rdi addq %rax, %rdi movq %rdi, tmp movq %r12, %rax andq %r13, %rax movq %r14, %rsi andq %r15, %rsi addq %rax, %rsi movq %rsi, tmp2 // Now the computation of the updated f and g values. This maintains a // 2-word carry between stages so we can conveniently insert the shift // right by 59 before storing back, and not overwrite digits we need // again of the old f and g values. // // Digit 0 of [f,g] xorl %ebx, %ebx movq F(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq G(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rdi adcq %rdx, %rbx xorl %ebp, %ebp movq F(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rsi adcq %rdx, %rbp movq G(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp // Digit 1 of [f,g] xorl %ecx, %ecx movq F+N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq G+N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx shrdq $59, %rbx, %rdi movq %rdi, F(%rsp) xorl %edi, %edi movq F+N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbp adcq %rdx, %rdi movq G+N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rdi shrdq $59, %rbp, %rsi movq %rsi, G(%rsp) // Digit 2 of [f,g] xorl %esi, %esi movq F+2*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rsi movq G+2*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rsi shrdq $59, %rcx, %rbx movq %rbx, F+N(%rsp) xorl %ebx, %ebx movq F+2*N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rdi adcq %rdx, %rbx movq G+2*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rdi adcq %rdx, %rbx shrdq $59, %rdi, %rbp movq %rbp, G+N(%rsp) // Digits 3 and 4 of [f,g] movq F+3*N(%rsp), %rax xorq %r9, %rax movq F+4*N(%rsp), %rbp xorq %r9, %rbp andq %r8, %rbp negq %rbp mulq %r8 addq %rax, %rsi adcq %rdx, %rbp movq G+3*N(%rsp), %rax xorq %r11, %rax movq G+4*N(%rsp), %rdx xorq %r11, %rdx andq %r10, %rdx subq %rdx, %rbp mulq %r10 addq %rax, %rsi adcq %rdx, %rbp shrdq $59, %rsi, %rcx movq %rcx, F+2*N(%rsp) shrdq $59, %rbp, %rsi sarq $59, %rbp movq F+3*N(%rsp), %rax movq %rsi, F+3*N(%rsp) movq F+4*N(%rsp), %rsi movq %rbp, F+4*N(%rsp) xorq %r13, %rax xorq %r13, %rsi andq %r12, %rsi negq %rsi mulq %r12 addq %rax, %rbx adcq %rdx, %rsi movq G+3*N(%rsp), %rax xorq %r15, %rax movq G+4*N(%rsp), %rdx xorq %r15, %rdx andq %r14, %rdx subq %rdx, %rsi mulq %r14 addq %rax, %rbx adcq %rdx, %rsi shrdq $59, %rbx, %rdi movq %rdi, G+2*N(%rsp) shrdq $59, %rsi, %rbx movq %rbx, G+3*N(%rsp) sarq $59, %rsi movq %rsi, G+4*N(%rsp) // Get the initial carries back from storage and do the [u,v] accumulation movq tmp, %rbx movq tmp2, %rbp // Digit 0 of [u,v] xorl %ecx, %ecx movq U(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq V(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq U(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, U(%rsp) addq 
%rax, %rbp adcq %rdx, %rsi movq V(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, V(%rsp) // Digit 1 of [u,v] xorl %ebx, %ebx movq U+N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq V+N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq U+N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, U+N(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq V+N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, V+N(%rsp) // Digit 2 of [u,v] xorl %ecx, %ecx movq U+2*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq V+2*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq U+2*N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, U+2*N(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq V+2*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, V+2*N(%rsp) // Digits 3 and 4 of u (top is unsigned) movq U+3*N(%rsp), %rax xorq %r9, %rax movq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq V+3*N(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rcx adcq %rbx, %rdx // Preload for last use of old u digit 3 movq U+3*N(%rsp), %rax movq %rcx, U+3*N(%rsp) movq %rdx, U+4*N(%rsp) // Digits 3 and 4 of v (top is unsigned) xorq %r13, %rax movq %r13, %rcx andq %r12, %rcx negq %rcx mulq %r12 addq %rax, %rsi adcq %rdx, %rcx movq V+3*N(%rsp), %rax xorq %r15, %rax movq %r15, %rdx andq %r14, %rdx subq %rdx, %rcx mulq %r14 addq %rax, %rsi adcq %rcx, %rdx movq %rsi, V+3*N(%rsp) movq %rdx, V+4*N(%rsp) // Montgomery reduction of u amontred(u) // Montgomery reduction of v amontred(v) bignum_montinv_p256_midloop: divstep59(d,ff,gg) movq %rsi, d // Next iteration decq i jnz bignum_montinv_p256_loop // The 10th and last iteration does not need anything except the // u value and the sign of f; the latter can be obtained from the // lowest word of f. So it's done differently from the main loop. // Find the sign of the new f. For this we just need one digit // since we know (for in-scope cases) that f is either +1 or -1. // We don't explicitly shift right by 59 either, but looking at // bit 63 (or any bit >= 60) of the unshifted result is enough // to distinguish -1 from +1; this is then made into a mask. movq F(%rsp), %rax movq G(%rsp), %rcx imulq %r8, %rax imulq %r10, %rcx addq %rcx, %rax sarq $63, %rax // Now separate out the matrix into sign-magnitude pairs // and adjust each one based on the sign of f. // // Note that at this point we expect |f|=1 and we got its // sign above, so then since [f,0] == x * 2^{-512} [u,v] (mod p_256) // we want to flip the sign of u according to that of f. 
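// The sarq/xorq/subq triples that follow implement a branch-free
// sign/magnitude split of each signed matrix entry; a small Python model
// of that trick (illustrative name, not part of the source):

def sign_magnitude(x):
    # s is 0 for x >= 0 and -1 for x < 0 (the "sarq $63" mask);
    # (x ^ s) - s then recovers |x| with no branches.
    s = -(x < 0)
    return s, (x ^ s) - s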
movq %r8, %r9 sarq $63, %r9 xorq %r9, %r8 subq %r9, %r8 xorq %rax, %r9 movq %r10, %r11 sarq $63, %r11 xorq %r11, %r10 subq %r11, %r10 xorq %rax, %r11 movq %r12, %r13 sarq $63, %r13 xorq %r13, %r12 subq %r13, %r12 xorq %rax, %r13 movq %r14, %r15 sarq $63, %r15 xorq %r15, %r14 subq %r15, %r14 xorq %rax, %r15 // Adjust the initial value to allow for complement instead of negation movq %r8, %rax andq %r9, %rax movq %r10, %r12 andq %r11, %r12 addq %rax, %r12 // Digit 0 of [u] xorl %r13d, %r13d movq U(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r12 adcq %rdx, %r13 movq V(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r12 adcq %rdx, %r13 // Digit 1 of [u] xorl %r14d, %r14d movq U+N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r13 adcq %rdx, %r14 movq V+N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r13 adcq %rdx, %r14 // Digit 2 of [u] xorl %r15d, %r15d movq U+2*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq V+2*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 adcq %rdx, %r15 // Digits 3 and 4 of u (top is unsigned) movq U+3*N(%rsp), %rax xorq %r9, %rax andq %r8, %r9 negq %r9 mulq %r8 addq %rax, %r15 adcq %rdx, %r9 movq V+3*N(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %r9 mulq %r10 addq %rax, %r15 adcq %rdx, %r9 // Store back and Montgomery reduce u movq %r12, U(%rsp) movq %r13, U+N(%rsp) movq %r14, U+2*N(%rsp) movq %r15, U+3*N(%rsp) movq %r9, U+4*N(%rsp) amontred(u) // Perform final strict reduction mod p_256 and copy to output movq U(%rsp), %r8 movq U+N(%rsp), %r9 movq U+2*N(%rsp), %r10 movq U+3*N(%rsp), %r11 movl $1, %eax movl $0xffffffff, %ebx leaq -2(%rax), %rcx leaq -1(%rbx), %rdx notq %rbx addq %r8, %rax adcq %r9, %rbx adcq %r10, %rcx adcq %r11, %rdx cmovncq %r8, %rax cmovncq %r9, %rbx cmovncq %r10, %rcx cmovncq %r11, %rdx movq res, %rdi movq %rax, (%rdi) movq %rbx, N(%rdi) movq %rcx, 2*N(%rdi) movq %rdx, 3*N(%rdi) // Restore stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
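// The "final strict reduction mod p_256" above is one conditional
// subtraction; as arbitrary-precision arithmetic it amounts to the
// following reference sketch (illustrative name only):

p_256 = 2**256 - 2**224 + 2**192 + 2**96 - 1

def reduce_once_p256(u):
    # Add 2^256 - p_256; a carry out of 256 bits means u >= p_256,
    # in which case the wrapped sum u - p_256 is the reduced value.
    t = u + (2**256 - p_256)
    return t - 2**256 if t >= 2**256 else u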
marvin-hansen/iggy-streaming-system
19,045
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/curve25519/edwards25519_decode.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Decode compressed 256-bit form of edwards25519 point // Input c[32] (bytes); output function return and z[8] // // extern uint64_t edwards25519_decode(uint64_t z[static 8], const uint8_t c[static 32]); // // This interprets the input byte string as a little-endian number // representing a point (x,y) on the edwards25519 curve, encoded as // 2^255 * x_0 + y where x_0 is the least significant bit of x. It // returns the full pair of coordinates x (at z) and y (at z+4). The // return code is 0 for success and 1 for failure, which means that // the input does not correspond to the encoding of any edwards25519 // point. This can happen for three reasons, where y = the lowest // 255 bits of the input: // // * y >= p_25519 // Input y coordinate is not reduced // * (y^2 - 1) * (1 + d_25519 * y^2) has no modular square root // There is no x such that (x,y) is on the curve // * y^2 = 1 and top bit of input is set // Cannot be the canonical encoding of (0,1) or (0,-1) // // Standard x86-64 ABI: RDI = z, RSI = c // Microsoft x64 ABI: RCX = z, RDX = c // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(edwards25519_decode) S2N_BN_SYM_PRIVACY_DIRECTIVE(edwards25519_decode) .text // Size in bytes of a 64-bit word #define N 8 // Pointer-offset pairs for temporaries on stack #define y 0(%rsp) #define s (4*N)(%rsp) #define t (8*N)(%rsp) #define u (12*N)(%rsp) #define v (16*N)(%rsp) #define w (20*N)(%rsp) #define q (24*N)(%rsp) #define res (28*N)(%rsp) #define sgnbit (29*N)(%rsp) #define badun (30*N)(%rsp) // Total size to reserve on the stack #define NSPACE (32*N) // Corrupted versions when stack is down 8 more #define q8 (25*N)(%rsp) // Syntactic variants to make x86_att version simpler to generate #define Y 0 #define S (4*N) #define T (8*N) #define U (12*N) #define V (16*N) #define W (20*N) #define Q8 (25*N) S2N_BN_SYMBOL(edwards25519_decode): // In this case the Windows form literally makes a subroutine call. // This avoids hassle arising from subroutine offsets #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi callq edwards25519_decode_standard popq %rsi popq %rdi ret edwards25519_decode_standard: #endif // Save registers and make room for temporaries pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp // Save the return pointer for the end so we can overwrite %rdi later movq %rdi, res // Load the inputs, which can be done word-wise since x86 is little-endian. // Let y be the lowest 255 bits of the input and sgnbit the desired parity. // If y >= p_25519 then already flag the input as invalid (badun = 1). 
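// The three rejection cases listed in the header can be modelled at a high
// level as standard edwards25519 point decompression. This is an
// illustrative Python sketch of the intended semantics, not the routine
// itself; all names are local to the sketch (requires Python >= 3.8).

p = 2**255 - 19
d = (-121665 * pow(121666, -1, p)) % p   # edwards25519 curve constant

def decode_reference(c32: bytes):
    n = int.from_bytes(c32, "little")
    sgnbit, y = n >> 255, n & ((1 << 255) - 1)
    if y >= p:                            # case 1: y coordinate not reduced
        return None
    u, v = (y * y - 1) % p, (1 + d * y * y) % p
    x2 = (u * pow(v, -1, p)) % p          # x^2 = (y^2 - 1)/(1 + d*y^2)
    x = pow(x2, (p + 3) // 8, p)          # candidate square root
    if (x * x - x2) % p:
        x = (x * pow(2, (p - 1) // 4, p)) % p   # multiply by sqrt(-1)
    if (x * x - x2) % p:
        return None                       # case 2: no square root exists
    if x == 0 and sgnbit:
        return None                       # case 3: y^2 = 1 with top bit set
    if (x & 1) != sgnbit:
        x = p - x
    return x, y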
movq (%rsi), %rax movq %rax, Y(%rsp) movq 8(%rsi), %rbx movq %rbx, Y+8(%rsp) xorl %ebp, %ebp movq 16(%rsi), %rcx movq %rcx, Y+16(%rsp) movq 24(%rsi), %rdx btr $63, %rdx movq %rdx, Y+24(%rsp) adcq %rbp, %rbp movq %rbp, sgnbit addq $19, %rax adcq $0, %rbx adcq $0, %rcx adcq $0, %rdx shrq $63, %rdx movq %rdx, badun // u = y^2 - 1 (actually y + 2^255-20, not reduced modulo) // v = 1 + d * y^2 (not reduced modulo from the +1) // w = u * v leaq V(%rsp), %rdi movq $1, %rsi leaq Y(%rsp), %rdx callq edwards25519_decode_nsqr_p25519 movq V(%rsp), %rax subq $20, %rax movq V+8(%rsp), %rbx sbbq $0, %rbx movq V+16(%rsp), %rcx sbbq $0, %rcx movq V+24(%rsp), %rdx sbbq $0, %rdx btc $63, %rdx movq %rax, U(%rsp) movq %rbx, U+8(%rsp) movq %rcx, U+16(%rsp) movq %rdx, U+24(%rsp) movq $0x75eb4dca135978a3, %rax movq %rax, W(%rsp) movq $0x00700a4d4141d8ab, %rax movq %rax, W+8(%rsp) movq $0x8cc740797779e898, %rax movq %rax, W+16(%rsp) movq $0x52036cee2b6ffe73, %rax movq %rax, W+24(%rsp) leaq V(%rsp), %rdi leaq W(%rsp), %rsi leaq V(%rsp), %rdx callq edwards25519_decode_mul_p25519 movq V(%rsp), %rax addq $1, %rax movq V+8(%rsp), %rbx adcq $0, %rbx movq V+16(%rsp), %rcx adcq $0, %rcx movq V+24(%rsp), %rdx adcq $0, %rdx movq %rax, V(%rsp) movq %rbx, V+8(%rsp) movq %rcx, V+16(%rsp) movq %rdx, V+24(%rsp) leaq W(%rsp), %rdi leaq U(%rsp), %rsi leaq V(%rsp), %rdx callq edwards25519_decode_mul_p25519 // Get s = w^{252-3} as a candidate inverse square root 1/sqrt(w). // This power tower computation is the same as bignum_invsqrt_p25519 leaq T(%rsp), %rdi movq $1, %rsi leaq W(%rsp), %rdx callq edwards25519_decode_nsqr_p25519 leaq T(%rsp), %rdi leaq T(%rsp), %rsi leaq W(%rsp), %rdx callq edwards25519_decode_mul_p25519 leaq S(%rsp), %rdi movq $2, %rsi leaq T(%rsp), %rdx callq edwards25519_decode_nsqr_p25519 leaq T(%rsp), %rdi leaq S(%rsp), %rsi leaq T(%rsp), %rdx callq edwards25519_decode_mul_p25519 leaq S(%rsp), %rdi movq $1, %rsi leaq T(%rsp), %rdx callq edwards25519_decode_nsqr_p25519 leaq V(%rsp), %rdi leaq S(%rsp), %rsi leaq W(%rsp), %rdx callq edwards25519_decode_mul_p25519 leaq S(%rsp), %rdi movq $5, %rsi leaq V(%rsp), %rdx callq edwards25519_decode_nsqr_p25519 leaq T(%rsp), %rdi leaq S(%rsp), %rsi leaq V(%rsp), %rdx callq edwards25519_decode_mul_p25519 leaq S(%rsp), %rdi movq $10, %rsi leaq T(%rsp), %rdx callq edwards25519_decode_nsqr_p25519 leaq T(%rsp), %rdi leaq S(%rsp), %rsi leaq T(%rsp), %rdx callq edwards25519_decode_mul_p25519 leaq S(%rsp), %rdi movq $5, %rsi leaq T(%rsp), %rdx callq edwards25519_decode_nsqr_p25519 leaq V(%rsp), %rdi leaq S(%rsp), %rsi leaq V(%rsp), %rdx callq edwards25519_decode_mul_p25519 leaq S(%rsp), %rdi movq $25, %rsi leaq V(%rsp), %rdx callq edwards25519_decode_nsqr_p25519 leaq T(%rsp), %rdi leaq S(%rsp), %rsi leaq V(%rsp), %rdx callq edwards25519_decode_mul_p25519 leaq S(%rsp), %rdi movq $50, %rsi leaq T(%rsp), %rdx callq edwards25519_decode_nsqr_p25519 leaq T(%rsp), %rdi leaq S(%rsp), %rsi leaq T(%rsp), %rdx callq edwards25519_decode_mul_p25519 leaq S(%rsp), %rdi movq $25, %rsi leaq T(%rsp), %rdx callq edwards25519_decode_nsqr_p25519 leaq V(%rsp), %rdi leaq S(%rsp), %rsi leaq V(%rsp), %rdx callq edwards25519_decode_mul_p25519 leaq S(%rsp), %rdi movq $125, %rsi leaq V(%rsp), %rdx callq edwards25519_decode_nsqr_p25519 leaq V(%rsp), %rdi leaq S(%rsp), %rsi leaq V(%rsp), %rdx callq edwards25519_decode_mul_p25519 leaq S(%rsp), %rdi movq $2, %rsi leaq V(%rsp), %rdx callq edwards25519_decode_nsqr_p25519 leaq S(%rsp), %rdi leaq S(%rsp), %rsi leaq W(%rsp), %rdx callq edwards25519_decode_mul_p25519 
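// The nsqr/mul ladder above is an addition chain for the exponent
// 2^252 - 3 = (p - 5)/8 with p = 2^255 - 19; in Python the same candidate
// inverse square root is a one-liner (illustrative name only):

p = 2**255 - 19
assert 2**252 - 3 == (p - 5) // 8

def candidate_invsqrt(w):
    # s = w^((p-5)/8); s^2 * w lands in {0, 1, -1} exactly when w is
    # zero or a square mod p, which is what the next step tests.
    return pow(w, (p - 5) // 8, p)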
// Compute v' = s^2 * w to discriminate whether the square root sqrt(u/v) // exists, in which case we should get 0, 1 or -1. leaq V(%rsp), %rdi movq $1, %rsi leaq S(%rsp), %rdx callq edwards25519_decode_nsqr_p25519 leaq V(%rsp), %rdi leaq V(%rsp), %rsi leaq W(%rsp), %rdx callq edwards25519_decode_mul_p25519 // Get the two candidates for sqrt(u / v), one being s = u * w^{252-3} // and the other being t = s * j_25519 where j_25519 = sqrt(-1). leaq S(%rsp), %rdi leaq U(%rsp), %rsi leaq S(%rsp), %rdx callq edwards25519_decode_mul_p25519 movq $0xc4ee1b274a0ea0b0, %rax movq %rax, T(%rsp) movq $0x2f431806ad2fe478, %rax movq %rax, T+8(%rsp) movq $0x2b4d00993dfbd7a7, %rax movq %rax, T+16(%rsp) movq $0x2b8324804fc1df0b, %rax movq %rax, T+24(%rsp) leaq T(%rsp), %rdi leaq S(%rsp), %rsi leaq T(%rsp), %rdx callq edwards25519_decode_mul_p25519 // %rax = 0 <=> s^2 * w = 0 or 1 movq V(%rsp), %r8 movq V+8(%rsp), %r9 movq V+16(%rsp), %r10 movq V+24(%rsp), %r11 movl $1, %eax notq %rax andq %r8, %rax orq %r9, %rax orq %r10, %rax orq %r11, %rax // %r8 = 0 <=> s^2 * w = -1 (mod p_25519, i.e. s^2 * w = 2^255 - 20) addq $20, %r8 notq %r9 notq %r10 bts $63, %r11 addq $1, %r11 orq %r9, %r8 orq %r11, %r10 orq %r10, %r8 // If s^2 * w is not 0 or 1 then replace s by t testq %rax, %rax movq S(%rsp), %r12 movq T(%rsp), %rbx cmovnzq %rbx, %r12 movq S+8(%rsp), %r13 movq T+8(%rsp), %rbx cmovnzq %rbx, %r13 movq S+16(%rsp), %r14 movq T+16(%rsp), %rbx cmovnzq %rbx, %r14 movq S+24(%rsp), %r15 movq T+24(%rsp), %rbx cmovnzq %rbx, %r15 movq %r12, S(%rsp) movq %r13, S+8(%rsp) movq %r14, S+16(%rsp) movq %r15, S+24(%rsp) // Check invalidity, occurring if s^2 * w is not in {0,1,-1} cmovzq %rax, %r8 negq %r8 sbbq %r8, %r8 negq %r8 orq %r8, badun // Let [%r11;%r10;%r9;%r8] = s and [%r15;%r14;%r13;%r12] = p_25519 - s movq S(%rsp), %r8 movq $-19, %r12 subq %r8, %r12 movq S+8(%rsp), %r9 movq $-1, %r13 sbbq %r9, %r13 movq S+16(%rsp), %r10 movq $-1, %r14 sbbq %r10, %r14 movq S+24(%rsp), %r11 movq $0x7FFFFFFFFFFFFFFF, %r15 sbbq %r11, %r15 // Decide whether a flip is apparently indicated, s_0 <=> sgnbit // Decide also if s = 0 by OR-ing its digits. 
Now if a flip is indicated: // - if s = 0 then mark as invalid // - if s <> 0 then indeed flip movl $1, %ecx andq %r8, %rcx xorq sgnbit, %rcx movq badun, %rdx movq %rdx, %rsi orq %rcx, %rdx xorl %ebp, %ebp movq %r8, %rax movq %r9, %rbx orq %r10, %rax orq %r11, %rbx orq %rbx, %rax cmovzq %rbp, %rcx cmovnzq %rsi, %rdx // Actual selection of x as s or -s, copying of y and return of validity testq %rcx, %rcx cmovnzq %r12, %r8 cmovnzq %r13, %r9 cmovnzq %r14, %r10 cmovnzq %r15, %r11 movq res, %rdi movq %r8, (%rdi) movq %r9, 8(%rdi) movq %r10, 16(%rdi) movq %r11, 24(%rdi) movq Y(%rsp), %rcx movq %rcx, 32(%rdi) movq Y+8(%rsp), %rcx movq %rcx, 40(%rdi) movq Y+16(%rsp), %rcx movq %rcx, 48(%rdi) movq Y+24(%rsp), %rcx movq %rcx, 56(%rdi) movq %rdx, %rax // Restore stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx ret // ************************************************************* // Local z = x * y // ************************************************************* edwards25519_decode_mul_p25519: movq %rdx, %rcx xorl %ebp, %ebp movq (%rcx), %rdx mulxq (%rsi), %r8, %r9 mulxq 0x8(%rsi), %rax, %r10 addq %rax, %r9 mulxq 0x10(%rsi), %rax, %r11 adcq %rax, %r10 mulxq 0x18(%rsi), %rax, %r12 adcq %rax, %r11 adcq %rbp, %r12 xorl %ebp, %ebp movq 0x8(%rcx), %rdx mulxq (%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x18(%rsi), %rax, %r13 adcxq %rax, %r12 adoxq %rbp, %r13 adcq %rbp, %r13 xorl %ebp, %ebp movq 0x10(%rcx), %rdx mulxq (%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x18(%rsi), %rax, %r14 adcxq %rax, %r13 adoxq %rbp, %r14 adcq %rbp, %r14 xorl %ebp, %ebp movq 0x18(%rcx), %rdx mulxq (%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x18(%rsi), %rcx, %r15 mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 movl $0x26, %edx mulxq %r15, %rax, %rbx adcxq %rcx, %r14 adoxq %rbp, %r15 adcq %rbp, %r15 addq %r11, %rax adcq %rbp, %rbx btq $0x3f, %rax adcq %rbx, %rbx leaq 0x1(%rbx), %rcx imulq $0x13, %rcx, %rcx xorl %ebp, %ebp adoxq %rcx, %r8 mulxq %r12, %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq %r13, %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq %r14, %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq %r15, %rax, %rbx adcq %rax, %r11 shlq $0x3f, %rcx cmpq %rcx, %r11 movl $0x13, %eax cmovns %rbp, %rax subq %rax, %r8 sbbq %rbp, %r9 sbbq %rbp, %r10 sbbq %rbp, %r11 btr $0x3f, %r11 movq %r8, (%rdi) movq %r9, 0x8(%rdi) movq %r10, 0x10(%rdi) movq %r11, 0x18(%rdi) ret // ************************************************************* // Local z = 2^n * x // ************************************************************* edwards25519_decode_nsqr_p25519: // Copy input argument into q movq (%rdx), %rax movq 8(%rdx), %rbx movq 16(%rdx), %rcx movq 24(%rdx), %rdx movq %rax, Q8(%rsp) movq %rbx, Q8+8(%rsp) movq %rcx, Q8+16(%rsp) movq %rdx, Q8+24(%rsp) // Main squaring loop, accumulating in u consistently and // only ensuring the intermediates are < 2 * p_25519 = 2^256 - 38 edwards25519_decode_loop: movq Q8(%rsp), %rdx mulxq %rdx, %r8, %r15 mulxq Q8+0x8(%rsp), %r9, %r10 mulxq Q8+0x18(%rsp), %r11, %r12 movq Q8+0x10(%rsp), %rdx mulxq Q8+0x18(%rsp), %r13, %r14 xorl %ebx, %ebx mulxq Q8(%rsp), %rax, %rcx adcxq %rax, %r10 adoxq %rcx, 
%r11 mulxq Q8+0x8(%rsp), %rax, %rcx adcxq %rax, %r11 adoxq %rcx, %r12 movq Q8+0x18(%rsp), %rdx mulxq Q8+0x8(%rsp), %rax, %rcx adcxq %rax, %r12 adoxq %rcx, %r13 adcxq %rbx, %r13 adoxq %rbx, %r14 adcq %rbx, %r14 xorl %ebx, %ebx adcxq %r9, %r9 adoxq %r15, %r9 movq Q8+0x8(%rsp), %rdx mulxq %rdx, %rax, %rdx adcxq %r10, %r10 adoxq %rax, %r10 adcxq %r11, %r11 adoxq %rdx, %r11 movq Q8+0x10(%rsp), %rdx mulxq %rdx, %rax, %rdx adcxq %r12, %r12 adoxq %rax, %r12 adcxq %r13, %r13 adoxq %rdx, %r13 movq Q8+0x18(%rsp), %rdx mulxq %rdx, %rax, %r15 adcxq %r14, %r14 adoxq %rax, %r14 adcxq %rbx, %r15 adoxq %rbx, %r15 movl $0x26, %edx xorl %ebx, %ebx mulxq %r12, %rax, %rcx adcxq %rax, %r8 adoxq %rcx, %r9 mulxq %r13, %rax, %rcx adcxq %rax, %r9 adoxq %rcx, %r10 mulxq %r14, %rax, %rcx adcxq %rax, %r10 adoxq %rcx, %r11 mulxq %r15, %rax, %r12 adcxq %rax, %r11 adoxq %rbx, %r12 adcxq %rbx, %r12 shldq $0x1, %r11, %r12 btr $0x3f, %r11 movl $0x13, %edx imulq %r12, %rdx addq %rdx, %r8 adcq %rbx, %r9 adcq %rbx, %r10 adcq %rbx, %r11 movq %r8, Q8(%rsp) movq %r9, Q8+0x8(%rsp) movq %r10, Q8+0x10(%rsp) movq %r11, Q8+0x18(%rsp) // Loop as applicable decq %rsi jnz edwards25519_decode_loop // We know the intermediate result x < 2^256 - 38, and now we do strict // modular reduction mod 2^255 - 19. Note x < 2^255 - 19 <=> x + 19 < 2^255 // which is equivalent to a "ns" condition. We just use the results where // they were in registers [%r11;%r10;%r9;%r8] instead of re-loading them. movl $19, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx addq %r8, %rax adcq %r9, %rbx adcq %r10, %rcx adcq %r11, %rdx cmovns %r8, %rax cmovns %r9, %rbx cmovns %r10, %rcx cmovns %r11, %rdx btr $63, %rdx movq %rax, (%rdi) movq %rbx, 8(%rdi) movq %rcx, 16(%rdi) movq %rdx, 24(%rdi) ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
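// The tail of the squaring helper relies on x < p  <=>  x + 19 < 2^255;
// a Python model of that final conditional reduction (illustrative name,
// assuming the input is already below 2^256 - 38 as stated above):

p = 2**255 - 19

def reduce_once_p25519(x):
    # Bit 255 of x + 19 is set exactly when x >= p, and in that case
    # dropping the bit (subtracting 2^255) leaves x - p, fully reduced.
    t = x + 19
    return t - (1 << 255) if (t >> 255) & 1 else x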
marvin-hansen/iggy-streaming-system
296,797
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/curve25519/curve25519_x25519base.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // The x25519 function for curve25519 on base element 9 // Input scalar[4]; output res[4] // // extern void curve25519_x25519base // (uint64_t res[static 4],uint64_t scalar[static 4]) // // The function has a second prototype considering the arguments as arrays // of bytes rather than 64-bit words. The underlying code is the same, since // the x86 platform is little-endian. // // extern void curve25519_x25519base_byte // (uint8_t res[static 32],uint8_t scalar[static 32]) // // Given a scalar n, returns the X coordinate of n * G where G = (9,...) is // the standard generator. The scalar is first slightly modified/mangled // as specified in the relevant RFC (https://www.rfc-editor.org/rfc/rfc7748). // // Standard x86-64 ABI: RDI = res, RSI = scalar // Microsoft x64 ABI: RCX = res, RDX = scalar // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(curve25519_x25519base) S2N_BN_SYM_PRIVACY_DIRECTIVE(curve25519_x25519base) S2N_BN_SYM_VISIBILITY_DIRECTIVE(curve25519_x25519base_byte) S2N_BN_SYM_PRIVACY_DIRECTIVE(curve25519_x25519base_byte) .text // Size of individual field elements #define NUMSIZE 32 // Pointer-offset pairs for result and temporaries on stack with some aliasing. // The result "resx" assumes the "res" pointer has been preloaded into %rbp. #define resx (0*NUMSIZE)(%rbp) #define scalar (0*NUMSIZE)(%rsp) #define tabent (1*NUMSIZE)(%rsp) #define ymx_2 (1*NUMSIZE)(%rsp) #define xpy_2 (2*NUMSIZE)(%rsp) #define kxy_2 (3*NUMSIZE)(%rsp) #define acc (4*NUMSIZE)(%rsp) #define x_1 (4*NUMSIZE)(%rsp) #define y_1 (5*NUMSIZE)(%rsp) #define z_1 (6*NUMSIZE)(%rsp) #define w_1 (7*NUMSIZE)(%rsp) #define x_3 (4*NUMSIZE)(%rsp) #define y_3 (5*NUMSIZE)(%rsp) #define z_3 (6*NUMSIZE)(%rsp) #define w_3 (7*NUMSIZE)(%rsp) #define tmpspace (8*NUMSIZE)(%rsp) #define t0 (8*NUMSIZE)(%rsp) #define t1 (9*NUMSIZE)(%rsp) #define t2 (10*NUMSIZE)(%rsp) #define t3 (11*NUMSIZE)(%rsp) #define t4 (12*NUMSIZE)(%rsp) #define t5 (13*NUMSIZE)(%rsp) // Stable homes for the input result pointer, and other variables #define res 14*NUMSIZE(%rsp) #define i 14*NUMSIZE+8(%rsp) #define bias 14*NUMSIZE+16(%rsp) #define bf 14*NUMSIZE+24(%rsp) #define ix 14*NUMSIZE+24(%rsp) #define tab 15*NUMSIZE(%rsp) // Total size to reserve on the stack #define NSPACE (15*NUMSIZE+8) // Macro wrapping up the basic field multiplication, only trivially // different from a pure function call to bignum_mul_p25519. 
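// As arbitrary-precision arithmetic, the field multiplication wrapped up
// below is a 256x256-bit product reduced using 2^256 == 38 (mod p), since
// p = 2^255 - 19. A reference sketch in Python (illustrative name only,
// not the macro itself):

p = 2**255 - 19

def mul_p25519_reference(a, b):
    t = a * b                                   # 512-bit product
    t = (t & (2**256 - 1)) + 38 * (t >> 256)    # fold the high half once
    return t % p                                # canonical result < p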
#define mul_p25519(P0,P1,P2) \ xorl %esi, %esi ; \ movq P2, %rdx ; \ mulxq P1, %r8, %r9 ; \ mulxq 0x8+P1, %rax, %r10 ; \ addq %rax, %r9 ; \ mulxq 0x10+P1, %rax, %r11 ; \ adcq %rax, %r10 ; \ mulxq 0x18+P1, %rax, %r12 ; \ adcq %rax, %r11 ; \ adcq %rsi, %r12 ; \ xorl %esi, %esi ; \ movq 0x8+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x18+P1, %rax, %r13 ; \ adcxq %rax, %r12 ; \ adoxq %rsi, %r13 ; \ adcxq %rsi, %r13 ; \ xorl %esi, %esi ; \ movq 0x10+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x18+P1, %rax, %r14 ; \ adcxq %rax, %r13 ; \ adoxq %rsi, %r14 ; \ adcxq %rsi, %r14 ; \ xorl %esi, %esi ; \ movq 0x18+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x18+P1, %rax, %r15 ; \ adcxq %rax, %r14 ; \ adoxq %rsi, %r15 ; \ adcxq %rsi, %r15 ; \ movl $0x26, %edx ; \ xorl %esi, %esi ; \ mulxq %r12, %rax, %rbx ; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ mulxq %r13, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq %r14, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq %r15, %rax, %r12 ; \ adcxq %rax, %r11 ; \ adoxq %rsi, %r12 ; \ adcxq %rsi, %r12 ; \ shldq $0x1, %r11, %r12 ; \ movl $0x13, %edx ; \ incq %r12; \ bts $63, %r11 ; \ mulxq %r12, %rax, %rbx ; \ addq %rax, %r8 ; \ adcq %rbx, %r9 ; \ adcq %rsi, %r10 ; \ adcq %rsi, %r11 ; \ sbbq %rax, %rax ; \ notq %rax; \ andq %rdx, %rax ; \ subq %rax, %r8 ; \ sbbq %rsi, %r9 ; \ sbbq %rsi, %r10 ; \ sbbq %rsi, %r11 ; \ btr $63, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // A version of multiplication that only guarantees output < 2 * p_25519. // This basically skips the +1 and final correction in quotient estimation. 
#define mul_4(P0,P1,P2) \ xorl %ecx, %ecx ; \ movq P2, %rdx ; \ mulxq P1, %r8, %r9 ; \ mulxq 0x8+P1, %rax, %r10 ; \ addq %rax, %r9 ; \ mulxq 0x10+P1, %rax, %r11 ; \ adcq %rax, %r10 ; \ mulxq 0x18+P1, %rax, %r12 ; \ adcq %rax, %r11 ; \ adcq %rcx, %r12 ; \ xorl %ecx, %ecx ; \ movq 0x8+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x18+P1, %rax, %r13 ; \ adcxq %rax, %r12 ; \ adoxq %rcx, %r13 ; \ adcxq %rcx, %r13 ; \ xorl %ecx, %ecx ; \ movq 0x10+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x18+P1, %rax, %r14 ; \ adcxq %rax, %r13 ; \ adoxq %rcx, %r14 ; \ adcxq %rcx, %r14 ; \ xorl %ecx, %ecx ; \ movq 0x18+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x18+P1, %rax, %r15 ; \ adcxq %rax, %r14 ; \ adoxq %rcx, %r15 ; \ adcxq %rcx, %r15 ; \ movl $0x26, %edx ; \ xorl %ecx, %ecx ; \ mulxq %r12, %rax, %rbx ; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ mulxq %r13, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq %r14, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq %r15, %rax, %r12 ; \ adcxq %rax, %r11 ; \ adoxq %rcx, %r12 ; \ adcxq %rcx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Modular subtraction with double modulus 2 * p_25519 = 2^256 - 38 #define sub_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ebx, %ebx ; \ subq P2, %r8 ; \ movq 8+P1, %r9 ; \ sbbq 8+P2, %r9 ; \ movl $38, %ecx ; \ movq 16+P1, %r10 ; \ sbbq 16+P2, %r10 ; \ movq 24+P1, %rax ; \ sbbq 24+P2, %rax ; \ cmovncq %rbx, %rcx ; \ subq %rcx, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq %rbx, %rax ; \ movq %r8, P0 ; \ movq %r9, 8+P0 ; \ movq %r10, 16+P0 ; \ movq %rax, 24+P0 // Modular addition and doubling with double modulus 2 * p_25519 = 2^256 - 38. // This only ensures that the result fits in 4 digits, not that it is reduced // even w.r.t. double modulus. The result is always correct modulo provided // the sum of the inputs is < 2^256 + 2^256 - 38, so in particular provided // at least one of them is reduced double modulo. 
#define add_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ecx, %ecx ; \ addq P2, %r8 ; \ movq 0x8+P1, %r9 ; \ adcq 0x8+P2, %r9 ; \ movq 0x10+P1, %r10 ; \ adcq 0x10+P2, %r10 ; \ movq 0x18+P1, %r11 ; \ adcq 0x18+P2, %r11 ; \ movl $38, %eax ; \ cmovncq %rcx, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 #define double_twice4(P0,P1) \ movq P1, %r8 ; \ xorl %ecx, %ecx ; \ addq %r8, %r8 ; \ movq 0x8+P1, %r9 ; \ adcq %r9, %r9 ; \ movq 0x10+P1, %r10 ; \ adcq %r10, %r10 ; \ movq 0x18+P1, %r11 ; \ adcq %r11, %r11 ; \ movl $38, %eax ; \ cmovncq %rcx, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 S2N_BN_SYMBOL(curve25519_x25519base): S2N_BN_SYMBOL(curve25519_x25519base_byte): // In this case the Windows form literally makes a subroutine call. // This avoids hassle arising from keeping code and data together. #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi callq curve25519_x25519base_curve25519_x25519base_standard popq %rsi popq %rdi ret curve25519_x25519base_curve25519_x25519base_standard: #endif // Save registers, make room for temps, preserve input arguments. pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp // Move the output pointer to a stable place movq %rdi, res // Copy the input scalar to its local variable while mangling it. // In principle the mangling is into 01xxx...xxx000, but actually // we only clear the top two bits so 00xxx...xxxxxx. The additional // 2^254 * G is taken care of by the starting value for the addition // chain below, while we never look at the three low bits at all. movq (%rsi), %rax movq %rax, (%rsp) movq 8(%rsi), %rax movq %rax, 8(%rsp) movq 16(%rsi), %rax movq %rax, 16(%rsp) movq $0x3fffffffffffffff, %rax andq 24(%rsi), %rax movq %rax, 24(%rsp) // The main part of the computation is on the edwards25519 curve in // extended-projective coordinates (X,Y,Z,T), representing a point // (x,y) via x = X/Z, y = Y/Z and x * y = T/Z (so X * Y = T * Z). // Only at the very end do we translate back to curve25519. So G // below means the generator within edwards25519 corresponding to // (9,...) for curve25519, via the standard isomorphism. // // Initialize accumulator "acc" to either (2^254 + 8) * G or just 2^254 * G // depending on bit 3 of the scalar, the only nonzero bit of the bottom 4. // Thus, we have effectively dealt with bits 0, 1, 2, 3, 254 and 255. 
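// For reference, the standard RFC 7748 clamping that the mangling comment
// above alludes to looks like this in Python; the routine itself only
// clears the top two bits and folds the implied 2^254 (and optional bit 3)
// into the starting accumulator (illustrative name only):

def clamp_rfc7748(scalar32: bytes) -> int:
    n = int.from_bytes(scalar32, "little")
    n &= ~7               # clear bits 0..2
    n &= ~(1 << 255)      # clear bit 255
    n |= 1 << 254         # set bit 254
    return n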
movq (%rsp), %rax andq $8, %rax leaq curve25519_x25519base_edwards25519_0g(%rip), %r10 leaq curve25519_x25519base_edwards25519_8g(%rip), %r11 movq (%r10), %rax movq (%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*16(%rsp) movq 8*1(%r10), %rax movq 8*1(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*17(%rsp) movq 8*2(%r10), %rax movq 8*2(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*18(%rsp) movq 8*3(%r10), %rax movq 8*3(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*19(%rsp) movq 8*4(%r10), %rax movq 8*4(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*20(%rsp) movq 8*5(%r10), %rax movq 8*5(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*21(%rsp) movq 8*6(%r10), %rax movq 8*6(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*22(%rsp) movq 8*7(%r10), %rax movq 8*7(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*23(%rsp) movl $1, %eax movq %rax, 8*24(%rsp) movl $0, %eax movq %rax, 8*25(%rsp) movq %rax, 8*26(%rsp) movq %rax, 8*27(%rsp) movq 8*8(%r10), %rax movq 8*8(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*28(%rsp) movq 8*9(%r10), %rax movq 8*9(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*29(%rsp) movq 8*10(%r10), %rax movq 8*10(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*30(%rsp) movq 8*11(%r10), %rax movq 8*11(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*31(%rsp) // The counter "i" tracks the bit position for which the scalar has // already been absorbed, starting at 4 and going up in chunks of 4. // // The pointer "tab" points at the current block of the table for // multiples (2^i * j) * G at the current bit position i; 1 <= j <= 8. // // The bias is always either 0 and 1 and needs to be added to the // partially processed scalar implicitly. This is used to absorb 4 bits // of scalar per iteration from 3-bit table indexing by exploiting // negation: (16 * h + l) * G = (16 * (h + 1) - (16 - l)) * G is used // when l >= 9. Note that we can't have any bias left over at the // end because of the clearing of bit 255 of the scalar, meaning the // l >= 9 case cannot arise on the last iteration. movq $4, i leaq curve25519_x25519base_edwards25519_gtable(%rip), %rax movq %rax, tab movq $0, bias // Start of the main loop, repeated 63 times for i = 4, 8, ..., 252 curve25519_x25519base_scalarloop: // Look at the next 4-bit field "bf", adding the previous bias as well. // Choose the table index "ix" as bf when bf <= 8 and 16 - bf for bf >= 9, // setting the bias to 1 for the next iteration in the latter case. movq i, %rax movq %rax, %rcx shrq $6, %rax movq (%rsp,%rax,8), %rax // Exploiting scalar = sp exactly shrq %cl, %rax andq $15, %rax addq bias, %rax movq %rax, bf cmpq $9, bf sbbq %rax, %rax incq %rax movq %rax, bias movq $16, %rdi subq bf, %rdi cmpq $0, bias cmovzq bf, %rdi movq %rdi, ix // Perform constant-time lookup in the table to get element number "ix". // The table entry for the affine point (x,y) is actually a triple // (y - x,x + y,2 * d * x * y) to precompute parts of the addition. // Note that "ix" can be 0, so we set up the appropriate identity first. 
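// The bf/bias/ix bookkeeping described above is a signed 4-bit window
// recoding; a Python model of the idea (illustrative, assumes the top two
// scalar bits are already cleared so no carry is left over at the end):

def recode_windows(n, bits=256):
    digits, bias = [], 0
    for i in range(0, bits, 4):
        bf = ((n >> i) & 15) + bias
        bias = 1 if bf >= 9 else 0        # (16*h + l)*G = (16*(h+1) - (16-l))*G
        digits.append(bf - 16 if bias else bf)  # digit in -7..8, |digit| = ix
    assert bias == 0                      # holds when bits 254, 255 of n are clear
    return digits   # sum(d << (4*i) for i, d in enumerate(digits)) == n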
movl $1, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx movl $1, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d xorl %r12d, %r12d xorl %r13d, %r13d xorl %r14d, %r14d xorl %r15d, %r15d movq tab, %rbp cmpq $1, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $2, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $3, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $4, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $5, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $6, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $7, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq 
$96, %rbp cmpq $8, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp movq %rbp, tab // We now have the triple from the table in registers as follows // // [%rdx;%rcx;%rbx;%rax] = y - x // [%r11;%r10;%r9;%r8] = x + y // [%r15;%r14;%r13;%r12] = 2 * d * x * y // // In case bias = 1 we need to negate this. For Edwards curves // -(x,y) = (-x,y), i.e. we need to negate the x coordinate. // In this processed encoding, that amounts to swapping the // first two fields and negating the third. // // The optional negation here also pretends bias = 0 whenever // ix = 0 so that it doesn't need to handle the case of zero // inputs, since no non-trivial table entries are zero. Note // that in the zero case the whole negation is trivial, and // so indeed is the swapping. cmpq $0, bias movq %rax, %rsi cmovnzq %r8, %rsi cmovnzq %rax, %r8 movq %rsi, 32(%rsp) movq %r8, 64(%rsp) movq %rbx, %rsi cmovnzq %r9, %rsi cmovnzq %rbx, %r9 movq %rsi, 40(%rsp) movq %r9, 72(%rsp) movq %rcx, %rsi cmovnzq %r10, %rsi cmovnzq %rcx, %r10 movq %rsi, 48(%rsp) movq %r10, 80(%rsp) movq %rdx, %rsi cmovnzq %r11, %rsi cmovnzq %rdx, %r11 movq %rsi, 56(%rsp) movq %r11, 88(%rsp) movq $-19, %rax movq $-1, %rbx movq $-1, %rcx movq $0x7fffffffffffffff, %rdx subq %r12, %rax sbbq %r13, %rbx sbbq %r14, %rcx sbbq %r15, %rdx movq ix, %r8 movq bias, %r9 testq %r8, %r8 cmovzq %r8, %r9 testq %r9, %r9 cmovzq %r12, %rax cmovzq %r13, %rbx cmovzq %r14, %rcx cmovzq %r15, %rdx movq %rax, 96(%rsp) movq %rbx, 104(%rsp) movq %rcx, 112(%rsp) movq %rdx, 120(%rsp) // Extended-projective and precomputed mixed addition. // This is effectively the same as calling the standalone // function edwards25519_pepadd(acc,acc,tabent), but we // only retain slightly weaker normalization < 2 * p_25519 // throughout the inner loop, so the computation is // slightly different, and faster overall. double_twice4(t0,z_1) sub_twice4(t1,y_1,x_1) add_twice4(t2,y_1,x_1) mul_4(t3,w_1,kxy_2) mul_4(t1,t1,ymx_2) mul_4(t2,t2,xpy_2) sub_twice4(t4,t0,t3) add_twice4(t0,t0,t3) sub_twice4(t5,t2,t1) add_twice4(t1,t2,t1) mul_4(z_3,t4,t0) mul_4(x_3,t5,t4) mul_4(y_3,t0,t1) mul_4(w_3,t5,t1) // End of the main loop; move on by 4 bits. addq $4, i cmpq $256, i jc curve25519_x25519base_scalarloop // Now we need to translate from Edwards curve edwards25519 back // to the Montgomery form curve25519. The mapping in the affine // representations is // // (x,y) |-> ((1 + y) / (1 - y), c * (1 + y) / ((1 - y) * x)) // // For x25519, we only need the x coordinate, and we compute this as // // (1 + y) / (1 - y) = (x + x * y) / (x - x * y) // = (X/Z + T/Z) / (X/Z - T/Z) // = (X + T) / (X - T) // = (X + T) * inverse(X - T) // // We could equally well use (Z + Y) / (Z - Y), but the above has the // same cost, and it more explicitly forces zero output whenever X = 0, // regardless of how the modular inverse behaves on zero inputs. In // the present setting (base point 9, mangled scalar) that doesn't // really matter anyway since X = 0 never arises, but it seems a // little bit tidier. 
Note that both Edwards point (0,1) which maps to // the Montgomery point at infinity, and Edwards (0,-1) which maps to // Montgomery (0,0) [this is the 2-torsion point] are both by definition // mapped to 0 by the X coordinate mapping used to define curve25519. // // First the addition and subtraction: add_twice4(t1,x_3,w_3) sub_twice4(t2,x_3,w_3) // Prepare to call the modular inverse function to get t0 = 1/t2 // Note that this works for the weakly normalized z_3 equally well. // The non-coprime case z_3 == 0 (mod p_25519) cannot arise anyway. leaq 256(%rsp), %rdi leaq 320(%rsp), %rsi // Inline copy of bignum_inv_p25519, identical except for stripping out // the prologue and epilogue saving and restoring registers and making // and reclaiming room on the stack. For more details and explanations see // "x86/curve25519/bignum_inv_p25519.S". Note that the stack it uses for // its own temporaries is 208 bytes, so it has no effect on variables // that are needed in the rest of our computation here: res, t0, t1, t2. movq %rdi, 0xc0(%rsp) xorl %eax, %eax leaq -0x13(%rax), %rcx notq %rax movq %rcx, (%rsp) movq %rax, 0x8(%rsp) movq %rax, 0x10(%rsp) btr $0x3f, %rax movq %rax, 0x18(%rsp) movq (%rsi), %rdx movq 0x8(%rsi), %rcx movq 0x10(%rsi), %r8 movq 0x18(%rsi), %r9 movl $0x1, %eax xorl %r10d, %r10d bts $0x3f, %r9 adcq %r10, %rax imulq $0x13, %rax, %rax addq %rax, %rdx adcq %r10, %rcx adcq %r10, %r8 adcq %r10, %r9 movl $0x13, %eax cmovbq %r10, %rax subq %rax, %rdx sbbq %r10, %rcx sbbq %r10, %r8 sbbq %r10, %r9 btr $0x3f, %r9 movq %rdx, 0x20(%rsp) movq %rcx, 0x28(%rsp) movq %r8, 0x30(%rsp) movq %r9, 0x38(%rsp) xorl %eax, %eax movq %rax, 0x40(%rsp) movq %rax, 0x48(%rsp) movq %rax, 0x50(%rsp) movq %rax, 0x58(%rsp) movabsq $0xa0f99e2375022099, %rax movq %rax, 0x60(%rsp) movabsq $0xa8c68f3f1d132595, %rax movq %rax, 0x68(%rsp) movabsq $0x6c6c893805ac5242, %rax movq %rax, 0x70(%rsp) movabsq $0x276508b241770615, %rax movq %rax, 0x78(%rsp) movq $0xa, 0x90(%rsp) movq $0x1, 0x98(%rsp) jmp curve25519_x25519base_midloop curve25519_x25519base_inverseloop: movq %r8, %r9 sarq $0x3f, %r9 xorq %r9, %r8 subq %r9, %r8 movq %r10, %r11 sarq $0x3f, %r11 xorq %r11, %r10 subq %r11, %r10 movq %r12, %r13 sarq $0x3f, %r13 xorq %r13, %r12 subq %r13, %r12 movq %r14, %r15 sarq $0x3f, %r15 xorq %r15, %r14 subq %r15, %r14 movq %r8, %rax andq %r9, %rax movq %r10, %rdi andq %r11, %rdi addq %rax, %rdi movq %rdi, 0x80(%rsp) movq %r12, %rax andq %r13, %rax movq %r14, %rsi andq %r15, %rsi addq %rax, %rsi movq %rsi, 0x88(%rsp) xorl %ebx, %ebx movq (%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq 0x20(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rdi adcq %rdx, %rbx xorl %ebp, %ebp movq (%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rsi adcq %rdx, %rbp movq 0x20(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp xorl %ecx, %ecx movq 0x8(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x28(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx shrdq $0x3b, %rbx, %rdi movq %rdi, (%rsp) xorl %edi, %edi movq 0x8(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbp adcq %rdx, %rdi movq 0x28(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rdi shrdq $0x3b, %rbp, %rsi movq %rsi, 0x20(%rsp) xorl %esi, %esi movq 0x10(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rsi movq 0x30(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rsi shrdq $0x3b, %rcx, %rbx movq %rbx, 0x8(%rsp) xorl %ebx, %ebx movq 0x10(%rsp), 
%rax xorq %r13, %rax mulq %r12 addq %rax, %rdi adcq %rdx, %rbx movq 0x30(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rdi adcq %rdx, %rbx shrdq $0x3b, %rdi, %rbp movq %rbp, 0x28(%rsp) movq 0x18(%rsp), %rax xorq %r9, %rax movq %rax, %rbp sarq $0x3f, %rbp andq %r8, %rbp negq %rbp mulq %r8 addq %rax, %rsi adcq %rdx, %rbp movq 0x38(%rsp), %rax xorq %r11, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r10, %rdx subq %rdx, %rbp mulq %r10 addq %rax, %rsi adcq %rdx, %rbp shrdq $0x3b, %rsi, %rcx movq %rcx, 0x10(%rsp) shrdq $0x3b, %rbp, %rsi movq 0x18(%rsp), %rax movq %rsi, 0x18(%rsp) xorq %r13, %rax movq %rax, %rsi sarq $0x3f, %rsi andq %r12, %rsi negq %rsi mulq %r12 addq %rax, %rbx adcq %rdx, %rsi movq 0x38(%rsp), %rax xorq %r15, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r14, %rdx subq %rdx, %rsi mulq %r14 addq %rax, %rbx adcq %rdx, %rsi shrdq $0x3b, %rbx, %rdi movq %rdi, 0x30(%rsp) shrdq $0x3b, %rsi, %rbx movq %rbx, 0x38(%rsp) movq 0x80(%rsp), %rbx movq 0x88(%rsp), %rbp xorl %ecx, %ecx movq 0x40(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x60(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x40(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x40(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x60(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x60(%rsp) xorl %ebx, %ebx movq 0x48(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x68(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq 0x48(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, 0x48(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq 0x68(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, 0x68(%rsp) xorl %ecx, %ecx movq 0x50(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x70(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x50(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x50(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x70(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x70(%rsp) movq 0x58(%rsp), %rax xorq %r9, %rax movq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x78(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rcx adcq %rbx, %rdx movq %rdx, %rbx shldq $0x1, %rcx, %rdx sarq $0x3f, %rbx addq %rbx, %rdx movl $0x13, %eax imulq %rdx movq 0x40(%rsp), %r8 addq %rax, %r8 movq %r8, 0x40(%rsp) movq 0x48(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x48(%rsp) movq 0x50(%rsp), %r8 adcq %rbx, %r8 movq %r8, 0x50(%rsp) adcq %rbx, %rcx shlq $0x3f, %rax addq %rax, %rcx movq 0x58(%rsp), %rax movq %rcx, 0x58(%rsp) xorq %r13, %rax movq %r13, %rcx andq %r12, %rcx negq %rcx mulq %r12 addq %rax, %rsi adcq %rdx, %rcx movq 0x78(%rsp), %rax xorq %r15, %rax movq %r15, %rdx andq %r14, %rdx subq %rdx, %rcx mulq %r14 addq %rax, %rsi adcq %rcx, %rdx movq %rdx, %rcx shldq $0x1, %rsi, %rdx sarq $0x3f, %rcx movl $0x13, %eax addq %rcx, %rdx imulq %rdx movq 0x60(%rsp), %r8 addq %rax, %r8 movq %r8, 0x60(%rsp) movq 0x68(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x68(%rsp) movq 0x70(%rsp), %r8 adcq %rcx, %r8 movq %r8, 0x70(%rsp) adcq %rcx, %rsi shlq $0x3f, %rax addq %rax, %rsi movq %rsi, 0x78(%rsp) curve25519_x25519base_midloop: movq 0x98(%rsp), %rsi movq (%rsp), %rdx movq 0x20(%rsp), %rcx movq %rdx, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx andq $0xfffff, %rcx movabsq 
$0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax xorl %ebp, %ebp movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq 
(%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %rdx leaq (%rcx,%rax), %rdi shlq $0x16, %rdx shlq $0x16, %rdi sarq $0x2b, %rdx sarq $0x2b, %rdi movabsq $0x20000100000, %rax leaq (%rbx,%rax), %rbx leaq (%rcx,%rax), %rcx sarq $0x2a, %rbx sarq $0x2a, %rcx movq %rdx, 0xa0(%rsp) movq %rbx, 0xa8(%rsp) movq %rdi, 0xb0(%rsp) movq %rcx, 0xb8(%rsp) movq (%rsp), %r12 imulq %r12, %rdi imulq %rdx, %r12 movq 0x20(%rsp), %r13 imulq %r13, %rbx imulq %rcx, %r13 addq %rbx, %r12 addq %rdi, %r13 sarq $0x14, %r12 sarq $0x14, %r13 movq %r12, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq 
(%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %r8 leaq (%rcx,%rax), %r10 shlq $0x16, %r8 shlq $0x16, %r10 sarq $0x2b, %r8 sarq $0x2b, %r10 movabsq $0x20000100000, %rax leaq (%rbx,%rax), %r15 leaq (%rcx,%rax), %r11 sarq $0x2a, %r15 sarq $0x2a, %r11 movq %r13, %rbx movq %r12, %rcx imulq %r8, %r12 imulq %r15, %rbx addq %rbx, %r12 imulq %r11, %r13 imulq %r10, %rcx addq %rcx, %r13 sarq $0x14, %r12 sarq $0x14, %r13 movq %r12, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq 0xa0(%rsp), %rax imulq %r8, %rax movq 0xb0(%rsp), %rdx imulq %r15, %rdx imulq 0xa8(%rsp), %r8 imulq 0xb8(%rsp), %r15 addq %r8, %r15 leaq (%rax,%rdx), %r9 movq 0xa0(%rsp), %rax imulq %r10, %rax movq 0xb0(%rsp), %rdx imulq %r11, %rdx imulq 0xa8(%rsp), %r10 imulq 0xb8(%rsp), %r11 addq %r10, %r11 leaq (%rax,%rdx), %r13 movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi 
leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq 
$1, %rcx
xorq %r8, %rdi
xorq %r8, %rsi
btq $0x3f, %r8
cmovbq %rcx, %rbx
movq %rax, %r8
subq %rax, %rsi
leaq (%rcx,%rdi), %rcx
cmovs %rbp, %r8
movq %rbx, %rdi
testq %rdx, %rcx
cmoveq %rbp, %r8
cmoveq %rbp, %rdi
sarq $1, %rcx
xorq %r8, %rdi
xorq %r8, %rsi
btq $0x3f, %r8
cmovbq %rcx, %rbx
movq %rax, %r8
subq %rax, %rsi
leaq (%rcx,%rdi), %rcx
sarq $1, %rcx
movl $0x100000, %eax
leaq (%rbx,%rax), %r8
leaq (%rcx,%rax), %r12
shlq $0x15, %r8
shlq $0x15, %r12
sarq $0x2b, %r8
sarq $0x2b, %r12
movabsq $0x20000100000, %rax
leaq (%rbx,%rax), %r10
leaq (%rcx,%rax), %r14
sarq $0x2b, %r10
sarq $0x2b, %r14
movq %r9, %rax
imulq %r8, %rax
movq %r13, %rdx
imulq %r10, %rdx
imulq %r15, %r8
imulq %r11, %r10
addq %r8, %r10
leaq (%rax,%rdx), %r8
movq %r9, %rax
imulq %r12, %rax
movq %r13, %rdx
imulq %r14, %rdx
imulq %r15, %r12
imulq %r11, %r14
addq %r12, %r14
leaq (%rax,%rdx), %r12
movq %rsi, 0x98(%rsp)
decq 0x90(%rsp)
jne curve25519_x25519base_inverseloop
movq (%rsp), %rax
movq 0x20(%rsp), %rcx
imulq %r8, %rax
imulq %r10, %rcx
addq %rcx, %rax
sarq $0x3f, %rax
movq %r8, %r9
sarq $0x3f, %r9
xorq %r9, %r8
subq %r9, %r8
xorq %rax, %r9
movq %r10, %r11
sarq $0x3f, %r11
xorq %r11, %r10
subq %r11, %r10
xorq %rax, %r11
movq %r12, %r13
sarq $0x3f, %r13
xorq %r13, %r12
subq %r13, %r12
xorq %rax, %r13
movq %r14, %r15
sarq $0x3f, %r15
xorq %r15, %r14
subq %r15, %r14
xorq %rax, %r15
movq %r8, %rax
andq %r9, %rax
movq %r10, %r12
andq %r11, %r12
addq %rax, %r12
xorl %r13d, %r13d
movq 0x40(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %r12
adcq %rdx, %r13
movq 0x60(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %r12
adcq %rdx, %r13
xorl %r14d, %r14d
movq 0x48(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %r13
adcq %rdx, %r14
movq 0x68(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %r13
adcq %rdx, %r14
xorl %r15d, %r15d
movq 0x50(%rsp), %rax
xorq %r9, %rax
mulq %r8
addq %rax, %r14
adcq %rdx, %r15
movq 0x70(%rsp), %rax
xorq %r11, %rax
mulq %r10
addq %rax, %r14
adcq %rdx, %r15
movq 0x58(%rsp), %rax
xorq %r9, %rax
andq %r8, %r9
negq %r9
mulq %r8
addq %rax, %r15
adcq %rdx, %r9
movq 0x78(%rsp), %rax
xorq %r11, %rax
movq %r11, %rdx
andq %r10, %rdx
subq %rdx, %r9
mulq %r10
addq %rax, %r15
adcq %rdx, %r9
movq %r9, %rax
shldq $0x1, %r15, %rax
sarq $0x3f, %r9
movl $0x13, %ebx
leaq 0x1(%rax,%r9,1), %rax
imulq %rbx
xorl %ebp, %ebp
addq %rax, %r12
adcq %rdx, %r13
adcq %r9, %r14
adcq %r9, %r15
shlq $0x3f, %rax
addq %rax, %r15
cmovns %rbp, %rbx
subq %rbx, %r12
sbbq %rbp, %r13
sbbq %rbp, %r14
sbbq %rbp, %r15
btr $0x3f, %r15
movq 0xc0(%rsp), %rdi
movq %r12, (%rdi)
movq %r13, 0x8(%rdi)
movq %r14, 0x10(%rdi)
movq %r15, 0x18(%rdi)

// The final result is (X + T) / (X - T)
// This is the only operation in the whole computation that
// fully reduces modulo p_25519 since now we want the canonical
// answer as output.

movq res, %rbp
mul_p25519(resx,t1,t0)

// Restore stack and registers

addq $NSPACE, %rsp
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
popq %rbx
ret

// ****************************************************************************
// The precomputed data (all read-only). This is currently part of the same
// text section, which gives position-independent code with simple PC-relative
// addressing. However it could be put in a separate section via something like
//
// .section .rodata
// ****************************************************************************

// 2^254 * G and (2^254 + 8) * G in extended-projective coordinates
// but with z = 1 assumed and hence left out, so they are (X,Y,T) only.
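// Each point entry below is three field elements of four 64-bit limbs,
// i.e. twelve .quad values per entry. The two fixed points that follow
// immediately are stored as (X,Y,T) as noted above; the gtable after them
// holds the (y-x, x+y, 2*d*x*y) triples, with the eight multiples
// j = 1..8 stored consecutively for each window position 2^(4k), which is
// presumably how the 4-bit windowed table lookups earlier in the function
// index into it.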
curve25519_x25519base_edwards25519_0g: .quad 0x251037f7cf4e861d .quad 0x10ede0fb19fb128f .quad 0x96c033b175f5e2c8 .quad 0x055f070d6c15fb0d .quad 0x7c52af2c97473e69 .quad 0x022f82391bad8378 .quad 0x9991e1b02adb476f .quad 0x511144a03a99b855 .quad 0x5fafc3b88ff2e4ae .quad 0x855e4ff0de1230ff .quad 0x72e302a348492870 .quad 0x1253c19e53dbe1bc curve25519_x25519base_edwards25519_8g: .quad 0x331d086e0d9abcaa .quad 0x1e23c96d311a10c9 .quad 0x96d0f95e58c13478 .quad 0x2f72f7384fcfcc59 .quad 0x39a6cd1cfd7d87c9 .quad 0x9867a0abd8ae153a .quad 0xa49d2a5f35986745 .quad 0x57012940cdfe82e1 .quad 0x5046a6532ec5544a .quad 0x6d674004739ff6c9 .quad 0x9bbaa44b234a70e3 .quad 0x5e6d8901138cf386 // Precomputed table of multiples of generator for edwards25519 // all in precomputed extended-projective (y-x,x+y,2*d*x*y) triples. curve25519_x25519base_edwards25519_gtable: // 2^4 * 1 * G .quad 0x7ec851ca553e2df3 .quad 0xa71284cba64878b3 .quad 0xe6b5e4193288d1e7 .quad 0x4cf210ec5a9a8883 .quad 0x322d04a52d9021f6 .quad 0xb9c19f3375c6bf9c .quad 0x587a3a4342d20b09 .quad 0x143b1cf8aa64fe61 .quad 0x9f867c7d968acaab .quad 0x5f54258e27092729 .quad 0xd0a7d34bea180975 .quad 0x21b546a3374126e1 // 2^4 * 2 * G .quad 0xa94ff858a2888343 .quad 0xce0ed4565313ed3c .quad 0xf55c3dcfb5bf34fa .quad 0x0a653ca5c9eab371 .quad 0x490a7a45d185218f .quad 0x9a15377846049335 .quad 0x0060ea09cc31e1f6 .quad 0x7e041577f86ee965 .quad 0x66b2a496ce5b67f3 .quad 0xff5492d8bd569796 .quad 0x503cec294a592cd0 .quad 0x566943650813acb2 // 2^4 * 3 * G .quad 0xb818db0c26620798 .quad 0x5d5c31d9606e354a .quad 0x0982fa4f00a8cdc7 .quad 0x17e12bcd4653e2d4 .quad 0x5672f9eb1dabb69d .quad 0xba70b535afe853fc .quad 0x47ac0f752796d66d .quad 0x32a5351794117275 .quad 0xd3a644a6df648437 .quad 0x703b6559880fbfdd .quad 0xcb852540ad3a1aa5 .quad 0x0900b3f78e4c6468 // 2^4 * 4 * G .quad 0x0a851b9f679d651b .quad 0xe108cb61033342f2 .quad 0xd601f57fe88b30a3 .quad 0x371f3acaed2dd714 .quad 0xed280fbec816ad31 .quad 0x52d9595bd8e6efe3 .quad 0x0fe71772f6c623f5 .quad 0x4314030b051e293c .quad 0xd560005efbf0bcad .quad 0x8eb70f2ed1870c5e .quad 0x201f9033d084e6a0 .quad 0x4c3a5ae1ce7b6670 // 2^4 * 5 * G .quad 0x4138a434dcb8fa95 .quad 0x870cf67d6c96840b .quad 0xde388574297be82c .quad 0x7c814db27262a55a .quad 0xbaf875e4c93da0dd .quad 0xb93282a771b9294d .quad 0x80d63fb7f4c6c460 .quad 0x6de9c73dea66c181 .quad 0x478904d5a04df8f2 .quad 0xfafbae4ab10142d3 .quad 0xf6c8ac63555d0998 .quad 0x5aac4a412f90b104 // 2^4 * 6 * G .quad 0xc64f326b3ac92908 .quad 0x5551b282e663e1e0 .quad 0x476b35f54a1a4b83 .quad 0x1b9da3fe189f68c2 .quad 0x603a0d0abd7f5134 .quad 0x8089c932e1d3ae46 .quad 0xdf2591398798bd63 .quad 0x1c145cd274ba0235 .quad 0x32e8386475f3d743 .quad 0x365b8baf6ae5d9ef .quad 0x825238b6385b681e .quad 0x234929c1167d65e1 // 2^4 * 7 * G .quad 0x984decaba077ade8 .quad 0x383f77ad19eb389d .quad 0xc7ec6b7e2954d794 .quad 0x59c77b3aeb7c3a7a .quad 0x48145cc21d099fcf .quad 0x4535c192cc28d7e5 .quad 0x80e7c1e548247e01 .quad 0x4a5f28743b2973ee .quad 0xd3add725225ccf62 .quad 0x911a3381b2152c5d .quad 0xd8b39fad5b08f87d .quad 0x6f05606b4799fe3b // 2^4 * 8 * G .quad 0x9ffe9e92177ba962 .quad 0x98aee71d0de5cae1 .quad 0x3ff4ae942d831044 .quad 0x714de12e58533ac8 .quad 0x5b433149f91b6483 .quad 0xadb5dc655a2cbf62 .quad 0x87fa8412632827b3 .quad 0x60895e91ab49f8d8 .quad 0xe9ecf2ed0cf86c18 .quad 0xb46d06120735dfd4 .quad 0xbc9da09804b96be7 .quad 0x73e2e62fd96dc26b // 2^8 * 1 * G .quad 0xed5b635449aa515e .quad 0xa865c49f0bc6823a .quad 0x850c1fe95b42d1c4 .quad 0x30d76d6f03d315b9 .quad 0x2eccdd0e632f9c1d .quad 0x51d0b69676893115 .quad 
0x52dfb76ba8637a58 .quad 0x6dd37d49a00eef39 .quad 0x6c4444172106e4c7 .quad 0xfb53d680928d7f69 .quad 0xb4739ea4694d3f26 .quad 0x10c697112e864bb0 // 2^8 * 2 * G .quad 0x6493c4277dbe5fde .quad 0x265d4fad19ad7ea2 .quad 0x0e00dfc846304590 .quad 0x25e61cabed66fe09 .quad 0x0ca62aa08358c805 .quad 0x6a3d4ae37a204247 .quad 0x7464d3a63b11eddc .quad 0x03bf9baf550806ef .quad 0x3f13e128cc586604 .quad 0x6f5873ecb459747e .quad 0xa0b63dedcc1268f5 .quad 0x566d78634586e22c // 2^8 * 3 * G .quad 0x1637a49f9cc10834 .quad 0xbc8e56d5a89bc451 .quad 0x1cb5ec0f7f7fd2db .quad 0x33975bca5ecc35d9 .quad 0xa1054285c65a2fd0 .quad 0x6c64112af31667c3 .quad 0x680ae240731aee58 .quad 0x14fba5f34793b22a .quad 0x3cd746166985f7d4 .quad 0x593e5e84c9c80057 .quad 0x2fc3f2b67b61131e .quad 0x14829cea83fc526c // 2^8 * 4 * G .quad 0xff437b8497dd95c2 .quad 0x6c744e30aa4eb5a7 .quad 0x9e0c5d613c85e88b .quad 0x2fd9c71e5f758173 .quad 0x21e70b2f4e71ecb8 .quad 0xe656ddb940a477e3 .quad 0xbf6556cece1d4f80 .quad 0x05fc3bc4535d7b7e .quad 0x24b8b3ae52afdedd .quad 0x3495638ced3b30cf .quad 0x33a4bc83a9be8195 .quad 0x373767475c651f04 // 2^8 * 5 * G .quad 0x2fba99fd40d1add9 .quad 0xb307166f96f4d027 .quad 0x4363f05215f03bae .quad 0x1fbea56c3b18f999 .quad 0x634095cb14246590 .quad 0xef12144016c15535 .quad 0x9e38140c8910bc60 .quad 0x6bf5905730907c8c .quad 0x0fa778f1e1415b8a .quad 0x06409ff7bac3a77e .quad 0x6f52d7b89aa29a50 .quad 0x02521cf67a635a56 // 2^8 * 6 * G .quad 0x513fee0b0a9d5294 .quad 0x8f98e75c0fdf5a66 .quad 0xd4618688bfe107ce .quad 0x3fa00a7e71382ced .quad 0xb1146720772f5ee4 .quad 0xe8f894b196079ace .quad 0x4af8224d00ac824a .quad 0x001753d9f7cd6cc4 .quad 0x3c69232d963ddb34 .quad 0x1dde87dab4973858 .quad 0xaad7d1f9a091f285 .quad 0x12b5fe2fa048edb6 // 2^8 * 7 * G .quad 0x71f0fbc496fce34d .quad 0x73b9826badf35bed .quad 0xd2047261ff28c561 .quad 0x749b76f96fb1206f .quad 0xdf2b7c26ad6f1e92 .quad 0x4b66d323504b8913 .quad 0x8c409dc0751c8bc3 .quad 0x6f7e93c20796c7b8 .quad 0x1f5af604aea6ae05 .quad 0xc12351f1bee49c99 .quad 0x61a808b5eeff6b66 .quad 0x0fcec10f01e02151 // 2^8 * 8 * G .quad 0x644d58a649fe1e44 .quad 0x21fcaea231ad777e .quad 0x02441c5a887fd0d2 .quad 0x4901aa7183c511f3 .quad 0x3df2d29dc4244e45 .quad 0x2b020e7493d8de0a .quad 0x6cc8067e820c214d .quad 0x413779166feab90a .quad 0x08b1b7548c1af8f0 .quad 0xce0f7a7c246299b4 .quad 0xf760b0f91e06d939 .quad 0x41bb887b726d1213 // 2^12 * 1 * G .quad 0x9267806c567c49d8 .quad 0x066d04ccca791e6a .quad 0xa69f5645e3cc394b .quad 0x5c95b686a0788cd2 .quad 0x97d980e0aa39f7d2 .quad 0x35d0384252c6b51c .quad 0x7d43f49307cd55aa .quad 0x56bd36cfb78ac362 .quad 0x2ac519c10d14a954 .quad 0xeaf474b494b5fa90 .quad 0xe6af8382a9f87a5a .quad 0x0dea6db1879be094 // 2^12 * 2 * G .quad 0xaa66bf547344e5ab .quad 0xda1258888f1b4309 .quad 0x5e87d2b3fd564b2f .quad 0x5b2c78885483b1dd .quad 0x15baeb74d6a8797a .quad 0x7ef55cf1fac41732 .quad 0x29001f5a3c8b05c5 .quad 0x0ad7cc8752eaccfb .quad 0x52151362793408cf .quad 0xeb0f170319963d94 .quad 0xa833b2fa883d9466 .quad 0x093a7fa775003c78 // 2^12 * 3 * G .quad 0xe5107de63a16d7be .quad 0xa377ffdc9af332cf .quad 0x70d5bf18440b677f .quad 0x6a252b19a4a31403 .quad 0xb8e9604460a91286 .quad 0x7f3fd8047778d3de .quad 0x67d01e31bf8a5e2d .quad 0x7b038a06c27b653e .quad 0x9ed919d5d36990f3 .quad 0x5213aebbdb4eb9f2 .quad 0xc708ea054cb99135 .quad 0x58ded57f72260e56 // 2^12 * 4 * G .quad 0x78e79dade9413d77 .quad 0xf257f9d59729e67d .quad 0x59db910ee37aa7e6 .quad 0x6aa11b5bbb9e039c .quad 0xda6d53265b0fd48b .quad 0x8960823193bfa988 .quad 0xd78ac93261d57e28 .quad 0x79f2942d3a5c8143 .quad 0x97da2f25b6c88de9 .quad 
0x251ba7eaacf20169 .quad 0x09b44f87ef4eb4e4 .quad 0x7d90ab1bbc6a7da5 // 2^12 * 5 * G .quad 0x9acca683a7016bfe .quad 0x90505f4df2c50b6d .quad 0x6b610d5fcce435aa .quad 0x19a10d446198ff96 .quad 0x1a07a3f496b3c397 .quad 0x11ceaa188f4e2532 .quad 0x7d9498d5a7751bf0 .quad 0x19ed161f508dd8a0 .quad 0x560a2cd687dce6ca .quad 0x7f3568c48664cf4d .quad 0x8741e95222803a38 .quad 0x483bdab1595653fc // 2^12 * 6 * G .quad 0xfa780f148734fa49 .quad 0x106f0b70360534e0 .quad 0x2210776fe3e307bd .quad 0x3286c109dde6a0fe .quad 0xd6cf4d0ab4da80f6 .quad 0x82483e45f8307fe0 .quad 0x05005269ae6f9da4 .quad 0x1c7052909cf7877a .quad 0x32ee7de2874e98d4 .quad 0x14c362e9b97e0c60 .quad 0x5781dcde6a60a38a .quad 0x217dd5eaaa7aa840 // 2^12 * 7 * G .quad 0x9db7c4d0248e1eb0 .quad 0xe07697e14d74bf52 .quad 0x1e6a9b173c562354 .quad 0x7fa7c21f795a4965 .quad 0x8bdf1fb9be8c0ec8 .quad 0x00bae7f8e30a0282 .quad 0x4963991dad6c4f6c .quad 0x07058a6e5df6f60a .quad 0xe9eb02c4db31f67f .quad 0xed25fd8910bcfb2b .quad 0x46c8131f5c5cddb4 .quad 0x33b21c13a0cb9bce // 2^12 * 8 * G .quad 0x360692f8087d8e31 .quad 0xf4dcc637d27163f7 .quad 0x25a4e62065ea5963 .quad 0x659bf72e5ac160d9 .quad 0x9aafb9b05ee38c5b .quad 0xbf9d2d4e071a13c7 .quad 0x8eee6e6de933290a .quad 0x1c3bab17ae109717 .quad 0x1c9ab216c7cab7b0 .quad 0x7d65d37407bbc3cc .quad 0x52744750504a58d5 .quad 0x09f2606b131a2990 // 2^16 * 1 * G .quad 0x40e87d44744346be .quad 0x1d48dad415b52b25 .quad 0x7c3a8a18a13b603e .quad 0x4eb728c12fcdbdf7 .quad 0x7e234c597c6691ae .quad 0x64889d3d0a85b4c8 .quad 0xdae2c90c354afae7 .quad 0x0a871e070c6a9e1d .quad 0x3301b5994bbc8989 .quad 0x736bae3a5bdd4260 .quad 0x0d61ade219d59e3c .quad 0x3ee7300f2685d464 // 2^16 * 2 * G .quad 0xf5d255e49e7dd6b7 .quad 0x8016115c610b1eac .quad 0x3c99975d92e187ca .quad 0x13815762979125c2 .quad 0x43fa7947841e7518 .quad 0xe5c6fa59639c46d7 .quad 0xa1065e1de3052b74 .quad 0x7d47c6a2cfb89030 .quad 0x3fdad0148ef0d6e0 .quad 0x9d3e749a91546f3c .quad 0x71ec621026bb8157 .quad 0x148cf58d34c9ec80 // 2^16 * 3 * G .quad 0x46a492f67934f027 .quad 0x469984bef6840aa9 .quad 0x5ca1bc2a89611854 .quad 0x3ff2fa1ebd5dbbd4 .quad 0xe2572f7d9ae4756d .quad 0x56c345bb88f3487f .quad 0x9fd10b6d6960a88d .quad 0x278febad4eaea1b9 .quad 0xb1aa681f8c933966 .quad 0x8c21949c20290c98 .quad 0x39115291219d3c52 .quad 0x4104dd02fe9c677b // 2^16 * 4 * G .quad 0x72b2bf5e1124422a .quad 0xa1fa0c3398a33ab5 .quad 0x94cb6101fa52b666 .quad 0x2c863b00afaf53d5 .quad 0x81214e06db096ab8 .quad 0x21a8b6c90ce44f35 .quad 0x6524c12a409e2af5 .quad 0x0165b5a48efca481 .quad 0xf190a474a0846a76 .quad 0x12eff984cd2f7cc0 .quad 0x695e290658aa2b8f .quad 0x591b67d9bffec8b8 // 2^16 * 5 * G .quad 0x312f0d1c80b49bfa .quad 0x5979515eabf3ec8a .quad 0x727033c09ef01c88 .quad 0x3de02ec7ca8f7bcb .quad 0x99b9b3719f18b55d .quad 0xe465e5faa18c641e .quad 0x61081136c29f05ed .quad 0x489b4f867030128b .quad 0xd232102d3aeb92ef .quad 0xe16253b46116a861 .quad 0x3d7eabe7190baa24 .quad 0x49f5fbba496cbebf // 2^16 * 6 * G .quad 0x30949a108a5bcfd4 .quad 0xdc40dd70bc6473eb .quad 0x92c294c1307c0d1c .quad 0x5604a86dcbfa6e74 .quad 0x155d628c1e9c572e .quad 0x8a4d86acc5884741 .quad 0x91a352f6515763eb .quad 0x06a1a6c28867515b .quad 0x7288d1d47c1764b6 .quad 0x72541140e0418b51 .quad 0x9f031a6018acf6d1 .quad 0x20989e89fe2742c6 // 2^16 * 7 * G .quad 0x499777fd3a2dcc7f .quad 0x32857c2ca54fd892 .quad 0xa279d864d207e3a0 .quad 0x0403ed1d0ca67e29 .quad 0x1674278b85eaec2e .quad 0x5621dc077acb2bdf .quad 0x640a4c1661cbf45a .quad 0x730b9950f70595d3 .quad 0xc94b2d35874ec552 .quad 0xc5e6c8cf98246f8d .quad 0xf7cb46fa16c035ce .quad 0x5bd7454308303dcc // 2^16 * 
8 * G .quad 0x7f9ad19528b24cc2 .quad 0x7f6b54656335c181 .quad 0x66b8b66e4fc07236 .quad 0x133a78007380ad83 .quad 0x85c4932115e7792a .quad 0xc64c89a2bdcdddc9 .quad 0x9d1e3da8ada3d762 .quad 0x5bb7db123067f82c .quad 0x0961f467c6ca62be .quad 0x04ec21d6211952ee .quad 0x182360779bd54770 .quad 0x740dca6d58f0e0d2 // 2^20 * 1 * G .quad 0x50b70bf5d3f0af0b .quad 0x4feaf48ae32e71f7 .quad 0x60e84ed3a55bbd34 .quad 0x00ed489b3f50d1ed .quad 0x3906c72aed261ae5 .quad 0x9ab68fd988e100f7 .quad 0xf5e9059af3360197 .quad 0x0e53dc78bf2b6d47 .quad 0xb90829bf7971877a .quad 0x5e4444636d17e631 .quad 0x4d05c52e18276893 .quad 0x27632d9a5a4a4af5 // 2^20 * 2 * G .quad 0xd11ff05154b260ce .quad 0xd86dc38e72f95270 .quad 0x601fcd0d267cc138 .quad 0x2b67916429e90ccd .quad 0xa98285d187eaffdb .quad 0xa5b4fbbbd8d0a864 .quad 0xb658f27f022663f7 .quad 0x3bbc2b22d99ce282 .quad 0xb917c952583c0a58 .quad 0x653ff9b80fe4c6f3 .quad 0x9b0da7d7bcdf3c0c .quad 0x43a0eeb6ab54d60e // 2^20 * 3 * G .quad 0x396966a46d4a5487 .quad 0xf811a18aac2bb3ba .quad 0x66e4685b5628b26b .quad 0x70a477029d929b92 .quad 0x3ac6322357875fe8 .quad 0xd9d4f4ecf5fbcb8f .quad 0x8dee8493382bb620 .quad 0x50c5eaa14c799fdc .quad 0xdd0edc8bd6f2fb3c .quad 0x54c63aa79cc7b7a0 .quad 0xae0b032b2c8d9f1a .quad 0x6f9ce107602967fb // 2^20 * 4 * G .quad 0xad1054b1cde1c22a .quad 0xc4a8e90248eb32df .quad 0x5f3e7b33accdc0ea .quad 0x72364713fc79963e .quad 0x139693063520e0b5 .quad 0x437fcf7c88ea03fe .quad 0xf7d4c40bd3c959bc .quad 0x699154d1f893ded9 .quad 0x315d5c75b4b27526 .quad 0xcccb842d0236daa5 .quad 0x22f0c8a3345fee8e .quad 0x73975a617d39dbed // 2^20 * 5 * G .quad 0xe4024df96375da10 .quad 0x78d3251a1830c870 .quad 0x902b1948658cd91c .quad 0x7e18b10b29b7438a .quad 0x6f37f392f4433e46 .quad 0x0e19b9a11f566b18 .quad 0x220fb78a1fd1d662 .quad 0x362a4258a381c94d .quad 0x9071d9132b6beb2f .quad 0x0f26e9ad28418247 .quad 0xeab91ec9bdec925d .quad 0x4be65bc8f48af2de // 2^20 * 6 * G .quad 0x78487feba36e7028 .quad 0x5f3f13001dd8ce34 .quad 0x934fb12d4b30c489 .quad 0x056c244d397f0a2b .quad 0x1d50fba257c26234 .quad 0x7bd4823adeb0678b .quad 0xc2b0dc6ea6538af5 .quad 0x5665eec6351da73e .quad 0xdb3ee00943bfb210 .quad 0x4972018720800ac2 .quad 0x26ab5d6173bd8667 .quad 0x20b209c2ab204938 // 2^20 * 7 * G .quad 0x549e342ac07fb34b .quad 0x02d8220821373d93 .quad 0xbc262d70acd1f567 .quad 0x7a92c9fdfbcac784 .quad 0x1fcca94516bd3289 .quad 0x448d65aa41420428 .quad 0x59c3b7b216a55d62 .quad 0x49992cc64e612cd8 .quad 0x65bd1bea70f801de .quad 0x1befb7c0fe49e28a .quad 0xa86306cdb1b2ae4a .quad 0x3b7ac0cd265c2a09 // 2^20 * 8 * G .quad 0x822bee438c01bcec .quad 0x530cb525c0fbc73b .quad 0x48519034c1953fe9 .quad 0x265cc261e09a0f5b .quad 0xf0d54e4f22ed39a7 .quad 0xa2aae91e5608150a .quad 0xf421b2e9eddae875 .quad 0x31bc531d6b7de992 .quad 0xdf3d134da980f971 .quad 0x7a4fb8d1221a22a7 .quad 0x3df7d42035aad6d8 .quad 0x2a14edcc6a1a125e // 2^24 * 1 * G .quad 0xdf48ee0752cfce4e .quad 0xc3fffaf306ec08b7 .quad 0x05710b2ab95459c4 .quad 0x161d25fa963ea38d .quad 0x231a8c570478433c .quad 0xb7b5270ec281439d .quad 0xdbaa99eae3d9079f .quad 0x2c03f5256c2b03d9 .quad 0x790f18757b53a47d .quad 0x307b0130cf0c5879 .quad 0x31903d77257ef7f9 .quad 0x699468bdbd96bbaf // 2^24 * 2 * G .quad 0xbd1f2f46f4dafecf .quad 0x7cef0114a47fd6f7 .quad 0xd31ffdda4a47b37f .quad 0x525219a473905785 .quad 0xd8dd3de66aa91948 .quad 0x485064c22fc0d2cc .quad 0x9b48246634fdea2f .quad 0x293e1c4e6c4a2e3a .quad 0x376e134b925112e1 .quad 0x703778b5dca15da0 .quad 0xb04589af461c3111 .quad 0x5b605c447f032823 // 2^24 * 3 * G .quad 0xb965805920c47c89 .quad 0xe7f0100c923b8fcc .quad 
0x0001256502e2ef77 .quad 0x24a76dcea8aeb3ee .quad 0x3be9fec6f0e7f04c .quad 0x866a579e75e34962 .quad 0x5542ef161e1de61a .quad 0x2f12fef4cc5abdd5 .quad 0x0a4522b2dfc0c740 .quad 0x10d06e7f40c9a407 .quad 0xc6cf144178cff668 .quad 0x5e607b2518a43790 // 2^24 * 4 * G .quad 0x58b31d8f6cdf1818 .quad 0x35cfa74fc36258a2 .quad 0xe1b3ff4f66e61d6e .quad 0x5067acab6ccdd5f7 .quad 0xa02c431ca596cf14 .quad 0xe3c42d40aed3e400 .quad 0xd24526802e0f26db .quad 0x201f33139e457068 .quad 0xfd527f6b08039d51 .quad 0x18b14964017c0006 .quad 0xd5220eb02e25a4a8 .quad 0x397cba8862460375 // 2^24 * 5 * G .quad 0x30c13093f05959b2 .quad 0xe23aa18de9a97976 .quad 0x222fd491721d5e26 .quad 0x2339d320766e6c3a .quad 0x7815c3fbc81379e7 .quad 0xa6619420dde12af1 .quad 0xffa9c0f885a8fdd5 .quad 0x771b4022c1e1c252 .quad 0xd87dd986513a2fa7 .quad 0xf5ac9b71f9d4cf08 .quad 0xd06bc31b1ea283b3 .quad 0x331a189219971a76 // 2^24 * 6 * G .quad 0xf5166f45fb4f80c6 .quad 0x9c36c7de61c775cf .quad 0xe3d4e81b9041d91c .quad 0x31167c6b83bdfe21 .quad 0x26512f3a9d7572af .quad 0x5bcbe28868074a9e .quad 0x84edc1c11180f7c4 .quad 0x1ac9619ff649a67b .quad 0xf22b3842524b1068 .quad 0x5068343bee9ce987 .quad 0xfc9d71844a6250c8 .quad 0x612436341f08b111 // 2^24 * 7 * G .quad 0xd99d41db874e898d .quad 0x09fea5f16c07dc20 .quad 0x793d2c67d00f9bbc .quad 0x46ebe2309e5eff40 .quad 0x8b6349e31a2d2638 .quad 0x9ddfb7009bd3fd35 .quad 0x7f8bf1b8a3a06ba4 .quad 0x1522aa3178d90445 .quad 0x2c382f5369614938 .quad 0xdafe409ab72d6d10 .quad 0xe8c83391b646f227 .quad 0x45fe70f50524306c // 2^24 * 8 * G .quad 0xda4875a6960c0b8c .quad 0x5b68d076ef0e2f20 .quad 0x07fb51cf3d0b8fd4 .quad 0x428d1623a0e392d4 .quad 0x62f24920c8951491 .quad 0x05f007c83f630ca2 .quad 0x6fbb45d2f5c9d4b8 .quad 0x16619f6db57a2245 .quad 0x084f4a4401a308fd .quad 0xa82219c376a5caac .quad 0xdeb8de4643d1bc7d .quad 0x1d81592d60bd38c6 // 2^28 * 1 * G .quad 0xd833d7beec2a4c38 .quad 0x2c9162830acc20ed .quad 0xe93a47aa92df7581 .quad 0x702d67a3333c4a81 .quad 0x3a4a369a2f89c8a1 .quad 0x63137a1d7c8de80d .quad 0xbcac008a78eda015 .quad 0x2cb8b3a5b483b03f .quad 0x36e417cbcb1b90a1 .quad 0x33b3ddaa7f11794e .quad 0x3f510808885bc607 .quad 0x24141dc0e6a8020d // 2^28 * 2 * G .quad 0x59f73c773fefee9d .quad 0xb3f1ef89c1cf989d .quad 0xe35dfb42e02e545f .quad 0x5766120b47a1b47c .quad 0x91925dccbd83157d .quad 0x3ca1205322cc8094 .quad 0x28e57f183f90d6e4 .quad 0x1a4714cede2e767b .quad 0xdb20ba0fb8b6b7ff .quad 0xb732c3b677511fa1 .quad 0xa92b51c099f02d89 .quad 0x4f3875ad489ca5f1 // 2^28 * 3 * G .quad 0xc7fc762f4932ab22 .quad 0x7ac0edf72f4c3c1b .quad 0x5f6b55aa9aa895e8 .quad 0x3680274dad0a0081 .quad 0x79ed13f6ee73eec0 .quad 0xa5c6526d69110bb1 .quad 0xe48928c38603860c .quad 0x722a1446fd7059f5 .quad 0xd0959fe9a8cf8819 .quad 0xd0a995508475a99c .quad 0x6eac173320b09cc5 .quad 0x628ecf04331b1095 // 2^28 * 4 * G .quad 0x98bcb118a9d0ddbc .quad 0xee449e3408b4802b .quad 0x87089226b8a6b104 .quad 0x685f349a45c7915d .quad 0x9b41acf85c74ccf1 .quad 0xb673318108265251 .quad 0x99c92aed11adb147 .quad 0x7a47d70d34ecb40f .quad 0x60a0c4cbcc43a4f5 .quad 0x775c66ca3677bea9 .quad 0xa17aa1752ff8f5ed .quad 0x11ded9020e01fdc0 // 2^28 * 5 * G .quad 0x890e7809caefe704 .quad 0x8728296de30e8c6c .quad 0x4c5cd2a392aeb1c9 .quad 0x194263d15771531f .quad 0x471f95b03bea93b7 .quad 0x0552d7d43313abd3 .quad 0xbd9370e2e17e3f7b .quad 0x7b120f1db20e5bec .quad 0x17d2fb3d86502d7a .quad 0xb564d84450a69352 .quad 0x7da962c8a60ed75d .quad 0x00d0f85b318736aa // 2^28 * 6 * G .quad 0x978b142e777c84fd .quad 0xf402644705a8c062 .quad 0xa67ad51be7e612c7 .quad 0x2f7b459698dd6a33 .quad 0xa6753c1efd7621c1 .quad 
0x69c0b4a7445671f5 .quad 0x971f527405b23c11 .quad 0x387bc74851a8c7cd .quad 0x81894b4d4a52a9a8 .quad 0xadd93e12f6b8832f .quad 0x184d8548b61bd638 .quad 0x3f1c62dbd6c9f6cd // 2^28 * 7 * G .quad 0x2e8f1f0091910c1f .quad 0xa4df4fe0bff2e12c .quad 0x60c6560aee927438 .quad 0x6338283facefc8fa .quad 0x3fad3e40148f693d .quad 0x052656e194eb9a72 .quad 0x2f4dcbfd184f4e2f .quad 0x406f8db1c482e18b .quad 0x9e630d2c7f191ee4 .quad 0x4fbf8301bc3ff670 .quad 0x787d8e4e7afb73c4 .quad 0x50d83d5be8f58fa5 // 2^28 * 8 * G .quad 0x85683916c11a1897 .quad 0x2d69a4efe506d008 .quad 0x39af1378f664bd01 .quad 0x65942131361517c6 .quad 0xc0accf90b4d3b66d .quad 0xa7059de561732e60 .quad 0x033d1f7870c6b0ba .quad 0x584161cd26d946e4 .quad 0xbbf2b1a072d27ca2 .quad 0xbf393c59fbdec704 .quad 0xe98dbbcee262b81e .quad 0x02eebd0b3029b589 // 2^32 * 1 * G .quad 0x61368756a60dac5f .quad 0x17e02f6aebabdc57 .quad 0x7f193f2d4cce0f7d .quad 0x20234a7789ecdcf0 .quad 0x8765b69f7b85c5e8 .quad 0x6ff0678bd168bab2 .quad 0x3a70e77c1d330f9b .quad 0x3a5f6d51b0af8e7c .quad 0x76d20db67178b252 .quad 0x071c34f9d51ed160 .quad 0xf62a4a20b3e41170 .quad 0x7cd682353cffe366 // 2^32 * 2 * G .quad 0x0be1a45bd887fab6 .quad 0x2a846a32ba403b6e .quad 0xd9921012e96e6000 .quad 0x2838c8863bdc0943 .quad 0xa665cd6068acf4f3 .quad 0x42d92d183cd7e3d3 .quad 0x5759389d336025d9 .quad 0x3ef0253b2b2cd8ff .quad 0xd16bb0cf4a465030 .quad 0xfa496b4115c577ab .quad 0x82cfae8af4ab419d .quad 0x21dcb8a606a82812 // 2^32 * 3 * G .quad 0x5c6004468c9d9fc8 .quad 0x2540096ed42aa3cb .quad 0x125b4d4c12ee2f9c .quad 0x0bc3d08194a31dab .quad 0x9a8d00fabe7731ba .quad 0x8203607e629e1889 .quad 0xb2cc023743f3d97f .quad 0x5d840dbf6c6f678b .quad 0x706e380d309fe18b .quad 0x6eb02da6b9e165c7 .quad 0x57bbba997dae20ab .quad 0x3a4276232ac196dd // 2^32 * 4 * G .quad 0x4b42432c8a7084fa .quad 0x898a19e3dfb9e545 .quad 0xbe9f00219c58e45d .quad 0x1ff177cea16debd1 .quad 0x3bf8c172db447ecb .quad 0x5fcfc41fc6282dbd .quad 0x80acffc075aa15fe .quad 0x0770c9e824e1a9f9 .quad 0xcf61d99a45b5b5fd .quad 0x860984e91b3a7924 .quad 0xe7300919303e3e89 .quad 0x39f264fd41500b1e // 2^32 * 5 * G .quad 0xa7ad3417dbe7e29c .quad 0xbd94376a2b9c139c .quad 0xa0e91b8e93597ba9 .quad 0x1712d73468889840 .quad 0xd19b4aabfe097be1 .quad 0xa46dfce1dfe01929 .quad 0xc3c908942ca6f1ff .quad 0x65c621272c35f14e .quad 0xe72b89f8ce3193dd .quad 0x4d103356a125c0bb .quad 0x0419a93d2e1cfe83 .quad 0x22f9800ab19ce272 // 2^32 * 6 * G .quad 0x605a368a3e9ef8cb .quad 0xe3e9c022a5504715 .quad 0x553d48b05f24248f .quad 0x13f416cd647626e5 .quad 0x42029fdd9a6efdac .quad 0xb912cebe34a54941 .quad 0x640f64b987bdf37b .quad 0x4171a4d38598cab4 .quad 0xfa2758aa99c94c8c .quad 0x23006f6fb000b807 .quad 0xfbd291ddadda5392 .quad 0x508214fa574bd1ab // 2^32 * 7 * G .quad 0xc20269153ed6fe4b .quad 0xa65a6739511d77c4 .quad 0xcbde26462c14af94 .quad 0x22f960ec6faba74b .quad 0x461a15bb53d003d6 .quad 0xb2102888bcf3c965 .quad 0x27c576756c683a5a .quad 0x3a7758a4c86cb447 .quad 0x548111f693ae5076 .quad 0x1dae21df1dfd54a6 .quad 0x12248c90f3115e65 .quad 0x5d9fd15f8de7f494 // 2^32 * 8 * G .quad 0x031408d36d63727f .quad 0x6a379aefd7c7b533 .quad 0xa9e18fc5ccaee24b .quad 0x332f35914f8fbed3 .quad 0x3f244d2aeed7521e .quad 0x8e3a9028432e9615 .quad 0xe164ba772e9c16d4 .quad 0x3bc187fa47eb98d8 .quad 0x6d470115ea86c20c .quad 0x998ab7cb6c46d125 .quad 0xd77832b53a660188 .quad 0x450d81ce906fba03 // 2^36 * 1 * G .quad 0xf8ae4d2ad8453902 .quad 0x7018058ee8db2d1d .quad 0xaab3995fc7d2c11e .quad 0x53b16d2324ccca79 .quad 0x23264d66b2cae0b5 .quad 0x7dbaed33ebca6576 .quad 0x030ebed6f0d24ac8 .quad 0x2a887f78f7635510 .quad 
0x2a23b9e75c012d4f .quad 0x0c974651cae1f2ea .quad 0x2fb63273675d70ca .quad 0x0ba7250b864403f5 // 2^36 * 2 * G .quad 0xbb0d18fd029c6421 .quad 0xbc2d142189298f02 .quad 0x8347f8e68b250e96 .quad 0x7b9f2fe8032d71c9 .quad 0xdd63589386f86d9c .quad 0x61699176e13a85a4 .quad 0x2e5111954eaa7d57 .quad 0x32c21b57fb60bdfb .quad 0xd87823cd319e0780 .quad 0xefc4cfc1897775c5 .quad 0x4854fb129a0ab3f7 .quad 0x12c49d417238c371 // 2^36 * 3 * G .quad 0x0950b533ffe83769 .quad 0x21861c1d8e1d6bd1 .quad 0xf022d8381302e510 .quad 0x2509200c6391cab4 .quad 0x09b3a01783799542 .quad 0x626dd08faad5ee3f .quad 0xba00bceeeb70149f .quad 0x1421b246a0a444c9 .quad 0x4aa43a8e8c24a7c7 .quad 0x04c1f540d8f05ef5 .quad 0xadba5e0c0b3eb9dc .quad 0x2ab5504448a49ce3 // 2^36 * 4 * G .quad 0x2ed227266f0f5dec .quad 0x9824ee415ed50824 .quad 0x807bec7c9468d415 .quad 0x7093bae1b521e23f .quad 0xdc07ac631c5d3afa .quad 0x58615171f9df8c6c .quad 0x72a079d89d73e2b0 .quad 0x7301f4ceb4eae15d .quad 0x6409e759d6722c41 .quad 0xa674e1cf72bf729b .quad 0xbc0a24eb3c21e569 .quad 0x390167d24ebacb23 // 2^36 * 5 * G .quad 0x27f58e3bba353f1c .quad 0x4c47764dbf6a4361 .quad 0xafbbc4e56e562650 .quad 0x07db2ee6aae1a45d .quad 0xd7bb054ba2f2120b .quad 0xe2b9ceaeb10589b7 .quad 0x3fe8bac8f3c0edbe .quad 0x4cbd40767112cb69 .quad 0x0b603cc029c58176 .quad 0x5988e3825cb15d61 .quad 0x2bb61413dcf0ad8d .quad 0x7b8eec6c74183287 // 2^36 * 6 * G .quad 0xe4ca40782cd27cb0 .quad 0xdaf9c323fbe967bd .quad 0xb29bd34a8ad41e9e .quad 0x72810497626ede4d .quad 0x32fee570fc386b73 .quad 0xda8b0141da3a8cc7 .quad 0x975ffd0ac8968359 .quad 0x6ee809a1b132a855 .quad 0x9444bb31fcfd863a .quad 0x2fe3690a3e4e48c5 .quad 0xdc29c867d088fa25 .quad 0x13bd1e38d173292e // 2^36 * 7 * G .quad 0xd32b4cd8696149b5 .quad 0xe55937d781d8aab7 .quad 0x0bcb2127ae122b94 .quad 0x41e86fcfb14099b0 .quad 0x223fb5cf1dfac521 .quad 0x325c25316f554450 .quad 0x030b98d7659177ac .quad 0x1ed018b64f88a4bd .quad 0x3630dfa1b802a6b0 .quad 0x880f874742ad3bd5 .quad 0x0af90d6ceec5a4d4 .quad 0x746a247a37cdc5d9 // 2^36 * 8 * G .quad 0xd531b8bd2b7b9af6 .quad 0x5005093537fc5b51 .quad 0x232fcf25c593546d .quad 0x20a365142bb40f49 .quad 0x6eccd85278d941ed .quad 0x2254ae83d22f7843 .quad 0xc522d02e7bbfcdb7 .quad 0x681e3351bff0e4e2 .quad 0x8b64b59d83034f45 .quad 0x2f8b71f21fa20efb .quad 0x69249495ba6550e4 .quad 0x539ef98e45d5472b // 2^40 * 1 * G .quad 0x6e7bb6a1a6205275 .quad 0xaa4f21d7413c8e83 .quad 0x6f56d155e88f5cb2 .quad 0x2de25d4ba6345be1 .quad 0xd074d8961cae743f .quad 0xf86d18f5ee1c63ed .quad 0x97bdc55be7f4ed29 .quad 0x4cbad279663ab108 .quad 0x80d19024a0d71fcd .quad 0xc525c20afb288af8 .quad 0xb1a3974b5f3a6419 .quad 0x7d7fbcefe2007233 // 2^40 * 2 * G .quad 0xfaef1e6a266b2801 .quad 0x866c68c4d5739f16 .quad 0xf68a2fbc1b03762c .quad 0x5975435e87b75a8d .quad 0xcd7c5dc5f3c29094 .quad 0xc781a29a2a9105ab .quad 0x80c61d36421c3058 .quad 0x4f9cd196dcd8d4d7 .quad 0x199297d86a7b3768 .quad 0xd0d058241ad17a63 .quad 0xba029cad5c1c0c17 .quad 0x7ccdd084387a0307 // 2^40 * 3 * G .quad 0xdca6422c6d260417 .quad 0xae153d50948240bd .quad 0xa9c0c1b4fb68c677 .quad 0x428bd0ed61d0cf53 .quad 0x9b0c84186760cc93 .quad 0xcdae007a1ab32a99 .quad 0xa88dec86620bda18 .quad 0x3593ca848190ca44 .quad 0x9213189a5e849aa7 .quad 0xd4d8c33565d8facd .quad 0x8c52545b53fdbbd1 .quad 0x27398308da2d63e6 // 2^40 * 4 * G .quad 0x42c38d28435ed413 .quad 0xbd50f3603278ccc9 .quad 0xbb07ab1a79da03ef .quad 0x269597aebe8c3355 .quad 0xb9a10e4c0a702453 .quad 0x0fa25866d57d1bde .quad 0xffb9d9b5cd27daf7 .quad 0x572c2945492c33fd .quad 0xc77fc745d6cd30be .quad 0xe4dfe8d3e3baaefb .quad 0xa22c8830aa5dda0c .quad 
0x7f985498c05bca80 // 2^40 * 5 * G .quad 0x3849ce889f0be117 .quad 0x8005ad1b7b54a288 .quad 0x3da3c39f23fc921c .quad 0x76c2ec470a31f304 .quad 0xd35615520fbf6363 .quad 0x08045a45cf4dfba6 .quad 0xeec24fbc873fa0c2 .quad 0x30f2653cd69b12e7 .quad 0x8a08c938aac10c85 .quad 0x46179b60db276bcb .quad 0xa920c01e0e6fac70 .quad 0x2f1273f1596473da // 2^40 * 6 * G .quad 0x4739fc7c8ae01e11 .quad 0xfd5274904a6aab9f .quad 0x41d98a8287728f2e .quad 0x5d9e572ad85b69f2 .quad 0x30488bd755a70bc0 .quad 0x06d6b5a4f1d442e7 .quad 0xead1a69ebc596162 .quad 0x38ac1997edc5f784 .quad 0x0666b517a751b13b .quad 0x747d06867e9b858c .quad 0xacacc011454dde49 .quad 0x22dfcd9cbfe9e69c // 2^40 * 7 * G .quad 0x8ddbd2e0c30d0cd9 .quad 0xad8e665facbb4333 .quad 0x8f6b258c322a961f .quad 0x6b2916c05448c1c7 .quad 0x56ec59b4103be0a1 .quad 0x2ee3baecd259f969 .quad 0x797cb29413f5cd32 .quad 0x0fe9877824cde472 .quad 0x7edb34d10aba913b .quad 0x4ea3cd822e6dac0e .quad 0x66083dff6578f815 .quad 0x4c303f307ff00a17 // 2^40 * 8 * G .quad 0xd30a3bd617b28c85 .quad 0xc5d377b739773bea .quad 0xc6c6e78c1e6a5cbf .quad 0x0d61b8f78b2ab7c4 .quad 0x29fc03580dd94500 .quad 0xecd27aa46fbbec93 .quad 0x130a155fc2e2a7f8 .quad 0x416b151ab706a1d5 .quad 0x56a8d7efe9c136b0 .quad 0xbd07e5cd58e44b20 .quad 0xafe62fda1b57e0ab .quad 0x191a2af74277e8d2 // 2^44 * 1 * G .quad 0xd550095bab6f4985 .quad 0x04f4cd5b4fbfaf1a .quad 0x9d8e2ed12a0c7540 .quad 0x2bc24e04b2212286 .quad 0x09d4b60b2fe09a14 .quad 0xc384f0afdbb1747e .quad 0x58e2ea8978b5fd6e .quad 0x519ef577b5e09b0a .quad 0x1863d7d91124cca9 .quad 0x7ac08145b88a708e .quad 0x2bcd7309857031f5 .quad 0x62337a6e8ab8fae5 // 2^44 * 2 * G .quad 0x4bcef17f06ffca16 .quad 0xde06e1db692ae16a .quad 0x0753702d614f42b0 .quad 0x5f6041b45b9212d0 .quad 0xd1ab324e1b3a1273 .quad 0x18947cf181055340 .quad 0x3b5d9567a98c196e .quad 0x7fa00425802e1e68 .quad 0x7d531574028c2705 .quad 0x80317d69db0d75fe .quad 0x30fface8ef8c8ddd .quad 0x7e9de97bb6c3e998 // 2^44 * 3 * G .quad 0x1558967b9e6585a3 .quad 0x97c99ce098e98b92 .quad 0x10af149b6eb3adad .quad 0x42181fe8f4d38cfa .quad 0xf004be62a24d40dd .quad 0xba0659910452d41f .quad 0x81c45ee162a44234 .quad 0x4cb829d8a22266ef .quad 0x1dbcaa8407b86681 .quad 0x081f001e8b26753b .quad 0x3cd7ce6a84048e81 .quad 0x78af11633f25f22c // 2^44 * 4 * G .quad 0x8416ebd40b50babc .quad 0x1508722628208bee .quad 0xa3148fafb9c1c36d .quad 0x0d07daacd32d7d5d .quad 0x3241c00e7d65318c .quad 0xe6bee5dcd0e86de7 .quad 0x118b2dc2fbc08c26 .quad 0x680d04a7fc603dc3 .quad 0xf9c2414a695aa3eb .quad 0xdaa42c4c05a68f21 .quad 0x7c6c23987f93963e .quad 0x210e8cd30c3954e3 // 2^44 * 5 * G .quad 0xac4201f210a71c06 .quad 0x6a65e0aef3bfb021 .quad 0xbc42c35c393632f7 .quad 0x56ea8db1865f0742 .quad 0x2b50f16137fe6c26 .quad 0xe102bcd856e404d8 .quad 0x12b0f1414c561f6b .quad 0x51b17bc8d028ec91 .quad 0xfff5fb4bcf535119 .quad 0xf4989d79df1108a0 .quad 0xbdfcea659a3ba325 .quad 0x18a11f1174d1a6f2 // 2^44 * 6 * G .quad 0x407375ab3f6bba29 .quad 0x9ec3b6d8991e482e .quad 0x99c80e82e55f92e9 .quad 0x307c13b6fb0c0ae1 .quad 0xfbd63cdad27a5f2c .quad 0xf00fc4bc8aa106d7 .quad 0x53fb5c1a8e64a430 .quad 0x04eaabe50c1a2e85 .quad 0x24751021cb8ab5e7 .quad 0xfc2344495c5010eb .quad 0x5f1e717b4e5610a1 .quad 0x44da5f18c2710cd5 // 2^44 * 7 * G .quad 0x033cc55ff1b82eb5 .quad 0xb15ae36d411cae52 .quad 0xba40b6198ffbacd3 .quad 0x768edce1532e861f .quad 0x9156fe6b89d8eacc .quad 0xe6b79451e23126a1 .quad 0xbd7463d93944eb4e .quad 0x726373f6767203ae .quad 0xe305ca72eb7ef68a .quad 0x662cf31f70eadb23 .quad 0x18f026fdb4c45b68 .quad 0x513b5384b5d2ecbd // 2^44 * 8 * G .quad 0x46d46280c729989e .quad 
0x4b93fbd05368a5dd .quad 0x63df3f81d1765a89 .quad 0x34cebd64b9a0a223 .quad 0x5e2702878af34ceb .quad 0x900b0409b946d6ae .quad 0x6512ebf7dabd8512 .quad 0x61d9b76988258f81 .quad 0xa6c5a71349b7d94b .quad 0xa3f3d15823eb9446 .quad 0x0416fbd277484834 .quad 0x69d45e6f2c70812f // 2^48 * 1 * G .quad 0xce16f74bc53c1431 .quad 0x2b9725ce2072edde .quad 0xb8b9c36fb5b23ee7 .quad 0x7e2e0e450b5cc908 .quad 0x9fe62b434f460efb .quad 0xded303d4a63607d6 .quad 0xf052210eb7a0da24 .quad 0x237e7dbe00545b93 .quad 0x013575ed6701b430 .quad 0x231094e69f0bfd10 .quad 0x75320f1583e47f22 .quad 0x71afa699b11155e3 // 2^48 * 2 * G .quad 0x65ce6f9b3953b61d .quad 0xc65839eaafa141e6 .quad 0x0f435ffda9f759fe .quad 0x021142e9c2b1c28e .quad 0xea423c1c473b50d6 .quad 0x51e87a1f3b38ef10 .quad 0x9b84bf5fb2c9be95 .quad 0x00731fbc78f89a1c .quad 0xe430c71848f81880 .quad 0xbf960c225ecec119 .quad 0xb6dae0836bba15e3 .quad 0x4c4d6f3347e15808 // 2^48 * 3 * G .quad 0x18f7eccfc17d1fc9 .quad 0x6c75f5a651403c14 .quad 0xdbde712bf7ee0cdf .quad 0x193fddaaa7e47a22 .quad 0x2f0cddfc988f1970 .quad 0x6b916227b0b9f51b .quad 0x6ec7b6c4779176be .quad 0x38bf9500a88f9fa8 .quad 0x1fd2c93c37e8876f .quad 0xa2f61e5a18d1462c .quad 0x5080f58239241276 .quad 0x6a6fb99ebf0d4969 // 2^48 * 4 * G .quad 0x6a46c1bb560855eb .quad 0x2416bb38f893f09d .quad 0xd71d11378f71acc1 .quad 0x75f76914a31896ea .quad 0xeeb122b5b6e423c6 .quad 0x939d7010f286ff8e .quad 0x90a92a831dcf5d8c .quad 0x136fda9f42c5eb10 .quad 0xf94cdfb1a305bdd1 .quad 0x0f364b9d9ff82c08 .quad 0x2a87d8a5c3bb588a .quad 0x022183510be8dcba // 2^48 * 5 * G .quad 0x4af766385ead2d14 .quad 0xa08ed880ca7c5830 .quad 0x0d13a6e610211e3d .quad 0x6a071ce17b806c03 .quad 0x9d5a710143307a7f .quad 0xb063de9ec47da45f .quad 0x22bbfe52be927ad3 .quad 0x1387c441fd40426c .quad 0xb5d3c3d187978af8 .quad 0x722b5a3d7f0e4413 .quad 0x0d7b4848bb477ca0 .quad 0x3171b26aaf1edc92 // 2^48 * 6 * G .quad 0xa92f319097564ca8 .quad 0xff7bb84c2275e119 .quad 0x4f55fe37a4875150 .quad 0x221fd4873cf0835a .quad 0xa60db7d8b28a47d1 .quad 0xa6bf14d61770a4f1 .quad 0xd4a1f89353ddbd58 .quad 0x6c514a63344243e9 .quad 0x2322204f3a156341 .quad 0xfb73e0e9ba0a032d .quad 0xfce0dd4c410f030e .quad 0x48daa596fb924aaa // 2^48 * 7 * G .quad 0x6eca8e665ca59cc7 .quad 0xa847254b2e38aca0 .quad 0x31afc708d21e17ce .quad 0x676dd6fccad84af7 .quad 0x14f61d5dc84c9793 .quad 0x9941f9e3ef418206 .quad 0xcdf5b88f346277ac .quad 0x58c837fa0e8a79a9 .quad 0x0cf9688596fc9058 .quad 0x1ddcbbf37b56a01b .quad 0xdcc2e77d4935d66a .quad 0x1c4f73f2c6a57f0a // 2^48 * 8 * G .quad 0x0e7a4fbd305fa0bb .quad 0x829d4ce054c663ad .quad 0xf421c3832fe33848 .quad 0x795ac80d1bf64c42 .quad 0xb36e706efc7c3484 .quad 0x73dfc9b4c3c1cf61 .quad 0xeb1d79c9781cc7e5 .quad 0x70459adb7daf675c .quad 0x1b91db4991b42bb3 .quad 0x572696234b02dcca .quad 0x9fdf9ee51f8c78dc .quad 0x5fe162848ce21fd3 // 2^52 * 1 * G .quad 0xe2790aae4d077c41 .quad 0x8b938270db7469a3 .quad 0x6eb632dc8abd16a2 .quad 0x720814ecaa064b72 .quad 0x315c29c795115389 .quad 0xd7e0e507862f74ce .quad 0x0c4a762185927432 .quad 0x72de6c984a25a1e4 .quad 0xae9ab553bf6aa310 .quad 0x050a50a9806d6e1b .quad 0x92bb7403adff5139 .quad 0x0394d27645be618b // 2^52 * 2 * G .quad 0x4d572251857eedf4 .quad 0xe3724edde19e93c5 .quad 0x8a71420e0b797035 .quad 0x3b3c833687abe743 .quad 0xf5396425b23545a4 .quad 0x15a7a27e98fbb296 .quad 0xab6c52bc636fdd86 .quad 0x79d995a8419334ee .quad 0xcd8a8ea61195dd75 .quad 0xa504d8a81dd9a82f .quad 0x540dca81a35879b6 .quad 0x60dd16a379c86a8a // 2^52 * 3 * G .quad 0x35a2c8487381e559 .quad 0x596ffea6d78082cb .quad 0xcb9771ebdba7b653 .quad 0x5a08b5019b4da685 .quad 
0x3501d6f8153e47b8 .quad 0xb7a9675414a2f60c .quad 0x112ee8b6455d9523 .quad 0x4e62a3c18112ea8a .quad 0xc8d4ac04516ab786 .quad 0x595af3215295b23d .quad 0xd6edd234db0230c1 .quad 0x0929efe8825b41cc // 2^52 * 4 * G .quad 0x5f0601d1cbd0f2d3 .quad 0x736e412f6132bb7f .quad 0x83604432238dde87 .quad 0x1e3a5272f5c0753c .quad 0x8b3172b7ad56651d .quad 0x01581b7a3fabd717 .quad 0x2dc94df6424df6e4 .quad 0x30376e5d2c29284f .quad 0xd2918da78159a59c .quad 0x6bdc1cd93f0713f3 .quad 0x565f7a934acd6590 .quad 0x53daacec4cb4c128 // 2^52 * 5 * G .quad 0x4ca73bd79cc8a7d6 .quad 0x4d4a738f47e9a9b2 .quad 0xf4cbf12942f5fe00 .quad 0x01a13ff9bdbf0752 .quad 0x99852bc3852cfdb0 .quad 0x2cc12e9559d6ed0b .quad 0x70f9e2bf9b5ac27b .quad 0x4f3b8c117959ae99 .quad 0x55b6c9c82ff26412 .quad 0x1ac4a8c91fb667a8 .quad 0xd527bfcfeb778bf2 .quad 0x303337da7012a3be // 2^52 * 6 * G .quad 0x955422228c1c9d7c .quad 0x01fac1371a9b340f .quad 0x7e8d9177925b48d7 .quad 0x53f8ad5661b3e31b .quad 0x976d3ccbfad2fdd1 .quad 0xcb88839737a640a8 .quad 0x2ff00c1d6734cb25 .quad 0x269ff4dc789c2d2b .quad 0x0c003fbdc08d678d .quad 0x4d982fa37ead2b17 .quad 0xc07e6bcdb2e582f1 .quad 0x296c7291df412a44 // 2^52 * 7 * G .quad 0x7903de2b33daf397 .quad 0xd0ff0619c9a624b3 .quad 0x8a1d252b555b3e18 .quad 0x2b6d581c52e0b7c0 .quad 0xdfb23205dab8b59e .quad 0x465aeaa0c8092250 .quad 0xd133c1189a725d18 .quad 0x2327370261f117d1 .quad 0x3d0543d3623e7986 .quad 0x679414c2c278a354 .quad 0xae43f0cc726196f6 .quad 0x7836c41f8245eaba // 2^52 * 8 * G .quad 0xe7a254db49e95a81 .quad 0x5192d5d008b0ad73 .quad 0x4d20e5b1d00afc07 .quad 0x5d55f8012cf25f38 .quad 0xca651e848011937c .quad 0xc6b0c46e6ef41a28 .quad 0xb7021ba75f3f8d52 .quad 0x119dff99ead7b9fd .quad 0x43eadfcbf4b31d4d .quad 0xc6503f7411148892 .quad 0xfeee68c5060d3b17 .quad 0x329293b3dd4a0ac8 // 2^56 * 1 * G .quad 0x4e59214fe194961a .quad 0x49be7dc70d71cd4f .quad 0x9300cfd23b50f22d .quad 0x4789d446fc917232 .quad 0x2879852d5d7cb208 .quad 0xb8dedd70687df2e7 .quad 0xdc0bffab21687891 .quad 0x2b44c043677daa35 .quad 0x1a1c87ab074eb78e .quad 0xfac6d18e99daf467 .quad 0x3eacbbcd484f9067 .quad 0x60c52eef2bb9a4e4 // 2^56 * 2 * G .quad 0x0b5d89bc3bfd8bf1 .quad 0xb06b9237c9f3551a .quad 0x0e4c16b0d53028f5 .quad 0x10bc9c312ccfcaab .quad 0x702bc5c27cae6d11 .quad 0x44c7699b54a48cab .quad 0xefbc4056ba492eb2 .quad 0x70d77248d9b6676d .quad 0xaa8ae84b3ec2a05b .quad 0x98699ef4ed1781e0 .quad 0x794513e4708e85d1 .quad 0x63755bd3a976f413 // 2^56 * 3 * G .quad 0xb55fa03e2ad10853 .quad 0x356f75909ee63569 .quad 0x9ff9f1fdbe69b890 .quad 0x0d8cc1c48bc16f84 .quad 0x3dc7101897f1acb7 .quad 0x5dda7d5ec165bbd8 .quad 0x508e5b9c0fa1020f .quad 0x2763751737c52a56 .quad 0x029402d36eb419a9 .quad 0xf0b44e7e77b460a5 .quad 0xcfa86230d43c4956 .quad 0x70c2dd8a7ad166e7 // 2^56 * 4 * G .quad 0x656194509f6fec0e .quad 0xee2e7ea946c6518d .quad 0x9733c1f367e09b5c .quad 0x2e0fac6363948495 .quad 0x91d4967db8ed7e13 .quad 0x74252f0ad776817a .quad 0xe40982e00d852564 .quad 0x32b8613816a53ce5 .quad 0x79e7f7bee448cd64 .quad 0x6ac83a67087886d0 .quad 0xf89fd4d9a0e4db2e .quad 0x4179215c735a4f41 // 2^56 * 5 * G .quad 0x8c7094e7d7dced2a .quad 0x97fb8ac347d39c70 .quad 0xe13be033a906d902 .quad 0x700344a30cd99d76 .quad 0xe4ae33b9286bcd34 .quad 0xb7ef7eb6559dd6dc .quad 0x278b141fb3d38e1f .quad 0x31fa85662241c286 .quad 0xaf826c422e3622f4 .quad 0xc12029879833502d .quad 0x9bc1b7e12b389123 .quad 0x24bb2312a9952489 // 2^56 * 6 * G .quad 0xb1a8ed1732de67c3 .quad 0x3cb49418461b4948 .quad 0x8ebd434376cfbcd2 .quad 0x0fee3e871e188008 .quad 0x41f80c2af5f85c6b .quad 0x687284c304fa6794 .quad 0x8945df99a3ba1bad .quad 
0x0d1d2af9ffeb5d16 .quad 0xa9da8aa132621edf .quad 0x30b822a159226579 .quad 0x4004197ba79ac193 .quad 0x16acd79718531d76 // 2^56 * 7 * G .quad 0x72df72af2d9b1d3d .quad 0x63462a36a432245a .quad 0x3ecea07916b39637 .quad 0x123e0ef6b9302309 .quad 0xc959c6c57887b6ad .quad 0x94e19ead5f90feba .quad 0x16e24e62a342f504 .quad 0x164ed34b18161700 .quad 0x487ed94c192fe69a .quad 0x61ae2cea3a911513 .quad 0x877bf6d3b9a4de27 .quad 0x78da0fc61073f3eb // 2^56 * 8 * G .quad 0x5bf15d28e52bc66a .quad 0x2c47e31870f01a8e .quad 0x2419afbc06c28bdd .quad 0x2d25deeb256b173a .quad 0xa29f80f1680c3a94 .quad 0x71f77e151ae9e7e6 .quad 0x1100f15848017973 .quad 0x054aa4b316b38ddd .quad 0xdfc8468d19267cb8 .quad 0x0b28789c66e54daf .quad 0x2aeb1d2a666eec17 .quad 0x134610a6ab7da760 // 2^60 * 1 * G .quad 0xcaf55ec27c59b23f .quad 0x99aeed3e154d04f2 .quad 0x68441d72e14141f4 .quad 0x140345133932a0a2 .quad 0xd91430e0dc028c3c .quad 0x0eb955a85217c771 .quad 0x4b09e1ed2c99a1fa .quad 0x42881af2bd6a743c .quad 0x7bfec69aab5cad3d .quad 0xc23e8cd34cb2cfad .quad 0x685dd14bfb37d6a2 .quad 0x0ad6d64415677a18 // 2^60 * 2 * G .quad 0x781a439e417becb5 .quad 0x4ac5938cd10e0266 .quad 0x5da385110692ac24 .quad 0x11b065a2ade31233 .quad 0x7914892847927e9f .quad 0x33dad6ef370aa877 .quad 0x1f8f24fa11122703 .quad 0x5265ac2f2adf9592 .quad 0x405fdd309afcb346 .quad 0xd9723d4428e63f54 .quad 0x94c01df05f65aaae .quad 0x43e4dc3ae14c0809 // 2^60 * 3 * G .quad 0xbc12c7f1a938a517 .quad 0x473028ab3180b2e1 .quad 0x3f78571efbcd254a .quad 0x74e534426ff6f90f .quad 0xea6f7ac3adc2c6a3 .quad 0xd0e928f6e9717c94 .quad 0xe2d379ead645eaf5 .quad 0x46dd8785c51ffbbe .quad 0x709801be375c8898 .quad 0x4b06dab5e3fd8348 .quad 0x75880ced27230714 .quad 0x2b09468fdd2f4c42 // 2^60 * 4 * G .quad 0x97c749eeb701cb96 .quad 0x83f438d4b6a369c3 .quad 0x62962b8b9a402cd9 .quad 0x6976c7509888df7b .quad 0x5b97946582ffa02a .quad 0xda096a51fea8f549 .quad 0xa06351375f77af9b .quad 0x1bcfde61201d1e76 .quad 0x4a4a5490246a59a2 .quad 0xd63ebddee87fdd90 .quad 0xd9437c670d2371fa .quad 0x69e87308d30f8ed6 // 2^60 * 5 * G .quad 0x435a8bb15656beb0 .quad 0xf8fac9ba4f4d5bca .quad 0xb9b278c41548c075 .quad 0x3eb0ef76e892b622 .quad 0x0f80bf028bc80303 .quad 0x6aae16b37a18cefb .quad 0xdd47ea47d72cd6a3 .quad 0x61943588f4ed39aa .quad 0xd26e5c3e91039f85 .quad 0xc0e9e77df6f33aa9 .quad 0xe8968c5570066a93 .quad 0x3c34d1881faaaddd // 2^60 * 6 * G .quad 0x3f9d2b5ea09f9ec0 .quad 0x1dab3b6fb623a890 .quad 0xa09ba3ea72d926c4 .quad 0x374193513fd8b36d .quad 0xbd5b0b8f2fffe0d9 .quad 0x6aa254103ed24fb9 .quad 0x2ac7d7bcb26821c4 .quad 0x605b394b60dca36a .quad 0xb4e856e45a9d1ed2 .quad 0xefe848766c97a9a2 .quad 0xb104cf641e5eee7d .quad 0x2f50b81c88a71c8f // 2^60 * 7 * G .quad 0x31723c61fc6811bb .quad 0x9cb450486211800f .quad 0x768933d347995753 .quad 0x3491a53502752fcd .quad 0x2b552ca0a7da522a .quad 0x3230b336449b0250 .quad 0xf2c4c5bca4b99fb9 .quad 0x7b2c674958074a22 .quad 0xd55165883ed28cdf .quad 0x12d84fd2d362de39 .quad 0x0a874ad3e3378e4f .quad 0x000d2b1f7c763e74 // 2^60 * 8 * G .quad 0x3d420811d06d4a67 .quad 0xbefc048590e0ffe3 .quad 0xf870c6b7bd487bde .quad 0x6e2a7316319afa28 .quad 0x9624778c3e94a8ab .quad 0x0ad6f3cee9a78bec .quad 0x948ac7810d743c4f .quad 0x76627935aaecfccc .quad 0x56a8ac24d6d59a9f .quad 0xc8db753e3096f006 .quad 0x477f41e68f4c5299 .quad 0x588d851cf6c86114 // 2^64 * 1 * G .quad 0x51138ec78df6b0fe .quad 0x5397da89e575f51b .quad 0x09207a1d717af1b9 .quad 0x2102fdba2b20d650 .quad 0xcd2a65e777d1f515 .quad 0x548991878faa60f1 .quad 0xb1b73bbcdabc06e5 .quad 0x654878cba97cc9fb .quad 0x969ee405055ce6a1 .quad 0x36bca7681251ad29 .quad 
0x3a1af517aa7da415 .quad 0x0ad725db29ecb2ba // 2^64 * 2 * G .quad 0xdc4267b1834e2457 .quad 0xb67544b570ce1bc5 .quad 0x1af07a0bf7d15ed7 .quad 0x4aefcffb71a03650 .quad 0xfec7bc0c9b056f85 .quad 0x537d5268e7f5ffd7 .quad 0x77afc6624312aefa .quad 0x4f675f5302399fd9 .quad 0xc32d36360415171e .quad 0xcd2bef118998483b .quad 0x870a6eadd0945110 .quad 0x0bccbb72a2a86561 // 2^64 * 3 * G .quad 0x185e962feab1a9c8 .quad 0x86e7e63565147dcd .quad 0xb092e031bb5b6df2 .quad 0x4024f0ab59d6b73e .quad 0x186d5e4c50fe1296 .quad 0xe0397b82fee89f7e .quad 0x3bc7f6c5507031b0 .quad 0x6678fd69108f37c2 .quad 0x1586fa31636863c2 .quad 0x07f68c48572d33f2 .quad 0x4f73cc9f789eaefc .quad 0x2d42e2108ead4701 // 2^64 * 4 * G .quad 0x97f5131594dfd29b .quad 0x6155985d313f4c6a .quad 0xeba13f0708455010 .quad 0x676b2608b8d2d322 .quad 0x21717b0d0f537593 .quad 0x914e690b131e064c .quad 0x1bb687ae752ae09f .quad 0x420bf3a79b423c6e .quad 0x8138ba651c5b2b47 .quad 0x8671b6ec311b1b80 .quad 0x7bff0cb1bc3135b0 .quad 0x745d2ffa9c0cf1e0 // 2^64 * 5 * G .quad 0xbf525a1e2bc9c8bd .quad 0xea5b260826479d81 .quad 0xd511c70edf0155db .quad 0x1ae23ceb960cf5d0 .quad 0x6036df5721d34e6a .quad 0xb1db8827997bb3d0 .quad 0xd3c209c3c8756afa .quad 0x06e15be54c1dc839 .quad 0x5b725d871932994a .quad 0x32351cb5ceb1dab0 .quad 0x7dc41549dab7ca05 .quad 0x58ded861278ec1f7 // 2^64 * 6 * G .quad 0xd8173793f266c55c .quad 0xc8c976c5cc454e49 .quad 0x5ce382f8bc26c3a8 .quad 0x2ff39de85485f6f9 .quad 0x2dfb5ba8b6c2c9a8 .quad 0x48eeef8ef52c598c .quad 0x33809107f12d1573 .quad 0x08ba696b531d5bd8 .quad 0x77ed3eeec3efc57a .quad 0x04e05517d4ff4811 .quad 0xea3d7a3ff1a671cb .quad 0x120633b4947cfe54 // 2^64 * 7 * G .quad 0x0b94987891610042 .quad 0x4ee7b13cecebfae8 .quad 0x70be739594f0a4c0 .quad 0x35d30a99b4d59185 .quad 0x82bd31474912100a .quad 0xde237b6d7e6fbe06 .quad 0xe11e761911ea79c6 .quad 0x07433be3cb393bde .quad 0xff7944c05ce997f4 .quad 0x575d3de4b05c51a3 .quad 0x583381fd5a76847c .quad 0x2d873ede7af6da9f // 2^64 * 8 * G .quad 0x157a316443373409 .quad 0xfab8b7eef4aa81d9 .quad 0xb093fee6f5a64806 .quad 0x2e773654707fa7b6 .quad 0xaa6202e14e5df981 .quad 0xa20d59175015e1f5 .quad 0x18a275d3bae21d6c .quad 0x0543618a01600253 .quad 0x0deabdf4974c23c1 .quad 0xaa6f0a259dce4693 .quad 0x04202cb8a29aba2c .quad 0x4b1443362d07960d // 2^68 * 1 * G .quad 0x47b837f753242cec .quad 0x256dc48cc04212f2 .quad 0xe222fbfbe1d928c5 .quad 0x48ea295bad8a2c07 .quad 0x299b1c3f57c5715e .quad 0x96cb929e6b686d90 .quad 0x3004806447235ab3 .quad 0x2c435c24a44d9fe1 .quad 0x0607c97c80f8833f .quad 0x0e851578ca25ec5b .quad 0x54f7450b161ebb6f .quad 0x7bcb4792a0def80e // 2^68 * 2 * G .quad 0x8487e3d02bc73659 .quad 0x4baf8445059979df .quad 0xd17c975adcad6fbf .quad 0x57369f0bdefc96b6 .quad 0x1cecd0a0045224c2 .quad 0x757f1b1b69e53952 .quad 0x775b7a925289f681 .quad 0x1b6cc62016736148 .quad 0xf1a9990175638698 .quad 0x353dd1beeeaa60d3 .quad 0x849471334c9ba488 .quad 0x63fa6e6843ade311 // 2^68 * 3 * G .quad 0xd15c20536597c168 .quad 0x9f73740098d28789 .quad 0x18aee7f13257ba1f .quad 0x3418bfda07346f14 .quad 0x2195becdd24b5eb7 .quad 0x5e41f18cc0cd44f9 .quad 0xdf28074441ca9ede .quad 0x07073b98f35b7d67 .quad 0xd03c676c4ce530d4 .quad 0x0b64c0473b5df9f4 .quad 0x065cef8b19b3a31e .quad 0x3084d661533102c9 // 2^68 * 4 * G .quad 0xe1f6b79ebf8469ad .quad 0x15801004e2663135 .quad 0x9a498330af74181b .quad 0x3ba2504f049b673c .quad 0x9a6ce876760321fd .quad 0x7fe2b5109eb63ad8 .quad 0x00e7d4ae8ac80592 .quad 0x73d86b7abb6f723a .quad 0x0b52b5606dba5ab6 .quad 0xa9134f0fbbb1edab .quad 0x30a9520d9b04a635 .quad 0x6813b8f37973e5db // 2^68 * 5 * G .quad 
0x9854b054334127c1 .quad 0x105d047882fbff25 .quad 0xdb49f7f944186f4f .quad 0x1768e838bed0b900 .quad 0xf194ca56f3157e29 .quad 0x136d35705ef528a5 .quad 0xdd4cef778b0599bc .quad 0x7d5472af24f833ed .quad 0xd0ef874daf33da47 .quad 0x00d3be5db6e339f9 .quad 0x3f2a8a2f9c9ceece .quad 0x5d1aeb792352435a // 2^68 * 6 * G .quad 0xf59e6bb319cd63ca .quad 0x670c159221d06839 .quad 0xb06d565b2150cab6 .quad 0x20fb199d104f12a3 .quad 0x12c7bfaeb61ba775 .quad 0xb84e621fe263bffd .quad 0x0b47a5c35c840dcf .quad 0x7e83be0bccaf8634 .quad 0x61943dee6d99c120 .quad 0x86101f2e460b9fe0 .quad 0x6bb2f1518ee8598d .quad 0x76b76289fcc475cc // 2^68 * 7 * G .quad 0x791b4cc1756286fa .quad 0xdbced317d74a157c .quad 0x7e732421ea72bde6 .quad 0x01fe18491131c8e9 .quad 0x4245f1a1522ec0b3 .quad 0x558785b22a75656d .quad 0x1d485a2548a1b3c0 .quad 0x60959eccd58fe09f .quad 0x3ebfeb7ba8ed7a09 .quad 0x49fdc2bbe502789c .quad 0x44ebce5d3c119428 .quad 0x35e1eb55be947f4a // 2^68 * 8 * G .quad 0xdbdae701c5738dd3 .quad 0xf9c6f635b26f1bee .quad 0x61e96a8042f15ef4 .quad 0x3aa1d11faf60a4d8 .quad 0x14fd6dfa726ccc74 .quad 0x3b084cfe2f53b965 .quad 0xf33ae4f552a2c8b4 .quad 0x59aab07a0d40166a .quad 0x77bcec4c925eac25 .quad 0x1848718460137738 .quad 0x5b374337fea9f451 .quad 0x1865e78ec8e6aa46 // 2^72 * 1 * G .quad 0xccc4b7c7b66e1f7a .quad 0x44157e25f50c2f7e .quad 0x3ef06dfc713eaf1c .quad 0x582f446752da63f7 .quad 0x967c54e91c529ccb .quad 0x30f6269264c635fb .quad 0x2747aff478121965 .quad 0x17038418eaf66f5c .quad 0xc6317bd320324ce4 .quad 0xa81042e8a4488bc4 .quad 0xb21ef18b4e5a1364 .quad 0x0c2a1c4bcda28dc9 // 2^72 * 2 * G .quad 0xd24dc7d06f1f0447 .quad 0xb2269e3edb87c059 .quad 0xd15b0272fbb2d28f .quad 0x7c558bd1c6f64877 .quad 0xedc4814869bd6945 .quad 0x0d6d907dbe1c8d22 .quad 0xc63bd212d55cc5ab .quad 0x5a6a9b30a314dc83 .quad 0xd0ec1524d396463d .quad 0x12bb628ac35a24f0 .quad 0xa50c3a791cbc5fa4 .quad 0x0404a5ca0afbafc3 // 2^72 * 3 * G .quad 0x8c1f40070aa743d6 .quad 0xccbad0cb5b265ee8 .quad 0x574b046b668fd2de .quad 0x46395bfdcadd9633 .quad 0x62bc9e1b2a416fd1 .quad 0xb5c6f728e350598b .quad 0x04343fd83d5d6967 .quad 0x39527516e7f8ee98 .quad 0x117fdb2d1a5d9a9c .quad 0x9c7745bcd1005c2a .quad 0xefd4bef154d56fea .quad 0x76579a29e822d016 // 2^72 * 4 * G .quad 0x45b68e7e49c02a17 .quad 0x23cd51a2bca9a37f .quad 0x3ed65f11ec224c1b .quad 0x43a384dc9e05bdb1 .quad 0x333cb51352b434f2 .quad 0xd832284993de80e1 .quad 0xb5512887750d35ce .quad 0x02c514bb2a2777c1 .quad 0x684bd5da8bf1b645 .quad 0xfb8bd37ef6b54b53 .quad 0x313916d7a9b0d253 .quad 0x1160920961548059 // 2^72 * 5 * G .quad 0xb44d166929dacfaa .quad 0xda529f4c8413598f .quad 0xe9ef63ca453d5559 .quad 0x351e125bc5698e0b .quad 0x7a385616369b4dcd .quad 0x75c02ca7655c3563 .quad 0x7dc21bf9d4f18021 .quad 0x2f637d7491e6e042 .quad 0xd4b49b461af67bbe .quad 0xd603037ac8ab8961 .quad 0x71dee19ff9a699fb .quad 0x7f182d06e7ce2a9a // 2^72 * 6 * G .quad 0x7a7c8e64ab0168ec .quad 0xcb5a4a5515edc543 .quad 0x095519d347cd0eda .quad 0x67d4ac8c343e93b0 .quad 0x09454b728e217522 .quad 0xaa58e8f4d484b8d8 .quad 0xd358254d7f46903c .quad 0x44acc043241c5217 .quad 0x1c7d6bbb4f7a5777 .quad 0x8b35fed4918313e1 .quad 0x4adca1c6c96b4684 .quad 0x556d1c8312ad71bd // 2^72 * 7 * G .quad 0x17ef40e30c8d3982 .quad 0x31f7073e15a3fa34 .quad 0x4f21f3cb0773646e .quad 0x746c6c6d1d824eff .quad 0x81f06756b11be821 .quad 0x0faff82310a3f3dd .quad 0xf8b2d0556a99465d .quad 0x097abe38cc8c7f05 .quad 0x0c49c9877ea52da4 .quad 0x4c4369559bdc1d43 .quad 0x022c3809f7ccebd2 .quad 0x577e14a34bee84bd // 2^72 * 8 * G .quad 0xf0e268ac61a73b0a .quad 0xf2fafa103791a5f5 .quad 0xc1e13e826b6d00e9 .quad 
0x60fa7ee96fd78f42 .quad 0x94fecebebd4dd72b .quad 0xf46a4fda060f2211 .quad 0x124a5977c0c8d1ff .quad 0x705304b8fb009295 .quad 0xb63d1d354d296ec6 .quad 0xf3c3053e5fad31d8 .quad 0x670b958cb4bd42ec .quad 0x21398e0ca16353fd // 2^76 * 1 * G .quad 0x216ab2ca8da7d2ef .quad 0x366ad9dd99f42827 .quad 0xae64b9004fdd3c75 .quad 0x403a395b53909e62 .quad 0x86c5fc16861b7e9a .quad 0xf6a330476a27c451 .quad 0x01667267a1e93597 .quad 0x05ffb9cd6082dfeb .quad 0xa617fa9ff53f6139 .quad 0x60f2b5e513e66cb6 .quad 0xd7a8beefb3448aa4 .quad 0x7a2932856f5ea192 // 2^76 * 2 * G .quad 0x0b39d761b02de888 .quad 0x5f550e7ed2414e1f .quad 0xa6bfa45822e1a940 .quad 0x050a2f7dfd447b99 .quad 0xb89c444879639302 .quad 0x4ae4f19350c67f2c .quad 0xf0b35da8c81af9c6 .quad 0x39d0003546871017 .quad 0x437c3b33a650db77 .quad 0x6bafe81dbac52bb2 .quad 0xfe99402d2db7d318 .quad 0x2b5b7eec372ba6ce // 2^76 * 3 * G .quad 0xb3bc4bbd83f50eef .quad 0x508f0c998c927866 .quad 0x43e76587c8b7e66e .quad 0x0f7655a3a47f98d9 .quad 0xa694404d613ac8f4 .quad 0x500c3c2bfa97e72c .quad 0x874104d21fcec210 .quad 0x1b205fb38604a8ee .quad 0x55ecad37d24b133c .quad 0x441e147d6038c90b .quad 0x656683a1d62c6fee .quad 0x0157d5dc87e0ecae // 2^76 * 4 * G .quad 0xf2a7af510354c13d .quad 0xd7a0b145aa372b60 .quad 0x2869b96a05a3d470 .quad 0x6528e42d82460173 .quad 0x95265514d71eb524 .quad 0xe603d8815df14593 .quad 0x147cdf410d4de6b7 .quad 0x5293b1730437c850 .quad 0x23d0e0814bccf226 .quad 0x92c745cd8196fb93 .quad 0x8b61796c59541e5b .quad 0x40a44df0c021f978 // 2^76 * 5 * G .quad 0xdaa869894f20ea6a .quad 0xea14a3d14c620618 .quad 0x6001fccb090bf8be .quad 0x35f4e822947e9cf0 .quad 0x86c96e514bc5d095 .quad 0xf20d4098fca6804a .quad 0x27363d89c826ea5d .quad 0x39ca36565719cacf .quad 0x97506f2f6f87b75c .quad 0xc624aea0034ae070 .quad 0x1ec856e3aad34dd6 .quad 0x055b0be0e440e58f // 2^76 * 6 * G .quad 0x6469a17d89735d12 .quad 0xdb6f27d5e662b9f1 .quad 0x9fcba3286a395681 .quad 0x363b8004d269af25 .quad 0x4d12a04b6ea33da2 .quad 0x57cf4c15e36126dd .quad 0x90ec9675ee44d967 .quad 0x64ca348d2a985aac .quad 0x99588e19e4c4912d .quad 0xefcc3b4e1ca5ce6b .quad 0x4522ea60fa5b98d5 .quad 0x7064bbab1de4a819 // 2^76 * 7 * G .quad 0xb919e1515a770641 .quad 0xa9a2e2c74e7f8039 .quad 0x7527250b3df23109 .quad 0x756a7330ac27b78b .quad 0xa290c06142542129 .quad 0xf2e2c2aebe8d5b90 .quad 0xcf2458db76abfe1b .quad 0x02157ade83d626bf .quad 0x3e46972a1b9a038b .quad 0x2e4ee66a7ee03fb4 .quad 0x81a248776edbb4ca .quad 0x1a944ee88ecd0563 // 2^76 * 8 * G .quad 0xd5a91d1151039372 .quad 0x2ed377b799ca26de .quad 0xa17202acfd366b6b .quad 0x0730291bd6901995 .quad 0xbb40a859182362d6 .quad 0xb99f55778a4d1abb .quad 0x8d18b427758559f6 .quad 0x26c20fe74d26235a .quad 0x648d1d9fe9cc22f5 .quad 0x66bc561928dd577c .quad 0x47d3ed21652439d1 .quad 0x49d271acedaf8b49 // 2^80 * 1 * G .quad 0x89f5058a382b33f3 .quad 0x5ae2ba0bad48c0b4 .quad 0x8f93b503a53db36e .quad 0x5aa3ed9d95a232e6 .quad 0x2798aaf9b4b75601 .quad 0x5eac72135c8dad72 .quad 0xd2ceaa6161b7a023 .quad 0x1bbfb284e98f7d4e .quad 0x656777e9c7d96561 .quad 0xcb2b125472c78036 .quad 0x65053299d9506eee .quad 0x4a07e14e5e8957cc // 2^80 * 2 * G .quad 0x4ee412cb980df999 .quad 0xa315d76f3c6ec771 .quad 0xbba5edde925c77fd .quad 0x3f0bac391d313402 .quad 0x240b58cdc477a49b .quad 0xfd38dade6447f017 .quad 0x19928d32a7c86aad .quad 0x50af7aed84afa081 .quad 0x6e4fde0115f65be5 .quad 0x29982621216109b2 .quad 0x780205810badd6d9 .quad 0x1921a316baebd006 // 2^80 * 3 * G .quad 0x89422f7edfb870fc .quad 0x2c296beb4f76b3bd .quad 0x0738f1d436c24df7 .quad 0x6458df41e273aeb0 .quad 0xd75aad9ad9f3c18b .quad 0x566a0eef60b1c19c .quad 
0x3e9a0bac255c0ed9 .quad 0x7b049deca062c7f5 .quad 0xdccbe37a35444483 .quad 0x758879330fedbe93 .quad 0x786004c312c5dd87 .quad 0x6093dccbc2950e64 // 2^80 * 4 * G .quad 0x1ff39a8585e0706d .quad 0x36d0a5d8b3e73933 .quad 0x43b9f2e1718f453b .quad 0x57d1ea084827a97c .quad 0x6bdeeebe6084034b .quad 0x3199c2b6780fb854 .quad 0x973376abb62d0695 .quad 0x6e3180c98b647d90 .quad 0xee7ab6e7a128b071 .quad 0xa4c1596d93a88baa .quad 0xf7b4de82b2216130 .quad 0x363e999ddd97bd18 // 2^80 * 5 * G .quad 0x96a843c135ee1fc4 .quad 0x976eb35508e4c8cf .quad 0xb42f6801b58cd330 .quad 0x48ee9b78693a052b .quad 0x2f1848dce24baec6 .quad 0x769b7255babcaf60 .quad 0x90cb3c6e3cefe931 .quad 0x231f979bc6f9b355 .quad 0x5c31de4bcc2af3c6 .quad 0xb04bb030fe208d1f .quad 0xb78d7009c14fb466 .quad 0x079bfa9b08792413 // 2^80 * 6 * G .quad 0xe3903a51da300df4 .quad 0x843964233da95ab0 .quad 0xed3cf12d0b356480 .quad 0x038c77f684817194 .quad 0xf3c9ed80a2d54245 .quad 0x0aa08b7877f63952 .quad 0xd76dac63d1085475 .quad 0x1ef4fb159470636b .quad 0x854e5ee65b167bec .quad 0x59590a4296d0cdc2 .quad 0x72b2df3498102199 .quad 0x575ee92a4a0bff56 // 2^80 * 7 * G .quad 0xd4c080908a182fcf .quad 0x30e170c299489dbd .quad 0x05babd5752f733de .quad 0x43d4e7112cd3fd00 .quad 0x5d46bc450aa4d801 .quad 0xc3af1227a533b9d8 .quad 0x389e3b262b8906c2 .quad 0x200a1e7e382f581b .quad 0x518db967eaf93ac5 .quad 0x71bc989b056652c0 .quad 0xfe2b85d9567197f5 .quad 0x050eca52651e4e38 // 2^80 * 8 * G .quad 0xc3431ade453f0c9c .quad 0xe9f5045eff703b9b .quad 0xfcd97ac9ed847b3d .quad 0x4b0ee6c21c58f4c6 .quad 0x97ac397660e668ea .quad 0x9b19bbfe153ab497 .quad 0x4cb179b534eca79f .quad 0x6151c09fa131ae57 .quad 0x3af55c0dfdf05d96 .quad 0xdd262ee02ab4ee7a .quad 0x11b2bb8712171709 .quad 0x1fef24fa800f030b // 2^84 * 1 * G .quad 0xb496123a6b6c6609 .quad 0xa750fe8580ab5938 .quad 0xf471bf39b7c27a5f .quad 0x507903ce77ac193c .quad 0xff91a66a90166220 .quad 0xf22552ae5bf1e009 .quad 0x7dff85d87f90df7c .quad 0x4f620ffe0c736fb9 .quad 0x62f90d65dfde3e34 .quad 0xcf28c592b9fa5fad .quad 0x99c86ef9c6164510 .quad 0x25d448044a256c84 // 2^84 * 2 * G .quad 0xbd68230ec7e9b16f .quad 0x0eb1b9c1c1c5795d .quad 0x7943c8c495b6b1ff .quad 0x2f9faf620bbacf5e .quad 0x2c7c4415c9022b55 .quad 0x56a0d241812eb1fe .quad 0xf02ea1c9d7b65e0d .quad 0x4180512fd5323b26 .quad 0xa4ff3e698a48a5db .quad 0xba6a3806bd95403b .quad 0x9f7ce1af47d5b65d .quad 0x15e087e55939d2fb // 2^84 * 3 * G .quad 0x12207543745c1496 .quad 0xdaff3cfdda38610c .quad 0xe4e797272c71c34f .quad 0x39c07b1934bdede9 .quad 0x8894186efb963f38 .quad 0x48a00e80dc639bd5 .quad 0xa4e8092be96c1c99 .quad 0x5a097d54ca573661 .quad 0x2d45892b17c9e755 .quad 0xd033fd7289308df8 .quad 0x6c2fe9d9525b8bd9 .quad 0x2edbecf1c11cc079 // 2^84 * 4 * G .quad 0x1616a4e3c715a0d2 .quad 0x53623cb0f8341d4d .quad 0x96ef5329c7e899cb .quad 0x3d4e8dbba668baa6 .quad 0xee0f0fddd087a25f .quad 0x9c7531555c3e34ee .quad 0x660c572e8fab3ab5 .quad 0x0854fc44544cd3b2 .quad 0x61eba0c555edad19 .quad 0x24b533fef0a83de6 .quad 0x3b77042883baa5f8 .quad 0x678f82b898a47e8d // 2^84 * 5 * G .quad 0xb1491d0bd6900c54 .quad 0x3539722c9d132636 .quad 0x4db928920b362bc9 .quad 0x4d7cd1fea68b69df .quad 0x1e09d94057775696 .quad 0xeed1265c3cd951db .quad 0xfa9dac2b20bce16f .quad 0x0f7f76e0e8d089f4 .quad 0x36d9ebc5d485b00c .quad 0xa2596492e4adb365 .quad 0xc1659480c2119ccd .quad 0x45306349186e0d5f // 2^84 * 6 * G .quad 0x94ddd0c1a6cdff1d .quad 0x55f6f115e84213ae .quad 0x6c935f85992fcf6a .quad 0x067ee0f54a37f16f .quad 0x96a414ec2b072491 .quad 0x1bb2218127a7b65b .quad 0x6d2849596e8a4af0 .quad 0x65f3b08ccd27765f .quad 0xecb29fff199801f7 .quad 
0x9d361d1fa2a0f72f .quad 0x25f11d2375fd2f49 .quad 0x124cefe80fe10fe2 // 2^84 * 7 * G .quad 0x4c126cf9d18df255 .quad 0xc1d471e9147a63b6 .quad 0x2c6d3c73f3c93b5f .quad 0x6be3a6a2e3ff86a2 .quad 0x1518e85b31b16489 .quad 0x8faadcb7db710bfb .quad 0x39b0bdf4a14ae239 .quad 0x05f4cbea503d20c1 .quad 0xce040e9ec04145bc .quad 0xc71ff4e208f6834c .quad 0xbd546e8dab8847a3 .quad 0x64666aa0a4d2aba5 // 2^84 * 8 * G .quad 0x6841435a7c06d912 .quad 0xca123c21bb3f830b .quad 0xd4b37b27b1cbe278 .quad 0x1d753b84c76f5046 .quad 0xb0c53bf73337e94c .quad 0x7cb5697e11e14f15 .quad 0x4b84abac1930c750 .quad 0x28dd4abfe0640468 .quad 0x7dc0b64c44cb9f44 .quad 0x18a3e1ace3925dbf .quad 0x7a3034862d0457c4 .quad 0x4c498bf78a0c892e // 2^88 * 1 * G .quad 0x37d653fb1aa73196 .quad 0x0f9495303fd76418 .quad 0xad200b09fb3a17b2 .quad 0x544d49292fc8613e .quad 0x22d2aff530976b86 .quad 0x8d90b806c2d24604 .quad 0xdca1896c4de5bae5 .quad 0x28005fe6c8340c17 .quad 0x6aefba9f34528688 .quad 0x5c1bff9425107da1 .quad 0xf75bbbcd66d94b36 .quad 0x72e472930f316dfa // 2^88 * 2 * G .quad 0x2695208c9781084f .quad 0xb1502a0b23450ee1 .quad 0xfd9daea603efde02 .quad 0x5a9d2e8c2733a34c .quad 0x07f3f635d32a7627 .quad 0x7aaa4d865f6566f0 .quad 0x3c85e79728d04450 .quad 0x1fee7f000fe06438 .quad 0x765305da03dbf7e5 .quad 0xa4daf2491434cdbd .quad 0x7b4ad5cdd24a88ec .quad 0x00f94051ee040543 // 2^88 * 3 * G .quad 0x8d356b23c3d330b2 .quad 0xf21c8b9bb0471b06 .quad 0xb36c316c6e42b83c .quad 0x07d79c7e8beab10d .quad 0xd7ef93bb07af9753 .quad 0x583ed0cf3db766a7 .quad 0xce6998bf6e0b1ec5 .quad 0x47b7ffd25dd40452 .quad 0x87fbfb9cbc08dd12 .quad 0x8a066b3ae1eec29b .quad 0x0d57242bdb1fc1bf .quad 0x1c3520a35ea64bb6 // 2^88 * 4 * G .quad 0x80d253a6bccba34a .quad 0x3e61c3a13838219b .quad 0x90c3b6019882e396 .quad 0x1c3d05775d0ee66f .quad 0xcda86f40216bc059 .quad 0x1fbb231d12bcd87e .quad 0xb4956a9e17c70990 .quad 0x38750c3b66d12e55 .quad 0x692ef1409422e51a .quad 0xcbc0c73c2b5df671 .quad 0x21014fe7744ce029 .quad 0x0621e2c7d330487c // 2^88 * 5 * G .quad 0xaf9860cc8259838d .quad 0x90ea48c1c69f9adc .quad 0x6526483765581e30 .quad 0x0007d6097bd3a5bc .quad 0xb7ae1796b0dbf0f3 .quad 0x54dfafb9e17ce196 .quad 0x25923071e9aaa3b4 .quad 0x5d8e589ca1002e9d .quad 0xc0bf1d950842a94b .quad 0xb2d3c363588f2e3e .quad 0x0a961438bb51e2ef .quad 0x1583d7783c1cbf86 // 2^88 * 6 * G .quad 0xeceea2ef5da27ae1 .quad 0x597c3a1455670174 .quad 0xc9a62a126609167a .quad 0x252a5f2e81ed8f70 .quad 0x90034704cc9d28c7 .quad 0x1d1b679ef72cc58f .quad 0x16e12b5fbe5b8726 .quad 0x4958064e83c5580a .quad 0x0d2894265066e80d .quad 0xfcc3f785307c8c6b .quad 0x1b53da780c1112fd .quad 0x079c170bd843b388 // 2^88 * 7 * G .quad 0x0506ece464fa6fff .quad 0xbee3431e6205e523 .quad 0x3579422451b8ea42 .quad 0x6dec05e34ac9fb00 .quad 0xcdd6cd50c0d5d056 .quad 0x9af7686dbb03573b .quad 0x3ca6723ff3c3ef48 .quad 0x6768c0d7317b8acc .quad 0x94b625e5f155c1b3 .quad 0x417bf3a7997b7b91 .quad 0xc22cbddc6d6b2600 .quad 0x51445e14ddcd52f4 // 2^88 * 8 * G .quad 0x57502b4b3b144951 .quad 0x8e67ff6b444bbcb3 .quad 0xb8bd6927166385db .quad 0x13186f31e39295c8 .quad 0x893147ab2bbea455 .quad 0x8c53a24f92079129 .quad 0x4b49f948be30f7a7 .quad 0x12e990086e4fd43d .quad 0xf10c96b37fdfbb2e .quad 0x9f9a935e121ceaf9 .quad 0xdf1136c43a5b983f .quad 0x77b2e3f05d3e99af // 2^92 * 1 * G .quad 0xfd0d75879cf12657 .quad 0xe82fef94e53a0e29 .quad 0xcc34a7f05bbb4be7 .quad 0x0b251172a50c38a2 .quad 0x9532f48fcc5cd29b .quad 0x2ba851bea3ce3671 .quad 0x32dacaa051122941 .quad 0x478d99d9350004f2 .quad 0x1d5ad94890bb02c0 .quad 0x50e208b10ec25115 .quad 0xa26a22894ef21702 .quad 0x4dc923343b524805 // 2^92 * 
2 * G .quad 0xe3828c400f8086b6 .quad 0x3f77e6f7979f0dc8 .quad 0x7ef6de304df42cb4 .quad 0x5265797cb6abd784 .quad 0x3ad3e3ebf36c4975 .quad 0xd75d25a537862125 .quad 0xe873943da025a516 .quad 0x6bbc7cb4c411c847 .quad 0x3c6f9cd1d4a50d56 .quad 0xb6244077c6feab7e .quad 0x6ff9bf483580972e .quad 0x00375883b332acfb // 2^92 * 3 * G .quad 0x0001b2cd28cb0940 .quad 0x63fb51a06f1c24c9 .quad 0xb5ad8691dcd5ca31 .quad 0x67238dbd8c450660 .quad 0xc98bec856c75c99c .quad 0xe44184c000e33cf4 .quad 0x0a676b9bba907634 .quad 0x669e2cb571f379d7 .quad 0xcb116b73a49bd308 .quad 0x025aad6b2392729e .quad 0xb4793efa3f55d9b1 .quad 0x72a1056140678bb9 // 2^92 * 4 * G .quad 0xa2b6812b1cc9249d .quad 0x62866eee21211f58 .quad 0x2cb5c5b85df10ece .quad 0x03a6b259e263ae00 .quad 0x0d8d2909e2e505b6 .quad 0x98ca78abc0291230 .quad 0x77ef5569a9b12327 .quad 0x7c77897b81439b47 .quad 0xf1c1b5e2de331cb5 .quad 0x5a9f5d8e15fca420 .quad 0x9fa438f17bd932b1 .quad 0x2a381bf01c6146e7 // 2^92 * 5 * G .quad 0xac9b9879cfc811c1 .quad 0x8b7d29813756e567 .quad 0x50da4e607c70edfc .quad 0x5dbca62f884400b6 .quad 0xf7c0be32b534166f .quad 0x27e6ca6419cf70d4 .quad 0x934df7d7a957a759 .quad 0x5701461dabdec2aa .quad 0x2c6747402c915c25 .quad 0x1bdcd1a80b0d340a .quad 0x5e5601bd07b43f5f .quad 0x2555b4e05539a242 // 2^92 * 6 * G .quad 0x6fc09f5266ddd216 .quad 0xdce560a7c8e37048 .quad 0xec65939da2df62fd .quad 0x7a869ae7e52ed192 .quad 0x78409b1d87e463d4 .quad 0xad4da95acdfb639d .quad 0xec28773755259b9c .quad 0x69c806e9c31230ab .quad 0x7b48f57414bb3f22 .quad 0x68c7cee4aedccc88 .quad 0xed2f936179ed80be .quad 0x25d70b885f77bc4b // 2^92 * 7 * G .quad 0x4151c3d9762bf4de .quad 0x083f435f2745d82b .quad 0x29775a2e0d23ddd5 .quad 0x138e3a6269a5db24 .quad 0x98459d29bb1ae4d4 .quad 0x56b9c4c739f954ec .quad 0x832743f6c29b4b3e .quad 0x21ea8e2798b6878a .quad 0x87bef4b46a5a7b9c .quad 0xd2299d1b5fc1d062 .quad 0x82409818dd321648 .quad 0x5c5abeb1e5a2e03d // 2^92 * 8 * G .quad 0x14722af4b73c2ddb .quad 0xbc470c5f5a05060d .quad 0x00943eac2581b02e .quad 0x0e434b3b1f499c8f .quad 0x02cde6de1306a233 .quad 0x7b5a52a2116f8ec7 .quad 0xe1c681f4c1163b5b .quad 0x241d350660d32643 .quad 0x6be4404d0ebc52c7 .quad 0xae46233bb1a791f5 .quad 0x2aec170ed25db42b .quad 0x1d8dfd966645d694 // 2^96 * 1 * G .quad 0x296fa9c59c2ec4de .quad 0xbc8b61bf4f84f3cb .quad 0x1c7706d917a8f908 .quad 0x63b795fc7ad3255d .quad 0xd598639c12ddb0a4 .quad 0xa5d19f30c024866b .quad 0xd17c2f0358fce460 .quad 0x07a195152e095e8a .quad 0xa8368f02389e5fc8 .quad 0x90433b02cf8de43b .quad 0xafa1fd5dc5412643 .quad 0x3e8fe83d032f0137 // 2^96 * 2 * G .quad 0x2f8b15b90570a294 .quad 0x94f2427067084549 .quad 0xde1c5ae161bbfd84 .quad 0x75ba3b797fac4007 .quad 0x08704c8de8efd13c .quad 0xdfc51a8e33e03731 .quad 0xa59d5da51260cde3 .quad 0x22d60899a6258c86 .quad 0x6239dbc070cdd196 .quad 0x60fe8a8b6c7d8a9a .quad 0xb38847bceb401260 .quad 0x0904d07b87779e5e // 2^96 * 3 * G .quad 0xb4ce1fd4ddba919c .quad 0xcf31db3ec74c8daa .quad 0x2c63cc63ad86cc51 .quad 0x43e2143fbc1dde07 .quad 0xf4322d6648f940b9 .quad 0x06952f0cbd2d0c39 .quad 0x167697ada081f931 .quad 0x6240aacebaf72a6c .quad 0xf834749c5ba295a0 .quad 0xd6947c5bca37d25a .quad 0x66f13ba7e7c9316a .quad 0x56bdaf238db40cac // 2^96 * 4 * G .quad 0x362ab9e3f53533eb .quad 0x338568d56eb93d40 .quad 0x9e0e14521d5a5572 .quad 0x1d24a86d83741318 .quad 0x1310d36cc19d3bb2 .quad 0x062a6bb7622386b9 .quad 0x7c9b8591d7a14f5c .quad 0x03aa31507e1e5754 .quad 0xf4ec7648ffd4ce1f .quad 0xe045eaf054ac8c1c .quad 0x88d225821d09357c .quad 0x43b261dc9aeb4859 // 2^96 * 5 * G .quad 0xe55b1e1988bb79bb .quad 0xa09ed07dc17a359d .quad 
0xb02c2ee2603dea33 .quad 0x326055cf5b276bc2 .quad 0x19513d8b6c951364 .quad 0x94fe7126000bf47b .quad 0x028d10ddd54f9567 .quad 0x02b4d5e242940964 .quad 0xb4a155cb28d18df2 .quad 0xeacc4646186ce508 .quad 0xc49cf4936c824389 .quad 0x27a6c809ae5d3410 // 2^96 * 6 * G .quad 0x8ba6ebcd1f0db188 .quad 0x37d3d73a675a5be8 .quad 0xf22edfa315f5585a .quad 0x2cb67174ff60a17e .quad 0xcd2c270ac43d6954 .quad 0xdd4a3e576a66cab2 .quad 0x79fa592469d7036c .quad 0x221503603d8c2599 .quad 0x59eecdf9390be1d0 .quad 0xa9422044728ce3f1 .quad 0x82891c667a94f0f4 .quad 0x7b1df4b73890f436 // 2^96 * 7 * G .quad 0xe492f2e0b3b2a224 .quad 0x7c6c9e062b551160 .quad 0x15eb8fe20d7f7b0e .quad 0x61fcef2658fc5992 .quad 0x5f2e221807f8f58c .quad 0xe3555c9fd49409d4 .quad 0xb2aaa88d1fb6a630 .quad 0x68698245d352e03d .quad 0xdbb15d852a18187a .quad 0xf3e4aad386ddacd7 .quad 0x44bae2810ff6c482 .quad 0x46cf4c473daf01cf // 2^96 * 8 * G .quad 0x426525ed9ec4e5f9 .quad 0x0e5eda0116903303 .quad 0x72b1a7f2cbe5cadc .quad 0x29387bcd14eb5f40 .quad 0x213c6ea7f1498140 .quad 0x7c1e7ef8392b4854 .quad 0x2488c38c5629ceba .quad 0x1065aae50d8cc5bb .quad 0x1c2c4525df200d57 .quad 0x5c3b2dd6bfca674a .quad 0x0a07e7b1e1834030 .quad 0x69a198e64f1ce716 // 2^100 * 1 * G .quad 0x7afcd613efa9d697 .quad 0x0cc45aa41c067959 .quad 0xa56fe104c1fada96 .quad 0x3a73b70472e40365 .quad 0x7b26e56b9e2d4734 .quad 0xc4c7132b81c61675 .quad 0xef5c9525ec9cde7f .quad 0x39c80b16e71743ad .quad 0x0f196e0d1b826c68 .quad 0xf71ff0e24960e3db .quad 0x6113167023b7436c .quad 0x0cf0ea5877da7282 // 2^100 * 2 * G .quad 0x196c80a4ddd4ccbd .quad 0x22e6f55d95f2dd9d .quad 0xc75e33c740d6c71b .quad 0x7bb51279cb3c042f .quad 0xe332ced43ba6945a .quad 0xde0b1361e881c05d .quad 0x1ad40f095e67ed3b .quad 0x5da8acdab8c63d5d .quad 0xc4b6664a3a70159f .quad 0x76194f0f0a904e14 .quad 0xa5614c39a4096c13 .quad 0x6cd0ff50979feced // 2^100 * 3 * G .quad 0xc0e067e78f4428ac .quad 0x14835ab0a61135e3 .quad 0xf21d14f338062935 .quad 0x6390a4c8df04849c .quad 0x7fecfabdb04ba18e .quad 0xd0fc7bfc3bddbcf7 .quad 0xa41d486e057a131c .quad 0x641a4391f2223a61 .quad 0xc5c6b95aa606a8db .quad 0x914b7f9eb06825f1 .quad 0x2a731f6b44fc9eff .quad 0x30ddf38562705cfc // 2^100 * 4 * G .quad 0x4e3dcbdad1bff7f9 .quad 0xc9118e8220645717 .quad 0xbacccebc0f189d56 .quad 0x1b4822e9d4467668 .quad 0x33bef2bd68bcd52c .quad 0xc649dbb069482ef2 .quad 0xb5b6ee0c41cb1aee .quad 0x5c294d270212a7e5 .quad 0xab360a7f25563781 .quad 0x2512228a480f7958 .quad 0xc75d05276114b4e3 .quad 0x222d9625d976fe2a // 2^100 * 5 * G .quad 0x1c717f85b372ace1 .quad 0x81930e694638bf18 .quad 0x239cad056bc08b58 .quad 0x0b34271c87f8fff4 .quad 0x0f94be7e0a344f85 .quad 0xeb2faa8c87f22c38 .quad 0x9ce1e75e4ee16f0f .quad 0x43e64e5418a08dea .quad 0x8155e2521a35ce63 .quad 0xbe100d4df912028e .quad 0xbff80bf8a57ddcec .quad 0x57342dc96d6bc6e4 // 2^100 * 6 * G .quad 0xefeef065c8ce5998 .quad 0xbf029510b5cbeaa2 .quad 0x8c64a10620b7c458 .quad 0x35134fb231c24855 .quad 0xf3c3bcb71e707bf6 .quad 0x351d9b8c7291a762 .quad 0x00502e6edad69a33 .quad 0x522f521f1ec8807f .quad 0x272c1f46f9a3902b .quad 0xc91ba3b799657bcc .quad 0xae614b304f8a1c0e .quad 0x7afcaad70b99017b // 2^100 * 7 * G .quad 0xc25ded54a4b8be41 .quad 0x902d13e11bb0e2dd .quad 0x41f43233cde82ab2 .quad 0x1085faa5c3aae7cb .quad 0xa88141ecef842b6b .quad 0x55e7b14797abe6c5 .quad 0x8c748f9703784ffe .quad 0x5b50a1f7afcd00b7 .quad 0x9b840f66f1361315 .quad 0x18462242701003e9 .quad 0x65ed45fae4a25080 .quad 0x0a2862393fda7320 // 2^100 * 8 * G .quad 0x46ab13c8347cbc9d .quad 0x3849e8d499c12383 .quad 0x4cea314087d64ac9 .quad 0x1f354134b1a29ee7 .quad 0x960e737b6ecb9d17 
.quad 0xfaf24948d67ceae1 .quad 0x37e7a9b4d55e1b89 .quad 0x5cb7173cb46c59eb .quad 0x4a89e68b82b7abf0 .quad 0xf41cd9279ba6b7b9 .quad 0x16e6c210e18d876f .quad 0x7cacdb0f7f1b09c6 // 2^104 * 1 * G .quad 0x9062b2e0d91a78bc .quad 0x47c9889cc8509667 .quad 0x9df54a66405070b8 .quad 0x7369e6a92493a1bf .quad 0xe1014434dcc5caed .quad 0x47ed5d963c84fb33 .quad 0x70019576ed86a0e7 .quad 0x25b2697bd267f9e4 .quad 0x9d673ffb13986864 .quad 0x3ca5fbd9415dc7b8 .quad 0xe04ecc3bdf273b5e .quad 0x1420683db54e4cd2 // 2^104 * 2 * G .quad 0xb478bd1e249dd197 .quad 0x620c35005e58c102 .quad 0xfb02d32fccbaac5c .quad 0x60b63bebf508a72d .quad 0x34eebb6fc1cc5ad0 .quad 0x6a1b0ce99646ac8b .quad 0xd3b0da49a66bde53 .quad 0x31e83b4161d081c1 .quad 0x97e8c7129e062b4f .quad 0x49e48f4f29320ad8 .quad 0x5bece14b6f18683f .quad 0x55cf1eb62d550317 // 2^104 * 3 * G .quad 0x5879101065c23d58 .quad 0x8b9d086d5094819c .quad 0xe2402fa912c55fa7 .quad 0x669a6564570891d4 .quad 0x3076b5e37df58c52 .quad 0xd73ab9dde799cc36 .quad 0xbd831ce34913ee20 .quad 0x1a56fbaa62ba0133 .quad 0x943e6b505c9dc9ec .quad 0x302557bba77c371a .quad 0x9873ae5641347651 .quad 0x13c4836799c58a5c // 2^104 * 4 * G .quad 0x423a5d465ab3e1b9 .quad 0xfc13c187c7f13f61 .quad 0x19f83664ecb5b9b6 .quad 0x66f80c93a637b607 .quad 0xc4dcfb6a5d8bd080 .quad 0xdeebc4ec571a4842 .quad 0xd4b2e883b8e55365 .quad 0x50bdc87dc8e5b827 .quad 0x606d37836edfe111 .quad 0x32353e15f011abd9 .quad 0x64b03ac325b73b96 .quad 0x1dd56444725fd5ae // 2^104 * 5 * G .quad 0x8fa47ff83362127d .quad 0xbc9f6ac471cd7c15 .quad 0x6e71454349220c8b .quad 0x0e645912219f732e .quad 0xc297e60008bac89a .quad 0x7d4cea11eae1c3e0 .quad 0xf3e38be19fe7977c .quad 0x3a3a450f63a305cd .quad 0x078f2f31d8394627 .quad 0x389d3183de94a510 .quad 0xd1e36c6d17996f80 .quad 0x318c8d9393a9a87b // 2^104 * 6 * G .quad 0xf2745d032afffe19 .quad 0x0c9f3c497f24db66 .quad 0xbc98d3e3ba8598ef .quad 0x224c7c679a1d5314 .quad 0x5d669e29ab1dd398 .quad 0xfc921658342d9e3b .quad 0x55851dfdf35973cd .quad 0x509a41c325950af6 .quad 0xbdc06edca6f925e9 .quad 0x793ef3f4641b1f33 .quad 0x82ec12809d833e89 .quad 0x05bff02328a11389 // 2^104 * 7 * G .quad 0x3632137023cae00b .quad 0x544acf0ad1accf59 .quad 0x96741049d21a1c88 .quad 0x780b8cc3fa2a44a7 .quad 0x6881a0dd0dc512e4 .quad 0x4fe70dc844a5fafe .quad 0x1f748e6b8f4a5240 .quad 0x576277cdee01a3ea .quad 0x1ef38abc234f305f .quad 0x9a577fbd1405de08 .quad 0x5e82a51434e62a0d .quad 0x5ff418726271b7a1 // 2^104 * 8 * G .quad 0x398e080c1789db9d .quad 0xa7602025f3e778f5 .quad 0xfa98894c06bd035d .quad 0x106a03dc25a966be .quad 0xe5db47e813b69540 .quad 0xf35d2a3b432610e1 .quad 0xac1f26e938781276 .quad 0x29d4db8ca0a0cb69 .quad 0xd9ad0aaf333353d0 .quad 0x38669da5acd309e5 .quad 0x3c57658ac888f7f0 .quad 0x4ab38a51052cbefa // 2^108 * 1 * G .quad 0xdfdacbee4324c0e9 .quad 0x054442883f955bb7 .quad 0xdef7aaa8ea31609f .quad 0x68aee70642287cff .quad 0xf68fe2e8809de054 .quad 0xe3bc096a9c82bad1 .quad 0x076353d40aadbf45 .quad 0x7b9b1fb5dea1959e .quad 0xf01cc8f17471cc0c .quad 0x95242e37579082bb .quad 0x27776093d3e46b5f .quad 0x2d13d55a28bd85fb // 2^108 * 2 * G .quad 0xfac5d2065b35b8da .quad 0xa8da8a9a85624bb7 .quad 0xccd2ca913d21cd0f .quad 0x6b8341ee8bf90d58 .quad 0xbf019cce7aee7a52 .quad 0xa8ded2b6e454ead3 .quad 0x3c619f0b87a8bb19 .quad 0x3619b5d7560916d8 .quad 0x3579f26b0282c4b2 .quad 0x64d592f24fafefae .quad 0xb7cded7b28c8c7c0 .quad 0x6a927b6b7173a8d7 // 2^108 * 3 * G .quad 0x1f6db24f986e4656 .quad 0x1021c02ed1e9105b .quad 0xf8ff3fff2cc0a375 .quad 0x1d2a6bf8c6c82592 .quad 0x8d7040863ece88eb .quad 0xf0e307a980eec08c .quad 0xac2250610d788fda .quad 
0x056d92a43a0d478d .quad 0x1b05a196fc3da5a1 .quad 0x77d7a8c243b59ed0 .quad 0x06da3d6297d17918 .quad 0x66fbb494f12353f7 // 2^108 * 4 * G .quad 0x751a50b9d85c0fb8 .quad 0xd1afdc258bcf097b .quad 0x2f16a6a38309a969 .quad 0x14ddff9ee5b00659 .quad 0xd6d70996f12309d6 .quad 0xdbfb2385e9c3d539 .quad 0x46d602b0f7552411 .quad 0x270a0b0557843e0c .quad 0x61ff0640a7862bcc .quad 0x81cac09a5f11abfe .quad 0x9047830455d12abb .quad 0x19a4bde1945ae873 // 2^108 * 5 * G .quad 0x9b9f26f520a6200a .quad 0x64804443cf13eaf8 .quad 0x8a63673f8631edd3 .quad 0x72bbbce11ed39dc1 .quad 0x40c709dec076c49f .quad 0x657bfaf27f3e53f6 .quad 0x40662331eca042c4 .quad 0x14b375487eb4df04 .quad 0xae853c94ab66dc47 .quad 0xeb62343edf762d6e .quad 0xf08e0e186fb2f7d1 .quad 0x4f0b1c02700ab37a // 2^108 * 6 * G .quad 0xe1706787d81951fa .quad 0xa10a2c8eb290c77b .quad 0xe7382fa03ed66773 .quad 0x0a4d84710bcc4b54 .quad 0x79fd21ccc1b2e23f .quad 0x4ae7c281453df52a .quad 0xc8172ec9d151486b .quad 0x68abe9443e0a7534 .quad 0xda12c6c407831dcb .quad 0x0da230d74d5c510d .quad 0x4ab1531e6bd404e1 .quad 0x4106b166bcf440ef // 2^108 * 7 * G .quad 0x02e57a421cd23668 .quad 0x4ad9fb5d0eaef6fd .quad 0x954e6727b1244480 .quad 0x7f792f9d2699f331 .quad 0xa485ccd539e4ecf2 .quad 0x5aa3f3ad0555bab5 .quad 0x145e3439937df82d .quad 0x1238b51e1214283f .quad 0x0b886b925fd4d924 .quad 0x60906f7a3626a80d .quad 0xecd367b4b98abd12 .quad 0x2876beb1def344cf // 2^108 * 8 * G .quad 0xdc84e93563144691 .quad 0x632fe8a0d61f23f4 .quad 0x4caa800612a9a8d5 .quad 0x48f9dbfa0e9918d3 .quad 0xd594b3333a8a85f8 .quad 0x4ea37689e78d7d58 .quad 0x73bf9f455e8e351f .quad 0x5507d7d2bc41ebb4 .quad 0x1ceb2903299572fc .quad 0x7c8ccaa29502d0ee .quad 0x91bfa43411cce67b .quad 0x5784481964a831e7 // 2^112 * 1 * G .quad 0xda7c2b256768d593 .quad 0x98c1c0574422ca13 .quad 0xf1a80bd5ca0ace1d .quad 0x29cdd1adc088a690 .quad 0xd6cfd1ef5fddc09c .quad 0xe82b3efdf7575dce .quad 0x25d56b5d201634c2 .quad 0x3041c6bb04ed2b9b .quad 0x0ff2f2f9d956e148 .quad 0xade797759f356b2e .quad 0x1a4698bb5f6c025c .quad 0x104bbd6814049a7b // 2^112 * 2 * G .quad 0x51f0fd3168f1ed67 .quad 0x2c811dcdd86f3bc2 .quad 0x44dc5c4304d2f2de .quad 0x5be8cc57092a7149 .quad 0xa95d9a5fd67ff163 .quad 0xe92be69d4cc75681 .quad 0xb7f8024cde20f257 .quad 0x204f2a20fb072df5 .quad 0xc8143b3d30ebb079 .quad 0x7589155abd652e30 .quad 0x653c3c318f6d5c31 .quad 0x2570fb17c279161f // 2^112 * 3 * G .quad 0x3efa367f2cb61575 .quad 0xf5f96f761cd6026c .quad 0xe8c7142a65b52562 .quad 0x3dcb65ea53030acd .quad 0x192ea9550bb8245a .quad 0xc8e6fba88f9050d1 .quad 0x7986ea2d88a4c935 .quad 0x241c5f91de018668 .quad 0x28d8172940de6caa .quad 0x8fbf2cf022d9733a .quad 0x16d7fcdd235b01d1 .quad 0x08420edd5fcdf0e5 // 2^112 * 4 * G .quad 0xcdff20ab8362fa4a .quad 0x57e118d4e21a3e6e .quad 0xe3179617fc39e62b .quad 0x0d9a53efbc1769fd .quad 0x0358c34e04f410ce .quad 0xb6135b5a276e0685 .quad 0x5d9670c7ebb91521 .quad 0x04d654f321db889c .quad 0x5e7dc116ddbdb5d5 .quad 0x2954deb68da5dd2d .quad 0x1cb608173334a292 .quad 0x4a7a4f2618991ad7 // 2^112 * 5 * G .quad 0xf4a718025fb15f95 .quad 0x3df65f346b5c1b8f .quad 0xcdfcf08500e01112 .quad 0x11b50c4cddd31848 .quad 0x24c3b291af372a4b .quad 0x93da8270718147f2 .quad 0xdd84856486899ef2 .quad 0x4a96314223e0ee33 .quad 0xa6e8274408a4ffd6 .quad 0x738e177e9c1576d9 .quad 0x773348b63d02b3f2 .quad 0x4f4bce4dce6bcc51 // 2^112 * 6 * G .quad 0xa71fce5ae2242584 .quad 0x26ea725692f58a9e .quad 0xd21a09d71cea3cf4 .quad 0x73fcdd14b71c01e6 .quad 0x30e2616ec49d0b6f .quad 0xe456718fcaec2317 .quad 0x48eb409bf26b4fa6 .quad 0x3042cee561595f37 .quad 0x427e7079449bac41 .quad 
0x855ae36dbce2310a .quad 0x4cae76215f841a7c .quad 0x389e740c9a9ce1d6 // 2^112 * 7 * G .quad 0x64fcb3ae34dcb9ce .quad 0x97500323e348d0ad .quad 0x45b3f07d62c6381b .quad 0x61545379465a6788 .quad 0xc9bd78f6570eac28 .quad 0xe55b0b3227919ce1 .quad 0x65fc3eaba19b91ed .quad 0x25c425e5d6263690 .quad 0x3f3e06a6f1d7de6e .quad 0x3ef976278e062308 .quad 0x8c14f6264e8a6c77 .quad 0x6539a08915484759 // 2^112 * 8 * G .quad 0xe9d21f74c3d2f773 .quad 0xc150544125c46845 .quad 0x624e5ce8f9b99e33 .quad 0x11c5e4aac5cd186c .quad 0xddc4dbd414bb4a19 .quad 0x19b2bc3c98424f8e .quad 0x48a89fd736ca7169 .quad 0x0f65320ef019bd90 .quad 0xd486d1b1cafde0c6 .quad 0x4f3fe6e3163b5181 .quad 0x59a8af0dfaf2939a .quad 0x4cabc7bdec33072a // 2^116 * 1 * G .quad 0x16faa8fb532f7428 .quad 0xdbd42ea046a4e272 .quad 0x5337653b8b9ea480 .quad 0x4065947223973f03 .quad 0xf7c0a19c1a54a044 .quad 0x4a1c5e2477bd9fbb .quad 0xa6e3ca115af22972 .quad 0x1819bb953f2e9e0d .quad 0x498fbb795e042e84 .quad 0x7d0dd89a7698b714 .quad 0x8bfb0ba427fe6295 .quad 0x36ba82e721200524 // 2^116 * 2 * G .quad 0xd60ecbb74245ec41 .quad 0xfd9be89e34348716 .quad 0xc9240afee42284de .quad 0x4472f648d0531db4 .quad 0xc8d69d0a57274ed5 .quad 0x45ba803260804b17 .quad 0xdf3cda102255dfac .quad 0x77d221232709b339 .quad 0x498a6d7064ad94d8 .quad 0xa5b5c8fd9af62263 .quad 0x8ca8ed0545c141f4 .quad 0x2c63bec3662d358c // 2^116 * 3 * G .quad 0x7fe60d8bea787955 .quad 0xb9dc117eb5f401b7 .quad 0x91c7c09a19355cce .quad 0x22692ef59442bedf .quad 0x9a518b3a8586f8bf .quad 0x9ee71af6cbb196f0 .quad 0xaa0625e6a2385cf2 .quad 0x1deb2176ddd7c8d1 .quad 0x8563d19a2066cf6c .quad 0x401bfd8c4dcc7cd7 .quad 0xd976a6becd0d8f62 .quad 0x67cfd773a278b05e // 2^116 * 4 * G .quad 0x8dec31faef3ee475 .quad 0x99dbff8a9e22fd92 .quad 0x512d11594e26cab1 .quad 0x0cde561eec4310b9 .quad 0x2d5fa9855a4e586a .quad 0x65f8f7a449beab7e .quad 0xaa074dddf21d33d3 .quad 0x185cba721bcb9dee .quad 0x93869da3f4e3cb41 .quad 0xbf0392f540f7977e .quad 0x026204fcd0463b83 .quad 0x3ec91a769eec6eed // 2^116 * 5 * G .quad 0x1e9df75bf78166ad .quad 0x4dfda838eb0cd7af .quad 0xba002ed8c1eaf988 .quad 0x13fedb3e11f33cfc .quad 0x0fad2fb7b0a3402f .quad 0x46615ecbfb69f4a8 .quad 0xf745bcc8c5f8eaa6 .quad 0x7a5fa8794a94e896 .quad 0x52958faa13cd67a1 .quad 0x965ee0818bdbb517 .quad 0x16e58daa2e8845b3 .quad 0x357d397d5499da8f // 2^116 * 6 * G .quad 0x1ebfa05fb0bace6c .quad 0xc934620c1caf9a1e .quad 0xcc771cc41d82b61a .quad 0x2d94a16aa5f74fec .quad 0x481dacb4194bfbf8 .quad 0x4d77e3f1bae58299 .quad 0x1ef4612e7d1372a0 .quad 0x3a8d867e70ff69e1 .quad 0x6f58cd5d55aff958 .quad 0xba3eaa5c75567721 .quad 0x75c123999165227d .quad 0x69be1343c2f2b35e // 2^116 * 7 * G .quad 0x0e091d5ee197c92a .quad 0x4f51019f2945119f .quad 0x143679b9f034e99c .quad 0x7d88112e4d24c696 .quad 0x82bbbdac684b8de3 .quad 0xa2f4c7d03fca0718 .quad 0x337f92fbe096aaa8 .quad 0x200d4d8c63587376 .quad 0x208aed4b4893b32b .quad 0x3efbf23ebe59b964 .quad 0xd762deb0dba5e507 .quad 0x69607bd681bd9d94 // 2^116 * 8 * G .quad 0xf6be021068de1ce1 .quad 0xe8d518e70edcbc1f .quad 0xe3effdd01b5505a5 .quad 0x35f63353d3ec3fd0 .quad 0x3b7f3bd49323a902 .quad 0x7c21b5566b2c6e53 .quad 0xe5ba8ff53a7852a7 .quad 0x28bc77a5838ece00 .quad 0x63ba78a8e25d8036 .quad 0x63651e0094333490 .quad 0x48d82f20288ce532 .quad 0x3a31abfa36b57524 // 2^120 * 1 * G .quad 0x239e9624089c0a2e .quad 0xc748c4c03afe4738 .quad 0x17dbed2a764fa12a .quad 0x639b93f0321c8582 .quad 0xc08f788f3f78d289 .quad 0xfe30a72ca1404d9f .quad 0xf2778bfccf65cc9d .quad 0x7ee498165acb2021 .quad 0x7bd508e39111a1c3 .quad 0x2b2b90d480907489 .quad 0xe7d2aec2ae72fd19 .quad 
0x0edf493c85b602a6 // 2^120 * 2 * G .quad 0xaecc8158599b5a68 .quad 0xea574f0febade20e .quad 0x4fe41d7422b67f07 .quad 0x403b92e3019d4fb4 .quad 0x6767c4d284764113 .quad 0xa090403ff7f5f835 .quad 0x1c8fcffacae6bede .quad 0x04c00c54d1dfa369 .quad 0x4dc22f818b465cf8 .quad 0x71a0f35a1480eff8 .quad 0xaee8bfad04c7d657 .quad 0x355bb12ab26176f4 // 2^120 * 3 * G .quad 0xa71e64cc7493bbf4 .quad 0xe5bd84d9eca3b0c3 .quad 0x0a6bc50cfa05e785 .quad 0x0f9b8132182ec312 .quad 0xa301dac75a8c7318 .quad 0xed90039db3ceaa11 .quad 0x6f077cbf3bae3f2d .quad 0x7518eaf8e052ad8e .quad 0xa48859c41b7f6c32 .quad 0x0f2d60bcf4383298 .quad 0x1815a929c9b1d1d9 .quad 0x47c3871bbb1755c4 // 2^120 * 4 * G .quad 0x5144539771ec4f48 .quad 0xf805b17dc98c5d6e .quad 0xf762c11a47c3c66b .quad 0x00b89b85764699dc .quad 0xfbe65d50c85066b0 .quad 0x62ecc4b0b3a299b0 .quad 0xe53754ea441ae8e0 .quad 0x08fea02ce8d48d5f .quad 0x824ddd7668deead0 .quad 0xc86445204b685d23 .quad 0xb514cfcd5d89d665 .quad 0x473829a74f75d537 // 2^120 * 5 * G .quad 0x82d2da754679c418 .quad 0xe63bd7d8b2618df0 .quad 0x355eef24ac47eb0a .quad 0x2078684c4833c6b4 .quad 0x23d9533aad3902c9 .quad 0x64c2ddceef03588f .quad 0x15257390cfe12fb4 .quad 0x6c668b4d44e4d390 .quad 0x3b48cf217a78820c .quad 0xf76a0ab281273e97 .quad 0xa96c65a78c8eed7b .quad 0x7411a6054f8a433f // 2^120 * 6 * G .quad 0x4d659d32b99dc86d .quad 0x044cdc75603af115 .quad 0xb34c712cdcc2e488 .quad 0x7c136574fb8134ff .quad 0x579ae53d18b175b4 .quad 0x68713159f392a102 .quad 0x8455ecba1eef35f5 .quad 0x1ec9a872458c398f .quad 0xb8e6a4d400a2509b .quad 0x9b81d7020bc882b4 .quad 0x57e7cc9bf1957561 .quad 0x3add88a5c7cd6460 // 2^120 * 7 * G .quad 0xab895770b635dcf2 .quad 0x02dfef6cf66c1fbc .quad 0x85530268beb6d187 .quad 0x249929fccc879e74 .quad 0x85c298d459393046 .quad 0x8f7e35985ff659ec .quad 0x1d2ca22af2f66e3a .quad 0x61ba1131a406a720 .quad 0xa3d0a0f116959029 .quad 0x023b6b6cba7ebd89 .quad 0x7bf15a3e26783307 .quad 0x5620310cbbd8ece7 // 2^120 * 8 * G .quad 0x528993434934d643 .quad 0xb9dbf806a51222f5 .quad 0x8f6d878fc3f41c22 .quad 0x37676a2a4d9d9730 .quad 0x6646b5f477e285d6 .quad 0x40e8ff676c8f6193 .quad 0xa6ec7311abb594dd .quad 0x7ec846f3658cec4d .quad 0x9b5e8f3f1da22ec7 .quad 0x130f1d776c01cd13 .quad 0x214c8fcfa2989fb8 .quad 0x6daaf723399b9dd5 // 2^124 * 1 * G .quad 0x591e4a5610628564 .quad 0x2a4bb87ca8b4df34 .quad 0xde2a2572e7a38e43 .quad 0x3cbdabd9fee5046e .quad 0x81aebbdd2cd13070 .quad 0x962e4325f85a0e9e .quad 0xde9391aacadffecb .quad 0x53177fda52c230e6 .quad 0xa7bc970650b9de79 .quad 0x3d12a7fbc301b59b .quad 0x02652e68d36ae38c .quad 0x79d739835a6199dc // 2^124 * 2 * G .quad 0xd9354df64131c1bd .quad 0x758094a186ec5822 .quad 0x4464ee12e459f3c2 .quad 0x6c11fce4cb133282 .quad 0x21c9d9920d591737 .quad 0x9bea41d2e9b46cd6 .quad 0xe20e84200d89bfca .quad 0x79d99f946eae5ff8 .quad 0xf17b483568673205 .quad 0x387deae83caad96c .quad 0x61b471fd56ffe386 .quad 0x31741195b745a599 // 2^124 * 3 * G .quad 0xe8d10190b77a360b .quad 0x99b983209995e702 .quad 0xbd4fdff8fa0247aa .quad 0x2772e344e0d36a87 .quad 0x17f8ba683b02a047 .quad 0x50212096feefb6c8 .quad 0x70139be21556cbe2 .quad 0x203e44a11d98915b .quad 0xd6863eba37b9e39f .quad 0x105bc169723b5a23 .quad 0x104f6459a65c0762 .quad 0x567951295b4d38d4 // 2^124 * 4 * G .quad 0x535fd60613037524 .quad 0xe210adf6b0fbc26a .quad 0xac8d0a9b23e990ae .quad 0x47204d08d72fdbf9 .quad 0x07242eb30d4b497f .quad 0x1ef96306b9bccc87 .quad 0x37950934d8116f45 .quad 0x05468d6201405b04 .quad 0x00f565a9f93267de .quad 0xcecfd78dc0d58e8a .quad 0xa215e2dcf318e28e .quad 0x4599ee919b633352 // 2^124 * 5 * G .quad 0xd3c220ca70e0e76b .quad 
0xb12bea58ea9f3094 .quad 0x294ddec8c3271282 .quad 0x0c3539e1a1d1d028 .quad 0xac746d6b861ae579 .quad 0x31ab0650f6aea9dc .quad 0x241d661140256d4c .quad 0x2f485e853d21a5de .quad 0x329744839c0833f3 .quad 0x6fe6257fd2abc484 .quad 0x5327d1814b358817 .quad 0x65712585893fe9bc // 2^124 * 6 * G .quad 0x9c102fb732a61161 .quad 0xe48e10dd34d520a8 .quad 0x365c63546f9a9176 .quad 0x32f6fe4c046f6006 .quad 0x81c29f1bd708ee3f .quad 0xddcb5a05ae6407d0 .quad 0x97aec1d7d2a3eba7 .quad 0x1590521a91d50831 .quad 0x40a3a11ec7910acc .quad 0x9013dff8f16d27ae .quad 0x1a9720d8abb195d4 .quad 0x1bb9fe452ea98463 // 2^124 * 7 * G .quad 0xe9d1d950b3d54f9e .quad 0x2d5f9cbee00d33c1 .quad 0x51c2c656a04fc6ac .quad 0x65c091ee3c1cbcc9 .quad 0xcf5e6c95cc36747c .quad 0x294201536b0bc30d .quad 0x453ac67cee797af0 .quad 0x5eae6ab32a8bb3c9 .quad 0x7083661114f118ea .quad 0x2b37b87b94349cad .quad 0x7273f51cb4e99f40 .quad 0x78a2a95823d75698 // 2^124 * 8 * G .quad 0xa2b072e95c8c2ace .quad 0x69cffc96651e9c4b .quad 0x44328ef842e7b42b .quad 0x5dd996c122aadeb3 .quad 0xb4f23c425ef83207 .quad 0xabf894d3c9a934b5 .quad 0xd0708c1339fd87f7 .quad 0x1876789117166130 .quad 0x925b5ef0670c507c .quad 0x819bc842b93c33bf .quad 0x10792e9a70dd003f .quad 0x59ad4b7a6e28dc74 // 2^128 * 1 * G .quad 0x5f3a7562eb3dbe47 .quad 0xf7ea38548ebda0b8 .quad 0x00c3e53145747299 .quad 0x1304e9e71627d551 .quad 0x583b04bfacad8ea2 .quad 0x29b743e8148be884 .quad 0x2b1e583b0810c5db .quad 0x2b5449e58eb3bbaa .quad 0x789814d26adc9cfe .quad 0x3c1bab3f8b48dd0b .quad 0xda0fe1fff979c60a .quad 0x4468de2d7c2dd693 // 2^128 * 2 * G .quad 0x51bb355e9419469e .quad 0x33e6dc4c23ddc754 .quad 0x93a5b6d6447f9962 .quad 0x6cce7c6ffb44bd63 .quad 0x4b9ad8c6f86307ce .quad 0x21113531435d0c28 .quad 0xd4a866c5657a772c .quad 0x5da6427e63247352 .quad 0x1a94c688deac22ca .quad 0xb9066ef7bbae1ff8 .quad 0x88ad8c388d59580f .quad 0x58f29abfe79f2ca8 // 2^128 * 3 * G .quad 0xe90ecfab8de73e68 .quad 0x54036f9f377e76a5 .quad 0xf0495b0bbe015982 .quad 0x577629c4a7f41e36 .quad 0x4b5a64bf710ecdf6 .quad 0xb14ce538462c293c .quad 0x3643d056d50b3ab9 .quad 0x6af93724185b4870 .quad 0x3220024509c6a888 .quad 0xd2e036134b558973 .quad 0x83e236233c33289f .quad 0x701f25bb0caec18f // 2^128 * 4 * G .quad 0xc3a8b0f8e4616ced .quad 0xf700660e9e25a87d .quad 0x61e3061ff4bca59c .quad 0x2e0c92bfbdc40be9 .quad 0x9d18f6d97cbec113 .quad 0x844a06e674bfdbe4 .quad 0x20f5b522ac4e60d6 .quad 0x720a5bc050955e51 .quad 0x0c3f09439b805a35 .quad 0xe84e8b376242abfc .quad 0x691417f35c229346 .quad 0x0e9b9cbb144ef0ec // 2^128 * 5 * G .quad 0xfbbad48ffb5720ad .quad 0xee81916bdbf90d0e .quad 0xd4813152635543bf .quad 0x221104eb3f337bd8 .quad 0x8dee9bd55db1beee .quad 0xc9c3ab370a723fb9 .quad 0x44a8f1bf1c68d791 .quad 0x366d44191cfd3cde .quad 0x9e3c1743f2bc8c14 .quad 0x2eda26fcb5856c3b .quad 0xccb82f0e68a7fb97 .quad 0x4167a4e6bc593244 // 2^128 * 6 * G .quad 0x643b9d2876f62700 .quad 0x5d1d9d400e7668eb .quad 0x1b4b430321fc0684 .quad 0x7938bb7e2255246a .quad 0xc2be2665f8ce8fee .quad 0xe967ff14e880d62c .quad 0xf12e6e7e2f364eee .quad 0x34b33370cb7ed2f6 .quad 0xcdc591ee8681d6cc .quad 0xce02109ced85a753 .quad 0xed7485c158808883 .quad 0x1176fc6e2dfe65e4 // 2^128 * 7 * G .quad 0xb4af6cd05b9c619b .quad 0x2ddfc9f4b2a58480 .quad 0x3d4fa502ebe94dc4 .quad 0x08fc3a4c677d5f34 .quad 0xdb90e28949770eb8 .quad 0x98fbcc2aacf440a3 .quad 0x21354ffeded7879b .quad 0x1f6a3e54f26906b6 .quad 0x60a4c199d30734ea .quad 0x40c085b631165cd6 .quad 0xe2333e23f7598295 .quad 0x4f2fad0116b900d1 // 2^128 * 8 * G .quad 0x44beb24194ae4e54 .quad 0x5f541c511857ef6c .quad 0xa61e6b2d368d0498 .quad 
0x445484a4972ef7ab .quad 0x962cd91db73bb638 .quad 0xe60577aafc129c08 .quad 0x6f619b39f3b61689 .quad 0x3451995f2944ee81 .quad 0x9152fcd09fea7d7c .quad 0x4a816c94b0935cf6 .quad 0x258e9aaa47285c40 .quad 0x10b89ca6042893b7 // 2^132 * 1 * G .quad 0x9b2a426e3b646025 .quad 0x32127190385ce4cf .quad 0xa25cffc2dd6dea45 .quad 0x06409010bea8de75 .quad 0xd67cded679d34aa0 .quad 0xcc0b9ec0cc4db39f .quad 0xa535a456e35d190f .quad 0x2e05d9eaf61f6fef .quad 0xc447901ad61beb59 .quad 0x661f19bce5dc880a .quad 0x24685482b7ca6827 .quad 0x293c778cefe07f26 // 2^132 * 2 * G .quad 0x86809e7007069096 .quad 0xaad75b15e4e50189 .quad 0x07f35715a21a0147 .quad 0x0487f3f112815d5e .quad 0x16c795d6a11ff200 .quad 0xcb70d0e2b15815c9 .quad 0x89f293209b5395b5 .quad 0x50b8c2d031e47b4f .quad 0x48350c08068a4962 .quad 0x6ffdd05351092c9a .quad 0x17af4f4aaf6fc8dd .quad 0x4b0553b53cdba58b // 2^132 * 3 * G .quad 0x9c65fcbe1b32ff79 .quad 0xeb75ea9f03b50f9b .quad 0xfced2a6c6c07e606 .quad 0x35106cd551717908 .quad 0xbf05211b27c152d4 .quad 0x5ec26849bd1af639 .quad 0x5e0b2caa8e6fab98 .quad 0x054c8bdd50bd0840 .quad 0x38a0b12f1dcf073d .quad 0x4b60a8a3b7f6a276 .quad 0xfed5ac25d3404f9a .quad 0x72e82d5e5505c229 // 2^132 * 4 * G .quad 0x6b0b697ff0d844c8 .quad 0xbb12f85cd979cb49 .quad 0xd2a541c6c1da0f1f .quad 0x7b7c242958ce7211 .quad 0x00d9cdfd69771d02 .quad 0x410276cd6cfbf17e .quad 0x4c45306c1cb12ec7 .quad 0x2857bf1627500861 .quad 0x9f21903f0101689e .quad 0xd779dfd3bf861005 .quad 0xa122ee5f3deb0f1b .quad 0x510df84b485a00d4 // 2^132 * 5 * G .quad 0xa54133bb9277a1fa .quad 0x74ec3b6263991237 .quad 0x1a3c54dc35d2f15a .quad 0x2d347144e482ba3a .quad 0x24b3c887c70ac15e .quad 0xb0f3a557fb81b732 .quad 0x9b2cde2fe578cc1b .quad 0x4cf7ed0703b54f8e .quad 0x6bd47c6598fbee0f .quad 0x9e4733e2ab55be2d .quad 0x1093f624127610c5 .quad 0x4e05e26ad0a1eaa4 // 2^132 * 6 * G .quad 0xda9b6b624b531f20 .quad 0x429a760e77509abb .quad 0xdbe9f522e823cb80 .quad 0x618f1856880c8f82 .quad 0x1833c773e18fe6c0 .quad 0xe3c4711ad3c87265 .quad 0x3bfd3c4f0116b283 .quad 0x1955875eb4cd4db8 .quad 0x6da6de8f0e399799 .quad 0x7ad61aa440fda178 .quad 0xb32cd8105e3563dd .quad 0x15f6beae2ae340ae // 2^132 * 7 * G .quad 0x862bcb0c31ec3a62 .quad 0x810e2b451138f3c2 .quad 0x788ec4b839dac2a4 .quad 0x28f76867ae2a9281 .quad 0xba9a0f7b9245e215 .quad 0xf368612dd98c0dbb .quad 0x2e84e4cbf220b020 .quad 0x6ba92fe962d90eda .quad 0x3e4df9655884e2aa .quad 0xbd62fbdbdbd465a5 .quad 0xd7596caa0de9e524 .quad 0x6e8042ccb2b1b3d7 // 2^132 * 8 * G .quad 0xf10d3c29ce28ca6e .quad 0xbad34540fcb6093d .quad 0xe7426ed7a2ea2d3f .quad 0x08af9d4e4ff298b9 .quad 0x1530653616521f7e .quad 0x660d06b896203dba .quad 0x2d3989bc545f0879 .quad 0x4b5303af78ebd7b0 .quad 0x72f8a6c3bebcbde8 .quad 0x4f0fca4adc3a8e89 .quad 0x6fa9d4e8c7bfdf7a .quad 0x0dcf2d679b624eb7 // 2^136 * 1 * G .quad 0x3d5947499718289c .quad 0x12ebf8c524533f26 .quad 0x0262bfcb14c3ef15 .quad 0x20b878d577b7518e .quad 0x753941be5a45f06e .quad 0xd07caeed6d9c5f65 .quad 0x11776b9c72ff51b6 .quad 0x17d2d1d9ef0d4da9 .quad 0x27f2af18073f3e6a .quad 0xfd3fe519d7521069 .quad 0x22e3b72c3ca60022 .quad 0x72214f63cc65c6a7 // 2^136 * 2 * G .quad 0xb4e37f405307a693 .quad 0xaba714d72f336795 .quad 0xd6fbd0a773761099 .quad 0x5fdf48c58171cbc9 .quad 0x1d9db7b9f43b29c9 .quad 0xd605824a4f518f75 .quad 0xf2c072bd312f9dc4 .quad 0x1f24ac855a1545b0 .quad 0x24d608328e9505aa .quad 0x4748c1d10c1420ee .quad 0xc7ffe45c06fb25a2 .quad 0x00ba739e2ae395e6 // 2^136 * 3 * G .quad 0x592e98de5c8790d6 .quad 0xe5bfb7d345c2a2df .quad 0x115a3b60f9b49922 .quad 0x03283a3e67ad78f3 .quad 0xae4426f5ea88bb26 .quad 
0x360679d984973bfb .quad 0x5c9f030c26694e50 .quad 0x72297de7d518d226 .quad 0x48241dc7be0cb939 .quad 0x32f19b4d8b633080 .quad 0xd3dfc90d02289308 .quad 0x05e1296846271945 // 2^136 * 4 * G .quad 0xba82eeb32d9c495a .quad 0xceefc8fcf12bb97c .quad 0xb02dabae93b5d1e0 .quad 0x39c00c9c13698d9b .quad 0xadbfbbc8242c4550 .quad 0xbcc80cecd03081d9 .quad 0x843566a6f5c8df92 .quad 0x78cf25d38258ce4c .quad 0x15ae6b8e31489d68 .quad 0xaa851cab9c2bf087 .quad 0xc9a75a97f04efa05 .quad 0x006b52076b3ff832 // 2^136 * 5 * G .quad 0x29e0cfe19d95781c .quad 0xb681df18966310e2 .quad 0x57df39d370516b39 .quad 0x4d57e3443bc76122 .quad 0xf5cb7e16b9ce082d .quad 0x3407f14c417abc29 .quad 0xd4b36bce2bf4a7ab .quad 0x7de2e9561a9f75ce .quad 0xde70d4f4b6a55ecb .quad 0x4801527f5d85db99 .quad 0xdbc9c440d3ee9a81 .quad 0x6b2a90af1a6029ed // 2^136 * 6 * G .quad 0x6923f4fc9ae61e97 .quad 0x5735281de03f5fd1 .quad 0xa764ae43e6edd12d .quad 0x5fd8f4e9d12d3e4a .quad 0x77ebf3245bb2d80a .quad 0xd8301b472fb9079b .quad 0xc647e6f24cee7333 .quad 0x465812c8276c2109 .quad 0x4d43beb22a1062d9 .quad 0x7065fb753831dc16 .quad 0x180d4a7bde2968d7 .quad 0x05b32c2b1cb16790 // 2^136 * 7 * G .quad 0xc8c05eccd24da8fd .quad 0xa1cf1aac05dfef83 .quad 0xdbbeeff27df9cd61 .quad 0x3b5556a37b471e99 .quad 0xf7fca42c7ad58195 .quad 0x3214286e4333f3cc .quad 0xb6c29d0d340b979d .quad 0x31771a48567307e1 .quad 0x32b0c524e14dd482 .quad 0xedb351541a2ba4b6 .quad 0xa3d16048282b5af3 .quad 0x4fc079d27a7336eb // 2^136 * 8 * G .quad 0x51c938b089bf2f7f .quad 0x2497bd6502dfe9a7 .quad 0xffffc09c7880e453 .quad 0x124567cecaf98e92 .quad 0xdc348b440c86c50d .quad 0x1337cbc9cc94e651 .quad 0x6422f74d643e3cb9 .quad 0x241170c2bae3cd08 .quad 0x3ff9ab860ac473b4 .quad 0xf0911dee0113e435 .quad 0x4ae75060ebc6c4af .quad 0x3f8612966c87000d // 2^140 * 1 * G .quad 0x0c9c5303f7957be4 .quad 0xa3c31a20e085c145 .quad 0xb0721d71d0850050 .quad 0x0aba390eab0bf2da .quad 0x529fdffe638c7bf3 .quad 0xdf2b9e60388b4995 .quad 0xe027b34f1bad0249 .quad 0x7bc92fc9b9fa74ed .quad 0x9f97ef2e801ad9f9 .quad 0x83697d5479afda3a .quad 0xe906b3ffbd596b50 .quad 0x02672b37dd3fb8e0 // 2^140 * 2 * G .quad 0x48b2ca8b260885e4 .quad 0xa4286bec82b34c1c .quad 0x937e1a2617f58f74 .quad 0x741d1fcbab2ca2a5 .quad 0xee9ba729398ca7f5 .quad 0xeb9ca6257a4849db .quad 0x29eb29ce7ec544e1 .quad 0x232ca21ef736e2c8 .quad 0xbf61423d253fcb17 .quad 0x08803ceafa39eb14 .quad 0xf18602df9851c7af .quad 0x0400f3a049e3414b // 2^140 * 3 * G .quad 0xabce0476ba61c55b .quad 0x36a3d6d7c4d39716 .quad 0x6eb259d5e8d82d09 .quad 0x0c9176e984d756fb .quad 0x2efba412a06e7b06 .quad 0x146785452c8d2560 .quad 0xdf9713ebd67a91c7 .quad 0x32830ac7157eadf3 .quad 0x0e782a7ab73769e8 .quad 0x04a05d7875b18e2c .quad 0x29525226ebcceae1 .quad 0x0d794f8383eba820 // 2^140 * 4 * G .quad 0xff35f5cb9e1516f4 .quad 0xee805bcf648aae45 .quad 0xf0d73c2bb93a9ef3 .quad 0x097b0bf22092a6c2 .quad 0x7be44ce7a7a2e1ac .quad 0x411fd93efad1b8b7 .quad 0x1734a1d70d5f7c9b .quad 0x0d6592233127db16 .quad 0xc48bab1521a9d733 .quad 0xa6c2eaead61abb25 .quad 0x625c6c1cc6cb4305 .quad 0x7fc90fea93eb3a67 // 2^140 * 5 * G .quad 0x0408f1fe1f5c5926 .quad 0x1a8f2f5e3b258bf4 .quad 0x40a951a2fdc71669 .quad 0x6598ee93c98b577e .quad 0xc527deb59c7cb23d .quad 0x955391695328404e .quad 0xd64392817ccf2c7a .quad 0x6ce97dabf7d8fa11 .quad 0x25b5a8e50ef7c48f .quad 0xeb6034116f2ce532 .quad 0xc5e75173e53de537 .quad 0x73119fa08c12bb03 // 2^140 * 6 * G .quad 0xed30129453f1a4cb .quad 0xbce621c9c8f53787 .quad 0xfacb2b1338bee7b9 .quad 0x3025798a9ea8428c .quad 0x7845b94d21f4774d .quad 0xbf62f16c7897b727 .quad 0x671857c03c56522b .quad 
0x3cd6a85295621212 .quad 0x3fecde923aeca999 .quad 0xbdaa5b0062e8c12f .quad 0x67b99dfc96988ade .quad 0x3f52c02852661036 // 2^140 * 7 * G .quad 0xffeaa48e2a1351c6 .quad 0x28624754fa7f53d7 .quad 0x0b5ba9e57582ddf1 .quad 0x60c0104ba696ac59 .quad 0x9258bf99eec416c6 .quad 0xac8a5017a9d2f671 .quad 0x629549ab16dea4ab .quad 0x05d0e85c99091569 .quad 0x051de020de9cbe97 .quad 0xfa07fc56b50bcf74 .quad 0x378cec9f0f11df65 .quad 0x36853c69ab96de4d // 2^140 * 8 * G .quad 0x36d9b8de78f39b2d .quad 0x7f42ed71a847b9ec .quad 0x241cd1d679bd3fde .quad 0x6a704fec92fbce6b .quad 0x4433c0b0fac5e7be .quad 0x724bae854c08dcbe .quad 0xf1f24cc446978f9b .quad 0x4a0aff6d62825fc8 .quad 0xe917fb9e61095301 .quad 0xc102df9402a092f8 .quad 0xbf09e2f5fa66190b .quad 0x681109bee0dcfe37 // 2^144 * 1 * G .quad 0x559a0cc9782a0dde .quad 0x551dcdb2ea718385 .quad 0x7f62865b31ef238c .quad 0x504aa7767973613d .quad 0x9c18fcfa36048d13 .quad 0x29159db373899ddd .quad 0xdc9f350b9f92d0aa .quad 0x26f57eee878a19d4 .quad 0x0cab2cd55687efb1 .quad 0x5180d162247af17b .quad 0x85c15a344f5a2467 .quad 0x4041943d9dba3069 // 2^144 * 2 * G .quad 0xc3c0eeba43ebcc96 .quad 0x8d749c9c26ea9caf .quad 0xd9fa95ee1c77ccc6 .quad 0x1420a1d97684340f .quad 0x4b217743a26caadd .quad 0x47a6b424648ab7ce .quad 0xcb1d4f7a03fbc9e3 .quad 0x12d931429800d019 .quad 0x00c67799d337594f .quad 0x5e3c5140b23aa47b .quad 0x44182854e35ff395 .quad 0x1b4f92314359a012 // 2^144 * 3 * G .quad 0x3e5c109d89150951 .quad 0x39cefa912de9696a .quad 0x20eae43f975f3020 .quad 0x239b572a7f132dae .quad 0x33cf3030a49866b1 .quad 0x251f73d2215f4859 .quad 0xab82aa4051def4f6 .quad 0x5ff191d56f9a23f6 .quad 0x819ed433ac2d9068 .quad 0x2883ab795fc98523 .quad 0xef4572805593eb3d .quad 0x020c526a758f36cb // 2^144 * 4 * G .quad 0x779834f89ed8dbbc .quad 0xc8f2aaf9dc7ca46c .quad 0xa9524cdca3e1b074 .quad 0x02aacc4615313877 .quad 0xe931ef59f042cc89 .quad 0x2c589c9d8e124bb6 .quad 0xadc8e18aaec75997 .quad 0x452cfe0a5602c50c .quad 0x86a0f7a0647877df .quad 0xbbc464270e607c9f .quad 0xab17ea25f1fb11c9 .quad 0x4cfb7d7b304b877b // 2^144 * 5 * G .quad 0x72b43d6cb89b75fe .quad 0x54c694d99c6adc80 .quad 0xb8c3aa373ee34c9f .quad 0x14b4622b39075364 .quad 0xe28699c29789ef12 .quad 0x2b6ecd71df57190d .quad 0xc343c857ecc970d0 .quad 0x5b1d4cbc434d3ac5 .quad 0xb6fb2615cc0a9f26 .quad 0x3a4f0e2bb88dcce5 .quad 0x1301498b3369a705 .quad 0x2f98f71258592dd1 // 2^144 * 6 * G .quad 0x0c94a74cb50f9e56 .quad 0x5b1ff4a98e8e1320 .quad 0x9a2acc2182300f67 .quad 0x3a6ae249d806aaf9 .quad 0x2e12ae444f54a701 .quad 0xfcfe3ef0a9cbd7de .quad 0xcebf890d75835de0 .quad 0x1d8062e9e7614554 .quad 0x657ada85a9907c5a .quad 0x1a0ea8b591b90f62 .quad 0x8d0e1dfbdf34b4e9 .quad 0x298b8ce8aef25ff3 // 2^144 * 7 * G .quad 0x2a927953eff70cb2 .quad 0x4b89c92a79157076 .quad 0x9418457a30a7cf6a .quad 0x34b8a8404d5ce485 .quad 0x837a72ea0a2165de .quad 0x3fab07b40bcf79f6 .quad 0x521636c77738ae70 .quad 0x6ba6271803a7d7dc .quad 0xc26eecb583693335 .quad 0xd5a813df63b5fefd .quad 0xa293aa9aa4b22573 .quad 0x71d62bdd465e1c6a // 2^144 * 8 * G .quad 0x6533cc28d378df80 .quad 0xf6db43790a0fa4b4 .quad 0xe3645ff9f701da5a .quad 0x74d5f317f3172ba4 .quad 0xcd2db5dab1f75ef5 .quad 0xd77f95cf16b065f5 .quad 0x14571fea3f49f085 .quad 0x1c333621262b2b3d .quad 0xa86fe55467d9ca81 .quad 0x398b7c752b298c37 .quad 0xda6d0892e3ac623b .quad 0x4aebcc4547e9d98c // 2^148 * 1 * G .quad 0x53175a7205d21a77 .quad 0xb0c04422d3b934d4 .quad 0xadd9f24bdd5deadc .quad 0x074f46e69f10ff8c .quad 0x0de9b204a059a445 .quad 0xe15cb4aa4b17ad0f .quad 0xe1bbec521f79c557 .quad 0x2633f1b9d071081b .quad 0xc1fb4177018b9910 .quad 
0xa6ea20dc6c0fe140 .quad 0xd661f3e74354c6ff .quad 0x5ecb72e6f1a3407a // 2^148 * 2 * G .quad 0xa515a31b2259fb4e .quad 0x0960f3972bcac52f .quad 0xedb52fec8d3454cb .quad 0x382e2720c476c019 .quad 0xfeeae106e8e86997 .quad 0x9863337f98d09383 .quad 0x9470480eaa06ebef .quad 0x038b6898d4c5c2d0 .quad 0xf391c51d8ace50a6 .quad 0x3142d0b9ae2d2948 .quad 0xdb4d5a1a7f24ca80 .quad 0x21aeba8b59250ea8 // 2^148 * 3 * G .quad 0x24f13b34cf405530 .quad 0x3c44ea4a43088af7 .quad 0x5dd5c5170006a482 .quad 0x118eb8f8890b086d .quad 0x53853600f0087f23 .quad 0x4c461879da7d5784 .quad 0x6af303deb41f6860 .quad 0x0a3c16c5c27c18ed .quad 0x17e49c17cc947f3d .quad 0xccc6eda6aac1d27b .quad 0xdf6092ceb0f08e56 .quad 0x4909b3e22c67c36b // 2^148 * 4 * G .quad 0x9c9c85ea63fe2e89 .quad 0xbe1baf910e9412ec .quad 0x8f7baa8a86fbfe7b .quad 0x0fb17f9fef968b6c .quad 0x59a16676706ff64e .quad 0x10b953dd0d86a53d .quad 0x5848e1e6ce5c0b96 .quad 0x2d8b78e712780c68 .quad 0x79d5c62eafc3902b .quad 0x773a215289e80728 .quad 0xc38ae640e10120b9 .quad 0x09ae23717b2b1a6d // 2^148 * 5 * G .quad 0xbb6a192a4e4d083c .quad 0x34ace0630029e192 .quad 0x98245a59aafabaeb .quad 0x6d9c8a9ada97faac .quad 0x10ab8fa1ad32b1d0 .quad 0xe9aced1be2778b24 .quad 0xa8856bc0373de90f .quad 0x66f35ddddda53996 .quad 0xd27d9afb24997323 .quad 0x1bb7e07ef6f01d2e .quad 0x2ba7472df52ecc7f .quad 0x03019b4f646f9dc8 // 2^148 * 6 * G .quad 0x04a186b5565345cd .quad 0xeee76610bcc4116a .quad 0x689c73b478fb2a45 .quad 0x387dcbff65697512 .quad 0xaf09b214e6b3dc6b .quad 0x3f7573b5ad7d2f65 .quad 0xd019d988100a23b0 .quad 0x392b63a58b5c35f7 .quad 0x4093addc9c07c205 .quad 0xc565be15f532c37e .quad 0x63dbecfd1583402a .quad 0x61722b4aef2e032e // 2^148 * 7 * G .quad 0x0012aafeecbd47af .quad 0x55a266fb1cd46309 .quad 0xf203eb680967c72c .quad 0x39633944ca3c1429 .quad 0xd6b07a5581cb0e3c .quad 0x290ff006d9444969 .quad 0x08680b6a16dcda1f .quad 0x5568d2b75a06de59 .quad 0x8d0cb88c1b37cfe1 .quad 0x05b6a5a3053818f3 .quad 0xf2e9bc04b787d959 .quad 0x6beba1249add7f64 // 2^148 * 8 * G .quad 0x1d06005ca5b1b143 .quad 0x6d4c6bb87fd1cda2 .quad 0x6ef5967653fcffe7 .quad 0x097c29e8c1ce1ea5 .quad 0x5c3cecb943f5a53b .quad 0x9cc9a61d06c08df2 .quad 0xcfba639a85895447 .quad 0x5a845ae80df09fd5 .quad 0x4ce97dbe5deb94ca .quad 0x38d0a4388c709c48 .quad 0xc43eced4a169d097 .quad 0x0a1249fff7e587c3 // 2^152 * 1 * G .quad 0x12f0071b276d01c9 .quad 0xe7b8bac586c48c70 .quad 0x5308129b71d6fba9 .quad 0x5d88fbf95a3db792 .quad 0x0b408d9e7354b610 .quad 0x806b32535ba85b6e .quad 0xdbe63a034a58a207 .quad 0x173bd9ddc9a1df2c .quad 0x2b500f1efe5872df .quad 0x58d6582ed43918c1 .quad 0xe6ed278ec9673ae0 .quad 0x06e1cd13b19ea319 // 2^152 * 2 * G .quad 0x40d0ad516f166f23 .quad 0x118e32931fab6abe .quad 0x3fe35e14a04d088e .quad 0x3080603526e16266 .quad 0x472baf629e5b0353 .quad 0x3baa0b90278d0447 .quad 0x0c785f469643bf27 .quad 0x7f3a6a1a8d837b13 .quad 0xf7e644395d3d800b .quad 0x95a8d555c901edf6 .quad 0x68cd7830592c6339 .quad 0x30d0fded2e51307e // 2^152 * 3 * G .quad 0xe0594d1af21233b3 .quad 0x1bdbe78ef0cc4d9c .quad 0x6965187f8f499a77 .quad 0x0a9214202c099868 .quad 0x9cb4971e68b84750 .quad 0xa09572296664bbcf .quad 0x5c8de72672fa412b .quad 0x4615084351c589d9 .quad 0xbc9019c0aeb9a02e .quad 0x55c7110d16034cae .quad 0x0e6df501659932ec .quad 0x3bca0d2895ca5dfe // 2^152 * 4 * G .quad 0x40f031bc3c5d62a4 .quad 0x19fc8b3ecff07a60 .quad 0x98183da2130fb545 .quad 0x5631deddae8f13cd .quad 0x9c688eb69ecc01bf .quad 0xf0bc83ada644896f .quad 0xca2d955f5f7a9fe2 .quad 0x4ea8b4038df28241 .quad 0x2aed460af1cad202 .quad 0x46305305a48cee83 .quad 0x9121774549f11a5f .quad 
0x24ce0930542ca463 // 2^152 * 5 * G .quad 0x1fe890f5fd06c106 .quad 0xb5c468355d8810f2 .quad 0x827808fe6e8caf3e .quad 0x41d4e3c28a06d74b .quad 0x3fcfa155fdf30b85 .quad 0xd2f7168e36372ea4 .quad 0xb2e064de6492f844 .quad 0x549928a7324f4280 .quad 0xf26e32a763ee1a2e .quad 0xae91e4b7d25ffdea .quad 0xbc3bd33bd17f4d69 .quad 0x491b66dec0dcff6a // 2^152 * 6 * G .quad 0x98f5b13dc7ea32a7 .quad 0xe3d5f8cc7e16db98 .quad 0xac0abf52cbf8d947 .quad 0x08f338d0c85ee4ac .quad 0x75f04a8ed0da64a1 .quad 0xed222caf67e2284b .quad 0x8234a3791f7b7ba4 .quad 0x4cf6b8b0b7018b67 .quad 0xc383a821991a73bd .quad 0xab27bc01df320c7a .quad 0xc13d331b84777063 .quad 0x530d4a82eb078a99 // 2^152 * 7 * G .quad 0x004c3630e1f94825 .quad 0x7e2d78268cab535a .quad 0xc7482323cc84ff8b .quad 0x65ea753f101770b9 .quad 0x6d6973456c9abf9e .quad 0x257fb2fc4900a880 .quad 0x2bacf412c8cfb850 .quad 0x0db3e7e00cbfbd5b .quad 0x3d66fc3ee2096363 .quad 0x81d62c7f61b5cb6b .quad 0x0fbe044213443b1a .quad 0x02a4ec1921e1a1db // 2^152 * 8 * G .quad 0x5ce6259a3b24b8a2 .quad 0xb8577acc45afa0b8 .quad 0xcccbe6e88ba07037 .quad 0x3d143c51127809bf .quad 0xf5c86162f1cf795f .quad 0x118c861926ee57f2 .quad 0x172124851c063578 .quad 0x36d12b5dec067fcf .quad 0x126d279179154557 .quad 0xd5e48f5cfc783a0a .quad 0x36bdb6e8df179bac .quad 0x2ef517885ba82859 // 2^156 * 1 * G .quad 0x88bd438cd11e0d4a .quad 0x30cb610d43ccf308 .quad 0xe09a0e3791937bcc .quad 0x4559135b25b1720c .quad 0x1ea436837c6da1e9 .quad 0xf9c189af1fb9bdbe .quad 0x303001fcce5dd155 .quad 0x28a7c99ebc57be52 .quad 0xb8fd9399e8d19e9d .quad 0x908191cb962423ff .quad 0xb2b948d747c742a3 .quad 0x37f33226d7fb44c4 // 2^156 * 2 * G .quad 0x0dae8767b55f6e08 .quad 0x4a43b3b35b203a02 .quad 0xe3725a6e80af8c79 .quad 0x0f7a7fd1705fa7a3 .quad 0x33912553c821b11d .quad 0x66ed42c241e301df .quad 0x066fcc11104222fd .quad 0x307a3b41c192168f .quad 0x8eeb5d076eb55ce0 .quad 0x2fc536bfaa0d925a .quad 0xbe81830fdcb6c6e8 .quad 0x556c7045827baf52 // 2^156 * 3 * G .quad 0x8e2b517302e9d8b7 .quad 0xe3e52269248714e8 .quad 0xbd4fbd774ca960b5 .quad 0x6f4b4199c5ecada9 .quad 0xb94b90022bf44406 .quad 0xabd4237eff90b534 .quad 0x7600a960faf86d3a .quad 0x2f45abdac2322ee3 .quad 0x61af4912c8ef8a6a .quad 0xe58fa4fe43fb6e5e .quad 0xb5afcc5d6fd427cf .quad 0x6a5393281e1e11eb // 2^156 * 4 * G .quad 0xf3da5139a5d1ee89 .quad 0x8145457cff936988 .quad 0x3f622fed00e188c4 .quad 0x0f513815db8b5a3d .quad 0x0fff04fe149443cf .quad 0x53cac6d9865cddd7 .quad 0x31385b03531ed1b7 .quad 0x5846a27cacd1039d .quad 0x4ff5cdac1eb08717 .quad 0x67e8b29590f2e9bc .quad 0x44093b5e237afa99 .quad 0x0d414bed8708b8b2 // 2^156 * 5 * G .quad 0xcfb68265fd0e75f6 .quad 0xe45b3e28bb90e707 .quad 0x7242a8de9ff92c7a .quad 0x685b3201933202dd .quad 0x81886a92294ac9e8 .quad 0x23162b45d55547be .quad 0x94cfbc4403715983 .quad 0x50eb8fdb134bc401 .quad 0xc0b73ec6d6b330cd .quad 0x84e44807132faff1 .quad 0x732b7352c4a5dee1 .quad 0x5d7c7cf1aa7cd2d2 // 2^156 * 6 * G .quad 0xaf3b46bf7a4aafa2 .quad 0xb78705ec4d40d411 .quad 0x114f0c6aca7c15e3 .quad 0x3f364faaa9489d4d .quad 0x33d1013e9b73a562 .quad 0x925cef5748ec26e1 .quad 0xa7fce614dd468058 .quad 0x78b0fad41e9aa438 .quad 0xbf56a431ed05b488 .quad 0xa533e66c9c495c7e .quad 0xe8652baf87f3651a .quad 0x0241800059d66c33 // 2^156 * 7 * G .quad 0xceb077fea37a5be4 .quad 0xdb642f02e5a5eeb7 .quad 0xc2e6d0c5471270b8 .quad 0x4771b65538e4529c .quad 0x28350c7dcf38ea01 .quad 0x7c6cdbc0b2917ab6 .quad 0xace7cfbe857082f7 .quad 0x4d2845aba2d9a1e0 .quad 0xbb537fe0447070de .quad 0xcba744436dd557df .quad 0xd3b5a3473600dbcb .quad 0x4aeabbe6f9ffd7f8 // 2^156 * 8 * G .quad 0x4630119e40d8f78c .quad 
0xa01a9bc53c710e11 .quad 0x486d2b258910dd79 .quad 0x1e6c47b3db0324e5 .quad 0x6a2134bcc4a9c8f2 .quad 0xfbf8fd1c8ace2e37 .quad 0x000ae3049911a0ba .quad 0x046e3a616bc89b9e .quad 0x14e65442f03906be .quad 0x4a019d54e362be2a .quad 0x68ccdfec8dc230c7 .quad 0x7cfb7e3faf6b861c // 2^160 * 1 * G .quad 0x4637974e8c58aedc .quad 0xb9ef22fbabf041a4 .quad 0xe185d956e980718a .quad 0x2f1b78fab143a8a6 .quad 0x96eebffb305b2f51 .quad 0xd3f938ad889596b8 .quad 0xf0f52dc746d5dd25 .quad 0x57968290bb3a0095 .quad 0xf71ab8430a20e101 .quad 0xf393658d24f0ec47 .quad 0xcf7509a86ee2eed1 .quad 0x7dc43e35dc2aa3e1 // 2^160 * 2 * G .quad 0x85966665887dd9c3 .quad 0xc90f9b314bb05355 .quad 0xc6e08df8ef2079b1 .quad 0x7ef72016758cc12f .quad 0x5a782a5c273e9718 .quad 0x3576c6995e4efd94 .quad 0x0f2ed8051f237d3e .quad 0x044fb81d82d50a99 .quad 0xc1df18c5a907e3d9 .quad 0x57b3371dce4c6359 .quad 0xca704534b201bb49 .quad 0x7f79823f9c30dd2e // 2^160 * 3 * G .quad 0x8334d239a3b513e8 .quad 0xc13670d4b91fa8d8 .quad 0x12b54136f590bd33 .quad 0x0a4e0373d784d9b4 .quad 0x6a9c1ff068f587ba .quad 0x0827894e0050c8de .quad 0x3cbf99557ded5be7 .quad 0x64a9b0431c06d6f0 .quad 0x2eb3d6a15b7d2919 .quad 0xb0b4f6a0d53a8235 .quad 0x7156ce4389a45d47 .quad 0x071a7d0ace18346c // 2^160 * 4 * G .quad 0xd3072daac887ba0b .quad 0x01262905bfa562ee .quad 0xcf543002c0ef768b .quad 0x2c3bcc7146ea7e9c .quad 0xcc0c355220e14431 .quad 0x0d65950709b15141 .quad 0x9af5621b209d5f36 .quad 0x7c69bcf7617755d3 .quad 0x07f0d7eb04e8295f .quad 0x10db18252f50f37d .quad 0xe951a9a3171798d7 .quad 0x6f5a9a7322aca51d // 2^160 * 5 * G .quad 0x8ba1000c2f41c6c5 .quad 0xc49f79c10cfefb9b .quad 0x4efa47703cc51c9f .quad 0x494e21a2e147afca .quad 0xe729d4eba3d944be .quad 0x8d9e09408078af9e .quad 0x4525567a47869c03 .quad 0x02ab9680ee8d3b24 .quad 0xefa48a85dde50d9a .quad 0x219a224e0fb9a249 .quad 0xfa091f1dd91ef6d9 .quad 0x6b5d76cbea46bb34 // 2^160 * 6 * G .quad 0x8857556cec0cd994 .quad 0x6472dc6f5cd01dba .quad 0xaf0169148f42b477 .quad 0x0ae333f685277354 .quad 0xe0f941171e782522 .quad 0xf1e6ae74036936d3 .quad 0x408b3ea2d0fcc746 .quad 0x16fb869c03dd313e .quad 0x288e199733b60962 .quad 0x24fc72b4d8abe133 .quad 0x4811f7ed0991d03e .quad 0x3f81e38b8f70d075 // 2^160 * 7 * G .quad 0x7f910fcc7ed9affe .quad 0x545cb8a12465874b .quad 0xa8397ed24b0c4704 .quad 0x50510fc104f50993 .quad 0x0adb7f355f17c824 .quad 0x74b923c3d74299a4 .quad 0xd57c3e8bcbf8eaf7 .quad 0x0ad3e2d34cdedc3d .quad 0x6f0c0fc5336e249d .quad 0x745ede19c331cfd9 .quad 0xf2d6fd0009eefe1c .quad 0x127c158bf0fa1ebe // 2^160 * 8 * G .quad 0xf6197c422e9879a2 .quad 0xa44addd452ca3647 .quad 0x9b413fc14b4eaccb .quad 0x354ef87d07ef4f68 .quad 0xdea28fc4ae51b974 .quad 0x1d9973d3744dfe96 .quad 0x6240680b873848a8 .quad 0x4ed82479d167df95 .quad 0xfee3b52260c5d975 .quad 0x50352efceb41b0b8 .quad 0x8808ac30a9f6653c .quad 0x302d92d20539236d // 2^164 * 1 * G .quad 0x4c59023fcb3efb7c .quad 0x6c2fcb99c63c2a94 .quad 0xba4190e2c3c7e084 .quad 0x0e545daea51874d9 .quad 0x957b8b8b0df53c30 .quad 0x2a1c770a8e60f098 .quad 0xbbc7a670345796de .quad 0x22a48f9a90c99bc9 .quad 0x6b7dc0dc8d3fac58 .quad 0x5497cd6ce6e42bfd .quad 0x542f7d1bf400d305 .quad 0x4159f47f048d9136 // 2^164 * 2 * G .quad 0x20ad660839e31e32 .quad 0xf81e1bd58405be50 .quad 0xf8064056f4dabc69 .quad 0x14d23dd4ce71b975 .quad 0x748515a8bbd24839 .quad 0x77128347afb02b55 .quad 0x50ba2ac649a2a17f .quad 0x060525513ad730f1 .quad 0xf2398e098aa27f82 .quad 0x6d7982bb89a1b024 .quad 0xfa694084214dd24c .quad 0x71ab966fa32301c3 // 2^164 * 3 * G .quad 0x2dcbd8e34ded02fc .quad 0x1151f3ec596f22aa .quad 0xbca255434e0328da .quad 
0x35768fbe92411b22 .quad 0xb1088a0702809955 .quad 0x43b273ea0b43c391 .quad 0xca9b67aefe0686ed .quad 0x605eecbf8335f4ed .quad 0x83200a656c340431 .quad 0x9fcd71678ee59c2f .quad 0x75d4613f71300f8a .quad 0x7a912faf60f542f9 // 2^164 * 4 * G .quad 0xb204585e5edc1a43 .quad 0x9f0e16ee5897c73c .quad 0x5b82c0ae4e70483c .quad 0x624a170e2bddf9be .quad 0x253f4f8dfa2d5597 .quad 0x25e49c405477130c .quad 0x00c052e5996b1102 .quad 0x33cb966e33bb6c4a .quad 0x597028047f116909 .quad 0x828ac41c1e564467 .quad 0x70417dbde6217387 .quad 0x721627aefbac4384 // 2^164 * 5 * G .quad 0x97d03bc38736add5 .quad 0x2f1422afc532b130 .quad 0x3aa68a057101bbc4 .quad 0x4c946cf7e74f9fa7 .quad 0xfd3097bc410b2f22 .quad 0xf1a05da7b5cfa844 .quad 0x61289a1def57ca74 .quad 0x245ea199bb821902 .quad 0xaedca66978d477f8 .quad 0x1898ba3c29117fe1 .quad 0xcf73f983720cbd58 .quad 0x67da12e6b8b56351 // 2^164 * 6 * G .quad 0x7067e187b4bd6e07 .quad 0x6e8f0203c7d1fe74 .quad 0x93c6aa2f38c85a30 .quad 0x76297d1f3d75a78a .quad 0x2b7ef3d38ec8308c .quad 0x828fd7ec71eb94ab .quad 0x807c3b36c5062abd .quad 0x0cb64cb831a94141 .quad 0x3030fc33534c6378 .quad 0xb9635c5ce541e861 .quad 0x15d9a9bed9b2c728 .quad 0x49233ea3f3775dcb // 2^164 * 7 * G .quad 0x629398fa8dbffc3a .quad 0xe12fe52dd54db455 .quad 0xf3be11dfdaf25295 .quad 0x628b140dce5e7b51 .quad 0x7b3985fe1c9f249b .quad 0x4fd6b2d5a1233293 .quad 0xceb345941adf4d62 .quad 0x6987ff6f542de50c .quad 0x47e241428f83753c .quad 0x6317bebc866af997 .quad 0xdabb5b433d1a9829 .quad 0x074d8d245287fb2d // 2^164 * 8 * G .quad 0x8337d9cd440bfc31 .quad 0x729d2ca1af318fd7 .quad 0xa040a4a4772c2070 .quad 0x46002ef03a7349be .quad 0x481875c6c0e31488 .quad 0x219429b2e22034b4 .quad 0x7223c98a31283b65 .quad 0x3420d60b342277f9 .quad 0xfaa23adeaffe65f7 .quad 0x78261ed45be0764c .quad 0x441c0a1e2f164403 .quad 0x5aea8e567a87d395 // 2^168 * 1 * G .quad 0x7813c1a2bca4283d .quad 0xed62f091a1863dd9 .quad 0xaec7bcb8c268fa86 .quad 0x10e5d3b76f1cae4c .quad 0x2dbc6fb6e4e0f177 .quad 0x04e1bf29a4bd6a93 .quad 0x5e1966d4787af6e8 .quad 0x0edc5f5eb426d060 .quad 0x5453bfd653da8e67 .quad 0xe9dc1eec24a9f641 .quad 0xbf87263b03578a23 .quad 0x45b46c51361cba72 // 2^168 * 2 * G .quad 0xa9402abf314f7fa1 .quad 0xe257f1dc8e8cf450 .quad 0x1dbbd54b23a8be84 .quad 0x2177bfa36dcb713b .quad 0xce9d4ddd8a7fe3e4 .quad 0xab13645676620e30 .quad 0x4b594f7bb30e9958 .quad 0x5c1c0aef321229df .quad 0x37081bbcfa79db8f .quad 0x6048811ec25f59b3 .quad 0x087a76659c832487 .quad 0x4ae619387d8ab5bb // 2^168 * 3 * G .quad 0x8ddbf6aa5344a32e .quad 0x7d88eab4b41b4078 .quad 0x5eb0eb974a130d60 .quad 0x1a00d91b17bf3e03 .quad 0x61117e44985bfb83 .quad 0xfce0462a71963136 .quad 0x83ac3448d425904b .quad 0x75685abe5ba43d64 .quad 0x6e960933eb61f2b2 .quad 0x543d0fa8c9ff4952 .quad 0xdf7275107af66569 .quad 0x135529b623b0e6aa // 2^168 * 4 * G .quad 0x18f0dbd7add1d518 .quad 0x979f7888cfc11f11 .quad 0x8732e1f07114759b .quad 0x79b5b81a65ca3a01 .quad 0xf5c716bce22e83fe .quad 0xb42beb19e80985c1 .quad 0xec9da63714254aae .quad 0x5972ea051590a613 .quad 0x0fd4ac20dc8f7811 .quad 0x9a9ad294ac4d4fa8 .quad 0xc01b2d64b3360434 .quad 0x4f7e9c95905f3bdb // 2^168 * 5 * G .quad 0x62674bbc5781302e .quad 0xd8520f3989addc0f .quad 0x8c2999ae53fbd9c6 .quad 0x31993ad92e638e4c .quad 0x71c8443d355299fe .quad 0x8bcd3b1cdbebead7 .quad 0x8092499ef1a49466 .quad 0x1942eec4a144adc8 .quad 0x7dac5319ae234992 .quad 0x2c1b3d910cea3e92 .quad 0x553ce494253c1122 .quad 0x2a0a65314ef9ca75 // 2^168 * 6 * G .quad 0x2db7937ff7f927c2 .quad 0xdb741f0617d0a635 .quad 0x5982f3a21155af76 .quad 0x4cf6e218647c2ded .quad 0xcf361acd3c1c793a .quad 
0x2f9ebcac5a35bc3b .quad 0x60e860e9a8cda6ab .quad 0x055dc39b6dea1a13 .quad 0xb119227cc28d5bb6 .quad 0x07e24ebc774dffab .quad 0xa83c78cee4a32c89 .quad 0x121a307710aa24b6 // 2^168 * 7 * G .quad 0xe4db5d5e9f034a97 .quad 0xe153fc093034bc2d .quad 0x460546919551d3b1 .quad 0x333fc76c7a40e52d .quad 0xd659713ec77483c9 .quad 0x88bfe077b82b96af .quad 0x289e28231097bcd3 .quad 0x527bb94a6ced3a9b .quad 0x563d992a995b482e .quad 0x3405d07c6e383801 .quad 0x485035de2f64d8e5 .quad 0x6b89069b20a7a9f7 // 2^168 * 8 * G .quad 0x812aa0416270220d .quad 0x995a89faf9245b4e .quad 0xffadc4ce5072ef05 .quad 0x23bc2103aa73eb73 .quad 0x4082fa8cb5c7db77 .quad 0x068686f8c734c155 .quad 0x29e6c8d9f6e7a57e .quad 0x0473d308a7639bcf .quad 0xcaee792603589e05 .quad 0x2b4b421246dcc492 .quad 0x02a1ef74e601a94f .quad 0x102f73bfde04341a // 2^172 * 1 * G .quad 0xb5a2d50c7ec20d3e .quad 0xc64bdd6ea0c97263 .quad 0x56e89052c1ff734d .quad 0x4929c6f72b2ffaba .quad 0x358ecba293a36247 .quad 0xaf8f9862b268fd65 .quad 0x412f7e9968a01c89 .quad 0x5786f312cd754524 .quad 0x337788ffca14032c .quad 0xf3921028447f1ee3 .quad 0x8b14071f231bccad .quad 0x4c817b4bf2344783 // 2^172 * 2 * G .quad 0x0ff853852871b96e .quad 0xe13e9fab60c3f1bb .quad 0xeefd595325344402 .quad 0x0a37c37075b7744b .quad 0x413ba057a40b4484 .quad 0xba4c2e1a4f5f6a43 .quad 0x614ba0a5aee1d61c .quad 0x78a1531a8b05dc53 .quad 0x6cbdf1703ad0562b .quad 0x8ecf4830c92521a3 .quad 0xdaebd303fd8424e7 .quad 0x72ad82a42e5ec56f // 2^172 * 3 * G .quad 0x3f9e8e35bafb65f6 .quad 0x39d69ec8f27293a1 .quad 0x6cb8cd958cf6a3d0 .quad 0x1734778173adae6d .quad 0xc368939167024bc3 .quad 0x8e69d16d49502fda .quad 0xfcf2ec3ce45f4b29 .quad 0x065f669ea3b4cbc4 .quad 0x8a00aec75532db4d .quad 0xb869a4e443e31bb1 .quad 0x4a0f8552d3a7f515 .quad 0x19adeb7c303d7c08 // 2^172 * 4 * G .quad 0xc720cb6153ead9a3 .quad 0x55b2c97f512b636e .quad 0xb1e35b5fd40290b1 .quad 0x2fd9ccf13b530ee2 .quad 0x9d05ba7d43c31794 .quad 0x2470c8ff93322526 .quad 0x8323dec816197438 .quad 0x2852709881569b53 .quad 0x07bd475b47f796b8 .quad 0xd2c7b013542c8f54 .quad 0x2dbd23f43b24f87e .quad 0x6551afd77b0901d6 // 2^172 * 5 * G .quad 0x4546baaf54aac27f .quad 0xf6f66fecb2a45a28 .quad 0x582d1b5b562bcfe8 .quad 0x44b123f3920f785f .quad 0x68a24ce3a1d5c9ac .quad 0xbb77a33d10ff6461 .quad 0x0f86ce4425d3166e .quad 0x56507c0950b9623b .quad 0x1206f0b7d1713e63 .quad 0x353fe3d915bafc74 .quad 0x194ceb970ad9d94d .quad 0x62fadd7cf9d03ad3 // 2^172 * 6 * G .quad 0xc6b5967b5598a074 .quad 0x5efe91ce8e493e25 .quad 0xd4b72c4549280888 .quad 0x20ef1149a26740c2 .quad 0x3cd7bc61e7ce4594 .quad 0xcd6b35a9b7dd267e .quad 0xa080abc84366ef27 .quad 0x6ec7c46f59c79711 .quad 0x2f07ad636f09a8a2 .quad 0x8697e6ce24205e7d .quad 0xc0aefc05ee35a139 .quad 0x15e80958b5f9d897 // 2^172 * 7 * G .quad 0x25a5ef7d0c3e235b .quad 0x6c39c17fbe134ee7 .quad 0xc774e1342dc5c327 .quad 0x021354b892021f39 .quad 0x4dd1ed355bb061c4 .quad 0x42dc0cef941c0700 .quad 0x61305dc1fd86340e .quad 0x56b2cc930e55a443 .quad 0x1df79da6a6bfc5a2 .quad 0x02f3a2749fde4369 .quad 0xb323d9f2cda390a7 .quad 0x7be0847b8774d363 // 2^172 * 8 * G .quad 0x8c99cc5a8b3f55c3 .quad 0x0611d7253fded2a0 .quad 0xed2995ff36b70a36 .quad 0x1f699a54d78a2619 .quad 0x1466f5af5307fa11 .quad 0x817fcc7ded6c0af2 .quad 0x0a6de44ec3a4a3fb .quad 0x74071475bc927d0b .quad 0xe77292f373e7ea8a .quad 0x296537d2cb045a31 .quad 0x1bd0653ed3274fde .quad 0x2f9a2c4476bd2966 // 2^176 * 1 * G .quad 0xeb18b9ab7f5745c6 .quad 0x023a8aee5787c690 .quad 0xb72712da2df7afa9 .quad 0x36597d25ea5c013d .quad 0xa2b4dae0b5511c9a .quad 0x7ac860292bffff06 .quad 0x981f375df5504234 .quad 
0x3f6bd725da4ea12d .quad 0x734d8d7b106058ac .quad 0xd940579e6fc6905f .quad 0x6466f8f99202932d .quad 0x7b7ecc19da60d6d0 // 2^176 * 2 * G .quad 0x78c2373c695c690d .quad 0xdd252e660642906e .quad 0x951d44444ae12bd2 .quad 0x4235ad7601743956 .quad 0x6dae4a51a77cfa9b .quad 0x82263654e7a38650 .quad 0x09bbffcd8f2d82db .quad 0x03bedc661bf5caba .quad 0x6258cb0d078975f5 .quad 0x492942549189f298 .quad 0xa0cab423e2e36ee4 .quad 0x0e7ce2b0cdf066a1 // 2^176 * 3 * G .quad 0xc494643ac48c85a3 .quad 0xfd361df43c6139ad .quad 0x09db17dd3ae94d48 .quad 0x666e0a5d8fb4674a .quad 0xfea6fedfd94b70f9 .quad 0xf130c051c1fcba2d .quad 0x4882d47e7f2fab89 .quad 0x615256138aeceeb5 .quad 0x2abbf64e4870cb0d .quad 0xcd65bcf0aa458b6b .quad 0x9abe4eba75e8985d .quad 0x7f0bc810d514dee4 // 2^176 * 4 * G .quad 0xb9006ba426f4136f .quad 0x8d67369e57e03035 .quad 0xcbc8dfd94f463c28 .quad 0x0d1f8dbcf8eedbf5 .quad 0x83ac9dad737213a0 .quad 0x9ff6f8ba2ef72e98 .quad 0x311e2edd43ec6957 .quad 0x1d3a907ddec5ab75 .quad 0xba1693313ed081dc .quad 0x29329fad851b3480 .quad 0x0128013c030321cb .quad 0x00011b44a31bfde3 // 2^176 * 5 * G .quad 0x3fdfa06c3fc66c0c .quad 0x5d40e38e4dd60dd2 .quad 0x7ae38b38268e4d71 .quad 0x3ac48d916e8357e1 .quad 0x16561f696a0aa75c .quad 0xc1bf725c5852bd6a .quad 0x11a8dd7f9a7966ad .quad 0x63d988a2d2851026 .quad 0x00120753afbd232e .quad 0xe92bceb8fdd8f683 .quad 0xf81669b384e72b91 .quad 0x33fad52b2368a066 // 2^176 * 6 * G .quad 0x540649c6c5e41e16 .quad 0x0af86430333f7735 .quad 0xb2acfcd2f305e746 .quad 0x16c0f429a256dca7 .quad 0x8d2cc8d0c422cfe8 .quad 0x072b4f7b05a13acb .quad 0xa3feb6e6ecf6a56f .quad 0x3cc355ccb90a71e2 .quad 0xe9b69443903e9131 .quad 0xb8a494cb7a5637ce .quad 0xc87cd1a4baba9244 .quad 0x631eaf426bae7568 // 2^176 * 7 * G .quad 0xb3e90410da66fe9f .quad 0x85dd4b526c16e5a6 .quad 0xbc3d97611ef9bf83 .quad 0x5599648b1ea919b5 .quad 0x47d975b9a3700de8 .quad 0x7280c5fbe2f80552 .quad 0x53658f2732e45de1 .quad 0x431f2c7f665f80b5 .quad 0xd6026344858f7b19 .quad 0x14ab352fa1ea514a .quad 0x8900441a2090a9d7 .quad 0x7b04715f91253b26 // 2^176 * 8 * G .quad 0x83edbd28acf6ae43 .quad 0x86357c8b7d5c7ab4 .quad 0xc0404769b7eb2c44 .quad 0x59b37bf5c2f6583f .quad 0xb376c280c4e6bac6 .quad 0x970ed3dd6d1d9b0b .quad 0xb09a9558450bf944 .quad 0x48d0acfa57cde223 .quad 0xb60f26e47dabe671 .quad 0xf1d1a197622f3a37 .quad 0x4208ce7ee9960394 .quad 0x16234191336d3bdb // 2^180 * 1 * G .quad 0xf19aeac733a63aef .quad 0x2c7fba5d4442454e .quad 0x5da87aa04795e441 .quad 0x413051e1a4e0b0f5 .quad 0x852dd1fd3d578bbe .quad 0x2b65ce72c3286108 .quad 0x658c07f4eace2273 .quad 0x0933f804ec38ab40 .quad 0xa7ab69798d496476 .quad 0x8121aadefcb5abc8 .quad 0xa5dc12ef7b539472 .quad 0x07fd47065e45351a // 2^180 * 2 * G .quad 0xc8583c3d258d2bcd .quad 0x17029a4daf60b73f .quad 0xfa0fc9d6416a3781 .quad 0x1c1e5fba38b3fb23 .quad 0x304211559ae8e7c3 .quad 0xf281b229944882a5 .quad 0x8a13ac2e378250e4 .quad 0x014afa0954ba48f4 .quad 0xcb3197001bb3666c .quad 0x330060524bffecb9 .quad 0x293711991a88233c .quad 0x291884363d4ed364 // 2^180 * 3 * G .quad 0x033c6805dc4babfa .quad 0x2c15bf5e5596ecc1 .quad 0x1bc70624b59b1d3b .quad 0x3ede9850a19f0ec5 .quad 0xfb9d37c3bc1ab6eb .quad 0x02be14534d57a240 .quad 0xf4d73415f8a5e1f6 .quad 0x5964f4300ccc8188 .quad 0xe44a23152d096800 .quad 0x5c08c55970866996 .quad 0xdf2db60a46affb6e .quad 0x579155c1f856fd89 // 2^180 * 4 * G .quad 0x96324edd12e0c9ef .quad 0x468b878df2420297 .quad 0x199a3776a4f573be .quad 0x1e7fbcf18e91e92a .quad 0xb5f16b630817e7a6 .quad 0x808c69233c351026 .quad 0x324a983b54cef201 .quad 0x53c092084a485345 .quad 0xd2d41481f1cbafbf .quad 
0x231d2db6716174e5 .quad 0x0b7d7656e2a55c98 .quad 0x3e955cd82aa495f6 // 2^180 * 5 * G .quad 0xe48f535e3ed15433 .quad 0xd075692a0d7270a3 .quad 0x40fbd21daade6387 .quad 0x14264887cf4495f5 .quad 0xab39f3ef61bb3a3f .quad 0x8eb400652eb9193e .quad 0xb5de6ecc38c11f74 .quad 0x654d7e9626f3c49f .quad 0xe564cfdd5c7d2ceb .quad 0x82eeafded737ccb9 .quad 0x6107db62d1f9b0ab .quad 0x0b6baac3b4358dbb // 2^180 * 6 * G .quad 0x7ae62bcb8622fe98 .quad 0x47762256ceb891af .quad 0x1a5a92bcf2e406b4 .quad 0x7d29401784e41501 .quad 0x204abad63700a93b .quad 0xbe0023d3da779373 .quad 0xd85f0346633ab709 .quad 0x00496dc490820412 .quad 0x1c74b88dc27e6360 .quad 0x074854268d14850c .quad 0xa145fb7b3e0dcb30 .quad 0x10843f1b43803b23 // 2^180 * 7 * G .quad 0xc5f90455376276dd .quad 0xce59158dd7645cd9 .quad 0x92f65d511d366b39 .quad 0x11574b6e526996c4 .quad 0xd56f672de324689b .quad 0xd1da8aedb394a981 .quad 0xdd7b58fe9168cfed .quad 0x7ce246cd4d56c1e8 .quad 0xb8f4308e7f80be53 .quad 0x5f3cb8cb34a9d397 .quad 0x18a961bd33cc2b2c .quad 0x710045fb3a9af671 // 2^180 * 8 * G .quad 0x73f93d36101b95eb .quad 0xfaef33794f6f4486 .quad 0x5651735f8f15e562 .quad 0x7fa3f19058b40da1 .quad 0xa03fc862059d699e .quad 0x2370cfa19a619e69 .quad 0xc4fe3b122f823deb .quad 0x1d1b056fa7f0844e .quad 0x1bc64631e56bf61f .quad 0xd379ab106e5382a3 .quad 0x4d58c57e0540168d .quad 0x566256628442d8e4 // 2^184 * 1 * G .quad 0xb9e499def6267ff6 .quad 0x7772ca7b742c0843 .quad 0x23a0153fe9a4f2b1 .quad 0x2cdfdfecd5d05006 .quad 0xdd499cd61ff38640 .quad 0x29cd9bc3063625a0 .quad 0x51e2d8023dd73dc3 .quad 0x4a25707a203b9231 .quad 0x2ab7668a53f6ed6a .quad 0x304242581dd170a1 .quad 0x4000144c3ae20161 .quad 0x5721896d248e49fc // 2^184 * 2 * G .quad 0x0b6e5517fd181bae .quad 0x9022629f2bb963b4 .quad 0x5509bce932064625 .quad 0x578edd74f63c13da .quad 0x285d5091a1d0da4e .quad 0x4baa6fa7b5fe3e08 .quad 0x63e5177ce19393b3 .quad 0x03c935afc4b030fd .quad 0x997276c6492b0c3d .quad 0x47ccc2c4dfe205fc .quad 0xdcd29b84dd623a3c .quad 0x3ec2ab590288c7a2 // 2^184 * 3 * G .quad 0xa1a0d27be4d87bb9 .quad 0xa98b4deb61391aed .quad 0x99a0ddd073cb9b83 .quad 0x2dd5c25a200fcace .quad 0xa7213a09ae32d1cb .quad 0x0f2b87df40f5c2d5 .quad 0x0baea4c6e81eab29 .quad 0x0e1bf66c6adbac5e .quad 0xe2abd5e9792c887e .quad 0x1a020018cb926d5d .quad 0xbfba69cdbaae5f1e .quad 0x730548b35ae88f5f // 2^184 * 4 * G .quad 0xc43551a3cba8b8ee .quad 0x65a26f1db2115f16 .quad 0x760f4f52ab8c3850 .quad 0x3043443b411db8ca .quad 0x805b094ba1d6e334 .quad 0xbf3ef17709353f19 .quad 0x423f06cb0622702b .quad 0x585a2277d87845dd .quad 0xa18a5f8233d48962 .quad 0x6698c4b5ec78257f .quad 0xa78e6fa5373e41ff .quad 0x7656278950ef981f // 2^184 * 5 * G .quad 0x38c3cf59d51fc8c0 .quad 0x9bedd2fd0506b6f2 .quad 0x26bf109fab570e8f .quad 0x3f4160a8c1b846a6 .quad 0xe17073a3ea86cf9d .quad 0x3a8cfbb707155fdc .quad 0x4853e7fc31838a8e .quad 0x28bbf484b613f616 .quad 0xf2612f5c6f136c7c .quad 0xafead107f6dd11be .quad 0x527e9ad213de6f33 .quad 0x1e79cb358188f75d // 2^184 * 6 * G .quad 0x013436c3eef7e3f1 .quad 0x828b6a7ffe9e10f8 .quad 0x7ff908e5bcf9defc .quad 0x65d7951b3a3b3831 .quad 0x77e953d8f5e08181 .quad 0x84a50c44299dded9 .quad 0xdc6c2d0c864525e5 .quad 0x478ab52d39d1f2f4 .quad 0x66a6a4d39252d159 .quad 0xe5dde1bc871ac807 .quad 0xb82c6b40a6c1c96f .quad 0x16d87a411a212214 // 2^184 * 7 * G .quad 0xb3bd7e5a42066215 .quad 0x879be3cd0c5a24c1 .quad 0x57c05db1d6f994b7 .quad 0x28f87c8165f38ca6 .quad 0xfba4d5e2d54e0583 .quad 0xe21fafd72ebd99fa .quad 0x497ac2736ee9778f .quad 0x1f990b577a5a6dde .quad 0xa3344ead1be8f7d6 .quad 0x7d1e50ebacea798f .quad 0x77c6569e520de052 .quad 
0x45882fe1534d6d3e // 2^184 * 8 * G .quad 0x6669345d757983d6 .quad 0x62b6ed1117aa11a6 .quad 0x7ddd1857985e128f .quad 0x688fe5b8f626f6dd .quad 0xd8ac9929943c6fe4 .quad 0xb5f9f161a38392a2 .quad 0x2699db13bec89af3 .quad 0x7dcf843ce405f074 .quad 0x6c90d6484a4732c0 .quad 0xd52143fdca563299 .quad 0xb3be28c3915dc6e1 .quad 0x6739687e7327191b // 2^188 * 1 * G .quad 0x9f65c5ea200814cf .quad 0x840536e169a31740 .quad 0x8b0ed13925c8b4ad .quad 0x0080dbafe936361d .quad 0x8ce5aad0c9cb971f .quad 0x1156aaa99fd54a29 .quad 0x41f7247015af9b78 .quad 0x1fe8cca8420f49aa .quad 0x72a1848f3c0cc82a .quad 0x38c560c2877c9e54 .quad 0x5004e228ce554140 .quad 0x042418a103429d71 // 2^188 * 2 * G .quad 0x899dea51abf3ff5f .quad 0x9b93a8672fc2d8ba .quad 0x2c38cb97be6ebd5c .quad 0x114d578497263b5d .quad 0x58e84c6f20816247 .quad 0x8db2b2b6e36fd793 .quad 0x977182561d484d85 .quad 0x0822024f8632abd7 .quad 0xb301bb7c6b1beca3 .quad 0x55393f6dc6eb1375 .quad 0x910d281097b6e4eb .quad 0x1ad4548d9d479ea3 // 2^188 * 3 * G .quad 0xcd5a7da0389a48fd .quad 0xb38fa4aa9a78371e .quad 0xc6d9761b2cdb8e6c .quad 0x35cf51dbc97e1443 .quad 0xa06fe66d0fe9fed3 .quad 0xa8733a401c587909 .quad 0x30d14d800df98953 .quad 0x41ce5876c7b30258 .quad 0x59ac3bc5d670c022 .quad 0xeae67c109b119406 .quad 0x9798bdf0b3782fda .quad 0x651e3201fd074092 // 2^188 * 4 * G .quad 0xd63d8483ef30c5cf .quad 0x4cd4b4962361cc0c .quad 0xee90e500a48426ac .quad 0x0af51d7d18c14eeb .quad 0xa57ba4a01efcae9e .quad 0x769f4beedc308a94 .quad 0xd1f10eeb3603cb2e .quad 0x4099ce5e7e441278 .quad 0x1ac98e4f8a5121e9 .quad 0x7dae9544dbfa2fe0 .quad 0x8320aa0dd6430df9 .quad 0x667282652c4a2fb5 // 2^188 * 5 * G .quad 0x874621f4d86bc9ab .quad 0xb54c7bbe56fe6fea .quad 0x077a24257fadc22c .quad 0x1ab53be419b90d39 .quad 0xada8b6e02946db23 .quad 0x1c0ce51a7b253ab7 .quad 0x8448c85a66dd485b .quad 0x7f1fc025d0675adf .quad 0xd8ee1b18319ea6aa .quad 0x004d88083a21f0da .quad 0x3bd6aa1d883a4f4b .quad 0x4db9a3a6dfd9fd14 // 2^188 * 6 * G .quad 0x8ce7b23bb99c0755 .quad 0x35c5d6edc4f50f7a .quad 0x7e1e2ed2ed9b50c3 .quad 0x36305f16e8934da1 .quad 0xd95b00bbcbb77c68 .quad 0xddbc846a91f17849 .quad 0x7cf700aebe28d9b3 .quad 0x5ce1285c85d31f3e .quad 0x31b6972d98b0bde8 .quad 0x7d920706aca6de5b .quad 0xe67310f8908a659f .quad 0x50fac2a6efdf0235 // 2^188 * 7 * G .quad 0xf3d3a9f35b880f5a .quad 0xedec050cdb03e7c2 .quad 0xa896981ff9f0b1a2 .quad 0x49a4ae2bac5e34a4 .quad 0x295b1c86f6f449bc .quad 0x51b2e84a1f0ab4dd .quad 0xc001cb30aa8e551d .quad 0x6a28d35944f43662 .quad 0x28bb12ee04a740e0 .quad 0x14313bbd9bce8174 .quad 0x72f5b5e4e8c10c40 .quad 0x7cbfb19936adcd5b // 2^188 * 8 * G .quad 0xa311ddc26b89792d .quad 0x1b30b4c6da512664 .quad 0x0ca77b4ccf150859 .quad 0x1de443df1b009408 .quad 0x8e793a7acc36e6e0 .quad 0xf9fab7a37d586eed .quad 0x3a4f9692bae1f4e4 .quad 0x1c14b03eff5f447e .quad 0x19647bd114a85291 .quad 0x57b76cb21034d3af .quad 0x6329db440f9d6dfa .quad 0x5ef43e586a571493 // 2^192 * 1 * G .quad 0xef782014385675a6 .quad 0xa2649f30aafda9e8 .quad 0x4cd1eb505cdfa8cb .quad 0x46115aba1d4dc0b3 .quad 0xa66dcc9dc80c1ac0 .quad 0x97a05cf41b38a436 .quad 0xa7ebf3be95dbd7c6 .quad 0x7da0b8f68d7e7dab .quad 0xd40f1953c3b5da76 .quad 0x1dac6f7321119e9b .quad 0x03cc6021feb25960 .quad 0x5a5f887e83674b4b // 2^192 * 2 * G .quad 0x8f6301cf70a13d11 .quad 0xcfceb815350dd0c4 .quad 0xf70297d4a4bca47e .quad 0x3669b656e44d1434 .quad 0x9e9628d3a0a643b9 .quad 0xb5c3cb00e6c32064 .quad 0x9b5302897c2dec32 .quad 0x43e37ae2d5d1c70c .quad 0x387e3f06eda6e133 .quad 0x67301d5199a13ac0 .quad 0xbd5ad8f836263811 .quad 0x6a21e6cd4fd5e9be // 2^192 * 3 * G .quad 0xf1c6170a3046e65f .quad 
0x58712a2a00d23524 .quad 0x69dbbd3c8c82b755 .quad 0x586bf9f1a195ff57 .quad 0xef4129126699b2e3 .quad 0x71d30847708d1301 .quad 0x325432d01182b0bd .quad 0x45371b07001e8b36 .quad 0xa6db088d5ef8790b .quad 0x5278f0dc610937e5 .quad 0xac0349d261a16eb8 .quad 0x0eafb03790e52179 // 2^192 * 4 * G .quad 0x960555c13748042f .quad 0x219a41e6820baa11 .quad 0x1c81f73873486d0c .quad 0x309acc675a02c661 .quad 0x5140805e0f75ae1d .quad 0xec02fbe32662cc30 .quad 0x2cebdf1eea92396d .quad 0x44ae3344c5435bb3 .quad 0x9cf289b9bba543ee .quad 0xf3760e9d5ac97142 .quad 0x1d82e5c64f9360aa .quad 0x62d5221b7f94678f // 2^192 * 5 * G .quad 0x524c299c18d0936d .quad 0xc86bb56c8a0c1a0c .quad 0xa375052edb4a8631 .quad 0x5c0efde4bc754562 .quad 0x7585d4263af77a3c .quad 0xdfae7b11fee9144d .quad 0xa506708059f7193d .quad 0x14f29a5383922037 .quad 0xdf717edc25b2d7f5 .quad 0x21f970db99b53040 .quad 0xda9234b7c3ed4c62 .quad 0x5e72365c7bee093e // 2^192 * 6 * G .quad 0x575bfc074571217f .quad 0x3779675d0694d95b .quad 0x9a0a37bbf4191e33 .quad 0x77f1104c47b4eabc .quad 0x7d9339062f08b33e .quad 0x5b9659e5df9f32be .quad 0xacff3dad1f9ebdfd .quad 0x70b20555cb7349b7 .quad 0xbe5113c555112c4c .quad 0x6688423a9a881fcd .quad 0x446677855e503b47 .quad 0x0e34398f4a06404a // 2^192 * 7 * G .quad 0xb67d22d93ecebde8 .quad 0x09b3e84127822f07 .quad 0x743fa61fb05b6d8d .quad 0x5e5405368a362372 .quad 0x18930b093e4b1928 .quad 0x7de3e10e73f3f640 .quad 0xf43217da73395d6f .quad 0x6f8aded6ca379c3e .quad 0xe340123dfdb7b29a .quad 0x487b97e1a21ab291 .quad 0xf9967d02fde6949e .quad 0x780de72ec8d3de97 // 2^192 * 8 * G .quad 0x0ae28545089ae7bc .quad 0x388ddecf1c7f4d06 .quad 0x38ac15510a4811b8 .quad 0x0eb28bf671928ce4 .quad 0x671feaf300f42772 .quad 0x8f72eb2a2a8c41aa .quad 0x29a17fd797373292 .quad 0x1defc6ad32b587a6 .quad 0xaf5bbe1aef5195a7 .quad 0x148c1277917b15ed .quad 0x2991f7fb7ae5da2e .quad 0x467d201bf8dd2867 // 2^196 * 1 * G .quad 0x7906ee72f7bd2e6b .quad 0x05d270d6109abf4e .quad 0x8d5cfe45b941a8a4 .quad 0x44c218671c974287 .quad 0x745f9d56296bc318 .quad 0x993580d4d8152e65 .quad 0xb0e5b13f5839e9ce .quad 0x51fc2b28d43921c0 .quad 0x1b8fd11795e2a98c .quad 0x1c4e5ee12b6b6291 .quad 0x5b30e7107424b572 .quad 0x6e6b9de84c4f4ac6 // 2^196 * 2 * G .quad 0xdff25fce4b1de151 .quad 0xd841c0c7e11c4025 .quad 0x2554b3c854749c87 .quad 0x2d292459908e0df9 .quad 0x6b7c5f10f80cb088 .quad 0x736b54dc56e42151 .quad 0xc2b620a5c6ef99c4 .quad 0x5f4c802cc3a06f42 .quad 0x9b65c8f17d0752da .quad 0x881ce338c77ee800 .quad 0xc3b514f05b62f9e3 .quad 0x66ed5dd5bec10d48 // 2^196 * 3 * G .quad 0x7d38a1c20bb2089d .quad 0x808334e196ccd412 .quad 0xc4a70b8c6c97d313 .quad 0x2eacf8bc03007f20 .quad 0xf0adf3c9cbca047d .quad 0x81c3b2cbf4552f6b .quad 0xcfda112d44735f93 .quad 0x1f23a0c77e20048c .quad 0xf235467be5bc1570 .quad 0x03d2d9020dbab38c .quad 0x27529aa2fcf9e09e .quad 0x0840bef29d34bc50 // 2^196 * 4 * G .quad 0x796dfb35dc10b287 .quad 0x27176bcd5c7ff29d .quad 0x7f3d43e8c7b24905 .quad 0x0304f5a191c54276 .quad 0xcd54e06b7f37e4eb .quad 0x8cc15f87f5e96cca .quad 0xb8248bb0d3597dce .quad 0x246affa06074400c .quad 0x37d88e68fbe45321 .quad 0x86097548c0d75032 .quad 0x4e9b13ef894a0d35 .quad 0x25a83cac5753d325 // 2^196 * 5 * G .quad 0x10222f48eed8165e .quad 0x623fc1234b8bcf3a .quad 0x1e145c09c221e8f0 .quad 0x7ccfa59fca782630 .quad 0x9f0f66293952b6e2 .quad 0x33db5e0e0934267b .quad 0xff45252bd609fedc .quad 0x06be10f5c506e0c9 .quad 0x1a9615a9b62a345f .quad 0x22050c564a52fecc .quad 0xa7a2788528bc0dfe .quad 0x5e82770a1a1ee71d // 2^196 * 6 * G .quad 0x35425183ad896a5c .quad 0xe8673afbe78d52f6 .quad 0x2c66f25f92a35f64 .quad 
0x09d04f3b3b86b102 .quad 0xe802e80a42339c74 .quad 0x34175166a7fffae5 .quad 0x34865d1f1c408cae .quad 0x2cca982c605bc5ee .quad 0xfd2d5d35197dbe6e .quad 0x207c2eea8be4ffa3 .quad 0x2613d8db325ae918 .quad 0x7a325d1727741d3e // 2^196 * 7 * G .quad 0xd036b9bbd16dfde2 .quad 0xa2055757c497a829 .quad 0x8e6cc966a7f12667 .quad 0x4d3b1a791239c180 .quad 0xecd27d017e2a076a .quad 0xd788689f1636495e .quad 0x52a61af0919233e5 .quad 0x2a479df17bb1ae64 .quad 0x9e5eee8e33db2710 .quad 0x189854ded6c43ca5 .quad 0xa41c22c592718138 .quad 0x27ad5538a43a5e9b // 2^196 * 8 * G .quad 0x2746dd4b15350d61 .quad 0xd03fcbc8ee9521b7 .quad 0xe86e365a138672ca .quad 0x510e987f7e7d89e2 .quad 0xcb5a7d638e47077c .quad 0x8db7536120a1c059 .quad 0x549e1e4d8bedfdcc .quad 0x080153b7503b179d .quad 0xdda69d930a3ed3e3 .quad 0x3d386ef1cd60a722 .quad 0xc817ad58bdaa4ee6 .quad 0x23be8d554fe7372a // 2^200 * 1 * G .quad 0x95fe919a74ef4fad .quad 0x3a827becf6a308a2 .quad 0x964e01d309a47b01 .quad 0x71c43c4f5ba3c797 .quad 0xbc1ef4bd567ae7a9 .quad 0x3f624cb2d64498bd .quad 0xe41064d22c1f4ec8 .quad 0x2ef9c5a5ba384001 .quad 0xb6fd6df6fa9e74cd .quad 0xf18278bce4af267a .quad 0x8255b3d0f1ef990e .quad 0x5a758ca390c5f293 // 2^200 * 2 * G .quad 0xa2b72710d9462495 .quad 0x3aa8c6d2d57d5003 .quad 0xe3d400bfa0b487ca .quad 0x2dbae244b3eb72ec .quad 0x8ce0918b1d61dc94 .quad 0x8ded36469a813066 .quad 0xd4e6a829afe8aad3 .quad 0x0a738027f639d43f .quad 0x980f4a2f57ffe1cc .quad 0x00670d0de1839843 .quad 0x105c3f4a49fb15fd .quad 0x2698ca635126a69c // 2^200 * 3 * G .quad 0xe765318832b0ba78 .quad 0x381831f7925cff8b .quad 0x08a81b91a0291fcc .quad 0x1fb43dcc49caeb07 .quad 0x2e3d702f5e3dd90e .quad 0x9e3f0918e4d25386 .quad 0x5e773ef6024da96a .quad 0x3c004b0c4afa3332 .quad 0x9aa946ac06f4b82b .quad 0x1ca284a5a806c4f3 .quad 0x3ed3265fc6cd4787 .quad 0x6b43fd01cd1fd217 // 2^200 * 4 * G .quad 0xc7a75d4b4697c544 .quad 0x15fdf848df0fffbf .quad 0x2868b9ebaa46785a .quad 0x5a68d7105b52f714 .quad 0xb5c742583e760ef3 .quad 0x75dc52b9ee0ab990 .quad 0xbf1427c2072b923f .quad 0x73420b2d6ff0d9f0 .quad 0xaf2cf6cb9e851e06 .quad 0x8f593913c62238c4 .quad 0xda8ab89699fbf373 .quad 0x3db5632fea34bc9e // 2^200 * 5 * G .quad 0xf46eee2bf75dd9d8 .quad 0x0d17b1f6396759a5 .quad 0x1bf2d131499e7273 .quad 0x04321adf49d75f13 .quad 0x2e4990b1829825d5 .quad 0xedeaeb873e9a8991 .quad 0xeef03d394c704af8 .quad 0x59197ea495df2b0e .quad 0x04e16019e4e55aae .quad 0xe77b437a7e2f92e9 .quad 0xc7ce2dc16f159aa4 .quad 0x45eafdc1f4d70cc0 // 2^200 * 6 * G .quad 0x698401858045d72b .quad 0x4c22faa2cf2f0651 .quad 0x941a36656b222dc6 .quad 0x5a5eebc80362dade .quad 0xb60e4624cfccb1ed .quad 0x59dbc292bd5c0395 .quad 0x31a09d1ddc0481c9 .quad 0x3f73ceea5d56d940 .quad 0xb7a7bfd10a4e8dc6 .quad 0xbe57007e44c9b339 .quad 0x60c1207f1557aefa .quad 0x26058891266218db // 2^200 * 7 * G .quad 0x59f704a68360ff04 .quad 0xc3d93fde7661e6f4 .quad 0x831b2a7312873551 .quad 0x54ad0c2e4e615d57 .quad 0x4c818e3cc676e542 .quad 0x5e422c9303ceccad .quad 0xec07cccab4129f08 .quad 0x0dedfa10b24443b8 .quad 0xee3b67d5b82b522a .quad 0x36f163469fa5c1eb .quad 0xa5b4d2f26ec19fd3 .quad 0x62ecb2baa77a9408 // 2^200 * 8 * G .quad 0xe5ed795261152b3d .quad 0x4962357d0eddd7d1 .quad 0x7482c8d0b96b4c71 .quad 0x2e59f919a966d8be .quad 0x92072836afb62874 .quad 0x5fcd5e8579e104a5 .quad 0x5aad01adc630a14a .quad 0x61913d5075663f98 .quad 0x0dc62d361a3231da .quad 0xfa47583294200270 .quad 0x02d801513f9594ce .quad 0x3ddbc2a131c05d5c // 2^204 * 1 * G .quad 0x3f50a50a4ffb81ef .quad 0xb1e035093bf420bf .quad 0x9baa8e1cc6aa2cd0 .quad 0x32239861fa237a40 .quad 0xfb735ac2004a35d1 .quad 
0x31de0f433a6607c3 .quad 0x7b8591bfc528d599 .quad 0x55be9a25f5bb050c .quad 0x0d005acd33db3dbf .quad 0x0111b37c80ac35e2 .quad 0x4892d66c6f88ebeb .quad 0x770eadb16508fbcd // 2^204 * 2 * G .quad 0x8451f9e05e4e89dd .quad 0xc06302ffbc793937 .quad 0x5d22749556a6495c .quad 0x09a6755ca05603fb .quad 0xf1d3b681a05071b9 .quad 0x2207659a3592ff3a .quad 0x5f0169297881e40e .quad 0x16bedd0e86ba374e .quad 0x5ecccc4f2c2737b5 .quad 0x43b79e0c2dccb703 .quad 0x33e008bc4ec43df3 .quad 0x06c1b840f07566c0 // 2^204 * 3 * G .quad 0x7688a5c6a388f877 .quad 0x02a96c14deb2b6ac .quad 0x64c9f3431b8c2af8 .quad 0x3628435554a1eed6 .quad 0x69ee9e7f9b02805c .quad 0xcbff828a547d1640 .quad 0x3d93a869b2430968 .quad 0x46b7b8cd3fe26972 .quad 0xe9812086fe7eebe0 .quad 0x4cba6be72f515437 .quad 0x1d04168b516efae9 .quad 0x5ea1391043982cb9 // 2^204 * 4 * G .quad 0x49125c9cf4702ee1 .quad 0x4520b71f8b25b32d .quad 0x33193026501fef7e .quad 0x656d8997c8d2eb2b .quad 0x6f2b3be4d5d3b002 .quad 0xafec33d96a09c880 .quad 0x035f73a4a8bcc4cc .quad 0x22c5b9284662198b .quad 0xcb58c8fe433d8939 .quad 0x89a0cb2e6a8d7e50 .quad 0x79ca955309fbbe5a .quad 0x0c626616cd7fc106 // 2^204 * 5 * G .quad 0x1ffeb80a4879b61f .quad 0x6396726e4ada21ed .quad 0x33c7b093368025ba .quad 0x471aa0c6f3c31788 .quad 0x8fdfc379fbf454b1 .quad 0x45a5a970f1a4b771 .quad 0xac921ef7bad35915 .quad 0x42d088dca81c2192 .quad 0x8fda0f37a0165199 .quad 0x0adadb77c8a0e343 .quad 0x20fbfdfcc875e820 .quad 0x1cf2bea80c2206e7 // 2^204 * 6 * G .quad 0xc2ddf1deb36202ac .quad 0x92a5fe09d2e27aa5 .quad 0x7d1648f6fc09f1d3 .quad 0x74c2cc0513bc4959 .quad 0x982d6e1a02c0412f .quad 0x90fa4c83db58e8fe .quad 0x01c2f5bcdcb18bc0 .quad 0x686e0c90216abc66 .quad 0x1fadbadba54395a7 .quad 0xb41a02a0ae0da66a .quad 0xbf19f598bba37c07 .quad 0x6a12b8acde48430d // 2^204 * 7 * G .quad 0xf8daea1f39d495d9 .quad 0x592c190e525f1dfc .quad 0xdb8cbd04c9991d1b .quad 0x11f7fda3d88f0cb7 .quad 0x793bdd801aaeeb5f .quad 0x00a2a0aac1518871 .quad 0xe8a373a31f2136b4 .quad 0x48aab888fc91ef19 .quad 0x041f7e925830f40e .quad 0x002d6ca979661c06 .quad 0x86dc9ff92b046a2e .quad 0x760360928b0493d1 // 2^204 * 8 * G .quad 0x21bb41c6120cf9c6 .quad 0xeab2aa12decda59b .quad 0xc1a72d020aa48b34 .quad 0x215d4d27e87d3b68 .quad 0xb43108e5695a0b05 .quad 0x6cb00ee8ad37a38b .quad 0x5edad6eea3537381 .quad 0x3f2602d4b6dc3224 .quad 0xc8b247b65bcaf19c .quad 0x49779dc3b1b2c652 .quad 0x89a180bbd5ece2e2 .quad 0x13f098a3cec8e039 // 2^208 * 1 * G .quad 0x9adc0ff9ce5ec54b .quad 0x039c2a6b8c2f130d .quad 0x028007c7f0f89515 .quad 0x78968314ac04b36b .quad 0xf3aa57a22796bb14 .quad 0x883abab79b07da21 .quad 0xe54be21831a0391c .quad 0x5ee7fb38d83205f9 .quad 0x538dfdcb41446a8e .quad 0xa5acfda9434937f9 .quad 0x46af908d263c8c78 .quad 0x61d0633c9bca0d09 // 2^208 * 2 * G .quad 0x63744935ffdb2566 .quad 0xc5bd6b89780b68bb .quad 0x6f1b3280553eec03 .quad 0x6e965fd847aed7f5 .quad 0xada328bcf8fc73df .quad 0xee84695da6f037fc .quad 0x637fb4db38c2a909 .quad 0x5b23ac2df8067bdc .quad 0x9ad2b953ee80527b .quad 0xe88f19aafade6d8d .quad 0x0e711704150e82cf .quad 0x79b9bbb9dd95dedc // 2^208 * 3 * G .quad 0xebb355406a3126c2 .quad 0xd26383a868c8c393 .quad 0x6c0c6429e5b97a82 .quad 0x5065f158c9fd2147 .quad 0xd1997dae8e9f7374 .quad 0xa032a2f8cfbb0816 .quad 0xcd6cba126d445f0a .quad 0x1ba811460accb834 .quad 0x708169fb0c429954 .quad 0xe14600acd76ecf67 .quad 0x2eaab98a70e645ba .quad 0x3981f39e58a4faf2 // 2^208 * 4 * G .quad 0x18fb8a7559230a93 .quad 0x1d168f6960e6f45d .quad 0x3a85a94514a93cb5 .quad 0x38dc083705acd0fd .quad 0xc845dfa56de66fde .quad 0xe152a5002c40483a .quad 0xe9d2e163c7b4f632 .quad 
0x30f4452edcbc1b65 .quad 0x856d2782c5759740 .quad 0xfa134569f99cbecc .quad 0x8844fc73c0ea4e71 .quad 0x632d9a1a593f2469 // 2^208 * 5 * G .quad 0xf6bb6b15b807cba6 .quad 0x1823c7dfbc54f0d7 .quad 0xbb1d97036e29670b .quad 0x0b24f48847ed4a57 .quad 0xbf09fd11ed0c84a7 .quad 0x63f071810d9f693a .quad 0x21908c2d57cf8779 .quad 0x3a5a7df28af64ba2 .quad 0xdcdad4be511beac7 .quad 0xa4538075ed26ccf2 .quad 0xe19cff9f005f9a65 .quad 0x34fcf74475481f63 // 2^208 * 6 * G .quad 0xc197e04c789767ca .quad 0xb8714dcb38d9467d .quad 0x55de888283f95fa8 .quad 0x3d3bdc164dfa63f7 .quad 0xa5bb1dab78cfaa98 .quad 0x5ceda267190b72f2 .quad 0x9309c9110a92608e .quad 0x0119a3042fb374b0 .quad 0x67a2d89ce8c2177d .quad 0x669da5f66895d0c1 .quad 0xf56598e5b282a2b0 .quad 0x56c088f1ede20a73 // 2^208 * 7 * G .quad 0x336d3d1110a86e17 .quad 0xd7f388320b75b2fa .quad 0xf915337625072988 .quad 0x09674c6b99108b87 .quad 0x581b5fac24f38f02 .quad 0xa90be9febae30cbd .quad 0x9a2169028acf92f0 .quad 0x038b7ea48359038f .quad 0x9f4ef82199316ff8 .quad 0x2f49d282eaa78d4f .quad 0x0971a5ab5aef3174 .quad 0x6e5e31025969eb65 // 2^208 * 8 * G .quad 0xb16c62f587e593fb .quad 0x4999eddeca5d3e71 .quad 0xb491c1e014cc3e6d .quad 0x08f5114789a8dba8 .quad 0x3304fb0e63066222 .quad 0xfb35068987acba3f .quad 0xbd1924778c1061a3 .quad 0x3058ad43d1838620 .quad 0x323c0ffde57663d0 .quad 0x05c3df38a22ea610 .quad 0xbdc78abdac994f9a .quad 0x26549fa4efe3dc99 // 2^212 * 1 * G .quad 0x738b38d787ce8f89 .quad 0xb62658e24179a88d .quad 0x30738c9cf151316d .quad 0x49128c7f727275c9 .quad 0x04dbbc17f75396b9 .quad 0x69e6a2d7d2f86746 .quad 0xc6409d99f53eabc6 .quad 0x606175f6332e25d2 .quad 0x4021370ef540e7dd .quad 0x0910d6f5a1f1d0a5 .quad 0x4634aacd5b06b807 .quad 0x6a39e6356944f235 // 2^212 * 2 * G .quad 0x96cd5640df90f3e7 .quad 0x6c3a760edbfa25ea .quad 0x24f3ef0959e33cc4 .quad 0x42889e7e530d2e58 .quad 0x1da1965774049e9d .quad 0xfbcd6ea198fe352b .quad 0xb1cbcd50cc5236a6 .quad 0x1f5ec83d3f9846e2 .quad 0x8efb23c3328ccb75 .quad 0xaf42a207dd876ee9 .quad 0x20fbdadc5dfae796 .quad 0x241e246b06bf9f51 // 2^212 * 3 * G .quad 0x29e68e57ad6e98f6 .quad 0x4c9260c80b462065 .quad 0x3f00862ea51ebb4b .quad 0x5bc2c77fb38d9097 .quad 0x7eaafc9a6280bbb8 .quad 0x22a70f12f403d809 .quad 0x31ce40bb1bfc8d20 .quad 0x2bc65635e8bd53ee .quad 0xe8d5dc9fa96bad93 .quad 0xe58fb17dde1947dc .quad 0x681532ea65185fa3 .quad 0x1fdd6c3b034a7830 // 2^212 * 4 * G .quad 0x0a64e28c55dc18fe .quad 0xe3df9e993399ebdd .quad 0x79ac432370e2e652 .quad 0x35ff7fc33ae4cc0e .quad 0x9c13a6a52dd8f7a9 .quad 0x2dbb1f8c3efdcabf .quad 0x961e32405e08f7b5 .quad 0x48c8a121bbe6c9e5 .quad 0xfc415a7c59646445 .quad 0xd224b2d7c128b615 .quad 0x6035c9c905fbb912 .quad 0x42d7a91274429fab // 2^212 * 5 * G .quad 0x4e6213e3eaf72ed3 .quad 0x6794981a43acd4e7 .quad 0xff547cde6eb508cb .quad 0x6fed19dd10fcb532 .quad 0xa9a48947933da5bc .quad 0x4a58920ec2e979ec .quad 0x96d8800013e5ac4c .quad 0x453692d74b48b147 .quad 0xdd775d99a8559c6f .quad 0xf42a2140df003e24 .quad 0x5223e229da928a66 .quad 0x063f46ba6d38f22c // 2^212 * 6 * G .quad 0xd2d242895f536694 .quad 0xca33a2c542939b2c .quad 0x986fada6c7ddb95c .quad 0x5a152c042f712d5d .quad 0x39843cb737346921 .quad 0xa747fb0738c89447 .quad 0xcb8d8031a245307e .quad 0x67810f8e6d82f068 .quad 0x3eeb8fbcd2287db4 .quad 0x72c7d3a301a03e93 .quad 0x5473e88cbd98265a .quad 0x7324aa515921b403 // 2^212 * 7 * G .quad 0x857942f46c3cbe8e .quad 0xa1d364b14730c046 .quad 0x1c8ed914d23c41bf .quad 0x0838e161eef6d5d2 .quad 0xad23f6dae82354cb .quad 0x6962502ab6571a6d .quad 0x9b651636e38e37d1 .quad 0x5cac5005d1a3312f .quad 0x8cc154cce9e39904 .quad 
0x5b3a040b84de6846 .quad 0xc4d8a61cb1be5d6e .quad 0x40fb897bd8861f02 // 2^212 * 8 * G .quad 0x84c5aa9062de37a1 .quad 0x421da5000d1d96e1 .quad 0x788286306a9242d9 .quad 0x3c5e464a690d10da .quad 0xe57ed8475ab10761 .quad 0x71435e206fd13746 .quad 0x342f824ecd025632 .quad 0x4b16281ea8791e7b .quad 0xd1c101d50b813381 .quad 0xdee60f1176ee6828 .quad 0x0cb68893383f6409 .quad 0x6183c565f6ff484a // 2^216 * 1 * G .quad 0x741d5a461e6bf9d6 .quad 0x2305b3fc7777a581 .quad 0xd45574a26474d3d9 .quad 0x1926e1dc6401e0ff .quad 0xdb468549af3f666e .quad 0xd77fcf04f14a0ea5 .quad 0x3df23ff7a4ba0c47 .quad 0x3a10dfe132ce3c85 .quad 0xe07f4e8aea17cea0 .quad 0x2fd515463a1fc1fd .quad 0x175322fd31f2c0f1 .quad 0x1fa1d01d861e5d15 // 2^216 * 2 * G .quad 0xcc8055947d599832 .quad 0x1e4656da37f15520 .quad 0x99f6f7744e059320 .quad 0x773563bc6a75cf33 .quad 0x38dcac00d1df94ab .quad 0x2e712bddd1080de9 .quad 0x7f13e93efdd5e262 .quad 0x73fced18ee9a01e5 .quad 0x06b1e90863139cb3 .quad 0xa493da67c5a03ecd .quad 0x8d77cec8ad638932 .quad 0x1f426b701b864f44 // 2^216 * 3 * G .quad 0xefc9264c41911c01 .quad 0xf1a3b7b817a22c25 .quad 0x5875da6bf30f1447 .quad 0x4e1af5271d31b090 .quad 0xf17e35c891a12552 .quad 0xb76b8153575e9c76 .quad 0xfa83406f0d9b723e .quad 0x0b76bb1b3fa7e438 .quad 0x08b8c1f97f92939b .quad 0xbe6771cbd444ab6e .quad 0x22e5646399bb8017 .quad 0x7b6dd61eb772a955 // 2^216 * 4 * G .quad 0xb7adc1e850f33d92 .quad 0x7998fa4f608cd5cf .quad 0xad962dbd8dfc5bdb .quad 0x703e9bceaf1d2f4f .quad 0x5730abf9ab01d2c7 .quad 0x16fb76dc40143b18 .quad 0x866cbe65a0cbb281 .quad 0x53fa9b659bff6afe .quad 0x6c14c8e994885455 .quad 0x843a5d6665aed4e5 .quad 0x181bb73ebcd65af1 .quad 0x398d93e5c4c61f50 // 2^216 * 5 * G .quad 0x1c4bd16733e248f3 .quad 0xbd9e128715bf0a5f .quad 0xd43f8cf0a10b0376 .quad 0x53b09b5ddf191b13 .quad 0xc3877c60d2e7e3f2 .quad 0x3b34aaa030828bb1 .quad 0x283e26e7739ef138 .quad 0x699c9c9002c30577 .quad 0xf306a7235946f1cc .quad 0x921718b5cce5d97d .quad 0x28cdd24781b4e975 .quad 0x51caf30c6fcdd907 // 2^216 * 6 * G .quad 0xa60ba7427674e00a .quad 0x630e8570a17a7bf3 .quad 0x3758563dcf3324cc .quad 0x5504aa292383fdaa .quad 0x737af99a18ac54c7 .quad 0x903378dcc51cb30f .quad 0x2b89bc334ce10cc7 .quad 0x12ae29c189f8e99a .quad 0xa99ec0cb1f0d01cf .quad 0x0dd1efcc3a34f7ae .quad 0x55ca7521d09c4e22 .quad 0x5fd14fe958eba5ea // 2^216 * 7 * G .quad 0xb5dc2ddf2845ab2c .quad 0x069491b10a7fe993 .quad 0x4daaf3d64002e346 .quad 0x093ff26e586474d1 .quad 0x3c42fe5ebf93cb8e .quad 0xbedfa85136d4565f .quad 0xe0f0859e884220e8 .quad 0x7dd73f960725d128 .quad 0xb10d24fe68059829 .quad 0x75730672dbaf23e5 .quad 0x1367253ab457ac29 .quad 0x2f59bcbc86b470a4 // 2^216 * 8 * G .quad 0x83847d429917135f .quad 0xad1b911f567d03d7 .quad 0x7e7748d9be77aad1 .quad 0x5458b42e2e51af4a .quad 0x7041d560b691c301 .quad 0x85201b3fadd7e71e .quad 0x16c2e16311335585 .quad 0x2aa55e3d010828b1 .quad 0xed5192e60c07444f .quad 0x42c54e2d74421d10 .quad 0x352b4c82fdb5c864 .quad 0x13e9004a8a768664 // 2^220 * 1 * G .quad 0xcbb5b5556c032bff .quad 0xdf7191b729297a3a .quad 0xc1ff7326aded81bb .quad 0x71ade8bb68be03f5 .quad 0x1e6284c5806b467c .quad 0xc5f6997be75d607b .quad 0x8b67d958b378d262 .quad 0x3d88d66a81cd8b70 .quad 0x8b767a93204ed789 .quad 0x762fcacb9fa0ae2a .quad 0x771febcc6dce4887 .quad 0x343062158ff05fb3 // 2^220 * 2 * G .quad 0xe05da1a7e1f5bf49 .quad 0x26457d6dd4736092 .quad 0x77dcb07773cc32f6 .quad 0x0a5d94969cdd5fcd .quad 0xfce219072a7b31b4 .quad 0x4d7adc75aa578016 .quad 0x0ec276a687479324 .quad 0x6d6d9d5d1fda4beb .quad 0x22b1a58ae9b08183 .quad 0xfd95d071c15c388b .quad 0xa9812376850a0517 .quad 
0x33384cbabb7f335e // 2^220 * 3 * G .quad 0x3c6fa2680ca2c7b5 .quad 0x1b5082046fb64fda .quad 0xeb53349c5431d6de .quad 0x5278b38f6b879c89 .quad 0x33bc627a26218b8d .quad 0xea80b21fc7a80c61 .quad 0x9458b12b173e9ee6 .quad 0x076247be0e2f3059 .quad 0x52e105f61416375a .quad 0xec97af3685abeba4 .quad 0x26e6b50623a67c36 .quad 0x5cf0e856f3d4fb01 // 2^220 * 4 * G .quad 0xf6c968731ae8cab4 .quad 0x5e20741ecb4f92c5 .quad 0x2da53be58ccdbc3e .quad 0x2dddfea269970df7 .quad 0xbeaece313db342a8 .quad 0xcba3635b842db7ee .quad 0xe88c6620817f13ef .quad 0x1b9438aa4e76d5c6 .quad 0x8a50777e166f031a .quad 0x067b39f10fb7a328 .quad 0x1925c9a6010fbd76 .quad 0x6df9b575cc740905 // 2^220 * 5 * G .quad 0x42c1192927f6bdcf .quad 0x8f91917a403d61ca .quad 0xdc1c5a668b9e1f61 .quad 0x1596047804ec0f8d .quad 0xecdfc35b48cade41 .quad 0x6a88471fb2328270 .quad 0x740a4a2440a01b6a .quad 0x471e5796003b5f29 .quad 0xda96bbb3aced37ac .quad 0x7a2423b5e9208cea .quad 0x24cc5c3038aebae2 .quad 0x50c356afdc5dae2f // 2^220 * 6 * G .quad 0x09dcbf4341c30318 .quad 0xeeba061183181dce .quad 0xc179c0cedc1e29a1 .quad 0x1dbf7b89073f35b0 .quad 0xcfed9cdf1b31b964 .quad 0xf486a9858ca51af3 .quad 0x14897265ea8c1f84 .quad 0x784a53dd932acc00 .quad 0x2d99f9df14fc4920 .quad 0x76ccb60cc4499fe5 .quad 0xa4132cbbe5cf0003 .quad 0x3f93d82354f000ea // 2^220 * 7 * G .quad 0x8183e7689e04ce85 .quad 0x678fb71e04465341 .quad 0xad92058f6688edac .quad 0x5da350d3532b099a .quad 0xeaac12d179e14978 .quad 0xff923ff3bbebff5e .quad 0x4af663e40663ce27 .quad 0x0fd381a811a5f5ff .quad 0xf256aceca436df54 .quad 0x108b6168ae69d6e8 .quad 0x20d986cb6b5d036c .quad 0x655957b9fee2af50 // 2^220 * 8 * G .quad 0xaea8b07fa902030f .quad 0xf88c766af463d143 .quad 0x15b083663c787a60 .quad 0x08eab1148267a4a8 .quad 0xbdc1409bd002d0ac .quad 0x66660245b5ccd9a6 .quad 0x82317dc4fade85ec .quad 0x02fe934b6ad7df0d .quad 0xef5cf100cfb7ea74 .quad 0x22897633a1cb42ac .quad 0xd4ce0c54cef285e2 .quad 0x30408c048a146a55 // 2^224 * 1 * G .quad 0x739d8845832fcedb .quad 0xfa38d6c9ae6bf863 .quad 0x32bc0dcab74ffef7 .quad 0x73937e8814bce45e .quad 0xbb2e00c9193b877f .quad 0xece3a890e0dc506b .quad 0xecf3b7c036de649f .quad 0x5f46040898de9e1a .quad 0xb9037116297bf48d .quad 0xa9d13b22d4f06834 .quad 0xe19715574696bdc6 .quad 0x2cf8a4e891d5e835 // 2^224 * 2 * G .quad 0x6d93fd8707110f67 .quad 0xdd4c09d37c38b549 .quad 0x7cb16a4cc2736a86 .quad 0x2049bd6e58252a09 .quad 0x2cb5487e17d06ba2 .quad 0x24d2381c3950196b .quad 0xd7659c8185978a30 .quad 0x7a6f7f2891d6a4f6 .quad 0x7d09fd8d6a9aef49 .quad 0xf0ee60be5b3db90b .quad 0x4c21b52c519ebfd4 .quad 0x6011aadfc545941d // 2^224 * 3 * G .quad 0x5f67926dcf95f83c .quad 0x7c7e856171289071 .quad 0xd6a1e7f3998f7a5b .quad 0x6fc5cc1b0b62f9e0 .quad 0x63ded0c802cbf890 .quad 0xfbd098ca0dff6aaa .quad 0x624d0afdb9b6ed99 .quad 0x69ce18b779340b1e .quad 0xd1ef5528b29879cb .quad 0xdd1aae3cd47e9092 .quad 0x127e0442189f2352 .quad 0x15596b3ae57101f1 // 2^224 * 4 * G .quad 0x462739d23f9179a2 .quad 0xff83123197d6ddcf .quad 0x1307deb553f2148a .quad 0x0d2237687b5f4dda .quad 0x09ff31167e5124ca .quad 0x0be4158bd9c745df .quad 0x292b7d227ef556e5 .quad 0x3aa4e241afb6d138 .quad 0x2cc138bf2a3305f5 .quad 0x48583f8fa2e926c3 .quad 0x083ab1a25549d2eb .quad 0x32fcaa6e4687a36c // 2^224 * 5 * G .quad 0x7bc56e8dc57d9af5 .quad 0x3e0bd2ed9df0bdf2 .quad 0xaac014de22efe4a3 .quad 0x4627e9cefebd6a5c .quad 0x3207a4732787ccdf .quad 0x17e31908f213e3f8 .quad 0xd5b2ecd7f60d964e .quad 0x746f6336c2600be9 .quad 0x3f4af345ab6c971c .quad 0xe288eb729943731f .quad 0x33596a8a0344186d .quad 0x7b4917007ed66293 // 2^224 * 6 * G .quad 0x2d85fb5cab84b064 .quad 
0x497810d289f3bc14 .quad 0x476adc447b15ce0c .quad 0x122ba376f844fd7b .quad 0x54341b28dd53a2dd .quad 0xaa17905bdf42fc3f .quad 0x0ff592d94dd2f8f4 .quad 0x1d03620fe08cd37d .quad 0xc20232cda2b4e554 .quad 0x9ed0fd42115d187f .quad 0x2eabb4be7dd479d9 .quad 0x02c70bf52b68ec4c // 2^224 * 7 * G .quad 0xa287ec4b5d0b2fbb .quad 0x415c5790074882ca .quad 0xe044a61ec1d0815c .quad 0x26334f0a409ef5e0 .quad 0xace532bf458d72e1 .quad 0x5be768e07cb73cb5 .quad 0x56cf7d94ee8bbde7 .quad 0x6b0697e3feb43a03 .quad 0xb6c8f04adf62a3c0 .quad 0x3ef000ef076da45d .quad 0x9c9cb95849f0d2a9 .quad 0x1cc37f43441b2fae // 2^224 * 8 * G .quad 0x508f565a5cc7324f .quad 0xd061c4c0e506a922 .quad 0xfb18abdb5c45ac19 .quad 0x6c6809c10380314a .quad 0xd76656f1c9ceaeb9 .quad 0x1c5b15f818e5656a .quad 0x26e72832844c2334 .quad 0x3a346f772f196838 .quad 0xd2d55112e2da6ac8 .quad 0xe9bd0331b1e851ed .quad 0x960746dd8ec67262 .quad 0x05911b9f6ef7c5d0 // 2^228 * 1 * G .quad 0xe9dcd756b637ff2d .quad 0xec4c348fc987f0c4 .quad 0xced59285f3fbc7b7 .quad 0x3305354793e1ea87 .quad 0x01c18980c5fe9f94 .quad 0xcd656769716fd5c8 .quad 0x816045c3d195a086 .quad 0x6e2b7f3266cc7982 .quad 0xcc802468f7c3568f .quad 0x9de9ba8219974cb3 .quad 0xabb7229cb5b81360 .quad 0x44e2017a6fbeba62 // 2^228 * 2 * G .quad 0xc4c2a74354dab774 .quad 0x8e5d4c3c4eaf031a .quad 0xb76c23d242838f17 .quad 0x749a098f68dce4ea .quad 0x87f82cf3b6ca6ecd .quad 0x580f893e18f4a0c2 .quad 0x058930072604e557 .quad 0x6cab6ac256d19c1d .quad 0xdcdfe0a02cc1de60 .quad 0x032665ff51c5575b .quad 0x2c0c32f1073abeeb .quad 0x6a882014cd7b8606 // 2^228 * 3 * G .quad 0xa52a92fea4747fb5 .quad 0xdc12a4491fa5ab89 .quad 0xd82da94bb847a4ce .quad 0x4d77edce9512cc4e .quad 0xd111d17caf4feb6e .quad 0x050bba42b33aa4a3 .quad 0x17514c3ceeb46c30 .quad 0x54bedb8b1bc27d75 .quad 0x77c8e14577e2189c .quad 0xa3e46f6aff99c445 .quad 0x3144dfc86d335343 .quad 0x3a96559e7c4216a9 // 2^228 * 4 * G .quad 0x12550d37f42ad2ee .quad 0x8b78e00498a1fbf5 .quad 0x5d53078233894cb2 .quad 0x02c84e4e3e498d0c .quad 0x4493896880baaa52 .quad 0x4c98afc4f285940e .quad 0xef4aa79ba45448b6 .quad 0x5278c510a57aae7f .quad 0xa54dd074294c0b94 .quad 0xf55d46b8df18ffb6 .quad 0xf06fecc58dae8366 .quad 0x588657668190d165 // 2^228 * 5 * G .quad 0xd47712311aef7117 .quad 0x50343101229e92c7 .quad 0x7a95e1849d159b97 .quad 0x2449959b8b5d29c9 .quad 0xbf5834f03de25cc3 .quad 0xb887c8aed6815496 .quad 0x5105221a9481e892 .quad 0x6760ed19f7723f93 .quad 0x669ba3b7ac35e160 .quad 0x2eccf73fba842056 .quad 0x1aec1f17c0804f07 .quad 0x0d96bc031856f4e7 // 2^228 * 6 * G .quad 0x3318be7775c52d82 .quad 0x4cb764b554d0aab9 .quad 0xabcf3d27cc773d91 .quad 0x3bf4d1848123288a .quad 0xb1d534b0cc7505e1 .quad 0x32cd003416c35288 .quad 0xcb36a5800762c29d .quad 0x5bfe69b9237a0bf8 .quad 0x183eab7e78a151ab .quad 0xbbe990c999093763 .quad 0xff717d6e4ac7e335 .quad 0x4c5cddb325f39f88 // 2^228 * 7 * G .quad 0xc0f6b74d6190a6eb .quad 0x20ea81a42db8f4e4 .quad 0xa8bd6f7d97315760 .quad 0x33b1d60262ac7c21 .quad 0x57750967e7a9f902 .quad 0x2c37fdfc4f5b467e .quad 0xb261663a3177ba46 .quad 0x3a375e78dc2d532b .quad 0x8141e72f2d4dddea .quad 0xe6eafe9862c607c8 .quad 0x23c28458573cafd0 .quad 0x46b9476f4ff97346 // 2^228 * 8 * G .quad 0x0c1ffea44f901e5c .quad 0x2b0b6fb72184b782 .quad 0xe587ff910114db88 .quad 0x37130f364785a142 .quad 0x1215505c0d58359f .quad 0x2a2013c7fc28c46b .quad 0x24a0a1af89ea664e .quad 0x4400b638a1130e1f .quad 0x3a01b76496ed19c3 .quad 0x31e00ab0ed327230 .quad 0x520a885783ca15b1 .quad 0x06aab9875accbec7 // 2^232 * 1 * G .quad 0xc1339983f5df0ebb .quad 0xc0f3758f512c4cac .quad 0x2cf1130a0bb398e1 .quad 
0x6b3cecf9aa270c62 .quad 0x5349acf3512eeaef .quad 0x20c141d31cc1cb49 .quad 0x24180c07a99a688d .quad 0x555ef9d1c64b2d17 .quad 0x36a770ba3b73bd08 .quad 0x624aef08a3afbf0c .quad 0x5737ff98b40946f2 .quad 0x675f4de13381749d // 2^232 * 2 * G .quad 0x0e2c52036b1782fc .quad 0x64816c816cad83b4 .quad 0xd0dcbdd96964073e .quad 0x13d99df70164c520 .quad 0xa12ff6d93bdab31d .quad 0x0725d80f9d652dfe .quad 0x019c4ff39abe9487 .quad 0x60f450b882cd3c43 .quad 0x014b5ec321e5c0ca .quad 0x4fcb69c9d719bfa2 .quad 0x4e5f1c18750023a0 .quad 0x1c06de9e55edac80 // 2^232 * 3 * G .quad 0x990f7ad6a33ec4e2 .quad 0x6608f938be2ee08e .quad 0x9ca143c563284515 .quad 0x4cf38a1fec2db60d .quad 0xffd52b40ff6d69aa .quad 0x34530b18dc4049bb .quad 0x5e4a5c2fa34d9897 .quad 0x78096f8e7d32ba2d .quad 0xa0aaaa650dfa5ce7 .quad 0xf9c49e2a48b5478c .quad 0x4f09cc7d7003725b .quad 0x373cad3a26091abe // 2^232 * 4 * G .quad 0xb294634d82c9f57c .quad 0x1fcbfde124934536 .quad 0x9e9c4db3418cdb5a .quad 0x0040f3d9454419fc .quad 0xf1bea8fb89ddbbad .quad 0x3bcb2cbc61aeaecb .quad 0x8f58a7bb1f9b8d9d .quad 0x21547eda5112a686 .quad 0xdefde939fd5986d3 .quad 0xf4272c89510a380c .quad 0xb72ba407bb3119b9 .quad 0x63550a334a254df4 // 2^232 * 5 * G .quad 0x6507d6edb569cf37 .quad 0x178429b00ca52ee1 .quad 0xea7c0090eb6bd65d .quad 0x3eea62c7daf78f51 .quad 0x9bba584572547b49 .quad 0xf305c6fae2c408e0 .quad 0x60e8fa69c734f18d .quad 0x39a92bafaa7d767a .quad 0x9d24c713e693274e .quad 0x5f63857768dbd375 .quad 0x70525560eb8ab39a .quad 0x68436a0665c9c4cd // 2^232 * 6 * G .quad 0xbc0235e8202f3f27 .quad 0xc75c00e264f975b0 .quad 0x91a4e9d5a38c2416 .quad 0x17b6e7f68ab789f9 .quad 0x1e56d317e820107c .quad 0xc5266844840ae965 .quad 0xc1e0a1c6320ffc7a .quad 0x5373669c91611472 .quad 0x5d2814ab9a0e5257 .quad 0x908f2084c9cab3fc .quad 0xafcaf5885b2d1eca .quad 0x1cb4b5a678f87d11 // 2^232 * 7 * G .quad 0xb664c06b394afc6c .quad 0x0c88de2498da5fb1 .quad 0x4f8d03164bcad834 .quad 0x330bca78de7434a2 .quad 0x6b74aa62a2a007e7 .quad 0xf311e0b0f071c7b1 .quad 0x5707e438000be223 .quad 0x2dc0fd2d82ef6eac .quad 0x982eff841119744e .quad 0xf9695e962b074724 .quad 0xc58ac14fbfc953fb .quad 0x3c31be1b369f1cf5 // 2^232 * 8 * G .quad 0xb0f4864d08948aee .quad 0x07dc19ee91ba1c6f .quad 0x7975cdaea6aca158 .quad 0x330b61134262d4bb .quad 0xc168bc93f9cb4272 .quad 0xaeb8711fc7cedb98 .quad 0x7f0e52aa34ac8d7a .quad 0x41cec1097e7d55bb .quad 0xf79619d7a26d808a .quad 0xbb1fd49e1d9e156d .quad 0x73d7c36cdba1df27 .quad 0x26b44cd91f28777d // 2^236 * 1 * G .quad 0x300a9035393aa6d8 .quad 0x2b501131a12bb1cd .quad 0x7b1ff677f093c222 .quad 0x4309c1f8cab82bad .quad 0xaf44842db0285f37 .quad 0x8753189047efc8df .quad 0x9574e091f820979a .quad 0x0e378d6069615579 .quad 0xd9fa917183075a55 .quad 0x4bdb5ad26b009fdc .quad 0x7829ad2cd63def0e .quad 0x078fc54975fd3877 // 2^236 * 2 * G .quad 0x87dfbd1428878f2d .quad 0x134636dd1e9421a1 .quad 0x4f17c951257341a3 .quad 0x5df98d4bad296cb8 .quad 0xe2004b5bb833a98a .quad 0x44775dec2d4c3330 .quad 0x3aa244067eace913 .quad 0x272630e3d58e00a9 .quad 0xf3678fd0ecc90b54 .quad 0xf001459b12043599 .quad 0x26725fbc3758b89b .quad 0x4325e4aa73a719ae // 2^236 * 3 * G .quad 0x657dc6ef433c3493 .quad 0x65375e9f80dbf8c3 .quad 0x47fd2d465b372dae .quad 0x4966ab79796e7947 .quad 0xed24629acf69f59d .quad 0x2a4a1ccedd5abbf4 .quad 0x3535ca1f56b2d67b .quad 0x5d8c68d043b1b42d .quad 0xee332d4de3b42b0a .quad 0xd84e5a2b16a4601c .quad 0x78243877078ba3e4 .quad 0x77ed1eb4184ee437 // 2^236 * 4 * G .quad 0xbfd4e13f201839a0 .quad 0xaeefffe23e3df161 .quad 0xb65b04f06b5d1fe3 .quad 0x52e085fb2b62fbc0 .quad 0x185d43f89e92ed1a .quad 
0xb04a1eeafe4719c6 .quad 0x499fbe88a6f03f4f .quad 0x5d8b0d2f3c859bdd .quad 0x124079eaa54cf2ba .quad 0xd72465eb001b26e7 .quad 0x6843bcfdc97af7fd .quad 0x0524b42b55eacd02 // 2^236 * 5 * G .quad 0xfd0d5dbee45447b0 .quad 0x6cec351a092005ee .quad 0x99a47844567579cb .quad 0x59d242a216e7fa45 .quad 0xbc18dcad9b829eac .quad 0x23ae7d28b5f579d0 .quad 0xc346122a69384233 .quad 0x1a6110b2e7d4ac89 .quad 0x4f833f6ae66997ac .quad 0x6849762a361839a4 .quad 0x6985dec1970ab525 .quad 0x53045e89dcb1f546 // 2^236 * 6 * G .quad 0xcb8bb346d75353db .quad 0xfcfcb24bae511e22 .quad 0xcba48d40d50ae6ef .quad 0x26e3bae5f4f7cb5d .quad 0x84da3cde8d45fe12 .quad 0xbd42c218e444e2d2 .quad 0xa85196781f7e3598 .quad 0x7642c93f5616e2b2 .quad 0x2323daa74595f8e4 .quad 0xde688c8b857abeb4 .quad 0x3fc48e961c59326e .quad 0x0b2e73ca15c9b8ba // 2^236 * 7 * G .quad 0xd6bb4428c17f5026 .quad 0x9eb27223fb5a9ca7 .quad 0xe37ba5031919c644 .quad 0x21ce380db59a6602 .quad 0x0e3fbfaf79c03a55 .quad 0x3077af054cbb5acf .quad 0xd5c55245db3de39f .quad 0x015e68c1476a4af7 .quad 0xc1d5285220066a38 .quad 0x95603e523570aef3 .quad 0x832659a7226b8a4d .quad 0x5dd689091f8eedc9 // 2^236 * 8 * G .quad 0xcbac84debfd3c856 .quad 0x1624c348b35ff244 .quad 0xb7f88dca5d9cad07 .quad 0x3b0e574da2c2ebe8 .quad 0x1d022591a5313084 .quad 0xca2d4aaed6270872 .quad 0x86a12b852f0bfd20 .quad 0x56e6c439ad7da748 .quad 0xc704ff4942bdbae6 .quad 0x5e21ade2b2de1f79 .quad 0xe95db3f35652fad8 .quad 0x0822b5378f08ebc1 // 2^240 * 1 * G .quad 0x51f048478f387475 .quad 0xb25dbcf49cbecb3c .quad 0x9aab1244d99f2055 .quad 0x2c709e6c1c10a5d6 .quad 0xe1b7f29362730383 .quad 0x4b5279ffebca8a2c .quad 0xdafc778abfd41314 .quad 0x7deb10149c72610f .quad 0xcb62af6a8766ee7a .quad 0x66cbec045553cd0e .quad 0x588001380f0be4b5 .quad 0x08e68e9ff62ce2ea // 2^240 * 2 * G .quad 0x34ad500a4bc130ad .quad 0x8d38db493d0bd49c .quad 0xa25c3d98500a89be .quad 0x2f1f3f87eeba3b09 .quad 0x2f2d09d50ab8f2f9 .quad 0xacb9218dc55923df .quad 0x4a8f342673766cb9 .quad 0x4cb13bd738f719f5 .quad 0xf7848c75e515b64a .quad 0xa59501badb4a9038 .quad 0xc20d313f3f751b50 .quad 0x19a1e353c0ae2ee8 // 2^240 * 3 * G .quad 0x7d1c7560bafa05c3 .quad 0xb3e1a0a0c6e55e61 .quad 0xe3529718c0d66473 .quad 0x41546b11c20c3486 .quad 0xb42172cdd596bdbd .quad 0x93e0454398eefc40 .quad 0x9fb15347b44109b5 .quad 0x736bd3990266ae34 .quad 0x85532d509334b3b4 .quad 0x46fd114b60816573 .quad 0xcc5f5f30425c8375 .quad 0x412295a2b87fab5c // 2^240 * 4 * G .quad 0x19c99b88f57ed6e9 .quad 0x5393cb266df8c825 .quad 0x5cee3213b30ad273 .quad 0x14e153ebb52d2e34 .quad 0x2e655261e293eac6 .quad 0x845a92032133acdb .quad 0x460975cb7900996b .quad 0x0760bb8d195add80 .quad 0x413e1a17cde6818a .quad 0x57156da9ed69a084 .quad 0x2cbf268f46caccb1 .quad 0x6b34be9bc33ac5f2 // 2^240 * 5 * G .quad 0xf3df2f643a78c0b2 .quad 0x4c3e971ef22e027c .quad 0xec7d1c5e49c1b5a3 .quad 0x2012c18f0922dd2d .quad 0x11fc69656571f2d3 .quad 0xc6c9e845530e737a .quad 0xe33ae7a2d4fe5035 .quad 0x01b9c7b62e6dd30b .quad 0x880b55e55ac89d29 .quad 0x1483241f45a0a763 .quad 0x3d36efdfc2e76c1f .quad 0x08af5b784e4bade8 // 2^240 * 6 * G .quad 0x283499dc881f2533 .quad 0x9d0525da779323b6 .quad 0x897addfb673441f4 .quad 0x32b79d71163a168d .quad 0xe27314d289cc2c4b .quad 0x4be4bd11a287178d .quad 0x18d528d6fa3364ce .quad 0x6423c1d5afd9826e .quad 0xcc85f8d9edfcb36a .quad 0x22bcc28f3746e5f9 .quad 0xe49de338f9e5d3cd .quad 0x480a5efbc13e2dcc // 2^240 * 7 * G .quad 0x0b51e70b01622071 .quad 0x06b505cf8b1dafc5 .quad 0x2c6bb061ef5aabcd .quad 0x47aa27600cb7bf31 .quad 0xb6614ce442ce221f .quad 0x6e199dcc4c053928 .quad 0x663fb4a4dc1cbe03 .quad 
0x24b31d47691c8e06 .quad 0x2a541eedc015f8c3 .quad 0x11a4fe7e7c693f7c .quad 0xf0af66134ea278d6 .quad 0x545b585d14dda094 // 2^240 * 8 * G .quad 0x67bf275ea0d43a0f .quad 0xade68e34089beebe .quad 0x4289134cd479e72e .quad 0x0f62f9c332ba5454 .quad 0x6204e4d0e3b321e1 .quad 0x3baa637a28ff1e95 .quad 0x0b0ccffd5b99bd9e .quad 0x4d22dc3e64c8d071 .quad 0xfcb46589d63b5f39 .quad 0x5cae6a3f57cbcf61 .quad 0xfebac2d2953afa05 .quad 0x1c0fa01a36371436 // 2^244 * 1 * G .quad 0xe7547449bc7cd692 .quad 0x0f9abeaae6f73ddf .quad 0x4af01ca700837e29 .quad 0x63ab1b5d3f1bc183 .quad 0xc11ee5e854c53fae .quad 0x6a0b06c12b4f3ff4 .quad 0x33540f80e0b67a72 .quad 0x15f18fc3cd07e3ef .quad 0x32750763b028f48c .quad 0x06020740556a065f .quad 0xd53bd812c3495b58 .quad 0x08706c9b865f508d // 2^244 * 2 * G .quad 0xf37ca2ab3d343dff .quad 0x1a8c6a2d80abc617 .quad 0x8e49e035d4ccffca .quad 0x48b46beebaa1d1b9 .quad 0xcc991b4138b41246 .quad 0x243b9c526f9ac26b .quad 0xb9ef494db7cbabbd .quad 0x5fba433dd082ed00 .quad 0x9c49e355c9941ad0 .quad 0xb9734ade74498f84 .quad 0x41c3fed066663e5c .quad 0x0ecfedf8e8e710b3 // 2^244 * 3 * G .quad 0x76430f9f9cd470d9 .quad 0xb62acc9ba42f6008 .quad 0x1898297c59adad5e .quad 0x7789dd2db78c5080 .quad 0x744f7463e9403762 .quad 0xf79a8dee8dfcc9c9 .quad 0x163a649655e4cde3 .quad 0x3b61788db284f435 .quad 0xb22228190d6ef6b2 .quad 0xa94a66b246ce4bfa .quad 0x46c1a77a4f0b6cc7 .quad 0x4236ccffeb7338cf // 2^244 * 4 * G .quad 0x8497404d0d55e274 .quad 0x6c6663d9c4ad2b53 .quad 0xec2fb0d9ada95734 .quad 0x2617e120cdb8f73c .quad 0x3bd82dbfda777df6 .quad 0x71b177cc0b98369e .quad 0x1d0e8463850c3699 .quad 0x5a71945b48e2d1f1 .quad 0x6f203dd5405b4b42 .quad 0x327ec60410b24509 .quad 0x9c347230ac2a8846 .quad 0x77de29fc11ffeb6a // 2^244 * 5 * G .quad 0xb0ac57c983b778a8 .quad 0x53cdcca9d7fe912c .quad 0x61c2b854ff1f59dc .quad 0x3a1a2cf0f0de7dac .quad 0x835e138fecced2ca .quad 0x8c9eaf13ea963b9a .quad 0xc95fbfc0b2160ea6 .quad 0x575e66f3ad877892 .quad 0x99803a27c88fcb3a .quad 0x345a6789275ec0b0 .quad 0x459789d0ff6c2be5 .quad 0x62f882651e70a8b2 // 2^244 * 6 * G .quad 0x085ae2c759ff1be4 .quad 0x149145c93b0e40b7 .quad 0xc467e7fa7ff27379 .quad 0x4eeecf0ad5c73a95 .quad 0x6d822986698a19e0 .quad 0xdc9821e174d78a71 .quad 0x41a85f31f6cb1f47 .quad 0x352721c2bcda9c51 .quad 0x48329952213fc985 .quad 0x1087cf0d368a1746 .quad 0x8e5261b166c15aa5 .quad 0x2d5b2d842ed24c21 // 2^244 * 7 * G .quad 0x02cfebd9ebd3ded1 .quad 0xd45b217739021974 .quad 0x7576f813fe30a1b7 .quad 0x5691b6f9a34ef6c2 .quad 0x5eb7d13d196ac533 .quad 0x377234ecdb80be2b .quad 0xe144cffc7cf5ae24 .quad 0x5226bcf9c441acec .quad 0x79ee6c7223e5b547 .quad 0x6f5f50768330d679 .quad 0xed73e1e96d8adce9 .quad 0x27c3da1e1d8ccc03 // 2^244 * 8 * G .quad 0x7eb9efb23fe24c74 .quad 0x3e50f49f1651be01 .quad 0x3ea732dc21858dea .quad 0x17377bd75bb810f9 .quad 0x28302e71630ef9f6 .quad 0xc2d4a2032b64cee0 .quad 0x090820304b6292be .quad 0x5fca747aa82adf18 .quad 0x232a03c35c258ea5 .quad 0x86f23a2c6bcb0cf1 .quad 0x3dad8d0d2e442166 .quad 0x04a8933cab76862b // 2^248 * 1 * G .quad 0xd2c604b622943dff .quad 0xbc8cbece44cfb3a0 .quad 0x5d254ff397808678 .quad 0x0fa3614f3b1ca6bf .quad 0x69082b0e8c936a50 .quad 0xf9c9a035c1dac5b6 .quad 0x6fb73e54c4dfb634 .quad 0x4005419b1d2bc140 .quad 0xa003febdb9be82f0 .quad 0x2089c1af3a44ac90 .quad 0xf8499f911954fa8e .quad 0x1fba218aef40ab42 // 2^248 * 2 * G .quad 0xab549448fac8f53e .quad 0x81f6e89a7ba63741 .quad 0x74fd6c7d6c2b5e01 .quad 0x392e3acaa8c86e42 .quad 0x4f3e57043e7b0194 .quad 0xa81d3eee08daaf7f .quad 0xc839c6ab99dcdef1 .quad 0x6c535d13ff7761d5 .quad 0x4cbd34e93e8a35af .quad 
0x2e0781445887e816 .quad 0x19319c76f29ab0ab .quad 0x25e17fe4d50ac13b // 2^248 * 3 * G .quad 0x0a289bd71e04f676 .quad 0x208e1c52d6420f95 .quad 0x5186d8b034691fab .quad 0x255751442a9fb351 .quad 0x915f7ff576f121a7 .quad 0xc34a32272fcd87e3 .quad 0xccba2fde4d1be526 .quad 0x6bba828f8969899b .quad 0xe2d1bc6690fe3901 .quad 0x4cb54a18a0997ad5 .quad 0x971d6914af8460d4 .quad 0x559d504f7f6b7be4 // 2^248 * 4 * G .quad 0xa7738378b3eb54d5 .quad 0x1d69d366a5553c7c .quad 0x0a26cf62f92800ba .quad 0x01ab12d5807e3217 .quad 0x9c4891e7f6d266fd .quad 0x0744a19b0307781b .quad 0x88388f1d6061e23b .quad 0x123ea6a3354bd50e .quad 0x118d189041e32d96 .quad 0xb9ede3c2d8315848 .quad 0x1eab4271d83245d9 .quad 0x4a3961e2c918a154 // 2^248 * 5 * G .quad 0x71dc3be0f8e6bba0 .quad 0xd6cef8347effe30a .quad 0xa992425fe13a476a .quad 0x2cd6bce3fb1db763 .quad 0x0327d644f3233f1e .quad 0x499a260e34fcf016 .quad 0x83b5a716f2dab979 .quad 0x68aceead9bd4111f .quad 0x38b4c90ef3d7c210 .quad 0x308e6e24b7ad040c .quad 0x3860d9f1b7e73e23 .quad 0x595760d5b508f597 // 2^248 * 6 * G .quad 0x6129bfe104aa6397 .quad 0x8f960008a4a7fccb .quad 0x3f8bc0897d909458 .quad 0x709fa43edcb291a9 .quad 0x882acbebfd022790 .quad 0x89af3305c4115760 .quad 0x65f492e37d3473f4 .quad 0x2cb2c5df54515a2b .quad 0xeb0a5d8c63fd2aca .quad 0xd22bc1662e694eff .quad 0x2723f36ef8cbb03a .quad 0x70f029ecf0c8131f // 2^248 * 7 * G .quad 0x461307b32eed3e33 .quad 0xae042f33a45581e7 .quad 0xc94449d3195f0366 .quad 0x0b7d5d8a6c314858 .quad 0x2a6aafaa5e10b0b9 .quad 0x78f0a370ef041aa9 .quad 0x773efb77aa3ad61f .quad 0x44eca5a2a74bd9e1 .quad 0x25d448327b95d543 .quad 0x70d38300a3340f1d .quad 0xde1c531c60e1c52b .quad 0x272224512c7de9e4 // 2^248 * 8 * G .quad 0x1abc92af49c5342e .quad 0xffeed811b2e6fad0 .quad 0xefa28c8dfcc84e29 .quad 0x11b5df18a44cc543 .quad 0xbf7bbb8a42a975fc .quad 0x8c5c397796ada358 .quad 0xe27fc76fcdedaa48 .quad 0x19735fd7f6bc20a6 .quad 0xe3ab90d042c84266 .quad 0xeb848e0f7f19547e .quad 0x2503a1d065a497b9 .quad 0x0fef911191df895f // 2^252 * 1 * G .quad 0xb1507ca1ab1c6eb9 .quad 0xbd448f3e16b687b3 .quad 0x3455fb7f2c7a91ab .quad 0x7579229e2f2adec1 .quad 0x6ab5dcb85b1c16b7 .quad 0x94c0fce83c7b27a5 .quad 0xa4b11c1a735517be .quad 0x499238d0ba0eafaa .quad 0xecf46e527aba8b57 .quad 0x15a08c478bd1647b .quad 0x7af1c6a65f706fef .quad 0x6345fa78f03a30d5 // 2^252 * 2 * G .quad 0xdf02f95f1015e7a1 .quad 0x790ec41da9b40263 .quad 0x4d3a0ea133ea1107 .quad 0x54f70be7e33af8c9 .quad 0x93d3cbe9bdd8f0a4 .quad 0xdb152c1bfd177302 .quad 0x7dbddc6d7f17a875 .quad 0x3e1a71cc8f426efe .quad 0xc83ca3e390babd62 .quad 0x80ede3670291c833 .quad 0xc88038ccd37900c4 .quad 0x2c5fc0231ec31fa1 // 2^252 * 3 * G .quad 0xfeba911717038b4f .quad 0xe5123721c9deef81 .quad 0x1c97e4e75d0d8834 .quad 0x68afae7a23dc3bc6 .quad 0xc422e4d102456e65 .quad 0x87414ac1cad47b91 .quad 0x1592e2bba2b6ffdd .quad 0x75d9d2bff5c2100f .quad 0x5bd9b4763626e81c .quad 0x89966936bca02edd .quad 0x0a41193d61f077b3 .quad 0x3097a24200ce5471 // 2^252 * 4 * G .quad 0x57427734c7f8b84c .quad 0xf141a13e01b270e9 .quad 0x02d1adfeb4e564a6 .quad 0x4bb23d92ce83bd48 .quad 0xa162e7246695c486 .quad 0x131d633435a89607 .quad 0x30521561a0d12a37 .quad 0x56704bada6afb363 .quad 0xaf6c4aa752f912b9 .quad 0x5e665f6cd86770c8 .quad 0x4c35ac83a3c8cd58 .quad 0x2b7a29c010a58a7e // 2^252 * 5 * G .quad 0xc4007f77d0c1cec3 .quad 0x8d1020b6bac492f8 .quad 0x32ec29d57e69daaf .quad 0x599408759d95fce0 .quad 0x33810a23bf00086e .quad 0xafce925ee736ff7c .quad 0x3d60e670e24922d4 .quad 0x11ce9e714f96061b .quad 0x219ef713d815bac1 .quad 0xf141465d485be25c .quad 0x6d5447cc4e513c51 .quad 
0x174926be5ef44393 // 2^252 * 6 * G .quad 0xb5deb2f9fc5bd5bb .quad 0x92daa72ae1d810e1 .quad 0xafc4cfdcb72a1c59 .quad 0x497d78813fc22a24 .quad 0x3ef5d41593ea022e .quad 0x5cbcc1a20ed0eed6 .quad 0x8fd24ecf07382c8c .quad 0x6fa42ead06d8e1ad .quad 0xe276824a1f73371f .quad 0x7f7cf01c4f5b6736 .quad 0x7e201fe304fa46e7 .quad 0x785a36a357808c96 // 2^252 * 7 * G .quad 0x825fbdfd63014d2b .quad 0xc852369c6ca7578b .quad 0x5b2fcd285c0b5df0 .quad 0x12ab214c58048c8f .quad 0x070442985d517bc3 .quad 0x6acd56c7ae653678 .quad 0x00a27983985a7763 .quad 0x5167effae512662b .quad 0xbd4ea9e10f53c4b6 .quad 0x1673dc5f8ac91a14 .quad 0xa8f81a4e2acc1aba .quad 0x33a92a7924332a25 // 2^252 * 8 * G .quad 0x9dd1f49927996c02 .quad 0x0cb3b058e04d1752 .quad 0x1f7e88967fd02c3e .quad 0x2f964268cb8b3eb1 .quad 0x7ba95ba0218f2ada .quad 0xcff42287330fb9ca .quad 0xdada496d56c6d907 .quad 0x5380c296f4beee54 .quad 0x9d4f270466898d0a .quad 0x3d0987990aff3f7a .quad 0xd09ef36267daba45 .quad 0x7761455e7b1c669c #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
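The .quad blocks above are the tail of a precomputed table of base-point multiples: the labels run j * 2^(4i) * G for j = 1..8 with the exponent stepping by 4, which is the shape typically used for fixed-base scalar multiplication with signed 4-bit (radix-16) digits, where only the eight positive multiples per window need to be stored and negative digits are handled by conditional negation. As a hedged illustration only (this is not the crate's code; the helper name recode_signed_radix16 is hypothetical), the sketch below shows the scalar recoding that makes such a table sufficient:

# Minimal Python sketch, assuming a table that stores j*16^i*G for j = 1..8.
def recode_signed_radix16(k: int, digits: int = 64):
    """Return d[0..digits] with k == sum(d[i] * 16**i) and each d[i] in [-8, 8]."""
    out = []
    for _ in range(digits):
        d = k & 0xF
        k >>= 4
        if d > 8:          # map 9..15 to -7..-1 and carry into the next digit
            d -= 16
            k += 1
        out.append(d)
    out.append(k)          # final carry digit (0 or 1 for scalars below 16**digits)
    return out

# Sanity check: the signed-digit recoding reproduces the original scalar.
k = 0x123456789ABCDEF0
assert sum(d * 16**i for i, d in enumerate(recode_signed_radix16(k))) == k

With this recoding, window i looks up |d[i]| in the table of 2^(4i)*j*G entries and conditionally negates the point when d[i] is negative, so the table never needs multiples beyond 8*G per window.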
marvin-hansen/iggy-streaming-system
21,140
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/curve25519/edwards25519_decode_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Decode compressed 256-bit form of edwards25519 point // Input c[32] (bytes); output function return and z[8] // // extern uint64_t edwards25519_decode_alt(uint64_t z[static 8], const uint8_t c[static 32]); // // This interprets the input byte string as a little-endian number // representing a point (x,y) on the edwards25519 curve, encoded as // 2^255 * x_0 + y where x_0 is the least significant bit of x. It // returns the full pair of coordinates x (at z) and y (at z+4). The // return code is 0 for success and 1 for failure, which means that // the input does not correspond to the encoding of any edwards25519 // point. This can happen for three reasons, where y = the lowest // 255 bits of the input: // // * y >= p_25519 // Input y coordinate is not reduced // * (y^2 - 1) * (1 + d_25519 * y^2) has no modular square root // There is no x such that (x,y) is on the curve // * y^2 = 1 and top bit of input is set // Cannot be the canonical encoding of (0,1) or (0,-1) // // Standard x86-64 ABI: RDI = z, RSI = c // Microsoft x64 ABI: RCX = z, RDX = c // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(edwards25519_decode_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(edwards25519_decode_alt) .text // Size in bytes of a 64-bit word #define N 8 // Pointer-offset pairs for temporaries on stack #define y 0(%rsp) #define s (4*N)(%rsp) #define t (8*N)(%rsp) #define u (12*N)(%rsp) #define v (16*N)(%rsp) #define w (20*N)(%rsp) #define q (24*N)(%rsp) #define res (28*N)(%rsp) #define sgnbit (29*N)(%rsp) #define badun (30*N)(%rsp) // Total size to reserve on the stack #define NSPACE (32*N) // Corrupted versions when stack is down 8 more #define q8 (25*N)(%rsp) // Syntactic variants to make x86_att version simpler to generate #define Y 0 #define S (4*N) #define T (8*N) #define U (12*N) #define V (16*N) #define W (20*N) #define Q8 (25*N) S2N_BN_SYMBOL(edwards25519_decode_alt): // In this case the Windows form literally makes a subroutine call. // This avoids hassle arising from subroutine offsets #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi callq edwards25519_decode_alt_standard popq %rsi popq %rdi ret edwards25519_decode_alt_standard: #endif // Save registers and make room for temporaries pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp // Save the return pointer for the end so we can overwrite %rdi later movq %rdi, res // Load the inputs, which can be done word-wise since x86 is little-endian. // Let y be the lowest 255 bits of the input and sgnbit the desired parity. // If y >= p_25519 then already flag the input as invalid (badun = 1). 
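The header comment above describes the decode operation implemented by this file: y is the low 255 bits of the little-endian input, bit 255 requests the parity of x, and x is recovered as a square root of (y^2 - 1)/(1 + d*y^2), with the three listed failure cases. As a hedged, high-level illustration of the same math (RFC 8032-style decompression in Python, not a transcription of the assembly; the assembly returns 0/1 and writes x,y through z, whereas this hypothetical decode helper returns the point or None):

# Minimal Python sketch of the decode logic described in the header comment.
p = 2**255 - 19
d = (-121665 * pow(121666, p - 2, p)) % p   # edwards25519 curve constant d

def decode(c: bytes):
    n = int.from_bytes(c, "little")
    sign = n >> 255                  # requested parity of x (x_0)
    y = n & ((1 << 255) - 1)         # low 255 bits
    if y >= p:
        return None                  # y coordinate not reduced: invalid
    u = (y * y - 1) % p
    v = (d * y * y + 1) % p
    # Candidate square root of u/v via the (p-5)/8 exponent trick.
    x = (u * pow(v, 3, p)) % p
    x = (x * pow(u * pow(v, 7, p), (p - 5) // 8, p)) % p
    if (v * x * x - u) % p != 0:
        x = (x * pow(2, (p - 1) // 4, p)) % p   # multiply by sqrt(-1)
    if (v * x * x - u) % p != 0:
        return None                  # no square root exists: invalid
    if x == 0 and sign == 1:
        return None                  # (0, +-1) cannot carry a set sign bit
    if (x & 1) != sign:
        x = p - x
    return (x, y)

# Usage check: the standard Ed25519 base point encoding (y = 4/5, even x) decodes.
assert decode(bytes.fromhex("58" + "66" * 31)) is not None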
movq (%rsi), %rax movq %rax, Y(%rsp) movq 8(%rsi), %rbx movq %rbx, Y+8(%rsp) xorl %ebp, %ebp movq 16(%rsi), %rcx movq %rcx, Y+16(%rsp) movq 24(%rsi), %rdx btr $63, %rdx movq %rdx, Y+24(%rsp) adcq %rbp, %rbp movq %rbp, sgnbit addq $19, %rax adcq $0, %rbx adcq $0, %rcx adcq $0, %rdx shrq $63, %rdx movq %rdx, badun // u = y^2 - 1 (actually y + 2^255-20, not reduced modulo) // v = 1 + d * y^2 (not reduced modulo from the +1) // w = u * v leaq V(%rsp), %rdi movq $1, %rsi leaq Y(%rsp), %rdx callq edwards25519_decode_alt_nsqr_p25519 movq V(%rsp), %rax subq $20, %rax movq V+8(%rsp), %rbx sbbq $0, %rbx movq V+16(%rsp), %rcx sbbq $0, %rcx movq V+24(%rsp), %rdx sbbq $0, %rdx btc $63, %rdx movq %rax, U(%rsp) movq %rbx, U+8(%rsp) movq %rcx, U+16(%rsp) movq %rdx, U+24(%rsp) movq $0x75eb4dca135978a3, %rax movq %rax, W(%rsp) movq $0x00700a4d4141d8ab, %rax movq %rax, W+8(%rsp) movq $0x8cc740797779e898, %rax movq %rax, W+16(%rsp) movq $0x52036cee2b6ffe73, %rax movq %rax, W+24(%rsp) leaq V(%rsp), %rdi leaq W(%rsp), %rsi leaq V(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 movq V(%rsp), %rax addq $1, %rax movq V+8(%rsp), %rbx adcq $0, %rbx movq V+16(%rsp), %rcx adcq $0, %rcx movq V+24(%rsp), %rdx adcq $0, %rdx movq %rax, V(%rsp) movq %rbx, V+8(%rsp) movq %rcx, V+16(%rsp) movq %rdx, V+24(%rsp) leaq W(%rsp), %rdi leaq U(%rsp), %rsi leaq V(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 // Get s = w^{252-3} as a candidate inverse square root 1/sqrt(w). // This power tower computation is the same as bignum_invsqrt_p25519 leaq T(%rsp), %rdi movq $1, %rsi leaq W(%rsp), %rdx callq edwards25519_decode_alt_nsqr_p25519 leaq T(%rsp), %rdi leaq T(%rsp), %rsi leaq W(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 leaq S(%rsp), %rdi movq $2, %rsi leaq T(%rsp), %rdx callq edwards25519_decode_alt_nsqr_p25519 leaq T(%rsp), %rdi leaq S(%rsp), %rsi leaq T(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 leaq S(%rsp), %rdi movq $1, %rsi leaq T(%rsp), %rdx callq edwards25519_decode_alt_nsqr_p25519 leaq V(%rsp), %rdi leaq S(%rsp), %rsi leaq W(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 leaq S(%rsp), %rdi movq $5, %rsi leaq V(%rsp), %rdx callq edwards25519_decode_alt_nsqr_p25519 leaq T(%rsp), %rdi leaq S(%rsp), %rsi leaq V(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 leaq S(%rsp), %rdi movq $10, %rsi leaq T(%rsp), %rdx callq edwards25519_decode_alt_nsqr_p25519 leaq T(%rsp), %rdi leaq S(%rsp), %rsi leaq T(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 leaq S(%rsp), %rdi movq $5, %rsi leaq T(%rsp), %rdx callq edwards25519_decode_alt_nsqr_p25519 leaq V(%rsp), %rdi leaq S(%rsp), %rsi leaq V(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 leaq S(%rsp), %rdi movq $25, %rsi leaq V(%rsp), %rdx callq edwards25519_decode_alt_nsqr_p25519 leaq T(%rsp), %rdi leaq S(%rsp), %rsi leaq V(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 leaq S(%rsp), %rdi movq $50, %rsi leaq T(%rsp), %rdx callq edwards25519_decode_alt_nsqr_p25519 leaq T(%rsp), %rdi leaq S(%rsp), %rsi leaq T(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 leaq S(%rsp), %rdi movq $25, %rsi leaq T(%rsp), %rdx callq edwards25519_decode_alt_nsqr_p25519 leaq V(%rsp), %rdi leaq S(%rsp), %rsi leaq V(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 leaq S(%rsp), %rdi movq $125, %rsi leaq V(%rsp), %rdx callq edwards25519_decode_alt_nsqr_p25519 leaq V(%rsp), %rdi leaq S(%rsp), %rsi leaq V(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 leaq S(%rsp), %rdi movq $2, %rsi leaq V(%rsp), %rdx callq edwards25519_decode_alt_nsqr_p25519 
leaq S(%rsp), %rdi leaq S(%rsp), %rsi leaq W(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 // Compute v' = s^2 * w to discriminate whether the square root sqrt(u/v) // exists, in which case we should get 0, 1 or -1. leaq V(%rsp), %rdi movq $1, %rsi leaq S(%rsp), %rdx callq edwards25519_decode_alt_nsqr_p25519 leaq V(%rsp), %rdi leaq V(%rsp), %rsi leaq W(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 // Get the two candidates for sqrt(u / v), one being s = u * w^{252-3} // and the other being t = s * j_25519 where j_25519 = sqrt(-1). leaq S(%rsp), %rdi leaq U(%rsp), %rsi leaq S(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 movq $0xc4ee1b274a0ea0b0, %rax movq %rax, T(%rsp) movq $0x2f431806ad2fe478, %rax movq %rax, T+8(%rsp) movq $0x2b4d00993dfbd7a7, %rax movq %rax, T+16(%rsp) movq $0x2b8324804fc1df0b, %rax movq %rax, T+24(%rsp) leaq T(%rsp), %rdi leaq S(%rsp), %rsi leaq T(%rsp), %rdx callq edwards25519_decode_alt_mul_p25519 // %rax = 0 <=> s^2 * w = 0 or 1 movq V(%rsp), %r8 movq V+8(%rsp), %r9 movq V+16(%rsp), %r10 movq V+24(%rsp), %r11 movl $1, %eax notq %rax andq %r8, %rax orq %r9, %rax orq %r10, %rax orq %r11, %rax // %r8 = 0 <=> s^2 * w = -1 (mod p_25519, i.e. s^2 * w = 2^255 - 20) addq $20, %r8 notq %r9 notq %r10 bts $63, %r11 addq $1, %r11 orq %r9, %r8 orq %r11, %r10 orq %r10, %r8 // If s^2 * w is not 0 or 1 then replace s by t testq %rax, %rax movq S(%rsp), %r12 movq T(%rsp), %rbx cmovnzq %rbx, %r12 movq S+8(%rsp), %r13 movq T+8(%rsp), %rbx cmovnzq %rbx, %r13 movq S+16(%rsp), %r14 movq T+16(%rsp), %rbx cmovnzq %rbx, %r14 movq S+24(%rsp), %r15 movq T+24(%rsp), %rbx cmovnzq %rbx, %r15 movq %r12, S(%rsp) movq %r13, S+8(%rsp) movq %r14, S+16(%rsp) movq %r15, S+24(%rsp) // Check invalidity, occurring if s^2 * w is not in {0,1,-1} cmovzq %rax, %r8 negq %r8 sbbq %r8, %r8 negq %r8 orq %r8, badun // Let [%r11;%r10;%r9;%r8] = s and [%r15;%r14;%r13;%r12] = p_25519 - s movq S(%rsp), %r8 movq $-19, %r12 subq %r8, %r12 movq S+8(%rsp), %r9 movq $-1, %r13 sbbq %r9, %r13 movq S+16(%rsp), %r10 movq $-1, %r14 sbbq %r10, %r14 movq S+24(%rsp), %r11 movq $0x7FFFFFFFFFFFFFFF, %r15 sbbq %r11, %r15 // Decide whether a flip is apparently indicated, s_0 <=> sgnbit // Decide also if s = 0 by OR-ing its digits. 
Now if a flip is indicated: // - if s = 0 then mark as invalid // - if s <> 0 then indeed flip movl $1, %ecx andq %r8, %rcx xorq sgnbit, %rcx movq badun, %rdx movq %rdx, %rsi orq %rcx, %rdx xorl %ebp, %ebp movq %r8, %rax movq %r9, %rbx orq %r10, %rax orq %r11, %rbx orq %rbx, %rax cmovzq %rbp, %rcx cmovnzq %rsi, %rdx // Actual selection of x as s or -s, copying of y and return of validity testq %rcx, %rcx cmovnzq %r12, %r8 cmovnzq %r13, %r9 cmovnzq %r14, %r10 cmovnzq %r15, %r11 movq res, %rdi movq %r8, (%rdi) movq %r9, 8(%rdi) movq %r10, 16(%rdi) movq %r11, 24(%rdi) movq Y(%rsp), %rcx movq %rcx, 32(%rdi) movq Y+8(%rsp), %rcx movq %rcx, 40(%rdi) movq Y+16(%rsp), %rcx movq %rcx, 48(%rdi) movq Y+24(%rsp), %rcx movq %rcx, 56(%rdi) movq %rdx, %rax // Restore stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx ret // ************************************************************* // Local z = x * y // ************************************************************* edwards25519_decode_alt_mul_p25519: movq %rdx, %rcx movq (%rsi), %rax mulq (%rcx) movq %rax, %r8 movq %rdx, %r9 xorq %r10, %r10 xorq %r11, %r11 movq (%rsi), %rax mulq 0x8(%rcx) addq %rax, %r9 adcq %rdx, %r10 movq 0x8(%rsi), %rax mulq (%rcx) addq %rax, %r9 adcq %rdx, %r10 adcq $0x0, %r11 xorq %r12, %r12 movq (%rsi), %rax mulq 0x10(%rcx) addq %rax, %r10 adcq %rdx, %r11 adcq %r12, %r12 movq 0x8(%rsi), %rax mulq 0x8(%rcx) addq %rax, %r10 adcq %rdx, %r11 adcq $0x0, %r12 movq 0x10(%rsi), %rax mulq (%rcx) addq %rax, %r10 adcq %rdx, %r11 adcq $0x0, %r12 xorq %r13, %r13 movq (%rsi), %rax mulq 0x18(%rcx) addq %rax, %r11 adcq %rdx, %r12 adcq %r13, %r13 movq 0x8(%rsi), %rax mulq 0x10(%rcx) addq %rax, %r11 adcq %rdx, %r12 adcq $0x0, %r13 movq 0x10(%rsi), %rax mulq 0x8(%rcx) addq %rax, %r11 adcq %rdx, %r12 adcq $0x0, %r13 movq 0x18(%rsi), %rax mulq (%rcx) addq %rax, %r11 adcq %rdx, %r12 adcq $0x0, %r13 xorq %r14, %r14 movq 0x8(%rsi), %rax mulq 0x18(%rcx) addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0x10(%rsi), %rax mulq 0x10(%rcx) addq %rax, %r12 adcq %rdx, %r13 adcq $0x0, %r14 movq 0x18(%rsi), %rax mulq 0x8(%rcx) addq %rax, %r12 adcq %rdx, %r13 adcq $0x0, %r14 xorq %r15, %r15 movq 0x10(%rsi), %rax mulq 0x18(%rcx) addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0x18(%rsi), %rax mulq 0x10(%rcx) addq %rax, %r13 adcq %rdx, %r14 adcq $0x0, %r15 movq 0x18(%rsi), %rax mulq 0x18(%rcx) addq %rax, %r14 adcq %rdx, %r15 movl $0x26, %esi movq %r12, %rax mulq %rsi addq %rax, %r8 adcq %rdx, %r9 sbbq %rcx, %rcx movq %r13, %rax mulq %rsi subq %rcx, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %rcx, %rcx movq %r14, %rax mulq %rsi subq %rcx, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx movq %r15, %rax mulq %rsi subq %rcx, %rdx xorq %rcx, %rcx addq %rax, %r11 movq %rdx, %r12 adcq %rcx, %r12 shldq $0x1, %r11, %r12 leaq 0x1(%r12), %rax movl $0x13, %esi bts $0x3f, %r11 imulq %rsi, %rax addq %rax, %r8 adcq %rcx, %r9 adcq %rcx, %r10 adcq %rcx, %r11 sbbq %rax, %rax notq %rax andq %rsi, %rax subq %rax, %r8 sbbq %rcx, %r9 sbbq %rcx, %r10 sbbq %rcx, %r11 btr $0x3f, %r11 movq %r8, (%rdi) movq %r9, 0x8(%rdi) movq %r10, 0x10(%rdi) movq %r11, 0x18(%rdi) ret // ************************************************************* // Local z = 2^n * x // ************************************************************* edwards25519_decode_alt_nsqr_p25519: // Copy input argument into q movq (%rdx), %rax movq 8(%rdx), %rbx movq 16(%rdx), %rcx movq 24(%rdx), %rdx movq %rax, Q8(%rsp) movq %rbx, Q8+8(%rsp) movq %rcx, Q8+16(%rsp) movq %rdx, 
Q8+24(%rsp) // Main squaring loop, accumulating in u consistently and // only ensuring the intermediates are < 2 * p_25519 = 2^256 - 38 edwards25519_decode_alt_loop: movq Q8(%rsp), %rax mulq %rax movq %rax, %r8 movq %rdx, %r9 xorq %r10, %r10 xorq %r11, %r11 movq Q8(%rsp), %rax mulq Q8+0x8(%rsp) addq %rax, %rax adcq %rdx, %rdx adcq $0x0, %r11 addq %rax, %r9 adcq %rdx, %r10 adcq $0x0, %r11 xorq %r12, %r12 movq Q8+0x8(%rsp), %rax mulq %rax addq %rax, %r10 adcq %rdx, %r11 adcq $0x0, %r12 movq Q8(%rsp), %rax mulq Q8+0x10(%rsp) addq %rax, %rax adcq %rdx, %rdx adcq $0x0, %r12 addq %rax, %r10 adcq %rdx, %r11 adcq $0x0, %r12 xorq %r13, %r13 movq Q8(%rsp), %rax mulq Q8+0x18(%rsp) addq %rax, %rax adcq %rdx, %rdx adcq $0x0, %r13 addq %rax, %r11 adcq %rdx, %r12 adcq $0x0, %r13 movq Q8+0x8(%rsp), %rax mulq Q8+0x10(%rsp) addq %rax, %rax adcq %rdx, %rdx adcq $0x0, %r13 addq %rax, %r11 adcq %rdx, %r12 adcq $0x0, %r13 xorq %r14, %r14 movq Q8+0x8(%rsp), %rax mulq Q8+0x18(%rsp) addq %rax, %rax adcq %rdx, %rdx adcq $0x0, %r14 addq %rax, %r12 adcq %rdx, %r13 adcq $0x0, %r14 movq Q8+0x10(%rsp), %rax mulq %rax addq %rax, %r12 adcq %rdx, %r13 adcq $0x0, %r14 xorq %r15, %r15 movq Q8+0x10(%rsp), %rax mulq Q8+0x18(%rsp) addq %rax, %rax adcq %rdx, %rdx adcq $0x0, %r15 addq %rax, %r13 adcq %rdx, %r14 adcq $0x0, %r15 movq Q8+0x18(%rsp), %rax mulq %rax addq %rax, %r14 adcq %rdx, %r15 movl $0x26, %ebx movq %r12, %rax mulq %rbx addq %rax, %r8 adcq %rdx, %r9 sbbq %rcx, %rcx movq %r13, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %rcx, %rcx movq %r14, %rax mulq %rbx subq %rcx, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %rcx, %rcx movq %r15, %rax mulq %rbx subq %rcx, %rdx xorq %rcx, %rcx addq %rax, %r11 movq %rdx, %r12 adcq %rcx, %r12 shldq $0x1, %r11, %r12 btr $0x3f, %r11 movl $0x13, %edx imulq %r12, %rdx addq %rdx, %r8 adcq %rcx, %r9 adcq %rcx, %r10 adcq %rcx, %r11 movq %r8, Q8(%rsp) movq %r9, Q8+0x8(%rsp) movq %r10, Q8+0x10(%rsp) movq %r11, Q8+0x18(%rsp) // Loop as applicable decq %rsi jnz edwards25519_decode_alt_loop // We know the intermediate result x < 2^256 - 38, and now we do strict // modular reduction mod 2^255 - 19. Note x < 2^255 - 19 <=> x + 19 < 2^255 // which is equivalent to a "ns" condition. We just use the results where // they were in registers [%r11;%r10;%r9;%r8] instead of re-loading them. movl $19, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx addq %r8, %rax adcq %r9, %rbx adcq %r10, %rcx adcq %r11, %rdx cmovns %r8, %rax cmovns %r9, %rbx cmovns %r10, %rcx cmovns %r11, %rdx btr $63, %rdx movq %rax, (%rdi) movq %rbx, 8(%rdi) movq %rcx, 16(%rdi) movq %rdx, 24(%rdi) ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
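The decode routine above forms w = u * v, raises it to 2^252 - 3, multiplies by u to get a candidate root s of u/v, uses s^2 * w to decide whether to keep s, multiply it by sqrt(-1), or reject the encoding, and finally flips the sign of s to match the encoded sign bit. The following Python sketch is a functional restatement of that recovery (not a line-by-line translation and not part of this repository); the names P, D, SQRT_M1 and decode_x are illustrative, and sqrt(-1) is derived here as 2^((p-1)/4), which works because 2 is a non-residue mod p_25519.

P = 2**255 - 19
D = -121665 * pow(121666, P - 2, P) % P        # edwards25519 curve constant d
SQRT_M1 = pow(2, (P - 1) // 4, P)              # a square root of -1 mod p

def decode_x(y, sign_bit):
    """Recover x from y via x^2 = (y^2 - 1)/(d*y^2 + 1); return None if invalid."""
    u = (y * y - 1) % P
    v = (D * y * y + 1) % P
    # Candidate root s = u * (u*v)^((p-5)/8), the same formula as the assembly
    s = u * pow(u * v % P, (P - 5) // 8, P) % P
    if (v * s * s) % P == (P - u) % P:
        s = s * SQRT_M1 % P                    # other candidate: multiply by sqrt(-1)
    if (v * s * s) % P != u:
        return None                            # u/v is not a square: invalid encoding
    if s == 0 and sign_bit == 1:
        return None                            # non-canonical encoding of x = 0
    if (s & 1) != sign_bit:
        s = P - s                              # flip sign to match the encoded bit
    return s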
marvin-hansen/iggy-streaming-system
2,379
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/curve25519/edwards25519_encode.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Encode edwards25519 point into compressed form as 256-bit number // Input p[8]; output z[32] (bytes) // // extern void edwards25519_encode // (uint8_t z[static 32], uint64_t p[static 8]); // // This assumes that the input buffer p points to a pair of 256-bit // numbers x (at p) and y (at p+4) representing a point (x,y) on the // edwards25519 curve. It is assumed that both x and y are < p_25519 // but there is no checking of this, nor of the fact that (x,y) is // in fact on the curve. // // The output in z is a little-endian array of bytes corresponding to // the standard compressed encoding of a point as 2^255 * x_0 + y // where x_0 is the least significant bit of x. // See "https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.2" // In this implementation, y is simply truncated to 255 bits, but if // it is reduced mod p_25519 as expected this does not affect values. // // Standard x86-64 ABI: RDI = z, RSI = p // Microsoft x64 ABI: RCX = z, RDX = p // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(edwards25519_encode) S2N_BN_SYM_PRIVACY_DIRECTIVE(edwards25519_encode) .text #define z %rdi #define p %rsi #define y0 %rax #define y1 %rcx #define y2 %rdx #define y3 %r8 #define xb %r9 S2N_BN_SYMBOL(edwards25519_encode): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // Load lowest word of x coordinate in xb and full y as [y3;y2;y1;y0]. movq (p), xb movq 32(p), y0 movq 40(p), y1 movq 48(p), y2 movq 56(p), y3 // Compute the encoded form, making the LSB of x the MSB of the encoding btr $63, y3 shlq $63, xb orq xb, y3 // Store back (by the word, since x86 is little-endian anyway) movq y0, (z) movq y1, 8(z) movq y2, 16(z) movq y3, 24(z) #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
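A one-line Python restatement of the encoding above, assuming x and y are already reduced mod p_25519 as the header comment requires; the function name is illustrative only, and it mirrors the btr/shl/or manipulation of the top word.

def edwards25519_encode(x, y):
    # 2^255 * x_0 + y, serialized little-endian into 32 bytes
    return (((x & 1) << 255) | (y % 2**255)).to_bytes(32, "little")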
marvin-hansen/iggy-streaming-system
79,887
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/curve25519/curve25519_x25519_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // The x25519 function for curve25519 // Inputs scalar[4], point[4]; output res[4] // // extern void curve25519_x25519_alt // (uint64_t res[static 4],uint64_t scalar[static 4],uint64_t point[static 4]) // // The function has a second prototype considering the arguments as arrays // of bytes rather than 64-bit words. The underlying code is the same, since // the x86 platform is little-endian. // // extern void curve25519_x25519_byte_alt // (uint8_t res[static 32],uint8_t scalar[static 32],uint8_t point[static 32]) // // Given a scalar n and the X coordinate of an input point P = (X,Y) on // curve25519 (Y can live in any extension field of characteristic 2^255-19), // this returns the X coordinate of n * P = (X, Y), or 0 when n * P is the // point at infinity. Both n and X inputs are first slightly modified/mangled // as specified in the relevant RFC (https://www.rfc-editor.org/rfc/rfc7748); // in particular the lower three bits of n are set to zero. Does not implement // the zero-check specified in Section 6.1. // // Standard x86-64 ABI: RDI = res, RSI = scalar, RDX = point // Microsoft x64 ABI: RCX = res, RDX = scalar, R8 = point // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(curve25519_x25519_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(curve25519_x25519_alt) S2N_BN_SYM_VISIBILITY_DIRECTIVE(curve25519_x25519_byte_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(curve25519_x25519_byte_alt) .text // Size of individual field elements #define NUMSIZE 32 // Stable homes for the input result argument during the whole body // and other variables that are only needed prior to the modular inverse. #define res 12*NUMSIZE(%rsp) #define i 12*NUMSIZE+8(%rsp) #define swap 12*NUMSIZE+16(%rsp) // Pointers to result x coord to be written, assuming the base "res" // has been loaded into %rbp #define resx 0(%rbp) // Pointer-offset pairs for temporaries on stack with some aliasing. // Both dmsn and dnsm need space for >= 5 digits, and we allocate 8 #define scalar (0*NUMSIZE)(%rsp) #define pointx (1*NUMSIZE)(%rsp) #define dm (2*NUMSIZE)(%rsp) #define zm (3*NUMSIZE)(%rsp) #define sm (3*NUMSIZE)(%rsp) #define dpro (3*NUMSIZE)(%rsp) #define sn (4*NUMSIZE)(%rsp) #define dn (5*NUMSIZE)(%rsp) #define e (5*NUMSIZE)(%rsp) #define dmsn (6*NUMSIZE)(%rsp) #define p (6*NUMSIZE)(%rsp) #define zn (7*NUMSIZE)(%rsp) #define xm (8*NUMSIZE)(%rsp) #define dnsm (8*NUMSIZE)(%rsp) #define spro (8*NUMSIZE)(%rsp) #define xn (10*NUMSIZE)(%rsp) #define s (10*NUMSIZE)(%rsp) #define d (11*NUMSIZE)(%rsp) // Total size to reserve on the stack // This includes space for the 3 other variables above // and rounds up to a multiple of 32 #define NSPACE (13*NUMSIZE) // Macro wrapping up the basic field operation bignum_mul_p25519_alt, only // trivially different from a pure function call to that subroutine. 
#define mul_p25519(P0,P1,P2) \ movq P1, %rax ; \ mulq P2; \ movq %rax, %r8 ; \ movq %rdx, %r9 ; \ xorq %r10, %r10 ; \ xorq %r11, %r11 ; \ movq P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ movq 0x8+P1, %rax ; \ mulq P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ adcq $0x0, %r11 ; \ xorq %r12, %r12 ; \ movq P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq %r12, %r12 ; \ movq 0x8+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ movq 0x10+P1, %rax ; \ mulq P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ xorq %r13, %r13 ; \ movq P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq %r13, %r13 ; \ movq 0x8+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x10+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x18+P1, %rax ; \ mulq P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ xorq %r14, %r14 ; \ movq 0x8+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq %r14, %r14 ; \ movq 0x10+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ movq 0x18+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ xorq %r15, %r15 ; \ movq 0x10+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq %r15, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ movl $0x26, %esi ; \ movq %r12, %rax ; \ mulq %rsi; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %rcx, %rcx ; \ movq %r13, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %rcx, %rcx ; \ movq %r14, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rcx, %rcx ; \ movq %r15, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ xorq %rcx, %rcx ; \ addq %rax, %r11 ; \ movq %rdx, %r12 ; \ adcq %rcx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ leaq 0x1(%r12), %rax ; \ movl $0x13, %esi ; \ bts $63, %r11 ; \ imulq %rsi, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ sbbq %rax, %rax ; \ notq %rax; \ andq %rsi, %rax ; \ subq %rax, %r8 ; \ sbbq %rcx, %r9 ; \ sbbq %rcx, %r10 ; \ sbbq %rcx, %r11 ; \ btr $63, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // A version of multiplication that only guarantees output < 2 * p_25519. // This basically skips the +1 and final correction in quotient estimation. 
#define mul_4(P0,P1,P2) \ movq P1, %rax ; \ mulq P2; \ movq %rax, %r8 ; \ movq %rdx, %r9 ; \ xorq %r10, %r10 ; \ xorq %r11, %r11 ; \ movq P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ movq 0x8+P1, %rax ; \ mulq P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ adcq $0x0, %r11 ; \ xorq %r12, %r12 ; \ movq P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq %r12, %r12 ; \ movq 0x8+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ movq 0x10+P1, %rax ; \ mulq P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ xorq %r13, %r13 ; \ movq P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq %r13, %r13 ; \ movq 0x8+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x10+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x18+P1, %rax ; \ mulq P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ xorq %r14, %r14 ; \ movq 0x8+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq %r14, %r14 ; \ movq 0x10+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ movq 0x18+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ xorq %r15, %r15 ; \ movq 0x10+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq %r15, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ movl $0x26, %esi ; \ movq %r12, %rax ; \ mulq %rsi; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %rcx, %rcx ; \ movq %r13, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %rcx, %rcx ; \ movq %r14, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rcx, %rcx ; \ movq %r15, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ xorq %rcx, %rcx ; \ addq %rax, %r11 ; \ movq %rdx, %r12 ; \ adcq %rcx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Multiplication just giving a 5-digit result (actually < 39 * p_25519) // by not doing anything beyond the first stage of reduction #define mul_5(P0,P1,P2) \ movq P1, %rax ; \ mulq P2; \ movq %rax, %r8 ; \ movq %rdx, %r9 ; \ xorq %r10, %r10 ; \ xorq %r11, %r11 ; \ movq P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ movq 0x8+P1, %rax ; \ mulq P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ adcq $0x0, %r11 ; \ xorq %r12, %r12 ; \ movq P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq %r12, %r12 ; \ movq 0x8+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ movq 0x10+P1, %rax ; \ mulq P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ xorq %r13, %r13 ; \ movq P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq %r13, %r13 ; \ movq 0x8+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x10+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x18+P1, %rax ; \ mulq P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ xorq %r14, %r14 ; \ movq 0x8+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r12 ; \ 
adcq %rdx, %r13 ; \ adcq %r14, %r14 ; \ movq 0x10+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ movq 0x18+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ xorq %r15, %r15 ; \ movq 0x10+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq %r15, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ movl $0x26, %esi ; \ movq %r12, %rax ; \ mulq %rsi; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %rcx, %rcx ; \ movq %r13, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %rcx, %rcx ; \ movq %r14, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rcx, %rcx ; \ movq %r15, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ xorq %rcx, %rcx ; \ addq %rax, %r11 ; \ movq %rdx, %r12 ; \ adcq %rcx, %r12 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 ; \ movq %r12, 0x20+P0 // Squaring just giving a result < 2 * p_25519, which is done by // basically skipping the +1 in the quotient estimate and the final // optional correction. #define sqr_4(P0,P1) \ movq P1, %rax ; \ mulq %rax; \ movq %rax, %r8 ; \ movq %rdx, %r9 ; \ xorq %r10, %r10 ; \ xorq %r11, %r11 ; \ movq P1, %rax ; \ mulq 0x8+P1; \ addq %rax, %rax ; \ adcq %rdx, %rdx ; \ adcq $0x0, %r11 ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ adcq $0x0, %r11 ; \ xorq %r12, %r12 ; \ movq 0x8+P1, %rax ; \ mulq %rax; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ movq P1, %rax ; \ mulq 0x10+P1; \ addq %rax, %rax ; \ adcq %rdx, %rdx ; \ adcq $0x0, %r12 ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ xorq %r13, %r13 ; \ movq P1, %rax ; \ mulq 0x18+P1; \ addq %rax, %rax ; \ adcq %rdx, %rdx ; \ adcq $0x0, %r13 ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x8+P1, %rax ; \ mulq 0x10+P1; \ addq %rax, %rax ; \ adcq %rdx, %rdx ; \ adcq $0x0, %r13 ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ xorq %r14, %r14 ; \ movq 0x8+P1, %rax ; \ mulq 0x18+P1; \ addq %rax, %rax ; \ adcq %rdx, %rdx ; \ adcq $0x0, %r14 ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ movq 0x10+P1, %rax ; \ mulq %rax; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ xorq %r15, %r15 ; \ movq 0x10+P1, %rax ; \ mulq 0x18+P1; \ addq %rax, %rax ; \ adcq %rdx, %rdx ; \ adcq $0x0, %r15 ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x18+P1, %rax ; \ mulq %rax; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ movl $0x26, %esi ; \ movq %r12, %rax ; \ mulq %rsi; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %rcx, %rcx ; \ movq %r13, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %rcx, %rcx ; \ movq %r14, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rcx, %rcx ; \ movq %r15, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ xorq %rcx, %rcx ; \ addq %rax, %r11 ; \ movq %rdx, %r12 ; \ adcq %rcx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Add 5-digit inputs and normalize to 4 digits #define add5_4(P0,P1,P2) \ movq P1, %r8 ; \ addq P2, %r8 ; \ movq 8+P1, %r9 ; \ adcq 8+P2, %r9 ; \ movq 16+P1, 
%r10 ; \ adcq 16+P2, %r10 ; \ movq 24+P1, %r11 ; \ adcq 24+P2, %r11 ; \ movq 32+P1, %r12 ; \ adcq 32+P2, %r12 ; \ xorl %ebx, %ebx ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rbx, %r9 ; \ adcq %rbx, %r10 ; \ adcq %rbx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Modular addition with double modulus 2 * p_25519 = 2^256 - 38. // This only ensures that the result fits in 4 digits, not that it is reduced // even w.r.t. double modulus. The result is always correct modulo provided // the sum of the inputs is < 2^256 + 2^256 - 38, so in particular provided // at least one of them is reduced double modulo. #define add_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ecx, %ecx ; \ addq P2, %r8 ; \ movq 0x8+P1, %r9 ; \ adcq 0x8+P2, %r9 ; \ movq 0x10+P1, %r10 ; \ adcq 0x10+P2, %r10 ; \ movq 0x18+P1, %r11 ; \ adcq 0x18+P2, %r11 ; \ movl $38, %eax ; \ cmovncq %rcx, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Modular subtraction with double modulus 2 * p_25519 = 2^256 - 38 #define sub_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ebx, %ebx ; \ subq P2, %r8 ; \ movq 8+P1, %r9 ; \ sbbq 8+P2, %r9 ; \ movl $38, %ecx ; \ movq 16+P1, %r10 ; \ sbbq 16+P2, %r10 ; \ movq 24+P1, %rax ; \ sbbq 24+P2, %rax ; \ cmovncq %rbx, %rcx ; \ subq %rcx, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq %rbx, %rax ; \ movq %r8, P0 ; \ movq %r9, 8+P0 ; \ movq %r10, 16+P0 ; \ movq %rax, 24+P0 // 5-digit subtraction with upward bias to make it positive, adding // 1000 * (2^255 - 19) = 2^256 * 500 - 19000, then normalizing to 4 digits #define sub5_4(P0,P1,P2) \ movq P1, %r8 ; \ subq P2, %r8 ; \ movq 8+P1, %r9 ; \ sbbq 8+P2, %r9 ; \ movq 16+P1, %r10 ; \ sbbq 16+P2, %r10 ; \ movq 24+P1, %r11 ; \ sbbq 24+P2, %r11 ; \ movq 32+P1, %r12 ; \ sbbq 32+P2, %r12 ; \ xorl %ebx, %ebx ; \ subq $19000, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq %rbx, %r11 ; \ sbbq %rbx, %r12 ; \ addq $500, %r12 ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rbx, %r9 ; \ adcq %rbx, %r10 ; \ adcq %rbx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Combined z = c * x + y with reduction only < 2 * p_25519 // It is assumed that 19 * (c * x + y) < 2^60 * 2^256 so we // don't need a high mul in the final part. 
#define cmadd_4(P0,C1,P2,P3) \ movq $C1, %rsi ; \ movq P2, %rax ; \ mulq %rsi; \ movq %rax, %r8 ; \ movq %rdx, %r9 ; \ movq 0x8+P2, %rax ; \ xorq %r10, %r10 ; \ mulq %rsi; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ movq 0x10+P2, %rax ; \ mulq %rsi; \ addq %rax, %r10 ; \ adcq $0x0, %rdx ; \ movq 0x18+P2, %rax ; \ movq %rdx, %r11 ; \ mulq %rsi; \ xorl %esi, %esi ; \ addq %rax, %r11 ; \ adcq %rsi, %rdx ; \ addq P3, %r8 ; \ adcq 0x8+P3, %r9 ; \ adcq 0x10+P3, %r10 ; \ adcq 0x18+P3, %r11 ; \ adcq %rsi, %rdx ; \ shldq $0x1, %r11, %rdx ; \ btr $63, %r11 ; \ movl $0x13, %ebx ; \ imulq %rbx, %rdx ; \ addq %rdx, %r8 ; \ adcq %rsi, %r9 ; \ adcq %rsi, %r10 ; \ adcq %rsi, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Multiplex: z := if NZ then x else y #define mux_4(P0,P1,P2) \ movq P1, %rax ; \ movq P2, %rcx ; \ cmovzq %rcx, %rax ; \ movq %rax, P0 ; \ movq 8+P1, %rax ; \ movq 8+P2, %rcx ; \ cmovzq %rcx, %rax ; \ movq %rax, 8+P0 ; \ movq 16+P1, %rax ; \ movq 16+P2, %rcx ; \ cmovzq %rcx, %rax ; \ movq %rax, 16+P0 ; \ movq 24+P1, %rax ; \ movq 24+P2, %rcx ; \ cmovzq %rcx, %rax ; \ movq %rax, 24+P0 S2N_BN_SYMBOL(curve25519_x25519_alt): S2N_BN_SYMBOL(curve25519_x25519_byte_alt): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save registers, make room for temps, preserve input arguments. pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp // Move the output pointer to a stable place movq %rdi, res // Copy the inputs to the local variables with minimal mangling: // // - The scalar is in principle turned into 01xxx...xxx000 but // in the structure below the special handling of these bits is // explicit in the main computation; the scalar is just copied. // // - The point x coord is reduced mod 2^255 by masking off the // top bit. In the main loop we only need reduction < 2 * p_25519. movq (%rsi), %rax movq %rax, (%rsp) movq 8(%rsi), %rax movq %rax, 8(%rsp) movq 16(%rsi), %rax movq %rax, 16(%rsp) movq 24(%rsi), %rax movq %rax, 24(%rsp) movq (%rdx), %r8 movq 8(%rdx), %r9 movq 16(%rdx), %r10 movq 24(%rdx), %r11 btr $63, %r11 movq %r8, 32(%rsp) movq %r9, 40(%rsp) movq %r10, 48(%rsp) movq %r11, 56(%rsp) // Initialize with explicit doubling in order to handle set bit 254. // Set swap = 1 and (xm,zm) = (x,1) then double as (xn,zn) = 2 * (x,1). // We use the fact that the point x coordinate is still in registers. // Since zm = 1 we could do the doubling with an operation count of // 2 * S + M instead of 2 * S + 2 * M, but it doesn't seem worth // the slight complication arising from a different linear combination. movl $1, %eax movq %rax, swap movq %r8, 256(%rsp) movq %rax, 96(%rsp) xorl %eax, %eax movq %r9, 264(%rsp) movq %rax, 104(%rsp) movq %r10, 272(%rsp) movq %rax, 112(%rsp) movq %r11, 280(%rsp) movq %rax, 120(%rsp) sub_twice4(d,xm,zm) add_twice4(s,xm,zm) sqr_4(d,d) sqr_4(s,s) sub_twice4(p,s,d) cmadd_4(e,0x1db42,p,d) mul_4(xn,s,d) mul_4(zn,p,e) // The main loop over unmodified bits from i = 253, ..., i = 3 (inclusive). // This is a classic Montgomery ladder, with the main coordinates only // reduced mod 2 * p_25519, some intermediate results even more loosely. 
movl $253, %eax movq %rax, i curve25519_x25519_alt_scalarloop: // sm = xm + zm; sn = xn + zn; dm = xm - zm; dn = xn - zn sub_twice4(dm,xm,zm) add_twice4(sn,xn,zn) sub_twice4(dn,xn,zn) add_twice4(sm,xm,zm) // DOUBLING: mux d = xt - zt and s = xt + zt for appropriate choice of (xt,zt) movq i, %rdx movq %rdx, %rcx shrq $6, %rdx movq (%rsp,%rdx,8), %rdx shrq %cl, %rdx andq $1, %rdx cmpq swap, %rdx movq %rdx, swap mux_4(d,dm,dn) mux_4(s,sm,sn) // ADDING: dmsn = dm * sn; dnsm = sm * dn mul_5(dnsm,sm,dn) mul_5(dmsn,sn,dm) // DOUBLING: d = (xt - zt)^2 sqr_4(d,d) // ADDING: dpro = (dmsn - dnsm)^2, spro = (dmsn + dnsm)^2 // DOUBLING: s = (xt + zt)^2 sub5_4(dpro,dmsn,dnsm) add5_4(spro,dmsn,dnsm) sqr_4(s,s) sqr_4(dpro,dpro) // DOUBLING: p = 4 * xt * zt = s - d sub_twice4(p,s,d) // ADDING: xm' = (dmsn + dnsm)^2 sqr_4(xm,spro) // DOUBLING: e = 121666 * p + d cmadd_4(e,0x1db42,p,d) // DOUBLING: xn' = (xt + zt)^2 * (xt - zt)^2 = s * d mul_4(xn,s,d) // DOUBLING: zn' = (4 * xt * zt) * ((xt - zt)^2 + 121666 * (4 * xt * zt)) // = p * (d + 121666 * p) mul_4(zn,p,e) // ADDING: zm' = x * (dmsn - dnsm)^2 mul_4(zm,dpro,pointx) // Loop down as far as 3 (inclusive) movq i, %rax subq $1, %rax movq %rax, i cmpq $3, %rax jnc curve25519_x25519_alt_scalarloop // Multiplex directly into (xn,zn) then do three pure doubling steps; // this accounts for the implicit zeroing of the three lowest bits // of the scalar. movq swap, %rdx testq %rdx, %rdx mux_4(xn,xm,xn) mux_4(zn,zm,zn) sub_twice4(d,xn,zn) add_twice4(s,xn,zn) sqr_4(d,d) sqr_4(s,s) sub_twice4(p,s,d) cmadd_4(e,0x1db42,p,d) mul_4(xn,s,d) mul_4(zn,p,e) sub_twice4(d,xn,zn) add_twice4(s,xn,zn) sqr_4(d,d) sqr_4(s,s) sub_twice4(p,s,d) cmadd_4(e,0x1db42,p,d) mul_4(xn,s,d) mul_4(zn,p,e) sub_twice4(d,xn,zn) add_twice4(s,xn,zn) sqr_4(d,d) sqr_4(s,s) sub_twice4(p,s,d) cmadd_4(e,0x1db42,p,d) mul_4(xn,s,d) mul_4(zn,p,e) // The projective result of the scalar multiplication is now (xn,zn). // Prepare to call the modular inverse function to get zn' = 1/zn leaq 224(%rsp), %rdi leaq 224(%rsp), %rsi // Inline copy of bignum_inv_p25519, identical except for stripping out // the prologue and epilogue saving and restoring registers and making // and reclaiming room on the stack. For more details and explanations see // "x86/curve25519/bignum_inv_p25519.S". Note that the stack it uses for // its own temporaries is 208 bytes, so it has no effect on variables // that are needed in the rest of our computation here: res, xn and zn. 
movq %rdi, 0xc0(%rsp) xorl %eax, %eax leaq -0x13(%rax), %rcx notq %rax movq %rcx, (%rsp) movq %rax, 0x8(%rsp) movq %rax, 0x10(%rsp) btr $0x3f, %rax movq %rax, 0x18(%rsp) movq (%rsi), %rdx movq 0x8(%rsi), %rcx movq 0x10(%rsi), %r8 movq 0x18(%rsi), %r9 movl $0x1, %eax xorl %r10d, %r10d bts $0x3f, %r9 adcq %r10, %rax imulq $0x13, %rax, %rax addq %rax, %rdx adcq %r10, %rcx adcq %r10, %r8 adcq %r10, %r9 movl $0x13, %eax cmovbq %r10, %rax subq %rax, %rdx sbbq %r10, %rcx sbbq %r10, %r8 sbbq %r10, %r9 btr $0x3f, %r9 movq %rdx, 0x20(%rsp) movq %rcx, 0x28(%rsp) movq %r8, 0x30(%rsp) movq %r9, 0x38(%rsp) xorl %eax, %eax movq %rax, 0x40(%rsp) movq %rax, 0x48(%rsp) movq %rax, 0x50(%rsp) movq %rax, 0x58(%rsp) movabsq $0xa0f99e2375022099, %rax movq %rax, 0x60(%rsp) movabsq $0xa8c68f3f1d132595, %rax movq %rax, 0x68(%rsp) movabsq $0x6c6c893805ac5242, %rax movq %rax, 0x70(%rsp) movabsq $0x276508b241770615, %rax movq %rax, 0x78(%rsp) movq $0xa, 0x90(%rsp) movq $0x1, 0x98(%rsp) jmp curve25519_x25519_alt_midloop curve25519_x25519_alt_inverseloop: movq %r8, %r9 sarq $0x3f, %r9 xorq %r9, %r8 subq %r9, %r8 movq %r10, %r11 sarq $0x3f, %r11 xorq %r11, %r10 subq %r11, %r10 movq %r12, %r13 sarq $0x3f, %r13 xorq %r13, %r12 subq %r13, %r12 movq %r14, %r15 sarq $0x3f, %r15 xorq %r15, %r14 subq %r15, %r14 movq %r8, %rax andq %r9, %rax movq %r10, %rdi andq %r11, %rdi addq %rax, %rdi movq %rdi, 0x80(%rsp) movq %r12, %rax andq %r13, %rax movq %r14, %rsi andq %r15, %rsi addq %rax, %rsi movq %rsi, 0x88(%rsp) xorl %ebx, %ebx movq (%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq 0x20(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rdi adcq %rdx, %rbx xorl %ebp, %ebp movq (%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rsi adcq %rdx, %rbp movq 0x20(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp xorl %ecx, %ecx movq 0x8(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x28(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx shrdq $0x3b, %rbx, %rdi movq %rdi, (%rsp) xorl %edi, %edi movq 0x8(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbp adcq %rdx, %rdi movq 0x28(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rdi shrdq $0x3b, %rbp, %rsi movq %rsi, 0x20(%rsp) xorl %esi, %esi movq 0x10(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rsi movq 0x30(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rsi shrdq $0x3b, %rcx, %rbx movq %rbx, 0x8(%rsp) xorl %ebx, %ebx movq 0x10(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rdi adcq %rdx, %rbx movq 0x30(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rdi adcq %rdx, %rbx shrdq $0x3b, %rdi, %rbp movq %rbp, 0x28(%rsp) movq 0x18(%rsp), %rax xorq %r9, %rax movq %rax, %rbp sarq $0x3f, %rbp andq %r8, %rbp negq %rbp mulq %r8 addq %rax, %rsi adcq %rdx, %rbp movq 0x38(%rsp), %rax xorq %r11, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r10, %rdx subq %rdx, %rbp mulq %r10 addq %rax, %rsi adcq %rdx, %rbp shrdq $0x3b, %rsi, %rcx movq %rcx, 0x10(%rsp) shrdq $0x3b, %rbp, %rsi movq 0x18(%rsp), %rax movq %rsi, 0x18(%rsp) xorq %r13, %rax movq %rax, %rsi sarq $0x3f, %rsi andq %r12, %rsi negq %rsi mulq %r12 addq %rax, %rbx adcq %rdx, %rsi movq 0x38(%rsp), %rax xorq %r15, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r14, %rdx subq %rdx, %rsi mulq %r14 addq %rax, %rbx adcq %rdx, %rsi shrdq $0x3b, %rbx, %rdi movq %rdi, 0x30(%rsp) shrdq $0x3b, %rsi, %rbx movq %rbx, 0x38(%rsp) movq 0x80(%rsp), %rbx movq 0x88(%rsp), %rbp xorl %ecx, %ecx movq 0x40(%rsp), %rax xorq 
%r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x60(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x40(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x40(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x60(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x60(%rsp) xorl %ebx, %ebx movq 0x48(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x68(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq 0x48(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, 0x48(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq 0x68(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, 0x68(%rsp) xorl %ecx, %ecx movq 0x50(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x70(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x50(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x50(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x70(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x70(%rsp) movq 0x58(%rsp), %rax xorq %r9, %rax movq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x78(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rcx adcq %rbx, %rdx movq %rdx, %rbx shldq $0x1, %rcx, %rdx sarq $0x3f, %rbx addq %rbx, %rdx movl $0x13, %eax imulq %rdx movq 0x40(%rsp), %r8 addq %rax, %r8 movq %r8, 0x40(%rsp) movq 0x48(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x48(%rsp) movq 0x50(%rsp), %r8 adcq %rbx, %r8 movq %r8, 0x50(%rsp) adcq %rbx, %rcx shlq $0x3f, %rax addq %rax, %rcx movq 0x58(%rsp), %rax movq %rcx, 0x58(%rsp) xorq %r13, %rax movq %r13, %rcx andq %r12, %rcx negq %rcx mulq %r12 addq %rax, %rsi adcq %rdx, %rcx movq 0x78(%rsp), %rax xorq %r15, %rax movq %r15, %rdx andq %r14, %rdx subq %rdx, %rcx mulq %r14 addq %rax, %rsi adcq %rcx, %rdx movq %rdx, %rcx shldq $0x1, %rsi, %rdx sarq $0x3f, %rcx movl $0x13, %eax addq %rcx, %rdx imulq %rdx movq 0x60(%rsp), %r8 addq %rax, %r8 movq %r8, 0x60(%rsp) movq 0x68(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x68(%rsp) movq 0x70(%rsp), %r8 adcq %rcx, %r8 movq %r8, 0x70(%rsp) adcq %rcx, %rsi shlq $0x3f, %rax addq %rax, %rsi movq %rsi, 0x78(%rsp) curve25519_x25519_alt_midloop: movq 0x98(%rsp), %rsi movq (%rsp), %rdx movq 0x20(%rsp), %rcx movq %rdx, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax xorl %ebp, %ebp movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, 
%rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %rdx leaq (%rcx,%rax), %rdi shlq $0x16, %rdx shlq $0x16, %rdi sarq $0x2b, %rdx sarq $0x2b, %rdi movabsq 
$0x20000100000, %rax leaq (%rbx,%rax), %rbx leaq (%rcx,%rax), %rcx sarq $0x2a, %rbx sarq $0x2a, %rcx movq %rdx, 0xa0(%rsp) movq %rbx, 0xa8(%rsp) movq %rdi, 0xb0(%rsp) movq %rcx, 0xb8(%rsp) movq (%rsp), %r12 imulq %r12, %rdi imulq %rdx, %r12 movq 0x20(%rsp), %r13 imulq %r13, %rbx imulq %rcx, %r13 addq %rbx, %r12 addq %rdi, %r13 sarq $0x14, %r12 sarq $0x14, %r13 movq %r12, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx 
movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %r8 leaq (%rcx,%rax), %r10 shlq $0x16, %r8 shlq $0x16, %r10 sarq $0x2b, %r8 sarq $0x2b, %r10 movabsq $0x20000100000, %rax leaq (%rbx,%rax), %r15 leaq (%rcx,%rax), %r11 sarq $0x2a, %r15 sarq $0x2a, %r11 movq %r13, %rbx movq %r12, %rcx imulq %r8, %r12 imulq %r15, %rbx addq %rbx, %r12 imulq %r11, %r13 imulq %r10, %rcx addq %rcx, %r13 sarq $0x14, %r12 sarq $0x14, %r13 movq %r12, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq 0xa0(%rsp), %rax imulq %r8, %rax movq 0xb0(%rsp), %rdx imulq %r15, %rdx imulq 0xa8(%rsp), %r8 imulq 0xb8(%rsp), %r15 addq %r8, %r15 leaq (%rax,%rdx), %r9 movq 0xa0(%rsp), %rax imulq %r10, %rax movq 0xb0(%rsp), %rdx imulq %r11, %rdx imulq 0xa8(%rsp), %r10 imulq 0xb8(%rsp), %r11 addq %r10, %r11 leaq (%rax,%rdx), %r13 movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, 
%rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %r8 leaq (%rcx,%rax), %r12 shlq $0x15, %r8 shlq $0x15, %r12 sarq $0x2b, %r8 sarq $0x2b, %r12 movabsq $0x20000100000, %rax leaq (%rbx,%rax), %r10 leaq (%rcx,%rax), %r14 sarq $0x2b, %r10 sarq $0x2b, %r14 movq %r9, %rax imulq %r8, %rax movq %r13, %rdx imulq %r10, %rdx imulq %r15, %r8 imulq %r11, %r10 addq %r8, %r10 leaq (%rax,%rdx), %r8 movq %r9, %rax imulq %r12, %rax movq %r13, %rdx imulq %r14, %rdx imulq %r15, %r12 imulq %r11, %r14 addq %r12, %r14 leaq (%rax,%rdx), %r12 movq %rsi, 0x98(%rsp) decq 0x90(%rsp) jne curve25519_x25519_alt_inverseloop movq (%rsp), %rax movq 0x20(%rsp), %rcx imulq %r8, %rax imulq %r10, %rcx addq %rcx, %rax sarq 
$0x3f, %rax movq %r8, %r9 sarq $0x3f, %r9 xorq %r9, %r8 subq %r9, %r8 xorq %rax, %r9 movq %r10, %r11 sarq $0x3f, %r11 xorq %r11, %r10 subq %r11, %r10 xorq %rax, %r11 movq %r12, %r13 sarq $0x3f, %r13 xorq %r13, %r12 subq %r13, %r12 xorq %rax, %r13 movq %r14, %r15 sarq $0x3f, %r15 xorq %r15, %r14 subq %r15, %r14 xorq %rax, %r15 movq %r8, %rax andq %r9, %rax movq %r10, %r12 andq %r11, %r12 addq %rax, %r12 xorl %r13d, %r13d movq 0x40(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r12 adcq %rdx, %r13 movq 0x60(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movq 0x48(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r13 adcq %rdx, %r14 movq 0x68(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq 0x50(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq 0x70(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 adcq %rdx, %r15 movq 0x58(%rsp), %rax xorq %r9, %rax andq %r8, %r9 negq %r9 mulq %r8 addq %rax, %r15 adcq %rdx, %r9 movq 0x78(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %r9 mulq %r10 addq %rax, %r15 adcq %rdx, %r9 movq %r9, %rax shldq $0x1, %r15, %rax sarq $0x3f, %r9 movl $0x13, %ebx leaq 0x1(%rax,%r9,1), %rax imulq %rbx xorl %ebp, %ebp addq %rax, %r12 adcq %rdx, %r13 adcq %r9, %r14 adcq %r9, %r15 shlq $0x3f, %rax addq %rax, %r15 cmovns %rbp, %rbx subq %rbx, %r12 sbbq %rbp, %r13 sbbq %rbp, %r14 sbbq %rbp, %r15 btr $0x3f, %r15 movq 0xc0(%rsp), %rdi movq %r12, (%rdi) movq %r13, 0x8(%rdi) movq %r14, 0x10(%rdi) movq %r15, 0x18(%rdi) // Now the result is xn * (1/zn), fully reduced modulo p. // Note that in the degenerate case zn = 0 (mod p_25519), the // modular inverse code above will produce 1/zn = 0, giving // the correct overall X25519 result of zero for the point at // infinity. movq res, %rbp mul_p25519(resx,xn,zn) // Restore stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
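For orientation, the 0x1db42 constant in the cmadd_4 calls is 121666 = (A + 2)/4 for curve25519, and the scalar loop together with the explicit initial doubling and the three trailing doublings implements the RFC 7748 Montgomery ladder on the clamped scalar. Below is a compact Python sketch of that ladder (an illustration, not part of this repository); it uses plain swaps where the assembly uses constant-time cmov/mux_4, and the names x25519, A24 and P are mine.

P = 2**255 - 19
A24 = 121666                          # the 0x1db42 constant used in cmadd_4

def x25519(scalar, u):
    k = int.from_bytes(scalar, "little")
    k &= ~7                           # clear the three low bits
    k &= (1 << 255) - 1               # clear bit 255
    k |= 1 << 254                     # set bit 254 (RFC 7748 clamping)
    x1 = int.from_bytes(u, "little") % 2**255
    x2, z2, x3, z3, swap = 1, 0, x1, 1, 0
    for t in reversed(range(255)):
        bit = (k >> t) & 1
        swap ^= bit
        if swap:                      # conditional swap of the two ladder points
            x2, x3, z2, z3 = x3, x2, z3, z2
        swap = bit
        a, b = (x2 + z2) % P, (x2 - z2) % P
        c, d = (x3 + z3) % P, (x3 - z3) % P
        da, cb = d * a % P, c * b % P
        x3, z3 = (da + cb) ** 2 % P, x1 * (da - cb) ** 2 % P
        aa, bb = a * a % P, b * b % P
        e = (aa - bb) % P
        x2, z2 = aa * bb % P, e * (aa + A24 * e) % P
    if swap:
        x2, z2 = x3, z3
    # pow(0, P-2, P) = 0, so zn = 0 gives result 0, matching the comment above
    return (x2 * pow(z2, P - 2, P) % P).to_bytes(32, "little")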
marvin-hansen/iggy-streaming-system
8,706
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/curve25519/bignum_madd_n25519.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply-add modulo the order of the curve25519/edwards25519 basepoint // Inputs x[4], y[4], c[4]; output z[4] // // extern void bignum_madd_n25519 // (uint64_t z[static 4], uint64_t x[static 4], // uint64_t y[static 4], uint64_t c[static 4]); // // Performs z := (x * y + c) mod n_25519, where the modulus is // n_25519 = 2^252 + 27742317777372353535851937790883648493, the // order of the curve25519/edwards25519 basepoint. The result z // and the inputs x, y and c are all 4 digits (256 bits). // // Standard x86-64 ABI: RDI = z, RSI = x, RDX = y, RCX = c // Microsoft x64 ABI: RCX = z, RDX = x, R8 = y, R9 = c // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_madd_n25519) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_madd_n25519) .text // Single round of modular reduction mod_n25519, mapping // [m4;m3;m2;m1;m0] = m to [m3;m2;m1;m0] = m mod n_25519, // *assuming* the input m < 2^64 * n_25519. This is very // close to the loop body of the bignum_mod_n25519 function. #define reduce(m4,m3,m2,m1,m0) \ movq m4, %rbx ; \ shldq $0x4, m3, %rbx ; \ shrq $0x3c, m4 ; \ subq m4, %rbx ; \ shlq $0x4, m3 ; \ shrdq $0x4, m4, m3 ; \ movabsq $0x5812631a5cf5d3ed, %rax ; \ mulq %rbx; \ movq %rax, %rbp ; \ movq %rdx, %rcx ; \ movabsq $0x14def9dea2f79cd6, %rax ; \ mulq %rbx; \ addq %rax, %rcx ; \ adcq $0x0, %rdx ; \ subq %rbp, m0 ; \ sbbq %rcx, m1 ; \ sbbq %rdx, m2 ; \ sbbq $0x0, m3 ; \ sbbq %rbx, %rbx ; \ movabsq $0x5812631a5cf5d3ed, %rax ; \ andq %rbx, %rax ; \ movabsq $0x14def9dea2f79cd6, %rdx ; \ andq %rbx, %rdx ; \ movabsq $0x1000000000000000, %rbx ; \ andq %rax, %rbx ; \ addq %rax, m0 ; \ adcq %rdx, m1 ; \ adcq $0x0, m2 ; \ adcq %rbx, m3 // Special case of "reduce" with m4 = 0. As well as not using m4, // the quotient selection is slightly simpler, just floor(m/2^252) // versus min (floor(m/2^252)) (2^63-1). #define reduce0(m3,m2,m1,m0) \ movq m3, %rbx ; \ shrq $60, %rbx ; \ shlq $4, m3 ; \ shrq $4, m3 ; \ movabsq $0x5812631a5cf5d3ed, %rax ; \ mulq %rbx; \ movq %rax, %rbp ; \ movq %rdx, %rcx ; \ movabsq $0x14def9dea2f79cd6, %rax ; \ mulq %rbx; \ addq %rax, %rcx ; \ adcq $0x0, %rdx ; \ subq %rbp, m0 ; \ sbbq %rcx, m1 ; \ sbbq %rdx, m2 ; \ sbbq $0x0, m3 ; \ sbbq %rbx, %rbx ; \ movabsq $0x5812631a5cf5d3ed, %rax ; \ andq %rbx, %rax ; \ movabsq $0x14def9dea2f79cd6, %rdx ; \ andq %rbx, %rdx ; \ movabsq $0x1000000000000000, %rbx ; \ andq %rax, %rbx ; \ addq %rax, m0 ; \ adcq %rdx, m1 ; \ adcq $0x0, m2 ; \ adcq %rbx, m3 S2N_BN_SYMBOL(bignum_madd_n25519): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx movq %r9, %rcx #endif // Save some additional registers for use pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 // First compute [%r15;%r14;%r13;%r12;%r11;%r10;%r9;%r8] = x * y + c. This is // a multiply-add variant of an ADCX/ADOX-based schoolbook multiplier, // starting the accumulation with the c term and doing the zeroth row // in the same uniform fashion, otherwise similar to the start of // bignum_mul_p256k1. 
movq (%rcx), %r8 movq 8(%rcx), %r9 movq 16(%rcx), %r10 movq 24(%rcx), %r11 movq %rdx, %rcx xorl %ebp, %ebp movq (%rcx), %rdx mulxq (%rsi), %rax, %rbx adcxq %rax, %r8 adoxq %rbx, %r9 mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x18(%rsi), %rax, %r12 adcxq %rax, %r11 adoxq %rbp, %r12 adcxq %rbp, %r12 xorl %ebp, %ebp movq 0x8(%rcx), %rdx mulxq (%rsi), %rax, %rbx adcxq %rax, %r9 adoxq %rbx, %r10 mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x18(%rsi), %rax, %r13 adcxq %rax, %r12 adoxq %rbp, %r13 adcxq %rbp, %r13 xorl %ebp, %ebp movq 0x10(%rcx), %rdx mulxq (%rsi), %rax, %rbx adcxq %rax, %r10 adoxq %rbx, %r11 mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x18(%rsi), %rax, %r14 adcxq %rax, %r13 adoxq %rbp, %r14 adcxq %rbp, %r14 xorl %ebp, %ebp movq 0x18(%rcx), %rdx mulxq (%rsi), %rax, %rbx adcxq %rax, %r11 adoxq %rbx, %r12 mulxq 0x8(%rsi), %rax, %rbx adcxq %rax, %r12 adoxq %rbx, %r13 mulxq 0x10(%rsi), %rax, %rbx adcxq %rax, %r13 adoxq %rbx, %r14 mulxq 0x18(%rsi), %rax, %r15 adcxq %rax, %r14 adoxq %rbp, %r15 adcxq %rbp, %r15 // Now do the modular reduction and write back reduce0(%r15,%r14,%r13,%r12) reduce(%r15,%r14,%r13,%r12,%r11) reduce(%r14,%r13,%r12,%r11,%r10) reduce(%r13,%r12,%r11,%r10,%r9) reduce(%r12,%r11,%r10,%r9,%r8) movq %r8, (%rdi) movq %r9, 8(%rdi) movq %r10, 16(%rdi) movq %r11, 24(%rdi) // Restore registers and return popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
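A compact Python model of the contract stated in the header comment, (x * y + c) mod n_25519 on 4-digit little-endian operands, can serve as a cross-check; the helper names below are illustrative only and not part of the library.

N25519 = 2**252 + 27742317777372353535851937790883648493  # basepoint order

def from_digits(a):
    # 4 little-endian 64-bit digits -> integer
    return sum(d << (64 * i) for i, d in enumerate(a))

def to_digits(v):
    # integer -> 4 little-endian 64-bit digits
    return [(v >> (64 * i)) & (2**64 - 1) for i in range(4)]

def madd_n25519(x, y, c):
    # Same input/output convention as bignum_madd_n25519 above.
    return to_digits((from_digits(x) * from_digits(y) + from_digits(c)) % N25519)

assert madd_n25519(to_digits(2), to_digits(3), to_digits(5)) == to_digits(11)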
marvin-hansen/iggy-streaming-system
2,045
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/curve25519/bignum_neg_p25519.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Negate modulo p_25519, z := (-x) mod p_25519, assuming x reduced // Input x[4]; output z[4] // // extern void bignum_neg_p25519 // (uint64_t z[static 4], uint64_t x[static 4]); // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_neg_p25519) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_neg_p25519) .text #define z %rdi #define x %rsi #define q %rdx #define n0 %rax #define n1 %rcx #define n2 %r8 #define n3 %r9 #define c %r10 #define qshort %esi S2N_BN_SYMBOL(bignum_neg_p25519): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // Load the 4 digits of x and let q be an OR of all the digits movq (x), n0 movq n0, q movq 8(x), n1 orq n1, q movq 16(x), n2 orq n2, q movq 24(x), n3 orq n3, q // Turn q into a strict x <> 0 bitmask, and c into a masked constant [-19] // so that [q;q;q;c] = [2^256 - 19], masked according to nonzeroness of x negq q sbbq q, q movq $-19, c andq q, c // Now just do [2^256 - 19] - x and then mask to 255 bits, // which means in effect the required [2^255 - 19] - x subq n0, c movq c, (z) movq q, c sbbq n1, c movq c, 8(z) movq q, c sbbq n2, c movq c, 16(z) sbbq n3, q btr $63, q movq q, 24(z) #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
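The masking trick described in the comments (an all-ones mask q exactly when x is nonzero, the masked constant 2^256 - 19, and a final clear of bit 255) is easy to mirror in Python as a sanity check; this sketch is illustrative only and the names are not part of the library.

P25519 = 2**255 - 19
MASK64 = 2**64 - 1

def neg_p25519(x):
    # q is all-ones iff x != 0; [q;q;q; q & -19] is then 2^256 - 19, and
    # subtracting x and clearing bit 255 gives (2^255 - 19) - x, i.e.
    # (-x) mod p_25519, while x == 0 maps to 0.
    q = MASK64 if x != 0 else 0
    const = sum(w << (64 * i) for i, w in enumerate([q & (-19 & MASK64), q, q, q]))
    return (const - x) & (2**255 - 1)

for x in (0, 1, 19, P25519 - 1):
    assert neg_p25519(x) == (-x) % P25519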
marvin-hansen/iggy-streaming-system
300,618
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/curve25519/curve25519_x25519base_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // The x25519 function for curve25519 on base element 9 // Input scalar[4]; output res[4] // // extern void curve25519_x25519base_alt // (uint64_t res[static 4],uint64_t scalar[static 4]); // // The function has a second prototype considering the arguments as arrays // of bytes rather than 64-bit words. The underlying code is the same, since // the x86 platform is little-endian. // // extern void curve25519_x25519base_byte_alt // (uint8_t res[static 32],uint8_t scalar[static 32]) // // Given a scalar n, returns the X coordinate of n * G where G = (9,...) is // the standard generator. The scalar is first slightly modified/mangled // as specified in the relevant RFC (https://www.rfc-editor.org/rfc/rfc7748). // // Standard x86-64 ABI: RDI = res, RSI = scalar // Microsoft x64 ABI: RCX = res, RDX = scalar // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(curve25519_x25519base_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(curve25519_x25519base_alt) S2N_BN_SYM_VISIBILITY_DIRECTIVE(curve25519_x25519base_byte_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(curve25519_x25519base_byte_alt) .text // Size of individual field elements #define NUMSIZE 32 // Pointer-offset pairs for result and temporaries on stack with some aliasing. // The result "resx" assumes the "res" pointer has been preloaded into %rbp. #define resx (0*NUMSIZE)(%rbp) #define scalar (0*NUMSIZE)(%rsp) #define tabent (1*NUMSIZE)(%rsp) #define ymx_2 (1*NUMSIZE)(%rsp) #define xpy_2 (2*NUMSIZE)(%rsp) #define kxy_2 (3*NUMSIZE)(%rsp) #define acc (4*NUMSIZE)(%rsp) #define x_1 (4*NUMSIZE)(%rsp) #define y_1 (5*NUMSIZE)(%rsp) #define z_1 (6*NUMSIZE)(%rsp) #define w_1 (7*NUMSIZE)(%rsp) #define x_3 (4*NUMSIZE)(%rsp) #define y_3 (5*NUMSIZE)(%rsp) #define z_3 (6*NUMSIZE)(%rsp) #define w_3 (7*NUMSIZE)(%rsp) #define tmpspace (8*NUMSIZE)(%rsp) #define t0 (8*NUMSIZE)(%rsp) #define t1 (9*NUMSIZE)(%rsp) #define t2 (10*NUMSIZE)(%rsp) #define t3 (11*NUMSIZE)(%rsp) #define t4 (12*NUMSIZE)(%rsp) #define t5 (13*NUMSIZE)(%rsp) // Stable homes for the input result pointer, and other variables #define res 14*NUMSIZE(%rsp) #define i 14*NUMSIZE+8(%rsp) #define bias 14*NUMSIZE+16(%rsp) #define bf 14*NUMSIZE+24(%rsp) #define ix 14*NUMSIZE+24(%rsp) #define tab 15*NUMSIZE(%rsp) // Total size to reserve on the stack #define NSPACE (15*NUMSIZE+8) // Macro wrapping up the basic field multiplication, only trivially // different from a pure function call to bignum_mul_p25519_alt. 
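// Reduction in both multiplier macros below rests on the congruence
// 2^256 = 2 * 19 = 38 (mod p_25519): the high four digits of the 512-bit
// product are folded back into the low four with a multiplier of 38 (the
// 0x26 constant), after which a short quotient estimate finishes the job,
// fully reduced in mul_p25519 and only to below 2 * p_25519 in mul_4.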
#define mul_p25519(P0,P1,P2) \ movq P1, %rax ; \ mulq P2; \ movq %rax, %r8 ; \ movq %rdx, %r9 ; \ xorq %r10, %r10 ; \ xorq %r11, %r11 ; \ movq P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ movq 0x8+P1, %rax ; \ mulq P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ adcq $0x0, %r11 ; \ xorq %r12, %r12 ; \ movq P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq %r12, %r12 ; \ movq 0x8+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ movq 0x10+P1, %rax ; \ mulq P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ xorq %r13, %r13 ; \ movq P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq %r13, %r13 ; \ movq 0x8+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x10+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x18+P1, %rax ; \ mulq P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ xorq %r14, %r14 ; \ movq 0x8+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq %r14, %r14 ; \ movq 0x10+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ movq 0x18+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ xorq %r15, %r15 ; \ movq 0x10+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq %r15, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ movl $0x26, %esi ; \ movq %r12, %rax ; \ mulq %rsi; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %rcx, %rcx ; \ movq %r13, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %rcx, %rcx ; \ movq %r14, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rcx, %rcx ; \ movq %r15, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ xorq %rcx, %rcx ; \ addq %rax, %r11 ; \ movq %rdx, %r12 ; \ adcq %rcx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ leaq 0x1(%r12), %rax ; \ movl $0x13, %esi ; \ bts $63, %r11 ; \ imulq %rsi, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ sbbq %rax, %rax ; \ notq %rax; \ andq %rsi, %rax ; \ subq %rax, %r8 ; \ sbbq %rcx, %r9 ; \ sbbq %rcx, %r10 ; \ sbbq %rcx, %r11 ; \ btr $63, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // A version of multiplication that only guarantees output < 2 * p_25519. // This basically skips the +1 and final correction in quotient estimation. 
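// The weaker bound is enough here because everything computed inside the
// scalar-multiplication loop feeds further sub_twice4/add_twice4/mul_4
// steps that tolerate operands below 2 * p_25519; only the single
// mul_p25519 at the very end of the function has to produce the canonical
// representative.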
#define mul_4(P0,P1,P2) \ movq P1, %rax ; \ mulq P2; \ movq %rax, %r8 ; \ movq %rdx, %r9 ; \ xorq %r10, %r10 ; \ xorq %r11, %r11 ; \ movq P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ movq 0x8+P1, %rax ; \ mulq P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ adcq $0x0, %r11 ; \ xorq %r12, %r12 ; \ movq P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq %r12, %r12 ; \ movq 0x8+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ movq 0x10+P1, %rax ; \ mulq P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ xorq %r13, %r13 ; \ movq P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq %r13, %r13 ; \ movq 0x8+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x10+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x18+P1, %rax ; \ mulq P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ xorq %r14, %r14 ; \ movq 0x8+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq %r14, %r14 ; \ movq 0x10+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ movq 0x18+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ xorq %r15, %r15 ; \ movq 0x10+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq %r15, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ movl $0x26, %ebx ; \ movq %r12, %rax ; \ mulq %rbx; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %rcx, %rcx ; \ movq %r13, %rax ; \ mulq %rbx; \ subq %rcx, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %rcx, %rcx ; \ movq %r14, %rax ; \ mulq %rbx; \ subq %rcx, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rcx, %rcx ; \ movq %r15, %rax ; \ mulq %rbx; \ subq %rcx, %rdx ; \ xorq %rcx, %rcx ; \ addq %rax, %r11 ; \ movq %rdx, %r12 ; \ adcq %rcx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Modular subtraction with double modulus 2 * p_25519 = 2^256 - 38 #define sub_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ebx, %ebx ; \ subq P2, %r8 ; \ movq 8+P1, %r9 ; \ sbbq 8+P2, %r9 ; \ movl $38, %ecx ; \ movq 16+P1, %r10 ; \ sbbq 16+P2, %r10 ; \ movq 24+P1, %rax ; \ sbbq 24+P2, %rax ; \ cmovncq %rbx, %rcx ; \ subq %rcx, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq %rbx, %rax ; \ movq %r8, P0 ; \ movq %r9, 8+P0 ; \ movq %r10, 16+P0 ; \ movq %rax, 24+P0 // Modular addition and doubling with double modulus 2 * p_25519 = 2^256 - 38. // This only ensures that the result fits in 4 digits, not that it is reduced // even w.r.t. double modulus. The result is always correct modulo provided // the sum of the inputs is < 2^256 + 2^256 - 38, so in particular provided // at least one of them is reduced double modulo. 
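// Concretely, just as sub_twice4 above compensates a borrow by subtracting
// a further 38 (equivalently, adding back 2 * p_25519 = 2^256 - 38), the
// conditional +38 in add_twice4 and double_twice4 replaces a carry out of
// 2^256 by 38, using 2^256 = 2 * p_25519 + 38; the stored 4-digit result
// therefore stays congruent modulo p_25519.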
#define add_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ecx, %ecx ; \ addq P2, %r8 ; \ movq 0x8+P1, %r9 ; \ adcq 0x8+P2, %r9 ; \ movq 0x10+P1, %r10 ; \ adcq 0x10+P2, %r10 ; \ movq 0x18+P1, %r11 ; \ adcq 0x18+P2, %r11 ; \ movl $38, %eax ; \ cmovncq %rcx, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 #define double_twice4(P0,P1) \ movq P1, %r8 ; \ xorl %ecx, %ecx ; \ addq %r8, %r8 ; \ movq 0x8+P1, %r9 ; \ adcq %r9, %r9 ; \ movq 0x10+P1, %r10 ; \ adcq %r10, %r10 ; \ movq 0x18+P1, %r11 ; \ adcq %r11, %r11 ; \ movl $38, %eax ; \ cmovncq %rcx, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 S2N_BN_SYMBOL(curve25519_x25519base_alt): S2N_BN_SYMBOL(curve25519_x25519base_byte_alt): // In this case the Windows form literally makes a subroutine call. // This avoids hassle arising from keeping code and data together. #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi callq curve25519_x25519base_alt_curve25519_x25519base_alt_standard popq %rsi popq %rdi ret curve25519_x25519base_alt_curve25519_x25519base_alt_standard: #endif // Save registers, make room for temps, preserve input arguments. pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp // Move the output pointer to a stable place movq %rdi, res // Copy the input scalar to its local variable while mangling it. // In principle the mangling is into 01xxx...xxx000, but actually // we only clear the top two bits so 00xxx...xxxxxx. The additional // 2^254 * G is taken care of by the starting value for the addition // chain below, while we never look at the three low bits at all. movq (%rsi), %rax movq %rax, (%rsp) movq 8(%rsi), %rax movq %rax, 8(%rsp) movq 16(%rsi), %rax movq %rax, 16(%rsp) movq $0x3fffffffffffffff, %rax andq 24(%rsi), %rax movq %rax, 24(%rsp) // The main part of the computation is on the edwards25519 curve in // extended-projective coordinates (X,Y,Z,T), representing a point // (x,y) via x = X/Z, y = Y/Z and x * y = T/Z (so X * Y = T * Z). // Only at the very end do we translate back to curve25519. So G // below means the generator within edwards25519 corresponding to // (9,...) for curve25519, via the standard isomorphism. // // Initialize accumulator "acc" to either (2^254 + 8) * G or just 2^254 * G // depending on bit 3 of the scalar, the only nonzero bit of the bottom 4. // Thus, we have effectively dealt with bits 0, 1, 2, 3, 254 and 255. 
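// Spelled out: with bits 0-2 never read and bits 254-255 cleared, the
// scalar actually multiplied is
//   n = 2^254 + 8 * b3 + sum_{i = 4, 8, ..., 252} 2^i * n_i,  0 <= n_i <= 15,
// where b3 is bit 3 and n_i is the 4-bit field at position i. The
// accumulator chosen here supplies the 2^254 + 8 * b3 part, and the loop
// below adds one (signed) table multiple per digit n_i.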
movq (%rsp), %rax andq $8, %rax leaq curve25519_x25519base_alt_edwards25519_0g(%rip), %r10 leaq curve25519_x25519base_alt_edwards25519_8g(%rip), %r11 movq (%r10), %rax movq (%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*16(%rsp) movq 8*1(%r10), %rax movq 8*1(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*17(%rsp) movq 8*2(%r10), %rax movq 8*2(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*18(%rsp) movq 8*3(%r10), %rax movq 8*3(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*19(%rsp) movq 8*4(%r10), %rax movq 8*4(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*20(%rsp) movq 8*5(%r10), %rax movq 8*5(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*21(%rsp) movq 8*6(%r10), %rax movq 8*6(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*22(%rsp) movq 8*7(%r10), %rax movq 8*7(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*23(%rsp) movl $1, %eax movq %rax, 8*24(%rsp) movl $0, %eax movq %rax, 8*25(%rsp) movq %rax, 8*26(%rsp) movq %rax, 8*27(%rsp) movq 8*8(%r10), %rax movq 8*8(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*28(%rsp) movq 8*9(%r10), %rax movq 8*9(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*29(%rsp) movq 8*10(%r10), %rax movq 8*10(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*30(%rsp) movq 8*11(%r10), %rax movq 8*11(%r11), %rcx cmovnzq %rcx, %rax movq %rax, 8*31(%rsp) // The counter "i" tracks the bit position for which the scalar has // already been absorbed, starting at 4 and going up in chunks of 4. // // The pointer "tab" points at the current block of the table for // multiples (2^i * j) * G at the current bit position i; 1 <= j <= 8. // // The bias is always either 0 and 1 and needs to be added to the // partially processed scalar implicitly. This is used to absorb 4 bits // of scalar per iteration from 3-bit table indexing by exploiting // negation: (16 * h + l) * G = (16 * (h + 1) - (16 - l)) * G is used // when l >= 9. Note that we can't have any bias left over at the // end because of the clearing of bit 255 of the scalar, meaning the // l >= 9 case cannot arise on the last iteration. movq $4, i leaq curve25519_x25519base_alt_edwards25519_gtable(%rip), %rax movq %rax, tab movq $0, bias // Start of the main loop, repeated 63 times for i = 4, 8, ..., 252 curve25519_x25519base_alt_scalarloop: // Look at the next 4-bit field "bf", adding the previous bias as well. // Choose the table index "ix" as bf when bf <= 8 and 16 - bf for bf >= 9, // setting the bias to 1 for the next iteration in the latter case. movq i, %rax movq %rax, %rcx shrq $6, %rax movq (%rsp,%rax,8), %rax // Exploiting scalar = sp exactly shrq %cl, %rax andq $15, %rax addq bias, %rax movq %rax, bf cmpq $9, bf sbbq %rax, %rax incq %rax movq %rax, bias movq $16, %rdi subq bf, %rdi cmpq $0, bias cmovzq bf, %rdi movq %rdi, ix // Perform constant-time lookup in the table to get element number "ix". // The table entry for the affine point (x,y) is actually a triple // (y - x,x + y,2 * d * x * y) to precompute parts of the addition. // Note that "ix" can be 0, so we set up the appropriate identity first. 
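// In this representation the group identity (x,y) = (0,1) becomes the
// triple (y - x, x + y, 2 * d * x * y) = (1, 1, 0), which is exactly what
// the two "movl $1" initializations below set up before the selection.
// Each table entry is three 4-digit field elements, i.e. 96 bytes, hence
// the addq $96 strides between candidates.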
movl $1, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx movl $1, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d xorl %r12d, %r12d xorl %r13d, %r13d xorl %r14d, %r14d xorl %r15d, %r15d movq tab, %rbp cmpq $1, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $2, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $3, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $4, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $5, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $6, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $7, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq 
$96, %rbp cmpq $8, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp movq %rbp, tab // We now have the triple from the table in registers as follows // // [%rdx;%rcx;%rbx;%rax] = y - x // [%r11;%r10;%r9;%r8] = x + y // [%r15;%r14;%r13;%r12] = 2 * d * x * y // // In case bias = 1 we need to negate this. For Edwards curves // -(x,y) = (-x,y), i.e. we need to negate the x coordinate. // In this processed encoding, that amounts to swapping the // first two fields and negating the third. // // The optional negation here also pretends bias = 0 whenever // ix = 0 so that it doesn't need to handle the case of zero // inputs, since no non-trivial table entries are zero. Note // that in the zero case the whole negation is trivial, and // so indeed is the swapping. cmpq $0, bias movq %rax, %rsi cmovnzq %r8, %rsi cmovnzq %rax, %r8 movq %rsi, 32(%rsp) movq %r8, 64(%rsp) movq %rbx, %rsi cmovnzq %r9, %rsi cmovnzq %rbx, %r9 movq %rsi, 40(%rsp) movq %r9, 72(%rsp) movq %rcx, %rsi cmovnzq %r10, %rsi cmovnzq %rcx, %r10 movq %rsi, 48(%rsp) movq %r10, 80(%rsp) movq %rdx, %rsi cmovnzq %r11, %rsi cmovnzq %rdx, %r11 movq %rsi, 56(%rsp) movq %r11, 88(%rsp) movq $-19, %rax movq $-1, %rbx movq $-1, %rcx movq $0x7fffffffffffffff, %rdx subq %r12, %rax sbbq %r13, %rbx sbbq %r14, %rcx sbbq %r15, %rdx movq ix, %r8 movq bias, %r9 testq %r8, %r8 cmovzq %r8, %r9 testq %r9, %r9 cmovzq %r12, %rax cmovzq %r13, %rbx cmovzq %r14, %rcx cmovzq %r15, %rdx movq %rax, 96(%rsp) movq %rbx, 104(%rsp) movq %rcx, 112(%rsp) movq %rdx, 120(%rsp) // Extended-projective and precomputed mixed addition. // This is effectively the same as calling the standalone // function edwards25519_pepadd_alt(acc,acc,tabent), but we // only retain slightly weaker normalization < 2 * p_25519 // throughout the inner loop, so the computation is // slightly different, and faster overall. double_twice4(t0,z_1) sub_twice4(t1,y_1,x_1) add_twice4(t2,y_1,x_1) mul_4(t3,w_1,kxy_2) mul_4(t1,t1,ymx_2) mul_4(t2,t2,xpy_2) sub_twice4(t4,t0,t3) add_twice4(t0,t0,t3) sub_twice4(t5,t2,t1) add_twice4(t1,t2,t1) mul_4(z_3,t4,t0) mul_4(x_3,t5,t4) mul_4(y_3,t0,t1) mul_4(w_3,t5,t1) // End of the main loop; move on by 4 bits. addq $4, i cmpq $256, i jc curve25519_x25519base_alt_scalarloop // Now we need to translate from Edwards curve edwards25519 back // to the Montgomery form curve25519. The mapping in the affine // representations is // // (x,y) |-> ((1 + y) / (1 - y), c * (1 + y) / ((1 - y) * x)) // // For x25519, we only need the x coordinate, and we compute this as // // (1 + y) / (1 - y) = (x + x * y) / (x - x * y) // = (X/Z + T/Z) / (X/Z - T/Z) // = (X + T) / (X - T) // = (X + T) * inverse(X - T) // // We could equally well use (Z + Y) / (Z - Y), but the above has the // same cost, and it more explicitly forces zero output whenever X = 0, // regardless of how the modular inverse behaves on zero inputs. In // the present setting (base point 9, mangled scalar) that doesn't // really matter anyway since X = 0 never arises, but it seems a // little bit tidier. 
Note that both Edwards point (0,1) which maps to // the Montgomery point at infinity, and Edwards (0,-1) which maps to // Montgomery (0,0) [this is the 2-torsion point] are both by definition // mapped to 0 by the X coordinate mapping used to define curve25519. // // First the addition and subtraction: add_twice4(t1,x_3,w_3) sub_twice4(t2,x_3,w_3) // Prepare to call the modular inverse function to get t0 = 1/t2 // Note that this works for the weakly normalized z_3 equally well. // The non-coprime case z_3 == 0 (mod p_25519) cannot arise anyway. leaq 256(%rsp), %rdi leaq 320(%rsp), %rsi // Inline copy of bignum_inv_p25519, identical except for stripping out // the prologue and epilogue saving and restoring registers and making // and reclaiming room on the stack. For more details and explanations see // "x86/curve25519/bignum_inv_p25519.S". Note that the stack it uses for // its own temporaries is 208 bytes, so it has no effect on variables // that are needed in the rest of our computation here: res, t0, t1, t2. movq %rdi, 0xc0(%rsp) xorl %eax, %eax leaq -0x13(%rax), %rcx notq %rax movq %rcx, (%rsp) movq %rax, 0x8(%rsp) movq %rax, 0x10(%rsp) btr $0x3f, %rax movq %rax, 0x18(%rsp) movq (%rsi), %rdx movq 0x8(%rsi), %rcx movq 0x10(%rsi), %r8 movq 0x18(%rsi), %r9 movl $0x1, %eax xorl %r10d, %r10d bts $0x3f, %r9 adcq %r10, %rax imulq $0x13, %rax, %rax addq %rax, %rdx adcq %r10, %rcx adcq %r10, %r8 adcq %r10, %r9 movl $0x13, %eax cmovbq %r10, %rax subq %rax, %rdx sbbq %r10, %rcx sbbq %r10, %r8 sbbq %r10, %r9 btr $0x3f, %r9 movq %rdx, 0x20(%rsp) movq %rcx, 0x28(%rsp) movq %r8, 0x30(%rsp) movq %r9, 0x38(%rsp) xorl %eax, %eax movq %rax, 0x40(%rsp) movq %rax, 0x48(%rsp) movq %rax, 0x50(%rsp) movq %rax, 0x58(%rsp) movabsq $0xa0f99e2375022099, %rax movq %rax, 0x60(%rsp) movabsq $0xa8c68f3f1d132595, %rax movq %rax, 0x68(%rsp) movabsq $0x6c6c893805ac5242, %rax movq %rax, 0x70(%rsp) movabsq $0x276508b241770615, %rax movq %rax, 0x78(%rsp) movq $0xa, 0x90(%rsp) movq $0x1, 0x98(%rsp) jmp curve25519_x25519base_alt_midloop curve25519_x25519base_alt_inverseloop: movq %r8, %r9 sarq $0x3f, %r9 xorq %r9, %r8 subq %r9, %r8 movq %r10, %r11 sarq $0x3f, %r11 xorq %r11, %r10 subq %r11, %r10 movq %r12, %r13 sarq $0x3f, %r13 xorq %r13, %r12 subq %r13, %r12 movq %r14, %r15 sarq $0x3f, %r15 xorq %r15, %r14 subq %r15, %r14 movq %r8, %rax andq %r9, %rax movq %r10, %rdi andq %r11, %rdi addq %rax, %rdi movq %rdi, 0x80(%rsp) movq %r12, %rax andq %r13, %rax movq %r14, %rsi andq %r15, %rsi addq %rax, %rsi movq %rsi, 0x88(%rsp) xorl %ebx, %ebx movq (%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq 0x20(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rdi adcq %rdx, %rbx xorl %ebp, %ebp movq (%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rsi adcq %rdx, %rbp movq 0x20(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp xorl %ecx, %ecx movq 0x8(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x28(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx shrdq $0x3b, %rbx, %rdi movq %rdi, (%rsp) xorl %edi, %edi movq 0x8(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbp adcq %rdx, %rdi movq 0x28(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rdi shrdq $0x3b, %rbp, %rsi movq %rsi, 0x20(%rsp) xorl %esi, %esi movq 0x10(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rsi movq 0x30(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rsi shrdq $0x3b, %rcx, %rbx movq %rbx, 0x8(%rsp) xorl %ebx, %ebx movq 
0x10(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rdi adcq %rdx, %rbx movq 0x30(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rdi adcq %rdx, %rbx shrdq $0x3b, %rdi, %rbp movq %rbp, 0x28(%rsp) movq 0x18(%rsp), %rax xorq %r9, %rax movq %rax, %rbp sarq $0x3f, %rbp andq %r8, %rbp negq %rbp mulq %r8 addq %rax, %rsi adcq %rdx, %rbp movq 0x38(%rsp), %rax xorq %r11, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r10, %rdx subq %rdx, %rbp mulq %r10 addq %rax, %rsi adcq %rdx, %rbp shrdq $0x3b, %rsi, %rcx movq %rcx, 0x10(%rsp) shrdq $0x3b, %rbp, %rsi movq 0x18(%rsp), %rax movq %rsi, 0x18(%rsp) xorq %r13, %rax movq %rax, %rsi sarq $0x3f, %rsi andq %r12, %rsi negq %rsi mulq %r12 addq %rax, %rbx adcq %rdx, %rsi movq 0x38(%rsp), %rax xorq %r15, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r14, %rdx subq %rdx, %rsi mulq %r14 addq %rax, %rbx adcq %rdx, %rsi shrdq $0x3b, %rbx, %rdi movq %rdi, 0x30(%rsp) shrdq $0x3b, %rsi, %rbx movq %rbx, 0x38(%rsp) movq 0x80(%rsp), %rbx movq 0x88(%rsp), %rbp xorl %ecx, %ecx movq 0x40(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x60(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x40(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x40(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x60(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x60(%rsp) xorl %ebx, %ebx movq 0x48(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x68(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq 0x48(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, 0x48(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq 0x68(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, 0x68(%rsp) xorl %ecx, %ecx movq 0x50(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x70(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x50(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x50(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x70(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x70(%rsp) movq 0x58(%rsp), %rax xorq %r9, %rax movq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x78(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rcx adcq %rbx, %rdx movq %rdx, %rbx shldq $0x1, %rcx, %rdx sarq $0x3f, %rbx addq %rbx, %rdx movl $0x13, %eax imulq %rdx movq 0x40(%rsp), %r8 addq %rax, %r8 movq %r8, 0x40(%rsp) movq 0x48(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x48(%rsp) movq 0x50(%rsp), %r8 adcq %rbx, %r8 movq %r8, 0x50(%rsp) adcq %rbx, %rcx shlq $0x3f, %rax addq %rax, %rcx movq 0x58(%rsp), %rax movq %rcx, 0x58(%rsp) xorq %r13, %rax movq %r13, %rcx andq %r12, %rcx negq %rcx mulq %r12 addq %rax, %rsi adcq %rdx, %rcx movq 0x78(%rsp), %rax xorq %r15, %rax movq %r15, %rdx andq %r14, %rdx subq %rdx, %rcx mulq %r14 addq %rax, %rsi adcq %rcx, %rdx movq %rdx, %rcx shldq $0x1, %rsi, %rdx sarq $0x3f, %rcx movl $0x13, %eax addq %rcx, %rdx imulq %rdx movq 0x60(%rsp), %r8 addq %rax, %r8 movq %r8, 0x60(%rsp) movq 0x68(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x68(%rsp) movq 0x70(%rsp), %r8 adcq %rcx, %r8 movq %r8, 0x70(%rsp) adcq %rcx, %rsi shlq $0x3f, %rax addq %rax, %rsi movq %rsi, 0x78(%rsp) curve25519_x25519base_alt_midloop: movq 0x98(%rsp), %rsi movq (%rsp), %rdx movq 0x20(%rsp), %rcx movq %rdx, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx andq $0xfffff, 
%rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax xorl %ebp, %ebp movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi 
leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %rdx leaq (%rcx,%rax), %rdi shlq $0x16, %rdx shlq $0x16, %rdi sarq $0x2b, %rdx sarq $0x2b, %rdi movabsq $0x20000100000, %rax leaq (%rbx,%rax), %rbx leaq (%rcx,%rax), %rcx sarq $0x2a, %rbx sarq $0x2a, %rcx movq %rdx, 0xa0(%rsp) movq %rbx, 0xa8(%rsp) movq %rdi, 0xb0(%rsp) movq %rcx, 0xb8(%rsp) movq (%rsp), %r12 imulq %r12, %rdi imulq %rdx, %r12 movq 0x20(%rsp), %r13 imulq %r13, %rbx imulq %rcx, %r13 addq %rbx, %r12 addq %rdi, %r13 sarq $0x14, %r12 sarq $0x14, %r13 movq %r12, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq 
(%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %r8 leaq (%rcx,%rax), %r10 shlq $0x16, %r8 shlq $0x16, %r10 sarq $0x2b, %r8 sarq $0x2b, %r10 movabsq $0x20000100000, %rax leaq (%rbx,%rax), %r15 leaq (%rcx,%rax), %r11 sarq $0x2a, %r15 sarq $0x2a, %r11 movq %r13, %rbx movq %r12, %rcx imulq %r8, %r12 imulq %r15, %rbx addq %rbx, %r12 imulq %r11, %r13 imulq %r10, %rcx addq %rcx, %r13 sarq $0x14, %r12 sarq $0x14, %r13 movq %r12, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq 0xa0(%rsp), %rax imulq %r8, %rax movq 0xb0(%rsp), %rdx imulq %r15, %rdx imulq 0xa8(%rsp), %r8 imulq 0xb8(%rsp), %r15 addq %r8, %r15 leaq (%rax,%rdx), %r9 movq 0xa0(%rsp), %rax imulq %r10, %rax movq 0xb0(%rsp), %rdx imulq %r11, %rdx imulq 0xa8(%rsp), %r10 imulq 0xb8(%rsp), %r11 addq %r10, %r11 leaq (%rax,%rdx), %r13 movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi 
leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq 
$1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %r8 leaq (%rcx,%rax), %r12 shlq $0x15, %r8 shlq $0x15, %r12 sarq $0x2b, %r8 sarq $0x2b, %r12 movabsq $0x20000100000, %rax leaq (%rbx,%rax), %r10 leaq (%rcx,%rax), %r14 sarq $0x2b, %r10 sarq $0x2b, %r14 movq %r9, %rax imulq %r8, %rax movq %r13, %rdx imulq %r10, %rdx imulq %r15, %r8 imulq %r11, %r10 addq %r8, %r10 leaq (%rax,%rdx), %r8 movq %r9, %rax imulq %r12, %rax movq %r13, %rdx imulq %r14, %rdx imulq %r15, %r12 imulq %r11, %r14 addq %r12, %r14 leaq (%rax,%rdx), %r12 movq %rsi, 0x98(%rsp) decq 0x90(%rsp) jne curve25519_x25519base_alt_inverseloop movq (%rsp), %rax movq 0x20(%rsp), %rcx imulq %r8, %rax imulq %r10, %rcx addq %rcx, %rax sarq $0x3f, %rax movq %r8, %r9 sarq $0x3f, %r9 xorq %r9, %r8 subq %r9, %r8 xorq %rax, %r9 movq %r10, %r11 sarq $0x3f, %r11 xorq %r11, %r10 subq %r11, %r10 xorq %rax, %r11 movq %r12, %r13 sarq $0x3f, %r13 xorq %r13, %r12 subq %r13, %r12 xorq %rax, %r13 movq %r14, %r15 sarq $0x3f, %r15 xorq %r15, %r14 subq %r15, %r14 xorq %rax, %r15 movq %r8, %rax andq %r9, %rax movq %r10, %r12 andq %r11, %r12 addq %rax, %r12 xorl %r13d, %r13d movq 0x40(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r12 adcq %rdx, %r13 movq 0x60(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movq 0x48(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r13 adcq %rdx, %r14 movq 0x68(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq 0x50(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq 0x70(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 adcq %rdx, %r15 movq 0x58(%rsp), %rax xorq %r9, %rax andq %r8, %r9 negq %r9 mulq %r8 addq %rax, %r15 adcq %rdx, %r9 movq 0x78(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %r9 mulq %r10 addq %rax, %r15 adcq %rdx, %r9 movq %r9, %rax shldq $0x1, %r15, %rax sarq $0x3f, %r9 movl $0x13, %ebx leaq 0x1(%rax,%r9,1), %rax imulq %rbx xorl %ebp, %ebp addq %rax, %r12 adcq %rdx, %r13 adcq %r9, %r14 adcq %r9, %r15 shlq $0x3f, %rax addq %rax, %r15 cmovns %rbp, %rbx subq %rbx, %r12 sbbq %rbp, %r13 sbbq %rbp, %r14 sbbq %rbp, %r15 btr $0x3f, %r15 movq 0xc0(%rsp), %rdi movq %r12, (%rdi) movq %r13, 0x8(%rdi) movq %r14, 0x10(%rdi) movq %r15, 0x18(%rdi) // The final result is (X + T) / (X - T) // This is the only operation in the whole computation that // fully reduces modulo p_25519 since now we want the canonical // answer as output. movq res, %rbp mul_p25519(resx,t1,t0) // Restore stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx ret // **************************************************************************** // The precomputed data (all read-only). This is currently part of the same // text section, which gives position-independent code with simple PC-relative // addressing. However it could be put in a separate section via something like // // .section .rodata // **************************************************************************** // 2^254 * G and (2^254 + 8) * G in extended-projective coordinates // but with z = 1 assumed and hence left out, so they are (X,Y,T) only. 
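// Layout note: each field element below is four little-endian 64-bit .quad
// digits, so every gtable entry occupies 96 bytes, matching the addq $96
// strides used by the constant-time lookup above.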
curve25519_x25519base_alt_edwards25519_0g: .quad 0x251037f7cf4e861d .quad 0x10ede0fb19fb128f .quad 0x96c033b175f5e2c8 .quad 0x055f070d6c15fb0d .quad 0x7c52af2c97473e69 .quad 0x022f82391bad8378 .quad 0x9991e1b02adb476f .quad 0x511144a03a99b855 .quad 0x5fafc3b88ff2e4ae .quad 0x855e4ff0de1230ff .quad 0x72e302a348492870 .quad 0x1253c19e53dbe1bc curve25519_x25519base_alt_edwards25519_8g: .quad 0x331d086e0d9abcaa .quad 0x1e23c96d311a10c9 .quad 0x96d0f95e58c13478 .quad 0x2f72f7384fcfcc59 .quad 0x39a6cd1cfd7d87c9 .quad 0x9867a0abd8ae153a .quad 0xa49d2a5f35986745 .quad 0x57012940cdfe82e1 .quad 0x5046a6532ec5544a .quad 0x6d674004739ff6c9 .quad 0x9bbaa44b234a70e3 .quad 0x5e6d8901138cf386 // Precomputed table of multiples of generator for edwards25519 // all in precomputed extended-projective (y-x,x+y,2*d*x*y) triples. curve25519_x25519base_alt_edwards25519_gtable: // 2^4 * 1 * G .quad 0x7ec851ca553e2df3 .quad 0xa71284cba64878b3 .quad 0xe6b5e4193288d1e7 .quad 0x4cf210ec5a9a8883 .quad 0x322d04a52d9021f6 .quad 0xb9c19f3375c6bf9c .quad 0x587a3a4342d20b09 .quad 0x143b1cf8aa64fe61 .quad 0x9f867c7d968acaab .quad 0x5f54258e27092729 .quad 0xd0a7d34bea180975 .quad 0x21b546a3374126e1 // 2^4 * 2 * G .quad 0xa94ff858a2888343 .quad 0xce0ed4565313ed3c .quad 0xf55c3dcfb5bf34fa .quad 0x0a653ca5c9eab371 .quad 0x490a7a45d185218f .quad 0x9a15377846049335 .quad 0x0060ea09cc31e1f6 .quad 0x7e041577f86ee965 .quad 0x66b2a496ce5b67f3 .quad 0xff5492d8bd569796 .quad 0x503cec294a592cd0 .quad 0x566943650813acb2 // 2^4 * 3 * G .quad 0xb818db0c26620798 .quad 0x5d5c31d9606e354a .quad 0x0982fa4f00a8cdc7 .quad 0x17e12bcd4653e2d4 .quad 0x5672f9eb1dabb69d .quad 0xba70b535afe853fc .quad 0x47ac0f752796d66d .quad 0x32a5351794117275 .quad 0xd3a644a6df648437 .quad 0x703b6559880fbfdd .quad 0xcb852540ad3a1aa5 .quad 0x0900b3f78e4c6468 // 2^4 * 4 * G .quad 0x0a851b9f679d651b .quad 0xe108cb61033342f2 .quad 0xd601f57fe88b30a3 .quad 0x371f3acaed2dd714 .quad 0xed280fbec816ad31 .quad 0x52d9595bd8e6efe3 .quad 0x0fe71772f6c623f5 .quad 0x4314030b051e293c .quad 0xd560005efbf0bcad .quad 0x8eb70f2ed1870c5e .quad 0x201f9033d084e6a0 .quad 0x4c3a5ae1ce7b6670 // 2^4 * 5 * G .quad 0x4138a434dcb8fa95 .quad 0x870cf67d6c96840b .quad 0xde388574297be82c .quad 0x7c814db27262a55a .quad 0xbaf875e4c93da0dd .quad 0xb93282a771b9294d .quad 0x80d63fb7f4c6c460 .quad 0x6de9c73dea66c181 .quad 0x478904d5a04df8f2 .quad 0xfafbae4ab10142d3 .quad 0xf6c8ac63555d0998 .quad 0x5aac4a412f90b104 // 2^4 * 6 * G .quad 0xc64f326b3ac92908 .quad 0x5551b282e663e1e0 .quad 0x476b35f54a1a4b83 .quad 0x1b9da3fe189f68c2 .quad 0x603a0d0abd7f5134 .quad 0x8089c932e1d3ae46 .quad 0xdf2591398798bd63 .quad 0x1c145cd274ba0235 .quad 0x32e8386475f3d743 .quad 0x365b8baf6ae5d9ef .quad 0x825238b6385b681e .quad 0x234929c1167d65e1 // 2^4 * 7 * G .quad 0x984decaba077ade8 .quad 0x383f77ad19eb389d .quad 0xc7ec6b7e2954d794 .quad 0x59c77b3aeb7c3a7a .quad 0x48145cc21d099fcf .quad 0x4535c192cc28d7e5 .quad 0x80e7c1e548247e01 .quad 0x4a5f28743b2973ee .quad 0xd3add725225ccf62 .quad 0x911a3381b2152c5d .quad 0xd8b39fad5b08f87d .quad 0x6f05606b4799fe3b // 2^4 * 8 * G .quad 0x9ffe9e92177ba962 .quad 0x98aee71d0de5cae1 .quad 0x3ff4ae942d831044 .quad 0x714de12e58533ac8 .quad 0x5b433149f91b6483 .quad 0xadb5dc655a2cbf62 .quad 0x87fa8412632827b3 .quad 0x60895e91ab49f8d8 .quad 0xe9ecf2ed0cf86c18 .quad 0xb46d06120735dfd4 .quad 0xbc9da09804b96be7 .quad 0x73e2e62fd96dc26b // 2^8 * 1 * G .quad 0xed5b635449aa515e .quad 0xa865c49f0bc6823a .quad 0x850c1fe95b42d1c4 .quad 0x30d76d6f03d315b9 .quad 0x2eccdd0e632f9c1d .quad 0x51d0b69676893115 
.quad 0x52dfb76ba8637a58 .quad 0x6dd37d49a00eef39 .quad 0x6c4444172106e4c7 .quad 0xfb53d680928d7f69 .quad 0xb4739ea4694d3f26 .quad 0x10c697112e864bb0 // 2^8 * 2 * G .quad 0x6493c4277dbe5fde .quad 0x265d4fad19ad7ea2 .quad 0x0e00dfc846304590 .quad 0x25e61cabed66fe09 .quad 0x0ca62aa08358c805 .quad 0x6a3d4ae37a204247 .quad 0x7464d3a63b11eddc .quad 0x03bf9baf550806ef .quad 0x3f13e128cc586604 .quad 0x6f5873ecb459747e .quad 0xa0b63dedcc1268f5 .quad 0x566d78634586e22c // 2^8 * 3 * G .quad 0x1637a49f9cc10834 .quad 0xbc8e56d5a89bc451 .quad 0x1cb5ec0f7f7fd2db .quad 0x33975bca5ecc35d9 .quad 0xa1054285c65a2fd0 .quad 0x6c64112af31667c3 .quad 0x680ae240731aee58 .quad 0x14fba5f34793b22a .quad 0x3cd746166985f7d4 .quad 0x593e5e84c9c80057 .quad 0x2fc3f2b67b61131e .quad 0x14829cea83fc526c // 2^8 * 4 * G .quad 0xff437b8497dd95c2 .quad 0x6c744e30aa4eb5a7 .quad 0x9e0c5d613c85e88b .quad 0x2fd9c71e5f758173 .quad 0x21e70b2f4e71ecb8 .quad 0xe656ddb940a477e3 .quad 0xbf6556cece1d4f80 .quad 0x05fc3bc4535d7b7e .quad 0x24b8b3ae52afdedd .quad 0x3495638ced3b30cf .quad 0x33a4bc83a9be8195 .quad 0x373767475c651f04 // 2^8 * 5 * G .quad 0x2fba99fd40d1add9 .quad 0xb307166f96f4d027 .quad 0x4363f05215f03bae .quad 0x1fbea56c3b18f999 .quad 0x634095cb14246590 .quad 0xef12144016c15535 .quad 0x9e38140c8910bc60 .quad 0x6bf5905730907c8c .quad 0x0fa778f1e1415b8a .quad 0x06409ff7bac3a77e .quad 0x6f52d7b89aa29a50 .quad 0x02521cf67a635a56 // 2^8 * 6 * G .quad 0x513fee0b0a9d5294 .quad 0x8f98e75c0fdf5a66 .quad 0xd4618688bfe107ce .quad 0x3fa00a7e71382ced .quad 0xb1146720772f5ee4 .quad 0xe8f894b196079ace .quad 0x4af8224d00ac824a .quad 0x001753d9f7cd6cc4 .quad 0x3c69232d963ddb34 .quad 0x1dde87dab4973858 .quad 0xaad7d1f9a091f285 .quad 0x12b5fe2fa048edb6 // 2^8 * 7 * G .quad 0x71f0fbc496fce34d .quad 0x73b9826badf35bed .quad 0xd2047261ff28c561 .quad 0x749b76f96fb1206f .quad 0xdf2b7c26ad6f1e92 .quad 0x4b66d323504b8913 .quad 0x8c409dc0751c8bc3 .quad 0x6f7e93c20796c7b8 .quad 0x1f5af604aea6ae05 .quad 0xc12351f1bee49c99 .quad 0x61a808b5eeff6b66 .quad 0x0fcec10f01e02151 // 2^8 * 8 * G .quad 0x644d58a649fe1e44 .quad 0x21fcaea231ad777e .quad 0x02441c5a887fd0d2 .quad 0x4901aa7183c511f3 .quad 0x3df2d29dc4244e45 .quad 0x2b020e7493d8de0a .quad 0x6cc8067e820c214d .quad 0x413779166feab90a .quad 0x08b1b7548c1af8f0 .quad 0xce0f7a7c246299b4 .quad 0xf760b0f91e06d939 .quad 0x41bb887b726d1213 // 2^12 * 1 * G .quad 0x9267806c567c49d8 .quad 0x066d04ccca791e6a .quad 0xa69f5645e3cc394b .quad 0x5c95b686a0788cd2 .quad 0x97d980e0aa39f7d2 .quad 0x35d0384252c6b51c .quad 0x7d43f49307cd55aa .quad 0x56bd36cfb78ac362 .quad 0x2ac519c10d14a954 .quad 0xeaf474b494b5fa90 .quad 0xe6af8382a9f87a5a .quad 0x0dea6db1879be094 // 2^12 * 2 * G .quad 0xaa66bf547344e5ab .quad 0xda1258888f1b4309 .quad 0x5e87d2b3fd564b2f .quad 0x5b2c78885483b1dd .quad 0x15baeb74d6a8797a .quad 0x7ef55cf1fac41732 .quad 0x29001f5a3c8b05c5 .quad 0x0ad7cc8752eaccfb .quad 0x52151362793408cf .quad 0xeb0f170319963d94 .quad 0xa833b2fa883d9466 .quad 0x093a7fa775003c78 // 2^12 * 3 * G .quad 0xe5107de63a16d7be .quad 0xa377ffdc9af332cf .quad 0x70d5bf18440b677f .quad 0x6a252b19a4a31403 .quad 0xb8e9604460a91286 .quad 0x7f3fd8047778d3de .quad 0x67d01e31bf8a5e2d .quad 0x7b038a06c27b653e .quad 0x9ed919d5d36990f3 .quad 0x5213aebbdb4eb9f2 .quad 0xc708ea054cb99135 .quad 0x58ded57f72260e56 // 2^12 * 4 * G .quad 0x78e79dade9413d77 .quad 0xf257f9d59729e67d .quad 0x59db910ee37aa7e6 .quad 0x6aa11b5bbb9e039c .quad 0xda6d53265b0fd48b .quad 0x8960823193bfa988 .quad 0xd78ac93261d57e28 .quad 0x79f2942d3a5c8143 .quad 0x97da2f25b6c88de9 .quad 
0x251ba7eaacf20169 .quad 0x09b44f87ef4eb4e4 .quad 0x7d90ab1bbc6a7da5 // 2^12 * 5 * G .quad 0x9acca683a7016bfe .quad 0x90505f4df2c50b6d .quad 0x6b610d5fcce435aa .quad 0x19a10d446198ff96 .quad 0x1a07a3f496b3c397 .quad 0x11ceaa188f4e2532 .quad 0x7d9498d5a7751bf0 .quad 0x19ed161f508dd8a0 .quad 0x560a2cd687dce6ca .quad 0x7f3568c48664cf4d .quad 0x8741e95222803a38 .quad 0x483bdab1595653fc // 2^12 * 6 * G .quad 0xfa780f148734fa49 .quad 0x106f0b70360534e0 .quad 0x2210776fe3e307bd .quad 0x3286c109dde6a0fe .quad 0xd6cf4d0ab4da80f6 .quad 0x82483e45f8307fe0 .quad 0x05005269ae6f9da4 .quad 0x1c7052909cf7877a .quad 0x32ee7de2874e98d4 .quad 0x14c362e9b97e0c60 .quad 0x5781dcde6a60a38a .quad 0x217dd5eaaa7aa840 // 2^12 * 7 * G .quad 0x9db7c4d0248e1eb0 .quad 0xe07697e14d74bf52 .quad 0x1e6a9b173c562354 .quad 0x7fa7c21f795a4965 .quad 0x8bdf1fb9be8c0ec8 .quad 0x00bae7f8e30a0282 .quad 0x4963991dad6c4f6c .quad 0x07058a6e5df6f60a .quad 0xe9eb02c4db31f67f .quad 0xed25fd8910bcfb2b .quad 0x46c8131f5c5cddb4 .quad 0x33b21c13a0cb9bce // 2^12 * 8 * G .quad 0x360692f8087d8e31 .quad 0xf4dcc637d27163f7 .quad 0x25a4e62065ea5963 .quad 0x659bf72e5ac160d9 .quad 0x9aafb9b05ee38c5b .quad 0xbf9d2d4e071a13c7 .quad 0x8eee6e6de933290a .quad 0x1c3bab17ae109717 .quad 0x1c9ab216c7cab7b0 .quad 0x7d65d37407bbc3cc .quad 0x52744750504a58d5 .quad 0x09f2606b131a2990 // 2^16 * 1 * G .quad 0x40e87d44744346be .quad 0x1d48dad415b52b25 .quad 0x7c3a8a18a13b603e .quad 0x4eb728c12fcdbdf7 .quad 0x7e234c597c6691ae .quad 0x64889d3d0a85b4c8 .quad 0xdae2c90c354afae7 .quad 0x0a871e070c6a9e1d .quad 0x3301b5994bbc8989 .quad 0x736bae3a5bdd4260 .quad 0x0d61ade219d59e3c .quad 0x3ee7300f2685d464 // 2^16 * 2 * G .quad 0xf5d255e49e7dd6b7 .quad 0x8016115c610b1eac .quad 0x3c99975d92e187ca .quad 0x13815762979125c2 .quad 0x43fa7947841e7518 .quad 0xe5c6fa59639c46d7 .quad 0xa1065e1de3052b74 .quad 0x7d47c6a2cfb89030 .quad 0x3fdad0148ef0d6e0 .quad 0x9d3e749a91546f3c .quad 0x71ec621026bb8157 .quad 0x148cf58d34c9ec80 // 2^16 * 3 * G .quad 0x46a492f67934f027 .quad 0x469984bef6840aa9 .quad 0x5ca1bc2a89611854 .quad 0x3ff2fa1ebd5dbbd4 .quad 0xe2572f7d9ae4756d .quad 0x56c345bb88f3487f .quad 0x9fd10b6d6960a88d .quad 0x278febad4eaea1b9 .quad 0xb1aa681f8c933966 .quad 0x8c21949c20290c98 .quad 0x39115291219d3c52 .quad 0x4104dd02fe9c677b // 2^16 * 4 * G .quad 0x72b2bf5e1124422a .quad 0xa1fa0c3398a33ab5 .quad 0x94cb6101fa52b666 .quad 0x2c863b00afaf53d5 .quad 0x81214e06db096ab8 .quad 0x21a8b6c90ce44f35 .quad 0x6524c12a409e2af5 .quad 0x0165b5a48efca481 .quad 0xf190a474a0846a76 .quad 0x12eff984cd2f7cc0 .quad 0x695e290658aa2b8f .quad 0x591b67d9bffec8b8 // 2^16 * 5 * G .quad 0x312f0d1c80b49bfa .quad 0x5979515eabf3ec8a .quad 0x727033c09ef01c88 .quad 0x3de02ec7ca8f7bcb .quad 0x99b9b3719f18b55d .quad 0xe465e5faa18c641e .quad 0x61081136c29f05ed .quad 0x489b4f867030128b .quad 0xd232102d3aeb92ef .quad 0xe16253b46116a861 .quad 0x3d7eabe7190baa24 .quad 0x49f5fbba496cbebf // 2^16 * 6 * G .quad 0x30949a108a5bcfd4 .quad 0xdc40dd70bc6473eb .quad 0x92c294c1307c0d1c .quad 0x5604a86dcbfa6e74 .quad 0x155d628c1e9c572e .quad 0x8a4d86acc5884741 .quad 0x91a352f6515763eb .quad 0x06a1a6c28867515b .quad 0x7288d1d47c1764b6 .quad 0x72541140e0418b51 .quad 0x9f031a6018acf6d1 .quad 0x20989e89fe2742c6 // 2^16 * 7 * G .quad 0x499777fd3a2dcc7f .quad 0x32857c2ca54fd892 .quad 0xa279d864d207e3a0 .quad 0x0403ed1d0ca67e29 .quad 0x1674278b85eaec2e .quad 0x5621dc077acb2bdf .quad 0x640a4c1661cbf45a .quad 0x730b9950f70595d3 .quad 0xc94b2d35874ec552 .quad 0xc5e6c8cf98246f8d .quad 0xf7cb46fa16c035ce .quad 0x5bd7454308303dcc // 2^16 * 
8 * G .quad 0x7f9ad19528b24cc2 .quad 0x7f6b54656335c181 .quad 0x66b8b66e4fc07236 .quad 0x133a78007380ad83 .quad 0x85c4932115e7792a .quad 0xc64c89a2bdcdddc9 .quad 0x9d1e3da8ada3d762 .quad 0x5bb7db123067f82c .quad 0x0961f467c6ca62be .quad 0x04ec21d6211952ee .quad 0x182360779bd54770 .quad 0x740dca6d58f0e0d2 // 2^20 * 1 * G .quad 0x50b70bf5d3f0af0b .quad 0x4feaf48ae32e71f7 .quad 0x60e84ed3a55bbd34 .quad 0x00ed489b3f50d1ed .quad 0x3906c72aed261ae5 .quad 0x9ab68fd988e100f7 .quad 0xf5e9059af3360197 .quad 0x0e53dc78bf2b6d47 .quad 0xb90829bf7971877a .quad 0x5e4444636d17e631 .quad 0x4d05c52e18276893 .quad 0x27632d9a5a4a4af5 // 2^20 * 2 * G .quad 0xd11ff05154b260ce .quad 0xd86dc38e72f95270 .quad 0x601fcd0d267cc138 .quad 0x2b67916429e90ccd .quad 0xa98285d187eaffdb .quad 0xa5b4fbbbd8d0a864 .quad 0xb658f27f022663f7 .quad 0x3bbc2b22d99ce282 .quad 0xb917c952583c0a58 .quad 0x653ff9b80fe4c6f3 .quad 0x9b0da7d7bcdf3c0c .quad 0x43a0eeb6ab54d60e // 2^20 * 3 * G .quad 0x396966a46d4a5487 .quad 0xf811a18aac2bb3ba .quad 0x66e4685b5628b26b .quad 0x70a477029d929b92 .quad 0x3ac6322357875fe8 .quad 0xd9d4f4ecf5fbcb8f .quad 0x8dee8493382bb620 .quad 0x50c5eaa14c799fdc .quad 0xdd0edc8bd6f2fb3c .quad 0x54c63aa79cc7b7a0 .quad 0xae0b032b2c8d9f1a .quad 0x6f9ce107602967fb // 2^20 * 4 * G .quad 0xad1054b1cde1c22a .quad 0xc4a8e90248eb32df .quad 0x5f3e7b33accdc0ea .quad 0x72364713fc79963e .quad 0x139693063520e0b5 .quad 0x437fcf7c88ea03fe .quad 0xf7d4c40bd3c959bc .quad 0x699154d1f893ded9 .quad 0x315d5c75b4b27526 .quad 0xcccb842d0236daa5 .quad 0x22f0c8a3345fee8e .quad 0x73975a617d39dbed // 2^20 * 5 * G .quad 0xe4024df96375da10 .quad 0x78d3251a1830c870 .quad 0x902b1948658cd91c .quad 0x7e18b10b29b7438a .quad 0x6f37f392f4433e46 .quad 0x0e19b9a11f566b18 .quad 0x220fb78a1fd1d662 .quad 0x362a4258a381c94d .quad 0x9071d9132b6beb2f .quad 0x0f26e9ad28418247 .quad 0xeab91ec9bdec925d .quad 0x4be65bc8f48af2de // 2^20 * 6 * G .quad 0x78487feba36e7028 .quad 0x5f3f13001dd8ce34 .quad 0x934fb12d4b30c489 .quad 0x056c244d397f0a2b .quad 0x1d50fba257c26234 .quad 0x7bd4823adeb0678b .quad 0xc2b0dc6ea6538af5 .quad 0x5665eec6351da73e .quad 0xdb3ee00943bfb210 .quad 0x4972018720800ac2 .quad 0x26ab5d6173bd8667 .quad 0x20b209c2ab204938 // 2^20 * 7 * G .quad 0x549e342ac07fb34b .quad 0x02d8220821373d93 .quad 0xbc262d70acd1f567 .quad 0x7a92c9fdfbcac784 .quad 0x1fcca94516bd3289 .quad 0x448d65aa41420428 .quad 0x59c3b7b216a55d62 .quad 0x49992cc64e612cd8 .quad 0x65bd1bea70f801de .quad 0x1befb7c0fe49e28a .quad 0xa86306cdb1b2ae4a .quad 0x3b7ac0cd265c2a09 // 2^20 * 8 * G .quad 0x822bee438c01bcec .quad 0x530cb525c0fbc73b .quad 0x48519034c1953fe9 .quad 0x265cc261e09a0f5b .quad 0xf0d54e4f22ed39a7 .quad 0xa2aae91e5608150a .quad 0xf421b2e9eddae875 .quad 0x31bc531d6b7de992 .quad 0xdf3d134da980f971 .quad 0x7a4fb8d1221a22a7 .quad 0x3df7d42035aad6d8 .quad 0x2a14edcc6a1a125e // 2^24 * 1 * G .quad 0xdf48ee0752cfce4e .quad 0xc3fffaf306ec08b7 .quad 0x05710b2ab95459c4 .quad 0x161d25fa963ea38d .quad 0x231a8c570478433c .quad 0xb7b5270ec281439d .quad 0xdbaa99eae3d9079f .quad 0x2c03f5256c2b03d9 .quad 0x790f18757b53a47d .quad 0x307b0130cf0c5879 .quad 0x31903d77257ef7f9 .quad 0x699468bdbd96bbaf // 2^24 * 2 * G .quad 0xbd1f2f46f4dafecf .quad 0x7cef0114a47fd6f7 .quad 0xd31ffdda4a47b37f .quad 0x525219a473905785 .quad 0xd8dd3de66aa91948 .quad 0x485064c22fc0d2cc .quad 0x9b48246634fdea2f .quad 0x293e1c4e6c4a2e3a .quad 0x376e134b925112e1 .quad 0x703778b5dca15da0 .quad 0xb04589af461c3111 .quad 0x5b605c447f032823 // 2^24 * 3 * G .quad 0xb965805920c47c89 .quad 0xe7f0100c923b8fcc .quad 
0x0001256502e2ef77 .quad 0x24a76dcea8aeb3ee .quad 0x3be9fec6f0e7f04c .quad 0x866a579e75e34962 .quad 0x5542ef161e1de61a .quad 0x2f12fef4cc5abdd5 .quad 0x0a4522b2dfc0c740 .quad 0x10d06e7f40c9a407 .quad 0xc6cf144178cff668 .quad 0x5e607b2518a43790 // 2^24 * 4 * G .quad 0x58b31d8f6cdf1818 .quad 0x35cfa74fc36258a2 .quad 0xe1b3ff4f66e61d6e .quad 0x5067acab6ccdd5f7 .quad 0xa02c431ca596cf14 .quad 0xe3c42d40aed3e400 .quad 0xd24526802e0f26db .quad 0x201f33139e457068 .quad 0xfd527f6b08039d51 .quad 0x18b14964017c0006 .quad 0xd5220eb02e25a4a8 .quad 0x397cba8862460375 // 2^24 * 5 * G .quad 0x30c13093f05959b2 .quad 0xe23aa18de9a97976 .quad 0x222fd491721d5e26 .quad 0x2339d320766e6c3a .quad 0x7815c3fbc81379e7 .quad 0xa6619420dde12af1 .quad 0xffa9c0f885a8fdd5 .quad 0x771b4022c1e1c252 .quad 0xd87dd986513a2fa7 .quad 0xf5ac9b71f9d4cf08 .quad 0xd06bc31b1ea283b3 .quad 0x331a189219971a76 // 2^24 * 6 * G .quad 0xf5166f45fb4f80c6 .quad 0x9c36c7de61c775cf .quad 0xe3d4e81b9041d91c .quad 0x31167c6b83bdfe21 .quad 0x26512f3a9d7572af .quad 0x5bcbe28868074a9e .quad 0x84edc1c11180f7c4 .quad 0x1ac9619ff649a67b .quad 0xf22b3842524b1068 .quad 0x5068343bee9ce987 .quad 0xfc9d71844a6250c8 .quad 0x612436341f08b111 // 2^24 * 7 * G .quad 0xd99d41db874e898d .quad 0x09fea5f16c07dc20 .quad 0x793d2c67d00f9bbc .quad 0x46ebe2309e5eff40 .quad 0x8b6349e31a2d2638 .quad 0x9ddfb7009bd3fd35 .quad 0x7f8bf1b8a3a06ba4 .quad 0x1522aa3178d90445 .quad 0x2c382f5369614938 .quad 0xdafe409ab72d6d10 .quad 0xe8c83391b646f227 .quad 0x45fe70f50524306c // 2^24 * 8 * G .quad 0xda4875a6960c0b8c .quad 0x5b68d076ef0e2f20 .quad 0x07fb51cf3d0b8fd4 .quad 0x428d1623a0e392d4 .quad 0x62f24920c8951491 .quad 0x05f007c83f630ca2 .quad 0x6fbb45d2f5c9d4b8 .quad 0x16619f6db57a2245 .quad 0x084f4a4401a308fd .quad 0xa82219c376a5caac .quad 0xdeb8de4643d1bc7d .quad 0x1d81592d60bd38c6 // 2^28 * 1 * G .quad 0xd833d7beec2a4c38 .quad 0x2c9162830acc20ed .quad 0xe93a47aa92df7581 .quad 0x702d67a3333c4a81 .quad 0x3a4a369a2f89c8a1 .quad 0x63137a1d7c8de80d .quad 0xbcac008a78eda015 .quad 0x2cb8b3a5b483b03f .quad 0x36e417cbcb1b90a1 .quad 0x33b3ddaa7f11794e .quad 0x3f510808885bc607 .quad 0x24141dc0e6a8020d // 2^28 * 2 * G .quad 0x59f73c773fefee9d .quad 0xb3f1ef89c1cf989d .quad 0xe35dfb42e02e545f .quad 0x5766120b47a1b47c .quad 0x91925dccbd83157d .quad 0x3ca1205322cc8094 .quad 0x28e57f183f90d6e4 .quad 0x1a4714cede2e767b .quad 0xdb20ba0fb8b6b7ff .quad 0xb732c3b677511fa1 .quad 0xa92b51c099f02d89 .quad 0x4f3875ad489ca5f1 // 2^28 * 3 * G .quad 0xc7fc762f4932ab22 .quad 0x7ac0edf72f4c3c1b .quad 0x5f6b55aa9aa895e8 .quad 0x3680274dad0a0081 .quad 0x79ed13f6ee73eec0 .quad 0xa5c6526d69110bb1 .quad 0xe48928c38603860c .quad 0x722a1446fd7059f5 .quad 0xd0959fe9a8cf8819 .quad 0xd0a995508475a99c .quad 0x6eac173320b09cc5 .quad 0x628ecf04331b1095 // 2^28 * 4 * G .quad 0x98bcb118a9d0ddbc .quad 0xee449e3408b4802b .quad 0x87089226b8a6b104 .quad 0x685f349a45c7915d .quad 0x9b41acf85c74ccf1 .quad 0xb673318108265251 .quad 0x99c92aed11adb147 .quad 0x7a47d70d34ecb40f .quad 0x60a0c4cbcc43a4f5 .quad 0x775c66ca3677bea9 .quad 0xa17aa1752ff8f5ed .quad 0x11ded9020e01fdc0 // 2^28 * 5 * G .quad 0x890e7809caefe704 .quad 0x8728296de30e8c6c .quad 0x4c5cd2a392aeb1c9 .quad 0x194263d15771531f .quad 0x471f95b03bea93b7 .quad 0x0552d7d43313abd3 .quad 0xbd9370e2e17e3f7b .quad 0x7b120f1db20e5bec .quad 0x17d2fb3d86502d7a .quad 0xb564d84450a69352 .quad 0x7da962c8a60ed75d .quad 0x00d0f85b318736aa // 2^28 * 6 * G .quad 0x978b142e777c84fd .quad 0xf402644705a8c062 .quad 0xa67ad51be7e612c7 .quad 0x2f7b459698dd6a33 .quad 0xa6753c1efd7621c1 .quad 
0x69c0b4a7445671f5 .quad 0x971f527405b23c11 .quad 0x387bc74851a8c7cd .quad 0x81894b4d4a52a9a8 .quad 0xadd93e12f6b8832f .quad 0x184d8548b61bd638 .quad 0x3f1c62dbd6c9f6cd // 2^28 * 7 * G .quad 0x2e8f1f0091910c1f .quad 0xa4df4fe0bff2e12c .quad 0x60c6560aee927438 .quad 0x6338283facefc8fa .quad 0x3fad3e40148f693d .quad 0x052656e194eb9a72 .quad 0x2f4dcbfd184f4e2f .quad 0x406f8db1c482e18b .quad 0x9e630d2c7f191ee4 .quad 0x4fbf8301bc3ff670 .quad 0x787d8e4e7afb73c4 .quad 0x50d83d5be8f58fa5 // 2^28 * 8 * G .quad 0x85683916c11a1897 .quad 0x2d69a4efe506d008 .quad 0x39af1378f664bd01 .quad 0x65942131361517c6 .quad 0xc0accf90b4d3b66d .quad 0xa7059de561732e60 .quad 0x033d1f7870c6b0ba .quad 0x584161cd26d946e4 .quad 0xbbf2b1a072d27ca2 .quad 0xbf393c59fbdec704 .quad 0xe98dbbcee262b81e .quad 0x02eebd0b3029b589 // 2^32 * 1 * G .quad 0x61368756a60dac5f .quad 0x17e02f6aebabdc57 .quad 0x7f193f2d4cce0f7d .quad 0x20234a7789ecdcf0 .quad 0x8765b69f7b85c5e8 .quad 0x6ff0678bd168bab2 .quad 0x3a70e77c1d330f9b .quad 0x3a5f6d51b0af8e7c .quad 0x76d20db67178b252 .quad 0x071c34f9d51ed160 .quad 0xf62a4a20b3e41170 .quad 0x7cd682353cffe366 // 2^32 * 2 * G .quad 0x0be1a45bd887fab6 .quad 0x2a846a32ba403b6e .quad 0xd9921012e96e6000 .quad 0x2838c8863bdc0943 .quad 0xa665cd6068acf4f3 .quad 0x42d92d183cd7e3d3 .quad 0x5759389d336025d9 .quad 0x3ef0253b2b2cd8ff .quad 0xd16bb0cf4a465030 .quad 0xfa496b4115c577ab .quad 0x82cfae8af4ab419d .quad 0x21dcb8a606a82812 // 2^32 * 3 * G .quad 0x5c6004468c9d9fc8 .quad 0x2540096ed42aa3cb .quad 0x125b4d4c12ee2f9c .quad 0x0bc3d08194a31dab .quad 0x9a8d00fabe7731ba .quad 0x8203607e629e1889 .quad 0xb2cc023743f3d97f .quad 0x5d840dbf6c6f678b .quad 0x706e380d309fe18b .quad 0x6eb02da6b9e165c7 .quad 0x57bbba997dae20ab .quad 0x3a4276232ac196dd // 2^32 * 4 * G .quad 0x4b42432c8a7084fa .quad 0x898a19e3dfb9e545 .quad 0xbe9f00219c58e45d .quad 0x1ff177cea16debd1 .quad 0x3bf8c172db447ecb .quad 0x5fcfc41fc6282dbd .quad 0x80acffc075aa15fe .quad 0x0770c9e824e1a9f9 .quad 0xcf61d99a45b5b5fd .quad 0x860984e91b3a7924 .quad 0xe7300919303e3e89 .quad 0x39f264fd41500b1e // 2^32 * 5 * G .quad 0xa7ad3417dbe7e29c .quad 0xbd94376a2b9c139c .quad 0xa0e91b8e93597ba9 .quad 0x1712d73468889840 .quad 0xd19b4aabfe097be1 .quad 0xa46dfce1dfe01929 .quad 0xc3c908942ca6f1ff .quad 0x65c621272c35f14e .quad 0xe72b89f8ce3193dd .quad 0x4d103356a125c0bb .quad 0x0419a93d2e1cfe83 .quad 0x22f9800ab19ce272 // 2^32 * 6 * G .quad 0x605a368a3e9ef8cb .quad 0xe3e9c022a5504715 .quad 0x553d48b05f24248f .quad 0x13f416cd647626e5 .quad 0x42029fdd9a6efdac .quad 0xb912cebe34a54941 .quad 0x640f64b987bdf37b .quad 0x4171a4d38598cab4 .quad 0xfa2758aa99c94c8c .quad 0x23006f6fb000b807 .quad 0xfbd291ddadda5392 .quad 0x508214fa574bd1ab // 2^32 * 7 * G .quad 0xc20269153ed6fe4b .quad 0xa65a6739511d77c4 .quad 0xcbde26462c14af94 .quad 0x22f960ec6faba74b .quad 0x461a15bb53d003d6 .quad 0xb2102888bcf3c965 .quad 0x27c576756c683a5a .quad 0x3a7758a4c86cb447 .quad 0x548111f693ae5076 .quad 0x1dae21df1dfd54a6 .quad 0x12248c90f3115e65 .quad 0x5d9fd15f8de7f494 // 2^32 * 8 * G .quad 0x031408d36d63727f .quad 0x6a379aefd7c7b533 .quad 0xa9e18fc5ccaee24b .quad 0x332f35914f8fbed3 .quad 0x3f244d2aeed7521e .quad 0x8e3a9028432e9615 .quad 0xe164ba772e9c16d4 .quad 0x3bc187fa47eb98d8 .quad 0x6d470115ea86c20c .quad 0x998ab7cb6c46d125 .quad 0xd77832b53a660188 .quad 0x450d81ce906fba03 // 2^36 * 1 * G .quad 0xf8ae4d2ad8453902 .quad 0x7018058ee8db2d1d .quad 0xaab3995fc7d2c11e .quad 0x53b16d2324ccca79 .quad 0x23264d66b2cae0b5 .quad 0x7dbaed33ebca6576 .quad 0x030ebed6f0d24ac8 .quad 0x2a887f78f7635510 .quad 
0x2a23b9e75c012d4f .quad 0x0c974651cae1f2ea .quad 0x2fb63273675d70ca .quad 0x0ba7250b864403f5 // 2^36 * 2 * G .quad 0xbb0d18fd029c6421 .quad 0xbc2d142189298f02 .quad 0x8347f8e68b250e96 .quad 0x7b9f2fe8032d71c9 .quad 0xdd63589386f86d9c .quad 0x61699176e13a85a4 .quad 0x2e5111954eaa7d57 .quad 0x32c21b57fb60bdfb .quad 0xd87823cd319e0780 .quad 0xefc4cfc1897775c5 .quad 0x4854fb129a0ab3f7 .quad 0x12c49d417238c371 // 2^36 * 3 * G .quad 0x0950b533ffe83769 .quad 0x21861c1d8e1d6bd1 .quad 0xf022d8381302e510 .quad 0x2509200c6391cab4 .quad 0x09b3a01783799542 .quad 0x626dd08faad5ee3f .quad 0xba00bceeeb70149f .quad 0x1421b246a0a444c9 .quad 0x4aa43a8e8c24a7c7 .quad 0x04c1f540d8f05ef5 .quad 0xadba5e0c0b3eb9dc .quad 0x2ab5504448a49ce3 // 2^36 * 4 * G .quad 0x2ed227266f0f5dec .quad 0x9824ee415ed50824 .quad 0x807bec7c9468d415 .quad 0x7093bae1b521e23f .quad 0xdc07ac631c5d3afa .quad 0x58615171f9df8c6c .quad 0x72a079d89d73e2b0 .quad 0x7301f4ceb4eae15d .quad 0x6409e759d6722c41 .quad 0xa674e1cf72bf729b .quad 0xbc0a24eb3c21e569 .quad 0x390167d24ebacb23 // 2^36 * 5 * G .quad 0x27f58e3bba353f1c .quad 0x4c47764dbf6a4361 .quad 0xafbbc4e56e562650 .quad 0x07db2ee6aae1a45d .quad 0xd7bb054ba2f2120b .quad 0xe2b9ceaeb10589b7 .quad 0x3fe8bac8f3c0edbe .quad 0x4cbd40767112cb69 .quad 0x0b603cc029c58176 .quad 0x5988e3825cb15d61 .quad 0x2bb61413dcf0ad8d .quad 0x7b8eec6c74183287 // 2^36 * 6 * G .quad 0xe4ca40782cd27cb0 .quad 0xdaf9c323fbe967bd .quad 0xb29bd34a8ad41e9e .quad 0x72810497626ede4d .quad 0x32fee570fc386b73 .quad 0xda8b0141da3a8cc7 .quad 0x975ffd0ac8968359 .quad 0x6ee809a1b132a855 .quad 0x9444bb31fcfd863a .quad 0x2fe3690a3e4e48c5 .quad 0xdc29c867d088fa25 .quad 0x13bd1e38d173292e // 2^36 * 7 * G .quad 0xd32b4cd8696149b5 .quad 0xe55937d781d8aab7 .quad 0x0bcb2127ae122b94 .quad 0x41e86fcfb14099b0 .quad 0x223fb5cf1dfac521 .quad 0x325c25316f554450 .quad 0x030b98d7659177ac .quad 0x1ed018b64f88a4bd .quad 0x3630dfa1b802a6b0 .quad 0x880f874742ad3bd5 .quad 0x0af90d6ceec5a4d4 .quad 0x746a247a37cdc5d9 // 2^36 * 8 * G .quad 0xd531b8bd2b7b9af6 .quad 0x5005093537fc5b51 .quad 0x232fcf25c593546d .quad 0x20a365142bb40f49 .quad 0x6eccd85278d941ed .quad 0x2254ae83d22f7843 .quad 0xc522d02e7bbfcdb7 .quad 0x681e3351bff0e4e2 .quad 0x8b64b59d83034f45 .quad 0x2f8b71f21fa20efb .quad 0x69249495ba6550e4 .quad 0x539ef98e45d5472b // 2^40 * 1 * G .quad 0x6e7bb6a1a6205275 .quad 0xaa4f21d7413c8e83 .quad 0x6f56d155e88f5cb2 .quad 0x2de25d4ba6345be1 .quad 0xd074d8961cae743f .quad 0xf86d18f5ee1c63ed .quad 0x97bdc55be7f4ed29 .quad 0x4cbad279663ab108 .quad 0x80d19024a0d71fcd .quad 0xc525c20afb288af8 .quad 0xb1a3974b5f3a6419 .quad 0x7d7fbcefe2007233 // 2^40 * 2 * G .quad 0xfaef1e6a266b2801 .quad 0x866c68c4d5739f16 .quad 0xf68a2fbc1b03762c .quad 0x5975435e87b75a8d .quad 0xcd7c5dc5f3c29094 .quad 0xc781a29a2a9105ab .quad 0x80c61d36421c3058 .quad 0x4f9cd196dcd8d4d7 .quad 0x199297d86a7b3768 .quad 0xd0d058241ad17a63 .quad 0xba029cad5c1c0c17 .quad 0x7ccdd084387a0307 // 2^40 * 3 * G .quad 0xdca6422c6d260417 .quad 0xae153d50948240bd .quad 0xa9c0c1b4fb68c677 .quad 0x428bd0ed61d0cf53 .quad 0x9b0c84186760cc93 .quad 0xcdae007a1ab32a99 .quad 0xa88dec86620bda18 .quad 0x3593ca848190ca44 .quad 0x9213189a5e849aa7 .quad 0xd4d8c33565d8facd .quad 0x8c52545b53fdbbd1 .quad 0x27398308da2d63e6 // 2^40 * 4 * G .quad 0x42c38d28435ed413 .quad 0xbd50f3603278ccc9 .quad 0xbb07ab1a79da03ef .quad 0x269597aebe8c3355 .quad 0xb9a10e4c0a702453 .quad 0x0fa25866d57d1bde .quad 0xffb9d9b5cd27daf7 .quad 0x572c2945492c33fd .quad 0xc77fc745d6cd30be .quad 0xe4dfe8d3e3baaefb .quad 0xa22c8830aa5dda0c .quad 
0x7f985498c05bca80 // 2^40 * 5 * G .quad 0x3849ce889f0be117 .quad 0x8005ad1b7b54a288 .quad 0x3da3c39f23fc921c .quad 0x76c2ec470a31f304 .quad 0xd35615520fbf6363 .quad 0x08045a45cf4dfba6 .quad 0xeec24fbc873fa0c2 .quad 0x30f2653cd69b12e7 .quad 0x8a08c938aac10c85 .quad 0x46179b60db276bcb .quad 0xa920c01e0e6fac70 .quad 0x2f1273f1596473da // 2^40 * 6 * G .quad 0x4739fc7c8ae01e11 .quad 0xfd5274904a6aab9f .quad 0x41d98a8287728f2e .quad 0x5d9e572ad85b69f2 .quad 0x30488bd755a70bc0 .quad 0x06d6b5a4f1d442e7 .quad 0xead1a69ebc596162 .quad 0x38ac1997edc5f784 .quad 0x0666b517a751b13b .quad 0x747d06867e9b858c .quad 0xacacc011454dde49 .quad 0x22dfcd9cbfe9e69c // 2^40 * 7 * G .quad 0x8ddbd2e0c30d0cd9 .quad 0xad8e665facbb4333 .quad 0x8f6b258c322a961f .quad 0x6b2916c05448c1c7 .quad 0x56ec59b4103be0a1 .quad 0x2ee3baecd259f969 .quad 0x797cb29413f5cd32 .quad 0x0fe9877824cde472 .quad 0x7edb34d10aba913b .quad 0x4ea3cd822e6dac0e .quad 0x66083dff6578f815 .quad 0x4c303f307ff00a17 // 2^40 * 8 * G .quad 0xd30a3bd617b28c85 .quad 0xc5d377b739773bea .quad 0xc6c6e78c1e6a5cbf .quad 0x0d61b8f78b2ab7c4 .quad 0x29fc03580dd94500 .quad 0xecd27aa46fbbec93 .quad 0x130a155fc2e2a7f8 .quad 0x416b151ab706a1d5 .quad 0x56a8d7efe9c136b0 .quad 0xbd07e5cd58e44b20 .quad 0xafe62fda1b57e0ab .quad 0x191a2af74277e8d2 // 2^44 * 1 * G .quad 0xd550095bab6f4985 .quad 0x04f4cd5b4fbfaf1a .quad 0x9d8e2ed12a0c7540 .quad 0x2bc24e04b2212286 .quad 0x09d4b60b2fe09a14 .quad 0xc384f0afdbb1747e .quad 0x58e2ea8978b5fd6e .quad 0x519ef577b5e09b0a .quad 0x1863d7d91124cca9 .quad 0x7ac08145b88a708e .quad 0x2bcd7309857031f5 .quad 0x62337a6e8ab8fae5 // 2^44 * 2 * G .quad 0x4bcef17f06ffca16 .quad 0xde06e1db692ae16a .quad 0x0753702d614f42b0 .quad 0x5f6041b45b9212d0 .quad 0xd1ab324e1b3a1273 .quad 0x18947cf181055340 .quad 0x3b5d9567a98c196e .quad 0x7fa00425802e1e68 .quad 0x7d531574028c2705 .quad 0x80317d69db0d75fe .quad 0x30fface8ef8c8ddd .quad 0x7e9de97bb6c3e998 // 2^44 * 3 * G .quad 0x1558967b9e6585a3 .quad 0x97c99ce098e98b92 .quad 0x10af149b6eb3adad .quad 0x42181fe8f4d38cfa .quad 0xf004be62a24d40dd .quad 0xba0659910452d41f .quad 0x81c45ee162a44234 .quad 0x4cb829d8a22266ef .quad 0x1dbcaa8407b86681 .quad 0x081f001e8b26753b .quad 0x3cd7ce6a84048e81 .quad 0x78af11633f25f22c // 2^44 * 4 * G .quad 0x8416ebd40b50babc .quad 0x1508722628208bee .quad 0xa3148fafb9c1c36d .quad 0x0d07daacd32d7d5d .quad 0x3241c00e7d65318c .quad 0xe6bee5dcd0e86de7 .quad 0x118b2dc2fbc08c26 .quad 0x680d04a7fc603dc3 .quad 0xf9c2414a695aa3eb .quad 0xdaa42c4c05a68f21 .quad 0x7c6c23987f93963e .quad 0x210e8cd30c3954e3 // 2^44 * 5 * G .quad 0xac4201f210a71c06 .quad 0x6a65e0aef3bfb021 .quad 0xbc42c35c393632f7 .quad 0x56ea8db1865f0742 .quad 0x2b50f16137fe6c26 .quad 0xe102bcd856e404d8 .quad 0x12b0f1414c561f6b .quad 0x51b17bc8d028ec91 .quad 0xfff5fb4bcf535119 .quad 0xf4989d79df1108a0 .quad 0xbdfcea659a3ba325 .quad 0x18a11f1174d1a6f2 // 2^44 * 6 * G .quad 0x407375ab3f6bba29 .quad 0x9ec3b6d8991e482e .quad 0x99c80e82e55f92e9 .quad 0x307c13b6fb0c0ae1 .quad 0xfbd63cdad27a5f2c .quad 0xf00fc4bc8aa106d7 .quad 0x53fb5c1a8e64a430 .quad 0x04eaabe50c1a2e85 .quad 0x24751021cb8ab5e7 .quad 0xfc2344495c5010eb .quad 0x5f1e717b4e5610a1 .quad 0x44da5f18c2710cd5 // 2^44 * 7 * G .quad 0x033cc55ff1b82eb5 .quad 0xb15ae36d411cae52 .quad 0xba40b6198ffbacd3 .quad 0x768edce1532e861f .quad 0x9156fe6b89d8eacc .quad 0xe6b79451e23126a1 .quad 0xbd7463d93944eb4e .quad 0x726373f6767203ae .quad 0xe305ca72eb7ef68a .quad 0x662cf31f70eadb23 .quad 0x18f026fdb4c45b68 .quad 0x513b5384b5d2ecbd // 2^44 * 8 * G .quad 0x46d46280c729989e .quad 
0x4b93fbd05368a5dd .quad 0x63df3f81d1765a89 .quad 0x34cebd64b9a0a223 .quad 0x5e2702878af34ceb .quad 0x900b0409b946d6ae .quad 0x6512ebf7dabd8512 .quad 0x61d9b76988258f81 .quad 0xa6c5a71349b7d94b .quad 0xa3f3d15823eb9446 .quad 0x0416fbd277484834 .quad 0x69d45e6f2c70812f // 2^48 * 1 * G .quad 0xce16f74bc53c1431 .quad 0x2b9725ce2072edde .quad 0xb8b9c36fb5b23ee7 .quad 0x7e2e0e450b5cc908 .quad 0x9fe62b434f460efb .quad 0xded303d4a63607d6 .quad 0xf052210eb7a0da24 .quad 0x237e7dbe00545b93 .quad 0x013575ed6701b430 .quad 0x231094e69f0bfd10 .quad 0x75320f1583e47f22 .quad 0x71afa699b11155e3 // 2^48 * 2 * G .quad 0x65ce6f9b3953b61d .quad 0xc65839eaafa141e6 .quad 0x0f435ffda9f759fe .quad 0x021142e9c2b1c28e .quad 0xea423c1c473b50d6 .quad 0x51e87a1f3b38ef10 .quad 0x9b84bf5fb2c9be95 .quad 0x00731fbc78f89a1c .quad 0xe430c71848f81880 .quad 0xbf960c225ecec119 .quad 0xb6dae0836bba15e3 .quad 0x4c4d6f3347e15808 // 2^48 * 3 * G .quad 0x18f7eccfc17d1fc9 .quad 0x6c75f5a651403c14 .quad 0xdbde712bf7ee0cdf .quad 0x193fddaaa7e47a22 .quad 0x2f0cddfc988f1970 .quad 0x6b916227b0b9f51b .quad 0x6ec7b6c4779176be .quad 0x38bf9500a88f9fa8 .quad 0x1fd2c93c37e8876f .quad 0xa2f61e5a18d1462c .quad 0x5080f58239241276 .quad 0x6a6fb99ebf0d4969 // 2^48 * 4 * G .quad 0x6a46c1bb560855eb .quad 0x2416bb38f893f09d .quad 0xd71d11378f71acc1 .quad 0x75f76914a31896ea .quad 0xeeb122b5b6e423c6 .quad 0x939d7010f286ff8e .quad 0x90a92a831dcf5d8c .quad 0x136fda9f42c5eb10 .quad 0xf94cdfb1a305bdd1 .quad 0x0f364b9d9ff82c08 .quad 0x2a87d8a5c3bb588a .quad 0x022183510be8dcba // 2^48 * 5 * G .quad 0x4af766385ead2d14 .quad 0xa08ed880ca7c5830 .quad 0x0d13a6e610211e3d .quad 0x6a071ce17b806c03 .quad 0x9d5a710143307a7f .quad 0xb063de9ec47da45f .quad 0x22bbfe52be927ad3 .quad 0x1387c441fd40426c .quad 0xb5d3c3d187978af8 .quad 0x722b5a3d7f0e4413 .quad 0x0d7b4848bb477ca0 .quad 0x3171b26aaf1edc92 // 2^48 * 6 * G .quad 0xa92f319097564ca8 .quad 0xff7bb84c2275e119 .quad 0x4f55fe37a4875150 .quad 0x221fd4873cf0835a .quad 0xa60db7d8b28a47d1 .quad 0xa6bf14d61770a4f1 .quad 0xd4a1f89353ddbd58 .quad 0x6c514a63344243e9 .quad 0x2322204f3a156341 .quad 0xfb73e0e9ba0a032d .quad 0xfce0dd4c410f030e .quad 0x48daa596fb924aaa // 2^48 * 7 * G .quad 0x6eca8e665ca59cc7 .quad 0xa847254b2e38aca0 .quad 0x31afc708d21e17ce .quad 0x676dd6fccad84af7 .quad 0x14f61d5dc84c9793 .quad 0x9941f9e3ef418206 .quad 0xcdf5b88f346277ac .quad 0x58c837fa0e8a79a9 .quad 0x0cf9688596fc9058 .quad 0x1ddcbbf37b56a01b .quad 0xdcc2e77d4935d66a .quad 0x1c4f73f2c6a57f0a // 2^48 * 8 * G .quad 0x0e7a4fbd305fa0bb .quad 0x829d4ce054c663ad .quad 0xf421c3832fe33848 .quad 0x795ac80d1bf64c42 .quad 0xb36e706efc7c3484 .quad 0x73dfc9b4c3c1cf61 .quad 0xeb1d79c9781cc7e5 .quad 0x70459adb7daf675c .quad 0x1b91db4991b42bb3 .quad 0x572696234b02dcca .quad 0x9fdf9ee51f8c78dc .quad 0x5fe162848ce21fd3 // 2^52 * 1 * G .quad 0xe2790aae4d077c41 .quad 0x8b938270db7469a3 .quad 0x6eb632dc8abd16a2 .quad 0x720814ecaa064b72 .quad 0x315c29c795115389 .quad 0xd7e0e507862f74ce .quad 0x0c4a762185927432 .quad 0x72de6c984a25a1e4 .quad 0xae9ab553bf6aa310 .quad 0x050a50a9806d6e1b .quad 0x92bb7403adff5139 .quad 0x0394d27645be618b // 2^52 * 2 * G .quad 0x4d572251857eedf4 .quad 0xe3724edde19e93c5 .quad 0x8a71420e0b797035 .quad 0x3b3c833687abe743 .quad 0xf5396425b23545a4 .quad 0x15a7a27e98fbb296 .quad 0xab6c52bc636fdd86 .quad 0x79d995a8419334ee .quad 0xcd8a8ea61195dd75 .quad 0xa504d8a81dd9a82f .quad 0x540dca81a35879b6 .quad 0x60dd16a379c86a8a // 2^52 * 3 * G .quad 0x35a2c8487381e559 .quad 0x596ffea6d78082cb .quad 0xcb9771ebdba7b653 .quad 0x5a08b5019b4da685 .quad 
0x3501d6f8153e47b8 .quad 0xb7a9675414a2f60c .quad 0x112ee8b6455d9523 .quad 0x4e62a3c18112ea8a .quad 0xc8d4ac04516ab786 .quad 0x595af3215295b23d .quad 0xd6edd234db0230c1 .quad 0x0929efe8825b41cc // 2^52 * 4 * G .quad 0x5f0601d1cbd0f2d3 .quad 0x736e412f6132bb7f .quad 0x83604432238dde87 .quad 0x1e3a5272f5c0753c .quad 0x8b3172b7ad56651d .quad 0x01581b7a3fabd717 .quad 0x2dc94df6424df6e4 .quad 0x30376e5d2c29284f .quad 0xd2918da78159a59c .quad 0x6bdc1cd93f0713f3 .quad 0x565f7a934acd6590 .quad 0x53daacec4cb4c128 // 2^52 * 5 * G .quad 0x4ca73bd79cc8a7d6 .quad 0x4d4a738f47e9a9b2 .quad 0xf4cbf12942f5fe00 .quad 0x01a13ff9bdbf0752 .quad 0x99852bc3852cfdb0 .quad 0x2cc12e9559d6ed0b .quad 0x70f9e2bf9b5ac27b .quad 0x4f3b8c117959ae99 .quad 0x55b6c9c82ff26412 .quad 0x1ac4a8c91fb667a8 .quad 0xd527bfcfeb778bf2 .quad 0x303337da7012a3be // 2^52 * 6 * G .quad 0x955422228c1c9d7c .quad 0x01fac1371a9b340f .quad 0x7e8d9177925b48d7 .quad 0x53f8ad5661b3e31b .quad 0x976d3ccbfad2fdd1 .quad 0xcb88839737a640a8 .quad 0x2ff00c1d6734cb25 .quad 0x269ff4dc789c2d2b .quad 0x0c003fbdc08d678d .quad 0x4d982fa37ead2b17 .quad 0xc07e6bcdb2e582f1 .quad 0x296c7291df412a44 // 2^52 * 7 * G .quad 0x7903de2b33daf397 .quad 0xd0ff0619c9a624b3 .quad 0x8a1d252b555b3e18 .quad 0x2b6d581c52e0b7c0 .quad 0xdfb23205dab8b59e .quad 0x465aeaa0c8092250 .quad 0xd133c1189a725d18 .quad 0x2327370261f117d1 .quad 0x3d0543d3623e7986 .quad 0x679414c2c278a354 .quad 0xae43f0cc726196f6 .quad 0x7836c41f8245eaba // 2^52 * 8 * G .quad 0xe7a254db49e95a81 .quad 0x5192d5d008b0ad73 .quad 0x4d20e5b1d00afc07 .quad 0x5d55f8012cf25f38 .quad 0xca651e848011937c .quad 0xc6b0c46e6ef41a28 .quad 0xb7021ba75f3f8d52 .quad 0x119dff99ead7b9fd .quad 0x43eadfcbf4b31d4d .quad 0xc6503f7411148892 .quad 0xfeee68c5060d3b17 .quad 0x329293b3dd4a0ac8 // 2^56 * 1 * G .quad 0x4e59214fe194961a .quad 0x49be7dc70d71cd4f .quad 0x9300cfd23b50f22d .quad 0x4789d446fc917232 .quad 0x2879852d5d7cb208 .quad 0xb8dedd70687df2e7 .quad 0xdc0bffab21687891 .quad 0x2b44c043677daa35 .quad 0x1a1c87ab074eb78e .quad 0xfac6d18e99daf467 .quad 0x3eacbbcd484f9067 .quad 0x60c52eef2bb9a4e4 // 2^56 * 2 * G .quad 0x0b5d89bc3bfd8bf1 .quad 0xb06b9237c9f3551a .quad 0x0e4c16b0d53028f5 .quad 0x10bc9c312ccfcaab .quad 0x702bc5c27cae6d11 .quad 0x44c7699b54a48cab .quad 0xefbc4056ba492eb2 .quad 0x70d77248d9b6676d .quad 0xaa8ae84b3ec2a05b .quad 0x98699ef4ed1781e0 .quad 0x794513e4708e85d1 .quad 0x63755bd3a976f413 // 2^56 * 3 * G .quad 0xb55fa03e2ad10853 .quad 0x356f75909ee63569 .quad 0x9ff9f1fdbe69b890 .quad 0x0d8cc1c48bc16f84 .quad 0x3dc7101897f1acb7 .quad 0x5dda7d5ec165bbd8 .quad 0x508e5b9c0fa1020f .quad 0x2763751737c52a56 .quad 0x029402d36eb419a9 .quad 0xf0b44e7e77b460a5 .quad 0xcfa86230d43c4956 .quad 0x70c2dd8a7ad166e7 // 2^56 * 4 * G .quad 0x656194509f6fec0e .quad 0xee2e7ea946c6518d .quad 0x9733c1f367e09b5c .quad 0x2e0fac6363948495 .quad 0x91d4967db8ed7e13 .quad 0x74252f0ad776817a .quad 0xe40982e00d852564 .quad 0x32b8613816a53ce5 .quad 0x79e7f7bee448cd64 .quad 0x6ac83a67087886d0 .quad 0xf89fd4d9a0e4db2e .quad 0x4179215c735a4f41 // 2^56 * 5 * G .quad 0x8c7094e7d7dced2a .quad 0x97fb8ac347d39c70 .quad 0xe13be033a906d902 .quad 0x700344a30cd99d76 .quad 0xe4ae33b9286bcd34 .quad 0xb7ef7eb6559dd6dc .quad 0x278b141fb3d38e1f .quad 0x31fa85662241c286 .quad 0xaf826c422e3622f4 .quad 0xc12029879833502d .quad 0x9bc1b7e12b389123 .quad 0x24bb2312a9952489 // 2^56 * 6 * G .quad 0xb1a8ed1732de67c3 .quad 0x3cb49418461b4948 .quad 0x8ebd434376cfbcd2 .quad 0x0fee3e871e188008 .quad 0x41f80c2af5f85c6b .quad 0x687284c304fa6794 .quad 0x8945df99a3ba1bad .quad 
0x0d1d2af9ffeb5d16 .quad 0xa9da8aa132621edf .quad 0x30b822a159226579 .quad 0x4004197ba79ac193 .quad 0x16acd79718531d76 // 2^56 * 7 * G .quad 0x72df72af2d9b1d3d .quad 0x63462a36a432245a .quad 0x3ecea07916b39637 .quad 0x123e0ef6b9302309 .quad 0xc959c6c57887b6ad .quad 0x94e19ead5f90feba .quad 0x16e24e62a342f504 .quad 0x164ed34b18161700 .quad 0x487ed94c192fe69a .quad 0x61ae2cea3a911513 .quad 0x877bf6d3b9a4de27 .quad 0x78da0fc61073f3eb // 2^56 * 8 * G .quad 0x5bf15d28e52bc66a .quad 0x2c47e31870f01a8e .quad 0x2419afbc06c28bdd .quad 0x2d25deeb256b173a .quad 0xa29f80f1680c3a94 .quad 0x71f77e151ae9e7e6 .quad 0x1100f15848017973 .quad 0x054aa4b316b38ddd .quad 0xdfc8468d19267cb8 .quad 0x0b28789c66e54daf .quad 0x2aeb1d2a666eec17 .quad 0x134610a6ab7da760 // 2^60 * 1 * G .quad 0xcaf55ec27c59b23f .quad 0x99aeed3e154d04f2 .quad 0x68441d72e14141f4 .quad 0x140345133932a0a2 .quad 0xd91430e0dc028c3c .quad 0x0eb955a85217c771 .quad 0x4b09e1ed2c99a1fa .quad 0x42881af2bd6a743c .quad 0x7bfec69aab5cad3d .quad 0xc23e8cd34cb2cfad .quad 0x685dd14bfb37d6a2 .quad 0x0ad6d64415677a18 // 2^60 * 2 * G .quad 0x781a439e417becb5 .quad 0x4ac5938cd10e0266 .quad 0x5da385110692ac24 .quad 0x11b065a2ade31233 .quad 0x7914892847927e9f .quad 0x33dad6ef370aa877 .quad 0x1f8f24fa11122703 .quad 0x5265ac2f2adf9592 .quad 0x405fdd309afcb346 .quad 0xd9723d4428e63f54 .quad 0x94c01df05f65aaae .quad 0x43e4dc3ae14c0809 // 2^60 * 3 * G .quad 0xbc12c7f1a938a517 .quad 0x473028ab3180b2e1 .quad 0x3f78571efbcd254a .quad 0x74e534426ff6f90f .quad 0xea6f7ac3adc2c6a3 .quad 0xd0e928f6e9717c94 .quad 0xe2d379ead645eaf5 .quad 0x46dd8785c51ffbbe .quad 0x709801be375c8898 .quad 0x4b06dab5e3fd8348 .quad 0x75880ced27230714 .quad 0x2b09468fdd2f4c42 // 2^60 * 4 * G .quad 0x97c749eeb701cb96 .quad 0x83f438d4b6a369c3 .quad 0x62962b8b9a402cd9 .quad 0x6976c7509888df7b .quad 0x5b97946582ffa02a .quad 0xda096a51fea8f549 .quad 0xa06351375f77af9b .quad 0x1bcfde61201d1e76 .quad 0x4a4a5490246a59a2 .quad 0xd63ebddee87fdd90 .quad 0xd9437c670d2371fa .quad 0x69e87308d30f8ed6 // 2^60 * 5 * G .quad 0x435a8bb15656beb0 .quad 0xf8fac9ba4f4d5bca .quad 0xb9b278c41548c075 .quad 0x3eb0ef76e892b622 .quad 0x0f80bf028bc80303 .quad 0x6aae16b37a18cefb .quad 0xdd47ea47d72cd6a3 .quad 0x61943588f4ed39aa .quad 0xd26e5c3e91039f85 .quad 0xc0e9e77df6f33aa9 .quad 0xe8968c5570066a93 .quad 0x3c34d1881faaaddd // 2^60 * 6 * G .quad 0x3f9d2b5ea09f9ec0 .quad 0x1dab3b6fb623a890 .quad 0xa09ba3ea72d926c4 .quad 0x374193513fd8b36d .quad 0xbd5b0b8f2fffe0d9 .quad 0x6aa254103ed24fb9 .quad 0x2ac7d7bcb26821c4 .quad 0x605b394b60dca36a .quad 0xb4e856e45a9d1ed2 .quad 0xefe848766c97a9a2 .quad 0xb104cf641e5eee7d .quad 0x2f50b81c88a71c8f // 2^60 * 7 * G .quad 0x31723c61fc6811bb .quad 0x9cb450486211800f .quad 0x768933d347995753 .quad 0x3491a53502752fcd .quad 0x2b552ca0a7da522a .quad 0x3230b336449b0250 .quad 0xf2c4c5bca4b99fb9 .quad 0x7b2c674958074a22 .quad 0xd55165883ed28cdf .quad 0x12d84fd2d362de39 .quad 0x0a874ad3e3378e4f .quad 0x000d2b1f7c763e74 // 2^60 * 8 * G .quad 0x3d420811d06d4a67 .quad 0xbefc048590e0ffe3 .quad 0xf870c6b7bd487bde .quad 0x6e2a7316319afa28 .quad 0x9624778c3e94a8ab .quad 0x0ad6f3cee9a78bec .quad 0x948ac7810d743c4f .quad 0x76627935aaecfccc .quad 0x56a8ac24d6d59a9f .quad 0xc8db753e3096f006 .quad 0x477f41e68f4c5299 .quad 0x588d851cf6c86114 // 2^64 * 1 * G .quad 0x51138ec78df6b0fe .quad 0x5397da89e575f51b .quad 0x09207a1d717af1b9 .quad 0x2102fdba2b20d650 .quad 0xcd2a65e777d1f515 .quad 0x548991878faa60f1 .quad 0xb1b73bbcdabc06e5 .quad 0x654878cba97cc9fb .quad 0x969ee405055ce6a1 .quad 0x36bca7681251ad29 .quad 
0x3a1af517aa7da415 .quad 0x0ad725db29ecb2ba // 2^64 * 2 * G .quad 0xdc4267b1834e2457 .quad 0xb67544b570ce1bc5 .quad 0x1af07a0bf7d15ed7 .quad 0x4aefcffb71a03650 .quad 0xfec7bc0c9b056f85 .quad 0x537d5268e7f5ffd7 .quad 0x77afc6624312aefa .quad 0x4f675f5302399fd9 .quad 0xc32d36360415171e .quad 0xcd2bef118998483b .quad 0x870a6eadd0945110 .quad 0x0bccbb72a2a86561 // 2^64 * 3 * G .quad 0x185e962feab1a9c8 .quad 0x86e7e63565147dcd .quad 0xb092e031bb5b6df2 .quad 0x4024f0ab59d6b73e .quad 0x186d5e4c50fe1296 .quad 0xe0397b82fee89f7e .quad 0x3bc7f6c5507031b0 .quad 0x6678fd69108f37c2 .quad 0x1586fa31636863c2 .quad 0x07f68c48572d33f2 .quad 0x4f73cc9f789eaefc .quad 0x2d42e2108ead4701 // 2^64 * 4 * G .quad 0x97f5131594dfd29b .quad 0x6155985d313f4c6a .quad 0xeba13f0708455010 .quad 0x676b2608b8d2d322 .quad 0x21717b0d0f537593 .quad 0x914e690b131e064c .quad 0x1bb687ae752ae09f .quad 0x420bf3a79b423c6e .quad 0x8138ba651c5b2b47 .quad 0x8671b6ec311b1b80 .quad 0x7bff0cb1bc3135b0 .quad 0x745d2ffa9c0cf1e0 // 2^64 * 5 * G .quad 0xbf525a1e2bc9c8bd .quad 0xea5b260826479d81 .quad 0xd511c70edf0155db .quad 0x1ae23ceb960cf5d0 .quad 0x6036df5721d34e6a .quad 0xb1db8827997bb3d0 .quad 0xd3c209c3c8756afa .quad 0x06e15be54c1dc839 .quad 0x5b725d871932994a .quad 0x32351cb5ceb1dab0 .quad 0x7dc41549dab7ca05 .quad 0x58ded861278ec1f7 // 2^64 * 6 * G .quad 0xd8173793f266c55c .quad 0xc8c976c5cc454e49 .quad 0x5ce382f8bc26c3a8 .quad 0x2ff39de85485f6f9 .quad 0x2dfb5ba8b6c2c9a8 .quad 0x48eeef8ef52c598c .quad 0x33809107f12d1573 .quad 0x08ba696b531d5bd8 .quad 0x77ed3eeec3efc57a .quad 0x04e05517d4ff4811 .quad 0xea3d7a3ff1a671cb .quad 0x120633b4947cfe54 // 2^64 * 7 * G .quad 0x0b94987891610042 .quad 0x4ee7b13cecebfae8 .quad 0x70be739594f0a4c0 .quad 0x35d30a99b4d59185 .quad 0x82bd31474912100a .quad 0xde237b6d7e6fbe06 .quad 0xe11e761911ea79c6 .quad 0x07433be3cb393bde .quad 0xff7944c05ce997f4 .quad 0x575d3de4b05c51a3 .quad 0x583381fd5a76847c .quad 0x2d873ede7af6da9f // 2^64 * 8 * G .quad 0x157a316443373409 .quad 0xfab8b7eef4aa81d9 .quad 0xb093fee6f5a64806 .quad 0x2e773654707fa7b6 .quad 0xaa6202e14e5df981 .quad 0xa20d59175015e1f5 .quad 0x18a275d3bae21d6c .quad 0x0543618a01600253 .quad 0x0deabdf4974c23c1 .quad 0xaa6f0a259dce4693 .quad 0x04202cb8a29aba2c .quad 0x4b1443362d07960d // 2^68 * 1 * G .quad 0x47b837f753242cec .quad 0x256dc48cc04212f2 .quad 0xe222fbfbe1d928c5 .quad 0x48ea295bad8a2c07 .quad 0x299b1c3f57c5715e .quad 0x96cb929e6b686d90 .quad 0x3004806447235ab3 .quad 0x2c435c24a44d9fe1 .quad 0x0607c97c80f8833f .quad 0x0e851578ca25ec5b .quad 0x54f7450b161ebb6f .quad 0x7bcb4792a0def80e // 2^68 * 2 * G .quad 0x8487e3d02bc73659 .quad 0x4baf8445059979df .quad 0xd17c975adcad6fbf .quad 0x57369f0bdefc96b6 .quad 0x1cecd0a0045224c2 .quad 0x757f1b1b69e53952 .quad 0x775b7a925289f681 .quad 0x1b6cc62016736148 .quad 0xf1a9990175638698 .quad 0x353dd1beeeaa60d3 .quad 0x849471334c9ba488 .quad 0x63fa6e6843ade311 // 2^68 * 3 * G .quad 0xd15c20536597c168 .quad 0x9f73740098d28789 .quad 0x18aee7f13257ba1f .quad 0x3418bfda07346f14 .quad 0x2195becdd24b5eb7 .quad 0x5e41f18cc0cd44f9 .quad 0xdf28074441ca9ede .quad 0x07073b98f35b7d67 .quad 0xd03c676c4ce530d4 .quad 0x0b64c0473b5df9f4 .quad 0x065cef8b19b3a31e .quad 0x3084d661533102c9 // 2^68 * 4 * G .quad 0xe1f6b79ebf8469ad .quad 0x15801004e2663135 .quad 0x9a498330af74181b .quad 0x3ba2504f049b673c .quad 0x9a6ce876760321fd .quad 0x7fe2b5109eb63ad8 .quad 0x00e7d4ae8ac80592 .quad 0x73d86b7abb6f723a .quad 0x0b52b5606dba5ab6 .quad 0xa9134f0fbbb1edab .quad 0x30a9520d9b04a635 .quad 0x6813b8f37973e5db // 2^68 * 5 * G .quad 
0x9854b054334127c1 .quad 0x105d047882fbff25 .quad 0xdb49f7f944186f4f .quad 0x1768e838bed0b900 .quad 0xf194ca56f3157e29 .quad 0x136d35705ef528a5 .quad 0xdd4cef778b0599bc .quad 0x7d5472af24f833ed .quad 0xd0ef874daf33da47 .quad 0x00d3be5db6e339f9 .quad 0x3f2a8a2f9c9ceece .quad 0x5d1aeb792352435a // 2^68 * 6 * G .quad 0xf59e6bb319cd63ca .quad 0x670c159221d06839 .quad 0xb06d565b2150cab6 .quad 0x20fb199d104f12a3 .quad 0x12c7bfaeb61ba775 .quad 0xb84e621fe263bffd .quad 0x0b47a5c35c840dcf .quad 0x7e83be0bccaf8634 .quad 0x61943dee6d99c120 .quad 0x86101f2e460b9fe0 .quad 0x6bb2f1518ee8598d .quad 0x76b76289fcc475cc // 2^68 * 7 * G .quad 0x791b4cc1756286fa .quad 0xdbced317d74a157c .quad 0x7e732421ea72bde6 .quad 0x01fe18491131c8e9 .quad 0x4245f1a1522ec0b3 .quad 0x558785b22a75656d .quad 0x1d485a2548a1b3c0 .quad 0x60959eccd58fe09f .quad 0x3ebfeb7ba8ed7a09 .quad 0x49fdc2bbe502789c .quad 0x44ebce5d3c119428 .quad 0x35e1eb55be947f4a // 2^68 * 8 * G .quad 0xdbdae701c5738dd3 .quad 0xf9c6f635b26f1bee .quad 0x61e96a8042f15ef4 .quad 0x3aa1d11faf60a4d8 .quad 0x14fd6dfa726ccc74 .quad 0x3b084cfe2f53b965 .quad 0xf33ae4f552a2c8b4 .quad 0x59aab07a0d40166a .quad 0x77bcec4c925eac25 .quad 0x1848718460137738 .quad 0x5b374337fea9f451 .quad 0x1865e78ec8e6aa46 // 2^72 * 1 * G .quad 0xccc4b7c7b66e1f7a .quad 0x44157e25f50c2f7e .quad 0x3ef06dfc713eaf1c .quad 0x582f446752da63f7 .quad 0x967c54e91c529ccb .quad 0x30f6269264c635fb .quad 0x2747aff478121965 .quad 0x17038418eaf66f5c .quad 0xc6317bd320324ce4 .quad 0xa81042e8a4488bc4 .quad 0xb21ef18b4e5a1364 .quad 0x0c2a1c4bcda28dc9 // 2^72 * 2 * G .quad 0xd24dc7d06f1f0447 .quad 0xb2269e3edb87c059 .quad 0xd15b0272fbb2d28f .quad 0x7c558bd1c6f64877 .quad 0xedc4814869bd6945 .quad 0x0d6d907dbe1c8d22 .quad 0xc63bd212d55cc5ab .quad 0x5a6a9b30a314dc83 .quad 0xd0ec1524d396463d .quad 0x12bb628ac35a24f0 .quad 0xa50c3a791cbc5fa4 .quad 0x0404a5ca0afbafc3 // 2^72 * 3 * G .quad 0x8c1f40070aa743d6 .quad 0xccbad0cb5b265ee8 .quad 0x574b046b668fd2de .quad 0x46395bfdcadd9633 .quad 0x62bc9e1b2a416fd1 .quad 0xb5c6f728e350598b .quad 0x04343fd83d5d6967 .quad 0x39527516e7f8ee98 .quad 0x117fdb2d1a5d9a9c .quad 0x9c7745bcd1005c2a .quad 0xefd4bef154d56fea .quad 0x76579a29e822d016 // 2^72 * 4 * G .quad 0x45b68e7e49c02a17 .quad 0x23cd51a2bca9a37f .quad 0x3ed65f11ec224c1b .quad 0x43a384dc9e05bdb1 .quad 0x333cb51352b434f2 .quad 0xd832284993de80e1 .quad 0xb5512887750d35ce .quad 0x02c514bb2a2777c1 .quad 0x684bd5da8bf1b645 .quad 0xfb8bd37ef6b54b53 .quad 0x313916d7a9b0d253 .quad 0x1160920961548059 // 2^72 * 5 * G .quad 0xb44d166929dacfaa .quad 0xda529f4c8413598f .quad 0xe9ef63ca453d5559 .quad 0x351e125bc5698e0b .quad 0x7a385616369b4dcd .quad 0x75c02ca7655c3563 .quad 0x7dc21bf9d4f18021 .quad 0x2f637d7491e6e042 .quad 0xd4b49b461af67bbe .quad 0xd603037ac8ab8961 .quad 0x71dee19ff9a699fb .quad 0x7f182d06e7ce2a9a // 2^72 * 6 * G .quad 0x7a7c8e64ab0168ec .quad 0xcb5a4a5515edc543 .quad 0x095519d347cd0eda .quad 0x67d4ac8c343e93b0 .quad 0x09454b728e217522 .quad 0xaa58e8f4d484b8d8 .quad 0xd358254d7f46903c .quad 0x44acc043241c5217 .quad 0x1c7d6bbb4f7a5777 .quad 0x8b35fed4918313e1 .quad 0x4adca1c6c96b4684 .quad 0x556d1c8312ad71bd // 2^72 * 7 * G .quad 0x17ef40e30c8d3982 .quad 0x31f7073e15a3fa34 .quad 0x4f21f3cb0773646e .quad 0x746c6c6d1d824eff .quad 0x81f06756b11be821 .quad 0x0faff82310a3f3dd .quad 0xf8b2d0556a99465d .quad 0x097abe38cc8c7f05 .quad 0x0c49c9877ea52da4 .quad 0x4c4369559bdc1d43 .quad 0x022c3809f7ccebd2 .quad 0x577e14a34bee84bd // 2^72 * 8 * G .quad 0xf0e268ac61a73b0a .quad 0xf2fafa103791a5f5 .quad 0xc1e13e826b6d00e9 .quad 
0x60fa7ee96fd78f42 .quad 0x94fecebebd4dd72b .quad 0xf46a4fda060f2211 .quad 0x124a5977c0c8d1ff .quad 0x705304b8fb009295 .quad 0xb63d1d354d296ec6 .quad 0xf3c3053e5fad31d8 .quad 0x670b958cb4bd42ec .quad 0x21398e0ca16353fd // 2^76 * 1 * G .quad 0x216ab2ca8da7d2ef .quad 0x366ad9dd99f42827 .quad 0xae64b9004fdd3c75 .quad 0x403a395b53909e62 .quad 0x86c5fc16861b7e9a .quad 0xf6a330476a27c451 .quad 0x01667267a1e93597 .quad 0x05ffb9cd6082dfeb .quad 0xa617fa9ff53f6139 .quad 0x60f2b5e513e66cb6 .quad 0xd7a8beefb3448aa4 .quad 0x7a2932856f5ea192 // 2^76 * 2 * G .quad 0x0b39d761b02de888 .quad 0x5f550e7ed2414e1f .quad 0xa6bfa45822e1a940 .quad 0x050a2f7dfd447b99 .quad 0xb89c444879639302 .quad 0x4ae4f19350c67f2c .quad 0xf0b35da8c81af9c6 .quad 0x39d0003546871017 .quad 0x437c3b33a650db77 .quad 0x6bafe81dbac52bb2 .quad 0xfe99402d2db7d318 .quad 0x2b5b7eec372ba6ce // 2^76 * 3 * G .quad 0xb3bc4bbd83f50eef .quad 0x508f0c998c927866 .quad 0x43e76587c8b7e66e .quad 0x0f7655a3a47f98d9 .quad 0xa694404d613ac8f4 .quad 0x500c3c2bfa97e72c .quad 0x874104d21fcec210 .quad 0x1b205fb38604a8ee .quad 0x55ecad37d24b133c .quad 0x441e147d6038c90b .quad 0x656683a1d62c6fee .quad 0x0157d5dc87e0ecae // 2^76 * 4 * G .quad 0xf2a7af510354c13d .quad 0xd7a0b145aa372b60 .quad 0x2869b96a05a3d470 .quad 0x6528e42d82460173 .quad 0x95265514d71eb524 .quad 0xe603d8815df14593 .quad 0x147cdf410d4de6b7 .quad 0x5293b1730437c850 .quad 0x23d0e0814bccf226 .quad 0x92c745cd8196fb93 .quad 0x8b61796c59541e5b .quad 0x40a44df0c021f978 // 2^76 * 5 * G .quad 0xdaa869894f20ea6a .quad 0xea14a3d14c620618 .quad 0x6001fccb090bf8be .quad 0x35f4e822947e9cf0 .quad 0x86c96e514bc5d095 .quad 0xf20d4098fca6804a .quad 0x27363d89c826ea5d .quad 0x39ca36565719cacf .quad 0x97506f2f6f87b75c .quad 0xc624aea0034ae070 .quad 0x1ec856e3aad34dd6 .quad 0x055b0be0e440e58f // 2^76 * 6 * G .quad 0x6469a17d89735d12 .quad 0xdb6f27d5e662b9f1 .quad 0x9fcba3286a395681 .quad 0x363b8004d269af25 .quad 0x4d12a04b6ea33da2 .quad 0x57cf4c15e36126dd .quad 0x90ec9675ee44d967 .quad 0x64ca348d2a985aac .quad 0x99588e19e4c4912d .quad 0xefcc3b4e1ca5ce6b .quad 0x4522ea60fa5b98d5 .quad 0x7064bbab1de4a819 // 2^76 * 7 * G .quad 0xb919e1515a770641 .quad 0xa9a2e2c74e7f8039 .quad 0x7527250b3df23109 .quad 0x756a7330ac27b78b .quad 0xa290c06142542129 .quad 0xf2e2c2aebe8d5b90 .quad 0xcf2458db76abfe1b .quad 0x02157ade83d626bf .quad 0x3e46972a1b9a038b .quad 0x2e4ee66a7ee03fb4 .quad 0x81a248776edbb4ca .quad 0x1a944ee88ecd0563 // 2^76 * 8 * G .quad 0xd5a91d1151039372 .quad 0x2ed377b799ca26de .quad 0xa17202acfd366b6b .quad 0x0730291bd6901995 .quad 0xbb40a859182362d6 .quad 0xb99f55778a4d1abb .quad 0x8d18b427758559f6 .quad 0x26c20fe74d26235a .quad 0x648d1d9fe9cc22f5 .quad 0x66bc561928dd577c .quad 0x47d3ed21652439d1 .quad 0x49d271acedaf8b49 // 2^80 * 1 * G .quad 0x89f5058a382b33f3 .quad 0x5ae2ba0bad48c0b4 .quad 0x8f93b503a53db36e .quad 0x5aa3ed9d95a232e6 .quad 0x2798aaf9b4b75601 .quad 0x5eac72135c8dad72 .quad 0xd2ceaa6161b7a023 .quad 0x1bbfb284e98f7d4e .quad 0x656777e9c7d96561 .quad 0xcb2b125472c78036 .quad 0x65053299d9506eee .quad 0x4a07e14e5e8957cc // 2^80 * 2 * G .quad 0x4ee412cb980df999 .quad 0xa315d76f3c6ec771 .quad 0xbba5edde925c77fd .quad 0x3f0bac391d313402 .quad 0x240b58cdc477a49b .quad 0xfd38dade6447f017 .quad 0x19928d32a7c86aad .quad 0x50af7aed84afa081 .quad 0x6e4fde0115f65be5 .quad 0x29982621216109b2 .quad 0x780205810badd6d9 .quad 0x1921a316baebd006 // 2^80 * 3 * G .quad 0x89422f7edfb870fc .quad 0x2c296beb4f76b3bd .quad 0x0738f1d436c24df7 .quad 0x6458df41e273aeb0 .quad 0xd75aad9ad9f3c18b .quad 0x566a0eef60b1c19c .quad 
0x3e9a0bac255c0ed9 .quad 0x7b049deca062c7f5 .quad 0xdccbe37a35444483 .quad 0x758879330fedbe93 .quad 0x786004c312c5dd87 .quad 0x6093dccbc2950e64 // 2^80 * 4 * G .quad 0x1ff39a8585e0706d .quad 0x36d0a5d8b3e73933 .quad 0x43b9f2e1718f453b .quad 0x57d1ea084827a97c .quad 0x6bdeeebe6084034b .quad 0x3199c2b6780fb854 .quad 0x973376abb62d0695 .quad 0x6e3180c98b647d90 .quad 0xee7ab6e7a128b071 .quad 0xa4c1596d93a88baa .quad 0xf7b4de82b2216130 .quad 0x363e999ddd97bd18 // 2^80 * 5 * G .quad 0x96a843c135ee1fc4 .quad 0x976eb35508e4c8cf .quad 0xb42f6801b58cd330 .quad 0x48ee9b78693a052b .quad 0x2f1848dce24baec6 .quad 0x769b7255babcaf60 .quad 0x90cb3c6e3cefe931 .quad 0x231f979bc6f9b355 .quad 0x5c31de4bcc2af3c6 .quad 0xb04bb030fe208d1f .quad 0xb78d7009c14fb466 .quad 0x079bfa9b08792413 // 2^80 * 6 * G .quad 0xe3903a51da300df4 .quad 0x843964233da95ab0 .quad 0xed3cf12d0b356480 .quad 0x038c77f684817194 .quad 0xf3c9ed80a2d54245 .quad 0x0aa08b7877f63952 .quad 0xd76dac63d1085475 .quad 0x1ef4fb159470636b .quad 0x854e5ee65b167bec .quad 0x59590a4296d0cdc2 .quad 0x72b2df3498102199 .quad 0x575ee92a4a0bff56 // 2^80 * 7 * G .quad 0xd4c080908a182fcf .quad 0x30e170c299489dbd .quad 0x05babd5752f733de .quad 0x43d4e7112cd3fd00 .quad 0x5d46bc450aa4d801 .quad 0xc3af1227a533b9d8 .quad 0x389e3b262b8906c2 .quad 0x200a1e7e382f581b .quad 0x518db967eaf93ac5 .quad 0x71bc989b056652c0 .quad 0xfe2b85d9567197f5 .quad 0x050eca52651e4e38 // 2^80 * 8 * G .quad 0xc3431ade453f0c9c .quad 0xe9f5045eff703b9b .quad 0xfcd97ac9ed847b3d .quad 0x4b0ee6c21c58f4c6 .quad 0x97ac397660e668ea .quad 0x9b19bbfe153ab497 .quad 0x4cb179b534eca79f .quad 0x6151c09fa131ae57 .quad 0x3af55c0dfdf05d96 .quad 0xdd262ee02ab4ee7a .quad 0x11b2bb8712171709 .quad 0x1fef24fa800f030b // 2^84 * 1 * G .quad 0xb496123a6b6c6609 .quad 0xa750fe8580ab5938 .quad 0xf471bf39b7c27a5f .quad 0x507903ce77ac193c .quad 0xff91a66a90166220 .quad 0xf22552ae5bf1e009 .quad 0x7dff85d87f90df7c .quad 0x4f620ffe0c736fb9 .quad 0x62f90d65dfde3e34 .quad 0xcf28c592b9fa5fad .quad 0x99c86ef9c6164510 .quad 0x25d448044a256c84 // 2^84 * 2 * G .quad 0xbd68230ec7e9b16f .quad 0x0eb1b9c1c1c5795d .quad 0x7943c8c495b6b1ff .quad 0x2f9faf620bbacf5e .quad 0x2c7c4415c9022b55 .quad 0x56a0d241812eb1fe .quad 0xf02ea1c9d7b65e0d .quad 0x4180512fd5323b26 .quad 0xa4ff3e698a48a5db .quad 0xba6a3806bd95403b .quad 0x9f7ce1af47d5b65d .quad 0x15e087e55939d2fb // 2^84 * 3 * G .quad 0x12207543745c1496 .quad 0xdaff3cfdda38610c .quad 0xe4e797272c71c34f .quad 0x39c07b1934bdede9 .quad 0x8894186efb963f38 .quad 0x48a00e80dc639bd5 .quad 0xa4e8092be96c1c99 .quad 0x5a097d54ca573661 .quad 0x2d45892b17c9e755 .quad 0xd033fd7289308df8 .quad 0x6c2fe9d9525b8bd9 .quad 0x2edbecf1c11cc079 // 2^84 * 4 * G .quad 0x1616a4e3c715a0d2 .quad 0x53623cb0f8341d4d .quad 0x96ef5329c7e899cb .quad 0x3d4e8dbba668baa6 .quad 0xee0f0fddd087a25f .quad 0x9c7531555c3e34ee .quad 0x660c572e8fab3ab5 .quad 0x0854fc44544cd3b2 .quad 0x61eba0c555edad19 .quad 0x24b533fef0a83de6 .quad 0x3b77042883baa5f8 .quad 0x678f82b898a47e8d // 2^84 * 5 * G .quad 0xb1491d0bd6900c54 .quad 0x3539722c9d132636 .quad 0x4db928920b362bc9 .quad 0x4d7cd1fea68b69df .quad 0x1e09d94057775696 .quad 0xeed1265c3cd951db .quad 0xfa9dac2b20bce16f .quad 0x0f7f76e0e8d089f4 .quad 0x36d9ebc5d485b00c .quad 0xa2596492e4adb365 .quad 0xc1659480c2119ccd .quad 0x45306349186e0d5f // 2^84 * 6 * G .quad 0x94ddd0c1a6cdff1d .quad 0x55f6f115e84213ae .quad 0x6c935f85992fcf6a .quad 0x067ee0f54a37f16f .quad 0x96a414ec2b072491 .quad 0x1bb2218127a7b65b .quad 0x6d2849596e8a4af0 .quad 0x65f3b08ccd27765f .quad 0xecb29fff199801f7 .quad 
0x9d361d1fa2a0f72f .quad 0x25f11d2375fd2f49 .quad 0x124cefe80fe10fe2 // 2^84 * 7 * G .quad 0x4c126cf9d18df255 .quad 0xc1d471e9147a63b6 .quad 0x2c6d3c73f3c93b5f .quad 0x6be3a6a2e3ff86a2 .quad 0x1518e85b31b16489 .quad 0x8faadcb7db710bfb .quad 0x39b0bdf4a14ae239 .quad 0x05f4cbea503d20c1 .quad 0xce040e9ec04145bc .quad 0xc71ff4e208f6834c .quad 0xbd546e8dab8847a3 .quad 0x64666aa0a4d2aba5 // 2^84 * 8 * G .quad 0x6841435a7c06d912 .quad 0xca123c21bb3f830b .quad 0xd4b37b27b1cbe278 .quad 0x1d753b84c76f5046 .quad 0xb0c53bf73337e94c .quad 0x7cb5697e11e14f15 .quad 0x4b84abac1930c750 .quad 0x28dd4abfe0640468 .quad 0x7dc0b64c44cb9f44 .quad 0x18a3e1ace3925dbf .quad 0x7a3034862d0457c4 .quad 0x4c498bf78a0c892e // 2^88 * 1 * G .quad 0x37d653fb1aa73196 .quad 0x0f9495303fd76418 .quad 0xad200b09fb3a17b2 .quad 0x544d49292fc8613e .quad 0x22d2aff530976b86 .quad 0x8d90b806c2d24604 .quad 0xdca1896c4de5bae5 .quad 0x28005fe6c8340c17 .quad 0x6aefba9f34528688 .quad 0x5c1bff9425107da1 .quad 0xf75bbbcd66d94b36 .quad 0x72e472930f316dfa // 2^88 * 2 * G .quad 0x2695208c9781084f .quad 0xb1502a0b23450ee1 .quad 0xfd9daea603efde02 .quad 0x5a9d2e8c2733a34c .quad 0x07f3f635d32a7627 .quad 0x7aaa4d865f6566f0 .quad 0x3c85e79728d04450 .quad 0x1fee7f000fe06438 .quad 0x765305da03dbf7e5 .quad 0xa4daf2491434cdbd .quad 0x7b4ad5cdd24a88ec .quad 0x00f94051ee040543 // 2^88 * 3 * G .quad 0x8d356b23c3d330b2 .quad 0xf21c8b9bb0471b06 .quad 0xb36c316c6e42b83c .quad 0x07d79c7e8beab10d .quad 0xd7ef93bb07af9753 .quad 0x583ed0cf3db766a7 .quad 0xce6998bf6e0b1ec5 .quad 0x47b7ffd25dd40452 .quad 0x87fbfb9cbc08dd12 .quad 0x8a066b3ae1eec29b .quad 0x0d57242bdb1fc1bf .quad 0x1c3520a35ea64bb6 // 2^88 * 4 * G .quad 0x80d253a6bccba34a .quad 0x3e61c3a13838219b .quad 0x90c3b6019882e396 .quad 0x1c3d05775d0ee66f .quad 0xcda86f40216bc059 .quad 0x1fbb231d12bcd87e .quad 0xb4956a9e17c70990 .quad 0x38750c3b66d12e55 .quad 0x692ef1409422e51a .quad 0xcbc0c73c2b5df671 .quad 0x21014fe7744ce029 .quad 0x0621e2c7d330487c // 2^88 * 5 * G .quad 0xaf9860cc8259838d .quad 0x90ea48c1c69f9adc .quad 0x6526483765581e30 .quad 0x0007d6097bd3a5bc .quad 0xb7ae1796b0dbf0f3 .quad 0x54dfafb9e17ce196 .quad 0x25923071e9aaa3b4 .quad 0x5d8e589ca1002e9d .quad 0xc0bf1d950842a94b .quad 0xb2d3c363588f2e3e .quad 0x0a961438bb51e2ef .quad 0x1583d7783c1cbf86 // 2^88 * 6 * G .quad 0xeceea2ef5da27ae1 .quad 0x597c3a1455670174 .quad 0xc9a62a126609167a .quad 0x252a5f2e81ed8f70 .quad 0x90034704cc9d28c7 .quad 0x1d1b679ef72cc58f .quad 0x16e12b5fbe5b8726 .quad 0x4958064e83c5580a .quad 0x0d2894265066e80d .quad 0xfcc3f785307c8c6b .quad 0x1b53da780c1112fd .quad 0x079c170bd843b388 // 2^88 * 7 * G .quad 0x0506ece464fa6fff .quad 0xbee3431e6205e523 .quad 0x3579422451b8ea42 .quad 0x6dec05e34ac9fb00 .quad 0xcdd6cd50c0d5d056 .quad 0x9af7686dbb03573b .quad 0x3ca6723ff3c3ef48 .quad 0x6768c0d7317b8acc .quad 0x94b625e5f155c1b3 .quad 0x417bf3a7997b7b91 .quad 0xc22cbddc6d6b2600 .quad 0x51445e14ddcd52f4 // 2^88 * 8 * G .quad 0x57502b4b3b144951 .quad 0x8e67ff6b444bbcb3 .quad 0xb8bd6927166385db .quad 0x13186f31e39295c8 .quad 0x893147ab2bbea455 .quad 0x8c53a24f92079129 .quad 0x4b49f948be30f7a7 .quad 0x12e990086e4fd43d .quad 0xf10c96b37fdfbb2e .quad 0x9f9a935e121ceaf9 .quad 0xdf1136c43a5b983f .quad 0x77b2e3f05d3e99af // 2^92 * 1 * G .quad 0xfd0d75879cf12657 .quad 0xe82fef94e53a0e29 .quad 0xcc34a7f05bbb4be7 .quad 0x0b251172a50c38a2 .quad 0x9532f48fcc5cd29b .quad 0x2ba851bea3ce3671 .quad 0x32dacaa051122941 .quad 0x478d99d9350004f2 .quad 0x1d5ad94890bb02c0 .quad 0x50e208b10ec25115 .quad 0xa26a22894ef21702 .quad 0x4dc923343b524805 // 2^92 * 
2 * G .quad 0xe3828c400f8086b6 .quad 0x3f77e6f7979f0dc8 .quad 0x7ef6de304df42cb4 .quad 0x5265797cb6abd784 .quad 0x3ad3e3ebf36c4975 .quad 0xd75d25a537862125 .quad 0xe873943da025a516 .quad 0x6bbc7cb4c411c847 .quad 0x3c6f9cd1d4a50d56 .quad 0xb6244077c6feab7e .quad 0x6ff9bf483580972e .quad 0x00375883b332acfb // 2^92 * 3 * G .quad 0x0001b2cd28cb0940 .quad 0x63fb51a06f1c24c9 .quad 0xb5ad8691dcd5ca31 .quad 0x67238dbd8c450660 .quad 0xc98bec856c75c99c .quad 0xe44184c000e33cf4 .quad 0x0a676b9bba907634 .quad 0x669e2cb571f379d7 .quad 0xcb116b73a49bd308 .quad 0x025aad6b2392729e .quad 0xb4793efa3f55d9b1 .quad 0x72a1056140678bb9 // 2^92 * 4 * G .quad 0xa2b6812b1cc9249d .quad 0x62866eee21211f58 .quad 0x2cb5c5b85df10ece .quad 0x03a6b259e263ae00 .quad 0x0d8d2909e2e505b6 .quad 0x98ca78abc0291230 .quad 0x77ef5569a9b12327 .quad 0x7c77897b81439b47 .quad 0xf1c1b5e2de331cb5 .quad 0x5a9f5d8e15fca420 .quad 0x9fa438f17bd932b1 .quad 0x2a381bf01c6146e7 // 2^92 * 5 * G .quad 0xac9b9879cfc811c1 .quad 0x8b7d29813756e567 .quad 0x50da4e607c70edfc .quad 0x5dbca62f884400b6 .quad 0xf7c0be32b534166f .quad 0x27e6ca6419cf70d4 .quad 0x934df7d7a957a759 .quad 0x5701461dabdec2aa .quad 0x2c6747402c915c25 .quad 0x1bdcd1a80b0d340a .quad 0x5e5601bd07b43f5f .quad 0x2555b4e05539a242 // 2^92 * 6 * G .quad 0x6fc09f5266ddd216 .quad 0xdce560a7c8e37048 .quad 0xec65939da2df62fd .quad 0x7a869ae7e52ed192 .quad 0x78409b1d87e463d4 .quad 0xad4da95acdfb639d .quad 0xec28773755259b9c .quad 0x69c806e9c31230ab .quad 0x7b48f57414bb3f22 .quad 0x68c7cee4aedccc88 .quad 0xed2f936179ed80be .quad 0x25d70b885f77bc4b // 2^92 * 7 * G .quad 0x4151c3d9762bf4de .quad 0x083f435f2745d82b .quad 0x29775a2e0d23ddd5 .quad 0x138e3a6269a5db24 .quad 0x98459d29bb1ae4d4 .quad 0x56b9c4c739f954ec .quad 0x832743f6c29b4b3e .quad 0x21ea8e2798b6878a .quad 0x87bef4b46a5a7b9c .quad 0xd2299d1b5fc1d062 .quad 0x82409818dd321648 .quad 0x5c5abeb1e5a2e03d // 2^92 * 8 * G .quad 0x14722af4b73c2ddb .quad 0xbc470c5f5a05060d .quad 0x00943eac2581b02e .quad 0x0e434b3b1f499c8f .quad 0x02cde6de1306a233 .quad 0x7b5a52a2116f8ec7 .quad 0xe1c681f4c1163b5b .quad 0x241d350660d32643 .quad 0x6be4404d0ebc52c7 .quad 0xae46233bb1a791f5 .quad 0x2aec170ed25db42b .quad 0x1d8dfd966645d694 // 2^96 * 1 * G .quad 0x296fa9c59c2ec4de .quad 0xbc8b61bf4f84f3cb .quad 0x1c7706d917a8f908 .quad 0x63b795fc7ad3255d .quad 0xd598639c12ddb0a4 .quad 0xa5d19f30c024866b .quad 0xd17c2f0358fce460 .quad 0x07a195152e095e8a .quad 0xa8368f02389e5fc8 .quad 0x90433b02cf8de43b .quad 0xafa1fd5dc5412643 .quad 0x3e8fe83d032f0137 // 2^96 * 2 * G .quad 0x2f8b15b90570a294 .quad 0x94f2427067084549 .quad 0xde1c5ae161bbfd84 .quad 0x75ba3b797fac4007 .quad 0x08704c8de8efd13c .quad 0xdfc51a8e33e03731 .quad 0xa59d5da51260cde3 .quad 0x22d60899a6258c86 .quad 0x6239dbc070cdd196 .quad 0x60fe8a8b6c7d8a9a .quad 0xb38847bceb401260 .quad 0x0904d07b87779e5e // 2^96 * 3 * G .quad 0xb4ce1fd4ddba919c .quad 0xcf31db3ec74c8daa .quad 0x2c63cc63ad86cc51 .quad 0x43e2143fbc1dde07 .quad 0xf4322d6648f940b9 .quad 0x06952f0cbd2d0c39 .quad 0x167697ada081f931 .quad 0x6240aacebaf72a6c .quad 0xf834749c5ba295a0 .quad 0xd6947c5bca37d25a .quad 0x66f13ba7e7c9316a .quad 0x56bdaf238db40cac // 2^96 * 4 * G .quad 0x362ab9e3f53533eb .quad 0x338568d56eb93d40 .quad 0x9e0e14521d5a5572 .quad 0x1d24a86d83741318 .quad 0x1310d36cc19d3bb2 .quad 0x062a6bb7622386b9 .quad 0x7c9b8591d7a14f5c .quad 0x03aa31507e1e5754 .quad 0xf4ec7648ffd4ce1f .quad 0xe045eaf054ac8c1c .quad 0x88d225821d09357c .quad 0x43b261dc9aeb4859 // 2^96 * 5 * G .quad 0xe55b1e1988bb79bb .quad 0xa09ed07dc17a359d .quad 
0xb02c2ee2603dea33 .quad 0x326055cf5b276bc2 .quad 0x19513d8b6c951364 .quad 0x94fe7126000bf47b .quad 0x028d10ddd54f9567 .quad 0x02b4d5e242940964 .quad 0xb4a155cb28d18df2 .quad 0xeacc4646186ce508 .quad 0xc49cf4936c824389 .quad 0x27a6c809ae5d3410 // 2^96 * 6 * G .quad 0x8ba6ebcd1f0db188 .quad 0x37d3d73a675a5be8 .quad 0xf22edfa315f5585a .quad 0x2cb67174ff60a17e .quad 0xcd2c270ac43d6954 .quad 0xdd4a3e576a66cab2 .quad 0x79fa592469d7036c .quad 0x221503603d8c2599 .quad 0x59eecdf9390be1d0 .quad 0xa9422044728ce3f1 .quad 0x82891c667a94f0f4 .quad 0x7b1df4b73890f436 // 2^96 * 7 * G .quad 0xe492f2e0b3b2a224 .quad 0x7c6c9e062b551160 .quad 0x15eb8fe20d7f7b0e .quad 0x61fcef2658fc5992 .quad 0x5f2e221807f8f58c .quad 0xe3555c9fd49409d4 .quad 0xb2aaa88d1fb6a630 .quad 0x68698245d352e03d .quad 0xdbb15d852a18187a .quad 0xf3e4aad386ddacd7 .quad 0x44bae2810ff6c482 .quad 0x46cf4c473daf01cf // 2^96 * 8 * G .quad 0x426525ed9ec4e5f9 .quad 0x0e5eda0116903303 .quad 0x72b1a7f2cbe5cadc .quad 0x29387bcd14eb5f40 .quad 0x213c6ea7f1498140 .quad 0x7c1e7ef8392b4854 .quad 0x2488c38c5629ceba .quad 0x1065aae50d8cc5bb .quad 0x1c2c4525df200d57 .quad 0x5c3b2dd6bfca674a .quad 0x0a07e7b1e1834030 .quad 0x69a198e64f1ce716 // 2^100 * 1 * G .quad 0x7afcd613efa9d697 .quad 0x0cc45aa41c067959 .quad 0xa56fe104c1fada96 .quad 0x3a73b70472e40365 .quad 0x7b26e56b9e2d4734 .quad 0xc4c7132b81c61675 .quad 0xef5c9525ec9cde7f .quad 0x39c80b16e71743ad .quad 0x0f196e0d1b826c68 .quad 0xf71ff0e24960e3db .quad 0x6113167023b7436c .quad 0x0cf0ea5877da7282 // 2^100 * 2 * G .quad 0x196c80a4ddd4ccbd .quad 0x22e6f55d95f2dd9d .quad 0xc75e33c740d6c71b .quad 0x7bb51279cb3c042f .quad 0xe332ced43ba6945a .quad 0xde0b1361e881c05d .quad 0x1ad40f095e67ed3b .quad 0x5da8acdab8c63d5d .quad 0xc4b6664a3a70159f .quad 0x76194f0f0a904e14 .quad 0xa5614c39a4096c13 .quad 0x6cd0ff50979feced // 2^100 * 3 * G .quad 0xc0e067e78f4428ac .quad 0x14835ab0a61135e3 .quad 0xf21d14f338062935 .quad 0x6390a4c8df04849c .quad 0x7fecfabdb04ba18e .quad 0xd0fc7bfc3bddbcf7 .quad 0xa41d486e057a131c .quad 0x641a4391f2223a61 .quad 0xc5c6b95aa606a8db .quad 0x914b7f9eb06825f1 .quad 0x2a731f6b44fc9eff .quad 0x30ddf38562705cfc // 2^100 * 4 * G .quad 0x4e3dcbdad1bff7f9 .quad 0xc9118e8220645717 .quad 0xbacccebc0f189d56 .quad 0x1b4822e9d4467668 .quad 0x33bef2bd68bcd52c .quad 0xc649dbb069482ef2 .quad 0xb5b6ee0c41cb1aee .quad 0x5c294d270212a7e5 .quad 0xab360a7f25563781 .quad 0x2512228a480f7958 .quad 0xc75d05276114b4e3 .quad 0x222d9625d976fe2a // 2^100 * 5 * G .quad 0x1c717f85b372ace1 .quad 0x81930e694638bf18 .quad 0x239cad056bc08b58 .quad 0x0b34271c87f8fff4 .quad 0x0f94be7e0a344f85 .quad 0xeb2faa8c87f22c38 .quad 0x9ce1e75e4ee16f0f .quad 0x43e64e5418a08dea .quad 0x8155e2521a35ce63 .quad 0xbe100d4df912028e .quad 0xbff80bf8a57ddcec .quad 0x57342dc96d6bc6e4 // 2^100 * 6 * G .quad 0xefeef065c8ce5998 .quad 0xbf029510b5cbeaa2 .quad 0x8c64a10620b7c458 .quad 0x35134fb231c24855 .quad 0xf3c3bcb71e707bf6 .quad 0x351d9b8c7291a762 .quad 0x00502e6edad69a33 .quad 0x522f521f1ec8807f .quad 0x272c1f46f9a3902b .quad 0xc91ba3b799657bcc .quad 0xae614b304f8a1c0e .quad 0x7afcaad70b99017b // 2^100 * 7 * G .quad 0xc25ded54a4b8be41 .quad 0x902d13e11bb0e2dd .quad 0x41f43233cde82ab2 .quad 0x1085faa5c3aae7cb .quad 0xa88141ecef842b6b .quad 0x55e7b14797abe6c5 .quad 0x8c748f9703784ffe .quad 0x5b50a1f7afcd00b7 .quad 0x9b840f66f1361315 .quad 0x18462242701003e9 .quad 0x65ed45fae4a25080 .quad 0x0a2862393fda7320 // 2^100 * 8 * G .quad 0x46ab13c8347cbc9d .quad 0x3849e8d499c12383 .quad 0x4cea314087d64ac9 .quad 0x1f354134b1a29ee7 .quad 0x960e737b6ecb9d17 
.quad 0xfaf24948d67ceae1 .quad 0x37e7a9b4d55e1b89 .quad 0x5cb7173cb46c59eb .quad 0x4a89e68b82b7abf0 .quad 0xf41cd9279ba6b7b9 .quad 0x16e6c210e18d876f .quad 0x7cacdb0f7f1b09c6 // 2^104 * 1 * G .quad 0x9062b2e0d91a78bc .quad 0x47c9889cc8509667 .quad 0x9df54a66405070b8 .quad 0x7369e6a92493a1bf .quad 0xe1014434dcc5caed .quad 0x47ed5d963c84fb33 .quad 0x70019576ed86a0e7 .quad 0x25b2697bd267f9e4 .quad 0x9d673ffb13986864 .quad 0x3ca5fbd9415dc7b8 .quad 0xe04ecc3bdf273b5e .quad 0x1420683db54e4cd2 // 2^104 * 2 * G .quad 0xb478bd1e249dd197 .quad 0x620c35005e58c102 .quad 0xfb02d32fccbaac5c .quad 0x60b63bebf508a72d .quad 0x34eebb6fc1cc5ad0 .quad 0x6a1b0ce99646ac8b .quad 0xd3b0da49a66bde53 .quad 0x31e83b4161d081c1 .quad 0x97e8c7129e062b4f .quad 0x49e48f4f29320ad8 .quad 0x5bece14b6f18683f .quad 0x55cf1eb62d550317 // 2^104 * 3 * G .quad 0x5879101065c23d58 .quad 0x8b9d086d5094819c .quad 0xe2402fa912c55fa7 .quad 0x669a6564570891d4 .quad 0x3076b5e37df58c52 .quad 0xd73ab9dde799cc36 .quad 0xbd831ce34913ee20 .quad 0x1a56fbaa62ba0133 .quad 0x943e6b505c9dc9ec .quad 0x302557bba77c371a .quad 0x9873ae5641347651 .quad 0x13c4836799c58a5c // 2^104 * 4 * G .quad 0x423a5d465ab3e1b9 .quad 0xfc13c187c7f13f61 .quad 0x19f83664ecb5b9b6 .quad 0x66f80c93a637b607 .quad 0xc4dcfb6a5d8bd080 .quad 0xdeebc4ec571a4842 .quad 0xd4b2e883b8e55365 .quad 0x50bdc87dc8e5b827 .quad 0x606d37836edfe111 .quad 0x32353e15f011abd9 .quad 0x64b03ac325b73b96 .quad 0x1dd56444725fd5ae // 2^104 * 5 * G .quad 0x8fa47ff83362127d .quad 0xbc9f6ac471cd7c15 .quad 0x6e71454349220c8b .quad 0x0e645912219f732e .quad 0xc297e60008bac89a .quad 0x7d4cea11eae1c3e0 .quad 0xf3e38be19fe7977c .quad 0x3a3a450f63a305cd .quad 0x078f2f31d8394627 .quad 0x389d3183de94a510 .quad 0xd1e36c6d17996f80 .quad 0x318c8d9393a9a87b // 2^104 * 6 * G .quad 0xf2745d032afffe19 .quad 0x0c9f3c497f24db66 .quad 0xbc98d3e3ba8598ef .quad 0x224c7c679a1d5314 .quad 0x5d669e29ab1dd398 .quad 0xfc921658342d9e3b .quad 0x55851dfdf35973cd .quad 0x509a41c325950af6 .quad 0xbdc06edca6f925e9 .quad 0x793ef3f4641b1f33 .quad 0x82ec12809d833e89 .quad 0x05bff02328a11389 // 2^104 * 7 * G .quad 0x3632137023cae00b .quad 0x544acf0ad1accf59 .quad 0x96741049d21a1c88 .quad 0x780b8cc3fa2a44a7 .quad 0x6881a0dd0dc512e4 .quad 0x4fe70dc844a5fafe .quad 0x1f748e6b8f4a5240 .quad 0x576277cdee01a3ea .quad 0x1ef38abc234f305f .quad 0x9a577fbd1405de08 .quad 0x5e82a51434e62a0d .quad 0x5ff418726271b7a1 // 2^104 * 8 * G .quad 0x398e080c1789db9d .quad 0xa7602025f3e778f5 .quad 0xfa98894c06bd035d .quad 0x106a03dc25a966be .quad 0xe5db47e813b69540 .quad 0xf35d2a3b432610e1 .quad 0xac1f26e938781276 .quad 0x29d4db8ca0a0cb69 .quad 0xd9ad0aaf333353d0 .quad 0x38669da5acd309e5 .quad 0x3c57658ac888f7f0 .quad 0x4ab38a51052cbefa // 2^108 * 1 * G .quad 0xdfdacbee4324c0e9 .quad 0x054442883f955bb7 .quad 0xdef7aaa8ea31609f .quad 0x68aee70642287cff .quad 0xf68fe2e8809de054 .quad 0xe3bc096a9c82bad1 .quad 0x076353d40aadbf45 .quad 0x7b9b1fb5dea1959e .quad 0xf01cc8f17471cc0c .quad 0x95242e37579082bb .quad 0x27776093d3e46b5f .quad 0x2d13d55a28bd85fb // 2^108 * 2 * G .quad 0xfac5d2065b35b8da .quad 0xa8da8a9a85624bb7 .quad 0xccd2ca913d21cd0f .quad 0x6b8341ee8bf90d58 .quad 0xbf019cce7aee7a52 .quad 0xa8ded2b6e454ead3 .quad 0x3c619f0b87a8bb19 .quad 0x3619b5d7560916d8 .quad 0x3579f26b0282c4b2 .quad 0x64d592f24fafefae .quad 0xb7cded7b28c8c7c0 .quad 0x6a927b6b7173a8d7 // 2^108 * 3 * G .quad 0x1f6db24f986e4656 .quad 0x1021c02ed1e9105b .quad 0xf8ff3fff2cc0a375 .quad 0x1d2a6bf8c6c82592 .quad 0x8d7040863ece88eb .quad 0xf0e307a980eec08c .quad 0xac2250610d788fda .quad 
0x056d92a43a0d478d .quad 0x1b05a196fc3da5a1 .quad 0x77d7a8c243b59ed0 .quad 0x06da3d6297d17918 .quad 0x66fbb494f12353f7 // 2^108 * 4 * G .quad 0x751a50b9d85c0fb8 .quad 0xd1afdc258bcf097b .quad 0x2f16a6a38309a969 .quad 0x14ddff9ee5b00659 .quad 0xd6d70996f12309d6 .quad 0xdbfb2385e9c3d539 .quad 0x46d602b0f7552411 .quad 0x270a0b0557843e0c .quad 0x61ff0640a7862bcc .quad 0x81cac09a5f11abfe .quad 0x9047830455d12abb .quad 0x19a4bde1945ae873 // 2^108 * 5 * G .quad 0x9b9f26f520a6200a .quad 0x64804443cf13eaf8 .quad 0x8a63673f8631edd3 .quad 0x72bbbce11ed39dc1 .quad 0x40c709dec076c49f .quad 0x657bfaf27f3e53f6 .quad 0x40662331eca042c4 .quad 0x14b375487eb4df04 .quad 0xae853c94ab66dc47 .quad 0xeb62343edf762d6e .quad 0xf08e0e186fb2f7d1 .quad 0x4f0b1c02700ab37a // 2^108 * 6 * G .quad 0xe1706787d81951fa .quad 0xa10a2c8eb290c77b .quad 0xe7382fa03ed66773 .quad 0x0a4d84710bcc4b54 .quad 0x79fd21ccc1b2e23f .quad 0x4ae7c281453df52a .quad 0xc8172ec9d151486b .quad 0x68abe9443e0a7534 .quad 0xda12c6c407831dcb .quad 0x0da230d74d5c510d .quad 0x4ab1531e6bd404e1 .quad 0x4106b166bcf440ef // 2^108 * 7 * G .quad 0x02e57a421cd23668 .quad 0x4ad9fb5d0eaef6fd .quad 0x954e6727b1244480 .quad 0x7f792f9d2699f331 .quad 0xa485ccd539e4ecf2 .quad 0x5aa3f3ad0555bab5 .quad 0x145e3439937df82d .quad 0x1238b51e1214283f .quad 0x0b886b925fd4d924 .quad 0x60906f7a3626a80d .quad 0xecd367b4b98abd12 .quad 0x2876beb1def344cf // 2^108 * 8 * G .quad 0xdc84e93563144691 .quad 0x632fe8a0d61f23f4 .quad 0x4caa800612a9a8d5 .quad 0x48f9dbfa0e9918d3 .quad 0xd594b3333a8a85f8 .quad 0x4ea37689e78d7d58 .quad 0x73bf9f455e8e351f .quad 0x5507d7d2bc41ebb4 .quad 0x1ceb2903299572fc .quad 0x7c8ccaa29502d0ee .quad 0x91bfa43411cce67b .quad 0x5784481964a831e7 // 2^112 * 1 * G .quad 0xda7c2b256768d593 .quad 0x98c1c0574422ca13 .quad 0xf1a80bd5ca0ace1d .quad 0x29cdd1adc088a690 .quad 0xd6cfd1ef5fddc09c .quad 0xe82b3efdf7575dce .quad 0x25d56b5d201634c2 .quad 0x3041c6bb04ed2b9b .quad 0x0ff2f2f9d956e148 .quad 0xade797759f356b2e .quad 0x1a4698bb5f6c025c .quad 0x104bbd6814049a7b // 2^112 * 2 * G .quad 0x51f0fd3168f1ed67 .quad 0x2c811dcdd86f3bc2 .quad 0x44dc5c4304d2f2de .quad 0x5be8cc57092a7149 .quad 0xa95d9a5fd67ff163 .quad 0xe92be69d4cc75681 .quad 0xb7f8024cde20f257 .quad 0x204f2a20fb072df5 .quad 0xc8143b3d30ebb079 .quad 0x7589155abd652e30 .quad 0x653c3c318f6d5c31 .quad 0x2570fb17c279161f // 2^112 * 3 * G .quad 0x3efa367f2cb61575 .quad 0xf5f96f761cd6026c .quad 0xe8c7142a65b52562 .quad 0x3dcb65ea53030acd .quad 0x192ea9550bb8245a .quad 0xc8e6fba88f9050d1 .quad 0x7986ea2d88a4c935 .quad 0x241c5f91de018668 .quad 0x28d8172940de6caa .quad 0x8fbf2cf022d9733a .quad 0x16d7fcdd235b01d1 .quad 0x08420edd5fcdf0e5 // 2^112 * 4 * G .quad 0xcdff20ab8362fa4a .quad 0x57e118d4e21a3e6e .quad 0xe3179617fc39e62b .quad 0x0d9a53efbc1769fd .quad 0x0358c34e04f410ce .quad 0xb6135b5a276e0685 .quad 0x5d9670c7ebb91521 .quad 0x04d654f321db889c .quad 0x5e7dc116ddbdb5d5 .quad 0x2954deb68da5dd2d .quad 0x1cb608173334a292 .quad 0x4a7a4f2618991ad7 // 2^112 * 5 * G .quad 0xf4a718025fb15f95 .quad 0x3df65f346b5c1b8f .quad 0xcdfcf08500e01112 .quad 0x11b50c4cddd31848 .quad 0x24c3b291af372a4b .quad 0x93da8270718147f2 .quad 0xdd84856486899ef2 .quad 0x4a96314223e0ee33 .quad 0xa6e8274408a4ffd6 .quad 0x738e177e9c1576d9 .quad 0x773348b63d02b3f2 .quad 0x4f4bce4dce6bcc51 // 2^112 * 6 * G .quad 0xa71fce5ae2242584 .quad 0x26ea725692f58a9e .quad 0xd21a09d71cea3cf4 .quad 0x73fcdd14b71c01e6 .quad 0x30e2616ec49d0b6f .quad 0xe456718fcaec2317 .quad 0x48eb409bf26b4fa6 .quad 0x3042cee561595f37 .quad 0x427e7079449bac41 .quad 
0x855ae36dbce2310a .quad 0x4cae76215f841a7c .quad 0x389e740c9a9ce1d6 // 2^112 * 7 * G .quad 0x64fcb3ae34dcb9ce .quad 0x97500323e348d0ad .quad 0x45b3f07d62c6381b .quad 0x61545379465a6788 .quad 0xc9bd78f6570eac28 .quad 0xe55b0b3227919ce1 .quad 0x65fc3eaba19b91ed .quad 0x25c425e5d6263690 .quad 0x3f3e06a6f1d7de6e .quad 0x3ef976278e062308 .quad 0x8c14f6264e8a6c77 .quad 0x6539a08915484759 // 2^112 * 8 * G .quad 0xe9d21f74c3d2f773 .quad 0xc150544125c46845 .quad 0x624e5ce8f9b99e33 .quad 0x11c5e4aac5cd186c .quad 0xddc4dbd414bb4a19 .quad 0x19b2bc3c98424f8e .quad 0x48a89fd736ca7169 .quad 0x0f65320ef019bd90 .quad 0xd486d1b1cafde0c6 .quad 0x4f3fe6e3163b5181 .quad 0x59a8af0dfaf2939a .quad 0x4cabc7bdec33072a // 2^116 * 1 * G .quad 0x16faa8fb532f7428 .quad 0xdbd42ea046a4e272 .quad 0x5337653b8b9ea480 .quad 0x4065947223973f03 .quad 0xf7c0a19c1a54a044 .quad 0x4a1c5e2477bd9fbb .quad 0xa6e3ca115af22972 .quad 0x1819bb953f2e9e0d .quad 0x498fbb795e042e84 .quad 0x7d0dd89a7698b714 .quad 0x8bfb0ba427fe6295 .quad 0x36ba82e721200524 // 2^116 * 2 * G .quad 0xd60ecbb74245ec41 .quad 0xfd9be89e34348716 .quad 0xc9240afee42284de .quad 0x4472f648d0531db4 .quad 0xc8d69d0a57274ed5 .quad 0x45ba803260804b17 .quad 0xdf3cda102255dfac .quad 0x77d221232709b339 .quad 0x498a6d7064ad94d8 .quad 0xa5b5c8fd9af62263 .quad 0x8ca8ed0545c141f4 .quad 0x2c63bec3662d358c // 2^116 * 3 * G .quad 0x7fe60d8bea787955 .quad 0xb9dc117eb5f401b7 .quad 0x91c7c09a19355cce .quad 0x22692ef59442bedf .quad 0x9a518b3a8586f8bf .quad 0x9ee71af6cbb196f0 .quad 0xaa0625e6a2385cf2 .quad 0x1deb2176ddd7c8d1 .quad 0x8563d19a2066cf6c .quad 0x401bfd8c4dcc7cd7 .quad 0xd976a6becd0d8f62 .quad 0x67cfd773a278b05e // 2^116 * 4 * G .quad 0x8dec31faef3ee475 .quad 0x99dbff8a9e22fd92 .quad 0x512d11594e26cab1 .quad 0x0cde561eec4310b9 .quad 0x2d5fa9855a4e586a .quad 0x65f8f7a449beab7e .quad 0xaa074dddf21d33d3 .quad 0x185cba721bcb9dee .quad 0x93869da3f4e3cb41 .quad 0xbf0392f540f7977e .quad 0x026204fcd0463b83 .quad 0x3ec91a769eec6eed // 2^116 * 5 * G .quad 0x1e9df75bf78166ad .quad 0x4dfda838eb0cd7af .quad 0xba002ed8c1eaf988 .quad 0x13fedb3e11f33cfc .quad 0x0fad2fb7b0a3402f .quad 0x46615ecbfb69f4a8 .quad 0xf745bcc8c5f8eaa6 .quad 0x7a5fa8794a94e896 .quad 0x52958faa13cd67a1 .quad 0x965ee0818bdbb517 .quad 0x16e58daa2e8845b3 .quad 0x357d397d5499da8f // 2^116 * 6 * G .quad 0x1ebfa05fb0bace6c .quad 0xc934620c1caf9a1e .quad 0xcc771cc41d82b61a .quad 0x2d94a16aa5f74fec .quad 0x481dacb4194bfbf8 .quad 0x4d77e3f1bae58299 .quad 0x1ef4612e7d1372a0 .quad 0x3a8d867e70ff69e1 .quad 0x6f58cd5d55aff958 .quad 0xba3eaa5c75567721 .quad 0x75c123999165227d .quad 0x69be1343c2f2b35e // 2^116 * 7 * G .quad 0x0e091d5ee197c92a .quad 0x4f51019f2945119f .quad 0x143679b9f034e99c .quad 0x7d88112e4d24c696 .quad 0x82bbbdac684b8de3 .quad 0xa2f4c7d03fca0718 .quad 0x337f92fbe096aaa8 .quad 0x200d4d8c63587376 .quad 0x208aed4b4893b32b .quad 0x3efbf23ebe59b964 .quad 0xd762deb0dba5e507 .quad 0x69607bd681bd9d94 // 2^116 * 8 * G .quad 0xf6be021068de1ce1 .quad 0xe8d518e70edcbc1f .quad 0xe3effdd01b5505a5 .quad 0x35f63353d3ec3fd0 .quad 0x3b7f3bd49323a902 .quad 0x7c21b5566b2c6e53 .quad 0xe5ba8ff53a7852a7 .quad 0x28bc77a5838ece00 .quad 0x63ba78a8e25d8036 .quad 0x63651e0094333490 .quad 0x48d82f20288ce532 .quad 0x3a31abfa36b57524 // 2^120 * 1 * G .quad 0x239e9624089c0a2e .quad 0xc748c4c03afe4738 .quad 0x17dbed2a764fa12a .quad 0x639b93f0321c8582 .quad 0xc08f788f3f78d289 .quad 0xfe30a72ca1404d9f .quad 0xf2778bfccf65cc9d .quad 0x7ee498165acb2021 .quad 0x7bd508e39111a1c3 .quad 0x2b2b90d480907489 .quad 0xe7d2aec2ae72fd19 .quad 
0x0edf493c85b602a6 // 2^120 * 2 * G .quad 0xaecc8158599b5a68 .quad 0xea574f0febade20e .quad 0x4fe41d7422b67f07 .quad 0x403b92e3019d4fb4 .quad 0x6767c4d284764113 .quad 0xa090403ff7f5f835 .quad 0x1c8fcffacae6bede .quad 0x04c00c54d1dfa369 .quad 0x4dc22f818b465cf8 .quad 0x71a0f35a1480eff8 .quad 0xaee8bfad04c7d657 .quad 0x355bb12ab26176f4 // 2^120 * 3 * G .quad 0xa71e64cc7493bbf4 .quad 0xe5bd84d9eca3b0c3 .quad 0x0a6bc50cfa05e785 .quad 0x0f9b8132182ec312 .quad 0xa301dac75a8c7318 .quad 0xed90039db3ceaa11 .quad 0x6f077cbf3bae3f2d .quad 0x7518eaf8e052ad8e .quad 0xa48859c41b7f6c32 .quad 0x0f2d60bcf4383298 .quad 0x1815a929c9b1d1d9 .quad 0x47c3871bbb1755c4 // 2^120 * 4 * G .quad 0x5144539771ec4f48 .quad 0xf805b17dc98c5d6e .quad 0xf762c11a47c3c66b .quad 0x00b89b85764699dc .quad 0xfbe65d50c85066b0 .quad 0x62ecc4b0b3a299b0 .quad 0xe53754ea441ae8e0 .quad 0x08fea02ce8d48d5f .quad 0x824ddd7668deead0 .quad 0xc86445204b685d23 .quad 0xb514cfcd5d89d665 .quad 0x473829a74f75d537 // 2^120 * 5 * G .quad 0x82d2da754679c418 .quad 0xe63bd7d8b2618df0 .quad 0x355eef24ac47eb0a .quad 0x2078684c4833c6b4 .quad 0x23d9533aad3902c9 .quad 0x64c2ddceef03588f .quad 0x15257390cfe12fb4 .quad 0x6c668b4d44e4d390 .quad 0x3b48cf217a78820c .quad 0xf76a0ab281273e97 .quad 0xa96c65a78c8eed7b .quad 0x7411a6054f8a433f // 2^120 * 6 * G .quad 0x4d659d32b99dc86d .quad 0x044cdc75603af115 .quad 0xb34c712cdcc2e488 .quad 0x7c136574fb8134ff .quad 0x579ae53d18b175b4 .quad 0x68713159f392a102 .quad 0x8455ecba1eef35f5 .quad 0x1ec9a872458c398f .quad 0xb8e6a4d400a2509b .quad 0x9b81d7020bc882b4 .quad 0x57e7cc9bf1957561 .quad 0x3add88a5c7cd6460 // 2^120 * 7 * G .quad 0xab895770b635dcf2 .quad 0x02dfef6cf66c1fbc .quad 0x85530268beb6d187 .quad 0x249929fccc879e74 .quad 0x85c298d459393046 .quad 0x8f7e35985ff659ec .quad 0x1d2ca22af2f66e3a .quad 0x61ba1131a406a720 .quad 0xa3d0a0f116959029 .quad 0x023b6b6cba7ebd89 .quad 0x7bf15a3e26783307 .quad 0x5620310cbbd8ece7 // 2^120 * 8 * G .quad 0x528993434934d643 .quad 0xb9dbf806a51222f5 .quad 0x8f6d878fc3f41c22 .quad 0x37676a2a4d9d9730 .quad 0x6646b5f477e285d6 .quad 0x40e8ff676c8f6193 .quad 0xa6ec7311abb594dd .quad 0x7ec846f3658cec4d .quad 0x9b5e8f3f1da22ec7 .quad 0x130f1d776c01cd13 .quad 0x214c8fcfa2989fb8 .quad 0x6daaf723399b9dd5 // 2^124 * 1 * G .quad 0x591e4a5610628564 .quad 0x2a4bb87ca8b4df34 .quad 0xde2a2572e7a38e43 .quad 0x3cbdabd9fee5046e .quad 0x81aebbdd2cd13070 .quad 0x962e4325f85a0e9e .quad 0xde9391aacadffecb .quad 0x53177fda52c230e6 .quad 0xa7bc970650b9de79 .quad 0x3d12a7fbc301b59b .quad 0x02652e68d36ae38c .quad 0x79d739835a6199dc // 2^124 * 2 * G .quad 0xd9354df64131c1bd .quad 0x758094a186ec5822 .quad 0x4464ee12e459f3c2 .quad 0x6c11fce4cb133282 .quad 0x21c9d9920d591737 .quad 0x9bea41d2e9b46cd6 .quad 0xe20e84200d89bfca .quad 0x79d99f946eae5ff8 .quad 0xf17b483568673205 .quad 0x387deae83caad96c .quad 0x61b471fd56ffe386 .quad 0x31741195b745a599 // 2^124 * 3 * G .quad 0xe8d10190b77a360b .quad 0x99b983209995e702 .quad 0xbd4fdff8fa0247aa .quad 0x2772e344e0d36a87 .quad 0x17f8ba683b02a047 .quad 0x50212096feefb6c8 .quad 0x70139be21556cbe2 .quad 0x203e44a11d98915b .quad 0xd6863eba37b9e39f .quad 0x105bc169723b5a23 .quad 0x104f6459a65c0762 .quad 0x567951295b4d38d4 // 2^124 * 4 * G .quad 0x535fd60613037524 .quad 0xe210adf6b0fbc26a .quad 0xac8d0a9b23e990ae .quad 0x47204d08d72fdbf9 .quad 0x07242eb30d4b497f .quad 0x1ef96306b9bccc87 .quad 0x37950934d8116f45 .quad 0x05468d6201405b04 .quad 0x00f565a9f93267de .quad 0xcecfd78dc0d58e8a .quad 0xa215e2dcf318e28e .quad 0x4599ee919b633352 // 2^124 * 5 * G .quad 0xd3c220ca70e0e76b .quad 
0xb12bea58ea9f3094 .quad 0x294ddec8c3271282 .quad 0x0c3539e1a1d1d028 .quad 0xac746d6b861ae579 .quad 0x31ab0650f6aea9dc .quad 0x241d661140256d4c .quad 0x2f485e853d21a5de .quad 0x329744839c0833f3 .quad 0x6fe6257fd2abc484 .quad 0x5327d1814b358817 .quad 0x65712585893fe9bc // 2^124 * 6 * G .quad 0x9c102fb732a61161 .quad 0xe48e10dd34d520a8 .quad 0x365c63546f9a9176 .quad 0x32f6fe4c046f6006 .quad 0x81c29f1bd708ee3f .quad 0xddcb5a05ae6407d0 .quad 0x97aec1d7d2a3eba7 .quad 0x1590521a91d50831 .quad 0x40a3a11ec7910acc .quad 0x9013dff8f16d27ae .quad 0x1a9720d8abb195d4 .quad 0x1bb9fe452ea98463 // 2^124 * 7 * G .quad 0xe9d1d950b3d54f9e .quad 0x2d5f9cbee00d33c1 .quad 0x51c2c656a04fc6ac .quad 0x65c091ee3c1cbcc9 .quad 0xcf5e6c95cc36747c .quad 0x294201536b0bc30d .quad 0x453ac67cee797af0 .quad 0x5eae6ab32a8bb3c9 .quad 0x7083661114f118ea .quad 0x2b37b87b94349cad .quad 0x7273f51cb4e99f40 .quad 0x78a2a95823d75698 // 2^124 * 8 * G .quad 0xa2b072e95c8c2ace .quad 0x69cffc96651e9c4b .quad 0x44328ef842e7b42b .quad 0x5dd996c122aadeb3 .quad 0xb4f23c425ef83207 .quad 0xabf894d3c9a934b5 .quad 0xd0708c1339fd87f7 .quad 0x1876789117166130 .quad 0x925b5ef0670c507c .quad 0x819bc842b93c33bf .quad 0x10792e9a70dd003f .quad 0x59ad4b7a6e28dc74 // 2^128 * 1 * G .quad 0x5f3a7562eb3dbe47 .quad 0xf7ea38548ebda0b8 .quad 0x00c3e53145747299 .quad 0x1304e9e71627d551 .quad 0x583b04bfacad8ea2 .quad 0x29b743e8148be884 .quad 0x2b1e583b0810c5db .quad 0x2b5449e58eb3bbaa .quad 0x789814d26adc9cfe .quad 0x3c1bab3f8b48dd0b .quad 0xda0fe1fff979c60a .quad 0x4468de2d7c2dd693 // 2^128 * 2 * G .quad 0x51bb355e9419469e .quad 0x33e6dc4c23ddc754 .quad 0x93a5b6d6447f9962 .quad 0x6cce7c6ffb44bd63 .quad 0x4b9ad8c6f86307ce .quad 0x21113531435d0c28 .quad 0xd4a866c5657a772c .quad 0x5da6427e63247352 .quad 0x1a94c688deac22ca .quad 0xb9066ef7bbae1ff8 .quad 0x88ad8c388d59580f .quad 0x58f29abfe79f2ca8 // 2^128 * 3 * G .quad 0xe90ecfab8de73e68 .quad 0x54036f9f377e76a5 .quad 0xf0495b0bbe015982 .quad 0x577629c4a7f41e36 .quad 0x4b5a64bf710ecdf6 .quad 0xb14ce538462c293c .quad 0x3643d056d50b3ab9 .quad 0x6af93724185b4870 .quad 0x3220024509c6a888 .quad 0xd2e036134b558973 .quad 0x83e236233c33289f .quad 0x701f25bb0caec18f // 2^128 * 4 * G .quad 0xc3a8b0f8e4616ced .quad 0xf700660e9e25a87d .quad 0x61e3061ff4bca59c .quad 0x2e0c92bfbdc40be9 .quad 0x9d18f6d97cbec113 .quad 0x844a06e674bfdbe4 .quad 0x20f5b522ac4e60d6 .quad 0x720a5bc050955e51 .quad 0x0c3f09439b805a35 .quad 0xe84e8b376242abfc .quad 0x691417f35c229346 .quad 0x0e9b9cbb144ef0ec // 2^128 * 5 * G .quad 0xfbbad48ffb5720ad .quad 0xee81916bdbf90d0e .quad 0xd4813152635543bf .quad 0x221104eb3f337bd8 .quad 0x8dee9bd55db1beee .quad 0xc9c3ab370a723fb9 .quad 0x44a8f1bf1c68d791 .quad 0x366d44191cfd3cde .quad 0x9e3c1743f2bc8c14 .quad 0x2eda26fcb5856c3b .quad 0xccb82f0e68a7fb97 .quad 0x4167a4e6bc593244 // 2^128 * 6 * G .quad 0x643b9d2876f62700 .quad 0x5d1d9d400e7668eb .quad 0x1b4b430321fc0684 .quad 0x7938bb7e2255246a .quad 0xc2be2665f8ce8fee .quad 0xe967ff14e880d62c .quad 0xf12e6e7e2f364eee .quad 0x34b33370cb7ed2f6 .quad 0xcdc591ee8681d6cc .quad 0xce02109ced85a753 .quad 0xed7485c158808883 .quad 0x1176fc6e2dfe65e4 // 2^128 * 7 * G .quad 0xb4af6cd05b9c619b .quad 0x2ddfc9f4b2a58480 .quad 0x3d4fa502ebe94dc4 .quad 0x08fc3a4c677d5f34 .quad 0xdb90e28949770eb8 .quad 0x98fbcc2aacf440a3 .quad 0x21354ffeded7879b .quad 0x1f6a3e54f26906b6 .quad 0x60a4c199d30734ea .quad 0x40c085b631165cd6 .quad 0xe2333e23f7598295 .quad 0x4f2fad0116b900d1 // 2^128 * 8 * G .quad 0x44beb24194ae4e54 .quad 0x5f541c511857ef6c .quad 0xa61e6b2d368d0498 .quad 
0x445484a4972ef7ab .quad 0x962cd91db73bb638 .quad 0xe60577aafc129c08 .quad 0x6f619b39f3b61689 .quad 0x3451995f2944ee81 .quad 0x9152fcd09fea7d7c .quad 0x4a816c94b0935cf6 .quad 0x258e9aaa47285c40 .quad 0x10b89ca6042893b7 // 2^132 * 1 * G .quad 0x9b2a426e3b646025 .quad 0x32127190385ce4cf .quad 0xa25cffc2dd6dea45 .quad 0x06409010bea8de75 .quad 0xd67cded679d34aa0 .quad 0xcc0b9ec0cc4db39f .quad 0xa535a456e35d190f .quad 0x2e05d9eaf61f6fef .quad 0xc447901ad61beb59 .quad 0x661f19bce5dc880a .quad 0x24685482b7ca6827 .quad 0x293c778cefe07f26 // 2^132 * 2 * G .quad 0x86809e7007069096 .quad 0xaad75b15e4e50189 .quad 0x07f35715a21a0147 .quad 0x0487f3f112815d5e .quad 0x16c795d6a11ff200 .quad 0xcb70d0e2b15815c9 .quad 0x89f293209b5395b5 .quad 0x50b8c2d031e47b4f .quad 0x48350c08068a4962 .quad 0x6ffdd05351092c9a .quad 0x17af4f4aaf6fc8dd .quad 0x4b0553b53cdba58b // 2^132 * 3 * G .quad 0x9c65fcbe1b32ff79 .quad 0xeb75ea9f03b50f9b .quad 0xfced2a6c6c07e606 .quad 0x35106cd551717908 .quad 0xbf05211b27c152d4 .quad 0x5ec26849bd1af639 .quad 0x5e0b2caa8e6fab98 .quad 0x054c8bdd50bd0840 .quad 0x38a0b12f1dcf073d .quad 0x4b60a8a3b7f6a276 .quad 0xfed5ac25d3404f9a .quad 0x72e82d5e5505c229 // 2^132 * 4 * G .quad 0x6b0b697ff0d844c8 .quad 0xbb12f85cd979cb49 .quad 0xd2a541c6c1da0f1f .quad 0x7b7c242958ce7211 .quad 0x00d9cdfd69771d02 .quad 0x410276cd6cfbf17e .quad 0x4c45306c1cb12ec7 .quad 0x2857bf1627500861 .quad 0x9f21903f0101689e .quad 0xd779dfd3bf861005 .quad 0xa122ee5f3deb0f1b .quad 0x510df84b485a00d4 // 2^132 * 5 * G .quad 0xa54133bb9277a1fa .quad 0x74ec3b6263991237 .quad 0x1a3c54dc35d2f15a .quad 0x2d347144e482ba3a .quad 0x24b3c887c70ac15e .quad 0xb0f3a557fb81b732 .quad 0x9b2cde2fe578cc1b .quad 0x4cf7ed0703b54f8e .quad 0x6bd47c6598fbee0f .quad 0x9e4733e2ab55be2d .quad 0x1093f624127610c5 .quad 0x4e05e26ad0a1eaa4 // 2^132 * 6 * G .quad 0xda9b6b624b531f20 .quad 0x429a760e77509abb .quad 0xdbe9f522e823cb80 .quad 0x618f1856880c8f82 .quad 0x1833c773e18fe6c0 .quad 0xe3c4711ad3c87265 .quad 0x3bfd3c4f0116b283 .quad 0x1955875eb4cd4db8 .quad 0x6da6de8f0e399799 .quad 0x7ad61aa440fda178 .quad 0xb32cd8105e3563dd .quad 0x15f6beae2ae340ae // 2^132 * 7 * G .quad 0x862bcb0c31ec3a62 .quad 0x810e2b451138f3c2 .quad 0x788ec4b839dac2a4 .quad 0x28f76867ae2a9281 .quad 0xba9a0f7b9245e215 .quad 0xf368612dd98c0dbb .quad 0x2e84e4cbf220b020 .quad 0x6ba92fe962d90eda .quad 0x3e4df9655884e2aa .quad 0xbd62fbdbdbd465a5 .quad 0xd7596caa0de9e524 .quad 0x6e8042ccb2b1b3d7 // 2^132 * 8 * G .quad 0xf10d3c29ce28ca6e .quad 0xbad34540fcb6093d .quad 0xe7426ed7a2ea2d3f .quad 0x08af9d4e4ff298b9 .quad 0x1530653616521f7e .quad 0x660d06b896203dba .quad 0x2d3989bc545f0879 .quad 0x4b5303af78ebd7b0 .quad 0x72f8a6c3bebcbde8 .quad 0x4f0fca4adc3a8e89 .quad 0x6fa9d4e8c7bfdf7a .quad 0x0dcf2d679b624eb7 // 2^136 * 1 * G .quad 0x3d5947499718289c .quad 0x12ebf8c524533f26 .quad 0x0262bfcb14c3ef15 .quad 0x20b878d577b7518e .quad 0x753941be5a45f06e .quad 0xd07caeed6d9c5f65 .quad 0x11776b9c72ff51b6 .quad 0x17d2d1d9ef0d4da9 .quad 0x27f2af18073f3e6a .quad 0xfd3fe519d7521069 .quad 0x22e3b72c3ca60022 .quad 0x72214f63cc65c6a7 // 2^136 * 2 * G .quad 0xb4e37f405307a693 .quad 0xaba714d72f336795 .quad 0xd6fbd0a773761099 .quad 0x5fdf48c58171cbc9 .quad 0x1d9db7b9f43b29c9 .quad 0xd605824a4f518f75 .quad 0xf2c072bd312f9dc4 .quad 0x1f24ac855a1545b0 .quad 0x24d608328e9505aa .quad 0x4748c1d10c1420ee .quad 0xc7ffe45c06fb25a2 .quad 0x00ba739e2ae395e6 // 2^136 * 3 * G .quad 0x592e98de5c8790d6 .quad 0xe5bfb7d345c2a2df .quad 0x115a3b60f9b49922 .quad 0x03283a3e67ad78f3 .quad 0xae4426f5ea88bb26 .quad 
0x360679d984973bfb .quad 0x5c9f030c26694e50 .quad 0x72297de7d518d226 .quad 0x48241dc7be0cb939 .quad 0x32f19b4d8b633080 .quad 0xd3dfc90d02289308 .quad 0x05e1296846271945 // 2^136 * 4 * G .quad 0xba82eeb32d9c495a .quad 0xceefc8fcf12bb97c .quad 0xb02dabae93b5d1e0 .quad 0x39c00c9c13698d9b .quad 0xadbfbbc8242c4550 .quad 0xbcc80cecd03081d9 .quad 0x843566a6f5c8df92 .quad 0x78cf25d38258ce4c .quad 0x15ae6b8e31489d68 .quad 0xaa851cab9c2bf087 .quad 0xc9a75a97f04efa05 .quad 0x006b52076b3ff832 // 2^136 * 5 * G .quad 0x29e0cfe19d95781c .quad 0xb681df18966310e2 .quad 0x57df39d370516b39 .quad 0x4d57e3443bc76122 .quad 0xf5cb7e16b9ce082d .quad 0x3407f14c417abc29 .quad 0xd4b36bce2bf4a7ab .quad 0x7de2e9561a9f75ce .quad 0xde70d4f4b6a55ecb .quad 0x4801527f5d85db99 .quad 0xdbc9c440d3ee9a81 .quad 0x6b2a90af1a6029ed // 2^136 * 6 * G .quad 0x6923f4fc9ae61e97 .quad 0x5735281de03f5fd1 .quad 0xa764ae43e6edd12d .quad 0x5fd8f4e9d12d3e4a .quad 0x77ebf3245bb2d80a .quad 0xd8301b472fb9079b .quad 0xc647e6f24cee7333 .quad 0x465812c8276c2109 .quad 0x4d43beb22a1062d9 .quad 0x7065fb753831dc16 .quad 0x180d4a7bde2968d7 .quad 0x05b32c2b1cb16790 // 2^136 * 7 * G .quad 0xc8c05eccd24da8fd .quad 0xa1cf1aac05dfef83 .quad 0xdbbeeff27df9cd61 .quad 0x3b5556a37b471e99 .quad 0xf7fca42c7ad58195 .quad 0x3214286e4333f3cc .quad 0xb6c29d0d340b979d .quad 0x31771a48567307e1 .quad 0x32b0c524e14dd482 .quad 0xedb351541a2ba4b6 .quad 0xa3d16048282b5af3 .quad 0x4fc079d27a7336eb // 2^136 * 8 * G .quad 0x51c938b089bf2f7f .quad 0x2497bd6502dfe9a7 .quad 0xffffc09c7880e453 .quad 0x124567cecaf98e92 .quad 0xdc348b440c86c50d .quad 0x1337cbc9cc94e651 .quad 0x6422f74d643e3cb9 .quad 0x241170c2bae3cd08 .quad 0x3ff9ab860ac473b4 .quad 0xf0911dee0113e435 .quad 0x4ae75060ebc6c4af .quad 0x3f8612966c87000d // 2^140 * 1 * G .quad 0x0c9c5303f7957be4 .quad 0xa3c31a20e085c145 .quad 0xb0721d71d0850050 .quad 0x0aba390eab0bf2da .quad 0x529fdffe638c7bf3 .quad 0xdf2b9e60388b4995 .quad 0xe027b34f1bad0249 .quad 0x7bc92fc9b9fa74ed .quad 0x9f97ef2e801ad9f9 .quad 0x83697d5479afda3a .quad 0xe906b3ffbd596b50 .quad 0x02672b37dd3fb8e0 // 2^140 * 2 * G .quad 0x48b2ca8b260885e4 .quad 0xa4286bec82b34c1c .quad 0x937e1a2617f58f74 .quad 0x741d1fcbab2ca2a5 .quad 0xee9ba729398ca7f5 .quad 0xeb9ca6257a4849db .quad 0x29eb29ce7ec544e1 .quad 0x232ca21ef736e2c8 .quad 0xbf61423d253fcb17 .quad 0x08803ceafa39eb14 .quad 0xf18602df9851c7af .quad 0x0400f3a049e3414b // 2^140 * 3 * G .quad 0xabce0476ba61c55b .quad 0x36a3d6d7c4d39716 .quad 0x6eb259d5e8d82d09 .quad 0x0c9176e984d756fb .quad 0x2efba412a06e7b06 .quad 0x146785452c8d2560 .quad 0xdf9713ebd67a91c7 .quad 0x32830ac7157eadf3 .quad 0x0e782a7ab73769e8 .quad 0x04a05d7875b18e2c .quad 0x29525226ebcceae1 .quad 0x0d794f8383eba820 // 2^140 * 4 * G .quad 0xff35f5cb9e1516f4 .quad 0xee805bcf648aae45 .quad 0xf0d73c2bb93a9ef3 .quad 0x097b0bf22092a6c2 .quad 0x7be44ce7a7a2e1ac .quad 0x411fd93efad1b8b7 .quad 0x1734a1d70d5f7c9b .quad 0x0d6592233127db16 .quad 0xc48bab1521a9d733 .quad 0xa6c2eaead61abb25 .quad 0x625c6c1cc6cb4305 .quad 0x7fc90fea93eb3a67 // 2^140 * 5 * G .quad 0x0408f1fe1f5c5926 .quad 0x1a8f2f5e3b258bf4 .quad 0x40a951a2fdc71669 .quad 0x6598ee93c98b577e .quad 0xc527deb59c7cb23d .quad 0x955391695328404e .quad 0xd64392817ccf2c7a .quad 0x6ce97dabf7d8fa11 .quad 0x25b5a8e50ef7c48f .quad 0xeb6034116f2ce532 .quad 0xc5e75173e53de537 .quad 0x73119fa08c12bb03 // 2^140 * 6 * G .quad 0xed30129453f1a4cb .quad 0xbce621c9c8f53787 .quad 0xfacb2b1338bee7b9 .quad 0x3025798a9ea8428c .quad 0x7845b94d21f4774d .quad 0xbf62f16c7897b727 .quad 0x671857c03c56522b .quad 
0x3cd6a85295621212 .quad 0x3fecde923aeca999 .quad 0xbdaa5b0062e8c12f .quad 0x67b99dfc96988ade .quad 0x3f52c02852661036 // 2^140 * 7 * G .quad 0xffeaa48e2a1351c6 .quad 0x28624754fa7f53d7 .quad 0x0b5ba9e57582ddf1 .quad 0x60c0104ba696ac59 .quad 0x9258bf99eec416c6 .quad 0xac8a5017a9d2f671 .quad 0x629549ab16dea4ab .quad 0x05d0e85c99091569 .quad 0x051de020de9cbe97 .quad 0xfa07fc56b50bcf74 .quad 0x378cec9f0f11df65 .quad 0x36853c69ab96de4d // 2^140 * 8 * G .quad 0x36d9b8de78f39b2d .quad 0x7f42ed71a847b9ec .quad 0x241cd1d679bd3fde .quad 0x6a704fec92fbce6b .quad 0x4433c0b0fac5e7be .quad 0x724bae854c08dcbe .quad 0xf1f24cc446978f9b .quad 0x4a0aff6d62825fc8 .quad 0xe917fb9e61095301 .quad 0xc102df9402a092f8 .quad 0xbf09e2f5fa66190b .quad 0x681109bee0dcfe37 // 2^144 * 1 * G .quad 0x559a0cc9782a0dde .quad 0x551dcdb2ea718385 .quad 0x7f62865b31ef238c .quad 0x504aa7767973613d .quad 0x9c18fcfa36048d13 .quad 0x29159db373899ddd .quad 0xdc9f350b9f92d0aa .quad 0x26f57eee878a19d4 .quad 0x0cab2cd55687efb1 .quad 0x5180d162247af17b .quad 0x85c15a344f5a2467 .quad 0x4041943d9dba3069 // 2^144 * 2 * G .quad 0xc3c0eeba43ebcc96 .quad 0x8d749c9c26ea9caf .quad 0xd9fa95ee1c77ccc6 .quad 0x1420a1d97684340f .quad 0x4b217743a26caadd .quad 0x47a6b424648ab7ce .quad 0xcb1d4f7a03fbc9e3 .quad 0x12d931429800d019 .quad 0x00c67799d337594f .quad 0x5e3c5140b23aa47b .quad 0x44182854e35ff395 .quad 0x1b4f92314359a012 // 2^144 * 3 * G .quad 0x3e5c109d89150951 .quad 0x39cefa912de9696a .quad 0x20eae43f975f3020 .quad 0x239b572a7f132dae .quad 0x33cf3030a49866b1 .quad 0x251f73d2215f4859 .quad 0xab82aa4051def4f6 .quad 0x5ff191d56f9a23f6 .quad 0x819ed433ac2d9068 .quad 0x2883ab795fc98523 .quad 0xef4572805593eb3d .quad 0x020c526a758f36cb // 2^144 * 4 * G .quad 0x779834f89ed8dbbc .quad 0xc8f2aaf9dc7ca46c .quad 0xa9524cdca3e1b074 .quad 0x02aacc4615313877 .quad 0xe931ef59f042cc89 .quad 0x2c589c9d8e124bb6 .quad 0xadc8e18aaec75997 .quad 0x452cfe0a5602c50c .quad 0x86a0f7a0647877df .quad 0xbbc464270e607c9f .quad 0xab17ea25f1fb11c9 .quad 0x4cfb7d7b304b877b // 2^144 * 5 * G .quad 0x72b43d6cb89b75fe .quad 0x54c694d99c6adc80 .quad 0xb8c3aa373ee34c9f .quad 0x14b4622b39075364 .quad 0xe28699c29789ef12 .quad 0x2b6ecd71df57190d .quad 0xc343c857ecc970d0 .quad 0x5b1d4cbc434d3ac5 .quad 0xb6fb2615cc0a9f26 .quad 0x3a4f0e2bb88dcce5 .quad 0x1301498b3369a705 .quad 0x2f98f71258592dd1 // 2^144 * 6 * G .quad 0x0c94a74cb50f9e56 .quad 0x5b1ff4a98e8e1320 .quad 0x9a2acc2182300f67 .quad 0x3a6ae249d806aaf9 .quad 0x2e12ae444f54a701 .quad 0xfcfe3ef0a9cbd7de .quad 0xcebf890d75835de0 .quad 0x1d8062e9e7614554 .quad 0x657ada85a9907c5a .quad 0x1a0ea8b591b90f62 .quad 0x8d0e1dfbdf34b4e9 .quad 0x298b8ce8aef25ff3 // 2^144 * 7 * G .quad 0x2a927953eff70cb2 .quad 0x4b89c92a79157076 .quad 0x9418457a30a7cf6a .quad 0x34b8a8404d5ce485 .quad 0x837a72ea0a2165de .quad 0x3fab07b40bcf79f6 .quad 0x521636c77738ae70 .quad 0x6ba6271803a7d7dc .quad 0xc26eecb583693335 .quad 0xd5a813df63b5fefd .quad 0xa293aa9aa4b22573 .quad 0x71d62bdd465e1c6a // 2^144 * 8 * G .quad 0x6533cc28d378df80 .quad 0xf6db43790a0fa4b4 .quad 0xe3645ff9f701da5a .quad 0x74d5f317f3172ba4 .quad 0xcd2db5dab1f75ef5 .quad 0xd77f95cf16b065f5 .quad 0x14571fea3f49f085 .quad 0x1c333621262b2b3d .quad 0xa86fe55467d9ca81 .quad 0x398b7c752b298c37 .quad 0xda6d0892e3ac623b .quad 0x4aebcc4547e9d98c // 2^148 * 1 * G .quad 0x53175a7205d21a77 .quad 0xb0c04422d3b934d4 .quad 0xadd9f24bdd5deadc .quad 0x074f46e69f10ff8c .quad 0x0de9b204a059a445 .quad 0xe15cb4aa4b17ad0f .quad 0xe1bbec521f79c557 .quad 0x2633f1b9d071081b .quad 0xc1fb4177018b9910 .quad 
0xa6ea20dc6c0fe140 .quad 0xd661f3e74354c6ff .quad 0x5ecb72e6f1a3407a // 2^148 * 2 * G .quad 0xa515a31b2259fb4e .quad 0x0960f3972bcac52f .quad 0xedb52fec8d3454cb .quad 0x382e2720c476c019 .quad 0xfeeae106e8e86997 .quad 0x9863337f98d09383 .quad 0x9470480eaa06ebef .quad 0x038b6898d4c5c2d0 .quad 0xf391c51d8ace50a6 .quad 0x3142d0b9ae2d2948 .quad 0xdb4d5a1a7f24ca80 .quad 0x21aeba8b59250ea8 // 2^148 * 3 * G .quad 0x24f13b34cf405530 .quad 0x3c44ea4a43088af7 .quad 0x5dd5c5170006a482 .quad 0x118eb8f8890b086d .quad 0x53853600f0087f23 .quad 0x4c461879da7d5784 .quad 0x6af303deb41f6860 .quad 0x0a3c16c5c27c18ed .quad 0x17e49c17cc947f3d .quad 0xccc6eda6aac1d27b .quad 0xdf6092ceb0f08e56 .quad 0x4909b3e22c67c36b // 2^148 * 4 * G .quad 0x9c9c85ea63fe2e89 .quad 0xbe1baf910e9412ec .quad 0x8f7baa8a86fbfe7b .quad 0x0fb17f9fef968b6c .quad 0x59a16676706ff64e .quad 0x10b953dd0d86a53d .quad 0x5848e1e6ce5c0b96 .quad 0x2d8b78e712780c68 .quad 0x79d5c62eafc3902b .quad 0x773a215289e80728 .quad 0xc38ae640e10120b9 .quad 0x09ae23717b2b1a6d // 2^148 * 5 * G .quad 0xbb6a192a4e4d083c .quad 0x34ace0630029e192 .quad 0x98245a59aafabaeb .quad 0x6d9c8a9ada97faac .quad 0x10ab8fa1ad32b1d0 .quad 0xe9aced1be2778b24 .quad 0xa8856bc0373de90f .quad 0x66f35ddddda53996 .quad 0xd27d9afb24997323 .quad 0x1bb7e07ef6f01d2e .quad 0x2ba7472df52ecc7f .quad 0x03019b4f646f9dc8 // 2^148 * 6 * G .quad 0x04a186b5565345cd .quad 0xeee76610bcc4116a .quad 0x689c73b478fb2a45 .quad 0x387dcbff65697512 .quad 0xaf09b214e6b3dc6b .quad 0x3f7573b5ad7d2f65 .quad 0xd019d988100a23b0 .quad 0x392b63a58b5c35f7 .quad 0x4093addc9c07c205 .quad 0xc565be15f532c37e .quad 0x63dbecfd1583402a .quad 0x61722b4aef2e032e // 2^148 * 7 * G .quad 0x0012aafeecbd47af .quad 0x55a266fb1cd46309 .quad 0xf203eb680967c72c .quad 0x39633944ca3c1429 .quad 0xd6b07a5581cb0e3c .quad 0x290ff006d9444969 .quad 0x08680b6a16dcda1f .quad 0x5568d2b75a06de59 .quad 0x8d0cb88c1b37cfe1 .quad 0x05b6a5a3053818f3 .quad 0xf2e9bc04b787d959 .quad 0x6beba1249add7f64 // 2^148 * 8 * G .quad 0x1d06005ca5b1b143 .quad 0x6d4c6bb87fd1cda2 .quad 0x6ef5967653fcffe7 .quad 0x097c29e8c1ce1ea5 .quad 0x5c3cecb943f5a53b .quad 0x9cc9a61d06c08df2 .quad 0xcfba639a85895447 .quad 0x5a845ae80df09fd5 .quad 0x4ce97dbe5deb94ca .quad 0x38d0a4388c709c48 .quad 0xc43eced4a169d097 .quad 0x0a1249fff7e587c3 // 2^152 * 1 * G .quad 0x12f0071b276d01c9 .quad 0xe7b8bac586c48c70 .quad 0x5308129b71d6fba9 .quad 0x5d88fbf95a3db792 .quad 0x0b408d9e7354b610 .quad 0x806b32535ba85b6e .quad 0xdbe63a034a58a207 .quad 0x173bd9ddc9a1df2c .quad 0x2b500f1efe5872df .quad 0x58d6582ed43918c1 .quad 0xe6ed278ec9673ae0 .quad 0x06e1cd13b19ea319 // 2^152 * 2 * G .quad 0x40d0ad516f166f23 .quad 0x118e32931fab6abe .quad 0x3fe35e14a04d088e .quad 0x3080603526e16266 .quad 0x472baf629e5b0353 .quad 0x3baa0b90278d0447 .quad 0x0c785f469643bf27 .quad 0x7f3a6a1a8d837b13 .quad 0xf7e644395d3d800b .quad 0x95a8d555c901edf6 .quad 0x68cd7830592c6339 .quad 0x30d0fded2e51307e // 2^152 * 3 * G .quad 0xe0594d1af21233b3 .quad 0x1bdbe78ef0cc4d9c .quad 0x6965187f8f499a77 .quad 0x0a9214202c099868 .quad 0x9cb4971e68b84750 .quad 0xa09572296664bbcf .quad 0x5c8de72672fa412b .quad 0x4615084351c589d9 .quad 0xbc9019c0aeb9a02e .quad 0x55c7110d16034cae .quad 0x0e6df501659932ec .quad 0x3bca0d2895ca5dfe // 2^152 * 4 * G .quad 0x40f031bc3c5d62a4 .quad 0x19fc8b3ecff07a60 .quad 0x98183da2130fb545 .quad 0x5631deddae8f13cd .quad 0x9c688eb69ecc01bf .quad 0xf0bc83ada644896f .quad 0xca2d955f5f7a9fe2 .quad 0x4ea8b4038df28241 .quad 0x2aed460af1cad202 .quad 0x46305305a48cee83 .quad 0x9121774549f11a5f .quad 
0x24ce0930542ca463 // 2^152 * 5 * G .quad 0x1fe890f5fd06c106 .quad 0xb5c468355d8810f2 .quad 0x827808fe6e8caf3e .quad 0x41d4e3c28a06d74b .quad 0x3fcfa155fdf30b85 .quad 0xd2f7168e36372ea4 .quad 0xb2e064de6492f844 .quad 0x549928a7324f4280 .quad 0xf26e32a763ee1a2e .quad 0xae91e4b7d25ffdea .quad 0xbc3bd33bd17f4d69 .quad 0x491b66dec0dcff6a // 2^152 * 6 * G .quad 0x98f5b13dc7ea32a7 .quad 0xe3d5f8cc7e16db98 .quad 0xac0abf52cbf8d947 .quad 0x08f338d0c85ee4ac .quad 0x75f04a8ed0da64a1 .quad 0xed222caf67e2284b .quad 0x8234a3791f7b7ba4 .quad 0x4cf6b8b0b7018b67 .quad 0xc383a821991a73bd .quad 0xab27bc01df320c7a .quad 0xc13d331b84777063 .quad 0x530d4a82eb078a99 // 2^152 * 7 * G .quad 0x004c3630e1f94825 .quad 0x7e2d78268cab535a .quad 0xc7482323cc84ff8b .quad 0x65ea753f101770b9 .quad 0x6d6973456c9abf9e .quad 0x257fb2fc4900a880 .quad 0x2bacf412c8cfb850 .quad 0x0db3e7e00cbfbd5b .quad 0x3d66fc3ee2096363 .quad 0x81d62c7f61b5cb6b .quad 0x0fbe044213443b1a .quad 0x02a4ec1921e1a1db // 2^152 * 8 * G .quad 0x5ce6259a3b24b8a2 .quad 0xb8577acc45afa0b8 .quad 0xcccbe6e88ba07037 .quad 0x3d143c51127809bf .quad 0xf5c86162f1cf795f .quad 0x118c861926ee57f2 .quad 0x172124851c063578 .quad 0x36d12b5dec067fcf .quad 0x126d279179154557 .quad 0xd5e48f5cfc783a0a .quad 0x36bdb6e8df179bac .quad 0x2ef517885ba82859 // 2^156 * 1 * G .quad 0x88bd438cd11e0d4a .quad 0x30cb610d43ccf308 .quad 0xe09a0e3791937bcc .quad 0x4559135b25b1720c .quad 0x1ea436837c6da1e9 .quad 0xf9c189af1fb9bdbe .quad 0x303001fcce5dd155 .quad 0x28a7c99ebc57be52 .quad 0xb8fd9399e8d19e9d .quad 0x908191cb962423ff .quad 0xb2b948d747c742a3 .quad 0x37f33226d7fb44c4 // 2^156 * 2 * G .quad 0x0dae8767b55f6e08 .quad 0x4a43b3b35b203a02 .quad 0xe3725a6e80af8c79 .quad 0x0f7a7fd1705fa7a3 .quad 0x33912553c821b11d .quad 0x66ed42c241e301df .quad 0x066fcc11104222fd .quad 0x307a3b41c192168f .quad 0x8eeb5d076eb55ce0 .quad 0x2fc536bfaa0d925a .quad 0xbe81830fdcb6c6e8 .quad 0x556c7045827baf52 // 2^156 * 3 * G .quad 0x8e2b517302e9d8b7 .quad 0xe3e52269248714e8 .quad 0xbd4fbd774ca960b5 .quad 0x6f4b4199c5ecada9 .quad 0xb94b90022bf44406 .quad 0xabd4237eff90b534 .quad 0x7600a960faf86d3a .quad 0x2f45abdac2322ee3 .quad 0x61af4912c8ef8a6a .quad 0xe58fa4fe43fb6e5e .quad 0xb5afcc5d6fd427cf .quad 0x6a5393281e1e11eb // 2^156 * 4 * G .quad 0xf3da5139a5d1ee89 .quad 0x8145457cff936988 .quad 0x3f622fed00e188c4 .quad 0x0f513815db8b5a3d .quad 0x0fff04fe149443cf .quad 0x53cac6d9865cddd7 .quad 0x31385b03531ed1b7 .quad 0x5846a27cacd1039d .quad 0x4ff5cdac1eb08717 .quad 0x67e8b29590f2e9bc .quad 0x44093b5e237afa99 .quad 0x0d414bed8708b8b2 // 2^156 * 5 * G .quad 0xcfb68265fd0e75f6 .quad 0xe45b3e28bb90e707 .quad 0x7242a8de9ff92c7a .quad 0x685b3201933202dd .quad 0x81886a92294ac9e8 .quad 0x23162b45d55547be .quad 0x94cfbc4403715983 .quad 0x50eb8fdb134bc401 .quad 0xc0b73ec6d6b330cd .quad 0x84e44807132faff1 .quad 0x732b7352c4a5dee1 .quad 0x5d7c7cf1aa7cd2d2 // 2^156 * 6 * G .quad 0xaf3b46bf7a4aafa2 .quad 0xb78705ec4d40d411 .quad 0x114f0c6aca7c15e3 .quad 0x3f364faaa9489d4d .quad 0x33d1013e9b73a562 .quad 0x925cef5748ec26e1 .quad 0xa7fce614dd468058 .quad 0x78b0fad41e9aa438 .quad 0xbf56a431ed05b488 .quad 0xa533e66c9c495c7e .quad 0xe8652baf87f3651a .quad 0x0241800059d66c33 // 2^156 * 7 * G .quad 0xceb077fea37a5be4 .quad 0xdb642f02e5a5eeb7 .quad 0xc2e6d0c5471270b8 .quad 0x4771b65538e4529c .quad 0x28350c7dcf38ea01 .quad 0x7c6cdbc0b2917ab6 .quad 0xace7cfbe857082f7 .quad 0x4d2845aba2d9a1e0 .quad 0xbb537fe0447070de .quad 0xcba744436dd557df .quad 0xd3b5a3473600dbcb .quad 0x4aeabbe6f9ffd7f8 // 2^156 * 8 * G .quad 0x4630119e40d8f78c .quad 
0xa01a9bc53c710e11 .quad 0x486d2b258910dd79 .quad 0x1e6c47b3db0324e5 .quad 0x6a2134bcc4a9c8f2 .quad 0xfbf8fd1c8ace2e37 .quad 0x000ae3049911a0ba .quad 0x046e3a616bc89b9e .quad 0x14e65442f03906be .quad 0x4a019d54e362be2a .quad 0x68ccdfec8dc230c7 .quad 0x7cfb7e3faf6b861c // 2^160 * 1 * G .quad 0x4637974e8c58aedc .quad 0xb9ef22fbabf041a4 .quad 0xe185d956e980718a .quad 0x2f1b78fab143a8a6 .quad 0x96eebffb305b2f51 .quad 0xd3f938ad889596b8 .quad 0xf0f52dc746d5dd25 .quad 0x57968290bb3a0095 .quad 0xf71ab8430a20e101 .quad 0xf393658d24f0ec47 .quad 0xcf7509a86ee2eed1 .quad 0x7dc43e35dc2aa3e1 // 2^160 * 2 * G .quad 0x85966665887dd9c3 .quad 0xc90f9b314bb05355 .quad 0xc6e08df8ef2079b1 .quad 0x7ef72016758cc12f .quad 0x5a782a5c273e9718 .quad 0x3576c6995e4efd94 .quad 0x0f2ed8051f237d3e .quad 0x044fb81d82d50a99 .quad 0xc1df18c5a907e3d9 .quad 0x57b3371dce4c6359 .quad 0xca704534b201bb49 .quad 0x7f79823f9c30dd2e // 2^160 * 3 * G .quad 0x8334d239a3b513e8 .quad 0xc13670d4b91fa8d8 .quad 0x12b54136f590bd33 .quad 0x0a4e0373d784d9b4 .quad 0x6a9c1ff068f587ba .quad 0x0827894e0050c8de .quad 0x3cbf99557ded5be7 .quad 0x64a9b0431c06d6f0 .quad 0x2eb3d6a15b7d2919 .quad 0xb0b4f6a0d53a8235 .quad 0x7156ce4389a45d47 .quad 0x071a7d0ace18346c // 2^160 * 4 * G .quad 0xd3072daac887ba0b .quad 0x01262905bfa562ee .quad 0xcf543002c0ef768b .quad 0x2c3bcc7146ea7e9c .quad 0xcc0c355220e14431 .quad 0x0d65950709b15141 .quad 0x9af5621b209d5f36 .quad 0x7c69bcf7617755d3 .quad 0x07f0d7eb04e8295f .quad 0x10db18252f50f37d .quad 0xe951a9a3171798d7 .quad 0x6f5a9a7322aca51d // 2^160 * 5 * G .quad 0x8ba1000c2f41c6c5 .quad 0xc49f79c10cfefb9b .quad 0x4efa47703cc51c9f .quad 0x494e21a2e147afca .quad 0xe729d4eba3d944be .quad 0x8d9e09408078af9e .quad 0x4525567a47869c03 .quad 0x02ab9680ee8d3b24 .quad 0xefa48a85dde50d9a .quad 0x219a224e0fb9a249 .quad 0xfa091f1dd91ef6d9 .quad 0x6b5d76cbea46bb34 // 2^160 * 6 * G .quad 0x8857556cec0cd994 .quad 0x6472dc6f5cd01dba .quad 0xaf0169148f42b477 .quad 0x0ae333f685277354 .quad 0xe0f941171e782522 .quad 0xf1e6ae74036936d3 .quad 0x408b3ea2d0fcc746 .quad 0x16fb869c03dd313e .quad 0x288e199733b60962 .quad 0x24fc72b4d8abe133 .quad 0x4811f7ed0991d03e .quad 0x3f81e38b8f70d075 // 2^160 * 7 * G .quad 0x7f910fcc7ed9affe .quad 0x545cb8a12465874b .quad 0xa8397ed24b0c4704 .quad 0x50510fc104f50993 .quad 0x0adb7f355f17c824 .quad 0x74b923c3d74299a4 .quad 0xd57c3e8bcbf8eaf7 .quad 0x0ad3e2d34cdedc3d .quad 0x6f0c0fc5336e249d .quad 0x745ede19c331cfd9 .quad 0xf2d6fd0009eefe1c .quad 0x127c158bf0fa1ebe // 2^160 * 8 * G .quad 0xf6197c422e9879a2 .quad 0xa44addd452ca3647 .quad 0x9b413fc14b4eaccb .quad 0x354ef87d07ef4f68 .quad 0xdea28fc4ae51b974 .quad 0x1d9973d3744dfe96 .quad 0x6240680b873848a8 .quad 0x4ed82479d167df95 .quad 0xfee3b52260c5d975 .quad 0x50352efceb41b0b8 .quad 0x8808ac30a9f6653c .quad 0x302d92d20539236d // 2^164 * 1 * G .quad 0x4c59023fcb3efb7c .quad 0x6c2fcb99c63c2a94 .quad 0xba4190e2c3c7e084 .quad 0x0e545daea51874d9 .quad 0x957b8b8b0df53c30 .quad 0x2a1c770a8e60f098 .quad 0xbbc7a670345796de .quad 0x22a48f9a90c99bc9 .quad 0x6b7dc0dc8d3fac58 .quad 0x5497cd6ce6e42bfd .quad 0x542f7d1bf400d305 .quad 0x4159f47f048d9136 // 2^164 * 2 * G .quad 0x20ad660839e31e32 .quad 0xf81e1bd58405be50 .quad 0xf8064056f4dabc69 .quad 0x14d23dd4ce71b975 .quad 0x748515a8bbd24839 .quad 0x77128347afb02b55 .quad 0x50ba2ac649a2a17f .quad 0x060525513ad730f1 .quad 0xf2398e098aa27f82 .quad 0x6d7982bb89a1b024 .quad 0xfa694084214dd24c .quad 0x71ab966fa32301c3 // 2^164 * 3 * G .quad 0x2dcbd8e34ded02fc .quad 0x1151f3ec596f22aa .quad 0xbca255434e0328da .quad 
0x35768fbe92411b22 .quad 0xb1088a0702809955 .quad 0x43b273ea0b43c391 .quad 0xca9b67aefe0686ed .quad 0x605eecbf8335f4ed .quad 0x83200a656c340431 .quad 0x9fcd71678ee59c2f .quad 0x75d4613f71300f8a .quad 0x7a912faf60f542f9 // 2^164 * 4 * G .quad 0xb204585e5edc1a43 .quad 0x9f0e16ee5897c73c .quad 0x5b82c0ae4e70483c .quad 0x624a170e2bddf9be .quad 0x253f4f8dfa2d5597 .quad 0x25e49c405477130c .quad 0x00c052e5996b1102 .quad 0x33cb966e33bb6c4a .quad 0x597028047f116909 .quad 0x828ac41c1e564467 .quad 0x70417dbde6217387 .quad 0x721627aefbac4384 // 2^164 * 5 * G .quad 0x97d03bc38736add5 .quad 0x2f1422afc532b130 .quad 0x3aa68a057101bbc4 .quad 0x4c946cf7e74f9fa7 .quad 0xfd3097bc410b2f22 .quad 0xf1a05da7b5cfa844 .quad 0x61289a1def57ca74 .quad 0x245ea199bb821902 .quad 0xaedca66978d477f8 .quad 0x1898ba3c29117fe1 .quad 0xcf73f983720cbd58 .quad 0x67da12e6b8b56351 // 2^164 * 6 * G .quad 0x7067e187b4bd6e07 .quad 0x6e8f0203c7d1fe74 .quad 0x93c6aa2f38c85a30 .quad 0x76297d1f3d75a78a .quad 0x2b7ef3d38ec8308c .quad 0x828fd7ec71eb94ab .quad 0x807c3b36c5062abd .quad 0x0cb64cb831a94141 .quad 0x3030fc33534c6378 .quad 0xb9635c5ce541e861 .quad 0x15d9a9bed9b2c728 .quad 0x49233ea3f3775dcb // 2^164 * 7 * G .quad 0x629398fa8dbffc3a .quad 0xe12fe52dd54db455 .quad 0xf3be11dfdaf25295 .quad 0x628b140dce5e7b51 .quad 0x7b3985fe1c9f249b .quad 0x4fd6b2d5a1233293 .quad 0xceb345941adf4d62 .quad 0x6987ff6f542de50c .quad 0x47e241428f83753c .quad 0x6317bebc866af997 .quad 0xdabb5b433d1a9829 .quad 0x074d8d245287fb2d // 2^164 * 8 * G .quad 0x8337d9cd440bfc31 .quad 0x729d2ca1af318fd7 .quad 0xa040a4a4772c2070 .quad 0x46002ef03a7349be .quad 0x481875c6c0e31488 .quad 0x219429b2e22034b4 .quad 0x7223c98a31283b65 .quad 0x3420d60b342277f9 .quad 0xfaa23adeaffe65f7 .quad 0x78261ed45be0764c .quad 0x441c0a1e2f164403 .quad 0x5aea8e567a87d395 // 2^168 * 1 * G .quad 0x7813c1a2bca4283d .quad 0xed62f091a1863dd9 .quad 0xaec7bcb8c268fa86 .quad 0x10e5d3b76f1cae4c .quad 0x2dbc6fb6e4e0f177 .quad 0x04e1bf29a4bd6a93 .quad 0x5e1966d4787af6e8 .quad 0x0edc5f5eb426d060 .quad 0x5453bfd653da8e67 .quad 0xe9dc1eec24a9f641 .quad 0xbf87263b03578a23 .quad 0x45b46c51361cba72 // 2^168 * 2 * G .quad 0xa9402abf314f7fa1 .quad 0xe257f1dc8e8cf450 .quad 0x1dbbd54b23a8be84 .quad 0x2177bfa36dcb713b .quad 0xce9d4ddd8a7fe3e4 .quad 0xab13645676620e30 .quad 0x4b594f7bb30e9958 .quad 0x5c1c0aef321229df .quad 0x37081bbcfa79db8f .quad 0x6048811ec25f59b3 .quad 0x087a76659c832487 .quad 0x4ae619387d8ab5bb // 2^168 * 3 * G .quad 0x8ddbf6aa5344a32e .quad 0x7d88eab4b41b4078 .quad 0x5eb0eb974a130d60 .quad 0x1a00d91b17bf3e03 .quad 0x61117e44985bfb83 .quad 0xfce0462a71963136 .quad 0x83ac3448d425904b .quad 0x75685abe5ba43d64 .quad 0x6e960933eb61f2b2 .quad 0x543d0fa8c9ff4952 .quad 0xdf7275107af66569 .quad 0x135529b623b0e6aa // 2^168 * 4 * G .quad 0x18f0dbd7add1d518 .quad 0x979f7888cfc11f11 .quad 0x8732e1f07114759b .quad 0x79b5b81a65ca3a01 .quad 0xf5c716bce22e83fe .quad 0xb42beb19e80985c1 .quad 0xec9da63714254aae .quad 0x5972ea051590a613 .quad 0x0fd4ac20dc8f7811 .quad 0x9a9ad294ac4d4fa8 .quad 0xc01b2d64b3360434 .quad 0x4f7e9c95905f3bdb // 2^168 * 5 * G .quad 0x62674bbc5781302e .quad 0xd8520f3989addc0f .quad 0x8c2999ae53fbd9c6 .quad 0x31993ad92e638e4c .quad 0x71c8443d355299fe .quad 0x8bcd3b1cdbebead7 .quad 0x8092499ef1a49466 .quad 0x1942eec4a144adc8 .quad 0x7dac5319ae234992 .quad 0x2c1b3d910cea3e92 .quad 0x553ce494253c1122 .quad 0x2a0a65314ef9ca75 // 2^168 * 6 * G .quad 0x2db7937ff7f927c2 .quad 0xdb741f0617d0a635 .quad 0x5982f3a21155af76 .quad 0x4cf6e218647c2ded .quad 0xcf361acd3c1c793a .quad 
0x2f9ebcac5a35bc3b .quad 0x60e860e9a8cda6ab .quad 0x055dc39b6dea1a13 .quad 0xb119227cc28d5bb6 .quad 0x07e24ebc774dffab .quad 0xa83c78cee4a32c89 .quad 0x121a307710aa24b6 // 2^168 * 7 * G .quad 0xe4db5d5e9f034a97 .quad 0xe153fc093034bc2d .quad 0x460546919551d3b1 .quad 0x333fc76c7a40e52d .quad 0xd659713ec77483c9 .quad 0x88bfe077b82b96af .quad 0x289e28231097bcd3 .quad 0x527bb94a6ced3a9b .quad 0x563d992a995b482e .quad 0x3405d07c6e383801 .quad 0x485035de2f64d8e5 .quad 0x6b89069b20a7a9f7 // 2^168 * 8 * G .quad 0x812aa0416270220d .quad 0x995a89faf9245b4e .quad 0xffadc4ce5072ef05 .quad 0x23bc2103aa73eb73 .quad 0x4082fa8cb5c7db77 .quad 0x068686f8c734c155 .quad 0x29e6c8d9f6e7a57e .quad 0x0473d308a7639bcf .quad 0xcaee792603589e05 .quad 0x2b4b421246dcc492 .quad 0x02a1ef74e601a94f .quad 0x102f73bfde04341a // 2^172 * 1 * G .quad 0xb5a2d50c7ec20d3e .quad 0xc64bdd6ea0c97263 .quad 0x56e89052c1ff734d .quad 0x4929c6f72b2ffaba .quad 0x358ecba293a36247 .quad 0xaf8f9862b268fd65 .quad 0x412f7e9968a01c89 .quad 0x5786f312cd754524 .quad 0x337788ffca14032c .quad 0xf3921028447f1ee3 .quad 0x8b14071f231bccad .quad 0x4c817b4bf2344783 // 2^172 * 2 * G .quad 0x0ff853852871b96e .quad 0xe13e9fab60c3f1bb .quad 0xeefd595325344402 .quad 0x0a37c37075b7744b .quad 0x413ba057a40b4484 .quad 0xba4c2e1a4f5f6a43 .quad 0x614ba0a5aee1d61c .quad 0x78a1531a8b05dc53 .quad 0x6cbdf1703ad0562b .quad 0x8ecf4830c92521a3 .quad 0xdaebd303fd8424e7 .quad 0x72ad82a42e5ec56f // 2^172 * 3 * G .quad 0x3f9e8e35bafb65f6 .quad 0x39d69ec8f27293a1 .quad 0x6cb8cd958cf6a3d0 .quad 0x1734778173adae6d .quad 0xc368939167024bc3 .quad 0x8e69d16d49502fda .quad 0xfcf2ec3ce45f4b29 .quad 0x065f669ea3b4cbc4 .quad 0x8a00aec75532db4d .quad 0xb869a4e443e31bb1 .quad 0x4a0f8552d3a7f515 .quad 0x19adeb7c303d7c08 // 2^172 * 4 * G .quad 0xc720cb6153ead9a3 .quad 0x55b2c97f512b636e .quad 0xb1e35b5fd40290b1 .quad 0x2fd9ccf13b530ee2 .quad 0x9d05ba7d43c31794 .quad 0x2470c8ff93322526 .quad 0x8323dec816197438 .quad 0x2852709881569b53 .quad 0x07bd475b47f796b8 .quad 0xd2c7b013542c8f54 .quad 0x2dbd23f43b24f87e .quad 0x6551afd77b0901d6 // 2^172 * 5 * G .quad 0x4546baaf54aac27f .quad 0xf6f66fecb2a45a28 .quad 0x582d1b5b562bcfe8 .quad 0x44b123f3920f785f .quad 0x68a24ce3a1d5c9ac .quad 0xbb77a33d10ff6461 .quad 0x0f86ce4425d3166e .quad 0x56507c0950b9623b .quad 0x1206f0b7d1713e63 .quad 0x353fe3d915bafc74 .quad 0x194ceb970ad9d94d .quad 0x62fadd7cf9d03ad3 // 2^172 * 6 * G .quad 0xc6b5967b5598a074 .quad 0x5efe91ce8e493e25 .quad 0xd4b72c4549280888 .quad 0x20ef1149a26740c2 .quad 0x3cd7bc61e7ce4594 .quad 0xcd6b35a9b7dd267e .quad 0xa080abc84366ef27 .quad 0x6ec7c46f59c79711 .quad 0x2f07ad636f09a8a2 .quad 0x8697e6ce24205e7d .quad 0xc0aefc05ee35a139 .quad 0x15e80958b5f9d897 // 2^172 * 7 * G .quad 0x25a5ef7d0c3e235b .quad 0x6c39c17fbe134ee7 .quad 0xc774e1342dc5c327 .quad 0x021354b892021f39 .quad 0x4dd1ed355bb061c4 .quad 0x42dc0cef941c0700 .quad 0x61305dc1fd86340e .quad 0x56b2cc930e55a443 .quad 0x1df79da6a6bfc5a2 .quad 0x02f3a2749fde4369 .quad 0xb323d9f2cda390a7 .quad 0x7be0847b8774d363 // 2^172 * 8 * G .quad 0x8c99cc5a8b3f55c3 .quad 0x0611d7253fded2a0 .quad 0xed2995ff36b70a36 .quad 0x1f699a54d78a2619 .quad 0x1466f5af5307fa11 .quad 0x817fcc7ded6c0af2 .quad 0x0a6de44ec3a4a3fb .quad 0x74071475bc927d0b .quad 0xe77292f373e7ea8a .quad 0x296537d2cb045a31 .quad 0x1bd0653ed3274fde .quad 0x2f9a2c4476bd2966 // 2^176 * 1 * G .quad 0xeb18b9ab7f5745c6 .quad 0x023a8aee5787c690 .quad 0xb72712da2df7afa9 .quad 0x36597d25ea5c013d .quad 0xa2b4dae0b5511c9a .quad 0x7ac860292bffff06 .quad 0x981f375df5504234 .quad 
0x3f6bd725da4ea12d .quad 0x734d8d7b106058ac .quad 0xd940579e6fc6905f .quad 0x6466f8f99202932d .quad 0x7b7ecc19da60d6d0 // 2^176 * 2 * G .quad 0x78c2373c695c690d .quad 0xdd252e660642906e .quad 0x951d44444ae12bd2 .quad 0x4235ad7601743956 .quad 0x6dae4a51a77cfa9b .quad 0x82263654e7a38650 .quad 0x09bbffcd8f2d82db .quad 0x03bedc661bf5caba .quad 0x6258cb0d078975f5 .quad 0x492942549189f298 .quad 0xa0cab423e2e36ee4 .quad 0x0e7ce2b0cdf066a1 // 2^176 * 3 * G .quad 0xc494643ac48c85a3 .quad 0xfd361df43c6139ad .quad 0x09db17dd3ae94d48 .quad 0x666e0a5d8fb4674a .quad 0xfea6fedfd94b70f9 .quad 0xf130c051c1fcba2d .quad 0x4882d47e7f2fab89 .quad 0x615256138aeceeb5 .quad 0x2abbf64e4870cb0d .quad 0xcd65bcf0aa458b6b .quad 0x9abe4eba75e8985d .quad 0x7f0bc810d514dee4 // 2^176 * 4 * G .quad 0xb9006ba426f4136f .quad 0x8d67369e57e03035 .quad 0xcbc8dfd94f463c28 .quad 0x0d1f8dbcf8eedbf5 .quad 0x83ac9dad737213a0 .quad 0x9ff6f8ba2ef72e98 .quad 0x311e2edd43ec6957 .quad 0x1d3a907ddec5ab75 .quad 0xba1693313ed081dc .quad 0x29329fad851b3480 .quad 0x0128013c030321cb .quad 0x00011b44a31bfde3 // 2^176 * 5 * G .quad 0x3fdfa06c3fc66c0c .quad 0x5d40e38e4dd60dd2 .quad 0x7ae38b38268e4d71 .quad 0x3ac48d916e8357e1 .quad 0x16561f696a0aa75c .quad 0xc1bf725c5852bd6a .quad 0x11a8dd7f9a7966ad .quad 0x63d988a2d2851026 .quad 0x00120753afbd232e .quad 0xe92bceb8fdd8f683 .quad 0xf81669b384e72b91 .quad 0x33fad52b2368a066 // 2^176 * 6 * G .quad 0x540649c6c5e41e16 .quad 0x0af86430333f7735 .quad 0xb2acfcd2f305e746 .quad 0x16c0f429a256dca7 .quad 0x8d2cc8d0c422cfe8 .quad 0x072b4f7b05a13acb .quad 0xa3feb6e6ecf6a56f .quad 0x3cc355ccb90a71e2 .quad 0xe9b69443903e9131 .quad 0xb8a494cb7a5637ce .quad 0xc87cd1a4baba9244 .quad 0x631eaf426bae7568 // 2^176 * 7 * G .quad 0xb3e90410da66fe9f .quad 0x85dd4b526c16e5a6 .quad 0xbc3d97611ef9bf83 .quad 0x5599648b1ea919b5 .quad 0x47d975b9a3700de8 .quad 0x7280c5fbe2f80552 .quad 0x53658f2732e45de1 .quad 0x431f2c7f665f80b5 .quad 0xd6026344858f7b19 .quad 0x14ab352fa1ea514a .quad 0x8900441a2090a9d7 .quad 0x7b04715f91253b26 // 2^176 * 8 * G .quad 0x83edbd28acf6ae43 .quad 0x86357c8b7d5c7ab4 .quad 0xc0404769b7eb2c44 .quad 0x59b37bf5c2f6583f .quad 0xb376c280c4e6bac6 .quad 0x970ed3dd6d1d9b0b .quad 0xb09a9558450bf944 .quad 0x48d0acfa57cde223 .quad 0xb60f26e47dabe671 .quad 0xf1d1a197622f3a37 .quad 0x4208ce7ee9960394 .quad 0x16234191336d3bdb // 2^180 * 1 * G .quad 0xf19aeac733a63aef .quad 0x2c7fba5d4442454e .quad 0x5da87aa04795e441 .quad 0x413051e1a4e0b0f5 .quad 0x852dd1fd3d578bbe .quad 0x2b65ce72c3286108 .quad 0x658c07f4eace2273 .quad 0x0933f804ec38ab40 .quad 0xa7ab69798d496476 .quad 0x8121aadefcb5abc8 .quad 0xa5dc12ef7b539472 .quad 0x07fd47065e45351a // 2^180 * 2 * G .quad 0xc8583c3d258d2bcd .quad 0x17029a4daf60b73f .quad 0xfa0fc9d6416a3781 .quad 0x1c1e5fba38b3fb23 .quad 0x304211559ae8e7c3 .quad 0xf281b229944882a5 .quad 0x8a13ac2e378250e4 .quad 0x014afa0954ba48f4 .quad 0xcb3197001bb3666c .quad 0x330060524bffecb9 .quad 0x293711991a88233c .quad 0x291884363d4ed364 // 2^180 * 3 * G .quad 0x033c6805dc4babfa .quad 0x2c15bf5e5596ecc1 .quad 0x1bc70624b59b1d3b .quad 0x3ede9850a19f0ec5 .quad 0xfb9d37c3bc1ab6eb .quad 0x02be14534d57a240 .quad 0xf4d73415f8a5e1f6 .quad 0x5964f4300ccc8188 .quad 0xe44a23152d096800 .quad 0x5c08c55970866996 .quad 0xdf2db60a46affb6e .quad 0x579155c1f856fd89 // 2^180 * 4 * G .quad 0x96324edd12e0c9ef .quad 0x468b878df2420297 .quad 0x199a3776a4f573be .quad 0x1e7fbcf18e91e92a .quad 0xb5f16b630817e7a6 .quad 0x808c69233c351026 .quad 0x324a983b54cef201 .quad 0x53c092084a485345 .quad 0xd2d41481f1cbafbf .quad 
0x231d2db6716174e5 .quad 0x0b7d7656e2a55c98 .quad 0x3e955cd82aa495f6 // 2^180 * 5 * G .quad 0xe48f535e3ed15433 .quad 0xd075692a0d7270a3 .quad 0x40fbd21daade6387 .quad 0x14264887cf4495f5 .quad 0xab39f3ef61bb3a3f .quad 0x8eb400652eb9193e .quad 0xb5de6ecc38c11f74 .quad 0x654d7e9626f3c49f .quad 0xe564cfdd5c7d2ceb .quad 0x82eeafded737ccb9 .quad 0x6107db62d1f9b0ab .quad 0x0b6baac3b4358dbb // 2^180 * 6 * G .quad 0x7ae62bcb8622fe98 .quad 0x47762256ceb891af .quad 0x1a5a92bcf2e406b4 .quad 0x7d29401784e41501 .quad 0x204abad63700a93b .quad 0xbe0023d3da779373 .quad 0xd85f0346633ab709 .quad 0x00496dc490820412 .quad 0x1c74b88dc27e6360 .quad 0x074854268d14850c .quad 0xa145fb7b3e0dcb30 .quad 0x10843f1b43803b23 // 2^180 * 7 * G .quad 0xc5f90455376276dd .quad 0xce59158dd7645cd9 .quad 0x92f65d511d366b39 .quad 0x11574b6e526996c4 .quad 0xd56f672de324689b .quad 0xd1da8aedb394a981 .quad 0xdd7b58fe9168cfed .quad 0x7ce246cd4d56c1e8 .quad 0xb8f4308e7f80be53 .quad 0x5f3cb8cb34a9d397 .quad 0x18a961bd33cc2b2c .quad 0x710045fb3a9af671 // 2^180 * 8 * G .quad 0x73f93d36101b95eb .quad 0xfaef33794f6f4486 .quad 0x5651735f8f15e562 .quad 0x7fa3f19058b40da1 .quad 0xa03fc862059d699e .quad 0x2370cfa19a619e69 .quad 0xc4fe3b122f823deb .quad 0x1d1b056fa7f0844e .quad 0x1bc64631e56bf61f .quad 0xd379ab106e5382a3 .quad 0x4d58c57e0540168d .quad 0x566256628442d8e4 // 2^184 * 1 * G .quad 0xb9e499def6267ff6 .quad 0x7772ca7b742c0843 .quad 0x23a0153fe9a4f2b1 .quad 0x2cdfdfecd5d05006 .quad 0xdd499cd61ff38640 .quad 0x29cd9bc3063625a0 .quad 0x51e2d8023dd73dc3 .quad 0x4a25707a203b9231 .quad 0x2ab7668a53f6ed6a .quad 0x304242581dd170a1 .quad 0x4000144c3ae20161 .quad 0x5721896d248e49fc // 2^184 * 2 * G .quad 0x0b6e5517fd181bae .quad 0x9022629f2bb963b4 .quad 0x5509bce932064625 .quad 0x578edd74f63c13da .quad 0x285d5091a1d0da4e .quad 0x4baa6fa7b5fe3e08 .quad 0x63e5177ce19393b3 .quad 0x03c935afc4b030fd .quad 0x997276c6492b0c3d .quad 0x47ccc2c4dfe205fc .quad 0xdcd29b84dd623a3c .quad 0x3ec2ab590288c7a2 // 2^184 * 3 * G .quad 0xa1a0d27be4d87bb9 .quad 0xa98b4deb61391aed .quad 0x99a0ddd073cb9b83 .quad 0x2dd5c25a200fcace .quad 0xa7213a09ae32d1cb .quad 0x0f2b87df40f5c2d5 .quad 0x0baea4c6e81eab29 .quad 0x0e1bf66c6adbac5e .quad 0xe2abd5e9792c887e .quad 0x1a020018cb926d5d .quad 0xbfba69cdbaae5f1e .quad 0x730548b35ae88f5f // 2^184 * 4 * G .quad 0xc43551a3cba8b8ee .quad 0x65a26f1db2115f16 .quad 0x760f4f52ab8c3850 .quad 0x3043443b411db8ca .quad 0x805b094ba1d6e334 .quad 0xbf3ef17709353f19 .quad 0x423f06cb0622702b .quad 0x585a2277d87845dd .quad 0xa18a5f8233d48962 .quad 0x6698c4b5ec78257f .quad 0xa78e6fa5373e41ff .quad 0x7656278950ef981f // 2^184 * 5 * G .quad 0x38c3cf59d51fc8c0 .quad 0x9bedd2fd0506b6f2 .quad 0x26bf109fab570e8f .quad 0x3f4160a8c1b846a6 .quad 0xe17073a3ea86cf9d .quad 0x3a8cfbb707155fdc .quad 0x4853e7fc31838a8e .quad 0x28bbf484b613f616 .quad 0xf2612f5c6f136c7c .quad 0xafead107f6dd11be .quad 0x527e9ad213de6f33 .quad 0x1e79cb358188f75d // 2^184 * 6 * G .quad 0x013436c3eef7e3f1 .quad 0x828b6a7ffe9e10f8 .quad 0x7ff908e5bcf9defc .quad 0x65d7951b3a3b3831 .quad 0x77e953d8f5e08181 .quad 0x84a50c44299dded9 .quad 0xdc6c2d0c864525e5 .quad 0x478ab52d39d1f2f4 .quad 0x66a6a4d39252d159 .quad 0xe5dde1bc871ac807 .quad 0xb82c6b40a6c1c96f .quad 0x16d87a411a212214 // 2^184 * 7 * G .quad 0xb3bd7e5a42066215 .quad 0x879be3cd0c5a24c1 .quad 0x57c05db1d6f994b7 .quad 0x28f87c8165f38ca6 .quad 0xfba4d5e2d54e0583 .quad 0xe21fafd72ebd99fa .quad 0x497ac2736ee9778f .quad 0x1f990b577a5a6dde .quad 0xa3344ead1be8f7d6 .quad 0x7d1e50ebacea798f .quad 0x77c6569e520de052 .quad 
0x45882fe1534d6d3e // 2^184 * 8 * G .quad 0x6669345d757983d6 .quad 0x62b6ed1117aa11a6 .quad 0x7ddd1857985e128f .quad 0x688fe5b8f626f6dd .quad 0xd8ac9929943c6fe4 .quad 0xb5f9f161a38392a2 .quad 0x2699db13bec89af3 .quad 0x7dcf843ce405f074 .quad 0x6c90d6484a4732c0 .quad 0xd52143fdca563299 .quad 0xb3be28c3915dc6e1 .quad 0x6739687e7327191b // 2^188 * 1 * G .quad 0x9f65c5ea200814cf .quad 0x840536e169a31740 .quad 0x8b0ed13925c8b4ad .quad 0x0080dbafe936361d .quad 0x8ce5aad0c9cb971f .quad 0x1156aaa99fd54a29 .quad 0x41f7247015af9b78 .quad 0x1fe8cca8420f49aa .quad 0x72a1848f3c0cc82a .quad 0x38c560c2877c9e54 .quad 0x5004e228ce554140 .quad 0x042418a103429d71 // 2^188 * 2 * G .quad 0x899dea51abf3ff5f .quad 0x9b93a8672fc2d8ba .quad 0x2c38cb97be6ebd5c .quad 0x114d578497263b5d .quad 0x58e84c6f20816247 .quad 0x8db2b2b6e36fd793 .quad 0x977182561d484d85 .quad 0x0822024f8632abd7 .quad 0xb301bb7c6b1beca3 .quad 0x55393f6dc6eb1375 .quad 0x910d281097b6e4eb .quad 0x1ad4548d9d479ea3 // 2^188 * 3 * G .quad 0xcd5a7da0389a48fd .quad 0xb38fa4aa9a78371e .quad 0xc6d9761b2cdb8e6c .quad 0x35cf51dbc97e1443 .quad 0xa06fe66d0fe9fed3 .quad 0xa8733a401c587909 .quad 0x30d14d800df98953 .quad 0x41ce5876c7b30258 .quad 0x59ac3bc5d670c022 .quad 0xeae67c109b119406 .quad 0x9798bdf0b3782fda .quad 0x651e3201fd074092 // 2^188 * 4 * G .quad 0xd63d8483ef30c5cf .quad 0x4cd4b4962361cc0c .quad 0xee90e500a48426ac .quad 0x0af51d7d18c14eeb .quad 0xa57ba4a01efcae9e .quad 0x769f4beedc308a94 .quad 0xd1f10eeb3603cb2e .quad 0x4099ce5e7e441278 .quad 0x1ac98e4f8a5121e9 .quad 0x7dae9544dbfa2fe0 .quad 0x8320aa0dd6430df9 .quad 0x667282652c4a2fb5 // 2^188 * 5 * G .quad 0x874621f4d86bc9ab .quad 0xb54c7bbe56fe6fea .quad 0x077a24257fadc22c .quad 0x1ab53be419b90d39 .quad 0xada8b6e02946db23 .quad 0x1c0ce51a7b253ab7 .quad 0x8448c85a66dd485b .quad 0x7f1fc025d0675adf .quad 0xd8ee1b18319ea6aa .quad 0x004d88083a21f0da .quad 0x3bd6aa1d883a4f4b .quad 0x4db9a3a6dfd9fd14 // 2^188 * 6 * G .quad 0x8ce7b23bb99c0755 .quad 0x35c5d6edc4f50f7a .quad 0x7e1e2ed2ed9b50c3 .quad 0x36305f16e8934da1 .quad 0xd95b00bbcbb77c68 .quad 0xddbc846a91f17849 .quad 0x7cf700aebe28d9b3 .quad 0x5ce1285c85d31f3e .quad 0x31b6972d98b0bde8 .quad 0x7d920706aca6de5b .quad 0xe67310f8908a659f .quad 0x50fac2a6efdf0235 // 2^188 * 7 * G .quad 0xf3d3a9f35b880f5a .quad 0xedec050cdb03e7c2 .quad 0xa896981ff9f0b1a2 .quad 0x49a4ae2bac5e34a4 .quad 0x295b1c86f6f449bc .quad 0x51b2e84a1f0ab4dd .quad 0xc001cb30aa8e551d .quad 0x6a28d35944f43662 .quad 0x28bb12ee04a740e0 .quad 0x14313bbd9bce8174 .quad 0x72f5b5e4e8c10c40 .quad 0x7cbfb19936adcd5b // 2^188 * 8 * G .quad 0xa311ddc26b89792d .quad 0x1b30b4c6da512664 .quad 0x0ca77b4ccf150859 .quad 0x1de443df1b009408 .quad 0x8e793a7acc36e6e0 .quad 0xf9fab7a37d586eed .quad 0x3a4f9692bae1f4e4 .quad 0x1c14b03eff5f447e .quad 0x19647bd114a85291 .quad 0x57b76cb21034d3af .quad 0x6329db440f9d6dfa .quad 0x5ef43e586a571493 // 2^192 * 1 * G .quad 0xef782014385675a6 .quad 0xa2649f30aafda9e8 .quad 0x4cd1eb505cdfa8cb .quad 0x46115aba1d4dc0b3 .quad 0xa66dcc9dc80c1ac0 .quad 0x97a05cf41b38a436 .quad 0xa7ebf3be95dbd7c6 .quad 0x7da0b8f68d7e7dab .quad 0xd40f1953c3b5da76 .quad 0x1dac6f7321119e9b .quad 0x03cc6021feb25960 .quad 0x5a5f887e83674b4b // 2^192 * 2 * G .quad 0x8f6301cf70a13d11 .quad 0xcfceb815350dd0c4 .quad 0xf70297d4a4bca47e .quad 0x3669b656e44d1434 .quad 0x9e9628d3a0a643b9 .quad 0xb5c3cb00e6c32064 .quad 0x9b5302897c2dec32 .quad 0x43e37ae2d5d1c70c .quad 0x387e3f06eda6e133 .quad 0x67301d5199a13ac0 .quad 0xbd5ad8f836263811 .quad 0x6a21e6cd4fd5e9be // 2^192 * 3 * G .quad 0xf1c6170a3046e65f .quad 
0x58712a2a00d23524 .quad 0x69dbbd3c8c82b755 .quad 0x586bf9f1a195ff57 .quad 0xef4129126699b2e3 .quad 0x71d30847708d1301 .quad 0x325432d01182b0bd .quad 0x45371b07001e8b36 .quad 0xa6db088d5ef8790b .quad 0x5278f0dc610937e5 .quad 0xac0349d261a16eb8 .quad 0x0eafb03790e52179 // 2^192 * 4 * G .quad 0x960555c13748042f .quad 0x219a41e6820baa11 .quad 0x1c81f73873486d0c .quad 0x309acc675a02c661 .quad 0x5140805e0f75ae1d .quad 0xec02fbe32662cc30 .quad 0x2cebdf1eea92396d .quad 0x44ae3344c5435bb3 .quad 0x9cf289b9bba543ee .quad 0xf3760e9d5ac97142 .quad 0x1d82e5c64f9360aa .quad 0x62d5221b7f94678f // 2^192 * 5 * G .quad 0x524c299c18d0936d .quad 0xc86bb56c8a0c1a0c .quad 0xa375052edb4a8631 .quad 0x5c0efde4bc754562 .quad 0x7585d4263af77a3c .quad 0xdfae7b11fee9144d .quad 0xa506708059f7193d .quad 0x14f29a5383922037 .quad 0xdf717edc25b2d7f5 .quad 0x21f970db99b53040 .quad 0xda9234b7c3ed4c62 .quad 0x5e72365c7bee093e // 2^192 * 6 * G .quad 0x575bfc074571217f .quad 0x3779675d0694d95b .quad 0x9a0a37bbf4191e33 .quad 0x77f1104c47b4eabc .quad 0x7d9339062f08b33e .quad 0x5b9659e5df9f32be .quad 0xacff3dad1f9ebdfd .quad 0x70b20555cb7349b7 .quad 0xbe5113c555112c4c .quad 0x6688423a9a881fcd .quad 0x446677855e503b47 .quad 0x0e34398f4a06404a // 2^192 * 7 * G .quad 0xb67d22d93ecebde8 .quad 0x09b3e84127822f07 .quad 0x743fa61fb05b6d8d .quad 0x5e5405368a362372 .quad 0x18930b093e4b1928 .quad 0x7de3e10e73f3f640 .quad 0xf43217da73395d6f .quad 0x6f8aded6ca379c3e .quad 0xe340123dfdb7b29a .quad 0x487b97e1a21ab291 .quad 0xf9967d02fde6949e .quad 0x780de72ec8d3de97 // 2^192 * 8 * G .quad 0x0ae28545089ae7bc .quad 0x388ddecf1c7f4d06 .quad 0x38ac15510a4811b8 .quad 0x0eb28bf671928ce4 .quad 0x671feaf300f42772 .quad 0x8f72eb2a2a8c41aa .quad 0x29a17fd797373292 .quad 0x1defc6ad32b587a6 .quad 0xaf5bbe1aef5195a7 .quad 0x148c1277917b15ed .quad 0x2991f7fb7ae5da2e .quad 0x467d201bf8dd2867 // 2^196 * 1 * G .quad 0x7906ee72f7bd2e6b .quad 0x05d270d6109abf4e .quad 0x8d5cfe45b941a8a4 .quad 0x44c218671c974287 .quad 0x745f9d56296bc318 .quad 0x993580d4d8152e65 .quad 0xb0e5b13f5839e9ce .quad 0x51fc2b28d43921c0 .quad 0x1b8fd11795e2a98c .quad 0x1c4e5ee12b6b6291 .quad 0x5b30e7107424b572 .quad 0x6e6b9de84c4f4ac6 // 2^196 * 2 * G .quad 0xdff25fce4b1de151 .quad 0xd841c0c7e11c4025 .quad 0x2554b3c854749c87 .quad 0x2d292459908e0df9 .quad 0x6b7c5f10f80cb088 .quad 0x736b54dc56e42151 .quad 0xc2b620a5c6ef99c4 .quad 0x5f4c802cc3a06f42 .quad 0x9b65c8f17d0752da .quad 0x881ce338c77ee800 .quad 0xc3b514f05b62f9e3 .quad 0x66ed5dd5bec10d48 // 2^196 * 3 * G .quad 0x7d38a1c20bb2089d .quad 0x808334e196ccd412 .quad 0xc4a70b8c6c97d313 .quad 0x2eacf8bc03007f20 .quad 0xf0adf3c9cbca047d .quad 0x81c3b2cbf4552f6b .quad 0xcfda112d44735f93 .quad 0x1f23a0c77e20048c .quad 0xf235467be5bc1570 .quad 0x03d2d9020dbab38c .quad 0x27529aa2fcf9e09e .quad 0x0840bef29d34bc50 // 2^196 * 4 * G .quad 0x796dfb35dc10b287 .quad 0x27176bcd5c7ff29d .quad 0x7f3d43e8c7b24905 .quad 0x0304f5a191c54276 .quad 0xcd54e06b7f37e4eb .quad 0x8cc15f87f5e96cca .quad 0xb8248bb0d3597dce .quad 0x246affa06074400c .quad 0x37d88e68fbe45321 .quad 0x86097548c0d75032 .quad 0x4e9b13ef894a0d35 .quad 0x25a83cac5753d325 // 2^196 * 5 * G .quad 0x10222f48eed8165e .quad 0x623fc1234b8bcf3a .quad 0x1e145c09c221e8f0 .quad 0x7ccfa59fca782630 .quad 0x9f0f66293952b6e2 .quad 0x33db5e0e0934267b .quad 0xff45252bd609fedc .quad 0x06be10f5c506e0c9 .quad 0x1a9615a9b62a345f .quad 0x22050c564a52fecc .quad 0xa7a2788528bc0dfe .quad 0x5e82770a1a1ee71d // 2^196 * 6 * G .quad 0x35425183ad896a5c .quad 0xe8673afbe78d52f6 .quad 0x2c66f25f92a35f64 .quad 
0x09d04f3b3b86b102 .quad 0xe802e80a42339c74 .quad 0x34175166a7fffae5 .quad 0x34865d1f1c408cae .quad 0x2cca982c605bc5ee .quad 0xfd2d5d35197dbe6e .quad 0x207c2eea8be4ffa3 .quad 0x2613d8db325ae918 .quad 0x7a325d1727741d3e // 2^196 * 7 * G .quad 0xd036b9bbd16dfde2 .quad 0xa2055757c497a829 .quad 0x8e6cc966a7f12667 .quad 0x4d3b1a791239c180 .quad 0xecd27d017e2a076a .quad 0xd788689f1636495e .quad 0x52a61af0919233e5 .quad 0x2a479df17bb1ae64 .quad 0x9e5eee8e33db2710 .quad 0x189854ded6c43ca5 .quad 0xa41c22c592718138 .quad 0x27ad5538a43a5e9b // 2^196 * 8 * G .quad 0x2746dd4b15350d61 .quad 0xd03fcbc8ee9521b7 .quad 0xe86e365a138672ca .quad 0x510e987f7e7d89e2 .quad 0xcb5a7d638e47077c .quad 0x8db7536120a1c059 .quad 0x549e1e4d8bedfdcc .quad 0x080153b7503b179d .quad 0xdda69d930a3ed3e3 .quad 0x3d386ef1cd60a722 .quad 0xc817ad58bdaa4ee6 .quad 0x23be8d554fe7372a // 2^200 * 1 * G .quad 0x95fe919a74ef4fad .quad 0x3a827becf6a308a2 .quad 0x964e01d309a47b01 .quad 0x71c43c4f5ba3c797 .quad 0xbc1ef4bd567ae7a9 .quad 0x3f624cb2d64498bd .quad 0xe41064d22c1f4ec8 .quad 0x2ef9c5a5ba384001 .quad 0xb6fd6df6fa9e74cd .quad 0xf18278bce4af267a .quad 0x8255b3d0f1ef990e .quad 0x5a758ca390c5f293 // 2^200 * 2 * G .quad 0xa2b72710d9462495 .quad 0x3aa8c6d2d57d5003 .quad 0xe3d400bfa0b487ca .quad 0x2dbae244b3eb72ec .quad 0x8ce0918b1d61dc94 .quad 0x8ded36469a813066 .quad 0xd4e6a829afe8aad3 .quad 0x0a738027f639d43f .quad 0x980f4a2f57ffe1cc .quad 0x00670d0de1839843 .quad 0x105c3f4a49fb15fd .quad 0x2698ca635126a69c // 2^200 * 3 * G .quad 0xe765318832b0ba78 .quad 0x381831f7925cff8b .quad 0x08a81b91a0291fcc .quad 0x1fb43dcc49caeb07 .quad 0x2e3d702f5e3dd90e .quad 0x9e3f0918e4d25386 .quad 0x5e773ef6024da96a .quad 0x3c004b0c4afa3332 .quad 0x9aa946ac06f4b82b .quad 0x1ca284a5a806c4f3 .quad 0x3ed3265fc6cd4787 .quad 0x6b43fd01cd1fd217 // 2^200 * 4 * G .quad 0xc7a75d4b4697c544 .quad 0x15fdf848df0fffbf .quad 0x2868b9ebaa46785a .quad 0x5a68d7105b52f714 .quad 0xb5c742583e760ef3 .quad 0x75dc52b9ee0ab990 .quad 0xbf1427c2072b923f .quad 0x73420b2d6ff0d9f0 .quad 0xaf2cf6cb9e851e06 .quad 0x8f593913c62238c4 .quad 0xda8ab89699fbf373 .quad 0x3db5632fea34bc9e // 2^200 * 5 * G .quad 0xf46eee2bf75dd9d8 .quad 0x0d17b1f6396759a5 .quad 0x1bf2d131499e7273 .quad 0x04321adf49d75f13 .quad 0x2e4990b1829825d5 .quad 0xedeaeb873e9a8991 .quad 0xeef03d394c704af8 .quad 0x59197ea495df2b0e .quad 0x04e16019e4e55aae .quad 0xe77b437a7e2f92e9 .quad 0xc7ce2dc16f159aa4 .quad 0x45eafdc1f4d70cc0 // 2^200 * 6 * G .quad 0x698401858045d72b .quad 0x4c22faa2cf2f0651 .quad 0x941a36656b222dc6 .quad 0x5a5eebc80362dade .quad 0xb60e4624cfccb1ed .quad 0x59dbc292bd5c0395 .quad 0x31a09d1ddc0481c9 .quad 0x3f73ceea5d56d940 .quad 0xb7a7bfd10a4e8dc6 .quad 0xbe57007e44c9b339 .quad 0x60c1207f1557aefa .quad 0x26058891266218db // 2^200 * 7 * G .quad 0x59f704a68360ff04 .quad 0xc3d93fde7661e6f4 .quad 0x831b2a7312873551 .quad 0x54ad0c2e4e615d57 .quad 0x4c818e3cc676e542 .quad 0x5e422c9303ceccad .quad 0xec07cccab4129f08 .quad 0x0dedfa10b24443b8 .quad 0xee3b67d5b82b522a .quad 0x36f163469fa5c1eb .quad 0xa5b4d2f26ec19fd3 .quad 0x62ecb2baa77a9408 // 2^200 * 8 * G .quad 0xe5ed795261152b3d .quad 0x4962357d0eddd7d1 .quad 0x7482c8d0b96b4c71 .quad 0x2e59f919a966d8be .quad 0x92072836afb62874 .quad 0x5fcd5e8579e104a5 .quad 0x5aad01adc630a14a .quad 0x61913d5075663f98 .quad 0x0dc62d361a3231da .quad 0xfa47583294200270 .quad 0x02d801513f9594ce .quad 0x3ddbc2a131c05d5c // 2^204 * 1 * G .quad 0x3f50a50a4ffb81ef .quad 0xb1e035093bf420bf .quad 0x9baa8e1cc6aa2cd0 .quad 0x32239861fa237a40 .quad 0xfb735ac2004a35d1 .quad 
0x31de0f433a6607c3 .quad 0x7b8591bfc528d599 .quad 0x55be9a25f5bb050c .quad 0x0d005acd33db3dbf .quad 0x0111b37c80ac35e2 .quad 0x4892d66c6f88ebeb .quad 0x770eadb16508fbcd // 2^204 * 2 * G .quad 0x8451f9e05e4e89dd .quad 0xc06302ffbc793937 .quad 0x5d22749556a6495c .quad 0x09a6755ca05603fb .quad 0xf1d3b681a05071b9 .quad 0x2207659a3592ff3a .quad 0x5f0169297881e40e .quad 0x16bedd0e86ba374e .quad 0x5ecccc4f2c2737b5 .quad 0x43b79e0c2dccb703 .quad 0x33e008bc4ec43df3 .quad 0x06c1b840f07566c0 // 2^204 * 3 * G .quad 0x7688a5c6a388f877 .quad 0x02a96c14deb2b6ac .quad 0x64c9f3431b8c2af8 .quad 0x3628435554a1eed6 .quad 0x69ee9e7f9b02805c .quad 0xcbff828a547d1640 .quad 0x3d93a869b2430968 .quad 0x46b7b8cd3fe26972 .quad 0xe9812086fe7eebe0 .quad 0x4cba6be72f515437 .quad 0x1d04168b516efae9 .quad 0x5ea1391043982cb9 // 2^204 * 4 * G .quad 0x49125c9cf4702ee1 .quad 0x4520b71f8b25b32d .quad 0x33193026501fef7e .quad 0x656d8997c8d2eb2b .quad 0x6f2b3be4d5d3b002 .quad 0xafec33d96a09c880 .quad 0x035f73a4a8bcc4cc .quad 0x22c5b9284662198b .quad 0xcb58c8fe433d8939 .quad 0x89a0cb2e6a8d7e50 .quad 0x79ca955309fbbe5a .quad 0x0c626616cd7fc106 // 2^204 * 5 * G .quad 0x1ffeb80a4879b61f .quad 0x6396726e4ada21ed .quad 0x33c7b093368025ba .quad 0x471aa0c6f3c31788 .quad 0x8fdfc379fbf454b1 .quad 0x45a5a970f1a4b771 .quad 0xac921ef7bad35915 .quad 0x42d088dca81c2192 .quad 0x8fda0f37a0165199 .quad 0x0adadb77c8a0e343 .quad 0x20fbfdfcc875e820 .quad 0x1cf2bea80c2206e7 // 2^204 * 6 * G .quad 0xc2ddf1deb36202ac .quad 0x92a5fe09d2e27aa5 .quad 0x7d1648f6fc09f1d3 .quad 0x74c2cc0513bc4959 .quad 0x982d6e1a02c0412f .quad 0x90fa4c83db58e8fe .quad 0x01c2f5bcdcb18bc0 .quad 0x686e0c90216abc66 .quad 0x1fadbadba54395a7 .quad 0xb41a02a0ae0da66a .quad 0xbf19f598bba37c07 .quad 0x6a12b8acde48430d // 2^204 * 7 * G .quad 0xf8daea1f39d495d9 .quad 0x592c190e525f1dfc .quad 0xdb8cbd04c9991d1b .quad 0x11f7fda3d88f0cb7 .quad 0x793bdd801aaeeb5f .quad 0x00a2a0aac1518871 .quad 0xe8a373a31f2136b4 .quad 0x48aab888fc91ef19 .quad 0x041f7e925830f40e .quad 0x002d6ca979661c06 .quad 0x86dc9ff92b046a2e .quad 0x760360928b0493d1 // 2^204 * 8 * G .quad 0x21bb41c6120cf9c6 .quad 0xeab2aa12decda59b .quad 0xc1a72d020aa48b34 .quad 0x215d4d27e87d3b68 .quad 0xb43108e5695a0b05 .quad 0x6cb00ee8ad37a38b .quad 0x5edad6eea3537381 .quad 0x3f2602d4b6dc3224 .quad 0xc8b247b65bcaf19c .quad 0x49779dc3b1b2c652 .quad 0x89a180bbd5ece2e2 .quad 0x13f098a3cec8e039 // 2^208 * 1 * G .quad 0x9adc0ff9ce5ec54b .quad 0x039c2a6b8c2f130d .quad 0x028007c7f0f89515 .quad 0x78968314ac04b36b .quad 0xf3aa57a22796bb14 .quad 0x883abab79b07da21 .quad 0xe54be21831a0391c .quad 0x5ee7fb38d83205f9 .quad 0x538dfdcb41446a8e .quad 0xa5acfda9434937f9 .quad 0x46af908d263c8c78 .quad 0x61d0633c9bca0d09 // 2^208 * 2 * G .quad 0x63744935ffdb2566 .quad 0xc5bd6b89780b68bb .quad 0x6f1b3280553eec03 .quad 0x6e965fd847aed7f5 .quad 0xada328bcf8fc73df .quad 0xee84695da6f037fc .quad 0x637fb4db38c2a909 .quad 0x5b23ac2df8067bdc .quad 0x9ad2b953ee80527b .quad 0xe88f19aafade6d8d .quad 0x0e711704150e82cf .quad 0x79b9bbb9dd95dedc // 2^208 * 3 * G .quad 0xebb355406a3126c2 .quad 0xd26383a868c8c393 .quad 0x6c0c6429e5b97a82 .quad 0x5065f158c9fd2147 .quad 0xd1997dae8e9f7374 .quad 0xa032a2f8cfbb0816 .quad 0xcd6cba126d445f0a .quad 0x1ba811460accb834 .quad 0x708169fb0c429954 .quad 0xe14600acd76ecf67 .quad 0x2eaab98a70e645ba .quad 0x3981f39e58a4faf2 // 2^208 * 4 * G .quad 0x18fb8a7559230a93 .quad 0x1d168f6960e6f45d .quad 0x3a85a94514a93cb5 .quad 0x38dc083705acd0fd .quad 0xc845dfa56de66fde .quad 0xe152a5002c40483a .quad 0xe9d2e163c7b4f632 .quad 
0x30f4452edcbc1b65 .quad 0x856d2782c5759740 .quad 0xfa134569f99cbecc .quad 0x8844fc73c0ea4e71 .quad 0x632d9a1a593f2469 // 2^208 * 5 * G .quad 0xf6bb6b15b807cba6 .quad 0x1823c7dfbc54f0d7 .quad 0xbb1d97036e29670b .quad 0x0b24f48847ed4a57 .quad 0xbf09fd11ed0c84a7 .quad 0x63f071810d9f693a .quad 0x21908c2d57cf8779 .quad 0x3a5a7df28af64ba2 .quad 0xdcdad4be511beac7 .quad 0xa4538075ed26ccf2 .quad 0xe19cff9f005f9a65 .quad 0x34fcf74475481f63 // 2^208 * 6 * G .quad 0xc197e04c789767ca .quad 0xb8714dcb38d9467d .quad 0x55de888283f95fa8 .quad 0x3d3bdc164dfa63f7 .quad 0xa5bb1dab78cfaa98 .quad 0x5ceda267190b72f2 .quad 0x9309c9110a92608e .quad 0x0119a3042fb374b0 .quad 0x67a2d89ce8c2177d .quad 0x669da5f66895d0c1 .quad 0xf56598e5b282a2b0 .quad 0x56c088f1ede20a73 // 2^208 * 7 * G .quad 0x336d3d1110a86e17 .quad 0xd7f388320b75b2fa .quad 0xf915337625072988 .quad 0x09674c6b99108b87 .quad 0x581b5fac24f38f02 .quad 0xa90be9febae30cbd .quad 0x9a2169028acf92f0 .quad 0x038b7ea48359038f .quad 0x9f4ef82199316ff8 .quad 0x2f49d282eaa78d4f .quad 0x0971a5ab5aef3174 .quad 0x6e5e31025969eb65 // 2^208 * 8 * G .quad 0xb16c62f587e593fb .quad 0x4999eddeca5d3e71 .quad 0xb491c1e014cc3e6d .quad 0x08f5114789a8dba8 .quad 0x3304fb0e63066222 .quad 0xfb35068987acba3f .quad 0xbd1924778c1061a3 .quad 0x3058ad43d1838620 .quad 0x323c0ffde57663d0 .quad 0x05c3df38a22ea610 .quad 0xbdc78abdac994f9a .quad 0x26549fa4efe3dc99 // 2^212 * 1 * G .quad 0x738b38d787ce8f89 .quad 0xb62658e24179a88d .quad 0x30738c9cf151316d .quad 0x49128c7f727275c9 .quad 0x04dbbc17f75396b9 .quad 0x69e6a2d7d2f86746 .quad 0xc6409d99f53eabc6 .quad 0x606175f6332e25d2 .quad 0x4021370ef540e7dd .quad 0x0910d6f5a1f1d0a5 .quad 0x4634aacd5b06b807 .quad 0x6a39e6356944f235 // 2^212 * 2 * G .quad 0x96cd5640df90f3e7 .quad 0x6c3a760edbfa25ea .quad 0x24f3ef0959e33cc4 .quad 0x42889e7e530d2e58 .quad 0x1da1965774049e9d .quad 0xfbcd6ea198fe352b .quad 0xb1cbcd50cc5236a6 .quad 0x1f5ec83d3f9846e2 .quad 0x8efb23c3328ccb75 .quad 0xaf42a207dd876ee9 .quad 0x20fbdadc5dfae796 .quad 0x241e246b06bf9f51 // 2^212 * 3 * G .quad 0x29e68e57ad6e98f6 .quad 0x4c9260c80b462065 .quad 0x3f00862ea51ebb4b .quad 0x5bc2c77fb38d9097 .quad 0x7eaafc9a6280bbb8 .quad 0x22a70f12f403d809 .quad 0x31ce40bb1bfc8d20 .quad 0x2bc65635e8bd53ee .quad 0xe8d5dc9fa96bad93 .quad 0xe58fb17dde1947dc .quad 0x681532ea65185fa3 .quad 0x1fdd6c3b034a7830 // 2^212 * 4 * G .quad 0x0a64e28c55dc18fe .quad 0xe3df9e993399ebdd .quad 0x79ac432370e2e652 .quad 0x35ff7fc33ae4cc0e .quad 0x9c13a6a52dd8f7a9 .quad 0x2dbb1f8c3efdcabf .quad 0x961e32405e08f7b5 .quad 0x48c8a121bbe6c9e5 .quad 0xfc415a7c59646445 .quad 0xd224b2d7c128b615 .quad 0x6035c9c905fbb912 .quad 0x42d7a91274429fab // 2^212 * 5 * G .quad 0x4e6213e3eaf72ed3 .quad 0x6794981a43acd4e7 .quad 0xff547cde6eb508cb .quad 0x6fed19dd10fcb532 .quad 0xa9a48947933da5bc .quad 0x4a58920ec2e979ec .quad 0x96d8800013e5ac4c .quad 0x453692d74b48b147 .quad 0xdd775d99a8559c6f .quad 0xf42a2140df003e24 .quad 0x5223e229da928a66 .quad 0x063f46ba6d38f22c // 2^212 * 6 * G .quad 0xd2d242895f536694 .quad 0xca33a2c542939b2c .quad 0x986fada6c7ddb95c .quad 0x5a152c042f712d5d .quad 0x39843cb737346921 .quad 0xa747fb0738c89447 .quad 0xcb8d8031a245307e .quad 0x67810f8e6d82f068 .quad 0x3eeb8fbcd2287db4 .quad 0x72c7d3a301a03e93 .quad 0x5473e88cbd98265a .quad 0x7324aa515921b403 // 2^212 * 7 * G .quad 0x857942f46c3cbe8e .quad 0xa1d364b14730c046 .quad 0x1c8ed914d23c41bf .quad 0x0838e161eef6d5d2 .quad 0xad23f6dae82354cb .quad 0x6962502ab6571a6d .quad 0x9b651636e38e37d1 .quad 0x5cac5005d1a3312f .quad 0x8cc154cce9e39904 .quad 
0x5b3a040b84de6846 .quad 0xc4d8a61cb1be5d6e .quad 0x40fb897bd8861f02 // 2^212 * 8 * G .quad 0x84c5aa9062de37a1 .quad 0x421da5000d1d96e1 .quad 0x788286306a9242d9 .quad 0x3c5e464a690d10da .quad 0xe57ed8475ab10761 .quad 0x71435e206fd13746 .quad 0x342f824ecd025632 .quad 0x4b16281ea8791e7b .quad 0xd1c101d50b813381 .quad 0xdee60f1176ee6828 .quad 0x0cb68893383f6409 .quad 0x6183c565f6ff484a // 2^216 * 1 * G .quad 0x741d5a461e6bf9d6 .quad 0x2305b3fc7777a581 .quad 0xd45574a26474d3d9 .quad 0x1926e1dc6401e0ff .quad 0xdb468549af3f666e .quad 0xd77fcf04f14a0ea5 .quad 0x3df23ff7a4ba0c47 .quad 0x3a10dfe132ce3c85 .quad 0xe07f4e8aea17cea0 .quad 0x2fd515463a1fc1fd .quad 0x175322fd31f2c0f1 .quad 0x1fa1d01d861e5d15 // 2^216 * 2 * G .quad 0xcc8055947d599832 .quad 0x1e4656da37f15520 .quad 0x99f6f7744e059320 .quad 0x773563bc6a75cf33 .quad 0x38dcac00d1df94ab .quad 0x2e712bddd1080de9 .quad 0x7f13e93efdd5e262 .quad 0x73fced18ee9a01e5 .quad 0x06b1e90863139cb3 .quad 0xa493da67c5a03ecd .quad 0x8d77cec8ad638932 .quad 0x1f426b701b864f44 // 2^216 * 3 * G .quad 0xefc9264c41911c01 .quad 0xf1a3b7b817a22c25 .quad 0x5875da6bf30f1447 .quad 0x4e1af5271d31b090 .quad 0xf17e35c891a12552 .quad 0xb76b8153575e9c76 .quad 0xfa83406f0d9b723e .quad 0x0b76bb1b3fa7e438 .quad 0x08b8c1f97f92939b .quad 0xbe6771cbd444ab6e .quad 0x22e5646399bb8017 .quad 0x7b6dd61eb772a955 // 2^216 * 4 * G .quad 0xb7adc1e850f33d92 .quad 0x7998fa4f608cd5cf .quad 0xad962dbd8dfc5bdb .quad 0x703e9bceaf1d2f4f .quad 0x5730abf9ab01d2c7 .quad 0x16fb76dc40143b18 .quad 0x866cbe65a0cbb281 .quad 0x53fa9b659bff6afe .quad 0x6c14c8e994885455 .quad 0x843a5d6665aed4e5 .quad 0x181bb73ebcd65af1 .quad 0x398d93e5c4c61f50 // 2^216 * 5 * G .quad 0x1c4bd16733e248f3 .quad 0xbd9e128715bf0a5f .quad 0xd43f8cf0a10b0376 .quad 0x53b09b5ddf191b13 .quad 0xc3877c60d2e7e3f2 .quad 0x3b34aaa030828bb1 .quad 0x283e26e7739ef138 .quad 0x699c9c9002c30577 .quad 0xf306a7235946f1cc .quad 0x921718b5cce5d97d .quad 0x28cdd24781b4e975 .quad 0x51caf30c6fcdd907 // 2^216 * 6 * G .quad 0xa60ba7427674e00a .quad 0x630e8570a17a7bf3 .quad 0x3758563dcf3324cc .quad 0x5504aa292383fdaa .quad 0x737af99a18ac54c7 .quad 0x903378dcc51cb30f .quad 0x2b89bc334ce10cc7 .quad 0x12ae29c189f8e99a .quad 0xa99ec0cb1f0d01cf .quad 0x0dd1efcc3a34f7ae .quad 0x55ca7521d09c4e22 .quad 0x5fd14fe958eba5ea // 2^216 * 7 * G .quad 0xb5dc2ddf2845ab2c .quad 0x069491b10a7fe993 .quad 0x4daaf3d64002e346 .quad 0x093ff26e586474d1 .quad 0x3c42fe5ebf93cb8e .quad 0xbedfa85136d4565f .quad 0xe0f0859e884220e8 .quad 0x7dd73f960725d128 .quad 0xb10d24fe68059829 .quad 0x75730672dbaf23e5 .quad 0x1367253ab457ac29 .quad 0x2f59bcbc86b470a4 // 2^216 * 8 * G .quad 0x83847d429917135f .quad 0xad1b911f567d03d7 .quad 0x7e7748d9be77aad1 .quad 0x5458b42e2e51af4a .quad 0x7041d560b691c301 .quad 0x85201b3fadd7e71e .quad 0x16c2e16311335585 .quad 0x2aa55e3d010828b1 .quad 0xed5192e60c07444f .quad 0x42c54e2d74421d10 .quad 0x352b4c82fdb5c864 .quad 0x13e9004a8a768664 // 2^220 * 1 * G .quad 0xcbb5b5556c032bff .quad 0xdf7191b729297a3a .quad 0xc1ff7326aded81bb .quad 0x71ade8bb68be03f5 .quad 0x1e6284c5806b467c .quad 0xc5f6997be75d607b .quad 0x8b67d958b378d262 .quad 0x3d88d66a81cd8b70 .quad 0x8b767a93204ed789 .quad 0x762fcacb9fa0ae2a .quad 0x771febcc6dce4887 .quad 0x343062158ff05fb3 // 2^220 * 2 * G .quad 0xe05da1a7e1f5bf49 .quad 0x26457d6dd4736092 .quad 0x77dcb07773cc32f6 .quad 0x0a5d94969cdd5fcd .quad 0xfce219072a7b31b4 .quad 0x4d7adc75aa578016 .quad 0x0ec276a687479324 .quad 0x6d6d9d5d1fda4beb .quad 0x22b1a58ae9b08183 .quad 0xfd95d071c15c388b .quad 0xa9812376850a0517 .quad 
0x33384cbabb7f335e // 2^220 * 3 * G .quad 0x3c6fa2680ca2c7b5 .quad 0x1b5082046fb64fda .quad 0xeb53349c5431d6de .quad 0x5278b38f6b879c89 .quad 0x33bc627a26218b8d .quad 0xea80b21fc7a80c61 .quad 0x9458b12b173e9ee6 .quad 0x076247be0e2f3059 .quad 0x52e105f61416375a .quad 0xec97af3685abeba4 .quad 0x26e6b50623a67c36 .quad 0x5cf0e856f3d4fb01 // 2^220 * 4 * G .quad 0xf6c968731ae8cab4 .quad 0x5e20741ecb4f92c5 .quad 0x2da53be58ccdbc3e .quad 0x2dddfea269970df7 .quad 0xbeaece313db342a8 .quad 0xcba3635b842db7ee .quad 0xe88c6620817f13ef .quad 0x1b9438aa4e76d5c6 .quad 0x8a50777e166f031a .quad 0x067b39f10fb7a328 .quad 0x1925c9a6010fbd76 .quad 0x6df9b575cc740905 // 2^220 * 5 * G .quad 0x42c1192927f6bdcf .quad 0x8f91917a403d61ca .quad 0xdc1c5a668b9e1f61 .quad 0x1596047804ec0f8d .quad 0xecdfc35b48cade41 .quad 0x6a88471fb2328270 .quad 0x740a4a2440a01b6a .quad 0x471e5796003b5f29 .quad 0xda96bbb3aced37ac .quad 0x7a2423b5e9208cea .quad 0x24cc5c3038aebae2 .quad 0x50c356afdc5dae2f // 2^220 * 6 * G .quad 0x09dcbf4341c30318 .quad 0xeeba061183181dce .quad 0xc179c0cedc1e29a1 .quad 0x1dbf7b89073f35b0 .quad 0xcfed9cdf1b31b964 .quad 0xf486a9858ca51af3 .quad 0x14897265ea8c1f84 .quad 0x784a53dd932acc00 .quad 0x2d99f9df14fc4920 .quad 0x76ccb60cc4499fe5 .quad 0xa4132cbbe5cf0003 .quad 0x3f93d82354f000ea // 2^220 * 7 * G .quad 0x8183e7689e04ce85 .quad 0x678fb71e04465341 .quad 0xad92058f6688edac .quad 0x5da350d3532b099a .quad 0xeaac12d179e14978 .quad 0xff923ff3bbebff5e .quad 0x4af663e40663ce27 .quad 0x0fd381a811a5f5ff .quad 0xf256aceca436df54 .quad 0x108b6168ae69d6e8 .quad 0x20d986cb6b5d036c .quad 0x655957b9fee2af50 // 2^220 * 8 * G .quad 0xaea8b07fa902030f .quad 0xf88c766af463d143 .quad 0x15b083663c787a60 .quad 0x08eab1148267a4a8 .quad 0xbdc1409bd002d0ac .quad 0x66660245b5ccd9a6 .quad 0x82317dc4fade85ec .quad 0x02fe934b6ad7df0d .quad 0xef5cf100cfb7ea74 .quad 0x22897633a1cb42ac .quad 0xd4ce0c54cef285e2 .quad 0x30408c048a146a55 // 2^224 * 1 * G .quad 0x739d8845832fcedb .quad 0xfa38d6c9ae6bf863 .quad 0x32bc0dcab74ffef7 .quad 0x73937e8814bce45e .quad 0xbb2e00c9193b877f .quad 0xece3a890e0dc506b .quad 0xecf3b7c036de649f .quad 0x5f46040898de9e1a .quad 0xb9037116297bf48d .quad 0xa9d13b22d4f06834 .quad 0xe19715574696bdc6 .quad 0x2cf8a4e891d5e835 // 2^224 * 2 * G .quad 0x6d93fd8707110f67 .quad 0xdd4c09d37c38b549 .quad 0x7cb16a4cc2736a86 .quad 0x2049bd6e58252a09 .quad 0x2cb5487e17d06ba2 .quad 0x24d2381c3950196b .quad 0xd7659c8185978a30 .quad 0x7a6f7f2891d6a4f6 .quad 0x7d09fd8d6a9aef49 .quad 0xf0ee60be5b3db90b .quad 0x4c21b52c519ebfd4 .quad 0x6011aadfc545941d // 2^224 * 3 * G .quad 0x5f67926dcf95f83c .quad 0x7c7e856171289071 .quad 0xd6a1e7f3998f7a5b .quad 0x6fc5cc1b0b62f9e0 .quad 0x63ded0c802cbf890 .quad 0xfbd098ca0dff6aaa .quad 0x624d0afdb9b6ed99 .quad 0x69ce18b779340b1e .quad 0xd1ef5528b29879cb .quad 0xdd1aae3cd47e9092 .quad 0x127e0442189f2352 .quad 0x15596b3ae57101f1 // 2^224 * 4 * G .quad 0x462739d23f9179a2 .quad 0xff83123197d6ddcf .quad 0x1307deb553f2148a .quad 0x0d2237687b5f4dda .quad 0x09ff31167e5124ca .quad 0x0be4158bd9c745df .quad 0x292b7d227ef556e5 .quad 0x3aa4e241afb6d138 .quad 0x2cc138bf2a3305f5 .quad 0x48583f8fa2e926c3 .quad 0x083ab1a25549d2eb .quad 0x32fcaa6e4687a36c // 2^224 * 5 * G .quad 0x7bc56e8dc57d9af5 .quad 0x3e0bd2ed9df0bdf2 .quad 0xaac014de22efe4a3 .quad 0x4627e9cefebd6a5c .quad 0x3207a4732787ccdf .quad 0x17e31908f213e3f8 .quad 0xd5b2ecd7f60d964e .quad 0x746f6336c2600be9 .quad 0x3f4af345ab6c971c .quad 0xe288eb729943731f .quad 0x33596a8a0344186d .quad 0x7b4917007ed66293 // 2^224 * 6 * G .quad 0x2d85fb5cab84b064 .quad 
0x497810d289f3bc14 .quad 0x476adc447b15ce0c .quad 0x122ba376f844fd7b .quad 0x54341b28dd53a2dd .quad 0xaa17905bdf42fc3f .quad 0x0ff592d94dd2f8f4 .quad 0x1d03620fe08cd37d .quad 0xc20232cda2b4e554 .quad 0x9ed0fd42115d187f .quad 0x2eabb4be7dd479d9 .quad 0x02c70bf52b68ec4c // 2^224 * 7 * G .quad 0xa287ec4b5d0b2fbb .quad 0x415c5790074882ca .quad 0xe044a61ec1d0815c .quad 0x26334f0a409ef5e0 .quad 0xace532bf458d72e1 .quad 0x5be768e07cb73cb5 .quad 0x56cf7d94ee8bbde7 .quad 0x6b0697e3feb43a03 .quad 0xb6c8f04adf62a3c0 .quad 0x3ef000ef076da45d .quad 0x9c9cb95849f0d2a9 .quad 0x1cc37f43441b2fae // 2^224 * 8 * G .quad 0x508f565a5cc7324f .quad 0xd061c4c0e506a922 .quad 0xfb18abdb5c45ac19 .quad 0x6c6809c10380314a .quad 0xd76656f1c9ceaeb9 .quad 0x1c5b15f818e5656a .quad 0x26e72832844c2334 .quad 0x3a346f772f196838 .quad 0xd2d55112e2da6ac8 .quad 0xe9bd0331b1e851ed .quad 0x960746dd8ec67262 .quad 0x05911b9f6ef7c5d0 // 2^228 * 1 * G .quad 0xe9dcd756b637ff2d .quad 0xec4c348fc987f0c4 .quad 0xced59285f3fbc7b7 .quad 0x3305354793e1ea87 .quad 0x01c18980c5fe9f94 .quad 0xcd656769716fd5c8 .quad 0x816045c3d195a086 .quad 0x6e2b7f3266cc7982 .quad 0xcc802468f7c3568f .quad 0x9de9ba8219974cb3 .quad 0xabb7229cb5b81360 .quad 0x44e2017a6fbeba62 // 2^228 * 2 * G .quad 0xc4c2a74354dab774 .quad 0x8e5d4c3c4eaf031a .quad 0xb76c23d242838f17 .quad 0x749a098f68dce4ea .quad 0x87f82cf3b6ca6ecd .quad 0x580f893e18f4a0c2 .quad 0x058930072604e557 .quad 0x6cab6ac256d19c1d .quad 0xdcdfe0a02cc1de60 .quad 0x032665ff51c5575b .quad 0x2c0c32f1073abeeb .quad 0x6a882014cd7b8606 // 2^228 * 3 * G .quad 0xa52a92fea4747fb5 .quad 0xdc12a4491fa5ab89 .quad 0xd82da94bb847a4ce .quad 0x4d77edce9512cc4e .quad 0xd111d17caf4feb6e .quad 0x050bba42b33aa4a3 .quad 0x17514c3ceeb46c30 .quad 0x54bedb8b1bc27d75 .quad 0x77c8e14577e2189c .quad 0xa3e46f6aff99c445 .quad 0x3144dfc86d335343 .quad 0x3a96559e7c4216a9 // 2^228 * 4 * G .quad 0x12550d37f42ad2ee .quad 0x8b78e00498a1fbf5 .quad 0x5d53078233894cb2 .quad 0x02c84e4e3e498d0c .quad 0x4493896880baaa52 .quad 0x4c98afc4f285940e .quad 0xef4aa79ba45448b6 .quad 0x5278c510a57aae7f .quad 0xa54dd074294c0b94 .quad 0xf55d46b8df18ffb6 .quad 0xf06fecc58dae8366 .quad 0x588657668190d165 // 2^228 * 5 * G .quad 0xd47712311aef7117 .quad 0x50343101229e92c7 .quad 0x7a95e1849d159b97 .quad 0x2449959b8b5d29c9 .quad 0xbf5834f03de25cc3 .quad 0xb887c8aed6815496 .quad 0x5105221a9481e892 .quad 0x6760ed19f7723f93 .quad 0x669ba3b7ac35e160 .quad 0x2eccf73fba842056 .quad 0x1aec1f17c0804f07 .quad 0x0d96bc031856f4e7 // 2^228 * 6 * G .quad 0x3318be7775c52d82 .quad 0x4cb764b554d0aab9 .quad 0xabcf3d27cc773d91 .quad 0x3bf4d1848123288a .quad 0xb1d534b0cc7505e1 .quad 0x32cd003416c35288 .quad 0xcb36a5800762c29d .quad 0x5bfe69b9237a0bf8 .quad 0x183eab7e78a151ab .quad 0xbbe990c999093763 .quad 0xff717d6e4ac7e335 .quad 0x4c5cddb325f39f88 // 2^228 * 7 * G .quad 0xc0f6b74d6190a6eb .quad 0x20ea81a42db8f4e4 .quad 0xa8bd6f7d97315760 .quad 0x33b1d60262ac7c21 .quad 0x57750967e7a9f902 .quad 0x2c37fdfc4f5b467e .quad 0xb261663a3177ba46 .quad 0x3a375e78dc2d532b .quad 0x8141e72f2d4dddea .quad 0xe6eafe9862c607c8 .quad 0x23c28458573cafd0 .quad 0x46b9476f4ff97346 // 2^228 * 8 * G .quad 0x0c1ffea44f901e5c .quad 0x2b0b6fb72184b782 .quad 0xe587ff910114db88 .quad 0x37130f364785a142 .quad 0x1215505c0d58359f .quad 0x2a2013c7fc28c46b .quad 0x24a0a1af89ea664e .quad 0x4400b638a1130e1f .quad 0x3a01b76496ed19c3 .quad 0x31e00ab0ed327230 .quad 0x520a885783ca15b1 .quad 0x06aab9875accbec7 // 2^232 * 1 * G .quad 0xc1339983f5df0ebb .quad 0xc0f3758f512c4cac .quad 0x2cf1130a0bb398e1 .quad 
0x6b3cecf9aa270c62 .quad 0x5349acf3512eeaef .quad 0x20c141d31cc1cb49 .quad 0x24180c07a99a688d .quad 0x555ef9d1c64b2d17 .quad 0x36a770ba3b73bd08 .quad 0x624aef08a3afbf0c .quad 0x5737ff98b40946f2 .quad 0x675f4de13381749d // 2^232 * 2 * G .quad 0x0e2c52036b1782fc .quad 0x64816c816cad83b4 .quad 0xd0dcbdd96964073e .quad 0x13d99df70164c520 .quad 0xa12ff6d93bdab31d .quad 0x0725d80f9d652dfe .quad 0x019c4ff39abe9487 .quad 0x60f450b882cd3c43 .quad 0x014b5ec321e5c0ca .quad 0x4fcb69c9d719bfa2 .quad 0x4e5f1c18750023a0 .quad 0x1c06de9e55edac80 // 2^232 * 3 * G .quad 0x990f7ad6a33ec4e2 .quad 0x6608f938be2ee08e .quad 0x9ca143c563284515 .quad 0x4cf38a1fec2db60d .quad 0xffd52b40ff6d69aa .quad 0x34530b18dc4049bb .quad 0x5e4a5c2fa34d9897 .quad 0x78096f8e7d32ba2d .quad 0xa0aaaa650dfa5ce7 .quad 0xf9c49e2a48b5478c .quad 0x4f09cc7d7003725b .quad 0x373cad3a26091abe // 2^232 * 4 * G .quad 0xb294634d82c9f57c .quad 0x1fcbfde124934536 .quad 0x9e9c4db3418cdb5a .quad 0x0040f3d9454419fc .quad 0xf1bea8fb89ddbbad .quad 0x3bcb2cbc61aeaecb .quad 0x8f58a7bb1f9b8d9d .quad 0x21547eda5112a686 .quad 0xdefde939fd5986d3 .quad 0xf4272c89510a380c .quad 0xb72ba407bb3119b9 .quad 0x63550a334a254df4 // 2^232 * 5 * G .quad 0x6507d6edb569cf37 .quad 0x178429b00ca52ee1 .quad 0xea7c0090eb6bd65d .quad 0x3eea62c7daf78f51 .quad 0x9bba584572547b49 .quad 0xf305c6fae2c408e0 .quad 0x60e8fa69c734f18d .quad 0x39a92bafaa7d767a .quad 0x9d24c713e693274e .quad 0x5f63857768dbd375 .quad 0x70525560eb8ab39a .quad 0x68436a0665c9c4cd // 2^232 * 6 * G .quad 0xbc0235e8202f3f27 .quad 0xc75c00e264f975b0 .quad 0x91a4e9d5a38c2416 .quad 0x17b6e7f68ab789f9 .quad 0x1e56d317e820107c .quad 0xc5266844840ae965 .quad 0xc1e0a1c6320ffc7a .quad 0x5373669c91611472 .quad 0x5d2814ab9a0e5257 .quad 0x908f2084c9cab3fc .quad 0xafcaf5885b2d1eca .quad 0x1cb4b5a678f87d11 // 2^232 * 7 * G .quad 0xb664c06b394afc6c .quad 0x0c88de2498da5fb1 .quad 0x4f8d03164bcad834 .quad 0x330bca78de7434a2 .quad 0x6b74aa62a2a007e7 .quad 0xf311e0b0f071c7b1 .quad 0x5707e438000be223 .quad 0x2dc0fd2d82ef6eac .quad 0x982eff841119744e .quad 0xf9695e962b074724 .quad 0xc58ac14fbfc953fb .quad 0x3c31be1b369f1cf5 // 2^232 * 8 * G .quad 0xb0f4864d08948aee .quad 0x07dc19ee91ba1c6f .quad 0x7975cdaea6aca158 .quad 0x330b61134262d4bb .quad 0xc168bc93f9cb4272 .quad 0xaeb8711fc7cedb98 .quad 0x7f0e52aa34ac8d7a .quad 0x41cec1097e7d55bb .quad 0xf79619d7a26d808a .quad 0xbb1fd49e1d9e156d .quad 0x73d7c36cdba1df27 .quad 0x26b44cd91f28777d // 2^236 * 1 * G .quad 0x300a9035393aa6d8 .quad 0x2b501131a12bb1cd .quad 0x7b1ff677f093c222 .quad 0x4309c1f8cab82bad .quad 0xaf44842db0285f37 .quad 0x8753189047efc8df .quad 0x9574e091f820979a .quad 0x0e378d6069615579 .quad 0xd9fa917183075a55 .quad 0x4bdb5ad26b009fdc .quad 0x7829ad2cd63def0e .quad 0x078fc54975fd3877 // 2^236 * 2 * G .quad 0x87dfbd1428878f2d .quad 0x134636dd1e9421a1 .quad 0x4f17c951257341a3 .quad 0x5df98d4bad296cb8 .quad 0xe2004b5bb833a98a .quad 0x44775dec2d4c3330 .quad 0x3aa244067eace913 .quad 0x272630e3d58e00a9 .quad 0xf3678fd0ecc90b54 .quad 0xf001459b12043599 .quad 0x26725fbc3758b89b .quad 0x4325e4aa73a719ae // 2^236 * 3 * G .quad 0x657dc6ef433c3493 .quad 0x65375e9f80dbf8c3 .quad 0x47fd2d465b372dae .quad 0x4966ab79796e7947 .quad 0xed24629acf69f59d .quad 0x2a4a1ccedd5abbf4 .quad 0x3535ca1f56b2d67b .quad 0x5d8c68d043b1b42d .quad 0xee332d4de3b42b0a .quad 0xd84e5a2b16a4601c .quad 0x78243877078ba3e4 .quad 0x77ed1eb4184ee437 // 2^236 * 4 * G .quad 0xbfd4e13f201839a0 .quad 0xaeefffe23e3df161 .quad 0xb65b04f06b5d1fe3 .quad 0x52e085fb2b62fbc0 .quad 0x185d43f89e92ed1a .quad 
0xb04a1eeafe4719c6 .quad 0x499fbe88a6f03f4f .quad 0x5d8b0d2f3c859bdd .quad 0x124079eaa54cf2ba .quad 0xd72465eb001b26e7 .quad 0x6843bcfdc97af7fd .quad 0x0524b42b55eacd02 // 2^236 * 5 * G .quad 0xfd0d5dbee45447b0 .quad 0x6cec351a092005ee .quad 0x99a47844567579cb .quad 0x59d242a216e7fa45 .quad 0xbc18dcad9b829eac .quad 0x23ae7d28b5f579d0 .quad 0xc346122a69384233 .quad 0x1a6110b2e7d4ac89 .quad 0x4f833f6ae66997ac .quad 0x6849762a361839a4 .quad 0x6985dec1970ab525 .quad 0x53045e89dcb1f546 // 2^236 * 6 * G .quad 0xcb8bb346d75353db .quad 0xfcfcb24bae511e22 .quad 0xcba48d40d50ae6ef .quad 0x26e3bae5f4f7cb5d .quad 0x84da3cde8d45fe12 .quad 0xbd42c218e444e2d2 .quad 0xa85196781f7e3598 .quad 0x7642c93f5616e2b2 .quad 0x2323daa74595f8e4 .quad 0xde688c8b857abeb4 .quad 0x3fc48e961c59326e .quad 0x0b2e73ca15c9b8ba // 2^236 * 7 * G .quad 0xd6bb4428c17f5026 .quad 0x9eb27223fb5a9ca7 .quad 0xe37ba5031919c644 .quad 0x21ce380db59a6602 .quad 0x0e3fbfaf79c03a55 .quad 0x3077af054cbb5acf .quad 0xd5c55245db3de39f .quad 0x015e68c1476a4af7 .quad 0xc1d5285220066a38 .quad 0x95603e523570aef3 .quad 0x832659a7226b8a4d .quad 0x5dd689091f8eedc9 // 2^236 * 8 * G .quad 0xcbac84debfd3c856 .quad 0x1624c348b35ff244 .quad 0xb7f88dca5d9cad07 .quad 0x3b0e574da2c2ebe8 .quad 0x1d022591a5313084 .quad 0xca2d4aaed6270872 .quad 0x86a12b852f0bfd20 .quad 0x56e6c439ad7da748 .quad 0xc704ff4942bdbae6 .quad 0x5e21ade2b2de1f79 .quad 0xe95db3f35652fad8 .quad 0x0822b5378f08ebc1 // 2^240 * 1 * G .quad 0x51f048478f387475 .quad 0xb25dbcf49cbecb3c .quad 0x9aab1244d99f2055 .quad 0x2c709e6c1c10a5d6 .quad 0xe1b7f29362730383 .quad 0x4b5279ffebca8a2c .quad 0xdafc778abfd41314 .quad 0x7deb10149c72610f .quad 0xcb62af6a8766ee7a .quad 0x66cbec045553cd0e .quad 0x588001380f0be4b5 .quad 0x08e68e9ff62ce2ea // 2^240 * 2 * G .quad 0x34ad500a4bc130ad .quad 0x8d38db493d0bd49c .quad 0xa25c3d98500a89be .quad 0x2f1f3f87eeba3b09 .quad 0x2f2d09d50ab8f2f9 .quad 0xacb9218dc55923df .quad 0x4a8f342673766cb9 .quad 0x4cb13bd738f719f5 .quad 0xf7848c75e515b64a .quad 0xa59501badb4a9038 .quad 0xc20d313f3f751b50 .quad 0x19a1e353c0ae2ee8 // 2^240 * 3 * G .quad 0x7d1c7560bafa05c3 .quad 0xb3e1a0a0c6e55e61 .quad 0xe3529718c0d66473 .quad 0x41546b11c20c3486 .quad 0xb42172cdd596bdbd .quad 0x93e0454398eefc40 .quad 0x9fb15347b44109b5 .quad 0x736bd3990266ae34 .quad 0x85532d509334b3b4 .quad 0x46fd114b60816573 .quad 0xcc5f5f30425c8375 .quad 0x412295a2b87fab5c // 2^240 * 4 * G .quad 0x19c99b88f57ed6e9 .quad 0x5393cb266df8c825 .quad 0x5cee3213b30ad273 .quad 0x14e153ebb52d2e34 .quad 0x2e655261e293eac6 .quad 0x845a92032133acdb .quad 0x460975cb7900996b .quad 0x0760bb8d195add80 .quad 0x413e1a17cde6818a .quad 0x57156da9ed69a084 .quad 0x2cbf268f46caccb1 .quad 0x6b34be9bc33ac5f2 // 2^240 * 5 * G .quad 0xf3df2f643a78c0b2 .quad 0x4c3e971ef22e027c .quad 0xec7d1c5e49c1b5a3 .quad 0x2012c18f0922dd2d .quad 0x11fc69656571f2d3 .quad 0xc6c9e845530e737a .quad 0xe33ae7a2d4fe5035 .quad 0x01b9c7b62e6dd30b .quad 0x880b55e55ac89d29 .quad 0x1483241f45a0a763 .quad 0x3d36efdfc2e76c1f .quad 0x08af5b784e4bade8 // 2^240 * 6 * G .quad 0x283499dc881f2533 .quad 0x9d0525da779323b6 .quad 0x897addfb673441f4 .quad 0x32b79d71163a168d .quad 0xe27314d289cc2c4b .quad 0x4be4bd11a287178d .quad 0x18d528d6fa3364ce .quad 0x6423c1d5afd9826e .quad 0xcc85f8d9edfcb36a .quad 0x22bcc28f3746e5f9 .quad 0xe49de338f9e5d3cd .quad 0x480a5efbc13e2dcc // 2^240 * 7 * G .quad 0x0b51e70b01622071 .quad 0x06b505cf8b1dafc5 .quad 0x2c6bb061ef5aabcd .quad 0x47aa27600cb7bf31 .quad 0xb6614ce442ce221f .quad 0x6e199dcc4c053928 .quad 0x663fb4a4dc1cbe03 .quad 
0x24b31d47691c8e06 .quad 0x2a541eedc015f8c3 .quad 0x11a4fe7e7c693f7c .quad 0xf0af66134ea278d6 .quad 0x545b585d14dda094 // 2^240 * 8 * G .quad 0x67bf275ea0d43a0f .quad 0xade68e34089beebe .quad 0x4289134cd479e72e .quad 0x0f62f9c332ba5454 .quad 0x6204e4d0e3b321e1 .quad 0x3baa637a28ff1e95 .quad 0x0b0ccffd5b99bd9e .quad 0x4d22dc3e64c8d071 .quad 0xfcb46589d63b5f39 .quad 0x5cae6a3f57cbcf61 .quad 0xfebac2d2953afa05 .quad 0x1c0fa01a36371436 // 2^244 * 1 * G .quad 0xe7547449bc7cd692 .quad 0x0f9abeaae6f73ddf .quad 0x4af01ca700837e29 .quad 0x63ab1b5d3f1bc183 .quad 0xc11ee5e854c53fae .quad 0x6a0b06c12b4f3ff4 .quad 0x33540f80e0b67a72 .quad 0x15f18fc3cd07e3ef .quad 0x32750763b028f48c .quad 0x06020740556a065f .quad 0xd53bd812c3495b58 .quad 0x08706c9b865f508d // 2^244 * 2 * G .quad 0xf37ca2ab3d343dff .quad 0x1a8c6a2d80abc617 .quad 0x8e49e035d4ccffca .quad 0x48b46beebaa1d1b9 .quad 0xcc991b4138b41246 .quad 0x243b9c526f9ac26b .quad 0xb9ef494db7cbabbd .quad 0x5fba433dd082ed00 .quad 0x9c49e355c9941ad0 .quad 0xb9734ade74498f84 .quad 0x41c3fed066663e5c .quad 0x0ecfedf8e8e710b3 // 2^244 * 3 * G .quad 0x76430f9f9cd470d9 .quad 0xb62acc9ba42f6008 .quad 0x1898297c59adad5e .quad 0x7789dd2db78c5080 .quad 0x744f7463e9403762 .quad 0xf79a8dee8dfcc9c9 .quad 0x163a649655e4cde3 .quad 0x3b61788db284f435 .quad 0xb22228190d6ef6b2 .quad 0xa94a66b246ce4bfa .quad 0x46c1a77a4f0b6cc7 .quad 0x4236ccffeb7338cf // 2^244 * 4 * G .quad 0x8497404d0d55e274 .quad 0x6c6663d9c4ad2b53 .quad 0xec2fb0d9ada95734 .quad 0x2617e120cdb8f73c .quad 0x3bd82dbfda777df6 .quad 0x71b177cc0b98369e .quad 0x1d0e8463850c3699 .quad 0x5a71945b48e2d1f1 .quad 0x6f203dd5405b4b42 .quad 0x327ec60410b24509 .quad 0x9c347230ac2a8846 .quad 0x77de29fc11ffeb6a // 2^244 * 5 * G .quad 0xb0ac57c983b778a8 .quad 0x53cdcca9d7fe912c .quad 0x61c2b854ff1f59dc .quad 0x3a1a2cf0f0de7dac .quad 0x835e138fecced2ca .quad 0x8c9eaf13ea963b9a .quad 0xc95fbfc0b2160ea6 .quad 0x575e66f3ad877892 .quad 0x99803a27c88fcb3a .quad 0x345a6789275ec0b0 .quad 0x459789d0ff6c2be5 .quad 0x62f882651e70a8b2 // 2^244 * 6 * G .quad 0x085ae2c759ff1be4 .quad 0x149145c93b0e40b7 .quad 0xc467e7fa7ff27379 .quad 0x4eeecf0ad5c73a95 .quad 0x6d822986698a19e0 .quad 0xdc9821e174d78a71 .quad 0x41a85f31f6cb1f47 .quad 0x352721c2bcda9c51 .quad 0x48329952213fc985 .quad 0x1087cf0d368a1746 .quad 0x8e5261b166c15aa5 .quad 0x2d5b2d842ed24c21 // 2^244 * 7 * G .quad 0x02cfebd9ebd3ded1 .quad 0xd45b217739021974 .quad 0x7576f813fe30a1b7 .quad 0x5691b6f9a34ef6c2 .quad 0x5eb7d13d196ac533 .quad 0x377234ecdb80be2b .quad 0xe144cffc7cf5ae24 .quad 0x5226bcf9c441acec .quad 0x79ee6c7223e5b547 .quad 0x6f5f50768330d679 .quad 0xed73e1e96d8adce9 .quad 0x27c3da1e1d8ccc03 // 2^244 * 8 * G .quad 0x7eb9efb23fe24c74 .quad 0x3e50f49f1651be01 .quad 0x3ea732dc21858dea .quad 0x17377bd75bb810f9 .quad 0x28302e71630ef9f6 .quad 0xc2d4a2032b64cee0 .quad 0x090820304b6292be .quad 0x5fca747aa82adf18 .quad 0x232a03c35c258ea5 .quad 0x86f23a2c6bcb0cf1 .quad 0x3dad8d0d2e442166 .quad 0x04a8933cab76862b // 2^248 * 1 * G .quad 0xd2c604b622943dff .quad 0xbc8cbece44cfb3a0 .quad 0x5d254ff397808678 .quad 0x0fa3614f3b1ca6bf .quad 0x69082b0e8c936a50 .quad 0xf9c9a035c1dac5b6 .quad 0x6fb73e54c4dfb634 .quad 0x4005419b1d2bc140 .quad 0xa003febdb9be82f0 .quad 0x2089c1af3a44ac90 .quad 0xf8499f911954fa8e .quad 0x1fba218aef40ab42 // 2^248 * 2 * G .quad 0xab549448fac8f53e .quad 0x81f6e89a7ba63741 .quad 0x74fd6c7d6c2b5e01 .quad 0x392e3acaa8c86e42 .quad 0x4f3e57043e7b0194 .quad 0xa81d3eee08daaf7f .quad 0xc839c6ab99dcdef1 .quad 0x6c535d13ff7761d5 .quad 0x4cbd34e93e8a35af .quad 
0x2e0781445887e816 .quad 0x19319c76f29ab0ab .quad 0x25e17fe4d50ac13b // 2^248 * 3 * G .quad 0x0a289bd71e04f676 .quad 0x208e1c52d6420f95 .quad 0x5186d8b034691fab .quad 0x255751442a9fb351 .quad 0x915f7ff576f121a7 .quad 0xc34a32272fcd87e3 .quad 0xccba2fde4d1be526 .quad 0x6bba828f8969899b .quad 0xe2d1bc6690fe3901 .quad 0x4cb54a18a0997ad5 .quad 0x971d6914af8460d4 .quad 0x559d504f7f6b7be4 // 2^248 * 4 * G .quad 0xa7738378b3eb54d5 .quad 0x1d69d366a5553c7c .quad 0x0a26cf62f92800ba .quad 0x01ab12d5807e3217 .quad 0x9c4891e7f6d266fd .quad 0x0744a19b0307781b .quad 0x88388f1d6061e23b .quad 0x123ea6a3354bd50e .quad 0x118d189041e32d96 .quad 0xb9ede3c2d8315848 .quad 0x1eab4271d83245d9 .quad 0x4a3961e2c918a154 // 2^248 * 5 * G .quad 0x71dc3be0f8e6bba0 .quad 0xd6cef8347effe30a .quad 0xa992425fe13a476a .quad 0x2cd6bce3fb1db763 .quad 0x0327d644f3233f1e .quad 0x499a260e34fcf016 .quad 0x83b5a716f2dab979 .quad 0x68aceead9bd4111f .quad 0x38b4c90ef3d7c210 .quad 0x308e6e24b7ad040c .quad 0x3860d9f1b7e73e23 .quad 0x595760d5b508f597 // 2^248 * 6 * G .quad 0x6129bfe104aa6397 .quad 0x8f960008a4a7fccb .quad 0x3f8bc0897d909458 .quad 0x709fa43edcb291a9 .quad 0x882acbebfd022790 .quad 0x89af3305c4115760 .quad 0x65f492e37d3473f4 .quad 0x2cb2c5df54515a2b .quad 0xeb0a5d8c63fd2aca .quad 0xd22bc1662e694eff .quad 0x2723f36ef8cbb03a .quad 0x70f029ecf0c8131f // 2^248 * 7 * G .quad 0x461307b32eed3e33 .quad 0xae042f33a45581e7 .quad 0xc94449d3195f0366 .quad 0x0b7d5d8a6c314858 .quad 0x2a6aafaa5e10b0b9 .quad 0x78f0a370ef041aa9 .quad 0x773efb77aa3ad61f .quad 0x44eca5a2a74bd9e1 .quad 0x25d448327b95d543 .quad 0x70d38300a3340f1d .quad 0xde1c531c60e1c52b .quad 0x272224512c7de9e4 // 2^248 * 8 * G .quad 0x1abc92af49c5342e .quad 0xffeed811b2e6fad0 .quad 0xefa28c8dfcc84e29 .quad 0x11b5df18a44cc543 .quad 0xbf7bbb8a42a975fc .quad 0x8c5c397796ada358 .quad 0xe27fc76fcdedaa48 .quad 0x19735fd7f6bc20a6 .quad 0xe3ab90d042c84266 .quad 0xeb848e0f7f19547e .quad 0x2503a1d065a497b9 .quad 0x0fef911191df895f // 2^252 * 1 * G .quad 0xb1507ca1ab1c6eb9 .quad 0xbd448f3e16b687b3 .quad 0x3455fb7f2c7a91ab .quad 0x7579229e2f2adec1 .quad 0x6ab5dcb85b1c16b7 .quad 0x94c0fce83c7b27a5 .quad 0xa4b11c1a735517be .quad 0x499238d0ba0eafaa .quad 0xecf46e527aba8b57 .quad 0x15a08c478bd1647b .quad 0x7af1c6a65f706fef .quad 0x6345fa78f03a30d5 // 2^252 * 2 * G .quad 0xdf02f95f1015e7a1 .quad 0x790ec41da9b40263 .quad 0x4d3a0ea133ea1107 .quad 0x54f70be7e33af8c9 .quad 0x93d3cbe9bdd8f0a4 .quad 0xdb152c1bfd177302 .quad 0x7dbddc6d7f17a875 .quad 0x3e1a71cc8f426efe .quad 0xc83ca3e390babd62 .quad 0x80ede3670291c833 .quad 0xc88038ccd37900c4 .quad 0x2c5fc0231ec31fa1 // 2^252 * 3 * G .quad 0xfeba911717038b4f .quad 0xe5123721c9deef81 .quad 0x1c97e4e75d0d8834 .quad 0x68afae7a23dc3bc6 .quad 0xc422e4d102456e65 .quad 0x87414ac1cad47b91 .quad 0x1592e2bba2b6ffdd .quad 0x75d9d2bff5c2100f .quad 0x5bd9b4763626e81c .quad 0x89966936bca02edd .quad 0x0a41193d61f077b3 .quad 0x3097a24200ce5471 // 2^252 * 4 * G .quad 0x57427734c7f8b84c .quad 0xf141a13e01b270e9 .quad 0x02d1adfeb4e564a6 .quad 0x4bb23d92ce83bd48 .quad 0xa162e7246695c486 .quad 0x131d633435a89607 .quad 0x30521561a0d12a37 .quad 0x56704bada6afb363 .quad 0xaf6c4aa752f912b9 .quad 0x5e665f6cd86770c8 .quad 0x4c35ac83a3c8cd58 .quad 0x2b7a29c010a58a7e // 2^252 * 5 * G .quad 0xc4007f77d0c1cec3 .quad 0x8d1020b6bac492f8 .quad 0x32ec29d57e69daaf .quad 0x599408759d95fce0 .quad 0x33810a23bf00086e .quad 0xafce925ee736ff7c .quad 0x3d60e670e24922d4 .quad 0x11ce9e714f96061b .quad 0x219ef713d815bac1 .quad 0xf141465d485be25c .quad 0x6d5447cc4e513c51 .quad 
0x174926be5ef44393 // 2^252 * 6 * G .quad 0xb5deb2f9fc5bd5bb .quad 0x92daa72ae1d810e1 .quad 0xafc4cfdcb72a1c59 .quad 0x497d78813fc22a24 .quad 0x3ef5d41593ea022e .quad 0x5cbcc1a20ed0eed6 .quad 0x8fd24ecf07382c8c .quad 0x6fa42ead06d8e1ad .quad 0xe276824a1f73371f .quad 0x7f7cf01c4f5b6736 .quad 0x7e201fe304fa46e7 .quad 0x785a36a357808c96 // 2^252 * 7 * G .quad 0x825fbdfd63014d2b .quad 0xc852369c6ca7578b .quad 0x5b2fcd285c0b5df0 .quad 0x12ab214c58048c8f .quad 0x070442985d517bc3 .quad 0x6acd56c7ae653678 .quad 0x00a27983985a7763 .quad 0x5167effae512662b .quad 0xbd4ea9e10f53c4b6 .quad 0x1673dc5f8ac91a14 .quad 0xa8f81a4e2acc1aba .quad 0x33a92a7924332a25 // 2^252 * 8 * G .quad 0x9dd1f49927996c02 .quad 0x0cb3b058e04d1752 .quad 0x1f7e88967fd02c3e .quad 0x2f964268cb8b3eb1 .quad 0x7ba95ba0218f2ada .quad 0xcff42287330fb9ca .quad 0xdada496d56c6d907 .quad 0x5380c296f4beee54 .quad 0x9d4f270466898d0a .quad 0x3d0987990aff3f7a .quad 0xd09ef36267daba45 .quad 0x7761455e7b1c669c #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
marvin-hansen/iggy-streaming-system
297,283
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/curve25519/edwards25519_scalarmulbase.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Scalar multiplication for the edwards25519 standard basepoint
// Input scalar[4]; output res[8]
//
// extern void edwards25519_scalarmulbase
//     (uint64_t res[static 8],uint64_t scalar[static 4]);
//
// Given a scalar n, returns point (X,Y) = n * B where B = (...,4/5) is
// the standard basepoint for the edwards25519 (Ed25519) curve.
//
// Standard x86-64 ABI: RDI = res, RSI = scalar
// Microsoft x64 ABI:   RCX = res, RDX = scalar
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(edwards25519_scalarmulbase)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(edwards25519_scalarmulbase)
        .text

// Size of individual field elements

#define NUMSIZE 32

// Pointer-offset pairs for result and temporaries on stack with some aliasing.
// The result "resx" assumes the "res" pointer has been preloaded into %rbp.

#define resx (0*NUMSIZE)(%rbp)
#define resy (1*NUMSIZE)(%rbp)

#define scalar (0*NUMSIZE)(%rsp)

#define tabent (1*NUMSIZE)(%rsp)
#define ymx_2 (1*NUMSIZE)(%rsp)
#define xpy_2 (2*NUMSIZE)(%rsp)
#define kxy_2 (3*NUMSIZE)(%rsp)

#define t0 (4*NUMSIZE)(%rsp)
#define t1 (5*NUMSIZE)(%rsp)
#define t2 (6*NUMSIZE)(%rsp)
#define t3 (7*NUMSIZE)(%rsp)
#define t4 (8*NUMSIZE)(%rsp)
#define t5 (9*NUMSIZE)(%rsp)

#define acc (10*NUMSIZE)(%rsp)
#define x_1 (10*NUMSIZE)(%rsp)
#define y_1 (11*NUMSIZE)(%rsp)
#define z_1 (12*NUMSIZE)(%rsp)
#define w_1 (13*NUMSIZE)(%rsp)
#define x_3 (10*NUMSIZE)(%rsp)
#define y_3 (11*NUMSIZE)(%rsp)
#define z_3 (12*NUMSIZE)(%rsp)
#define w_3 (13*NUMSIZE)(%rsp)

// Stable homes for the input result pointer, and other variables

#define res 14*NUMSIZE(%rsp)
#define i 14*NUMSIZE+8(%rsp)
#define bias 14*NUMSIZE+16(%rsp)
#define bf 14*NUMSIZE+24(%rsp)
#define ix 14*NUMSIZE+24(%rsp)
#define tab 15*NUMSIZE(%rsp)

// Total size to reserve on the stack

#define NSPACE (15*NUMSIZE+8)

// Syntactic variants to make x86_att version simpler to generate

#define SCALAR 0
#define TABENT (1*NUMSIZE)
#define ACC (10*NUMSIZE)
#define X3 (10*NUMSIZE)
#define Z3 (12*NUMSIZE)
#define W3 (13*NUMSIZE)

// Macro wrapping up the basic field multiplication, only trivially
// different from a pure function call to bignum_mul_p25519.
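// As an aid to reading the macro, the following is a minimal functional model
// of what it computes (an illustrative Python-style sketch, not part of the
// assembly; the real code interleaves the 4x4 limb product with the folding
// and estimates the final quotient directly instead of calling a generic
// modulo):
//
//     p_25519 = 2**255 - 19
//     def mul_p25519(a, b):                          # a, b < 2**256
//         t = a * b                                  # 512-bit product
//         t = (t & (2**256 - 1)) + 38 * (t >> 256)   # 2^256 == 38 (mod p_25519)
//         t = (t & (2**256 - 1)) + 38 * (t >> 256)
//         return t % p_25519                         # final correction into [0, p_25519)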
#define mul_p25519(P0,P1,P2) \ xorl %esi, %esi ; \ movq P2, %rdx ; \ mulxq P1, %r8, %r9 ; \ mulxq 0x8+P1, %rax, %r10 ; \ addq %rax, %r9 ; \ mulxq 0x10+P1, %rax, %r11 ; \ adcq %rax, %r10 ; \ mulxq 0x18+P1, %rax, %r12 ; \ adcq %rax, %r11 ; \ adcq %rsi, %r12 ; \ xorl %esi, %esi ; \ movq 0x8+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x18+P1, %rax, %r13 ; \ adcxq %rax, %r12 ; \ adoxq %rsi, %r13 ; \ adcxq %rsi, %r13 ; \ xorl %esi, %esi ; \ movq 0x10+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x18+P1, %rax, %r14 ; \ adcxq %rax, %r13 ; \ adoxq %rsi, %r14 ; \ adcxq %rsi, %r14 ; \ xorl %esi, %esi ; \ movq 0x18+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x18+P1, %rax, %r15 ; \ adcxq %rax, %r14 ; \ adoxq %rsi, %r15 ; \ adcxq %rsi, %r15 ; \ movl $0x26, %edx ; \ xorl %esi, %esi ; \ mulxq %r12, %rax, %rbx ; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ mulxq %r13, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq %r14, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq %r15, %rax, %r12 ; \ adcxq %rax, %r11 ; \ adoxq %rsi, %r12 ; \ adcxq %rsi, %r12 ; \ shldq $0x1, %r11, %r12 ; \ movl $0x13, %edx ; \ incq %r12; \ bts $63, %r11 ; \ mulxq %r12, %rax, %rbx ; \ addq %rax, %r8 ; \ adcq %rbx, %r9 ; \ adcq %rsi, %r10 ; \ adcq %rsi, %r11 ; \ sbbq %rax, %rax ; \ notq %rax; \ andq %rdx, %rax ; \ subq %rax, %r8 ; \ sbbq %rsi, %r9 ; \ sbbq %rsi, %r10 ; \ sbbq %rsi, %r11 ; \ btr $63, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // A version of multiplication that only guarantees output < 2 * p_25519. // This basically skips the +1 and final correction in quotient estimation. 
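// The weaker bound is enough because the inner loop below only ever needs its
// intermediates to fit in four 64-bit digits and to be correct modulo p_25519;
// values are fully reduced just once at the very end. A functional sketch of
// the variant (illustrative only, Python-style big integers assumed):
//
//     def mul_4(a, b):                               # a, b < 2 * p_25519
//         t = a * b
//         t = (t & (2**256 - 1)) + 38 * (t >> 256)   # 2^256 == 38 (mod p_25519)
//         t = (t & (2**255 - 1)) + 19 * (t >> 255)   # 2^255 == 19, no final subtract
//         return t                                   # < 2 * p_25519, == a*b mod p_25519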
#define mul_4(P0,P1,P2) \ xorl %ecx, %ecx ; \ movq P2, %rdx ; \ mulxq P1, %r8, %r9 ; \ mulxq 0x8+P1, %rax, %r10 ; \ addq %rax, %r9 ; \ mulxq 0x10+P1, %rax, %r11 ; \ adcq %rax, %r10 ; \ mulxq 0x18+P1, %rax, %r12 ; \ adcq %rax, %r11 ; \ adcq %rcx, %r12 ; \ xorl %ecx, %ecx ; \ movq 0x8+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x18+P1, %rax, %r13 ; \ adcxq %rax, %r12 ; \ adoxq %rcx, %r13 ; \ adcxq %rcx, %r13 ; \ xorl %ecx, %ecx ; \ movq 0x10+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x18+P1, %rax, %r14 ; \ adcxq %rax, %r13 ; \ adoxq %rcx, %r14 ; \ adcxq %rcx, %r14 ; \ xorl %ecx, %ecx ; \ movq 0x18+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x18+P1, %rax, %r15 ; \ adcxq %rax, %r14 ; \ adoxq %rcx, %r15 ; \ adcxq %rcx, %r15 ; \ movl $0x26, %edx ; \ xorl %ecx, %ecx ; \ mulxq %r12, %rax, %rbx ; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ mulxq %r13, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq %r14, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq %r15, %rax, %r12 ; \ adcxq %rax, %r11 ; \ adoxq %rcx, %r12 ; \ adcxq %rcx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Modular subtraction with double modulus 2 * p_25519 = 2^256 - 38 #define sub_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ebx, %ebx ; \ subq P2, %r8 ; \ movq 8+P1, %r9 ; \ sbbq 8+P2, %r9 ; \ movl $38, %ecx ; \ movq 16+P1, %r10 ; \ sbbq 16+P2, %r10 ; \ movq 24+P1, %rax ; \ sbbq 24+P2, %rax ; \ cmovncq %rbx, %rcx ; \ subq %rcx, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq %rbx, %rax ; \ movq %r8, P0 ; \ movq %r9, 8+P0 ; \ movq %r10, 16+P0 ; \ movq %rax, 24+P0 // Modular addition and doubling with double modulus 2 * p_25519 = 2^256 - 38. // This only ensures that the result fits in 4 digits, not that it is reduced // even w.r.t. double modulus. The result is always correct modulo provided // the sum of the inputs is < 2^256 + 2^256 - 38, so in particular provided // at least one of them is reduced double modulo. 
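// Concretely, both sub_twice4 above and add_twice4 below work modulo
// 2 * p_25519 = 2^256 - 38 and keep results in four digits by folding any
// borrow or carry back in as a correction of 38, since 2^256 == 38 modulo
// 2 * p_25519. An illustrative model (not part of the assembly):
//
//     def sub_twice4(a, b):                          # a, b < 2 * p_25519
//         d = a - b
//         return d + (2**256 - 38) if d < 0 else d
//
//     def add_twice4(a, b):
//         s = a + b
//         return s - (2**256 - 38) if s >= 2**256 else s
//
// The addition case shows why the precondition above suffices: if at least one
// input is already < 2^256 - 38, the corrected sum again fits in four digits.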
#define add_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ecx, %ecx ; \ addq P2, %r8 ; \ movq 0x8+P1, %r9 ; \ adcq 0x8+P2, %r9 ; \ movq 0x10+P1, %r10 ; \ adcq 0x10+P2, %r10 ; \ movq 0x18+P1, %r11 ; \ adcq 0x18+P2, %r11 ; \ movl $38, %eax ; \ cmovncq %rcx, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 #define double_twice4(P0,P1) \ movq P1, %r8 ; \ xorl %ecx, %ecx ; \ addq %r8, %r8 ; \ movq 0x8+P1, %r9 ; \ adcq %r9, %r9 ; \ movq 0x10+P1, %r10 ; \ adcq %r10, %r10 ; \ movq 0x18+P1, %r11 ; \ adcq %r11, %r11 ; \ movl $38, %eax ; \ cmovncq %rcx, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 S2N_BN_SYMBOL(edwards25519_scalarmulbase): // In this case the Windows form literally makes a subroutine call. // This avoids hassle arising from keeping code and data together. #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi callq edwards25519_scalarmulbase_standard popq %rsi popq %rdi ret edwards25519_scalarmulbase_standard: #endif // Save registers, make room for temps, preserve input arguments. pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp // Move the output pointer to a stable place movq %rdi, res // Copy the input scalar x to its local variable while reducing it // modulo 2^252 + m where m = 27742317777372353535851937790883648493; // this is the order of the basepoint so this doesn't change the result. // First do q = floor(x/2^252) and x' = x - q * (2^252 + m), which gives // an initial result -15 * m <= x' < 2^252 movq (%rsi), %r8 movq 8(%rsi), %r9 movq 16(%rsi), %r10 movq 24(%rsi), %r11 movq %r11, %rcx shrq $60, %rcx movq $0x5812631a5cf5d3ed, %rax mulq %rcx movq %rax, %r12 movq %rdx, %r13 movq $0x14def9dea2f79cd6, %rax mulq %rcx addq %rax, %r13 adcq $0, %rdx shlq $60, %rcx subq %r12, %r8 sbbq %r13, %r9 sbbq %rdx, %r10 sbbq %rcx, %r11 // If x' < 0 then just directly negate it; this makes sure the // reduced argument is strictly 0 <= x' < 2^252, but now we need // to record (done via bit 255 of the reduced scalar, which is // ignored in the main loop) when we negated so we can flip // the end result to compensate. sbbq %rax, %rax xorq %rax, %r8 xorq %rax, %r9 xorq %rax, %r10 xorq %rax, %r11 negq %rax adcq $0, %r8 adcq $0, %r9 adcq $0, %r10 adcq $0, %r11 shlq $63, %rax orq %rax, %r11 // And before we store the scalar, test and reset bit 251 to // initialize the main loop just below. movq %r8, SCALAR(%rsp) movq %r9, SCALAR+8(%rsp) movq %r10, SCALAR+16(%rsp) btr $59, %r11 movq %r11, SCALAR+24(%rsp) // The main part of the computation is in extended-projective coordinates // (X,Y,Z,T), representing an affine point on the edwards25519 curve // (x,y) via x = X/Z, y = Y/Z and x * y = T/Z (so X * Y = T * Z). // In comments B means the standard basepoint (x,4/5) = // (0x216....f25d51a,0x6666..666658). // // Initialize accumulator "acc" to either 0 or 2^251 * B depending on // bit 251 of the (reduced) scalar. That leaves bits 0..250 to handle. 
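// Taken together, the scalar preprocessing above can be modelled as follows
// (illustrative sketch only; n_25519 here names the basepoint order
// 2^252 + 27742317777372353535851937790883648493):
//
//     q = x >> 252
//     x = x - q * n_25519                    # now -15 * m <= x < 2^252, m as above
//     if x < 0: x = -x; negate_result = 1    # recorded in bit 255
//     bit251 = (x >> 251) & 1                # consumed by the cmovc selection below
//     x &= ~(1 << 251)                       # main loop then handles bits 0..250 only
//
// Negating the scalar and flipping the sign of the final x coordinate gives
// the same point, since -(n * B) = (-n) * B and -(x,y) = (-x,y) on this curve.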
leaq edwards25519_scalarmulbase_0g(%rip), %r10 leaq edwards25519_scalarmulbase_251g(%rip), %r11 movq (%r10), %rax movq (%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC(%rsp) movq 8*1(%r10), %rax movq 8*1(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+8(%rsp) movq 8*2(%r10), %rax movq 8*2(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+16(%rsp) movq 8*3(%r10), %rax movq 8*3(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+24(%rsp) movq 8*4(%r10), %rax movq 8*4(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+32(%rsp) movq 8*5(%r10), %rax movq 8*5(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+40(%rsp) movq 8*6(%r10), %rax movq 8*6(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+48(%rsp) movq 8*7(%r10), %rax movq 8*7(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+56(%rsp) movl $1, %eax movq %rax, ACC+64(%rsp) movl $0, %eax movq %rax, ACC+72(%rsp) movq %rax, ACC+80(%rsp) movq %rax, ACC+88(%rsp) movq 8*8(%r10), %rax movq 8*8(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+96(%rsp) movq 8*9(%r10), %rax movq 8*9(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+104(%rsp) movq 8*10(%r10), %rax movq 8*10(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+112(%rsp) movq 8*11(%r10), %rax movq 8*11(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+120(%rsp) // The counter "i" tracks the bit position for which the scalar has // already been absorbed, starting at 0 and going up in chunks of 4. // // The pointer "tab" points at the current block of the table for // multiples (2^i * j) * B at the current bit position i; 1 <= j <= 8. // // The bias is always either 0 and 1 and needs to be added to the // partially processed scalar implicitly. This is used to absorb 4 bits // of scalar per iteration from 3-bit table indexing by exploiting // negation: (16 * h + l) * B = (16 * (h + 1) - (16 - l)) * B is used // when l >= 9. Note that we can't have any bias left over at the // end because we made sure bit 251 is clear in the reduced scalar. movq $0, i leaq edwards25519_scalarmulbase_gtable(%rip), %rax movq %rax, tab movq $0, bias // Start of the main loop, repeated 63 times for i = 4, 8, ..., 252 edwards25519_scalarmulbase_scalarloop: // Look at the next 4-bit field "bf", adding the previous bias as well. // Choose the table index "ix" as bf when bf <= 8 and 16 - bf for bf >= 9, // setting the bias to 1 for the next iteration in the latter case. movq i, %rax movq %rax, %rcx shrq $6, %rax movq (%rsp,%rax,8), %rax // Exploiting scalar = sp exactly shrq %cl, %rax andq $15, %rax addq bias, %rax movq %rax, bf cmpq $9, bf sbbq %rax, %rax incq %rax movq %rax, bias movq $16, %rdi subq bf, %rdi cmpq $0, bias cmovzq bf, %rdi movq %rdi, ix // Perform constant-time lookup in the table to get element number "ix". // The table entry for the affine point (x,y) is actually a triple // (y - x,x + y,2 * d * x * y) to precompute parts of the addition. // Note that "ix" can be 0, so we set up the appropriate identity first. 
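// Two details of the lookup are worth spelling out (informal notes, not code):
//
//   * Digit recoding: each iteration consumes l = 4 scalar bits plus the
//     incoming bias (0 or 1). For l <= 8 it uses table index ix = l and passes
//     bias 0 onward; for l >= 9 it uses ix = 16 - l and carries bias 1, relying
//     on (16 * h + l) * B = (16 * (h + 1) - (16 - l)) * B, so ix always lies in
//     the range 0..8.
//
//   * Identity entry: each table element is the triple (y - x, x + y, 2*d*x*y),
//     and for the neutral point (x,y) = (0,1) this triple is (1, 1, 0), which is
//     exactly the constant preloaded into the registers before the comparisons.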
movl $1, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx movl $1, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d xorl %r12d, %r12d xorl %r13d, %r13d xorl %r14d, %r14d xorl %r15d, %r15d movq tab, %rbp cmpq $1, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $2, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $3, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $4, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $5, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $6, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $7, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq 
$96, %rbp cmpq $8, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp movq %rbp, tab // We now have the triple from the table in registers as follows // // [%rdx;%rcx;%rbx;%rax] = y - x // [%r11;%r10;%r9;%r8] = x + y // [%r15;%r14;%r13;%r12] = 2 * d * x * y // // In case bias = 1 we need to negate this. For Edwards curves // -(x,y) = (-x,y), i.e. we need to negate the x coordinate. // In this processed encoding, that amounts to swapping the // first two fields and negating the third. // // The optional negation here also pretends bias = 0 whenever // ix = 0 so that it doesn't need to handle the case of zero // inputs, since no non-trivial table entries are zero. Note // that in the zero case the whole negation is trivial, and // so indeed is the swapping. cmpq $0, bias movq %rax, %rsi cmovnzq %r8, %rsi cmovnzq %rax, %r8 movq %rsi, TABENT(%rsp) movq %r8, TABENT+32(%rsp) movq %rbx, %rsi cmovnzq %r9, %rsi cmovnzq %rbx, %r9 movq %rsi, TABENT+8(%rsp) movq %r9, TABENT+40(%rsp) movq %rcx, %rsi cmovnzq %r10, %rsi cmovnzq %rcx, %r10 movq %rsi, TABENT+16(%rsp) movq %r10, TABENT+48(%rsp) movq %rdx, %rsi cmovnzq %r11, %rsi cmovnzq %rdx, %r11 movq %rsi, TABENT+24(%rsp) movq %r11, TABENT+56(%rsp) movq $-19, %rax movq $-1, %rbx movq $-1, %rcx movq $0x7fffffffffffffff, %rdx subq %r12, %rax sbbq %r13, %rbx sbbq %r14, %rcx sbbq %r15, %rdx movq ix, %r8 movq bias, %r9 testq %r8, %r8 cmovzq %r8, %r9 testq %r9, %r9 cmovzq %r12, %rax cmovzq %r13, %rbx cmovzq %r14, %rcx cmovzq %r15, %rdx movq %rax, TABENT+64(%rsp) movq %rbx, TABENT+72(%rsp) movq %rcx, TABENT+80(%rsp) movq %rdx, TABENT+88(%rsp) // Extended-projective and precomputed mixed addition. // This is effectively the same as calling the standalone // function edwards25519_pepadd(acc,acc,tabent), but we // only retain slightly weaker normalization < 2 * p_25519 // throughout the inner loop, so the computation is // slightly different, and faster overall. double_twice4(t0,z_1) sub_twice4(t1,y_1,x_1) add_twice4(t2,y_1,x_1) mul_4(t3,w_1,kxy_2) mul_4(t1,t1,ymx_2) mul_4(t2,t2,xpy_2) sub_twice4(t4,t0,t3) add_twice4(t0,t0,t3) sub_twice4(t5,t2,t1) add_twice4(t1,t2,t1) mul_4(z_3,t4,t0) mul_4(x_3,t5,t4) mul_4(y_3,t0,t1) mul_4(w_3,t5,t1) // End of the main loop; move on by 4 bits. addq $4, i cmpq $252, i jc edwards25519_scalarmulbase_scalarloop // Insert the optional negation of the projective X coordinate, and // so by extension the final affine x coordinate x = X/Z and thus // the point P = (x,y). We only know X < 2 * p_25519, so we do the // negation as 2 * p_25519 - X to keep it nonnegative. From this // point on we don't need any normalization of the coordinates // except for making sure that they fit in 4 digits. 
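// In four little-endian 64-bit digits, 2 * p_25519 = 2^256 - 38 is
// 0xffffffffffffffda : 0xffffffffffffffff : 0xffffffffffffffff : 0xffffffffffffffff,
// which is the constant the subtraction below starts from; the cmovc chain then
// keeps either X or 2 * p_25519 - X according to the sign recorded in bit 255
// of the reduced scalar.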
movq X3(%rsp), %r8 movq X3+8(%rsp), %r9 movq X3+16(%rsp), %r10 movq X3+24(%rsp), %r11 movq $0xffffffffffffffda, %r12 subq %r8, %r12 movq $0xffffffffffffffff, %r13 sbbq %r9, %r13 movq $0xffffffffffffffff, %r14 sbbq %r10, %r14 movq $0xffffffffffffffff, %r15 sbbq %r11, %r15 movq SCALAR+24(%rsp), %rax btq $63, %rax cmovcq %r12, %r8 cmovcq %r13, %r9 cmovcq %r14, %r10 cmovcq %r15, %r11 movq %r8, X3(%rsp) movq %r9, X3+8(%rsp) movq %r10, X3+16(%rsp) movq %r11, X3+24(%rsp) // Now we need to map out of the extended-projective representation // (X,Y,Z,W) back to the affine form (x,y) = (X/Z,Y/Z). This means // first calling the modular inverse to get w_3 = 1/z_3. leaq W3(%rsp), %rdi leaq Z3(%rsp), %rsi // Inline copy of bignum_inv_p25519, identical except for stripping out // the prologue and epilogue saving and restoring registers and making // and reclaiming room on the stack. For more details and explanations see // "x86/curve25519/bignum_inv_p25519.S". Note that the stack it uses for // its own temporaries is 208 bytes, so it has no effect on variables // that are needed in the rest of our computation here: res, x_3, y_3, // z_3 and w_3. movq %rdi, 0xc0(%rsp) xorl %eax, %eax leaq -0x13(%rax), %rcx notq %rax movq %rcx, (%rsp) movq %rax, 0x8(%rsp) movq %rax, 0x10(%rsp) btr $0x3f, %rax movq %rax, 0x18(%rsp) movq (%rsi), %rdx movq 0x8(%rsi), %rcx movq 0x10(%rsi), %r8 movq 0x18(%rsi), %r9 movl $0x1, %eax xorl %r10d, %r10d bts $0x3f, %r9 adcq %r10, %rax imulq $0x13, %rax, %rax addq %rax, %rdx adcq %r10, %rcx adcq %r10, %r8 adcq %r10, %r9 movl $0x13, %eax cmovbq %r10, %rax subq %rax, %rdx sbbq %r10, %rcx sbbq %r10, %r8 sbbq %r10, %r9 btr $0x3f, %r9 movq %rdx, 0x20(%rsp) movq %rcx, 0x28(%rsp) movq %r8, 0x30(%rsp) movq %r9, 0x38(%rsp) xorl %eax, %eax movq %rax, 0x40(%rsp) movq %rax, 0x48(%rsp) movq %rax, 0x50(%rsp) movq %rax, 0x58(%rsp) movabsq $0xa0f99e2375022099, %rax movq %rax, 0x60(%rsp) movabsq $0xa8c68f3f1d132595, %rax movq %rax, 0x68(%rsp) movabsq $0x6c6c893805ac5242, %rax movq %rax, 0x70(%rsp) movabsq $0x276508b241770615, %rax movq %rax, 0x78(%rsp) movq $0xa, 0x90(%rsp) movq $0x1, 0x98(%rsp) jmp edwards25519_scalarmulbase_midloop edwards25519_scalarmulbase_inverseloop: movq %r8, %r9 sarq $0x3f, %r9 xorq %r9, %r8 subq %r9, %r8 movq %r10, %r11 sarq $0x3f, %r11 xorq %r11, %r10 subq %r11, %r10 movq %r12, %r13 sarq $0x3f, %r13 xorq %r13, %r12 subq %r13, %r12 movq %r14, %r15 sarq $0x3f, %r15 xorq %r15, %r14 subq %r15, %r14 movq %r8, %rax andq %r9, %rax movq %r10, %rdi andq %r11, %rdi addq %rax, %rdi movq %rdi, 0x80(%rsp) movq %r12, %rax andq %r13, %rax movq %r14, %rsi andq %r15, %rsi addq %rax, %rsi movq %rsi, 0x88(%rsp) xorl %ebx, %ebx movq (%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq 0x20(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rdi adcq %rdx, %rbx xorl %ebp, %ebp movq (%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rsi adcq %rdx, %rbp movq 0x20(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp xorl %ecx, %ecx movq 0x8(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x28(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx shrdq $0x3b, %rbx, %rdi movq %rdi, (%rsp) xorl %edi, %edi movq 0x8(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbp adcq %rdx, %rdi movq 0x28(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rdi shrdq $0x3b, %rbp, %rsi movq %rsi, 0x20(%rsp) xorl %esi, %esi movq 0x10(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rsi movq 0x30(%rsp), %rax xorq 
%r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rsi shrdq $0x3b, %rcx, %rbx movq %rbx, 0x8(%rsp) xorl %ebx, %ebx movq 0x10(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rdi adcq %rdx, %rbx movq 0x30(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rdi adcq %rdx, %rbx shrdq $0x3b, %rdi, %rbp movq %rbp, 0x28(%rsp) movq 0x18(%rsp), %rax xorq %r9, %rax movq %rax, %rbp sarq $0x3f, %rbp andq %r8, %rbp negq %rbp mulq %r8 addq %rax, %rsi adcq %rdx, %rbp movq 0x38(%rsp), %rax xorq %r11, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r10, %rdx subq %rdx, %rbp mulq %r10 addq %rax, %rsi adcq %rdx, %rbp shrdq $0x3b, %rsi, %rcx movq %rcx, 0x10(%rsp) shrdq $0x3b, %rbp, %rsi movq 0x18(%rsp), %rax movq %rsi, 0x18(%rsp) xorq %r13, %rax movq %rax, %rsi sarq $0x3f, %rsi andq %r12, %rsi negq %rsi mulq %r12 addq %rax, %rbx adcq %rdx, %rsi movq 0x38(%rsp), %rax xorq %r15, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r14, %rdx subq %rdx, %rsi mulq %r14 addq %rax, %rbx adcq %rdx, %rsi shrdq $0x3b, %rbx, %rdi movq %rdi, 0x30(%rsp) shrdq $0x3b, %rsi, %rbx movq %rbx, 0x38(%rsp) movq 0x80(%rsp), %rbx movq 0x88(%rsp), %rbp xorl %ecx, %ecx movq 0x40(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x60(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x40(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x40(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x60(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x60(%rsp) xorl %ebx, %ebx movq 0x48(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x68(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq 0x48(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, 0x48(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq 0x68(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, 0x68(%rsp) xorl %ecx, %ecx movq 0x50(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x70(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x50(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x50(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x70(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x70(%rsp) movq 0x58(%rsp), %rax xorq %r9, %rax movq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x78(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rcx adcq %rbx, %rdx movq %rdx, %rbx shldq $0x1, %rcx, %rdx sarq $0x3f, %rbx addq %rbx, %rdx movl $0x13, %eax imulq %rdx movq 0x40(%rsp), %r8 addq %rax, %r8 movq %r8, 0x40(%rsp) movq 0x48(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x48(%rsp) movq 0x50(%rsp), %r8 adcq %rbx, %r8 movq %r8, 0x50(%rsp) adcq %rbx, %rcx shlq $0x3f, %rax addq %rax, %rcx movq 0x58(%rsp), %rax movq %rcx, 0x58(%rsp) xorq %r13, %rax movq %r13, %rcx andq %r12, %rcx negq %rcx mulq %r12 addq %rax, %rsi adcq %rdx, %rcx movq 0x78(%rsp), %rax xorq %r15, %rax movq %r15, %rdx andq %r14, %rdx subq %rdx, %rcx mulq %r14 addq %rax, %rsi adcq %rcx, %rdx movq %rdx, %rcx shldq $0x1, %rsi, %rdx sarq $0x3f, %rcx movl $0x13, %eax addq %rcx, %rdx imulq %rdx movq 0x60(%rsp), %r8 addq %rax, %r8 movq %r8, 0x60(%rsp) movq 0x68(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x68(%rsp) movq 0x70(%rsp), %r8 adcq %rcx, %r8 movq %r8, 0x70(%rsp) adcq %rcx, %rsi shlq $0x3f, %rax addq %rax, %rsi movq %rsi, 0x78(%rsp) edwards25519_scalarmulbase_midloop: movq 0x98(%rsp), %rsi movq (%rsp), %rdx 
movq 0x20(%rsp), %rcx movq %rdx, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax xorl %ebp, %ebp movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq 
%rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %rdx leaq (%rcx,%rax), %rdi shlq $0x16, %rdx shlq $0x16, %rdi sarq $0x2b, %rdx sarq $0x2b, %rdi movabsq $0x20000100000, %rax leaq (%rbx,%rax), %rbx leaq (%rcx,%rax), %rcx sarq $0x2a, %rbx sarq $0x2a, %rcx movq %rdx, 0xa0(%rsp) movq %rbx, 0xa8(%rsp) movq %rdi, 0xb0(%rsp) movq %rcx, 0xb8(%rsp) movq (%rsp), %r12 imulq %r12, %rdi imulq %rdx, %r12 movq 0x20(%rsp), %r13 imulq %r13, %rbx imulq %rcx, %r13 addq %rbx, %r12 addq %rdi, %r13 sarq $0x14, %r12 sarq $0x14, %r13 movq %r12, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq 
%rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %r8 leaq (%rcx,%rax), %r10 shlq $0x16, %r8 shlq $0x16, %r10 sarq $0x2b, %r8 sarq $0x2b, %r10 movabsq $0x20000100000, %rax leaq (%rbx,%rax), %r15 leaq (%rcx,%rax), %r11 sarq $0x2a, %r15 sarq $0x2a, %r11 movq %r13, %rbx movq %r12, %rcx imulq %r8, %r12 imulq %r15, %rbx addq %rbx, %r12 imulq %r11, %r13 imulq %r10, %rcx addq %rcx, %r13 sarq $0x14, %r12 sarq $0x14, %r13 movq %r12, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq 0xa0(%rsp), %rax imulq %r8, %rax movq 0xb0(%rsp), %rdx imulq %r15, %rdx imulq 0xa8(%rsp), %r8 imulq 0xb8(%rsp), %r15 addq %r8, %r15 leaq (%rax,%rdx), %r9 movq 0xa0(%rsp), %rax imulq %r10, %rax movq 0xb0(%rsp), %rdx imulq %r11, %rdx imulq 0xa8(%rsp), %r10 imulq 0xb8(%rsp), %r11 addq %r10, %r11 leaq (%rax,%rdx), %r13 movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq 
%rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq 
%rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %r8 leaq (%rcx,%rax), %r12 shlq $0x15, %r8 shlq $0x15, %r12 sarq $0x2b, %r8 sarq $0x2b, %r12 movabsq $0x20000100000, %rax leaq (%rbx,%rax), %r10 leaq (%rcx,%rax), %r14 sarq $0x2b, %r10 sarq $0x2b, %r14 movq %r9, %rax imulq %r8, %rax movq %r13, %rdx imulq %r10, %rdx imulq %r15, %r8 imulq %r11, %r10 addq %r8, %r10 leaq (%rax,%rdx), %r8 movq %r9, %rax imulq %r12, %rax movq %r13, %rdx imulq %r14, %rdx imulq %r15, %r12 imulq %r11, %r14 addq %r12, %r14 leaq (%rax,%rdx), %r12 movq %rsi, 0x98(%rsp) decq 0x90(%rsp) jne edwards25519_scalarmulbase_inverseloop movq (%rsp), %rax movq 0x20(%rsp), %rcx imulq %r8, %rax imulq %r10, %rcx addq %rcx, %rax sarq $0x3f, %rax movq %r8, %r9 sarq $0x3f, %r9 xorq %r9, %r8 subq %r9, %r8 xorq %rax, %r9 movq %r10, %r11 sarq $0x3f, %r11 xorq %r11, %r10 subq %r11, %r10 xorq %rax, %r11 movq %r12, %r13 sarq $0x3f, %r13 xorq %r13, %r12 subq %r13, %r12 xorq %rax, %r13 movq %r14, %r15 sarq $0x3f, %r15 xorq %r15, %r14 subq %r15, %r14 xorq %rax, %r15 movq %r8, %rax andq %r9, %rax movq %r10, %r12 andq %r11, %r12 addq %rax, %r12 xorl %r13d, %r13d movq 0x40(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r12 adcq %rdx, %r13 movq 0x60(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movq 0x48(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r13 adcq %rdx, %r14 movq 0x68(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq 0x50(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq 0x70(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 adcq %rdx, %r15 movq 0x58(%rsp), %rax xorq %r9, %rax andq %r8, %r9 negq %r9 mulq %r8 addq %rax, %r15 adcq %rdx, %r9 movq 0x78(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %r9 mulq %r10 addq %rax, %r15 adcq %rdx, %r9 movq %r9, %rax shldq $0x1, %r15, %rax sarq $0x3f, %r9 movl $0x13, %ebx leaq 0x1(%rax,%r9,1), %rax imulq %rbx xorl %ebp, %ebp addq %rax, %r12 adcq %rdx, %r13 adcq %r9, %r14 adcq %r9, %r15 shlq $0x3f, %rax addq %rax, %r15 cmovns %rbp, %rbx subq %rbx, %r12 sbbq %rbp, %r13 sbbq %rbp, %r14 sbbq %rbp, %r15 btr $0x3f, %r15 movq 0xc0(%rsp), %rdi movq %r12, (%rdi) movq %r13, 0x8(%rdi) movq %r14, 0x10(%rdi) movq %r15, 0x18(%rdi) // The final result is x = X * inv(Z), y = Y * inv(Z). // These are the only operations in the whole computation that // fully reduce modulo p_25519 since now we want the canonical // answer as output. movq res, %rbp mul_p25519(resx,x_3,w_3) mul_p25519(resy,y_3,w_3) // Restore stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx ret // **************************************************************************** // The precomputed data (all read-only). This is currently part of the same // text section, which gives position-independent code with simple PC-relative // addressing. 
However it could be put in a separate section via something like // // .section .rodata // **************************************************************************** // 0 * B = 0 and 2^251 * B in extended-projective coordinates // but with Z = 1 assumed and hence left out, so they are (X,Y,T) only. edwards25519_scalarmulbase_0g: .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000001 .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000000 edwards25519_scalarmulbase_251g: .quad 0x525f946d7c7220e7 .quad 0x4636b0b2f1e35444 .quad 0x796e9d70e892ae0f .quad 0x03dec05fa937adb1 .quad 0x6d1c271cc6375515 .quad 0x462588c4a4ca4f14 .quad 0x691129fee55afc39 .quad 0x15949f784d8472f5 .quad 0xbd89e510afad0049 .quad 0x4d1f08c073b9860e .quad 0x07716e8b2d00af9d .quad 0x70d685f68f859714 // Precomputed table of multiples of generator for edwards25519 // all in precomputed extended-projective (y-x,x+y,2*d*x*y) triples. edwards25519_scalarmulbase_gtable: // 2^0 * 1 * G .quad 0x9d103905d740913e .quad 0xfd399f05d140beb3 .quad 0xa5c18434688f8a09 .quad 0x44fd2f9298f81267 .quad 0x2fbc93c6f58c3b85 .quad 0xcf932dc6fb8c0e19 .quad 0x270b4898643d42c2 .quad 0x07cf9d3a33d4ba65 .quad 0xabc91205877aaa68 .quad 0x26d9e823ccaac49e .quad 0x5a1b7dcbdd43598c .quad 0x6f117b689f0c65a8 // 2^0 * 2 * G .quad 0x8a99a56042b4d5a8 .quad 0x8f2b810c4e60acf6 .quad 0xe09e236bb16e37aa .quad 0x6bb595a669c92555 .quad 0x9224e7fc933c71d7 .quad 0x9f469d967a0ff5b5 .quad 0x5aa69a65e1d60702 .quad 0x590c063fa87d2e2e .quad 0x43faa8b3a59b7a5f .quad 0x36c16bdd5d9acf78 .quad 0x500fa0840b3d6a31 .quad 0x701af5b13ea50b73 // 2^0 * 3 * G .quad 0x56611fe8a4fcd265 .quad 0x3bd353fde5c1ba7d .quad 0x8131f31a214bd6bd .quad 0x2ab91587555bda62 .quad 0xaf25b0a84cee9730 .quad 0x025a8430e8864b8a .quad 0xc11b50029f016732 .quad 0x7a164e1b9a80f8f4 .quad 0x14ae933f0dd0d889 .quad 0x589423221c35da62 .quad 0xd170e5458cf2db4c .quad 0x5a2826af12b9b4c6 // 2^0 * 4 * G .quad 0x95fe050a056818bf .quad 0x327e89715660faa9 .quad 0xc3e8e3cd06a05073 .quad 0x27933f4c7445a49a .quad 0x287351b98efc099f .quad 0x6765c6f47dfd2538 .quad 0xca348d3dfb0a9265 .quad 0x680e910321e58727 .quad 0x5a13fbe9c476ff09 .quad 0x6e9e39457b5cc172 .quad 0x5ddbdcf9102b4494 .quad 0x7f9d0cbf63553e2b // 2^0 * 5 * G .quad 0x7f9182c3a447d6ba .quad 0xd50014d14b2729b7 .quad 0xe33cf11cb864a087 .quad 0x154a7e73eb1b55f3 .quad 0xa212bc4408a5bb33 .quad 0x8d5048c3c75eed02 .quad 0xdd1beb0c5abfec44 .quad 0x2945ccf146e206eb .quad 0xbcbbdbf1812a8285 .quad 0x270e0807d0bdd1fc .quad 0xb41b670b1bbda72d .quad 0x43aabe696b3bb69a // 2^0 * 6 * G .quad 0x499806b67b7d8ca4 .quad 0x575be28427d22739 .quad 0xbb085ce7204553b9 .quad 0x38b64c41ae417884 .quad 0x3a0ceeeb77157131 .quad 0x9b27158900c8af88 .quad 0x8065b668da59a736 .quad 0x51e57bb6a2cc38bd .quad 0x85ac326702ea4b71 .quad 0xbe70e00341a1bb01 .quad 0x53e4a24b083bc144 .quad 0x10b8e91a9f0d61e3 // 2^0 * 7 * G .quad 0xba6f2c9aaa3221b1 .quad 0x6ca021533bba23a7 .quad 0x9dea764f92192c3a .quad 0x1d6edd5d2e5317e0 .quad 0x6b1a5cd0944ea3bf .quad 0x7470353ab39dc0d2 .quad 0x71b2528228542e49 .quad 0x461bea69283c927e .quad 0xf1836dc801b8b3a2 .quad 0xb3035f47053ea49a .quad 0x529c41ba5877adf3 .quad 0x7a9fbb1c6a0f90a7 // 2^0 * 8 * G .quad 0xe2a75dedf39234d9 .quad 0x963d7680e1b558f9 .quad 0x2c2741ac6e3c23fb .quad 0x3a9024a1320e01c3 .quad 0x59b7596604dd3e8f .quad 0x6cb30377e288702c .quad 0xb1339c665ed9c323 .quad 0x0915e76061bce52f 
.quad 0xe7c1f5d9c9a2911a .quad 0xb8a371788bcca7d7 .quad 0x636412190eb62a32 .quad 0x26907c5c2ecc4e95 // 2^4 * 1 * G .quad 0x7ec851ca553e2df3 .quad 0xa71284cba64878b3 .quad 0xe6b5e4193288d1e7 .quad 0x4cf210ec5a9a8883 .quad 0x322d04a52d9021f6 .quad 0xb9c19f3375c6bf9c .quad 0x587a3a4342d20b09 .quad 0x143b1cf8aa64fe61 .quad 0x9f867c7d968acaab .quad 0x5f54258e27092729 .quad 0xd0a7d34bea180975 .quad 0x21b546a3374126e1 // 2^4 * 2 * G .quad 0xa94ff858a2888343 .quad 0xce0ed4565313ed3c .quad 0xf55c3dcfb5bf34fa .quad 0x0a653ca5c9eab371 .quad 0x490a7a45d185218f .quad 0x9a15377846049335 .quad 0x0060ea09cc31e1f6 .quad 0x7e041577f86ee965 .quad 0x66b2a496ce5b67f3 .quad 0xff5492d8bd569796 .quad 0x503cec294a592cd0 .quad 0x566943650813acb2 // 2^4 * 3 * G .quad 0xb818db0c26620798 .quad 0x5d5c31d9606e354a .quad 0x0982fa4f00a8cdc7 .quad 0x17e12bcd4653e2d4 .quad 0x5672f9eb1dabb69d .quad 0xba70b535afe853fc .quad 0x47ac0f752796d66d .quad 0x32a5351794117275 .quad 0xd3a644a6df648437 .quad 0x703b6559880fbfdd .quad 0xcb852540ad3a1aa5 .quad 0x0900b3f78e4c6468 // 2^4 * 4 * G .quad 0x0a851b9f679d651b .quad 0xe108cb61033342f2 .quad 0xd601f57fe88b30a3 .quad 0x371f3acaed2dd714 .quad 0xed280fbec816ad31 .quad 0x52d9595bd8e6efe3 .quad 0x0fe71772f6c623f5 .quad 0x4314030b051e293c .quad 0xd560005efbf0bcad .quad 0x8eb70f2ed1870c5e .quad 0x201f9033d084e6a0 .quad 0x4c3a5ae1ce7b6670 // 2^4 * 5 * G .quad 0x4138a434dcb8fa95 .quad 0x870cf67d6c96840b .quad 0xde388574297be82c .quad 0x7c814db27262a55a .quad 0xbaf875e4c93da0dd .quad 0xb93282a771b9294d .quad 0x80d63fb7f4c6c460 .quad 0x6de9c73dea66c181 .quad 0x478904d5a04df8f2 .quad 0xfafbae4ab10142d3 .quad 0xf6c8ac63555d0998 .quad 0x5aac4a412f90b104 // 2^4 * 6 * G .quad 0xc64f326b3ac92908 .quad 0x5551b282e663e1e0 .quad 0x476b35f54a1a4b83 .quad 0x1b9da3fe189f68c2 .quad 0x603a0d0abd7f5134 .quad 0x8089c932e1d3ae46 .quad 0xdf2591398798bd63 .quad 0x1c145cd274ba0235 .quad 0x32e8386475f3d743 .quad 0x365b8baf6ae5d9ef .quad 0x825238b6385b681e .quad 0x234929c1167d65e1 // 2^4 * 7 * G .quad 0x984decaba077ade8 .quad 0x383f77ad19eb389d .quad 0xc7ec6b7e2954d794 .quad 0x59c77b3aeb7c3a7a .quad 0x48145cc21d099fcf .quad 0x4535c192cc28d7e5 .quad 0x80e7c1e548247e01 .quad 0x4a5f28743b2973ee .quad 0xd3add725225ccf62 .quad 0x911a3381b2152c5d .quad 0xd8b39fad5b08f87d .quad 0x6f05606b4799fe3b // 2^4 * 8 * G .quad 0x9ffe9e92177ba962 .quad 0x98aee71d0de5cae1 .quad 0x3ff4ae942d831044 .quad 0x714de12e58533ac8 .quad 0x5b433149f91b6483 .quad 0xadb5dc655a2cbf62 .quad 0x87fa8412632827b3 .quad 0x60895e91ab49f8d8 .quad 0xe9ecf2ed0cf86c18 .quad 0xb46d06120735dfd4 .quad 0xbc9da09804b96be7 .quad 0x73e2e62fd96dc26b // 2^8 * 1 * G .quad 0xed5b635449aa515e .quad 0xa865c49f0bc6823a .quad 0x850c1fe95b42d1c4 .quad 0x30d76d6f03d315b9 .quad 0x2eccdd0e632f9c1d .quad 0x51d0b69676893115 .quad 0x52dfb76ba8637a58 .quad 0x6dd37d49a00eef39 .quad 0x6c4444172106e4c7 .quad 0xfb53d680928d7f69 .quad 0xb4739ea4694d3f26 .quad 0x10c697112e864bb0 // 2^8 * 2 * G .quad 0x6493c4277dbe5fde .quad 0x265d4fad19ad7ea2 .quad 0x0e00dfc846304590 .quad 0x25e61cabed66fe09 .quad 0x0ca62aa08358c805 .quad 0x6a3d4ae37a204247 .quad 0x7464d3a63b11eddc .quad 0x03bf9baf550806ef .quad 0x3f13e128cc586604 .quad 0x6f5873ecb459747e .quad 0xa0b63dedcc1268f5 .quad 0x566d78634586e22c // 2^8 * 3 * G .quad 0x1637a49f9cc10834 .quad 0xbc8e56d5a89bc451 .quad 0x1cb5ec0f7f7fd2db .quad 0x33975bca5ecc35d9 .quad 0xa1054285c65a2fd0 .quad 0x6c64112af31667c3 .quad 0x680ae240731aee58 .quad 0x14fba5f34793b22a .quad 0x3cd746166985f7d4 .quad 0x593e5e84c9c80057 .quad 0x2fc3f2b67b61131e .quad 
0x14829cea83fc526c // 2^8 * 4 * G .quad 0xff437b8497dd95c2 .quad 0x6c744e30aa4eb5a7 .quad 0x9e0c5d613c85e88b .quad 0x2fd9c71e5f758173 .quad 0x21e70b2f4e71ecb8 .quad 0xe656ddb940a477e3 .quad 0xbf6556cece1d4f80 .quad 0x05fc3bc4535d7b7e .quad 0x24b8b3ae52afdedd .quad 0x3495638ced3b30cf .quad 0x33a4bc83a9be8195 .quad 0x373767475c651f04 // 2^8 * 5 * G .quad 0x2fba99fd40d1add9 .quad 0xb307166f96f4d027 .quad 0x4363f05215f03bae .quad 0x1fbea56c3b18f999 .quad 0x634095cb14246590 .quad 0xef12144016c15535 .quad 0x9e38140c8910bc60 .quad 0x6bf5905730907c8c .quad 0x0fa778f1e1415b8a .quad 0x06409ff7bac3a77e .quad 0x6f52d7b89aa29a50 .quad 0x02521cf67a635a56 // 2^8 * 6 * G .quad 0x513fee0b0a9d5294 .quad 0x8f98e75c0fdf5a66 .quad 0xd4618688bfe107ce .quad 0x3fa00a7e71382ced .quad 0xb1146720772f5ee4 .quad 0xe8f894b196079ace .quad 0x4af8224d00ac824a .quad 0x001753d9f7cd6cc4 .quad 0x3c69232d963ddb34 .quad 0x1dde87dab4973858 .quad 0xaad7d1f9a091f285 .quad 0x12b5fe2fa048edb6 // 2^8 * 7 * G .quad 0x71f0fbc496fce34d .quad 0x73b9826badf35bed .quad 0xd2047261ff28c561 .quad 0x749b76f96fb1206f .quad 0xdf2b7c26ad6f1e92 .quad 0x4b66d323504b8913 .quad 0x8c409dc0751c8bc3 .quad 0x6f7e93c20796c7b8 .quad 0x1f5af604aea6ae05 .quad 0xc12351f1bee49c99 .quad 0x61a808b5eeff6b66 .quad 0x0fcec10f01e02151 // 2^8 * 8 * G .quad 0x644d58a649fe1e44 .quad 0x21fcaea231ad777e .quad 0x02441c5a887fd0d2 .quad 0x4901aa7183c511f3 .quad 0x3df2d29dc4244e45 .quad 0x2b020e7493d8de0a .quad 0x6cc8067e820c214d .quad 0x413779166feab90a .quad 0x08b1b7548c1af8f0 .quad 0xce0f7a7c246299b4 .quad 0xf760b0f91e06d939 .quad 0x41bb887b726d1213 // 2^12 * 1 * G .quad 0x9267806c567c49d8 .quad 0x066d04ccca791e6a .quad 0xa69f5645e3cc394b .quad 0x5c95b686a0788cd2 .quad 0x97d980e0aa39f7d2 .quad 0x35d0384252c6b51c .quad 0x7d43f49307cd55aa .quad 0x56bd36cfb78ac362 .quad 0x2ac519c10d14a954 .quad 0xeaf474b494b5fa90 .quad 0xe6af8382a9f87a5a .quad 0x0dea6db1879be094 // 2^12 * 2 * G .quad 0xaa66bf547344e5ab .quad 0xda1258888f1b4309 .quad 0x5e87d2b3fd564b2f .quad 0x5b2c78885483b1dd .quad 0x15baeb74d6a8797a .quad 0x7ef55cf1fac41732 .quad 0x29001f5a3c8b05c5 .quad 0x0ad7cc8752eaccfb .quad 0x52151362793408cf .quad 0xeb0f170319963d94 .quad 0xa833b2fa883d9466 .quad 0x093a7fa775003c78 // 2^12 * 3 * G .quad 0xe5107de63a16d7be .quad 0xa377ffdc9af332cf .quad 0x70d5bf18440b677f .quad 0x6a252b19a4a31403 .quad 0xb8e9604460a91286 .quad 0x7f3fd8047778d3de .quad 0x67d01e31bf8a5e2d .quad 0x7b038a06c27b653e .quad 0x9ed919d5d36990f3 .quad 0x5213aebbdb4eb9f2 .quad 0xc708ea054cb99135 .quad 0x58ded57f72260e56 // 2^12 * 4 * G .quad 0x78e79dade9413d77 .quad 0xf257f9d59729e67d .quad 0x59db910ee37aa7e6 .quad 0x6aa11b5bbb9e039c .quad 0xda6d53265b0fd48b .quad 0x8960823193bfa988 .quad 0xd78ac93261d57e28 .quad 0x79f2942d3a5c8143 .quad 0x97da2f25b6c88de9 .quad 0x251ba7eaacf20169 .quad 0x09b44f87ef4eb4e4 .quad 0x7d90ab1bbc6a7da5 // 2^12 * 5 * G .quad 0x9acca683a7016bfe .quad 0x90505f4df2c50b6d .quad 0x6b610d5fcce435aa .quad 0x19a10d446198ff96 .quad 0x1a07a3f496b3c397 .quad 0x11ceaa188f4e2532 .quad 0x7d9498d5a7751bf0 .quad 0x19ed161f508dd8a0 .quad 0x560a2cd687dce6ca .quad 0x7f3568c48664cf4d .quad 0x8741e95222803a38 .quad 0x483bdab1595653fc // 2^12 * 6 * G .quad 0xfa780f148734fa49 .quad 0x106f0b70360534e0 .quad 0x2210776fe3e307bd .quad 0x3286c109dde6a0fe .quad 0xd6cf4d0ab4da80f6 .quad 0x82483e45f8307fe0 .quad 0x05005269ae6f9da4 .quad 0x1c7052909cf7877a .quad 0x32ee7de2874e98d4 .quad 0x14c362e9b97e0c60 .quad 0x5781dcde6a60a38a .quad 0x217dd5eaaa7aa840 // 2^12 * 7 * G .quad 0x9db7c4d0248e1eb0 .quad 
0xe07697e14d74bf52 .quad 0x1e6a9b173c562354 .quad 0x7fa7c21f795a4965 .quad 0x8bdf1fb9be8c0ec8 .quad 0x00bae7f8e30a0282 .quad 0x4963991dad6c4f6c .quad 0x07058a6e5df6f60a .quad 0xe9eb02c4db31f67f .quad 0xed25fd8910bcfb2b .quad 0x46c8131f5c5cddb4 .quad 0x33b21c13a0cb9bce // 2^12 * 8 * G .quad 0x360692f8087d8e31 .quad 0xf4dcc637d27163f7 .quad 0x25a4e62065ea5963 .quad 0x659bf72e5ac160d9 .quad 0x9aafb9b05ee38c5b .quad 0xbf9d2d4e071a13c7 .quad 0x8eee6e6de933290a .quad 0x1c3bab17ae109717 .quad 0x1c9ab216c7cab7b0 .quad 0x7d65d37407bbc3cc .quad 0x52744750504a58d5 .quad 0x09f2606b131a2990 // 2^16 * 1 * G .quad 0x40e87d44744346be .quad 0x1d48dad415b52b25 .quad 0x7c3a8a18a13b603e .quad 0x4eb728c12fcdbdf7 .quad 0x7e234c597c6691ae .quad 0x64889d3d0a85b4c8 .quad 0xdae2c90c354afae7 .quad 0x0a871e070c6a9e1d .quad 0x3301b5994bbc8989 .quad 0x736bae3a5bdd4260 .quad 0x0d61ade219d59e3c .quad 0x3ee7300f2685d464 // 2^16 * 2 * G .quad 0xf5d255e49e7dd6b7 .quad 0x8016115c610b1eac .quad 0x3c99975d92e187ca .quad 0x13815762979125c2 .quad 0x43fa7947841e7518 .quad 0xe5c6fa59639c46d7 .quad 0xa1065e1de3052b74 .quad 0x7d47c6a2cfb89030 .quad 0x3fdad0148ef0d6e0 .quad 0x9d3e749a91546f3c .quad 0x71ec621026bb8157 .quad 0x148cf58d34c9ec80 // 2^16 * 3 * G .quad 0x46a492f67934f027 .quad 0x469984bef6840aa9 .quad 0x5ca1bc2a89611854 .quad 0x3ff2fa1ebd5dbbd4 .quad 0xe2572f7d9ae4756d .quad 0x56c345bb88f3487f .quad 0x9fd10b6d6960a88d .quad 0x278febad4eaea1b9 .quad 0xb1aa681f8c933966 .quad 0x8c21949c20290c98 .quad 0x39115291219d3c52 .quad 0x4104dd02fe9c677b // 2^16 * 4 * G .quad 0x72b2bf5e1124422a .quad 0xa1fa0c3398a33ab5 .quad 0x94cb6101fa52b666 .quad 0x2c863b00afaf53d5 .quad 0x81214e06db096ab8 .quad 0x21a8b6c90ce44f35 .quad 0x6524c12a409e2af5 .quad 0x0165b5a48efca481 .quad 0xf190a474a0846a76 .quad 0x12eff984cd2f7cc0 .quad 0x695e290658aa2b8f .quad 0x591b67d9bffec8b8 // 2^16 * 5 * G .quad 0x312f0d1c80b49bfa .quad 0x5979515eabf3ec8a .quad 0x727033c09ef01c88 .quad 0x3de02ec7ca8f7bcb .quad 0x99b9b3719f18b55d .quad 0xe465e5faa18c641e .quad 0x61081136c29f05ed .quad 0x489b4f867030128b .quad 0xd232102d3aeb92ef .quad 0xe16253b46116a861 .quad 0x3d7eabe7190baa24 .quad 0x49f5fbba496cbebf // 2^16 * 6 * G .quad 0x30949a108a5bcfd4 .quad 0xdc40dd70bc6473eb .quad 0x92c294c1307c0d1c .quad 0x5604a86dcbfa6e74 .quad 0x155d628c1e9c572e .quad 0x8a4d86acc5884741 .quad 0x91a352f6515763eb .quad 0x06a1a6c28867515b .quad 0x7288d1d47c1764b6 .quad 0x72541140e0418b51 .quad 0x9f031a6018acf6d1 .quad 0x20989e89fe2742c6 // 2^16 * 7 * G .quad 0x499777fd3a2dcc7f .quad 0x32857c2ca54fd892 .quad 0xa279d864d207e3a0 .quad 0x0403ed1d0ca67e29 .quad 0x1674278b85eaec2e .quad 0x5621dc077acb2bdf .quad 0x640a4c1661cbf45a .quad 0x730b9950f70595d3 .quad 0xc94b2d35874ec552 .quad 0xc5e6c8cf98246f8d .quad 0xf7cb46fa16c035ce .quad 0x5bd7454308303dcc // 2^16 * 8 * G .quad 0x7f9ad19528b24cc2 .quad 0x7f6b54656335c181 .quad 0x66b8b66e4fc07236 .quad 0x133a78007380ad83 .quad 0x85c4932115e7792a .quad 0xc64c89a2bdcdddc9 .quad 0x9d1e3da8ada3d762 .quad 0x5bb7db123067f82c .quad 0x0961f467c6ca62be .quad 0x04ec21d6211952ee .quad 0x182360779bd54770 .quad 0x740dca6d58f0e0d2 // 2^20 * 1 * G .quad 0x50b70bf5d3f0af0b .quad 0x4feaf48ae32e71f7 .quad 0x60e84ed3a55bbd34 .quad 0x00ed489b3f50d1ed .quad 0x3906c72aed261ae5 .quad 0x9ab68fd988e100f7 .quad 0xf5e9059af3360197 .quad 0x0e53dc78bf2b6d47 .quad 0xb90829bf7971877a .quad 0x5e4444636d17e631 .quad 0x4d05c52e18276893 .quad 0x27632d9a5a4a4af5 // 2^20 * 2 * G .quad 0xd11ff05154b260ce .quad 0xd86dc38e72f95270 .quad 0x601fcd0d267cc138 .quad 0x2b67916429e90ccd .quad 
0xa98285d187eaffdb .quad 0xa5b4fbbbd8d0a864 .quad 0xb658f27f022663f7 .quad 0x3bbc2b22d99ce282 .quad 0xb917c952583c0a58 .quad 0x653ff9b80fe4c6f3 .quad 0x9b0da7d7bcdf3c0c .quad 0x43a0eeb6ab54d60e // 2^20 * 3 * G .quad 0x396966a46d4a5487 .quad 0xf811a18aac2bb3ba .quad 0x66e4685b5628b26b .quad 0x70a477029d929b92 .quad 0x3ac6322357875fe8 .quad 0xd9d4f4ecf5fbcb8f .quad 0x8dee8493382bb620 .quad 0x50c5eaa14c799fdc .quad 0xdd0edc8bd6f2fb3c .quad 0x54c63aa79cc7b7a0 .quad 0xae0b032b2c8d9f1a .quad 0x6f9ce107602967fb // 2^20 * 4 * G .quad 0xad1054b1cde1c22a .quad 0xc4a8e90248eb32df .quad 0x5f3e7b33accdc0ea .quad 0x72364713fc79963e .quad 0x139693063520e0b5 .quad 0x437fcf7c88ea03fe .quad 0xf7d4c40bd3c959bc .quad 0x699154d1f893ded9 .quad 0x315d5c75b4b27526 .quad 0xcccb842d0236daa5 .quad 0x22f0c8a3345fee8e .quad 0x73975a617d39dbed // 2^20 * 5 * G .quad 0xe4024df96375da10 .quad 0x78d3251a1830c870 .quad 0x902b1948658cd91c .quad 0x7e18b10b29b7438a .quad 0x6f37f392f4433e46 .quad 0x0e19b9a11f566b18 .quad 0x220fb78a1fd1d662 .quad 0x362a4258a381c94d .quad 0x9071d9132b6beb2f .quad 0x0f26e9ad28418247 .quad 0xeab91ec9bdec925d .quad 0x4be65bc8f48af2de // 2^20 * 6 * G .quad 0x78487feba36e7028 .quad 0x5f3f13001dd8ce34 .quad 0x934fb12d4b30c489 .quad 0x056c244d397f0a2b .quad 0x1d50fba257c26234 .quad 0x7bd4823adeb0678b .quad 0xc2b0dc6ea6538af5 .quad 0x5665eec6351da73e .quad 0xdb3ee00943bfb210 .quad 0x4972018720800ac2 .quad 0x26ab5d6173bd8667 .quad 0x20b209c2ab204938 // 2^20 * 7 * G .quad 0x549e342ac07fb34b .quad 0x02d8220821373d93 .quad 0xbc262d70acd1f567 .quad 0x7a92c9fdfbcac784 .quad 0x1fcca94516bd3289 .quad 0x448d65aa41420428 .quad 0x59c3b7b216a55d62 .quad 0x49992cc64e612cd8 .quad 0x65bd1bea70f801de .quad 0x1befb7c0fe49e28a .quad 0xa86306cdb1b2ae4a .quad 0x3b7ac0cd265c2a09 // 2^20 * 8 * G .quad 0x822bee438c01bcec .quad 0x530cb525c0fbc73b .quad 0x48519034c1953fe9 .quad 0x265cc261e09a0f5b .quad 0xf0d54e4f22ed39a7 .quad 0xa2aae91e5608150a .quad 0xf421b2e9eddae875 .quad 0x31bc531d6b7de992 .quad 0xdf3d134da980f971 .quad 0x7a4fb8d1221a22a7 .quad 0x3df7d42035aad6d8 .quad 0x2a14edcc6a1a125e // 2^24 * 1 * G .quad 0xdf48ee0752cfce4e .quad 0xc3fffaf306ec08b7 .quad 0x05710b2ab95459c4 .quad 0x161d25fa963ea38d .quad 0x231a8c570478433c .quad 0xb7b5270ec281439d .quad 0xdbaa99eae3d9079f .quad 0x2c03f5256c2b03d9 .quad 0x790f18757b53a47d .quad 0x307b0130cf0c5879 .quad 0x31903d77257ef7f9 .quad 0x699468bdbd96bbaf // 2^24 * 2 * G .quad 0xbd1f2f46f4dafecf .quad 0x7cef0114a47fd6f7 .quad 0xd31ffdda4a47b37f .quad 0x525219a473905785 .quad 0xd8dd3de66aa91948 .quad 0x485064c22fc0d2cc .quad 0x9b48246634fdea2f .quad 0x293e1c4e6c4a2e3a .quad 0x376e134b925112e1 .quad 0x703778b5dca15da0 .quad 0xb04589af461c3111 .quad 0x5b605c447f032823 // 2^24 * 3 * G .quad 0xb965805920c47c89 .quad 0xe7f0100c923b8fcc .quad 0x0001256502e2ef77 .quad 0x24a76dcea8aeb3ee .quad 0x3be9fec6f0e7f04c .quad 0x866a579e75e34962 .quad 0x5542ef161e1de61a .quad 0x2f12fef4cc5abdd5 .quad 0x0a4522b2dfc0c740 .quad 0x10d06e7f40c9a407 .quad 0xc6cf144178cff668 .quad 0x5e607b2518a43790 // 2^24 * 4 * G .quad 0x58b31d8f6cdf1818 .quad 0x35cfa74fc36258a2 .quad 0xe1b3ff4f66e61d6e .quad 0x5067acab6ccdd5f7 .quad 0xa02c431ca596cf14 .quad 0xe3c42d40aed3e400 .quad 0xd24526802e0f26db .quad 0x201f33139e457068 .quad 0xfd527f6b08039d51 .quad 0x18b14964017c0006 .quad 0xd5220eb02e25a4a8 .quad 0x397cba8862460375 // 2^24 * 5 * G .quad 0x30c13093f05959b2 .quad 0xe23aa18de9a97976 .quad 0x222fd491721d5e26 .quad 0x2339d320766e6c3a .quad 0x7815c3fbc81379e7 .quad 0xa6619420dde12af1 .quad 0xffa9c0f885a8fdd5 .quad 
0x771b4022c1e1c252 .quad 0xd87dd986513a2fa7 .quad 0xf5ac9b71f9d4cf08 .quad 0xd06bc31b1ea283b3 .quad 0x331a189219971a76 // 2^24 * 6 * G .quad 0xf5166f45fb4f80c6 .quad 0x9c36c7de61c775cf .quad 0xe3d4e81b9041d91c .quad 0x31167c6b83bdfe21 .quad 0x26512f3a9d7572af .quad 0x5bcbe28868074a9e .quad 0x84edc1c11180f7c4 .quad 0x1ac9619ff649a67b .quad 0xf22b3842524b1068 .quad 0x5068343bee9ce987 .quad 0xfc9d71844a6250c8 .quad 0x612436341f08b111 // 2^24 * 7 * G .quad 0xd99d41db874e898d .quad 0x09fea5f16c07dc20 .quad 0x793d2c67d00f9bbc .quad 0x46ebe2309e5eff40 .quad 0x8b6349e31a2d2638 .quad 0x9ddfb7009bd3fd35 .quad 0x7f8bf1b8a3a06ba4 .quad 0x1522aa3178d90445 .quad 0x2c382f5369614938 .quad 0xdafe409ab72d6d10 .quad 0xe8c83391b646f227 .quad 0x45fe70f50524306c // 2^24 * 8 * G .quad 0xda4875a6960c0b8c .quad 0x5b68d076ef0e2f20 .quad 0x07fb51cf3d0b8fd4 .quad 0x428d1623a0e392d4 .quad 0x62f24920c8951491 .quad 0x05f007c83f630ca2 .quad 0x6fbb45d2f5c9d4b8 .quad 0x16619f6db57a2245 .quad 0x084f4a4401a308fd .quad 0xa82219c376a5caac .quad 0xdeb8de4643d1bc7d .quad 0x1d81592d60bd38c6 // 2^28 * 1 * G .quad 0xd833d7beec2a4c38 .quad 0x2c9162830acc20ed .quad 0xe93a47aa92df7581 .quad 0x702d67a3333c4a81 .quad 0x3a4a369a2f89c8a1 .quad 0x63137a1d7c8de80d .quad 0xbcac008a78eda015 .quad 0x2cb8b3a5b483b03f .quad 0x36e417cbcb1b90a1 .quad 0x33b3ddaa7f11794e .quad 0x3f510808885bc607 .quad 0x24141dc0e6a8020d // 2^28 * 2 * G .quad 0x59f73c773fefee9d .quad 0xb3f1ef89c1cf989d .quad 0xe35dfb42e02e545f .quad 0x5766120b47a1b47c .quad 0x91925dccbd83157d .quad 0x3ca1205322cc8094 .quad 0x28e57f183f90d6e4 .quad 0x1a4714cede2e767b .quad 0xdb20ba0fb8b6b7ff .quad 0xb732c3b677511fa1 .quad 0xa92b51c099f02d89 .quad 0x4f3875ad489ca5f1 // 2^28 * 3 * G .quad 0xc7fc762f4932ab22 .quad 0x7ac0edf72f4c3c1b .quad 0x5f6b55aa9aa895e8 .quad 0x3680274dad0a0081 .quad 0x79ed13f6ee73eec0 .quad 0xa5c6526d69110bb1 .quad 0xe48928c38603860c .quad 0x722a1446fd7059f5 .quad 0xd0959fe9a8cf8819 .quad 0xd0a995508475a99c .quad 0x6eac173320b09cc5 .quad 0x628ecf04331b1095 // 2^28 * 4 * G .quad 0x98bcb118a9d0ddbc .quad 0xee449e3408b4802b .quad 0x87089226b8a6b104 .quad 0x685f349a45c7915d .quad 0x9b41acf85c74ccf1 .quad 0xb673318108265251 .quad 0x99c92aed11adb147 .quad 0x7a47d70d34ecb40f .quad 0x60a0c4cbcc43a4f5 .quad 0x775c66ca3677bea9 .quad 0xa17aa1752ff8f5ed .quad 0x11ded9020e01fdc0 // 2^28 * 5 * G .quad 0x890e7809caefe704 .quad 0x8728296de30e8c6c .quad 0x4c5cd2a392aeb1c9 .quad 0x194263d15771531f .quad 0x471f95b03bea93b7 .quad 0x0552d7d43313abd3 .quad 0xbd9370e2e17e3f7b .quad 0x7b120f1db20e5bec .quad 0x17d2fb3d86502d7a .quad 0xb564d84450a69352 .quad 0x7da962c8a60ed75d .quad 0x00d0f85b318736aa // 2^28 * 6 * G .quad 0x978b142e777c84fd .quad 0xf402644705a8c062 .quad 0xa67ad51be7e612c7 .quad 0x2f7b459698dd6a33 .quad 0xa6753c1efd7621c1 .quad 0x69c0b4a7445671f5 .quad 0x971f527405b23c11 .quad 0x387bc74851a8c7cd .quad 0x81894b4d4a52a9a8 .quad 0xadd93e12f6b8832f .quad 0x184d8548b61bd638 .quad 0x3f1c62dbd6c9f6cd // 2^28 * 7 * G .quad 0x2e8f1f0091910c1f .quad 0xa4df4fe0bff2e12c .quad 0x60c6560aee927438 .quad 0x6338283facefc8fa .quad 0x3fad3e40148f693d .quad 0x052656e194eb9a72 .quad 0x2f4dcbfd184f4e2f .quad 0x406f8db1c482e18b .quad 0x9e630d2c7f191ee4 .quad 0x4fbf8301bc3ff670 .quad 0x787d8e4e7afb73c4 .quad 0x50d83d5be8f58fa5 // 2^28 * 8 * G .quad 0x85683916c11a1897 .quad 0x2d69a4efe506d008 .quad 0x39af1378f664bd01 .quad 0x65942131361517c6 .quad 0xc0accf90b4d3b66d .quad 0xa7059de561732e60 .quad 0x033d1f7870c6b0ba .quad 0x584161cd26d946e4 .quad 0xbbf2b1a072d27ca2 .quad 0xbf393c59fbdec704 .quad 
0xe98dbbcee262b81e .quad 0x02eebd0b3029b589 // 2^32 * 1 * G .quad 0x61368756a60dac5f .quad 0x17e02f6aebabdc57 .quad 0x7f193f2d4cce0f7d .quad 0x20234a7789ecdcf0 .quad 0x8765b69f7b85c5e8 .quad 0x6ff0678bd168bab2 .quad 0x3a70e77c1d330f9b .quad 0x3a5f6d51b0af8e7c .quad 0x76d20db67178b252 .quad 0x071c34f9d51ed160 .quad 0xf62a4a20b3e41170 .quad 0x7cd682353cffe366 // 2^32 * 2 * G .quad 0x0be1a45bd887fab6 .quad 0x2a846a32ba403b6e .quad 0xd9921012e96e6000 .quad 0x2838c8863bdc0943 .quad 0xa665cd6068acf4f3 .quad 0x42d92d183cd7e3d3 .quad 0x5759389d336025d9 .quad 0x3ef0253b2b2cd8ff .quad 0xd16bb0cf4a465030 .quad 0xfa496b4115c577ab .quad 0x82cfae8af4ab419d .quad 0x21dcb8a606a82812 // 2^32 * 3 * G .quad 0x5c6004468c9d9fc8 .quad 0x2540096ed42aa3cb .quad 0x125b4d4c12ee2f9c .quad 0x0bc3d08194a31dab .quad 0x9a8d00fabe7731ba .quad 0x8203607e629e1889 .quad 0xb2cc023743f3d97f .quad 0x5d840dbf6c6f678b .quad 0x706e380d309fe18b .quad 0x6eb02da6b9e165c7 .quad 0x57bbba997dae20ab .quad 0x3a4276232ac196dd // 2^32 * 4 * G .quad 0x4b42432c8a7084fa .quad 0x898a19e3dfb9e545 .quad 0xbe9f00219c58e45d .quad 0x1ff177cea16debd1 .quad 0x3bf8c172db447ecb .quad 0x5fcfc41fc6282dbd .quad 0x80acffc075aa15fe .quad 0x0770c9e824e1a9f9 .quad 0xcf61d99a45b5b5fd .quad 0x860984e91b3a7924 .quad 0xe7300919303e3e89 .quad 0x39f264fd41500b1e // 2^32 * 5 * G .quad 0xa7ad3417dbe7e29c .quad 0xbd94376a2b9c139c .quad 0xa0e91b8e93597ba9 .quad 0x1712d73468889840 .quad 0xd19b4aabfe097be1 .quad 0xa46dfce1dfe01929 .quad 0xc3c908942ca6f1ff .quad 0x65c621272c35f14e .quad 0xe72b89f8ce3193dd .quad 0x4d103356a125c0bb .quad 0x0419a93d2e1cfe83 .quad 0x22f9800ab19ce272 // 2^32 * 6 * G .quad 0x605a368a3e9ef8cb .quad 0xe3e9c022a5504715 .quad 0x553d48b05f24248f .quad 0x13f416cd647626e5 .quad 0x42029fdd9a6efdac .quad 0xb912cebe34a54941 .quad 0x640f64b987bdf37b .quad 0x4171a4d38598cab4 .quad 0xfa2758aa99c94c8c .quad 0x23006f6fb000b807 .quad 0xfbd291ddadda5392 .quad 0x508214fa574bd1ab // 2^32 * 7 * G .quad 0xc20269153ed6fe4b .quad 0xa65a6739511d77c4 .quad 0xcbde26462c14af94 .quad 0x22f960ec6faba74b .quad 0x461a15bb53d003d6 .quad 0xb2102888bcf3c965 .quad 0x27c576756c683a5a .quad 0x3a7758a4c86cb447 .quad 0x548111f693ae5076 .quad 0x1dae21df1dfd54a6 .quad 0x12248c90f3115e65 .quad 0x5d9fd15f8de7f494 // 2^32 * 8 * G .quad 0x031408d36d63727f .quad 0x6a379aefd7c7b533 .quad 0xa9e18fc5ccaee24b .quad 0x332f35914f8fbed3 .quad 0x3f244d2aeed7521e .quad 0x8e3a9028432e9615 .quad 0xe164ba772e9c16d4 .quad 0x3bc187fa47eb98d8 .quad 0x6d470115ea86c20c .quad 0x998ab7cb6c46d125 .quad 0xd77832b53a660188 .quad 0x450d81ce906fba03 // 2^36 * 1 * G .quad 0xf8ae4d2ad8453902 .quad 0x7018058ee8db2d1d .quad 0xaab3995fc7d2c11e .quad 0x53b16d2324ccca79 .quad 0x23264d66b2cae0b5 .quad 0x7dbaed33ebca6576 .quad 0x030ebed6f0d24ac8 .quad 0x2a887f78f7635510 .quad 0x2a23b9e75c012d4f .quad 0x0c974651cae1f2ea .quad 0x2fb63273675d70ca .quad 0x0ba7250b864403f5 // 2^36 * 2 * G .quad 0xbb0d18fd029c6421 .quad 0xbc2d142189298f02 .quad 0x8347f8e68b250e96 .quad 0x7b9f2fe8032d71c9 .quad 0xdd63589386f86d9c .quad 0x61699176e13a85a4 .quad 0x2e5111954eaa7d57 .quad 0x32c21b57fb60bdfb .quad 0xd87823cd319e0780 .quad 0xefc4cfc1897775c5 .quad 0x4854fb129a0ab3f7 .quad 0x12c49d417238c371 // 2^36 * 3 * G .quad 0x0950b533ffe83769 .quad 0x21861c1d8e1d6bd1 .quad 0xf022d8381302e510 .quad 0x2509200c6391cab4 .quad 0x09b3a01783799542 .quad 0x626dd08faad5ee3f .quad 0xba00bceeeb70149f .quad 0x1421b246a0a444c9 .quad 0x4aa43a8e8c24a7c7 .quad 0x04c1f540d8f05ef5 .quad 0xadba5e0c0b3eb9dc .quad 0x2ab5504448a49ce3 // 2^36 * 4 * G .quad 
0x2ed227266f0f5dec .quad 0x9824ee415ed50824 .quad 0x807bec7c9468d415 .quad 0x7093bae1b521e23f .quad 0xdc07ac631c5d3afa .quad 0x58615171f9df8c6c .quad 0x72a079d89d73e2b0 .quad 0x7301f4ceb4eae15d .quad 0x6409e759d6722c41 .quad 0xa674e1cf72bf729b .quad 0xbc0a24eb3c21e569 .quad 0x390167d24ebacb23 // 2^36 * 5 * G .quad 0x27f58e3bba353f1c .quad 0x4c47764dbf6a4361 .quad 0xafbbc4e56e562650 .quad 0x07db2ee6aae1a45d .quad 0xd7bb054ba2f2120b .quad 0xe2b9ceaeb10589b7 .quad 0x3fe8bac8f3c0edbe .quad 0x4cbd40767112cb69 .quad 0x0b603cc029c58176 .quad 0x5988e3825cb15d61 .quad 0x2bb61413dcf0ad8d .quad 0x7b8eec6c74183287 // 2^36 * 6 * G .quad 0xe4ca40782cd27cb0 .quad 0xdaf9c323fbe967bd .quad 0xb29bd34a8ad41e9e .quad 0x72810497626ede4d .quad 0x32fee570fc386b73 .quad 0xda8b0141da3a8cc7 .quad 0x975ffd0ac8968359 .quad 0x6ee809a1b132a855 .quad 0x9444bb31fcfd863a .quad 0x2fe3690a3e4e48c5 .quad 0xdc29c867d088fa25 .quad 0x13bd1e38d173292e // 2^36 * 7 * G .quad 0xd32b4cd8696149b5 .quad 0xe55937d781d8aab7 .quad 0x0bcb2127ae122b94 .quad 0x41e86fcfb14099b0 .quad 0x223fb5cf1dfac521 .quad 0x325c25316f554450 .quad 0x030b98d7659177ac .quad 0x1ed018b64f88a4bd .quad 0x3630dfa1b802a6b0 .quad 0x880f874742ad3bd5 .quad 0x0af90d6ceec5a4d4 .quad 0x746a247a37cdc5d9 // 2^36 * 8 * G .quad 0xd531b8bd2b7b9af6 .quad 0x5005093537fc5b51 .quad 0x232fcf25c593546d .quad 0x20a365142bb40f49 .quad 0x6eccd85278d941ed .quad 0x2254ae83d22f7843 .quad 0xc522d02e7bbfcdb7 .quad 0x681e3351bff0e4e2 .quad 0x8b64b59d83034f45 .quad 0x2f8b71f21fa20efb .quad 0x69249495ba6550e4 .quad 0x539ef98e45d5472b // 2^40 * 1 * G .quad 0x6e7bb6a1a6205275 .quad 0xaa4f21d7413c8e83 .quad 0x6f56d155e88f5cb2 .quad 0x2de25d4ba6345be1 .quad 0xd074d8961cae743f .quad 0xf86d18f5ee1c63ed .quad 0x97bdc55be7f4ed29 .quad 0x4cbad279663ab108 .quad 0x80d19024a0d71fcd .quad 0xc525c20afb288af8 .quad 0xb1a3974b5f3a6419 .quad 0x7d7fbcefe2007233 // 2^40 * 2 * G .quad 0xfaef1e6a266b2801 .quad 0x866c68c4d5739f16 .quad 0xf68a2fbc1b03762c .quad 0x5975435e87b75a8d .quad 0xcd7c5dc5f3c29094 .quad 0xc781a29a2a9105ab .quad 0x80c61d36421c3058 .quad 0x4f9cd196dcd8d4d7 .quad 0x199297d86a7b3768 .quad 0xd0d058241ad17a63 .quad 0xba029cad5c1c0c17 .quad 0x7ccdd084387a0307 // 2^40 * 3 * G .quad 0xdca6422c6d260417 .quad 0xae153d50948240bd .quad 0xa9c0c1b4fb68c677 .quad 0x428bd0ed61d0cf53 .quad 0x9b0c84186760cc93 .quad 0xcdae007a1ab32a99 .quad 0xa88dec86620bda18 .quad 0x3593ca848190ca44 .quad 0x9213189a5e849aa7 .quad 0xd4d8c33565d8facd .quad 0x8c52545b53fdbbd1 .quad 0x27398308da2d63e6 // 2^40 * 4 * G .quad 0x42c38d28435ed413 .quad 0xbd50f3603278ccc9 .quad 0xbb07ab1a79da03ef .quad 0x269597aebe8c3355 .quad 0xb9a10e4c0a702453 .quad 0x0fa25866d57d1bde .quad 0xffb9d9b5cd27daf7 .quad 0x572c2945492c33fd .quad 0xc77fc745d6cd30be .quad 0xe4dfe8d3e3baaefb .quad 0xa22c8830aa5dda0c .quad 0x7f985498c05bca80 // 2^40 * 5 * G .quad 0x3849ce889f0be117 .quad 0x8005ad1b7b54a288 .quad 0x3da3c39f23fc921c .quad 0x76c2ec470a31f304 .quad 0xd35615520fbf6363 .quad 0x08045a45cf4dfba6 .quad 0xeec24fbc873fa0c2 .quad 0x30f2653cd69b12e7 .quad 0x8a08c938aac10c85 .quad 0x46179b60db276bcb .quad 0xa920c01e0e6fac70 .quad 0x2f1273f1596473da // 2^40 * 6 * G .quad 0x4739fc7c8ae01e11 .quad 0xfd5274904a6aab9f .quad 0x41d98a8287728f2e .quad 0x5d9e572ad85b69f2 .quad 0x30488bd755a70bc0 .quad 0x06d6b5a4f1d442e7 .quad 0xead1a69ebc596162 .quad 0x38ac1997edc5f784 .quad 0x0666b517a751b13b .quad 0x747d06867e9b858c .quad 0xacacc011454dde49 .quad 0x22dfcd9cbfe9e69c // 2^40 * 7 * G .quad 0x8ddbd2e0c30d0cd9 .quad 0xad8e665facbb4333 .quad 0x8f6b258c322a961f .quad 
0x6b2916c05448c1c7 .quad 0x56ec59b4103be0a1 .quad 0x2ee3baecd259f969 .quad 0x797cb29413f5cd32 .quad 0x0fe9877824cde472 .quad 0x7edb34d10aba913b .quad 0x4ea3cd822e6dac0e .quad 0x66083dff6578f815 .quad 0x4c303f307ff00a17 // 2^40 * 8 * G .quad 0xd30a3bd617b28c85 .quad 0xc5d377b739773bea .quad 0xc6c6e78c1e6a5cbf .quad 0x0d61b8f78b2ab7c4 .quad 0x29fc03580dd94500 .quad 0xecd27aa46fbbec93 .quad 0x130a155fc2e2a7f8 .quad 0x416b151ab706a1d5 .quad 0x56a8d7efe9c136b0 .quad 0xbd07e5cd58e44b20 .quad 0xafe62fda1b57e0ab .quad 0x191a2af74277e8d2 // 2^44 * 1 * G .quad 0xd550095bab6f4985 .quad 0x04f4cd5b4fbfaf1a .quad 0x9d8e2ed12a0c7540 .quad 0x2bc24e04b2212286 .quad 0x09d4b60b2fe09a14 .quad 0xc384f0afdbb1747e .quad 0x58e2ea8978b5fd6e .quad 0x519ef577b5e09b0a .quad 0x1863d7d91124cca9 .quad 0x7ac08145b88a708e .quad 0x2bcd7309857031f5 .quad 0x62337a6e8ab8fae5 // 2^44 * 2 * G .quad 0x4bcef17f06ffca16 .quad 0xde06e1db692ae16a .quad 0x0753702d614f42b0 .quad 0x5f6041b45b9212d0 .quad 0xd1ab324e1b3a1273 .quad 0x18947cf181055340 .quad 0x3b5d9567a98c196e .quad 0x7fa00425802e1e68 .quad 0x7d531574028c2705 .quad 0x80317d69db0d75fe .quad 0x30fface8ef8c8ddd .quad 0x7e9de97bb6c3e998 // 2^44 * 3 * G .quad 0x1558967b9e6585a3 .quad 0x97c99ce098e98b92 .quad 0x10af149b6eb3adad .quad 0x42181fe8f4d38cfa .quad 0xf004be62a24d40dd .quad 0xba0659910452d41f .quad 0x81c45ee162a44234 .quad 0x4cb829d8a22266ef .quad 0x1dbcaa8407b86681 .quad 0x081f001e8b26753b .quad 0x3cd7ce6a84048e81 .quad 0x78af11633f25f22c // 2^44 * 4 * G .quad 0x8416ebd40b50babc .quad 0x1508722628208bee .quad 0xa3148fafb9c1c36d .quad 0x0d07daacd32d7d5d .quad 0x3241c00e7d65318c .quad 0xe6bee5dcd0e86de7 .quad 0x118b2dc2fbc08c26 .quad 0x680d04a7fc603dc3 .quad 0xf9c2414a695aa3eb .quad 0xdaa42c4c05a68f21 .quad 0x7c6c23987f93963e .quad 0x210e8cd30c3954e3 // 2^44 * 5 * G .quad 0xac4201f210a71c06 .quad 0x6a65e0aef3bfb021 .quad 0xbc42c35c393632f7 .quad 0x56ea8db1865f0742 .quad 0x2b50f16137fe6c26 .quad 0xe102bcd856e404d8 .quad 0x12b0f1414c561f6b .quad 0x51b17bc8d028ec91 .quad 0xfff5fb4bcf535119 .quad 0xf4989d79df1108a0 .quad 0xbdfcea659a3ba325 .quad 0x18a11f1174d1a6f2 // 2^44 * 6 * G .quad 0x407375ab3f6bba29 .quad 0x9ec3b6d8991e482e .quad 0x99c80e82e55f92e9 .quad 0x307c13b6fb0c0ae1 .quad 0xfbd63cdad27a5f2c .quad 0xf00fc4bc8aa106d7 .quad 0x53fb5c1a8e64a430 .quad 0x04eaabe50c1a2e85 .quad 0x24751021cb8ab5e7 .quad 0xfc2344495c5010eb .quad 0x5f1e717b4e5610a1 .quad 0x44da5f18c2710cd5 // 2^44 * 7 * G .quad 0x033cc55ff1b82eb5 .quad 0xb15ae36d411cae52 .quad 0xba40b6198ffbacd3 .quad 0x768edce1532e861f .quad 0x9156fe6b89d8eacc .quad 0xe6b79451e23126a1 .quad 0xbd7463d93944eb4e .quad 0x726373f6767203ae .quad 0xe305ca72eb7ef68a .quad 0x662cf31f70eadb23 .quad 0x18f026fdb4c45b68 .quad 0x513b5384b5d2ecbd // 2^44 * 8 * G .quad 0x46d46280c729989e .quad 0x4b93fbd05368a5dd .quad 0x63df3f81d1765a89 .quad 0x34cebd64b9a0a223 .quad 0x5e2702878af34ceb .quad 0x900b0409b946d6ae .quad 0x6512ebf7dabd8512 .quad 0x61d9b76988258f81 .quad 0xa6c5a71349b7d94b .quad 0xa3f3d15823eb9446 .quad 0x0416fbd277484834 .quad 0x69d45e6f2c70812f // 2^48 * 1 * G .quad 0xce16f74bc53c1431 .quad 0x2b9725ce2072edde .quad 0xb8b9c36fb5b23ee7 .quad 0x7e2e0e450b5cc908 .quad 0x9fe62b434f460efb .quad 0xded303d4a63607d6 .quad 0xf052210eb7a0da24 .quad 0x237e7dbe00545b93 .quad 0x013575ed6701b430 .quad 0x231094e69f0bfd10 .quad 0x75320f1583e47f22 .quad 0x71afa699b11155e3 // 2^48 * 2 * G .quad 0x65ce6f9b3953b61d .quad 0xc65839eaafa141e6 .quad 0x0f435ffda9f759fe .quad 0x021142e9c2b1c28e .quad 0xea423c1c473b50d6 .quad 0x51e87a1f3b38ef10 .quad 
0x9b84bf5fb2c9be95 .quad 0x00731fbc78f89a1c .quad 0xe430c71848f81880 .quad 0xbf960c225ecec119 .quad 0xb6dae0836bba15e3 .quad 0x4c4d6f3347e15808 // 2^48 * 3 * G .quad 0x18f7eccfc17d1fc9 .quad 0x6c75f5a651403c14 .quad 0xdbde712bf7ee0cdf .quad 0x193fddaaa7e47a22 .quad 0x2f0cddfc988f1970 .quad 0x6b916227b0b9f51b .quad 0x6ec7b6c4779176be .quad 0x38bf9500a88f9fa8 .quad 0x1fd2c93c37e8876f .quad 0xa2f61e5a18d1462c .quad 0x5080f58239241276 .quad 0x6a6fb99ebf0d4969 // 2^48 * 4 * G .quad 0x6a46c1bb560855eb .quad 0x2416bb38f893f09d .quad 0xd71d11378f71acc1 .quad 0x75f76914a31896ea .quad 0xeeb122b5b6e423c6 .quad 0x939d7010f286ff8e .quad 0x90a92a831dcf5d8c .quad 0x136fda9f42c5eb10 .quad 0xf94cdfb1a305bdd1 .quad 0x0f364b9d9ff82c08 .quad 0x2a87d8a5c3bb588a .quad 0x022183510be8dcba // 2^48 * 5 * G .quad 0x4af766385ead2d14 .quad 0xa08ed880ca7c5830 .quad 0x0d13a6e610211e3d .quad 0x6a071ce17b806c03 .quad 0x9d5a710143307a7f .quad 0xb063de9ec47da45f .quad 0x22bbfe52be927ad3 .quad 0x1387c441fd40426c .quad 0xb5d3c3d187978af8 .quad 0x722b5a3d7f0e4413 .quad 0x0d7b4848bb477ca0 .quad 0x3171b26aaf1edc92 // 2^48 * 6 * G .quad 0xa92f319097564ca8 .quad 0xff7bb84c2275e119 .quad 0x4f55fe37a4875150 .quad 0x221fd4873cf0835a .quad 0xa60db7d8b28a47d1 .quad 0xa6bf14d61770a4f1 .quad 0xd4a1f89353ddbd58 .quad 0x6c514a63344243e9 .quad 0x2322204f3a156341 .quad 0xfb73e0e9ba0a032d .quad 0xfce0dd4c410f030e .quad 0x48daa596fb924aaa // 2^48 * 7 * G .quad 0x6eca8e665ca59cc7 .quad 0xa847254b2e38aca0 .quad 0x31afc708d21e17ce .quad 0x676dd6fccad84af7 .quad 0x14f61d5dc84c9793 .quad 0x9941f9e3ef418206 .quad 0xcdf5b88f346277ac .quad 0x58c837fa0e8a79a9 .quad 0x0cf9688596fc9058 .quad 0x1ddcbbf37b56a01b .quad 0xdcc2e77d4935d66a .quad 0x1c4f73f2c6a57f0a // 2^48 * 8 * G .quad 0x0e7a4fbd305fa0bb .quad 0x829d4ce054c663ad .quad 0xf421c3832fe33848 .quad 0x795ac80d1bf64c42 .quad 0xb36e706efc7c3484 .quad 0x73dfc9b4c3c1cf61 .quad 0xeb1d79c9781cc7e5 .quad 0x70459adb7daf675c .quad 0x1b91db4991b42bb3 .quad 0x572696234b02dcca .quad 0x9fdf9ee51f8c78dc .quad 0x5fe162848ce21fd3 // 2^52 * 1 * G .quad 0xe2790aae4d077c41 .quad 0x8b938270db7469a3 .quad 0x6eb632dc8abd16a2 .quad 0x720814ecaa064b72 .quad 0x315c29c795115389 .quad 0xd7e0e507862f74ce .quad 0x0c4a762185927432 .quad 0x72de6c984a25a1e4 .quad 0xae9ab553bf6aa310 .quad 0x050a50a9806d6e1b .quad 0x92bb7403adff5139 .quad 0x0394d27645be618b // 2^52 * 2 * G .quad 0x4d572251857eedf4 .quad 0xe3724edde19e93c5 .quad 0x8a71420e0b797035 .quad 0x3b3c833687abe743 .quad 0xf5396425b23545a4 .quad 0x15a7a27e98fbb296 .quad 0xab6c52bc636fdd86 .quad 0x79d995a8419334ee .quad 0xcd8a8ea61195dd75 .quad 0xa504d8a81dd9a82f .quad 0x540dca81a35879b6 .quad 0x60dd16a379c86a8a // 2^52 * 3 * G .quad 0x35a2c8487381e559 .quad 0x596ffea6d78082cb .quad 0xcb9771ebdba7b653 .quad 0x5a08b5019b4da685 .quad 0x3501d6f8153e47b8 .quad 0xb7a9675414a2f60c .quad 0x112ee8b6455d9523 .quad 0x4e62a3c18112ea8a .quad 0xc8d4ac04516ab786 .quad 0x595af3215295b23d .quad 0xd6edd234db0230c1 .quad 0x0929efe8825b41cc // 2^52 * 4 * G .quad 0x5f0601d1cbd0f2d3 .quad 0x736e412f6132bb7f .quad 0x83604432238dde87 .quad 0x1e3a5272f5c0753c .quad 0x8b3172b7ad56651d .quad 0x01581b7a3fabd717 .quad 0x2dc94df6424df6e4 .quad 0x30376e5d2c29284f .quad 0xd2918da78159a59c .quad 0x6bdc1cd93f0713f3 .quad 0x565f7a934acd6590 .quad 0x53daacec4cb4c128 // 2^52 * 5 * G .quad 0x4ca73bd79cc8a7d6 .quad 0x4d4a738f47e9a9b2 .quad 0xf4cbf12942f5fe00 .quad 0x01a13ff9bdbf0752 .quad 0x99852bc3852cfdb0 .quad 0x2cc12e9559d6ed0b .quad 0x70f9e2bf9b5ac27b .quad 0x4f3b8c117959ae99 .quad 0x55b6c9c82ff26412 .quad 
0x1ac4a8c91fb667a8 .quad 0xd527bfcfeb778bf2 .quad 0x303337da7012a3be // 2^52 * 6 * G .quad 0x955422228c1c9d7c .quad 0x01fac1371a9b340f .quad 0x7e8d9177925b48d7 .quad 0x53f8ad5661b3e31b .quad 0x976d3ccbfad2fdd1 .quad 0xcb88839737a640a8 .quad 0x2ff00c1d6734cb25 .quad 0x269ff4dc789c2d2b .quad 0x0c003fbdc08d678d .quad 0x4d982fa37ead2b17 .quad 0xc07e6bcdb2e582f1 .quad 0x296c7291df412a44 // 2^52 * 7 * G .quad 0x7903de2b33daf397 .quad 0xd0ff0619c9a624b3 .quad 0x8a1d252b555b3e18 .quad 0x2b6d581c52e0b7c0 .quad 0xdfb23205dab8b59e .quad 0x465aeaa0c8092250 .quad 0xd133c1189a725d18 .quad 0x2327370261f117d1 .quad 0x3d0543d3623e7986 .quad 0x679414c2c278a354 .quad 0xae43f0cc726196f6 .quad 0x7836c41f8245eaba // 2^52 * 8 * G .quad 0xe7a254db49e95a81 .quad 0x5192d5d008b0ad73 .quad 0x4d20e5b1d00afc07 .quad 0x5d55f8012cf25f38 .quad 0xca651e848011937c .quad 0xc6b0c46e6ef41a28 .quad 0xb7021ba75f3f8d52 .quad 0x119dff99ead7b9fd .quad 0x43eadfcbf4b31d4d .quad 0xc6503f7411148892 .quad 0xfeee68c5060d3b17 .quad 0x329293b3dd4a0ac8 // 2^56 * 1 * G .quad 0x4e59214fe194961a .quad 0x49be7dc70d71cd4f .quad 0x9300cfd23b50f22d .quad 0x4789d446fc917232 .quad 0x2879852d5d7cb208 .quad 0xb8dedd70687df2e7 .quad 0xdc0bffab21687891 .quad 0x2b44c043677daa35 .quad 0x1a1c87ab074eb78e .quad 0xfac6d18e99daf467 .quad 0x3eacbbcd484f9067 .quad 0x60c52eef2bb9a4e4 // 2^56 * 2 * G .quad 0x0b5d89bc3bfd8bf1 .quad 0xb06b9237c9f3551a .quad 0x0e4c16b0d53028f5 .quad 0x10bc9c312ccfcaab .quad 0x702bc5c27cae6d11 .quad 0x44c7699b54a48cab .quad 0xefbc4056ba492eb2 .quad 0x70d77248d9b6676d .quad 0xaa8ae84b3ec2a05b .quad 0x98699ef4ed1781e0 .quad 0x794513e4708e85d1 .quad 0x63755bd3a976f413 // 2^56 * 3 * G .quad 0xb55fa03e2ad10853 .quad 0x356f75909ee63569 .quad 0x9ff9f1fdbe69b890 .quad 0x0d8cc1c48bc16f84 .quad 0x3dc7101897f1acb7 .quad 0x5dda7d5ec165bbd8 .quad 0x508e5b9c0fa1020f .quad 0x2763751737c52a56 .quad 0x029402d36eb419a9 .quad 0xf0b44e7e77b460a5 .quad 0xcfa86230d43c4956 .quad 0x70c2dd8a7ad166e7 // 2^56 * 4 * G .quad 0x656194509f6fec0e .quad 0xee2e7ea946c6518d .quad 0x9733c1f367e09b5c .quad 0x2e0fac6363948495 .quad 0x91d4967db8ed7e13 .quad 0x74252f0ad776817a .quad 0xe40982e00d852564 .quad 0x32b8613816a53ce5 .quad 0x79e7f7bee448cd64 .quad 0x6ac83a67087886d0 .quad 0xf89fd4d9a0e4db2e .quad 0x4179215c735a4f41 // 2^56 * 5 * G .quad 0x8c7094e7d7dced2a .quad 0x97fb8ac347d39c70 .quad 0xe13be033a906d902 .quad 0x700344a30cd99d76 .quad 0xe4ae33b9286bcd34 .quad 0xb7ef7eb6559dd6dc .quad 0x278b141fb3d38e1f .quad 0x31fa85662241c286 .quad 0xaf826c422e3622f4 .quad 0xc12029879833502d .quad 0x9bc1b7e12b389123 .quad 0x24bb2312a9952489 // 2^56 * 6 * G .quad 0xb1a8ed1732de67c3 .quad 0x3cb49418461b4948 .quad 0x8ebd434376cfbcd2 .quad 0x0fee3e871e188008 .quad 0x41f80c2af5f85c6b .quad 0x687284c304fa6794 .quad 0x8945df99a3ba1bad .quad 0x0d1d2af9ffeb5d16 .quad 0xa9da8aa132621edf .quad 0x30b822a159226579 .quad 0x4004197ba79ac193 .quad 0x16acd79718531d76 // 2^56 * 7 * G .quad 0x72df72af2d9b1d3d .quad 0x63462a36a432245a .quad 0x3ecea07916b39637 .quad 0x123e0ef6b9302309 .quad 0xc959c6c57887b6ad .quad 0x94e19ead5f90feba .quad 0x16e24e62a342f504 .quad 0x164ed34b18161700 .quad 0x487ed94c192fe69a .quad 0x61ae2cea3a911513 .quad 0x877bf6d3b9a4de27 .quad 0x78da0fc61073f3eb // 2^56 * 8 * G .quad 0x5bf15d28e52bc66a .quad 0x2c47e31870f01a8e .quad 0x2419afbc06c28bdd .quad 0x2d25deeb256b173a .quad 0xa29f80f1680c3a94 .quad 0x71f77e151ae9e7e6 .quad 0x1100f15848017973 .quad 0x054aa4b316b38ddd .quad 0xdfc8468d19267cb8 .quad 0x0b28789c66e54daf .quad 0x2aeb1d2a666eec17 .quad 0x134610a6ab7da760 // 2^60 * 
1 * G .quad 0xcaf55ec27c59b23f .quad 0x99aeed3e154d04f2 .quad 0x68441d72e14141f4 .quad 0x140345133932a0a2 .quad 0xd91430e0dc028c3c .quad 0x0eb955a85217c771 .quad 0x4b09e1ed2c99a1fa .quad 0x42881af2bd6a743c .quad 0x7bfec69aab5cad3d .quad 0xc23e8cd34cb2cfad .quad 0x685dd14bfb37d6a2 .quad 0x0ad6d64415677a18 // 2^60 * 2 * G .quad 0x781a439e417becb5 .quad 0x4ac5938cd10e0266 .quad 0x5da385110692ac24 .quad 0x11b065a2ade31233 .quad 0x7914892847927e9f .quad 0x33dad6ef370aa877 .quad 0x1f8f24fa11122703 .quad 0x5265ac2f2adf9592 .quad 0x405fdd309afcb346 .quad 0xd9723d4428e63f54 .quad 0x94c01df05f65aaae .quad 0x43e4dc3ae14c0809 // 2^60 * 3 * G .quad 0xbc12c7f1a938a517 .quad 0x473028ab3180b2e1 .quad 0x3f78571efbcd254a .quad 0x74e534426ff6f90f .quad 0xea6f7ac3adc2c6a3 .quad 0xd0e928f6e9717c94 .quad 0xe2d379ead645eaf5 .quad 0x46dd8785c51ffbbe .quad 0x709801be375c8898 .quad 0x4b06dab5e3fd8348 .quad 0x75880ced27230714 .quad 0x2b09468fdd2f4c42 // 2^60 * 4 * G .quad 0x97c749eeb701cb96 .quad 0x83f438d4b6a369c3 .quad 0x62962b8b9a402cd9 .quad 0x6976c7509888df7b .quad 0x5b97946582ffa02a .quad 0xda096a51fea8f549 .quad 0xa06351375f77af9b .quad 0x1bcfde61201d1e76 .quad 0x4a4a5490246a59a2 .quad 0xd63ebddee87fdd90 .quad 0xd9437c670d2371fa .quad 0x69e87308d30f8ed6 // 2^60 * 5 * G .quad 0x435a8bb15656beb0 .quad 0xf8fac9ba4f4d5bca .quad 0xb9b278c41548c075 .quad 0x3eb0ef76e892b622 .quad 0x0f80bf028bc80303 .quad 0x6aae16b37a18cefb .quad 0xdd47ea47d72cd6a3 .quad 0x61943588f4ed39aa .quad 0xd26e5c3e91039f85 .quad 0xc0e9e77df6f33aa9 .quad 0xe8968c5570066a93 .quad 0x3c34d1881faaaddd // 2^60 * 6 * G .quad 0x3f9d2b5ea09f9ec0 .quad 0x1dab3b6fb623a890 .quad 0xa09ba3ea72d926c4 .quad 0x374193513fd8b36d .quad 0xbd5b0b8f2fffe0d9 .quad 0x6aa254103ed24fb9 .quad 0x2ac7d7bcb26821c4 .quad 0x605b394b60dca36a .quad 0xb4e856e45a9d1ed2 .quad 0xefe848766c97a9a2 .quad 0xb104cf641e5eee7d .quad 0x2f50b81c88a71c8f // 2^60 * 7 * G .quad 0x31723c61fc6811bb .quad 0x9cb450486211800f .quad 0x768933d347995753 .quad 0x3491a53502752fcd .quad 0x2b552ca0a7da522a .quad 0x3230b336449b0250 .quad 0xf2c4c5bca4b99fb9 .quad 0x7b2c674958074a22 .quad 0xd55165883ed28cdf .quad 0x12d84fd2d362de39 .quad 0x0a874ad3e3378e4f .quad 0x000d2b1f7c763e74 // 2^60 * 8 * G .quad 0x3d420811d06d4a67 .quad 0xbefc048590e0ffe3 .quad 0xf870c6b7bd487bde .quad 0x6e2a7316319afa28 .quad 0x9624778c3e94a8ab .quad 0x0ad6f3cee9a78bec .quad 0x948ac7810d743c4f .quad 0x76627935aaecfccc .quad 0x56a8ac24d6d59a9f .quad 0xc8db753e3096f006 .quad 0x477f41e68f4c5299 .quad 0x588d851cf6c86114 // 2^64 * 1 * G .quad 0x51138ec78df6b0fe .quad 0x5397da89e575f51b .quad 0x09207a1d717af1b9 .quad 0x2102fdba2b20d650 .quad 0xcd2a65e777d1f515 .quad 0x548991878faa60f1 .quad 0xb1b73bbcdabc06e5 .quad 0x654878cba97cc9fb .quad 0x969ee405055ce6a1 .quad 0x36bca7681251ad29 .quad 0x3a1af517aa7da415 .quad 0x0ad725db29ecb2ba // 2^64 * 2 * G .quad 0xdc4267b1834e2457 .quad 0xb67544b570ce1bc5 .quad 0x1af07a0bf7d15ed7 .quad 0x4aefcffb71a03650 .quad 0xfec7bc0c9b056f85 .quad 0x537d5268e7f5ffd7 .quad 0x77afc6624312aefa .quad 0x4f675f5302399fd9 .quad 0xc32d36360415171e .quad 0xcd2bef118998483b .quad 0x870a6eadd0945110 .quad 0x0bccbb72a2a86561 // 2^64 * 3 * G .quad 0x185e962feab1a9c8 .quad 0x86e7e63565147dcd .quad 0xb092e031bb5b6df2 .quad 0x4024f0ab59d6b73e .quad 0x186d5e4c50fe1296 .quad 0xe0397b82fee89f7e .quad 0x3bc7f6c5507031b0 .quad 0x6678fd69108f37c2 .quad 0x1586fa31636863c2 .quad 0x07f68c48572d33f2 .quad 0x4f73cc9f789eaefc .quad 0x2d42e2108ead4701 // 2^64 * 4 * G .quad 0x97f5131594dfd29b .quad 0x6155985d313f4c6a .quad 
0xeba13f0708455010 .quad 0x676b2608b8d2d322 .quad 0x21717b0d0f537593 .quad 0x914e690b131e064c .quad 0x1bb687ae752ae09f .quad 0x420bf3a79b423c6e .quad 0x8138ba651c5b2b47 .quad 0x8671b6ec311b1b80 .quad 0x7bff0cb1bc3135b0 .quad 0x745d2ffa9c0cf1e0 // 2^64 * 5 * G .quad 0xbf525a1e2bc9c8bd .quad 0xea5b260826479d81 .quad 0xd511c70edf0155db .quad 0x1ae23ceb960cf5d0 .quad 0x6036df5721d34e6a .quad 0xb1db8827997bb3d0 .quad 0xd3c209c3c8756afa .quad 0x06e15be54c1dc839 .quad 0x5b725d871932994a .quad 0x32351cb5ceb1dab0 .quad 0x7dc41549dab7ca05 .quad 0x58ded861278ec1f7 // 2^64 * 6 * G .quad 0xd8173793f266c55c .quad 0xc8c976c5cc454e49 .quad 0x5ce382f8bc26c3a8 .quad 0x2ff39de85485f6f9 .quad 0x2dfb5ba8b6c2c9a8 .quad 0x48eeef8ef52c598c .quad 0x33809107f12d1573 .quad 0x08ba696b531d5bd8 .quad 0x77ed3eeec3efc57a .quad 0x04e05517d4ff4811 .quad 0xea3d7a3ff1a671cb .quad 0x120633b4947cfe54 // 2^64 * 7 * G .quad 0x0b94987891610042 .quad 0x4ee7b13cecebfae8 .quad 0x70be739594f0a4c0 .quad 0x35d30a99b4d59185 .quad 0x82bd31474912100a .quad 0xde237b6d7e6fbe06 .quad 0xe11e761911ea79c6 .quad 0x07433be3cb393bde .quad 0xff7944c05ce997f4 .quad 0x575d3de4b05c51a3 .quad 0x583381fd5a76847c .quad 0x2d873ede7af6da9f // 2^64 * 8 * G .quad 0x157a316443373409 .quad 0xfab8b7eef4aa81d9 .quad 0xb093fee6f5a64806 .quad 0x2e773654707fa7b6 .quad 0xaa6202e14e5df981 .quad 0xa20d59175015e1f5 .quad 0x18a275d3bae21d6c .quad 0x0543618a01600253 .quad 0x0deabdf4974c23c1 .quad 0xaa6f0a259dce4693 .quad 0x04202cb8a29aba2c .quad 0x4b1443362d07960d // 2^68 * 1 * G .quad 0x47b837f753242cec .quad 0x256dc48cc04212f2 .quad 0xe222fbfbe1d928c5 .quad 0x48ea295bad8a2c07 .quad 0x299b1c3f57c5715e .quad 0x96cb929e6b686d90 .quad 0x3004806447235ab3 .quad 0x2c435c24a44d9fe1 .quad 0x0607c97c80f8833f .quad 0x0e851578ca25ec5b .quad 0x54f7450b161ebb6f .quad 0x7bcb4792a0def80e // 2^68 * 2 * G .quad 0x8487e3d02bc73659 .quad 0x4baf8445059979df .quad 0xd17c975adcad6fbf .quad 0x57369f0bdefc96b6 .quad 0x1cecd0a0045224c2 .quad 0x757f1b1b69e53952 .quad 0x775b7a925289f681 .quad 0x1b6cc62016736148 .quad 0xf1a9990175638698 .quad 0x353dd1beeeaa60d3 .quad 0x849471334c9ba488 .quad 0x63fa6e6843ade311 // 2^68 * 3 * G .quad 0xd15c20536597c168 .quad 0x9f73740098d28789 .quad 0x18aee7f13257ba1f .quad 0x3418bfda07346f14 .quad 0x2195becdd24b5eb7 .quad 0x5e41f18cc0cd44f9 .quad 0xdf28074441ca9ede .quad 0x07073b98f35b7d67 .quad 0xd03c676c4ce530d4 .quad 0x0b64c0473b5df9f4 .quad 0x065cef8b19b3a31e .quad 0x3084d661533102c9 // 2^68 * 4 * G .quad 0xe1f6b79ebf8469ad .quad 0x15801004e2663135 .quad 0x9a498330af74181b .quad 0x3ba2504f049b673c .quad 0x9a6ce876760321fd .quad 0x7fe2b5109eb63ad8 .quad 0x00e7d4ae8ac80592 .quad 0x73d86b7abb6f723a .quad 0x0b52b5606dba5ab6 .quad 0xa9134f0fbbb1edab .quad 0x30a9520d9b04a635 .quad 0x6813b8f37973e5db // 2^68 * 5 * G .quad 0x9854b054334127c1 .quad 0x105d047882fbff25 .quad 0xdb49f7f944186f4f .quad 0x1768e838bed0b900 .quad 0xf194ca56f3157e29 .quad 0x136d35705ef528a5 .quad 0xdd4cef778b0599bc .quad 0x7d5472af24f833ed .quad 0xd0ef874daf33da47 .quad 0x00d3be5db6e339f9 .quad 0x3f2a8a2f9c9ceece .quad 0x5d1aeb792352435a // 2^68 * 6 * G .quad 0xf59e6bb319cd63ca .quad 0x670c159221d06839 .quad 0xb06d565b2150cab6 .quad 0x20fb199d104f12a3 .quad 0x12c7bfaeb61ba775 .quad 0xb84e621fe263bffd .quad 0x0b47a5c35c840dcf .quad 0x7e83be0bccaf8634 .quad 0x61943dee6d99c120 .quad 0x86101f2e460b9fe0 .quad 0x6bb2f1518ee8598d .quad 0x76b76289fcc475cc // 2^68 * 7 * G .quad 0x791b4cc1756286fa .quad 0xdbced317d74a157c .quad 0x7e732421ea72bde6 .quad 0x01fe18491131c8e9 .quad 0x4245f1a1522ec0b3 .quad 
0x558785b22a75656d .quad 0x1d485a2548a1b3c0 .quad 0x60959eccd58fe09f .quad 0x3ebfeb7ba8ed7a09 .quad 0x49fdc2bbe502789c .quad 0x44ebce5d3c119428 .quad 0x35e1eb55be947f4a // 2^68 * 8 * G .quad 0xdbdae701c5738dd3 .quad 0xf9c6f635b26f1bee .quad 0x61e96a8042f15ef4 .quad 0x3aa1d11faf60a4d8 .quad 0x14fd6dfa726ccc74 .quad 0x3b084cfe2f53b965 .quad 0xf33ae4f552a2c8b4 .quad 0x59aab07a0d40166a .quad 0x77bcec4c925eac25 .quad 0x1848718460137738 .quad 0x5b374337fea9f451 .quad 0x1865e78ec8e6aa46 // 2^72 * 1 * G .quad 0xccc4b7c7b66e1f7a .quad 0x44157e25f50c2f7e .quad 0x3ef06dfc713eaf1c .quad 0x582f446752da63f7 .quad 0x967c54e91c529ccb .quad 0x30f6269264c635fb .quad 0x2747aff478121965 .quad 0x17038418eaf66f5c .quad 0xc6317bd320324ce4 .quad 0xa81042e8a4488bc4 .quad 0xb21ef18b4e5a1364 .quad 0x0c2a1c4bcda28dc9 // 2^72 * 2 * G .quad 0xd24dc7d06f1f0447 .quad 0xb2269e3edb87c059 .quad 0xd15b0272fbb2d28f .quad 0x7c558bd1c6f64877 .quad 0xedc4814869bd6945 .quad 0x0d6d907dbe1c8d22 .quad 0xc63bd212d55cc5ab .quad 0x5a6a9b30a314dc83 .quad 0xd0ec1524d396463d .quad 0x12bb628ac35a24f0 .quad 0xa50c3a791cbc5fa4 .quad 0x0404a5ca0afbafc3 // 2^72 * 3 * G .quad 0x8c1f40070aa743d6 .quad 0xccbad0cb5b265ee8 .quad 0x574b046b668fd2de .quad 0x46395bfdcadd9633 .quad 0x62bc9e1b2a416fd1 .quad 0xb5c6f728e350598b .quad 0x04343fd83d5d6967 .quad 0x39527516e7f8ee98 .quad 0x117fdb2d1a5d9a9c .quad 0x9c7745bcd1005c2a .quad 0xefd4bef154d56fea .quad 0x76579a29e822d016 // 2^72 * 4 * G .quad 0x45b68e7e49c02a17 .quad 0x23cd51a2bca9a37f .quad 0x3ed65f11ec224c1b .quad 0x43a384dc9e05bdb1 .quad 0x333cb51352b434f2 .quad 0xd832284993de80e1 .quad 0xb5512887750d35ce .quad 0x02c514bb2a2777c1 .quad 0x684bd5da8bf1b645 .quad 0xfb8bd37ef6b54b53 .quad 0x313916d7a9b0d253 .quad 0x1160920961548059 // 2^72 * 5 * G .quad 0xb44d166929dacfaa .quad 0xda529f4c8413598f .quad 0xe9ef63ca453d5559 .quad 0x351e125bc5698e0b .quad 0x7a385616369b4dcd .quad 0x75c02ca7655c3563 .quad 0x7dc21bf9d4f18021 .quad 0x2f637d7491e6e042 .quad 0xd4b49b461af67bbe .quad 0xd603037ac8ab8961 .quad 0x71dee19ff9a699fb .quad 0x7f182d06e7ce2a9a // 2^72 * 6 * G .quad 0x7a7c8e64ab0168ec .quad 0xcb5a4a5515edc543 .quad 0x095519d347cd0eda .quad 0x67d4ac8c343e93b0 .quad 0x09454b728e217522 .quad 0xaa58e8f4d484b8d8 .quad 0xd358254d7f46903c .quad 0x44acc043241c5217 .quad 0x1c7d6bbb4f7a5777 .quad 0x8b35fed4918313e1 .quad 0x4adca1c6c96b4684 .quad 0x556d1c8312ad71bd // 2^72 * 7 * G .quad 0x17ef40e30c8d3982 .quad 0x31f7073e15a3fa34 .quad 0x4f21f3cb0773646e .quad 0x746c6c6d1d824eff .quad 0x81f06756b11be821 .quad 0x0faff82310a3f3dd .quad 0xf8b2d0556a99465d .quad 0x097abe38cc8c7f05 .quad 0x0c49c9877ea52da4 .quad 0x4c4369559bdc1d43 .quad 0x022c3809f7ccebd2 .quad 0x577e14a34bee84bd // 2^72 * 8 * G .quad 0xf0e268ac61a73b0a .quad 0xf2fafa103791a5f5 .quad 0xc1e13e826b6d00e9 .quad 0x60fa7ee96fd78f42 .quad 0x94fecebebd4dd72b .quad 0xf46a4fda060f2211 .quad 0x124a5977c0c8d1ff .quad 0x705304b8fb009295 .quad 0xb63d1d354d296ec6 .quad 0xf3c3053e5fad31d8 .quad 0x670b958cb4bd42ec .quad 0x21398e0ca16353fd // 2^76 * 1 * G .quad 0x216ab2ca8da7d2ef .quad 0x366ad9dd99f42827 .quad 0xae64b9004fdd3c75 .quad 0x403a395b53909e62 .quad 0x86c5fc16861b7e9a .quad 0xf6a330476a27c451 .quad 0x01667267a1e93597 .quad 0x05ffb9cd6082dfeb .quad 0xa617fa9ff53f6139 .quad 0x60f2b5e513e66cb6 .quad 0xd7a8beefb3448aa4 .quad 0x7a2932856f5ea192 // 2^76 * 2 * G .quad 0x0b39d761b02de888 .quad 0x5f550e7ed2414e1f .quad 0xa6bfa45822e1a940 .quad 0x050a2f7dfd447b99 .quad 0xb89c444879639302 .quad 0x4ae4f19350c67f2c .quad 0xf0b35da8c81af9c6 .quad 0x39d0003546871017 .quad 
0x437c3b33a650db77 .quad 0x6bafe81dbac52bb2 .quad 0xfe99402d2db7d318 .quad 0x2b5b7eec372ba6ce // 2^76 * 3 * G .quad 0xb3bc4bbd83f50eef .quad 0x508f0c998c927866 .quad 0x43e76587c8b7e66e .quad 0x0f7655a3a47f98d9 .quad 0xa694404d613ac8f4 .quad 0x500c3c2bfa97e72c .quad 0x874104d21fcec210 .quad 0x1b205fb38604a8ee .quad 0x55ecad37d24b133c .quad 0x441e147d6038c90b .quad 0x656683a1d62c6fee .quad 0x0157d5dc87e0ecae // 2^76 * 4 * G .quad 0xf2a7af510354c13d .quad 0xd7a0b145aa372b60 .quad 0x2869b96a05a3d470 .quad 0x6528e42d82460173 .quad 0x95265514d71eb524 .quad 0xe603d8815df14593 .quad 0x147cdf410d4de6b7 .quad 0x5293b1730437c850 .quad 0x23d0e0814bccf226 .quad 0x92c745cd8196fb93 .quad 0x8b61796c59541e5b .quad 0x40a44df0c021f978 // 2^76 * 5 * G .quad 0xdaa869894f20ea6a .quad 0xea14a3d14c620618 .quad 0x6001fccb090bf8be .quad 0x35f4e822947e9cf0 .quad 0x86c96e514bc5d095 .quad 0xf20d4098fca6804a .quad 0x27363d89c826ea5d .quad 0x39ca36565719cacf .quad 0x97506f2f6f87b75c .quad 0xc624aea0034ae070 .quad 0x1ec856e3aad34dd6 .quad 0x055b0be0e440e58f // 2^76 * 6 * G .quad 0x6469a17d89735d12 .quad 0xdb6f27d5e662b9f1 .quad 0x9fcba3286a395681 .quad 0x363b8004d269af25 .quad 0x4d12a04b6ea33da2 .quad 0x57cf4c15e36126dd .quad 0x90ec9675ee44d967 .quad 0x64ca348d2a985aac .quad 0x99588e19e4c4912d .quad 0xefcc3b4e1ca5ce6b .quad 0x4522ea60fa5b98d5 .quad 0x7064bbab1de4a819 // 2^76 * 7 * G .quad 0xb919e1515a770641 .quad 0xa9a2e2c74e7f8039 .quad 0x7527250b3df23109 .quad 0x756a7330ac27b78b .quad 0xa290c06142542129 .quad 0xf2e2c2aebe8d5b90 .quad 0xcf2458db76abfe1b .quad 0x02157ade83d626bf .quad 0x3e46972a1b9a038b .quad 0x2e4ee66a7ee03fb4 .quad 0x81a248776edbb4ca .quad 0x1a944ee88ecd0563 // 2^76 * 8 * G .quad 0xd5a91d1151039372 .quad 0x2ed377b799ca26de .quad 0xa17202acfd366b6b .quad 0x0730291bd6901995 .quad 0xbb40a859182362d6 .quad 0xb99f55778a4d1abb .quad 0x8d18b427758559f6 .quad 0x26c20fe74d26235a .quad 0x648d1d9fe9cc22f5 .quad 0x66bc561928dd577c .quad 0x47d3ed21652439d1 .quad 0x49d271acedaf8b49 // 2^80 * 1 * G .quad 0x89f5058a382b33f3 .quad 0x5ae2ba0bad48c0b4 .quad 0x8f93b503a53db36e .quad 0x5aa3ed9d95a232e6 .quad 0x2798aaf9b4b75601 .quad 0x5eac72135c8dad72 .quad 0xd2ceaa6161b7a023 .quad 0x1bbfb284e98f7d4e .quad 0x656777e9c7d96561 .quad 0xcb2b125472c78036 .quad 0x65053299d9506eee .quad 0x4a07e14e5e8957cc // 2^80 * 2 * G .quad 0x4ee412cb980df999 .quad 0xa315d76f3c6ec771 .quad 0xbba5edde925c77fd .quad 0x3f0bac391d313402 .quad 0x240b58cdc477a49b .quad 0xfd38dade6447f017 .quad 0x19928d32a7c86aad .quad 0x50af7aed84afa081 .quad 0x6e4fde0115f65be5 .quad 0x29982621216109b2 .quad 0x780205810badd6d9 .quad 0x1921a316baebd006 // 2^80 * 3 * G .quad 0x89422f7edfb870fc .quad 0x2c296beb4f76b3bd .quad 0x0738f1d436c24df7 .quad 0x6458df41e273aeb0 .quad 0xd75aad9ad9f3c18b .quad 0x566a0eef60b1c19c .quad 0x3e9a0bac255c0ed9 .quad 0x7b049deca062c7f5 .quad 0xdccbe37a35444483 .quad 0x758879330fedbe93 .quad 0x786004c312c5dd87 .quad 0x6093dccbc2950e64 // 2^80 * 4 * G .quad 0x1ff39a8585e0706d .quad 0x36d0a5d8b3e73933 .quad 0x43b9f2e1718f453b .quad 0x57d1ea084827a97c .quad 0x6bdeeebe6084034b .quad 0x3199c2b6780fb854 .quad 0x973376abb62d0695 .quad 0x6e3180c98b647d90 .quad 0xee7ab6e7a128b071 .quad 0xa4c1596d93a88baa .quad 0xf7b4de82b2216130 .quad 0x363e999ddd97bd18 // 2^80 * 5 * G .quad 0x96a843c135ee1fc4 .quad 0x976eb35508e4c8cf .quad 0xb42f6801b58cd330 .quad 0x48ee9b78693a052b .quad 0x2f1848dce24baec6 .quad 0x769b7255babcaf60 .quad 0x90cb3c6e3cefe931 .quad 0x231f979bc6f9b355 .quad 0x5c31de4bcc2af3c6 .quad 0xb04bb030fe208d1f .quad 0xb78d7009c14fb466 .quad 
0x079bfa9b08792413 // 2^80 * 6 * G .quad 0xe3903a51da300df4 .quad 0x843964233da95ab0 .quad 0xed3cf12d0b356480 .quad 0x038c77f684817194 .quad 0xf3c9ed80a2d54245 .quad 0x0aa08b7877f63952 .quad 0xd76dac63d1085475 .quad 0x1ef4fb159470636b .quad 0x854e5ee65b167bec .quad 0x59590a4296d0cdc2 .quad 0x72b2df3498102199 .quad 0x575ee92a4a0bff56 // 2^80 * 7 * G .quad 0xd4c080908a182fcf .quad 0x30e170c299489dbd .quad 0x05babd5752f733de .quad 0x43d4e7112cd3fd00 .quad 0x5d46bc450aa4d801 .quad 0xc3af1227a533b9d8 .quad 0x389e3b262b8906c2 .quad 0x200a1e7e382f581b .quad 0x518db967eaf93ac5 .quad 0x71bc989b056652c0 .quad 0xfe2b85d9567197f5 .quad 0x050eca52651e4e38 // 2^80 * 8 * G .quad 0xc3431ade453f0c9c .quad 0xe9f5045eff703b9b .quad 0xfcd97ac9ed847b3d .quad 0x4b0ee6c21c58f4c6 .quad 0x97ac397660e668ea .quad 0x9b19bbfe153ab497 .quad 0x4cb179b534eca79f .quad 0x6151c09fa131ae57 .quad 0x3af55c0dfdf05d96 .quad 0xdd262ee02ab4ee7a .quad 0x11b2bb8712171709 .quad 0x1fef24fa800f030b // 2^84 * 1 * G .quad 0xb496123a6b6c6609 .quad 0xa750fe8580ab5938 .quad 0xf471bf39b7c27a5f .quad 0x507903ce77ac193c .quad 0xff91a66a90166220 .quad 0xf22552ae5bf1e009 .quad 0x7dff85d87f90df7c .quad 0x4f620ffe0c736fb9 .quad 0x62f90d65dfde3e34 .quad 0xcf28c592b9fa5fad .quad 0x99c86ef9c6164510 .quad 0x25d448044a256c84 // 2^84 * 2 * G .quad 0xbd68230ec7e9b16f .quad 0x0eb1b9c1c1c5795d .quad 0x7943c8c495b6b1ff .quad 0x2f9faf620bbacf5e .quad 0x2c7c4415c9022b55 .quad 0x56a0d241812eb1fe .quad 0xf02ea1c9d7b65e0d .quad 0x4180512fd5323b26 .quad 0xa4ff3e698a48a5db .quad 0xba6a3806bd95403b .quad 0x9f7ce1af47d5b65d .quad 0x15e087e55939d2fb // 2^84 * 3 * G .quad 0x12207543745c1496 .quad 0xdaff3cfdda38610c .quad 0xe4e797272c71c34f .quad 0x39c07b1934bdede9 .quad 0x8894186efb963f38 .quad 0x48a00e80dc639bd5 .quad 0xa4e8092be96c1c99 .quad 0x5a097d54ca573661 .quad 0x2d45892b17c9e755 .quad 0xd033fd7289308df8 .quad 0x6c2fe9d9525b8bd9 .quad 0x2edbecf1c11cc079 // 2^84 * 4 * G .quad 0x1616a4e3c715a0d2 .quad 0x53623cb0f8341d4d .quad 0x96ef5329c7e899cb .quad 0x3d4e8dbba668baa6 .quad 0xee0f0fddd087a25f .quad 0x9c7531555c3e34ee .quad 0x660c572e8fab3ab5 .quad 0x0854fc44544cd3b2 .quad 0x61eba0c555edad19 .quad 0x24b533fef0a83de6 .quad 0x3b77042883baa5f8 .quad 0x678f82b898a47e8d // 2^84 * 5 * G .quad 0xb1491d0bd6900c54 .quad 0x3539722c9d132636 .quad 0x4db928920b362bc9 .quad 0x4d7cd1fea68b69df .quad 0x1e09d94057775696 .quad 0xeed1265c3cd951db .quad 0xfa9dac2b20bce16f .quad 0x0f7f76e0e8d089f4 .quad 0x36d9ebc5d485b00c .quad 0xa2596492e4adb365 .quad 0xc1659480c2119ccd .quad 0x45306349186e0d5f // 2^84 * 6 * G .quad 0x94ddd0c1a6cdff1d .quad 0x55f6f115e84213ae .quad 0x6c935f85992fcf6a .quad 0x067ee0f54a37f16f .quad 0x96a414ec2b072491 .quad 0x1bb2218127a7b65b .quad 0x6d2849596e8a4af0 .quad 0x65f3b08ccd27765f .quad 0xecb29fff199801f7 .quad 0x9d361d1fa2a0f72f .quad 0x25f11d2375fd2f49 .quad 0x124cefe80fe10fe2 // 2^84 * 7 * G .quad 0x4c126cf9d18df255 .quad 0xc1d471e9147a63b6 .quad 0x2c6d3c73f3c93b5f .quad 0x6be3a6a2e3ff86a2 .quad 0x1518e85b31b16489 .quad 0x8faadcb7db710bfb .quad 0x39b0bdf4a14ae239 .quad 0x05f4cbea503d20c1 .quad 0xce040e9ec04145bc .quad 0xc71ff4e208f6834c .quad 0xbd546e8dab8847a3 .quad 0x64666aa0a4d2aba5 // 2^84 * 8 * G .quad 0x6841435a7c06d912 .quad 0xca123c21bb3f830b .quad 0xd4b37b27b1cbe278 .quad 0x1d753b84c76f5046 .quad 0xb0c53bf73337e94c .quad 0x7cb5697e11e14f15 .quad 0x4b84abac1930c750 .quad 0x28dd4abfe0640468 .quad 0x7dc0b64c44cb9f44 .quad 0x18a3e1ace3925dbf .quad 0x7a3034862d0457c4 .quad 0x4c498bf78a0c892e // 2^88 * 1 * G .quad 0x37d653fb1aa73196 .quad 
0x0f9495303fd76418 .quad 0xad200b09fb3a17b2 .quad 0x544d49292fc8613e .quad 0x22d2aff530976b86 .quad 0x8d90b806c2d24604 .quad 0xdca1896c4de5bae5 .quad 0x28005fe6c8340c17 .quad 0x6aefba9f34528688 .quad 0x5c1bff9425107da1 .quad 0xf75bbbcd66d94b36 .quad 0x72e472930f316dfa // 2^88 * 2 * G .quad 0x2695208c9781084f .quad 0xb1502a0b23450ee1 .quad 0xfd9daea603efde02 .quad 0x5a9d2e8c2733a34c .quad 0x07f3f635d32a7627 .quad 0x7aaa4d865f6566f0 .quad 0x3c85e79728d04450 .quad 0x1fee7f000fe06438 .quad 0x765305da03dbf7e5 .quad 0xa4daf2491434cdbd .quad 0x7b4ad5cdd24a88ec .quad 0x00f94051ee040543 // 2^88 * 3 * G .quad 0x8d356b23c3d330b2 .quad 0xf21c8b9bb0471b06 .quad 0xb36c316c6e42b83c .quad 0x07d79c7e8beab10d .quad 0xd7ef93bb07af9753 .quad 0x583ed0cf3db766a7 .quad 0xce6998bf6e0b1ec5 .quad 0x47b7ffd25dd40452 .quad 0x87fbfb9cbc08dd12 .quad 0x8a066b3ae1eec29b .quad 0x0d57242bdb1fc1bf .quad 0x1c3520a35ea64bb6 // 2^88 * 4 * G .quad 0x80d253a6bccba34a .quad 0x3e61c3a13838219b .quad 0x90c3b6019882e396 .quad 0x1c3d05775d0ee66f .quad 0xcda86f40216bc059 .quad 0x1fbb231d12bcd87e .quad 0xb4956a9e17c70990 .quad 0x38750c3b66d12e55 .quad 0x692ef1409422e51a .quad 0xcbc0c73c2b5df671 .quad 0x21014fe7744ce029 .quad 0x0621e2c7d330487c // 2^88 * 5 * G .quad 0xaf9860cc8259838d .quad 0x90ea48c1c69f9adc .quad 0x6526483765581e30 .quad 0x0007d6097bd3a5bc .quad 0xb7ae1796b0dbf0f3 .quad 0x54dfafb9e17ce196 .quad 0x25923071e9aaa3b4 .quad 0x5d8e589ca1002e9d .quad 0xc0bf1d950842a94b .quad 0xb2d3c363588f2e3e .quad 0x0a961438bb51e2ef .quad 0x1583d7783c1cbf86 // 2^88 * 6 * G .quad 0xeceea2ef5da27ae1 .quad 0x597c3a1455670174 .quad 0xc9a62a126609167a .quad 0x252a5f2e81ed8f70 .quad 0x90034704cc9d28c7 .quad 0x1d1b679ef72cc58f .quad 0x16e12b5fbe5b8726 .quad 0x4958064e83c5580a .quad 0x0d2894265066e80d .quad 0xfcc3f785307c8c6b .quad 0x1b53da780c1112fd .quad 0x079c170bd843b388 // 2^88 * 7 * G .quad 0x0506ece464fa6fff .quad 0xbee3431e6205e523 .quad 0x3579422451b8ea42 .quad 0x6dec05e34ac9fb00 .quad 0xcdd6cd50c0d5d056 .quad 0x9af7686dbb03573b .quad 0x3ca6723ff3c3ef48 .quad 0x6768c0d7317b8acc .quad 0x94b625e5f155c1b3 .quad 0x417bf3a7997b7b91 .quad 0xc22cbddc6d6b2600 .quad 0x51445e14ddcd52f4 // 2^88 * 8 * G .quad 0x57502b4b3b144951 .quad 0x8e67ff6b444bbcb3 .quad 0xb8bd6927166385db .quad 0x13186f31e39295c8 .quad 0x893147ab2bbea455 .quad 0x8c53a24f92079129 .quad 0x4b49f948be30f7a7 .quad 0x12e990086e4fd43d .quad 0xf10c96b37fdfbb2e .quad 0x9f9a935e121ceaf9 .quad 0xdf1136c43a5b983f .quad 0x77b2e3f05d3e99af // 2^92 * 1 * G .quad 0xfd0d75879cf12657 .quad 0xe82fef94e53a0e29 .quad 0xcc34a7f05bbb4be7 .quad 0x0b251172a50c38a2 .quad 0x9532f48fcc5cd29b .quad 0x2ba851bea3ce3671 .quad 0x32dacaa051122941 .quad 0x478d99d9350004f2 .quad 0x1d5ad94890bb02c0 .quad 0x50e208b10ec25115 .quad 0xa26a22894ef21702 .quad 0x4dc923343b524805 // 2^92 * 2 * G .quad 0xe3828c400f8086b6 .quad 0x3f77e6f7979f0dc8 .quad 0x7ef6de304df42cb4 .quad 0x5265797cb6abd784 .quad 0x3ad3e3ebf36c4975 .quad 0xd75d25a537862125 .quad 0xe873943da025a516 .quad 0x6bbc7cb4c411c847 .quad 0x3c6f9cd1d4a50d56 .quad 0xb6244077c6feab7e .quad 0x6ff9bf483580972e .quad 0x00375883b332acfb // 2^92 * 3 * G .quad 0x0001b2cd28cb0940 .quad 0x63fb51a06f1c24c9 .quad 0xb5ad8691dcd5ca31 .quad 0x67238dbd8c450660 .quad 0xc98bec856c75c99c .quad 0xe44184c000e33cf4 .quad 0x0a676b9bba907634 .quad 0x669e2cb571f379d7 .quad 0xcb116b73a49bd308 .quad 0x025aad6b2392729e .quad 0xb4793efa3f55d9b1 .quad 0x72a1056140678bb9 // 2^92 * 4 * G .quad 0xa2b6812b1cc9249d .quad 0x62866eee21211f58 .quad 0x2cb5c5b85df10ece .quad 0x03a6b259e263ae00 .quad 
0x0d8d2909e2e505b6 .quad 0x98ca78abc0291230 .quad 0x77ef5569a9b12327 .quad 0x7c77897b81439b47 .quad 0xf1c1b5e2de331cb5 .quad 0x5a9f5d8e15fca420 .quad 0x9fa438f17bd932b1 .quad 0x2a381bf01c6146e7 // 2^92 * 5 * G .quad 0xac9b9879cfc811c1 .quad 0x8b7d29813756e567 .quad 0x50da4e607c70edfc .quad 0x5dbca62f884400b6 .quad 0xf7c0be32b534166f .quad 0x27e6ca6419cf70d4 .quad 0x934df7d7a957a759 .quad 0x5701461dabdec2aa .quad 0x2c6747402c915c25 .quad 0x1bdcd1a80b0d340a .quad 0x5e5601bd07b43f5f .quad 0x2555b4e05539a242 // 2^92 * 6 * G .quad 0x6fc09f5266ddd216 .quad 0xdce560a7c8e37048 .quad 0xec65939da2df62fd .quad 0x7a869ae7e52ed192 .quad 0x78409b1d87e463d4 .quad 0xad4da95acdfb639d .quad 0xec28773755259b9c .quad 0x69c806e9c31230ab .quad 0x7b48f57414bb3f22 .quad 0x68c7cee4aedccc88 .quad 0xed2f936179ed80be .quad 0x25d70b885f77bc4b // 2^92 * 7 * G .quad 0x4151c3d9762bf4de .quad 0x083f435f2745d82b .quad 0x29775a2e0d23ddd5 .quad 0x138e3a6269a5db24 .quad 0x98459d29bb1ae4d4 .quad 0x56b9c4c739f954ec .quad 0x832743f6c29b4b3e .quad 0x21ea8e2798b6878a .quad 0x87bef4b46a5a7b9c .quad 0xd2299d1b5fc1d062 .quad 0x82409818dd321648 .quad 0x5c5abeb1e5a2e03d // 2^92 * 8 * G .quad 0x14722af4b73c2ddb .quad 0xbc470c5f5a05060d .quad 0x00943eac2581b02e .quad 0x0e434b3b1f499c8f .quad 0x02cde6de1306a233 .quad 0x7b5a52a2116f8ec7 .quad 0xe1c681f4c1163b5b .quad 0x241d350660d32643 .quad 0x6be4404d0ebc52c7 .quad 0xae46233bb1a791f5 .quad 0x2aec170ed25db42b .quad 0x1d8dfd966645d694 // 2^96 * 1 * G .quad 0x296fa9c59c2ec4de .quad 0xbc8b61bf4f84f3cb .quad 0x1c7706d917a8f908 .quad 0x63b795fc7ad3255d .quad 0xd598639c12ddb0a4 .quad 0xa5d19f30c024866b .quad 0xd17c2f0358fce460 .quad 0x07a195152e095e8a .quad 0xa8368f02389e5fc8 .quad 0x90433b02cf8de43b .quad 0xafa1fd5dc5412643 .quad 0x3e8fe83d032f0137 // 2^96 * 2 * G .quad 0x2f8b15b90570a294 .quad 0x94f2427067084549 .quad 0xde1c5ae161bbfd84 .quad 0x75ba3b797fac4007 .quad 0x08704c8de8efd13c .quad 0xdfc51a8e33e03731 .quad 0xa59d5da51260cde3 .quad 0x22d60899a6258c86 .quad 0x6239dbc070cdd196 .quad 0x60fe8a8b6c7d8a9a .quad 0xb38847bceb401260 .quad 0x0904d07b87779e5e // 2^96 * 3 * G .quad 0xb4ce1fd4ddba919c .quad 0xcf31db3ec74c8daa .quad 0x2c63cc63ad86cc51 .quad 0x43e2143fbc1dde07 .quad 0xf4322d6648f940b9 .quad 0x06952f0cbd2d0c39 .quad 0x167697ada081f931 .quad 0x6240aacebaf72a6c .quad 0xf834749c5ba295a0 .quad 0xd6947c5bca37d25a .quad 0x66f13ba7e7c9316a .quad 0x56bdaf238db40cac // 2^96 * 4 * G .quad 0x362ab9e3f53533eb .quad 0x338568d56eb93d40 .quad 0x9e0e14521d5a5572 .quad 0x1d24a86d83741318 .quad 0x1310d36cc19d3bb2 .quad 0x062a6bb7622386b9 .quad 0x7c9b8591d7a14f5c .quad 0x03aa31507e1e5754 .quad 0xf4ec7648ffd4ce1f .quad 0xe045eaf054ac8c1c .quad 0x88d225821d09357c .quad 0x43b261dc9aeb4859 // 2^96 * 5 * G .quad 0xe55b1e1988bb79bb .quad 0xa09ed07dc17a359d .quad 0xb02c2ee2603dea33 .quad 0x326055cf5b276bc2 .quad 0x19513d8b6c951364 .quad 0x94fe7126000bf47b .quad 0x028d10ddd54f9567 .quad 0x02b4d5e242940964 .quad 0xb4a155cb28d18df2 .quad 0xeacc4646186ce508 .quad 0xc49cf4936c824389 .quad 0x27a6c809ae5d3410 // 2^96 * 6 * G .quad 0x8ba6ebcd1f0db188 .quad 0x37d3d73a675a5be8 .quad 0xf22edfa315f5585a .quad 0x2cb67174ff60a17e .quad 0xcd2c270ac43d6954 .quad 0xdd4a3e576a66cab2 .quad 0x79fa592469d7036c .quad 0x221503603d8c2599 .quad 0x59eecdf9390be1d0 .quad 0xa9422044728ce3f1 .quad 0x82891c667a94f0f4 .quad 0x7b1df4b73890f436 // 2^96 * 7 * G .quad 0xe492f2e0b3b2a224 .quad 0x7c6c9e062b551160 .quad 0x15eb8fe20d7f7b0e .quad 0x61fcef2658fc5992 .quad 0x5f2e221807f8f58c .quad 0xe3555c9fd49409d4 .quad 0xb2aaa88d1fb6a630 .quad 
0x68698245d352e03d .quad 0xdbb15d852a18187a .quad 0xf3e4aad386ddacd7 .quad 0x44bae2810ff6c482 .quad 0x46cf4c473daf01cf // 2^96 * 8 * G .quad 0x426525ed9ec4e5f9 .quad 0x0e5eda0116903303 .quad 0x72b1a7f2cbe5cadc .quad 0x29387bcd14eb5f40 .quad 0x213c6ea7f1498140 .quad 0x7c1e7ef8392b4854 .quad 0x2488c38c5629ceba .quad 0x1065aae50d8cc5bb .quad 0x1c2c4525df200d57 .quad 0x5c3b2dd6bfca674a .quad 0x0a07e7b1e1834030 .quad 0x69a198e64f1ce716 // 2^100 * 1 * G .quad 0x7afcd613efa9d697 .quad 0x0cc45aa41c067959 .quad 0xa56fe104c1fada96 .quad 0x3a73b70472e40365 .quad 0x7b26e56b9e2d4734 .quad 0xc4c7132b81c61675 .quad 0xef5c9525ec9cde7f .quad 0x39c80b16e71743ad .quad 0x0f196e0d1b826c68 .quad 0xf71ff0e24960e3db .quad 0x6113167023b7436c .quad 0x0cf0ea5877da7282 // 2^100 * 2 * G .quad 0x196c80a4ddd4ccbd .quad 0x22e6f55d95f2dd9d .quad 0xc75e33c740d6c71b .quad 0x7bb51279cb3c042f .quad 0xe332ced43ba6945a .quad 0xde0b1361e881c05d .quad 0x1ad40f095e67ed3b .quad 0x5da8acdab8c63d5d .quad 0xc4b6664a3a70159f .quad 0x76194f0f0a904e14 .quad 0xa5614c39a4096c13 .quad 0x6cd0ff50979feced // 2^100 * 3 * G .quad 0xc0e067e78f4428ac .quad 0x14835ab0a61135e3 .quad 0xf21d14f338062935 .quad 0x6390a4c8df04849c .quad 0x7fecfabdb04ba18e .quad 0xd0fc7bfc3bddbcf7 .quad 0xa41d486e057a131c .quad 0x641a4391f2223a61 .quad 0xc5c6b95aa606a8db .quad 0x914b7f9eb06825f1 .quad 0x2a731f6b44fc9eff .quad 0x30ddf38562705cfc // 2^100 * 4 * G .quad 0x4e3dcbdad1bff7f9 .quad 0xc9118e8220645717 .quad 0xbacccebc0f189d56 .quad 0x1b4822e9d4467668 .quad 0x33bef2bd68bcd52c .quad 0xc649dbb069482ef2 .quad 0xb5b6ee0c41cb1aee .quad 0x5c294d270212a7e5 .quad 0xab360a7f25563781 .quad 0x2512228a480f7958 .quad 0xc75d05276114b4e3 .quad 0x222d9625d976fe2a // 2^100 * 5 * G .quad 0x1c717f85b372ace1 .quad 0x81930e694638bf18 .quad 0x239cad056bc08b58 .quad 0x0b34271c87f8fff4 .quad 0x0f94be7e0a344f85 .quad 0xeb2faa8c87f22c38 .quad 0x9ce1e75e4ee16f0f .quad 0x43e64e5418a08dea .quad 0x8155e2521a35ce63 .quad 0xbe100d4df912028e .quad 0xbff80bf8a57ddcec .quad 0x57342dc96d6bc6e4 // 2^100 * 6 * G .quad 0xefeef065c8ce5998 .quad 0xbf029510b5cbeaa2 .quad 0x8c64a10620b7c458 .quad 0x35134fb231c24855 .quad 0xf3c3bcb71e707bf6 .quad 0x351d9b8c7291a762 .quad 0x00502e6edad69a33 .quad 0x522f521f1ec8807f .quad 0x272c1f46f9a3902b .quad 0xc91ba3b799657bcc .quad 0xae614b304f8a1c0e .quad 0x7afcaad70b99017b // 2^100 * 7 * G .quad 0xc25ded54a4b8be41 .quad 0x902d13e11bb0e2dd .quad 0x41f43233cde82ab2 .quad 0x1085faa5c3aae7cb .quad 0xa88141ecef842b6b .quad 0x55e7b14797abe6c5 .quad 0x8c748f9703784ffe .quad 0x5b50a1f7afcd00b7 .quad 0x9b840f66f1361315 .quad 0x18462242701003e9 .quad 0x65ed45fae4a25080 .quad 0x0a2862393fda7320 // 2^100 * 8 * G .quad 0x46ab13c8347cbc9d .quad 0x3849e8d499c12383 .quad 0x4cea314087d64ac9 .quad 0x1f354134b1a29ee7 .quad 0x960e737b6ecb9d17 .quad 0xfaf24948d67ceae1 .quad 0x37e7a9b4d55e1b89 .quad 0x5cb7173cb46c59eb .quad 0x4a89e68b82b7abf0 .quad 0xf41cd9279ba6b7b9 .quad 0x16e6c210e18d876f .quad 0x7cacdb0f7f1b09c6 // 2^104 * 1 * G .quad 0x9062b2e0d91a78bc .quad 0x47c9889cc8509667 .quad 0x9df54a66405070b8 .quad 0x7369e6a92493a1bf .quad 0xe1014434dcc5caed .quad 0x47ed5d963c84fb33 .quad 0x70019576ed86a0e7 .quad 0x25b2697bd267f9e4 .quad 0x9d673ffb13986864 .quad 0x3ca5fbd9415dc7b8 .quad 0xe04ecc3bdf273b5e .quad 0x1420683db54e4cd2 // 2^104 * 2 * G .quad 0xb478bd1e249dd197 .quad 0x620c35005e58c102 .quad 0xfb02d32fccbaac5c .quad 0x60b63bebf508a72d .quad 0x34eebb6fc1cc5ad0 .quad 0x6a1b0ce99646ac8b .quad 0xd3b0da49a66bde53 .quad 0x31e83b4161d081c1 .quad 0x97e8c7129e062b4f .quad 0x49e48f4f29320ad8 
.quad 0x5bece14b6f18683f .quad 0x55cf1eb62d550317 // 2^104 * 3 * G .quad 0x5879101065c23d58 .quad 0x8b9d086d5094819c .quad 0xe2402fa912c55fa7 .quad 0x669a6564570891d4 .quad 0x3076b5e37df58c52 .quad 0xd73ab9dde799cc36 .quad 0xbd831ce34913ee20 .quad 0x1a56fbaa62ba0133 .quad 0x943e6b505c9dc9ec .quad 0x302557bba77c371a .quad 0x9873ae5641347651 .quad 0x13c4836799c58a5c // 2^104 * 4 * G .quad 0x423a5d465ab3e1b9 .quad 0xfc13c187c7f13f61 .quad 0x19f83664ecb5b9b6 .quad 0x66f80c93a637b607 .quad 0xc4dcfb6a5d8bd080 .quad 0xdeebc4ec571a4842 .quad 0xd4b2e883b8e55365 .quad 0x50bdc87dc8e5b827 .quad 0x606d37836edfe111 .quad 0x32353e15f011abd9 .quad 0x64b03ac325b73b96 .quad 0x1dd56444725fd5ae // 2^104 * 5 * G .quad 0x8fa47ff83362127d .quad 0xbc9f6ac471cd7c15 .quad 0x6e71454349220c8b .quad 0x0e645912219f732e .quad 0xc297e60008bac89a .quad 0x7d4cea11eae1c3e0 .quad 0xf3e38be19fe7977c .quad 0x3a3a450f63a305cd .quad 0x078f2f31d8394627 .quad 0x389d3183de94a510 .quad 0xd1e36c6d17996f80 .quad 0x318c8d9393a9a87b // 2^104 * 6 * G .quad 0xf2745d032afffe19 .quad 0x0c9f3c497f24db66 .quad 0xbc98d3e3ba8598ef .quad 0x224c7c679a1d5314 .quad 0x5d669e29ab1dd398 .quad 0xfc921658342d9e3b .quad 0x55851dfdf35973cd .quad 0x509a41c325950af6 .quad 0xbdc06edca6f925e9 .quad 0x793ef3f4641b1f33 .quad 0x82ec12809d833e89 .quad 0x05bff02328a11389 // 2^104 * 7 * G .quad 0x3632137023cae00b .quad 0x544acf0ad1accf59 .quad 0x96741049d21a1c88 .quad 0x780b8cc3fa2a44a7 .quad 0x6881a0dd0dc512e4 .quad 0x4fe70dc844a5fafe .quad 0x1f748e6b8f4a5240 .quad 0x576277cdee01a3ea .quad 0x1ef38abc234f305f .quad 0x9a577fbd1405de08 .quad 0x5e82a51434e62a0d .quad 0x5ff418726271b7a1 // 2^104 * 8 * G .quad 0x398e080c1789db9d .quad 0xa7602025f3e778f5 .quad 0xfa98894c06bd035d .quad 0x106a03dc25a966be .quad 0xe5db47e813b69540 .quad 0xf35d2a3b432610e1 .quad 0xac1f26e938781276 .quad 0x29d4db8ca0a0cb69 .quad 0xd9ad0aaf333353d0 .quad 0x38669da5acd309e5 .quad 0x3c57658ac888f7f0 .quad 0x4ab38a51052cbefa // 2^108 * 1 * G .quad 0xdfdacbee4324c0e9 .quad 0x054442883f955bb7 .quad 0xdef7aaa8ea31609f .quad 0x68aee70642287cff .quad 0xf68fe2e8809de054 .quad 0xe3bc096a9c82bad1 .quad 0x076353d40aadbf45 .quad 0x7b9b1fb5dea1959e .quad 0xf01cc8f17471cc0c .quad 0x95242e37579082bb .quad 0x27776093d3e46b5f .quad 0x2d13d55a28bd85fb // 2^108 * 2 * G .quad 0xfac5d2065b35b8da .quad 0xa8da8a9a85624bb7 .quad 0xccd2ca913d21cd0f .quad 0x6b8341ee8bf90d58 .quad 0xbf019cce7aee7a52 .quad 0xa8ded2b6e454ead3 .quad 0x3c619f0b87a8bb19 .quad 0x3619b5d7560916d8 .quad 0x3579f26b0282c4b2 .quad 0x64d592f24fafefae .quad 0xb7cded7b28c8c7c0 .quad 0x6a927b6b7173a8d7 // 2^108 * 3 * G .quad 0x1f6db24f986e4656 .quad 0x1021c02ed1e9105b .quad 0xf8ff3fff2cc0a375 .quad 0x1d2a6bf8c6c82592 .quad 0x8d7040863ece88eb .quad 0xf0e307a980eec08c .quad 0xac2250610d788fda .quad 0x056d92a43a0d478d .quad 0x1b05a196fc3da5a1 .quad 0x77d7a8c243b59ed0 .quad 0x06da3d6297d17918 .quad 0x66fbb494f12353f7 // 2^108 * 4 * G .quad 0x751a50b9d85c0fb8 .quad 0xd1afdc258bcf097b .quad 0x2f16a6a38309a969 .quad 0x14ddff9ee5b00659 .quad 0xd6d70996f12309d6 .quad 0xdbfb2385e9c3d539 .quad 0x46d602b0f7552411 .quad 0x270a0b0557843e0c .quad 0x61ff0640a7862bcc .quad 0x81cac09a5f11abfe .quad 0x9047830455d12abb .quad 0x19a4bde1945ae873 // 2^108 * 5 * G .quad 0x9b9f26f520a6200a .quad 0x64804443cf13eaf8 .quad 0x8a63673f8631edd3 .quad 0x72bbbce11ed39dc1 .quad 0x40c709dec076c49f .quad 0x657bfaf27f3e53f6 .quad 0x40662331eca042c4 .quad 0x14b375487eb4df04 .quad 0xae853c94ab66dc47 .quad 0xeb62343edf762d6e .quad 0xf08e0e186fb2f7d1 .quad 0x4f0b1c02700ab37a // 2^108 * 6 * G 
.quad 0xe1706787d81951fa .quad 0xa10a2c8eb290c77b .quad 0xe7382fa03ed66773 .quad 0x0a4d84710bcc4b54 .quad 0x79fd21ccc1b2e23f .quad 0x4ae7c281453df52a .quad 0xc8172ec9d151486b .quad 0x68abe9443e0a7534 .quad 0xda12c6c407831dcb .quad 0x0da230d74d5c510d .quad 0x4ab1531e6bd404e1 .quad 0x4106b166bcf440ef // 2^108 * 7 * G .quad 0x02e57a421cd23668 .quad 0x4ad9fb5d0eaef6fd .quad 0x954e6727b1244480 .quad 0x7f792f9d2699f331 .quad 0xa485ccd539e4ecf2 .quad 0x5aa3f3ad0555bab5 .quad 0x145e3439937df82d .quad 0x1238b51e1214283f .quad 0x0b886b925fd4d924 .quad 0x60906f7a3626a80d .quad 0xecd367b4b98abd12 .quad 0x2876beb1def344cf // 2^108 * 8 * G .quad 0xdc84e93563144691 .quad 0x632fe8a0d61f23f4 .quad 0x4caa800612a9a8d5 .quad 0x48f9dbfa0e9918d3 .quad 0xd594b3333a8a85f8 .quad 0x4ea37689e78d7d58 .quad 0x73bf9f455e8e351f .quad 0x5507d7d2bc41ebb4 .quad 0x1ceb2903299572fc .quad 0x7c8ccaa29502d0ee .quad 0x91bfa43411cce67b .quad 0x5784481964a831e7 // 2^112 * 1 * G .quad 0xda7c2b256768d593 .quad 0x98c1c0574422ca13 .quad 0xf1a80bd5ca0ace1d .quad 0x29cdd1adc088a690 .quad 0xd6cfd1ef5fddc09c .quad 0xe82b3efdf7575dce .quad 0x25d56b5d201634c2 .quad 0x3041c6bb04ed2b9b .quad 0x0ff2f2f9d956e148 .quad 0xade797759f356b2e .quad 0x1a4698bb5f6c025c .quad 0x104bbd6814049a7b // 2^112 * 2 * G .quad 0x51f0fd3168f1ed67 .quad 0x2c811dcdd86f3bc2 .quad 0x44dc5c4304d2f2de .quad 0x5be8cc57092a7149 .quad 0xa95d9a5fd67ff163 .quad 0xe92be69d4cc75681 .quad 0xb7f8024cde20f257 .quad 0x204f2a20fb072df5 .quad 0xc8143b3d30ebb079 .quad 0x7589155abd652e30 .quad 0x653c3c318f6d5c31 .quad 0x2570fb17c279161f // 2^112 * 3 * G .quad 0x3efa367f2cb61575 .quad 0xf5f96f761cd6026c .quad 0xe8c7142a65b52562 .quad 0x3dcb65ea53030acd .quad 0x192ea9550bb8245a .quad 0xc8e6fba88f9050d1 .quad 0x7986ea2d88a4c935 .quad 0x241c5f91de018668 .quad 0x28d8172940de6caa .quad 0x8fbf2cf022d9733a .quad 0x16d7fcdd235b01d1 .quad 0x08420edd5fcdf0e5 // 2^112 * 4 * G .quad 0xcdff20ab8362fa4a .quad 0x57e118d4e21a3e6e .quad 0xe3179617fc39e62b .quad 0x0d9a53efbc1769fd .quad 0x0358c34e04f410ce .quad 0xb6135b5a276e0685 .quad 0x5d9670c7ebb91521 .quad 0x04d654f321db889c .quad 0x5e7dc116ddbdb5d5 .quad 0x2954deb68da5dd2d .quad 0x1cb608173334a292 .quad 0x4a7a4f2618991ad7 // 2^112 * 5 * G .quad 0xf4a718025fb15f95 .quad 0x3df65f346b5c1b8f .quad 0xcdfcf08500e01112 .quad 0x11b50c4cddd31848 .quad 0x24c3b291af372a4b .quad 0x93da8270718147f2 .quad 0xdd84856486899ef2 .quad 0x4a96314223e0ee33 .quad 0xa6e8274408a4ffd6 .quad 0x738e177e9c1576d9 .quad 0x773348b63d02b3f2 .quad 0x4f4bce4dce6bcc51 // 2^112 * 6 * G .quad 0xa71fce5ae2242584 .quad 0x26ea725692f58a9e .quad 0xd21a09d71cea3cf4 .quad 0x73fcdd14b71c01e6 .quad 0x30e2616ec49d0b6f .quad 0xe456718fcaec2317 .quad 0x48eb409bf26b4fa6 .quad 0x3042cee561595f37 .quad 0x427e7079449bac41 .quad 0x855ae36dbce2310a .quad 0x4cae76215f841a7c .quad 0x389e740c9a9ce1d6 // 2^112 * 7 * G .quad 0x64fcb3ae34dcb9ce .quad 0x97500323e348d0ad .quad 0x45b3f07d62c6381b .quad 0x61545379465a6788 .quad 0xc9bd78f6570eac28 .quad 0xe55b0b3227919ce1 .quad 0x65fc3eaba19b91ed .quad 0x25c425e5d6263690 .quad 0x3f3e06a6f1d7de6e .quad 0x3ef976278e062308 .quad 0x8c14f6264e8a6c77 .quad 0x6539a08915484759 // 2^112 * 8 * G .quad 0xe9d21f74c3d2f773 .quad 0xc150544125c46845 .quad 0x624e5ce8f9b99e33 .quad 0x11c5e4aac5cd186c .quad 0xddc4dbd414bb4a19 .quad 0x19b2bc3c98424f8e .quad 0x48a89fd736ca7169 .quad 0x0f65320ef019bd90 .quad 0xd486d1b1cafde0c6 .quad 0x4f3fe6e3163b5181 .quad 0x59a8af0dfaf2939a .quad 0x4cabc7bdec33072a // 2^116 * 1 * G .quad 0x16faa8fb532f7428 .quad 0xdbd42ea046a4e272 .quad 
0x5337653b8b9ea480 .quad 0x4065947223973f03 .quad 0xf7c0a19c1a54a044 .quad 0x4a1c5e2477bd9fbb .quad 0xa6e3ca115af22972 .quad 0x1819bb953f2e9e0d .quad 0x498fbb795e042e84 .quad 0x7d0dd89a7698b714 .quad 0x8bfb0ba427fe6295 .quad 0x36ba82e721200524 // 2^116 * 2 * G .quad 0xd60ecbb74245ec41 .quad 0xfd9be89e34348716 .quad 0xc9240afee42284de .quad 0x4472f648d0531db4 .quad 0xc8d69d0a57274ed5 .quad 0x45ba803260804b17 .quad 0xdf3cda102255dfac .quad 0x77d221232709b339 .quad 0x498a6d7064ad94d8 .quad 0xa5b5c8fd9af62263 .quad 0x8ca8ed0545c141f4 .quad 0x2c63bec3662d358c // 2^116 * 3 * G .quad 0x7fe60d8bea787955 .quad 0xb9dc117eb5f401b7 .quad 0x91c7c09a19355cce .quad 0x22692ef59442bedf .quad 0x9a518b3a8586f8bf .quad 0x9ee71af6cbb196f0 .quad 0xaa0625e6a2385cf2 .quad 0x1deb2176ddd7c8d1 .quad 0x8563d19a2066cf6c .quad 0x401bfd8c4dcc7cd7 .quad 0xd976a6becd0d8f62 .quad 0x67cfd773a278b05e // 2^116 * 4 * G .quad 0x8dec31faef3ee475 .quad 0x99dbff8a9e22fd92 .quad 0x512d11594e26cab1 .quad 0x0cde561eec4310b9 .quad 0x2d5fa9855a4e586a .quad 0x65f8f7a449beab7e .quad 0xaa074dddf21d33d3 .quad 0x185cba721bcb9dee .quad 0x93869da3f4e3cb41 .quad 0xbf0392f540f7977e .quad 0x026204fcd0463b83 .quad 0x3ec91a769eec6eed // 2^116 * 5 * G .quad 0x1e9df75bf78166ad .quad 0x4dfda838eb0cd7af .quad 0xba002ed8c1eaf988 .quad 0x13fedb3e11f33cfc .quad 0x0fad2fb7b0a3402f .quad 0x46615ecbfb69f4a8 .quad 0xf745bcc8c5f8eaa6 .quad 0x7a5fa8794a94e896 .quad 0x52958faa13cd67a1 .quad 0x965ee0818bdbb517 .quad 0x16e58daa2e8845b3 .quad 0x357d397d5499da8f // 2^116 * 6 * G .quad 0x1ebfa05fb0bace6c .quad 0xc934620c1caf9a1e .quad 0xcc771cc41d82b61a .quad 0x2d94a16aa5f74fec .quad 0x481dacb4194bfbf8 .quad 0x4d77e3f1bae58299 .quad 0x1ef4612e7d1372a0 .quad 0x3a8d867e70ff69e1 .quad 0x6f58cd5d55aff958 .quad 0xba3eaa5c75567721 .quad 0x75c123999165227d .quad 0x69be1343c2f2b35e // 2^116 * 7 * G .quad 0x0e091d5ee197c92a .quad 0x4f51019f2945119f .quad 0x143679b9f034e99c .quad 0x7d88112e4d24c696 .quad 0x82bbbdac684b8de3 .quad 0xa2f4c7d03fca0718 .quad 0x337f92fbe096aaa8 .quad 0x200d4d8c63587376 .quad 0x208aed4b4893b32b .quad 0x3efbf23ebe59b964 .quad 0xd762deb0dba5e507 .quad 0x69607bd681bd9d94 // 2^116 * 8 * G .quad 0xf6be021068de1ce1 .quad 0xe8d518e70edcbc1f .quad 0xe3effdd01b5505a5 .quad 0x35f63353d3ec3fd0 .quad 0x3b7f3bd49323a902 .quad 0x7c21b5566b2c6e53 .quad 0xe5ba8ff53a7852a7 .quad 0x28bc77a5838ece00 .quad 0x63ba78a8e25d8036 .quad 0x63651e0094333490 .quad 0x48d82f20288ce532 .quad 0x3a31abfa36b57524 // 2^120 * 1 * G .quad 0x239e9624089c0a2e .quad 0xc748c4c03afe4738 .quad 0x17dbed2a764fa12a .quad 0x639b93f0321c8582 .quad 0xc08f788f3f78d289 .quad 0xfe30a72ca1404d9f .quad 0xf2778bfccf65cc9d .quad 0x7ee498165acb2021 .quad 0x7bd508e39111a1c3 .quad 0x2b2b90d480907489 .quad 0xe7d2aec2ae72fd19 .quad 0x0edf493c85b602a6 // 2^120 * 2 * G .quad 0xaecc8158599b5a68 .quad 0xea574f0febade20e .quad 0x4fe41d7422b67f07 .quad 0x403b92e3019d4fb4 .quad 0x6767c4d284764113 .quad 0xa090403ff7f5f835 .quad 0x1c8fcffacae6bede .quad 0x04c00c54d1dfa369 .quad 0x4dc22f818b465cf8 .quad 0x71a0f35a1480eff8 .quad 0xaee8bfad04c7d657 .quad 0x355bb12ab26176f4 // 2^120 * 3 * G .quad 0xa71e64cc7493bbf4 .quad 0xe5bd84d9eca3b0c3 .quad 0x0a6bc50cfa05e785 .quad 0x0f9b8132182ec312 .quad 0xa301dac75a8c7318 .quad 0xed90039db3ceaa11 .quad 0x6f077cbf3bae3f2d .quad 0x7518eaf8e052ad8e .quad 0xa48859c41b7f6c32 .quad 0x0f2d60bcf4383298 .quad 0x1815a929c9b1d1d9 .quad 0x47c3871bbb1755c4 // 2^120 * 4 * G .quad 0x5144539771ec4f48 .quad 0xf805b17dc98c5d6e .quad 0xf762c11a47c3c66b .quad 0x00b89b85764699dc .quad 
0xfbe65d50c85066b0 .quad 0x62ecc4b0b3a299b0 .quad 0xe53754ea441ae8e0 .quad 0x08fea02ce8d48d5f .quad 0x824ddd7668deead0 .quad 0xc86445204b685d23 .quad 0xb514cfcd5d89d665 .quad 0x473829a74f75d537 // 2^120 * 5 * G .quad 0x82d2da754679c418 .quad 0xe63bd7d8b2618df0 .quad 0x355eef24ac47eb0a .quad 0x2078684c4833c6b4 .quad 0x23d9533aad3902c9 .quad 0x64c2ddceef03588f .quad 0x15257390cfe12fb4 .quad 0x6c668b4d44e4d390 .quad 0x3b48cf217a78820c .quad 0xf76a0ab281273e97 .quad 0xa96c65a78c8eed7b .quad 0x7411a6054f8a433f // 2^120 * 6 * G .quad 0x4d659d32b99dc86d .quad 0x044cdc75603af115 .quad 0xb34c712cdcc2e488 .quad 0x7c136574fb8134ff .quad 0x579ae53d18b175b4 .quad 0x68713159f392a102 .quad 0x8455ecba1eef35f5 .quad 0x1ec9a872458c398f .quad 0xb8e6a4d400a2509b .quad 0x9b81d7020bc882b4 .quad 0x57e7cc9bf1957561 .quad 0x3add88a5c7cd6460 // 2^120 * 7 * G .quad 0xab895770b635dcf2 .quad 0x02dfef6cf66c1fbc .quad 0x85530268beb6d187 .quad 0x249929fccc879e74 .quad 0x85c298d459393046 .quad 0x8f7e35985ff659ec .quad 0x1d2ca22af2f66e3a .quad 0x61ba1131a406a720 .quad 0xa3d0a0f116959029 .quad 0x023b6b6cba7ebd89 .quad 0x7bf15a3e26783307 .quad 0x5620310cbbd8ece7 // 2^120 * 8 * G .quad 0x528993434934d643 .quad 0xb9dbf806a51222f5 .quad 0x8f6d878fc3f41c22 .quad 0x37676a2a4d9d9730 .quad 0x6646b5f477e285d6 .quad 0x40e8ff676c8f6193 .quad 0xa6ec7311abb594dd .quad 0x7ec846f3658cec4d .quad 0x9b5e8f3f1da22ec7 .quad 0x130f1d776c01cd13 .quad 0x214c8fcfa2989fb8 .quad 0x6daaf723399b9dd5 // 2^124 * 1 * G .quad 0x591e4a5610628564 .quad 0x2a4bb87ca8b4df34 .quad 0xde2a2572e7a38e43 .quad 0x3cbdabd9fee5046e .quad 0x81aebbdd2cd13070 .quad 0x962e4325f85a0e9e .quad 0xde9391aacadffecb .quad 0x53177fda52c230e6 .quad 0xa7bc970650b9de79 .quad 0x3d12a7fbc301b59b .quad 0x02652e68d36ae38c .quad 0x79d739835a6199dc // 2^124 * 2 * G .quad 0xd9354df64131c1bd .quad 0x758094a186ec5822 .quad 0x4464ee12e459f3c2 .quad 0x6c11fce4cb133282 .quad 0x21c9d9920d591737 .quad 0x9bea41d2e9b46cd6 .quad 0xe20e84200d89bfca .quad 0x79d99f946eae5ff8 .quad 0xf17b483568673205 .quad 0x387deae83caad96c .quad 0x61b471fd56ffe386 .quad 0x31741195b745a599 // 2^124 * 3 * G .quad 0xe8d10190b77a360b .quad 0x99b983209995e702 .quad 0xbd4fdff8fa0247aa .quad 0x2772e344e0d36a87 .quad 0x17f8ba683b02a047 .quad 0x50212096feefb6c8 .quad 0x70139be21556cbe2 .quad 0x203e44a11d98915b .quad 0xd6863eba37b9e39f .quad 0x105bc169723b5a23 .quad 0x104f6459a65c0762 .quad 0x567951295b4d38d4 // 2^124 * 4 * G .quad 0x535fd60613037524 .quad 0xe210adf6b0fbc26a .quad 0xac8d0a9b23e990ae .quad 0x47204d08d72fdbf9 .quad 0x07242eb30d4b497f .quad 0x1ef96306b9bccc87 .quad 0x37950934d8116f45 .quad 0x05468d6201405b04 .quad 0x00f565a9f93267de .quad 0xcecfd78dc0d58e8a .quad 0xa215e2dcf318e28e .quad 0x4599ee919b633352 // 2^124 * 5 * G .quad 0xd3c220ca70e0e76b .quad 0xb12bea58ea9f3094 .quad 0x294ddec8c3271282 .quad 0x0c3539e1a1d1d028 .quad 0xac746d6b861ae579 .quad 0x31ab0650f6aea9dc .quad 0x241d661140256d4c .quad 0x2f485e853d21a5de .quad 0x329744839c0833f3 .quad 0x6fe6257fd2abc484 .quad 0x5327d1814b358817 .quad 0x65712585893fe9bc // 2^124 * 6 * G .quad 0x9c102fb732a61161 .quad 0xe48e10dd34d520a8 .quad 0x365c63546f9a9176 .quad 0x32f6fe4c046f6006 .quad 0x81c29f1bd708ee3f .quad 0xddcb5a05ae6407d0 .quad 0x97aec1d7d2a3eba7 .quad 0x1590521a91d50831 .quad 0x40a3a11ec7910acc .quad 0x9013dff8f16d27ae .quad 0x1a9720d8abb195d4 .quad 0x1bb9fe452ea98463 // 2^124 * 7 * G .quad 0xe9d1d950b3d54f9e .quad 0x2d5f9cbee00d33c1 .quad 0x51c2c656a04fc6ac .quad 0x65c091ee3c1cbcc9 .quad 0xcf5e6c95cc36747c .quad 0x294201536b0bc30d .quad 
0x453ac67cee797af0 .quad 0x5eae6ab32a8bb3c9 .quad 0x7083661114f118ea .quad 0x2b37b87b94349cad .quad 0x7273f51cb4e99f40 .quad 0x78a2a95823d75698 // 2^124 * 8 * G .quad 0xa2b072e95c8c2ace .quad 0x69cffc96651e9c4b .quad 0x44328ef842e7b42b .quad 0x5dd996c122aadeb3 .quad 0xb4f23c425ef83207 .quad 0xabf894d3c9a934b5 .quad 0xd0708c1339fd87f7 .quad 0x1876789117166130 .quad 0x925b5ef0670c507c .quad 0x819bc842b93c33bf .quad 0x10792e9a70dd003f .quad 0x59ad4b7a6e28dc74 // 2^128 * 1 * G .quad 0x5f3a7562eb3dbe47 .quad 0xf7ea38548ebda0b8 .quad 0x00c3e53145747299 .quad 0x1304e9e71627d551 .quad 0x583b04bfacad8ea2 .quad 0x29b743e8148be884 .quad 0x2b1e583b0810c5db .quad 0x2b5449e58eb3bbaa .quad 0x789814d26adc9cfe .quad 0x3c1bab3f8b48dd0b .quad 0xda0fe1fff979c60a .quad 0x4468de2d7c2dd693 // 2^128 * 2 * G .quad 0x51bb355e9419469e .quad 0x33e6dc4c23ddc754 .quad 0x93a5b6d6447f9962 .quad 0x6cce7c6ffb44bd63 .quad 0x4b9ad8c6f86307ce .quad 0x21113531435d0c28 .quad 0xd4a866c5657a772c .quad 0x5da6427e63247352 .quad 0x1a94c688deac22ca .quad 0xb9066ef7bbae1ff8 .quad 0x88ad8c388d59580f .quad 0x58f29abfe79f2ca8 // 2^128 * 3 * G .quad 0xe90ecfab8de73e68 .quad 0x54036f9f377e76a5 .quad 0xf0495b0bbe015982 .quad 0x577629c4a7f41e36 .quad 0x4b5a64bf710ecdf6 .quad 0xb14ce538462c293c .quad 0x3643d056d50b3ab9 .quad 0x6af93724185b4870 .quad 0x3220024509c6a888 .quad 0xd2e036134b558973 .quad 0x83e236233c33289f .quad 0x701f25bb0caec18f // 2^128 * 4 * G .quad 0xc3a8b0f8e4616ced .quad 0xf700660e9e25a87d .quad 0x61e3061ff4bca59c .quad 0x2e0c92bfbdc40be9 .quad 0x9d18f6d97cbec113 .quad 0x844a06e674bfdbe4 .quad 0x20f5b522ac4e60d6 .quad 0x720a5bc050955e51 .quad 0x0c3f09439b805a35 .quad 0xe84e8b376242abfc .quad 0x691417f35c229346 .quad 0x0e9b9cbb144ef0ec // 2^128 * 5 * G .quad 0xfbbad48ffb5720ad .quad 0xee81916bdbf90d0e .quad 0xd4813152635543bf .quad 0x221104eb3f337bd8 .quad 0x8dee9bd55db1beee .quad 0xc9c3ab370a723fb9 .quad 0x44a8f1bf1c68d791 .quad 0x366d44191cfd3cde .quad 0x9e3c1743f2bc8c14 .quad 0x2eda26fcb5856c3b .quad 0xccb82f0e68a7fb97 .quad 0x4167a4e6bc593244 // 2^128 * 6 * G .quad 0x643b9d2876f62700 .quad 0x5d1d9d400e7668eb .quad 0x1b4b430321fc0684 .quad 0x7938bb7e2255246a .quad 0xc2be2665f8ce8fee .quad 0xe967ff14e880d62c .quad 0xf12e6e7e2f364eee .quad 0x34b33370cb7ed2f6 .quad 0xcdc591ee8681d6cc .quad 0xce02109ced85a753 .quad 0xed7485c158808883 .quad 0x1176fc6e2dfe65e4 // 2^128 * 7 * G .quad 0xb4af6cd05b9c619b .quad 0x2ddfc9f4b2a58480 .quad 0x3d4fa502ebe94dc4 .quad 0x08fc3a4c677d5f34 .quad 0xdb90e28949770eb8 .quad 0x98fbcc2aacf440a3 .quad 0x21354ffeded7879b .quad 0x1f6a3e54f26906b6 .quad 0x60a4c199d30734ea .quad 0x40c085b631165cd6 .quad 0xe2333e23f7598295 .quad 0x4f2fad0116b900d1 // 2^128 * 8 * G .quad 0x44beb24194ae4e54 .quad 0x5f541c511857ef6c .quad 0xa61e6b2d368d0498 .quad 0x445484a4972ef7ab .quad 0x962cd91db73bb638 .quad 0xe60577aafc129c08 .quad 0x6f619b39f3b61689 .quad 0x3451995f2944ee81 .quad 0x9152fcd09fea7d7c .quad 0x4a816c94b0935cf6 .quad 0x258e9aaa47285c40 .quad 0x10b89ca6042893b7 // 2^132 * 1 * G .quad 0x9b2a426e3b646025 .quad 0x32127190385ce4cf .quad 0xa25cffc2dd6dea45 .quad 0x06409010bea8de75 .quad 0xd67cded679d34aa0 .quad 0xcc0b9ec0cc4db39f .quad 0xa535a456e35d190f .quad 0x2e05d9eaf61f6fef .quad 0xc447901ad61beb59 .quad 0x661f19bce5dc880a .quad 0x24685482b7ca6827 .quad 0x293c778cefe07f26 // 2^132 * 2 * G .quad 0x86809e7007069096 .quad 0xaad75b15e4e50189 .quad 0x07f35715a21a0147 .quad 0x0487f3f112815d5e .quad 0x16c795d6a11ff200 .quad 0xcb70d0e2b15815c9 .quad 0x89f293209b5395b5 .quad 0x50b8c2d031e47b4f .quad 
0x48350c08068a4962 .quad 0x6ffdd05351092c9a .quad 0x17af4f4aaf6fc8dd .quad 0x4b0553b53cdba58b // 2^132 * 3 * G .quad 0x9c65fcbe1b32ff79 .quad 0xeb75ea9f03b50f9b .quad 0xfced2a6c6c07e606 .quad 0x35106cd551717908 .quad 0xbf05211b27c152d4 .quad 0x5ec26849bd1af639 .quad 0x5e0b2caa8e6fab98 .quad 0x054c8bdd50bd0840 .quad 0x38a0b12f1dcf073d .quad 0x4b60a8a3b7f6a276 .quad 0xfed5ac25d3404f9a .quad 0x72e82d5e5505c229 // 2^132 * 4 * G .quad 0x6b0b697ff0d844c8 .quad 0xbb12f85cd979cb49 .quad 0xd2a541c6c1da0f1f .quad 0x7b7c242958ce7211 .quad 0x00d9cdfd69771d02 .quad 0x410276cd6cfbf17e .quad 0x4c45306c1cb12ec7 .quad 0x2857bf1627500861 .quad 0x9f21903f0101689e .quad 0xd779dfd3bf861005 .quad 0xa122ee5f3deb0f1b .quad 0x510df84b485a00d4 // 2^132 * 5 * G .quad 0xa54133bb9277a1fa .quad 0x74ec3b6263991237 .quad 0x1a3c54dc35d2f15a .quad 0x2d347144e482ba3a .quad 0x24b3c887c70ac15e .quad 0xb0f3a557fb81b732 .quad 0x9b2cde2fe578cc1b .quad 0x4cf7ed0703b54f8e .quad 0x6bd47c6598fbee0f .quad 0x9e4733e2ab55be2d .quad 0x1093f624127610c5 .quad 0x4e05e26ad0a1eaa4 // 2^132 * 6 * G .quad 0xda9b6b624b531f20 .quad 0x429a760e77509abb .quad 0xdbe9f522e823cb80 .quad 0x618f1856880c8f82 .quad 0x1833c773e18fe6c0 .quad 0xe3c4711ad3c87265 .quad 0x3bfd3c4f0116b283 .quad 0x1955875eb4cd4db8 .quad 0x6da6de8f0e399799 .quad 0x7ad61aa440fda178 .quad 0xb32cd8105e3563dd .quad 0x15f6beae2ae340ae // 2^132 * 7 * G .quad 0x862bcb0c31ec3a62 .quad 0x810e2b451138f3c2 .quad 0x788ec4b839dac2a4 .quad 0x28f76867ae2a9281 .quad 0xba9a0f7b9245e215 .quad 0xf368612dd98c0dbb .quad 0x2e84e4cbf220b020 .quad 0x6ba92fe962d90eda .quad 0x3e4df9655884e2aa .quad 0xbd62fbdbdbd465a5 .quad 0xd7596caa0de9e524 .quad 0x6e8042ccb2b1b3d7 // 2^132 * 8 * G .quad 0xf10d3c29ce28ca6e .quad 0xbad34540fcb6093d .quad 0xe7426ed7a2ea2d3f .quad 0x08af9d4e4ff298b9 .quad 0x1530653616521f7e .quad 0x660d06b896203dba .quad 0x2d3989bc545f0879 .quad 0x4b5303af78ebd7b0 .quad 0x72f8a6c3bebcbde8 .quad 0x4f0fca4adc3a8e89 .quad 0x6fa9d4e8c7bfdf7a .quad 0x0dcf2d679b624eb7 // 2^136 * 1 * G .quad 0x3d5947499718289c .quad 0x12ebf8c524533f26 .quad 0x0262bfcb14c3ef15 .quad 0x20b878d577b7518e .quad 0x753941be5a45f06e .quad 0xd07caeed6d9c5f65 .quad 0x11776b9c72ff51b6 .quad 0x17d2d1d9ef0d4da9 .quad 0x27f2af18073f3e6a .quad 0xfd3fe519d7521069 .quad 0x22e3b72c3ca60022 .quad 0x72214f63cc65c6a7 // 2^136 * 2 * G .quad 0xb4e37f405307a693 .quad 0xaba714d72f336795 .quad 0xd6fbd0a773761099 .quad 0x5fdf48c58171cbc9 .quad 0x1d9db7b9f43b29c9 .quad 0xd605824a4f518f75 .quad 0xf2c072bd312f9dc4 .quad 0x1f24ac855a1545b0 .quad 0x24d608328e9505aa .quad 0x4748c1d10c1420ee .quad 0xc7ffe45c06fb25a2 .quad 0x00ba739e2ae395e6 // 2^136 * 3 * G .quad 0x592e98de5c8790d6 .quad 0xe5bfb7d345c2a2df .quad 0x115a3b60f9b49922 .quad 0x03283a3e67ad78f3 .quad 0xae4426f5ea88bb26 .quad 0x360679d984973bfb .quad 0x5c9f030c26694e50 .quad 0x72297de7d518d226 .quad 0x48241dc7be0cb939 .quad 0x32f19b4d8b633080 .quad 0xd3dfc90d02289308 .quad 0x05e1296846271945 // 2^136 * 4 * G .quad 0xba82eeb32d9c495a .quad 0xceefc8fcf12bb97c .quad 0xb02dabae93b5d1e0 .quad 0x39c00c9c13698d9b .quad 0xadbfbbc8242c4550 .quad 0xbcc80cecd03081d9 .quad 0x843566a6f5c8df92 .quad 0x78cf25d38258ce4c .quad 0x15ae6b8e31489d68 .quad 0xaa851cab9c2bf087 .quad 0xc9a75a97f04efa05 .quad 0x006b52076b3ff832 // 2^136 * 5 * G .quad 0x29e0cfe19d95781c .quad 0xb681df18966310e2 .quad 0x57df39d370516b39 .quad 0x4d57e3443bc76122 .quad 0xf5cb7e16b9ce082d .quad 0x3407f14c417abc29 .quad 0xd4b36bce2bf4a7ab .quad 0x7de2e9561a9f75ce .quad 0xde70d4f4b6a55ecb .quad 0x4801527f5d85db99 .quad 
0xdbc9c440d3ee9a81 .quad 0x6b2a90af1a6029ed // 2^136 * 6 * G .quad 0x6923f4fc9ae61e97 .quad 0x5735281de03f5fd1 .quad 0xa764ae43e6edd12d .quad 0x5fd8f4e9d12d3e4a .quad 0x77ebf3245bb2d80a .quad 0xd8301b472fb9079b .quad 0xc647e6f24cee7333 .quad 0x465812c8276c2109 .quad 0x4d43beb22a1062d9 .quad 0x7065fb753831dc16 .quad 0x180d4a7bde2968d7 .quad 0x05b32c2b1cb16790 // 2^136 * 7 * G .quad 0xc8c05eccd24da8fd .quad 0xa1cf1aac05dfef83 .quad 0xdbbeeff27df9cd61 .quad 0x3b5556a37b471e99 .quad 0xf7fca42c7ad58195 .quad 0x3214286e4333f3cc .quad 0xb6c29d0d340b979d .quad 0x31771a48567307e1 .quad 0x32b0c524e14dd482 .quad 0xedb351541a2ba4b6 .quad 0xa3d16048282b5af3 .quad 0x4fc079d27a7336eb // 2^136 * 8 * G .quad 0x51c938b089bf2f7f .quad 0x2497bd6502dfe9a7 .quad 0xffffc09c7880e453 .quad 0x124567cecaf98e92 .quad 0xdc348b440c86c50d .quad 0x1337cbc9cc94e651 .quad 0x6422f74d643e3cb9 .quad 0x241170c2bae3cd08 .quad 0x3ff9ab860ac473b4 .quad 0xf0911dee0113e435 .quad 0x4ae75060ebc6c4af .quad 0x3f8612966c87000d // 2^140 * 1 * G .quad 0x0c9c5303f7957be4 .quad 0xa3c31a20e085c145 .quad 0xb0721d71d0850050 .quad 0x0aba390eab0bf2da .quad 0x529fdffe638c7bf3 .quad 0xdf2b9e60388b4995 .quad 0xe027b34f1bad0249 .quad 0x7bc92fc9b9fa74ed .quad 0x9f97ef2e801ad9f9 .quad 0x83697d5479afda3a .quad 0xe906b3ffbd596b50 .quad 0x02672b37dd3fb8e0 // 2^140 * 2 * G .quad 0x48b2ca8b260885e4 .quad 0xa4286bec82b34c1c .quad 0x937e1a2617f58f74 .quad 0x741d1fcbab2ca2a5 .quad 0xee9ba729398ca7f5 .quad 0xeb9ca6257a4849db .quad 0x29eb29ce7ec544e1 .quad 0x232ca21ef736e2c8 .quad 0xbf61423d253fcb17 .quad 0x08803ceafa39eb14 .quad 0xf18602df9851c7af .quad 0x0400f3a049e3414b // 2^140 * 3 * G .quad 0xabce0476ba61c55b .quad 0x36a3d6d7c4d39716 .quad 0x6eb259d5e8d82d09 .quad 0x0c9176e984d756fb .quad 0x2efba412a06e7b06 .quad 0x146785452c8d2560 .quad 0xdf9713ebd67a91c7 .quad 0x32830ac7157eadf3 .quad 0x0e782a7ab73769e8 .quad 0x04a05d7875b18e2c .quad 0x29525226ebcceae1 .quad 0x0d794f8383eba820 // 2^140 * 4 * G .quad 0xff35f5cb9e1516f4 .quad 0xee805bcf648aae45 .quad 0xf0d73c2bb93a9ef3 .quad 0x097b0bf22092a6c2 .quad 0x7be44ce7a7a2e1ac .quad 0x411fd93efad1b8b7 .quad 0x1734a1d70d5f7c9b .quad 0x0d6592233127db16 .quad 0xc48bab1521a9d733 .quad 0xa6c2eaead61abb25 .quad 0x625c6c1cc6cb4305 .quad 0x7fc90fea93eb3a67 // 2^140 * 5 * G .quad 0x0408f1fe1f5c5926 .quad 0x1a8f2f5e3b258bf4 .quad 0x40a951a2fdc71669 .quad 0x6598ee93c98b577e .quad 0xc527deb59c7cb23d .quad 0x955391695328404e .quad 0xd64392817ccf2c7a .quad 0x6ce97dabf7d8fa11 .quad 0x25b5a8e50ef7c48f .quad 0xeb6034116f2ce532 .quad 0xc5e75173e53de537 .quad 0x73119fa08c12bb03 // 2^140 * 6 * G .quad 0xed30129453f1a4cb .quad 0xbce621c9c8f53787 .quad 0xfacb2b1338bee7b9 .quad 0x3025798a9ea8428c .quad 0x7845b94d21f4774d .quad 0xbf62f16c7897b727 .quad 0x671857c03c56522b .quad 0x3cd6a85295621212 .quad 0x3fecde923aeca999 .quad 0xbdaa5b0062e8c12f .quad 0x67b99dfc96988ade .quad 0x3f52c02852661036 // 2^140 * 7 * G .quad 0xffeaa48e2a1351c6 .quad 0x28624754fa7f53d7 .quad 0x0b5ba9e57582ddf1 .quad 0x60c0104ba696ac59 .quad 0x9258bf99eec416c6 .quad 0xac8a5017a9d2f671 .quad 0x629549ab16dea4ab .quad 0x05d0e85c99091569 .quad 0x051de020de9cbe97 .quad 0xfa07fc56b50bcf74 .quad 0x378cec9f0f11df65 .quad 0x36853c69ab96de4d // 2^140 * 8 * G .quad 0x36d9b8de78f39b2d .quad 0x7f42ed71a847b9ec .quad 0x241cd1d679bd3fde .quad 0x6a704fec92fbce6b .quad 0x4433c0b0fac5e7be .quad 0x724bae854c08dcbe .quad 0xf1f24cc446978f9b .quad 0x4a0aff6d62825fc8 .quad 0xe917fb9e61095301 .quad 0xc102df9402a092f8 .quad 0xbf09e2f5fa66190b .quad 0x681109bee0dcfe37 // 2^144 * 1 * G .quad 
0x559a0cc9782a0dde .quad 0x551dcdb2ea718385 .quad 0x7f62865b31ef238c .quad 0x504aa7767973613d .quad 0x9c18fcfa36048d13 .quad 0x29159db373899ddd .quad 0xdc9f350b9f92d0aa .quad 0x26f57eee878a19d4 .quad 0x0cab2cd55687efb1 .quad 0x5180d162247af17b .quad 0x85c15a344f5a2467 .quad 0x4041943d9dba3069 // 2^144 * 2 * G .quad 0xc3c0eeba43ebcc96 .quad 0x8d749c9c26ea9caf .quad 0xd9fa95ee1c77ccc6 .quad 0x1420a1d97684340f .quad 0x4b217743a26caadd .quad 0x47a6b424648ab7ce .quad 0xcb1d4f7a03fbc9e3 .quad 0x12d931429800d019 .quad 0x00c67799d337594f .quad 0x5e3c5140b23aa47b .quad 0x44182854e35ff395 .quad 0x1b4f92314359a012 // 2^144 * 3 * G .quad 0x3e5c109d89150951 .quad 0x39cefa912de9696a .quad 0x20eae43f975f3020 .quad 0x239b572a7f132dae .quad 0x33cf3030a49866b1 .quad 0x251f73d2215f4859 .quad 0xab82aa4051def4f6 .quad 0x5ff191d56f9a23f6 .quad 0x819ed433ac2d9068 .quad 0x2883ab795fc98523 .quad 0xef4572805593eb3d .quad 0x020c526a758f36cb // 2^144 * 4 * G .quad 0x779834f89ed8dbbc .quad 0xc8f2aaf9dc7ca46c .quad 0xa9524cdca3e1b074 .quad 0x02aacc4615313877 .quad 0xe931ef59f042cc89 .quad 0x2c589c9d8e124bb6 .quad 0xadc8e18aaec75997 .quad 0x452cfe0a5602c50c .quad 0x86a0f7a0647877df .quad 0xbbc464270e607c9f .quad 0xab17ea25f1fb11c9 .quad 0x4cfb7d7b304b877b // 2^144 * 5 * G .quad 0x72b43d6cb89b75fe .quad 0x54c694d99c6adc80 .quad 0xb8c3aa373ee34c9f .quad 0x14b4622b39075364 .quad 0xe28699c29789ef12 .quad 0x2b6ecd71df57190d .quad 0xc343c857ecc970d0 .quad 0x5b1d4cbc434d3ac5 .quad 0xb6fb2615cc0a9f26 .quad 0x3a4f0e2bb88dcce5 .quad 0x1301498b3369a705 .quad 0x2f98f71258592dd1 // 2^144 * 6 * G .quad 0x0c94a74cb50f9e56 .quad 0x5b1ff4a98e8e1320 .quad 0x9a2acc2182300f67 .quad 0x3a6ae249d806aaf9 .quad 0x2e12ae444f54a701 .quad 0xfcfe3ef0a9cbd7de .quad 0xcebf890d75835de0 .quad 0x1d8062e9e7614554 .quad 0x657ada85a9907c5a .quad 0x1a0ea8b591b90f62 .quad 0x8d0e1dfbdf34b4e9 .quad 0x298b8ce8aef25ff3 // 2^144 * 7 * G .quad 0x2a927953eff70cb2 .quad 0x4b89c92a79157076 .quad 0x9418457a30a7cf6a .quad 0x34b8a8404d5ce485 .quad 0x837a72ea0a2165de .quad 0x3fab07b40bcf79f6 .quad 0x521636c77738ae70 .quad 0x6ba6271803a7d7dc .quad 0xc26eecb583693335 .quad 0xd5a813df63b5fefd .quad 0xa293aa9aa4b22573 .quad 0x71d62bdd465e1c6a // 2^144 * 8 * G .quad 0x6533cc28d378df80 .quad 0xf6db43790a0fa4b4 .quad 0xe3645ff9f701da5a .quad 0x74d5f317f3172ba4 .quad 0xcd2db5dab1f75ef5 .quad 0xd77f95cf16b065f5 .quad 0x14571fea3f49f085 .quad 0x1c333621262b2b3d .quad 0xa86fe55467d9ca81 .quad 0x398b7c752b298c37 .quad 0xda6d0892e3ac623b .quad 0x4aebcc4547e9d98c // 2^148 * 1 * G .quad 0x53175a7205d21a77 .quad 0xb0c04422d3b934d4 .quad 0xadd9f24bdd5deadc .quad 0x074f46e69f10ff8c .quad 0x0de9b204a059a445 .quad 0xe15cb4aa4b17ad0f .quad 0xe1bbec521f79c557 .quad 0x2633f1b9d071081b .quad 0xc1fb4177018b9910 .quad 0xa6ea20dc6c0fe140 .quad 0xd661f3e74354c6ff .quad 0x5ecb72e6f1a3407a // 2^148 * 2 * G .quad 0xa515a31b2259fb4e .quad 0x0960f3972bcac52f .quad 0xedb52fec8d3454cb .quad 0x382e2720c476c019 .quad 0xfeeae106e8e86997 .quad 0x9863337f98d09383 .quad 0x9470480eaa06ebef .quad 0x038b6898d4c5c2d0 .quad 0xf391c51d8ace50a6 .quad 0x3142d0b9ae2d2948 .quad 0xdb4d5a1a7f24ca80 .quad 0x21aeba8b59250ea8 // 2^148 * 3 * G .quad 0x24f13b34cf405530 .quad 0x3c44ea4a43088af7 .quad 0x5dd5c5170006a482 .quad 0x118eb8f8890b086d .quad 0x53853600f0087f23 .quad 0x4c461879da7d5784 .quad 0x6af303deb41f6860 .quad 0x0a3c16c5c27c18ed .quad 0x17e49c17cc947f3d .quad 0xccc6eda6aac1d27b .quad 0xdf6092ceb0f08e56 .quad 0x4909b3e22c67c36b // 2^148 * 4 * G .quad 0x9c9c85ea63fe2e89 .quad 0xbe1baf910e9412ec .quad 
0x8f7baa8a86fbfe7b .quad 0x0fb17f9fef968b6c .quad 0x59a16676706ff64e .quad 0x10b953dd0d86a53d .quad 0x5848e1e6ce5c0b96 .quad 0x2d8b78e712780c68 .quad 0x79d5c62eafc3902b .quad 0x773a215289e80728 .quad 0xc38ae640e10120b9 .quad 0x09ae23717b2b1a6d // 2^148 * 5 * G .quad 0xbb6a192a4e4d083c .quad 0x34ace0630029e192 .quad 0x98245a59aafabaeb .quad 0x6d9c8a9ada97faac .quad 0x10ab8fa1ad32b1d0 .quad 0xe9aced1be2778b24 .quad 0xa8856bc0373de90f .quad 0x66f35ddddda53996 .quad 0xd27d9afb24997323 .quad 0x1bb7e07ef6f01d2e .quad 0x2ba7472df52ecc7f .quad 0x03019b4f646f9dc8 // 2^148 * 6 * G .quad 0x04a186b5565345cd .quad 0xeee76610bcc4116a .quad 0x689c73b478fb2a45 .quad 0x387dcbff65697512 .quad 0xaf09b214e6b3dc6b .quad 0x3f7573b5ad7d2f65 .quad 0xd019d988100a23b0 .quad 0x392b63a58b5c35f7 .quad 0x4093addc9c07c205 .quad 0xc565be15f532c37e .quad 0x63dbecfd1583402a .quad 0x61722b4aef2e032e // 2^148 * 7 * G .quad 0x0012aafeecbd47af .quad 0x55a266fb1cd46309 .quad 0xf203eb680967c72c .quad 0x39633944ca3c1429 .quad 0xd6b07a5581cb0e3c .quad 0x290ff006d9444969 .quad 0x08680b6a16dcda1f .quad 0x5568d2b75a06de59 .quad 0x8d0cb88c1b37cfe1 .quad 0x05b6a5a3053818f3 .quad 0xf2e9bc04b787d959 .quad 0x6beba1249add7f64 // 2^148 * 8 * G .quad 0x1d06005ca5b1b143 .quad 0x6d4c6bb87fd1cda2 .quad 0x6ef5967653fcffe7 .quad 0x097c29e8c1ce1ea5 .quad 0x5c3cecb943f5a53b .quad 0x9cc9a61d06c08df2 .quad 0xcfba639a85895447 .quad 0x5a845ae80df09fd5 .quad 0x4ce97dbe5deb94ca .quad 0x38d0a4388c709c48 .quad 0xc43eced4a169d097 .quad 0x0a1249fff7e587c3 // 2^152 * 1 * G .quad 0x12f0071b276d01c9 .quad 0xe7b8bac586c48c70 .quad 0x5308129b71d6fba9 .quad 0x5d88fbf95a3db792 .quad 0x0b408d9e7354b610 .quad 0x806b32535ba85b6e .quad 0xdbe63a034a58a207 .quad 0x173bd9ddc9a1df2c .quad 0x2b500f1efe5872df .quad 0x58d6582ed43918c1 .quad 0xe6ed278ec9673ae0 .quad 0x06e1cd13b19ea319 // 2^152 * 2 * G .quad 0x40d0ad516f166f23 .quad 0x118e32931fab6abe .quad 0x3fe35e14a04d088e .quad 0x3080603526e16266 .quad 0x472baf629e5b0353 .quad 0x3baa0b90278d0447 .quad 0x0c785f469643bf27 .quad 0x7f3a6a1a8d837b13 .quad 0xf7e644395d3d800b .quad 0x95a8d555c901edf6 .quad 0x68cd7830592c6339 .quad 0x30d0fded2e51307e // 2^152 * 3 * G .quad 0xe0594d1af21233b3 .quad 0x1bdbe78ef0cc4d9c .quad 0x6965187f8f499a77 .quad 0x0a9214202c099868 .quad 0x9cb4971e68b84750 .quad 0xa09572296664bbcf .quad 0x5c8de72672fa412b .quad 0x4615084351c589d9 .quad 0xbc9019c0aeb9a02e .quad 0x55c7110d16034cae .quad 0x0e6df501659932ec .quad 0x3bca0d2895ca5dfe // 2^152 * 4 * G .quad 0x40f031bc3c5d62a4 .quad 0x19fc8b3ecff07a60 .quad 0x98183da2130fb545 .quad 0x5631deddae8f13cd .quad 0x9c688eb69ecc01bf .quad 0xf0bc83ada644896f .quad 0xca2d955f5f7a9fe2 .quad 0x4ea8b4038df28241 .quad 0x2aed460af1cad202 .quad 0x46305305a48cee83 .quad 0x9121774549f11a5f .quad 0x24ce0930542ca463 // 2^152 * 5 * G .quad 0x1fe890f5fd06c106 .quad 0xb5c468355d8810f2 .quad 0x827808fe6e8caf3e .quad 0x41d4e3c28a06d74b .quad 0x3fcfa155fdf30b85 .quad 0xd2f7168e36372ea4 .quad 0xb2e064de6492f844 .quad 0x549928a7324f4280 .quad 0xf26e32a763ee1a2e .quad 0xae91e4b7d25ffdea .quad 0xbc3bd33bd17f4d69 .quad 0x491b66dec0dcff6a // 2^152 * 6 * G .quad 0x98f5b13dc7ea32a7 .quad 0xe3d5f8cc7e16db98 .quad 0xac0abf52cbf8d947 .quad 0x08f338d0c85ee4ac .quad 0x75f04a8ed0da64a1 .quad 0xed222caf67e2284b .quad 0x8234a3791f7b7ba4 .quad 0x4cf6b8b0b7018b67 .quad 0xc383a821991a73bd .quad 0xab27bc01df320c7a .quad 0xc13d331b84777063 .quad 0x530d4a82eb078a99 // 2^152 * 7 * G .quad 0x004c3630e1f94825 .quad 0x7e2d78268cab535a .quad 0xc7482323cc84ff8b .quad 0x65ea753f101770b9 .quad 
0x6d6973456c9abf9e .quad 0x257fb2fc4900a880 .quad 0x2bacf412c8cfb850 .quad 0x0db3e7e00cbfbd5b .quad 0x3d66fc3ee2096363 .quad 0x81d62c7f61b5cb6b .quad 0x0fbe044213443b1a .quad 0x02a4ec1921e1a1db // 2^152 * 8 * G .quad 0x5ce6259a3b24b8a2 .quad 0xb8577acc45afa0b8 .quad 0xcccbe6e88ba07037 .quad 0x3d143c51127809bf .quad 0xf5c86162f1cf795f .quad 0x118c861926ee57f2 .quad 0x172124851c063578 .quad 0x36d12b5dec067fcf .quad 0x126d279179154557 .quad 0xd5e48f5cfc783a0a .quad 0x36bdb6e8df179bac .quad 0x2ef517885ba82859 // 2^156 * 1 * G .quad 0x88bd438cd11e0d4a .quad 0x30cb610d43ccf308 .quad 0xe09a0e3791937bcc .quad 0x4559135b25b1720c .quad 0x1ea436837c6da1e9 .quad 0xf9c189af1fb9bdbe .quad 0x303001fcce5dd155 .quad 0x28a7c99ebc57be52 .quad 0xb8fd9399e8d19e9d .quad 0x908191cb962423ff .quad 0xb2b948d747c742a3 .quad 0x37f33226d7fb44c4 // 2^156 * 2 * G .quad 0x0dae8767b55f6e08 .quad 0x4a43b3b35b203a02 .quad 0xe3725a6e80af8c79 .quad 0x0f7a7fd1705fa7a3 .quad 0x33912553c821b11d .quad 0x66ed42c241e301df .quad 0x066fcc11104222fd .quad 0x307a3b41c192168f .quad 0x8eeb5d076eb55ce0 .quad 0x2fc536bfaa0d925a .quad 0xbe81830fdcb6c6e8 .quad 0x556c7045827baf52 // 2^156 * 3 * G .quad 0x8e2b517302e9d8b7 .quad 0xe3e52269248714e8 .quad 0xbd4fbd774ca960b5 .quad 0x6f4b4199c5ecada9 .quad 0xb94b90022bf44406 .quad 0xabd4237eff90b534 .quad 0x7600a960faf86d3a .quad 0x2f45abdac2322ee3 .quad 0x61af4912c8ef8a6a .quad 0xe58fa4fe43fb6e5e .quad 0xb5afcc5d6fd427cf .quad 0x6a5393281e1e11eb // 2^156 * 4 * G .quad 0xf3da5139a5d1ee89 .quad 0x8145457cff936988 .quad 0x3f622fed00e188c4 .quad 0x0f513815db8b5a3d .quad 0x0fff04fe149443cf .quad 0x53cac6d9865cddd7 .quad 0x31385b03531ed1b7 .quad 0x5846a27cacd1039d .quad 0x4ff5cdac1eb08717 .quad 0x67e8b29590f2e9bc .quad 0x44093b5e237afa99 .quad 0x0d414bed8708b8b2 // 2^156 * 5 * G .quad 0xcfb68265fd0e75f6 .quad 0xe45b3e28bb90e707 .quad 0x7242a8de9ff92c7a .quad 0x685b3201933202dd .quad 0x81886a92294ac9e8 .quad 0x23162b45d55547be .quad 0x94cfbc4403715983 .quad 0x50eb8fdb134bc401 .quad 0xc0b73ec6d6b330cd .quad 0x84e44807132faff1 .quad 0x732b7352c4a5dee1 .quad 0x5d7c7cf1aa7cd2d2 // 2^156 * 6 * G .quad 0xaf3b46bf7a4aafa2 .quad 0xb78705ec4d40d411 .quad 0x114f0c6aca7c15e3 .quad 0x3f364faaa9489d4d .quad 0x33d1013e9b73a562 .quad 0x925cef5748ec26e1 .quad 0xa7fce614dd468058 .quad 0x78b0fad41e9aa438 .quad 0xbf56a431ed05b488 .quad 0xa533e66c9c495c7e .quad 0xe8652baf87f3651a .quad 0x0241800059d66c33 // 2^156 * 7 * G .quad 0xceb077fea37a5be4 .quad 0xdb642f02e5a5eeb7 .quad 0xc2e6d0c5471270b8 .quad 0x4771b65538e4529c .quad 0x28350c7dcf38ea01 .quad 0x7c6cdbc0b2917ab6 .quad 0xace7cfbe857082f7 .quad 0x4d2845aba2d9a1e0 .quad 0xbb537fe0447070de .quad 0xcba744436dd557df .quad 0xd3b5a3473600dbcb .quad 0x4aeabbe6f9ffd7f8 // 2^156 * 8 * G .quad 0x4630119e40d8f78c .quad 0xa01a9bc53c710e11 .quad 0x486d2b258910dd79 .quad 0x1e6c47b3db0324e5 .quad 0x6a2134bcc4a9c8f2 .quad 0xfbf8fd1c8ace2e37 .quad 0x000ae3049911a0ba .quad 0x046e3a616bc89b9e .quad 0x14e65442f03906be .quad 0x4a019d54e362be2a .quad 0x68ccdfec8dc230c7 .quad 0x7cfb7e3faf6b861c // 2^160 * 1 * G .quad 0x4637974e8c58aedc .quad 0xb9ef22fbabf041a4 .quad 0xe185d956e980718a .quad 0x2f1b78fab143a8a6 .quad 0x96eebffb305b2f51 .quad 0xd3f938ad889596b8 .quad 0xf0f52dc746d5dd25 .quad 0x57968290bb3a0095 .quad 0xf71ab8430a20e101 .quad 0xf393658d24f0ec47 .quad 0xcf7509a86ee2eed1 .quad 0x7dc43e35dc2aa3e1 // 2^160 * 2 * G .quad 0x85966665887dd9c3 .quad 0xc90f9b314bb05355 .quad 0xc6e08df8ef2079b1 .quad 0x7ef72016758cc12f .quad 0x5a782a5c273e9718 .quad 0x3576c6995e4efd94 .quad 
0x0f2ed8051f237d3e .quad 0x044fb81d82d50a99 .quad 0xc1df18c5a907e3d9 .quad 0x57b3371dce4c6359 .quad 0xca704534b201bb49 .quad 0x7f79823f9c30dd2e // 2^160 * 3 * G .quad 0x8334d239a3b513e8 .quad 0xc13670d4b91fa8d8 .quad 0x12b54136f590bd33 .quad 0x0a4e0373d784d9b4 .quad 0x6a9c1ff068f587ba .quad 0x0827894e0050c8de .quad 0x3cbf99557ded5be7 .quad 0x64a9b0431c06d6f0 .quad 0x2eb3d6a15b7d2919 .quad 0xb0b4f6a0d53a8235 .quad 0x7156ce4389a45d47 .quad 0x071a7d0ace18346c // 2^160 * 4 * G .quad 0xd3072daac887ba0b .quad 0x01262905bfa562ee .quad 0xcf543002c0ef768b .quad 0x2c3bcc7146ea7e9c .quad 0xcc0c355220e14431 .quad 0x0d65950709b15141 .quad 0x9af5621b209d5f36 .quad 0x7c69bcf7617755d3 .quad 0x07f0d7eb04e8295f .quad 0x10db18252f50f37d .quad 0xe951a9a3171798d7 .quad 0x6f5a9a7322aca51d // 2^160 * 5 * G .quad 0x8ba1000c2f41c6c5 .quad 0xc49f79c10cfefb9b .quad 0x4efa47703cc51c9f .quad 0x494e21a2e147afca .quad 0xe729d4eba3d944be .quad 0x8d9e09408078af9e .quad 0x4525567a47869c03 .quad 0x02ab9680ee8d3b24 .quad 0xefa48a85dde50d9a .quad 0x219a224e0fb9a249 .quad 0xfa091f1dd91ef6d9 .quad 0x6b5d76cbea46bb34 // 2^160 * 6 * G .quad 0x8857556cec0cd994 .quad 0x6472dc6f5cd01dba .quad 0xaf0169148f42b477 .quad 0x0ae333f685277354 .quad 0xe0f941171e782522 .quad 0xf1e6ae74036936d3 .quad 0x408b3ea2d0fcc746 .quad 0x16fb869c03dd313e .quad 0x288e199733b60962 .quad 0x24fc72b4d8abe133 .quad 0x4811f7ed0991d03e .quad 0x3f81e38b8f70d075 // 2^160 * 7 * G .quad 0x7f910fcc7ed9affe .quad 0x545cb8a12465874b .quad 0xa8397ed24b0c4704 .quad 0x50510fc104f50993 .quad 0x0adb7f355f17c824 .quad 0x74b923c3d74299a4 .quad 0xd57c3e8bcbf8eaf7 .quad 0x0ad3e2d34cdedc3d .quad 0x6f0c0fc5336e249d .quad 0x745ede19c331cfd9 .quad 0xf2d6fd0009eefe1c .quad 0x127c158bf0fa1ebe // 2^160 * 8 * G .quad 0xf6197c422e9879a2 .quad 0xa44addd452ca3647 .quad 0x9b413fc14b4eaccb .quad 0x354ef87d07ef4f68 .quad 0xdea28fc4ae51b974 .quad 0x1d9973d3744dfe96 .quad 0x6240680b873848a8 .quad 0x4ed82479d167df95 .quad 0xfee3b52260c5d975 .quad 0x50352efceb41b0b8 .quad 0x8808ac30a9f6653c .quad 0x302d92d20539236d // 2^164 * 1 * G .quad 0x4c59023fcb3efb7c .quad 0x6c2fcb99c63c2a94 .quad 0xba4190e2c3c7e084 .quad 0x0e545daea51874d9 .quad 0x957b8b8b0df53c30 .quad 0x2a1c770a8e60f098 .quad 0xbbc7a670345796de .quad 0x22a48f9a90c99bc9 .quad 0x6b7dc0dc8d3fac58 .quad 0x5497cd6ce6e42bfd .quad 0x542f7d1bf400d305 .quad 0x4159f47f048d9136 // 2^164 * 2 * G .quad 0x20ad660839e31e32 .quad 0xf81e1bd58405be50 .quad 0xf8064056f4dabc69 .quad 0x14d23dd4ce71b975 .quad 0x748515a8bbd24839 .quad 0x77128347afb02b55 .quad 0x50ba2ac649a2a17f .quad 0x060525513ad730f1 .quad 0xf2398e098aa27f82 .quad 0x6d7982bb89a1b024 .quad 0xfa694084214dd24c .quad 0x71ab966fa32301c3 // 2^164 * 3 * G .quad 0x2dcbd8e34ded02fc .quad 0x1151f3ec596f22aa .quad 0xbca255434e0328da .quad 0x35768fbe92411b22 .quad 0xb1088a0702809955 .quad 0x43b273ea0b43c391 .quad 0xca9b67aefe0686ed .quad 0x605eecbf8335f4ed .quad 0x83200a656c340431 .quad 0x9fcd71678ee59c2f .quad 0x75d4613f71300f8a .quad 0x7a912faf60f542f9 // 2^164 * 4 * G .quad 0xb204585e5edc1a43 .quad 0x9f0e16ee5897c73c .quad 0x5b82c0ae4e70483c .quad 0x624a170e2bddf9be .quad 0x253f4f8dfa2d5597 .quad 0x25e49c405477130c .quad 0x00c052e5996b1102 .quad 0x33cb966e33bb6c4a .quad 0x597028047f116909 .quad 0x828ac41c1e564467 .quad 0x70417dbde6217387 .quad 0x721627aefbac4384 // 2^164 * 5 * G .quad 0x97d03bc38736add5 .quad 0x2f1422afc532b130 .quad 0x3aa68a057101bbc4 .quad 0x4c946cf7e74f9fa7 .quad 0xfd3097bc410b2f22 .quad 0xf1a05da7b5cfa844 .quad 0x61289a1def57ca74 .quad 0x245ea199bb821902 .quad 
0xaedca66978d477f8 .quad 0x1898ba3c29117fe1 .quad 0xcf73f983720cbd58 .quad 0x67da12e6b8b56351 // 2^164 * 6 * G .quad 0x7067e187b4bd6e07 .quad 0x6e8f0203c7d1fe74 .quad 0x93c6aa2f38c85a30 .quad 0x76297d1f3d75a78a .quad 0x2b7ef3d38ec8308c .quad 0x828fd7ec71eb94ab .quad 0x807c3b36c5062abd .quad 0x0cb64cb831a94141 .quad 0x3030fc33534c6378 .quad 0xb9635c5ce541e861 .quad 0x15d9a9bed9b2c728 .quad 0x49233ea3f3775dcb // 2^164 * 7 * G .quad 0x629398fa8dbffc3a .quad 0xe12fe52dd54db455 .quad 0xf3be11dfdaf25295 .quad 0x628b140dce5e7b51 .quad 0x7b3985fe1c9f249b .quad 0x4fd6b2d5a1233293 .quad 0xceb345941adf4d62 .quad 0x6987ff6f542de50c .quad 0x47e241428f83753c .quad 0x6317bebc866af997 .quad 0xdabb5b433d1a9829 .quad 0x074d8d245287fb2d // 2^164 * 8 * G .quad 0x8337d9cd440bfc31 .quad 0x729d2ca1af318fd7 .quad 0xa040a4a4772c2070 .quad 0x46002ef03a7349be .quad 0x481875c6c0e31488 .quad 0x219429b2e22034b4 .quad 0x7223c98a31283b65 .quad 0x3420d60b342277f9 .quad 0xfaa23adeaffe65f7 .quad 0x78261ed45be0764c .quad 0x441c0a1e2f164403 .quad 0x5aea8e567a87d395 // 2^168 * 1 * G .quad 0x7813c1a2bca4283d .quad 0xed62f091a1863dd9 .quad 0xaec7bcb8c268fa86 .quad 0x10e5d3b76f1cae4c .quad 0x2dbc6fb6e4e0f177 .quad 0x04e1bf29a4bd6a93 .quad 0x5e1966d4787af6e8 .quad 0x0edc5f5eb426d060 .quad 0x5453bfd653da8e67 .quad 0xe9dc1eec24a9f641 .quad 0xbf87263b03578a23 .quad 0x45b46c51361cba72 // 2^168 * 2 * G .quad 0xa9402abf314f7fa1 .quad 0xe257f1dc8e8cf450 .quad 0x1dbbd54b23a8be84 .quad 0x2177bfa36dcb713b .quad 0xce9d4ddd8a7fe3e4 .quad 0xab13645676620e30 .quad 0x4b594f7bb30e9958 .quad 0x5c1c0aef321229df .quad 0x37081bbcfa79db8f .quad 0x6048811ec25f59b3 .quad 0x087a76659c832487 .quad 0x4ae619387d8ab5bb // 2^168 * 3 * G .quad 0x8ddbf6aa5344a32e .quad 0x7d88eab4b41b4078 .quad 0x5eb0eb974a130d60 .quad 0x1a00d91b17bf3e03 .quad 0x61117e44985bfb83 .quad 0xfce0462a71963136 .quad 0x83ac3448d425904b .quad 0x75685abe5ba43d64 .quad 0x6e960933eb61f2b2 .quad 0x543d0fa8c9ff4952 .quad 0xdf7275107af66569 .quad 0x135529b623b0e6aa // 2^168 * 4 * G .quad 0x18f0dbd7add1d518 .quad 0x979f7888cfc11f11 .quad 0x8732e1f07114759b .quad 0x79b5b81a65ca3a01 .quad 0xf5c716bce22e83fe .quad 0xb42beb19e80985c1 .quad 0xec9da63714254aae .quad 0x5972ea051590a613 .quad 0x0fd4ac20dc8f7811 .quad 0x9a9ad294ac4d4fa8 .quad 0xc01b2d64b3360434 .quad 0x4f7e9c95905f3bdb // 2^168 * 5 * G .quad 0x62674bbc5781302e .quad 0xd8520f3989addc0f .quad 0x8c2999ae53fbd9c6 .quad 0x31993ad92e638e4c .quad 0x71c8443d355299fe .quad 0x8bcd3b1cdbebead7 .quad 0x8092499ef1a49466 .quad 0x1942eec4a144adc8 .quad 0x7dac5319ae234992 .quad 0x2c1b3d910cea3e92 .quad 0x553ce494253c1122 .quad 0x2a0a65314ef9ca75 // 2^168 * 6 * G .quad 0x2db7937ff7f927c2 .quad 0xdb741f0617d0a635 .quad 0x5982f3a21155af76 .quad 0x4cf6e218647c2ded .quad 0xcf361acd3c1c793a .quad 0x2f9ebcac5a35bc3b .quad 0x60e860e9a8cda6ab .quad 0x055dc39b6dea1a13 .quad 0xb119227cc28d5bb6 .quad 0x07e24ebc774dffab .quad 0xa83c78cee4a32c89 .quad 0x121a307710aa24b6 // 2^168 * 7 * G .quad 0xe4db5d5e9f034a97 .quad 0xe153fc093034bc2d .quad 0x460546919551d3b1 .quad 0x333fc76c7a40e52d .quad 0xd659713ec77483c9 .quad 0x88bfe077b82b96af .quad 0x289e28231097bcd3 .quad 0x527bb94a6ced3a9b .quad 0x563d992a995b482e .quad 0x3405d07c6e383801 .quad 0x485035de2f64d8e5 .quad 0x6b89069b20a7a9f7 // 2^168 * 8 * G .quad 0x812aa0416270220d .quad 0x995a89faf9245b4e .quad 0xffadc4ce5072ef05 .quad 0x23bc2103aa73eb73 .quad 0x4082fa8cb5c7db77 .quad 0x068686f8c734c155 .quad 0x29e6c8d9f6e7a57e .quad 0x0473d308a7639bcf .quad 0xcaee792603589e05 .quad 0x2b4b421246dcc492 .quad 
0x02a1ef74e601a94f .quad 0x102f73bfde04341a // 2^172 * 1 * G .quad 0xb5a2d50c7ec20d3e .quad 0xc64bdd6ea0c97263 .quad 0x56e89052c1ff734d .quad 0x4929c6f72b2ffaba .quad 0x358ecba293a36247 .quad 0xaf8f9862b268fd65 .quad 0x412f7e9968a01c89 .quad 0x5786f312cd754524 .quad 0x337788ffca14032c .quad 0xf3921028447f1ee3 .quad 0x8b14071f231bccad .quad 0x4c817b4bf2344783 // 2^172 * 2 * G .quad 0x0ff853852871b96e .quad 0xe13e9fab60c3f1bb .quad 0xeefd595325344402 .quad 0x0a37c37075b7744b .quad 0x413ba057a40b4484 .quad 0xba4c2e1a4f5f6a43 .quad 0x614ba0a5aee1d61c .quad 0x78a1531a8b05dc53 .quad 0x6cbdf1703ad0562b .quad 0x8ecf4830c92521a3 .quad 0xdaebd303fd8424e7 .quad 0x72ad82a42e5ec56f // 2^172 * 3 * G .quad 0x3f9e8e35bafb65f6 .quad 0x39d69ec8f27293a1 .quad 0x6cb8cd958cf6a3d0 .quad 0x1734778173adae6d .quad 0xc368939167024bc3 .quad 0x8e69d16d49502fda .quad 0xfcf2ec3ce45f4b29 .quad 0x065f669ea3b4cbc4 .quad 0x8a00aec75532db4d .quad 0xb869a4e443e31bb1 .quad 0x4a0f8552d3a7f515 .quad 0x19adeb7c303d7c08 // 2^172 * 4 * G .quad 0xc720cb6153ead9a3 .quad 0x55b2c97f512b636e .quad 0xb1e35b5fd40290b1 .quad 0x2fd9ccf13b530ee2 .quad 0x9d05ba7d43c31794 .quad 0x2470c8ff93322526 .quad 0x8323dec816197438 .quad 0x2852709881569b53 .quad 0x07bd475b47f796b8 .quad 0xd2c7b013542c8f54 .quad 0x2dbd23f43b24f87e .quad 0x6551afd77b0901d6 // 2^172 * 5 * G .quad 0x4546baaf54aac27f .quad 0xf6f66fecb2a45a28 .quad 0x582d1b5b562bcfe8 .quad 0x44b123f3920f785f .quad 0x68a24ce3a1d5c9ac .quad 0xbb77a33d10ff6461 .quad 0x0f86ce4425d3166e .quad 0x56507c0950b9623b .quad 0x1206f0b7d1713e63 .quad 0x353fe3d915bafc74 .quad 0x194ceb970ad9d94d .quad 0x62fadd7cf9d03ad3 // 2^172 * 6 * G .quad 0xc6b5967b5598a074 .quad 0x5efe91ce8e493e25 .quad 0xd4b72c4549280888 .quad 0x20ef1149a26740c2 .quad 0x3cd7bc61e7ce4594 .quad 0xcd6b35a9b7dd267e .quad 0xa080abc84366ef27 .quad 0x6ec7c46f59c79711 .quad 0x2f07ad636f09a8a2 .quad 0x8697e6ce24205e7d .quad 0xc0aefc05ee35a139 .quad 0x15e80958b5f9d897 // 2^172 * 7 * G .quad 0x25a5ef7d0c3e235b .quad 0x6c39c17fbe134ee7 .quad 0xc774e1342dc5c327 .quad 0x021354b892021f39 .quad 0x4dd1ed355bb061c4 .quad 0x42dc0cef941c0700 .quad 0x61305dc1fd86340e .quad 0x56b2cc930e55a443 .quad 0x1df79da6a6bfc5a2 .quad 0x02f3a2749fde4369 .quad 0xb323d9f2cda390a7 .quad 0x7be0847b8774d363 // 2^172 * 8 * G .quad 0x8c99cc5a8b3f55c3 .quad 0x0611d7253fded2a0 .quad 0xed2995ff36b70a36 .quad 0x1f699a54d78a2619 .quad 0x1466f5af5307fa11 .quad 0x817fcc7ded6c0af2 .quad 0x0a6de44ec3a4a3fb .quad 0x74071475bc927d0b .quad 0xe77292f373e7ea8a .quad 0x296537d2cb045a31 .quad 0x1bd0653ed3274fde .quad 0x2f9a2c4476bd2966 // 2^176 * 1 * G .quad 0xeb18b9ab7f5745c6 .quad 0x023a8aee5787c690 .quad 0xb72712da2df7afa9 .quad 0x36597d25ea5c013d .quad 0xa2b4dae0b5511c9a .quad 0x7ac860292bffff06 .quad 0x981f375df5504234 .quad 0x3f6bd725da4ea12d .quad 0x734d8d7b106058ac .quad 0xd940579e6fc6905f .quad 0x6466f8f99202932d .quad 0x7b7ecc19da60d6d0 // 2^176 * 2 * G .quad 0x78c2373c695c690d .quad 0xdd252e660642906e .quad 0x951d44444ae12bd2 .quad 0x4235ad7601743956 .quad 0x6dae4a51a77cfa9b .quad 0x82263654e7a38650 .quad 0x09bbffcd8f2d82db .quad 0x03bedc661bf5caba .quad 0x6258cb0d078975f5 .quad 0x492942549189f298 .quad 0xa0cab423e2e36ee4 .quad 0x0e7ce2b0cdf066a1 // 2^176 * 3 * G .quad 0xc494643ac48c85a3 .quad 0xfd361df43c6139ad .quad 0x09db17dd3ae94d48 .quad 0x666e0a5d8fb4674a .quad 0xfea6fedfd94b70f9 .quad 0xf130c051c1fcba2d .quad 0x4882d47e7f2fab89 .quad 0x615256138aeceeb5 .quad 0x2abbf64e4870cb0d .quad 0xcd65bcf0aa458b6b .quad 0x9abe4eba75e8985d .quad 0x7f0bc810d514dee4 // 2^176 * 4 * G .quad 
0xb9006ba426f4136f .quad 0x8d67369e57e03035 .quad 0xcbc8dfd94f463c28 .quad 0x0d1f8dbcf8eedbf5 .quad 0x83ac9dad737213a0 .quad 0x9ff6f8ba2ef72e98 .quad 0x311e2edd43ec6957 .quad 0x1d3a907ddec5ab75 .quad 0xba1693313ed081dc .quad 0x29329fad851b3480 .quad 0x0128013c030321cb .quad 0x00011b44a31bfde3 // 2^176 * 5 * G .quad 0x3fdfa06c3fc66c0c .quad 0x5d40e38e4dd60dd2 .quad 0x7ae38b38268e4d71 .quad 0x3ac48d916e8357e1 .quad 0x16561f696a0aa75c .quad 0xc1bf725c5852bd6a .quad 0x11a8dd7f9a7966ad .quad 0x63d988a2d2851026 .quad 0x00120753afbd232e .quad 0xe92bceb8fdd8f683 .quad 0xf81669b384e72b91 .quad 0x33fad52b2368a066 // 2^176 * 6 * G .quad 0x540649c6c5e41e16 .quad 0x0af86430333f7735 .quad 0xb2acfcd2f305e746 .quad 0x16c0f429a256dca7 .quad 0x8d2cc8d0c422cfe8 .quad 0x072b4f7b05a13acb .quad 0xa3feb6e6ecf6a56f .quad 0x3cc355ccb90a71e2 .quad 0xe9b69443903e9131 .quad 0xb8a494cb7a5637ce .quad 0xc87cd1a4baba9244 .quad 0x631eaf426bae7568 // 2^176 * 7 * G .quad 0xb3e90410da66fe9f .quad 0x85dd4b526c16e5a6 .quad 0xbc3d97611ef9bf83 .quad 0x5599648b1ea919b5 .quad 0x47d975b9a3700de8 .quad 0x7280c5fbe2f80552 .quad 0x53658f2732e45de1 .quad 0x431f2c7f665f80b5 .quad 0xd6026344858f7b19 .quad 0x14ab352fa1ea514a .quad 0x8900441a2090a9d7 .quad 0x7b04715f91253b26 // 2^176 * 8 * G .quad 0x83edbd28acf6ae43 .quad 0x86357c8b7d5c7ab4 .quad 0xc0404769b7eb2c44 .quad 0x59b37bf5c2f6583f .quad 0xb376c280c4e6bac6 .quad 0x970ed3dd6d1d9b0b .quad 0xb09a9558450bf944 .quad 0x48d0acfa57cde223 .quad 0xb60f26e47dabe671 .quad 0xf1d1a197622f3a37 .quad 0x4208ce7ee9960394 .quad 0x16234191336d3bdb // 2^180 * 1 * G .quad 0xf19aeac733a63aef .quad 0x2c7fba5d4442454e .quad 0x5da87aa04795e441 .quad 0x413051e1a4e0b0f5 .quad 0x852dd1fd3d578bbe .quad 0x2b65ce72c3286108 .quad 0x658c07f4eace2273 .quad 0x0933f804ec38ab40 .quad 0xa7ab69798d496476 .quad 0x8121aadefcb5abc8 .quad 0xa5dc12ef7b539472 .quad 0x07fd47065e45351a // 2^180 * 2 * G .quad 0xc8583c3d258d2bcd .quad 0x17029a4daf60b73f .quad 0xfa0fc9d6416a3781 .quad 0x1c1e5fba38b3fb23 .quad 0x304211559ae8e7c3 .quad 0xf281b229944882a5 .quad 0x8a13ac2e378250e4 .quad 0x014afa0954ba48f4 .quad 0xcb3197001bb3666c .quad 0x330060524bffecb9 .quad 0x293711991a88233c .quad 0x291884363d4ed364 // 2^180 * 3 * G .quad 0x033c6805dc4babfa .quad 0x2c15bf5e5596ecc1 .quad 0x1bc70624b59b1d3b .quad 0x3ede9850a19f0ec5 .quad 0xfb9d37c3bc1ab6eb .quad 0x02be14534d57a240 .quad 0xf4d73415f8a5e1f6 .quad 0x5964f4300ccc8188 .quad 0xe44a23152d096800 .quad 0x5c08c55970866996 .quad 0xdf2db60a46affb6e .quad 0x579155c1f856fd89 // 2^180 * 4 * G .quad 0x96324edd12e0c9ef .quad 0x468b878df2420297 .quad 0x199a3776a4f573be .quad 0x1e7fbcf18e91e92a .quad 0xb5f16b630817e7a6 .quad 0x808c69233c351026 .quad 0x324a983b54cef201 .quad 0x53c092084a485345 .quad 0xd2d41481f1cbafbf .quad 0x231d2db6716174e5 .quad 0x0b7d7656e2a55c98 .quad 0x3e955cd82aa495f6 // 2^180 * 5 * G .quad 0xe48f535e3ed15433 .quad 0xd075692a0d7270a3 .quad 0x40fbd21daade6387 .quad 0x14264887cf4495f5 .quad 0xab39f3ef61bb3a3f .quad 0x8eb400652eb9193e .quad 0xb5de6ecc38c11f74 .quad 0x654d7e9626f3c49f .quad 0xe564cfdd5c7d2ceb .quad 0x82eeafded737ccb9 .quad 0x6107db62d1f9b0ab .quad 0x0b6baac3b4358dbb // 2^180 * 6 * G .quad 0x7ae62bcb8622fe98 .quad 0x47762256ceb891af .quad 0x1a5a92bcf2e406b4 .quad 0x7d29401784e41501 .quad 0x204abad63700a93b .quad 0xbe0023d3da779373 .quad 0xd85f0346633ab709 .quad 0x00496dc490820412 .quad 0x1c74b88dc27e6360 .quad 0x074854268d14850c .quad 0xa145fb7b3e0dcb30 .quad 0x10843f1b43803b23 // 2^180 * 7 * G .quad 0xc5f90455376276dd .quad 0xce59158dd7645cd9 .quad 
0x92f65d511d366b39 .quad 0x11574b6e526996c4 .quad 0xd56f672de324689b .quad 0xd1da8aedb394a981 .quad 0xdd7b58fe9168cfed .quad 0x7ce246cd4d56c1e8 .quad 0xb8f4308e7f80be53 .quad 0x5f3cb8cb34a9d397 .quad 0x18a961bd33cc2b2c .quad 0x710045fb3a9af671 // 2^180 * 8 * G .quad 0x73f93d36101b95eb .quad 0xfaef33794f6f4486 .quad 0x5651735f8f15e562 .quad 0x7fa3f19058b40da1 .quad 0xa03fc862059d699e .quad 0x2370cfa19a619e69 .quad 0xc4fe3b122f823deb .quad 0x1d1b056fa7f0844e .quad 0x1bc64631e56bf61f .quad 0xd379ab106e5382a3 .quad 0x4d58c57e0540168d .quad 0x566256628442d8e4 // 2^184 * 1 * G .quad 0xb9e499def6267ff6 .quad 0x7772ca7b742c0843 .quad 0x23a0153fe9a4f2b1 .quad 0x2cdfdfecd5d05006 .quad 0xdd499cd61ff38640 .quad 0x29cd9bc3063625a0 .quad 0x51e2d8023dd73dc3 .quad 0x4a25707a203b9231 .quad 0x2ab7668a53f6ed6a .quad 0x304242581dd170a1 .quad 0x4000144c3ae20161 .quad 0x5721896d248e49fc // 2^184 * 2 * G .quad 0x0b6e5517fd181bae .quad 0x9022629f2bb963b4 .quad 0x5509bce932064625 .quad 0x578edd74f63c13da .quad 0x285d5091a1d0da4e .quad 0x4baa6fa7b5fe3e08 .quad 0x63e5177ce19393b3 .quad 0x03c935afc4b030fd .quad 0x997276c6492b0c3d .quad 0x47ccc2c4dfe205fc .quad 0xdcd29b84dd623a3c .quad 0x3ec2ab590288c7a2 // 2^184 * 3 * G .quad 0xa1a0d27be4d87bb9 .quad 0xa98b4deb61391aed .quad 0x99a0ddd073cb9b83 .quad 0x2dd5c25a200fcace .quad 0xa7213a09ae32d1cb .quad 0x0f2b87df40f5c2d5 .quad 0x0baea4c6e81eab29 .quad 0x0e1bf66c6adbac5e .quad 0xe2abd5e9792c887e .quad 0x1a020018cb926d5d .quad 0xbfba69cdbaae5f1e .quad 0x730548b35ae88f5f // 2^184 * 4 * G .quad 0xc43551a3cba8b8ee .quad 0x65a26f1db2115f16 .quad 0x760f4f52ab8c3850 .quad 0x3043443b411db8ca .quad 0x805b094ba1d6e334 .quad 0xbf3ef17709353f19 .quad 0x423f06cb0622702b .quad 0x585a2277d87845dd .quad 0xa18a5f8233d48962 .quad 0x6698c4b5ec78257f .quad 0xa78e6fa5373e41ff .quad 0x7656278950ef981f // 2^184 * 5 * G .quad 0x38c3cf59d51fc8c0 .quad 0x9bedd2fd0506b6f2 .quad 0x26bf109fab570e8f .quad 0x3f4160a8c1b846a6 .quad 0xe17073a3ea86cf9d .quad 0x3a8cfbb707155fdc .quad 0x4853e7fc31838a8e .quad 0x28bbf484b613f616 .quad 0xf2612f5c6f136c7c .quad 0xafead107f6dd11be .quad 0x527e9ad213de6f33 .quad 0x1e79cb358188f75d // 2^184 * 6 * G .quad 0x013436c3eef7e3f1 .quad 0x828b6a7ffe9e10f8 .quad 0x7ff908e5bcf9defc .quad 0x65d7951b3a3b3831 .quad 0x77e953d8f5e08181 .quad 0x84a50c44299dded9 .quad 0xdc6c2d0c864525e5 .quad 0x478ab52d39d1f2f4 .quad 0x66a6a4d39252d159 .quad 0xe5dde1bc871ac807 .quad 0xb82c6b40a6c1c96f .quad 0x16d87a411a212214 // 2^184 * 7 * G .quad 0xb3bd7e5a42066215 .quad 0x879be3cd0c5a24c1 .quad 0x57c05db1d6f994b7 .quad 0x28f87c8165f38ca6 .quad 0xfba4d5e2d54e0583 .quad 0xe21fafd72ebd99fa .quad 0x497ac2736ee9778f .quad 0x1f990b577a5a6dde .quad 0xa3344ead1be8f7d6 .quad 0x7d1e50ebacea798f .quad 0x77c6569e520de052 .quad 0x45882fe1534d6d3e // 2^184 * 8 * G .quad 0x6669345d757983d6 .quad 0x62b6ed1117aa11a6 .quad 0x7ddd1857985e128f .quad 0x688fe5b8f626f6dd .quad 0xd8ac9929943c6fe4 .quad 0xb5f9f161a38392a2 .quad 0x2699db13bec89af3 .quad 0x7dcf843ce405f074 .quad 0x6c90d6484a4732c0 .quad 0xd52143fdca563299 .quad 0xb3be28c3915dc6e1 .quad 0x6739687e7327191b // 2^188 * 1 * G .quad 0x9f65c5ea200814cf .quad 0x840536e169a31740 .quad 0x8b0ed13925c8b4ad .quad 0x0080dbafe936361d .quad 0x8ce5aad0c9cb971f .quad 0x1156aaa99fd54a29 .quad 0x41f7247015af9b78 .quad 0x1fe8cca8420f49aa .quad 0x72a1848f3c0cc82a .quad 0x38c560c2877c9e54 .quad 0x5004e228ce554140 .quad 0x042418a103429d71 // 2^188 * 2 * G .quad 0x899dea51abf3ff5f .quad 0x9b93a8672fc2d8ba .quad 0x2c38cb97be6ebd5c .quad 0x114d578497263b5d .quad 
0x58e84c6f20816247 .quad 0x8db2b2b6e36fd793 .quad 0x977182561d484d85 .quad 0x0822024f8632abd7 .quad 0xb301bb7c6b1beca3 .quad 0x55393f6dc6eb1375 .quad 0x910d281097b6e4eb .quad 0x1ad4548d9d479ea3 // 2^188 * 3 * G .quad 0xcd5a7da0389a48fd .quad 0xb38fa4aa9a78371e .quad 0xc6d9761b2cdb8e6c .quad 0x35cf51dbc97e1443 .quad 0xa06fe66d0fe9fed3 .quad 0xa8733a401c587909 .quad 0x30d14d800df98953 .quad 0x41ce5876c7b30258 .quad 0x59ac3bc5d670c022 .quad 0xeae67c109b119406 .quad 0x9798bdf0b3782fda .quad 0x651e3201fd074092 // 2^188 * 4 * G .quad 0xd63d8483ef30c5cf .quad 0x4cd4b4962361cc0c .quad 0xee90e500a48426ac .quad 0x0af51d7d18c14eeb .quad 0xa57ba4a01efcae9e .quad 0x769f4beedc308a94 .quad 0xd1f10eeb3603cb2e .quad 0x4099ce5e7e441278 .quad 0x1ac98e4f8a5121e9 .quad 0x7dae9544dbfa2fe0 .quad 0x8320aa0dd6430df9 .quad 0x667282652c4a2fb5 // 2^188 * 5 * G .quad 0x874621f4d86bc9ab .quad 0xb54c7bbe56fe6fea .quad 0x077a24257fadc22c .quad 0x1ab53be419b90d39 .quad 0xada8b6e02946db23 .quad 0x1c0ce51a7b253ab7 .quad 0x8448c85a66dd485b .quad 0x7f1fc025d0675adf .quad 0xd8ee1b18319ea6aa .quad 0x004d88083a21f0da .quad 0x3bd6aa1d883a4f4b .quad 0x4db9a3a6dfd9fd14 // 2^188 * 6 * G .quad 0x8ce7b23bb99c0755 .quad 0x35c5d6edc4f50f7a .quad 0x7e1e2ed2ed9b50c3 .quad 0x36305f16e8934da1 .quad 0xd95b00bbcbb77c68 .quad 0xddbc846a91f17849 .quad 0x7cf700aebe28d9b3 .quad 0x5ce1285c85d31f3e .quad 0x31b6972d98b0bde8 .quad 0x7d920706aca6de5b .quad 0xe67310f8908a659f .quad 0x50fac2a6efdf0235 // 2^188 * 7 * G .quad 0xf3d3a9f35b880f5a .quad 0xedec050cdb03e7c2 .quad 0xa896981ff9f0b1a2 .quad 0x49a4ae2bac5e34a4 .quad 0x295b1c86f6f449bc .quad 0x51b2e84a1f0ab4dd .quad 0xc001cb30aa8e551d .quad 0x6a28d35944f43662 .quad 0x28bb12ee04a740e0 .quad 0x14313bbd9bce8174 .quad 0x72f5b5e4e8c10c40 .quad 0x7cbfb19936adcd5b // 2^188 * 8 * G .quad 0xa311ddc26b89792d .quad 0x1b30b4c6da512664 .quad 0x0ca77b4ccf150859 .quad 0x1de443df1b009408 .quad 0x8e793a7acc36e6e0 .quad 0xf9fab7a37d586eed .quad 0x3a4f9692bae1f4e4 .quad 0x1c14b03eff5f447e .quad 0x19647bd114a85291 .quad 0x57b76cb21034d3af .quad 0x6329db440f9d6dfa .quad 0x5ef43e586a571493 // 2^192 * 1 * G .quad 0xef782014385675a6 .quad 0xa2649f30aafda9e8 .quad 0x4cd1eb505cdfa8cb .quad 0x46115aba1d4dc0b3 .quad 0xa66dcc9dc80c1ac0 .quad 0x97a05cf41b38a436 .quad 0xa7ebf3be95dbd7c6 .quad 0x7da0b8f68d7e7dab .quad 0xd40f1953c3b5da76 .quad 0x1dac6f7321119e9b .quad 0x03cc6021feb25960 .quad 0x5a5f887e83674b4b // 2^192 * 2 * G .quad 0x8f6301cf70a13d11 .quad 0xcfceb815350dd0c4 .quad 0xf70297d4a4bca47e .quad 0x3669b656e44d1434 .quad 0x9e9628d3a0a643b9 .quad 0xb5c3cb00e6c32064 .quad 0x9b5302897c2dec32 .quad 0x43e37ae2d5d1c70c .quad 0x387e3f06eda6e133 .quad 0x67301d5199a13ac0 .quad 0xbd5ad8f836263811 .quad 0x6a21e6cd4fd5e9be // 2^192 * 3 * G .quad 0xf1c6170a3046e65f .quad 0x58712a2a00d23524 .quad 0x69dbbd3c8c82b755 .quad 0x586bf9f1a195ff57 .quad 0xef4129126699b2e3 .quad 0x71d30847708d1301 .quad 0x325432d01182b0bd .quad 0x45371b07001e8b36 .quad 0xa6db088d5ef8790b .quad 0x5278f0dc610937e5 .quad 0xac0349d261a16eb8 .quad 0x0eafb03790e52179 // 2^192 * 4 * G .quad 0x960555c13748042f .quad 0x219a41e6820baa11 .quad 0x1c81f73873486d0c .quad 0x309acc675a02c661 .quad 0x5140805e0f75ae1d .quad 0xec02fbe32662cc30 .quad 0x2cebdf1eea92396d .quad 0x44ae3344c5435bb3 .quad 0x9cf289b9bba543ee .quad 0xf3760e9d5ac97142 .quad 0x1d82e5c64f9360aa .quad 0x62d5221b7f94678f // 2^192 * 5 * G .quad 0x524c299c18d0936d .quad 0xc86bb56c8a0c1a0c .quad 0xa375052edb4a8631 .quad 0x5c0efde4bc754562 .quad 0x7585d4263af77a3c .quad 0xdfae7b11fee9144d .quad 
0xa506708059f7193d .quad 0x14f29a5383922037 .quad 0xdf717edc25b2d7f5 .quad 0x21f970db99b53040 .quad 0xda9234b7c3ed4c62 .quad 0x5e72365c7bee093e // 2^192 * 6 * G .quad 0x575bfc074571217f .quad 0x3779675d0694d95b .quad 0x9a0a37bbf4191e33 .quad 0x77f1104c47b4eabc .quad 0x7d9339062f08b33e .quad 0x5b9659e5df9f32be .quad 0xacff3dad1f9ebdfd .quad 0x70b20555cb7349b7 .quad 0xbe5113c555112c4c .quad 0x6688423a9a881fcd .quad 0x446677855e503b47 .quad 0x0e34398f4a06404a // 2^192 * 7 * G .quad 0xb67d22d93ecebde8 .quad 0x09b3e84127822f07 .quad 0x743fa61fb05b6d8d .quad 0x5e5405368a362372 .quad 0x18930b093e4b1928 .quad 0x7de3e10e73f3f640 .quad 0xf43217da73395d6f .quad 0x6f8aded6ca379c3e .quad 0xe340123dfdb7b29a .quad 0x487b97e1a21ab291 .quad 0xf9967d02fde6949e .quad 0x780de72ec8d3de97 // 2^192 * 8 * G .quad 0x0ae28545089ae7bc .quad 0x388ddecf1c7f4d06 .quad 0x38ac15510a4811b8 .quad 0x0eb28bf671928ce4 .quad 0x671feaf300f42772 .quad 0x8f72eb2a2a8c41aa .quad 0x29a17fd797373292 .quad 0x1defc6ad32b587a6 .quad 0xaf5bbe1aef5195a7 .quad 0x148c1277917b15ed .quad 0x2991f7fb7ae5da2e .quad 0x467d201bf8dd2867 // 2^196 * 1 * G .quad 0x7906ee72f7bd2e6b .quad 0x05d270d6109abf4e .quad 0x8d5cfe45b941a8a4 .quad 0x44c218671c974287 .quad 0x745f9d56296bc318 .quad 0x993580d4d8152e65 .quad 0xb0e5b13f5839e9ce .quad 0x51fc2b28d43921c0 .quad 0x1b8fd11795e2a98c .quad 0x1c4e5ee12b6b6291 .quad 0x5b30e7107424b572 .quad 0x6e6b9de84c4f4ac6 // 2^196 * 2 * G .quad 0xdff25fce4b1de151 .quad 0xd841c0c7e11c4025 .quad 0x2554b3c854749c87 .quad 0x2d292459908e0df9 .quad 0x6b7c5f10f80cb088 .quad 0x736b54dc56e42151 .quad 0xc2b620a5c6ef99c4 .quad 0x5f4c802cc3a06f42 .quad 0x9b65c8f17d0752da .quad 0x881ce338c77ee800 .quad 0xc3b514f05b62f9e3 .quad 0x66ed5dd5bec10d48 // 2^196 * 3 * G .quad 0x7d38a1c20bb2089d .quad 0x808334e196ccd412 .quad 0xc4a70b8c6c97d313 .quad 0x2eacf8bc03007f20 .quad 0xf0adf3c9cbca047d .quad 0x81c3b2cbf4552f6b .quad 0xcfda112d44735f93 .quad 0x1f23a0c77e20048c .quad 0xf235467be5bc1570 .quad 0x03d2d9020dbab38c .quad 0x27529aa2fcf9e09e .quad 0x0840bef29d34bc50 // 2^196 * 4 * G .quad 0x796dfb35dc10b287 .quad 0x27176bcd5c7ff29d .quad 0x7f3d43e8c7b24905 .quad 0x0304f5a191c54276 .quad 0xcd54e06b7f37e4eb .quad 0x8cc15f87f5e96cca .quad 0xb8248bb0d3597dce .quad 0x246affa06074400c .quad 0x37d88e68fbe45321 .quad 0x86097548c0d75032 .quad 0x4e9b13ef894a0d35 .quad 0x25a83cac5753d325 // 2^196 * 5 * G .quad 0x10222f48eed8165e .quad 0x623fc1234b8bcf3a .quad 0x1e145c09c221e8f0 .quad 0x7ccfa59fca782630 .quad 0x9f0f66293952b6e2 .quad 0x33db5e0e0934267b .quad 0xff45252bd609fedc .quad 0x06be10f5c506e0c9 .quad 0x1a9615a9b62a345f .quad 0x22050c564a52fecc .quad 0xa7a2788528bc0dfe .quad 0x5e82770a1a1ee71d // 2^196 * 6 * G .quad 0x35425183ad896a5c .quad 0xe8673afbe78d52f6 .quad 0x2c66f25f92a35f64 .quad 0x09d04f3b3b86b102 .quad 0xe802e80a42339c74 .quad 0x34175166a7fffae5 .quad 0x34865d1f1c408cae .quad 0x2cca982c605bc5ee .quad 0xfd2d5d35197dbe6e .quad 0x207c2eea8be4ffa3 .quad 0x2613d8db325ae918 .quad 0x7a325d1727741d3e // 2^196 * 7 * G .quad 0xd036b9bbd16dfde2 .quad 0xa2055757c497a829 .quad 0x8e6cc966a7f12667 .quad 0x4d3b1a791239c180 .quad 0xecd27d017e2a076a .quad 0xd788689f1636495e .quad 0x52a61af0919233e5 .quad 0x2a479df17bb1ae64 .quad 0x9e5eee8e33db2710 .quad 0x189854ded6c43ca5 .quad 0xa41c22c592718138 .quad 0x27ad5538a43a5e9b // 2^196 * 8 * G .quad 0x2746dd4b15350d61 .quad 0xd03fcbc8ee9521b7 .quad 0xe86e365a138672ca .quad 0x510e987f7e7d89e2 .quad 0xcb5a7d638e47077c .quad 0x8db7536120a1c059 .quad 0x549e1e4d8bedfdcc .quad 0x080153b7503b179d .quad 
0xdda69d930a3ed3e3 .quad 0x3d386ef1cd60a722 .quad 0xc817ad58bdaa4ee6 .quad 0x23be8d554fe7372a // 2^200 * 1 * G .quad 0x95fe919a74ef4fad .quad 0x3a827becf6a308a2 .quad 0x964e01d309a47b01 .quad 0x71c43c4f5ba3c797 .quad 0xbc1ef4bd567ae7a9 .quad 0x3f624cb2d64498bd .quad 0xe41064d22c1f4ec8 .quad 0x2ef9c5a5ba384001 .quad 0xb6fd6df6fa9e74cd .quad 0xf18278bce4af267a .quad 0x8255b3d0f1ef990e .quad 0x5a758ca390c5f293 // 2^200 * 2 * G .quad 0xa2b72710d9462495 .quad 0x3aa8c6d2d57d5003 .quad 0xe3d400bfa0b487ca .quad 0x2dbae244b3eb72ec .quad 0x8ce0918b1d61dc94 .quad 0x8ded36469a813066 .quad 0xd4e6a829afe8aad3 .quad 0x0a738027f639d43f .quad 0x980f4a2f57ffe1cc .quad 0x00670d0de1839843 .quad 0x105c3f4a49fb15fd .quad 0x2698ca635126a69c // 2^200 * 3 * G .quad 0xe765318832b0ba78 .quad 0x381831f7925cff8b .quad 0x08a81b91a0291fcc .quad 0x1fb43dcc49caeb07 .quad 0x2e3d702f5e3dd90e .quad 0x9e3f0918e4d25386 .quad 0x5e773ef6024da96a .quad 0x3c004b0c4afa3332 .quad 0x9aa946ac06f4b82b .quad 0x1ca284a5a806c4f3 .quad 0x3ed3265fc6cd4787 .quad 0x6b43fd01cd1fd217 // 2^200 * 4 * G .quad 0xc7a75d4b4697c544 .quad 0x15fdf848df0fffbf .quad 0x2868b9ebaa46785a .quad 0x5a68d7105b52f714 .quad 0xb5c742583e760ef3 .quad 0x75dc52b9ee0ab990 .quad 0xbf1427c2072b923f .quad 0x73420b2d6ff0d9f0 .quad 0xaf2cf6cb9e851e06 .quad 0x8f593913c62238c4 .quad 0xda8ab89699fbf373 .quad 0x3db5632fea34bc9e // 2^200 * 5 * G .quad 0xf46eee2bf75dd9d8 .quad 0x0d17b1f6396759a5 .quad 0x1bf2d131499e7273 .quad 0x04321adf49d75f13 .quad 0x2e4990b1829825d5 .quad 0xedeaeb873e9a8991 .quad 0xeef03d394c704af8 .quad 0x59197ea495df2b0e .quad 0x04e16019e4e55aae .quad 0xe77b437a7e2f92e9 .quad 0xc7ce2dc16f159aa4 .quad 0x45eafdc1f4d70cc0 // 2^200 * 6 * G .quad 0x698401858045d72b .quad 0x4c22faa2cf2f0651 .quad 0x941a36656b222dc6 .quad 0x5a5eebc80362dade .quad 0xb60e4624cfccb1ed .quad 0x59dbc292bd5c0395 .quad 0x31a09d1ddc0481c9 .quad 0x3f73ceea5d56d940 .quad 0xb7a7bfd10a4e8dc6 .quad 0xbe57007e44c9b339 .quad 0x60c1207f1557aefa .quad 0x26058891266218db // 2^200 * 7 * G .quad 0x59f704a68360ff04 .quad 0xc3d93fde7661e6f4 .quad 0x831b2a7312873551 .quad 0x54ad0c2e4e615d57 .quad 0x4c818e3cc676e542 .quad 0x5e422c9303ceccad .quad 0xec07cccab4129f08 .quad 0x0dedfa10b24443b8 .quad 0xee3b67d5b82b522a .quad 0x36f163469fa5c1eb .quad 0xa5b4d2f26ec19fd3 .quad 0x62ecb2baa77a9408 // 2^200 * 8 * G .quad 0xe5ed795261152b3d .quad 0x4962357d0eddd7d1 .quad 0x7482c8d0b96b4c71 .quad 0x2e59f919a966d8be .quad 0x92072836afb62874 .quad 0x5fcd5e8579e104a5 .quad 0x5aad01adc630a14a .quad 0x61913d5075663f98 .quad 0x0dc62d361a3231da .quad 0xfa47583294200270 .quad 0x02d801513f9594ce .quad 0x3ddbc2a131c05d5c // 2^204 * 1 * G .quad 0x3f50a50a4ffb81ef .quad 0xb1e035093bf420bf .quad 0x9baa8e1cc6aa2cd0 .quad 0x32239861fa237a40 .quad 0xfb735ac2004a35d1 .quad 0x31de0f433a6607c3 .quad 0x7b8591bfc528d599 .quad 0x55be9a25f5bb050c .quad 0x0d005acd33db3dbf .quad 0x0111b37c80ac35e2 .quad 0x4892d66c6f88ebeb .quad 0x770eadb16508fbcd // 2^204 * 2 * G .quad 0x8451f9e05e4e89dd .quad 0xc06302ffbc793937 .quad 0x5d22749556a6495c .quad 0x09a6755ca05603fb .quad 0xf1d3b681a05071b9 .quad 0x2207659a3592ff3a .quad 0x5f0169297881e40e .quad 0x16bedd0e86ba374e .quad 0x5ecccc4f2c2737b5 .quad 0x43b79e0c2dccb703 .quad 0x33e008bc4ec43df3 .quad 0x06c1b840f07566c0 // 2^204 * 3 * G .quad 0x7688a5c6a388f877 .quad 0x02a96c14deb2b6ac .quad 0x64c9f3431b8c2af8 .quad 0x3628435554a1eed6 .quad 0x69ee9e7f9b02805c .quad 0xcbff828a547d1640 .quad 0x3d93a869b2430968 .quad 0x46b7b8cd3fe26972 .quad 0xe9812086fe7eebe0 .quad 0x4cba6be72f515437 .quad 
0x1d04168b516efae9 .quad 0x5ea1391043982cb9 // 2^204 * 4 * G .quad 0x49125c9cf4702ee1 .quad 0x4520b71f8b25b32d .quad 0x33193026501fef7e .quad 0x656d8997c8d2eb2b .quad 0x6f2b3be4d5d3b002 .quad 0xafec33d96a09c880 .quad 0x035f73a4a8bcc4cc .quad 0x22c5b9284662198b .quad 0xcb58c8fe433d8939 .quad 0x89a0cb2e6a8d7e50 .quad 0x79ca955309fbbe5a .quad 0x0c626616cd7fc106 // 2^204 * 5 * G .quad 0x1ffeb80a4879b61f .quad 0x6396726e4ada21ed .quad 0x33c7b093368025ba .quad 0x471aa0c6f3c31788 .quad 0x8fdfc379fbf454b1 .quad 0x45a5a970f1a4b771 .quad 0xac921ef7bad35915 .quad 0x42d088dca81c2192 .quad 0x8fda0f37a0165199 .quad 0x0adadb77c8a0e343 .quad 0x20fbfdfcc875e820 .quad 0x1cf2bea80c2206e7 // 2^204 * 6 * G .quad 0xc2ddf1deb36202ac .quad 0x92a5fe09d2e27aa5 .quad 0x7d1648f6fc09f1d3 .quad 0x74c2cc0513bc4959 .quad 0x982d6e1a02c0412f .quad 0x90fa4c83db58e8fe .quad 0x01c2f5bcdcb18bc0 .quad 0x686e0c90216abc66 .quad 0x1fadbadba54395a7 .quad 0xb41a02a0ae0da66a .quad 0xbf19f598bba37c07 .quad 0x6a12b8acde48430d // 2^204 * 7 * G .quad 0xf8daea1f39d495d9 .quad 0x592c190e525f1dfc .quad 0xdb8cbd04c9991d1b .quad 0x11f7fda3d88f0cb7 .quad 0x793bdd801aaeeb5f .quad 0x00a2a0aac1518871 .quad 0xe8a373a31f2136b4 .quad 0x48aab888fc91ef19 .quad 0x041f7e925830f40e .quad 0x002d6ca979661c06 .quad 0x86dc9ff92b046a2e .quad 0x760360928b0493d1 // 2^204 * 8 * G .quad 0x21bb41c6120cf9c6 .quad 0xeab2aa12decda59b .quad 0xc1a72d020aa48b34 .quad 0x215d4d27e87d3b68 .quad 0xb43108e5695a0b05 .quad 0x6cb00ee8ad37a38b .quad 0x5edad6eea3537381 .quad 0x3f2602d4b6dc3224 .quad 0xc8b247b65bcaf19c .quad 0x49779dc3b1b2c652 .quad 0x89a180bbd5ece2e2 .quad 0x13f098a3cec8e039 // 2^208 * 1 * G .quad 0x9adc0ff9ce5ec54b .quad 0x039c2a6b8c2f130d .quad 0x028007c7f0f89515 .quad 0x78968314ac04b36b .quad 0xf3aa57a22796bb14 .quad 0x883abab79b07da21 .quad 0xe54be21831a0391c .quad 0x5ee7fb38d83205f9 .quad 0x538dfdcb41446a8e .quad 0xa5acfda9434937f9 .quad 0x46af908d263c8c78 .quad 0x61d0633c9bca0d09 // 2^208 * 2 * G .quad 0x63744935ffdb2566 .quad 0xc5bd6b89780b68bb .quad 0x6f1b3280553eec03 .quad 0x6e965fd847aed7f5 .quad 0xada328bcf8fc73df .quad 0xee84695da6f037fc .quad 0x637fb4db38c2a909 .quad 0x5b23ac2df8067bdc .quad 0x9ad2b953ee80527b .quad 0xe88f19aafade6d8d .quad 0x0e711704150e82cf .quad 0x79b9bbb9dd95dedc // 2^208 * 3 * G .quad 0xebb355406a3126c2 .quad 0xd26383a868c8c393 .quad 0x6c0c6429e5b97a82 .quad 0x5065f158c9fd2147 .quad 0xd1997dae8e9f7374 .quad 0xa032a2f8cfbb0816 .quad 0xcd6cba126d445f0a .quad 0x1ba811460accb834 .quad 0x708169fb0c429954 .quad 0xe14600acd76ecf67 .quad 0x2eaab98a70e645ba .quad 0x3981f39e58a4faf2 // 2^208 * 4 * G .quad 0x18fb8a7559230a93 .quad 0x1d168f6960e6f45d .quad 0x3a85a94514a93cb5 .quad 0x38dc083705acd0fd .quad 0xc845dfa56de66fde .quad 0xe152a5002c40483a .quad 0xe9d2e163c7b4f632 .quad 0x30f4452edcbc1b65 .quad 0x856d2782c5759740 .quad 0xfa134569f99cbecc .quad 0x8844fc73c0ea4e71 .quad 0x632d9a1a593f2469 // 2^208 * 5 * G .quad 0xf6bb6b15b807cba6 .quad 0x1823c7dfbc54f0d7 .quad 0xbb1d97036e29670b .quad 0x0b24f48847ed4a57 .quad 0xbf09fd11ed0c84a7 .quad 0x63f071810d9f693a .quad 0x21908c2d57cf8779 .quad 0x3a5a7df28af64ba2 .quad 0xdcdad4be511beac7 .quad 0xa4538075ed26ccf2 .quad 0xe19cff9f005f9a65 .quad 0x34fcf74475481f63 // 2^208 * 6 * G .quad 0xc197e04c789767ca .quad 0xb8714dcb38d9467d .quad 0x55de888283f95fa8 .quad 0x3d3bdc164dfa63f7 .quad 0xa5bb1dab78cfaa98 .quad 0x5ceda267190b72f2 .quad 0x9309c9110a92608e .quad 0x0119a3042fb374b0 .quad 0x67a2d89ce8c2177d .quad 0x669da5f66895d0c1 .quad 0xf56598e5b282a2b0 .quad 0x56c088f1ede20a73 // 2^208 * 7 * G .quad 
0x336d3d1110a86e17 .quad 0xd7f388320b75b2fa .quad 0xf915337625072988 .quad 0x09674c6b99108b87 .quad 0x581b5fac24f38f02 .quad 0xa90be9febae30cbd .quad 0x9a2169028acf92f0 .quad 0x038b7ea48359038f .quad 0x9f4ef82199316ff8 .quad 0x2f49d282eaa78d4f .quad 0x0971a5ab5aef3174 .quad 0x6e5e31025969eb65 // 2^208 * 8 * G .quad 0xb16c62f587e593fb .quad 0x4999eddeca5d3e71 .quad 0xb491c1e014cc3e6d .quad 0x08f5114789a8dba8 .quad 0x3304fb0e63066222 .quad 0xfb35068987acba3f .quad 0xbd1924778c1061a3 .quad 0x3058ad43d1838620 .quad 0x323c0ffde57663d0 .quad 0x05c3df38a22ea610 .quad 0xbdc78abdac994f9a .quad 0x26549fa4efe3dc99 // 2^212 * 1 * G .quad 0x738b38d787ce8f89 .quad 0xb62658e24179a88d .quad 0x30738c9cf151316d .quad 0x49128c7f727275c9 .quad 0x04dbbc17f75396b9 .quad 0x69e6a2d7d2f86746 .quad 0xc6409d99f53eabc6 .quad 0x606175f6332e25d2 .quad 0x4021370ef540e7dd .quad 0x0910d6f5a1f1d0a5 .quad 0x4634aacd5b06b807 .quad 0x6a39e6356944f235 // 2^212 * 2 * G .quad 0x96cd5640df90f3e7 .quad 0x6c3a760edbfa25ea .quad 0x24f3ef0959e33cc4 .quad 0x42889e7e530d2e58 .quad 0x1da1965774049e9d .quad 0xfbcd6ea198fe352b .quad 0xb1cbcd50cc5236a6 .quad 0x1f5ec83d3f9846e2 .quad 0x8efb23c3328ccb75 .quad 0xaf42a207dd876ee9 .quad 0x20fbdadc5dfae796 .quad 0x241e246b06bf9f51 // 2^212 * 3 * G .quad 0x29e68e57ad6e98f6 .quad 0x4c9260c80b462065 .quad 0x3f00862ea51ebb4b .quad 0x5bc2c77fb38d9097 .quad 0x7eaafc9a6280bbb8 .quad 0x22a70f12f403d809 .quad 0x31ce40bb1bfc8d20 .quad 0x2bc65635e8bd53ee .quad 0xe8d5dc9fa96bad93 .quad 0xe58fb17dde1947dc .quad 0x681532ea65185fa3 .quad 0x1fdd6c3b034a7830 // 2^212 * 4 * G .quad 0x0a64e28c55dc18fe .quad 0xe3df9e993399ebdd .quad 0x79ac432370e2e652 .quad 0x35ff7fc33ae4cc0e .quad 0x9c13a6a52dd8f7a9 .quad 0x2dbb1f8c3efdcabf .quad 0x961e32405e08f7b5 .quad 0x48c8a121bbe6c9e5 .quad 0xfc415a7c59646445 .quad 0xd224b2d7c128b615 .quad 0x6035c9c905fbb912 .quad 0x42d7a91274429fab // 2^212 * 5 * G .quad 0x4e6213e3eaf72ed3 .quad 0x6794981a43acd4e7 .quad 0xff547cde6eb508cb .quad 0x6fed19dd10fcb532 .quad 0xa9a48947933da5bc .quad 0x4a58920ec2e979ec .quad 0x96d8800013e5ac4c .quad 0x453692d74b48b147 .quad 0xdd775d99a8559c6f .quad 0xf42a2140df003e24 .quad 0x5223e229da928a66 .quad 0x063f46ba6d38f22c // 2^212 * 6 * G .quad 0xd2d242895f536694 .quad 0xca33a2c542939b2c .quad 0x986fada6c7ddb95c .quad 0x5a152c042f712d5d .quad 0x39843cb737346921 .quad 0xa747fb0738c89447 .quad 0xcb8d8031a245307e .quad 0x67810f8e6d82f068 .quad 0x3eeb8fbcd2287db4 .quad 0x72c7d3a301a03e93 .quad 0x5473e88cbd98265a .quad 0x7324aa515921b403 // 2^212 * 7 * G .quad 0x857942f46c3cbe8e .quad 0xa1d364b14730c046 .quad 0x1c8ed914d23c41bf .quad 0x0838e161eef6d5d2 .quad 0xad23f6dae82354cb .quad 0x6962502ab6571a6d .quad 0x9b651636e38e37d1 .quad 0x5cac5005d1a3312f .quad 0x8cc154cce9e39904 .quad 0x5b3a040b84de6846 .quad 0xc4d8a61cb1be5d6e .quad 0x40fb897bd8861f02 // 2^212 * 8 * G .quad 0x84c5aa9062de37a1 .quad 0x421da5000d1d96e1 .quad 0x788286306a9242d9 .quad 0x3c5e464a690d10da .quad 0xe57ed8475ab10761 .quad 0x71435e206fd13746 .quad 0x342f824ecd025632 .quad 0x4b16281ea8791e7b .quad 0xd1c101d50b813381 .quad 0xdee60f1176ee6828 .quad 0x0cb68893383f6409 .quad 0x6183c565f6ff484a // 2^216 * 1 * G .quad 0x741d5a461e6bf9d6 .quad 0x2305b3fc7777a581 .quad 0xd45574a26474d3d9 .quad 0x1926e1dc6401e0ff .quad 0xdb468549af3f666e .quad 0xd77fcf04f14a0ea5 .quad 0x3df23ff7a4ba0c47 .quad 0x3a10dfe132ce3c85 .quad 0xe07f4e8aea17cea0 .quad 0x2fd515463a1fc1fd .quad 0x175322fd31f2c0f1 .quad 0x1fa1d01d861e5d15 // 2^216 * 2 * G .quad 0xcc8055947d599832 .quad 0x1e4656da37f15520 .quad 
0x99f6f7744e059320 .quad 0x773563bc6a75cf33 .quad 0x38dcac00d1df94ab .quad 0x2e712bddd1080de9 .quad 0x7f13e93efdd5e262 .quad 0x73fced18ee9a01e5 .quad 0x06b1e90863139cb3 .quad 0xa493da67c5a03ecd .quad 0x8d77cec8ad638932 .quad 0x1f426b701b864f44 // 2^216 * 3 * G .quad 0xefc9264c41911c01 .quad 0xf1a3b7b817a22c25 .quad 0x5875da6bf30f1447 .quad 0x4e1af5271d31b090 .quad 0xf17e35c891a12552 .quad 0xb76b8153575e9c76 .quad 0xfa83406f0d9b723e .quad 0x0b76bb1b3fa7e438 .quad 0x08b8c1f97f92939b .quad 0xbe6771cbd444ab6e .quad 0x22e5646399bb8017 .quad 0x7b6dd61eb772a955 // 2^216 * 4 * G .quad 0xb7adc1e850f33d92 .quad 0x7998fa4f608cd5cf .quad 0xad962dbd8dfc5bdb .quad 0x703e9bceaf1d2f4f .quad 0x5730abf9ab01d2c7 .quad 0x16fb76dc40143b18 .quad 0x866cbe65a0cbb281 .quad 0x53fa9b659bff6afe .quad 0x6c14c8e994885455 .quad 0x843a5d6665aed4e5 .quad 0x181bb73ebcd65af1 .quad 0x398d93e5c4c61f50 // 2^216 * 5 * G .quad 0x1c4bd16733e248f3 .quad 0xbd9e128715bf0a5f .quad 0xd43f8cf0a10b0376 .quad 0x53b09b5ddf191b13 .quad 0xc3877c60d2e7e3f2 .quad 0x3b34aaa030828bb1 .quad 0x283e26e7739ef138 .quad 0x699c9c9002c30577 .quad 0xf306a7235946f1cc .quad 0x921718b5cce5d97d .quad 0x28cdd24781b4e975 .quad 0x51caf30c6fcdd907 // 2^216 * 6 * G .quad 0xa60ba7427674e00a .quad 0x630e8570a17a7bf3 .quad 0x3758563dcf3324cc .quad 0x5504aa292383fdaa .quad 0x737af99a18ac54c7 .quad 0x903378dcc51cb30f .quad 0x2b89bc334ce10cc7 .quad 0x12ae29c189f8e99a .quad 0xa99ec0cb1f0d01cf .quad 0x0dd1efcc3a34f7ae .quad 0x55ca7521d09c4e22 .quad 0x5fd14fe958eba5ea // 2^216 * 7 * G .quad 0xb5dc2ddf2845ab2c .quad 0x069491b10a7fe993 .quad 0x4daaf3d64002e346 .quad 0x093ff26e586474d1 .quad 0x3c42fe5ebf93cb8e .quad 0xbedfa85136d4565f .quad 0xe0f0859e884220e8 .quad 0x7dd73f960725d128 .quad 0xb10d24fe68059829 .quad 0x75730672dbaf23e5 .quad 0x1367253ab457ac29 .quad 0x2f59bcbc86b470a4 // 2^216 * 8 * G .quad 0x83847d429917135f .quad 0xad1b911f567d03d7 .quad 0x7e7748d9be77aad1 .quad 0x5458b42e2e51af4a .quad 0x7041d560b691c301 .quad 0x85201b3fadd7e71e .quad 0x16c2e16311335585 .quad 0x2aa55e3d010828b1 .quad 0xed5192e60c07444f .quad 0x42c54e2d74421d10 .quad 0x352b4c82fdb5c864 .quad 0x13e9004a8a768664 // 2^220 * 1 * G .quad 0xcbb5b5556c032bff .quad 0xdf7191b729297a3a .quad 0xc1ff7326aded81bb .quad 0x71ade8bb68be03f5 .quad 0x1e6284c5806b467c .quad 0xc5f6997be75d607b .quad 0x8b67d958b378d262 .quad 0x3d88d66a81cd8b70 .quad 0x8b767a93204ed789 .quad 0x762fcacb9fa0ae2a .quad 0x771febcc6dce4887 .quad 0x343062158ff05fb3 // 2^220 * 2 * G .quad 0xe05da1a7e1f5bf49 .quad 0x26457d6dd4736092 .quad 0x77dcb07773cc32f6 .quad 0x0a5d94969cdd5fcd .quad 0xfce219072a7b31b4 .quad 0x4d7adc75aa578016 .quad 0x0ec276a687479324 .quad 0x6d6d9d5d1fda4beb .quad 0x22b1a58ae9b08183 .quad 0xfd95d071c15c388b .quad 0xa9812376850a0517 .quad 0x33384cbabb7f335e // 2^220 * 3 * G .quad 0x3c6fa2680ca2c7b5 .quad 0x1b5082046fb64fda .quad 0xeb53349c5431d6de .quad 0x5278b38f6b879c89 .quad 0x33bc627a26218b8d .quad 0xea80b21fc7a80c61 .quad 0x9458b12b173e9ee6 .quad 0x076247be0e2f3059 .quad 0x52e105f61416375a .quad 0xec97af3685abeba4 .quad 0x26e6b50623a67c36 .quad 0x5cf0e856f3d4fb01 // 2^220 * 4 * G .quad 0xf6c968731ae8cab4 .quad 0x5e20741ecb4f92c5 .quad 0x2da53be58ccdbc3e .quad 0x2dddfea269970df7 .quad 0xbeaece313db342a8 .quad 0xcba3635b842db7ee .quad 0xe88c6620817f13ef .quad 0x1b9438aa4e76d5c6 .quad 0x8a50777e166f031a .quad 0x067b39f10fb7a328 .quad 0x1925c9a6010fbd76 .quad 0x6df9b575cc740905 // 2^220 * 5 * G .quad 0x42c1192927f6bdcf .quad 0x8f91917a403d61ca .quad 0xdc1c5a668b9e1f61 .quad 0x1596047804ec0f8d .quad 
0xecdfc35b48cade41 .quad 0x6a88471fb2328270 .quad 0x740a4a2440a01b6a .quad 0x471e5796003b5f29 .quad 0xda96bbb3aced37ac .quad 0x7a2423b5e9208cea .quad 0x24cc5c3038aebae2 .quad 0x50c356afdc5dae2f // 2^220 * 6 * G .quad 0x09dcbf4341c30318 .quad 0xeeba061183181dce .quad 0xc179c0cedc1e29a1 .quad 0x1dbf7b89073f35b0 .quad 0xcfed9cdf1b31b964 .quad 0xf486a9858ca51af3 .quad 0x14897265ea8c1f84 .quad 0x784a53dd932acc00 .quad 0x2d99f9df14fc4920 .quad 0x76ccb60cc4499fe5 .quad 0xa4132cbbe5cf0003 .quad 0x3f93d82354f000ea // 2^220 * 7 * G .quad 0x8183e7689e04ce85 .quad 0x678fb71e04465341 .quad 0xad92058f6688edac .quad 0x5da350d3532b099a .quad 0xeaac12d179e14978 .quad 0xff923ff3bbebff5e .quad 0x4af663e40663ce27 .quad 0x0fd381a811a5f5ff .quad 0xf256aceca436df54 .quad 0x108b6168ae69d6e8 .quad 0x20d986cb6b5d036c .quad 0x655957b9fee2af50 // 2^220 * 8 * G .quad 0xaea8b07fa902030f .quad 0xf88c766af463d143 .quad 0x15b083663c787a60 .quad 0x08eab1148267a4a8 .quad 0xbdc1409bd002d0ac .quad 0x66660245b5ccd9a6 .quad 0x82317dc4fade85ec .quad 0x02fe934b6ad7df0d .quad 0xef5cf100cfb7ea74 .quad 0x22897633a1cb42ac .quad 0xd4ce0c54cef285e2 .quad 0x30408c048a146a55 // 2^224 * 1 * G .quad 0x739d8845832fcedb .quad 0xfa38d6c9ae6bf863 .quad 0x32bc0dcab74ffef7 .quad 0x73937e8814bce45e .quad 0xbb2e00c9193b877f .quad 0xece3a890e0dc506b .quad 0xecf3b7c036de649f .quad 0x5f46040898de9e1a .quad 0xb9037116297bf48d .quad 0xa9d13b22d4f06834 .quad 0xe19715574696bdc6 .quad 0x2cf8a4e891d5e835 // 2^224 * 2 * G .quad 0x6d93fd8707110f67 .quad 0xdd4c09d37c38b549 .quad 0x7cb16a4cc2736a86 .quad 0x2049bd6e58252a09 .quad 0x2cb5487e17d06ba2 .quad 0x24d2381c3950196b .quad 0xd7659c8185978a30 .quad 0x7a6f7f2891d6a4f6 .quad 0x7d09fd8d6a9aef49 .quad 0xf0ee60be5b3db90b .quad 0x4c21b52c519ebfd4 .quad 0x6011aadfc545941d // 2^224 * 3 * G .quad 0x5f67926dcf95f83c .quad 0x7c7e856171289071 .quad 0xd6a1e7f3998f7a5b .quad 0x6fc5cc1b0b62f9e0 .quad 0x63ded0c802cbf890 .quad 0xfbd098ca0dff6aaa .quad 0x624d0afdb9b6ed99 .quad 0x69ce18b779340b1e .quad 0xd1ef5528b29879cb .quad 0xdd1aae3cd47e9092 .quad 0x127e0442189f2352 .quad 0x15596b3ae57101f1 // 2^224 * 4 * G .quad 0x462739d23f9179a2 .quad 0xff83123197d6ddcf .quad 0x1307deb553f2148a .quad 0x0d2237687b5f4dda .quad 0x09ff31167e5124ca .quad 0x0be4158bd9c745df .quad 0x292b7d227ef556e5 .quad 0x3aa4e241afb6d138 .quad 0x2cc138bf2a3305f5 .quad 0x48583f8fa2e926c3 .quad 0x083ab1a25549d2eb .quad 0x32fcaa6e4687a36c // 2^224 * 5 * G .quad 0x7bc56e8dc57d9af5 .quad 0x3e0bd2ed9df0bdf2 .quad 0xaac014de22efe4a3 .quad 0x4627e9cefebd6a5c .quad 0x3207a4732787ccdf .quad 0x17e31908f213e3f8 .quad 0xd5b2ecd7f60d964e .quad 0x746f6336c2600be9 .quad 0x3f4af345ab6c971c .quad 0xe288eb729943731f .quad 0x33596a8a0344186d .quad 0x7b4917007ed66293 // 2^224 * 6 * G .quad 0x2d85fb5cab84b064 .quad 0x497810d289f3bc14 .quad 0x476adc447b15ce0c .quad 0x122ba376f844fd7b .quad 0x54341b28dd53a2dd .quad 0xaa17905bdf42fc3f .quad 0x0ff592d94dd2f8f4 .quad 0x1d03620fe08cd37d .quad 0xc20232cda2b4e554 .quad 0x9ed0fd42115d187f .quad 0x2eabb4be7dd479d9 .quad 0x02c70bf52b68ec4c // 2^224 * 7 * G .quad 0xa287ec4b5d0b2fbb .quad 0x415c5790074882ca .quad 0xe044a61ec1d0815c .quad 0x26334f0a409ef5e0 .quad 0xace532bf458d72e1 .quad 0x5be768e07cb73cb5 .quad 0x56cf7d94ee8bbde7 .quad 0x6b0697e3feb43a03 .quad 0xb6c8f04adf62a3c0 .quad 0x3ef000ef076da45d .quad 0x9c9cb95849f0d2a9 .quad 0x1cc37f43441b2fae // 2^224 * 8 * G .quad 0x508f565a5cc7324f .quad 0xd061c4c0e506a922 .quad 0xfb18abdb5c45ac19 .quad 0x6c6809c10380314a .quad 0xd76656f1c9ceaeb9 .quad 0x1c5b15f818e5656a .quad 
0x26e72832844c2334 .quad 0x3a346f772f196838 .quad 0xd2d55112e2da6ac8 .quad 0xe9bd0331b1e851ed .quad 0x960746dd8ec67262 .quad 0x05911b9f6ef7c5d0 // 2^228 * 1 * G .quad 0xe9dcd756b637ff2d .quad 0xec4c348fc987f0c4 .quad 0xced59285f3fbc7b7 .quad 0x3305354793e1ea87 .quad 0x01c18980c5fe9f94 .quad 0xcd656769716fd5c8 .quad 0x816045c3d195a086 .quad 0x6e2b7f3266cc7982 .quad 0xcc802468f7c3568f .quad 0x9de9ba8219974cb3 .quad 0xabb7229cb5b81360 .quad 0x44e2017a6fbeba62 // 2^228 * 2 * G .quad 0xc4c2a74354dab774 .quad 0x8e5d4c3c4eaf031a .quad 0xb76c23d242838f17 .quad 0x749a098f68dce4ea .quad 0x87f82cf3b6ca6ecd .quad 0x580f893e18f4a0c2 .quad 0x058930072604e557 .quad 0x6cab6ac256d19c1d .quad 0xdcdfe0a02cc1de60 .quad 0x032665ff51c5575b .quad 0x2c0c32f1073abeeb .quad 0x6a882014cd7b8606 // 2^228 * 3 * G .quad 0xa52a92fea4747fb5 .quad 0xdc12a4491fa5ab89 .quad 0xd82da94bb847a4ce .quad 0x4d77edce9512cc4e .quad 0xd111d17caf4feb6e .quad 0x050bba42b33aa4a3 .quad 0x17514c3ceeb46c30 .quad 0x54bedb8b1bc27d75 .quad 0x77c8e14577e2189c .quad 0xa3e46f6aff99c445 .quad 0x3144dfc86d335343 .quad 0x3a96559e7c4216a9 // 2^228 * 4 * G .quad 0x12550d37f42ad2ee .quad 0x8b78e00498a1fbf5 .quad 0x5d53078233894cb2 .quad 0x02c84e4e3e498d0c .quad 0x4493896880baaa52 .quad 0x4c98afc4f285940e .quad 0xef4aa79ba45448b6 .quad 0x5278c510a57aae7f .quad 0xa54dd074294c0b94 .quad 0xf55d46b8df18ffb6 .quad 0xf06fecc58dae8366 .quad 0x588657668190d165 // 2^228 * 5 * G .quad 0xd47712311aef7117 .quad 0x50343101229e92c7 .quad 0x7a95e1849d159b97 .quad 0x2449959b8b5d29c9 .quad 0xbf5834f03de25cc3 .quad 0xb887c8aed6815496 .quad 0x5105221a9481e892 .quad 0x6760ed19f7723f93 .quad 0x669ba3b7ac35e160 .quad 0x2eccf73fba842056 .quad 0x1aec1f17c0804f07 .quad 0x0d96bc031856f4e7 // 2^228 * 6 * G .quad 0x3318be7775c52d82 .quad 0x4cb764b554d0aab9 .quad 0xabcf3d27cc773d91 .quad 0x3bf4d1848123288a .quad 0xb1d534b0cc7505e1 .quad 0x32cd003416c35288 .quad 0xcb36a5800762c29d .quad 0x5bfe69b9237a0bf8 .quad 0x183eab7e78a151ab .quad 0xbbe990c999093763 .quad 0xff717d6e4ac7e335 .quad 0x4c5cddb325f39f88 // 2^228 * 7 * G .quad 0xc0f6b74d6190a6eb .quad 0x20ea81a42db8f4e4 .quad 0xa8bd6f7d97315760 .quad 0x33b1d60262ac7c21 .quad 0x57750967e7a9f902 .quad 0x2c37fdfc4f5b467e .quad 0xb261663a3177ba46 .quad 0x3a375e78dc2d532b .quad 0x8141e72f2d4dddea .quad 0xe6eafe9862c607c8 .quad 0x23c28458573cafd0 .quad 0x46b9476f4ff97346 // 2^228 * 8 * G .quad 0x0c1ffea44f901e5c .quad 0x2b0b6fb72184b782 .quad 0xe587ff910114db88 .quad 0x37130f364785a142 .quad 0x1215505c0d58359f .quad 0x2a2013c7fc28c46b .quad 0x24a0a1af89ea664e .quad 0x4400b638a1130e1f .quad 0x3a01b76496ed19c3 .quad 0x31e00ab0ed327230 .quad 0x520a885783ca15b1 .quad 0x06aab9875accbec7 // 2^232 * 1 * G .quad 0xc1339983f5df0ebb .quad 0xc0f3758f512c4cac .quad 0x2cf1130a0bb398e1 .quad 0x6b3cecf9aa270c62 .quad 0x5349acf3512eeaef .quad 0x20c141d31cc1cb49 .quad 0x24180c07a99a688d .quad 0x555ef9d1c64b2d17 .quad 0x36a770ba3b73bd08 .quad 0x624aef08a3afbf0c .quad 0x5737ff98b40946f2 .quad 0x675f4de13381749d // 2^232 * 2 * G .quad 0x0e2c52036b1782fc .quad 0x64816c816cad83b4 .quad 0xd0dcbdd96964073e .quad 0x13d99df70164c520 .quad 0xa12ff6d93bdab31d .quad 0x0725d80f9d652dfe .quad 0x019c4ff39abe9487 .quad 0x60f450b882cd3c43 .quad 0x014b5ec321e5c0ca .quad 0x4fcb69c9d719bfa2 .quad 0x4e5f1c18750023a0 .quad 0x1c06de9e55edac80 // 2^232 * 3 * G .quad 0x990f7ad6a33ec4e2 .quad 0x6608f938be2ee08e .quad 0x9ca143c563284515 .quad 0x4cf38a1fec2db60d .quad 0xffd52b40ff6d69aa .quad 0x34530b18dc4049bb .quad 0x5e4a5c2fa34d9897 .quad 0x78096f8e7d32ba2d .quad 
0xa0aaaa650dfa5ce7 .quad 0xf9c49e2a48b5478c .quad 0x4f09cc7d7003725b .quad 0x373cad3a26091abe // 2^232 * 4 * G .quad 0xb294634d82c9f57c .quad 0x1fcbfde124934536 .quad 0x9e9c4db3418cdb5a .quad 0x0040f3d9454419fc .quad 0xf1bea8fb89ddbbad .quad 0x3bcb2cbc61aeaecb .quad 0x8f58a7bb1f9b8d9d .quad 0x21547eda5112a686 .quad 0xdefde939fd5986d3 .quad 0xf4272c89510a380c .quad 0xb72ba407bb3119b9 .quad 0x63550a334a254df4 // 2^232 * 5 * G .quad 0x6507d6edb569cf37 .quad 0x178429b00ca52ee1 .quad 0xea7c0090eb6bd65d .quad 0x3eea62c7daf78f51 .quad 0x9bba584572547b49 .quad 0xf305c6fae2c408e0 .quad 0x60e8fa69c734f18d .quad 0x39a92bafaa7d767a .quad 0x9d24c713e693274e .quad 0x5f63857768dbd375 .quad 0x70525560eb8ab39a .quad 0x68436a0665c9c4cd // 2^232 * 6 * G .quad 0xbc0235e8202f3f27 .quad 0xc75c00e264f975b0 .quad 0x91a4e9d5a38c2416 .quad 0x17b6e7f68ab789f9 .quad 0x1e56d317e820107c .quad 0xc5266844840ae965 .quad 0xc1e0a1c6320ffc7a .quad 0x5373669c91611472 .quad 0x5d2814ab9a0e5257 .quad 0x908f2084c9cab3fc .quad 0xafcaf5885b2d1eca .quad 0x1cb4b5a678f87d11 // 2^232 * 7 * G .quad 0xb664c06b394afc6c .quad 0x0c88de2498da5fb1 .quad 0x4f8d03164bcad834 .quad 0x330bca78de7434a2 .quad 0x6b74aa62a2a007e7 .quad 0xf311e0b0f071c7b1 .quad 0x5707e438000be223 .quad 0x2dc0fd2d82ef6eac .quad 0x982eff841119744e .quad 0xf9695e962b074724 .quad 0xc58ac14fbfc953fb .quad 0x3c31be1b369f1cf5 // 2^232 * 8 * G .quad 0xb0f4864d08948aee .quad 0x07dc19ee91ba1c6f .quad 0x7975cdaea6aca158 .quad 0x330b61134262d4bb .quad 0xc168bc93f9cb4272 .quad 0xaeb8711fc7cedb98 .quad 0x7f0e52aa34ac8d7a .quad 0x41cec1097e7d55bb .quad 0xf79619d7a26d808a .quad 0xbb1fd49e1d9e156d .quad 0x73d7c36cdba1df27 .quad 0x26b44cd91f28777d // 2^236 * 1 * G .quad 0x300a9035393aa6d8 .quad 0x2b501131a12bb1cd .quad 0x7b1ff677f093c222 .quad 0x4309c1f8cab82bad .quad 0xaf44842db0285f37 .quad 0x8753189047efc8df .quad 0x9574e091f820979a .quad 0x0e378d6069615579 .quad 0xd9fa917183075a55 .quad 0x4bdb5ad26b009fdc .quad 0x7829ad2cd63def0e .quad 0x078fc54975fd3877 // 2^236 * 2 * G .quad 0x87dfbd1428878f2d .quad 0x134636dd1e9421a1 .quad 0x4f17c951257341a3 .quad 0x5df98d4bad296cb8 .quad 0xe2004b5bb833a98a .quad 0x44775dec2d4c3330 .quad 0x3aa244067eace913 .quad 0x272630e3d58e00a9 .quad 0xf3678fd0ecc90b54 .quad 0xf001459b12043599 .quad 0x26725fbc3758b89b .quad 0x4325e4aa73a719ae // 2^236 * 3 * G .quad 0x657dc6ef433c3493 .quad 0x65375e9f80dbf8c3 .quad 0x47fd2d465b372dae .quad 0x4966ab79796e7947 .quad 0xed24629acf69f59d .quad 0x2a4a1ccedd5abbf4 .quad 0x3535ca1f56b2d67b .quad 0x5d8c68d043b1b42d .quad 0xee332d4de3b42b0a .quad 0xd84e5a2b16a4601c .quad 0x78243877078ba3e4 .quad 0x77ed1eb4184ee437 // 2^236 * 4 * G .quad 0xbfd4e13f201839a0 .quad 0xaeefffe23e3df161 .quad 0xb65b04f06b5d1fe3 .quad 0x52e085fb2b62fbc0 .quad 0x185d43f89e92ed1a .quad 0xb04a1eeafe4719c6 .quad 0x499fbe88a6f03f4f .quad 0x5d8b0d2f3c859bdd .quad 0x124079eaa54cf2ba .quad 0xd72465eb001b26e7 .quad 0x6843bcfdc97af7fd .quad 0x0524b42b55eacd02 // 2^236 * 5 * G .quad 0xfd0d5dbee45447b0 .quad 0x6cec351a092005ee .quad 0x99a47844567579cb .quad 0x59d242a216e7fa45 .quad 0xbc18dcad9b829eac .quad 0x23ae7d28b5f579d0 .quad 0xc346122a69384233 .quad 0x1a6110b2e7d4ac89 .quad 0x4f833f6ae66997ac .quad 0x6849762a361839a4 .quad 0x6985dec1970ab525 .quad 0x53045e89dcb1f546 // 2^236 * 6 * G .quad 0xcb8bb346d75353db .quad 0xfcfcb24bae511e22 .quad 0xcba48d40d50ae6ef .quad 0x26e3bae5f4f7cb5d .quad 0x84da3cde8d45fe12 .quad 0xbd42c218e444e2d2 .quad 0xa85196781f7e3598 .quad 0x7642c93f5616e2b2 .quad 0x2323daa74595f8e4 .quad 0xde688c8b857abeb4 .quad 
0x3fc48e961c59326e .quad 0x0b2e73ca15c9b8ba // 2^236 * 7 * G .quad 0xd6bb4428c17f5026 .quad 0x9eb27223fb5a9ca7 .quad 0xe37ba5031919c644 .quad 0x21ce380db59a6602 .quad 0x0e3fbfaf79c03a55 .quad 0x3077af054cbb5acf .quad 0xd5c55245db3de39f .quad 0x015e68c1476a4af7 .quad 0xc1d5285220066a38 .quad 0x95603e523570aef3 .quad 0x832659a7226b8a4d .quad 0x5dd689091f8eedc9 // 2^236 * 8 * G .quad 0xcbac84debfd3c856 .quad 0x1624c348b35ff244 .quad 0xb7f88dca5d9cad07 .quad 0x3b0e574da2c2ebe8 .quad 0x1d022591a5313084 .quad 0xca2d4aaed6270872 .quad 0x86a12b852f0bfd20 .quad 0x56e6c439ad7da748 .quad 0xc704ff4942bdbae6 .quad 0x5e21ade2b2de1f79 .quad 0xe95db3f35652fad8 .quad 0x0822b5378f08ebc1 // 2^240 * 1 * G .quad 0x51f048478f387475 .quad 0xb25dbcf49cbecb3c .quad 0x9aab1244d99f2055 .quad 0x2c709e6c1c10a5d6 .quad 0xe1b7f29362730383 .quad 0x4b5279ffebca8a2c .quad 0xdafc778abfd41314 .quad 0x7deb10149c72610f .quad 0xcb62af6a8766ee7a .quad 0x66cbec045553cd0e .quad 0x588001380f0be4b5 .quad 0x08e68e9ff62ce2ea // 2^240 * 2 * G .quad 0x34ad500a4bc130ad .quad 0x8d38db493d0bd49c .quad 0xa25c3d98500a89be .quad 0x2f1f3f87eeba3b09 .quad 0x2f2d09d50ab8f2f9 .quad 0xacb9218dc55923df .quad 0x4a8f342673766cb9 .quad 0x4cb13bd738f719f5 .quad 0xf7848c75e515b64a .quad 0xa59501badb4a9038 .quad 0xc20d313f3f751b50 .quad 0x19a1e353c0ae2ee8 // 2^240 * 3 * G .quad 0x7d1c7560bafa05c3 .quad 0xb3e1a0a0c6e55e61 .quad 0xe3529718c0d66473 .quad 0x41546b11c20c3486 .quad 0xb42172cdd596bdbd .quad 0x93e0454398eefc40 .quad 0x9fb15347b44109b5 .quad 0x736bd3990266ae34 .quad 0x85532d509334b3b4 .quad 0x46fd114b60816573 .quad 0xcc5f5f30425c8375 .quad 0x412295a2b87fab5c // 2^240 * 4 * G .quad 0x19c99b88f57ed6e9 .quad 0x5393cb266df8c825 .quad 0x5cee3213b30ad273 .quad 0x14e153ebb52d2e34 .quad 0x2e655261e293eac6 .quad 0x845a92032133acdb .quad 0x460975cb7900996b .quad 0x0760bb8d195add80 .quad 0x413e1a17cde6818a .quad 0x57156da9ed69a084 .quad 0x2cbf268f46caccb1 .quad 0x6b34be9bc33ac5f2 // 2^240 * 5 * G .quad 0xf3df2f643a78c0b2 .quad 0x4c3e971ef22e027c .quad 0xec7d1c5e49c1b5a3 .quad 0x2012c18f0922dd2d .quad 0x11fc69656571f2d3 .quad 0xc6c9e845530e737a .quad 0xe33ae7a2d4fe5035 .quad 0x01b9c7b62e6dd30b .quad 0x880b55e55ac89d29 .quad 0x1483241f45a0a763 .quad 0x3d36efdfc2e76c1f .quad 0x08af5b784e4bade8 // 2^240 * 6 * G .quad 0x283499dc881f2533 .quad 0x9d0525da779323b6 .quad 0x897addfb673441f4 .quad 0x32b79d71163a168d .quad 0xe27314d289cc2c4b .quad 0x4be4bd11a287178d .quad 0x18d528d6fa3364ce .quad 0x6423c1d5afd9826e .quad 0xcc85f8d9edfcb36a .quad 0x22bcc28f3746e5f9 .quad 0xe49de338f9e5d3cd .quad 0x480a5efbc13e2dcc // 2^240 * 7 * G .quad 0x0b51e70b01622071 .quad 0x06b505cf8b1dafc5 .quad 0x2c6bb061ef5aabcd .quad 0x47aa27600cb7bf31 .quad 0xb6614ce442ce221f .quad 0x6e199dcc4c053928 .quad 0x663fb4a4dc1cbe03 .quad 0x24b31d47691c8e06 .quad 0x2a541eedc015f8c3 .quad 0x11a4fe7e7c693f7c .quad 0xf0af66134ea278d6 .quad 0x545b585d14dda094 // 2^240 * 8 * G .quad 0x67bf275ea0d43a0f .quad 0xade68e34089beebe .quad 0x4289134cd479e72e .quad 0x0f62f9c332ba5454 .quad 0x6204e4d0e3b321e1 .quad 0x3baa637a28ff1e95 .quad 0x0b0ccffd5b99bd9e .quad 0x4d22dc3e64c8d071 .quad 0xfcb46589d63b5f39 .quad 0x5cae6a3f57cbcf61 .quad 0xfebac2d2953afa05 .quad 0x1c0fa01a36371436 // 2^244 * 1 * G .quad 0xe7547449bc7cd692 .quad 0x0f9abeaae6f73ddf .quad 0x4af01ca700837e29 .quad 0x63ab1b5d3f1bc183 .quad 0xc11ee5e854c53fae .quad 0x6a0b06c12b4f3ff4 .quad 0x33540f80e0b67a72 .quad 0x15f18fc3cd07e3ef .quad 0x32750763b028f48c .quad 0x06020740556a065f .quad 0xd53bd812c3495b58 .quad 0x08706c9b865f508d // 2^244 * 2 * G .quad 
0xf37ca2ab3d343dff .quad 0x1a8c6a2d80abc617 .quad 0x8e49e035d4ccffca .quad 0x48b46beebaa1d1b9 .quad 0xcc991b4138b41246 .quad 0x243b9c526f9ac26b .quad 0xb9ef494db7cbabbd .quad 0x5fba433dd082ed00 .quad 0x9c49e355c9941ad0 .quad 0xb9734ade74498f84 .quad 0x41c3fed066663e5c .quad 0x0ecfedf8e8e710b3 // 2^244 * 3 * G .quad 0x76430f9f9cd470d9 .quad 0xb62acc9ba42f6008 .quad 0x1898297c59adad5e .quad 0x7789dd2db78c5080 .quad 0x744f7463e9403762 .quad 0xf79a8dee8dfcc9c9 .quad 0x163a649655e4cde3 .quad 0x3b61788db284f435 .quad 0xb22228190d6ef6b2 .quad 0xa94a66b246ce4bfa .quad 0x46c1a77a4f0b6cc7 .quad 0x4236ccffeb7338cf // 2^244 * 4 * G .quad 0x8497404d0d55e274 .quad 0x6c6663d9c4ad2b53 .quad 0xec2fb0d9ada95734 .quad 0x2617e120cdb8f73c .quad 0x3bd82dbfda777df6 .quad 0x71b177cc0b98369e .quad 0x1d0e8463850c3699 .quad 0x5a71945b48e2d1f1 .quad 0x6f203dd5405b4b42 .quad 0x327ec60410b24509 .quad 0x9c347230ac2a8846 .quad 0x77de29fc11ffeb6a // 2^244 * 5 * G .quad 0xb0ac57c983b778a8 .quad 0x53cdcca9d7fe912c .quad 0x61c2b854ff1f59dc .quad 0x3a1a2cf0f0de7dac .quad 0x835e138fecced2ca .quad 0x8c9eaf13ea963b9a .quad 0xc95fbfc0b2160ea6 .quad 0x575e66f3ad877892 .quad 0x99803a27c88fcb3a .quad 0x345a6789275ec0b0 .quad 0x459789d0ff6c2be5 .quad 0x62f882651e70a8b2 // 2^244 * 6 * G .quad 0x085ae2c759ff1be4 .quad 0x149145c93b0e40b7 .quad 0xc467e7fa7ff27379 .quad 0x4eeecf0ad5c73a95 .quad 0x6d822986698a19e0 .quad 0xdc9821e174d78a71 .quad 0x41a85f31f6cb1f47 .quad 0x352721c2bcda9c51 .quad 0x48329952213fc985 .quad 0x1087cf0d368a1746 .quad 0x8e5261b166c15aa5 .quad 0x2d5b2d842ed24c21 // 2^244 * 7 * G .quad 0x02cfebd9ebd3ded1 .quad 0xd45b217739021974 .quad 0x7576f813fe30a1b7 .quad 0x5691b6f9a34ef6c2 .quad 0x5eb7d13d196ac533 .quad 0x377234ecdb80be2b .quad 0xe144cffc7cf5ae24 .quad 0x5226bcf9c441acec .quad 0x79ee6c7223e5b547 .quad 0x6f5f50768330d679 .quad 0xed73e1e96d8adce9 .quad 0x27c3da1e1d8ccc03 // 2^244 * 8 * G .quad 0x7eb9efb23fe24c74 .quad 0x3e50f49f1651be01 .quad 0x3ea732dc21858dea .quad 0x17377bd75bb810f9 .quad 0x28302e71630ef9f6 .quad 0xc2d4a2032b64cee0 .quad 0x090820304b6292be .quad 0x5fca747aa82adf18 .quad 0x232a03c35c258ea5 .quad 0x86f23a2c6bcb0cf1 .quad 0x3dad8d0d2e442166 .quad 0x04a8933cab76862b // 2^248 * 1 * G .quad 0xd2c604b622943dff .quad 0xbc8cbece44cfb3a0 .quad 0x5d254ff397808678 .quad 0x0fa3614f3b1ca6bf .quad 0x69082b0e8c936a50 .quad 0xf9c9a035c1dac5b6 .quad 0x6fb73e54c4dfb634 .quad 0x4005419b1d2bc140 .quad 0xa003febdb9be82f0 .quad 0x2089c1af3a44ac90 .quad 0xf8499f911954fa8e .quad 0x1fba218aef40ab42 // 2^248 * 2 * G .quad 0xab549448fac8f53e .quad 0x81f6e89a7ba63741 .quad 0x74fd6c7d6c2b5e01 .quad 0x392e3acaa8c86e42 .quad 0x4f3e57043e7b0194 .quad 0xa81d3eee08daaf7f .quad 0xc839c6ab99dcdef1 .quad 0x6c535d13ff7761d5 .quad 0x4cbd34e93e8a35af .quad 0x2e0781445887e816 .quad 0x19319c76f29ab0ab .quad 0x25e17fe4d50ac13b // 2^248 * 3 * G .quad 0x0a289bd71e04f676 .quad 0x208e1c52d6420f95 .quad 0x5186d8b034691fab .quad 0x255751442a9fb351 .quad 0x915f7ff576f121a7 .quad 0xc34a32272fcd87e3 .quad 0xccba2fde4d1be526 .quad 0x6bba828f8969899b .quad 0xe2d1bc6690fe3901 .quad 0x4cb54a18a0997ad5 .quad 0x971d6914af8460d4 .quad 0x559d504f7f6b7be4 // 2^248 * 4 * G .quad 0xa7738378b3eb54d5 .quad 0x1d69d366a5553c7c .quad 0x0a26cf62f92800ba .quad 0x01ab12d5807e3217 .quad 0x9c4891e7f6d266fd .quad 0x0744a19b0307781b .quad 0x88388f1d6061e23b .quad 0x123ea6a3354bd50e .quad 0x118d189041e32d96 .quad 0xb9ede3c2d8315848 .quad 0x1eab4271d83245d9 .quad 0x4a3961e2c918a154 // 2^248 * 5 * G .quad 0x71dc3be0f8e6bba0 .quad 0xd6cef8347effe30a .quad 
0xa992425fe13a476a .quad 0x2cd6bce3fb1db763 .quad 0x0327d644f3233f1e .quad 0x499a260e34fcf016 .quad 0x83b5a716f2dab979 .quad 0x68aceead9bd4111f .quad 0x38b4c90ef3d7c210 .quad 0x308e6e24b7ad040c .quad 0x3860d9f1b7e73e23 .quad 0x595760d5b508f597 // 2^248 * 6 * G .quad 0x6129bfe104aa6397 .quad 0x8f960008a4a7fccb .quad 0x3f8bc0897d909458 .quad 0x709fa43edcb291a9 .quad 0x882acbebfd022790 .quad 0x89af3305c4115760 .quad 0x65f492e37d3473f4 .quad 0x2cb2c5df54515a2b .quad 0xeb0a5d8c63fd2aca .quad 0xd22bc1662e694eff .quad 0x2723f36ef8cbb03a .quad 0x70f029ecf0c8131f // 2^248 * 7 * G .quad 0x461307b32eed3e33 .quad 0xae042f33a45581e7 .quad 0xc94449d3195f0366 .quad 0x0b7d5d8a6c314858 .quad 0x2a6aafaa5e10b0b9 .quad 0x78f0a370ef041aa9 .quad 0x773efb77aa3ad61f .quad 0x44eca5a2a74bd9e1 .quad 0x25d448327b95d543 .quad 0x70d38300a3340f1d .quad 0xde1c531c60e1c52b .quad 0x272224512c7de9e4 // 2^248 * 8 * G .quad 0x1abc92af49c5342e .quad 0xffeed811b2e6fad0 .quad 0xefa28c8dfcc84e29 .quad 0x11b5df18a44cc543 .quad 0xbf7bbb8a42a975fc .quad 0x8c5c397796ada358 .quad 0xe27fc76fcdedaa48 .quad 0x19735fd7f6bc20a6 .quad 0xe3ab90d042c84266 .quad 0xeb848e0f7f19547e .quad 0x2503a1d065a497b9 .quad 0x0fef911191df895f #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
marvin-hansen/iggy-streaming-system
114,681
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/curve25519/edwards25519_scalarmuldouble_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0

// ----------------------------------------------------------------------------
// Double scalar multiplication for edwards25519, fresh and base point
// Input scalar[4], point[8], bscalar[4]; output res[8]
//
// extern void edwards25519_scalarmuldouble_alt
//   (uint64_t res[static 8],uint64_t scalar[static 4],
//    uint64_t point[static 8],uint64_t bscalar[static 4]);
//
// Given scalar = n, point = P and bscalar = m, returns in res
// the point (X,Y) = n * P + m * B where B = (...,4/5) is
// the standard basepoint for the edwards25519 (Ed25519) curve.
//
// Both 256-bit coordinates of the input point P are implicitly
// reduced modulo 2^255-19 if they are not already in reduced form,
// but the conventional usage is that they *are* already reduced.
// The scalars can be arbitrary 256-bit numbers but may also be
// considered as implicitly reduced modulo the group order.
//
// Standard x86-64 ABI: RDI = res, RSI = scalar, RDX = point, RCX = bscalar
// Microsoft x64 ABI:   RCX = res, RDX = scalar, R8 = point, R9 = bscalar
// ----------------------------------------------------------------------------

#include "_internal_s2n_bignum.h"

        S2N_BN_SYM_VISIBILITY_DIRECTIVE(edwards25519_scalarmuldouble_alt)
        S2N_BN_SYM_PRIVACY_DIRECTIVE(edwards25519_scalarmuldouble_alt)
        .text

// Size of individual field elements

#define NUMSIZE 32

// Pointer-offset pairs for result and temporaries on stack with some aliasing.
// Both "resx" and "resy" assume the "res" pointer has been preloaded into %rbp.

#define resx (0*NUMSIZE)(%rbp)
#define resy (1*NUMSIZE)(%rbp)

#define scalar (0*NUMSIZE)(%rsp)
#define bscalar (1*NUMSIZE)(%rsp)

#define tabent (2*NUMSIZE)(%rsp)
#define btabent (6*NUMSIZE)(%rsp)

#define acc (9*NUMSIZE)(%rsp)

#define tab (13*NUMSIZE)(%rsp)

// Additional variables kept on the stack

#define bf 45*NUMSIZE(%rsp)
#define cf 45*NUMSIZE+8(%rsp)
#define i 45*NUMSIZE+16(%rsp)
#define res 45*NUMSIZE+24(%rsp)

// Total size to reserve on the stack (excluding local subroutines)

#define NSPACE (46*NUMSIZE)

// Syntactic variants to make x86_att forms easier to generate

#define SCALAR (0*NUMSIZE)
#define BSCALAR (1*NUMSIZE)

#define TABENT (2*NUMSIZE)
#define BTABENT (6*NUMSIZE)

#define ACC (9*NUMSIZE)

#define TAB (13*NUMSIZE)

// Sub-references used in local subroutines with local stack

#define x_0 0(%rdi)
#define y_0 NUMSIZE(%rdi)
#define z_0 (2*NUMSIZE)(%rdi)
#define w_0 (3*NUMSIZE)(%rdi)

#define x_1 0(%rsi)
#define y_1 NUMSIZE(%rsi)
#define z_1 (2*NUMSIZE)(%rsi)
#define w_1 (3*NUMSIZE)(%rsi)

#define x_2 0(%rbp)
#define y_2 NUMSIZE(%rbp)
#define z_2 (2*NUMSIZE)(%rbp)
#define w_2 (3*NUMSIZE)(%rbp)

#define t0 (0*NUMSIZE)(%rsp)
#define t1 (1*NUMSIZE)(%rsp)
#define t2 (2*NUMSIZE)(%rsp)
#define t3 (3*NUMSIZE)(%rsp)
#define t4 (4*NUMSIZE)(%rsp)
#define t5 (5*NUMSIZE)(%rsp)

// Macro wrapping up the basic field multiplication, only trivially
// different from a pure function call to bignum_mul_p25519_alt.
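//
// (Editorial note, not part of the original source.) The macro below forms
// the full 512-bit product with schoolbook multiplication and then reduces it
// modulo p_25519 = 2^255 - 19: since 2^256 == 38 (mod p_25519), the product
// h * 2^256 + l is first folded to l + 38 * h (the multiplier 0x26 below),
// and a final short reduction uses the multiplier 0x13 = 19. A minimal C
// sketch of the folding step, assuming 4-limb little-endian values and
// 128-bit integer support; the name fold_high_part is hypothetical and only
// illustrates the idea, it is not taken from s2n-bignum:
//
//     #include <stdint.h>
//
//     // Fold the high 256-bit half 'hi' of a 512-bit product into the low
//     // half 'lo' using 2^256 == 38 (mod 2^255 - 19). The result is only
//     // congruent to the product, not yet fully reduced; the remaining
//     // carry would be folded in again the same way.
//     static void fold_high_part(uint64_t r[4],
//                                const uint64_t lo[4], const uint64_t hi[4]) {
//         unsigned __int128 c = 0;
//         for (int i = 0; i < 4; i++) {
//             c += (unsigned __int128)lo[i] + (unsigned __int128)38 * hi[i];
//             r[i] = (uint64_t)c;   // keep the low 64 bits
//             c >>= 64;             // carry into the next limb
//         }
//     }
//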
#define mul_p25519(P0,P1,P2) \ movq P1, %rax ; \ mulq P2; \ movq %rax, %r8 ; \ movq %rdx, %r9 ; \ xorq %r10, %r10 ; \ xorq %r11, %r11 ; \ movq P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ movq 0x8+P1, %rax ; \ mulq P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ adcq $0x0, %r11 ; \ xorq %r12, %r12 ; \ movq P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq %r12, %r12 ; \ movq 0x8+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ movq 0x10+P1, %rax ; \ mulq P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ xorq %r13, %r13 ; \ movq P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq %r13, %r13 ; \ movq 0x8+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x10+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x18+P1, %rax ; \ mulq P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ xorq %r14, %r14 ; \ movq 0x8+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq %r14, %r14 ; \ movq 0x10+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ movq 0x18+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ xorq %r15, %r15 ; \ movq 0x10+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq %r15, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ movl $0x26, %esi ; \ movq %r12, %rax ; \ mulq %rsi; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %rcx, %rcx ; \ movq %r13, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %rcx, %rcx ; \ movq %r14, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rcx, %rcx ; \ movq %r15, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ xorq %rcx, %rcx ; \ addq %rax, %r11 ; \ movq %rdx, %r12 ; \ adcq %rcx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ leaq 0x1(%r12), %rax ; \ movl $0x13, %esi ; \ bts $63, %r11 ; \ imulq %rsi, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ sbbq %rax, %rax ; \ notq %rax; \ andq %rsi, %rax ; \ subq %rax, %r8 ; \ sbbq %rcx, %r9 ; \ sbbq %rcx, %r10 ; \ sbbq %rcx, %r11 ; \ btr $63, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // A version of multiplication that only guarantees output < 2 * p_25519. // This basically skips the +1 and final correction in quotient estimation. 
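// Hedged sketch of the weaker contract just described (an illustration with
// ad hoc names, not a drop-in for the macro): mul_4 only promises a value
// that is congruent to the product and fits below 2*p_25519 = 2^256 - 38;
// it need not be the canonical residue.

P25519 = 2**255 - 19

def mul_4_contract_holds(r, a, b):
    # True when r is an acceptable mul_4 output for inputs a and b.
    return r < 2 * P25519 and (r - a * b) % P25519 == 0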
#define mul_4(P0,P1,P2) \ movq P1, %rax ; \ mulq P2; \ movq %rax, %r8 ; \ movq %rdx, %r9 ; \ xorq %r10, %r10 ; \ xorq %r11, %r11 ; \ movq P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ movq 0x8+P1, %rax ; \ mulq P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ adcq $0x0, %r11 ; \ xorq %r12, %r12 ; \ movq P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq %r12, %r12 ; \ movq 0x8+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ movq 0x10+P1, %rax ; \ mulq P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ xorq %r13, %r13 ; \ movq P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq %r13, %r13 ; \ movq 0x8+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x10+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x18+P1, %rax ; \ mulq P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ xorq %r14, %r14 ; \ movq 0x8+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq %r14, %r14 ; \ movq 0x10+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ movq 0x18+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ xorq %r15, %r15 ; \ movq 0x10+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq %r15, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ movl $0x26, %ebx ; \ movq %r12, %rax ; \ mulq %rbx; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %rcx, %rcx ; \ movq %r13, %rax ; \ mulq %rbx; \ subq %rcx, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %rcx, %rcx ; \ movq %r14, %rax ; \ mulq %rbx; \ subq %rcx, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rcx, %rcx ; \ movq %r15, %rax ; \ mulq %rbx; \ subq %rcx, %rdx ; \ xorq %rcx, %rcx ; \ addq %rax, %r11 ; \ movq %rdx, %r12 ; \ adcq %rcx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Squaring just giving a result < 2 * p_25519, which is done by // basically skipping the +1 in the quotient estimate and the final // optional correction. 
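// Why the squaring macro below needs fewer multiplies than mul_4: each cross
// product a[i]*a[j] with i < j appears twice in the square, so it is computed
// once and doubled (the addq %rax,%rax / adcq %rdx,%rdx pairs).  A small
// Python model of that bookkeeping, with limbs as little-endian 64-bit words:

def square_from_limbs(a):
    r = 0
    for i in range(4):
        r += (a[i] * a[i]) << (64 * 2 * i)             # diagonal terms once
        for j in range(i + 1, 4):
            r += (2 * a[i] * a[j]) << (64 * (i + j))   # cross terms doubled
    return r

limbs = [1, 2, 3, 4]
assert square_from_limbs(limbs) == sum(w << (64 * k) for k, w in enumerate(limbs)) ** 2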
#define sqr_4(P0,P1) \ movq P1, %rax ; \ mulq %rax; \ movq %rax, %r8 ; \ movq %rdx, %r9 ; \ xorq %r10, %r10 ; \ xorq %r11, %r11 ; \ movq P1, %rax ; \ mulq 0x8+P1; \ addq %rax, %rax ; \ adcq %rdx, %rdx ; \ adcq $0x0, %r11 ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ adcq $0x0, %r11 ; \ xorq %r12, %r12 ; \ movq 0x8+P1, %rax ; \ mulq %rax; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ movq P1, %rax ; \ mulq 0x10+P1; \ addq %rax, %rax ; \ adcq %rdx, %rdx ; \ adcq $0x0, %r12 ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ xorq %r13, %r13 ; \ movq P1, %rax ; \ mulq 0x18+P1; \ addq %rax, %rax ; \ adcq %rdx, %rdx ; \ adcq $0x0, %r13 ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x8+P1, %rax ; \ mulq 0x10+P1; \ addq %rax, %rax ; \ adcq %rdx, %rdx ; \ adcq $0x0, %r13 ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ xorq %r14, %r14 ; \ movq 0x8+P1, %rax ; \ mulq 0x18+P1; \ addq %rax, %rax ; \ adcq %rdx, %rdx ; \ adcq $0x0, %r14 ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ movq 0x10+P1, %rax ; \ mulq %rax; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ xorq %r15, %r15 ; \ movq 0x10+P1, %rax ; \ mulq 0x18+P1; \ addq %rax, %rax ; \ adcq %rdx, %rdx ; \ adcq $0x0, %r15 ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x18+P1, %rax ; \ mulq %rax; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ movl $0x26, %ebx ; \ movq %r12, %rax ; \ mulq %rbx; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %rcx, %rcx ; \ movq %r13, %rax ; \ mulq %rbx; \ subq %rcx, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %rcx, %rcx ; \ movq %r14, %rax ; \ mulq %rbx; \ subq %rcx, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rcx, %rcx ; \ movq %r15, %rax ; \ mulq %rbx; \ subq %rcx, %rdx ; \ xorq %rcx, %rcx ; \ addq %rax, %r11 ; \ movq %rdx, %r12 ; \ adcq %rcx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Modular subtraction with double modulus 2 * p_25519 = 2^256 - 38 #define sub_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ebx, %ebx ; \ subq P2, %r8 ; \ movq 8+P1, %r9 ; \ sbbq 8+P2, %r9 ; \ movl $38, %ecx ; \ movq 16+P1, %r10 ; \ sbbq 16+P2, %r10 ; \ movq 24+P1, %rax ; \ sbbq 24+P2, %rax ; \ cmovncq %rbx, %rcx ; \ subq %rcx, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq %rbx, %rax ; \ movq %r8, P0 ; \ movq %r9, 8+P0 ; \ movq %r10, 16+P0 ; \ movq %rax, 24+P0 // Modular addition and doubling with double modulus 2 * p_25519 = 2^256 - 38. // This only ensures that the result fits in 4 digits, not that it is reduced // even w.r.t. double modulus. The result is always correct modulo provided // the sum of the inputs is < 2^256 + 2^256 - 38, so in particular provided // at least one of them is reduced double modulo. 
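// A small model of the carry handling in the macro that follows (assumed
// names, illustration only): if the 4-limb addition carries out of 2^256,
// wrap around and add 38, since 2^256 == 38 (mod p_25519).  The result stays
// within 4 limbs exactly under the precondition stated above, e.g. whenever
// at least one input is already below 2*p_25519 = 2^256 - 38.

def add_twice4_ref(x, y):
    s = x + y
    if s >= 2**256:                  # the carry that cmovncq tests for
        s = s - 2**256 + 38
    return s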
#define add_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ecx, %ecx ; \ addq P2, %r8 ; \ movq 0x8+P1, %r9 ; \ adcq 0x8+P2, %r9 ; \ movq 0x10+P1, %r10 ; \ adcq 0x10+P2, %r10 ; \ movq 0x18+P1, %r11 ; \ adcq 0x18+P2, %r11 ; \ movl $38, %eax ; \ cmovncq %rcx, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 #define double_twice4(P0,P1) \ movq P1, %r8 ; \ xorl %ecx, %ecx ; \ addq %r8, %r8 ; \ movq 0x8+P1, %r9 ; \ adcq %r9, %r9 ; \ movq 0x10+P1, %r10 ; \ adcq %r10, %r10 ; \ movq 0x18+P1, %r11 ; \ adcq %r11, %r11 ; \ movl $38, %eax ; \ cmovncq %rcx, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Load the constant k_25519 = 2 * d_25519 using immediate operations #define load_k25519(P0) \ movq $0xebd69b9426b2f159, %rax ; \ movq %rax, P0 ; \ movq $0x00e0149a8283b156, %rax ; \ movq %rax, 8+P0 ; \ movq $0x198e80f2eef3d130, %rax ; \ movq %rax, 16+P0 ; \ movq $0x2406d9dc56dffce7, %rax ; \ movq %rax, 24+P0 S2N_BN_SYMBOL(edwards25519_scalarmuldouble_alt): // In this case the Windows form literally makes a subroutine call. // This avoids hassle arising from keeping code and data together. #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx movq %r9, %rcx callq edwards25519_scalarmuldouble_alt_standard popq %rsi popq %rdi ret edwards25519_scalarmuldouble_alt_standard: #endif // Save registers, make room for temps, preserve input arguments. pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp // Move the output pointer to a stable place movq %rdi, res // Copy scalars while recoding all 4-bit nybbles except the top // one (bits 252..255) into signed 4-bit digits. This is essentially // done just by adding the recoding constant 0x0888..888, after // which all digits except the first have an implicit bias of -8, // so 0 -> -8, 1 -> -7, ... 7 -> -1, 8 -> 0, 9 -> 1, ... 15 -> 7. // (We could literally create 2s complement signed nybbles by // XORing with the same constant 0x0888..888 afterwards, but it // doesn't seem to make the end usage any simpler.) // // In order to ensure that the unrecoded top nybble (bits 252..255) // does not become > 8 as a result of carries lower down from the // recoding, we first (conceptually) subtract the group order iff // the top digit of the scalar is > 2^63. In the implementation the // reduction and recoding are combined by optionally using the // modified recoding constant 0x0888...888 + (2^256 - group_order). 
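// A hypothetical Python check of the recoding just described, restricted to
// the simple branch where the top 64-bit limb of the scalar is <= 2^63 (the
// other branch additionally folds in a multiple of the group order).  The
// names RECODE, recoded_digits and check_recoding are ad hoc.

RECODE = int("0" + "8" * 63, 16)     # the constant 0x0888...888

def recoded_digits(s):
    t = s + RECODE                   # cannot overflow 2^256 in this branch
    d = [((t >> (4 * i)) & 0xF) - 8 for i in range(63)]   # biased nybbles
    d.append(t >> 252)               # top nybble, kept unbiased
    return d

def check_recoding(s):
    d = recoded_digits(s)
    assert all(-8 <= x <= 7 for x in d[:63]) and 0 <= d[63] <= 8
    assert sum(x << (4 * i) for i, x in enumerate(d)) == s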
movq (%rcx), %r8 movq 8(%rcx), %r9 movq 16(%rcx), %r10 movq 24(%rcx), %r11 movq $0xc7f56fb5a0d9e920, %r12 movq $0xe190b99370cba1d5, %r13 movq $0x8888888888888887, %r14 movq $0x8888888888888888, %r15 movq $0x8000000000000000, %rax movq $0x0888888888888888, %rbx cmpq %r11, %rax cmovncq %r15, %r12 cmovncq %r15, %r13 cmovncq %r15, %r14 cmovncq %rbx, %r15 addq %r12, %r8 adcq %r13, %r9 adcq %r14, %r10 adcq %r15, %r11 movq %r8, BSCALAR(%rsp) movq %r9, BSCALAR+8(%rsp) movq %r10, BSCALAR+16(%rsp) movq %r11, BSCALAR+24(%rsp) movq (%rsi), %r8 movq 8(%rsi), %r9 movq 16(%rsi), %r10 movq 24(%rsi), %r11 movq $0xc7f56fb5a0d9e920, %r12 movq $0xe190b99370cba1d5, %r13 movq $0x8888888888888887, %r14 movq $0x8888888888888888, %r15 movq $0x8000000000000000, %rax movq $0x0888888888888888, %rbx cmpq %r11, %rax cmovncq %r15, %r12 cmovncq %r15, %r13 cmovncq %r15, %r14 cmovncq %rbx, %r15 addq %r12, %r8 adcq %r13, %r9 adcq %r14, %r10 adcq %r15, %r11 movq %r8, SCALAR(%rsp) movq %r9, SCALAR+8(%rsp) movq %r10, SCALAR+16(%rsp) movq %r11, SCALAR+24(%rsp) // Create table of multiples 1..8 of the general input point at "tab". // Reduce the input coordinates x and y modulo 2^256 - 38 first, for the // sake of definiteness; this is the reduction that will be maintained. // We could slightly optimize the additions because we know the input // point is affine (so Z = 1), but it doesn't seem worth the complication. movl $38, %eax movq (%rdx), %r8 xorl %ebx, %ebx movq 8(%rdx), %r9 xorl %ecx, %ecx movq 16(%rdx), %r10 xorl %esi, %esi movq 24(%rdx), %r11 addq %r8, %rax adcq %r9, %rbx adcq %r10, %rcx adcq %r11, %rsi cmovncq %r8, %rax movq %rax, TAB(%rsp) cmovncq %r9, %rbx movq %rbx, TAB+8(%rsp) cmovncq %r10, %rcx movq %rcx, TAB+16(%rsp) cmovncq %r11, %rsi movq %rsi, TAB+24(%rsp) movl $38, %eax movq 32(%rdx), %r8 xorl %ebx, %ebx movq 40(%rdx), %r9 xorl %ecx, %ecx movq 48(%rdx), %r10 xorl %esi, %esi movq 56(%rdx), %r11 addq %r8, %rax adcq %r9, %rbx adcq %r10, %rcx adcq %r11, %rsi cmovncq %r8, %rax movq %rax, TAB+32(%rsp) cmovncq %r9, %rbx movq %rbx, TAB+40(%rsp) cmovncq %r10, %rcx movq %rcx, TAB+48(%rsp) cmovncq %r11, %rsi movq %rsi, TAB+56(%rsp) movl $1, %eax movq %rax, TAB+64(%rsp) xorl %eax, %eax movq %rax, TAB+72(%rsp) movq %rax, TAB+80(%rsp) movq %rax, TAB+88(%rsp) leaq TAB+96(%rsp), %rdi leaq TAB(%rsp), %rsi leaq TAB+32(%rsp), %rbp mul_4(x_0,x_1,x_2) // Multiple 2 leaq TAB+1*128(%rsp), %rdi leaq TAB(%rsp), %rsi callq edwards25519_scalarmuldouble_alt_epdouble // Multiple 3 leaq TAB+2*128(%rsp), %rdi leaq TAB(%rsp), %rsi leaq TAB+1*128(%rsp), %rbp callq edwards25519_scalarmuldouble_alt_epadd // Multiple 4 leaq TAB+3*128(%rsp), %rdi leaq TAB+1*128(%rsp), %rsi callq edwards25519_scalarmuldouble_alt_epdouble // Multiple 5 leaq TAB+4*128(%rsp), %rdi leaq TAB(%rsp), %rsi leaq TAB+3*128(%rsp), %rbp callq edwards25519_scalarmuldouble_alt_epadd // Multiple 6 leaq TAB+5*128(%rsp), %rdi leaq TAB+2*128(%rsp), %rsi callq edwards25519_scalarmuldouble_alt_epdouble // Multiple 7 leaq TAB+6*128(%rsp), %rdi leaq TAB(%rsp), %rsi leaq TAB+5*128(%rsp), %rbp callq edwards25519_scalarmuldouble_alt_epadd // Multiple 8 leaq TAB+7*128(%rsp), %rdi leaq TAB+3*128(%rsp), %rsi callq edwards25519_scalarmuldouble_alt_epdouble // Handle the initialization, starting the loop counter at i = 252 // and initializing acc to the sum of the table entries for the // top nybbles of the scalars (the ones with no implicit -8 bias). movq $252, %rax movq %rax, i // Index for btable entry... 
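// Sketch of the selection pattern used by this lookup (and the later ones):
// the secret index picks one of the eight table entries, or the neutral
// element for index 0, by scanning the whole table and conditionally
// overwriting an accumulator, so the memory access pattern is independent of
// the index.  Python integers are not constant-time; this only mirrors the
// control structure of the cmovz chains, for the 96-byte basepoint entries.

def select_entry(table, idx):
    acc = [1, 1, 0]                  # neutral element in (y-x, x+y, 2dxy) form
    for j, entry in enumerate(table, start=1):
        take = int(j == idx)         # plays the role of the cmovz condition
        acc = [take * e + (1 - take) * a for e, a in zip(entry, acc)]
    return acc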
movq BSCALAR+24(%rsp), %rax shrq $60, %rax movq %rax, bf // ...and constant-time indexing based on that index movl $1, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx movl $1, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d xorl %r12d, %r12d xorl %r13d, %r13d xorl %r14d, %r14d xorl %r15d, %r15d leaq edwards25519_scalarmuldouble_alt_table(%rip), %rbp cmpq $1, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $2, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $3, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $4, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $5, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $6, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $7, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 
64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $8, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 movq %rax, BTABENT(%rsp) movq %rbx, BTABENT+8(%rsp) movq %rcx, BTABENT+16(%rsp) movq %rdx, BTABENT+24(%rsp) movq %r8, BTABENT+32(%rsp) movq %r9, BTABENT+40(%rsp) movq %r10, BTABENT+48(%rsp) movq %r11, BTABENT+56(%rsp) movq %r12, BTABENT+64(%rsp) movq %r13, BTABENT+72(%rsp) movq %r14, BTABENT+80(%rsp) movq %r15, BTABENT+88(%rsp) // Index for table entry... movq SCALAR+24(%rsp), %rax shrq $60, %rax movq %rax, bf // ...and constant-time indexing based on that index. // Do the Y and Z fields first, to save on registers... movl $1, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx movl $1, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d leaq TAB+32(%rsp), %rbp cmpq $1, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $2, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $3, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $4, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $5, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $6, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $7, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), 
%rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $8, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq %rax, TABENT+32(%rsp) movq %rbx, TABENT+40(%rsp) movq %rcx, TABENT+48(%rsp) movq %rdx, TABENT+56(%rsp) movq %r8, TABENT+64(%rsp) movq %r9, TABENT+72(%rsp) movq %r10, TABENT+80(%rsp) movq %r11, TABENT+88(%rsp) // ...followed by the X and W fields leaq TAB(%rsp), %rbp xorl %eax, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx xorl %r8d, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d cmpq $1, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $2, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $3, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $4, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $5, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $6, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $7, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $8, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 movq %rax, TABENT(%rsp) movq %rbx, TABENT+8(%rsp) movq %rcx, TABENT+16(%rsp) movq %rdx, TABENT+24(%rsp) movq %r8, TABENT+96(%rsp) movq %r9, TABENT+104(%rsp) movq %r10, 
TABENT+112(%rsp) movq %r11, TABENT+120(%rsp) // Add those elements to initialize the accumulator for bit position 252 leaq ACC(%rsp), %rdi leaq TABENT(%rsp), %rsi leaq BTABENT(%rsp), %rbp callq edwards25519_scalarmuldouble_alt_pepadd // Main loop with acc = [scalar/2^i] * point + [bscalar/2^i] * basepoint // Start with i = 252 for bits 248..251 and go down four at a time to 3..0 edwards25519_scalarmuldouble_alt_loop: movq i, %rax subq $4, %rax movq %rax, i // Double to acc' = 2 * acc leaq ACC(%rsp), %rdi leaq ACC(%rsp), %rsi callq edwards25519_scalarmuldouble_alt_pdouble // Get btable entry, first getting the adjusted bitfield... movq i, %rax movq %rax, %rcx shrq $6, %rax movq 32(%rsp,%rax,8), %rax shrq %cl, %rax andq $15, %rax subq $8, %rax sbbq %rcx, %rcx xorq %rcx, %rax subq %rcx, %rax movq %rcx, cf movq %rax, bf // ... then doing constant-time lookup with the appropriate index... movl $1, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx movl $1, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d xorl %r12d, %r12d xorl %r13d, %r13d xorl %r14d, %r14d xorl %r15d, %r15d leaq edwards25519_scalarmuldouble_alt_table(%rip), %rbp cmpq $1, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $2, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $3, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $4, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $5, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $6, bf 
movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $7, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $8, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 // ... then optionally negating before storing. The table entry // is in precomputed form and we currently have // // [%rdx;%rcx;%rbx;%rax] = y - x // [%r11;%r10;%r9;%r8] = x + y // [%r15;%r14;%r13;%r12] = 2 * d * x * y // // Negation for Edwards curves is -(x,y) = (-x,y), which in this modified // form amounts to swapping the first two fields and negating the third. // The negation does not always fully reduce even mod 2^256-38 in the zero // case, instead giving -0 = 2^256-38. But that is fine since the result is // always fed to a multiplication inside the "pepadd" function below that // handles any 256-bit input. movq cf, %rdi testq %rdi, %rdi movq %rax, %rsi cmovnzq %r8, %rsi cmovnzq %rax, %r8 movq %rsi, BTABENT(%rsp) movq %r8, BTABENT+32(%rsp) movq %rbx, %rsi cmovnzq %r9, %rsi cmovnzq %rbx, %r9 movq %rsi, BTABENT+8(%rsp) movq %r9, BTABENT+40(%rsp) movq %rcx, %rsi cmovnzq %r10, %rsi cmovnzq %rcx, %r10 movq %rsi, BTABENT+16(%rsp) movq %r10, BTABENT+48(%rsp) movq %rdx, %rsi cmovnzq %r11, %rsi cmovnzq %rdx, %r11 movq %rsi, BTABENT+24(%rsp) movq %r11, BTABENT+56(%rsp) xorq %rdi, %r12 xorq %rdi, %r13 xorq %rdi, %r14 xorq %rdi, %r15 andq $37, %rdi subq %rdi, %r12 sbbq $0, %r13 sbbq $0, %r14 sbbq $0, %r15 movq %r12, BTABENT+64(%rsp) movq %r13, BTABENT+72(%rsp) movq %r14, BTABENT+80(%rsp) movq %r15, BTABENT+88(%rsp) // Get table entry, first getting the adjusted bitfield... 
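// Sketch (with assumed names) of the adjusted-bitfield step announced above,
// the same computation already used for the basepoint table: take the 4-bit
// window of the recoded scalar at bit position i (a multiple of 4 in the
// loop), remove the +8 bias, and split the signed digit into an absolute
// value bf in 0..8 plus an all-ones/zero sign mask cf.

def adjusted_bitfield(recoded_scalar, i):
    d = ((recoded_scalar >> i) & 0xF) - 8   # signed digit in -8..7
    cf = -1 if d < 0 else 0                 # what sbbq %rcx, %rcx produces
    bf = (d ^ cf) - cf                      # two's-complement absolute value
    return bf, cf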
movq i, %rax movq %rax, %rcx shrq $6, %rax movq (%rsp,%rax,8), %rax shrq %cl, %rax andq $15, %rax subq $8, %rax sbbq %rcx, %rcx xorq %rcx, %rax subq %rcx, %rax movq %rcx, cf movq %rax, bf // ...and constant-time indexing based on that index // Do the Y and Z fields first, to save on registers // and store them back (they don't need any modification) movl $1, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx movl $1, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d leaq TAB+32(%rsp), %rbp cmpq $1, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $2, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $3, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $4, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $5, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $6, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $7, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $8, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq %rax, TABENT+32(%rsp) movq %rbx, TABENT+40(%rsp) movq %rcx, TABENT+48(%rsp) movq %rdx, TABENT+56(%rsp) movq %r8, TABENT+64(%rsp) movq %r9, TABENT+72(%rsp) movq %r10, TABENT+80(%rsp) movq %r11, TABENT+88(%rsp) // Now do the X and W fields... 
leaq TAB(%rsp), %rbp xorl %eax, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx xorl %r8d, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d cmpq $1, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $2, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $3, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $4, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $5, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $6, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $7, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $8, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 // ... then optionally negate before storing the X and W fields. This // time the table entry is extended-projective, and is here: // // [%rdx;%rcx;%rbx;%rax] = X // [tabent+32] = Y // [tabent+64] = Z // [%r11;%r10;%r9;%r8] = W // // This time we just need to negate the X and the W fields. // The crude way negation is done can result in values of X or W // (when initially zero before negation) being exactly equal to // 2^256-38, but the "pepadd" function handles that correctly. 
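// A minimal model of the conditional negation performed next, under the same
// double-modulus convention (illustration only): XORing every limb with an
// all-ones mask gives (2^256 - 1) - x, and subtracting a further 37 yields
// (2^256 - 38) - x, which is -x modulo 2^256 - 38.  With a zero mask nothing
// changes, and for x = 0 the result is the non-canonical zero 2^256 - 38
// mentioned above.

def cond_negate_2p(x, negate):
    if not negate:
        return x
    return ((2**256 - 1) - x) - 37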
movq cf, %rdi xorq %rdi, %rax xorq %rdi, %rbx xorq %rdi, %rcx xorq %rdi, %rdx xorq %rdi, %r8 xorq %rdi, %r9 xorq %rdi, %r10 xorq %rdi, %r11 andq $37, %rdi subq %rdi, %rax sbbq $0, %rbx sbbq $0, %rcx sbbq $0, %rdx movq %rax, TABENT(%rsp) movq %rbx, TABENT+8(%rsp) movq %rcx, TABENT+16(%rsp) movq %rdx, TABENT+24(%rsp) subq %rdi, %r8 sbbq $0, %r9 sbbq $0, %r10 sbbq $0, %r11 movq %r8, TABENT+96(%rsp) movq %r9, TABENT+104(%rsp) movq %r10, TABENT+112(%rsp) movq %r11, TABENT+120(%rsp) // Double to acc' = 4 * acc leaq ACC(%rsp), %rdi leaq ACC(%rsp), %rsi callq edwards25519_scalarmuldouble_alt_pdouble // Add tabent := tabent + btabent leaq TABENT(%rsp), %rdi leaq TABENT(%rsp), %rsi leaq BTABENT(%rsp), %rbp callq edwards25519_scalarmuldouble_alt_pepadd // Double to acc' = 8 * acc leaq ACC(%rsp), %rdi leaq ACC(%rsp), %rsi callq edwards25519_scalarmuldouble_alt_pdouble // Double to acc' = 16 * acc leaq ACC(%rsp), %rdi leaq ACC(%rsp), %rsi callq edwards25519_scalarmuldouble_alt_epdouble // Add table entry, acc := acc + tabent leaq ACC(%rsp), %rdi leaq ACC(%rsp), %rsi leaq TABENT(%rsp), %rbp callq edwards25519_scalarmuldouble_alt_epadd // Loop down movq i, %rax testq %rax, %rax jnz edwards25519_scalarmuldouble_alt_loop // Prepare to call the modular inverse function to get tab = 1/z leaq TAB(%rsp), %rdi leaq ACC+64(%rsp), %rsi // Inline copy of bignum_inv_p25519, identical except for stripping out // the prologue and epilogue saving and restoring registers and making // and reclaiming room on the stack. For more details and explanations see // "x86/curve25519/bignum_inv_p25519.S". Note that the stack it uses for // its own temporaries is 208 bytes, so it has no effect on variables // that are needed in the rest of our computation here: res, tab and acc. movq %rdi, 0xc0(%rsp) xorl %eax, %eax leaq -0x13(%rax), %rcx notq %rax movq %rcx, (%rsp) movq %rax, 0x8(%rsp) movq %rax, 0x10(%rsp) btr $0x3f, %rax movq %rax, 0x18(%rsp) movq (%rsi), %rdx movq 0x8(%rsi), %rcx movq 0x10(%rsi), %r8 movq 0x18(%rsi), %r9 movl $0x1, %eax xorl %r10d, %r10d bts $0x3f, %r9 adcq %r10, %rax imulq $0x13, %rax, %rax addq %rax, %rdx adcq %r10, %rcx adcq %r10, %r8 adcq %r10, %r9 movl $0x13, %eax cmovbq %r10, %rax subq %rax, %rdx sbbq %r10, %rcx sbbq %r10, %r8 sbbq %r10, %r9 btr $0x3f, %r9 movq %rdx, 0x20(%rsp) movq %rcx, 0x28(%rsp) movq %r8, 0x30(%rsp) movq %r9, 0x38(%rsp) xorl %eax, %eax movq %rax, 0x40(%rsp) movq %rax, 0x48(%rsp) movq %rax, 0x50(%rsp) movq %rax, 0x58(%rsp) movabsq $0xa0f99e2375022099, %rax movq %rax, 0x60(%rsp) movabsq $0xa8c68f3f1d132595, %rax movq %rax, 0x68(%rsp) movabsq $0x6c6c893805ac5242, %rax movq %rax, 0x70(%rsp) movabsq $0x276508b241770615, %rax movq %rax, 0x78(%rsp) movq $0xa, 0x90(%rsp) movq $0x1, 0x98(%rsp) jmp edwards25519_scalarmuldouble_alt_midloop edwards25519_scalarmuldouble_alt_inverseloop: movq %r8, %r9 sarq $0x3f, %r9 xorq %r9, %r8 subq %r9, %r8 movq %r10, %r11 sarq $0x3f, %r11 xorq %r11, %r10 subq %r11, %r10 movq %r12, %r13 sarq $0x3f, %r13 xorq %r13, %r12 subq %r13, %r12 movq %r14, %r15 sarq $0x3f, %r15 xorq %r15, %r14 subq %r15, %r14 movq %r8, %rax andq %r9, %rax movq %r10, %rdi andq %r11, %rdi addq %rax, %rdi movq %rdi, 0x80(%rsp) movq %r12, %rax andq %r13, %rax movq %r14, %rsi andq %r15, %rsi addq %rax, %rsi movq %rsi, 0x88(%rsp) xorl %ebx, %ebx movq (%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq 0x20(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rdi adcq %rdx, %rbx xorl %ebp, %ebp movq (%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rsi adcq %rdx, %rbp movq 
0x20(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp xorl %ecx, %ecx movq 0x8(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x28(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx shrdq $0x3b, %rbx, %rdi movq %rdi, (%rsp) xorl %edi, %edi movq 0x8(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbp adcq %rdx, %rdi movq 0x28(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rdi shrdq $0x3b, %rbp, %rsi movq %rsi, 0x20(%rsp) xorl %esi, %esi movq 0x10(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rsi movq 0x30(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rsi shrdq $0x3b, %rcx, %rbx movq %rbx, 0x8(%rsp) xorl %ebx, %ebx movq 0x10(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rdi adcq %rdx, %rbx movq 0x30(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rdi adcq %rdx, %rbx shrdq $0x3b, %rdi, %rbp movq %rbp, 0x28(%rsp) movq 0x18(%rsp), %rax xorq %r9, %rax movq %rax, %rbp sarq $0x3f, %rbp andq %r8, %rbp negq %rbp mulq %r8 addq %rax, %rsi adcq %rdx, %rbp movq 0x38(%rsp), %rax xorq %r11, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r10, %rdx subq %rdx, %rbp mulq %r10 addq %rax, %rsi adcq %rdx, %rbp shrdq $0x3b, %rsi, %rcx movq %rcx, 0x10(%rsp) shrdq $0x3b, %rbp, %rsi movq 0x18(%rsp), %rax movq %rsi, 0x18(%rsp) xorq %r13, %rax movq %rax, %rsi sarq $0x3f, %rsi andq %r12, %rsi negq %rsi mulq %r12 addq %rax, %rbx adcq %rdx, %rsi movq 0x38(%rsp), %rax xorq %r15, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r14, %rdx subq %rdx, %rsi mulq %r14 addq %rax, %rbx adcq %rdx, %rsi shrdq $0x3b, %rbx, %rdi movq %rdi, 0x30(%rsp) shrdq $0x3b, %rsi, %rbx movq %rbx, 0x38(%rsp) movq 0x80(%rsp), %rbx movq 0x88(%rsp), %rbp xorl %ecx, %ecx movq 0x40(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x60(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x40(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x40(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x60(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x60(%rsp) xorl %ebx, %ebx movq 0x48(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x68(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq 0x48(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, 0x48(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq 0x68(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, 0x68(%rsp) xorl %ecx, %ecx movq 0x50(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x70(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x50(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x50(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x70(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x70(%rsp) movq 0x58(%rsp), %rax xorq %r9, %rax movq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x78(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rcx adcq %rbx, %rdx movq %rdx, %rbx shldq $0x1, %rcx, %rdx sarq $0x3f, %rbx addq %rbx, %rdx movl $0x13, %eax imulq %rdx movq 0x40(%rsp), %r8 addq %rax, %r8 movq %r8, 0x40(%rsp) movq 0x48(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x48(%rsp) movq 0x50(%rsp), %r8 adcq %rbx, %r8 movq %r8, 0x50(%rsp) adcq %rbx, %rcx shlq $0x3f, %rax addq %rax, %rcx movq 0x58(%rsp), %rax movq %rcx, 0x58(%rsp) xorq %r13, %rax 
movq %r13, %rcx andq %r12, %rcx negq %rcx mulq %r12 addq %rax, %rsi adcq %rdx, %rcx movq 0x78(%rsp), %rax xorq %r15, %rax movq %r15, %rdx andq %r14, %rdx subq %rdx, %rcx mulq %r14 addq %rax, %rsi adcq %rcx, %rdx movq %rdx, %rcx shldq $0x1, %rsi, %rdx sarq $0x3f, %rcx movl $0x13, %eax addq %rcx, %rdx imulq %rdx movq 0x60(%rsp), %r8 addq %rax, %r8 movq %r8, 0x60(%rsp) movq 0x68(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x68(%rsp) movq 0x70(%rsp), %r8 adcq %rcx, %r8 movq %r8, 0x70(%rsp) adcq %rcx, %rsi shlq $0x3f, %rax addq %rax, %rsi movq %rsi, 0x78(%rsp) edwards25519_scalarmuldouble_alt_midloop: movq 0x98(%rsp), %rsi movq (%rsp), %rdx movq 0x20(%rsp), %rcx movq %rdx, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax xorl %ebp, %ebp movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, 
%rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %rdx leaq (%rcx,%rax), %rdi shlq $0x16, %rdx shlq $0x16, %rdi sarq $0x2b, %rdx sarq $0x2b, %rdi movabsq $0x20000100000, %rax leaq (%rbx,%rax), %rbx leaq (%rcx,%rax), %rcx sarq $0x2a, %rbx sarq $0x2a, %rcx movq %rdx, 0xa0(%rsp) movq %rbx, 0xa8(%rsp) movq %rdi, 0xb0(%rsp) movq %rcx, 0xb8(%rsp) movq (%rsp), %r12 imulq %r12, %rdi imulq %rdx, %r12 movq 0x20(%rsp), %r13 imulq %r13, %rbx imulq %rcx, %r13 addq %rbx, %r12 addq %rdi, %r13 sarq $0x14, %r12 sarq $0x14, %r13 movq %r12, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi 
sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %r8 leaq (%rcx,%rax), %r10 shlq $0x16, %r8 shlq $0x16, %r10 sarq $0x2b, %r8 sarq $0x2b, %r10 movabsq $0x20000100000, %rax leaq (%rbx,%rax), %r15 leaq (%rcx,%rax), %r11 sarq $0x2a, %r15 sarq $0x2a, %r11 movq %r13, %rbx movq %r12, %rcx imulq %r8, %r12 imulq %r15, %rbx addq %rbx, %r12 imulq %r11, %r13 imulq %r10, %rcx addq %rcx, %r13 sarq $0x14, %r12 sarq 
$0x14, %r13 movq %r12, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq 0xa0(%rsp), %rax imulq %r8, %rax movq 0xb0(%rsp), %rdx imulq %r15, %rdx imulq 0xa8(%rsp), %r8 imulq 0xb8(%rsp), %r15 addq %r8, %r15 leaq (%rax,%rdx), %r9 movq 0xa0(%rsp), %rax imulq %r10, %rax movq 0xb0(%rsp), %rdx imulq %r11, %rdx imulq 0xa8(%rsp), %r10 imulq 0xb8(%rsp), %r11 addq %r10, %r11 leaq (%rax,%rdx), %r13 movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, 
%rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %r8 leaq (%rcx,%rax), %r12 shlq $0x15, %r8 shlq $0x15, %r12 sarq $0x2b, %r8 sarq $0x2b, %r12 movabsq $0x20000100000, %rax leaq (%rbx,%rax), %r10 leaq (%rcx,%rax), %r14 sarq $0x2b, %r10 sarq $0x2b, %r14 movq %r9, %rax imulq %r8, %rax movq %r13, %rdx imulq %r10, %rdx imulq %r15, %r8 imulq %r11, %r10 addq %r8, %r10 leaq (%rax,%rdx), %r8 movq %r9, %rax imulq %r12, %rax movq %r13, %rdx imulq %r14, %rdx imulq %r15, %r12 imulq %r11, %r14 addq %r12, %r14 leaq (%rax,%rdx), %r12 movq %rsi, 0x98(%rsp) decq 0x90(%rsp) jne edwards25519_scalarmuldouble_alt_inverseloop movq (%rsp), %rax movq 0x20(%rsp), %rcx imulq %r8, %rax imulq %r10, %rcx addq %rcx, %rax sarq $0x3f, %rax movq %r8, %r9 sarq $0x3f, %r9 xorq %r9, %r8 subq %r9, %r8 xorq %rax, %r9 movq %r10, %r11 sarq $0x3f, %r11 xorq %r11, %r10 subq %r11, %r10 xorq %rax, %r11 movq %r12, %r13 sarq $0x3f, %r13 xorq %r13, %r12 subq %r13, %r12 xorq %rax, %r13 movq %r14, %r15 sarq $0x3f, %r15 xorq %r15, %r14 subq %r15, %r14 xorq %rax, %r15 movq %r8, %rax andq %r9, %rax movq %r10, %r12 andq %r11, %r12 addq %rax, %r12 xorl %r13d, %r13d movq 0x40(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r12 adcq %rdx, %r13 movq 0x60(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movq 0x48(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r13 adcq %rdx, %r14 movq 0x68(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq 0x50(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq 0x70(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 adcq %rdx, %r15 movq 0x58(%rsp), %rax xorq %r9, %rax andq %r8, %r9 negq %r9 mulq %r8 addq %rax, %r15 adcq %rdx, %r9 movq 0x78(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %r9 mulq %r10 addq %rax, %r15 adcq %rdx, %r9 movq %r9, %rax shldq $0x1, %r15, %rax sarq $0x3f, %r9 movl $0x13, %ebx leaq 0x1(%rax,%r9,1), %rax imulq %rbx xorl %ebp, %ebp addq %rax, %r12 adcq %rdx, %r13 adcq %r9, %r14 adcq %r9, %r15 shlq $0x3f, %rax addq %rax, %r15 cmovns %rbp, %rbx subq %rbx, %r12 sbbq %rbp, %r13 sbbq %rbp, %r14 sbbq %rbp, %r15 btr $0x3f, %r15 movq 0xc0(%rsp), %rdi movq %r12, (%rdi) movq %r13, 0x8(%rdi) movq %r14, 0x10(%rdi) movq %r15, 0x18(%rdi) // Store result movq res, %rdi leaq ACC(%rsp), %rsi leaq TAB(%rsp), %rbp mul_p25519(x_0,x_1,x_2) movq res, %rdi addq $32, %rdi leaq ACC+32(%rsp), %rsi leaq 
TAB(%rsp), %rbp mul_p25519(x_0,x_1,x_2) // Restore stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx ret // **************************************************************************** // Localized versions of subroutines. // These are close to the standalone functions "edwards25519_epdouble" etc., // but are only maintaining reduction modulo 2^256 - 38, not 2^255 - 19. // **************************************************************************** edwards25519_scalarmuldouble_alt_epdouble: sub $(5*NUMSIZE), %rsp add_twice4(t0,x_1,y_1) sqr_4(t1,z_1) sqr_4(t2,x_1) sqr_4(t3,y_1) double_twice4(t1,t1) sqr_4(t0,t0) add_twice4(t4,t2,t3) sub_twice4(t2,t2,t3) add_twice4(t3,t1,t2) sub_twice4(t1,t4,t0) mul_4(y_0,t2,t4) mul_4(z_0,t3,t2) mul_4(w_0,t1,t4) mul_4(x_0,t1,t3) add $(5*NUMSIZE), %rsp ret edwards25519_scalarmuldouble_alt_pdouble: sub $(5*NUMSIZE), %rsp add_twice4(t0,x_1,y_1) sqr_4(t1,z_1) sqr_4(t2,x_1) sqr_4(t3,y_1) double_twice4(t1,t1) sqr_4(t0,t0) add_twice4(t4,t2,t3) sub_twice4(t2,t2,t3) add_twice4(t3,t1,t2) sub_twice4(t1,t4,t0) mul_4(y_0,t2,t4) mul_4(z_0,t3,t2) mul_4(x_0,t1,t3) add $(5*NUMSIZE), %rsp ret edwards25519_scalarmuldouble_alt_epadd: sub $(6*NUMSIZE), %rsp mul_4(t0,w_1,w_2) sub_twice4(t1,y_1,x_1) sub_twice4(t2,y_2,x_2) add_twice4(t3,y_1,x_1) add_twice4(t4,y_2,x_2) double_twice4(t5,z_2) mul_4(t1,t1,t2) mul_4(t3,t3,t4) load_k25519(t2) mul_4(t2,t2,t0) mul_4(t4,z_1,t5) sub_twice4(t0,t3,t1) add_twice4(t5,t3,t1) sub_twice4(t1,t4,t2) add_twice4(t3,t4,t2) mul_4(w_0,t0,t5) mul_4(x_0,t0,t1) mul_4(y_0,t3,t5) mul_4(z_0,t1,t3) add $(6*NUMSIZE), %rsp ret edwards25519_scalarmuldouble_alt_pepadd: sub $(6*NUMSIZE), %rsp double_twice4(t0,z_1); sub_twice4(t1,y_1,x_1); add_twice4(t2,y_1,x_1); mul_4(t3,w_1,z_2); mul_4(t1,t1,x_2); mul_4(t2,t2,y_2); sub_twice4(t4,t0,t3); add_twice4(t0,t0,t3); sub_twice4(t5,t2,t1); add_twice4(t1,t2,t1); mul_4(z_0,t4,t0); mul_4(x_0,t5,t4); mul_4(y_0,t0,t1); mul_4(w_0,t5,t1); add $(6*NUMSIZE), %rsp ret // **************************************************************************** // The precomputed data (all read-only). This is currently part of the same // text section, which gives position-independent code with simple PC-relative // addressing. However it could be put in a separate section via something like // // .section .rodata // **************************************************************************** // Precomputed table of multiples of generator for edwards25519 // all in precomputed extended-projective (y-x,x+y,2*d*x*y) triples. 
edwards25519_scalarmuldouble_alt_table: // 1 * G .quad 0x9d103905d740913e .quad 0xfd399f05d140beb3 .quad 0xa5c18434688f8a09 .quad 0x44fd2f9298f81267 .quad 0x2fbc93c6f58c3b85 .quad 0xcf932dc6fb8c0e19 .quad 0x270b4898643d42c2 .quad 0x07cf9d3a33d4ba65 .quad 0xabc91205877aaa68 .quad 0x26d9e823ccaac49e .quad 0x5a1b7dcbdd43598c .quad 0x6f117b689f0c65a8 // 2 * G .quad 0x8a99a56042b4d5a8 .quad 0x8f2b810c4e60acf6 .quad 0xe09e236bb16e37aa .quad 0x6bb595a669c92555 .quad 0x9224e7fc933c71d7 .quad 0x9f469d967a0ff5b5 .quad 0x5aa69a65e1d60702 .quad 0x590c063fa87d2e2e .quad 0x43faa8b3a59b7a5f .quad 0x36c16bdd5d9acf78 .quad 0x500fa0840b3d6a31 .quad 0x701af5b13ea50b73 // 3 * G .quad 0x56611fe8a4fcd265 .quad 0x3bd353fde5c1ba7d .quad 0x8131f31a214bd6bd .quad 0x2ab91587555bda62 .quad 0xaf25b0a84cee9730 .quad 0x025a8430e8864b8a .quad 0xc11b50029f016732 .quad 0x7a164e1b9a80f8f4 .quad 0x14ae933f0dd0d889 .quad 0x589423221c35da62 .quad 0xd170e5458cf2db4c .quad 0x5a2826af12b9b4c6 // 4 * G .quad 0x95fe050a056818bf .quad 0x327e89715660faa9 .quad 0xc3e8e3cd06a05073 .quad 0x27933f4c7445a49a .quad 0x287351b98efc099f .quad 0x6765c6f47dfd2538 .quad 0xca348d3dfb0a9265 .quad 0x680e910321e58727 .quad 0x5a13fbe9c476ff09 .quad 0x6e9e39457b5cc172 .quad 0x5ddbdcf9102b4494 .quad 0x7f9d0cbf63553e2b // 5 * G .quad 0x7f9182c3a447d6ba .quad 0xd50014d14b2729b7 .quad 0xe33cf11cb864a087 .quad 0x154a7e73eb1b55f3 .quad 0xa212bc4408a5bb33 .quad 0x8d5048c3c75eed02 .quad 0xdd1beb0c5abfec44 .quad 0x2945ccf146e206eb .quad 0xbcbbdbf1812a8285 .quad 0x270e0807d0bdd1fc .quad 0xb41b670b1bbda72d .quad 0x43aabe696b3bb69a // 6 * G .quad 0x499806b67b7d8ca4 .quad 0x575be28427d22739 .quad 0xbb085ce7204553b9 .quad 0x38b64c41ae417884 .quad 0x3a0ceeeb77157131 .quad 0x9b27158900c8af88 .quad 0x8065b668da59a736 .quad 0x51e57bb6a2cc38bd .quad 0x85ac326702ea4b71 .quad 0xbe70e00341a1bb01 .quad 0x53e4a24b083bc144 .quad 0x10b8e91a9f0d61e3 // 7 * G .quad 0xba6f2c9aaa3221b1 .quad 0x6ca021533bba23a7 .quad 0x9dea764f92192c3a .quad 0x1d6edd5d2e5317e0 .quad 0x6b1a5cd0944ea3bf .quad 0x7470353ab39dc0d2 .quad 0x71b2528228542e49 .quad 0x461bea69283c927e .quad 0xf1836dc801b8b3a2 .quad 0xb3035f47053ea49a .quad 0x529c41ba5877adf3 .quad 0x7a9fbb1c6a0f90a7 // 8 * G .quad 0xe2a75dedf39234d9 .quad 0x963d7680e1b558f9 .quad 0x2c2741ac6e3c23fb .quad 0x3a9024a1320e01c3 .quad 0x59b7596604dd3e8f .quad 0x6cb30377e288702c .quad 0xb1339c665ed9c323 .quad 0x0915e76061bce52f .quad 0xe7c1f5d9c9a2911a .quad 0xb8a371788bcca7d7 .quad 0x636412190eb62a32 .quad 0x26907c5c2ecc4e95 #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
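For reference, following the (y-x,x+y,2*d*x*y) comment above: each table entry stores a multiple k * B of the basepoint in exactly the form consumed by the pepadd routine (its x_2, y_2 and z_2 operands), so no per-addition recomputation of these quantities is needed. Given an entry (a, b, c) = (y - x, y + x, 2*d*x*y), the affine point can be recovered as

    x = (b - a) / 2  (mod 2^255 - 19)
    y = (b + a) / 2  (mod 2^255 - 19)

with d the edwards25519 curve constant appearing in the third component.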
marvin-hansen/iggy-streaming-system
71,797
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/curve25519/curve25519_x25519.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // The x25519 function for curve25519 // Inputs scalar[4], point[4]; output res[4] // // extern void curve25519_x25519 // (uint64_t res[static 4],uint64_t scalar[static 4],uint64_t point[static 4]) // // The function has a second prototype considering the arguments as arrays // of bytes rather than 64-bit words. The underlying code is the same, since // the x86 platform is little-endian. // // extern void curve25519_x25519_byte // (uint8_t res[static 32],uint8_t scalar[static 32],uint8_t point[static 32]) // // Given a scalar n and the X coordinate of an input point P = (X,Y) on // curve25519 (Y can live in any extension field of characteristic 2^255-19), // this returns the X coordinate of n * P = (X, Y), or 0 when n * P is the // point at infinity. Both n and X inputs are first slightly modified/mangled // as specified in the relevant RFC (https://www.rfc-editor.org/rfc/rfc7748); // in particular the lower three bits of n are set to zero. Does not implement // the zero-check specified in Section 6.1. // // Standard x86-64 ABI: RDI = res, RSI = scalar, RDX = point // Microsoft x64 ABI: RCX = res, RDX = scalar, R8 = point // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(curve25519_x25519) S2N_BN_SYM_PRIVACY_DIRECTIVE(curve25519_x25519) S2N_BN_SYM_VISIBILITY_DIRECTIVE(curve25519_x25519_byte) S2N_BN_SYM_PRIVACY_DIRECTIVE(curve25519_x25519_byte) .text // Size of individual field elements #define NUMSIZE 32 // Stable homes for the input result argument during the whole body // and other variables that are only needed prior to the modular inverse. #define res 12*NUMSIZE(%rsp) #define i 12*NUMSIZE+8(%rsp) #define swap 12*NUMSIZE+16(%rsp) // Pointers to result x coord to be written, assuming the base "res" // has been loaded into %rbp #define resx 0(%rbp) // Pointer-offset pairs for temporaries on stack with some aliasing. // Both dmsn and dnsm need space for >= 5 digits, and we allocate 8 #define scalar (0*NUMSIZE)(%rsp) #define pointx (1*NUMSIZE)(%rsp) #define dm (2*NUMSIZE)(%rsp) #define zm (3*NUMSIZE)(%rsp) #define sm (3*NUMSIZE)(%rsp) #define dpro (3*NUMSIZE)(%rsp) #define sn (4*NUMSIZE)(%rsp) #define dn (5*NUMSIZE)(%rsp) #define e (5*NUMSIZE)(%rsp) #define dmsn (6*NUMSIZE)(%rsp) #define p (6*NUMSIZE)(%rsp) #define zn (7*NUMSIZE)(%rsp) #define xm (8*NUMSIZE)(%rsp) #define dnsm (8*NUMSIZE)(%rsp) #define spro (8*NUMSIZE)(%rsp) #define xn (10*NUMSIZE)(%rsp) #define s (10*NUMSIZE)(%rsp) #define d (11*NUMSIZE)(%rsp) // Total size to reserve on the stack // This includes space for the 3 other variables above // and rounds up to a multiple of 32 #define NSPACE (13*NUMSIZE) // Macro wrapping up the basic field operation bignum_mul_p25519, only // trivially different from a pure function call to that subroutine. 
#define mul_p25519(P0,P1,P2) \ xorl %edi, %edi ; \ movq P2, %rdx ; \ mulxq P1, %r8, %r9 ; \ mulxq 0x8+P1, %rax, %r10 ; \ addq %rax, %r9 ; \ mulxq 0x10+P1, %rax, %r11 ; \ adcq %rax, %r10 ; \ mulxq 0x18+P1, %rax, %r12 ; \ adcq %rax, %r11 ; \ adcq %rdi, %r12 ; \ xorl %edi, %edi ; \ movq 0x8+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x18+P1, %rax, %r13 ; \ adcxq %rax, %r12 ; \ adoxq %rdi, %r13 ; \ adcxq %rdi, %r13 ; \ xorl %edi, %edi ; \ movq 0x10+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x18+P1, %rax, %r14 ; \ adcxq %rax, %r13 ; \ adoxq %rdi, %r14 ; \ adcxq %rdi, %r14 ; \ xorl %edi, %edi ; \ movq 0x18+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x18+P1, %rax, %r15 ; \ adcxq %rax, %r14 ; \ adoxq %rdi, %r15 ; \ adcxq %rdi, %r15 ; \ movl $0x26, %edx ; \ xorl %edi, %edi ; \ mulxq %r12, %rax, %rbx ; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ mulxq %r13, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq %r14, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq %r15, %rax, %r12 ; \ adcxq %rax, %r11 ; \ adoxq %rdi, %r12 ; \ adcxq %rdi, %r12 ; \ shldq $0x1, %r11, %r12 ; \ movl $0x13, %edx ; \ incq %r12; \ bts $63, %r11 ; \ mulxq %r12, %rax, %rbx ; \ addq %rax, %r8 ; \ adcq %rbx, %r9 ; \ adcq %rdi, %r10 ; \ adcq %rdi, %r11 ; \ sbbq %rax, %rax ; \ notq %rax; \ andq %rdx, %rax ; \ subq %rax, %r8 ; \ sbbq %rdi, %r9 ; \ sbbq %rdi, %r10 ; \ sbbq %rdi, %r11 ; \ btr $63, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // A version of multiplication that only guarantees output < 2 * p_25519. // This basically skips the +1 and final correction in quotient estimation. 
#define mul_4(P0,P1,P2) \ xorl %ecx, %ecx ; \ movq P2, %rdx ; \ mulxq P1, %r8, %r9 ; \ mulxq 0x8+P1, %rax, %r10 ; \ addq %rax, %r9 ; \ mulxq 0x10+P1, %rax, %r11 ; \ adcq %rax, %r10 ; \ mulxq 0x18+P1, %rax, %r12 ; \ adcq %rax, %r11 ; \ adcq %rcx, %r12 ; \ xorl %ecx, %ecx ; \ movq 0x8+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x18+P1, %rax, %r13 ; \ adcxq %rax, %r12 ; \ adoxq %rcx, %r13 ; \ adcxq %rcx, %r13 ; \ xorl %ecx, %ecx ; \ movq 0x10+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x18+P1, %rax, %r14 ; \ adcxq %rax, %r13 ; \ adoxq %rcx, %r14 ; \ adcxq %rcx, %r14 ; \ xorl %ecx, %ecx ; \ movq 0x18+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x18+P1, %rax, %r15 ; \ adcxq %rax, %r14 ; \ adoxq %rcx, %r15 ; \ adcxq %rcx, %r15 ; \ movl $0x26, %edx ; \ xorl %ecx, %ecx ; \ mulxq %r12, %rax, %rbx ; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ mulxq %r13, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq %r14, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq %r15, %rax, %r12 ; \ adcxq %rax, %r11 ; \ adoxq %rcx, %r12 ; \ adcxq %rcx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Multiplication just giving a 5-digit result (actually < 39 * p_25519) // by not doing anything beyond the first stage of reduction #define mul_5(P0,P1,P2) \ xorl %edi, %edi ; \ movq P2, %rdx ; \ mulxq P1, %r8, %r9 ; \ mulxq 0x8+P1, %rax, %r10 ; \ addq %rax, %r9 ; \ mulxq 0x10+P1, %rax, %r11 ; \ adcq %rax, %r10 ; \ mulxq 0x18+P1, %rax, %r12 ; \ adcq %rax, %r11 ; \ adcq %rdi, %r12 ; \ xorl %edi, %edi ; \ movq 0x8+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x18+P1, %rax, %r13 ; \ adcxq %rax, %r12 ; \ adoxq %rdi, %r13 ; \ adcxq %rdi, %r13 ; \ xorl %edi, %edi ; \ movq 0x10+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x18+P1, %rax, %r14 ; \ adcxq %rax, %r13 ; \ adoxq %rdi, %r14 ; \ adcxq %rdi, %r14 ; \ xorl %edi, %edi ; \ movq 0x18+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x18+P1, %rax, %r15 ; \ adcxq %rax, %r14 ; \ adoxq %rdi, %r15 ; \ adcxq %rdi, %r15 ; \ movl $0x26, %edx ; \ xorl %edi, %edi ; \ mulxq %r12, %rax, %rbx ; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ mulxq %r13, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq %r14, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq %r15, 
%rax, %r12 ; \ adcxq %rax, %r11 ; \ adoxq %rdi, %r12 ; \ adcxq %rdi, %r12 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 ; \ movq %r12, 0x20+P0 // Squaring just giving a result < 2 * p_25519, which is done by // basically skipping the +1 in the quotient estimate and the final // optional correction. #define sqr_4(P0,P1) \ movq P1, %rdx ; \ mulxq %rdx, %r8, %r15 ; \ mulxq 0x8+P1, %r9, %r10 ; \ mulxq 0x18+P1, %r11, %r12 ; \ movq 0x10+P1, %rdx ; \ mulxq 0x18+P1, %r13, %r14 ; \ xorl %ebx, %ebx ; \ mulxq P1, %rax, %rcx ; \ adcxq %rax, %r10 ; \ adoxq %rcx, %r11 ; \ mulxq 0x8+P1, %rax, %rcx ; \ adcxq %rax, %r11 ; \ adoxq %rcx, %r12 ; \ movq 0x18+P1, %rdx ; \ mulxq 0x8+P1, %rax, %rcx ; \ adcxq %rax, %r12 ; \ adoxq %rcx, %r13 ; \ adcxq %rbx, %r13 ; \ adoxq %rbx, %r14 ; \ adcq %rbx, %r14 ; \ xorl %ebx, %ebx ; \ adcxq %r9, %r9 ; \ adoxq %r15, %r9 ; \ movq 0x8+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r10, %r10 ; \ adoxq %rax, %r10 ; \ adcxq %r11, %r11 ; \ adoxq %rdx, %r11 ; \ movq 0x10+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r12, %r12 ; \ adoxq %rax, %r12 ; \ adcxq %r13, %r13 ; \ adoxq %rdx, %r13 ; \ movq 0x18+P1, %rdx ; \ mulxq %rdx, %rax, %r15 ; \ adcxq %r14, %r14 ; \ adoxq %rax, %r14 ; \ adcxq %rbx, %r15 ; \ adoxq %rbx, %r15 ; \ movl $0x26, %edx ; \ xorl %ebx, %ebx ; \ mulxq %r12, %rax, %rcx ; \ adcxq %rax, %r8 ; \ adoxq %rcx, %r9 ; \ mulxq %r13, %rax, %rcx ; \ adcxq %rax, %r9 ; \ adoxq %rcx, %r10 ; \ mulxq %r14, %rax, %rcx ; \ adcxq %rax, %r10 ; \ adoxq %rcx, %r11 ; \ mulxq %r15, %rax, %r12 ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ adcxq %rbx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rbx, %r9 ; \ adcq %rbx, %r10 ; \ adcq %rbx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Add 5-digit inputs and normalize to 4 digits #define add5_4(P0,P1,P2) \ movq P1, %r8 ; \ addq P2, %r8 ; \ movq 8+P1, %r9 ; \ adcq 8+P2, %r9 ; \ movq 16+P1, %r10 ; \ adcq 16+P2, %r10 ; \ movq 24+P1, %r11 ; \ adcq 24+P2, %r11 ; \ movq 32+P1, %r12 ; \ adcq 32+P2, %r12 ; \ xorl %ebx, %ebx ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rbx, %r9 ; \ adcq %rbx, %r10 ; \ adcq %rbx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Modular addition with double modulus 2 * p_25519 = 2^256 - 38. // This only ensures that the result fits in 4 digits, not that it is reduced // even w.r.t. double modulus. The result is always correct modulo provided // the sum of the inputs is < 2^256 + 2^256 - 38, so in particular provided // at least one of them is reduced double modulo. 
#define add_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ecx, %ecx ; \ addq P2, %r8 ; \ movq 0x8+P1, %r9 ; \ adcq 0x8+P2, %r9 ; \ movq 0x10+P1, %r10 ; \ adcq 0x10+P2, %r10 ; \ movq 0x18+P1, %r11 ; \ adcq 0x18+P2, %r11 ; \ movl $38, %eax ; \ cmovncq %rcx, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Modular subtraction with double modulus 2 * p_25519 = 2^256 - 38 #define sub_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ebx, %ebx ; \ subq P2, %r8 ; \ movq 8+P1, %r9 ; \ sbbq 8+P2, %r9 ; \ movl $38, %ecx ; \ movq 16+P1, %r10 ; \ sbbq 16+P2, %r10 ; \ movq 24+P1, %rax ; \ sbbq 24+P2, %rax ; \ cmovncq %rbx, %rcx ; \ subq %rcx, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq %rbx, %rax ; \ movq %r8, P0 ; \ movq %r9, 8+P0 ; \ movq %r10, 16+P0 ; \ movq %rax, 24+P0 // 5-digit subtraction with upward bias to make it positive, adding // 1000 * (2^255 - 19) = 2^256 * 500 - 19000, then normalizing to 4 digits #define sub5_4(P0,P1,P2) \ movq P1, %r8 ; \ subq P2, %r8 ; \ movq 8+P1, %r9 ; \ sbbq 8+P2, %r9 ; \ movq 16+P1, %r10 ; \ sbbq 16+P2, %r10 ; \ movq 24+P1, %r11 ; \ sbbq 24+P2, %r11 ; \ movq 32+P1, %r12 ; \ sbbq 32+P2, %r12 ; \ xorl %ebx, %ebx ; \ subq $19000, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq %rbx, %r11 ; \ sbbq %rbx, %r12 ; \ addq $500, %r12 ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rbx, %r9 ; \ adcq %rbx, %r10 ; \ adcq %rbx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Combined z = c * x + y with reduction only < 2 * p_25519 // It is assumed that 19 * (c * x + y) < 2^60 * 2^256 so we // don't need a high mul in the final part. #define cmadd_4(P0,C1,P2,P3) \ movq P3, %r8 ; \ movq 8+P3, %r9 ; \ movq 16+P3, %r10 ; \ movq 24+P3, %r11 ; \ xorl %edi, %edi ; \ movq $C1, %rdx ; \ mulxq P2, %rax, %rbx ; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ mulxq 8+P2, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq 16+P2, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 24+P2, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rdi, %rbx ; \ adcxq %rdi, %rbx ; \ shldq $0x1, %r11, %rbx ; \ btr $63, %r11 ; \ movl $0x13, %edx ; \ imulq %rdx, %rbx ; \ addq %rbx, %r8 ; \ adcq %rdi, %r9 ; \ adcq %rdi, %r10 ; \ adcq %rdi, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Multiplex: z := if NZ then x else y #define mux_4(P0,P1,P2) \ movq P1, %rax ; \ movq P2, %rcx ; \ cmovzq %rcx, %rax ; \ movq %rax, P0 ; \ movq 8+P1, %rax ; \ movq 8+P2, %rcx ; \ cmovzq %rcx, %rax ; \ movq %rax, 8+P0 ; \ movq 16+P1, %rax ; \ movq 16+P2, %rcx ; \ cmovzq %rcx, %rax ; \ movq %rax, 16+P0 ; \ movq 24+P1, %rax ; \ movq 24+P2, %rcx ; \ cmovzq %rcx, %rax ; \ movq %rax, 24+P0 S2N_BN_SYMBOL(curve25519_x25519): S2N_BN_SYMBOL(curve25519_x25519_byte): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save registers, make room for temps, preserve input arguments. pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp // Move the output pointer to a stable place movq %rdi, res // Copy the inputs to the local variables with minimal mangling: // // - The scalar is in principle turned into 01xxx...xxx000 but // in the structure below the special handling of these bits is // explicit in the main computation; the scalar is just copied. 
// // - The point x coord is reduced mod 2^255 by masking off the // top bit. In the main loop we only need reduction < 2 * p_25519. movq (%rsi), %rax movq %rax, (%rsp) movq 8(%rsi), %rax movq %rax, 8(%rsp) movq 16(%rsi), %rax movq %rax, 16(%rsp) movq 24(%rsi), %rax movq %rax, 24(%rsp) movq (%rdx), %r8 movq 8(%rdx), %r9 movq 16(%rdx), %r10 movq 24(%rdx), %r11 btr $63, %r11 movq %r8, 32(%rsp) movq %r9, 40(%rsp) movq %r10, 48(%rsp) movq %r11, 56(%rsp) // Initialize with explicit doubling in order to handle set bit 254. // Set swap = 1 and (xm,zm) = (x,1) then double as (xn,zn) = 2 * (x,1). // We use the fact that the point x coordinate is still in registers. // Since zm = 1 we could do the doubling with an operation count of // 2 * S + M instead of 2 * S + 2 * M, but it doesn't seem worth // the slight complication arising from a different linear combination. movl $1, %eax movq %rax, swap movq %r8, 256(%rsp) movq %rax, 96(%rsp) xorl %eax, %eax movq %r9, 264(%rsp) movq %rax, 104(%rsp) movq %r10, 272(%rsp) movq %rax, 112(%rsp) movq %r11, 280(%rsp) movq %rax, 120(%rsp) sub_twice4(d,xm,zm) add_twice4(s,xm,zm) sqr_4(d,d) sqr_4(s,s) sub_twice4(p,s,d) cmadd_4(e,0x1db42,p,d) mul_4(xn,s,d) mul_4(zn,p,e) // The main loop over unmodified bits from i = 253, ..., i = 3 (inclusive). // This is a classic Montgomery ladder, with the main coordinates only // reduced mod 2 * p_25519, some intermediate results even more loosely. movl $253, %eax movq %rax, i curve25519_x25519_scalarloop: // sm = xm + zm; sn = xn + zn; dm = xm - zm; dn = xn - zn sub_twice4(dm,xm,zm) add_twice4(sn,xn,zn) sub_twice4(dn,xn,zn) add_twice4(sm,xm,zm) // DOUBLING: mux d = xt - zt and s = xt + zt for appropriate choice of (xt,zt) movq i, %rdx movq %rdx, %rcx shrq $6, %rdx movq (%rsp,%rdx,8), %rdx shrq %cl, %rdx andq $1, %rdx cmpq swap, %rdx movq %rdx, swap mux_4(d,dm,dn) mux_4(s,sm,sn) // ADDING: dmsn = dm * sn; dnsm = sm * dn mul_5(dnsm,sm,dn) mul_5(dmsn,sn,dm) // DOUBLING: d = (xt - zt)^2 sqr_4(d,d) // ADDING: dpro = (dmsn - dnsm)^2, spro = (dmsn + dnsm)^2 // DOUBLING: s = (xt + zt)^2 sub5_4(dpro,dmsn,dnsm) add5_4(spro,dmsn,dnsm) sqr_4(s,s) sqr_4(dpro,dpro) // DOUBLING: p = 4 * xt * zt = s - d sub_twice4(p,s,d) // ADDING: xm' = (dmsn + dnsm)^2 sqr_4(xm,spro) // DOUBLING: e = 121666 * p + d cmadd_4(e,0x1db42,p,d) // DOUBLING: xn' = (xt + zt)^2 * (xt - zt)^2 = s * d mul_4(xn,s,d) // DOUBLING: zn' = (4 * xt * zt) * ((xt - zt)^2 + 121666 * (4 * xt * zt)) // = p * (d + 121666 * p) mul_4(zn,p,e) // ADDING: zm' = x * (dmsn - dnsm)^2 mul_4(zm,dpro,pointx) // Loop down as far as 3 (inclusive) movq i, %rax subq $1, %rax movq %rax, i cmpq $3, %rax jnc curve25519_x25519_scalarloop // Multiplex directly into (xn,zn) then do three pure doubling steps; // this accounts for the implicit zeroing of the three lowest bits // of the scalar. movq swap, %rdx testq %rdx, %rdx mux_4(xn,xm,xn) mux_4(zn,zm,zn) sub_twice4(d,xn,zn) add_twice4(s,xn,zn) sqr_4(d,d) sqr_4(s,s) sub_twice4(p,s,d) cmadd_4(e,0x1db42,p,d) mul_4(xn,s,d) mul_4(zn,p,e) sub_twice4(d,xn,zn) add_twice4(s,xn,zn) sqr_4(d,d) sqr_4(s,s) sub_twice4(p,s,d) cmadd_4(e,0x1db42,p,d) mul_4(xn,s,d) mul_4(zn,p,e) sub_twice4(d,xn,zn) add_twice4(s,xn,zn) sqr_4(d,d) sqr_4(s,s) sub_twice4(p,s,d) cmadd_4(e,0x1db42,p,d) mul_4(xn,s,d) mul_4(zn,p,e) // The projective result of the scalar multiplication is now (xn,zn). 
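// In other words, the affine X25519 result is xn * zn^-1 (mod p_25519) with
// p_25519 = 2^255 - 19, so all that remains is a single modular inversion of
// zn followed by one modular multiplication, which is what the code below does.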
// Prepare to call the modular inverse function to get zn' = 1/zn leaq 224(%rsp), %rdi leaq 224(%rsp), %rsi // Inline copy of bignum_inv_p25519, identical except for stripping out // the prologue and epilogue saving and restoring registers and making // and reclaiming room on the stack. For more details and explanations see // "x86/curve25519/bignum_inv_p25519.S". Note that the stack it uses for // its own temporaries is 208 bytes, so it has no effect on variables // that are needed in the rest of our computation here: res, xn and zn. movq %rdi, 0xc0(%rsp) xorl %eax, %eax leaq -0x13(%rax), %rcx notq %rax movq %rcx, (%rsp) movq %rax, 0x8(%rsp) movq %rax, 0x10(%rsp) btr $0x3f, %rax movq %rax, 0x18(%rsp) movq (%rsi), %rdx movq 0x8(%rsi), %rcx movq 0x10(%rsi), %r8 movq 0x18(%rsi), %r9 movl $0x1, %eax xorl %r10d, %r10d bts $0x3f, %r9 adcq %r10, %rax imulq $0x13, %rax, %rax addq %rax, %rdx adcq %r10, %rcx adcq %r10, %r8 adcq %r10, %r9 movl $0x13, %eax cmovbq %r10, %rax subq %rax, %rdx sbbq %r10, %rcx sbbq %r10, %r8 sbbq %r10, %r9 btr $0x3f, %r9 movq %rdx, 0x20(%rsp) movq %rcx, 0x28(%rsp) movq %r8, 0x30(%rsp) movq %r9, 0x38(%rsp) xorl %eax, %eax movq %rax, 0x40(%rsp) movq %rax, 0x48(%rsp) movq %rax, 0x50(%rsp) movq %rax, 0x58(%rsp) movabsq $0xa0f99e2375022099, %rax movq %rax, 0x60(%rsp) movabsq $0xa8c68f3f1d132595, %rax movq %rax, 0x68(%rsp) movabsq $0x6c6c893805ac5242, %rax movq %rax, 0x70(%rsp) movabsq $0x276508b241770615, %rax movq %rax, 0x78(%rsp) movq $0xa, 0x90(%rsp) movq $0x1, 0x98(%rsp) jmp curve25519_x25519_midloop curve25519_x25519_inverseloop: movq %r8, %r9 sarq $0x3f, %r9 xorq %r9, %r8 subq %r9, %r8 movq %r10, %r11 sarq $0x3f, %r11 xorq %r11, %r10 subq %r11, %r10 movq %r12, %r13 sarq $0x3f, %r13 xorq %r13, %r12 subq %r13, %r12 movq %r14, %r15 sarq $0x3f, %r15 xorq %r15, %r14 subq %r15, %r14 movq %r8, %rax andq %r9, %rax movq %r10, %rdi andq %r11, %rdi addq %rax, %rdi movq %rdi, 0x80(%rsp) movq %r12, %rax andq %r13, %rax movq %r14, %rsi andq %r15, %rsi addq %rax, %rsi movq %rsi, 0x88(%rsp) xorl %ebx, %ebx movq (%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq 0x20(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rdi adcq %rdx, %rbx xorl %ebp, %ebp movq (%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rsi adcq %rdx, %rbp movq 0x20(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp xorl %ecx, %ecx movq 0x8(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x28(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx shrdq $0x3b, %rbx, %rdi movq %rdi, (%rsp) xorl %edi, %edi movq 0x8(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbp adcq %rdx, %rdi movq 0x28(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rdi shrdq $0x3b, %rbp, %rsi movq %rsi, 0x20(%rsp) xorl %esi, %esi movq 0x10(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rsi movq 0x30(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rsi shrdq $0x3b, %rcx, %rbx movq %rbx, 0x8(%rsp) xorl %ebx, %ebx movq 0x10(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rdi adcq %rdx, %rbx movq 0x30(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rdi adcq %rdx, %rbx shrdq $0x3b, %rdi, %rbp movq %rbp, 0x28(%rsp) movq 0x18(%rsp), %rax xorq %r9, %rax movq %rax, %rbp sarq $0x3f, %rbp andq %r8, %rbp negq %rbp mulq %r8 addq %rax, %rsi adcq %rdx, %rbp movq 0x38(%rsp), %rax xorq %r11, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r10, %rdx subq %rdx, %rbp mulq %r10 addq %rax, %rsi adcq %rdx, %rbp shrdq $0x3b, %rsi, %rcx 
movq %rcx, 0x10(%rsp) shrdq $0x3b, %rbp, %rsi movq 0x18(%rsp), %rax movq %rsi, 0x18(%rsp) xorq %r13, %rax movq %rax, %rsi sarq $0x3f, %rsi andq %r12, %rsi negq %rsi mulq %r12 addq %rax, %rbx adcq %rdx, %rsi movq 0x38(%rsp), %rax xorq %r15, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r14, %rdx subq %rdx, %rsi mulq %r14 addq %rax, %rbx adcq %rdx, %rsi shrdq $0x3b, %rbx, %rdi movq %rdi, 0x30(%rsp) shrdq $0x3b, %rsi, %rbx movq %rbx, 0x38(%rsp) movq 0x80(%rsp), %rbx movq 0x88(%rsp), %rbp xorl %ecx, %ecx movq 0x40(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x60(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x40(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x40(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x60(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x60(%rsp) xorl %ebx, %ebx movq 0x48(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x68(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq 0x48(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, 0x48(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq 0x68(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, 0x68(%rsp) xorl %ecx, %ecx movq 0x50(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x70(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x50(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x50(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x70(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x70(%rsp) movq 0x58(%rsp), %rax xorq %r9, %rax movq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x78(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rcx adcq %rbx, %rdx movq %rdx, %rbx shldq $0x1, %rcx, %rdx sarq $0x3f, %rbx addq %rbx, %rdx movl $0x13, %eax imulq %rdx movq 0x40(%rsp), %r8 addq %rax, %r8 movq %r8, 0x40(%rsp) movq 0x48(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x48(%rsp) movq 0x50(%rsp), %r8 adcq %rbx, %r8 movq %r8, 0x50(%rsp) adcq %rbx, %rcx shlq $0x3f, %rax addq %rax, %rcx movq 0x58(%rsp), %rax movq %rcx, 0x58(%rsp) xorq %r13, %rax movq %r13, %rcx andq %r12, %rcx negq %rcx mulq %r12 addq %rax, %rsi adcq %rdx, %rcx movq 0x78(%rsp), %rax xorq %r15, %rax movq %r15, %rdx andq %r14, %rdx subq %rdx, %rcx mulq %r14 addq %rax, %rsi adcq %rcx, %rdx movq %rdx, %rcx shldq $0x1, %rsi, %rdx sarq $0x3f, %rcx movl $0x13, %eax addq %rcx, %rdx imulq %rdx movq 0x60(%rsp), %r8 addq %rax, %r8 movq %r8, 0x60(%rsp) movq 0x68(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x68(%rsp) movq 0x70(%rsp), %r8 adcq %rcx, %r8 movq %r8, 0x70(%rsp) adcq %rcx, %rsi shlq $0x3f, %rax addq %rax, %rsi movq %rsi, 0x78(%rsp) curve25519_x25519_midloop: movq 0x98(%rsp), %rsi movq (%rsp), %rdx movq 0x20(%rsp), %rcx movq %rdx, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax xorl %ebp, %ebp movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq 
%rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 
cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %rdx leaq (%rcx,%rax), %rdi shlq $0x16, %rdx shlq $0x16, %rdi sarq $0x2b, %rdx sarq $0x2b, %rdi movabsq $0x20000100000, %rax leaq (%rbx,%rax), %rbx leaq (%rcx,%rax), %rcx sarq $0x2a, %rbx sarq $0x2a, %rcx movq %rdx, 0xa0(%rsp) movq %rbx, 0xa8(%rsp) movq %rdi, 0xb0(%rsp) movq %rcx, 0xb8(%rsp) movq (%rsp), %r12 imulq %r12, %rdi imulq %rdx, %r12 movq 0x20(%rsp), %r13 imulq %r13, %rbx imulq %rcx, %r13 addq %rbx, %r12 addq %rdi, %r13 sarq $0x14, %r12 sarq $0x14, %r13 movq %r12, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 
cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %r8 leaq (%rcx,%rax), %r10 shlq $0x16, %r8 shlq $0x16, %r10 sarq $0x2b, %r8 sarq $0x2b, %r10 movabsq $0x20000100000, %rax leaq (%rbx,%rax), %r15 leaq (%rcx,%rax), %r11 sarq $0x2a, %r15 sarq $0x2a, %r11 movq %r13, %rbx movq %r12, %rcx imulq %r8, %r12 imulq %r15, %rbx addq %rbx, %r12 imulq %r11, %r13 imulq %r10, %rcx addq %rcx, %r13 sarq $0x14, %r12 sarq $0x14, %r13 movq %r12, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq 0xa0(%rsp), %rax imulq %r8, %rax movq 0xb0(%rsp), %rdx imulq %r15, %rdx imulq 0xa8(%rsp), %r8 imulq 0xb8(%rsp), %r15 addq %r8, %r15 leaq (%rax,%rdx), %r9 movq 0xa0(%rsp), %rax imulq %r10, %rax movq 0xb0(%rsp), %rdx imulq %r11, %rdx imulq 0xa8(%rsp), %r10 imulq 0xb8(%rsp), %r11 addq %r10, %r11 leaq (%rax,%rdx), %r13 movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, 
%r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %r8 leaq (%rcx,%rax), %r12 shlq $0x15, %r8 shlq $0x15, %r12 sarq $0x2b, %r8 sarq $0x2b, %r12 movabsq $0x20000100000, %rax 
leaq (%rbx,%rax), %r10 leaq (%rcx,%rax), %r14 sarq $0x2b, %r10 sarq $0x2b, %r14 movq %r9, %rax imulq %r8, %rax movq %r13, %rdx imulq %r10, %rdx imulq %r15, %r8 imulq %r11, %r10 addq %r8, %r10 leaq (%rax,%rdx), %r8 movq %r9, %rax imulq %r12, %rax movq %r13, %rdx imulq %r14, %rdx imulq %r15, %r12 imulq %r11, %r14 addq %r12, %r14 leaq (%rax,%rdx), %r12 movq %rsi, 0x98(%rsp) decq 0x90(%rsp) jne curve25519_x25519_inverseloop movq (%rsp), %rax movq 0x20(%rsp), %rcx imulq %r8, %rax imulq %r10, %rcx addq %rcx, %rax sarq $0x3f, %rax movq %r8, %r9 sarq $0x3f, %r9 xorq %r9, %r8 subq %r9, %r8 xorq %rax, %r9 movq %r10, %r11 sarq $0x3f, %r11 xorq %r11, %r10 subq %r11, %r10 xorq %rax, %r11 movq %r12, %r13 sarq $0x3f, %r13 xorq %r13, %r12 subq %r13, %r12 xorq %rax, %r13 movq %r14, %r15 sarq $0x3f, %r15 xorq %r15, %r14 subq %r15, %r14 xorq %rax, %r15 movq %r8, %rax andq %r9, %rax movq %r10, %r12 andq %r11, %r12 addq %rax, %r12 xorl %r13d, %r13d movq 0x40(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r12 adcq %rdx, %r13 movq 0x60(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movq 0x48(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r13 adcq %rdx, %r14 movq 0x68(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq 0x50(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq 0x70(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 adcq %rdx, %r15 movq 0x58(%rsp), %rax xorq %r9, %rax andq %r8, %r9 negq %r9 mulq %r8 addq %rax, %r15 adcq %rdx, %r9 movq 0x78(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %r9 mulq %r10 addq %rax, %r15 adcq %rdx, %r9 movq %r9, %rax shldq $0x1, %r15, %rax sarq $0x3f, %r9 movl $0x13, %ebx leaq 0x1(%rax,%r9,1), %rax imulq %rbx xorl %ebp, %ebp addq %rax, %r12 adcq %rdx, %r13 adcq %r9, %r14 adcq %r9, %r15 shlq $0x3f, %rax addq %rax, %r15 cmovns %rbp, %rbx subq %rbx, %r12 sbbq %rbp, %r13 sbbq %rbp, %r14 sbbq %rbp, %r15 btr $0x3f, %r15 movq 0xc0(%rsp), %rdi movq %r12, (%rdi) movq %r13, 0x8(%rdi) movq %r14, 0x10(%rdi) movq %r15, 0x18(%rdi) // Now the result is xn * (1/zn), fully reduced modulo p. // Note that in the degenerate case zn = 0 (mod p_25519), the // modular inverse code above will produce 1/zn = 0, giving // the correct overall X25519 result of zero for the point at // infinity. movq res, %rbp mul_p25519(resx,xn,zn) // Restore stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
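As a usage illustration for the routine above, here is a minimal C sketch built only on the prototypes given in the header comment; the wrapper name x25519_public_from_secret is illustrative and not part of s2n-bignum:

    #include <stdint.h>

    extern void curve25519_x25519_byte(uint8_t res[static 32],
                                       uint8_t scalar[static 32],
                                       uint8_t point[static 32]);

    /* Derive an X25519 public key from a 32-byte secret key.  The curve25519
       base point has u-coordinate 9, and the scalar clamping required by
       RFC 7748 is performed inside the routine itself, per its header comment. */
    static void x25519_public_from_secret(uint8_t pub[32], uint8_t secret[32])
    {
        uint8_t base[32] = {9};   /* u = 9, remaining 31 bytes zero */
        curve25519_x25519_byte(pub, secret, base);
    }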
marvin-hansen/iggy-streaming-system
9,211
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/curve25519/bignum_madd_n25519_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply-add modulo the order of the curve25519/edwards25519 basepoint // Inputs x[4], y[4], c[4]; output z[4] // // extern void bignum_madd_n25519_alt // (uint64_t z[static 4], uint64_t x[static 4], // uint64_t y[static 4], uint64_t c[static 4]); // // Performs z := (x * y + c) mod n_25519, where the modulus is // n_25519 = 2^252 + 27742317777372353535851937790883648493, the // order of the curve25519/edwards25519 basepoint. The result z // and the inputs x, y and c are all 4 digits (256 bits). // // Standard x86-64 ABI: RDI = z, RSI = x, RDX = y, RCX = c // Microsoft x64 ABI: RCX = z, RDX = x, R8 = y, R9 = c // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_madd_n25519_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_madd_n25519_alt) .text // Single round of modular reduction mod_n25519, mapping // [m4;m3;m2;m1;m0] = m to [m3;m2;m1;m0] = m mod n_25519, // *assuming* the input m < 2^64 * n_25519. This is very // close to the loop body of the bignum_mod_n25519 function. #define reduce(m4,m3,m2,m1,m0) \ movq m4, %rbx ; \ shldq $0x4, m3, %rbx ; \ shrq $0x3c, m4 ; \ subq m4, %rbx ; \ shlq $0x4, m3 ; \ shrdq $0x4, m4, m3 ; \ movabsq $0x5812631a5cf5d3ed, %rax ; \ mulq %rbx; \ movq %rax, %rbp ; \ movq %rdx, %rcx ; \ movabsq $0x14def9dea2f79cd6, %rax ; \ mulq %rbx; \ addq %rax, %rcx ; \ adcq $0x0, %rdx ; \ subq %rbp, m0 ; \ sbbq %rcx, m1 ; \ sbbq %rdx, m2 ; \ sbbq $0x0, m3 ; \ sbbq %rbx, %rbx ; \ movabsq $0x5812631a5cf5d3ed, %rax ; \ andq %rbx, %rax ; \ movabsq $0x14def9dea2f79cd6, %rdx ; \ andq %rbx, %rdx ; \ movabsq $0x1000000000000000, %rbx ; \ andq %rax, %rbx ; \ addq %rax, m0 ; \ adcq %rdx, m1 ; \ adcq $0x0, m2 ; \ adcq %rbx, m3 // Special case of "reduce" with m4 = 0. As well as not using m4, // the quotient selection is slightly simpler, just floor(m/2^252) // versus min (floor(m/2^252)) (2^63-1). #define reduce0(m3,m2,m1,m0) \ movq m3, %rbx ; \ shrq $60, %rbx ; \ shlq $4, m3 ; \ shrq $4, m3 ; \ movabsq $0x5812631a5cf5d3ed, %rax ; \ mulq %rbx; \ movq %rax, %rbp ; \ movq %rdx, %rcx ; \ movabsq $0x14def9dea2f79cd6, %rax ; \ mulq %rbx; \ addq %rax, %rcx ; \ adcq $0x0, %rdx ; \ subq %rbp, m0 ; \ sbbq %rcx, m1 ; \ sbbq %rdx, m2 ; \ sbbq $0x0, m3 ; \ sbbq %rbx, %rbx ; \ movabsq $0x5812631a5cf5d3ed, %rax ; \ andq %rbx, %rax ; \ movabsq $0x14def9dea2f79cd6, %rdx ; \ andq %rbx, %rdx ; \ movabsq $0x1000000000000000, %rbx ; \ andq %rax, %rbx ; \ addq %rax, m0 ; \ adcq %rdx, m1 ; \ adcq $0x0, m2 ; \ adcq %rbx, m3 S2N_BN_SYMBOL(bignum_madd_n25519_alt): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx movq %r9, %rcx #endif // Save some additional registers for use pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 // First compute [%r15;%r14;%r13;%r12;%r11;%r10;%r9;%r8] = x * y + c. This inserts // some addition terms for c into a core Comba multiplier similar to // the start of bignum_mul_p256k1_alt. 
movq %rdx, %rbp movq (%rsi), %rax mulq (%rbp) addq (%rcx), %rax adcq $0, %rdx movq %rax, %r8 movq %rdx, %r9 xorq %r10, %r10 xorq %r11, %r11 movq (%rsi), %rax mulq 0x8(%rbp) addq 8(%rcx), %rax adcq $0, %rdx addq %rax, %r9 adcq %rdx, %r10 movq 0x8(%rsi), %rax mulq (%rbp) addq %rax, %r9 adcq %rdx, %r10 adcq $0x0, %r11 xorq %r12, %r12 movq (%rsi), %rax mulq 0x10(%rbp) addq 16(%rcx), %rax adcq $0, %rdx addq %rax, %r10 adcq %rdx, %r11 adcq %r12, %r12 movq 0x8(%rsi), %rax mulq 0x8(%rbp) addq %rax, %r10 adcq %rdx, %r11 adcq $0x0, %r12 movq 0x10(%rsi), %rax mulq (%rbp) addq %rax, %r10 adcq %rdx, %r11 adcq $0x0, %r12 xorq %r13, %r13 movq (%rsi), %rax mulq 0x18(%rbp) addq 24(%rcx), %rax adcq $0, %rdx addq %rax, %r11 adcq %rdx, %r12 adcq %r13, %r13 movq 0x8(%rsi), %rax mulq 0x10(%rbp) addq %rax, %r11 adcq %rdx, %r12 adcq $0x0, %r13 movq 0x10(%rsi), %rax mulq 0x8(%rbp) addq %rax, %r11 adcq %rdx, %r12 adcq $0x0, %r13 movq 0x18(%rsi), %rax mulq (%rbp) addq %rax, %r11 adcq %rdx, %r12 adcq $0x0, %r13 xorq %r14, %r14 movq 0x8(%rsi), %rax mulq 0x18(%rbp) addq %rax, %r12 adcq %rdx, %r13 adcq %r14, %r14 movq 0x10(%rsi), %rax mulq 0x10(%rbp) addq %rax, %r12 adcq %rdx, %r13 adcq $0x0, %r14 movq 0x18(%rsi), %rax mulq 0x8(%rbp) addq %rax, %r12 adcq %rdx, %r13 adcq $0x0, %r14 xorq %r15, %r15 movq 0x10(%rsi), %rax mulq 0x18(%rbp) addq %rax, %r13 adcq %rdx, %r14 adcq %r15, %r15 movq 0x18(%rsi), %rax mulq 0x10(%rbp) addq %rax, %r13 adcq %rdx, %r14 adcq $0x0, %r15 movq 0x18(%rsi), %rax mulq 0x18(%rbp) addq %rax, %r14 adcq %rdx, %r15 // Now do the modular reduction and write back reduce0(%r15,%r14,%r13,%r12) reduce(%r15,%r14,%r13,%r12,%r11) reduce(%r14,%r13,%r12,%r11,%r10) reduce(%r13,%r12,%r11,%r10,%r9) reduce(%r12,%r11,%r10,%r9,%r8) movq %r8, (%rdi) movq %r9, 8(%rdi) movq %r10, 16(%rdi) movq %r11, 24(%rdi) // Restore registers and return popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
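To spell out what one round of the "reduce" macro above computes (ignoring the clamping of the quotient to a single 64-bit word that the code also handles): the constants 0x14def9dea2f79cd6 and 0x5812631a5cf5d3ed are the high and low 64-bit words of c = 27742317777372353535851937790883648493, so n_25519 = 2^252 + c. Writing the 5-digit intermediate as m = 2^252 * q + l with 0 <= l < 2^252, the round forms

    m - q * n_25519 = l - q * c

and adds back a single copy of n_25519 when that subtraction borrows; under the macro's stated precondition m < 2^64 * n_25519 this already leaves m mod n_25519.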
marvin-hansen/iggy-streaming-system
108,690
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/curve25519/edwards25519_scalarmuldouble.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Double scalar multiplication for edwards25519, fresh and base point // Input scalar[4], point[8], bscalar[4]; output res[8] // // extern void edwards25519_scalarmuldouble // (uint64_t res[static 8],uint64_t scalar[static 4], // uint64_t point[static 8],uint64_t bscalar[static 4]); // // Given scalar = n, point = P and bscalar = m, returns in res // the point (X,Y) = n * P + m * B where B = (...,4/5) is // the standard basepoint for the edwards25519 (Ed25519) curve. // // Both 256-bit coordinates of the input point P are implicitly // reduced modulo 2^255-19 if they are not already in reduced form, // but the conventional usage is that they *are* already reduced. // The scalars can be arbitrary 256-bit numbers but may also be // considered as implicitly reduced modulo the group order. // // Standard x86-64 ABI: RDI = res, RSI = scalar, RDX = point, RCX = bscalar // Microsoft x64 ABI: RCX = res, RDX = scalar, R8 = point, R9 = bscalar // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(edwards25519_scalarmuldouble) S2N_BN_SYM_PRIVACY_DIRECTIVE(edwards25519_scalarmuldouble) .text // Size of individual field elements #define NUMSIZE 32 // Pointer-offset pairs for result and temporaries on stack with some aliasing. // Both "resx" and "resy" assume the "res" pointer has been preloaded into %rbp. #define resx (0*NUMSIZE)(%rbp) #define resy (1*NUMSIZE)(%rbp) #define scalar (0*NUMSIZE)(%rsp) #define bscalar (1*NUMSIZE)(%rsp) #define tabent (2*NUMSIZE)(%rsp) #define btabent (6*NUMSIZE)(%rsp) #define acc (9*NUMSIZE)(%rsp) #define tab (13*NUMSIZE)(%rsp) // Additional variables kept on the stack #define bf 45*NUMSIZE(%rsp) #define cf 45*NUMSIZE+8(%rsp) #define i 45*NUMSIZE+16(%rsp) #define res 45*NUMSIZE+24(%rsp) // Total size to reserve on the stack (excluding local subroutines) #define NSPACE (46*NUMSIZE) // Syntactic variants to make x86_att forms easier to generate #define SCALAR (0*NUMSIZE) #define BSCALAR (1*NUMSIZE) #define TABENT (2*NUMSIZE) #define BTABENT (6*NUMSIZE) #define ACC (9*NUMSIZE) #define TAB (13*NUMSIZE) // Sub-references used in local subroutines with local stack #define x_0 0(%rdi) #define y_0 NUMSIZE(%rdi) #define z_0 (2*NUMSIZE)(%rdi) #define w_0 (3*NUMSIZE)(%rdi) #define x_1 0(%rsi) #define y_1 NUMSIZE(%rsi) #define z_1 (2*NUMSIZE)(%rsi) #define w_1 (3*NUMSIZE)(%rsi) #define x_2 0(%rbp) #define y_2 NUMSIZE(%rbp) #define z_2 (2*NUMSIZE)(%rbp) #define w_2 (3*NUMSIZE)(%rbp) #define t0 (0*NUMSIZE)(%rsp) #define t1 (1*NUMSIZE)(%rsp) #define t2 (2*NUMSIZE)(%rsp) #define t3 (3*NUMSIZE)(%rsp) #define t4 (4*NUMSIZE)(%rsp) #define t5 (5*NUMSIZE)(%rsp) // Macro wrapping up the basic field multiplication, only trivially // different from a pure function call to bignum_mul_p25519. 
#define mul_p25519(P0,P1,P2) \ xorl %ecx, %ecx ; \ movq P2, %rdx ; \ mulxq P1, %r8, %r9 ; \ mulxq 0x8+P1, %rax, %r10 ; \ addq %rax, %r9 ; \ mulxq 0x10+P1, %rax, %r11 ; \ adcq %rax, %r10 ; \ mulxq 0x18+P1, %rax, %r12 ; \ adcq %rax, %r11 ; \ adcq %rcx, %r12 ; \ xorl %ecx, %ecx ; \ movq 0x8+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x18+P1, %rax, %r13 ; \ adcxq %rax, %r12 ; \ adoxq %rcx, %r13 ; \ adcxq %rcx, %r13 ; \ xorl %ecx, %ecx ; \ movq 0x10+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x18+P1, %rax, %r14 ; \ adcxq %rax, %r13 ; \ adoxq %rcx, %r14 ; \ adcxq %rcx, %r14 ; \ xorl %ecx, %ecx ; \ movq 0x18+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x18+P1, %rax, %r15 ; \ adcxq %rax, %r14 ; \ adoxq %rcx, %r15 ; \ adcxq %rcx, %r15 ; \ movl $0x26, %edx ; \ xorl %ecx, %ecx ; \ mulxq %r12, %rax, %rbx ; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ mulxq %r13, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq %r14, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq %r15, %rax, %r12 ; \ adcxq %rax, %r11 ; \ adoxq %rcx, %r12 ; \ adcxq %rcx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ movl $0x13, %edx ; \ incq %r12; \ bts $63, %r11 ; \ mulxq %r12, %rax, %rbx ; \ addq %rax, %r8 ; \ adcq %rbx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ sbbq %rax, %rax ; \ notq %rax; \ andq %rdx, %rax ; \ subq %rax, %r8 ; \ sbbq %rcx, %r9 ; \ sbbq %rcx, %r10 ; \ sbbq %rcx, %r11 ; \ btr $63, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // A version of multiplication that only guarantees output < 2 * p_25519. // This basically skips the +1 and final correction in quotient estimation. 
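// Informal sketch of the reduction idea shared by these multiplication
// macros (exposition only): since p_25519 = 2^255 - 19, we have
// 2^255 == 19 and 2^256 == 38 (mod p_25519). A 512-bit product
// h * 2^256 + l is therefore congruent to 38 * h + l, which is the
// multiply-by-0x26 folding of the high four limbs; the remaining top bit
// is folded the same way via 2^255 == 19, the multiply-by-0x13 step.
// mul_p25519 then applies a final correction to land strictly below
// p_25519, while mul_4 below settles for a result below 2 * p_25519.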
#define mul_4(P0,P1,P2) \ xorl %ecx, %ecx ; \ movq P2, %rdx ; \ mulxq P1, %r8, %r9 ; \ mulxq 0x8+P1, %rax, %r10 ; \ addq %rax, %r9 ; \ mulxq 0x10+P1, %rax, %r11 ; \ adcq %rax, %r10 ; \ mulxq 0x18+P1, %rax, %r12 ; \ adcq %rax, %r11 ; \ adcq %rcx, %r12 ; \ xorl %ecx, %ecx ; \ movq 0x8+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x18+P1, %rax, %r13 ; \ adcxq %rax, %r12 ; \ adoxq %rcx, %r13 ; \ adcxq %rcx, %r13 ; \ xorl %ecx, %ecx ; \ movq 0x10+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x18+P1, %rax, %r14 ; \ adcxq %rax, %r13 ; \ adoxq %rcx, %r14 ; \ adcxq %rcx, %r14 ; \ xorl %ecx, %ecx ; \ movq 0x18+P2, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x18+P1, %rax, %r15 ; \ adcxq %rax, %r14 ; \ adoxq %rcx, %r15 ; \ adcxq %rcx, %r15 ; \ movl $0x26, %edx ; \ xorl %ecx, %ecx ; \ mulxq %r12, %rax, %rbx ; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ mulxq %r13, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq %r14, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq %r15, %rax, %r12 ; \ adcxq %rax, %r11 ; \ adoxq %rcx, %r12 ; \ adcxq %rcx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Squaring just giving a result < 2 * p_25519, which is done by // basically skipping the +1 in the quotient estimate and the final // optional correction. 
#define sqr_4(P0,P1) \ movq P1, %rdx ; \ mulxq %rdx, %r8, %r15 ; \ mulxq 0x8+P1, %r9, %r10 ; \ mulxq 0x18+P1, %r11, %r12 ; \ movq 0x10+P1, %rdx ; \ mulxq 0x18+P1, %r13, %r14 ; \ xorl %ebx, %ebx ; \ mulxq P1, %rax, %rcx ; \ adcxq %rax, %r10 ; \ adoxq %rcx, %r11 ; \ mulxq 0x8+P1, %rax, %rcx ; \ adcxq %rax, %r11 ; \ adoxq %rcx, %r12 ; \ movq 0x18+P1, %rdx ; \ mulxq 0x8+P1, %rax, %rcx ; \ adcxq %rax, %r12 ; \ adoxq %rcx, %r13 ; \ adcxq %rbx, %r13 ; \ adoxq %rbx, %r14 ; \ adcq %rbx, %r14 ; \ xorl %ebx, %ebx ; \ adcxq %r9, %r9 ; \ adoxq %r15, %r9 ; \ movq 0x8+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r10, %r10 ; \ adoxq %rax, %r10 ; \ adcxq %r11, %r11 ; \ adoxq %rdx, %r11 ; \ movq 0x10+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r12, %r12 ; \ adoxq %rax, %r12 ; \ adcxq %r13, %r13 ; \ adoxq %rdx, %r13 ; \ movq 0x18+P1, %rdx ; \ mulxq %rdx, %rax, %r15 ; \ adcxq %r14, %r14 ; \ adoxq %rax, %r14 ; \ adcxq %rbx, %r15 ; \ adoxq %rbx, %r15 ; \ movl $0x26, %edx ; \ xorl %ebx, %ebx ; \ mulxq %r12, %rax, %rcx ; \ adcxq %rax, %r8 ; \ adoxq %rcx, %r9 ; \ mulxq %r13, %rax, %rcx ; \ adcxq %rax, %r9 ; \ adoxq %rcx, %r10 ; \ mulxq %r14, %rax, %rcx ; \ adcxq %rax, %r10 ; \ adoxq %rcx, %r11 ; \ mulxq %r15, %rax, %r12 ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ adcxq %rbx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rbx, %r9 ; \ adcq %rbx, %r10 ; \ adcq %rbx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Modular subtraction with double modulus 2 * p_25519 = 2^256 - 38 #define sub_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ebx, %ebx ; \ subq P2, %r8 ; \ movq 8+P1, %r9 ; \ sbbq 8+P2, %r9 ; \ movl $38, %ecx ; \ movq 16+P1, %r10 ; \ sbbq 16+P2, %r10 ; \ movq 24+P1, %rax ; \ sbbq 24+P2, %rax ; \ cmovncq %rbx, %rcx ; \ subq %rcx, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq %rbx, %rax ; \ movq %r8, P0 ; \ movq %r9, 8+P0 ; \ movq %r10, 16+P0 ; \ movq %rax, 24+P0 // Modular addition and doubling with double modulus 2 * p_25519 = 2^256 - 38. // This only ensures that the result fits in 4 digits, not that it is reduced // even w.r.t. double modulus. The result is always correct modulo provided // the sum of the inputs is < 2^256 + 2^256 - 38, so in particular provided // at least one of them is reduced double modulo. 
#define add_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ecx, %ecx ; \ addq P2, %r8 ; \ movq 0x8+P1, %r9 ; \ adcq 0x8+P2, %r9 ; \ movq 0x10+P1, %r10 ; \ adcq 0x10+P2, %r10 ; \ movq 0x18+P1, %r11 ; \ adcq 0x18+P2, %r11 ; \ movl $38, %eax ; \ cmovncq %rcx, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 #define double_twice4(P0,P1) \ movq P1, %r8 ; \ xorl %ecx, %ecx ; \ addq %r8, %r8 ; \ movq 0x8+P1, %r9 ; \ adcq %r9, %r9 ; \ movq 0x10+P1, %r10 ; \ adcq %r10, %r10 ; \ movq 0x18+P1, %r11 ; \ adcq %r11, %r11 ; \ movl $38, %eax ; \ cmovncq %rcx, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Load the constant k_25519 = 2 * d_25519 using immediate operations #define load_k25519(P0) \ movq $0xebd69b9426b2f159, %rax ; \ movq %rax, P0 ; \ movq $0x00e0149a8283b156, %rax ; \ movq %rax, 8+P0 ; \ movq $0x198e80f2eef3d130, %rax ; \ movq %rax, 16+P0 ; \ movq $0x2406d9dc56dffce7, %rax ; \ movq %rax, 24+P0 S2N_BN_SYMBOL(edwards25519_scalarmuldouble): // In this case the Windows form literally makes a subroutine call. // This avoids hassle arising from keeping code and data together. #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx movq %r9, %rcx callq edwards25519_scalarmuldouble_standard popq %rsi popq %rdi ret edwards25519_scalarmuldouble_standard: #endif // Save registers, make room for temps, preserve input arguments. pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp // Move the output pointer to a stable place movq %rdi, res // Copy scalars while recoding all 4-bit nybbles except the top // one (bits 252..255) into signed 4-bit digits. This is essentially // done just by adding the recoding constant 0x0888..888, after // which all digits except the first have an implicit bias of -8, // so 0 -> -8, 1 -> -7, ... 7 -> -1, 8 -> 0, 9 -> 1, ... 15 -> 7. // (We could literally create 2s complement signed nybbles by // XORing with the same constant 0x0888..888 afterwards, but it // doesn't seem to make the end usage any simpler.) // // In order to ensure that the unrecoded top nybble (bits 252..255) // does not become > 8 as a result of carries lower down from the // recoding, we first (conceptually) subtract the group order iff // the top digit of the scalar is > 2^63. In the implementation the // reduction and recoding are combined by optionally using the // modified recoding constant 0x0888...888 + (2^256 - group_order). 
movq (%rcx), %r8 movq 8(%rcx), %r9 movq 16(%rcx), %r10 movq 24(%rcx), %r11 movq $0xc7f56fb5a0d9e920, %r12 movq $0xe190b99370cba1d5, %r13 movq $0x8888888888888887, %r14 movq $0x8888888888888888, %r15 movq $0x8000000000000000, %rax movq $0x0888888888888888, %rbx cmpq %r11, %rax cmovncq %r15, %r12 cmovncq %r15, %r13 cmovncq %r15, %r14 cmovncq %rbx, %r15 addq %r12, %r8 adcq %r13, %r9 adcq %r14, %r10 adcq %r15, %r11 movq %r8, BSCALAR(%rsp) movq %r9, BSCALAR+8(%rsp) movq %r10, BSCALAR+16(%rsp) movq %r11, BSCALAR+24(%rsp) movq (%rsi), %r8 movq 8(%rsi), %r9 movq 16(%rsi), %r10 movq 24(%rsi), %r11 movq $0xc7f56fb5a0d9e920, %r12 movq $0xe190b99370cba1d5, %r13 movq $0x8888888888888887, %r14 movq $0x8888888888888888, %r15 movq $0x8000000000000000, %rax movq $0x0888888888888888, %rbx cmpq %r11, %rax cmovncq %r15, %r12 cmovncq %r15, %r13 cmovncq %r15, %r14 cmovncq %rbx, %r15 addq %r12, %r8 adcq %r13, %r9 adcq %r14, %r10 adcq %r15, %r11 movq %r8, SCALAR(%rsp) movq %r9, SCALAR+8(%rsp) movq %r10, SCALAR+16(%rsp) movq %r11, SCALAR+24(%rsp) // Create table of multiples 1..8 of the general input point at "tab". // Reduce the input coordinates x and y modulo 2^256 - 38 first, for the // sake of definiteness; this is the reduction that will be maintained. // We could slightly optimize the additions because we know the input // point is affine (so Z = 1), but it doesn't seem worth the complication. movl $38, %eax movq (%rdx), %r8 xorl %ebx, %ebx movq 8(%rdx), %r9 xorl %ecx, %ecx movq 16(%rdx), %r10 xorl %esi, %esi movq 24(%rdx), %r11 addq %r8, %rax adcq %r9, %rbx adcq %r10, %rcx adcq %r11, %rsi cmovncq %r8, %rax movq %rax, TAB(%rsp) cmovncq %r9, %rbx movq %rbx, TAB+8(%rsp) cmovncq %r10, %rcx movq %rcx, TAB+16(%rsp) cmovncq %r11, %rsi movq %rsi, TAB+24(%rsp) movl $38, %eax movq 32(%rdx), %r8 xorl %ebx, %ebx movq 40(%rdx), %r9 xorl %ecx, %ecx movq 48(%rdx), %r10 xorl %esi, %esi movq 56(%rdx), %r11 addq %r8, %rax adcq %r9, %rbx adcq %r10, %rcx adcq %r11, %rsi cmovncq %r8, %rax movq %rax, TAB+32(%rsp) cmovncq %r9, %rbx movq %rbx, TAB+40(%rsp) cmovncq %r10, %rcx movq %rcx, TAB+48(%rsp) cmovncq %r11, %rsi movq %rsi, TAB+56(%rsp) movl $1, %eax movq %rax, TAB+64(%rsp) xorl %eax, %eax movq %rax, TAB+72(%rsp) movq %rax, TAB+80(%rsp) movq %rax, TAB+88(%rsp) leaq TAB+96(%rsp), %rdi leaq TAB(%rsp), %rsi leaq TAB+32(%rsp), %rbp mul_4(x_0,x_1,x_2) // Multiple 2 leaq TAB+1*128(%rsp), %rdi leaq TAB(%rsp), %rsi callq edwards25519_scalarmuldouble_epdouble // Multiple 3 leaq TAB+2*128(%rsp), %rdi leaq TAB(%rsp), %rsi leaq TAB+1*128(%rsp), %rbp callq edwards25519_scalarmuldouble_epadd // Multiple 4 leaq TAB+3*128(%rsp), %rdi leaq TAB+1*128(%rsp), %rsi callq edwards25519_scalarmuldouble_epdouble // Multiple 5 leaq TAB+4*128(%rsp), %rdi leaq TAB(%rsp), %rsi leaq TAB+3*128(%rsp), %rbp callq edwards25519_scalarmuldouble_epadd // Multiple 6 leaq TAB+5*128(%rsp), %rdi leaq TAB+2*128(%rsp), %rsi callq edwards25519_scalarmuldouble_epdouble // Multiple 7 leaq TAB+6*128(%rsp), %rdi leaq TAB(%rsp), %rsi leaq TAB+5*128(%rsp), %rbp callq edwards25519_scalarmuldouble_epadd // Multiple 8 leaq TAB+7*128(%rsp), %rdi leaq TAB+3*128(%rsp), %rsi callq edwards25519_scalarmuldouble_epdouble // Handle the initialization, starting the loop counter at i = 252 // and initializing acc to the sum of the table entries for the // top nybbles of the scalars (the ones with no implicit -8 bias). movq $252, %rax movq %rax, i // Index for btable entry... 
movq BSCALAR+24(%rsp), %rax shrq $60, %rax movq %rax, bf // ...and constant-time indexing based on that index movl $1, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx movl $1, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d xorl %r12d, %r12d xorl %r13d, %r13d xorl %r14d, %r14d xorl %r15d, %r15d leaq edwards25519_scalarmuldouble_table(%rip), %rbp cmpq $1, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $2, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $3, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $4, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $5, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $6, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $7, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 
64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $8, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 movq %rax, BTABENT(%rsp) movq %rbx, BTABENT+8(%rsp) movq %rcx, BTABENT+16(%rsp) movq %rdx, BTABENT+24(%rsp) movq %r8, BTABENT+32(%rsp) movq %r9, BTABENT+40(%rsp) movq %r10, BTABENT+48(%rsp) movq %r11, BTABENT+56(%rsp) movq %r12, BTABENT+64(%rsp) movq %r13, BTABENT+72(%rsp) movq %r14, BTABENT+80(%rsp) movq %r15, BTABENT+88(%rsp) // Index for table entry... movq SCALAR+24(%rsp), %rax shrq $60, %rax movq %rax, bf // ...and constant-time indexing based on that index. // Do the Y and Z fields first, to save on registers... movl $1, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx movl $1, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d leaq TAB+32(%rsp), %rbp cmpq $1, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $2, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $3, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $4, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $5, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $6, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $7, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), 
%rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $8, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq %rax, TABENT+32(%rsp) movq %rbx, TABENT+40(%rsp) movq %rcx, TABENT+48(%rsp) movq %rdx, TABENT+56(%rsp) movq %r8, TABENT+64(%rsp) movq %r9, TABENT+72(%rsp) movq %r10, TABENT+80(%rsp) movq %r11, TABENT+88(%rsp) // ...followed by the X and W fields leaq TAB(%rsp), %rbp xorl %eax, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx xorl %r8d, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d cmpq $1, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $2, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $3, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $4, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $5, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $6, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $7, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $8, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 movq %rax, TABENT(%rsp) movq %rbx, TABENT+8(%rsp) movq %rcx, TABENT+16(%rsp) movq %rdx, TABENT+24(%rsp) movq %r8, TABENT+96(%rsp) movq %r9, TABENT+104(%rsp) movq %r10, 
TABENT+112(%rsp) movq %r11, TABENT+120(%rsp) // Add those elements to initialize the accumulator for bit position 252 leaq ACC(%rsp), %rdi leaq TABENT(%rsp), %rsi leaq BTABENT(%rsp), %rbp callq edwards25519_scalarmuldouble_pepadd // Main loop with acc = [scalar/2^i] * point + [bscalar/2^i] * basepoint // Start with i = 252 for bits 248..251 and go down four at a time to 3..0 edwards25519_scalarmuldouble_loop: movq i, %rax subq $4, %rax movq %rax, i // Double to acc' = 2 * acc leaq ACC(%rsp), %rdi leaq ACC(%rsp), %rsi callq edwards25519_scalarmuldouble_pdouble // Get btable entry, first getting the adjusted bitfield... movq i, %rax movq %rax, %rcx shrq $6, %rax movq 32(%rsp,%rax,8), %rax shrq %cl, %rax andq $15, %rax subq $8, %rax sbbq %rcx, %rcx xorq %rcx, %rax subq %rcx, %rax movq %rcx, cf movq %rax, bf // ... then doing constant-time lookup with the appropriate index... movl $1, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx movl $1, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d xorl %r12d, %r12d xorl %r13d, %r13d xorl %r14d, %r14d xorl %r15d, %r15d leaq edwards25519_scalarmuldouble_table(%rip), %rbp cmpq $1, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $2, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $3, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $4, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $5, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $6, bf movq (%rbp), 
%rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $7, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $8, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 // ... then optionally negating before storing. The table entry // is in precomputed form and we currently have // // [%rdx;%rcx;%rbx;%rax] = y - x // [%r11;%r10;%r9;%r8] = x + y // [%r15;%r14;%r13;%r12] = 2 * d * x * y // // Negation for Edwards curves is -(x,y) = (-x,y), which in this modified // form amounts to swapping the first two fields and negating the third. // The negation does not always fully reduce even mod 2^256-38 in the zero // case, instead giving -0 = 2^256-38. But that is fine since the result is // always fed to a multiplication inside the "pepadd" function below that // handles any 256-bit input. movq cf, %rdi testq %rdi, %rdi movq %rax, %rsi cmovnzq %r8, %rsi cmovnzq %rax, %r8 movq %rsi, BTABENT(%rsp) movq %r8, BTABENT+32(%rsp) movq %rbx, %rsi cmovnzq %r9, %rsi cmovnzq %rbx, %r9 movq %rsi, BTABENT+8(%rsp) movq %r9, BTABENT+40(%rsp) movq %rcx, %rsi cmovnzq %r10, %rsi cmovnzq %rcx, %r10 movq %rsi, BTABENT+16(%rsp) movq %r10, BTABENT+48(%rsp) movq %rdx, %rsi cmovnzq %r11, %rsi cmovnzq %rdx, %r11 movq %rsi, BTABENT+24(%rsp) movq %r11, BTABENT+56(%rsp) xorq %rdi, %r12 xorq %rdi, %r13 xorq %rdi, %r14 xorq %rdi, %r15 andq $37, %rdi subq %rdi, %r12 sbbq $0, %r13 sbbq $0, %r14 sbbq $0, %r15 movq %r12, BTABENT+64(%rsp) movq %r13, BTABENT+72(%rsp) movq %r14, BTABENT+80(%rsp) movq %r15, BTABENT+88(%rsp) // Get table entry, first getting the adjusted bitfield... 
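// Retrospective note on the negation just performed (exposition only):
// writing -P = (-x, y), the precomputed fields transform as
//     y - (-x) = x + y,    y + (-x) = y - x,    2*d*(-x)*y = -(2*d*x*y),
// hence the swap of the first two fields and the sign flip of the third.
// The sign flip uses ~a - 37 = (2^256 - 1 - a) - 37 = (2^256 - 38) - a,
// i.e. negation modulo 2 * p_25519, which is why a zero input can come
// out as 2^256 - 38 rather than 0, as noted above.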
movq i, %rax movq %rax, %rcx shrq $6, %rax movq (%rsp,%rax,8), %rax shrq %cl, %rax andq $15, %rax subq $8, %rax sbbq %rcx, %rcx xorq %rcx, %rax subq %rcx, %rax movq %rcx, cf movq %rax, bf // ...and constant-time indexing based on that index // Do the Y and Z fields first, to save on registers // and store them back (they don't need any modification) movl $1, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx movl $1, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d leaq TAB+32(%rsp), %rbp cmpq $1, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $2, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $3, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $4, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $5, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $6, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $7, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $8, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq %rax, TABENT+32(%rsp) movq %rbx, TABENT+40(%rsp) movq %rcx, TABENT+48(%rsp) movq %rdx, TABENT+56(%rsp) movq %r8, TABENT+64(%rsp) movq %r9, TABENT+72(%rsp) movq %r10, TABENT+80(%rsp) movq %r11, TABENT+88(%rsp) // Now do the X and W fields... 
leaq TAB(%rsp), %rbp xorl %eax, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx xorl %r8d, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d cmpq $1, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $2, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $3, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $4, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $5, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $6, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $7, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 addq $128, %rbp cmpq $8, bf movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 96(%rbp), %rsi cmovzq %rsi, %r8 movq 104(%rbp), %rsi cmovzq %rsi, %r9 movq 112(%rbp), %rsi cmovzq %rsi, %r10 movq 120(%rbp), %rsi cmovzq %rsi, %r11 // ... then optionally negate before storing the X and W fields. This // time the table entry is extended-projective, and is here: // // [%rdx;%rcx;%rbx;%rax] = X // [tabent+32] = Y // [tabent+64] = Z // [%r11;%r10;%r9;%r8] = W // // This time we just need to negate the X and the W fields. // The crude way negation is done can result in values of X or W // (when initially zero before negation) being exactly equal to // 2^256-38, but the "pepadd" function handles that correctly. 
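// Additional remark (exposition only): the fourth coordinate W here plays
// the role of the usual extended coordinate T = X*Y/Z, so negating the
// affine x flips the sign of both X and W while leaving Y and Z alone;
// that is why the Y and Z fields were stored back unchanged and only X and
// W get the sign treatment below, again via ~a - 37 = (2^256 - 38) - a.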
movq cf, %rdi xorq %rdi, %rax xorq %rdi, %rbx xorq %rdi, %rcx xorq %rdi, %rdx xorq %rdi, %r8 xorq %rdi, %r9 xorq %rdi, %r10 xorq %rdi, %r11 andq $37, %rdi subq %rdi, %rax sbbq $0, %rbx sbbq $0, %rcx sbbq $0, %rdx movq %rax, TABENT(%rsp) movq %rbx, TABENT+8(%rsp) movq %rcx, TABENT+16(%rsp) movq %rdx, TABENT+24(%rsp) subq %rdi, %r8 sbbq $0, %r9 sbbq $0, %r10 sbbq $0, %r11 movq %r8, TABENT+96(%rsp) movq %r9, TABENT+104(%rsp) movq %r10, TABENT+112(%rsp) movq %r11, TABENT+120(%rsp) // Double to acc' = 4 * acc leaq ACC(%rsp), %rdi leaq ACC(%rsp), %rsi callq edwards25519_scalarmuldouble_pdouble // Add tabent := tabent + btabent leaq TABENT(%rsp), %rdi leaq TABENT(%rsp), %rsi leaq BTABENT(%rsp), %rbp callq edwards25519_scalarmuldouble_pepadd // Double to acc' = 8 * acc leaq ACC(%rsp), %rdi leaq ACC(%rsp), %rsi callq edwards25519_scalarmuldouble_pdouble // Double to acc' = 16 * acc leaq ACC(%rsp), %rdi leaq ACC(%rsp), %rsi callq edwards25519_scalarmuldouble_epdouble // Add table entry, acc := acc + tabent leaq ACC(%rsp), %rdi leaq ACC(%rsp), %rsi leaq TABENT(%rsp), %rbp callq edwards25519_scalarmuldouble_epadd // Loop down movq i, %rax testq %rax, %rax jnz edwards25519_scalarmuldouble_loop // Prepare to call the modular inverse function to get tab = 1/z leaq TAB(%rsp), %rdi leaq ACC+64(%rsp), %rsi // Inline copy of bignum_inv_p25519, identical except for stripping out // the prologue and epilogue saving and restoring registers and making // and reclaiming room on the stack. For more details and explanations see // "x86/curve25519/bignum_inv_p25519.S". Note that the stack it uses for // its own temporaries is 208 bytes, so it has no effect on variables // that are needed in the rest of our computation here: res, tab and acc. movq %rdi, 0xc0(%rsp) xorl %eax, %eax leaq -0x13(%rax), %rcx notq %rax movq %rcx, (%rsp) movq %rax, 0x8(%rsp) movq %rax, 0x10(%rsp) btr $0x3f, %rax movq %rax, 0x18(%rsp) movq (%rsi), %rdx movq 0x8(%rsi), %rcx movq 0x10(%rsi), %r8 movq 0x18(%rsi), %r9 movl $0x1, %eax xorl %r10d, %r10d bts $0x3f, %r9 adcq %r10, %rax imulq $0x13, %rax, %rax addq %rax, %rdx adcq %r10, %rcx adcq %r10, %r8 adcq %r10, %r9 movl $0x13, %eax cmovbq %r10, %rax subq %rax, %rdx sbbq %r10, %rcx sbbq %r10, %r8 sbbq %r10, %r9 btr $0x3f, %r9 movq %rdx, 0x20(%rsp) movq %rcx, 0x28(%rsp) movq %r8, 0x30(%rsp) movq %r9, 0x38(%rsp) xorl %eax, %eax movq %rax, 0x40(%rsp) movq %rax, 0x48(%rsp) movq %rax, 0x50(%rsp) movq %rax, 0x58(%rsp) movabsq $0xa0f99e2375022099, %rax movq %rax, 0x60(%rsp) movabsq $0xa8c68f3f1d132595, %rax movq %rax, 0x68(%rsp) movabsq $0x6c6c893805ac5242, %rax movq %rax, 0x70(%rsp) movabsq $0x276508b241770615, %rax movq %rax, 0x78(%rsp) movq $0xa, 0x90(%rsp) movq $0x1, 0x98(%rsp) jmp edwards25519_scalarmuldouble_midloop edwards25519_scalarmuldouble_inverseloop: movq %r8, %r9 sarq $0x3f, %r9 xorq %r9, %r8 subq %r9, %r8 movq %r10, %r11 sarq $0x3f, %r11 xorq %r11, %r10 subq %r11, %r10 movq %r12, %r13 sarq $0x3f, %r13 xorq %r13, %r12 subq %r13, %r12 movq %r14, %r15 sarq $0x3f, %r15 xorq %r15, %r14 subq %r15, %r14 movq %r8, %rax andq %r9, %rax movq %r10, %rdi andq %r11, %rdi addq %rax, %rdi movq %rdi, 0x80(%rsp) movq %r12, %rax andq %r13, %rax movq %r14, %rsi andq %r15, %rsi addq %rax, %rsi movq %rsi, 0x88(%rsp) xorl %ebx, %ebx movq (%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq 0x20(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rdi adcq %rdx, %rbx xorl %ebp, %ebp movq (%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rsi adcq %rdx, %rbp movq 0x20(%rsp), %rax xorq %r15, %rax 
mulq %r14 addq %rax, %rsi adcq %rdx, %rbp xorl %ecx, %ecx movq 0x8(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x28(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx shrdq $0x3b, %rbx, %rdi movq %rdi, (%rsp) xorl %edi, %edi movq 0x8(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbp adcq %rdx, %rdi movq 0x28(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rdi shrdq $0x3b, %rbp, %rsi movq %rsi, 0x20(%rsp) xorl %esi, %esi movq 0x10(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rsi movq 0x30(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rsi shrdq $0x3b, %rcx, %rbx movq %rbx, 0x8(%rsp) xorl %ebx, %ebx movq 0x10(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rdi adcq %rdx, %rbx movq 0x30(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rdi adcq %rdx, %rbx shrdq $0x3b, %rdi, %rbp movq %rbp, 0x28(%rsp) movq 0x18(%rsp), %rax xorq %r9, %rax movq %rax, %rbp sarq $0x3f, %rbp andq %r8, %rbp negq %rbp mulq %r8 addq %rax, %rsi adcq %rdx, %rbp movq 0x38(%rsp), %rax xorq %r11, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r10, %rdx subq %rdx, %rbp mulq %r10 addq %rax, %rsi adcq %rdx, %rbp shrdq $0x3b, %rsi, %rcx movq %rcx, 0x10(%rsp) shrdq $0x3b, %rbp, %rsi movq 0x18(%rsp), %rax movq %rsi, 0x18(%rsp) xorq %r13, %rax movq %rax, %rsi sarq $0x3f, %rsi andq %r12, %rsi negq %rsi mulq %r12 addq %rax, %rbx adcq %rdx, %rsi movq 0x38(%rsp), %rax xorq %r15, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r14, %rdx subq %rdx, %rsi mulq %r14 addq %rax, %rbx adcq %rdx, %rsi shrdq $0x3b, %rbx, %rdi movq %rdi, 0x30(%rsp) shrdq $0x3b, %rsi, %rbx movq %rbx, 0x38(%rsp) movq 0x80(%rsp), %rbx movq 0x88(%rsp), %rbp xorl %ecx, %ecx movq 0x40(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x60(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x40(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x40(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x60(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x60(%rsp) xorl %ebx, %ebx movq 0x48(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x68(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq 0x48(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, 0x48(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq 0x68(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, 0x68(%rsp) xorl %ecx, %ecx movq 0x50(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x70(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x50(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x50(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x70(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x70(%rsp) movq 0x58(%rsp), %rax xorq %r9, %rax movq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x78(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rcx adcq %rbx, %rdx movq %rdx, %rbx shldq $0x1, %rcx, %rdx sarq $0x3f, %rbx addq %rbx, %rdx movl $0x13, %eax imulq %rdx movq 0x40(%rsp), %r8 addq %rax, %r8 movq %r8, 0x40(%rsp) movq 0x48(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x48(%rsp) movq 0x50(%rsp), %r8 adcq %rbx, %r8 movq %r8, 0x50(%rsp) adcq %rbx, %rcx shlq $0x3f, %rax addq %rax, %rcx movq 0x58(%rsp), %rax movq %rcx, 0x58(%rsp) xorq %r13, %rax movq %r13, %rcx andq %r12, %rcx 
negq %rcx mulq %r12 addq %rax, %rsi adcq %rdx, %rcx movq 0x78(%rsp), %rax xorq %r15, %rax movq %r15, %rdx andq %r14, %rdx subq %rdx, %rcx mulq %r14 addq %rax, %rsi adcq %rcx, %rdx movq %rdx, %rcx shldq $0x1, %rsi, %rdx sarq $0x3f, %rcx movl $0x13, %eax addq %rcx, %rdx imulq %rdx movq 0x60(%rsp), %r8 addq %rax, %r8 movq %r8, 0x60(%rsp) movq 0x68(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x68(%rsp) movq 0x70(%rsp), %r8 adcq %rcx, %r8 movq %r8, 0x70(%rsp) adcq %rcx, %rsi shlq $0x3f, %rax addq %rax, %rsi movq %rsi, 0x78(%rsp) edwards25519_scalarmuldouble_midloop: movq 0x98(%rsp), %rsi movq (%rsp), %rdx movq 0x20(%rsp), %rcx movq %rdx, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax xorl %ebp, %ebp movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq 
%r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %rdx leaq (%rcx,%rax), %rdi shlq $0x16, %rdx shlq $0x16, %rdi sarq $0x2b, %rdx sarq $0x2b, %rdi movabsq $0x20000100000, %rax leaq (%rbx,%rax), %rbx leaq (%rcx,%rax), %rcx sarq $0x2a, %rbx sarq $0x2a, %rcx movq %rdx, 0xa0(%rsp) movq %rbx, 0xa8(%rsp) movq %rdi, 0xb0(%rsp) movq %rcx, 0xb8(%rsp) movq (%rsp), %r12 imulq %r12, %rdi imulq %rdx, %r12 movq 0x20(%rsp), %r13 imulq %r13, %rbx imulq %rcx, %r13 addq %rbx, %r12 addq %rdi, %r13 sarq $0x14, %r12 sarq $0x14, %r13 movq %r12, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, 
%rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %r8 leaq (%rcx,%rax), %r10 shlq $0x16, %r8 shlq $0x16, %r10 sarq $0x2b, %r8 sarq $0x2b, %r10 movabsq $0x20000100000, %rax leaq (%rbx,%rax), %r15 leaq (%rcx,%rax), %r11 sarq $0x2a, %r15 sarq $0x2a, %r11 movq %r13, %rbx movq %r12, %rcx imulq %r8, %r12 imulq %r15, %rbx addq %rbx, %r12 imulq %r11, %r13 imulq %r10, %rcx addq %rcx, %r13 sarq $0x14, %r12 sarq $0x14, %r13 movq %r12, %rbx andq $0xfffff, 
%rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq 0xa0(%rsp), %rax imulq %r8, %rax movq 0xb0(%rsp), %rdx imulq %r15, %rdx imulq 0xa8(%rsp), %r8 imulq 0xb8(%rsp), %r15 addq %r8, %r15 leaq (%rax,%rdx), %r9 movq 0xa0(%rsp), %rax imulq %r10, %rax movq 0xb0(%rsp), %rdx imulq %r11, %rdx imulq 0xa8(%rsp), %r10 imulq 0xb8(%rsp), %r11 addq %r10, %r11 leaq (%rax,%rdx), %r13 movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 
movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %r8 leaq (%rcx,%rax), %r12 shlq $0x15, %r8 shlq $0x15, %r12 sarq $0x2b, %r8 sarq $0x2b, %r12 movabsq $0x20000100000, %rax leaq (%rbx,%rax), %r10 leaq (%rcx,%rax), %r14 sarq $0x2b, %r10 sarq $0x2b, %r14 movq %r9, %rax imulq %r8, %rax movq %r13, %rdx imulq %r10, %rdx imulq %r15, %r8 imulq %r11, %r10 addq %r8, %r10 leaq (%rax,%rdx), %r8 movq %r9, %rax imulq %r12, %rax movq %r13, %rdx imulq %r14, %rdx imulq %r15, %r12 imulq %r11, %r14 addq %r12, %r14 leaq (%rax,%rdx), %r12 movq %rsi, 0x98(%rsp) decq 0x90(%rsp) jne edwards25519_scalarmuldouble_inverseloop movq (%rsp), %rax movq 0x20(%rsp), %rcx imulq %r8, %rax imulq %r10, %rcx addq %rcx, %rax sarq $0x3f, %rax movq %r8, %r9 sarq $0x3f, %r9 xorq %r9, %r8 subq %r9, %r8 xorq %rax, %r9 movq %r10, %r11 sarq $0x3f, %r11 xorq %r11, %r10 subq %r11, %r10 xorq %rax, %r11 movq %r12, %r13 sarq $0x3f, %r13 xorq %r13, %r12 subq %r13, %r12 xorq %rax, %r13 movq %r14, %r15 sarq $0x3f, %r15 xorq %r15, %r14 subq %r15, %r14 xorq %rax, %r15 movq %r8, %rax andq %r9, %rax movq %r10, %r12 andq %r11, %r12 addq %rax, %r12 xorl %r13d, %r13d movq 0x40(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r12 adcq %rdx, %r13 movq 0x60(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movq 0x48(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r13 adcq %rdx, %r14 movq 0x68(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq 0x50(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq 0x70(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 adcq %rdx, %r15 movq 0x58(%rsp), %rax xorq %r9, %rax andq %r8, %r9 negq %r9 mulq %r8 addq %rax, %r15 adcq %rdx, %r9 movq 0x78(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %r9 mulq %r10 addq %rax, %r15 adcq %rdx, %r9 movq %r9, %rax shldq $0x1, %r15, %rax sarq $0x3f, %r9 movl $0x13, %ebx leaq 0x1(%rax,%r9,1), %rax imulq %rbx xorl %ebp, %ebp addq %rax, %r12 adcq %rdx, %r13 adcq %r9, %r14 adcq %r9, %r15 shlq $0x3f, %rax addq %rax, %r15 cmovns %rbp, %rbx subq %rbx, %r12 sbbq %rbp, %r13 sbbq %rbp, %r14 sbbq %rbp, %r15 btr $0x3f, %r15 movq 0xc0(%rsp), %rdi movq %r12, (%rdi) movq %r13, 0x8(%rdi) movq %r14, 0x10(%rdi) movq %r15, 0x18(%rdi) // Store result movq res, %rdi leaq ACC(%rsp), %rsi leaq TAB(%rsp), %rbp mul_p25519(x_0,x_1,x_2) movq res, %rdi addq $32, %rdi leaq ACC+32(%rsp), %rsi leaq TAB(%rsp), %rbp mul_p25519(x_0,x_1,x_2) // Restore 
stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx ret // **************************************************************************** // Localized versions of subroutines. // These are close to the standalone functions "edwards25519_epdouble" etc., // but are only maintaining reduction modulo 2^256 - 38, not 2^255 - 19. // **************************************************************************** edwards25519_scalarmuldouble_epdouble: sub $(5*NUMSIZE), %rsp add_twice4(t0,x_1,y_1) sqr_4(t1,z_1) sqr_4(t2,x_1) sqr_4(t3,y_1) double_twice4(t1,t1) sqr_4(t0,t0) add_twice4(t4,t2,t3) sub_twice4(t2,t2,t3) add_twice4(t3,t1,t2) sub_twice4(t1,t4,t0) mul_4(y_0,t2,t4) mul_4(z_0,t3,t2) mul_4(w_0,t1,t4) mul_4(x_0,t1,t3) add $(5*NUMSIZE), %rsp ret edwards25519_scalarmuldouble_pdouble: sub $(5*NUMSIZE), %rsp add_twice4(t0,x_1,y_1) sqr_4(t1,z_1) sqr_4(t2,x_1) sqr_4(t3,y_1) double_twice4(t1,t1) sqr_4(t0,t0) add_twice4(t4,t2,t3) sub_twice4(t2,t2,t3) add_twice4(t3,t1,t2) sub_twice4(t1,t4,t0) mul_4(y_0,t2,t4) mul_4(z_0,t3,t2) mul_4(x_0,t1,t3) add $(5*NUMSIZE), %rsp ret edwards25519_scalarmuldouble_epadd: sub $(6*NUMSIZE), %rsp mul_4(t0,w_1,w_2) sub_twice4(t1,y_1,x_1) sub_twice4(t2,y_2,x_2) add_twice4(t3,y_1,x_1) add_twice4(t4,y_2,x_2) double_twice4(t5,z_2) mul_4(t1,t1,t2) mul_4(t3,t3,t4) load_k25519(t2) mul_4(t2,t2,t0) mul_4(t4,z_1,t5) sub_twice4(t0,t3,t1) add_twice4(t5,t3,t1) sub_twice4(t1,t4,t2) add_twice4(t3,t4,t2) mul_4(w_0,t0,t5) mul_4(x_0,t0,t1) mul_4(y_0,t3,t5) mul_4(z_0,t1,t3) add $(6*NUMSIZE), %rsp ret edwards25519_scalarmuldouble_pepadd: sub $(6*NUMSIZE), %rsp double_twice4(t0,z_1); sub_twice4(t1,y_1,x_1); add_twice4(t2,y_1,x_1); mul_4(t3,w_1,z_2); mul_4(t1,t1,x_2); mul_4(t2,t2,y_2); sub_twice4(t4,t0,t3); add_twice4(t0,t0,t3); sub_twice4(t5,t2,t1); add_twice4(t1,t2,t1); mul_4(z_0,t4,t0); mul_4(x_0,t5,t4); mul_4(y_0,t0,t1); mul_4(w_0,t5,t1); add $(6*NUMSIZE), %rsp ret // **************************************************************************** // The precomputed data (all read-only). This is currently part of the same // text section, which gives position-independent code with simple PC-relative // addressing. However it could be put in a separate section via something like // // .section .rodata // **************************************************************************** // Precomputed table of multiples of generator for edwards25519 // all in precomputed extended-projective (y-x,x+y,2*d*x*y) triples. 
edwards25519_scalarmuldouble_table: // 1 * G .quad 0x9d103905d740913e .quad 0xfd399f05d140beb3 .quad 0xa5c18434688f8a09 .quad 0x44fd2f9298f81267 .quad 0x2fbc93c6f58c3b85 .quad 0xcf932dc6fb8c0e19 .quad 0x270b4898643d42c2 .quad 0x07cf9d3a33d4ba65 .quad 0xabc91205877aaa68 .quad 0x26d9e823ccaac49e .quad 0x5a1b7dcbdd43598c .quad 0x6f117b689f0c65a8 // 2 * G .quad 0x8a99a56042b4d5a8 .quad 0x8f2b810c4e60acf6 .quad 0xe09e236bb16e37aa .quad 0x6bb595a669c92555 .quad 0x9224e7fc933c71d7 .quad 0x9f469d967a0ff5b5 .quad 0x5aa69a65e1d60702 .quad 0x590c063fa87d2e2e .quad 0x43faa8b3a59b7a5f .quad 0x36c16bdd5d9acf78 .quad 0x500fa0840b3d6a31 .quad 0x701af5b13ea50b73 // 3 * G .quad 0x56611fe8a4fcd265 .quad 0x3bd353fde5c1ba7d .quad 0x8131f31a214bd6bd .quad 0x2ab91587555bda62 .quad 0xaf25b0a84cee9730 .quad 0x025a8430e8864b8a .quad 0xc11b50029f016732 .quad 0x7a164e1b9a80f8f4 .quad 0x14ae933f0dd0d889 .quad 0x589423221c35da62 .quad 0xd170e5458cf2db4c .quad 0x5a2826af12b9b4c6 // 4 * G .quad 0x95fe050a056818bf .quad 0x327e89715660faa9 .quad 0xc3e8e3cd06a05073 .quad 0x27933f4c7445a49a .quad 0x287351b98efc099f .quad 0x6765c6f47dfd2538 .quad 0xca348d3dfb0a9265 .quad 0x680e910321e58727 .quad 0x5a13fbe9c476ff09 .quad 0x6e9e39457b5cc172 .quad 0x5ddbdcf9102b4494 .quad 0x7f9d0cbf63553e2b // 5 * G .quad 0x7f9182c3a447d6ba .quad 0xd50014d14b2729b7 .quad 0xe33cf11cb864a087 .quad 0x154a7e73eb1b55f3 .quad 0xa212bc4408a5bb33 .quad 0x8d5048c3c75eed02 .quad 0xdd1beb0c5abfec44 .quad 0x2945ccf146e206eb .quad 0xbcbbdbf1812a8285 .quad 0x270e0807d0bdd1fc .quad 0xb41b670b1bbda72d .quad 0x43aabe696b3bb69a // 6 * G .quad 0x499806b67b7d8ca4 .quad 0x575be28427d22739 .quad 0xbb085ce7204553b9 .quad 0x38b64c41ae417884 .quad 0x3a0ceeeb77157131 .quad 0x9b27158900c8af88 .quad 0x8065b668da59a736 .quad 0x51e57bb6a2cc38bd .quad 0x85ac326702ea4b71 .quad 0xbe70e00341a1bb01 .quad 0x53e4a24b083bc144 .quad 0x10b8e91a9f0d61e3 // 7 * G .quad 0xba6f2c9aaa3221b1 .quad 0x6ca021533bba23a7 .quad 0x9dea764f92192c3a .quad 0x1d6edd5d2e5317e0 .quad 0x6b1a5cd0944ea3bf .quad 0x7470353ab39dc0d2 .quad 0x71b2528228542e49 .quad 0x461bea69283c927e .quad 0xf1836dc801b8b3a2 .quad 0xb3035f47053ea49a .quad 0x529c41ba5877adf3 .quad 0x7a9fbb1c6a0f90a7 // 8 * G .quad 0xe2a75dedf39234d9 .quad 0x963d7680e1b558f9 .quad 0x2c2741ac6e3c23fb .quad 0x3a9024a1320e01c3 .quad 0x59b7596604dd3e8f .quad 0x6cb30377e288702c .quad 0xb1339c665ed9c323 .quad 0x0915e76061bce52f .quad 0xe7c1f5d9c9a2911a .quad 0xb8a371788bcca7d7 .quad 0x636412190eb62a32 .quad 0x26907c5c2ecc4e95 #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
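The local epdouble/epadd/pepadd subroutines and the table above all operate on edwards25519 points in extended-projective form, with table entries stored as (y-x, x+y, 2*d*x*y) triples. As a minimal sketch of what the pepadd sequence computes — using plain Python integers mod p instead of the 4x64-bit limb arithmetic with lazy reduction, and with illustrative helper names (precompute, pepadd) that are not part of the source tree:

P = 2**255 - 19
D = (-121665 * pow(121666, -1, P)) % P        # edwards25519 curve constant d (Python 3.8+ modular inverse)

def precompute(x, y):
    # Affine point (x, y) -> the (y - x, x + y, 2*d*x*y) triple stored in the table.
    return ((y - x) % P, (x + y) % P, (2 * D * x * y) % P)

def pepadd(pt, triple):
    # pt = (X, Y, Z, T) extended-projective with T = X*Y/Z; triple as produced above.
    X1, Y1, Z1, T1 = pt
    ymx, xpy, kxy = triple
    a = (Y1 - X1) * ymx % P                   # mul_4(t1,t1,x_2)
    b = (Y1 + X1) * xpy % P                   # mul_4(t2,t2,y_2)
    c = T1 * kxy % P                          # mul_4(t3,w_1,z_2)
    d2 = 2 * Z1 % P                           # double_twice4(t0,z_1)
    e, f, g, h = (b - a) % P, (d2 - c) % P, (d2 + c) % P, (b + a) % P
    return (e * f % P, g * h % P, f * g % P, e * h % P)   # (X3, Y3, Z3, T3)

Negating a table entry in this encoding only swaps the first two fields and negates the third, which is what makes the signed-digit table lookups in these routines cheap.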
marvin-hansen/iggy-streaming-system
5,458
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/curve25519/bignum_mod_n25519.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Reduce modulo basepoint order, z := x mod n_25519 // Input x[k]; output z[4] // // extern void bignum_mod_n25519 // (uint64_t z[static 4], uint64_t k, uint64_t *x); // // Reduction is modulo the order of the curve25519/edwards25519 basepoint, // which is n_25519 = 2^252 + 27742317777372353535851937790883648493 // // Standard x86-64 ABI: RDI = z, RSI = k, RDX = x // Microsoft x64 ABI: RCX = z, RDX = k, R8 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_n25519) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_n25519) .text #define z %rdi #define k %rsi #define x %rcx #define m0 %r8 #define m1 %r9 #define m2 %r10 #define m3 %r11 #define d %r12 #define q %rbx S2N_BN_SYMBOL(bignum_mod_n25519): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save extra registers pushq %rbx pushq %rbp pushq %r12 // If the input is already <= 3 words long, go to a trivial "copy" path cmpq $4, k jc shortinput // Otherwise load the top 4 digits (top-down) and reduce k by 4 // This [m3;m2;m1;m0] is the initial x where we begin reduction. subq $4, k movq 24(%rdx,k,8), m3 movq 16(%rdx,k,8), m2 movq 8(%rdx,k,8), m1 movq (%rdx,k,8), m0 // Move x into another register to leave %rdx free for multiplies movq %rdx, x // Get the quotient estimate q = floor(x/2^252). // Also delete it from m3, in effect doing x' = x - q * 2^252 movq m3, q shrq $60, q shlq $4, m3 shrq $4, m3 // Let [%rdx;d;%rbp] = q * (n_25519 - 2^252) movq $0x5812631a5cf5d3ed, %rax mulq q movq %rax, %rbp movq %rdx, d movq $0x14def9dea2f79cd6, %rax mulq q addq %rax, d adcq $0, %rdx // Subtract to get x' - q * (n_25519 - 2^252) = x - q * n_25519 subq %rbp, m0 sbbq d, m1 sbbq %rdx, m2 sbbq $0, m3 // Get a bitmask for the borrow and create a masked version of // non-trivial digits of [%rbx;0;%rdx;%rax] = n_25519, then add it. // The masked n3 digit exploits the fact that bit 60 of n0 is set. sbbq %rbx, %rbx movq $0x5812631a5cf5d3ed, %rax andq %rbx, %rax movq $0x14def9dea2f79cd6, %rdx andq %rbx, %rdx movq $0x1000000000000000, %rbx andq %rax, %rbx addq %rax, m0 adcq %rdx, m1 adcq $0, m2 adcq %rbx, m3 // Now do (k-4) iterations of 5->4 word modular reduction. Each one // is similar to the sequence above except for the more refined quotient // estimation process. testq k, k jz writeback bignum_mod_n25519_loop: // Assume that the new 5-digit x is 2^64 * previous_x + next_digit. // Get the quotient estimate q = max (floor(x/2^252)) (2^64 - 1) // and first compute x' = x - 2^252 * q. movq m3, q shldq $4, m2, q shrq $60, m3 subq m3, q shlq $4, m2 shrdq $4, m3, m2 // Let [%rdx;m3;%rbp] = q * (n_25519 - 2^252) movq $0x5812631a5cf5d3ed, %rax mulq q movq %rax, %rbp movq %rdx, m3 movq $0x14def9dea2f79cd6, %rax mulq q addq %rax, m3 adcq $0, %rdx // Load the next digit movq -8(x,k,8), d // Subtract to get x' - q * (n_25519 - 2^252) = x - q * n_25519 subq %rbp, d sbbq m3, m0 sbbq %rdx, m1 sbbq $0, m2 // Get a bitmask for the borrow and create a masked version of // non-trivial digits of [%rbx;0;%rdx;%rax] = n_25519, then add it. // The masked n3 digit exploits the fact that bit 60 of n0 is set. 
sbbq %rbx, %rbx movq $0x5812631a5cf5d3ed, %rax andq %rbx, %rax movq $0x14def9dea2f79cd6, %rdx andq %rbx, %rdx movq $0x1000000000000000, %rbx andq %rax, %rbx addq %rax, d adcq %rdx, m0 adcq $0, m1 adcq %rbx, m2 // Now shuffle registers up and loop movq m2, m3 movq m1, m2 movq m0, m1 movq d, m0 decq k jnz bignum_mod_n25519_loop // Write back writeback: movq m0, (z) movq m1, 8(z) movq m2, 16(z) movq m3, 24(z) // Restore registers and return popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret shortinput: xorq m0, m0 xorq m1, m1 xorq m2, m2 xorq m3, m3 testq k, k jz writeback movq (%rdx), m0 decq k jz writeback movq 8(%rdx), m1 decq k jz writeback movq 16(%rdx), m2 jmp writeback #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
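For reference, the reduction scheme described in the comments of bignum_mod_n25519.S (estimate q as floor(x/2^252) saturated at 2^64 - 1, subtract q * n_25519, then add n_25519 back once if the estimate overshot) can be modelled compactly in Python. This is a minimal sketch with illustrative names (reduce_step, mod_n25519, not part of the source tree); it elides the limb-level details and simply folds in one 64-bit digit per step, like the main loop:

N25519 = (1 << 252) + 27742317777372353535851937790883648493   # basepoint order n_25519

def reduce_step(x):
    # One 5-word -> 4-word step; q is saturated at 2^64 - 1 as in the assembly.
    q = min(x >> 252, (1 << 64) - 1)
    r = x - q * N25519
    return r + N25519 if r < 0 else r       # single conditional add-back

def mod_n25519(words):
    # words: little-endian 64-bit digits, like the x[k] argument of the routine.
    x = 0
    for w in reversed(words):               # absorb digits from the top down
        x = reduce_step((x << 64) | w)
    return x

if __name__ == "__main__":
    import random
    for k in (1, 4, 9):
        ws = [random.getrandbits(64) for _ in range(k)]
        n = sum(w << (64 * i) for i, w in enumerate(ws))
        assert mod_n25519(ws) == n % N25519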
marvin-hansen/iggy-streaming-system
301,075
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/curve25519/edwards25519_scalarmulbase_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Scalar multiplication for the edwards25519 standard basepoint // Input scalar[4]; output res[8] // // extern void edwards25519_scalarmulbase_alt // (uint64_t res[static 8],uint64_t scalar[static 4]); // // Given a scalar n, returns point (X,Y) = n * B where B = (...,4/5) is // the standard basepoint for the edwards25519 (Ed25519) curve. // // Standard x86-64 ABI: RDI = res, RSI = scalar // Microsoft x64 ABI: RCX = res, RDX = scalar // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(edwards25519_scalarmulbase_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(edwards25519_scalarmulbase_alt) .text // Size of individual field elements #define NUMSIZE 32 // Pointer-offset pairs for result and temporaries on stack with some aliasing. // The result "resx" assumes the "res" pointer has been preloaded into %rbp. #define resx (0*NUMSIZE)(%rbp) #define resy (1*NUMSIZE)(%rbp) #define scalar (0*NUMSIZE)(%rsp) #define tabent (1*NUMSIZE)(%rsp) #define ymx_2 (1*NUMSIZE)(%rsp) #define xpy_2 (2*NUMSIZE)(%rsp) #define kxy_2 (3*NUMSIZE)(%rsp) #define t0 (4*NUMSIZE)(%rsp) #define t1 (5*NUMSIZE)(%rsp) #define t2 (6*NUMSIZE)(%rsp) #define t3 (7*NUMSIZE)(%rsp) #define t4 (8*NUMSIZE)(%rsp) #define t5 (9*NUMSIZE)(%rsp) #define acc (10*NUMSIZE)(%rsp) #define x_1 (10*NUMSIZE)(%rsp) #define y_1 (11*NUMSIZE)(%rsp) #define z_1 (12*NUMSIZE)(%rsp) #define w_1 (13*NUMSIZE)(%rsp) #define x_3 (10*NUMSIZE)(%rsp) #define y_3 (11*NUMSIZE)(%rsp) #define z_3 (12*NUMSIZE)(%rsp) #define w_3 (13*NUMSIZE)(%rsp) // Stable homes for the input result pointer, and other variables #define res 14*NUMSIZE(%rsp) #define i 14*NUMSIZE+8(%rsp) #define bias 14*NUMSIZE+16(%rsp) #define bf 14*NUMSIZE+24(%rsp) #define ix 14*NUMSIZE+24(%rsp) #define tab 15*NUMSIZE(%rsp) // Total size to reserve on the stack #define NSPACE (15*NUMSIZE+8) // Syntactic variants to make x86_att version simpler to generate #define SCALAR 0 #define TABENT (1*NUMSIZE) #define ACC (10*NUMSIZE) #define X3 (10*NUMSIZE) #define Z3 (12*NUMSIZE) #define W3 (13*NUMSIZE) // Macro wrapping up the basic field multiplication, only trivially // different from a pure function call to bignum_mul_p25519_alt. 
#define mul_p25519(P0,P1,P2) \ movq P1, %rax ; \ mulq P2; \ movq %rax, %r8 ; \ movq %rdx, %r9 ; \ xorq %r10, %r10 ; \ xorq %r11, %r11 ; \ movq P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ movq 0x8+P1, %rax ; \ mulq P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ adcq $0x0, %r11 ; \ xorq %r12, %r12 ; \ movq P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq %r12, %r12 ; \ movq 0x8+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ movq 0x10+P1, %rax ; \ mulq P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ xorq %r13, %r13 ; \ movq P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq %r13, %r13 ; \ movq 0x8+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x10+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x18+P1, %rax ; \ mulq P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ xorq %r14, %r14 ; \ movq 0x8+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq %r14, %r14 ; \ movq 0x10+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ movq 0x18+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ xorq %r15, %r15 ; \ movq 0x10+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq %r15, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ movl $0x26, %esi ; \ movq %r12, %rax ; \ mulq %rsi; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %rcx, %rcx ; \ movq %r13, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %rcx, %rcx ; \ movq %r14, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rcx, %rcx ; \ movq %r15, %rax ; \ mulq %rsi; \ subq %rcx, %rdx ; \ xorq %rcx, %rcx ; \ addq %rax, %r11 ; \ movq %rdx, %r12 ; \ adcq %rcx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ leaq 0x1(%r12), %rax ; \ movl $0x13, %esi ; \ bts $63, %r11 ; \ imulq %rsi, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ sbbq %rax, %rax ; \ notq %rax; \ andq %rsi, %rax ; \ subq %rax, %r8 ; \ sbbq %rcx, %r9 ; \ sbbq %rcx, %r10 ; \ sbbq %rcx, %r11 ; \ btr $63, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // A version of multiplication that only guarantees output < 2 * p_25519. // This basically skips the +1 and final correction in quotient estimation. 
#define mul_4(P0,P1,P2) \ movq P1, %rax ; \ mulq P2; \ movq %rax, %r8 ; \ movq %rdx, %r9 ; \ xorq %r10, %r10 ; \ xorq %r11, %r11 ; \ movq P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ movq 0x8+P1, %rax ; \ mulq P2; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ adcq $0x0, %r11 ; \ xorq %r12, %r12 ; \ movq P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq %r12, %r12 ; \ movq 0x8+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ movq 0x10+P1, %rax ; \ mulq P2; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ xorq %r13, %r13 ; \ movq P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq %r13, %r13 ; \ movq 0x8+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x10+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ movq 0x18+P1, %rax ; \ mulq P2; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ adcq $0x0, %r13 ; \ xorq %r14, %r14 ; \ movq 0x8+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq %r14, %r14 ; \ movq 0x10+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ movq 0x18+P1, %rax ; \ mulq 0x8+P2; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ adcq $0x0, %r14 ; \ xorq %r15, %r15 ; \ movq 0x10+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq %r15, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x10+P2; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x18+P1, %rax ; \ mulq 0x18+P2; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ movl $0x26, %ebx ; \ movq %r12, %rax ; \ mulq %rbx; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %rcx, %rcx ; \ movq %r13, %rax ; \ mulq %rbx; \ subq %rcx, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %rcx, %rcx ; \ movq %r14, %rax ; \ mulq %rbx; \ subq %rcx, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rcx, %rcx ; \ movq %r15, %rax ; \ mulq %rbx; \ subq %rcx, %rdx ; \ xorq %rcx, %rcx ; \ addq %rax, %r11 ; \ movq %rdx, %r12 ; \ adcq %rcx, %r12 ; \ shldq $0x1, %r11, %r12 ; \ btr $0x3f, %r11 ; \ movl $0x13, %edx ; \ imulq %r12, %rdx ; \ addq %rdx, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 // Modular subtraction with double modulus 2 * p_25519 = 2^256 - 38 #define sub_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ebx, %ebx ; \ subq P2, %r8 ; \ movq 8+P1, %r9 ; \ sbbq 8+P2, %r9 ; \ movl $38, %ecx ; \ movq 16+P1, %r10 ; \ sbbq 16+P2, %r10 ; \ movq 24+P1, %rax ; \ sbbq 24+P2, %rax ; \ cmovncq %rbx, %rcx ; \ subq %rcx, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq %rbx, %rax ; \ movq %r8, P0 ; \ movq %r9, 8+P0 ; \ movq %r10, 16+P0 ; \ movq %rax, 24+P0 // Modular addition and doubling with double modulus 2 * p_25519 = 2^256 - 38. // This only ensures that the result fits in 4 digits, not that it is reduced // even w.r.t. double modulus. The result is always correct modulo provided // the sum of the inputs is < 2^256 + 2^256 - 38, so in particular provided // at least one of them is reduced double modulo. 
#define add_twice4(P0,P1,P2) \ movq P1, %r8 ; \ xorl %ecx, %ecx ; \ addq P2, %r8 ; \ movq 0x8+P1, %r9 ; \ adcq 0x8+P2, %r9 ; \ movq 0x10+P1, %r10 ; \ adcq 0x10+P2, %r10 ; \ movq 0x18+P1, %r11 ; \ adcq 0x18+P2, %r11 ; \ movl $38, %eax ; \ cmovncq %rcx, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 #define double_twice4(P0,P1) \ movq P1, %r8 ; \ xorl %ecx, %ecx ; \ addq %r8, %r8 ; \ movq 0x8+P1, %r9 ; \ adcq %r9, %r9 ; \ movq 0x10+P1, %r10 ; \ adcq %r10, %r10 ; \ movq 0x18+P1, %r11 ; \ adcq %r11, %r11 ; \ movl $38, %eax ; \ cmovncq %rcx, %rax ; \ addq %rax, %r8 ; \ adcq %rcx, %r9 ; \ adcq %rcx, %r10 ; \ adcq %rcx, %r11 ; \ movq %r8, P0 ; \ movq %r9, 0x8+P0 ; \ movq %r10, 0x10+P0 ; \ movq %r11, 0x18+P0 S2N_BN_SYMBOL(edwards25519_scalarmulbase_alt): // In this case the Windows form literally makes a subroutine call. // This avoids hassle arising from keeping code and data together. #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi callq edwards25519_scalarmulbase_alt_standard popq %rsi popq %rdi ret edwards25519_scalarmulbase_alt_standard: #endif // Save registers, make room for temps, preserve input arguments. pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp // Move the output pointer to a stable place movq %rdi, res // Copy the input scalar x to its local variable while reducing it // modulo 2^252 + m where m = 27742317777372353535851937790883648493; // this is the order of the basepoint so this doesn't change the result. // First do q = floor(x/2^252) and x' = x - q * (2^252 + m), which gives // an initial result -15 * m <= x' < 2^252 movq (%rsi), %r8 movq 8(%rsi), %r9 movq 16(%rsi), %r10 movq 24(%rsi), %r11 movq %r11, %rcx shrq $60, %rcx movq $0x5812631a5cf5d3ed, %rax mulq %rcx movq %rax, %r12 movq %rdx, %r13 movq $0x14def9dea2f79cd6, %rax mulq %rcx addq %rax, %r13 adcq $0, %rdx shlq $60, %rcx subq %r12, %r8 sbbq %r13, %r9 sbbq %rdx, %r10 sbbq %rcx, %r11 // If x' < 0 then just directly negate it; this makes sure the // reduced argument is strictly 0 <= x' < 2^252, but now we need // to record (done via bit 255 of the reduced scalar, which is // ignored in the main loop) when we negated so we can flip // the end result to compensate. sbbq %rax, %rax xorq %rax, %r8 xorq %rax, %r9 xorq %rax, %r10 xorq %rax, %r11 negq %rax adcq $0, %r8 adcq $0, %r9 adcq $0, %r10 adcq $0, %r11 shlq $63, %rax orq %rax, %r11 // And before we store the scalar, test and reset bit 251 to // initialize the main loop just below. movq %r8, SCALAR(%rsp) movq %r9, SCALAR+8(%rsp) movq %r10, SCALAR+16(%rsp) btr $59, %r11 movq %r11, SCALAR+24(%rsp) // The main part of the computation is in extended-projective coordinates // (X,Y,Z,T), representing an affine point on the edwards25519 curve // (x,y) via x = X/Z, y = Y/Z and x * y = T/Z (so X * Y = T * Z). // In comments B means the standard basepoint (x,4/5) = // (0x216....f25d51a,0x6666..666658). // // Initialize accumulator "acc" to either 0 or 2^251 * B depending on // bit 251 of the (reduced) scalar. That leaves bits 0..250 to handle. 
leaq edwards25519_scalarmulbase_alt_0g(%rip), %r10 leaq edwards25519_scalarmulbase_alt_251g(%rip), %r11 movq (%r10), %rax movq (%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC(%rsp) movq 8*1(%r10), %rax movq 8*1(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+8(%rsp) movq 8*2(%r10), %rax movq 8*2(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+16(%rsp) movq 8*3(%r10), %rax movq 8*3(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+24(%rsp) movq 8*4(%r10), %rax movq 8*4(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+32(%rsp) movq 8*5(%r10), %rax movq 8*5(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+40(%rsp) movq 8*6(%r10), %rax movq 8*6(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+48(%rsp) movq 8*7(%r10), %rax movq 8*7(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+56(%rsp) movl $1, %eax movq %rax, ACC+64(%rsp) movl $0, %eax movq %rax, ACC+72(%rsp) movq %rax, ACC+80(%rsp) movq %rax, ACC+88(%rsp) movq 8*8(%r10), %rax movq 8*8(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+96(%rsp) movq 8*9(%r10), %rax movq 8*9(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+104(%rsp) movq 8*10(%r10), %rax movq 8*10(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+112(%rsp) movq 8*11(%r10), %rax movq 8*11(%r11), %rcx cmovcq %rcx, %rax movq %rax, ACC+120(%rsp) // The counter "i" tracks the bit position for which the scalar has // already been absorbed, starting at 0 and going up in chunks of 4. // // The pointer "tab" points at the current block of the table for // multiples (2^i * j) * B at the current bit position i; 1 <= j <= 8. // // The bias is always either 0 and 1 and needs to be added to the // partially processed scalar implicitly. This is used to absorb 4 bits // of scalar per iteration from 3-bit table indexing by exploiting // negation: (16 * h + l) * B = (16 * (h + 1) - (16 - l)) * B is used // when l >= 9. Note that we can't have any bias left over at the // end because we made sure bit 251 is clear in the reduced scalar. movq $0, i leaq edwards25519_scalarmulbase_alt_gtable(%rip), %rax movq %rax, tab movq $0, bias // Start of the main loop, repeated 63 times for i = 4, 8, ..., 252 edwards25519_scalarmulbase_alt_scalarloop: // Look at the next 4-bit field "bf", adding the previous bias as well. // Choose the table index "ix" as bf when bf <= 8 and 16 - bf for bf >= 9, // setting the bias to 1 for the next iteration in the latter case. movq i, %rax movq %rax, %rcx shrq $6, %rax movq (%rsp,%rax,8), %rax // Exploiting scalar = sp exactly shrq %cl, %rax andq $15, %rax addq bias, %rax movq %rax, bf cmpq $9, bf sbbq %rax, %rax incq %rax movq %rax, bias movq $16, %rdi subq bf, %rdi cmpq $0, bias cmovzq bf, %rdi movq %rdi, ix // Perform constant-time lookup in the table to get element number "ix". // The table entry for the affine point (x,y) is actually a triple // (y - x,x + y,2 * d * x * y) to precompute parts of the addition. // Note that "ix" can be 0, so we set up the appropriate identity first. 
movl $1, %eax xorl %ebx, %ebx xorl %ecx, %ecx xorl %edx, %edx movl $1, %r8d xorl %r9d, %r9d xorl %r10d, %r10d xorl %r11d, %r11d xorl %r12d, %r12d xorl %r13d, %r13d xorl %r14d, %r14d xorl %r15d, %r15d movq tab, %rbp cmpq $1, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $2, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $3, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $4, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $5, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $6, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp cmpq $7, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq 
$96, %rbp cmpq $8, ix movq (%rbp), %rsi cmovzq %rsi, %rax movq 8(%rbp), %rsi cmovzq %rsi, %rbx movq 16(%rbp), %rsi cmovzq %rsi, %rcx movq 24(%rbp), %rsi cmovzq %rsi, %rdx movq 32(%rbp), %rsi cmovzq %rsi, %r8 movq 40(%rbp), %rsi cmovzq %rsi, %r9 movq 48(%rbp), %rsi cmovzq %rsi, %r10 movq 56(%rbp), %rsi cmovzq %rsi, %r11 movq 64(%rbp), %rsi cmovzq %rsi, %r12 movq 72(%rbp), %rsi cmovzq %rsi, %r13 movq 80(%rbp), %rsi cmovzq %rsi, %r14 movq 88(%rbp), %rsi cmovzq %rsi, %r15 addq $96, %rbp movq %rbp, tab // We now have the triple from the table in registers as follows // // [%rdx;%rcx;%rbx;%rax] = y - x // [%r11;%r10;%r9;%r8] = x + y // [%r15;%r14;%r13;%r12] = 2 * d * x * y // // In case bias = 1 we need to negate this. For Edwards curves // -(x,y) = (-x,y), i.e. we need to negate the x coordinate. // In this processed encoding, that amounts to swapping the // first two fields and negating the third. // // The optional negation here also pretends bias = 0 whenever // ix = 0 so that it doesn't need to handle the case of zero // inputs, since no non-trivial table entries are zero. Note // that in the zero case the whole negation is trivial, and // so indeed is the swapping. cmpq $0, bias movq %rax, %rsi cmovnzq %r8, %rsi cmovnzq %rax, %r8 movq %rsi, TABENT(%rsp) movq %r8, TABENT+32(%rsp) movq %rbx, %rsi cmovnzq %r9, %rsi cmovnzq %rbx, %r9 movq %rsi, TABENT+8(%rsp) movq %r9, TABENT+40(%rsp) movq %rcx, %rsi cmovnzq %r10, %rsi cmovnzq %rcx, %r10 movq %rsi, TABENT+16(%rsp) movq %r10, TABENT+48(%rsp) movq %rdx, %rsi cmovnzq %r11, %rsi cmovnzq %rdx, %r11 movq %rsi, TABENT+24(%rsp) movq %r11, TABENT+56(%rsp) movq $-19, %rax movq $-1, %rbx movq $-1, %rcx movq $0x7fffffffffffffff, %rdx subq %r12, %rax sbbq %r13, %rbx sbbq %r14, %rcx sbbq %r15, %rdx movq ix, %r8 movq bias, %r9 testq %r8, %r8 cmovzq %r8, %r9 testq %r9, %r9 cmovzq %r12, %rax cmovzq %r13, %rbx cmovzq %r14, %rcx cmovzq %r15, %rdx movq %rax, TABENT+64(%rsp) movq %rbx, TABENT+72(%rsp) movq %rcx, TABENT+80(%rsp) movq %rdx, TABENT+88(%rsp) // Extended-projective and precomputed mixed addition. // This is effectively the same as calling the standalone // function edwards25519_pepadd(acc,acc,tabent), but we // only retain slightly weaker normalization < 2 * p_25519 // throughout the inner loop, so the computation is // slightly different, and faster overall. double_twice4(t0,z_1) sub_twice4(t1,y_1,x_1) add_twice4(t2,y_1,x_1) mul_4(t3,w_1,kxy_2) mul_4(t1,t1,ymx_2) mul_4(t2,t2,xpy_2) sub_twice4(t4,t0,t3) add_twice4(t0,t0,t3) sub_twice4(t5,t2,t1) add_twice4(t1,t2,t1) mul_4(z_3,t4,t0) mul_4(x_3,t5,t4) mul_4(y_3,t0,t1) mul_4(w_3,t5,t1) // End of the main loop; move on by 4 bits. addq $4, i cmpq $252, i jc edwards25519_scalarmulbase_alt_scalarloop // Insert the optional negation of the projective X coordinate, and // so by extension the final affine x coordinate x = X/Z and thus // the point P = (x,y). We only know X < 2 * p_25519, so we do the // negation as 2 * p_25519 - X to keep it nonnegative. From this // point on we don't need any normalization of the coordinates // except for making sure that they fit in 4 digits. 
movq X3(%rsp), %r8 movq X3+8(%rsp), %r9 movq X3+16(%rsp), %r10 movq X3+24(%rsp), %r11 movq $0xffffffffffffffda, %r12 subq %r8, %r12 movq $0xffffffffffffffff, %r13 sbbq %r9, %r13 movq $0xffffffffffffffff, %r14 sbbq %r10, %r14 movq $0xffffffffffffffff, %r15 sbbq %r11, %r15 movq SCALAR+24(%rsp), %rax btq $63, %rax cmovcq %r12, %r8 cmovcq %r13, %r9 cmovcq %r14, %r10 cmovcq %r15, %r11 movq %r8, X3(%rsp) movq %r9, X3+8(%rsp) movq %r10, X3+16(%rsp) movq %r11, X3+24(%rsp) // Now we need to map out of the extended-projective representation // (X,Y,Z,W) back to the affine form (x,y) = (X/Z,Y/Z). This means // first calling the modular inverse to get w_3 = 1/z_3. leaq W3(%rsp), %rdi leaq Z3(%rsp), %rsi // Inline copy of bignum_inv_p25519, identical except for stripping out // the prologue and epilogue saving and restoring registers and making // and reclaiming room on the stack. For more details and explanations see // "x86/curve25519/bignum_inv_p25519.S". Note that the stack it uses for // its own temporaries is 208 bytes, so it has no effect on variables // that are needed in the rest of our computation here: res, x_3, y_3, // z_3 and w_3. movq %rdi, 0xc0(%rsp) xorl %eax, %eax leaq -0x13(%rax), %rcx notq %rax movq %rcx, (%rsp) movq %rax, 0x8(%rsp) movq %rax, 0x10(%rsp) btr $0x3f, %rax movq %rax, 0x18(%rsp) movq (%rsi), %rdx movq 0x8(%rsi), %rcx movq 0x10(%rsi), %r8 movq 0x18(%rsi), %r9 movl $0x1, %eax xorl %r10d, %r10d bts $0x3f, %r9 adcq %r10, %rax imulq $0x13, %rax, %rax addq %rax, %rdx adcq %r10, %rcx adcq %r10, %r8 adcq %r10, %r9 movl $0x13, %eax cmovbq %r10, %rax subq %rax, %rdx sbbq %r10, %rcx sbbq %r10, %r8 sbbq %r10, %r9 btr $0x3f, %r9 movq %rdx, 0x20(%rsp) movq %rcx, 0x28(%rsp) movq %r8, 0x30(%rsp) movq %r9, 0x38(%rsp) xorl %eax, %eax movq %rax, 0x40(%rsp) movq %rax, 0x48(%rsp) movq %rax, 0x50(%rsp) movq %rax, 0x58(%rsp) movabsq $0xa0f99e2375022099, %rax movq %rax, 0x60(%rsp) movabsq $0xa8c68f3f1d132595, %rax movq %rax, 0x68(%rsp) movabsq $0x6c6c893805ac5242, %rax movq %rax, 0x70(%rsp) movabsq $0x276508b241770615, %rax movq %rax, 0x78(%rsp) movq $0xa, 0x90(%rsp) movq $0x1, 0x98(%rsp) jmp edwards25519_scalarmulbase_alt_midloop edwards25519_scalarmulbase_alt_inverseloop: movq %r8, %r9 sarq $0x3f, %r9 xorq %r9, %r8 subq %r9, %r8 movq %r10, %r11 sarq $0x3f, %r11 xorq %r11, %r10 subq %r11, %r10 movq %r12, %r13 sarq $0x3f, %r13 xorq %r13, %r12 subq %r13, %r12 movq %r14, %r15 sarq $0x3f, %r15 xorq %r15, %r14 subq %r15, %r14 movq %r8, %rax andq %r9, %rax movq %r10, %rdi andq %r11, %rdi addq %rax, %rdi movq %rdi, 0x80(%rsp) movq %r12, %rax andq %r13, %rax movq %r14, %rsi andq %r15, %rsi addq %rax, %rsi movq %rsi, 0x88(%rsp) xorl %ebx, %ebx movq (%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq 0x20(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rdi adcq %rdx, %rbx xorl %ebp, %ebp movq (%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rsi adcq %rdx, %rbp movq 0x20(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp xorl %ecx, %ecx movq 0x8(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x28(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx shrdq $0x3b, %rbx, %rdi movq %rdi, (%rsp) xorl %edi, %edi movq 0x8(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbp adcq %rdx, %rdi movq 0x28(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rdi shrdq $0x3b, %rbp, %rsi movq %rsi, 0x20(%rsp) xorl %esi, %esi movq 0x10(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rsi movq 0x30(%rsp), 
%rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rsi shrdq $0x3b, %rcx, %rbx movq %rbx, 0x8(%rsp) xorl %ebx, %ebx movq 0x10(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rdi adcq %rdx, %rbx movq 0x30(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rdi adcq %rdx, %rbx shrdq $0x3b, %rdi, %rbp movq %rbp, 0x28(%rsp) movq 0x18(%rsp), %rax xorq %r9, %rax movq %rax, %rbp sarq $0x3f, %rbp andq %r8, %rbp negq %rbp mulq %r8 addq %rax, %rsi adcq %rdx, %rbp movq 0x38(%rsp), %rax xorq %r11, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r10, %rdx subq %rdx, %rbp mulq %r10 addq %rax, %rsi adcq %rdx, %rbp shrdq $0x3b, %rsi, %rcx movq %rcx, 0x10(%rsp) shrdq $0x3b, %rbp, %rsi movq 0x18(%rsp), %rax movq %rsi, 0x18(%rsp) xorq %r13, %rax movq %rax, %rsi sarq $0x3f, %rsi andq %r12, %rsi negq %rsi mulq %r12 addq %rax, %rbx adcq %rdx, %rsi movq 0x38(%rsp), %rax xorq %r15, %rax movq %rax, %rdx sarq $0x3f, %rdx andq %r14, %rdx subq %rdx, %rsi mulq %r14 addq %rax, %rbx adcq %rdx, %rsi shrdq $0x3b, %rbx, %rdi movq %rdi, 0x30(%rsp) shrdq $0x3b, %rsi, %rbx movq %rbx, 0x38(%rsp) movq 0x80(%rsp), %rbx movq 0x88(%rsp), %rbp xorl %ecx, %ecx movq 0x40(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x60(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x40(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x40(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x60(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x60(%rsp) xorl %ebx, %ebx movq 0x48(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x68(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq 0x48(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, 0x48(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq 0x68(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, 0x68(%rsp) xorl %ecx, %ecx movq 0x50(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq 0x70(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq 0x50(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, 0x50(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq 0x70(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, 0x70(%rsp) movq 0x58(%rsp), %rax xorq %r9, %rax movq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq 0x78(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rcx adcq %rbx, %rdx movq %rdx, %rbx shldq $0x1, %rcx, %rdx sarq $0x3f, %rbx addq %rbx, %rdx movl $0x13, %eax imulq %rdx movq 0x40(%rsp), %r8 addq %rax, %r8 movq %r8, 0x40(%rsp) movq 0x48(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x48(%rsp) movq 0x50(%rsp), %r8 adcq %rbx, %r8 movq %r8, 0x50(%rsp) adcq %rbx, %rcx shlq $0x3f, %rax addq %rax, %rcx movq 0x58(%rsp), %rax movq %rcx, 0x58(%rsp) xorq %r13, %rax movq %r13, %rcx andq %r12, %rcx negq %rcx mulq %r12 addq %rax, %rsi adcq %rdx, %rcx movq 0x78(%rsp), %rax xorq %r15, %rax movq %r15, %rdx andq %r14, %rdx subq %rdx, %rcx mulq %r14 addq %rax, %rsi adcq %rcx, %rdx movq %rdx, %rcx shldq $0x1, %rsi, %rdx sarq $0x3f, %rcx movl $0x13, %eax addq %rcx, %rdx imulq %rdx movq 0x60(%rsp), %r8 addq %rax, %r8 movq %r8, 0x60(%rsp) movq 0x68(%rsp), %r8 adcq %rdx, %r8 movq %r8, 0x68(%rsp) movq 0x70(%rsp), %r8 adcq %rcx, %r8 movq %r8, 0x70(%rsp) adcq %rcx, %rsi shlq $0x3f, %rax addq %rax, %rsi movq %rsi, 0x78(%rsp) edwards25519_scalarmulbase_alt_midloop: movq 0x98(%rsp), %rsi movq 
(%rsp), %rdx movq 0x20(%rsp), %rcx movq %rdx, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax xorl %ebp, %ebp movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq 
%rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %rdx leaq (%rcx,%rax), %rdi shlq $0x16, %rdx shlq $0x16, %rdi sarq $0x2b, %rdx sarq $0x2b, %rdi movabsq $0x20000100000, %rax leaq (%rbx,%rax), %rbx leaq (%rcx,%rax), %rcx sarq $0x2a, %rbx sarq $0x2a, %rcx movq %rdx, 0xa0(%rsp) movq %rbx, 0xa8(%rsp) movq %rdi, 0xb0(%rsp) movq %rcx, 0xb8(%rsp) movq (%rsp), %r12 imulq %r12, %rdi imulq %rdx, %r12 movq 0x20(%rsp), %r13 imulq %r13, %rbx imulq %rcx, %r13 addq %rbx, %r12 addq %rdi, %r13 sarq $0x14, %r12 sarq $0x14, %r13 movq %r12, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq $0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq 
%rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %r8 leaq (%rcx,%rax), %r10 shlq $0x16, %r8 shlq $0x16, %r10 sarq $0x2b, %r8 sarq $0x2b, %r10 movabsq $0x20000100000, %rax leaq (%rbx,%rax), %r15 leaq (%rcx,%rax), %r11 sarq $0x2a, %r15 sarq $0x2a, %r11 movq %r13, %rbx movq %r12, %rcx imulq %r8, %r12 imulq %r15, %rbx addq %rbx, %r12 imulq %r11, %r13 imulq %r10, %rcx addq %rcx, %r13 sarq $0x14, %r12 sarq $0x14, %r13 movq %r12, %rbx andq $0xfffff, %rbx movabsq $0xfffffe0000000000, %rax orq %rax, %rbx movq %r13, %rcx andq $0xfffff, %rcx movabsq $0xc000000000000000, %rax orq %rax, %rcx movq 0xa0(%rsp), %rax imulq %r8, %rax movq 0xb0(%rsp), %rdx imulq %r15, %rdx imulq 0xa8(%rsp), %r8 imulq 0xb8(%rsp), %r15 addq %r8, %r15 leaq (%rax,%rdx), %r9 movq 0xa0(%rsp), %rax imulq %r10, %rax movq 0xb0(%rsp), %rdx imulq %r11, %rdx imulq 0xa8(%rsp), %r10 imulq 0xb8(%rsp), %r11 addq %r10, %r11 leaq (%rax,%rdx), %r13 movq $0xfffffffffffffffe, %rax movl $0x2, %edx movq %rbx, %rdi movq %rax, %r8 testq %rsi, %rsi cmovs %rbp, %r8 testq 
$0x1, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq 
%rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx cmovs %rbp, %r8 movq %rbx, %rdi testq %rdx, %rcx cmoveq %rbp, %r8 cmoveq %rbp, %rdi sarq $1, %rcx xorq %r8, %rdi xorq %r8, %rsi btq $0x3f, %r8 cmovbq %rcx, %rbx movq %rax, %r8 subq %rax, %rsi leaq (%rcx,%rdi), %rcx sarq $1, %rcx movl $0x100000, %eax leaq (%rbx,%rax), %r8 leaq (%rcx,%rax), %r12 shlq $0x15, %r8 shlq $0x15, %r12 sarq $0x2b, %r8 sarq $0x2b, %r12 movabsq $0x20000100000, %rax leaq (%rbx,%rax), %r10 leaq (%rcx,%rax), %r14 sarq $0x2b, %r10 sarq $0x2b, %r14 movq %r9, %rax imulq %r8, %rax movq %r13, %rdx imulq %r10, %rdx imulq %r15, %r8 imulq %r11, %r10 addq %r8, %r10 leaq (%rax,%rdx), %r8 movq %r9, %rax imulq %r12, %rax movq %r13, %rdx imulq %r14, %rdx imulq %r15, %r12 imulq %r11, %r14 addq %r12, %r14 leaq (%rax,%rdx), %r12 movq %rsi, 0x98(%rsp) decq 0x90(%rsp) jne edwards25519_scalarmulbase_alt_inverseloop movq (%rsp), %rax movq 0x20(%rsp), %rcx imulq %r8, %rax imulq %r10, %rcx addq %rcx, %rax sarq $0x3f, %rax movq %r8, %r9 sarq $0x3f, %r9 xorq %r9, %r8 subq %r9, %r8 xorq %rax, %r9 movq %r10, %r11 sarq $0x3f, %r11 xorq %r11, %r10 subq %r11, %r10 xorq %rax, %r11 movq %r12, %r13 sarq $0x3f, %r13 xorq %r13, %r12 subq %r13, %r12 xorq %rax, %r13 movq %r14, %r15 sarq $0x3f, %r15 xorq %r15, %r14 subq %r15, %r14 xorq %rax, %r15 movq %r8, %rax andq %r9, %rax movq %r10, %r12 andq %r11, %r12 addq %rax, %r12 xorl %r13d, %r13d movq 0x40(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r12 adcq %rdx, %r13 movq 0x60(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r12 adcq %rdx, %r13 xorl %r14d, %r14d movq 0x48(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r13 adcq %rdx, %r14 movq 0x68(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq 0x50(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq 0x70(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 adcq %rdx, %r15 movq 0x58(%rsp), %rax xorq %r9, %rax andq %r8, %r9 negq %r9 mulq %r8 addq %rax, %r15 adcq %rdx, %r9 movq 0x78(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %r9 mulq %r10 addq %rax, %r15 adcq %rdx, %r9 movq %r9, %rax shldq $0x1, %r15, %rax sarq $0x3f, %r9 movl $0x13, %ebx leaq 0x1(%rax,%r9,1), %rax imulq %rbx xorl %ebp, %ebp addq %rax, %r12 adcq %rdx, %r13 adcq %r9, %r14 adcq %r9, %r15 shlq $0x3f, %rax addq %rax, %r15 cmovns %rbp, %rbx subq %rbx, %r12 sbbq %rbp, %r13 sbbq %rbp, %r14 sbbq %rbp, %r15 btr $0x3f, %r15 movq 0xc0(%rsp), %rdi movq %r12, (%rdi) movq %r13, 0x8(%rdi) movq %r14, 0x10(%rdi) movq %r15, 0x18(%rdi) // The final result is x = X * inv(Z), y = Y * inv(Z). // These are the only operations in the whole computation that // fully reduce modulo p_25519 since now we want the canonical // answer as output. movq res, %rbp mul_p25519(resx,x_3,w_3) mul_p25519(resy,y_3,w_3) // Restore stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx ret // **************************************************************************** // The precomputed data (all read-only). This is currently part of the same // text section, which gives position-independent code with simple PC-relative // addressing. 
However it could be put in a separate section via something like // // .section .rodata // **************************************************************************** // 0 * B = 0 and 2^251 * B in extended-projective coordinates // but with Z = 1 assumed and hence left out, so they are (X,Y,T) only. edwards25519_scalarmulbase_alt_0g: .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000001 .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000000 .quad 0x0000000000000000 edwards25519_scalarmulbase_alt_251g: .quad 0x525f946d7c7220e7 .quad 0x4636b0b2f1e35444 .quad 0x796e9d70e892ae0f .quad 0x03dec05fa937adb1 .quad 0x6d1c271cc6375515 .quad 0x462588c4a4ca4f14 .quad 0x691129fee55afc39 .quad 0x15949f784d8472f5 .quad 0xbd89e510afad0049 .quad 0x4d1f08c073b9860e .quad 0x07716e8b2d00af9d .quad 0x70d685f68f859714 // Precomputed table of multiples of generator for edwards25519 // all in precomputed extended-projective (y-x,x+y,2*d*x*y) triples. edwards25519_scalarmulbase_alt_gtable: // 2^0 * 1 * G .quad 0x9d103905d740913e .quad 0xfd399f05d140beb3 .quad 0xa5c18434688f8a09 .quad 0x44fd2f9298f81267 .quad 0x2fbc93c6f58c3b85 .quad 0xcf932dc6fb8c0e19 .quad 0x270b4898643d42c2 .quad 0x07cf9d3a33d4ba65 .quad 0xabc91205877aaa68 .quad 0x26d9e823ccaac49e .quad 0x5a1b7dcbdd43598c .quad 0x6f117b689f0c65a8 // 2^0 * 2 * G .quad 0x8a99a56042b4d5a8 .quad 0x8f2b810c4e60acf6 .quad 0xe09e236bb16e37aa .quad 0x6bb595a669c92555 .quad 0x9224e7fc933c71d7 .quad 0x9f469d967a0ff5b5 .quad 0x5aa69a65e1d60702 .quad 0x590c063fa87d2e2e .quad 0x43faa8b3a59b7a5f .quad 0x36c16bdd5d9acf78 .quad 0x500fa0840b3d6a31 .quad 0x701af5b13ea50b73 // 2^0 * 3 * G .quad 0x56611fe8a4fcd265 .quad 0x3bd353fde5c1ba7d .quad 0x8131f31a214bd6bd .quad 0x2ab91587555bda62 .quad 0xaf25b0a84cee9730 .quad 0x025a8430e8864b8a .quad 0xc11b50029f016732 .quad 0x7a164e1b9a80f8f4 .quad 0x14ae933f0dd0d889 .quad 0x589423221c35da62 .quad 0xd170e5458cf2db4c .quad 0x5a2826af12b9b4c6 // 2^0 * 4 * G .quad 0x95fe050a056818bf .quad 0x327e89715660faa9 .quad 0xc3e8e3cd06a05073 .quad 0x27933f4c7445a49a .quad 0x287351b98efc099f .quad 0x6765c6f47dfd2538 .quad 0xca348d3dfb0a9265 .quad 0x680e910321e58727 .quad 0x5a13fbe9c476ff09 .quad 0x6e9e39457b5cc172 .quad 0x5ddbdcf9102b4494 .quad 0x7f9d0cbf63553e2b // 2^0 * 5 * G .quad 0x7f9182c3a447d6ba .quad 0xd50014d14b2729b7 .quad 0xe33cf11cb864a087 .quad 0x154a7e73eb1b55f3 .quad 0xa212bc4408a5bb33 .quad 0x8d5048c3c75eed02 .quad 0xdd1beb0c5abfec44 .quad 0x2945ccf146e206eb .quad 0xbcbbdbf1812a8285 .quad 0x270e0807d0bdd1fc .quad 0xb41b670b1bbda72d .quad 0x43aabe696b3bb69a // 2^0 * 6 * G .quad 0x499806b67b7d8ca4 .quad 0x575be28427d22739 .quad 0xbb085ce7204553b9 .quad 0x38b64c41ae417884 .quad 0x3a0ceeeb77157131 .quad 0x9b27158900c8af88 .quad 0x8065b668da59a736 .quad 0x51e57bb6a2cc38bd .quad 0x85ac326702ea4b71 .quad 0xbe70e00341a1bb01 .quad 0x53e4a24b083bc144 .quad 0x10b8e91a9f0d61e3 // 2^0 * 7 * G .quad 0xba6f2c9aaa3221b1 .quad 0x6ca021533bba23a7 .quad 0x9dea764f92192c3a .quad 0x1d6edd5d2e5317e0 .quad 0x6b1a5cd0944ea3bf .quad 0x7470353ab39dc0d2 .quad 0x71b2528228542e49 .quad 0x461bea69283c927e .quad 0xf1836dc801b8b3a2 .quad 0xb3035f47053ea49a .quad 0x529c41ba5877adf3 .quad 0x7a9fbb1c6a0f90a7 // 2^0 * 8 * G .quad 0xe2a75dedf39234d9 .quad 0x963d7680e1b558f9 .quad 0x2c2741ac6e3c23fb .quad 0x3a9024a1320e01c3 .quad 0x59b7596604dd3e8f .quad 0x6cb30377e288702c .quad 0xb1339c665ed9c323 .quad 
0x0915e76061bce52f .quad 0xe7c1f5d9c9a2911a .quad 0xb8a371788bcca7d7 .quad 0x636412190eb62a32 .quad 0x26907c5c2ecc4e95 // 2^4 * 1 * G .quad 0x7ec851ca553e2df3 .quad 0xa71284cba64878b3 .quad 0xe6b5e4193288d1e7 .quad 0x4cf210ec5a9a8883 .quad 0x322d04a52d9021f6 .quad 0xb9c19f3375c6bf9c .quad 0x587a3a4342d20b09 .quad 0x143b1cf8aa64fe61 .quad 0x9f867c7d968acaab .quad 0x5f54258e27092729 .quad 0xd0a7d34bea180975 .quad 0x21b546a3374126e1 // 2^4 * 2 * G .quad 0xa94ff858a2888343 .quad 0xce0ed4565313ed3c .quad 0xf55c3dcfb5bf34fa .quad 0x0a653ca5c9eab371 .quad 0x490a7a45d185218f .quad 0x9a15377846049335 .quad 0x0060ea09cc31e1f6 .quad 0x7e041577f86ee965 .quad 0x66b2a496ce5b67f3 .quad 0xff5492d8bd569796 .quad 0x503cec294a592cd0 .quad 0x566943650813acb2 // 2^4 * 3 * G .quad 0xb818db0c26620798 .quad 0x5d5c31d9606e354a .quad 0x0982fa4f00a8cdc7 .quad 0x17e12bcd4653e2d4 .quad 0x5672f9eb1dabb69d .quad 0xba70b535afe853fc .quad 0x47ac0f752796d66d .quad 0x32a5351794117275 .quad 0xd3a644a6df648437 .quad 0x703b6559880fbfdd .quad 0xcb852540ad3a1aa5 .quad 0x0900b3f78e4c6468 // 2^4 * 4 * G .quad 0x0a851b9f679d651b .quad 0xe108cb61033342f2 .quad 0xd601f57fe88b30a3 .quad 0x371f3acaed2dd714 .quad 0xed280fbec816ad31 .quad 0x52d9595bd8e6efe3 .quad 0x0fe71772f6c623f5 .quad 0x4314030b051e293c .quad 0xd560005efbf0bcad .quad 0x8eb70f2ed1870c5e .quad 0x201f9033d084e6a0 .quad 0x4c3a5ae1ce7b6670 // 2^4 * 5 * G .quad 0x4138a434dcb8fa95 .quad 0x870cf67d6c96840b .quad 0xde388574297be82c .quad 0x7c814db27262a55a .quad 0xbaf875e4c93da0dd .quad 0xb93282a771b9294d .quad 0x80d63fb7f4c6c460 .quad 0x6de9c73dea66c181 .quad 0x478904d5a04df8f2 .quad 0xfafbae4ab10142d3 .quad 0xf6c8ac63555d0998 .quad 0x5aac4a412f90b104 // 2^4 * 6 * G .quad 0xc64f326b3ac92908 .quad 0x5551b282e663e1e0 .quad 0x476b35f54a1a4b83 .quad 0x1b9da3fe189f68c2 .quad 0x603a0d0abd7f5134 .quad 0x8089c932e1d3ae46 .quad 0xdf2591398798bd63 .quad 0x1c145cd274ba0235 .quad 0x32e8386475f3d743 .quad 0x365b8baf6ae5d9ef .quad 0x825238b6385b681e .quad 0x234929c1167d65e1 // 2^4 * 7 * G .quad 0x984decaba077ade8 .quad 0x383f77ad19eb389d .quad 0xc7ec6b7e2954d794 .quad 0x59c77b3aeb7c3a7a .quad 0x48145cc21d099fcf .quad 0x4535c192cc28d7e5 .quad 0x80e7c1e548247e01 .quad 0x4a5f28743b2973ee .quad 0xd3add725225ccf62 .quad 0x911a3381b2152c5d .quad 0xd8b39fad5b08f87d .quad 0x6f05606b4799fe3b // 2^4 * 8 * G .quad 0x9ffe9e92177ba962 .quad 0x98aee71d0de5cae1 .quad 0x3ff4ae942d831044 .quad 0x714de12e58533ac8 .quad 0x5b433149f91b6483 .quad 0xadb5dc655a2cbf62 .quad 0x87fa8412632827b3 .quad 0x60895e91ab49f8d8 .quad 0xe9ecf2ed0cf86c18 .quad 0xb46d06120735dfd4 .quad 0xbc9da09804b96be7 .quad 0x73e2e62fd96dc26b // 2^8 * 1 * G .quad 0xed5b635449aa515e .quad 0xa865c49f0bc6823a .quad 0x850c1fe95b42d1c4 .quad 0x30d76d6f03d315b9 .quad 0x2eccdd0e632f9c1d .quad 0x51d0b69676893115 .quad 0x52dfb76ba8637a58 .quad 0x6dd37d49a00eef39 .quad 0x6c4444172106e4c7 .quad 0xfb53d680928d7f69 .quad 0xb4739ea4694d3f26 .quad 0x10c697112e864bb0 // 2^8 * 2 * G .quad 0x6493c4277dbe5fde .quad 0x265d4fad19ad7ea2 .quad 0x0e00dfc846304590 .quad 0x25e61cabed66fe09 .quad 0x0ca62aa08358c805 .quad 0x6a3d4ae37a204247 .quad 0x7464d3a63b11eddc .quad 0x03bf9baf550806ef .quad 0x3f13e128cc586604 .quad 0x6f5873ecb459747e .quad 0xa0b63dedcc1268f5 .quad 0x566d78634586e22c // 2^8 * 3 * G .quad 0x1637a49f9cc10834 .quad 0xbc8e56d5a89bc451 .quad 0x1cb5ec0f7f7fd2db .quad 0x33975bca5ecc35d9 .quad 0xa1054285c65a2fd0 .quad 0x6c64112af31667c3 .quad 0x680ae240731aee58 .quad 0x14fba5f34793b22a .quad 0x3cd746166985f7d4 .quad 0x593e5e84c9c80057 .quad 
0x2fc3f2b67b61131e .quad 0x14829cea83fc526c // 2^8 * 4 * G .quad 0xff437b8497dd95c2 .quad 0x6c744e30aa4eb5a7 .quad 0x9e0c5d613c85e88b .quad 0x2fd9c71e5f758173 .quad 0x21e70b2f4e71ecb8 .quad 0xe656ddb940a477e3 .quad 0xbf6556cece1d4f80 .quad 0x05fc3bc4535d7b7e .quad 0x24b8b3ae52afdedd .quad 0x3495638ced3b30cf .quad 0x33a4bc83a9be8195 .quad 0x373767475c651f04 // 2^8 * 5 * G .quad 0x2fba99fd40d1add9 .quad 0xb307166f96f4d027 .quad 0x4363f05215f03bae .quad 0x1fbea56c3b18f999 .quad 0x634095cb14246590 .quad 0xef12144016c15535 .quad 0x9e38140c8910bc60 .quad 0x6bf5905730907c8c .quad 0x0fa778f1e1415b8a .quad 0x06409ff7bac3a77e .quad 0x6f52d7b89aa29a50 .quad 0x02521cf67a635a56 // 2^8 * 6 * G .quad 0x513fee0b0a9d5294 .quad 0x8f98e75c0fdf5a66 .quad 0xd4618688bfe107ce .quad 0x3fa00a7e71382ced .quad 0xb1146720772f5ee4 .quad 0xe8f894b196079ace .quad 0x4af8224d00ac824a .quad 0x001753d9f7cd6cc4 .quad 0x3c69232d963ddb34 .quad 0x1dde87dab4973858 .quad 0xaad7d1f9a091f285 .quad 0x12b5fe2fa048edb6 // 2^8 * 7 * G .quad 0x71f0fbc496fce34d .quad 0x73b9826badf35bed .quad 0xd2047261ff28c561 .quad 0x749b76f96fb1206f .quad 0xdf2b7c26ad6f1e92 .quad 0x4b66d323504b8913 .quad 0x8c409dc0751c8bc3 .quad 0x6f7e93c20796c7b8 .quad 0x1f5af604aea6ae05 .quad 0xc12351f1bee49c99 .quad 0x61a808b5eeff6b66 .quad 0x0fcec10f01e02151 // 2^8 * 8 * G .quad 0x644d58a649fe1e44 .quad 0x21fcaea231ad777e .quad 0x02441c5a887fd0d2 .quad 0x4901aa7183c511f3 .quad 0x3df2d29dc4244e45 .quad 0x2b020e7493d8de0a .quad 0x6cc8067e820c214d .quad 0x413779166feab90a .quad 0x08b1b7548c1af8f0 .quad 0xce0f7a7c246299b4 .quad 0xf760b0f91e06d939 .quad 0x41bb887b726d1213 // 2^12 * 1 * G .quad 0x9267806c567c49d8 .quad 0x066d04ccca791e6a .quad 0xa69f5645e3cc394b .quad 0x5c95b686a0788cd2 .quad 0x97d980e0aa39f7d2 .quad 0x35d0384252c6b51c .quad 0x7d43f49307cd55aa .quad 0x56bd36cfb78ac362 .quad 0x2ac519c10d14a954 .quad 0xeaf474b494b5fa90 .quad 0xe6af8382a9f87a5a .quad 0x0dea6db1879be094 // 2^12 * 2 * G .quad 0xaa66bf547344e5ab .quad 0xda1258888f1b4309 .quad 0x5e87d2b3fd564b2f .quad 0x5b2c78885483b1dd .quad 0x15baeb74d6a8797a .quad 0x7ef55cf1fac41732 .quad 0x29001f5a3c8b05c5 .quad 0x0ad7cc8752eaccfb .quad 0x52151362793408cf .quad 0xeb0f170319963d94 .quad 0xa833b2fa883d9466 .quad 0x093a7fa775003c78 // 2^12 * 3 * G .quad 0xe5107de63a16d7be .quad 0xa377ffdc9af332cf .quad 0x70d5bf18440b677f .quad 0x6a252b19a4a31403 .quad 0xb8e9604460a91286 .quad 0x7f3fd8047778d3de .quad 0x67d01e31bf8a5e2d .quad 0x7b038a06c27b653e .quad 0x9ed919d5d36990f3 .quad 0x5213aebbdb4eb9f2 .quad 0xc708ea054cb99135 .quad 0x58ded57f72260e56 // 2^12 * 4 * G .quad 0x78e79dade9413d77 .quad 0xf257f9d59729e67d .quad 0x59db910ee37aa7e6 .quad 0x6aa11b5bbb9e039c .quad 0xda6d53265b0fd48b .quad 0x8960823193bfa988 .quad 0xd78ac93261d57e28 .quad 0x79f2942d3a5c8143 .quad 0x97da2f25b6c88de9 .quad 0x251ba7eaacf20169 .quad 0x09b44f87ef4eb4e4 .quad 0x7d90ab1bbc6a7da5 // 2^12 * 5 * G .quad 0x9acca683a7016bfe .quad 0x90505f4df2c50b6d .quad 0x6b610d5fcce435aa .quad 0x19a10d446198ff96 .quad 0x1a07a3f496b3c397 .quad 0x11ceaa188f4e2532 .quad 0x7d9498d5a7751bf0 .quad 0x19ed161f508dd8a0 .quad 0x560a2cd687dce6ca .quad 0x7f3568c48664cf4d .quad 0x8741e95222803a38 .quad 0x483bdab1595653fc // 2^12 * 6 * G .quad 0xfa780f148734fa49 .quad 0x106f0b70360534e0 .quad 0x2210776fe3e307bd .quad 0x3286c109dde6a0fe .quad 0xd6cf4d0ab4da80f6 .quad 0x82483e45f8307fe0 .quad 0x05005269ae6f9da4 .quad 0x1c7052909cf7877a .quad 0x32ee7de2874e98d4 .quad 0x14c362e9b97e0c60 .quad 0x5781dcde6a60a38a .quad 0x217dd5eaaa7aa840 // 2^12 * 7 * G .quad 
0x9db7c4d0248e1eb0 .quad 0xe07697e14d74bf52 .quad 0x1e6a9b173c562354 .quad 0x7fa7c21f795a4965 .quad 0x8bdf1fb9be8c0ec8 .quad 0x00bae7f8e30a0282 .quad 0x4963991dad6c4f6c .quad 0x07058a6e5df6f60a .quad 0xe9eb02c4db31f67f .quad 0xed25fd8910bcfb2b .quad 0x46c8131f5c5cddb4 .quad 0x33b21c13a0cb9bce // 2^12 * 8 * G .quad 0x360692f8087d8e31 .quad 0xf4dcc637d27163f7 .quad 0x25a4e62065ea5963 .quad 0x659bf72e5ac160d9 .quad 0x9aafb9b05ee38c5b .quad 0xbf9d2d4e071a13c7 .quad 0x8eee6e6de933290a .quad 0x1c3bab17ae109717 .quad 0x1c9ab216c7cab7b0 .quad 0x7d65d37407bbc3cc .quad 0x52744750504a58d5 .quad 0x09f2606b131a2990 // 2^16 * 1 * G .quad 0x40e87d44744346be .quad 0x1d48dad415b52b25 .quad 0x7c3a8a18a13b603e .quad 0x4eb728c12fcdbdf7 .quad 0x7e234c597c6691ae .quad 0x64889d3d0a85b4c8 .quad 0xdae2c90c354afae7 .quad 0x0a871e070c6a9e1d .quad 0x3301b5994bbc8989 .quad 0x736bae3a5bdd4260 .quad 0x0d61ade219d59e3c .quad 0x3ee7300f2685d464 // 2^16 * 2 * G .quad 0xf5d255e49e7dd6b7 .quad 0x8016115c610b1eac .quad 0x3c99975d92e187ca .quad 0x13815762979125c2 .quad 0x43fa7947841e7518 .quad 0xe5c6fa59639c46d7 .quad 0xa1065e1de3052b74 .quad 0x7d47c6a2cfb89030 .quad 0x3fdad0148ef0d6e0 .quad 0x9d3e749a91546f3c .quad 0x71ec621026bb8157 .quad 0x148cf58d34c9ec80 // 2^16 * 3 * G .quad 0x46a492f67934f027 .quad 0x469984bef6840aa9 .quad 0x5ca1bc2a89611854 .quad 0x3ff2fa1ebd5dbbd4 .quad 0xe2572f7d9ae4756d .quad 0x56c345bb88f3487f .quad 0x9fd10b6d6960a88d .quad 0x278febad4eaea1b9 .quad 0xb1aa681f8c933966 .quad 0x8c21949c20290c98 .quad 0x39115291219d3c52 .quad 0x4104dd02fe9c677b // 2^16 * 4 * G .quad 0x72b2bf5e1124422a .quad 0xa1fa0c3398a33ab5 .quad 0x94cb6101fa52b666 .quad 0x2c863b00afaf53d5 .quad 0x81214e06db096ab8 .quad 0x21a8b6c90ce44f35 .quad 0x6524c12a409e2af5 .quad 0x0165b5a48efca481 .quad 0xf190a474a0846a76 .quad 0x12eff984cd2f7cc0 .quad 0x695e290658aa2b8f .quad 0x591b67d9bffec8b8 // 2^16 * 5 * G .quad 0x312f0d1c80b49bfa .quad 0x5979515eabf3ec8a .quad 0x727033c09ef01c88 .quad 0x3de02ec7ca8f7bcb .quad 0x99b9b3719f18b55d .quad 0xe465e5faa18c641e .quad 0x61081136c29f05ed .quad 0x489b4f867030128b .quad 0xd232102d3aeb92ef .quad 0xe16253b46116a861 .quad 0x3d7eabe7190baa24 .quad 0x49f5fbba496cbebf // 2^16 * 6 * G .quad 0x30949a108a5bcfd4 .quad 0xdc40dd70bc6473eb .quad 0x92c294c1307c0d1c .quad 0x5604a86dcbfa6e74 .quad 0x155d628c1e9c572e .quad 0x8a4d86acc5884741 .quad 0x91a352f6515763eb .quad 0x06a1a6c28867515b .quad 0x7288d1d47c1764b6 .quad 0x72541140e0418b51 .quad 0x9f031a6018acf6d1 .quad 0x20989e89fe2742c6 // 2^16 * 7 * G .quad 0x499777fd3a2dcc7f .quad 0x32857c2ca54fd892 .quad 0xa279d864d207e3a0 .quad 0x0403ed1d0ca67e29 .quad 0x1674278b85eaec2e .quad 0x5621dc077acb2bdf .quad 0x640a4c1661cbf45a .quad 0x730b9950f70595d3 .quad 0xc94b2d35874ec552 .quad 0xc5e6c8cf98246f8d .quad 0xf7cb46fa16c035ce .quad 0x5bd7454308303dcc // 2^16 * 8 * G .quad 0x7f9ad19528b24cc2 .quad 0x7f6b54656335c181 .quad 0x66b8b66e4fc07236 .quad 0x133a78007380ad83 .quad 0x85c4932115e7792a .quad 0xc64c89a2bdcdddc9 .quad 0x9d1e3da8ada3d762 .quad 0x5bb7db123067f82c .quad 0x0961f467c6ca62be .quad 0x04ec21d6211952ee .quad 0x182360779bd54770 .quad 0x740dca6d58f0e0d2 // 2^20 * 1 * G .quad 0x50b70bf5d3f0af0b .quad 0x4feaf48ae32e71f7 .quad 0x60e84ed3a55bbd34 .quad 0x00ed489b3f50d1ed .quad 0x3906c72aed261ae5 .quad 0x9ab68fd988e100f7 .quad 0xf5e9059af3360197 .quad 0x0e53dc78bf2b6d47 .quad 0xb90829bf7971877a .quad 0x5e4444636d17e631 .quad 0x4d05c52e18276893 .quad 0x27632d9a5a4a4af5 // 2^20 * 2 * G .quad 0xd11ff05154b260ce .quad 0xd86dc38e72f95270 .quad 0x601fcd0d267cc138 .quad 
0x2b67916429e90ccd .quad 0xa98285d187eaffdb .quad 0xa5b4fbbbd8d0a864 .quad 0xb658f27f022663f7 .quad 0x3bbc2b22d99ce282 .quad 0xb917c952583c0a58 .quad 0x653ff9b80fe4c6f3 .quad 0x9b0da7d7bcdf3c0c .quad 0x43a0eeb6ab54d60e // 2^20 * 3 * G .quad 0x396966a46d4a5487 .quad 0xf811a18aac2bb3ba .quad 0x66e4685b5628b26b .quad 0x70a477029d929b92 .quad 0x3ac6322357875fe8 .quad 0xd9d4f4ecf5fbcb8f .quad 0x8dee8493382bb620 .quad 0x50c5eaa14c799fdc .quad 0xdd0edc8bd6f2fb3c .quad 0x54c63aa79cc7b7a0 .quad 0xae0b032b2c8d9f1a .quad 0x6f9ce107602967fb // 2^20 * 4 * G .quad 0xad1054b1cde1c22a .quad 0xc4a8e90248eb32df .quad 0x5f3e7b33accdc0ea .quad 0x72364713fc79963e .quad 0x139693063520e0b5 .quad 0x437fcf7c88ea03fe .quad 0xf7d4c40bd3c959bc .quad 0x699154d1f893ded9 .quad 0x315d5c75b4b27526 .quad 0xcccb842d0236daa5 .quad 0x22f0c8a3345fee8e .quad 0x73975a617d39dbed // 2^20 * 5 * G .quad 0xe4024df96375da10 .quad 0x78d3251a1830c870 .quad 0x902b1948658cd91c .quad 0x7e18b10b29b7438a .quad 0x6f37f392f4433e46 .quad 0x0e19b9a11f566b18 .quad 0x220fb78a1fd1d662 .quad 0x362a4258a381c94d .quad 0x9071d9132b6beb2f .quad 0x0f26e9ad28418247 .quad 0xeab91ec9bdec925d .quad 0x4be65bc8f48af2de // 2^20 * 6 * G .quad 0x78487feba36e7028 .quad 0x5f3f13001dd8ce34 .quad 0x934fb12d4b30c489 .quad 0x056c244d397f0a2b .quad 0x1d50fba257c26234 .quad 0x7bd4823adeb0678b .quad 0xc2b0dc6ea6538af5 .quad 0x5665eec6351da73e .quad 0xdb3ee00943bfb210 .quad 0x4972018720800ac2 .quad 0x26ab5d6173bd8667 .quad 0x20b209c2ab204938 // 2^20 * 7 * G .quad 0x549e342ac07fb34b .quad 0x02d8220821373d93 .quad 0xbc262d70acd1f567 .quad 0x7a92c9fdfbcac784 .quad 0x1fcca94516bd3289 .quad 0x448d65aa41420428 .quad 0x59c3b7b216a55d62 .quad 0x49992cc64e612cd8 .quad 0x65bd1bea70f801de .quad 0x1befb7c0fe49e28a .quad 0xa86306cdb1b2ae4a .quad 0x3b7ac0cd265c2a09 // 2^20 * 8 * G .quad 0x822bee438c01bcec .quad 0x530cb525c0fbc73b .quad 0x48519034c1953fe9 .quad 0x265cc261e09a0f5b .quad 0xf0d54e4f22ed39a7 .quad 0xa2aae91e5608150a .quad 0xf421b2e9eddae875 .quad 0x31bc531d6b7de992 .quad 0xdf3d134da980f971 .quad 0x7a4fb8d1221a22a7 .quad 0x3df7d42035aad6d8 .quad 0x2a14edcc6a1a125e // 2^24 * 1 * G .quad 0xdf48ee0752cfce4e .quad 0xc3fffaf306ec08b7 .quad 0x05710b2ab95459c4 .quad 0x161d25fa963ea38d .quad 0x231a8c570478433c .quad 0xb7b5270ec281439d .quad 0xdbaa99eae3d9079f .quad 0x2c03f5256c2b03d9 .quad 0x790f18757b53a47d .quad 0x307b0130cf0c5879 .quad 0x31903d77257ef7f9 .quad 0x699468bdbd96bbaf // 2^24 * 2 * G .quad 0xbd1f2f46f4dafecf .quad 0x7cef0114a47fd6f7 .quad 0xd31ffdda4a47b37f .quad 0x525219a473905785 .quad 0xd8dd3de66aa91948 .quad 0x485064c22fc0d2cc .quad 0x9b48246634fdea2f .quad 0x293e1c4e6c4a2e3a .quad 0x376e134b925112e1 .quad 0x703778b5dca15da0 .quad 0xb04589af461c3111 .quad 0x5b605c447f032823 // 2^24 * 3 * G .quad 0xb965805920c47c89 .quad 0xe7f0100c923b8fcc .quad 0x0001256502e2ef77 .quad 0x24a76dcea8aeb3ee .quad 0x3be9fec6f0e7f04c .quad 0x866a579e75e34962 .quad 0x5542ef161e1de61a .quad 0x2f12fef4cc5abdd5 .quad 0x0a4522b2dfc0c740 .quad 0x10d06e7f40c9a407 .quad 0xc6cf144178cff668 .quad 0x5e607b2518a43790 // 2^24 * 4 * G .quad 0x58b31d8f6cdf1818 .quad 0x35cfa74fc36258a2 .quad 0xe1b3ff4f66e61d6e .quad 0x5067acab6ccdd5f7 .quad 0xa02c431ca596cf14 .quad 0xe3c42d40aed3e400 .quad 0xd24526802e0f26db .quad 0x201f33139e457068 .quad 0xfd527f6b08039d51 .quad 0x18b14964017c0006 .quad 0xd5220eb02e25a4a8 .quad 0x397cba8862460375 // 2^24 * 5 * G .quad 0x30c13093f05959b2 .quad 0xe23aa18de9a97976 .quad 0x222fd491721d5e26 .quad 0x2339d320766e6c3a .quad 0x7815c3fbc81379e7 .quad 0xa6619420dde12af1 .quad 
0xffa9c0f885a8fdd5 .quad 0x771b4022c1e1c252 .quad 0xd87dd986513a2fa7 .quad 0xf5ac9b71f9d4cf08 .quad 0xd06bc31b1ea283b3 .quad 0x331a189219971a76 // 2^24 * 6 * G .quad 0xf5166f45fb4f80c6 .quad 0x9c36c7de61c775cf .quad 0xe3d4e81b9041d91c .quad 0x31167c6b83bdfe21 .quad 0x26512f3a9d7572af .quad 0x5bcbe28868074a9e .quad 0x84edc1c11180f7c4 .quad 0x1ac9619ff649a67b .quad 0xf22b3842524b1068 .quad 0x5068343bee9ce987 .quad 0xfc9d71844a6250c8 .quad 0x612436341f08b111 // 2^24 * 7 * G .quad 0xd99d41db874e898d .quad 0x09fea5f16c07dc20 .quad 0x793d2c67d00f9bbc .quad 0x46ebe2309e5eff40 .quad 0x8b6349e31a2d2638 .quad 0x9ddfb7009bd3fd35 .quad 0x7f8bf1b8a3a06ba4 .quad 0x1522aa3178d90445 .quad 0x2c382f5369614938 .quad 0xdafe409ab72d6d10 .quad 0xe8c83391b646f227 .quad 0x45fe70f50524306c // 2^24 * 8 * G .quad 0xda4875a6960c0b8c .quad 0x5b68d076ef0e2f20 .quad 0x07fb51cf3d0b8fd4 .quad 0x428d1623a0e392d4 .quad 0x62f24920c8951491 .quad 0x05f007c83f630ca2 .quad 0x6fbb45d2f5c9d4b8 .quad 0x16619f6db57a2245 .quad 0x084f4a4401a308fd .quad 0xa82219c376a5caac .quad 0xdeb8de4643d1bc7d .quad 0x1d81592d60bd38c6 // 2^28 * 1 * G .quad 0xd833d7beec2a4c38 .quad 0x2c9162830acc20ed .quad 0xe93a47aa92df7581 .quad 0x702d67a3333c4a81 .quad 0x3a4a369a2f89c8a1 .quad 0x63137a1d7c8de80d .quad 0xbcac008a78eda015 .quad 0x2cb8b3a5b483b03f .quad 0x36e417cbcb1b90a1 .quad 0x33b3ddaa7f11794e .quad 0x3f510808885bc607 .quad 0x24141dc0e6a8020d // 2^28 * 2 * G .quad 0x59f73c773fefee9d .quad 0xb3f1ef89c1cf989d .quad 0xe35dfb42e02e545f .quad 0x5766120b47a1b47c .quad 0x91925dccbd83157d .quad 0x3ca1205322cc8094 .quad 0x28e57f183f90d6e4 .quad 0x1a4714cede2e767b .quad 0xdb20ba0fb8b6b7ff .quad 0xb732c3b677511fa1 .quad 0xa92b51c099f02d89 .quad 0x4f3875ad489ca5f1 // 2^28 * 3 * G .quad 0xc7fc762f4932ab22 .quad 0x7ac0edf72f4c3c1b .quad 0x5f6b55aa9aa895e8 .quad 0x3680274dad0a0081 .quad 0x79ed13f6ee73eec0 .quad 0xa5c6526d69110bb1 .quad 0xe48928c38603860c .quad 0x722a1446fd7059f5 .quad 0xd0959fe9a8cf8819 .quad 0xd0a995508475a99c .quad 0x6eac173320b09cc5 .quad 0x628ecf04331b1095 // 2^28 * 4 * G .quad 0x98bcb118a9d0ddbc .quad 0xee449e3408b4802b .quad 0x87089226b8a6b104 .quad 0x685f349a45c7915d .quad 0x9b41acf85c74ccf1 .quad 0xb673318108265251 .quad 0x99c92aed11adb147 .quad 0x7a47d70d34ecb40f .quad 0x60a0c4cbcc43a4f5 .quad 0x775c66ca3677bea9 .quad 0xa17aa1752ff8f5ed .quad 0x11ded9020e01fdc0 // 2^28 * 5 * G .quad 0x890e7809caefe704 .quad 0x8728296de30e8c6c .quad 0x4c5cd2a392aeb1c9 .quad 0x194263d15771531f .quad 0x471f95b03bea93b7 .quad 0x0552d7d43313abd3 .quad 0xbd9370e2e17e3f7b .quad 0x7b120f1db20e5bec .quad 0x17d2fb3d86502d7a .quad 0xb564d84450a69352 .quad 0x7da962c8a60ed75d .quad 0x00d0f85b318736aa // 2^28 * 6 * G .quad 0x978b142e777c84fd .quad 0xf402644705a8c062 .quad 0xa67ad51be7e612c7 .quad 0x2f7b459698dd6a33 .quad 0xa6753c1efd7621c1 .quad 0x69c0b4a7445671f5 .quad 0x971f527405b23c11 .quad 0x387bc74851a8c7cd .quad 0x81894b4d4a52a9a8 .quad 0xadd93e12f6b8832f .quad 0x184d8548b61bd638 .quad 0x3f1c62dbd6c9f6cd // 2^28 * 7 * G .quad 0x2e8f1f0091910c1f .quad 0xa4df4fe0bff2e12c .quad 0x60c6560aee927438 .quad 0x6338283facefc8fa .quad 0x3fad3e40148f693d .quad 0x052656e194eb9a72 .quad 0x2f4dcbfd184f4e2f .quad 0x406f8db1c482e18b .quad 0x9e630d2c7f191ee4 .quad 0x4fbf8301bc3ff670 .quad 0x787d8e4e7afb73c4 .quad 0x50d83d5be8f58fa5 // 2^28 * 8 * G .quad 0x85683916c11a1897 .quad 0x2d69a4efe506d008 .quad 0x39af1378f664bd01 .quad 0x65942131361517c6 .quad 0xc0accf90b4d3b66d .quad 0xa7059de561732e60 .quad 0x033d1f7870c6b0ba .quad 0x584161cd26d946e4 .quad 0xbbf2b1a072d27ca2 .quad 
0xbf393c59fbdec704 .quad 0xe98dbbcee262b81e .quad 0x02eebd0b3029b589 // 2^32 * 1 * G .quad 0x61368756a60dac5f .quad 0x17e02f6aebabdc57 .quad 0x7f193f2d4cce0f7d .quad 0x20234a7789ecdcf0 .quad 0x8765b69f7b85c5e8 .quad 0x6ff0678bd168bab2 .quad 0x3a70e77c1d330f9b .quad 0x3a5f6d51b0af8e7c .quad 0x76d20db67178b252 .quad 0x071c34f9d51ed160 .quad 0xf62a4a20b3e41170 .quad 0x7cd682353cffe366 // 2^32 * 2 * G .quad 0x0be1a45bd887fab6 .quad 0x2a846a32ba403b6e .quad 0xd9921012e96e6000 .quad 0x2838c8863bdc0943 .quad 0xa665cd6068acf4f3 .quad 0x42d92d183cd7e3d3 .quad 0x5759389d336025d9 .quad 0x3ef0253b2b2cd8ff .quad 0xd16bb0cf4a465030 .quad 0xfa496b4115c577ab .quad 0x82cfae8af4ab419d .quad 0x21dcb8a606a82812 // 2^32 * 3 * G .quad 0x5c6004468c9d9fc8 .quad 0x2540096ed42aa3cb .quad 0x125b4d4c12ee2f9c .quad 0x0bc3d08194a31dab .quad 0x9a8d00fabe7731ba .quad 0x8203607e629e1889 .quad 0xb2cc023743f3d97f .quad 0x5d840dbf6c6f678b .quad 0x706e380d309fe18b .quad 0x6eb02da6b9e165c7 .quad 0x57bbba997dae20ab .quad 0x3a4276232ac196dd // 2^32 * 4 * G .quad 0x4b42432c8a7084fa .quad 0x898a19e3dfb9e545 .quad 0xbe9f00219c58e45d .quad 0x1ff177cea16debd1 .quad 0x3bf8c172db447ecb .quad 0x5fcfc41fc6282dbd .quad 0x80acffc075aa15fe .quad 0x0770c9e824e1a9f9 .quad 0xcf61d99a45b5b5fd .quad 0x860984e91b3a7924 .quad 0xe7300919303e3e89 .quad 0x39f264fd41500b1e // 2^32 * 5 * G .quad 0xa7ad3417dbe7e29c .quad 0xbd94376a2b9c139c .quad 0xa0e91b8e93597ba9 .quad 0x1712d73468889840 .quad 0xd19b4aabfe097be1 .quad 0xa46dfce1dfe01929 .quad 0xc3c908942ca6f1ff .quad 0x65c621272c35f14e .quad 0xe72b89f8ce3193dd .quad 0x4d103356a125c0bb .quad 0x0419a93d2e1cfe83 .quad 0x22f9800ab19ce272 // 2^32 * 6 * G .quad 0x605a368a3e9ef8cb .quad 0xe3e9c022a5504715 .quad 0x553d48b05f24248f .quad 0x13f416cd647626e5 .quad 0x42029fdd9a6efdac .quad 0xb912cebe34a54941 .quad 0x640f64b987bdf37b .quad 0x4171a4d38598cab4 .quad 0xfa2758aa99c94c8c .quad 0x23006f6fb000b807 .quad 0xfbd291ddadda5392 .quad 0x508214fa574bd1ab // 2^32 * 7 * G .quad 0xc20269153ed6fe4b .quad 0xa65a6739511d77c4 .quad 0xcbde26462c14af94 .quad 0x22f960ec6faba74b .quad 0x461a15bb53d003d6 .quad 0xb2102888bcf3c965 .quad 0x27c576756c683a5a .quad 0x3a7758a4c86cb447 .quad 0x548111f693ae5076 .quad 0x1dae21df1dfd54a6 .quad 0x12248c90f3115e65 .quad 0x5d9fd15f8de7f494 // 2^32 * 8 * G .quad 0x031408d36d63727f .quad 0x6a379aefd7c7b533 .quad 0xa9e18fc5ccaee24b .quad 0x332f35914f8fbed3 .quad 0x3f244d2aeed7521e .quad 0x8e3a9028432e9615 .quad 0xe164ba772e9c16d4 .quad 0x3bc187fa47eb98d8 .quad 0x6d470115ea86c20c .quad 0x998ab7cb6c46d125 .quad 0xd77832b53a660188 .quad 0x450d81ce906fba03 // 2^36 * 1 * G .quad 0xf8ae4d2ad8453902 .quad 0x7018058ee8db2d1d .quad 0xaab3995fc7d2c11e .quad 0x53b16d2324ccca79 .quad 0x23264d66b2cae0b5 .quad 0x7dbaed33ebca6576 .quad 0x030ebed6f0d24ac8 .quad 0x2a887f78f7635510 .quad 0x2a23b9e75c012d4f .quad 0x0c974651cae1f2ea .quad 0x2fb63273675d70ca .quad 0x0ba7250b864403f5 // 2^36 * 2 * G .quad 0xbb0d18fd029c6421 .quad 0xbc2d142189298f02 .quad 0x8347f8e68b250e96 .quad 0x7b9f2fe8032d71c9 .quad 0xdd63589386f86d9c .quad 0x61699176e13a85a4 .quad 0x2e5111954eaa7d57 .quad 0x32c21b57fb60bdfb .quad 0xd87823cd319e0780 .quad 0xefc4cfc1897775c5 .quad 0x4854fb129a0ab3f7 .quad 0x12c49d417238c371 // 2^36 * 3 * G .quad 0x0950b533ffe83769 .quad 0x21861c1d8e1d6bd1 .quad 0xf022d8381302e510 .quad 0x2509200c6391cab4 .quad 0x09b3a01783799542 .quad 0x626dd08faad5ee3f .quad 0xba00bceeeb70149f .quad 0x1421b246a0a444c9 .quad 0x4aa43a8e8c24a7c7 .quad 0x04c1f540d8f05ef5 .quad 0xadba5e0c0b3eb9dc .quad 0x2ab5504448a49ce3 // 2^36 * 
4 * G .quad 0x2ed227266f0f5dec .quad 0x9824ee415ed50824 .quad 0x807bec7c9468d415 .quad 0x7093bae1b521e23f .quad 0xdc07ac631c5d3afa .quad 0x58615171f9df8c6c .quad 0x72a079d89d73e2b0 .quad 0x7301f4ceb4eae15d .quad 0x6409e759d6722c41 .quad 0xa674e1cf72bf729b .quad 0xbc0a24eb3c21e569 .quad 0x390167d24ebacb23 // 2^36 * 5 * G .quad 0x27f58e3bba353f1c .quad 0x4c47764dbf6a4361 .quad 0xafbbc4e56e562650 .quad 0x07db2ee6aae1a45d .quad 0xd7bb054ba2f2120b .quad 0xe2b9ceaeb10589b7 .quad 0x3fe8bac8f3c0edbe .quad 0x4cbd40767112cb69 .quad 0x0b603cc029c58176 .quad 0x5988e3825cb15d61 .quad 0x2bb61413dcf0ad8d .quad 0x7b8eec6c74183287 // 2^36 * 6 * G .quad 0xe4ca40782cd27cb0 .quad 0xdaf9c323fbe967bd .quad 0xb29bd34a8ad41e9e .quad 0x72810497626ede4d .quad 0x32fee570fc386b73 .quad 0xda8b0141da3a8cc7 .quad 0x975ffd0ac8968359 .quad 0x6ee809a1b132a855 .quad 0x9444bb31fcfd863a .quad 0x2fe3690a3e4e48c5 .quad 0xdc29c867d088fa25 .quad 0x13bd1e38d173292e // 2^36 * 7 * G .quad 0xd32b4cd8696149b5 .quad 0xe55937d781d8aab7 .quad 0x0bcb2127ae122b94 .quad 0x41e86fcfb14099b0 .quad 0x223fb5cf1dfac521 .quad 0x325c25316f554450 .quad 0x030b98d7659177ac .quad 0x1ed018b64f88a4bd .quad 0x3630dfa1b802a6b0 .quad 0x880f874742ad3bd5 .quad 0x0af90d6ceec5a4d4 .quad 0x746a247a37cdc5d9 // 2^36 * 8 * G .quad 0xd531b8bd2b7b9af6 .quad 0x5005093537fc5b51 .quad 0x232fcf25c593546d .quad 0x20a365142bb40f49 .quad 0x6eccd85278d941ed .quad 0x2254ae83d22f7843 .quad 0xc522d02e7bbfcdb7 .quad 0x681e3351bff0e4e2 .quad 0x8b64b59d83034f45 .quad 0x2f8b71f21fa20efb .quad 0x69249495ba6550e4 .quad 0x539ef98e45d5472b // 2^40 * 1 * G .quad 0x6e7bb6a1a6205275 .quad 0xaa4f21d7413c8e83 .quad 0x6f56d155e88f5cb2 .quad 0x2de25d4ba6345be1 .quad 0xd074d8961cae743f .quad 0xf86d18f5ee1c63ed .quad 0x97bdc55be7f4ed29 .quad 0x4cbad279663ab108 .quad 0x80d19024a0d71fcd .quad 0xc525c20afb288af8 .quad 0xb1a3974b5f3a6419 .quad 0x7d7fbcefe2007233 // 2^40 * 2 * G .quad 0xfaef1e6a266b2801 .quad 0x866c68c4d5739f16 .quad 0xf68a2fbc1b03762c .quad 0x5975435e87b75a8d .quad 0xcd7c5dc5f3c29094 .quad 0xc781a29a2a9105ab .quad 0x80c61d36421c3058 .quad 0x4f9cd196dcd8d4d7 .quad 0x199297d86a7b3768 .quad 0xd0d058241ad17a63 .quad 0xba029cad5c1c0c17 .quad 0x7ccdd084387a0307 // 2^40 * 3 * G .quad 0xdca6422c6d260417 .quad 0xae153d50948240bd .quad 0xa9c0c1b4fb68c677 .quad 0x428bd0ed61d0cf53 .quad 0x9b0c84186760cc93 .quad 0xcdae007a1ab32a99 .quad 0xa88dec86620bda18 .quad 0x3593ca848190ca44 .quad 0x9213189a5e849aa7 .quad 0xd4d8c33565d8facd .quad 0x8c52545b53fdbbd1 .quad 0x27398308da2d63e6 // 2^40 * 4 * G .quad 0x42c38d28435ed413 .quad 0xbd50f3603278ccc9 .quad 0xbb07ab1a79da03ef .quad 0x269597aebe8c3355 .quad 0xb9a10e4c0a702453 .quad 0x0fa25866d57d1bde .quad 0xffb9d9b5cd27daf7 .quad 0x572c2945492c33fd .quad 0xc77fc745d6cd30be .quad 0xe4dfe8d3e3baaefb .quad 0xa22c8830aa5dda0c .quad 0x7f985498c05bca80 // 2^40 * 5 * G .quad 0x3849ce889f0be117 .quad 0x8005ad1b7b54a288 .quad 0x3da3c39f23fc921c .quad 0x76c2ec470a31f304 .quad 0xd35615520fbf6363 .quad 0x08045a45cf4dfba6 .quad 0xeec24fbc873fa0c2 .quad 0x30f2653cd69b12e7 .quad 0x8a08c938aac10c85 .quad 0x46179b60db276bcb .quad 0xa920c01e0e6fac70 .quad 0x2f1273f1596473da // 2^40 * 6 * G .quad 0x4739fc7c8ae01e11 .quad 0xfd5274904a6aab9f .quad 0x41d98a8287728f2e .quad 0x5d9e572ad85b69f2 .quad 0x30488bd755a70bc0 .quad 0x06d6b5a4f1d442e7 .quad 0xead1a69ebc596162 .quad 0x38ac1997edc5f784 .quad 0x0666b517a751b13b .quad 0x747d06867e9b858c .quad 0xacacc011454dde49 .quad 0x22dfcd9cbfe9e69c // 2^40 * 7 * G .quad 0x8ddbd2e0c30d0cd9 .quad 0xad8e665facbb4333 .quad 
0x8f6b258c322a961f .quad 0x6b2916c05448c1c7 .quad 0x56ec59b4103be0a1 .quad 0x2ee3baecd259f969 .quad 0x797cb29413f5cd32 .quad 0x0fe9877824cde472 .quad 0x7edb34d10aba913b .quad 0x4ea3cd822e6dac0e .quad 0x66083dff6578f815 .quad 0x4c303f307ff00a17 // 2^40 * 8 * G .quad 0xd30a3bd617b28c85 .quad 0xc5d377b739773bea .quad 0xc6c6e78c1e6a5cbf .quad 0x0d61b8f78b2ab7c4 .quad 0x29fc03580dd94500 .quad 0xecd27aa46fbbec93 .quad 0x130a155fc2e2a7f8 .quad 0x416b151ab706a1d5 .quad 0x56a8d7efe9c136b0 .quad 0xbd07e5cd58e44b20 .quad 0xafe62fda1b57e0ab .quad 0x191a2af74277e8d2 // 2^44 * 1 * G .quad 0xd550095bab6f4985 .quad 0x04f4cd5b4fbfaf1a .quad 0x9d8e2ed12a0c7540 .quad 0x2bc24e04b2212286 .quad 0x09d4b60b2fe09a14 .quad 0xc384f0afdbb1747e .quad 0x58e2ea8978b5fd6e .quad 0x519ef577b5e09b0a .quad 0x1863d7d91124cca9 .quad 0x7ac08145b88a708e .quad 0x2bcd7309857031f5 .quad 0x62337a6e8ab8fae5 // 2^44 * 2 * G .quad 0x4bcef17f06ffca16 .quad 0xde06e1db692ae16a .quad 0x0753702d614f42b0 .quad 0x5f6041b45b9212d0 .quad 0xd1ab324e1b3a1273 .quad 0x18947cf181055340 .quad 0x3b5d9567a98c196e .quad 0x7fa00425802e1e68 .quad 0x7d531574028c2705 .quad 0x80317d69db0d75fe .quad 0x30fface8ef8c8ddd .quad 0x7e9de97bb6c3e998 // 2^44 * 3 * G .quad 0x1558967b9e6585a3 .quad 0x97c99ce098e98b92 .quad 0x10af149b6eb3adad .quad 0x42181fe8f4d38cfa .quad 0xf004be62a24d40dd .quad 0xba0659910452d41f .quad 0x81c45ee162a44234 .quad 0x4cb829d8a22266ef .quad 0x1dbcaa8407b86681 .quad 0x081f001e8b26753b .quad 0x3cd7ce6a84048e81 .quad 0x78af11633f25f22c // 2^44 * 4 * G .quad 0x8416ebd40b50babc .quad 0x1508722628208bee .quad 0xa3148fafb9c1c36d .quad 0x0d07daacd32d7d5d .quad 0x3241c00e7d65318c .quad 0xe6bee5dcd0e86de7 .quad 0x118b2dc2fbc08c26 .quad 0x680d04a7fc603dc3 .quad 0xf9c2414a695aa3eb .quad 0xdaa42c4c05a68f21 .quad 0x7c6c23987f93963e .quad 0x210e8cd30c3954e3 // 2^44 * 5 * G .quad 0xac4201f210a71c06 .quad 0x6a65e0aef3bfb021 .quad 0xbc42c35c393632f7 .quad 0x56ea8db1865f0742 .quad 0x2b50f16137fe6c26 .quad 0xe102bcd856e404d8 .quad 0x12b0f1414c561f6b .quad 0x51b17bc8d028ec91 .quad 0xfff5fb4bcf535119 .quad 0xf4989d79df1108a0 .quad 0xbdfcea659a3ba325 .quad 0x18a11f1174d1a6f2 // 2^44 * 6 * G .quad 0x407375ab3f6bba29 .quad 0x9ec3b6d8991e482e .quad 0x99c80e82e55f92e9 .quad 0x307c13b6fb0c0ae1 .quad 0xfbd63cdad27a5f2c .quad 0xf00fc4bc8aa106d7 .quad 0x53fb5c1a8e64a430 .quad 0x04eaabe50c1a2e85 .quad 0x24751021cb8ab5e7 .quad 0xfc2344495c5010eb .quad 0x5f1e717b4e5610a1 .quad 0x44da5f18c2710cd5 // 2^44 * 7 * G .quad 0x033cc55ff1b82eb5 .quad 0xb15ae36d411cae52 .quad 0xba40b6198ffbacd3 .quad 0x768edce1532e861f .quad 0x9156fe6b89d8eacc .quad 0xe6b79451e23126a1 .quad 0xbd7463d93944eb4e .quad 0x726373f6767203ae .quad 0xe305ca72eb7ef68a .quad 0x662cf31f70eadb23 .quad 0x18f026fdb4c45b68 .quad 0x513b5384b5d2ecbd // 2^44 * 8 * G .quad 0x46d46280c729989e .quad 0x4b93fbd05368a5dd .quad 0x63df3f81d1765a89 .quad 0x34cebd64b9a0a223 .quad 0x5e2702878af34ceb .quad 0x900b0409b946d6ae .quad 0x6512ebf7dabd8512 .quad 0x61d9b76988258f81 .quad 0xa6c5a71349b7d94b .quad 0xa3f3d15823eb9446 .quad 0x0416fbd277484834 .quad 0x69d45e6f2c70812f // 2^48 * 1 * G .quad 0xce16f74bc53c1431 .quad 0x2b9725ce2072edde .quad 0xb8b9c36fb5b23ee7 .quad 0x7e2e0e450b5cc908 .quad 0x9fe62b434f460efb .quad 0xded303d4a63607d6 .quad 0xf052210eb7a0da24 .quad 0x237e7dbe00545b93 .quad 0x013575ed6701b430 .quad 0x231094e69f0bfd10 .quad 0x75320f1583e47f22 .quad 0x71afa699b11155e3 // 2^48 * 2 * G .quad 0x65ce6f9b3953b61d .quad 0xc65839eaafa141e6 .quad 0x0f435ffda9f759fe .quad 0x021142e9c2b1c28e .quad 0xea423c1c473b50d6 .quad 
0x51e87a1f3b38ef10 .quad 0x9b84bf5fb2c9be95 .quad 0x00731fbc78f89a1c .quad 0xe430c71848f81880 .quad 0xbf960c225ecec119 .quad 0xb6dae0836bba15e3 .quad 0x4c4d6f3347e15808 // 2^48 * 3 * G .quad 0x18f7eccfc17d1fc9 .quad 0x6c75f5a651403c14 .quad 0xdbde712bf7ee0cdf .quad 0x193fddaaa7e47a22 .quad 0x2f0cddfc988f1970 .quad 0x6b916227b0b9f51b .quad 0x6ec7b6c4779176be .quad 0x38bf9500a88f9fa8 .quad 0x1fd2c93c37e8876f .quad 0xa2f61e5a18d1462c .quad 0x5080f58239241276 .quad 0x6a6fb99ebf0d4969 // 2^48 * 4 * G .quad 0x6a46c1bb560855eb .quad 0x2416bb38f893f09d .quad 0xd71d11378f71acc1 .quad 0x75f76914a31896ea .quad 0xeeb122b5b6e423c6 .quad 0x939d7010f286ff8e .quad 0x90a92a831dcf5d8c .quad 0x136fda9f42c5eb10 .quad 0xf94cdfb1a305bdd1 .quad 0x0f364b9d9ff82c08 .quad 0x2a87d8a5c3bb588a .quad 0x022183510be8dcba // 2^48 * 5 * G .quad 0x4af766385ead2d14 .quad 0xa08ed880ca7c5830 .quad 0x0d13a6e610211e3d .quad 0x6a071ce17b806c03 .quad 0x9d5a710143307a7f .quad 0xb063de9ec47da45f .quad 0x22bbfe52be927ad3 .quad 0x1387c441fd40426c .quad 0xb5d3c3d187978af8 .quad 0x722b5a3d7f0e4413 .quad 0x0d7b4848bb477ca0 .quad 0x3171b26aaf1edc92 // 2^48 * 6 * G .quad 0xa92f319097564ca8 .quad 0xff7bb84c2275e119 .quad 0x4f55fe37a4875150 .quad 0x221fd4873cf0835a .quad 0xa60db7d8b28a47d1 .quad 0xa6bf14d61770a4f1 .quad 0xd4a1f89353ddbd58 .quad 0x6c514a63344243e9 .quad 0x2322204f3a156341 .quad 0xfb73e0e9ba0a032d .quad 0xfce0dd4c410f030e .quad 0x48daa596fb924aaa // 2^48 * 7 * G .quad 0x6eca8e665ca59cc7 .quad 0xa847254b2e38aca0 .quad 0x31afc708d21e17ce .quad 0x676dd6fccad84af7 .quad 0x14f61d5dc84c9793 .quad 0x9941f9e3ef418206 .quad 0xcdf5b88f346277ac .quad 0x58c837fa0e8a79a9 .quad 0x0cf9688596fc9058 .quad 0x1ddcbbf37b56a01b .quad 0xdcc2e77d4935d66a .quad 0x1c4f73f2c6a57f0a // 2^48 * 8 * G .quad 0x0e7a4fbd305fa0bb .quad 0x829d4ce054c663ad .quad 0xf421c3832fe33848 .quad 0x795ac80d1bf64c42 .quad 0xb36e706efc7c3484 .quad 0x73dfc9b4c3c1cf61 .quad 0xeb1d79c9781cc7e5 .quad 0x70459adb7daf675c .quad 0x1b91db4991b42bb3 .quad 0x572696234b02dcca .quad 0x9fdf9ee51f8c78dc .quad 0x5fe162848ce21fd3 // 2^52 * 1 * G .quad 0xe2790aae4d077c41 .quad 0x8b938270db7469a3 .quad 0x6eb632dc8abd16a2 .quad 0x720814ecaa064b72 .quad 0x315c29c795115389 .quad 0xd7e0e507862f74ce .quad 0x0c4a762185927432 .quad 0x72de6c984a25a1e4 .quad 0xae9ab553bf6aa310 .quad 0x050a50a9806d6e1b .quad 0x92bb7403adff5139 .quad 0x0394d27645be618b // 2^52 * 2 * G .quad 0x4d572251857eedf4 .quad 0xe3724edde19e93c5 .quad 0x8a71420e0b797035 .quad 0x3b3c833687abe743 .quad 0xf5396425b23545a4 .quad 0x15a7a27e98fbb296 .quad 0xab6c52bc636fdd86 .quad 0x79d995a8419334ee .quad 0xcd8a8ea61195dd75 .quad 0xa504d8a81dd9a82f .quad 0x540dca81a35879b6 .quad 0x60dd16a379c86a8a // 2^52 * 3 * G .quad 0x35a2c8487381e559 .quad 0x596ffea6d78082cb .quad 0xcb9771ebdba7b653 .quad 0x5a08b5019b4da685 .quad 0x3501d6f8153e47b8 .quad 0xb7a9675414a2f60c .quad 0x112ee8b6455d9523 .quad 0x4e62a3c18112ea8a .quad 0xc8d4ac04516ab786 .quad 0x595af3215295b23d .quad 0xd6edd234db0230c1 .quad 0x0929efe8825b41cc // 2^52 * 4 * G .quad 0x5f0601d1cbd0f2d3 .quad 0x736e412f6132bb7f .quad 0x83604432238dde87 .quad 0x1e3a5272f5c0753c .quad 0x8b3172b7ad56651d .quad 0x01581b7a3fabd717 .quad 0x2dc94df6424df6e4 .quad 0x30376e5d2c29284f .quad 0xd2918da78159a59c .quad 0x6bdc1cd93f0713f3 .quad 0x565f7a934acd6590 .quad 0x53daacec4cb4c128 // 2^52 * 5 * G .quad 0x4ca73bd79cc8a7d6 .quad 0x4d4a738f47e9a9b2 .quad 0xf4cbf12942f5fe00 .quad 0x01a13ff9bdbf0752 .quad 0x99852bc3852cfdb0 .quad 0x2cc12e9559d6ed0b .quad 0x70f9e2bf9b5ac27b .quad 0x4f3b8c117959ae99 .quad 
0x55b6c9c82ff26412 .quad 0x1ac4a8c91fb667a8 .quad 0xd527bfcfeb778bf2 .quad 0x303337da7012a3be // 2^52 * 6 * G .quad 0x955422228c1c9d7c .quad 0x01fac1371a9b340f .quad 0x7e8d9177925b48d7 .quad 0x53f8ad5661b3e31b .quad 0x976d3ccbfad2fdd1 .quad 0xcb88839737a640a8 .quad 0x2ff00c1d6734cb25 .quad 0x269ff4dc789c2d2b .quad 0x0c003fbdc08d678d .quad 0x4d982fa37ead2b17 .quad 0xc07e6bcdb2e582f1 .quad 0x296c7291df412a44 // 2^52 * 7 * G .quad 0x7903de2b33daf397 .quad 0xd0ff0619c9a624b3 .quad 0x8a1d252b555b3e18 .quad 0x2b6d581c52e0b7c0 .quad 0xdfb23205dab8b59e .quad 0x465aeaa0c8092250 .quad 0xd133c1189a725d18 .quad 0x2327370261f117d1 .quad 0x3d0543d3623e7986 .quad 0x679414c2c278a354 .quad 0xae43f0cc726196f6 .quad 0x7836c41f8245eaba // 2^52 * 8 * G .quad 0xe7a254db49e95a81 .quad 0x5192d5d008b0ad73 .quad 0x4d20e5b1d00afc07 .quad 0x5d55f8012cf25f38 .quad 0xca651e848011937c .quad 0xc6b0c46e6ef41a28 .quad 0xb7021ba75f3f8d52 .quad 0x119dff99ead7b9fd .quad 0x43eadfcbf4b31d4d .quad 0xc6503f7411148892 .quad 0xfeee68c5060d3b17 .quad 0x329293b3dd4a0ac8 // 2^56 * 1 * G .quad 0x4e59214fe194961a .quad 0x49be7dc70d71cd4f .quad 0x9300cfd23b50f22d .quad 0x4789d446fc917232 .quad 0x2879852d5d7cb208 .quad 0xb8dedd70687df2e7 .quad 0xdc0bffab21687891 .quad 0x2b44c043677daa35 .quad 0x1a1c87ab074eb78e .quad 0xfac6d18e99daf467 .quad 0x3eacbbcd484f9067 .quad 0x60c52eef2bb9a4e4 // 2^56 * 2 * G .quad 0x0b5d89bc3bfd8bf1 .quad 0xb06b9237c9f3551a .quad 0x0e4c16b0d53028f5 .quad 0x10bc9c312ccfcaab .quad 0x702bc5c27cae6d11 .quad 0x44c7699b54a48cab .quad 0xefbc4056ba492eb2 .quad 0x70d77248d9b6676d .quad 0xaa8ae84b3ec2a05b .quad 0x98699ef4ed1781e0 .quad 0x794513e4708e85d1 .quad 0x63755bd3a976f413 // 2^56 * 3 * G .quad 0xb55fa03e2ad10853 .quad 0x356f75909ee63569 .quad 0x9ff9f1fdbe69b890 .quad 0x0d8cc1c48bc16f84 .quad 0x3dc7101897f1acb7 .quad 0x5dda7d5ec165bbd8 .quad 0x508e5b9c0fa1020f .quad 0x2763751737c52a56 .quad 0x029402d36eb419a9 .quad 0xf0b44e7e77b460a5 .quad 0xcfa86230d43c4956 .quad 0x70c2dd8a7ad166e7 // 2^56 * 4 * G .quad 0x656194509f6fec0e .quad 0xee2e7ea946c6518d .quad 0x9733c1f367e09b5c .quad 0x2e0fac6363948495 .quad 0x91d4967db8ed7e13 .quad 0x74252f0ad776817a .quad 0xe40982e00d852564 .quad 0x32b8613816a53ce5 .quad 0x79e7f7bee448cd64 .quad 0x6ac83a67087886d0 .quad 0xf89fd4d9a0e4db2e .quad 0x4179215c735a4f41 // 2^56 * 5 * G .quad 0x8c7094e7d7dced2a .quad 0x97fb8ac347d39c70 .quad 0xe13be033a906d902 .quad 0x700344a30cd99d76 .quad 0xe4ae33b9286bcd34 .quad 0xb7ef7eb6559dd6dc .quad 0x278b141fb3d38e1f .quad 0x31fa85662241c286 .quad 0xaf826c422e3622f4 .quad 0xc12029879833502d .quad 0x9bc1b7e12b389123 .quad 0x24bb2312a9952489 // 2^56 * 6 * G .quad 0xb1a8ed1732de67c3 .quad 0x3cb49418461b4948 .quad 0x8ebd434376cfbcd2 .quad 0x0fee3e871e188008 .quad 0x41f80c2af5f85c6b .quad 0x687284c304fa6794 .quad 0x8945df99a3ba1bad .quad 0x0d1d2af9ffeb5d16 .quad 0xa9da8aa132621edf .quad 0x30b822a159226579 .quad 0x4004197ba79ac193 .quad 0x16acd79718531d76 // 2^56 * 7 * G .quad 0x72df72af2d9b1d3d .quad 0x63462a36a432245a .quad 0x3ecea07916b39637 .quad 0x123e0ef6b9302309 .quad 0xc959c6c57887b6ad .quad 0x94e19ead5f90feba .quad 0x16e24e62a342f504 .quad 0x164ed34b18161700 .quad 0x487ed94c192fe69a .quad 0x61ae2cea3a911513 .quad 0x877bf6d3b9a4de27 .quad 0x78da0fc61073f3eb // 2^56 * 8 * G .quad 0x5bf15d28e52bc66a .quad 0x2c47e31870f01a8e .quad 0x2419afbc06c28bdd .quad 0x2d25deeb256b173a .quad 0xa29f80f1680c3a94 .quad 0x71f77e151ae9e7e6 .quad 0x1100f15848017973 .quad 0x054aa4b316b38ddd .quad 0xdfc8468d19267cb8 .quad 0x0b28789c66e54daf .quad 0x2aeb1d2a666eec17 .quad 
0x134610a6ab7da760 // 2^60 * 1 * G .quad 0xcaf55ec27c59b23f .quad 0x99aeed3e154d04f2 .quad 0x68441d72e14141f4 .quad 0x140345133932a0a2 .quad 0xd91430e0dc028c3c .quad 0x0eb955a85217c771 .quad 0x4b09e1ed2c99a1fa .quad 0x42881af2bd6a743c .quad 0x7bfec69aab5cad3d .quad 0xc23e8cd34cb2cfad .quad 0x685dd14bfb37d6a2 .quad 0x0ad6d64415677a18 // 2^60 * 2 * G .quad 0x781a439e417becb5 .quad 0x4ac5938cd10e0266 .quad 0x5da385110692ac24 .quad 0x11b065a2ade31233 .quad 0x7914892847927e9f .quad 0x33dad6ef370aa877 .quad 0x1f8f24fa11122703 .quad 0x5265ac2f2adf9592 .quad 0x405fdd309afcb346 .quad 0xd9723d4428e63f54 .quad 0x94c01df05f65aaae .quad 0x43e4dc3ae14c0809 // 2^60 * 3 * G .quad 0xbc12c7f1a938a517 .quad 0x473028ab3180b2e1 .quad 0x3f78571efbcd254a .quad 0x74e534426ff6f90f .quad 0xea6f7ac3adc2c6a3 .quad 0xd0e928f6e9717c94 .quad 0xe2d379ead645eaf5 .quad 0x46dd8785c51ffbbe .quad 0x709801be375c8898 .quad 0x4b06dab5e3fd8348 .quad 0x75880ced27230714 .quad 0x2b09468fdd2f4c42 // 2^60 * 4 * G .quad 0x97c749eeb701cb96 .quad 0x83f438d4b6a369c3 .quad 0x62962b8b9a402cd9 .quad 0x6976c7509888df7b .quad 0x5b97946582ffa02a .quad 0xda096a51fea8f549 .quad 0xa06351375f77af9b .quad 0x1bcfde61201d1e76 .quad 0x4a4a5490246a59a2 .quad 0xd63ebddee87fdd90 .quad 0xd9437c670d2371fa .quad 0x69e87308d30f8ed6 // 2^60 * 5 * G .quad 0x435a8bb15656beb0 .quad 0xf8fac9ba4f4d5bca .quad 0xb9b278c41548c075 .quad 0x3eb0ef76e892b622 .quad 0x0f80bf028bc80303 .quad 0x6aae16b37a18cefb .quad 0xdd47ea47d72cd6a3 .quad 0x61943588f4ed39aa .quad 0xd26e5c3e91039f85 .quad 0xc0e9e77df6f33aa9 .quad 0xe8968c5570066a93 .quad 0x3c34d1881faaaddd // 2^60 * 6 * G .quad 0x3f9d2b5ea09f9ec0 .quad 0x1dab3b6fb623a890 .quad 0xa09ba3ea72d926c4 .quad 0x374193513fd8b36d .quad 0xbd5b0b8f2fffe0d9 .quad 0x6aa254103ed24fb9 .quad 0x2ac7d7bcb26821c4 .quad 0x605b394b60dca36a .quad 0xb4e856e45a9d1ed2 .quad 0xefe848766c97a9a2 .quad 0xb104cf641e5eee7d .quad 0x2f50b81c88a71c8f // 2^60 * 7 * G .quad 0x31723c61fc6811bb .quad 0x9cb450486211800f .quad 0x768933d347995753 .quad 0x3491a53502752fcd .quad 0x2b552ca0a7da522a .quad 0x3230b336449b0250 .quad 0xf2c4c5bca4b99fb9 .quad 0x7b2c674958074a22 .quad 0xd55165883ed28cdf .quad 0x12d84fd2d362de39 .quad 0x0a874ad3e3378e4f .quad 0x000d2b1f7c763e74 // 2^60 * 8 * G .quad 0x3d420811d06d4a67 .quad 0xbefc048590e0ffe3 .quad 0xf870c6b7bd487bde .quad 0x6e2a7316319afa28 .quad 0x9624778c3e94a8ab .quad 0x0ad6f3cee9a78bec .quad 0x948ac7810d743c4f .quad 0x76627935aaecfccc .quad 0x56a8ac24d6d59a9f .quad 0xc8db753e3096f006 .quad 0x477f41e68f4c5299 .quad 0x588d851cf6c86114 // 2^64 * 1 * G .quad 0x51138ec78df6b0fe .quad 0x5397da89e575f51b .quad 0x09207a1d717af1b9 .quad 0x2102fdba2b20d650 .quad 0xcd2a65e777d1f515 .quad 0x548991878faa60f1 .quad 0xb1b73bbcdabc06e5 .quad 0x654878cba97cc9fb .quad 0x969ee405055ce6a1 .quad 0x36bca7681251ad29 .quad 0x3a1af517aa7da415 .quad 0x0ad725db29ecb2ba // 2^64 * 2 * G .quad 0xdc4267b1834e2457 .quad 0xb67544b570ce1bc5 .quad 0x1af07a0bf7d15ed7 .quad 0x4aefcffb71a03650 .quad 0xfec7bc0c9b056f85 .quad 0x537d5268e7f5ffd7 .quad 0x77afc6624312aefa .quad 0x4f675f5302399fd9 .quad 0xc32d36360415171e .quad 0xcd2bef118998483b .quad 0x870a6eadd0945110 .quad 0x0bccbb72a2a86561 // 2^64 * 3 * G .quad 0x185e962feab1a9c8 .quad 0x86e7e63565147dcd .quad 0xb092e031bb5b6df2 .quad 0x4024f0ab59d6b73e .quad 0x186d5e4c50fe1296 .quad 0xe0397b82fee89f7e .quad 0x3bc7f6c5507031b0 .quad 0x6678fd69108f37c2 .quad 0x1586fa31636863c2 .quad 0x07f68c48572d33f2 .quad 0x4f73cc9f789eaefc .quad 0x2d42e2108ead4701 // 2^64 * 4 * G .quad 0x97f5131594dfd29b .quad 
0x6155985d313f4c6a .quad 0xeba13f0708455010 .quad 0x676b2608b8d2d322 .quad 0x21717b0d0f537593 .quad 0x914e690b131e064c .quad 0x1bb687ae752ae09f .quad 0x420bf3a79b423c6e .quad 0x8138ba651c5b2b47 .quad 0x8671b6ec311b1b80 .quad 0x7bff0cb1bc3135b0 .quad 0x745d2ffa9c0cf1e0 // 2^64 * 5 * G .quad 0xbf525a1e2bc9c8bd .quad 0xea5b260826479d81 .quad 0xd511c70edf0155db .quad 0x1ae23ceb960cf5d0 .quad 0x6036df5721d34e6a .quad 0xb1db8827997bb3d0 .quad 0xd3c209c3c8756afa .quad 0x06e15be54c1dc839 .quad 0x5b725d871932994a .quad 0x32351cb5ceb1dab0 .quad 0x7dc41549dab7ca05 .quad 0x58ded861278ec1f7 // 2^64 * 6 * G .quad 0xd8173793f266c55c .quad 0xc8c976c5cc454e49 .quad 0x5ce382f8bc26c3a8 .quad 0x2ff39de85485f6f9 .quad 0x2dfb5ba8b6c2c9a8 .quad 0x48eeef8ef52c598c .quad 0x33809107f12d1573 .quad 0x08ba696b531d5bd8 .quad 0x77ed3eeec3efc57a .quad 0x04e05517d4ff4811 .quad 0xea3d7a3ff1a671cb .quad 0x120633b4947cfe54 // 2^64 * 7 * G .quad 0x0b94987891610042 .quad 0x4ee7b13cecebfae8 .quad 0x70be739594f0a4c0 .quad 0x35d30a99b4d59185 .quad 0x82bd31474912100a .quad 0xde237b6d7e6fbe06 .quad 0xe11e761911ea79c6 .quad 0x07433be3cb393bde .quad 0xff7944c05ce997f4 .quad 0x575d3de4b05c51a3 .quad 0x583381fd5a76847c .quad 0x2d873ede7af6da9f // 2^64 * 8 * G .quad 0x157a316443373409 .quad 0xfab8b7eef4aa81d9 .quad 0xb093fee6f5a64806 .quad 0x2e773654707fa7b6 .quad 0xaa6202e14e5df981 .quad 0xa20d59175015e1f5 .quad 0x18a275d3bae21d6c .quad 0x0543618a01600253 .quad 0x0deabdf4974c23c1 .quad 0xaa6f0a259dce4693 .quad 0x04202cb8a29aba2c .quad 0x4b1443362d07960d // 2^68 * 1 * G .quad 0x47b837f753242cec .quad 0x256dc48cc04212f2 .quad 0xe222fbfbe1d928c5 .quad 0x48ea295bad8a2c07 .quad 0x299b1c3f57c5715e .quad 0x96cb929e6b686d90 .quad 0x3004806447235ab3 .quad 0x2c435c24a44d9fe1 .quad 0x0607c97c80f8833f .quad 0x0e851578ca25ec5b .quad 0x54f7450b161ebb6f .quad 0x7bcb4792a0def80e // 2^68 * 2 * G .quad 0x8487e3d02bc73659 .quad 0x4baf8445059979df .quad 0xd17c975adcad6fbf .quad 0x57369f0bdefc96b6 .quad 0x1cecd0a0045224c2 .quad 0x757f1b1b69e53952 .quad 0x775b7a925289f681 .quad 0x1b6cc62016736148 .quad 0xf1a9990175638698 .quad 0x353dd1beeeaa60d3 .quad 0x849471334c9ba488 .quad 0x63fa6e6843ade311 // 2^68 * 3 * G .quad 0xd15c20536597c168 .quad 0x9f73740098d28789 .quad 0x18aee7f13257ba1f .quad 0x3418bfda07346f14 .quad 0x2195becdd24b5eb7 .quad 0x5e41f18cc0cd44f9 .quad 0xdf28074441ca9ede .quad 0x07073b98f35b7d67 .quad 0xd03c676c4ce530d4 .quad 0x0b64c0473b5df9f4 .quad 0x065cef8b19b3a31e .quad 0x3084d661533102c9 // 2^68 * 4 * G .quad 0xe1f6b79ebf8469ad .quad 0x15801004e2663135 .quad 0x9a498330af74181b .quad 0x3ba2504f049b673c .quad 0x9a6ce876760321fd .quad 0x7fe2b5109eb63ad8 .quad 0x00e7d4ae8ac80592 .quad 0x73d86b7abb6f723a .quad 0x0b52b5606dba5ab6 .quad 0xa9134f0fbbb1edab .quad 0x30a9520d9b04a635 .quad 0x6813b8f37973e5db // 2^68 * 5 * G .quad 0x9854b054334127c1 .quad 0x105d047882fbff25 .quad 0xdb49f7f944186f4f .quad 0x1768e838bed0b900 .quad 0xf194ca56f3157e29 .quad 0x136d35705ef528a5 .quad 0xdd4cef778b0599bc .quad 0x7d5472af24f833ed .quad 0xd0ef874daf33da47 .quad 0x00d3be5db6e339f9 .quad 0x3f2a8a2f9c9ceece .quad 0x5d1aeb792352435a // 2^68 * 6 * G .quad 0xf59e6bb319cd63ca .quad 0x670c159221d06839 .quad 0xb06d565b2150cab6 .quad 0x20fb199d104f12a3 .quad 0x12c7bfaeb61ba775 .quad 0xb84e621fe263bffd .quad 0x0b47a5c35c840dcf .quad 0x7e83be0bccaf8634 .quad 0x61943dee6d99c120 .quad 0x86101f2e460b9fe0 .quad 0x6bb2f1518ee8598d .quad 0x76b76289fcc475cc // 2^68 * 7 * G .quad 0x791b4cc1756286fa .quad 0xdbced317d74a157c .quad 0x7e732421ea72bde6 .quad 0x01fe18491131c8e9 .quad 
0x4245f1a1522ec0b3 .quad 0x558785b22a75656d .quad 0x1d485a2548a1b3c0 .quad 0x60959eccd58fe09f .quad 0x3ebfeb7ba8ed7a09 .quad 0x49fdc2bbe502789c .quad 0x44ebce5d3c119428 .quad 0x35e1eb55be947f4a // 2^68 * 8 * G .quad 0xdbdae701c5738dd3 .quad 0xf9c6f635b26f1bee .quad 0x61e96a8042f15ef4 .quad 0x3aa1d11faf60a4d8 .quad 0x14fd6dfa726ccc74 .quad 0x3b084cfe2f53b965 .quad 0xf33ae4f552a2c8b4 .quad 0x59aab07a0d40166a .quad 0x77bcec4c925eac25 .quad 0x1848718460137738 .quad 0x5b374337fea9f451 .quad 0x1865e78ec8e6aa46 // 2^72 * 1 * G .quad 0xccc4b7c7b66e1f7a .quad 0x44157e25f50c2f7e .quad 0x3ef06dfc713eaf1c .quad 0x582f446752da63f7 .quad 0x967c54e91c529ccb .quad 0x30f6269264c635fb .quad 0x2747aff478121965 .quad 0x17038418eaf66f5c .quad 0xc6317bd320324ce4 .quad 0xa81042e8a4488bc4 .quad 0xb21ef18b4e5a1364 .quad 0x0c2a1c4bcda28dc9 // 2^72 * 2 * G .quad 0xd24dc7d06f1f0447 .quad 0xb2269e3edb87c059 .quad 0xd15b0272fbb2d28f .quad 0x7c558bd1c6f64877 .quad 0xedc4814869bd6945 .quad 0x0d6d907dbe1c8d22 .quad 0xc63bd212d55cc5ab .quad 0x5a6a9b30a314dc83 .quad 0xd0ec1524d396463d .quad 0x12bb628ac35a24f0 .quad 0xa50c3a791cbc5fa4 .quad 0x0404a5ca0afbafc3 // 2^72 * 3 * G .quad 0x8c1f40070aa743d6 .quad 0xccbad0cb5b265ee8 .quad 0x574b046b668fd2de .quad 0x46395bfdcadd9633 .quad 0x62bc9e1b2a416fd1 .quad 0xb5c6f728e350598b .quad 0x04343fd83d5d6967 .quad 0x39527516e7f8ee98 .quad 0x117fdb2d1a5d9a9c .quad 0x9c7745bcd1005c2a .quad 0xefd4bef154d56fea .quad 0x76579a29e822d016 // 2^72 * 4 * G .quad 0x45b68e7e49c02a17 .quad 0x23cd51a2bca9a37f .quad 0x3ed65f11ec224c1b .quad 0x43a384dc9e05bdb1 .quad 0x333cb51352b434f2 .quad 0xd832284993de80e1 .quad 0xb5512887750d35ce .quad 0x02c514bb2a2777c1 .quad 0x684bd5da8bf1b645 .quad 0xfb8bd37ef6b54b53 .quad 0x313916d7a9b0d253 .quad 0x1160920961548059 // 2^72 * 5 * G .quad 0xb44d166929dacfaa .quad 0xda529f4c8413598f .quad 0xe9ef63ca453d5559 .quad 0x351e125bc5698e0b .quad 0x7a385616369b4dcd .quad 0x75c02ca7655c3563 .quad 0x7dc21bf9d4f18021 .quad 0x2f637d7491e6e042 .quad 0xd4b49b461af67bbe .quad 0xd603037ac8ab8961 .quad 0x71dee19ff9a699fb .quad 0x7f182d06e7ce2a9a // 2^72 * 6 * G .quad 0x7a7c8e64ab0168ec .quad 0xcb5a4a5515edc543 .quad 0x095519d347cd0eda .quad 0x67d4ac8c343e93b0 .quad 0x09454b728e217522 .quad 0xaa58e8f4d484b8d8 .quad 0xd358254d7f46903c .quad 0x44acc043241c5217 .quad 0x1c7d6bbb4f7a5777 .quad 0x8b35fed4918313e1 .quad 0x4adca1c6c96b4684 .quad 0x556d1c8312ad71bd // 2^72 * 7 * G .quad 0x17ef40e30c8d3982 .quad 0x31f7073e15a3fa34 .quad 0x4f21f3cb0773646e .quad 0x746c6c6d1d824eff .quad 0x81f06756b11be821 .quad 0x0faff82310a3f3dd .quad 0xf8b2d0556a99465d .quad 0x097abe38cc8c7f05 .quad 0x0c49c9877ea52da4 .quad 0x4c4369559bdc1d43 .quad 0x022c3809f7ccebd2 .quad 0x577e14a34bee84bd // 2^72 * 8 * G .quad 0xf0e268ac61a73b0a .quad 0xf2fafa103791a5f5 .quad 0xc1e13e826b6d00e9 .quad 0x60fa7ee96fd78f42 .quad 0x94fecebebd4dd72b .quad 0xf46a4fda060f2211 .quad 0x124a5977c0c8d1ff .quad 0x705304b8fb009295 .quad 0xb63d1d354d296ec6 .quad 0xf3c3053e5fad31d8 .quad 0x670b958cb4bd42ec .quad 0x21398e0ca16353fd // 2^76 * 1 * G .quad 0x216ab2ca8da7d2ef .quad 0x366ad9dd99f42827 .quad 0xae64b9004fdd3c75 .quad 0x403a395b53909e62 .quad 0x86c5fc16861b7e9a .quad 0xf6a330476a27c451 .quad 0x01667267a1e93597 .quad 0x05ffb9cd6082dfeb .quad 0xa617fa9ff53f6139 .quad 0x60f2b5e513e66cb6 .quad 0xd7a8beefb3448aa4 .quad 0x7a2932856f5ea192 // 2^76 * 2 * G .quad 0x0b39d761b02de888 .quad 0x5f550e7ed2414e1f .quad 0xa6bfa45822e1a940 .quad 0x050a2f7dfd447b99 .quad 0xb89c444879639302 .quad 0x4ae4f19350c67f2c .quad 0xf0b35da8c81af9c6 .quad 
0x39d0003546871017 .quad 0x437c3b33a650db77 .quad 0x6bafe81dbac52bb2 .quad 0xfe99402d2db7d318 .quad 0x2b5b7eec372ba6ce // 2^76 * 3 * G .quad 0xb3bc4bbd83f50eef .quad 0x508f0c998c927866 .quad 0x43e76587c8b7e66e .quad 0x0f7655a3a47f98d9 .quad 0xa694404d613ac8f4 .quad 0x500c3c2bfa97e72c .quad 0x874104d21fcec210 .quad 0x1b205fb38604a8ee .quad 0x55ecad37d24b133c .quad 0x441e147d6038c90b .quad 0x656683a1d62c6fee .quad 0x0157d5dc87e0ecae // 2^76 * 4 * G .quad 0xf2a7af510354c13d .quad 0xd7a0b145aa372b60 .quad 0x2869b96a05a3d470 .quad 0x6528e42d82460173 .quad 0x95265514d71eb524 .quad 0xe603d8815df14593 .quad 0x147cdf410d4de6b7 .quad 0x5293b1730437c850 .quad 0x23d0e0814bccf226 .quad 0x92c745cd8196fb93 .quad 0x8b61796c59541e5b .quad 0x40a44df0c021f978 // 2^76 * 5 * G .quad 0xdaa869894f20ea6a .quad 0xea14a3d14c620618 .quad 0x6001fccb090bf8be .quad 0x35f4e822947e9cf0 .quad 0x86c96e514bc5d095 .quad 0xf20d4098fca6804a .quad 0x27363d89c826ea5d .quad 0x39ca36565719cacf .quad 0x97506f2f6f87b75c .quad 0xc624aea0034ae070 .quad 0x1ec856e3aad34dd6 .quad 0x055b0be0e440e58f // 2^76 * 6 * G .quad 0x6469a17d89735d12 .quad 0xdb6f27d5e662b9f1 .quad 0x9fcba3286a395681 .quad 0x363b8004d269af25 .quad 0x4d12a04b6ea33da2 .quad 0x57cf4c15e36126dd .quad 0x90ec9675ee44d967 .quad 0x64ca348d2a985aac .quad 0x99588e19e4c4912d .quad 0xefcc3b4e1ca5ce6b .quad 0x4522ea60fa5b98d5 .quad 0x7064bbab1de4a819 // 2^76 * 7 * G .quad 0xb919e1515a770641 .quad 0xa9a2e2c74e7f8039 .quad 0x7527250b3df23109 .quad 0x756a7330ac27b78b .quad 0xa290c06142542129 .quad 0xf2e2c2aebe8d5b90 .quad 0xcf2458db76abfe1b .quad 0x02157ade83d626bf .quad 0x3e46972a1b9a038b .quad 0x2e4ee66a7ee03fb4 .quad 0x81a248776edbb4ca .quad 0x1a944ee88ecd0563 // 2^76 * 8 * G .quad 0xd5a91d1151039372 .quad 0x2ed377b799ca26de .quad 0xa17202acfd366b6b .quad 0x0730291bd6901995 .quad 0xbb40a859182362d6 .quad 0xb99f55778a4d1abb .quad 0x8d18b427758559f6 .quad 0x26c20fe74d26235a .quad 0x648d1d9fe9cc22f5 .quad 0x66bc561928dd577c .quad 0x47d3ed21652439d1 .quad 0x49d271acedaf8b49 // 2^80 * 1 * G .quad 0x89f5058a382b33f3 .quad 0x5ae2ba0bad48c0b4 .quad 0x8f93b503a53db36e .quad 0x5aa3ed9d95a232e6 .quad 0x2798aaf9b4b75601 .quad 0x5eac72135c8dad72 .quad 0xd2ceaa6161b7a023 .quad 0x1bbfb284e98f7d4e .quad 0x656777e9c7d96561 .quad 0xcb2b125472c78036 .quad 0x65053299d9506eee .quad 0x4a07e14e5e8957cc // 2^80 * 2 * G .quad 0x4ee412cb980df999 .quad 0xa315d76f3c6ec771 .quad 0xbba5edde925c77fd .quad 0x3f0bac391d313402 .quad 0x240b58cdc477a49b .quad 0xfd38dade6447f017 .quad 0x19928d32a7c86aad .quad 0x50af7aed84afa081 .quad 0x6e4fde0115f65be5 .quad 0x29982621216109b2 .quad 0x780205810badd6d9 .quad 0x1921a316baebd006 // 2^80 * 3 * G .quad 0x89422f7edfb870fc .quad 0x2c296beb4f76b3bd .quad 0x0738f1d436c24df7 .quad 0x6458df41e273aeb0 .quad 0xd75aad9ad9f3c18b .quad 0x566a0eef60b1c19c .quad 0x3e9a0bac255c0ed9 .quad 0x7b049deca062c7f5 .quad 0xdccbe37a35444483 .quad 0x758879330fedbe93 .quad 0x786004c312c5dd87 .quad 0x6093dccbc2950e64 // 2^80 * 4 * G .quad 0x1ff39a8585e0706d .quad 0x36d0a5d8b3e73933 .quad 0x43b9f2e1718f453b .quad 0x57d1ea084827a97c .quad 0x6bdeeebe6084034b .quad 0x3199c2b6780fb854 .quad 0x973376abb62d0695 .quad 0x6e3180c98b647d90 .quad 0xee7ab6e7a128b071 .quad 0xa4c1596d93a88baa .quad 0xf7b4de82b2216130 .quad 0x363e999ddd97bd18 // 2^80 * 5 * G .quad 0x96a843c135ee1fc4 .quad 0x976eb35508e4c8cf .quad 0xb42f6801b58cd330 .quad 0x48ee9b78693a052b .quad 0x2f1848dce24baec6 .quad 0x769b7255babcaf60 .quad 0x90cb3c6e3cefe931 .quad 0x231f979bc6f9b355 .quad 0x5c31de4bcc2af3c6 .quad 0xb04bb030fe208d1f .quad 
0xb78d7009c14fb466 .quad 0x079bfa9b08792413 // 2^80 * 6 * G .quad 0xe3903a51da300df4 .quad 0x843964233da95ab0 .quad 0xed3cf12d0b356480 .quad 0x038c77f684817194 .quad 0xf3c9ed80a2d54245 .quad 0x0aa08b7877f63952 .quad 0xd76dac63d1085475 .quad 0x1ef4fb159470636b .quad 0x854e5ee65b167bec .quad 0x59590a4296d0cdc2 .quad 0x72b2df3498102199 .quad 0x575ee92a4a0bff56 // 2^80 * 7 * G .quad 0xd4c080908a182fcf .quad 0x30e170c299489dbd .quad 0x05babd5752f733de .quad 0x43d4e7112cd3fd00 .quad 0x5d46bc450aa4d801 .quad 0xc3af1227a533b9d8 .quad 0x389e3b262b8906c2 .quad 0x200a1e7e382f581b .quad 0x518db967eaf93ac5 .quad 0x71bc989b056652c0 .quad 0xfe2b85d9567197f5 .quad 0x050eca52651e4e38 // 2^80 * 8 * G .quad 0xc3431ade453f0c9c .quad 0xe9f5045eff703b9b .quad 0xfcd97ac9ed847b3d .quad 0x4b0ee6c21c58f4c6 .quad 0x97ac397660e668ea .quad 0x9b19bbfe153ab497 .quad 0x4cb179b534eca79f .quad 0x6151c09fa131ae57 .quad 0x3af55c0dfdf05d96 .quad 0xdd262ee02ab4ee7a .quad 0x11b2bb8712171709 .quad 0x1fef24fa800f030b // 2^84 * 1 * G .quad 0xb496123a6b6c6609 .quad 0xa750fe8580ab5938 .quad 0xf471bf39b7c27a5f .quad 0x507903ce77ac193c .quad 0xff91a66a90166220 .quad 0xf22552ae5bf1e009 .quad 0x7dff85d87f90df7c .quad 0x4f620ffe0c736fb9 .quad 0x62f90d65dfde3e34 .quad 0xcf28c592b9fa5fad .quad 0x99c86ef9c6164510 .quad 0x25d448044a256c84 // 2^84 * 2 * G .quad 0xbd68230ec7e9b16f .quad 0x0eb1b9c1c1c5795d .quad 0x7943c8c495b6b1ff .quad 0x2f9faf620bbacf5e .quad 0x2c7c4415c9022b55 .quad 0x56a0d241812eb1fe .quad 0xf02ea1c9d7b65e0d .quad 0x4180512fd5323b26 .quad 0xa4ff3e698a48a5db .quad 0xba6a3806bd95403b .quad 0x9f7ce1af47d5b65d .quad 0x15e087e55939d2fb // 2^84 * 3 * G .quad 0x12207543745c1496 .quad 0xdaff3cfdda38610c .quad 0xe4e797272c71c34f .quad 0x39c07b1934bdede9 .quad 0x8894186efb963f38 .quad 0x48a00e80dc639bd5 .quad 0xa4e8092be96c1c99 .quad 0x5a097d54ca573661 .quad 0x2d45892b17c9e755 .quad 0xd033fd7289308df8 .quad 0x6c2fe9d9525b8bd9 .quad 0x2edbecf1c11cc079 // 2^84 * 4 * G .quad 0x1616a4e3c715a0d2 .quad 0x53623cb0f8341d4d .quad 0x96ef5329c7e899cb .quad 0x3d4e8dbba668baa6 .quad 0xee0f0fddd087a25f .quad 0x9c7531555c3e34ee .quad 0x660c572e8fab3ab5 .quad 0x0854fc44544cd3b2 .quad 0x61eba0c555edad19 .quad 0x24b533fef0a83de6 .quad 0x3b77042883baa5f8 .quad 0x678f82b898a47e8d // 2^84 * 5 * G .quad 0xb1491d0bd6900c54 .quad 0x3539722c9d132636 .quad 0x4db928920b362bc9 .quad 0x4d7cd1fea68b69df .quad 0x1e09d94057775696 .quad 0xeed1265c3cd951db .quad 0xfa9dac2b20bce16f .quad 0x0f7f76e0e8d089f4 .quad 0x36d9ebc5d485b00c .quad 0xa2596492e4adb365 .quad 0xc1659480c2119ccd .quad 0x45306349186e0d5f // 2^84 * 6 * G .quad 0x94ddd0c1a6cdff1d .quad 0x55f6f115e84213ae .quad 0x6c935f85992fcf6a .quad 0x067ee0f54a37f16f .quad 0x96a414ec2b072491 .quad 0x1bb2218127a7b65b .quad 0x6d2849596e8a4af0 .quad 0x65f3b08ccd27765f .quad 0xecb29fff199801f7 .quad 0x9d361d1fa2a0f72f .quad 0x25f11d2375fd2f49 .quad 0x124cefe80fe10fe2 // 2^84 * 7 * G .quad 0x4c126cf9d18df255 .quad 0xc1d471e9147a63b6 .quad 0x2c6d3c73f3c93b5f .quad 0x6be3a6a2e3ff86a2 .quad 0x1518e85b31b16489 .quad 0x8faadcb7db710bfb .quad 0x39b0bdf4a14ae239 .quad 0x05f4cbea503d20c1 .quad 0xce040e9ec04145bc .quad 0xc71ff4e208f6834c .quad 0xbd546e8dab8847a3 .quad 0x64666aa0a4d2aba5 // 2^84 * 8 * G .quad 0x6841435a7c06d912 .quad 0xca123c21bb3f830b .quad 0xd4b37b27b1cbe278 .quad 0x1d753b84c76f5046 .quad 0xb0c53bf73337e94c .quad 0x7cb5697e11e14f15 .quad 0x4b84abac1930c750 .quad 0x28dd4abfe0640468 .quad 0x7dc0b64c44cb9f44 .quad 0x18a3e1ace3925dbf .quad 0x7a3034862d0457c4 .quad 0x4c498bf78a0c892e // 2^88 * 1 * G .quad 
0x37d653fb1aa73196 .quad 0x0f9495303fd76418 .quad 0xad200b09fb3a17b2 .quad 0x544d49292fc8613e .quad 0x22d2aff530976b86 .quad 0x8d90b806c2d24604 .quad 0xdca1896c4de5bae5 .quad 0x28005fe6c8340c17 .quad 0x6aefba9f34528688 .quad 0x5c1bff9425107da1 .quad 0xf75bbbcd66d94b36 .quad 0x72e472930f316dfa // 2^88 * 2 * G .quad 0x2695208c9781084f .quad 0xb1502a0b23450ee1 .quad 0xfd9daea603efde02 .quad 0x5a9d2e8c2733a34c .quad 0x07f3f635d32a7627 .quad 0x7aaa4d865f6566f0 .quad 0x3c85e79728d04450 .quad 0x1fee7f000fe06438 .quad 0x765305da03dbf7e5 .quad 0xa4daf2491434cdbd .quad 0x7b4ad5cdd24a88ec .quad 0x00f94051ee040543 // 2^88 * 3 * G .quad 0x8d356b23c3d330b2 .quad 0xf21c8b9bb0471b06 .quad 0xb36c316c6e42b83c .quad 0x07d79c7e8beab10d .quad 0xd7ef93bb07af9753 .quad 0x583ed0cf3db766a7 .quad 0xce6998bf6e0b1ec5 .quad 0x47b7ffd25dd40452 .quad 0x87fbfb9cbc08dd12 .quad 0x8a066b3ae1eec29b .quad 0x0d57242bdb1fc1bf .quad 0x1c3520a35ea64bb6 // 2^88 * 4 * G .quad 0x80d253a6bccba34a .quad 0x3e61c3a13838219b .quad 0x90c3b6019882e396 .quad 0x1c3d05775d0ee66f .quad 0xcda86f40216bc059 .quad 0x1fbb231d12bcd87e .quad 0xb4956a9e17c70990 .quad 0x38750c3b66d12e55 .quad 0x692ef1409422e51a .quad 0xcbc0c73c2b5df671 .quad 0x21014fe7744ce029 .quad 0x0621e2c7d330487c // 2^88 * 5 * G .quad 0xaf9860cc8259838d .quad 0x90ea48c1c69f9adc .quad 0x6526483765581e30 .quad 0x0007d6097bd3a5bc .quad 0xb7ae1796b0dbf0f3 .quad 0x54dfafb9e17ce196 .quad 0x25923071e9aaa3b4 .quad 0x5d8e589ca1002e9d .quad 0xc0bf1d950842a94b .quad 0xb2d3c363588f2e3e .quad 0x0a961438bb51e2ef .quad 0x1583d7783c1cbf86 // 2^88 * 6 * G .quad 0xeceea2ef5da27ae1 .quad 0x597c3a1455670174 .quad 0xc9a62a126609167a .quad 0x252a5f2e81ed8f70 .quad 0x90034704cc9d28c7 .quad 0x1d1b679ef72cc58f .quad 0x16e12b5fbe5b8726 .quad 0x4958064e83c5580a .quad 0x0d2894265066e80d .quad 0xfcc3f785307c8c6b .quad 0x1b53da780c1112fd .quad 0x079c170bd843b388 // 2^88 * 7 * G .quad 0x0506ece464fa6fff .quad 0xbee3431e6205e523 .quad 0x3579422451b8ea42 .quad 0x6dec05e34ac9fb00 .quad 0xcdd6cd50c0d5d056 .quad 0x9af7686dbb03573b .quad 0x3ca6723ff3c3ef48 .quad 0x6768c0d7317b8acc .quad 0x94b625e5f155c1b3 .quad 0x417bf3a7997b7b91 .quad 0xc22cbddc6d6b2600 .quad 0x51445e14ddcd52f4 // 2^88 * 8 * G .quad 0x57502b4b3b144951 .quad 0x8e67ff6b444bbcb3 .quad 0xb8bd6927166385db .quad 0x13186f31e39295c8 .quad 0x893147ab2bbea455 .quad 0x8c53a24f92079129 .quad 0x4b49f948be30f7a7 .quad 0x12e990086e4fd43d .quad 0xf10c96b37fdfbb2e .quad 0x9f9a935e121ceaf9 .quad 0xdf1136c43a5b983f .quad 0x77b2e3f05d3e99af // 2^92 * 1 * G .quad 0xfd0d75879cf12657 .quad 0xe82fef94e53a0e29 .quad 0xcc34a7f05bbb4be7 .quad 0x0b251172a50c38a2 .quad 0x9532f48fcc5cd29b .quad 0x2ba851bea3ce3671 .quad 0x32dacaa051122941 .quad 0x478d99d9350004f2 .quad 0x1d5ad94890bb02c0 .quad 0x50e208b10ec25115 .quad 0xa26a22894ef21702 .quad 0x4dc923343b524805 // 2^92 * 2 * G .quad 0xe3828c400f8086b6 .quad 0x3f77e6f7979f0dc8 .quad 0x7ef6de304df42cb4 .quad 0x5265797cb6abd784 .quad 0x3ad3e3ebf36c4975 .quad 0xd75d25a537862125 .quad 0xe873943da025a516 .quad 0x6bbc7cb4c411c847 .quad 0x3c6f9cd1d4a50d56 .quad 0xb6244077c6feab7e .quad 0x6ff9bf483580972e .quad 0x00375883b332acfb // 2^92 * 3 * G .quad 0x0001b2cd28cb0940 .quad 0x63fb51a06f1c24c9 .quad 0xb5ad8691dcd5ca31 .quad 0x67238dbd8c450660 .quad 0xc98bec856c75c99c .quad 0xe44184c000e33cf4 .quad 0x0a676b9bba907634 .quad 0x669e2cb571f379d7 .quad 0xcb116b73a49bd308 .quad 0x025aad6b2392729e .quad 0xb4793efa3f55d9b1 .quad 0x72a1056140678bb9 // 2^92 * 4 * G .quad 0xa2b6812b1cc9249d .quad 0x62866eee21211f58 .quad 0x2cb5c5b85df10ece .quad 
0x03a6b259e263ae00 .quad 0x0d8d2909e2e505b6 .quad 0x98ca78abc0291230 .quad 0x77ef5569a9b12327 .quad 0x7c77897b81439b47 .quad 0xf1c1b5e2de331cb5 .quad 0x5a9f5d8e15fca420 .quad 0x9fa438f17bd932b1 .quad 0x2a381bf01c6146e7 // 2^92 * 5 * G .quad 0xac9b9879cfc811c1 .quad 0x8b7d29813756e567 .quad 0x50da4e607c70edfc .quad 0x5dbca62f884400b6 .quad 0xf7c0be32b534166f .quad 0x27e6ca6419cf70d4 .quad 0x934df7d7a957a759 .quad 0x5701461dabdec2aa .quad 0x2c6747402c915c25 .quad 0x1bdcd1a80b0d340a .quad 0x5e5601bd07b43f5f .quad 0x2555b4e05539a242 // 2^92 * 6 * G .quad 0x6fc09f5266ddd216 .quad 0xdce560a7c8e37048 .quad 0xec65939da2df62fd .quad 0x7a869ae7e52ed192 .quad 0x78409b1d87e463d4 .quad 0xad4da95acdfb639d .quad 0xec28773755259b9c .quad 0x69c806e9c31230ab .quad 0x7b48f57414bb3f22 .quad 0x68c7cee4aedccc88 .quad 0xed2f936179ed80be .quad 0x25d70b885f77bc4b // 2^92 * 7 * G .quad 0x4151c3d9762bf4de .quad 0x083f435f2745d82b .quad 0x29775a2e0d23ddd5 .quad 0x138e3a6269a5db24 .quad 0x98459d29bb1ae4d4 .quad 0x56b9c4c739f954ec .quad 0x832743f6c29b4b3e .quad 0x21ea8e2798b6878a .quad 0x87bef4b46a5a7b9c .quad 0xd2299d1b5fc1d062 .quad 0x82409818dd321648 .quad 0x5c5abeb1e5a2e03d // 2^92 * 8 * G .quad 0x14722af4b73c2ddb .quad 0xbc470c5f5a05060d .quad 0x00943eac2581b02e .quad 0x0e434b3b1f499c8f .quad 0x02cde6de1306a233 .quad 0x7b5a52a2116f8ec7 .quad 0xe1c681f4c1163b5b .quad 0x241d350660d32643 .quad 0x6be4404d0ebc52c7 .quad 0xae46233bb1a791f5 .quad 0x2aec170ed25db42b .quad 0x1d8dfd966645d694 // 2^96 * 1 * G .quad 0x296fa9c59c2ec4de .quad 0xbc8b61bf4f84f3cb .quad 0x1c7706d917a8f908 .quad 0x63b795fc7ad3255d .quad 0xd598639c12ddb0a4 .quad 0xa5d19f30c024866b .quad 0xd17c2f0358fce460 .quad 0x07a195152e095e8a .quad 0xa8368f02389e5fc8 .quad 0x90433b02cf8de43b .quad 0xafa1fd5dc5412643 .quad 0x3e8fe83d032f0137 // 2^96 * 2 * G .quad 0x2f8b15b90570a294 .quad 0x94f2427067084549 .quad 0xde1c5ae161bbfd84 .quad 0x75ba3b797fac4007 .quad 0x08704c8de8efd13c .quad 0xdfc51a8e33e03731 .quad 0xa59d5da51260cde3 .quad 0x22d60899a6258c86 .quad 0x6239dbc070cdd196 .quad 0x60fe8a8b6c7d8a9a .quad 0xb38847bceb401260 .quad 0x0904d07b87779e5e // 2^96 * 3 * G .quad 0xb4ce1fd4ddba919c .quad 0xcf31db3ec74c8daa .quad 0x2c63cc63ad86cc51 .quad 0x43e2143fbc1dde07 .quad 0xf4322d6648f940b9 .quad 0x06952f0cbd2d0c39 .quad 0x167697ada081f931 .quad 0x6240aacebaf72a6c .quad 0xf834749c5ba295a0 .quad 0xd6947c5bca37d25a .quad 0x66f13ba7e7c9316a .quad 0x56bdaf238db40cac // 2^96 * 4 * G .quad 0x362ab9e3f53533eb .quad 0x338568d56eb93d40 .quad 0x9e0e14521d5a5572 .quad 0x1d24a86d83741318 .quad 0x1310d36cc19d3bb2 .quad 0x062a6bb7622386b9 .quad 0x7c9b8591d7a14f5c .quad 0x03aa31507e1e5754 .quad 0xf4ec7648ffd4ce1f .quad 0xe045eaf054ac8c1c .quad 0x88d225821d09357c .quad 0x43b261dc9aeb4859 // 2^96 * 5 * G .quad 0xe55b1e1988bb79bb .quad 0xa09ed07dc17a359d .quad 0xb02c2ee2603dea33 .quad 0x326055cf5b276bc2 .quad 0x19513d8b6c951364 .quad 0x94fe7126000bf47b .quad 0x028d10ddd54f9567 .quad 0x02b4d5e242940964 .quad 0xb4a155cb28d18df2 .quad 0xeacc4646186ce508 .quad 0xc49cf4936c824389 .quad 0x27a6c809ae5d3410 // 2^96 * 6 * G .quad 0x8ba6ebcd1f0db188 .quad 0x37d3d73a675a5be8 .quad 0xf22edfa315f5585a .quad 0x2cb67174ff60a17e .quad 0xcd2c270ac43d6954 .quad 0xdd4a3e576a66cab2 .quad 0x79fa592469d7036c .quad 0x221503603d8c2599 .quad 0x59eecdf9390be1d0 .quad 0xa9422044728ce3f1 .quad 0x82891c667a94f0f4 .quad 0x7b1df4b73890f436 // 2^96 * 7 * G .quad 0xe492f2e0b3b2a224 .quad 0x7c6c9e062b551160 .quad 0x15eb8fe20d7f7b0e .quad 0x61fcef2658fc5992 .quad 0x5f2e221807f8f58c .quad 0xe3555c9fd49409d4 .quad 
0xb2aaa88d1fb6a630 .quad 0x68698245d352e03d .quad 0xdbb15d852a18187a .quad 0xf3e4aad386ddacd7 .quad 0x44bae2810ff6c482 .quad 0x46cf4c473daf01cf // 2^96 * 8 * G .quad 0x426525ed9ec4e5f9 .quad 0x0e5eda0116903303 .quad 0x72b1a7f2cbe5cadc .quad 0x29387bcd14eb5f40 .quad 0x213c6ea7f1498140 .quad 0x7c1e7ef8392b4854 .quad 0x2488c38c5629ceba .quad 0x1065aae50d8cc5bb .quad 0x1c2c4525df200d57 .quad 0x5c3b2dd6bfca674a .quad 0x0a07e7b1e1834030 .quad 0x69a198e64f1ce716 // 2^100 * 1 * G .quad 0x7afcd613efa9d697 .quad 0x0cc45aa41c067959 .quad 0xa56fe104c1fada96 .quad 0x3a73b70472e40365 .quad 0x7b26e56b9e2d4734 .quad 0xc4c7132b81c61675 .quad 0xef5c9525ec9cde7f .quad 0x39c80b16e71743ad .quad 0x0f196e0d1b826c68 .quad 0xf71ff0e24960e3db .quad 0x6113167023b7436c .quad 0x0cf0ea5877da7282 // 2^100 * 2 * G .quad 0x196c80a4ddd4ccbd .quad 0x22e6f55d95f2dd9d .quad 0xc75e33c740d6c71b .quad 0x7bb51279cb3c042f .quad 0xe332ced43ba6945a .quad 0xde0b1361e881c05d .quad 0x1ad40f095e67ed3b .quad 0x5da8acdab8c63d5d .quad 0xc4b6664a3a70159f .quad 0x76194f0f0a904e14 .quad 0xa5614c39a4096c13 .quad 0x6cd0ff50979feced // 2^100 * 3 * G .quad 0xc0e067e78f4428ac .quad 0x14835ab0a61135e3 .quad 0xf21d14f338062935 .quad 0x6390a4c8df04849c .quad 0x7fecfabdb04ba18e .quad 0xd0fc7bfc3bddbcf7 .quad 0xa41d486e057a131c .quad 0x641a4391f2223a61 .quad 0xc5c6b95aa606a8db .quad 0x914b7f9eb06825f1 .quad 0x2a731f6b44fc9eff .quad 0x30ddf38562705cfc // 2^100 * 4 * G .quad 0x4e3dcbdad1bff7f9 .quad 0xc9118e8220645717 .quad 0xbacccebc0f189d56 .quad 0x1b4822e9d4467668 .quad 0x33bef2bd68bcd52c .quad 0xc649dbb069482ef2 .quad 0xb5b6ee0c41cb1aee .quad 0x5c294d270212a7e5 .quad 0xab360a7f25563781 .quad 0x2512228a480f7958 .quad 0xc75d05276114b4e3 .quad 0x222d9625d976fe2a // 2^100 * 5 * G .quad 0x1c717f85b372ace1 .quad 0x81930e694638bf18 .quad 0x239cad056bc08b58 .quad 0x0b34271c87f8fff4 .quad 0x0f94be7e0a344f85 .quad 0xeb2faa8c87f22c38 .quad 0x9ce1e75e4ee16f0f .quad 0x43e64e5418a08dea .quad 0x8155e2521a35ce63 .quad 0xbe100d4df912028e .quad 0xbff80bf8a57ddcec .quad 0x57342dc96d6bc6e4 // 2^100 * 6 * G .quad 0xefeef065c8ce5998 .quad 0xbf029510b5cbeaa2 .quad 0x8c64a10620b7c458 .quad 0x35134fb231c24855 .quad 0xf3c3bcb71e707bf6 .quad 0x351d9b8c7291a762 .quad 0x00502e6edad69a33 .quad 0x522f521f1ec8807f .quad 0x272c1f46f9a3902b .quad 0xc91ba3b799657bcc .quad 0xae614b304f8a1c0e .quad 0x7afcaad70b99017b // 2^100 * 7 * G .quad 0xc25ded54a4b8be41 .quad 0x902d13e11bb0e2dd .quad 0x41f43233cde82ab2 .quad 0x1085faa5c3aae7cb .quad 0xa88141ecef842b6b .quad 0x55e7b14797abe6c5 .quad 0x8c748f9703784ffe .quad 0x5b50a1f7afcd00b7 .quad 0x9b840f66f1361315 .quad 0x18462242701003e9 .quad 0x65ed45fae4a25080 .quad 0x0a2862393fda7320 // 2^100 * 8 * G .quad 0x46ab13c8347cbc9d .quad 0x3849e8d499c12383 .quad 0x4cea314087d64ac9 .quad 0x1f354134b1a29ee7 .quad 0x960e737b6ecb9d17 .quad 0xfaf24948d67ceae1 .quad 0x37e7a9b4d55e1b89 .quad 0x5cb7173cb46c59eb .quad 0x4a89e68b82b7abf0 .quad 0xf41cd9279ba6b7b9 .quad 0x16e6c210e18d876f .quad 0x7cacdb0f7f1b09c6 // 2^104 * 1 * G .quad 0x9062b2e0d91a78bc .quad 0x47c9889cc8509667 .quad 0x9df54a66405070b8 .quad 0x7369e6a92493a1bf .quad 0xe1014434dcc5caed .quad 0x47ed5d963c84fb33 .quad 0x70019576ed86a0e7 .quad 0x25b2697bd267f9e4 .quad 0x9d673ffb13986864 .quad 0x3ca5fbd9415dc7b8 .quad 0xe04ecc3bdf273b5e .quad 0x1420683db54e4cd2 // 2^104 * 2 * G .quad 0xb478bd1e249dd197 .quad 0x620c35005e58c102 .quad 0xfb02d32fccbaac5c .quad 0x60b63bebf508a72d .quad 0x34eebb6fc1cc5ad0 .quad 0x6a1b0ce99646ac8b .quad 0xd3b0da49a66bde53 .quad 0x31e83b4161d081c1 .quad 0x97e8c7129e062b4f 
.quad 0x49e48f4f29320ad8 .quad 0x5bece14b6f18683f .quad 0x55cf1eb62d550317 // 2^104 * 3 * G .quad 0x5879101065c23d58 .quad 0x8b9d086d5094819c .quad 0xe2402fa912c55fa7 .quad 0x669a6564570891d4 .quad 0x3076b5e37df58c52 .quad 0xd73ab9dde799cc36 .quad 0xbd831ce34913ee20 .quad 0x1a56fbaa62ba0133 .quad 0x943e6b505c9dc9ec .quad 0x302557bba77c371a .quad 0x9873ae5641347651 .quad 0x13c4836799c58a5c // 2^104 * 4 * G .quad 0x423a5d465ab3e1b9 .quad 0xfc13c187c7f13f61 .quad 0x19f83664ecb5b9b6 .quad 0x66f80c93a637b607 .quad 0xc4dcfb6a5d8bd080 .quad 0xdeebc4ec571a4842 .quad 0xd4b2e883b8e55365 .quad 0x50bdc87dc8e5b827 .quad 0x606d37836edfe111 .quad 0x32353e15f011abd9 .quad 0x64b03ac325b73b96 .quad 0x1dd56444725fd5ae // 2^104 * 5 * G .quad 0x8fa47ff83362127d .quad 0xbc9f6ac471cd7c15 .quad 0x6e71454349220c8b .quad 0x0e645912219f732e .quad 0xc297e60008bac89a .quad 0x7d4cea11eae1c3e0 .quad 0xf3e38be19fe7977c .quad 0x3a3a450f63a305cd .quad 0x078f2f31d8394627 .quad 0x389d3183de94a510 .quad 0xd1e36c6d17996f80 .quad 0x318c8d9393a9a87b // 2^104 * 6 * G .quad 0xf2745d032afffe19 .quad 0x0c9f3c497f24db66 .quad 0xbc98d3e3ba8598ef .quad 0x224c7c679a1d5314 .quad 0x5d669e29ab1dd398 .quad 0xfc921658342d9e3b .quad 0x55851dfdf35973cd .quad 0x509a41c325950af6 .quad 0xbdc06edca6f925e9 .quad 0x793ef3f4641b1f33 .quad 0x82ec12809d833e89 .quad 0x05bff02328a11389 // 2^104 * 7 * G .quad 0x3632137023cae00b .quad 0x544acf0ad1accf59 .quad 0x96741049d21a1c88 .quad 0x780b8cc3fa2a44a7 .quad 0x6881a0dd0dc512e4 .quad 0x4fe70dc844a5fafe .quad 0x1f748e6b8f4a5240 .quad 0x576277cdee01a3ea .quad 0x1ef38abc234f305f .quad 0x9a577fbd1405de08 .quad 0x5e82a51434e62a0d .quad 0x5ff418726271b7a1 // 2^104 * 8 * G .quad 0x398e080c1789db9d .quad 0xa7602025f3e778f5 .quad 0xfa98894c06bd035d .quad 0x106a03dc25a966be .quad 0xe5db47e813b69540 .quad 0xf35d2a3b432610e1 .quad 0xac1f26e938781276 .quad 0x29d4db8ca0a0cb69 .quad 0xd9ad0aaf333353d0 .quad 0x38669da5acd309e5 .quad 0x3c57658ac888f7f0 .quad 0x4ab38a51052cbefa // 2^108 * 1 * G .quad 0xdfdacbee4324c0e9 .quad 0x054442883f955bb7 .quad 0xdef7aaa8ea31609f .quad 0x68aee70642287cff .quad 0xf68fe2e8809de054 .quad 0xe3bc096a9c82bad1 .quad 0x076353d40aadbf45 .quad 0x7b9b1fb5dea1959e .quad 0xf01cc8f17471cc0c .quad 0x95242e37579082bb .quad 0x27776093d3e46b5f .quad 0x2d13d55a28bd85fb // 2^108 * 2 * G .quad 0xfac5d2065b35b8da .quad 0xa8da8a9a85624bb7 .quad 0xccd2ca913d21cd0f .quad 0x6b8341ee8bf90d58 .quad 0xbf019cce7aee7a52 .quad 0xa8ded2b6e454ead3 .quad 0x3c619f0b87a8bb19 .quad 0x3619b5d7560916d8 .quad 0x3579f26b0282c4b2 .quad 0x64d592f24fafefae .quad 0xb7cded7b28c8c7c0 .quad 0x6a927b6b7173a8d7 // 2^108 * 3 * G .quad 0x1f6db24f986e4656 .quad 0x1021c02ed1e9105b .quad 0xf8ff3fff2cc0a375 .quad 0x1d2a6bf8c6c82592 .quad 0x8d7040863ece88eb .quad 0xf0e307a980eec08c .quad 0xac2250610d788fda .quad 0x056d92a43a0d478d .quad 0x1b05a196fc3da5a1 .quad 0x77d7a8c243b59ed0 .quad 0x06da3d6297d17918 .quad 0x66fbb494f12353f7 // 2^108 * 4 * G .quad 0x751a50b9d85c0fb8 .quad 0xd1afdc258bcf097b .quad 0x2f16a6a38309a969 .quad 0x14ddff9ee5b00659 .quad 0xd6d70996f12309d6 .quad 0xdbfb2385e9c3d539 .quad 0x46d602b0f7552411 .quad 0x270a0b0557843e0c .quad 0x61ff0640a7862bcc .quad 0x81cac09a5f11abfe .quad 0x9047830455d12abb .quad 0x19a4bde1945ae873 // 2^108 * 5 * G .quad 0x9b9f26f520a6200a .quad 0x64804443cf13eaf8 .quad 0x8a63673f8631edd3 .quad 0x72bbbce11ed39dc1 .quad 0x40c709dec076c49f .quad 0x657bfaf27f3e53f6 .quad 0x40662331eca042c4 .quad 0x14b375487eb4df04 .quad 0xae853c94ab66dc47 .quad 0xeb62343edf762d6e .quad 0xf08e0e186fb2f7d1 .quad 
0x4f0b1c02700ab37a // 2^108 * 6 * G .quad 0xe1706787d81951fa .quad 0xa10a2c8eb290c77b .quad 0xe7382fa03ed66773 .quad 0x0a4d84710bcc4b54 .quad 0x79fd21ccc1b2e23f .quad 0x4ae7c281453df52a .quad 0xc8172ec9d151486b .quad 0x68abe9443e0a7534 .quad 0xda12c6c407831dcb .quad 0x0da230d74d5c510d .quad 0x4ab1531e6bd404e1 .quad 0x4106b166bcf440ef // 2^108 * 7 * G .quad 0x02e57a421cd23668 .quad 0x4ad9fb5d0eaef6fd .quad 0x954e6727b1244480 .quad 0x7f792f9d2699f331 .quad 0xa485ccd539e4ecf2 .quad 0x5aa3f3ad0555bab5 .quad 0x145e3439937df82d .quad 0x1238b51e1214283f .quad 0x0b886b925fd4d924 .quad 0x60906f7a3626a80d .quad 0xecd367b4b98abd12 .quad 0x2876beb1def344cf // 2^108 * 8 * G .quad 0xdc84e93563144691 .quad 0x632fe8a0d61f23f4 .quad 0x4caa800612a9a8d5 .quad 0x48f9dbfa0e9918d3 .quad 0xd594b3333a8a85f8 .quad 0x4ea37689e78d7d58 .quad 0x73bf9f455e8e351f .quad 0x5507d7d2bc41ebb4 .quad 0x1ceb2903299572fc .quad 0x7c8ccaa29502d0ee .quad 0x91bfa43411cce67b .quad 0x5784481964a831e7 // 2^112 * 1 * G .quad 0xda7c2b256768d593 .quad 0x98c1c0574422ca13 .quad 0xf1a80bd5ca0ace1d .quad 0x29cdd1adc088a690 .quad 0xd6cfd1ef5fddc09c .quad 0xe82b3efdf7575dce .quad 0x25d56b5d201634c2 .quad 0x3041c6bb04ed2b9b .quad 0x0ff2f2f9d956e148 .quad 0xade797759f356b2e .quad 0x1a4698bb5f6c025c .quad 0x104bbd6814049a7b // 2^112 * 2 * G .quad 0x51f0fd3168f1ed67 .quad 0x2c811dcdd86f3bc2 .quad 0x44dc5c4304d2f2de .quad 0x5be8cc57092a7149 .quad 0xa95d9a5fd67ff163 .quad 0xe92be69d4cc75681 .quad 0xb7f8024cde20f257 .quad 0x204f2a20fb072df5 .quad 0xc8143b3d30ebb079 .quad 0x7589155abd652e30 .quad 0x653c3c318f6d5c31 .quad 0x2570fb17c279161f // 2^112 * 3 * G .quad 0x3efa367f2cb61575 .quad 0xf5f96f761cd6026c .quad 0xe8c7142a65b52562 .quad 0x3dcb65ea53030acd .quad 0x192ea9550bb8245a .quad 0xc8e6fba88f9050d1 .quad 0x7986ea2d88a4c935 .quad 0x241c5f91de018668 .quad 0x28d8172940de6caa .quad 0x8fbf2cf022d9733a .quad 0x16d7fcdd235b01d1 .quad 0x08420edd5fcdf0e5 // 2^112 * 4 * G .quad 0xcdff20ab8362fa4a .quad 0x57e118d4e21a3e6e .quad 0xe3179617fc39e62b .quad 0x0d9a53efbc1769fd .quad 0x0358c34e04f410ce .quad 0xb6135b5a276e0685 .quad 0x5d9670c7ebb91521 .quad 0x04d654f321db889c .quad 0x5e7dc116ddbdb5d5 .quad 0x2954deb68da5dd2d .quad 0x1cb608173334a292 .quad 0x4a7a4f2618991ad7 // 2^112 * 5 * G .quad 0xf4a718025fb15f95 .quad 0x3df65f346b5c1b8f .quad 0xcdfcf08500e01112 .quad 0x11b50c4cddd31848 .quad 0x24c3b291af372a4b .quad 0x93da8270718147f2 .quad 0xdd84856486899ef2 .quad 0x4a96314223e0ee33 .quad 0xa6e8274408a4ffd6 .quad 0x738e177e9c1576d9 .quad 0x773348b63d02b3f2 .quad 0x4f4bce4dce6bcc51 // 2^112 * 6 * G .quad 0xa71fce5ae2242584 .quad 0x26ea725692f58a9e .quad 0xd21a09d71cea3cf4 .quad 0x73fcdd14b71c01e6 .quad 0x30e2616ec49d0b6f .quad 0xe456718fcaec2317 .quad 0x48eb409bf26b4fa6 .quad 0x3042cee561595f37 .quad 0x427e7079449bac41 .quad 0x855ae36dbce2310a .quad 0x4cae76215f841a7c .quad 0x389e740c9a9ce1d6 // 2^112 * 7 * G .quad 0x64fcb3ae34dcb9ce .quad 0x97500323e348d0ad .quad 0x45b3f07d62c6381b .quad 0x61545379465a6788 .quad 0xc9bd78f6570eac28 .quad 0xe55b0b3227919ce1 .quad 0x65fc3eaba19b91ed .quad 0x25c425e5d6263690 .quad 0x3f3e06a6f1d7de6e .quad 0x3ef976278e062308 .quad 0x8c14f6264e8a6c77 .quad 0x6539a08915484759 // 2^112 * 8 * G .quad 0xe9d21f74c3d2f773 .quad 0xc150544125c46845 .quad 0x624e5ce8f9b99e33 .quad 0x11c5e4aac5cd186c .quad 0xddc4dbd414bb4a19 .quad 0x19b2bc3c98424f8e .quad 0x48a89fd736ca7169 .quad 0x0f65320ef019bd90 .quad 0xd486d1b1cafde0c6 .quad 0x4f3fe6e3163b5181 .quad 0x59a8af0dfaf2939a .quad 0x4cabc7bdec33072a // 2^116 * 1 * G .quad 0x16faa8fb532f7428 .quad 
0xdbd42ea046a4e272 .quad 0x5337653b8b9ea480 .quad 0x4065947223973f03 .quad 0xf7c0a19c1a54a044 .quad 0x4a1c5e2477bd9fbb .quad 0xa6e3ca115af22972 .quad 0x1819bb953f2e9e0d .quad 0x498fbb795e042e84 .quad 0x7d0dd89a7698b714 .quad 0x8bfb0ba427fe6295 .quad 0x36ba82e721200524 // 2^116 * 2 * G .quad 0xd60ecbb74245ec41 .quad 0xfd9be89e34348716 .quad 0xc9240afee42284de .quad 0x4472f648d0531db4 .quad 0xc8d69d0a57274ed5 .quad 0x45ba803260804b17 .quad 0xdf3cda102255dfac .quad 0x77d221232709b339 .quad 0x498a6d7064ad94d8 .quad 0xa5b5c8fd9af62263 .quad 0x8ca8ed0545c141f4 .quad 0x2c63bec3662d358c // 2^116 * 3 * G .quad 0x7fe60d8bea787955 .quad 0xb9dc117eb5f401b7 .quad 0x91c7c09a19355cce .quad 0x22692ef59442bedf .quad 0x9a518b3a8586f8bf .quad 0x9ee71af6cbb196f0 .quad 0xaa0625e6a2385cf2 .quad 0x1deb2176ddd7c8d1 .quad 0x8563d19a2066cf6c .quad 0x401bfd8c4dcc7cd7 .quad 0xd976a6becd0d8f62 .quad 0x67cfd773a278b05e // 2^116 * 4 * G .quad 0x8dec31faef3ee475 .quad 0x99dbff8a9e22fd92 .quad 0x512d11594e26cab1 .quad 0x0cde561eec4310b9 .quad 0x2d5fa9855a4e586a .quad 0x65f8f7a449beab7e .quad 0xaa074dddf21d33d3 .quad 0x185cba721bcb9dee .quad 0x93869da3f4e3cb41 .quad 0xbf0392f540f7977e .quad 0x026204fcd0463b83 .quad 0x3ec91a769eec6eed // 2^116 * 5 * G .quad 0x1e9df75bf78166ad .quad 0x4dfda838eb0cd7af .quad 0xba002ed8c1eaf988 .quad 0x13fedb3e11f33cfc .quad 0x0fad2fb7b0a3402f .quad 0x46615ecbfb69f4a8 .quad 0xf745bcc8c5f8eaa6 .quad 0x7a5fa8794a94e896 .quad 0x52958faa13cd67a1 .quad 0x965ee0818bdbb517 .quad 0x16e58daa2e8845b3 .quad 0x357d397d5499da8f // 2^116 * 6 * G .quad 0x1ebfa05fb0bace6c .quad 0xc934620c1caf9a1e .quad 0xcc771cc41d82b61a .quad 0x2d94a16aa5f74fec .quad 0x481dacb4194bfbf8 .quad 0x4d77e3f1bae58299 .quad 0x1ef4612e7d1372a0 .quad 0x3a8d867e70ff69e1 .quad 0x6f58cd5d55aff958 .quad 0xba3eaa5c75567721 .quad 0x75c123999165227d .quad 0x69be1343c2f2b35e // 2^116 * 7 * G .quad 0x0e091d5ee197c92a .quad 0x4f51019f2945119f .quad 0x143679b9f034e99c .quad 0x7d88112e4d24c696 .quad 0x82bbbdac684b8de3 .quad 0xa2f4c7d03fca0718 .quad 0x337f92fbe096aaa8 .quad 0x200d4d8c63587376 .quad 0x208aed4b4893b32b .quad 0x3efbf23ebe59b964 .quad 0xd762deb0dba5e507 .quad 0x69607bd681bd9d94 // 2^116 * 8 * G .quad 0xf6be021068de1ce1 .quad 0xe8d518e70edcbc1f .quad 0xe3effdd01b5505a5 .quad 0x35f63353d3ec3fd0 .quad 0x3b7f3bd49323a902 .quad 0x7c21b5566b2c6e53 .quad 0xe5ba8ff53a7852a7 .quad 0x28bc77a5838ece00 .quad 0x63ba78a8e25d8036 .quad 0x63651e0094333490 .quad 0x48d82f20288ce532 .quad 0x3a31abfa36b57524 // 2^120 * 1 * G .quad 0x239e9624089c0a2e .quad 0xc748c4c03afe4738 .quad 0x17dbed2a764fa12a .quad 0x639b93f0321c8582 .quad 0xc08f788f3f78d289 .quad 0xfe30a72ca1404d9f .quad 0xf2778bfccf65cc9d .quad 0x7ee498165acb2021 .quad 0x7bd508e39111a1c3 .quad 0x2b2b90d480907489 .quad 0xe7d2aec2ae72fd19 .quad 0x0edf493c85b602a6 // 2^120 * 2 * G .quad 0xaecc8158599b5a68 .quad 0xea574f0febade20e .quad 0x4fe41d7422b67f07 .quad 0x403b92e3019d4fb4 .quad 0x6767c4d284764113 .quad 0xa090403ff7f5f835 .quad 0x1c8fcffacae6bede .quad 0x04c00c54d1dfa369 .quad 0x4dc22f818b465cf8 .quad 0x71a0f35a1480eff8 .quad 0xaee8bfad04c7d657 .quad 0x355bb12ab26176f4 // 2^120 * 3 * G .quad 0xa71e64cc7493bbf4 .quad 0xe5bd84d9eca3b0c3 .quad 0x0a6bc50cfa05e785 .quad 0x0f9b8132182ec312 .quad 0xa301dac75a8c7318 .quad 0xed90039db3ceaa11 .quad 0x6f077cbf3bae3f2d .quad 0x7518eaf8e052ad8e .quad 0xa48859c41b7f6c32 .quad 0x0f2d60bcf4383298 .quad 0x1815a929c9b1d1d9 .quad 0x47c3871bbb1755c4 // 2^120 * 4 * G .quad 0x5144539771ec4f48 .quad 0xf805b17dc98c5d6e .quad 0xf762c11a47c3c66b .quad 
0x00b89b85764699dc .quad 0xfbe65d50c85066b0 .quad 0x62ecc4b0b3a299b0 .quad 0xe53754ea441ae8e0 .quad 0x08fea02ce8d48d5f .quad 0x824ddd7668deead0 .quad 0xc86445204b685d23 .quad 0xb514cfcd5d89d665 .quad 0x473829a74f75d537 // 2^120 * 5 * G .quad 0x82d2da754679c418 .quad 0xe63bd7d8b2618df0 .quad 0x355eef24ac47eb0a .quad 0x2078684c4833c6b4 .quad 0x23d9533aad3902c9 .quad 0x64c2ddceef03588f .quad 0x15257390cfe12fb4 .quad 0x6c668b4d44e4d390 .quad 0x3b48cf217a78820c .quad 0xf76a0ab281273e97 .quad 0xa96c65a78c8eed7b .quad 0x7411a6054f8a433f // 2^120 * 6 * G .quad 0x4d659d32b99dc86d .quad 0x044cdc75603af115 .quad 0xb34c712cdcc2e488 .quad 0x7c136574fb8134ff .quad 0x579ae53d18b175b4 .quad 0x68713159f392a102 .quad 0x8455ecba1eef35f5 .quad 0x1ec9a872458c398f .quad 0xb8e6a4d400a2509b .quad 0x9b81d7020bc882b4 .quad 0x57e7cc9bf1957561 .quad 0x3add88a5c7cd6460 // 2^120 * 7 * G .quad 0xab895770b635dcf2 .quad 0x02dfef6cf66c1fbc .quad 0x85530268beb6d187 .quad 0x249929fccc879e74 .quad 0x85c298d459393046 .quad 0x8f7e35985ff659ec .quad 0x1d2ca22af2f66e3a .quad 0x61ba1131a406a720 .quad 0xa3d0a0f116959029 .quad 0x023b6b6cba7ebd89 .quad 0x7bf15a3e26783307 .quad 0x5620310cbbd8ece7 // 2^120 * 8 * G .quad 0x528993434934d643 .quad 0xb9dbf806a51222f5 .quad 0x8f6d878fc3f41c22 .quad 0x37676a2a4d9d9730 .quad 0x6646b5f477e285d6 .quad 0x40e8ff676c8f6193 .quad 0xa6ec7311abb594dd .quad 0x7ec846f3658cec4d .quad 0x9b5e8f3f1da22ec7 .quad 0x130f1d776c01cd13 .quad 0x214c8fcfa2989fb8 .quad 0x6daaf723399b9dd5 // 2^124 * 1 * G .quad 0x591e4a5610628564 .quad 0x2a4bb87ca8b4df34 .quad 0xde2a2572e7a38e43 .quad 0x3cbdabd9fee5046e .quad 0x81aebbdd2cd13070 .quad 0x962e4325f85a0e9e .quad 0xde9391aacadffecb .quad 0x53177fda52c230e6 .quad 0xa7bc970650b9de79 .quad 0x3d12a7fbc301b59b .quad 0x02652e68d36ae38c .quad 0x79d739835a6199dc // 2^124 * 2 * G .quad 0xd9354df64131c1bd .quad 0x758094a186ec5822 .quad 0x4464ee12e459f3c2 .quad 0x6c11fce4cb133282 .quad 0x21c9d9920d591737 .quad 0x9bea41d2e9b46cd6 .quad 0xe20e84200d89bfca .quad 0x79d99f946eae5ff8 .quad 0xf17b483568673205 .quad 0x387deae83caad96c .quad 0x61b471fd56ffe386 .quad 0x31741195b745a599 // 2^124 * 3 * G .quad 0xe8d10190b77a360b .quad 0x99b983209995e702 .quad 0xbd4fdff8fa0247aa .quad 0x2772e344e0d36a87 .quad 0x17f8ba683b02a047 .quad 0x50212096feefb6c8 .quad 0x70139be21556cbe2 .quad 0x203e44a11d98915b .quad 0xd6863eba37b9e39f .quad 0x105bc169723b5a23 .quad 0x104f6459a65c0762 .quad 0x567951295b4d38d4 // 2^124 * 4 * G .quad 0x535fd60613037524 .quad 0xe210adf6b0fbc26a .quad 0xac8d0a9b23e990ae .quad 0x47204d08d72fdbf9 .quad 0x07242eb30d4b497f .quad 0x1ef96306b9bccc87 .quad 0x37950934d8116f45 .quad 0x05468d6201405b04 .quad 0x00f565a9f93267de .quad 0xcecfd78dc0d58e8a .quad 0xa215e2dcf318e28e .quad 0x4599ee919b633352 // 2^124 * 5 * G .quad 0xd3c220ca70e0e76b .quad 0xb12bea58ea9f3094 .quad 0x294ddec8c3271282 .quad 0x0c3539e1a1d1d028 .quad 0xac746d6b861ae579 .quad 0x31ab0650f6aea9dc .quad 0x241d661140256d4c .quad 0x2f485e853d21a5de .quad 0x329744839c0833f3 .quad 0x6fe6257fd2abc484 .quad 0x5327d1814b358817 .quad 0x65712585893fe9bc // 2^124 * 6 * G .quad 0x9c102fb732a61161 .quad 0xe48e10dd34d520a8 .quad 0x365c63546f9a9176 .quad 0x32f6fe4c046f6006 .quad 0x81c29f1bd708ee3f .quad 0xddcb5a05ae6407d0 .quad 0x97aec1d7d2a3eba7 .quad 0x1590521a91d50831 .quad 0x40a3a11ec7910acc .quad 0x9013dff8f16d27ae .quad 0x1a9720d8abb195d4 .quad 0x1bb9fe452ea98463 // 2^124 * 7 * G .quad 0xe9d1d950b3d54f9e .quad 0x2d5f9cbee00d33c1 .quad 0x51c2c656a04fc6ac .quad 0x65c091ee3c1cbcc9 .quad 0xcf5e6c95cc36747c .quad 
0x294201536b0bc30d .quad 0x453ac67cee797af0 .quad 0x5eae6ab32a8bb3c9 .quad 0x7083661114f118ea .quad 0x2b37b87b94349cad .quad 0x7273f51cb4e99f40 .quad 0x78a2a95823d75698 // 2^124 * 8 * G .quad 0xa2b072e95c8c2ace .quad 0x69cffc96651e9c4b .quad 0x44328ef842e7b42b .quad 0x5dd996c122aadeb3 .quad 0xb4f23c425ef83207 .quad 0xabf894d3c9a934b5 .quad 0xd0708c1339fd87f7 .quad 0x1876789117166130 .quad 0x925b5ef0670c507c .quad 0x819bc842b93c33bf .quad 0x10792e9a70dd003f .quad 0x59ad4b7a6e28dc74 // 2^128 * 1 * G .quad 0x5f3a7562eb3dbe47 .quad 0xf7ea38548ebda0b8 .quad 0x00c3e53145747299 .quad 0x1304e9e71627d551 .quad 0x583b04bfacad8ea2 .quad 0x29b743e8148be884 .quad 0x2b1e583b0810c5db .quad 0x2b5449e58eb3bbaa .quad 0x789814d26adc9cfe .quad 0x3c1bab3f8b48dd0b .quad 0xda0fe1fff979c60a .quad 0x4468de2d7c2dd693 // 2^128 * 2 * G .quad 0x51bb355e9419469e .quad 0x33e6dc4c23ddc754 .quad 0x93a5b6d6447f9962 .quad 0x6cce7c6ffb44bd63 .quad 0x4b9ad8c6f86307ce .quad 0x21113531435d0c28 .quad 0xd4a866c5657a772c .quad 0x5da6427e63247352 .quad 0x1a94c688deac22ca .quad 0xb9066ef7bbae1ff8 .quad 0x88ad8c388d59580f .quad 0x58f29abfe79f2ca8 // 2^128 * 3 * G .quad 0xe90ecfab8de73e68 .quad 0x54036f9f377e76a5 .quad 0xf0495b0bbe015982 .quad 0x577629c4a7f41e36 .quad 0x4b5a64bf710ecdf6 .quad 0xb14ce538462c293c .quad 0x3643d056d50b3ab9 .quad 0x6af93724185b4870 .quad 0x3220024509c6a888 .quad 0xd2e036134b558973 .quad 0x83e236233c33289f .quad 0x701f25bb0caec18f // 2^128 * 4 * G .quad 0xc3a8b0f8e4616ced .quad 0xf700660e9e25a87d .quad 0x61e3061ff4bca59c .quad 0x2e0c92bfbdc40be9 .quad 0x9d18f6d97cbec113 .quad 0x844a06e674bfdbe4 .quad 0x20f5b522ac4e60d6 .quad 0x720a5bc050955e51 .quad 0x0c3f09439b805a35 .quad 0xe84e8b376242abfc .quad 0x691417f35c229346 .quad 0x0e9b9cbb144ef0ec // 2^128 * 5 * G .quad 0xfbbad48ffb5720ad .quad 0xee81916bdbf90d0e .quad 0xd4813152635543bf .quad 0x221104eb3f337bd8 .quad 0x8dee9bd55db1beee .quad 0xc9c3ab370a723fb9 .quad 0x44a8f1bf1c68d791 .quad 0x366d44191cfd3cde .quad 0x9e3c1743f2bc8c14 .quad 0x2eda26fcb5856c3b .quad 0xccb82f0e68a7fb97 .quad 0x4167a4e6bc593244 // 2^128 * 6 * G .quad 0x643b9d2876f62700 .quad 0x5d1d9d400e7668eb .quad 0x1b4b430321fc0684 .quad 0x7938bb7e2255246a .quad 0xc2be2665f8ce8fee .quad 0xe967ff14e880d62c .quad 0xf12e6e7e2f364eee .quad 0x34b33370cb7ed2f6 .quad 0xcdc591ee8681d6cc .quad 0xce02109ced85a753 .quad 0xed7485c158808883 .quad 0x1176fc6e2dfe65e4 // 2^128 * 7 * G .quad 0xb4af6cd05b9c619b .quad 0x2ddfc9f4b2a58480 .quad 0x3d4fa502ebe94dc4 .quad 0x08fc3a4c677d5f34 .quad 0xdb90e28949770eb8 .quad 0x98fbcc2aacf440a3 .quad 0x21354ffeded7879b .quad 0x1f6a3e54f26906b6 .quad 0x60a4c199d30734ea .quad 0x40c085b631165cd6 .quad 0xe2333e23f7598295 .quad 0x4f2fad0116b900d1 // 2^128 * 8 * G .quad 0x44beb24194ae4e54 .quad 0x5f541c511857ef6c .quad 0xa61e6b2d368d0498 .quad 0x445484a4972ef7ab .quad 0x962cd91db73bb638 .quad 0xe60577aafc129c08 .quad 0x6f619b39f3b61689 .quad 0x3451995f2944ee81 .quad 0x9152fcd09fea7d7c .quad 0x4a816c94b0935cf6 .quad 0x258e9aaa47285c40 .quad 0x10b89ca6042893b7 // 2^132 * 1 * G .quad 0x9b2a426e3b646025 .quad 0x32127190385ce4cf .quad 0xa25cffc2dd6dea45 .quad 0x06409010bea8de75 .quad 0xd67cded679d34aa0 .quad 0xcc0b9ec0cc4db39f .quad 0xa535a456e35d190f .quad 0x2e05d9eaf61f6fef .quad 0xc447901ad61beb59 .quad 0x661f19bce5dc880a .quad 0x24685482b7ca6827 .quad 0x293c778cefe07f26 // 2^132 * 2 * G .quad 0x86809e7007069096 .quad 0xaad75b15e4e50189 .quad 0x07f35715a21a0147 .quad 0x0487f3f112815d5e .quad 0x16c795d6a11ff200 .quad 0xcb70d0e2b15815c9 .quad 0x89f293209b5395b5 .quad 
0x50b8c2d031e47b4f .quad 0x48350c08068a4962 .quad 0x6ffdd05351092c9a .quad 0x17af4f4aaf6fc8dd .quad 0x4b0553b53cdba58b // 2^132 * 3 * G .quad 0x9c65fcbe1b32ff79 .quad 0xeb75ea9f03b50f9b .quad 0xfced2a6c6c07e606 .quad 0x35106cd551717908 .quad 0xbf05211b27c152d4 .quad 0x5ec26849bd1af639 .quad 0x5e0b2caa8e6fab98 .quad 0x054c8bdd50bd0840 .quad 0x38a0b12f1dcf073d .quad 0x4b60a8a3b7f6a276 .quad 0xfed5ac25d3404f9a .quad 0x72e82d5e5505c229 // 2^132 * 4 * G .quad 0x6b0b697ff0d844c8 .quad 0xbb12f85cd979cb49 .quad 0xd2a541c6c1da0f1f .quad 0x7b7c242958ce7211 .quad 0x00d9cdfd69771d02 .quad 0x410276cd6cfbf17e .quad 0x4c45306c1cb12ec7 .quad 0x2857bf1627500861 .quad 0x9f21903f0101689e .quad 0xd779dfd3bf861005 .quad 0xa122ee5f3deb0f1b .quad 0x510df84b485a00d4 // 2^132 * 5 * G .quad 0xa54133bb9277a1fa .quad 0x74ec3b6263991237 .quad 0x1a3c54dc35d2f15a .quad 0x2d347144e482ba3a .quad 0x24b3c887c70ac15e .quad 0xb0f3a557fb81b732 .quad 0x9b2cde2fe578cc1b .quad 0x4cf7ed0703b54f8e .quad 0x6bd47c6598fbee0f .quad 0x9e4733e2ab55be2d .quad 0x1093f624127610c5 .quad 0x4e05e26ad0a1eaa4 // 2^132 * 6 * G .quad 0xda9b6b624b531f20 .quad 0x429a760e77509abb .quad 0xdbe9f522e823cb80 .quad 0x618f1856880c8f82 .quad 0x1833c773e18fe6c0 .quad 0xe3c4711ad3c87265 .quad 0x3bfd3c4f0116b283 .quad 0x1955875eb4cd4db8 .quad 0x6da6de8f0e399799 .quad 0x7ad61aa440fda178 .quad 0xb32cd8105e3563dd .quad 0x15f6beae2ae340ae // 2^132 * 7 * G .quad 0x862bcb0c31ec3a62 .quad 0x810e2b451138f3c2 .quad 0x788ec4b839dac2a4 .quad 0x28f76867ae2a9281 .quad 0xba9a0f7b9245e215 .quad 0xf368612dd98c0dbb .quad 0x2e84e4cbf220b020 .quad 0x6ba92fe962d90eda .quad 0x3e4df9655884e2aa .quad 0xbd62fbdbdbd465a5 .quad 0xd7596caa0de9e524 .quad 0x6e8042ccb2b1b3d7 // 2^132 * 8 * G .quad 0xf10d3c29ce28ca6e .quad 0xbad34540fcb6093d .quad 0xe7426ed7a2ea2d3f .quad 0x08af9d4e4ff298b9 .quad 0x1530653616521f7e .quad 0x660d06b896203dba .quad 0x2d3989bc545f0879 .quad 0x4b5303af78ebd7b0 .quad 0x72f8a6c3bebcbde8 .quad 0x4f0fca4adc3a8e89 .quad 0x6fa9d4e8c7bfdf7a .quad 0x0dcf2d679b624eb7 // 2^136 * 1 * G .quad 0x3d5947499718289c .quad 0x12ebf8c524533f26 .quad 0x0262bfcb14c3ef15 .quad 0x20b878d577b7518e .quad 0x753941be5a45f06e .quad 0xd07caeed6d9c5f65 .quad 0x11776b9c72ff51b6 .quad 0x17d2d1d9ef0d4da9 .quad 0x27f2af18073f3e6a .quad 0xfd3fe519d7521069 .quad 0x22e3b72c3ca60022 .quad 0x72214f63cc65c6a7 // 2^136 * 2 * G .quad 0xb4e37f405307a693 .quad 0xaba714d72f336795 .quad 0xd6fbd0a773761099 .quad 0x5fdf48c58171cbc9 .quad 0x1d9db7b9f43b29c9 .quad 0xd605824a4f518f75 .quad 0xf2c072bd312f9dc4 .quad 0x1f24ac855a1545b0 .quad 0x24d608328e9505aa .quad 0x4748c1d10c1420ee .quad 0xc7ffe45c06fb25a2 .quad 0x00ba739e2ae395e6 // 2^136 * 3 * G .quad 0x592e98de5c8790d6 .quad 0xe5bfb7d345c2a2df .quad 0x115a3b60f9b49922 .quad 0x03283a3e67ad78f3 .quad 0xae4426f5ea88bb26 .quad 0x360679d984973bfb .quad 0x5c9f030c26694e50 .quad 0x72297de7d518d226 .quad 0x48241dc7be0cb939 .quad 0x32f19b4d8b633080 .quad 0xd3dfc90d02289308 .quad 0x05e1296846271945 // 2^136 * 4 * G .quad 0xba82eeb32d9c495a .quad 0xceefc8fcf12bb97c .quad 0xb02dabae93b5d1e0 .quad 0x39c00c9c13698d9b .quad 0xadbfbbc8242c4550 .quad 0xbcc80cecd03081d9 .quad 0x843566a6f5c8df92 .quad 0x78cf25d38258ce4c .quad 0x15ae6b8e31489d68 .quad 0xaa851cab9c2bf087 .quad 0xc9a75a97f04efa05 .quad 0x006b52076b3ff832 // 2^136 * 5 * G .quad 0x29e0cfe19d95781c .quad 0xb681df18966310e2 .quad 0x57df39d370516b39 .quad 0x4d57e3443bc76122 .quad 0xf5cb7e16b9ce082d .quad 0x3407f14c417abc29 .quad 0xd4b36bce2bf4a7ab .quad 0x7de2e9561a9f75ce .quad 0xde70d4f4b6a55ecb .quad 
0x4801527f5d85db99 .quad 0xdbc9c440d3ee9a81 .quad 0x6b2a90af1a6029ed // 2^136 * 6 * G .quad 0x6923f4fc9ae61e97 .quad 0x5735281de03f5fd1 .quad 0xa764ae43e6edd12d .quad 0x5fd8f4e9d12d3e4a .quad 0x77ebf3245bb2d80a .quad 0xd8301b472fb9079b .quad 0xc647e6f24cee7333 .quad 0x465812c8276c2109 .quad 0x4d43beb22a1062d9 .quad 0x7065fb753831dc16 .quad 0x180d4a7bde2968d7 .quad 0x05b32c2b1cb16790 // 2^136 * 7 * G .quad 0xc8c05eccd24da8fd .quad 0xa1cf1aac05dfef83 .quad 0xdbbeeff27df9cd61 .quad 0x3b5556a37b471e99 .quad 0xf7fca42c7ad58195 .quad 0x3214286e4333f3cc .quad 0xb6c29d0d340b979d .quad 0x31771a48567307e1 .quad 0x32b0c524e14dd482 .quad 0xedb351541a2ba4b6 .quad 0xa3d16048282b5af3 .quad 0x4fc079d27a7336eb // 2^136 * 8 * G .quad 0x51c938b089bf2f7f .quad 0x2497bd6502dfe9a7 .quad 0xffffc09c7880e453 .quad 0x124567cecaf98e92 .quad 0xdc348b440c86c50d .quad 0x1337cbc9cc94e651 .quad 0x6422f74d643e3cb9 .quad 0x241170c2bae3cd08 .quad 0x3ff9ab860ac473b4 .quad 0xf0911dee0113e435 .quad 0x4ae75060ebc6c4af .quad 0x3f8612966c87000d // 2^140 * 1 * G .quad 0x0c9c5303f7957be4 .quad 0xa3c31a20e085c145 .quad 0xb0721d71d0850050 .quad 0x0aba390eab0bf2da .quad 0x529fdffe638c7bf3 .quad 0xdf2b9e60388b4995 .quad 0xe027b34f1bad0249 .quad 0x7bc92fc9b9fa74ed .quad 0x9f97ef2e801ad9f9 .quad 0x83697d5479afda3a .quad 0xe906b3ffbd596b50 .quad 0x02672b37dd3fb8e0 // 2^140 * 2 * G .quad 0x48b2ca8b260885e4 .quad 0xa4286bec82b34c1c .quad 0x937e1a2617f58f74 .quad 0x741d1fcbab2ca2a5 .quad 0xee9ba729398ca7f5 .quad 0xeb9ca6257a4849db .quad 0x29eb29ce7ec544e1 .quad 0x232ca21ef736e2c8 .quad 0xbf61423d253fcb17 .quad 0x08803ceafa39eb14 .quad 0xf18602df9851c7af .quad 0x0400f3a049e3414b // 2^140 * 3 * G .quad 0xabce0476ba61c55b .quad 0x36a3d6d7c4d39716 .quad 0x6eb259d5e8d82d09 .quad 0x0c9176e984d756fb .quad 0x2efba412a06e7b06 .quad 0x146785452c8d2560 .quad 0xdf9713ebd67a91c7 .quad 0x32830ac7157eadf3 .quad 0x0e782a7ab73769e8 .quad 0x04a05d7875b18e2c .quad 0x29525226ebcceae1 .quad 0x0d794f8383eba820 // 2^140 * 4 * G .quad 0xff35f5cb9e1516f4 .quad 0xee805bcf648aae45 .quad 0xf0d73c2bb93a9ef3 .quad 0x097b0bf22092a6c2 .quad 0x7be44ce7a7a2e1ac .quad 0x411fd93efad1b8b7 .quad 0x1734a1d70d5f7c9b .quad 0x0d6592233127db16 .quad 0xc48bab1521a9d733 .quad 0xa6c2eaead61abb25 .quad 0x625c6c1cc6cb4305 .quad 0x7fc90fea93eb3a67 // 2^140 * 5 * G .quad 0x0408f1fe1f5c5926 .quad 0x1a8f2f5e3b258bf4 .quad 0x40a951a2fdc71669 .quad 0x6598ee93c98b577e .quad 0xc527deb59c7cb23d .quad 0x955391695328404e .quad 0xd64392817ccf2c7a .quad 0x6ce97dabf7d8fa11 .quad 0x25b5a8e50ef7c48f .quad 0xeb6034116f2ce532 .quad 0xc5e75173e53de537 .quad 0x73119fa08c12bb03 // 2^140 * 6 * G .quad 0xed30129453f1a4cb .quad 0xbce621c9c8f53787 .quad 0xfacb2b1338bee7b9 .quad 0x3025798a9ea8428c .quad 0x7845b94d21f4774d .quad 0xbf62f16c7897b727 .quad 0x671857c03c56522b .quad 0x3cd6a85295621212 .quad 0x3fecde923aeca999 .quad 0xbdaa5b0062e8c12f .quad 0x67b99dfc96988ade .quad 0x3f52c02852661036 // 2^140 * 7 * G .quad 0xffeaa48e2a1351c6 .quad 0x28624754fa7f53d7 .quad 0x0b5ba9e57582ddf1 .quad 0x60c0104ba696ac59 .quad 0x9258bf99eec416c6 .quad 0xac8a5017a9d2f671 .quad 0x629549ab16dea4ab .quad 0x05d0e85c99091569 .quad 0x051de020de9cbe97 .quad 0xfa07fc56b50bcf74 .quad 0x378cec9f0f11df65 .quad 0x36853c69ab96de4d // 2^140 * 8 * G .quad 0x36d9b8de78f39b2d .quad 0x7f42ed71a847b9ec .quad 0x241cd1d679bd3fde .quad 0x6a704fec92fbce6b .quad 0x4433c0b0fac5e7be .quad 0x724bae854c08dcbe .quad 0xf1f24cc446978f9b .quad 0x4a0aff6d62825fc8 .quad 0xe917fb9e61095301 .quad 0xc102df9402a092f8 .quad 0xbf09e2f5fa66190b .quad 
0x681109bee0dcfe37 // 2^144 * 1 * G .quad 0x559a0cc9782a0dde .quad 0x551dcdb2ea718385 .quad 0x7f62865b31ef238c .quad 0x504aa7767973613d .quad 0x9c18fcfa36048d13 .quad 0x29159db373899ddd .quad 0xdc9f350b9f92d0aa .quad 0x26f57eee878a19d4 .quad 0x0cab2cd55687efb1 .quad 0x5180d162247af17b .quad 0x85c15a344f5a2467 .quad 0x4041943d9dba3069 // 2^144 * 2 * G .quad 0xc3c0eeba43ebcc96 .quad 0x8d749c9c26ea9caf .quad 0xd9fa95ee1c77ccc6 .quad 0x1420a1d97684340f .quad 0x4b217743a26caadd .quad 0x47a6b424648ab7ce .quad 0xcb1d4f7a03fbc9e3 .quad 0x12d931429800d019 .quad 0x00c67799d337594f .quad 0x5e3c5140b23aa47b .quad 0x44182854e35ff395 .quad 0x1b4f92314359a012 // 2^144 * 3 * G .quad 0x3e5c109d89150951 .quad 0x39cefa912de9696a .quad 0x20eae43f975f3020 .quad 0x239b572a7f132dae .quad 0x33cf3030a49866b1 .quad 0x251f73d2215f4859 .quad 0xab82aa4051def4f6 .quad 0x5ff191d56f9a23f6 .quad 0x819ed433ac2d9068 .quad 0x2883ab795fc98523 .quad 0xef4572805593eb3d .quad 0x020c526a758f36cb // 2^144 * 4 * G .quad 0x779834f89ed8dbbc .quad 0xc8f2aaf9dc7ca46c .quad 0xa9524cdca3e1b074 .quad 0x02aacc4615313877 .quad 0xe931ef59f042cc89 .quad 0x2c589c9d8e124bb6 .quad 0xadc8e18aaec75997 .quad 0x452cfe0a5602c50c .quad 0x86a0f7a0647877df .quad 0xbbc464270e607c9f .quad 0xab17ea25f1fb11c9 .quad 0x4cfb7d7b304b877b // 2^144 * 5 * G .quad 0x72b43d6cb89b75fe .quad 0x54c694d99c6adc80 .quad 0xb8c3aa373ee34c9f .quad 0x14b4622b39075364 .quad 0xe28699c29789ef12 .quad 0x2b6ecd71df57190d .quad 0xc343c857ecc970d0 .quad 0x5b1d4cbc434d3ac5 .quad 0xb6fb2615cc0a9f26 .quad 0x3a4f0e2bb88dcce5 .quad 0x1301498b3369a705 .quad 0x2f98f71258592dd1 // 2^144 * 6 * G .quad 0x0c94a74cb50f9e56 .quad 0x5b1ff4a98e8e1320 .quad 0x9a2acc2182300f67 .quad 0x3a6ae249d806aaf9 .quad 0x2e12ae444f54a701 .quad 0xfcfe3ef0a9cbd7de .quad 0xcebf890d75835de0 .quad 0x1d8062e9e7614554 .quad 0x657ada85a9907c5a .quad 0x1a0ea8b591b90f62 .quad 0x8d0e1dfbdf34b4e9 .quad 0x298b8ce8aef25ff3 // 2^144 * 7 * G .quad 0x2a927953eff70cb2 .quad 0x4b89c92a79157076 .quad 0x9418457a30a7cf6a .quad 0x34b8a8404d5ce485 .quad 0x837a72ea0a2165de .quad 0x3fab07b40bcf79f6 .quad 0x521636c77738ae70 .quad 0x6ba6271803a7d7dc .quad 0xc26eecb583693335 .quad 0xd5a813df63b5fefd .quad 0xa293aa9aa4b22573 .quad 0x71d62bdd465e1c6a // 2^144 * 8 * G .quad 0x6533cc28d378df80 .quad 0xf6db43790a0fa4b4 .quad 0xe3645ff9f701da5a .quad 0x74d5f317f3172ba4 .quad 0xcd2db5dab1f75ef5 .quad 0xd77f95cf16b065f5 .quad 0x14571fea3f49f085 .quad 0x1c333621262b2b3d .quad 0xa86fe55467d9ca81 .quad 0x398b7c752b298c37 .quad 0xda6d0892e3ac623b .quad 0x4aebcc4547e9d98c // 2^148 * 1 * G .quad 0x53175a7205d21a77 .quad 0xb0c04422d3b934d4 .quad 0xadd9f24bdd5deadc .quad 0x074f46e69f10ff8c .quad 0x0de9b204a059a445 .quad 0xe15cb4aa4b17ad0f .quad 0xe1bbec521f79c557 .quad 0x2633f1b9d071081b .quad 0xc1fb4177018b9910 .quad 0xa6ea20dc6c0fe140 .quad 0xd661f3e74354c6ff .quad 0x5ecb72e6f1a3407a // 2^148 * 2 * G .quad 0xa515a31b2259fb4e .quad 0x0960f3972bcac52f .quad 0xedb52fec8d3454cb .quad 0x382e2720c476c019 .quad 0xfeeae106e8e86997 .quad 0x9863337f98d09383 .quad 0x9470480eaa06ebef .quad 0x038b6898d4c5c2d0 .quad 0xf391c51d8ace50a6 .quad 0x3142d0b9ae2d2948 .quad 0xdb4d5a1a7f24ca80 .quad 0x21aeba8b59250ea8 // 2^148 * 3 * G .quad 0x24f13b34cf405530 .quad 0x3c44ea4a43088af7 .quad 0x5dd5c5170006a482 .quad 0x118eb8f8890b086d .quad 0x53853600f0087f23 .quad 0x4c461879da7d5784 .quad 0x6af303deb41f6860 .quad 0x0a3c16c5c27c18ed .quad 0x17e49c17cc947f3d .quad 0xccc6eda6aac1d27b .quad 0xdf6092ceb0f08e56 .quad 0x4909b3e22c67c36b // 2^148 * 4 * G .quad 0x9c9c85ea63fe2e89 .quad 
0xbe1baf910e9412ec .quad 0x8f7baa8a86fbfe7b .quad 0x0fb17f9fef968b6c .quad 0x59a16676706ff64e .quad 0x10b953dd0d86a53d .quad 0x5848e1e6ce5c0b96 .quad 0x2d8b78e712780c68 .quad 0x79d5c62eafc3902b .quad 0x773a215289e80728 .quad 0xc38ae640e10120b9 .quad 0x09ae23717b2b1a6d // 2^148 * 5 * G .quad 0xbb6a192a4e4d083c .quad 0x34ace0630029e192 .quad 0x98245a59aafabaeb .quad 0x6d9c8a9ada97faac .quad 0x10ab8fa1ad32b1d0 .quad 0xe9aced1be2778b24 .quad 0xa8856bc0373de90f .quad 0x66f35ddddda53996 .quad 0xd27d9afb24997323 .quad 0x1bb7e07ef6f01d2e .quad 0x2ba7472df52ecc7f .quad 0x03019b4f646f9dc8 // 2^148 * 6 * G .quad 0x04a186b5565345cd .quad 0xeee76610bcc4116a .quad 0x689c73b478fb2a45 .quad 0x387dcbff65697512 .quad 0xaf09b214e6b3dc6b .quad 0x3f7573b5ad7d2f65 .quad 0xd019d988100a23b0 .quad 0x392b63a58b5c35f7 .quad 0x4093addc9c07c205 .quad 0xc565be15f532c37e .quad 0x63dbecfd1583402a .quad 0x61722b4aef2e032e // 2^148 * 7 * G .quad 0x0012aafeecbd47af .quad 0x55a266fb1cd46309 .quad 0xf203eb680967c72c .quad 0x39633944ca3c1429 .quad 0xd6b07a5581cb0e3c .quad 0x290ff006d9444969 .quad 0x08680b6a16dcda1f .quad 0x5568d2b75a06de59 .quad 0x8d0cb88c1b37cfe1 .quad 0x05b6a5a3053818f3 .quad 0xf2e9bc04b787d959 .quad 0x6beba1249add7f64 // 2^148 * 8 * G .quad 0x1d06005ca5b1b143 .quad 0x6d4c6bb87fd1cda2 .quad 0x6ef5967653fcffe7 .quad 0x097c29e8c1ce1ea5 .quad 0x5c3cecb943f5a53b .quad 0x9cc9a61d06c08df2 .quad 0xcfba639a85895447 .quad 0x5a845ae80df09fd5 .quad 0x4ce97dbe5deb94ca .quad 0x38d0a4388c709c48 .quad 0xc43eced4a169d097 .quad 0x0a1249fff7e587c3 // 2^152 * 1 * G .quad 0x12f0071b276d01c9 .quad 0xe7b8bac586c48c70 .quad 0x5308129b71d6fba9 .quad 0x5d88fbf95a3db792 .quad 0x0b408d9e7354b610 .quad 0x806b32535ba85b6e .quad 0xdbe63a034a58a207 .quad 0x173bd9ddc9a1df2c .quad 0x2b500f1efe5872df .quad 0x58d6582ed43918c1 .quad 0xe6ed278ec9673ae0 .quad 0x06e1cd13b19ea319 // 2^152 * 2 * G .quad 0x40d0ad516f166f23 .quad 0x118e32931fab6abe .quad 0x3fe35e14a04d088e .quad 0x3080603526e16266 .quad 0x472baf629e5b0353 .quad 0x3baa0b90278d0447 .quad 0x0c785f469643bf27 .quad 0x7f3a6a1a8d837b13 .quad 0xf7e644395d3d800b .quad 0x95a8d555c901edf6 .quad 0x68cd7830592c6339 .quad 0x30d0fded2e51307e // 2^152 * 3 * G .quad 0xe0594d1af21233b3 .quad 0x1bdbe78ef0cc4d9c .quad 0x6965187f8f499a77 .quad 0x0a9214202c099868 .quad 0x9cb4971e68b84750 .quad 0xa09572296664bbcf .quad 0x5c8de72672fa412b .quad 0x4615084351c589d9 .quad 0xbc9019c0aeb9a02e .quad 0x55c7110d16034cae .quad 0x0e6df501659932ec .quad 0x3bca0d2895ca5dfe // 2^152 * 4 * G .quad 0x40f031bc3c5d62a4 .quad 0x19fc8b3ecff07a60 .quad 0x98183da2130fb545 .quad 0x5631deddae8f13cd .quad 0x9c688eb69ecc01bf .quad 0xf0bc83ada644896f .quad 0xca2d955f5f7a9fe2 .quad 0x4ea8b4038df28241 .quad 0x2aed460af1cad202 .quad 0x46305305a48cee83 .quad 0x9121774549f11a5f .quad 0x24ce0930542ca463 // 2^152 * 5 * G .quad 0x1fe890f5fd06c106 .quad 0xb5c468355d8810f2 .quad 0x827808fe6e8caf3e .quad 0x41d4e3c28a06d74b .quad 0x3fcfa155fdf30b85 .quad 0xd2f7168e36372ea4 .quad 0xb2e064de6492f844 .quad 0x549928a7324f4280 .quad 0xf26e32a763ee1a2e .quad 0xae91e4b7d25ffdea .quad 0xbc3bd33bd17f4d69 .quad 0x491b66dec0dcff6a // 2^152 * 6 * G .quad 0x98f5b13dc7ea32a7 .quad 0xe3d5f8cc7e16db98 .quad 0xac0abf52cbf8d947 .quad 0x08f338d0c85ee4ac .quad 0x75f04a8ed0da64a1 .quad 0xed222caf67e2284b .quad 0x8234a3791f7b7ba4 .quad 0x4cf6b8b0b7018b67 .quad 0xc383a821991a73bd .quad 0xab27bc01df320c7a .quad 0xc13d331b84777063 .quad 0x530d4a82eb078a99 // 2^152 * 7 * G .quad 0x004c3630e1f94825 .quad 0x7e2d78268cab535a .quad 0xc7482323cc84ff8b .quad 
0x65ea753f101770b9 .quad 0x6d6973456c9abf9e .quad 0x257fb2fc4900a880 .quad 0x2bacf412c8cfb850 .quad 0x0db3e7e00cbfbd5b .quad 0x3d66fc3ee2096363 .quad 0x81d62c7f61b5cb6b .quad 0x0fbe044213443b1a .quad 0x02a4ec1921e1a1db // 2^152 * 8 * G .quad 0x5ce6259a3b24b8a2 .quad 0xb8577acc45afa0b8 .quad 0xcccbe6e88ba07037 .quad 0x3d143c51127809bf .quad 0xf5c86162f1cf795f .quad 0x118c861926ee57f2 .quad 0x172124851c063578 .quad 0x36d12b5dec067fcf .quad 0x126d279179154557 .quad 0xd5e48f5cfc783a0a .quad 0x36bdb6e8df179bac .quad 0x2ef517885ba82859 // 2^156 * 1 * G .quad 0x88bd438cd11e0d4a .quad 0x30cb610d43ccf308 .quad 0xe09a0e3791937bcc .quad 0x4559135b25b1720c .quad 0x1ea436837c6da1e9 .quad 0xf9c189af1fb9bdbe .quad 0x303001fcce5dd155 .quad 0x28a7c99ebc57be52 .quad 0xb8fd9399e8d19e9d .quad 0x908191cb962423ff .quad 0xb2b948d747c742a3 .quad 0x37f33226d7fb44c4 // 2^156 * 2 * G .quad 0x0dae8767b55f6e08 .quad 0x4a43b3b35b203a02 .quad 0xe3725a6e80af8c79 .quad 0x0f7a7fd1705fa7a3 .quad 0x33912553c821b11d .quad 0x66ed42c241e301df .quad 0x066fcc11104222fd .quad 0x307a3b41c192168f .quad 0x8eeb5d076eb55ce0 .quad 0x2fc536bfaa0d925a .quad 0xbe81830fdcb6c6e8 .quad 0x556c7045827baf52 // 2^156 * 3 * G .quad 0x8e2b517302e9d8b7 .quad 0xe3e52269248714e8 .quad 0xbd4fbd774ca960b5 .quad 0x6f4b4199c5ecada9 .quad 0xb94b90022bf44406 .quad 0xabd4237eff90b534 .quad 0x7600a960faf86d3a .quad 0x2f45abdac2322ee3 .quad 0x61af4912c8ef8a6a .quad 0xe58fa4fe43fb6e5e .quad 0xb5afcc5d6fd427cf .quad 0x6a5393281e1e11eb // 2^156 * 4 * G .quad 0xf3da5139a5d1ee89 .quad 0x8145457cff936988 .quad 0x3f622fed00e188c4 .quad 0x0f513815db8b5a3d .quad 0x0fff04fe149443cf .quad 0x53cac6d9865cddd7 .quad 0x31385b03531ed1b7 .quad 0x5846a27cacd1039d .quad 0x4ff5cdac1eb08717 .quad 0x67e8b29590f2e9bc .quad 0x44093b5e237afa99 .quad 0x0d414bed8708b8b2 // 2^156 * 5 * G .quad 0xcfb68265fd0e75f6 .quad 0xe45b3e28bb90e707 .quad 0x7242a8de9ff92c7a .quad 0x685b3201933202dd .quad 0x81886a92294ac9e8 .quad 0x23162b45d55547be .quad 0x94cfbc4403715983 .quad 0x50eb8fdb134bc401 .quad 0xc0b73ec6d6b330cd .quad 0x84e44807132faff1 .quad 0x732b7352c4a5dee1 .quad 0x5d7c7cf1aa7cd2d2 // 2^156 * 6 * G .quad 0xaf3b46bf7a4aafa2 .quad 0xb78705ec4d40d411 .quad 0x114f0c6aca7c15e3 .quad 0x3f364faaa9489d4d .quad 0x33d1013e9b73a562 .quad 0x925cef5748ec26e1 .quad 0xa7fce614dd468058 .quad 0x78b0fad41e9aa438 .quad 0xbf56a431ed05b488 .quad 0xa533e66c9c495c7e .quad 0xe8652baf87f3651a .quad 0x0241800059d66c33 // 2^156 * 7 * G .quad 0xceb077fea37a5be4 .quad 0xdb642f02e5a5eeb7 .quad 0xc2e6d0c5471270b8 .quad 0x4771b65538e4529c .quad 0x28350c7dcf38ea01 .quad 0x7c6cdbc0b2917ab6 .quad 0xace7cfbe857082f7 .quad 0x4d2845aba2d9a1e0 .quad 0xbb537fe0447070de .quad 0xcba744436dd557df .quad 0xd3b5a3473600dbcb .quad 0x4aeabbe6f9ffd7f8 // 2^156 * 8 * G .quad 0x4630119e40d8f78c .quad 0xa01a9bc53c710e11 .quad 0x486d2b258910dd79 .quad 0x1e6c47b3db0324e5 .quad 0x6a2134bcc4a9c8f2 .quad 0xfbf8fd1c8ace2e37 .quad 0x000ae3049911a0ba .quad 0x046e3a616bc89b9e .quad 0x14e65442f03906be .quad 0x4a019d54e362be2a .quad 0x68ccdfec8dc230c7 .quad 0x7cfb7e3faf6b861c // 2^160 * 1 * G .quad 0x4637974e8c58aedc .quad 0xb9ef22fbabf041a4 .quad 0xe185d956e980718a .quad 0x2f1b78fab143a8a6 .quad 0x96eebffb305b2f51 .quad 0xd3f938ad889596b8 .quad 0xf0f52dc746d5dd25 .quad 0x57968290bb3a0095 .quad 0xf71ab8430a20e101 .quad 0xf393658d24f0ec47 .quad 0xcf7509a86ee2eed1 .quad 0x7dc43e35dc2aa3e1 // 2^160 * 2 * G .quad 0x85966665887dd9c3 .quad 0xc90f9b314bb05355 .quad 0xc6e08df8ef2079b1 .quad 0x7ef72016758cc12f .quad 0x5a782a5c273e9718 .quad 
0x3576c6995e4efd94 .quad 0x0f2ed8051f237d3e .quad 0x044fb81d82d50a99 .quad 0xc1df18c5a907e3d9 .quad 0x57b3371dce4c6359 .quad 0xca704534b201bb49 .quad 0x7f79823f9c30dd2e // 2^160 * 3 * G .quad 0x8334d239a3b513e8 .quad 0xc13670d4b91fa8d8 .quad 0x12b54136f590bd33 .quad 0x0a4e0373d784d9b4 .quad 0x6a9c1ff068f587ba .quad 0x0827894e0050c8de .quad 0x3cbf99557ded5be7 .quad 0x64a9b0431c06d6f0 .quad 0x2eb3d6a15b7d2919 .quad 0xb0b4f6a0d53a8235 .quad 0x7156ce4389a45d47 .quad 0x071a7d0ace18346c // 2^160 * 4 * G .quad 0xd3072daac887ba0b .quad 0x01262905bfa562ee .quad 0xcf543002c0ef768b .quad 0x2c3bcc7146ea7e9c .quad 0xcc0c355220e14431 .quad 0x0d65950709b15141 .quad 0x9af5621b209d5f36 .quad 0x7c69bcf7617755d3 .quad 0x07f0d7eb04e8295f .quad 0x10db18252f50f37d .quad 0xe951a9a3171798d7 .quad 0x6f5a9a7322aca51d // 2^160 * 5 * G .quad 0x8ba1000c2f41c6c5 .quad 0xc49f79c10cfefb9b .quad 0x4efa47703cc51c9f .quad 0x494e21a2e147afca .quad 0xe729d4eba3d944be .quad 0x8d9e09408078af9e .quad 0x4525567a47869c03 .quad 0x02ab9680ee8d3b24 .quad 0xefa48a85dde50d9a .quad 0x219a224e0fb9a249 .quad 0xfa091f1dd91ef6d9 .quad 0x6b5d76cbea46bb34 // 2^160 * 6 * G .quad 0x8857556cec0cd994 .quad 0x6472dc6f5cd01dba .quad 0xaf0169148f42b477 .quad 0x0ae333f685277354 .quad 0xe0f941171e782522 .quad 0xf1e6ae74036936d3 .quad 0x408b3ea2d0fcc746 .quad 0x16fb869c03dd313e .quad 0x288e199733b60962 .quad 0x24fc72b4d8abe133 .quad 0x4811f7ed0991d03e .quad 0x3f81e38b8f70d075 // 2^160 * 7 * G .quad 0x7f910fcc7ed9affe .quad 0x545cb8a12465874b .quad 0xa8397ed24b0c4704 .quad 0x50510fc104f50993 .quad 0x0adb7f355f17c824 .quad 0x74b923c3d74299a4 .quad 0xd57c3e8bcbf8eaf7 .quad 0x0ad3e2d34cdedc3d .quad 0x6f0c0fc5336e249d .quad 0x745ede19c331cfd9 .quad 0xf2d6fd0009eefe1c .quad 0x127c158bf0fa1ebe // 2^160 * 8 * G .quad 0xf6197c422e9879a2 .quad 0xa44addd452ca3647 .quad 0x9b413fc14b4eaccb .quad 0x354ef87d07ef4f68 .quad 0xdea28fc4ae51b974 .quad 0x1d9973d3744dfe96 .quad 0x6240680b873848a8 .quad 0x4ed82479d167df95 .quad 0xfee3b52260c5d975 .quad 0x50352efceb41b0b8 .quad 0x8808ac30a9f6653c .quad 0x302d92d20539236d // 2^164 * 1 * G .quad 0x4c59023fcb3efb7c .quad 0x6c2fcb99c63c2a94 .quad 0xba4190e2c3c7e084 .quad 0x0e545daea51874d9 .quad 0x957b8b8b0df53c30 .quad 0x2a1c770a8e60f098 .quad 0xbbc7a670345796de .quad 0x22a48f9a90c99bc9 .quad 0x6b7dc0dc8d3fac58 .quad 0x5497cd6ce6e42bfd .quad 0x542f7d1bf400d305 .quad 0x4159f47f048d9136 // 2^164 * 2 * G .quad 0x20ad660839e31e32 .quad 0xf81e1bd58405be50 .quad 0xf8064056f4dabc69 .quad 0x14d23dd4ce71b975 .quad 0x748515a8bbd24839 .quad 0x77128347afb02b55 .quad 0x50ba2ac649a2a17f .quad 0x060525513ad730f1 .quad 0xf2398e098aa27f82 .quad 0x6d7982bb89a1b024 .quad 0xfa694084214dd24c .quad 0x71ab966fa32301c3 // 2^164 * 3 * G .quad 0x2dcbd8e34ded02fc .quad 0x1151f3ec596f22aa .quad 0xbca255434e0328da .quad 0x35768fbe92411b22 .quad 0xb1088a0702809955 .quad 0x43b273ea0b43c391 .quad 0xca9b67aefe0686ed .quad 0x605eecbf8335f4ed .quad 0x83200a656c340431 .quad 0x9fcd71678ee59c2f .quad 0x75d4613f71300f8a .quad 0x7a912faf60f542f9 // 2^164 * 4 * G .quad 0xb204585e5edc1a43 .quad 0x9f0e16ee5897c73c .quad 0x5b82c0ae4e70483c .quad 0x624a170e2bddf9be .quad 0x253f4f8dfa2d5597 .quad 0x25e49c405477130c .quad 0x00c052e5996b1102 .quad 0x33cb966e33bb6c4a .quad 0x597028047f116909 .quad 0x828ac41c1e564467 .quad 0x70417dbde6217387 .quad 0x721627aefbac4384 // 2^164 * 5 * G .quad 0x97d03bc38736add5 .quad 0x2f1422afc532b130 .quad 0x3aa68a057101bbc4 .quad 0x4c946cf7e74f9fa7 .quad 0xfd3097bc410b2f22 .quad 0xf1a05da7b5cfa844 .quad 0x61289a1def57ca74 .quad 
0x245ea199bb821902 .quad 0xaedca66978d477f8 .quad 0x1898ba3c29117fe1 .quad 0xcf73f983720cbd58 .quad 0x67da12e6b8b56351 // 2^164 * 6 * G .quad 0x7067e187b4bd6e07 .quad 0x6e8f0203c7d1fe74 .quad 0x93c6aa2f38c85a30 .quad 0x76297d1f3d75a78a .quad 0x2b7ef3d38ec8308c .quad 0x828fd7ec71eb94ab .quad 0x807c3b36c5062abd .quad 0x0cb64cb831a94141 .quad 0x3030fc33534c6378 .quad 0xb9635c5ce541e861 .quad 0x15d9a9bed9b2c728 .quad 0x49233ea3f3775dcb // 2^164 * 7 * G .quad 0x629398fa8dbffc3a .quad 0xe12fe52dd54db455 .quad 0xf3be11dfdaf25295 .quad 0x628b140dce5e7b51 .quad 0x7b3985fe1c9f249b .quad 0x4fd6b2d5a1233293 .quad 0xceb345941adf4d62 .quad 0x6987ff6f542de50c .quad 0x47e241428f83753c .quad 0x6317bebc866af997 .quad 0xdabb5b433d1a9829 .quad 0x074d8d245287fb2d // 2^164 * 8 * G .quad 0x8337d9cd440bfc31 .quad 0x729d2ca1af318fd7 .quad 0xa040a4a4772c2070 .quad 0x46002ef03a7349be .quad 0x481875c6c0e31488 .quad 0x219429b2e22034b4 .quad 0x7223c98a31283b65 .quad 0x3420d60b342277f9 .quad 0xfaa23adeaffe65f7 .quad 0x78261ed45be0764c .quad 0x441c0a1e2f164403 .quad 0x5aea8e567a87d395 // 2^168 * 1 * G .quad 0x7813c1a2bca4283d .quad 0xed62f091a1863dd9 .quad 0xaec7bcb8c268fa86 .quad 0x10e5d3b76f1cae4c .quad 0x2dbc6fb6e4e0f177 .quad 0x04e1bf29a4bd6a93 .quad 0x5e1966d4787af6e8 .quad 0x0edc5f5eb426d060 .quad 0x5453bfd653da8e67 .quad 0xe9dc1eec24a9f641 .quad 0xbf87263b03578a23 .quad 0x45b46c51361cba72 // 2^168 * 2 * G .quad 0xa9402abf314f7fa1 .quad 0xe257f1dc8e8cf450 .quad 0x1dbbd54b23a8be84 .quad 0x2177bfa36dcb713b .quad 0xce9d4ddd8a7fe3e4 .quad 0xab13645676620e30 .quad 0x4b594f7bb30e9958 .quad 0x5c1c0aef321229df .quad 0x37081bbcfa79db8f .quad 0x6048811ec25f59b3 .quad 0x087a76659c832487 .quad 0x4ae619387d8ab5bb // 2^168 * 3 * G .quad 0x8ddbf6aa5344a32e .quad 0x7d88eab4b41b4078 .quad 0x5eb0eb974a130d60 .quad 0x1a00d91b17bf3e03 .quad 0x61117e44985bfb83 .quad 0xfce0462a71963136 .quad 0x83ac3448d425904b .quad 0x75685abe5ba43d64 .quad 0x6e960933eb61f2b2 .quad 0x543d0fa8c9ff4952 .quad 0xdf7275107af66569 .quad 0x135529b623b0e6aa // 2^168 * 4 * G .quad 0x18f0dbd7add1d518 .quad 0x979f7888cfc11f11 .quad 0x8732e1f07114759b .quad 0x79b5b81a65ca3a01 .quad 0xf5c716bce22e83fe .quad 0xb42beb19e80985c1 .quad 0xec9da63714254aae .quad 0x5972ea051590a613 .quad 0x0fd4ac20dc8f7811 .quad 0x9a9ad294ac4d4fa8 .quad 0xc01b2d64b3360434 .quad 0x4f7e9c95905f3bdb // 2^168 * 5 * G .quad 0x62674bbc5781302e .quad 0xd8520f3989addc0f .quad 0x8c2999ae53fbd9c6 .quad 0x31993ad92e638e4c .quad 0x71c8443d355299fe .quad 0x8bcd3b1cdbebead7 .quad 0x8092499ef1a49466 .quad 0x1942eec4a144adc8 .quad 0x7dac5319ae234992 .quad 0x2c1b3d910cea3e92 .quad 0x553ce494253c1122 .quad 0x2a0a65314ef9ca75 // 2^168 * 6 * G .quad 0x2db7937ff7f927c2 .quad 0xdb741f0617d0a635 .quad 0x5982f3a21155af76 .quad 0x4cf6e218647c2ded .quad 0xcf361acd3c1c793a .quad 0x2f9ebcac5a35bc3b .quad 0x60e860e9a8cda6ab .quad 0x055dc39b6dea1a13 .quad 0xb119227cc28d5bb6 .quad 0x07e24ebc774dffab .quad 0xa83c78cee4a32c89 .quad 0x121a307710aa24b6 // 2^168 * 7 * G .quad 0xe4db5d5e9f034a97 .quad 0xe153fc093034bc2d .quad 0x460546919551d3b1 .quad 0x333fc76c7a40e52d .quad 0xd659713ec77483c9 .quad 0x88bfe077b82b96af .quad 0x289e28231097bcd3 .quad 0x527bb94a6ced3a9b .quad 0x563d992a995b482e .quad 0x3405d07c6e383801 .quad 0x485035de2f64d8e5 .quad 0x6b89069b20a7a9f7 // 2^168 * 8 * G .quad 0x812aa0416270220d .quad 0x995a89faf9245b4e .quad 0xffadc4ce5072ef05 .quad 0x23bc2103aa73eb73 .quad 0x4082fa8cb5c7db77 .quad 0x068686f8c734c155 .quad 0x29e6c8d9f6e7a57e .quad 0x0473d308a7639bcf .quad 0xcaee792603589e05 .quad 
0x2b4b421246dcc492 .quad 0x02a1ef74e601a94f .quad 0x102f73bfde04341a // 2^172 * 1 * G .quad 0xb5a2d50c7ec20d3e .quad 0xc64bdd6ea0c97263 .quad 0x56e89052c1ff734d .quad 0x4929c6f72b2ffaba .quad 0x358ecba293a36247 .quad 0xaf8f9862b268fd65 .quad 0x412f7e9968a01c89 .quad 0x5786f312cd754524 .quad 0x337788ffca14032c .quad 0xf3921028447f1ee3 .quad 0x8b14071f231bccad .quad 0x4c817b4bf2344783 // 2^172 * 2 * G .quad 0x0ff853852871b96e .quad 0xe13e9fab60c3f1bb .quad 0xeefd595325344402 .quad 0x0a37c37075b7744b .quad 0x413ba057a40b4484 .quad 0xba4c2e1a4f5f6a43 .quad 0x614ba0a5aee1d61c .quad 0x78a1531a8b05dc53 .quad 0x6cbdf1703ad0562b .quad 0x8ecf4830c92521a3 .quad 0xdaebd303fd8424e7 .quad 0x72ad82a42e5ec56f // 2^172 * 3 * G .quad 0x3f9e8e35bafb65f6 .quad 0x39d69ec8f27293a1 .quad 0x6cb8cd958cf6a3d0 .quad 0x1734778173adae6d .quad 0xc368939167024bc3 .quad 0x8e69d16d49502fda .quad 0xfcf2ec3ce45f4b29 .quad 0x065f669ea3b4cbc4 .quad 0x8a00aec75532db4d .quad 0xb869a4e443e31bb1 .quad 0x4a0f8552d3a7f515 .quad 0x19adeb7c303d7c08 // 2^172 * 4 * G .quad 0xc720cb6153ead9a3 .quad 0x55b2c97f512b636e .quad 0xb1e35b5fd40290b1 .quad 0x2fd9ccf13b530ee2 .quad 0x9d05ba7d43c31794 .quad 0x2470c8ff93322526 .quad 0x8323dec816197438 .quad 0x2852709881569b53 .quad 0x07bd475b47f796b8 .quad 0xd2c7b013542c8f54 .quad 0x2dbd23f43b24f87e .quad 0x6551afd77b0901d6 // 2^172 * 5 * G .quad 0x4546baaf54aac27f .quad 0xf6f66fecb2a45a28 .quad 0x582d1b5b562bcfe8 .quad 0x44b123f3920f785f .quad 0x68a24ce3a1d5c9ac .quad 0xbb77a33d10ff6461 .quad 0x0f86ce4425d3166e .quad 0x56507c0950b9623b .quad 0x1206f0b7d1713e63 .quad 0x353fe3d915bafc74 .quad 0x194ceb970ad9d94d .quad 0x62fadd7cf9d03ad3 // 2^172 * 6 * G .quad 0xc6b5967b5598a074 .quad 0x5efe91ce8e493e25 .quad 0xd4b72c4549280888 .quad 0x20ef1149a26740c2 .quad 0x3cd7bc61e7ce4594 .quad 0xcd6b35a9b7dd267e .quad 0xa080abc84366ef27 .quad 0x6ec7c46f59c79711 .quad 0x2f07ad636f09a8a2 .quad 0x8697e6ce24205e7d .quad 0xc0aefc05ee35a139 .quad 0x15e80958b5f9d897 // 2^172 * 7 * G .quad 0x25a5ef7d0c3e235b .quad 0x6c39c17fbe134ee7 .quad 0xc774e1342dc5c327 .quad 0x021354b892021f39 .quad 0x4dd1ed355bb061c4 .quad 0x42dc0cef941c0700 .quad 0x61305dc1fd86340e .quad 0x56b2cc930e55a443 .quad 0x1df79da6a6bfc5a2 .quad 0x02f3a2749fde4369 .quad 0xb323d9f2cda390a7 .quad 0x7be0847b8774d363 // 2^172 * 8 * G .quad 0x8c99cc5a8b3f55c3 .quad 0x0611d7253fded2a0 .quad 0xed2995ff36b70a36 .quad 0x1f699a54d78a2619 .quad 0x1466f5af5307fa11 .quad 0x817fcc7ded6c0af2 .quad 0x0a6de44ec3a4a3fb .quad 0x74071475bc927d0b .quad 0xe77292f373e7ea8a .quad 0x296537d2cb045a31 .quad 0x1bd0653ed3274fde .quad 0x2f9a2c4476bd2966 // 2^176 * 1 * G .quad 0xeb18b9ab7f5745c6 .quad 0x023a8aee5787c690 .quad 0xb72712da2df7afa9 .quad 0x36597d25ea5c013d .quad 0xa2b4dae0b5511c9a .quad 0x7ac860292bffff06 .quad 0x981f375df5504234 .quad 0x3f6bd725da4ea12d .quad 0x734d8d7b106058ac .quad 0xd940579e6fc6905f .quad 0x6466f8f99202932d .quad 0x7b7ecc19da60d6d0 // 2^176 * 2 * G .quad 0x78c2373c695c690d .quad 0xdd252e660642906e .quad 0x951d44444ae12bd2 .quad 0x4235ad7601743956 .quad 0x6dae4a51a77cfa9b .quad 0x82263654e7a38650 .quad 0x09bbffcd8f2d82db .quad 0x03bedc661bf5caba .quad 0x6258cb0d078975f5 .quad 0x492942549189f298 .quad 0xa0cab423e2e36ee4 .quad 0x0e7ce2b0cdf066a1 // 2^176 * 3 * G .quad 0xc494643ac48c85a3 .quad 0xfd361df43c6139ad .quad 0x09db17dd3ae94d48 .quad 0x666e0a5d8fb4674a .quad 0xfea6fedfd94b70f9 .quad 0xf130c051c1fcba2d .quad 0x4882d47e7f2fab89 .quad 0x615256138aeceeb5 .quad 0x2abbf64e4870cb0d .quad 0xcd65bcf0aa458b6b .quad 0x9abe4eba75e8985d .quad 
0x7f0bc810d514dee4 // 2^176 * 4 * G .quad 0xb9006ba426f4136f .quad 0x8d67369e57e03035 .quad 0xcbc8dfd94f463c28 .quad 0x0d1f8dbcf8eedbf5 .quad 0x83ac9dad737213a0 .quad 0x9ff6f8ba2ef72e98 .quad 0x311e2edd43ec6957 .quad 0x1d3a907ddec5ab75 .quad 0xba1693313ed081dc .quad 0x29329fad851b3480 .quad 0x0128013c030321cb .quad 0x00011b44a31bfde3 // 2^176 * 5 * G .quad 0x3fdfa06c3fc66c0c .quad 0x5d40e38e4dd60dd2 .quad 0x7ae38b38268e4d71 .quad 0x3ac48d916e8357e1 .quad 0x16561f696a0aa75c .quad 0xc1bf725c5852bd6a .quad 0x11a8dd7f9a7966ad .quad 0x63d988a2d2851026 .quad 0x00120753afbd232e .quad 0xe92bceb8fdd8f683 .quad 0xf81669b384e72b91 .quad 0x33fad52b2368a066 // 2^176 * 6 * G .quad 0x540649c6c5e41e16 .quad 0x0af86430333f7735 .quad 0xb2acfcd2f305e746 .quad 0x16c0f429a256dca7 .quad 0x8d2cc8d0c422cfe8 .quad 0x072b4f7b05a13acb .quad 0xa3feb6e6ecf6a56f .quad 0x3cc355ccb90a71e2 .quad 0xe9b69443903e9131 .quad 0xb8a494cb7a5637ce .quad 0xc87cd1a4baba9244 .quad 0x631eaf426bae7568 // 2^176 * 7 * G .quad 0xb3e90410da66fe9f .quad 0x85dd4b526c16e5a6 .quad 0xbc3d97611ef9bf83 .quad 0x5599648b1ea919b5 .quad 0x47d975b9a3700de8 .quad 0x7280c5fbe2f80552 .quad 0x53658f2732e45de1 .quad 0x431f2c7f665f80b5 .quad 0xd6026344858f7b19 .quad 0x14ab352fa1ea514a .quad 0x8900441a2090a9d7 .quad 0x7b04715f91253b26 // 2^176 * 8 * G .quad 0x83edbd28acf6ae43 .quad 0x86357c8b7d5c7ab4 .quad 0xc0404769b7eb2c44 .quad 0x59b37bf5c2f6583f .quad 0xb376c280c4e6bac6 .quad 0x970ed3dd6d1d9b0b .quad 0xb09a9558450bf944 .quad 0x48d0acfa57cde223 .quad 0xb60f26e47dabe671 .quad 0xf1d1a197622f3a37 .quad 0x4208ce7ee9960394 .quad 0x16234191336d3bdb // 2^180 * 1 * G .quad 0xf19aeac733a63aef .quad 0x2c7fba5d4442454e .quad 0x5da87aa04795e441 .quad 0x413051e1a4e0b0f5 .quad 0x852dd1fd3d578bbe .quad 0x2b65ce72c3286108 .quad 0x658c07f4eace2273 .quad 0x0933f804ec38ab40 .quad 0xa7ab69798d496476 .quad 0x8121aadefcb5abc8 .quad 0xa5dc12ef7b539472 .quad 0x07fd47065e45351a // 2^180 * 2 * G .quad 0xc8583c3d258d2bcd .quad 0x17029a4daf60b73f .quad 0xfa0fc9d6416a3781 .quad 0x1c1e5fba38b3fb23 .quad 0x304211559ae8e7c3 .quad 0xf281b229944882a5 .quad 0x8a13ac2e378250e4 .quad 0x014afa0954ba48f4 .quad 0xcb3197001bb3666c .quad 0x330060524bffecb9 .quad 0x293711991a88233c .quad 0x291884363d4ed364 // 2^180 * 3 * G .quad 0x033c6805dc4babfa .quad 0x2c15bf5e5596ecc1 .quad 0x1bc70624b59b1d3b .quad 0x3ede9850a19f0ec5 .quad 0xfb9d37c3bc1ab6eb .quad 0x02be14534d57a240 .quad 0xf4d73415f8a5e1f6 .quad 0x5964f4300ccc8188 .quad 0xe44a23152d096800 .quad 0x5c08c55970866996 .quad 0xdf2db60a46affb6e .quad 0x579155c1f856fd89 // 2^180 * 4 * G .quad 0x96324edd12e0c9ef .quad 0x468b878df2420297 .quad 0x199a3776a4f573be .quad 0x1e7fbcf18e91e92a .quad 0xb5f16b630817e7a6 .quad 0x808c69233c351026 .quad 0x324a983b54cef201 .quad 0x53c092084a485345 .quad 0xd2d41481f1cbafbf .quad 0x231d2db6716174e5 .quad 0x0b7d7656e2a55c98 .quad 0x3e955cd82aa495f6 // 2^180 * 5 * G .quad 0xe48f535e3ed15433 .quad 0xd075692a0d7270a3 .quad 0x40fbd21daade6387 .quad 0x14264887cf4495f5 .quad 0xab39f3ef61bb3a3f .quad 0x8eb400652eb9193e .quad 0xb5de6ecc38c11f74 .quad 0x654d7e9626f3c49f .quad 0xe564cfdd5c7d2ceb .quad 0x82eeafded737ccb9 .quad 0x6107db62d1f9b0ab .quad 0x0b6baac3b4358dbb // 2^180 * 6 * G .quad 0x7ae62bcb8622fe98 .quad 0x47762256ceb891af .quad 0x1a5a92bcf2e406b4 .quad 0x7d29401784e41501 .quad 0x204abad63700a93b .quad 0xbe0023d3da779373 .quad 0xd85f0346633ab709 .quad 0x00496dc490820412 .quad 0x1c74b88dc27e6360 .quad 0x074854268d14850c .quad 0xa145fb7b3e0dcb30 .quad 0x10843f1b43803b23 // 2^180 * 7 * G .quad 0xc5f90455376276dd .quad 
0xce59158dd7645cd9 .quad 0x92f65d511d366b39 .quad 0x11574b6e526996c4 .quad 0xd56f672de324689b .quad 0xd1da8aedb394a981 .quad 0xdd7b58fe9168cfed .quad 0x7ce246cd4d56c1e8 .quad 0xb8f4308e7f80be53 .quad 0x5f3cb8cb34a9d397 .quad 0x18a961bd33cc2b2c .quad 0x710045fb3a9af671 // 2^180 * 8 * G .quad 0x73f93d36101b95eb .quad 0xfaef33794f6f4486 .quad 0x5651735f8f15e562 .quad 0x7fa3f19058b40da1 .quad 0xa03fc862059d699e .quad 0x2370cfa19a619e69 .quad 0xc4fe3b122f823deb .quad 0x1d1b056fa7f0844e .quad 0x1bc64631e56bf61f .quad 0xd379ab106e5382a3 .quad 0x4d58c57e0540168d .quad 0x566256628442d8e4 // 2^184 * 1 * G .quad 0xb9e499def6267ff6 .quad 0x7772ca7b742c0843 .quad 0x23a0153fe9a4f2b1 .quad 0x2cdfdfecd5d05006 .quad 0xdd499cd61ff38640 .quad 0x29cd9bc3063625a0 .quad 0x51e2d8023dd73dc3 .quad 0x4a25707a203b9231 .quad 0x2ab7668a53f6ed6a .quad 0x304242581dd170a1 .quad 0x4000144c3ae20161 .quad 0x5721896d248e49fc // 2^184 * 2 * G .quad 0x0b6e5517fd181bae .quad 0x9022629f2bb963b4 .quad 0x5509bce932064625 .quad 0x578edd74f63c13da .quad 0x285d5091a1d0da4e .quad 0x4baa6fa7b5fe3e08 .quad 0x63e5177ce19393b3 .quad 0x03c935afc4b030fd .quad 0x997276c6492b0c3d .quad 0x47ccc2c4dfe205fc .quad 0xdcd29b84dd623a3c .quad 0x3ec2ab590288c7a2 // 2^184 * 3 * G .quad 0xa1a0d27be4d87bb9 .quad 0xa98b4deb61391aed .quad 0x99a0ddd073cb9b83 .quad 0x2dd5c25a200fcace .quad 0xa7213a09ae32d1cb .quad 0x0f2b87df40f5c2d5 .quad 0x0baea4c6e81eab29 .quad 0x0e1bf66c6adbac5e .quad 0xe2abd5e9792c887e .quad 0x1a020018cb926d5d .quad 0xbfba69cdbaae5f1e .quad 0x730548b35ae88f5f // 2^184 * 4 * G .quad 0xc43551a3cba8b8ee .quad 0x65a26f1db2115f16 .quad 0x760f4f52ab8c3850 .quad 0x3043443b411db8ca .quad 0x805b094ba1d6e334 .quad 0xbf3ef17709353f19 .quad 0x423f06cb0622702b .quad 0x585a2277d87845dd .quad 0xa18a5f8233d48962 .quad 0x6698c4b5ec78257f .quad 0xa78e6fa5373e41ff .quad 0x7656278950ef981f // 2^184 * 5 * G .quad 0x38c3cf59d51fc8c0 .quad 0x9bedd2fd0506b6f2 .quad 0x26bf109fab570e8f .quad 0x3f4160a8c1b846a6 .quad 0xe17073a3ea86cf9d .quad 0x3a8cfbb707155fdc .quad 0x4853e7fc31838a8e .quad 0x28bbf484b613f616 .quad 0xf2612f5c6f136c7c .quad 0xafead107f6dd11be .quad 0x527e9ad213de6f33 .quad 0x1e79cb358188f75d // 2^184 * 6 * G .quad 0x013436c3eef7e3f1 .quad 0x828b6a7ffe9e10f8 .quad 0x7ff908e5bcf9defc .quad 0x65d7951b3a3b3831 .quad 0x77e953d8f5e08181 .quad 0x84a50c44299dded9 .quad 0xdc6c2d0c864525e5 .quad 0x478ab52d39d1f2f4 .quad 0x66a6a4d39252d159 .quad 0xe5dde1bc871ac807 .quad 0xb82c6b40a6c1c96f .quad 0x16d87a411a212214 // 2^184 * 7 * G .quad 0xb3bd7e5a42066215 .quad 0x879be3cd0c5a24c1 .quad 0x57c05db1d6f994b7 .quad 0x28f87c8165f38ca6 .quad 0xfba4d5e2d54e0583 .quad 0xe21fafd72ebd99fa .quad 0x497ac2736ee9778f .quad 0x1f990b577a5a6dde .quad 0xa3344ead1be8f7d6 .quad 0x7d1e50ebacea798f .quad 0x77c6569e520de052 .quad 0x45882fe1534d6d3e // 2^184 * 8 * G .quad 0x6669345d757983d6 .quad 0x62b6ed1117aa11a6 .quad 0x7ddd1857985e128f .quad 0x688fe5b8f626f6dd .quad 0xd8ac9929943c6fe4 .quad 0xb5f9f161a38392a2 .quad 0x2699db13bec89af3 .quad 0x7dcf843ce405f074 .quad 0x6c90d6484a4732c0 .quad 0xd52143fdca563299 .quad 0xb3be28c3915dc6e1 .quad 0x6739687e7327191b // 2^188 * 1 * G .quad 0x9f65c5ea200814cf .quad 0x840536e169a31740 .quad 0x8b0ed13925c8b4ad .quad 0x0080dbafe936361d .quad 0x8ce5aad0c9cb971f .quad 0x1156aaa99fd54a29 .quad 0x41f7247015af9b78 .quad 0x1fe8cca8420f49aa .quad 0x72a1848f3c0cc82a .quad 0x38c560c2877c9e54 .quad 0x5004e228ce554140 .quad 0x042418a103429d71 // 2^188 * 2 * G .quad 0x899dea51abf3ff5f .quad 0x9b93a8672fc2d8ba .quad 0x2c38cb97be6ebd5c .quad 
0x114d578497263b5d .quad 0x58e84c6f20816247 .quad 0x8db2b2b6e36fd793 .quad 0x977182561d484d85 .quad 0x0822024f8632abd7 .quad 0xb301bb7c6b1beca3 .quad 0x55393f6dc6eb1375 .quad 0x910d281097b6e4eb .quad 0x1ad4548d9d479ea3 // 2^188 * 3 * G .quad 0xcd5a7da0389a48fd .quad 0xb38fa4aa9a78371e .quad 0xc6d9761b2cdb8e6c .quad 0x35cf51dbc97e1443 .quad 0xa06fe66d0fe9fed3 .quad 0xa8733a401c587909 .quad 0x30d14d800df98953 .quad 0x41ce5876c7b30258 .quad 0x59ac3bc5d670c022 .quad 0xeae67c109b119406 .quad 0x9798bdf0b3782fda .quad 0x651e3201fd074092 // 2^188 * 4 * G .quad 0xd63d8483ef30c5cf .quad 0x4cd4b4962361cc0c .quad 0xee90e500a48426ac .quad 0x0af51d7d18c14eeb .quad 0xa57ba4a01efcae9e .quad 0x769f4beedc308a94 .quad 0xd1f10eeb3603cb2e .quad 0x4099ce5e7e441278 .quad 0x1ac98e4f8a5121e9 .quad 0x7dae9544dbfa2fe0 .quad 0x8320aa0dd6430df9 .quad 0x667282652c4a2fb5 // 2^188 * 5 * G .quad 0x874621f4d86bc9ab .quad 0xb54c7bbe56fe6fea .quad 0x077a24257fadc22c .quad 0x1ab53be419b90d39 .quad 0xada8b6e02946db23 .quad 0x1c0ce51a7b253ab7 .quad 0x8448c85a66dd485b .quad 0x7f1fc025d0675adf .quad 0xd8ee1b18319ea6aa .quad 0x004d88083a21f0da .quad 0x3bd6aa1d883a4f4b .quad 0x4db9a3a6dfd9fd14 // 2^188 * 6 * G .quad 0x8ce7b23bb99c0755 .quad 0x35c5d6edc4f50f7a .quad 0x7e1e2ed2ed9b50c3 .quad 0x36305f16e8934da1 .quad 0xd95b00bbcbb77c68 .quad 0xddbc846a91f17849 .quad 0x7cf700aebe28d9b3 .quad 0x5ce1285c85d31f3e .quad 0x31b6972d98b0bde8 .quad 0x7d920706aca6de5b .quad 0xe67310f8908a659f .quad 0x50fac2a6efdf0235 // 2^188 * 7 * G .quad 0xf3d3a9f35b880f5a .quad 0xedec050cdb03e7c2 .quad 0xa896981ff9f0b1a2 .quad 0x49a4ae2bac5e34a4 .quad 0x295b1c86f6f449bc .quad 0x51b2e84a1f0ab4dd .quad 0xc001cb30aa8e551d .quad 0x6a28d35944f43662 .quad 0x28bb12ee04a740e0 .quad 0x14313bbd9bce8174 .quad 0x72f5b5e4e8c10c40 .quad 0x7cbfb19936adcd5b // 2^188 * 8 * G .quad 0xa311ddc26b89792d .quad 0x1b30b4c6da512664 .quad 0x0ca77b4ccf150859 .quad 0x1de443df1b009408 .quad 0x8e793a7acc36e6e0 .quad 0xf9fab7a37d586eed .quad 0x3a4f9692bae1f4e4 .quad 0x1c14b03eff5f447e .quad 0x19647bd114a85291 .quad 0x57b76cb21034d3af .quad 0x6329db440f9d6dfa .quad 0x5ef43e586a571493 // 2^192 * 1 * G .quad 0xef782014385675a6 .quad 0xa2649f30aafda9e8 .quad 0x4cd1eb505cdfa8cb .quad 0x46115aba1d4dc0b3 .quad 0xa66dcc9dc80c1ac0 .quad 0x97a05cf41b38a436 .quad 0xa7ebf3be95dbd7c6 .quad 0x7da0b8f68d7e7dab .quad 0xd40f1953c3b5da76 .quad 0x1dac6f7321119e9b .quad 0x03cc6021feb25960 .quad 0x5a5f887e83674b4b // 2^192 * 2 * G .quad 0x8f6301cf70a13d11 .quad 0xcfceb815350dd0c4 .quad 0xf70297d4a4bca47e .quad 0x3669b656e44d1434 .quad 0x9e9628d3a0a643b9 .quad 0xb5c3cb00e6c32064 .quad 0x9b5302897c2dec32 .quad 0x43e37ae2d5d1c70c .quad 0x387e3f06eda6e133 .quad 0x67301d5199a13ac0 .quad 0xbd5ad8f836263811 .quad 0x6a21e6cd4fd5e9be // 2^192 * 3 * G .quad 0xf1c6170a3046e65f .quad 0x58712a2a00d23524 .quad 0x69dbbd3c8c82b755 .quad 0x586bf9f1a195ff57 .quad 0xef4129126699b2e3 .quad 0x71d30847708d1301 .quad 0x325432d01182b0bd .quad 0x45371b07001e8b36 .quad 0xa6db088d5ef8790b .quad 0x5278f0dc610937e5 .quad 0xac0349d261a16eb8 .quad 0x0eafb03790e52179 // 2^192 * 4 * G .quad 0x960555c13748042f .quad 0x219a41e6820baa11 .quad 0x1c81f73873486d0c .quad 0x309acc675a02c661 .quad 0x5140805e0f75ae1d .quad 0xec02fbe32662cc30 .quad 0x2cebdf1eea92396d .quad 0x44ae3344c5435bb3 .quad 0x9cf289b9bba543ee .quad 0xf3760e9d5ac97142 .quad 0x1d82e5c64f9360aa .quad 0x62d5221b7f94678f // 2^192 * 5 * G .quad 0x524c299c18d0936d .quad 0xc86bb56c8a0c1a0c .quad 0xa375052edb4a8631 .quad 0x5c0efde4bc754562 .quad 0x7585d4263af77a3c .quad 
0xdfae7b11fee9144d .quad 0xa506708059f7193d .quad 0x14f29a5383922037 .quad 0xdf717edc25b2d7f5 .quad 0x21f970db99b53040 .quad 0xda9234b7c3ed4c62 .quad 0x5e72365c7bee093e // 2^192 * 6 * G .quad 0x575bfc074571217f .quad 0x3779675d0694d95b .quad 0x9a0a37bbf4191e33 .quad 0x77f1104c47b4eabc .quad 0x7d9339062f08b33e .quad 0x5b9659e5df9f32be .quad 0xacff3dad1f9ebdfd .quad 0x70b20555cb7349b7 .quad 0xbe5113c555112c4c .quad 0x6688423a9a881fcd .quad 0x446677855e503b47 .quad 0x0e34398f4a06404a // 2^192 * 7 * G .quad 0xb67d22d93ecebde8 .quad 0x09b3e84127822f07 .quad 0x743fa61fb05b6d8d .quad 0x5e5405368a362372 .quad 0x18930b093e4b1928 .quad 0x7de3e10e73f3f640 .quad 0xf43217da73395d6f .quad 0x6f8aded6ca379c3e .quad 0xe340123dfdb7b29a .quad 0x487b97e1a21ab291 .quad 0xf9967d02fde6949e .quad 0x780de72ec8d3de97 // 2^192 * 8 * G .quad 0x0ae28545089ae7bc .quad 0x388ddecf1c7f4d06 .quad 0x38ac15510a4811b8 .quad 0x0eb28bf671928ce4 .quad 0x671feaf300f42772 .quad 0x8f72eb2a2a8c41aa .quad 0x29a17fd797373292 .quad 0x1defc6ad32b587a6 .quad 0xaf5bbe1aef5195a7 .quad 0x148c1277917b15ed .quad 0x2991f7fb7ae5da2e .quad 0x467d201bf8dd2867 // 2^196 * 1 * G .quad 0x7906ee72f7bd2e6b .quad 0x05d270d6109abf4e .quad 0x8d5cfe45b941a8a4 .quad 0x44c218671c974287 .quad 0x745f9d56296bc318 .quad 0x993580d4d8152e65 .quad 0xb0e5b13f5839e9ce .quad 0x51fc2b28d43921c0 .quad 0x1b8fd11795e2a98c .quad 0x1c4e5ee12b6b6291 .quad 0x5b30e7107424b572 .quad 0x6e6b9de84c4f4ac6 // 2^196 * 2 * G .quad 0xdff25fce4b1de151 .quad 0xd841c0c7e11c4025 .quad 0x2554b3c854749c87 .quad 0x2d292459908e0df9 .quad 0x6b7c5f10f80cb088 .quad 0x736b54dc56e42151 .quad 0xc2b620a5c6ef99c4 .quad 0x5f4c802cc3a06f42 .quad 0x9b65c8f17d0752da .quad 0x881ce338c77ee800 .quad 0xc3b514f05b62f9e3 .quad 0x66ed5dd5bec10d48 // 2^196 * 3 * G .quad 0x7d38a1c20bb2089d .quad 0x808334e196ccd412 .quad 0xc4a70b8c6c97d313 .quad 0x2eacf8bc03007f20 .quad 0xf0adf3c9cbca047d .quad 0x81c3b2cbf4552f6b .quad 0xcfda112d44735f93 .quad 0x1f23a0c77e20048c .quad 0xf235467be5bc1570 .quad 0x03d2d9020dbab38c .quad 0x27529aa2fcf9e09e .quad 0x0840bef29d34bc50 // 2^196 * 4 * G .quad 0x796dfb35dc10b287 .quad 0x27176bcd5c7ff29d .quad 0x7f3d43e8c7b24905 .quad 0x0304f5a191c54276 .quad 0xcd54e06b7f37e4eb .quad 0x8cc15f87f5e96cca .quad 0xb8248bb0d3597dce .quad 0x246affa06074400c .quad 0x37d88e68fbe45321 .quad 0x86097548c0d75032 .quad 0x4e9b13ef894a0d35 .quad 0x25a83cac5753d325 // 2^196 * 5 * G .quad 0x10222f48eed8165e .quad 0x623fc1234b8bcf3a .quad 0x1e145c09c221e8f0 .quad 0x7ccfa59fca782630 .quad 0x9f0f66293952b6e2 .quad 0x33db5e0e0934267b .quad 0xff45252bd609fedc .quad 0x06be10f5c506e0c9 .quad 0x1a9615a9b62a345f .quad 0x22050c564a52fecc .quad 0xa7a2788528bc0dfe .quad 0x5e82770a1a1ee71d // 2^196 * 6 * G .quad 0x35425183ad896a5c .quad 0xe8673afbe78d52f6 .quad 0x2c66f25f92a35f64 .quad 0x09d04f3b3b86b102 .quad 0xe802e80a42339c74 .quad 0x34175166a7fffae5 .quad 0x34865d1f1c408cae .quad 0x2cca982c605bc5ee .quad 0xfd2d5d35197dbe6e .quad 0x207c2eea8be4ffa3 .quad 0x2613d8db325ae918 .quad 0x7a325d1727741d3e // 2^196 * 7 * G .quad 0xd036b9bbd16dfde2 .quad 0xa2055757c497a829 .quad 0x8e6cc966a7f12667 .quad 0x4d3b1a791239c180 .quad 0xecd27d017e2a076a .quad 0xd788689f1636495e .quad 0x52a61af0919233e5 .quad 0x2a479df17bb1ae64 .quad 0x9e5eee8e33db2710 .quad 0x189854ded6c43ca5 .quad 0xa41c22c592718138 .quad 0x27ad5538a43a5e9b // 2^196 * 8 * G .quad 0x2746dd4b15350d61 .quad 0xd03fcbc8ee9521b7 .quad 0xe86e365a138672ca .quad 0x510e987f7e7d89e2 .quad 0xcb5a7d638e47077c .quad 0x8db7536120a1c059 .quad 0x549e1e4d8bedfdcc .quad 
0x080153b7503b179d .quad 0xdda69d930a3ed3e3 .quad 0x3d386ef1cd60a722 .quad 0xc817ad58bdaa4ee6 .quad 0x23be8d554fe7372a // 2^200 * 1 * G .quad 0x95fe919a74ef4fad .quad 0x3a827becf6a308a2 .quad 0x964e01d309a47b01 .quad 0x71c43c4f5ba3c797 .quad 0xbc1ef4bd567ae7a9 .quad 0x3f624cb2d64498bd .quad 0xe41064d22c1f4ec8 .quad 0x2ef9c5a5ba384001 .quad 0xb6fd6df6fa9e74cd .quad 0xf18278bce4af267a .quad 0x8255b3d0f1ef990e .quad 0x5a758ca390c5f293 // 2^200 * 2 * G .quad 0xa2b72710d9462495 .quad 0x3aa8c6d2d57d5003 .quad 0xe3d400bfa0b487ca .quad 0x2dbae244b3eb72ec .quad 0x8ce0918b1d61dc94 .quad 0x8ded36469a813066 .quad 0xd4e6a829afe8aad3 .quad 0x0a738027f639d43f .quad 0x980f4a2f57ffe1cc .quad 0x00670d0de1839843 .quad 0x105c3f4a49fb15fd .quad 0x2698ca635126a69c // 2^200 * 3 * G .quad 0xe765318832b0ba78 .quad 0x381831f7925cff8b .quad 0x08a81b91a0291fcc .quad 0x1fb43dcc49caeb07 .quad 0x2e3d702f5e3dd90e .quad 0x9e3f0918e4d25386 .quad 0x5e773ef6024da96a .quad 0x3c004b0c4afa3332 .quad 0x9aa946ac06f4b82b .quad 0x1ca284a5a806c4f3 .quad 0x3ed3265fc6cd4787 .quad 0x6b43fd01cd1fd217 // 2^200 * 4 * G .quad 0xc7a75d4b4697c544 .quad 0x15fdf848df0fffbf .quad 0x2868b9ebaa46785a .quad 0x5a68d7105b52f714 .quad 0xb5c742583e760ef3 .quad 0x75dc52b9ee0ab990 .quad 0xbf1427c2072b923f .quad 0x73420b2d6ff0d9f0 .quad 0xaf2cf6cb9e851e06 .quad 0x8f593913c62238c4 .quad 0xda8ab89699fbf373 .quad 0x3db5632fea34bc9e // 2^200 * 5 * G .quad 0xf46eee2bf75dd9d8 .quad 0x0d17b1f6396759a5 .quad 0x1bf2d131499e7273 .quad 0x04321adf49d75f13 .quad 0x2e4990b1829825d5 .quad 0xedeaeb873e9a8991 .quad 0xeef03d394c704af8 .quad 0x59197ea495df2b0e .quad 0x04e16019e4e55aae .quad 0xe77b437a7e2f92e9 .quad 0xc7ce2dc16f159aa4 .quad 0x45eafdc1f4d70cc0 // 2^200 * 6 * G .quad 0x698401858045d72b .quad 0x4c22faa2cf2f0651 .quad 0x941a36656b222dc6 .quad 0x5a5eebc80362dade .quad 0xb60e4624cfccb1ed .quad 0x59dbc292bd5c0395 .quad 0x31a09d1ddc0481c9 .quad 0x3f73ceea5d56d940 .quad 0xb7a7bfd10a4e8dc6 .quad 0xbe57007e44c9b339 .quad 0x60c1207f1557aefa .quad 0x26058891266218db // 2^200 * 7 * G .quad 0x59f704a68360ff04 .quad 0xc3d93fde7661e6f4 .quad 0x831b2a7312873551 .quad 0x54ad0c2e4e615d57 .quad 0x4c818e3cc676e542 .quad 0x5e422c9303ceccad .quad 0xec07cccab4129f08 .quad 0x0dedfa10b24443b8 .quad 0xee3b67d5b82b522a .quad 0x36f163469fa5c1eb .quad 0xa5b4d2f26ec19fd3 .quad 0x62ecb2baa77a9408 // 2^200 * 8 * G .quad 0xe5ed795261152b3d .quad 0x4962357d0eddd7d1 .quad 0x7482c8d0b96b4c71 .quad 0x2e59f919a966d8be .quad 0x92072836afb62874 .quad 0x5fcd5e8579e104a5 .quad 0x5aad01adc630a14a .quad 0x61913d5075663f98 .quad 0x0dc62d361a3231da .quad 0xfa47583294200270 .quad 0x02d801513f9594ce .quad 0x3ddbc2a131c05d5c // 2^204 * 1 * G .quad 0x3f50a50a4ffb81ef .quad 0xb1e035093bf420bf .quad 0x9baa8e1cc6aa2cd0 .quad 0x32239861fa237a40 .quad 0xfb735ac2004a35d1 .quad 0x31de0f433a6607c3 .quad 0x7b8591bfc528d599 .quad 0x55be9a25f5bb050c .quad 0x0d005acd33db3dbf .quad 0x0111b37c80ac35e2 .quad 0x4892d66c6f88ebeb .quad 0x770eadb16508fbcd // 2^204 * 2 * G .quad 0x8451f9e05e4e89dd .quad 0xc06302ffbc793937 .quad 0x5d22749556a6495c .quad 0x09a6755ca05603fb .quad 0xf1d3b681a05071b9 .quad 0x2207659a3592ff3a .quad 0x5f0169297881e40e .quad 0x16bedd0e86ba374e .quad 0x5ecccc4f2c2737b5 .quad 0x43b79e0c2dccb703 .quad 0x33e008bc4ec43df3 .quad 0x06c1b840f07566c0 // 2^204 * 3 * G .quad 0x7688a5c6a388f877 .quad 0x02a96c14deb2b6ac .quad 0x64c9f3431b8c2af8 .quad 0x3628435554a1eed6 .quad 0x69ee9e7f9b02805c .quad 0xcbff828a547d1640 .quad 0x3d93a869b2430968 .quad 0x46b7b8cd3fe26972 .quad 0xe9812086fe7eebe0 .quad 
0x4cba6be72f515437 .quad 0x1d04168b516efae9 .quad 0x5ea1391043982cb9 // 2^204 * 4 * G .quad 0x49125c9cf4702ee1 .quad 0x4520b71f8b25b32d .quad 0x33193026501fef7e .quad 0x656d8997c8d2eb2b .quad 0x6f2b3be4d5d3b002 .quad 0xafec33d96a09c880 .quad 0x035f73a4a8bcc4cc .quad 0x22c5b9284662198b .quad 0xcb58c8fe433d8939 .quad 0x89a0cb2e6a8d7e50 .quad 0x79ca955309fbbe5a .quad 0x0c626616cd7fc106 // 2^204 * 5 * G .quad 0x1ffeb80a4879b61f .quad 0x6396726e4ada21ed .quad 0x33c7b093368025ba .quad 0x471aa0c6f3c31788 .quad 0x8fdfc379fbf454b1 .quad 0x45a5a970f1a4b771 .quad 0xac921ef7bad35915 .quad 0x42d088dca81c2192 .quad 0x8fda0f37a0165199 .quad 0x0adadb77c8a0e343 .quad 0x20fbfdfcc875e820 .quad 0x1cf2bea80c2206e7 // 2^204 * 6 * G .quad 0xc2ddf1deb36202ac .quad 0x92a5fe09d2e27aa5 .quad 0x7d1648f6fc09f1d3 .quad 0x74c2cc0513bc4959 .quad 0x982d6e1a02c0412f .quad 0x90fa4c83db58e8fe .quad 0x01c2f5bcdcb18bc0 .quad 0x686e0c90216abc66 .quad 0x1fadbadba54395a7 .quad 0xb41a02a0ae0da66a .quad 0xbf19f598bba37c07 .quad 0x6a12b8acde48430d // 2^204 * 7 * G .quad 0xf8daea1f39d495d9 .quad 0x592c190e525f1dfc .quad 0xdb8cbd04c9991d1b .quad 0x11f7fda3d88f0cb7 .quad 0x793bdd801aaeeb5f .quad 0x00a2a0aac1518871 .quad 0xe8a373a31f2136b4 .quad 0x48aab888fc91ef19 .quad 0x041f7e925830f40e .quad 0x002d6ca979661c06 .quad 0x86dc9ff92b046a2e .quad 0x760360928b0493d1 // 2^204 * 8 * G .quad 0x21bb41c6120cf9c6 .quad 0xeab2aa12decda59b .quad 0xc1a72d020aa48b34 .quad 0x215d4d27e87d3b68 .quad 0xb43108e5695a0b05 .quad 0x6cb00ee8ad37a38b .quad 0x5edad6eea3537381 .quad 0x3f2602d4b6dc3224 .quad 0xc8b247b65bcaf19c .quad 0x49779dc3b1b2c652 .quad 0x89a180bbd5ece2e2 .quad 0x13f098a3cec8e039 // 2^208 * 1 * G .quad 0x9adc0ff9ce5ec54b .quad 0x039c2a6b8c2f130d .quad 0x028007c7f0f89515 .quad 0x78968314ac04b36b .quad 0xf3aa57a22796bb14 .quad 0x883abab79b07da21 .quad 0xe54be21831a0391c .quad 0x5ee7fb38d83205f9 .quad 0x538dfdcb41446a8e .quad 0xa5acfda9434937f9 .quad 0x46af908d263c8c78 .quad 0x61d0633c9bca0d09 // 2^208 * 2 * G .quad 0x63744935ffdb2566 .quad 0xc5bd6b89780b68bb .quad 0x6f1b3280553eec03 .quad 0x6e965fd847aed7f5 .quad 0xada328bcf8fc73df .quad 0xee84695da6f037fc .quad 0x637fb4db38c2a909 .quad 0x5b23ac2df8067bdc .quad 0x9ad2b953ee80527b .quad 0xe88f19aafade6d8d .quad 0x0e711704150e82cf .quad 0x79b9bbb9dd95dedc // 2^208 * 3 * G .quad 0xebb355406a3126c2 .quad 0xd26383a868c8c393 .quad 0x6c0c6429e5b97a82 .quad 0x5065f158c9fd2147 .quad 0xd1997dae8e9f7374 .quad 0xa032a2f8cfbb0816 .quad 0xcd6cba126d445f0a .quad 0x1ba811460accb834 .quad 0x708169fb0c429954 .quad 0xe14600acd76ecf67 .quad 0x2eaab98a70e645ba .quad 0x3981f39e58a4faf2 // 2^208 * 4 * G .quad 0x18fb8a7559230a93 .quad 0x1d168f6960e6f45d .quad 0x3a85a94514a93cb5 .quad 0x38dc083705acd0fd .quad 0xc845dfa56de66fde .quad 0xe152a5002c40483a .quad 0xe9d2e163c7b4f632 .quad 0x30f4452edcbc1b65 .quad 0x856d2782c5759740 .quad 0xfa134569f99cbecc .quad 0x8844fc73c0ea4e71 .quad 0x632d9a1a593f2469 // 2^208 * 5 * G .quad 0xf6bb6b15b807cba6 .quad 0x1823c7dfbc54f0d7 .quad 0xbb1d97036e29670b .quad 0x0b24f48847ed4a57 .quad 0xbf09fd11ed0c84a7 .quad 0x63f071810d9f693a .quad 0x21908c2d57cf8779 .quad 0x3a5a7df28af64ba2 .quad 0xdcdad4be511beac7 .quad 0xa4538075ed26ccf2 .quad 0xe19cff9f005f9a65 .quad 0x34fcf74475481f63 // 2^208 * 6 * G .quad 0xc197e04c789767ca .quad 0xb8714dcb38d9467d .quad 0x55de888283f95fa8 .quad 0x3d3bdc164dfa63f7 .quad 0xa5bb1dab78cfaa98 .quad 0x5ceda267190b72f2 .quad 0x9309c9110a92608e .quad 0x0119a3042fb374b0 .quad 0x67a2d89ce8c2177d .quad 0x669da5f66895d0c1 .quad 0xf56598e5b282a2b0 .quad 
0x56c088f1ede20a73 // 2^208 * 7 * G .quad 0x336d3d1110a86e17 .quad 0xd7f388320b75b2fa .quad 0xf915337625072988 .quad 0x09674c6b99108b87 .quad 0x581b5fac24f38f02 .quad 0xa90be9febae30cbd .quad 0x9a2169028acf92f0 .quad 0x038b7ea48359038f .quad 0x9f4ef82199316ff8 .quad 0x2f49d282eaa78d4f .quad 0x0971a5ab5aef3174 .quad 0x6e5e31025969eb65 // 2^208 * 8 * G .quad 0xb16c62f587e593fb .quad 0x4999eddeca5d3e71 .quad 0xb491c1e014cc3e6d .quad 0x08f5114789a8dba8 .quad 0x3304fb0e63066222 .quad 0xfb35068987acba3f .quad 0xbd1924778c1061a3 .quad 0x3058ad43d1838620 .quad 0x323c0ffde57663d0 .quad 0x05c3df38a22ea610 .quad 0xbdc78abdac994f9a .quad 0x26549fa4efe3dc99 // 2^212 * 1 * G .quad 0x738b38d787ce8f89 .quad 0xb62658e24179a88d .quad 0x30738c9cf151316d .quad 0x49128c7f727275c9 .quad 0x04dbbc17f75396b9 .quad 0x69e6a2d7d2f86746 .quad 0xc6409d99f53eabc6 .quad 0x606175f6332e25d2 .quad 0x4021370ef540e7dd .quad 0x0910d6f5a1f1d0a5 .quad 0x4634aacd5b06b807 .quad 0x6a39e6356944f235 // 2^212 * 2 * G .quad 0x96cd5640df90f3e7 .quad 0x6c3a760edbfa25ea .quad 0x24f3ef0959e33cc4 .quad 0x42889e7e530d2e58 .quad 0x1da1965774049e9d .quad 0xfbcd6ea198fe352b .quad 0xb1cbcd50cc5236a6 .quad 0x1f5ec83d3f9846e2 .quad 0x8efb23c3328ccb75 .quad 0xaf42a207dd876ee9 .quad 0x20fbdadc5dfae796 .quad 0x241e246b06bf9f51 // 2^212 * 3 * G .quad 0x29e68e57ad6e98f6 .quad 0x4c9260c80b462065 .quad 0x3f00862ea51ebb4b .quad 0x5bc2c77fb38d9097 .quad 0x7eaafc9a6280bbb8 .quad 0x22a70f12f403d809 .quad 0x31ce40bb1bfc8d20 .quad 0x2bc65635e8bd53ee .quad 0xe8d5dc9fa96bad93 .quad 0xe58fb17dde1947dc .quad 0x681532ea65185fa3 .quad 0x1fdd6c3b034a7830 // 2^212 * 4 * G .quad 0x0a64e28c55dc18fe .quad 0xe3df9e993399ebdd .quad 0x79ac432370e2e652 .quad 0x35ff7fc33ae4cc0e .quad 0x9c13a6a52dd8f7a9 .quad 0x2dbb1f8c3efdcabf .quad 0x961e32405e08f7b5 .quad 0x48c8a121bbe6c9e5 .quad 0xfc415a7c59646445 .quad 0xd224b2d7c128b615 .quad 0x6035c9c905fbb912 .quad 0x42d7a91274429fab // 2^212 * 5 * G .quad 0x4e6213e3eaf72ed3 .quad 0x6794981a43acd4e7 .quad 0xff547cde6eb508cb .quad 0x6fed19dd10fcb532 .quad 0xa9a48947933da5bc .quad 0x4a58920ec2e979ec .quad 0x96d8800013e5ac4c .quad 0x453692d74b48b147 .quad 0xdd775d99a8559c6f .quad 0xf42a2140df003e24 .quad 0x5223e229da928a66 .quad 0x063f46ba6d38f22c // 2^212 * 6 * G .quad 0xd2d242895f536694 .quad 0xca33a2c542939b2c .quad 0x986fada6c7ddb95c .quad 0x5a152c042f712d5d .quad 0x39843cb737346921 .quad 0xa747fb0738c89447 .quad 0xcb8d8031a245307e .quad 0x67810f8e6d82f068 .quad 0x3eeb8fbcd2287db4 .quad 0x72c7d3a301a03e93 .quad 0x5473e88cbd98265a .quad 0x7324aa515921b403 // 2^212 * 7 * G .quad 0x857942f46c3cbe8e .quad 0xa1d364b14730c046 .quad 0x1c8ed914d23c41bf .quad 0x0838e161eef6d5d2 .quad 0xad23f6dae82354cb .quad 0x6962502ab6571a6d .quad 0x9b651636e38e37d1 .quad 0x5cac5005d1a3312f .quad 0x8cc154cce9e39904 .quad 0x5b3a040b84de6846 .quad 0xc4d8a61cb1be5d6e .quad 0x40fb897bd8861f02 // 2^212 * 8 * G .quad 0x84c5aa9062de37a1 .quad 0x421da5000d1d96e1 .quad 0x788286306a9242d9 .quad 0x3c5e464a690d10da .quad 0xe57ed8475ab10761 .quad 0x71435e206fd13746 .quad 0x342f824ecd025632 .quad 0x4b16281ea8791e7b .quad 0xd1c101d50b813381 .quad 0xdee60f1176ee6828 .quad 0x0cb68893383f6409 .quad 0x6183c565f6ff484a // 2^216 * 1 * G .quad 0x741d5a461e6bf9d6 .quad 0x2305b3fc7777a581 .quad 0xd45574a26474d3d9 .quad 0x1926e1dc6401e0ff .quad 0xdb468549af3f666e .quad 0xd77fcf04f14a0ea5 .quad 0x3df23ff7a4ba0c47 .quad 0x3a10dfe132ce3c85 .quad 0xe07f4e8aea17cea0 .quad 0x2fd515463a1fc1fd .quad 0x175322fd31f2c0f1 .quad 0x1fa1d01d861e5d15 // 2^216 * 2 * G .quad 0xcc8055947d599832 .quad 
0x1e4656da37f15520 .quad 0x99f6f7744e059320 .quad 0x773563bc6a75cf33 .quad 0x38dcac00d1df94ab .quad 0x2e712bddd1080de9 .quad 0x7f13e93efdd5e262 .quad 0x73fced18ee9a01e5 .quad 0x06b1e90863139cb3 .quad 0xa493da67c5a03ecd .quad 0x8d77cec8ad638932 .quad 0x1f426b701b864f44 // 2^216 * 3 * G .quad 0xefc9264c41911c01 .quad 0xf1a3b7b817a22c25 .quad 0x5875da6bf30f1447 .quad 0x4e1af5271d31b090 .quad 0xf17e35c891a12552 .quad 0xb76b8153575e9c76 .quad 0xfa83406f0d9b723e .quad 0x0b76bb1b3fa7e438 .quad 0x08b8c1f97f92939b .quad 0xbe6771cbd444ab6e .quad 0x22e5646399bb8017 .quad 0x7b6dd61eb772a955 // 2^216 * 4 * G .quad 0xb7adc1e850f33d92 .quad 0x7998fa4f608cd5cf .quad 0xad962dbd8dfc5bdb .quad 0x703e9bceaf1d2f4f .quad 0x5730abf9ab01d2c7 .quad 0x16fb76dc40143b18 .quad 0x866cbe65a0cbb281 .quad 0x53fa9b659bff6afe .quad 0x6c14c8e994885455 .quad 0x843a5d6665aed4e5 .quad 0x181bb73ebcd65af1 .quad 0x398d93e5c4c61f50 // 2^216 * 5 * G .quad 0x1c4bd16733e248f3 .quad 0xbd9e128715bf0a5f .quad 0xd43f8cf0a10b0376 .quad 0x53b09b5ddf191b13 .quad 0xc3877c60d2e7e3f2 .quad 0x3b34aaa030828bb1 .quad 0x283e26e7739ef138 .quad 0x699c9c9002c30577 .quad 0xf306a7235946f1cc .quad 0x921718b5cce5d97d .quad 0x28cdd24781b4e975 .quad 0x51caf30c6fcdd907 // 2^216 * 6 * G .quad 0xa60ba7427674e00a .quad 0x630e8570a17a7bf3 .quad 0x3758563dcf3324cc .quad 0x5504aa292383fdaa .quad 0x737af99a18ac54c7 .quad 0x903378dcc51cb30f .quad 0x2b89bc334ce10cc7 .quad 0x12ae29c189f8e99a .quad 0xa99ec0cb1f0d01cf .quad 0x0dd1efcc3a34f7ae .quad 0x55ca7521d09c4e22 .quad 0x5fd14fe958eba5ea // 2^216 * 7 * G .quad 0xb5dc2ddf2845ab2c .quad 0x069491b10a7fe993 .quad 0x4daaf3d64002e346 .quad 0x093ff26e586474d1 .quad 0x3c42fe5ebf93cb8e .quad 0xbedfa85136d4565f .quad 0xe0f0859e884220e8 .quad 0x7dd73f960725d128 .quad 0xb10d24fe68059829 .quad 0x75730672dbaf23e5 .quad 0x1367253ab457ac29 .quad 0x2f59bcbc86b470a4 // 2^216 * 8 * G .quad 0x83847d429917135f .quad 0xad1b911f567d03d7 .quad 0x7e7748d9be77aad1 .quad 0x5458b42e2e51af4a .quad 0x7041d560b691c301 .quad 0x85201b3fadd7e71e .quad 0x16c2e16311335585 .quad 0x2aa55e3d010828b1 .quad 0xed5192e60c07444f .quad 0x42c54e2d74421d10 .quad 0x352b4c82fdb5c864 .quad 0x13e9004a8a768664 // 2^220 * 1 * G .quad 0xcbb5b5556c032bff .quad 0xdf7191b729297a3a .quad 0xc1ff7326aded81bb .quad 0x71ade8bb68be03f5 .quad 0x1e6284c5806b467c .quad 0xc5f6997be75d607b .quad 0x8b67d958b378d262 .quad 0x3d88d66a81cd8b70 .quad 0x8b767a93204ed789 .quad 0x762fcacb9fa0ae2a .quad 0x771febcc6dce4887 .quad 0x343062158ff05fb3 // 2^220 * 2 * G .quad 0xe05da1a7e1f5bf49 .quad 0x26457d6dd4736092 .quad 0x77dcb07773cc32f6 .quad 0x0a5d94969cdd5fcd .quad 0xfce219072a7b31b4 .quad 0x4d7adc75aa578016 .quad 0x0ec276a687479324 .quad 0x6d6d9d5d1fda4beb .quad 0x22b1a58ae9b08183 .quad 0xfd95d071c15c388b .quad 0xa9812376850a0517 .quad 0x33384cbabb7f335e // 2^220 * 3 * G .quad 0x3c6fa2680ca2c7b5 .quad 0x1b5082046fb64fda .quad 0xeb53349c5431d6de .quad 0x5278b38f6b879c89 .quad 0x33bc627a26218b8d .quad 0xea80b21fc7a80c61 .quad 0x9458b12b173e9ee6 .quad 0x076247be0e2f3059 .quad 0x52e105f61416375a .quad 0xec97af3685abeba4 .quad 0x26e6b50623a67c36 .quad 0x5cf0e856f3d4fb01 // 2^220 * 4 * G .quad 0xf6c968731ae8cab4 .quad 0x5e20741ecb4f92c5 .quad 0x2da53be58ccdbc3e .quad 0x2dddfea269970df7 .quad 0xbeaece313db342a8 .quad 0xcba3635b842db7ee .quad 0xe88c6620817f13ef .quad 0x1b9438aa4e76d5c6 .quad 0x8a50777e166f031a .quad 0x067b39f10fb7a328 .quad 0x1925c9a6010fbd76 .quad 0x6df9b575cc740905 // 2^220 * 5 * G .quad 0x42c1192927f6bdcf .quad 0x8f91917a403d61ca .quad 0xdc1c5a668b9e1f61 .quad 
0x1596047804ec0f8d .quad 0xecdfc35b48cade41 .quad 0x6a88471fb2328270 .quad 0x740a4a2440a01b6a .quad 0x471e5796003b5f29 .quad 0xda96bbb3aced37ac .quad 0x7a2423b5e9208cea .quad 0x24cc5c3038aebae2 .quad 0x50c356afdc5dae2f // 2^220 * 6 * G .quad 0x09dcbf4341c30318 .quad 0xeeba061183181dce .quad 0xc179c0cedc1e29a1 .quad 0x1dbf7b89073f35b0 .quad 0xcfed9cdf1b31b964 .quad 0xf486a9858ca51af3 .quad 0x14897265ea8c1f84 .quad 0x784a53dd932acc00 .quad 0x2d99f9df14fc4920 .quad 0x76ccb60cc4499fe5 .quad 0xa4132cbbe5cf0003 .quad 0x3f93d82354f000ea // 2^220 * 7 * G .quad 0x8183e7689e04ce85 .quad 0x678fb71e04465341 .quad 0xad92058f6688edac .quad 0x5da350d3532b099a .quad 0xeaac12d179e14978 .quad 0xff923ff3bbebff5e .quad 0x4af663e40663ce27 .quad 0x0fd381a811a5f5ff .quad 0xf256aceca436df54 .quad 0x108b6168ae69d6e8 .quad 0x20d986cb6b5d036c .quad 0x655957b9fee2af50 // 2^220 * 8 * G .quad 0xaea8b07fa902030f .quad 0xf88c766af463d143 .quad 0x15b083663c787a60 .quad 0x08eab1148267a4a8 .quad 0xbdc1409bd002d0ac .quad 0x66660245b5ccd9a6 .quad 0x82317dc4fade85ec .quad 0x02fe934b6ad7df0d .quad 0xef5cf100cfb7ea74 .quad 0x22897633a1cb42ac .quad 0xd4ce0c54cef285e2 .quad 0x30408c048a146a55 // 2^224 * 1 * G .quad 0x739d8845832fcedb .quad 0xfa38d6c9ae6bf863 .quad 0x32bc0dcab74ffef7 .quad 0x73937e8814bce45e .quad 0xbb2e00c9193b877f .quad 0xece3a890e0dc506b .quad 0xecf3b7c036de649f .quad 0x5f46040898de9e1a .quad 0xb9037116297bf48d .quad 0xa9d13b22d4f06834 .quad 0xe19715574696bdc6 .quad 0x2cf8a4e891d5e835 // 2^224 * 2 * G .quad 0x6d93fd8707110f67 .quad 0xdd4c09d37c38b549 .quad 0x7cb16a4cc2736a86 .quad 0x2049bd6e58252a09 .quad 0x2cb5487e17d06ba2 .quad 0x24d2381c3950196b .quad 0xd7659c8185978a30 .quad 0x7a6f7f2891d6a4f6 .quad 0x7d09fd8d6a9aef49 .quad 0xf0ee60be5b3db90b .quad 0x4c21b52c519ebfd4 .quad 0x6011aadfc545941d // 2^224 * 3 * G .quad 0x5f67926dcf95f83c .quad 0x7c7e856171289071 .quad 0xd6a1e7f3998f7a5b .quad 0x6fc5cc1b0b62f9e0 .quad 0x63ded0c802cbf890 .quad 0xfbd098ca0dff6aaa .quad 0x624d0afdb9b6ed99 .quad 0x69ce18b779340b1e .quad 0xd1ef5528b29879cb .quad 0xdd1aae3cd47e9092 .quad 0x127e0442189f2352 .quad 0x15596b3ae57101f1 // 2^224 * 4 * G .quad 0x462739d23f9179a2 .quad 0xff83123197d6ddcf .quad 0x1307deb553f2148a .quad 0x0d2237687b5f4dda .quad 0x09ff31167e5124ca .quad 0x0be4158bd9c745df .quad 0x292b7d227ef556e5 .quad 0x3aa4e241afb6d138 .quad 0x2cc138bf2a3305f5 .quad 0x48583f8fa2e926c3 .quad 0x083ab1a25549d2eb .quad 0x32fcaa6e4687a36c // 2^224 * 5 * G .quad 0x7bc56e8dc57d9af5 .quad 0x3e0bd2ed9df0bdf2 .quad 0xaac014de22efe4a3 .quad 0x4627e9cefebd6a5c .quad 0x3207a4732787ccdf .quad 0x17e31908f213e3f8 .quad 0xd5b2ecd7f60d964e .quad 0x746f6336c2600be9 .quad 0x3f4af345ab6c971c .quad 0xe288eb729943731f .quad 0x33596a8a0344186d .quad 0x7b4917007ed66293 // 2^224 * 6 * G .quad 0x2d85fb5cab84b064 .quad 0x497810d289f3bc14 .quad 0x476adc447b15ce0c .quad 0x122ba376f844fd7b .quad 0x54341b28dd53a2dd .quad 0xaa17905bdf42fc3f .quad 0x0ff592d94dd2f8f4 .quad 0x1d03620fe08cd37d .quad 0xc20232cda2b4e554 .quad 0x9ed0fd42115d187f .quad 0x2eabb4be7dd479d9 .quad 0x02c70bf52b68ec4c // 2^224 * 7 * G .quad 0xa287ec4b5d0b2fbb .quad 0x415c5790074882ca .quad 0xe044a61ec1d0815c .quad 0x26334f0a409ef5e0 .quad 0xace532bf458d72e1 .quad 0x5be768e07cb73cb5 .quad 0x56cf7d94ee8bbde7 .quad 0x6b0697e3feb43a03 .quad 0xb6c8f04adf62a3c0 .quad 0x3ef000ef076da45d .quad 0x9c9cb95849f0d2a9 .quad 0x1cc37f43441b2fae // 2^224 * 8 * G .quad 0x508f565a5cc7324f .quad 0xd061c4c0e506a922 .quad 0xfb18abdb5c45ac19 .quad 0x6c6809c10380314a .quad 0xd76656f1c9ceaeb9 .quad 
0x1c5b15f818e5656a .quad 0x26e72832844c2334 .quad 0x3a346f772f196838 .quad 0xd2d55112e2da6ac8 .quad 0xe9bd0331b1e851ed .quad 0x960746dd8ec67262 .quad 0x05911b9f6ef7c5d0 // 2^228 * 1 * G .quad 0xe9dcd756b637ff2d .quad 0xec4c348fc987f0c4 .quad 0xced59285f3fbc7b7 .quad 0x3305354793e1ea87 .quad 0x01c18980c5fe9f94 .quad 0xcd656769716fd5c8 .quad 0x816045c3d195a086 .quad 0x6e2b7f3266cc7982 .quad 0xcc802468f7c3568f .quad 0x9de9ba8219974cb3 .quad 0xabb7229cb5b81360 .quad 0x44e2017a6fbeba62 // 2^228 * 2 * G .quad 0xc4c2a74354dab774 .quad 0x8e5d4c3c4eaf031a .quad 0xb76c23d242838f17 .quad 0x749a098f68dce4ea .quad 0x87f82cf3b6ca6ecd .quad 0x580f893e18f4a0c2 .quad 0x058930072604e557 .quad 0x6cab6ac256d19c1d .quad 0xdcdfe0a02cc1de60 .quad 0x032665ff51c5575b .quad 0x2c0c32f1073abeeb .quad 0x6a882014cd7b8606 // 2^228 * 3 * G .quad 0xa52a92fea4747fb5 .quad 0xdc12a4491fa5ab89 .quad 0xd82da94bb847a4ce .quad 0x4d77edce9512cc4e .quad 0xd111d17caf4feb6e .quad 0x050bba42b33aa4a3 .quad 0x17514c3ceeb46c30 .quad 0x54bedb8b1bc27d75 .quad 0x77c8e14577e2189c .quad 0xa3e46f6aff99c445 .quad 0x3144dfc86d335343 .quad 0x3a96559e7c4216a9 // 2^228 * 4 * G .quad 0x12550d37f42ad2ee .quad 0x8b78e00498a1fbf5 .quad 0x5d53078233894cb2 .quad 0x02c84e4e3e498d0c .quad 0x4493896880baaa52 .quad 0x4c98afc4f285940e .quad 0xef4aa79ba45448b6 .quad 0x5278c510a57aae7f .quad 0xa54dd074294c0b94 .quad 0xf55d46b8df18ffb6 .quad 0xf06fecc58dae8366 .quad 0x588657668190d165 // 2^228 * 5 * G .quad 0xd47712311aef7117 .quad 0x50343101229e92c7 .quad 0x7a95e1849d159b97 .quad 0x2449959b8b5d29c9 .quad 0xbf5834f03de25cc3 .quad 0xb887c8aed6815496 .quad 0x5105221a9481e892 .quad 0x6760ed19f7723f93 .quad 0x669ba3b7ac35e160 .quad 0x2eccf73fba842056 .quad 0x1aec1f17c0804f07 .quad 0x0d96bc031856f4e7 // 2^228 * 6 * G .quad 0x3318be7775c52d82 .quad 0x4cb764b554d0aab9 .quad 0xabcf3d27cc773d91 .quad 0x3bf4d1848123288a .quad 0xb1d534b0cc7505e1 .quad 0x32cd003416c35288 .quad 0xcb36a5800762c29d .quad 0x5bfe69b9237a0bf8 .quad 0x183eab7e78a151ab .quad 0xbbe990c999093763 .quad 0xff717d6e4ac7e335 .quad 0x4c5cddb325f39f88 // 2^228 * 7 * G .quad 0xc0f6b74d6190a6eb .quad 0x20ea81a42db8f4e4 .quad 0xa8bd6f7d97315760 .quad 0x33b1d60262ac7c21 .quad 0x57750967e7a9f902 .quad 0x2c37fdfc4f5b467e .quad 0xb261663a3177ba46 .quad 0x3a375e78dc2d532b .quad 0x8141e72f2d4dddea .quad 0xe6eafe9862c607c8 .quad 0x23c28458573cafd0 .quad 0x46b9476f4ff97346 // 2^228 * 8 * G .quad 0x0c1ffea44f901e5c .quad 0x2b0b6fb72184b782 .quad 0xe587ff910114db88 .quad 0x37130f364785a142 .quad 0x1215505c0d58359f .quad 0x2a2013c7fc28c46b .quad 0x24a0a1af89ea664e .quad 0x4400b638a1130e1f .quad 0x3a01b76496ed19c3 .quad 0x31e00ab0ed327230 .quad 0x520a885783ca15b1 .quad 0x06aab9875accbec7 // 2^232 * 1 * G .quad 0xc1339983f5df0ebb .quad 0xc0f3758f512c4cac .quad 0x2cf1130a0bb398e1 .quad 0x6b3cecf9aa270c62 .quad 0x5349acf3512eeaef .quad 0x20c141d31cc1cb49 .quad 0x24180c07a99a688d .quad 0x555ef9d1c64b2d17 .quad 0x36a770ba3b73bd08 .quad 0x624aef08a3afbf0c .quad 0x5737ff98b40946f2 .quad 0x675f4de13381749d // 2^232 * 2 * G .quad 0x0e2c52036b1782fc .quad 0x64816c816cad83b4 .quad 0xd0dcbdd96964073e .quad 0x13d99df70164c520 .quad 0xa12ff6d93bdab31d .quad 0x0725d80f9d652dfe .quad 0x019c4ff39abe9487 .quad 0x60f450b882cd3c43 .quad 0x014b5ec321e5c0ca .quad 0x4fcb69c9d719bfa2 .quad 0x4e5f1c18750023a0 .quad 0x1c06de9e55edac80 // 2^232 * 3 * G .quad 0x990f7ad6a33ec4e2 .quad 0x6608f938be2ee08e .quad 0x9ca143c563284515 .quad 0x4cf38a1fec2db60d .quad 0xffd52b40ff6d69aa .quad 0x34530b18dc4049bb .quad 0x5e4a5c2fa34d9897 .quad 
0x78096f8e7d32ba2d .quad 0xa0aaaa650dfa5ce7 .quad 0xf9c49e2a48b5478c .quad 0x4f09cc7d7003725b .quad 0x373cad3a26091abe // 2^232 * 4 * G .quad 0xb294634d82c9f57c .quad 0x1fcbfde124934536 .quad 0x9e9c4db3418cdb5a .quad 0x0040f3d9454419fc .quad 0xf1bea8fb89ddbbad .quad 0x3bcb2cbc61aeaecb .quad 0x8f58a7bb1f9b8d9d .quad 0x21547eda5112a686 .quad 0xdefde939fd5986d3 .quad 0xf4272c89510a380c .quad 0xb72ba407bb3119b9 .quad 0x63550a334a254df4 // 2^232 * 5 * G .quad 0x6507d6edb569cf37 .quad 0x178429b00ca52ee1 .quad 0xea7c0090eb6bd65d .quad 0x3eea62c7daf78f51 .quad 0x9bba584572547b49 .quad 0xf305c6fae2c408e0 .quad 0x60e8fa69c734f18d .quad 0x39a92bafaa7d767a .quad 0x9d24c713e693274e .quad 0x5f63857768dbd375 .quad 0x70525560eb8ab39a .quad 0x68436a0665c9c4cd // 2^232 * 6 * G .quad 0xbc0235e8202f3f27 .quad 0xc75c00e264f975b0 .quad 0x91a4e9d5a38c2416 .quad 0x17b6e7f68ab789f9 .quad 0x1e56d317e820107c .quad 0xc5266844840ae965 .quad 0xc1e0a1c6320ffc7a .quad 0x5373669c91611472 .quad 0x5d2814ab9a0e5257 .quad 0x908f2084c9cab3fc .quad 0xafcaf5885b2d1eca .quad 0x1cb4b5a678f87d11 // 2^232 * 7 * G .quad 0xb664c06b394afc6c .quad 0x0c88de2498da5fb1 .quad 0x4f8d03164bcad834 .quad 0x330bca78de7434a2 .quad 0x6b74aa62a2a007e7 .quad 0xf311e0b0f071c7b1 .quad 0x5707e438000be223 .quad 0x2dc0fd2d82ef6eac .quad 0x982eff841119744e .quad 0xf9695e962b074724 .quad 0xc58ac14fbfc953fb .quad 0x3c31be1b369f1cf5 // 2^232 * 8 * G .quad 0xb0f4864d08948aee .quad 0x07dc19ee91ba1c6f .quad 0x7975cdaea6aca158 .quad 0x330b61134262d4bb .quad 0xc168bc93f9cb4272 .quad 0xaeb8711fc7cedb98 .quad 0x7f0e52aa34ac8d7a .quad 0x41cec1097e7d55bb .quad 0xf79619d7a26d808a .quad 0xbb1fd49e1d9e156d .quad 0x73d7c36cdba1df27 .quad 0x26b44cd91f28777d // 2^236 * 1 * G .quad 0x300a9035393aa6d8 .quad 0x2b501131a12bb1cd .quad 0x7b1ff677f093c222 .quad 0x4309c1f8cab82bad .quad 0xaf44842db0285f37 .quad 0x8753189047efc8df .quad 0x9574e091f820979a .quad 0x0e378d6069615579 .quad 0xd9fa917183075a55 .quad 0x4bdb5ad26b009fdc .quad 0x7829ad2cd63def0e .quad 0x078fc54975fd3877 // 2^236 * 2 * G .quad 0x87dfbd1428878f2d .quad 0x134636dd1e9421a1 .quad 0x4f17c951257341a3 .quad 0x5df98d4bad296cb8 .quad 0xe2004b5bb833a98a .quad 0x44775dec2d4c3330 .quad 0x3aa244067eace913 .quad 0x272630e3d58e00a9 .quad 0xf3678fd0ecc90b54 .quad 0xf001459b12043599 .quad 0x26725fbc3758b89b .quad 0x4325e4aa73a719ae // 2^236 * 3 * G .quad 0x657dc6ef433c3493 .quad 0x65375e9f80dbf8c3 .quad 0x47fd2d465b372dae .quad 0x4966ab79796e7947 .quad 0xed24629acf69f59d .quad 0x2a4a1ccedd5abbf4 .quad 0x3535ca1f56b2d67b .quad 0x5d8c68d043b1b42d .quad 0xee332d4de3b42b0a .quad 0xd84e5a2b16a4601c .quad 0x78243877078ba3e4 .quad 0x77ed1eb4184ee437 // 2^236 * 4 * G .quad 0xbfd4e13f201839a0 .quad 0xaeefffe23e3df161 .quad 0xb65b04f06b5d1fe3 .quad 0x52e085fb2b62fbc0 .quad 0x185d43f89e92ed1a .quad 0xb04a1eeafe4719c6 .quad 0x499fbe88a6f03f4f .quad 0x5d8b0d2f3c859bdd .quad 0x124079eaa54cf2ba .quad 0xd72465eb001b26e7 .quad 0x6843bcfdc97af7fd .quad 0x0524b42b55eacd02 // 2^236 * 5 * G .quad 0xfd0d5dbee45447b0 .quad 0x6cec351a092005ee .quad 0x99a47844567579cb .quad 0x59d242a216e7fa45 .quad 0xbc18dcad9b829eac .quad 0x23ae7d28b5f579d0 .quad 0xc346122a69384233 .quad 0x1a6110b2e7d4ac89 .quad 0x4f833f6ae66997ac .quad 0x6849762a361839a4 .quad 0x6985dec1970ab525 .quad 0x53045e89dcb1f546 // 2^236 * 6 * G .quad 0xcb8bb346d75353db .quad 0xfcfcb24bae511e22 .quad 0xcba48d40d50ae6ef .quad 0x26e3bae5f4f7cb5d .quad 0x84da3cde8d45fe12 .quad 0xbd42c218e444e2d2 .quad 0xa85196781f7e3598 .quad 0x7642c93f5616e2b2 .quad 0x2323daa74595f8e4 .quad 
0xde688c8b857abeb4 .quad 0x3fc48e961c59326e .quad 0x0b2e73ca15c9b8ba // 2^236 * 7 * G .quad 0xd6bb4428c17f5026 .quad 0x9eb27223fb5a9ca7 .quad 0xe37ba5031919c644 .quad 0x21ce380db59a6602 .quad 0x0e3fbfaf79c03a55 .quad 0x3077af054cbb5acf .quad 0xd5c55245db3de39f .quad 0x015e68c1476a4af7 .quad 0xc1d5285220066a38 .quad 0x95603e523570aef3 .quad 0x832659a7226b8a4d .quad 0x5dd689091f8eedc9 // 2^236 * 8 * G .quad 0xcbac84debfd3c856 .quad 0x1624c348b35ff244 .quad 0xb7f88dca5d9cad07 .quad 0x3b0e574da2c2ebe8 .quad 0x1d022591a5313084 .quad 0xca2d4aaed6270872 .quad 0x86a12b852f0bfd20 .quad 0x56e6c439ad7da748 .quad 0xc704ff4942bdbae6 .quad 0x5e21ade2b2de1f79 .quad 0xe95db3f35652fad8 .quad 0x0822b5378f08ebc1 // 2^240 * 1 * G .quad 0x51f048478f387475 .quad 0xb25dbcf49cbecb3c .quad 0x9aab1244d99f2055 .quad 0x2c709e6c1c10a5d6 .quad 0xe1b7f29362730383 .quad 0x4b5279ffebca8a2c .quad 0xdafc778abfd41314 .quad 0x7deb10149c72610f .quad 0xcb62af6a8766ee7a .quad 0x66cbec045553cd0e .quad 0x588001380f0be4b5 .quad 0x08e68e9ff62ce2ea // 2^240 * 2 * G .quad 0x34ad500a4bc130ad .quad 0x8d38db493d0bd49c .quad 0xa25c3d98500a89be .quad 0x2f1f3f87eeba3b09 .quad 0x2f2d09d50ab8f2f9 .quad 0xacb9218dc55923df .quad 0x4a8f342673766cb9 .quad 0x4cb13bd738f719f5 .quad 0xf7848c75e515b64a .quad 0xa59501badb4a9038 .quad 0xc20d313f3f751b50 .quad 0x19a1e353c0ae2ee8 // 2^240 * 3 * G .quad 0x7d1c7560bafa05c3 .quad 0xb3e1a0a0c6e55e61 .quad 0xe3529718c0d66473 .quad 0x41546b11c20c3486 .quad 0xb42172cdd596bdbd .quad 0x93e0454398eefc40 .quad 0x9fb15347b44109b5 .quad 0x736bd3990266ae34 .quad 0x85532d509334b3b4 .quad 0x46fd114b60816573 .quad 0xcc5f5f30425c8375 .quad 0x412295a2b87fab5c // 2^240 * 4 * G .quad 0x19c99b88f57ed6e9 .quad 0x5393cb266df8c825 .quad 0x5cee3213b30ad273 .quad 0x14e153ebb52d2e34 .quad 0x2e655261e293eac6 .quad 0x845a92032133acdb .quad 0x460975cb7900996b .quad 0x0760bb8d195add80 .quad 0x413e1a17cde6818a .quad 0x57156da9ed69a084 .quad 0x2cbf268f46caccb1 .quad 0x6b34be9bc33ac5f2 // 2^240 * 5 * G .quad 0xf3df2f643a78c0b2 .quad 0x4c3e971ef22e027c .quad 0xec7d1c5e49c1b5a3 .quad 0x2012c18f0922dd2d .quad 0x11fc69656571f2d3 .quad 0xc6c9e845530e737a .quad 0xe33ae7a2d4fe5035 .quad 0x01b9c7b62e6dd30b .quad 0x880b55e55ac89d29 .quad 0x1483241f45a0a763 .quad 0x3d36efdfc2e76c1f .quad 0x08af5b784e4bade8 // 2^240 * 6 * G .quad 0x283499dc881f2533 .quad 0x9d0525da779323b6 .quad 0x897addfb673441f4 .quad 0x32b79d71163a168d .quad 0xe27314d289cc2c4b .quad 0x4be4bd11a287178d .quad 0x18d528d6fa3364ce .quad 0x6423c1d5afd9826e .quad 0xcc85f8d9edfcb36a .quad 0x22bcc28f3746e5f9 .quad 0xe49de338f9e5d3cd .quad 0x480a5efbc13e2dcc // 2^240 * 7 * G .quad 0x0b51e70b01622071 .quad 0x06b505cf8b1dafc5 .quad 0x2c6bb061ef5aabcd .quad 0x47aa27600cb7bf31 .quad 0xb6614ce442ce221f .quad 0x6e199dcc4c053928 .quad 0x663fb4a4dc1cbe03 .quad 0x24b31d47691c8e06 .quad 0x2a541eedc015f8c3 .quad 0x11a4fe7e7c693f7c .quad 0xf0af66134ea278d6 .quad 0x545b585d14dda094 // 2^240 * 8 * G .quad 0x67bf275ea0d43a0f .quad 0xade68e34089beebe .quad 0x4289134cd479e72e .quad 0x0f62f9c332ba5454 .quad 0x6204e4d0e3b321e1 .quad 0x3baa637a28ff1e95 .quad 0x0b0ccffd5b99bd9e .quad 0x4d22dc3e64c8d071 .quad 0xfcb46589d63b5f39 .quad 0x5cae6a3f57cbcf61 .quad 0xfebac2d2953afa05 .quad 0x1c0fa01a36371436 // 2^244 * 1 * G .quad 0xe7547449bc7cd692 .quad 0x0f9abeaae6f73ddf .quad 0x4af01ca700837e29 .quad 0x63ab1b5d3f1bc183 .quad 0xc11ee5e854c53fae .quad 0x6a0b06c12b4f3ff4 .quad 0x33540f80e0b67a72 .quad 0x15f18fc3cd07e3ef .quad 0x32750763b028f48c .quad 0x06020740556a065f .quad 0xd53bd812c3495b58 .quad 
0x08706c9b865f508d // 2^244 * 2 * G .quad 0xf37ca2ab3d343dff .quad 0x1a8c6a2d80abc617 .quad 0x8e49e035d4ccffca .quad 0x48b46beebaa1d1b9 .quad 0xcc991b4138b41246 .quad 0x243b9c526f9ac26b .quad 0xb9ef494db7cbabbd .quad 0x5fba433dd082ed00 .quad 0x9c49e355c9941ad0 .quad 0xb9734ade74498f84 .quad 0x41c3fed066663e5c .quad 0x0ecfedf8e8e710b3 // 2^244 * 3 * G .quad 0x76430f9f9cd470d9 .quad 0xb62acc9ba42f6008 .quad 0x1898297c59adad5e .quad 0x7789dd2db78c5080 .quad 0x744f7463e9403762 .quad 0xf79a8dee8dfcc9c9 .quad 0x163a649655e4cde3 .quad 0x3b61788db284f435 .quad 0xb22228190d6ef6b2 .quad 0xa94a66b246ce4bfa .quad 0x46c1a77a4f0b6cc7 .quad 0x4236ccffeb7338cf // 2^244 * 4 * G .quad 0x8497404d0d55e274 .quad 0x6c6663d9c4ad2b53 .quad 0xec2fb0d9ada95734 .quad 0x2617e120cdb8f73c .quad 0x3bd82dbfda777df6 .quad 0x71b177cc0b98369e .quad 0x1d0e8463850c3699 .quad 0x5a71945b48e2d1f1 .quad 0x6f203dd5405b4b42 .quad 0x327ec60410b24509 .quad 0x9c347230ac2a8846 .quad 0x77de29fc11ffeb6a // 2^244 * 5 * G .quad 0xb0ac57c983b778a8 .quad 0x53cdcca9d7fe912c .quad 0x61c2b854ff1f59dc .quad 0x3a1a2cf0f0de7dac .quad 0x835e138fecced2ca .quad 0x8c9eaf13ea963b9a .quad 0xc95fbfc0b2160ea6 .quad 0x575e66f3ad877892 .quad 0x99803a27c88fcb3a .quad 0x345a6789275ec0b0 .quad 0x459789d0ff6c2be5 .quad 0x62f882651e70a8b2 // 2^244 * 6 * G .quad 0x085ae2c759ff1be4 .quad 0x149145c93b0e40b7 .quad 0xc467e7fa7ff27379 .quad 0x4eeecf0ad5c73a95 .quad 0x6d822986698a19e0 .quad 0xdc9821e174d78a71 .quad 0x41a85f31f6cb1f47 .quad 0x352721c2bcda9c51 .quad 0x48329952213fc985 .quad 0x1087cf0d368a1746 .quad 0x8e5261b166c15aa5 .quad 0x2d5b2d842ed24c21 // 2^244 * 7 * G .quad 0x02cfebd9ebd3ded1 .quad 0xd45b217739021974 .quad 0x7576f813fe30a1b7 .quad 0x5691b6f9a34ef6c2 .quad 0x5eb7d13d196ac533 .quad 0x377234ecdb80be2b .quad 0xe144cffc7cf5ae24 .quad 0x5226bcf9c441acec .quad 0x79ee6c7223e5b547 .quad 0x6f5f50768330d679 .quad 0xed73e1e96d8adce9 .quad 0x27c3da1e1d8ccc03 // 2^244 * 8 * G .quad 0x7eb9efb23fe24c74 .quad 0x3e50f49f1651be01 .quad 0x3ea732dc21858dea .quad 0x17377bd75bb810f9 .quad 0x28302e71630ef9f6 .quad 0xc2d4a2032b64cee0 .quad 0x090820304b6292be .quad 0x5fca747aa82adf18 .quad 0x232a03c35c258ea5 .quad 0x86f23a2c6bcb0cf1 .quad 0x3dad8d0d2e442166 .quad 0x04a8933cab76862b // 2^248 * 1 * G .quad 0xd2c604b622943dff .quad 0xbc8cbece44cfb3a0 .quad 0x5d254ff397808678 .quad 0x0fa3614f3b1ca6bf .quad 0x69082b0e8c936a50 .quad 0xf9c9a035c1dac5b6 .quad 0x6fb73e54c4dfb634 .quad 0x4005419b1d2bc140 .quad 0xa003febdb9be82f0 .quad 0x2089c1af3a44ac90 .quad 0xf8499f911954fa8e .quad 0x1fba218aef40ab42 // 2^248 * 2 * G .quad 0xab549448fac8f53e .quad 0x81f6e89a7ba63741 .quad 0x74fd6c7d6c2b5e01 .quad 0x392e3acaa8c86e42 .quad 0x4f3e57043e7b0194 .quad 0xa81d3eee08daaf7f .quad 0xc839c6ab99dcdef1 .quad 0x6c535d13ff7761d5 .quad 0x4cbd34e93e8a35af .quad 0x2e0781445887e816 .quad 0x19319c76f29ab0ab .quad 0x25e17fe4d50ac13b // 2^248 * 3 * G .quad 0x0a289bd71e04f676 .quad 0x208e1c52d6420f95 .quad 0x5186d8b034691fab .quad 0x255751442a9fb351 .quad 0x915f7ff576f121a7 .quad 0xc34a32272fcd87e3 .quad 0xccba2fde4d1be526 .quad 0x6bba828f8969899b .quad 0xe2d1bc6690fe3901 .quad 0x4cb54a18a0997ad5 .quad 0x971d6914af8460d4 .quad 0x559d504f7f6b7be4 // 2^248 * 4 * G .quad 0xa7738378b3eb54d5 .quad 0x1d69d366a5553c7c .quad 0x0a26cf62f92800ba .quad 0x01ab12d5807e3217 .quad 0x9c4891e7f6d266fd .quad 0x0744a19b0307781b .quad 0x88388f1d6061e23b .quad 0x123ea6a3354bd50e .quad 0x118d189041e32d96 .quad 0xb9ede3c2d8315848 .quad 0x1eab4271d83245d9 .quad 0x4a3961e2c918a154 // 2^248 * 5 * G .quad 0x71dc3be0f8e6bba0 .quad 
0xd6cef8347effe30a .quad 0xa992425fe13a476a .quad 0x2cd6bce3fb1db763 .quad 0x0327d644f3233f1e .quad 0x499a260e34fcf016 .quad 0x83b5a716f2dab979 .quad 0x68aceead9bd4111f .quad 0x38b4c90ef3d7c210 .quad 0x308e6e24b7ad040c .quad 0x3860d9f1b7e73e23 .quad 0x595760d5b508f597 // 2^248 * 6 * G .quad 0x6129bfe104aa6397 .quad 0x8f960008a4a7fccb .quad 0x3f8bc0897d909458 .quad 0x709fa43edcb291a9 .quad 0x882acbebfd022790 .quad 0x89af3305c4115760 .quad 0x65f492e37d3473f4 .quad 0x2cb2c5df54515a2b .quad 0xeb0a5d8c63fd2aca .quad 0xd22bc1662e694eff .quad 0x2723f36ef8cbb03a .quad 0x70f029ecf0c8131f // 2^248 * 7 * G .quad 0x461307b32eed3e33 .quad 0xae042f33a45581e7 .quad 0xc94449d3195f0366 .quad 0x0b7d5d8a6c314858 .quad 0x2a6aafaa5e10b0b9 .quad 0x78f0a370ef041aa9 .quad 0x773efb77aa3ad61f .quad 0x44eca5a2a74bd9e1 .quad 0x25d448327b95d543 .quad 0x70d38300a3340f1d .quad 0xde1c531c60e1c52b .quad 0x272224512c7de9e4 // 2^248 * 8 * G .quad 0x1abc92af49c5342e .quad 0xffeed811b2e6fad0 .quad 0xefa28c8dfcc84e29 .quad 0x11b5df18a44cc543 .quad 0xbf7bbb8a42a975fc .quad 0x8c5c397796ada358 .quad 0xe27fc76fcdedaa48 .quad 0x19735fd7f6bc20a6 .quad 0xe3ab90d042c84266 .quad 0xeb848e0f7f19547e .quad 0x2503a1d065a497b9 .quad 0x0fef911191df895f #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
marvin-hansen/iggy-streaming-system
2,774
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_mod_n384_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Reduce modulo group order, z := x mod n_384 // Input x[6]; output z[6] // // extern void bignum_mod_n384_6 // (uint64_t z[static 6], uint64_t x[static 6]); // // Reduction is modulo the group order of the NIST curve P-384. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_n384_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_n384_6) .text #define z %rdi #define x %rsi #define d0 %rdx #define d1 %rcx #define d2 %r8 #define d3 %r9 #define d4 %r10 #define d5 %r11 #define a %rax // Re-use the input pointer as a temporary once we're done #define c %rsi S2N_BN_SYMBOL(bignum_mod_n384_6): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // Load the input and compute x + (2^384 - n_384) movq $0x1313e695333ad68d, a movq (x), d0 addq a, d0 movq $0xa7e5f24db74f5885, d1 adcq 8(x), d1 movq $0x389cb27e0bc8d220, d2 adcq 16(x), d2 movq 24(x), d3 adcq $0, d3 movq 32(x), d4 adcq $0, d4 movq 40(x), d5 adcq $0, d5 // Now CF is set iff 2^384 <= x + (2^384 - n_384), i.e. iff n_384 <= x. // Create a mask for the condition x < n. We now want to subtract the // masked (2^384 - n_384), but because we're running out of registers // without using a save-restore sequence, we need some contortions. // Create the lowest digit (re-using a kept from above) sbbq c, c notq c andq c, a // Do the first digit of addition and writeback subq a, d0 movq d0, (z) // Preserve carry chain and do the next digit sbbq d0, d0 movq $0xa7e5f24db74f5885, a andq c, a negq d0 sbbq a, d1 movq d1, 8(z) // Preserve carry chain once more and do remaining digits sbbq d0, d0 movq $0x389cb27e0bc8d220, a andq c, a negq d0 sbbq a, d2 movq d2, 16(z) sbbq $0, d3 movq d3, 24(z) sbbq $0, d4 movq d4, 32(z) sbbq $0, d5 movq d5, 40(z) #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
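Editorial aside (not part of the repository): the routine above uses a branch-free trick -- unconditionally add R = 2^384 - n_384, capture the carry out of the top word, then subtract R back under a mask that is all-ones exactly when x < n_384. Below is a minimal C model of that trick; the function and constant names are illustrative only, not identifiers from aws-lc or s2n-bignum.

#include <stdint.h>

/* Low three words of 2^384 - n_384 (n_384 = group order of NIST P-384);
   the upper three words of this constant are zero. */
static const uint64_t R384[3] = {
    0x1313e695333ad68dULL, 0xa7e5f24db74f5885ULL, 0x389cb27e0bc8d220ULL
};

/* z := x mod n_384 for any 6-word x; since x < 2^384 < 2 * n_384,
   at most one subtraction of n_384 is ever needed. */
void mod_n384_6_model(uint64_t z[6], const uint64_t x[6])
{
    uint64_t t[6];
    unsigned __int128 acc = 0;

    /* t = x + R, keeping the carry out of the top word:
       carry == 1  <=>  x + R >= 2^384  <=>  x >= n_384. */
    for (int i = 0; i < 6; i++) {
        acc += x[i];
        if (i < 3) acc += R384[i];
        t[i] = (uint64_t)acc;
        acc >>= 64;
    }
    uint64_t mask = (acc == 0) ? ~(uint64_t)0 : 0;   /* all-ones iff x < n_384 */

    /* Subtract the masked R back: restores x when x < n_384,
       otherwise leaves t = x - n_384 (mod 2^384). */
    uint64_t borrow = 0;
    for (int i = 0; i < 6; i++) {
        uint64_t r = (i < 3) ? (R384[i] & mask) : 0;
        unsigned __int128 d = (unsigned __int128)t[i] - r - borrow;
        z[i] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;
    }
}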
marvin-hansen/iggy-streaming-system
2,875
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_sub_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Subtract modulo p_384, z := (x - y) mod p_384 // Inputs x[6], y[6]; output z[6] // // extern void bignum_sub_p384 // (uint64_t z[static 6], uint64_t x[static 6], uint64_t y[static 6]); // // Standard x86-64 ABI: RDI = z, RSI = x, RDX = y // Microsoft x64 ABI: RCX = z, RDX = x, R8 = y // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_sub_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_sub_p384) .text #define z %rdi #define x %rsi #define y %rdx #define d0 %rax #define d1 %rcx #define d2 %r8 #define d3 %r9 #define d4 %r10 #define d5 %r11 // Re-use the input pointers as temporaries once we're done #define a %rsi #define c %rdx #define ashort %esi S2N_BN_SYMBOL(bignum_sub_p384): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Subtract the inputs as [d5;d4;d3;d2;d1;d0] = x - y (modulo 2^384) // Capture the top carry as a bitmask for the condition x < y movq (x), d0 subq (y), d0 movq 8(x), d1 sbbq 8(y), d1 movq 16(x), d2 sbbq 16(y), d2 movq 24(x), d3 sbbq 24(y), d3 movq 32(x), d4 sbbq 32(y), d4 movq 40(x), d5 sbbq 40(y), d5 sbbq c, c // Use mask to make r' = mask * (2^384 - p_384) for a compensating subtraction // of r_384 = 2^384 - p_384, equivalent to an addition of p_384. // We don't quite have enough ABI-modifiable registers to create all three // nonzero digits of r while maintaining d0..d5, but make the first two now. movl $0x00000000ffffffff, ashort andq a, c // c = masked 0x00000000ffffffff xorq a, a subq c, a // a = masked 0xffffffff00000001 // Do the first two digits of addition and writeback subq a, d0 movq d0, (z) sbbq c, d1 movq d1, 8(z) // Preserve the carry chain while creating the extra masked digit since // the logical operation will clear CF sbbq d0, d0 andq a, c // c = masked 0x0000000000000001 negq d0 // Do the rest of the addition and writeback sbbq c, d2 movq d2, 16(z) sbbq $0, d3 movq d3, 24(z) sbbq $0, d4 movq d4, 32(z) sbbq $0, d5 movq d5, 40(z) #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
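Editorial aside (not part of the repository): a plain C model of the correction step in bignum_sub_p384 -- subtract, turn the final borrow into a mask, then add p_384 back by subtracting the masked 2^384 - p_384 = [0;0;0;1;0x00000000ffffffff;0xffffffff00000001]. Names are illustrative; this sketches the technique, not the library's API.

#include <stdint.h>

/* z := (x - y) mod p_384, assuming x < p_384 and y < p_384. */
void sub_p384_model(uint64_t z[6], const uint64_t x[6], const uint64_t y[6])
{
    /* Low three words of 2^384 - p_384; the upper three words are zero. */
    static const uint64_t RP384[3] = {
        0xffffffff00000001ULL, 0x00000000ffffffffULL, 0x0000000000000001ULL
    };
    uint64_t borrow = 0;

    /* z = x - y modulo 2^384, keeping the final borrow. */
    for (int i = 0; i < 6; i++) {
        unsigned __int128 d = (unsigned __int128)x[i] - y[i] - borrow;
        z[i] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;
    }
    uint64_t mask = 0 - borrow;          /* all-ones exactly when x < y */

    /* Adding p_384 (mod 2^384) is the same as subtracting 2^384 - p_384,
       so one masked subtraction finishes the job when x < y. */
    borrow = 0;
    for (int i = 0; i < 6; i++) {
        uint64_t r = (i < 3) ? (RP384[i] & mask) : 0;
        unsigned __int128 d = (unsigned __int128)z[i] - r - borrow;
        z[i] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;
    }
}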
marvin-hansen/iggy-streaming-system
5,043
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_mod_n384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Reduce modulo group order, z := x mod n_384 // Input x[k]; output z[6] // // extern void bignum_mod_n384 // (uint64_t z[static 6], uint64_t k, uint64_t *x); // // Reduction is modulo the group order of the NIST curve P-384. // // Standard x86-64 ABI: RDI = z, RSI = k, RDX = x // Microsoft x64 ABI: RCX = z, RDX = k, R8 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_n384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_n384) .text #define z %rdi #define k %rsi #define x %rcx #define m0 %r8 #define m1 %r9 #define m2 %r10 #define m3 %r11 #define m4 %r12 #define m5 %r13 #define d %r14 #define n0 %rax #define n1 %rbx #define n2 %rdx #define q %rdx #define n0short %eax #define qshort %edx S2N_BN_SYMBOL(bignum_mod_n384): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save extra registers pushq %rbx pushq %r12 pushq %r13 pushq %r14 // If the input is already <= 5 words long, go to a trivial "copy" path cmpq $6, k jc shortinput // Otherwise load the top 6 digits (top-down) and reduce k by 6 subq $6, k movq 40(%rdx,k,8), m5 movq 32(%rdx,k,8), m4 movq 24(%rdx,k,8), m3 movq 16(%rdx,k,8), m2 movq 8(%rdx,k,8), m1 movq (%rdx,k,8), m0 // Move x into another register to leave %rdx free for multiplies and use of n2 movq %rdx, x // Reduce the top 6 digits mod n_384 (a conditional subtraction of n_384) movq $0x1313e695333ad68d, n0 movq $0xa7e5f24db74f5885, n1 movq $0x389cb27e0bc8d220, n2 addq n0, m0 adcq n1, m1 adcq n2, m2 adcq $0, m3 adcq $0, m4 adcq $0, m5 sbbq d, d notq d andq d, n0 andq d, n1 andq d, n2 subq n0, m0 sbbq n1, m1 sbbq n2, m2 sbbq $0, m3 sbbq $0, m4 sbbq $0, m5 // Now do (k-6) iterations of 7->6 word modular reduction testq k, k jz writeback loop: // Compute q = min (m5 + 1) (2^64 - 1) movl $1, qshort addq m5, q sbbq d, d orq d, q // Load the next digit so current m to reduce = [m5;m4;m3;m2;m1;m0;d] movq -8(x,k,8), d // Now form [m5;m4;m3;m2;m1;m0;d] = m - q * n_384 subq q, m5 xorq n0, n0 movq $0x1313e695333ad68d, n0 mulxq n0, n0, n1 adcxq n0, d adoxq n1, m0 movq $0xa7e5f24db74f5885, n0 mulxq n0, n0, n1 adcxq n0, m0 adoxq n1, m1 movq $0x389cb27e0bc8d220, n0 mulxq n0, n0, n1 adcxq n0, m1 movl $0, n0short adoxq n0, n1 adcxq n1, m2 adcq $0, m3 adcq $0, m4 adcq $0, m5 // Now our top word m5 is either zero or all 1s. 
Use it for a masked // addition of n_384, which we can do by a *subtraction* of // 2^384 - n_384 from our portion movq $0x1313e695333ad68d, n0 andq m5, n0 movq $0xa7e5f24db74f5885, n1 andq m5, n1 movq $0x389cb27e0bc8d220, n2 andq m5, n2 subq n0, d sbbq n1, m0 sbbq n2, m1 sbbq $0, m2 sbbq $0, m3 sbbq $0, m4 // Now shuffle registers up and loop movq m4, m5 movq m3, m4 movq m2, m3 movq m1, m2 movq m0, m1 movq d, m0 decq k jnz loop // Write back writeback: movq m0, (z) movq m1, 8(z) movq m2, 16(z) movq m3, 24(z) movq m4, 32(z) movq m5, 40(z) // Restore registers and return popq %r14 popq %r13 popq %r12 popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret shortinput: xorq m0, m0 xorq m1, m1 xorq m2, m2 xorq m3, m3 xorq m4, m4 xorq m5, m5 testq k, k jz writeback movq (%rdx), m0 decq k jz writeback movq 8(%rdx), m1 decq k jz writeback movq 16(%rdx), m2 decq k jz writeback movq 24(%rdx), m3 decq k jz writeback movq 32(%rdx), m4 jmp writeback #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
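Editorial aside (not part of the repository): the loop above retires one extra word per iteration using the quotient estimate q = min(m5 + 1, 2^64 - 1) and then repairs a possible one-off over-shoot with a masked add-back. The C sketch below models one such iteration on a least-significant-word-first array; it assumes, as the loop maintains, that the six high words m[1..6] are already below n_384. All names are illustrative, not identifiers from the source tree.

#include <stdint.h>

/* One 7 -> 6 word reduction step: m[] holds seven words (LSW first; m[6] is
   the running top word m5, m[0] the freshly appended low digit), and n[]
   holds the six words of n_384.  On return the result fits in m[0..5]. */
void reduce_step_model(uint64_t m[7], const uint64_t n[6])
{
    uint64_t m5 = m[6];
    uint64_t q  = (m5 == UINT64_MAX) ? UINT64_MAX : m5 + 1;  /* min(m5+1, 2^64-1) */

    /* m := m - q * n over all seven words (wraps mod 2^448 if negative). */
    uint64_t borrow = 0, carry = 0;
    for (int i = 0; i < 6; i++) {
        unsigned __int128 prod = (unsigned __int128)q * n[i] + carry;
        carry = (uint64_t)(prod >> 64);
        unsigned __int128 d = (unsigned __int128)m[i] - (uint64_t)prod - borrow;
        m[i] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;
    }
    m[6] = (uint64_t)((unsigned __int128)m[6] - carry - borrow);

    /* q over-estimates the true quotient by at most one, so the top word is
       now either 0 (exact) or all 1s (one n_384 too far): use it directly as
       the mask for a single add-back of n_384. */
    uint64_t mask = m[6];
    carry = 0;
    for (int i = 0; i < 6; i++) {
        unsigned __int128 s = (unsigned __int128)m[i] + (n[i] & mask) + carry;
        m[i] = (uint64_t)s;
        carry = (uint64_t)(s >> 64);
    }
    m[6] = 0;   /* the carry out cancels the all-ones top word, so just clear it */
}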
marvin-hansen/iggy-streaming-system
5,156
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_mod_p384_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Reduce modulo field characteristic, z := x mod p_384 // Input x[k]; output z[6] // // extern void bignum_mod_p384_alt // (uint64_t z[static 6], uint64_t k, uint64_t *x); // // Standard x86-64 ABI: RDI = z, RSI = k, RDX = x // Microsoft x64 ABI: RCX = z, RDX = k, R8 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_p384_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_p384_alt) .text #define z %rdi #define k %rsi #define x %rcx #define m0 %r8 #define m1 %r9 #define m2 %r10 #define m3 %r11 #define m4 %r12 #define m5 %r13 #define d %r14 #define n0 %rax #define n1 %rbx #define n2 %rdx // Both alias n1 #define q %rbx #define c %rbx #define n0short %eax #define n1short %ebx #define qshort %ebx S2N_BN_SYMBOL(bignum_mod_p384_alt): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save extra registers pushq %rbx pushq %r12 pushq %r13 pushq %r14 // If the input is already <= 5 words long, go to a trivial "copy" path cmpq $6, k jc shortinput // Otherwise load the top 6 digits (top-down) and reduce k by 6 subq $6, k movq 40(%rdx,k,8), m5 movq 32(%rdx,k,8), m4 movq 24(%rdx,k,8), m3 movq 16(%rdx,k,8), m2 movq 8(%rdx,k,8), m1 movq (%rdx,k,8), m0 // Move x into another register to leave %rdx free for multiplies and use of n2 movq %rdx, x // Reduce the top 6 digits mod p_384 (a conditional subtraction of p_384) movl $0x00000000ffffffff, n0short movq $0xffffffff00000000, n1 movq $0xfffffffffffffffe, n2 subq n0, m0 sbbq n1, m1 sbbq n2, m2 sbbq $-1, m3 sbbq $-1, m4 sbbq $-1, m5 sbbq d, d andq d, n0 andq d, n1 andq d, n2 addq n0, m0 adcq n1, m1 adcq n2, m2 adcq d, m3 adcq d, m4 adcq d, m5 // Now do (k-6) iterations of 7->6 word modular reduction testq k, k jz writeback loop: // Compute q = min (m5 + 1) (2^64 - 1) movl $1, qshort addq m5, q sbbq d, d orq d, q // Load the next digit so current m to reduce = [m5;m4;m3;m2;m1;m0;d] movq -8(x,k,8), d // Now form [m5;m4;m3;m2;m1;m0;d] = m - q * p_384. To use an addition for // the main calculation we do (m - 2^384 * q) + q * (2^384 - p_384) // where 2^384 - p_384 = [0;0;0;1;0x00000000ffffffff;0xffffffff00000001]. // The extra subtraction of 2^384 * q is the first instruction. subq q, m5 movq $0xffffffff00000001, %rax mulq q addq %rax, d adcq %rdx, m0 adcq q, m1 movq q, %rax sbbq c, c movl $0x00000000ffffffff, %edx negq c mulq %rdx addq %rax, m0 adcq %rdx, m1 adcq c, m2 adcq $0, m3 adcq $0, m4 adcq $0, m5 // Now our top word m5 is either zero or all 1s. 
Use it for a masked // addition of p_384, which we can do by a *subtraction* of // 2^384 - p_384 from our portion movq $0xffffffff00000001, n0 andq m5, n0 movl $0x00000000ffffffff, n1short andq m5, n1 andq $1, m5 subq n0, d sbbq n1, m0 sbbq m5, m1 sbbq $0, m2 sbbq $0, m3 sbbq $0, m4 // Now shuffle registers up and loop movq m4, m5 movq m3, m4 movq m2, m3 movq m1, m2 movq m0, m1 movq d, m0 decq k jnz loop // Write back writeback: movq m0, (z) movq m1, 8(z) movq m2, 16(z) movq m3, 24(z) movq m4, 32(z) movq m5, 40(z) // Restore registers and return popq %r14 popq %r13 popq %r12 popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret shortinput: xorq m0, m0 xorq m1, m1 xorq m2, m2 xorq m3, m3 xorq m4, m4 xorq m5, m5 testq k, k jz writeback movq (%rdx), m0 decq k jz writeback movq 8(%rdx), m1 decq k jz writeback movq 16(%rdx), m2 decq k jz writeback movq 24(%rdx), m3 decq k jz writeback movq 32(%rdx), m4 jmp writeback #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
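Editorial note, not from the source files: a short justification of the quotient estimate q = min(m5 + 1, 2^64 - 1) that both reduction loops above lean on. Write the seven-word value being reduced as m = m5*2^384 + r with r < 2^384, and let p be the modulus (p_384 or n_384); both satisfy 2^384 - 2^192 < p < 2^384. Then

    floor(m/p) >= floor(m5*2^384 / 2^384) = m5, and
    m <= (m5+1)*2^384 - 1 < (m5+2)*(2^384 - 2^192) < (m5+2)*p   (since m5 + 2 < 2^192),

so the true quotient is m5 or m5 + 1, and q is either exact or one too large. The min with 2^64 - 1 merely keeps q in a single word; the loop invariant (the running six words stay below the modulus) guarantees the true quotient never exceeds 2^64 - 1 anyway. Hence m - q*p lies strictly between -p and p, its top word after the subtraction is either 0 or all 1s, and that word is exactly the mask the code feeds into the final add-back.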
marvin-hansen/iggy-streaming-system
9,402
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_montmul_p384_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery multiply, z := (x * y / 2^384) mod p_384 // Inputs x[6], y[6]; output z[6] // // extern void bignum_montmul_p384_alt // (uint64_t z[static 6], uint64_t x[static 6], uint64_t y[static 6]); // // Does z := (2^{-384} * x * y) mod p_384, assuming that the inputs x and y // satisfy x * y <= 2^384 * p_384 (in particular this is true if we are in // the "usual" case x < p_384 and y < p_384). // // Standard x86-64 ABI: RDI = z, RSI = x, RDX = y // Microsoft x64 ABI: RCX = z, RDX = x, R8 = y // ----------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p384_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p384_alt) .text #define z %rdi #define x %rsi // We move the y argument here so we can use %rdx for multipliers #define y %rcx // Some temp registers for the last correction stage #define d %rax #define u %rdx #define v %rcx #define w %rbx // Add %rbx * m into a register-pair (high,low) maintaining consistent // carry-catching with carry (negated, as bitmask) and using %rax and %rdx // as temporaries #define mulpadd(carry,high,low,m) \ movq m, %rax ; \ mulq %rbx; \ subq carry, %rdx ; \ addq %rax, low ; \ adcq %rdx, high ; \ sbbq carry, carry // Initial version assuming no carry-in #define mulpadi(carry,high,low,m) \ movq m, %rax ; \ mulq %rbx; \ addq %rax, low ; \ adcq %rdx, high ; \ sbbq carry, carry // End version not catching the top carry-out #define mulpade(carry,high,low,m) \ movq m, %rax ; \ mulq %rbx; \ subq carry, %rdx ; \ addq %rax, low ; \ adcq %rdx, high // Core one-step Montgomery reduction macro. Takes input in // [d7;d6;d5;d4;d3;d2;d1;d0] and returns result in [d7;d6;d5;d4;d3;d2;d1], // adding to the existing contents, re-using d0 as a temporary internally // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 // // montredc(d7,d6,d5,d4,d3,d2,d1,d0) // // This particular variant, with its mix of addition and subtraction // at the top, is not intended to maintain a coherent carry or borrow out. // It is assumed the final result would fit in [d7;d6;d5;d4;d3;d2;d1]. 
// which is always the case here as the top word is even always in {0,1} #define montredc(d7,d6,d5,d4,d3,d2,d1,d0) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ movq d0, %rbx ; \ shlq $32, %rbx ; \ addq d0, %rbx ; \ /* Construct [%rbp;%rdx;%rax;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel so we can re-use d0 as a temp */ \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, d0 ; \ movq $0x00000000ffffffff, %rax ; \ mulq %rbx; \ addq d0, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ /* Now subtract that and add 2^384 * w */ \ subq %rax, d1 ; \ sbbq %rdx, d2 ; \ sbbq %rbp, d3 ; \ sbbq $0, d4 ; \ sbbq $0, d5 ; \ sbbq $0, %rbx ; \ addq %rbx, d6 ; \ adcq $0, d7 S2N_BN_SYMBOL(bignum_montmul_p384_alt): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save more registers to play with pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 // Copy y into a safe register to start with movq %rdx, y // Do row 0 computation, which is a bit different: // set up initial window [%r14,%r13,%r12,%r11,%r10,%r9,%r8] = y[0] * x // Unlike later, we only need a single carry chain movq (y), %rbx movq (x), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 8(x), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 16(x), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 24(x), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 32(x), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 40(x), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d // Montgomery reduce the zeroth window montredc(%r15, %r14,%r13,%r12,%r11,%r10,%r9,%r8) // Add row 1 movq 8(y), %rbx mulpadi(%r8,%r10,%r9,(x)) mulpadd(%r8,%r11,%r10,8(x)) mulpadd(%r8,%r12,%r11,16(x)) mulpadd(%r8,%r13,%r12,24(x)) mulpadd(%r8,%r14,%r13,32(x)) mulpadd(%r8,%r15,%r14,40(x)) negq %r8 // Montgomery reduce window 1 montredc(%r8, %r15,%r14,%r13,%r12,%r11,%r10,%r9) // Add row 2 movq 16(y), %rbx mulpadi(%r9,%r11,%r10,(x)) mulpadd(%r9,%r12,%r11,8(x)) mulpadd(%r9,%r13,%r12,16(x)) mulpadd(%r9,%r14,%r13,24(x)) mulpadd(%r9,%r15,%r14,32(x)) mulpadd(%r9,%r8,%r15,40(x)) negq %r9 // Montgomery reduce window 2 montredc(%r9, %r8,%r15,%r14,%r13,%r12,%r11,%r10) // Add row 3 movq 24(y), %rbx mulpadi(%r10,%r12,%r11,(x)) mulpadd(%r10,%r13,%r12,8(x)) mulpadd(%r10,%r14,%r13,16(x)) mulpadd(%r10,%r15,%r14,24(x)) mulpadd(%r10,%r8,%r15,32(x)) mulpadd(%r10,%r9,%r8,40(x)) negq %r10 // Montgomery reduce window 3 montredc(%r10, %r9,%r8,%r15,%r14,%r13,%r12,%r11) // Add row 4 movq 32(y), %rbx mulpadi(%r11,%r13,%r12,(x)) mulpadd(%r11,%r14,%r13,8(x)) mulpadd(%r11,%r15,%r14,16(x)) mulpadd(%r11,%r8,%r15,24(x)) mulpadd(%r11,%r9,%r8,32(x)) mulpadd(%r11,%r10,%r9,40(x)) negq %r11 // Montgomery reduce window 4 montredc(%r11, %r10,%r9,%r8,%r15,%r14,%r13,%r12) // Add row 5 movq 40(y), %rbx mulpadi(%r12,%r14,%r13,(x)) mulpadd(%r12,%r15,%r14,8(x)) mulpadd(%r12,%r8,%r15,16(x)) mulpadd(%r12,%r9,%r8,24(x)) mulpadd(%r12,%r10,%r9,32(x)) mulpadd(%r12,%r11,%r10,40(x)) negq %r12 // Montgomery reduce window 5 montredc(%r12, %r11,%r10,%r9,%r8,%r15,%r14,%r13) // We now have a pre-reduced 7-word form z = [%r12; %r11;%r10;%r9;%r8;%r15;%r14] // Next, accumulate in different registers z - p_384, or more precisely // // [%r12; %r13;%rbp;%rdx;%rcx;%rbx;%rax] = z + (2^384 - p_384) xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movq $0xffffffff00000001, %rax addq %r14, %rax movl 
$0x00000000ffffffff, %ebx adcq %r15, %rbx movl $0x0000000000000001, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0, %r12 // ~ZF <=> %r12 >= 1 <=> z + (2^384 - p_384) >= 2^384 <=> z >= p_384, which // determines whether to use the further reduced argument or the original z. cmovnzq %rax, %r14 cmovnzq %rbx, %r15 cmovnzq %rcx, %r8 cmovnzq %rdx, %r9 cmovnzq %rbp, %r10 cmovnzq %r13, %r11 // Write back the result movq %r14, (z) movq %r15, 8(z) movq %r8, 16(z) movq %r9, 24(z) movq %r10, 32(z) movq %r11, 40(z) // Restore registers and return popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
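Editor's note: a minimal reference sketch of the functional contract of bignum_montmul_p384_alt, i.e. z = x * y * 2^-384 mod p_384, may help when reading the word-level carry chains above. This is not the assembly's algorithm; it is plain Python (3.8+ for pow(x, -1, m)), and the helper names (P384, montmul_p384_ref, to_mont) are illustrative, not from the repository.

# Reference semantics of bignum_montmul_p384_alt, using arbitrary-precision ints.
P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def montmul_p384_ref(x, y):
    # Precondition from the header comment: x * y <= 2^384 * p_384
    assert x * y <= 2**384 * P384
    r_inv = pow(2**384, -1, P384)          # 2^-384 mod p_384 (Python 3.8+)
    return (x * y * r_inv) % P384

def to_mont(a):
    # Montgomery form a' = 2^384 * a mod p_384
    return (a << 384) % P384

if __name__ == "__main__":
    a, b = 0x1234567 % P384, 0x89ABCDEF % P384
    # Montgomery product of two Montgomery forms is the form of the product.
    assert montmul_p384_ref(to_mont(a), to_mont(b)) == to_mont((a * b) % P384)
    print("ok")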
marvin-hansen/iggy-streaming-system
96,628
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_montinv_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery inverse modulo p_384 = 2^384 - 2^128 - 2^96 + 2^32 - 1 // Input x[6]; output z[6] // // extern void bignum_montinv_p384(uint64_t z[static 6],uint64_t x[static 6]); // // If the 6-digit input x is coprime to p_384, i.e. is not divisible // by it, returns z < p_384 such that x * z == 2^768 (mod p_384). This // is effectively "Montgomery inverse" because if we consider x and z as // Montgomery forms of X and Z, i.e. x == 2^384 * X and z == 2^384 * Z // (both mod p_384) then X * Z == 1 (mod p_384). That is, this function // gives the analog of the modular inverse bignum_inv_p384 but with both // input and output in the Montgomery domain. Note that x does not need // to be reduced modulo p_384, but the output always is. If the input // is divisible (i.e. is 0 or p_384), then there can be no solution to // the congruence x * z == 2^768 (mod p_384), and z = 0 is returned. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montinv_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montinv_p384) .text // Size in bytes of a 64-bit word #define N 8 // Pointer-offset pairs for temporaries on stack // The u and v variables are 6 words each as expected, but the f and g // variables are 8 words each -- they need to have at least one extra // word for a sign word, and to preserve alignment we "round up" to 8. // In fact, we currently keep an extra word in u and v as well. #define f 0(%rsp) #define g (8*N)(%rsp) #define u (16*N)(%rsp) #define v (24*N)(%rsp) #define tmp (32*N)(%rsp) #define tmp2 (33*N)(%rsp) #define i (34*N)(%rsp) #define d (35*N)(%rsp) #define mat (36*N)(%rsp) // Backup for the input pointer #define res (40*N)(%rsp) // Total size to reserve on the stack #define NSPACE (42*N) // Syntactic variants to make x86_att version simpler to generate #define F 0 #define G (8*N) #define U (16*N) #define V (24*N) #define MAT (36*N) #define ff (%rsp) #define gg (8*N)(%rsp) // --------------------------------------------------------------------------- // Core signed almost-Montgomery reduction macro from P[6..0] to P[5..0]. // --------------------------------------------------------------------------- #define amontred(P) \ /* We only know the input is -2^444 < x < 2^444. To do traditional */ \ /* unsigned Montgomery reduction, start by adding 2^61 * p_384. 
*/ \ movq $0xe000000000000000, %r8 ; \ xorl %eax, %eax ; \ addq P, %r8 ; \ movq $0x000000001fffffff, %r9 ; \ leaq -1(%rax), %rax ; \ adcq N+P, %r9 ; \ movq $0xdfffffffe0000000, %r10 ; \ adcq 2*N+P, %r10 ; \ movq 3*N+P, %r11 ; \ adcq %rax, %r11 ; \ movq 4*N+P, %r12 ; \ adcq %rax, %r12 ; \ movq 5*N+P, %r13 ; \ adcq %rax, %r13 ; \ movq $0x1fffffffffffffff, %r14 ; \ adcq 6*N+P, %r14 ; \ /* Correction multiplier is %rbx = w = [d0 + (d0<<32)] mod 2^64 */ \ movq %r8, %rbx ; \ shlq $32, %rbx ; \ addq %r8, %rbx ; \ /* Construct [%rbp;%rdx;%rax;-] = (2^384 - p_384) * w */ \ /* We know lowest word will cancel so can re-use %r8 as a temp */ \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r8 ; \ movq $0x00000000ffffffff, %rax ; \ mulq %rbx; \ addq %r8, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ /* Now subtract that and add 2^384 * w, catching carry in %rax */ \ subq %rax, %r9 ; \ sbbq %rdx, %r10 ; \ sbbq %rbp, %r11 ; \ sbbq $0, %r12 ; \ sbbq $0, %r13 ; \ sbbq $0, %r14 ; \ sbbq %rax, %rax ; \ addq %rbx, %r14 ; \ adcq $0, %rax ; \ /* Now if top is nonzero we subtract p_384 (almost-Montgomery) */ \ negq %rax; \ movq $0x00000000ffffffff, %rbx ; \ andq %rax, %rbx ; \ movq $0xffffffff00000000, %rcx ; \ andq %rax, %rcx ; \ movq $0xfffffffffffffffe, %rdx ; \ andq %rax, %rdx ; \ subq %rbx, %r9 ; \ movq %r9, P ; \ sbbq %rcx, %r10 ; \ movq %r10, N+P ; \ sbbq %rdx, %r11 ; \ movq %r11, 2*N+P ; \ sbbq %rax, %r12 ; \ movq %r12, 3*N+P ; \ sbbq %rax, %r13 ; \ movq %r13, 4*N+P ; \ sbbq %rax, %r14 ; \ movq %r14, 5*N+P // Very similar to a subroutine call to the s2n-bignum word_divstep59. // But different in register usage and returning the final matrix as // // [ %r8 %r10] // [ %r12 %r14] // // and also returning the matrix still negated (which doesn't matter) #define divstep59(din,fin,gin) \ movq din, %rsi ; \ movq fin, %rdx ; \ movq gin, %rcx ; \ movq %rdx, %rbx ; \ andq $0xfffff, %rbx ; \ movabsq $0xfffffe0000000000, %rax ; \ orq %rax, %rbx ; \ andq $0xfffff, %rcx ; \ movabsq $0xc000000000000000, %rax ; \ orq %rax, %rcx ; \ movq $0xfffffffffffffffe, %rax ; \ xorl %ebp, %ebp ; \ movl $0x2, %edx ; \ movq %rbx, %rdi ; \ movq %rax, %r8 ; \ testq %rsi, %rsi ; \ cmovs %rbp, %r8 ; \ testq $0x1, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, 
%rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, 
%rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ sarq $1, %rcx ; \ movl $0x100000, %eax ; \ leaq (%rbx,%rax), %rdx ; \ leaq (%rcx,%rax), %rdi ; \ shlq $0x16, %rdx ; \ shlq $0x16, %rdi ; \ sarq $0x2b, %rdx ; \ sarq $0x2b, %rdi ; \ movabsq $0x20000100000, %rax ; \ leaq (%rbx,%rax), %rbx ; \ leaq (%rcx,%rax), %rcx ; \ sarq $0x2a, %rbx ; \ sarq $0x2a, %rcx ; \ movq %rdx, MAT(%rsp) ; \ movq %rbx, MAT+0x8(%rsp) ; \ movq %rdi, MAT+0x10(%rsp) ; \ movq %rcx, MAT+0x18(%rsp) ; \ movq fin, %r12 ; \ imulq %r12, %rdi ; \ imulq %rdx, %r12 ; \ movq gin, %r13 ; \ imulq %r13, %rbx ; \ imulq %rcx, %r13 ; \ addq %rbx, %r12 ; \ addq %rdi, %r13 ; \ sarq $0x14, %r12 ; \ sarq $0x14, %r13 ; \ movq %r12, %rbx ; \ andq $0xfffff, %rbx ; \ movabsq $0xfffffe0000000000, %rax ; \ orq %rax, %rbx ; \ movq %r13, %rcx ; \ andq $0xfffff, %rcx ; \ movabsq $0xc000000000000000, %rax ; \ orq %rax, %rcx ; \ movq $0xfffffffffffffffe, %rax ; \ movl $0x2, %edx ; \ movq %rbx, %rdi ; \ movq %rax, %r8 ; \ testq %rsi, %rsi ; \ cmovs %rbp, %r8 ; \ testq $0x1, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq 
$1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ sarq $1, %rcx ; \ movl $0x100000, %eax ; \ leaq (%rbx,%rax), %r8 ; \ leaq (%rcx,%rax), %r10 ; \ shlq $0x16, %r8 ; \ shlq $0x16, %r10 ; \ sarq $0x2b, %r8 ; \ sarq $0x2b, %r10 ; \ movabsq $0x20000100000, %rax ; \ leaq (%rbx,%rax), %r15 ; \ leaq (%rcx,%rax), %r11 ; \ sarq $0x2a, %r15 ; \ sarq $0x2a, %r11 ; \ movq %r13, %rbx ; \ movq %r12, %rcx ; \ imulq %r8, %r12 ; \ imulq %r15, %rbx ; \ addq %rbx, %r12 ; \ imulq %r11, %r13 ; \ imulq %r10, %rcx ; \ addq 
%rcx, %r13 ; \ sarq $0x14, %r12 ; \ sarq $0x14, %r13 ; \ movq %r12, %rbx ; \ andq $0xfffff, %rbx ; \ movabsq $0xfffffe0000000000, %rax ; \ orq %rax, %rbx ; \ movq %r13, %rcx ; \ andq $0xfffff, %rcx ; \ movabsq $0xc000000000000000, %rax ; \ orq %rax, %rcx ; \ movq MAT(%rsp), %rax ; \ imulq %r8, %rax ; \ movq MAT+0x10(%rsp), %rdx ; \ imulq %r15, %rdx ; \ imulq MAT+0x8(%rsp), %r8 ; \ imulq MAT+0x18(%rsp), %r15 ; \ addq %r8, %r15 ; \ leaq (%rax,%rdx), %r9 ; \ movq MAT(%rsp), %rax ; \ imulq %r10, %rax ; \ movq MAT+0x10(%rsp), %rdx ; \ imulq %r11, %rdx ; \ imulq MAT+0x8(%rsp), %r10 ; \ imulq MAT+0x18(%rsp), %r11 ; \ addq %r10, %r11 ; \ leaq (%rax,%rdx), %r13 ; \ movq $0xfffffffffffffffe, %rax ; \ movl $0x2, %edx ; \ movq %rbx, %rdi ; \ movq %rax, %r8 ; \ testq %rsi, %rsi ; \ cmovs %rbp, %r8 ; \ testq $0x1, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ 
xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ sarq $1, %rcx ; \ movl $0x100000, %eax ; \ leaq (%rbx,%rax), %r8 ; \ leaq (%rcx,%rax), %r12 ; \ shlq $0x15, %r8 ; \ shlq $0x15, %r12 ; \ sarq $0x2b, %r8 ; \ sarq $0x2b, %r12 ; \ movabsq $0x20000100000, %rax ; \ leaq (%rbx,%rax), %r10 ; \ leaq (%rcx,%rax), %r14 ; \ sarq $0x2b, %r10 ; \ sarq $0x2b, %r14 ; \ movq %r9, %rax ; \ imulq %r8, %rax ; \ movq %r13, %rdx ; \ imulq %r10, %rdx ; \ imulq %r15, %r8 ; \ imulq %r11, %r10 ; \ addq %r8, %r10 ; \ leaq (%rax,%rdx), %r8 ; \ movq %r9, %rax ; \ imulq %r12, %rax ; \ movq %r13, %rdx ; \ imulq %r14, %rdx ; \ imulq %r15, %r12 ; \ imulq %r11, %r14 ; \ addq %r12, %r14 ; \ leaq (%rax,%rdx), %r12 S2N_BN_SYMBOL(bignum_montinv_p384): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // Save registers and make room for temporaries pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp // Save the return pointer for the end so we can overwrite %rdi later movq %rdi, res // Copy the constant p_384 into f including the 7th zero digit movl $0xffffffff, %eax movq %rax, F(%rsp) movq %rax, %rbx notq %rbx movq %rbx, F+N(%rsp) xorl %ebp, %ebp leaq -2(%rbp), %rcx movq %rcx, F+2*N(%rsp) leaq -1(%rbp), %rdx movq %rdx, F+3*N(%rsp) movq %rdx, F+4*N(%rsp) movq %rdx, F+5*N(%rsp) movq %rbp, F+6*N(%rsp) 
// Copy input but to g, reduced mod p_384 so that g <= f as assumed // in the divstep bound proof. movq (%rsi), %r8 subq %rax, %r8 movq N(%rsi), %r9 sbbq %rbx, %r9 movq 2*N(%rsi), %r10 sbbq %rcx, %r10 movq 3*N(%rsi), %r11 sbbq %rdx, %r11 movq 4*N(%rsi), %r12 sbbq %rdx, %r12 movq 5*N(%rsi), %r13 sbbq %rdx, %r13 cmovcq (%rsi), %r8 cmovcq N(%rsi), %r9 cmovcq 2*N(%rsi), %r10 cmovcq 3*N(%rsi), %r11 cmovcq 4*N(%rsi), %r12 cmovcq 5*N(%rsi), %r13 movq %r8, G(%rsp) movq %r9, G+N(%rsp) movq %r10, G+2*N(%rsp) movq %r11, G+3*N(%rsp) movq %r12, G+4*N(%rsp) movq %r13, G+5*N(%rsp) movq %rbp, G+6*N(%rsp) // Also maintain reduced < 2^384 vector [u,v] such that // [f,g] == x * 2^{5*i-843} * [u,v] (mod p_384) // starting with [p_384,x] == x * 2^{5*0-843} * [0,2^843] (mod p_384) // The weird-looking 5*i modifications come in because we are doing // 64-bit word-sized Montgomery reductions at each stage, which is // 5 bits more than the 59-bit requirement to keep things stable. // After the 15th and last iteration and sign adjustment, when // f == 1 for in-scope cases, we have x * 2^{75-843} * u == 1, i.e. // x * u == 2^768 as required. xorl %eax, %eax movq %rax, U(%rsp) movq %rax, U+N(%rsp) movq %rax, U+2*N(%rsp) movq %rax, U+3*N(%rsp) movq %rax, U+4*N(%rsp) movq %rax, U+5*N(%rsp) // The starting constant 2^843 mod p_384 is // 0x0000000000000800:00001000000007ff:fffff00000000000 // :00001000000007ff:fffff00000000800:0000000000000000 // where colons separate 64-bit subwords, least significant at the right. // These are constructed dynamically to reduce large constant loads. movq %rax, V(%rsp) movq $0xfffff00000000800, %rcx movq %rcx, V+N(%rsp) movq $0x00001000000007ff, %rdx movq %rdx, V+2*N(%rsp) btr $11, %rcx movq %rcx, V+3*N(%rsp) movq %rdx, V+4*N(%rsp) bts $11, %rax movq %rax, V+5*N(%rsp) // Start of main loop. We jump into the middle so that the divstep // portion is common to the special fifteenth iteration after a uniform // first 14. movq $15, i movq $1, d jmp bignum_montinv_p384_midloop bignum_montinv_p384_loop: // Separate out the matrix into sign-magnitude pairs movq %r8, %r9 sarq $63, %r9 xorq %r9, %r8 subq %r9, %r8 movq %r10, %r11 sarq $63, %r11 xorq %r11, %r10 subq %r11, %r10 movq %r12, %r13 sarq $63, %r13 xorq %r13, %r12 subq %r13, %r12 movq %r14, %r15 sarq $63, %r15 xorq %r15, %r14 subq %r15, %r14 // Adjust the initial values to allow for complement instead of negation // This initial offset is the same for [f,g] and [u,v] compositions. // Save it in temporary storage for the [u,v] part and do [f,g] first. movq %r8, %rax andq %r9, %rax movq %r10, %rdi andq %r11, %rdi addq %rax, %rdi movq %rdi, tmp movq %r12, %rax andq %r13, %rax movq %r14, %rsi andq %r15, %rsi addq %rax, %rsi movq %rsi, tmp2 // Now the computation of the updated f and g values. This maintains a // 2-word carry between stages so we can conveniently insert the shift // right by 59 before storing back, and not overwrite digits we need // again of the old f and g values. 
// // Digit 0 of [f,g] xorl %ebx, %ebx movq F(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq G(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rdi adcq %rdx, %rbx xorl %ebp, %ebp movq F(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rsi adcq %rdx, %rbp movq G(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp // Digit 1 of [f,g] xorl %ecx, %ecx movq F+N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq G+N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx shrdq $59, %rbx, %rdi movq %rdi, F(%rsp) xorl %edi, %edi movq F+N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbp adcq %rdx, %rdi movq G+N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rdi shrdq $59, %rbp, %rsi movq %rsi, G(%rsp) // Digit 2 of [f,g] xorl %esi, %esi movq F+2*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rsi movq G+2*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rsi shrdq $59, %rcx, %rbx movq %rbx, F+N(%rsp) xorl %ebx, %ebx movq F+2*N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rdi adcq %rdx, %rbx movq G+2*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rdi adcq %rdx, %rbx shrdq $59, %rdi, %rbp movq %rbp, G+N(%rsp) // Digit 3 of [f,g] xorl %ebp, %ebp movq F+3*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rsi adcq %rdx, %rbp movq G+3*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rsi adcq %rdx, %rbp shrdq $59, %rsi, %rcx movq %rcx, F+2*N(%rsp) xorl %ecx, %ecx movq F+3*N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbx adcq %rdx, %rcx movq G+3*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbx adcq %rdx, %rcx shrdq $59, %rbx, %rdi movq %rdi, G+2*N(%rsp) // Digit 4 of [f,g] xorl %edi, %edi movq F+4*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbp adcq %rdx, %rdi movq G+4*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbp adcq %rdx, %rdi shrdq $59, %rbp, %rsi movq %rsi, F+3*N(%rsp) xorl %esi, %esi movq F+4*N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rcx adcq %rdx, %rsi movq G+4*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rcx adcq %rdx, %rsi shrdq $59, %rcx, %rbx movq %rbx, G+3*N(%rsp) // Digits 5 and 6 of [f,g] movq F+5*N(%rsp), %rax xorq %r9, %rax movq F+6*N(%rsp), %rbx xorq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq G+5*N(%rsp), %rax xorq %r11, %rax movq G+6*N(%rsp), %rdx xorq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rdi adcq %rdx, %rbx shrdq $59, %rdi, %rbp movq %rbp, F+4*N(%rsp) shrdq $59, %rbx, %rdi sarq $59, %rbx movq F+5*N(%rsp), %rax movq %rdi, F+5*N(%rsp) movq F+6*N(%rsp), %rdi movq %rbx, F+6*N(%rsp) xorq %r13, %rax xorq %r13, %rdi andq %r12, %rdi negq %rdi mulq %r12 addq %rax, %rsi adcq %rdx, %rdi movq G+5*N(%rsp), %rax xorq %r15, %rax movq G+6*N(%rsp), %rdx xorq %r15, %rdx andq %r14, %rdx subq %rdx, %rdi mulq %r14 addq %rax, %rsi adcq %rdx, %rdi shrdq $59, %rsi, %rcx movq %rcx, G+4*N(%rsp) shrdq $59, %rdi, %rsi movq %rsi, G+5*N(%rsp) sarq $59, %rdi movq %rdi, G+6*N(%rsp) // Get the initial carries back from storage and do the [u,v] accumulation movq tmp, %rbx movq tmp2, %rbp // Digit 0 of [u,v] xorl %ecx, %ecx movq U(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq V(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq U(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, U(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq V(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi 
movq %rbp, V(%rsp) // Digit 1 of [u,v] xorl %ebx, %ebx movq U+N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq V+N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq U+N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, U+N(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq V+N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, V+N(%rsp) // Digit 2 of [u,v] xorl %ecx, %ecx movq U+2*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq V+2*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq U+2*N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, U+2*N(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq V+2*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, V+2*N(%rsp) // Digit 3 of [u,v] xorl %ebx, %ebx movq U+3*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq V+3*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq U+3*N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, U+3*N(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq V+3*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, V+3*N(%rsp) // Digit 4 of [u,v] xorl %ecx, %ecx movq U+4*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq V+4*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq U+4*N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, U+4*N(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq V+4*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, V+4*N(%rsp) // Digits 5 and 6 of u (top is unsigned) movq U+5*N(%rsp), %rax xorq %r9, %rax movq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq V+5*N(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rcx adcq %rbx, %rdx // Preload for last use of old u digit 3 movq U+5*N(%rsp), %rax movq %rcx, U+5*N(%rsp) movq %rdx, U+6*N(%rsp) // Digits 5 and 6 of v (top is unsigned) xorq %r13, %rax movq %r13, %rcx andq %r12, %rcx negq %rcx mulq %r12 addq %rax, %rsi adcq %rdx, %rcx movq V+5*N(%rsp), %rax xorq %r15, %rax movq %r15, %rdx andq %r14, %rdx subq %rdx, %rcx mulq %r14 addq %rax, %rsi adcq %rcx, %rdx movq %rsi, V+5*N(%rsp) movq %rdx, V+6*N(%rsp) // Montgomery reduction of u amontred(u) // Montgomery reduction of v amontred(v) bignum_montinv_p384_midloop: divstep59(d,ff,gg) movq %rsi, d // Next iteration decq i jnz bignum_montinv_p384_loop // The 15th and last iteration does not need anything except the // u value and the sign of f; the latter can be obtained from the // lowest word of f. So it's done differently from the main loop. // Find the sign of the new f. For this we just need one digit // since we know (for in-scope cases) that f is either +1 or -1. // We don't explicitly shift right by 59 either, but looking at // bit 63 (or any bit >= 60) of the unshifted result is enough // to distinguish -1 from +1; this is then made into a mask. movq F(%rsp), %rax movq G(%rsp), %rcx imulq %r8, %rax imulq %r10, %rcx addq %rcx, %rax sarq $63, %rax // Now separate out the matrix into sign-magnitude pairs // and adjust each one based on the sign of f. // // Note that at this point we expect |f|=1 and we got its // sign above, so then since [f,0] == x * 2^{-768} [u,v] (mod p_384) // we want to flip the sign of u according to that of f. 
movq %r8, %r9 sarq $63, %r9 xorq %r9, %r8 subq %r9, %r8 xorq %rax, %r9 movq %r10, %r11 sarq $63, %r11 xorq %r11, %r10 subq %r11, %r10 xorq %rax, %r11 movq %r12, %r13 sarq $63, %r13 xorq %r13, %r12 subq %r13, %r12 xorq %rax, %r13 movq %r14, %r15 sarq $63, %r15 xorq %r15, %r14 subq %r15, %r14 xorq %rax, %r15 // Adjust the initial value to allow for complement instead of negation movq %r8, %rax andq %r9, %rax movq %r10, %r12 andq %r11, %r12 addq %rax, %r12 // Digit 0 of [u] xorl %r13d, %r13d movq U(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r12 adcq %rdx, %r13 movq V(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r12 movq %r12, U(%rsp) adcq %rdx, %r13 // Digit 1 of [u] xorl %r14d, %r14d movq U+N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r13 adcq %rdx, %r14 movq V+N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r13 movq %r13, U+N(%rsp) adcq %rdx, %r14 // Digit 2 of [u] xorl %r15d, %r15d movq U+2*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq V+2*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 movq %r14, U+2*N(%rsp) adcq %rdx, %r15 // Digit 3 of [u] xorl %r14d, %r14d movq U+3*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r15 adcq %rdx, %r14 movq V+3*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r15 movq %r15, U+3*N(%rsp) adcq %rdx, %r14 // Digit 4 of [u] xorl %r15d, %r15d movq U+4*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq V+4*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 movq %r14, U+4*N(%rsp) adcq %rdx, %r15 // Digits 5 and 6 of u (top is unsigned) movq U+5*N(%rsp), %rax xorq %r9, %rax andq %r8, %r9 negq %r9 mulq %r8 addq %rax, %r15 adcq %rdx, %r9 movq V+5*N(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %r9 mulq %r10 addq %rax, %r15 movq %r15, U+5*N(%rsp) adcq %rdx, %r9 movq %r9, U+6*N(%rsp) // Montgomery reduce u amontred(u) // Perform final strict reduction mod p_384 and copy to output movl $0xffffffff, %eax movq %rax, %rbx notq %rbx xorl %ebp, %ebp leaq -2(%rbp), %rcx leaq -1(%rbp), %rdx movq U(%rsp), %r8 subq %rax, %r8 movq U+N(%rsp), %r9 sbbq %rbx, %r9 movq U+2*N(%rsp), %r10 sbbq %rcx, %r10 movq U+3*N(%rsp), %r11 sbbq %rdx, %r11 movq U+4*N(%rsp), %r12 sbbq %rdx, %r12 movq U+5*N(%rsp), %r13 sbbq %rdx, %r13 cmovcq U(%rsp), %r8 cmovcq U+N(%rsp), %r9 cmovcq U+2*N(%rsp), %r10 cmovcq U+3*N(%rsp), %r11 cmovcq U+4*N(%rsp), %r12 cmovcq U+5*N(%rsp), %r13 movq res, %rdi movq %r8, (%rdi) movq %r9, N(%rdi) movq %r10, 2*N(%rdi) movq %r11, 3*N(%rdi) movq %r12, 4*N(%rdi) movq %r13, 5*N(%rdi) // Restore stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
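Editor's note: a hedged sketch of what bignum_montinv_p384 promises, skipping the divstep machinery entirely. For x coprime to p_384 it returns z < p_384 with x * z == 2^768 (mod p_384), and z = 0 when p_384 divides x. The Python below and its names (P384, montinv_p384_ref) are illustrative only; the real routine never computes a modular inverse this way.

# Reference contract of bignum_montinv_p384.
P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def montinv_p384_ref(x):
    if x % P384 == 0:
        return 0                                    # no solution; routine returns 0
    # z = x^-1 * 2^768 mod p_384, so x * z == 2^768 (mod p_384)
    return (pow(x, -1, P384) * pow(2, 768, P384)) % P384

if __name__ == "__main__":
    x = 0xDEADBEEF
    z = montinv_p384_ref(x)
    assert (x * z) % P384 == pow(2, 768, P384)
    # Viewing x, z as Montgomery forms of X, Z gives X * Z == 1 (mod p_384).
    r_inv = pow(2**384, -1, P384)
    assert ((x * r_inv) * (z * r_inv)) % P384 == 1
    print("ok")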
marvin-hansen/iggy-streaming-system
47,628
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/p384_montjmixadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point mixed addition on NIST curve P-384 in Montgomery-Jacobian coordinates // // extern void p384_montjmixadd // (uint64_t p3[static 18],uint64_t p1[static 18],uint64_t p2[static 12]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // The "mixed" part means that p2 only has x and y coordinates, with the // implicit z coordinate assumed to be the identity. // // Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2 // Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjmixadd) S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjmixadd) .text .balign 4 // Size of individual field elements #define NUMSIZE 48 // Pointer-offset pairs for inputs and outputs // These assume %rdi = p3, %rsi = p1 and %rcx = p2, // which needs to be set up explicitly before use. // However the %rdi value never changes. #define x_1 0(%rsi) #define y_1 NUMSIZE(%rsi) #define z_1 (2*NUMSIZE)(%rsi) #define x_2 0(%rcx) #define y_2 NUMSIZE(%rcx) #define x_3 0(%rdi) #define y_3 NUMSIZE(%rdi) #define z_3 (2*NUMSIZE)(%rdi) // Pointer-offset pairs for temporaries, with some aliasing // NSPACE is the total stack needed for these temporaries #define zp2 (NUMSIZE*0)(%rsp) #define ww (NUMSIZE*0)(%rsp) #define resx (NUMSIZE*0)(%rsp) #define yd (NUMSIZE*1)(%rsp) #define y2a (NUMSIZE*1)(%rsp) #define x2a (NUMSIZE*2)(%rsp) #define zzx2 (NUMSIZE*2)(%rsp) #define zz (NUMSIZE*3)(%rsp) #define t1 (NUMSIZE*3)(%rsp) #define t2 (NUMSIZE*4)(%rsp) #define zzx1 (NUMSIZE*4)(%rsp) #define resy (NUMSIZE*4)(%rsp) #define xd (NUMSIZE*5)(%rsp) #define resz (NUMSIZE*5)(%rsp) // Temporaries for the actual input pointers #define input_x (NUMSIZE*6)(%rsp) #define input_y (NUMSIZE*6+8)(%rsp) #define NSPACE (NUMSIZE*6+16) // Corresponds exactly to bignum_montmul_p384 #define montmul_p384(P0,P1,P2) \ movq P2, %rdx ; \ xorl %r15d, %r15d ; \ mulxq P1, %r8, %r9 ; \ mulxq 0x8+P1, %rbx, %r10 ; \ addq %rbx, %r9 ; \ mulxq 0x10+P1, %rbx, %r11 ; \ adcq %rbx, %r10 ; \ mulxq 0x18+P1, %rbx, %r12 ; \ adcq %rbx, %r11 ; \ mulxq 0x20+P1, %rbx, %r13 ; \ adcq %rbx, %r12 ; \ mulxq 0x28+P1, %rbx, %r14 ; \ adcq %rbx, %r13 ; \ adcq %r15, %r14 ; \ movq %r8, %rdx ; \ shlq $0x20, %rdx ; \ addq %r8, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r8, %rbx ; \ adcq %r8, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq %rbp, %r11 ; \ sbbq $0x0, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x8+P2, %rdx ; \ xorl %r8d, %r8d ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ adoxq %r8, %r15 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcq %rax, %r14 ; \ adcq %rbx, %r15 ; \ adcq %r8, %r8 ; \ movq %r9, %rdx ; \ 
shlq $0x20, %rdx ; \ addq %r9, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r9, %rbx ; \ adcq %r9, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r10 ; \ sbbq %rbx, %r11 ; \ sbbq %rbp, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %r14 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r15 ; \ adcq $0x0, %r8 ; \ movq 0x10+P2, %rdx ; \ xorl %r9d, %r9d ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ adoxq %r9, %r8 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcq %rax, %r15 ; \ adcq %rbx, %r8 ; \ adcq %r9, %r9 ; \ movq %r10, %rdx ; \ shlq $0x20, %rdx ; \ addq %r10, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r10, %rbx ; \ adcq %r10, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r11 ; \ sbbq %rbx, %r12 ; \ sbbq %rbp, %r13 ; \ sbbq $0x0, %r14 ; \ sbbq $0x0, %r15 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r8 ; \ adcq $0x0, %r9 ; \ movq 0x18+P2, %rdx ; \ xorl %r10d, %r10d ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r15 ; \ adoxq %rbx, %r8 ; \ adoxq %r10, %r9 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcq %rax, %r8 ; \ adcq %rbx, %r9 ; \ adcq %r10, %r10 ; \ movq %r11, %rdx ; \ shlq $0x20, %rdx ; \ addq %r11, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r11, %rbx ; \ adcq %r11, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r12 ; \ sbbq %rbx, %r13 ; \ sbbq %rbp, %r14 ; \ sbbq $0x0, %r15 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r9 ; \ adcq $0x0, %r10 ; \ movq 0x20+P2, %rdx ; \ xorl %r11d, %r11d ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r15 ; \ adoxq %rbx, %r8 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ adoxq %r11, %r10 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcq %rax, %r9 ; \ adcq %rbx, %r10 ; \ adcq %r11, %r11 ; \ movq %r12, %rdx ; \ shlq $0x20, %rdx ; \ addq %r12, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r12, %rbx ; \ adcq %r12, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r13 ; \ sbbq %rbx, %r14 ; \ sbbq %rbp, %r15 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r10 ; \ adcq $0x0, %r11 ; \ movq 0x28+P2, %rdx ; \ xorl %r12d, %r12d ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r15 ; \ adoxq %rbx, %r8 ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ adoxq 
%r12, %r11 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcq %rax, %r10 ; \ adcq %rbx, %r11 ; \ adcq %r12, %r12 ; \ movq %r13, %rdx ; \ shlq $0x20, %rdx ; \ addq %r13, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r13, %rbx ; \ adcq %r13, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r14 ; \ sbbq %rbx, %r15 ; \ sbbq %rbp, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %r10 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ xorl %edx, %edx ; \ xorl %ebp, %ebp ; \ xorl %r13d, %r13d ; \ movq $0xffffffff00000001, %rax ; \ addq %r14, %rax ; \ movl $0xffffffff, %ebx ; \ adcq %r15, %rbx ; \ movl $0x1, %ecx ; \ adcq %r8, %rcx ; \ adcq %r9, %rdx ; \ adcq %r10, %rbp ; \ adcq %r11, %r13 ; \ adcq $0x0, %r12 ; \ cmovne %rax, %r14 ; \ cmovne %rbx, %r15 ; \ cmovne %rcx, %r8 ; \ cmovne %rdx, %r9 ; \ cmovne %rbp, %r10 ; \ cmovne %r13, %r11 ; \ movq %r14, P0 ; \ movq %r15, 0x8+P0 ; \ movq %r8, 0x10+P0 ; \ movq %r9, 0x18+P0 ; \ movq %r10, 0x20+P0 ; \ movq %r11, 0x28+P0 // Corresponds exactly to bignum_montsqr_p384 #define montsqr_p384(P0,P1) \ movq P1, %rdx ; \ mulxq 0x8+P1, %r9, %r10 ; \ mulxq 0x18+P1, %r11, %r12 ; \ mulxq 0x28+P1, %r13, %r14 ; \ movq 0x18+P1, %rdx ; \ mulxq 0x20+P1, %r15, %rcx ; \ xorl %ebp, %ebp ; \ movq 0x10+P1, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ movq 0x8+P1, %rdx ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ adcxq %rbp, %r15 ; \ adoxq %rbp, %rcx ; \ adcq %rbp, %rcx ; \ xorl %ebp, %ebp ; \ movq 0x20+P1, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ movq 0x10+P1, %rdx ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ mulxq 0x28+P1, %rax, %rdx ; \ adcxq %rax, %r15 ; \ adoxq %rdx, %rcx ; \ movq 0x28+P1, %rdx ; \ mulxq 0x20+P1, %rbx, %rbp ; \ mulxq 0x18+P1, %rax, %rdx ; \ adcxq %rax, %rcx ; \ adoxq %rdx, %rbx ; \ movl $0x0, %eax ; \ adcxq %rax, %rbx ; \ adoxq %rax, %rbp ; \ adcq %rax, %rbp ; \ xorq %rax, %rax ; \ movq P1, %rdx ; \ mulxq P1, %r8, %rax ; \ adcxq %r9, %r9 ; \ adoxq %rax, %r9 ; \ movq 0x8+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r10, %r10 ; \ adoxq %rax, %r10 ; \ adcxq %r11, %r11 ; \ adoxq %rdx, %r11 ; \ movq 0x10+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r12, %r12 ; \ adoxq %rax, %r12 ; \ adcxq %r13, %r13 ; \ adoxq %rdx, %r13 ; \ movq 0x18+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r14, %r14 ; \ adoxq %rax, %r14 ; \ adcxq %r15, %r15 ; \ adoxq %rdx, %r15 ; \ movq 0x20+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %rcx, %rcx ; \ adoxq %rax, %rcx ; \ adcxq %rbx, %rbx ; \ adoxq %rdx, %rbx ; \ movq 0x28+P1, %rdx ; \ mulxq %rdx, %rax, %rsi ; \ adcxq %rbp, %rbp ; \ adoxq %rax, %rbp ; \ movl $0x0, %eax ; \ adcxq %rax, %rsi ; \ adoxq %rax, %rsi ; \ movq %rbx, P0 ; \ movq %r8, %rdx ; \ shlq $0x20, %rdx ; \ addq %r8, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r8, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r8 ; \ addq %rbx, %rax ; \ adcq %rdx, %r8 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r9 ; \ sbbq %r8, %r10 ; \ sbbq %rbx, %r11 ; \ sbbq $0x0, %r12 ; \ sbbq $0x0, %r13 ; \ movq %rdx, %r8 ; \ sbbq $0x0, %r8 ; \ movq %r9, %rdx ; \ shlq 
$0x20, %rdx ; \ addq %r9, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r9, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r9 ; \ addq %rbx, %rax ; \ adcq %rdx, %r9 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r10 ; \ sbbq %r9, %r11 ; \ sbbq %rbx, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %r8 ; \ movq %rdx, %r9 ; \ sbbq $0x0, %r9 ; \ movq %r10, %rdx ; \ shlq $0x20, %rdx ; \ addq %r10, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r10, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r10 ; \ addq %rbx, %rax ; \ adcq %rdx, %r10 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r11 ; \ sbbq %r10, %r12 ; \ sbbq %rbx, %r13 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %r9 ; \ movq %rdx, %r10 ; \ sbbq $0x0, %r10 ; \ movq %r11, %rdx ; \ shlq $0x20, %rdx ; \ addq %r11, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r11, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r11 ; \ addq %rbx, %rax ; \ adcq %rdx, %r11 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r12 ; \ sbbq %r11, %r13 ; \ sbbq %rbx, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %r10 ; \ movq %rdx, %r11 ; \ sbbq $0x0, %r11 ; \ movq %r12, %rdx ; \ shlq $0x20, %rdx ; \ addq %r12, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r12, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r12 ; \ addq %rbx, %rax ; \ adcq %rdx, %r12 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r13 ; \ sbbq %r12, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq $0x0, %r10 ; \ sbbq $0x0, %r11 ; \ movq %rdx, %r12 ; \ sbbq $0x0, %r12 ; \ movq %r13, %rdx ; \ shlq $0x20, %rdx ; \ addq %r13, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r13, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r13 ; \ addq %rbx, %rax ; \ adcq %rdx, %r13 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r8 ; \ sbbq %r13, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq $0x0, %r11 ; \ sbbq $0x0, %r12 ; \ movq %rdx, %r13 ; \ sbbq $0x0, %r13 ; \ movq P0, %rbx ; \ addq %r8, %r14 ; \ adcq %r9, %r15 ; \ adcq %r10, %rcx ; \ adcq %r11, %rbx ; \ adcq %r12, %rbp ; \ adcq %r13, %rsi ; \ movl $0x0, %r8d ; \ adcq %r8, %r8 ; \ xorq %r11, %r11 ; \ xorq %r12, %r12 ; \ xorq %r13, %r13 ; \ movq $0xffffffff00000001, %rax ; \ addq %r14, %rax ; \ movl $0xffffffff, %r9d ; \ adcq %r15, %r9 ; \ movl $0x1, %r10d ; \ adcq %rcx, %r10 ; \ adcq %rbx, %r11 ; \ adcq %rbp, %r12 ; \ adcq %rsi, %r13 ; \ adcq $0x0, %r8 ; \ cmovne %rax, %r14 ; \ cmovne %r9, %r15 ; \ cmovne %r10, %rcx ; \ cmovne %r11, %rbx ; \ cmovne %r12, %rbp ; \ cmovne %r13, %rsi ; \ movq %r14, P0 ; \ movq %r15, 0x8+P0 ; \ movq %rcx, 0x10+P0 ; \ movq %rbx, 0x18+P0 ; \ movq %rbp, 0x20+P0 ; \ movq %rsi, 0x28+P0 // Almost-Montgomery variant which we use when an input to other muls // with the other argument fully reduced (which is always safe). 
#define amontsqr_p384(P0,P1) \ movq P1, %rdx ; \ mulxq 0x8+P1, %r9, %r10 ; \ mulxq 0x18+P1, %r11, %r12 ; \ mulxq 0x28+P1, %r13, %r14 ; \ movq 0x18+P1, %rdx ; \ mulxq 0x20+P1, %r15, %rcx ; \ xorl %ebp, %ebp ; \ movq 0x10+P1, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ movq 0x8+P1, %rdx ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ adcxq %rbp, %r15 ; \ adoxq %rbp, %rcx ; \ adcq %rbp, %rcx ; \ xorl %ebp, %ebp ; \ movq 0x20+P1, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ movq 0x10+P1, %rdx ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ mulxq 0x28+P1, %rax, %rdx ; \ adcxq %rax, %r15 ; \ adoxq %rdx, %rcx ; \ movq 0x28+P1, %rdx ; \ mulxq 0x20+P1, %rbx, %rbp ; \ mulxq 0x18+P1, %rax, %rdx ; \ adcxq %rax, %rcx ; \ adoxq %rdx, %rbx ; \ movl $0x0, %eax ; \ adcxq %rax, %rbx ; \ adoxq %rax, %rbp ; \ adcq %rax, %rbp ; \ xorq %rax, %rax ; \ movq P1, %rdx ; \ mulxq P1, %r8, %rax ; \ adcxq %r9, %r9 ; \ adoxq %rax, %r9 ; \ movq 0x8+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r10, %r10 ; \ adoxq %rax, %r10 ; \ adcxq %r11, %r11 ; \ adoxq %rdx, %r11 ; \ movq 0x10+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r12, %r12 ; \ adoxq %rax, %r12 ; \ adcxq %r13, %r13 ; \ adoxq %rdx, %r13 ; \ movq 0x18+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r14, %r14 ; \ adoxq %rax, %r14 ; \ adcxq %r15, %r15 ; \ adoxq %rdx, %r15 ; \ movq 0x20+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %rcx, %rcx ; \ adoxq %rax, %rcx ; \ adcxq %rbx, %rbx ; \ adoxq %rdx, %rbx ; \ movq 0x28+P1, %rdx ; \ mulxq %rdx, %rax, %rsi ; \ adcxq %rbp, %rbp ; \ adoxq %rax, %rbp ; \ movl $0x0, %eax ; \ adcxq %rax, %rsi ; \ adoxq %rax, %rsi ; \ movq %rbx, P0 ; \ movq %r8, %rdx ; \ shlq $0x20, %rdx ; \ addq %r8, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r8, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r8 ; \ addq %rbx, %rax ; \ adcq %rdx, %r8 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r9 ; \ sbbq %r8, %r10 ; \ sbbq %rbx, %r11 ; \ sbbq $0x0, %r12 ; \ sbbq $0x0, %r13 ; \ movq %rdx, %r8 ; \ sbbq $0x0, %r8 ; \ movq %r9, %rdx ; \ shlq $0x20, %rdx ; \ addq %r9, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r9, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r9 ; \ addq %rbx, %rax ; \ adcq %rdx, %r9 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r10 ; \ sbbq %r9, %r11 ; \ sbbq %rbx, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %r8 ; \ movq %rdx, %r9 ; \ sbbq $0x0, %r9 ; \ movq %r10, %rdx ; \ shlq $0x20, %rdx ; \ addq %r10, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r10, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r10 ; \ addq %rbx, %rax ; \ adcq %rdx, %r10 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r11 ; \ sbbq %r10, %r12 ; \ sbbq %rbx, %r13 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %r9 ; \ movq %rdx, %r10 ; \ sbbq $0x0, %r10 ; \ movq %r11, %rdx ; \ shlq $0x20, %rdx ; \ addq %r11, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r11, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r11 ; \ addq %rbx, %rax ; \ adcq %rdx, %r11 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r12 ; \ sbbq %r11, %r13 ; \ sbbq %rbx, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, 
%r10 ; \ movq %rdx, %r11 ; \ sbbq $0x0, %r11 ; \ movq %r12, %rdx ; \ shlq $0x20, %rdx ; \ addq %r12, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r12, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r12 ; \ addq %rbx, %rax ; \ adcq %rdx, %r12 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r13 ; \ sbbq %r12, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq $0x0, %r10 ; \ sbbq $0x0, %r11 ; \ movq %rdx, %r12 ; \ sbbq $0x0, %r12 ; \ movq %r13, %rdx ; \ shlq $0x20, %rdx ; \ addq %r13, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r13, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r13 ; \ addq %rbx, %rax ; \ adcq %rdx, %r13 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r8 ; \ sbbq %r13, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq $0x0, %r11 ; \ sbbq $0x0, %r12 ; \ movq %rdx, %r13 ; \ sbbq $0x0, %r13 ; \ movq P0, %rbx ; \ addq %r8, %r14 ; \ adcq %r9, %r15 ; \ adcq %r10, %rcx ; \ adcq %r11, %rbx ; \ adcq %r12, %rbp ; \ adcq %r13, %rsi ; \ movl $0x0, %r8d ; \ movq $0xffffffff00000001, %rax ; \ movl $0xffffffff, %r9d ; \ movl $0x1, %r10d ; \ cmovnc %r8, %rax ; \ cmovnc %r8, %r9 ; \ cmovnc %r8, %r10 ; \ addq %rax, %r14 ; \ adcq %r9, %r15 ; \ adcq %r10, %rcx ; \ adcq %r8, %rbx ; \ adcq %r8, %rbp ; \ adcq %r8, %rsi ; \ movq %r14, P0 ; \ movq %r15, 0x8+P0 ; \ movq %rcx, 0x10+P0 ; \ movq %rbx, 0x18+P0 ; \ movq %rbp, 0x20+P0 ; \ movq %rsi, 0x28+P0 // Corresponds exactly to bignum_sub_p384 #define sub_p384(P0,P1,P2) \ movq P1, %rax ; \ subq P2, %rax ; \ movq 0x8+P1, %rdx ; \ sbbq 0x8+P2, %rdx ; \ movq 0x10+P1, %r8 ; \ sbbq 0x10+P2, %r8 ; \ movq 0x18+P1, %r9 ; \ sbbq 0x18+P2, %r9 ; \ movq 0x20+P1, %r10 ; \ sbbq 0x20+P2, %r10 ; \ movq 0x28+P1, %r11 ; \ sbbq 0x28+P2, %r11 ; \ sbbq %rcx, %rcx ; \ movl $0xffffffff, %esi ; \ andq %rsi, %rcx ; \ xorq %rsi, %rsi ; \ subq %rcx, %rsi ; \ subq %rsi, %rax ; \ movq %rax, P0 ; \ sbbq %rcx, %rdx ; \ movq %rdx, 0x8+P0 ; \ sbbq %rax, %rax ; \ andq %rsi, %rcx ; \ negq %rax; \ sbbq %rcx, %r8 ; \ movq %r8, 0x10+P0 ; \ sbbq $0x0, %r9 ; \ movq %r9, 0x18+P0 ; \ sbbq $0x0, %r10 ; \ movq %r10, 0x20+P0 ; \ sbbq $0x0, %r11 ; \ movq %r11, 0x28+P0 // Additional macros to help with final multiplexing #define testzero6(P) \ movq P, %rax ; \ movq 8+P, %rdx ; \ orq 16+P, %rax ; \ orq 24+P, %rdx ; \ orq 32+P, %rax ; \ orq 40+P, %rdx ; \ orq %rdx, %rax #define mux6(r0,r1,r2,r3,r4,r5,PNE,PEQ) \ movq PEQ, %rax ; \ movq PNE, r0 ; \ cmovzq %rax, r0 ; \ movq 8+PEQ, %rax ; \ movq 8+PNE, r1 ; \ cmovzq %rax, r1 ; \ movq 16+PEQ, %rax ; \ movq 16+PNE, r2 ; \ cmovzq %rax, r2 ; \ movq 24+PEQ, %rax ; \ movq 24+PNE, r3 ; \ cmovzq %rax, r3 ; \ movq 32+PEQ, %rax ; \ movq 32+PNE, r4 ; \ cmovzq %rax, r4 ; \ movq 40+PEQ, %rax ; \ movq 40+PNE, r5 ; \ cmovzq %rax, r5 #define load6(r0,r1,r2,r3,r4,r5,P) \ movq P, r0 ; \ movq 8+P, r1 ; \ movq 16+P, r2 ; \ movq 24+P, r3 ; \ movq 32+P, r4 ; \ movq 40+P, r5 #define store6(P,r0,r1,r2,r3,r4,r5) \ movq r0, P ; \ movq r1, 8+P ; \ movq r2, 16+P ; \ movq r3, 24+P ; \ movq r4, 32+P ; \ movq r5, 40+P S2N_BN_SYMBOL(p384_montjmixadd): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save registers and make room on stack for temporary variables // Put the input arguments in non-volatile places on the stack pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp movq %rsi, input_x movq %rdx, input_y // Main code, just a sequence of basic field operations // 8 * multiply + 3 * square + 7 * subtract amontsqr_p384(zp2,z_1) movq input_x, %rsi movq input_y, %rcx 
montmul_p384(y2a,z_1,y_2) movq input_y, %rcx montmul_p384(x2a,zp2,x_2) montmul_p384(y2a,zp2,y2a) movq input_x, %rsi sub_p384(xd,x2a,x_1) movq input_x, %rsi sub_p384(yd,y2a,y_1) amontsqr_p384(zz,xd) montsqr_p384(ww,yd) movq input_x, %rsi montmul_p384(zzx1,zz,x_1) montmul_p384(zzx2,zz,x2a) sub_p384(resx,ww,zzx1) sub_p384(t1,zzx2,zzx1) movq input_x, %rsi montmul_p384(resz,xd,z_1) sub_p384(resx,resx,zzx2) sub_p384(t2,zzx1,resx) movq input_x, %rsi montmul_p384(t1,t1,y_1) montmul_p384(t2,yd,t2) sub_p384(resy,t2,t1) // Test if z_1 = 0 to decide if p1 = 0 (up to projective equivalence) movq input_x, %rsi testzero6(z_1) // Multiplex: if p1 <> 0 just copy the computed result from the staging area. // If p1 = 0 then return the point p2 augmented with a z = 1 coordinate (in // Montgomery form so not the simple constant 1 but rather 2^384 - p_384), // hence giving 0 + p2 = p2 for the final result. movq input_y, %rcx mux6(%r8,%r9,%r10,%r11,%rbx,%rbp,resx,x_2) mux6(%r12,%r13,%r14,%r15,%rdx,%rcx,resy,y_2) store6(x_3,%r8,%r9,%r10,%r11,%rbx,%rbp) store6(y_3,%r12,%r13,%r14,%r15,%rdx,%rcx) load6(%r8,%r9,%r10,%r11,%rbx,%rbp,resz) movq $0xffffffff00000001, %rax cmovzq %rax, %r8 movl $0x00000000ffffffff, %eax cmovzq %rax, %r9 movq $1, %rax cmovzq %rax, %r10 movl $0, %eax cmovzq %rax, %r11 cmovzq %rax, %rbx cmovzq %rax, %rbp store6(z_3,%r8,%r9,%r10,%r11,%rbx,%rbp) // Restore stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
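The run of macro calls above is the standard mixed Jacobian addition spelled out as 8 multiplications, 3 squarings and 7 subtractions over the Montgomery-domain field. The following Python sketch mirrors that exact call sequence so it can be read and tested side by side with the assembly; the names mont_mul, mont_sqr and sub_p are illustrative stand-ins for the montmul_p384, (a)montsqr_p384 and sub_p384 macros, Python 3.8+ is assumed for the modular inverse, and, like the assembly, the sketch does not special-case p1 = +/-p2.

    P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
    R_INV = pow(2**384, -1, P384)                        # 2^-384 mod p_384

    def mont_mul(a, b): return (a * b * R_INV) % P384    # models montmul_p384
    def mont_sqr(a):    return mont_mul(a, a)            # models (a)montsqr_p384
    def sub_p(a, b):    return (a - b) % P384            # models sub_p384

    def montjmixadd(x1, y1, z1, x2, y2):
        # Same order of operations as the macro calls above.
        zp2  = mont_sqr(z1)
        y2a  = mont_mul(z1, y2)
        x2a  = mont_mul(zp2, x2)
        y2a  = mont_mul(zp2, y2a)
        xd   = sub_p(x2a, x1)
        yd   = sub_p(y2a, y1)
        zz   = mont_sqr(xd)
        ww   = mont_sqr(yd)
        zzx1 = mont_mul(zz, x1)
        zzx2 = mont_mul(zz, x2a)
        resx = sub_p(ww, zzx1)
        t1   = sub_p(zzx2, zzx1)
        resz = mont_mul(xd, z1)
        resx = sub_p(resx, zzx2)
        t2   = sub_p(zzx1, resx)
        t1   = mont_mul(t1, y1)
        t2   = mont_mul(yd, t2)
        resy = sub_p(t2, t1)
        # z_1 = 0 means p1 is the point at infinity: return p2 with z = 1,
        # i.e. 2^384 mod p_384 in the Montgomery domain.
        if z1 == 0:
            return x2, y2, 2**384 - P384
        return resx, resy, resz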
marvin-hansen/iggy-streaming-system
3,550
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_triple_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Triple modulo p_384, z := (3 * x) mod p_384 // Input x[6]; output z[6] // // extern void bignum_triple_p384 // (uint64_t z[static 6], uint64_t x[static 6]); // // The input x can be any 6-digit bignum, not necessarily reduced modulo p_384, // and the result is always fully reduced, i.e. z = (3 * x) mod p_384. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_p384) .text #define z %rdi #define x %rsi #define d0 %r8 #define d1 %r9 #define d2 %r10 #define d3 %r11 #define d4 %rbx #define d5 %rsi #define a %rax #define c %rcx #define q %rdx #define ashort %eax #define qshort %edx S2N_BN_SYMBOL(bignum_triple_p384): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // We seem to need (just!) one extra register, which we need to save and restore pushq %rbx // Multiply, accumulating the result as 2^384 * h + [d5;d4;d3;d2;d1;d0] // but actually immediately producing q = h + 1, our quotient approximation, // by adding 1 to it. xorl ashort, ashort movq (x), q movq q, d0 adcxq q, q adoxq q, d0 movq 8(x), q movq q, d1 adcxq q, q adoxq q, d1 movq 16(x), q movq q, d2 adcxq q, q adoxq q, d2 movq 24(x), q movq q, d3 adcxq q, q adoxq q, d3 movq 32(x), q movq q, d4 adcxq q, q adoxq q, d4 movq 40(x), q movq q, d5 adcxq q, q adoxq q, d5 movl $1, qshort adcxq a, q adoxq a, q // Initial subtraction of z - q * p_384, with bitmask c for the carry // Actually done as an addition of (z - 2^384 * h) + q * (2^384 - p_384) // which, because q = h + 1, is exactly 2^384 + (z - q * p_384), and // therefore CF <=> 2^384 + (z - q * p_384) >= 2^384 <=> z >= q * p_384. movq q, c shlq $32, c movq q, a subq c, a sbbq $0, c addq a, d0 adcq c, d1 adcq q, d2 adcq $0, d3 adcq $0, d4 adcq $0, d5 sbbq c, c notq c // Now use that mask for a masked addition of p_384, which again is in // fact done by a masked subtraction of 2^384 - p_384, so that we only // have three nonzero digits and so can avoid using another register. movl $0x00000000ffffffff, qshort xorl ashort, ashort andq c, q subq q, a negq c subq a, d0 movq d0, (z) sbbq q, d1 movq d1, 8(z) sbbq c, d2 movq d2, 16(z) sbbq $0, d3 movq d3, 24(z) sbbq $0, d4 movq d4, 32(z) sbbq $0, d5 movq d5, 40(z) // Return popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
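The comments above describe the whole routine: form 3*x as 2^384*h + d, take q = h + 1 as a quotient estimate, add q*(2^384 - p_384) so the carry out of 2^384 reports whether 3*x >= q*p_384, and add p_384 back when the estimate overshot. A short Python model of that reasoning (triple_p384 here is an illustrative model, not the assembly):

    P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

    def triple_p384(x):
        assert 0 <= x < 2**384             # any 6-digit input, reduced or not
        h, d = divmod(3 * x, 2**384)       # 3*x = 2^384*h + d
        q = h + 1                          # quotient estimate
        s = d + q * (2**384 - P384)        # equals 2^384 + (3*x - q*p_384)
        if s >= 2**384:                    # "CF set": the estimate was exact
            return s - 2**384
        return s - (2**384 - P384)         # overshot by one: add p_384 back

    for x in (0, 1, P384 - 1, 2**384 - 1, 123456789 << 300):
        assert triple_p384(x) == (3 * x) % P384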
marvin-hansen/iggy-streaming-system
9,273
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_montsqr_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery square, z := (x^2 / 2^384) mod p_384 // Input x[6]; output z[6] // // extern void bignum_montsqr_p384 // (uint64_t z[static 6], uint64_t x[static 6]); // // Does z := (x^2 / 2^384) mod p_384, assuming x^2 <= 2^384 * p_384, which is // guaranteed in particular if x < p_384 initially (the "intended" case). // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p384) .text #define z %rdi #define x %rsi // Some temp registers for the last correction stage #define d %rax #define u %rdx #define v %r10 #define w %r11 // A zero register, very often #define zero %rbp #define zeroe %ebp // Add %rdx * m into a register-pair (high,low) // maintaining consistent double-carrying with adcx and adox, // using %rax and %rbx as temporaries #define mulpadd(high,low,m) \ mulxq m, %rax, %rbx ; \ adcxq %rax, low ; \ adoxq %rbx, high // Core one-step "short" Montgomery reduction macro. Takes input in // [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1], // adding to the existing [d5;d4;d3;d2;d1] and re-using d0 as a // temporary internally, as well as %rax, %rbx and %rdx. // It is OK for d6 and d0 to be the same register (they often are) // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 // // montreds(d6,d5,d4,d3,d2,d1,d0) #define montreds(d6,d5,d4,d3,d2,d1,d0) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ movq d0, %rdx ; \ shlq $32, %rdx ; \ addq d0, %rdx ; \ /* Construct [%rbx;d0;%rax;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel so we can re-use d0 */ \ /* and %rbx as temps. */ \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, d0, %rax ; \ movl $0x00000000ffffffff, %ebx ; \ mulxq %rbx, %rbx, d0 ; \ addq %rbx, %rax ; \ adcq %rdx, d0 ; \ movl $0, %ebx ; \ adcq %rbx, %rbx ; \ /* Now subtract that and add 2^384 * w */ \ subq %rax, d1 ; \ sbbq d0, d2 ; \ sbbq %rbx, d3 ; \ sbbq $0, d4 ; \ sbbq $0, d5 ; \ movq %rdx, d6 ; \ sbbq $0, d6 S2N_BN_SYMBOL(bignum_montsqr_p384): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // Save more registers to play with pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 // Set up an initial window [%rcx;%r15;...%r9] = [34;05;03;01] // Note that we are using %rcx as the first step past the rotating window movq (x), %rdx mulxq 8(x), %r9, %r10 mulxq 24(x), %r11, %r12 mulxq 40(x), %r13, %r14 movq 24(x), %rdx mulxq 32(x), %r15, %rcx // Clear our zero register, and also initialize the flags for the carry chain xorl zeroe, zeroe // Chain in the addition of 02 + 12 + 13 + 14 + 15 to that window // (no carry-out possible) movq 16(x), %rdx mulpadd(%r11,%r10,(x)) mulpadd(%r12,%r11,8(x)) movq 8(x), %rdx mulpadd(%r13,%r12,24(x)) mulpadd(%r14,%r13,32(x)) mulpadd(%r15,%r14,40(x)) adcxq zero, %r15 adoxq zero, %rcx adcq zero, %rcx // Again zero out the flags. 
Actually they are already cleared but it may // help decouple these in the OOO engine not to wait for the chain above xorl zeroe, zeroe // Now chain in the 04 + 23 + 24 + 25 + 35 + 45 terms // We are running out of registers in our rotating window, so we start // using %rbx (and hence need care with using mulpadd after this). Thus // our result so far is in [%rbp;%rbx;%rcx;%r15;...%r9] movq 32(x), %rdx mulpadd(%r13,%r12,(x)) movq 16(x), %rdx mulpadd(%r14,%r13,24(x)) mulpadd(%r15,%r14,32(x)) mulxq 40(x), %rax, %rdx adcxq %rax, %r15 adoxq %rdx, %rcx // First set up the last couple of spots in our window, [%rbp;%rbx] = 45 // then add the last other term 35 movq 40(x), %rdx mulxq 32(x), %rbx, %rbp mulxq 24(x), %rax, %rdx adcxq %rax, %rcx adoxq %rdx, %rbx movl $0, %eax adcxq %rax, %rbx adoxq %rax, %rbp adcq %rax, %rbp // Just for a clear fresh start for the flags; we don't use the zero xorq %rax, %rax // Double and add to the 00 + 11 + 22 + 33 + 44 + 55 terms // For one glorious moment the entire squaring result is all in the // register file as [%rsi;%rbp;%rbx;%rcx;%r15;...;%r8] // (since we've now finished with x we can re-use %rsi) movq (x), %rdx mulxq (x), %r8, %rax adcxq %r9, %r9 adoxq %rax, %r9 movq 8(x), %rdx mulxq %rdx, %rax, %rdx adcxq %r10, %r10 adoxq %rax, %r10 adcxq %r11, %r11 adoxq %rdx, %r11 movq 16(x), %rdx mulxq %rdx, %rax, %rdx adcxq %r12, %r12 adoxq %rax, %r12 adcxq %r13, %r13 adoxq %rdx, %r13 movq 24(x), %rdx mulxq %rdx, %rax, %rdx adcxq %r14, %r14 adoxq %rax, %r14 adcxq %r15, %r15 adoxq %rdx, %r15 movq 32(x), %rdx mulxq %rdx, %rax, %rdx adcxq %rcx, %rcx adoxq %rax, %rcx adcxq %rbx, %rbx adoxq %rdx, %rbx movq 40(x), %rdx mulxq %rdx, %rax, %rsi adcxq %rbp, %rbp adoxq %rax, %rbp movl $0, %eax adcxq %rax, %rsi adoxq %rax, %rsi // We need just *one* more register as a temp for the Montgomery steps. // Since we are writing to the z buffer anyway, make use of that to stash %rbx. movq %rbx, (z) // Montgomery reduce the %r13,...,%r8 window 6 times montreds(%r8,%r13,%r12,%r11,%r10,%r9,%r8) montreds(%r9,%r8,%r13,%r12,%r11,%r10,%r9) montreds(%r10,%r9,%r8,%r13,%r12,%r11,%r10) montreds(%r11,%r10,%r9,%r8,%r13,%r12,%r11) montreds(%r12,%r11,%r10,%r9,%r8,%r13,%r12) montreds(%r13,%r12,%r11,%r10,%r9,%r8,%r13) // Now we can safely restore %rbx before accumulating movq (z), %rbx addq %r8, %r14 adcq %r9, %r15 adcq %r10, %rcx adcq %r11, %rbx adcq %r12, %rbp adcq %r13, %rsi movl $0, %r8d adcq %r8, %r8 // We now have a pre-reduced 7-word form z = [%r8; %rsi;%rbp;%rbx;%rcx;%r15;%r14] // Next, accumulate in different registers z - p_384, or more precisely // // [%r8; %r13;%r12;%r11;%r10;%r9;%rax] = z + (2^384 - p_384) xorq %r11, %r11 xorq %r12, %r12 xorq %r13, %r13 movq $0xffffffff00000001, %rax addq %r14, %rax movl $0x00000000ffffffff, %r9d adcq %r15, %r9 movl $0x0000000000000001, %r10d adcq %rcx, %r10 adcq %rbx, %r11 adcq %rbp, %r12 adcq %rsi, %r13 adcq $0, %r8 // ~ZF <=> %r12 >= 1 <=> z + (2^384 - p_384) >= 2^384 <=> z >= p_384, which // determines whether to use the further reduced argument or the original z. cmovnzq %rax, %r14 cmovnzq %r9, %r15 cmovnzq %r10, %rcx cmovnzq %r11, %rbx cmovnzq %r12, %rbp cmovnzq %r13, %rsi // Write back the result movq %r14, (z) movq %r15, 8(z) movq %rcx, 16(z) movq %rbx, 24(z) movq %rbp, 32(z) movq %rsi, 40(z) // Restore registers and return popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
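The montreds macro above is one word of Montgomery reduction: since p_384 is congruent to 2^32 - 1 modulo 2^64, the multiplier w = (d0 + (d0 << 32)) mod 2^64 satisfies w * p_384 = -d0 (mod 2^64), so adding w * p_384 clears the bottom word and the value can be shifted down 64 bits. Six such steps plus one conditional subtraction give the Montgomery square. A small Python model (names are illustrative; Python 3.8+ assumed for the inverse in the final check):

    P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
    MASK64 = 2**64 - 1

    def montreds(d):
        # One-word Montgomery step with the same multiplier choice as the macro.
        d0 = d & MASK64
        w = (d0 + (d0 << 32)) & MASK64       # w * p_384 == -d0 (mod 2^64)
        t = d + w * P384
        assert t & MASK64 == 0               # bottom word cancels by construction
        return t >> 64

    def montsqr_p384_model(x):
        # z = x^2 / 2^384 mod p_384, assuming x^2 <= 2^384 * p_384 as in the header.
        t = x * x
        for _ in range(6):
            t = montreds(t)
        return t - P384 if t >= P384 else t  # final correction; t < 2*p_384 here

    x = 0x1234567890abcdef
    assert montsqr_p384_model(x) == (x * x * pow(2**384, -1, P384)) % P384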
marvin-hansen/iggy-streaming-system
3,428
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_double_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Double modulo p_384, z := (2 * x) mod p_384, assuming x reduced // Input x[6]; output z[6] // // extern void bignum_double_p384 // (uint64_t z[static 6], uint64_t x[static 6]); // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_double_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_double_p384) .text #define z %rdi #define x %rsi #define d0 %rdx #define d1 %rcx #define d2 %r8 #define d3 %r9 #define d4 %r10 #define d5 %r11 #define c %rax // Re-use the input pointer as a temporary once we're done #define a %rsi #define ashort %esi S2N_BN_SYMBOL(bignum_double_p384): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // Load the input and double it so that 2^384 * c + [d5;d4;d3;d2;d1;d0] = 2 * x // Could also consider using shld to decouple carries *or* combining this // and the next block into a double carry chain with ADCX and ADOX. xorq c, c movq (x), d0 addq d0, d0 movq 8(x), d1 adcq d1, d1 movq 16(x), d2 adcq d2, d2 movq 24(x), d3 adcq d3, d3 movq 32(x), d4 adcq d4, d4 movq 40(x), d5 adcq d5, d5 adcq c, c // Now subtract p_384 from 2^384 * c + [d5;d4;d3;d2;d1;d0] to get 2 * x - p_384 // This is actually done by *adding* the 7-word negation r_384 = 2^448 - p_384 // where r_384 = [-1; 0; 0; 0; 1; 0x00000000ffffffff; 0xffffffff00000001] movq $0xffffffff00000001, a addq a, d0 movl $0x00000000ffffffff, ashort adcq a, d1 adcq $1, d2 adcq $0, d3 adcq $0, d4 adcq $0, d5 adcq $-1, c // Since by hypothesis x < p_384 we know 2 * x - p_384 < 2^384, so the top // carry c actually gives us a bitmask for 2 * x - p_384 < 0, which we // now use to make r' = mask * (2^384 - p_384) for a compensating subtraction. // We don't quite have enough ABI-modifiable registers to create all three // nonzero digits of r while maintaining d0..d5, but make the first two now. andq a, c // c = masked 0x00000000ffffffff xorq a, a subq c, a // a = masked 0xffffffff00000001 // Do the first two digits of addition and writeback subq a, d0 movq d0, (z) sbbq c, d1 movq d1, 8(z) // Preserve the carry chain while creating the extra masked digit since // the logical operation will clear CF sbbq d0, d0 andq a, c // c = masked 0x0000000000000001 negq d0 // Do the rest of the addition and writeback sbbq c, d2 movq d2, 16(z) sbbq $0, d3 movq d3, 24(z) sbbq $0, d4 movq d4, 32(z) sbbq $0, d5 movq d5, 40(z) #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
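A compact model of the control flow described in the comments above: double, add the 7-word negation 2^448 - p_384, and use the missing top carry as the signal that 2*x - p_384 went negative and p_384 must be compensated back in. The real code of course does this word by word with masks; double_p384 below is only an illustrative sketch.

    P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

    def double_p384(x):
        assert 0 <= x < P384                 # input assumed reduced, as in the header
        t = 2 * x + (2**448 - P384)          # add r_384, the 7-word negation of p_384
        low = t % 2**384                     # the six result words
        if t < 2**448:                       # top carry clear <=> 2*x - p_384 < 0
            low -= 2**384 - P384             # compensating subtraction, leaving 2*x
        return low

    for x in (0, 1, P384 // 2, P384 - 1):
        assert double_p384(x) == (2 * x) % P384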
marvin-hansen/iggy-streaming-system
42,735
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/p384_montjmixadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point mixed addition on NIST curve P-384 in Montgomery-Jacobian coordinates // // extern void p384_montjmixadd_alt // (uint64_t p3[static 18],uint64_t p1[static 18],uint64_t p2[static 12]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // The "mixed" part means that p2 only has x and y coordinates, with the // implicit z coordinate assumed to be the identity. // // Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2 // Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjmixadd_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjmixadd_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 48 // Pointer-offset pairs for inputs and outputs // These assume %rdi = p3, %rsi = p1 and %rcx = p2, // which needs to be set up explicitly before use. // However the %rdi value never changes. #define x_1 0(%rsi) #define y_1 NUMSIZE(%rsi) #define z_1 (2*NUMSIZE)(%rsi) #define x_2 0(%rcx) #define y_2 NUMSIZE(%rcx) #define x_3 0(%rdi) #define y_3 NUMSIZE(%rdi) #define z_3 (2*NUMSIZE)(%rdi) // Pointer-offset pairs for temporaries, with some aliasing // NSPACE is the total stack needed for these temporaries #define zp2 (NUMSIZE*0)(%rsp) #define ww (NUMSIZE*0)(%rsp) #define resx (NUMSIZE*0)(%rsp) #define yd (NUMSIZE*1)(%rsp) #define y2a (NUMSIZE*1)(%rsp) #define x2a (NUMSIZE*2)(%rsp) #define zzx2 (NUMSIZE*2)(%rsp) #define zz (NUMSIZE*3)(%rsp) #define t1 (NUMSIZE*3)(%rsp) #define t2 (NUMSIZE*4)(%rsp) #define zzx1 (NUMSIZE*4)(%rsp) #define resy (NUMSIZE*4)(%rsp) #define xd (NUMSIZE*5)(%rsp) #define resz (NUMSIZE*5)(%rsp) // Temporaries for the actual input pointers #define input_x (NUMSIZE*6)(%rsp) #define input_y (NUMSIZE*6+8)(%rsp) #define NSPACE (NUMSIZE*6+16) // Corresponds exactly to bignum_montmul_p384_alt #define montmul_p384(P0,P1,P2) \ movq P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ movq %rax, %r8 ; \ movq %rdx, %r9 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ xorl %r10d, %r10d ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ xorl %r11d, %r11d ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ xorl %r12d, %r12d ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ xorl %r13d, %r13d ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ xorl %r14d, %r14d ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ xorl %r15d, %r15d ; \ movq %r8, %rbx ; \ shlq $0x20, %rbx ; \ addq %r8, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r8 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r8, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r9 ; \ sbbq %rdx, %r10 ; \ sbbq %rbp, %r11 ; \ sbbq $0x0, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x8+P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %r8, %r8 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %r8, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %r8, %r8 ; \ movq 0x10+P1, %rax ; \ mulq 
%rbx; \ subq %r8, %rdx ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ sbbq %r8, %r8 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %r8, %rdx ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %r8, %r8 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %r8, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %r8, %r8 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %r8, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %r8, %r8 ; \ negq %r8; \ movq %r9, %rbx ; \ shlq $0x20, %rbx ; \ addq %r9, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r9 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r9, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r10 ; \ sbbq %rdx, %r11 ; \ sbbq %rbp, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %r14 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r15 ; \ adcq $0x0, %r8 ; \ movq 0x10+P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %r9, %r9 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %r9, %rdx ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ sbbq %r9, %r9 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ subq %r9, %rdx ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %r9, %r9 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %r9, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %r9, %r9 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %r9, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %r9, %r9 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %r9, %rdx ; \ addq %rax, %r15 ; \ adcq %rdx, %r8 ; \ sbbq %r9, %r9 ; \ negq %r9; \ movq %r10, %rbx ; \ shlq $0x20, %rbx ; \ addq %r10, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r10 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r10, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r11 ; \ sbbq %rdx, %r12 ; \ sbbq %rbp, %r13 ; \ sbbq $0x0, %r14 ; \ sbbq $0x0, %r15 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r8 ; \ adcq $0x0, %r9 ; \ movq 0x18+P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ sbbq %r10, %r10 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %r10, %rdx ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %r10, %r10 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ subq %r10, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %r10, %r10 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %r10, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %r10, %r10 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %r10, %rdx ; \ addq %rax, %r15 ; \ adcq %rdx, %r8 ; \ sbbq %r10, %r10 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %r10, %rdx ; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %r10, %r10 ; \ negq %r10; \ movq %r11, %rbx ; \ shlq $0x20, %rbx ; \ addq %r11, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r11 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r11, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r12 ; \ sbbq %rdx, %r13 ; \ sbbq %rbp, %r14 ; \ sbbq $0x0, %r15 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r9 ; \ adcq $0x0, %r10 ; \ movq 0x20+P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %r11, %r11 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %r11, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %r11, %r11 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ subq %r11, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %r11, %r11 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %r11, %rdx ; \ addq %rax, %r15 ; \ adcq %rdx, %r8 ; \ sbbq %r11, %r11 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %r11, 
%rdx ; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %r11, %r11 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %r11, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %r11, %r11 ; \ negq %r11; \ movq %r12, %rbx ; \ shlq $0x20, %rbx ; \ addq %r12, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r12 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r12, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r13 ; \ sbbq %rdx, %r14 ; \ sbbq %rbp, %r15 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r10 ; \ adcq $0x0, %r11 ; \ movq 0x28+P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %r12, %r12 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %r12, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %r12, %r12 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ subq %r12, %rdx ; \ addq %rax, %r15 ; \ adcq %rdx, %r8 ; \ sbbq %r12, %r12 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %r12, %rdx ; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %r12, %r12 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %r12, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %r12, %r12 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %r12, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %r12, %r12 ; \ negq %r12; \ movq %r13, %rbx ; \ shlq $0x20, %rbx ; \ addq %r13, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r13 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r13, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r14 ; \ sbbq %rdx, %r15 ; \ sbbq %rbp, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %r10 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r11 ; \ adcq $0x0, %r12 ; \ xorl %edx, %edx ; \ xorl %ebp, %ebp ; \ xorl %r13d, %r13d ; \ movq $0xffffffff00000001, %rax ; \ addq %r14, %rax ; \ movl $0xffffffff, %ebx ; \ adcq %r15, %rbx ; \ movl $0x1, %ecx ; \ adcq %r8, %rcx ; \ adcq %r9, %rdx ; \ adcq %r10, %rbp ; \ adcq %r11, %r13 ; \ adcq $0x0, %r12 ; \ cmovneq %rax, %r14 ; \ cmovneq %rbx, %r15 ; \ cmovneq %rcx, %r8 ; \ cmovneq %rdx, %r9 ; \ cmovneq %rbp, %r10 ; \ cmovneq %r13, %r11 ; \ movq %r14, P0 ; \ movq %r15, 0x8+P0 ; \ movq %r8, 0x10+P0 ; \ movq %r9, 0x18+P0 ; \ movq %r10, 0x20+P0 ; \ movq %r11, 0x28+P0 // Corresponds exactly to bignum_montsqr_p384_alt #define montsqr_p384(P0,P1) \ movq P1, %rbx ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ movq %rax, %r9 ; \ movq %rdx, %r10 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ movq %rax, %r11 ; \ movq %rdx, %r12 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ movq %rax, %r13 ; \ movq %rdx, %r14 ; \ movq 0x18+P1, %rax ; \ mulq 0x20+P1; \ movq %rax, %r15 ; \ movq %rdx, %rcx ; \ movq 0x10+P1, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rbp, %rbp ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ sbbq %rbp, %rbp ; \ movq 0x8+P1, %rbx ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %rbp, %rbp ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %rbp, %rbp ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ adcq $0x0, %rcx ; \ movq 0x20+P1, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %rbp, %rbp ; \ movq 0x10+P1, %rbx ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %rbp, %rbp ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq 
%rbp, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %rbp, %rbp ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r15 ; \ adcq %rdx, %rcx ; \ sbbq %rbp, %rbp ; \ xorl %ebx, %ebx ; \ movq 0x18+P1, %rax ; \ mulq 0x28+P1; \ subq %rbp, %rdx ; \ xorl %ebp, %ebp ; \ addq %rax, %rcx ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ movq 0x20+P1, %rax ; \ mulq 0x28+P1; \ addq %rax, %rbx ; \ adcq %rdx, %rbp ; \ xorl %r8d, %r8d ; \ addq %r9, %r9 ; \ adcq %r10, %r10 ; \ adcq %r11, %r11 ; \ adcq %r12, %r12 ; \ adcq %r13, %r13 ; \ adcq %r14, %r14 ; \ adcq %r15, %r15 ; \ adcq %rcx, %rcx ; \ adcq %rbx, %rbx ; \ adcq %rbp, %rbp ; \ adcl %r8d, %r8d ; \ movq P1, %rax ; \ mulq %rax; \ movq %r8, P0 ; \ movq %rax, %r8 ; \ movq 0x8+P1, %rax ; \ movq %rbp, 0x8+P0 ; \ addq %rdx, %r9 ; \ sbbq %rbp, %rbp ; \ mulq %rax; \ negq %rbp; \ adcq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rbp, %rbp ; \ movq 0x10+P1, %rax ; \ mulq %rax; \ negq %rbp; \ adcq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %rbp, %rbp ; \ movq 0x18+P1, %rax ; \ mulq %rax; \ negq %rbp; \ adcq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %rbp, %rbp ; \ movq 0x20+P1, %rax ; \ mulq %rax; \ negq %rbp; \ adcq %rax, %rcx ; \ adcq %rdx, %rbx ; \ sbbq %rbp, %rbp ; \ movq 0x28+P1, %rax ; \ mulq %rax; \ negq %rbp; \ adcq 0x8+P0, %rax ; \ adcq P0, %rdx ; \ movq %rax, %rbp ; \ movq %rdx, %rsi ; \ movq %rbx, P0 ; \ movq %r8, %rbx ; \ shlq $0x20, %rbx ; \ addq %r8, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r8 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r8 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r8, %r9 ; \ sbbq %rdx, %r10 ; \ sbbq %rax, %r11 ; \ sbbq $0x0, %r12 ; \ sbbq $0x0, %r13 ; \ movq %rbx, %r8 ; \ sbbq $0x0, %r8 ; \ movq %r9, %rbx ; \ shlq $0x20, %rbx ; \ addq %r9, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r9 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r9 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r9, %r10 ; \ sbbq %rdx, %r11 ; \ sbbq %rax, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %r8 ; \ movq %rbx, %r9 ; \ sbbq $0x0, %r9 ; \ movq %r10, %rbx ; \ shlq $0x20, %rbx ; \ addq %r10, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r10 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r10 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r10, %r11 ; \ sbbq %rdx, %r12 ; \ sbbq %rax, %r13 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %r9 ; \ movq %rbx, %r10 ; \ sbbq $0x0, %r10 ; \ movq %r11, %rbx ; \ shlq $0x20, %rbx ; \ addq %r11, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r11 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r11 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r11, %r12 ; \ sbbq %rdx, %r13 ; \ sbbq %rax, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %r10 ; \ movq %rbx, %r11 ; \ sbbq $0x0, %r11 ; \ movq %r12, %rbx ; \ shlq $0x20, %rbx ; \ addq %r12, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r12 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r12 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r12, %r13 ; \ sbbq %rdx, %r8 ; \ sbbq %rax, %r9 ; \ sbbq $0x0, %r10 ; \ sbbq $0x0, %r11 ; \ movq %rbx, %r12 ; \ sbbq $0x0, %r12 ; \ movq %r13, %rbx ; \ shlq $0x20, %rbx ; \ addq %r13, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r13 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r13 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq 
%r13, %r8 ; \ sbbq %rdx, %r9 ; \ sbbq %rax, %r10 ; \ sbbq $0x0, %r11 ; \ sbbq $0x0, %r12 ; \ movq %rbx, %r13 ; \ sbbq $0x0, %r13 ; \ movq P0, %rbx ; \ addq %r8, %r14 ; \ adcq %r9, %r15 ; \ adcq %r10, %rcx ; \ adcq %r11, %rbx ; \ adcq %r12, %rbp ; \ adcq %r13, %rsi ; \ movl $0x0, %r8d ; \ adcq %r8, %r8 ; \ xorq %r11, %r11 ; \ xorq %r12, %r12 ; \ xorq %r13, %r13 ; \ movq $0xffffffff00000001, %rax ; \ addq %r14, %rax ; \ movl $0xffffffff, %r9d ; \ adcq %r15, %r9 ; \ movl $0x1, %r10d ; \ adcq %rcx, %r10 ; \ adcq %rbx, %r11 ; \ adcq %rbp, %r12 ; \ adcq %rsi, %r13 ; \ adcq $0x0, %r8 ; \ cmovneq %rax, %r14 ; \ cmovneq %r9, %r15 ; \ cmovneq %r10, %rcx ; \ cmovneq %r11, %rbx ; \ cmovneq %r12, %rbp ; \ cmovneq %r13, %rsi ; \ movq %r14, P0 ; \ movq %r15, 0x8+P0 ; \ movq %rcx, 0x10+P0 ; \ movq %rbx, 0x18+P0 ; \ movq %rbp, 0x20+P0 ; \ movq %rsi, 0x28+P0 // Corresponds exactly to bignum_sub_p384 #define sub_p384(P0,P1,P2) \ movq P1, %rax ; \ subq P2, %rax ; \ movq 0x8+P1, %rdx ; \ sbbq 0x8+P2, %rdx ; \ movq 0x10+P1, %r8 ; \ sbbq 0x10+P2, %r8 ; \ movq 0x18+P1, %r9 ; \ sbbq 0x18+P2, %r9 ; \ movq 0x20+P1, %r10 ; \ sbbq 0x20+P2, %r10 ; \ movq 0x28+P1, %r11 ; \ sbbq 0x28+P2, %r11 ; \ sbbq %rcx, %rcx ; \ movl $0xffffffff, %esi ; \ andq %rsi, %rcx ; \ xorq %rsi, %rsi ; \ subq %rcx, %rsi ; \ subq %rsi, %rax ; \ movq %rax, P0 ; \ sbbq %rcx, %rdx ; \ movq %rdx, 0x8+P0 ; \ sbbq %rax, %rax ; \ andq %rsi, %rcx ; \ negq %rax; \ sbbq %rcx, %r8 ; \ movq %r8, 0x10+P0 ; \ sbbq $0x0, %r9 ; \ movq %r9, 0x18+P0 ; \ sbbq $0x0, %r10 ; \ movq %r10, 0x20+P0 ; \ sbbq $0x0, %r11 ; \ movq %r11, 0x28+P0 // Additional macros to help with final multiplexing #define testzero6(P) \ movq P, %rax ; \ movq 8+P, %rdx ; \ orq 16+P, %rax ; \ orq 24+P, %rdx ; \ orq 32+P, %rax ; \ orq 40+P, %rdx ; \ orq %rdx, %rax #define mux6(r0,r1,r2,r3,r4,r5,PNE,PEQ) \ movq PEQ, %rax ; \ movq PNE, r0 ; \ cmovzq %rax, r0 ; \ movq 8+PEQ, %rax ; \ movq 8+PNE, r1 ; \ cmovzq %rax, r1 ; \ movq 16+PEQ, %rax ; \ movq 16+PNE, r2 ; \ cmovzq %rax, r2 ; \ movq 24+PEQ, %rax ; \ movq 24+PNE, r3 ; \ cmovzq %rax, r3 ; \ movq 32+PEQ, %rax ; \ movq 32+PNE, r4 ; \ cmovzq %rax, r4 ; \ movq 40+PEQ, %rax ; \ movq 40+PNE, r5 ; \ cmovzq %rax, r5 #define load6(r0,r1,r2,r3,r4,r5,P) \ movq P, r0 ; \ movq 8+P, r1 ; \ movq 16+P, r2 ; \ movq 24+P, r3 ; \ movq 32+P, r4 ; \ movq 40+P, r5 #define store6(P,r0,r1,r2,r3,r4,r5) \ movq r0, P ; \ movq r1, 8+P ; \ movq r2, 16+P ; \ movq r3, 24+P ; \ movq r4, 32+P ; \ movq r5, 40+P S2N_BN_SYMBOL(p384_montjmixadd_alt): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save registers and make room on stack for temporary variables // Put the input arguments in non-volatile places on the stack pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp movq %rsi, input_x movq %rdx, input_y // Main code, just a sequence of basic field operations // 8 * multiply + 3 * square + 7 * subtract montsqr_p384(zp2,z_1) movq input_x, %rsi movq input_y, %rcx montmul_p384(y2a,z_1,y_2) movq input_y, %rcx montmul_p384(x2a,zp2,x_2) montmul_p384(y2a,zp2,y2a) movq input_x, %rsi sub_p384(xd,x2a,x_1) movq input_x, %rsi sub_p384(yd,y2a,y_1) montsqr_p384(zz,xd) montsqr_p384(ww,yd) movq input_x, %rsi montmul_p384(zzx1,zz,x_1) montmul_p384(zzx2,zz,x2a) sub_p384(resx,ww,zzx1) sub_p384(t1,zzx2,zzx1) movq input_x, %rsi montmul_p384(resz,xd,z_1) sub_p384(resx,resx,zzx2) sub_p384(t2,zzx1,resx) movq input_x, %rsi montmul_p384(t1,t1,y_1) montmul_p384(t2,yd,t2) sub_p384(resy,t2,t1) // Test if z_1 = 0 to decide if 
p1 = 0 (up to projective equivalence) movq input_x, %rsi testzero6(z_1) // Multiplex: if p1 <> 0 just copy the computed result from the staging area. // If p1 = 0 then return the point p2 augmented with a z = 1 coordinate (in // Montgomery form so not the simple constant 1 but rather 2^384 - p_384), // hence giving 0 + p2 = p2 for the final result. movq input_y, %rcx mux6(%r8,%r9,%r10,%r11,%rbx,%rbp,resx,x_2) mux6(%r12,%r13,%r14,%r15,%rdx,%rcx,resy,y_2) store6(x_3,%r8,%r9,%r10,%r11,%rbx,%rbp) store6(y_3,%r12,%r13,%r14,%r15,%rdx,%rcx) load6(%r8,%r9,%r10,%r11,%rbx,%rbp,resz) movq $0xffffffff00000001, %rax cmovzq %rax, %r8 movl $0x00000000ffffffff, %eax cmovzq %rax, %r9 movq $1, %rax cmovzq %rax, %r10 movl $0, %eax cmovzq %rax, %r11 cmovzq %rax, %rbx cmovzq %rax, %rbp store6(z_3,%r8,%r9,%r10,%r11,%rbx,%rbp) // Restore stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
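When p1 is the point at infinity, both mixed-add routines write z_3 = 1 in Montgomery form using the immediates 0xffffffff00000001, 0x00000000ffffffff and 1 loaded just before store6(z_3,...). A quick, purely illustrative Python check that these really are the six words of 2^384 mod p_384 = 2^384 - p_384:

    P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
    one_mont = 2**384 % P384                 # Montgomery form of 1, i.e. 2^384 - p_384
    words = [(one_mont >> (64 * i)) & (2**64 - 1) for i in range(6)]
    assert words == [0xffffffff00000001, 0x00000000ffffffff, 1, 0, 0, 0]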
marvin-hansen/iggy-streaming-system
9,008
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_tomont_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert to Montgomery form z := (2^384 * x) mod p_384 // Input x[6]; output z[6] // // extern void bignum_tomont_p384 // (uint64_t z[static 6], uint64_t x[static 6]); // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tomont_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tomont_p384) .text #define z %rdi #define x %rsi // Fairly consistently used as a zero register #define zero %rbp // Some temp registers for the last correction stage #define d %rax #define u %rdx #define v %rcx #define w %rsi #define vshort %ecx #define wshort %esi // Add %rdx * m into a register-pair (high,low) // maintaining consistent double-carrying with adcx and adox, // using %rax and %rcx as temporaries #define mulpadd(high,low,m) \ mulxq m, %rax, %rcx ; \ adcxq %rax, low ; \ adoxq %rcx, high // Core one-step Montgomery reduction macro. Takes input in // [d7;d6;d5;d4;d3;d2;d1;d0] and returns result in [d7;d6;d5;d4;d3;d2;d1], // adding to the existing contents, re-using d0 as a temporary internally // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 // // montredc(d7,d6,d5,d4,d3,d2,d1,d0) // // This particular variant, with its mix of addition and subtraction // at the top, is not intended to maintain a coherent carry or borrow out. // It is assumed the final result would fit in [d7;d6;d5;d4;d3;d2;d1]. // which is always the case here as the top word is even always in {0,1} #define montredc(d7,d6,d5,d4,d3,d2,d1,d0) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ movq d0, %rdx ; \ shlq $32, %rdx ; \ addq d0, %rdx ; \ /* Construct [%rbp;%rcx;%rax;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel so we can re-use d0 as a temp */ \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rcx, %rax ; \ movl $0x00000000ffffffff, %ecx ; \ mulxq %rcx, d0, %rcx ; \ adcq d0, %rax ; \ adcq %rdx, %rcx ; \ adcl %ebp, %ebp ; \ /* Now subtract that and add 2^384 * w */ \ subq %rax, d1 ; \ sbbq %rcx, d2 ; \ sbbq %rbp, d3 ; \ sbbq $0, d4 ; \ sbbq $0, d5 ; \ sbbq $0, %rdx ; \ addq %rdx, d6 ; \ adcq $0, d7 S2N_BN_SYMBOL(bignum_tomont_p384): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // We are essentially just doing a Montgomery multiplication of x and the // precomputed constant y = 2^768 mod p, so the code is almost the same // modulo a few registers and the change from loading y[i] to using constants, // plus the easy digits y[4] = 1 and y[5] = 0 being treated specially. // Because there is no y pointer to keep, we use one register less. 
pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 // Do row 0 computation, which is a bit different: // set up initial window [%r14,%r13,%r12,%r11,%r10,%r9,%r8] = y[0] * x // Unlike later, we only need a single carry chain movq $0xfffffffe00000001, %rdx mulxq (x), %r8, %r9 mulxq 8(x), %rcx, %r10 addq %rcx, %r9 mulxq 16(x), %rcx, %r11 adcq %rcx, %r10 mulxq 24(x), %rcx, %r12 adcq %rcx, %r11 mulxq 32(x), %rcx, %r13 adcq %rcx, %r12 mulxq 40(x), %rcx, %r14 adcq %rcx, %r13 adcq $0, %r14 // Montgomery reduce the zeroth window xorq %r15, %r15 montredc(%r15, %r14,%r13,%r12,%r11,%r10,%r9,%r8) // Add row 1 xorq zero, zero movq $0x0000000200000000, %rdx xorq %r8, %r8 mulpadd(%r10,%r9,(x)) mulpadd(%r11,%r10,8(x)) mulpadd(%r12,%r11,16(x)) mulpadd(%r13,%r12,24(x)) mulpadd(%r14,%r13,32(x)) mulpadd(%r15,%r14,40(x)) adcxq zero, %r15 adoxq zero, %r8 adcxq zero, %r8 // Montgomery reduce window 1 montredc(%r8, %r15,%r14,%r13,%r12,%r11,%r10,%r9) // Add row 2 xorq zero, zero movq $0xfffffffe00000000, %rdx xorq %r9, %r9 mulpadd(%r11,%r10,(x)) mulpadd(%r12,%r11,8(x)) mulpadd(%r13,%r12,16(x)) mulpadd(%r14,%r13,24(x)) mulpadd(%r15,%r14,32(x)) mulpadd(%r8,%r15,40(x)) adcxq zero, %r8 adoxq zero, %r9 adcxq zero, %r9 // Montgomery reduce window 2 montredc(%r9, %r8,%r15,%r14,%r13,%r12,%r11,%r10) // Add row 3 xorq zero, zero movq $0x0000000200000000, %rdx xorq %r10, %r10 mulpadd(%r12,%r11,(x)) mulpadd(%r13,%r12,8(x)) mulpadd(%r14,%r13,16(x)) mulpadd(%r15,%r14,24(x)) mulpadd(%r8,%r15,32(x)) mulpadd(%r9,%r8,40(x)) adcxq zero, %r9 adoxq zero, %r10 adcxq zero, %r10 // Montgomery reduce window 3 montredc(%r10, %r9,%r8,%r15,%r14,%r13,%r12,%r11) // Add row 4. The multiplier y[4] = 1, so we just add x to the window // while extending it with one more digit, initially this carry xorq %r11, %r11 addq (x), %r12 adcq 8(x), %r13 adcq 16(x), %r14 adcq 24(x), %r15 adcq 32(x), %r8 adcq 40(x), %r9 adcq $0, %r10 adcq $0, %r11 // Montgomery reduce window 4 montredc(%r11, %r10,%r9,%r8,%r15,%r14,%r13,%r12) // Add row 5, The multiplier y[5] = 0, so this is trivial: all we do is // bring down another zero digit into the window. xorq %r12, %r12 // Montgomery reduce window 5 montredc(%r12, %r11,%r10,%r9,%r8,%r15,%r14,%r13) // We now have a pre-reduced 7-word form [%r12;%r11;%r10;%r9;%r8;%r15;%r14] // We know, writing B = 2^{6*64} that the full implicit result is // B^2 c <= z + (B - 1) * p < B * p + (B - 1) * p < 2 * B * p, // so the top half is certainly < 2 * p. If c = 1 already, we know // subtracting p will give the reduced modulus. But now we do a // comparison to catch cases where the residue is >= p. // First set [0;0;0;w;v;u] = 2^384 - p_384 movq $0xffffffff00000001, u movl $0x00000000ffffffff, vshort movl $0x0000000000000001, wshort // Let dd = [%r11;%r10;%r9;%r8;%r15;%r14] be the topless 6-word intermediate result. // Set CF if the addition dd + (2^384 - p_384) >= 2^384, hence iff dd >= p_384. movq %r14, d addq u, d movq %r15, d adcq v, d movq %r8, d adcq w, d movq %r9, d adcq $0, d movq %r10, d adcq $0, d movq %r11, d adcq $0, d // Now just add this new carry into the existing %r12. 
It's easy to see they // can't both be 1 by our range assumptions, so this gives us a {0,1} flag adcq $0, %r12 // Now convert it into a bitmask negq %r12 // Masked addition of 2^384 - p_384, hence subtraction of p_384 andq %r12, u andq %r12, v andq %r12, w addq u, %r14 adcq v, %r15 adcq w, %r8 adcq $0, %r9 adcq $0, %r10 adcq $0, %r11 // Write back the result movq %r14, (z) movq %r15, 8(z) movq %r8, 16(z) movq %r9, 24(z) movq %r10, 32(z) movq %r11, 40(z) // Restore registers and return popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
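As the header comment says, this routine is a Montgomery multiplication of x by the precomputed constant y = 2^768 mod p_384, so the per-row multipliers 0xfffffffe00000001, 0x0000000200000000, 0xfffffffe00000000, 0x0000000200000000, 1 and 0 are just the words of y. A short Python cross-check (illustrative only; Python 3.8+ assumed for the modular inverse):

    P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
    y = pow(2, 768, P384)                    # the precomputed constant
    words = [(y >> (64 * i)) & (2**64 - 1) for i in range(6)]
    assert words == [0xfffffffe00000001, 0x0000000200000000,
                     0xfffffffe00000000, 0x0000000200000000, 1, 0]

    # Montgomery-multiplying x by y, i.e. computing x*y/2^384 mod p_384,
    # yields 2^384 * x mod p_384, the Montgomery form of x.
    x = 0xdeadbeef
    assert (x * y * pow(2**384, -1, P384)) % P384 == (x << 384) % P384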
marvin-hansen/iggy-streaming-system
2,819
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_mod_p384_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Reduce modulo field characteristic, z := x mod p_384 // Input x[6]; output z[6] // // extern void bignum_mod_p384_6 // (uint64_t z[static 6], uint64_t x[static 6]); // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_mod_p384_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_mod_p384_6) .text #define z %rdi #define x %rsi #define d0 %rdx #define d1 %rcx #define d2 %r8 #define d3 %r9 #define d4 %r10 #define d5 %r11 #define c %rax #define cshort %eax // Re-use the input pointer as a temporary once we're done #define a %rsi S2N_BN_SYMBOL(bignum_mod_p384_6): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // Load the input and subtract p_384 from it movq (x), d0 movl $0x00000000ffffffff, cshort subq c, d0 movq 8(x), d1 notq c sbbq c, d1 movq 16(x), d2 sbbq $-2, d2 movq 24(x), d3 sbbq $-1, d3 movq 32(x), d4 sbbq $-1, d4 movq 40(x), d5 sbbq $-1, d5 // Capture the top carry as a bitmask to indicate we need to add p_384 back on, // which we actually do in a more convenient way by subtracting r_384 // where r_384 = [-1; 0; 0; 0; 1; 0x00000000ffffffff; 0xffffffff00000001] // We don't quite have enough ABI-modifiable registers to create all three // nonzero digits of r while maintaining d0..d5, but make the first two now. notq c sbbq a, a andq a, c // c = masked 0x00000000ffffffff xorq a, a subq c, a // a = masked 0xffffffff00000001 // Do the first two digits of addition and writeback subq a, d0 movq d0, (z) sbbq c, d1 movq d1, 8(z) // Preserve the carry chain while creating the extra masked digit since // the logical operation will clear CF sbbq d0, d0 andq a, c // c = masked 0x0000000000000001 negq d0 // Do the rest of the addition and writeback sbbq c, d2 movq d2, 16(z) sbbq $0, d3 movq d3, 24(z) sbbq $0, d4 movq d4, 32(z) sbbq $0, d5 movq d5, 40(z) #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
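The reduction above is the usual subtract-p, add-back-on-borrow pattern, with the add-back performed by subtracting words of r_384 = 2^448 - p_384 under the borrow mask. The snippet below (illustrative names only) checks the r_384 digits quoted in the comment and the overall input/output behaviour:

    P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
    r384 = 2**448 - P384
    words = [(r384 >> (64 * i)) & (2**64 - 1) for i in range(7)]
    assert words == [0xffffffff00000001, 0x00000000ffffffff, 1, 0, 0, 0,
                     0xffffffffffffffff]

    def mod_p384_6(x):
        assert 0 <= x < 2**384               # any 6-word input
        t = x - P384
        return t if t >= 0 else x            # borrow means x was already < p_384

    assert mod_p384_6(P384 + 5) == 5 and mod_p384_6(7) == 7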
marvin-hansen/iggy-streaming-system
2,112
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_littleendian_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert 6-digit (384-bit) bignum to/from little-endian form // Input x[6]; output z[6] // // extern void bignum_littleendian_6 // (uint64_t z[static 6], uint64_t x[static 6]); // // The same function is given two other prototypes whose names reflect the // treatment of one or other argument as a byte array rather than word array: // // extern void bignum_fromlebytes_6 // (uint64_t z[static 6], uint8_t x[static 48]); // // extern void bignum_tolebytes_6 // (uint8_t z[static 48], uint64_t x[static 6]); // // Since x86 is little-endian, this is just copying. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_littleendian_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_littleendian_6) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_fromlebytes_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_fromlebytes_6) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tolebytes_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tolebytes_6) .text #define z %rdi #define x %rsi #define a %rax S2N_BN_SYMBOL(bignum_littleendian_6): S2N_BN_SYMBOL(bignum_fromlebytes_6): S2N_BN_SYMBOL(bignum_tolebytes_6): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif movq (x), a movq a, (z) movq 8(x), a movq a, 8(z) movq 16(x), a movq a, 16(z) movq 24(x), a movq a, 24(z) movq 32(x), a movq a, 32(z) movq 40(x), a movq a, 40(z) #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
marvin-hansen/iggy-streaming-system
3,652
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_triple_p384_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Triple modulo p_384, z := (3 * x) mod p_384 // Input x[6]; output z[6] // // extern void bignum_triple_p384_alt // (uint64_t z[static 6], uint64_t x[static 6]); // // The input x can be any 6-digit bignum, not necessarily reduced modulo p_384, // and the result is always fully reduced, i.e. z = (3 * x) mod p_384. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_triple_p384_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_triple_p384_alt) .text #define z %rdi #define x %rsi #define d0 %r8 #define d1 %r9 #define d2 %r10 #define d3 %r11 #define d4 %rbx #define d5 %rsi #define a %rax #define c %rcx #define q %rcx #define d %rdx #define ashort %eax #define cshort %ecx #define qshort %ecx #define dshort %edx S2N_BN_SYMBOL(bignum_triple_p384_alt): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // We seem to need (just!) one extra register, which we need to save and restore pushq %rbx // Multiply, accumulating the result as 2^384 * h + [d5;d4;d3;d2;d1;d0] // but actually immediately producing q = h + 1, our quotient approximation, // by adding 1 to it. movl $3, cshort movq (x), a mulq c movq a, d0 movq d, d1 movq 8(x), a xorq d2, d2 mulq c addq a, d1 adcq d, d2 movq 16(x), a xorq d3, d3 mulq c addq a, d2 adcq d, d3 movq 24(x), a xorq d4, d4 mulq c addq a, d3 adcq d, d4 movq 32(x), a mulq c addq a, d4 adcq $0, d movq 40(x), a movq d, d5 mulq c addq a, d5 movl $1, qshort adcq d, q // Initial subtraction of z - q * p_384, with bitmask c for the carry // Actually done as an addition of (z - 2^384 * h) + q * (2^384 - p_384) // which, because q = h + 1, is exactly 2^384 + (z - q * p_384), and // therefore CF <=> 2^384 + (z - q * p_384) >= 2^384 <=> z >= q * p_384. movq q, d shlq $32, d movq q, a subq d, a sbbq $0, d addq a, d0 adcq d, d1 adcq q, d2 adcq $0, d3 adcq $0, d4 adcq $0, d5 sbbq d, d notq d // Now use that mask for a masked addition of p_384, which again is in // fact done by a masked subtraction of 2^384 - p_384, so that we only // have three nonzero digits and so can avoid using another register. movl $0x00000000ffffffff, qshort xorl ashort, ashort andq d, q subq q, a negq d subq a, d0 movq d0, (z) sbbq q, d1 movq d1, 8(z) sbbq d, d2 movq d2, 16(z) sbbq $0, d3 movq d3, 24(z) sbbq $0, d4 movq d4, 32(z) sbbq $0, d5 movq d5, 40(z) // Return popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
marvin-hansen/iggy-streaming-system
5,437
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_deamont_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert from almost-Montgomery form, z := (x / 2^384) mod p_384 // Input x[6]; output z[6] // // extern void bignum_deamont_p384 // (uint64_t z[static 6], uint64_t x[static 6]); // // Convert a 6-digit bignum x out of its (optionally almost) Montgomery form, // "almost" meaning any 6-digit input will work, with no range restriction. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_deamont_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_deamont_p384) .text #define z %rdi #define x %rsi // Additional temps in the correction phase #define u %rax #define v %rcx #define w %rdx #define vshort %ecx // Core one-step "short" Montgomery reduction macro. Takes input in // [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1], // adding to the existing contents of [d5;d4;d3;d2;d1;d0]. This // is intended only for 6-word inputs as in mapping out of Montgomery, // not for the general case of Montgomery multiplication. It is fine // for d6 to be the same register as d0. // // Parms: montreds(d6,d5,d4,d3,d2,d1,d0) // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 #define montreds(d6,d5,d4,d3,d2,d1,d0) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ movq d0, %rdx ; \ shlq $32, %rdx ; \ addq d0, %rdx ; \ /* Construct [%rsi;%rcx;%rax;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel so we can re-use d0 */ \ /* as a temp. */ \ xorq %rsi, %rsi ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rcx, %rax ; \ movl $0x00000000ffffffff, %ecx ; \ mulxq %rcx, d0, %rcx ; \ adcq d0, %rax ; \ adcq %rdx, %rcx ; \ adcq $0, %rsi ; \ /* Now subtract that and add 2^384 * w */ \ subq %rax, d1 ; \ sbbq %rcx, d2 ; \ sbbq %rsi, d3 ; \ sbbq $0, d4 ; \ sbbq $0, d5 ; \ movq %rdx, d6 ; \ sbbq $0, d6 S2N_BN_SYMBOL(bignum_deamont_p384): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // Save more registers to play with pushq %r12 pushq %r13 // Set up an initial window [%r13,%r12,%r11,%r10,%r9,%r8] = x movq (x), %r8 movq 8(x), %r9 movq 16(x), %r10 movq 24(x), %r11 movq 32(x), %r12 movq 40(x), %r13 // Montgomery reduce window 0 montreds(%r8,%r13,%r12,%r11,%r10,%r9,%r8) // Montgomery reduce window 1 montreds(%r9,%r8,%r13,%r12,%r11,%r10,%r9) // Montgomery reduce window 2 montreds(%r10,%r9,%r8,%r13,%r12,%r11,%r10) // Montgomery reduce window 3 montreds(%r11,%r10,%r9,%r8,%r13,%r12,%r11) // Montgomery reduce window 4 montreds(%r12,%r11,%r10,%r9,%r8,%r13,%r12) // Montgomery reduce window 5 montreds(%r13,%r12,%r11,%r10,%r9,%r8,%r13) // Do a test addition of dd = [%r13;%r12;%r11;%r10;%r9;%r8] and // 2^384 - p_384 = [0;0;0;1;v;u], hence setting CF iff // dd + (2^384 - p_384) >= 2^384, hence iff dd >= p_384. 
movq $0xffffffff00000001, u movl $0x00000000ffffffff, vshort movq %r8, w addq u, w movq %r9, w adcq v, w movq %r10, w adcq $1, w movq %r11, w adcq $0, w movq %r12, w adcq $0, w movq %r13, w adcq $0, w // Convert CF to a bitmask in w sbbq w, w // Masked addition of 2^384 - p_384, hence subtraction of p_384 andq w, u andq w, v andq $1, w addq u, %r8 adcq v, %r9 adcq w, %r10 adcq $0, %r11 adcq $0, %r12 adcq $0, %r13 // Write back the result movq %r8, (z) movq %r9, 8(z) movq %r10, 16(z) movq %r11, 24(z) movq %r12, 32(z) movq %r13, 40(z) // Restore registers and return popq %r13 popq %r12 #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
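Functionally this is six of the same one-word Montgomery steps as the squaring code, followed by an exact comparison with p_384, which is why any 6-word input comes out fully reduced. A small Python model (illustrative; montreds mirrors the macro's multiplier choice, Python 3.8+ assumed for the final check):

    P384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
    MASK64 = 2**64 - 1

    def montreds(d):
        d0 = d & MASK64
        w = (d0 + (d0 << 32)) & MASK64       # chosen so the bottom word cancels
        return (d + w * P384) >> 64

    def deamont_p384(x):
        t = x
        for _ in range(6):
            t = montreds(t)
        return t - P384 if t >= P384 else t  # exact comparison with p_384

    x = 2**383 + 12345                       # arbitrary input, no range restriction
    assert deamont_p384(x) == (x * pow(2**384, -1, P384)) % P384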
marvin-hansen/iggy-streaming-system
10,353
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_montsqr_p384_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery square, z := (x^2 / 2^384) mod p_384 // Input x[6]; output z[6] // // extern void bignum_montsqr_p384_alt // (uint64_t z[static 6], uint64_t x[static 6]); // // Does z := (x^2 / 2^384) mod p_384, assuming x^2 <= 2^384 * p_384, which is // guaranteed in particular if x < p_384 initially (the "intended" case). // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montsqr_p384_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montsqr_p384_alt) .text #define z %rdi #define x %rsi // Some temp registers for the last correction stage #define d %rax #define u %rdx #define v %r10 #define w %r11 // A zero register, very often #define zero %rbp #define zeroe %ebp // Add %rbx * m into a register-pair (high,low) maintaining consistent // carry-catching with carry (negated, as bitmask) and using %rax and %rdx // as temporaries #define mulpadd(carry,high,low,m) \ movq m, %rax ; \ mulq %rbx; \ subq carry, %rdx ; \ addq %rax, low ; \ adcq %rdx, high ; \ sbbq carry, carry // Initial version assuming no carry-in #define mulpadi(carry,high,low,m) \ movq m, %rax ; \ mulq %rbx; \ addq %rax, low ; \ adcq %rdx, high ; \ sbbq carry, carry // End version not catching the top carry-out #define mulpade(carry,high,low,m) \ movq m, %rax ; \ mulq %rbx; \ subq carry, %rdx ; \ addq %rax, low ; \ adcq %rdx, high // Core one-step "short" Montgomery reduction macro. Takes input in // [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1], // adding to the existing [d5;d4;d3;d2;d1] and re-using d0 as a // temporary internally, as well as %rax, %rbx and %rdx. // It is OK for d6 and d0 to be the same register (they often are) // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 // // montreds(d6,d5,d4,d3,d2,d1,d0) #define montreds(d6,d5,d4,d3,d2,d1,d0) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ movq d0, %rbx ; \ shlq $32, %rbx ; \ addq d0, %rbx ; \ /* Construct [%rax;%rdx;d0;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel so we can re-use d0 */ \ /* and %rbx as temps. 
*/ \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, d0 ; \ movq $0x00000000ffffffff, %rax ; \ mulq %rbx; \ addq %rax, d0 ; \ movl $0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ /* Now subtract that and add 2^384 * w */ \ subq d0, d1 ; \ sbbq %rdx, d2 ; \ sbbq %rax, d3 ; \ sbbq $0, d4 ; \ sbbq $0, d5 ; \ movq %rbx, d6 ; \ sbbq $0, d6 S2N_BN_SYMBOL(bignum_montsqr_p384_alt): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // Save more registers to play with pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 // Set up an initial window [%rcx;%r15;...%r9] = [34;05;03;01] // Note that we are using %rcx as the first step past the rotating window movq (x), %rbx movq 8(x), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 24(x), %rax mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 40(x), %rax mulq %rbx movq %rax, %r13 movq %rdx, %r14 movq 24(x), %rax mulq 32(x) movq %rax, %r15 movq %rdx, %rcx // Chain in the addition of 02 + 12 + 13 + 14 + 15 to that window // (no carry-out possible) movq 16(x), %rbx mulpadi(%rbp,%r11,%r10,(x)) mulpadd(%rbp,%r12,%r11,8(x)) movq 8(x), %rbx mulpadd(%rbp,%r13,%r12,24(x)) mulpadd(%rbp,%r14,%r13,32(x)) mulpade(%rbp,%r15,%r14,40(x)) adcq $0, %rcx // Now chain in the 04 + 23 + 24 + 25 + 35 + 45 terms // We are running out of registers in our rotating window, so we start // using %rbx (and hence need care with using mulpadd after this). Thus // our result so far is in [%rbp;%rbx;%rcx;%r15;...%r9] movq 32(x), %rbx mulpadi(%rbp,%r13,%r12,(x)) movq 16(x), %rbx mulpadd(%rbp,%r14,%r13,24(x)) mulpadd(%rbp,%r15,%r14,32(x)) mulpadd(%rbp,%rcx,%r15,40(x)) xorl %ebx, %ebx movq 24(x), %rax mulq 40(x) subq %rbp, %rdx xorl %ebp, %ebp addq %rax, %rcx adcq %rdx, %rbx adcl %ebp, %ebp movq 32(x), %rax mulq 40(x) addq %rax, %rbx adcq %rdx, %rbp // Double the window as [%r8;%rbp;%rbx;%rcx;%r15;...%r9] xorl %r8d, %r8d addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %r15, %r15 adcq %rcx, %rcx adcq %rbx, %rbx adcq %rbp, %rbp adcl %r8d, %r8d // Add the doubled window to the 00 + 11 + 22 + 33 + 44 + 55 terms // For one glorious moment the entire squaring result is all in the // register file as [%rsi;%rbp;%rbx;%rcx;%r15;...;%r8] // (since we've now finished with x we can re-use %rsi). But since // we are so close to running out of registers, we do a bit of // reshuffling and temporary storage in the output buffer. movq (x), %rax mulq %rax movq %r8, (z) movq %rax, %r8 movq 8(x), %rax movq %rbp, 8(z) addq %rdx, %r9 sbbq %rbp, %rbp mulq %rax negq %rbp adcq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 16(x), %rax mulq %rax negq %rbp adcq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 24(x), %rax mulq %rax negq %rbp adcq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 32(x), %rax mulq %rax negq %rbp adcq %rax, %rcx adcq %rdx, %rbx sbbq %rbp, %rbp movq 40(x), %rax mulq %rax negq %rbp adcq 8(z), %rax adcq (z), %rdx movq %rax, %rbp movq %rdx, %rsi // We need just *one* more register as a temp for the Montgomery steps. // Since we are writing to the z buffer anyway, make use of that again // to stash %rbx. 
movq %rbx, (z) // Montgomery reduce the %r13,...,%r8 window 6 times montreds(%r8,%r13,%r12,%r11,%r10,%r9,%r8) montreds(%r9,%r8,%r13,%r12,%r11,%r10,%r9) montreds(%r10,%r9,%r8,%r13,%r12,%r11,%r10) montreds(%r11,%r10,%r9,%r8,%r13,%r12,%r11) montreds(%r12,%r11,%r10,%r9,%r8,%r13,%r12) montreds(%r13,%r12,%r11,%r10,%r9,%r8,%r13) // Now we can safely restore %rbx before accumulating movq (z), %rbx addq %r8, %r14 adcq %r9, %r15 adcq %r10, %rcx adcq %r11, %rbx adcq %r12, %rbp adcq %r13, %rsi movl $0, %r8d adcq %r8, %r8 // We now have a pre-reduced 7-word form z = [%r8; %rsi;%rbp;%rbx;%rcx;%r15;%r14] // Next, accumulate in different registers z - p_384, or more precisely // // [%r8; %r13;%r12;%r11;%r10;%r9;%rax] = z + (2^384 - p_384) xorq %r11, %r11 xorq %r12, %r12 xorq %r13, %r13 movq $0xffffffff00000001, %rax addq %r14, %rax movl $0x00000000ffffffff, %r9d adcq %r15, %r9 movl $0x0000000000000001, %r10d adcq %rcx, %r10 adcq %rbx, %r11 adcq %rbp, %r12 adcq %rsi, %r13 adcq $0, %r8 // ~ZF <=> %r12 >= 1 <=> z + (2^384 - p_384) >= 2^384 <=> z >= p_384, which // determines whether to use the further reduced argument or the original z. cmovnzq %rax, %r14 cmovnzq %r9, %r15 cmovnzq %r10, %rcx cmovnzq %r11, %rbx cmovnzq %r12, %rbp cmovnzq %r13, %rsi // Write back the result movq %r14, (z) movq %r15, 8(z) movq %rcx, 16(z) movq %rbx, 24(z) movq %rbp, 32(z) movq %rsi, 40(z) // Restore registers and return popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
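One way to read the montreds step used above: the multiplier w = (d0 + (d0 << 32)) mod 2^64 equals -d0 * p_384^(-1) mod 2^64, because p_384 ≡ 2^32 - 1 (mod 2^64) and (2^32 + 1)(2^32 - 1) ≡ -1 (mod 2^64), so adding w * p_384 clears the bottom word and the whole window can be shifted down by 64 bits. A minimal Python sketch of that step (the function name and test value are illustrative, not part of the library):

p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def montreds_step(d):
    # pick w so the low 64 bits of d + w*p_384 vanish, then shift down a word
    d0 = d & (2**64 - 1)
    w = (d0 + (d0 << 32)) & (2**64 - 1)   # w = -d0 * p_384^-1 mod 2^64
    t = d + w * p_384
    assert t & (2**64 - 1) == 0           # bottom word cancels by construction
    return t >> 64

x = 0x123456789abcdef                     # hypothetical test value
d = x * x
for _ in range(6):                        # six steps divide by 2^384 overall
    d = montreds_step(d)
assert d % p_384 == (x * x * pow(2, -384, p_384)) % p_384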
marvin-hansen/iggy-streaming-system
2,683
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_bigendian_6.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert 6-digit (384-bit) bignum to/from big-endian form // Input x[6]; output z[6] // // extern void bignum_bigendian_6 // (uint64_t z[static 6], uint64_t x[static 6]); // // The same function is given two other prototypes whose names reflect the // treatment of one or other argument as a byte array rather than word array: // // extern void bignum_frombebytes_6 // (uint64_t z[static 6], uint8_t x[static 48]); // // extern void bignum_tobebytes_6 // (uint8_t z[static 48], uint64_t x[static 6]); // // Since x86 is little-endian, and bignums are stored with little-endian // word order, this is simply byte reversal and is implemented as such. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_bigendian_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_bigendian_6) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_frombebytes_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_frombebytes_6) S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_tobebytes_6) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_tobebytes_6) .text #define z %rdi #define x %rsi #define a %rax #define b %rdx // All loads and stores are word-sized, then we use BSWAP to // reverse the byte order, as well as switching round the word order // when writing back. The reads and writes are organized in mirror-image // pairs (0-5, 1-4, 2-3) to allow x and z to point to the same buffer // without using more intermediate registers. S2N_BN_SYMBOL(bignum_bigendian_6): S2N_BN_SYMBOL(bignum_frombebytes_6): S2N_BN_SYMBOL(bignum_tobebytes_6): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // 0 and 5 words movq (x), a movq 40(x), b bswapq a bswapq b movq a, 40(z) movq b, (z) // 1 and 4 words movq 8(x), a movq 32(x), b bswapq a bswapq b movq a, 32(z) movq b, 8(z) // 2 and 3 words movq 16(x), a movq 24(x), b bswapq a bswapq b movq a, 24(z) movq b, 16(z) #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
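Since the conversion is nothing more than reversing 48 bytes, the same mirror-image word scheme can be sketched in a few lines of Python (names and test values here are illustrative, not the library API):

def bswap64(w):                              # byte-reverse one 64-bit word
    return int.from_bytes(w.to_bytes(8, "big"), "little")

def bigendian_6(words):                      # six 64-bit limbs, least significant first
    # mirror-image pairs (0,5), (1,4), (2,3), as in the assembly, so the
    # input and output could safely be the same buffer
    out = list(words)
    for i in range(3):
        out[i], out[5 - i] = bswap64(words[5 - i]), bswap64(words[i])
    return out

x = [0x0101010101010101 * (i + 1) for i in range(6)]   # hypothetical input
n = sum(w << (64 * i) for i, w in enumerate(x))
z = bigendian_6(x)
assert b"".join(w.to_bytes(8, "little") for w in z) == n.to_bytes(48, "big")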
marvin-hansen/iggy-streaming-system
4,449
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_demont_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Convert from Montgomery form z := (x / 2^384) mod p_384, assuming x reduced // Input x[6]; output z[6] // // extern void bignum_demont_p384 // (uint64_t z[static 6], uint64_t x[static 6]); // // This assumes the input is < p_384 for correctness. If this is not the case, // use the variant "bignum_deamont_p384" instead. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_demont_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_demont_p384) .text #define z %rdi #define x %rsi // Core one-step "short" Montgomery reduction macro. Takes input in // [d5;d4;d3;d2;d1;d0] and returns result in [d6;d5;d4;d3;d2;d1], // adding to the existing contents of [d5;d4;d3;d2;d1;d0]. This // is intended only for 6-word inputs as in mapping out of Montgomery, // not for the general case of Montgomery multiplication. It is fine // for d6 to be the same register as d0. // // Parms: montreds(d6,d5,d4,d3,d2,d1,d0) // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 #define montreds(d6,d5,d4,d3,d2,d1,d0) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ movq d0, %rdx ; \ shlq $32, %rdx ; \ addq d0, %rdx ; \ /* Construct [%rsi;%rcx;%rax;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel so we can re-use d0 */ \ /* as a temp. */ \ xorq %rsi, %rsi ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rcx, %rax ; \ movl $0x00000000ffffffff, %ecx ; \ mulxq %rcx, d0, %rcx ; \ adcq d0, %rax ; \ adcq %rdx, %rcx ; \ adcq $0, %rsi ; \ /* Now subtract that and add 2^384 * w */ \ subq %rax, d1 ; \ sbbq %rcx, d2 ; \ sbbq %rsi, d3 ; \ sbbq $0, d4 ; \ sbbq $0, d5 ; \ movq %rdx, d6 ; \ sbbq $0, d6 S2N_BN_SYMBOL(bignum_demont_p384): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // Save more registers to play with pushq %r12 pushq %r13 // Set up an initial window [%r13,%r12,%r11,%r10,%r9,%r8] = x movq (x), %r8 movq 8(x), %r9 movq 16(x), %r10 movq 24(x), %r11 movq 32(x), %r12 movq 40(x), %r13 // Montgomery reduce window 0 montreds(%r8,%r13,%r12,%r11,%r10,%r9,%r8) // Montgomery reduce window 1 montreds(%r9,%r8,%r13,%r12,%r11,%r10,%r9) // Montgomery reduce window 2 montreds(%r10,%r9,%r8,%r13,%r12,%r11,%r10) // Montgomery reduce window 3 montreds(%r11,%r10,%r9,%r8,%r13,%r12,%r11) // Montgomery reduce window 4 montreds(%r12,%r11,%r10,%r9,%r8,%r13,%r12) // Montgomery reduce window 5 montreds(%r13,%r12,%r11,%r10,%r9,%r8,%r13) // Write back the result movq %r8, (z) movq %r9, 8(z) movq %r10, 16(z) movq %r11, 24(z) movq %r12, 32(z) movq %r13, 40(z) // Restore registers and return popq %r13 popq %r12 #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
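Functionally, mapping a reduced x out of Montgomery form is just multiplication by 2^-384 mod p_384, which gives an easy reference to test against. A two-line Python sketch of the contract (illustrative name, not the library API):

p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def demont_p384_reference(x):
    assert x < p_384                      # the routine assumes a reduced input
    return (x * pow(2, -384, p_384)) % p_384

# round trip: map 5 into Montgomery form and back out again
assert demont_p384_reference((5 * pow(2, 384, p_384)) % p_384) == 5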
marvin-hansen/iggy-streaming-system
2,201
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_neg_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Negate modulo p_384, z := (-x) mod p_384, assuming x reduced // Input x[6]; output z[6] // // extern void bignum_neg_p384 (uint64_t z[static 6], uint64_t x[static 6]); // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_neg_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_neg_p384) .text #define z %rdi #define x %rsi #define n0 %rax #define n1 %rcx #define n2 %rdx #define n3 %r8 #define n4 %r9 #define q %r10 #define n0short %eax S2N_BN_SYMBOL(bignum_neg_p384): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // Or together the input digits and create a bitmask q if this is nonzero, so // that we avoid doing -0 = p_384 and hence maintain strict modular reduction movq (x), n0 orq 8(x), n0 movq 16(x), n1 orq 24(x), n1 movq 32(x), n2 orq 40(x), n2 orq n1, n0 orq n2, n0 negq n0 sbbq q, q // Let [q;n4;n3;n2;n1;n0] = if q then p_384 else 0 movl $0x00000000ffffffff, n0short andq q, n0 movq $0xffffffff00000000, n1 andq q, n1 movq $0xfffffffffffffffe, n2 andq q, n2 movq q, n3 movq q, n4 // Do the subtraction subq (x), n0 sbbq 8(x), n1 sbbq 16(x), n2 sbbq 24(x), n3 sbbq 32(x), n4 sbbq 40(x), q // Write back movq n0, (z) movq n1, 8(z) movq n2, 16(z) movq n3, 24(z) movq n4, 32(z) movq q, 40(z) #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
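The zero input is the only subtlety here: a naive p_384 - x would map 0 to p_384, which is not strictly reduced. A Python sketch of the masked-subtraction idea (hypothetical name and test values):

p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def neg_p384(x):                         # assumes 0 <= x < p_384
    # the assembly forms this mask by AND-ing an all-ones/all-zeros word
    # with the digits of p_384, so zero negates to zero rather than p_384
    mask = p_384 if x != 0 else 0
    return mask - x

assert neg_p384(0) == 0
assert (neg_p384(123) + 123) % p_384 == 0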
marvin-hansen/iggy-streaming-system
4,004
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_cmul_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Multiply by a single word modulo p_384, z := (c * x) mod p_384, assuming // x reduced // Inputs c, x[6]; output z[6] // // extern void bignum_cmul_p384 // (uint64_t z[static 6], uint64_t c, uint64_t x[static 6]); // // Standard x86-64 ABI: RDI = z, RSI = c, RDX = x // Microsoft x64 ABI: RCX = z, RDX = c, R8 = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_cmul_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_cmul_p384) .text #define z %rdi // Temporarily moved here for initial multiply #define x %rcx // Likewise this is thrown away after initial multiply #define m %rdx #define a %rax #define c %rcx #define d0 %rsi #define d1 %r8 #define d2 %r9 #define d3 %r10 #define d4 %r11 #define d5 %r12 // Multiplier again for second stage #define q %rdx #define ashort %eax #define cshort %ecx #define qshort %edx S2N_BN_SYMBOL(bignum_cmul_p384): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // We seem to need (just!) one extra register, which we need to save and restore pushq %r12 // Shuffle inputs (since we want multiplier in %rdx) movq %rdx, x movq %rsi, m // Multiply, accumulating the result as 2^384 * h + [d5;d4;d3;d2;d1;d0] // but actually immediately producing q = h + 1, our quotient approximation, // by adding 1 to it. Note that by hypothesis x is reduced mod p_384, so our // product is <= (2^64 - 1) * (p_384 - 1) and hence h <= 2^64 - 2, meaning // there is no danger this addition of 1 could wrap. mulxq (x), d0, d1 mulxq 8(x), a, d2 addq a, d1 mulxq 16(x), a, d3 adcq a, d2 mulxq 24(x), a, d4 adcq a, d3 mulxq 32(x), a, d5 adcq a, d4 mulxq 40(x), a, q adcq a, d5 adcq $1, q // It's easy to see -p_384 <= z - q * p_384 < p_384, so we just need to // subtract q * p_384 and then correct if that is negative by adding p_384. // // Write p_384 = 2^384 - r where r = 2^128 + 2^96 - 2^32 + 1 // // We want z - q * (2^384 - r) // = (2^384 * h + l) - q * (2^384 - r) // = 2^384 * (h - q) + (l + q * r) // = 2^384 * (-1) + (l + q * r) xorq c, c movq $0xffffffff00000001, a mulxq a, a, c adcxq a, d0 adoxq c, d1 movl $0x00000000ffffffff, ashort mulxq a, a, c adcxq a, d1 adoxq c, d2 adcxq q, d2 movl $0, ashort movl $0, cshort adoxq a, a adcq a, d3 adcq c, d4 adcq c, d5 adcq c, c subq $1, c // The net c value is now the top word of the 7-word answer, hence will // be -1 if we need a corrective addition, 0 otherwise, usable as a mask. // Now use that mask for a masked addition of p_384, which again is in // fact done by a masked subtraction of 2^384 - p_384, so that we only // have three nonzero digits and so can avoid using another register. movl $0x00000000ffffffff, qshort xorq a, a andq c, q subq q, a andq $1, c subq a, d0 movq d0, (z) sbbq q, d1 movq d1, 8(z) sbbq c, d2 movq d2, 16(z) sbbq $0, d3 movq d3, 24(z) sbbq $0, d4 movq d4, 32(z) sbbq $0, d5 movq d5, 40(z) // Return popq %r12 #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
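The quotient-estimate argument in the comments can be checked on its own: writing z = c * x = 2^384*h + l and taking q = h + 1 leaves z - q*p_384 in the interval (-p_384, p_384), so a single masked addition of p_384 finishes the reduction. A Python sketch under those assumptions (illustrative name and test values):

p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def cmul_p384(c, x):
    assert x < p_384 and c < 2**64
    z = c * x
    q = (z >> 384) + 1                    # quotient estimate h + 1
    r = z - q * p_384                     # lies in (-p_384, p_384)
    if r < 0:                             # masked addition in the assembly
        r += p_384
    return r

c, x = 0xdeadbeef, 0x1234567890abcdef     # hypothetical test values
assert cmul_p384(c, x) == (c * x) % p_384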
marvin-hansen/iggy-streaming-system
8,630
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_montmul_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery multiply, z := (x * y / 2^384) mod p_384 // Inputs x[6], y[6]; output z[6] // // extern void bignum_montmul_p384 // (uint64_t z[static 6], uint64_t x[static 6], uint64_t y[static 6]); // // Does z := (2^{-384} * x * y) mod p_384, assuming that the inputs x and y // satisfy x * y <= 2^384 * p_384 (in particular this is true if we are in // the "usual" case x < p_384 and y < p_384). // // Standard x86-64 ABI: RDI = z, RSI = x, RDX = y // Microsoft x64 ABI: RCX = z, RDX = x, R8 = y // ----------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_montmul_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_montmul_p384) .text #define z %rdi #define x %rsi // We move the y argument here so we can use %rdx for multipliers #define y %rcx // Some temp registers for the last correction stage #define d %rax #define u %rdx #define v %rcx #define w %rbx // Add %rdx * m into a register-pair (high,low) // maintaining consistent double-carrying with adcx and adox, // using %rax and %rbx as temporaries #define mulpadd(high,low,m) \ mulxq m, %rax, %rbx ; \ adcxq %rax, low ; \ adoxq %rbx, high // Core one-step Montgomery reduction macro. Takes input in // [d7;d6;d5;d4;d3;d2;d1;d0] and returns result in [d7;d6;d5;d4;d3;d2;d1], // adding to the existing contents, re-using d0 as a temporary internally // // We want to add (2^384 - 2^128 - 2^96 + 2^32 - 1) * w // where w = [d0 + (d0<<32)] mod 2^64 // // montredc(d7,d6,d5,d4,d3,d2,d1,d0) // // This particular variant, with its mix of addition and subtraction // at the top, is not intended to maintain a coherent carry or borrow out. // It is assumed the final result would fit in [d7;d6;d5;d4;d3;d2;d1]. 
// which is always the case here as the top word is even always in {0,1} #define montredc(d7,d6,d5,d4,d3,d2,d1,d0) \ /* Our correction multiplier is w = [d0 + (d0<<32)] mod 2^64 */ \ movq d0, %rdx ; \ shlq $32, %rdx ; \ addq d0, %rdx ; \ /* Construct [%rbp;%rbx;%rax;-] = (2^384 - p_384) * w */ \ /* We know the lowest word will cancel so we can re-use d0 as a temp */ \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0x00000000ffffffff, %ebx ; \ mulxq %rbx, d0, %rbx ; \ adcq d0, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ /* Now subtract that and add 2^384 * w */ \ subq %rax, d1 ; \ sbbq %rbx, d2 ; \ sbbq %rbp, d3 ; \ sbbq $0, d4 ; \ sbbq $0, d5 ; \ sbbq $0, %rdx ; \ addq %rdx, d6 ; \ adcq $0, d7 S2N_BN_SYMBOL(bignum_montmul_p384): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save more registers to play with pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 // Copy y into a safe register to start with movq %rdx, y // Do row 0 computation, which is a bit different: // set up initial window [%r14,%r13,%r12,%r11,%r10,%r9,%r8] = y[0] * x // Unlike later, we only need a single carry chain movq (y), %rdx xorl %r15d, %r15d mulxq (x), %r8, %r9 mulxq 8(x), %rbx, %r10 addq %rbx, %r9 mulxq 16(x), %rbx, %r11 adcq %rbx, %r10 mulxq 24(x), %rbx, %r12 adcq %rbx, %r11 mulxq 32(x), %rbx, %r13 adcq %rbx, %r12 mulxq 40(x), %rbx, %r14 adcq %rbx, %r13 adcq %r15, %r14 // Montgomery reduce the zeroth window montredc(%r15, %r14,%r13,%r12,%r11,%r10,%r9,%r8) // Add row 1 movq 8(y), %rdx xorl %r8d, %r8d mulpadd(%r10,%r9,(x)) mulpadd(%r11,%r10, 8(x)) mulpadd(%r12,%r11,16(x)) mulpadd(%r13,%r12,24(x)) mulpadd(%r14,%r13,32(x)) adoxq %r8, %r15 mulxq 40(x), %rax, %rbx adcq %rax, %r14 adcq %rbx, %r15 adcq %r8, %r8 // Montgomery reduce window 1 montredc(%r8, %r15,%r14,%r13,%r12,%r11,%r10,%r9) // Add row 2 movq 16(y), %rdx xorl %r9d, %r9d mulpadd(%r11,%r10,(x)) mulpadd(%r12,%r11,8(x)) mulpadd(%r13,%r12,16(x)) mulpadd(%r14,%r13,24(x)) mulpadd(%r15,%r14,32(x)) adoxq %r9, %r8 mulxq 40(x), %rax, %rbx adcq %rax, %r15 adcq %rbx, %r8 adcq %r9, %r9 // Montgomery reduce window 2 montredc(%r9, %r8,%r15,%r14,%r13,%r12,%r11,%r10) // Add row 3 movq 24(y), %rdx xorl %r10d, %r10d mulpadd(%r12,%r11,(x)) mulpadd(%r13,%r12,8(x)) mulpadd(%r14,%r13,16(x)) mulpadd(%r15,%r14,24(x)) mulpadd(%r8,%r15,32(x)) adoxq %r10, %r9 mulxq 40(x), %rax, %rbx adcq %rax, %r8 adcq %rbx, %r9 adcq %r10, %r10 // Montgomery reduce window 3 montredc(%r10, %r9,%r8,%r15,%r14,%r13,%r12,%r11) // Add row 4 movq 32(y), %rdx xorl %r11d, %r11d mulpadd(%r13,%r12,(x)) mulpadd(%r14,%r13,8(x)) mulpadd(%r15,%r14,16(x)) mulpadd(%r8,%r15,24(x)) mulpadd(%r9,%r8,32(x)) adoxq %r11, %r10 mulxq 40(x), %rax, %rbx adcq %rax, %r9 adcq %rbx, %r10 adcq %r11, %r11 // Montgomery reduce window 4 montredc(%r11, %r10,%r9,%r8,%r15,%r14,%r13,%r12) // Add row 5 movq 40(y), %rdx xorl %r12d, %r12d mulpadd(%r14,%r13,(x)) mulpadd(%r15,%r14,8(x)) mulpadd(%r8,%r15,16(x)) mulpadd(%r9,%r8,24(x)) mulpadd(%r10,%r9,32(x)) adoxq %r12, %r11 mulxq 40(x), %rax, %rbx adcq %rax, %r10 adcq %rbx, %r11 adcq %r12, %r12 // Montgomery reduce window 5 montredc(%r12, %r11,%r10,%r9,%r8,%r15,%r14,%r13) // We now have a pre-reduced 7-word form z = [%r12; %r11;%r10;%r9;%r8;%r15;%r14] // Next, accumulate in different registers z - p_384, or more precisely // // [%r12; %r13;%rbp;%rdx;%rcx;%rbx;%rax] = z + (2^384 - p_384) xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movq $0xffffffff00000001, %rax addq %r14, %rax movl 
$0x00000000ffffffff, %ebx adcq %r15, %rbx movl $0x0000000000000001, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0, %r12 // ~ZF <=> %r12 >= 1 <=> z + (2^384 - p_384) >= 2^384 <=> z >= p_384, which // determines whether to use the further reduced argument or the original z. cmovnzq %rax, %r14 cmovnzq %rbx, %r15 cmovnzq %rcx, %r8 cmovnzq %rdx, %r9 cmovnzq %rbp, %r10 cmovnzq %r13, %r11 // Write back the result movq %r14, (z) movq %r15, 8(z) movq %r8, 16(z) movq %r9, 24(z) movq %r10, 32(z) movq %r11, 40(z) // Restore registers and return popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits #endif
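The row-by-row structure above is the usual word-serial Montgomery multiplication: add y[i] * x to the accumulator, then apply one reduction step that clears the bottom 64 bits, so the running value never grows beyond about seven words. A compact Python analogue of that structure (not the generated code itself, just the same idea with illustrative names):

p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1
M64 = 2**64 - 1

def montmul_p384(x, y):
    acc = 0
    for i in range(6):
        acc += ((y >> (64 * i)) & M64) * x       # add row i: y[i] * x
        w = ((acc & M64) * (2**32 + 1)) & M64    # w = -acc * p_384^-1 mod 2^64
        acc = (acc + w * p_384) >> 64            # bottom word is now zero
    return acc if acc < p_384 else acc - p_384   # final conditional subtraction

x, y = 0x1111, 0x2222                            # hypothetical test values
assert montmul_p384(x, y) == (x * y * pow(2, -384, p_384)) % p_384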
marvin-hansen/iggy-streaming-system
95,564
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/bignum_inv_p384.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Modular inverse modulo p_384 = 2^384 - 2^128 - 2^96 + 2^32 - 1 // Input x[6]; output z[6] // // extern void bignum_inv_p384(uint64_t z[static 6],uint64_t x[static 6]); // // If the 6-digit input x is coprime to p_384, i.e. is not divisible // by it, returns z < p_384 such that x * z == 1 (mod p_384). Note that // x does not need to be reduced modulo p_384, but the output always is. // If the input is divisible (i.e. is 0 or p_384), then there can be no // modular inverse and z = 0 is returned. // // Standard x86-64 ABI: RDI = z, RSI = x // Microsoft x64 ABI: RCX = z, RDX = x // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(bignum_inv_p384) S2N_BN_SYM_PRIVACY_DIRECTIVE(bignum_inv_p384) .text // Size in bytes of a 64-bit word #define N 8 // Pointer-offset pairs for temporaries on stack // The u and v variables are 6 words each as expected, but the f and g // variables are 8 words each -- they need to have at least one extra // word for a sign word, and to preserve alignment we "round up" to 8. // In fact, we currently keep an extra word in u and v as well. #define f 0(%rsp) #define g (8*N)(%rsp) #define u (16*N)(%rsp) #define v (24*N)(%rsp) #define tmp (32*N)(%rsp) #define tmp2 (33*N)(%rsp) #define i (34*N)(%rsp) #define d (35*N)(%rsp) #define mat (36*N)(%rsp) // Backup for the input pointer #define res (40*N)(%rsp) // Total size to reserve on the stack #define NSPACE (42*N) // Syntactic variants to make x86_att version simpler to generate #define F 0 #define G (8*N) #define U (16*N) #define V (24*N) #define MAT (36*N) #define ff (%rsp) #define gg (8*N)(%rsp) // --------------------------------------------------------------------------- // Core signed almost-Montgomery reduction macro from P[6..0] to P[5..0]. // --------------------------------------------------------------------------- #define amontred(P) \ /* We only know the input is -2^444 < x < 2^444. To do traditional */ \ /* unsigned Montgomery reduction, start by adding 2^61 * p_384. 
*/ \ movq $0xe000000000000000, %r8 ; \ xorl %eax, %eax ; \ addq P, %r8 ; \ movq $0x000000001fffffff, %r9 ; \ leaq -1(%rax), %rax ; \ adcq N+P, %r9 ; \ movq $0xdfffffffe0000000, %r10 ; \ adcq 2*N+P, %r10 ; \ movq 3*N+P, %r11 ; \ adcq %rax, %r11 ; \ movq 4*N+P, %r12 ; \ adcq %rax, %r12 ; \ movq 5*N+P, %r13 ; \ adcq %rax, %r13 ; \ movq $0x1fffffffffffffff, %r14 ; \ adcq 6*N+P, %r14 ; \ /* Correction multiplier is %rbx = w = [d0 + (d0<<32)] mod 2^64 */ \ movq %r8, %rbx ; \ shlq $32, %rbx ; \ addq %r8, %rbx ; \ /* Construct [%rbp;%rdx;%rax;-] = (2^384 - p_384) * w */ \ /* We know lowest word will cancel so can re-use %r8 as a temp */ \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r8 ; \ movq $0x00000000ffffffff, %rax ; \ mulq %rbx; \ addq %r8, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ /* Now subtract that and add 2^384 * w, catching carry in %rax */ \ subq %rax, %r9 ; \ sbbq %rdx, %r10 ; \ sbbq %rbp, %r11 ; \ sbbq $0, %r12 ; \ sbbq $0, %r13 ; \ sbbq $0, %r14 ; \ sbbq %rax, %rax ; \ addq %rbx, %r14 ; \ adcq $0, %rax ; \ /* Now if top is nonzero we subtract p_384 (almost-Montgomery) */ \ negq %rax; \ movq $0x00000000ffffffff, %rbx ; \ andq %rax, %rbx ; \ movq $0xffffffff00000000, %rcx ; \ andq %rax, %rcx ; \ movq $0xfffffffffffffffe, %rdx ; \ andq %rax, %rdx ; \ subq %rbx, %r9 ; \ movq %r9, P ; \ sbbq %rcx, %r10 ; \ movq %r10, N+P ; \ sbbq %rdx, %r11 ; \ movq %r11, 2*N+P ; \ sbbq %rax, %r12 ; \ movq %r12, 3*N+P ; \ sbbq %rax, %r13 ; \ movq %r13, 4*N+P ; \ sbbq %rax, %r14 ; \ movq %r14, 5*N+P // Very similar to a subroutine call to the s2n-bignum word_divstep59. // But different in register usage and returning the final matrix as // // [ %r8 %r10] // [ %r12 %r14] // // and also returning the matrix still negated (which doesn't matter) #define divstep59(din,fin,gin) \ movq din, %rsi ; \ movq fin, %rdx ; \ movq gin, %rcx ; \ movq %rdx, %rbx ; \ andq $0xfffff, %rbx ; \ movabsq $0xfffffe0000000000, %rax ; \ orq %rax, %rbx ; \ andq $0xfffff, %rcx ; \ movabsq $0xc000000000000000, %rax ; \ orq %rax, %rcx ; \ movq $0xfffffffffffffffe, %rax ; \ xorl %ebp, %ebp ; \ movl $0x2, %edx ; \ movq %rbx, %rdi ; \ movq %rax, %r8 ; \ testq %rsi, %rsi ; \ cmovs %rbp, %r8 ; \ testq $0x1, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, 
%rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, 
%rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ sarq $1, %rcx ; \ movl $0x100000, %eax ; \ leaq (%rbx,%rax), %rdx ; \ leaq (%rcx,%rax), %rdi ; \ shlq $0x16, %rdx ; \ shlq $0x16, %rdi ; \ sarq $0x2b, %rdx ; \ sarq $0x2b, %rdi ; \ movabsq $0x20000100000, %rax ; \ leaq (%rbx,%rax), %rbx ; \ leaq (%rcx,%rax), %rcx ; \ sarq $0x2a, %rbx ; \ sarq $0x2a, %rcx ; \ movq %rdx, MAT(%rsp) ; \ movq %rbx, MAT+0x8(%rsp) ; \ movq %rdi, MAT+0x10(%rsp) ; \ movq %rcx, MAT+0x18(%rsp) ; \ movq fin, %r12 ; \ imulq %r12, %rdi ; \ imulq %rdx, %r12 ; \ movq gin, %r13 ; \ imulq %r13, %rbx ; \ imulq %rcx, %r13 ; \ addq %rbx, %r12 ; \ addq %rdi, %r13 ; \ sarq $0x14, %r12 ; \ sarq $0x14, %r13 ; \ movq %r12, %rbx ; \ andq $0xfffff, %rbx ; \ movabsq $0xfffffe0000000000, %rax ; \ orq %rax, %rbx ; \ movq %r13, %rcx ; \ andq $0xfffff, %rcx ; \ movabsq $0xc000000000000000, %rax ; \ orq %rax, %rcx ; \ movq $0xfffffffffffffffe, %rax ; \ movl $0x2, %edx ; \ movq %rbx, %rdi ; \ movq %rax, %r8 ; \ testq %rsi, %rsi ; \ cmovs %rbp, %r8 ; \ testq $0x1, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq 
$1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ sarq $1, %rcx ; \ movl $0x100000, %eax ; \ leaq (%rbx,%rax), %r8 ; \ leaq (%rcx,%rax), %r10 ; \ shlq $0x16, %r8 ; \ shlq $0x16, %r10 ; \ sarq $0x2b, %r8 ; \ sarq $0x2b, %r10 ; \ movabsq $0x20000100000, %rax ; \ leaq (%rbx,%rax), %r15 ; \ leaq (%rcx,%rax), %r11 ; \ sarq $0x2a, %r15 ; \ sarq $0x2a, %r11 ; \ movq %r13, %rbx ; \ movq %r12, %rcx ; \ imulq %r8, %r12 ; \ imulq %r15, %rbx ; \ addq %rbx, %r12 ; \ imulq %r11, %r13 ; \ imulq %r10, %rcx ; \ addq 
%rcx, %r13 ; \ sarq $0x14, %r12 ; \ sarq $0x14, %r13 ; \ movq %r12, %rbx ; \ andq $0xfffff, %rbx ; \ movabsq $0xfffffe0000000000, %rax ; \ orq %rax, %rbx ; \ movq %r13, %rcx ; \ andq $0xfffff, %rcx ; \ movabsq $0xc000000000000000, %rax ; \ orq %rax, %rcx ; \ movq MAT(%rsp), %rax ; \ imulq %r8, %rax ; \ movq MAT+0x10(%rsp), %rdx ; \ imulq %r15, %rdx ; \ imulq MAT+0x8(%rsp), %r8 ; \ imulq MAT+0x18(%rsp), %r15 ; \ addq %r8, %r15 ; \ leaq (%rax,%rdx), %r9 ; \ movq MAT(%rsp), %rax ; \ imulq %r10, %rax ; \ movq MAT+0x10(%rsp), %rdx ; \ imulq %r11, %rdx ; \ imulq MAT+0x8(%rsp), %r10 ; \ imulq MAT+0x18(%rsp), %r11 ; \ addq %r10, %r11 ; \ leaq (%rax,%rdx), %r13 ; \ movq $0xfffffffffffffffe, %rax ; \ movl $0x2, %edx ; \ movq %rbx, %rdi ; \ movq %rax, %r8 ; \ testq %rsi, %rsi ; \ cmovs %rbp, %r8 ; \ testq $0x1, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ 
xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ cmovs %rbp, %r8 ; \ movq %rbx, %rdi ; \ testq %rdx, %rcx ; \ cmoveq %rbp, %r8 ; \ cmoveq %rbp, %rdi ; \ sarq $1, %rcx ; \ xorq %r8, %rdi ; \ xorq %r8, %rsi ; \ btq $0x3f, %r8 ; \ cmovbq %rcx, %rbx ; \ movq %rax, %r8 ; \ subq %rax, %rsi ; \ leaq (%rcx,%rdi), %rcx ; \ sarq $1, %rcx ; \ movl $0x100000, %eax ; \ leaq (%rbx,%rax), %r8 ; \ leaq (%rcx,%rax), %r12 ; \ shlq $0x15, %r8 ; \ shlq $0x15, %r12 ; \ sarq $0x2b, %r8 ; \ sarq $0x2b, %r12 ; \ movabsq $0x20000100000, %rax ; \ leaq (%rbx,%rax), %r10 ; \ leaq (%rcx,%rax), %r14 ; \ sarq $0x2b, %r10 ; \ sarq $0x2b, %r14 ; \ movq %r9, %rax ; \ imulq %r8, %rax ; \ movq %r13, %rdx ; \ imulq %r10, %rdx ; \ imulq %r15, %r8 ; \ imulq %r11, %r10 ; \ addq %r8, %r10 ; \ leaq (%rax,%rdx), %r8 ; \ movq %r9, %rax ; \ imulq %r12, %rax ; \ movq %r13, %rdx ; \ imulq %r14, %rdx ; \ imulq %r15, %r12 ; \ imulq %r11, %r14 ; \ addq %r12, %r14 ; \ leaq (%rax,%rdx), %r12 S2N_BN_SYMBOL(bignum_inv_p384): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi #endif // Save registers and make room for temporaries pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp // Save the return pointer for the end so we can overwrite %rdi later movq %rdi, res // Copy the constant p_384 into f including the 7th zero digit movl $0xffffffff, %eax movq %rax, F(%rsp) movq %rax, %rbx notq %rbx movq %rbx, F+N(%rsp) xorl %ebp, %ebp leaq -2(%rbp), %rcx movq %rcx, F+2*N(%rsp) leaq -1(%rbp), %rdx movq %rdx, F+3*N(%rsp) movq %rdx, F+4*N(%rsp) movq %rdx, F+5*N(%rsp) movq %rbp, F+6*N(%rsp) // 
Copy input but to g, reduced mod p_384 so that g <= f as assumed // in the divstep bound proof. movq (%rsi), %r8 subq %rax, %r8 movq N(%rsi), %r9 sbbq %rbx, %r9 movq 2*N(%rsi), %r10 sbbq %rcx, %r10 movq 3*N(%rsi), %r11 sbbq %rdx, %r11 movq 4*N(%rsi), %r12 sbbq %rdx, %r12 movq 5*N(%rsi), %r13 sbbq %rdx, %r13 cmovcq (%rsi), %r8 cmovcq N(%rsi), %r9 cmovcq 2*N(%rsi), %r10 cmovcq 3*N(%rsi), %r11 cmovcq 4*N(%rsi), %r12 cmovcq 5*N(%rsi), %r13 movq %r8, G(%rsp) movq %r9, G+N(%rsp) movq %r10, G+2*N(%rsp) movq %r11, G+3*N(%rsp) movq %r12, G+4*N(%rsp) movq %r13, G+5*N(%rsp) movq %rbp, G+6*N(%rsp) // Also maintain reduced < 2^384 vector [u,v] such that // [f,g] == x * 2^{5*i-75} * [u,v] (mod p_384) // starting with [p_384,x] == x * 2^{5*0-75} * [0,2^75] (mod p_384) // The weird-looking 5*i modifications come in because we are doing // 64-bit word-sized Montgomery reductions at each stage, which is // 5 bits more than the 59-bit requirement to keep things stable. xorl %eax, %eax movq %rax, U(%rsp) movq %rax, U+N(%rsp) movq %rax, U+2*N(%rsp) movq %rax, U+3*N(%rsp) movq %rax, U+4*N(%rsp) movq %rax, U+5*N(%rsp) movl $2048, %ecx movq %rax, V(%rsp) movq %rcx, V+N(%rsp) movq %rax, V+2*N(%rsp) movq %rax, V+3*N(%rsp) movq %rax, V+4*N(%rsp) movq %rax, V+5*N(%rsp) // Start of main loop. We jump into the middle so that the divstep // portion is common to the special fifteenth iteration after a uniform // first 14. movq $15, i movq $1, d jmp midloop loop: // Separate out the matrix into sign-magnitude pairs movq %r8, %r9 sarq $63, %r9 xorq %r9, %r8 subq %r9, %r8 movq %r10, %r11 sarq $63, %r11 xorq %r11, %r10 subq %r11, %r10 movq %r12, %r13 sarq $63, %r13 xorq %r13, %r12 subq %r13, %r12 movq %r14, %r15 sarq $63, %r15 xorq %r15, %r14 subq %r15, %r14 // Adjust the initial values to allow for complement instead of negation // This initial offset is the same for [f,g] and [u,v] compositions. // Save it in temporary storage for the [u,v] part and do [f,g] first. movq %r8, %rax andq %r9, %rax movq %r10, %rdi andq %r11, %rdi addq %rax, %rdi movq %rdi, tmp movq %r12, %rax andq %r13, %rax movq %r14, %rsi andq %r15, %rsi addq %rax, %rsi movq %rsi, tmp2 // Now the computation of the updated f and g values. This maintains a // 2-word carry between stages so we can conveniently insert the shift // right by 59 before storing back, and not overwrite digits we need // again of the old f and g values. 
// // Digit 0 of [f,g] xorl %ebx, %ebx movq F(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq G(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rdi adcq %rdx, %rbx xorl %ebp, %ebp movq F(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rsi adcq %rdx, %rbp movq G(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp // Digit 1 of [f,g] xorl %ecx, %ecx movq F+N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq G+N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx shrdq $59, %rbx, %rdi movq %rdi, F(%rsp) xorl %edi, %edi movq F+N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbp adcq %rdx, %rdi movq G+N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rdi shrdq $59, %rbp, %rsi movq %rsi, G(%rsp) // Digit 2 of [f,g] xorl %esi, %esi movq F+2*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rsi movq G+2*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rsi shrdq $59, %rcx, %rbx movq %rbx, F+N(%rsp) xorl %ebx, %ebx movq F+2*N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rdi adcq %rdx, %rbx movq G+2*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rdi adcq %rdx, %rbx shrdq $59, %rdi, %rbp movq %rbp, G+N(%rsp) // Digit 3 of [f,g] xorl %ebp, %ebp movq F+3*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rsi adcq %rdx, %rbp movq G+3*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rsi adcq %rdx, %rbp shrdq $59, %rsi, %rcx movq %rcx, F+2*N(%rsp) xorl %ecx, %ecx movq F+3*N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rbx adcq %rdx, %rcx movq G+3*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbx adcq %rdx, %rcx shrdq $59, %rbx, %rdi movq %rdi, G+2*N(%rsp) // Digit 4 of [f,g] xorl %edi, %edi movq F+4*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbp adcq %rdx, %rdi movq G+4*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbp adcq %rdx, %rdi shrdq $59, %rbp, %rsi movq %rsi, F+3*N(%rsp) xorl %esi, %esi movq F+4*N(%rsp), %rax xorq %r13, %rax mulq %r12 addq %rax, %rcx adcq %rdx, %rsi movq G+4*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rcx adcq %rdx, %rsi shrdq $59, %rcx, %rbx movq %rbx, G+3*N(%rsp) // Digits 5 and 6 of [f,g] movq F+5*N(%rsp), %rax xorq %r9, %rax movq F+6*N(%rsp), %rbx xorq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rdi adcq %rdx, %rbx movq G+5*N(%rsp), %rax xorq %r11, %rax movq G+6*N(%rsp), %rdx xorq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rdi adcq %rdx, %rbx shrdq $59, %rdi, %rbp movq %rbp, F+4*N(%rsp) shrdq $59, %rbx, %rdi sarq $59, %rbx movq F+5*N(%rsp), %rax movq %rdi, F+5*N(%rsp) movq F+6*N(%rsp), %rdi movq %rbx, F+6*N(%rsp) xorq %r13, %rax xorq %r13, %rdi andq %r12, %rdi negq %rdi mulq %r12 addq %rax, %rsi adcq %rdx, %rdi movq G+5*N(%rsp), %rax xorq %r15, %rax movq G+6*N(%rsp), %rdx xorq %r15, %rdx andq %r14, %rdx subq %rdx, %rdi mulq %r14 addq %rax, %rsi adcq %rdx, %rdi shrdq $59, %rsi, %rcx movq %rcx, G+4*N(%rsp) shrdq $59, %rdi, %rsi movq %rsi, G+5*N(%rsp) sarq $59, %rdi movq %rdi, G+6*N(%rsp) // Get the initial carries back from storage and do the [u,v] accumulation movq tmp, %rbx movq tmp2, %rbp // Digit 0 of [u,v] xorl %ecx, %ecx movq U(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq V(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq U(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, U(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq V(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi 
movq %rbp, V(%rsp) // Digit 1 of [u,v] xorl %ebx, %ebx movq U+N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq V+N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq U+N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, U+N(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq V+N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, V+N(%rsp) // Digit 2 of [u,v] xorl %ecx, %ecx movq U+2*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq V+2*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq U+2*N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, U+2*N(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq V+2*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, V+2*N(%rsp) // Digit 3 of [u,v] xorl %ebx, %ebx movq U+3*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq V+3*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rcx adcq %rdx, %rbx xorl %ebp, %ebp movq U+3*N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rcx, U+3*N(%rsp) addq %rax, %rsi adcq %rdx, %rbp movq V+3*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rsi adcq %rdx, %rbp movq %rsi, V+3*N(%rsp) // Digit 4 of [u,v] xorl %ecx, %ecx movq U+4*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %rbx adcq %rdx, %rcx movq V+4*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %rbx adcq %rdx, %rcx xorl %esi, %esi movq U+4*N(%rsp), %rax xorq %r13, %rax mulq %r12 movq %rbx, U+4*N(%rsp) addq %rax, %rbp adcq %rdx, %rsi movq V+4*N(%rsp), %rax xorq %r15, %rax mulq %r14 addq %rax, %rbp adcq %rdx, %rsi movq %rbp, V+4*N(%rsp) // Digits 5 and 6 of u (top is unsigned) movq U+5*N(%rsp), %rax xorq %r9, %rax movq %r9, %rbx andq %r8, %rbx negq %rbx mulq %r8 addq %rax, %rcx adcq %rdx, %rbx movq V+5*N(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %rbx mulq %r10 addq %rax, %rcx adcq %rbx, %rdx // Preload for last use of old u digit 3 movq U+5*N(%rsp), %rax movq %rcx, U+5*N(%rsp) movq %rdx, U+6*N(%rsp) // Digits 5 and 6 of v (top is unsigned) xorq %r13, %rax movq %r13, %rcx andq %r12, %rcx negq %rcx mulq %r12 addq %rax, %rsi adcq %rdx, %rcx movq V+5*N(%rsp), %rax xorq %r15, %rax movq %r15, %rdx andq %r14, %rdx subq %rdx, %rcx mulq %r14 addq %rax, %rsi adcq %rcx, %rdx movq %rsi, V+5*N(%rsp) movq %rdx, V+6*N(%rsp) // Montgomery reduction of u amontred(u) // Montgomery reduction of v amontred(v) midloop: divstep59(d,ff,gg) movq %rsi, d // Next iteration decq i jnz loop // The 15th and last iteration does not need anything except the // u value and the sign of f; the latter can be obtained from the // lowest word of f. So it's done differently from the main loop. // Find the sign of the new f. For this we just need one digit // since we know (for in-scope cases) that f is either +1 or -1. // We don't explicitly shift right by 59 either, but looking at // bit 63 (or any bit >= 60) of the unshifted result is enough // to distinguish -1 from +1; this is then made into a mask. movq F(%rsp), %rax movq G(%rsp), %rcx imulq %r8, %rax imulq %r10, %rcx addq %rcx, %rax sarq $63, %rax // Now separate out the matrix into sign-magnitude pairs // and adjust each one based on the sign of f. // // Note that at this point we expect |f|=1 and we got its // sign above, so then since [f,0] == x * [u,v] (mod p_384) // we want to flip the sign of u according to that of f. 
movq %r8, %r9 sarq $63, %r9 xorq %r9, %r8 subq %r9, %r8 xorq %rax, %r9 movq %r10, %r11 sarq $63, %r11 xorq %r11, %r10 subq %r11, %r10 xorq %rax, %r11 movq %r12, %r13 sarq $63, %r13 xorq %r13, %r12 subq %r13, %r12 xorq %rax, %r13 movq %r14, %r15 sarq $63, %r15 xorq %r15, %r14 subq %r15, %r14 xorq %rax, %r15 // Adjust the initial value to allow for complement instead of negation movq %r8, %rax andq %r9, %rax movq %r10, %r12 andq %r11, %r12 addq %rax, %r12 // Digit 0 of [u] xorl %r13d, %r13d movq U(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r12 adcq %rdx, %r13 movq V(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r12 movq %r12, U(%rsp) adcq %rdx, %r13 // Digit 1 of [u] xorl %r14d, %r14d movq U+N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r13 adcq %rdx, %r14 movq V+N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r13 movq %r13, U+N(%rsp) adcq %rdx, %r14 // Digit 2 of [u] xorl %r15d, %r15d movq U+2*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq V+2*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 movq %r14, U+2*N(%rsp) adcq %rdx, %r15 // Digit 3 of [u] xorl %r14d, %r14d movq U+3*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r15 adcq %rdx, %r14 movq V+3*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r15 movq %r15, U+3*N(%rsp) adcq %rdx, %r14 // Digit 4 of [u] xorl %r15d, %r15d movq U+4*N(%rsp), %rax xorq %r9, %rax mulq %r8 addq %rax, %r14 adcq %rdx, %r15 movq V+4*N(%rsp), %rax xorq %r11, %rax mulq %r10 addq %rax, %r14 movq %r14, U+4*N(%rsp) adcq %rdx, %r15 // Digits 5 and 6 of u (top is unsigned) movq U+5*N(%rsp), %rax xorq %r9, %rax andq %r8, %r9 negq %r9 mulq %r8 addq %rax, %r15 adcq %rdx, %r9 movq V+5*N(%rsp), %rax xorq %r11, %rax movq %r11, %rdx andq %r10, %rdx subq %rdx, %r9 mulq %r10 addq %rax, %r15 movq %r15, U+5*N(%rsp) adcq %rdx, %r9 movq %r9, U+6*N(%rsp) // Montgomery reduce u amontred(u) // Perform final strict reduction mod p_384 and copy to output movl $0xffffffff, %eax movq %rax, %rbx notq %rbx xorl %ebp, %ebp leaq -2(%rbp), %rcx leaq -1(%rbp), %rdx movq U(%rsp), %r8 subq %rax, %r8 movq U+N(%rsp), %r9 sbbq %rbx, %r9 movq U+2*N(%rsp), %r10 sbbq %rcx, %r10 movq U+3*N(%rsp), %r11 sbbq %rdx, %r11 movq U+4*N(%rsp), %r12 sbbq %rdx, %r12 movq U+5*N(%rsp), %r13 sbbq %rdx, %r13 cmovcq U(%rsp), %r8 cmovcq U+N(%rsp), %r9 cmovcq U+2*N(%rsp), %r10 cmovcq U+3*N(%rsp), %r11 cmovcq U+4*N(%rsp), %r12 cmovcq U+5*N(%rsp), %r13 movq res, %rdi movq %r8, (%rdi) movq %r9, N(%rdi) movq %r10, 2*N(%rdi) movq %r11, 3*N(%rdi) movq %r12, 4*N(%rdi) movq %r13, 5*N(%rdi) // Restore stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
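The divstep-based loop above is far more involved than its specification; for checking purposes, note that p_384 is prime, so a reference inverse is available from Fermat's little theorem. A Python sketch of the contract only, not of the divstep algorithm (the name and test values are illustrative):

p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def inv_p384_reference(x):
    x %= p_384                       # the input need not be reduced
    return 0 if x == 0 else pow(x, p_384 - 2, p_384)

z = inv_p384_reference(12345)        # hypothetical test value
assert (12345 * z) % p_384 == 1
assert inv_p384_reference(p_384) == 0   # multiples of p_384 return 0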
marvin-hansen/iggy-streaming-system
261,450
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/p384_montjscalarmul_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Montgomery-Jacobian form scalar multiplication for P-384 // Input scalar[6], point[18]; output res[18] // // extern void p384_montjscalarmul_alt // (uint64_t res[static 18], // uint64_t scalar[static 6], // uint64_t point[static 18]); // // This function is a variant of its affine point version p384_scalarmul. // Here, input and output points are assumed to be in Jacobian form with // their coordinates in the Montgomery domain. Thus, if priming indicates // Montgomery form, x' = (2^384 * x) mod p_384 etc., each point argument // is a triple (x',y',z') representing the affine point (x/z^2,y/z^3) when // z' is nonzero or the point at infinity (group identity) if z' = 0. // // Given scalar = n and point = P, assumed to be on the NIST elliptic // curve P-384, returns a representation of n * P. If the result is the // point at infinity (either because the input point was or because the // scalar was a multiple of p_384) then the output is guaranteed to // represent the point at infinity, i.e. to have its z coordinate zero. // // Standard x86-64 ABI: RDI = res, RSI = scalar, RDX = point // Microsoft x64 ABI: RCX = res, RDX = scalar, R8 = point // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjscalarmul_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjscalarmul_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 48 #define JACSIZE (3*NUMSIZE) // Intermediate variables on the stack. // The table is 16 entries, each of size JACSIZE = 3 * NUMSIZE // Uppercase syntactic variants make x86_att version simpler to generate. #define SCALARB (0*NUMSIZE) #define scalarb (0*NUMSIZE)(%rsp) #define ACC (1*NUMSIZE) #define acc (1*NUMSIZE)(%rsp) #define TABENT (4*NUMSIZE) #define tabent (4*NUMSIZE)(%rsp) #define TAB (7*NUMSIZE) #define tab (7*NUMSIZE)(%rsp) #define res (55*NUMSIZE)(%rsp) #define NSPACE (56*NUMSIZE) // Avoid using .rep for the sake of the BoringSSL/AWS-LC delocator, // which doesn't accept repetitions, assembler macros etc. #define selectblock_xz(I) \ cmpq $I, %rdi ; \ cmovzq TAB+JACSIZE*(I-1)(%rsp), %rax ; \ cmovzq TAB+JACSIZE*(I-1)+8(%rsp), %rbx ; \ cmovzq TAB+JACSIZE*(I-1)+16(%rsp), %rcx ; \ cmovzq TAB+JACSIZE*(I-1)+24(%rsp), %rdx ; \ cmovzq TAB+JACSIZE*(I-1)+32(%rsp), %r8 ; \ cmovzq TAB+JACSIZE*(I-1)+40(%rsp), %r9 ; \ cmovzq TAB+JACSIZE*(I-1)+96(%rsp), %r10 ; \ cmovzq TAB+JACSIZE*(I-1)+104(%rsp), %r11 ; \ cmovzq TAB+JACSIZE*(I-1)+112(%rsp), %r12 ; \ cmovzq TAB+JACSIZE*(I-1)+120(%rsp), %r13 ; \ cmovzq TAB+JACSIZE*(I-1)+128(%rsp), %r14 ; \ cmovzq TAB+JACSIZE*(I-1)+136(%rsp), %r15 #define selectblock_y(I) \ cmpq $I, %rdi ; \ cmovzq TAB+JACSIZE*(I-1)+48(%rsp), %rax ; \ cmovzq TAB+JACSIZE*(I-1)+56(%rsp), %rbx ; \ cmovzq TAB+JACSIZE*(I-1)+64(%rsp), %rcx ; \ cmovzq TAB+JACSIZE*(I-1)+72(%rsp), %rdx ; \ cmovzq TAB+JACSIZE*(I-1)+80(%rsp), %r8 ; \ cmovzq TAB+JACSIZE*(I-1)+88(%rsp), %r9 S2N_BN_SYMBOL(p384_montjscalarmul_alt): // The Windows version literally calls the standard ABI version. // This simplifies the proofs since subroutine offsets are fixed. #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx callq p384_montjscalarmul_alt_standard popq %rsi popq %rdi ret p384_montjscalarmul_alt_standard: #endif // Real start of the standard ABI code. 
        pushq   %r15
        pushq   %r14
        pushq   %r13
        pushq   %r12
        pushq   %rbp
        pushq   %rbx
        subq    $NSPACE, %rsp

// Preserve the "res" input argument; others get processed early.

        movq    %rdi, res

// Reduce the input scalar mod n_384, i.e. conditionally subtract n_384.
// Store it to "scalarb".

        movq    (%rsi), %r8
        movq    $0xecec196accc52973, %rax
        subq    %rax, %r8
        movq    8(%rsi), %r9
        movq    $0x581a0db248b0a77a, %rax
        sbbq    %rax, %r9
        movq    16(%rsi), %r10
        movq    $0xc7634d81f4372ddf, %rax
        sbbq    %rax, %r10
        movq    24(%rsi), %r11
        movq    $0xffffffffffffffff, %rax
        sbbq    %rax, %r11
        movq    32(%rsi), %r12
        sbbq    %rax, %r12
        movq    40(%rsi), %r13
        sbbq    %rax, %r13

        cmovcq  (%rsi), %r8
        cmovcq  8(%rsi), %r9
        cmovcq  16(%rsi), %r10
        cmovcq  24(%rsi), %r11
        cmovcq  32(%rsi), %r12
        cmovcq  40(%rsi), %r13

        movq    %r8, SCALARB(%rsp)
        movq    %r9, SCALARB+8(%rsp)
        movq    %r10, SCALARB+16(%rsp)
        movq    %r11, SCALARB+24(%rsp)
        movq    %r12, SCALARB+32(%rsp)
        movq    %r13, SCALARB+40(%rsp)

// Set the tab[0] table entry to the input point = 1 * P

        movq    (%rdx), %rax
        movq    %rax, TAB(%rsp)
        movq    8(%rdx), %rax
        movq    %rax, TAB+8(%rsp)
        movq    16(%rdx), %rax
        movq    %rax, TAB+16(%rsp)
        movq    24(%rdx), %rax
        movq    %rax, TAB+24(%rsp)
        movq    32(%rdx), %rax
        movq    %rax, TAB+32(%rsp)
        movq    40(%rdx), %rax
        movq    %rax, TAB+40(%rsp)
        movq    48(%rdx), %rax
        movq    %rax, TAB+48(%rsp)
        movq    56(%rdx), %rax
        movq    %rax, TAB+56(%rsp)
        movq    64(%rdx), %rax
        movq    %rax, TAB+64(%rsp)
        movq    72(%rdx), %rax
        movq    %rax, TAB+72(%rsp)
        movq    80(%rdx), %rax
        movq    %rax, TAB+80(%rsp)
        movq    88(%rdx), %rax
        movq    %rax, TAB+88(%rsp)
        movq    96(%rdx), %rax
        movq    %rax, TAB+96(%rsp)
        movq    104(%rdx), %rax
        movq    %rax, TAB+104(%rsp)
        movq    112(%rdx), %rax
        movq    %rax, TAB+112(%rsp)
        movq    120(%rdx), %rax
        movq    %rax, TAB+120(%rsp)
        movq    128(%rdx), %rax
        movq    %rax, TAB+128(%rsp)
        movq    136(%rdx), %rax
        movq    %rax, TAB+136(%rsp)

// Compute and record tab[1] = 2 * P, ..., tab[15] = 16 * P

        leaq    TAB+JACSIZE*1(%rsp), %rdi
        leaq    TAB(%rsp), %rsi
        callq   p384_montjscalarmul_alt_p384_montjdouble

        leaq    TAB+JACSIZE*2(%rsp), %rdi
        leaq    TAB+JACSIZE*1(%rsp), %rsi
        leaq    TAB(%rsp), %rdx
        callq   p384_montjscalarmul_alt_p384_montjadd

        leaq    TAB+JACSIZE*3(%rsp), %rdi
        leaq    TAB+JACSIZE*1(%rsp), %rsi
        callq   p384_montjscalarmul_alt_p384_montjdouble

        leaq    TAB+JACSIZE*4(%rsp), %rdi
        leaq    TAB+JACSIZE*3(%rsp), %rsi
        leaq    TAB(%rsp), %rdx
        callq   p384_montjscalarmul_alt_p384_montjadd

        leaq    TAB+JACSIZE*5(%rsp), %rdi
        leaq    TAB+JACSIZE*2(%rsp), %rsi
        callq   p384_montjscalarmul_alt_p384_montjdouble

        leaq    TAB+JACSIZE*6(%rsp), %rdi
        leaq    TAB+JACSIZE*5(%rsp), %rsi
        leaq    TAB(%rsp), %rdx
        callq   p384_montjscalarmul_alt_p384_montjadd

        leaq    TAB+JACSIZE*7(%rsp), %rdi
        leaq    TAB+JACSIZE*3(%rsp), %rsi
        callq   p384_montjscalarmul_alt_p384_montjdouble

        leaq    TAB+JACSIZE*8(%rsp), %rdi
        leaq    TAB+JACSIZE*7(%rsp), %rsi
        leaq    TAB(%rsp), %rdx
        callq   p384_montjscalarmul_alt_p384_montjadd

        leaq    TAB+JACSIZE*9(%rsp), %rdi
        leaq    TAB+JACSIZE*4(%rsp), %rsi
        callq   p384_montjscalarmul_alt_p384_montjdouble

        leaq    TAB+JACSIZE*10(%rsp), %rdi
        leaq    TAB+JACSIZE*9(%rsp), %rsi
        leaq    TAB(%rsp), %rdx
        callq   p384_montjscalarmul_alt_p384_montjadd

        leaq    TAB+JACSIZE*11(%rsp), %rdi
        leaq    TAB+JACSIZE*5(%rsp), %rsi
        callq   p384_montjscalarmul_alt_p384_montjdouble

        leaq    TAB+JACSIZE*12(%rsp), %rdi
        leaq    TAB+JACSIZE*11(%rsp), %rsi
        leaq    TAB(%rsp), %rdx
        callq   p384_montjscalarmul_alt_p384_montjadd

        leaq    TAB+JACSIZE*13(%rsp), %rdi
        leaq    TAB+JACSIZE*6(%rsp), %rsi
        callq   p384_montjscalarmul_alt_p384_montjdouble

        leaq    TAB+JACSIZE*14(%rsp), %rdi
        leaq    TAB+JACSIZE*13(%rsp), %rsi
        leaq    TAB(%rsp), %rdx
        callq   p384_montjscalarmul_alt_p384_montjadd

        leaq    TAB+JACSIZE*15(%rsp), %rdi
        leaq    TAB+JACSIZE*7(%rsp), %rsi
        callq   p384_montjscalarmul_alt_p384_montjdouble
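// ----------------------------------------------------------------------------
// Editorial note (illustration only): the conditional subtraction near the
// top of this routine reduces the scalar modulo the P-384 group order n_384
// by computing scalar - n_384 with a borrow chain and keeping the original
// value whenever the subtraction borrows (the cmovcq chain). A commented-out
// C sketch of the same operation follows; the helper name reduce_mod_n384 is
// hypothetical, and the limbs of n_384 are the constants loaded above.
//
//   #include <stdint.h>
//
//   static void reduce_mod_n384(uint64_t s[6])
//   {
//       static const uint64_t n_384[6] = {
//           0xecec196accc52973, 0x581a0db248b0a77a, 0xc7634d81f4372ddf,
//           0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff
//       };
//       uint64_t d[6], b = 0;
//       for (int i = 0; i < 6; i++) {          /* subq/sbbq borrow chain */
//           unsigned __int128 t =
//               (unsigned __int128)s[i] - n_384[i] - b;
//           d[i] = (uint64_t)t;
//           b = (uint64_t)(t >> 64) & 1;       /* 1 exactly on borrow */
//       }
//       for (int i = 0; i < 6; i++)            /* cmovcq: keep s if s < n */
//           s[i] = b ? s[i] : d[i];
//   }
// ----------------------------------------------------------------------------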
// Add the recoding constant sum_i(16 * 32^i) to the scalar to allow signed
// digits. The digits of the constant, in lowest-to-highest order, are as
// follows; they are generated dynamically to use fewer large constant loads.
//
//      0x0842108421084210
//      0x1084210842108421
//      0x2108421084210842
//      0x4210842108421084
//      0x8421084210842108
//      0x0842108421084210

        movq    $0x1084210842108421, %rax
        movq    %rax, %rcx
        shrq    $1, %rax
        movq    SCALARB(%rsp), %r8
        addq    %rax, %r8
        movq    SCALARB+8(%rsp), %r9
        adcq    %rcx, %r9
        leaq    (%rcx,%rcx), %rcx
        movq    SCALARB+16(%rsp), %r10
        adcq    %rcx, %r10
        leaq    (%rcx,%rcx), %rcx
        movq    SCALARB+24(%rsp), %r11
        adcq    %rcx, %r11
        leaq    (%rcx,%rcx), %rcx
        movq    SCALARB+32(%rsp), %r12
        adcq    %rcx, %r12
        movq    SCALARB+40(%rsp), %r13
        adcq    %rax, %r13
        sbbq    %rdi, %rdi
        negq    %rdi

// Record the top bitfield in %rdi then shift the whole scalar left 4 bits
// to align the top of the next bitfield with the MSB (bits 379..383).

        shldq   $4, %r13, %rdi
        shldq   $4, %r12, %r13
        shldq   $4, %r11, %r12
        shldq   $4, %r10, %r11
        shldq   $4, %r9, %r10
        shldq   $4, %r8, %r9
        shlq    $4, %r8
        movq    %r8, SCALARB(%rsp)
        movq    %r9, SCALARB+8(%rsp)
        movq    %r10, SCALARB+16(%rsp)
        movq    %r11, SCALARB+24(%rsp)
        movq    %r12, SCALARB+32(%rsp)
        movq    %r13, SCALARB+40(%rsp)

// Initialize the accumulator to the corresponding entry using constant-time
// lookup in the table. This top digit, uniquely, is not recoded so there is
// no sign adjustment to make. On the x86 integer side we don't have enough
// registers to hold all the fields; this could be better done with SIMD
// registers anyway. So we do x and z coordinates in one sweep, y in another
// (this is a rehearsal for below where we might need to negate the y).

        xorl    %eax, %eax
        xorl    %ebx, %ebx
        xorl    %ecx, %ecx
        xorl    %edx, %edx
        xorl    %r8d, %r8d
        xorl    %r9d, %r9d
        xorl    %r10d, %r10d
        xorl    %r11d, %r11d
        xorl    %r12d, %r12d
        xorl    %r13d, %r13d
        xorl    %r14d, %r14d
        xorl    %r15d, %r15d

        selectblock_xz(1)
        selectblock_xz(2)
        selectblock_xz(3)
        selectblock_xz(4)
        selectblock_xz(5)
        selectblock_xz(6)
        selectblock_xz(7)
        selectblock_xz(8)
        selectblock_xz(9)
        selectblock_xz(10)
        selectblock_xz(11)
        selectblock_xz(12)
        selectblock_xz(13)
        selectblock_xz(14)
        selectblock_xz(15)
        selectblock_xz(16)

        movq    %rax, ACC(%rsp)
        movq    %rbx, ACC+8(%rsp)
        movq    %rcx, ACC+16(%rsp)
        movq    %rdx, ACC+24(%rsp)
        movq    %r8, ACC+32(%rsp)
        movq    %r9, ACC+40(%rsp)
        movq    %r10, ACC+96(%rsp)
        movq    %r11, ACC+104(%rsp)
        movq    %r12, ACC+112(%rsp)
        movq    %r13, ACC+120(%rsp)
        movq    %r14, ACC+128(%rsp)
        movq    %r15, ACC+136(%rsp)

        xorl    %eax, %eax
        xorl    %ebx, %ebx
        xorl    %ecx, %ecx
        xorl    %edx, %edx
        xorl    %r8d, %r8d
        xorl    %r9d, %r9d

        selectblock_y(1)
        selectblock_y(2)
        selectblock_y(3)
        selectblock_y(4)
        selectblock_y(5)
        selectblock_y(6)
        selectblock_y(7)
        selectblock_y(8)
        selectblock_y(9)
        selectblock_y(10)
        selectblock_y(11)
        selectblock_y(12)
        selectblock_y(13)
        selectblock_y(14)
        selectblock_y(15)
        selectblock_y(16)

        movq    %rax, ACC+48(%rsp)
        movq    %rbx, ACC+56(%rsp)
        movq    %rcx, ACC+64(%rsp)
        movq    %rdx, ACC+72(%rsp)
        movq    %r8, ACC+80(%rsp)
        movq    %r9, ACC+88(%rsp)

// Main loop over size-5 bitfields: double 5 times then add signed digit
// At each stage we shift the scalar left by 5 bits so we can simply pick
// the top 5 bits as the bitfield, saving some fiddle over indexing.
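// ----------------------------------------------------------------------------
// Editorial note (illustration only): the bias just added converts the plain
// 5-bit windows of the scalar into signed digits in the range -16..15, with
// the very top window left unbiased. The commented-out, self-contained C
// program below demonstrates the same recoding on a single 64-bit value
// (an assumed toy example, not the 384-bit code path above) and checks that
// the signed digits reconstruct the original number.
//
//   #include <assert.h>
//   #include <stdint.h>
//
//   int main(void)
//   {
//       uint64_t n = 0x0123456789abcdefULL & ((1ULL << 59) - 1);
//       uint64_t bias = 0, pow = 1;            /* sum_i 16 * 32^i, i = 0..11 */
//       for (int i = 0; i < 12; i++) { bias += 16 * pow; pow *= 32; }
//       uint64_t biased = n + bias;
//       long long acc = (long long)(biased >> 60);   /* top digit, unrecoded */
//       for (int i = 11; i >= 0; i--) {
//           long long d = (long long)((biased >> (5 * i)) & 31) - 16;
//           acc = 32 * acc + d;                /* d lies in [-16, 15] */
//       }
//       assert((uint64_t)acc == n);            /* recoding round-trips */
//       return 0;
//   }
// ----------------------------------------------------------------------------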
        movl    $380, %ebp

p384_montjscalarmul_alt_mainloop:
        subq    $5, %rbp

        leaq    ACC(%rsp), %rsi
        leaq    ACC(%rsp), %rdi
        callq   p384_montjscalarmul_alt_p384_montjdouble

        leaq    ACC(%rsp), %rsi
        leaq    ACC(%rsp), %rdi
        callq   p384_montjscalarmul_alt_p384_montjdouble

        leaq    ACC(%rsp), %rsi
        leaq    ACC(%rsp), %rdi
        callq   p384_montjscalarmul_alt_p384_montjdouble

        leaq    ACC(%rsp), %rsi
        leaq    ACC(%rsp), %rdi
        callq   p384_montjscalarmul_alt_p384_montjdouble

        leaq    ACC(%rsp), %rsi
        leaq    ACC(%rsp), %rdi
        callq   p384_montjscalarmul_alt_p384_montjdouble

// Choose the bitfield and adjust it to sign and magnitude

        movq    SCALARB(%rsp), %r8
        movq    SCALARB+8(%rsp), %r9
        movq    SCALARB+16(%rsp), %r10
        movq    SCALARB+24(%rsp), %r11
        movq    SCALARB+32(%rsp), %r12
        movq    SCALARB+40(%rsp), %r13
        movq    %r13, %rdi
        shrq    $59, %rdi
        shldq   $5, %r12, %r13
        shldq   $5, %r11, %r12
        shldq   $5, %r10, %r11
        shldq   $5, %r9, %r10
        shldq   $5, %r8, %r9
        shlq    $5, %r8
        movq    %r8, SCALARB(%rsp)
        movq    %r9, SCALARB+8(%rsp)
        movq    %r10, SCALARB+16(%rsp)
        movq    %r11, SCALARB+24(%rsp)
        movq    %r12, SCALARB+32(%rsp)
        movq    %r13, SCALARB+40(%rsp)
        subq    $16, %rdi
        sbbq    %rsi, %rsi              // %rsi = sign of digit (-1 = negative)
        xorq    %rsi, %rdi
        subq    %rsi, %rdi              // %rdi = absolute value of digit

// Conditionally select the table entry tab[i-1] = i * P in constant time
// Again, this is done in two sweeps, first doing x and z then y.

        xorl    %eax, %eax
        xorl    %ebx, %ebx
        xorl    %ecx, %ecx
        xorl    %edx, %edx
        xorl    %r8d, %r8d
        xorl    %r9d, %r9d
        xorl    %r10d, %r10d
        xorl    %r11d, %r11d
        xorl    %r12d, %r12d
        xorl    %r13d, %r13d
        xorl    %r14d, %r14d
        xorl    %r15d, %r15d

        selectblock_xz(1)
        selectblock_xz(2)
        selectblock_xz(3)
        selectblock_xz(4)
        selectblock_xz(5)
        selectblock_xz(6)
        selectblock_xz(7)
        selectblock_xz(8)
        selectblock_xz(9)
        selectblock_xz(10)
        selectblock_xz(11)
        selectblock_xz(12)
        selectblock_xz(13)
        selectblock_xz(14)
        selectblock_xz(15)
        selectblock_xz(16)

        movq    %rax, TABENT(%rsp)
        movq    %rbx, TABENT+8(%rsp)
        movq    %rcx, TABENT+16(%rsp)
        movq    %rdx, TABENT+24(%rsp)
        movq    %r8, TABENT+32(%rsp)
        movq    %r9, TABENT+40(%rsp)
        movq    %r10, TABENT+96(%rsp)
        movq    %r11, TABENT+104(%rsp)
        movq    %r12, TABENT+112(%rsp)
        movq    %r13, TABENT+120(%rsp)
        movq    %r14, TABENT+128(%rsp)
        movq    %r15, TABENT+136(%rsp)

        xorl    %eax, %eax
        xorl    %ebx, %ebx
        xorl    %ecx, %ecx
        xorl    %edx, %edx
        xorl    %r8d, %r8d
        xorl    %r9d, %r9d

        selectblock_y(1)
        selectblock_y(2)
        selectblock_y(3)
        selectblock_y(4)
        selectblock_y(5)
        selectblock_y(6)
        selectblock_y(7)
        selectblock_y(8)
        selectblock_y(9)
        selectblock_y(10)
        selectblock_y(11)
        selectblock_y(12)
        selectblock_y(13)
        selectblock_y(14)
        selectblock_y(15)
        selectblock_y(16)

// Store it to "tabent" with the y coordinate optionally negated.
// This is done carefully to give coordinates < p_384 even in
// the degenerate case y = 0 (when z = 0 for points on the curve).
// The digits of the prime p_384 are generated dynamically from
// the zeroth via not/lea to reduce the number of constant loads.
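// ----------------------------------------------------------------------------
// Editorial note (illustration only): two commented-out C sketches follow.
// The first mirrors the subq/sbbq/xorq/subq sequence above, which splits the
// raw 5-bit window w (0..31) into a sign mask and a magnitude |w - 16| in
// 0..16 (0 selects nothing, otherwise tab[|d|-1] = |d| * P). The second
// mirrors the conditional y negation performed next: when the digit is
// negative, y is replaced by p_384 - y, except that y = 0 is kept as 0 so
// the result stays reduced. The helper names are hypothetical; the p_384
// limbs are the values the code rebuilds from 0xffffffff with not/lea.
//
//   #include <stdint.h>
//
//   static void digit_sign_abs(uint64_t w, uint64_t *sign, uint64_t *mag)
//   {
//       uint64_t d = w - 16;                   /* subq $16, %rdi        */
//       uint64_t s = (w < 16) ? ~0ULL : 0;     /* sbbq %rsi, %rsi       */
//       *mag = (d ^ s) - s;                    /* xorq + subq: |w - 16| */
//       *sign = s;                             /* all-ones: negate y    */
//   }
//
//   static void cond_negate_y_p384(uint64_t y[6], uint64_t sign)
//   {
//       static const uint64_t p_384[6] = {
//           0x00000000ffffffff, 0xffffffff00000000, 0xfffffffffffffffe,
//           0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff
//       };
//       uint64_t z = y[0] | y[1] | y[2] | y[3] | y[4] | y[5];
//       if (z == 0) sign = 0;                  /* mirrors cmovzq on %rsi */
//       uint64_t neg[6], b = 0;
//       for (int i = 0; i < 6; i++) {          /* p_384 - y, borrow chain */
//           unsigned __int128 t =
//               (unsigned __int128)p_384[i] - y[i] - b;
//           neg[i] = (uint64_t)t;
//           b = (uint64_t)(t >> 64) & 1;
//       }
//       for (int i = 0; i < 6; i++)
//           y[i] = sign ? neg[i] : y[i];       /* mirrors cmovnzq chain */
//   }
// ----------------------------------------------------------------------------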
movq %rax, %r10 orq %rbx, %r10 movq %rcx, %r11 orq %rdx, %r11 movq %r8, %r12 orq %r9, %r12 orq %r11, %r10 orq %r12, %r10 cmovzq %r10, %rsi movl $0xffffffff, %r10d movq %r10, %r11 notq %r11 leaq (%r10,%r11), %r13 subq %rax, %r10 leaq -1(%r13), %r12 sbbq %rbx, %r11 movq %r13, %r14 sbbq %rcx, %r12 sbbq %rdx, %r13 movq %r14, %r15 sbbq %r8, %r14 sbbq %r9, %r15 testq %rsi, %rsi cmovnzq %r10, %rax cmovnzq %r11, %rbx cmovnzq %r12, %rcx cmovnzq %r13, %rdx cmovnzq %r14, %r8 cmovnzq %r15, %r9 movq %rax, TABENT+48(%rsp) movq %rbx, TABENT+56(%rsp) movq %rcx, TABENT+64(%rsp) movq %rdx, TABENT+72(%rsp) movq %r8, TABENT+80(%rsp) movq %r9, TABENT+88(%rsp) // Add to the accumulator leaq TABENT(%rsp), %rdx leaq ACC(%rsp), %rsi leaq ACC(%rsp), %rdi callq p384_montjscalarmul_alt_p384_montjadd testq %rbp, %rbp jne p384_montjscalarmul_alt_mainloop // That's the end of the main loop, and we just need to copy the // result in "acc" to the output. movq res, %rdi movq ACC(%rsp), %rax movq %rax, (%rdi) movq ACC+8(%rsp), %rax movq %rax, 8(%rdi) movq ACC+16(%rsp), %rax movq %rax, 16(%rdi) movq ACC+24(%rsp), %rax movq %rax, 24(%rdi) movq ACC+32(%rsp), %rax movq %rax, 32(%rdi) movq ACC+40(%rsp), %rax movq %rax, 40(%rdi) movq ACC+48(%rsp), %rax movq %rax, 48(%rdi) movq ACC+56(%rsp), %rax movq %rax, 56(%rdi) movq ACC+64(%rsp), %rax movq %rax, 64(%rdi) movq ACC+72(%rsp), %rax movq %rax, 72(%rdi) movq ACC+80(%rsp), %rax movq %rax, 80(%rdi) movq ACC+88(%rsp), %rax movq %rax, 88(%rdi) movq ACC+96(%rsp), %rax movq %rax, 96(%rdi) movq ACC+104(%rsp), %rax movq %rax, 104(%rdi) movq ACC+112(%rsp), %rax movq %rax, 112(%rdi) movq ACC+120(%rsp), %rax movq %rax, 120(%rdi) movq ACC+128(%rsp), %rax movq %rax, 128(%rdi) movq ACC+136(%rsp), %rax movq %rax, 136(%rdi) // Restore stack and registers and return addq $NSPACE, %rsp popq %rbx popq %rbp popq %r12 popq %r13 popq %r14 popq %r15 ret // Local copies of subroutines, complete clones at the moment p384_montjscalarmul_alt_p384_montjadd: pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $0x160, %rsp movq %rsi, 0x150(%rsp) movq %rdx, 0x158(%rsp) movq 0x60(%rsi), %rbx movq 0x68(%rsi), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x78(%rsi), %rax mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x88(%rsi), %rax mulq %rbx movq %rax, %r13 movq %rdx, %r14 movq 0x78(%rsi), %rax mulq 0x80(%rsi) movq %rax, %r15 movq %rdx, %rcx movq 0x70(%rsi), %rbx movq 0x60(%rsi), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x68(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rbp, %rbp movq 0x68(%rsi), %rbx movq 0x78(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x80(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x88(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq $0x0, %rcx movq 0x80(%rsi), %rbx movq 0x60(%rsi), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x70(%rsi), %rbx movq 0x78(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x80(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x88(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r15 adcq %rdx, %rcx sbbq %rbp, %rbp xorl %ebx, %ebx movq 0x78(%rsi), %rax mulq 0x88(%rsi) subq %rbp, %rdx xorl %ebp, %ebp addq %rax, %rcx adcq %rdx, %rbx adcl %ebp, %ebp movq 0x80(%rsi), %rax mulq 0x88(%rsi) addq %rax, %rbx adcq %rdx, %rbp xorl %r8d, %r8d addq %r9, %r9 adcq %r10, %r10 
adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %r15, %r15 adcq %rcx, %rcx adcq %rbx, %rbx adcq %rbp, %rbp adcl %r8d, %r8d movq 0x60(%rsi), %rax mulq %rax movq %r8, (%rsp) movq %rax, %r8 movq 0x68(%rsi), %rax movq %rbp, 0x8(%rsp) addq %rdx, %r9 sbbq %rbp, %rbp mulq %rax negq %rbp adcq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x70(%rsi), %rax mulq %rax negq %rbp adcq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x78(%rsi), %rax mulq %rax negq %rbp adcq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x80(%rsi), %rax mulq %rax negq %rbp adcq %rax, %rcx adcq %rdx, %rbx sbbq %rbp, %rbp movq 0x88(%rsi), %rax mulq %rax negq %rbp adcq 0x8(%rsp), %rax adcq (%rsp), %rdx movq %rax, %rbp movq %rdx, %rsi movq %rbx, (%rsp) movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r8 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r8, %r9 sbbq %rdx, %r10 sbbq %rax, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 movq %rbx, %r8 sbbq $0x0, %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r9 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r9, %r10 sbbq %rdx, %r11 sbbq %rax, %r12 sbbq $0x0, %r13 sbbq $0x0, %r8 movq %rbx, %r9 sbbq $0x0, %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r10 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r10, %r11 sbbq %rdx, %r12 sbbq %rax, %r13 sbbq $0x0, %r8 sbbq $0x0, %r9 movq %rbx, %r10 sbbq $0x0, %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r11 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r11, %r12 sbbq %rdx, %r13 sbbq %rax, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 movq %rbx, %r11 sbbq $0x0, %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r12 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r12, %r13 sbbq %rdx, %r8 sbbq %rax, %r9 sbbq $0x0, %r10 sbbq $0x0, %r11 movq %rbx, %r12 sbbq $0x0, %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r13 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r13, %r8 sbbq %rdx, %r9 sbbq %rax, %r10 sbbq $0x0, %r11 sbbq $0x0, %r12 movq %rbx, %r13 sbbq $0x0, %r13 movq (%rsp), %rbx addq %r8, %r14 adcq %r9, %r15 adcq %r10, %rcx adcq %r11, %rbx adcq %r12, %rbp adcq %r13, %rsi movl $0x0, %r8d adcq %r8, %r8 xorq %r11, %r11 xorq %r12, %r12 xorq %r13, %r13 movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %r9d adcq %r15, %r9 movl $0x1, %r10d adcq %rcx, %r10 adcq %rbx, %r11 adcq %rbp, %r12 adcq %rsi, %r13 adcq $0x0, %r8 cmovneq %rax, %r14 cmovneq %r9, %r15 cmovneq %r10, %rcx cmovneq %r11, %rbx cmovneq %r12, %rbp cmovneq %r13, %rsi movq %r14, (%rsp) movq %r15, 0x8(%rsp) movq %rcx, 0x10(%rsp) movq %rbx, 0x18(%rsp) movq %rbp, 0x20(%rsp) movq %rsi, 0x28(%rsp) movq 0x158(%rsp), %rsi movq 0x60(%rsi), %rbx movq 0x68(%rsi), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x78(%rsi), %rax mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x88(%rsi), %rax mulq %rbx movq %rax, %r13 movq %rdx, %r14 movq 0x78(%rsi), %rax mulq 0x80(%rsi) movq %rax, 
%r15 movq %rdx, %rcx movq 0x70(%rsi), %rbx movq 0x60(%rsi), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x68(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rbp, %rbp movq 0x68(%rsi), %rbx movq 0x78(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x80(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x88(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq $0x0, %rcx movq 0x80(%rsi), %rbx movq 0x60(%rsi), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x70(%rsi), %rbx movq 0x78(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x80(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x88(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r15 adcq %rdx, %rcx sbbq %rbp, %rbp xorl %ebx, %ebx movq 0x78(%rsi), %rax mulq 0x88(%rsi) subq %rbp, %rdx xorl %ebp, %ebp addq %rax, %rcx adcq %rdx, %rbx adcl %ebp, %ebp movq 0x80(%rsi), %rax mulq 0x88(%rsi) addq %rax, %rbx adcq %rdx, %rbp xorl %r8d, %r8d addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %r15, %r15 adcq %rcx, %rcx adcq %rbx, %rbx adcq %rbp, %rbp adcl %r8d, %r8d movq 0x60(%rsi), %rax mulq %rax movq %r8, 0xf0(%rsp) movq %rax, %r8 movq 0x68(%rsi), %rax movq %rbp, 0xf8(%rsp) addq %rdx, %r9 sbbq %rbp, %rbp mulq %rax negq %rbp adcq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x70(%rsi), %rax mulq %rax negq %rbp adcq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x78(%rsi), %rax mulq %rax negq %rbp adcq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x80(%rsi), %rax mulq %rax negq %rbp adcq %rax, %rcx adcq %rdx, %rbx sbbq %rbp, %rbp movq 0x88(%rsi), %rax mulq %rax negq %rbp adcq 0xf8(%rsp), %rax adcq 0xf0(%rsp), %rdx movq %rax, %rbp movq %rdx, %rsi movq %rbx, 0xf0(%rsp) movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r8 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r8, %r9 sbbq %rdx, %r10 sbbq %rax, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 movq %rbx, %r8 sbbq $0x0, %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r9 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r9, %r10 sbbq %rdx, %r11 sbbq %rax, %r12 sbbq $0x0, %r13 sbbq $0x0, %r8 movq %rbx, %r9 sbbq $0x0, %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r10 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r10, %r11 sbbq %rdx, %r12 sbbq %rax, %r13 sbbq $0x0, %r8 sbbq $0x0, %r9 movq %rbx, %r10 sbbq $0x0, %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r11 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r11, %r12 sbbq %rdx, %r13 sbbq %rax, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 movq %rbx, %r11 sbbq $0x0, %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r12 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r12, %r13 sbbq %rdx, %r8 sbbq %rax, %r9 sbbq $0x0, %r10 sbbq $0x0, %r11 movq %rbx, %r12 sbbq $0x0, %r12 movq %r13, %rbx 
shlq $0x20, %rbx addq %r13, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r13 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r13, %r8 sbbq %rdx, %r9 sbbq %rax, %r10 sbbq $0x0, %r11 sbbq $0x0, %r12 movq %rbx, %r13 sbbq $0x0, %r13 movq 0xf0(%rsp), %rbx addq %r8, %r14 adcq %r9, %r15 adcq %r10, %rcx adcq %r11, %rbx adcq %r12, %rbp adcq %r13, %rsi movl $0x0, %r8d adcq %r8, %r8 xorq %r11, %r11 xorq %r12, %r12 xorq %r13, %r13 movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %r9d adcq %r15, %r9 movl $0x1, %r10d adcq %rcx, %r10 adcq %rbx, %r11 adcq %rbp, %r12 adcq %rsi, %r13 adcq $0x0, %r8 cmovneq %rax, %r14 cmovneq %r9, %r15 cmovneq %r10, %rcx cmovneq %r11, %rbx cmovneq %r12, %rbp cmovneq %r13, %rsi movq %r14, 0xf0(%rsp) movq %r15, 0xf8(%rsp) movq %rcx, 0x100(%rsp) movq %rbx, 0x108(%rsp) movq %rbp, 0x110(%rsp) movq %rsi, 0x118(%rsp) movq 0x150(%rsp), %rsi movq 0x158(%rsp), %rcx movq 0x30(%rsi), %rbx movq 0x60(%rcx), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x68(%rcx), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x70(%rcx), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x78(%rcx), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x80(%rcx), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 0x88(%rcx), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %r8, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r9 sbbq %rdx, %r10 sbbq %rbp, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 sbbq $0x0, %rbx addq %rbx, %r14 adcq $0x0, %r15 movq 0x38(%rsi), %rbx movq 0x60(%rcx), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r8, %r8 movq 0x68(%rcx), %rax mulq %rbx subq %r8, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x70(%rcx), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x78(%rcx), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x80(%rcx), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r8, %r8 movq 0x88(%rcx), %rax mulq %rbx subq %r8, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r8, %r8 negq %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %r9, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r10 sbbq %rdx, %r11 sbbq %rbp, %r12 sbbq $0x0, %r13 sbbq $0x0, %r14 sbbq $0x0, %rbx addq %rbx, %r15 adcq $0x0, %r8 movq 0x40(%rsi), %rbx movq 0x60(%rcx), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r9, %r9 movq 0x68(%rcx), %rax mulq %rbx subq %r9, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x70(%rcx), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x78(%rcx), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x80(%rcx), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r9, %r9 movq 0x88(%rcx), %rax mulq %rbx subq %r9, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r9, %r9 negq %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %r10, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r11 sbbq %rdx, %r12 sbbq %rbp, %r13 
sbbq $0x0, %r14 sbbq $0x0, %r15 sbbq $0x0, %rbx addq %rbx, %r8 adcq $0x0, %r9 movq 0x48(%rsi), %rbx movq 0x60(%rcx), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r10, %r10 movq 0x68(%rcx), %rax mulq %rbx subq %r10, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r10, %r10 movq 0x70(%rcx), %rax mulq %rbx subq %r10, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r10, %r10 movq 0x78(%rcx), %rax mulq %rbx subq %r10, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r10, %r10 movq 0x80(%rcx), %rax mulq %rbx subq %r10, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r10, %r10 movq 0x88(%rcx), %rax mulq %rbx subq %r10, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r10, %r10 negq %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %r11, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r12 sbbq %rdx, %r13 sbbq %rbp, %r14 sbbq $0x0, %r15 sbbq $0x0, %r8 sbbq $0x0, %rbx addq %rbx, %r9 adcq $0x0, %r10 movq 0x50(%rsi), %rbx movq 0x60(%rcx), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %r11, %r11 movq 0x68(%rcx), %rax mulq %rbx subq %r11, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r11, %r11 movq 0x70(%rcx), %rax mulq %rbx subq %r11, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r11, %r11 movq 0x78(%rcx), %rax mulq %rbx subq %r11, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r11, %r11 movq 0x80(%rcx), %rax mulq %rbx subq %r11, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r11, %r11 movq 0x88(%rcx), %rax mulq %rbx subq %r11, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r11, %r11 negq %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %r12, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r13 sbbq %rdx, %r14 sbbq %rbp, %r15 sbbq $0x0, %r8 sbbq $0x0, %r9 sbbq $0x0, %rbx addq %rbx, %r10 adcq $0x0, %r11 movq 0x58(%rsi), %rbx movq 0x60(%rcx), %rax mulq %rbx addq %rax, %r13 adcq %rdx, %r14 sbbq %r12, %r12 movq 0x68(%rcx), %rax mulq %rbx subq %r12, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r12, %r12 movq 0x70(%rcx), %rax mulq %rbx subq %r12, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r12, %r12 movq 0x78(%rcx), %rax mulq %rbx subq %r12, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r12, %r12 movq 0x80(%rcx), %rax mulq %rbx subq %r12, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r12, %r12 movq 0x88(%rcx), %rax mulq %rbx subq %r12, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r12, %r12 negq %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %r13, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r14 sbbq %rdx, %r15 sbbq %rbp, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 sbbq $0x0, %rbx addq %rbx, %r11 adcq $0x0, %r12 xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %ebx adcq %r15, %rbx movl $0x1, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0x0, %r12 cmovneq %rax, %r14 cmovneq %rbx, %r15 cmovneq %rcx, %r8 cmovneq %rdx, %r9 cmovneq %rbp, %r10 cmovneq %r13, %r11 movq %r14, 0x120(%rsp) movq %r15, 0x128(%rsp) movq %r8, 0x130(%rsp) movq %r9, 0x138(%rsp) movq %r10, 0x140(%rsp) movq %r11, 0x148(%rsp) movq 0x150(%rsp), %rsi movq 0x158(%rsp), %rcx movq 0x30(%rcx), %rbx movq 0x60(%rsi), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x68(%rsi), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 
0x70(%rsi), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x78(%rsi), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x80(%rsi), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 0x88(%rsi), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %r8, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r9 sbbq %rdx, %r10 sbbq %rbp, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 sbbq $0x0, %rbx addq %rbx, %r14 adcq $0x0, %r15 movq 0x38(%rcx), %rbx movq 0x60(%rsi), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r8, %r8 movq 0x68(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x70(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x78(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x80(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r8, %r8 movq 0x88(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r8, %r8 negq %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %r9, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r10 sbbq %rdx, %r11 sbbq %rbp, %r12 sbbq $0x0, %r13 sbbq $0x0, %r14 sbbq $0x0, %rbx addq %rbx, %r15 adcq $0x0, %r8 movq 0x40(%rcx), %rbx movq 0x60(%rsi), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r9, %r9 movq 0x68(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x70(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x78(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x80(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r9, %r9 movq 0x88(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r9, %r9 negq %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %r10, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r11 sbbq %rdx, %r12 sbbq %rbp, %r13 sbbq $0x0, %r14 sbbq $0x0, %r15 sbbq $0x0, %rbx addq %rbx, %r8 adcq $0x0, %r9 movq 0x48(%rcx), %rbx movq 0x60(%rsi), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r10, %r10 movq 0x68(%rsi), %rax mulq %rbx subq %r10, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r10, %r10 movq 0x70(%rsi), %rax mulq %rbx subq %r10, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r10, %r10 movq 0x78(%rsi), %rax mulq %rbx subq %r10, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r10, %r10 movq 0x80(%rsi), %rax mulq %rbx subq %r10, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r10, %r10 movq 0x88(%rsi), %rax mulq %rbx subq %r10, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r10, %r10 negq %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %r11, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r12 sbbq %rdx, %r13 sbbq %rbp, %r14 sbbq $0x0, %r15 sbbq $0x0, %r8 sbbq $0x0, %rbx addq %rbx, %r9 adcq $0x0, %r10 movq 0x50(%rcx), %rbx movq 0x60(%rsi), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %r11, %r11 movq 0x68(%rsi), %rax mulq %rbx subq %r11, 
%rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r11, %r11 movq 0x70(%rsi), %rax mulq %rbx subq %r11, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r11, %r11 movq 0x78(%rsi), %rax mulq %rbx subq %r11, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r11, %r11 movq 0x80(%rsi), %rax mulq %rbx subq %r11, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r11, %r11 movq 0x88(%rsi), %rax mulq %rbx subq %r11, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r11, %r11 negq %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %r12, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r13 sbbq %rdx, %r14 sbbq %rbp, %r15 sbbq $0x0, %r8 sbbq $0x0, %r9 sbbq $0x0, %rbx addq %rbx, %r10 adcq $0x0, %r11 movq 0x58(%rcx), %rbx movq 0x60(%rsi), %rax mulq %rbx addq %rax, %r13 adcq %rdx, %r14 sbbq %r12, %r12 movq 0x68(%rsi), %rax mulq %rbx subq %r12, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r12, %r12 movq 0x70(%rsi), %rax mulq %rbx subq %r12, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r12, %r12 movq 0x78(%rsi), %rax mulq %rbx subq %r12, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r12, %r12 movq 0x80(%rsi), %rax mulq %rbx subq %r12, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r12, %r12 movq 0x88(%rsi), %rax mulq %rbx subq %r12, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r12, %r12 negq %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %r13, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r14 sbbq %rdx, %r15 sbbq %rbp, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 sbbq $0x0, %rbx addq %rbx, %r11 adcq $0x0, %r12 xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %ebx adcq %r15, %rbx movl $0x1, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0x0, %r12 cmovneq %rax, %r14 cmovneq %rbx, %r15 cmovneq %rcx, %r8 cmovneq %rdx, %r9 cmovneq %rbp, %r10 cmovneq %r13, %r11 movq %r14, 0x30(%rsp) movq %r15, 0x38(%rsp) movq %r8, 0x40(%rsp) movq %r9, 0x48(%rsp) movq %r10, 0x50(%rsp) movq %r11, 0x58(%rsp) movq 0x158(%rsp), %rcx movq (%rcx), %rbx movq (%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x8(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x10(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x18(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x20(%rsp), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 0x28(%rsp), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %r8, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r9 sbbq %rdx, %r10 sbbq %rbp, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 sbbq $0x0, %rbx addq %rbx, %r14 adcq $0x0, %r15 movq 0x8(%rcx), %rbx movq (%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r8, %r8 movq 0x8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x10(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x18(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x20(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r8, %r8 movq 0x28(%rsp), %rax mulq %rbx subq %r8, %rdx addq 
%rax, %r14 adcq %rdx, %r15 sbbq %r8, %r8 negq %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %r9, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r10 sbbq %rdx, %r11 sbbq %rbp, %r12 sbbq $0x0, %r13 sbbq $0x0, %r14 sbbq $0x0, %rbx addq %rbx, %r15 adcq $0x0, %r8 movq 0x10(%rcx), %rbx movq (%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r9, %r9 movq 0x8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x10(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x18(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x20(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r9, %r9 movq 0x28(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r9, %r9 negq %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %r10, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r11 sbbq %rdx, %r12 sbbq %rbp, %r13 sbbq $0x0, %r14 sbbq $0x0, %r15 sbbq $0x0, %rbx addq %rbx, %r8 adcq $0x0, %r9 movq 0x18(%rcx), %rbx movq (%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r10, %r10 movq 0x8(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r10, %r10 movq 0x10(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r10, %r10 movq 0x18(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r10, %r10 movq 0x20(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r10, %r10 movq 0x28(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r10, %r10 negq %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %r11, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r12 sbbq %rdx, %r13 sbbq %rbp, %r14 sbbq $0x0, %r15 sbbq $0x0, %r8 sbbq $0x0, %rbx addq %rbx, %r9 adcq $0x0, %r10 movq 0x20(%rcx), %rbx movq (%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %r11, %r11 movq 0x8(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r11, %r11 movq 0x10(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r11, %r11 movq 0x18(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r11, %r11 movq 0x20(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r11, %r11 movq 0x28(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r11, %r11 negq %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %r12, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r13 sbbq %rdx, %r14 sbbq %rbp, %r15 sbbq $0x0, %r8 sbbq $0x0, %r9 sbbq $0x0, %rbx addq %rbx, %r10 adcq $0x0, %r11 movq 0x28(%rcx), %rbx movq (%rsp), %rax mulq %rbx addq %rax, %r13 adcq %rdx, %r14 sbbq %r12, %r12 movq 0x8(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r12, %r12 movq 0x10(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r12, %r12 movq 0x18(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r12, %r12 movq 0x20(%rsp), %rax mulq %rbx subq %r12, %rdx 
addq %rax, %r9 adcq %rdx, %r10 sbbq %r12, %r12 movq 0x28(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r12, %r12 negq %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %r13, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r14 sbbq %rdx, %r15 sbbq %rbp, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 sbbq $0x0, %rbx addq %rbx, %r11 adcq $0x0, %r12 xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %ebx adcq %r15, %rbx movl $0x1, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0x0, %r12 cmovneq %rax, %r14 cmovneq %rbx, %r15 cmovneq %rcx, %r8 cmovneq %rdx, %r9 cmovneq %rbp, %r10 cmovneq %r13, %r11 movq %r14, 0x60(%rsp) movq %r15, 0x68(%rsp) movq %r8, 0x70(%rsp) movq %r9, 0x78(%rsp) movq %r10, 0x80(%rsp) movq %r11, 0x88(%rsp) movq 0x150(%rsp), %rsi movq (%rsi), %rbx movq 0xf0(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0xf8(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x100(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x108(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x110(%rsp), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 0x118(%rsp), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %r8, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r9 sbbq %rdx, %r10 sbbq %rbp, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 sbbq $0x0, %rbx addq %rbx, %r14 adcq $0x0, %r15 movq 0x8(%rsi), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r8, %r8 movq 0xf8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x100(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x108(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x110(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r8, %r8 movq 0x118(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r8, %r8 negq %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %r9, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r10 sbbq %rdx, %r11 sbbq %rbp, %r12 sbbq $0x0, %r13 sbbq $0x0, %r14 sbbq $0x0, %rbx addq %rbx, %r15 adcq $0x0, %r8 movq 0x10(%rsi), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r9, %r9 movq 0xf8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x100(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x108(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x110(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r9, %r9 movq 0x118(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r9, %r9 negq %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %r10, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r11 sbbq %rdx, %r12 
sbbq %rbp, %r13 sbbq $0x0, %r14 sbbq $0x0, %r15 sbbq $0x0, %rbx addq %rbx, %r8 adcq $0x0, %r9 movq 0x18(%rsi), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r10, %r10 movq 0xf8(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r10, %r10 movq 0x100(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r10, %r10 movq 0x108(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r10, %r10 movq 0x110(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r10, %r10 movq 0x118(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r10, %r10 negq %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %r11, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r12 sbbq %rdx, %r13 sbbq %rbp, %r14 sbbq $0x0, %r15 sbbq $0x0, %r8 sbbq $0x0, %rbx addq %rbx, %r9 adcq $0x0, %r10 movq 0x20(%rsi), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %r11, %r11 movq 0xf8(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r11, %r11 movq 0x100(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r11, %r11 movq 0x108(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r11, %r11 movq 0x110(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r11, %r11 movq 0x118(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r11, %r11 negq %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %r12, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r13 sbbq %rdx, %r14 sbbq %rbp, %r15 sbbq $0x0, %r8 sbbq $0x0, %r9 sbbq $0x0, %rbx addq %rbx, %r10 adcq $0x0, %r11 movq 0x28(%rsi), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r13 adcq %rdx, %r14 sbbq %r12, %r12 movq 0xf8(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r12, %r12 movq 0x100(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r12, %r12 movq 0x108(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r12, %r12 movq 0x110(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r12, %r12 movq 0x118(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r12, %r12 negq %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %r13, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r14 sbbq %rdx, %r15 sbbq %rbp, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 sbbq $0x0, %rbx addq %rbx, %r11 adcq $0x0, %r12 xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %ebx adcq %r15, %rbx movl $0x1, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0x0, %r12 cmovneq %rax, %r14 cmovneq %rbx, %r15 cmovneq %rcx, %r8 cmovneq %rdx, %r9 cmovneq %rbp, %r10 cmovneq %r13, %r11 movq %r14, 0xc0(%rsp) movq %r15, 0xc8(%rsp) movq %r8, 0xd0(%rsp) movq %r9, 0xd8(%rsp) movq %r10, 0xe0(%rsp) movq %r11, 0xe8(%rsp) movq 0x30(%rsp), %rbx movq (%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x8(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x10(%rsp), %rax mulq %rbx xorl %r11d, 
%r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x18(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x20(%rsp), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 0x28(%rsp), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %r8, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r9 sbbq %rdx, %r10 sbbq %rbp, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 sbbq $0x0, %rbx addq %rbx, %r14 adcq $0x0, %r15 movq 0x38(%rsp), %rbx movq (%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r8, %r8 movq 0x8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x10(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x18(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x20(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r8, %r8 movq 0x28(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r8, %r8 negq %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %r9, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r10 sbbq %rdx, %r11 sbbq %rbp, %r12 sbbq $0x0, %r13 sbbq $0x0, %r14 sbbq $0x0, %rbx addq %rbx, %r15 adcq $0x0, %r8 movq 0x40(%rsp), %rbx movq (%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r9, %r9 movq 0x8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x10(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x18(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x20(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r9, %r9 movq 0x28(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r9, %r9 negq %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %r10, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r11 sbbq %rdx, %r12 sbbq %rbp, %r13 sbbq $0x0, %r14 sbbq $0x0, %r15 sbbq $0x0, %rbx addq %rbx, %r8 adcq $0x0, %r9 movq 0x48(%rsp), %rbx movq (%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r10, %r10 movq 0x8(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r10, %r10 movq 0x10(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r10, %r10 movq 0x18(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r10, %r10 movq 0x20(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r10, %r10 movq 0x28(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r10, %r10 negq %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %r11, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r12 sbbq %rdx, %r13 sbbq %rbp, %r14 sbbq $0x0, %r15 sbbq $0x0, %r8 sbbq $0x0, %rbx addq %rbx, %r9 adcq $0x0, %r10 movq 0x50(%rsp), %rbx movq (%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %r11, %r11 movq 0x8(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r11, %r11 movq 
0x10(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r11, %r11 movq 0x18(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r11, %r11 movq 0x20(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r11, %r11 movq 0x28(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r11, %r11 negq %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %r12, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r13 sbbq %rdx, %r14 sbbq %rbp, %r15 sbbq $0x0, %r8 sbbq $0x0, %r9 sbbq $0x0, %rbx addq %rbx, %r10 adcq $0x0, %r11 movq 0x58(%rsp), %rbx movq (%rsp), %rax mulq %rbx addq %rax, %r13 adcq %rdx, %r14 sbbq %r12, %r12 movq 0x8(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r12, %r12 movq 0x10(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r12, %r12 movq 0x18(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r12, %r12 movq 0x20(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r12, %r12 movq 0x28(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r12, %r12 negq %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %r13, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r14 sbbq %rdx, %r15 sbbq %rbp, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 sbbq $0x0, %rbx addq %rbx, %r11 adcq $0x0, %r12 xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %ebx adcq %r15, %rbx movl $0x1, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0x0, %r12 cmovneq %rax, %r14 cmovneq %rbx, %r15 cmovneq %rcx, %r8 cmovneq %rdx, %r9 cmovneq %rbp, %r10 cmovneq %r13, %r11 movq %r14, 0x30(%rsp) movq %r15, 0x38(%rsp) movq %r8, 0x40(%rsp) movq %r9, 0x48(%rsp) movq %r10, 0x50(%rsp) movq %r11, 0x58(%rsp) movq 0x120(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0xf8(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x100(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x108(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x110(%rsp), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 0x118(%rsp), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %r8, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r9 sbbq %rdx, %r10 sbbq %rbp, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 sbbq $0x0, %rbx addq %rbx, %r14 adcq $0x0, %r15 movq 0x128(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r8, %r8 movq 0xf8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x100(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x108(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x110(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r8, %r8 movq 0x118(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r8, %r8 negq %r8 movq %r9, %rbx 
shlq $0x20, %rbx addq %r9, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %r9, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r10 sbbq %rdx, %r11 sbbq %rbp, %r12 sbbq $0x0, %r13 sbbq $0x0, %r14 sbbq $0x0, %rbx addq %rbx, %r15 adcq $0x0, %r8 movq 0x130(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r9, %r9 movq 0xf8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x100(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x108(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x110(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r9, %r9 movq 0x118(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r9, %r9 negq %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %r10, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r11 sbbq %rdx, %r12 sbbq %rbp, %r13 sbbq $0x0, %r14 sbbq $0x0, %r15 sbbq $0x0, %rbx addq %rbx, %r8 adcq $0x0, %r9 movq 0x138(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r10, %r10 movq 0xf8(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r10, %r10 movq 0x100(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r10, %r10 movq 0x108(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r10, %r10 movq 0x110(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r10, %r10 movq 0x118(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r10, %r10 negq %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %r11, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r12 sbbq %rdx, %r13 sbbq %rbp, %r14 sbbq $0x0, %r15 sbbq $0x0, %r8 sbbq $0x0, %rbx addq %rbx, %r9 adcq $0x0, %r10 movq 0x140(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %r11, %r11 movq 0xf8(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r11, %r11 movq 0x100(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r11, %r11 movq 0x108(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r11, %r11 movq 0x110(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r11, %r11 movq 0x118(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r11, %r11 negq %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %r12, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r13 sbbq %rdx, %r14 sbbq %rbp, %r15 sbbq $0x0, %r8 sbbq $0x0, %r9 sbbq $0x0, %rbx addq %rbx, %r10 adcq $0x0, %r11 movq 0x148(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r13 adcq %rdx, %r14 sbbq %r12, %r12 movq 0xf8(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r12, %r12 movq 0x100(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r12, %r12 movq 0x108(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r12, %r12 movq 0x110(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r9 adcq %rdx, 
%r10 sbbq %r12, %r12 movq 0x118(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r12, %r12 negq %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %r13, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r14 sbbq %rdx, %r15 sbbq %rbp, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 sbbq $0x0, %rbx addq %rbx, %r11 adcq $0x0, %r12 xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %ebx adcq %r15, %rbx movl $0x1, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0x0, %r12 cmovneq %rax, %r14 cmovneq %rbx, %r15 cmovneq %rcx, %r8 cmovneq %rdx, %r9 cmovneq %rbp, %r10 cmovneq %r13, %r11 movq %r14, 0x120(%rsp) movq %r15, 0x128(%rsp) movq %r8, 0x130(%rsp) movq %r9, 0x138(%rsp) movq %r10, 0x140(%rsp) movq %r11, 0x148(%rsp) movq 0x60(%rsp), %rax subq 0xc0(%rsp), %rax movq 0x68(%rsp), %rdx sbbq 0xc8(%rsp), %rdx movq 0x70(%rsp), %r8 sbbq 0xd0(%rsp), %r8 movq 0x78(%rsp), %r9 sbbq 0xd8(%rsp), %r9 movq 0x80(%rsp), %r10 sbbq 0xe0(%rsp), %r10 movq 0x88(%rsp), %r11 sbbq 0xe8(%rsp), %r11 sbbq %rcx, %rcx movl $0xffffffff, %esi andq %rsi, %rcx xorq %rsi, %rsi subq %rcx, %rsi subq %rsi, %rax movq %rax, 0xf0(%rsp) sbbq %rcx, %rdx movq %rdx, 0xf8(%rsp) sbbq %rax, %rax andq %rsi, %rcx negq %rax sbbq %rcx, %r8 movq %r8, 0x100(%rsp) sbbq $0x0, %r9 movq %r9, 0x108(%rsp) sbbq $0x0, %r10 movq %r10, 0x110(%rsp) sbbq $0x0, %r11 movq %r11, 0x118(%rsp) movq 0x30(%rsp), %rax subq 0x120(%rsp), %rax movq 0x38(%rsp), %rdx sbbq 0x128(%rsp), %rdx movq 0x40(%rsp), %r8 sbbq 0x130(%rsp), %r8 movq 0x48(%rsp), %r9 sbbq 0x138(%rsp), %r9 movq 0x50(%rsp), %r10 sbbq 0x140(%rsp), %r10 movq 0x58(%rsp), %r11 sbbq 0x148(%rsp), %r11 sbbq %rcx, %rcx movl $0xffffffff, %esi andq %rsi, %rcx xorq %rsi, %rsi subq %rcx, %rsi subq %rsi, %rax movq %rax, 0x30(%rsp) sbbq %rcx, %rdx movq %rdx, 0x38(%rsp) sbbq %rax, %rax andq %rsi, %rcx negq %rax sbbq %rcx, %r8 movq %r8, 0x40(%rsp) sbbq $0x0, %r9 movq %r9, 0x48(%rsp) sbbq $0x0, %r10 movq %r10, 0x50(%rsp) sbbq $0x0, %r11 movq %r11, 0x58(%rsp) movq 0xf0(%rsp), %rbx movq 0xf8(%rsp), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x108(%rsp), %rax mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x118(%rsp), %rax mulq %rbx movq %rax, %r13 movq %rdx, %r14 movq 0x108(%rsp), %rax mulq 0x110(%rsp) movq %rax, %r15 movq %rdx, %rcx movq 0x100(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0xf8(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rbp, %rbp movq 0xf8(%rsp), %rbx movq 0x108(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x110(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x118(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq $0x0, %rcx movq 0x110(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x100(%rsp), %rbx movq 0x108(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x110(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x118(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r15 adcq %rdx, %rcx sbbq %rbp, %rbp xorl %ebx, %ebx movq 0x108(%rsp), %rax mulq 0x118(%rsp) subq %rbp, %rdx xorl %ebp, %ebp addq %rax, %rcx adcq %rdx, %rbx adcl %ebp, %ebp movq 0x110(%rsp), %rax 
mulq 0x118(%rsp) addq %rax, %rbx adcq %rdx, %rbp xorl %r8d, %r8d addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %r15, %r15 adcq %rcx, %rcx adcq %rbx, %rbx adcq %rbp, %rbp adcl %r8d, %r8d movq 0xf0(%rsp), %rax mulq %rax movq %r8, 0x90(%rsp) movq %rax, %r8 movq 0xf8(%rsp), %rax movq %rbp, 0x98(%rsp) addq %rdx, %r9 sbbq %rbp, %rbp mulq %rax negq %rbp adcq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x100(%rsp), %rax mulq %rax negq %rbp adcq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x108(%rsp), %rax mulq %rax negq %rbp adcq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x110(%rsp), %rax mulq %rax negq %rbp adcq %rax, %rcx adcq %rdx, %rbx sbbq %rbp, %rbp movq 0x118(%rsp), %rax mulq %rax negq %rbp adcq 0x98(%rsp), %rax adcq 0x90(%rsp), %rdx movq %rax, %rbp movq %rdx, %rsi movq %rbx, 0x90(%rsp) movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r8 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r8, %r9 sbbq %rdx, %r10 sbbq %rax, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 movq %rbx, %r8 sbbq $0x0, %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r9 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r9, %r10 sbbq %rdx, %r11 sbbq %rax, %r12 sbbq $0x0, %r13 sbbq $0x0, %r8 movq %rbx, %r9 sbbq $0x0, %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r10 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r10, %r11 sbbq %rdx, %r12 sbbq %rax, %r13 sbbq $0x0, %r8 sbbq $0x0, %r9 movq %rbx, %r10 sbbq $0x0, %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r11 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r11, %r12 sbbq %rdx, %r13 sbbq %rax, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 movq %rbx, %r11 sbbq $0x0, %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r12 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r12, %r13 sbbq %rdx, %r8 sbbq %rax, %r9 sbbq $0x0, %r10 sbbq $0x0, %r11 movq %rbx, %r12 sbbq $0x0, %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r13 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r13, %r8 sbbq %rdx, %r9 sbbq %rax, %r10 sbbq $0x0, %r11 sbbq $0x0, %r12 movq %rbx, %r13 sbbq $0x0, %r13 movq 0x90(%rsp), %rbx addq %r8, %r14 adcq %r9, %r15 adcq %r10, %rcx adcq %r11, %rbx adcq %r12, %rbp adcq %r13, %rsi movl $0x0, %r8d adcq %r8, %r8 xorq %r11, %r11 xorq %r12, %r12 xorq %r13, %r13 movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %r9d adcq %r15, %r9 movl $0x1, %r10d adcq %rcx, %r10 adcq %rbx, %r11 adcq %rbp, %r12 adcq %rsi, %r13 adcq $0x0, %r8 cmovneq %rax, %r14 cmovneq %r9, %r15 cmovneq %r10, %rcx cmovneq %r11, %rbx cmovneq %r12, %rbp cmovneq %r13, %rsi movq %r14, 0x90(%rsp) movq %r15, 0x98(%rsp) movq %rcx, 0xa0(%rsp) movq %rbx, 0xa8(%rsp) movq %rbp, 0xb0(%rsp) movq %rsi, 0xb8(%rsp) movq 0x30(%rsp), %rbx movq 0x38(%rsp), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x48(%rsp), %rax mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x58(%rsp), 
%rax mulq %rbx movq %rax, %r13 movq %rdx, %r14 movq 0x48(%rsp), %rax mulq 0x50(%rsp) movq %rax, %r15 movq %rdx, %rcx movq 0x40(%rsp), %rbx movq 0x30(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x38(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rbp, %rbp movq 0x38(%rsp), %rbx movq 0x48(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x50(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x58(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq $0x0, %rcx movq 0x50(%rsp), %rbx movq 0x30(%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x40(%rsp), %rbx movq 0x48(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x50(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x58(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r15 adcq %rdx, %rcx sbbq %rbp, %rbp xorl %ebx, %ebx movq 0x48(%rsp), %rax mulq 0x58(%rsp) subq %rbp, %rdx xorl %ebp, %ebp addq %rax, %rcx adcq %rdx, %rbx adcl %ebp, %ebp movq 0x50(%rsp), %rax mulq 0x58(%rsp) addq %rax, %rbx adcq %rdx, %rbp xorl %r8d, %r8d addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %r15, %r15 adcq %rcx, %rcx adcq %rbx, %rbx adcq %rbp, %rbp adcl %r8d, %r8d movq 0x30(%rsp), %rax mulq %rax movq %r8, (%rsp) movq %rax, %r8 movq 0x38(%rsp), %rax movq %rbp, 0x8(%rsp) addq %rdx, %r9 sbbq %rbp, %rbp mulq %rax negq %rbp adcq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x40(%rsp), %rax mulq %rax negq %rbp adcq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x48(%rsp), %rax mulq %rax negq %rbp adcq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x50(%rsp), %rax mulq %rax negq %rbp adcq %rax, %rcx adcq %rdx, %rbx sbbq %rbp, %rbp movq 0x58(%rsp), %rax mulq %rax negq %rbp adcq 0x8(%rsp), %rax adcq (%rsp), %rdx movq %rax, %rbp movq %rdx, %rsi movq %rbx, (%rsp) movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r8 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r8, %r9 sbbq %rdx, %r10 sbbq %rax, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 movq %rbx, %r8 sbbq $0x0, %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r9 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r9, %r10 sbbq %rdx, %r11 sbbq %rax, %r12 sbbq $0x0, %r13 sbbq $0x0, %r8 movq %rbx, %r9 sbbq $0x0, %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r10 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r10, %r11 sbbq %rdx, %r12 sbbq %rax, %r13 sbbq $0x0, %r8 sbbq $0x0, %r9 movq %rbx, %r10 sbbq $0x0, %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r11 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r11, %r12 sbbq %rdx, %r13 sbbq %rax, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 movq %rbx, %r11 sbbq $0x0, %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r12 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r12, %r13 sbbq %rdx, %r8 sbbq %rax, 
%r9 sbbq $0x0, %r10 sbbq $0x0, %r11 movq %rbx, %r12 sbbq $0x0, %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r13 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r13, %r8 sbbq %rdx, %r9 sbbq %rax, %r10 sbbq $0x0, %r11 sbbq $0x0, %r12 movq %rbx, %r13 sbbq $0x0, %r13 movq (%rsp), %rbx addq %r8, %r14 adcq %r9, %r15 adcq %r10, %rcx adcq %r11, %rbx adcq %r12, %rbp adcq %r13, %rsi movl $0x0, %r8d adcq %r8, %r8 xorq %r11, %r11 xorq %r12, %r12 xorq %r13, %r13 movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %r9d adcq %r15, %r9 movl $0x1, %r10d adcq %rcx, %r10 adcq %rbx, %r11 adcq %rbp, %r12 adcq %rsi, %r13 adcq $0x0, %r8 cmovneq %rax, %r14 cmovneq %r9, %r15 cmovneq %r10, %rcx cmovneq %r11, %rbx cmovneq %r12, %rbp cmovneq %r13, %rsi movq %r14, (%rsp) movq %r15, 0x8(%rsp) movq %rcx, 0x10(%rsp) movq %rbx, 0x18(%rsp) movq %rbp, 0x20(%rsp) movq %rsi, 0x28(%rsp) movq 0xc0(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x98(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0xa0(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0xa8(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0xb0(%rsp), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 0xb8(%rsp), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %r8, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r9 sbbq %rdx, %r10 sbbq %rbp, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 sbbq $0x0, %rbx addq %rbx, %r14 adcq $0x0, %r15 movq 0xc8(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r8, %r8 movq 0x98(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0xa0(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0xa8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0xb0(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r8, %r8 movq 0xb8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r8, %r8 negq %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %r9, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r10 sbbq %rdx, %r11 sbbq %rbp, %r12 sbbq $0x0, %r13 sbbq $0x0, %r14 sbbq $0x0, %rbx addq %rbx, %r15 adcq $0x0, %r8 movq 0xd0(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r9, %r9 movq 0x98(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0xa0(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0xa8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0xb0(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r9, %r9 movq 0xb8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r9, %r9 negq %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %r10, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r11 sbbq 
%rdx, %r12 sbbq %rbp, %r13 sbbq $0x0, %r14 sbbq $0x0, %r15 sbbq $0x0, %rbx addq %rbx, %r8 adcq $0x0, %r9 movq 0xd8(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r10, %r10 movq 0x98(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r10, %r10 movq 0xa0(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r10, %r10 movq 0xa8(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r10, %r10 movq 0xb0(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r10, %r10 movq 0xb8(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r10, %r10 negq %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %r11, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r12 sbbq %rdx, %r13 sbbq %rbp, %r14 sbbq $0x0, %r15 sbbq $0x0, %r8 sbbq $0x0, %rbx addq %rbx, %r9 adcq $0x0, %r10 movq 0xe0(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %r11, %r11 movq 0x98(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r11, %r11 movq 0xa0(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r11, %r11 movq 0xa8(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r11, %r11 movq 0xb0(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r11, %r11 movq 0xb8(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r11, %r11 negq %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %r12, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r13 sbbq %rdx, %r14 sbbq %rbp, %r15 sbbq $0x0, %r8 sbbq $0x0, %r9 sbbq $0x0, %rbx addq %rbx, %r10 adcq $0x0, %r11 movq 0xe8(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx addq %rax, %r13 adcq %rdx, %r14 sbbq %r12, %r12 movq 0x98(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r12, %r12 movq 0xa0(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r12, %r12 movq 0xa8(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r12, %r12 movq 0xb0(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r12, %r12 movq 0xb8(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r12, %r12 negq %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %r13, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r14 sbbq %rdx, %r15 sbbq %rbp, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 sbbq $0x0, %rbx addq %rbx, %r11 adcq $0x0, %r12 xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %ebx adcq %r15, %rbx movl $0x1, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0x0, %r12 cmovneq %rax, %r14 cmovneq %rbx, %r15 cmovneq %rcx, %r8 cmovneq %rdx, %r9 cmovneq %rbp, %r10 cmovneq %r13, %r11 movq %r14, 0xc0(%rsp) movq %r15, 0xc8(%rsp) movq %r8, 0xd0(%rsp) movq %r9, 0xd8(%rsp) movq %r10, 0xe0(%rsp) movq %r11, 0xe8(%rsp) movq 0x60(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x98(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0xa0(%rsp), %rax mulq %rbx xorl 
%r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0xa8(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0xb0(%rsp), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 0xb8(%rsp), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %r8, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r9 sbbq %rdx, %r10 sbbq %rbp, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 sbbq $0x0, %rbx addq %rbx, %r14 adcq $0x0, %r15 movq 0x68(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r8, %r8 movq 0x98(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0xa0(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0xa8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0xb0(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r8, %r8 movq 0xb8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r8, %r8 negq %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %r9, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r10 sbbq %rdx, %r11 sbbq %rbp, %r12 sbbq $0x0, %r13 sbbq $0x0, %r14 sbbq $0x0, %rbx addq %rbx, %r15 adcq $0x0, %r8 movq 0x70(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r9, %r9 movq 0x98(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0xa0(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0xa8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0xb0(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r9, %r9 movq 0xb8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r9, %r9 negq %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %r10, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r11 sbbq %rdx, %r12 sbbq %rbp, %r13 sbbq $0x0, %r14 sbbq $0x0, %r15 sbbq $0x0, %rbx addq %rbx, %r8 adcq $0x0, %r9 movq 0x78(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r10, %r10 movq 0x98(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r10, %r10 movq 0xa0(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r10, %r10 movq 0xa8(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r10, %r10 movq 0xb0(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r10, %r10 movq 0xb8(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r10, %r10 negq %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %r11, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r12 sbbq %rdx, %r13 sbbq %rbp, %r14 sbbq $0x0, %r15 sbbq $0x0, %r8 sbbq $0x0, %rbx addq %rbx, %r9 adcq $0x0, %r10 movq 0x80(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %r11, %r11 movq 0x98(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r13 adcq %rdx, 
%r14 sbbq %r11, %r11 movq 0xa0(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r11, %r11 movq 0xa8(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r11, %r11 movq 0xb0(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r11, %r11 movq 0xb8(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r11, %r11 negq %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %r12, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r13 sbbq %rdx, %r14 sbbq %rbp, %r15 sbbq $0x0, %r8 sbbq $0x0, %r9 sbbq $0x0, %rbx addq %rbx, %r10 adcq $0x0, %r11 movq 0x88(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx addq %rax, %r13 adcq %rdx, %r14 sbbq %r12, %r12 movq 0x98(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r12, %r12 movq 0xa0(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r12, %r12 movq 0xa8(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r12, %r12 movq 0xb0(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r12, %r12 movq 0xb8(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r12, %r12 negq %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %r13, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r14 sbbq %rdx, %r15 sbbq %rbp, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 sbbq $0x0, %rbx addq %rbx, %r11 adcq $0x0, %r12 xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %ebx adcq %r15, %rbx movl $0x1, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0x0, %r12 cmovneq %rax, %r14 cmovneq %rbx, %r15 cmovneq %rcx, %r8 cmovneq %rdx, %r9 cmovneq %rbp, %r10 cmovneq %r13, %r11 movq %r14, 0x60(%rsp) movq %r15, 0x68(%rsp) movq %r8, 0x70(%rsp) movq %r9, 0x78(%rsp) movq %r10, 0x80(%rsp) movq %r11, 0x88(%rsp) movq (%rsp), %rax subq 0xc0(%rsp), %rax movq 0x8(%rsp), %rdx sbbq 0xc8(%rsp), %rdx movq 0x10(%rsp), %r8 sbbq 0xd0(%rsp), %r8 movq 0x18(%rsp), %r9 sbbq 0xd8(%rsp), %r9 movq 0x20(%rsp), %r10 sbbq 0xe0(%rsp), %r10 movq 0x28(%rsp), %r11 sbbq 0xe8(%rsp), %r11 sbbq %rcx, %rcx movl $0xffffffff, %esi andq %rsi, %rcx xorq %rsi, %rsi subq %rcx, %rsi subq %rsi, %rax movq %rax, (%rsp) sbbq %rcx, %rdx movq %rdx, 0x8(%rsp) sbbq %rax, %rax andq %rsi, %rcx negq %rax sbbq %rcx, %r8 movq %r8, 0x10(%rsp) sbbq $0x0, %r9 movq %r9, 0x18(%rsp) sbbq $0x0, %r10 movq %r10, 0x20(%rsp) sbbq $0x0, %r11 movq %r11, 0x28(%rsp) movq 0x60(%rsp), %rax subq 0xc0(%rsp), %rax movq 0x68(%rsp), %rdx sbbq 0xc8(%rsp), %rdx movq 0x70(%rsp), %r8 sbbq 0xd0(%rsp), %r8 movq 0x78(%rsp), %r9 sbbq 0xd8(%rsp), %r9 movq 0x80(%rsp), %r10 sbbq 0xe0(%rsp), %r10 movq 0x88(%rsp), %r11 sbbq 0xe8(%rsp), %r11 sbbq %rcx, %rcx movl $0xffffffff, %esi andq %rsi, %rcx xorq %rsi, %rsi subq %rcx, %rsi subq %rsi, %rax movq %rax, 0x90(%rsp) sbbq %rcx, %rdx movq %rdx, 0x98(%rsp) sbbq %rax, %rax andq %rsi, %rcx negq %rax sbbq %rcx, %r8 movq %r8, 0xa0(%rsp) sbbq $0x0, %r9 movq %r9, 0xa8(%rsp) sbbq $0x0, %r10 movq %r10, 0xb0(%rsp) sbbq $0x0, %r11 movq %r11, 0xb8(%rsp) movq 0x150(%rsp), %rsi movq 0x60(%rsi), %rbx movq 0xf0(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0xf8(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 
0x100(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x108(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x110(%rsp), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 0x118(%rsp), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %r8, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r9 sbbq %rdx, %r10 sbbq %rbp, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 sbbq $0x0, %rbx addq %rbx, %r14 adcq $0x0, %r15 movq 0x68(%rsi), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r8, %r8 movq 0xf8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x100(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x108(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x110(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r8, %r8 movq 0x118(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r8, %r8 negq %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %r9, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r10 sbbq %rdx, %r11 sbbq %rbp, %r12 sbbq $0x0, %r13 sbbq $0x0, %r14 sbbq $0x0, %rbx addq %rbx, %r15 adcq $0x0, %r8 movq 0x70(%rsi), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r9, %r9 movq 0xf8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x100(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x108(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x110(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r9, %r9 movq 0x118(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r9, %r9 negq %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %r10, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r11 sbbq %rdx, %r12 sbbq %rbp, %r13 sbbq $0x0, %r14 sbbq $0x0, %r15 sbbq $0x0, %rbx addq %rbx, %r8 adcq $0x0, %r9 movq 0x78(%rsi), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r10, %r10 movq 0xf8(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r10, %r10 movq 0x100(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r10, %r10 movq 0x108(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r10, %r10 movq 0x110(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r10, %r10 movq 0x118(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r10, %r10 negq %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %r11, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r12 sbbq %rdx, %r13 sbbq %rbp, %r14 sbbq $0x0, %r15 sbbq $0x0, %r8 sbbq $0x0, %rbx addq %rbx, %r9 adcq $0x0, %r10 movq 0x80(%rsi), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %r11, %r11 movq 0xf8(%rsp), %rax mulq 
%rbx subq %r11, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r11, %r11 movq 0x100(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r11, %r11 movq 0x108(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r11, %r11 movq 0x110(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r11, %r11 movq 0x118(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r11, %r11 negq %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %r12, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r13 sbbq %rdx, %r14 sbbq %rbp, %r15 sbbq $0x0, %r8 sbbq $0x0, %r9 sbbq $0x0, %rbx addq %rbx, %r10 adcq $0x0, %r11 movq 0x88(%rsi), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r13 adcq %rdx, %r14 sbbq %r12, %r12 movq 0xf8(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r12, %r12 movq 0x100(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r12, %r12 movq 0x108(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r12, %r12 movq 0x110(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r12, %r12 movq 0x118(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r12, %r12 negq %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %r13, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r14 sbbq %rdx, %r15 sbbq %rbp, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 sbbq $0x0, %rbx addq %rbx, %r11 adcq $0x0, %r12 xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %ebx adcq %r15, %rbx movl $0x1, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0x0, %r12 cmovneq %rax, %r14 cmovneq %rbx, %r15 cmovneq %rcx, %r8 cmovneq %rdx, %r9 cmovneq %rbp, %r10 cmovneq %r13, %r11 movq %r14, 0xf0(%rsp) movq %r15, 0xf8(%rsp) movq %r8, 0x100(%rsp) movq %r9, 0x108(%rsp) movq %r10, 0x110(%rsp) movq %r11, 0x118(%rsp) movq (%rsp), %rax subq 0x60(%rsp), %rax movq 0x8(%rsp), %rdx sbbq 0x68(%rsp), %rdx movq 0x10(%rsp), %r8 sbbq 0x70(%rsp), %r8 movq 0x18(%rsp), %r9 sbbq 0x78(%rsp), %r9 movq 0x20(%rsp), %r10 sbbq 0x80(%rsp), %r10 movq 0x28(%rsp), %r11 sbbq 0x88(%rsp), %r11 sbbq %rcx, %rcx movl $0xffffffff, %esi andq %rsi, %rcx xorq %rsi, %rsi subq %rcx, %rsi subq %rsi, %rax movq %rax, (%rsp) sbbq %rcx, %rdx movq %rdx, 0x8(%rsp) sbbq %rax, %rax andq %rsi, %rcx negq %rax sbbq %rcx, %r8 movq %r8, 0x10(%rsp) sbbq $0x0, %r9 movq %r9, 0x18(%rsp) sbbq $0x0, %r10 movq %r10, 0x20(%rsp) sbbq $0x0, %r11 movq %r11, 0x28(%rsp) movq 0xc0(%rsp), %rax subq (%rsp), %rax movq 0xc8(%rsp), %rdx sbbq 0x8(%rsp), %rdx movq 0xd0(%rsp), %r8 sbbq 0x10(%rsp), %r8 movq 0xd8(%rsp), %r9 sbbq 0x18(%rsp), %r9 movq 0xe0(%rsp), %r10 sbbq 0x20(%rsp), %r10 movq 0xe8(%rsp), %r11 sbbq 0x28(%rsp), %r11 sbbq %rcx, %rcx movl $0xffffffff, %esi andq %rsi, %rcx xorq %rsi, %rsi subq %rcx, %rsi subq %rsi, %rax movq %rax, 0xc0(%rsp) sbbq %rcx, %rdx movq %rdx, 0xc8(%rsp) sbbq %rax, %rax andq %rsi, %rcx negq %rax sbbq %rcx, %r8 movq %r8, 0xd0(%rsp) sbbq $0x0, %r9 movq %r9, 0xd8(%rsp) sbbq $0x0, %r10 movq %r10, 0xe0(%rsp) sbbq $0x0, %r11 movq %r11, 0xe8(%rsp) movq 0x120(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x98(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq 
%rax, %r9 adcq %rdx, %r10 movq 0xa0(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0xa8(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0xb0(%rsp), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 0xb8(%rsp), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %r8, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r9 sbbq %rdx, %r10 sbbq %rbp, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 sbbq $0x0, %rbx addq %rbx, %r14 adcq $0x0, %r15 movq 0x128(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r8, %r8 movq 0x98(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0xa0(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0xa8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0xb0(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r8, %r8 movq 0xb8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r8, %r8 negq %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %r9, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r10 sbbq %rdx, %r11 sbbq %rbp, %r12 sbbq $0x0, %r13 sbbq $0x0, %r14 sbbq $0x0, %rbx addq %rbx, %r15 adcq $0x0, %r8 movq 0x130(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r9, %r9 movq 0x98(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0xa0(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0xa8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0xb0(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r9, %r9 movq 0xb8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r9, %r9 negq %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %r10, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r11 sbbq %rdx, %r12 sbbq %rbp, %r13 sbbq $0x0, %r14 sbbq $0x0, %r15 sbbq $0x0, %rbx addq %rbx, %r8 adcq $0x0, %r9 movq 0x138(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r10, %r10 movq 0x98(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r10, %r10 movq 0xa0(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r10, %r10 movq 0xa8(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r10, %r10 movq 0xb0(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r10, %r10 movq 0xb8(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r10, %r10 negq %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %r11, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r12 sbbq %rdx, %r13 sbbq %rbp, %r14 sbbq $0x0, %r15 sbbq $0x0, %r8 sbbq $0x0, %rbx addq %rbx, %r9 adcq $0x0, %r10 movq 0x140(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %r11, %r11 movq 
0x98(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r11, %r11 movq 0xa0(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r11, %r11 movq 0xa8(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r11, %r11 movq 0xb0(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r11, %r11 movq 0xb8(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r11, %r11 negq %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %r12, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r13 sbbq %rdx, %r14 sbbq %rbp, %r15 sbbq $0x0, %r8 sbbq $0x0, %r9 sbbq $0x0, %rbx addq %rbx, %r10 adcq $0x0, %r11 movq 0x148(%rsp), %rbx movq 0x90(%rsp), %rax mulq %rbx addq %rax, %r13 adcq %rdx, %r14 sbbq %r12, %r12 movq 0x98(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r12, %r12 movq 0xa0(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r12, %r12 movq 0xa8(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r12, %r12 movq 0xb0(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r12, %r12 movq 0xb8(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r12, %r12 negq %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %r13, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r14 sbbq %rdx, %r15 sbbq %rbp, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 sbbq $0x0, %rbx addq %rbx, %r11 adcq $0x0, %r12 xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %ebx adcq %r15, %rbx movl $0x1, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0x0, %r12 cmovneq %rax, %r14 cmovneq %rbx, %r15 cmovneq %rcx, %r8 cmovneq %rdx, %r9 cmovneq %rbp, %r10 cmovneq %r13, %r11 movq %r14, 0x90(%rsp) movq %r15, 0x98(%rsp) movq %r8, 0xa0(%rsp) movq %r9, 0xa8(%rsp) movq %r10, 0xb0(%rsp) movq %r11, 0xb8(%rsp) movq 0x158(%rsp), %rcx movq 0x60(%rcx), %rbx movq 0xf0(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0xf8(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x100(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x108(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x110(%rsp), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 0x118(%rsp), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %r8, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r9 sbbq %rdx, %r10 sbbq %rbp, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 sbbq $0x0, %rbx addq %rbx, %r14 adcq $0x0, %r15 movq 0x68(%rcx), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r8, %r8 movq 0xf8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x100(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x108(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x110(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r8, 
%r8 movq 0x118(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r8, %r8 negq %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %r9, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r10 sbbq %rdx, %r11 sbbq %rbp, %r12 sbbq $0x0, %r13 sbbq $0x0, %r14 sbbq $0x0, %rbx addq %rbx, %r15 adcq $0x0, %r8 movq 0x70(%rcx), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r9, %r9 movq 0xf8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x100(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x108(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x110(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r9, %r9 movq 0x118(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r9, %r9 negq %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %r10, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r11 sbbq %rdx, %r12 sbbq %rbp, %r13 sbbq $0x0, %r14 sbbq $0x0, %r15 sbbq $0x0, %rbx addq %rbx, %r8 adcq $0x0, %r9 movq 0x78(%rcx), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r10, %r10 movq 0xf8(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r10, %r10 movq 0x100(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r10, %r10 movq 0x108(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r10, %r10 movq 0x110(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r10, %r10 movq 0x118(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r10, %r10 negq %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %r11, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r12 sbbq %rdx, %r13 sbbq %rbp, %r14 sbbq $0x0, %r15 sbbq $0x0, %r8 sbbq $0x0, %rbx addq %rbx, %r9 adcq $0x0, %r10 movq 0x80(%rcx), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %r11, %r11 movq 0xf8(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r11, %r11 movq 0x100(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r11, %r11 movq 0x108(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r11, %r11 movq 0x110(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r11, %r11 movq 0x118(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r11, %r11 negq %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %r12, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r13 sbbq %rdx, %r14 sbbq %rbp, %r15 sbbq $0x0, %r8 sbbq $0x0, %r9 sbbq $0x0, %rbx addq %rbx, %r10 adcq $0x0, %r11 movq 0x88(%rcx), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r13 adcq %rdx, %r14 sbbq %r12, %r12 movq 0xf8(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r12, %r12 movq 0x100(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r12, %r12 movq 0x108(%rsp), %rax mulq %rbx subq %r12, %rdx addq 
%rax, %r8 adcq %rdx, %r9 sbbq %r12, %r12 movq 0x110(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r12, %r12 movq 0x118(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r12, %r12 negq %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %r13, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r14 sbbq %rdx, %r15 sbbq %rbp, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 sbbq $0x0, %rbx addq %rbx, %r11 adcq $0x0, %r12 xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %ebx adcq %r15, %rbx movl $0x1, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0x0, %r12 cmovneq %rax, %r14 cmovneq %rbx, %r15 cmovneq %rcx, %r8 cmovneq %rdx, %r9 cmovneq %rbp, %r10 cmovneq %r13, %r11 movq %r14, 0xf0(%rsp) movq %r15, 0xf8(%rsp) movq %r8, 0x100(%rsp) movq %r9, 0x108(%rsp) movq %r10, 0x110(%rsp) movq %r11, 0x118(%rsp) movq 0xc0(%rsp), %rbx movq 0x30(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x38(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x40(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x48(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x50(%rsp), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 0x58(%rsp), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %r8, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r9 sbbq %rdx, %r10 sbbq %rbp, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 sbbq $0x0, %rbx addq %rbx, %r14 adcq $0x0, %r15 movq 0xc8(%rsp), %rbx movq 0x30(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r8, %r8 movq 0x38(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x40(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x48(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x50(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r8, %r8 movq 0x58(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r8, %r8 negq %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %r9, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r10 sbbq %rdx, %r11 sbbq %rbp, %r12 sbbq $0x0, %r13 sbbq $0x0, %r14 sbbq $0x0, %rbx addq %rbx, %r15 adcq $0x0, %r8 movq 0xd0(%rsp), %rbx movq 0x30(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r9, %r9 movq 0x38(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x40(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x48(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x50(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r9, %r9 movq 0x58(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r9, %r9 negq %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %r10, 
%rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r11 sbbq %rdx, %r12 sbbq %rbp, %r13 sbbq $0x0, %r14 sbbq $0x0, %r15 sbbq $0x0, %rbx addq %rbx, %r8 adcq $0x0, %r9 movq 0xd8(%rsp), %rbx movq 0x30(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r10, %r10 movq 0x38(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r10, %r10 movq 0x40(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r10, %r10 movq 0x48(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r10, %r10 movq 0x50(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r10, %r10 movq 0x58(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r10, %r10 negq %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %r11, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r12 sbbq %rdx, %r13 sbbq %rbp, %r14 sbbq $0x0, %r15 sbbq $0x0, %r8 sbbq $0x0, %rbx addq %rbx, %r9 adcq $0x0, %r10 movq 0xe0(%rsp), %rbx movq 0x30(%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %r11, %r11 movq 0x38(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r11, %r11 movq 0x40(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r11, %r11 movq 0x48(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r11, %r11 movq 0x50(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r11, %r11 movq 0x58(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r11, %r11 negq %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %r12, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r13 sbbq %rdx, %r14 sbbq %rbp, %r15 sbbq $0x0, %r8 sbbq $0x0, %r9 sbbq $0x0, %rbx addq %rbx, %r10 adcq $0x0, %r11 movq 0xe8(%rsp), %rbx movq 0x30(%rsp), %rax mulq %rbx addq %rax, %r13 adcq %rdx, %r14 sbbq %r12, %r12 movq 0x38(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r12, %r12 movq 0x40(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r12, %r12 movq 0x48(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r12, %r12 movq 0x50(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r12, %r12 movq 0x58(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r12, %r12 negq %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %r13, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r14 sbbq %rdx, %r15 sbbq %rbp, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 sbbq $0x0, %rbx addq %rbx, %r11 adcq $0x0, %r12 xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %ebx adcq %r15, %rbx movl $0x1, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0x0, %r12 cmovneq %rax, %r14 cmovneq %rbx, %r15 cmovneq %rcx, %r8 cmovneq %rdx, %r9 cmovneq %rbp, %r10 cmovneq %r13, %r11 movq %r14, 0xc0(%rsp) movq %r15, 0xc8(%rsp) movq %r8, 0xd0(%rsp) movq %r9, 0xd8(%rsp) movq %r10, 0xe0(%rsp) movq %r11, 0xe8(%rsp) movq 0xc0(%rsp), %rax subq 0x90(%rsp), %rax movq 0xc8(%rsp), %rdx sbbq 0x98(%rsp), %rdx movq 0xd0(%rsp), %r8 sbbq 0xa0(%rsp), %r8 movq 0xd8(%rsp), 
%r9 sbbq 0xa8(%rsp), %r9 movq 0xe0(%rsp), %r10 sbbq 0xb0(%rsp), %r10 movq 0xe8(%rsp), %r11 sbbq 0xb8(%rsp), %r11 sbbq %rcx, %rcx movl $0xffffffff, %esi andq %rsi, %rcx xorq %rsi, %rsi subq %rcx, %rsi subq %rsi, %rax movq %rax, 0xc0(%rsp) sbbq %rcx, %rdx movq %rdx, 0xc8(%rsp) sbbq %rax, %rax andq %rsi, %rcx negq %rax sbbq %rcx, %r8 movq %r8, 0xd0(%rsp) sbbq $0x0, %r9 movq %r9, 0xd8(%rsp) sbbq $0x0, %r10 movq %r10, 0xe0(%rsp) sbbq $0x0, %r11 movq %r11, 0xe8(%rsp) movq 0x158(%rsp), %rcx movq 0x60(%rcx), %r8 movq 0x68(%rcx), %r9 movq 0x70(%rcx), %r10 movq 0x78(%rcx), %r11 movq 0x80(%rcx), %rbx movq 0x88(%rcx), %rbp movq %r8, %rax movq %r9, %rdx orq %r10, %rax orq %r11, %rdx orq %rbx, %rax orq %rbp, %rdx orq %rdx, %rax negq %rax sbbq %rax, %rax movq 0x150(%rsp), %rsi movq 0x60(%rsi), %r12 movq 0x68(%rsi), %r13 movq 0x70(%rsi), %r14 movq 0x78(%rsi), %r15 movq 0x80(%rsi), %rdx movq 0x88(%rsi), %rcx cmoveq %r12, %r8 cmoveq %r13, %r9 cmoveq %r14, %r10 cmoveq %r15, %r11 cmoveq %rdx, %rbx cmoveq %rcx, %rbp orq %r13, %r12 orq %r15, %r14 orq %rcx, %rdx orq %r14, %r12 orq %r12, %rdx negq %rdx sbbq %rdx, %rdx cmpq %rdx, %rax cmoveq 0xf0(%rsp), %r8 cmoveq 0xf8(%rsp), %r9 cmoveq 0x100(%rsp), %r10 cmoveq 0x108(%rsp), %r11 cmoveq 0x110(%rsp), %rbx cmoveq 0x118(%rsp), %rbp movq %r8, 0xf0(%rsp) movq %r9, 0xf8(%rsp) movq %r10, 0x100(%rsp) movq %r11, 0x108(%rsp) movq %rbx, 0x110(%rsp) movq %rbp, 0x118(%rsp) movq 0x158(%rsp), %rcx movq 0x150(%rsp), %rsi movq (%rsp), %r8 cmovbq (%rsi), %r8 cmova (%rcx), %r8 movq 0x8(%rsp), %r9 cmovbq 0x8(%rsi), %r9 cmova 0x8(%rcx), %r9 movq 0x10(%rsp), %r10 cmovbq 0x10(%rsi), %r10 cmova 0x10(%rcx), %r10 movq 0x18(%rsp), %r11 cmovbq 0x18(%rsi), %r11 cmova 0x18(%rcx), %r11 movq 0x20(%rsp), %rbx cmovbq 0x20(%rsi), %rbx cmova 0x20(%rcx), %rbx movq 0x28(%rsp), %rbp cmovbq 0x28(%rsi), %rbp cmova 0x28(%rcx), %rbp movq 0xc0(%rsp), %r12 cmovbq 0x30(%rsi), %r12 cmova 0x30(%rcx), %r12 movq 0xc8(%rsp), %r13 cmovbq 0x38(%rsi), %r13 cmova 0x38(%rcx), %r13 movq 0xd0(%rsp), %r14 cmovbq 0x40(%rsi), %r14 cmova 0x40(%rcx), %r14 movq 0xd8(%rsp), %r15 cmovbq 0x48(%rsi), %r15 cmova 0x48(%rcx), %r15 movq 0xe0(%rsp), %rdx cmovbq 0x50(%rsi), %rdx cmova 0x50(%rcx), %rdx movq 0xe8(%rsp), %rax cmovbq 0x58(%rsi), %rax cmova 0x58(%rcx), %rax movq %r8, (%rdi) movq %r9, 0x8(%rdi) movq %r10, 0x10(%rdi) movq %r11, 0x18(%rdi) movq %rbx, 0x20(%rdi) movq %rbp, 0x28(%rdi) movq 0xf0(%rsp), %r8 movq 0xf8(%rsp), %r9 movq 0x100(%rsp), %r10 movq 0x108(%rsp), %r11 movq 0x110(%rsp), %rbx movq 0x118(%rsp), %rbp movq %r12, 0x30(%rdi) movq %r13, 0x38(%rdi) movq %r14, 0x40(%rdi) movq %r15, 0x48(%rdi) movq %rdx, 0x50(%rdi) movq %rax, 0x58(%rdi) movq %r8, 0x60(%rdi) movq %r9, 0x68(%rdi) movq %r10, 0x70(%rdi) movq %r11, 0x78(%rdi) movq %rbx, 0x80(%rdi) movq %rbp, 0x88(%rdi) addq $0x160, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx ret p384_montjscalarmul_alt_p384_montjdouble: pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $0x158, %rsp movq %rdi, 0x150(%rsp) movq 0x60(%rsi), %rbx movq 0x68(%rsi), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x78(%rsi), %rax mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x88(%rsi), %rax mulq %rbx movq %rax, %r13 movq %rdx, %r14 movq 0x78(%rsi), %rax mulq 0x80(%rsi) movq %rax, %r15 movq %rdx, %rcx movq 0x70(%rsi), %rbx movq 0x60(%rsi), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x68(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rbp, %rbp movq 0x68(%rsi), %rbx movq 0x78(%rsi), %rax mulq %rbx subq 
%rbp, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x80(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x88(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq $0x0, %rcx movq 0x80(%rsi), %rbx movq 0x60(%rsi), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x70(%rsi), %rbx movq 0x78(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x80(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x88(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r15 adcq %rdx, %rcx sbbq %rbp, %rbp xorl %ebx, %ebx movq 0x78(%rsi), %rax mulq 0x88(%rsi) subq %rbp, %rdx xorl %ebp, %ebp addq %rax, %rcx adcq %rdx, %rbx adcl %ebp, %ebp movq 0x80(%rsi), %rax mulq 0x88(%rsi) addq %rax, %rbx adcq %rdx, %rbp xorl %r8d, %r8d addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %r15, %r15 adcq %rcx, %rcx adcq %rbx, %rbx adcq %rbp, %rbp adcl %r8d, %r8d movq 0x60(%rsi), %rax mulq %rax movq %r8, (%rsp) movq %rax, %r8 movq 0x68(%rsi), %rax movq %rbp, 0x8(%rsp) addq %rdx, %r9 sbbq %rbp, %rbp mulq %rax negq %rbp adcq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x70(%rsi), %rax mulq %rax negq %rbp adcq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x78(%rsi), %rax mulq %rax negq %rbp adcq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x80(%rsi), %rax mulq %rax negq %rbp adcq %rax, %rcx adcq %rdx, %rbx sbbq %rbp, %rbp movq 0x88(%rsi), %rax mulq %rax negq %rbp adcq 0x8(%rsp), %rax adcq (%rsp), %rdx movq %rax, %rbp movq %rdx, %rdi movq %rbx, (%rsp) movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r8 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r8, %r9 sbbq %rdx, %r10 sbbq %rax, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 movq %rbx, %r8 sbbq $0x0, %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r9 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r9, %r10 sbbq %rdx, %r11 sbbq %rax, %r12 sbbq $0x0, %r13 sbbq $0x0, %r8 movq %rbx, %r9 sbbq $0x0, %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r10 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r10, %r11 sbbq %rdx, %r12 sbbq %rax, %r13 sbbq $0x0, %r8 sbbq $0x0, %r9 movq %rbx, %r10 sbbq $0x0, %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r11 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r11, %r12 sbbq %rdx, %r13 sbbq %rax, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 movq %rbx, %r11 sbbq $0x0, %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r12 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r12, %r13 sbbq %rdx, %r8 sbbq %rax, %r9 sbbq $0x0, %r10 sbbq $0x0, %r11 movq %rbx, %r12 sbbq $0x0, %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r13 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r13, %r8 sbbq %rdx, %r9 sbbq %rax, %r10 sbbq $0x0, %r11 sbbq $0x0, %r12 movq %rbx, %r13 sbbq 
$0x0, %r13 movq (%rsp), %rbx addq %r8, %r14 adcq %r9, %r15 adcq %r10, %rcx adcq %r11, %rbx adcq %r12, %rbp adcq %r13, %rdi movl $0x0, %r8d adcq %r8, %r8 xorq %r11, %r11 xorq %r12, %r12 xorq %r13, %r13 movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %r9d adcq %r15, %r9 movl $0x1, %r10d adcq %rcx, %r10 adcq %rbx, %r11 adcq %rbp, %r12 adcq %rdi, %r13 adcq $0x0, %r8 cmovneq %rax, %r14 cmovneq %r9, %r15 cmovneq %r10, %rcx cmovneq %r11, %rbx cmovneq %r12, %rbp cmovneq %r13, %rdi movq %r14, (%rsp) movq %r15, 0x8(%rsp) movq %rcx, 0x10(%rsp) movq %rbx, 0x18(%rsp) movq %rbp, 0x20(%rsp) movq %rdi, 0x28(%rsp) movq 0x30(%rsi), %rbx movq 0x38(%rsi), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x48(%rsi), %rax mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x58(%rsi), %rax mulq %rbx movq %rax, %r13 movq %rdx, %r14 movq 0x48(%rsi), %rax mulq 0x50(%rsi) movq %rax, %r15 movq %rdx, %rcx movq 0x40(%rsi), %rbx movq 0x30(%rsi), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x38(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rbp, %rbp movq 0x38(%rsi), %rbx movq 0x48(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x50(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x58(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq $0x0, %rcx movq 0x50(%rsi), %rbx movq 0x30(%rsi), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x40(%rsi), %rbx movq 0x48(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x50(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x58(%rsi), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r15 adcq %rdx, %rcx sbbq %rbp, %rbp xorl %ebx, %ebx movq 0x48(%rsi), %rax mulq 0x58(%rsi) subq %rbp, %rdx xorl %ebp, %ebp addq %rax, %rcx adcq %rdx, %rbx adcl %ebp, %ebp movq 0x50(%rsi), %rax mulq 0x58(%rsi) addq %rax, %rbx adcq %rdx, %rbp xorl %r8d, %r8d addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %r15, %r15 adcq %rcx, %rcx adcq %rbx, %rbx adcq %rbp, %rbp adcl %r8d, %r8d movq 0x30(%rsi), %rax mulq %rax movq %r8, 0x30(%rsp) movq %rax, %r8 movq 0x38(%rsi), %rax movq %rbp, 0x38(%rsp) addq %rdx, %r9 sbbq %rbp, %rbp mulq %rax negq %rbp adcq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x40(%rsi), %rax mulq %rax negq %rbp adcq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x48(%rsi), %rax mulq %rax negq %rbp adcq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x50(%rsi), %rax mulq %rax negq %rbp adcq %rax, %rcx adcq %rdx, %rbx sbbq %rbp, %rbp movq 0x58(%rsi), %rax mulq %rax negq %rbp adcq 0x38(%rsp), %rax adcq 0x30(%rsp), %rdx movq %rax, %rbp movq %rdx, %rdi movq %rbx, 0x30(%rsp) movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r8 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r8, %r9 sbbq %rdx, %r10 sbbq %rax, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 movq %rbx, %r8 sbbq $0x0, %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r9 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r9, %r10 sbbq %rdx, %r11 sbbq %rax, %r12 sbbq $0x0, %r13 sbbq $0x0, %r8 movq %rbx, %r9 sbbq $0x0, %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx movabsq $0xffffffff00000001, %rax 
mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r10 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r10, %r11 sbbq %rdx, %r12 sbbq %rax, %r13 sbbq $0x0, %r8 sbbq $0x0, %r9 movq %rbx, %r10 sbbq $0x0, %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r11 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r11, %r12 sbbq %rdx, %r13 sbbq %rax, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 movq %rbx, %r11 sbbq $0x0, %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r12 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r12, %r13 sbbq %rdx, %r8 sbbq %rax, %r9 sbbq $0x0, %r10 sbbq $0x0, %r11 movq %rbx, %r12 sbbq $0x0, %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r13 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r13, %r8 sbbq %rdx, %r9 sbbq %rax, %r10 sbbq $0x0, %r11 sbbq $0x0, %r12 movq %rbx, %r13 sbbq $0x0, %r13 movq 0x30(%rsp), %rbx addq %r8, %r14 adcq %r9, %r15 adcq %r10, %rcx adcq %r11, %rbx adcq %r12, %rbp adcq %r13, %rdi movl $0x0, %r8d adcq %r8, %r8 xorq %r11, %r11 xorq %r12, %r12 xorq %r13, %r13 movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %r9d adcq %r15, %r9 movl $0x1, %r10d adcq %rcx, %r10 adcq %rbx, %r11 adcq %rbp, %r12 adcq %rdi, %r13 adcq $0x0, %r8 cmovneq %rax, %r14 cmovneq %r9, %r15 cmovneq %r10, %rcx cmovneq %r11, %rbx cmovneq %r12, %rbp cmovneq %r13, %rdi movq %r14, 0x30(%rsp) movq %r15, 0x38(%rsp) movq %rcx, 0x40(%rsp) movq %rbx, 0x48(%rsp) movq %rbp, 0x50(%rsp) movq %rdi, 0x58(%rsp) movq (%rsi), %rax addq (%rsp), %rax movq 0x8(%rsi), %rcx adcq 0x8(%rsp), %rcx movq 0x10(%rsi), %r8 adcq 0x10(%rsp), %r8 movq 0x18(%rsi), %r9 adcq 0x18(%rsp), %r9 movq 0x20(%rsi), %r10 adcq 0x20(%rsp), %r10 movq 0x28(%rsi), %r11 adcq 0x28(%rsp), %r11 sbbq %rdx, %rdx movl $0x1, %ebx andq %rdx, %rbx movl $0xffffffff, %ebp andq %rbp, %rdx xorq %rbp, %rbp subq %rdx, %rbp addq %rbp, %rax movq %rax, 0xf0(%rsp) adcq %rdx, %rcx movq %rcx, 0xf8(%rsp) adcq %rbx, %r8 movq %r8, 0x100(%rsp) adcq $0x0, %r9 movq %r9, 0x108(%rsp) adcq $0x0, %r10 movq %r10, 0x110(%rsp) adcq $0x0, %r11 movq %r11, 0x118(%rsp) movq (%rsi), %rax subq (%rsp), %rax movq 0x8(%rsi), %rdx sbbq 0x8(%rsp), %rdx movq 0x10(%rsi), %r8 sbbq 0x10(%rsp), %r8 movq 0x18(%rsi), %r9 sbbq 0x18(%rsp), %r9 movq 0x20(%rsi), %r10 sbbq 0x20(%rsp), %r10 movq 0x28(%rsi), %r11 sbbq 0x28(%rsp), %r11 sbbq %rcx, %rcx movl $0xffffffff, %ebx andq %rbx, %rcx xorq %rbx, %rbx subq %rcx, %rbx subq %rbx, %rax movq %rax, 0xc0(%rsp) sbbq %rcx, %rdx movq %rdx, 0xc8(%rsp) sbbq %rax, %rax andq %rbx, %rcx negq %rax sbbq %rcx, %r8 movq %r8, 0xd0(%rsp) sbbq $0x0, %r9 movq %r9, 0xd8(%rsp) sbbq $0x0, %r10 movq %r10, 0xe0(%rsp) sbbq $0x0, %r11 movq %r11, 0xe8(%rsp) movq 0xc0(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0xf8(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x100(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x108(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x110(%rsp), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 0x118(%rsp), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq %r8, %rbx shlq 
$0x20, %rbx addq %r8, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %r8, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r9 sbbq %rdx, %r10 sbbq %rbp, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 sbbq $0x0, %rbx addq %rbx, %r14 adcq $0x0, %r15 movq 0xc8(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r8, %r8 movq 0xf8(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x100(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x108(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x110(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r8, %r8 movq 0x118(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r8, %r8 negq %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %r9, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r10 sbbq %rdx, %r11 sbbq %rbp, %r12 sbbq $0x0, %r13 sbbq $0x0, %r14 sbbq $0x0, %rbx addq %rbx, %r15 adcq $0x0, %r8 movq 0xd0(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r9, %r9 movq 0xf8(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x100(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x108(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x110(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r9, %r9 movq 0x118(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r9, %r9 negq %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %r10, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r11 sbbq %rdx, %r12 sbbq %rbp, %r13 sbbq $0x0, %r14 sbbq $0x0, %r15 sbbq $0x0, %rbx addq %rbx, %r8 adcq $0x0, %r9 movq 0xd8(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r10, %r10 movq 0xf8(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r10, %r10 movq 0x100(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r10, %r10 movq 0x108(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r10, %r10 movq 0x110(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r10, %r10 movq 0x118(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r10, %r10 negq %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %r11, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r12 sbbq %rdx, %r13 sbbq %rbp, %r14 sbbq $0x0, %r15 sbbq $0x0, %r8 sbbq $0x0, %rbx addq %rbx, %r9 adcq $0x0, %r10 movq 0xe0(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %r11, %r11 movq 0xf8(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r11, %r11 movq 0x100(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r11, %r11 movq 0x108(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r11, %r11 movq 0x110(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r11, %r11 movq 
0x118(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r11, %r11 negq %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %r12, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r13 sbbq %rdx, %r14 sbbq %rbp, %r15 sbbq $0x0, %r8 sbbq $0x0, %r9 sbbq $0x0, %rbx addq %rbx, %r10 adcq $0x0, %r11 movq 0xe8(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r13 adcq %rdx, %r14 sbbq %r12, %r12 movq 0xf8(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r12, %r12 movq 0x100(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r12, %r12 movq 0x108(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r12, %r12 movq 0x110(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r12, %r12 movq 0x118(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r12, %r12 negq %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %r13, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r14 sbbq %rdx, %r15 sbbq %rbp, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 sbbq $0x0, %rbx addq %rbx, %r11 adcq $0x0, %r12 xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %ebx adcq %r15, %rbx movl $0x1, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0x0, %r12 cmovneq %rax, %r14 cmovneq %rbx, %r15 cmovneq %rcx, %r8 cmovneq %rdx, %r9 cmovneq %rbp, %r10 cmovneq %r13, %r11 movq %r14, 0x60(%rsp) movq %r15, 0x68(%rsp) movq %r8, 0x70(%rsp) movq %r9, 0x78(%rsp) movq %r10, 0x80(%rsp) movq %r11, 0x88(%rsp) movq 0x30(%rsi), %rax addq 0x60(%rsi), %rax movq 0x38(%rsi), %rcx adcq 0x68(%rsi), %rcx movq 0x40(%rsi), %r8 adcq 0x70(%rsi), %r8 movq 0x48(%rsi), %r9 adcq 0x78(%rsi), %r9 movq 0x50(%rsi), %r10 adcq 0x80(%rsi), %r10 movq 0x58(%rsi), %r11 adcq 0x88(%rsi), %r11 movl $0x0, %edx adcq %rdx, %rdx movabsq $0xffffffff00000001, %rbp addq %rbp, %rax movl $0xffffffff, %ebp adcq %rbp, %rcx adcq $0x1, %r8 adcq $0x0, %r9 adcq $0x0, %r10 adcq $0x0, %r11 adcq $0xffffffffffffffff, %rdx movl $0x1, %ebx andq %rdx, %rbx andq %rbp, %rdx xorq %rbp, %rbp subq %rdx, %rbp subq %rbp, %rax movq %rax, 0xf0(%rsp) sbbq %rdx, %rcx movq %rcx, 0xf8(%rsp) sbbq %rbx, %r8 movq %r8, 0x100(%rsp) sbbq $0x0, %r9 movq %r9, 0x108(%rsp) sbbq $0x0, %r10 movq %r10, 0x110(%rsp) sbbq $0x0, %r11 movq %r11, 0x118(%rsp) movq 0x60(%rsp), %rbx movq 0x68(%rsp), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x78(%rsp), %rax mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x88(%rsp), %rax mulq %rbx movq %rax, %r13 movq %rdx, %r14 movq 0x78(%rsp), %rax mulq 0x80(%rsp) movq %rax, %r15 movq %rdx, %rcx movq 0x70(%rsp), %rbx movq 0x60(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x68(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rbp, %rbp movq 0x68(%rsp), %rbx movq 0x78(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x80(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x88(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq $0x0, %rcx movq 0x80(%rsp), %rbx movq 0x60(%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x70(%rsp), %rbx movq 0x78(%rsp), 
%rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x80(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x88(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r15 adcq %rdx, %rcx sbbq %rbp, %rbp xorl %ebx, %ebx movq 0x78(%rsp), %rax mulq 0x88(%rsp) subq %rbp, %rdx xorl %ebp, %ebp addq %rax, %rcx adcq %rdx, %rbx adcl %ebp, %ebp movq 0x80(%rsp), %rax mulq 0x88(%rsp) addq %rax, %rbx adcq %rdx, %rbp xorl %r8d, %r8d addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %r15, %r15 adcq %rcx, %rcx adcq %rbx, %rbx adcq %rbp, %rbp adcl %r8d, %r8d movq 0x60(%rsp), %rax mulq %rax movq %r8, 0x120(%rsp) movq %rax, %r8 movq 0x68(%rsp), %rax movq %rbp, 0x128(%rsp) addq %rdx, %r9 sbbq %rbp, %rbp mulq %rax negq %rbp adcq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x70(%rsp), %rax mulq %rax negq %rbp adcq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x78(%rsp), %rax mulq %rax negq %rbp adcq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x80(%rsp), %rax mulq %rax negq %rbp adcq %rax, %rcx adcq %rdx, %rbx sbbq %rbp, %rbp movq 0x88(%rsp), %rax mulq %rax negq %rbp adcq 0x128(%rsp), %rax adcq 0x120(%rsp), %rdx movq %rax, %rbp movq %rdx, %rdi movq %rbx, 0x120(%rsp) movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r8 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r8, %r9 sbbq %rdx, %r10 sbbq %rax, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 movq %rbx, %r8 sbbq $0x0, %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r9 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r9, %r10 sbbq %rdx, %r11 sbbq %rax, %r12 sbbq $0x0, %r13 sbbq $0x0, %r8 movq %rbx, %r9 sbbq $0x0, %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r10 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r10, %r11 sbbq %rdx, %r12 sbbq %rax, %r13 sbbq $0x0, %r8 sbbq $0x0, %r9 movq %rbx, %r10 sbbq $0x0, %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r11 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r11, %r12 sbbq %rdx, %r13 sbbq %rax, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 movq %rbx, %r11 sbbq $0x0, %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r12 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r12, %r13 sbbq %rdx, %r8 sbbq %rax, %r9 sbbq $0x0, %r10 sbbq $0x0, %r11 movq %rbx, %r12 sbbq $0x0, %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r13 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r13, %r8 sbbq %rdx, %r9 sbbq %rax, %r10 sbbq $0x0, %r11 sbbq $0x0, %r12 movq %rbx, %r13 sbbq $0x0, %r13 movq 0x120(%rsp), %rbx addq %r8, %r14 adcq %r9, %r15 adcq %r10, %rcx adcq %r11, %rbx adcq %r12, %rbp adcq %r13, %rdi movl $0x0, %r8d adcq %r8, %r8 xorq %r11, %r11 xorq %r12, %r12 xorq %r13, %r13 movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %r9d adcq %r15, %r9 movl $0x1, %r10d adcq %rcx, %r10 adcq %rbx, %r11 adcq %rbp, %r12 adcq %rdi, 
%r13 adcq $0x0, %r8 cmovneq %rax, %r14 cmovneq %r9, %r15 cmovneq %r10, %rcx cmovneq %r11, %rbx cmovneq %r12, %rbp cmovneq %r13, %rdi movq %r14, 0x120(%rsp) movq %r15, 0x128(%rsp) movq %rcx, 0x130(%rsp) movq %rbx, 0x138(%rsp) movq %rbp, 0x140(%rsp) movq %rdi, 0x148(%rsp) movq 0x30(%rsp), %rbx movq (%rsi), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x8(%rsi), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x10(%rsi), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x18(%rsi), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x20(%rsi), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 0x28(%rsi), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %r8, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r9 sbbq %rdx, %r10 sbbq %rbp, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 sbbq $0x0, %rbx addq %rbx, %r14 adcq $0x0, %r15 movq 0x38(%rsp), %rbx movq (%rsi), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r8, %r8 movq 0x8(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x10(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x18(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x20(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r8, %r8 movq 0x28(%rsi), %rax mulq %rbx subq %r8, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r8, %r8 negq %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %r9, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r10 sbbq %rdx, %r11 sbbq %rbp, %r12 sbbq $0x0, %r13 sbbq $0x0, %r14 sbbq $0x0, %rbx addq %rbx, %r15 adcq $0x0, %r8 movq 0x40(%rsp), %rbx movq (%rsi), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r9, %r9 movq 0x8(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x10(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x18(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x20(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r9, %r9 movq 0x28(%rsi), %rax mulq %rbx subq %r9, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r9, %r9 negq %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %r10, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r11 sbbq %rdx, %r12 sbbq %rbp, %r13 sbbq $0x0, %r14 sbbq $0x0, %r15 sbbq $0x0, %rbx addq %rbx, %r8 adcq $0x0, %r9 movq 0x48(%rsp), %rbx movq (%rsi), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r10, %r10 movq 0x8(%rsi), %rax mulq %rbx subq %r10, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r10, %r10 movq 0x10(%rsi), %rax mulq %rbx subq %r10, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r10, %r10 movq 0x18(%rsi), %rax mulq %rbx subq %r10, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r10, %r10 movq 0x20(%rsi), %rax mulq %rbx subq %r10, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r10, %r10 movq 0x28(%rsi), %rax mulq %rbx subq %r10, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r10, %r10 negq %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx xorl 
%ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %r11, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r12 sbbq %rdx, %r13 sbbq %rbp, %r14 sbbq $0x0, %r15 sbbq $0x0, %r8 sbbq $0x0, %rbx addq %rbx, %r9 adcq $0x0, %r10 movq 0x50(%rsp), %rbx movq (%rsi), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %r11, %r11 movq 0x8(%rsi), %rax mulq %rbx subq %r11, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r11, %r11 movq 0x10(%rsi), %rax mulq %rbx subq %r11, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r11, %r11 movq 0x18(%rsi), %rax mulq %rbx subq %r11, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r11, %r11 movq 0x20(%rsi), %rax mulq %rbx subq %r11, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r11, %r11 movq 0x28(%rsi), %rax mulq %rbx subq %r11, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r11, %r11 negq %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %r12, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r13 sbbq %rdx, %r14 sbbq %rbp, %r15 sbbq $0x0, %r8 sbbq $0x0, %r9 sbbq $0x0, %rbx addq %rbx, %r10 adcq $0x0, %r11 movq 0x58(%rsp), %rbx movq (%rsi), %rax mulq %rbx addq %rax, %r13 adcq %rdx, %r14 sbbq %r12, %r12 movq 0x8(%rsi), %rax mulq %rbx subq %r12, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r12, %r12 movq 0x10(%rsi), %rax mulq %rbx subq %r12, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r12, %r12 movq 0x18(%rsi), %rax mulq %rbx subq %r12, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r12, %r12 movq 0x20(%rsi), %rax mulq %rbx subq %r12, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r12, %r12 movq 0x28(%rsi), %rax mulq %rbx subq %r12, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r12, %r12 negq %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %r13, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r14 sbbq %rdx, %r15 sbbq %rbp, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 sbbq $0x0, %rbx addq %rbx, %r11 adcq $0x0, %r12 xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %ebx adcq %r15, %rbx movl $0x1, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0x0, %r12 cmovneq %rax, %r14 cmovneq %rbx, %r15 cmovneq %rcx, %r8 cmovneq %rdx, %r9 cmovneq %rbp, %r10 cmovneq %r13, %r11 movq %r14, 0x90(%rsp) movq %r15, 0x98(%rsp) movq %r8, 0xa0(%rsp) movq %r9, 0xa8(%rsp) movq %r10, 0xb0(%rsp) movq %r11, 0xb8(%rsp) movq 0xf0(%rsp), %rbx movq 0xf8(%rsp), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x108(%rsp), %rax mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x118(%rsp), %rax mulq %rbx movq %rax, %r13 movq %rdx, %r14 movq 0x108(%rsp), %rax mulq 0x110(%rsp) movq %rax, %r15 movq %rdx, %rcx movq 0x100(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0xf8(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rbp, %rbp movq 0xf8(%rsp), %rbx movq 0x108(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x110(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x118(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq $0x0, %rcx movq 0x110(%rsp), %rbx movq 0xf0(%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x100(%rsp), %rbx movq 0x108(%rsp), %rax mulq %rbx subq 
%rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x110(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x118(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r15 adcq %rdx, %rcx sbbq %rbp, %rbp xorl %ebx, %ebx movq 0x108(%rsp), %rax mulq 0x118(%rsp) subq %rbp, %rdx xorl %ebp, %ebp addq %rax, %rcx adcq %rdx, %rbx adcl %ebp, %ebp movq 0x110(%rsp), %rax mulq 0x118(%rsp) addq %rax, %rbx adcq %rdx, %rbp xorl %r8d, %r8d addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %r15, %r15 adcq %rcx, %rcx adcq %rbx, %rbx adcq %rbp, %rbp adcl %r8d, %r8d movq 0xf0(%rsp), %rax mulq %rax movq %r8, 0xc0(%rsp) movq %rax, %r8 movq 0xf8(%rsp), %rax movq %rbp, 0xc8(%rsp) addq %rdx, %r9 sbbq %rbp, %rbp mulq %rax negq %rbp adcq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x100(%rsp), %rax mulq %rax negq %rbp adcq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x108(%rsp), %rax mulq %rax negq %rbp adcq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x110(%rsp), %rax mulq %rax negq %rbp adcq %rax, %rcx adcq %rdx, %rbx sbbq %rbp, %rbp movq 0x118(%rsp), %rax mulq %rax negq %rbp adcq 0xc8(%rsp), %rax adcq 0xc0(%rsp), %rdx movq %rax, %rbp movq %rdx, %rdi movq %rbx, 0xc0(%rsp) movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r8 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r8, %r9 sbbq %rdx, %r10 sbbq %rax, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 movq %rbx, %r8 sbbq $0x0, %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r9 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r9, %r10 sbbq %rdx, %r11 sbbq %rax, %r12 sbbq $0x0, %r13 sbbq $0x0, %r8 movq %rbx, %r9 sbbq $0x0, %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r10 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r10, %r11 sbbq %rdx, %r12 sbbq %rax, %r13 sbbq $0x0, %r8 sbbq $0x0, %r9 movq %rbx, %r10 sbbq $0x0, %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r11 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r11, %r12 sbbq %rdx, %r13 sbbq %rax, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 movq %rbx, %r11 sbbq $0x0, %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r12 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r12, %r13 sbbq %rdx, %r8 sbbq %rax, %r9 sbbq $0x0, %r10 sbbq $0x0, %r11 movq %rbx, %r12 sbbq $0x0, %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r13 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r13, %r8 sbbq %rdx, %r9 sbbq %rax, %r10 sbbq $0x0, %r11 sbbq $0x0, %r12 movq %rbx, %r13 sbbq $0x0, %r13 movq 0xc0(%rsp), %rbx addq %r8, %r14 adcq %r9, %r15 adcq %r10, %rcx adcq %r11, %rbx adcq %r12, %rbp adcq %r13, %rdi movl $0x0, %r8d adcq %r8, %r8 xorq %r11, %r11 xorq %r12, %r12 xorq %r13, %r13 movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %r9d adcq %r15, %r9 movl $0x1, %r10d adcq %rcx, %r10 adcq %rbx, %r11 adcq %rbp, %r12 adcq %rdi, %r13 adcq $0x0, 
%r8 cmovneq %rax, %r14 cmovneq %r9, %r15 cmovneq %r10, %rcx cmovneq %r11, %rbx cmovneq %r12, %rbp cmovneq %r13, %rdi movq %r14, 0xc0(%rsp) movq %r15, 0xc8(%rsp) movq %rcx, 0xd0(%rsp) movq %rbx, 0xd8(%rsp) movq %rbp, 0xe0(%rsp) movq %rdi, 0xe8(%rsp) movabsq $0xffffffff, %r9 subq 0x120(%rsp), %r9 movabsq $0xffffffff00000000, %r10 sbbq 0x128(%rsp), %r10 movq $0xfffffffffffffffe, %r11 sbbq 0x130(%rsp), %r11 movq $0xffffffffffffffff, %r12 sbbq 0x138(%rsp), %r12 movq $0xffffffffffffffff, %r13 sbbq 0x140(%rsp), %r13 movq $0xffffffffffffffff, %r14 sbbq 0x148(%rsp), %r14 movq $0x9, %rcx movq %r9, %rax mulq %rcx movq %rax, %r8 movq %rdx, %r9 movq %r10, %rax xorl %r10d, %r10d mulq %rcx addq %rax, %r9 adcq %rdx, %r10 movq %r11, %rax xorl %r11d, %r11d mulq %rcx addq %rax, %r10 adcq %rdx, %r11 movq %r12, %rax xorl %r12d, %r12d mulq %rcx addq %rax, %r11 adcq %rdx, %r12 movq %r13, %rax xorl %r13d, %r13d mulq %rcx addq %rax, %r12 adcq %rdx, %r13 movq %r14, %rax movl $0x1, %r14d mulq %rcx addq %rax, %r13 adcq %rdx, %r14 movl $0xc, %ecx movq 0x90(%rsp), %rax mulq %rcx addq %rax, %r8 adcq %rdx, %r9 sbbq %rbx, %rbx movq 0x98(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %rbx, %rbx movq 0xa0(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %rbx, %rbx movq 0xa8(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rbx, %rbx movq 0xb0(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbx, %rbx movq 0xb8(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r13 adcq %rdx, %r14 movabsq $0xffffffff00000001, %rax mulq %r14 addq %rax, %r8 adcq %rdx, %r9 adcq %r14, %r10 movq %r14, %rax sbbq %rcx, %rcx movl $0xffffffff, %edx negq %rcx mulq %rdx addq %rax, %r9 adcq %rdx, %r10 adcq %rcx, %r11 adcq $0x0, %r12 adcq $0x0, %r13 sbbq %rcx, %rcx notq %rcx movl $0xffffffff, %edx xorq %rax, %rax andq %rcx, %rdx subq %rdx, %rax andq $0x1, %rcx subq %rax, %r8 movq %r8, 0x120(%rsp) sbbq %rdx, %r9 movq %r9, 0x128(%rsp) sbbq %rcx, %r10 movq %r10, 0x130(%rsp) sbbq $0x0, %r11 movq %r11, 0x138(%rsp) sbbq $0x0, %r12 movq %r12, 0x140(%rsp) sbbq $0x0, %r13 movq %r13, 0x148(%rsp) movq 0xc0(%rsp), %rax subq (%rsp), %rax movq 0xc8(%rsp), %rdx sbbq 0x8(%rsp), %rdx movq 0xd0(%rsp), %r8 sbbq 0x10(%rsp), %r8 movq 0xd8(%rsp), %r9 sbbq 0x18(%rsp), %r9 movq 0xe0(%rsp), %r10 sbbq 0x20(%rsp), %r10 movq 0xe8(%rsp), %r11 sbbq 0x28(%rsp), %r11 sbbq %rcx, %rcx movl $0xffffffff, %ebx andq %rbx, %rcx xorq %rbx, %rbx subq %rcx, %rbx subq %rbx, %rax movq %rax, 0xf0(%rsp) sbbq %rcx, %rdx movq %rdx, 0xf8(%rsp) sbbq %rax, %rax andq %rbx, %rcx negq %rax sbbq %rcx, %r8 movq %r8, 0x100(%rsp) sbbq $0x0, %r9 movq %r9, 0x108(%rsp) sbbq $0x0, %r10 movq %r10, 0x110(%rsp) sbbq $0x0, %r11 movq %r11, 0x118(%rsp) movq 0x30(%rsp), %rbx movq 0x38(%rsp), %rax mulq %rbx movq %rax, %r9 movq %rdx, %r10 movq 0x48(%rsp), %rax mulq %rbx movq %rax, %r11 movq %rdx, %r12 movq 0x58(%rsp), %rax mulq %rbx movq %rax, %r13 movq %rdx, %r14 movq 0x48(%rsp), %rax mulq 0x50(%rsp) movq %rax, %r15 movq %rdx, %rcx movq 0x40(%rsp), %rbx movq 0x30(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x38(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rbp, %rbp movq 0x38(%rsp), %rbx movq 0x48(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x50(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x58(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 adcq 
$0x0, %rcx movq 0x50(%rsp), %rbx movq 0x30(%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x40(%rsp), %rbx movq 0x48(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %rbp, %rbp movq 0x50(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x58(%rsp), %rax mulq %rbx subq %rbp, %rdx addq %rax, %r15 adcq %rdx, %rcx sbbq %rbp, %rbp xorl %ebx, %ebx movq 0x48(%rsp), %rax mulq 0x58(%rsp) subq %rbp, %rdx xorl %ebp, %ebp addq %rax, %rcx adcq %rdx, %rbx adcl %ebp, %ebp movq 0x50(%rsp), %rax mulq 0x58(%rsp) addq %rax, %rbx adcq %rdx, %rbp xorl %r8d, %r8d addq %r9, %r9 adcq %r10, %r10 adcq %r11, %r11 adcq %r12, %r12 adcq %r13, %r13 adcq %r14, %r14 adcq %r15, %r15 adcq %rcx, %rcx adcq %rbx, %rbx adcq %rbp, %rbp adcl %r8d, %r8d movq 0x30(%rsp), %rax mulq %rax movq %r8, 0xc0(%rsp) movq %rax, %r8 movq 0x38(%rsp), %rax movq %rbp, 0xc8(%rsp) addq %rdx, %r9 sbbq %rbp, %rbp mulq %rax negq %rbp adcq %rax, %r10 adcq %rdx, %r11 sbbq %rbp, %rbp movq 0x40(%rsp), %rax mulq %rax negq %rbp adcq %rax, %r12 adcq %rdx, %r13 sbbq %rbp, %rbp movq 0x48(%rsp), %rax mulq %rax negq %rbp adcq %rax, %r14 adcq %rdx, %r15 sbbq %rbp, %rbp movq 0x50(%rsp), %rax mulq %rax negq %rbp adcq %rax, %rcx adcq %rdx, %rbx sbbq %rbp, %rbp movq 0x58(%rsp), %rax mulq %rax negq %rbp adcq 0xc8(%rsp), %rax adcq 0xc0(%rsp), %rdx movq %rax, %rbp movq %rdx, %rdi movq %rbx, 0xc0(%rsp) movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r8 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r8, %r9 sbbq %rdx, %r10 sbbq %rax, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 movq %rbx, %r8 sbbq $0x0, %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r9 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r9, %r10 sbbq %rdx, %r11 sbbq %rax, %r12 sbbq $0x0, %r13 sbbq $0x0, %r8 movq %rbx, %r9 sbbq $0x0, %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r10 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r10, %r11 sbbq %rdx, %r12 sbbq %rax, %r13 sbbq $0x0, %r8 sbbq $0x0, %r9 movq %rbx, %r10 sbbq $0x0, %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r11 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r11, %r12 sbbq %rdx, %r13 sbbq %rax, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 movq %rbx, %r11 sbbq $0x0, %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r12 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r12, %r13 sbbq %rdx, %r8 sbbq %rax, %r9 sbbq $0x0, %r10 sbbq $0x0, %r11 movq %rbx, %r12 sbbq $0x0, %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %rax, %r13 movl $0x0, %eax adcq %rbx, %rdx adcl %eax, %eax subq %r13, %r8 sbbq %rdx, %r9 sbbq %rax, %r10 sbbq $0x0, %r11 sbbq $0x0, %r12 movq %rbx, %r13 sbbq $0x0, %r13 movq 0xc0(%rsp), %rbx addq %r8, %r14 adcq %r9, %r15 adcq %r10, %rcx adcq %r11, %rbx adcq %r12, %rbp adcq %r13, %rdi movl $0x0, %r8d adcq %r8, %r8 xorq %r11, %r11 xorq %r12, %r12 xorq %r13, %r13 movabsq 
$0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %r9d adcq %r15, %r9 movl $0x1, %r10d adcq %rcx, %r10 adcq %rbx, %r11 adcq %rbp, %r12 adcq %rdi, %r13 adcq $0x0, %r8 cmovneq %rax, %r14 cmovneq %r9, %r15 cmovneq %r10, %rcx cmovneq %r11, %rbx cmovneq %r12, %rbp cmovneq %r13, %rdi movq %r14, 0xc0(%rsp) movq %r15, 0xc8(%rsp) movq %rcx, 0xd0(%rsp) movq %rbx, 0xd8(%rsp) movq %rbp, 0xe0(%rsp) movq %rdi, 0xe8(%rsp) movq 0x150(%rsp), %rdi movq 0xf0(%rsp), %rax subq 0x30(%rsp), %rax movq 0xf8(%rsp), %rdx sbbq 0x38(%rsp), %rdx movq 0x100(%rsp), %r8 sbbq 0x40(%rsp), %r8 movq 0x108(%rsp), %r9 sbbq 0x48(%rsp), %r9 movq 0x110(%rsp), %r10 sbbq 0x50(%rsp), %r10 movq 0x118(%rsp), %r11 sbbq 0x58(%rsp), %r11 sbbq %rcx, %rcx movl $0xffffffff, %ebx andq %rbx, %rcx xorq %rbx, %rbx subq %rcx, %rbx subq %rbx, %rax movq %rax, 0x60(%rdi) sbbq %rcx, %rdx movq %rdx, 0x68(%rdi) sbbq %rax, %rax andq %rbx, %rcx negq %rax sbbq %rcx, %r8 movq %r8, 0x70(%rdi) sbbq $0x0, %r9 movq %r9, 0x78(%rdi) sbbq $0x0, %r10 movq %r10, 0x80(%rdi) sbbq $0x0, %r11 movq %r11, 0x88(%rdi) movq 0x60(%rsp), %rbx movq 0x120(%rsp), %rax mulq %rbx movq %rax, %r8 movq %rdx, %r9 movq 0x128(%rsp), %rax mulq %rbx xorl %r10d, %r10d addq %rax, %r9 adcq %rdx, %r10 movq 0x130(%rsp), %rax mulq %rbx xorl %r11d, %r11d addq %rax, %r10 adcq %rdx, %r11 movq 0x138(%rsp), %rax mulq %rbx xorl %r12d, %r12d addq %rax, %r11 adcq %rdx, %r12 movq 0x140(%rsp), %rax mulq %rbx xorl %r13d, %r13d addq %rax, %r12 adcq %rdx, %r13 movq 0x148(%rsp), %rax mulq %rbx xorl %r14d, %r14d addq %rax, %r13 adcq %rdx, %r14 xorl %r15d, %r15d movq %r8, %rbx shlq $0x20, %rbx addq %r8, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r8 movabsq $0xffffffff, %rax mulq %rbx addq %r8, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r9 sbbq %rdx, %r10 sbbq %rbp, %r11 sbbq $0x0, %r12 sbbq $0x0, %r13 sbbq $0x0, %rbx addq %rbx, %r14 adcq $0x0, %r15 movq 0x68(%rsp), %rbx movq 0x120(%rsp), %rax mulq %rbx addq %rax, %r9 adcq %rdx, %r10 sbbq %r8, %r8 movq 0x128(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r8, %r8 movq 0x130(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r8, %r8 movq 0x138(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r8, %r8 movq 0x140(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r8, %r8 movq 0x148(%rsp), %rax mulq %rbx subq %r8, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r8, %r8 negq %r8 movq %r9, %rbx shlq $0x20, %rbx addq %r9, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r9 movabsq $0xffffffff, %rax mulq %rbx addq %r9, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r10 sbbq %rdx, %r11 sbbq %rbp, %r12 sbbq $0x0, %r13 sbbq $0x0, %r14 sbbq $0x0, %rbx addq %rbx, %r15 adcq $0x0, %r8 movq 0x70(%rsp), %rbx movq 0x120(%rsp), %rax mulq %rbx addq %rax, %r10 adcq %rdx, %r11 sbbq %r9, %r9 movq 0x128(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %r9, %r9 movq 0x130(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r9, %r9 movq 0x138(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r9, %r9 movq 0x140(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r9, %r9 movq 0x148(%rsp), %rax mulq %rbx subq %r9, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r9, %r9 negq %r9 movq %r10, %rbx shlq $0x20, %rbx addq %r10, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r10 movabsq $0xffffffff, %rax 
mulq %rbx addq %r10, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r11 sbbq %rdx, %r12 sbbq %rbp, %r13 sbbq $0x0, %r14 sbbq $0x0, %r15 sbbq $0x0, %rbx addq %rbx, %r8 adcq $0x0, %r9 movq 0x78(%rsp), %rbx movq 0x120(%rsp), %rax mulq %rbx addq %rax, %r11 adcq %rdx, %r12 sbbq %r10, %r10 movq 0x128(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %r10, %r10 movq 0x130(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r10, %r10 movq 0x138(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r10, %r10 movq 0x140(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r10, %r10 movq 0x148(%rsp), %rax mulq %rbx subq %r10, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r10, %r10 negq %r10 movq %r11, %rbx shlq $0x20, %rbx addq %r11, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r11 movabsq $0xffffffff, %rax mulq %rbx addq %r11, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r12 sbbq %rdx, %r13 sbbq %rbp, %r14 sbbq $0x0, %r15 sbbq $0x0, %r8 sbbq $0x0, %rbx addq %rbx, %r9 adcq $0x0, %r10 movq 0x80(%rsp), %rbx movq 0x120(%rsp), %rax mulq %rbx addq %rax, %r12 adcq %rdx, %r13 sbbq %r11, %r11 movq 0x128(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r13 adcq %rdx, %r14 sbbq %r11, %r11 movq 0x130(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r11, %r11 movq 0x138(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r11, %r11 movq 0x140(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r11, %r11 movq 0x148(%rsp), %rax mulq %rbx subq %r11, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r11, %r11 negq %r11 movq %r12, %rbx shlq $0x20, %rbx addq %r12, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r12 movabsq $0xffffffff, %rax mulq %rbx addq %r12, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r13 sbbq %rdx, %r14 sbbq %rbp, %r15 sbbq $0x0, %r8 sbbq $0x0, %r9 sbbq $0x0, %rbx addq %rbx, %r10 adcq $0x0, %r11 movq 0x88(%rsp), %rbx movq 0x120(%rsp), %rax mulq %rbx addq %rax, %r13 adcq %rdx, %r14 sbbq %r12, %r12 movq 0x128(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r14 adcq %rdx, %r15 sbbq %r12, %r12 movq 0x130(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r15 adcq %rdx, %r8 sbbq %r12, %r12 movq 0x138(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r8 adcq %rdx, %r9 sbbq %r12, %r12 movq 0x140(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %r12, %r12 movq 0x148(%rsp), %rax mulq %rbx subq %r12, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %r12, %r12 negq %r12 movq %r13, %rbx shlq $0x20, %rbx addq %r13, %rbx xorl %ebp, %ebp movabsq $0xffffffff00000001, %rax mulq %rbx movq %rdx, %r13 movabsq $0xffffffff, %rax mulq %rbx addq %r13, %rax adcq %rbx, %rdx adcl %ebp, %ebp subq %rax, %r14 sbbq %rdx, %r15 sbbq %rbp, %r8 sbbq $0x0, %r9 sbbq $0x0, %r10 sbbq $0x0, %rbx addq %rbx, %r11 adcq $0x0, %r12 xorl %edx, %edx xorl %ebp, %ebp xorl %r13d, %r13d movabsq $0xffffffff00000001, %rax addq %r14, %rax movl $0xffffffff, %ebx adcq %r15, %rbx movl $0x1, %ecx adcq %r8, %rcx adcq %r9, %rdx adcq %r10, %rbp adcq %r11, %r13 adcq $0x0, %r12 cmovneq %rax, %r14 cmovneq %rbx, %r15 cmovneq %rcx, %r8 cmovneq %rdx, %r9 cmovneq %rbp, %r10 cmovneq %r13, %r11 movq %r14, 0xf0(%rsp) movq %r15, 0xf8(%rsp) movq %r8, 0x100(%rsp) movq %r9, 0x108(%rsp) movq %r10, 0x110(%rsp) movq %r11, 0x118(%rsp) movq 0xb8(%rsp), %rcx movq %rcx, %r13 shrq $0x3e, %rcx movq 0xb0(%rsp), %r12 shldq $0x2, %r12, %r13 
movq 0xa8(%rsp), %r11 shldq $0x2, %r11, %r12 movq 0xa0(%rsp), %r10 shldq $0x2, %r10, %r11 movq 0x98(%rsp), %r9 shldq $0x2, %r9, %r10 movq 0x90(%rsp), %r8 shldq $0x2, %r8, %r9 shlq $0x2, %r8 addq $0x1, %rcx subq 0x120(%rsp), %r8 sbbq 0x128(%rsp), %r9 sbbq 0x130(%rsp), %r10 sbbq 0x138(%rsp), %r11 sbbq 0x140(%rsp), %r12 sbbq 0x148(%rsp), %r13 sbbq $0x0, %rcx movabsq $0xffffffff00000001, %rax mulq %rcx addq %rax, %r8 adcq %rdx, %r9 adcq %rcx, %r10 movq %rcx, %rax sbbq %rcx, %rcx movl $0xffffffff, %edx negq %rcx mulq %rdx addq %rax, %r9 adcq %rdx, %r10 adcq %rcx, %r11 adcq $0x0, %r12 adcq $0x0, %r13 sbbq %rcx, %rcx notq %rcx movl $0xffffffff, %edx xorq %rax, %rax andq %rcx, %rdx subq %rdx, %rax andq $0x1, %rcx subq %rax, %r8 movq %r8, (%rdi) sbbq %rdx, %r9 movq %r9, 0x8(%rdi) sbbq %rcx, %r10 movq %r10, 0x10(%rdi) sbbq $0x0, %r11 movq %r11, 0x18(%rdi) sbbq $0x0, %r12 movq %r12, 0x20(%rdi) sbbq $0x0, %r13 movq %r13, 0x28(%rdi) movabsq $0xffffffff, %r8 subq 0xc0(%rsp), %r8 movabsq $0xffffffff00000000, %r9 sbbq 0xc8(%rsp), %r9 movq $0xfffffffffffffffe, %r10 sbbq 0xd0(%rsp), %r10 movq $0xffffffffffffffff, %r11 sbbq 0xd8(%rsp), %r11 movq $0xffffffffffffffff, %r12 sbbq 0xe0(%rsp), %r12 movq $0xffffffffffffffff, %r13 sbbq 0xe8(%rsp), %r13 movq %r13, %r14 shrq $0x3d, %r14 shldq $0x3, %r12, %r13 shldq $0x3, %r11, %r12 shldq $0x3, %r10, %r11 shldq $0x3, %r9, %r10 shldq $0x3, %r8, %r9 shlq $0x3, %r8 addq $0x1, %r14 movl $0x3, %ecx movq 0xf0(%rsp), %rax mulq %rcx addq %rax, %r8 adcq %rdx, %r9 sbbq %rbx, %rbx movq 0xf8(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r9 adcq %rdx, %r10 sbbq %rbx, %rbx movq 0x100(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r10 adcq %rdx, %r11 sbbq %rbx, %rbx movq 0x108(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r11 adcq %rdx, %r12 sbbq %rbx, %rbx movq 0x110(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r12 adcq %rdx, %r13 sbbq %rbx, %rbx movq 0x118(%rsp), %rax mulq %rcx subq %rbx, %rdx addq %rax, %r13 adcq %rdx, %r14 movabsq $0xffffffff00000001, %rax mulq %r14 addq %rax, %r8 adcq %rdx, %r9 adcq %r14, %r10 movq %r14, %rax sbbq %rcx, %rcx movl $0xffffffff, %edx negq %rcx mulq %rdx addq %rax, %r9 adcq %rdx, %r10 adcq %rcx, %r11 adcq $0x0, %r12 adcq $0x0, %r13 sbbq %rcx, %rcx notq %rcx movl $0xffffffff, %edx xorq %rax, %rax andq %rcx, %rdx subq %rdx, %rax andq $0x1, %rcx subq %rax, %r8 movq %r8, 0x30(%rdi) sbbq %rdx, %r9 movq %r9, 0x38(%rdi) sbbq %rcx, %r10 movq %r10, 0x40(%rdi) sbbq $0x0, %r11 movq %r11, 0x48(%rdi) sbbq $0x0, %r12 movq %r12, 0x50(%rdi) sbbq $0x0, %r13 movq %r13, 0x58(%rdi) addq $0x158, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
marvin-hansen/iggy-streaming-system
48,586
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/p384_montjadd.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point addition on NIST curve P-384 in Montgomery-Jacobian coordinates // // extern void p384_montjadd // (uint64_t p3[static 18],uint64_t p1[static 18],uint64_t p2[static 18]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // // Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2 // Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjadd) S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjadd) .text .balign 4 // Size of individual field elements #define NUMSIZE 48 // Pointer-offset pairs for inputs and outputs // These assume %rdi = p3, %rsi = p1 and %rcx = p2, // which needs to be set up explicitly before use. // The %rdi value never changes, however. #define x_1 0(%rsi) #define y_1 NUMSIZE(%rsi) #define z_1 (2*NUMSIZE)(%rsi) #define x_2 0(%rcx) #define y_2 NUMSIZE(%rcx) #define z_2 (2*NUMSIZE)(%rcx) #define x_3 0(%rdi) #define y_3 NUMSIZE(%rdi) #define z_3 (2*NUMSIZE)(%rdi) // In one place it's convenient to use another register // since the squaring function overwrites %rcx #define z_2_alt (2*NUMSIZE)(%rsi) // Pointer-offset pairs for temporaries, with some aliasing // NSPACE is the total stack needed for these temporaries #define z1sq (NUMSIZE*0)(%rsp) #define ww (NUMSIZE*0)(%rsp) #define resx (NUMSIZE*0)(%rsp) #define yd (NUMSIZE*1)(%rsp) #define y2a (NUMSIZE*1)(%rsp) #define x2a (NUMSIZE*2)(%rsp) #define zzx2 (NUMSIZE*2)(%rsp) #define zz (NUMSIZE*3)(%rsp) #define t1 (NUMSIZE*3)(%rsp) #define t2 (NUMSIZE*4)(%rsp) #define x1a (NUMSIZE*4)(%rsp) #define zzx1 (NUMSIZE*4)(%rsp) #define resy (NUMSIZE*4)(%rsp) #define xd (NUMSIZE*5)(%rsp) #define z2sq (NUMSIZE*5)(%rsp) #define resz (NUMSIZE*5)(%rsp) #define y1a (NUMSIZE*6)(%rsp) // Temporaries for the actual input pointers #define input_x (NUMSIZE*7)(%rsp) #define input_y (NUMSIZE*7+8)(%rsp) #define NSPACE (NUMSIZE*7+16) // Corresponds exactly to bignum_montmul_p384 #define montmul_p384(P0,P1,P2) \ movq P2, %rdx ; \ xorl %r15d, %r15d ; \ mulxq P1, %r8, %r9 ; \ mulxq 0x8+P1, %rbx, %r10 ; \ addq %rbx, %r9 ; \ mulxq 0x10+P1, %rbx, %r11 ; \ adcq %rbx, %r10 ; \ mulxq 0x18+P1, %rbx, %r12 ; \ adcq %rbx, %r11 ; \ mulxq 0x20+P1, %rbx, %r13 ; \ adcq %rbx, %r12 ; \ mulxq 0x28+P1, %rbx, %r14 ; \ adcq %rbx, %r13 ; \ adcq %r15, %r14 ; \ movq %r8, %rdx ; \ shlq $0x20, %rdx ; \ addq %r8, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r8, %rbx ; \ adcq %r8, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq %rbp, %r11 ; \ sbbq $0x0, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x8+P2, %rdx ; \ xorl %r8d, %r8d ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ adoxq 
%r8, %r15 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcq %rax, %r14 ; \ adcq %rbx, %r15 ; \ adcq %r8, %r8 ; \ movq %r9, %rdx ; \ shlq $0x20, %rdx ; \ addq %r9, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r9, %rbx ; \ adcq %r9, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r10 ; \ sbbq %rbx, %r11 ; \ sbbq %rbp, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %r14 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r15 ; \ adcq $0x0, %r8 ; \ movq 0x10+P2, %rdx ; \ xorl %r9d, %r9d ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ adoxq %r9, %r8 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcq %rax, %r15 ; \ adcq %rbx, %r8 ; \ adcq %r9, %r9 ; \ movq %r10, %rdx ; \ shlq $0x20, %rdx ; \ addq %r10, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r10, %rbx ; \ adcq %r10, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r11 ; \ sbbq %rbx, %r12 ; \ sbbq %rbp, %r13 ; \ sbbq $0x0, %r14 ; \ sbbq $0x0, %r15 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r8 ; \ adcq $0x0, %r9 ; \ movq 0x18+P2, %rdx ; \ xorl %r10d, %r10d ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r15 ; \ adoxq %rbx, %r8 ; \ adoxq %r10, %r9 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcq %rax, %r8 ; \ adcq %rbx, %r9 ; \ adcq %r10, %r10 ; \ movq %r11, %rdx ; \ shlq $0x20, %rdx ; \ addq %r11, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r11, %rbx ; \ adcq %r11, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r12 ; \ sbbq %rbx, %r13 ; \ sbbq %rbp, %r14 ; \ sbbq $0x0, %r15 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r9 ; \ adcq $0x0, %r10 ; \ movq 0x20+P2, %rdx ; \ xorl %r11d, %r11d ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r15 ; \ adoxq %rbx, %r8 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ adoxq %r11, %r10 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcq %rax, %r9 ; \ adcq %rbx, %r10 ; \ adcq %r11, %r11 ; \ movq %r12, %rdx ; \ shlq $0x20, %rdx ; \ addq %r12, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r12, %rbx ; \ adcq %r12, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r13 ; \ sbbq %rbx, %r14 ; \ sbbq %rbp, %r15 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r10 ; \ adcq $0x0, %r11 ; \ movq 0x28+P2, %rdx ; \ xorl %r12d, %r12d ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ mulxq 0x10+P1, %rax, %rbx ; \ adcxq %rax, %r15 ; \ adoxq %rbx, %r8 ; \ mulxq 0x18+P1, %rax, %rbx 
; \ adcxq %rax, %r8 ; \ adoxq %rbx, %r9 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r9 ; \ adoxq %rbx, %r10 ; \ adoxq %r12, %r11 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcq %rax, %r10 ; \ adcq %rbx, %r11 ; \ adcq %r12, %r12 ; \ movq %r13, %rdx ; \ shlq $0x20, %rdx ; \ addq %r13, %rdx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %rbx, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %r13, %rbx ; \ adcq %r13, %rax ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ subq %rax, %r14 ; \ sbbq %rbx, %r15 ; \ sbbq %rbp, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %r10 ; \ sbbq $0x0, %rdx ; \ addq %rdx, %r11 ; \ adcq $0x0, %r12 ; \ xorl %edx, %edx ; \ xorl %ebp, %ebp ; \ xorl %r13d, %r13d ; \ movq $0xffffffff00000001, %rax ; \ addq %r14, %rax ; \ movl $0xffffffff, %ebx ; \ adcq %r15, %rbx ; \ movl $0x1, %ecx ; \ adcq %r8, %rcx ; \ adcq %r9, %rdx ; \ adcq %r10, %rbp ; \ adcq %r11, %r13 ; \ adcq $0x0, %r12 ; \ cmovne %rax, %r14 ; \ cmovne %rbx, %r15 ; \ cmovne %rcx, %r8 ; \ cmovne %rdx, %r9 ; \ cmovne %rbp, %r10 ; \ cmovne %r13, %r11 ; \ movq %r14, P0 ; \ movq %r15, 0x8+P0 ; \ movq %r8, 0x10+P0 ; \ movq %r9, 0x18+P0 ; \ movq %r10, 0x20+P0 ; \ movq %r11, 0x28+P0 // Corresponds exactly to bignum_montsqr_p384 #define montsqr_p384(P0,P1) \ movq P1, %rdx ; \ mulxq 0x8+P1, %r9, %r10 ; \ mulxq 0x18+P1, %r11, %r12 ; \ mulxq 0x28+P1, %r13, %r14 ; \ movq 0x18+P1, %rdx ; \ mulxq 0x20+P1, %r15, %rcx ; \ xorl %ebp, %ebp ; \ movq 0x10+P1, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ movq 0x8+P1, %rdx ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ adcxq %rbp, %r15 ; \ adoxq %rbp, %rcx ; \ adcq %rbp, %rcx ; \ xorl %ebp, %ebp ; \ movq 0x20+P1, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ movq 0x10+P1, %rdx ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ mulxq 0x28+P1, %rax, %rdx ; \ adcxq %rax, %r15 ; \ adoxq %rdx, %rcx ; \ movq 0x28+P1, %rdx ; \ mulxq 0x20+P1, %rbx, %rbp ; \ mulxq 0x18+P1, %rax, %rdx ; \ adcxq %rax, %rcx ; \ adoxq %rdx, %rbx ; \ movl $0x0, %eax ; \ adcxq %rax, %rbx ; \ adoxq %rax, %rbp ; \ adcq %rax, %rbp ; \ xorq %rax, %rax ; \ movq P1, %rdx ; \ mulxq P1, %r8, %rax ; \ adcxq %r9, %r9 ; \ adoxq %rax, %r9 ; \ movq 0x8+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r10, %r10 ; \ adoxq %rax, %r10 ; \ adcxq %r11, %r11 ; \ adoxq %rdx, %r11 ; \ movq 0x10+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r12, %r12 ; \ adoxq %rax, %r12 ; \ adcxq %r13, %r13 ; \ adoxq %rdx, %r13 ; \ movq 0x18+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r14, %r14 ; \ adoxq %rax, %r14 ; \ adcxq %r15, %r15 ; \ adoxq %rdx, %r15 ; \ movq 0x20+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %rcx, %rcx ; \ adoxq %rax, %rcx ; \ adcxq %rbx, %rbx ; \ adoxq %rdx, %rbx ; \ movq 0x28+P1, %rdx ; \ mulxq %rdx, %rax, %rsi ; \ adcxq %rbp, %rbp ; \ adoxq %rax, %rbp ; \ movl $0x0, %eax ; \ adcxq %rax, %rsi ; \ adoxq %rax, %rsi ; \ movq %rbx, P0 ; \ movq %r8, %rdx ; \ shlq $0x20, %rdx ; \ addq %r8, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r8, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r8 ; \ addq %rbx, %rax ; \ adcq %rdx, %r8 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r9 ; \ sbbq %r8, %r10 ; \ sbbq 
%rbx, %r11 ; \ sbbq $0x0, %r12 ; \ sbbq $0x0, %r13 ; \ movq %rdx, %r8 ; \ sbbq $0x0, %r8 ; \ movq %r9, %rdx ; \ shlq $0x20, %rdx ; \ addq %r9, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r9, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r9 ; \ addq %rbx, %rax ; \ adcq %rdx, %r9 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r10 ; \ sbbq %r9, %r11 ; \ sbbq %rbx, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %r8 ; \ movq %rdx, %r9 ; \ sbbq $0x0, %r9 ; \ movq %r10, %rdx ; \ shlq $0x20, %rdx ; \ addq %r10, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r10, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r10 ; \ addq %rbx, %rax ; \ adcq %rdx, %r10 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r11 ; \ sbbq %r10, %r12 ; \ sbbq %rbx, %r13 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %r9 ; \ movq %rdx, %r10 ; \ sbbq $0x0, %r10 ; \ movq %r11, %rdx ; \ shlq $0x20, %rdx ; \ addq %r11, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r11, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r11 ; \ addq %rbx, %rax ; \ adcq %rdx, %r11 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r12 ; \ sbbq %r11, %r13 ; \ sbbq %rbx, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %r10 ; \ movq %rdx, %r11 ; \ sbbq $0x0, %r11 ; \ movq %r12, %rdx ; \ shlq $0x20, %rdx ; \ addq %r12, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r12, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r12 ; \ addq %rbx, %rax ; \ adcq %rdx, %r12 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r13 ; \ sbbq %r12, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq $0x0, %r10 ; \ sbbq $0x0, %r11 ; \ movq %rdx, %r12 ; \ sbbq $0x0, %r12 ; \ movq %r13, %rdx ; \ shlq $0x20, %rdx ; \ addq %r13, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r13, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r13 ; \ addq %rbx, %rax ; \ adcq %rdx, %r13 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r8 ; \ sbbq %r13, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq $0x0, %r11 ; \ sbbq $0x0, %r12 ; \ movq %rdx, %r13 ; \ sbbq $0x0, %r13 ; \ movq P0, %rbx ; \ addq %r8, %r14 ; \ adcq %r9, %r15 ; \ adcq %r10, %rcx ; \ adcq %r11, %rbx ; \ adcq %r12, %rbp ; \ adcq %r13, %rsi ; \ movl $0x0, %r8d ; \ adcq %r8, %r8 ; \ xorq %r11, %r11 ; \ xorq %r12, %r12 ; \ xorq %r13, %r13 ; \ movq $0xffffffff00000001, %rax ; \ addq %r14, %rax ; \ movl $0xffffffff, %r9d ; \ adcq %r15, %r9 ; \ movl $0x1, %r10d ; \ adcq %rcx, %r10 ; \ adcq %rbx, %r11 ; \ adcq %rbp, %r12 ; \ adcq %rsi, %r13 ; \ adcq $0x0, %r8 ; \ cmovne %rax, %r14 ; \ cmovne %r9, %r15 ; \ cmovne %r10, %rcx ; \ cmovne %r11, %rbx ; \ cmovne %r12, %rbp ; \ cmovne %r13, %rsi ; \ movq %r14, P0 ; \ movq %r15, 0x8+P0 ; \ movq %rcx, 0x10+P0 ; \ movq %rbx, 0x18+P0 ; \ movq %rbp, 0x20+P0 ; \ movq %rsi, 0x28+P0 // Almost-Montgomery variant which we use when an input to other muls // with the other argument fully reduced (which is always safe). 
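// That is, the "almost-Montgomery" result is congruent mod p_384 to the fully reduced Montgomery square and fits in six 64-bit words, but may not be strictly less than p_384; using such a value as one operand of a Montgomery multiplication is safe provided the other operand is fully reduced, which is how it is used in the sequence below.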
#define amontsqr_p384(P0,P1) \ movq P1, %rdx ; \ mulxq 0x8+P1, %r9, %r10 ; \ mulxq 0x18+P1, %r11, %r12 ; \ mulxq 0x28+P1, %r13, %r14 ; \ movq 0x18+P1, %rdx ; \ mulxq 0x20+P1, %r15, %rcx ; \ xorl %ebp, %ebp ; \ movq 0x10+P1, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r10 ; \ adoxq %rbx, %r11 ; \ mulxq 0x8+P1, %rax, %rbx ; \ adcxq %rax, %r11 ; \ adoxq %rbx, %r12 ; \ movq 0x8+P1, %rdx ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x28+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ adcxq %rbp, %r15 ; \ adoxq %rbp, %rcx ; \ adcq %rbp, %rcx ; \ xorl %ebp, %ebp ; \ movq 0x20+P1, %rdx ; \ mulxq P1, %rax, %rbx ; \ adcxq %rax, %r12 ; \ adoxq %rbx, %r13 ; \ movq 0x10+P1, %rdx ; \ mulxq 0x18+P1, %rax, %rbx ; \ adcxq %rax, %r13 ; \ adoxq %rbx, %r14 ; \ mulxq 0x20+P1, %rax, %rbx ; \ adcxq %rax, %r14 ; \ adoxq %rbx, %r15 ; \ mulxq 0x28+P1, %rax, %rdx ; \ adcxq %rax, %r15 ; \ adoxq %rdx, %rcx ; \ movq 0x28+P1, %rdx ; \ mulxq 0x20+P1, %rbx, %rbp ; \ mulxq 0x18+P1, %rax, %rdx ; \ adcxq %rax, %rcx ; \ adoxq %rdx, %rbx ; \ movl $0x0, %eax ; \ adcxq %rax, %rbx ; \ adoxq %rax, %rbp ; \ adcq %rax, %rbp ; \ xorq %rax, %rax ; \ movq P1, %rdx ; \ mulxq P1, %r8, %rax ; \ adcxq %r9, %r9 ; \ adoxq %rax, %r9 ; \ movq 0x8+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r10, %r10 ; \ adoxq %rax, %r10 ; \ adcxq %r11, %r11 ; \ adoxq %rdx, %r11 ; \ movq 0x10+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r12, %r12 ; \ adoxq %rax, %r12 ; \ adcxq %r13, %r13 ; \ adoxq %rdx, %r13 ; \ movq 0x18+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %r14, %r14 ; \ adoxq %rax, %r14 ; \ adcxq %r15, %r15 ; \ adoxq %rdx, %r15 ; \ movq 0x20+P1, %rdx ; \ mulxq %rdx, %rax, %rdx ; \ adcxq %rcx, %rcx ; \ adoxq %rax, %rcx ; \ adcxq %rbx, %rbx ; \ adoxq %rdx, %rbx ; \ movq 0x28+P1, %rdx ; \ mulxq %rdx, %rax, %rsi ; \ adcxq %rbp, %rbp ; \ adoxq %rax, %rbp ; \ movl $0x0, %eax ; \ adcxq %rax, %rsi ; \ adoxq %rax, %rsi ; \ movq %rbx, P0 ; \ movq %r8, %rdx ; \ shlq $0x20, %rdx ; \ addq %r8, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r8, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r8 ; \ addq %rbx, %rax ; \ adcq %rdx, %r8 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r9 ; \ sbbq %r8, %r10 ; \ sbbq %rbx, %r11 ; \ sbbq $0x0, %r12 ; \ sbbq $0x0, %r13 ; \ movq %rdx, %r8 ; \ sbbq $0x0, %r8 ; \ movq %r9, %rdx ; \ shlq $0x20, %rdx ; \ addq %r9, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r9, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r9 ; \ addq %rbx, %rax ; \ adcq %rdx, %r9 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r10 ; \ sbbq %r9, %r11 ; \ sbbq %rbx, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %r8 ; \ movq %rdx, %r9 ; \ sbbq $0x0, %r9 ; \ movq %r10, %rdx ; \ shlq $0x20, %rdx ; \ addq %r10, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r10, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r10 ; \ addq %rbx, %rax ; \ adcq %rdx, %r10 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r11 ; \ sbbq %r10, %r12 ; \ sbbq %rbx, %r13 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %r9 ; \ movq %rdx, %r10 ; \ sbbq $0x0, %r10 ; \ movq %r11, %rdx ; \ shlq $0x20, %rdx ; \ addq %r11, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r11, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r11 ; \ addq %rbx, %rax ; \ adcq %rdx, %r11 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r12 ; \ sbbq %r11, %r13 ; \ sbbq %rbx, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, 
%r10 ; \ movq %rdx, %r11 ; \ sbbq $0x0, %r11 ; \ movq %r12, %rdx ; \ shlq $0x20, %rdx ; \ addq %r12, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r12, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r12 ; \ addq %rbx, %rax ; \ adcq %rdx, %r12 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r13 ; \ sbbq %r12, %r8 ; \ sbbq %rbx, %r9 ; \ sbbq $0x0, %r10 ; \ sbbq $0x0, %r11 ; \ movq %rdx, %r12 ; \ sbbq $0x0, %r12 ; \ movq %r13, %rdx ; \ shlq $0x20, %rdx ; \ addq %r13, %rdx ; \ movq $0xffffffff00000001, %rax ; \ mulxq %rax, %r13, %rax ; \ movl $0xffffffff, %ebx ; \ mulxq %rbx, %rbx, %r13 ; \ addq %rbx, %rax ; \ adcq %rdx, %r13 ; \ movl $0x0, %ebx ; \ adcq %rbx, %rbx ; \ subq %rax, %r8 ; \ sbbq %r13, %r9 ; \ sbbq %rbx, %r10 ; \ sbbq $0x0, %r11 ; \ sbbq $0x0, %r12 ; \ movq %rdx, %r13 ; \ sbbq $0x0, %r13 ; \ movq P0, %rbx ; \ addq %r8, %r14 ; \ adcq %r9, %r15 ; \ adcq %r10, %rcx ; \ adcq %r11, %rbx ; \ adcq %r12, %rbp ; \ adcq %r13, %rsi ; \ movl $0x0, %r8d ; \ movq $0xffffffff00000001, %rax ; \ movl $0xffffffff, %r9d ; \ movl $0x1, %r10d ; \ cmovnc %r8, %rax ; \ cmovnc %r8, %r9 ; \ cmovnc %r8, %r10 ; \ addq %rax, %r14 ; \ adcq %r9, %r15 ; \ adcq %r10, %rcx ; \ adcq %r8, %rbx ; \ adcq %r8, %rbp ; \ adcq %r8, %rsi ; \ movq %r14, P0 ; \ movq %r15, 0x8+P0 ; \ movq %rcx, 0x10+P0 ; \ movq %rbx, 0x18+P0 ; \ movq %rbp, 0x20+P0 ; \ movq %rsi, 0x28+P0 // Corresponds exactly to bignum_sub_p384 #define sub_p384(P0,P1,P2) \ movq P1, %rax ; \ subq P2, %rax ; \ movq 0x8+P1, %rdx ; \ sbbq 0x8+P2, %rdx ; \ movq 0x10+P1, %r8 ; \ sbbq 0x10+P2, %r8 ; \ movq 0x18+P1, %r9 ; \ sbbq 0x18+P2, %r9 ; \ movq 0x20+P1, %r10 ; \ sbbq 0x20+P2, %r10 ; \ movq 0x28+P1, %r11 ; \ sbbq 0x28+P2, %r11 ; \ sbbq %rcx, %rcx ; \ movl $0xffffffff, %esi ; \ andq %rsi, %rcx ; \ xorq %rsi, %rsi ; \ subq %rcx, %rsi ; \ subq %rsi, %rax ; \ movq %rax, P0 ; \ sbbq %rcx, %rdx ; \ movq %rdx, 0x8+P0 ; \ sbbq %rax, %rax ; \ andq %rsi, %rcx ; \ negq %rax; \ sbbq %rcx, %r8 ; \ movq %r8, 0x10+P0 ; \ sbbq $0x0, %r9 ; \ movq %r9, 0x18+P0 ; \ sbbq $0x0, %r10 ; \ movq %r10, 0x20+P0 ; \ sbbq $0x0, %r11 ; \ movq %r11, 0x28+P0 // Additional macros to help with final multiplexing #define load6(r0,r1,r2,r3,r4,r5,P) \ movq P, r0 ; \ movq 8+P, r1 ; \ movq 16+P, r2 ; \ movq 24+P, r3 ; \ movq 32+P, r4 ; \ movq 40+P, r5 #define store6(P,r0,r1,r2,r3,r4,r5) \ movq r0, P ; \ movq r1, 8+P ; \ movq r2, 16+P ; \ movq r3, 24+P ; \ movq r4, 32+P ; \ movq r5, 40+P ; \ #define czload6(r0,r1,r2,r3,r4,r5,P) \ cmovzq P, r0 ; \ cmovzq 8+P, r1 ; \ cmovzq 16+P, r2 ; \ cmovzq 24+P, r3 ; \ cmovzq 32+P, r4 ; \ cmovzq 40+P, r5 #define muxload6(r0,r1,r2,r3,r4,r5,P0,P1,P2) \ movq P0, r0 ; \ cmovbq P1, r0 ; \ cmovnbe P2, r0 ; \ movq 8+P0, r1 ; \ cmovbq 8+P1, r1 ; \ cmovnbe 8+P2, r1 ; \ movq 16+P0, r2 ; \ cmovbq 16+P1, r2 ; \ cmovnbe 16+P2, r2 ; \ movq 24+P0, r3 ; \ cmovbq 24+P1, r3 ; \ cmovnbe 24+P2, r3 ; \ movq 32+P0, r4 ; \ cmovbq 32+P1, r4 ; \ cmovnbe 32+P2, r4 ; \ movq 40+P0, r5 ; \ cmovbq 40+P1, r5 ; \ cmovnbe 40+P2, r5 S2N_BN_SYMBOL(p384_montjadd): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save registers and make room on stack for temporary variables // Put the input arguments in non-volatile places on the stack pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp movq %rsi, input_x movq %rdx, input_y // Main code, just a sequence of basic field operations // 8 * multiply + 3 * square + 7 * subtract amontsqr_p384(z1sq,z_1) movq input_y, %rsi amontsqr_p384(z2sq,z_2_alt) movq 
input_x, %rsi movq input_y, %rcx montmul_p384(y1a,z_2,y_1) movq input_x, %rsi movq input_y, %rcx montmul_p384(y2a,z_1,y_2) movq input_y, %rcx montmul_p384(x2a,z1sq,x_2) movq input_x, %rsi montmul_p384(x1a,z2sq,x_1) montmul_p384(y2a,z1sq,y2a) montmul_p384(y1a,z2sq,y1a) sub_p384(xd,x2a,x1a) sub_p384(yd,y2a,y1a) amontsqr_p384(zz,xd) montsqr_p384(ww,yd) montmul_p384(zzx1,zz,x1a) montmul_p384(zzx2,zz,x2a) sub_p384(resx,ww,zzx1) sub_p384(t1,zzx2,zzx1) movq input_x, %rsi montmul_p384(xd,xd,z_1) sub_p384(resx,resx,zzx2) sub_p384(t2,zzx1,resx) montmul_p384(t1,t1,y1a) movq input_y, %rcx montmul_p384(resz,xd,z_2) montmul_p384(t2,yd,t2) sub_p384(resy,t2,t1) // Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0 // The condition codes get set by a comparison (P2 != 0) - (P1 != 0) // So "NBE" <=> ~(CF \/ ZF) <=> P1 = 0 /\ ~(P2 = 0) // and "B" <=> CF <=> ~(P1 = 0) /\ P2 = 0 // and "Z" <=> ZF <=> (P1 = 0 <=> P2 = 0) // Multiplex the z outputs accordingly and re-store in resz movq input_y, %rcx load6(%r8,%r9,%r10,%r11,%rbx,%rbp,z_2) movq %r8, %rax movq %r9, %rdx orq %r10, %rax orq %r11, %rdx orq %rbx, %rax orq %rbp, %rdx orq %rdx, %rax negq %rax sbbq %rax, %rax movq input_x, %rsi load6(%r12,%r13,%r14,%r15,%rdx,%rcx,z_1) cmovzq %r12, %r8 cmovzq %r13, %r9 cmovzq %r14, %r10 cmovzq %r15, %r11 cmovzq %rdx, %rbx cmovzq %rcx, %rbp orq %r13, %r12 orq %r15, %r14 orq %rcx, %rdx orq %r14, %r12 orq %r12, %rdx negq %rdx sbbq %rdx, %rdx cmpq %rdx, %rax czload6(%r8,%r9,%r10,%r11,%rbx,%rbp,resz) store6(resz,%r8,%r9,%r10,%r11,%rbx,%rbp) // Multiplex the x and y outputs too, keeping the results in registers movq input_y, %rcx movq input_x, %rsi muxload6(%r8,%r9,%r10,%r11,%rbx,%rbp,resx,x_1,x_2) muxload6(%r12,%r13,%r14,%r15,%rdx,%rax,resy,y_1,y_2) // Finally store back the multiplexed values store6(x_3,%r8,%r9,%r10,%r11,%rbx,%rbp) load6(%r8,%r9,%r10,%r11,%rbx,%rbp,resz) store6(y_3,%r12,%r13,%r14,%r15,%rdx,%rax) store6(z_3,%r8,%r9,%r10,%r11,%rbx,%rbp) // Restore stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
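The main code of p384_montjadd above is the standard Jacobian point-addition schedule: 12 Montgomery multiplies, 4 Montgomery squarings and 7 modular subtractions over the field. As a cross-reference, here is a minimal Python sketch of the same schedule in plain modular arithmetic (not Montgomery form), reusing the temporary names from the assembly (z1sq, y1a, xd, resx, ...). The function name and structure are mine, not part of s2n-bignum, and the sketch does not cover the doubling case P1 = P2, which needs a separate formula.

p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def jacobian_add_p384(p1, p2):
    # Points are (x, y, z) Jacobian triples of ordinary integers mod p_384,
    # representing the affine point (x/z^2, y/z^3); z == 0 is the point at infinity.
    x1, y1, z1 = p1
    x2, y2, z2 = p2
    if z1 == 0:                                  # P1 = 0  ->  result is P2
        return p2
    if z2 == 0:                                  # P2 = 0  ->  result is P1
        return p1
    z1sq = z1 * z1 % p_384
    z2sq = z2 * z2 % p_384
    y1a  = y1 * z2 % p_384 * z2sq % p_384        # S1 = y1 * z2^3
    y2a  = y2 * z1 % p_384 * z1sq % p_384        # S2 = y2 * z1^3
    x1a  = x1 * z2sq % p_384                     # U1 = x1 * z2^2
    x2a  = x2 * z1sq % p_384                     # U2 = x2 * z1^2
    xd   = (x2a - x1a) % p_384                   # H = U2 - U1
    yd   = (y2a - y1a) % p_384                   # R = S2 - S1
    zz   = xd * xd % p_384                       # H^2
    ww   = yd * yd % p_384                       # R^2
    zzx1 = zz * x1a % p_384                      # U1 * H^2
    zzx2 = zz * x2a % p_384                      # U2 * H^2
    resx = (ww - zzx1 - zzx2) % p_384            # X3 = R^2 - H^3 - 2*U1*H^2
    t1   = (zzx2 - zzx1) * y1a % p_384           # S1 * H^3
    t2   = (zzx1 - resx) * yd % p_384            # R * (U1*H^2 - X3)
    resy = (t2 - t1) % p_384                     # Y3
    resz = xd * z1 % p_384 * z2 % p_384          # Z3 = H * z1 * z2
    return (resx, resy, resz)

Converting two P-384 points out of Montgomery form, running them through this sketch, and comparing against the assembly's output (converted back the same way) is one way to sanity-check the temporaries; the z == 0 short-circuits mirror the multiplexing block at the end of the routine.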
marvin-hansen/iggy-streaming-system
43,692
thirdparty/crates/aws-lc-sys-0.25.1/aws-lc/third_party/s2n-bignum/x86_att/p384/p384_montjadd_alt.S
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT-0 // ---------------------------------------------------------------------------- // Point addition on NIST curve P-384 in Montgomery-Jacobian coordinates // // extern void p384_montjadd_alt // (uint64_t p3[static 18],uint64_t p1[static 18],uint64_t p2[static 18]); // // Does p3 := p1 + p2 where all points are regarded as Jacobian triples with // each coordinate in the Montgomery domain, i.e. x' = (2^384 * x) mod p_384. // A Jacobian triple (x',y',z') represents affine point (x/z^2,y/z^3). // // Standard x86-64 ABI: RDI = p3, RSI = p1, RDX = p2 // Microsoft x64 ABI: RCX = p3, RDX = p1, R8 = p2 // ---------------------------------------------------------------------------- #include "_internal_s2n_bignum.h" S2N_BN_SYM_VISIBILITY_DIRECTIVE(p384_montjadd_alt) S2N_BN_SYM_PRIVACY_DIRECTIVE(p384_montjadd_alt) .text .balign 4 // Size of individual field elements #define NUMSIZE 48 // Pointer-offset pairs for inputs and outputs // These assume %rdi = p3, %rsi = p1 and %rcx = p2, // which needs to be set up explicitly before use. // The %rdi value never changes, however. #define x_1 0(%rsi) #define y_1 NUMSIZE(%rsi) #define z_1 (2*NUMSIZE)(%rsi) #define x_2 0(%rcx) #define y_2 NUMSIZE(%rcx) #define z_2 (2*NUMSIZE)(%rcx) #define x_3 0(%rdi) #define y_3 NUMSIZE(%rdi) #define z_3 (2*NUMSIZE)(%rdi) // In one place it's convenient to use another register // since the squaring function overwrites %rcx #define z_2_alt (2*NUMSIZE)(%rsi) // Pointer-offset pairs for temporaries, with some aliasing // NSPACE is the total stack needed for these temporaries #define z1sq (NUMSIZE*0)(%rsp) #define ww (NUMSIZE*0)(%rsp) #define resx (NUMSIZE*0)(%rsp) #define yd (NUMSIZE*1)(%rsp) #define y2a (NUMSIZE*1)(%rsp) #define x2a (NUMSIZE*2)(%rsp) #define zzx2 (NUMSIZE*2)(%rsp) #define zz (NUMSIZE*3)(%rsp) #define t1 (NUMSIZE*3)(%rsp) #define t2 (NUMSIZE*4)(%rsp) #define x1a (NUMSIZE*4)(%rsp) #define zzx1 (NUMSIZE*4)(%rsp) #define resy (NUMSIZE*4)(%rsp) #define xd (NUMSIZE*5)(%rsp) #define z2sq (NUMSIZE*5)(%rsp) #define resz (NUMSIZE*5)(%rsp) #define y1a (NUMSIZE*6)(%rsp) // Temporaries for the actual input pointers #define input_x (NUMSIZE*7)(%rsp) #define input_y (NUMSIZE*7+8)(%rsp) #define NSPACE (NUMSIZE*7+16) // Corresponds exactly to bignum_montmul_p384_alt #define montmul_p384(P0,P1,P2) \ movq P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ movq %rax, %r8 ; \ movq %rdx, %r9 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ xorl %r10d, %r10d ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ xorl %r11d, %r11d ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ xorl %r12d, %r12d ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ xorl %r13d, %r13d ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ xorl %r14d, %r14d ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ xorl %r15d, %r15d ; \ movq %r8, %rbx ; \ shlq $0x20, %rbx ; \ addq %r8, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r8 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r8, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r9 ; \ sbbq %rdx, %r10 ; \ sbbq %rbp, %r11 ; \ sbbq $0x0, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r14 ; \ adcq $0x0, %r15 ; \ movq 0x8+P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %r8, %r8 ; \ movq 0x8+P1, %rax ; 
\ mulq %rbx; \ subq %r8, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %r8, %r8 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ subq %r8, %rdx ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ sbbq %r8, %r8 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %r8, %rdx ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %r8, %r8 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %r8, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %r8, %r8 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %r8, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %r8, %r8 ; \ negq %r8; \ movq %r9, %rbx ; \ shlq $0x20, %rbx ; \ addq %r9, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r9 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r9, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r10 ; \ sbbq %rdx, %r11 ; \ sbbq %rbp, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %r14 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r15 ; \ adcq $0x0, %r8 ; \ movq 0x10+P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %r9, %r9 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %r9, %rdx ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ sbbq %r9, %r9 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ subq %r9, %rdx ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %r9, %r9 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %r9, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %r9, %r9 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %r9, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %r9, %r9 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %r9, %rdx ; \ addq %rax, %r15 ; \ adcq %rdx, %r8 ; \ sbbq %r9, %r9 ; \ negq %r9; \ movq %r10, %rbx ; \ shlq $0x20, %rbx ; \ addq %r10, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r10 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r10, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r11 ; \ sbbq %rdx, %r12 ; \ sbbq %rbp, %r13 ; \ sbbq $0x0, %r14 ; \ sbbq $0x0, %r15 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r8 ; \ adcq $0x0, %r9 ; \ movq 0x18+P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ sbbq %r10, %r10 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %r10, %rdx ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %r10, %r10 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ subq %r10, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %r10, %r10 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %r10, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %r10, %r10 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %r10, %rdx ; \ addq %rax, %r15 ; \ adcq %rdx, %r8 ; \ sbbq %r10, %r10 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %r10, %rdx ; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %r10, %r10 ; \ negq %r10; \ movq %r11, %rbx ; \ shlq $0x20, %rbx ; \ addq %r11, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r11 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r11, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r12 ; \ sbbq %rdx, %r13 ; \ sbbq %rbp, %r14 ; \ sbbq $0x0, %r15 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r9 ; \ adcq $0x0, %r10 ; \ movq 0x20+P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %r11, %r11 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %r11, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %r11, %r11 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ subq %r11, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %r11, %r11 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq 
%r11, %rdx ; \ addq %rax, %r15 ; \ adcq %rdx, %r8 ; \ sbbq %r11, %r11 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %r11, %rdx ; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %r11, %r11 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %r11, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %r11, %r11 ; \ negq %r11; \ movq %r12, %rbx ; \ shlq $0x20, %rbx ; \ addq %r12, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r12 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r12, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r13 ; \ sbbq %rdx, %r14 ; \ sbbq %rbp, %r15 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r10 ; \ adcq $0x0, %r11 ; \ movq 0x28+P2, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %r12, %r12 ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %r12, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %r12, %r12 ; \ movq 0x10+P1, %rax ; \ mulq %rbx; \ subq %r12, %rdx ; \ addq %rax, %r15 ; \ adcq %rdx, %r8 ; \ sbbq %r12, %r12 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %r12, %rdx ; \ addq %rax, %r8 ; \ adcq %rdx, %r9 ; \ sbbq %r12, %r12 ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %r12, %rdx ; \ addq %rax, %r9 ; \ adcq %rdx, %r10 ; \ sbbq %r12, %r12 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %r12, %rdx ; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %r12, %r12 ; \ negq %r12; \ movq %r13, %rbx ; \ shlq $0x20, %rbx ; \ addq %r13, %rbx ; \ xorl %ebp, %ebp ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r13 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %r13, %rax ; \ adcq %rbx, %rdx ; \ adcl %ebp, %ebp ; \ subq %rax, %r14 ; \ sbbq %rdx, %r15 ; \ sbbq %rbp, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %r10 ; \ sbbq $0x0, %rbx ; \ addq %rbx, %r11 ; \ adcq $0x0, %r12 ; \ xorl %edx, %edx ; \ xorl %ebp, %ebp ; \ xorl %r13d, %r13d ; \ movq $0xffffffff00000001, %rax ; \ addq %r14, %rax ; \ movl $0xffffffff, %ebx ; \ adcq %r15, %rbx ; \ movl $0x1, %ecx ; \ adcq %r8, %rcx ; \ adcq %r9, %rdx ; \ adcq %r10, %rbp ; \ adcq %r11, %r13 ; \ adcq $0x0, %r12 ; \ cmovneq %rax, %r14 ; \ cmovneq %rbx, %r15 ; \ cmovneq %rcx, %r8 ; \ cmovneq %rdx, %r9 ; \ cmovneq %rbp, %r10 ; \ cmovneq %r13, %r11 ; \ movq %r14, P0 ; \ movq %r15, 0x8+P0 ; \ movq %r8, 0x10+P0 ; \ movq %r9, 0x18+P0 ; \ movq %r10, 0x20+P0 ; \ movq %r11, 0x28+P0 // Corresponds exactly to bignum_montsqr_p384_alt #define montsqr_p384(P0,P1) \ movq P1, %rbx ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ movq %rax, %r9 ; \ movq %rdx, %r10 ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ movq %rax, %r11 ; \ movq %rdx, %r12 ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ movq %rax, %r13 ; \ movq %rdx, %r14 ; \ movq 0x18+P1, %rax ; \ mulq 0x20+P1; \ movq %rax, %r15 ; \ movq %rdx, %rcx ; \ movq 0x10+P1, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rbp, %rbp ; \ movq 0x8+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r11 ; \ adcq %rdx, %r12 ; \ sbbq %rbp, %rbp ; \ movq 0x8+P1, %rbx ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %rbp, %rbp ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %rbp, %rbp ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ adcq $0x0, %rcx ; \ movq 0x20+P1, %rbx ; \ movq P1, %rax ; \ mulq %rbx; \ addq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %rbp, %rbp ; \ movq 0x10+P1, %rbx ; \ movq 0x18+P1, %rax ; \ mulq %rbx; \ 
subq %rbp, %rdx ; \ addq %rax, %r13 ; \ adcq %rdx, %r14 ; \ sbbq %rbp, %rbp ; \ movq 0x20+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %rbp, %rbp ; \ movq 0x28+P1, %rax ; \ mulq %rbx; \ subq %rbp, %rdx ; \ addq %rax, %r15 ; \ adcq %rdx, %rcx ; \ sbbq %rbp, %rbp ; \ xorl %ebx, %ebx ; \ movq 0x18+P1, %rax ; \ mulq 0x28+P1; \ subq %rbp, %rdx ; \ xorl %ebp, %ebp ; \ addq %rax, %rcx ; \ adcq %rdx, %rbx ; \ adcl %ebp, %ebp ; \ movq 0x20+P1, %rax ; \ mulq 0x28+P1; \ addq %rax, %rbx ; \ adcq %rdx, %rbp ; \ xorl %r8d, %r8d ; \ addq %r9, %r9 ; \ adcq %r10, %r10 ; \ adcq %r11, %r11 ; \ adcq %r12, %r12 ; \ adcq %r13, %r13 ; \ adcq %r14, %r14 ; \ adcq %r15, %r15 ; \ adcq %rcx, %rcx ; \ adcq %rbx, %rbx ; \ adcq %rbp, %rbp ; \ adcl %r8d, %r8d ; \ movq P1, %rax ; \ mulq %rax; \ movq %r8, P0 ; \ movq %rax, %r8 ; \ movq 0x8+P1, %rax ; \ movq %rbp, 0x8+P0 ; \ addq %rdx, %r9 ; \ sbbq %rbp, %rbp ; \ mulq %rax; \ negq %rbp; \ adcq %rax, %r10 ; \ adcq %rdx, %r11 ; \ sbbq %rbp, %rbp ; \ movq 0x10+P1, %rax ; \ mulq %rax; \ negq %rbp; \ adcq %rax, %r12 ; \ adcq %rdx, %r13 ; \ sbbq %rbp, %rbp ; \ movq 0x18+P1, %rax ; \ mulq %rax; \ negq %rbp; \ adcq %rax, %r14 ; \ adcq %rdx, %r15 ; \ sbbq %rbp, %rbp ; \ movq 0x20+P1, %rax ; \ mulq %rax; \ negq %rbp; \ adcq %rax, %rcx ; \ adcq %rdx, %rbx ; \ sbbq %rbp, %rbp ; \ movq 0x28+P1, %rax ; \ mulq %rax; \ negq %rbp; \ adcq 0x8+P0, %rax ; \ adcq P0, %rdx ; \ movq %rax, %rbp ; \ movq %rdx, %rsi ; \ movq %rbx, P0 ; \ movq %r8, %rbx ; \ shlq $0x20, %rbx ; \ addq %r8, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r8 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r8 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r8, %r9 ; \ sbbq %rdx, %r10 ; \ sbbq %rax, %r11 ; \ sbbq $0x0, %r12 ; \ sbbq $0x0, %r13 ; \ movq %rbx, %r8 ; \ sbbq $0x0, %r8 ; \ movq %r9, %rbx ; \ shlq $0x20, %rbx ; \ addq %r9, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r9 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r9 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r9, %r10 ; \ sbbq %rdx, %r11 ; \ sbbq %rax, %r12 ; \ sbbq $0x0, %r13 ; \ sbbq $0x0, %r8 ; \ movq %rbx, %r9 ; \ sbbq $0x0, %r9 ; \ movq %r10, %rbx ; \ shlq $0x20, %rbx ; \ addq %r10, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r10 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r10 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r10, %r11 ; \ sbbq %rdx, %r12 ; \ sbbq %rax, %r13 ; \ sbbq $0x0, %r8 ; \ sbbq $0x0, %r9 ; \ movq %rbx, %r10 ; \ sbbq $0x0, %r10 ; \ movq %r11, %rbx ; \ shlq $0x20, %rbx ; \ addq %r11, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r11 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r11 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r11, %r12 ; \ sbbq %rdx, %r13 ; \ sbbq %rax, %r8 ; \ sbbq $0x0, %r9 ; \ sbbq $0x0, %r10 ; \ movq %rbx, %r11 ; \ sbbq $0x0, %r11 ; \ movq %r12, %rbx ; \ shlq $0x20, %rbx ; \ addq %r12, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r12 ; \ movq $0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r12 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r12, %r13 ; \ sbbq %rdx, %r8 ; \ sbbq %rax, %r9 ; \ sbbq $0x0, %r10 ; \ sbbq $0x0, %r11 ; \ movq %rbx, %r12 ; \ sbbq $0x0, %r12 ; \ movq %r13, %rbx ; \ shlq $0x20, %rbx ; \ addq %r13, %rbx ; \ movq $0xffffffff00000001, %rax ; \ mulq %rbx; \ movq %rdx, %r13 ; \ movq 
$0xffffffff, %rax ; \ mulq %rbx; \ addq %rax, %r13 ; \ movl $0x0, %eax ; \ adcq %rbx, %rdx ; \ adcl %eax, %eax ; \ subq %r13, %r8 ; \ sbbq %rdx, %r9 ; \ sbbq %rax, %r10 ; \ sbbq $0x0, %r11 ; \ sbbq $0x0, %r12 ; \ movq %rbx, %r13 ; \ sbbq $0x0, %r13 ; \ movq P0, %rbx ; \ addq %r8, %r14 ; \ adcq %r9, %r15 ; \ adcq %r10, %rcx ; \ adcq %r11, %rbx ; \ adcq %r12, %rbp ; \ adcq %r13, %rsi ; \ movl $0x0, %r8d ; \ adcq %r8, %r8 ; \ xorq %r11, %r11 ; \ xorq %r12, %r12 ; \ xorq %r13, %r13 ; \ movq $0xffffffff00000001, %rax ; \ addq %r14, %rax ; \ movl $0xffffffff, %r9d ; \ adcq %r15, %r9 ; \ movl $0x1, %r10d ; \ adcq %rcx, %r10 ; \ adcq %rbx, %r11 ; \ adcq %rbp, %r12 ; \ adcq %rsi, %r13 ; \ adcq $0x0, %r8 ; \ cmovneq %rax, %r14 ; \ cmovneq %r9, %r15 ; \ cmovneq %r10, %rcx ; \ cmovneq %r11, %rbx ; \ cmovneq %r12, %rbp ; \ cmovneq %r13, %rsi ; \ movq %r14, P0 ; \ movq %r15, 0x8+P0 ; \ movq %rcx, 0x10+P0 ; \ movq %rbx, 0x18+P0 ; \ movq %rbp, 0x20+P0 ; \ movq %rsi, 0x28+P0 // Corresponds exactly to bignum_sub_p384 #define sub_p384(P0,P1,P2) \ movq P1, %rax ; \ subq P2, %rax ; \ movq 0x8+P1, %rdx ; \ sbbq 0x8+P2, %rdx ; \ movq 0x10+P1, %r8 ; \ sbbq 0x10+P2, %r8 ; \ movq 0x18+P1, %r9 ; \ sbbq 0x18+P2, %r9 ; \ movq 0x20+P1, %r10 ; \ sbbq 0x20+P2, %r10 ; \ movq 0x28+P1, %r11 ; \ sbbq 0x28+P2, %r11 ; \ sbbq %rcx, %rcx ; \ movl $0xffffffff, %esi ; \ andq %rsi, %rcx ; \ xorq %rsi, %rsi ; \ subq %rcx, %rsi ; \ subq %rsi, %rax ; \ movq %rax, P0 ; \ sbbq %rcx, %rdx ; \ movq %rdx, 0x8+P0 ; \ sbbq %rax, %rax ; \ andq %rsi, %rcx ; \ negq %rax; \ sbbq %rcx, %r8 ; \ movq %r8, 0x10+P0 ; \ sbbq $0x0, %r9 ; \ movq %r9, 0x18+P0 ; \ sbbq $0x0, %r10 ; \ movq %r10, 0x20+P0 ; \ sbbq $0x0, %r11 ; \ movq %r11, 0x28+P0 // Additional macros to help with final multiplexing #define load6(r0,r1,r2,r3,r4,r5,P) \ movq P, r0 ; \ movq 8+P, r1 ; \ movq 16+P, r2 ; \ movq 24+P, r3 ; \ movq 32+P, r4 ; \ movq 40+P, r5 #define store6(P,r0,r1,r2,r3,r4,r5) \ movq r0, P ; \ movq r1, 8+P ; \ movq r2, 16+P ; \ movq r3, 24+P ; \ movq r4, 32+P ; \ movq r5, 40+P ; \ #define czload6(r0,r1,r2,r3,r4,r5,P) \ cmovzq P, r0 ; \ cmovzq 8+P, r1 ; \ cmovzq 16+P, r2 ; \ cmovzq 24+P, r3 ; \ cmovzq 32+P, r4 ; \ cmovzq 40+P, r5 #define muxload6(r0,r1,r2,r3,r4,r5,P0,P1,P2) \ movq P0, r0 ; \ cmovbq P1, r0 ; \ cmovnbe P2, r0 ; \ movq 8+P0, r1 ; \ cmovbq 8+P1, r1 ; \ cmovnbe 8+P2, r1 ; \ movq 16+P0, r2 ; \ cmovbq 16+P1, r2 ; \ cmovnbe 16+P2, r2 ; \ movq 24+P0, r3 ; \ cmovbq 24+P1, r3 ; \ cmovnbe 24+P2, r3 ; \ movq 32+P0, r4 ; \ cmovbq 32+P1, r4 ; \ cmovnbe 32+P2, r4 ; \ movq 40+P0, r5 ; \ cmovbq 40+P1, r5 ; \ cmovnbe 40+P2, r5 S2N_BN_SYMBOL(p384_montjadd_alt): #if WINDOWS_ABI pushq %rdi pushq %rsi movq %rcx, %rdi movq %rdx, %rsi movq %r8, %rdx #endif // Save registers and make room on stack for temporary variables // Put the input arguments in non-volatile places on the stack pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 pushq %r15 subq $NSPACE, %rsp movq %rsi, input_x movq %rdx, input_y // Main code, just a sequence of basic field operations // 8 * multiply + 3 * square + 7 * subtract montsqr_p384(z1sq,z_1) movq input_y, %rsi montsqr_p384(z2sq,z_2_alt) movq input_x, %rsi movq input_y, %rcx montmul_p384(y1a,z_2,y_1) movq input_x, %rsi movq input_y, %rcx montmul_p384(y2a,z_1,y_2) movq input_y, %rcx montmul_p384(x2a,z1sq,x_2) movq input_x, %rsi montmul_p384(x1a,z2sq,x_1) montmul_p384(y2a,z1sq,y2a) montmul_p384(y1a,z2sq,y1a) sub_p384(xd,x2a,x1a) sub_p384(yd,y2a,y1a) montsqr_p384(zz,xd) montsqr_p384(ww,yd) montmul_p384(zzx1,zz,x1a) montmul_p384(zzx2,zz,x2a) 
sub_p384(resx,ww,zzx1) sub_p384(t1,zzx2,zzx1) movq input_x, %rsi montmul_p384(xd,xd,z_1) sub_p384(resx,resx,zzx2) sub_p384(t2,zzx1,resx) montmul_p384(t1,t1,y1a) movq input_y, %rcx montmul_p384(resz,xd,z_2) montmul_p384(t2,yd,t2) sub_p384(resy,t2,t1) // Load in the z coordinates of the inputs to check for P1 = 0 and P2 = 0 // The condition codes get set by a comparison (P2 != 0) - (P1 != 0) // So "NBE" <=> ~(CF \/ ZF) <=> P1 = 0 /\ ~(P2 = 0) // and "B" <=> CF <=> ~(P1 = 0) /\ P2 = 0 // and "Z" <=> ZF <=> (P1 = 0 <=> P2 = 0) // Multiplex the z outputs accordingly and re-store in resz movq input_y, %rcx load6(%r8,%r9,%r10,%r11,%rbx,%rbp,z_2) movq %r8, %rax movq %r9, %rdx orq %r10, %rax orq %r11, %rdx orq %rbx, %rax orq %rbp, %rdx orq %rdx, %rax negq %rax sbbq %rax, %rax movq input_x, %rsi load6(%r12,%r13,%r14,%r15,%rdx,%rcx,z_1) cmovzq %r12, %r8 cmovzq %r13, %r9 cmovzq %r14, %r10 cmovzq %r15, %r11 cmovzq %rdx, %rbx cmovzq %rcx, %rbp orq %r13, %r12 orq %r15, %r14 orq %rcx, %rdx orq %r14, %r12 orq %r12, %rdx negq %rdx sbbq %rdx, %rdx cmpq %rdx, %rax czload6(%r8,%r9,%r10,%r11,%rbx,%rbp,resz) store6(resz,%r8,%r9,%r10,%r11,%rbx,%rbp) // Multiplex the x and y outputs too, keeping the results in registers movq input_y, %rcx movq input_x, %rsi muxload6(%r8,%r9,%r10,%r11,%rbx,%rbp,resx,x_1,x_2) muxload6(%r12,%r13,%r14,%r15,%rdx,%rax,resy,y_1,y_2) // Finally store back the multiplexed values store6(x_3,%r8,%r9,%r10,%r11,%rbx,%rbp) load6(%r8,%r9,%r10,%r11,%rbx,%rbp,resz) store6(y_3,%r12,%r13,%r14,%r15,%rdx,%rax) store6(z_3,%r8,%r9,%r10,%r11,%rbx,%rbp) // Restore stack and registers addq $NSPACE, %rsp popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx #if WINDOWS_ABI popq %rsi popq %rdi #endif ret #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack, "", %progbits #endif
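The repeated "shlq $0x20 / addq / mulq" groups inside montmul_p384 and montsqr_p384 above are word-by-word Montgomery reduction: since p_384 ≡ 0xffffffff (mod 2^64), the negated inverse of p_384 mod 2^64 is 2^32 + 1, so the per-limb quotient is just w * (2^32 + 1) mod 2^64 (the shlq/addq pair). A minimal Python sketch of that reduction in plain integer arithmetic follows; the names are mine, and the assembly realizes the same per-limb cancellation with multiplications by fixed constants and subtractions rather than a literal q * p_384 addition.

p_384 = 2**384 - 2**128 - 2**96 + 2**32 - 1

def montgomery_reduce_p384(t):
    # Given 0 <= t < 2^384 * p_384, return (t * 2^-384) mod p_384.
    for _ in range(6):                           # one reduction step per 64-bit limb
        w = t & (2**64 - 1)                      # low limb to be cancelled
        q = (w * (2**32 + 1)) & (2**64 - 1)      # q = w * (-p_384^-1) mod 2^64
        t = (t + q * p_384) >> 64                # low limb is now zero; shift it out
    return t - p_384 if t >= p_384 else t        # final conditional subtraction (the cmovne block)

def montmul_p384_model(a, b):
    # Model of the macro's contract: for a' = a*2^384 and b' = b*2^384 (mod p_384),
    # returns a*b*2^384 mod p_384, i.e. the product stays in the Montgomery domain
    # x' = (2^384 * x) mod p_384 described in the file header.
    return montgomery_reduce_p384(a * b)

With R = 2**384 % p_384, checking that montmul_p384_model(x * R % p_384, y * R % p_384) == x * y * R % p_384 for random x, y < p_384 is a quick way to confirm the intended behaviour of the corresponding assembly macro.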